diff --git a/.codeclimate.yml b/.codeclimate.yml deleted file mode 100644 index 842a8700ff9..00000000000 --- a/.codeclimate.yml +++ /dev/null @@ -1,84 +0,0 @@ -# To validate, run ./scripts/code-climate.sh validate-config -# -# SEE https://docs.codeclimate.com/docs/advanced-configuration -# -version: "2" -# SEE https://docs.codeclimate.com/docs/maintainability#section-checks -checks: - argument-count: - enabled: false - config: - threshold: 6 - complex-logic: - enabled: true - config: - threshold: 10 - file-lines: - enabled: true - config: - threshold: 500 - method-complexity: - enabled: false - config: - threshold: 6 - method-count: - enabled: true - config: - threshold: 20 - method-lines: - enabled: true - config: - threshold: 50 - nested-control-flow: - enabled: true - config: - threshold: 4 - return-statements: - enabled: true - config: - threshold: 4 - similar-code: - enabled: false - config: - threshold: #language-specific defaults. overrides affect all languages. - identical-code: - enabled: true - config: - threshold: #language-specific defaults. overrides affect all languages. - -plugins: - # https://github.com/PyCQA/bandit#configuration - bandit: - enabled: true - # https://docs.codeclimate.com/docs/eslint - eslint: - enabled: false - channel: "eslint-6" - config: - extensions: - - .js - -exclude_patterns: - - "config/" - - "db/" - - "dist/" - - "features/" - - "**/node_modules/" - - "script/" - - "**/spec/" - - "**/test/" - - "**/tests/" - - "**/vendor/" - - "**/*.d.ts" - - "**/.venv/" - - ".venv/" - - "**/healthcheck.py" - - "**/client-sdk/" - - "**/generated_code/" - - "**/migrations/" - - "**/*.js" - - "**/pytest-simcore/" - - "**/pytest_plugin/" - - "**/sandbox/" - - packages/models-library/src/models_library/utils/_original_fastapi_encoders.py - - services/web/server/src/simcore_service_webserver/exporter/formatters/sds/xlsx/templates/code_description.py diff --git a/.codecov.yml b/.codecov.yml index 2b5040ee14b..a3f9e9e6dd0 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,59 +1,136 @@ codecov: - require_ci_to_pass: true + require_ci_to_pass: false branch: master +github_checks: + annotations: false -coverage: - precision: 1 - round: down - range: "70...100" +flag_management: + default_rules: + carryforward: true + statuses: + - type: project + target: auto + threshold: 5% + - type: patch + target: auto + threshold: 5% + + +component_management: + default_rules: + statuses: + - type: project + target: auto + threshold: 5% + branches: + - "!master" + individual_components: + - component_id: api + paths: + - api/** + - component_id: pkg_aws_library + paths: + - packages/aws-library/** + - component_id: pkg_dask_task_models_library + paths: + - packages/dask-task-models-library/** + - component_id: pkg_models_library + paths: + - packages/models-library/** + - component_id: pkg_notifications_library + paths: + - packages/notifications-library/** + - component_id: pkg_postgres_database + paths: + - packages/postgres-database/** + - component_id: pkg_service_integration + paths: + - packages/service-integration/** + - component_id: pkg_service_library + paths: + - packages/service-library/** + - component_id: pkg_settings_library + paths: + - packages/settings-library/** + - component_id: pkg_simcore_sdk + paths: + - packages/simcore-sdk/** + - component_id: agent + paths: + - services/agent/** + - component_id: api_server + paths: + - services/api-server/** + - component_id: autoscaling + paths: + - services/autoscaling/** + - component_id: catalog + paths: + - 
services/catalog/** + - component_id: clusters_keeper + paths: + - services/clusters-keeper/** + - component_id: dask_sidecar + paths: + - services/dask-sidecar/** + - component_id: datcore_adapter + paths: + - services/datcore-adapter/** + - component_id: director + paths: + - services/director/** + - component_id: director_v2 + paths: + - services/director-v2/** + - component_id: dynamic_scheduler + paths: + - services/dynamic-scheduler/** + - component_id: dynamic_sidecar + paths: + - services/dynamic-sidecar/** + - component_id: efs_guardian + paths: + - services/efs-guardian/** + - component_id: invitations + paths: + - services/invitations/** + - component_id: payments + paths: + - services/payments/** + - component_id: resource_usage_tracker + paths: + - services/resource-usage-tracker/** + - component_id: storage + paths: + - services/storage/** + - component_id: webclient + paths: + - services/static-webserver/client/** + - component_id: webserver + paths: + - services/web/server/** +coverage: status: project: default: informational: true - threshold: 1% - paths: - - api - - packages - - services - carryforward: true - api: - informational: true - threshold: 1% - paths: - - api - carryforward: true - packages: - informational: true - threshold: 1% - paths: - - packages - carryforward: true - services: - informational: true - threshold: 1% - paths: - - services - carryforward: true + threshold: 5% patch: default: informational: true - threshold: 1% - paths: - - api - - packages - - services - -parsers: - gcov: - branch_detection: - conditional: yes - loop: yes - method: no - macro: no + threshold: 5% comment: - layout: "reach,diff,flags,tree" + layout: "header,diff,flags,components,footer" behavior: default require_changes: false + show_carryforward_flags: true + + +ignore: + - "test_*.py" + - "**/generated_models/*.py" + - "**/generated_code/*.py" + - "**/node_modules/**" diff --git a/.coveragerc b/.coveragerc index 21400449ce4..c5ea6a88430 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,25 +3,27 @@ branch = True omit = */tests/* */generated_code/* + */_original_fastapi_encoders.py parallel = True [report] # Regexes for lines to exclude from consideration -exclude_lines = - # Have to re-enable the standard pragma - pragma: no cover - +exclude_also = # Don't complain about missing debug-only code: def __repr__ if self\.debug - # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError - # Don't complain if non-runnable code isn't run: if 0: if __name__ == .__main__.: + if __name__ == __main__.: + class .*\bProtocol\): + # Don't complain about abstract methods, they aren't run: + @(abc\.)?abstract(((class|static)?method)|property) + # Don't complain about type checking + if TYPE_CHECKING: ignore_errors = True show_missing = True diff --git a/.dockerignore b/.dockerignore index 34ae3126ea5..a00f0b10508 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,7 +8,7 @@ ops/ *.py[cod] # virtualenv -.venv +**/.venv #python eggs **/*.egg-info diff --git a/.env-devel b/.env-devel index c3aeac82fc4..1842a982b85 100644 --- a/.env-devel +++ b/.env-devel @@ -1,5 +1,6 @@ +# local development # -# - Keep it alfphabetical order and grouped by prefix [see vscode cmd: Sort Lines Ascending] +# - Keep it alphabetical order within each group of env vars [see vscode cmd: Sort Lines Ascending] # - To expose: # set -o allexport # source .env @@ -9,45 +10,183 @@ # unset $(grep -v '^#' .env | sed -E 's/(.*)=.*/\1/' | xargs) # 
-AGENT_VOLUMES_CLEANUP_S3_SECURE=0 -AGENT_VOLUMES_CLEANUP_S3_ENDPOINT=172.17.0.1:9001 +AGENT_LOGLEVEL=INFO AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY=12345678 -AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY=12345678 AGENT_VOLUMES_CLEANUP_S3_BUCKET=simcore-volume-backups +AGENT_VOLUMES_CLEANUP_S3_ENDPOINT=http://172.17.0.1:9001 AGENT_VOLUMES_CLEANUP_S3_PROVIDER=MINIO +AGENT_VOLUMES_CLEANUP_S3_REGION=us-east-1 +AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY=12345678 +AGENT_TRACING=null API_SERVER_DEV_FEATURES_ENABLED=0 +API_SERVER_LOGLEVEL=INFO +API_SERVER_PROFILING=1 +API_SERVER_TRACING=null +TRAEFIK_API_SERVER_INFLIGHTREQ_AMOUNT=25 -BF_API_KEY=none -BF_API_SECRET=none +AUTOSCALING_DASK=null +AUTOSCALING_DRAIN_NODES_WITH_LABELS=False +AUTOSCALING_DOCKER_JOIN_DRAINED=True +AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION=False +AUTOSCALING_EC2_ACCESS=null +AUTOSCALING_EC2_INSTANCES=null +AUTOSCALING_LOGLEVEL=INFO +AUTOSCALING_NODES_MONITORING=null +AUTOSCALING_POLL_INTERVAL="00:00:10" +AUTOSCALING_SSM_ACCESS=null +AUTOSCALING_TRACING=null +AWS_S3_CLI_S3=null + +CATALOG_BACKGROUND_TASK_REST_TIME=60 CATALOG_DEV_FEATURES_ENABLED=0 +CATALOG_HOST=catalog +CATALOG_LOGLEVEL=INFO +CATALOG_PORT=8000 +CATALOG_PROFILING=1 CATALOG_SERVICES_DEFAULT_RESOURCES='{"CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": {"limit": 2147483648, "reservation": 2147483648}}' CATALOG_SERVICES_DEFAULT_SPECIFICATIONS='{}' +CATALOG_TRACING=null + +CELERY_RESULT_EXPIRES=P7D + +CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH='{"type":"tls","tls_ca_file":"/home/scu/.dask/dask-crt.pem","tls_client_cert":"/home/scu/.dask/dask-crt.pem","tls_client_key":"/home/scu/.dask/dask-key.pem"}' +CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG=master-github-latest +CLUSTERS_KEEPER_DASK_NTHREADS=0 +CLUSTERS_KEEPER_DASK_WORKER_SATURATION=inf +CLUSTERS_KEEPER_EC2_ACCESS=null +CLUSTERS_KEEPER_SSM_ACCESS=null +CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX="" +CLUSTERS_KEEPER_LOGLEVEL=INFO +CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION=5 +CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES=null +CLUSTERS_KEEPER_TASK_INTERVAL=00:00:30 +CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES=null +CLUSTERS_KEEPER_TRACING=null DASK_SCHEDULER_HOST=dask-scheduler DASK_SCHEDULER_PORT=8786 +DASK_SIDECAR_LOGLEVEL=INFO +DASK_TLS_CA_FILE=/home/scu/.dask/dask-crt.pem +DASK_TLS_CERT=/home/scu/.dask/dask-crt.pem +DASK_TLS_KEY=/home/scu/.dask/dask-key.pem -DIRECTOR_REGISTRY_CACHING_TTL=900 +DIRECTOR_DEFAULT_MAX_MEMORY=2147483648 +DIRECTOR_DEFAULT_MAX_NANO_CPUS=1000000000 +DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS='{}' +DIRECTOR_HOST=director +DIRECTOR_LOGLEVEL=INFO +DIRECTOR_MONITORING_ENABLED=True +DIRECTOR_PORT=8000 +DIRECTOR_PUBLISHED_HOST_NAME="127.0.0.1:9081" +DIRECTOR_REGISTRY_CACHING_TTL=00:15:00 DIRECTOR_REGISTRY_CACHING=True +DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=null +DIRECTOR_TRACING=null -COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL=tcp://dask-scheduler:8786 -DIRECTOR_V2_DEV_FEATURES_ENABLED=0 +DOCKER_API_PROXY_HOST=docker-api-proxy +DOCKER_API_PROXY_PASSWORD=admin +DOCKER_API_PROXY_PORT=8888 +DOCKER_API_PROXY_SECURE=False +DOCKER_API_PROXY_USER=admin + +EFS_USER_ID=8006 +EFS_USER_NAME=efs +EFS_GROUP_ID=8106 +EFS_GROUP_NAME=efs-group +EFS_DNS_NAME=fs-xxx.efs.us-east-1.amazonaws.com +EFS_MOUNTED_PATH=/tmp/efs +EFS_PROJECT_SPECIFIC_DATA_DIRECTORY=project-specific-data +EFS_GUARDIAN_TRACING=null +EFS_DEFAULT_USER_SERVICE_SIZE_BYTES=10000 + +# DATCORE_ADAPTER +DATCORE_ADAPTER_TRACING=null +# DIRECTOR_V2 ---- 
+COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH='{"type":"tls","tls_ca_file":"/home/scu/.dask/dask-crt.pem","tls_client_cert":"/home/scu/.dask/dask-crt.pem","tls_client_key":"/home/scu/.dask/dask-key.pem"}' +COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE=S3 +COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL=tls://dask-scheduler:8786 +COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE=PRESIGNED +COMPUTATIONAL_BACKEND_ON_DEMAND_CLUSTERS_FILE_LINK_TYPE=PRESIGNED +DIRECTOR_V2_DEV_FEATURES_ENABLED=0 +DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED=1 +DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL=PT0S +DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS='{}' +DIRECTOR_V2_HOST=director-v2 +DIRECTOR_V2_LOGLEVEL=INFO +DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH=null +DIRECTOR_V2_PORT=8000 +DIRECTOR_V2_PROFILING=1 +DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS=[] +DIRECTOR_V2_DOCKER_HUB_REGISTRY=null +DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS=False +DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED=0 DYNAMIC_SIDECAR_IMAGE=${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} DYNAMIC_SIDECAR_LOG_LEVEL=DEBUG +DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS=[] +DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS={} +DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT=01:00:00 +DIRECTOR_V2_TRACING=null + +# DYNAMIC_SCHEDULER ---- +DYNAMIC_SCHEDULER_LOGLEVEL=INFO +DYNAMIC_SCHEDULER_PROFILING=1 +DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER=0 +DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT=01:00:00 +DYNAMIC_SCHEDULER_TRACING=null +DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=adminadmin FUNCTION_SERVICES_AUTHORS='{"UN": {"name": "Unknown", "email": "unknown@osparc.io", "affiliation": "unknown"}}' +WEBSERVER_LICENSES={} +LICENSES_ITIS_VIP_SYNCER_ENABLED=false +LICENSES_ITIS_VIP_SYNCER_PERIODICITY=1D00:00:00 +LICENSES_ITIS_VIP_API_URL=https://replace-with-itis-api/{category} +LICENSES_ITIS_VIP_CATEGORIES='{"HumanWholeBody": "Humans", "HumanBodyRegion": "Humans (Region)", "AnimalWholeBody": "Animal"}' +LICENSES_SPEAG_PHANTOMS_API_URL=https://replace-with-speag-api/{category} +LICENSES_SPEAG_PHANTOMS_CATEGORIES='{"ComputationalPhantom": "Phantom of the Opera"}' + # Can use 'docker run -it itisfoundation/invitations:latest simcore-service-invitations generate-dotenv --auto-password' +INVITATIONS_DEFAULT_PRODUCT=osparc INVITATIONS_HOST=invitations INVITATIONS_LOGLEVEL=INFO INVITATIONS_OSPARC_URL=http://127.0.0.1.nip.io:9081 INVITATIONS_PASSWORD=adminadmin INVITATIONS_PORT=8000 INVITATIONS_SECRET_KEY='REPLACE_ME_with_result__Fernet_generate_key=' +INVITATIONS_SWAGGER_API_DOC_ENABLED=1 INVITATIONS_USERNAME=admin +INVITATIONS_TRACING=null + +LOG_FORMAT_LOCAL_DEV_ENABLED=1 +LOG_FILTER_MAPPING='{}' +NOTIFICATIONS_LOGLEVEL=INFO +NOTIFICATIONS_TRACING=null + +PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES=30 +PAYMENTS_ACCESS_TOKEN_SECRET_KEY=2c0411810565e063309be1457009fb39ce023946f6a354e6935107b57676 +PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT=10000 +PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT=100.0 +PAYMENTS_AUTORECHARGE_ENABLED=1 +PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS=100 +PAYMENTS_BCC_EMAIL=none +PAYMENTS_EMAIL={} +PAYMENTS_FAKE_COMPLETION_DELAY_SEC=10 +PAYMENTS_FAKE_COMPLETION=0 +PAYMENTS_GATEWAY_API_SECRET=adminadmin +PAYMENTS_GATEWAY_URL=http://127.0.0.1:32769 +PAYMENTS_HOST=payments +PAYMENTS_LOGLEVEL=INFO +PAYMENTS_PASSWORD=adminadmin +PAYMENTS_PORT=8000 +PAYMENTS_STRIPE_API_SECRET='REPLACE_ME_with_api_secret' +PAYMENTS_STRIPE_URL=https://api.stripe.com +PAYMENTS_SWAGGER_API_DOC_ENABLED=1 
+PAYMENTS_USERNAME=admin +PAYMENTS_TRACING=null POSTGRES_DB=simcoredb POSTGRES_ENDPOINT=postgres:5432 @@ -56,66 +195,211 @@ POSTGRES_PASSWORD=adminadmin POSTGRES_PORT=5432 POSTGRES_USER=scu +POSTGRES_READONLY_PASSWORD=readonly +POSTGRES_READONLY_USER=postgres_readonly + + RABBIT_HOST=rabbit RABBIT_PASSWORD=adminadmin RABBIT_PORT=5672 +RABBIT_SECURE=false RABBIT_USER=admin REDIS_HOST=redis REDIS_PORT=6379 +REDIS_PASSWORD=adminadmin +REDIS_SECURE=false +REDIS_USER=null REGISTRY_AUTH=True -REGISTRY_PW=adminadmin +REGISTRY_PATH="" +REGISTRY_PW=adminadminadmin REGISTRY_SSL=True REGISTRY_URL=registry.osparc-master.speag.com REGISTRY_USER=admin +RESOURCE_MANAGER_RESOURCE_TTL_S=900 +RESOURCE_USAGE_TRACKER_HOST=resource-usage-tracker +RESOURCE_USAGE_TRACKER_PORT=8000 +RESOURCE_USAGE_TRACKER_EXTERNAL_PORT=8000 +RESOURCE_USAGE_TRACKER_LOGLEVEL=INFO +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED=1 +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL=6 +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC=300 +RESOURCE_USAGE_TRACKER_S3=null +RESOURCE_USAGE_TRACKER_TRACING=null + # NOTE: 172.17.0.1 is the docker0 interface, which redirect from inside a container onto the host network interface. -R_CLONE_ENABLED=false +R_CLONE_OPTION_BUFFER_SIZE=16M +R_CLONE_OPTION_RETRIES=3 +R_CLONE_OPTION_TRANSFERS=5 R_CLONE_PROVIDER=MINIO + +# simcore-user used in docker images +SC_USER_ID=8004 +SC_USER_NAME=scu + S3_ACCESS_KEY=12345678 S3_BUCKET_NAME=simcore -S3_ENDPOINT=172.17.0.1:9001 +S3_ENDPOINT=http://172.17.0.1:9001 +S3_REGION=us-east-1 S3_SECRET_KEY=12345678 -S3_SECURE=0 - - -SCICRUNCH_API_BASE_URL=https://scicrunch.org/api/1 -SCICRUNCH_API_KEY=REPLACE_ME_with_valid_api_key +SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet -SMTP_HOST=mail.speag.com +SMTP_HOST=fake.mail.server.com +SMTP_PASSWORD=it_doesnt_matter SMTP_PORT=25 +SMTP_PROTOCOL=UNENCRYPTED +SMTP_USERNAME=it_doesnt_matter -SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet - -# NOTE: STORAGE_ENDPOINT is needed by director-v0 +# STORAGE ---- STORAGE_ENDPOINT=storage:8080 STORAGE_HOST=storage +STORAGE_LOGLEVEL=INFO STORAGE_PORT=8080 +STORAGE_PROFILING=1 +STORAGE_TRACING=null +# STORAGE ---- SWARM_STACK_NAME=master-simcore -TRACING_ENABLED=1 -TRACING_ZIPKIN_ENDPOINT=http://jaeger:9411 -TRACING_THRIFT_COMPACT_ENDPOINT=http://jaeger:5775 +## VENDOR DEVELOPMENT SERVICES --- +VENDOR_DEV_MANUAL_IMAGE=containous/whoami +VENDOR_DEV_MANUAL_REPLICAS=1 +VENDOR_DEV_MANUAL_SUBDOMAIN=manual -TRAEFIK_SIMCORE_ZONE=internal_simcore_stack +## VENDOR DEVELOPMENT SERVICES --- + +WB_API_WEBSERVER_HOST=wb-api-server +WB_API_WEBSERVER_PORT=8080 + +WB_GC_ACTIVITY=null +WB_GC_ANNOUNCEMENTS=0 +WB_GC_CATALOG=null +WB_GC_DB_LISTENER=0 +WB_GC_DIAGNOSTICS=null +WB_GC_EMAIL=null +WB_GC_EXPORTER=null +WB_GC_FOLDERS=0 +WB_GC_FRONTEND=null +WB_GC_GARBAGE_COLLECTOR='{"GARBAGE_COLLECTOR_INTERVAL_S": 30}' +WB_GC_GROUPS=0 +WB_GC_INVITATIONS=null +WB_GC_LOGIN=null +WB_GC_LOGLEVEL=INFO +WB_GC_NOTIFICATIONS=0 +WB_GC_PAYMENTS=null +WB_GC_PRODUCTS=0 +WB_GC_PROJECTS=null +WB_GC_PUBLICATIONS=0 +WB_GC_RESOURCE_MANAGER_RESOURCE_TTL_S=60 +WB_GC_REST_SWAGGER_API_DOC_ENABLED=0 +WB_GC_SCICRUNCH=null +WB_GC_SOCKETIO=1 +WB_GC_STATICWEB=null +WB_GC_STUDIES_DISPATCHER=null +WB_GC_TAGS=0 +WB_GC_TRACING=null +WB_GC_USERS={} +WB_GC_WALLETS=0 -# NOTE: WEBSERVER_SESSION_SECRET_KEY = $(python3 -c "from cryptography.fernet import Fernet; print(Fernet.generate_key())") +WB_DB_EL_ACTIVITY=null +WB_DB_EL_ANNOUNCEMENTS=0 +WB_DB_EL_CATALOG=null +WB_DB_EL_DB_LISTENER=1 
+WB_DB_EL_DIAGNOSTICS=null +WB_DB_EL_EMAIL=null +WB_DB_EL_EXPORTER=null +WB_DB_EL_FOLDERS=0 +WB_DB_EL_FRONTEND=null +WB_DB_EL_GARBAGE_COLLECTOR=null +WB_DB_EL_GROUPS=0 +WB_DB_EL_INVITATIONS=null +WB_DB_EL_LOGIN=null +WB_DB_EL_LOGLEVEL=INFO +WB_DB_EL_NOTIFICATIONS=0 +WB_DB_EL_PAYMENTS=null +WB_DB_EL_PRODUCTS=0 +WB_DB_EL_PROJECTS=null +WB_DB_EL_PUBLICATIONS=0 +WB_DB_EL_REST_SWAGGER_API_DOC_ENABLED=0 +WB_DB_EL_SCICRUNCH=null +WB_DB_EL_SOCKETIO=1 +WB_DB_EL_STATICWEB=null +WB_DB_EL_STORAGE=null +WB_DB_EL_STUDIES_DISPATCHER=null +WB_DB_EL_TAGS=0 +WB_DB_EL_TRACING=null +WB_DB_EL_USERS={} +WB_DB_EL_WALLETS=0 + +# WEBSERVER ---- +AIODEBUG_SLOW_DURATION_SECS=0 +DIAGNOSTICS_HEALTHCHECK_ENABLED=False +DIAGNOSTICS_MAX_AVG_LATENCY=10 +DIAGNOSTICS_MAX_TASK_DELAY=30 +DIAGNOSTICS_SLOW_DURATION_SECS=1 +LOGIN_2FA_CODE_EXPIRATION_SEC=60 +LOGIN_2FA_REQUIRED=0 +LOGIN_ACCOUNT_DELETION_RETENTION_DAYS=31 +LOGIN_REGISTRATION_CONFIRMATION_REQUIRED=0 +LOGIN_REGISTRATION_INVITATION_REQUIRED=0 +PROJECTS_INACTIVITY_INTERVAL=00:00:20 PROJECTS_MAX_COPY_SIZE_BYTES=30Gib PROJECTS_MAX_NUM_RUNNING_DYNAMIC_NODES=5 +REST_SWAGGER_API_DOC_ENABLED=1 +SCICRUNCH_API_BASE_URL=https://scicrunch.org/api/1 +SCICRUNCH_API_KEY=REPLACE_ME_with_valid_api_key +SESSION_COOKIE_HTTPONLY=True +SESSION_COOKIE_MAX_AGE=null +SESSION_COOKIE_SAMESITE=null +SESSION_COOKIE_SECURE=False +SIMCORE_VCS_RELEASE_TAG=latest +STUDIES_ACCESS_ANONYMOUS_ALLOWED=0 +STUDIES_DEFAULT_SERVICE_THUMBNAIL=https://via.placeholder.com/170x120.png +TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE=2 +TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT=http://opentelemetry-collector +TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT=http://jaeger:4318 +TRACING_OPENTELEMETRY_COLLECTOR_PORT=4318 +TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE=100 +TRAEFIK_SIMCORE_ZONE=internal_simcore_stack +TRASH_RETENTION_DAYS=7 +TWILIO_ACCOUNT_SID=DUMMY +TWILIO_AUTH_TOKEN=DUMMY +TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT=["41"] +WEBSERVER_ACTIVITY=null +WEBSERVER_ANNOUNCEMENTS=1 +WEBSERVER_CATALOG={} +WEBSERVER_CREDIT_COMPUTATION_ENABLED=1 +WEBSERVER_DB_LISTENER=0 WEBSERVER_DEV_FEATURES_ENABLED=0 +WEBSERVER_DIAGNOSTICS={} +WEBSERVER_EMAIL={} +WEBSERVER_EXPORTER={} +WEBSERVER_FOLDERS=1 +WEBSERVER_FRONTEND={} +WEBSERVER_FUNCTIONS=1 +WEBSERVER_GARBAGE_COLLECTOR=null +WEBSERVER_GROUPS=1 +WEBSERVER_GUNICORN_CMD_ARGS=--timeout=180 WEBSERVER_HOST=webserver -WEBSERVER_LOGIN_REGISTRATION_CONFIRMATION_REQUIRED=0 -WEBSERVER_LOGIN_REGISTRATION_INVITATION_REQUIRED=0 +WEBSERVER_LOGIN={} +WEBSERVER_LOGLEVEL=INFO +WEBSERVER_NOTIFICATIONS=1 +WEBSERVER_PAYMENTS={} +WEBSERVER_PORT=8080 +WEBSERVER_PRODUCTS=1 +WEBSERVER_PROFILING=1 +WEBSERVER_PROJECTS={} WEBSERVER_PROMETHEUS_API_VERSION=v1 -WEBSERVER_PROMETHEUS_HOST=http://prometheus -WEBSERVER_PROMETHEUS_PORT=9090 -WEBSERVER_RESOURCES_DELETION_TIMEOUT_SECONDS=900 +WEBSERVER_PROMETHEUS_URL=http://prometheus:9090 +WEBSERVER_PUBLICATIONS=1 +WEBSERVER_SCICRUNCH={} WEBSERVER_SESSION_SECRET_KEY='REPLACE_ME_with_result__Fernet_generate_key=' -WEBSERVER_STUDIES_ACCESS_ENABLED=0 -# for debugging -# PYTHONTRACEMALLOC=1 -# PYTHONASYNCIODEBUG=1 -# AIODEBUG_SLOW_DURATION_SECS=0.25 +WEBSERVER_SOCKETIO=1 +WEBSERVER_STATICWEB={} +WEBSERVER_STUDIES_DISPATCHER={} +WEBSERVER_TAGS=1 +WEBSERVER_TRACING=null +WEBSERVER_USERS={} diff --git a/.env-wb-garbage-collector b/.env-wb-garbage-collector deleted file mode 100644 index b4cbe1c4963..00000000000 --- a/.env-wb-garbage-collector +++ /dev/null @@ -1,37 +0,0 @@ -# -# Explicit plugins DISABLED in the webserver to create a garbage-collector 
-# Docs plugins config of services/web/server/src/simcore_service_webserver/application_settings.py -# - - -WEBSERVER_ACTIVITY=null -WEBSERVER_CATALOG=null -WEBSERVER_COMPUTATION=0 -WEBSERVER_DIAGNOSTICS=null -#WEBSERVER_DIRECTOR_V2 from .env -WEBSERVER_DIRECTOR=null -WEBSERVER_EMAIL=null -WEBSERVER_EXPORTER=null -WEBSERVER_FRONTEND=null -#WEBSERVER_GARBAGE_COLLECTOR explicit in -WEBSERVER_LOGIN=null -WEBSERVER_PROJECTS=null -#WEBSERVER_REDIS from .env -#WEBSERVER_REST needed for the healthcheck -#WEBSERVER_RESOURCE_MANAGER from .env -WEBSERVER_SCICRUNCH=null -WEBSERVER_STATICWEB=null -# WEBSERVER_STORAGE needed to delete data when removing anonymous service -WEBSERVER_STUDIES_DISPATCHER=null -WEBSERVER_TRACING=null -# -------- -WEBSERVER_CLUSTERS=0 -WEBSERVER_GROUPS=0 -WEBSERVER_META_MODELING=0 -WEBSERVER_PRODUCTS=0 -WEBSERVER_PUBLICATIONS=0 -WEBSERVER_SOCKETIO=0 -WEBSERVER_STUDIES_DISPATCHER=null -WEBSERVER_TAGS=0 -WEBSERVER_USERS=0 -WEBSERVER_VERSION_CONTROL=0 diff --git a/.eslintignore b/.eslintignore deleted file mode 100644 index 1164f0cae98..00000000000 --- a/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -services/web/client/source/resource/ -services/web/client/contrib/ -services/web/client/source-output/ -services/*node_modules/ -services/dy-modeling/client/source/resource/ -services/dy-modeling/client/source-output/ -services/dy-modeling/server/source/thrift/ diff --git a/.eslintrc.json b/.eslintrc.json index ba7959d66ee..3cb5aa6f6b4 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -23,7 +23,7 @@ "block-scoped-var": "warn", "brace-style": [ "warn", - "stroustrup" + "1tbs" ], "indent": [ "warn", @@ -42,7 +42,7 @@ }, "ObjectPattern": { "multiline": true, - "minProperties": 1 + "minProperties": 3 } } ], diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000000..839d1b1fcfb --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore=E501,W503 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d1f523a985f..d8684350361 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,42 +4,49 @@ # files and folders recursively -.codeclimate.yml @sanderegg @pcrespov -.env-* @pcrespov @Surfict @mrnicegyu11 -.travis.yml @sanderegg -Makefile @pcrespov @sanderegg +.env-* @mrnicegyu11 @YuryHrytsuk +Makefile @pcrespov @sanderegg # NOTE: '/' denotes the root of the repository -/.github/ @sanderegg @pcrespov -/api/ @sanderegg @pcrespov -/ci/ @sanderegg @pcrespov -/docs/ @pcrespov -/packages/models-library/ @sanderegg @pcrespov -/packages/pytest-simcore/ @pcrespov @sanderegg -/packages/service-integration/ @pcrespov @sanderegg @GitHK -/packages/service-library/ @pcrespov -/packages/settings-library/ @pcrespov @sanderegg -/requirements/ @pcrespov -/scripts/demo/ @odeimaiz @pcrespov -/scripts/json-schema-to-openapi-schema @sanderegg -/scripts/template-projects/ @odeimaiz @pcrespov -/services/agent @GitHK -/services/api-server/ @pcrespov -/services/autoscaling/ @Surfict @sanderegg @pcrespov -/services/catalog/ @pcrespov @sanderegg -/services/datcore-adapter/ @sanderegg -/services/director*/ @sanderegg @pcrespov @GitHK -/services/docker-compose*.yml @sanderegg @Surfict @mrnicegyu11 -/services/dynamic-sidecar/ @GitHK -/services/invitations/ @pcrespov -/services/migration/ @pcrespov -/services/static-webserver @GitHK -/services/static-webserver/client/ @odeimaiz -/services/storage/ @sanderegg @pcrespov -/services/web/server/ @pcrespov @sanderegg @GitHK -/tests/environment-setup/ @pcrespov -/tests/performance/ @pcrespov -/tests/public-api/ @pcrespov -requirements/* @pcrespov 
-tools/* @pcrespov +/.github/ @sanderegg @pcrespov +/api/ @sanderegg @pcrespov @matusdrobuliak66 +/ci/ @sanderegg @pcrespov +/docs/ @pcrespov +/packages/common-library/ @giancarloromeo +/packages/models-library/ @sanderegg @pcrespov @matusdrobuliak66 @giancarloromeo +/packages/postgres-database/ @matusdrobuliak66 +/packages/pytest-simcore/ @pcrespov @sanderegg +/packages/service-integration/ @pcrespov @sanderegg @GitHK +/packages/service-library/ @pcrespov +/packages/settings-library/ @pcrespov @sanderegg +/requirements/ @pcrespov @matusdrobuliak66 +/services/agent/ @GitHK +/services/api-server/ @pcrespov +/services/api-server/tests/unit/pact_broker/ @matusdrobuliak66 +/services/autoscaling/ @sanderegg +/services/catalog/ @pcrespov @sanderegg +/services/clusters-keeper/ @sanderegg +/services/datcore-adapter/ @sanderegg +/services/director*/ @sanderegg @pcrespov @GitHK +/services/docker-compose.yml @sanderegg @mrnicegyu11 @YuryHrytsuk +/services/docker-compose.*.yml @sanderegg +/services/dynamic-sidecar/ @GitHK +/services/efs-guardian/ @matusdrobuliak66 +/services/invitations/ @pcrespov +/services/migration/ @pcrespov +/services/notifications/ @GitHK +/services/payments/ @pcrespov @matusdrobuliak66 +/services/resource-usage-tracker/ @matusdrobuliak66 +/services/static-webserver/ @GitHK +/services/static-webserver/client/ @odeimaiz +/services/storage/ @sanderegg +/services/storage/modules/celery @giancarloromeo +/services/web/server/ @pcrespov @sanderegg @GitHK @matusdrobuliak66 +/tests/e2e-frontend/ @odeimaiz +/tests/e2e-playwright/ @matusdrobuliak66 +/tests/environment-setup/ @pcrespov +/tests/performance/ @pcrespov @sanderegg +/tests/public-api/ @pcrespov +requirements/* @pcrespov +tools/* @pcrespov diff --git a/.github/ISSUE_TEMPLATE/1_bug_report.yml b/.github/ISSUE_TEMPLATE/1_bug_report.yml index a05e7c144e3..a0af793a09d 100644 --- a/.github/ISSUE_TEMPLATE/1_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/1_bug_report.yml @@ -1,6 +1,6 @@ name: πŸ› Bug description: File a bug/issue -labels: [bug, "t:bug"] +labels: ["bug", "t:bug"] assignees: ["pcrespov"] body: - type: checkboxes diff --git a/.github/ISSUE_TEMPLATE/4_pre_release.yml b/.github/ISSUE_TEMPLATE/4_pre_release.yml index 39ceacf87c0..1502fd0b34f 100644 --- a/.github/ISSUE_TEMPLATE/4_pre_release.yml +++ b/.github/ISSUE_TEMPLATE/4_pre_release.yml @@ -2,7 +2,7 @@ name: πŸš€ Pre-release to staging (developers-only) description: Issue to plan and log pre-release from master to staging deploy (including staging hotfixes) title: "πŸš€ Pre-release master -> staging_" labels: ["t:maintenance", "release"] -assignees: ["pcrespov"] +assignees: ["matusdrobuliak66"] body: - type: dropdown id: prerelease_kind @@ -39,6 +39,14 @@ body: placeholder: 7d9dcc313f9ced0bd1e6508363148841683b6d7c validations: required: true + - type: input + id: pre_release_date + attributes: + label: Planned date + description: Projected release date for this upcoming version + placeholder: ex. Friday, April 23, 1976 + validations: + required: true - type: checkboxes attributes: label: Did the commit CI suceeded? @@ -64,7 +72,8 @@ body: attributes: label: What Changed description: | - List of commit messages in this release. Add a list of links to the corresponding PRs. This way we can trace the release of every single PR. TIP: use the autogenerate changelog feature during the draft release. + List of commit messages in this release. Add a list of links to the corresponding PRs. This way we can trace the release of every single PR. 
TIP: use the autogenerate changelog feature during the draft release + (TIP: use regex replace for better visualization ``\*.+ by (@[\w-]+) in (https:.*)`` -> ``* $2 by $1``). validations: required: false - type: textarea @@ -76,7 +85,7 @@ body: Log any issues since this procedure should be taken also as an exercise in preparation for the release to production as well. value: | - [ ] Add changes (if any) and release tags in https://github.com/ITISFoundation/osparc-ops-environments/tags - - [ ] Add changes (if any) and release tags in osparc ops configurations + - [ ] Add changes (if any) and release tags in **osparc-ops-deployment-configuration** (staging.osparc.io & osparc-staging.speag.com) - [ ] ... validations: required: false @@ -105,10 +114,34 @@ body: - Fill up value: | - [ ] `` make release-staging name= version= git_sha=`` + - `https://github.com/ITISFoundation/osparc-simcore/releases/new?prerelease=1&target=&tag=staging_&title=Staging%20` - [ ] Draft [pre-release](https://github.com/ITISFoundation/osparc-simcore/releases) - - [ ] Announce + - [ ] Announce (add redis key ```maintenance``` in every concerned deployment) ```json {"start": "2023-02-01T12:30:00.000Z", "end": "2023-02-01T13:00:00.000Z", "reason": "Release ResistanceIsFutile9 "} + ``` + - [ ] Announce release in Mattermost + ``` + :loud_sound: Β Maintenance scheduled forΒ **NAMED_DAY DD. MM from START_TIME - END_TIME**. + ========================================================================= + + @all Be aware that you will automatically be logged out and your projects stopped and saved during the maintenance time. Affected: + * [https://staging.osparc.io](https://staging.osparc.io/) + * [https://https://staging.s4l-lite.io/](https://https://staging.s4l-lite.io//) + + and on premises: + * [https://osparc-staging.speag.com](https://osparc-staging.speag.com/) + * [https://tip-staging.speag.com](https://tip-staging.speag.com/) + * [https://s4l-staging.speag.com](https://s4l-staging.speag.com/) + * [https://s4l-lite-staging.speag.com](https://s4l-lite-staging.speag.com/) + + + Reason: Scheduled staging-release of STAGING_NAME_AND_VERSION. + + Thanks for your understanding and sorry for the inconveniences, + + Your friendly oSparc Team + ``` - type: textarea attributes: @@ -116,6 +149,7 @@ body: value: | - [ ] Release (release draft) - [ ] Check Release CI + - [ ] Check hanging sidecars. Helper command to run in director-v2 CLI `simcore-service-director-v2 close-and-save-service ` - [ ] Check deployed - [ ] aws deploy - [ ] dalco deploy diff --git a/.github/ISSUE_TEMPLATE/5_release.yml b/.github/ISSUE_TEMPLATE/5_release.yml index 7336c4de752..bcc15b5be7b 100644 --- a/.github/ISSUE_TEMPLATE/5_release.yml +++ b/.github/ISSUE_TEMPLATE/5_release.yml @@ -2,7 +2,7 @@ name: πŸš€ Release to production (developers-only) description: Creates an issue to plan and log the release from staging to production title: "πŸš€ Release v" labels: ["t:maintenance", "release"] -assignees: ["pcrespov"] +assignees: ["matusdrobuliak66"] body: - type: input id: version @@ -17,7 +17,7 @@ body: attributes: label: Commit SHA description: | - Selets the commit from which the release takes placeholder. Check [commits](https://github.com/ITISFoundation/osparc-simcore/commits/master) + Selects the commit from which the release takes placeholder. Check [commits](https://github.com/ITISFoundation/osparc-simcore/commits/master) IMPORTANT: make sure t (i.e. 
tests passed and images were built and pushed) placeholder: 7d9dcc313f9ced0bd1e6508363148841683b6d7c validations: @@ -27,10 +27,18 @@ body: attributes: label: "Previous pre-release" description: | - Link to pre-release to staging tha preceeds this release + Link to pre-release to staging that preceeds this release value: https://github.com/ITISFoundation/osparc-simcore/releases/tag/staging_ validations: required: true + - type: input + id: release_date + attributes: + label: Planned date + description: Projected release date for this upcoming version + placeholder: ex. Friday, April 23, 1976 + validations: + required: true - type: checkboxes attributes: label: Did the commit CI suceeded? @@ -58,7 +66,7 @@ body: description: | Changes introduced by this release. List links to the PRs associated to every commit in the release. This will automatically create references between PRs and this release that will be very valuable for traceability. - TIP: use the autogenerate changelog feature during the draft release. + TIP: use the autogenerate changelog feature during the draft release. (TIP: use regex replace for better visualization ``\*.+ by (@[\w-]+) in (https:.*)`` -> ``* $2 by $1``) validations: required: false - type: textarea @@ -70,7 +78,7 @@ body: Log below completed steps and issues value: | - [ ] Add changes (if any) and release tags in https://github.com/ITISFoundation/osparc-ops-environments/tags - - [ ] Add changes (if any) and release tags in osparc ops configurations + - [ ] Add changes (if any) and release tags in **osparc-ops-deployment-configuration** (osparc.io & osparc.speag.com & tip.itis.swiss) - [ ] ... validations: required: false @@ -119,30 +127,38 @@ body: ``` - [ ] Draft [release changelog](https://github.com/ITISFoundation/osparc-simcore/releases) - [ ] Announce maintenance ( ** ANNOUNCE AT LEAST 24 HOURS BEFORE ** ) - - redis ``{"start": "2023-03-06T13:00:00.000Z", "end": "2023-03-06T15:00:00.000Z", "reason": "Release "}`` - - [ ] aws - - [ ] dalco - - [ ] tip - - status page - - [ ] osparc - - [ ] s4l + - redis add an entry in db `scheduled_maintenance` with key `maintenance` ``{"start": "2023-03-06T13:00:00.000Z", "end": "2023-03-06T15:00:00.000Z", "reason": "Release "}``. Note the time is in UTC, please adjust accordingly + - [ ] sim4life.io + - [ ] osparc.speag.com + - [ ] osparc.io + - [ ] tip.science + - status page (https://manage.statuspage.io/) + - [ ] sim4life.io + - [ ] s4l-lite.io + - [ ] osparc.io + - [ ] tip.science - mattermost channels - [ ] maintenance - [ ] power users + - [ ] Publish draft [draft](https://github.com/ITISFoundation/osparc-simcore/releases) + - [ ] Check release CI - type: textarea attributes: label: Releasing πŸš€ value: | - - [ ] Maintenance page up. - - [ ] Release by publishing [draft](https://github.com/ITISFoundation/osparc-simcore/releases) - - [ ] Check release CI + - [ ] Maintenance page up (https://git.speag.com/oSparc/osparc-ops-deployment-configuration/-/pipeline_schedules) + - [ ] Check hanging sidecars. Helper command to run in director-v2 CLI `simcore-service-director-v2 close-and-save-service ` + - [ ] Remove redis key - [ ] Check deployed - - [ ] aws deploy - - [ ] dalco deploy - - [ ] tip deploy - - [ ] Delete announcement + - [ ] sim4life.io + - [ ] osparc.speag.com + - [ ] osparc.io + - [ ] tip.science + - [ ] Check testing endpoint ex. 
`https://testing.osparc.speag.com/` + - [ ] Update status page + - [ ] Update Release notes if hotfix (https://github.com/ITISFoundation/osparc-issues/blob/master/scripts/run_after_hotfix_to_prod.py) - [ ] Check e2e runs - - [ ] Announce + - [ ] Announce in mattermost ``` md :tada: https://github.com/ITISFoundation/osparc-simcore/releases/tag/v ``` diff --git a/.github/ISSUE_TEMPLATE/6_hotfix.yml b/.github/ISSUE_TEMPLATE/6_hotfix.yml new file mode 100644 index 00000000000..49c0a02ed72 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/6_hotfix.yml @@ -0,0 +1,110 @@ +name: πŸš‘οΈ Hotfix (developers-only) +description: Critical hotfix to staging or production +title: "πŸš‘οΈ Release hotfix/v (hotfix)" +labels: ["t:maintenance", "release"] +assignees: ["matusdrobuliak66"] +body: + - type: input + id: version + attributes: + label: Release version + description: Release version as major.minor.patch .Check [Releases](https://github.com/ITISFoundation/osparc-simcore/releases) + placeholder: e.g. 1.51.1 + validations: + required: true + - type: input + id: branch_commit_sha + attributes: + label: Branching Commit SHA + description: | + Select the commit from which the hot-fix branch needs to emerge . Check [commits](https://github.com/ITISFoundation/osparc-simcore/commits/master) + IMPORTANT: make sure t (i.e. tests passed and images were built and pushed) + placeholder: e.g. `8d9a27ebb3e64956e6a41f31839748b3f6a27074` + validations: + required: true + - type: input + id: previous_release + attributes: + label: "Previous release" + description: | + Link to release that preceeds this release + value: https://github.com/ITISFoundation/osparc-simcore/releases/tag/v + validations: + required: true + - type: input + id: hotfix_date + attributes: + label: Planned date + description: Projected release date for this upcoming version + placeholder: ex. Friday, April 23, 1976 + validations: + required: true + - type: textarea + attributes: + label: Motivation + description: | + What is the motivation to release hotfix to stagging/production? + - Explain what motivates this release? + - Which important changes we might pay attention to? + - How should we test them? + - Is there anything in particular we should monitor? + validations: + required: true + - type: textarea + attributes: + label: Cherry Picks + description: | + List of changes to cherry-pick into the hotfix branch. + List links to the PRs associated to every commit in the release. This will automatically create references between PRs and this release that will be very valuable for traceability. + TIP: use the autogenerate changelog feature during the draft release. + validations: + required: false + - type: textarea + attributes: + label: "πŸš‘οΈ Hotfix branch" + value: | + - [ ] create hotfix branch [``hotfix_v1_51_x``](https://github.com/ITISFoundation/osparc-simcore/tree/hotfix_v1_51_x) directly in *upstream* repo. + - WARNING: never push this branch BEFORE the actual release is completed + - **Do not delete hotfix branches** at least until next *full* release takes place. 
+ - See more [details](https://github.com/ITISFoundation/osparc-simcore/blob/master/docs/releasing-workflow-instructions.md#hotfix-release-hotfix-branch---production) + - [ ] prepare hotfix by applying patches/cherry-picks (see *what changed* section) + - [ ] test hotfix in local deployment + - [ ] push and [check whether CI passed](https://github.com/ITISFoundation/osparc-simcore/actions) + - [ ] check [images in dockerhub](https://registry.hub.docker.com/u/itisfoundation) + validations: + required: false + - type: input + id: hotfix_commit_sha + attributes: + label: Hotfix Commit SHA + description: | + Selects the the head commit of the hotfix branch + IMPORTANT: make sure t (i.e. tests passed and images were built and pushed) + placeholder: e.g. `d1f2e9ed-3b82-424e-8afa-17940614f042` + validations: + required: false + - type: textarea + attributes: + label: Changes (for changelog) + description: | + List of changes in the hotfix branch + validations: + required: false + - type: textarea + attributes: + label: Releasing πŸš€ + value: | + - ``make release-hotfix version= git_sha=`` + - [ ] Draft [release notes](https://github.com/ITISFoundation/osparc-simcore/releases) πŸš€ + - [ ] No need for maintenance announcement?: Target update of ```` + - [ ] Deploy hot-fix. + - [ ] [release](https://github.com/ITISFoundation/osparc-simcore/releases) πŸš€ !!!! + - [ ] aws-prod (updated target service) + - [ ] dalco-prod (updated target service) + - [ ] ti (updated target service) + - [ ] Test and Monitor. Revert back if needed! + - [ ] Update Release notes (https://github.com/ITISFoundation/osparc-issues/blob/master/scripts/run_after_hotfix_to_prod.py) + - [ ] Announce hot-fix + ``` md + :tada: https://github.com/ITISFoundation/osparc-simcore/releases/tag/v + ``` diff --git a/.github/ISSUE_TEMPLATE/7_regular_maintenance.yml b/.github/ISSUE_TEMPLATE/7_regular_maintenance.yml new file mode 100644 index 00000000000..a6d821cdca1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/7_regular_maintenance.yml @@ -0,0 +1,28 @@ +name: ⬆️ Regular maintenance (developers-only) +description: Creates an issue to plan a regular maintenance each sprint +title: " ⬆️ Regular maintenance " +labels: ["t:maintenance"] +assignees: ["matusdrobuliak66"] +body: + - type: input + id: sprint + attributes: + label: Sprint name + placeholder: e.g. JellyBeans + validations: + required: true + - type: textarea + attributes: + label: ⬆️ Requirements + description: | + Upgrade of Requirements + value: | + - Update of test & tools dependencies repository-wise + - [ ] ``make reqs`` + - Update of most important libraries repository-wise make ``reqs-all upgrade=foo==1.2.3`` + - [ ] fastapi + - [ ] pydantic + - [ ] aio-pika + - [ ] aiohttp + - [ ] redis + - [ ] sqlalchemy diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 35ce2ed8608..08ea869c9b6 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,33 +3,42 @@ WIP: work in progress πŸ› Fix a bug. ✨ Introduce new features. - ♻️ Refactor code. + 🎨 Enhance existing feature. + ♻️ Refactor code. πŸš‘οΈ Critical hotfix. - βš—οΈ Perform experiments. - ⬆️ Upgrade dependencies. + βš—οΈ Perform experiments. + ⬆️ Upgrade dependencies. πŸ“ Add or update documentation. πŸ”¨ Add or update development scripts. + βœ… Add, update or pass tests. πŸ”’οΈ Fix security issues. - ⚠️ Changes in devops configuration - πŸ—ƒοΈ Migration of database + ⚠️ Changes in ops configuration etc. are required before deploying. 
+ [ Please add a link to the associated ops-issue or PR, such as in https://github.com/ITISFoundation/osparc-ops-environments or https://git.speag.com/oSparc/osparc-infra ] + πŸ—ƒοΈ Database table changed (relevant for devops). + πŸ‘½οΈ Public API changes (meaning: dev features are moved to being exposed in production) + 🚨 Do manual testing when deployed or from https://gitmoji.dev/ --> ## What do these changes do? + ## Related issue/s - - + ## How to test + +## Dev-ops + + diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml index ace67457635..adac3b13795 100644 --- a/.github/codeql/codeql-config.yml +++ b/.github/codeql/codeql-config.yml @@ -3,6 +3,7 @@ name: "ospac-simcore CodeQL config" disable-default-queries: false paths: + - packages/aws-library/src - packages/dask-task-models-library/src - packages/models-library/src/models_library - packages/postgres-database/src/simcore_postgres_database diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000000..a5bb20c31e4 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,55 @@ +# GitHub Copilot Instructions + +This document provides guidelines and best practices for using GitHub Copilot in the `osparc-simcore` repository and other Python and Node.js projects. + +## General Guidelines + +1. **Use Python 3.11**: Ensure that all Python-related suggestions align with Python 3.11 features and syntax. +2. **Node.js Compatibility**: For Node.js projects, ensure compatibility with the version specified in the project (e.g., Node.js 14 or later). +3. **Follow Coding Conventions**: Adhere to the coding conventions outlined in the `docs/coding-conventions.md` file. +4. **Test-Driven Development**: Write unit tests for all new functions and features. Use `pytest` for Python and appropriate testing frameworks for Node.js. +5. **Environment Variables**: Use environment variables as specified in `docs/env-vars.md` for configuration. Avoid hardcoding sensitive information. +6. **Documentation**: Prefer self-explanatory code; add documentation only if explicitly requested by the developer. + +## Python-Specific Instructions + +- Always use type hints and annotations to improve code clarity and compatibility with tools like `mypy`. + - An exception to that rule is in `test_*` functions return type hint must not be added +- Follow the dependency management practices outlined in `requirements/`. +- Use `ruff` for code formatting and for linting. +- Use `black` for code formatting and `pylint` for linting. +- ensure we use `sqlalchemy` >2 compatible code. +- ensure we use `pydantic` >2 compatible code. +- ensure we use `fastapi` >0.100 compatible code +- use f-string formatting +- Only add comments in function if strictly necessary +- use relative imports +- imports should be at top of the file + + +### Json serialization + +- Generally use `json_dumps`/`json_loads` from `common_library.json_serialization` to built-in `json.dumps` / `json.loads`. +- Prefer Pydantic model methods (e.g., `model.model_dump_json()`) for serialization. + + +## Node.js-Specific Instructions + +- Use ES6+ syntax and features. +- Follow the `package.json` configuration for dependencies and scripts. +- Use `eslint` for linting and `prettier` for code formatting. +- Write modular and reusable code, adhering to the project's structure. + +## Copilot Usage Tips + +1. **Be Specific**: Provide clear and detailed prompts to Copilot for better suggestions. +2. 
**Iterate**: Review and refine Copilot's suggestions to ensure they meet project standards. +3. **Split Tasks**: Break down complex tasks into smaller, manageable parts for better suggestions. +4. **Test Suggestions**: Always test Copilot-generated code to ensure it works as expected. + +## Additional Resources + +- [Python Coding Conventions](../docs/coding-conventions.md) +- [Environment Variables Guide](../docs/env-vars.md) +- [Steps to Upgrade Python](../docs/steps-to-upgrade-python.md) +- [Node.js Installation Script](../scripts/install_nodejs_14.bash) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index df1b58c0af3..1eb0bbe45fd 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,10 +11,10 @@ updates: - sanderegg assignees: - pcrespov + - sanderegg + labels: + - t:maintenance ignore: - - dependency-name: aiozipkin - versions: - - ">= 1.a, < 2" - dependency-name: docker-compose versions: - 1.28.2 @@ -29,38 +29,10 @@ updates: - dependency-name: httpx versions: - 0.17.0 - - dependency-name: minio - versions: - - 7.0.0 - - package-ecosystem: pip - directory: "/packages/service-library" - schedule: - interval: weekly - time: "04:00" - open-pull-requests-limit: 10 - reviewers: - - pcrespov - - sanderegg - assignees: - - pcrespov - ignore: - - dependency-name: aiozipkin - versions: - - ">= 1.a, < 2" - dependency-name: openapi-core versions: - "> 0.12.0, < 1" - - package-ecosystem: pip - directory: "/packages/postgres-database" - schedule: - interval: weekly - time: "04:00" - open-pull-requests-limit: 10 - reviewers: - - pcrespov - - sanderegg - assignees: - - pcrespov + - package-ecosystem: "github-actions" directory: "/" schedule: @@ -71,3 +43,5 @@ updates: - pcrespov assignees: - sanderegg + labels: + - t:maintenance diff --git a/.github/mergify.yml b/.github/mergify.yml new file mode 100644 index 00000000000..8e116deb45a --- /dev/null +++ b/.github/mergify.yml @@ -0,0 +1,14 @@ +queue_rules: + - name: default + queue_conditions: # condtions to be met to add the PR to the queue (manually or automatically) + # general prerequisits fo accept the PR in the queue + - label=πŸ€–-automerge # let Mergify know that the PR can be merged (added manually) + - label!=πŸ€–-do-not-merge # block Mergify from merging the PR (added manually) + - base=master + - -draft # PR is not in draft state + - -conflict # No merge conflicts + + # Check for required reviews + - "#approved-reviews-by>=2" # Requires 2 approving reviews + - "#changes-requested-reviews-by=0" # No changes requested + - "#review-threads-unresolved=0" # All review threads resolved diff --git a/.github/workflows/_reusable-build-images.yml b/.github/workflows/_reusable-build-images.yml new file mode 100644 index 00000000000..f634e1c2d78 --- /dev/null +++ b/.github/workflows/_reusable-build-images.yml @@ -0,0 +1,61 @@ +name: Reusable Build Images + +on: + workflow_call: + inputs: + build-backend: + required: true + type: boolean + description: "Whether backend images should be built" + build-frontend: + required: true + type: boolean + description: "Whether frontend images should be built" + python-version: + required: true + type: string + description: "Python version to use" + os: + required: true + type: string + description: "Runner OS to use" + +jobs: + build-test-images: + timeout-minutes: 30 + runs-on: ${{ inputs.os }} + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: expose github runtime for 
buildx + uses: crazy-max/ghaction-github-runtime@v3 + - name: show system environs + run: ./ci/helpers/show_system_versions.bash + - name: build backend images + if: ${{ inputs.build-backend }} + run: | + export DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash) + mkdir --parents /${{ runner.temp }}/build + make build local-dest=/${{ runner.temp }}/build exclude=static-webserver + - name: build frontend images + if: ${{ inputs.build-frontend }} + run: | + export DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash) + mkdir --parents /${{ runner.temp }}/build + make build local-dest=/${{ runner.temp }}/build target=static-webserver + - name: upload backend artifacts + if: ${{ inputs.build-backend }} + uses: actions/upload-artifact@v4 + with: + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + - name: upload frontend artifacts + if: ${{ inputs.build-frontend }} + uses: actions/upload-artifact@v4 + with: + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-frontend + path: /${{ runner.temp }}/build diff --git a/.github/workflows/ci-arm-build.yml b/.github/workflows/ci-arm-build.yml new file mode 100644 index 00000000000..ee4b9a194d7 --- /dev/null +++ b/.github/workflows/ci-arm-build.yml @@ -0,0 +1,58 @@ +name: CI ARM64 Build and Push + +on: + push: + branches: + - "master" + tags-ignore: + - "*" + + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-and-push-arm64: + timeout-minutes: 60 # intentionally long to allow for slow builds + runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-24.04] + python: ["3.10"] + env: + # secrets can be set in settings/secrets on github + DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }} + steps: + - uses: actions/checkout@v4 + - name: setup QEMU + uses: docker/setup-qemu-action@v3 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: expose github runtime for buildx + uses: crazy-max/ghaction-github-runtime@v3 + - name: show system environs + run: ./ci/helpers/show_system_versions.bash + - name: login to Dockerhub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Set deployment variables + run: | + if [ "${GITHUB_REF}" == "refs/heads/master" ]; then + echo "TAG_PREFIX=master-github" >> $GITHUB_ENV + elif [[ "${GITHUB_REF}" == refs/heads/hotfix_v* ]]; then + echo "TAG_PREFIX=hotfix-github" >> $GITHUB_ENV + elif [[ "${GITHUB_REF}" == refs/heads/hotfix_staging_* ]]; then + echo "TAG_PREFIX=hotfix-staging-github" >> $GITHUB_ENV + fi + - name: build & push images for latest tag + run: | + export DOCKER_IMAGE_TAG="$TAG_PREFIX-latest-arm64" + export DOCKER_TARGET_PLATFORMS=linux/arm64 + make build push=true diff --git a/.github/workflows/ci-multi-architecture-fusing.yml b/.github/workflows/ci-multi-architecture-fusing.yml new file mode 100644 index 00000000000..dadeedf1038 --- /dev/null +++ b/.github/workflows/ci-multi-architecture-fusing.yml @@ -0,0 +1,58 @@ +name: CI Multi-Architecture Fusing + +on: + workflow_run: + workflows: ["CI ARM64 Build and Push", "CI"] + types: + - completed + branches: + - "master" + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + multi-architecture-fusing: + if: ${{ github.event.workflow_run.conclusion == 'success' }} + timeout-minutes: 60 # intentionally 
long to allow for slow builds + runs-on: ubuntu-latest + strategy: + matrix: + os: [ubuntu-24.04] + python: ["3.11"] + env: + # secrets can be set in settings/secrets on github + DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }} + steps: + - uses: actions/checkout@v4 + - name: setup QEMU + uses: docker/setup-qemu-action@v3 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: expose github runtime for buildx + uses: crazy-max/ghaction-github-runtime@v3 + - name: show system environs + run: ./ci/helpers/show_system_versions.bash + - name: login to Dockerhub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Set deployment variables + run: | + if [ "${GITHUB_REF}" == "refs/heads/master" ]; then + echo "TAG_PREFIX=master-github" >> $GITHUB_ENV + elif [[ "${GITHUB_REF}" == refs/heads/hotfix_v* ]]; then + echo "TAG_PREFIX=hotfix-github" >> $GITHUB_ENV + elif [[ "${GITHUB_REF}" == refs/heads/hotfix_staging_* ]]; then + echo "TAG_PREFIX=hotfix-staging-github" >> $GITHUB_ENV + fi + - name: fuse images in the registry for latest tag + run: | + export DOCKER_IMAGE_TAG="$TAG_PREFIX-latest" + make docker-image-fuse SUFFIX=arm64 diff --git a/.github/workflows/ci-pact-master.yml b/.github/workflows/ci-pact-master.yml new file mode 100644 index 00000000000..b6df75fe973 --- /dev/null +++ b/.github/workflows/ci-pact-master.yml @@ -0,0 +1,41 @@ +# This workflow holds jobs which are required to pass before merging into master + +name: CI PACT Master +on: + push: + branches: + - "master" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + pact-tests: + timeout-minutes: 10 + name: "Run PACT tests" + runs-on: ubuntu-latest + env: + # secrets can be set in settings/secrets on github + PACT_BROKER_URL: ${{ secrets.PACT_BROKER_URL }} + PACT_BROKER_USERNAME: ${{ secrets.PACT_BROKER_USERNAME }} + PACT_BROKER_PASSWORD: ${{ secrets.PACT_BROKER_PASSWORD }} + steps: + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + - name: checkout source branch + uses: actions/checkout@v4 + - name: Run pact tests + run: | + make devenv + source .venv/bin/activate + cd services/api-server + make install-ci + make test-pacts diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index b4f018e9efe..e18e43c6cbf 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -21,33 +21,21 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - # ensure the docker_compose_sha corresponds to the the version!! 
- - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false env: TO_TAG_PREFIX: release-github steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: find branch name shell: bash run: echo "BRANCH_NAME=$(git name-rev --refs="refs/remotes/origin/master" --refs="refs/remotes/origin/hotfix_v*" --refs="refs/remotes/origin/hotfix_staging_*" --name-only ${GITHUB_SHA})" >> $GITHUB_ENV - - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 - with: - version: ${{ matrix.docker_buildx }} - - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} + uses: docker/setup-buildx-action@v3 - name: set owner variable run: echo "OWNER=${GITHUB_REPOSITORY%/*}" >> $GITHUB_ENV - name: set git tag diff --git a/.github/workflows/ci-staging.yml b/.github/workflows/ci-staging.yml index 80bb898c7fd..4340eb65078 100644 --- a/.github/workflows/ci-staging.yml +++ b/.github/workflows/ci-staging.yml @@ -3,7 +3,7 @@ name: Github-CI-Staging on: push: tags: - - staging_[a-zA-Z]+[0-9]+ + - staging_[a-zA-Z0-9]+[0-9]+ env: # secrets can be set in settings/secrets on github @@ -21,32 +21,21 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false env: TO_TAG_PREFIX: staging-github steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: find branch name shell: bash run: echo "BRANCH_NAME=$(git name-rev --refs="refs/remotes/origin/master" --refs="refs/remotes/origin/hotfix_v*" --refs="refs/remotes/origin/hotfix_staging_*" --name-only ${GITHUB_SHA})" >> $GITHUB_ENV - - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 - with: - version: ${{ matrix.docker_buildx }} - - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} + uses: docker/setup-buildx-action@v3 - name: set owner variable run: echo "OWNER=${GITHUB_REPOSITORY%/*}" >> $GITHUB_ENV - name: set git tag diff --git a/.github/workflows/ci-testing-deploy.yml b/.github/workflows/ci-testing-deploy.yml index 978f76c9a9a..e248ca73cae 100644 --- a/.github/workflows/ci-testing-deploy.yml +++ b/.github/workflows/ci-testing-deploy.yml @@ -18,23 +18,23 @@ on: pull_request: branches: - "*" + # https://github.blog/changelog/2023-02-08-pull-request-merge-queue-public-beta/ + merge_group: + branches: + - "master" workflow_dispatch: inputs: - tests: - description: tests to run - required: true - default: "all" - type: choice - options: - - unit-tests - - integration-tests - - system-tests - - all + force_all_builds: + description: 'Run all tests and builds' + required: false + type: boolean + default: false env: - DEFAULT_MAX_NANO_CPUS: 10000000 - DEFAULT_MAX_MEMORY: 268435456 + # NOTE: 'COLUMNS' is a shell env var that represents the width (number of columns) + # of the terminal or command-line interface in characters. 
+ COLUMNS: 120 concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -46,26 +46,35 @@ jobs: runs-on: ubuntu-latest # Set job outputs to values from filter step outputs: + aws-library: ${{ steps.filter.outputs.aws-library }} dask-task-models-library: ${{ steps.filter.outputs.dask-task-models-library }} models-library: ${{ steps.filter.outputs.models-library }} + common-library: ${{ steps.filter.outputs.common-library }} + notifications-library: ${{ steps.filter.outputs.notifications-library }} postgres-database: ${{ steps.filter.outputs.postgres-database }} service-integration: ${{ steps.filter.outputs.service-integration }} service-library: ${{ steps.filter.outputs.service-library }} settings-library: ${{ steps.filter.outputs.settings-library }} simcore-sdk: ${{ steps.filter.outputs.simcore-sdk }} agent: ${{ steps.filter.outputs.agent }} + notifications: ${{ steps.filter.outputs.notifications }} api: ${{ steps.filter.outputs.api }} api-server: ${{ steps.filter.outputs.api-server }} autoscaling: ${{ steps.filter.outputs.autoscaling }} catalog: ${{ steps.filter.outputs.catalog }} + clusters-keeper: ${{ steps.filter.outputs.clusters-keeper }} dask-sidecar: ${{ steps.filter.outputs.dask-sidecar }} datcore-adapter: ${{ steps.filter.outputs.datcore-adapter }} director: ${{ steps.filter.outputs.director }} director-v2: ${{ steps.filter.outputs.director-v2 }} dynamic-sidecar: ${{ steps.filter.outputs.dynamic-sidecar }} + efs-guardian: ${{ steps.filter.outputs.efs-guardian }} invitations: ${{ steps.filter.outputs.invitations }} migration: ${{ steps.filter.outputs.migration }} - osparc-gateway-server: ${{ steps.filter.outputs.osparc-gateway-server }} + payments: ${{ steps.filter.outputs.payments }} + dynamic-scheduler: ${{ steps.filter.outputs.dynamic-scheduler }} + docker-api-proxy: ${{ steps.filter.outputs.docker-api-proxy }} + resource-usage-tracker: ${{ steps.filter.outputs.resource-usage-tracker }} static-webserver: ${{ steps.filter.outputs.static-webserver }} storage: ${{ steps.filter.outputs.storage }} webserver: ${{ steps.filter.outputs.webserver }} @@ -73,26 +82,50 @@ jobs: anything-py: ${{ steps.filter.outputs.anything-py }} anything-js: ${{ steps.filter.outputs.anything-js }} steps: - - uses: actions/checkout@v3 - if: ${{ github.event_name == 'push' }} + - uses: actions/checkout@v4 # For pull requests it's not necessary to checkout the code - - uses: dorny/paths-filter@v2 + - uses: dorny/paths-filter@v3 id: filter with: filters: | + aws-library: + - 'packages/aws-library/**' + - 'packages/pytest-simcore/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' dask-task-models-library: - 'packages/dask-task-models-library/**' - 'packages/pytest-simcore/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' models-library: - 'packages/models-library/**' - 'packages/postgres-database/**' - 'packages/pytest-simcore/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + common-library: + - 'packages/common-library/**' + - 'packages/pytest-simcore/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + notifications-library: + - 'packages/notifications-library/**' + - 'packages/postgres-database/**' + - 'packages/pytest-simcore/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' postgres-database: - 'packages/postgres-database/**' - 'packages/pytest-simcore/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' service-integration: - 'packages/models-library/**' - 'packages/pytest-simcore/**' 
@@ -102,6 +135,8 @@ jobs: - 'packages/pytest-simcore/**' - 'packages/service-library/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' settings-library: - 'packages/pytest-simcore/**' - 'packages/settings-library/**' @@ -113,52 +148,107 @@ jobs: - 'packages/**' - 'services/agent/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + notifications: + - 'packages/**' + - 'services/notifications/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' api: - 'api/**' api-server: - 'packages/**' - 'services/api-server/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' autoscaling: - 'packages/**' - 'services/autoscaling/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' catalog: - 'packages/**' - 'services/catalog/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + clusters-keeper: + - 'packages/**' + - 'services/clusters-keeper/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' dask-sidecar: - 'packages/**' - 'services/dask-sidecar/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' datcore-adapter: - 'packages/**' - 'services/datcore-adapter/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' director: - 'packages/**' - 'services/director/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' director-v2: - 'packages/**' - 'services/director-v2/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' dynamic-sidecar: - 'packages/**' - 'services/dynamic-sidecar/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + efs-guardian: + - 'packages/**' + - 'services/efs-guardian/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' invitations: - 'packages/**' - 'services/invitations/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' migration: - 'packages/**' - 'services/migration/**' - 'services/docker-compose*' - osparc-gateway-server: + payments: + - 'packages/**' + - 'services/payments/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + dynamic-scheduler: + - 'packages/**' + - 'services/dynamic-scheduler/**' + - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' + docker-api-proxy: + - 'packages/**' + - 'services/docker-api-proxy/**' + resource-usage-tracker: - 'packages/**' - - 'services/osparc-gateway-server/**' + - 'services/resource-usage-tracker/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' static-webserver: - 'services/static-webserver/**' - 'services/docker-compose*' @@ -166,6 +256,8 @@ jobs: - 'packages/**' - 'services/storage/**' - 'services/docker-compose*' + - 'scripts/mypy/*' + - 'mypy.ini' webserver: - 'packages/**' - 'services/web/**' @@ -179,251 +271,271 @@ jobs: anything-js: - '**/*.js' build-test-images: - # this step comes first, so that it is executed as first job in push calls - # in PR calls this step is anyway skipped needs: changes - if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' }} - runs-on: ${{ matrix.os }} + if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + type: [backend, frontend] fail-fast: false - name: "[build] docker images" - 
steps: - - uses: actions/checkout@v3 - - name: setup docker buildx - id: buildx - uses: docker/setup-buildx-action@v2 - with: - version: ${{ matrix.docker_buildx }} - driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: show system environs - run: ./ci/helpers/show_system_versions.bash - - name: build images - run: | - export DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash) - mkdir --parents /${{ runner.temp }}/build - make build local-dest=/${{ runner.temp }}/build - - name: upload build artifacts - uses: actions/upload-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: ./.github/workflows/_reusable-build-images.yml + with: + build-backend: ${{ matrix.type == 'backend' }} + build-frontend: ${{ matrix.type == 'frontend' }} + os: ubuntu-24.04 + python-version: "3.10" unit-test-webserver-01: needs: changes - if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 25 # if this timeout gets too small, then split the tests name: "[unit] webserver 01" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/web/server/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install webserver run: ./ci/github/unit-testing/webserver.bash install + - name: typecheck + run: ./ci/github/unit-testing/webserver.bash typecheck - name: test isolated + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/webserver.bash test_isolated - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/webserver.bash test_with_db 01 - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-webserver-02: needs: changes - if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 25 # if this timeout gets too small, then split the tests name: "[unit] webserver 02" runs-on: 
${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/web/server/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install webserver run: ./ci/github/unit-testing/webserver.bash install - name: test run: ./ci/github/unit-testing/webserver.bash test_with_db 02 - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-webserver-03: needs: changes - if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 25 # if this timeout gets too small, then split the tests name: "[unit] webserver 03" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/web/server/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install webserver run: ./ci/github/unit-testing/webserver.bash install - name: test run: ./ci/github/unit-testing/webserver.bash test_with_db 03 - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + + unit-test-webserver-04: + needs: changes + if: ${{ needs.changes.outputs.webserver == 'true' || github.event_name == 'push' || 
github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 25 # if this timeout gets too small, then split the tests + name: "[unit] webserver 04" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install webserver + run: ./ci/github/unit-testing/webserver.bash install + - name: test + run: ./ci/github/unit-testing/webserver.bash test_with_db 04 + - uses: codecov/codecov-action@v5 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/test-results-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + unit-test-storage: needs: changes - if: ${{ needs.changes.outputs.storage == 'true' || github.event_name == 'push' }} - timeout-minutes: 18 # if this timeout gets too small, then split the tests + if: ${{ needs.changes.outputs.storage == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 25 # if this timeout gets too small, then split the tests name: "[unit] storage" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/storage/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/storage/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/storage.bash install + - name: typecheck + run: ./ci/github/unit-testing/storage.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/storage.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-agent: needs: changes - if: ${{ needs.changes.outputs.agent == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.agent == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests 
name: "[unit] agent" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/agent/requirements/ci.txt" - name: install rclone run: sudo ./ci/github/helpers/install_rclone.bash + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/agent/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -431,125 +543,177 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/agent.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/agent.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + unit-test-notifications: + needs: changes + if: ${{ needs.changes.outputs.notifications == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] notifications" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/notifications/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/notifications.bash install + - name: typecheck + run: ./ci/github/unit-testing/notifications.bash typecheck + - name: test + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/notifications.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional unit-test-api: needs: changes - if: ${{ needs.changes.outputs.api == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.api == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] api" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 
8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "api/tests/requirements.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/api/tests/requirements.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install api run: ./ci/github/unit-testing/api.bash install - name: test run: ./ci/github/unit-testing/api.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + unit-test-api-server: needs: changes - if: ${{ needs.changes.outputs.api-server == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.api-server == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] api-server" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/api-server/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/api-server/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/api-server.bash install + - name: typecheck + run: ./ci/github/unit-testing/api-server.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/api-server.bash test - - uses: codecov/codecov-action@v3.1.1 + - name: OAS backwards compatibility check + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/api-server.bash openapi-diff + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-autoscaling: needs: changes - if: ${{ needs.changes.outputs.autoscaling == 'true' || github.event_name == 'push' }} - timeout-minutes: 18 # if this timeout gets too small, then split the tests + if: ${{ 
needs.changes.outputs.autoscaling == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 22 # temporary: mypy takes a huge amount of time to run here, maybe we should cache it name: "[unit] autoscaling" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/autoscaling/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/autoscaling/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -557,92 +721,151 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/autoscaling.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/autoscaling.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-catalog: needs: changes - if: ${{ needs.changes.outputs.catalog == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.catalog == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] catalog" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/catalog/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/catalog/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/catalog.bash install + - name: typecheck + run: ./ci/github/unit-testing/catalog.bash typecheck - name: test + if: ${{ !cancelled() 
}} run: ./ci/github/unit-testing/catalog.bash test - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/catalog/test_failures - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + + unit-test-clusters-keeper: + needs: changes + if: ${{ needs.changes.outputs.clusters-keeper == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] clusters-keeper" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/clusters-keeper/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: | + make devenv + source .venv/bin/activate && \ + pushd services/clusters-keeper && \ + make install-ci + - name: typecheck + run: | + source .venv/bin/activate && \ + uv pip install mypy && \ + pushd services/clusters-keeper && \ + make mypy + - name: test + if: ${{ !cancelled() }} + run: | + source .venv/bin/activate && \ + pushd services/clusters-keeper && \ + make test-ci-unit + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + unit-test-datcore-adapter: needs: changes - if: ${{ needs.changes.outputs.datcore-adapter == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.datcore-adapter == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] datcore-adapter" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/datcore-adapter/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/datcore-adapter/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -650,142 +873,197 
@@ jobs: - name: typecheck run: ./ci/github/unit-testing/datcore-adapter.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/datcore-adapter.bash test - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/datcore-adapter/test_failures - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-director: needs: changes - if: ${{ needs.changes.outputs.director == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.director == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] director" runs-on: ${{ matrix.os }} strategy: matrix: - # KEEP 3.6 Development of this service is frozen - # KEEP ubuntu 20.04, else no python 3.6 - python: [3.6] - os: [ubuntu-20.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/director/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/director/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/director.bash install + - name: typecheck + run: ./ci/github/unit-testing/director.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/director.bash test - - uses: codecov/codecov-action@v3.1.1 + - name: upload failed tests logs + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ github.job }}_docker_logs + path: ./services/director/test_failures + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-director-v2: needs: changes - if: ${{ needs.changes.outputs.director-v2 == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.director-v2 == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] director-v2" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: 
actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/director-v2/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/director-v2/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/director-v2.bash install + - name: typecheck + run: ./ci/github/unit-testing/director-v2.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/director-v2.bash test - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/director-v2/test_failures - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + + unit-test-aws-library: + needs: changes + if: ${{ needs.changes.outputs.aws-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] aws-library" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/aws-library/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/aws-library.bash install + - name: typecheck + run: ./ci/github/unit-testing/aws-library.bash typecheck + - name: test + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/aws-library.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + unit-test-dask-task-models-library: needs: changes - if: ${{ needs.changes.outputs.dask-task-models-library == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.dask-task-models-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] dask-task-models-library" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - 
- uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/dask-task-models-library/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/dask-task-models-library/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -793,44 +1071,44 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/dask-task-models-library.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/dask-task-models-library.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-dask-sidecar: needs: changes - if: ${{ needs.changes.outputs.dask-sidecar == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.dask-sidecar == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] dask-sidecar" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/dask-sidecar/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/dask-sidecar/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -838,97 +1116,189 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/dask-sidecar.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/dask-sidecar.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + + unit-test-payments: + needs: changes + if: ${{ needs.changes.outputs.payments == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] 
payments" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/payments/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/payments.bash install + - name: typecheck + run: ./ci/github/unit-testing/payments.bash typecheck + - name: test + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/payments.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional - unit-test-osparc-gateway-server: + + unit-test-dynamic-scheduler: needs: changes - if: ${{ needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.dynamic-scheduler == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests - name: "[unit] osparc-gateway-server" + name: "[unit] dynamic-scheduler" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/osparc-gateway-server/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/dynamic-scheduler/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/dynamic-scheduler.bash install + - name: typecheck + run: ./ci/github/unit-testing/dynamic-scheduler.bash typecheck + - name: test + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/dynamic-scheduler.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + + unit-test-resource-usage-tracker: + needs: changes + if: ${{ needs.changes.outputs.resource-usage-tracker == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] resource-usage-tracker" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: 
actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/resource-usage-tracker/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: | make devenv source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ + pushd services/resource-usage-tracker && \ make install-ci - name: typecheck run: | source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ + pushd services/resource-usage-tracker && \ make mypy - name: test + if: ${{ !cancelled() }} run: | source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ + pushd services/resource-usage-tracker && \ make test-ci-unit - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-dynamic-sidecar: needs: changes - if: ${{ needs.changes.outputs.dynamic-sidecar == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.dynamic-sidecar == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] dynamic-sidecar" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/dynamic-sidecar/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/dynamic-sidecar/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -936,85 +1306,100 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/dynamic-sidecar.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/dynamic-sidecar.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional - unit-test-frontend: + + unit-test-efs-guardian: needs: changes - if: ${{ needs.changes.outputs.static-webserver == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.efs-guardian == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the 
tests - name: "[unit] frontend" + name: "[unit] efs-guardian" runs-on: ${{ matrix.os }} strategy: matrix: - node: [14] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - - uses: actions/setup-node@v3.6.0 + - name: setup python environment + uses: actions/setup-python@v5 with: - node-version: ${{ matrix.node }} - cache: "npm" + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/efs-guardian/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install - run: ./ci/github/unit-testing/frontend.bash install + run: | + make devenv + source .venv/bin/activate && \ + pushd services/efs-guardian && \ + make install-ci + - name: typecheck + run: | + source .venv/bin/activate && \ + uv pip install mypy && \ + pushd services/efs-guardian && \ + make mypy - name: test - run: ./ci/github/unit-testing/frontend.bash test - # no coverage here?? - # - uses: codecov/codecov-action@v3.1.1 - # with: - # flags: unittests #optional + if: ${{ !cancelled() }} + run: | + source .venv/bin/activate && \ + pushd services/efs-guardian && \ + make test-ci-unit + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + unit-test-python-linting: needs: changes - if: ${{ needs.changes.outputs.anything-py == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything-py == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] python-linting" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9", "3.10", "3.11"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10", "3.12"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/service-library/requirements/_test.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/ci/helpers/requirements.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - 
name: install @@ -1024,36 +1409,32 @@ jobs: unit-test-postgres-database: needs: changes - if: ${{ needs.changes.outputs.postgres-database == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.postgres-database == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] postgres-database" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/postgres-database/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/postgres-database/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -1061,44 +1442,44 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/postgres-database.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/postgres-database.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-invitations: needs: changes - if: ${{ needs.changes.outputs.invitations == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.invitations == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] invitations" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/invitations/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/notifications-library/requirements/ci.txt" - name: show 
system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -1106,245 +1487,358 @@ jobs: - name: typecheck run: ./ci/github/unit-testing/invitations.bash typecheck - name: test - if: always() + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/invitations.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-service-integration: needs: changes - if: ${{ needs.changes.outputs.service-integration == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.service-integration == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] service-integration" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/service-integration/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/service-integration/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/service-integration.bash install + - name: typecheck + run: ./ci/github/unit-testing/service-integration.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/service-integration.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-service-library: needs: changes - if: ${{ needs.changes.outputs.service-library == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.service-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] service-library" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo 
./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/service-library/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/service-library/requirements/ci*.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/service-library.bash install_all + - name: typecheck + run: ./ci/github/unit-testing/service-library.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/service-library.bash test_all - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-settings-library: needs: changes - if: ${{ needs.changes.outputs.settings-library == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.settings-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] settings-library" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/settings-library/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/settings-library/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/settings-library.bash install + - name: typecheck + run: ./ci/github/unit-testing/settings-library.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/settings-library.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-test-models-library: needs: changes - if: ${{ needs.changes.outputs.models-library == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.models-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] models-library" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: 
[1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/models-library/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/models-library/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/models-library.bash install + - name: typecheck + run: ./ci/github/unit-testing/models-library.bash typecheck - name: test run: ./ci/github/unit-testing/models-library.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + + unit-test-common-library: + needs: changes + if: ${{ needs.changes.outputs.common-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] common-library" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/common-library/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/common-library.bash install + - name: typecheck + run: ./ci/github/unit-testing/common-library.bash typecheck + - name: test + run: ./ci/github/unit-testing/common-library.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + unit-test-notifications-library: + needs: changes + if: ${{ needs.changes.outputs.notifications-library == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 18 # if this timeout gets too small, then split the tests + name: "[unit] notifications-library" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + 
enable-cache: false + cache-dependency-glob: "**/notifications-library/requirements/ci.txt" + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/unit-testing/notifications-library.bash install + - name: typecheck + run: ./ci/github/unit-testing/notifications-library.bash typecheck + - name: test + if: ${{ !cancelled() }} + run: ./ci/github/unit-testing/notifications-library.bash test + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: unittests #optional + + unit-test-simcore-sdk: needs: changes - if: ${{ needs.changes.outputs.simcore-sdk == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.simcore-sdk == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 18 # if this timeout gets too small, then split the tests name: "[unit] simcore-sdk" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: install rclone run: sudo ./ci/github/helpers/install_rclone.bash - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/simcore-sdk/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/simcore-sdk/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install run: ./ci/github/unit-testing/simcore-sdk.bash install + - name: typecheck + run: ./ci/github/unit-testing/simcore-sdk.bash typecheck - name: test + if: ${{ !cancelled() }} run: ./ci/github/unit-testing/simcore-sdk.bash test - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: unittests #optional + unit-tests: # NOTE: this is a github required status check! 
if: ${{ always() }} needs: [ unit-test-agent, - unit-test-api, unit-test-api-server, + unit-test-api, unit-test-autoscaling, unit-test-catalog, + unit-test-clusters-keeper, unit-test-dask-sidecar, + unit-test-aws-library, unit-test-dask-task-models-library, unit-test-datcore-adapter, - unit-test-director, unit-test-director-v2, + unit-test-director, unit-test-dynamic-sidecar, - unit-test-frontend, + unit-test-efs-guardian, unit-test-models-library, - unit-test-osparc-gateway-server, + unit-test-common-library, + unit-test-notifications-library, + unit-test-payments, + unit-test-notifications, + unit-test-dynamic-scheduler, unit-test-postgres-database, unit-test-python-linting, + unit-test-resource-usage-tracker, unit-test-service-integration, unit-test-service-library, unit-test-settings-library, @@ -1353,6 +1847,7 @@ jobs: unit-test-webserver-01, unit-test-webserver-02, unit-test-webserver-03, + unit-test-webserver-04, ] runs-on: ubuntu-latest steps: @@ -1368,41 +1863,42 @@ jobs: integration-test-webserver-01: needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.webserver == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.webserver == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 30 # if this timeout gets too small, then split the tests name: "[int] webserver 01" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/web/server/requirements/ci.txt" - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1412,15 +1908,18 @@ jobs: - name: test run: ./ci/github/integration-testing/webserver.bash test 01 - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: 
./services/web/server/test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/integration-testing/webserver.bash clean_up - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests #optional @@ -1432,37 +1931,38 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/web/server/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/web/server/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1472,15 +1972,18 @@ jobs: - name: test run: ./ci/github/integration-testing/webserver.bash test 02 - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/web/server/test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/integration-testing/webserver.bash clean_up - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests #optional @@ -1492,37 +1995,38 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: 
sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/director-v2/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/director-v2/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1532,15 +2036,18 @@ jobs: - name: test run: ./ci/github/integration-testing/director-v2.bash test 01 - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/director-v2/test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/integration-testing/director-v2.bash clean_up - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests #optional @@ -1552,40 +2059,47 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false + env: + # NOTE: DIRECTOR_DEFAULT_MAX_* used for integration-tests that include `director` service + DIRECTOR_DEFAULT_MAX_MEMORY: 268435456 + DIRECTOR_DEFAULT_MAX_NANO_CPUS: 10000000 + DIRECTOR_TRACING: null steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup rclone docker volume plugin run: sudo ./ci/github/helpers/install_rclone_docker_volume_plugin.bash - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/director-v2/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with 
retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install rclone + run: sudo ./ci/github/helpers/install_rclone.bash + - name: install uv + uses: astral-sh/setup-uv@v6 with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/director-v2/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1595,90 +2109,149 @@ jobs: - name: test run: ./ci/github/integration-testing/director-v2.bash test 02 - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./services/director-v2/test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/integration-testing/director-v2.bash clean_up - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests #optional - integration-test-osparc-gateway-server: + integration-test-dynamic-sidecar: needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.osparc-gateway-server == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.dynamic-sidecar == 'true' || github.event_name == 'push'}} timeout-minutes: 30 # if this timeout gets too small, then split the tests - name: "[int] osparc-gateway-server" + name: "[int] dynamic-sidecar" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "services/osparc-gateway-server/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install rclone + run: sudo ./ci/github/helpers/install_rclone.bash + - name: install uv + uses: astral-sh/setup-uv@v6 with: - name: 
docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/dynamic-sidecar/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install - run: | - make devenv && \ - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make install-ci - - name: integration-test - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make test-ci-integration - - name: system-test - run: | - source .venv/bin/activate && \ - pushd services/osparc-gateway-server && \ - make test-system + run: ./ci/github/integration-testing/dynamic-sidecar.bash install + - name: test + run: ./ci/github/integration-testing/dynamic-sidecar.bash test 01 - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs - path: ./services/director-v2/test_failures + path: ./services/dynamic-sidecar/test_failures - name: cleanup - if: always() - run: | - pushd services/osparc-gateway-server && \ - make down - - uses: codecov/codecov-action@v3.1.1 + if: ${{ !cancelled() }} + run: ./ci/github/integration-testing/dynamic-sidecar.bash clean_up + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + with: + flags: integrationtests #optional + + + integration-test-docker-api-proxy: + needs: [changes, build-test-images] + if: ${{ needs.changes.outputs.anything-py == 'true' || needs.changes.outputs.docker-api-proxy == 'true' || github.event_name == 'push'}} + timeout-minutes: 30 # if this timeout gets too small, then split the tests + name: "[int] docker-api-proxy" + runs-on: ${{ matrix.os }} + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: expose github runtime for buildx + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/docker-api-proxy/requirements/ci.txt" + - name: load docker images + run: make load-images local-src=/${{ runner.temp }}/build + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: install + run: ./ci/github/integration-testing/docker-api-proxy.bash install + - name: test + run: ./ci/github/integration-testing/docker-api-proxy.bash test + - name: upload failed tests logs + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ github.job }}_docker_logs + path: ./services/docker-api-proxy/test_failures + - name: cleanup + if: ${{ !cancelled() }} + run: ./ci/github/integration-testing/docker-api-proxy.bash clean_up + - uses: 
codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests #optional @@ -1690,37 +2263,38 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: install rclone run: sudo ./ci/github/helpers/install_rclone.bash - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "packages/simcore-sdk/requirements/ci.txt" - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + name: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-backend + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/simcore-sdk/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1730,26 +2304,30 @@ jobs: - name: test run: ./ci/github/integration-testing/simcore-sdk.bash test - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./packages/simcore-sdk/test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/integration-testing/simcore-sdk.bash clean_up - - uses: codecov/codecov-action@v3.1.1 + - uses: codecov/codecov-action@v5 + if: ${{ !cancelled() }} + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: flags: integrationtests integration-tests: # NOTE: this is a github required status check! 
- if: always() + if: ${{ always() }} needs: [ integration-test-director-v2-01, integration-test-director-v2-02, - integration-test-osparc-gateway-server, + integration-test-dynamic-sidecar, + integration-test-docker-api-proxy, integration-test-simcore-sdk, integration-test-webserver-01, integration-test-webserver-02, @@ -1767,100 +2345,104 @@ jobs: system-test-public-api: needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 25 # if this timeout gets too small, then split the tests name: "[sys] public api" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "tests/public-api/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + pattern: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-* + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/public-api/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: ./ci/github/system-testing/public-api.bash install - name: test run: ./ci/github/system-testing/public-api.bash test - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/system-testing/public-api.bash clean_up system-test-swarm-deploy: needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 30 # if this timeout gets too small, then split the tests name: "[sys] deploy 
simcore" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "tests/swarm-deploy/requirements/ci.txt" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + pattern: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-* + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/swarm-deploy/requirements/ci.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1872,65 +2454,66 @@ jobs: - name: dump services setting schemas run: export DOCKER_REGISTRY=local; export DOCKER_IMAGE_TAG=production; make settings-schema.json - name: upload services settings schemas - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_services_settings_schemas path: ./services/**/settings-schema.json - name: upload failed tests logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ failure() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./test_failures - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/system-testing/swarm-deploy.bash clean_up system-test-e2e: needs: [changes, build-test-images] - if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' }} + if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} timeout-minutes: 30 # if this timeout gets too small, then split the tests name: "[sys] e2e" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] + python: ["3.10"] node: [14] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: 
setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "tests/e2e/requirements/requirements.txt" - - uses: actions/setup-node@v3.6.0 + - uses: actions/setup-node@v4.1.0 with: node-version: ${{ matrix.node }} cache: "npm" cache-dependency-path: "tests/e2e/package-lock.json" - name: expose github runtime for buildx - uses: crazy-max/ghaction-github-runtime@v2 - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + uses: crazy-max/ghaction-github-runtime@v3 + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + pattern: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-* + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/e2e/requirements/requirements.txt" - name: load docker images run: make load-images local-src=/${{ runner.temp }}/build - name: show system version @@ -1941,60 +2524,123 @@ jobs: run: ./ci/github/system-testing/e2e.bash test - name: dump docker logs id: docker_logs_dump - if: failure() + if: ${{ !cancelled() }} run: ./ci/github/system-testing/e2e.bash dump_docker_logs - name: upload docker logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_docker_logs path: ./tests/e2e/test_failures - name: upload screenshots - if: always() - uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_screenshots path: tests/e2e/screenshots - name: upload e2e logs - if: failure() - uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 with: name: ${{ github.job }}_logs path: tests/e2e/logs - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/system-testing/e2e.bash clean_up + system-test-e2e-playwright: + needs: [changes, build-test-images] + if: ${{ needs.changes.outputs.anything == 'true' || github.event_name == 'push' || github.event.inputs.force_all_builds == 'true' }} + timeout-minutes: 30 # if this timeout gets too small, then split the tests + name: "[sys] e2e-playwright" + runs-on: ${{ matrix.os }} + # NOTE: running the job inside a container is an interesting approach, but it generates a load of issues (e.g. docker not being installed), so it is left disabled:
+ # container: + # image: mcr.microsoft.com/playwright/python:v1.39.0-jammy + strategy: + matrix: + python: ["3.10"] + os: [ubuntu-24.04] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: setup docker buildx + id: buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/e2e-playwright/requirements/ci.txt" + - name: expose github runtime for buildx + uses: crazy-max/ghaction-github-runtime@v3 + - name: download docker images + uses: actions/download-artifact@v4 + with: + pattern: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-* + path: /${{ runner.temp }}/build + - name: load docker images + run: make load-images local-src=/${{ runner.temp }}/build + - name: prepare devenv + run: make devenv + - name: show system version + run: ./ci/helpers/show_system_versions.bash + - name: setup + run: | + ./ci/github/system-testing/e2e-playwright.bash install + - name: test + run: | + ./ci/github/system-testing/e2e-playwright.bash test + - name: dump docker logs + id: docker_logs_dump + if: ${{ !cancelled() }} + run: ./ci/github/system-testing/e2e-playwright.bash dump_docker_logs + - name: upload docker logs + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ github.job }}_docker_logs + path: ./tests/e2e-playwright/test_failures + - name: upload tracing if failed + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ github.job }}_tracing + path: tests/e2e-playwright/test-results + system-test-environment-setup: timeout-minutes: 30 # if this timeout gets too small, then split the tests name: "[sys] environment setup" runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - name: setup python environment - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - cache: "pip" - cache-dependency-path: "tests/environment-setup/requirements/ci.txt" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/environment-setup/requirements/ci.txt" - name: show system version run: ./ci/helpers/show_system_versions.bash - name: install @@ -2002,18 +2648,19 @@ jobs: - name: test run: ./ci/github/system-testing/environment-setup.bash test - name: cleanup - if: always() + if: ${{ !cancelled() }} run: ./ci/github/system-testing/environment-setup.bash clean_up system-tests: # NOTE: this is a github required status check! 
- if: always() + if: ${{ always() }} needs: [ system-test-e2e, + system-test-e2e-playwright, system-test-environment-setup, system-test-public-api, - system-test-swarm-deploy, + system-test-swarm-deploy ] runs-on: ubuntu-latest steps: @@ -2033,13 +2680,8 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] - os: [ubuntu-22.04] - docker_buildx: [v0.10.4] - docker_compose: [1.29.1] - include: - - docker_compose: 1.29.1 - docker_compose_sha: 8097769d32e34314125847333593c8edb0dfc4a5b350e4839bef8c2fe8d09de7 + python: ["3.10"] + os: [ubuntu-24.04] fail-fast: false env: # secrets can be set in settings/secrets on github @@ -2047,20 +2689,28 @@ jobs: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: - version: ${{ matrix.docker_buildx }} driver: docker-container - - name: setup docker-compose - run: sudo ./ci/github/helpers/setup_docker_compose.bash ${{ matrix.docker_compose }} ${{ matrix.docker_compose_sha }} - - name: download docker images - uses: actions/download-artifact@v3 - with: - name: docker-buildx-images-${{ runner.os }}-${{ github.sha }} - path: /${{ runner.temp }}/build + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + cache-dependency-glob: "**/e2e-playwright/requirements/ci.txt" + # FIXME: Workaround for https://github.com/actions/download-artifact/issues/249 + - name: download docker images with retry + uses: Wandalen/wretry.action@master + with: + action: actions/download-artifact@v4 + with: | + pattern: docker-buildx-images-${{ runner.os }}-${{ github.sha }}-* + path: /${{ runner.temp }}/build + attempt_limit: 5 + attempt_delay: 1000 - name: load docker images run: | make load-images local-src=/${{ runner.temp }}/build diff --git a/.github/workflows/ci-testing-pull-request.yml b/.github/workflows/ci-testing-pull-request.yml new file mode 100644 index 00000000000..ea0bcbaf37c --- /dev/null +++ b/.github/workflows/ci-testing-pull-request.yml @@ -0,0 +1,114 @@ +# This workflow holds jobs which are required to pass before merging into master + +name: PR CI +on: + pull_request: + branches: + - "*" + # https://github.blog/changelog/2023-02-08-pull-request-merge-queue-public-beta/ + merge_group: + branches: + - "master" + + workflow_dispatch: + inputs: + target_repo: + description: full repository name (e.g. 'ITISFoundation/osparc-simcore') + required: true + default: "ITISFoundation/osparc-simcore" + type: environment + target_branch: + description: Check backwards compatibility against target_branch in target_repo + required: true + default: "master" + type: environment + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + api-specs: + timeout-minutes: 10 + name: "check OAS' are up to date" + runs-on: ubuntu-latest + steps: + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.6.x" + enable-cache: false + - name: checkout source branch + uses: actions/checkout@v4 + - name: Generate openapi specs + run: | + make devenv + source .venv/bin/activate + make openapi-specs + - name: Check openapi specs are up to date + run: | + if ! 
./ci/github/helpers/openapi-specs-diff.bash diff \ + "https://raw.githubusercontent.com/$GITHUB_REPOSITORY/$GITHUB_SHA" \ + .; then \ + echo "::error:: OAS are not up to date. Run 'make openapi-specs' to update them"; exit 1; \ + fi + + api-server-oas-breaking: + needs: api-specs + timeout-minutes: 10 + name: "api-server backwards compatibility" + runs-on: ubuntu-latest + steps: + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: checkout + uses: actions/checkout@v4 + - name: Set environment variables based on event type + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "REPO=${{ inputs.target_repo }}" >> $GITHUB_ENV + echo "BRANCH=${{ inputs.target_branch }}" >> $GITHUB_ENV + else + echo "REPO=${{ github.event.pull_request.base.repo.full_name }}" >> $GITHUB_ENV + echo "BRANCH=${{ github.base_ref }}" >> $GITHUB_ENV + fi + - name: check api-server backwards compatibility + run: | + ./scripts/openapi-diff.bash breaking --fail-on ERR\ + "https://raw.githubusercontent.com/$REPO/refs/heads/$BRANCH/services/api-server/openapi.json" \ + /specs/services/api-server/openapi.json + + all-oas-breaking: + needs: api-specs + continue-on-error: true + timeout-minutes: 10 + name: "OAS backwards compatibility" + runs-on: ubuntu-latest + steps: + - name: setup python environment + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: checkout + uses: actions/checkout@v4 + - name: Set environment variables based on event type + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "REPO=${{ inputs.target_repo }}" >> $GITHUB_ENV + echo "BRANCH=${{ inputs.target_branch }}" >> $GITHUB_ENV + else + echo "REPO=${{ github.event.pull_request.base.repo.full_name }}" >> $GITHUB_ENV + echo "BRANCH=${{ github.base_ref }}" >> $GITHUB_ENV + fi + - name: Check openapi-specs backwards compatibility + run: | + ./ci/github/helpers/openapi-specs-diff.bash breaking \ + "https://raw.githubusercontent.com/$REPO/refs/heads/$BRANCH" \ + . diff --git a/.github/workflows/cleanup-caches-by-branches.yml b/.github/workflows/cleanup-caches-by-branches.yml new file mode 100644 index 00000000000..bd3f5e3290b --- /dev/null +++ b/.github/workflows/cleanup-caches-by-branches.yml @@ -0,0 +1,30 @@ +# originates from https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#managing-caches +name: cleanup caches by a branch +on: + pull_request: + types: + - closed + +jobs: + cleanup: + runs-on: ubuntu-latest + steps: + - name: Cleanup + run: | + gh extension install actions/gh-actions-cache + + echo "Fetching list of cache key" + cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 ) + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." 
+ for cacheKey in $cacheKeysForPR + do + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + done + echo "Done" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + BRANCH: refs/pull/${{ github.event.pull_request.number }}/merge diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index fa2be82bab1..1bd3c6b40c1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -26,13 +26,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL tools for scanning - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} config-file: ./.github/codeql/codeql-config.yml - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.gitignore b/.gitignore index adaad37dd54..964ac9035e6 100644 --- a/.gitignore +++ b/.gitignore @@ -40,17 +40,22 @@ pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports -htmlcov/ -.tox/ +.cache .coverage .coverage.* -.cache -nosetests.xml -coverage.xml -*.cover .hypothesis/ .pytest_cache/ +.tox/ +*.cover +cov.xml +coverage.xml +htmlcov/ +junit.xml +locust_report/ +nosetests.xml test_failures/ + + # Translations *.mo *.pot @@ -75,9 +80,6 @@ target/ # Jupyter Notebook .ipynb_checkpoints -# pyenv -.python-version - # SageMath parsed files *.sage.py @@ -110,6 +112,7 @@ node_modules/ # IDEs config (except templates) .vscode/* !.vscode/*.template.json +.idea/ # manual overrides services/docker-compose.override.yml @@ -150,9 +153,6 @@ prof/ # outputs from make .stack-*.yml -# copies -services/**/.codeclimate.yml - # WSL .fake_hostname_file .bash_history @@ -177,3 +177,8 @@ Untitled* # service settings.schemas.json services/**/settings-schema.json + +tests/public-api/osparc_python_wheels/* + +# osparc-config repo files +repo.config diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 87b6dad37bd..45c66092f34 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ # See https://pre-commit.com/hooks.html for more hooks exclude: "^.venv$|^.cache$|^.pytest_cache$" -fail_fast: true +fail_fast: false default_language_version: - python: python3.9 + python: python3.10 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v5.0.0 hooks: - id: check-added-large-files args: ["--maxkb=1024"] @@ -22,26 +22,41 @@ repos: - id: no-commit-to-branch # NOTE: Keep order as pyupgrade (will update code) then pycln (remove unused imports), then isort (sort them) and black (final formatting) - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v3.19.1 hooks: - id: pyupgrade args: - - "--py39-plus" + - "--py311-plus" name: upgrade code + exclude: ^scripts/maintenance/computational-clusters/autoscaled_monitor/cli\.py$ # Optional get replaced and typer does not like it - repo: https://github.com/hadialqattan/pycln - rev: v1.2.5 + rev: v2.5.0 hooks: - id: pycln args: [--all, --expand-stars] name: prune imports - repo: https://github.com/PyCQA/isort - rev: 5.12.0 + rev: 6.0.0 hooks: - id: isort args: ["--profile", "black"] name: sort imports - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 25.1.0 hooks: - id: black name: black format code + - repo: local + hooks: + - id: pytest-testit + name: pytest-testit + language: script + types: [file, python] + entry: 
scripts/precommit/pytest-testit.bash + - repo: local + hooks: + - id: validate-docker-compose + name: validate-docker-compose + language: script + types: [file] + entry: scripts/precommit/validate-docker-compose.bash diff --git a/.pylintrc b/.pylintrc index f23d3956c42..9f0e88f06ef 100644 --- a/.pylintrc +++ b/.pylintrc @@ -46,7 +46,10 @@ fail-under=10 # Files or directories to be skipped. They should be base names, not paths. ignore=CVS, - migration + migration, + sandbox, + generated_code + # Add files or directories matching the regex patterns to the ignore-list. The # regex matches against paths and can be in Posix or Windows format. @@ -86,10 +89,10 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.11 # Discover python modules and packages in the file system subtree. -recursive=no +recursive=true # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. @@ -240,7 +243,7 @@ contextmanager-decorators=contextlib.contextmanager # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. -generated-members= +generated-members=sh # Tells whether to warn about missing members when the owner of the attribute # is inferred to be None. @@ -463,6 +466,8 @@ max-statements=50 # Minimum number of public methods for a class (see R0903). min-public-methods=2 +# Maximum number of positional arguments for a function or method (see R0917). +max-positional-arguments=12 [EXCEPTIONS] diff --git a/.python-version b/.python-version new file mode 100644 index 00000000000..2c0733315e4 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.11 diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 00000000000..5100a4d23d2 --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,73 @@ + lint.select = [ + "A", # [https://pypi.org/project/flake8-builtins/] + "ARG", # [https://pypi.org/project/flake8-unused-arguments/] + "ASYNC", # [https://pypi.org/project/flake8-async/] + "B", # [https://pypi.org/project/flake8-bugbear/] + "C4", # [https://pypi.org/project/flake8-comprehensions/] + "C90", # [https://pypi.org/project/mccabe/] (complexity) + "DTZ", # [https://pypi.org/project/flake8-datetimez/] + "E", # [https://pypi.org/project/pycodestyle/] errors + "EM", # [https://pypi.org/project/flake8-errmsg/] + "ERA", # [https://pypi.org/project/eradicate/] + "F", # [https://pypi.org/project/pyflakes/] + "FBT", # [https://pypi.org/project/flake8-boolean-trap/] + "FIX", # [https://github.com/tommilligan/flake8-fixme] + "G", # [https://pypi.org/project/flake8-logging-format/0.9.0/] + "I", # [https://pypi.org/project/isort/] + "ICN", # [https://github.com/joaopalmeiro/flake8-import-conventions] + "ISC", # [https://pypi.org/project/flake8-implicit-str-concat/] + "N", # [https://pypi.org/project/pep8-naming/] + "NPY", # NumPy-specific rules + "PERF", # [https://pypi.org/project/perflint/] + "PIE", # [https://pypi.org/project/flake8-pie/] + "PL", # [https://pypi.org/project/pylint/] + "PT", # [https://pypi.org/project/flake8-pytest-style/] + "PTH", # [https://pypi.org/project/flake8-use-pathlib/] + "PYI", # [https://pypi.org/project/flake8-pyi/] + "RET", # [https://pypi.org/project/flake8-return/] + "RSE", # [https://pypi.org/project/flake8-raise/] + "RUF", # RUFF-specific rules + "S", # [https://pypi.org/project/flake8-bandit/] (Automated security
testing) + "SIM", # [https://pypi.org/project/flake8-simplify/] + "SLF", # [https://pypi.org/project/flake8-self/] + "SLOT", # [https://pypi.org/project/flake8-slots/] + "T10", # https://pypi.org/project/flake8-debugger/ + "T20", # [https://pypi.org/project/flake8-print/] + "TID", # [https://pypi.org/project/flake8-tidy-imports/] + "TRY", # [https://pypi.org/project/tryceratops/1.1.0/] (exception anti-patterns) + "UP", # [https://pypi.org/project/pyupgrade/] + "W", # [https://pypi.org/project/pycodestyle/] warnings + "YTT", # [https://pypi.org/project/flake8-2020/] +] +lint.ignore = [ + "E501", # line too long, handled by black + "S101", # use of `assert` detected hanbled by pylance, does not support noseq + "TID252", # [*] Relative imports from parent modules are banned + "TRY300", # Checks for return statements in try blocks. SEE https://beta.ruff.rs/docs/rules/try-consider-else/ +] + +target-version = "py311" + + +[lint.per-file-ignores] +"**/{tests,pytest_simcore}/**" = [ + "T201", # print found + "ARG001", # unused function argument + "PT019", # user pytest.mark.usefixture + "PLR2004", # use of magic values + "PLR0913", # too many arguments + "N806", # Uppercase variables in functions + "PT001", # use pytest.fixture over pytest.fixture() whatsoever + "PT004", # does not return anythin, add leading underscore + "ERA001", # found commented out code + "FBT001", # Boolean positional arg in function definition +] + +[lint.flake8-pytest-style] +fixture-parentheses = false +parametrize-names-type = "csv" + + +[lint.pylint] +max-args = 10 diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 76cbaab4204..29b5d5b3663 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,9 +1,17 @@ { "recommendations": [ + "42Crunch.vscode-openapi", + "charliermarsh.ruff", + "DevSoft.svg-viewer-vscode", "eamodio.gitlens", "exiasr.hadolint", + "ms-azuretools.vscode-containers", + "ms-python.black-formatter", + "ms-python.pylint", "ms-python.python", + "njpwerner.autodocstring", "samuelcolvin.jinjahtml", - "timonwong.shellcheck" + "timonwong.shellcheck", + "vscode-icons-team.vscode-icons", ] } diff --git a/.vscode/launch.template.json b/.vscode/launch.template.json index 4abbb153965..dc3fd1cd481 100644 --- a/.vscode/launch.template.json +++ b/.vscode/launch.template.json @@ -7,7 +7,7 @@ "configurations": [ { "name": "Python: Test", - "type": "python", + "type": "debugpy", "request": "launch", "module": "pytest", "args": [ @@ -15,6 +15,7 @@ "--log-cli-level=INFO", "--pdb", "--setup-show", + "--durations=5", "-sx", "-vv", "${file}" @@ -25,7 +26,7 @@ }, { "name": "Python: Testit", - "type": "python", + "type": "debugpy", "request": "launch", "module": "pytest", "args": [ @@ -42,12 +43,68 @@ "console": "integratedTerminal", "justMyCode": false }, + { + "name": "Python: Test-Httpx-Spy", + "type": "debugpy", + "request": "launch", + "module": "pytest", + "args": [ + "--ff", + "--log-cli-level=INFO", + "--pdb", + "--setup-show", + "-sx", + "-vv", + "--spy-httpx-calls-enabled=true", + "--spy-httpx-calls-capture-path=test-httpx-spy-capture.ignore.keep.json", + "--faker-user-id=1", + "--faker-user-api-key=test", + "--faker-user-api-secret=test", + "--faker-project-id=85ebc649-9694-44bb-96d4-9dd8205cf18b", + "${file}" + ], + "cwd": "${workspaceFolder}", + "console": "integratedTerminal", + "justMyCode": false + }, { "name": "Python: Remote Attach api-server", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3006, - "host": "127.0.0.1", + "connect": { + "port": 3006, + 
"host": "127.0.0.1" + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "/devel" + } + ] + }, + { + "name": "Python: Remote Attach autoscaling", + "type": "debugpy", + "request": "attach", + "connect": { + "port": 3012, + "host": "127.0.0.1" + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "/devel" + } + ] + }, + { + "name": "Python: Remote Attach clusters-keeper", + "type": "debugpy", + "request": "attach", + "connect": { + "port": 3006, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -57,10 +114,12 @@ }, { "name": "Python: Remote Attach datcore-adapter", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3010, - "host": "127.0.0.1", + "connect": { + "port": 3010, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -70,10 +129,12 @@ }, { "name": "Python: Remote Attach director", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3004, - "host": "127.0.0.1", + "connect": { + "port": 3004, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -83,10 +144,12 @@ }, { "name": "Python: Remote Attach director-v2", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3009, - "host": "127.0.0.1", + "connect": { + "port": 3009, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -96,10 +159,27 @@ }, { "name": "Python: Remote Attach webserver", - "type": "python", + "type": "debugpy", + "request": "attach", + "connect": { + "port": 3001, + "host": "127.0.0.1" + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "/devel" + } + ] + }, + { + "name": "Python: Remote Attach wb-api-server", + "type": "debugpy", "request": "attach", - "port": 3001, - "host": "127.0.0.1", + "connect": { + "port": 3019, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -109,10 +189,12 @@ }, { "name": "Python: Remote Attach webserver-garbage-collector", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3011, - "host": "127.0.0.1", + "connect": { + "port": 3011, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -122,10 +204,12 @@ }, { "name": "Python: Remote Attach storage", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3003, - "host": "127.0.0.1", + "connect": { + "port": 3003, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -135,10 +219,27 @@ }, { "name": "Python: Remote Attach catalog", - "type": "python", + "type": "debugpy", "request": "attach", - "port": 3005, - "host": "127.0.0.1", + "connect": { + "port": 3005, + "host": "127.0.0.1" + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "/devel" + } + ] + }, + { + "name": "Python: Remote Attach dynamic-schduler (SCALE 1 replica first)", + "type": "debugpy", + "request": "attach", + "connect": { + "port": 3016, + "host": "127.0.0.1" + }, "pathMappings": [ { "localRoot": "${workspaceFolder}", @@ -149,18 +250,15 @@ { "type": "node", "request": "launch", - "name": "Debug e2e tests", - "runtimeArgs": [ - "--inspect-brk", - "${workspaceRoot}/tests/e2e/node_modules/.bin/jest", - "--runInBand", - "--colors" + "name": "Debug tests/e2e/tutorials/{FILE}", + "program": "${workspaceFolder}/tests/e2e/tutorials/${fileBasename}", + "args": [ + "http://127.0.0.1:9081" ], "cwd": "${workspaceFolder}/tests/e2e", - "restart": 
true, + "stopOnEntry": false, "console": "integratedTerminal", - "internalConsoleOptions": "neverOpen", - "port": 9229 + "internalConsoleOptions": "neverOpen" } ] } diff --git a/.vscode/settings.template.json b/.vscode/settings.template.json index 7fc4ef0392b..019fe9af8a7 100644 --- a/.vscode/settings.template.json +++ b/.vscode/settings.template.json @@ -1,5 +1,7 @@ // This is a template. Clone and replace extension ".template.json" by ".json" { + "autoDocstring.docstringFormat": "pep257", + "editor.tabSize": 2, "editor.insertSpaces": true, "editor.detectIndentation": false, @@ -7,11 +9,12 @@ "files.associations": { ".*rc": "ini", ".env*": "ini", + "*.logs*": "log", "**/requirements/*.in": "pip-requirements", "**/requirements/*.txt": "pip-requirements", "*logs.txt": "log", - "*.logs*": "log", "*Makefile": "makefile", + "*.sql": "sql", "docker-compose*.yml": "dockercompose", "Dockerfile*": "dockerfile" }, @@ -27,10 +30,11 @@ "**/.git/subtree-cache/**": true, "**/node_modules/*/**": true }, - "python.formatting.autopep8Args": [ - "--max-line-length 140" - ], + "python.analysis.autoImportCompletions": true, + "python.analysis.typeCheckingMode": "basic", + "python.defaultInterpreterPath": "./.venv/bin/python", "python.analysis.extraPaths": [ + "./packages/aws-library/src", "./packages/models-library/src", "./packages/postgres-database/src", "./packages/postgres-database/tests", @@ -45,11 +49,8 @@ "./services/director/src", "./services/storage/src", "./services/web/server/src", - "./services/web/server/tests/unit/with_dbs", - "./services/web/server/tests/unit/with_dbs/slow" + "./services/web/server/tests/unit/with_dbs" ], - "python.linting.pylintEnabled": true, - "python.linting.enabled": true, "[python]": { "editor.detectIndentation": false, "editor.tabSize": 4 @@ -57,18 +58,13 @@ "[makefile]": { "editor.insertSpaces": false }, - "python.testing.pytestEnabled": true, - "autoDocstring.docstringFormat": "sphinx", "hadolint.hadolintPath": "${workspaceFolder}/scripts/hadolint.bash", "hadolint.cliOptions": [], + "ruff.configuration": "${workspaceFolder}/.ruff.toml", + "ruff.path": [ + "${workspaceFolder}/.venv/bin/ruff" + ], "shellcheck.executablePath": "${workspaceFolder}/scripts/shellcheck.bash", "shellcheck.run": "onSave", - "shellcheck.enableQuickFix": true, - "python.formatting.provider": "black", - "isort.path": [ - "${workspaceFolder}/.venv/bin/isort" - ], - "isort.args": [ - "--settings-path=${workspaceFolder}/.isort.cfg" - ] + "shellcheck.enableQuickFix": true } diff --git a/CITATION.cff b/CITATION.cff index ae5dc64b168..3f16efed1a6 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -57,6 +57,15 @@ authors: family-names: Iavarone affiliation: "IT'IS Foundation" orcid: "https://orcid.org/0000-0001-5157-247X" + - given-names: Matus + family-names: Drobuliak + affiliation: "IT'IS Foundation" + - given-names: Mads Rystok + family-names: Bisgaard + affiliation: "IT'IS Foundation" + - given-names: Yury + family-names: Hrytsuk + affiliation: "IT'IS Foundation" - given-names: Tobias family-names: Oetiker affiliation: "OETIKER+PARTNER AG" diff --git a/Makefile b/Makefile index a62c059bb7e..61cae56cc0d 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ .DEFAULT_GOAL := help SHELL := /bin/bash - +.SHELLFLAGS := -o errexit -o pipefail -c MAKE_C := $(MAKE) --no-print-directory --directory # Operating system @@ -35,14 +35,20 @@ SERVICES_NAMES_TO_BUILD := \ api-server \ autoscaling \ catalog \ + clusters-keeper \ dask-sidecar \ datcore-adapter \ director \ director-v2 \ dynamic-sidecar \ + efs-guardian 
\ invitations \ migration \ - osparc-gateway-server \ + notifications \ + payments \ + resource-usage-tracker \ + dynamic-scheduler \ + docker-api-proxy \ service-integration \ static-webserver \ storage \ @@ -66,6 +72,9 @@ export DIRECTOR_API_VERSION := $(shell cat $(CURDIR)/services/director/VERSION export DIRECTOR_V2_API_VERSION:= $(shell cat $(CURDIR)/services/director-v2/VERSION) export STORAGE_API_VERSION := $(shell cat $(CURDIR)/services/storage/VERSION) export INVITATIONS_API_VERSION := $(shell cat $(CURDIR)/services/invitations/VERSION) +export PAYMENTS_API_VERSION := $(shell cat $(CURDIR)/services/payments/VERSION) +export DYNAMIC_SCHEDULER_API_VERSION := $(shell cat $(CURDIR)/services/dynamic-scheduler/VERSION) +export NOTIFICATIONS_API_VERSION := $(shell cat $(CURDIR)/services/notifications/VERSION) export DATCORE_ADAPTER_API_VERSION := $(shell cat $(CURDIR)/services/datcore-adapter/VERSION) export WEBSERVER_API_VERSION := $(shell cat $(CURDIR)/services/web/server/VERSION) @@ -78,18 +87,15 @@ export SWARM_STACK_NAME_NO_HYPHEN = $(subst -,_,$(SWARM_STACK_NAME)) export DOCKER_IMAGE_TAG ?= latest export DOCKER_REGISTRY ?= itisfoundation -# NOTE: this is only for WSL1 as /etc/hostname is not accessible there -ifeq ($(IS_WSL),WSL) -ETC_HOSTNAME = $(CURDIR)/.fake_hostname_file -export ETC_HOSTNAME -host := $(shell echo $$(hostname) > $(ETC_HOSTNAME)) -endif +MAKEFILES_WITH_OPENAPI_SPECS := $(shell find . -mindepth 2 -type f -name 'Makefile' -not -path '*/.*' -exec grep -l '^openapi-specs:' {} \; | xargs realpath) -get_my_ip := $(shell hostname --all-ip-addresses | cut --delimiter=" " --fields=1) +get_my_ip := $(shell (hostname --all-ip-addresses || hostname -i) 2>/dev/null | cut --delimiter=" " --fields=1) # NOTE: this is only for WSL2 as the WSL2 subsystem IP is changing on each reboot -S3_ENDPOINT := $(get_my_ip):9001 +ifeq ($(IS_WSL2),WSL2) +S3_ENDPOINT := http://$(get_my_ip):9001 export S3_ENDPOINT +endif # Check that given variables are set and all have non-empty values, # die with an error otherwise. @@ -120,17 +126,23 @@ __check_defined = \ .PHONY: help help: ## help on rule's targets -ifeq ($(IS_WIN),) - @awk --posix 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) -else - @awk --posix 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "%-20s %s\n", $$1, $$2}' $(MAKEFILE_LIST) -endif + @awk 'BEGIN {FS = ":.*?## "}; /^[^.[:space:]].*?:.*?## / {if ($$1 != "help" && NF == 2) {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}}' $(MAKEFILE_LIST) +test_python_version: ## Check Python version, throw error if compilation would fail with the installed version + # Checking python version + @.venv/bin/python ./scripts/test_python_version.py + + +.PHONY: _check_venv_active +_check_venv_active: + # Checking whether virtual environment was activated + @python3 -c "import sys; assert sys.base_prefix!=sys.prefix" + ## DOCKER BUILD ------------------------------- # -# - all builds are inmediatly tagged as 'local/{service}:${BUILD_TARGET}' where BUILD_TARGET='development', 'production', 'cache' +# - all builds are immediatly tagged as 'local/{service}:${BUILD_TARGET}' where BUILD_TARGET='development', 'production', 'cache' # - only production and cache images are released (i.e. 
tagged pushed into registry) # SWARM_HOSTS = $(shell docker node ls --format="{{.Hostname}}" 2>$(if $(IS_WIN),NUL,/dev/null)) @@ -144,6 +156,7 @@ DOCKER_TARGET_PLATFORMS ?= linux/amd64 comma := , define _docker_compose_build +$(eval INCLUDED_SERVICES := $(filter-out $(exclude), $(SERVICES_NAMES_TO_BUILD))) \ export BUILD_TARGET=$(if $(findstring -devel,$@),development,production) &&\ pushd services &&\ $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ @@ -151,7 +164,7 @@ $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ export $(subst -,_,$(shell echo $(service) | tr a-z A-Z))_VERSION=$(shell cat services/$(service)/VERSION);\ ,) \ )\ -docker buildx bake \ +docker buildx bake --allow=fs.read=.. \ $(if $(findstring -devel,$@),,\ --set *.platform=$(DOCKER_TARGET_PLATFORMS) \ )\ @@ -162,8 +175,16 @@ docker buildx bake \ ,--load\ )\ )\ + $(if $(push),\ + $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ + --set $(service).tags=$(DOCKER_REGISTRY)/$(service):$(DOCKER_IMAGE_TAG) \ + ) \ + $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ + --set $(service).output="type=registry$(comma)push=true" \ + )\ + ,) \ $(if $(push),--push,) \ - $(if $(push),--file docker-bake.hcl,) --file docker-compose-build.yml $(if $(target),$(target),) \ + --file docker-compose-build.yml $(if $(target),$(target),$(INCLUDED_SERVICES)) \ $(if $(findstring -nc,$@),--no-cache,\ $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ --set $(service).cache-to=type=gha$(comma)mode=max$(comma)scope=$(service) \ @@ -174,22 +195,20 @@ endef rebuild: build-nc # alias build build-nc: .env ## Builds production images and tags them as 'local/{service-name}:production'. For single target e.g. 'make target=webserver build'. To export to a folder: `make local-dest=/tmp/build` - # Building service$(if $(target),,s) $(target) + # Building service$(if $(target),,s) $(target) $(if $(exclude),excluding,) $(exclude) @$(_docker_compose_build) # List production images @docker images --filter="reference=local/*:production" load-images: guard-local-src ## loads images from local-src - # loading from images from $(local-src)... - @$(foreach service, $(SERVICES_NAMES_TO_BUILD),\ - docker load --input $(local-src)/$(service).tar; \ - ) + # loading from any tar images from $(local-src)... + @find $(local-src) -name '*.tar' -print0 | xargs -0 -n1 -P $(shell nproc) --no-run-if-empty --verbose docker load --input # all images loaded @docker images build-devel build-devel-nc: .env ## Builds development images and tags them as 'local/{service-name}:development'. For single target e.g. 
'make target=webserver build-devel' ifeq ($(target),) - # Building services + # Building services $(if $(exclude),excluding,) $(exclude) @$(_docker_compose_build) else ifeq ($(findstring static-webserver,$(target)),static-webserver) @@ -233,7 +252,7 @@ CPU_COUNT = $(shell cat /proc/cpuinfo | grep processor | wc -l ) @export DOCKER_REGISTRY=local && \ export DOCKER_IMAGE_TAG=development && \ export DEV_PC_CPU_COUNT=${CPU_COUNT} && \ - scripts/docker/docker-compose-config.bash -e .env \ + scripts/docker/docker-stack-config.bash -e .env \ services/docker-compose.yml \ services/docker-compose.local.yml \ services/docker-compose.devel.yml \ @@ -243,43 +262,63 @@ CPU_COUNT = $(shell cat /proc/cpuinfo | grep processor | wc -l ) # Creating config for stack with 'local/{service}:production' to $@ @export DOCKER_REGISTRY=local && \ export DOCKER_IMAGE_TAG=production && \ - scripts/docker/docker-compose-config.bash -e .env \ + scripts/docker/docker-stack-config.bash -e .env \ services/docker-compose.yml \ services/docker-compose.local.yml \ > $@ + +.stack-simcore-development-frontend.yml: .env $(docker-compose-configs) + # Creating config for stack with 'local/{service}:production' (except of static-webserver -> static-webserver:development) to $@ + @export DOCKER_REGISTRY=local && \ + export DOCKER_IMAGE_TAG=production && \ + scripts/docker/docker-stack-config.bash -e $< \ + services/docker-compose.yml \ + services/docker-compose.local.yml \ + services/docker-compose.devel-frontend.yml \ + > $@ + .stack-simcore-version.yml: .env $(docker-compose-configs) # Creating config for stack with '$(DOCKER_REGISTRY)/{service}:${DOCKER_IMAGE_TAG}' to $@ - @scripts/docker/docker-compose-config.bash -e .env \ + @scripts/docker/docker-stack-config.bash -e .env \ services/docker-compose.yml \ services/docker-compose.local.yml \ > $@ +.stack-vendor-services.yml: .env $(docker-compose-configs) + # Creating config for vendors stack to $@ + @scripts/docker/docker-stack-config.bash -e $< \ + services/docker-compose-dev-vendors.yml \ + > $@ .stack-ops.yml: .env $(docker-compose-configs) - # Compiling config file for filestash - $(eval TMP_PATH_TO_FILESTASH_CONFIG=$(shell set -o allexport && \ - source $(CURDIR)/.env && \ - set +o allexport && \ - python3 scripts/filestash/create_config.py)) # Creating config for ops stack to $@ - # -> filestash config at $(TMP_PATH_TO_FILESTASH_CONFIG) +ifdef ops_ci + @$(shell \ + scripts/docker/docker-stack-config.bash -e .env \ + services/docker-compose-ops-ci.yml \ + > $@ \ + ) +else @$(shell \ - export TMP_PATH_TO_FILESTASH_CONFIG="${TMP_PATH_TO_FILESTASH_CONFIG}" && \ - scripts/docker/docker-compose-config.bash -e .env \ + scripts/docker/docker-stack-config.bash -e .env \ services/docker-compose-ops.yml \ > $@ \ ) +endif + +.PHONY: up-devel up-prod up-prod-ci up-version up-latest .deploy-ops .deploy-vendors -.PHONY: up-devel up-prod up-version up-latest .deploy-ops +.deploy-vendors: .stack-vendor-services.yml + # Deploy stack 'vendors' + docker stack deploy --detach=true --with-registry-auth -c $< vendors .deploy-ops: .stack-ops.yml # Deploy stack 'ops' ifndef ops_disabled - # -> filestash config at $(TMP_PATH_TO_FILESTASH_CONFIG) - docker stack deploy --with-registry-auth -c $< ops + docker stack deploy --detach=true --with-registry-auth -c $< ops else @echo "Explicitly disabled with ops_disabled flag in CLI" endif @@ -296,18 +335,21 @@ TableWidth=140;\ printf "%24s | %90s | %12s | %12s\n" Name Endpoint User Password;\ printf "%.$${TableWidth}s\n" "$$separator";\ printf "$$rows" 
"oSparc platform" "http://$(get_my_ip).nip.io:9081";\ -printf "$$rows" "oSparc web API doc" "http://$(get_my_ip).nip.io:9081/dev/doc";\ printf "$$rows" "oSparc public API doc" "http://$(get_my_ip).nip.io:8006/dev/doc";\ -printf "$$rows" "Postgres DB" "http://$(get_my_ip).nip.io:18080/?pgsql=postgres&username="$${POSTGRES_USER}"&db="$${POSTGRES_DB}"&ns=public" $${POSTGRES_USER} $${POSTGRES_PASSWORD};\ -printf "$$rows" "Portainer" "http://$(get_my_ip).nip.io:9000" admin adminadmin;\ -printf "$$rows" "Redis" "http://$(get_my_ip).nip.io:18081";\ +printf "$$rows" "oSparc web API doc" "http://$(get_my_ip).nip.io:9081/dev/doc";\ printf "$$rows" "Dask Dashboard" "http://$(get_my_ip).nip.io:8787";\ -printf "$$rows" "Docker Registry" "$${REGISTRY_URL}" $${REGISTRY_USER} $${REGISTRY_PW};\ +printf "$$rows" "Dy-scheduler Dashboard" "http://$(get_my_ip).nip.io:8012";\ +printf "$$rows" "Docker Registry" "http://$${REGISTRY_URL}/v2/_catalog" $${REGISTRY_USER} $${REGISTRY_PW};\ printf "$$rows" "Invitations" "http://$(get_my_ip).nip.io:8008/dev/doc" $${INVITATIONS_USERNAME} $${INVITATIONS_PASSWORD};\ +printf "$$rows" "Jaeger" "http://$(get_my_ip).nip.io:16686";\ +printf "$$rows" "Payments" "http://$(get_my_ip).nip.io:8011/dev/doc" $${PAYMENTS_USERNAME} $${PAYMENTS_PASSWORD};\ +printf "$$rows" "Portainer" "http://$(get_my_ip).nip.io:9000" admin adminadmin;\ +printf "$$rows" "Postgres DB" "http://$(get_my_ip).nip.io:18080/?pgsql=postgres&username="$${POSTGRES_USER}"&db="$${POSTGRES_DB}"&ns=public" $${POSTGRES_USER} $${POSTGRES_PASSWORD};\ printf "$$rows" "Rabbit Dashboard" "http://$(get_my_ip).nip.io:15672" admin adminadmin;\ -printf "$$rows" "Traefik Dashboard" "http://$(get_my_ip).nip.io:8080/dashboard/";\ -printf "$$rows" "Storage S3 Filestash" "http://$(get_my_ip).nip.io:9002" 12345678 12345678;\ +printf "$$rows" "Redis" "http://$(get_my_ip).nip.io:18081";\ printf "$$rows" "Storage S3 Minio" "http://$(get_my_ip).nip.io:9001" 12345678 12345678;\ +printf "$$rows" "Traefik Dashboard" "http://$(get_my_ip).nip.io:8080/dashboard/";\ +printf "$$rows" "Vendor Manual (Fake)" "http://manual.$(get_my_ip).nip.io:9081";\ printf "\n%s\n" "⚠️ if a DNS is not used (as displayed above), the interactive services started via dynamic-sidecar";\ echo "⚠️ will not be shown. The frontend accesses them via the uuid.services.YOUR_IP.nip.io:9081"; @@ -317,30 +359,49 @@ endef show-endpoints: @$(_show_endpoints) +export HOST_UV_CACHE_DIR := $(shell uv cache dir) + up-devel: .stack-simcore-development.yml .init-swarm $(CLIENT_WEB_OUTPUT) ## Deploys local development stack, qx-compile+watch and ops stack (pass 'make ops_disabled=1 up-...' to disable) # Start compile+watch front-end container [front-end] @$(MAKE_C) services/static-webserver/client down compile-dev flags=--watch + @$(MAKE_C) services/dask-sidecar certificates # Deploy stack $(SWARM_STACK_NAME) [back-end] - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) + @docker stack deploy --detach=true --with-registry-auth -c $< $(SWARM_STACK_NAME) + @$(MAKE) .deploy-vendors @$(MAKE) .deploy-ops @$(_show_endpoints) @$(MAKE_C) services/static-webserver/client follow-dev-logs +up-devel-frontend: .stack-simcore-development-frontend.yml .init-swarm ## Every service in production except static-webserver. 
For front-end development + # Start compile+watch front-end container [front-end] + @$(MAKE_C) services/static-webserver/client down compile-dev flags=--watch + @$(MAKE_C) services/dask-sidecar certificates + # Deploy stack $(SWARM_STACK_NAME) [back-end] + @docker stack deploy --detach=true --with-registry-auth -c $< $(SWARM_STACK_NAME) + @$(MAKE) .deploy-vendors + @$(MAKE) .deploy-ops + @$(_show_endpoints) + @$(MAKE_C) services/static-webserver/client follow-dev-logs -up-prod: .stack-simcore-production.yml .init-swarm ## Deploys local production stack and ops stack (pass 'make ops_disabled=1 up-...' to disable or target= to deploy a single service) + +up-prod: .stack-simcore-production.yml .init-swarm ## Deploys local production stack and ops stack (pass 'make ops_disabled=1 ops_ci=1 up-...' to disable or target= to deploy a single service) ifeq ($(target),) + @$(MAKE_C) services/dask-sidecar certificates # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) + @docker stack deploy --detach=true --with-registry-auth -c $< $(SWARM_STACK_NAME) + @$(MAKE) .deploy-vendors @$(MAKE) .deploy-ops else # deploys ONLY $(target) service - @docker-compose --file $< up --detach $(target) + @docker compose --file $< up --detach $(target) endif @$(_show_endpoints) up-version: .stack-simcore-version.yml .init-swarm ## Deploys versioned stack '$(DOCKER_REGISTRY)/{service}:$(DOCKER_IMAGE_TAG)' and ops stack (pass 'make ops_disabled=1 up-...' to disable) + @$(MAKE_C) services/dask-sidecar certificates # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) + @docker stack deploy --detach=true --with-registry-auth -c $< $(SWARM_STACK_NAME) + @$(MAKE) .deploy-vendors @$(MAKE) .deploy-ops @$(_show_endpoints) @@ -359,9 +420,11 @@ down: ## Stops and removes stack # Removing client containers (if any) -@$(MAKE_C) services/static-webserver/client down # Removing generated docker compose configurations, i.e. .stack-* +ifneq ($(wildcard .stack-*), ) -@rm $(wildcard .stack-*) +endif # Removing local registry if any - -@docker rm --force $(LOCAL_REGISTRY_HOSTNAME) + -@docker ps --all --quiet --filter "name=$(LOCAL_REGISTRY_HOSTNAME)" | xargs --no-run-if-empty docker rm --force leave: ## Forces to stop all services, networks, etc by the node leaving the swarm -docker swarm leave -f @@ -369,8 +432,8 @@ leave: ## Forces to stop all services, networks, etc by the node leaving the swa .PHONY: .init-swarm .init-swarm: - # Ensures swarm is initialized - $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip)) + # Ensures swarm is initialized (careful we use a default pool of 172.20.0.0/14. Ensure you do not use private IPs in that range!) 
+ $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip) --default-addr-pool 172.20.0.0/14) ## DOCKER TAGS ------------------------------- @@ -404,7 +467,7 @@ tag-latest: ## Tags last locally built production images as '${DOCKER_REGISTRY}/ pull-version: .env ## pulls images from DOCKER_REGISTRY tagged as DOCKER_IMAGE_TAG # Pulling images '${DOCKER_REGISTRY}/{service}:${DOCKER_IMAGE_TAG}' - @docker-compose --file services/docker-compose-deploy.yml pull + @docker compose --file services/docker-compose-deploy.yml pull .PHONY: push-version push-latest @@ -417,25 +480,37 @@ push-latest: tag-latest push-version: tag-version # pushing '${DOCKER_REGISTRY}/{service}:${DOCKER_IMAGE_TAG}' @export BUILD_TARGET=undefined; \ - docker-compose --file services/docker-compose-build.yml --file services/docker-compose-deploy.yml push + docker compose --file services/docker-compose-build.yml --file services/docker-compose-deploy.yml push ## ENVIRONMENT ------------------------------- .PHONY: devenv devenv-all node-env -.venv: - @python3 --version - python3 -m venv $@ - ## upgrading tools to latest version in $(shell python3 --version) - $@/bin/pip3 --quiet install --upgrade \ - pip~=23.0 \ +.check-uv-installed: + @echo "Checking if 'uv' is installed..." + @if ! command -v uv >/dev/null 2>&1; then \ + curl -LsSf https://astral.sh/uv/install.sh | sh; \ + else \ + printf "\033[32m'uv' is installed. Version: \033[0m"; \ + uv --version; \ + fi + # upgrading uv + -@uv self --quiet update + + +.venv: .check-uv-installed + @uv venv $@ + @echo "# upgrading tools to latest version in" && $@/bin/python --version + @uv pip --quiet install --upgrade \ + pip~=24.0 \ wheel \ - setuptools - @$@/bin/pip3 list --verbose + setuptools \ + uv + @uv pip list -devenv: .venv ## create a python virtual environment with dev tools (e.g. linters, etc) - $ $@ + .PHONY: postgres-upgrade postgres-upgrade: ## initalize or upgrade postgres db to latest state @@ -568,6 +668,8 @@ local-registry: .env ## creates a local docker registry and configure simcore to echo configuring host file to redirect $(LOCAL_REGISTRY_HOSTNAME) to 127.0.0.1; \ sudo echo 127.0.0.1 $(LOCAL_REGISTRY_HOSTNAME) | sudo tee -a /etc/hosts;\ echo done) + @$(if $(shell test -f /etc/docker/daemon.json),, \ + sudo touch /etc/docker/daemon.json) @$(if $(shell jq -e '.["insecure-registries"]? | index("http://$(LOCAL_REGISTRY_HOSTNAME):5000")? // empty' /etc/docker/daemon.json),,\ echo configuring docker engine to use insecure local registry...; \ jq 'if .["insecure-registries"] | index("http://$(LOCAL_REGISTRY_HOSTNAME):5000") then . 
else .["insecure-registries"] += ["http://$(LOCAL_REGISTRY_HOSTNAME):5000"] end' /etc/docker/daemon.json > /tmp/daemon.json &&\ @@ -613,9 +715,9 @@ info-registry: ## info on local registry (if any) ## INFO ------------------------------- -.PHONY: info info-images info-swarm info-tools +.PHONY: info info-images info-swarm info: ## displays setup information - # setup info: + @echo setup info -------------------------------- @echo ' Detected OS : $(IS_LINUX)$(IS_OSX)$(IS_WSL)$(IS_WSL2)$(IS_WIN)' @echo ' SWARM_STACK_NAME : ${SWARM_STACK_NAME}' @echo ' DOCKER_REGISTRY : $(DOCKER_REGISTRY)' @@ -625,19 +727,23 @@ info: ## displays setup information @echo ' - ULR : ${VCS_URL}' @echo ' - REF : ${VCS_REF}' @echo ' - (STATUS)REF_CLIENT : (${VCS_STATUS_CLIENT}) ${VCS_REF_CLIENT}' - @echo ' DIRECTOR_API_VERSION : ${DIRECTOR_API_VERSION}' - @echo ' STORAGE_API_VERSION : ${STORAGE_API_VERSION}' - @echo ' DATCORE_ADAPTER_API_VERSION : ${DATCORE_ADAPTER_API_VERSION}' - @echo ' WEBSERVER_API_VERSION : ${WEBSERVER_API_VERSION}' - # dev tools version - @echo ' make : $(shell make --version 2>&1 | head -n 1)' - @echo ' jq : $(shell jq --version)' + @make --silent info-tools + + +.PHONY: show-tools +info-tools: ## displays tools versions + @echo dev-tools versions ------------------------- @echo ' awk : $(shell awk -W version 2>&1 | head -n 1)' - @echo ' python : $(shell python3 --version)' - @echo ' node : $(shell node --version 2> /dev/null || echo ERROR nodejs missing)' @echo ' docker : $(shell docker --version)' @echo ' docker buildx : $(shell docker buildx version)' - @echo ' docker-compose: $(shell docker-compose --version)' + @echo ' docker compose: $(shell docker compose version)' + @echo ' jq : $(shell jq --version)' + @echo ' make : $(shell make --version 2>&1 | head -n 1)' + @echo ' node : $(shell node --version 2> /dev/null || echo ERROR nodejs missing)' + @echo ' python : $(shell python3 --version)' + @echo ' uv : $(shell uv --version 2> /dev/null || echo ERROR uv missing)' + @echo ' ubuntu : $(shell lsb_release --description --short 2> /dev/null | tail || echo ERROR Not an Ubuntu OS )' + define show-meta @@ -687,8 +793,8 @@ _running_containers = $(shell docker ps -aq) clean-venv: devenv ## Purges .venv into original configuration # Cleaning your venv - .venv/bin/pip-sync --quiet $(CURDIR)/requirements/devenv.txt - @pip list + @uv pip sync --quiet $(CURDIR)/requirements/devenv.txt + @uv pip list clean-hooks: ## Uninstalls git pre-commit hooks @-pre-commit uninstall 2> /dev/null || rm .git/hooks/pre-commit @@ -700,12 +806,12 @@ clean: .check-clean ## cleans all unversioned files in project and temp files cr @$(MAKE_C) services/static-webserver/client clean-files clean-more: ## cleans containers and unused volumes - # stops and deletes running containers - @$(if $(_running_containers), docker rm --force $(_running_containers),) # pruning unused volumes -@docker volume prune --force # pruning buildx cache -@docker buildx prune --force + # stops and deletes running containers + @$(if $(_running_containers), docker rm --force $(_running_containers),) clean-images: ## removes all created images # Cleaning all service images @@ -769,3 +875,8 @@ release-staging release-prod: .check-on-master-branch ## Helper to create a sta .PHONY: release-hotfix release-staging-hotfix release-hotfix release-staging-hotfix: ## Helper to create a hotfix release in Github (usage: make release-hotfix version=1.2.4 git_sha=optional or make release-staging-hotfix name=Sprint version=2) $(create_github_release_url) + +.PHONY: 
docker-image-fuse +docker-image-fuse: + $(foreach service, $(SERVICES_NAMES_TO_BUILD),\ + docker buildx imagetools create --tag $(DOCKER_REGISTRY)/$(service):$(DOCKER_IMAGE_TAG) $(DOCKER_REGISTRY)/$(service):$(DOCKER_IMAGE_TAG)-$(SUFFIX) $(DOCKER_REGISTRY)/$(service):$(DOCKER_IMAGE_TAG);) diff --git a/README.md b/README.md index da0f1259d22..08ab8d4df35 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,11 @@ # osparc-simcore platform
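The `docker-image-fuse` target added above combines, for every service, the architecture-specific image `$(DOCKER_IMAGE_TAG)-$(SUFFIX)` with the existing `$(DOCKER_IMAGE_TAG)` image into a single manifest under the plain tag, using `docker buildx imagetools create`. A minimal usage sketch follows; the registry and tag shown are the Makefile defaults, while `SUFFIX=arm64` is only an illustrative assumption, not taken from this diff:

```bash
# Sketch: fold arm64 images (pushed as <tag>-arm64) into the multi-arch <tag> manifest.
# DOCKER_REGISTRY and DOCKER_IMAGE_TAG are existing Makefile variables; SUFFIX is read by the recipe.
make docker-image-fuse \
  DOCKER_REGISTRY=itisfoundation \
  DOCKER_IMAGE_TAG=latest \
  SUFFIX=arm64
```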

+ +

- [![black_badge]](https://github.com/psf/black) [![ci_badge]](https://github.com/ITISFoundation/osparc-simcore/actions/workflows/ci-testing-deploy.yml) @@ -28,20 +29,20 @@ [osparc_status]:https://img.shields.io/badge/dynamic/json?label=osparc.io&query=%24.status.description&url=https%3A%2F%2Fstatus.osparc.io%2Fapi%2Fv2%2Fstatus.json - - - - The SIM-CORE, named **o2S2PARC** – **O**pen **O**nline **S**imulations for **S**timulating **P**eripheral **A**ctivity to **R**elieve **C**onditions – is one of the three integrative cores of the SPARC program’s Data Resource Center (DRC). The aim of o2S2PARC is to establish a comprehensive, freely accessible, intuitive, and interactive online platform for simulating peripheral nerve system neuromodulation/ stimulation and its impact on organ physiology in a precise and predictive manner. To achieve this, the platform will comprise both state-of-the art and highly detailed animal and human anatomical models with realistic tissue property distributions that make it possible to perform simulations ranging from the molecular scale up to the complexity of the human body. + ## Getting Started -This is the common workflow to build and deploy locally: +A production instance of **o2S2PARC** is running at [oSPARC.io](https://osparc.io). + +If you want to spin up your own instance, you can follow the common workflow to build and deploy locally using the **Linux commandline** (Ubuntu recommended). +Make sure you first install all the [requirements](#Requirements) mentioned in the section below. ```bash - # clone repo + # clone code repository git clone https://github.com/ITISFoundation/osparc-simcore.git cd osparc-simcore @@ -59,7 +60,7 @@ This is the common workflow to build and deploy locally: # xdg-open http://127.0.0.1.nip.io:9081/ - # stops + # to stop the swarm make down ``` @@ -69,34 +70,39 @@ Services are deployed in two stacks:``simcore-stack`` comprises all core-service ### Requirements -To verify current base OS, Docker and Python build versions have a look at: - -- GitHub Actions [config](.github/workflows/ci-testing-deploy.yml) - To build and run: -- docker +- git +- [docker](https://docs.docker.com/engine/install/ubuntu/#installation-methods) - make >=4.2 - awk, jq (optional tools within makefiles) To develop, in addition: -- python 3.9 -- nodejs for client part (this dependency will be deprecated soon) -- swagger-cli (make sure to have a recent version of nodejs) +- *python 3.10*: we recommend using the python manager [pyenv](https://brain2life.hashnode.dev/how-to-install-pyenv-python-version-manager-on-ubuntu-2004) +- *nodejs* for client part: we recommend using the node manager [nvm](https://github.com/nvm-sh/nvm#install--update-script) - [vscode] (highly recommended) -This project works and is developed under **linux (Ubuntu recommended)**. +To verify current base OS, Docker and Python build versions have a look at: + +- GitHub Actions [config](.github/workflows/ci-testing-deploy.yml) -#### Setting up Other Operating Systems +If you want to verify if your system has all the necessary requirements: + +```bash + make info +``` + + +#### Setting up other Operating Systems When developing on these platforms you are on your own. -In **windows**, it works under [WSL2] (windows subsystem for linux **version2**). Some details on the setup: +On **Windows**, it works under [WSL2] (Windows Subsystem for Linux **version2**). 
Some details on the setup: - Follow **all details** on [how to setup WSL2 with docker and ZSH](https://nickymeuleman.netlify.app/blog/linux-on-windows-wsl2-zsh-docker) docker for windows and [WSL2] -In **MacOS**, [replacing the MacOS utilities with GNU utils](https://apple.stackexchange.com/a/69332) might be required. +**MacOS** is currently not supported. #### Upgrading services requirements @@ -129,14 +135,51 @@ To upgrade a single requirement named `fastapi`run: - [Git release workflow](docs/releasing-workflow-instructions.md) - Public [releases](https://github.com/ITISFoundation/osparc-simcore/releases) -- Production in https://osparc.io +- Production in - [Staging instructions](docs/releasing-workflow-instructions.md#staging-example) - [User Manual](https://itisfoundation.github.io/osparc-manual/) +## Development build + +For developers wanting to add/test code changes, a version can be built that will on-the-fly incorporate changes made in the source directory into the running containers. +To enable this, the following commands should be used to build, instead of the ones provided in the [Getting Started](#getting-started) section: + +```bash + # clone code repository + git clone https://github.com/ITISFoundation/osparc-simcore.git + cd osparc-simcore + + # setup python environment and activate + make devenv + source .venv/bin/activate + + # show setup info and build core services + make info build build-devel + + # starts swarm and deploys services + make up-devel + + # The above command will keep in running with "[RUN] Running command..." + # Open another terminal session, to continue + + # display swarm configuration + make info-swarm + + # open front-end in the browser + # 127.0.0.1.nip.io:9081 - simcore front-end site + # + xdg-open http://127.0.0.1.nip.io:9081/ + + # to stop the swarm + make down +``` + ## Contributing Would you like to make a change or add something new? Please read the [contributing guidelines](CONTRIBUTING.md). + + ## License This project is licensed under the terms of the [MIT license](LICENSE). @@ -144,10 +187,11 @@ This project is licensed under the terms of the [MIT license](LICENSE). ---
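The Makefile changes in this diff move the Python developer tooling onto `uv` (see the `.check-uv-installed`, `.venv` and `clean-venv` targets above), while `make devenv` remains the entry point used in the build instructions. The sketch below shows a rough manual equivalent, for orientation only; the Makefile targets remain the supported path, and the package list simply mirrors the `.venv` and `clean-venv` recipes:

```bash
# Rough manual equivalent of `make devenv` after this change (sketch, not authoritative).
command -v uv >/dev/null 2>&1 || curl -LsSf https://astral.sh/uv/install.sh | sh  # install uv if missing
uv venv .venv                                             # create the virtual environment
source .venv/bin/activate
uv pip install --upgrade pip~=24.0 wheel setuptools uv    # base tooling, as in the .venv target
uv pip sync requirements/devenv.txt                       # dev requirements, as in the clean-venv target
```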

-Made with love at www.z43.swiss + +Made with love (and lots of hard work) at www.z43.swiss +
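Throughout the Makefile changes in this diff, the standalone `docker-compose` binary is replaced by the Docker CLI plugins (`docker compose`, `docker buildx`), and stack deployments now run with `--detach=true`. A quick pre-flight check, consistent with the new `make info-tools` target (a sketch, not part of the diff itself):

```bash
# Both CLI plugins must be available for the updated Makefile targets to work.
docker compose version   # compose v2 plugin, used by pull/push and single-target deploys
docker buildx version    # buildx plugin, used by the bake-based build targets
```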

-[chocolatey]:https://chocolatey.org/ [vscode]:https://code.visualstudio.com/ [WSL2]:https://docs.microsoft.com/en-us/windows/wsl diff --git a/SERVICES.md b/SERVICES.md new file mode 100644 index 00000000000..ff52fa0dd99 --- /dev/null +++ b/SERVICES.md @@ -0,0 +1,67 @@ +# services +> +> Auto generated on `2025-05-26 09:50:15` using +```cmd +cd osparc-simcore +python ./scripts/echo_services_markdown.py +``` +| Name|Files| | +| ----------|----------|---------- | +| **AGENT**|| | +| |[services/agent/Dockerfile](./services/agent/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/agent)](https://hub.docker.com/r/itisfoundation/agent/tags) | +| **API-SERVER**|| | +| |[services/api-server/openapi.json](./services/api-server/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/api-server/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/api-server/openapi.json) | +| |[services/api-server/Dockerfile](./services/api-server/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/api-server)](https://hub.docker.com/r/itisfoundation/api-server/tags) | +| **AUTOSCALING**|| | +| |[services/autoscaling/Dockerfile](./services/autoscaling/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/autoscaling)](https://hub.docker.com/r/itisfoundation/autoscaling/tags) | +| **CATALOG**|| | +| |[services/catalog/openapi.json](./services/catalog/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/catalog/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/catalog/openapi.json) | +| |[services/catalog/Dockerfile](./services/catalog/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/catalog)](https://hub.docker.com/r/itisfoundation/catalog/tags) | +| **CLUSTERS-KEEPER**|| | +| |[services/clusters-keeper/Dockerfile](./services/clusters-keeper/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/clusters-keeper)](https://hub.docker.com/r/itisfoundation/clusters-keeper/tags) | +| **DASK-SIDECAR**|| | +| |[services/dask-sidecar/Dockerfile](./services/dask-sidecar/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/dask-sidecar)](https://hub.docker.com/r/itisfoundation/dask-sidecar/tags) | +| **DATCORE-ADAPTER**|| | +| |[services/datcore-adapter/Dockerfile](./services/datcore-adapter/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/datcore-adapter)](https://hub.docker.com/r/itisfoundation/datcore-adapter/tags) | +| **DIRECTOR**|| | +| 
|[services/director/src/simcore_service_director/api/v0/openapi.yaml](./services/director/src/simcore_service_director/api/v0/openapi.yaml)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/director/src/simcore_service_director/api/v0/openapi.yaml) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/director/src/simcore_service_director/api/v0/openapi.yaml) | +| |[services/director/Dockerfile](./services/director/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/director)](https://hub.docker.com/r/itisfoundation/director/tags) | +| **DIRECTOR-V2**|| | +| |[services/director-v2/openapi.json](./services/director-v2/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/director-v2/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/director-v2/openapi.json) | +| |[services/director-v2/Dockerfile](./services/director-v2/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/director-v2)](https://hub.docker.com/r/itisfoundation/director-v2/tags) | +| **DOCKER-API-PROXY**|| | +| |[services/docker-api-proxy/Dockerfile](./services/docker-api-proxy/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/docker-api-proxy)](https://hub.docker.com/r/itisfoundation/docker-api-proxy/tags) | +| **DYNAMIC-SCHEDULER**|| | +| |[services/dynamic-scheduler/openapi.json](./services/dynamic-scheduler/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/dynamic-scheduler/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/dynamic-scheduler/openapi.json) | +| |[services/dynamic-scheduler/Dockerfile](./services/dynamic-scheduler/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/dynamic-scheduler)](https://hub.docker.com/r/itisfoundation/dynamic-scheduler/tags) | +| **DYNAMIC-SIDECAR**|| | +| |[services/dynamic-sidecar/openapi.json](./services/dynamic-sidecar/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/dynamic-sidecar/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/dynamic-sidecar/openapi.json) | +| |[services/dynamic-sidecar/Dockerfile](./services/dynamic-sidecar/Dockerfile)|[![Docker Image 
Size](https://img.shields.io/docker/image-size/itisfoundation/dynamic-sidecar)](https://hub.docker.com/r/itisfoundation/dynamic-sidecar/tags) | +| **EFS-GUARDIAN**|| | +| |[services/efs-guardian/Dockerfile](./services/efs-guardian/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/efs-guardian)](https://hub.docker.com/r/itisfoundation/efs-guardian/tags) | +| **INVITATIONS**|| | +| |[services/invitations/openapi.json](./services/invitations/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/invitations/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/invitations/openapi.json) | +| |[services/invitations/Dockerfile](./services/invitations/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/invitations)](https://hub.docker.com/r/itisfoundation/invitations/tags) | +| **MIGRATION**|| | +| |[services/migration/Dockerfile](./services/migration/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/migration)](https://hub.docker.com/r/itisfoundation/migration/tags) | +| **NOTIFICATIONS**|| | +| |[services/notifications/openapi.json](./services/notifications/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/notifications/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/notifications/openapi.json) | +| |[services/notifications/Dockerfile](./services/notifications/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/notifications)](https://hub.docker.com/r/itisfoundation/notifications/tags) | +| **PAYMENTS**|| | +| |[services/payments/openapi.json](./services/payments/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/payments/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/payments/openapi.json) | +| |[services/payments/Dockerfile](./services/payments/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/payments)](https://hub.docker.com/r/itisfoundation/payments/tags) | +| **RESOURCE-USAGE-TRACKER**|| | +| |[services/resource-usage-tracker/openapi.json](./services/resource-usage-tracker/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/resource-usage-tracker/openapi.json) [![Swagger 
UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/resource-usage-tracker/openapi.json) | +| |[services/resource-usage-tracker/Dockerfile](./services/resource-usage-tracker/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/resource-usage-tracker)](https://hub.docker.com/r/itisfoundation/resource-usage-tracker/tags) | +| **STATIC-WEBSERVER**|| | +| |[services/static-webserver/client/tools/qooxdoo-kit/builder/Dockerfile](./services/static-webserver/client/tools/qooxdoo-kit/builder/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/static-webserver)](https://hub.docker.com/r/itisfoundation/static-webserver/tags) | +| |[services/static-webserver/client/qx_packages/ITISFoundation_qx-iconfont-material_v0_1_7/Dockerfile](./services/static-webserver/client/qx_packages/ITISFoundation_qx-iconfont-material_v0_1_7/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/static-webserver)](https://hub.docker.com/r/itisfoundation/static-webserver/tags) | +| |[services/static-webserver/client/qx_packages/ITISFoundation_qx-iconfont-fontawesome5_v0_2_2/Dockerfile](./services/static-webserver/client/qx_packages/ITISFoundation_qx-iconfont-fontawesome5_v0_2_2/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/static-webserver)](https://hub.docker.com/r/itisfoundation/static-webserver/tags) | +| |[services/static-webserver/client/qx_packages/ITISFoundation_qx-osparc-theme_v0_5_6/Dockerfile](./services/static-webserver/client/qx_packages/ITISFoundation_qx-osparc-theme_v0_5_6/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/static-webserver)](https://hub.docker.com/r/itisfoundation/static-webserver/tags) | +| **STORAGE**|| | +| |[services/storage/openapi.json](./services/storage/openapi.json)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/storage/openapi.json) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/storage/openapi.json) | +| |[services/storage/Dockerfile](./services/storage/Dockerfile)|[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/storage)](https://hub.docker.com/r/itisfoundation/storage/tags) | +| **WEB**|| | +| |[services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml](./services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml)|[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml) [![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/web/server/src/simcore_service_webserver/api/v0/openapi.yaml) | +| |[services/web/Dockerfile](./services/web/Dockerfile)|[![Docker Image 
Size](https://img.shields.io/docker/image-size/itisfoundation/webserver)](https://hub.docker.com/r/itisfoundation/webserver/tags) | +| || | diff --git a/TOX.md b/TOX.md new file mode 100644 index 00000000000..3140622a09a --- /dev/null +++ b/TOX.md @@ -0,0 +1,114 @@ +# Tox Configuration for Multi-Environment Testing + +This project now includes a `tox.ini` configuration file that enables multi-environment testing across different Python versions and testing tools. + +## Available Environments + +### Python Testing Environments +- **py310**: Run tests using Python 3.10 +- **py311**: Run tests using Python 3.11 (current default) + +### Code Quality Environments +- **lint**: Code linting with ruff and pylint +- **type-check**: Type checking with mypy +- **format-check**: Code formatting check with ruff +- **format**: Auto-format code with ruff + +### Utility Environments +- **coverage**: Generate coverage reports +- **clean**: Clean up generated files +- **docs**: Build documentation (optional) +- **security**: Run security scans (optional) + +## Usage + +### Prerequisites +```bash +# Install tox +pip install tox + +# Or using the project's development environment +make devenv +source .venv/bin/activate +pip install tox +``` + +### Running Tests + +#### Test all environments +```bash +tox +``` + +#### Test specific Python version +```bash +tox -e py310 # Test with Python 3.10 +tox -e py311 # Test with Python 3.11 +``` + +#### Run code quality checks +```bash +tox -e lint # Run linting +tox -e type-check # Run type checking +tox -e format-check # Check code formatting +``` + +#### Format code +```bash +tox -e format # Auto-format code +``` + +#### Generate coverage report +```bash +tox -e coverage # Generate coverage report +``` + +#### List all available environments +```bash +tox -l +``` + +### Running Tests with Additional Arguments + +You can pass additional pytest arguments: +```bash +tox -e py310 -- tests/specific_test.py -v +tox -e py311 -- --maxfail=1 +``` + +## Python 3.10 Support + +The configuration includes explicit support for Python 3.10 environment testing: + +- **py310** environment uses `python3.10` interpreter +- All testing tools (pytest, coverage, etc.) 
are compatible with Python 3.10 +- Mypy configuration already targets Python 3.10 (see `mypy.ini`) +- Ruff configuration targets Python 3.11 but is backward compatible + +## Integration with CI/CD + +This tox configuration can be easily integrated into CI/CD pipelines: + +```yaml +# Example GitHub Actions usage +- name: Test with tox + run: | + pip install tox + tox -e py310,py311,lint,type-check +``` + +## Project Structure Compatibility + +The tox configuration is designed to work with the osparc-simcore project structure: + +- Tests are located in the `tests/` directory +- Source code is in `services/` and `packages/` directories +- Uses existing configuration files (`.ruff.toml`, `mypy.ini`, `.pylintrc`) +- Integrates with the project's requirement files + +## Notes + +- The configuration uses `skip_missing_interpreters = true` so missing Python interpreters won't fail the entire test run +- Each environment runs in its own virtual environment +- Coverage files are separated by environment name +- The configuration excludes generated models and director service from linting as per project conventions \ No newline at end of file diff --git a/api/specs/common/schemas/Makefile b/api/specs/common/schemas/Makefile deleted file mode 100644 index cb1785ccd95..00000000000 --- a/api/specs/common/schemas/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -.PHONY: all build clean help - -objects = $(wildcard *.json) -outputs := $(objects:.json=-converted.yaml) - -TOOL_IMAGE_NAME := json-schema-to-openapi-schema - - -# target: all – convert all json-schemas to open-api compatible and formats them in yaml -all: check $(outputs) - -%-converted.yaml:%.json - docker run \ - -v ${CURDIR}:/input \ - -v ${CURDIR}:/output \ - $(TOOL_IMAGE_NAME) - -# target: check – Checks whether tool is installed -check: - @docker history $(TOOL_IMAGE_NAME) 2>/dev/null \ - && echo "$(TOOL_IMAGE_NAME) is in place" \ - || echo "MISSING $(TOOL_IMAGE_NAME) tool. 
SEE "scripts/$(TOOL_IMAGE_NAME)" to build it" - -# target: clean – Cleans all *-converted.yaml -clean: check - - rm $(outputs) - -# target: help – Display all callable targets -help: - @echo - @egrep "^\s*#\s*target\s*:\s*" [Mm]akefile \ - | sed -r "s/^\s*#\s*target\s*:\s*//g" - @echo diff --git a/api/specs/common/schemas/error.yaml b/api/specs/common/schemas/error.yaml deleted file mode 100644 index 563484844a9..00000000000 --- a/api/specs/common/schemas/error.yaml +++ /dev/null @@ -1,35 +0,0 @@ -components: - schemas: - ErrorEnveloped: - type: object - required: - - error - properties: - data: - nullable: true - default: null - error: - $ref: '#/components/schemas/ErrorType' - - ErrorType: - type: object - required: - - status - - message - properties: - message: - description: Error message - type: string - example: Unexpected error - errors: - type: array - items: - properties: - code: - type: string - description: Server Exception - example: ServiceUUIDNotFoundError - status: - description: Error code - type: integer - example: 404 diff --git a/api/specs/common/schemas/health_check.yaml b/api/specs/common/schemas/health_check.yaml deleted file mode 100644 index c65389d9794..00000000000 --- a/api/specs/common/schemas/health_check.yaml +++ /dev/null @@ -1,28 +0,0 @@ -components: - schemas: - HealthCheckEnveloped: - type: object - required: - - data - properties: - data: - $ref: '#/components/schemas/HealthCheckType' - error: - nullable: true - default: null - - HealthCheckType: - type: object - properties: - name: - type: string - example: director service - status: - type: string - example: SERVICE_RUNNING - api_version: - type: string - example: 1.0.0-dev - version: - type: string - example: 1dfcfdc \ No newline at end of file diff --git a/api/specs/common/schemas/node-meta-v0.0.1-converted.yaml b/api/specs/common/schemas/node-meta-v0.0.1-converted.yaml deleted file mode 100644 index 6a506cf7107..00000000000 --- a/api/specs/common/schemas/node-meta-v0.0.1-converted.yaml +++ /dev/null @@ -1,348 +0,0 @@ -title: simcore node -description: Description of a simcore node 'class' with input and output -type: object -additionalProperties: false -required: - - key - - version - - type - - name - - description - - authors - - contact - - inputs - - outputs -properties: - key: - type: string - description: distinctive name for the node based on the docker registry path - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - example: simcore/services/comp/itis/sleeper - integration-version: - type: string - description: integration version number - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: 1.0.0 - version: - type: string - description: service version number - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: 1.0.0 - type: - type: string - description: service type - enum: - - frontend - - computational - - dynamic - example: computational - name: - type: string - description: short, human readable name for the node - example: Fast Counter - thumbnail: - type: string - description: url to the thumbnail - example: >- - https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png - badges: - type: array - items: - type: object - required: - - name - - image - - url - 
additionalProperties: false - properties: - name: - type: string - description: Name of the subject - example: travis-ci - image: - type: string - description: Url to the shield - example: >- - https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master - url: - type: string - description: Link to status - example: >- - https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: - build, test and pushing images' - description: - type: string - description: human readable description of the purpose of the node - example: Our best node type - authors: - type: array - minItems: 1 - items: - type: object - required: - - name - - email - additionalProperties: false - properties: - name: - type: string - description: Name of the author - example: Sun Bak - email: - description: Email address - type: string - format: email - example: sun@sense.eight - affiliation: - description: Affiliation of the author - type: string - example: Sense8 - contact: - type: string - format: email - description: email to correspond to the authors about the node - example: lab@net.flix - inputs: - type: object - description: definition of the inputs of this node - x-patternProperties: - ^[-_a-zA-Z0-9]+$: - type: object - description: all the input configurable for this service - additionalProperties: false - required: - - displayOrder - - label - - description - - type - properties: - displayOrder: - description: >- - DEPRECATED: new display order is taken from the item position. - This property will be removed. - deprecated: true - type: number - label: - type: string - description: short name for the property - example: - - Age - description: - type: string - description: description of the property - example: - - Age in seconds since 1970 - type: - type: string - pattern: >- - ^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ - description: >- - data type expected on this input glob matching for data type is - allowed - example: - - number - - boolean - - data:*/* - - data:text/* - - data:[image/jpeg,image/png] - - data:application/json - - >- - data:application/json;schema=https://my-schema/not/really/schema.json - - data:application/vnd.ms-excel - - data:text/plain - - data:application/hdf5 - - data:application/edu.ucdavis@ceclancy.xyz - contentSchema: - title: Content Schema - description: >- - jsonschema of the content at this input/output. 
Required when - type='ref_contentSchema' - type: object - fileToKeyMap: - description: Place the data associated with the named keys in files - type: object - patternProperties: - .+: - type: string - pattern: ^[-_a-zA-Z0-9]+$ - example: - - dir/input1.txt: key_1 - dir33/input2.txt: key2 - defaultValue: - description: initial value for this input - type: - - string - - number - - integer - - boolean - example: - - Dog - - true - unit: - title: Unit - description: Units of this input value, if a physical quantity - type: string - widget: - description: >- - custom widget to use instead of the default one determined from - the data-type - anyOf: - - type: object - additionalProperties: false - required: - - type - properties: - type: - description: type of the property - type: string - enum: - - TextArea - minHeight: - description: minimum Height of the textarea - type: integer - minimum: 1 - - type: object - additionalProperties: false - required: - - type - - structure - properties: - type: - description: type of the property - type: string - enum: - - SelectBox - structure: - type: array - minItems: 1 - items: - type: object - additionalProperties: false - required: - - key - - label - properties: - key: - type: - - string - - boolean - - number - label: - type: string - example: - - - key: rat - label: The Rat - - key: dog - label: Bello the Dog - additionalProperties: true - outputs: - type: object - description: definition of the outputs of this node - x-patternProperties: - ^[-_a-zA-Z0-9]+$: - type: object - description: all the output produced by this node - additionalProperties: false - required: - - displayOrder - - label - - description - - type - properties: - displayOrder: - type: number - description: use this to numerically sort the properties for display - example: - - 1 - - -0.2 - label: - type: string - description: short name for the property - example: - - Age - description: - type: string - description: description of the property - example: - - Age in seconds since 1970 - type: - type: string - pattern: >- - ^(number|integer|boolean|string|ref_contentSchema|data:[^/\s,]+/[^/\s,]+)$ - description: data type expected on this output - example: - - number - - integer - - boolean - - string - - data:application/json - - 'data:application/vnd.ms-excel ' - - data:text/plain - - data:application/hdf5 - contentSchema: - title: Content Schema - description: >- - jsonschema of this input/output. Required when - type='ref_contentSchema' - type: object - fileToKeyMap: - description: >- - Place the data stored in the named files and store it in the - locations pointed to by the respective output key. - type: object - patternProperties: - .+: - type: string - pattern: ^[-_a-zA-Z0-9]+$ - example: - - dir/input1.txt: key_1 - dir33/input2.txt: key2 - unit: - title: Unit - description: Units of the output value, if a physical quantity - type: string - additionalProperties: true - boot-options: - title: Boot Options - description: >- - Service defined boot options. These get injected in the service as env - variables. 
- type: object - x-patternProperties: - ^[_a-zA-Z0-9]+$: - title: BootOptionMode - type: object - properties: - label: - title: Label - type: string - description: - title: Description - type: string - default: - title: Default - type: string - items: - title: Items - type: object - additionalProperties: - title: BootOptionItem - type: object - properties: - label: - title: Label - type: string - description: - title: Description - type: string - required: - - label - - description - required: - - label - - description - - default - - items - additionalProperties: true diff --git a/api/specs/common/schemas/node-meta-v0.0.1.json b/api/specs/common/schemas/node-meta-v0.0.1.json deleted file mode 100644 index a47f6c2ad3b..00000000000 --- a/api/specs/common/schemas/node-meta-v0.0.1.json +++ /dev/null @@ -1,477 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "simcore node", - "description": "Description of a simcore node 'class' with input and output", - "type": "object", - "additionalProperties": false, - "required": [ - "key", - "version", - "type", - "name", - "description", - "authors", - "contact", - "inputs", - "outputs" - ], - "properties": { - "key": { - "type": "string", - "description": "distinctive name for the node based on the docker registry path", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "examples": [ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer" - ] - }, - "integration-version": { - "type": "string", - "description": "integration version number", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "examples": [ - "1.0.0" - ] - }, - "version": { - "type": "string", - "description": "service version number", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "examples": [ - "1.0.0", - "0.0.1" - ] - }, - "type": { - "type": "string", - "description": "service type", - "enum": [ - "frontend", - "computational", - "dynamic" - ], - "examples": [ - "computational" - ] - }, - "name": { - "type": "string", - "description": "short, human readable name for the node", - "examples": [ - "Fast Counter" - ] - }, - "thumbnail": { - "type": "string", - "description": "url to the thumbnail", - "examples": [ - "https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png" - ] - }, - "badges": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "image", - "url" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "Name of the subject", - "examples": [ - "travis-ci", - "coverals.io", - "github.io" - ] - }, - "image": { - "type": "string", - "description": "Url to the shield", - "examples": [ - "https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master", - "https://coveralls.io/repos/github/ITISFoundation/osparc-simcore/badge.svg?branch=master", - "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation" - ] - }, - "url": { - "type": "string", - "description": "Link to status", - "examples": [ - "https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: build, test and pushing images'", - 
"https://coveralls.io/github/ITISFoundation/osparc-simcore?branch=master 'Test coverage'", - "https://itisfoundation.github.io/" - ] - } - } - } - }, - "description": { - "type": "string", - "description": "human readable description of the purpose of the node", - "examples": [ - "Our best node type", - "The mother of all nodes, makes your numbers shine!" - ] - }, - "authors": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "required": [ - "name", - "email" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "Name of the author", - "examples": [ - "Sun Bak", - "Delenn" - ] - }, - "email": { - "description": "Email address", - "type": "string", - "format": "email", - "examples": [ - "sun@sense.eight", - "deleen@minbar.bab" - ] - }, - "affiliation": { - "description": "Affiliation of the author", - "type": "string", - "examples": [ - "Sense8", - "Babylon 5" - ] - } - } - } - }, - "contact": { - "type": "string", - "format": "email", - "description": "email to correspond to the authors about the node", - "examples": [ - "lab@net.flix" - ] - }, - "inputs": { - "type": "object", - "description": "definition of the inputs of this node", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "type": "object", - "description": "all the input configurable for this service", - "additionalProperties": false, - "required": [ - "displayOrder", - "label", - "description", - "type" - ], - "properties": { - "displayOrder": { - "description": "DEPRECATED: new display order is taken from the item position. This property will be removed.", - "deprecated": true, - "type": "number" - }, - "label": { - "type": "string", - "description": "short name for the property", - "examples": [ - "Age" - ] - }, - "description": { - "type": "string", - "description": "description of the property", - "examples": [ - "Age in seconds since 1970" - ] - }, - "type": { - "type": "string", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", - "description": "data type expected on this input glob matching for data type is allowed", - "examples": [ - "number", - "boolean", - "data:*/*", - "data:text/*", - "data:[image/jpeg,image/png]", - "data:application/json", - "data:application/json;schema=https://my-schema/not/really/schema.json", - "data:application/vnd.ms-excel", - "data:text/plain", - "data:application/hdf5", - "data:application/edu.ucdavis@ceclancy.xyz" - ] - }, - "contentSchema": { - "title": "Content Schema", - "description": "jsonschema of the content at this input/output. 
Required when type='ref_contentSchema'", - "type": "object" - }, - "fileToKeyMap": { - "description": "Place the data associated with the named keys in files", - "type": "object", - "patternProperties": { - ".+": { - "type": "string", - "pattern": "^[-_a-zA-Z0-9]+$" - } - }, - "examples": [ - { - "dir/input1.txt": "key_1", - "dir33/input2.txt": "key2" - } - ] - }, - "defaultValue": { - "description": "initial value for this input", - "type": [ - "string", - "number", - "integer", - "boolean" - ], - "examples": [ - "Dog", - true - ] - }, - "unit": { - "title": "Unit", - "description": "Units of this input value, if a physical quantity", - "type": "string" - }, - "widget": { - "description": "custom widget to use instead of the default one determined from the data-type", - "anyOf": [ - { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "description": "type of the property", - "type": "string", - "enum": [ - "TextArea" - ] - }, - "minHeight": { - "description": "minimum Height of the textarea", - "type": "integer", - "minimum": 1 - } - } - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "structure" - ], - "properties": { - "type": { - "description": "type of the property", - "type": "string", - "enum": [ - "SelectBox" - ] - }, - "structure": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "additionalProperties": false, - "required": [ - "key", - "label" - ], - "properties": { - "key": { - "type": [ - "string", - "boolean", - "number" - ] - }, - "label": { - "type": "string" - } - }, - "examples": [ - [ - { - "key": "rat", - "label": "The Rat" - }, - { - "key": "dog", - "label": "Bello the Dog" - } - ] - ] - } - } - } - } - ] - } - } - } - } - }, - "outputs": { - "type": "object", - "description": "definition of the outputs of this node", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "type": "object", - "description": "all the output produced by this node", - "additionalProperties": false, - "required": [ - "displayOrder", - "label", - "description", - "type" - ], - "properties": { - "displayOrder": { - "type": "number", - "description": "use this to numerically sort the properties for display", - "examples": [ - 1, - -0.2 - ] - }, - "label": { - "type": "string", - "description": "short name for the property", - "examples": [ - "Age" - ] - }, - "description": { - "type": "string", - "description": "description of the property", - "examples": [ - "Age in seconds since 1970" - ] - }, - "type": { - "type": "string", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:[^/\\s,]+/[^/\\s,]+)$", - "description": "data type expected on this output", - "examples": [ - "number", - "integer", - "boolean", - "string", - "data:application/json", - "data:application/vnd.ms-excel ", - "data:text/plain", - "data:application/hdf5" - ] - }, - "contentSchema": { - "title": "Content Schema", - "description": "jsonschema of this input/output. 
Required when type='ref_contentSchema'", - "type": "object" - }, - "fileToKeyMap": { - "description": "Place the data stored in the named files and store it in the locations pointed to by the respective output key.", - "type": "object", - "patternProperties": { - ".+": { - "type": "string", - "pattern": "^[-_a-zA-Z0-9]+$" - } - }, - "examples": [ - { - "dir/input1.txt": "key_1", - "dir33/input2.txt": "key2" - } - ] - }, - "unit": { - "title": "Unit", - "description": "Units of the output value, if a physical quantity", - "type": "string" - } - } - } - } - }, - "boot-options": { - "title": "Boot Options", - "description": "Service defined boot options. These get injected in the service as env variables.", - "type": "object", - "patternProperties": { - "^[_a-zA-Z0-9]+$": { - "title": "BootOptionMode", - "type": "object", - "properties": { - "label": { - "title": "Label", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - }, - "default": { - "title": "Default", - "type": "string" - }, - "items": { - "title": "Items", - "type": "object", - "additionalProperties": { - "title": "BootOptionItem", - "type": "object", - "properties": { - "label": { - "title": "Label", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - } - }, - "required": [ - "label", - "description" - ] - } - } - }, - "required": [ - "label", - "description", - "default", - "items" - ] - } - } - } - } -} diff --git a/api/specs/common/schemas/node-output-list-api-v0.0.1.yaml b/api/specs/common/schemas/node-output-list-api-v0.0.1.yaml deleted file mode 100644 index 1bc8f7661ef..00000000000 --- a/api/specs/common/schemas/node-output-list-api-v0.0.1.yaml +++ /dev/null @@ -1,57 +0,0 @@ -$schema: http://json-schema.org/draft-07/schema# -$id: https://simcore.io/api/specs/common/schemas/node-output-list-api-v0.0.1.yaml - -title: node output list api -description: nodes using the list representation for the output - must be able to handle the following requests -type: object -required: - # the validator does not appreciate when required is missing here... and - # sadly does not throw any meaningful error about it... so for now I put this... 
- - getItemList - - getItem -properties: - getItemList: - description: all the items in the list - type: object - properties: - request: - description: oa3 json schema description of the request structure - type: object - required: - - start - - count - properties: - start: - type: integer - count: - type: integer - filter: - type: string - orderBy: - type: string - response: - type: array - items: - type: object - properties: - key: - type: string - label: - type: string - thumbnail: - description: data url - https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs - type: string - getItem: - description: get details about an item in the list - type: object - properties: - request: - type: object - required: - - key - properties: - key: - type: string - response: - type: object diff --git a/api/specs/common/schemas/node-output-tree-api-v0.0.1.yaml b/api/specs/common/schemas/node-output-tree-api-v0.0.1.yaml deleted file mode 100644 index 61525d2c120..00000000000 --- a/api/specs/common/schemas/node-output-tree-api-v0.0.1.yaml +++ /dev/null @@ -1,54 +0,0 @@ -$schema: http://json-schema.org/draft-07/schema# -$id: https://simcore.io/api/specs/common/schemas/node-output-tree-api-v0.0.1.yaml - -title: node output tree api -description: nodes using the tree representation for the output - must be able to handle the following requests -type: object -required: - # the validator does not appreciate when required is missing here... and - # sadly does not throw any meaningful error about it... so for now I put this... - - getItemList - - getItem -properties: - getItemList: - description: a list of items making up one level of the tree - type: object - properties: - request: - # FAILS openapi. DO NOT ADD: summary: oa3 json schema description of the request structure. - description: | - oa3 json schema description of the request structure. - If no `rootKey` is specified, the first level of the tree is returned. - The `filter` will return any items matching the filter string as well as any - folder items containing matching items further down the tree. - type: object - properties: - rootKey: - type: string - filter: - type: string - response: - type: array - items: - type: object - properties: - key: - type: string - label: - type: string - folder: - type: boolean - getItem: - description: get details about an item in the list - type: object - properties: - request: - type: object - required: - - key - properties: - key: - type: string - response: - type: object diff --git a/api/specs/common/schemas/project-v0.0.1-converted.yaml b/api/specs/common/schemas/project-v0.0.1-converted.yaml deleted file mode 100644 index 87da6e07177..00000000000 --- a/api/specs/common/schemas/project-v0.0.1-converted.yaml +++ /dev/null @@ -1,558 +0,0 @@ -title: simcore project -description: Description of a simcore project -type: object -additionalProperties: false -required: - - uuid - - name - - description - - prjOwner - - accessRights - - creationDate - - lastChangeDate - - thumbnail - - workbench -properties: - uuid: - type: string - format: uuid - description: project unique identifier - example: 07640335-a91f-468c-ab69-a374fa82078d - name: - type: string - description: project name - example: Temporal Distortion Simulator - description: - type: string - description: longer one-line description about the project - example: Dabbling in temporal transitions ... 
- prjOwner: - type: string - format: email - description: user email - accessRights: - type: object - description: >- - object containing the GroupID as key and read/write/execution permissions - as value - x-patternProperties: - ^\S+$: - type: object - description: the group id - additionalProperties: false - required: - - read - - write - - delete - properties: - read: - type: boolean - description: gives read access - write: - type: boolean - description: gives write access - delete: - type: boolean - description: gives deletion rights - additionalProperties: true - creationDate: - type: string - description: project creation date - pattern: >- - \d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\d)T(2[0-3]|1\d|0?[0-9])(:(\d|[0-5]\d)){2}(\.\d{3})?Z - example: '2018-07-01T11:13:43Z' - lastChangeDate: - type: string - description: last save date - pattern: >- - \d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\d)T(2[0-3]|1\d|0?[0-9])(:(\d|[0-5]\d)){2}(\.\d{3})?Z - example: '2018-07-01T11:13:43Z' - thumbnail: - type: string - minLength: 0 - maxLength: 2083 - format: uri - description: url of the latest screenshot of the project - example: https://placeimg.com/171/96/tech/grayscale/?0.jpg - workbench: - type: object - x-patternProperties: - ^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$: - type: object - additionalProperties: false - required: - - key - - version - - label - properties: - key: - type: string - description: distinctive name for the node based on the docker registry path - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - example: - - simcore/services/comp/sleeper - - simcore/services/dynamic/3dviewer - - simcore/services/frontend/file-picker - version: - type: string - description: semantic version number of the node - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: - - 1.0.0 - - 0.0.1 - label: - type: string - description: The short name of the node - example: - - JupyterLab - progress: - type: number - maximum: 100 - minimum: 0 - description: the node progress value - thumbnail: - minLength: 0 - maxLength: 2083 - format: uri - type: string - description: url of the latest screenshot of the node - example: - - https://placeimg.com/171/96/tech/grayscale/?0.jpg - runHash: - description: >- - the hex digest of the resolved inputs +outputs hash at the time - when the last outputs were generated - type: - - string - - 'null' - example: - - a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2 - inputs: - type: object - description: values of input properties - patternProperties: - ^[-_a-zA-Z0-9]+$: - oneOf: - - type: - - integer - - boolean - - string - - number - - 'null' - - type: object - additionalProperties: false - required: - - nodeUuid - - output - properties: - nodeUuid: - type: string - format: uuid - output: - type: string - pattern: ^[-_a-zA-Z0-9]+$ - - type: object - additionalProperties: false - required: - - store - - path - properties: - store: - type: - - string - - integer - dataset: - type: string - path: - type: string - label: - type: string - eTag: - type: string - - type: object - additionalProperties: false - required: - - downloadLink - properties: - downloadLink: - minLength: 1 - maxLength: 65536 - type: string - format: uri - label: - type: string - - type: array - items: {} - inputsUnits: - type: object - description: values of input unit - patternProperties: - ^[-_a-zA-Z0-9]+$: - 
type: string - example: - - kilo-meter - - milli-second - - micro-gram - - kelvin - inputAccess: - description: map with key - access level pairs - type: object - patternProperties: - ^[-_a-zA-Z0-9]+$: - type: string - enum: - - Invisible - - ReadOnly - - ReadAndWrite - default: ReadAndWrite - example: - - ReadOnly - inputNodes: - type: array - items: - type: string - format: uuid - description: node IDs of where the node is connected to - example: - - nodeUuid1 - - nodeUuid2 - outputs: - default: {} - type: object - patternProperties: - ^[-_a-zA-Z0-9]+$: - oneOf: - - type: - - integer - - boolean - - string - - number - - 'null' - - type: object - additionalProperties: false - required: - - store - - path - properties: - store: - type: - - string - - integer - dataset: - type: string - path: - type: string - label: - type: string - eTag: - type: string - - type: object - additionalProperties: false - required: - - downloadLink - properties: - downloadLink: - minLength: 1 - maxLength: 65536 - type: string - format: uri - label: - type: string - - type: array - items: {} - outputNode: - type: boolean - deprecated: true - outputNodes: - type: array - items: - type: string - format: uuid - description: Used in group-nodes. Node IDs of those connected to the output - example: - - nodeUuid1 - - nodeUuid2 - parent: - type: - - 'null' - - string - format: uuid - description: Parent's (group-nodes') node ID s. - example: - - nodeUuid1 - - nodeUuid2 - position: - type: object - additionalProperties: false - required: - - x - - 'y' - properties: - x: - type: integer - description: The x position - example: - - '12' - 'y': - type: integer - description: The y position - example: - - '15' - deprecated: true - state: - title: NodeState - type: object - properties: - modified: - title: Modified - description: true if the node's outputs need to be re-computed - default: true - type: boolean - dependencies: - title: Dependencies - description: >- - contains the node inputs dependencies if they need to be - computed first - type: array - uniqueItems: true - items: - type: string - format: uuid - currentStatus: - description: the node's current state - default: NOT_STARTED - example: - - RUNNING - - FAILED - enum: - - UNKNOWN - - PUBLISHED - - NOT_STARTED - - PENDING - - STARTED - - RETRY - - SUCCESS - - FAILED - - ABORTED - type: string - additionalProperties: false - bootOptions: - title: Boot Options - description: >- - Some services provide alternative parameters to be injected at - boot time. The user selection should be stored here, and it will - overwrite the services's defaults. 
- type: object - patternProperties: - '[a-zA-Z][a-azA-Z0-9_]*': - type: string - additionalProperties: true - ui: - type: object - additionalProperties: true - properties: - workbench: - type: object - x-patternProperties: - ^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$: - type: object - additionalProperties: false - required: - - position - properties: - position: - type: object - additionalProperties: false - required: - - x - - 'y' - properties: - x: - type: integer - description: The x position - example: - - '12' - 'y': - type: integer - description: The y position - example: - - '15' - marker: - type: object - additionalProperties: false - required: - - color - properties: - color: - type: string - description: Marker's color - example: - - '#FF0000' - - '#0000FF' - additionalProperties: true - slideshow: - type: object - x-patternProperties: - ^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$: - type: object - additionalProperties: false - required: - - position - properties: - position: - type: integer - description: Slide's position - example: - - 0 - - 2 - instructions: - type: - - string - - 'null' - description: Instructions about what to do in this step - example: - - This is a **sleeper** - - Please, select the config file defined [in this link](asdf) - additionalProperties: true - currentNodeId: - type: string - format: uuid - annotations: - type: object - x-patternProperties: - ^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$: - type: object - additionalProperties: false - required: - - type - - color - - attributes - properties: - type: - type: string - description: Annotation type - example: - - rect - - text - color: - type: string - description: Annotation's color - example: - - '#FF0000' - - '#0000FF' - attributes: - type: object - description: svg attributes - additionalProperties: true - tags: - type: array - items: - type: integer - classifiers: - type: array - description: Contains the reference to the project classifiers - items: - type: string - example: some:id:to:a:classifier - dev: - type: object - description: object used for development purposes only - state: - title: State - description: Project state - anyOf: - - nullable: true - - title: ProjectState - type: object - additionalProperties: false - properties: - locked: - title: Locked - description: The project lock state - allOf: - - title: ProjectLocked - type: object - additionalProperties: false - properties: - value: - title: Value - description: True if the project is locked - type: boolean - owner: - title: Owner - description: If locked, the user that owns the lock - allOf: - - title: Owner - type: object - additionalProperties: false - properties: - user_id: - title: User Id - type: integer - description: >- - Owner's identifier when registered in the user's - database table - example: - - 2 - first_name: - title: First Name - description: Owner first name - example: - - John - type: string - last_name: - title: Last Name - description: Owner last name - example: - - Smith - type: string - required: - - user_id - - first_name - - last_name - status: - title: Status - description: The status of the project - enum: - - CLOSED - - CLOSING - - CLONING - - OPENING - - EXPORTING - - OPENED - type: string - required: - - value - - status - state: - title: State - description: The project running state - allOf: - - title: ProjectRunningState - type: object - additionalProperties: 
false - properties: - value: - title: RunningState - description: An enumeration. - enum: - - UNKNOWN - - NOT_STARTED - - PUBLISHED - - PENDING - - STARTED - - RETRY - - SUCCESS - - FAILED - - ABORTED - type: string - required: - - value - required: - - locked - - state - quality: - type: object - title: Quality - description: Object containing Quality Assessment related data diff --git a/api/specs/common/schemas/project-v0.0.1.json b/api/specs/common/schemas/project-v0.0.1.json deleted file mode 100644 index 8c178845ccb..00000000000 --- a/api/specs/common/schemas/project-v0.0.1.json +++ /dev/null @@ -1,768 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://simcore.io/api/specs/webserver/v0/components/schemas/project-v0.0.1.json", - "title": "simcore project", - "description": "Description of a simcore project", - "type": "object", - "additionalProperties": false, - "required": [ - "uuid", - "name", - "description", - "prjOwner", - "accessRights", - "creationDate", - "lastChangeDate", - "thumbnail", - "workbench" - ], - "properties": { - "uuid": { - "type": "string", - "format": "uuid", - "description": "project unique identifier", - "examples": [ - "07640335-a91f-468c-ab69-a374fa82078d", - "9bcf8feb-c1b1-41b6-b201-639cd6ccdba8" - ] - }, - "name": { - "type": "string", - "description": "project name", - "examples": [ - "Temporal Distortion Simulator" - ] - }, - "description": { - "type": "string", - "description": "longer one-line description about the project", - "examples": [ - "Dabbling in temporal transitions ..." - ] - }, - "prjOwner": { - "type": "string", - "format": "email", - "description": "user email" - }, - "accessRights": { - "type": "object", - "description": "object containing the GroupID as key and read/write/execution permissions as value", - "patternProperties": { - "^\\S+$": { - "type": "object", - "description": "the group id", - "additionalProperties": false, - "required": [ - "read", - "write", - "delete" - ], - "properties": { - "read": { - "type": "boolean", - "description": "gives read access" - }, - "write": { - "type": "boolean", - "description": "gives write access" - }, - "delete": { - "type": "boolean", - "description": "gives deletion rights" - } - } - } - } - }, - "creationDate": { - "type": "string", - "description": "project creation date", - "pattern": "\\d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\\d)T(2[0-3]|1\\d|0?[0-9])(:(\\d|[0-5]\\d)){2}(\\.\\d{3})?Z", - "examples": [ - "2018-07-01T11:13:43Z" - ] - }, - "lastChangeDate": { - "type": "string", - "description": "last save date", - "pattern": "\\d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\\d)T(2[0-3]|1\\d|0?[0-9])(:(\\d|[0-5]\\d)){2}(\\.\\d{3})?Z", - "examples": [ - "2018-07-01T11:13:43Z" - ] - }, - "thumbnail": { - "type": "string", - "minLength": 0, - "maxLength": 2083, - "format": "uri", - "description": "url of the latest screenshot of the project", - "examples": [ - "https://placeimg.com/171/96/tech/grayscale/?0.jpg" - ] - }, - "workbench": { - "type": "object", - "patternProperties": { - "^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$": { - "type": "object", - "additionalProperties": false, - "required": [ - "key", - "version", - "label" - ], - "properties": { - "key": { - "type": "string", - "description": "distinctive name for the node based on the docker registry path", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "examples": [ - "simcore/services/comp/sleeper", - 
"simcore/services/dynamic/3dviewer", - "simcore/services/frontend/file-picker" - ] - }, - "version": { - "type": "string", - "description": "semantic version number of the node", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "examples": [ - "1.0.0", - "0.0.1" - ] - }, - "label": { - "type": "string", - "description": "The short name of the node", - "example": [ - "JupyterLab" - ] - }, - "progress": { - "type": "number", - "maximum": 100, - "minimum": 0, - "description": "the node progress value" - }, - "thumbnail": { - "minLength": 0, - "maxLength": 2083, - "format": "uri", - "type": "string", - "description": "url of the latest screenshot of the node", - "examples": [ - "https://placeimg.com/171/96/tech/grayscale/?0.jpg" - ] - }, - "runHash": { - "description": "the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated", - "type": [ - "string", - "null" - ], - "examples": [ - "a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2" - ] - }, - "inputs": { - "type": "object", - "description": "values of input properties", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "oneOf": [ - { - "type": [ - "integer", - "boolean", - "string", - "number", - "null" - ] - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "nodeUuid", - "output" - ], - "properties": { - "nodeUuid": { - "type": "string", - "format": "uuid" - }, - "output": { - "type": "string", - "pattern": "^[-_a-zA-Z0-9]+$" - } - } - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "store", - "path" - ], - "properties": { - "store": { - "type": [ - "string", - "integer" - ] - }, - "dataset": { - "type": "string" - }, - "path": { - "type": "string" - }, - "label": { - "type": "string" - }, - "eTag": { - "type": "string" - } - } - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "downloadLink" - ], - "properties": { - "downloadLink": { - "minLength": 1, - "maxLength": 65536, - "type": "string", - "format": "uri" - }, - "label": { - "type": "string" - } - } - }, - { - "type": "array", - "items": {} - } - ] - } - } - }, - "inputsUnits": { - "type": "object", - "description": "values of input unit", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "type": "string", - "examples": [ - "kilo-meter", - "milli-second", - "micro-gram", - "kelvin" - ] - } - } - }, - "inputAccess": { - "description": "map with key - access level pairs", - "type": "object", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "type": "string", - "enum": [ - "Invisible", - "ReadOnly", - "ReadAndWrite" - ], - "default": "ReadAndWrite", - "examples": [ - "ReadOnly" - ] - } - } - }, - "inputNodes": { - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "description": "node IDs of where the node is connected to", - "examples": [ - "nodeUuid1", - "nodeUuid2" - ] - }, - "outputs": { - "default": {}, - "type": "object", - "patternProperties": { - "^[-_a-zA-Z0-9]+$": { - "oneOf": [ - { - "type": [ - "integer", - "boolean", - "string", - "number", - "null" - ] - }, - { - "type": "object", - "additionalProperties": false, - "required": [ - "store", - "path" - ], - "properties": { - "store": { - "type": [ - "string", - "integer" - ] - }, - "dataset": { - "type": "string" - }, - "path": { - "type": "string" - }, - "label": { - "type": "string" - }, - "eTag": { - "type": "string" - } - } - 
}, - { - "type": "object", - "additionalProperties": false, - "required": [ - "downloadLink" - ], - "properties": { - "downloadLink": { - "minLength": 1, - "maxLength": 65536, - "type": "string", - "format": "uri" - }, - "label": { - "type": "string" - } - } - }, - { - "type": "array", - "items": {} - } - ] - } - } - }, - "outputNode": { - "type": "boolean", - "deprecated": true - }, - "outputNodes": { - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "description": "Used in group-nodes. Node IDs of those connected to the output", - "examples": [ - "nodeUuid1", - "nodeUuid2" - ] - }, - "parent": { - "type": [ - "null", - "string" - ], - "format": "uuid", - "description": "Parent's (group-nodes') node ID s.", - "examples": [ - "nodeUuid1", - "nodeUuid2" - ] - }, - "position": { - "type": "object", - "additionalProperties": false, - "required": [ - "x", - "y" - ], - "properties": { - "x": { - "type": "integer", - "description": "The x position", - "example": [ - "12" - ] - }, - "y": { - "type": "integer", - "description": "The y position", - "example": [ - "15" - ] - } - }, - "deprecated": true - }, - "state": { - "title": "NodeState", - "type": "object", - "properties": { - "modified": { - "title": "Modified", - "description": "true if the node's outputs need to be re-computed", - "default": true, - "type": "boolean" - }, - "dependencies": { - "title": "Dependencies", - "description": "contains the node inputs dependencies if they need to be computed first", - "type": "array", - "uniqueItems": true, - "items": { - "type": "string", - "format": "uuid" - } - }, - "currentStatus": { - "description": "the node's current state", - "default": "NOT_STARTED", - "examples": [ - "RUNNING", - "FAILED" - ], - "enum": [ - "UNKNOWN", - "PUBLISHED", - "NOT_STARTED", - "PENDING", - "STARTED", - "RETRY", - "SUCCESS", - "FAILED", - "ABORTED" - ], - "type": "string" - } - }, - "additionalProperties": false - }, - "bootOptions": { - "title": "Boot Options", - "description": "Some services provide alternative parameters to be injected at boot time. 
The user selection should be stored here, and it will overwrite the services's defaults.", - "type": "object", - "patternProperties": { - "[a-zA-Z][a-azA-Z0-9_]*": { - "type": "string" - } - } - } - } - } - } - }, - "ui": { - "type": "object", - "additionalProperties": true, - "properties": { - "workbench": { - "type": "object", - "patternProperties": { - "^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$": { - "type": "object", - "additionalProperties": false, - "required": [ - "position" - ], - "properties": { - "position": { - "type": "object", - "additionalProperties": false, - "required": [ - "x", - "y" - ], - "properties": { - "x": { - "type": "integer", - "description": "The x position", - "example": [ - "12" - ] - }, - "y": { - "type": "integer", - "description": "The y position", - "example": [ - "15" - ] - } - } - }, - "marker": { - "type": "object", - "additionalProperties": false, - "required": [ - "color" - ], - "properties": { - "color": { - "type": "string", - "description": "Marker's color", - "examples": [ - "#FF0000", - "#0000FF" - ] - } - } - } - } - } - } - }, - "slideshow": { - "type": "object", - "patternProperties": { - "^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$": { - "type": "object", - "additionalProperties": false, - "required": [ - "position" - ], - "properties": { - "position": { - "type": "integer", - "description": "Slide's position", - "examples": [ - 0, - 2 - ] - }, - "instructions": { - "type": [ - "string", - "null" - ], - "description": "Instructions about what to do in this step", - "examples": [ - "This is a **sleeper**", - "Please, select the config file defined [in this link](asdf)" - ] - } - } - } - } - }, - "currentNodeId": { - "type": "string", - "format": "uuid" - }, - "annotations": { - "type": "object", - "patternProperties": { - "^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?4[0-9a-fA-F]{3}-?[89abAB][0-9a-fA-F]{3}-?[0-9a-fA-F]{12}$": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "color", - "attributes" - ], - "properties": { - "type": { - "type": "string", - "description": "Annotation type", - "examples": [ - "rect", - "text" - ] - }, - "color": { - "type": "string", - "description": "Annotation's color", - "examples": [ - "#FF0000", - "#0000FF" - ] - }, - "attributes": { - "type": "object", - "description": "svg attributes" - } - } - } - } - } - } - }, - "tags": { - "type": "array", - "items": { - "type": "integer" - } - }, - "classifiers": { - "type": "array", - "description": "Contains the reference to the project classifiers", - "examples": [ - "some:id:to:a:classifier" - ], - "items": { - "type": "string" - } - }, - "dev": { - "type": "object", - "description": "object used for development purposes only" - }, - "state": { - "title": "State", - "description": "Project state", - "anyOf": [ - { - "type": "null" - }, - { - "title": "ProjectState", - "type": "object", - "additionalProperties": false, - "properties": { - "locked": { - "title": "Locked", - "description": "The project lock state", - "allOf": [ - { - "title": "ProjectLocked", - "type": "object", - "additionalProperties": false, - "properties": { - "value": { - "title": "Value", - "description": "True if the project is locked", - "type": "boolean" - }, - "owner": { - "title": "Owner", - "description": "If locked, the user that owns the lock", - "allOf": [ - { - "title": "Owner", - "type": "object", - "additionalProperties": false, - "properties": { - "user_id": { - 
"title": "User Id", - "type": "integer", - "description": "Owner's identifier when registered in the user's database table", - "example": [ - 2 - ] - }, - "first_name": { - "title": "First Name", - "description": "Owner first name", - "example": [ - "John" - ], - "type": "string" - }, - "last_name": { - "title": "Last Name", - "description": "Owner last name", - "example": [ - "Smith" - ], - "type": "string" - } - }, - "required": [ - "user_id", - "first_name", - "last_name" - ] - } - ] - }, - "status": { - "title": "Status", - "description": "The status of the project", - "enum": [ - "CLOSED", - "CLOSING", - "CLONING", - "OPENING", - "EXPORTING", - "OPENED" - ], - "type": "string" - } - }, - "required": [ - "value", - "status" - ] - } - ] - }, - "state": { - "title": "State", - "description": "The project running state", - "allOf": [ - { - "title": "ProjectRunningState", - "type": "object", - "additionalProperties": false, - "properties": { - "value": { - "title": "RunningState", - "description": "An enumeration.", - "enum": [ - "UNKNOWN", - "NOT_STARTED", - "PUBLISHED", - "PENDING", - "STARTED", - "RETRY", - "SUCCESS", - "FAILED", - "ABORTED" - ], - "type": "string" - } - }, - "required": [ - "value" - ] - } - ] - } - }, - "required": [ - "locked", - "state" - ] - } - ] - }, - "quality": { - "type": "object", - "title": "Quality", - "description": "Object containing Quality Assessment related data" - } - } -} diff --git a/api/specs/common/schemas/project.yaml b/api/specs/common/schemas/project.yaml deleted file mode 100644 index 1e9e31a52d6..00000000000 --- a/api/specs/common/schemas/project.yaml +++ /dev/null @@ -1,72 +0,0 @@ -components: - schemas: - ProjectIn: - $ref: "./project-v0.0.1-converted.yaml" - - ProjectOut: - allOf: - - $ref: "./project-v0.0.1-converted.yaml" - - type: object - properties: - state: - $ref: "#/components/schemas/ProjectState" - - ProjectEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/ProjectOut" - error: - nullable: true - default: null - - ProjectArrayEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/components/schemas/ProjectOut" - error: - nullable: true - default: null - - ProjectState: - type: object - required: - - locked - properties: - locked: - type: object - description: describes the project lock state - required: - - value - properties: - value: - type: boolean - description: true if the project is locked - owner: - type: object - properties: - first_name: - type: string - last_name: - type: string - required: - - firstName - - lastName - - ProjectStateEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/ProjectState" - error: - nullable: true - default: null diff --git a/api/specs/common/schemas/task.yaml b/api/specs/common/schemas/task.yaml deleted file mode 100644 index 331d97a9cae..00000000000 --- a/api/specs/common/schemas/task.yaml +++ /dev/null @@ -1,54 +0,0 @@ -Task: - type: object - properties: - task_id: - type: string - status_href: - type: string - result_href: - type: string - - required: - - task_id - - status_href - - result_href - -TaskEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/Task" - error: - nullable: true - default: null - -TaskStatus: - type: object - required: - - task_progress - - done - - started - properties: - task_progress: - type: number - minimum: 0 - maximum: 1 - done: - type: boolean - started: - type: string - pattern: 
'\d{4}-(12|11|10|0?[1-9])-(31|30|[0-2]?\d)T(2[0-3]|1\d|0?[0-9])(:(\d|[0-5]\d)){2}(\.\d{3})?Z' - example: '2018-07-01T11:13:43Z' - -TaskStatusEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/TaskStatus" - error: - nullable: true - default: null diff --git a/api/specs/director/openapi.yaml b/api/specs/director/openapi.yaml deleted file mode 100644 index 1b1d05181cd..00000000000 --- a/api/specs/director/openapi.yaml +++ /dev/null @@ -1,492 +0,0 @@ -openapi: "3.0.0" -info: - description: This is the oSparc's director API - version: 0.1.0 - title: Director API - contact: - name: IT'IS Foundation - email: support@simcore.com - license: - name: MIT - url: https://github.com/ITISFoundation/osparc-simcore/blob/master/LICENSE - -servers: - - description: Development server - url: http://{host}:{port}/{version} - variables: - host: - default: "localhost" - port: - default: "8080" - version: - default: "v0" - enum: - - "v0" - - description: Production server - url: http://director:{port}/{version} - variables: - port: - default: "8080" - version: - default: "v0" - enum: - - "v0" - -# tags are used for organizing operations -tags: - - name: admins - description: Secured Admin-only calls - - name: developers - description: Operations available to regular developers - - name: users - description: Operations available to regular users - -paths: - /: - get: - tags: - - users - summary: Service health-check endpoint - description: Some general information on the API and state of the service behind - operationId: root_get - responses: - "200": - description: Service information - content: - application/json: - schema: - $ref: "#/components/schemas/HealthCheckEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /services: - get: - tags: - - users - summary: Lists available services in the oSparc platform - description: Lists available services in the oSparc platform - operationId: services_get - parameters: - - $ref: "#/components/parameters/ServiceType" - responses: - "200": - description: Success, returns the list of available services - content: - application/json: - schema: - $ref: "#/components/schemas/ServicesEnveloped" - "401": - description: Unauthorized access - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /services/{service_key}/{service_version}: - get: - tags: - - users - summary: Returns details of the selected service if available in the oSparc platform - description: Returns details of the selected service if available in the oSparc platform - operationId: services_by_key_version_get - parameters: - - $ref: "#/components/parameters/ServiceKeyPath" - - $ref: "#/components/parameters/ServiceVersionPath" - responses: - "200": - description: Success, returns the details of the service - content: - application/json: - schema: - $ref: "#/components/schemas/ServicesEnveloped" - "401": - description: Unauthorized access - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - 
/services/{service_key}/{service_version}/labels: - get: - tags: - - users - summary: Returns the list of tags attached to a service - operationId: get_service_labels - parameters: - - $ref: "#/components/parameters/ServiceKeyPath" - - $ref: "#/components/parameters/ServiceVersionPath" - responses: - "200": - description: Success, returns the details of the service - content: - application/json: - schema: - type: object - additionalProperties: - type: string - "401": - description: Unauthorized access - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /service_extras/{service_key}/{service_version}: - get: - tags: - - users - summary: Returns the service's details which should be hidden from the user defined as extras. - description: Currently returns the node_requirements an array of resoruces needed for scheduling. - operationId: service_extras_by_key_version_get - parameters: - - $ref: "#/components/parameters/ServiceKeyPath" - - $ref: "#/components/parameters/ServiceVersionPath" - responses: - "200": - description: Success, returns an object containing details hidden from the user - content: - application/json: - schema: - $ref: "#/components/schemas/ServiceExtrasEnveloped" - "401": - description: Unauthorized access - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /running_interactive_services: - get: - tags: - - users - summary: Returns a list of interactive services - operationId: running_interactive_services_list_get - parameters: - - in: query - name: user_id - required: false - schema: - type: string - - in: query - name: project_id - required: false - schema: - type: string - responses: - "200": - description: Returns the running services instances - content: - application/json: - schema: - $ref: "#/components/schemas/RunningServicesEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - post: - tags: - - users - summary: Starts an interactive service in the oSparc platform - operationId: running_interactive_services_post - parameters: - - $ref: "#/components/parameters/UserId" - - $ref: "#/components/parameters/ProjectId" - - $ref: "#/components/parameters/ServiceKey" - - $ref: "#/components/parameters/ServiceVersion" - - $ref: "#/components/parameters/AssignmentUuid" - - $ref: "#/components/parameters/ServiceBasePath" - responses: - "201": - description: Succesfully created the service in the oSparc platform. Returns the location where the service runs. 
- content: - application/json: - schema: - $ref: "#/components/schemas/RunningServiceEnveloped" - "400": - description: Malformed function call, missing field - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "401": - description: Unauthorized access - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "409": - description: A service with the same uuid already exists - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /running_interactive_services/{service_uuid}: - get: - tags: - - users - summary: Succesfully returns if a service with the defined uuid is up and running - description: Succesfully returns if a service with the defined uuid is up and running - operationId: running_interactive_services_get - parameters: - - $ref: "#/components/parameters/ServiceUuid" - responses: - "200": - description: OK service exists and runs. Returns service location. - content: - application/json: - schema: - $ref: "#/components/schemas/RunningServiceEnveloped" - "400": - description: Malformed function call, missing field - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - delete: - tags: - - users - summary: Stops and removes an interactive service from the oSparc platform - description: Stops and removes an interactive service from the oSparc platform - operationId: running_interactive_services_delete - parameters: - - $ref: "#/components/parameters/ServiceUuid" - - $ref: "#/components/parameters/SaveState" - responses: - "204": - description: Succesfully stopped and removed the service from the oSparc platform - "400": - description: Malformed function call, missing field - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - "404": - description: Service not found - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - -components: - parameters: - UserId: - in: query - name: user_id - description: The ID of the user that starts the service - required: true - schema: - type: string - example: asdfgj233 - ProjectId: - in: query - name: project_id - description: The ID of the project in which the service starts - required: true - schema: - type: string - example: asdfgj233 - AssignmentUuid: - in: query - name: service_uuid - description: The uuid to assign the service with - required: true - schema: - type: string - # format: uuid - example: 123e4567-e89b-12d3-a456-426655440000 - - ServiceKeyPath: - in: path - name: service_key - description: The key (url) of the service - required: true - schema: - type: string - description: distinctive name for the node based on the docker registry path - pattern: '^(simcore)/(services)/(comp|dynamic)(/[\w/-]+)+$' - example: - - simcore/services/comp/itis/sleeper - - simcore/services/dynamic/3dviewer - - ServiceKey: 
- in: query - name: service_key - description: The key (url) of the service - required: true - schema: - type: string - description: distinctive name for the node based on the docker registry path - pattern: '^(simcore)/(services)/(comp|dynamic)(/[\w/-]+)+$' - example: - - simcore/services/comp/itis/sleeper - - simcore/services/dynamic/3dviewer - - ServiceType: - in: query - name: service_type - description: | - The service type: - * computational - a computational service - * interactive - an interactive service - required: false - schema: - type: string - enum: - - computational - - interactive - example: computational - - ServiceBasePath: - in: query - name: service_basepath - description: predefined basepath for the backend service otherwise uses root - required: false - schema: - type: string - example: "/x/EycCXbU0H/" - default: "" - - ServiceUuid: - in: path - name: service_uuid - description: The uuid of the service - required: true - schema: - type: string - # format: uuid - example: 123e4567-e89b-12d3-a456-426655440000 - - ServiceVersionPath: - in: path - name: service_version - description: The tag/version of the service - required: true - schema: - type: string - description: semantic version number - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: - - 1.0.0 - - 0.0.1 - - ServiceVersion: - in: query - name: service_tag - description: The tag/version of the service - required: false - schema: - type: string - description: semantic version number - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: - - 1.0.0 - - 0.0.1 - - SaveState: - in: query - name: save_state - description: Save the state prior to removing the service - required: false - schema: - type: boolean - default: true - - schemas: - ErrorEnveloped: - $ref: "../common/schemas/error.yaml#/components/schemas/ErrorEnveloped" - - RunningServiceEnveloped: - $ref: "../common/schemas/running_service.yaml#/components/schemas/RunningServiceEnveloped" - - RunningServicesEnveloped: - $ref: "../common/schemas/running_service.yaml#/components/schemas/RunningServicesEnveloped" - - ServicesEnveloped: - $ref: "../common/schemas/services.yaml#/components/schemas/ServicesEnveloped" - - ServiceExtrasEnveloped: - $ref: "../common/schemas/services.yaml#/components/schemas/ServiceExtrasEnveloped" - - HealthCheckEnveloped: - $ref: "../common/schemas/health_check.yaml#/components/schemas/HealthCheckEnveloped" diff --git a/api/specs/director/schemas/error.yaml b/api/specs/director/schemas/error.yaml new file mode 100644 index 00000000000..6dabfe55940 --- /dev/null +++ b/api/specs/director/schemas/error.yaml @@ -0,0 +1,35 @@ +components: + schemas: + ErrorEnveloped: + type: object + required: + - error + properties: + data: + nullable: true + default: null + error: + $ref: '#/components/schemas/ErrorType' + + ErrorType: + type: object + required: + - status + - message + properties: + message: + description: Error message + type: string + example: Unexpected error + errors: + type: array + items: + properties: + code: + type: string + description: Server Exception + example: ServiceUUIDNotFoundError + status: + description: Error code + type: integer + example: 404 diff --git a/api/specs/director/schemas/health_check.yaml b/api/specs/director/schemas/health_check.yaml new file mode 100644 
index 00000000000..7dfec6819a6 --- /dev/null +++ b/api/specs/director/schemas/health_check.yaml @@ -0,0 +1,28 @@ +components: + schemas: + HealthCheckEnveloped: + type: object + required: + - data + properties: + data: + $ref: '#/components/schemas/HealthCheckType' + error: + nullable: true + default: null + + HealthCheckType: + type: object + properties: + name: + type: string + example: director service + status: + type: string + example: SERVICE_RUNNING + api_version: + type: string + example: 1.0.0-dev + version: + type: string + example: 1dfcfdc diff --git a/api/specs/director/schemas/node-meta-v0.0.1-pydantic-converted-clean.yaml b/api/specs/director/schemas/node-meta-v0.0.1-pydantic-converted-clean.yaml new file mode 100644 index 00000000000..fa5e4067ed8 --- /dev/null +++ b/api/specs/director/schemas/node-meta-v0.0.1-pydantic-converted-clean.yaml @@ -0,0 +1,901 @@ +additionalProperties: false +description: 'Static metadata for a service injected in the image labels + + + This is one to one with node-meta-v0.0.1.json' +example: + authors: + - affiliation: Company + email: smith@company.com + name: John Smith + - affiliation: University + email: brown@uni.edu + name: Richard Brown + contact: smith@company.com + description: oSparc Python Runner + inputs: + input_1: + description: Any code, requirements or data file + displayOrder: 1 + label: Input data + type: data:*/* + integration-version: 1.0.0 + key: simcore/services/comp/osparc-python-runner + name: oSparc Python Runner + outputs: + output_1: + description: All data produced by the script is zipped as output_data.zip + displayOrder: 1 + fileToKeyMap: + output_data.zip: output_1 + label: Output data + type: data:*/* + type: computational + version: 1.7.0 +properties: + authors: + items: + additionalProperties: false + properties: + affiliation: + description: Affiliation of the author + example: Sense8 + title: Affiliation + type: string + email: + description: Email address + example: sun@sense.eight + format: email + title: Email + type: string + name: + description: Name of the author + example: Jim Knopf + title: Name + type: string + required: + - name + - email + title: Author + type: object + minItems: 1 + title: Authors + type: array + badges: + items: + additionalProperties: false + properties: + image: + description: Url to the badge + example: https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master + format: uri + maxLength: 2083 + minLength: 1 + title: Image + type: string + name: + description: Name of the subject + example: travis-ci + title: Name + type: string + url: + description: Link to the status + example: 'https://travis-ci.org/ITISFoundation/osparc-simcore ''State of + CI: build, test and pushing images''' + format: uri + maxLength: 2083 + minLength: 1 + title: Url + type: string + required: + - name + - image + - url + title: Badge + type: object + title: Badges + type: array + boot-options: + additionalProperties: + example: + default: '0' + description: Start it in web page mode + items: + '0': + description: Tooltip for non Voila boot mode + label: Non Voila + '1': + description: Tooltip for Voila boot mode + label: Voila + label: Boot mode + properties: + default: + title: Default + type: string + description: + title: Description + type: string + items: + additionalProperties: + example: + default: '0' + description: Start it in web page mode + items: + '0': + description: Tooltip for non Voila boot mode + label: Non Voila + '1': + description: Tooltip for Voila boot mode + label: Voila + 
label: Boot mode + properties: + description: + title: Description + type: string + label: + title: Label + type: string + required: + - label + - description + title: BootChoice + type: object + title: Items + type: object + label: + title: Label + type: string + required: + - label + - description + - default + - items + title: BootOption + type: object + description: Service defined boot options. These get injected in the service as + env variables. + title: Boot-Options + type: object + x-patternProperties: + '[a-zA-Z][a-azA-Z0-9_]*': + example: + - default: '0' + description: Start it in web page mode + items: + '0': + description: Tooltip for non Voila boot mode + label: Non Voila + '1': + description: Tooltip for Voila boot mode + label: Voila + label: Boot mode + - default: b + description: Select a theme for the application + items: + a: + description: Using white background + label: Clear + b: + description: Using black and gray tones + label: Dark + label: Application theme + properties: + default: + title: Default + type: string + description: + title: Description + type: string + items: + additionalProperties: + example: + - default: '0' + description: Start it in web page mode + items: + '0': + description: Tooltip for non Voila boot mode + label: Non Voila + '1': + description: Tooltip for Voila boot mode + label: Voila + label: Boot mode + - default: b + description: Select a theme for the application + items: + a: + description: Using white background + label: Clear + b: + description: Using black and gray tones + label: Dark + label: Application theme + properties: + description: + title: Description + type: string + label: + title: Label + type: string + required: + - label + - description + title: BootChoice + type: object + title: Items + type: object + label: + title: Label + type: string + required: + - label + - description + - default + - items + title: BootOption + type: object + contact: + description: email to correspond to the authors about the node + example: lab@net.flix + format: email + title: Contact + type: string + description: + description: human readable description of the purpose of the node + example: Our best node type + title: Description + type: string + inputs: + additionalProperties: + additionalProperties: false + description: Metadata on a service input port + example: + description: Files downloaded from service connected at the input + displayOrder: 1 + label: Input files - file-wo-widget + type: data:*/* + properties: + contentSchema: + description: jsonschema of this input/output. Required when type='ref_contentSchema' + title: Contentschema + type: object + defaultValue: + anyOf: + - type: boolean + - type: integer + - type: number + - type: string + example: Dog + title: Defaultvalue + description: + description: description of the property + example: Age in seconds since 1970 + title: Description + type: string + displayOrder: + deprecated: true + description: 'DEPRECATED: new display order is taken from the item position. + This will be removed.' 
+ title: Displayorder + type: number + fileToKeyMap: + additionalProperties: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + description: Place the data associated with the named keys in files + example: + dir/input1.txt: key_1 + dir33/input2.txt: key2 + title: Filetokeymap + type: object + x-patternProperties: + .+: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + label: + description: short name for the property + example: Age + title: Label + type: string + type: + description: data type expected on this input glob matching for data type + is allowed + example: number + pattern: ^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ + title: Type + type: string + unit: + description: Units, when it refers to a physical quantity + title: Unit + type: string + widget: + allOf: + - additionalProperties: false + properties: + details: + anyOf: + - additionalProperties: false + properties: + minHeight: + description: minimum Height of the textarea + exclusiveMinimum: true + minimum: 0 + title: Minheight + type: integer + required: + - minHeight + title: TextArea + type: object + - additionalProperties: false + properties: + structure: + items: + additionalProperties: false + properties: + key: + anyOf: + - type: string + - type: boolean + - type: number + title: Key + label: + title: Label + type: string + required: + - key + - label + title: Structure + type: object + minItems: 1 + title: Structure + type: array + required: + - structure + title: SelectBox + type: object + title: Details + type: + allOf: + - description: An enumeration. + enum: + - TextArea + - SelectBox + title: WidgetType + type: string + description: type of the property + required: + - type + - details + title: Widget + type: object + description: custom widget to use instead of the default one determined + from the data-type + title: Widget + required: + - label + - description + - type + title: ServiceInput + type: object + description: definition of the inputs of this node + title: Inputs + type: object + x-patternProperties: + ^[-_a-zA-Z0-9]+$: + additionalProperties: false + description: Metadata on a service input port + example: + - description: Files downloaded from service connected at the input + displayOrder: 1 + label: Input files - file-wo-widget + type: data:*/* + - defaultValue: 0 + description: Time to wait before completion + displayOrder: 2 + label: Sleep Time - v2 + type: number + unit: second + widget: + details: + minHeight: 3 + type: TextArea + - defaultValue: 0 + description: Time to wait before completion + label: Sleep Time - latest + type: number + unit: second + widget: + details: + minHeight: 3 + type: TextArea + - contentSchema: + items: + type: number + title: list[number] + type: array + description: Some array of numbers + label: array_numbers + type: ref_contentSchema + - contentSchema: + properties: + b: + title: Bool + type: boolean + i: + default: 3 + title: Int + type: integer + s: + title: Str + type: string + required: + - b + - s + title: an object named A + type: object + description: Some object + label: my_object + type: ref_contentSchema + properties: + contentSchema: + description: jsonschema of this input/output. 
Required when type='ref_contentSchema' + title: Contentschema + type: object + defaultValue: + anyOf: + - type: boolean + - type: integer + - type: number + - type: string + example: + - Dog + - true + title: Defaultvalue + description: + description: description of the property + example: Age in seconds since 1970 + title: Description + type: string + displayOrder: + deprecated: true + description: 'DEPRECATED: new display order is taken from the item position. + This will be removed.' + title: Displayorder + type: number + fileToKeyMap: + additionalProperties: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + description: Place the data associated with the named keys in files + example: + - dir/input1.txt: key_1 + dir33/input2.txt: key2 + patternProperties: + .+: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + title: Filetokeymap + type: object + label: + description: short name for the property + example: Age + title: Label + type: string + type: + description: data type expected on this input glob matching for data type + is allowed + example: + - number + - boolean + - data:*/* + - data:text/* + - data:[image/jpeg,image/png] + - data:application/json + - data:application/json;schema=https://my-schema/not/really/schema.json + - data:application/vnd.ms-excel + - data:text/plain + - data:application/hdf5 + - data:application/edu.ucdavis@ceclancy.xyz + pattern: ^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ + title: Type + type: string + unit: + description: Units, when it refers to a physical quantity + title: Unit + type: string + widget: + allOf: + - additionalProperties: false + properties: + details: + anyOf: + - additionalProperties: false + properties: + minHeight: + description: minimum Height of the textarea + exclusiveMinimum: 0 + title: Minheight + type: integer + required: + - minHeight + title: TextArea + type: object + - additionalProperties: false + properties: + structure: + items: + additionalProperties: false + properties: + key: + anyOf: + - type: string + - type: boolean + - type: number + title: Key + label: + title: Label + type: string + required: + - key + - label + title: Structure + type: object + minItems: 1 + title: Structure + type: array + required: + - structure + title: SelectBox + type: object + title: Details + type: + allOf: + - description: An enumeration. 
+ enum: + - TextArea + - SelectBox + title: WidgetType + type: string + description: type of the property + required: + - type + - details + title: Widget + type: object + description: custom widget to use instead of the default one determined + from the data-type + title: Widget + required: + - label + - description + - type + title: ServiceInput + type: object + integration-version: + description: integration version number + example: 1.0.0 + pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ + title: Integration-Version + type: string + key: + description: distinctive name for the node based on the docker registry path + pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ + title: Key + type: string + name: + description: short, human readable name for the node + example: Fast Counter + title: Name + type: string + outputs: + additionalProperties: + additionalProperties: false + description: Base class for service input/outputs + example: + description: Time the service waited before completion + displayOrder: 2 + label: Time Slept + type: number + properties: + contentSchema: + description: jsonschema of this input/output. Required when type='ref_contentSchema' + title: Contentschema + type: object + description: + description: description of the property + example: Age in seconds since 1970 + title: Description + type: string + displayOrder: + deprecated: true + description: 'DEPRECATED: new display order is taken from the item position. + This will be removed.' + title: Displayorder + type: number + fileToKeyMap: + additionalProperties: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + description: Place the data associated with the named keys in files + example: + dir/input1.txt: key_1 + dir33/input2.txt: key2 + title: Filetokeymap + type: object + x-patternProperties: + .+: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + label: + description: short name for the property + example: Age + title: Label + type: string + type: + description: data type expected on this input glob matching for data type + is allowed + example: number + pattern: ^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ + title: Type + type: string + unit: + description: Units, when it refers to a physical quantity + title: Unit + type: string + widget: + allOf: + - additionalProperties: false + properties: + details: + anyOf: + - additionalProperties: false + properties: + minHeight: + description: minimum Height of the textarea + exclusiveMinimum: true + minimum: 0 + title: Minheight + type: integer + required: + - minHeight + title: TextArea + type: object + - additionalProperties: false + properties: + structure: + items: + additionalProperties: false + properties: + key: + anyOf: + - type: string + - type: boolean + - type: number + title: Key + label: + title: Label + type: string + required: + - key + - label + title: Structure + type: object + minItems: 1 + title: Structure + type: array + required: + - structure + title: SelectBox + type: object + title: Details + type: + allOf: + - description: An enumeration. 
+ enum: + - TextArea + - SelectBox + title: WidgetType + type: string + description: type of the property + required: + - type + - details + title: Widget + type: object + deprecated: true + description: custom widget to use instead of the default one determined + from the data-type + title: Widget + required: + - label + - description + - type + title: ServiceOutput + type: object + description: definition of the outputs of this node + title: Outputs + type: object + x-patternProperties: + ^[-_a-zA-Z0-9]+$: + additionalProperties: false + description: Base class for service input/outputs + example: + - description: Time the service waited before completion + displayOrder: 2 + label: Time Slept + type: number + - description: Time the service waited before completion + displayOrder: 2 + label: Time Slept - units + type: number + unit: second + - description: Time the service waited before completion + label: Time Slept - w/o displayorder + type: number + unit: second + - description: Output file uploaded from the outputs folder + displayOrder: 4 + label: Output file 1 + type: data:*/* + properties: + contentSchema: + description: jsonschema of this input/output. Required when type='ref_contentSchema' + title: Contentschema + type: object + description: + description: description of the property + example: Age in seconds since 1970 + title: Description + type: string + displayOrder: + deprecated: true + description: 'DEPRECATED: new display order is taken from the item position. + This will be removed.' + title: Displayorder + type: number + fileToKeyMap: + additionalProperties: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + description: Place the data associated with the named keys in files + example: + - dir/input1.txt: key_1 + dir33/input2.txt: key2 + patternProperties: + .+: + pattern: ^[-_a-zA-Z0-9]+$ + type: string + title: Filetokeymap + type: object + label: + description: short name for the property + example: Age + title: Label + type: string + type: + description: data type expected on this input glob matching for data type + is allowed + example: + - number + - boolean + - data:*/* + - data:text/* + - data:[image/jpeg,image/png] + - data:application/json + - data:application/json;schema=https://my-schema/not/really/schema.json + - data:application/vnd.ms-excel + - data:text/plain + - data:application/hdf5 + - data:application/edu.ucdavis@ceclancy.xyz + pattern: ^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ + title: Type + type: string + unit: + description: Units, when it refers to a physical quantity + title: Unit + type: string + widget: + allOf: + - additionalProperties: false + properties: + details: + anyOf: + - additionalProperties: false + properties: + minHeight: + description: minimum Height of the textarea + exclusiveMinimum: 0 + title: Minheight + type: integer + required: + - minHeight + title: TextArea + type: object + - additionalProperties: false + properties: + structure: + items: + additionalProperties: false + properties: + key: + anyOf: + - type: string + - type: boolean + - type: number + title: Key + label: + title: Label + type: string + required: + - key + - label + title: Structure + type: object + minItems: 1 + title: Structure + type: array + required: + - structure + title: SelectBox + type: object + title: Details + type: + allOf: + - description: An enumeration. 
+ enum: + - TextArea + - SelectBox + title: WidgetType + type: string + description: type of the property + required: + - type + - details + title: Widget + type: object + deprecated: true + description: custom widget to use instead of the default one determined + from the data-type + title: Widget + required: + - label + - description + - type + title: ServiceOutput + type: object + thumbnail: + description: url to the thumbnail + example: https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png + format: uri + maxLength: 2083 + minLength: 1 + title: Thumbnail + type: string + type: + allOf: + - description: An enumeration. + enum: + - computational + - dynamic + - frontend + - backend + title: ServiceType + type: string + description: service type + example: computational + version: + description: service version number + pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ + title: Version + type: string +required: +- name +- description +- key +- version +- type +- authors +- contact +- inputs +- outputs +title: ServiceDockerData +type: object diff --git a/api/specs/director/schemas/node-meta-v0.0.1-pydantic.json b/api/specs/director/schemas/node-meta-v0.0.1-pydantic.json new file mode 100644 index 00000000000..86d743d1d1b --- /dev/null +++ b/api/specs/director/schemas/node-meta-v0.0.1-pydantic.json @@ -0,0 +1,2419 @@ +{ + "title": "ServiceDockerData", + "description": "Static metadata for a service injected in the image labels\n\nThis is one to one with node-meta-v0.0.1.json", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "short, human readable name for the node", + "example": "Fast Counter", + "type": "string" + }, + "thumbnail": { + "title": "Thumbnail", + "description": "url to the thumbnail", + "examples": [ + "https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png" + ], + "minLength": 1, + "maxLength": 2083, + "format": "uri", + "type": "string" + }, + "description": { + "title": "Description", + "description": "human readable description of the purpose of the node", + "examples": [ + "Our best node type", + "The mother of all nodes, makes your numbers shine!" 
+ ], + "type": "string" + }, + "key": { + "title": "Key", + "description": "distinctive name for the node based on the docker registry path", + "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", + "type": "string" + }, + "version": { + "title": "Version", + "description": "service version number", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "integration-version": { + "title": "Integration-Version", + "description": "integration version number", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "examples": [ + "1.0.0" + ], + "type": "string" + }, + "type": { + "description": "service type", + "examples": [ + "computational" + ], + "allOf": [ + { + "title": "ServiceType", + "description": "An enumeration.", + "enum": [ + "computational", + "dynamic", + "frontend", + "backend" + ], + "type": "string" + } + ] + }, + "badges": { + "title": "Badges", + "type": "array", + "items": { + "title": "Badge", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "Name of the subject", + "examples": [ + "travis-ci", + "coverals.io", + "github.io" + ], + "type": "string" + }, + "image": { + "title": "Image", + "description": "Url to the badge", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master", + "https://coveralls.io/repos/github/ITISFoundation/osparc-simcore/badge.svg?branch=master", + "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation" + ], + "minLength": 1, + "maxLength": 2083, + "format": "uri", + "type": "string" + }, + "url": { + "title": "Url", + "description": "Link to the status", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: build, test and pushing images'", + "https://coveralls.io/github/ITISFoundation/osparc-simcore?branch=master 'Test coverage'", + "https://itisfoundation.github.io/" + ], + "minLength": 1, + "maxLength": 2083, + "format": "uri", + "type": "string" + } + }, + "required": [ + "name", + "image", + "url" + ], + "additionalProperties": false + } + }, + "authors": { + "title": "Authors", + "minItems": 1, + "type": "array", + "items": { + "title": "Author", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "Name of the author", + "example": "Jim Knopf", + "type": "string" + }, + "email": { + "title": "Email", + "description": "Email address", + "examples": [ + "sun@sense.eight", + "deleen@minbar.bab" + ], + "type": "string", + "format": "email" + }, + "affiliation": { + "title": "Affiliation", + "description": "Affiliation of the author", + "examples": [ + "Sense8", + "Babylon 5" + ], + "type": "string" + } + }, + "required": [ + "name", + "email" + ], + "additionalProperties": false + } + }, + "contact": { + "title": "Contact", + "description": "email to correspond to the authors about the node", + "examples": [ + "lab@net.flix" + ], + "type": "string", + "format": "email" + }, + "inputs": { + "title": "Inputs", + "description": "definition of the inputs of this node", + "type": "object", + "patternProperties": { + "^[-_a-zA-Z0-9]+$": { + "title": "ServiceInput", + "description": "Metadata on a service input port", + "type": "object", + "properties": { + "displayOrder": 
{ + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "defaultValue": { + "title": "Defaultvalue", + "examples": [ + "Dog", + true + ], + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ] + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + 
"displayOrder": 1, + "label": "Input files - file-wo-widget", + "description": "Files downloaded from service connected at the input", + "type": "data:*/*" + }, + { + "displayOrder": 2, + "label": "Sleep Time - v2", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "Sleep Time - latest", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "array_numbers", + "description": "Some array of numbers", + "type": "ref_contentSchema", + "contentSchema": { + "title": "list[number]", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "label": "my_object", + "description": "Some object", + "type": "ref_contentSchema", + "contentSchema": { + "title": "an object named A", + "type": "object", + "properties": { + "i": { + "title": "Int", + "type": "integer", + "default": 3 + }, + "b": { + "title": "Bool", + "type": "boolean" + }, + "s": { + "title": "Str", + "type": "string" + } + }, + "required": [ + "b", + "s" + ] + } + } + ] + } + }, + "additionalProperties": { + "title": "ServiceInput", + "description": "Metadata on a service input port", + "type": "object", + "properties": { + "displayOrder": { + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. 
Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "defaultValue": { + "title": "Defaultvalue", + "examples": [ + "Dog", + true + ], + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ] + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + "displayOrder": 1, + "label": "Input files - file-wo-widget", + "description": "Files downloaded from service connected at the input", + "type": "data:*/*" + }, + { + "displayOrder": 2, + "label": "Sleep Time - v2", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "Sleep Time - latest", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "array_numbers", + "description": "Some array of numbers", + "type": "ref_contentSchema", + "contentSchema": { + "title": "list[number]", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "label": "my_object", + "description": "Some object", + "type": "ref_contentSchema", + "contentSchema": { + "title": "an object named A", + "type": "object", + "properties": { + "i": { + "title": "Int", + "type": "integer", + "default": 3 + }, + "b": { + "title": "Bool", + "type": "boolean" + }, + "s": { + "title": "Str", + "type": 
"string" + } + }, + "required": [ + "b", + "s" + ] + } + } + ] + } + }, + "outputs": { + "title": "Outputs", + "description": "definition of the outputs of this node", + "type": "object", + "patternProperties": { + "^[-_a-zA-Z0-9]+$": { + "title": "ServiceOutput", + "description": "Base class for service input/outputs", + "type": "object", + "properties": { + "displayOrder": { + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "deprecated": true, + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + 
"details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + "displayOrder": 2, + "label": "Time Slept", + "description": "Time the service waited before completion", + "type": "number" + }, + { + "displayOrder": 2, + "label": "Time Slept - units", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Time Slept - w/o displayorder", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Output file 1", + "displayOrder": 4.0, + "description": "Output file uploaded from the outputs folder", + "type": "data:*/*" + } + ] + } + }, + "additionalProperties": { + "title": "ServiceOutput", + "description": "Base class for service input/outputs", + "type": "object", + "properties": { + "displayOrder": { + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. 
Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "deprecated": true, + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + "displayOrder": 2, + "label": "Time Slept", + "description": "Time the service waited before completion", + "type": "number" + }, + { + "displayOrder": 2, + "label": "Time Slept - units", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Time Slept - w/o displayorder", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Output file 1", + "displayOrder": 4.0, + "description": "Output file uploaded from the outputs folder", + "type": "data:*/*" + } + ] + } + }, + "boot-options": { + "title": "Boot-Options", + "description": "Service defined boot options. 
These get injected in the service as env variables.", + "type": "object", + "patternProperties": { + "[a-zA-Z][a-azA-Z0-9_]*": { + "title": "BootOption", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "default": { + "title": "Default", + "type": "string" + }, + "items": { + "title": "Items", + "type": "object", + "additionalProperties": { + "title": "BootChoice", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "required": [ + "label", + "description" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + } + }, + "required": [ + "label", + "description", + "default", + "items" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + }, + "additionalProperties": { + "title": "BootOption", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "default": { + "title": "Default", + "type": "string" + }, + "items": { + "title": "Items", + "type": "object", + "additionalProperties": { + "title": "BootChoice", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "required": [ + "label", + "description" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + } + }, + "required": [ + "label", + "description", + "default", + "items" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": 
"Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + } + }, + "required": [ + "name", + "description", + "key", + "version", + "type", + "authors", + "contact", + "inputs", + "outputs" + ], + "additionalProperties": false, + "examples": [ + { + "name": "oSparc Python Runner", + "key": "simcore/services/comp/osparc-python-runner", + "type": "computational", + "integration-version": "1.0.0", + "version": "1.7.0", + "description": "oSparc Python Runner", + "contact": "smith@company.com", + "authors": [ + { + "name": "John Smith", + "email": "smith@company.com", + "affiliation": "Company" + }, + { + "name": "Richard Brown", + "email": "brown@uni.edu", + "affiliation": "University" + } + ], + "inputs": { + "input_1": { + "displayOrder": 1, + "label": "Input data", + "description": "Any code, requirements or data file", + "type": "data:*/*" + } + }, + "outputs": { + "output_1": { + "displayOrder": 1, + "label": "Output data", + "description": "All data produced by the script is zipped as output_data.zip", + "type": "data:*/*", + "fileToKeyMap": { + "output_data.zip": "output_1" + } + } + } + }, + { + "name": "oSparc Python Runner", + "key": "simcore/services/comp/osparc-python-runner", + "type": "computational", + "integration-version": "1.0.0", + "version": "1.7.0", + "description": "oSparc Python Runner with boot options", + "contact": "smith@company.com", + "authors": [ + { + "name": "John Smith", + "email": "smith@company.com", + "affiliation": "Company" + }, + { + "name": "Richard Brown", + "email": "brown@uni.edu", + "affiliation": "University" + } + ], + "inputs": { + "input_1": { + "label": "Input data", + "description": "Any code, requirements or data file", + "type": "data:*/*" + } + }, + "outputs": { + "output_1": { + "label": "Output data", + "description": "All data produced by the script is zipped as output_data.zip", + "type": "data:*/*", + "fileToKeyMap": { + "output_data.zip": "output_1" + } + } + }, + "boot-options": { + "example_service_defined_boot_mode": { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + "example_service_defined_theme_selection": { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + } + } + ], + "definitions": { + "ServiceType": { + "title": "ServiceType", + "description": "An enumeration.", + "enum": [ + "computational", + "dynamic", + "frontend", + "backend" + ], + "type": "string" + }, + "Badge": { + "title": "Badge", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "Name of the subject", + "examples": [ + "travis-ci", + "coverals.io", + "github.io" + ], + "type": "string" + }, + "image": { + "title": "Image", + "description": "Url to the badge", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master", + "https://coveralls.io/repos/github/ITISFoundation/osparc-simcore/badge.svg?branch=master", + 
"https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation" + ], + "minLength": 1, + "maxLength": 2083, + "format": "uri", + "type": "string" + }, + "url": { + "title": "Url", + "description": "Link to the status", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: build, test and pushing images'", + "https://coveralls.io/github/ITISFoundation/osparc-simcore?branch=master 'Test coverage'", + "https://itisfoundation.github.io/" + ], + "minLength": 1, + "maxLength": 2083, + "format": "uri", + "type": "string" + } + }, + "required": [ + "name", + "image", + "url" + ], + "additionalProperties": false + }, + "Author": { + "title": "Author", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "Name of the author", + "example": "Jim Knopf", + "type": "string" + }, + "email": { + "title": "Email", + "description": "Email address", + "examples": [ + "sun@sense.eight", + "deleen@minbar.bab" + ], + "type": "string", + "format": "email" + }, + "affiliation": { + "title": "Affiliation", + "description": "Affiliation of the author", + "examples": [ + "Sense8", + "Babylon 5" + ], + "type": "string" + } + }, + "required": [ + "name", + "email" + ], + "additionalProperties": false + }, + "WidgetType": { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + }, + "TextArea": { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + "Structure": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + }, + "SelectBox": { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + }, + "Widget": { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ 
+ { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + }, + "ServiceInput": { + "title": "ServiceInput", + "description": "Metadata on a service input port", + "type": "object", + "properties": { + "displayOrder": { + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "defaultValue": { + "title": "Defaultvalue", + "examples": [ + "Dog", + true + ], + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ] + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + 
"title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + "displayOrder": 1, + "label": "Input files - file-wo-widget", + "description": "Files downloaded from service connected at the input", + "type": "data:*/*" + }, + { + "displayOrder": 2, + "label": "Sleep Time - v2", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "Sleep Time - latest", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": { + "type": "TextArea", + "details": { + "minHeight": 3 + } + } + }, + { + "label": "array_numbers", + "description": "Some array of numbers", + "type": "ref_contentSchema", + "contentSchema": { + "title": "list[number]", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "label": "my_object", + "description": "Some object", + "type": "ref_contentSchema", + "contentSchema": { + "title": "an object named A", + "type": "object", + "properties": { + "i": { + "title": "Int", + "type": "integer", + "default": 3 + }, + "b": { + "title": "Bool", + "type": "boolean" + }, + "s": { + "title": "Str", + "type": "string" + } + }, + "required": [ + "b", + "s" + ] + } + } + ] + }, + "ServiceOutput": { + "title": "ServiceOutput", + "description": "Base class for service input/outputs", + "type": "object", + "properties": { + "displayOrder": { + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "title": "Label", + "description": "short name for the property", + "example": "Age", + "type": "string" + }, + "description": { + "title": "Description", + "description": "description of the property", + "example": "Age in seconds since 1970", + "type": "string" + }, + "type": { + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ], + "type": "string" + }, + "contentSchema": { + "title": "Contentschema", + "description": "jsonschema of this input/output. 
Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files", + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ], + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "additionalProperties": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "unit": { + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "type": "string" + }, + "widget": { + "title": "Widget", + "description": "custom widget to use instead of the default one determined from the data-type", + "deprecated": true, + "allOf": [ + { + "title": "Widget", + "type": "object", + "properties": { + "type": { + "description": "type of the property", + "allOf": [ + { + "title": "WidgetType", + "description": "An enumeration.", + "enum": [ + "TextArea", + "SelectBox" + ], + "type": "string" + } + ] + }, + "details": { + "title": "Details", + "anyOf": [ + { + "title": "TextArea", + "type": "object", + "properties": { + "minHeight": { + "title": "Minheight", + "description": "minimum Height of the textarea", + "exclusiveMinimum": 0, + "type": "integer" + } + }, + "required": [ + "minHeight" + ], + "additionalProperties": false + }, + { + "title": "SelectBox", + "type": "object", + "properties": { + "structure": { + "title": "Structure", + "minItems": 1, + "type": "array", + "items": { + "title": "Structure", + "type": "object", + "properties": { + "key": { + "title": "Key", + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "number" + } + ] + }, + "label": { + "title": "Label", + "type": "string" + } + }, + "required": [ + "key", + "label" + ], + "additionalProperties": false + } + } + }, + "required": [ + "structure" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "type", + "details" + ], + "additionalProperties": false + } + ] + } + }, + "required": [ + "label", + "description", + "type" + ], + "additionalProperties": false, + "examples": [ + { + "displayOrder": 2, + "label": "Time Slept", + "description": "Time the service waited before completion", + "type": "number" + }, + { + "displayOrder": 2, + "label": "Time Slept - units", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Time Slept - w/o displayorder", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second" + }, + { + "label": "Output file 1", + "displayOrder": 4.0, + "description": "Output file uploaded from the outputs folder", + "type": "data:*/*" + } + ] + }, + "BootChoice": { + "title": "BootChoice", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "required": [ + "label", + "description" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + 
"description": "Using black and gray tones" + } + } + } + ] + }, + "BootOption": { + "title": "BootOption", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "default": { + "title": "Default", + "type": "string" + }, + "items": { + "title": "Items", + "type": "object", + "additionalProperties": { + "title": "BootChoice", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "required": [ + "label", + "description" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + } + }, + "required": [ + "label", + "description", + "default", + "items" + ], + "examples": [ + { + "label": "Boot mode", + "description": "Start it in web page mode", + "default": "0", + "items": { + "0": { + "label": "Non Voila", + "description": "Tooltip for non Voila boot mode" + }, + "1": { + "label": "Voila", + "description": "Tooltip for Voila boot mode" + } + } + }, + { + "label": "Application theme", + "description": "Select a theme for the application", + "default": "b", + "items": { + "a": { + "label": "Clear", + "description": "Using white background" + }, + "b": { + "label": "Dark", + "description": "Using black and gray tones" + } + } + } + ] + } + } +} diff --git a/api/specs/director/schemas/node-meta-v0.0.1.json b/api/specs/director/schemas/node-meta-v0.0.1.json new file mode 100644 index 00000000000..388939d5716 --- /dev/null +++ b/api/specs/director/schemas/node-meta-v0.0.1.json @@ -0,0 +1,488 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "simcore node", + "description": "Description of a simcore node 'class' with input and output", + "type": "object", + "additionalProperties": false, + "required": [ + "key", + "version", + "type", + "name", + "description", + "authors", + "contact", + "inputs", + "outputs" + ], + "properties": { + "key": { + "type": "string", + "description": "distinctive name for the node based on the docker registry path", + "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", + "examples": [ + "simcore/services/comp/itis/sleeper", + "simcore/services/dynamic/3dviewer" + ] + }, + "integration-version": { + "type": "string", + "description": "integration version number", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "examples": [ + "1.0.0" + ] + }, + "version": { + "type": "string", + "description": "service version number", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "examples": [ + "1.0.0", + "0.0.1" + ] + }, + "version_display": { + "type": "string", + "description": "human readable version" + }, + "type": 
{ + "type": "string", + "description": "service type", + "enum": [ + "frontend", + "computational", + "dynamic" + ], + "examples": [ + "computational" + ] + }, + "name": { + "type": "string", + "description": "short, human readable name for the node", + "examples": [ + "Fast Counter" + ] + }, + "thumbnail": { + "type": "string", + "description": "url to the thumbnail", + "examples": [ + "https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png" + ] + }, + "badges": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "image", + "url" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Name of the subject", + "examples": [ + "travis-ci", + "coverals.io", + "github.io" + ] + }, + "image": { + "type": "string", + "description": "Url to the shield", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master", + "https://coveralls.io/repos/github/ITISFoundation/osparc-simcore/badge.svg?branch=master", + "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation" + ] + }, + "url": { + "type": "string", + "description": "Link to status", + "examples": [ + "https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: build, test and pushing images'", + "https://coveralls.io/github/ITISFoundation/osparc-simcore?branch=master 'Test coverage'", + "https://itisfoundation.github.io/" + ] + } + } + } + }, + "description": { + "type": "string", + "description": "human readable description of the purpose of the node", + "examples": [ + "Our best node type", + "The mother of all nodes, makes your numbers shine!" + ] + }, + "authors": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "required": [ + "name", + "email" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "Name of the author", + "examples": [ + "Sun Bak", + "Delenn" + ] + }, + "email": { + "description": "Email address", + "type": "string", + "format": "email", + "examples": [ + "sun@sense.eight", + "deleen@minbar.bab" + ] + }, + "affiliation": { + "description": "Affiliation of the author", + "type": "string", + "examples": [ + "Sense8", + "Babylon 5" + ] + } + } + } + }, + "contact": { + "type": "string", + "format": "email", + "description": "email to correspond to the authors about the node", + "examples": [ + "lab@net.flix" + ] + }, + "inputs": { + "type": "object", + "description": "definition of the inputs of this node", + "patternProperties": { + "^[-_a-zA-Z0-9]+$": { + "type": "object", + "description": "all the input configurable for this service", + "additionalProperties": false, + "required": [ + "displayOrder", + "label", + "description", + "type" + ], + "properties": { + "displayOrder": { + "description": "DEPRECATED: new display order is taken from the item position. 
This property will be removed.", + "deprecated": true, + "type": "number" + }, + "label": { + "type": "string", + "description": "short name for the property", + "examples": [ + "Age" + ] + }, + "description": { + "type": "string", + "description": "description of the property", + "examples": [ + "Age in seconds since 1970" + ] + }, + "type": { + "type": "string", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "description": "data type expected on this input glob matching for data type is allowed", + "examples": [ + "number", + "boolean", + "data:*/*", + "data:text/*", + "data:[image/jpeg,image/png]", + "data:application/json", + "data:application/json;schema=https://my-schema/not/really/schema.json", + "data:application/vnd.ms-excel", + "data:text/plain", + "data:application/hdf5", + "data:application/edu.ucdavis@ceclancy.xyz" + ] + }, + "contentSchema": { + "title": "Content Schema", + "description": "jsonschema of the content at this input/output. Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "description": "Place the data associated with the named keys in files", + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ] + }, + "defaultValue": { + "description": "initial value for this input", + "type": [ + "string", + "number", + "integer", + "boolean" + ], + "examples": [ + "Dog", + true + ] + }, + "unit": { + "title": "Unit", + "description": "Units of this input value, if a physical quantity", + "type": "string" + }, + "widget": { + "description": "custom widget to use instead of the default one determined from the data-type", + "anyOf": [ + { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "description": "type of the property", + "type": "string", + "enum": [ + "TextArea" + ] + }, + "minHeight": { + "description": "minimum Height of the textarea", + "type": "integer", + "minimum": 1 + } + } + }, + { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "structure" + ], + "properties": { + "type": { + "description": "type of the property", + "type": "string", + "enum": [ + "SelectBox" + ] + }, + "structure": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "key", + "label" + ], + "properties": { + "key": { + "type": [ + "string", + "boolean", + "number" + ] + }, + "label": { + "type": "string" + } + }, + "examples": [ + [ + { + "key": "rat", + "label": "The Rat" + }, + { + "key": "dog", + "label": "Bello the Dog" + } + ] + ] + } + } + } + } + ] + } + } + } + } + }, + "outputs": { + "type": "object", + "description": "definition of the outputs of this node", + "patternProperties": { + "^[-_a-zA-Z0-9]+$": { + "type": "object", + "description": "all the output produced by this node", + "additionalProperties": false, + "required": [ + "displayOrder", + "label", + "description", + "type" + ], + "properties": { + "displayOrder": { + "type": "number", + "description": "use this to numerically sort the properties for display", + "examples": [ + 1, + -0.2 + ] + }, + "label": { + "type": "string", + "description": "short name for the property", + "examples": [ + "Age" + ] + }, + "description": { + "type": "string", + "description": "description of the 
property", + "examples": [ + "Age in seconds since 1970" + ] + }, + "type": { + "type": "string", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:[^/\\s,]+/[^/\\s,]+)$", + "description": "data type expected on this output", + "examples": [ + "number", + "integer", + "boolean", + "string", + "data:application/json", + "data:application/vnd.ms-excel ", + "data:text/plain", + "data:application/hdf5" + ] + }, + "contentSchema": { + "title": "Content Schema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'", + "type": "object" + }, + "fileToKeyMap": { + "description": "Place the data stored in the named files and store it in the locations pointed to by the respective output key.", + "type": "object", + "patternProperties": { + ".+": { + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" + } + }, + "examples": [ + { + "dir/input1.txt": "key_1", + "dir33/input2.txt": "key2" + } + ] + }, + "unit": { + "title": "Unit", + "description": "Units of the output value, if a physical quantity", + "type": "string" + } + } + } + } + }, + "boot-options": { + "title": "Boot Options", + "description": "Service defined boot options. These get injected in the service as env variables.", + "type": "object", + "patternProperties": { + "^[_a-zA-Z0-9]+$": { + "title": "BootOptionMode", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "default": { + "title": "Default", + "type": "string" + }, + "items": { + "title": "Items", + "type": "object", + "additionalProperties": { + "title": "BootOptionItem", + "type": "object", + "properties": { + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "required": [ + "label", + "description" + ] + } + } + }, + "required": [ + "label", + "description", + "default", + "items" + ] + } + } + }, + "image_digest": { + "type": "string", + "description": "Image manifest digest. 
Provides a 'footprint' for the service image", + "examples": [ + "sha256:b7c8f6a401cb12d7fe36970b6927e03cb429b395fc9f2b0104291e12b81a5100" + ] + } + } +} diff --git a/api/specs/common/schemas/running_service.yaml b/api/specs/director/schemas/running_service.yaml similarity index 96% rename from api/specs/common/schemas/running_service.yaml rename to api/specs/director/schemas/running_service.yaml index 09a8cf35d07..8b82cf89674 100644 --- a/api/specs/common/schemas/running_service.yaml +++ b/api/specs/director/schemas/running_service.yaml @@ -1,112 +1,112 @@ -components: - schemas: - RunningServicesEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/RunningServicesArray" - error: - nullable: true - default: null - - RunningServicesArray: - type: array - items: - $ref: "#/components/schemas/RunningServiceType" - - RunningServiceEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/RunningServiceType" - error: - nullable: true - default: null - - RunningServiceType: - type: object - required: - - published_port - - service_uuid - - service_key - - service_version - - service_host - - service_port - - service_state - - user_id - properties: - published_port: - description: The ports where the service provides its interface - type: integer - format: int32 - minimum: 1 - example: 30000 - entry_point: - description: The entry point where the service provides its interface if specified - type: string - example: /the/entry/point/is/here - service_uuid: - description: The UUID attached to this service - type: string - # format: UUID - example: 123e4567-e89b-12d3-a456-426655440000 - service_key: - type: string - description: distinctive name for the node based on the docker registry path - pattern: '^(simcore)/(services)/(comp|dynamic)(/[\w/-]+)+$' - example: - - simcore/services/comp/itis/sleeper - - simcore/services/dynamic/3dviewer - service_version: - type: string - description: semantic version number - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - example: - - 1.0.0 - - 0.0.1 - service_host: - description: service host name within the network - type: string - example: jupyter_E1O2E-LAH - service_port: - description: port to access the service within the network - type: integer - minimum: 1 - example: 8081 - service_basepath: - description: different base path where current service is mounted otherwise defaults to root - type: string - example: "/x/E1O2E-LAH" - default: "" - service_state: - description: > - the service state - * 'pending' - The service is waiting for resources to start - * 'pulling' - The service is being pulled from the registry - * 'starting' - The service is starting - * 'running' - The service is running - * 'complete' - The service completed - * 'failed' - The service failed to start - type: string - enum: - - pending - - pulling - - starting - - running - - complete - - failed - service_message: - description: >- - the service message - type: string - example: no suitable node (insufficient resources on 1 node) - user_id: - description: >- - the user that started the service - type: string - example: "123" +components: + schemas: + RunningServicesEnveloped: + type: object + required: + - data + properties: + data: + $ref: "#/components/schemas/RunningServicesArray" + error: + nullable: true + default: null + + RunningServicesArray: + type: array + items: + $ref: 
"#/components/schemas/RunningServiceType" + + RunningServiceEnveloped: + type: object + required: + - data + properties: + data: + $ref: "#/components/schemas/RunningServiceType" + error: + nullable: true + default: null + + RunningServiceType: + type: object + required: + - published_port + - service_uuid + - service_key + - service_version + - service_host + - service_port + - service_state + - user_id + properties: + published_port: + description: The ports where the service provides its interface + type: integer + format: int32 + minimum: 1 + example: 30000 + entry_point: + description: The entry point where the service provides its interface if specified + type: string + example: /the/entry/point/is/here + service_uuid: + description: The UUID attached to this service + type: string + # format: UUID + example: 123e4567-e89b-12d3-a456-426655440000 + service_key: + type: string + description: distinctive name for the node based on the docker registry path + pattern: '^(simcore)/(services)/(comp|dynamic)(/[\w/-]+)+$' + example: + - simcore/services/comp/itis/sleeper + - simcore/services/dynamic/3dviewer + service_version: + type: string + description: semantic version number + pattern: >- + ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ + example: + - 1.0.0 + - 0.0.1 + service_host: + description: service host name within the network + type: string + example: jupyter_E1O2E-LAH + service_port: + description: port to access the service within the network + type: integer + minimum: 1 + example: 8081 + service_basepath: + description: different base path where current service is mounted otherwise defaults to root + type: string + example: "/x/E1O2E-LAH" + default: "" + service_state: + description: > + the service state + * 'pending' - The service is waiting for resources to start + * 'pulling' - The service is being pulled from the registry + * 'starting' - The service is starting + * 'running' - The service is running + * 'complete' - The service completed + * 'failed' - The service failed to start + type: string + enum: + - pending + - pulling + - starting + - running + - complete + - failed + service_message: + description: >- + the service message + type: string + example: no suitable node (insufficient resources on 1 node) + user_id: + description: >- + the user that started the service + type: string + example: "123" diff --git a/api/specs/director/schemas/scripts/create_node-meta-schema.py b/api/specs/director/schemas/scripts/create_node-meta-schema.py new file mode 100644 index 00000000000..89623889b9f --- /dev/null +++ b/api/specs/director/schemas/scripts/create_node-meta-schema.py @@ -0,0 +1,22 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import json +import sys +from pathlib import Path + +import jsonref +from common_library.json_serialization import json_dumps +from models_library.services import ServiceMetaDataPublished + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +if __name__ == "__main__": + with Path.open(CURRENT_DIR.parent / "node-meta-v0.0.1-pydantic.json", "w") as f: + schema = json_dumps(ServiceMetaDataPublished.model_json_schema()) + schema_without_ref = jsonref.loads(schema) + + json.dump(schema_without_ref, f, indent=2) diff --git a/api/specs/director/schemas/scripts/create_project-schema.py 
b/api/specs/director/schemas/scripts/create_project-schema.py new file mode 100644 index 00000000000..6bcbc3a9d1a --- /dev/null +++ b/api/specs/director/schemas/scripts/create_project-schema.py @@ -0,0 +1,24 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import json +import sys +from pathlib import Path + +import jsonref +from common_library.json_serialization import json_dumps +from models_library.projects import Project + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +if __name__ == "__main__": + with Path.open( + CURRENT_DIR.parent / "common/schemas/project-v0.0.1-pydantic.json", "w" + ) as f: + schema = json_dumps(Project.model_json_schema()) + schema_without_ref = jsonref.loads(schema) + + json.dump(schema_without_ref, f, indent=2) diff --git a/api/specs/director/schemas/scripts/remove_definitions.py b/api/specs/director/schemas/scripts/remove_definitions.py new file mode 100644 index 00000000000..87bf47a8304 --- /dev/null +++ b/api/specs/director/schemas/scripts/remove_definitions.py @@ -0,0 +1,37 @@ +import argparse +import logging +import sys +from pathlib import Path + +import yaml + +_logger = logging.getLogger(__name__) + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +SCHEMAS_DIR = CURRENT_DIR.parent + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + "Remove Definitions", + description="prunes 'definitions' from json-schemas in 'source_file_name' and dumps it into 'target_file_name'", + ) + parser.add_argument("source_file_name", type=str) + parser.add_argument("target_file_name", type=str) + args = parser.parse_args() + + file_source_path: Path = SCHEMAS_DIR / args.source_file_name + file_target_path: Path = SCHEMAS_DIR / args.target_file_name + + try: + data = yaml.safe_load(file_source_path.read_text()) + data.pop("definitions", None) + with Path.open(file_target_path, "w") as file_stream: + yaml.safe_dump(data, file_stream) + except yaml.YAMLError: + _logger.exception( + "Ignoring error while load+pop+dump %s -> %s", + file_source_path, + file_target_path, + ) diff --git a/api/specs/common/schemas/services.yaml b/api/specs/director/schemas/services.yaml similarity index 92% rename from api/specs/common/schemas/services.yaml rename to api/specs/director/schemas/services.yaml index 803ad83294e..f88b3f48f10 100644 --- a/api/specs/common/schemas/services.yaml +++ b/api/specs/director/schemas/services.yaml @@ -1,69 +1,69 @@ -components: - schemas: - ServicesEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "./node-meta-v0.0.1-converted.yaml" - error: - nullable: true - default: null - - ServiceExtras: - type: object - required: - - node_requirements - properties: - node_requirements: - type: object - required: - - CPU - - RAM - properties: - CPU: - type: number - default: 1.0 - minimum: 1.0 - GPU: - type: integer - minimum: 0 - RAM: - type: integer - format: int64 - minimum: 1024 - MPI: - type: integer - maximum: 1 - - service_build_details: - type: object - properties: - build_date: - type: string - vcs_ref: - type: string - vcs_url: - type: string - - container_spec: - type: object - properties: - command: - type: array - items: - type: string - - ServiceExtrasEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/ServiceExtras" - error: - nullable: true - default: null 
+components: + schemas: + ServicesEnveloped: + type: object + required: + - data + properties: + data: + type: array + items: + $ref: "./node-meta-v0.0.1-pydantic-converted-clean.yaml" + error: + nullable: true + default: null + + ServiceExtras: + type: object + required: + - node_requirements + properties: + node_requirements: + type: object + required: + - CPU + - RAM + properties: + CPU: + type: number + default: 1.0 + minimum: 1.0 + GPU: + type: integer + minimum: 0 + RAM: + type: integer + format: int64 + minimum: 1024 + MPI: + type: integer + maximum: 1 + + service_build_details: + type: object + properties: + build_date: + type: string + vcs_ref: + type: string + vcs_url: + type: string + + container_spec: + type: object + properties: + command: + type: array + items: + type: string + + ServiceExtrasEnveloped: + type: object + required: + - data + properties: + data: + $ref: "#/components/schemas/ServiceExtras" + error: + nullable: true + default: null diff --git a/api/specs/storage/openapi.yaml b/api/specs/storage/openapi.yaml deleted file mode 100644 index 19697fc6d5d..00000000000 --- a/api/specs/storage/openapi.yaml +++ /dev/null @@ -1,1191 +0,0 @@ -openapi: 3.0.0 -info: - description: API definition for simcore-service-storage service - version: 0.3.0 - title: simcore-service-storage API - contact: - name: IT'IS Foundation - email: support@simcore.io - license: - name: MIT - url: https://github.com/ITISFoundation/osparc-simcore/blob/master/LICENSE -servers: - - description: API server - url: "/v0" - - description: Development server - url: http://{host}:{port}/{basePath} - variables: - host: - default: "localhost" - port: - default: "8080" - basePath: - enum: - - v0 - default: v0 -tags: - - name: maintenance - - name: location - - name: dataset - - name: file - - name: tasks -paths: - /: - get: - summary: Service health-check endpoint - description: Some general information on the API and state of the service behind - tags: - - maintenance - operationId: health_check - responses: - "200": - description: Service information - content: - application/json: - schema: - $ref: "#/components/schemas/HealthCheckEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /status: - get: - summary: checks status of self and connected services - operationId: get_status - tags: - - maintenance - responses: - "200": - description: returns app status check - - /locations: - get: - summary: Lists available storage locations - operationId: get_storage_locations - tags: - - location - parameters: - - name: user_id - in: query - required: true - schema: - type: string - responses: - "200": - description: "List of available storage locations" - content: - application/json: - schema: - $ref: "#/components/schemas/FileLocationArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}:sync: - post: - summary: Manually triggers the synchronisation of the file meta data table in the database - operationId: synchronise_meta_data_table - tags: - - location - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: dry_run - in: query - required: false - schema: - type: boolean - default: true - - name: fire_and_forget - in: query - required: false - schema: - type: boolean - default: false - responses: - "200": - description: An object containing added, changed and removed paths - content: - application/json: - schema: - $ref: "#/components/schemas/TableSynchronisationEnveloped" - default: - 
$ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/datasets: - get: - summary: Lists all dataset's metadata - operationId: get_datasets_metadata - tags: - - dataset - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "200": - description: "list of dataset meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/DatasetMetaDataArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/metadata: - get: - summary: Lists all file's metadata - operationId: get_files_metadata - tags: - - file - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - - name: uuid_filter - in: query - required: false - schema: - type: string - responses: - "200": - description: "list of file meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaDataArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/datasets/{dataset_id}/metadata: - get: - summary: Get dataset metadata - operationId: get_files_metadata_dataset - tags: - - dataset - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: dataset_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "200": - description: "list of file meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaDataArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/{file_id}/metadata: - get: - summary: Get file metadata - operationId: get_file_metadata - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "200": - description: "Returns file metadata" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaEnvelope" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/{file_id}: - get: - summary: Gets download link for file at location - operationId: download_file - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - - name: link_type - in: query - required: false - schema: - type: string - default: "presigned" - enum: - - presigned - - s3 - - responses: - "200": - description: "Returns presigned link" - content: - application/json: - schema: - $ref: "#/components/schemas/PresignedLinkEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - put: - summary: Returns upload object - operationId: upload_file - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - - name: link_type - in: query - required: false - schema: - type: string - default: "presigned" - 
enum: - - presigned - - s3 - - name: file_size - in: query - required: false - description: will be required once all legacy services are gone or updated. if null returns the PresignedLinkEnveloped, else returns FileUploadEnveloped - schema: - type: integer - format: int64 - minimum: 0 - responses: - "200": - description: "Creates and returns file upload object" - content: - application/json: - schema: - oneOf: - - $ref: "#/components/schemas/PresignedLinkEnveloped" - - $ref: "#/components/schemas/FileUploadEnveloped" - links: - CompleteUpload: - operationId: complete_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - query.user_id: "$request.query.user_id" - AbortUpload: - operationId: abort_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - query.user_id: "$request.query.user_id" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - summary: Deletes file - operationId: delete_file - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "204": - description: everything is OK - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/{file_id}:abort: - post: - summary: Asks the server to abort the upload and revert to the last valid version if any - operationId: abort_upload_file - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "204": - description: Abort OK - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/{file_id}:complete: - post: - summary: Asks the server to complete the upload - operationId: complete_upload_file - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - type: object - required: - - parts - properties: - parts: - type: array - items: - type: object - required: - - number - - e_tag - properties: - number: - type: integer - minimum: 1 - e_tag: - type: string - responses: - "202": - description: Completion of upload is accepted - content: - application/json: - schema: - $ref: "#/components/schemas/FileUploadCompleteEnveloped" - links: - CompleteUploadStatus: - operationId: is_completed_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - path.future_id: "$response.body.data.links.state" - query.user_id: "$request.query.user_id" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /locations/{location_id}/files/{file_id}:complete/futures/{future_id}: - post: - summary: Check for upload completion - operationId: is_completed_upload_file - tags: - - file - parameters: - - name: future_id - in: path - required: true - schema: - type: string - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: 
user_id - in: query - required: true - schema: - type: string - responses: - "200": - description: returns state of upload completion - content: - application/json: - schema: - $ref: "#/components/schemas/FileUploadCompleteFutureEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /simcore-s3:access: - post: - summary: Returns the temporary access credentials for the user storage space - operationId: get_or_create_temporary_s3_access - tags: - - file - parameters: - - name: user_id - in: query - required: true - schema: - type: integer - responses: - "200": - description: the S3 access credentials - content: - application/json: - schema: - $ref: "#/components/schemas/S3AccessCredentialsEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /simcore-s3/files/metadata:search: - post: - summary: Returns metadata for all files matching a pattern - operationId: search_files_starting_with - tags: - - file - parameters: - - name: user_id - in: query - required: true - schema: - type: integer - - name: startswith - description: matches starting string of the file_id - in: query - schema: - type: string - default: "" - responses: - "200": - description: list of matching files found - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaDataArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /simcore-s3/folders: - post: - summary: Deep copies of all data from source to destination project in s3 - operationId: copy_folders_from_project - tags: - - file - parameters: - - name: user_id - in: query - required: true - schema: - type: integer - requestBody: - content: - application/json: - schema: - type: object - properties: - source: - $ref: "#/components/schemas/Project" - destination: - $ref: "#/components/schemas/Project" - nodes_map: - type: object - description: maps source and destination node uuids - additionalProperties: - type: string - responses: - "201": - description: Data from destination project copied and returns project - content: - application/json: - schema: - $ref: "#/components/schemas/Project" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /simcore-s3/folders/{folder_id}: - delete: - summary: Deletes all objects within a node_id or within a project_id if node_id is omitted - operationId: delete_folders_of_project - tags: - - file - parameters: - - name: folder_id - in: path - required: true - schema: - type: string - - name: node_id - in: query - required: false - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: string - responses: - "204": - description: folder has been successfully deleted - - /files/{file_id}:soft-copy: - post: - summary: Copy as soft link - operationId: copy_as_soft_link - tags: - - file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: user_id - in: query - required: true - schema: - type: integer - requestBody: - content: - application/json: - schema: - type: object - required: - - link_id - properties: - link_id: - type: string - responses: - "200": - description: "Returns link metadata" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaEnvelope" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /tasks: - get: - operationId: list_tasks - tags: - - tasks - responses: - "200": - description: Returns the list of active tasks (running and/or done) - content: - application/json: - schema: - type: array - 
items: - $ref: "../common/schemas/task.yaml#/TaskEnveloped" - /tasks/{task_id}: - parameters: - - name: task_id - in: path - required: true - schema: - type: string - get: - operationId: get_task_status - tags: - - tasks - responses: - "200": - description: Returns the task status - content: - application/json: - schema: - $ref: "../common/schemas/task.yaml#/TaskStatusEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - operationId: cancel_and_delete_task - description: Aborts and remove the task - tags: - - tasks - responses: - "204": - description: Task was successfully aborted - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /tasks/{task_id}/result: - parameters: - - name: task_id - in: path - required: true - schema: - type: string - get: - operationId: get_task_result - tags: - - tasks - responses: - "2XX": - description: Retrieve the task result and returns directly its HTTP code - default: - $ref: "#/components/responses/DefaultErrorResponse" -components: - schemas: - HealthCheckEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/HealthCheck" - error: - nullable: true - default: null - - HealthCheck: - type: object - properties: - name: - type: string - status: - type: string - api_version: - type: string - version: - type: string - example: - name: "simcore-director-service" - status: SERVICE_RUNNING - api_version: 0.1.0-dev+NJuzzD9S - version: 0.1.0-dev+N127Mfv9H - - ErrorEnveloped: - # - notice that data is defaulted to null - # - type: object - required: - - data - - error - properties: - data: - nullable: true - default: null - error: - $ref: "#/components/schemas/Error" - - Error: - # - Normally transmitted as a response from server to client - # - can exchage log messages between server and client. Possible applications: - # - e.g. client side can render a widget to display messages logged to 'user' - # - contains meta-information to allow client programatically understand the error. Possible applications: - # - e.g. metadata can serialize an exception in server that can be reproduced in client side - # - type: object - properties: - logs: - description: log messages - type: array - items: - $ref: "#/components/schemas/LogMessage" - errors: - description: errors metadata - type: array - items: - $ref: "#/components/schemas/ErrorItem" - status: - description: HTTP error code - type: integer - example: - BadRequestError: - logs: - - message: "Requested information is incomplete or malformed" - level: ERROR - - message: "Invalid email and password" - level: ERROR - logger: USER - errors: - - code: "InvalidEmail" - message: "Email is malformed" - field: email - - code: "UnsavePassword" - message: "Password is not secure" - field: pasword - status: 400 - - ErrorItem: - type: object - required: - - code - - message - properties: - code: - type: string - description: Typically the name of the exception that produced it otherwise some known error code - message: - type: string - description: Error message specific to this item - resource: - type: string - description: API resource affected by this error - field: - type: string - description: Specific field within the resource - - LogMessageEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/LogMessage" - error: - nullable: true - default: null - - LogMessage: - # - logger can be use as a way for the client to filter messages. - # - E.g. 
logger naming can be hierarchical, and all including "*.user.*" - # are displayed as a flash message in the front-end - # - type: object - properties: - level: - description: log level - type: string - default: INFO - enum: - - DEBUG - - WARNING - - INFO - - ERROR - message: - description: log message. If logger is USER, then it MUST be human readable - type: string - logger: - description: name of the logger receiving this message - type: string - required: - - message - example: - message: "Hi there, Mr user" - level: INFO - logger: user-logger - - FakeEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/Fake" - error: - nullable: true - default: null - - Fake: - type: object - required: - - path_value - - query_value - - body_value - properties: - path_value: - type: string - query_value: - type: string - body_value: - type: object - additionalProperties: true - example: - path_value: foo - query_value: bar - body_value: - key1: value1 - key2: value2 - - TableSynchronisationEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/TableSynchronisation" - error: - nullable: true - default: null - - TableSynchronisation: - type: object - required: - - removed - properties: - dry_run: - type: boolean - fire_and_forget: - type: boolean - removed: - type: array - items: - type: string - - FileLocationArrayEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileLocationArray" - error: - nullable: true - default: null - - FileLocationArray: - type: array - items: - $ref: "#/components/schemas/FileLocation" - - FileLocationEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileLocation" - error: - nullable: true - default: null - - FileLocation: - type: object - properties: - name: - type: string - id: - type: integer - example: - filename: "simcore.s3" - id: 0 - - DatasetMetaDataArrayEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/DatasetMetaDataArray" - error: - nullable: true - default: null - - DatasetMetaEnvelope: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/DatasetMetaData" - error: - nullable: true - default: null - - DatasetMetaData: - type: object - properties: - dataset_id: - type: string - display_name: - type: string - example: - dataset_uuid: "N:id-aaaa" - display_name: "simcore-testing" - - DatasetMetaDataArray: - type: array - items: - $ref: "#/components/schemas/DatasetMetaData" - - FileMetaEnvelope: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileMetaData" - error: - nullable: true - default: null - - FileMetaData: - type: object - properties: - file_uuid: - type: string - location_id: - type: string - project_name: - type: string - node_name: - type: string - file_name: - type: string - file_id: - type: string - created_at: - type: string - last_modified: - type: string - file_size: - type: integer - entity_tag: - type: string - - FileMetaDataArrayEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileMetaDataArray" - error: - nullable: true - default: null - - FileMetaDataArray: - type: array - items: - $ref: "#/components/schemas/FileMetaData" - - PresignedLinkEnveloped: - type: object - required: - - data - - error - properties: - 
data: - $ref: "#/components/schemas/PresignedLink" - error: - nullable: true - default: null - - PresignedLink: - type: object - required: - - link - properties: - link: - type: string - example: - link: "example_link" - - FileUploadEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadSchema" - error: - nullable: true - default: null - - FileUploadSchema: - type: object - required: - - chunk_size - - urls - - links - properties: - chunk_size: - type: integer - format: int64 - minimum: 0 - urls: - type: array - items: - type: string - links: - type: object - required: - - abort_upload - - complete_upload - properties: - abort_upload: - type: string - complete_upload: - type: string - - FileUploadCompleteEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadComplete" - error: - nullable: true - default: null - - FileUploadComplete: - type: object - required: - - links - properties: - links: - type: object - required: - - state - properties: - state: - type: string - - FileUploadCompleteFutureEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadCompleteFuture" - error: - nullable: true - default: null - - FileUploadCompleteFuture: - type: object - required: - - state - properties: - state: - type: string - enum: - - ok - - nok - e_tag: - type: string - nullable: true - - S3AccessCredentialsEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/S3AccessCredentials" - error: - nullable: true - default: null - - S3AccessCredentials: - type: object - required: - - access - - secret - - token - - endpoint - properties: - access: - type: string - secret: - type: string - token: - type: string - endpoint: - type: string - - Project: - $ref: "../common/schemas/project.yaml#/components/schemas/ProjectIn" - - responses: - DefaultErrorResponse: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" diff --git a/api/specs/web-server/Makefile b/api/specs/web-server/Makefile new file mode 100644 index 00000000000..168d7c9ec78 --- /dev/null +++ b/api/specs/web-server/Makefile @@ -0,0 +1,21 @@ +.DEFAULT_GOAL := all + +.PHONY: _check_venv_active +_check_venv_active: + # Checking whether virtual environment was activated + @python3 -c "import sys; assert sys.base_prefix!=sys.prefix" + + +.PHONY: install +install-dev install: _check_venv_active + # installing webserver and requirements.txt + @cd ./../../../services/web/server && make install-dev && cd - + @uv pip install -r requirements.txt + + +.PHONY: all +all: _check_venv_active install + python openapi.py + +.PHONY: openapi-specs +openapi-specs: all diff --git a/api/specs/web-server/_activity.py b/api/specs/web-server/_activity.py new file mode 100644 index 00000000000..52e03c13cd1 --- /dev/null +++ b/api/specs/web-server/_activity.py @@ -0,0 +1,24 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from fastapi import APIRouter +from models_library.api_schemas_webserver.activity import ActivityStatusDict +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "tasks", + ], +) + + +@router.get( + "/activity/status", + 
response_model=Envelope[ActivityStatusDict], +) +def get_activity_status(): + pass diff --git a/api/specs/web-server/_admin.py b/api/specs/web-server/_admin.py new file mode 100644 index 00000000000..87c72ce371f --- /dev/null +++ b/api/specs/web-server/_admin.py @@ -0,0 +1,34 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Union + +from fastapi import APIRouter, Header +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.email._handlers import ( + EmailTestFailed, + EmailTestPassed, + TestEmail, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "admin", + ], +) + + +@router.post( + "/email:test", + response_model=Envelope[Union[EmailTestFailed, EmailTestPassed]], +) +async def test_email( + _body: TestEmail, x_simcore_products_name: str | None = Header(default=None) +): + # X-Simcore-Products-Name + ... diff --git a/api/specs/web-server/_announcements.py b/api/specs/web-server/_announcements.py new file mode 100644 index 00000000000..c8cad88bb25 --- /dev/null +++ b/api/specs/web-server/_announcements.py @@ -0,0 +1,25 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from fastapi import APIRouter +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.announcements._handlers import Announcement + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "announcements", + ], +) + + +@router.get( + "/announcements", + response_model=Envelope[list[Announcement]], +) +async def list_announcements(): + ... diff --git a/api/specs/web-server/_auth.py b/api/specs/web-server/_auth.py new file mode 100644 index 00000000000..7860ef98f03 --- /dev/null +++ b/api/specs/web-server/_auth.py @@ -0,0 +1,267 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Any + +from fastapi import APIRouter, status +from models_library.api_schemas_webserver.auth import ( + AccountRequestInfo, + UnregisterCheck, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError, Log +from pydantic import BaseModel, Field, confloat +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.login._controller.rest.auth import ( + LoginBody, + LoginNextPage, + LoginTwoFactorAuthBody, + LogoutBody, +) +from simcore_service_webserver.login._controller.rest.change import ( + ChangeEmailBody, + ChangePasswordBody, + ResetPasswordBody, +) +from simcore_service_webserver.login._controller.rest.confirmation import ( + PhoneConfirmationBody, + ResetPasswordConfirmation, +) +from simcore_service_webserver.login._controller.rest.registration import ( + InvitationCheck, + InvitationInfo, + RegisterBody, + RegisterPhoneBody, + RegisterPhoneNextPage, +) +from simcore_service_webserver.login._controller.rest.twofa import Resend2faBody + +router = APIRouter(prefix=f"/{API_VTAG}", tags=["auth"]) + + +@router.post( + "/auth/request-account", + operation_id="request_product_account", + status_code=status.HTTP_204_NO_CONTENT, +) +async def request_product_account(_body: AccountRequestInfo): ... 
+
+
+@router.post(
+    "/auth/register/invitations:check",
+    response_model=Envelope[InvitationInfo],
+    operation_id="auth_check_registration_invitation",
+)
+async def check_registration_invitation(check: InvitationCheck):
+    """Checks invitation and returns associated email or None"""
+
+
+@router.post(
+    "/auth/register",
+    response_model=Envelope[Log],
+    operation_id="auth_register",
+)
+async def register(_body: RegisterBody):
+    """User registration"""
+
+
+@router.post(
+    "/auth/unregister",
+    response_model=Envelope[Log],
+    status_code=status.HTTP_200_OK,
+    responses={status.HTTP_409_CONFLICT: {"model": EnvelopedError}},
+)
+async def unregister_account(_body: UnregisterCheck): ...
+
+
+@router.post(
+    "/auth/verify-phone-number",
+    response_model=Envelope[RegisterPhoneNextPage],
+    operation_id="auth_register_phone",
+)
+async def register_phone(_body: RegisterPhoneBody):
+    """user tries to verify phone number for 2 Factor Authentication when registering"""
+
+
+@router.post(
+    "/auth/validate-code-register",
+    response_model=Envelope[Log],
+    operation_id="auth_phone_confirmation",
+)
+async def phone_confirmation(_body: PhoneConfirmationBody):
+    """user enters 2 Factor Authentication code when registering"""
+
+
+@router.post(
+    "/auth/login",
+    response_model=Envelope[LoginNextPage],
+    status_code=status.HTTP_201_CREATED,
+    operation_id="auth_login",
+    responses={
+        # status.HTTP_503_SERVICE_UNAVAILABLE
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized reset due to invalid token code",
+        }
+    },
+)
+async def login(_body: LoginBody):
+    """user logs in"""
+
+
+@router.post(
+    "/auth/validate-code-login",
+    response_model=Envelope[Log],
+    operation_id="auth_login_2fa",
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized reset due to invalid token code",
+        }
+    },
+)
+async def login_2fa(_body: LoginTwoFactorAuthBody):
+    """user enters 2 Factor Authentication code when logging in"""
+
+
+@router.post(
+    "/auth/two_factor:resend",
+    response_model=Envelope[Log],
+    operation_id="auth_resend_2fa_code",
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized reset due to invalid token code",
+        }
+    },
+)
+async def resend_2fa_code(resend: Resend2faBody):
+    """Resends 2FA either via email or SMS"""
+
+
+@router.post(
+    "/auth/logout",
+    response_model=Envelope[Log],
+    operation_id="auth_logout",
+)
+async def logout(_body: LogoutBody):
+    """user logs out"""
+
+
+@router.get(
+    "/auth:check",
+    operation_id="check_authentication",
+    status_code=status.HTTP_204_NO_CONTENT,
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized reset due to invalid token code",
+        }
+    },
+)
+async def check_auth():
+    """checks if user is authenticated in the platform"""
+
+
+@router.post(
+    "/auth/reset-password",
+    response_model=Envelope[Log],
+    operation_id="initiate_reset_password",
+    responses={status.HTTP_503_SERVICE_UNAVAILABLE: {"model": EnvelopedError}},
+)
+async def initiate_reset_password(_body: ResetPasswordBody): ...
+
+
+@router.post(
+    "/auth/reset-password/{code}",
+    response_model=Envelope[Log],
+    operation_id="complete_reset_password",
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "Invalid token code",
+        }
+    },
+)
+async def complete_reset_password(code: str, _body: ResetPasswordConfirmation): ...
+
+
+@router.post(
+    "/auth/change-email",
+    response_model=Envelope[Log],
+    operation_id="auth_change_email",
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized user. Login required",
+        },
+        status.HTTP_503_SERVICE_UNAVAILABLE: {
+            "model": EnvelopedError,
+            "description": "unable to send confirmation email",
+        },
+    },
+    # Disabled in https://github.com/ITISFoundation/osparc-simcore/pull/5472
+    include_in_schema=False,
+)
+async def change_email(_body: ChangeEmailBody):
+    """logged-in user changes email"""
+
+
+class PasswordCheckSchema(BaseModel):
+    strength: confloat(ge=0.0, le=1.0) = Field(  # type: ignore
+        ...,
+        description="The strength of the password ranges from 0 (extremely weak) to 1 (extremely strong)",
+    )
+    rating: str | None = Field(
+        None, description="Human readable rating from infinitely weak to very strong"
+    )
+    improvements: Any | None = None
+
+
+@router.post(
+    "/auth/change-password",
+    response_model=Envelope[Log],
+    operation_id="auth_change_password",
+    responses={
+        status.HTTP_401_UNAUTHORIZED: {
+            "model": EnvelopedError,
+            "description": "unauthorized user. Login required",
+        },
+        status.HTTP_409_CONFLICT: {
+            "model": EnvelopedError,
+            "description": "mismatch between new and confirmation passwords",
+        },
+        status.HTTP_422_UNPROCESSABLE_ENTITY: {
+            "model": EnvelopedError,
+            "description": "current password is invalid",
+        },
+    },
+)
+async def change_password(_body: ChangePasswordBody):
+    """logged-in user changes password"""
+
+
+@router.get(
+    "/auth/confirmation/{code}",
+    response_model=Envelope[Log],
+    operation_id="auth_confirmation",
+    responses={
+        "3XX": {
+            "description": "redirection to specific ui application page",
+        },
+    },
+)
+async def email_confirmation(code: str):
+    """email link sent to user to confirm an action"""
+
+
+@router.get(
+    "/auth/captcha",
+    operation_id="create_captcha",
+    status_code=status.HTTP_200_OK,
+    responses={status.HTTP_200_OK: {"content": {"image/png": {}}}},
+)
+async def create_captcha(): ...
diff --git a/api/specs/web-server/_auth_api_keys.py b/api/specs/web-server/_auth_api_keys.py new file mode 100644 index 00000000000..bcebb042376 --- /dev/null +++ b/api/specs/web-server/_auth_api_keys.py @@ -0,0 +1,62 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.auth import ( + ApiKeyCreateRequest, + ApiKeyCreateResponse, + ApiKeyGet, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.api_keys._controller.rest import ApiKeysPathParams +from simcore_service_webserver.api_keys._controller.rest_exceptions import ( + _TO_HTTP_ERROR_MAP, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=["auth"], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.post( + "/auth/api-keys", + operation_id="create_api_key", + status_code=status.HTTP_201_CREATED, + response_model=Envelope[ApiKeyCreateResponse], +) +async def create_api_key(_body: ApiKeyCreateRequest): + """creates API keys to access public API""" + + +@router.get( + "/auth/api-keys", + operation_id="list_api_keys", + response_model=Envelope[list[ApiKeyGet]], + status_code=status.HTTP_200_OK, +) +async def list_api_keys(): + """lists API keys by this user""" + + +@router.get( + "/auth/api-keys/{api_key_id}", + operation_id="get_api_key", + response_model=Envelope[ApiKeyGet], + status_code=status.HTTP_200_OK, +) +async def get_api_key(_path: Annotated[ApiKeysPathParams, Depends()]): + """returns the API Key with the given ID""" + + +@router.delete( + "/auth/api-keys/{api_key_id}", + operation_id="delete_api_key", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_api_key(_path: Annotated[ApiKeysPathParams, Depends()]): + """deletes the API key with the given ID""" diff --git a/api/specs/web-server/_catalog.py b/api/specs/web-server/_catalog.py new file mode 100644 index 00000000000..4902e331f88 --- /dev/null +++ b/api/specs/web-server/_catalog.py @@ -0,0 +1,132 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.api_schemas_api_server.pricing_plans import ServicePricingPlanGet +from models_library.api_schemas_webserver.catalog import ( + CatalogLatestServiceGet, + CatalogServiceGet, + CatalogServiceUpdate, + ServiceInputGet, + ServiceInputKey, + ServiceOutputGet, + ServiceOutputKey, + ServiceResourcesGet, +) +from models_library.generics import Envelope +from models_library.rest_pagination import Page +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.catalog._controller_rest_schemas import ( + FromServiceOutputQueryParams, + ListServiceParams, + ServiceInputsPathParams, + ServiceOutputsPathParams, + ServicePathParams, + ToServiceInputsQueryParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "catalog", + ], +) + + +@router.get( + "/catalog/services/-/latest", + response_model=Page[CatalogLatestServiceGet], +) +def list_services_latest(_query: Annotated[ListServiceParams, Depends()]): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}", + response_model=Envelope[CatalogServiceGet], +) +def get_service(_path: Annotated[ServicePathParams, Depends()]): ... 
+ + +@router.patch( + "/catalog/services/{service_key}/{service_version}", + response_model=Envelope[CatalogServiceGet], +) +def update_service( + _path: Annotated[ServicePathParams, Depends()], + _body: CatalogServiceUpdate, +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/inputs", + response_model=Envelope[list[ServiceInputGet]], +) +def list_service_inputs( + _path: Annotated[ServicePathParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/inputs/{input_key}", + response_model=Envelope[ServiceInputGet], +) +def get_service_input( + _path: Annotated[ServiceInputsPathParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/inputs:match", + response_model=Envelope[list[ServiceInputKey]], +) +def get_compatible_inputs_given_source_output( + _path: Annotated[ServicePathParams, Depends()], + _query: Annotated[FromServiceOutputQueryParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/outputs", + response_model=Envelope[list[ServiceOutputKey]], +) +def list_service_outputs( + _path: Annotated[ServicePathParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/outputs/{output_key}", + response_model=Envelope[list[ServiceOutputGet]], +) +def get_service_output( + _path: Annotated[ServiceOutputsPathParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/outputs:match", + response_model=Envelope[list[ServiceOutputKey]], +) +def get_compatible_outputs_given_target_input( + _path: Annotated[ServicePathParams, Depends()], + _query: Annotated[ToServiceInputsQueryParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key}/{service_version}/resources", + response_model=Envelope[ServiceResourcesGet], +) +def get_service_resources( + _path: Annotated[ServicePathParams, Depends()], +): ... + + +@router.get( + "/catalog/services/{service_key:path}/{service_version}/pricing-plan", + response_model=Envelope[ServicePricingPlanGet], + description="Retrieve default pricing plan for provided service", + tags=["pricing-plans"], +) +async def get_service_pricing_plan( + _path: Annotated[ServicePathParams, Depends()], +): ... diff --git a/api/specs/web-server/_catalog_tags.py b/api/specs/web-server/_catalog_tags.py new file mode 100644 index 00000000000..9b66b92dfb2 --- /dev/null +++ b/api/specs/web-server/_catalog_tags.py @@ -0,0 +1,52 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.api_schemas_webserver.catalog import CatalogServiceGet +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.catalog._controller_rest_schemas import ( + ServicePathParams, + ServiceTagPathParams, +) +from simcore_service_webserver.tags.schemas import TagGet + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "catalog", + "tags", + ], +) + + +@router.get( + "/catalog/services/{service_key}/{service_version}/tags", + response_model=Envelope[list[TagGet]], +) +def list_service_tags( + _path_params: Annotated[ServicePathParams, Depends()], +): ... 
+ + +@router.post( + "/catalog/services/{service_key}/{service_version}/tags/{tag_id}:add", + response_model=Envelope[CatalogServiceGet], +) +def add_service_tag( + _path_params: Annotated[ServiceTagPathParams, Depends()], +): ... + + +@router.post( + "/catalog/services/{service_key}/{service_version}/tags/{tag_id}:remove", + response_model=Envelope[CatalogServiceGet], +) +def remove_service_tag( + _path_params: Annotated[ServiceTagPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_common.py b/api/specs/web-server/_common.py new file mode 100644 index 00000000000..5afbea3d1d2 --- /dev/null +++ b/api/specs/web-server/_common.py @@ -0,0 +1,128 @@ +""" Common utils for OAS script generators +""" + +import inspect +import sys +from collections.abc import Callable +from pathlib import Path +from typing import ( + Annotated, + Any, + Generic, + NamedTuple, + Optional, + TypeVar, + Union, + get_args, + get_origin, +) + +from common_library.json_serialization import json_dumps +from common_library.pydantic_fields_extension import get_type +from fastapi import Query +from pydantic import BaseModel, Json, create_model +from pydantic.fields import FieldInfo + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +def _replace_basemodel_in_annotation(annotation, new_type): + origin = get_origin(annotation) + + # Handle Annotated + if origin is Annotated: + args = get_args(annotation) + base_type = args[0] + metadata = args[1:] + if isinstance(base_type, type) and issubclass(base_type, BaseModel): + # Replace the BaseModel subclass + base_type = new_type + + return Annotated[(base_type, *metadata)] + + # Handle Optionals, Unions, or other generic types + if origin in (Optional, Union, list, dict, tuple): # Extendable for other generics + new_args = tuple( + _replace_basemodel_in_annotation(arg, new_type) + for arg in get_args(annotation) + ) + return origin[new_args] + + # Replace BaseModel subclass directly + if isinstance(annotation, type) and issubclass(annotation, BaseModel): + return new_type + + # Return as-is if no changes + return annotation + + +def as_query(model_class: type[BaseModel]) -> type[BaseModel]: + fields = {} + for field_name, field_info in model_class.model_fields.items(): + + field_default = field_info.default + assert not field_info.default_factory # nosec + query_kwargs = { + "alias": field_info.alias, + "title": field_info.title, + "description": field_info.description, + "metadata": field_info.metadata, + "json_schema_extra": field_info.json_schema_extra, + } + + annotation = _replace_basemodel_in_annotation( + # NOTE: still missing description=query_kwargs["description"] and example=query_kwargs.get("json_schema_extra", {}).get("example_json") + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/6786 + field_info.annotation, + new_type=Json, + ) + + if annotation != field_info.annotation: + # Complex fields are transformed to Json + field_default = json_dumps(field_default) if field_default else None + + fields[field_name] = (annotation, Query(default=field_default, **query_kwargs)) + + new_model_name = f"{model_class.__name__}Query" + return create_model(new_model_name, **fields) + + +ErrorT = TypeVar("ErrorT") + + +class EnvelopeE(BaseModel, Generic[ErrorT]): + """Complementary to models_library.generics.Envelope just for the generators""" + + error: ErrorT | None = None + data: Any | None = None + + +class ParamSpec(NamedTuple): + name: str + annotated_type: type + field_info: FieldInfo + + +def 
assert_handler_signature_against_model( + handler: Callable, model_cls: type[BaseModel] +): + sig = inspect.signature(handler) + + # query, path and body parameters + specs_params = [ + ParamSpec(param.name, param.annotation, param.default) + for param in sig.parameters.values() + ] + + # query and path parameters + implemented_params = [ + ParamSpec(name, get_type(info), info) + for name, info in model_cls.model_fields.items() + ] + + implemented_names = {p.name for p in implemented_params} + specified_names = {p.name for p in specs_params} + + if not implemented_names.issubset(specified_names): + msg = f"Entrypoint {handler} does not implement OAS: {implemented_names} not in {specified_names}" + raise AssertionError(msg) diff --git a/api/specs/web-server/_computations.py b/api/specs/web-server/_computations.py new file mode 100644 index 00000000000..fb60fce4175 --- /dev/null +++ b/api/specs/web-server/_computations.py @@ -0,0 +1,97 @@ +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, status +from fastapi_pagination import Page +from models_library.api_schemas_webserver.computations import ( + ComputationGet, + ComputationPathParams, + ComputationRunIterationsLatestListQueryParams, + ComputationRunIterationsListQueryParams, + ComputationRunPathParams, + ComputationRunRestGet, + ComputationStart, + ComputationStarted, + ComputationTaskRestGet, +) +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.director_v2._controller.computations_rest import ( + ComputationTaskListQueryParams, + ComputationTaskPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "computations", + "projects", + ], +) + + +@router.get( + "/computations/{project_id}", + response_model=Envelope[ComputationGet], +) +async def get_computation(_path: Annotated[ComputationPathParams, Depends()]): ... + + +@router.post( + "/computations/{project_id}:start", + response_model=Envelope[ComputationStarted], + responses={ + status.HTTP_402_PAYMENT_REQUIRED: { + "description": "Insufficient credits to run computation" + }, + status.HTTP_404_NOT_FOUND: { + "description": "Project/wallet/pricing details were not found" + }, + status.HTTP_406_NOT_ACCEPTABLE: {"description": "Cluster not found"}, + status.HTTP_409_CONFLICT: {"description": "Project already started"}, + status.HTTP_422_UNPROCESSABLE_ENTITY: {"description": "Configuration error"}, + status.HTTP_503_SERVICE_UNAVAILABLE: {"description": "Service not available"}, + }, +) +async def start_computation( + _path: Annotated[ComputationPathParams, Depends()], + _body: ComputationStart, +): ... + + +@router.post( + "/computations/{project_id}:stop", + status_code=status.HTTP_204_NO_CONTENT, +) +async def stop_computation(_path: Annotated[ComputationPathParams, Depends()]): ... + + +@router.get( + "/computations/-/iterations/latest", + response_model=Page[ComputationRunRestGet], +) +async def list_computations_latest_iteration( + _query: Annotated[ + as_query(ComputationRunIterationsLatestListQueryParams), Depends() + ], +): ... + + +@router.get( + "/computations/{project_id}/iterations", + response_model=Page[ComputationRunRestGet], +) +async def list_computation_iterations( + _query: Annotated[as_query(ComputationRunIterationsListQueryParams), Depends()], + _path: Annotated[ComputationRunPathParams, Depends()], +): ... 
+ + +@router.get( + "/computations/{project_id}/iterations/latest/tasks", + response_model=Page[ComputationTaskRestGet], +) +async def list_computations_latest_iteration_tasks( + _query: Annotated[as_query(ComputationTaskListQueryParams), Depends()], + _path: Annotated[ComputationTaskPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_diagnostics.py b/api/specs/web-server/_diagnostics.py new file mode 100644 index 00000000000..85422308418 --- /dev/null +++ b/api/specs/web-server/_diagnostics.py @@ -0,0 +1,81 @@ +from typing import Annotated, Any + +from fastapi import APIRouter, Depends +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.diagnostics._handlers import ( + AppStatusCheck, + StatusDiagnosticsGet, + StatusDiagnosticsQueryParam, +) +from simcore_service_webserver.rest.healthcheck import HealthInfoDict + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "maintenance", + ], +) + + +@router.get( + "/", + response_model=Envelope[HealthInfoDict], +) +async def healthcheck_readiness_probe(): + """Readiness probe: check if the container is ready to receive traffic""" + + +@router.get( + "/health", + response_model=Envelope[dict[str, Any]], +) +async def healthcheck_liveness_probe(): + """Liveness probe: check if the container is alive""" + + +@router.get( + "/config", + description="Front end runtime configuration", + response_model=Envelope[dict[str, Any]], +) +async def get_config(): + """Returns app and products configs""" + + +@router.get( + "/scheduled_maintenance", + response_model=Envelope[str], +) +async def get_scheduled_maintenance(): + ... + + +@router.get( + "/status", + description="checks status of self and connected services", + response_model=Envelope[AppStatusCheck], + response_description="Returns app status check", +) +async def get_app_status(): + ... + + +@router.get( + "/status/diagnostics", + response_model=Envelope[StatusDiagnosticsGet], + response_description="Returns app diagnostics report", +) +async def get_app_diagnostics( + _query: Annotated[StatusDiagnosticsQueryParam, Depends()] +): + ... + + +@router.get( + "/status/{service_name}", + response_model=Envelope[AppStatusCheck], + response_description="Returns app status check", +) +async def get_service_status(service_name: str): + ... 
diff --git a/api/specs/web-server/_exporter.py b/api/specs/web-server/_exporter.py new file mode 100644 index 00000000000..9acf44f65cc --- /dev/null +++ b/api/specs/web-server/_exporter.py @@ -0,0 +1,27 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from fastapi import APIRouter +from fastapi.responses import FileResponse +from simcore_service_webserver._meta import API_VTAG + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "exporter", + ], +) + + +@router.post( + "/projects/{project_id}:xport", + response_class=FileResponse, +) +def export_project(project_id: str): + """ + creates an archive of the project and downloads it + """ diff --git a/api/specs/web-server/_folders.py b/api/specs/web-server/_folders.py new file mode 100644 index 00000000000..4c97c697743 --- /dev/null +++ b/api/specs/web-server/_folders.py @@ -0,0 +1,114 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.folders_v2 import ( + FolderCreateBodyParams, + FolderGet, + FolderReplaceBodyParams, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.folders._common.exceptions_handlers import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.folders._common.models import ( + FolderSearchQueryParams, + FoldersListQueryParams, + FoldersPathParams, + FolderWorkspacesPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "folders", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.post( + "/folders", + response_model=Envelope[FolderGet], + status_code=status.HTTP_201_CREATED, +) +async def create_folder( + _body: FolderCreateBodyParams, +): + ... + + +@router.get( + "/folders", + response_model=Envelope[list[FolderGet]], +) +async def list_folders( + _query: Annotated[as_query(FoldersListQueryParams), Depends()], +): + ... + + +@router.get( + "/folders:search", + response_model=Envelope[list[FolderGet]], +) +async def list_folders_full_search( + _query: Annotated[as_query(FolderSearchQueryParams), Depends()], +): + ... + + +@router.get( + "/folders/{folder_id}", + response_model=Envelope[FolderGet], +) +async def get_folder( + _path: Annotated[FoldersPathParams, Depends()], +): + ... + + +@router.put( + "/folders/{folder_id}", + response_model=Envelope[FolderGet], +) +async def replace_folder( + _path: Annotated[FoldersPathParams, Depends()], + _body: FolderReplaceBodyParams, +): + ... + + +@router.delete( + "/folders/{folder_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_folder( + _path: Annotated[FoldersPathParams, Depends()], +): + ... + + +@router.post( + "/folders/{folder_id}/workspaces/{workspace_id}:move", + status_code=status.HTTP_204_NO_CONTENT, + description="Move folder to the workspace", + tags=["workspaces"], +) +async def move_folder_to_workspace( + _path: Annotated[FolderWorkspacesPathParams, Depends()], +): + ... 
diff --git a/api/specs/web-server/_functions.py b/api/specs/web-server/_functions.py new file mode 100644 index 00000000000..72f4c8af48e --- /dev/null +++ b/api/specs/web-server/_functions.py @@ -0,0 +1,53 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.functions import ( + FunctionToRegister, + RegisteredFunctionGet, +) +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.functions._controller._functions_rest_schemas import ( + FunctionPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "functions", + ], +) + + +@router.post( + "/functions", + response_model=Envelope[RegisteredFunctionGet], +) +async def register_function( + _body: FunctionToRegister, +) -> Envelope[RegisteredFunctionGet]: ... + + +@router.get( + "/functions/{function_id}", + response_model=Envelope[RegisteredFunctionGet], +) +async def get_function( + _path: Annotated[FunctionPathParams, Depends()], +): ... + + +@router.delete( + "/functions/{function_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_function( + _path: Annotated[FunctionPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_groups.py b/api/specs/web-server/_groups.py new file mode 100644 index 00000000000..6f0f1f1e616 --- /dev/null +++ b/api/specs/web-server/_groups.py @@ -0,0 +1,196 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from enum import Enum +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.groups import ( + GroupCreate, + GroupGet, + GroupUpdate, + GroupUserAdd, + GroupUserGet, + GroupUserUpdate, + MyGroupsGet, +) +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.groups._common.schemas import ( + GroupsClassifiersQuery, + GroupsPathParams, + GroupsUsersPathParams, +) +from simcore_service_webserver.scicrunch.models import ResearchResource, ResourceHit + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "groups", + ], +) + + +@router.get( + "/groups", + response_model=Envelope[MyGroupsGet], +) +async def list_groups(): + """ + List all groups (organizations, primary, everyone and products) I belong to + """ + + +@router.post( + "/groups", + response_model=Envelope[GroupGet], + status_code=status.HTTP_201_CREATED, +) +async def create_group(_body: GroupCreate): + """ + Creates an organization group + """ + + +@router.get( + "/groups/{gid}", + response_model=Envelope[GroupGet], +) +async def get_group(_path: Annotated[GroupsPathParams, Depends()]): + """ + Get an organization group + """ + + +@router.patch( + "/groups/{gid}", + response_model=Envelope[GroupGet], +) +async def update_group( + _path: Annotated[GroupsPathParams, Depends()], + _body: GroupUpdate, +): + """ + Updates organization groups + """ + + +@router.delete( + "/groups/{gid}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_group(_path: Annotated[GroupsPathParams, Depends()]): + """ + Deletes organization groups + """ + + +_extra_tags: list[str | Enum] = ["users"] + + +@router.get( + 
"/groups/{gid}/users", + response_model=Envelope[list[GroupUserGet]], + tags=_extra_tags, +) +async def get_all_group_users(_path: Annotated[GroupsPathParams, Depends()]): + """ + Gets users in organization or primary groups + """ + + +@router.post( + "/groups/{gid}/users", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def add_group_user( + _path: Annotated[GroupsPathParams, Depends()], + _body: GroupUserAdd, +): + """ + Adds a user to an organization group using their username, user ID, or email (subject to privacy settings) + """ + + +@router.get( + "/groups/{gid}/users/{uid}", + response_model=Envelope[GroupUserGet], + tags=_extra_tags, +) +async def get_group_user( + _path: Annotated[GroupsUsersPathParams, Depends()], +): + """ + Gets specific user in an organization group + """ + + +@router.patch( + "/groups/{gid}/users/{uid}", + response_model=Envelope[GroupUserGet], + tags=_extra_tags, +) +async def update_group_user( + _path: Annotated[GroupsUsersPathParams, Depends()], + _body: GroupUserUpdate, +): + """ + Updates user (access-rights) to an organization group + """ + + +@router.delete( + "/groups/{gid}/users/{uid}", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def delete_group_user( + _path: Annotated[GroupsUsersPathParams, Depends()], +): + """ + Removes a user from an organization group + """ + + +# +# Classifiers +# + + +@router.get( + "/groups/{gid}/classifiers", + response_model=Envelope[dict[str, Any]], +) +async def get_group_classifiers( + _path: Annotated[GroupsPathParams, Depends()], + _query: Annotated[GroupsClassifiersQuery, Depends()], +): + ... + + +@router.get( + "/groups/sparc/classifiers/scicrunch-resources/{rrid}", + response_model=Envelope[ResearchResource], +) +async def get_scicrunch_resource(rrid: str): + ... + + +@router.post( + "/groups/sparc/classifiers/scicrunch-resources/{rrid}", + response_model=Envelope[ResearchResource], +) +async def add_scicrunch_resource(rrid: str): + ... + + +@router.get( + "/groups/sparc/classifiers/scicrunch-resources:search", + response_model=Envelope[list[ResourceHit]], +) +async def search_scicrunch_resources(guess_name: str): + ... 
diff --git a/api/specs/web-server/_licensed_items.py b/api/specs/web-server/_licensed_items.py new file mode 100644 index 00000000000..3385028c1ce --- /dev/null +++ b/api/specs/web-server/_licensed_items.py @@ -0,0 +1,59 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends +from models_library.api_schemas_webserver.licensed_items import LicensedItemRestGet +from models_library.api_schemas_webserver.licensed_items_purchases import ( + LicensedItemPurchaseGet, +) +from models_library.rest_error import EnvelopedError +from models_library.rest_pagination import Page +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.licenses._common.exceptions_handlers import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.licenses._common.models import ( + LicensedItemsBodyParams, + LicensedItemsListQueryParams, + LicensedItemsPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "licenses", + "catalog", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.get( + "/catalog/licensed-items", + response_model=Page[LicensedItemRestGet], +) +async def list_licensed_items( + _query: Annotated[as_query(LicensedItemsListQueryParams), Depends()], +): + ... + + +@router.post( + "/catalog/licensed-items/{licensed_item_id}:purchase", + response_model=LicensedItemPurchaseGet, +) +async def purchase_licensed_item( + _path: Annotated[LicensedItemsPathParams, Depends()], + _body: LicensedItemsBodyParams, +): + ... diff --git a/api/specs/web-server/_licensed_items_checkouts.py b/api/specs/web-server/_licensed_items_checkouts.py new file mode 100644 index 00000000000..85ebe8073db --- /dev/null +++ b/api/specs/web-server/_licensed_items_checkouts.py @@ -0,0 +1,59 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends +from models_library.api_schemas_webserver.licensed_items_purchases import ( + LicensedItemPurchaseGet, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from models_library.rest_pagination import Page +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.licenses._common.exceptions_handlers import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.licenses._licensed_items_checkouts_models import ( + LicensedItemCheckoutPathParams, + LicensedItemsCheckoutsListQueryParams, +) +from simcore_service_webserver.wallets._handlers import WalletsPathParams + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "licenses", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.get( + "/wallets/{wallet_id}/licensed-items-checkouts", + response_model=Page[LicensedItemPurchaseGet], + tags=["wallets"], +) +async def list_licensed_item_checkouts_for_wallet( + _path: Annotated[WalletsPathParams, Depends()], + _query: Annotated[as_query(LicensedItemsCheckoutsListQueryParams), Depends()], +): + ... 
+ + +@router.get( + "/licensed-items-checkouts/{licensed_item_checkout_id}", + response_model=Envelope[LicensedItemPurchaseGet], +) +async def get_licensed_item_checkout( + _path: Annotated[LicensedItemCheckoutPathParams, Depends()], +): + ... diff --git a/api/specs/web-server/_licensed_items_purchases.py b/api/specs/web-server/_licensed_items_purchases.py new file mode 100644 index 00000000000..7283f424df9 --- /dev/null +++ b/api/specs/web-server/_licensed_items_purchases.py @@ -0,0 +1,59 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends +from models_library.api_schemas_webserver.licensed_items_purchases import ( + LicensedItemPurchaseGet, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from models_library.rest_pagination import Page +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.licenses._common.exceptions_handlers import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.licenses._common.models import ( + LicensedItemsPurchasesListQueryParams, + LicensedItemsPurchasesPathParams, +) +from simcore_service_webserver.wallets._handlers import WalletsPathParams + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "licenses", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.get( + "/wallets/{wallet_id}/licensed-items-purchases", + response_model=Page[LicensedItemPurchaseGet], + tags=["wallets"], +) +async def list_wallet_licensed_items_purchases( + _path: Annotated[WalletsPathParams, Depends()], + _query: Annotated[as_query(LicensedItemsPurchasesListQueryParams), Depends()], +): + ... + + +@router.get( + "/licensed-items-purchases/{licensed_item_purchase_id}", + response_model=Envelope[LicensedItemPurchaseGet], +) +async def get_licensed_item_purchase( + _path: Annotated[LicensedItemsPurchasesPathParams, Depends()], +): + ... diff --git a/api/specs/web-server/_long_running_tasks.py b/api/specs/web-server/_long_running_tasks.py new file mode 100644 index 00000000000..f204c1de5b4 --- /dev/null +++ b/api/specs/web-server/_long_running_tasks.py @@ -0,0 +1,74 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, status +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from servicelib.aiohttp.long_running_tasks._routes import _PathParam +from servicelib.long_running_tasks._models import TaskGet, TaskStatus +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.tasks._exception_handlers import ( + _TO_HTTP_ERROR_MAP as export_data_http_error_map, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "long-running-tasks", + ], +) + +_export_data_responses: dict[int | str, dict[str, Any]] = { + i.status_code: {"model": EnvelopedError} + for i in export_data_http_error_map.values() +} + + +@router.get( + "/tasks", + response_model=Envelope[list[TaskGet]], + name="list_tasks", + description="Lists all long running tasks", + responses=_export_data_responses, +) +def get_async_jobs(): ... 
+ + +@router.get( + "/tasks/{task_id}", + response_model=Envelope[TaskStatus], + name="get_task_status", + description="Retrieves the status of a task", + responses=_export_data_responses, +) +def get_async_job_status( + _path_params: Annotated[_PathParam, Depends()], +): ... + + +@router.delete( + "/tasks/{task_id}", + name="cancel_and_delete_task", + description="Cancels and deletes a task", + responses=_export_data_responses, + status_code=status.HTTP_204_NO_CONTENT, +) +def cancel_async_job( + _path_params: Annotated[_PathParam, Depends()], +): ... + + +@router.get( + "/tasks/{task_id}/result", + name="get_task_result", + description="Retrieves the result of a task", + responses=_export_data_responses, +) +def get_async_job_result( + _path_params: Annotated[_PathParam, Depends()], +): ... diff --git a/api/specs/web-server/_long_running_tasks_legacy.py b/api/specs/web-server/_long_running_tasks_legacy.py new file mode 100644 index 00000000000..fcbe3508c4d --- /dev/null +++ b/api/specs/web-server/_long_running_tasks_legacy.py @@ -0,0 +1,61 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.generics import Envelope +from servicelib.aiohttp.long_running_tasks._routes import _PathParam +from servicelib.long_running_tasks._models import TaskGet, TaskStatus +from simcore_service_webserver._meta import API_VTAG + +router = APIRouter( + prefix=f"/{API_VTAG}/tasks-legacy", + tags=[ + "long-running-tasks-legacy", + ], +) + + +@router.get( + "", + response_model=Envelope[list[TaskGet]], + name="list_tasks", + description="Lists all long running tasks", +) +def list_tasks(): ... + + +@router.get( + "/{task_id}", + response_model=Envelope[TaskStatus], + name="get_task_status", + description="Retrieves the status of a task", +) +def get_task_status( + _path_params: Annotated[_PathParam, Depends()], +): ... + + +@router.delete( + "/{task_id}", + name="cancel_and_delete_task", + description="Cancels and deletes a task", + status_code=status.HTTP_204_NO_CONTENT, +) +def cancel_and_delete_task( + _path_params: Annotated[_PathParam, Depends()], +): ... + + +@router.get( + "/{task_id}/result", + name="get_task_result", + description="Retrieves the result of a task", +) +def get_task_result( + _path_params: Annotated[_PathParam, Depends()], +): ... 
diff --git a/api/specs/web-server/_nih_sparc.py b/api/specs/web-server/_nih_sparc.py
new file mode 100644
index 00000000000..7d457be5578
--- /dev/null
+++ b/api/specs/web-server/_nih_sparc.py
@@ -0,0 +1,57 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from fastapi import APIRouter
+from models_library.generics import Envelope
+from simcore_service_webserver._meta import API_VTAG
+from simcore_service_webserver.studies_dispatcher._rest_handlers import (
+    ServiceGet,
+    Viewer,
+)
+
+router = APIRouter(
+    prefix=f"/{API_VTAG}",
+    tags=[
+        "nih-sparc",
+    ],
+)
+
+
+@router.get(
+    "/services",
+    response_model=Envelope[list[ServiceGet]],
+)
+async def list_latest_services():
+    """Returns a list of the latest version of services"""
+
+
+@router.get(
+    "/viewers",
+    response_model=Envelope[list[Viewer]],
+)
+async def list_viewers(file_type: str | None = None):
+    """Lists all publicly available viewers
+
+    Notice that this might contain multiple services for the same filetype
+
+    If file_type is provided, then it filters viewers for that filetype
+    """
+
+
+@router.get(
+    "/viewers/default",
+    response_model=Envelope[list[Viewer]],
+)
+async def list_default_viewers(file_type: str | None = None):
+    """Lists the default viewer for each supported filetype
+
+    This was interfaced as a subcollection of viewers because it is a very common use-case
+
+    Only publicly available viewers
+
+    If file_type is provided, then it filters viewers for that filetype
+    """
diff --git a/api/specs/web-server/_nih_sparc_redirections.py b/api/specs/web-server/_nih_sparc_redirections.py
new file mode 100644
index 00000000000..df1693e2877
--- /dev/null
+++ b/api/specs/web-server/_nih_sparc_redirections.py
@@ -0,0 +1,49 @@
+""" Helper script to generate OAS automatically for the NIH-sparc portal API section
+"""
+
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from fastapi import APIRouter, status
+from fastapi.responses import RedirectResponse
+from models_library.projects import ProjectID
+from models_library.services import ServiceKey, ServiceKeyVersion
+from pydantic import HttpUrl, PositiveInt
+
+router = APIRouter(
+    prefix="",  # NOTE: no API vtag!
+    tags=[
+        "nih-sparc",
+    ],
+)
+
+
+@router.get(
+    "/view",
+    response_class=RedirectResponse,
+    response_description="Opens osparc and starts viewer for selected data",
+    status_code=status.HTTP_302_FOUND,
+)
+async def get_redirection_to_viewer(
+    file_type: str,
+    viewer_key: ServiceKey,
+    viewer_version: ServiceKeyVersion,
+    file_size: PositiveInt,
+    download_link: HttpUrl,
+    file_name: str | None = "unknown",
+):
+    """Opens a viewer in osparc for data in the NIH-sparc portal"""
+
+
+@router.get(
+    "/study/{id}",
+    response_class=RedirectResponse,
+    response_description="Opens osparc and opens a copy of the published study",
+    status_code=status.HTTP_302_FOUND,
+)
+async def get_redirection_to_study_page(id: ProjectID):
+    """Opens a study published in osparc"""
diff --git a/api/specs/web-server/_products.py b/api/specs/web-server/_products.py
new file mode 100644
index 00000000000..a77f50f3193
--- /dev/null
+++ b/api/specs/web-server/_products.py
@@ -0,0 +1,64 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from typing import Annotated
+
+from fastapi import APIRouter, Depends
+from models_library.api_schemas_webserver.products import (
+    CreditPriceGet,
+    InvitationGenerate,
+    InvitationGenerated,
+    ProductGet,
+    ProductUIGet,
+)
+from models_library.generics import Envelope
+from simcore_service_webserver._meta import API_VTAG
+from simcore_service_webserver.products._controller.rest_schemas import (
+    ProductsRequestParams,
+)
+
+router = APIRouter(
+    prefix=f"/{API_VTAG}",
+    tags=[
+        "products",
+    ],
+)
+
+
+@router.get(
+    "/credits-price",
+    response_model=Envelope[CreditPriceGet],
+)
+async def get_current_product_price(): ...
+
+
+@router.get(
+    "/products/{product_name}",
+    response_model=Envelope[ProductGet],
+    description="NOTE: `/products/current` is used to define the current product w/o naming it",
+    tags=[
+        "po",
+    ],
+)
+async def get_product(_params: Annotated[ProductsRequestParams, Depends()]): ...
+
+
+@router.get(
+    "/products/current/ui",
+    response_model=Envelope[ProductUIGet],
+)
+async def get_current_product_ui(): ...
+
+
+@router.post(
+    "/invitation:generate",
+    response_model=Envelope[InvitationGenerated],
+    tags=[
+        "po",
+    ],
+)
+async def generate_invitation(_body: InvitationGenerate): ...
diff --git a/api/specs/web-server/_projects.py b/api/specs/web-server/_projects.py new file mode 100644 index 00000000000..87a5cea975e --- /dev/null +++ b/api/specs/web-server/_projects.py @@ -0,0 +1,164 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, Header, status +from models_library.api_schemas_directorv2.dynamic_services import ( + GetProjectInactivityResponse, +) +from models_library.api_schemas_long_running_tasks.tasks import TaskGet +from models_library.api_schemas_webserver.projects import ( + ProjectCopyOverride, + ProjectCreateNew, + ProjectGet, + ProjectListItem, + ProjectPatch, +) +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rest_error import EnvelopedError +from models_library.rest_pagination import Page +from pydantic import BaseModel +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller._rest_exceptions import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.projects._controller._rest_schemas import ( + ProjectPathParams, +) +from simcore_service_webserver.projects._controller.projects_rest_schemas import ( + ProjectActiveQueryParams, + ProjectCreateQueryParams, + ProjectsListQueryParams, + ProjectsSearchQueryParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +class _ProjectCreateHeaderParams(BaseModel): + x_simcore_user_agent: Annotated[ + str | None, Header(description="Optional simcore user agent") + ] = "undefined" + x_simcore_parent_project_uuid: Annotated[ + ProjectID | None, + Header( + description="Optionally sets a parent project UUID (both project and node must be set)", + ), + ] = None + x_simcore_parent_node_id: Annotated[ + NodeID | None, + Header( + description="Optionally sets a parent node ID (both project and node must be set)", + ), + ] = None + + +@router.post( + "/projects", + response_model=Envelope[TaskGet], + description="Creates a new project or copies an existing one. " + "NOTE: implemented as a long running task, " + "i.e. requires polling `status_href` (HTTP_200_OK) to get status and `result_href` (HTTP_201_CREATED) to get created project", + status_code=status.HTTP_202_ACCEPTED, +) +async def create_project( + _h: Annotated[_ProjectCreateHeaderParams, Depends()], + _query: Annotated[ProjectCreateQueryParams, Depends()], + _body: ProjectCreateNew | ProjectCopyOverride, +): ... + + +@router.get( + "/projects", + response_model=Page[ProjectListItem], +) +async def list_projects( + _query: Annotated[as_query(ProjectsListQueryParams), Depends()], +): ... + + +@router.get( + "/projects/active", + response_model=Envelope[ProjectGet], +) +async def get_active_project( + _query: Annotated[ProjectActiveQueryParams, Depends()], +): ... + + +@router.get( + "/projects/{project_id}", + response_model=Envelope[ProjectGet], +) +async def get_project( + _path: Annotated[ProjectPathParams, Depends()], +): ... 
+
+
+@router.patch(
+    "/projects/{project_id}",
+    response_model=None,
+    status_code=status.HTTP_204_NO_CONTENT,
+)
+async def patch_project(
+    _path: Annotated[ProjectPathParams, Depends()],
+    _body: ProjectPatch,
+): ...
+
+
+@router.delete(
+    "/projects/{project_id}",
+    status_code=status.HTTP_204_NO_CONTENT,
+)
+async def delete_project(
+    _path: Annotated[ProjectPathParams, Depends()],
+): ...
+
+
+@router.post(
+    "/projects/{project_id}:clone",
+    response_model=Envelope[TaskGet],
+    status_code=status.HTTP_201_CREATED,
+)
+async def clone_project(
+    _path: Annotated[ProjectPathParams, Depends()],
+): ...
+
+
+@router.get(
+    "/projects:search",
+    response_model=Page[ProjectListItem],
+)
+async def list_projects_full_search(
+    _query: Annotated[as_query(ProjectsSearchQueryParams), Depends()],
+): ...
+
+
+@router.get(
+    "/projects/{project_id}/inactivity",
+    response_model=Envelope[GetProjectInactivityResponse],
+    status_code=status.HTTP_200_OK,
+)
+async def get_project_inactivity(
+    _path: Annotated[ProjectPathParams, Depends()],
+): ...
diff --git a/api/specs/web-server/_projects_access_rights.py b/api/specs/web-server/_projects_access_rights.py
new file mode 100644
index 00000000000..3b71a23723d
--- /dev/null
+++ b/api/specs/web-server/_projects_access_rights.py
@@ -0,0 +1,79 @@
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+
+from typing import Annotated
+
+from fastapi import APIRouter, Depends, status
+from models_library.api_schemas_webserver.projects_access_rights import (
+    ProjectsGroupsBodyParams,
+    ProjectsGroupsPathParams,
+    ProjectShare,
+    ProjectShareAccepted,
+)
+from models_library.generics import Envelope
+from simcore_service_webserver._meta import API_VTAG
+from simcore_service_webserver.projects._controller._rest_schemas import (
+    ProjectPathParams,
+)
+from simcore_service_webserver.projects._groups_service import ProjectGroupGet
+
+router = APIRouter(
+    prefix=f"/{API_VTAG}",
+    tags=["projects", "groups"],
+)
+
+
+@router.post(
+    "/projects/{project_id}:share",
+    response_model=Envelope[ProjectShareAccepted],
+    status_code=status.HTTP_202_ACCEPTED,
+    responses={
+        status.HTTP_202_ACCEPTED: {
+            "description": "The request to share the project has been accepted, but the actual sharing process has to be confirmed."
+        }
+    },
+)
+async def share_project(
+    _path: Annotated[ProjectPathParams, Depends()],
+    _body: ProjectShare,
+): ...
+
+
+@router.post(
+    "/projects/{project_id}/groups/{group_id}",
+    response_model=Envelope[ProjectGroupGet],
+    status_code=status.HTTP_201_CREATED,
+)
+async def create_project_group(
+    _path: Annotated[ProjectsGroupsPathParams, Depends()],
+    _body: ProjectsGroupsBodyParams,
+): ...
+
+
+@router.get(
+    "/projects/{project_id}/groups",
+    response_model=Envelope[list[ProjectGroupGet]],
+)
+async def list_project_groups(_path: Annotated[ProjectPathParams, Depends()]): ...
+
+
+@router.put(
+    "/projects/{project_id}/groups/{group_id}",
+    response_model=Envelope[ProjectGroupGet],
+)
+async def replace_project_group(
+    _path: Annotated[ProjectsGroupsPathParams, Depends()],
+    _body: ProjectsGroupsBodyParams,
+): ...
+
+
+@router.delete(
+    "/projects/{project_id}/groups/{group_id}",
+    status_code=status.HTTP_204_NO_CONTENT,
+)
+async def delete_project_group(
+    _path: Annotated[ProjectsGroupsPathParams, Depends()],
+): ...
diff --git a/api/specs/web-server/_projects_comments.py b/api/specs/web-server/_projects_comments.py new file mode 100644 index 00000000000..04ad1f1fa43 --- /dev/null +++ b/api/specs/web-server/_projects_comments.py @@ -0,0 +1,116 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Literal + +from _common import assert_handler_signature_against_model +from fastapi import APIRouter, status +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.projects_comments import CommentID, ProjectsCommentsAPI +from pydantic import NonNegativeInt +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.comments_rest import ( + _ProjectCommentsBodyParams, + _ProjectCommentsPathParams, + _ProjectCommentsWithCommentPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "comments", + ], +) + + +# +# API entrypoints +# + + +@router.post( + "/projects/{project_uuid}/comments", + response_model=Envelope[dict[Literal["comment_id"], CommentID]], + description="Create a new comment for a specific project. The request body should contain the comment contents and user information.", + status_code=status.HTTP_201_CREATED, + deprecated=True, +) +async def create_project_comment( + project_uuid: ProjectID, body: _ProjectCommentsBodyParams +): ... + + +assert_handler_signature_against_model( + create_project_comment, _ProjectCommentsPathParams +) + + +@router.get( + "/projects/{project_uuid}/comments", + response_model=Envelope[list[ProjectsCommentsAPI]], + description="Retrieve all comments for a specific project.", + deprecated=True, +) +async def list_project_comments( + project_uuid: ProjectID, limit: int = 20, offset: NonNegativeInt = 0 +): ... + + +assert_handler_signature_against_model( + list_project_comments, _ProjectCommentsPathParams +) + + +@router.put( + "/projects/{project_uuid}/comments/{comment_id}", + response_model=Envelope[ProjectsCommentsAPI], + description="Update the contents of a specific comment for a project. The request body should contain the updated comment contents.", + deprecated=True, +) +async def update_project_comment( + project_uuid: ProjectID, + comment_id: CommentID, + body: _ProjectCommentsBodyParams, +): ... + + +assert_handler_signature_against_model( + update_project_comment, _ProjectCommentsWithCommentPathParams +) + + +@router.delete( + "/projects/{project_uuid}/comments/{comment_id}", + description="Delete a specific comment associated with a project.", + status_code=status.HTTP_204_NO_CONTENT, + deprecated=True, +) +async def delete_project_comment(project_uuid: ProjectID, comment_id: CommentID): ... + + +assert_handler_signature_against_model( + delete_project_comment, _ProjectCommentsWithCommentPathParams +) + + +@router.get( + "/projects/{project_uuid}/comments/{comment_id}", + response_model=Envelope[ProjectsCommentsAPI], + description="Retrieve a specific comment by its ID within a project.", + deprecated=True, +) +async def get_project_comment(project_uuid: ProjectID, comment_id: CommentID): ... 
+ + +assert_handler_signature_against_model( + get_project_comment, _ProjectCommentsWithCommentPathParams +) diff --git a/api/specs/web-server/_projects_conversations.py b/api/specs/web-server/_projects_conversations.py new file mode 100644 index 00000000000..044800772fd --- /dev/null +++ b/api/specs/web-server/_projects_conversations.py @@ -0,0 +1,148 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.projects_conversations import ( + ConversationMessageRestGet, + ConversationRestGet, +) +from models_library.generics import Envelope +from models_library.rest_pagination import Page +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller._rest_schemas import ( + ProjectPathParams, +) +from simcore_service_webserver.projects._controller.conversations_rest import ( + _ListProjectConversationMessagesQueryParams, + _ListProjectConversationsQueryParams, + _ProjectConversationMessagesCreateBodyParams, + _ProjectConversationMessagesPutBodyParams, + _ProjectConversationsCreateBodyParams, + _ProjectConversationsMessagesPathParams, + _ProjectConversationsPathParams, + _ProjectConversationsPutBodyParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "conversations", + ], +) + + +# +# API entrypoints PROJECTS/*/CONVERSATIONS/* +# + + +@router.post( + "/projects/{project_id}/conversations", + response_model=Envelope[ConversationRestGet], + status_code=status.HTTP_201_CREATED, +) +async def create_project_conversation( + _params: Annotated[ProjectPathParams, Depends()], + _body: _ProjectConversationsCreateBodyParams, +): ... + + +@router.get( + "/projects/{project_id}/conversations", + response_model=Page[ConversationRestGet], +) +async def list_project_conversations( + _params: Annotated[ProjectPathParams, Depends()], + _query: Annotated[_ListProjectConversationsQueryParams, Depends()], +): ... + + +@router.put( + "/projects/{project_id}/conversations/{conversation_id}", + response_model=Envelope[ConversationRestGet], +) +async def update_project_conversation( + _params: Annotated[_ProjectConversationsPathParams, Depends()], + _body: _ProjectConversationsPutBodyParams, +): ... + + +@router.delete( + "/projects/{project_id}/conversations/{conversation_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_project_conversation( + _params: Annotated[_ProjectConversationsPathParams, Depends()], +): ... + + +@router.get( + "/projects/{project_id}/conversations/{conversation_id}", + response_model=Envelope[ConversationRestGet], +) +async def get_project_conversation( + _params: Annotated[_ProjectConversationsPathParams, Depends()], +): ... + + +### Conversation Messages + + +@router.post( + "/projects/{project_id}/conversations/{conversation_id}/messages", + response_model=Envelope[ConversationMessageRestGet], + status_code=status.HTTP_201_CREATED, +) +async def create_project_conversation_message( + _params: Annotated[_ProjectConversationsPathParams, Depends()], + _body: _ProjectConversationMessagesCreateBodyParams, +): ... 
+ + +@router.get( + "/projects/{project_id}/conversations/{conversation_id}/messages", + response_model=Page[ConversationMessageRestGet], +) +async def list_project_conversation_messages( + _params: Annotated[_ProjectConversationsPathParams, Depends()], + _query: Annotated[_ListProjectConversationMessagesQueryParams, Depends()], +): ... + + +@router.put( + "/projects/{project_id}/conversations/{conversation_id}/messages/{message_id}", + response_model=Envelope[ConversationMessageRestGet], +) +async def update_project_conversation_message( + _params: Annotated[_ProjectConversationsMessagesPathParams, Depends()], + _body: _ProjectConversationMessagesPutBodyParams, +): ... + + +@router.delete( + "/projects/{project_id}/conversations/{conversation_id}/messages/{message_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_project_conversation_message( + _params: Annotated[_ProjectConversationsMessagesPathParams, Depends()], +): ... + + +@router.get( + "/projects/{project_id}/conversations/{conversation_id}/messages/{message_id}", + response_model=Envelope[ConversationMessageRestGet], +) +async def get_project_conversation_message( + _params: Annotated[_ProjectConversationsMessagesPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_projects_folders.py b/api/specs/web-server/_projects_folders.py new file mode 100644 index 00000000000..f3c5b337b49 --- /dev/null +++ b/api/specs/web-server/_projects_folders.py @@ -0,0 +1,33 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.folders_rest import ( + _ProjectsFoldersPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=["projects", "folders"], +) + + +@router.put( + "/projects/{project_id}/folders/{folder_id}", + status_code=status.HTTP_204_NO_CONTENT, + description="Move project to the folder", +) +async def replace_project_folder( + _path: Annotated[_ProjectsFoldersPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_projects_metadata.py b/api/specs/web-server/_projects_metadata.py new file mode 100644 index 00000000000..0e97d475aa3 --- /dev/null +++ b/api/specs/web-server/_projects_metadata.py @@ -0,0 +1,48 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.projects_metadata import ( + ProjectMetadataGet, + ProjectMetadataUpdate, +) +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.metadata_rest import ( + ProjectPathParams, +) + +router = APIRouter(prefix=f"/{API_VTAG}", tags=["projects", "metadata"]) + + +# +# API entrypoints +# + + +@router.get( + "/projects/{project_id}/metadata", + response_model=Envelope[ProjectMetadataGet], + status_code=status.HTTP_200_OK, +) +async def get_project_metadata(_params: Annotated[ProjectPathParams, Depends()]): ... 
+ + +@router.patch( + "/projects/{project_id}/metadata", + response_model=Envelope[ProjectMetadataGet], + status_code=status.HTTP_200_OK, +) +async def update_project_metadata( + _params: Annotated[ProjectPathParams, Depends()], _body: ProjectMetadataUpdate +): ... diff --git a/api/specs/web-server/_projects_nodes.py b/api/specs/web-server/_projects_nodes.py new file mode 100644 index 00000000000..454d7c4f733 --- /dev/null +++ b/api/specs/web-server/_projects_nodes.py @@ -0,0 +1,210 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from _common import assert_handler_signature_against_model +from fastapi import APIRouter, status +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_long_running_tasks.tasks import TaskGet +from models_library.api_schemas_webserver.projects_nodes import ( + NodeCreate, + NodeCreated, + NodeGet, + NodeGetIdle, + NodeGetUnknown, + NodeOutputs, + NodePatch, + NodeRetrieve, + NodeRetrieved, + ProjectNodeServicesGet, + ServiceResourcesDict, +) +from models_library.generics import Envelope +from models_library.groups import GroupID +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.nodes_rest import ( + NodePathParams, + _ProjectGroupAccess, + _ProjectNodePreview, +) +from simcore_service_webserver.projects._controller.projects_rest import ( + ProjectPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "nodes", + ], +) + +# projects/*/nodes COLLECTION ------------------------- + + +@router.post( + "/projects/{project_id}/nodes", + response_model=Envelope[NodeCreated], + status_code=status.HTTP_201_CREATED, +) +def create_node(project_id: str, body: NodeCreate): # noqa: ARG001 + ... + + +@router.get( + # issues with this endpoint https://github.com/ITISFoundation/osparc-simcore/issues/5245 + "/projects/{project_id}/nodes/{node_id}", + response_model=Envelope[NodeGetIdle | NodeGetUnknown | DynamicServiceGet | NodeGet], +) +def get_node(project_id: str, node_id: str): # noqa: ARG001 + ... + + +@router.delete( + "/projects/{project_id}/nodes/{node_id}", + response_model=None, + status_code=status.HTTP_204_NO_CONTENT, +) +def delete_node(project_id: str, node_id: str): # noqa: ARG001 + ... + + +@router.post( + "/projects/{project_id}/nodes/{node_id}:retrieve", + response_model=Envelope[NodeRetrieved], +) +def retrieve_node( + project_id: str, node_id: str, _retrieve: NodeRetrieve # noqa: ARG001 +): ... + + +@router.post( + "/projects/{project_id}/nodes/{node_id}:start", + status_code=status.HTTP_204_NO_CONTENT, + response_model=None, +) +def start_node(project_id: str, node_id: str): # noqa: ARG001 + ... + + +@router.post( + "/projects/{project_id}/nodes/{node_id}:stop", + response_model=Envelope[TaskGet], +) +def stop_node(project_id: str, node_id: str): # noqa: ARG001 + ... 
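Nearly every response above is wrapped in `Envelope[...]` from `models_library.generics`, which is not shown in this diff. Judging from the enveloped YAML schemas removed at the end of this patch (a `data` payload plus a nullable `error`), a rough generic sketch, with assumed field names, would be:

# Rough sketch only: the real models_library.generics.Envelope is not shown in
# this diff and may carry additional validation or helper constructors.
from typing import Any, Generic, TypeVar

from pydantic import BaseModel

DataT = TypeVar("DataT")


class EnvelopeSketch(BaseModel, Generic[DataT]):
    data: DataT | None = None
    error: Any | None = None

With this shape, a response model such as `Envelope[ProjectMetadataGet]` renders in the OAS as an object whose `data` holds the metadata schema next to a nullable `error`.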
+ + +@router.post( + "/projects/{project_id}/nodes/{node_id}:restart", + response_model=None, + status_code=status.HTTP_204_NO_CONTENT, +) +def restart_node(project_id: str, node_id: str): # noqa: ARG001 + """Note that it has only effect on nodes associated to dynamic services""" + + +@router.patch( + "/projects/{project_id}/nodes/{node_id}/outputs", + response_model=None, + status_code=status.HTTP_204_NO_CONTENT, +) +def update_node_outputs( + project_id: str, node_id: str, _new: NodeOutputs +): # noqa: ARG001 + ... + + +@router.patch( + "/projects/{project_id}/nodes/{node_id}", + response_model=None, + status_code=status.HTTP_204_NO_CONTENT, +) +def patch_project_node( + project_id: ProjectID, node_id: str, _new: NodePatch +): # noqa: ARG001 + ... + + +# +# projects/*/nodes/*/resources COLLECTION ------------------------- +# + + +@router.get( + "/projects/{project_id}/nodes/{node_id}/resources", + response_model=Envelope[ServiceResourcesDict], +) +def get_node_resources(project_id: str, node_id: str): # noqa: ARG001 + ... + + +@router.put( + "/projects/{project_id}/nodes/{node_id}/resources", + response_model=Envelope[ServiceResourcesDict], +) +def replace_node_resources( + project_id: str, node_id: str, _new: ServiceResourcesDict # noqa: ARG001 +): ... + + +# +# projects/*/nodes/-/services +# + + +@router.get( + "/projects/{project_id}/nodes/-/services", + response_model=Envelope[ProjectNodeServicesGet], +) +async def get_project_services(project_id: ProjectID): ... + + +@router.get( + "/projects/{project_id}/nodes/-/services:access", + response_model=Envelope[_ProjectGroupAccess], + description="Check whether provided group has access to the project services", +) +async def get_project_services_access_for_gid( + project_id: ProjectID, for_gid: GroupID # noqa: ARG001 +): ... + + +assert_handler_signature_against_model( + get_project_services_access_for_gid, ProjectPathParams +) + + +# +# projects/*/nodes/-/preview +# + + +@router.get( + "/projects/{project_id}/nodes/-/preview", + response_model=Envelope[list[_ProjectNodePreview]], + description="Lists all previews in the node's project", +) +async def list_project_nodes_previews(project_id: ProjectID): # noqa: ARG001 + ... + + +assert_handler_signature_against_model(list_project_nodes_previews, ProjectPathParams) + + +@router.get( + "/projects/{project_id}/nodes/{node_id}/preview", + response_model=Envelope[_ProjectNodePreview], + description="Gets a give node's preview", + responses={status.HTTP_404_NOT_FOUND: {"description": "Node has no preview"}}, +) +async def get_project_node_preview( + project_id: ProjectID, node_id: NodeID # noqa: ARG001 +): ... 
+ + +assert_handler_signature_against_model(get_project_node_preview, NodePathParams) diff --git a/api/specs/web-server/_projects_nodes_pricing_unit.py b/api/specs/web-server/_projects_nodes_pricing_unit.py new file mode 100644 index 00000000000..91c7f0cdb19 --- /dev/null +++ b/api/specs/web-server/_projects_nodes_pricing_unit.py @@ -0,0 +1,59 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from _common import assert_handler_signature_against_model +from fastapi import APIRouter, status +from models_library.api_schemas_webserver.resource_usage import PricingUnitGet +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.resource_tracker import PricingPlanId, PricingUnitId +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.nodes_pricing_unit_rest import ( + _ProjectNodePricingUnitPathParams, +) +from simcore_service_webserver.projects._controller.nodes_rest import NodePathParams + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + ], +) + + +@router.get( + "/projects/{project_id}/nodes/{node_id}/pricing-unit", + response_model=Envelope[PricingUnitGet | None], + description="Get currently connected pricing unit to the project node.", +) +async def get_project_node_pricing_unit(project_id: ProjectID, node_id: NodeID): ... + + +assert_handler_signature_against_model(get_project_node_pricing_unit, NodePathParams) + + +@router.put( + "/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", + description="Connect pricing unit to the project node (Project node can have only one pricing unit)", + status_code=status.HTTP_204_NO_CONTENT, +) +async def connect_pricing_unit_to_project_node( + project_id: ProjectID, + node_id: NodeID, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, +): ... 
+ + +assert_handler_signature_against_model( + connect_pricing_unit_to_project_node, _ProjectNodePricingUnitPathParams +) diff --git a/api/specs/web-server/_projects_ports.py b/api/specs/web-server/_projects_ports.py new file mode 100644 index 00000000000..a5874b8d071 --- /dev/null +++ b/api/specs/web-server/_projects_ports.py @@ -0,0 +1,59 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from fastapi import APIRouter +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.projects_nodes import NodeID +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.ports_rest import ( + ProjectInputGet, + ProjectInputUpdate, + ProjectMetadataPortGet, + ProjectOutputGet, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "ports", + ], +) + + +@router.get( + "/projects/{project_id}/inputs", + response_model=Envelope[dict[NodeID, ProjectInputGet]], +) +async def get_project_inputs(project_id: ProjectID): + """New in version *0.10*""" + + +@router.patch( + "/projects/{project_id}/inputs", + response_model=Envelope[dict[NodeID, ProjectInputGet]], +) +async def update_project_inputs( + project_id: ProjectID, _updates: list[ProjectInputUpdate] +): + """New in version *0.10*""" + + +@router.get( + "/projects/{project_id}/outputs", + response_model=Envelope[dict[NodeID, ProjectOutputGet]], +) +async def get_project_outputs(project_id: ProjectID): + """New in version *0.10*""" + + +@router.get( + "/projects/{project_id}/metadata/ports", + response_model=Envelope[list[ProjectMetadataPortGet]], +) +async def list_project_metadata_ports(project_id: ProjectID): + """New in version *0.12*""" diff --git a/api/specs/web-server/_projects_states.py b/api/specs/web-server/_projects_states.py new file mode 100644 index 00000000000..5c1b2a5299b --- /dev/null +++ b/api/specs/web-server/_projects_states.py @@ -0,0 +1,86 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Body, Depends +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.generics import Envelope +from models_library.projects_state import ProjectState +from pydantic import ValidationError +from servicelib.aiohttp import status +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.director_v2.exceptions import DirectorServiceError +from simcore_service_webserver.projects._controller.projects_states_rest import ( + ProjectPathParams, + _OpenProjectQuery, +) +from simcore_service_webserver.projects.exceptions import ( + ProjectInvalidRightsError, + ProjectNotFoundError, + ProjectTooManyProjectOpenedError, +) +from simcore_service_webserver.users.exceptions import UserDefaultWalletNotFoundError +from simcore_service_webserver.wallets.errors import WalletNotEnoughCreditsError + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + ], +) + + +def to_desc(exceptions: list[type[Exception]] | type[Exception]): + exc_classes = [exceptions] if not isinstance(exceptions, list) else exceptions + return ", ".join(f"{cls.__name__}" for cls in exc_classes) + + +@router.post( + "/projects/{project_id}:open", + response_model=Envelope[ProjectGet], + responses={ + 
status.HTTP_400_BAD_REQUEST: {"description": to_desc([ValidationError])}, + status.HTTP_402_PAYMENT_REQUIRED: { + "description": to_desc([WalletNotEnoughCreditsError]) + }, + status.HTTP_403_FORBIDDEN: { + "description": to_desc([ProjectInvalidRightsError]) + }, + status.HTTP_404_NOT_FOUND: { + "description": to_desc( + [ProjectNotFoundError, UserDefaultWalletNotFoundError] + ) + }, + status.HTTP_409_CONFLICT: { + "description": to_desc([ProjectTooManyProjectOpenedError]), + }, + status.HTTP_422_UNPROCESSABLE_ENTITY: { + "description": to_desc([ValidationError]) + }, + status.HTTP_503_SERVICE_UNAVAILABLE: { + "description": to_desc([DirectorServiceError]) + }, + }, +) +def open_project( + client_session_id: Annotated[str, Body(...)], + _path_params: Annotated[ProjectPathParams, Depends()], + _query_params: Annotated[_OpenProjectQuery, Depends()], +): ... + + +@router.post("/projects/{project_id}:close", status_code=status.HTTP_204_NO_CONTENT) +def close_project( + _path_params: Annotated[ProjectPathParams, Depends()], + client_session_id: Annotated[str, Body(...)], +): ... + + +@router.get("/projects/{project_id}/state", response_model=Envelope[ProjectState]) +def get_project_state( + _path_params: Annotated[ProjectPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_projects_tags.py b/api/specs/web-server/_projects_tags.py new file mode 100644 index 00000000000..71e71237ccd --- /dev/null +++ b/api/specs/web-server/_projects_tags.py @@ -0,0 +1,49 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from fastapi import APIRouter +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.generics import Envelope +from models_library.projects import ProjectID +from simcore_service_webserver._meta import API_VTAG + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + "tags", + ], +) + + +@router.post( + "/projects/{project_uuid}/tags/{tag_id}:add", + response_model=Envelope[ProjectGet], +) +def add_project_tag( + project_uuid: ProjectID, + tag_id: int, +): + """ + Links an existing label with an existing study + + NOTE: that the tag is not created here + """ + + +@router.post( + "/projects/{project_uuid}/tags/{tag_id}:remove", + response_model=Envelope[ProjectGet], +) +def remove_project_tag( + project_uuid: ProjectID, + tag_id: int, +): + """ + Removes an existing link between a label and a study + + NOTE: that the tag is not deleted here + """ diff --git a/api/specs/web-server/_projects_wallet.py b/api/specs/web-server/_projects_wallet.py new file mode 100644 index 00000000000..78878bc163e --- /dev/null +++ b/api/specs/web-server/_projects_wallet.py @@ -0,0 +1,71 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Annotated + +from _common import assert_handler_signature_against_model +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.wallets import WalletGet +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.wallets import WalletID +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller._rest_schemas import ( + ProjectPathParams, +) +from 
simcore_service_webserver.projects._controller.wallets_rest import ( + _PayProjectDebtBody, + _ProjectWalletPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "projects", + ], +) + + +@router.get( + "/projects/{project_id}/wallet", + response_model=Envelope[WalletGet | None], + description="Get current connected wallet to the project.", +) +async def get_project_wallet(project_id: ProjectID): ... + + +assert_handler_signature_against_model(get_project_wallet, ProjectPathParams) + + +@router.put( + "/projects/{project_id}/wallet/{wallet_id}", + response_model=Envelope[WalletGet], + description="Connect wallet to the project (Project can have only one wallet)", +) +async def connect_wallet_to_project( + project_id: ProjectID, + wallet_id: WalletID, +): ... + + +assert_handler_signature_against_model(connect_wallet_to_project, ProjectPathParams) + + +@router.post( + "/projects/{project_id}/wallet/{wallet_id}:pay-debt", + status_code=status.HTTP_204_NO_CONTENT, +) +async def pay_project_debt( + _path: Annotated[_ProjectWalletPathParams, Depends()], + _body: Annotated[_PayProjectDebtBody, Depends()], +): ... + + +assert_handler_signature_against_model(connect_wallet_to_project, ProjectPathParams) diff --git a/api/specs/web-server/_projects_workspaces.py b/api/specs/web-server/_projects_workspaces.py new file mode 100644 index 00000000000..17ec63fc367 --- /dev/null +++ b/api/specs/web-server/_projects_workspaces.py @@ -0,0 +1,33 @@ +"""Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.projects._controller.workspaces_rest import ( + _ProjectWorkspacesPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=["projects", "workspaces"], +) + + +@router.post( + "/projects/{project_id}/workspaces/{workspace_id}:move", + status_code=status.HTTP_204_NO_CONTENT, + description="Move project to the workspace", +) +async def move_project_to_workspace( + _path: Annotated[_ProjectWorkspacesPathParams, Depends()], +): ... 
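Handlers such as `move_project_to_workspace` above receive all `{placeholders}` through a single `Annotated[..., Depends()]` model, so FastAPI validates the whole path in one object. `_ProjectWorkspacesPathParams` itself is defined in the webserver package, outside this diff; a hypothetical stand-in that matches the route's placeholders might be:

# Hypothetical stand-in for _ProjectWorkspacesPathParams; field names follow the
# {project_id}/{workspace_id} placeholders, but the concrete types used in
# simcore_service_webserver may differ (e.g. a dedicated WorkspaceID alias).
from typing import Annotated
from uuid import UUID

from fastapi import APIRouter, Depends, status
from pydantic import BaseModel, PositiveInt

sketch_router = APIRouter()


class ProjectWorkspacesPathParamsSketch(BaseModel):
    project_id: UUID
    workspace_id: PositiveInt  # assumption: integer workspace ids


@sketch_router.post(
    "/projects/{project_id}/workspaces/{workspace_id}:move",
    status_code=status.HTTP_204_NO_CONTENT,
)
async def move_project_to_workspace_sketch(
    _path: Annotated[ProjectWorkspacesPathParamsSketch, Depends()],
): ...

Because pydantic models expose a field-based `__signature__`, FastAPI maps each model field to the matching path placeholder and documents both parameters in the generated OAS.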
diff --git a/api/specs/web-server/_publications.py b/api/specs/web-server/_publications.py new file mode 100644 index 00000000000..2d435b18545 --- /dev/null +++ b/api/specs/web-server/_publications.py @@ -0,0 +1,24 @@ +from typing import Annotated + +from fastapi import APIRouter, File, status +from simcore_service_webserver._meta import API_VTAG + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "publication", + ], +) + + +@router.post( + "/publications/service-submission", + status_code=status.HTTP_204_NO_CONTENT, +) +def service_submission( + file: Annotated[bytes, File(description="metadata.json submission file")] +): + """ + Submits files with new service candidate + """ + assert file # nosec diff --git a/api/specs/web-server/_resource_usage.py b/api/specs/web-server/_resource_usage.py new file mode 100644 index 00000000000..7e202d2b5c2 --- /dev/null +++ b/api/specs/web-server/_resource_usage.py @@ -0,0 +1,239 @@ +""" Helper script to automatically generate OAS + +This OAS are the source of truth +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + OsparcCreditsAggregatedByServiceGet, +) +from models_library.api_schemas_webserver.resource_usage import ( + ConnectServiceToPricingPlanBodyParams, + CreatePricingPlanBodyParams, + CreatePricingUnitBodyParams, + PricingPlanAdminGet, + PricingPlanGet, + PricingPlanToServiceAdminGet, + PricingUnitAdminGet, + PricingUnitGet, + ServiceRunGet, + UpdatePricingPlanBodyParams, + UpdatePricingUnitBodyParams, +) +from models_library.generics import Envelope +from models_library.rest_pagination import Page, PageQueryParameters +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.resource_usage._pricing_plans_admin_rest import ( + PricingPlanGetPathParams, + PricingUnitGetPathParams, +) +from simcore_service_webserver.resource_usage._pricing_plans_rest import ( + PricingPlanUnitGetPathParams, +) +from simcore_service_webserver.resource_usage._service_runs_rest import ( + ServicesAggregatedUsagesListQueryParams, + ServicesResourceUsagesListQueryParams, + ServicesResourceUsagesReportQueryParams, +) + +router = APIRouter(prefix=f"/{API_VTAG}") + + +@router.get( + "/services/-/resource-usages", + response_model=Page[ServiceRunGet], + description="Retrieve finished and currently running user services" + " (user and product are taken from context, optionally wallet_id parameter might be provided).", + tags=["usage"], +) +async def list_resource_usage_services( + _query: Annotated[as_query(ServicesResourceUsagesListQueryParams), Depends()], +): + ... + + +@router.get( + "/services/-/aggregated-usages", + response_model=Page[OsparcCreditsAggregatedByServiceGet], + description="Used credits based on aggregate by type, currently supported `services`" + ". (user and product are taken from context, optionally wallet_id parameter might be provided).", + tags=["usage"], +) +async def list_osparc_credits_aggregated_usages( + _query: Annotated[as_query(ServicesAggregatedUsagesListQueryParams), Depends()] +): + ... 
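The listing endpoints above return `Page[...]` from `models_library.rest_pagination`, which is outside this diff. Assuming a shape along the lines of `fastapi-pagination` (listed in the new requirements.txt further below) and the `limit`/`offset` defaults seen earlier in this patch, a rough sketch would be:

# Rough sketch of a paginated wrapper; the real Page model may expose different
# or additional metadata (links, cursors, etc.).
from typing import Generic, TypeVar

from pydantic import BaseModel, NonNegativeInt

ItemT = TypeVar("ItemT")


class PageSketch(BaseModel, Generic[ItemT]):
    items: list[ItemT]
    total: NonNegativeInt
    offset: NonNegativeInt = 0
    limit: NonNegativeInt = 20  # default mirrors the limit=20 used elsewhere in this patch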
+ + +@router.get( + "/services/-/usage-report", + status_code=status.HTTP_302_FOUND, + responses={ + status.HTTP_302_FOUND: { + "description": "redirection to download link", + } + }, + tags=["usage"], + description="Redirects to download CSV link. CSV obtains finished and currently running " + "user services (user and product are taken from context, optionally wallet_id parameter might be provided).", +) +async def export_resource_usage_services( + _query: Annotated[as_query(ServicesResourceUsagesReportQueryParams), Depends()] +): + ... + + +@router.get( + "/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}", + response_model=Envelope[PricingUnitGet], + tags=["pricing-plans"], +) +async def get_pricing_plan_unit( + _path: Annotated[PricingPlanUnitGetPathParams, Depends()], +): + ... + + +@router.get( + "/pricing-plans", + response_model=Page[PricingPlanGet], + tags=["pricing-plans"], + description="To keep the listing lightweight, the pricingUnits field is None.", +) +async def list_pricing_plans( + _query: Annotated[as_query(PageQueryParameters), Depends()] +): + ... + + +@router.get( + "/pricing-plans/{pricing_plan_id}", + response_model=Envelope[PricingPlanGet], + tags=["pricing-plans"], +) +async def get_pricing_plan( + _path: Annotated[PricingPlanGetPathParams, Depends()], +): + ... + + +## Pricing plans for Admin panel + + +@router.get( + "/admin/pricing-plans", + response_model=Page[PricingPlanAdminGet], + tags=["admin"], + description="To keep the listing lightweight, the pricingUnits field is None.", +) +async def list_pricing_plans_for_admin_user( + _query: Annotated[as_query(PageQueryParameters), Depends()] +): + ... + + +@router.get( + "/admin/pricing-plans/{pricing_plan_id}", + response_model=Envelope[PricingPlanAdminGet], + tags=["admin"], +) +async def get_pricing_plan_for_admin_user( + _path: Annotated[PricingPlanGetPathParams, Depends()], +): + ... + + +@router.post( + "/admin/pricing-plans", + response_model=Envelope[PricingPlanAdminGet], + tags=["admin"], +) +async def create_pricing_plan( + _body: CreatePricingPlanBodyParams, +): + ... + + +@router.put( + "/admin/pricing-plans/{pricing_plan_id}", + response_model=Envelope[PricingPlanAdminGet], + tags=["admin"], +) +async def update_pricing_plan( + _path: Annotated[PricingPlanGetPathParams, Depends()], + _body: UpdatePricingPlanBodyParams, +): + ... + + +## Pricing units for Admin panel + + +@router.get( + "/admin/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}", + response_model=Envelope[PricingUnitAdminGet], + tags=["admin"], +) +async def get_pricing_unit( + _path: Annotated[PricingUnitGetPathParams, Depends()], +): + ... + + +@router.post( + "/admin/pricing-plans/{pricing_plan_id}/pricing-units", + response_model=Envelope[PricingUnitAdminGet], + tags=["admin"], +) +async def create_pricing_unit( + _path: Annotated[PricingPlanGetPathParams, Depends()], + _body: CreatePricingUnitBodyParams, +): + ... + + +@router.put( + "/admin/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}", + response_model=Envelope[PricingUnitAdminGet], + tags=["admin"], +) +async def update_pricing_unit( + _path: Annotated[PricingUnitGetPathParams, Depends()], + _body: UpdatePricingUnitBodyParams, +): + ... 
+ + +## Pricing Plans to Service Admin panel + + +@router.get( + "/admin/pricing-plans/{pricing_plan_id}/billable-services", + response_model=Envelope[list[PricingPlanToServiceAdminGet]], + tags=["admin"], +) +async def list_connected_services_to_pricing_plan( + _path: Annotated[PricingPlanGetPathParams, Depends()], +): + ... + + +@router.post( + "/admin/pricing-plans/{pricing_plan_id}/billable-services", + response_model=Envelope[PricingPlanToServiceAdminGet], + tags=["admin"], +) +async def connect_service_to_pricing_plan( + _path: Annotated[PricingPlanGetPathParams, Depends()], + _body: ConnectServiceToPricingPlanBodyParams, +): + ... diff --git a/api/specs/web-server/_statics.py b/api/specs/web-server/_statics.py new file mode 100644 index 00000000000..da1a1667e02 --- /dev/null +++ b/api/specs/web-server/_statics.py @@ -0,0 +1,35 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Any + +from fastapi import APIRouter +from fastapi.responses import HTMLResponse +from simcore_service_webserver.constants import INDEX_RESOURCE_NAME +from simcore_service_webserver.statics.settings import FrontEndInfoDict + +router = APIRouter( + tags=["statics"], +) + + +@router.get("/", response_class=HTMLResponse) +async def get_cached_frontend_index(): + ... + + +assert get_cached_frontend_index.__name__ == INDEX_RESOURCE_NAME + + +class StaticFrontEndDict(FrontEndInfoDict, total=False): + issues: Any + vendor: Any + manuals: Any + + +@router.get("/static-frontend-data.json", response_model=StaticFrontEndDict) +async def static_frontend_data(): + """Generic static info on the product's app""" diff --git a/api/specs/web-server/_storage.py b/api/specs/web-server/_storage.py new file mode 100644 index 00000000000..52cefbd0b66 --- /dev/null +++ b/api/specs/web-server/_storage.py @@ -0,0 +1,238 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated, Any, TypeAlias + +from fastapi import APIRouter, Depends, Query, status +from models_library.api_schemas_long_running_tasks.tasks import ( + TaskGet, +) +from models_library.api_schemas_storage.storage_schemas import ( + FileLocation, + FileMetaDataGet, + FileUploadCompleteFutureResponse, + FileUploadCompleteResponse, + FileUploadCompletionBody, + FileUploadSchema, + LinkType, + PathMetaDataGet, + PresignedLink, +) +from models_library.api_schemas_webserver.storage import ( + BatchDeletePathsBodyParams, + DataExportPost, + ListPathsQueryParams, + StorageLocationPathParams, + StoragePathComputeSizeParams, +) +from models_library.generics import Envelope +from models_library.projects_nodes_io import LocationID +from models_library.rest_error import EnvelopedError +from pydantic import AnyUrl, ByteSize +from servicelib.fastapi.rest_pagination import CustomizedPathsCursorPage +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.storage.schemas import DatasetMetaData, FileMetaData +from simcore_service_webserver.tasks._exception_handlers import ( + _TO_HTTP_ERROR_MAP as export_data_http_error_map, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=["storage"], +) + + +# NOTE: storage generates URLs that contain double encoded +# slashes, and when applying validation via `StorageFileID` +# it raises an error. Before `StorageFileID`, `str` was the +# type used in the OpenAPI specs. 
+StorageFileIDStr: TypeAlias = str + + +@router.get( + "/storage/locations", + response_model=list[FileLocation], + description="Get available storage locations", +) +async def list_storage_locations(): + """Returns the list of available storage locations""" + + +@router.get( + "/storage/locations/{location_id}/paths", + response_model=CustomizedPathsCursorPage[PathMetaDataGet], +) +async def list_storage_paths( + _path: Annotated[StorageLocationPathParams, Depends()], + _query: Annotated[ListPathsQueryParams, Depends()], +): + """Lists the files/directories in WorkingDirectory""" + + +@router.post( + "/storage/locations/{location_id}/paths/{path}:size", + response_model=Envelope[TaskGet], + status_code=status.HTTP_202_ACCEPTED, +) +async def compute_path_size(_path: Annotated[StoragePathComputeSizeParams, Depends()]): + """Compute the size of a path""" + + +@router.post( + "/storage/locations/{location_id}/-/paths:batchDelete", + response_model=Envelope[TaskGet], + status_code=status.HTTP_202_ACCEPTED, + description="Deletes Paths", +) +async def batch_delete_paths( + _path: Annotated[StorageLocationPathParams, Depends()], + _body: Annotated[BatchDeletePathsBodyParams, Depends()], +): + """deletes files/folders if user has the rights to""" + + +@router.get( + "/storage/locations/{location_id}/datasets", + response_model=Envelope[list[DatasetMetaData]], + description="Get datasets metadata", +) +async def list_datasets_metadata( + _path: Annotated[StorageLocationPathParams, Depends()], +): + """returns all the top level datasets a user has access to""" + + +@router.get( + "/storage/locations/{location_id}/files/metadata", + response_model=Envelope[list[DatasetMetaData]], + description="Get datasets metadata", +) +async def get_files_metadata( + _path: Annotated[StorageLocationPathParams, Depends()], + uuid_filter: str = "", + expand_dirs: bool = Query( + True, + description=( + "Automatic directory expansion. This will be replaced by pagination the future" + ), + ), +): + """returns all the file meta data a user has access to (uuid_filter may be used)""" + + +@router.get( + "/storage/locations/{location_id}/datasets/{dataset_id}/metadata", + response_model=Envelope[list[FileMetaDataGet]], + description="Get Files Metadata", +) +async def list_dataset_files_metadata( + location_id: LocationID, + dataset_id: str, + expand_dirs: bool = Query( + True, + description=( + "Automatic directory expansion. 
This will be replaced by pagination the future" + ), + ), +): + """returns all the file meta data inside dataset with dataset_id""" + + +@router.get( + "/storage/locations/{location_id}/files/{file_id}/metadata", + response_model=FileMetaData | Envelope[FileMetaDataGet], + description="Get File Metadata", +) +async def get_file_metadata(location_id: LocationID, file_id: StorageFileIDStr): + """returns the file meta data of file_id if user_id has the rights to""" + + +@router.get( + "/storage/locations/{location_id}/files/{file_id}", + response_model=Envelope[PresignedLink], + description="Returns download link for requested file", +) +async def download_file( + location_id: LocationID, + file_id: StorageFileIDStr, + link_type: LinkType = LinkType.PRESIGNED, +): + """creates a download file link if user has the rights to""" + + +@router.put( + "/storage/locations/{location_id}/files/{file_id}", + response_model=Envelope[FileUploadSchema] | Envelope[AnyUrl], + description="Returns upload link", +) +async def upload_file( + location_id: LocationID, + file_id: StorageFileIDStr, + file_size: ByteSize | None, + link_type: LinkType = LinkType.PRESIGNED, + is_directory: bool = False, +): + """creates one or more upload file links if user has the rights to, expects the client to complete/abort upload""" + + +@router.delete( + "/storage/locations/{location_id}/files/{file_id}", + status_code=status.HTTP_204_NO_CONTENT, + description="Deletes File", +) +async def delete_file(location_id: LocationID, file_id: StorageFileIDStr): + """deletes file if user has the rights to""" + + +@router.post( + "/storage/locations/{location_id}/files/{file_id}:abort", + status_code=status.HTTP_204_NO_CONTENT, +) +async def abort_upload_file(location_id: LocationID, file_id: StorageFileIDStr): + """aborts an upload if user has the rights to, and reverts + to the latest version if available, else will delete the file""" + + +@router.post( + "/storage/locations/{location_id}/files/{file_id}:complete", + status_code=status.HTTP_202_ACCEPTED, + response_model=Envelope[FileUploadCompleteResponse], +) +async def complete_upload_file( + body_item: Envelope[FileUploadCompletionBody], + location_id: LocationID, + file_id: StorageFileIDStr, +): + """completes an upload if the user has the rights to""" + + +@router.post( + "/storage/locations/{location_id}/files/{file_id}:complete/futures/{future_id}", + response_model=Envelope[FileUploadCompleteFutureResponse], + description="Check for upload completion", +) +async def is_completed_upload_file( + location_id: LocationID, file_id: StorageFileIDStr, future_id: str +): + """Returns state of upload completion""" + + +# data export +_export_data_responses: dict[int | str, dict[str, Any]] = { + i.status_code: {"model": EnvelopedError} + for i in export_data_http_error_map.values() +} + + +@router.post( + "/storage/locations/{location_id}/export-data", + response_model=Envelope[TaskGet], + name="export_data", + description="Export data", + responses=_export_data_responses, +) +async def export_data(export_data: DataExportPost, location_id: LocationID): + """Trigger data export. 
Returns async job id for getting status and results""" diff --git a/api/specs/web-server/_tags.py b/api/specs/web-server/_tags.py new file mode 100644 index 00000000000..c1b768b0dc5 --- /dev/null +++ b/api/specs/web-server/_tags.py @@ -0,0 +1,54 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.generics import Envelope +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.tags.schemas import ( + TagCreate, + TagGet, + TagPathParams, + TagUpdate, +) + +router = APIRouter(prefix=f"/{API_VTAG}", tags=["tags"]) + + +@router.post( + "/tags", + response_model=Envelope[TagGet], + status_code=status.HTTP_201_CREATED, +) +async def create_tag(_body: TagCreate): + ... + + +@router.get( + "/tags", + response_model=Envelope[list[TagGet]], +) +async def list_tags(): + ... + + +@router.patch( + "/tags/{tag_id}", + response_model=Envelope[TagGet], +) +async def update_tag( + _path_params: Annotated[TagPathParams, Depends()], _body: TagUpdate +): + ... + + +@router.delete( + "/tags/{tag_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_tag(_path_params: Annotated[TagPathParams, Depends()]): + ... diff --git a/api/specs/web-server/_tags_groups.py b/api/specs/web-server/_tags_groups.py new file mode 100644 index 00000000000..38dfbf40158 --- /dev/null +++ b/api/specs/web-server/_tags_groups.py @@ -0,0 +1,68 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.tags._rest import _TO_HTTP_ERROR_MAP +from simcore_service_webserver.tags.schemas import ( + TagGet, + TagGroupCreate, + TagGroupGet, + TagGroupPathParams, + TagPathParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "tags", + "groups", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.get( + "/tags/{tag_id}/groups", + response_model=Envelope[list[TagGroupGet]], +) +async def list_tag_groups(_path_params: Annotated[TagPathParams, Depends()]): + """Lists all groups associated to this tag""" + + +@router.post( + "/tags/{tag_id}/groups/{group_id}", + response_model=Envelope[TagGet], + status_code=status.HTTP_201_CREATED, +) +async def create_tag_group( + _path_params: Annotated[TagGroupPathParams, Depends()], _body: TagGroupCreate +): + """Shares tag `tag_id` with an organization or user with `group_id` providing access-rights to it""" + + +@router.put( + "/tags/{tag_id}/groups/{group_id}", + response_model=Envelope[list[TagGroupGet]], +) +async def replace_tag_group( + _path_params: Annotated[TagGroupPathParams, Depends()], _body: TagGroupCreate +): + """Replace access rights on tag for associated organization or user with `group_id`""" + + +@router.delete( + "/tags/{tag_id}/groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_tag_group(_path_params: Annotated[TagGroupPathParams, Depends()]): + """Delete access rights on tag to an associated organization or user with `group_id`""" diff --git a/api/specs/web-server/_trash.py 
b/api/specs/web-server/_trash.py new file mode 100644 index 00000000000..7ec30e777bc --- /dev/null +++ b/api/specs/web-server/_trash.py @@ -0,0 +1,140 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from enum import Enum +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.rest_error import EnvelopedError +from models_library.trash import RemoveQueryParams +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.folders._common.models import ( + FoldersPathParams, + FolderTrashQueryParams, +) +from simcore_service_webserver.projects._controller._rest_exceptions import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.projects._controller.trash_rest import ProjectPathParams +from simcore_service_webserver.workspaces._common.models import ( + WorkspacesPathParams, + WorkspaceTrashQueryParams, +) + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=["trash"], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.post( + "/trash:empty", + status_code=status.HTTP_204_NO_CONTENT, +) +def empty_trash(): ... + + +_extra_tags: list[str | Enum] = ["projects"] + + +@router.post( + "/projects/{project_id}:trash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, + responses={ + status.HTTP_404_NOT_FOUND: { + "description": "Not such a project", + "model": EnvelopedError, + }, + status.HTTP_409_CONFLICT: { + "description": "Project is in use and cannot be trashed", + "model": EnvelopedError, + }, + status.HTTP_503_SERVICE_UNAVAILABLE: { + "description": "Trash service error", + "model": EnvelopedError, + }, + }, +) +def trash_project( + _path: Annotated[ProjectPathParams, Depends()], + _query: Annotated[RemoveQueryParams, Depends()], +): ... + + +@router.post( + "/projects/{project_id}:untrash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, +) +def untrash_project( + _path: Annotated[ProjectPathParams, Depends()], +): ... + + +_extra_tags = ["folders"] + + +@router.post( + "/folders/{folder_id}:trash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, + responses={ + status.HTTP_404_NOT_FOUND: {"description": "Not such a folder"}, + status.HTTP_409_CONFLICT: { + "description": "One or more projects in the folder are in use and cannot be trashed" + }, + status.HTTP_503_SERVICE_UNAVAILABLE: {"description": "Trash service error"}, + }, +) +def trash_folder( + _path: Annotated[FoldersPathParams, Depends()], + _query: Annotated[FolderTrashQueryParams, Depends()], +): ... + + +@router.post( + "/folders/{folder_id}:untrash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, +) +def untrash_folder( + _path: Annotated[FoldersPathParams, Depends()], +): ... + + +_extra_tags = ["workspaces"] + + +@router.post( + "/workspaces/{workspace_id}:trash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, + responses={ + status.HTTP_404_NOT_FOUND: {"description": "Not such a workspace"}, + status.HTTP_409_CONFLICT: { + "description": "One or more projects in the workspace are in use and cannot be trashed" + }, + status.HTTP_503_SERVICE_UNAVAILABLE: {"description": "Trash service error"}, + }, +) +def trash_workspace( + _path: Annotated[WorkspacesPathParams, Depends()], + _query: Annotated[WorkspaceTrashQueryParams, Depends()], +): ... 
+ + +@router.post( + "/workspaces/{workspace_id}:untrash", + tags=_extra_tags, + status_code=status.HTTP_204_NO_CONTENT, +) +def untrash_workspace( + _path: Annotated[WorkspacesPathParams, Depends()], +): ... diff --git a/api/specs/web-server/_users.py b/api/specs/web-server/_users.py new file mode 100644 index 00000000000..bd957858c49 --- /dev/null +++ b/api/specs/web-server/_users.py @@ -0,0 +1,194 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from enum import Enum +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.users import ( + MyPermissionGet, + MyProfileGet, + MyProfilePatch, + MyTokenCreate, + MyTokenGet, + UserAccountApprove, + UserAccountGet, + UserAccountReject, + UserAccountSearchQueryParams, + UserGet, + UsersAccountListQueryParams, + UsersSearch, +) +from models_library.api_schemas_webserver.users_preferences import PatchRequestBody +from models_library.generics import Envelope +from models_library.rest_pagination import Page +from models_library.user_preferences import PreferenceIdentifier +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.users._common.schemas import PreRegisteredUserGet +from simcore_service_webserver.users._notifications import ( + UserNotification, + UserNotificationCreate, + UserNotificationPatch, +) +from simcore_service_webserver.users._notifications_rest import _NotificationPathParams +from simcore_service_webserver.users._tokens_rest import _TokenPathParams + +router = APIRouter(prefix=f"/{API_VTAG}", tags=["users"]) + + +@router.get( + "/me", + response_model=Envelope[MyProfileGet], +) +async def get_my_profile(): ... + + +@router.patch( + "/me", + status_code=status.HTTP_204_NO_CONTENT, +) +async def update_my_profile(_body: MyProfilePatch): ... + + +@router.patch( + "/me/preferences/{preference_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def set_frontend_preference( + preference_id: PreferenceIdentifier, + _body: PatchRequestBody, +): ... + + +@router.get( + "/me/tokens", + response_model=Envelope[list[MyTokenGet]], +) +async def list_tokens(): ... + + +@router.post( + "/me/tokens", + response_model=Envelope[MyTokenGet], + status_code=status.HTTP_201_CREATED, +) +async def create_token(_body: MyTokenCreate): ... + + +@router.get( + "/me/tokens/{service}", + response_model=Envelope[MyTokenGet], +) +async def get_token( + _path: Annotated[_TokenPathParams, Depends()], +): ... + + +@router.delete( + "/me/tokens/{service}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_token(_path: Annotated[_TokenPathParams, Depends()]): ... + + +@router.get( + "/me/notifications", + response_model=Envelope[list[UserNotification]], +) +async def list_user_notifications(): ... + + +@router.post( + "/me/notifications", + status_code=status.HTTP_204_NO_CONTENT, +) +async def create_user_notification( + _body: UserNotificationCreate, +): ... + + +@router.patch( + "/me/notifications/{notification_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def mark_notification_as_read( + _path: Annotated[_NotificationPathParams, Depends()], + _body: UserNotificationPatch, +): ... + + +@router.get( + "/me/permissions", + response_model=Envelope[list[MyPermissionGet]], +) +async def list_user_permissions(): ... 
+ + +# +# USERS public +# + + +@router.post( + "/users:search", + response_model=Envelope[list[UserGet]], + description="Search among users who are publicly visible to the caller (i.e., me) based on their privacy settings.", +) +async def search_users(_body: UsersSearch): ... + + +# +# USERS admin +# + +_extra_tags: list[str | Enum] = ["admin"] + + +@router.get( + "/admin/user-accounts", + response_model=Page[UserAccountGet], + tags=_extra_tags, +) +async def list_users_accounts( + _query: Annotated[as_query(UsersAccountListQueryParams), Depends()], +): ... + + +@router.post( + "/admin/user-accounts:approve", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def approve_user_account(_body: UserAccountApprove): ... + + +@router.post( + "/admin/user-accounts:reject", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def reject_user_account(_body: UserAccountReject): ... + + +@router.get( + "/admin/user-accounts:search", + response_model=Envelope[list[UserAccountGet]], + tags=_extra_tags, +) +async def search_user_accounts( + _query: Annotated[UserAccountSearchQueryParams, Depends()], +): + # NOTE: see `Search` in `Common Custom Methods` in https://cloud.google.com/apis/design/custom_methods + ... + + +@router.post( + "/admin/user-accounts:pre-register", + response_model=Envelope[UserAccountGet], + tags=_extra_tags, +) +async def pre_register_user_account(_body: PreRegisteredUserGet): ... diff --git a/api/specs/web-server/_wallets.py b/api/specs/web-server/_wallets.py new file mode 100644 index 00000000000..c4e490ec711 --- /dev/null +++ b/api/specs/web-server/_wallets.py @@ -0,0 +1,259 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from enum import Enum +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.wallets import ( + CreateWalletBodyParams, + CreateWalletPayment, + GetWalletAutoRecharge, + PaymentID, + PaymentMethodGet, + PaymentMethodID, + PaymentMethodInitiated, + PaymentTransaction, + PutWalletBodyParams, + ReplaceWalletAutoRecharge, + WalletGet, + WalletGetWithAvailableCredits, + WalletPaymentInitiated, +) +from models_library.generics import Envelope +from models_library.groups import GroupID +from models_library.rest_pagination import Page, PageQueryParameters +from models_library.wallets import WalletID +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.wallets._groups_api import WalletGroupGet +from simcore_service_webserver.wallets._groups_handlers import _WalletsGroupsBodyParams + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "wallets", + ], +) + +### Wallets + + +@router.post( + "/wallets", + response_model=Envelope[WalletGet], + status_code=status.HTTP_201_CREATED, +) +async def create_wallet(body: CreateWalletBodyParams): + ... + + +@router.get( + "/wallets", + response_model=Envelope[list[WalletGetWithAvailableCredits]], +) +async def list_wallets(): + ... + + +@router.get( + "/wallets/default", + response_model=Envelope[WalletGetWithAvailableCredits], +) +async def get_default_wallet(): + ... + + +@router.get( + "/wallets/{wallet_id}", + response_model=Envelope[WalletGetWithAvailableCredits], +) +async def get_wallet(wallet_id: WalletID): + ... 
+ + +@router.put( + "/wallets/{wallet_id}", + response_model=Envelope[WalletGet], +) +async def update_wallet(wallet_id: WalletID, body: PutWalletBodyParams): + ... + + +### Wallets payments + + +@router.post( + "/wallets/{wallet_id}/payments", + response_model=Envelope[WalletPaymentInitiated], + response_description="Payment initialized", + status_code=status.HTTP_202_ACCEPTED, +) +async def create_payment(wallet_id: WalletID, body: CreateWalletPayment): + """Creates payment to wallet `wallet_id`""" + + +@router.get( + "/wallets/-/payments", + response_model=Page[PaymentTransaction], +) +async def list_all_payments(params: Annotated[PageQueryParameters, Depends()]): + """Lists all user payments to his/her wallets (only the ones he/she created)""" + + +@router.get( + "/wallets/{wallet_id}/payments/{payment_id}/invoice-link", + status_code=status.HTTP_302_FOUND, + responses={ + status.HTTP_302_FOUND: { + "description": "redirection to invoice download link", + } + }, +) +async def get_payment_invoice_link(wallet_id: WalletID, payment_id: PaymentID): + ... + + +@router.post( + "/wallets/{wallet_id}/payments/{payment_id}:cancel", + response_description="Successfully cancelled", + status_code=status.HTTP_204_NO_CONTENT, +) +async def cancel_payment(wallet_id: WalletID, payment_id: PaymentID): + ... + + +### Wallets payment-methods + + +@router.post( + "/wallets/{wallet_id}/payments-methods:init", + response_model=Envelope[PaymentMethodInitiated], + response_description="Successfully initialized", + status_code=status.HTTP_202_ACCEPTED, +) +async def init_creation_of_payment_method(wallet_id: WalletID): + ... + + +@router.post( + "/wallets/{wallet_id}/payments-methods/{payment_method_id}:cancel", + status_code=status.HTTP_204_NO_CONTENT, + response_description="Successfully cancelled", +) +async def cancel_creation_of_payment_method( + wallet_id: WalletID, payment_method_id: PaymentMethodID +): + ... + + +@router.get( + "/wallets/{wallet_id}/payments-methods", + response_model=Envelope[list[PaymentMethodGet]], +) +async def list_payments_methods(wallet_id: WalletID): + """Lists all payments method associated to `wallet_id`""" + + +@router.get( + "/wallets/{wallet_id}/payments-methods/{payment_method_id}", + response_model=Envelope[PaymentMethodGet], +) +async def get_payment_method(wallet_id: WalletID, payment_method_id: PaymentMethodID): + ... + + +@router.delete( + "/wallets/{wallet_id}/payments-methods/{payment_method_id}", + status_code=status.HTTP_204_NO_CONTENT, + response_description="Successfully deleted", +) +async def delete_payment_method( + wallet_id: WalletID, payment_method_id: PaymentMethodID +): + ... + + +@router.post( + "/wallets/{wallet_id}/payments-methods/{payment_method_id}:pay", + response_model=Envelope[WalletPaymentInitiated], + response_description="Pay with payment-method", + status_code=status.HTTP_202_ACCEPTED, +) +async def pay_with_payment_method( + wallet_id: WalletID, payment_method_id: PaymentMethodID, _body: CreateWalletPayment +): + ... + + +# +# payment-autorecharge. Implemented as a singleton-subresource +# + + +@router.get( + "/wallets/{wallet_id}/auto-recharge", + response_model=Envelope[GetWalletAutoRecharge], +) +async def get_wallet_autorecharge(wallet_id: WalletID): + ... + + +@router.put( + "/wallets/{wallet_id}/auto-recharge", + response_model=Envelope[GetWalletAutoRecharge], +) +async def replace_wallet_autorecharge( + wallet_id: WalletID, _body: ReplaceWalletAutoRecharge +): + ... 
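The auto-recharge endpoints above form a singleton sub-resource (GET/PUT on a fixed path, no trailing id), while one-shot actions such as `:cancel` or `:pay` are modelled as custom methods. A hypothetical client-side call, assuming a local development deployment and that authentication cookies are already handled, could be:

# Hypothetical client snippet: base URL, wallet id and payment-method id are
# placeholders, not values taken from this repository.
import httpx

BASE_URL = "http://localhost:8001/v0"  # assumed dev host/port and API version tag
WALLET_ID = 1  # placeholder

with httpx.Client(base_url=BASE_URL) as client:
    # singleton sub-resource: read the wallet's auto-recharge settings
    resp = client.get(f"/wallets/{WALLET_ID}/auto-recharge")
    resp.raise_for_status()
    print(resp.json()["data"])

    # custom method: cancel an ongoing payment-method creation (204 on success)
    resp = client.post(f"/wallets/{WALLET_ID}/payments-methods/placeholder-id:cancel")
    print(resp.status_code)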
+ + +### Wallets groups +_extra_tags: list[str | Enum] = ["groups"] + + +@router.post( + "/wallets/{wallet_id}/groups/{group_id}", + response_model=Envelope[WalletGroupGet], + status_code=status.HTTP_201_CREATED, + tags=_extra_tags, +) +async def create_wallet_group( + wallet_id: WalletID, group_id: GroupID, body: _WalletsGroupsBodyParams +): + ... + + +@router.get( + "/wallets/{wallet_id}/groups", + response_model=Envelope[list[WalletGroupGet]], + tags=_extra_tags, +) +async def list_wallet_groups(wallet_id: WalletID): + ... + + +@router.put( + "/wallets/{wallet_id}/groups/{group_id}", + response_model=Envelope[WalletGroupGet], + tags=_extra_tags, +) +async def update_wallet_group( + wallet_id: WalletID, group_id: GroupID, body: _WalletsGroupsBodyParams +): + ... + + +@router.delete( + "/wallets/{wallet_id}/groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def delete_wallet_group(wallet_id: WalletID, group_id: GroupID): + ... diff --git a/api/specs/web-server/_workspaces.py b/api/specs/web-server/_workspaces.py new file mode 100644 index 00000000000..a86f5ceaaae --- /dev/null +++ b/api/specs/web-server/_workspaces.py @@ -0,0 +1,144 @@ +""" Helper script to generate OAS automatically +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from enum import Enum +from typing import Annotated + +from _common import as_query +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_webserver.workspaces import ( + WorkspaceCreateBodyParams, + WorkspaceGet, + WorkspaceReplaceBodyParams, +) +from models_library.generics import Envelope +from models_library.rest_error import EnvelopedError +from simcore_service_webserver._meta import API_VTAG +from simcore_service_webserver.folders._common.exceptions_handlers import ( + _TO_HTTP_ERROR_MAP, +) +from simcore_service_webserver.workspaces._common.models import ( + WorkspacesGroupsBodyParams, + WorkspacesGroupsPathParams, + WorkspacesListQueryParams, + WorkspacesPathParams, +) +from simcore_service_webserver.workspaces._groups_service import WorkspaceGroupGet + +router = APIRouter( + prefix=f"/{API_VTAG}", + tags=[ + "workspaces", + ], + responses={ + i.status_code: {"model": EnvelopedError} for i in _TO_HTTP_ERROR_MAP.values() + }, +) + + +@router.post( + "/workspaces", + response_model=Envelope[WorkspaceGet], + status_code=status.HTTP_201_CREATED, +) +async def create_workspace( + _body: WorkspaceCreateBodyParams, +): + ... + + +@router.get( + "/workspaces", + response_model=Envelope[list[WorkspaceGet]], +) +async def list_workspaces( + _query: Annotated[as_query(WorkspacesListQueryParams), Depends()], +): + ... + + +@router.get( + "/workspaces/{workspace_id}", + response_model=Envelope[WorkspaceGet], +) +async def get_workspace( + _path: Annotated[WorkspacesPathParams, Depends()], +): + ... + + +@router.put( + "/workspaces/{workspace_id}", + response_model=Envelope[WorkspaceGet], +) +async def replace_workspace( + _path: Annotated[WorkspacesPathParams, Depends()], + _body: WorkspaceReplaceBodyParams, +): + ... + + +@router.delete( + "/workspaces/{workspace_id}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def delete_workspace( + _path: Annotated[WorkspacesPathParams, Depends()], +): + ... 
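Several routers in this patch (tags/groups, trash, workspaces) build their error `responses` by expanding a `_TO_HTTP_ERROR_MAP` into `{status_code: {"model": EnvelopedError}}` entries. The map is defined in the webserver package and is not shown here; the comprehension only requires its values to expose a `status_code`, so an illustrative shape might be:

# Illustrative shape only: the real _TO_HTTP_ERROR_MAP and EnvelopedError are not
# part of this diff and almost certainly carry more information than shown here.
from dataclasses import dataclass

from fastapi import status
from pydantic import BaseModel


class EnvelopedErrorSketch(BaseModel):  # stand-in for models_library.rest_error.EnvelopedError
    error: dict


@dataclass(frozen=True)
class HttpErrorInfoSketch:
    status_code: int
    msg_template: str  # assumed human-readable message template


_TO_HTTP_ERROR_MAP_SKETCH = {
    LookupError: HttpErrorInfoSketch(status.HTTP_404_NOT_FOUND, "Workspace not found"),
    PermissionError: HttpErrorInfoSketch(status.HTTP_403_FORBIDDEN, "Access denied"),
}

# the same comprehension pattern used by the routers above
responses = {
    i.status_code: {"model": EnvelopedErrorSketch}
    for i in _TO_HTTP_ERROR_MAP_SKETCH.values()
}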
+ + +### Workspaces groups +_extra_tags: list[str | Enum] = ["groups"] + + +@router.post( + "/workspaces/{workspace_id}/groups/{group_id}", + response_model=Envelope[WorkspaceGroupGet], + status_code=status.HTTP_201_CREATED, + tags=_extra_tags, +) +async def create_workspace_group( + _path: Annotated[WorkspacesGroupsPathParams, Depends()], + _body: WorkspacesGroupsBodyParams, +): + ... + + +@router.get( + "/workspaces/{workspace_id}/groups", + response_model=Envelope[list[WorkspaceGroupGet]], + tags=_extra_tags, +) +async def list_workspace_groups( + _path: Annotated[WorkspacesPathParams, Depends()], +): + ... + + +@router.put( + "/workspaces/{workspace_id}/groups/{group_id}", + response_model=Envelope[WorkspaceGroupGet], + tags=_extra_tags, +) +async def replace_workspace_group( + _path: Annotated[WorkspacesGroupsPathParams, Depends()], + _body: WorkspacesGroupsBodyParams, +): + ... + + +@router.delete( + "/workspaces/{workspace_id}/groups/{group_id}", + status_code=status.HTTP_204_NO_CONTENT, + tags=_extra_tags, +) +async def delete_workspace_group( + _path: Annotated[WorkspacesGroupsPathParams, Depends()], +): + ... diff --git a/api/specs/web-server/openapi.py b/api/specs/web-server/openapi.py new file mode 100644 index 00000000000..700bdc7d63c --- /dev/null +++ b/api/specs/web-server/openapi.py @@ -0,0 +1,115 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import importlib + +import yaml +from fastapi import FastAPI +from fastapi.routing import APIRoute +from servicelib.fastapi.openapi import create_openapi_specs +from simcore_service_webserver._meta import API_VERSION, PROJECT_NAME, SUMMARY +from simcore_service_webserver._resources import webserver_resources + +openapi_modules = [ + importlib.import_module(name) + for name in ( + # NOTE: order matters on how the paths are displayed in the OAS! 
+ # It does not have to be alphabetical + # + # core --- + "_auth", + "_auth_api_keys", + "_groups", + "_tags", + "_tags_groups", # after _tags + "_products", + "_users", + "_wallets", + # add-ons --- + "_activity", + "_announcements", + "_catalog", + "_catalog_tags", # MUST BE after _catalog + "_computations", + "_exporter", + "_folders", + "_functions", + "_long_running_tasks", + "_long_running_tasks_legacy", + "_licensed_items", + "_licensed_items_purchases", + "_licensed_items_checkouts", + "_nih_sparc", + "_nih_sparc_redirections", + "_projects", + "_projects_access_rights", + "_projects_comments", + "_projects_conversations", + "_projects_folders", + "_projects_metadata", + "_projects_nodes", + "_projects_nodes_pricing_unit", # after _projects_nodes + "_projects_ports", + "_projects_states", + "_projects_tags", + "_projects_wallet", + "_projects_workspaces", + "_publications", + "_resource_usage", + "_statics", + "_storage", + "_trash", + "_workspaces", + # maintenance ---- + "_admin", + "_diagnostics", + ) +] + + +def main(): + app = FastAPI( + title=PROJECT_NAME, + version=API_VERSION, + description=SUMMARY, + license={ + "name": "MIT", + "url": "https://github.com/ITISFoundation/osparc-simcore/blob/master/LICENSE", + }, + servers=[ + {"description": "webserver", "url": ""}, + { + "description": "development server", + "url": "http://{host}:{port}", + "variables": { + "host": {"default": "localhost"}, + "port": {"default": "8001"}, + }, + }, + ], + ) + + for module in openapi_modules: + # enforces operation_id == handler function name + for route in module.router.routes: + if isinstance(route, APIRoute) and route.operation_id is None: + route.operation_id = route.endpoint.__name__ + app.include_router(module.router) + + openapi = create_openapi_specs(app, remove_main_sections=False) + + # .yaml + oas_path = webserver_resources.get_path("api/v0/openapi.yaml").resolve() + if not oas_path.exists(): + oas_path.parent.mkdir(parents=True) + oas_path.write_text("") + print(f"Writing {oas_path}...", end=None) + with oas_path.open("wt") as fh: + yaml.safe_dump(openapi, stream=fh, sort_keys=False) + print("done") + + +if __name__ == "__main__": + main() diff --git a/api/specs/web-server/requirements.txt b/api/specs/web-server/requirements.txt new file mode 100644 index 00000000000..8ffca6a489d --- /dev/null +++ b/api/specs/web-server/requirements.txt @@ -0,0 +1,10 @@ +# Extra reqs, besides webserver's + +--constraint ../../../requirements/constraints.txt + +fastapi +fastapi-pagination +jsonref +pydantic +pydantic-extra-types +python-multipart diff --git a/api/specs/webserver/components/schemas/activity.yaml b/api/specs/webserver/components/schemas/activity.yaml deleted file mode 100644 index dc75293b8a1..00000000000 --- a/api/specs/webserver/components/schemas/activity.yaml +++ /dev/null @@ -1,38 +0,0 @@ -ActivityEnveloped: - type: object - required: - - data - properties: - data: - $ref: '#/Activity' - additionalProperties: true - error: - nullable: true - default: null - -Activity: - type: object - properties: - stats: - $ref: '#/Status' - limits: - $ref: '#/Limits' - queued: - type: boolean - -Status: - type: object - properties: - cpuUsage: - type: number - minimum: 0 - memoryUsage: - type: number - -Limits: - type: object - properties: - cpus: - type: number - mem: - type: number \ No newline at end of file diff --git a/api/specs/webserver/components/schemas/cluster.yaml b/api/specs/webserver/components/schemas/cluster.yaml deleted file mode 100644 index f601ed8207d..00000000000 --- 
a/api/specs/webserver/components/schemas/cluster.yaml +++ /dev/null @@ -1,290 +0,0 @@ -ClusterPing: - type: object - properties: - endpoint: - type: string - minLength: 1 - maxLength: 65536 - # format: uri - authentication: - description: Dask gateway authentication - anyOf: - [ - $ref: "#/SimpleAuthentication", - $ref: "#/KerberosAuthentication", - $ref: "#/JupyterHubTokenAuthentication", - ] - - required: - - endpoint - - authentication - additionalProperties: false - -ClusterCreate: - type: object - properties: - name: - description: the cluster name - type: string - description: - description: the cluster description - type: string - type: - description: the cluster type - type: string - enum: - - ON_PREMISE - - AWS - thumbnail: - type: string - # format: uri - endpoint: - type: string - minLength: 1 - maxLength: 65536 - # format: uri - authentication: - description: Dask gateway authentication - anyOf: - [ - $ref: "#/SimpleAuthentication", - $ref: "#/KerberosAuthentication", - $ref: "#/JupyterHubTokenAuthentication", - ] - - required: - - name - - type - - endpoint - - authentication - additionalProperties: false - -ClusterPatch: - type: object - properties: - name: - description: the cluster name - type: string - description: - description: the cluster description - type: string - type: - description: the cluster type - type: string - enum: - - ON_PREMISE - - AWS - owner: - description: the cluster owner group - type: integer - minimum: 1 - thumbnail: - type: string - # format: uri - endpoint: - type: string - minLength: 1 - maxLength: 65536 - # format: uri - authentication: - description: Dask gateway authentication - anyOf: - [ - $ref: "#/SimpleAuthentication", - $ref: "#/KerberosAuthentication", - $ref: "#/JupyterHubTokenAuthentication", - ] - accessRights: - type: object - description: >- - object containing the GroupID as key and read/write/execution permissions - as value - x-patternProperties: - ^\S+$: - $ref: "#/ClusterAccessRights" - additionalProperties: false - -Cluster: - type: object - properties: - id: - description: the cluster id - type: integer - minimum: 1 - name: - description: the cluster name - type: string - description: - description: the cluster description - type: string - type: - description: the cluster type - type: string - enum: - - ON_PREMISE - - AWS - owner: - description: the cluster owner group - type: integer - minimum: 1 - thumbnail: - type: string - format: uri - endpoint: - type: string - minLength: 1 - maxLength: 65536 - # format: uri - authentication: - description: Dask gateway authentication - anyOf: - [ - $ref: "#/SimpleAuthentication", - $ref: "#/KerberosAuthentication", - $ref: "#/JupyterHubTokenAuthentication", - ] - - accessRights: - type: object - description: >- - object containing the GroupID as key and read/write/execution permissions - as value - x-patternProperties: - ^\S+$: - $ref: "#/ClusterAccessRights" - required: - - id - - name - - accessRights - - endpoint - - authentication - additionalProperties: false - example: - - id: 1 - name: AWS cluster - type: AWS - endpoint: https://registry.osparc-development.fake.dev - authentication: - type: simple - username: someuser - password: somepassword - owner: 2 - accessRights: - "2": - read: true - write: true - delete: true - -ClusterDetails: - type: object - properties: - scheduler: - type: object - description: contains information about the cluster scheduler - cluster: - type: object - description: contains information about the cluster workers - dashboardLink: - type: string - 
minLength: 1 - maxLength: 65536 - description: contains the link to the cluster dashboard - # format: uri - - required: - - scheduler - - cluster - - dashboardLink - -ClusterDetailsEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/ClusterDetails" - error: - nullable: true - default: null - -ClusterEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/Cluster" - error: - nullable: true - default: null - -ClustersEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/Cluster" - error: - nullable: true - default: null - -SimpleAuthentication: - type: object - properties: - type: - type: string - enum: [simple] - default: simple - username: - type: string - password: - type: string - # format: password - writeOnly: true - required: - - username - - password - additionalProperties: false - -KerberosAuthentication: - type: object - properties: - type: - type: string - enum: [kerberos] - default: kerberos - additionalProperties: false - -JupyterHubTokenAuthentication: - type: object - properties: - type: - type: string - enum: [jupyterhub] - default: jupyterhub - api_token: - type: string - required: - - api_token - additionalProperties: false - -ClusterAccessRights: - description: defines acesss rights for the cluster - type: object - properties: - read: - type: boolean - description: allows usage of the cluster - write: - type: boolean - description: allows modification of the cluster - delete: - type: boolean - description: allows deletion of the cluster - required: - - read - - write - - delete diff --git a/api/specs/webserver/components/schemas/config.yaml b/api/specs/webserver/components/schemas/config.yaml deleted file mode 100644 index b559764b789..00000000000 --- a/api/specs/webserver/components/schemas/config.yaml +++ /dev/null @@ -1,18 +0,0 @@ -ConfigEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/ConfigSchema" - error: - nullable: true - default: null - -ConfigSchema: - type: object - properties: - invitation_required: - type: boolean - example: - invitation_required: true diff --git a/api/specs/webserver/components/schemas/error.yaml b/api/specs/webserver/components/schemas/error.yaml deleted file mode 100644 index 570fbbb8719..00000000000 --- a/api/specs/webserver/components/schemas/error.yaml +++ /dev/null @@ -1,74 +0,0 @@ -ErrorEnveloped: -# - notice that data is defaulted to null -# - type: object - required: - - error - properties: - data: - nullable: true - default: null - error: - $ref: "#/ErrorType" - - - -ErrorType: -# - Normally transmitted as a response from server to client -# - can exchage log messages between server and client. Possible applications: -# - e.g. client side can render a widget to display messages logged to 'user' -# - contains meta-information to allow client programatically understand the error. Possible applications: -# - e.g. 
metadata can serialize an exception in server that can be reproduced in client side -# - type: object - nullable: true - properties: - logs: - description: log messages - type: array - items: - $ref: './log_message.yaml#/LogMessageType' - errors: - description: errors metadata - type: array - items: - $ref: '#/ErrorItemType' - status: - description: HTTP error code - type: integer - example: - BadRequestError: - logs: - - message: 'Requested information is incomplete or malformed' - level: ERROR - - message: 'Invalid email and password' - level: ERROR - logger: USER - errors: - - code: "InvalidEmail" - message: "Email is malformed" - field: email - - code: "UnsavePassword" - message: "Password is not secure" - field: pasword - status: 400 - - -ErrorItemType: - type: object - required: - - code - - message - properties: - code: - type: string - description: Typically the name of the exception that produced it otherwise some known error code - message: - type: string - description: Error message specific to this item - resource: - type: string - description: API resource affected by this error - field: - type: string - description: Specific field within the resource diff --git a/api/specs/webserver/components/schemas/group.yaml b/api/specs/webserver/components/schemas/group.yaml deleted file mode 100644 index 0c4ef5feaa7..00000000000 --- a/api/specs/webserver/components/schemas/group.yaml +++ /dev/null @@ -1,157 +0,0 @@ -GroupAccessRights: - description: defines acesss rights for the user - type: object - properties: - read: - type: boolean - write: - type: boolean - delete: - type: boolean - required: - - read - - write - - delete - example: - # Member - - read: true - write: false - delete: false - # Manager - - read: true - write: true - delete: false - # Administrator - - read: true - write: true - delete: true - -UsersGroup: - type: object - properties: - gid: - description: the group ID - type: string - label: - description: the group name - type: string - description: - description: the group description - type: string - thumbnail: - description: url to the group thumbnail - type: string - format: uri - accessRights: - $ref: "#/GroupAccessRights" - required: - - gid - - label - - description - - accessRights - example: - - gid: "27" - label: "A user" - description: "A very special user" - thumbnail: https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png - - gid: "1" - label: "ITIS Foundation" - description: "The Foundation for Research on Information Technologies in Society" - thumbnail: https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png - - gid: "0" - label: "All" - description: "Open to all users" - thumbnail: https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png - -UsersGroupEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/UsersGroup" - error: - nullable: true - default: null - -AllUsersGroups: - type: object - properties: - me: - $ref: "#/UsersGroup" - organizations: - type: array - items: - $ref: "#/UsersGroup" - all: - $ref: "#/UsersGroup" - product: - $ref: "#/UsersGroup" - -AllUsersGroupsEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/AllUsersGroups" - error: - nullable: true - default: null - -GroupUser: - type: object - allOf: - - type: object - properties: - first_name: - type: string - description: the user first name - last_name: - type: string - description: the user 
last name - login: - type: string - format: email - description: the user login email - gravatar_id: - type: string - description: the user gravatar id hash - id: - type: string - description: the user id - gid: - type: string - description: the user primary gid - example: - first_name: Mr - last_name: Smith - login: mr.smith@matrix.com - gravatar_id: a1af5c6ecc38e81f29695f01d6ceb540 - id: "1" - gid: "3" - - $ref: "#/GroupAccessRights" - -GroupUsersArrayEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/GroupUser" - error: - nullable: true - default: null - -GroupUserEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/GroupUser" - error: - nullable: true - default: null diff --git a/api/specs/webserver/components/schemas/health_check.yaml b/api/specs/webserver/components/schemas/health_check.yaml deleted file mode 100644 index a0fa55b9430..00000000000 --- a/api/specs/webserver/components/schemas/health_check.yaml +++ /dev/null @@ -1,28 +0,0 @@ -HealthCheckEnveloped: - type: object - required: - - data - properties: - data: - $ref: '#/HealthCheckType' - error: - nullable: true - default: null - - -HealthCheckType: - type: object - properties: - name: - type: string - status: - type: string - api_version: - type: string - version: - type: string - example: - name: 'simcore-director-service' - status: SERVICE_RUNNING - api_version: 0.1.0-dev+NJuzzD9S - version: 0.1.0-dev+N127Mfv9H diff --git a/api/specs/webserver/components/schemas/log_message.yaml b/api/specs/webserver/components/schemas/log_message.yaml deleted file mode 100644 index 85381ab9641..00000000000 --- a/api/specs/webserver/components/schemas/log_message.yaml +++ /dev/null @@ -1,40 +0,0 @@ -LogMessageEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/LogMessageType" - error: - nullable: true - default: null - - -LogMessageType: -# - logger can be use as a way for the client to filter messages. -# - E.g. logger naming can be hierarchical, and all including "*.user.*" -# are displayed as a flash message in the front-end -# - type: object - properties: - level: - description: log level - type: string - default: INFO - enum: - - DEBUG - - WARNING - - INFO - - ERROR - message: - description: log message. 
If logger is USER, then it MUST be human readable - type: string - logger: - description: name of the logger receiving this message - type: string - required: - - message - example: - message: 'Hi there, Mr user' - level: INFO - logger: user-logger diff --git a/api/specs/webserver/components/schemas/me.yaml b/api/specs/webserver/components/schemas/me.yaml deleted file mode 100644 index 58be4b85d87..00000000000 --- a/api/specs/webserver/components/schemas/me.yaml +++ /dev/null @@ -1,104 +0,0 @@ -ProfileCommon: - type: object - properties: - first_name: - type: string - last_name: - type: string - example: - first_name: Pedro - last_name: Crespo - -ProfileUpdate: - allOf: - - $ref: "#/ProfileCommon" - -ProfileGet: - allOf: - - $ref: "#/ProfileCommon" - - type: object - properties: - id: - type: integer - login: - type: string - format: email - role: - type: string - groups: - $ref: "./group.yaml#/AllUsersGroups" - gravatar_id: - type: string - expirationDate: - type: string - format: date - description: "If user has a trial account, it sets the expiration date, otherwise None" - -ProfileEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/ProfileGet" - error: - nullable: true - default: null - -Token: - description: api keys for third party services - type: object - properties: - service: - description: uniquely identifies the service where this token is used - type: string - token_key: - description: basic token key - type: string - format: uuid - token_secret: - type: string - format: uuid - required: - - service - - token_key - -TokenId: - description: toke identifier - type: string - # format: uuid - -TokenEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/Token" - error: - nullable: true - default: null - -TokensArrayEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/Token" - error: - nullable: true - default: null - -TokenIdEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/TokenId" - error: - nullable: true - default: null diff --git a/api/specs/webserver/components/schemas/node_resources.yaml b/api/specs/webserver/components/schemas/node_resources.yaml deleted file mode 100644 index db4ed4637bf..00000000000 --- a/api/specs/webserver/components/schemas/node_resources.yaml +++ /dev/null @@ -1,30 +0,0 @@ -components: - schemas: - NodeResourcesEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/NodeResources" - error: - nullable: true - default: null - NodeResources: - type: object - additionalProperties: - type: object - required: - - limit - - reservation - properties: - limit: - anyOf: - - type: integer - - type: number - - type: string - reservation: - anyOf: - - type: integer - - type: number - - type: string diff --git a/api/specs/webserver/components/schemas/pipeline.yaml b/api/specs/webserver/components/schemas/pipeline.yaml deleted file mode 100644 index 78df6a68de3..00000000000 --- a/api/specs/webserver/components/schemas/pipeline.yaml +++ /dev/null @@ -1,47 +0,0 @@ -components: - schemas: - PipelineEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/PipelineSchema" - error: - nullable: true - default: null - - PipelineSchema: - type: object - properties: - iteration: - type: integer - minimum: 1 - cluster_id: - type: integer - minimum: 0 - - PipelineCreatedEnveloped: - type: object - required: - - data - properties: - data: - $ref: 
"#/components/schemas/PipelineCreatedSchema" - error: - nullable: true - default: null - - PipelineCreatedSchema: - type: object - required: - - pipelines_id - properties: - pipeline_id: - type: string - description: "ID for created pipeline (=project identifier)" - ref_ids: - type: array - items: - type: integer - description: "Checkpoints IDs for created pipeline" diff --git a/api/specs/webserver/openapi-activity.yaml b/api/specs/webserver/openapi-activity.yaml deleted file mode 100644 index f31ab04b76c..00000000000 --- a/api/specs/webserver/openapi-activity.yaml +++ /dev/null @@ -1,20 +0,0 @@ -paths: - /activity/status: - get: - operationId: get_status - tags: - - activity - responses: - "200": - description: Object containing queuing, CPU and Memory usage/limits information of services - content: - application/json: - schema: - $ref: "./components/schemas/activity.yaml#/ActivityEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - -components: - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-admin.yaml b/api/specs/webserver/openapi-admin.yaml deleted file mode 100644 index 8b6965a8831..00000000000 --- a/api/specs/webserver/openapi-admin.yaml +++ /dev/null @@ -1,105 +0,0 @@ -paths: - /email:test: - post: - tags: - - admin - summary: Test Email - operationId: test_email - parameters: - - required: false - schema: - title: X-Simcore-Products-Name - type: string - name: x-simcore-products-name - in: header - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/TestEmail' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Union_TestFailed__TestPassed__' -components: - schemas: - Envelope_Union_TestFailed__TestPassed__: - title: Envelope[Union[TestFailed, TestPassed]] - type: object - properties: - data: - title: Data - anyOf: - - $ref: '#/components/schemas/TestFailed' - - $ref: '#/components/schemas/TestPassed' - error: - title: Error - TestEmail: - title: TestEmail - required: - - to - type: object - properties: - from_: - title: 'From ' - type: string - description: Email sender - format: email - to: - title: To - type: string - description: Email receiver - format: email - template_name: - title: Template Name - enum: - - change_email_email.jinja2 - - new_2fa_code.jinja2 - - registration_email.jinja2 - - reset_password_email_failed.jinja2 - - reset_password_email.jinja2 - - service_submission.jinja2 - type: string - default: registration_email.jinja2 - template_context: - title: Template Context - type: object - default: {} - TestFailed: - title: TestFailed - required: - - test_name - - error_type - - error_message - - traceback - type: object - properties: - test_name: - title: Test Name - type: string - error_type: - title: Error Type - type: string - error_message: - title: Error Message - type: string - traceback: - title: Traceback - type: string - TestPassed: - title: TestPassed - required: - - fixtures - - info - type: object - properties: - fixtures: - title: Fixtures - type: object - info: - title: Info - type: object diff --git a/api/specs/webserver/openapi-auth.yaml b/api/specs/webserver/openapi-auth.yaml deleted file mode 100644 index 8d39b1b0e34..00000000000 --- a/api/specs/webserver/openapi-auth.yaml +++ /dev/null @@ -1,864 +0,0 @@ -paths: - /auth/register/invitations:check: - post: - tags: - - authentication - summary: 
Check Registration Invitation - description: Check invitation and returns associated email or None - operationId: auth_check_registration_invitation - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InvitationCheck' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_InvitationInfo_' - /auth/register: - post: - tags: - - authentication - summary: Register - description: User registration - operationId: auth_register - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - /auth/verify-phone-number: - post: - tags: - - authentication - summary: Register Phone - description: user tries to verify phone number for 2 Factor Authentication when - registering - operationId: auth_register_phone - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterPhoneBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_RegisterPhoneNextPage_' - /auth/validate-code-register: - post: - tags: - - authentication - summary: Phone Confirmation - description: user enters 2 Factor Authentication code when registering - operationId: auth_phone_confirmation - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/PhoneConfirmationBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - /auth/login: - post: - tags: - - authentication - summary: Login - description: user logs in - operationId: auth_login - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/LoginBody' - required: true - responses: - '201': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_LoginNextPage_' - '401': - description: unauthorized reset due to invalid token code - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/validate-code-login: - post: - tags: - - authentication - summary: Login 2Fa - description: user enters 2 Factor Authentication code when login in - operationId: auth_login_2fa - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/LoginTwoFactorAuthBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '401': - description: unauthorized reset due to invalid token code - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/two_factor:resend: - post: - tags: - - authentication - summary: Resend 2Fa Code - description: Resends 2FA either via email or sms - operationId: auth_resend_2fa_code - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/Resend2faBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '401': - description: unauthorized reset due to invalid token code - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - 
/auth/logout: - post: - tags: - - authentication - summary: Logout - description: user logout - operationId: auth_logout - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/LogoutBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - /auth/reset-password: - post: - tags: - - authentication - summary: Reset Password - description: a non logged-in user requests a password reset - operationId: auth_reset_password - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ResetPasswordBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '503': - description: Service Unavailable - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/reset-password/{code}: - post: - tags: - - authentication - summary: Reset Password Allowed - description: changes password using a token code without being logged in - operationId: auth_reset_password_allowed - parameters: - - required: true - schema: - title: Code - type: string - name: code - in: path - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ResetPasswordConfirmation' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '401': - description: unauthorized reset due to invalid token code - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/change-email: - post: - tags: - - authentication - summary: Change Email - description: logged in user changes email - operationId: auth_change_email - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ChangeEmailBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '401': - description: unauthorized user. Login required - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - '503': - description: unable to send confirmation email - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/change-password: - post: - tags: - - authentication - summary: Change Password - description: logged in user changes password - operationId: auth_change_password - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ChangePasswordBody' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - '401': - description: unauthorized user. 
Login required - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - '409': - description: mismatch between new and confirmation passwords - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - '422': - description: current password is invalid - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Error_' - /auth/confirmation/{code}: - get: - tags: - - authentication - summary: Email Confirmation - description: email link sent to user to confirm an action - operationId: auth_confirmation - parameters: - - required: true - schema: - title: Code - type: string - name: code - in: path - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_Log_' - 3XX: - description: redirection to specific ui application page - /auth/api-keys: - get: - tags: - - authentication - summary: List Api Keys - description: lists display names of API keys by this user - operationId: list_api_keys - parameters: - - required: true - schema: - title: Code - type: string - name: code - in: query - responses: - '200': - description: returns the display names of API keys - content: - application/json: - schema: - title: Response 200 List Api Keys - type: array - items: - type: string - '400': - description: key name requested is invalid - '401': - description: requires login to list keys - '403': - description: not enough permissions to list keys - post: - tags: - - authentication - summary: Create Api Key - description: creates API keys to access public API - operationId: create_api_key - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ApiKeyCreate' - required: true - responses: - '200': - description: Authorization granted returning API key - content: - application/json: - schema: - $ref: '#/components/schemas/ApiKeyGet' - '400': - description: key name requested is invalid - '401': - description: requires login to list keys - '403': - description: not enough permissions to list keys - delete: - tags: - - authentication - summary: Delete Api Key - description: deletes API key by name - operationId: delete_api_key - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ApiKeyCreate' - required: true - responses: - '204': - description: api key successfully deleted - '401': - description: requires login to delete a key - '403': - description: not enough permissions to delete a key -components: - schemas: - ApiKeyCreate: - title: ApiKeyCreate - required: - - display_name - type: object - properties: - display_name: - title: Display Name - minLength: 3 - type: string - expiration: - title: Expiration - type: number - description: Time delta from creation time to expiration. If None, then - it does not expire. 
- format: time-delta - ApiKeyGet: - title: ApiKeyGet - required: - - display_name - - api_key - - api_secret - type: object - properties: - display_name: - title: Display Name - minLength: 3 - type: string - api_key: - title: Api Key - type: string - api_secret: - title: Api Secret - type: string - ChangeEmailBody: - title: ChangeEmailBody - required: - - email - type: object - properties: - email: - title: Email - type: string - format: email - additionalProperties: false - ChangePasswordBody: - title: ChangePasswordBody - required: - - current - - new - - confirm - type: object - properties: - current: - title: Current - type: string - format: password - writeOnly: true - new: - title: New - type: string - format: password - writeOnly: true - confirm: - title: Confirm - type: string - format: password - writeOnly: true - additionalProperties: false - CodePageParams: - title: CodePageParams - required: - - message - type: object - properties: - message: - title: Message - type: string - retry_2fa_after: - title: Retry 2Fa After - exclusiveMinimum: true - type: integer - minimum: 0 - next_url: - title: Next Url - type: string - Envelope_Error_: - title: Envelope[Error] - type: object - properties: - data: - $ref: '#/components/schemas/Error' - error: - title: Error - Envelope_InvitationInfo_: - title: Envelope[InvitationInfo] - type: object - properties: - data: - $ref: '#/components/schemas/InvitationInfo' - error: - title: Error - Envelope_Log_: - title: Envelope[Log] - type: object - properties: - data: - $ref: '#/components/schemas/Log' - error: - title: Error - Envelope_LoginNextPage_: - title: Envelope[LoginNextPage] - type: object - properties: - data: - $ref: '#/components/schemas/LoginNextPage' - error: - title: Error - Envelope_RegisterPhoneNextPage_: - title: Envelope[RegisterPhoneNextPage] - type: object - properties: - data: - $ref: '#/components/schemas/RegisterPhoneNextPage' - error: - title: Error - Error: - title: Error - type: object - properties: - logs: - title: Logs - type: array - items: - $ref: '#/components/schemas/Log' - description: log messages - errors: - title: Errors - type: array - items: - $ref: '#/components/schemas/ErrorItem' - description: errors metadata - status: - title: Status - type: integer - description: HTTP error code - ErrorItem: - title: ErrorItem - required: - - code - - message - type: object - properties: - code: - title: Code - type: string - description: Typically the name of the exception that produced it otherwise - some known error code - message: - title: Message - type: string - description: Error message specific to this item - resource: - title: Resource - type: string - description: API resource affected by this error - field: - title: Field - type: string - description: Specific field within the resource - InvitationCheck: - title: InvitationCheck - required: - - invitation - type: object - properties: - invitation: - title: Invitation - type: string - description: Invitation code - additionalProperties: false - InvitationInfo: - title: InvitationInfo - type: object - properties: - email: - title: Email - type: string - description: Email associated to invitation or None - format: email - additionalProperties: false - Log: - title: Log - required: - - message - type: object - properties: - level: - allOf: - - $ref: '#/components/schemas/LogLevel' - description: log level - default: INFO - message: - title: Message - type: string - description: log message. 
If logger is USER, then it MUST be human readable - logger: - title: Logger - type: string - description: name of the logger receiving this message - example: - message: Hi there, Mr user - level: INFO - logger: user-logger - LogLevel: - title: LogLevel - enum: - - DEBUG - - INFO - - WARNING - - ERROR - type: string - description: An enumeration. - LoginBody: - title: LoginBody - required: - - email - - password - type: object - properties: - email: - title: Email - type: string - format: email - password: - title: Password - type: string - format: password - writeOnly: true - additionalProperties: false - LoginNextPage: - title: LoginNextPage - required: - - name - - code - - reason - type: object - properties: - name: - title: Name - type: string - description: Code name to the front-end page - parameters: - $ref: '#/components/schemas/CodePageParams' - code: - title: Code - type: string - deprecated: true - reason: - title: Reason - type: string - deprecated: true - description: 'This is the body of a 2XX response to pass the front-end - - what kind of page shall be display next and some information about it - - - An analogous structure is used in the redirects (see create_redirect_response) - but - - using a path+query in the fragment of the URL' - LoginTwoFactorAuthBody: - title: LoginTwoFactorAuthBody - required: - - email - - code - type: object - properties: - email: - title: Email - type: string - format: email - code: - title: Code - type: string - format: password - writeOnly: true - additionalProperties: false - LogoutBody: - title: LogoutBody - type: object - properties: - client_session_id: - title: Client Session Id - type: string - example: 5ac57685-c40f-448f-8711-70be1936fd63 - additionalProperties: false - PhoneConfirmationBody: - title: PhoneConfirmationBody - required: - - email - - phone - - code - type: object - properties: - email: - title: Email - type: string - format: email - phone: - title: Phone - type: string - description: Phone number E.164, needed on the deployments with 2FA - code: - title: Code - type: string - format: password - writeOnly: true - additionalProperties: false - RegisterBody: - title: RegisterBody - required: - - email - - password - type: object - properties: - email: - title: Email - type: string - format: email - password: - title: Password - type: string - format: password - writeOnly: true - confirm: - title: Confirm - type: string - description: Password confirmation - format: password - writeOnly: true - invitation: - title: Invitation - type: string - description: Invitation code - additionalProperties: false - RegisterPhoneBody: - title: RegisterPhoneBody - required: - - email - - phone - type: object - properties: - email: - title: Email - type: string - format: email - phone: - title: Phone - type: string - description: Phone number E.164, needed on the deployments with 2FA - additionalProperties: false - RegisterPhoneNextPage: - title: RegisterPhoneNextPage - required: - - name - - message - type: object - properties: - name: - title: Name - type: string - description: Code name to the front-end page - parameters: - $ref: '#/components/schemas/_PageParams' - logger: - title: Logger - type: string - default: user - deprecated: true - level: - title: Level - enum: - - INFO - - WARNING - - ERROR - type: string - default: INFO - message: - title: Message - type: string - description: 'This is the body of a 2XX response to pass the front-end - - what kind of page shall be display next and some information about it - - - An analogous 
structure is used in the redirects (see create_redirect_response) - but - - using a path+query in the fragment of the URL' - Resend2faBody: - title: Resend2faBody - required: - - email - type: object - properties: - email: - title: Email - type: string - description: User email (identifier) - format: email - via: - title: Via - enum: - - SMS - - Email - type: string - default: SMS - additionalProperties: false - ResetPasswordBody: - title: ResetPasswordBody - required: - - email - type: object - properties: - email: - title: Email - type: string - additionalProperties: false - ResetPasswordConfirmation: - title: ResetPasswordConfirmation - required: - - password - - confirm - type: object - properties: - password: - title: Password - type: string - format: password - writeOnly: true - confirm: - title: Confirm - type: string - format: password - writeOnly: true - additionalProperties: false - _PageParams: - title: _PageParams - type: object - properties: - retry_2fa_after: - title: Retry 2Fa After - exclusiveMinimum: true - type: integer - minimum: 0 diff --git a/api/specs/webserver/openapi-catalog.yaml b/api/specs/webserver/openapi-catalog.yaml deleted file mode 100644 index 4e0c6654387..00000000000 --- a/api/specs/webserver/openapi-catalog.yaml +++ /dev/null @@ -1,715 +0,0 @@ -paths: - /catalog/dags: - get: - tags: - - catalog - operationId: list_catalog_dags - responses: - "200": - description: List of catalog dags - "422": - description: Validation Error - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - post: - tags: - - catalog - summary: Creates a new dag in catalog - operationId: create_catalog_dag - requestBody: - content: - application/json: - schema: - type: object - additionalProperties: true - responses: - "201": - description: The dag was successfully created - "422": - description: Validation Error - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - /catalog/dags/{dag_id}: - parameters: - - in: path - name: dag_id - required: true - schema: - title: Dag Id - type: integer - put: - tags: - - catalog - summary: Replaces a dag in catalog - operationId: replace_catalog_dag - requestBody: - content: - application/json: - schema: - type: object - additionalProperties: true - responses: - "200": - description: The dag was replaced in catalog - "422": - description: Validation Error - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - delete: - tags: - - catalog - summary: Deletes an existing dag - operationId: delete_catalog_dag - responses: - "204": - description: Successfully deleted - "422": - description: Validation Error - - catalog_services: - get: - tags: - - catalog - summary: List Services - operationId: list_services_handler - responses: - "200": - description: Returns list of services from the catalog - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - catalog_services_service_key_service_version: - parameters: - - in: path - name: service_key - required: true - schema: - title: Service key - type: string - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - - in: path - name: service_version - required: true - schema: - title: Service version - type: string - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - get: - tags: - - catalog - summary: Get Service - operationId: get_service_handler - responses: - "200": - description: 
Returns service - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - patch: - tags: - - catalog - summary: Update Service - operationId: update_service_handler - requestBody: - content: - application/json: - schema: - type: object - additionalProperties: true - responses: - "200": - description: Returns modified service - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - - catalog_services_service_key_service_version_inputs: - get: - tags: - - catalog - operationId: list_service_inputs_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/ServiceInputApiOut" - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: List Service Inputs - catalog_services_service_key_service_version_inputs_input_key: - get: - tags: - - catalog - operationId: get_service_input_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - - in: path - name: input_key - required: true - schema: - pattern: ^[-_a-zA-Z0-9]+$ - title: Input Key - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/ServiceInputApiOut" - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: Get Service Input - catalog_services_service_key_service_version_inputs_match: - get: - tags: - - catalog - description: "Filters inputs of this service that match a given service output. Returns compatible input ports of the service, provided an output port of a connected node." 
- operationId: get_compatible_inputs_given_source_output_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - - in: query - name: fromService - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Fromservice - type: string - - in: query - name: fromVersion - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Fromversion - type: string - - in: query - name: fromOutput - required: true - schema: - pattern: ^[-_a-zA-Z0-9]+$ - title: Fromoutput - type: string - responses: - "200": - content: - application/json: - schema: - items: - pattern: ^[-_a-zA-Z0-9]+$ - type: string - title: - Response Get Compatible Inputs Given Source Output Catalog - Services Service Key Service Version Inputs Match Get - type: array - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: Get Compatible Inputs Given Source Output - catalog_services_service_key_service_version_outputs: - get: - tags: - - catalog - operationId: list_service_outputs_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - responses: - "200": - content: - application/json: - schema: - items: - $ref: "#/components/schemas/ServiceOutputApiOut" - title: - Response List Service Outputs Catalog Services Service Key Service - Version Outputs Get - type: array - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: List Service Outputs - catalog_services_service_key_service_version_outputs_output_key: - get: - tags: - - catalog - operationId: get_service_output_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - - in: path - name: output_key - required: true - schema: - pattern: ^[-_a-zA-Z0-9]+$ - title: Output Key - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/ServiceOutputApiOut" - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: Get Service 
Output - catalog_services_service_key_service_version_outputs_match: - get: - tags: - - catalog - description: "Filters outputs of this service that match a given service input. Returns compatible output port of a connected node for a given input" - operationId: get_compatible_outputs_given_target_input_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - - in: query - name: toService - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Toservice - type: string - - in: query - name: toVersion - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Toversion - type: string - - in: query - name: toInput - required: true - schema: - pattern: ^[-_a-zA-Z0-9]+$ - title: Toinput - type: string - responses: - "200": - content: - application/json: - schema: - items: - pattern: ^[-_a-zA-Z0-9]+$ - type: string - title: - Response Get Compatible Outputs Given Target Input Catalog - Services Service Key Service Version Outputs Match Get - type: array - description: Successful Response - "422": - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - description: Validation Error - summary: Get Compatible Outputs Given Target Input - - catalog_services_service_key_service_version_resources: - get: - tags: - - catalog - description: Returns the service default resources - operationId: get_service_resources_handler - parameters: - - in: path - name: service_key - required: true - schema: - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - title: Service Key - type: string - - in: path - name: service_version - required: true - schema: - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - title: Service Version - type: string - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/ServiceResourcesEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" -components: - parameters: - ServiceType: - in: query - name: service_type - description: | - The service type: - * computational - a computational service - * interactive - an interactive service - required: false - schema: - type: string - enum: - - computational - - interactive - example: computational - schemas: - ServicesEnveloped: - $ref: "../common/schemas/services.yaml#/components/schemas/ServicesEnveloped" - HTTPValidationError: - properties: - detail: - items: - $ref: "#/components/schemas/ValidationError" - title: Detail - type: array - title: HTTPValidationError - type: object - SelectBox: - additionalProperties: false - properties: - structure: - items: - $ref: "#/components/schemas/Structure" - minItems: 1 - title: Structure - type: array - required: - - structure - title: SelectBox - type: object - ServiceInputApiOut: - additionalProperties: false - description: Metadata on a service input port - 
example: - defaultValue: 0 - description: Time to wait before completion - displayOrder: 2 - keyId: input_2 - label: Sleep Time - type: number - unit: second - unitLong: seconds - unitShort: sec - widget: - details: - minHeight: 1 - type: TextArea - properties: - defaultValue: - anyOf: - - type: boolean - - type: integer - - type: number - - type: string - title: Defaultvalue - description: - description: description of the property - example: Age in seconds since 1970 - title: Description - type: string - displayOrder: - description: use this to numerically sort the properties for display - title: Displayorder - type: number - fileToKeyMap: - description: Place the data associated with the named keys in files - title: Filetokeymap - type: object - keyId: - description: Unique name identifier for this input - pattern: ^[-_a-zA-Z0-9]+$ - title: Keyid - type: string - label: - description: short name for the property - example: Age - title: Label - type: string - type: - description: - data type expected on this input glob matching for data type - is allowed - pattern: ^(number|integer|boolean|string|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ - title: Type - type: string - unit: - description: Units, when it refers to a physical quantity - title: Unit - type: string - unitLong: - description: Long name of the unit, if available - title: Unitlong - type: string - unitShort: - description: Short name for the unit, if available - title: Unitshort - type: string - widget: - allOf: - - $ref: "#/components/schemas/Widget" - description: - custom widget to use instead of the default one determined - from the data-type - title: Widget - required: - - displayOrder - - label - - description - - type - - keyId - title: ServiceInputApiOut - type: object - ServiceOutputApiOut: - additionalProperties: false - description: Metadata on a service input or output port - example: - defaultValue: 0 - description: Time to wait before completion - displayOrder: 2 - keyId: input_2 - label: Sleep Time - type: number - unit: second - unitLong: seconds - unitShort: sec - widget: - details: - minHeight: 1 - type: TextArea - properties: - defaultValue: - anyOf: - - type: boolean - - type: integer - - type: number - - type: string - title: Defaultvalue - description: - description: description of the property - example: Age in seconds since 1970 - title: Description - type: string - displayOrder: - description: use this to numerically sort the properties for display - title: Displayorder - type: number - fileToKeyMap: - description: Place the data associated with the named keys in files - title: Filetokeymap - type: object - keyId: - description: Unique name identifier for this input - pattern: ^[-_a-zA-Z0-9]+$ - title: Keyid - type: string - label: - description: short name for the property - example: Age - title: Label - type: string - type: - description: - data type expected on this input glob matching for data type - is allowed - pattern: ^(number|integer|boolean|string|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$ - title: Type - type: string - unit: - description: Units, when it refers to a physical quantity - title: Unit - type: string - unitLong: - description: Long name of the unit, if available - title: Unitlong - type: string - unitShort: - description: Short name for the unit, if available - title: Unitshort - type: string - widget: - allOf: - - $ref: "#/components/schemas/Widget" - deprecated: true - description: - custom widget to use instead of the default 
one determined - from the data-type - title: Widget - required: - - displayOrder - - label - - description - - type - - keyId - title: ServiceOutputApiOut - type: object - Structure: - additionalProperties: false - properties: - key: - anyOf: - - type: string - - type: boolean - - type: number - title: Key - label: - title: Label - type: string - required: - - key - - label - title: Structure - type: object - TextArea: - additionalProperties: false - properties: - minHeight: - description: minimum Height of the textarea - x-exclusiveMinimum: 0.0 - title: Minheight - type: integer - required: - - minHeight - title: TextArea - type: object - ValidationError: - properties: - loc: - items: - type: string - title: Location - type: array - msg: - title: Message - type: string - type: - title: Error Type - type: string - required: - - loc - - msg - - type - title: ValidationError - type: object - Widget: - additionalProperties: false - properties: - details: - anyOf: - - $ref: "#/components/schemas/TextArea" - - $ref: "#/components/schemas/SelectBox" - title: Details - type: - allOf: - - $ref: "#/components/schemas/WidgetType" - description: type of the property - required: - - type - - details - title: Widget - type: object - WidgetType: - description: An enumeration. - enum: - - TextArea - - SelectBox - title: WidgetType - type: string - ServiceResourcesEnveloped: - $ref: "./components/schemas/node_resources.yaml#/components/schemas/NodeResourcesEnveloped" - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-clusters.yaml b/api/specs/webserver/openapi-clusters.yaml deleted file mode 100644 index 0e10b06ab76..00000000000 --- a/api/specs/webserver/openapi-clusters.yaml +++ /dev/null @@ -1,154 +0,0 @@ -paths: - /clusters: - get: - summary: List my clusters - operationId: list_clusters_handler - tags: - - cluster - responses: - "200": - description: list of the clusters I have access to - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClustersEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - summary: Create a new cluster - operationId: create_cluster_handler - tags: - - cluster - requestBody: - required: true - description: the cluster to create - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterCreate" - responses: - "201": - description: cluster created - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - director_v2_clusters_ping: - post: - summary: test connectivity with cluster - operationId: ping_cluster_handler - tags: - - cluster - requestBody: - required: true - description: the cluster endpoint/authentication to test - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterPing" - responses: - "204": - description: connectivity is OK - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /clusters/{cluster_id}: - parameters: - - name: cluster_id - in: path - required: true - schema: - type: string - get: - tags: - - cluster - summary: Gets one cluster - operationId: get_cluster_handler - responses: - "200": - description: got cluster - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - patch: - summary: 
Update one cluster - operationId: update_cluster_handler - tags: - - cluster - requestBody: - required: true - description: the cluster to update - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterPatch" - responses: - "200": - description: the modified cluster - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - tags: - - cluster - summary: Deletes one cluster - operationId: delete_cluster_handler - responses: - "204": - description: cluster has been successfully deleted - default: - $ref: "#/components/responses/DefaultErrorResponse" - - director_v2_clusters_cluster_id_ping: - parameters: - - name: cluster_id - in: path - required: true - schema: - type: string - post: - summary: test connectivity with cluster - operationId: ping_cluster_cluster_id_handler - tags: - - cluster - responses: - "204": - description: connectivity is OK - default: - $ref: "#/components/responses/DefaultErrorResponse" - - director_v2_clusters_cluster_id_details: - parameters: - - name: cluster_id - in: path - required: true - schema: - type: string - get: - tags: - - cluster - summary: Gets one cluster details - operationId: get_cluster_details_handler - responses: - "200": - description: got cluster - content: - application/json: - schema: - $ref: "./components/schemas/cluster.yaml#/ClusterDetailsEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - -components: - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-computations.yaml b/api/specs/webserver/openapi-computations.yaml deleted file mode 100644 index dbcabd9d6e1..00000000000 --- a/api/specs/webserver/openapi-computations.yaml +++ /dev/null @@ -1,91 +0,0 @@ -paths: - computations_project_id: - get: - description: Returns the last computation data - tags: - - computations - operationId: get_computation - parameters: - - $ref: "#/components/parameters/ProjectId" - responses: - "200": - description: Succesffully retrieved computation - content: - application/json: - schema: - $ref: "./components/schemas/pipeline.yaml#/components/schemas/PipelineEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - computations_project_id_start: - post: - description: Starts the pipeline(s) of a given (meta) project - tags: - - computations - operationId: start_computation - parameters: - - $ref: "#/components/parameters/ProjectId" - requestBody: - required: false - content: - application/json: - schema: - type: object - properties: - force_restart: - type: boolean - default: false - description: "if true will force re-running all dependent nodes" - cluster_id: - type: integer - description: the computation shall use the cluster described by its id, 0 is the default cluster - default: 0 - minimum: 0 - subgraph: - description: The node uuids selected for running a partial pipeline - type: array - uniqueItems: true - items: - type: string - format: uuid - - responses: - "201": - description: Successfully started the pipeline - content: - application/json: - schema: - $ref: "./components/schemas/pipeline.yaml#/components/schemas/PipelineCreatedEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - computations_project_id_stop: - post: - description: Stops (all) pipeline(s) of a given (meta) project - tags: - - computations - operationId: 
stop_computation - parameters: - - $ref: "#/components/parameters/ProjectId" - responses: - "204": - description: Succesffully stopped the pipeline - default: - $ref: "#/components/responses/DefaultErrorResponse" - - # PROJECT SERVICES ----------------------------------------------------------------- -components: - parameters: - ProjectId: - in: path - name: project_id - required: true - description: the uuid of the project - schema: - type: string - # format: uuid - example: 123e4567-e89b-12d3-a456-426655440000 - - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-diagnostics.yaml b/api/specs/webserver/openapi-diagnostics.yaml deleted file mode 100644 index 121972539ad..00000000000 --- a/api/specs/webserver/openapi-diagnostics.yaml +++ /dev/null @@ -1,122 +0,0 @@ -paths: - /: - get: - tags: - - maintenance - summary: readiness probe for - operationId: healthcheck_readiness_probe - responses: - "200": - description: Service information - content: - application/json: - schema: - $ref: "./components/schemas/health_check.yaml#/HealthCheckEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - /health: - get: - tags: - - maintenance - summary: liveliness probe - operationId: healthcheck_liveness_probe - responses: - "200": - description: Service information - content: - application/json: - schema: - $ref: "./components/schemas/health_check.yaml#/HealthCheckEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /status: - get: - tags: - - maintenance - summary: checks status of self and connected services - operationId: get_app_status - responses: - "200": - description: returns app status check - - /status/diagnostics: - get: - tags: - - maintenance - operationId: get_app_diagnostics - responses: - "200": - description: returns app diagnostics report - - /status/{service_name}: - get: - tags: - - maintenance - operationId: get_service_status - parameters: - - in: path - required: true - name: service_name - schema: - type: string - responses: - "200": - description: returns status of connected service - - /config: - get: - summary: Front end runtime configuration - operationId: get_config - tags: - - configuration - responses: - "200": - description: configuration details - content: - application/json: - schema: - $ref: "./components/schemas/config.yaml#/ConfigEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /scheduled_maintenance: - get: - tags: - - maintenance - summary: Get Scheduled Maintenance - operationId: get_scheduled_maintenance - responses: - "200": - description: Maintenance scheduled - content: - application/json: - schema: - $ref: "#/components/schemas/ScheduledMaintenance" - "204": - description: No maintenance scheduled - default: - $ref: "#/components/responses/DefaultErrorResponse" - -components: - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - schemas: - ScheduledMaintenance: - title: ScheduledMaintenance - type: object - properties: - start: - title: Start - type: string - description: At what time the Maintenance starts (UTC) - end: - title: End - type: string - description: At what time the Maintenance ends (UTC) - reason: - title: Reason - type: string - description: What the purpose of the Maintenance is - additionalProperties: false diff --git a/api/specs/webserver/openapi-groups.yaml b/api/specs/webserver/openapi-groups.yaml deleted file mode 
100644 index 7ffa3349e94..00000000000 --- a/api/specs/webserver/openapi-groups.yaml +++ /dev/null @@ -1,296 +0,0 @@ -paths: - /groups: - get: - summary: List my groups - operationId: list_groups - tags: - - group - responses: - "200": - description: list of the groups I belonged to - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/AllUsersGroupsEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - summary: Create a new group - operationId: create_group - tags: - - group - requestBody: - required: true - description: the group to create - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/UsersGroup" - responses: - "201": - description: group created - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/UsersGroupEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/{gid}: - parameters: - - name: gid - in: path - required: true - schema: - type: string - get: - tags: - - group - summary: Gets one group details - operationId: get_group - responses: - "200": - description: got group - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/UsersGroupEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - patch: - summary: Update one group - operationId: update_group - tags: - - group - requestBody: - required: true - description: the group to update - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/UsersGroup" - responses: - "200": - description: the modified group - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/UsersGroupEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - tags: - - group - summary: Deletes one group - operationId: delete_group - responses: - "204": - description: group has been successfully deleted - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/{gid}/users: - parameters: - - name: gid - in: path - required: true - schema: - type: string - get: - tags: - - group - summary: Gets list of users in group - operationId: get_group_users - responses: - "200": - description: got list of users and their respective rights - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/GroupUsersArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - tags: - - group - summary: Adds a user in the group - operationId: add_group_user - requestBody: - required: true - description: the user to add - content: - application/json: - schema: - anyOf: - - type: object - required: - - uid - properties: - uid: - type: string - description: the user id - - type: object - required: - - email - properties: - email: - type: string - format: email - description: the user email - responses: - "204": - description: user successfully added - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/{gid}/users/{uid}: - parameters: - - name: gid - in: path - required: true - schema: - type: string - - name: uid - in: path - required: true - schema: - type: string - get: - tags: - - group - summary: Gets specific user in group - operationId: get_group_user - responses: - "200": - description: got user - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/GroupUserEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - patch: - tags: - - group - summary: 
Modify specific user in group - operationId: update_group_user - requestBody: - required: true - description: the user rights to modify - content: - application/json: - schema: - type: object - properties: - accessRights: - $ref: "./components/schemas/group.yaml#/GroupAccessRights" - required: - - accessRights - - - - responses: - "200": - description: modified user - content: - application/json: - schema: - $ref: "./components/schemas/group.yaml#/GroupUserEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - tags: - - group - summary: Delete specific user in group - operationId: delete_group_user - responses: - "204": - description: successfully removed user - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/{gid}/classifiers: - get: - parameters: - - name: gid - in: path - required: true - schema: - type: string - - name: tree_view - in: query - description: "Some classifiers (e.g. sparc's) offer different tree views" - schema: - type: string - enum: [std, sort] - default: std - tags: - - group - summary: Gets classifiers bundle for this group - operationId: get_group_classifiers - responses: - "200": - description: got a bundle with all information about classifiers - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/sparc/classifiers/scicrunch-resources/{rrid}: - parameters: - - name: rrid - in: path - required: true - schema: - type: string - get: - tags: - - group - summary: "Returns information on a valid RRID (https://www.force11.org/group/resource-identification-initiative)" - operationId: get_scicrunch_resource - responses: - "200": - description: Got information of a valid RRID - "400": - description: Invalid RRID - "503": - description: scircrunch.org service is not reachable - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - tags: - - group - summary: "Adds new RRID to classifiers" - operationId: add_scicrunch_resource - responses: - "200": - description: Got information of a valid RRID - "400": - description: Invalid RRID - "503": - description: scircrunch.org service is not reachable - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /groups/sparc/classifiers/scicrunch-resources:search: - get: - parameters: - - name: guess_name - in: query - required: true - schema: - type: string - tags: - - group - summary: "Returns a list of related resource provided a search name" - operationId: search_scicrunch_resources - responses: - "200": - description: Got information of a valid RRID - "503": - description: scircrunch.org service is not reachable - default: - $ref: "#/components/responses/DefaultErrorResponse" -components: - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-meta-projects.yaml b/api/specs/webserver/openapi-meta-projects.yaml deleted file mode 100644 index ab3adcc5fe7..00000000000 --- a/api/specs/webserver/openapi-meta-projects.yaml +++ /dev/null @@ -1,382 +0,0 @@ -paths: - /projects/{project_uuid}/checkpoint/{ref_id}/iterations: - get: - tags: - - meta-projects - summary: List Project Iterations - description: Lists current project's iterations - operationId: "simcore_service_webserver.meta_modeling_handlers._list_meta_project_iterations_handler" - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: 
path - - required: true - schema: - title: Ref Id - anyOf: - - type: integer - - type: string - name: ref_id - in: path - - description: index to the first item to return (pagination) - required: false - schema: - title: Offset - exclusiveMinimum: false - type: integer - description: index to the first item to return (pagination) - default: 0 - minimum: 0 - name: offset - in: query - - description: maximum number of items to return (pagination) - required: false - schema: - title: Limit - maximum: 50.0 - minimum: 1.0 - type: integer - description: maximum number of items to return (pagination) - default: 20 - name: limit - in: query - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Page_IterationItem_" - "404": - description: - This project has no iterations.Only meta-project have iterations - and they must be explicitly created. - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/{iter_id}: - get: - tags: - - meta-projects - summary: Get Project Iterations - description: Get current project's iterations - operationId: "simcore_service_webserver.meta_modeling_handlers._get_meta_project_iterations_handler" - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - - required: true - schema: - title: Ref Id - anyOf: - - type: integer - - type: string - name: ref_id - in: path - - required: true - name: iter_id - schema: - type: integer - in: path - responses: - "200": - description: Successful Response - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/-/results: - get: - tags: - - meta-projects - summary: List Project Iterations Results - description: Lists current project's iterations results table - operationId: "simcore_service_webserver.meta_modeling_handlers._list_meta_project_iterations_results_handler" - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - - required: true - schema: - title: Ref Id - anyOf: - - type: integer - - type: string - name: ref_id - in: path - - description: index to the first item to return (pagination) - required: false - schema: - title: Offset - exclusiveMinimum: false - type: integer - description: index to the first item to return (pagination) - default: 0 - minimum: 0 - name: offset - in: query - - description: maximum number of items to return (pagination) - required: false - schema: - title: Limit - maximum: 50.0 - minimum: 1.0 - type: integer - description: maximum number of items to return (pagination) - default: 20 - name: limit - in: query - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Page_IterationResultItem_" - "404": - description: - This project has no iterations.Only meta-project have iterations - and they must be explicitly created. 
- "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/{iter_id}/results: - get: - tags: - - meta-projects - summary: Get Project Iteration Results - description: Get current project's iterations - operationId: "simcore_service_webserver.meta_modeling_handlers._get_meta_project_iteration_results_handler" - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - - required: true - schema: - title: Ref Id - anyOf: - - type: integer - - type: string - name: ref_id - in: path - - required: true - schema: - type: integer - name: iter_id - in: path - - responses: - "200": - description: Successful Response - -components: - schemas: - HTTPValidationError: - title: HTTPValidationError - type: object - properties: - detail: - title: Detail - type: array - items: - $ref: "#/components/schemas/ValidationError" - IterationItem: - title: IterationItem - required: - - name - - parent - - workcopy_project_id - - workcopy_project_url - - url - type: object - properties: - name: - title: Name - type: string - description: Iteration's resource name [AIP-122](https://google.aip.dev/122) - parent: - title: Parent - allOf: - - $ref: "#/components/schemas/ParentMetaProjectRef" - description: Reference to the the meta-project that defines this iteration - workcopy_project_id: - title: Workcopy's Project Id - type: string - description: - ID to this iteration's working copy.A working copy is a real - project where this iteration is run - format: uuid - workcopy_project_url: - title: Workcopy's Project Url - maxLength: 2083 - minLength: 1 - type: string - format: uri - url: - title: Url - maxLength: 2083 - minLength: 1 - type: string - format: uri - PageLinks: - title: PageLinks - required: - - self - - first - - last - type: object - properties: - self: - title: Self - maxLength: 65536 - minLength: 1 - type: string - format: uri - first: - title: First - maxLength: 65536 - minLength: 1 - type: string - format: uri - prev: - title: Prev - maxLength: 65536 - minLength: 1 - type: string - format: uri - next: - title: Next - maxLength: 65536 - minLength: 1 - type: string - format: uri - last: - title: Last - maxLength: 65536 - minLength: 1 - type: string - format: uri - additionalProperties: false - PageMetaInfoLimitOffset: - title: PageMetaInfoLimitOffset - required: - - total - - count - type: object - properties: - limit: - title: Limit - exclusiveMinimum: true - type: integer - default: 20 - minimum: 0 - total: - title: Total - minimum: 0.0 - type: integer - offset: - title: Offset - minimum: 0.0 - type: integer - default: 0 - count: - title: Count - minimum: 0.0 - type: integer - Page_IterationItem_: - title: Page[IterationItem] - required: - - _meta - - _links - - data - type: object - properties: - _meta: - $ref: "#/components/schemas/PageMetaInfoLimitOffset" - _links: - $ref: "#/components/schemas/PageLinks" - data: - title: Data - type: array - items: - $ref: "#/components/schemas/IterationItem" - ParentMetaProjectRef: - title: ParentMetaProjectRef - required: - - project_id - - ref_id - type: object - properties: - project_id: - title: Project Id - type: string - format: uuid - ref_id: - title: Ref Id - type: integer - ValidationError: - title: ValidationError - required: - - loc - - msg - - type - type: object - 
properties: - loc: - title: Location - type: array - items: - type: string - msg: - title: Message - type: string - type: - title: Error Type - type: string - Page_IterationResultItem_: - title: Page[IterationResultItem] - required: - - _meta - - _links - - data - type: object - properties: - _meta: - $ref: "#/components/schemas/PageMetaInfoLimitOffset" - _links: - $ref: "#/components/schemas/PageLinks" - data: - title: Data - type: array - items: - $ref: "#/components/schemas/IterationItem" - # NOTE: intentionally wrong. Will be deprecated diff --git a/api/specs/webserver/openapi-nih-sparc.yaml b/api/specs/webserver/openapi-nih-sparc.yaml deleted file mode 100644 index 5d42b3f53bb..00000000000 --- a/api/specs/webserver/openapi-nih-sparc.yaml +++ /dev/null @@ -1,271 +0,0 @@ -paths: - /services: - get: - tags: - - nih-sparc - summary: List Services - description: Returns a list latest version of services - operationId: list_services - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_simcore_service_webserver.studies_dispatcher.handlers_rest.ServiceGet__' - /viewers: - get: - tags: - - nih-sparc - summary: List Viewers - description: 'Lists all publically available viewers - - - Notice that this might contain multiple services for the same filetype - - - If file_type is provided, then it filters viewer for that filetype' - operationId: list_viewers - parameters: - - required: false - schema: - title: File Type - type: string - name: file_type - in: query - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_simcore_service_webserver.studies_dispatcher.handlers_rest.Viewer__' - /viewers/default: - get: - tags: - - nih-sparc - summary: List Default Viewers - description: 'Lists the default viewer for each supported filetype - - - This was interfaced as a subcollection of viewers because it is a very common - use-case - - - Only publicaly available viewers - - - If file_type is provided, then it filters viewer for that filetype' - operationId: list_default_viewers - parameters: - - required: false - schema: - title: File Type - type: string - name: file_type - in: query - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_simcore_service_webserver.studies_dispatcher.handlers_rest.Viewer__' - /view: - get: - tags: - - nih-sparc - summary: Get Redirection To Viewer - description: Opens a viewer in osparc for data in the NIH-sparc portal - operationId: get_redirection_to_viewer - parameters: - - required: true - schema: - title: File Type - type: string - name: file_type - in: query - - required: true - schema: - title: Viewer Key - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - type: string - name: viewer_key - in: query - - required: true - schema: - title: File Size - exclusiveMinimum: true - type: integer - minimum: 0 - name: file_size - in: query - - required: true - schema: - title: Download Link - maxLength: 2083 - minLength: 1 - type: string - format: uri - name: download_link - in: query - - required: false - schema: - title: File Name - type: string - default: unknown - name: file_name - in: query - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ServiceKeyVersion' - required: true - responses: - '302': - description: Opens osparc and starts viewer for selected data - 
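
A minimal sketch of how a client might assemble the GET /view redirection URL described above. The host name and the helper function are assumptions; only the query parameter names, the viewer_key pattern, and the file_size > 0 constraint are taken from the spec.

    from urllib.parse import urlencode

    OSPARC_HOST = "https://osparc.example.com"  # assumed host; the spec only defines the /view path

    def build_view_url(file_type: str, viewer_key: str, file_size: int,
                       download_link: str, file_name: str = "unknown") -> str:
        # file_size must be a positive integer (exclusiveMinimum: 0 in the spec)
        if file_size <= 0:
            raise ValueError("file_size must be > 0")
        query = urlencode({
            "file_type": file_type,          # e.g. "CSV"
            "viewer_key": viewer_key,        # e.g. "simcore/services/dynamic/raw-graphs"
            "file_size": file_size,
            "download_link": download_link,  # link from which the viewer downloads the data
            "file_name": file_name,          # optional, defaults to "unknown"
        })
        return f"{OSPARC_HOST}/view?{query}"  # the server answers 302 and opens the viewer

    # e.g. build_view_url("CSV", "simcore/services/dynamic/raw-graphs", 1024, "https://example.com/data.csv")
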
/study/{study_id}: - get: - tags: - - nih-sparc - summary: Get Redirection To Study Page - description: Opens a study published in osparc - operationId: get_redirection_to_study_page - parameters: - - required: true - schema: - title: Study Id - type: string - format: uuid - name: study_id - in: path - responses: - '302': - description: Opens osparc and opens a copy of publised study -components: - schemas: - Envelope_list_simcore_service_webserver.studies_dispatcher.handlers_rest.ServiceGet__: - title: Envelope[list[simcore_service_webserver.studies_dispatcher.handlers_rest.ServiceGet]] - type: object - properties: - data: - title: Data - type: array - items: - $ref: '#/components/schemas/ServiceGet' - error: - title: Error - Envelope_list_simcore_service_webserver.studies_dispatcher.handlers_rest.Viewer__: - title: Envelope[list[simcore_service_webserver.studies_dispatcher.handlers_rest.Viewer]] - type: object - properties: - data: - title: Data - type: array - items: - $ref: '#/components/schemas/Viewer' - error: - title: Error - ServiceGet: - title: ServiceGet - required: - - key - - title - - description - - thumbnail - - view_url - type: object - properties: - key: - title: Key - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - type: string - description: Service key ID - title: - title: Title - type: string - description: Service name for display - description: - title: Description - type: string - description: Long description of the service - thumbnail: - title: Thumbnail - maxLength: 2083 - minLength: 1 - type: string - description: Url to service thumbnail - format: uri - file_extensions: - title: File Extensions - type: array - items: - type: string - description: File extensions that this service can process - view_url: - title: View Url - maxLength: 2083 - minLength: 1 - type: string - description: Redirection to open a service in osparc (see /view) - format: uri - example: - key: simcore/services/dynamic/sim4life - title: Sim4Life Mattermost - description: It is also sim4life for the web - thumbnail: https://via.placeholder.com/170x120.png - file_extensions: - - smash - - h5 - view_url: https://host.com/view?viewer_key=simcore/services/dynamic/raw-graphs&viewer_version=1.2.3 - ServiceKeyVersion: - title: ServiceKeyVersion - required: - - key - - version - type: object - properties: - key: - title: Key - pattern: ^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$ - type: string - description: distinctive name for the node based on the docker registry - path - version: - title: Version - pattern: ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - type: string - description: service version number - description: This pair uniquely identifies a services - Viewer: - title: Viewer - required: - - title - - file_type - - view_url - type: object - properties: - title: - title: Title - type: string - description: Short formatted label with name and version of the viewer - file_type: - title: File Type - type: string - description: Identifier for the file type - view_url: - title: View Url - maxLength: 2083 - minLength: 1 - type: string - description: Base url to execute viewer. 
Needs appending file_size,[file_name] - and download_link as query parameters - format: uri - description: "API model for a viewer resource\n\nA viewer is a service with\ - \ an associated filetype.\nYou can think of it as a tuple (filetype, service)\n\ - \nThe service could consume other filetypes BUT at this\ninterface this is\ - \ represented in yet another viewer resource\n\nFor instance, the same service\ - \ can be in two different viewer resources\n - viewer1=(JPEG, RawGraph service)\n\ - \ - viewer2=(CSV, RawGraph service)\n\nA viewer can be dispatched using the\ - \ view_url and appending the" diff --git a/api/specs/webserver/openapi-node-v0.0.1.yaml b/api/specs/webserver/openapi-node-v0.0.1.yaml deleted file mode 100644 index f8fdfd18526..00000000000 --- a/api/specs/webserver/openapi-node-v0.0.1.yaml +++ /dev/null @@ -1,110 +0,0 @@ -paths: - /nodes/{nodeInstanceUUID}/outputUi/{outputKey}: - get: - tags: - - node - description: - get a json description of the ui for presenting the output within the mainUi - and a list of open api json schema objects describing the possible - json payloads and responses for the api calls available at this endpoint - operationId: get_node_output_ui - parameters: - - $ref: "#/components/parameters/nodeInstanceUUID" - - $ref: "#/components/parameters/outputKey" - responses: - "200": - description: Service Information - content: - application/json: - schema: - $ref: "#/components/schemas/outputUi" - default: - description: Unexpected error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorEnveloped" - - /nodes/{nodeInstanceUUID}/outputUi/{outputKey}/{apiCall}: - post: - tags: - - node - summary: send data back to the output api ... protocol depends on the definition - operationId: send_to_node_output_api - parameters: - - $ref: "#/components/parameters/nodeInstanceUUID" - - $ref: "#/components/parameters/outputKey" - - $ref: "#/components/parameters/apiCall" - requestBody: - content: - application/json: - schema: - oneOf: - - $ref: "../common/schemas/node-output-list-api-v0.0.1.yaml#/properties/getItemList/properties/request" - - $ref: "../common/schemas/node-output-list-api-v0.0.1.yaml#/properties/getItem/properties/request" - - $ref: "../common/schemas/node-output-tree-api-v0.0.1.yaml#/properties/getItemList/properties/request" - - $ref: "../common/schemas/node-output-tree-api-v0.0.1.yaml#/properties/getItem/properties/request" - responses: - default: - description: node type specific api call according to the node type presented - content: - application/json: - schema: - oneOf: - - $ref: "../common/schemas/node-output-list-api-v0.0.1.yaml#/properties/getItemList/properties/response" - - $ref: "../common/schemas/node-output-list-api-v0.0.1.yaml#/properties/getItem/properties/response" - - $ref: "../common/schemas/node-output-tree-api-v0.0.1.yaml#/properties/getItemList/properties/response" - - $ref: "../common/schemas/node-output-tree-api-v0.0.1.yaml#/properties/getItem/properties/response" - - /nodes/{nodeInstanceUUID}/iframe: - get: - tags: - - node - summary: entry point for iframe interaction with the node. This relies on the reverse proxy code. 
- operationId: get_node_output_iframe - parameters: - - $ref: "#/components/parameters/nodeInstanceUUID" - responses: - default: - description: "any response appropriate in the iframe context" - -components: - parameters: - nodeInstanceUUID: - in: path - name: nodeInstanceUUID - required: true - schema: - type: string - outputKey: - in: path - name: outputKey - required: true - schema: - type: string - apiCall: - in: path - name: apiCall - required: true - schema: - type: string - schemas: - outputUi: - type: object - properties: - plugin: - type: string - config: - type: object - outputApiCall: - type: object - properties: - call: - type: string - request: - type: object - outputApiResponse: - type: object - - ErrorEnveloped: - $ref: "../common/schemas/error.yaml#/components/schemas/ErrorEnveloped" diff --git a/api/specs/webserver/openapi-projects-ports.yaml b/api/specs/webserver/openapi-projects-ports.yaml deleted file mode 100644 index 5fe002b554f..00000000000 --- a/api/specs/webserver/openapi-projects-ports.yaml +++ /dev/null @@ -1,211 +0,0 @@ -paths: - /projects/{project_id}/inputs: - get: - tags: - - project - summary: Get Project Inputs - description: New in version *0.10* - operationId: get_project_inputs - parameters: - - required: true - schema: - title: Project Id - type: string - format: uuid - name: project_id - in: path - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_dict_uuid.UUID__simcore_service_webserver.projects.projects_ports_handlers.ProjectInputGet__' - patch: - tags: - - project - summary: Update Project Inputs - description: New in version *0.10* - operationId: update_project_inputs - parameters: - - required: true - schema: - title: Project Id - type: string - format: uuid - name: project_id - in: path - requestBody: - content: - application/json: - schema: - title: Updates - type: array - items: - $ref: '#/components/schemas/ProjectInputUpdate' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_dict_uuid.UUID__simcore_service_webserver.projects.projects_ports_handlers.ProjectInputGet__' - /projects/{project_id}/outputs: - get: - tags: - - project - summary: Get Project Outputs - description: New in version *0.10* - operationId: get_project_outputs - parameters: - - required: true - schema: - title: Project Id - type: string - format: uuid - name: project_id - in: path - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_dict_uuid.UUID__simcore_service_webserver.projects.projects_ports_handlers.ProjectOutputGet__' - /projects/{project_id}/metadata/ports: - get: - tags: - - project - summary: List Project Metadata Ports - description: New in version *0.12* - operationId: list_project_metadata_ports - parameters: - - required: true - schema: - title: Project Id - type: string - format: uuid - name: project_id - in: path - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_simcore_service_webserver.projects.projects_ports_handlers.ProjectMetadataPortGet__' -components: - schemas: - Envelope_dict_uuid.UUID__simcore_service_webserver.projects.projects_ports_handlers.ProjectInputGet__: - title: Envelope[dict[uuid.UUID, simcore_service_webserver.projects.projects_ports_handlers.ProjectInputGet]] - type: object - 
properties: - data: - title: Data - type: object - additionalProperties: - $ref: '#/components/schemas/ProjectInputGet' - error: - title: Error - Envelope_dict_uuid.UUID__simcore_service_webserver.projects.projects_ports_handlers.ProjectOutputGet__: - title: Envelope[dict[uuid.UUID, simcore_service_webserver.projects.projects_ports_handlers.ProjectOutputGet]] - type: object - properties: - data: - title: Data - type: object - additionalProperties: - $ref: '#/components/schemas/ProjectOutputGet' - error: - title: Error - Envelope_list_simcore_service_webserver.projects.projects_ports_handlers.ProjectMetadataPortGet__: - title: Envelope[list[simcore_service_webserver.projects.projects_ports_handlers.ProjectMetadataPortGet]] - type: object - properties: - data: - title: Data - type: array - items: - $ref: '#/components/schemas/ProjectMetadataPortGet' - error: - title: Error - ProjectInputGet: - title: ProjectInputGet - required: - - key - - value - - label - type: object - properties: - key: - title: Key - type: string - description: Project port's unique identifer. Same as the UUID of the associated - port node - format: uuid - value: - title: Value - description: Value assigned to this i/o port - label: - title: Label - type: string - ProjectInputUpdate: - title: ProjectInputUpdate - required: - - key - - value - type: object - properties: - key: - title: Key - type: string - description: Project port's unique identifer. Same as the UUID of the associated - port node - format: uuid - value: - title: Value - description: Value assigned to this i/o port - ProjectMetadataPortGet: - title: ProjectMetadataPortGet - required: - - key - - kind - type: object - properties: - key: - title: Key - type: string - description: Project port's unique identifer. Same as the UUID of the associated - port node - format: uuid - kind: - title: Kind - enum: - - input - - output - type: string - content_schema: - title: Content Schema - type: object - description: jsonschema for the port's value. SEE https://json-schema.org/understanding-json-schema/ - ProjectOutputGet: - title: ProjectOutputGet - required: - - key - - value - - label - type: object - properties: - key: - title: Key - type: string - description: Project port's unique identifer. 
Same as the UUID of the associated - port node - format: uuid - value: - title: Value - description: Value assigned to this i/o port - label: - title: Label - type: string diff --git a/api/specs/webserver/openapi-projects.yaml b/api/specs/webserver/openapi-projects.yaml deleted file mode 100644 index b54b5b8c8ba..00000000000 --- a/api/specs/webserver/openapi-projects.yaml +++ /dev/null @@ -1,710 +0,0 @@ -paths: - /projects: - get: - tags: - - project - summary: List all projects - operationId: list_projects - parameters: - - name: type - in: query - schema: - type: string - default: "all" - enum: [template, user, all] - - name: show_hidden - in: query - schema: - type: boolean - default: false - description: includes projects marked as hidden in the listing - - name: offset - in: query - schema: - type: integer - minimum: 0 - default: 0 - required: false - description: index to the first item to return - - name: limit - in: query - schema: - type: integer - default: 20 - minimum: 1 - maximum: 50 - required: false - description: maximum number of items to return - responses: - "200": - description: list of projects - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectArrayEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - tags: - - project - summary: Create new project - operationId: create_projects - parameters: - - name: from_study - in: query - schema: - type: string - description: "Option to create a project from existing template or study: from_study={study_uuid}" - - name: as_template - in: query - schema: - type: boolean - default: false - description: "Option to create a template from existing project: as_template=true" - - name: copy_data - in: query - schema: - type: boolean - default: True - description: "Option to copy data when creating from an existing template or as a template, defaults to True" - - name: hidden - in: query - schema: - type: boolean - default: false - description: Enables/disables hidden flag. Hidden projects are by default unlisted. 
- requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectIn" - responses: - "202": - description: project created - content: - application/json: - schema: - $ref: "../common/schemas/task.yaml#/TaskEnveloped" - links: - CreationStatus: - operationId: get_task_status - parameters: - task_id: $response.body#/data/task_id - CreationResult: - operationId: get_task_result - description: Returns 201 if creation succeeded - parameters: - task_id: $response.body#/data/task_id - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/active: - get: - tags: - - project - summary: Gets active project - operationId: get_active_project - responses: - "200": - description: returns active project - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - get: - tags: - - project - summary: Gets given project - operationId: get_project - responses: - "200": - description: got detailed project - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - put: - tags: - - project - summary: Enclosed entity replaces given project - operationId: replace_project - parameters: - - name: run - in: query - required: false - schema: - type: boolean - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectIn" - responses: - "200": - description: got detailed project - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - tags: - - project - summary: Delete given project - operationId: delete_project - responses: - "204": - description: project has been successfully deleted - - /projects/{project_id}/open: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - post: - tags: - - project - summary: Open a given project - operationId: open_project - requestBody: - description: browser tab identifier - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ClientSessionId" - responses: - "200": - description: project successfuly opened - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/state: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - get: - tags: - - project - summary: returns the state of a project - operationId: get_project_state - responses: - "200": - description: returns the project current state - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectStateEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/xport: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - post: - tags: - - exporter - summary: creates an archive of the project and downloads it - operationId: export_project - responses: - "200": - description: creates an archive from a project file - content: - application/zip: - schema: - type: string - format: binary - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/duplicate: - parameters: - - name: project_id - 
in: path - required: true - schema: - type: string - post: - tags: - - exporter - summary: duplicates an existing project - operationId: duplicate_project - responses: - "200": - description: project was duplicated correctly - content: - application/json: - schema: - type: object - properties: - uuid: - type: string - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/import: - post: - tags: - - exporter - summary: Create new project from an archive - operationId: import_project - requestBody: - content: - multipart/form-data: - schema: - type: object - properties: - fileName: - type: string - format: binary - responses: - "200": - description: creates a new project from an archive - content: - application/json: - schema: - type: object - properties: - uuid: - type: string - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/close: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - post: - tags: - - project - summary: Closes a given project - operationId: close_project - requestBody: - description: browser tab identifier - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ClientSessionId" - responses: - "204": - description: project succesfuly closed - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - post: - tags: - - project - summary: Create a new node - operationId: create_node - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - service_id: - type: string - description: the uuid to assign to the service - service_key: - type: string - pattern: '^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$' - description: The key (url) of the service - service_version: - type: string - pattern: >- - ^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$ - description: The tag/version of the service - required: - - service_key - - service_version - example: - service_key: simcore/services/dynamic/3d-viewer - service_version: "1.4.0" - - responses: - "201": - description: created - content: - application/json: - schema: - $ref: "./openapi-projects.yaml#/components/schemas/NodeEnveloped" - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - - get: - tags: - - project - description: Gets node status - operationId: get_node - responses: - "200": - description: OK service exists and runs. Returns node details. 
- content: - application/json: - schema: - $ref: "#/components/schemas/RunningServiceEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - delete: - tags: - - project - description: Stops and removes a node from the project - operationId: delete_node - responses: - "204": - description: node has been successfully deleted from project - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}/retrieve: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - - post: - tags: - - project - description: Triggers service retrieve - operationId: retrieve_node - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - port_keys: - description: list of por keys to be retrieved - type: array - items: - type: string - responses: - "200": - description: Returns the amount of transferred bytes when pulling data via nodeports - content: - application/json: - schema: - type: object - properties: - data: - type: object - description: response payload - properties: - size_bytes: - type: integer - description: amount of transferred bytes - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}/start: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - - post: - tags: - - project - description: Starts a project dynamic service - operationId: start_node - responses: - "204": - description: started service (needs to be long running though) - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}/stop: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - - post: - tags: - - project - description: Stops a project node - operationId: stop_node - responses: - "204": - description: stopped service - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}/restart: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - - post: - tags: - - project - description: Restarts containers started by the dynamic-sidecar - operationId: restart_node - - responses: - "204": - description: Restarts containers started by the dynamic-sidecar - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{project_id}/nodes/{node_id}/resources: - parameters: - - name: project_id - in: path - required: true - schema: - type: string - - name: node_id - in: path - required: true - schema: - type: string - get: - tags: - - project - description: Returns the node resources - operationId: get_node_resources - responses: - "200": - description: Returns the node resources. 
- content: - application/json: - schema: - $ref: "#/components/schemas/NodeResourcesEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - put: - tags: - - project - description: Replaces the node resources - operationId: replace_node_resources - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/NodeResources" - responses: - "200": - description: Returns the udpated node resources. - content: - application/json: - schema: - $ref: "#/components/schemas/NodeResourcesEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /projects/{study_uuid}/tags/{tag_id}: - parameters: - - name: tag_id - in: path - required: true - schema: - type: integer - - name: study_uuid - in: path - required: true - schema: - type: string - put: - tags: - - project - summary: Links an existing label with an existing study - operationId: add_tag - responses: - "200": - description: The tag has been successfully linked to the study - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - tags: - - project - summary: Removes an existing link between a label and a study - operationId: remove_tag - responses: - "200": - description: The tag has been successfully removed from the study - content: - application/json: - schema: - $ref: "#/components/schemas/ProjectEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - -components: - schemas: - ClientSessionId: - type: string - example: 5ac57685-c40f-448f-8711-70be1936fd63 - - Node: - type: object - required: - - node_id - properties: - node_id: - type: string - description: The UUID attached to this node - example: 123e4567-e89b-12d3-a456-426655440000 - - NodeEnveloped: - type: object - required: - - data - properties: - data: - $ref: "./openapi-projects.yaml#/components/schemas/Node" - error: - nullable: true - default: null - - ProjectIn: - $ref: "../common/schemas/project.yaml#/components/schemas/ProjectIn" - - ProjectEnveloped: - $ref: "../common/schemas/project.yaml#/components/schemas/ProjectEnveloped" - - ProjectArrayEnveloped: - $ref: "../common/schemas/project.yaml#/components/schemas/ProjectArrayEnveloped" - - ProjectStateEnveloped: - $ref: "../common/schemas/project.yaml#/components/schemas/ProjectStateEnveloped" - - RunningServiceEnveloped: - $ref: "../common/schemas/running_service.yaml#/components/schemas/RunningServiceEnveloped" - - NodeResources: - $ref: "./components/schemas/node_resources.yaml#/components/schemas/NodeResources" - - NodeResourcesEnveloped: - $ref: "./components/schemas/node_resources.yaml#/components/schemas/NodeResourcesEnveloped" - - HTTPValidationError: - title: HTTPValidationError - type: object - properties: - detail: - title: Detail - type: array - items: - $ref: "#/components/schemas/ValidationError" - - ValidationError: - title: ValidationError - required: - - loc - - msg - - type - type: object - properties: - loc: - title: Location - type: array - items: - type: string - msg: - title: Message - type: string - type: - title: Error Type - type: string - - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-publications.yaml b/api/specs/webserver/openapi-publications.yaml deleted file mode 100644 index 43581c7331a..00000000000 --- a/api/specs/webserver/openapi-publications.yaml +++ /dev/null @@ -1,26 +0,0 @@ 
-paths: - /publications/service-submission: - post: - tags: - - publication - summary: Submits a new service candidate - operationId: service_submission - requestBody: - content: - multipart/form-data: - schema: - type: object - required: - - metadata - properties: - metadata: - type: string - format: binary - attachment: - type: string - format: binary - responses: - '204': - description: Submission has been registered - default: - $ref: './openapi.yaml#/components/responses/DefaultErrorResponse' diff --git a/api/specs/webserver/openapi-storage.yaml b/api/specs/webserver/openapi-storage.yaml deleted file mode 100644 index efa84a9d773..00000000000 --- a/api/specs/webserver/openapi-storage.yaml +++ /dev/null @@ -1,597 +0,0 @@ -paths: - /storage/locations: - get: - summary: Get available storage locations - tags: - - storage - operationId: get_storage_locations - responses: - "200": - description: "List of availabe storage locations" - content: - application/json: - schema: - $ref: "#/components/schemas/FileLocationArray" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}:sync: - post: - summary: Manually triggers the synchronisation of the file meta data table in the database - tags: - - storage - operationId: synchronise_meta_data_table - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: dry_run - in: query - required: false - schema: - type: boolean - default: true - - name: fire_and_forget - in: query - required: false - schema: - type: boolean - default: false - responses: - "200": - description: An object containing added, changed and removed paths - content: - application/json: - schema: - $ref: "#/components/schemas/TableSynchronisationEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/datasets: - get: - summary: Get datasets metadata - tags: - - storage - operationId: get_datasets_metadata - parameters: - - name: location_id - in: path - required: true - schema: - type: string - responses: - "200": - description: "list of dataset meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/DatasetMetaDataArray" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/files/metadata: - get: - summary: Get list of file meta data - tags: - - storage - operationId: get_files_metadata - parameters: - - name: location_id - in: path - required: true - schema: - type: string - responses: - "200": - description: "list of file meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaDataArray" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/datasets/{dataset_id}/metadata: - get: - summary: Get Files Metadata - tags: - - storage - operationId: get_files_metadata_dataset - parameters: - - name: location_id - in: path - required: true - schema: - type: string - - name: dataset_id - in: path - required: true - schema: - type: string - responses: - "200": - description: "list of file meta-datas" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaDataArray" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/files/{file_id}: - get: - summary: Returns download link for requested file - tags: - - storage - operationId: download_file - parameters: - - name: file_id - in: path - required: true - schema: - type: 
string - - name: location_id - in: path - required: true - schema: - type: string - responses: - "200": - $ref: "#/components/responses/PresignedLink_200" - put: - summary: Returns upload link - tags: - - storage - operationId: upload_file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - - name: file_size - in: query - required: false - schema: - type: integer - format: int64 - minimum: 0 - responses: - "200": - description: Return upload object - content: - application/json: - schema: - $ref: "#/components/schemas/FileUploadEnveloped" - links: - CompleteUpload: - operationId: complete_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - query.user_id: "$request.query.user_id" - AbortUpload: - operationId: abort_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - query.user_id: "$request.query.user_id" - delete: - summary: Deletes File - tags: - - storage - operationId: delete_file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - responses: - "204": - description: "" - - /storage/locations/{location_id}/files/{file_id}:abort: - post: - summary: Asks the server to abort the upload and revert to the last valid version if any - operationId: abort_upload_file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - responses: - "204": - description: Abort OK - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/files/{file_id}:complete: - post: - summary: Asks the server to complete the upload - operationId: complete_upload_file - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - type: object - required: - - parts - properties: - parts: - type: array - items: - type: object - required: - - number - - e_tag - properties: - number: - type: integer - minimum: 1 - e_tag: - type: string - responses: - "202": - description: Completion of upload is accepted - content: - application/json: - schema: - $ref: "#/components/schemas/FileUploadCompleteEnveloped" - links: - CompleteUploadStatus: - operationId: is_completed_upload_file - parameters: - path.location_id: "$request.path.location_id" - path.file_id: "$request.path.file_id" - path.future_id: "$response.body.data.links.state" - query.user_id: "$request.query.user_id" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /storage/locations/{location_id}/files/{file_id}:complete/futures/{future_id}: - post: - summary: Check for upload completion - operationId: is_completed_upload_file - parameters: - - name: future_id - in: path - required: true - schema: - type: string - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - responses: - "200": - description: returns state of upload completion - content: - application/json: - schema: - $ref: "#/components/schemas/FileUploadCompleteFutureEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - - 
/storage/locations/{location_id}/files/{file_id}/metadata: - get: - summary: Get File Metadata - tags: - - storage - operationId: get_file_metadata - parameters: - - name: file_id - in: path - required: true - schema: - type: string - - name: location_id - in: path - required: true - schema: - type: string - responses: - "200": - $ref: "#/components/responses/FileMetaData_200" - -components: - requestBodies: - FileMetaDataBody: - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaData" - - responses: - FileMetaData_200: - description: "Returns file metadata" - content: - application/json: - schema: - $ref: "#/components/schemas/FileMetaData" - - PresignedLink_200: - description: "Returns presigned link" - content: - application/json: - schema: - $ref: "#/components/schemas/PresignedLink" - - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - - schemas: - FileLocationEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/FileLocation" - error: - nullable: true - default: null - - FileLocation: - type: object - properties: - name: - type: string - id: - type: number - example: - filename: "simcore.s3" - id: 0 - - FileLocationArray: - type: array - items: - $ref: "#/components/schemas/FileLocation" - - TableSynchronisationEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/TableSynchronisation" - error: - nullable: true - default: null - - TableSynchronisation: - type: object - required: - - removed - properties: - dry_run: - type: boolean - fire_and_forget: - type: boolean - removed: - type: array - items: - type: string - - FileUploadEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadSchema" - error: - nullable: true - default: null - - FileUploadSchema: - type: object - required: - - chunk_size - - urls - - links - properties: - chunk_size: - type: integer - format: int64 - minimum: 0 - urls: - type: array - items: - type: string - links: - type: object - required: - - abort_upload - - complete_upload - properties: - abort_upload: - type: string - complete_upload: - type: string - - FileUploadCompleteEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadComplete" - error: - nullable: true - default: null - - FileUploadComplete: - type: object - required: - - links - properties: - links: - type: object - required: - - state - properties: - state: - type: string - - FileUploadCompleteFutureEnveloped: - type: object - required: - - data - - error - properties: - data: - $ref: "#/components/schemas/FileUploadCompleteFuture" - error: - nullable: true - default: null - - FileUploadCompleteFuture: - type: object - required: - - state - properties: - state: - type: string - enum: - - ok - - nok - e_tag: - type: string - nullable: true - - DatasetMetaEnvelope: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/DatasetMetaData" - error: - nullable: true - default: null - - DatasetMetaData: - type: object - properties: - dataset_id: - type: string - display_name: - type: string - example: - dataset_uuid: "N:id-aaaa" - display_name: "simcore-testing" - - DatasetMetaDataArray: - type: array - items: - $ref: "#/components/schemas/DatasetMetaData" - - FileMetaEnvelope: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/FileMetaData" - 
error: - nullable: true - default: null - - # TODO: Rename with suffix *Type - FileMetaData: - type: object - properties: - file_uuid: - type: string - location_id: - type: string - project_name: - type: string - node_name: - type: string - file_name: - type: string - file_id: - type: string - created_at: - type: string - last_modified: - type: string - file_size: - type: integer - entity_tag: - type: string - example: - file_uuid: "simcore-testing/105/1000/3" - location_id: "0" - project_name: "futurology" - node_name: "alpha" - file_name: "example.txt" - file_id: "N:package:e263da07-2d89-45a6-8b0f-61061b913873" - created_at: "2019-06-19T12:29:03.308611Z" - last_modified: "2019-06-19T12:29:03.78852Z" - file_size: 73 - entity_tag: a87ff679a2f3e71d9181a67b7542122c - - FileMetaDataArray: - type: array - items: - $ref: "#/components/schemas/FileMetaData" - - PresignedLinkEnveloped: - type: object - required: - - data - properties: - data: - $ref: "#/components/schemas/PresignedLink" - error: - nullable: true - default: null - - PresignedLink: - type: object - properties: - link: - type: string - example: - link: "example_link" diff --git a/api/specs/webserver/openapi-tags.yaml b/api/specs/webserver/openapi-tags.yaml deleted file mode 100644 index 888890b31fc..00000000000 --- a/api/specs/webserver/openapi-tags.yaml +++ /dev/null @@ -1,169 +0,0 @@ -paths: - /tags: - get: - tags: - - tag - summary: List Tags - operationId: list_tags - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_list_simcore_service_webserver.tags_handlers.TagGet__' - post: - tags: - - tag - summary: Create Tag - operationId: create_tag - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/TagCreate' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_TagGet_' - /tags/{tag_id}: - delete: - tags: - - tag - summary: Delete Tag - operationId: delete_tag - parameters: - - required: true - schema: - title: Tag Id - type: integer - name: tag_id - in: path - responses: - '204': - description: Successful Response - patch: - tags: - - tag - summary: Update Tag - operationId: update_tag - parameters: - - required: true - schema: - title: Tag Id - type: integer - name: tag_id - in: path - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/TagUpdate' - required: true - responses: - '200': - description: Successful Response - content: - application/json: - schema: - $ref: '#/components/schemas/Envelope_TagGet_' -components: - schemas: - Envelope_TagGet_: - title: Envelope[TagGet] - type: object - properties: - data: - $ref: '#/components/schemas/TagGet' - error: - title: Error - Envelope_list_simcore_service_webserver.tags_handlers.TagGet__: - title: Envelope[list[simcore_service_webserver.tags_handlers.TagGet]] - type: object - properties: - data: - title: Data - type: array - items: - $ref: '#/components/schemas/TagGet' - error: - title: Error - TagAccessRights: - title: TagAccessRights - required: - - read - - write - - delete - type: object - properties: - read: - title: Read - type: boolean - write: - title: Write - type: boolean - delete: - title: Delete - type: boolean - TagCreate: - title: TagCreate - required: - - name - - color - type: object - properties: - name: - title: Name - type: string - description: - title: Description - type: string - color: - title: Color - pattern: 
^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$ - type: string - additionalProperties: false - TagGet: - title: TagGet - required: - - id - - name - - color - - accessRights - type: object - properties: - id: - title: Id - exclusiveMinimum: true - type: integer - minimum: 0 - name: - title: Name - type: string - description: - title: Description - type: string - color: - title: Color - type: string - accessRights: - $ref: '#/components/schemas/TagAccessRights' - TagUpdate: - title: TagUpdate - type: object - properties: - name: - title: Name - type: string - description: - title: Description - type: string - color: - title: Color - pattern: ^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$ - type: string - additionalProperties: false diff --git a/api/specs/webserver/openapi-tasks.yaml b/api/specs/webserver/openapi-tasks.yaml deleted file mode 100644 index e0c205899c3..00000000000 --- a/api/specs/webserver/openapi-tasks.yaml +++ /dev/null @@ -1,68 +0,0 @@ -paths: - /tasks: - get: - operationId: list_tasks - tags: - - tasks - responses: - "200": - description: Returns the list of active tasks (running and/or done) - content: - application/json: - schema: - type: array - items: - $ref: "../common/schemas/task.yaml#/TaskEnveloped" - - /tasks/{task_id}: - parameters: - - name: task_id - in: path - required: true - schema: - type: string - get: - operationId: get_task_status - tags: - - tasks - responses: - "200": - description: Returns the task status - content: - application/json: - schema: - $ref: "../common/schemas/task.yaml#/TaskStatusEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - delete: - operationId: cancel_and_delete_task - description: Aborts and remove the task - tags: - - tasks - responses: - "204": - description: Task was successfully aborted - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /tasks/{task_id}/result: - parameters: - - name: task_id - in: path - required: true - schema: - type: string - get: - operationId: get_task_result - tags: - - tasks - responses: - "2XX": - description: Retrieve the task result and returns directly its HTTP code - default: - $ref: "#/components/responses/DefaultErrorResponse" - -components: - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-user.yaml b/api/specs/webserver/openapi-user.yaml deleted file mode 100644 index 553a340a0eb..00000000000 --- a/api/specs/webserver/openapi-user.yaml +++ /dev/null @@ -1,251 +0,0 @@ -paths: - /me: - get: - operationId: get_my_profile - tags: - - user - responses: - "200": - description: current user profile - content: - application/json: - schema: - $ref: "./components/schemas/me.yaml#/ProfileEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - put: - operationId: update_my_profile - tags: - - user - requestBody: - content: - application/json: - schema: - $ref: "./components/schemas/me.yaml#/ProfileUpdate" - responses: - "204": - description: updated profile - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /me/tokens: - get: - summary: List tokens - operationId: list_tokens - tags: - - user - responses: - "200": - description: list of tokens - content: - application/json: - schema: - $ref: "./components/schemas/me.yaml#/TokensArrayEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - summary: Create tokens - operationId: create_tokens - tags: - - user - requestBody: - content: - application/json: - schema: - # FIXME: 
this body should NOT be enveloped! - $ref: "./components/schemas/me.yaml#/TokenEnveloped" - responses: - "201": - description: token created - content: - application/json: - schema: - $ref: "./components/schemas/me.yaml#/TokenEnveloped" - - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /me/tokens/{service}: - parameters: - - name: service - in: path - required: true - schema: - type: string - get: - summary: Gets specific token - operationId: get_token - tags: - - user - responses: - "200": - description: got detailed token - content: - application/json: - schema: - $ref: "./components/schemas/me.yaml#/TokenEnveloped" - put: - summary: Updates token - operationId: update_token - tags: - - user - responses: - "204": - description: token has been successfully updated - delete: - summary: Delete token - operationId: delete_token - tags: - - user - responses: - "204": - description: token has been successfully deleted - - /me/notifications: - get: - tags: - - user - summary: List of Notifications for the specific user - operationId: get_user_notifications - responses: - "200": - description: List of Notifications - content: - application/json: - schema: - $ref: "#/components/schemas/NotificationsOutListEnveloped" - default: - $ref: "#/components/responses/DefaultErrorResponse" - post: - tags: - - user - summary: Submit a new Notification - operationId: post_user_notification - requestBody: - required: true - description: the new notification - content: - application/json: - schema: - $ref: "#/components/schemas/NotificationIn" - responses: - "204": - description: Notification registered - default: - $ref: "#/components/responses/DefaultErrorResponse" - - /me/notifications/{id}: - parameters: - - name: id - in: path - required: true - schema: - type: string - patch: - tags: - - user - summary: Update a Notification - operationId: update_user_notification - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/NotificationUpdate" - responses: - "204": - description: All good - default: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" - -components: - schemas: - NotificationIn: - type: object - required: - - user_id - - category - - actionable_path - - title - - text - - date - properties: - user_id: - type: string - description: >- - the user that will receive the notification - example: "123" - category: - type: string - enum: - - NEW_ORGANIZATION - - STUDY_SHARED - - TEMPLATE_SHARED - description: >- - notification type, the frontend will use this to decorate the notification - example: new_organization - actionable_path: - type: string - description: >- - the frontend will use this information to trigger an action when the user click on it - example: organization/123 - title: - type: string - description: >- - the notification title to show in the frontend - example: New Organization - text: - type: string - description: >- - the notification text to show in the frontend - example: You are now part of Dummy Organization! 
- date: - type: string - format: date-time - description: >- - when it was created - - NotificationOut: - # extend NotificationIn - allOf: - - $ref: "#/components/schemas/NotificationIn" - - type: "object" - - required: - - id - - read - properties: - id: - type: string - description: >- - notification id - example: "123" - read: - type: boolean - description: >- - wether the notification has been read - - NotificationsOutListEnveloped: - type: object - required: - - data - properties: - data: - type: array - items: - $ref: "#/components/schemas/NotificationOut" - error: - nullable: true - default: null - - NotificationUpdate: - type: object - required: - - read - properties: - read: - type: boolean - description: >- - notification has been read - - responses: - DefaultErrorResponse: - $ref: "./openapi.yaml#/components/responses/DefaultErrorResponse" diff --git a/api/specs/webserver/openapi-version-control.yaml b/api/specs/webserver/openapi-version-control.yaml deleted file mode 100644 index c1eb12ffa94..00000000000 --- a/api/specs/webserver/openapi-version-control.yaml +++ /dev/null @@ -1,605 +0,0 @@ -paths: - /repos/projects: - get: - tags: - - repository - summary: List Repos - description: List info about versioned projects - operationId: simcore_service_webserver.version_control_handlers._list_repos_handler - parameters: - - description: index to the first item to return (pagination) - required: false - schema: - title: Offset - exclusiveMinimum: false - type: integer - description: index to the first item to return (pagination) - default: 0 - name: offset - in: query - - description: maximum number of items to return (pagination) - required: false - schema: - title: Limit - maximum: 50 - minimum: 1 - type: integer - description: maximum number of items to return (pagination) - default: 20 - name: limit - in: query - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Page_Repo_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - /repos/projects/{project_uuid}/checkpoints: - get: - tags: - - repository - summary: List Checkpoints - description: Lists commits&tags tree of the project - operationId: simcore_service_webserver.version_control_handlers._list_checkpoints_handler - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - - description: index to the first item to return (pagination) - required: false - schema: - title: Offset - exclusiveMinimum: false - type: integer - description: index to the first item to return (pagination) - default: 0 - name: offset - in: query - - description: maximum number of items to return (pagination) - required: false - schema: - title: Limit - maximum: 50 - minimum: 1 - type: integer - description: maximum number of items to return (pagination) - default: 20 - name: limit - in: query - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Page_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - post: - tags: - - repository - summary: Create Checkpoint - operationId: simcore_service_webserver.version_control_handlers._create_checkpoint_handler - parameters: - - description: 
Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/CheckpointNew" - required: true - responses: - "201": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - /repos/projects/{project_uuid}/checkpoints/HEAD: - get: - tags: - - repository - summary: Gets HEAD (i.e. current) checkpoint - description: Get current commit - operationId: simcore_service_webserver.version_control_handlers._get_checkpoint_handler_head - parameters: - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - /repos/projects/{project_uuid}/checkpoints/{ref_id}: - get: - tags: - - repository - summary: Get Checkpoint - description: Set ref_id=HEAD to return current commit - operationId: simcore_service_webserver.version_control_handlers._get_checkpoint_handler - parameters: - - description: A repository ref (commit, tag or branch) - required: true - schema: - title: Ref Id - anyOf: - - type: string - format: uuid - - type: string - description: A repository ref (commit, tag or branch) - name: ref_id - in: path - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - patch: - tags: - - repository - summary: Update Checkpoint Annotations - operationId: simcore_service_webserver.version_control_handlers._update_checkpoint_annotations_handler - parameters: - - description: A repository ref (commit, tag or branch) - required: true - schema: - title: Ref Id - anyOf: - - type: string - format: uuid - - type: string - description: A repository ref (commit, tag or branch) - name: ref_id - in: path - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/CheckpointAnnotations" - required: true - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - /repos/projects/{project_uuid}/checkpoints/{ref_id}:checkout: - post: - tags: - - repository - summary: Checkout - description: |- - Affect current working copy of the project, i.e. 
get_project will now return - the check out - operationId: simcore_service_webserver.version_control_handlers._checkout_handler - parameters: - - description: A repository ref (commit, tag or branch) - required: true - schema: - title: Ref Id - anyOf: - - type: string - format: uuid - - type: string - description: A repository ref (commit, tag or branch) - name: ref_id - in: path - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_Checkpoint_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" - /repos/projects/{project_uuid}/checkpoints/{ref_id}/workbench/view: - get: - tags: - - repository - summary: View Project Workbench - description: Returns a view of the workbench for a given project's version - operationId: simcore_service_webserver.version_control_handlers._view_project_workbench_handler - parameters: - - description: A repository ref (commit, tag or branch) - required: true - schema: - title: Ref Id - anyOf: - - type: string - format: uuid - - type: string - description: A repository ref (commit, tag or branch) - name: ref_id - in: path - - description: Project unique identifier - required: true - schema: - title: Project Uuid - type: string - description: Project unique identifier - format: uuid - name: project_uuid - in: path - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/Envelope_WorkbenchView_" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" -components: - schemas: - Checkpoint: - title: Checkpoint - required: - - id - - checksum - - tag - - message - - parent - - created_at - - url - type: object - properties: - id: - title: Id - exclusiveMinimum: false - type: integer - checksum: - title: Checksum - type: string - tag: - title: Tag - type: string - message: - title: Message - type: string - parent: - title: Parent - exclusiveMinimum: false - type: integer - created_at: - title: Created At - type: string - format: date-time - url: - title: Url - maxLength: 2083 - minLength: 1 - type: string - format: uri - CheckpointAnnotations: - title: CheckpointAnnotations - type: object - properties: - tag: - title: Tag - type: string - message: - title: Message - type: string - CheckpointNew: - title: CheckpointNew - required: - - tag - type: object - properties: - tag: - title: Tag - type: string - message: - title: Message - type: string - Envelope_Checkpoint_: - title: Envelope[Checkpoint] - type: object - properties: - data: - $ref: "#/components/schemas/Checkpoint" - error: - $ref: "#/components/schemas/Error" - Envelope_WorkbenchView_: - title: Envelope[WorkbenchView] - type: object - properties: - data: - $ref: "#/components/schemas/WorkbenchView" - error: - $ref: "#/components/schemas/Error" - Error: - title: Error - required: - - code - - message - type: object - properties: - code: - title: Code - type: integer - message: - title: Message - type: string - HTTPValidationError: - title: HTTPValidationError - type: object - properties: - detail: - title: Detail - type: array - items: - $ref: "#/components/schemas/ValidationError" - Node: - title: Node - 
required: - - key - - version - - label - - inputs - - outputs - type: object - properties: - key: - title: Key - type: string - version: - title: Version - pattern: \d+\.\d+\.\d+ - type: string - label: - title: Label - type: string - inputs: - title: Inputs - type: object - outputs: - title: Outputs - type: object - PageLinks: - title: PageLinks - required: - - self - - first - - last - type: object - properties: - self: - title: Self - maxLength: 65536 - minLength: 1 - type: string - format: uri - first: - title: First - maxLength: 65536 - minLength: 1 - type: string - format: uri - prev: - title: Prev - maxLength: 65536 - minLength: 1 - type: string - format: uri - next: - title: Next - maxLength: 65536 - minLength: 1 - type: string - format: uri - last: - title: Last - maxLength: 65536 - minLength: 1 - type: string - format: uri - additionalProperties: false - PageMetaInfoLimitOffset: - title: PageMetaInfoLimitOffset - required: - - limit - - total - - count - type: object - properties: - limit: - title: Limit - exclusiveMinimum: false - type: integer - total: - title: Total - minimum: 0 - type: integer - offset: - title: Offset - minimum: 0 - type: integer - default: 0 - count: - title: Count - minimum: 0 - type: integer - additionalProperties: false - Page_Checkpoint_: - title: Page[Checkpoint] - required: - - _meta - - _links - - data - type: object - properties: - _meta: - $ref: "#/components/schemas/PageMetaInfoLimitOffset" - _links: - $ref: "#/components/schemas/PageLinks" - data: - title: Data - type: array - items: - $ref: "#/components/schemas/Checkpoint" - Page_Repo_: - title: Page[Repo] - required: - - _meta - - _links - - data - type: object - properties: - _meta: - $ref: "#/components/schemas/PageMetaInfoLimitOffset" - _links: - $ref: "#/components/schemas/PageLinks" - data: - title: Data - type: array - items: - $ref: "#/components/schemas/Repo" - Repo: - title: Repo - required: - - project_uuid - - url - type: object - properties: - project_uuid: - title: Project Uuid - type: string - format: uuid - url: - title: Url - maxLength: 2083 - minLength: 1 - type: string - format: uri - ValidationError: - title: ValidationError - required: - - loc - - msg - - type - type: object - properties: - loc: - title: Location - type: array - items: - type: string - msg: - title: Message - type: string - type: - title: Error Type - type: string - WorkbenchView: - title: WorkbenchView - type: object - properties: - workbench: - title: Workbench - type: object - additionalProperties: - $ref: "#/components/schemas/Node" - default: {} - ui: - title: Ui - type: object - default: {} - description: A view (i.e. 
read-only and visual) of the project's workbench diff --git a/api/specs/webserver/openapi.yaml b/api/specs/webserver/openapi.yaml deleted file mode 100644 index 3810698e28d..00000000000 --- a/api/specs/webserver/openapi.yaml +++ /dev/null @@ -1,381 +0,0 @@ -openapi: 3.0.0 -info: - title: "osparc-simcore web API" - version: 0.17.1 - description: "API designed for the front-end app" - contact: - name: IT'IS Foundation - email: support@simcore.io - license: - name: MIT - url: https://github.com/ITISFoundation/osparc-simcore/blob/master/LICENSE -servers: - - description: API server - url: "/v0" - - description: Development server - url: http://{host}:{port}/{basePath} - variables: - host: - default: "localhost" - port: - default: "8001" - basePath: - enum: - - v0 - default: v0 -tags: - - name: activity - - name: admin - - name: authentication - - name: catalog - - name: cluster - - name: configuration - - name: maintenance - - name: nih-sparc - - name: node - - name: project - - name: publication - - name: repository - - name: storage - - name: tag - - name: tasks - - name: user - -paths: - # ADMIN ------------- - /email:test: - $ref: "./openapi-admin.yaml#/paths/~1email:test" - - # DIAGNOSTICS --------------------------------------------------------- - /: - $ref: "./openapi-diagnostics.yaml#/paths/~1" - - /health: - $ref: "./openapi-diagnostics.yaml#/paths/~1health" - - /status: - $ref: "./openapi-diagnostics.yaml#/paths/~1status" - - /status/diagnostics: - $ref: "./openapi-diagnostics.yaml#/paths/~1status~1diagnostics" - - /status/{service_name}: - $ref: "./openapi-diagnostics.yaml#/paths/~1status~1{service_name}" - - /config: - $ref: "./openapi-diagnostics.yaml#/paths/~1config" - - # AUTHENTICATION & AUTHORIZATION -------------------------------------- - - /auth/register/invitations:check: - $ref: "./openapi-auth.yaml#/paths/~1auth~1register~1invitations:check" - - /auth/register: - $ref: "./openapi-auth.yaml#/paths/~1auth~1register" - - /auth/two_factor:resend: - $ref: "./openapi-auth.yaml#/paths/~1auth~1two_factor:resend" - - /auth/verify-phone-number: - $ref: "./openapi-auth.yaml#/paths/~1auth~1verify-phone-number" - - /auth/validate-code-register: - $ref: "./openapi-auth.yaml#/paths/~1auth~1validate-code-register" - - /auth/login: - $ref: "./openapi-auth.yaml#/paths/~1auth~1login" - - /auth/validate-code-login: - $ref: "./openapi-auth.yaml#/paths/~1auth~1validate-code-login" - - /auth/logout: - $ref: "./openapi-auth.yaml#/paths/~1auth~1logout" - - /auth/reset-password: - $ref: "./openapi-auth.yaml#/paths/~1auth~1reset-password" - - /auth/reset-password/{code}: - $ref: "./openapi-auth.yaml#/paths/~1auth~1reset-password~1{code}" - - /auth/change-email: - $ref: "./openapi-auth.yaml#/paths/~1auth~1change-email" - - /auth/change-password: - $ref: "./openapi-auth.yaml#/paths/~1auth~1change-password" - - /auth/confirmation/{code}: - $ref: "./openapi-auth.yaml#/paths/~1auth~1confirmation~1{code}" - - /auth/api-keys: - $ref: "./openapi-auth.yaml#/paths/~1auth~1api-keys" - - # USER SETTINGS ------------------------------------------------------------------ - - /me: - $ref: "./openapi-user.yaml#/paths/~1me" - - /me/tokens: - $ref: "./openapi-user.yaml#/paths/~1me~1tokens" - - /me/tokens/{service}: - $ref: "./openapi-user.yaml#/paths/~1me~1tokens~1{service}" - - /me/notifications: - $ref: "./openapi-user.yaml#/paths/~1me~1notifications" - - /me/notifications/{id}: - $ref: "./openapi-user.yaml#/paths/~1me~1notifications~1{id}" - - # GROUP SETTINGS 
------------------------------------------------------------------ - - /groups: - $ref: "./openapi-groups.yaml#/paths/~1groups" - - /groups/{gid}: - $ref: "./openapi-groups.yaml#/paths/~1groups~1{gid}" - - /groups/{gid}/users: - $ref: "./openapi-groups.yaml#/paths/~1groups~1{gid}~1users" - - /groups/{gid}/users/{uid}: - $ref: "./openapi-groups.yaml#/paths/~1groups~1{gid}~1users~1{uid}" - - /groups/{gid}/classifiers: - $ref: "./openapi-groups.yaml#/paths/~1groups~1{gid}~1classifiers" - - /groups/sparc/classifiers/scicrunch-resources/{rrid}: - $ref: "./openapi-groups.yaml#/paths/~1groups~1sparc~1classifiers~1scicrunch-resources~1{rrid}" - - /groups/sparc/classifiers/scicrunch-resources:search: - $ref: "./openapi-groups.yaml#/paths/~1groups~1sparc~1classifiers~1scicrunch-resources:search" - - # DATA STORAGE SERVICES ---------------------------------------------------------- - - /storage/locations: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations" - - /storage/locations/{location_id}:sync: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}:sync" - - /storage/locations/{location_id}/files/metadata: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1metadata" - - /storage/locations/{location_id}/files/{file_id}: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1{file_id}" - - /storage/locations/{location_id}/files/{file_id}:complete: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1{file_id}:complete" - - /storage/locations/{location_id}/files/{file_id}:abort: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1{file_id}:abort" - - /storage/locations/{location_id}/files/{file_id}:complete/futures/{future_id}: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1{file_id}:complete~1futures~1{future_id}" - - /storage/locations/{location_id}/files/{file_id}/metadata: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1files~1{file_id}~1metadata" - - /storage/locations/{location_id}/datasets/{dataset_id}/metadata: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1datasets~1{dataset_id}~1metadata" - - /storage/locations/{location_id}/datasets: - $ref: "./openapi-storage.yaml#/paths/~1storage~1locations~1{location_id}~1datasets" - - # SERVICES ------------------------------------------------------------------------ - /computations/{project_id}: - $ref: "./openapi-computations.yaml#/paths/computations_project_id" - /computations/{project_id}:start: - $ref: "./openapi-computations.yaml#/paths/computations_project_id_start" - /computations/{project_id}:stop: - $ref: "./openapi-computations.yaml#/paths/computations_project_id_stop" - - # PROJECT SERVICES ----------------------------------------------------------------- - - /projects: - $ref: "./openapi-projects.yaml#/paths/~1projects" - - /projects/active: - $ref: "./openapi-projects.yaml#/paths/~1projects~1active" - - /projects/{project_id}: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}" - - /projects/{project_id}:open: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1open" - - /projects/{project_id}/state: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1state" - - /projects/{project_id}:xport: # do not change there "export" will not work - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1xport" - - /projects/{project_id}:duplicate: - $ref: 
"./openapi-projects.yaml#/paths/~1projects~1{project_id}~1duplicate" - - /projects:import: - $ref: "./openapi-projects.yaml#/paths/~1projects~1import" - - /projects/{project_id}:close: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1close" - - /projects/{project_id}/nodes: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes" - - /projects/{project_id}/nodes/{node_id}: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}" - - /projects/{project_id}/nodes/{node_id}:retrieve: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}~1retrieve" - - /projects/{project_id}/nodes/{node_id}:start: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}~1start" - - /projects/{project_id}/nodes/{node_id}:stop: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}~1stop" - - /projects/{project_id}/nodes/{node_id}:restart: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}~1restart" - - /projects/{project_id}/nodes/{node_id}/resources: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{project_id}~1nodes~1{node_id}~1resources" - - /projects/{project_id}/inputs: - $ref: "./openapi-projects-ports.yaml#/paths/~1projects~1{project_id}~1inputs" - - /projects/{project_id}/outputs: - $ref: "./openapi-projects-ports.yaml#/paths/~1projects~1{project_id}~1outputs" - - /projects/{project_id}/metadata/ports: - $ref: "./openapi-projects-ports.yaml#/paths/~1projects~1{project_id}~1metadata~1ports" - - /nodes/{nodeInstanceUUID}/outputUi/{outputKey}: - $ref: "./openapi-node-v0.0.1.yaml#/paths/~1nodes~1{nodeInstanceUUID}~1outputUi~1{outputKey}" - - /nodes/{nodeInstanceUUID}/outputUi/{outputKey}/{apiCall}: - $ref: "./openapi-node-v0.0.1.yaml#/paths/~1nodes~1{nodeInstanceUUID}~1outputUi~1{outputKey}~1{apiCall}" - - /nodes/{nodeInstanceUUID}/iframe: - $ref: "./openapi-node-v0.0.1.yaml#/paths/~1nodes~1{nodeInstanceUUID}~1iframe" - - /projects/{study_uuid}/tags/{tag_id}: - $ref: "./openapi-projects.yaml#/paths/~1projects~1{study_uuid}~1tags~1{tag_id}" - - # META-PROJECTS ------------------------------------------------------------------------- - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations: - $ref: "./openapi-meta-projects.yaml#/paths/~1projects~1{project_uuid}~1checkpoint~1{ref_id}~1iterations" - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/{iter_id}: - $ref: "./openapi-meta-projects.yaml#/paths/~1projects~1{project_uuid}~1checkpoint~1{ref_id}~1iterations~1{iter_id}" - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/-/results: - $ref: "./openapi-meta-projects.yaml#/paths/~1projects~1{project_uuid}~1checkpoint~1{ref_id}~1iterations~1-~1results" - - /projects/{project_uuid}/checkpoint/{ref_id}/iterations/{iter_id}/results: - $ref: "./openapi-meta-projects.yaml#/paths/~1projects~1{project_uuid}~1checkpoint~1{ref_id}~1iterations~1{iter_id}~1results" - - # REPOSITORY ------------------------------------------------------------------------- - /repos/projects: - $ref: "./openapi-version-control.yaml#/paths/~1repos~1projects" - - "/repos/projects/{project_uuid}/checkpoints": - $ref: "./openapi-version-control.yaml#/paths/~1repos~1projects~1{project_uuid}~1checkpoints" - - "/repos/projects/{project_uuid}/checkpoints/HEAD": - $ref: "./openapi-version-control.yaml#/paths/~1repos~1projects~1{project_uuid}~1checkpoints~1HEAD" - - "/repos/projects/{project_uuid}/checkpoints/{ref_id}": - $ref: 
"./openapi-version-control.yaml#/paths/~1repos~1projects~1{project_uuid}~1checkpoints~1{ref_id}" - - "/repos/projects/{project_uuid}/checkpoints/{ref_id}:checkout": - $ref: "./openapi-version-control.yaml#/paths/~1repos~1projects~1{project_uuid}~1checkpoints~1{ref_id}:checkout" - - "/repos/projects/{project_uuid}/checkpoints/{ref_id}/workbench/view": - $ref: "./openapi-version-control.yaml#/paths/~1repos~1projects~1{project_uuid}~1checkpoints~1{ref_id}~1workbench~1view" - - # ACTIVITY ------------------------------------------------------------------------- - /activity/status: - $ref: "./openapi-activity.yaml#/paths/~1activity~1status" - - # TAGS ------------------------------------------------------------------------- - /tags: - $ref: "./openapi-tags.yaml#/paths/~1tags" - - /tags/{tag_id}: - $ref: "./openapi-tags.yaml#/paths/~1tags~1{tag_id}" - - # PUBLICATIONS ------------------------------------------------------------------------- - /publications/service-submission: - $ref: "./openapi-publications.yaml#/paths/~1publications~1service-submission" - - # CATALOG ------------------------------------------------------------------------- - /catalog/dags: - $ref: "./openapi-catalog.yaml#/paths/~1catalog~1dags" - /catalog/dags/{dag_id}: - $ref: "./openapi-catalog.yaml#/paths/~1catalog~1dags~1{dag_id}" - - /catalog/services: - $ref: "./openapi-catalog.yaml#/paths/catalog_services" - /catalog/services/{service_key}/{service_version}: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version" - /catalog/services/{service_key}/{service_version}/inputs: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_inputs" - /catalog/services/{service_key}/{service_version}/inputs/{input_key}: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_inputs_input_key" - /catalog/services/{service_key}/{service_version}/inputs:match: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_inputs_match" - /catalog/services/{service_key}/{service_version}/outputs: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_outputs" - /catalog/services/{service_key}/{service_version}/outputs/{output_key}: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_outputs_output_key" - /catalog/services/{service_key}/{service_version}/outputs:match: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_outputs_match" - /catalog/services/{service_key}/{service_version}/resources: - $ref: "./openapi-catalog.yaml#/paths/catalog_services_service_key_service_version_resources" - - # CLUSTER ------------------------------------------------------------------------- - /clusters:ping: - $ref: "./openapi-clusters.yaml#/paths/director_v2_clusters_ping" - /clusters: - $ref: "./openapi-clusters.yaml#/paths/~1clusters" - /clusters/{cluster_id}: - $ref: "./openapi-clusters.yaml#/paths/~1clusters~1{cluster_id}" - /clusters/{cluster_id}:ping: - $ref: "./openapi-clusters.yaml#/paths/director_v2_clusters_cluster_id_ping" - /clusters/{cluster_id}/details: - $ref: "./openapi-clusters.yaml#/paths/director_v2_clusters_cluster_id_details" - - # TASKS -------------------------------------------------------------------------- - /tasks: - $ref: "./openapi-tasks.yaml#/paths/~1tasks" - - /tasks/{task_id}: - $ref: "./openapi-tasks.yaml#/paths/~1tasks~1{task_id}" - - /tasks/{task_id}/result: - $ref: "./openapi-tasks.yaml#/paths/~1tasks~1{task_id}~1result" - 
- # SPARC ------------------------------------------------------------------------- - /services: - $ref: "./openapi-nih-sparc.yaml#/paths/~1services" - - /viewers: - $ref: "./openapi-nih-sparc.yaml#/paths/~1viewers" - - /viewers/default: - $ref: "./openapi-nih-sparc.yaml#/paths/~1viewers~1default" - - /view: - $ref: "./openapi-nih-sparc.yaml#/paths/~1view" - - /study/{study_id}: - $ref: "./openapi-nih-sparc.yaml#/paths/~1study~1{study_id}" - -components: - responses: - DefaultErrorResponse: - description: Default http error response body - content: - application/json: - schema: - $ref: "./components/schemas/error.yaml#/ErrorEnveloped" diff --git a/api/specs/webserver/scripts/Makefile b/api/specs/webserver/scripts/Makefile deleted file mode 100644 index 7e870d03aa6..00000000000 --- a/api/specs/webserver/scripts/Makefile +++ /dev/null @@ -1,17 +0,0 @@ - -.PHONY: _check_venv_active -_check_venv_active: - # Checking whether virtual environment was activated - @python3 -c "import sys; assert sys.base_prefix!=sys.prefix" - - -.PHONY: install -install: _check_venv_active - pip install -r requirements.txt - - -.PHONY: all -all: _check_venv_active - @for file in *.py; do \ - python $$file; \ - done diff --git a/api/specs/webserver/scripts/_common.py b/api/specs/webserver/scripts/_common.py deleted file mode 100644 index 15fc7e12cb5..00000000000 --- a/api/specs/webserver/scripts/_common.py +++ /dev/null @@ -1,83 +0,0 @@ -""" Common utils for OAS script generators -""" - -import sys -from pathlib import Path -from typing import Optional - -import yaml -from fastapi import FastAPI -from models_library.basic_types import LogLevel -from pydantic import BaseModel, Field -from servicelib.fastapi.openapi import override_fastapi_openapi_method - -CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent - - -class Log(BaseModel): - level: Optional[LogLevel] = Field("INFO", description="log level") - message: str = Field( - ..., - description="log message. 
If logger is USER, then it MUST be human readable", - ) - logger: Optional[str] = Field( - None, description="name of the logger receiving this message" - ) - - class Config: - schema_extra = { - "example": { - "message": "Hi there, Mr user", - "level": "INFO", - "logger": "user-logger", - } - } - - -class ErrorItem(BaseModel): - code: str = Field( - ..., - description="Typically the name of the exception that produced it otherwise some known error code", - ) - message: str = Field(..., description="Error message specific to this item") - resource: Optional[str] = Field( - None, description="API resource affected by this error" - ) - field: Optional[str] = Field(None, description="Specific field within the resource") - - -class Error(BaseModel): - logs: Optional[list[Log]] = Field(None, description="log messages") - errors: Optional[list[ErrorItem]] = Field(None, description="errors metadata") - status: Optional[int] = Field(None, description="HTTP error code") - - -def create_openapi_specs( - app: FastAPI, file_path: Path, *, drop_fastapi_default_422: bool = True -): - override_fastapi_openapi_method(app) - openapi = app.openapi() - - # Remove these sections - for section in ("info", "openapi"): - openapi.pop(section) - - schemas = openapi["components"]["schemas"] - for section in ("HTTPValidationError", "ValidationError"): - schemas.pop(section) - - # Removes default response 422 - if drop_fastapi_default_422: - for _, method_item in openapi.get("paths", {}).items(): - for _, param in method_item.items(): - # NOTE: If description is like this, - # it assumes it is the default HTTPValidationError from fastapi - if (e422 := param.get("responses", {}).get("422", None)) and e422.get( - "description" - ) == "Validation Error": - param.get("responses", {}).pop("422", None) - - with file_path.open("wt") as fh: - yaml.safe_dump(openapi, fh, indent=1, sort_keys=False) - - print("Saved OAS to", file_path) diff --git a/api/specs/webserver/scripts/openapi_admin.py b/api/specs/webserver/scripts/openapi_admin.py deleted file mode 100644 index 1979fd620e5..00000000000 --- a/api/specs/webserver/scripts/openapi_admin.py +++ /dev/null @@ -1,41 +0,0 @@ -""" Helper script to generate OAS automatically -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from enum import Enum -from typing import Union - -from fastapi import FastAPI, Header -from models_library.generics import Envelope -from simcore_service_webserver.email_handlers import TestEmail, TestFailed, TestPassed - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "admin", -] - - -@app.post( - "/email:test", - response_model=Envelope[Union[TestFailed, TestPassed]], - tags=TAGS, - operation_id="test_email", -) -async def test_email( - test: TestEmail, x_simcore_products_name: Union[str, None] = Header(default=None) -): - # X-Simcore-Products-Name - ... 
- - -if __name__ == "__main__": - - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-admin.yaml") diff --git a/api/specs/webserver/scripts/openapi_auth.py b/api/specs/webserver/scripts/openapi_auth.py deleted file mode 100644 index cc456c7f043..00000000000 --- a/api/specs/webserver/scripts/openapi_auth.py +++ /dev/null @@ -1,319 +0,0 @@ -""" Helper script to generate OAS automatically -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from enum import Enum -from typing import Any, Optional, Union - -from _common import Error, Log -from fastapi import FastAPI, status -from models_library.generics import Envelope -from pydantic import BaseModel, Field, confloat -from simcore_service_webserver.login.api_keys_handlers import ApiKeyCreate, ApiKeyGet -from simcore_service_webserver.login.handlers_2fa import Resend2faBody -from simcore_service_webserver.login.handlers_auth import ( - LoginBody, - LoginNextPage, - LoginTwoFactorAuthBody, - LogoutBody, -) -from simcore_service_webserver.login.handlers_change import ( - ChangeEmailBody, - ChangePasswordBody, - ResetPasswordBody, -) -from simcore_service_webserver.login.handlers_confirmation import ( - PhoneConfirmationBody, - ResetPasswordConfirmation, -) -from simcore_service_webserver.login.handlers_registration import ( - InvitationCheck, - InvitationInfo, - RegisterBody, - RegisterPhoneBody, - RegisterPhoneNextPage, -) - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "authentication", -] - - -@app.post( - "/auth/register/invitations:check", - response_model=Envelope[InvitationInfo], - tags=TAGS, - operation_id="auth_check_registration_invitation", -) -async def check_registration_invitation(check: InvitationCheck): - """Checks the invitation and returns the associated email or None""" - - -@app.post( - "/auth/register", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_register", -) -async def register(registration: RegisterBody): - """User registration""" - - -@app.post( - "/auth/verify-phone-number", - response_model=Envelope[RegisterPhoneNextPage], - tags=TAGS, - operation_id="auth_register_phone", -) -async def register_phone(registration: RegisterPhoneBody): - """user tries to verify phone number for 2 Factor Authentication when registering""" - - -@app.post( - "/auth/validate-code-register", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_phone_confirmation", -) -async def phone_confirmation(confirmation: PhoneConfirmationBody): - """user enters 2 Factor Authentication code when registering""" - - -@app.post( - "/auth/login", - response_model=Envelope[LoginNextPage], - status_code=status.HTTP_201_CREATED, - tags=TAGS, - operation_id="auth_login", - responses={ - # status.HTTP_503_SERVICE_UNAVAILABLE - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized reset due to invalid token code", - } - }, -) -async def login(authentication: LoginBody): - """user logs in""" - - -@app.post( - "/auth/validate-code-login", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_login_2fa", - responses={ - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized reset due to invalid token code", - } - }, -) -async def login_2fa(authentication: LoginTwoFactorAuthBody): - """user enters 2 Factor Authentication code when logging in""" - - -@app.post( - 
"/auth/two_factor:resend", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_resend_2fa_code", - responses={ - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized reset due to invalid token code", - } - }, -) -async def resend_2fa_code(resend: Resend2faBody): - """Resends 2FA either via email or sms""" - - -@app.post( - "/auth/logout", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_logout", -) -async def logout(data: LogoutBody): - """user logout""" - - -@app.post( - "/auth/reset-password", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_reset_password", - responses={status.HTTP_503_SERVICE_UNAVAILABLE: {"model": Envelope[Error]}}, -) -async def reset_password(data: ResetPasswordBody): - """a non logged-in user requests a password reset""" - - -@app.post( - "/auth/reset-password/{code}", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_reset_password_allowed", - responses={ - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized reset due to invalid token code", - } - }, -) -async def reset_password_allowed(code: str, data: ResetPasswordConfirmation): - """changes password using a token code without being logged in""" - - -@app.post( - "/auth/change-email", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_change_email", - responses={ - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized user. Login required", - }, - status.HTTP_503_SERVICE_UNAVAILABLE: { - "model": Envelope[Error], - "description": "unable to send confirmation email", - }, - }, -) -async def change_email(data: ChangeEmailBody): - """logged in user changes email""" - - -class PasswordCheckSchema(BaseModel): - strength: confloat(ge=0.0, le=1.0) = Field( # type: ignore - ..., - description="The strength of the password ranges from 0 (extremely weak) and 1 (extremely strong)", - ) - rating: Optional[str] = Field( - None, description="Human readable rating from infinitely weak to very strong" - ) - improvements: Optional[Any] = None - - -@app.post( - "/auth/change-password", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_change_password", - responses={ - status.HTTP_401_UNAUTHORIZED: { - "model": Envelope[Error], - "description": "unauthorized user. 
Login required", - }, - status.HTTP_409_CONFLICT: { - "model": Envelope[Error], - "description": "mismatch between new and confirmation passwords", - }, - status.HTTP_422_UNPROCESSABLE_ENTITY: { - "model": Envelope[Error], - "description": "current password is invalid", - }, - }, -) -async def change_password(data: ChangePasswordBody): - """logged in user changes password""" - - -@app.get( - "/auth/confirmation/{code}", - response_model=Envelope[Log], - tags=TAGS, - operation_id="auth_confirmation", - responses={ - "3XX": { - "description": "redirection to specific ui application page", - }, - }, -) -async def email_confirmation(code: str): - """email link sent to user to confirm an action""" - - -@app.get( - "/auth/api-keys", - tags=TAGS, - operation_id="list_api_keys", - responses={ - status.HTTP_200_OK: { - "description": "returns the display names of API keys", - "model": list[str], - }, - status.HTTP_400_BAD_REQUEST: { - "description": "key name requested is invalid", - }, - status.HTTP_401_UNAUTHORIZED: { - "description": "requires login to list keys", - }, - status.HTTP_403_FORBIDDEN: { - "description": "not enough permissions to list keys", - }, - }, -) -async def list_api_keys(code: str): - """lists display names of API keys by this user""" - - -@app.post( - "/auth/api-keys", - tags=TAGS, - operation_id="create_api_key", - responses={ - status.HTTP_200_OK: { - "description": "Authorization granted returning API key", - "model": ApiKeyGet, - }, - status.HTTP_400_BAD_REQUEST: { - "description": "key name requested is invalid", - }, - status.HTTP_401_UNAUTHORIZED: { - "description": "requires login to list keys", - }, - status.HTTP_403_FORBIDDEN: { - "description": "not enough permissions to list keys", - }, - }, -) -async def create_api_key(data: ApiKeyCreate): - """creates API keys to access public API""" - - -@app.delete( - "/auth/api-keys", - tags=TAGS, - operation_id="delete_api_key", - status_code=status.HTTP_204_NO_CONTENT, - responses={ - status.HTTP_204_NO_CONTENT: { - "description": "api key successfully deleted", - }, - status.HTTP_401_UNAUTHORIZED: { - "description": "requires login to delete a key", - }, - status.HTTP_403_FORBIDDEN: { - "description": "not enough permissions to delete a key", - }, - }, -) -async def delete_api_key(data: ApiKeyCreate): - """deletes API key by name""" - - -if __name__ == "__main__": - - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-auth.yaml") diff --git a/api/specs/webserver/scripts/openapi_nih_sparc.py b/api/specs/webserver/scripts/openapi_nih_sparc.py deleted file mode 100644 index fa2f565827d..00000000000 --- a/api/specs/webserver/scripts/openapi_nih_sparc.py +++ /dev/null @@ -1,107 +0,0 @@ -""" Helper script to generate OAS automatically NIH-sparc portal API section -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - -from enum import Enum -from typing import Optional, Union - -from fastapi import FastAPI, status -from fastapi.responses import RedirectResponse -from models_library.generics import Envelope -from models_library.projects import ProjectID -from models_library.services import ServiceKey, ServiceKeyVersion -from pydantic import HttpUrl, PositiveInt -from simcore_service_webserver.studies_dispatcher.handlers_rest import ( - ServiceGet, - Viewer, -) - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "nih-sparc", -] - - -@app.get( - 
"/services", - response_model=Envelope[list[ServiceGet]], - tags=TAGS, - operation_id="list_services", -) -async def list_services(): - """Returns a list latest version of services""" - - -@app.get( - "/viewers", - response_model=Envelope[list[Viewer]], - tags=TAGS, - operation_id="list_viewers", -) -async def list_viewers(file_type: Optional[str] = None): - """Lists all publically available viewers - - Notice that this might contain multiple services for the same filetype - - If file_type is provided, then it filters viewer for that filetype - """ - - -@app.get( - "/viewers/default", - response_model=Envelope[list[Viewer]], - tags=TAGS, - operation_id="list_default_viewers", -) -async def list_default_viewers(file_type: Optional[str] = None): - """Lists the default viewer for each supported filetype - - This was interfaced as a subcollection of viewers because it is a very common use-case - - Only publicaly available viewers - - If file_type is provided, then it filters viewer for that filetype - """ - - -@app.get( - "/view", - response_class=RedirectResponse, - response_description="Opens osparc and starts viewer for selected data", - status_code=status.HTTP_302_FOUND, - tags=TAGS, - operation_id="get_redirection_to_viewer", -) -async def get_redirection_to_viewer( - file_type: str, - viewer_key: ServiceKey, - viewer_version: ServiceKeyVersion, - file_size: PositiveInt, - download_link: HttpUrl, - file_name: Optional[str] = "unknown", -): - """Opens a viewer in osparc for data in the NIH-sparc portal""" - - -@app.get( - "/study/{study_id}", - tags=TAGS, - response_class=RedirectResponse, - response_description="Opens osparc and opens a copy of publised study", - status_code=status.HTTP_302_FOUND, - operation_id="get_redirection_to_study_page", -) -async def get_redirection_to_study_page(study_id: ProjectID): - """Opens a study published in osparc""" - - -if __name__ == "__main__": - - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-nih-sparc.yaml") diff --git a/api/specs/webserver/scripts/openapi_projects_ports.py b/api/specs/webserver/scripts/openapi_projects_ports.py deleted file mode 100644 index 67389d0432f..00000000000 --- a/api/specs/webserver/scripts/openapi_projects_ports.py +++ /dev/null @@ -1,78 +0,0 @@ -""" Helper script to automatically generate OAS - -This OAS are the source of truth -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from enum import Enum -from typing import Union - -from fastapi import FastAPI -from models_library.generics import Envelope -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from simcore_service_webserver.projects.projects_ports_handlers import ( - ProjectInputGet, - ProjectInputUpdate, - ProjectMetadataPortGet, - ProjectOutputGet, -) - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "project", -] - - -@app.get( - "/projects/{project_id}/inputs", - response_model=Envelope[dict[NodeID, ProjectInputGet]], - tags=TAGS, - operation_id="get_project_inputs", -) -async def get_project_inputs(project_id: ProjectID): - """New in version *0.10*""" - - -@app.patch( - "/projects/{project_id}/inputs", - response_model=Envelope[dict[NodeID, ProjectInputGet]], - tags=TAGS, - operation_id="update_project_inputs", -) -async def update_project_inputs( - project_id: ProjectID, updates: list[ProjectInputUpdate] -): - """New 
in version *0.10*""" - - -@app.get( - "/projects/{project_id}/outputs", - response_model=Envelope[dict[NodeID, ProjectOutputGet]], - tags=TAGS, - operation_id="get_project_outputs", -) -async def get_project_outputs(project_id: ProjectID): - """New in version *0.10*""" - - -@app.get( - "/projects/{project_id}/metadata/ports", - response_model=Envelope[list[ProjectMetadataPortGet]], - tags=TAGS, - operation_id="list_project_metadata_ports", -) -async def list_project_metadata_ports(project_id: ProjectID): - """New in version *0.12*""" - - -if __name__ == "__main__": - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-projects-ports.yaml") diff --git a/api/specs/webserver/scripts/openapi_storage.py b/api/specs/webserver/scripts/openapi_storage.py deleted file mode 100644 index b7359da087d..00000000000 --- a/api/specs/webserver/scripts/openapi_storage.py +++ /dev/null @@ -1,174 +0,0 @@ -""" Helper script to generate OAS automatically -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from enum import Enum -from typing import Union - -from fastapi import FastAPI, status -from models_library.generics import Envelope -from pydantic import NonNegativeInt -from simcore_service_webserver.storage_schemas import ( - CompleteUpload, - DatasetMetaData, - FileLocation, - FileMetaData, - FileUploadComplete, - FileUploadCompleteFuture, - FileUploadSchema, - PresignedLink, - TableSynchronisation, -) - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "storage", -] - - -@app.get( - "/storage/locations", - response_model=list[FileLocation], - tags=TAGS, - operation_id="get_storage_locations", - summary="Get available storage locations", -) -async def get_storage_locations(): - """Returns the list of available storage locations""" - - -@app.post( - "/storage/locations/{location_id}:sync", - response_model=Envelope[TableSynchronisation], - tags=TAGS, - operation_id="synchronise_meta_data_table", - summary="Manually triggers the synchronisation of the file meta data table in the database", -) -async def synchronise_meta_data_table( - location_id: str, dry_run: bool = True, fire_and_forget: bool = False -): - """Returns an object containing added, changed and removed paths""" - - -@app.get( - "storage/locations/{location_id}/datasets", - response_model=Envelope[DatasetMetaData], - tags=TAGS, - operation_id="get_datasets_metadata", - summary="Get datasets metadata", -) -async def get_datasets_metadata(location_id: str): - """Returns the list of dataset meta-datas""" - - -@app.get( - "/storage/locations/{location_id}/files/metadata", - response_model=list[DatasetMetaData], - tags=TAGS, - operation_id="get_files_metadata", - summary="Get datasets metadata", -) -async def get_files_metadata(location_id: str): - """list of file meta-datas""" - - -@app.get( - "/storage/locations/{location_id}/datasets/{dataset_id}/metadata", - response_model=list[FileMetaData], - tags=TAGS, - operation_id="get_files_metadata_dataset", - summary="Get Files Metadata", -) -async def get_files_metadata_dataset(location_id: str, dataset_id: str): - """list of file meta-datas""" - - -@app.get( - "/storage/locations/{location_id}/files/{file_id}", - response_model=PresignedLink, - tags=TAGS, - operation_id="download_file", - summary="Returns download link for requested file", -) -async def download_file(location_id: str, file_id: str): - """Returns a 
presigned link""" - - -@app.put( - "/storage/locations/{location_id}/files/{file_id}", - response_model=Envelope[FileUploadSchema], - tags=TAGS, - operation_id="upload_file", - summary="Returns upload link", -) -async def upload_file(location_id: str, file_id: str, file_size: NonNegativeInt): - """Return upload object""" - # TODO: links !!! - - -@app.delete( - "/storage/locations/{location_id}/files/{file_id}", - status_code=status.HTTP_204_NO_CONTENT, - tags=TAGS, - operation_id="delete_file", - summary="Deletes File", -) -async def delete_file(location_id: str, file_id: str): - ... - - -@app.post( - "/storage/locations/{location_id}/files/{file_id}:abort", - status_code=status.HTTP_204_NO_CONTENT, - tags=TAGS, - operation_id="abort_upload_file", -) -async def abort_upload_file(location_id: str, file_id: str): - """Asks the server to abort the upload and revert to the last valid version if any""" - - -@app.post( - "/storage/locations/{location_id}/files/{file_id}:complete", - status_code=status.HTTP_202_ACCEPTED, # TODO: Completion of upload is accepted - response_model=Envelope[FileUploadComplete], - tags=TAGS, - operation_id="complete_upload_file", -) -async def complete_upload_file(location_id: str, file_id: str, upload: CompleteUpload): - """Asks the server to complete the upload""" - # TODO: links CompleteUploadStatus - - -@app.post( - "/storage/locations/{location_id}/files/{file_id}:complete/futures/{future_id}", - response_model=Envelope[FileUploadCompleteFuture], - tags=TAGS, - summary="Check for upload completion", - operation_id="is_completed_upload_file", -) -async def is_completed_upload_file(location_id: str, file_id: str, future_id: str): - """Returns state of upload completion""" - - -@app.get( - "/storage/locations/{location_id}/files/{file_id}/metadata", - response_model=FileMetaData, - tags=TAGS, - summary="Get File Metadata", - operation_id="get_file_metadata", -) -async def get_file_metadata(location_id: str, file_id: str): - ... - - -if __name__ == "__main__": - - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-storage.ignore.yaml") diff --git a/api/specs/webserver/scripts/openapi_tags.py b/api/specs/webserver/scripts/openapi_tags.py deleted file mode 100644 index 35982cb00be..00000000000 --- a/api/specs/webserver/scripts/openapi_tags.py +++ /dev/null @@ -1,68 +0,0 @@ -""" Helper script to generate OAS automatically -""" - -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from enum import Enum -from typing import Union - -from fastapi import FastAPI, status -from models_library.generics import Envelope -from simcore_service_webserver.tags_handlers import TagCreate, TagGet, TagUpdate - -app = FastAPI(redoc_url=None) - -TAGS: list[Union[str, Enum]] = [ - "tag", -] - - -@app.post( - "/tags", - response_model=Envelope[TagGet], - tags=TAGS, - operation_id="create_tag", -) -async def create_tag(create: TagCreate): - ... - - -@app.get( - "/tags", - response_model=Envelope[list[TagGet]], - tags=TAGS, - operation_id="list_tags", -) -async def list_tags(): - ... - - -@app.patch( - "/tags/{tag_id}", - response_model=Envelope[TagGet], - tags=TAGS, - operation_id="update_tag", -) -async def update_tag(tag_id: int, update: TagUpdate): - ... - - -@app.delete( - "/tags/{tag_id}", - tags=TAGS, - status_code=status.HTTP_204_NO_CONTENT, - operation_id="delete_tag", -) -async def delete_tag(tag_id: int): - ... 
- - -if __name__ == "__main__": - - from _common import CURRENT_DIR, create_openapi_specs - - create_openapi_specs(app, CURRENT_DIR.parent / "openapi-tags.yaml") diff --git a/api/specs/webserver/scripts/requirements.txt b/api/specs/webserver/scripts/requirements.txt deleted file mode 100644 index 30c49abedff..00000000000 --- a/api/specs/webserver/scripts/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Extra reqs, besides webserver's - -fastapi diff --git a/api/specs/webserver/upload-v0.0.1.yaml b/api/specs/webserver/upload-v0.0.1.yaml deleted file mode 100644 index d6a3fc61c6b..00000000000 --- a/api/specs/webserver/upload-v0.0.1.yaml +++ /dev/null @@ -1,221 +0,0 @@ -openapi: 3.0.2 -servers: [] - -info: - description: OSparc Upload API - version: "0.0.1" - title: OSparc Upload REST API - contact: - name: Tobias Oetiker - email: tobi@itis.swiss - license: - name: MIT - url: 'https://opensource.org/licenses/MIT' - -tags: - - name: multi-part upload - description: | - The REST API is modeled after the S3 multi part upload service - which makes this very simple to implement on the server side - especially if you have an S3 like [minio.io](https://www.minio.io/). - -paths: - /upload/start: - post: - tags: - - upload - summary: start an upload - operationId: uploadStart - description: Start or resume an upload - requestBody: - description: the properties of the File Object - content: - application/json: - schema: - type: object - example: - name: big-date.bin - size: 1000000000000 - lastModified: 15294862961234 - required: - - name - - size - - lastModified - properties: - name: - type: string - description: file name - size: - type: integer - format: int64 - description: file size in bytes - lastModified: - type: integer - format: int64 - description: last file modification in milliseconds since 1970-01-01 - - responses: - '200': - description: | - Meta information about the upload. If a pending upload has matched - the information given in the fileMetaData the response will contain - information about the already uploaded chunks of data. - content: - application/json: - schema: - type: object - example: - uploadId: EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbD... - partsPresent: - - partNumber: 1 - size: 1024 - eTag: 7778aef83f66abc1fa1e8477f296d394 - - partNumber: 3 - size: 1024 - eTag: 8234979234987eafff384 - required: - - uploadId - - partsPresent - properties: - uploadId: - type: string - partsPresent: - type: array - minItems: 0 - items: - type: object - properties: - partNumber: - type: integer - format: int64 - size: - type: integer - format: int64 - eTag: - type: string - required: - - partNumber - - size - - eTag - '405': - description: Request was not formed as expected - /upload/part: - post: - tags: - - upload - summary: upload a data block - operationId: uploadPart - description: Upload a block of data - parameters: - - name: X-Upload-Id - in: header - required: true - description: which upload does this belong to - example: EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbD - schema: - type: string - - name: X-Upload-Part-Number - in: header - required: true - description: which part of the upload is this ? - example: 22 - schema: - type: integer - format: int64 - requestBody: - description: a raw block of data ... 
not encoded - # example: big-bad-binary-data - content: - application/octet-stream: - schema: - type: string - format: binary - responses: - '200': - description: confirmation for the successful part upload - - content: - application/json: - schema: - type: object - required: - - eTag - - size - properties: - eTag: - description: Entity Tag of the part just uploaded - type: string - size: - type: integer - format: int64 - example: - eTag: 7e10e7d25dc4581d89b9285be5f384fd - size: 9388854884884884 - '404': - description: No Such Upload - /upload/complete: - post: - tags: - - upload - summary: complete upload process - operationId: uploadComplete - description: Complete upload process - requestBody: - description: a list of uploaded parts to be assembled into a file - content: - application/json: - schema: - type: array - # minItems: 1 - example: - - partNumber: 1 - eTag: 7e10e7d25dc4581d89b9285be5f384fd - - partNumber: 2 - eTag: 1c10d25dc4581d89b9285be5f334fec5 - items: - type: object - properties: - partNumber: - type: integer - format: int64 - eTag: - type: string - required: - - partNumber - - eTag - responses: - '201': - description: new file has been created - headers: - ETag: - description: Entity Tag of the newly created file - schema: - type: string - example: 1c10d25dc4581d89b9285be5f334fec5 - '404': - description: No Such Upload - /upload/abort: - post: - tags: - - upload - summary: abort the pending upload - operationId: uploadAbort - description: Abort a pending multi part upload - requestBody: - description: cancel a pending upload - content: - application/json: - schema: - type: object - example: - uploadId: EXAMPLEJZ6e0YupT2h66iePQCc9IEbYbD - required: - - uploadId - properties: - uploadId: - type: string - responses: - '200': - description: upload canceled - '404': - description: upload does not exist diff --git a/api/tests/Makefile b/api/tests/Makefile index c7d59618e27..7f6ac2b4d85 100644 --- a/api/tests/Makefile +++ b/api/tests/Makefile @@ -9,15 +9,16 @@ all: install test-dev .PHONY: reqs requirements.txt: requirements.in # pip compiling $< - pip-compile $(UPGRADE_OPTION) --build-isolation --output-file $@ $< - + uv pip compile $(UPGRADE_OPTION) \ + --no-header \ + --output-file $@ $< reqs: requirements.txt ## alias to compile requirements.txt .PHONY: install install: _check_venv_active requirements.txt ## installs dependencies # installing requirements - pip-sync requirements.txt + @uv pip sync requirements.txt .PHONY: test-dev @@ -25,6 +26,25 @@ test-dev: _check_venv_active ## runs all tests [DEV] # running unit tests pytest -vv --exitfirst --failed-first --durations=10 --pdb $(CURDIR) +.PHONY: test-ci +test-ci: _check_venv_active ## runs all tests [CI] + # running unit tests + pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-append \ + --cov-config=.coveragerc \ + --cov-report=term-missing \ + --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ + --cov=. 
\ + --durations=10 \ + --log-date-format="%Y-%m-%d %H:%M:%S" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ + --verbose \ + -m "not heavy_load" \ + $(PYTEST_ADDITIONAL_PARAMETERS) \ + $(TEST_TARGET) .PHONY: help diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 1f0319393b7..8be5481a3c5 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -1,5 +1,6 @@ -# pylint: disable=unused-argument # pylint: disable=redefined-outer-name +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument # pylint: disable=unused-variable import logging diff --git a/api/tests/requirements.in b/api/tests/requirements.in index d78e89f90dd..85ed6d83d2f 100644 --- a/api/tests/requirements.in +++ b/api/tests/requirements.in @@ -3,9 +3,9 @@ aiohttp coverage -openapi-core<0.15 # Broken compatibility. This library in any case is marked as deprecated and will be removed! +openapi-core pytest -pytest-aiohttp +pytest-asyncio pytest-cov pytest-instafail pytest-sugar diff --git a/api/tests/requirements.txt b/api/tests/requirements.txt index 8ee75bb2ebd..6711f5c7180 100644 --- a/api/tests/requirements.txt +++ b/api/tests/requirements.txt @@ -1,112 +1,125 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements.txt requirements.in -# -aiohttp==3.8.4 +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 # via # -c ../../requirements/constraints.txt # -r requirements.in - # pytest-aiohttp -aiosignal==1.3.1 - # via aiohttp -async-timeout==4.0.2 +aiosignal==1.3.2 # via aiohttp -attrs==22.2.0 +attrs==25.1.0 # via # aiohttp # jsonschema - # openapi-core - # pytest -charset-normalizer==3.0.1 - # via aiohttp -coverage[toml]==7.2.1 + # referencing +certifi==2025.1.31 + # via + # -c ../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +coverage==7.6.12 # via # -r requirements.in # pytest-cov -dictpath==0.1.3 - # via openapi-core -exceptiongroup==1.1.0 - # via pytest -frozenlist==1.3.3 +frozenlist==1.5.0 # via # aiohttp # aiosignal -idna==3.4 - # via yarl +idna==3.10 + # via + # requests + # yarl iniconfig==2.0.0 # via pytest -isodate==0.6.1 +isodate==0.7.2 # via openapi-core -jsonschema==4.17.3 +jsonschema==4.23.0 # via + # openapi-core # openapi-schema-validator # openapi-spec-validator -lazy-object-proxy==1.9.0 - # via openapi-core -markupsafe==2.1.2 +jsonschema-path==0.3.4 + # via + # openapi-core + # openapi-spec-validator +jsonschema-specifications==2024.10.1 + # via + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +markupsafe==3.0.2 # via werkzeug -more-itertools==9.1.0 +more-itertools==10.6.0 # via openapi-core -multidict==6.0.4 +multidict==6.1.0 # via # aiohttp # yarl -openapi-core==0.14.5 +openapi-core==0.19.4 # via -r requirements.in -openapi-schema-validator==0.2.3 +openapi-schema-validator==0.6.3 # via # openapi-core # openapi-spec-validator -openapi-spec-validator==0.4.0 +openapi-spec-validator==0.7.1 # via openapi-core -packaging==23.0 +packaging==24.2 # via # pytest # pytest-sugar -parse==1.19.0 +parse==1.20.2 # via openapi-core -pluggy==1.0.0 +pathable==0.4.4 + # via jsonschema-path +pluggy==1.5.0 # via pytest -pyrsistent==0.19.3 - # via jsonschema -pytest==7.2.1 +propcache==0.3.0 + # via + # aiohttp + # yarl +pytest==8.3.5 # via # -r requirements.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-instafail # pytest-sugar -pytest-aiohttp==1.0.4 +pytest-asyncio==0.26.0 # 
via -r requirements.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements.in -pyyaml==6.0 +pyyaml==6.0.2 # via # -c ../../requirements/constraints.txt - # openapi-spec-validator -six==1.16.0 + # jsonschema-path +referencing==0.35.1 # via - # isodate - # openapi-core -termcolor==2.2.0 + # -c ../../requirements/constraints.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +requests==2.32.3 + # via jsonschema-path +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.23.1 + # via + # jsonschema + # referencing +six==1.17.0 + # via rfc3339-validator +termcolor==2.5.0 # via pytest-sugar -tomli==2.0.1 +urllib3==2.3.0 # via - # coverage - # pytest -werkzeug==2.2.3 + # -c ../../requirements/constraints.txt + # requests +werkzeug==3.1.3 # via openapi-core -yarl==1.8.2 +yarl==1.18.3 # via aiohttp - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/api/tests/test_against_openapi_core.py b/api/tests/test_against_openapi_core.py index 3c5359d8173..01cdeb01484 100644 --- a/api/tests/test_against_openapi_core.py +++ b/api/tests/test_against_openapi_core.py @@ -12,7 +12,6 @@ import openapi_core import pytest import yaml -from openapi_core.spec.paths import SpecPath as OpenApiSpec from utils import list_all_openapi @@ -24,7 +23,7 @@ def test_can_create_specs_from_path(openapi_path: str): spec_dict = yaml.safe_load(fh) # will raise if openapi_core cannot handle OAS - specs = openapi_core.create_spec(spec_dict, oas_path.as_uri()) + specs = openapi_core.Spec.from_dict(spec_dict, base_uri=oas_path.as_uri()) assert specs - assert isinstance(specs, OpenApiSpec) + assert isinstance(specs, openapi_core.Spec) diff --git a/api/tests/test_conventions_openapi.py b/api/tests/test_conventions_openapi.py index 942f47602f7..bb1f2d9f893 100644 --- a/api/tests/test_conventions_openapi.py +++ b/api/tests/test_conventions_openapi.py @@ -7,7 +7,6 @@ import pytest import yaml from utils import list_files_in_api_specs -from yarl import URL # Conventions _REQUIRED_FIELDS = [ @@ -22,15 +21,15 @@ non_converted_yamls = [ pathstr for pathstr in list_files_in_api_specs("*.yaml") - if not pathstr.endswith(CONVERTED_SUFFIX) + if not f"{pathstr}".endswith(CONVERTED_SUFFIX) ] # skip converted schemas assert non_converted_yamls -@pytest.mark.parametrize("path", non_converted_yamls) -def test_openapi_envelope_required_fields(path: str): - with open(path) as file_stream: +@pytest.mark.parametrize("path", non_converted_yamls, ids=lambda p: p.name) +def test_openapi_envelope_required_fields(path: Path): + with Path.open(path) as file_stream: oas_dict = yaml.safe_load(file_stream) for key, value in oas_dict.items(): if "Envelope" in key: @@ -42,34 +41,3 @@ def test_openapi_envelope_required_fields(path: str): assert "error" in required_fields or "data" in required_fields assert "error" in fields_definitions or "data" in fields_definitions - - -main_openapi_yamls = [ - pathstr - for pathstr in list_files_in_api_specs("openapi.y*ml") - if not pathstr.endswith(CONVERTED_SUFFIX) -] # skip converted schemas - -assert main_openapi_yamls - - -@pytest.mark.parametrize("openapi_path", main_openapi_yamls) -def test_versioning_and_basepath(openapi_path): - openapi_path = Path(openapi_path) - - # version in folder name is only major! 
- with openapi_path.open() as f: - oas_dict = yaml.safe_load(f) - - # version in specs info is M.m.n - version_in_info = [int(i) for i in oas_dict["info"]["version"].split(".")] - - # basepath in servers must also be as '/v0' - for server in oas_dict["servers"]: - kwargs = { - key: value["default"] for key, value in server.get("variables", {}).items() - } - url = URL(server["url"].format(**kwargs)) - assert url.path == "/v%d" % version_in_info[0], ( - "Wrong basepath in server: %s" % server - ) diff --git a/api/tests/test_full_openapis.py b/api/tests/test_full_openapis.py index 7a6c01b98ae..a0f24376002 100644 --- a/api/tests/test_full_openapis.py +++ b/api/tests/test_full_openapis.py @@ -1,8 +1,8 @@ from pathlib import Path import pytest -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError +from openapi_spec_validator.exceptions import OpenAPISpecValidatorError +from openapi_spec_validator.shortcuts import get_validator_cls from utils import is_openapi_schema, list_files_in_api_specs, load_specs # NOTE: parametrizing tests per file makes more visible which file failed @@ -12,12 +12,14 @@ @pytest.mark.parametrize( "spec_file_path", list_files_in_api_specs("*.json") + list_files_in_api_specs("*.y*ml"), + ids=lambda o: f"{o.parent.name}/{o.name}", ) -def test_valid_openapi_specs(spec_file_path): - spec_file_path = Path(spec_file_path) +def test_valid_openapi_specs(spec_file_path: Path): specs = load_specs(spec_file_path) if is_openapi_schema(specs): try: - validate_spec(specs, spec_url=spec_file_path.as_uri()) - except OpenAPIValidationError as err: + openapi_validator_cls = get_validator_cls(specs) + openapi_validator_cls(specs) + + except OpenAPISpecValidatorError as err: pytest.fail(f"Failed validating {spec_file_path}:\n{err.message}") diff --git a/api/tests/test_individual_json_schemas.py b/api/tests/test_individual_json_schemas.py index 733ecd57c79..bfd2c4976fb 100644 --- a/api/tests/test_individual_json_schemas.py +++ b/api/tests/test_individual_json_schemas.py @@ -12,8 +12,7 @@ "spec_file_path", list_files_in_api_specs("*.json") + list_files_in_api_specs("*.y*ml"), ) -def test_valid_individual_json_schemas_specs(spec_file_path): - spec_file_path = Path(spec_file_path) +def test_valid_individual_json_schemas_specs(spec_file_path: Path): specs_dict = load_specs(spec_file_path) if is_json_schema(specs_dict): diff --git a/api/tests/test_individual_openapi_schemas.py b/api/tests/test_individual_openapi_schemas.py deleted file mode 100644 index 9adef7ad7e4..00000000000 --- a/api/tests/test_individual_openapi_schemas.py +++ /dev/null @@ -1,148 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import os -import shutil -from pathlib import Path - -import pytest -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError -from utils import dump_specs, is_json_schema, is_openapi_schema, load_specs - -# Conventions -_REQUIRED_FIELDS = ["error", "data"] -CONVERTED_SUFFIX = "-converted.yaml" -_FAKE_SCHEMA_NAME = "FakeSchema" - -_FAKE_OPEN_API_HEADERS = { - "openapi": "3.0.0", - "info": { - "title": "An include file to define sortable attributes", - "version": "1.0.0", - }, - "paths": {}, - "components": {"parameters": {}, "schemas": {}}, -} - - -def add_namespace_for_converted_schemas(schema_specs: dict): - # schemas converted from jsonschema do not have an overarching namespace. 
- # the openapi validator does not like this - # we use the jsonschema title to create a fake namespace - fake_schema_specs = {_FAKE_SCHEMA_NAME: schema_specs} - return fake_schema_specs - - -def change_references_to_schemas(filepath: Path, specs: dict): - from os.path import abspath, exists, isabs, relpath - - filedir = filepath.parent - - for key, value in specs.items(): - if isinstance(value, dict): - # navigate specs - change_references_to_schemas(filepath, value) - - elif key in ("allOf", "oneOf", "anyOf"): # navigates allOf, oneOf, anyOf - for item in value: - change_references_to_schemas(filepath, item) - - elif key == "$ref": - # Ensures value = "file_ref#section_ref" - value = str(value) - if value.startswith("#"): - value = str(filepath) + value - elif "#" not in value: - value = value + "# " - - file_ref, section_ref = value.split("#") - - if not isabs(file_ref): - file_ref = str(filedir / file_ref) - - file_ref = abspath(file_ref) # resolves - assert exists(file_ref), file_ref - - if ( - "schemas" in file_ref - ): # reference to a schema file (i.e. inside a schemas folder) - if not section_ref.startswith("/components/schemas/"): # not updated! - section_ref = ( - "/components/schemas/" + section_ref.lstrip("/").strip() - ) - if file_ref.endswith( - CONVERTED_SUFFIX - ): # fake name used in converted schemas - section_ref += _FAKE_SCHEMA_NAME - - file_ref = ( - "./" + relpath(file_ref, filedir) - if not filepath.samefile(file_ref) - else "" - ) - specs[key] = file_ref + "#" + section_ref - - -@pytest.fixture(scope="session") -def converted_specs_testdir(api_specs_dir, all_api_specs_tails, tmpdir_factory): - """ - - All api_specs files are copied into tmpdir - - All openapi files under schemas/ folders are processed into valid openapi specs - - All references to these files are replaced from - $ref: ... /schemas/some_file.yaml#Reference - to - $ref: ... 
/schemas/some_file.yaml#/components/reference/Reference - - """ - basedir = api_specs_dir - testdir = Path(tmpdir_factory.mktemp("converted-specs")) - - print(testdir) - - for tail in all_api_specs_tails: - - # directory with converted specs - os.makedirs(testdir / tail.parent, exist_ok=True) - - specs = load_specs(basedir / tail) - - if ( - "schemas" in str(tail) - and not is_openapi_schema(specs) - and not is_json_schema(specs) - ): - - # convert to valid openapi - if tail.name.endswith(CONVERTED_SUFFIX): - specs = add_namespace_for_converted_schemas(specs) - - new_specs = _FAKE_OPEN_API_HEADERS - new_specs["components"]["schemas"] = specs - - # change references - change_references_to_schemas(basedir / tail, new_specs) - dump_specs(new_specs, testdir / tail) - - elif is_openapi_schema(specs): - new_specs = specs - # change references - change_references_to_schemas(basedir / tail, new_specs) - dump_specs(new_specs, testdir / tail) - else: - shutil.copy2(basedir / tail, testdir / tail) - - return testdir - - -@pytest.mark.skip(reason="Implementing in PR 324") -def test_valid_individual_openapi_specs(api_specs_tail, converted_specs_testdir): - # NOTE: api_specs_tail is a parametrized **fixture** - # - api_specs_path = converted_specs_testdir / api_specs_tail - try: - specs = load_specs(api_specs_path) - validate_spec(specs, spec_url=api_specs_path.as_uri()) - except OpenAPIValidationError as err: - pytest.fail(f"Failed validating {api_specs_path}:\n{err.message}") diff --git a/api/tests/test_repo_data.py b/api/tests/test_repo_data.py index 3ce4e911fe3..03d9245ec55 100644 --- a/api/tests/test_repo_data.py +++ b/api/tests/test_repo_data.py @@ -15,7 +15,7 @@ SYNCED_VERSIONS_SUFFIX = [ ".json", # json-schema specs file - "-converted.yaml", # equivalent openapi specs file (see scripts/json-schema-to-openapi-schema) + "-converted-clean.yaml", # equivalent openapi specs file (see scripts/json-schema-to-openapi-schema) ] # Add here paths to files containing project's data that can be validated with projects schema diff --git a/api/tests/utils.py b/api/tests/utils.py index 4f840fe4cf3..508bda389f7 100644 --- a/api/tests/utils.py +++ b/api/tests/utils.py @@ -26,7 +26,7 @@ def specs_folder(): return current_dir.parent / "specs" -def list_files_in_api_specs(wildcard: str) -> list[str]: +def list_files_in_api_specs(wildcard: str) -> list[Path]: """Helper function to parameterize tests with list of files e.g. pytest -v test_individual_openapi_schemas.py @@ -36,7 +36,7 @@ def list_files_in_api_specs(wildcard: str) -> list[str]: specs_dir = specs_folder() # NOTE: keep as string and not path, so it can be rendered - return list(str(p) for p in specs_dir.rglob(wildcard)) + return [Path(p) for p in specs_dir.rglob(wildcard)] def list_all_openapi() -> list[str]: diff --git a/ci/deploy/dockerhub-tag-version.bash b/ci/deploy/dockerhub-tag-version.bash index 3f6fbc15093..e68013061b1 100755 --- a/ci/deploy/dockerhub-tag-version.bash +++ b/ci/deploy/dockerhub-tag-version.bash @@ -47,4 +47,28 @@ export DOCKER_IMAGE_TAG log_info "pushing images ${DOCKER_IMAGE_TAG} to ${DOCKER_REGISTRY}" make push-version +# push latest image to matching git tag if on git tag +# +# Explanation on how checking if a variable is set works with `set -o nounset`: +# +# - `${MY_ENV_VAR}` : This would normally return the value of `MY_ENV_VAR`. +# If `MY_ENV_VAR` is not set and `set -o nounset` is active, using this causes an error and the script would exit. +# - `${MY_ENV_VAR+x}` : This is a form of parameter expansion. 
If `MY_ENV_VAR` is unset, this expands to nothing (i.e., it's an empty string). +# If `MY_ENV_VAR` is set (even to an empty value), this expands to `x`. Importantly, even if `MY_ENV_VAR` is unset, this will not cause an error even with `set -o nounset` active, +# because you're not actually trying to use the value of an unset variable - you're just checking if it is set or not. +# The `if [ ! -z ${MY_ENV_VAR+x} ]` line checks if `${MY_ENV_VAR+x}` is not an empty string (`! -z` checks for a non-empty string). +# If `MY_ENV_VAR` is set, `${MY_ENV_VAR+x}` will be `x`, and the condition will be true. If `MY_ENV_VAR` is unset, `${MY_ENV_VAR+x}` will be an empty string, and the condition will be false. +# In both cases, evaluating `${MY_ENV_VAR+x}` itself never triggers an error under `set -o nounset`, even when `MY_ENV_VAR` is unset. + +if [ ! -z ${GIT_TAG+x} ]; then + echo "GIT_TAG is '$GIT_TAG'" + DOCKER_IMAGE_TAG=${GIT_TAG} + export DOCKER_IMAGE_TAG + log_info "pushing images ${DOCKER_IMAGE_TAG} to ${DOCKER_REGISTRY}" + make push-version +else + echo "GIT_TAG is not set, we assume we are on the master branch." +fi + + log_info "complete!" diff --git a/ci/github/helpers/install_7zip.bash b/ci/github/helpers/install_7zip.bash new file mode 100755 index 00000000000..f30532a8ec8 --- /dev/null +++ b/ci/github/helpers/install_7zip.bash @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Installs the latest version of 7zip plugin +# + +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +exec "$( dirname -- "$0"; )"/../../../scripts/install_7zip.bash diff --git a/ci/github/helpers/install_aws_cli_v2.bash b/ci/github/helpers/install_aws_cli_v2.bash new file mode 100755 index 00000000000..3647f2b5568 --- /dev/null +++ b/ci/github/helpers/install_aws_cli_v2.bash @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Installs the latest version of AWS CLI V2 +# + +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +AWS_CLI_VERSION="2.11.11" +ARCH="x86_64" + +curl "https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}-${AWS_CLI_VERSION}.zip" --output "awscliv2.zip" && + apt-get update && + apt-get install -y unzip && + unzip awscliv2.zip && + ./aws/install --update && + apt-get remove --purge -y unzip && + rm awscliv2.zip && + rm -rf awscliv2 diff --git a/ci/github/helpers/install_rclone_docker_volume_plugin.bash b/ci/github/helpers/install_rclone_docker_volume_plugin.bash index c81109892f3..1f0e54658fb 100755 --- a/ci/github/helpers/install_rclone_docker_volume_plugin.bash +++ b/ci/github/helpers/install_rclone_docker_volume_plugin.bash @@ -4,14 +4,13 @@ # # http://redsymbol.net/articles/unofficial-bash-strict-mode/ -set -o errexit # abort on nonzero exitstatus -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes IFS=$'\n\t' - # Installation instructions from https://rclone.org/docker/ -R_CLONE_VERSION="1.62.1" +R_CLONE_VERSION="1.66.0" mkdir --parents /var/lib/docker-plugins/rclone/config mkdir --parents /var/lib/docker-plugins/rclone/cache docker plugin install rclone/docker-volume-rclone:amd64-${R_CLONE_VERSION} args="-v" --alias rclone --grant-all-permissions diff --git 
a/ci/github/helpers/openapi-specs-diff.bash b/ci/github/helpers/openapi-specs-diff.bash new file mode 100755 index 00000000000..b4409c174a8 --- /dev/null +++ b/ci/github/helpers/openapi-specs-diff.bash @@ -0,0 +1,47 @@ +#!/bin/bash + +# Recursively checks whether the OpenAPI specs within a local osparc-simcore revision differ from, or are backwards compatible with, those in a remote base +# Example: +# bash osparc-simcore/ci/github/helpers/openapi-specs-diff.bash diff \ +# https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master \ +# ./osparc-simcore/ +# or +# bash osparc-simcore/ci/github/helpers/openapi-specs-diff.bash breaking \ +# https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master \ +# ./osparc-simcore/ +# +# The script generates GitHub error annotations to make it easier to locate issues. + +operation=$1 +base_remote=$2 +revision_local=$3 + +repo_base_dir=$(realpath "$(dirname "${BASH_SOURCE[0]}")/../../..") +openapi_specs=$(find "${revision_local}" -type f \( -name 'openapi.json' -o -name 'openapi.yaml' \) -not -path '*/.*' -exec realpath --relative-to="${revision_local}" {} \;) + +cd "${revision_local}" || exit 1 # required to mount correct dir for diff tool + + +function run_diff_tool() { + exit_status=0 + for spec in ${openapi_specs}; do + echo "Comparing ${spec}" + if ! "${repo_base_dir}/scripts/openapi-diff.bash" "$@" "${base_remote}/${spec}" "/specs/${spec}"; then + echo "::error file=${spec}:: Error when checking ${spec}" + exit_status=$(("${exit_status}" + "1")) + fi + printf "%0.s=" {1..100} && printf "\n" + done + + exit "${exit_status}" +} + + +if [[ "${operation}" == "diff" ]]; then + run_diff_tool "diff" "--fail-on-diff" +elif [[ "${operation}" == "breaking" ]]; then + run_diff_tool "breaking" "--fail-on" "ERR" +else + echo "the operation '${operation}' is not supported" + exit 1 +fi diff --git a/ci/github/helpers/setup_docker_compose.bash b/ci/github/helpers/setup_docker_compose.bash deleted file mode 100755 index 64d618fcf16..00000000000 --- a/ci/github/helpers/setup_docker_compose.bash +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# strict mode -set -o errexit # abort on nonzero exitstatus -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes -IFS=$'\n\t' - -# when changing the DOCKER_COMPOSE_VERSION please compute the sha256sum on an ubuntu box (macOS has different checksum) -DOCKER_COMPOSE_VERSION="$1" -# Check for sha256 file in Asset section on https://github.com/docker/compose/releases -DOCKER_COMPOSE_SHA256SUM="$2" -DOCKER_COMPOSE_BIN=/usr/local/bin/docker-compose -curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o $DOCKER_COMPOSE_BIN -chmod +x $DOCKER_COMPOSE_BIN - -# checks it runs -$DOCKER_COMPOSE_BIN --version - -# location -where=$(command -v which docker-compose) -[ "$where" != "$DOCKER_COMPOSE_BIN" ] && echo "WARNING: docker-compose already pre-sintalled in $where " - - -# To create new DOCKER_COMPOSE_SHA256SUM = sha256sum ${DOCKER_COMPOSE_BIN} -# SEE https://superuser.com/a/1465221 -echo "$DOCKER_COMPOSE_SHA256SUM $DOCKER_COMPOSE_BIN" | sha256sum --check diff --git a/ci/github/integration-testing/director-v2.bash b/ci/github/integration-testing/director-v2.bash index 80c3020adf2..5388e68b623 100755 --- a/ci/github/integration-testing/director-v2.bash +++ b/ci/github/integration-testing/director-v2.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash 
ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/director-v2 make install-ci popd - .venv/bin/pip list --verbose + uv pip list make info-images } @@ -21,7 +20,7 @@ test() { # shellcheck source=/dev/null source .venv/bin/activate pushd services/director-v2 - make test-ci-integration test-subfolder="$1" + make test-ci-integration test-path="$1" popd } diff --git a/ci/github/integration-testing/docker-api-proxy.bash b/ci/github/integration-testing/docker-api-proxy.bash new file mode 100755 index 00000000000..c7ad9775c07 --- /dev/null +++ b/ci/github/integration-testing/docker-api-proxy.bash @@ -0,0 +1,40 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/docker-api-proxy + make install-ci + popd + uv pip list + make info-images +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/docker-api-proxy + make test-ci-integration + popd +} + +clean_up() { + docker images + make down +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/integration-testing/dynamic-sidecar.bash b/ci/github/integration-testing/dynamic-sidecar.bash new file mode 100755 index 00000000000..568fd23bcd0 --- /dev/null +++ b/ci/github/integration-testing/dynamic-sidecar.bash @@ -0,0 +1,41 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + sudo ./ci/github/helpers/install_7zip.bash + pushd services/dynamic-sidecar + make install-ci + popd + uv pip list + make info-images +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/dynamic-sidecar + make test-ci-integration + popd +} + +clean_up() { + docker images + make down +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/integration-testing/simcore-sdk.bash b/ci/github/integration-testing/simcore-sdk.bash index f32cd3006d4..952192cf09b 100755 --- a/ci/github/integration-testing/simcore-sdk.bash +++ b/ci/github/integration-testing/simcore-sdk.bash @@ -5,14 +5,14 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate + sudo ./ci/github/helpers/install_aws_cli_v2.bash pushd packages/simcore-sdk make install-ci popd - .venv/bin/pip list --verbose + uv pip list make info-images } diff --git a/ci/github/integration-testing/webserver.bash b/ci/github/integration-testing/webserver.bash index 2cfec244808..7c9a303d8fd 100755 --- a/ci/github/integration-testing/webserver.bash +++ b/ci/github/integration-testing/webserver.bash @@ -6,14 +6,14 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' 
install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate + sudo ./ci/github/helpers/install_7zip.bash pushd services/web/server make install-ci popd - .venv/bin/pip list --verbose + uv pip list make info-images } @@ -21,7 +21,7 @@ test() { # shellcheck source=/dev/null source .venv/bin/activate pushd services/web/server - make test-ci-integration test-subfolder="$1" + make test-ci-integration test-path="$1" popd } diff --git a/ci/github/system-testing/e2e-playwright.bash b/ci/github/system-testing/e2e-playwright.bash new file mode 100755 index 00000000000..b0a6e498f66 --- /dev/null +++ b/ci/github/system-testing/e2e-playwright.bash @@ -0,0 +1,52 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +# https://github.com/GoogleChrome/puppeteer/blob/master/docs/troubleshooting.md#running-puppeteer-on-travis-ci +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +# in case it's a Pull request, the env are never available, default to itisfoundation to get a maybe not too old version for caching +DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash) +export DOCKER_IMAGE_TAG + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd tests/e2e-playwright + make install-ci-up-simcore + popd +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd tests/e2e-playwright + make test-sleepers + make test-platform + popd +} + +dump_docker_logs() { + # get docker logs + # NOTE: Timeout avoids issue with dumping logs that hang! + out_dir=tests/e2e-playwright/test_failures + mkdir --parents "$out_dir" + + for service_id in $(docker service ls -q); do + service_name=$(docker service inspect "$service_id" --format="{{.Spec.Name}}") + echo "Dumping logs for $service_name" + (timeout 30 docker service logs --timestamps --tail=400 --details "$service_id" >"$out_dir/$service_name.log" 2>&1) || true + done +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/system-testing/environment-setup.bash b/ci/github/system-testing/environment-setup.bash index b537043dd99..3aa26cb7c7d 100755 --- a/ci/github/system-testing/environment-setup.bash +++ b/ci/github/system-testing/environment-setup.bash @@ -2,20 +2,24 @@ # # http://redsymbol.net/articles/unofficial-bash-strict-mode/ -set -o errexit # abort on nonzero exitstatus -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate pushd tests/environment-setup - pip3 install -r requirements/ci.txt + make install-ci popd - make .env + uv pip list } test() { + # shellcheck source=/dev/null + source .venv/bin/activate pytest --color=yes -v tests/environment-setup --log-level=DEBUG --asyncio-mode=auto } diff --git a/ci/github/system-testing/public-api.bash b/ci/github/system-testing/public-api.bash index 1287dfd39b6..96abf7d7004 100755 --- a/ci/github/system-testing/public-api.bash +++ b/ci/github/system-testing/public-api.bash 
@@ -11,28 +11,23 @@ set -o nounset # abort on unbound variable set -o pipefail # don't hide errors within pipes IFS=$'\n\t' - install() { - bash ci/helpers/ensure_python_pip.bash + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate pushd tests/public-api - pip3 install -r requirements/ci.txt - pip freeze + make install-ci popd - make .env - pip list -v make info-images } test() { # WARNING: this test is heavy. Due to limited CI machine power, please do not # add too much overhead (e.g. low log-level etc) - pytest \ - --color=yes \ - --cov-report=term-missing \ - --keep-docker-up \ - --durations=5 \ - -v \ - tests/public-api + # shellcheck source=/dev/null + source .venv/bin/activate + pushd tests/public-api + make test-ci } clean_up() { diff --git a/ci/github/system-testing/swarm-deploy.bash b/ci/github/system-testing/swarm-deploy.bash index eba2bdd9273..d5df32a6d0f 100755 --- a/ci/github/system-testing/swarm-deploy.bash +++ b/ci/github/system-testing/swarm-deploy.bash @@ -12,24 +12,27 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate pushd tests/swarm-deploy - pip3 install -r requirements/ci.txt + make install-ci popd - make .env - pip list -v + uv pip list make info-images } test() { # WARNING: this test is heavy. Due to limited CI machine power, please do not # add too much overhead (e.g. low log-level etc) + # shellcheck source=/dev/null + source .venv/bin/activate pytest \ + --asyncio-mode=auto \ --color=yes \ - --cov-report=term-missing \ - -v \ --durations=5 \ --log-level=INFO \ + -v \ tests/swarm-deploy } diff --git a/ci/github/unit-testing/agent.bash b/ci/github/unit-testing/agent.bash index db54a21b879..c9552e5ea20 100755 --- a/ci/github/unit-testing/agent.bash +++ b/ci/github/unit-testing/agent.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/agent make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/agent make mypy popd diff --git a/ci/github/unit-testing/api-server.bash b/ci/github/unit-testing/api-server.bash index c43fad43d66..7d8377445ea 100755 --- a/ci/github/unit-testing/api-server.bash +++ b/ci/github/unit-testing/api-server.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/api-server make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,11 +24,22 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/api-server make mypy popd } +openapi-diff() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/api-server + make openapi-dev-diff.json + popd +} + # Check if the function exists (bash specific) if declare -f "$1" >/dev/null; then # call arguments verbatim diff --git a/ci/github/unit-testing/api.bash b/ci/github/unit-testing/api.bash index 4ec3d574472..eb0db9e62fb 100755 --- a/ci/github/unit-testing/api.bash +++ b/ci/github/unit-testing/api.bash @@ -6,20 +6,21 @@ set -o pipefail # don't hide 
errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash - pip3 install --requirement api/tests/requirements.txt - pip list --verbose + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd api/tests + make install + popd + uv pip list } test() { - pytest \ - --color=yes \ - --durations=10 \ - --log-date-format="%Y-%m-%d %H:%M:%S" \ - --log-format="%(asctime)s %(levelname)s %(message)s" \ - --verbose \ - -m "not heavy_load" \ - api/tests + # shellcheck source=/dev/null + source .venv/bin/activate + pushd api/tests + make test-ci + popd } # Check if the function exists (bash specific) diff --git a/ci/github/unit-testing/autoscaling.bash b/ci/github/unit-testing/autoscaling.bash index 55f33abeb0d..d5de2bf5e81 100755 --- a/ci/github/unit-testing/autoscaling.bash +++ b/ci/github/unit-testing/autoscaling.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/autoscaling make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/autoscaling make mypy popd diff --git a/ci/github/unit-testing/aws-library.bash b/ci/github/unit-testing/aws-library.bash new file mode 100755 index 00000000000..c6328d7ce2d --- /dev/null +++ b/ci/github/unit-testing/aws-library.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/aws-library + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/aws-library + make tests-ci + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd packages/aws-library + make mypy + popd +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/catalog.bash b/ci/github/unit-testing/catalog.bash index c85123ade63..656518a6694 100755 --- a/ci/github/unit-testing/catalog.bash +++ b/ci/github/unit-testing/catalog.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/catalog make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/catalog make mypy popd diff --git a/ci/github/unit-testing/common-library.bash b/ci/github/unit-testing/common-library.bash new file mode 100755 index 00000000000..71547174103 --- /dev/null +++ b/ci/github/unit-testing/common-library.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors 
within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/common-library + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/common-library + make tests-ci + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd packages/common-library + make mypy + popd +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/dask-sidecar.bash b/ci/github/unit-testing/dask-sidecar.bash index 853502fde5e..410c06f4bea 100755 --- a/ci/github/unit-testing/dask-sidecar.bash +++ b/ci/github/unit-testing/dask-sidecar.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/dask-sidecar make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/dask-sidecar make mypy popd diff --git a/ci/github/unit-testing/dask-task-models-library.bash b/ci/github/unit-testing/dask-task-models-library.bash index af3b3aae759..039ffea9959 100755 --- a/ci/github/unit-testing/dask-task-models-library.bash +++ b/ci/github/unit-testing/dask-task-models-library.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd packages/dask-task-models-library make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/dask-task-models-library make mypy popd diff --git a/ci/github/unit-testing/datcore-adapter.bash b/ci/github/unit-testing/datcore-adapter.bash index 0b9bd84f4e5..3b9ddc2f6f8 100755 --- a/ci/github/unit-testing/datcore-adapter.bash +++ b/ci/github/unit-testing/datcore-adapter.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/datcore-adapter make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/datcore-adapter make mypy popd diff --git a/ci/github/unit-testing/director-v2.bash b/ci/github/unit-testing/director-v2.bash index f1b73b1ec1d..cc59003afee 100755 --- a/ci/github/unit-testing/director-v2.bash +++ b/ci/github/unit-testing/director-v2.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/director-v2 make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -23,11 +22,14 @@ test() { pushd services/director-v2 make test-ci-unit pytest-parameters="--numprocesses=auto --ignore-glob=**/with_dbs/**" # 
these tests cannot be run in parallel - make test-ci-unit test-subfolder=with_dbs + make test-ci-unit test-path=with_dbs popd } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/director-v2 make mypy popd diff --git a/ci/github/unit-testing/director.bash b/ci/github/unit-testing/director.bash index d6aa3f9c720..a29764642ee 100755 --- a/ci/github/unit-testing/director.bash +++ b/ci/github/unit-testing/director.bash @@ -6,45 +6,29 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - # Replaces 'bash ci/helpers/ensure_python_pip.bash' - - echo "INFO:" "$(python --version)" "@" "$(command -v python)" - - # installs pip if not in place - python -m ensurepip - - echo "INFO:" "$(pip --version)" "@" "$(command -v pip)" - # NOTE: pip<22.0 for python 3.6 - pip3 install --upgrade \ - pip~=21.0 \ - wheel \ - setuptools - python3 -m venv .venv + make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/director - pip3 install -r requirements/ci.txt + make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { # shellcheck source=/dev/null source .venv/bin/activate pushd services/director - pytest \ - --color=yes \ - --cov-append \ - --cov-config=.coveragerc \ - --cov-report=term-missing \ - --cov-report=xml \ - --cov=simcore_service_director \ - --durations=10 \ - --keep-docker-up \ - --log-date-format="%Y-%m-%d %H:%M:%S" \ - --log-format="%(asctime)s %(levelname)s %(message)s" \ - --verbose \ - tests/ + make test-ci-unit + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd services/director + make mypy popd } diff --git a/ci/github/unit-testing/dynamic-scheduler.bash b/ci/github/unit-testing/dynamic-scheduler.bash new file mode 100755 index 00000000000..8bc8a51722a --- /dev/null +++ b/ci/github/unit-testing/dynamic-scheduler.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/dynamic-scheduler + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/dynamic-scheduler + make test-ci-unit + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd services/dynamic-scheduler + make mypy + popd +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/dynamic-sidecar.bash b/ci/github/unit-testing/dynamic-sidecar.bash index 3101500d99b..3816782d5ee 100755 --- a/ci/github/unit-testing/dynamic-sidecar.bash +++ b/ci/github/unit-testing/dynamic-sidecar.bash @@ -6,14 +6,14 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate + sudo ./ci/github/helpers/install_7zip.bash pushd services/dynamic-sidecar make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +25,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate 
+ uv pip install mypy pushd services/dynamic-sidecar make mypy popd diff --git a/ci/github/unit-testing/frontend.bash b/ci/github/unit-testing/frontend.bash deleted file mode 100755 index cffd77cf64a..00000000000 --- a/ci/github/unit-testing/frontend.bash +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -# http://redsymbol.net/articles/unofficial-bash-strict-mode/ -set -o errexit # abort on nonzero exitstatus -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes -IFS=$'\n\t' - -install() { - npm install - make -C services/static-webserver/client clean - npx eslint --version - make -C services/static-webserver/client info -} - -test() { - echo "# Running Linter" - npm run linter - - pushd services/static-webserver/client - - echo "# Building build version" - make compile - - echo "# Building source version" - make compile-dev flags=--machine-readable - - echo "# Serving source version" - make serve-dev flags="--machine-readable --target=source --listen-port=8080" detached=test-server - - #TODO: move this inside qx-kit container - echo "# Waiting for build to complete" - while ! nc -z localhost 8080; do - sleep 1 # wait for 10 second before check again - done - - # FIXME: reports ERROR ReferenceError: URL is not defined. See https://github.com/ITISFoundation/osparc-simcore/issues/1071 - ## node source-output/resource/qxl/testtapper/run.js --diag --verbose http://localhost:8080/testtapper - wget --spider http://localhost:8080/ - - make clean - popd - - #TODO: no idea what is this doing... disabled at the moment since travis is supposed to do it as well - - # # prepare documentation site ... - # git clone --depth 1 https://github.com/ITISFoundation/itisfoundation.github.io.git - # rm -rf itisfoundation.github.io/.git - - # # if we have old cruft hanging around, we should remove all this will - # # only trigger once - # if [ -d itisfoundation.github.io/transpiled ]; then - # rm -rf itisfoundation.github.io/* - # fi - - # # add the default homepage - # cp -rp docs/webdocroot/* itisfoundation.github.io - - # # add our build - # if [ -d services/static-webserver/client/build-output ]; then - # rm -rf itisfoundation.github.io/frontend - # cp -rp services/static-webserver/client/build-output itisfoundation.github.io/frontend - # fi -} - -# Check if the function exists (bash specific) -if declare -f "$1" > /dev/null -then - # call arguments verbatim - "$@" -else - # Show a helpful error - echo "'$1' is not a known function name" >&2 - exit 1 -fi diff --git a/ci/github/unit-testing/invitations.bash b/ci/github/unit-testing/invitations.bash index c0c4be650ed..e05fdecfdaa 100755 --- a/ci/github/unit-testing/invitations.bash +++ b/ci/github/unit-testing/invitations.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/invitations make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/invitations make mypy popd diff --git a/ci/github/unit-testing/models-library.bash b/ci/github/unit-testing/models-library.bash index 17834863eb5..5bf385d1f8b 100755 --- a/ci/github/unit-testing/models-library.bash +++ b/ci/github/unit-testing/models-library.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash 
ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd packages/models-library make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/models-library make mypy popd diff --git a/ci/github/unit-testing/notifications-library.bash b/ci/github/unit-testing/notifications-library.bash new file mode 100755 index 00000000000..acbeba7d0f6 --- /dev/null +++ b/ci/github/unit-testing/notifications-library.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/notifications-library + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd packages/notifications-library + make tests-ci + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd packages/notifications-library + make mypy + popd +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/notifications.bash b/ci/github/unit-testing/notifications.bash new file mode 100755 index 00000000000..4f78013c19e --- /dev/null +++ b/ci/github/unit-testing/notifications.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/notifications + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/notifications + make test-ci-unit + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd services/notifications + make mypy + popd +} + +# Check if the function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/payments.bash b/ci/github/unit-testing/payments.bash new file mode 100755 index 00000000000..fca02db45f4 --- /dev/null +++ b/ci/github/unit-testing/payments.bash @@ -0,0 +1,43 @@ +#!/bin/bash +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +install() { + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/payments + make install-ci + popd + uv pip list +} + +test() { + # shellcheck source=/dev/null + source .venv/bin/activate + pushd services/payments + make test-ci-unit + popd +} + +typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy + pushd services/payments + make mypy + popd +} + +# Check if the 
function exists (bash specific) +if declare -f "$1" >/dev/null; then + # call arguments verbatim + "$@" +else + # Show a helpful error + echo "'$1' is not a known function name" >&2 + exit 1 +fi diff --git a/ci/github/unit-testing/postgres-database.bash b/ci/github/unit-testing/postgres-database.bash index fd613cf7b66..7ffa21d1ebb 100755 --- a/ci/github/unit-testing/postgres-database.bash +++ b/ci/github/unit-testing/postgres-database.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd packages/postgres-database make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/postgres-database make mypy popd diff --git a/ci/github/unit-testing/python-linting.bash b/ci/github/unit-testing/python-linting.bash index 8cea7414cad..b810efa5896 100755 --- a/ci/github/unit-testing/python-linting.bash +++ b/ci/github/unit-testing/python-linting.bash @@ -6,12 +6,16 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash + make devenv + # shellcheck source=/dev/null + source .venv/bin/activate bash ci/helpers/install_pylint.bash - pip freeze + uv pip freeze } test() { + # shellcheck source=/dev/null + source .venv/bin/activate make pylint } diff --git a/ci/github/unit-testing/service-integration.bash b/ci/github/unit-testing/service-integration.bash index c7492b07b94..d0037314899 100755 --- a/ci/github/unit-testing/service-integration.bash +++ b/ci/github/unit-testing/service-integration.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd packages/service-integration make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/service-integration make mypy popd diff --git a/ci/github/unit-testing/service-library.bash b/ci/github/unit-testing/service-library.bash index c0f22069453..e7845743303 100755 --- a/ci/github/unit-testing/service-library.bash +++ b/ci/github/unit-testing/service-library.bash @@ -6,16 +6,15 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' # NOTE: notice that the CI uses [all] -# TODO: add STEPS where pip-sync individual extras and test separately install_all() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate + sudo ./ci/github/helpers/install_7zip.bash pushd packages/service-library make "install-ci[all]" popd - .venv/bin/pip list --verbose + uv pip list } test_all() { @@ -27,6 +26,9 @@ test_all() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/service-library make mypy popd diff --git a/ci/github/unit-testing/settings-library.bash b/ci/github/unit-testing/settings-library.bash index 2cc53a33e05..d36cc3b9225 100755 --- a/ci/github/unit-testing/settings-library.bash +++ b/ci/github/unit-testing/settings-library.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # 
shellcheck source=/dev/null source .venv/bin/activate pushd packages/settings-library make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -25,6 +24,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/settings-library make mypy popd diff --git a/ci/github/unit-testing/simcore-sdk.bash b/ci/github/unit-testing/simcore-sdk.bash index bbda509702a..b3cc1152d42 100755 --- a/ci/github/unit-testing/simcore-sdk.bash +++ b/ci/github/unit-testing/simcore-sdk.bash @@ -9,14 +9,13 @@ DOCKER_IMAGE_TAG=$(exec ci/helpers/build_docker_image_tag.bash) export DOCKER_IMAGE_TAG install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd packages/simcore-sdk make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { @@ -28,6 +27,9 @@ test() { } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd packages/simcore-sdk make mypy popd diff --git a/ci/github/unit-testing/storage.bash b/ci/github/unit-testing/storage.bash index 1f9f258b3e6..3295e4d6895 100755 --- a/ci/github/unit-testing/storage.bash +++ b/ci/github/unit-testing/storage.bash @@ -6,25 +6,27 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/storage make install-ci popd - .venv/bin/pip list --verbose + uv pip list } test() { # shellcheck source=/dev/null source .venv/bin/activate pushd services/storage - make test-ci-unit + make test-ci-unit pytest-parameters="--disk-usage" popd } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/storage make mypy popd diff --git a/ci/github/unit-testing/webserver.bash b/ci/github/unit-testing/webserver.bash index 4cf936a9304..55aa9ad19b5 100755 --- a/ci/github/unit-testing/webserver.bash +++ b/ci/github/unit-testing/webserver.bash @@ -6,14 +6,13 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' install() { - bash ci/helpers/ensure_python_pip.bash make devenv # shellcheck source=/dev/null source .venv/bin/activate pushd services/web/server make install-ci popd - .venv/bin/pip list --verbose + uv pip list } # isolated = these tests are (IMO) real unit tests, they do not need any dependencies and were already in the root test/unit folder before @@ -27,7 +26,7 @@ test_isolated() { # shellcheck source=/dev/null source .venv/bin/activate pushd services/web/server - make test-ci-unit test-subfolder=isolated pytest-parameters="--numprocesses=auto" + make test-ci-unit test-path=isolated pytest-parameters="--numprocesses=auto" popd } @@ -36,11 +35,14 @@ test_with_db() { source .venv/bin/activate pushd services/web/server echo "testing in services/web/server/tests/unit/with_dbs/$1" - make test-ci-unit test-subfolder="with_dbs/$1" + make test-ci-unit test-path="with_dbs/$1" popd } typecheck() { + # shellcheck source=/dev/null + source .venv/bin/activate + uv pip install mypy pushd services/web/server make mypy popd diff --git a/ci/helpers/ensure_python_pip.bash b/ci/helpers/ensure_python_pip.bash index 77b6bb88f78..e18b9839347 100755 --- a/ci/helpers/ensure_python_pip.bash +++ b/ci/helpers/ensure_python_pip.bash @@ -11,7 +11,7 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' # Pin pip version to a compatible release 
https://www.python.org/dev/peps/pep-0440/#compatible-release -PIP_VERSION=23.0 +PIP_VERSION=24.0 echo "INFO:" "$(python --version)" "@" "$(command -v python)" @@ -20,7 +20,7 @@ python -m ensurepip echo "INFO:" "$(pip --version)" "@" "$(command -v pip)" -pip3 install --upgrade \ +pip install --upgrade \ pip~=$PIP_VERSION \ wheel \ setuptools diff --git a/ci/helpers/install_pylint.bash b/ci/helpers/install_pylint.bash index d2ab10cc81b..3a32516aa82 100755 --- a/ci/helpers/install_pylint.bash +++ b/ci/helpers/install_pylint.bash @@ -13,9 +13,10 @@ CURDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" REQUIREMENTS=packages/service-library/requirements/_tools.txt PYLINT_VERSION="$(grep pylint== $REQUIREMENTS | awk '{print $1}')" -pip3 install "$PYLINT_VERSION" +uv pip install "$PYLINT_VERSION" # Minimal packages to pass linter -pip install -r "$CURDIR/requirements.txt" +echo "$CURDIR/requirements/requirements.txt" +uv pip install -r "$CURDIR/requirements/requirements.txt" echo "INFO:" "$(pylint --version)" "@" "$(command -v pylint)" diff --git a/ci/helpers/requirements.txt b/ci/helpers/requirements.txt deleted file mode 100644 index dc7ae81f90a..00000000000 --- a/ci/helpers/requirements.txt +++ /dev/null @@ -1,65 +0,0 @@ -# -# This file is autogenerated by pip-compile with python 3.9 -# To update, run: -# -# pip-compile --output-file=requirements.txt requirements.in -# -aiohttp==3.8.3 - # via -r requirements.in -aiosignal==1.2.0 - # via aiohttp -anyio==3.6.2 - # via starlette -async-timeout==4.0.2 - # via aiohttp -attrs==22.1.0 - # via aiohttp -certifi==2022.9.24 - # via requests -charset-normalizer==2.1.1 - # via - # aiohttp - # requests -docker==6.0.0 - # via -r requirements.in -fastapi==0.85.1 - # via -r requirements.in -frozenlist==1.3.1 - # via - # aiohttp - # aiosignal -idna==3.4 - # via - # anyio - # requests - # yarl -multidict==6.0.2 - # via - # aiohttp - # yarl -packaging==21.3 - # via docker -pydantic==1.10.2 - # via fastapi -pyjwt==2.6.0 - # via -r requirements.in -pyparsing==3.0.9 - # via packaging -requests==2.28.1 - # via docker -sniffio==1.3.0 - # via anyio -starlette==0.20.4 - # via fastapi -typing-extensions==4.4.0 - # via - # pydantic - # starlette -urllib3==1.26.12 - # via - # docker - # requests -websocket-client==1.4.1 - # via docker -yarl==1.8.1 - # via aiohttp diff --git a/ci/helpers/requirements/Makefile b/ci/helpers/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/ci/helpers/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. _migration.txt: _base.txt diff --git a/ci/helpers/requirements.in b/ci/helpers/requirements/requirements.in similarity index 76% rename from ci/helpers/requirements.in rename to ci/helpers/requirements/requirements.in index 52d348e5e29..5da5f6589c5 100644 --- a/ci/helpers/requirements.in +++ b/ci/helpers/requirements/requirements.in @@ -2,7 +2,8 @@ # # Installing these void e.g. 
E0611: No name 'UploadFile' in module 'fastapi' (no-name-in-module) # +--constraint ../../../requirements/constraints.txt + aiohttp -docker fastapi -pyjwt +docker diff --git a/ci/helpers/requirements/requirements.txt b/ci/helpers/requirements/requirements.txt new file mode 100644 index 00000000000..872f14cc7e1 --- /dev/null +++ b/ci/helpers/requirements/requirements.txt @@ -0,0 +1,67 @@ +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/requirements.in +aiosignal==1.3.1 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.3.0 + # via starlette +attrs==23.2.0 + # via aiohttp +certifi==2024.12.14 + # via + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +docker==7.1.0 + # via -r requirements/requirements.in +fastapi==0.115.12 + # via -r requirements/requirements.in +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +idna==3.7 + # via + # anyio + # requests + # yarl +multidict==6.0.5 + # via + # aiohttp + # yarl +propcache==0.3.1 + # via + # aiohttp + # yarl +pydantic==2.10.5 + # via + # -c requirements/../../../requirements/constraints.txt + # fastapi +pydantic-core==2.27.2 + # via pydantic +requests==2.32.3 + # via docker +sniffio==1.3.1 + # via anyio +starlette==0.46.2 + # via + # -c requirements/../../../requirements/constraints.txt + # fastapi +typing-extensions==4.12.2 + # via + # fastapi + # pydantic + # pydantic-core +urllib3==2.3.0 + # via + # -c requirements/../../../requirements/constraints.txt + # docker + # requests +yarl==1.20.0 + # via aiohttp diff --git a/ci/helpers/show_system_versions.bash b/ci/helpers/show_system_versions.bash index c0eabe0bdfa..f8e076066fc 100755 --- a/ci/helpers/show_system_versions.bash +++ b/ci/helpers/show_system_versions.bash @@ -6,7 +6,7 @@ set -o pipefail # don't hide errors within pipes IFS=$'\n\t' echo "------------------------------ environs -----------------------------------" -env +env | sort echo "------------------------------ uname -----------------------------------" uname -a @@ -14,25 +14,44 @@ lsb_release -a echo "------------------------------ python -----------------------------------" if command -v python; then - python --version + python --version fi echo "------------------------------ python3 -----------------------------------" if command -v python3; then - python3 --version + python3 --version +fi + +echo "------------------------------ pip -----------------------------------" +if command -v pip; then + pip --version + echo "cache location:" + pip cache dir +fi + +echo "------------------------------ uv -----------------------------------" +if command -v uv; then + uv --version + echo "cache location:" + uv cache dir fi echo "------------------------------ docker -----------------------------------" if command -v docker; then - docker version + docker version fi echo "------------------------------ docker buildx-----------------------------------" if command -v docker; then - docker buildx version + docker buildx version fi echo "------------------------------ docker-compose -----------------------------------" if command -v docker-compose; then - docker-compose version + docker-compose version +fi + +echo "------------------------------ docker compose -----------------------------------" +if command -v docker; then + docker compose version fi diff --git a/docs/coding-conventions.md b/docs/coding-conventions.md index 493a37840a6..f3f23fedd71 100644 --- 
a/docs/coding-conventions.md +++ b/docs/coding-conventions.md @@ -1,13 +1,72 @@ -# Coding Conventions and Linters +# Coding Conventions, Linters and Definitions -Coding styles and linters are provided for the Javascript and Python. +Some conventions on coding style and tools for the Javascript and Python code in this repository. -## Javascript +---- -In general the `qooxdoo` naming convention/style is followed. The [Access](http://qooxdoo.org/docs/#/core/oo_feature_summary?id=access) paragraph is the most notable. It is recommended to read the entire document. +## Definitions -Have a look at `ESLint`'s configuration files [.eslintrc.json](.eslintrc.json) and [.eslintignore](.eslintignore). +What is a ... + +- **Controller-Service-Repository** design-pattern ? + - An introduction: https://tom-collings.medium.com/controller-service-repository-16e29a4684e5 + - Example of adopted convention: https://github.com/ITISFoundation/osparc-simcore/pull/4389 + + + +---- +## General Coding Conventions + + + +### CC1: Can I use ``TODO:``, ``FIXME:``? + +We should avoid merging PRs with ``TODO:`` and ``FIXME:`` into master. One of our bots detects those and flags them as code-smells. If we still want to keep this idea/fix noted in the code, those can be rewritten as ``NOTE:`` and should be extended with a link to a github issue with more details. For context, see [discussion here](https://github.com/ITISFoundation/osparc-simcore/pull/3380#discussion_r979893502). + + +### CC2: No commented code + +Avoid commented code, but if you *really* want to keep it then add an explanatory `NOTE:` +```python +import os +# import bar +# x = "not very useful" + +# NOTE: I need to keep this because ... +# import foo +# x = "ok" +``` + +### CC3: Naming fixtures that return `Callable`s + +- If the callable creates an instance, you can use `_factory` as suffix instead (e.g. the [`tmp_path_factory` fixture](https://docs.pytest.org/en/7.1.x/how-to/tmp_path.html#the-tmp-path-factory-fixture)) +- Use a verb as prefix, as seen in the example below + + +```python + +@pytest.fixture +def something() -> Something: + return Something("nice") + +@pytest.fixture +def create_something() -> Callable[..., Something]: + + def _create(*args, **kwargs) -> Something: + + # ... + return _create +def test_it(something: Something, create_something: Callable[..., Something]): + + new_something = create_something(color="blue") + assert new_something != something + +``` + +### CC4: ... + +---- ## Python In short we use the following naming convention ( roughly [PEP8](https://peps.python.org/pep-0008/) ): @@ -24,35 +83,60 @@ In short we use the following naming convention ( roughly [PEP8](https://peps.p - We encourage marking protected/private entities. We do it adding the prefix `_`/`__`: e.g. `_PROTECTED_CONSTANT`, `A.__private_func` - We encourage **meaningful** type annotations +- We encourage [pep257] for **simple** code documentation + - Prioritize good variable names and type annotations over verbose and redundant documentation + - Examples of useful documentation: + - Raised *Exceptions* in a function + - *Rationale* of a design + - *Extra information* on variable/argument that cannot be deduced from its name or type annotation + - Use vscode tool `njpwerner.autodocstring` + - See [example](https://github.com/NilsJPWerner/autoDocstring/blob/HEAD/docs/pep257.md) of a [pep257] docstring. + +### For the rest ... (tools) -For the rest basically: - [black] will enforce the style: Just use it.
- [pylint] will check some extra conventions: see [.pylintrc](../.pylintrc). -- [mypy] will check syntax : see [mypy.ini](../mypy.ini) + - ``make pylint`` recipe available on ``packages`` or ``services`` +- [mypy] is a type-checker that will check type annotations: see [mypy.ini](../mypy.ini) + - See intro in [mypy-doc] + - ``make mypy`` recipe available on ``packages`` or ``services`` -[mypy]:https://www.mypy-lang.org/ -[black]:https://black.readthedocs.io/en/stable/index.html -[pylint]:https://pylint.readthedocs.io/en/latest/ +---- ## Postgres -### Foreign keys - -- Name pattern: ```fk_$(this_table)_$(this_column)```, for example ```fk_projects_to_product_product_name``` +- **Foreign Keys** follow this name pattern: ```fk_$(this_table)_$(this_column)```, for example ```fk_projects_to_product_product_name``` +---- ## Shell Scripts - Recommended style: https://google.github.io/styleguide/shellguide.html - Automatic analysis tool: [shellcheck](https://www.shellcheck.net) - see ``scripts/shellcheck.bash`` and ``.vscode/settings.template.json`` +- Recommended to place them inside a ``scripts`` folder -## General - -### CC1: Can I use ``TODO:``, ``FIXME:``? +---- +## Javascript -We should avoid merging PRs with ``TODO:`` and ``FIXME:`` into master. One of our bots detects those and flag them as code-smells. If we still want to keep this idea/fix noted in the code, those can be rewritten as ``NOTE:`` and should be extended with a link to a github issue with more details. For a context, see [discussion here](https://github.com/ITISFoundation/osparc-simcore/pull/3380#discussion_r979893502). +In general the `qooxdoo` naming convention/style is followed. The [Access](http://qooxdoo.org/docs/#/core/oo_feature_summary?id=access) paragraph is the most notable. It is recommended to read the entire document. + +Have a look at `ESLint`'s configuration files [.eslintrc.json](.eslintrc.json) and [.eslintignore](.eslintignore). + + + + + +[black]:https://black.readthedocs.io/en/stable/index.html +[mypy-doc]:https://mypy.readthedocs.io/en/latest/ +[mypy]:https://www.mypy-lang.org/ +[pep257]:https://peps.python.org/pep-0257/ +[pylint]:https://pylint.readthedocs.io/en/latest/ + +# My first osparc-simcore PR: common pitfalls + +- Make sure to run `make mypy` and `make pylint`, as the associated github-actions are required to pass. If you include new dependencies in `requirements/*.in`, make sure to run `make touch && make reqs "upgrade=NAME_OF_YOUR_NEW_DEPENDENCY"`. It is best to do this inside a reproducible environment; for this purpose, a shell inside a docker container can be used: go to `osparc-simcore/requirements/tools` and run `make shell`. Inside the new shell the osparc-simcore repo is placed in `~`; run `make reqs` from inside this shell. diff --git a/docs/controller-service-repository.drawio.svg b/docs/controller-service-repository.drawio.svg new file mode 100644 index 00000000000..6a0e6ae91d2 --- /dev/null +++ b/docs/controller-service-repository.drawio.svg @@ -0,0 +1,479 @@
+ [479 lines of drawio-generated SVG markup omitted. The diagram shows the Controller-Service-Repository layering: a web APP (and OTHER web APPs) -> CONTROLLER -> SERVICE -> REPOSITORY -> Persistence, annotated with "Dependencies go inwards". Each domain (Domain A, Domain B) exposes a rest api and an rpc api (io: schema-model, http-errors / rpc-exceptions), a service api (io: domain-model, domain-errors) and a repository api (io: domain-model, domain-errors). SVG fallback text: "Text is not SVG - cannot display".]
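To make the layering in the diagram above concrete, here is a minimal, self-contained Python sketch of the Controller-Service-Repository pattern that `docs/coding-conventions.md` links to. All names in it (`Product`, `ProductRepository`, `ProductService`, the plain-dict responses) are illustrative assumptions and are not taken from the osparc-simcore codebase; in the actual services the controller layer would typically be a FastAPI or aiohttp handler and the repository would wrap a database.

```python
# Hypothetical sketch of the Controller-Service-Repository layering.
from dataclasses import dataclass


# --- domain layer: models and errors shared by service and repository APIs --
@dataclass
class Product:  # domain-model
    name: str
    price: float


class ProductNotFoundError(Exception):  # domain-error
    """Raised by the repository/service layers, never leaked as-is to HTTP."""


# --- repository: talks to persistence, speaks domain-models/domain-errors ---
class ProductRepository:
    def __init__(self) -> None:
        # stands in for a database table
        self._rows: dict[str, float] = {"pen": 1.5}

    def get(self, name: str) -> Product:
        try:
            return Product(name=name, price=self._rows[name])
        except KeyError as err:
            raise ProductNotFoundError(name) from err


# --- service: business logic, depends only on the repository ----------------
class ProductService:
    def __init__(self, repo: ProductRepository) -> None:
        self._repo = repo

    def get_price_with_vat(self, name: str, vat: float = 0.2) -> float:
        product = self._repo.get(name)
        return round(product.price * (1 + vat), 2)


# --- controller: maps domain results/errors to schema-models/http-errors ----
def get_product_price_controller(name: str, service: ProductService) -> dict:
    try:
        return {"status": 200, "data": {"price": service.get_price_with_vat(name)}}
    except ProductNotFoundError:
        return {"status": 404, "error": f"product '{name}' not found"}


if __name__ == "__main__":
    service = ProductService(ProductRepository())
    print(get_product_price_controller("pen", service))  # {'status': 200, ...}
    print(get_product_price_controller("ink", service))  # {'status': 404, ...}
```

The design point of the diagram is that dependencies only point inwards: the controller imports the service, the service imports the repository, and only schema-models and http/rpc errors cross the outer boundary, while domain-models and domain-errors stay between the service and repository layers.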
diff --git a/docs/devops-checklist.md b/docs/devops-checklist.md new file mode 100644 index 00000000000..e9752dc69cd --- /dev/null +++ b/docs/devops-checklist.md @@ -0,0 +1,15 @@ +# Devops checklist + +- No ENV changes or I properly updated ENV ([read the instruction](https://git.speag.com/oSparc/osparc-ops-deployment-configuration/-/blob/configs/README.md?ref_type=heads#how-to-update-env-variables)) + +- Some checks that might help your code run stably in production, and help devops assess criticality. + - How can DevOps check the health of the service ? + - How can DevOps safely and gracefully restart the service ? + - How and why would this code fail ? + - What kind of metrics are you exposing ? + - Is there any documentation/design specification for the service ? + - How (e.g. through which loglines) can DevOps detect unexpected situations that require escalation to a human ? + - What are the resource limitations (CPU, RAM) expected for this service ? + - Are all relevant variables documented and adjustable via environment variables (i.e. no hardcoded magic numbers) ? + +Ref: Modified from https://oschvr.com/posts/what-id-like-as-sre/ diff --git a/docs/dyn-services.md b/docs/dyn-services.md new file mode 100644 index 00000000000..fd0dd8320b7 --- /dev/null +++ b/docs/dyn-services.md @@ -0,0 +1,22 @@ +# Dynamic services + +## Definitions + + +### legacy dynamic service: + is managed by the director-v0 + can be 1 or more docker services that can run anywhere in the cluster +### modern dynamic service: + the service is managed via the dynamic-sidecar by the director-v2 + is composed of at least a dynamic-sidecar that acts as a pod controller + is composed of at least a reverse-proxy that acts as the service web entrypoint + can be 1 or more docker containers that run on the same node as the dynamic-sidecar + +## How to determine if a service is legacy or not + +*Taken from @sanderegg via https://github.com/ITISFoundation/osparc-simcore/issues/3964#issuecomment-1486300837* + +1. list all the services +2. get all the ones containing the docker image label `simcore.service.paths-mapping`; these are modern. Remove them from the list. +3. In the modern services, check for the docker image label `simcore.service.compose-spec`. If it is available, look for the services listed in this docker image label and remove them from the original list as well +4. what remains are the legacy services diff --git a/docs/env-vars.md b/docs/env-vars.md new file mode 100644 index 00000000000..24eb75c0c6a --- /dev/null +++ b/docs/env-vars.md @@ -0,0 +1,9 @@ +# Environment variables management + +As a developer you will need to extend the current env vars for a service. + +The following rules must be followed: + +1. for each service that requires it, add it to the `services/docker-compose.yml` file (such as `MY_VAR=${MY_VAR}`) +2. add a meaningful default value for development inside `.env-devel` so that developers can work, and that `osparc-simcore` is **self contained**. +3.
inside the repo where devops keep all the secrets follow the instructions to add the new env var diff --git a/docs/init-prompt-ack_flow.drawio.png b/docs/init-prompt-ack_flow.drawio.png new file mode 100644 index 00000000000..a0174691e3e Binary files /dev/null and b/docs/init-prompt-ack_flow.drawio.png differ diff --git a/docs/llm-prompts/pydantic-annotated-fields.md b/docs/llm-prompts/pydantic-annotated-fields.md new file mode 100644 index 00000000000..9b128e7bd72 --- /dev/null +++ b/docs/llm-prompts/pydantic-annotated-fields.md @@ -0,0 +1,84 @@ +# Prompt + +``` +Please convert all pydantic model fields that use `Field()` with default values to use the Annotated pattern instead. +Follow these guidelines: + +1. Move default values outside of `Field()` like this: `field_name: Annotated[field_type, Field(description="")] = default_value`. +2. Keep all other parameters like validation_alias and descriptions inside `Field()`. +3. For fields using default_factory, keep that parameter as is in the `Field()` constructor, but set the default value outside to DEFAULT_FACTORY from common_library.basic_types. Example: `field_name: Annotated[dict_type, Field(default_factory=dict)] = DEFAULT_FACTORY`. +4. Add the import: `from common_library.basic_types import DEFAULT_FACTORY` if it's not already present. +5. If `Field()` has no parameters (empty), don't use Annotated at all. Just use: `field_name: field_type = default_value`. +6. Leave any model validations, `model_config` settings, and `field_validators` untouched. +``` +## Examples + +### Before: + +```python +from pydantic import BaseModel, Field + +class UserModel(BaseModel): + name: str = Field(default="Anonymous", description="User's display name") + age: int = Field(default=18, ge=0, lt=120) + tags: list[str] = Field(default_factory=list, description="User tags") + metadata: dict[str, str] = Field(default_factory=dict) + is_active: bool = Field(default=True) +``` + +- **After** + +```python +from typing import Annotated +from pydantic import BaseModel, Field +from common_library.basic_types import DEFAULT_FACTORY + +class UserModel(BaseModel): + name: Annotated[str, Field(description="User's display name")] = "Anonymous" + age: Annotated[int, Field(ge=0, lt=120)] = 18 + tags: Annotated[list[str], Field(default_factory=list, description="User tags")] = DEFAULT_FACTORY + metadata: Annotated[dict[str, str], Field(default_factory=dict)] = DEFAULT_FACTORY + is_active: bool = True +``` + +## Another Example with Complex Fields + +### Before: + +```python +from pydantic import BaseModel, Field, field_validator +from datetime import datetime + +class ProjectModel(BaseModel): + id: str = Field(default_factory=uuid.uuid4, description="Unique project identifier") + name: str = Field(default="Untitled Project", min_length=3, max_length=50) + created_at: datetime = Field(default_factory=datetime.now) + config: dict = Field(default={"version": "1.0", "theme": "default"}) + + @field_validator("name") + def validate_name(cls, v): + if v.isdigit(): + raise ValueError("Name cannot be only digits") + return v +``` + +### After: + +```python +from typing import Annotated +from pydantic import BaseModel, Field, field_validator +from datetime import datetime +from common_library.basic_types import DEFAULT_FACTORY + +class ProjectModel(BaseModel): + id: Annotated[str, Field(default_factory=uuid.uuid4, description="Unique project identifier")] = DEFAULT_FACTORY + name: Annotated[str, Field(min_length=3, max_length=50)] = "Untitled Project" + created_at: 
Annotated[datetime, Field(default_factory=datetime.now)] = DEFAULT_FACTORY + config: dict = {"version": "1.0", "theme": "default"} + + @field_validator("name") + def validate_name(cls, v): + if v.isdigit(): + raise ValueError("Name cannot be only digits") + return v +``` diff --git a/docs/messages-guidelines.md b/docs/messages-guidelines.md new file mode 100644 index 00000000000..cc07d2c2d1f --- /dev/null +++ b/docs/messages-guidelines.md @@ -0,0 +1,134 @@ +# Error and Warning Message Guidelines + +These guidelines ensure that messages are user-friendly, clear, and helpful while maintaining a professional tone. πŸš€ + +Some details: + +- Originated from [guidelines](https://wiki.speag.com/projects/SuperMash/wiki/Concepts/GUI) by @eofli and refined iterating with AI +- Here’s the fully expanded and rewritten list of **error and warning message guidelines**, each with: + - A **guideline** + - A **rationale** + - A ❌ **bad example** + - A βœ… **good example** + - A **reference** +- This list is intended to be short enough to be read and understood for humans as well as complete so that it can be used as context for automatic correction of error/warning messages + +--- + +## 1. Be Clear and Concise + +- **Guideline:** Use straightforward language to describe the issue without unnecessary words. +- **Rationale:** Users can quickly understand the problem and take corrective action when messages are simple and to the point. +- ❌ **Bad Example:** + `"An error has occurred due to an unexpected input that couldn't be parsed correctly."` +- βœ… **Good Example:** + `"We couldn't process your request. Please check your input and try again."` +- **[Reference](https://uxwritinghub.com/error-message-examples/)** + +--- + +## 2. Provide Specific and Actionable Information + +- **Guideline:** Clearly state what went wrong and how the user can fix it. +- **Rationale:** Specific guidance helps users resolve issues efficiently, reducing frustration. +- ❌ **Bad Example:** + `"Something went wrong."` +- βœ… **Good Example:** + `"Your session has expired. Please log in again to continue."` +- **[Reference](https://www.nngroup.com/articles/error-message-guidelines/)** + +--- + +## 3. Avoid Technical Jargon + +- **Guideline:** Use plain language instead of technical terms or codes. +- **Rationale:** Non-technical users may not understand complex terminology, hindering their ability to resolve the issue. +- ❌ **Bad Example:** + `"Error 429: Too many requests per second."` +- βœ… **Good Example:** + `"You’ve made too many requests. Please wait a moment and try again."` +- **[Reference](https://cxl.com/blog/error-messages/)** + +--- + +## 4. Use a Polite and Non-Blaming Tone + +- **Guideline:** Frame messages in a way that doesn't place blame on the user. +- **Rationale:** A respectful tone maintains a positive user experience and encourages users to continue using the application. +- ❌ **Bad Example:** + `"You entered the wrong password."` +- βœ… **Good Example:** + `"The password doesn't match. Please try again."` +- **[Reference](https://atlassian.design/content/writing-guidelines/writing-error-messages/)** + +--- + +## 5. Avoid Negative Words and Phrases + +- **Guideline:** Steer clear of words like "error," "failed," "invalid," or "illegal." +- **Rationale:** Positive language reduces user anxiety and creates a more supportive experience. +- ❌ **Bad Example:** + `"Invalid email address."` +- βœ… **Good Example:** + `"The email address format doesn't look correct. 
Please check and try again."` +- **[Reference](https://atlassian.design/content/writing-guidelines/writing-error-messages/)** + +--- + +## 6. Place Messages Appropriately + +- **Guideline:** Display error messages near the relevant input field or in a clear, noticeable location. +- **Rationale:** Proper placement ensures users notice the message and understand where the issue occurred. +- ❌ **Bad Example:** + Showing a generic "Form submission failed" message at the top of the page. +- βœ… **Good Example:** + Placing "Please enter a valid phone number" directly below the phone input field. +- **[Reference](https://www.smashingmagazine.com/2022/08/error-messages-ux-design/)** + +--- + +## 7. Use Inline Validation When Possible + +- **Guideline:** Provide real-time feedback as users interact with input fields. +- **Rationale:** Inline validation allows users to correct errors immediately, enhancing the flow and efficiency of the interaction. +- ❌ **Bad Example:** + Waiting until form submission to show all validation errors. +- βœ… **Good Example:** + Displaying "Password must be at least 8 characters" while the user types. +- **[Reference](https://cxl.com/blog/error-messages/)** + +--- + +## 8. Avoid Using All-Caps and Excessive Punctuation + +- **Guideline:** Refrain from writing messages in all capital letters or using multiple exclamation marks. +- **Rationale:** All-caps and excessive punctuation can be perceived as shouting, which may frustrate users. +- ❌ **Bad Example:** + `"INVALID INPUT!!!"` +- βœ… **Good Example:** + `"This input doesn't look correct. Please check and try again."` +- **[Reference](https://uxwritinghub.com/error-message-examples/)** + +--- + +## 9. Use Humor Sparingly + +- **Guideline:** Incorporate light-hearted language only when appropriate and aligned with the application's tone. +- **Rationale:** While humor can ease tension, it may not be suitable for all users or situations and can sometimes be misinterpreted. +- ❌ **Bad Example:** + `"Oopsie daisy! You broke something!"` +- βœ… **Good Example:** + `"Something went wrong. Try again, or contact support if the issue continues."` +- **[Reference](https://cxl.com/blog/error-messages/)** + +--- + +## 10. Offer Alternative Solutions or Support + +- **Guideline:** If the user cannot resolve the issue independently, provide a way to contact support or access help resources. +- **Rationale:** Offering support options ensures users don't feel stranded and can seek help to resolve their issues. +- ❌ **Bad Example:** + `"Access denied."` +- βœ… **Good Example:** + `"You don't have permission to view this page. Contact support if you think this is a mistake."` +- **[Reference](https://learn.microsoft.com/en-us/dynamics365/business-central/dev-itpro/developer/devenv-error-handling-guidelines/)** diff --git a/docs/releasing-workflow-instructions.md b/docs/releasing-workflow-instructions.md index 4f2043ffb53..0b36ad5276e 100644 --- a/docs/releasing-workflow-instructions.md +++ b/docs/releasing-workflow-instructions.md @@ -144,6 +144,9 @@ A bug was found in version 1.2.0 of the simcore stack. The team decides to fix i # develop the fix here, git commit, git push, have someone review your code git commit -m "this is my awsome fix for this problematic issue" + + + # WARNING: never push this branch BEFORE the actual release is completed. pcrespov did it and the release CI job could not push images!! 
git push --set-upstream origin/hotfix_v1_4_x # - NO NEED to pull request diff --git a/docs/remote-work-aws-ssm.md b/docs/remote-work-aws-ssm.md new file mode 100644 index 00000000000..093ed4e2405 --- /dev/null +++ b/docs/remote-work-aws-ssm.md @@ -0,0 +1,18 @@ +# How to use VSCode on a remote private EC2 +[reference](https://medium.com/@dbpprt/transparently-develop-on-an-ec2-instance-with-vscode-remote-ssh-through-ssm-6e5c5e599ee1) + +## to use from the terminal + +```bash +host i-* mi-* +User ec2-user +ProxyCommand sh -c "aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters 'portNumber=%p'" +``` + +## to use from VSCode + +```bash +host i-*.*.* +User ec2-user +ProxyCommand bash -c "aws ssm start-session --target $(echo %h|cut -d'.' -f1) --profile $(echo %h|/usr/bin/cut -d'.' -f2) --region $(echo %h|/usr/bin/cut -d'.' -f3) --document-name AWS-StartSSHSession --parameters 'portNumber=%p'" +``` diff --git a/docs/steps-to-upgrade-python.md b/docs/steps-to-upgrade-python.md new file mode 100644 index 00000000000..06efeb3a925 --- /dev/null +++ b/docs/steps-to-upgrade-python.md @@ -0,0 +1,23 @@ +# Steps to upgrade python + +This is a guideline for a repo-wide upgrade of Python. Here we assume we are moving from a *current* version ``py3.X`` to a newer version ``py3.Y`` + + +- [ ] Open an issue and paste the following steps (example https://github.com/ITISFoundation/osparc-issues/issues/877) +- [ ] Upgrade tests & tools requirements (in `py3.X`) +- [ ] Upgrade primary libraries (e.g. fastapi, etc) (in `py3.X`) +- [ ] Upgrade ``pip`` (in `py3.X`) +- [ ] Check compatibility and bugs sections in [requirements/constraints.txt](../requirements/constraints.txt) +- [ ] Prune unused libraries repo-wide, see [how-to-prune-requirements.md](../requirements/how-to-prune-requirements.md) (in `py3.X`) +- [ ] Unify versions repo-wide when possible. See [how-to-unify-versions.md](../requirements/how-to-unify-versions.md) +- [ ] Upgrade to `py3.Y` + - read [requirements/how-to-upgrade-python.md](../requirements/how-to-upgrade-python.md) + - read release notes to check for warnings/recommendations for the upgrade +- [ ] Run repo-wide pip-tools with the new python version (all ``requirements.txt`` should at least change doc headers) +- [ ] Check deprecation warnings both in code and libraries +- [ ] Remove backport libraries. See [/requirements/packages-notes.md](../requirements/packages-notes.md) + - https://github.com/ITISFoundation/osparc-simcore/pull/4047 +- [ ] Remove the ``pylint`` `py3.X` github action and add a new step to pylint against the next version of `py3.Y` (if any) +- [ ] Update ``pyupgrade`` config in ``pre-commit-config.yaml``: e.g. ``--py3Y-plus`` +- [ ] Is there something we can automate better? Do it now or open an issue +- [ ] Is there something we can document better? Do it! diff --git a/docs/type-checker.md b/docs/type-checker.md deleted file mode 100644 index f02c6019d8a..00000000000 --- a/docs/type-checker.md +++ /dev/null @@ -1,13 +0,0 @@ -# Type checker - - -- We use [mypy] (intro in [mypy-doc]) -- Single repo-wide ``mypy.ini`` configuration at the osparc base folder -- ``make mypy`` recipe is exposed at every ``packages`` or ``services`` module.
It runs [mypy] on the ``src`` folder -- - - - - -[mypy-doc]:https://mypy.readthedocs.io/en/latest/ -[mypy]:http://mypy-lang.org/ diff --git a/eslint.config.js b/eslint.config.js new file mode 100644 index 00000000000..6628342cd8b --- /dev/null +++ b/eslint.config.js @@ -0,0 +1,6 @@ +module.exports = { + ignores: [ + "services/static-webserver/client/source/resource/", + "services/static-webserver/client/source-output/", + ] +}; diff --git a/mypy.ini b/mypy.ini index bbd2479ab01..6d2dbbae389 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,32 +1,28 @@ -# Global options: +# Global options [mypy] -python_version = 3.9 - +check_untyped_defs = True +disallow_any_generics = False +# disallow_untyped_defs: if True, it enforces things like `def __init__(self) -> CLASSNAME` or `def test_() -> None` which does not worth the effort +disallow_untyped_defs = False +follow_imports = silent +# ignore_missing_imports: setting this to True ignores issues from imported libraries, so do not set it!! +ignore_missing_imports = False +namespace_packages = True +no_implicit_reexport = True +# NOTE: this crashes mypy when declared here, therefore it is declared in the setup.cfg files +# plugins = pydantic.mypy +python_version = 3.10 +show_column_numbers = True +show_error_context = False +strict_optional = True +; no_implicit_optional = True +warn_redundant_casts = True warn_return_any = True warn_unused_configs = True -warn_redundant_casts = True warn_unused_ignores = True -namespace_packages = True - -show_error_context = False -show_column_numbers = True - -strict_optional = False -follow_imports = silent - -disallow_any_generics = False -check_untyped_defs = True -no_implicit_reexport = True - -# If True, it enforces things like `def __init__(self) -> CLASSNAME` or `def test_() -> None` which does not worth the effort -disallow_untyped_defs = False - -# removes all the missing imports stuff from external libraries which is annoying to the least -ignore_missing_imports = True - -plugins = pydantic.mypy - +# SEE https://docs.pydantic.dev/mypy_plugin/#plugin-settings +# SEE https://docs.pydantic.dev/1.10/mypy_plugin/#plugin-settings [pydantic-mypy] init_forbid_extra = True init_typed = True @@ -34,5 +30,5 @@ warn_required_dynamic_aliases = True warn_untyped_fields = True # Per-module options (one is kept as example): -[mypy-aio-pika.*] -ignore_missing_imports = True +; [mypy-aio-pika.*] +; ignore_missing_imports = True diff --git a/package-lock.json b/package-lock.json index d8377077621..2bd1885319f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,691 +1,961 @@ { + "name": "osparc-simcore", + "lockfileVersion": 3, "requires": true, - "lockfileVersion": 1, - "dependencies": { - "@babel/code-frame": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", - "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", + "packages": { + "": { + "devDependencies": { + "@pact-foundation/pact-cli": "^16.0.4", + "babel-eslint": "^10.1.0", + "eslint": "^6.8.0", + "eslint-config-qx": "^0.0.1", + "eslint-plugin-qx-rules": "^0.1.0", + "puppeteer": "^1.19.0", + "puppeteer-to-istanbul": "^1.2.2", + "yargs": "^13.3.0", + "yargs-parser": ">=13.1.2" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "dev": 
true, - "requires": { - "@babel/highlight": "^7.8.3" + "dependencies": { + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/@babel/generator": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.5.tgz", + "integrity": "sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.26.5", + "@babel/types": "^7.26.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" } }, - "@babel/generator": { - "version": "7.8.8", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.8.tgz", - "integrity": "sha512-HKyUVu69cZoclptr8t8U5b6sx6zoWjh8jiUhnuj3MpZuKT2dJ8zPTuiy31luq32swhI0SpwItCIlU8XW7BZeJg==", + "node_modules/@babel/helper-string-parser": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", "dev": true, - "requires": { - "@babel/types": "^7.8.7", - "jsesc": "^2.5.1", - "lodash": "^4.17.13", - "source-map": "^0.5.0" + "engines": { + "node": ">=6.9.0" } }, - "@babel/helper-function-name": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.8.3.tgz", - "integrity": "sha512-BCxgX1BC2hD/oBlIFUgOCQDOPV8nSINxCwM3o93xP4P9Fq6aV5sgv2cOOITDMtCfQ+3PvHp3l689XZvAM9QyOA==", + "node_modules/@babel/helper-validator-identifier": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", "dev": true, - "requires": { - "@babel/helper-get-function-arity": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.8.3" + "engines": { + "node": ">=6.9.0" } }, - "@babel/helper-get-function-arity": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz", - "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==", + "node_modules/@babel/parser": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.7.tgz", + "integrity": "sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w==", "dev": true, - "requires": { - "@babel/types": "^7.8.3" + "dependencies": { + "@babel/types": "^7.26.7" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" } }, - "@babel/helper-split-export-declaration": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz", - "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==", + 
"node_modules/@babel/template": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", + "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", "dev": true, - "requires": { - "@babel/types": "^7.8.3" + "dependencies": { + "@babel/code-frame": "^7.25.9", + "@babel/parser": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" } }, - "@babel/highlight": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", - "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", + "node_modules/@babel/traverse": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.7.tgz", + "integrity": "sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA==", "dev": true, - "requires": { - "chalk": "^2.0.0", - "esutils": "^2.0.2", - "js-tokens": "^4.0.0" + "dependencies": { + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/parser": "^7.26.7", + "@babel/template": "^7.25.9", + "@babel/types": "^7.26.7", + "debug": "^4.3.1", + "globals": "^11.1.0" }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dev": true, "dependencies": { - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true } } }, - "@babel/parser": { - "version": "7.8.8", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.8.tgz", - "integrity": "sha512-mO5GWzBPsPf6865iIbzNE0AvkKF3NE+2S3eRUpE+FE07BOAkXh6G+GW/Pj01hhXjve1WScbaIO4UlY1JKeqCcA==", + "node_modules/@babel/types": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.7.tgz", + "integrity": "sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", "dev": true }, - "@babel/template": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", - "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@pact-foundation/pact-cli": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli/-/pact-cli-16.0.4.tgz", + "integrity": "sha512-qXzJUnXb6XMZyiXwfKgRwUQfpS61uSiLguR2hQWC3m+RrdnzrYug+YBHoACdIlvH6Lj/SQ86/26UJ4Z9V+OYMw==", + "cpu": [ + "x64", + "arm64" + ], + "dev": true, + "os": [ + "darwin", + "linux", + "win32" + ], + "dependencies": { + "chalk": "4.1.2", + "check-types": "11.2.3", + "cross-spawn": "7.0.5", + "mkdirp": "3.0.1", + "needle": "^3.3.1", + "pino": "^9.5.0", + "pino-pretty": "^13.0.0", + "promise-timeout": "1.3.0", + "rimraf": "4.4.1", + "underscore": "1.13.7" + }, + "bin": { + "pact": "bin/pact.js", + "pact-broker": "bin/pact-broker.js", + "pact-message": "bin/pact-message.js", + "pact-mock-service": "bin/pact-mock-service.js", + "pact-provider-verifier": "bin/pact-provider-verifier.js", + "pact-stub-service": "bin/pact-stub-service.js", + "pactflow": "bin/pactflow.js" + }, + "engines": { + "node": ">=16" + }, + "optionalDependencies": { + "@pact-foundation/pact-cli-darwin-arm64": "16.0.4", + "@pact-foundation/pact-cli-darwin-x64": "16.0.4", + "@pact-foundation/pact-cli-linux-arm64": "16.0.4", + "@pact-foundation/pact-cli-linux-x64": "16.0.4", + "@pact-foundation/pact-cli-windows-x64": "16.0.4" + } + }, + "node_modules/@pact-foundation/pact-cli-darwin-arm64": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli-darwin-arm64/-/pact-cli-darwin-arm64-16.0.4.tgz", + "integrity": "sha512-WWAZn+3HrnItVXqh04e99DgCdiW2T6I4ZRg3MPC5HeOQ3aowspPa1+VSoPMhM7txG0ZkmiQUbBiXPJjebhYLwg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@pact-foundation/pact-cli-darwin-x64": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli-darwin-x64/-/pact-cli-darwin-x64-16.0.4.tgz", + "integrity": "sha512-THSBPlwA3boHUlxMAyv11H6RPXYEiNas2D/PmFlwgWqRjNsLxC52wUCimBPMFiRgAZEuMVbyb4spQI4+UqZe9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@pact-foundation/pact-cli-linux-arm64": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli-linux-arm64/-/pact-cli-linux-arm64-16.0.4.tgz", + "integrity": 
"sha512-e4tLUlUJgK2vJG1OlaVx2oJRnFERdMPryVuvkVnJ9Lbd8RLT07s5i10A92rOUkSVYOM0BV6Ulp7GY0brFg4ZMg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@pact-foundation/pact-cli-linux-x64": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli-linux-x64/-/pact-cli-linux-x64-16.0.4.tgz", + "integrity": "sha512-VjEOjStCDR+kCy9WHg8k8nW4zZMqbPaCTKn5xBhgTdG/b1xTc29HZAb2Q/+XHwK8AB3Yi6+BDMeIEp/JBeRy9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@pact-foundation/pact-cli-windows-x64": { + "version": "16.0.4", + "resolved": "https://registry.npmjs.org/@pact-foundation/pact-cli-windows-x64/-/pact-cli-windows-x64-16.0.4.tgz", + "integrity": "sha512-xvVx/xXYPIjuR1PhK+VxiksnQfmq0h6z4WGYZcS3c6ygSMvvcBZL7ZT5zLr+LEF6EnSnD7eQr2QK7cY6YX5ugg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@pact-foundation/pact-cli/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.6", - "@babel/types": "^7.8.6" + "dependencies": { + "balanced-match": "^1.0.0" } }, - "@babel/traverse": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.6.tgz", - "integrity": "sha512-2B8l0db/DPi8iinITKuo7cbPznLCEk0kCxDoB9/N6gGNg/gxOXiR/IcymAFPiBwk5w6TtQ27w4wpElgp9btR9A==", + "node_modules/@pact-foundation/pact-cli/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.8.6", - "@babel/helper-function-name": "^7.8.3", - "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.8.6", - "@babel/types": "^7.8.6", - "debug": "^4.1.0", - "globals": "^11.1.0", - "lodash": "^4.17.13" - }, - "dependencies": { - "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - } + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "@babel/types": { - "version": "7.8.7", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.7.tgz", - "integrity": "sha512-k2TreEHxFA4CjGkL+GYjRyx35W0Mr7DP5+9q6WMkyKXB+904bYmG40syjMFV0oLlhhFCwWl0vA0DyzTDkwAiJw==", + "node_modules/@pact-foundation/pact-cli/node_modules/color-convert": { + "version": 
"2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, - "requires": { - "esutils": "^2.0.2", - "lodash": "^4.17.13", - "to-fast-properties": "^2.0.0" + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/@pact-foundation/pact-cli/node_modules/cross-spawn": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.5.tgz", + "integrity": "sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/glob": { + "version": "9.3.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-9.3.5.tgz", + "integrity": "sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "minimatch": "^8.0.2", + "minipass": "^4.2.4", + "path-scurry": "^1.6.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/minimatch": { + "version": "8.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-8.0.4.tgz", + "integrity": "sha512-W0Wvr9HyFXZRGIDgCicunpQ299OKXs9RgZfaukz4qAW/pJhcpUfupc9c+OObPOFueNy8VSrZgEmDtk6Kh4WzDA==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "dev": true, + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/rimraf": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-4.4.1.tgz", + "integrity": "sha512-Gk8NlF062+T9CqNGn6h4tls3k6T1+/nXdOcSZVikNVtlRdYpA7wRJJMoXmuvOnLW844rPjdQ7JgXCYM6PPC/og==", + "dev": true, 
+ "dependencies": { + "glob": "^9.2.0" + }, + "bin": { + "rimraf": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@pact-foundation/pact-cli/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" } }, - "@types/color-name": { + "node_modules/@types/color-name": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==", "dev": true }, - "acorn": { + "node_modules/acorn": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.1.1.tgz", "integrity": "sha512-add7dgA5ppRPxCFJoAGfMDi7PIBXq1RtGo7BhbLaxwrXPOmw8gq48Y9ozT01hUKy9byMjlR20EJhu5zlkErEkg==", - "dev": true + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } }, - "acorn-jsx": { + "node_modules/acorn-jsx": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", - "dev": true + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0" + } }, - "agent-base": { + "node_modules/agent-base": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-4.3.0.tgz", "integrity": "sha512-salcGninV0nPrwpGNn4VTXBb1SOuXQBiqbrNXoeizJsHrsL6ERFM2Ne3JUSBWRE6aeNJI2ROP/WEEIDUiDe3cg==", "dev": true, - "requires": { + "dependencies": { "es6-promisify": "^5.0.0" + }, + "engines": { + "node": ">= 4.0.0" } }, - "ajv": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz", - "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==", + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, - "requires": { + "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "ajv-keywords": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-1.5.1.tgz", - "integrity": "sha1-MU3QpLM2j609/NxU7eYXG4htrzw=", - "dev": true - }, - "ansi-escapes": { + "node_modules/ansi-escapes": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", "dev": true, - "requires": { + "dependencies": { "type-fest": "^0.11.0" }, - "dependencies": { - "type-fest": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", - "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", - "dev": true - } + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", + "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } }, - "ansi-styles": { + "node_modules/ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, - "requires": { + "dependencies": { "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" } }, - "argparse": { + "node_modules/argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, - "requires": { + "dependencies": { "sprintf-js": "~1.0.2" } }, - "astral-regex": { + "node_modules/astral-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", - "dev": true + "dev": true, + "engines": { + "node": ">=4" + } }, - "async-limiter": { + "node_modules/async-limiter": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==", "dev": true }, - 
"babel-code-frame": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", - "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", + "node_modules/atomic-sleep": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/atomic-sleep/-/atomic-sleep-1.0.0.tgz", + "integrity": "sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==", "dev": true, - "requires": { - "chalk": "^1.1.3", - "esutils": "^2.0.2", - "js-tokens": "^3.0.2" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - } + "engines": { + "node": ">=8.0.0" } }, - "babel-eslint": { + "node_modules/babel-eslint": { "version": "10.1.0", "resolved": "https://registry.npmjs.org/babel-eslint/-/babel-eslint-10.1.0.tgz", "integrity": "sha512-ifWaTHQ0ce+448CYop8AdrQiBsGrnC+bMgfyKFdi6EsPLTAWG+QfyDeM6OH+FmWnKvEq5NnBMLvlBUPKQZoDSg==", + "deprecated": "babel-eslint is now @babel/eslint-parser. 
This package will no longer receive updates.", "dev": true, - "requires": { + "dependencies": { "@babel/code-frame": "^7.0.0", "@babel/parser": "^7.7.0", "@babel/traverse": "^7.7.0", "@babel/types": "^7.7.0", "eslint-visitor-keys": "^1.0.0", "resolve": "^1.12.0" + }, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "eslint": ">= 4.12.1" } }, - "balanced-match": { + "node_modules/balanced-match": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", "dev": true }, - "brace-expansion": { + "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dev": true, - "requires": { + "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, - "buffer-crc32": { + "node_modules/buffer-crc32": { "version": "0.2.13", "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", - "dev": true + "dev": true, + "engines": { + "node": "*" + } }, - "buffer-from": { + "node_modules/buffer-from": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", "dev": true }, - "caller-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-0.1.0.tgz", - "integrity": "sha1-lAhe9jWB7NPaqSREqP6U6CV3dR8=", - "dev": true, - "requires": { - "callsites": "^0.2.0" - }, - "dependencies": { - "callsites": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-0.2.0.tgz", - "integrity": "sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo=", - "dev": true - } - } - }, - "callsites": { + "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true + "dev": true, + "engines": { + "node": ">=6" + } }, - "camelcase": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", - "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", - "dev": true + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } }, - "chalk": { + "node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dev": true, - "requires": { + "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" } }, - "chardet": { + "node_modules/chardet": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", "dev": true }, - "circular-json": { - "version": "0.3.3", - "resolved": 
"https://registry.npmjs.org/circular-json/-/circular-json-0.3.3.tgz", - "integrity": "sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A==", + "node_modules/check-types": { + "version": "11.2.3", + "resolved": "https://registry.npmjs.org/check-types/-/check-types-11.2.3.tgz", + "integrity": "sha512-+67P1GkJRaxQD6PKK0Et9DhwQB+vGg3PM5+aavopCpZT1lj9jeqfvpgTLAWErNj8qApkkmXlu/Ug74kmhagkXg==", "dev": true }, - "cli-cursor": { + "node_modules/cli-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", "dev": true, - "requires": { + "dependencies": { "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" } }, - "cli-width": { + "node_modules/cli-width": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz", "integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=", "dev": true }, - "cliui": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-4.1.0.tgz", - "integrity": "sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ==", + "node_modules/cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", "dev": true, - "requires": { - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0", - "wrap-ansi": "^2.0.0" - }, "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" } }, - "clone": { + "node_modules/clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=", - "dev": true - }, - "co": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", - "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=", - "dev": true - }, - "code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": 
"sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.8" + } }, - "color-convert": { + "node_modules/color-convert": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, - "requires": { + "dependencies": { "color-name": "1.1.3" } }, - "color-name": { + "node_modules/color-name": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, - "concat-map": { + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true + }, + "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", "dev": true }, - "concat-stream": { + "node_modules/concat-stream": { "version": "1.6.2", "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", "dev": true, - "requires": { + "engines": [ + "node >= 0.8" + ], + "dependencies": { "buffer-from": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^2.2.2", "typedarray": "^0.0.6" } }, - "core-util-is": { + "node_modules/core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", "dev": true }, - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "node_modules/cross-spawn": { + "version": "6.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz", + "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==", "dev": true, - "requires": { + "dependencies": { "nice-try": "^1.0.4", "path-key": "^2.0.1", "semver": "^5.5.0", "shebang-command": "^1.2.0", "which": "^1.2.9" }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true - } + "engines": { + "node": ">=4.8" } }, - "d": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", - "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "node_modules/cross-spawn/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, - "requires": { - "es5-ext": "^0.10.50", - "type": "^1.0.1" + "bin": { + "semver": "bin/semver" } }, - "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + 
"node_modules/dateformat": { + "version": "4.6.3", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-4.6.3.tgz", + "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==", "dev": true, - "requires": { + "engines": { + "node": "*" + } + }, + "node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "dependencies": { "ms": "^2.1.1" } }, - "decamelize": { + "node_modules/decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "deep-is": { + "node_modules/deep-is": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", "dev": true }, - "doctrine": { + "node_modules/doctrine": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "requires": { + "dependencies": { "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" } }, - "emoji-regex": { + "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true }, - "end-of-stream": { + "node_modules/end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", "dev": true, - "requires": { + "dependencies": { "once": "^1.4.0" } }, - "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", - "dev": true, - "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" - } - }, - "es6-iterator": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", - "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", - "dev": true, - "requires": { - "d": "1", - "es5-ext": "^0.10.35", - "es6-symbol": "^3.1.1" - } - }, - "es6-map": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/es6-map/-/es6-map-0.1.5.tgz", - "integrity": "sha1-kTbgUD3MBqMBaQ8LsU/042TpSfA=", - "dev": true, - "requires": { - "d": "1", - "es5-ext": "~0.10.14", - "es6-iterator": "~2.0.1", - "es6-set": "~0.1.5", - "es6-symbol": "~3.1.1", - "event-emitter": "~0.3.5" - } - }, - "es6-promise": { + "node_modules/es6-promise": { "version": "4.2.8", "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==", "dev": true }, - "es6-promisify": { + "node_modules/es6-promisify": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/es6-promisify/-/es6-promisify-5.0.0.tgz", "integrity": "sha1-UQnWLz5W6pZ8S2NQWu8IKRyKUgM=", "dev": true, - "requires": { + "dependencies": { "es6-promise": 
"^4.0.3" } }, - "es6-set": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/es6-set/-/es6-set-0.1.5.tgz", - "integrity": "sha1-0rPsXU2ADO2BjbU40ol02wpzzLE=", - "dev": true, - "requires": { - "d": "1", - "es5-ext": "~0.10.14", - "es6-iterator": "~2.0.1", - "es6-symbol": "3.1.1", - "event-emitter": "~0.3.5" - }, - "dependencies": { - "es6-symbol": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.1.tgz", - "integrity": "sha1-vwDvT9q2uhtG7Le2KbTH7VcVzHc=", - "dev": true, - "requires": { - "d": "1", - "es5-ext": "~0.10.14" - } - } - } - }, - "es6-symbol": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", - "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", "dev": true, - "requires": { - "d": "^1.0.1", - "ext": "^1.1.2" + "engines": { + "node": ">=0.8.0" } }, - "es6-weak-map": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-2.0.3.tgz", - "integrity": "sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==", + "node_modules/eslint": { + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", + "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, - "requires": { - "d": "1", - "es5-ext": "^0.10.46", - "es6-iterator": "^2.0.3", - "es6-symbol": "^3.1.1" - } - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, - "escope": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/escope/-/escope-3.6.0.tgz", - "integrity": "sha1-4Bl16BJ4GhY6ba392AOY3GTIicM=", - "dev": true, - "requires": { - "es6-map": "^0.1.3", - "es6-weak-map": "^2.0.1", - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - } - }, - "eslint": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz", - "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==", - "dev": true, - "requires": { + "dependencies": { "@babel/code-frame": "^7.0.0", "ajv": "^6.10.0", "chalk": "^2.1.0", @@ -724,827 +994,515 @@ "text-table": "^0.2.0", "v8-compile-cache": "^2.0.3" }, - "dependencies": { - "@babel/code-frame": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", - "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", - "dev": true, - "requires": { - "@babel/highlight": "^7.8.3" - } - }, - "@babel/highlight": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", - "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", - "dev": true, - "requires": { - "chalk": "^2.0.0", - "esutils": "^2.0.2", - "js-tokens": "^4.0.0" - } - }, - "debug": { - "version": "4.1.1", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - }, - "eslint-scope": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", - "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", - "dev": true, - "requires": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - } - }, - "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "requires": { - "type-fest": "^0.8.1" - } - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - } + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^8.10.0 || ^10.13.0 || >=11.10.1" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, - "eslint-config-qx": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-qx/-/eslint-config-qx-0.1.0.tgz", - "integrity": "sha512-fkQSBji7cwh63SuFoM1byJgO6ZWE2m1C0e8dwxEJy2S5RQrqFAIu7faU0Szi7Cub+bPPKlpVfIv3dfieaaAbYg==", + "node_modules/eslint-config-qx": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/eslint-config-qx/-/eslint-config-qx-0.0.1.tgz", + "integrity": "sha512-OeAw+muVzN21SiOTLZ86ZJn/Tr99caGtLaNuTnhUQz2SegPH9G3Y5zCKdNrs1ngvGOD/bqfNh+x6M2fA07v0hw==", + "deprecated": "moved to @qooxdoo/eslint-qx-rules", "dev": true, - "requires": { - "eslint": "^3.19.0", - "eslint-plugin-qx-rules": "^0.1.0" - }, "dependencies": { - "acorn": { - "version": "5.7.4", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.4.tgz", - "integrity": "sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg==", - "dev": true - }, - "acorn-jsx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-3.0.1.tgz", - "integrity": "sha1-r9+UiPsezvyDSPb7IvRk4ypYs2s=", - "dev": true, - "requires": { - "acorn": "^3.0.4" - }, - "dependencies": { - "acorn": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-3.3.0.tgz", - "integrity": "sha1-ReN/s56No/JbruP/U2niu18iAXo=", - "dev": true - } - } - }, - "ajv": { - "version": "4.11.8", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-4.11.8.tgz", - "integrity": "sha1-gv+wKynmYq5TvcIK8VlHcGc5xTY=", - "dev": true, - "requires": { - "co": "^4.6.0", - "json-stable-stringify": "^1.0.1" - } - }, - "ansi-escapes": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-1.4.0.tgz", - "integrity": "sha1-06ioOzGapneTZisT52HHkRQiMG4=", - "dev": true - }, - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": 
"sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "cli-cursor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-1.0.2.tgz", - "integrity": "sha1-ZNo/fValRBLll5S9Ytw1KV6PKYc=", - "dev": true, - "requires": { - "restore-cursor": "^1.0.1" - } - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "eslint": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-3.19.0.tgz", - "integrity": "sha1-yPxiAcf0DdCJQbh8CFdnOGpnmsw=", - "dev": true, - "requires": { - "babel-code-frame": "^6.16.0", - "chalk": "^1.1.3", - "concat-stream": "^1.5.2", - "debug": "^2.1.1", - "doctrine": "^2.0.0", - "escope": "^3.6.0", - "espree": "^3.4.0", - "esquery": "^1.0.0", - "estraverse": "^4.2.0", - "esutils": "^2.0.2", - "file-entry-cache": "^2.0.0", - "glob": "^7.0.3", - "globals": "^9.14.0", - "ignore": "^3.2.0", - "imurmurhash": "^0.1.4", - "inquirer": "^0.12.0", - "is-my-json-valid": "^2.10.0", - "is-resolvable": "^1.0.0", - "js-yaml": "^3.5.1", - "json-stable-stringify": "^1.0.0", - "levn": "^0.3.0", - "lodash": "^4.0.0", - "mkdirp": "^0.5.0", - "natural-compare": "^1.4.0", - "optionator": "^0.8.2", - "path-is-inside": "^1.0.1", - "pluralize": "^1.2.1", - "progress": "^1.1.8", - "require-uncached": "^1.0.2", - "shelljs": "^0.7.5", - "strip-bom": "^3.0.0", - "strip-json-comments": "~2.0.1", - "table": "^3.7.8", - "text-table": "~0.2.0", - "user-home": "^2.0.0" - } - }, - "espree": { - "version": "3.5.4", - "resolved": "https://registry.npmjs.org/espree/-/espree-3.5.4.tgz", - "integrity": "sha512-yAcIQxtmMiB/jL32dzEp2enBeidsB7xWPLNiw3IIkpVds1P+h7qF9YwJq1yUNzp2OKXgAprs4F61ih66UsoD1A==", - "dev": true, - "requires": { - "acorn": "^5.5.0", - "acorn-jsx": "^3.0.0" - } - }, - "figures": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", - "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5", - "object-assign": "^4.1.0" - } - }, - "file-entry-cache": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-2.0.0.tgz", - "integrity": "sha1-w5KZDD5oR4PYOLjISkXYoEhFg2E=", - "dev": true, - "requires": { - "flat-cache": "^1.2.1", - "object-assign": "^4.0.1" - } - }, - "flat-cache": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-1.3.4.tgz", - "integrity": "sha512-VwyB3Lkgacfik2vhqR4uv2rvebqmDvFu4jlN/C1RzWoJEo8I7z4Q404oiqYCkq41mni8EzQnm95emU9seckwtg==", - "dev": true, - "requires": { - "circular-json": "^0.3.1", - "graceful-fs": "^4.1.2", - "rimraf": "~2.6.2", - "write": "^0.2.1" - } - }, - "globals": { - "version": "9.18.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": 
"sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", - "dev": true - }, - "ignore": { - "version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==", - "dev": true - }, - "inquirer": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-0.12.0.tgz", - "integrity": "sha1-HvK/1jUE3wvHV4X/+MLEHfEvB34=", - "dev": true, - "requires": { - "ansi-escapes": "^1.1.0", - "ansi-regex": "^2.0.0", - "chalk": "^1.0.0", - "cli-cursor": "^1.0.1", - "cli-width": "^2.0.0", - "figures": "^1.3.5", - "lodash": "^4.3.0", - "readline2": "^1.0.1", - "run-async": "^0.1.0", - "rx-lite": "^3.1.2", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.0", - "through": "^2.3.6" - } - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "onetime": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-1.1.0.tgz", - "integrity": "sha1-ofeDj4MUxRbwXs78vEzP4EtO14k=", - "dev": true - }, - "progress": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz", - "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=", - "dev": true - }, - "restore-cursor": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-1.0.1.tgz", - "integrity": "sha1-NGYfRohjJ/7SmRR5FSJS35LapUE=", - "dev": true, - "requires": { - "exit-hook": "^1.0.0", - "onetime": "^1.0.0" - } - }, - "run-async": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-0.1.0.tgz", - "integrity": "sha1-yK1KXhEGYeQCp9IbUw4AnyX444k=", - "dev": true, - "requires": { - "once": "^1.3.0" - } - }, - "slice-ansi": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-0.0.4.tgz", - "integrity": "sha1-7b+JA/ZvfOL46v1s7tZeJkyDGzU=", - "dev": true - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "dev": true - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - }, - "table": { - "version": "3.8.3", - "resolved": "https://registry.npmjs.org/table/-/table-3.8.3.tgz", - "integrity": "sha1-K7xULw/amGGnVdOUf+/Ys/UThV8=", - "dev": true, - "requires": { - "ajv": "^4.7.0", - 
"ajv-keywords": "^1.0.0", - "chalk": "^1.1.1", - "lodash": "^4.0.0", - "slice-ansi": "0.0.4", - "string-width": "^2.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "write": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/write/-/write-0.2.1.tgz", - "integrity": "sha1-X8A4KOJkzqP+kUVUdvejxWbLB1c=", - "dev": true, - "requires": { - "mkdirp": "^0.5.1" - } - } + "eslint-plugin-qx-rules": "^0.0.1" + }, + "engines": { + "node": ">=4.5" + }, + "peerDependencies": { + "eslint": ">=3.4.0" + } + }, + "node_modules/eslint-config-qx/node_modules/eslint-plugin-qx-rules": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-qx-rules/-/eslint-plugin-qx-rules-0.0.1.tgz", + "integrity": "sha512-AAesCN007yFoX5aXN6qE/dW7u/poZk8dhhCMxxW5XbIFCgtWMikfr3aJuErqCpsztcLcYQXMx5U9J8yjM6guFA==", + "deprecated": "moved to @qooxdoo/eslint-qx-plugins", + "dev": true, + "engines": { + "node": ">= 4.5.0" } }, - "eslint-plugin-qx-rules": { + "node_modules/eslint-plugin-qx-rules": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/eslint-plugin-qx-rules/-/eslint-plugin-qx-rules-0.1.0.tgz", "integrity": "sha512-TmldxfvDvatPOtJxr1lFJsC0dTg3idZ3svQwRoR01zoZW7mJvBWxBGHeITUuH7qw9BcNPdyF4cV1fzUXejBvPg==", - "dev": true + "deprecated": "moved to @qooxdoo/eslint-qx-plugins", + "dev": true, + "engines": { + "node": ">= 4.5.0" + } }, - "eslint-utils": { + "node_modules/eslint-utils": { "version": "1.4.3", "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", "dev": true, - "requires": { + "dependencies": { "eslint-visitor-keys": "^1.1.0" + }, + "engines": { + "node": ">=6" } }, - "eslint-visitor-keys": { + "node_modules/eslint-visitor-keys": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==", - "dev": true + "dev": true, + "engines": { + "node": ">=4" + } }, - "espree": { + "node_modules/eslint/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dev": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + 
"node_modules/eslint/node_modules/eslint-scope": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", + "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "dev": true, + "dependencies": { + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/espree": { "version": "6.2.1", "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", "dev": true, - "requires": { + "dependencies": { "acorn": "^7.1.1", "acorn-jsx": "^5.2.0", "eslint-visitor-keys": "^1.1.0" + }, + "engines": { + "node": ">=6.0.0" } }, - "esprima": { + "node_modules/esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } }, - "esquery": { + "node_modules/esquery": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.1.0.tgz", "integrity": "sha512-MxYW9xKmROWF672KqjO75sszsA8Mxhw06YFeS5VHlB98KDHbOSurm3ArsjO60Eaf3QmGMCP1yn+0JQkNLo/97Q==", "dev": true, - "requires": { + "dependencies": { "estraverse": "^4.0.0" + }, + "engines": { + "node": ">=0.6" } }, - "esrecurse": { + "node_modules/esrecurse": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", "dev": true, - "requires": { + "dependencies": { "estraverse": "^4.1.0" + }, + "engines": { + "node": ">=4.0" } }, - "estraverse": { + "node_modules/estraverse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true + "dev": true, + "engines": { + "node": ">=4.0" + } }, - "esutils": { + "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "event-emitter": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", - "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", - "dev": true, - "requires": { - "d": "1", - "es5-ext": "~0.10.14" - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", "dev": true, - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - 
"p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" + "engines": { + "node": ">=0.10.0" } }, - "exit-hook": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/exit-hook/-/exit-hook-1.1.1.tgz", - "integrity": "sha1-8FyiM7SMBdVP/wd2XfhQfpXAL/g=", - "dev": true - }, - "ext": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", - "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", - "dev": true, - "requires": { - "type": "^2.0.0" - }, - "dependencies": { - "type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", - "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==", - "dev": true - } - } - }, - "external-editor": { + "node_modules/external-editor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", "dev": true, - "requires": { + "dependencies": { "chardet": "^0.7.0", "iconv-lite": "^0.4.24", "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" } }, - "extract-zip": { + "node_modules/extract-zip": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", "dev": true, - "requires": { + "dependencies": { "concat-stream": "^1.6.2", "debug": "^2.6.9", "mkdirp": "^0.5.4", "yauzl": "^2.10.0" }, + "bin": { + "extract-zip": "cli.js" + } + }, + "node_modules/extract-zip/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "dev": true, - "requires": { - "pend": "~1.2.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "dev": true - }, - "yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "dev": true, - "requires": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } + "ms": "2.0.0" } }, - "fast-deep-equal": { + "node_modules/extract-zip/node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dev": true, + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/extract-zip/node_modules/ms": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true + }, + "node_modules/extract-zip/node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dev": true, + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/fast-copy": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/fast-copy/-/fast-copy-3.0.2.tgz", + "integrity": "sha512-dl0O9Vhju8IrcLndv2eU4ldt1ftXMqqfgN4H1cpmGV7P6jeB9FwpN9a2c8DPGE1Ys88rNUJVYDHq73CGAGOPfQ==", + "dev": true + }, + "node_modules/fast-deep-equal": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==", "dev": true }, - "fast-json-stable-stringify": { + "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, - "fast-levenshtein": { + "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", "dev": true }, - "figures": { + "node_modules/fast-redact": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/fast-redact/-/fast-redact-3.5.0.tgz", + "integrity": "sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + "dev": true + }, + "node_modules/figures": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", "dev": true, - "requires": { + "dependencies": { "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "file-entry-cache": { + "node_modules/file-entry-cache": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", "dev": true, - "requires": { + "dependencies": { "flat-cache": "^2.0.1" + }, + "engines": { + "node": ">=4" } }, - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + 
"engines": { + "node": ">=8" + } + }, + "node_modules/find-up/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, - "requires": { - "locate-path": "^2.0.0" + "engines": { + "node": ">=8" } }, - "flat-cache": { + "node_modules/flat-cache": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", "dev": true, - "requires": { + "dependencies": { "flatted": "^2.0.0", "rimraf": "2.6.3", "write": "1.0.3" + }, + "engines": { + "node": ">=4" } }, - "flatted": { + "node_modules/flatted": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.1.tgz", "integrity": "sha512-a1hQMktqW9Nmqr5aktAux3JMNqaucxGcjtjWnZLHX7yyPCmlSV3M54nGYbqT8K+0GhF3NBgmJCc3ma+WOgX8Jg==", "dev": true }, - "fs.realpath": { + "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", "dev": true }, - "functional-red-black-tree": { + "node_modules/functional-red-black-tree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", "dev": true }, - "generate-function": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/generate-function/-/generate-function-2.3.1.tgz", - "integrity": "sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==", - "dev": true, - "requires": { - "is-property": "^1.0.2" - } - }, - "generate-object-property": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/generate-object-property/-/generate-object-property-1.2.0.tgz", - "integrity": "sha1-nA4cQDCM6AT0eDYYuTf6iPmdUNA=", + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", "dev": true, - "requires": { - "is-property": "^1.0.0" + "engines": { + "node": "6.* || 8.* || >= 10.*" } }, - "get-caller-file": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.3.tgz", - "integrity": "sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==", - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dev": true, - "requires": { - "pump": "^3.0.0" - } - }, - "glob": { + "node_modules/glob": { "version": "7.1.6", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, - "requires": { + "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.0.4", "once": "^1.3.0", "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": 
"https://github.com/sponsors/isaacs" } }, - "glob-parent": { + "node_modules/glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, - "requires": { + "dependencies": { "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" } }, - "globals": { + "node_modules/globals": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true - }, - "graceful-fs": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", - "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", - "dev": true - }, - "has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - } + "engines": { + "node": ">=4" } }, - "has-flag": { + "node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/help-me": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz", + "integrity": "sha512-7xgomUX6ADmcYzFik0HzAxh/73YlKR9bmFzf51CZwR+b6YtzU2m0u49hQCqV6SvlqIqsaxovfwdvbnsw3b/zpg==", "dev": true }, - "https-proxy-agent": { + "node_modules/https-proxy-agent": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-2.2.4.tgz", "integrity": "sha512-OmvfoQ53WLjtA9HeYP9RNrWMJzzAz1JGaSFr1nijg0PVR1JaD/xbJq1mdEIIlxGpXp9eSe/O2LgU9DJmTPd0Eg==", "dev": true, - "requires": { + "dependencies": { "agent-base": "^4.3.0", "debug": "^3.1.0" + }, + "engines": { + "node": ">= 4.5.0" } }, - "iconv-lite": { + "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dev": true, - "requires": { + "dependencies": { "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" } }, - "ignore": { + "node_modules/ignore": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true + "dev": true, + "engines": { + "node": ">= 4" + } }, - "import-fresh": { + "node_modules/import-fresh": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", "dev": true, - "requires": { + "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" } }, - "imurmurhash": { + "node_modules/imurmurhash": { "version": "0.1.4", "resolved": 
"https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.8.19" + } }, - "inflight": { + "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, - "requires": { + "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, - "inherits": { + "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, - "inquirer": { + "node_modules/inquirer": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.1.0.tgz", "integrity": "sha512-5fJMWEmikSYu0nv/flMc475MhGbB7TSPd/2IpFV4I4rMklboCH2rQjYY5kKiYGHqUF9gvaambupcJFFG9dvReg==", "dev": true, - "requires": { + "dependencies": { "ansi-escapes": "^4.2.1", "chalk": "^3.0.0", "cli-cursor": "^3.1.0", @@ -1559,549 +1517,650 @@ "strip-ansi": "^6.0.0", "through": "^2.3.6" }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/inquirer/node_modules/ansi-styles": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", + "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "dev": true, "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "supports-color": { - "version": "7.1.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "interpret": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", - "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==", - "dev": true + "node_modules/inquirer/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } }, - "invert-kv": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-2.0.0.tgz", - "integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==", + "node_modules/inquirer/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/inquirer/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "dev": true }, - "is-extglob": { + "node_modules/inquirer/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/inquirer/node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/inquirer/node_modules/supports-color": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "is-fullwidth-code-point": { + "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": 
"sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true + "dev": true, + "engines": { + "node": ">=8" + } }, - "is-glob": { + "node_modules/is-glob": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", "dev": true, - "requires": { + "dependencies": { "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" } }, - "is-my-ip-valid": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-my-ip-valid/-/is-my-ip-valid-1.0.0.tgz", - "integrity": "sha512-gmh/eWXROncUzRnIa1Ubrt5b8ep/MGSnfAUI3aRp+sqTCs1tv1Isl8d8F6JmkN3dXKc3ehZMrtiPN9eL03NuaQ==", - "dev": true - }, - "is-my-json-valid": { - "version": "2.20.0", - "resolved": "https://registry.npmjs.org/is-my-json-valid/-/is-my-json-valid-2.20.0.tgz", - "integrity": "sha512-XTHBZSIIxNsIsZXg7XB5l8z/OBFosl1Wao4tXLpeC7eKU4Vm/kdop2azkPqULwnfGQjmeDIyey9g7afMMtdWAA==", - "dev": true, - "requires": { - "generate-function": "^2.0.0", - "generate-object-property": "^1.1.0", - "is-my-ip-valid": "^1.0.0", - "jsonpointer": "^4.0.0", - "xtend": "^4.0.0" - } - }, - "is-promise": { + "node_modules/is-promise": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz", "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=", "dev": true }, - "is-property": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-property/-/is-property-1.0.2.tgz", - "integrity": "sha1-V/4cTkhHTt1lsJkR8msc1Ald2oQ=", - "dev": true - }, - "is-resolvable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==", - "dev": true - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "dev": true - }, - "isarray": { + "node_modules/isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", "dev": true }, - "isexe": { + "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", "dev": true }, - "js-tokens": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", - "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", - "dev": true + "node_modules/joycon": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/joycon/-/joycon-3.1.1.tgz", + "integrity": "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==", + "dev": true, + "engines": { + "node": ">=10" + } }, - "js-yaml": { + "node_modules/js-yaml": { "version": "3.13.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", "dev": true, - "requires": { + "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" } }, - "jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": 
true + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } }, - "json-schema-traverse": { + "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, - "json-stable-stringify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz", - "integrity": "sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=", - "dev": true, - "requires": { - "jsonify": "~0.0.0" - } - }, - "json-stable-stringify-without-jsonify": { + "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", "dev": true }, - "jsonify": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", - "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", - "dev": true - }, - "jsonpointer": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-4.0.1.tgz", - "integrity": "sha1-T9kss04OnbPInIYi7PUfm5eMbLk=", - "dev": true - }, - "lcid": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lcid/-/lcid-2.0.0.tgz", - "integrity": "sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==", - "dev": true, - "requires": { - "invert-kv": "^2.0.0" - } - }, - "levn": { + "node_modules/levn": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", "dev": true, - "requires": { + "dependencies": { "prelude-ls": "~1.1.2", "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" } }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" } }, - "lodash": { + "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "dev": true }, - "map-age-cleaner": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", - "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", - "dev": true, - "requires": { - "p-defer": "^1.0.0" - } - }, - "mem": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz", - "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==", - 
"dev": true, - "requires": { - "map-age-cleaner": "^0.1.1", - "mimic-fn": "^2.0.0", - "p-is-promise": "^2.0.0" - } + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true }, - "mime": { + "node_modules/mime": { "version": "2.4.4", "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.4.tgz", "integrity": "sha512-LRxmNwziLPT828z+4YkNzloCFC2YM4wrB99k+AV5ZbEyfGNWfG8SO1FUXLmLDBSo89NrJZ4DIWeLjy1CHGhMGA==", - "dev": true + "dev": true, + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4.0.0" + } }, - "mimic-fn": { + "node_modules/mimic-fn": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true + "dev": true, + "engines": { + "node": ">=6" + } }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "requires": { + "dependencies": { "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "4.2.8", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-4.2.8.tgz", + "integrity": "sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ==", + "dev": true, + "engines": { + "node": ">=8" } }, - "mkdirp": { + "node_modules/mkdirp": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", "dev": true, - "requires": { + "dependencies": { "minimist": "^1.2.6" }, - "dependencies": { - "minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true - } + "bin": { + "mkdirp": "bin/cmd.js" } }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, - "mute-stream": { + "node_modules/mute-stream": { "version": "0.0.8", "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", "integrity": 
"sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", "dev": true }, - "natural-compare": { + "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", "dev": true }, - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=", - "dev": true + "node_modules/needle": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/needle/-/needle-3.3.1.tgz", + "integrity": "sha512-6k0YULvhpw+RoLNiQCRKOl09Rv1dPLr8hHnVjHqdolKwDrdNyk+Hmrthi4lIGPPz3r39dLx0hsF5s40sZ3Us4Q==", + "dev": true, + "dependencies": { + "iconv-lite": "^0.6.3", + "sax": "^1.2.4" + }, + "bin": { + "needle": "bin/needle" + }, + "engines": { + "node": ">= 4.4.x" + } + }, + "node_modules/needle/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } }, - "nice-try": { + "node_modules/nice-try": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", "dev": true }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "node_modules/on-exit-leak-free": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.2.tgz", + "integrity": "sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==", "dev": true, - "requires": { - "path-key": "^2.0.0" + "engines": { + "node": ">=14.0.0" } }, - "number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true - }, - "once": { + "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", "dev": true, - "requires": { + "dependencies": { "wrappy": "1" } }, - "onetime": { + "node_modules/onetime": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", "dev": true, - "requires": { + "dependencies": { "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" } }, - "optionator": { + "node_modules/optionator": { "version": "0.8.3", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", "dev": true, - "requires": { + "dependencies": { "deep-is": "~0.1.3", "fast-levenshtein": "~2.0.6", "levn": "~0.3.0", "prelude-ls": "~1.1.2", "type-check": "~0.3.2", "word-wrap": "~1.2.3" + }, + 
"engines": { + "node": ">= 0.8.0" } }, - "os-homedir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", - "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", - "dev": true - }, - "os-locale": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", - "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", - "dev": true, - "requires": { - "execa": "^1.0.0", - "lcid": "^2.0.0", - "mem": "^4.0.0" - } - }, - "os-tmpdir": { + "node_modules/os-tmpdir": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true - }, - "p-defer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", - "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=", - "dev": true - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "dev": true - }, - "p-is-promise": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz", - "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==", - "dev": true + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, - "requires": { - "p-try": "^1.0.0" + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "requires": { - "p-limit": "^1.1.0" + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" } }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } }, - "parent-module": { + "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, - "requires": { + "dependencies": { "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" } }, - "path-exists": { + "node_modules/path-exists": { "version": "3.0.0", 
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true + "dev": true, + "engines": { + "node": ">=4" + } }, - "path-is-absolute": { + "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true - }, - "path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "path-key": { + "node_modules/path-key": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "dev": true + "dev": true, + "engines": { + "node": ">=4" + } }, - "path-parse": { + "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, - "pend": { + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/pend": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", "dev": true }, - "pluralize": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-1.2.1.tgz", - "integrity": "sha1-0aIUg/0iu0HlihL6NCGCMUCJfEU=", + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "dev": true }, - "prelude-ls": { + "node_modules/pino": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/pino/-/pino-9.6.0.tgz", + "integrity": "sha512-i85pKRCt4qMjZ1+L7sy2Ag4t1atFcdbEt76+7iRJn1g2BvsnRMGu9p8pivl9fs63M2kF/A0OacFZhTub+m/qMg==", + "dev": true, + "dependencies": { + "atomic-sleep": "^1.0.0", + "fast-redact": "^3.1.1", + "on-exit-leak-free": "^2.1.0", + "pino-abstract-transport": "^2.0.0", + "pino-std-serializers": "^7.0.0", + "process-warning": "^4.0.0", + "quick-format-unescaped": "^4.0.3", + "real-require": "^0.2.0", + "safe-stable-stringify": "^2.3.1", + "sonic-boom": "^4.0.1", + "thread-stream": "^3.0.0" + }, + "bin": { + "pino": "bin.js" + } + }, + "node_modules/pino-abstract-transport": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pino-abstract-transport/-/pino-abstract-transport-2.0.0.tgz", + "integrity": 
"sha512-F63x5tizV6WCh4R6RHyi2Ml+M70DNRXt/+HANowMflpgGFMAym/VKm6G7ZOQRjqN7XbGxK1Lg9t6ZrtzOaivMw==", + "dev": true, + "dependencies": { + "split2": "^4.0.0" + } + }, + "node_modules/pino-pretty": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/pino-pretty/-/pino-pretty-13.0.0.tgz", + "integrity": "sha512-cQBBIVG3YajgoUjo1FdKVRX6t9XPxwB9lcNJVD5GCnNM4Y6T12YYx8c6zEejxQsU0wrg9TwmDulcE9LR7qcJqA==", + "dev": true, + "dependencies": { + "colorette": "^2.0.7", + "dateformat": "^4.6.3", + "fast-copy": "^3.0.2", + "fast-safe-stringify": "^2.1.1", + "help-me": "^5.0.0", + "joycon": "^3.1.1", + "minimist": "^1.2.6", + "on-exit-leak-free": "^2.1.0", + "pino-abstract-transport": "^2.0.0", + "pump": "^3.0.0", + "secure-json-parse": "^2.4.0", + "sonic-boom": "^4.0.1", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "pino-pretty": "bin.js" + } + }, + "node_modules/pino-std-serializers": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pino-std-serializers/-/pino-std-serializers-7.0.0.tgz", + "integrity": "sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA==", + "dev": true + }, + "node_modules/prelude-ls": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", - "dev": true + "dev": true, + "engines": { + "node": ">= 0.8.0" + } }, - "process-nextick-args": { + "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", "dev": true }, - "progress": { + "node_modules/process-warning": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/process-warning/-/process-warning-4.0.1.tgz", + "integrity": "sha512-3c2LzQ3rY9d0hc1emcsHhfT9Jwz0cChib/QN89oME2R451w5fy3f0afAhERFZAwrbDU43wk12d0ORBpDVME50Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ] + }, + "node_modules/progress": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/promise-timeout": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/promise-timeout/-/promise-timeout-1.3.0.tgz", + "integrity": "sha512-5yANTE0tmi5++POym6OgtFmwfDvOXABD9oj/jLQr5GPEyuNEb7jH4wbbANJceJid49jwhi1RddxnhnEAb/doqg==", "dev": true }, - "proxy-from-env": { + "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", "dev": true }, - "pump": { + "node_modules/pump": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", "dev": true, - "requires": { + "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, - "punycode": { + "node_modules/punycode": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": 
"sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "dev": true + "dev": true, + "engines": { + "node": ">=6" + } }, - "puppeteer": { + "node_modules/puppeteer": { "version": "1.20.0", "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-1.20.0.tgz", "integrity": "sha512-bt48RDBy2eIwZPrkgbcwHtb51mj2nKvHOPMaSH2IsWiv7lOG9k9zhaRzpDZafrk05ajMc3cu+lSQYYOfH2DkVQ==", + "deprecated": "< 22.8.2 is no longer supported", "dev": true, - "requires": { + "hasInstallScript": true, + "dependencies": { "debug": "^4.1.0", "extract-zip": "^1.6.6", "https-proxy-agent": "^2.2.1", @@ -2111,100 +2170,98 @@ "rimraf": "^2.6.1", "ws": "^6.1.0" }, + "engines": { + "node": ">=6.4.0" + } + }, + "node_modules/puppeteer-to-istanbul": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/puppeteer-to-istanbul/-/puppeteer-to-istanbul-1.4.0.tgz", + "integrity": "sha512-dzW8u/PMqMZppvoXCFod8IkCTI2JL0yP2YUBbaALnX+iJJ6gqjk77fIoK9MqnMqRZAcoa81GLFfZExakWg/Q4Q==", + "dev": true, "dependencies": { - "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - } + "clone": "^2.1.2", + "mkdirp": "^1.0.4", + "v8-to-istanbul": "^1.2.1", + "yargs": "^15.3.1" } }, - "puppeteer-to-istanbul": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/puppeteer-to-istanbul/-/puppeteer-to-istanbul-1.2.2.tgz", - "integrity": "sha512-uXj2WKvcrszD0BHBp6Ht3FDed4Kfzvzn1fP4IdrYLjZ9Gbxc/YRhT1JBdTz1TMHZVs+HHT/Bbwz3KwSLLK4UBg==", + "node_modules/puppeteer-to-istanbul/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "dev": true, - "requires": { - "clone": "^2.1.1", - "mkdirp": "^0.5.1", - "v8-to-istanbul": "^1.2.0", - "yargs": "^11.0.0" + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/puppeteer-to-istanbul/node_modules/yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "dev": true, + "dependencies": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^18.1.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/puppeteer-to-istanbul/node_modules/yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/puppeteer/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dev": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": 
">=6.0" }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - }, - "yargs": { - "version": "11.1.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-11.1.1.tgz", - "integrity": "sha512-PRU7gJrJaXv3q3yQZ/+/X6KBswZiaQ+zOmdprZcouPYtQgvNU35i+68M4b1ZHLZtYFT5QObFLV+ZkmJYcwKdiw==", - "dev": true, - "requires": { - "cliui": "^4.0.0", - "decamelize": "^1.1.1", - "find-up": "^2.1.0", - "get-caller-file": "^1.0.1", - "os-locale": "^3.1.0", - "require-directory": "^2.1.1", - "require-main-filename": "^1.0.1", - "set-blocking": "^2.0.0", - "string-width": "^2.0.0", - "which-module": "^2.0.0", - "y18n": "^3.2.1", - "yargs-parser": "^9.0.2" - }, - "dependencies": { - "yargs-parser": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-9.0.2.tgz", - "integrity": "sha1-nM9qQ0YP5O1Aqbto9I1DuKaMwHc=", - "dev": true, - "requires": { - "camelcase": "^4.1.0" - } - } - } + "peerDependenciesMeta": { + "supports-color": { + "optional": true } } }, - "readable-stream": { + "node_modules/quick-format-unescaped": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz", + "integrity": "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==", + "dev": true + }, + "node_modules/readable-stream": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "requires": { + "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", @@ -2214,542 +2271,583 @@ "util-deprecate": "~1.0.1" } }, - "readline2": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/readline2/-/readline2-1.0.1.tgz", - "integrity": "sha1-QQWWCP/BVHV7cV2ZidGZ/783LjU=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "mute-stream": "0.0.5" - }, - "dependencies": { - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "mute-stream": { - "version": "0.0.5", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.5.tgz", - "integrity": "sha1-j7+rsKmKJT0xhDMfno3rc3L6xsA=", - "dev": true - } - } - }, - "rechoir": { - "version": "0.6.2", - "resolved": 
"https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", + "node_modules/real-require": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/real-require/-/real-require-0.2.0.tgz", + "integrity": "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==", "dev": true, - "requires": { - "resolve": "^1.1.6" + "engines": { + "node": ">= 12.13.0" } }, - "regexpp": { + "node_modules/regexpp": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", - "dev": true + "dev": true, + "engines": { + "node": ">=6.5.0" + } }, - "require-directory": { + "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true - }, - "require-main-filename": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-1.0.1.tgz", - "integrity": "sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=", - "dev": true - }, - "require-uncached": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/require-uncached/-/require-uncached-1.0.3.tgz", - "integrity": "sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM=", "dev": true, - "requires": { - "caller-path": "^0.1.0", - "resolve-from": "^1.0.0" - }, - "dependencies": { - "resolve-from": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-1.0.1.tgz", - "integrity": "sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY=", - "dev": true - } + "engines": { + "node": ">=0.10.0" } }, - "resolve": { + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true + }, + "node_modules/resolve": { "version": "1.15.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", "integrity": "sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", "dev": true, - "requires": { + "dependencies": { "path-parse": "^1.0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "resolve-from": { + "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true + "dev": true, + "engines": { + "node": ">=4" + } }, - "restore-cursor": { + "node_modules/restore-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", "dev": true, - "requires": { + "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" } }, - "rimraf": { + "node_modules/rimraf": { "version": "2.6.3", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, - "requires": { + "dependencies": { 
"glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" } }, - "run-async": { + "node_modules/run-async": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.0.tgz", "integrity": "sha512-xJTbh/d7Lm7SBhc1tNvTpeCHaEzoyxPrqNlvSdMfBTYwaY++UJFyXUOxAtsRUXjlqOfj8luNaR9vjCh4KeV+pg==", "dev": true, - "requires": { + "dependencies": { "is-promise": "^2.1.0" + }, + "engines": { + "node": ">=0.12.0" } }, - "rx-lite": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/rx-lite/-/rx-lite-3.1.2.tgz", - "integrity": "sha1-Gc5QLKVyZl87ZHsQk5+X/RYV8QI=", - "dev": true - }, - "rxjs": { + "node_modules/rxjs": { "version": "6.5.4", "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.4.tgz", "integrity": "sha512-naMQXcgEo3csAEGvw/NydRA0fuS2nDZJiw1YUWFKU7aPPAPGZEsD4Iimit96qwCieH6y614MCLYwdkrWx7z/7Q==", "dev": true, - "requires": { + "dependencies": { "tslib": "^1.9.0" + }, + "engines": { + "npm": ">=2.0.0" } }, - "safe-buffer": { + "node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "safer-buffer": { + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "dev": true }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "node_modules/sax": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==", + "dev": true + }, + "node_modules/secure-json-parse": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz", + "integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==", "dev": true }, - "set-blocking": { + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", "dev": true }, - "shebang-command": { + "node_modules/shebang-command": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", "dev": true, - "requires": { + "dependencies": { "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" } }, - "shebang-regex": { + "node_modules/shebang-regex": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "dev": true - }, - "shelljs": { - "version": "0.7.8", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.7.8.tgz", - "integrity": "sha1-3svPh0sNHl+3LhSxZKloMEjprLM=", "dev": true, - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" + "engines": { + "node": ">=0.10.0" } }, - "signal-exit": { + "node_modules/signal-exit": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", "dev": true }, - "slice-ansi": { + "node_modules/slice-ansi": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", "dev": true, - "requires": { + "dependencies": { "ansi-styles": "^3.2.0", "astral-regex": "^1.0.0", "is-fullwidth-code-point": "^2.0.0" }, + "engines": { + "node": ">=6" + } + }, + "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/sonic-boom": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.2.0.tgz", + "integrity": "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww==", + "dev": true, "dependencies": { - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - } + "atomic-sleep": "^1.0.0" } }, - "source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "dev": true, + "engines": { + "node": ">= 10.x" + } }, - "sprintf-js": { + "node_modules/sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", "dev": true }, - "string-width": { + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string-width": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", "dev": true, - "requires": { + "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.0" }, - "dependencies": { - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": 
"sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } + "engines": { + "node": ">=8" } }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "node_modules/string-width/node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", "dev": true, - "requires": { - "safe-buffer": "~5.1.0" + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" } }, - "strip-ansi": { + "node_modules/strip-ansi": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", "dev": true, - "requires": { + "dependencies": { "ansi-regex": "^4.1.0" }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "dev": true - } + "engines": { + "node": ">=6" } }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "dev": true + "node_modules/strip-ansi/node_modules/ansi-regex": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", + "dev": true, + "engines": { + "node": ">=6" + } }, - "strip-json-comments": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz", - "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==", - "dev": true + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, - "supports-color": { + "node_modules/supports-color": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, - "requires": { + "dependencies": { "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" } }, - "table": { + "node_modules/table": { "version": "5.4.6", "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", "dev": true, - "requires": { + "dependencies": { "ajv": "^6.10.2", "lodash": 
"^4.17.14", "slice-ansi": "^2.1.0", "string-width": "^3.0.0" }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/table/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "node_modules/table/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/table/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, "dependencies": { - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" } }, - "text-table": { + "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", "dev": true }, - "through": { + "node_modules/thread-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.1.0.tgz", + "integrity": "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==", + "dev": true, + "dependencies": { + "real-require": "^0.2.0" + } + }, + "node_modules/through": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", "dev": true }, - "tmp": { + "node_modules/tmp": { "version": "0.0.33", "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", "dev": true, - "requires": { + "dependencies": { "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" } }, - "to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "dev": true - }, - "tslib": { + "node_modules/tslib": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==", "dev": true }, - "type": { - "version": 
"1.2.0", - "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", - "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==", - "dev": true - }, - "type-check": { + "node_modules/type-check": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", "dev": true, - "requires": { + "dependencies": { "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" } }, - "type-fest": { + "node_modules/type-fest": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true + "dev": true, + "engines": { + "node": ">=8" + } }, - "typedarray": { + "node_modules/typedarray": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", "dev": true }, - "uri-js": { + "node_modules/underscore": { + "version": "1.13.7", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.7.tgz", + "integrity": "sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g==", + "dev": true + }, + "node_modules/uri-js": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", "dev": true, - "requires": { + "dependencies": { "punycode": "^2.1.0" } }, - "user-home": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/user-home/-/user-home-2.0.0.tgz", - "integrity": "sha1-nHC/2Babwdy/SGBODwS4tJzenp8=", - "dev": true, - "requires": { - "os-homedir": "^1.0.0" - } - }, - "util-deprecate": { + "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", "dev": true }, - "v8-compile-cache": { + "node_modules/v8-compile-cache": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz", "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==", "dev": true }, - "v8-to-istanbul": { + "node_modules/v8-to-istanbul": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-1.2.1.tgz", "integrity": "sha512-NglPycIwSQeSJj7VJ6L8vTsPKC9MG5Lcx4n3SvYqNHzklbMI4dGcLJnkLPEPJ3uB8UyTdWviMhM0Ptq+xD5UFQ==", - "dev": true + "dev": true, + "engines": { + "node": ">=10.10.0" + } }, - "which": { + "node_modules/which": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", "dev": true, - "requires": { + "dependencies": { "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" } }, - "which-module": { + "node_modules/which-module": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", "dev": true }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true + 
"node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "wrap-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", - "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, - "requires": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1" + "dependencies": { + "color-convert": "^2.0.1" }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - } + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" } }, - "wrappy": { + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", "dev": true }, - "write": { + "node_modules/write": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", "dev": true, - "requires": { + "dependencies": { "mkdirp": "^0.5.1" + }, + "engines": { + "node": ">=4" } }, - "ws": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", - "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", + "node_modules/ws": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.3.tgz", + "integrity": "sha512-jmTjYU0j60B+vHey6TfR3Z7RD61z/hmxBS3VMSGIrroOWXQEneK1zNuotOUrGyBHQj0yrpsLHPWtigEFd13ndA==", "dev": true, - "requires": { + "dependencies": { "async-limiter": "~1.0.0" } }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "dev": true - }, - "y18n": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", - "integrity": "sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==", + "node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", "dev": true }, - "yargs": { + "node_modules/yargs": { "version": "13.3.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", "dev": true, - "requires": { + "dependencies": { "cliui": "^5.0.0", "find-up": "^3.0.0", "get-caller-file": "^2.0.1", @@ -2760,137 +2858,117 @@ "which-module": "^2.0.0", "y18n": "^4.0.0", "yargs-parser": "^13.1.2" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dev": true, - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", - "dev": true - }, - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, - 
"is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - }, - "y18n": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.1.tgz", - "integrity": "sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ==", - "dev": true - }, - "yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } } }, - "yargs-parser": { + "node_modules/yargs-parser": { "version": "19.0.4", "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-19.0.4.tgz", "integrity": "sha512-eXeQm7yXRjPFFyf1voPkZgXQZJjYfjgQUmGPbD2TLtZeIYzvacgWX7sQ5a1HsRgVP+pfKAkRZDNtTGev4h9vhw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dev": true, + "dependencies": { + "string-width": 
"^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", "dev": true + }, + "node_modules/yargs/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/yargs/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/yargs/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dev": true, + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } } } } diff --git a/package.json b/package.json index 8533c589643..c50867df4ce 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,7 @@ { "scripts": { - "linter": "npx eslint ./services/*/client/source/class/*/" + "linter": "npx eslint ./services/*/client/source/class/*/", + "linter-fix": "npx eslint ./services/*/client/source/class/*/ --fix" }, "devDependencies": { "babel-eslint": "^10.1.0", diff --git a/packages/aws-library/Makefile b/packages/aws-library/Makefile new file mode 100644 index 00000000000..31000dd211f --- /dev/null +++ 
b/packages/aws-library/Makefile @@ -0,0 +1,50 @@ +# +# Targets for DEVELOPMENT of AWS library +# +include ../../scripts/common.Makefile +include ../../scripts/common-package.Makefile + +.PHONY: requirements +requirements: ## compiles pip requirements (.in -> .txt) + @$(MAKE_C) requirements reqs + + +.PHONY: install-dev install-prod install-ci +install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode + # installing in $(subst install-,,$@) mode + @uv pip sync requirements/$(subst install-,,$@).txt + + +.PHONY: tests tests-ci +tests: ## runs unit tests + # running unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov=aws_library \ + --durations=10 \ + --exitfirst \ + --failed-first \ + --pdb \ + -vv \ + $(CURDIR)/tests + +tests-ci: ## runs unit tests in CI mode + # running unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-append \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ + --cov=aws_library \ + --durations=10 \ + --log-date-format="%Y-%m-%d %H:%M:%S" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ + --verbose \ + -m "not heavy_load" \ + $(CURDIR)/tests diff --git a/packages/aws-library/README.md b/packages/aws-library/README.md new file mode 100644 index 00000000000..c7df3095401 --- /dev/null +++ b/packages/aws-library/README.md @@ -0,0 +1,22 @@ +# simcore AWS library + +Provides a wrapper around AWS Python libraries. + +Requirements for code to be compatible with this library: + +- only AWS-related code + + +## Installation + +```console +make help +make install-dev +``` + +## Test + +```console +make help +make tests +``` diff --git a/packages/aws-library/VERSION b/packages/aws-library/VERSION new file mode 100644 index 00000000000..6e8bf73aa55 --- /dev/null +++ b/packages/aws-library/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/packages/aws-library/requirements/Makefile b/packages/aws-library/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/packages/aws-library/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g.
_migration.txt: _base.txt diff --git a/packages/aws-library/requirements/_base.in b/packages/aws-library/requirements/_base.in new file mode 100644 index 00000000000..c3882458c66 --- /dev/null +++ b/packages/aws-library/requirements/_base.in @@ -0,0 +1,16 @@ +# +# Specifies third-party dependencies for 'aws-library' +# +--constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in + +aioboto3 +aiocache +arrow +pydantic[email] +types-aiobotocore[ec2,s3,ssm] +opentelemetry-instrumentation-botocore +sh diff --git a/packages/aws-library/requirements/_base.txt b/packages/aws-library/requirements/_base.txt new file mode 100644 index 00000000000..34ee547b218 --- /dev/null +++ b/packages/aws-library/requirements/_base.txt @@ -0,0 +1,447 @@ +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aioboto3==14.3.0 + # via -r requirements/_base.in +aiobotocore==2.22.0 + # via aioboto3 +aiocache==0.12.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # aioboto3 +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiobotocore + # aiodocker +aioitertools==0.12.0 + # via aiobotocore +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 + # via + # 
fast-depends + # faststream +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +attrs==25.1.0 + # via + # aiohttp + # jsonschema + # referencing +boto3==1.37.3 + # via aiobotocore +botocore==1.37.3 + # via + # aiobotocore + # boto3 + # s3transfer +botocore-stubs==1.37.4 + # via types-aiobotocore +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via typer +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.68.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +idna==3.10 + # via + # anyio + # email-validator + # requests + # yarl +importlib-metadata==8.5.0 + # via opentelemetry-api +jmespath==1.0.1 + # via + # aiobotocore + # boto3 + # botocore +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py 
+multidict==6.1.0 + # via + # aiobotocore + # aiohttp + # yarl +opentelemetry-api==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-propagator-aws-xray + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-botocore==0.51b0 + # via -r requirements/_base.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-propagator-aws-xray==1.0.2 + # via opentelemetry-instrumentation-botocore +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fast-depends + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via + # aiobotocore + # arrow + # botocore +python-dotenv==1.0.1 + # via pydantic-settings +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.23.1 + # via + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +sh==2.2.2 + # via -r requirements/_base.in +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +types-aiobotocore==2.21.1 + # via -r requirements/_base.in +types-aiobotocore-ec2==2.21.0 + # via types-aiobotocore +types-aiobotocore-s3==2.21.0 + # via types-aiobotocore +types-aiobotocore-ssm==2.21.0 + # via types-aiobotocore +types-awscrt==0.23.10 + # via botocore-stubs +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # anyio + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # typer + # types-aiobotocore + # types-aiobotocore-ec2 + # types-aiobotocore-s3 + # types-aiobotocore-ssm +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # botocore + # requests +wrapt==1.17.2 + # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.18.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git 
a/packages/aws-library/requirements/_test.in b/packages/aws-library/requirements/_test.in new file mode 100644 index 00000000000..43480f6a427 --- /dev/null +++ b/packages/aws-library/requirements/_test.in @@ -0,0 +1,30 @@ +# +# Specifies dependencies required to run 'aws-library' tests +# +--constraint ../../../requirements/constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _test.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + +# testing +coverage +faker +fastapi +httpx +moto[server] +pint +pytest +pytest-asyncio +pytest-benchmark +pytest-cov +pytest-icdiff +pytest-instafail +pytest-mock +pytest-runner +pytest-sugar +python-dotenv +pyyaml +types-aioboto3 +types-boto3 diff --git a/packages/aws-library/requirements/_test.txt b/packages/aws-library/requirements/_test.txt new file mode 100644 index 00000000000..03936ad2eb0 --- /dev/null +++ b/packages/aws-library/requirements/_test.txt @@ -0,0 +1,332 @@ +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.8.0 + # via + # -c requirements/_base.txt + # httpx + # starlette +attrs==25.1.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +aws-sam-translator==1.95.0 + # via cfn-lint +aws-xray-sdk==2.14.0 + # via moto +blinker==1.9.0 + # via flask +boto3==1.37.3 + # via + # -c requirements/_base.txt + # aws-sam-translator + # moto +botocore==1.37.3 + # via + # -c requirements/_base.txt + # aws-xray-sdk + # boto3 + # moto + # s3transfer +botocore-stubs==1.37.4 + # via + # -c requirements/_base.txt + # types-aioboto3 + # types-aiobotocore + # types-boto3 +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +cffi==1.17.1 + # via cryptography +cfn-lint==1.27.0 + # via moto +charset-normalizer==3.4.1 + # via + # -c requirements/_base.txt + # requests +click==8.1.8 + # via + # -c requirements/_base.txt + # flask +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +cryptography==44.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # joserfc + # moto +docker==7.1.0 + # via moto +faker==36.1.1 + # via -r requirements/_test.in +fastapi==0.115.12 + # via -r requirements/_test.in +flask==3.1.0 + # via + # flask-cors + # moto +flask-cors==5.0.1 + # via moto +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +graphql-core==3.2.6 + # via moto +h11==0.14.0 + # via httpcore +httpcore==1.0.7 + # via httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests +iniconfig==2.0.0 + # via pytest +itsdangerous==2.2.0 + # via flask +jinja2==3.1.5 + # via + # -c requirements/../../../requirements/constraints.txt + # flask + # moto +jmespath==1.0.1 + # via + # -c requirements/_base.txt + # boto3 + # botocore +joserfc==1.0.4 + # via moto +jsonpatch==1.33 + # via cfn-lint +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 + # via jsonpatch +jsonschema==4.23.0 + # via + # -c requirements/_base.txt + # aws-sam-translator + # openapi-schema-validator + # openapi-spec-validator +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2024.10.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via
openapi-spec-validator +markupsafe==3.0.2 + # via + # jinja2 + # werkzeug +moto==5.1.4 + # via -r requirements/_test.in +mpmath==1.3.0 + # via sympy +networkx==3.4.2 + # via cfn-lint +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 + # via moto +packaging==24.2 + # via + # -c requirements/_base.txt + # pytest + # pytest-sugar +pathable==0.4.4 + # via jsonschema-path +pint==0.24.4 + # via -r requirements/_test.in +platformdirs==4.3.6 + # via pint +pluggy==1.5.0 + # via pytest +ply==3.11 + # via jsonpath-ng +pprintpp==0.4.0 + # via pytest-icdiff +py-cpuinfo==9.0.0 + # via pytest-benchmark +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 + # via cffi +pydantic==2.10.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aws-sam-translator + # fastapi +pydantic-core==2.27.2 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.2.1 + # via moto +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-benchmark + # pytest-cov + # pytest-icdiff + # pytest-instafail + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-benchmark==5.1.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-icdiff==0.9 + # via -r requirements/_test.in +pytest-instafail==0.5.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 + # via + # -c requirements/_base.txt + # botocore + # moto +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in + # cfn-lint + # jsonschema-path + # moto + # responses +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker + # jsonschema-path + # moto + # responses +responses==0.25.6 + # via moto +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.23.1 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 + # via + # -c requirements/_base.txt + # boto3 +setuptools==75.8.2 + # via moto +six==1.17.0 + # via + # -c requirements/_base.txt + # python-dateutil + # rfc3339-validator +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio +starlette==0.46.0 + # via + # -c requirements/../../../requirements/constraints.txt + # fastapi +sympy==1.13.3 + # via cfn-lint +termcolor==2.5.0 + # via pytest-sugar +types-aioboto3==14.1.0 + # via -r requirements/_test.in +types-aiobotocore==2.21.1 + # via + # -c requirements/_base.txt + # types-aioboto3 +types-awscrt==0.23.10 + # via + # -c requirements/_base.txt + # botocore-stubs +types-boto3==1.38.2 + # via -r requirements/_test.in +types-s3transfer==0.11.3 + # via + # types-aioboto3 + # types-boto3 +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # anyio + # aws-sam-translator + # cfn-lint + # fastapi + # flexcache + # flexparser + # pint + # pydantic + # pydantic-core + # types-aioboto3 + # types-aiobotocore + # types-boto3 +tzdata==2025.1 + # via faker +urllib3==2.3.0 + # via + # -c 
requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore + # docker + # requests + # responses +werkzeug==3.1.3 + # via + # flask + # flask-cors + # moto +wrapt==1.17.2 + # via + # -c requirements/_base.txt + # aws-xray-sdk +xmltodict==0.14.2 + # via moto diff --git a/packages/aws-library/requirements/_tools.in b/packages/aws-library/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/packages/aws-library/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/packages/aws-library/requirements/_tools.txt b/packages/aws-library/requirements/_tools.txt new file mode 100644 index 00000000000..51d5e1879ce --- /dev/null +++ b/packages/aws-library/requirements/_tools.txt @@ -0,0 +1,87 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # -c requirements/_test.txt + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_test.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via + # -c requirements/_test.txt + # pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/packages/aws-library/requirements/ci.txt b/packages/aws-library/requirements/ci.txt new file mode 100644 index 00000000000..bac75da67f8 --- /dev/null +++ b/packages/aws-library/requirements/ci.txt @@ -0,0 +1,22 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'models-library' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library/ +pytest-simcore @ ../pytest-simcore +simcore-service-library @ ../service-library/ +simcore-settings-library @ ../settings-library/ + +# current module +simcore-aws-library @ . 
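Each layer above constrains itself against the previous one (`--constraint _base.txt` in `_test.in`, `-c requirements/_base.txt` annotations in `_test.txt`/`_tools.txt`), so a package pinned in several layers must resolve to a single version. The following is a hypothetical helper, not part of this PR, sketching how that consistency could be cross-checked for the files that `ci.txt` installs; it only understands simple `name==version` lines.

```python
# Hypothetical consistency check across requirement layers (_base/_test/_tools).
# Paths and the naive "name==version" parsing are assumptions for illustration.
import re
from pathlib import Path

PIN_RE = re.compile(r"^([A-Za-z0-9_.\-\[\]]+)==([^\s#]+)")


def read_pins(path: Path) -> dict[str, str]:
    """Returns {package-name: version} for every exact pin in a requirements file."""
    pins: dict[str, str] = {}
    for line in path.read_text().splitlines():
        if match := PIN_RE.match(line.strip()):
            name, version = match.groups()
            pins[name.lower()] = version
    return pins


def check_layers(*paths: Path) -> None:
    """Raises if the same package is pinned to different versions in different layers."""
    seen: dict[str, tuple[str, Path]] = {}
    for path in paths:
        for name, version in read_pins(path).items():
            if name in seen and seen[name][0] != version:
                msg = f"{name}: {seen[name][0]} ({seen[name][1]}) != {version} ({path})"
                raise ValueError(msg)
            seen[name] = (version, path)


if __name__ == "__main__":
    requirements_dir = Path("packages/aws-library/requirements")
    check_layers(
        requirements_dir / "_base.txt",
        requirements_dir / "_test.txt",
        requirements_dir / "_tools.txt",
    )
```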
diff --git a/packages/aws-library/requirements/dev.txt b/packages/aws-library/requirements/dev.txt new file mode 100644 index 00000000000..34cc644b370 --- /dev/null +++ b/packages/aws-library/requirements/dev.txt @@ -0,0 +1,22 @@ +# Shortcut to install all packages needed to develop 'models-library' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../common-library/ +--editable ../models-library/ +--editable ../pytest-simcore/ +--editable ../service-library/ +--editable ../settings-library/ + +# current module +--editable . diff --git a/packages/aws-library/setup.cfg b/packages/aws-library/setup.cfg new file mode 100644 index 00000000000..8e5bee5ba44 --- /dev/null +++ b/packages/aws-library/setup.cfg @@ -0,0 +1,22 @@ +[bumpversion] +current_version = 0.1.0 +commit = True +message = packages/aws-library version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[bdist_wheel] +universal = 1 + +[aliases] +test = pytest + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +[mypy] +plugins = + pydantic.mypy diff --git a/packages/aws-library/setup.py b/packages/aws-library/setup.py new file mode 100644 index 00000000000..cb9eba89604 --- /dev/null +++ b/packages/aws-library/setup.py @@ -0,0 +1,59 @@ +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +INSTALL_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.in") +) # WEAK requirements + +TEST_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_test.txt") +) # STRICT requirements + + +SETUP = { + "name": "simcore-aws-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Sylvain Anderegg (sanderegg)", + "description": "Core service library for AWS APIs", + "python_requires": ">=3.10", + "classifiers": [ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python :: 3.10", + ], + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "include_package_data": True, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} + + +if __name__ == "__main__": + setup(**SETUP) diff --git a/packages/aws-library/src/aws_library/__init__.py b/packages/aws-library/src/aws_library/__init__.py new file mode 100644 index 00000000000..4f4d58a4a0a --- /dev/null +++ b/packages/aws-library/src/aws_library/__init__.py @@ -0,0 +1,3 @@ +from importlib.metadata import version + +__version__: str = version("simcore-aws-library") diff --git a/packages/aws-library/src/aws_library/ec2/__init__.py b/packages/aws-library/src/aws_library/ec2/__init__.py 
new file mode 100644 index 00000000000..112c70861b2 --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/__init__.py @@ -0,0 +1,37 @@ +from ._client import SimcoreEC2API +from ._errors import EC2AccessError, EC2NotConnectedError, EC2RuntimeError +from ._models import ( + AWS_TAG_KEY_MAX_LENGTH, + AWS_TAG_KEY_MIN_LENGTH, + AWS_TAG_VALUE_MAX_LENGTH, + AWS_TAG_VALUE_MIN_LENGTH, + AWSTagKey, + AWSTagValue, + EC2InstanceBootSpecific, + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + EC2Tags, + Resources, +) + +__all__: tuple[str, ...] = ( + "AWSTagKey", + "AWSTagValue", + "AWS_TAG_KEY_MIN_LENGTH", + "AWS_TAG_KEY_MAX_LENGTH", + "AWS_TAG_VALUE_MIN_LENGTH", + "AWS_TAG_VALUE_MAX_LENGTH", + "EC2AccessError", + "EC2InstanceBootSpecific", + "EC2InstanceConfig", + "EC2InstanceData", + "EC2InstanceType", + "EC2NotConnectedError", + "EC2RuntimeError", + "EC2Tags", + "Resources", + "SimcoreEC2API", +) + +# nopycln: file diff --git a/packages/aws-library/src/aws_library/ec2/_client.py b/packages/aws-library/src/aws_library/ec2/_client.py new file mode 100644 index 00000000000..970d6130e69 --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/_client.py @@ -0,0 +1,369 @@ +import contextlib +import logging +from collections.abc import Iterable, Sequence +from dataclasses import dataclass +from typing import Literal, cast + +import aioboto3 +import botocore.exceptions +from aiobotocore.session import ClientCreatorContext +from aiocache import cached # type: ignore[import-untyped] +from pydantic import ByteSize, PositiveInt +from servicelib.logging_utils import log_context +from settings_library.ec2 import EC2Settings +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.type_defs import FilterTypeDef, TagTypeDef + +from ._error_handler import ec2_exception_handler +from ._errors import EC2InstanceNotFoundError, EC2TooManyInstancesError +from ._models import ( + AWSTagKey, + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + EC2Tags, + Resources, +) +from ._utils import compose_user_data, ec2_instance_data_from_aws_instance + +_logger = logging.getLogger(__name__) + + +@dataclass() +class SimcoreEC2API: + client: EC2Client + session: aioboto3.Session + exit_stack: contextlib.AsyncExitStack + + @classmethod + async def create(cls, settings: EC2Settings) -> "SimcoreEC2API": + session = aioboto3.Session() + session_client = session.client( + "ec2", + endpoint_url=settings.EC2_ENDPOINT, + aws_access_key_id=settings.EC2_ACCESS_KEY_ID, + aws_secret_access_key=settings.EC2_SECRET_ACCESS_KEY, + region_name=settings.EC2_REGION_NAME, + ) + assert isinstance(session_client, ClientCreatorContext) # nosec + exit_stack = contextlib.AsyncExitStack() + ec2_client = cast( + EC2Client, await exit_stack.enter_async_context(session_client) + ) + return cls(ec2_client, session, exit_stack) + + async def close(self) -> None: + await self.exit_stack.aclose() + + async def ping(self) -> bool: + with contextlib.suppress(Exception): + await self.client.describe_account_attributes() + return True + return False + + @cached(noself=True) + @ec2_exception_handler(_logger) + async def get_ec2_instance_capabilities( + self, + instance_type_names: set[InstanceTypeType] | Literal["ALL"], + ) -> list[EC2InstanceType]: + """Returns the ec2 instance types from a list of instance type names (sorted by name) + + Arguments: + instance_type_names -- the types to filter with or "ALL", to return all EC2 possible 
instances + + Raises: + Ec2InstanceTypeInvalidError: some invalid types were used as filter + ClustersKeeperRuntimeError: unexpected error communicating with EC2 + + """ + if instance_type_names == "ALL": + selection_or_all_if_empty = [] + else: + selection_or_all_if_empty = list(instance_type_names) + if len(selection_or_all_if_empty) == 0: + msg = "`instance_type_names` cannot be an empty set. Use either a selection or 'ALL'" + raise ValueError(msg) + + instance_types = await self.client.describe_instance_types( + InstanceTypes=selection_or_all_if_empty + ) + list_instances: list[EC2InstanceType] = [] + for instance in instance_types.get("InstanceTypes", []): + with contextlib.suppress(KeyError): + list_instances.append( + EC2InstanceType( + name=instance["InstanceType"], + resources=Resources( + cpus=instance["VCpuInfo"]["DefaultVCpus"], + ram=ByteSize( + int(instance["MemoryInfo"]["SizeInMiB"]) * 1024 * 1024 + ), + ), + ) + ) + return sorted(list_instances, key=lambda i: i.name) + + @ec2_exception_handler(_logger) + async def launch_instances( + self, + instance_config: EC2InstanceConfig, + *, + min_number_of_instances: PositiveInt, + number_of_instances: PositiveInt, + max_total_number_of_instances: PositiveInt = 10, + ) -> list[EC2InstanceData]: + """launch new EC2 instance(s) + + Arguments: + instance_config -- The EC2 instance configuration + min_number_of_instances -- the minimal number of instances needed (fails if this amount cannot be reached) + number_of_instances -- the ideal number of instances needed (it it cannot be reached AWS will return a number >=min_number_of_instances) + + Keyword Arguments: + max_total_number_of_instances -- The total maximum allowed number of instances for this given instance_config (default: {10}) + + Raises: + EC2TooManyInstancesError: + + Returns: + The created instance data infos + """ + with log_context( + _logger, + logging.INFO, + msg=f"launch {number_of_instances} AWS instance(s) {instance_config.type.name} with {instance_config.tags=}", + ): + # first check the max amount is not already reached + current_instances = await self.get_instances( + key_names=[instance_config.key_name], tags=instance_config.tags + ) + if ( + len(current_instances) + number_of_instances + > max_total_number_of_instances + ): + raise EC2TooManyInstancesError( + num_instances=max_total_number_of_instances + ) + + resource_tags: list[TagTypeDef] = [ + {"Key": tag_key, "Value": tag_value} + for tag_key, tag_value in instance_config.tags.items() + ] + + instances = await self.client.run_instances( + ImageId=instance_config.ami_id, + MinCount=min_number_of_instances, + MaxCount=number_of_instances, + IamInstanceProfile=( + {"Arn": instance_config.iam_instance_profile} + if instance_config.iam_instance_profile + else {} + ), + InstanceType=instance_config.type.name, + InstanceInitiatedShutdownBehavior="terminate", + KeyName=instance_config.key_name, + TagSpecifications=[ + {"ResourceType": "instance", "Tags": resource_tags}, + {"ResourceType": "volume", "Tags": resource_tags}, + {"ResourceType": "network-interface", "Tags": resource_tags}, + ], + UserData=compose_user_data(instance_config.startup_script), + NetworkInterfaces=[ + { + "AssociatePublicIpAddress": True, + "DeviceIndex": 0, + "SubnetId": instance_config.subnet_id, + "Groups": instance_config.security_group_ids, + } + ], + ) + instance_ids = [i["InstanceId"] for i in instances["Instances"]] + _logger.info( + "%s New instances launched: %s, waiting for them to start now...", + len(instance_ids), + 
instance_ids, + ) + + # wait for the instance to be in a pending state + # NOTE: reference to EC2 states https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html + waiter = self.client.get_waiter("instance_exists") + await waiter.wait(InstanceIds=instance_ids) + _logger.debug("instances %s exists now.", instance_ids) + + # NOTE: waiting for pending ensure we get all the IPs back + described_instances = await self.client.describe_instances( + InstanceIds=instance_ids + ) + assert "Instances" in described_instances["Reservations"][0] # nosec + instance_datas = [ + await ec2_instance_data_from_aws_instance(self, i) + for i in described_instances["Reservations"][0]["Instances"] + ] + _logger.info( + "%s are pending now", + f"{instance_ids=}", + ) + return instance_datas + + @ec2_exception_handler(_logger) + async def get_instances( + self, + *, + key_names: list[str], + tags: EC2Tags, + state_names: list[InstanceStateNameType] | None = None, + ) -> list[EC2InstanceData]: + """returns the instances matching the given criteria + + Arguments: + key_names -- filter the instances by key names + tags -- filter instances by key and their values + + Keyword Arguments: + state_names -- filters the instances by state (pending, running, etc...) (default: {None}) + + Returns: + the instances found + """ + # NOTE: be careful: Name=instance-state-name,Values=["pending", "running"] means pending OR running + # NOTE2: AND is done by repeating Name=instance-state-name,Values=pending Name=instance-state-name,Values=running + if state_names is None: + state_names = ["pending", "running"] + + filters: list[FilterTypeDef] = [ + { + "Name": "key-name", + "Values": key_names, + }, + {"Name": "instance-state-name", "Values": state_names}, + ] + filters.extend( + [{"Name": f"tag:{key}", "Values": [value]} for key, value in tags.items()] + ) + + instances = await self.client.describe_instances(Filters=filters) + all_instances = [] + for reservation in instances["Reservations"]: + assert "Instances" in reservation # nosec + all_instances.extend( + [ + await ec2_instance_data_from_aws_instance(self, i) + for i in reservation["Instances"] + ] + ) + _logger.debug( + "received: %s instances with %s", f"{len(all_instances)}", f"{state_names=}" + ) + return all_instances + + @ec2_exception_handler(_logger) + async def start_instances( + self, instance_datas: Iterable[EC2InstanceData] + ) -> list[EC2InstanceData]: + """starts stopped instances. Will return once the started instances are pending so that their IPs are available. 
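To make the OR/AND note in `get_instances` above concrete, this is roughly the `Filters` payload it builds for `key_names=["my-key"]`, an illustrative tag `{"purpose": "testing"}` and the default states (names and values are placeholders, not part of this PR):

```python
# Illustrative only: filters produced by get_instances() for the inputs above.
# Values inside one filter are OR-ed; separate filters are AND-ed together.
filters = [
    {"Name": "key-name", "Values": ["my-key"]},
    # "pending" OR "running"
    {"Name": "instance-state-name", "Values": ["pending", "running"]},
    # AND-ed with the filters above (one extra filter per tag key)
    {"Name": "tag:purpose", "Values": ["testing"]},
]
```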
+ + Arguments: + instance_datas -- the instances to start + + Raises: + EC2InstanceNotFoundError: if some of the instance_datas are not found + + Returns: + the started instance datas with their respective IPs + """ + instance_ids = [i.id for i in instance_datas] + with log_context( + _logger, + logging.INFO, + msg=f"start instances {instance_ids}", + ): + await self.client.start_instances(InstanceIds=instance_ids) + # wait for the instance to be in a pending state + # NOTE: reference to EC2 states https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html + waiter = self.client.get_waiter("instance_exists") + await waiter.wait(InstanceIds=instance_ids) + _logger.info("instances %s exists now.", instance_ids) + # NOTE: waiting for pending ensure we get all the IPs back + aws_instances = await self.client.describe_instances( + InstanceIds=instance_ids + ) + assert len(aws_instances["Reservations"]) == 1 # nosec + assert "Instances" in aws_instances["Reservations"][0] # nosec + return [ + await ec2_instance_data_from_aws_instance(self, i) + for i in aws_instances["Reservations"][0]["Instances"] + ] + + @ec2_exception_handler(_logger) + async def stop_instances(self, instance_datas: Iterable[EC2InstanceData]) -> None: + """Stops running instances. + Stopping an already stopped instance will do nothing. + + Arguments: + instance_datas -- the instances to stop + + Raises: + EC2InstanceNotFoundError: any of the instance_datas are not found + """ + with log_context( + _logger, + logging.INFO, + msg=f"stop instances {[i.id for i in instance_datas]}", + ): + await self.client.stop_instances(InstanceIds=[i.id for i in instance_datas]) + + @ec2_exception_handler(_logger) + async def terminate_instances( + self, instance_datas: Iterable[EC2InstanceData] + ) -> None: + with log_context( + _logger, + logging.INFO, + msg=f"terminate instances {[i.id for i in instance_datas]}", + ): + await self.client.terminate_instances( + InstanceIds=[i.id for i in instance_datas] + ) + + @ec2_exception_handler(_logger) + async def set_instances_tags( + self, instances: Sequence[EC2InstanceData], *, tags: EC2Tags + ) -> None: + try: + with log_context( + _logger, + logging.DEBUG, + msg=f"set {tags=} on instances '[{[i.id for i in instances]}]'", + ): + await self.client.create_tags( + Resources=[i.id for i in instances], + Tags=[ + {"Key": tag_key, "Value": tag_value} + for tag_key, tag_value in tags.items() + ], + ) + except botocore.exceptions.ClientError as exc: + if exc.response.get("Error", {}).get("Code", "") == "InvalidID": + raise EC2InstanceNotFoundError from exc + raise # pragma: no cover + + @ec2_exception_handler(_logger) + async def remove_instances_tags( + self, instances: Sequence[EC2InstanceData], *, tag_keys: Iterable[AWSTagKey] + ) -> None: + try: + with log_context( + _logger, + logging.DEBUG, + msg=f"removal of {tag_keys=} from instances '[{[i.id for i in instances]}]'", + ): + await self.client.delete_tags( + Resources=[i.id for i in instances], + Tags=[{"Key": tag_key} for tag_key in tag_keys], + ) + except botocore.exceptions.ClientError as exc: + if exc.response.get("Error", {}).get("Code", "") == "InvalidID": + raise EC2InstanceNotFoundError from exc + raise # pragma: no cover diff --git a/packages/aws-library/src/aws_library/ec2/_error_handler.py b/packages/aws-library/src/aws_library/ec2/_error_handler.py new file mode 100644 index 00000000000..8984cf6a0a3 --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/_error_handler.py @@ -0,0 +1,83 @@ +import functools 
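Taken together, the client above exposes a small lifecycle API (create, launch, get, start, stop, terminate, tag). Below is a minimal usage sketch, not part of this PR: credentials, key names and tags are placeholders, only methods defined in `SimcoreEC2API` above are called, and only the `EC2Settings` fields referenced by `SimcoreEC2API.create` are shown (the real settings class may require more).

```python
# Minimal usage sketch. Assumptions: placeholder credentials/names, and that
# EC2Settings accepts the field names read by SimcoreEC2API.create above.
import asyncio

from aws_library.ec2 import SimcoreEC2API
from settings_library.ec2 import EC2Settings


async def main() -> None:
    settings = EC2Settings(
        EC2_ACCESS_KEY_ID="my-access-key",  # placeholder
        EC2_SECRET_ACCESS_KEY="my-secret-key",  # placeholder
        EC2_ENDPOINT=None,  # assumption: None falls back to the default AWS endpoint
        EC2_REGION_NAME="us-east-1",
    )
    ec2 = await SimcoreEC2API.create(settings)
    try:
        # instance types come back sorted by name
        instance_types = await ec2.get_ec2_instance_capabilities({"t2.micro", "t2.small"})
        print([(t.name, t.resources.cpus, t.resources.ram) for t in instance_types])

        # pending/running instances matching a key pair and an illustrative tag
        instances = await ec2.get_instances(
            key_names=["my-key-pair"],  # placeholder
            tags={"purpose": "testing"},  # placeholder tag
        )
        print([i.id for i in instances])
    finally:
        await ec2.close()


if __name__ == "__main__":
    asyncio.run(main())
```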
+import logging +from collections.abc import Callable, Coroutine +from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar + +from botocore import exceptions as botocore_exc + +from ._errors import ( + EC2AccessError, + EC2InstanceNotFoundError, + EC2InstanceTypeInvalidError, + EC2NotConnectedError, + EC2RuntimeError, + EC2TimeoutError, +) + +if TYPE_CHECKING: + # NOTE: TYPE_CHECKING is True when static type checkers are running, + # allowing for circular imports only for them (mypy, pylance, ruff) + from ._client import SimcoreEC2API + + +P = ParamSpec("P") +R = TypeVar("R") +T = TypeVar("T") +Self = TypeVar("Self", bound="SimcoreEC2API") + + +def _map_botocore_client_exception( + botocore_error: botocore_exc.ClientError, + *args, # pylint: disable=unused-argument # noqa: ARG001 + **kwargs, # pylint: disable=unused-argument # noqa: ARG001 +) -> EC2AccessError: + status_code = int( + botocore_error.response.get("ResponseMetadata", {}).get("HTTPStatusCode") + or botocore_error.response.get("Error", {}).get("Code", -1) + ) + operation_name = botocore_error.operation_name + match status_code, operation_name: + case 400, "StartInstances": + return EC2InstanceNotFoundError() + case 400, "StopInstances": + return EC2InstanceNotFoundError() + case 400, "TerminateInstances": + return EC2InstanceNotFoundError() + case 400, "DescribeInstanceTypes": + return EC2InstanceTypeInvalidError() + case _: + return EC2AccessError( + operation_name=operation_name, + code=status_code, + error=f"{botocore_error}", + ) + + +def ec2_exception_handler( + logger: logging.Logger, +) -> Callable[ + [Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]], + Callable[Concatenate[Self, P], Coroutine[Any, Any, R]], +]: + def decorator( + func: Callable[Concatenate[Self, P], Coroutine[Any, Any, R]], + ) -> Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]: + @functools.wraps(func) + async def wrapper(self: Self, *args: P.args, **kwargs: P.kwargs) -> R: + try: + return await func(self, *args, **kwargs) + except botocore_exc.ClientError as exc: + raise _map_botocore_client_exception(exc, *args, **kwargs) from exc + except botocore_exc.WaiterError as exc: + raise EC2TimeoutError(details=f"{exc}") from exc + except botocore_exc.EndpointConnectionError as exc: + raise EC2NotConnectedError from exc + except botocore_exc.BotoCoreError as exc: + logger.exception("Unexpected error in EC2 client: ") + raise EC2RuntimeError from exc + + wrapper.__doc__ = f"{func.__doc__}\n\n{ec2_exception_handler.__doc__}" + + return wrapper + + return decorator diff --git a/packages/aws-library/src/aws_library/ec2/_errors.py b/packages/aws-library/src/aws_library/ec2/_errors.py new file mode 100644 index 00000000000..4fb0e611ed2 --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/_errors.py @@ -0,0 +1,38 @@ +# pylint: disable=too-many-ancestors +from common_library.errors_classes import OsparcErrorMixin + + +class EC2BaseError(OsparcErrorMixin, Exception): + pass + + +class EC2RuntimeError(EC2BaseError, RuntimeError): + msg_template: str = "EC2 client unexpected error" + + +class EC2NotConnectedError(EC2RuntimeError): + msg_template: str = "Cannot connect with EC2 server" + + +class EC2AccessError(EC2RuntimeError): + msg_template: str = ( + "Unexpected error while accessing EC2 backend: {operation_name}:{code}:{error}" + ) + + +class EC2TimeoutError(EC2AccessError): + msg_template: str = "Timeout while accessing EC2 backend: {details}" + + +class EC2InstanceNotFoundError(EC2AccessError): + msg_template: str = "EC2 
instance was not found" + + +class EC2InstanceTypeInvalidError(EC2AccessError): + msg_template: str = "EC2 instance type invalid" + + +class EC2TooManyInstancesError(EC2AccessError): + msg_template: str = ( + "The maximum amount of instances {num_instances} is already reached!" + ) diff --git a/packages/aws-library/src/aws_library/ec2/_models.py b/packages/aws-library/src/aws_library/ec2/_models.py new file mode 100644 index 00000000000..621adc0f4ee --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/_models.py @@ -0,0 +1,236 @@ +import datetime +import re +import tempfile +from dataclasses import dataclass +from typing import Annotated, Final, TypeAlias + +import sh # type: ignore[import-untyped] +from models_library.docker import DockerGenericTag +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + NonNegativeFloat, + NonNegativeInt, + StringConstraints, + field_validator, +) +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType + + +class Resources(BaseModel, frozen=True): + cpus: NonNegativeFloat + ram: ByteSize + + @classmethod + def create_as_empty(cls) -> "Resources": + return cls(cpus=0, ram=ByteSize(0)) + + def __ge__(self, other: "Resources") -> bool: + return self.cpus >= other.cpus and self.ram >= other.ram + + def __gt__(self, other: "Resources") -> bool: + return self.cpus > other.cpus or self.ram > other.ram + + def __add__(self, other: "Resources") -> "Resources": + return Resources.model_construct( + **{ + key: a + b + for (key, a), b in zip( + self.model_dump().items(), other.model_dump().values(), strict=True + ) + } + ) + + def __sub__(self, other: "Resources") -> "Resources": + return Resources.model_construct( + **{ + key: a - b + for (key, a), b in zip( + self.model_dump().items(), other.model_dump().values(), strict=True + ) + } + ) + + @field_validator("cpus", mode="before") + @classmethod + def _floor_cpus_to_0(cls, v: float) -> float: + return max(v, 0) + + +@dataclass(frozen=True, kw_only=True, slots=True) +class EC2InstanceType: + name: InstanceTypeType + resources: Resources + + +InstancePrivateDNSName: TypeAlias = str + + +AWS_TAG_KEY_MIN_LENGTH: Final[int] = 1 +AWS_TAG_KEY_MAX_LENGTH: Final[int] = 128 +AWSTagKey: TypeAlias = Annotated[ + # see [https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions] + str, + StringConstraints( + min_length=AWS_TAG_KEY_MIN_LENGTH, + max_length=AWS_TAG_KEY_MAX_LENGTH, + pattern=re.compile(r"^(?!(_index|\.{1,2})$)[a-zA-Z0-9\+\-=\._:@]+$"), + ), +] + + +AWS_TAG_VALUE_MIN_LENGTH: Final[int] = 0 +AWS_TAG_VALUE_MAX_LENGTH: Final[int] = 256 +AWSTagValue: TypeAlias = Annotated[ + # see [https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions] + # quotes []{} were added as it allows to json encode. 
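The `Resources` model defined above has field-wise arithmetic and deliberately asymmetric comparisons: `>=` requires both `cpus` and `ram` to be greater-or-equal, while `>` is true as soon as any single field is strictly greater. A short illustrative sketch (values are arbitrary, not from this PR):

```python
# Illustrative values only; shows the comparison/arithmetic semantics of Resources.
from pydantic import ByteSize

from aws_library.ec2 import Resources

a = Resources(cpus=2, ram=ByteSize(4 * 1024**3))
b = Resources(cpus=1, ram=ByteSize(8 * 1024**3))

assert not a >= b  # ram is smaller, so ">=" (both fields) is False
assert a > b  # cpus is strictly greater, so ">" (any field) is True
assert (a + b) == Resources(cpus=3, ram=ByteSize(12 * 1024**3))  # field-wise sum
```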
it seems to be accepted as a value + str, + StringConstraints( + min_length=AWS_TAG_VALUE_MIN_LENGTH, + max_length=AWS_TAG_VALUE_MAX_LENGTH, + pattern=r"^[a-zA-Z0-9\s\+\-=\.,_:/@\"\'\[\]\{\}]*$", + ), +] + + +EC2Tags: TypeAlias = dict[AWSTagKey, AWSTagValue] + + +@dataclass(frozen=True) +class EC2InstanceData: + launch_time: datetime.datetime + id: str + aws_private_dns: InstancePrivateDNSName + aws_public_ip: str | None + type: InstanceTypeType + state: InstanceStateNameType + resources: Resources + tags: EC2Tags + + def __hash__(self) -> int: + return hash( + ( + self.launch_time, + self.id, + self.aws_private_dns, + self.aws_public_ip, + self.type, + self.state, + self.resources, + tuple(sorted(self.tags.items())), + ) + ) + + +@dataclass(frozen=True) +class EC2InstanceConfig: + type: EC2InstanceType + tags: EC2Tags + startup_script: str + + ami_id: str + key_name: str + security_group_ids: list[str] + subnet_id: str + iam_instance_profile: str + + +AMIIdStr: TypeAlias = str +CommandStr: TypeAlias = str + + +class EC2InstanceBootSpecific(BaseModel): + ami_id: AMIIdStr + custom_boot_scripts: list[CommandStr] = Field( + default_factory=list, + description="script(s) to run on EC2 instance startup (be careful!), " + "each entry is run one after the other using '&&' operator", + ) + pre_pull_images: list[DockerGenericTag] = Field( + default_factory=list, + description="a list of docker image/tags to pull on instance cold start", + ) + pre_pull_images_cron_interval: datetime.timedelta = Field( + default=datetime.timedelta(minutes=30), + description="time interval between pulls of images (minimum is 1 minute) " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ) + buffer_count: NonNegativeInt = Field( + default=0, description="number of buffer EC2s to keep (defaults to 0)" + ) + + @field_validator("custom_boot_scripts") + @classmethod + def validate_bash_calls(cls, v): + try: + with tempfile.NamedTemporaryFile(mode="wt", delete=True) as temp_file: + temp_file.writelines(v) + temp_file.flush() + # NOTE: this will not capture runtime errors, but at least some syntax errors such as invalid quotes + sh.bash("-n", temp_file.name) + except sh.ErrorReturnCode as exc: + msg = f"Invalid bash call in custom_boot_scripts: {v}, Error: {exc.stderr}" + raise ValueError(msg) from exc + + return v + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + # just AMI + "ami_id": "ami-123456789abcdef", + }, + { + # AMI + scripts + "ami_id": "ami-123456789abcdef", + "custom_boot_scripts": ["ls -tlah", "echo blahblah"], + }, + { + # AMI + scripts + pre-pull + "ami_id": "ami-123456789abcdef", + "custom_boot_scripts": ["ls -tlah", "echo blahblah"], + "pre_pull_images": [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ], + }, + { + # AMI + pre-pull + "ami_id": "ami-123456789abcdef", + "pre_pull_images": [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ], + }, + { + # AMI + pre-pull + cron + "ami_id": "ami-123456789abcdef", + "pre_pull_images": [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ], + "pre_pull_images_cron_interval": "01:00:00", + }, + { + # AMI + pre-pull + buffer count + "ami_id": "ami-123456789abcdef", + "pre_pull_images": [ + "nginx:latest", + 
"itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ], + "buffer_count": 10, + }, + ] + } + ) diff --git a/packages/aws-library/src/aws_library/ec2/_utils.py b/packages/aws-library/src/aws_library/ec2/_utils.py new file mode 100644 index 00000000000..d16be2cf9ea --- /dev/null +++ b/packages/aws-library/src/aws_library/ec2/_utils.py @@ -0,0 +1,45 @@ +from textwrap import dedent +from typing import TYPE_CHECKING, cast + +from types_aiobotocore_ec2.type_defs import InstanceTypeDef + +from ._models import EC2InstanceData, EC2Tags + +if TYPE_CHECKING: + from ._client import SimcoreEC2API + + +def compose_user_data(docker_join_bash_command: str) -> str: + return dedent( + f"""\ +#!/bin/bash +{docker_join_bash_command} +""" + ) + + +async def ec2_instance_data_from_aws_instance( + ec2_client: "SimcoreEC2API", + instance: InstanceTypeDef, +) -> EC2InstanceData: + assert "LaunchTime" in instance # nosec + assert "InstanceId" in instance # nosec + assert "PrivateDnsName" in instance # nosec + assert "InstanceType" in instance # nosec + assert "State" in instance # nosec + assert "Name" in instance["State"] # nosec + ec2_instance_types = await ec2_client.get_ec2_instance_capabilities( + {instance["InstanceType"]} + ) + assert len(ec2_instance_types) == 1 # nosec + assert "Tags" in instance # nosec + return EC2InstanceData( + launch_time=instance["LaunchTime"], + id=instance["InstanceId"], + aws_private_dns=instance["PrivateDnsName"], + aws_public_ip=instance.get("PublicIpAddress", None), + type=instance["InstanceType"], + state=instance["State"]["Name"], + resources=ec2_instance_types[0].resources, + tags=cast(EC2Tags, {tag["Key"]: tag["Value"] for tag in instance["Tags"]}), + ) diff --git a/packages/service-integration/src/service_integration/commands/__init__.py b/packages/aws-library/src/aws_library/py.typed similarity index 100% rename from packages/service-integration/src/service_integration/commands/__init__.py rename to packages/aws-library/src/aws_library/py.typed diff --git a/packages/aws-library/src/aws_library/s3/__init__.py b/packages/aws-library/src/aws_library/s3/__init__.py new file mode 100644 index 00000000000..ea8f6264d60 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/__init__.py @@ -0,0 +1,44 @@ +from ._client import ( + CopiedBytesTransferredCallback, + SimcoreS3API, + UploadedBytesTransferredCallback, +) +from ._constants import PRESIGNED_LINK_MAX_SIZE, S3_MAX_FILE_SIZE +from ._errors import ( + S3AccessError, + S3BucketInvalidError, + S3DestinationNotEmptyError, + S3KeyNotFoundError, + S3NotConnectedError, + S3RuntimeError, + S3UploadNotFoundError, +) +from ._models import ( + MultiPartUploadLinks, + S3DirectoryMetaData, + S3MetaData, + S3ObjectKey, + UploadID, +) + +__all__: tuple[str, ...] 
= ( + "CopiedBytesTransferredCallback", + "MultiPartUploadLinks", + "PRESIGNED_LINK_MAX_SIZE", + "S3_MAX_FILE_SIZE", + "S3AccessError", + "S3BucketInvalidError", + "S3DestinationNotEmptyError", + "S3DirectoryMetaData", + "S3KeyNotFoundError", + "S3MetaData", + "S3NotConnectedError", + "S3ObjectKey", + "S3RuntimeError", + "S3UploadNotFoundError", + "SimcoreS3API", + "UploadedBytesTransferredCallback", + "UploadID", +) + +# nopycln: file diff --git a/packages/aws-library/src/aws_library/s3/_client.py b/packages/aws-library/src/aws_library/s3/_client.py new file mode 100644 index 00000000000..69dba4fa343 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_client.py @@ -0,0 +1,655 @@ +import asyncio +import contextlib +import functools +import logging +import urllib.parse +from collections.abc import AsyncGenerator, Sequence +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Final, Literal, Protocol, cast + +import aioboto3 +from aiobotocore.session import ClientCreatorContext +from boto3.s3.transfer import TransferConfig +from botocore import exceptions as botocore_exc +from botocore.client import Config +from models_library.api_schemas_storage.storage_schemas import ( + ETag, + S3BucketName, + UploadedPart, +) +from models_library.basic_types import SHA256Str +from models_library.bytes_iters import BytesIter, DataSize +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.bytes_iters import DEFAULT_READ_CHUNK_SIZE, BytesStreamer +from servicelib.logging_utils import log_catch, log_context +from servicelib.s3_utils import FileLikeReader +from servicelib.utils import limited_gather +from settings_library.s3 import S3Settings +from types_aiobotocore_s3 import S3Client +from types_aiobotocore_s3.literals import BucketLocationConstraintType +from types_aiobotocore_s3.type_defs import ( + ListObjectsV2RequestTypeDef, + ObjectIdentifierTypeDef, +) + +from ._constants import ( + MULTIPART_COPY_THRESHOLD, + MULTIPART_UPLOADS_MIN_TOTAL_SIZE, + S3_OBJECT_DELIMITER, +) +from ._error_handler import s3_exception_handler, s3_exception_handler_async_gen +from ._errors import S3DestinationNotEmptyError, S3KeyNotFoundError +from ._models import ( + MultiPartUploadLinks, + PathCursor, + S3DirectoryMetaData, + S3MetaData, + S3ObjectKey, + S3ObjectPrefix, + UploadID, +) +from ._utils import compute_num_file_chunks, create_final_prefix + +_logger = logging.getLogger(__name__) + +_S3_MAX_CONCURRENCY_DEFAULT: Final[int] = 10 +_DEFAULT_AWS_REGION: Final[str] = "us-east-1" +_MAX_ITEMS_PER_PAGE: Final[int] = 500 +_MAX_CONCURRENT_COPY: Final[int] = 4 +_AWS_MAX_ITEMS_PER_PAGE: Final[int] = 1000 + + +ListAnyUrlTypeAdapter: Final[TypeAdapter[list[AnyUrl]]] = TypeAdapter(list[AnyUrl]) + + +class UploadedBytesTransferredCallback(Protocol): + def __call__(self, bytes_transferred: int, *, file_name: str) -> None: ... + + +class CopiedBytesTransferredCallback(Protocol): + def __call__(self, total_bytes_copied: int, *, file_name: str) -> None: ... 
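The two `Protocol`s above are the only contract the S3 client imposes on progress reporting. A minimal sketch of a callback compatible with `UploadedBytesTransferredCallback` (class name and print-based reporting are illustrative only); instances of such a class can be handed to the client's upload/copy methods further below:

```python
# Minimal example of a progress callback matching the Protocols above.
from dataclasses import dataclass, field


@dataclass
class SimpleUploadProgress:
    total_transferred: dict[str, int] = field(default_factory=dict)

    def __call__(self, bytes_transferred: int, *, file_name: str) -> None:
        # the transfer manager typically calls back with incremental byte counts,
        # so accumulate them per file
        self.total_transferred[file_name] = (
            self.total_transferred.get(file_name, 0) + bytes_transferred
        )
        print(f"{file_name}: {self.total_transferred[file_name]} bytes uploaded")
```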
+ + +@dataclass(frozen=True) +class SimcoreS3API: # pylint: disable=too-many-public-methods + _client: S3Client + _session: aioboto3.Session + _exit_stack: contextlib.AsyncExitStack = field( + default_factory=contextlib.AsyncExitStack + ) + transfer_max_concurrency: int = _S3_MAX_CONCURRENCY_DEFAULT + + @classmethod + async def create( + cls, settings: S3Settings, s3_max_concurrency: int = _S3_MAX_CONCURRENCY_DEFAULT + ) -> "SimcoreS3API": + session = aioboto3.Session() + session_client = None + exit_stack = contextlib.AsyncExitStack() + try: + config = Config( + # This setting tells the S3 client to only calculate checksums when explicitly required + # by the operation. This avoids unnecessary checksum calculations for operations that + # don't need them, improving performance. + # See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3.html#calculating-checksums + signature_version="s3v4", + request_checksum_calculation="when_required", # type: ignore[call-arg] + ) + session_client = session.client( # type: ignore[call-overload] + "s3", + endpoint_url=f"{settings.S3_ENDPOINT}", + aws_access_key_id=settings.S3_ACCESS_KEY, + aws_secret_access_key=settings.S3_SECRET_KEY, + region_name=settings.S3_REGION, + config=config, + ) + assert isinstance(session_client, ClientCreatorContext) # nosec + + s3_client = cast( + S3Client, await exit_stack.enter_async_context(session_client) + ) + # NOTE: this triggers a botocore.exception.ClientError in case the connection is not made to the S3 backend + await s3_client.list_buckets() + + return cls(s3_client, session, exit_stack, s3_max_concurrency) + except Exception: + await exit_stack.aclose() + + raise + + async def close(self) -> None: + await self._exit_stack.aclose() + + async def http_check_bucket_connected(self, *, bucket: S3BucketName) -> bool: + with log_catch(_logger, reraise=False): + return await self.bucket_exists(bucket=bucket) + return False + + @s3_exception_handler(_logger) + async def create_bucket( + self, + *, + bucket: S3BucketName, + region: BucketLocationConstraintType | Literal["us-east-1"], + ) -> None: + with log_context( + _logger, logging.INFO, msg=f"Create bucket {bucket} in {region}" + ): + try: + # NOTE: see https://github.com/boto/boto3/issues/125 why this is so... 
(sic) + # setting it for the us-east-1 creates issue when creating buckets + create_bucket_config: dict[str, Any] = {"Bucket": f"{bucket}"} + if region != _DEFAULT_AWS_REGION: + create_bucket_config["CreateBucketConfiguration"] = { + "LocationConstraint": region + } + + await self._client.create_bucket(**create_bucket_config) + + except self._client.exceptions.BucketAlreadyOwnedByYou: + _logger.info( + "Bucket %s already exists and is owned by us", + bucket, + ) + + @s3_exception_handler(_logger) + async def bucket_exists(self, *, bucket: S3BucketName) -> bool: + """ + :raises: S3AccessError for any other error + """ + try: + await self._client.head_bucket(Bucket=bucket) + return True + except botocore_exc.ClientError as exc: + status_code = exc.response.get("Error", {}).get("Code", -1) + if status_code == "404": + return False + raise + + @s3_exception_handler(_logger) + async def object_exists( + self, *, bucket: S3BucketName, object_key: S3ObjectKey + ) -> bool: + # SEE https://www.peterbe.com/plog/fastest-way-to-find-out-if-a-file-exists-in-s3 + response = await self._client.list_objects_v2(Bucket=bucket, Prefix=object_key) + return len(response.get("Contents", [])) > 0 + + @s3_exception_handler(_logger) + async def get_object_metadata( + self, *, bucket: S3BucketName, object_key: S3ObjectKey + ) -> S3MetaData: + response = await self._client.head_object( + Bucket=bucket, Key=object_key, ChecksumMode="ENABLED" + ) + return S3MetaData.from_botocore_head_object(object_key, response) + + @s3_exception_handler(_logger) + async def get_directory_metadata( + self, *, bucket: S3BucketName, prefix: str + ) -> S3DirectoryMetaData: + size = 0 + async for s3_object in self._list_all_objects(bucket=bucket, prefix=prefix): + size += s3_object.size + return S3DirectoryMetaData(prefix=S3ObjectPrefix(prefix), size=ByteSize(size)) + + @s3_exception_handler(_logger) + async def count_objects( + self, + *, + bucket: S3BucketName, + prefix: S3ObjectPrefix | None, + start_after: S3ObjectKey | None, + is_partial_prefix: bool = False, + use_delimiter: bool = True, + ) -> int: + """returns the number of entries in the bucket, defined + by prefix and start_after same as list_objects + """ + paginator = self._client.get_paginator("list_objects_v2") + total_count = 0 + async for page in paginator.paginate( + Bucket=bucket, + Prefix=create_final_prefix(prefix, is_partial_prefix=is_partial_prefix), + StartAfter=start_after or "", + Delimiter=S3_OBJECT_DELIMITER if use_delimiter else "", + ): + total_count += page.get("KeyCount", 0) + return total_count + + @s3_exception_handler(_logger) + async def list_objects( + self, + *, + bucket: S3BucketName, + prefix: S3ObjectPrefix | None, + start_after: S3ObjectKey | None, + limit: int = _MAX_ITEMS_PER_PAGE, + next_cursor: PathCursor | None = None, + is_partial_prefix: bool = False, + ) -> tuple[list[S3MetaData | S3DirectoryMetaData], PathCursor | None]: + """returns a number of entries in the bucket, defined by limit + the entries are sorted alphabetically by key. If a cursor is returned + then the client can call the function again with the cursor to get the + next entries. 
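As an aside, the bucket-level helpers shown so far (`create`, `create_bucket`, `bucket_exists`, `object_exists`, `get_object_metadata`) already cover basic housekeeping. A hedged usage sketch follows, not part of this PR: endpoint, credentials, bucket and key names are placeholders, and only the `S3Settings` fields referenced by `SimcoreS3API.create` are spelled out (the real settings class may define or require others).

```python
# Hedged usage sketch: all names/credentials are placeholders.
import asyncio

from aws_library.s3 import SimcoreS3API
from settings_library.s3 import S3Settings


async def main() -> None:
    settings = S3Settings(
        S3_ENDPOINT="https://s3.amazonaws.com",  # placeholder
        S3_ACCESS_KEY="my-access-key",  # placeholder
        S3_SECRET_KEY="my-secret-key",  # placeholder
        S3_REGION="us-east-1",
        S3_BUCKET_NAME="my-bucket",  # assumption: field required by S3Settings
    )
    s3 = await SimcoreS3API.create(settings)
    try:
        await s3.create_bucket(bucket="my-bucket", region="us-east-1")
        if await s3.object_exists(bucket="my-bucket", object_key="data/file.bin"):
            metadata = await s3.get_object_metadata(
                bucket="my-bucket", object_key="data/file.bin"
            )
            print(metadata)
    finally:
        await s3.close()


if __name__ == "__main__":
    asyncio.run(main())
```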
+ + the first entry is defined by start_after + if start_after is None, the first entry is the first one in the bucket + if prefix is not None, only entries with the given prefix are returned + if prefix is None, all entries in the bucket are returned + if next_cursor is set, then the call will return the next entries after the cursor + if is_partial_prefix is set then the prefix is not auto-delimited + (if False equivalent to `ls /home/user/` + if True equivalent to `ls /home/user*`) + limit must be >= 1 and <= _AWS_MAX_ITEMS_PER_PAGE + + Raises: + ValueError: in case of invalid limit + """ + if limit < 1: + msg = "num_objects must be >= 1" + raise ValueError(msg) + if limit > _AWS_MAX_ITEMS_PER_PAGE: + msg = f"num_objects must be <= {_AWS_MAX_ITEMS_PER_PAGE}" + raise ValueError(msg) + + list_config: ListObjectsV2RequestTypeDef = { + "Bucket": bucket, + "Prefix": create_final_prefix(prefix, is_partial_prefix=is_partial_prefix), + "MaxKeys": limit, + "Delimiter": S3_OBJECT_DELIMITER, + } + if start_after: + list_config["StartAfter"] = start_after + if next_cursor: + list_config["ContinuationToken"] = next_cursor + listed_objects = await self._client.list_objects_v2(**list_config) + found_objects: list[S3MetaData | S3DirectoryMetaData] = [] + if "CommonPrefixes" in listed_objects: + # we have folders here + list_subfolders = listed_objects["CommonPrefixes"] + found_objects.extend( + S3DirectoryMetaData.model_construct( + prefix=S3ObjectPrefix(subfolder["Prefix"], size=None) + ) + for subfolder in list_subfolders + if "Prefix" in subfolder + ) + if "Contents" in listed_objects: + found_objects.extend( + S3MetaData.from_botocore_list_objects(obj) + for obj in listed_objects["Contents"] + ) + next_cursor = None + if listed_objects["IsTruncated"]: + next_cursor = listed_objects["NextContinuationToken"] + return found_objects, next_cursor + + @s3_exception_handler_async_gen(_logger) + async def list_objects_paginated( + self, + bucket: S3BucketName, + prefix: str, + *, + items_per_page: int = _MAX_ITEMS_PER_PAGE, + ) -> AsyncGenerator[list[S3MetaData], None]: + if items_per_page > _AWS_MAX_ITEMS_PER_PAGE: + msg = f"items_per_page must be <= {_AWS_MAX_ITEMS_PER_PAGE}" + raise ValueError(msg) + async for page in self._client.get_paginator("list_objects_v2").paginate( + Bucket=bucket, + Prefix=prefix, + PaginationConfig={ + "PageSize": items_per_page, + }, + ): + yield [ + S3MetaData.from_botocore_list_objects(obj) + for obj in page.get("Contents", []) + ] + + async def _list_all_objects( + self, *, bucket: S3BucketName, prefix: str + ) -> AsyncGenerator[S3MetaData, None]: + async for s3_objects in self.list_objects_paginated( + bucket=bucket, prefix=prefix + ): + for obj in s3_objects: + yield obj + + @s3_exception_handler(_logger) + async def delete_objects_recursively( + self, *, bucket: S3BucketName, prefix: str + ) -> None: + # NOTE: deletion of objects is done in batches of max 1000 elements, + # the maximum accepted by the S3 API + with log_context( + _logger, logging.DEBUG, f"deleting objects in {prefix=}", log_duration=True + ): + async for s3_objects in self.list_objects_paginated( + bucket=bucket, prefix=prefix + ): + objects_to_delete: Sequence[ObjectIdentifierTypeDef] = [ + {"Key": f"{_.object_key}"} for _ in s3_objects + ] + if objects_to_delete: + await self._client.delete_objects( + Bucket=bucket, + Delete={"Objects": objects_to_delete}, + ) + + @s3_exception_handler(_logger) + async def delete_object( + self, *, bucket: S3BucketName, object_key: S3ObjectKey + ) -> None: + await 
self._client.delete_object(Bucket=bucket, Key=object_key) + + @s3_exception_handler(_logger) + async def undelete_object( + self, *, bucket: S3BucketName, object_key: S3ObjectKey + ) -> None: + """this allows to restore a file that was deleted. + **NOT to restore previous versions!""" + with log_context( + _logger, logging.DEBUG, msg=f"undeleting {bucket}/{object_key}" + ): + response = await self._client.list_object_versions( + Bucket=bucket, Prefix=object_key, MaxKeys=1 + ) + _logger.debug("%s", f"{response=}") + if not response["IsTruncated"] and all( + _ not in response for _ in ("Versions", "DeleteMarkers") + ): + raise S3KeyNotFoundError(key=object_key, bucket=bucket) + if "DeleteMarkers" in response: + # we have something to undelete + latest_version = response["DeleteMarkers"][0] + assert "IsLatest" in latest_version # nosec + assert "VersionId" in latest_version # nosec + await self._client.delete_object( + Bucket=bucket, + Key=object_key, + VersionId=latest_version["VersionId"], + ) + _logger.debug("restored %s", f"{bucket}/{object_key}") + + @s3_exception_handler(_logger) + async def create_single_presigned_download_link( + self, + *, + bucket: S3BucketName, + object_key: S3ObjectKey, + expiration_secs: int, + ) -> AnyUrl: + # NOTE: ensure the bucket/object exists, this will raise if not + await self._client.head_bucket(Bucket=bucket) + await self._client.head_object(Bucket=bucket, Key=object_key) + generated_link = await self._client.generate_presigned_url( + "get_object", + Params={"Bucket": bucket, "Key": object_key}, + ExpiresIn=expiration_secs, + ) + return TypeAdapter(AnyUrl).validate_python(generated_link) + + @s3_exception_handler(_logger) + async def create_single_presigned_upload_link( + self, *, bucket: S3BucketName, object_key: S3ObjectKey, expiration_secs: int + ) -> AnyUrl: + # NOTE: ensure the bucket/object exists, this will raise if not + await self._client.head_bucket(Bucket=bucket) + generated_link = await self._client.generate_presigned_url( + "put_object", + Params={"Bucket": bucket, "Key": object_key}, + ExpiresIn=expiration_secs, + ) + return TypeAdapter(AnyUrl).validate_python(generated_link) + + @s3_exception_handler(_logger) + async def create_multipart_upload_links( + self, + *, + bucket: S3BucketName, + object_key: S3ObjectKey, + file_size: ByteSize, + expiration_secs: int, + sha256_checksum: SHA256Str | None, + ) -> MultiPartUploadLinks: + # NOTE: ensure the bucket exists, this will raise if not + await self._client.head_bucket(Bucket=bucket) + # first initiate the multipart upload + create_input: dict[str, Any] = {"Bucket": bucket, "Key": object_key} + if sha256_checksum: + create_input["Metadata"] = {"sha256_checksum": sha256_checksum} + response = await self._client.create_multipart_upload(**create_input) + upload_id = response["UploadId"] + # compute the number of links, based on the announced file size + num_upload_links, chunk_size = compute_num_file_chunks(file_size) + # now create the links + upload_links = ListAnyUrlTypeAdapter.validate_python( + await asyncio.gather( + *( + self._client.generate_presigned_url( + "upload_part", + Params={ + "Bucket": bucket, + "Key": object_key, + "PartNumber": i + 1, + "UploadId": upload_id, + }, + ExpiresIn=expiration_secs, + ) + for i in range(num_upload_links) + ), + ), + ) + return MultiPartUploadLinks( + upload_id=upload_id, chunk_size=chunk_size, urls=upload_links + ) + + @s3_exception_handler(_logger) + async def list_ongoing_multipart_uploads( + self, + *, + bucket: S3BucketName, + ) -> 
list[tuple[UploadID, S3ObjectKey]]: + """Returns all the currently ongoing multipart uploads + + NOTE: minio does not implement the same behaviour as AWS here and will + only return the uploads if a prefix or object name is given [minio issue](https://github.com/minio/minio/issues/7632). + + :return: list of AWS uploads see [boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_multipart_uploads) + """ + response = await self._client.list_multipart_uploads( + Bucket=bucket, + ) + + return [ + ( + upload.get("UploadId", "undefined-uploadid"), + S3ObjectKey(upload.get("Key", "undefined-key")), + ) + for upload in response.get("Uploads", []) + ] + + @s3_exception_handler(_logger) + async def abort_multipart_upload( + self, *, bucket: S3BucketName, object_key: S3ObjectKey, upload_id: UploadID + ) -> None: + await self._client.abort_multipart_upload( + Bucket=bucket, Key=object_key, UploadId=upload_id + ) + + @s3_exception_handler(_logger) + async def complete_multipart_upload( + self, + *, + bucket: S3BucketName, + object_key: S3ObjectKey, + upload_id: UploadID, + uploaded_parts: list[UploadedPart], + ) -> ETag: + inputs: dict[str, Any] = { + "Bucket": bucket, + "Key": object_key, + "UploadId": upload_id, + "MultipartUpload": { + "Parts": [ + {"ETag": part.e_tag, "PartNumber": part.number} + for part in uploaded_parts + ] + }, + } + response = await self._client.complete_multipart_upload(**inputs) + return response["ETag"] + + @s3_exception_handler(_logger) + async def upload_file( + self, + *, + bucket: S3BucketName, + file: Path, + object_key: S3ObjectKey, + bytes_transfered_cb: UploadedBytesTransferredCallback | None, + ) -> None: + """upload a file using aioboto3 transfer manager (e.g. works >5Gb and creates multiple threads)""" + upload_options: dict[str, Any] = { + "Bucket": bucket, + "Key": object_key, + "Config": TransferConfig(max_concurrency=self.transfer_max_concurrency), + } + if bytes_transfered_cb: + upload_options |= { + "Callback": functools.partial( + bytes_transfered_cb, file_name=f"{object_key}" + ) + } + await self._client.upload_file(f"{file}", **upload_options) + + @s3_exception_handler(_logger) + async def copy_object( + self, + *, + bucket: S3BucketName, + src_object_key: S3ObjectKey, + dst_object_key: S3ObjectKey, + bytes_transfered_cb: CopiedBytesTransferredCallback | None, + object_metadata: S3MetaData | None = None, + ) -> None: + """copy a file in S3 using aioboto3 transfer manager (e.g. 
works >5Gb and creates multiple threads)""" + copy_options: dict[str, Any] = { + "CopySource": {"Bucket": bucket, "Key": src_object_key}, + "Bucket": bucket, + "Key": dst_object_key, + "Config": TransferConfig( + max_concurrency=self.transfer_max_concurrency, + multipart_threshold=MULTIPART_COPY_THRESHOLD, + ), + } + if bytes_transfered_cb: + copy_options |= { + "Callback": functools.partial( + bytes_transfered_cb, file_name=f"{dst_object_key}" + ) + } + # NOTE: boto3 copy function uses copy_object until 'multipart_threshold' is reached then switches to multipart copy + # copy_object does not provide any callbacks so we can't track progress so we need to ensure at least the completion + # of the object is tracked + await self._client.copy(**copy_options) + if bytes_transfered_cb: + if object_metadata is None: + object_metadata = await self.get_object_metadata( + bucket=bucket, object_key=dst_object_key + ) + bytes_transfered_cb(object_metadata.size, file_name=f"{dst_object_key}") + + @s3_exception_handler(_logger) + async def copy_objects_recursively( + self, + *, + bucket: S3BucketName, + src_prefix: str, + dst_prefix: str, + bytes_transfered_cb: CopiedBytesTransferredCallback | None, + ) -> None: + """copy from 1 location in S3 to another recreating the same structure""" + dst_metadata = await self.get_directory_metadata( + bucket=bucket, prefix=dst_prefix + ) + if dst_metadata.size and dst_metadata.size > 0: + raise S3DestinationNotEmptyError(dst_prefix=dst_prefix) + await limited_gather( + *[ + self.copy_object( + bucket=bucket, + src_object_key=s3_object.object_key, + dst_object_key=s3_object.object_key.replace(src_prefix, dst_prefix), + bytes_transfered_cb=bytes_transfered_cb, + object_metadata=s3_object, + ) + async for s3_object in self._list_all_objects( + bucket=bucket, prefix=src_prefix + ) + ], + limit=_MAX_CONCURRENT_COPY, + ) + + async def get_bytes_streamer_from_object( + self, + bucket_name: S3BucketName, + object_key: S3ObjectKey, + *, + chunk_size: int = DEFAULT_READ_CHUNK_SIZE, + ) -> BytesStreamer: + """stream read an object from S3 chunk by chunk""" + + # NOTE `download_fileobj` cannot be used to implement this because + # it will buffer the entire file in memory instead of reading it + # chunk by chunk + + # below is a quick call + head_response = await self._client.head_object( + Bucket=bucket_name, Key=object_key + ) + data_size = DataSize(head_response["ContentLength"]) + + async def _() -> BytesIter: + # Download the file in chunks + position = 0 + while position < data_size: + # Calculate the range for this chunk + end = min(position + chunk_size - 1, data_size - 1) + range_header = f"bytes={position}-{end}" + + # Download the chunk + response = await self._client.get_object( + Bucket=bucket_name, Key=object_key, Range=range_header + ) + + chunk = await response["Body"].read() + + # Yield the chunk for processing + yield chunk + + position += chunk_size + + return BytesStreamer(data_size, _) + + @s3_exception_handler(_logger) + async def upload_object_from_file_like( + self, + bucket_name: S3BucketName, + object_key: S3ObjectKey, + file_like_reader: FileLikeReader, + ) -> None: + """streams write an object in S3 from an AsyncIterable[bytes]""" + await self._client.upload_fileobj(file_like_reader, bucket_name, object_key) # type: ignore[arg-type] + + @staticmethod + def is_multipart(file_size: ByteSize) -> bool: + return file_size >= MULTIPART_UPLOADS_MIN_TOTAL_SIZE + + @staticmethod + def compute_s3_url(*, bucket: S3BucketName, object_key: S3ObjectKey) -> AnyUrl: 
+ return TypeAdapter(AnyUrl).validate_python( + f"s3://{bucket}/{urllib.parse.quote(object_key)}" + ) diff --git a/packages/aws-library/src/aws_library/s3/_constants.py b/packages/aws-library/src/aws_library/s3/_constants.py new file mode 100644 index 00000000000..258a890f835 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_constants.py @@ -0,0 +1,18 @@ +from typing import Final + +from pydantic import ByteSize, TypeAdapter + +# NOTE: AWS S3 upload limits https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +MULTIPART_UPLOADS_MIN_TOTAL_SIZE: Final[ByteSize] = TypeAdapter( + ByteSize +).validate_python("100MiB") +MULTIPART_COPY_THRESHOLD: Final[ByteSize] = TypeAdapter(ByteSize).validate_python( + "100MiB" +) +STREAM_READER_CHUNK_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python( + "10MiB" +) + +PRESIGNED_LINK_MAX_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python("5GiB") +S3_MAX_FILE_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python("5TiB") +S3_OBJECT_DELIMITER: Final[str] = "/" diff --git a/packages/aws-library/src/aws_library/s3/_error_handler.py b/packages/aws-library/src/aws_library/s3/_error_handler.py new file mode 100644 index 00000000000..b0bf38e8d63 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_error_handler.py @@ -0,0 +1,143 @@ +import functools +import inspect +import logging +from collections.abc import AsyncGenerator, Callable, Coroutine +from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar + +from botocore import exceptions as botocore_exc + +from ._errors import ( + S3AccessError, + S3BucketInvalidError, + S3KeyNotFoundError, + S3UploadNotFoundError, +) + +if TYPE_CHECKING: + # NOTE: TYPE_CHECKING is True when static type checkers are running, + # allowing for circular imports only for them (mypy, pylance, ruff) + from ._client import SimcoreS3API + + +def _map_botocore_client_exception( + botocore_error: botocore_exc.ClientError, **kwargs +) -> S3AccessError: + status_code = int( + botocore_error.response.get("ResponseMetadata", {}).get("HTTPStatusCode") + or botocore_error.response.get("Error", {}).get("Code", -1) + ) + operation_name = botocore_error.operation_name + match status_code, operation_name: + case 404, "HeadObject": + return S3KeyNotFoundError( + bucket=kwargs["bucket"], + key=kwargs.get("object_key") or kwargs.get("src_object_key"), + ) + case (404, "HeadBucket") | (403, "HeadBucket"): + return S3BucketInvalidError(bucket=kwargs["bucket"]) + case (404, "AbortMultipartUpload") | ( + 500, + "CompleteMultipartUpload", + ): + return S3UploadNotFoundError( + bucket=kwargs["bucket"], key=kwargs["object_key"] + ) + case _: + return S3AccessError() + + +P = ParamSpec("P") +R = TypeVar("R") +T = TypeVar("T") +Self = TypeVar("Self", bound="SimcoreS3API") + + +def s3_exception_handler( + logger: logging.Logger, +) -> Callable[ + [Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]], + Callable[Concatenate[Self, P], Coroutine[Any, Any, R]], +]: + """ + Raises: + S3BucketInvalidError: + S3KeyNotFoundError: + S3BucketInvalidError: + S3UploadNotFoundError: + S3AccessError: + """ + + def decorator( + func: Callable[Concatenate[Self, P], Coroutine[Any, Any, R]] + ) -> Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]: + @functools.wraps(func) + async def wrapper(self: Self, *args: P.args, **kwargs: P.kwargs) -> R: + try: + return await func(self, *args, **kwargs) + except ( + self._client.exceptions.NoSuchBucket # pylint: disable=protected-access + ) as exc: + raise 
S3BucketInvalidError( + bucket=exc.response.get("Error", {}).get("BucketName", "undefined") + ) from exc + except botocore_exc.ClientError as exc: + raise _map_botocore_client_exception(exc, **kwargs) from exc + except botocore_exc.EndpointConnectionError as exc: + raise S3AccessError from exc + except botocore_exc.BotoCoreError as exc: + logger.exception("Unexpected error in s3 client: ") + raise S3AccessError from exc + + wrapper.__doc__ = f"{func.__doc__}\n\n{s3_exception_handler.__doc__}" + + return wrapper + + return decorator + + +def s3_exception_handler_async_gen( + logger: logging.Logger, +) -> Callable[ + [Callable[Concatenate[Self, P], AsyncGenerator[T, None]]], + Callable[Concatenate[Self, P], AsyncGenerator[T, None]], +]: + """ + Raises: + S3BucketInvalidError: + S3KeyNotFoundError: + S3BucketInvalidError: + S3UploadNotFoundError: + S3AccessError: + """ + + def decorator( + func: Callable[Concatenate[Self, P], AsyncGenerator[T, None]] + ) -> Callable[Concatenate[Self, P], AsyncGenerator[T, None]]: + @functools.wraps(func) + async def async_generator_wrapper( + self: Self, *args: P.args, **kwargs: P.kwargs + ) -> AsyncGenerator[T, None]: + try: + assert inspect.isasyncgenfunction(func) # nosec + async for item in func(self, *args, **kwargs): + yield item + except ( + self._client.exceptions.NoSuchBucket # pylint: disable=protected-access + ) as exc: + raise S3BucketInvalidError( + bucket=exc.response.get("Error", {}).get("BucketName", "undefined") + ) from exc + except botocore_exc.ClientError as exc: + raise _map_botocore_client_exception(exc, **kwargs) from exc + except botocore_exc.EndpointConnectionError as exc: + raise S3AccessError from exc + except botocore_exc.BotoCoreError as exc: + logger.exception("Unexpected error in s3 client: ") + raise S3AccessError from exc + + async_generator_wrapper.__doc__ = ( + f"{func.__doc__}\n\n{s3_exception_handler_async_gen.__doc__}" + ) + return async_generator_wrapper + + return decorator diff --git a/packages/aws-library/src/aws_library/s3/_errors.py b/packages/aws-library/src/aws_library/s3/_errors.py new file mode 100644 index 00000000000..3bafa217257 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_errors.py @@ -0,0 +1,29 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class S3RuntimeError(OsparcErrorMixin, RuntimeError): + msg_template: str = "S3 client unexpected error" + + +class S3NotConnectedError(S3RuntimeError): + msg_template: str = "Cannot connect with s3 server" + + +class S3AccessError(S3RuntimeError): + msg_template: str = "Unexpected error while accessing S3 backend" + + +class S3BucketInvalidError(S3AccessError): + msg_template: str = "The bucket '{bucket}' is invalid" + + +class S3KeyNotFoundError(S3AccessError): + msg_template: str = "The file {key} in {bucket} was not found" + + +class S3UploadNotFoundError(S3AccessError): + msg_template: str = "The upload for {key} in {bucket} was not found" + + +class S3DestinationNotEmptyError(S3AccessError): + msg_template: str = "The destination {dst_prefix} is not empty" diff --git a/packages/aws-library/src/aws_library/s3/_models.py b/packages/aws-library/src/aws_library/s3/_models.py new file mode 100644 index 00000000000..4d722386526 --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_models.py @@ -0,0 +1,68 @@ +import datetime +from pathlib import Path +from typing import TypeAlias, cast + +from models_library.api_schemas_storage.storage_schemas import ETag +from models_library.basic_types import SHA256Str +from pydantic import 
AnyUrl, BaseModel, ByteSize, Field +from types_aiobotocore_s3.type_defs import HeadObjectOutputTypeDef, ObjectTypeDef + +S3ObjectKey: TypeAlias = str +S3ObjectPrefix: TypeAlias = Path +UploadID: TypeAlias = str +PathCursor: TypeAlias = str + + +class S3MetaData(BaseModel, frozen=True): + object_key: S3ObjectKey + last_modified: datetime.datetime + e_tag: ETag + sha256_checksum: SHA256Str | None + size: ByteSize + + @staticmethod + def from_botocore_head_object( + object_key: S3ObjectKey, obj: HeadObjectOutputTypeDef + ) -> "S3MetaData": + return S3MetaData( + object_key=object_key, + last_modified=obj["LastModified"], + e_tag=obj["ETag"].strip('"'), + sha256_checksum=obj.get("ChecksumSHA256"), + size=ByteSize(obj["ContentLength"]), + ) + + @staticmethod + def from_botocore_list_objects( + obj: ObjectTypeDef, + ) -> "S3MetaData": + assert "Key" in obj # nosec + assert "LastModified" in obj # nosec + assert "ETag" in obj # nosec + assert "Size" in obj # nosec + return S3MetaData( + object_key=obj["Key"], + last_modified=obj["LastModified"], + e_tag=obj["ETag"].strip('"'), + sha256_checksum=cast(SHA256Str | None, obj.get("ChecksumSHA256")), + size=ByteSize(obj["Size"]), + ) + + def as_path(self) -> Path: + return Path(self.object_key) + + +class S3DirectoryMetaData(BaseModel, frozen=True): + prefix: S3ObjectPrefix + size: ByteSize | None = Field( + ..., description="Size of the directory if computed, None if unknown" + ) + + def as_path(self) -> Path: + return self.prefix + + +class MultiPartUploadLinks(BaseModel): + upload_id: UploadID + chunk_size: ByteSize + urls: list[AnyUrl] diff --git a/packages/aws-library/src/aws_library/s3/_utils.py b/packages/aws-library/src/aws_library/s3/_utils.py new file mode 100644 index 00000000000..51024f0f15a --- /dev/null +++ b/packages/aws-library/src/aws_library/s3/_utils.py @@ -0,0 +1,51 @@ +from typing import Final + +from pydantic import ByteSize, TypeAdapter + +from ._constants import S3_OBJECT_DELIMITER +from ._models import S3ObjectPrefix + +_MULTIPART_MAX_NUMBER_OF_PARTS: Final[int] = 10000 + +# this is artifically defined, if possible we keep a maximum number of requests for parallel +# uploading. 
If that is not possible then we create as many upload part as the max part size allows +_MULTIPART_UPLOADS_TARGET_MAX_PART_SIZE: Final[list[ByteSize]] = [ + TypeAdapter(ByteSize).validate_python(x) + for x in [ + "10Mib", + "50Mib", + "100Mib", + "200Mib", + "400Mib", + "600Mib", + "800Mib", + "1Gib", + "2Gib", + "3Gib", + "4Gib", + "5Gib", + ] +] + + +def compute_num_file_chunks(file_size: ByteSize) -> tuple[int, ByteSize]: + for chunk in _MULTIPART_UPLOADS_TARGET_MAX_PART_SIZE: + num_upload_links = int(file_size / chunk) + (1 if file_size % chunk > 0 else 0) + if num_upload_links < _MULTIPART_MAX_NUMBER_OF_PARTS: + return (num_upload_links, chunk) + msg = f"Could not determine number of upload links for {file_size=}" + raise ValueError( + msg, + ) + + +def create_final_prefix( + prefix: S3ObjectPrefix | None, *, is_partial_prefix: bool +) -> str: + final_prefix = f"{prefix}" if prefix else "" + if prefix and not is_partial_prefix: + final_prefix = ( + f"{final_prefix.rstrip(S3_OBJECT_DELIMITER)}{S3_OBJECT_DELIMITER}" + ) + + return final_prefix diff --git a/packages/aws-library/src/aws_library/ssm/__init__.py b/packages/aws-library/src/aws_library/ssm/__init__.py new file mode 100644 index 00000000000..377916231b0 --- /dev/null +++ b/packages/aws-library/src/aws_library/ssm/__init__.py @@ -0,0 +1,23 @@ +from ._client import SimcoreSSMAPI +from ._errors import ( + SSMAccessError, + SSMCommandExecutionResultError, + SSMCommandExecutionTimeoutError, + SSMInvalidCommandError, + SSMNotConnectedError, + SSMRuntimeError, + SSMSendCommandInstancesNotReadyError, +) + +__all__: tuple[str, ...] = ( + "SimcoreSSMAPI", + "SSMAccessError", + "SSMNotConnectedError", + "SSMRuntimeError", + "SSMSendCommandInstancesNotReadyError", + "SSMInvalidCommandError", + "SSMCommandExecutionResultError", + "SSMCommandExecutionTimeoutError", +) + +# nopycln: file diff --git a/packages/aws-library/src/aws_library/ssm/_client.py b/packages/aws-library/src/aws_library/ssm/_client.py new file mode 100644 index 00000000000..2b51a93b82a --- /dev/null +++ b/packages/aws-library/src/aws_library/ssm/_client.py @@ -0,0 +1,192 @@ +import contextlib +import datetime +import logging +from collections.abc import Sequence +from dataclasses import dataclass +from typing import Final, cast + +import aioboto3 +import arrow +import botocore +import botocore.exceptions +from aiobotocore.session import ClientCreatorContext +from servicelib.logging_utils import log_decorator +from settings_library.ssm import SSMSettings +from types_aiobotocore_ssm import SSMClient +from types_aiobotocore_ssm.literals import CommandStatusType + +from ._error_handler import ssm_exception_handler +from ._errors import SSMCommandExecutionResultError, SSMCommandExecutionTimeoutError + +_logger = logging.getLogger(__name__) + +_AWS_WAIT_MAX_DELAY: Final[int] = 5 +_AWS_WAIT_NUM_RETRIES: Final[int] = 3 + +_CLOUD_INIT_STATUS_COMMAND: Final[str] = "cloud-init status" +_CLOUD_INIT_STATUS_COMMAND_NAME: Final[str] = _CLOUD_INIT_STATUS_COMMAND + + +@dataclass(frozen=True) +class SSMCommand: + name: str + command_id: str + instance_ids: Sequence[str] + status: CommandStatusType + start_time: datetime.datetime | None + finish_time: datetime.datetime | None + message: str | None = None + + +@dataclass(frozen=True) +class SimcoreSSMAPI: + _client: SSMClient + _session: aioboto3.Session + _exit_stack: contextlib.AsyncExitStack + + @classmethod + async def create(cls, settings: SSMSettings) -> "SimcoreSSMAPI": + session = aioboto3.Session() + session_client = session.client( 
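+            # NOTE: aioboto3 returns a client-creator context here; the actual SSM
+            # client only exists once it is entered via the exit stack below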
+ "ssm", + endpoint_url=settings.SSM_ENDPOINT, + aws_access_key_id=settings.SSM_ACCESS_KEY_ID.get_secret_value(), + aws_secret_access_key=settings.SSM_SECRET_ACCESS_KEY.get_secret_value(), + region_name=settings.SSM_REGION_NAME, + ) + assert isinstance(session_client, ClientCreatorContext) # nosec + exit_stack = contextlib.AsyncExitStack() + ec2_client = cast( + SSMClient, await exit_stack.enter_async_context(session_client) + ) + return cls(ec2_client, session, exit_stack) + + async def close(self) -> None: + await self._exit_stack.aclose() + + async def ping(self) -> bool: + try: + await self._client.list_commands(MaxResults=1) + return True + except Exception: # pylint: disable=broad-except + return False + + # a function to send a command via ssm + @log_decorator(_logger, logging.DEBUG) + @ssm_exception_handler(_logger) + async def send_command( + self, instance_ids: Sequence[str], *, command: str, command_name: str + ) -> SSMCommand: + # NOTE: using Targets instead of instances as this is limited to 50 instances + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm.html#SSM.Client.send_command + response = await self._client.send_command( + Targets=[{"Key": "InstanceIds", "Values": instance_ids}], + DocumentName="AWS-RunShellScript", + Comment=command_name, + Parameters={"commands": [command]}, + CloudWatchOutputConfig={ + "CloudWatchOutputEnabled": True, + "CloudWatchLogGroupName": "simcore-ssm-logs", + }, + ) + assert response["Command"] # nosec + assert "Comment" in response["Command"] # nosec + assert "CommandId" in response["Command"] # nosec + assert "Status" in response["Command"] # nosec + assert "RequestedDateTime" in response["Command"] # nosec + + return SSMCommand( + name=response["Command"]["Comment"], + command_id=response["Command"]["CommandId"], + status=response["Command"]["Status"], + instance_ids=instance_ids, + start_time=None, + finish_time=None, + ) + + @log_decorator(_logger, logging.DEBUG) + @ssm_exception_handler(_logger) + async def get_command(self, instance_id: str, *, command_id: str) -> SSMCommand: + + response = await self._client.get_command_invocation( + CommandId=command_id, InstanceId=instance_id + ) + + return SSMCommand( + name=response["Comment"], + command_id=response["CommandId"], + instance_ids=[response["InstanceId"]], + status=response["Status"] if response["Status"] != "Delayed" else "Pending", + message=response["StatusDetails"], + start_time=( + arrow.get(response["ExecutionStartDateTime"]).datetime + if response.get("ExecutionStartDateTime") + else None + ), + finish_time=( + arrow.get(response["ExecutionEndDateTime"]).datetime + if response.get("ExecutionEndDateTime") + else None + ), + ) + + @log_decorator(_logger, logging.DEBUG) + @ssm_exception_handler(_logger) + async def is_instance_connected_to_ssm_server(self, instance_id: str) -> bool: + response = await self._client.describe_instance_information( + InstanceInformationFilterList=[ + { + "key": "InstanceIds", + "valueSet": [ + instance_id, + ], + } + ], + ) + if response.get( + "InstanceInformationList" + ): # NOTE: the key is actually NOT REQUIRED! 
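+            # describe_instance_information only returns instances registered with
+            # the SSM agent, so an empty list means the instance is not connected yet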
+ assert len(response["InstanceInformationList"]) == 1 # nosec + assert "PingStatus" in response["InstanceInformationList"][0] # nosec + return bool( + response["InstanceInformationList"][0]["PingStatus"] == "Online" + ) + return False + + @log_decorator(_logger, logging.DEBUG) + @ssm_exception_handler(_logger) + async def wait_for_has_instance_completed_cloud_init( + self, instance_id: str + ) -> bool: + cloud_init_status_command = await self.send_command( + (instance_id,), + command=_CLOUD_INIT_STATUS_COMMAND, + command_name=_CLOUD_INIT_STATUS_COMMAND_NAME, + ) + # wait for command to complete + waiter = self._client.get_waiter( # pylint: disable=assignment-from-no-return + "command_executed" + ) + try: + await waiter.wait( + CommandId=cloud_init_status_command.command_id, + InstanceId=instance_id, + WaiterConfig={ + "Delay": _AWS_WAIT_MAX_DELAY, + "MaxAttempts": _AWS_WAIT_NUM_RETRIES, + }, + ) + except botocore.exceptions.WaiterError as exc: + msg = f"Timed-out waiting for {instance_id} to complete cloud-init" + raise SSMCommandExecutionTimeoutError(details=msg) from exc + response = await self._client.get_command_invocation( + CommandId=cloud_init_status_command.command_id, InstanceId=instance_id + ) + if response["Status"] != "Success": + raise SSMCommandExecutionResultError( + id=response["CommandId"], + name=_CLOUD_INIT_STATUS_COMMAND_NAME, + details=response["StatusDetails"], + ) + # check if cloud-init is done + return bool("status: done" in response["StandardOutputContent"]) diff --git a/packages/aws-library/src/aws_library/ssm/_error_handler.py b/packages/aws-library/src/aws_library/ssm/_error_handler.py new file mode 100644 index 00000000000..ef2ae026c92 --- /dev/null +++ b/packages/aws-library/src/aws_library/ssm/_error_handler.py @@ -0,0 +1,85 @@ +import functools +import logging +from collections.abc import Callable, Coroutine +from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar + +from botocore import exceptions as botocore_exc + +from ._errors import ( + SSMAccessError, + SSMInvalidCommandError, + SSMNotConnectedError, + SSMRuntimeError, + SSMSendCommandInstancesNotReadyError, + SSMTimeoutError, +) + +if TYPE_CHECKING: + # NOTE: TYPE_CHECKING is True when static type checkers are running, + # allowing for circular imports only for them (mypy, pylance, ruff) + from ._client import SimcoreSSMAPI + + +def _map_botocore_client_exception( + botocore_error: botocore_exc.ClientError, **kwargs +) -> SSMAccessError: + status_code = int( + botocore_error.response.get("ResponseMetadata", {}).get("HTTPStatusCode") + or botocore_error.response.get("Error", {}).get("Code", -1) + ) + operation_name = botocore_error.operation_name + match status_code, operation_name: + case 400, "SendCommand": + return SSMSendCommandInstancesNotReadyError() + case 400, "GetCommandInvocation": + assert "Error" in botocore_error.response # nosec + assert "Message" in botocore_error.response["Error"] # nosec + return SSMInvalidCommandError(command_id=kwargs["command_id"]) + + case _: + return SSMAccessError( + operation_name=operation_name, + code=status_code, + error=f"{botocore_error}", + ) + + +P = ParamSpec("P") +R = TypeVar("R") +T = TypeVar("T") +Self = TypeVar("Self", bound="SimcoreSSMAPI") + + +def ssm_exception_handler( + logger: logging.Logger, +) -> Callable[ + [Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]], + Callable[Concatenate[Self, P], Coroutine[Any, Any, R]], +]: + """ + Raises: + SSMAccessError: + """ + + def decorator( + func: Callable[Concatenate[Self, P], 
Coroutine[Any, Any, R]] + ) -> Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]: + @functools.wraps(func) + async def wrapper(self: Self, *args: P.args, **kwargs: P.kwargs) -> R: + try: + return await func(self, *args, **kwargs) + except botocore_exc.ClientError as exc: + raise _map_botocore_client_exception(exc, **kwargs) from exc + except botocore_exc.WaiterError as exc: + raise SSMTimeoutError(details=f"{exc}") from exc + except botocore_exc.EndpointConnectionError as exc: + raise SSMNotConnectedError from exc + except botocore_exc.BotoCoreError as exc: + logger.exception("Unexpected error in SSM client: ") + raise SSMRuntimeError from exc + + wrapper.__doc__ = f"{func.__doc__}\n\n{ssm_exception_handler.__doc__}" + + return wrapper + + return decorator diff --git a/packages/aws-library/src/aws_library/ssm/_errors.py b/packages/aws-library/src/aws_library/ssm/_errors.py new file mode 100644 index 00000000000..5d3ea16b6c6 --- /dev/null +++ b/packages/aws-library/src/aws_library/ssm/_errors.py @@ -0,0 +1,35 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class SSMRuntimeError(OsparcErrorMixin, RuntimeError): + msg_template: str = "SSM client unexpected error" + + +class SSMNotConnectedError(SSMRuntimeError): + msg_template: str = "Cannot connect with SSM server" + + +class SSMAccessError(SSMRuntimeError): + msg_template: str = ( + "Unexpected error while accessing SSM backend: {operation_name}:{code}:{error}" + ) + + +class SSMTimeoutError(SSMAccessError): + msg_template: str = "Timeout while accessing SSM backend: {details}" + + +class SSMSendCommandInstancesNotReadyError(SSMAccessError): + msg_template: str = "Instance not ready to receive commands" + + +class SSMCommandExecutionResultError(SSMAccessError): + msg_template: str = "Command {id}:{name} execution resulted in an error: {details}" + + +class SSMCommandExecutionTimeoutError(SSMAccessError): + msg_template: str = "Command execution timed-out: {details}" + + +class SSMInvalidCommandError(SSMAccessError): + msg_template: str = "Invalid command ID: {command_id}" diff --git a/packages/aws-library/tests/conftest.py b/packages/aws-library/tests/conftest.py new file mode 100644 index 00000000000..47fcdd327e3 --- /dev/null +++ b/packages/aws-library/tests/conftest.py @@ -0,0 +1,32 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import +from pathlib import Path + +import aws_library +import pytest +from settings_library.ec2 import EC2Settings + +pytest_plugins = [ + "pytest_simcore.aws_ec2_service", + "pytest_simcore.aws_s3_service", + "pytest_simcore.aws_server", + "pytest_simcore.aws_ssm_service", + "pytest_simcore.environment_configs", + "pytest_simcore.file_extra", + "pytest_simcore.pydantic_models", + "pytest_simcore.pytest_global_environs", + "pytest_simcore.repository_paths", +] + + +@pytest.fixture(scope="session") +def package_dir() -> Path: + pdir = Path(aws_library.__file__).resolve().parent + assert pdir.exists() + return pdir + + +@pytest.fixture +def ec2_settings(mocked_ec2_server_settings: EC2Settings) -> EC2Settings: + return mocked_ec2_server_settings diff --git a/packages/aws-library/tests/test_aiobotocore.py b/packages/aws-library/tests/test_aiobotocore.py new file mode 100644 index 00000000000..a07aea6294d --- /dev/null +++ b/packages/aws-library/tests/test_aiobotocore.py @@ -0,0 +1,61 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from 
aiobotocore.session import get_session +from botocore import exceptions as boto_exceptions +from moto.server import ThreadedMotoServer + + +async def test_s3_client_fails_if_no_s3(): + """this tests shows that initializing the client actually checks if the S3 server is connected""" + session = get_session() + with pytest.raises(boto_exceptions.ClientError): + async with session.create_client( + "s3", + aws_secret_access_key="xxx", # noqa: S106 + aws_access_key_id="xxx", + ) as client: + await client.list_buckets() + with pytest.raises(boto_exceptions.ClientError): + async with session.create_client( + "s3", + aws_secret_access_key="xxx", # noqa: S106 + aws_access_key_id="xxx", + ) as client: + await client.list_buckets() + + +async def test_s3_client_reconnects_if_s3_server_restarts( + mocked_aws_server: ThreadedMotoServer, +): + """this tests shows that we do not need to restart the client if the S3 server restarts""" + session = get_session() + # pylint: disable=protected-access + async with session.create_client( + "s3", + endpoint_url=f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # noqa: SLF001 + aws_secret_access_key="xxx", # noqa: S106 + aws_access_key_id="xxx", + ) as client: + assert client + response = await client.list_buckets() + assert response + assert "Buckets" in response + assert isinstance(response["Buckets"], list) + assert not response["Buckets"] + + # stop the server, the client shall be unhappy + mocked_aws_server.stop() + with pytest.raises(boto_exceptions.EndpointConnectionError): + response = await client.list_buckets() + + # restart the server and check that the aiobotocore client is connected again + mocked_aws_server.start() + response = await client.list_buckets() + assert response + assert "Buckets" in response + assert isinstance(response["Buckets"], list) + assert not response["Buckets"] diff --git a/packages/aws-library/tests/test_ec2_client.py b/packages/aws-library/tests/test_ec2_client.py new file mode 100644 index 00000000000..a1cbdf55c57 --- /dev/null +++ b/packages/aws-library/tests/test_ec2_client.py @@ -0,0 +1,577 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +import random +from collections.abc import AsyncIterator, Callable +from dataclasses import fields +from typing import cast, get_args + +import botocore.exceptions +import pytest +from aws_library.ec2._client import SimcoreEC2API +from aws_library.ec2._errors import ( + EC2InstanceNotFoundError, + EC2InstanceTypeInvalidError, + EC2TooManyInstancesError, +) +from aws_library.ec2._models import ( + AWSTagKey, + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + EC2Tags, +) +from faker import Faker +from moto.server import ThreadedMotoServer +from settings_library.ec2 import EC2Settings +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType + + +def _ec2_allowed_types() -> list[InstanceTypeType]: + return ["t2.nano", "m5.12xlarge", "g4dn.4xlarge"] + + +@pytest.fixture(scope="session") +def ec2_allowed_instances() -> list[InstanceTypeType]: + return _ec2_allowed_types() + + +@pytest.fixture +async def simcore_ec2_api( + mocked_ec2_server_settings: EC2Settings, +) -> AsyncIterator[SimcoreEC2API]: + ec2 = await SimcoreEC2API.create(settings=mocked_ec2_server_settings) + assert ec2 + assert ec2.client + assert ec2.exit_stack + assert ec2.session + yield ec2 + await ec2.close() + + +async def 
test_ec2_client_lifespan(simcore_ec2_api: SimcoreEC2API): ... + + +async def test_aiobotocore_ec2_client_when_ec2_server_goes_up_and_down( + mocked_aws_server: ThreadedMotoServer, + ec2_client: EC2Client, +): + # passes without exception + await ec2_client.describe_account_attributes(DryRun=True) + mocked_aws_server.stop() + with pytest.raises(botocore.exceptions.EndpointConnectionError): + await ec2_client.describe_account_attributes(DryRun=True) + + # restart + mocked_aws_server.start() + # passes without exception + await ec2_client.describe_account_attributes(DryRun=True) + + +async def test_ping( + mocked_aws_server: ThreadedMotoServer, + simcore_ec2_api: SimcoreEC2API, +): + assert await simcore_ec2_api.ping() is True + mocked_aws_server.stop() + assert await simcore_ec2_api.ping() is False + mocked_aws_server.start() + assert await simcore_ec2_api.ping() is True + + +@pytest.fixture +def ec2_instance_config( + fake_ec2_instance_type: EC2InstanceType, + faker: Faker, + aws_subnet_id: str, + aws_security_group_id: str, + aws_ami_id: str, +) -> EC2InstanceConfig: + return EC2InstanceConfig( + type=fake_ec2_instance_type, + tags=faker.pydict(allowed_types=(str,)), + startup_script=faker.pystr(), + ami_id=aws_ami_id, + key_name=faker.pystr(), + security_group_ids=[aws_security_group_id], + subnet_id=aws_subnet_id, + iam_instance_profile="", + ) + + +async def test_get_ec2_instance_capabilities( + simcore_ec2_api: SimcoreEC2API, + ec2_allowed_instances: list[InstanceTypeType], +): + instance_types: list[EC2InstanceType] = ( + await simcore_ec2_api.get_ec2_instance_capabilities( + cast( + set[InstanceTypeType], + set(ec2_allowed_instances), + ) + ) + ) + assert instance_types + assert [_.name for _ in instance_types] == sorted(ec2_allowed_instances) + + +async def test_get_ec2_instance_capabilities_returns_all_options( + simcore_ec2_api: SimcoreEC2API, +): + instance_types = await simcore_ec2_api.get_ec2_instance_capabilities("ALL") + assert instance_types + # NOTE: this might need adaptation when moto is updated + assert ( + 850 < len(instance_types) < 877 + ), f"received {len(instance_types)}, the test might need adaptation" + + +async def test_get_ec2_instance_capabilities_raise_with_empty_set( + simcore_ec2_api: SimcoreEC2API, +): + with pytest.raises(ValueError, match="instance_type_names"): + await simcore_ec2_api.get_ec2_instance_capabilities(set()) + + +async def test_get_ec2_instance_capabilities_with_invalid_type_raises( + simcore_ec2_api: SimcoreEC2API, + faker: Faker, +): + with pytest.raises(EC2InstanceTypeInvalidError): + await simcore_ec2_api.get_ec2_instance_capabilities( + faker.pyset(allowed_types=(str,)) + ) + + +@pytest.fixture(params=_ec2_allowed_types()) +async def fake_ec2_instance_type( + simcore_ec2_api: SimcoreEC2API, + request: pytest.FixtureRequest, +) -> EC2InstanceType: + instance_type_name: InstanceTypeType = request.param + instance_types: list[EC2InstanceType] = ( + await simcore_ec2_api.get_ec2_instance_capabilities({instance_type_name}) + ) + + assert len(instance_types) == 1 + return instance_types[0] + + +async def _assert_no_instances_in_ec2(ec2_client: EC2Client) -> None: + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + +async def _assert_instances_in_ec2( + ec2_client: EC2Client, + *, + expected_num_reservations: int, + expected_num_instances: int, + expected_instance_type: EC2InstanceType, + expected_tags: EC2Tags, + expected_state: str, +) -> None: + all_instances = await 
ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == expected_num_reservations + for reservation in all_instances["Reservations"]: + assert "Instances" in reservation + assert len(reservation["Instances"]) == expected_num_instances + for instance in reservation["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == expected_instance_type.name + assert "Tags" in instance + assert instance["Tags"] == [ + {"Key": key, "Value": value} for key, value in expected_tags.items() + ] + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == expected_state + + +async def test_launch_instances( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + ec2_instance_config: EC2InstanceConfig, +): + await _assert_no_instances_in_ec2(ec2_client) + + number_of_instances = 1 + + # let's create a first reservation and check that it is correctly created in EC2 + await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=number_of_instances, + number_of_instances=number_of_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=number_of_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + # create a second reservation + await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=number_of_instances, + number_of_instances=number_of_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=2, + expected_num_instances=number_of_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + +@pytest.mark.parametrize("max_num_instances", [13]) +async def test_launch_instances_is_limited_in_number_of_instances( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + ec2_instance_config: EC2InstanceConfig, + max_num_instances: int, +): + await _assert_no_instances_in_ec2(ec2_client) + + # create many instances in one go shall fail + with pytest.raises(EC2TooManyInstancesError): + await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=max_num_instances + 1, + number_of_instances=max_num_instances + 1, + max_total_number_of_instances=max_num_instances, + ) + await _assert_no_instances_in_ec2(ec2_client) + + # create instances 1 by 1 + for _ in range(max_num_instances): + await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=1, + number_of_instances=1, + max_total_number_of_instances=max_num_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=max_num_instances, + expected_num_instances=1, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + # now creating one more shall fail + with pytest.raises(EC2TooManyInstancesError): + await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=1, + number_of_instances=1, + max_total_number_of_instances=max_num_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=max_num_instances, + expected_num_instances=1, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + +async def test_get_instances( + simcore_ec2_api: SimcoreEC2API, + 
ec2_client: EC2Client, + faker: Faker, + ec2_instance_config: EC2InstanceConfig, +): + # we have nothing running now in ec2 + await _assert_no_instances_in_ec2(ec2_client) + assert ( + await simcore_ec2_api.get_instances( + key_names=[ec2_instance_config.key_name], tags={} + ) + == [] + ) + + # create some instance + _MAX_NUM_INSTANCES = 10 + num_instances = faker.pyint(min_value=1, max_value=_MAX_NUM_INSTANCES) + created_instances = await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=num_instances, + number_of_instances=num_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + # this returns all the entries using thes key names + instance_received = await simcore_ec2_api.get_instances( + key_names=[ec2_instance_config.key_name], tags={} + ) + assert created_instances == instance_received + + # passing the tags will return the same + instance_received = await simcore_ec2_api.get_instances( + key_names=[ec2_instance_config.key_name], tags=ec2_instance_config.tags + ) + assert created_instances == instance_received + + # asking for running state will also return the same + instance_received = await simcore_ec2_api.get_instances( + key_names=[ec2_instance_config.key_name], + tags=ec2_instance_config.tags, + state_names=["running"], + ) + assert created_instances == instance_received + + # asking for other states shall return nothing + for state in get_args(InstanceStateNameType): + instance_received = await simcore_ec2_api.get_instances( + key_names=[ec2_instance_config.key_name], + tags=ec2_instance_config.tags, + state_names=[state], + ) + if state == "running": + assert created_instances == instance_received + else: + assert not instance_received + + +async def test_stop_start_instances( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + faker: Faker, + ec2_instance_config: EC2InstanceConfig, +): + # we have nothing running now in ec2 + await _assert_no_instances_in_ec2(ec2_client) + # create some instance + _NUM_INSTANCES = 10 + num_instances = faker.pyint(min_value=1, max_value=_NUM_INSTANCES) + created_instances = await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=num_instances, + number_of_instances=num_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + # stop the instances + await simcore_ec2_api.stop_instances(created_instances) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="stopped", + ) + + # stop again is ok + await simcore_ec2_api.stop_instances(created_instances) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="stopped", + ) + + # start the instances now + started_instances = await simcore_ec2_api.start_instances(created_instances) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + 
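+        # NOTE: stopping keeps the reservation; only the instance state flips to "stopped"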
expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + # the public IPs change when the instances are stopped and started + for s, c in zip(started_instances, created_instances, strict=True): + # the rest shall be the same + for f in fields(EC2InstanceData): + if f.name == "aws_public_ip": + assert getattr(s, f.name) != getattr(c, f.name) + else: + assert getattr(s, f.name) == getattr(c, f.name) + + +async def test_terminate_instance( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + faker: Faker, + ec2_instance_config: EC2InstanceConfig, +): + # we have nothing running now in ec2 + await _assert_no_instances_in_ec2(ec2_client) + # create some instance + _NUM_INSTANCES = 10 + num_instances = faker.pyint(min_value=1, max_value=_NUM_INSTANCES) + created_instances = await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=num_instances, + number_of_instances=num_instances, + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + # terminate the instance + await simcore_ec2_api.terminate_instances(created_instances) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="terminated", + ) + # calling it several times is ok, the instance stays a while + await simcore_ec2_api.terminate_instances(created_instances) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="terminated", + ) + + +async def test_start_instance_not_existing_raises( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + await _assert_no_instances_in_ec2(ec2_client) + with pytest.raises(EC2InstanceNotFoundError): + await simcore_ec2_api.start_instances([fake_ec2_instance_data()]) + + +async def test_stop_instance_not_existing_raises( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + await _assert_no_instances_in_ec2(ec2_client) + with pytest.raises(EC2InstanceNotFoundError): + await simcore_ec2_api.stop_instances([fake_ec2_instance_data()]) + + +async def test_terminate_instance_not_existing_raises( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + await _assert_no_instances_in_ec2(ec2_client) + with pytest.raises(EC2InstanceNotFoundError): + await simcore_ec2_api.terminate_instances([fake_ec2_instance_data()]) + + +async def test_set_instance_tags( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + faker: Faker, + ec2_instance_config: EC2InstanceConfig, +): + await _assert_no_instances_in_ec2(ec2_client) + # create some instance + _MAX_NUM_INSTANCES = 10 + num_instances = faker.pyint(min_value=1, max_value=_MAX_NUM_INSTANCES) + created_instances = await simcore_ec2_api.launch_instances( + ec2_instance_config, + min_number_of_instances=num_instances, + number_of_instances=num_instances, + ) + await 
_assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags, + expected_state="running", + ) + + new_tags = faker.pydict(allowed_types=(str,)) + await simcore_ec2_api.set_instances_tags(created_instances, tags=new_tags) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags | new_tags, + expected_state="running", + ) + + # now remove some, this should do nothing + await simcore_ec2_api.remove_instances_tags( + created_instances, tag_keys=[AWSTagKey("whatever_i_dont_exist")] + ) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags | new_tags, + expected_state="running", + ) + # now remove some real ones + tag_key_to_remove = random.choice(list(new_tags)) # noqa: S311 + await simcore_ec2_api.remove_instances_tags( + created_instances, tag_keys=[tag_key_to_remove] + ) + new_tags.pop(tag_key_to_remove) + await _assert_instances_in_ec2( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_instances, + expected_instance_type=ec2_instance_config.type, + expected_tags=ec2_instance_config.tags | new_tags, + expected_state="running", + ) + + +async def test_set_instance_tags_not_existing_raises( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + await _assert_no_instances_in_ec2(ec2_client) + with pytest.raises(EC2InstanceNotFoundError): + await simcore_ec2_api.set_instances_tags([fake_ec2_instance_data()], tags={}) + + +async def test_remove_instance_tags_not_existing_raises( + simcore_ec2_api: SimcoreEC2API, + ec2_client: EC2Client, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + await _assert_no_instances_in_ec2(ec2_client) + with pytest.raises(EC2InstanceNotFoundError): + await simcore_ec2_api.remove_instances_tags( + [fake_ec2_instance_data()], tag_keys=[] + ) diff --git a/packages/aws-library/tests/test_ec2_models.py b/packages/aws-library/tests/test_ec2_models.py new file mode 100644 index 00000000000..ed232ad0043 --- /dev/null +++ b/packages/aws-library/tests/test_ec2_models.py @@ -0,0 +1,172 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from aws_library.ec2._models import AWSTagKey, AWSTagValue, EC2InstanceData, Resources +from faker import Faker +from pydantic import ByteSize, TypeAdapter, ValidationError + + +@pytest.mark.parametrize( + "a,b,a_greater_or_equal_than_b", + [ + ( + Resources(cpus=0.2, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.1, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.1, ram=ByteSize(1)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.05, ram=ByteSize(1)), + Resources(cpus=0.1, ram=ByteSize(0)), + False, + ), + ( + Resources(cpus=0.1, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(1)), + False, + ), + ], +) +def test_resources_ge_operator( + a: Resources, b: Resources, a_greater_or_equal_than_b: bool +): + assert (a >= b) is a_greater_or_equal_than_b + + +@pytest.mark.parametrize( + 
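+    # NOTE: unlike ">=", two equal Resources must compare as NOT greater here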
"a,b,a_greater_than_b", + [ + ( + Resources(cpus=0.2, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.1, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(0)), + False, + ), + ( + Resources(cpus=0.1, ram=ByteSize(1)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.05, ram=ByteSize(1)), + Resources(cpus=0.1, ram=ByteSize(0)), + True, + ), + ( + Resources(cpus=0.1, ram=ByteSize(0)), + Resources(cpus=0.1, ram=ByteSize(1)), + False, + ), + ], +) +def test_resources_gt_operator(a: Resources, b: Resources, a_greater_than_b: bool): + assert (a > b) is a_greater_than_b + + +@pytest.mark.parametrize( + "a,b,result", + [ + ( + Resources(cpus=0, ram=ByteSize(0)), + Resources(cpus=1, ram=ByteSize(34)), + Resources(cpus=1, ram=ByteSize(34)), + ), + ( + Resources(cpus=0.1, ram=ByteSize(1)), + Resources(cpus=1, ram=ByteSize(34)), + Resources(cpus=1.1, ram=ByteSize(35)), + ), + ], +) +def test_resources_add(a: Resources, b: Resources, result: Resources): + assert a + b == result + a += b + assert a == result + + +def test_resources_create_as_empty(): + assert Resources.create_as_empty() == Resources(cpus=0, ram=ByteSize(0)) + + +@pytest.mark.parametrize( + "a,b,result", + [ + ( + Resources(cpus=0, ram=ByteSize(34)), + Resources(cpus=1, ram=ByteSize(0)), + Resources.model_construct(cpus=-1, ram=ByteSize(34)), + ), + ( + Resources(cpus=0.1, ram=ByteSize(34)), + Resources(cpus=1, ram=ByteSize(1)), + Resources.model_construct(cpus=-0.9, ram=ByteSize(33)), + ), + ], +) +def test_resources_sub(a: Resources, b: Resources, result: Resources): + assert a - b == result + a -= b + assert a == result + + +@pytest.mark.parametrize("ec2_tag_key", ["", "/", " ", ".", "..", "_index"]) +def test_aws_tag_key_invalid(ec2_tag_key: str): + # for a key it raises + with pytest.raises(ValidationError): + TypeAdapter(AWSTagKey).validate_python(ec2_tag_key) + + # for a value it does not + TypeAdapter(AWSTagValue).validate_python(ec2_tag_key) + + +def test_ec2_instance_data_hashable(faker: Faker): + first_set_of_ec2s = { + EC2InstanceData( + faker.date_time(), + faker.pystr(), + faker.pystr(), + f"{faker.ipv4()}", + "g4dn.xlarge", + "running", + Resources( + cpus=faker.pyfloat(min_value=0.1), + ram=ByteSize(faker.pyint(min_value=123)), + ), + {AWSTagKey("mytagkey"): AWSTagValue("mytagvalue")}, + ) + } + second_set_of_ec2s = { + EC2InstanceData( + faker.date_time(), + faker.pystr(), + faker.pystr(), + f"{faker.ipv4()}", + "g4dn.xlarge", + "running", + Resources( + cpus=faker.pyfloat(min_value=0.1), + ram=ByteSize(faker.pyint(min_value=123)), + ), + {AWSTagKey("mytagkey"): AWSTagValue("mytagvalue")}, + ) + } + + union_of_sets = first_set_of_ec2s.union(second_set_of_ec2s) + assert next(iter(first_set_of_ec2s)) in union_of_sets + assert next(iter(second_set_of_ec2s)) in union_of_sets diff --git a/packages/aws-library/tests/test_ec2_utils.py b/packages/aws-library/tests/test_ec2_utils.py new file mode 100644 index 00000000000..bb61f243529 --- /dev/null +++ b/packages/aws-library/tests/test_ec2_utils.py @@ -0,0 +1,7 @@ +from aws_library.ec2._utils import compose_user_data +from faker import Faker + + +def test_compose_user_data(faker: Faker): + assert compose_user_data(faker.pystr()).startswith("#!/bin/bash\n") + assert compose_user_data(faker.pystr()).endswith("\n") diff --git a/packages/aws-library/tests/test_s3_client.py b/packages/aws-library/tests/test_s3_client.py new file mode 100644 index 00000000000..0015b261a76 --- /dev/null +++ 
b/packages/aws-library/tests/test_s3_client.py @@ -0,0 +1,1928 @@ +# pylint:disable=contextmanager-generator-missing-cleanup +# pylint:disable=no-name-in-module +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable + + +import asyncio +import filecmp +import json +import logging +import random +import time +from collections import defaultdict +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator +from contextlib import contextmanager +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Final +from unittest.mock import AsyncMock, Mock + +import aiofiles +import botocore.exceptions +import pytest +from aiohttp import ClientSession +from aws_library.s3._client import _AWS_MAX_ITEMS_PER_PAGE, S3ObjectKey, SimcoreS3API +from aws_library.s3._constants import ( + MULTIPART_UPLOADS_MIN_TOTAL_SIZE, + STREAM_READER_CHUNK_SIZE, +) +from aws_library.s3._errors import ( + S3BucketInvalidError, + S3DestinationNotEmptyError, + S3KeyNotFoundError, + S3UploadNotFoundError, +) +from aws_library.s3._models import MultiPartUploadLinks, S3DirectoryMetaData, S3MetaData +from faker import Faker +from models_library.api_schemas_storage.storage_schemas import ( + S3BucketName, + UploadedPart, +) +from models_library.basic_types import SHA256Str +from moto.server import ThreadedMotoServer +from pydantic import AnyUrl, ByteSize, NonNegativeInt, TypeAdapter +from pytest_benchmark.plugin import BenchmarkFixture +from pytest_mock import MockerFixture +from pytest_simcore.helpers.comparing import ( + assert_same_contents, + assert_same_file_content, + get_files_info_from_path, +) +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.parametrizations import ( + byte_size_ids, + parametrized_file_size, +) +from pytest_simcore.helpers.s3 import ( + delete_all_object_versions, + upload_file_to_presigned_link, +) +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.archiving_utils import unarchive_dir +from servicelib.bytes_iters import ArchiveEntries, DiskStreamReader, get_zip_bytes_iter +from servicelib.bytes_iters._models import DataSize +from servicelib.file_utils import remove_directory +from servicelib.progress_bar import ProgressBarData +from servicelib.s3_utils import FileLikeBytesIterReader +from servicelib.utils import limited_as_completed, limited_gather +from settings_library.s3 import S3Settings +from types_aiobotocore_s3 import S3Client +from types_aiobotocore_s3.literals import BucketLocationConstraintType + + +@pytest.fixture +async def simcore_s3_api( + mocked_s3_server_settings: S3Settings, + mocked_s3_server_envs: EnvVarsDict, +) -> AsyncIterator[SimcoreS3API]: + s3 = await SimcoreS3API.create(settings=mocked_s3_server_settings) + assert s3 + assert s3._client # noqa: SLF001 + assert s3._exit_stack # noqa: SLF001 + assert s3._session # noqa: SLF001 + yield s3 + await s3.close() + + +@pytest.fixture +def bucket_name(faker: Faker) -> S3BucketName: + # NOTE: no faker here as we need some specific namings + return TypeAdapter(S3BucketName).validate_python( + faker.pystr().replace("_", "-").lower() + ) + + +@pytest.fixture +async def ensure_bucket_name_deleted( + bucket_name: S3BucketName, s3_client: S3Client +) -> AsyncIterator[None]: + yield + await s3_client.delete_bucket(Bucket=bucket_name) + + +@pytest.fixture +async def with_s3_bucket( + s3_client: S3Client, bucket_name: 
S3BucketName +) -> AsyncIterator[S3BucketName]: + await s3_client.create_bucket(Bucket=bucket_name) + yield bucket_name + await s3_client.delete_bucket(Bucket=bucket_name) + + +@pytest.fixture +def non_existing_s3_bucket(faker: Faker) -> S3BucketName: + return TypeAdapter(S3BucketName).validate_python( + faker.pystr().replace("_", "-").lower() + ) + + +@pytest.fixture +async def upload_to_presigned_link( + s3_client: S3Client, +) -> AsyncIterator[ + Callable[[Path, AnyUrl, S3BucketName, S3ObjectKey], Awaitable[None]] +]: + uploaded_object_keys: dict[S3BucketName, list[S3ObjectKey]] = defaultdict(list) + + async def _( + file: Path, presigned_url: AnyUrl, bucket: S3BucketName, s3_object: S3ObjectKey + ) -> None: + await upload_file_to_presigned_link( + file, + MultiPartUploadLinks( + upload_id="fake", + chunk_size=TypeAdapter(ByteSize).validate_python(file.stat().st_size), + urls=[presigned_url], + ), + ) + uploaded_object_keys[bucket].append(s3_object) + + yield _ + + for bucket, object_keys in uploaded_object_keys.items(): + await delete_all_object_versions(s3_client, bucket, object_keys) + + +@dataclass(frozen=True, slots=True, kw_only=True) +class UploadedFile: + local_path: Path + s3_key: S3ObjectKey + + +@pytest.fixture +async def with_uploaded_file_on_s3( + create_file_of_size: Callable[[ByteSize], Path], + s3_client: S3Client, + with_s3_bucket: S3BucketName, +) -> AsyncIterator[UploadedFile]: + test_file = create_file_of_size(TypeAdapter(ByteSize).validate_python("10Kib")) + await s3_client.upload_file( + Filename=f"{test_file}", + Bucket=with_s3_bucket, + Key=test_file.name, + ) + + yield UploadedFile(local_path=test_file, s3_key=test_file.name) + + await delete_all_object_versions(s3_client, with_s3_bucket, [test_file.name]) + + +@pytest.fixture +def default_expiration_time_seconds(faker: Faker) -> int: + return faker.pyint(min_value=10) + + +@pytest.mark.parametrize("region", ["us-east-1", "us-east-2", "us-west-1", "us-west-2"]) +async def test_create_bucket( + simcore_s3_api: SimcoreS3API, + bucket_name: S3BucketName, + ensure_bucket_name_deleted: None, + region: BucketLocationConstraintType, +): + assert not await simcore_s3_api.bucket_exists(bucket=bucket_name) + await simcore_s3_api.create_bucket(bucket=bucket_name, region=region) + assert await simcore_s3_api.bucket_exists(bucket=bucket_name) + # calling again works and silently does nothing + await simcore_s3_api.create_bucket(bucket=bucket_name, region=region) + + +@pytest.fixture +async def with_versioning_enabled( + s3_client: S3Client, + with_s3_bucket: S3BucketName, +) -> None: + await s3_client.put_bucket_versioning( + Bucket=with_s3_bucket, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) + + +@pytest.fixture +async def upload_file_to_multipart_presigned_link_without_completing( + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + create_file_of_size: Callable[[ByteSize], Path], + faker: Faker, + default_expiration_time_seconds: int, + s3_client: S3Client, +) -> AsyncIterator[ + Callable[ + ..., Awaitable[tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]] + ] +]: + possibly_updated_files: list[S3ObjectKey] = [] + + async def _uploader( + file_size: ByteSize, + object_key: S3ObjectKey | None = None, + ) -> tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]: + file = create_file_of_size(file_size) + if not object_key: + object_key = S3ObjectKey(file.name) + upload_links = await simcore_s3_api.create_multipart_upload_links( + bucket=with_s3_bucket, + 
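+            # the sha256 checksum passed below is a random fake value, used only to
+            # exercise the API in this fixture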
object_key=object_key, + file_size=ByteSize(file.stat().st_size), + expiration_secs=default_expiration_time_seconds, + sha256_checksum=TypeAdapter(SHA256Str).validate_python(faker.sha256()), + ) + assert upload_links + + # check there is no file yet + with pytest.raises(S3KeyNotFoundError, match=f"{object_key}"): + await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=object_key + ) + + # check we have the multipart upload initialized and listed + ongoing_multipart_uploads = await simcore_s3_api.list_ongoing_multipart_uploads( + bucket=with_s3_bucket + ) + assert ongoing_multipart_uploads + assert len(ongoing_multipart_uploads) == 1 + ongoing_upload_id, ongoing_object_key = ongoing_multipart_uploads[0] + assert ongoing_upload_id == upload_links.upload_id + assert ongoing_object_key == object_key + + # upload the file + uploaded_parts: list[UploadedPart] = await upload_file_to_presigned_link( + file, + upload_links, + ) + assert len(uploaded_parts) == len(upload_links.urls) + + # check there is no file yet + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=object_key + ) + + # check we have the multipart upload initialized and listed + ongoing_multipart_uploads = await simcore_s3_api.list_ongoing_multipart_uploads( + bucket=with_s3_bucket + ) + assert ongoing_multipart_uploads + assert len(ongoing_multipart_uploads) == 1 + ongoing_upload_id, ongoing_object_key = ongoing_multipart_uploads[0] + assert ongoing_upload_id == upload_links.upload_id + assert ongoing_object_key == object_key + + possibly_updated_files.append(object_key) + + return ( + object_key, + upload_links, + uploaded_parts, + ) + + yield _uploader + + await delete_all_object_versions(s3_client, with_s3_bucket, possibly_updated_files) + + +@dataclass +class _UploadProgressCallback: + file_size: int + action: str + logger: logging.Logger + _total_bytes_transfered: int = 0 + + def __call__(self, bytes_transferred: int, *, file_name: str) -> None: + self._total_bytes_transfered += bytes_transferred + assert self._total_bytes_transfered <= self.file_size + self.logger.info( + "progress: %s", + f"{self.action} {file_name=} {self._total_bytes_transfered} / {self.file_size} bytes", + ) + + +@dataclass +class _CopyProgressCallback: + file_size: int + action: str + logger: logging.Logger + _total_bytes_transfered: int = 0 + + def __call__(self, total_bytes_copied: int, *, file_name: str) -> None: + self._total_bytes_transfered = total_bytes_copied + assert self._total_bytes_transfered <= self.file_size + self.logger.info( + "progress: %s", + f"{self.action} {file_name=} {self._total_bytes_transfered} / {self.file_size} bytes", + ) + + +@pytest.fixture +async def upload_file( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + s3_client: S3Client, +) -> AsyncIterator[Callable[[Path], Awaitable[UploadedFile]]]: + uploaded_object_keys = [] + + async def _uploader(file: Path, base_path: Path | None = None) -> UploadedFile: + object_key = file.name + if base_path: + object_key = f"{file.relative_to(base_path)}" + with log_context( + logging.INFO, msg=f"uploading {file} to {with_s3_bucket}/{object_key}" + ) as ctx: + progress_cb = _UploadProgressCallback( + file_size=file.stat().st_size, action="uploaded", logger=ctx.logger + ) + response = await simcore_s3_api.upload_file( + bucket=with_s3_bucket, + file=file, + object_key=object_key, + bytes_transfered_cb=progress_cb, + ) + # there is no 
response from aioboto3... + assert not response + + assert ( + await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=object_key + ) + is True + ) + uploaded_object_keys.append(object_key) + return UploadedFile(local_path=file, s3_key=object_key) + + yield _uploader + + with log_context(logging.INFO, msg=f"delete {len(uploaded_object_keys)}"): + await delete_all_object_versions( + s3_client, with_s3_bucket, uploaded_object_keys + ) + + +@pytest.fixture(autouse=True) +def set_log_levels_for_noisy_libraries() -> None: + # Reduce the log level for 'werkzeug' + logging.getLogger("werkzeug").setLevel(logging.WARNING) + + +@pytest.fixture +async def create_folder_on_s3( + create_folder_of_size_with_multiple_files: Callable[ + [ByteSize, ByteSize, ByteSize, Path | None, NonNegativeInt | None], Path + ], + upload_file: Callable[[Path, Path], Awaitable[UploadedFile]], + directory_size: ByteSize, + min_file_size: ByteSize, + max_file_size: ByteSize, + depth: NonNegativeInt | None, +) -> Callable[[], Awaitable[list[UploadedFile]]]: + async def _() -> list[UploadedFile]: + # create random files of random size and upload to S3 + folder = create_folder_of_size_with_multiple_files( + ByteSize(directory_size), + ByteSize(min_file_size), + ByteSize(max_file_size), + None, + depth, + ) + list_uploaded_files = [] + + with log_context(logging.INFO, msg=f"uploading {folder}") as ctx: + list_uploaded_files = [ + await uploaded_file + async for uploaded_file in limited_as_completed( + ( + upload_file(file, folder.parent) + for file in folder.rglob("*") + if file.is_file() + ), + limit=20, + ) + ] + ctx.logger.info("uploaded %s files", len(list_uploaded_files)) + return list_uploaded_files + + return _ + + +@pytest.fixture +async def with_uploaded_folder_on_s3( + create_folder_on_s3: Callable[[], Awaitable[list[UploadedFile]]], +) -> list[UploadedFile]: + return await create_folder_on_s3() + + +@pytest.fixture +async def copy_file( + simcore_s3_api: SimcoreS3API, with_s3_bucket: S3BucketName, s3_client: S3Client +) -> AsyncIterator[Callable[[S3ObjectKey, S3ObjectKey], Awaitable[S3ObjectKey]]]: + copied_object_keys = [] + + async def _copier(src_key: S3ObjectKey, dst_key: S3ObjectKey) -> S3ObjectKey: + file_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=src_key + ) + with log_context(logging.INFO, msg=f"copying {src_key} to {dst_key}") as ctx: + progress_cb = _CopyProgressCallback( + file_size=file_metadata.size, action="copied", logger=ctx.logger + ) + await simcore_s3_api.copy_object( + bucket=with_s3_bucket, + src_object_key=src_key, + dst_object_key=dst_key, + bytes_transfered_cb=progress_cb, + ) + copied_object_keys.append(dst_key) + return dst_key + + yield _copier + + # cleanup + await delete_all_object_versions(s3_client, with_s3_bucket, copied_object_keys) + + +@pytest.fixture +async def copy_files_recursively( + simcore_s3_api: SimcoreS3API, with_s3_bucket: S3BucketName, s3_client: S3Client +) -> AsyncIterator[Callable[[str, str], Awaitable[str]]]: + copied_dst_prefixes = [] + + async def _copier(src_prefix: str, dst_prefix: str) -> str: + src_directory_metadata = await simcore_s3_api.get_directory_metadata( + bucket=with_s3_bucket, prefix=src_prefix + ) + assert src_directory_metadata.size is not None + with log_context( + logging.INFO, + msg=f"copying {src_prefix} [{src_directory_metadata.size.human_readable()}] to {dst_prefix}", + ) as ctx: + progress_cb = _CopyProgressCallback( + file_size=src_directory_metadata.size, + action="copied", + 
logger=ctx.logger, + ) + await simcore_s3_api.copy_objects_recursively( + bucket=with_s3_bucket, + src_prefix=src_prefix, + dst_prefix=dst_prefix, + bytes_transfered_cb=progress_cb, + ) + + dst_directory_metadata = await simcore_s3_api.get_directory_metadata( + bucket=with_s3_bucket, prefix=dst_prefix + ) + assert dst_directory_metadata.size == src_directory_metadata.size + + copied_dst_prefixes.append(dst_prefix) + return dst_prefix + + yield _copier + + # cleanup + for dst_prefix in copied_dst_prefixes: + await simcore_s3_api.delete_objects_recursively( + bucket=with_s3_bucket, prefix=dst_prefix + ) + + +async def test_aiobotocore_s3_client_when_s3_server_goes_up_and_down( + mocked_aws_server: ThreadedMotoServer, + mocked_s3_server_envs: EnvVarsDict, + s3_client: S3Client, +): + # passes without exception + await s3_client.list_buckets() + mocked_aws_server.stop() + with pytest.raises(botocore.exceptions.EndpointConnectionError): + await s3_client.list_buckets() + + # restart + mocked_aws_server.start() + # passes without exception + await s3_client.list_buckets() + + +async def test_bucket_exists( + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, +): + assert not await simcore_s3_api.bucket_exists(bucket=non_existing_s3_bucket) + assert await simcore_s3_api.bucket_exists(bucket=with_s3_bucket) + assert not await simcore_s3_api.http_check_bucket_connected( + bucket=non_existing_s3_bucket + ) + assert await simcore_s3_api.http_check_bucket_connected(bucket=with_s3_bucket) + + +async def test_http_check_bucket_connected( + mocked_aws_server: ThreadedMotoServer, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, +): + assert ( + await simcore_s3_api.http_check_bucket_connected(bucket=with_s3_bucket) is True + ) + mocked_aws_server.stop() + assert ( + await simcore_s3_api.http_check_bucket_connected(bucket=with_s3_bucket) is False + ) + mocked_aws_server.start() + assert ( + await simcore_s3_api.http_check_bucket_connected(bucket=with_s3_bucket) is True + ) + + +_ROOT_LEVEL: Final[int] = -2 + + +def _get_paths_with_prefix( + uploaded_files: list[UploadedFile], *, prefix_level: int, path_prefix: Path | None +) -> tuple[set[Path], set[Path]]: + def _filter_by_prefix(uploaded_file: UploadedFile) -> bool: + return Path(uploaded_file.s3_key).is_relative_to(path_prefix or "") + + directories = { + Path(file.s3_key).parents[_ROOT_LEVEL - prefix_level] + for file in filter(_filter_by_prefix, uploaded_files) + if Path(file.s3_key).parent != path_prefix + } + files = { + Path(file.s3_key) + for file in filter(_filter_by_prefix, uploaded_files) + if Path(file.s3_key).parent == path_prefix + } + return directories, files + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_count_objects( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + simcore_s3_api: SimcoreS3API, +): + # assert pre-conditions + assert len(with_uploaded_folder_on_s3) >= 1, "wrong initialization of test!" 
+ + def find_deepest_file(files: list[UploadedFile]) -> Path: + return Path(max(files, key=lambda f: f.s3_key.count("/")).s3_key) + + deepest_file_path = find_deepest_file(with_uploaded_folder_on_s3) + prefixes = deepest_file_path.parents[0].parts + + # Start from the root and go down to the directory containing the deepest file + for level in range(len(prefixes)): + current_prefix = ( + Path(prefixes[0]).joinpath(*prefixes[1:level]) if level > 0 else None + ) + + directories, files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=level, path_prefix=current_prefix + ) + all_paths = directories | files + + num_objects = await simcore_s3_api.count_objects( + bucket=with_s3_bucket, prefix=current_prefix, start_after=None + ) + assert num_objects == len(all_paths) + + # get number on root is 1 + got = await simcore_s3_api.count_objects( + bucket=with_s3_bucket, prefix=None, start_after=None + ) + assert got == len(directories) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_list_objects_prefix( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + simcore_s3_api: SimcoreS3API, +): + # assert pre-conditions + assert len(with_uploaded_folder_on_s3) >= 1, "wrong initialization of test!" + + def find_deepest_file(files: list[UploadedFile]) -> Path: + return Path(max(files, key=lambda f: f.s3_key.count("/")).s3_key) + + deepest_file_path = find_deepest_file(with_uploaded_folder_on_s3) + prefixes = deepest_file_path.parents[0].parts + + # Start from the root and go down to the directory containing the deepest file + for level in range(len(prefixes)): + current_prefix = ( + Path(prefixes[0]).joinpath(*prefixes[1:level]) if level > 0 else None + ) + + directories, files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=level, path_prefix=current_prefix + ) + all_paths = directories | files + + objects, next_cursor = await simcore_s3_api.list_objects( + bucket=with_s3_bucket, prefix=current_prefix, start_after=None + ) + assert next_cursor is None + assert len(objects) == len(all_paths) + assert {_.as_path() for _ in objects} == all_paths + + # Check files and directories are correctly separated + received_files = {_ for _ in objects if isinstance(_, S3MetaData)} + received_directories = { + _ for _ in objects if isinstance(_, S3DirectoryMetaData) + } + assert len(received_files) == len(files) + assert len(received_directories) == len(directories) + + +async def test_list_objects_pagination_num_objects_limits( + faker: Faker, + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, +): + with pytest.raises(ValueError, match=r"num_objects must be >= 1"): + await simcore_s3_api.list_objects( + bucket=with_s3_bucket, + prefix=None, + start_after=None, + limit=faker.pyint(max_value=0), + ) + + with pytest.raises(ValueError, match=r"num_objects must be <= \d+"): + await simcore_s3_api.list_objects( + bucket=with_s3_bucket, + prefix=None, + start_after=None, + limit=_AWS_MAX_ITEMS_PER_PAGE + 1, + ) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + 
TypeAdapter(ByteSize).validate_python("10Kib"), + 0, + ) + ], + ids=byte_size_ids, +) +@pytest.mark.parametrize("limit", [10, 50, 300], ids=lambda x: f"limit={x}") +async def test_list_objects_pagination( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + simcore_s3_api: SimcoreS3API, + limit: int, +): + total_num_files = len(with_uploaded_folder_on_s3) + # pre-condition + directories, files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=0, path_prefix=None + ) + assert len(directories) == 1, "test pre-condition not fulfilled!" + assert not files + + first_level_prefix = next(iter(directories)) + first_level_directories, first_level_files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=1, path_prefix=first_level_prefix + ) + assert ( + not first_level_directories + ), "test pre-condition not fulfilled, there should be only files for this test" + assert len(first_level_files) == total_num_files + + # now we will fetch the file objects according to the given limit + num_fetch = int(round(total_num_files / limit + 0.5)) + assert num_fetch >= 1 + start_after_key = None + for i in range(num_fetch - 1): + objects, next_cursor = await simcore_s3_api.list_objects( + bucket=with_s3_bucket, + prefix=first_level_prefix, + start_after=start_after_key, + limit=limit, + ) + assert len(objects) == limit, f"fetch {i} returned a wrong number of objects" + assert isinstance(objects[-1], S3MetaData) + start_after_key = objects[-1].object_key + # last fetch + objects, next_cursor = await simcore_s3_api.list_objects( + bucket=with_s3_bucket, + prefix=first_level_prefix, + start_after=start_after_key, + limit=limit, + ) + assert next_cursor is None + assert len(objects) == (total_num_files - (num_fetch - 1) * limit) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + 0, + ) + ], + ids=byte_size_ids, +) +async def test_list_objects_partial_prefix( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + simcore_s3_api: SimcoreS3API, +): + total_num_files = len(with_uploaded_folder_on_s3) + # pre-condition + directories, files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=0, path_prefix=None + ) + assert len(directories) == 1, "test pre-condition not fulfilled!" 
+ assert not files + + first_level_prefix = next(iter(directories)) + first_level_directories, first_level_files = _get_paths_with_prefix( + with_uploaded_folder_on_s3, prefix_level=1, path_prefix=first_level_prefix + ) + assert ( + not first_level_directories + ), "test pre-condition not fulfilled, there should be only files for this test" + assert len(first_level_files) == total_num_files + + a_random_file = random.choice(list(first_level_files)) # noqa: S311 + a_partial_prefix = a_random_file.name[0:1] + expected_files = { + file for file in first_level_files if file.name.startswith(a_partial_prefix) + } + + # now we will fetch the file objects according to the given limit + objects, next_cursor = await simcore_s3_api.list_objects( + bucket=with_s3_bucket, + prefix=first_level_prefix / a_partial_prefix, + start_after=None, + is_partial_prefix=True, + ) + assert next_cursor is None + assert len(objects) == len(expected_files) + assert {_.as_path() for _ in objects} == expected_files + + +async def test_get_file_metadata( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, + s3_client: S3Client, +): + s3_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + aioboto_s3_object_response = await s3_client.get_object( + Bucket=with_s3_bucket, Key=with_uploaded_file_on_s3.s3_key + ) + assert s3_metadata.object_key == with_uploaded_file_on_s3.s3_key + assert s3_metadata.last_modified == aioboto_s3_object_response["LastModified"] + assert s3_metadata.e_tag == json.loads(aioboto_s3_object_response["ETag"]) + assert s3_metadata.sha256_checksum is None + assert s3_metadata.size == aioboto_s3_object_response["ContentLength"] + + +async def test_get_file_metadata_with_non_existing_bucket_raises( + mocked_s3_server_envs: EnvVarsDict, + non_existing_s3_bucket: S3BucketName, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, +): + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.get_object_metadata( + bucket=non_existing_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + +async def test_get_file_metadata_with_non_existing_key_raises( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + faker: Faker, +): + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=faker.pystr() + ) + + +async def test_delete_file( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + with_uploaded_file_on_s3: UploadedFile, +): + # delete the file + await simcore_s3_api.delete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + # check it is not available + assert not await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + # calling again does not raise + await simcore_s3_api.delete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + +async def test_delete_file_non_existing_bucket_raises( + mocked_s3_server_envs: EnvVarsDict, + non_existing_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + faker: Faker, +): + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.delete_object( + bucket=non_existing_s3_bucket, object_key=faker.pystr() + ) + + +async def test_undelete_file( + mocked_s3_server_envs: EnvVarsDict, + 
with_s3_bucket: S3BucketName, + with_versioning_enabled: None, + simcore_s3_api: SimcoreS3API, + with_uploaded_file_on_s3: UploadedFile, + upload_file: Callable[[Path, Path], Awaitable[UploadedFile]], + create_file_of_size: Callable[[ByteSize], Path], + s3_client: S3Client, +): + # we have a file uploaded + file_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + assert file_metadata.size == with_uploaded_file_on_s3.local_path.stat().st_size + + # upload another file on top of the existing one + new_file = create_file_of_size(TypeAdapter(ByteSize).validate_python("5Kib")) + await s3_client.upload_file( + Filename=f"{new_file}", + Bucket=with_s3_bucket, + Key=file_metadata.object_key, + ) + + # check that the metadata changed + new_file_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + assert new_file_metadata.size == new_file.stat().st_size + assert file_metadata.e_tag != new_file_metadata.e_tag + + # this deletes the new_file, so it's gone + await simcore_s3_api.delete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + assert not await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + # undelete the file, the new file is back + await simcore_s3_api.undelete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + assert await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + assert ( + await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + == new_file_metadata + ) + # does nothing + await simcore_s3_api.undelete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + # delete the file again + await simcore_s3_api.delete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + # check it is not available + assert ( + await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + is False + ) + + +async def test_undelete_file_raises_if_file_does_not_exist( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + faker: Faker, +): + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.undelete_object( + bucket=non_existing_s3_bucket, object_key=faker.pystr() + ) + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.undelete_object( + bucket=with_s3_bucket, object_key=faker.pystr() + ) + + +async def test_undelete_file_with_no_versioning_raises( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + with_uploaded_file_on_s3: UploadedFile, +): + await simcore_s3_api.delete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.undelete_object( + bucket=with_s3_bucket, object_key=with_uploaded_file_on_s3.s3_key + ) + + +async def test_create_single_presigned_download_link( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, + default_expiration_time_seconds: int, + tmp_path: Path, + faker: Faker, +): + assert await simcore_s3_api.object_exists( + bucket=with_s3_bucket,
object_key=with_uploaded_file_on_s3.s3_key + ) + download_url = await simcore_s3_api.create_single_presigned_download_link( + bucket=with_s3_bucket, + object_key=with_uploaded_file_on_s3.s3_key, + expiration_secs=default_expiration_time_seconds, + ) + assert download_url + + dest_file = tmp_path / faker.file_name() + async with ClientSession() as session: + response = await session.get(f"{download_url}") + response.raise_for_status() + with dest_file.open("wb") as fp: + fp.write(await response.read()) + assert dest_file.exists() + + assert filecmp.cmp(dest_file, with_uploaded_file_on_s3.local_path) is True + + +async def test_create_single_presigned_download_link_of_invalid_object_key_raises( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + default_expiration_time_seconds: int, + faker: Faker, +): + with pytest.raises(S3KeyNotFoundError): + await simcore_s3_api.create_single_presigned_download_link( + bucket=with_s3_bucket, + object_key=faker.file_name(), + expiration_secs=default_expiration_time_seconds, + ) + + +async def test_create_single_presigned_download_link_of_invalid_bucket_raises( + mocked_s3_server_envs: EnvVarsDict, + non_existing_s3_bucket: S3BucketName, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, + default_expiration_time_seconds: int, +): + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.create_single_presigned_download_link( + bucket=non_existing_s3_bucket, + object_key=with_uploaded_file_on_s3.s3_key, + expiration_secs=default_expiration_time_seconds, + ) + + +async def test_create_single_presigned_upload_link( + mocked_s3_server_envs: EnvVarsDict, + with_s3_bucket: S3BucketName, + simcore_s3_api: SimcoreS3API, + create_file_of_size: Callable[[ByteSize], Path], + default_expiration_time_seconds: int, + upload_to_presigned_link: Callable[ + [Path, AnyUrl, S3BucketName, S3ObjectKey], Awaitable[None] + ], +): + file = create_file_of_size(TypeAdapter(ByteSize).validate_python("1Mib")) + s3_object_key = file.name + presigned_url = await simcore_s3_api.create_single_presigned_upload_link( + bucket=with_s3_bucket, + object_key=s3_object_key, + expiration_secs=default_expiration_time_seconds, + ) + assert presigned_url + + # upload the file with a fake multipart upload links structure + await upload_to_presigned_link(file, presigned_url, with_s3_bucket, s3_object_key) + + # check it is there + s3_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=s3_object_key + ) + assert s3_metadata.size == file.stat().st_size + assert s3_metadata.last_modified + assert s3_metadata.e_tag + + +async def test_create_single_presigned_upload_link_with_non_existing_bucket_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + non_existing_s3_bucket: S3BucketName, + create_file_of_size: Callable[[ByteSize], Path], + default_expiration_time_seconds: int, +): + file = create_file_of_size(TypeAdapter(ByteSize).validate_python("1Mib")) + s3_object_key = file.name + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.create_single_presigned_upload_link( + bucket=non_existing_s3_bucket, + object_key=s3_object_key, + expiration_secs=default_expiration_time_seconds, + ) + + +@pytest.mark.parametrize( + "file_size", + [ + parametrized_file_size("10Mib"), + parametrized_file_size("100Mib"), + parametrized_file_size("1000Mib"), + ], + ids=byte_size_ids, +) +async def test_create_multipart_presigned_upload_link( + mocked_s3_server_envs: 
EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + upload_file_to_multipart_presigned_link_without_completing: Callable[ + ..., Awaitable[tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]] + ], + file_size: ByteSize, +): + ( + file_id, + upload_links, + uploaded_parts, + ) = await upload_file_to_multipart_presigned_link_without_completing(file_size) + + # now complete it + received_e_tag = await simcore_s3_api.complete_multipart_upload( + bucket=with_s3_bucket, + object_key=file_id, + upload_id=upload_links.upload_id, + uploaded_parts=uploaded_parts, + ) + + # check that the multipart upload is not listed anymore + list_ongoing_uploads = await simcore_s3_api.list_ongoing_multipart_uploads( + bucket=with_s3_bucket + ) + assert list_ongoing_uploads == [] + + # check the object is complete + s3_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=file_id + ) + assert s3_metadata.size == file_size + assert s3_metadata.last_modified + assert s3_metadata.e_tag == f"{json.loads(received_e_tag)}" + + # completing again does not raise anymore (was raising until moto==5.0.21) + await simcore_s3_api.complete_multipart_upload( + bucket=with_s3_bucket, + object_key=file_id, + upload_id=upload_links.upload_id, + uploaded_parts=uploaded_parts, + ) + + +@pytest.mark.parametrize( + "file_size", + [ + parametrized_file_size(MULTIPART_UPLOADS_MIN_TOTAL_SIZE.human_readable()), + ], + ids=byte_size_ids, +) +async def test_create_multipart_presigned_upload_link_invalid_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, + upload_file_to_multipart_presigned_link_without_completing: Callable[ + ..., Awaitable[tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]] + ], + file_size: ByteSize, + create_file_of_size: Callable[[ByteSize], Path], + faker: Faker, + default_expiration_time_seconds: int, +): + file = create_file_of_size(file_size) + # creating links with invalid bucket + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.create_multipart_upload_links( + bucket=non_existing_s3_bucket, + object_key=faker.pystr(), + file_size=ByteSize(file.stat().st_size), + expiration_secs=default_expiration_time_seconds, + sha256_checksum=TypeAdapter(SHA256Str).validate_python(faker.sha256()), + ) + + # completing with invalid bucket + ( + object_key, + upload_links, + uploaded_parts, + ) = await upload_file_to_multipart_presigned_link_without_completing(file_size) + + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.complete_multipart_upload( + bucket=non_existing_s3_bucket, + object_key=object_key, + upload_id=upload_links.upload_id, + uploaded_parts=uploaded_parts, + ) + + # with pytest.raises(S3KeyNotFoundError): + # NOTE: this does not raise... and it returns the file_id of the original file... 
+ await simcore_s3_api.complete_multipart_upload( + bucket=with_s3_bucket, + object_key=faker.pystr(), + upload_id=upload_links.upload_id, + uploaded_parts=uploaded_parts, + ) + + +@pytest.mark.parametrize("file_size", [parametrized_file_size("1Gib")]) +async def test_break_completion_of_multipart_upload( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + upload_file_to_multipart_presigned_link_without_completing: Callable[ + ..., Awaitable[tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]] + ], + file_size: ByteSize, +): + ( + object_key, + upload_links, + uploaded_parts, + ) = await upload_file_to_multipart_presigned_link_without_completing(file_size) + # let's break the completion very quickly task and see what happens + VERY_SHORT_TIMEOUT = 0.2 + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for( + simcore_s3_api.complete_multipart_upload( + bucket=with_s3_bucket, + object_key=object_key, + upload_id=upload_links.upload_id, + uploaded_parts=uploaded_parts, + ), + timeout=VERY_SHORT_TIMEOUT, + ) + # check we have the multipart upload initialized and listed + ongoing_multipart_uploads = await simcore_s3_api.list_ongoing_multipart_uploads( + bucket=with_s3_bucket + ) + assert ongoing_multipart_uploads + assert len(ongoing_multipart_uploads) == 1 + ongoing_upload_id, ongoing_file_id = ongoing_multipart_uploads[0] + assert ongoing_upload_id == upload_links.upload_id + assert ongoing_file_id == object_key + + # now wait + await asyncio.sleep(10) + + # check that the completion of the update completed... + assert ( + await simcore_s3_api.list_ongoing_multipart_uploads(bucket=with_s3_bucket) == [] + ) + + # check the object is complete + s3_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=object_key + ) + assert s3_metadata.size == file_size + assert s3_metadata.last_modified + assert s3_metadata.e_tag + + +@pytest.mark.parametrize( + "file_size", + [parametrized_file_size(f"{MULTIPART_UPLOADS_MIN_TOTAL_SIZE}")], + ids=byte_size_ids, +) +async def test_abort_multipart_upload( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, + upload_file_to_multipart_presigned_link_without_completing: Callable[ + ..., Awaitable[tuple[S3ObjectKey, MultiPartUploadLinks, list[UploadedPart]]] + ], + file_size: ByteSize, + faker: Faker, +): + ( + object_key, + upload_links, + _, + ) = await upload_file_to_multipart_presigned_link_without_completing(file_size) + + # first abort with wrong bucket shall raise + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.abort_multipart_upload( + bucket=non_existing_s3_bucket, + object_key=object_key, + upload_id=upload_links.upload_id, + ) + + # now abort it + await simcore_s3_api.abort_multipart_upload( + bucket=with_s3_bucket, + object_key=faker.pystr(), + upload_id=upload_links.upload_id, + ) + # doing it again raises + with pytest.raises(S3UploadNotFoundError): + await simcore_s3_api.abort_multipart_upload( + bucket=with_s3_bucket, + object_key=object_key, + upload_id=upload_links.upload_id, + ) + + # now check that the listing is empty + ongoing_multipart_uploads = await simcore_s3_api.list_ongoing_multipart_uploads( + bucket=with_s3_bucket + ) + assert ongoing_multipart_uploads == [] + + # check it is not available + assert ( + await simcore_s3_api.object_exists(bucket=with_s3_bucket, object_key=object_key) + is False + ) + + 
+@pytest.mark.parametrize( + "file_size", + [parametrized_file_size("500Mib")], + ids=byte_size_ids, +) +async def test_upload_file( + mocked_s3_server_envs: EnvVarsDict, + upload_file: Callable[[Path], Awaitable[UploadedFile]], + file_size: ByteSize, + create_file_of_size: Callable[[ByteSize], Path], +): + file = create_file_of_size(file_size) + await upload_file(file) + + +async def test_upload_file_invalid_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + non_existing_s3_bucket: S3BucketName, + create_file_of_size: Callable[[ByteSize, str | None], Path], + faker: Faker, +): + file = create_file_of_size(ByteSize(10), None) + with pytest.raises(S3BucketInvalidError): + await simcore_s3_api.upload_file( + bucket=non_existing_s3_bucket, + file=file, + object_key=faker.pystr(), + bytes_transfered_cb=None, + ) + + +@pytest.mark.parametrize( + "file_size", + [parametrized_file_size("500Mib")], + ids=byte_size_ids, +) +async def test_copy_file( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + file_size: ByteSize, + upload_file: Callable[[Path], Awaitable[UploadedFile]], + copy_file: Callable[[S3ObjectKey, S3ObjectKey], Awaitable[S3ObjectKey]], + create_file_of_size: Callable[[ByteSize], Path], + faker: Faker, +): + file = create_file_of_size(file_size) + uploaded_file = await upload_file(file) + dst_object_key = faker.file_name() + await copy_file(uploaded_file.s3_key, dst_object_key) + + # check the object is uploaded + assert ( + await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=dst_object_key + ) + is True + ) + dst_file_metadata = await simcore_s3_api.get_object_metadata( + bucket=with_s3_bucket, object_key=dst_object_key + ) + assert uploaded_file.local_path.stat().st_size == dst_file_metadata.size + + +async def test_copy_file_invalid_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, + upload_file: Callable[[Path], Awaitable[UploadedFile]], + create_file_of_size: Callable[[ByteSize], Path], + faker: Faker, +): + file = create_file_of_size(TypeAdapter(ByteSize).validate_python("1MiB")) + uploaded_file = await upload_file(file) + dst_object_key = faker.file_name() + # NOTE: since aioboto3 13.1.0 this raises S3KeyNotFoundError instead of S3BucketInvalidError + with pytest.raises(S3KeyNotFoundError, match=f"{non_existing_s3_bucket}"): + await simcore_s3_api.copy_object( + bucket=non_existing_s3_bucket, + src_object_key=uploaded_file.s3_key, + dst_object_key=dst_object_key, + bytes_transfered_cb=None, + ) + fake_src_key = faker.file_name() + with pytest.raises(S3KeyNotFoundError, match=rf"{fake_src_key}"): + await simcore_s3_api.copy_object( + bucket=with_s3_bucket, + src_object_key=fake_src_key, + dst_object_key=dst_object_key, + bytes_transfered_cb=None, + ) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_get_directory_metadata( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + directory_size: ByteSize, +): + metadata = await simcore_s3_api.get_directory_metadata( + bucket=with_s3_bucket, + 
prefix=Path(with_uploaded_folder_on_s3[0].s3_key).parts[0], + ) + assert metadata + assert metadata.size == directory_size + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_get_directory_metadata_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + non_existing_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], +): + with pytest.raises(S3BucketInvalidError, match=rf"{non_existing_s3_bucket}"): + await simcore_s3_api.get_directory_metadata( + bucket=non_existing_s3_bucket, + prefix=Path(with_uploaded_folder_on_s3[0].s3_key).parts[0], + ) + + wrong_prefix = "/" + metadata = await simcore_s3_api.get_directory_metadata( + bucket=with_s3_bucket, + prefix=wrong_prefix, + ) + assert metadata.size == 0 + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_delete_file_recursively( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], +): + # deleting from the root + await simcore_s3_api.delete_objects_recursively( + bucket=with_s3_bucket, + prefix=Path(with_uploaded_folder_on_s3[0].s3_key).parts[0], + ) + files_exists = set( + await asyncio.gather( + *[ + simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=file.s3_key + ) + for file in with_uploaded_folder_on_s3 + ] + ) + ) + assert len(files_exists) == 1 + assert next(iter(files_exists)) is False + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_delete_file_recursively_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + non_existing_s3_bucket: S3BucketName, + with_s3_bucket: S3BucketName, + with_uploaded_folder_on_s3: list[UploadedFile], + faker: Faker, +): + with pytest.raises(S3BucketInvalidError, match=rf"{non_existing_s3_bucket}"): + await simcore_s3_api.delete_objects_recursively( + bucket=non_existing_s3_bucket, + prefix=Path(with_uploaded_folder_on_s3[0].s3_key).parts[0], + ) + # this will do nothing + await simcore_s3_api.delete_objects_recursively( + bucket=with_s3_bucket, + prefix=f"{faker.pystr()}", + ) + # and these files still exist + some_file = next( + iter(filter(lambda f: f.local_path.is_file(), with_uploaded_folder_on_s3)) + ) + assert await simcore_s3_api.object_exists( + bucket=with_s3_bucket, object_key=some_file.s3_key + ) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ) + ], + ids=byte_size_ids, +) +async def test_copy_files_recursively( + mocked_s3_server_envs: EnvVarsDict, + with_uploaded_folder_on_s3: list[UploadedFile], + copy_files_recursively: Callable[[str, str],
Awaitable[str]], +): + src_folder = Path(with_uploaded_folder_on_s3[0].s3_key).parts[0] + dst_folder = f"{src_folder}-copy" + await copy_files_recursively(src_folder, dst_folder) + + # doing it again shall raise + with pytest.raises(S3DestinationNotEmptyError, match=rf"{dst_folder}"): + await copy_files_recursively(src_folder, dst_folder) + + +async def test_copy_files_recursively_raises( + mocked_s3_server_envs: EnvVarsDict, + simcore_s3_api: SimcoreS3API, + non_existing_s3_bucket: S3BucketName, +): + with pytest.raises(S3BucketInvalidError, match=rf"{non_existing_s3_bucket}"): + await simcore_s3_api.copy_objects_recursively( + bucket=non_existing_s3_bucket, + src_prefix="", + dst_prefix="", + bytes_transfered_cb=None, + ) + + +@pytest.mark.parametrize( + "file_size, expected_multipart", + [ + (MULTIPART_UPLOADS_MIN_TOTAL_SIZE - 1, False), + (MULTIPART_UPLOADS_MIN_TOTAL_SIZE, True), + ], +) +def test_is_multipart(file_size: ByteSize, expected_multipart: bool): + assert SimcoreS3API.is_multipart(file_size) == expected_multipart + + +@pytest.mark.parametrize( + "bucket, object_key, expected_s3_url", + [ + ( + "some-bucket", + "an/object/separate/by/slashes", + TypeAdapter(AnyUrl).validate_python( + "s3://some-bucket/an/object/separate/by/slashes" + ), + ), + ( + "some-bucket", + "an/object/separate/by/slashes-?/3#$", + TypeAdapter(AnyUrl).validate_python( + r"s3://some-bucket/an/object/separate/by/slashes-%3F/3%23%24" + ), + ), + ], +) +def test_compute_s3_url( + bucket: S3BucketName, object_key: S3ObjectKey, expected_s3_url: AnyUrl +): + assert ( + SimcoreS3API.compute_s3_url(bucket=bucket, object_key=object_key) + == expected_s3_url + ) + + +@pytest.mark.parametrize( + "file_size", + [ + parametrized_file_size("10Mib"), + parametrized_file_size("100Mib"), + parametrized_file_size("1000Mib"), + ], + ids=byte_size_ids, +) +def test_upload_file_performance( + mocked_s3_server_envs: EnvVarsDict, + create_file_of_size: Callable[[ByteSize], Path], + file_size: ByteSize, + upload_file: Callable[[Path, Path | None], Awaitable[UploadedFile]], + benchmark: BenchmarkFixture, +): + # create random files of random size and upload to S3 + file = create_file_of_size(file_size) + + def run_async_test(*args, **kwargs) -> None: + asyncio.get_event_loop().run_until_complete(upload_file(file, None)) + + benchmark(run_async_test) + + +@pytest.mark.parametrize( + "directory_size, min_file_size, max_file_size, depth", + [ + ( + TypeAdapter(ByteSize).validate_python("1Mib"), + TypeAdapter(ByteSize).validate_python("1B"), + TypeAdapter(ByteSize).validate_python("10Kib"), + None, + ), + ( + TypeAdapter(ByteSize).validate_python("500Mib"), + TypeAdapter(ByteSize).validate_python("10Mib"), + TypeAdapter(ByteSize).validate_python("50Mib"), + None, + ), + ], + ids=byte_size_ids, +) +def test_copy_recurively_performance( + mocked_s3_server_envs: EnvVarsDict, + with_uploaded_folder_on_s3: list[UploadedFile], + copy_files_recursively: Callable[[str, str], Awaitable[str]], + benchmark: BenchmarkFixture, +): + src_folder = Path(with_uploaded_folder_on_s3[0].s3_key).parts[0] + + folder_index = 0 + + def dst_folder_setup() -> tuple[tuple[str], dict[str, Any]]: + nonlocal folder_index + dst_folder = f"{src_folder}-copy-{folder_index}" + folder_index += 1 + return (dst_folder,), {} + + def run_async_test(dst_folder: str) -> None: + asyncio.get_event_loop().run_until_complete( + copy_files_recursively(src_folder, dst_folder) + ) + + benchmark.pedantic(run_async_test, setup=dst_folder_setup, rounds=4) + + +async def 
test_read_from_bytes_streamer( + mocked_s3_server_envs: EnvVarsDict, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + fake_file_name: Path, +): + async with aiofiles.open(fake_file_name, "wb") as f: + bytes_streamer = await simcore_s3_api.get_bytes_streamer_from_object( + with_s3_bucket, with_uploaded_file_on_s3.s3_key, chunk_size=1024 + ) + assert isinstance(bytes_streamer.data_size, DataSize) + async for chunk in bytes_streamer.with_progress_bytes_iter(AsyncMock()): + await f.write(chunk) + + assert bytes_streamer.data_size == fake_file_name.stat().st_size + + await assert_same_file_content(with_uploaded_file_on_s3.local_path, fake_file_name) + + +@pytest.mark.parametrize("upload_from_s3", [True, False]) +async def test_upload_object_from_file_like( + mocked_s3_server_envs: EnvVarsDict, + with_uploaded_file_on_s3: UploadedFile, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + upload_from_s3: bool, +): + object_key = "read_from_s3_write_to_s3" + + if upload_from_s3: + bytes_streamer = await simcore_s3_api.get_bytes_streamer_from_object( + with_s3_bucket, with_uploaded_file_on_s3.s3_key + ) + assert isinstance(bytes_streamer.data_size, DataSize) + await simcore_s3_api.upload_object_from_file_like( + with_s3_bucket, + object_key, + FileLikeBytesIterReader( + bytes_streamer.with_progress_bytes_iter(AsyncMock()) + ), + ) + else: + await simcore_s3_api.upload_object_from_file_like( + with_s3_bucket, + object_key, + FileLikeBytesIterReader( + DiskStreamReader(with_uploaded_file_on_s3.local_path) + .get_bytes_streamer() + .bytes_iter_callable() + ), + ) + + await simcore_s3_api.delete_object(bucket=with_s3_bucket, object_key=object_key) + + +@contextmanager +def _folder_with_files( + create_folder_of_size_with_multiple_files: Callable[ + [ByteSize, ByteSize, ByteSize, Path | None], Path + ], + target_folder: Path, +) -> Iterator[dict[str, Path]]: + target_folder.mkdir(parents=True, exist_ok=True) + folder_path = create_folder_of_size_with_multiple_files( + TypeAdapter(ByteSize).validate_python("10MiB"), + TypeAdapter(ByteSize).validate_python("10KiB"), + TypeAdapter(ByteSize).validate_python("100KiB"), + target_folder, + ) + + relative_names_to_paths = get_files_info_from_path(folder_path) + + yield relative_names_to_paths + + for file in relative_names_to_paths.values(): + file.unlink() + + +@pytest.fixture +def path_local_files_for_archive( + tmp_path: Path, + create_folder_of_size_with_multiple_files: Callable[ + [ByteSize, ByteSize, ByteSize, Path | None], Path + ], +) -> Iterator[Path]: + dir_path = tmp_path / "not_uploaded" + with _folder_with_files(create_folder_of_size_with_multiple_files, dir_path): + yield dir_path + + +@pytest.fixture +async def path_s3_files_for_archive( + tmp_path: Path, + create_folder_of_size_with_multiple_files: Callable[ + [ByteSize, ByteSize, ByteSize, Path | None], Path + ], + s3_client: S3Client, + with_s3_bucket: S3BucketName, +) -> AsyncIterator[Path]: + dir_path = tmp_path / "stored_in_s3" + with _folder_with_files( + create_folder_of_size_with_multiple_files, dir_path + ) as relative_names_to_paths: + await limited_gather( + *( + s3_client.upload_file( + Filename=f"{file}", Bucket=with_s3_bucket, Key=s3_object_key + ) + for s3_object_key, file in relative_names_to_paths.items() + ), + limit=10, + ) + yield dir_path + + await delete_all_object_versions( + s3_client, with_s3_bucket, relative_names_to_paths.keys() + ) + + +@pytest.fixture +def archive_download_path(tmp_path: Path, 
faker: Faker) -> Iterator[Path]: + path = tmp_path / f"downloaded_archive_{faker.uuid4()}.zip" + yield path + if path.exists(): + path.unlink() + + +@pytest.fixture +async def extracted_archive_path(tmp_path: Path, faker: Faker) -> AsyncIterator[Path]: + path = tmp_path / f"decompressed_archive_{faker.uuid4()}" + path.mkdir(parents=True, exist_ok=True) + assert path.is_dir() + yield path + await remove_directory(path) + assert not path.is_dir() + + +@pytest.fixture +async def archive_s3_object_key( + with_s3_bucket: S3BucketName, simcore_s3_api: SimcoreS3API +) -> AsyncIterator[S3ObjectKey]: + s3_object_key = "read_from_s3_write_to_s3" + yield s3_object_key + await simcore_s3_api.delete_object(bucket=with_s3_bucket, object_key=s3_object_key) + + +@pytest.fixture +def mocked_progress_bar_cb(mocker: MockerFixture) -> Mock: + def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + + return mocker.Mock(side_effect=_progress_cb) + + +async def test_workflow_compress_s3_objects_and_local_files_in_a_single_archive_then_upload_to_s3( + mocked_s3_server_envs: EnvVarsDict, + path_local_files_for_archive: Path, + path_s3_files_for_archive: Path, + archive_download_path: Path, + extracted_archive_path: Path, + simcore_s3_api: SimcoreS3API, + with_s3_bucket: S3BucketName, + s3_client: S3Client, + archive_s3_object_key: S3ObjectKey, + mocked_progress_bar_cb: Mock, +): + # In this test: + # - files are read from disk and S3 + # - a zip archive is created on the go + # - the zip archive is streamed to S3 as soon as chunks inside it are created + # Uses no disk and constant memory for the entire operation. + + # 1. assemble and upload zip archive + + archive_entries: ArchiveEntries = [] + + local_files = get_files_info_from_path(path_local_files_for_archive) + for file_name, file_path in local_files.items(): + archive_entries.append( + ( + file_name, + DiskStreamReader(file_path).get_bytes_streamer(), + ) + ) + + s3_files = get_files_info_from_path(path_s3_files_for_archive) + + for s3_object_key in s3_files: + archive_entries.append( + ( + s3_object_key, + await simcore_s3_api.get_bytes_streamer_from_object( + with_s3_bucket, s3_object_key + ), + ) + ) + + # shuffle order of files in archive. + # some will be read from S3 and some from the disk + random.shuffle(archive_entries) + + started = time.time() + + async with ProgressBarData( + num_steps=1, + progress_report_cb=mocked_progress_bar_cb, + description="root_bar", + ) as progress_bar: + await simcore_s3_api.upload_object_from_file_like( + with_s3_bucket, + archive_s3_object_key, + FileLikeBytesIterReader( + get_zip_bytes_iter( + archive_entries, + progress_bar=progress_bar, + chunk_size=STREAM_READER_CHUNK_SIZE, + ) + ), + ) + + duration = time.time() - started + print(f"Zip created on S3 in {duration:.2f} seconds") + + # 2. download zip archive from S3 + print(f"downloading {archive_download_path}") + await s3_client.download_file( + with_s3_bucket, archive_s3_object_key, f"{archive_download_path}" + ) + + # 3. extract archive + await unarchive_dir(archive_download_path, extracted_archive_path) + + # 4. 
compare + print("comparing files") + all_files_in_zip = get_files_info_from_path(path_local_files_for_archive) | s3_files + + await assert_same_contents( + all_files_in_zip, get_files_info_from_path(extracted_archive_path) + ) diff --git a/packages/aws-library/tests/test_s3_utils.py b/packages/aws-library/tests/test_s3_utils.py new file mode 100644 index 00000000000..cfba1634943 --- /dev/null +++ b/packages/aws-library/tests/test_s3_utils.py @@ -0,0 +1,90 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from aws_library.s3._utils import ( + _MULTIPART_MAX_NUMBER_OF_PARTS, + _MULTIPART_UPLOADS_TARGET_MAX_PART_SIZE, + compute_num_file_chunks, +) +from pydantic import ByteSize, TypeAdapter +from pytest_simcore.helpers.parametrizations import byte_size_ids + + +@pytest.mark.parametrize( + "file_size, expected_num_chunks, expected_chunk_size", + [ + ( + TypeAdapter(ByteSize).validate_python("5Mib"), + 1, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("10Mib"), + 1, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("20Mib"), + 2, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("50Mib"), + 5, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("150Mib"), + 15, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("550Mib"), + 55, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("560Gib"), + 5735, + TypeAdapter(ByteSize).validate_python("100Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("5Tib"), + 8739, + TypeAdapter(ByteSize).validate_python("600Mib"), + ), + ( + TypeAdapter(ByteSize).validate_python("15Tib"), + 7680, + TypeAdapter(ByteSize).validate_python("2Gib"), + ), + ( + TypeAdapter(ByteSize).validate_python("9431773844"), + 900, + TypeAdapter(ByteSize).validate_python("10Mib"), + ), + ], + ids=byte_size_ids, +) +def test_compute_num_file_chunks( + file_size: ByteSize, expected_num_chunks: int, expected_chunk_size: ByteSize +): + num_chunks, chunk_size = compute_num_file_chunks(file_size) + assert num_chunks == expected_num_chunks + assert chunk_size == expected_chunk_size + + +def test_enormous_file_size_raises_value_error(): + enormous_file_size = TypeAdapter(ByteSize).validate_python( + ( + max(_MULTIPART_UPLOADS_TARGET_MAX_PART_SIZE) + * _MULTIPART_MAX_NUMBER_OF_PARTS + + 1 + ), + ) + with pytest.raises(ValueError): + compute_num_file_chunks(enormous_file_size) diff --git a/packages/aws-library/tests/test_ssm_client.py b/packages/aws-library/tests/test_ssm_client.py new file mode 100644 index 00000000000..4c3a6c0c772 --- /dev/null +++ b/packages/aws-library/tests/test_ssm_client.py @@ -0,0 +1,218 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + + +import dataclasses +from collections.abc import AsyncIterator + +import botocore.exceptions +import pytest +from aws_library.ssm import ( + SimcoreSSMAPI, + SSMCommandExecutionResultError, + SSMCommandExecutionTimeoutError, + SSMInvalidCommandError, + SSMNotConnectedError, +) +from aws_library.ssm._client import _AWS_WAIT_NUM_RETRIES +from faker import Faker +from moto.server import ThreadedMotoServer +from pytest_mock.plugin 
import MockerFixture +from settings_library.ssm import SSMSettings +from types_aiobotocore_ssm import SSMClient + + +@pytest.fixture +async def simcore_ssm_api( + mocked_ssm_server_settings: SSMSettings, +) -> AsyncIterator[SimcoreSSMAPI]: + ec2 = await SimcoreSSMAPI.create(settings=mocked_ssm_server_settings) + assert ec2 + assert ec2._client + assert ec2._exit_stack + assert ec2._session + yield ec2 + await ec2.close() + + +async def test_ssm_client_lifespan(simcore_ssm_api: SimcoreSSMAPI): + ... + + +async def test_aiobotocore_ssm_client_when_ssm_server_goes_up_and_down( + mocked_aws_server: ThreadedMotoServer, + ssm_client: SSMClient, +): + # passes without exception + await ssm_client.list_commands(MaxResults=1) + mocked_aws_server.stop() + with pytest.raises(botocore.exceptions.EndpointConnectionError): + await ssm_client.list_commands(MaxResults=1) + + # restart + mocked_aws_server.start() + # passes without exception + await ssm_client.list_commands(MaxResults=1) + + +@pytest.fixture +def fake_command_id(faker: Faker) -> str: + return faker.pystr(min_chars=36, max_chars=36) + + +async def test_ping( + mocked_aws_server: ThreadedMotoServer, + simcore_ssm_api: SimcoreSSMAPI, + fake_command_id: str, + faker: Faker, +): + assert await simcore_ssm_api.ping() is True + mocked_aws_server.stop() + assert await simcore_ssm_api.ping() is False + with pytest.raises(SSMNotConnectedError): + await simcore_ssm_api.get_command(faker.pystr(), command_id=fake_command_id) + mocked_aws_server.start() + assert await simcore_ssm_api.ping() is True + + +async def test_get_command( + mocked_aws_server: ThreadedMotoServer, + simcore_ssm_api: SimcoreSSMAPI, + faker: Faker, + fake_command_id: str, +): + with pytest.raises(SSMInvalidCommandError): + await simcore_ssm_api.get_command(faker.pystr(), command_id=fake_command_id) + + +async def test_send_command( + mocked_aws_server: ThreadedMotoServer, + simcore_ssm_api: SimcoreSSMAPI, + faker: Faker, + fake_command_id: str, +): + command_name = faker.word() + target_instance_id = faker.pystr() + sent_command = await simcore_ssm_api.send_command( + instance_ids=[target_instance_id], + command=faker.text(), + command_name=command_name, + ) + assert sent_command + assert sent_command.command_id + assert sent_command.name == command_name + assert sent_command.instance_ids == [target_instance_id] + assert sent_command.status == "Success" + + got = await simcore_ssm_api.get_command( + target_instance_id, command_id=sent_command.command_id + ) + assert dataclasses.asdict(got) == { + **dataclasses.asdict(sent_command), + "message": "Success", + "start_time": got.start_time, + "finish_time": got.finish_time, + } + with pytest.raises(SSMInvalidCommandError): + await simcore_ssm_api.get_command( + faker.pystr(), command_id=sent_command.command_id + ) + with pytest.raises(SSMInvalidCommandError): + await simcore_ssm_api.get_command( + target_instance_id, command_id=fake_command_id + ) + + +async def test_is_instance_connected_to_ssm_server( + mocked_aws_server: ThreadedMotoServer, + simcore_ssm_api: SimcoreSSMAPI, + faker: Faker, + mocker: MockerFixture, +): + # NOTE: moto does not provide that mock functionality and therefore we mock it ourselves + mock = mocker.patch( + "pytest_simcore.helpers.moto._patch_describe_instance_information", + autospec=True, + return_value={"InstanceInformationList": [{"PingStatus": "Inactive"}]}, + ) + assert ( + await simcore_ssm_api.is_instance_connected_to_ssm_server(faker.pystr()) + is False + ) + mock.return_value = 
{"InstanceInformationList": [{"PingStatus": "Online"}]} + assert ( + await simcore_ssm_api.is_instance_connected_to_ssm_server(faker.pystr()) is True + ) + + +async def test_wait_for_has_instance_completed_cloud_init( + mocked_aws_server: ThreadedMotoServer, + simcore_ssm_api: SimcoreSSMAPI, + faker: Faker, + mocker: MockerFixture, +): + assert ( + await simcore_ssm_api.wait_for_has_instance_completed_cloud_init(faker.pystr()) + is False + ) + original_get_command_invocation = ( + simcore_ssm_api._client.get_command_invocation # noqa: SLF001 + ) + + # NOTE: wait_for_has_instance_completed_cloud_init calls twice get_command_invocation + async def mock_send_command_timesout(*args, **kwargs): + return {"Status": "Failure", "StatusDetails": faker.text()} + + mocked_command_invocation = mocker.patch.object( + simcore_ssm_api._client, # noqa: SLF001 + "get_command_invocation", + side_effect=mock_send_command_timesout, + ) + with pytest.raises(SSMCommandExecutionTimeoutError, match="Timed-out"): + await simcore_ssm_api.wait_for_has_instance_completed_cloud_init(faker.pystr()) + + assert mocked_command_invocation.call_count == _AWS_WAIT_NUM_RETRIES + + mocked_command_invocation.reset_mock() + call_count = 0 + + async def mock_wait_command_failed(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count == 2: + return { + "CommandId": kwargs["CommandId"], + "Status": "Failure", + "StatusDetails": faker.text(), + } + return await original_get_command_invocation(*args, **kwargs) + + mocked_command_invocation.side_effect = mock_wait_command_failed + with pytest.raises(SSMCommandExecutionResultError): + await simcore_ssm_api.wait_for_has_instance_completed_cloud_init(faker.pystr()) + assert mocked_command_invocation.call_count == 2 + + # NOTE: default will return False as we need to mock the return value of the cloud-init function + assert ( + await simcore_ssm_api.wait_for_has_instance_completed_cloud_init(faker.pystr()) + is False + ) + + mocked_command_invocation.reset_mock() + call_count = 0 + + async def mock_wait_command_successful(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count == 2: + return {"Status": "Success", "StandardOutputContent": "status: done\n"} + return await original_get_command_invocation(*args, **kwargs) + + mocked_command_invocation.side_effect = mock_wait_command_successful + assert ( + await simcore_ssm_api.wait_for_has_instance_completed_cloud_init(faker.pystr()) + is True + ) + assert mocked_command_invocation.call_count == 2 diff --git a/services/agent/src/simcore_service_agent/modules/__init__.py b/packages/common-library/.gitignore similarity index 100% rename from services/agent/src/simcore_service_agent/modules/__init__.py rename to packages/common-library/.gitignore diff --git a/packages/common-library/Makefile b/packages/common-library/Makefile new file mode 100644 index 00000000000..b554ec6f9c0 --- /dev/null +++ b/packages/common-library/Makefile @@ -0,0 +1,49 @@ +# +# Targets for DEVELOPMENT of common Library +# +include ../../scripts/common.Makefile +include ../../scripts/common-package.Makefile + +.PHONY: requirements +requirements: ## compiles pip requirements (.in -> .txt) + @$(MAKE_C) requirements reqs + + +.PHONY: install-dev install-prod install-ci +install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode + # installing in $(subst install-,,$@) mode + @uv pip sync requirements/$(subst install-,,$@).txt + + +.PHONY: tests tests-ci +tests: ## runs unit tests + # running 
unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov=common_library \ + --durations=10 \ + --exitfirst \ + --failed-first \ + --pdb \ + -vv \ + $(CURDIR)/tests + +tests-ci: ## runs unit tests [ci-mode] + # running unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-append \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov-report=xml \ + --cov=common_library \ + --durations=10 \ + --log-date-format="%Y-%m-%d %H:%M:%S" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ + --verbose \ + -m "not heavy_load" \ + $(CURDIR)/tests diff --git a/packages/common-library/README.md b/packages/common-library/README.md new file mode 100644 index 00000000000..8e5c489787b --- /dev/null +++ b/packages/common-library/README.md @@ -0,0 +1,42 @@ +# simcore pydantic common library + +Contains the common classes, functions and in general utilities for use in the simcore platform. + +## Installation + +```console +make help +make install-dev +``` + +## Test + +```console +make help +make test-dev +``` + + +## Diagnostics + +How run diagnostics on the service metadata published in a docker registry? + +1. Setup environment +```bash +make devenv +source .venv/bin/activate + +cd packages/common-library +make install-dev +``` +2. Set ``REGISTRY_*`` env vars in ``.env`` (in the repository base folder) +3. Download test data, run diagnostics, archive tests-data, and cleanup +```bash +export DEPLOY_NAME=my-deploy + +make pull_test_data >$DEPLOY_NAME-registry-diagnostics.log 2>&1 +pytest -vv -m diagnostics >>$DEPLOY_NAME-registry-diagnostics.log 2>&1 +zip -r $DEPLOY_NAME-registry-test-data.zip tests/data/.downloaded-ignore +rm -r tests/data/.downloaded-ignore +``` +4. Move all ``$DEPLOY_NAME-*`` files to an archive diff --git a/packages/common-library/VERSION b/packages/common-library/VERSION new file mode 100644 index 00000000000..6e8bf73aa55 --- /dev/null +++ b/packages/common-library/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/packages/common-library/requirements/Makefile b/packages/common-library/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/packages/common-library/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt diff --git a/packages/common-library/requirements/_base.in b/packages/common-library/requirements/_base.in new file mode 100644 index 00000000000..2277d690e64 --- /dev/null +++ b/packages/common-library/requirements/_base.in @@ -0,0 +1,8 @@ +# +# Specifies third-party dependencies for 'common-library' +# +--constraint ../../../requirements/constraints.txt + +orjson +pydantic +pydantic-extra-types diff --git a/packages/common-library/requirements/_base.txt b/packages/common-library/requirements/_base.txt new file mode 100644 index 00000000000..062e97cee34 --- /dev/null +++ b/packages/common-library/requirements/_base.txt @@ -0,0 +1,20 @@ +annotated-types==0.7.0 + # via pydantic +orjson==3.10.15 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +pydantic==2.10.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in + # pydantic-extra-types +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via -r requirements/_base.in +typing-extensions==4.12.2 + # via + # pydantic + # pydantic-core + # pydantic-extra-types diff --git a/packages/common-library/requirements/_test.in b/packages/common-library/requirements/_test.in new file mode 100644 index 00000000000..1fe37ac0151 --- /dev/null +++ b/packages/common-library/requirements/_test.in @@ -0,0 +1,22 @@ +# +# Specifies dependencies required to run 'common-library' +# +--constraint ../../../requirements/constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + +coverage +faker +pydantic-settings +pytest +pytest-asyncio +pytest-cov +pytest-icdiff +pytest-instafail +pytest-mock +pytest-runner +pytest-sugar +python-dotenv diff --git a/packages/common-library/requirements/_test.txt b/packages/common-library/requirements/_test.txt new file mode 100644 index 00000000000..9737c253a39 --- /dev/null +++ b/packages/common-library/requirements/_test.txt @@ -0,0 +1,71 @@ +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +faker==36.1.1 + # via -r requirements/_test.in +icdiff==2.0.7 + # via pytest-icdiff +iniconfig==2.0.0 + # via pytest +packaging==24.2 + # via + # pytest + # pytest-sugar +pluggy==1.5.0 + # via pytest +pprintpp==0.4.0 + # via pytest-icdiff +pydantic==2.10.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # pydantic-settings +pydantic-core==2.27.2 + # via + # -c requirements/_base.txt + # pydantic +pydantic-settings==2.7.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-icdiff + # pytest-instafail + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-icdiff==0.9 + # via -r requirements/_test.in +pytest-instafail==0.5.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dotenv==1.0.1 + # via + # -r requirements/_test.in + # pydantic-settings +termcolor==2.5.0 + # via pytest-sugar +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # pydantic + # 
pydantic-core +tzdata==2025.1 + # via faker diff --git a/packages/common-library/requirements/_tools.in b/packages/common-library/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/packages/common-library/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/packages/common-library/requirements/_tools.txt b/packages/common-library/requirements/_tools.txt new file mode 100644 index 00000000000..8e681c5a583 --- /dev/null +++ b/packages/common-library/requirements/_tools.txt @@ -0,0 +1,79 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/packages/common-library/requirements/ci.txt b/packages/common-library/requirements/ci.txt new file mode 100644 index 00000000000..ed9eb3028e8 --- /dev/null +++ b/packages/common-library/requirements/ci.txt @@ -0,0 +1,17 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'common-library' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt + +# installs this repo's packages +pytest-simcore @ ../pytest-simcore + +# current module +simcore-common-library @ . 
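The `ci.txt` shortcut above and the `dev.txt` shortcut below install `common_library` together with its test tooling. As a quick orientation, here is a minimal usage sketch of two of the helpers introduced by this package (`common_library.network.is_ip_address` and `common_library.json_serialization.json_dumps`/`json_loads`); the sample payload values are illustrative only:

```python
from datetime import UTC, datetime
from uuid import uuid4

from common_library.json_serialization import json_dumps, json_loads
from common_library.network import is_ip_address

# simple host check backed by the stdlib ipaddress module
assert is_ip_address("127.0.0.1")
assert not is_ip_address("not-an-ip")

# orjson-backed json.dumps-like API; UUIDs and datetimes are serialized
# natively by orjson, while sets fall back to the pydantic_encoder default
payload = {"id": uuid4(), "created": datetime.now(UTC), "tags": {"a", "b"}}
serialized = json_dumps(payload, indent=2)
assert json_loads(serialized)["id"] == str(payload["id"])
```

The `json_dumps`/`json_loads` pair mirrors the built-in `json` API so it can be handed to interfaces that expect it (see `JsonNamespace` further below).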
diff --git a/packages/common-library/requirements/dev.txt b/packages/common-library/requirements/dev.txt new file mode 100644 index 00000000000..02718f95c3a --- /dev/null +++ b/packages/common-library/requirements/dev.txt @@ -0,0 +1,18 @@ +# Shortcut to install all packages needed to develop 'common-library' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../pytest-simcore/ + +# current module +--editable . diff --git a/packages/common-library/setup.cfg b/packages/common-library/setup.cfg new file mode 100644 index 00000000000..f5e56c109bc --- /dev/null +++ b/packages/common-library/setup.cfg @@ -0,0 +1,25 @@ +[bumpversion] +current_version = 0.2.0 +commit = True +message = packages/common-library version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[bdist_wheel] +universal = 1 + +[aliases] +test = pytest + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + diagnostics: "can be used to run diagnostics against deployed data (e.g. database, registry etc)" + testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/packages/common-library/setup.py b/packages/common-library/setup.py new file mode 100644 index 00000000000..bcbb6726687 --- /dev/null +++ b/packages/common-library/setup.py @@ -0,0 +1,60 @@ +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +INSTALL_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.in") +) # WEAK requirements + +TEST_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_test.txt") +) # STRICK requirements + + +SETUP = { + "name": "simcore-common-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Giancarlo Romeo (giancarloromeo)", + "description": "Core service library for simcore pydantic common", + "python_requires": ">=3.10", + "classifiers": [ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python :: 3.11", + ], + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "include_package_data": True, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} + + +if __name__ == "__main__": + setup(**SETUP) diff --git a/packages/common-library/src/common_library/__init__.py b/packages/common-library/src/common_library/__init__.py new file mode 100644 index 00000000000..dc0c65ff721 --- /dev/null +++ b/packages/common-library/src/common_library/__init__.py @@ -0,0 +1,12 @@ +""" osparc's service common library + +""" + +# +# NOTE: +# - "examples" = [ ...] 
keyword and NOT "example". See https://json-schema.org/understanding-json-schema/reference/generic.html#annotations +# + +from importlib.metadata import version + +__version__: str = version("simcore-common-library") diff --git a/packages/common-library/src/common_library/async_tools.py b/packages/common-library/src/common_library/async_tools.py new file mode 100644 index 00000000000..205de066851 --- /dev/null +++ b/packages/common-library/src/common_library/async_tools.py @@ -0,0 +1,64 @@ +import asyncio +import functools +from collections.abc import Awaitable, Callable +from concurrent.futures import Executor +from inspect import isawaitable +from typing import ParamSpec, TypeVar, overload + +R = TypeVar("R") +P = ParamSpec("P") + + +def make_async( + executor: Executor | None = None, +) -> Callable[[Callable[P, R]], Callable[P, Awaitable[R]]]: + def decorator(func: Callable[P, R]) -> Callable[P, Awaitable[R]]: + @functools.wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + loop = asyncio.get_running_loop() + return await loop.run_in_executor( + executor, functools.partial(func, *args, **kwargs) + ) + + return wrapper + + return decorator + + +_AwaitableResult = TypeVar("_AwaitableResult") + + +@overload +async def maybe_await(obj: Awaitable[_AwaitableResult]) -> _AwaitableResult: ... + + +@overload +async def maybe_await(obj: _AwaitableResult) -> _AwaitableResult: ... + + +async def maybe_await( + obj: Awaitable[_AwaitableResult] | _AwaitableResult, +) -> _AwaitableResult: + """Helper function to handle both async and sync database results. + + This function allows code to work with both aiopg (async) and asyncpg (sync) result methods + by automatically detecting and handling both cases. + + Args: + obj: Either an awaitable coroutine or direct result value + + Returns: + The result value, after awaiting if necessary + + Example: + ```python + result = await conn.execute(query) + # Works with both aiopg and asyncpg + row = await maybe_await(result.fetchone()) + ``` + """ + if isawaitable(obj): + assert isawaitable(obj) # nosec + return await obj + assert not isawaitable(obj) # nosec + return obj diff --git a/packages/common-library/src/common_library/basic_types.py b/packages/common-library/src/common_library/basic_types.py new file mode 100644 index 00000000000..dc92a3efac2 --- /dev/null +++ b/packages/common-library/src/common_library/basic_types.py @@ -0,0 +1,48 @@ +from enum import StrEnum +from typing import Any + +from pydantic_core import PydanticUndefined + +Undefined = PydanticUndefined +DEFAULT_FACTORY: Any = Undefined +# Use `DEFAULT_FACTORY` as field default when using Field(default_factory=...) 
+# SEE https://github.com/ITISFoundation/osparc-simcore/pull/6882 +# SEE https://github.com/ITISFoundation/osparc-simcore/pull/7112#discussion_r1933432238 +# SEE https://github.com/fastapi/fastapi/blob/master/fastapi/_compat.py#L75-L78 + + +class LogLevel(StrEnum): + DEBUG = "DEBUG" + INFO = "INFO" + WARNING = "WARNING" + ERROR = "ERROR" + + +class BootModeEnum(StrEnum): + """ + Values taken by SC_BOOT_MODE environment variable + set in Dockerfile and used during docker/boot.sh + """ + + DEFAULT = "default" + LOCAL = "local-development" + DEBUG = "debug" + PRODUCTION = "production" + DEVELOPMENT = "development" + + def is_devel_mode(self) -> bool: + """returns True if this boot mode is used for development""" + return self in (self.DEBUG, self.DEVELOPMENT, self.LOCAL) + + +class BuildTargetEnum(StrEnum): + """ + Values taken by SC_BUILD_TARGET environment variable + set in Dockerfile that defines the stage targeted in the + docker image build + """ + + BUILD = "build" + CACHE = "cache" + PRODUCTION = "production" + DEVELOPMENT = "development" diff --git a/packages/common-library/src/common_library/changelog.py b/packages/common-library/src/common_library/changelog.py new file mode 100644 index 00000000000..885a121f59a --- /dev/null +++ b/packages/common-library/src/common_library/changelog.py @@ -0,0 +1,243 @@ +""" +CHANGELOG formatted-messages for API routes + +- Append at the bottom of the route's description +- These are displayed in the swagger/redoc doc +- These are displayed in client's doc as well (auto-generator) +- Inspired on this idea https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#describing-changes-between-versions +""" + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from enum import Enum, auto +from typing import Any, ClassVar, cast + +from packaging.version import Version + + +class ChangelogType(Enum): + """Types of changelog entries in their lifecycle order""" + + NEW = auto() + CHANGED = auto() + DEPRECATED = auto() + RETIRED = auto() + + +class ChangelogEntryAbstract(ABC): + """Base class for changelog entries""" + + entry_type: ClassVar[ChangelogType] + + @abstractmethod + def to_string(self) -> str: + """Converts entry to a formatted string for documentation""" + + @abstractmethod + def get_version(self) -> Version | None: + """Returns the version associated with this entry, if any""" + + +class NewEndpoint(ChangelogEntryAbstract): + """Indicates when an endpoint was first added""" + + entry_type = ChangelogType.NEW + + def __init__(self, version: str): + self.version = version + + def to_string(self) -> str: + return f"New in *version {self.version}*" + + def get_version(self) -> Version: + return Version(self.version) + + +class ChangedEndpoint(ChangelogEntryAbstract): + """Indicates a change to an existing endpoint""" + + entry_type = ChangelogType.CHANGED + + def __init__(self, version: str, message: str): + self.version = version + self.message = message + + def to_string(self) -> str: + return f"Changed in *version {self.version}*: {self.message}" + + def get_version(self) -> Version: + return Version(self.version) + + +class DeprecatedEndpoint(ChangelogEntryAbstract): + """Indicates an endpoint is deprecated and should no longer be used""" + + entry_type = ChangelogType.DEPRECATED + + def __init__(self, alternative_route: str, version: str | None = None): + self.alternative_route = alternative_route + self.version = version + + def to_string(self) -> str: + base_message = "🚨 **Deprecated**" + if 
self.version: + base_message += f" in *version {self.version}*" + + return ( + f"{base_message}: This endpoint is deprecated and will be removed in a future release.\n" + f"Please use `{self.alternative_route}` instead." + ) + + def get_version(self) -> Version | None: + return Version(self.version) if self.version else None + + +class RetiredEndpoint(ChangelogEntryAbstract): + """Indicates when an endpoint will be or was removed""" + + entry_type = ChangelogType.RETIRED + + def __init__(self, version: str, message: str): + self.version = version + self.message = message + + def to_string(self) -> str: + return f"Retired in *version {self.version}*: {self.message}" + + def get_version(self) -> Version: + return Version(self.version) + + +def create_route_description( + *, + base: str = "", + changelog: Sequence[ChangelogEntryAbstract] | None = None, +) -> str: + """ + Builds a consistent route description with optional changelog information. + + Args: + base (str): Main route description. + changelog (Sequence[ChangelogEntry]): List of changelog entries. + + Returns: + str: Final description string. + """ + parts = [] + + if base: + parts.append(base) + + if changelog: + # NOTE: Adds a markdown section as : | New in version 0.6.0 + changelog_strings = [f"> {entry.to_string()}\n" for entry in changelog] + parts.append("\n".join(changelog_strings)) + + return "\n".join(parts) + + +def validate_changelog(changelog: Sequence[ChangelogEntryAbstract]) -> None: + """ + Validates that the changelog entries follow the correct lifecycle order. + + Args: + changelog: List of changelog entries to validate + + Raises: + ValueError: If the changelog entries are not in a valid order + """ + if not changelog: + return + + # Check each entry's type is greater than or equal to the previous + prev_type = None + for entry in changelog: + if prev_type is not None and entry.entry_type.value < prev_type.value: + msg = ( + f"Changelog entries must be in lifecycle order. " + f"Found {entry.entry_type.name} after {prev_type.name}." + ) + raise ValueError(msg) + prev_type = entry.entry_type + + # Ensure there's exactly one NEW entry as the first entry + if changelog and changelog[0].entry_type != ChangelogType.NEW: + msg = "First changelog entry must be NEW type" + raise ValueError(msg) + + # Ensure there's at most one DEPRECATED entry + deprecated_entries = [ + e for e in changelog if e.entry_type == ChangelogType.DEPRECATED + ] + if len(deprecated_entries) > 1: + msg = "Only one DEPRECATED entry is allowed in a changelog" + raise ValueError(msg) + + # Ensure all versions are valid + for entry in changelog: + version = entry.get_version() + if version is None and entry.entry_type != ChangelogType.DEPRECATED: + msg = f"Entry of type {entry.entry_type.name} must have a valid version" + raise ValueError(msg) + + +def create_route_config( + base_description: str = "", + *, + current_version: str | Version, + changelog: Sequence[ChangelogEntryAbstract] | None = None, +) -> dict[str, Any]: + """ + Creates route configuration options including description based on changelog entries. 
+ + The function analyzes the changelog to determine if the endpoint: + - Is released and visible (if the earliest entry version is not in the future and not removed) + - Is deprecated (if there's a DEPRECATED entry in the changelog) + + Args: + base_description: Main route description + current_version: Current version of the API + changelog: List of changelog entries indicating version history + + Returns: + dict: Route configuration options that can be used as kwargs for route decorators + """ + route_options: dict[str, Any] = {} + changelog_list = list(changelog) if changelog else [] + + validate_changelog(changelog_list) + + if isinstance(current_version, str): + current_version = Version(current_version) + + # Determine endpoint state + is_deprecated = False + is_released = True # Assume released by default + is_removed = False + + # Get the first entry (NEW) to check if released + if changelog_list and changelog_list[0].entry_type == ChangelogType.NEW: + first_entry = cast(NewEndpoint, changelog_list[0]) + first_version = first_entry.get_version() + if first_version and first_version > current_version: + is_released = False + + # Check for deprecation and removal + for entry in changelog_list: + if entry.entry_type == ChangelogType.DEPRECATED: + is_deprecated = True + elif entry.entry_type == ChangelogType.RETIRED: + is_removed = True + + # Set route options based on endpoint state + # An endpoint is included in schema if it's released and not removed + route_options["include_in_schema"] = is_released and not is_removed + route_options["deprecated"] = is_deprecated + + # Create description + route_options["description"] = create_route_description( + base=base_description, + changelog=changelog_list, + ) + + return route_options diff --git a/packages/common-library/src/common_library/dict_tools.py b/packages/common-library/src/common_library/dict_tools.py new file mode 100644 index 00000000000..43ef7166308 --- /dev/null +++ b/packages/common-library/src/common_library/dict_tools.py @@ -0,0 +1,60 @@ +""" A collection of free functions to manipulate dicts +""" + +from collections.abc import Mapping +from copy import copy, deepcopy +from typing import Any + + +def remap_keys(data: dict, rename: dict[str, str]) -> dict[str, Any]: + """A new dict that renames the keys of a dict while keeping the values unchanged + + NOTE: Does not support renaming of nested keys + """ + return {rename.get(k, k): v for k, v in data.items()} + + +def get_from_dict(obj: Mapping[str, Any], dotted_key: str, default=None) -> Any: + keys = dotted_key.split(".") + value = obj + for key in keys[:-1]: + value = value.get(key, {}) + return value.get(keys[-1], default) + + +def copy_from_dict_ex(data: dict[str, Any], exclude: set[str]) -> dict[str, Any]: + # NOTE: to be refactored by someone and merged with the next method + return {k: v for k, v in data.items() if k not in exclude} + + +def copy_from_dict( + data: dict[str, Any], *, include: set | dict | None = None, deep: bool = False +): + # + # Analogous to advanced includes from pydantic exports + # https://pydantic-docs.helpmanual.io/usage/exporting_models/#advanced-include-and-exclude + # + + if include is None: + return deepcopy(data) if deep else copy(data) + + if include == ...: + return deepcopy(data) if deep else copy(data) + + if isinstance(include, set): + return {key: data[key] for key in include} + + assert isinstance(include, dict) # nosec + + return { + key: copy_from_dict(data[key], include=include[key], deep=deep) + for key in include + } + + +def 
update_dict(obj: dict, **updates):
+    for key, update_value in updates.items():
+        obj.update(
+            {key: update_value(obj[key]) if callable(update_value) else update_value}
+        )
+    return obj
diff --git a/packages/common-library/src/common_library/error_codes.py b/packages/common-library/src/common_library/error_codes.py
new file mode 100644
index 00000000000..70829a059ca
--- /dev/null
+++ b/packages/common-library/src/common_library/error_codes.py
@@ -0,0 +1,85 @@
+"""osparc ERROR CODES (OEC)
+  Unique identifier of an exception instance
+  Intended to report a user about unexpected errors.
+    Unexpected exceptions can be traced by matching the
+    logged error code with that appended to the user-friendly message
+
+SEE test_error_codes for some use cases
+"""
+
+import hashlib
+import re
+import traceback
+from datetime import UTC, datetime
+from typing import Annotated, Final, TypeAlias
+
+from pydantic import StringConstraints, TypeAdapter
+
+_LABEL = "OEC:{fingerprint}-{timestamp}"
+
+_LEN = 12  # chars (~48 bits)
+_NAMED_PATTERN = re.compile(
+    r"OEC:(?P<fingerprint>[a-fA-F0-9]{12})-(?P<timestamp>\d{13,14})"
+    # NOTE: timestamp limits: 13 digits (from 2001), 14 digits (good for ~500+ years)
+)
+_PATTERN = re.compile(r"OEC:[a-fA-F0-9]{12}-\d{13,14}")
+
+
+ErrorCodeStr: TypeAlias = Annotated[
+    str, StringConstraints(strip_whitespace=True, pattern=_NAMED_PATTERN)
+]
+
+
+def _create_fingerprint(exc: BaseException) -> str:
+    """
+    Unique error fingerprint of the **traceback** for deduplication purposes
+    """
+    tb = traceback.extract_tb(exc.__traceback__)
+    frame_sigs = [f"{frame.name}:{frame.lineno}" for frame in tb]
+    fingerprint = f"{type(exc).__name__}|" + "|".join(frame_sigs)
+    # E.g. ZeroDivisionError|foo:23|main:10
+    return hashlib.sha256(fingerprint.encode()).hexdigest()[:_LEN]
+
+
+_SECS_TO_MILISECS: Final[int] = 1000  # ms
+
+
+def _create_timestamp() -> int:
+    """Timestamp as milliseconds since epoch
+    NOTE: this reduces the precision to milliseconds but it is good enough for our purpose
+    """
+    ts = datetime.now(UTC).timestamp() * _SECS_TO_MILISECS
+    return int(ts)
+
+
+def create_error_code(exception: BaseException) -> ErrorCodeStr:
+    """
+    Generates a unique error code for the given exception.
+
+    The error code follows the format: `OEC:{traceback}-{timestamp}`.
+    This code is intended to be shared with the front-end as a `SupportID`
+    for debugging and support purposes. 
+ """ + return TypeAdapter(ErrorCodeStr).validate_python( + _LABEL.format( + fingerprint=_create_fingerprint(exception), + timestamp=_create_timestamp(), + ) + ) + + +def parse_error_codes(obj) -> list[ErrorCodeStr]: + return TypeAdapter(list[ErrorCodeStr]).validate_python(_PATTERN.findall(f"{obj}")) + + +def parse_error_code_parts(oec: ErrorCodeStr) -> tuple[str, datetime]: + """Returns traceback-fingerprint and timestamp from `OEC:{traceback}-{timestamp}`""" + match = _NAMED_PATTERN.match(oec) + if not match: + msg = f"Invalid error code format: {oec}" + raise ValueError(msg) + fingerprint = match.group("fingerprint") + timestamp = datetime.fromtimestamp( + float(match.group("timestamp")) / _SECS_TO_MILISECS, tz=UTC + ) + return fingerprint, timestamp diff --git a/packages/common-library/src/common_library/errors_classes.py b/packages/common-library/src/common_library/errors_classes.py new file mode 100644 index 00000000000..dfee557d38c --- /dev/null +++ b/packages/common-library/src/common_library/errors_classes.py @@ -0,0 +1,54 @@ +from typing import Any + +from pydantic.errors import PydanticErrorMixin + +from .error_codes import create_error_code + + +class _DefaultDict(dict): + def __missing__(self, key): + return f"'{key}=?'" + + +class OsparcErrorMixin(PydanticErrorMixin): + code: str # type: ignore[assignment] + msg_template: str + + def __new__(cls, *_args, **_kwargs): + if not hasattr(cls, "code"): + cls.code = cls._get_full_class_name() + return super().__new__(cls) + + def __init__(self, **ctx: Any) -> None: + self.__dict__ = ctx + super().__init__(message=self._build_message(), code=self.code) # type: ignore[arg-type] + + def __str__(self) -> str: + return self._build_message() + + def _build_message(self) -> str: + # NOTE: safe. Does not raise KeyError + return self.msg_template.format_map(_DefaultDict(**self.__dict__)) + + @classmethod + def _get_full_class_name(cls) -> str: + relevant_classes = [ + c.__name__ + for c in cls.__mro__[:-1] + if c.__name__ + not in ( + "PydanticErrorMixin", + "OsparcErrorMixin", + "Exception", + "BaseException", + ) + ] + return ".".join(reversed(relevant_classes)) + + def error_context(self) -> dict[str, Any]: + """Returns context in which error occurred and stored within the exception""" + return dict(**self.__dict__) + + def error_code(self) -> str: + assert isinstance(self, Exception), "subclass must be exception" # nosec + return create_error_code(self) diff --git a/packages/common-library/src/common_library/exclude.py b/packages/common-library/src/common_library/exclude.py new file mode 100644 index 00000000000..502e99fa32c --- /dev/null +++ b/packages/common-library/src/common_library/exclude.py @@ -0,0 +1,32 @@ +from typing import Any, Final + + +class Unset: + """Sentinel value to indicate that a parameter is not set.""" + + VALUE: "Unset" + + +unuset: Final = Unset() +Unset.VALUE = Unset() + + +def is_unset(v: Any) -> bool: + return isinstance(v, Unset) + + +def is_set(v: Any) -> bool: + return not isinstance(v, Unset) + + +def as_dict_exclude_unset(**params) -> dict[str, Any]: + """Excludes parameters that are instances of UnSet.""" + return {k: v for k, v in params.items() if not isinstance(v, Unset)} + + +def as_dict_exclude_none(**params) -> dict[str, Any]: + """Analogous to `as_dict_exclude_unset` but with None. + + Sometimes None is used as a sentinel value, use this function to exclude it. 
+ """ + return {k: v for k, v in params.items() if v is not None} diff --git a/packages/common-library/src/common_library/groups_dicts.py b/packages/common-library/src/common_library/groups_dicts.py new file mode 100644 index 00000000000..f709eb2cdbf --- /dev/null +++ b/packages/common-library/src/common_library/groups_dicts.py @@ -0,0 +1,7 @@ +from typing_extensions import TypedDict + + +class AccessRightsDict(TypedDict): + read: bool + write: bool + delete: bool diff --git a/packages/common-library/src/common_library/groups_enums.py b/packages/common-library/src/common_library/groups_enums.py new file mode 100644 index 00000000000..215edf335f1 --- /dev/null +++ b/packages/common-library/src/common_library/groups_enums.py @@ -0,0 +1,13 @@ +import enum + + +class GroupType(enum.Enum): + """ + standard: standard group, e.g. any group that is not a primary group or special group such as the everyone group + primary: primary group, e.g. the primary group is the user own defined group that typically only contain the user (same as in linux) + everyone: the only group for all users + """ + + STANDARD = "standard" + PRIMARY = "primary" + EVERYONE = "everyone" diff --git a/packages/common-library/src/common_library/json_serialization.py b/packages/common-library/src/common_library/json_serialization.py new file mode 100644 index 00000000000..418b8342bc0 --- /dev/null +++ b/packages/common-library/src/common_library/json_serialization.py @@ -0,0 +1,184 @@ +"""Helpers for json serialization +- built-in json-like API +- implemented using orjson, which performs better. SEE https://github.com/ijl/orjson?tab=readme-ov-file#performance +""" + +import datetime +from collections import deque +from collections.abc import Callable +from decimal import Decimal +from enum import Enum +from ipaddress import ( + IPv4Address, + IPv4Interface, + IPv4Network, + IPv6Address, + IPv6Interface, + IPv6Network, +) +from pathlib import Path +from re import Pattern +from types import GeneratorType +from typing import Any, Final, NamedTuple +from uuid import UUID + +import orjson +from pydantic import AnyHttpUrl, AnyUrl, HttpUrl, NameEmail, SecretBytes, SecretStr +from pydantic_core import Url +from pydantic_extra_types.color import Color + + +class SeparatorTuple(NamedTuple): + item_separator: str + key_separator: str + + +_orjson_default_separator: Final = SeparatorTuple(item_separator=",", key_separator=":") + + +def isoformat(o: datetime.date | datetime.time) -> str: + return o.isoformat() + + +def decimal_encoder(dec_value: Decimal) -> int | float: + """ + Encodes a Decimal as int of there's no exponent, otherwise float + + This is useful when we use ConstrainedDecimal to represent Numeric(x,0) + where a integer (but not int typed) is used. Encoding this as a float + results in failed round-tripping between encode and parse. + Our Id type is a prime example of this. 
+ + >>> decimal_encoder(Decimal("1.0")) + 1.0 + + >>> decimal_encoder(Decimal("1")) + 1 + """ + if dec_value.as_tuple().exponent >= 0: # type: ignore[operator] + return int(dec_value) + + return float(dec_value) + + +ENCODERS_BY_TYPE: dict[type[Any], Callable[[Any], Any]] = { + AnyHttpUrl: str, + AnyUrl: str, + bytes: lambda o: o.decode(), + Color: str, + datetime.date: isoformat, + datetime.datetime: isoformat, + datetime.time: isoformat, + datetime.timedelta: lambda td: td.total_seconds(), + Decimal: decimal_encoder, + Enum: lambda o: o.value, + frozenset: list, + deque: list, + GeneratorType: list, + HttpUrl: str, + IPv4Address: str, + IPv4Interface: str, + IPv4Network: str, + IPv6Address: str, + IPv6Interface: str, + IPv6Network: str, + NameEmail: str, + Path: str, + Pattern: lambda o: o.pattern, + SecretBytes: str, + SecretStr: str, + set: list, + Url: str, + UUID: str, +} + + +def pydantic_encoder(obj: Any) -> Any: + from dataclasses import asdict, is_dataclass + + from pydantic.main import BaseModel + + if isinstance(obj, BaseModel): + return obj.model_dump() + + if is_dataclass(obj): + assert not isinstance(obj, type) # nosec + return asdict(obj) + + # Check the class type and its superclasses for a matching encoder + for base in obj.__class__.__mro__[:-1]: + try: + encoder = ENCODERS_BY_TYPE[base] + except KeyError: + continue + return encoder(obj) + + # We have exited the for loop without finding a suitable encoder + msg = f"Object of type '{obj.__class__.__name__}' is not JSON serializable" + raise TypeError(msg) + + +def representation_encoder(obj: Any): + """ + A fallback encoder that uses `pydantic_encoder` to serialize objects. + If serialization fails, it falls back to using `str(obj)`. + + This is practical for representation purposes, such as logging or debugging. + + Example: + >>> from common_library.json_serialization import json_dumps, representation_encoder + >>> class CustomObject: + ... def __str__(self): + ... return "CustomObjectRepresentation" + >>> obj = CustomObject() + >>> json_dumps(obj, default=representation_encoder) + '"CustomObjectRepresentation"' + """ + try: + return pydantic_encoder(obj) + except TypeError: + return str(obj) + + +def json_dumps( + obj: Any, + *, + default=pydantic_encoder, + sort_keys: bool = False, + indent: int | None = None, + separators: SeparatorTuple | tuple[str, str] | None = None, +) -> str: + """json.dumps-like API implemented with orjson.dumps in the core + + NOTE: only separator=(",",":") is supported + """ + # SEE https://github.com/ijl/orjson?tab=readme-ov-file#serialize + option = ( + # if a dict has a key of a type other than str it will NOT raise + orjson.OPT_NON_STR_KEYS + ) + if indent: + option |= orjson.OPT_INDENT_2 + if sort_keys: + option |= orjson.OPT_SORT_KEYS + + if separators is not None and separators != _orjson_default_separator: + # NOTE1: replacing separators in the result is no only time-consuming but error prone. We had + # some examples with time-stamps that were corrupted because of this replacement. 
+ msg = f"Only {_orjson_default_separator} supported, got {separators}" + raise ValueError(msg) + + # serialize + result: str = orjson.dumps(obj, default=default, option=option).decode("utf-8") + + return result + + +json_loads: Callable = orjson.loads + + +class JsonNamespace: + """Namespace to use our customized serialization functions for interfaces where the built-in json Api is expected""" + + dumps = json_dumps + loads = json_loads diff --git a/packages/common-library/src/common_library/network.py b/packages/common-library/src/common_library/network.py new file mode 100644 index 00000000000..2842434460e --- /dev/null +++ b/packages/common-library/src/common_library/network.py @@ -0,0 +1,9 @@ +import ipaddress + + +def is_ip_address(host: str) -> bool: + try: + ipaddress.ip_address(host) + return True + except ValueError: + return False diff --git a/packages/common-library/src/common_library/pagination_tools.py b/packages/common-library/src/common_library/pagination_tools.py new file mode 100644 index 00000000000..f85482dbf1b --- /dev/null +++ b/packages/common-library/src/common_library/pagination_tools.py @@ -0,0 +1,87 @@ +from collections.abc import Iterable +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field, NonNegativeInt, PositiveInt + + +class PageParams(BaseModel): + offset_initial: Annotated[NonNegativeInt, Field(frozen=True)] = 0 + offset_current: NonNegativeInt = 0 # running offset + limit: Annotated[PositiveInt, Field(frozen=True)] + total_number_of_items: int | None = None + + model_config = ConfigDict(validate_assignment=True) + + @property + def offset(self) -> NonNegativeInt: + return self.offset_current + + def has_items_left(self) -> bool: + return ( + self.total_number_of_items is None + or self.offset_current < self.total_number_of_items + ) + + def total_number_of_pages(self) -> NonNegativeInt: + assert self.total_number_of_items # nosec + num_items = self.total_number_of_items - self.offset_initial + return num_items // self.limit + (1 if num_items % self.limit else 0) + + +def iter_pagination_params( + *, + limit: PositiveInt, + offset: NonNegativeInt, + total_number_of_items: NonNegativeInt | None = None, +) -> Iterable[PageParams]: + """Iterates through pages of a collection by yielding PageParams for each page. + + Args: + limit: The maximum number of items to return in a single page. + offset: The number of items to skip before starting to collect the items for the current page. + total_number_of_items: The total count of items in the collection being paginated. + Must be set during the first iteration if not provided initially. + + Yields: + PageParams for each page in the collection. + + Raises: + RuntimeError: If total_number_of_items is not set before first iteration or if it changes between iterations. + """ + + kwargs = {} + if total_number_of_items: + kwargs["total_number_of_items"] = total_number_of_items + + page_params = PageParams( + offset_initial=offset, offset_current=offset, limit=limit, **kwargs + ) + + assert page_params.offset_current == page_params.offset_initial # nosec + + total_count_before = page_params.total_number_of_items + page_index = 0 + + while page_params.has_items_left(): + + yield page_params + + if page_params.total_number_of_items is None: + msg = "Must be updated at least before the first iteration, i.e. 
page_args.total_number_of_items = total_count" + raise RuntimeError(msg) + + if ( + total_count_before + and total_count_before != page_params.total_number_of_items + ): + msg = ( + f"total_number_of_items cannot change on every iteration: before={total_count_before}, now={page_params.total_number_of_items}." + "WARNING: the size of the paginated collection might be changing while it is being iterated?" + ) + raise RuntimeError(msg) + + if page_index == 0: + total_count_before = page_params.total_number_of_items + + page_params.offset_current += limit + assert page_params.offset == page_params.offset_current # nosec diff --git a/services/api-server/src/simcore_service_api_server/api/errors/__init__.py b/packages/common-library/src/common_library/py.typed similarity index 100% rename from services/api-server/src/simcore_service_api_server/api/errors/__init__.py rename to packages/common-library/src/common_library/py.typed diff --git a/packages/common-library/src/common_library/pydantic_basic_types.py b/packages/common-library/src/common_library/pydantic_basic_types.py new file mode 100644 index 00000000000..452c118dae9 --- /dev/null +++ b/packages/common-library/src/common_library/pydantic_basic_types.py @@ -0,0 +1,79 @@ +from re import Pattern +from typing import Annotated, Final, TypeAlias + +from pydantic import Field +from pydantic_core import core_schema + +# https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Registered_ports +RegisteredPortInt: TypeAlias = Annotated[int, Field(gt=1024, lt=65535)] + +# non-empty bounded string used as identifier +# e.g. "123" or "name_123" or "fa327c73-52d8-462a-9267-84eeaf0f90e3" but NOT "" +_ELLIPSIS_CHAR: Final[str] = "..." + + +class ConstrainedStr(str): # noqa: SLOT000 + pattern: str | Pattern[str] | None = None + min_length: int | None = None + max_length: int | None = None + strip_whitespace: bool = False + curtail_length: int | None = None + + @classmethod + def _validate(cls, __input_value: str) -> str: + if cls.curtail_length and len(__input_value) > cls.curtail_length: + __input_value = __input_value[: cls.curtail_length] + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__(cls, _source_type, _handler): + return core_schema.no_info_after_validator_function( + cls._validate, + core_schema.str_schema( + pattern=cls.pattern, + min_length=cls.min_length, + max_length=cls.max_length, + strip_whitespace=cls.strip_whitespace, + ), + ) + + +class IDStr(ConstrainedStr): + strip_whitespace = True + min_length = 1 + max_length = 100 + + @staticmethod + def concatenate(*args: "IDStr", link_char: str = " ") -> "IDStr": + result = link_char.join(args).strip() + assert IDStr.min_length # nosec + assert IDStr.max_length # nosec + if len(result) > IDStr.max_length: + if IDStr.max_length > len(_ELLIPSIS_CHAR): + result = ( + result[: IDStr.max_length - len(_ELLIPSIS_CHAR)].rstrip() + + _ELLIPSIS_CHAR + ) + else: + result = _ELLIPSIS_CHAR[0] * IDStr.max_length + if len(result) < IDStr.min_length: + msg = f"IDStr.concatenate: result is too short: {result}" + raise ValueError(msg) + return IDStr(result) + + +class ShortTruncatedStr(ConstrainedStr): + # NOTE: Use to input e.g. titles or display names + # A truncated string: + # - Strips whitespaces and truncate strings that exceed the specified characters limit (curtail_length). + # - Ensures that the **input** data length to the API is controlled and prevents exceeding large inputs silently, i.e. without raising errors. 
+    # SEE https://github.com/ITISFoundation/osparc-simcore/pull/5989#discussion_r1650506583
+    strip_whitespace = True
+    curtail_length = 600
+
+
+class LongTruncatedStr(ConstrainedStr):
+    # NOTE: Use to input e.g. descriptions or summaries
+    # Analogous to ShortTruncatedStr
+    strip_whitespace = True
+    curtail_length = 65536  # same as github description
diff --git a/packages/common-library/src/common_library/pydantic_fields_extension.py b/packages/common-library/src/common_library/pydantic_fields_extension.py
new file mode 100644
index 00000000000..59303b0a1b3
--- /dev/null
+++ b/packages/common-library/src/common_library/pydantic_fields_extension.py
@@ -0,0 +1,22 @@
+from types import UnionType
+from typing import Any, Literal, get_args, get_origin
+
+from pydantic.fields import FieldInfo
+
+
+def get_type(info: FieldInfo) -> Any:
+    field_type = info.annotation
+    if args := get_args(info.annotation):
+        field_type = next(a for a in args if a is not type(None))
+    return field_type
+
+
+def is_literal(info: FieldInfo) -> bool:
+    return get_origin(info.annotation) is Literal
+
+
+def is_nullable(info: FieldInfo) -> bool:
+    origin = get_origin(info.annotation)  # X | None or Optional[X] will return Union
+    if origin is UnionType:
+        return any(x in get_args(info.annotation) for x in (type(None), Any))
+    return False
diff --git a/packages/common-library/src/common_library/pydantic_validators.py b/packages/common-library/src/common_library/pydantic_validators.py
new file mode 100644
index 00000000000..a0122fccbe8
--- /dev/null
+++ b/packages/common-library/src/common_library/pydantic_validators.py
@@ -0,0 +1,59 @@
+import datetime as dt
+import re
+import warnings
+
+from pydantic import TypeAdapter, field_validator
+
+
+def _validate_legacy_timedelta_str(time_str: str | dt.timedelta) -> str | dt.timedelta:
+    if not isinstance(time_str, str):
+        return time_str
+
+    # Match the format [-][DD ][HH:MM]SS[.ffffff]
+    match = re.match(
+        r"^(?P<sign>-)?(?:(?P<days>\d+)\s)?(?:(?P<hours>\d+):)?(?:(?P<minutes>\d+):)?(?P<seconds>\d+)(?P<fraction>\.\d+)?$",
+        time_str,
+    )
+    if not match:
+        return time_str
+
+    # Extract components with defaults if not present
+    sign = match.group("sign") or ""
+    days = match.group("days") or "0"
+    hours = match.group("hours") or "0"
+    minutes = match.group("minutes") or "0"
+    seconds = match.group("seconds")
+    fraction = match.group("fraction") or ""
+
+    # Convert to the format [-][DD]D[,][HH:MM:]SS[.ffffff]
+    return f"{sign}{int(days)}D,{int(hours):02}:{int(minutes):02}:{seconds}{fraction}"
+
+
+def validate_numeric_string_as_timedelta(field: str):
+    """Transforms a float/int number into a valid datetime as it used to work in the past"""
+
+    def _numeric_string_as_timedelta(
+        v: dt.timedelta | str | float,
+    ) -> dt.timedelta | str | float:
+        if isinstance(v, str):
+            try:
+                converted_value = float(v)
+
+                iso8601_format = TypeAdapter(dt.timedelta).dump_python(
+                    dt.timedelta(seconds=converted_value), mode="json"
+                )
+                warnings.warn(
+                    f"{field}='{v}' -should be set to-> {field}='{iso8601_format}' (ISO8601 datetime format). "
+                    "Please also convert the value in the >>OPS REPOSITORY<<. 
" + "For details: https://docs.pydantic.dev/1.10/usage/types/#datetime-types.", + DeprecationWarning, + stacklevel=8, + ) + + return converted_value + except ValueError: + # returns format like "1:00:00" + return _validate_legacy_timedelta_str(v) + return v + + return field_validator(field, mode="before")(_numeric_string_as_timedelta) diff --git a/packages/common-library/src/common_library/serialization.py b/packages/common-library/src/common_library/serialization.py new file mode 100644 index 00000000000..70dd53e13c4 --- /dev/null +++ b/packages/common-library/src/common_library/serialization.py @@ -0,0 +1,41 @@ +import contextlib +from datetime import timedelta +from typing import Any + +from pydantic import BaseModel, SecretStr, TypeAdapter, ValidationError +from pydantic_core import Url + + +def model_dump_with_secrets( + settings_obj: BaseModel, *, show_secrets: bool, **pydantic_export_options +) -> dict[str, Any]: + data = settings_obj.model_dump(**pydantic_export_options) + + for field_name in settings_obj.model_fields: + if field_name not in data: + continue + + field_data = data[field_name] + + if isinstance(field_data, timedelta): + data[field_name] = field_data.total_seconds() + + elif isinstance(field_data, SecretStr): + data[field_name] = ( + field_data.get_secret_value() if show_secrets else str(field_data) + ) + + elif isinstance(field_data, Url): + data[field_name] = str(field_data) + + elif isinstance(field_data, dict): + possible_pydantic_model = settings_obj.model_fields[field_name].annotation + # NOTE: data could be a dict which does not represent a pydantic model or a union of models + with contextlib.suppress(AttributeError, ValidationError): + data[field_name] = model_dump_with_secrets( + TypeAdapter(possible_pydantic_model).validate_python(field_data), + show_secrets=show_secrets, + **pydantic_export_options, + ) + + return data diff --git a/packages/common-library/src/common_library/test_network.py b/packages/common-library/src/common_library/test_network.py new file mode 100644 index 00000000000..b7f423df3a1 --- /dev/null +++ b/packages/common-library/src/common_library/test_network.py @@ -0,0 +1,19 @@ +import pytest +from common_library.network import is_ip_address + + +@pytest.mark.parametrize( + "host, expected", + [ + ("127.0.0.1", True), + ("::1", True), + ("192.168.1.1", True), + ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", True), + ("256.256.256.256", False), + ("invalid_host", False), + ("", False), + ("1234:5678:9abc:def0:1234:5678:9abc:defg", False), + ], +) +def test_is_ip_address(host: str, expected: bool): + assert is_ip_address(host) == expected diff --git a/packages/common-library/src/common_library/users_enums.py b/packages/common-library/src/common_library/users_enums.py new file mode 100644 index 00000000000..4a0606bf46e --- /dev/null +++ b/packages/common-library/src/common_library/users_enums.py @@ -0,0 +1,67 @@ +from enum import Enum +from functools import total_ordering + +_USER_ROLE_TO_LEVEL = { + "ANONYMOUS": 0, + "GUEST": 10, + "USER": 20, + "TESTER": 30, + "PRODUCT_OWNER": 40, + "ADMIN": 100, +} + + +@total_ordering +class UserRole(Enum): + """SORTED enumeration of user roles + + A role defines a set of privileges the user can perform + Roles are sorted from lower to highest privileges + USER is the role assigned by default A user with a higher/lower role is denoted super/infra user + + ANONYMOUS : The user is not logged in + GUEST : Temporary user with very limited access. 
Main used for demos and for a limited amount of time + USER : Registered user. Basic permissions to use the platform [default] + TESTER : Upgraded user. First level of super-user with privileges to test the framework. + Can use everything but does not have an effect in other users or actual data + ADMIN : Framework admin. + + See security_access.py + """ + + ANONYMOUS = "ANONYMOUS" + GUEST = "GUEST" + USER = "USER" + TESTER = "TESTER" + PRODUCT_OWNER = "PRODUCT_OWNER" + ADMIN = "ADMIN" + + @property + def privilege_level(self) -> int: + return _USER_ROLE_TO_LEVEL[self.name] + + def __lt__(self, other: "UserRole") -> bool: + if self.__class__ is other.__class__: + return self.privilege_level < other.privilege_level + return NotImplemented + + +class UserStatus(str, Enum): + # This is a transition state. The user is registered but not confirmed. NOTE that state is optional depending on LOGIN_REGISTRATION_CONFIRMATION_REQUIRED + CONFIRMATION_PENDING = "CONFIRMATION_PENDING" + # This user can now operate the platform + ACTIVE = "ACTIVE" + # This user is inactive because it expired after a trial period + EXPIRED = "EXPIRED" + # This user is inactive because he has been a bad boy + BANNED = "BANNED" + # This user is inactive because it was marked for deletion + DELETED = "DELETED" + + +class AccountRequestStatus(str, Enum): + """Status of the request for an account""" + + PENDING = "PENDING" # Pending PO review to approve/reject the request + APPROVED = "APPROVED" # PO approved the request + REJECTED = "REJECTED" # PO rejected the request diff --git a/packages/common-library/tests/conftest.py b/packages/common-library/tests/conftest.py new file mode 100644 index 00000000000..46f09f86b46 --- /dev/null +++ b/packages/common-library/tests/conftest.py @@ -0,0 +1,33 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +import sys +from pathlib import Path + +import common_library +import pytest + +pytest_plugins = [ + "pytest_simcore.pydantic_models", + "pytest_simcore.pytest_global_environs", + "pytest_simcore.repository_paths", + "pytest_simcore.schemas", +] + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +@pytest.fixture(scope="session") +def package_dir(): + pdir = Path(common_library.__file__).resolve().parent + assert pdir.exists() + return pdir + + +@pytest.fixture(scope="session") +def project_slug_dir() -> Path: + folder = CURRENT_DIR.parent + assert folder.exists() + assert any(folder.glob("src/common_library")) + return folder diff --git a/packages/common-library/tests/test_async_tools.py b/packages/common-library/tests/test_async_tools.py new file mode 100644 index 00000000000..850945d39b2 --- /dev/null +++ b/packages/common-library/tests/test_async_tools.py @@ -0,0 +1,95 @@ +import asyncio +from concurrent.futures import ThreadPoolExecutor +from typing import Any + +import pytest +from common_library.async_tools import make_async, maybe_await + + +@make_async() +def sync_function(x: int, y: int) -> int: + return x + y + + +@make_async() +def sync_function_with_exception() -> None: + raise ValueError("This is an error!") + + +@pytest.mark.asyncio +async def test_make_async_returns_coroutine(): + result = sync_function(2, 3) + assert asyncio.iscoroutine(result), "Function should return a coroutine" + + +@pytest.mark.asyncio +async def test_make_async_execution(): + result = await sync_function(2, 3) + assert result == 5, "Function should return 5" + + +@pytest.mark.asyncio +async def 
test_make_async_exception(): + with pytest.raises(ValueError, match="This is an error!"): + await sync_function_with_exception() + + +@pytest.mark.asyncio +async def test_make_async_with_executor(): + executor = ThreadPoolExecutor() + + @make_async(executor) + def heavy_computation(x: int) -> int: + return x * x + + result = await heavy_computation(4) + assert result == 16, "Function should return 16" + + +@pytest.mark.asyncio +async def test_maybe_await_with_coroutine(): + """Test maybe_await with an async function""" + + async def async_value(): + return 42 + + result = await maybe_await(async_value()) + assert result == 42 + + +@pytest.mark.asyncio +async def test_maybe_await_with_direct_value(): + """Test maybe_await with a direct value""" + value = 42 + result = await maybe_await(value) + assert result == 42 + + +@pytest.mark.asyncio +async def test_maybe_await_with_none(): + """Test maybe_await with None value""" + result = await maybe_await(None) + assert result is None + + +@pytest.mark.asyncio +async def test_maybe_await_with_result_proxy(): + """Test maybe_await with both async and sync ResultProxy implementations""" + + class AsyncResultProxy: + """Mock async result proxy (aiopg style)""" + + async def fetchone(self) -> Any: # pylint: disable=no-self-use + return {"id": 1, "name": "test"} + + class SyncResultProxy: + """Mock sync result proxy (asyncpg style)""" + + def fetchone(self) -> Any: # pylint: disable=no-self-use + return {"id": 2, "name": "test2"} + + async_result = await maybe_await(AsyncResultProxy().fetchone()) + assert async_result == {"id": 1, "name": "test"} + + sync_result = await maybe_await(SyncResultProxy().fetchone()) + assert sync_result == {"id": 2, "name": "test2"} diff --git a/packages/common-library/tests/test_changelog.py b/packages/common-library/tests/test_changelog.py new file mode 100644 index 00000000000..16640ee17ff --- /dev/null +++ b/packages/common-library/tests/test_changelog.py @@ -0,0 +1,284 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from common_library.changelog import ( + ChangedEndpoint, + ChangelogType, + DeprecatedEndpoint, + NewEndpoint, + RetiredEndpoint, + create_route_config, + create_route_description, + validate_changelog, +) +from packaging.version import Version +from pytest_mock import MockerFixture + + +@pytest.fixture +def current_api_version(mocker: MockerFixture) -> str: + """Fixture to mock the API_VERSION for testing purposes""" + return "0.7.0" + + +def test_changelog_entry_types(): + assert ChangelogType.NEW.value < ChangelogType.CHANGED.value + assert ChangelogType.CHANGED.value < ChangelogType.DEPRECATED.value + assert ChangelogType.DEPRECATED.value < ChangelogType.RETIRED.value + + +def test_changelog_entry_classes(): + # Test NewEndpoint + new_entry = NewEndpoint("0.5.0") + assert new_entry.entry_type == ChangelogType.NEW + assert new_entry.get_version() == Version("0.5.0") + assert "New in *version 0.5.0*" in new_entry.to_string() + + # Test ChangedEndpoint + changed_entry = ChangedEndpoint("0.6.0", "Added authentication") + assert changed_entry.entry_type == ChangelogType.CHANGED + assert changed_entry.get_version() == Version("0.6.0") + assert ( + "Changed in *version 0.6.0*: Added authentication" in changed_entry.to_string() + ) + + # Test DeprecatedEndpoint + deprecated_entry = DeprecatedEndpoint("/v1/better-endpoint", "0.7.0") + assert 
deprecated_entry.entry_type == ChangelogType.DEPRECATED + assert deprecated_entry.get_version() == Version("0.7.0") + assert "Deprecated" in deprecated_entry.to_string() + assert "in *version 0.7.0*" in deprecated_entry.to_string() + assert "/v1/better-endpoint" in deprecated_entry.to_string() + + # Test DeprecatedEndpoint without version + deprecated_no_version = DeprecatedEndpoint("/v1/better-endpoint") + assert "Deprecated" in deprecated_no_version.to_string() + assert "in *version" not in deprecated_no_version.to_string() + + # Test RetiredEndpoint + removed_entry = RetiredEndpoint("0.9.0", "Use the new endpoint instead") + assert removed_entry.entry_type == ChangelogType.RETIRED + assert removed_entry.get_version() == Version("0.9.0") + assert "Retired in *version 0.9.0*" in removed_entry.to_string() + assert "Use the new endpoint instead" in removed_entry.to_string() + + +def test_validate_changelog(): + """Test the validate_changelog function""" + # Valid changelog + valid_changelog = [ + NewEndpoint("0.5.0"), + ChangedEndpoint("0.6.0", "Added authentication"), + DeprecatedEndpoint("/v1/better-endpoint", "0.7.0"), + ] + validate_changelog(valid_changelog) # Should not raise + + # Invalid order + invalid_order = [ + NewEndpoint("0.5.0"), + DeprecatedEndpoint("/v1/better-endpoint", "0.7.0"), + ChangedEndpoint("0.6.0", "Added authentication"), # Wrong order + ] + with pytest.raises(ValueError, match="order"): + validate_changelog(invalid_order) + + # Missing NEW as first entry + missing_new = [ + ChangedEndpoint("0.6.0", "Added authentication"), + DeprecatedEndpoint("/v1/better-endpoint", "0.7.0"), + ] + with pytest.raises(ValueError, match="First changelog entry must be NEW"): + validate_changelog(missing_new) + + # Multiple DEPRECATED entries + multiple_deprecated = [ + NewEndpoint("0.5.0"), + DeprecatedEndpoint("/v1/better-endpoint", "0.7.0"), + DeprecatedEndpoint("/v1/another-endpoint", "0.8.0"), + ] + with pytest.raises(ValueError, match="Only one DEPRECATED entry"): + validate_changelog(multiple_deprecated) + + +def test_create_route_description(): + """Test the create_route_description function""" + # Basic description + base_desc = "This is a test endpoint" + changelog = [ + NewEndpoint("0.5.0"), + ChangedEndpoint("0.6.0", "Added authentication"), + ] + + desc = create_route_description(base=base_desc, changelog=changelog) + + assert base_desc in desc + assert "New in *version 0.5.0*" in desc + assert "Changed in *version 0.6.0*: Added authentication" in desc + + +def test_create_route_config_for_deprecated_endpoints(current_api_version: str) -> None: + """Test route configuration for deprecated endpoints""" + alternative_route = "/v1/new-endpoint" + changelog = [ + NewEndpoint("0.5.0"), + DeprecatedEndpoint(alternative_route), + ] + + config = create_route_config( + base_description="This is a deprecated endpoint", + changelog=changelog, + current_version=Version(current_api_version), + ) + + expected_config = { + "deprecated": True, + "include_in_schema": True, + "description": create_route_description( + base="This is a deprecated endpoint", + changelog=changelog, + ), + } + + assert config == expected_config + + +def test_create_route_config_for_to_be_released_endpoints( + current_api_version: str, +) -> None: + """Test route configuration for endpoints that will be released in future versions""" + future_version = f"{int(Version(current_api_version).major) + 1}.0.0" + changelog = [ + NewEndpoint(future_version), + ] + + config = create_route_config( + 
base_description=f"This is a feature coming in version {future_version}", + changelog=changelog, + current_version=Version(current_api_version), + ) + + expected_config = { + "deprecated": False, + "include_in_schema": False, + "description": create_route_description( + base=f"This is a feature coming in version {future_version}", + changelog=changelog, + ), + } + + assert config == expected_config + + +def test_create_route_config_with_removal_notice(current_api_version: str) -> None: + """Test route configuration with explicit removal notice in changelog""" + removal_message = "Use the new endpoint instead" + alternative_route = "/v1/better-endpoint" + + changelog = [ + NewEndpoint("0.5.0"), + DeprecatedEndpoint(alternative_route), + RetiredEndpoint("0.9.0", removal_message), + ] + + config = create_route_config( + base_description="This endpoint will be removed in version 0.9.0", + changelog=changelog, + current_version=current_api_version, + ) + + expected_config = { + "deprecated": True, + "include_in_schema": False, # Changed from True to False due to REMOVED entry + "description": create_route_description( + base="This endpoint will be removed in version 0.9.0", + changelog=changelog, + ), + } + + assert config == expected_config + + +def test_create_route_config_with_regular_endpoint(current_api_version: str) -> None: + """Test route configuration for a standard endpoint (not deprecated, not upcoming)""" + changelog = [NewEndpoint("0.5.0")] + + config = create_route_config( + base_description="This is a standard endpoint", + changelog=changelog, + current_version=current_api_version, + ) + + expected_config = { + "deprecated": False, + "include_in_schema": True, + "description": create_route_description( + base="This is a standard endpoint", + changelog=changelog, + ), + } + + assert config == expected_config + + +def test_create_route_config_with_mixed_changelog(current_api_version: str) -> None: + + alternative_route = "/v1/better-endpoint" + changelog = [ + NewEndpoint("0.5.0"), + ChangedEndpoint("0.6.0", "Added authentication"), + ChangedEndpoint("0.6.2", "Fixed a bug"), + DeprecatedEndpoint(alternative_route), + RetiredEndpoint("0.9.0", "Use the new endpoint instead"), + ] + + config = create_route_config( + base_description="This endpoint has a complex history", + changelog=changelog, + current_version=current_api_version, + ) + + expected_config = { + "deprecated": True, + "include_in_schema": False, # Changed from True to False due to REMOVED entry + "description": create_route_description( + base="This endpoint has a complex history", + changelog=changelog, + ), + } + + assert config == expected_config + + +def test_create_route_config_with_empty_changelog(current_api_version: str) -> None: + + config = create_route_config( + base_description="This endpoint has no changelog", + current_version=current_api_version, + ) + + expected_config = { + "deprecated": False, + "include_in_schema": True, + "description": create_route_description( + base="This endpoint has no changelog", + changelog=[], + ), + } + + assert config == expected_config + + +# Add a new test to explicitly verify the version display in deprecated endpoints +def test_deprecated_endpoint_with_version(): + """Test that DeprecatedEndpoint correctly displays the version information when available""" + # With version + deprecated_with_version = DeprecatedEndpoint("/new/endpoint", "0.8.0") + assert "in *version 0.8.0*" in deprecated_with_version.to_string() + + # Without version + deprecated_without_version = 
DeprecatedEndpoint("/new/endpoint") + assert "in *version" not in deprecated_without_version.to_string() diff --git a/packages/common-library/tests/test_dict_tools.py b/packages/common-library/tests/test_dict_tools.py new file mode 100644 index 00000000000..fb374ff1791 --- /dev/null +++ b/packages/common-library/tests/test_dict_tools.py @@ -0,0 +1,162 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from typing import Any + +import pytest +from common_library.dict_tools import ( + copy_from_dict, + get_from_dict, + remap_keys, + update_dict, +) + + +@pytest.fixture +def data() -> dict[str, Any]: + return { + "ID": "3ifd79yhz2vpgu1iz43mf9m2d", + "Version": {"Index": 176}, + "CreatedAt": "2021-11-10T17:09:01.892109221Z", + "UpdatedAt": "2021-11-10T17:09:35.291164864Z", + "Labels": {}, + "Spec": { + "ContainerSpec": { + "Image": "local/api-server:production", + "Labels": {"com.docker.stack.namespace": "master-simcore"}, + "Hostname": "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}", + "Env": [ + "API_SERVER_DEV_FEATURES_ENABLED=1", + "BF_API_KEY=none", + "BF_API_SECRET=none", + ], + "Privileges": {"CredentialSpec": None, "SELinuxContext": None}, + "Init": True, + "Isolation": "default", + }, + "Resources": {}, + "Placement": {}, + "Networks": [ + {"Target": "roybucjnp44t561jvgy47dd14", "Aliases": ["api-server"]} + ], + "ForceUpdate": 0, + }, + "ServiceID": "77hyhjm6bqs81xp5g3e4ov7wv", + "Slot": 1, + "NodeID": "iz7unuzyzuxbpr80kzheskbbf", + "Status": { + "Timestamp": "2021-11-10T17:09:35.237847117Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "8dadeb42eecbcb58295e0508c27c76d46f5106859af30276abcdcd4e4608f39c", + "PID": 1772378, + "ExitCode": 0, + }, + "PortStatus": {}, + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "q6ojghy5phzllv63cmwhorbhy", + "Version": {"Index": 6}, + "CreatedAt": "2021-11-10T17:08:36.840863313Z", + "UpdatedAt": "2021-11-10T17:08:36.846648842Z", + "Spec": { + "Name": "ingress", + "Labels": {}, + "DriverConfiguration": {}, + "Ingress": True, + "IPAMOptions": {"Driver": {}}, + "Scope": "swarm", + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "4096" + }, + }, + "IPAMOptions": { + "Driver": {"Name": "default"}, + "Configs": [{"Subnet": "10.1.1.0/24", "Gateway": "10.1.1.1"}], + }, + }, + "Addresses": ["10.1.1.24/24"], + }, + { + "Network": { + "ID": "roybucjnp44t561jvgy47dd14", + "Version": {"Index": 14}, + "CreatedAt": "2021-11-10T17:08:37.532148857Z", + "UpdatedAt": "2021-11-10T17:08:37.533461228Z", + "Spec": { + "Name": "master-simcore_default", + "Labels": {"com.docker.stack.namespace": "master-simcore"}, + "DriverConfiguration": {"Name": "overlay"}, + "Attachable": True, + "Scope": "swarm", + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "4098" + }, + }, + "IPAMOptions": { + "Driver": {"Name": "default"}, + "Configs": [{"Subnet": "10.0.1.0/24", "Gateway": "10.0.1.1"}], + }, + }, + "Addresses": ["10.1.1.1/24"], + }, + ], + } + + +def test_remap_keys(): + assert remap_keys({"a": 1, "b": 2}, rename={"a": "A"}) == {"A": 1, "b": 2} + + +def test_update_dict(): + def _increment(x): + return x + 1 + + data = {"a": 1, "b": 2, "c": 3} + + assert update_dict(data, a=_increment, b=42) == {"a": 2, "b": 42, "c": 3} + + +def test_get_from_dict(data: dict[str, Any]): + + assert get_from_dict(data, 
"Spec.ContainerSpec.Labels") == { + "com.docker.stack.namespace": "master-simcore" + } + # TODO: see that dotted keys cannot be used here, + assert get_from_dict(data, "Invalid.Invalid.Invalid", default=42) == 42 + + +def test_copy_from_dict(data: dict[str, Any]): + + selected_data = copy_from_dict( + data, + include={ + "ID": ..., + "CreatedAt": ..., + "UpdatedAt": ..., + "Spec": {"ContainerSpec": {"Image"}}, + "Status": {"Timestamp", "State", "ContainerStatus"}, + "DesiredState": ..., + }, + ) + + assert selected_data["ID"] == data["ID"] + assert ( + selected_data["Spec"]["ContainerSpec"]["Image"] + == data["Spec"]["ContainerSpec"]["Image"] + ) + assert selected_data["Status"]["State"] == data["Status"]["State"] + assert "Message" not in selected_data["Status"]["State"] + assert "running" in data["Status"]["State"] diff --git a/packages/common-library/tests/test_error_codes.py b/packages/common-library/tests/test_error_codes.py new file mode 100644 index 00000000000..80f7b8b0808 --- /dev/null +++ b/packages/common-library/tests/test_error_codes.py @@ -0,0 +1,105 @@ +# pylint: disable=broad-except +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import logging +import time + +import pytest +from common_library.error_codes import ( + create_error_code, + parse_error_code_parts, + parse_error_codes, +) + +logger = logging.getLogger(__name__) + + +def _level_three(v): + msg = f"An error occurred in level three with {v}" + raise RuntimeError(msg) + + +def _level_two(v): + _level_three(v) + + +def _level_one(v=None): + _level_two(v) + + +def test_exception_fingerprint_consistency(): + error_codes = [] + + for v in range(2): + # emulates different runs of the same function (e.g. different sessions) + try: + _level_one(v) # same even if different value! + except Exception as err: + time.sleep(1) + error_code = create_error_code(err) + error_codes.append(error_code) + + fingerprints, timestamps = list( + zip( + *[parse_error_code_parts(error_code) for error_code in error_codes], + strict=True, + ) + ) + + assert fingerprints[0] == fingerprints[1] + assert timestamps[0] < timestamps[1] + + try: + # Same function but different location + _level_one(0) + except Exception as e2: + time.sleep(1) + error_code_2 = create_error_code(e2) + fingerprint_2, timestamp_2 = parse_error_code_parts(error_code_2) + + assert fingerprints[0] != fingerprint_2 + assert timestamps[1] < timestamp_2 + + +def test_create_log_and_parse_error_code(caplog: pytest.LogCaptureFixture): + with pytest.raises(RuntimeError) as exc_info: + _level_one() + + # 1. Unexpected ERROR + err = exc_info.value + + # 2. create error-code + error_code = create_error_code(err) + + # 3. log all details in service + caplog.clear() + + # Can add a formatter that prefix error-codes + syslog = logging.StreamHandler() + syslog.setFormatter(logging.Formatter("%(asctime)s %(error_code)s : %(message)s")) + logger.addHandler(syslog) + + logger.exception("Fake Unexpected error", extra={"error_code": error_code}) + + # logs something like E.g. 2022-07-06 14:31:13,432 OEC:140350117529856 : Fake Unexpected error + assert parse_error_codes( + f"2022-07-06 14:31:13,432 {error_code} : Fake Unexpected error" + ) == [ + error_code, + ] + + assert caplog.records[0].error_code == error_code + assert caplog.records[0] + + logger.exception("Fake without error_code") + + # 4. inform user (e.g. 
with new error or sending message) + user_message = ( + f"This is a user-friendly message to inform about an error. [{error_code}]" + ) + + assert parse_error_codes(user_message) == [ + error_code, + ] diff --git a/packages/common-library/tests/test_errors_classes.py b/packages/common-library/tests/test_errors_classes.py new file mode 100644 index 00000000000..808ed09c40d --- /dev/null +++ b/packages/common-library/tests/test_errors_classes.py @@ -0,0 +1,156 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member + + +from datetime import datetime +from typing import Any + +import pytest +from common_library.errors_classes import OsparcErrorMixin + + +def test_get_full_class_name(): + class A(OsparcErrorMixin): ... + + class B1(A): ... + + class B2(A): ... + + class C(B2): ... + + class B12(B1, ValueError): ... + + assert B1._get_full_class_name() == "A.B1" + assert C._get_full_class_name() == "A.B2.C" + assert A._get_full_class_name() == "A" + + # diamond inheritance (not usual but supported) + assert B12._get_full_class_name() == "ValueError.A.B1.B12" + + +def test_error_codes_and_msg_template(): + class MyBaseError(OsparcErrorMixin, Exception): + pass + + class MyValueError(MyBaseError, ValueError): + msg_template = "Wrong value {value}" + + error = MyValueError(value=42) + + assert error.code == "ValueError.MyBaseError.MyValueError" + assert f"{error}" == "Wrong value 42" + + class MyTypeError(MyBaseError, TypeError): + msg_template = "Wrong type {type}" + + error = MyTypeError(type="int") + + assert f"{error}" == "Wrong type int" + + +def test_error_msg_template_override(): + class MyError(OsparcErrorMixin, Exception): + msg_template = "Wrong value {value}" + + error_override_msg = MyError(msg_template="I want this message") + assert str(error_override_msg) == "I want this message" + + error = MyError(value=42) + assert hasattr(error, "value") + assert str(error) == f"Wrong value {error.value}" + + +def test_error_msg_template_nicer_override(): + class MyError(OsparcErrorMixin, Exception): + msg_template = "Wrong value {value}" + + def __init__(self, msg=None, **ctx: Any) -> None: + super().__init__(**ctx) + # positional argument msg (if defined) overrides the msg_template + if msg: + self.msg_template = msg + + error_override_msg = MyError("I want this message") + assert str(error_override_msg) == "I want this message" + + error = MyError(value=42) + assert hasattr(error, "value") + assert str(error) == f"Wrong value {error.value}" + + +def test_error_with_constructor(): + class MyError(OsparcErrorMixin, ValueError): + msg_template = "Wrong value {value}" + + # handy e.g. 
autocompletion + def __init__(self, *, my_value: int = 42, **extra): + super().__init__(**extra) + self.value = my_value + + error = MyError(my_value=33, something_else="yes") + assert error.value == 33 + assert str(error) == "Wrong value 33" + assert not hasattr(error, "my_value") + + # the autocompletion does not see this + assert error.something_else == "yes" + + +@pytest.mark.parametrize( + "str_format,ctx,expected", + [ + pytest.param("{value:10}", {"value": "Python"}, "Python ", id="left-align"), + pytest.param( + "{value:>10}", {"value": "Python"}, " Python", id="right-align" + ), + pytest.param( + "{value:^10}", {"value": "Python"}, " Python ", id="center-align" + ), + pytest.param("{v:.2f}", {"v": 3.1415926}, "3.14", id="decimals"), + pytest.param( + "{dt:%Y-%m-%d %H:%M}", + {"dt": datetime(2020, 5, 17, 18, 45)}, + "2020-05-17 18:45", + id="datetime", + ), + ], +) +def test_msg_template_with_different_formats( + str_format: str, ctx: dict[str, Any], expected: str +): + class MyError(OsparcErrorMixin, ValueError): + msg_template = str_format + + error = MyError(**ctx) + assert str(error) == expected + + +def test_missing_keys_in_msg_template_does_not_raise(): + class MyError(OsparcErrorMixin, ValueError): + msg_template = "{value} and {missing}" + + assert str(MyError(value=42)) == "42 and 'missing=?'" + + +def test_exception_context(): + class MyError(OsparcErrorMixin, ValueError): + msg_template = "{value} and {missing}" + + exc = MyError(value=42, missing="foo", extra="bar") + assert exc.error_context() == { + "code": "ValueError.MyError", + "message": "42 and foo", + "value": 42, + "missing": "foo", + "extra": "bar", + } + + exc = MyError(value=42) + assert exc.error_context() == { + "code": "ValueError.MyError", + "message": "42 and 'missing=?'", + "value": 42, + } diff --git a/packages/common-library/tests/test_exclude.py b/packages/common-library/tests/test_exclude.py new file mode 100644 index 00000000000..a708def5778 --- /dev/null +++ b/packages/common-library/tests/test_exclude.py @@ -0,0 +1,22 @@ +from typing import Any + +from common_library.exclude import Unset, as_dict_exclude_none, as_dict_exclude_unset + + +def test_as_dict_exclude_unset(): + def f( + par1: str | Unset = Unset.VALUE, par2: int | Unset = Unset.VALUE + ) -> dict[str, Any]: + return as_dict_exclude_unset(par1=par1, par2=par2) + + assert f() == {} + assert f(par1="hi") == {"par1": "hi"} + assert f(par2=4) == {"par2": 4} + assert f(par1="hi", par2=4) == {"par1": "hi", "par2": 4} + + # still expected behavior + assert as_dict_exclude_unset(par1=None) == {"par1": None} + + +def test_as_dict_exclude_none(): + assert as_dict_exclude_none(par1=None) == {} diff --git a/packages/common-library/tests/test_json_serialization.py b/packages/common-library/tests/test_json_serialization.py new file mode 100644 index 00000000000..b1062902032 --- /dev/null +++ b/packages/common-library/tests/test_json_serialization.py @@ -0,0 +1,135 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import json +from copy import deepcopy +from typing import Annotated, Any, TypeAlias +from uuid import uuid4 + +import pytest +from common_library.json_serialization import ( + JsonNamespace, + SeparatorTuple, + json_dumps, + json_loads, + representation_encoder, +) +from faker import Faker +from pydantic import AnyHttpUrl, AnyUrl, BaseModel, Field, HttpUrl, TypeAdapter +from pydantic.json import pydantic_encoder + + +@pytest.fixture +def fake_data_dict(faker: Faker) -> 
dict[str, Any]: + data = { + "uuid_as_UUID": faker.uuid4(cast_to=None), + "uuid_as_str": faker.uuid4(), + "int": faker.pyint(), + "float": faker.pyfloat(), + "str": faker.pystr(), + "dict": faker.pydict(), + "list": faker.pylist(), + } + data["object"] = deepcopy(data) + return data + + +def test_json_dump_variants(): + + uuid_obj = uuid4() + + with pytest.raises(TypeError) as exc_info: + json.dumps(uuid_obj) + + assert str(exc_info.value) == "Object of type UUID is not JSON serializable" + + assert json_dumps(uuid_obj) == json.dumps(str(uuid_obj)) + + +def test_serialized_non_str_dict_keys(): + # tests orjson.OPT_NON_STR_KEYS option + + # if a dict has a key of a type other than str it will NOT raise + json_dumps({1: "foo"}) + + +ConstrainedFloat: TypeAlias = Annotated[float, Field(ge=0.0, le=1.0)] + + +def test_serialized_constraint_floats(): + # test extension of ENCODERS_BY_TYPE used in pydantic_encoder + + json_dumps({"value": 1.0}) + + # TypeError: Type is not JSON serializable: ProgressPercent + json_dumps({"value": TypeAdapter(ConstrainedFloat).validate_python(1.0)}) + + +def _expected_json_dumps(obj: Any, default=pydantic_encoder, **json_dumps_kwargs): + if "indent" not in json_dumps_kwargs: + json_dumps_kwargs.setdefault( + "separators", + SeparatorTuple(item_separator=",", key_separator=":"), # compact separators + ) + return json.dumps(obj, default=default, **json_dumps_kwargs) + + +@pytest.mark.parametrize( + "kwargs", + [ + pytest.param({}, id="no-kw"), + pytest.param({"sort_keys": True}, id="sort_keys-kw"), + pytest.param( + {"separators": (",", ":")}, id="default_separators-kw" + ), # NOTE: e.g. engineio.packet has `self.json.dumps(self.data, separators=(',', ':'))` + pytest.param( + {"indent": 2}, id="indent-kw" + ), # NOTE: only one-to-one with indent=2 + ], +) +def test_compatiblity_with_json_interface( + fake_data_dict: dict[str, Any], kwargs: dict[str, Any] +): + orjson_dump = JsonNamespace.dumps(fake_data_dict, **kwargs) + json_dump = _expected_json_dumps(fake_data_dict, **kwargs) + + # NOTE: cannot compare dumps directly because orjson compacts it more + assert json_loads(orjson_dump) == json_loads(json_dump) + + +def test_serialized_model_with_urls(faker: Faker): + # See: https://github.com/ITISFoundation/osparc-simcore/pull/6852 + class M(BaseModel): + any_http_url: AnyHttpUrl + any_url: AnyUrl + http_url: HttpUrl + + obj = M( + any_http_url=faker.url(), + any_url=faker.url(), + http_url=faker.url(), + ) + json_dumps(obj) + + +def test_json_dumps_with_representation_encoder(): + class CustomObject: + def __str__(self): + return "CustomObjectRepresentation" + + class SomeModel(BaseModel): + x: int + + obj = { + "custom": CustomObject(), + "some": SomeModel(x=42), + } + + # Using representation_encoder as the default encoder + result = json_dumps(obj, default=representation_encoder, indent=1) + + assert ( + result + == '{\n "custom": "CustomObjectRepresentation",\n "some": {\n "x": 42\n }\n}' + ) diff --git a/packages/common-library/tests/test_pagination_tools.py b/packages/common-library/tests/test_pagination_tools.py new file mode 100644 index 00000000000..987eabc0ee2 --- /dev/null +++ b/packages/common-library/tests/test_pagination_tools.py @@ -0,0 +1,92 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import asyncio +from collections.abc import Callable + +import pytest +from common_library.pagination_tools import iter_pagination_params +from pydantic import 
ValidationError + + +@pytest.fixture +def all_items() -> list[int]: + return list(range(11)) + + +@pytest.fixture +async def get_page(all_items: list[int]) -> Callable: + async def _get_page(offset, limit) -> tuple[list[int], int]: + await asyncio.sleep(0) + return all_items[offset : offset + limit], len(all_items) + + return _get_page + + +@pytest.mark.parametrize("limit", [2, 3, 5]) +@pytest.mark.parametrize("offset", [0, 1, 5]) +async def test_iter_pages_args( + limit: int, offset: int, get_page: Callable, all_items: list[int] +): + + last_page = [None] * limit + + num_items = len(all_items) - offset + expected_num_pages = num_items // limit + (1 if num_items % limit else 0) + + num_pages = 0 + page_args = None + for page_index, page_args in enumerate( + iter_pagination_params(offset=offset, limit=limit) + ): + + page_items, page_args.total_number_of_items = await get_page( + page_args.offset_current, page_args.limit + ) + + assert set(last_page) != set(page_items) + last_page = list(page_items) + + # contains sub-sequence + assert str(page_items)[1:-1] in str(all_items)[1:-1] + + num_pages = page_index + 1 + + assert last_page[-1] == all_items[-1] + assert num_pages == expected_num_pages + + assert page_args is not None + assert not page_args.has_items_left() + assert page_args.total_number_of_pages() == num_pages + + +@pytest.mark.parametrize("limit", [-1, 0]) +@pytest.mark.parametrize("offset", [-1]) +def test_iter_pages_args_invalid(limit: int, offset: int): + + with pytest.raises(ValidationError): # noqa: PT012 + for _ in iter_pagination_params(offset=offset, limit=limit): + pass + + +def test_fails_if_total_number_of_items_not_set(): + with pytest.raises( # noqa: PT012 + RuntimeError, + match="page_args.total_number_of_items = total_count", + ): + for _ in iter_pagination_params(offset=0, limit=2): + pass + + +def test_fails_if_total_number_of_items_changes(): + with pytest.raises( # noqa: PT012 + RuntimeError, + match="total_number_of_items cannot change on every iteration", + ): + for page_params in iter_pagination_params( + offset=0, limit=2, total_number_of_items=4 + ): + assert page_params.total_number_of_items == 4 + page_params.total_number_of_items += 1 diff --git a/packages/common-library/tests/test_pydantic_fields_extension.py b/packages/common-library/tests/test_pydantic_fields_extension.py new file mode 100644 index 00000000000..3461344062a --- /dev/null +++ b/packages/common-library/tests/test_pydantic_fields_extension.py @@ -0,0 +1,72 @@ +from typing import Any, Callable, Literal + +import pytest +from common_library.pydantic_fields_extension import get_type, is_literal, is_nullable +from pydantic import BaseModel, Field + + +class MyModel(BaseModel): + a: int + b: float | None = Field(...) 
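+    # NOTE: `b` is annotated `float | None` but declared with Field(...), i.e. nullable yet required
+    # (no default) -- this is the case the is_nullable/get_type parametrizations below probe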
+ c: str = "bla" + d: bool | None = None + e: Literal["bla"] + + +@pytest.mark.parametrize( + "fn,expected,name", + [ + ( + get_type, + int, + "a", + ), + ( + get_type, + float, + "b", + ), + ( + get_type, + str, + "c", + ), + (get_type, bool, "d"), + ( + is_literal, + False, + "a", + ), + ( + is_literal, + False, + "b", + ), + ( + is_literal, + False, + "c", + ), + (is_literal, False, "d"), + (is_literal, True, "e"), + ( + is_nullable, + False, + "a", + ), + ( + is_nullable, + True, + "b", + ), + ( + is_nullable, + False, + "c", + ), + (is_nullable, True, "d"), + (is_nullable, False, "e"), + ], +) +def test_field_fn(fn: Callable[[Any], Any], expected: Any, name: str): + assert expected == fn(MyModel.model_fields.get(name)) diff --git a/packages/common-library/tests/test_pydantic_validators.py b/packages/common-library/tests/test_pydantic_validators.py new file mode 100644 index 00000000000..c1cfea84c67 --- /dev/null +++ b/packages/common-library/tests/test_pydantic_validators.py @@ -0,0 +1,72 @@ +from datetime import timedelta +from typing import Annotated + +import pytest +from common_library.pydantic_validators import ( + _validate_legacy_timedelta_str, + validate_numeric_string_as_timedelta, +) +from faker import Faker +from pydantic import BeforeValidator, Field +from pydantic_settings import BaseSettings, SettingsConfigDict +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict + + +def test_validate_legacy_timedelta(monkeypatch: pytest.MonkeyPatch, faker: Faker): + class Settings(BaseSettings): + APP_NAME: str + REQUEST_TIMEOUT: Annotated[ + timedelta, BeforeValidator(_validate_legacy_timedelta_str) + ] = Field(default=timedelta(hours=1)) + + model_config = SettingsConfigDict() + + app_name = faker.pystr() + env_vars: dict[str, str | bool] = {"APP_NAME": app_name} + + # without timedelta + setenvs_from_dict(monkeypatch, env_vars) + settings = Settings() + print(settings.model_dump()) + assert app_name == settings.APP_NAME + assert timedelta(hours=1) == settings.REQUEST_TIMEOUT + + # with timedelta in seconds + env_vars["REQUEST_TIMEOUT"] = "2 1:10:00" + setenvs_from_dict(monkeypatch, env_vars) + settings = Settings() + print(settings.model_dump()) + assert app_name == settings.APP_NAME + assert timedelta(days=2, hours=1, minutes=10) == settings.REQUEST_TIMEOUT + + +def test_validate_timedelta_in_legacy_mode( + monkeypatch: pytest.MonkeyPatch, faker: Faker +): + class Settings(BaseSettings): + APP_NAME: str + REQUEST_TIMEOUT: timedelta = Field(default=timedelta(seconds=40)) + + _validate_request_timeout = validate_numeric_string_as_timedelta( + "REQUEST_TIMEOUT" + ) + + model_config = SettingsConfigDict() + + app_name = faker.pystr() + env_vars: dict[str, str | bool] = {"APP_NAME": app_name} + + # without timedelta + setenvs_from_dict(monkeypatch, env_vars) + settings = Settings() + print(settings.model_dump()) + assert app_name == settings.APP_NAME + assert timedelta(seconds=40) == settings.REQUEST_TIMEOUT + + # with timedelta in seconds + env_vars["REQUEST_TIMEOUT"] = "5555" + setenvs_from_dict(monkeypatch, env_vars) + settings = Settings() + print(settings.model_dump()) + assert app_name == settings.APP_NAME + assert timedelta(seconds=5555) == settings.REQUEST_TIMEOUT diff --git a/packages/common-library/tests/test_serialization.py b/packages/common-library/tests/test_serialization.py new file mode 100644 index 00000000000..d5dea70ec22 --- /dev/null +++ b/packages/common-library/tests/test_serialization.py @@ -0,0 +1,34 @@ +import pytest +from 
common_library.serialization import model_dump_with_secrets +from pydantic import BaseModel, SecretStr + + +class Credentials(BaseModel): + username: str + password: SecretStr + + +class Access(BaseModel): + credentials: Credentials + + +@pytest.mark.parametrize( + "expected,show_secrets", + [ + ( + {"credentials": {"username": "DeepThought", "password": "42"}}, + True, + ), + ( + {"credentials": {"username": "DeepThought", "password": "**********"}}, + False, # hide secrets + ), + ], +) +def test_model_dump_with_secrets(expected: dict, show_secrets: bool): + assert expected == model_dump_with_secrets( + Access( + credentials=Credentials(username="DeepThought", password=SecretStr("42")) + ), + show_secrets=show_secrets, + ) diff --git a/packages/common-library/tests/test_users_enums.py b/packages/common-library/tests/test_users_enums.py new file mode 100644 index 00000000000..e52d66b3f11 --- /dev/null +++ b/packages/common-library/tests/test_users_enums.py @@ -0,0 +1,79 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from common_library.users_enums import _USER_ROLE_TO_LEVEL, UserRole + + +def test_user_role_to_level_map_in_sync(): + # If fails, then update _USER_ROLE_TO_LEVEL map + assert set(_USER_ROLE_TO_LEVEL.keys()) == set(UserRole.__members__.keys()) + + +def test_user_roles_compares_to_admin(): + assert UserRole.ANONYMOUS < UserRole.ADMIN + assert UserRole.GUEST < UserRole.ADMIN + assert UserRole.USER < UserRole.ADMIN + assert UserRole.TESTER < UserRole.ADMIN + assert UserRole.PRODUCT_OWNER < UserRole.ADMIN + assert UserRole.ADMIN == UserRole.ADMIN + + +def test_user_roles_compares_to_product_owner(): + assert UserRole.ANONYMOUS < UserRole.PRODUCT_OWNER + assert UserRole.GUEST < UserRole.PRODUCT_OWNER + assert UserRole.USER < UserRole.PRODUCT_OWNER + assert UserRole.TESTER < UserRole.PRODUCT_OWNER + assert UserRole.PRODUCT_OWNER == UserRole.PRODUCT_OWNER + assert UserRole.ADMIN > UserRole.PRODUCT_OWNER + + +def test_user_roles_compares_to_tester(): + assert UserRole.ANONYMOUS < UserRole.TESTER + assert UserRole.GUEST < UserRole.TESTER + assert UserRole.USER < UserRole.TESTER + assert UserRole.TESTER == UserRole.TESTER + assert UserRole.PRODUCT_OWNER > UserRole.TESTER + assert UserRole.ADMIN > UserRole.TESTER + + +def test_user_roles_compares_to_user(): + assert UserRole.ANONYMOUS < UserRole.USER + assert UserRole.GUEST < UserRole.USER + assert UserRole.USER == UserRole.USER + assert UserRole.TESTER > UserRole.USER + assert UserRole.PRODUCT_OWNER > UserRole.USER + assert UserRole.ADMIN > UserRole.USER + + +def test_user_roles_compares_to_guest(): + assert UserRole.ANONYMOUS < UserRole.GUEST + assert UserRole.GUEST == UserRole.GUEST + assert UserRole.USER > UserRole.GUEST + assert UserRole.TESTER > UserRole.GUEST + assert UserRole.PRODUCT_OWNER > UserRole.GUEST + assert UserRole.ADMIN > UserRole.GUEST + + +def test_user_roles_compares_to_anonymous(): + assert UserRole.ANONYMOUS == UserRole.ANONYMOUS + assert UserRole.GUEST > UserRole.ANONYMOUS + assert UserRole.USER > UserRole.ANONYMOUS + assert UserRole.TESTER > UserRole.ANONYMOUS + assert UserRole.PRODUCT_OWNER > UserRole.ANONYMOUS + assert UserRole.ADMIN > UserRole.ANONYMOUS + + +def test_user_roles_compares(): + # < and > + assert UserRole.TESTER < UserRole.ADMIN + assert UserRole.ADMIN > UserRole.TESTER + + # >=, == and <= + assert UserRole.TESTER <= UserRole.ADMIN + assert UserRole.ADMIN >= UserRole.TESTER + + assert 
UserRole.ADMIN <= UserRole.ADMIN + assert UserRole.ADMIN == UserRole.ADMIN diff --git a/packages/dask-task-models-library/Makefile b/packages/dask-task-models-library/Makefile index a8fff267681..5862b7bc646 100644 --- a/packages/dask-task-models-library/Makefile +++ b/packages/dask-task-models-library/Makefile @@ -12,7 +12,7 @@ requirements: ## compiles pip requirements (.in -> .txt) .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests tests-ci @@ -40,6 +40,7 @@ tests-ci: ## runs unit tests --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=dask_task_models_library \ --durations=10 \ --log-date-format="%Y-%m-%d %H:%M:%S" \ diff --git a/packages/dask-task-models-library/requirements/_base.in b/packages/dask-task-models-library/requirements/_base.in index b137bd88365..f25da08947b 100644 --- a/packages/dask-task-models-library/requirements/_base.in +++ b/packages/dask-task-models-library/requirements/_base.in @@ -2,7 +2,9 @@ # Specifies third-party dependencies for 'dask-task-models-library' # --constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in dask[distributed] pydantic[email] diff --git a/packages/dask-task-models-library/requirements/_base.txt b/packages/dask-task-models-library/requirements/_base.txt index 8e04334ca1f..75b9136287c 100644 --- a/packages/dask-task-models-library/requirements/_base.txt +++ b/packages/dask-task-models-library/requirements/_base.txt @@ -1,84 +1,187 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -attrs==22.2.0 - # via jsonschema -click==8.1.3 +annotated-types==0.7.0 + # via pydantic +arrow==1.3.0 + # via -r requirements/../../../packages/models-library/requirements/_base.in +attrs==25.3.0 + # via + # jsonschema + # referencing +click==8.1.8 # via # dask # distributed -cloudpickle==2.2.1 + # typer +cloudpickle==3.1.1 # via # dask # distributed -dask==2023.2.1 +dask==2025.5.0 # via # -r requirements/_base.in # distributed -distributed==2023.2.1 +distributed==2025.5.0 # via dask -dnspython==2.3.0 +dnspython==2.7.0 # via email-validator -email-validator==1.3.1 +email-validator==2.2.0 # via pydantic -fsspec==2023.1.0 +fsspec==2025.3.2 # via dask -heapdict==1.0.1 - # via zict -idna==3.4 +idna==3.10 # via email-validator -jinja2==3.1.2 +importlib-metadata==8.7.0 + # via dask +jinja2==3.1.6 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # distributed -jsonschema==4.17.3 +jsonschema==4.23.0 # via -r requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2025.4.1 + # via jsonschema locket==1.0.0 # via # distributed # partd -markupsafe==2.1.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via jinja2 -msgpack==1.0.4 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 # via distributed -packaging==23.0 +orjson==3.10.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==25.0 # via # dask # distributed -partd==1.3.0 +partd==1.4.2 # via dask -psutil==5.9.4 +psutil==7.0.0 # via distributed -pydantic==1.10.2 +pydantic==2.11.4 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in -pyrsistent==0.19.3 - # via jsonschema -pyyaml==6.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-extra-types==2.10.4 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.1.0 + # via pydantic-settings +pyyaml==6.0.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # dask # distributed +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +rich==14.0.0 + # via + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.25.0 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil sortedcontainers==2.4.0 # via distributed -tblib==1.7.0 +tblib==3.1.0 # via distributed -toolz==0.12.0 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.2 +tornado==6.5 # via distributed -typing-extensions==4.5.0 +typer==0.15.4 + # via -r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20250516 + # via arrow +typing-extensions==4.13.2 + # via + # pydantic + # pydantic-core + # pydantic-extra-types + # typer + # typing-inspection +typing-inspection==0.4.0 # via pydantic -urllib3==1.26.14 +urllib3==2.4.0 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # distributed -zict==2.2.0 +zict==3.0.0 # via distributed +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/dask-task-models-library/requirements/_test.in b/packages/dask-task-models-library/requirements/_test.in index 08574992af2..9e54003c3ef 100644 --- a/packages/dask-task-models-library/requirements/_test.in +++ b/packages/dask-task-models-library/requirements/_test.in @@ -9,13 +9,11 @@ --constraint _base.txt # testing - coverage -coveralls faker pint pytest -pytest-aiohttp # incompatible with pytest-asyncio. See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-asyncio pytest-cov pytest-icdiff pytest-instafail diff --git a/packages/dask-task-models-library/requirements/_test.txt b/packages/dask-task-models-library/requirements/_test.txt index 9b5b1c4f92a..9daa7aacc47 100644 --- a/packages/dask-task-models-library/requirements/_test.txt +++ b/packages/dask-task-models-library/requirements/_test.txt @@ -1,112 +1,65 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 - # via - # -c requirements/../../../requirements/constraints.txt - # pytest-aiohttp -aiosignal==1.3.1 - # via aiohttp -async-timeout==4.0.2 - # via aiohttp -attrs==22.2.0 - # via - # -c requirements/_base.txt - # aiohttp - # pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.0.1 - # via - # aiohttp - # requests -coverage==6.5.0 +coverage==7.8.0 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 +faker==37.3.0 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 - # via -r requirements/_test.in -frozenlist==1.3.3 - # via - # aiohttp - # aiosignal -icdiff==2.0.6 +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +icdiff==2.0.7 # via pytest-icdiff -idna==3.4 - # via - # -c requirements/_base.txt - # requests - # yarl -iniconfig==2.0.0 +iniconfig==2.1.0 # via pytest -multidict==6.0.4 - # via - # aiohttp - # yarl -packaging==23.0 +packaging==25.0 # via # -c requirements/_base.txt # pytest # pytest-sugar -pint==0.20.1 +pint==0.24.4 # via -r requirements/_test.in -pluggy==1.0.0 +platformdirs==4.3.8 + # via pint +pluggy==1.6.0 # via pytest pprintpp==0.4.0 # via pytest-icdiff -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-icdiff # pytest-instafail # pytest-mock # pytest-sugar -pytest-aiohttp==1.0.4 - # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-cov==6.1.1 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-runner==6.0.0 
+pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dateutil==2.8.2 - # via faker -pyyaml==6.0 +pytest-sugar==1.0.0 # via -r requirements/_test.in -requests==2.28.2 - # via coveralls -six==1.16.0 - # via python-dateutil -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 +pyyaml==6.0.2 # via - # coverage - # pytest -urllib3==1.26.14 + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +termcolor==3.1.0 + # via pytest-sugar +typing-extensions==4.13.2 # via # -c requirements/_base.txt - # requests -yarl==1.8.2 - # via aiohttp + # flexcache + # flexparser + # pint +tzdata==2025.2 + # via faker diff --git a/packages/dask-task-models-library/requirements/_tools.txt b/packages/dask-task-models-library/requirements/_tools.txt index e0b6a58e72d..e0213f1353c 100644 --- a/packages/dask-task-models-library/requirements/_tools.txt +++ b/packages/dask-task-models-library/requirements/_tools.txt @@ -1,90 +1,84 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.10 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.4.0 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.18.0 # via virtualenv -identify==2.5.18 +identify==2.6.10 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==25.0 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.1.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.8 # via + # -c requirements/_test.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.2.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.7 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==6.0 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # pre-commit -tomli==2.0.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pre-commit +ruff==0.11.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==80.7.1 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.13.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.31.2 # via pre-commit -wheel==0.38.4 
+wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/dask-task-models-library/requirements/ci.txt b/packages/dask-task-models-library/requirements/ci.txt index e4f199bc3f8..d7fc2c347fa 100644 --- a/packages/dask-task-models-library/requirements/ci.txt +++ b/packages/dask-task-models-library/requirements/ci.txt @@ -12,8 +12,10 @@ --requirement _tools.txt # installs this repo's packages -../pytest-simcore/ -../models-library/ +pytest-simcore @ ../pytest-simcore +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ # current module -. +simcore-dask-task-models-library @ . diff --git a/packages/dask-task-models-library/requirements/dev.txt b/packages/dask-task-models-library/requirements/dev.txt index 33506f6a8be..a9d9555b2e8 100644 --- a/packages/dask-task-models-library/requirements/dev.txt +++ b/packages/dask-task-models-library/requirements/dev.txt @@ -13,7 +13,9 @@ # installs this repo's packages --editable ../pytest-simcore/ +--editable ../common-library/ --editable ../models-library/ +--editable ../settings-library/ # current module --editable . diff --git a/packages/dask-task-models-library/setup.cfg b/packages/dask-task-models-library/setup.cfg index d072b6ab557..3d91dba1cea 100644 --- a/packages/dask-task-models-library/setup.cfg +++ b/packages/dask-task-models-library/setup.cfg @@ -15,3 +15,4 @@ test = pytest [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function diff --git a/packages/dask-task-models-library/setup.py b/packages/dask-task-models-library/setup.py index 3745ccf3420..7adeda33e09 100644 --- a/packages/dask-task-models-library/setup.py +++ b/packages/dask-task-models-library/setup.py @@ -31,30 +31,31 @@ def read_reqs(reqs_path: Path) -> set[str]: ) # STRICT requirements -SETUP = dict( - name="simcore-dask-task-models-library", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author="Sylvain Anderegg (sanderegg)", - description="Core service library for simcore pydantic dask task models", - python_requires="~=3.9", - classifiers=[ +SETUP = { + "name": "simcore-dask-task-models-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Sylvain Anderegg (sanderegg)", + "description": "Core service library for simcore pydantic dask task models", + "python_requires": ">=3.10", + "classifiers": [ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], - long_description=Path(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - install_requires=INSTALL_REQUIREMENTS, - packages=find_packages(where="src"), - package_dir={"": "src"}, - include_package_data=True, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - zip_safe=False, -) + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "include_package_data": True, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} if __name__ == 
"__main__": diff --git a/packages/dask-task-models-library/src/dask_task_models_library/__init__.py b/packages/dask-task-models-library/src/dask_task_models_library/__init__.py index 6a7f93bb57b..c5eb71c445e 100644 --- a/packages/dask-task-models-library/src/dask_task_models_library/__init__.py +++ b/packages/dask-task-models-library/src/dask_task_models_library/__init__.py @@ -1,5 +1,3 @@ -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution( - "simcore-dask-task-models-library" -).version +__version__: str = version("simcore-dask-task-models-library") diff --git a/packages/dask-task-models-library/src/dask_task_models_library/constants.py b/packages/dask-task-models-library/src/dask_task_models_library/constants.py new file mode 100644 index 00000000000..4c9db9d83c2 --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/constants.py @@ -0,0 +1,3 @@ +from typing import Final + +DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY: Final[str] = "EC2-INSTANCE-TYPE" diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/docker.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/docker.py index 4e9d36df3fb..b4fa976b665 100644 --- a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/docker.py +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/docker.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, Extra, SecretStr +from pydantic import BaseModel, ConfigDict, SecretStr class DockerBasicAuth(BaseModel): @@ -6,9 +6,9 @@ class DockerBasicAuth(BaseModel): username: str password: SecretStr - class Config: - extra = Extra.forbid - schema_extra = { + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ { "server_address": "docker.io", @@ -16,4 +16,5 @@ class Config: "password": "123456", } ] - } + }, + ) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/errors.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/errors.py index 9849eaecb1a..1597ddfb6f4 100644 --- a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/errors.py +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/errors.py @@ -1,22 +1,20 @@ """ Dask task exceptions """ -from pydantic.errors import PydanticErrorMixin +from common_library.errors_classes import OsparcErrorMixin -class TaskValueError(PydanticErrorMixin, ValueError): - code = "task.value_error" +class TaskValueError(OsparcErrorMixin, ValueError): + ... -class TaskCancelledError(PydanticErrorMixin, RuntimeError): - code = "task.cancelled_error" +class TaskCancelledError(OsparcErrorMixin, RuntimeError): msg_template = "The task was cancelled" -class ServiceRuntimeError(PydanticErrorMixin, RuntimeError): - code = "service.runtime_error" +class ServiceRuntimeError(OsparcErrorMixin, RuntimeError): msg_template = ( "The service {service_key}:{service_version}" - " in container {container_id} failed with code" + " running in container {container_id} failed with code" " {exit_code}. 
Last logs:\n{service_logs}" ) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/events.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/events.py index 624ed49e900..ea9292d483c 100644 --- a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/events.py +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/events.py @@ -1,99 +1,85 @@ from abc import ABC, abstractmethod -from typing import Optional, Union +import dask.typing from distributed.worker import get_worker -from models_library.projects_state import RunningState -from pydantic import BaseModel, Extra, NonNegativeFloat +from pydantic import BaseModel, ConfigDict, field_validator + +from .protocol import TaskOwner class BaseTaskEvent(BaseModel, ABC): job_id: str - msg: Optional[str] = None + task_owner: TaskOwner + msg: str | None = None @staticmethod @abstractmethod def topic_name() -> str: raise NotImplementedError - class Config: - extra = Extra.forbid - - -class TaskStateEvent(BaseTaskEvent): - state: RunningState + model_config = ConfigDict(extra="forbid") - @staticmethod - def topic_name() -> str: - return "task_state" - @classmethod - def from_dask_worker( - cls, state: RunningState, msg: Optional[str] = None - ) -> "TaskStateEvent": - return cls(job_id=get_worker().get_current_task(), state=state, msg=msg) - - class Config(BaseTaskEvent.Config): - schema_extra = { - "examples": [ - { - "job_id": "simcore/services/comp/sleeper:1.1.0:projectid_ec7e595a-63ee-46a1-a04a-901b11b649f8:nodeid_39467d89-b659-4914-9359-c40b1b6d1d6d:uuid_5ee5c655-450d-4711-a3ec-32ffe16bc580", - "state": RunningState.STARTED.value, - }, - { - "job_id": "simcore/services/comp/sleeper:1.1.0:projectid_ec7e595a-63ee-46a1-a04a-901b11b649f8:nodeid_39467d89-b659-4914-9359-c40b1b6d1d6d:uuid_5ee5c655-450d-4711-a3ec-32ffe16bc580", - "msg": "some unexpected error happened", - "state": RunningState.FAILED.value, - }, - ] - } +def _dask_key_to_dask_task_id(key: dask.typing.Key) -> str: + if isinstance(key, bytes): + return key.decode("utf-8") + if isinstance(key, tuple): + return "(" + ", ".join(_dask_key_to_dask_task_id(k) for k in key) + ")" + return f"{key}" class TaskProgressEvent(BaseTaskEvent): - progress: NonNegativeFloat + progress: float @staticmethod def topic_name() -> str: return "task_progress" @classmethod - def from_dask_worker(cls, progress: float) -> "TaskProgressEvent": - return cls(job_id=get_worker().get_current_task(), progress=progress) - - class Config(BaseTaskEvent.Config): - schema_extra = { + def from_dask_worker( + cls, progress: float, *, task_owner: TaskOwner + ) -> "TaskProgressEvent": + worker = get_worker() + job_id = worker.get_current_task() + + return cls( + job_id=_dask_key_to_dask_task_id(job_id), + progress=progress, + task_owner=task_owner, + ) + + model_config = ConfigDict( + json_schema_extra={ "examples": [ { "job_id": "simcore/services/comp/sleeper:1.1.0:projectid_ec7e595a-63ee-46a1-a04a-901b11b649f8:nodeid_39467d89-b659-4914-9359-c40b1b6d1d6d:uuid_5ee5c655-450d-4711-a3ec-32ffe16bc580", "progress": 0, + "task_owner": { + "user_id": 32, + "project_id": "ec7e595a-63ee-46a1-a04a-901b11b649f8", + "node_id": "39467d89-b659-4914-9359-c40b1b6d1d6d", + "parent_project_id": None, + "parent_node_id": None, + }, }, { "job_id": "simcore/services/comp/sleeper:1.1.0:projectid_ec7e595a-63ee-46a1-a04a-901b11b649f8:nodeid_39467d89-b659-4914-9359-c40b1b6d1d6d:uuid_5ee5c655-450d-4711-a3ec-32ffe16bc580", 
"progress": 1.0, + "task_owner": { + "user_id": 32, + "project_id": "ec7e595a-63ee-46a1-a04a-901b11b649f8", + "node_id": "39467d89-b659-4914-9359-c40b1b6d1d6d", + "parent_project_id": "887e595a-63ee-46a1-a04a-901b11b649f8", + "parent_node_id": "aa467d89-b659-4914-9359-c40b1b6d1d6d", + }, }, ] } + ) - -class TaskLogEvent(BaseTaskEvent): - log: str - - @staticmethod - def topic_name() -> str: - return "task_logs" - + @field_validator("progress") @classmethod - def from_dask_worker(cls, log: str) -> "TaskLogEvent": - return cls(job_id=get_worker().get_current_task(), log=log) - - class Config(BaseTaskEvent.Config): - schema_extra = { - "examples": [ - { - "job_id": "simcore/services/comp/sleeper:1.1.0:projectid_ec7e595a-63ee-46a1-a04a-901b11b649f8:nodeid_39467d89-b659-4914-9359-c40b1b6d1d6d:uuid_5ee5c655-450d-4711-a3ec-32ffe16bc580", - "log": "some logs", - }, - ] - } - - -DaskTaskEvents = type[Union[TaskLogEvent, TaskProgressEvent, TaskStateEvent]] + def ensure_between_0_1(cls, v): + if 0 <= v <= 1: + return v + return min(max(0, v), 1) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/io.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/io.py index 7d178bd4a77..0f443c57f68 100644 --- a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/io.py +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/io.py @@ -1,22 +1,23 @@ import json from contextlib import suppress from pathlib import Path -from typing import Any, Optional, Union, cast +from typing import Annotated, Any, TypeAlias +from common_library.json_serialization import json_loads from models_library.basic_regex import MIME_TYPE_RE from models_library.generics import DictModel -from models_library.services import PROPERTY_KEY_RE +from models_library.services_types import ServicePortKey from pydantic import ( AnyUrl, BaseModel, - Extra, + ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr, ) -from pydantic.types import constr +from pydantic.config import JsonDict TaskCancelEventName = "cancel_event_{}" @@ -24,125 +25,165 @@ class PortSchema(BaseModel): required: bool - class Config: - extra = Extra.forbid - schema_extra: dict[str, Any] = { - "examples": [ - { - "required": True, - }, - { - "required": False, - }, - ] - } + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "required": True, + }, + { + "required": False, + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", + json_schema_extra=_update_json_schema_extra, + ) class FilePortSchema(PortSchema): - mapping: Optional[str] = None + mapping: str | None = None url: AnyUrl - class Config(PortSchema.Config): - schema_extra = { - "examples": [ - { - "mapping": "some_filename.txt", - "url": "ftp://some_file_url", - "required": True, - }, - { - "required": False, - "url": "s3://another_file_url", - }, - ] - } + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "mapping": "some_filename.txt", + "url": "sftp://some_file_url", + "required": True, + }, + { + "required": False, + "url": "s3://another_file_url", + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) class FileUrl(BaseModel): url: AnyUrl - file_mapping: Optional[str] = Field( + file_mapping: str | None = Field( default=None, description="Local file relpath name (if given), otherwise it takes the 
url filename", ) - file_mime_type: Optional[str] = Field( - default=None, description="the file MIME type", regex=MIME_TYPE_RE + file_mime_type: str | None = Field( + default=None, description="the file MIME type", pattern=MIME_TYPE_RE ) - class Config: - extra = Extra.forbid - schema_extra = { - "examples": [ - {"url": "https://some_file_url", "file_mime_type": "application/json"}, - { - "url": "https://some_file_url", - "file_mapping": "some_file_name.txt", - "file_mime_type": "application/json", - }, - ] - } - - -PortKey = constr(regex=PROPERTY_KEY_RE) -PortValue = Union[ - StrictBool, - StrictInt, - StrictFloat, - StrictStr, - FileUrl, - list[Any], - dict[str, Any], - None, + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "url": "https://some_file_url", + "file_mime_type": "application/json", + }, + { + "url": "https://some_file_url", + "file_mapping": "some_file_name.txt", + "file_mime_type": "application/json", + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", + json_schema_extra=_update_json_schema_extra, + ) + + +PortValue: TypeAlias = Annotated[ + StrictBool + | StrictInt + | StrictFloat + | StrictStr + | FileUrl + | list[Any] + | dict[str, Any] + | None, + Field(union_mode="left_to_right"), ] -class TaskInputData(DictModel[PortKey, PortValue]): - class Config(DictModel.Config): - schema_extra = { - "examples": [ - { - "boolean_input": False, - "int_input": -45, - "float_input": 4564.45, - "string_input": "nobody thinks like a string", - "file_input": {"url": "s3://thatis_file_url"}, - }, - ] - } +class TaskInputData(DictModel[ServicePortKey, PortValue]): + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "boolean_input": False, + "int_input": -45, + "float_input": 4564.45, + "string_input": "nobody thinks like a string", + "file_input": {"url": "s3://thatis_file_url"}, + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) -PortSchemaValue = Union[PortSchema, FilePortSchema] +PortSchemaValue: TypeAlias = Annotated[ + PortSchema | FilePortSchema, Field(union_mode="left_to_right") +] -class TaskOutputDataSchema(DictModel[PortKey, PortSchemaValue]): +class TaskOutputDataSchema(DictModel[ServicePortKey, PortSchemaValue]): # # NOTE: Expected output data is only determined at runtime. A possibility # would be to create pydantic models dynamically but dask serialization # does not work well in that case. 
For that reason, the schema is # sent as a json-schema instead of with a dynamically-created model class # - class Config(DictModel.Config): - schema_extra = { - "examples": [ - { - "boolean_output": {"required": False}, - "int_output": {"required": True}, - "float_output": {"required": True}, - "string_output": {"required": False}, - "file_output": { - "required": True, - "url": "https://some_file_url", - "mapping": "the_output_filename", - }, - "optional_file_output": { - "required": False, - "url": "s3://one_file_url", + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "boolean_output": {"required": False}, + "int_output": {"required": True}, + "float_output": {"required": True}, + "string_output": {"required": False}, + "file_output": { + "required": True, + "url": "https://some_file_url", + "mapping": "the_output_filename", + }, + "optional_file_output": { + "required": False, + "url": "s3://one_file_url", + }, }, - }, - ] - } + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) -class TaskOutputData(DictModel[PortKey, PortValue]): +class TaskOutputData(DictModel[ServicePortKey, PortValue]): @classmethod def from_task_output( cls, schema: TaskOutputDataSchema, output_folder: Path, output_file_ext: str @@ -154,13 +195,13 @@ def from_task_output( with suppress(json.JSONDecodeError): # NOTE: The suppression here is ok, since if the data is empty, # there will be a validation error anyway - data = json.loads(output_data_file.read_text()) + loaded_data = json_loads(output_data_file.read_text()) + # ignore what is not in the schema + data = {k: v for k, v in loaded_data.items() if k in schema} for output_key, output_params in schema.items(): if isinstance(output_params, FilePortSchema): file_relpath = output_params.mapping or output_key - # TODO: file_path is built here, saved truncated in file_mapping and - # then rebuild again int _retrieve_output_data. Review. 
file_path = output_folder / file_relpath if file_path.exists(): data[output_key] = { @@ -168,27 +209,28 @@ def from_task_output( "file_mapping": file_relpath, } elif output_params.required: - raise ValueError( - f"Could not locate '{file_path}' in {output_folder}" - ) - else: - if output_key not in data and output_params.required: - raise ValueError( - f"Could not locate '{output_key}' in {output_data_file}" - ) - - # NOTE: this cast is necessary to make mypy happy - return cast(TaskOutputData, cls.parse_obj(data)) - - class Config(DictModel.Config): - schema_extra = { - "examples": [ - { - "boolean_output": False, - "int_output": -45, - "float_output": 4564.45, - "string_output": "nobody thinks like a string", - "file_output": {"url": "s3://yet_another_file_url"}, - }, - ] - } + msg = f"Could not locate '{file_path}' in {output_folder}" + raise ValueError(msg) + elif output_key not in data and output_params.required: + msg = f"Could not locate '{output_key}' in {output_data_file}" + raise ValueError(msg) + + return cls.model_validate(data) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "boolean_output": False, + "int_output": -45, + "float_output": 4564.45, + "string_output": "nobody thinks like a string", + "file_output": {"url": "s3://yet_another_file_url"}, + }, + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/protocol.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/protocol.py new file mode 100644 index 00000000000..f7179be78c0 --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/protocol.py @@ -0,0 +1,116 @@ +from typing import Any, Protocol, TypeAlias + +from models_library.basic_types import EnvVarKey +from models_library.docker import DockerLabelKey +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_resources import BootMode +from models_library.users import UserID +from pydantic import AnyUrl, BaseModel, ConfigDict, model_validator +from pydantic.config import JsonDict +from settings_library.s3 import S3Settings + +from .docker import DockerBasicAuth +from .io import TaskInputData, TaskOutputData, TaskOutputDataSchema + +ContainerImage: TypeAlias = str +ContainerTag: TypeAlias = str +LogFileUploadURL: TypeAlias = AnyUrl +ContainerCommands: TypeAlias = list[str] +ContainerEnvsDict: TypeAlias = dict[EnvVarKey, str] +ContainerLabelsDict: TypeAlias = dict[DockerLabelKey, str] + + +class TaskOwner(BaseModel): + user_id: UserID + project_id: ProjectID + node_id: NodeID + + parent_project_id: ProjectID | None + parent_node_id: NodeID | None + + @property + def has_parent(self) -> bool: + return bool(self.parent_node_id and self.parent_project_id) + + @model_validator(mode="before") + @classmethod + def check_parent_valid(cls, values: dict[str, Any]) -> dict[str, Any]: + parent_project_id = values.get("parent_project_id") + parent_node_id = values.get("parent_node_id") + if (parent_node_id is None and parent_project_id is not None) or ( + parent_node_id is not None and parent_project_id is None + ): + msg = "either both parent_node_id and parent_project_id are None or both are set!" 
+ raise ValueError(msg) + return values + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "user_id": 32, + "project_id": "ec7e595a-63ee-46a1-a04a-901b11b649f8", + "node_id": "39467d89-b659-4914-9359-c40b1b6d1d6d", + "parent_project_id": None, + "parent_node_id": None, + }, + { + "user_id": 32, + "project_id": "ec7e595a-63ee-46a1-a04a-901b11b649f8", + "node_id": "39467d89-b659-4914-9359-c40b1b6d1d6d", + "parent_project_id": "887e595a-63ee-46a1-a04a-901b11b649f8", + "parent_node_id": "aa467d89-b659-4914-9359-c40b1b6d1d6d", + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class ContainerTaskParameters(BaseModel): + image: ContainerImage + tag: ContainerTag + input_data: TaskInputData + output_data_keys: TaskOutputDataSchema + command: ContainerCommands + envs: ContainerEnvsDict + labels: ContainerLabelsDict + boot_mode: BootMode + task_owner: TaskOwner + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "image": "ubuntu", + "tag": "latest", + "input_data": TaskInputData.model_json_schema()["examples"][0], + "output_data_keys": TaskOutputDataSchema.model_json_schema()[ + "examples" + ][0], + "command": ["sleep 10", "echo hello"], + "envs": {"MYENV": "is an env"}, + "labels": {"io.simcore.thelabel": "is amazing"}, + "boot_mode": BootMode.CPU.value, + "task_owner": TaskOwner.model_json_schema()["examples"][0], + }, + ] + } + ) + + +class ContainerRemoteFct(Protocol): + def __call__( + self, + *, + task_parameters: ContainerTaskParameters, + docker_auth: DockerBasicAuth, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, + ) -> TaskOutputData: ... diff --git a/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/utils.py b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/utils.py new file mode 100644 index 00000000000..d97b0c896c3 --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/container_tasks/utils.py @@ -0,0 +1,44 @@ +from typing import Final +from uuid import uuid4 + +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter + +from ..models import DaskJobID + + +def generate_dask_job_id( + service_key: ServiceKey, + service_version: ServiceVersion, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> DaskJobID: + """creates a dask job id: + The job ID shall contain the user_id, project_id, node_id + Also, it must be unique + and it is shown in the Dask scheduler dashboard website + """ + return DaskJobID( + f"{service_key}:{service_version}:userid_{user_id}:projectid_{project_id}:nodeid_{node_id}:uuid_{uuid4()}" + ) + + +_JOB_ID_PARTS: Final[int] = 6 + + +def parse_dask_job_id( + job_id: str, +) -> tuple[ServiceKey, ServiceVersion, UserID, ProjectID, NodeID]: + parts = job_id.split(":") + assert len(parts) == _JOB_ID_PARTS # nosec + return ( + parts[0], + parts[1], + TypeAdapter(UserID).validate_python(parts[2][len("userid_") :]), + ProjectID(parts[3][len("projectid_") :]), + NodeID(parts[4][len("nodeid_") :]), + ) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/models.py b/packages/dask-task-models-library/src/dask_task_models_library/models.py new file mode 100644 index 00000000000..2f701440d1d --- /dev/null +++ 
b/packages/dask-task-models-library/src/dask_task_models_library/models.py @@ -0,0 +1,72 @@ +from typing import Final, Literal, TypeAlias + +from dask.typing import Key +from distributed.scheduler import TaskStateState as SchedulerTaskState +from distributed.worker_state_machine import TaskStateState as WorkerTaskState +from models_library.projects_state import RunningState +from pydantic import BaseModel + +DaskJobID: TypeAlias = str +DaskResources: TypeAlias = dict[str, int | float] + +TASK_LIFE_CYCLE_EVENT: Final[str] = "task-lifecycle-{key}" +TASK_RUNNING_PROGRESS_EVENT: Final[str] = "task-progress-{key}" +_SCHEDULER_TASK_STATE_TO_RUNNING_STATE: Final[ + dict[SchedulerTaskState, RunningState] +] = { + "released": RunningState.NOT_STARTED, # Known but not actively computing or in memory + "waiting": RunningState.PENDING, # On track to be computed, waiting on dependencies to arrive in memory + "no-worker": RunningState.WAITING_FOR_RESOURCES, # Ready to be computed, but no appropriate worker exists (for example because of resource restrictions, or because no worker is connected at all). + "queued": RunningState.WAITING_FOR_RESOURCES, # Ready to be computed, but all workers are already full. + "processing": RunningState.PENDING, # All dependencies are available and the task is assigned to a worker for compute (the scheduler doesn’t know whether it’s in a worker queue or actively being computed). + "memory": RunningState.SUCCESS, # In memory on one or more workers + "erred": RunningState.FAILED, # Task computation, or one of its dependencies, has encountered an error + "forgotten": RunningState.UNKNOWN, # Task is no longer needed by any client or dependent task, so it disappears from the scheduler as well. As soon as a task reaches this state, it is immediately dereferenced from the scheduler. +} + +_WORKER_TASK_STATE_TO_RUNNING_STATE: Final[dict[WorkerTaskState, RunningState]] = { + "cancelled": RunningState.ABORTED, # The scheduler asked to forget about this task, but it’s technically impossible at the moment. See Task cancellation. The task can be found in whatever collections it was in its previous state. + "constrained": RunningState.PENDING, # Like ready, but the user specified resource constraints for this task. The task can be found in the WorkerState.constrained queue. + "error": RunningState.FAILED, # Task execution failed + "executing": RunningState.STARTED, # The task is currently being computed on a thread. It can be found in the WorkerState.executing set and in the distributed.worker.Worker.active_threads dict. + "fetch": RunningState.PENDING, # This task is in memory on one or more peer workers, but not on this worker. Its data is queued to be transferred over the network, either because it’s a dependency of a task in waiting state, or because the Active Memory Manager requested it to be replicated here. The task can be found in the WorkerState.data_needed heap. + "flight": RunningState.PENDING, # The task data is currently being transferred over the network from another worker. The task can be found in the WorkerState.in_flight_tasks and WorkerState.in_flight_workers collections. + "forgotten": RunningState.UNKNOWN, # The scheduler asked this worker to forget about the task, and there are neither dependents nor dependencies on the same worker. + "long-running": RunningState.STARTED, # Like executing, but the user code called distributed.secede() so the task no longer counts towards the maximum number of concurrent tasks. 
It can be found in the WorkerState.long_running set and in the distributed.worker.Worker.active_threads dict. + "memory": RunningState.SUCCESS, # Task execution completed, or the task was successfully transferred from another worker, and is now held in either WorkerState.data or WorkerState.actors. + "missing": RunningState.PENDING, # Like fetch, but all peer workers that were listed by the scheduler are either unreachable or have responded they don’t actually have the task data. The worker will periodically ask the scheduler if it knows of additional replicas; when it does, the task will transition again to fetch. The task can be found in the WorkerState.missing_dep_flight set. + "ready": RunningState.PENDING, # The task is ready to be computed; all of its dependencies are in memory on the current worker and it’s waiting for an available thread. The task can be found in the WorkerState.ready heap. + "released": RunningState.PENDING, # Known but not actively computing or in memory. A task can stay in this state when the scheduler asked to forget it, but it has dependent tasks on the same worker. + "rescheduled": RunningState.PENDING, # The task just raised the Reschedule exception. This is a transitory state, which is not stored permanently. + "resumed": RunningState.PENDING, # The task was recovered from cancelled state. See Task cancellation. The task can be found in whatever collections it was in its previous state. + "waiting": RunningState.PENDING, # The scheduler has added the task to the worker queue. All of its dependencies are in memory somewhere on the cluster, but not all of them are in memory on the current worker, so they need to be fetched. +} + + +class TaskLifeCycleState(BaseModel): + key: str + source: Literal["scheduler", "worker"] + worker: str | None + state: RunningState + + @classmethod + def from_scheduler_task_state( + cls, key: Key, worker: str | None, task_state: SchedulerTaskState + ) -> "TaskLifeCycleState": + return cls( + key=f"{key!r}", + source="scheduler", + worker=worker, + state=_SCHEDULER_TASK_STATE_TO_RUNNING_STATE[task_state], + ) + + @classmethod + def from_worker_task_state( + cls, key: Key, worker: str | None, task_state: WorkerTaskState + ) -> "TaskLifeCycleState": + return cls( + key=f"{key!r}", + source="worker", + worker=worker, + state=_WORKER_TASK_STATE_TO_RUNNING_STATE[task_state], + ) diff --git a/services/api-server/src/simcore_service_api_server/db/__init__.py b/packages/dask-task-models-library/src/dask_task_models_library/plugins/__init__.py similarity index 100% rename from services/api-server/src/simcore_service_api_server/db/__init__.py rename to packages/dask-task-models-library/src/dask_task_models_library/plugins/__init__.py diff --git a/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_scheduler_plugin.py b/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_scheduler_plugin.py new file mode 100644 index 00000000000..69fbc35e21e --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_scheduler_plugin.py @@ -0,0 +1,54 @@ +# pylint: disable=unused-argument +import logging +from typing import Any + +import click +from dask.typing import Key +from distributed import Scheduler, SchedulerPlugin +from distributed.scheduler import TaskStateState + +from ..models import TASK_LIFE_CYCLE_EVENT, TaskLifeCycleState + +_logger = logging.getLogger(__name__) + + +class TaskLifecycleSchedulerPlugin(SchedulerPlugin): + def 
__init__(self) -> None: + self.scheduler = None + _logger.info("initialized TaskLifecycleSchedulerPlugin") + + async def start(self, scheduler: Scheduler) -> None: + self.scheduler = scheduler # type: ignore[assignment] + _logger.info("started TaskLifecycleSchedulerPlugin") + + def transition( + self, + key: Key, + start: TaskStateState, + finish: TaskStateState, + *args: Any, # noqa: ARG002 + stimulus_id: str, + **kwargs: Any, + ): + _logger.debug( + "Task %s transition from %s to %s due to %s", + key, + start, + finish, + stimulus_id, + ) + + assert self.scheduler # nosec + + self.scheduler.log_event( + TASK_LIFE_CYCLE_EVENT.format(key=key), + TaskLifeCycleState.from_scheduler_task_state( + key, kwargs.get("worker"), finish + ).model_dump(mode="json"), + ) + + +@click.command() +def dask_setup(scheduler): + plugin = TaskLifecycleSchedulerPlugin() + scheduler.add_plugin(plugin) diff --git a/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_worker_plugin.py b/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_worker_plugin.py new file mode 100644 index 00000000000..ebc6aabcad8 --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/plugins/task_life_cycle_worker_plugin.py @@ -0,0 +1,48 @@ +import logging +from collections.abc import Awaitable +from typing import Any + +import click +from dask.typing import Key +from distributed import WorkerPlugin +from distributed.worker import Worker +from distributed.worker_state_machine import TaskStateState + +from ..models import TASK_LIFE_CYCLE_EVENT, TaskLifeCycleState + +_logger = logging.getLogger(__name__) + + +class TaskLifecycleWorkerPlugin(WorkerPlugin): + def __init__(self) -> None: + self._worker = None + _logger.info("TaskLifecycleWorkerPlugin initialized") + + def setup(self, worker: Worker) -> Awaitable[None]: + async def _() -> None: + self._worker = worker # type: ignore[assignment] + _logger.info("TaskLifecycleWorkerPlugin setup completed") + + return _() + + def transition( + self, + key: Key, + start: TaskStateState, + finish: TaskStateState, + **kwargs: Any, + ): + _logger.info("Task '%s' transition from %s to %s", key, start, finish) + assert self._worker # nosec + self._worker.log_event( + TASK_LIFE_CYCLE_EVENT.format(key=key), + TaskLifeCycleState.from_worker_task_state( + key, kwargs.get("worker"), finish + ).model_dump(mode="json"), + ) + + +@click.command() +async def dask_setup(worker: Worker) -> None: + plugin = TaskLifecycleWorkerPlugin() + await worker.plugin_add(plugin) diff --git a/services/api-server/src/simcore_service_api_server/core/errors.py b/packages/dask-task-models-library/src/dask_task_models_library/py.typed similarity index 100% rename from services/api-server/src/simcore_service_api_server/core/errors.py rename to packages/dask-task-models-library/src/dask_task_models_library/py.typed diff --git a/packages/dask-task-models-library/src/dask_task_models_library/resource_constraints.py b/packages/dask-task-models-library/src/dask_task_models_library/resource_constraints.py new file mode 100644 index 00000000000..3a81114ef87 --- /dev/null +++ b/packages/dask-task-models-library/src/dask_task_models_library/resource_constraints.py @@ -0,0 +1,18 @@ +from typing import Any, TypeAlias + +from .constants import DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY + +DaskTaskResources: TypeAlias = dict[str, Any] + + +def create_ec2_resource_constraint_key(ec2_instance_type: str) -> str: + return 
f"{DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY}:{ec2_instance_type}" + + +def get_ec2_instance_type_from_resources( + task_resources: DaskTaskResources, +) -> str | None: + for resource_name in task_resources: + if resource_name.startswith(DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY): + return resource_name.split(":")[-1] + return None diff --git a/packages/dask-task-models-library/tests/conftest.py b/packages/dask-task-models-library/tests/conftest.py index ba84b389ca9..e551898ea95 100644 --- a/packages/dask-task-models-library/tests/conftest.py +++ b/packages/dask-task-models-library/tests/conftest.py @@ -7,10 +7,9 @@ import pytest pytest_plugins = [ - "pytest_simcore.repository_paths", - "pytest_simcore.schemas", "pytest_simcore.pydantic_models", "pytest_simcore.pytest_global_environs", + "pytest_simcore.repository_paths", ] diff --git a/packages/dask-task-models-library/tests/container_tasks/test_docker.py b/packages/dask-task-models-library/tests/container_tasks/test_docker.py index 307fe175547..4eb5bc74980 100644 --- a/packages/dask-task-models-library/tests/container_tasks/test_docker.py +++ b/packages/dask-task-models-library/tests/container_tasks/test_docker.py @@ -4,7 +4,7 @@ @pytest.mark.parametrize("model_cls", [(DockerBasicAuth)]) def test_docker_models_examples(model_cls): - examples = model_cls.Config.schema_extra["examples"] + examples = model_cls.model_config["json_schema_extra"]["examples"] for index, example in enumerate(examples): print(f"{index:-^10}:\n", example) diff --git a/packages/dask-task-models-library/tests/container_tasks/test_events.py b/packages/dask-task-models-library/tests/container_tasks/test_events.py index 7c7b20465a3..2d49f7d0310 100644 --- a/packages/dask-task-models-library/tests/container_tasks/test_events.py +++ b/packages/dask-task-models-library/tests/container_tasks/test_events.py @@ -5,14 +5,14 @@ # pylint:disable=protected-access # pylint:disable=too-many-arguments + import pytest from dask_task_models_library.container_tasks.events import ( BaseTaskEvent, - TaskLogEvent, TaskProgressEvent, - TaskStateEvent, ) -from models_library.projects_state import RunningState +from dask_task_models_library.container_tasks.protocol import TaskOwner +from faker import Faker from pytest_mock.plugin import MockerFixture @@ -22,9 +22,9 @@ def test_task_event_abstract(): BaseTaskEvent(job_id="some_fake") # type: ignore -@pytest.mark.parametrize("model_cls", [TaskStateEvent, TaskProgressEvent, TaskLogEvent]) +@pytest.mark.parametrize("model_cls", [TaskProgressEvent]) def test_events_models_examples(model_cls): - examples = model_cls.Config.schema_extra["examples"] + examples = model_cls.model_config["json_schema_extra"]["examples"] for index, example in enumerate(examples): print(f"{index:-^10}:\n", example) @@ -35,34 +35,57 @@ def test_events_models_examples(model_cls): assert model_instance.topic_name() +@pytest.fixture(params=["string", "bytes"]) +def job_id(faker: Faker, request: pytest.FixtureRequest) -> str | bytes: + return faker.pystr() if request.param == "string" else faker.pystr().encode() + + @pytest.fixture() -def mocked_dask_worker_job_id(mocker: MockerFixture) -> str: +def mocked_dask_worker_job_id( + mocker: MockerFixture, job_id: str | bytes +) -> str | bytes: mock_get_worker = mocker.patch( "dask_task_models_library.container_tasks.events.get_worker", autospec=True ) - fake_job_id = "some_fake_job_id" - mock_get_worker.return_value.get_current_task.return_value = fake_job_id - return fake_job_id + 
mock_get_worker.return_value.get_current_task.return_value = job_id + return job_id -def test_task_state_from_worker(mocked_dask_worker_job_id: str): - event = TaskStateEvent.from_dask_worker( - RunningState.FAILED, msg="some test message" - ) - assert event.job_id == mocked_dask_worker_job_id - assert event.state == RunningState.FAILED - assert event.msg == "some test message" +@pytest.fixture(params=TaskOwner.model_json_schema()["examples"]) +def task_owner(request: pytest.FixtureRequest) -> TaskOwner: + return TaskOwner(**request.param) -def test_task_progress_from_worker(mocked_dask_worker_job_id: str): - event = TaskProgressEvent.from_dask_worker(0.7) +def test_task_progress_from_worker( + mocked_dask_worker_job_id: str | bytes, task_owner: TaskOwner +): + event = TaskProgressEvent.from_dask_worker(0.7, task_owner=task_owner) - assert event.job_id == mocked_dask_worker_job_id + assert ( + event.job_id == mocked_dask_worker_job_id.decode() + if isinstance(mocked_dask_worker_job_id, bytes) + else mocked_dask_worker_job_id + ) assert event.progress == 0.7 -def test_task_log_from_worker(mocked_dask_worker_job_id: str): - event = TaskLogEvent.from_dask_worker(log="here is the amazing logs") - - assert event.job_id == mocked_dask_worker_job_id - assert event.log == "here is the amazing logs" +@pytest.mark.parametrize( + "progress_value, expected_progress", [(1.5, 1), (-0.5, 0), (0.75, 0.75)] +) +def test_task_progress_progress_value_is_capped_between_0_and_1( + mocked_dask_worker_job_id: str | bytes, + task_owner: TaskOwner, + progress_value: float, + expected_progress: float, +): + event = TaskProgressEvent( + job_id=( + mocked_dask_worker_job_id.decode() + if isinstance(mocked_dask_worker_job_id, bytes) + else mocked_dask_worker_job_id + ), + task_owner=task_owner, + progress=progress_value, + ) + assert event + assert event.progress == expected_progress diff --git a/packages/dask-task-models-library/tests/container_tasks/test_io.py b/packages/dask-task-models-library/tests/container_tasks/test_io.py index 0b70e866e4e..4e780e5fd61 100644 --- a/packages/dask-task-models-library/tests/container_tasks/test_io.py +++ b/packages/dask-task-models-library/tests/container_tasks/test_io.py @@ -1,7 +1,6 @@ import json from pathlib import Path from pprint import pformat -from typing import Optional import pytest from cloudpickle import dumps, loads @@ -31,7 +30,7 @@ def test_io_models_examples(model_cls, model_cls_examples): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) - model_instance = model_cls.parse_obj(example) + model_instance = model_cls.model_validate(example) assert model_instance, f"Failed with {name}" print(name, ":", model_instance) @@ -42,7 +41,7 @@ def _create_fake_outputs( output_folder: Path, set_optional_field: bool, faker: Faker, -) -> Optional[str]: +) -> str | None: jsonable_data = {} for key, value in schema.items(): if not value.required and not set_optional_field: @@ -54,9 +53,9 @@ def _create_fake_outputs( a_file.write_text(faker.text(max_nb_chars=450)) assert a_file.exists() else: - jsonable_data[ - key - ] = "some value just for testing, does not represent any kind of type" + jsonable_data[key] = ( + "some value just for testing, does not represent any kind of type" + ) if jsonable_data: output_file = output_folder / faker.file_name() with output_file.open("wt") as fp: @@ -70,9 +69,8 @@ def _create_fake_outputs( def test_create_task_output_from_task_with_optional_fields_as_required( tmp_path: Path, optional_fields_set: bool, faker: 
Faker ): - for schema_example in TaskOutputDataSchema.Config.schema_extra["examples"]: - - task_output_schema = TaskOutputDataSchema.parse_obj(schema_example) + for schema_example in TaskOutputDataSchema.model_json_schema()["examples"]: + task_output_schema = TaskOutputDataSchema.model_validate(schema_example) outputs_file_name = _create_fake_outputs( task_output_schema, tmp_path, optional_fields_set, faker ) @@ -93,7 +91,7 @@ def test_create_task_output_from_task_with_optional_fields_as_required( def test_create_task_output_from_task_throws_when_there_are_missing_files( tmp_path: Path, faker: Faker ): - task_output_schema = TaskOutputDataSchema.parse_obj( + task_output_schema = TaskOutputDataSchema.model_validate( { "required_file_output": { "required": True, @@ -114,7 +112,7 @@ def test_create_task_output_from_task_throws_when_there_are_missing_files( def test_create_task_output_from_task_does_not_throw_when_there_are_optional_missing_files( tmp_path: Path, faker: Faker ): - task_output_schema = TaskOutputDataSchema.parse_obj( + task_output_schema = TaskOutputDataSchema.model_validate( { "optional_file_output": { "required": False, @@ -135,7 +133,7 @@ def test_create_task_output_from_task_does_not_throw_when_there_are_optional_mis def test_create_task_output_from_task_throws_when_there_are_entries( tmp_path: Path, faker: Faker ): - task_output_schema = TaskOutputDataSchema.parse_obj( + task_output_schema = TaskOutputDataSchema.model_validate( { "some_output": { "required": True, @@ -154,7 +152,7 @@ def test_create_task_output_from_task_throws_when_there_are_entries( def test_create_task_output_from_task_does_not_throw_when_there_are_optional_entries( tmp_path: Path, faker: Faker ): - task_output_schema = TaskOutputDataSchema.parse_obj( + task_output_schema = TaskOutputDataSchema.model_validate( { "some_output": { "required": False, @@ -183,6 +181,42 @@ def test_objects_are_compatible_with_dask_requirements(model_cls, model_cls_exam for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) - model_instance = model_cls.parse_obj(example) + model_instance = model_cls.model_validate(example) reloaded_instance = loads(dumps(model_instance)) assert reloaded_instance == model_instance + + +def test_create_task_output_from_task_ignores_additional_entries( + tmp_path: Path, faker: Faker +): + task_output_schema = TaskOutputDataSchema.model_validate( + { + "some_output_1": { + "required": True, + }, + "some_output_2": { + "required": True, + }, + } + ) + output_file = _create_fake_outputs(task_output_schema, tmp_path, False, faker) + assert output_file + # Add more data to the output file to simulate additional entries + file_path = tmp_path / output_file + data = json.loads(file_path.read_text()) + # Ensure the file contains the expected keys first + for key in task_output_schema: + assert key in data + # Add an extra key + data["extra_key"] = "extra_value" + file_path.write_text(json.dumps(data)) + + task_output_data = TaskOutputData.from_task_output( + schema=task_output_schema, + output_folder=tmp_path, + output_file_ext=output_file, + ) + # Only keys defined in the schema should be present + assert set(task_output_data.keys()) == set( + task_output_schema.keys() + ), "Should only contain the expected keys" diff --git a/packages/dask-task-models-library/tests/container_tasks/test_protocol.py b/packages/dask-task-models-library/tests/container_tasks/test_protocol.py new file mode 100644 index 00000000000..68f8aec751a --- /dev/null +++ 
b/packages/dask-task-models-library/tests/container_tasks/test_protocol.py @@ -0,0 +1,31 @@ +import pytest +from dask_task_models_library.container_tasks.protocol import ( + ContainerTaskParameters, + TaskOwner, +) +from faker import Faker +from pydantic import ValidationError + + +@pytest.mark.parametrize("model_cls", [TaskOwner, ContainerTaskParameters]) +def test_events_models_examples(model_cls): + examples = model_cls.model_json_schema()["examples"] + + for index, example in enumerate(examples): + print(f"{index:-^10}:\n", example) + + model_instance = model_cls(**example) + assert model_instance + + +def test_task_owner_parent_valid(faker: Faker): + invalid_task_owner_example = TaskOwner.model_json_schema()["examples"][0] + invalid_task_owner_example["parent_project_id"] = faker.uuid4() + assert invalid_task_owner_example["parent_node_id"] is None + with pytest.raises(ValidationError, match=r".+ are None or both are set!"): + TaskOwner(**invalid_task_owner_example) + + invalid_task_owner_example["parent_project_id"] = None + invalid_task_owner_example["parent_node_id"] = faker.uuid4() + with pytest.raises(ValidationError, match=r".+ are None or both are set!"): + TaskOwner(**invalid_task_owner_example) diff --git a/packages/dask-task-models-library/tests/container_tasks/test_utils.py b/packages/dask-task-models-library/tests/container_tasks/test_utils.py new file mode 100644 index 00000000000..6f2ec983cf0 --- /dev/null +++ b/packages/dask-task-models-library/tests/container_tasks/test_utils.py @@ -0,0 +1,68 @@ +# pylint: disable=too-many-positional-arguments +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable + +import pytest +from dask_task_models_library.container_tasks.utils import ( + generate_dask_job_id, + parse_dask_job_id, +) +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter + + +@pytest.fixture( + params=["simcore/service/comp/some/fake/service/key", "dockerhub-style/service_key"] +) +def service_key(request) -> ServiceKey: + return request.param + + +@pytest.fixture() +def service_version() -> str: + return "1234.32432.2344" + + +@pytest.fixture +def user_id(faker: Faker) -> UserID: + return TypeAdapter(UserID).validate_python(faker.pyint(min_value=1)) + + +@pytest.fixture +def project_id(faker: Faker) -> ProjectID: + return ProjectID(faker.uuid4()) + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return NodeID(faker.uuid4()) + + +def test_dask_job_id_serialization( + service_key: ServiceKey, + service_version: ServiceVersion, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +): + dask_job_id = generate_dask_job_id( + service_key, service_version, user_id, project_id, node_id + ) + ( + parsed_service_key, + parsed_service_version, + parsed_user_id, + parsed_project_id, + parsed_node_id, + ) = parse_dask_job_id(dask_job_id) + assert service_key == parsed_service_key + assert service_version == parsed_service_version + assert user_id == parsed_user_id + assert project_id == parsed_project_id + assert node_id == parsed_node_id diff --git a/packages/dask-task-models-library/tests/test_resource_constraints.py b/packages/dask-task-models-library/tests/test_resource_constraints.py new file mode 100644 index 00000000000..9a2c1e59e26 --- /dev/null +++ 
b/packages/dask-task-models-library/tests/test_resource_constraints.py @@ -0,0 +1,34 @@ +from dask_task_models_library.constants import DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY +from dask_task_models_library.resource_constraints import ( + create_ec2_resource_constraint_key, + get_ec2_instance_type_from_resources, +) +from faker import Faker + + +def test_create_ec2_resource_constraint_key(faker: Faker): + faker_instance_type = faker.pystr() + assert ( + create_ec2_resource_constraint_key(faker_instance_type) + == f"{DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY}:{faker_instance_type}" + ) + + empty_instance_type = "" + assert ( + create_ec2_resource_constraint_key(empty_instance_type) + == f"{DASK_TASK_EC2_RESOURCE_RESTRICTION_KEY}:" + ) + + +def test_get_ec2_instance_type_from_resources(faker: Faker): + empty_task_resources = {} + assert get_ec2_instance_type_from_resources(empty_task_resources) is None + no_ec2_types_in_resources = {"blahblah": 1} + assert get_ec2_instance_type_from_resources(no_ec2_types_in_resources) is None + + faker_instance_type = faker.pystr() + ec2_type_in_resources = {create_ec2_resource_constraint_key(faker_instance_type): 1} + assert ( + get_ec2_instance_type_from_resources(ec2_type_in_resources) + == faker_instance_type + ) diff --git a/packages/models-library/Makefile b/packages/models-library/Makefile index 0279fbedfa8..01e5b586df2 100644 --- a/packages/models-library/Makefile +++ b/packages/models-library/Makefile @@ -12,7 +12,7 @@ requirements: ## compiles pip requirements (.in -> .txt) .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests tests-ci @@ -40,6 +40,7 @@ tests-ci: ## runs unit tests [ci-mode] --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=models_library \ --durations=10 \ --log-date-format="%Y-%m-%d %H:%M:%S" \ @@ -48,20 +49,13 @@ tests-ci: ## runs unit tests [ci-mode] -m "not heavy_load" \ $(CURDIR)/tests -.PHONY: project-jsonschema.ignore.json -project-jsonschema.ignore.json: ## creates project-v0.0.1.json for DEV purposes - python3 -c "from models_library.projects import Project; print(Project.schema_json(indent=2))" > $@ - -.PHONY: service-jsonschema.ignore.json -node-meta-jsonschema.ignore.json: ## creates node-meta-v0.0.1.json for DEV purposes - python3 -c "from models_library.services import ServiceDockerData as cls; print(cls.schema_json(indent=2))" > $@ DOCKER_API_VERSION ?= 1.41 .PHONY: docker_rest_api.py docker_rest_api.py: ## auto-generates pydantic models for Docker REST API models # auto-generates $@ from $< @$(SCRIPTS_DIR)/openapi-pydantic-models-generator.bash \ - --url https://docs.docker.com/engine/api/v$(DOCKER_API_VERSION).yaml \ + --url https://docs.docker.com/reference/api/engine/version/v$(DOCKER_API_VERSION).yaml \ --output $@ # formats @@ -84,7 +78,7 @@ docker_rest_api.py: ## auto-generates pydantic models for Docker REST API models .PHONY: _erdantic _erdantic: _check_venv_active # ensures erdantic installed - @python3 -c "import erdantic" 2>/dev/null || pip install erdantic + @python3 -c "import erdantic" 2>/dev/null || uv pip install erdantic erd-Project.svg: _erdantic erdantic models_library.projects.Project \ @@ -110,14 +104,15 @@ erd-ServiceInput.svg: _erdantic 
DOWNLOADED_TEST_DATA_DIR = "$(CURDIR)/tests/data/.downloaded-ignore" .PHONY: _httpx -_httpx: _check_venv_active +_ensure_httpx: _check_venv_active # ensures requirements installed - @python3 -c "import httpx" 2>/dev/null || pip install httpx + @python3 -c "import httpx" 2>/dev/null || uv pip install httpx -PHONY: pull_test_data -pull_test_data: $(DOT_ENV_FILE) _httpx ## downloads tests data from registry (this can take some time!) - # downloading all metadata files +PHONY: tests-data +tests-data: $(DOT_ENV_FILE) _ensure_httpx ## downloads tests data from registry defined in .env (time-intensive!) + # Downloading all metadata files ... @set -o allexport; \ source $<; \ set +o allexport; \ - python3 "$(PACKAGES_DIR)/pytest-simcore/src/pytest_simcore/helpers/utils_docker_registry.py" $(DOWNLOADED_TEST_DATA_DIR) + python3 "$(PACKAGES_DIR)/pytest-simcore/src/pytest_simcore/helpers/docker_registry.py" $(DOWNLOADED_TEST_DATA_DIR) + @echo "Run now 'pytest -vv -m diagnostics tests'" diff --git a/packages/models-library/requirements/_base.in b/packages/models-library/requirements/_base.in index 93bb35df56e..b33d20bdd6b 100644 --- a/packages/models-library/requirements/_base.in +++ b/packages/models-library/requirements/_base.in @@ -2,6 +2,11 @@ # Specifies third-party dependencies for 'models-library' # --constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in +arrow jsonschema +orjson pydantic[email] +pydantic-settings +pydantic-extra-types diff --git a/packages/models-library/requirements/_base.txt b/packages/models-library/requirements/_base.txt index 929883fcf41..9daa42c4b0a 100644 --- a/packages/models-library/requirements/_base.txt +++ b/packages/models-library/requirements/_base.txt @@ -1,22 +1,66 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -attrs==22.2.0 - # via jsonschema -dnspython==2.3.0 +annotated-types==0.7.0 + # via pydantic +arrow==1.3.0 + # via -r requirements/_base.in +attrs==25.1.0 + # via + # jsonschema + # referencing +dnspython==2.7.0 # via email-validator -email-validator==1.3.1 +email-validator==2.2.0 # via pydantic -idna==3.4 +idna==3.10 # via email-validator -jsonschema==4.17.3 - # via -r requirements/_base.in -pydantic==1.10.2 +jsonschema==4.23.0 # via -r requirements/_base.in -pyrsistent==0.19.3 +jsonschema-specifications==2024.10.1 # via jsonschema -typing-extensions==4.5.0 +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/_base.in +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/_base.in + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via pydantic-settings +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +rpds-py==0.23.1 + # via + # jsonschema + # referencing +six==1.17.0 + # via python-dateutil +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 + # via + # pydantic + # pydantic-core + # pydantic-extra-types diff --git a/packages/models-library/requirements/_test.in b/packages/models-library/requirements/_test.in index 94c50d1c257..60082df116f 100644 --- a/packages/models-library/requirements/_test.in +++ b/packages/models-library/requirements/_test.in @@ -8,17 +8,20 @@ # --constraint _base.txt ---requirement ../../../packages/postgres-database/requirements/_base.in coverage -coveralls faker pint +psutil pytest -pytest-aiohttp # incompatible with pytest-asyncio. See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-asyncio pytest-cov pytest-icdiff pytest-instafail pytest-mock pytest-runner pytest-sugar +python-dotenv pyyaml +yarl +types-jsonschema +types-PyYAML diff --git a/packages/models-library/requirements/_test.txt b/packages/models-library/requirements/_test.txt index 6cc2d98d6e6..f54c9d6f0f7 100644 --- a/packages/models-library/requirements/_test.txt +++ b/packages/models-library/requirements/_test.txt @@ -1,129 +1,96 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 - # via - # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt - # pytest-aiohttp -aiosignal==1.3.1 - # via aiohttp -alembic==1.9.4 - # via -r requirements/../../../packages/postgres-database/requirements/_base.in -async-timeout==4.0.2 - # via aiohttp -attrs==22.2.0 +attrs==25.1.0 # via # -c requirements/_base.txt - # aiohttp - # pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.0.1 - # via - # aiohttp - # requests -coverage==6.5.0 + # referencing +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 +faker==36.1.1 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 - # via -r requirements/_test.in -frozenlist==1.3.3 - # via - # aiohttp - # aiosignal -greenlet==2.0.2 - # via sqlalchemy -icdiff==2.0.6 +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +icdiff==2.0.7 # via pytest-icdiff -idna==3.4 +idna==3.10 # via # -c requirements/_base.txt - # requests # yarl iniconfig==2.0.0 # via pytest -mako==1.2.4 - # via - # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt - # alembic -markupsafe==2.1.2 - # via mako -multidict==6.0.4 - # via - # aiohttp - # yarl -packaging==23.0 +multidict==6.1.0 + # via yarl +packaging==24.2 # via # pytest # pytest-sugar -pint==0.20.1 +pint==0.24.4 # via -r requirements/_test.in -pluggy==1.0.0 +platformdirs==4.3.6 + # via pint +pluggy==1.5.0 # via pytest pprintpp==0.4.0 # via pytest-icdiff -psycopg2-binary==2.9.5 - # via sqlalchemy -pytest==7.2.1 +propcache==0.3.0 + # via yarl +psutil==7.0.0 + # via -r requirements/_test.in +pytest==8.3.5 # via 
# -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-icdiff # pytest-instafail # pytest-mock # pytest-sugar -pytest-aiohttp==1.0.4 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -python-dateutil==2.8.2 - # via faker -pyyaml==6.0 - # via -r requirements/_test.in -requests==2.28.2 - # via coveralls -six==1.16.0 - # via python-dateutil -sqlalchemy==1.4.46 +python-dotenv==1.0.1 # via - # -r requirements/../../../packages/postgres-database/requirements/_base.in - # alembic -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 # via - # coverage - # pytest -urllib3==1.26.14 + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +referencing==0.35.1 # via - # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt - # requests -yarl==1.8.2 + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # types-jsonschema +rpds-py==0.23.1 # via - # -r requirements/../../../packages/postgres-database/requirements/_base.in - # aiohttp + # -c requirements/_base.txt + # referencing +termcolor==2.5.0 + # via pytest-sugar +types-jsonschema==4.23.0.20241208 + # via -r requirements/_test.in +types-pyyaml==6.0.12.20241230 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # flexcache + # flexparser + # pint +tzdata==2025.1 + # via faker +yarl==1.18.3 + # via -r requirements/_test.in diff --git a/packages/models-library/requirements/_tools.txt b/packages/models-library/requirements/_tools.txt index 9fbc4fa6452..3ae7f8fc714 100644 --- a/packages/models-library/requirements/_tools.txt +++ b/packages/models-library/requirements/_tools.txt @@ -1,92 +1,95 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # black # pip-tools # typer -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid +markdown-it-py==3.0.0 + # via rich mccabe==0.7.0 # via pylint +mdurl==0.1.2 + # via markdown-it-py +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via # -c 
requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via + # -c requirements/_test.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pygments==2.19.1 + # via rich +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==6.0 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # pre-commit -tomli==2.0.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pre-commit +rich==13.9.4 + # via typer +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +shellingham==1.5.4 + # via typer +tomlkit==0.13.2 # via pylint -typer==0.7.0 +typer==0.15.2 # via -r requirements/_tools.in -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy + # typer +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/models-library/requirements/ci.txt b/packages/models-library/requirements/ci.txt index 4a217c5215d..fa3c1d99410 100644 --- a/packages/models-library/requirements/ci.txt +++ b/packages/models-library/requirements/ci.txt @@ -9,10 +9,12 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../postgres-database/[migration] -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-postgres-database[migration] @ ../postgres-database/ +pytest-simcore @ ../pytest-simcore # current module -. +simcore-models-library @ . 
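Note (illustration, not part of the diff): the _test.in change above swaps pytest-aiohttp for pytest-asyncio, and setup.cfg further below enables asyncio_mode = auto. Under those assumptions an async test needs no explicit asyncio marker; the model below is a hypothetical stand-in used only to keep the sketch self-contained.

import asyncio

from pydantic import BaseModel


class ExampleModel(BaseModel):  # hypothetical model, not part of models-library
    name: str
    sleep_interval: int = 2


async def test_async_model_roundtrip():
    # collected and run by pytest-asyncio because asyncio_mode = auto (see setup.cfg),
    # so no @pytest.mark.asyncio decorator is required
    model = ExampleModel.model_validate({"name": "sleeper", "sleep_interval": 1})
    await asyncio.sleep(0)  # stands in for awaiting real async application code
    assert model.model_dump() == {"name": "sleeper", "sleep_interval": 1}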
diff --git a/packages/models-library/requirements/dev.txt b/packages/models-library/requirements/dev.txt index 901530f3644..e8372a6f3f6 100644 --- a/packages/models-library/requirements/dev.txt +++ b/packages/models-library/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../common-library/ --editable ../postgres-database/[migration] --editable ../pytest-simcore/ diff --git a/packages/models-library/scripts/validate-pg-projects.py b/packages/models-library/scripts/validate-pg-projects.py old mode 100755 new mode 100644 index e74b438698d..648b6846876 --- a/packages/models-library/scripts/validate-pg-projects.py +++ b/packages/models-library/scripts/validate-pg-projects.py @@ -4,14 +4,10 @@ import typer from models_library.projects import ProjectAtDB -from pydantic import Json, ValidationError, validator -from pydantic.main import Extra +from pydantic import ConfigDict, Json, ValidationError, field_validator class ProjectFromCsv(ProjectAtDB): - class Config(ProjectAtDB.Config): - extra = Extra.forbid - # TODO: missing in ProjectAtDB access_rights: Json @@ -22,9 +18,11 @@ class Config(ProjectAtDB.Config): hidden: bool + model_config = ConfigDict(extra="forbid") + # NOTE: validators introduced to parse CSV - @validator("published", "hidden", pre=True, check_fields=False) + @field_validator("published", "hidden", mode="before", check_fields=False) @classmethod def empty_str_as_false(cls, v): # See booleans for >v1.0 https://pydantic-docs.helpmanual.io/usage/types/#booleans @@ -32,7 +30,7 @@ def empty_str_as_false(cls, v): return False return v - @validator("workbench", pre=True, check_fields=False) + @field_validator("workbench", mode="before", check_fields=False) @classmethod def jsonstr_to_dict(cls, v): if isinstance(v, str): @@ -61,12 +59,12 @@ def validate_csv_exported_pg_project( pid = row.get("uuid", index + 1) try: - model = ProjectFromCsv.parse_obj(row) + model = ProjectFromCsv.model_validate(row) if verbose > 1: typer.secho(f"{pid} OK", fg=typer.colors.GREEN) if verbose > 2: - typer.echo(model.json(indent=2)) + typer.echo(model.model_dump_json(indent=2)) except ValidationError as err: failed.append(pid) typer.secho( diff --git a/packages/models-library/setup.cfg b/packages/models-library/setup.cfg index f7b1dd020a1..b483a024d04 100644 --- a/packages/models-library/setup.cfg +++ b/packages/models-library/setup.cfg @@ -15,5 +15,11 @@ test = pytest [tool:pytest] asyncio_mode = auto -markers = +asyncio_default_fixture_loop_scope = function +markers = diagnostics: "can be used to run diagnostics against deployed data (e.g. 
database, registry etc)" + testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/packages/models-library/setup.py b/packages/models-library/setup.py index d57014008d1..fe1c4c89991 100644 --- a/packages/models-library/setup.py +++ b/packages/models-library/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -30,30 +29,31 @@ def read_reqs(reqs_path: Path) -> Set[str]: ) # STRICK requirements -SETUP = dict( - name="simcore-models-library", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author="Sylvain Anderegg (sanderegg)", - description="Core service library for simcore pydantic models", - python_requires="~=3.9", - classifiers=[ +SETUP = { + "name": "simcore-models-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Sylvain Anderegg (sanderegg)", + "description": "Core service library for simcore pydantic models", + "python_requires": ">=3.10", + "classifiers": [ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], - long_description=Path(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - install_requires=INSTALL_REQUIREMENTS, - packages=find_packages(where="src"), - package_dir={"": "src"}, - include_package_data=True, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - zip_safe=False, -) + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "include_package_data": True, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} if __name__ == "__main__": diff --git a/packages/models-library/src/models_library/__init__.py b/packages/models-library/src/models_library/__init__.py index 767abb3e3d1..94f1dfccf3c 100644 --- a/packages/models-library/src/models_library/__init__.py +++ b/packages/models-library/src/models_library/__init__.py @@ -1,11 +1,12 @@ """ osparc's service models library """ + # # NOTE: # - "examples" = [ ...] keyword and NOT "example". 
See https://json-schema.org/understanding-json-schema/reference/generic.html#annotations # -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution("simcore-models-library").version +__version__: str = version("simcore-models-library") diff --git a/packages/models-library/src/models_library/access_rights.py b/packages/models-library/src/models_library/access_rights.py new file mode 100644 index 00000000000..a66bdf168b8 --- /dev/null +++ b/packages/models-library/src/models_library/access_rights.py @@ -0,0 +1,28 @@ +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field + + +class AccessRights(BaseModel): + read: Annotated[bool, Field(description="has read access")] + write: Annotated[bool, Field(description="has write access")] + delete: Annotated[bool, Field(description="has deletion rights")] + + model_config = ConfigDict(extra="forbid") + + def verify_access_integrity(self): + """Helper function that checks extra constraints in access-rights flags""" + if self.write and not self.read: + msg = "Write access requires read access" + raise ValueError(msg) + if self.delete and not self.write: + msg = "Delete access requires read access" + raise ValueError(msg) + return self + + +class ExecutableAccessRights(BaseModel): + write: Annotated[bool, Field(description="can change executable settings")] + execute: Annotated[bool, Field(description="can run executable")] + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/aiodocker_api.py b/packages/models-library/src/models_library/aiodocker_api.py index e187419249d..2093f8ff8c1 100644 --- a/packages/models-library/src/models_library/aiodocker_api.py +++ b/packages/models-library/src/models_library/aiodocker_api.py @@ -1,6 +1,4 @@ -from typing import Optional - -from pydantic import Field, validator +from pydantic import Field, field_validator from .generated_models.docker_rest_api import ( ContainerSpec, @@ -9,16 +7,16 @@ ServiceSpec, TaskSpec, ) -from .utils.converters import to_snake_case class AioDockerContainerSpec(ContainerSpec): - Env: Optional[dict[str, Optional[str]]] = Field( - None, - description="aiodocker expects here a dictionary and re-convert it back internally`.\n", + env: dict[str, str | None] | None = Field( # type: ignore[assignment] + default=None, + alias="Env", + description="aiodocker expects here a dictionary and re-convert it back internally", ) - @validator("Env", pre=True) + @field_validator("env", mode="before") @classmethod def convert_list_to_dict(cls, v): if v is not None and isinstance(v, list): @@ -35,29 +33,18 @@ def convert_list_to_dict(cls, v): class AioDockerResources1(Resources1): # NOTE: The Docker REST API documentation is wrong!!! # Do not set that back to singular Reservation. 
- Reservation: Optional[ResourceObject] = Field( + reservation: ResourceObject | None = Field( None, description="Define resources reservation.", alias="Reservations" ) - class Config(Resources1.Config): - allow_population_by_field_name = True - class AioDockerTaskSpec(TaskSpec): - ContainerSpec: Optional[AioDockerContainerSpec] = Field( - None, + container_spec: AioDockerContainerSpec | None = Field( + default=None, alias="ContainerSpec" ) - Resources: Optional[AioDockerResources1] = Field( - None, - description="Resource requirements which apply to each individual container created\nas part of the service.\n", - ) + resources: AioDockerResources1 | None = Field(default=None, alias="Resources") class AioDockerServiceSpec(ServiceSpec): - - TaskTemplate: Optional[AioDockerTaskSpec] = None - - class Config(ServiceSpec.Config): - alias_generator = to_snake_case - allow_population_by_field_name = True + task_template: AioDockerTaskSpec | None = Field(default=None, alias="TaskTemplate") diff --git a/services/api-server/src/simcore_service_api_server/modules/__init__.py b/packages/models-library/src/models_library/api_schemas__common/__init__.py similarity index 100% rename from services/api-server/src/simcore_service_api_server/modules/__init__.py rename to packages/models-library/src/models_library/api_schemas__common/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas__common/errors.py b/packages/models-library/src/models_library/api_schemas__common/errors.py new file mode 100644 index 00000000000..d1f7d6aa34d --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas__common/errors.py @@ -0,0 +1,26 @@ +import http +from typing import Any + +from pydantic import BaseModel, Field + +from ..basic_types import IDStr + + +class DefaultApiError(BaseModel): + name: IDStr = Field( + ..., + description="Error identifier as a code or a name. 
" + "Mainly for machine-machine communication purposes.", + ) + detail: Any | None = Field(default=None, description="Human readable error message") + + @classmethod + def from_status_code( + cls, code: int, *, detail: str | None = None + ) -> "DefaultApiError": + httplib_code = http.HTTPStatus(code) + + return cls( + name=f"{code}", # type: ignore[arg-type] + detail=detail or httplib_code.description or httplib_code.phrase, + ) diff --git a/packages/models-library/src/models_library/api_schemas__common/health.py b/packages/models-library/src/models_library/api_schemas__common/health.py new file mode 100644 index 00000000000..827ec533418 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas__common/health.py @@ -0,0 +1,12 @@ +from pydantic import BaseModel, ConfigDict + + +class HealthCheckGet(BaseModel): + timestamp: str + model_config = ConfigDict( + json_schema_extra={ + "example": { + "timestamp": "simcore_service_directorv2.api.routes.health@2023-07-03T12:59:12.024551+00:00" + } + } + ) diff --git a/packages/models-library/src/models_library/api_schemas__common/meta.py b/packages/models-library/src/models_library/api_schemas__common/meta.py new file mode 100644 index 00000000000..514abdc7d6d --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas__common/meta.py @@ -0,0 +1,21 @@ +from pydantic import BaseModel, ConfigDict, Field + +from ..basic_types import VersionStr + + +class BaseMeta(BaseModel): + name: str + version: VersionStr + released: dict[str, VersionStr] | None = Field( + default=None, description="Maps every route's path tag with a released version" + ) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "name": "simcore_service_foo", + "version": "2.4.45", + "released": {"v1": "1.3.4", "v2": "2.4.45"}, + } + } + ) diff --git a/services/catalog/src/simcore_service_catalog/api/dependencies/__init__.py b/packages/models-library/src/models_library/api_schemas_api_server/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/api/dependencies/__init__.py rename to packages/models-library/src/models_library/api_schemas_api_server/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_api_server/api_keys.py b/packages/models-library/src/models_library/api_schemas_api_server/api_keys.py new file mode 100644 index 00000000000..999cb2f192c --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_api_server/api_keys.py @@ -0,0 +1,18 @@ +from pydantic import BaseModel, ConfigDict, Field, SecretStr + + +class ApiKey(BaseModel): + api_key: str + api_secret: SecretStr + + +class ApiKeyInDB(BaseModel): + api_key: str + api_secret: str + + id_: int = Field(0, alias="id") + display_name: str + user_id: int + product_name: str + + model_config = ConfigDict(from_attributes=True) diff --git a/packages/models-library/src/models_library/api_schemas_api_server/functions.py b/packages/models-library/src/models_library/api_schemas_api_server/functions.py new file mode 100644 index 00000000000..988544b13fc --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_api_server/functions.py @@ -0,0 +1,77 @@ +# pylint: disable=unused-import + +from ..functions import ( + Function, + FunctionClass, + FunctionClassSpecificData, + FunctionID, + FunctionIDNotFoundError, + FunctionInputs, + FunctionInputSchema, + FunctionInputsList, + FunctionInputsValidationError, + FunctionJob, + FunctionJobClassSpecificData, + FunctionJobCollection, + 
FunctionJobCollectionID, + FunctionJobCollectionIDNotFoundError, + FunctionJobCollectionsListFilters, + FunctionJobCollectionStatus, + FunctionJobID, + FunctionJobIDNotFoundError, + FunctionJobStatus, + FunctionOutputs, + FunctionOutputSchema, + FunctionSchemaClass, + JSONFunctionInputSchema, + JSONFunctionOutputSchema, + ProjectFunction, + ProjectFunctionJob, + RegisteredFunction, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, + RegisteredProjectFunction, + RegisteredProjectFunctionJob, + SolverFunction, + SolverFunctionJob, + UnsupportedFunctionClassError, + UnsupportedFunctionFunctionJobClassCombinationError, +) + +__all__ = [ + "Function", + "FunctionClass", + "FunctionClassSpecificData", + "FunctionID", + "FunctionIDNotFoundError", + "FunctionInputSchema", + "FunctionInputs", + "FunctionInputsList", + "FunctionInputsValidationError", + "FunctionJob", + "FunctionJobClassSpecificData", + "FunctionJobCollection", + "FunctionJobCollectionID", + "FunctionJobCollectionIDNotFoundError", + "FunctionJobCollectionStatus", + "FunctionJobCollectionsListFilters", + "FunctionJobID", + "FunctionJobIDNotFoundError", + "FunctionJobStatus", + "FunctionOutputSchema", + "FunctionOutputs", + "FunctionSchemaClass", + "JSONFunctionInputSchema", + "JSONFunctionOutputSchema", + "ProjectFunction", + "ProjectFunctionJob", + "RegisteredFunction", + "RegisteredFunctionJob", + "RegisteredFunctionJobCollection", + "RegisteredProjectFunction", + "RegisteredProjectFunctionJob", + "SolverFunction", + "SolverFunctionJob", + "UnsupportedFunctionClassError", + "UnsupportedFunctionFunctionJobClassCombinationError", +] diff --git a/packages/models-library/src/models_library/api_schemas_api_server/pricing_plans.py b/packages/models-library/src/models_library/api_schemas_api_server/pricing_plans.py new file mode 100644 index 00000000000..4c5ecc4e746 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_api_server/pricing_plans.py @@ -0,0 +1,15 @@ +from datetime import datetime + +from ..api_schemas_webserver._base import OutputSchema +from ..api_schemas_webserver.resource_usage import PricingUnitGet +from ..resource_tracker import PricingPlanClassification, PricingPlanId + + +class ServicePricingPlanGet(OutputSchema): + pricing_plan_id: PricingPlanId + display_name: str + description: str + classification: PricingPlanClassification + created_at: datetime + pricing_plan_key: str + pricing_units: list[PricingUnitGet] diff --git a/packages/models-library/src/models_library/api_schemas_catalog/__init__.py b/packages/models-library/src/models_library/api_schemas_catalog/__init__.py new file mode 100644 index 00000000000..2e8c8f75a24 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +CATALOG_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter(RPCNamespace).validate_python( + "catalog" +) diff --git a/packages/models-library/src/models_library/api_schemas_catalog/_base.py b/packages/models-library/src/models_library/api_schemas_catalog/_base.py new file mode 100644 index 00000000000..35930723500 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/_base.py @@ -0,0 +1,9 @@ +from pydantic import BaseModel + + +class CatalogInputSchema(BaseModel): + ... + + +class CatalogOutputSchema(BaseModel): + ... 
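Note (illustration, not part of the diff): a minimal sketch of how the new access_rights and api_schemas__common.errors modules added above could be exercised, assuming the updated models-library package is importable. Note also that the second check in verify_access_integrity tests self.write although its error message mentions read access.

from models_library.access_rights import AccessRights
from models_library.api_schemas__common.errors import DefaultApiError

# consistent flags pass the extra integrity check added in access_rights.py
rights = AccessRights(read=True, write=True, delete=False)
rights.verify_access_integrity()

# inconsistent flags (write without read) raise a ValueError
try:
    AccessRights(read=False, write=True, delete=False).verify_access_integrity()
except ValueError as err:
    print(err)  # "Write access requires read access"

# DefaultApiError.from_status_code fills the detail from http.HTTPStatus metadata
error = DefaultApiError.from_status_code(404)
print(error.model_dump_json())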
diff --git a/packages/models-library/src/models_library/api_schemas_catalog/service_access_rights.py b/packages/models-library/src/models_library/api_schemas_catalog/service_access_rights.py new file mode 100644 index 00000000000..b4aa1173adc --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/service_access_rights.py @@ -0,0 +1,10 @@ +from pydantic import BaseModel + +from ..groups import GroupID +from ..services import ServiceKey, ServiceVersion + + +class ServiceAccessRightsGet(BaseModel): + service_key: ServiceKey + service_version: ServiceVersion + gids_with_access_rights: dict[GroupID, dict[str, bool]] diff --git a/packages/models-library/src/models_library/api_schemas_catalog/services.py b/packages/models-library/src/models_library/api_schemas_catalog/services.py new file mode 100644 index 00000000000..f94d6be84ef --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/services.py @@ -0,0 +1,446 @@ +from datetime import datetime +from typing import Annotated, Any, TypeAlias + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.rpc_pagination import PageRpc +from pydantic import ConfigDict, Field, HttpUrl, NonNegativeInt +from pydantic.config import JsonDict + +from ..boot_options import BootOptions +from ..emails import LowerCaseEmailStr +from ..groups import GroupID +from ..rest_filters import Filters +from ..services_access import ServiceAccessRights, ServiceGroupAccessRightsV2 +from ..services_authoring import Author +from ..services_enums import ServiceType +from ..services_history import ServiceRelease +from ..services_metadata_editable import ServiceMetaDataEditable +from ..services_metadata_published import ( + ServiceInputsDict, + ServiceMetaDataPublished, + ServiceOutputsDict, +) +from ..services_resources import ServiceResourcesDict +from ..services_types import ServiceKey, ServiceVersion +from ..utils.change_case import snake_to_camel +from ._base import CatalogInputSchema, CatalogOutputSchema + +_EXAMPLE_FILEPICKER: dict[str, Any] = { + "name": "File Picker", + "thumbnail": None, + "description": "description", + "classifiers": [], + "quality": {}, + "accessRights": { + "1": {"execute_access": True, "write_access": False}, + "4": {"execute_access": True, "write_access": True}, + }, + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "type": "dynamic", + "authors": [ + { + "name": "Red Pandas", + "email": "redpandas@wonderland.com", + "affiliation": None, + } + ], + "contact": "redpandas@wonderland.com", + "inputs": {}, + "outputs": { + "outFile": { + "displayOrder": 0, + "label": "File", + "description": "Chosen File", + "type": "data:*/*", + "fileToKeyMap": None, + "widget": None, + } + }, + "owner": "redpandas@wonderland.com", +} + +_EXAMPLE_FILEPICKER_V2 = { + **_EXAMPLE_FILEPICKER, + "accessRights": { + "1": {"execute": True, "write": False}, + "4": {"execute": True, "write": True}, + }, +} + + +_EXAMPLE_SLEEPER: dict[str, Any] = { + "name": "sleeper", + "thumbnail": None, + "description": "A service which awaits for time to pass, two times.", + "description_ui": True, + "icon": "https://cdn-icons-png.flaticon.com/512/25/25231.png", + "classifiers": [], + "quality": {}, + "accessRights": {"1": {"execute": True, "write": False}}, + "key": "simcore/services/comp/itis/sleeper", + "version": "2.2.1", + "version_display": "2 Xtreme", + "type": "computational", + "authors": [ + { + "name": "Author Bar", + "email": "author@acme.com", + "affiliation": "ACME", + }, + ], + 
"contact": "contact@acme.com", + "inputs": { + "input_1": { + "displayOrder": 1, + "label": "File with int number", + "description": "Pick a file containing only one integer", + "type": "data:text/plain", + "fileToKeyMap": {"single_number.txt": "input_1"}, + }, + "input_2": { + "label": "Sleep interval", + "description": "Choose an amount of time to sleep in range [0:]", + "displayOrder": 2, + "type": "integer", + "defaultValue": 2, + }, + "input_3": { + "displayOrder": 3, + "label": "Fail after sleep", + "description": "If set to true will cause service to fail after it sleeps", + "type": "boolean", + "defaultValue": False, + }, + "input_4": { + "label": "Distance to bed", + "description": "It will first walk the distance to bed", + "displayOrder": 4, + "type": "integer", + "defaultValue": 0, + }, + "input_5": { + "label": "Dream (or nightmare) of the night", + "description": "Defines the size of the dream that will be generated [0:]", + "displayOrder": 5, + "type": "integer", + "defaultValue": 0, + }, + }, + "outputs": { + "output_1": { + "displayOrder": 1, + "label": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + "type": "data:text/plain", + "fileToKeyMap": {"single_number.txt": "output_1"}, + }, + "output_2": { + "label": "Random sleep interval", + "description": "Interval is generated in range [1-9]", + "displayOrder": 2, + "type": "integer", + }, + "output_3": { + "displayOrder": 3, + "label": "Dream output", + "description": "Contains some random data representing a dream", + "type": "data:text/plain", + "fileToKeyMap": {"dream.txt": "output_3"}, + }, + }, + "owner": "owner@acme.com", +} + + +class ServiceGet( + ServiceMetaDataPublished, ServiceAccessRights, ServiceMetaDataEditable +): # pylint: disable=too-many-ancestors + owner: Annotated[ + LowerCaseEmailStr | None, + Field(description="None when the owner email cannot be found in the database"), + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update({"examples": [_EXAMPLE_FILEPICKER, _EXAMPLE_SLEEPER]}) + + model_config = ConfigDict( + extra="ignore", + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) + + +class ServiceSummary(CatalogOutputSchema): + key: ServiceKey + version: ServiceVersion + name: str + description: str + version_display: str | None = None + contact: LowerCaseEmailStr | None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "key": _EXAMPLE_SLEEPER["key"], + "version": _EXAMPLE_SLEEPER["version"], + "name": _EXAMPLE_SLEEPER["name"], + "description": _EXAMPLE_SLEEPER["description"], + "version_display": _EXAMPLE_SLEEPER["version_display"], + "contact": _EXAMPLE_SLEEPER["contact"], + }, + { + "key": _EXAMPLE_SLEEPER["key"], + "version": "100.0.0", + "name": "sleeper", + "description": "short description", + "version_display": "HUGE Release", + "contact": "contact@acme.com", + }, + { + "key": _EXAMPLE_FILEPICKER["key"], + "version": _EXAMPLE_FILEPICKER["version"], + "name": _EXAMPLE_FILEPICKER["name"], + "description": _EXAMPLE_FILEPICKER["description"], + "version_display": None, + "contact": _EXAMPLE_FILEPICKER["contact"], + }, + ] + } + ) + + model_config = ConfigDict( + extra="ignore", + populate_by_name=True, + alias_generator=snake_to_camel, + json_schema_extra=_update_json_schema_extra, + ) + + +class _BaseServiceGetV2(ServiceSummary): + service_type: Annotated[ServiceType, Field(alias="type")] + + thumbnail: HttpUrl | None 
= None + icon: HttpUrl | None = None + + description_ui: bool = False + + authors: Annotated[list[Author], Field(min_length=1)] + owner: Annotated[ + LowerCaseEmailStr | None, + Field(description="None when the owner email cannot be found in the database"), + ] + + inputs: ServiceInputsDict + outputs: ServiceOutputsDict + + boot_options: BootOptions | None = None + min_visible_inputs: NonNegativeInt | None = None + + access_rights: dict[GroupID, ServiceGroupAccessRightsV2] | None + + classifiers: Annotated[ + list[str] | None, + Field(default_factory=list), + ] = DEFAULT_FACTORY + + quality: Annotated[ + dict[str, Any], + Field(default_factory=dict), + ] = DEFAULT_FACTORY + + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + alias_generator=snake_to_camel, + json_schema_extra={"example": _EXAMPLE_SLEEPER}, + ) + + +class LatestServiceGet(_BaseServiceGetV2): + release: Annotated[ + ServiceRelease, + Field(description="release information of current (latest) service"), + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + **_EXAMPLE_SLEEPER, # v2.2.1 (latest) + "release": { + "version": _EXAMPLE_SLEEPER["version"], + "version_display": "Summer Release", + "released": "2025-07-20T15:00:00", + }, + } + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class ServiceGetV2(_BaseServiceGetV2): + history: Annotated[ + list[ServiceRelease], + Field( + default_factory=list, + description="history of releases for this service at this point in time, starting from the newest to the oldest." + " It includes current release.", + json_schema_extra={"default": []}, + ), + ] = DEFAULT_FACTORY + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + **_EXAMPLE_SLEEPER, # v2.2.1 (latest) + "history": [ + { + "version": _EXAMPLE_SLEEPER["version"], + "version_display": "Summer Release", + "released": "2024-07-21T15:00:00", + }, + { + "version": "2.0.0", + "compatibility": { + "canUpdateTo": { + "version": _EXAMPLE_SLEEPER["version"] + }, + }, + }, + {"version": "0.9.11"}, + {"version": "0.9.10"}, + { + "version": "0.9.8", + "compatibility": { + "canUpdateTo": {"version": "0.9.11"}, + }, + }, + { + "version": "0.9.1", + "versionDisplay": "Matterhorn", + "released": "2024-01-20T18:49:17", + "compatibility": { + "can_update_to": {"version": "0.9.11"}, + }, + }, + { + "version": "0.9.0", + "retired": "2024-07-20T16:00:00", + }, + {"version": "0.8.0"}, + {"version": "0.1.0"}, + ], + }, + { + **_EXAMPLE_FILEPICKER_V2, + "history": [ + { + "version": _EXAMPLE_FILEPICKER_V2["version"], + "version_display": "Odei Release", + "released": "2025-03-25T00:00:00", + } + ], + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +PageRpcLatestServiceGet: TypeAlias = PageRpc[ + # WARNING: keep this definition in models_library and not in the RPC interface + # otherwise the metaclass PageRpc[*] will create *different* classes in server/client side + # and will fail to serialize/deserialize these parameters when transmitted/received + LatestServiceGet +] + +PageRpcServiceRelease: TypeAlias = PageRpc[ + # WARNING: keep this definition in models_library and not in the RPC interface + # otherwise the metaclass PageRpc[*] will create *different* classes in server/client side + # and will fail to serialize/deserialize these parameters when transmitted/received + ServiceRelease +] + +# 
Create PageRpc types +PageRpcServiceSummary = PageRpc[ServiceSummary] + +ServiceResourcesGet: TypeAlias = ServiceResourcesDict + + +class ServiceUpdateV2(CatalogInputSchema): + name: str | None = None + thumbnail: HttpUrl | None = None + icon: HttpUrl | None = None + + description: str | None = None + description_ui: bool = False + version_display: str | None = None + + deprecated: datetime | None = None + + classifiers: list[str] | None = None + quality: dict[str, Any] = {} + + access_rights: dict[GroupID, ServiceGroupAccessRightsV2] | None = None + + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + alias_generator=snake_to_camel, + ) + + +assert set(ServiceUpdateV2.model_fields.keys()) - set( # nosec + ServiceGetV2.model_fields.keys() +) == {"deprecated"} + + +class MyServiceGet(CatalogOutputSchema): + key: ServiceKey + release: ServiceRelease + + owner: GroupID | None + my_access_rights: ServiceGroupAccessRightsV2 + + +class ServiceListFilters(Filters): + service_type: Annotated[ + ServiceType | None, + Field( + description="Filter only services of a given type. If None, then all types are returned" + ), + ] = None + + service_key_pattern: Annotated[ + str | None, + Field( + description="Filter services by key pattern (e.g. 'simcore/services/comp/itis/*')", + ), + ] = None + + version_display_pattern: Annotated[ + str | None, + Field( + description="Filter services by version display pattern (e.g. '*2023*')", + ), + ] = None + + +__all__: tuple[str, ...] = ("ServiceRelease",) diff --git a/packages/models-library/src/models_library/api_schemas_catalog/services_ports.py b/packages/models-library/src/models_library/api_schemas_catalog/services_ports.py new file mode 100644 index 00000000000..4911f8a5ebb --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/services_ports.py @@ -0,0 +1,90 @@ +from typing import Annotated, Any, Literal + +from pydantic import BaseModel, ConfigDict, Field +from pydantic.config import JsonDict + +from ..basic_regex import PUBLIC_VARIABLE_NAME_RE +from ..services import ServiceInput, ServiceOutput +from ..utils.services_io import ( + get_service_io_json_schema, + guess_media_type, + update_schema_doc, +) + + +class ServicePortGet(BaseModel): + key: Annotated[ + str, + Field( + description="Port identifier name", + pattern=PUBLIC_VARIABLE_NAME_RE, + title="Key name", + ), + ] + kind: Literal["input", "output"] + content_media_type: str | None = None + content_schema: Annotated[ + dict[str, Any] | None, + Field( + description="jsonschema for the port's value. 
SEE https://json-schema.org/understanding-json-schema/", + ), + ] = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + example_input: dict[str, Any] = { + "key": "input_1", + "kind": "input", + "content_schema": { + "title": "Sleep interval", + "type": "integer", + "x_unit": "second", + "minimum": 0, + "maximum": 5, + }, + } + schema.update( + { + "example": example_input, + "examples": [ + example_input, + { + "key": "output_1", + "kind": "output", + "content_media_type": "text/plain", + "content_schema": { + "type": "string", + "title": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + }, + }, + ], + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + @classmethod + def from_domain_model( + cls, + kind: Literal["input", "output"], + key: str, + port: ServiceInput | ServiceOutput, + ) -> "ServicePortGet": + kwargs: dict[str, Any] = {"key": key, "kind": kind} + + # Convert old format into schemas + schema = port.content_schema + if not schema: + schema = get_service_io_json_schema(port) + + # Deduce media_type + if port.property_type.startswith("data:"): + kwargs["content_media_type"] = guess_media_type(port) + # Based on https://swagger.io/docs/specification/describing-request-body/file-upload/ + schema = update_schema_doc({"type": "string"}, port) + + kwargs["content_schema"] = schema + return cls(**kwargs) diff --git a/packages/models-library/src/models_library/api_schemas_catalog/services_specifications.py b/packages/models-library/src/models_library/api_schemas_catalog/services_specifications.py new file mode 100644 index 00000000000..331ef23f83e --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_catalog/services_specifications.py @@ -0,0 +1,18 @@ +from pydantic import BaseModel, Field + +from ..generated_models.docker_rest_api import ServiceSpec as DockerServiceSpec + + +class ServiceSpecifications(BaseModel): + sidecar: DockerServiceSpec | None = Field( + default=None, + description="schedule-time specifications for the service sidecar (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.25/#operation/ServiceCreate)", + ) + service: DockerServiceSpec | None = Field( + default=None, + description="schedule-time specifications specifications for the service (follows Docker Service creation API (specifically only the Resources part), see https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate", + ) + + +class ServiceSpecificationsGet(ServiceSpecifications): + ... 
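Note (illustration, not part of the diff): the catalog schemas above embed their own example payloads; a small sketch, assuming the updated models-library is importable, of validating the sleeper summary example and dumping it with the camelCase aliases produced by snake_to_camel.

from models_library.api_schemas_catalog.services import ServiceSummary

summary = ServiceSummary.model_validate(
    {
        "key": "simcore/services/comp/itis/sleeper",
        "version": "2.2.1",
        "name": "sleeper",
        "description": "A service which awaits for time to pass, two times.",
        "version_display": "2 Xtreme",
        "contact": "contact@acme.com",
    }
)
# populate_by_name=True accepts the snake_case field names used above;
# by_alias=True dumps the camelCase aliases generated by snake_to_camel
print(summary.model_dump(by_alias=True)["versionDisplay"])  # -> "2 Xtreme"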
diff --git a/packages/models-library/src/models_library/api_schemas_clusters_keeper/__init__.py b/packages/models-library/src/models_library/api_schemas_clusters_keeper/__init__.py new file mode 100644 index 00000000000..79be28f2021 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_clusters_keeper/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +CLUSTERS_KEEPER_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("clusters-keeper") diff --git a/packages/models-library/src/models_library/api_schemas_clusters_keeper/clusters.py b/packages/models-library/src/models_library/api_schemas_clusters_keeper/clusters.py new file mode 100644 index 00000000000..135b42188b8 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_clusters_keeper/clusters.py @@ -0,0 +1,25 @@ +import datetime +from enum import auto + +from pydantic import AnyUrl, BaseModel, Field + +from ..clusters import ClusterAuthentication +from ..users import UserID +from ..utils.enums import StrAutoEnum +from ..wallets import WalletID + + +class ClusterState(StrAutoEnum): + STARTED = auto() + RUNNING = auto() + STOPPED = auto() + + +class OnDemandCluster(BaseModel): + endpoint: AnyUrl + authentication: ClusterAuthentication = Field(discriminator="type") + state: ClusterState + user_id: UserID + wallet_id: WalletID | None + dask_scheduler_ready: bool + eta: datetime.timedelta diff --git a/packages/models-library/src/models_library/api_schemas_clusters_keeper/ec2_instances.py b/packages/models-library/src/models_library/api_schemas_clusters_keeper/ec2_instances.py new file mode 100644 index 00000000000..057c02e1815 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_clusters_keeper/ec2_instances.py @@ -0,0 +1,10 @@ +from dataclasses import dataclass + +from pydantic import ByteSize, NonNegativeFloat + + +@dataclass(frozen=True) +class EC2InstanceTypeGet: + name: str + cpus: NonNegativeFloat + ram: ByteSize diff --git a/services/catalog/src/simcore_service_catalog/api/errors/__init__.py b/packages/models-library/src/models_library/api_schemas_datcore_adapter/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/api/errors/__init__.py rename to packages/models-library/src/models_library/api_schemas_datcore_adapter/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_datcore_adapter/datasets.py b/packages/models-library/src/models_library/api_schemas_datcore_adapter/datasets.py new file mode 100644 index 00000000000..16d67cb8ddd --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_datcore_adapter/datasets.py @@ -0,0 +1,45 @@ +from datetime import datetime +from enum import Enum, unique +from pathlib import Path +from typing import Annotated + +from pydantic import BaseModel, ByteSize, Field + + +class DatasetMetaData(BaseModel): + id: str + display_name: str + size: Annotated[ + ByteSize | None, Field(description="Size of the dataset in bytes if available") + ] + + +@unique +class DataType(str, Enum): + FILE = "FILE" + FOLDER = "FOLDER" + + +class PackageMetaData(BaseModel): + path: Path + display_path: Path + package_id: str + name: str + filename: str + s3_bucket: str + size: ByteSize + created_at: datetime + updated_at: datetime + + +class FileMetaData(BaseModel): + dataset_id: str + package_id: str + id: str + name: str + type: str + path: Path + size: int + 
created_at: datetime + last_modified_at: datetime + data_type: DataType diff --git a/services/catalog/src/simcore_service_catalog/api/routes/__init__.py b/packages/models-library/src/models_library/api_schemas_director/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/api/routes/__init__.py rename to packages/models-library/src/models_library/api_schemas_director/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_director/services.py b/packages/models-library/src/models_library/api_schemas_director/services.py new file mode 100644 index 00000000000..52578fd7a69 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_director/services.py @@ -0,0 +1,5 @@ +from ..services_metadata_published import ServiceMetaDataPublished + + +class ServiceDataGet(ServiceMetaDataPublished): + ... diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/__init__.py b/packages/models-library/src/models_library/api_schemas_directorv2/__init__.py new file mode 100644 index 00000000000..6ab84683cdb --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/__init__.py @@ -0,0 +1,20 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace +from . import clusters, dynamic_services + +assert clusters # nosec +assert dynamic_services # nosec + + +DIRECTOR_V2_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("director-v2") + + +__all__: tuple[str, ...] = ( + "clusters", + "dynamic_services", +) diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py new file mode 100644 index 00000000000..26b7d10d0be --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/clusters.py @@ -0,0 +1,75 @@ +from typing import Annotated, Any, TypeAlias + +from pydantic import ( + BaseModel, + Field, + NonNegativeFloat, + field_validator, + model_validator, +) +from pydantic.networks import AnyUrl +from pydantic.types import ByteSize, PositiveFloat + +from ..generics import DictModel + + +class TaskCounts(BaseModel): + error: int = 0 + memory: int = 0 + executing: int = 0 + + +class WorkerMetrics(BaseModel): + cpu: float = Field(..., description="consumed % of cpus") + memory: ByteSize = Field(..., description="consumed memory") + num_fds: int = Field(..., description="consumed file descriptors") + task_counts: TaskCounts = Field(..., description="task details") + + +AvailableResources: TypeAlias = DictModel[str, PositiveFloat] + + +class UsedResources(DictModel[str, NonNegativeFloat]): + @model_validator(mode="before") + @classmethod + def ensure_negative_value_is_zero(cls, values: dict[str, Any]): + # dasks adds/remove resource values and sometimes + # they end up being negative instead of 0 + for res_key, res_value in values.items(): + if res_value < 0: + values[res_key] = 0 + return values + + +class Worker(BaseModel): + id: str + name: str + resources: AvailableResources + used_resources: UsedResources + memory_limit: ByteSize + metrics: WorkerMetrics + + +WorkersDict: TypeAlias = dict[AnyUrl, Worker] + + +class Scheduler(BaseModel): + status: str = Field(..., description="The running status of the scheduler") + workers: Annotated[WorkersDict | None, Field(default_factory=dict)] + + @field_validator("workers", mode="before") + @classmethod + def 
ensure_workers_is_empty_dict(cls, v): + if v is None: + return {} + return v + + +class ClusterDetails(BaseModel): + scheduler: Scheduler = Field( + ..., + description="This contains dask scheduler information given by the underlying dask library", + ) + dashboard_link: AnyUrl = Field( + ..., description="Link to this scheduler's dashboard" + ) diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/comp_runs.py b/packages/models-library/src/models_library/api_schemas_directorv2/comp_runs.py new file mode 100644 index 00000000000..7dc2b03c41b --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/comp_runs.py @@ -0,0 +1,102 @@ +from datetime import datetime +from typing import Any, NamedTuple + +from models_library.services_types import ServiceRunID +from pydantic import ( + AnyUrl, + BaseModel, + ConfigDict, + PositiveInt, +) + +from ..projects import ProjectID +from ..projects_nodes_io import NodeID +from ..projects_state import RunningState + + +class ComputationRunRpcGet(BaseModel): + project_uuid: ProjectID + iteration: int + state: RunningState + info: dict[str, Any] + submitted_at: datetime + started_at: datetime | None + ended_at: datetime | None + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "project_uuid": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "iteration": 1, + "state": "SUCCESS", + "info": { + "wallet_id": 9866, + "user_email": "test@example.net", + "wallet_name": "test", + "product_name": "osparc", + "project_name": "test", + "project_metadata": { + "parent_node_id": "12e0c8b2-bad6-40fb-9948-8dec4f65d4d9", + "parent_node_name": "UJyfwFVYySnPCaLuQIaz", + "parent_project_id": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "parent_project_name": "qTjDmYPxeqAWfCKCQCYF", + "root_parent_node_id": "37176e84-d977-4993-bc49-d76fcfc6e625", + "root_parent_node_name": "UEXExIZVPeFzGRmMglPr", + "root_parent_project_id": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "root_parent_project_name": "FuDpjjFIyeNTWRUWCuKo", + }, + "node_id_names_map": {}, + "simcore_user_agent": "agent", + }, + "submitted_at": "2023-01-11 13:11:47.293595", + "started_at": "2023-01-11 13:11:47.293595", + "ended_at": "2023-01-11 13:11:47.293595", + } + ] + } + ) + + +class ComputationRunRpcGetPage(NamedTuple): + items: list[ComputationRunRpcGet] + total: PositiveInt + + +class ComputationTaskRpcGet(BaseModel): + project_uuid: ProjectID + node_id: NodeID + state: RunningState + progress: float + image: dict[str, Any] + started_at: datetime | None + ended_at: datetime | None + log_download_link: AnyUrl | None + service_run_id: ServiceRunID + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "project_uuid": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "node_id": "12e0c8b2-bad6-40fb-9948-8dec4f65d4d9", + "state": "SUCCESS", + "progress": 0.0, + "image": { + "name": "simcore/services/comp/ti-solutions-optimizer", + "tag": "1.0.19", + "node_requirements": {"CPU": 8.0, "RAM": 25769803776}, + }, + "started_at": "2023-01-11 13:11:47.293595", + "ended_at": "2023-01-11 13:11:47.293595", + "log_download_link": "https://example.com/logs", + "service_run_id": "comp_1_12e0c8b2-bad6-40fb-9948-8dec4f65d4d9_1", + } + ] + } + ) + + +class ComputationTaskRpcGetPage(NamedTuple): + items: list[ComputationTaskRpcGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/computations.py b/packages/models-library/src/models_library/api_schemas_directorv2/computations.py new file mode 100644 index 
00000000000..3691fdbf6ee --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/computations.py @@ -0,0 +1,116 @@ +from typing import Annotated, Any, TypeAlias + +from pydantic import ( + AnyHttpUrl, + AnyUrl, + BaseModel, + ConfigDict, + Field, + ValidationInfo, + field_validator, +) + +from ..basic_types import IDStr +from ..projects import ProjectID +from ..projects_nodes_io import NodeID +from ..projects_pipeline import ComputationTask +from ..users import UserID +from ..wallets import WalletInfo + + +class ComputationGet(ComputationTask): + url: Annotated[ + AnyHttpUrl, Field(description="the link where to get the status of the task") + ] + stop_url: Annotated[ + AnyHttpUrl | None, Field(description="the link where to stop the task") + ] = None + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + x | {"url": "https://url.local"} + for x in ComputationTask.model_json_schema()["examples"] + ] + } + ) + + +class ComputationCreate(BaseModel): + user_id: UserID + project_id: ProjectID + start_pipeline: Annotated[ + bool | None, + Field(description="if True the computation pipeline will start right away"), + ] = False + product_name: Annotated[str, Field()] + product_api_base_url: Annotated[ + AnyHttpUrl, + Field(description="Base url of the product"), + ] + subgraph: Annotated[ + list[NodeID] | None, + Field( + description="An optional set of nodes that must be executed, if empty the whole pipeline is executed" + ), + ] = None + force_restart: Annotated[ + bool | None, + Field(description="if True will force re-running all dependent nodes"), + ] = False + simcore_user_agent: str = "" + use_on_demand_clusters: Annotated[ + bool, + Field( + description="if True, a cluster will be created as necessary (wallet_id cannot be None)", + validate_default=True, + ), + ] = False + wallet_info: Annotated[ + WalletInfo | None, + Field( + description="contains information about the wallet used to bill the running service" + ), + ] = None + + @field_validator("product_name") + @classmethod + def _ensure_product_name_defined_if_computation_starts( + cls, v, info: ValidationInfo + ): + if info.data.get("start_pipeline") and v is None: + msg = "product_name must be set if computation shall start!" 
+ raise ValueError(msg) + return v + + +class ComputationStop(BaseModel): + user_id: UserID + + +class ComputationDelete(ComputationStop): + force: Annotated[ + bool | None, + Field( + description="if True then the pipeline will be removed even if it is running" + ), + ] = False + + +class TaskLogFileGet(BaseModel): + task_id: NodeID + download_link: Annotated[ + AnyUrl | None, + Field(description="Presigned link for log file or None if still not available"), + ] = None + + +class TasksSelection(BaseModel): + nodes_ids: list[NodeID] + + +OutputName: TypeAlias = IDStr + + +class TasksOutputs(BaseModel): + nodes_outputs: dict[NodeID, dict[OutputName, Any]] diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services.py b/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services.py new file mode 100644 index 00000000000..565580b84bd --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services.py @@ -0,0 +1,106 @@ +from typing import Annotated, TypeAlias + +from pydantic import AnyHttpUrl, BaseModel, BeforeValidator, ByteSize, ConfigDict, Field +from pydantic.config import JsonDict + +from ..resource_tracker import HardwareInfo, PricingInfo +from ..services import ServicePortKey +from ..services_resources import ServiceResourcesDict, ServiceResourcesDictHelpers +from ..wallets import WalletInfo +from .dynamic_services_service import RunningDynamicServiceDetails, ServiceDetails + + +class RetrieveDataIn(BaseModel): + port_keys: list[ServicePortKey] = Field( + ..., description="The port keys to retrieve data from" + ) + + +class RetrieveDataOut(BaseModel): + size_bytes: ByteSize = Field( + ..., description="The amount of data transferred by the retrieve call" + ) + + +class RetrieveDataOutEnveloped(BaseModel): + data: RetrieveDataOut + + @classmethod + def from_transferred_bytes( + cls, transferred_bytes: int + ) -> "RetrieveDataOutEnveloped": + return cls(data=RetrieveDataOut(size_bytes=ByteSize(transferred_bytes))) + + model_config = ConfigDict( + json_schema_extra={"examples": [{"data": {"size_bytes": 42}}]} + ) + + +class DynamicServiceCreate(ServiceDetails): + service_resources: ServiceResourcesDict + + product_name: Annotated[str, Field(..., description="Current product name")] + product_api_base_url: Annotated[ + str, + BeforeValidator(lambda v: f"{AnyHttpUrl(v)}"), + Field(..., description="Current product API base URL"), + ] + can_save: Annotated[ + bool, Field(..., description="the service data must be saved when closing") + ] + wallet_info: Annotated[ + WalletInfo | None, + Field( + default=None, + description="contains information about the wallet used to bill the running service", + ), + ] + pricing_info: Annotated[ + PricingInfo | None, + Field( + default=None, + description="contains pricing information (ex. pricing plan and unit ids)", + ), + ] + hardware_info: Annotated[ + HardwareInfo | None, + Field( + default=None, + description="contains hardware information (ex. 
aws_ec2_instances)", + ), + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "key": "simcore/services/dynamic/3dviewer", + "version": "2.4.5", + "user_id": 234, + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "product_name": "osparc", + "product_api_base_url": "https://api.local/", + "can_save": True, + "service_resources": ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "wallet_info": WalletInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "pricing_info": PricingInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "hardware_info": HardwareInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + } + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +DynamicServiceGet: TypeAlias = RunningDynamicServiceDetails + + +class GetProjectInactivityResponse(BaseModel): + is_inactive: bool + + model_config = ConfigDict(json_schema_extra={"example": {"is_inactive": "false"}}) diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services_service.py b/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services_service.py new file mode 100644 index 00000000000..769e1fc9419 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/dynamic_services_service.py @@ -0,0 +1,129 @@ +from functools import cached_property +from pathlib import Path + +from pydantic import BaseModel, ConfigDict, Field + +from ..basic_types import PortInt +from ..projects import ProjectID +from ..projects_nodes_io import NodeID +from ..services import DynamicServiceKey, ServiceVersion +from ..services_enums import ServiceBootType, ServiceState +from ..users import UserID + + +class CommonServiceDetails(BaseModel): + key: DynamicServiceKey = Field( + ..., + description="distinctive name for the node based on the docker registry path", + examples=[ + "simcore/services/dynamic/3dviewer", + ], + alias="service_key", + ) + version: ServiceVersion = Field( + ..., + description="semantic version number of the node", + examples=["1.0.0", "0.0.1"], + alias="service_version", + ) + + user_id: UserID + project_id: ProjectID + node_uuid: NodeID = Field(..., alias="service_uuid") + + +class ServiceDetails(CommonServiceDetails): + basepath: Path | None = Field( + default=None, + description="predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint.", + alias="service_basepath", + ) + model_config = ConfigDict( + populate_by_name=True, + json_schema_extra={ + "example": { + "key": "simcore/services/dynamic/3dviewer", + "version": "2.4.5", + "user_id": 234, + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + } + }, + ) + + +class RunningDynamicServiceDetails(ServiceDetails): + boot_type: ServiceBootType = Field( + default=ServiceBootType.V0, + description=( + "Describes how the dynamic services was started (legacy=V0, modern=V2)." + "Since legacy services do not have this label it defaults to V0." 
+ ), + ) + + host: str = Field( + ..., description="the service swarm internal host name", alias="service_host" + ) + internal_port: PortInt = Field( + ..., description="the service swarm internal port", alias="service_port" + ) + published_port: PortInt | None = Field( + default=None, + description="the service swarm published port if any", + deprecated=True, + ) + + entry_point: str | None = Field( + default=None, + description="if empty the service entrypoint is on the root endpoint.", + deprecated=True, + ) + state: ServiceState = Field( + ..., description="service current state", alias="service_state" + ) + message: str | None = Field( + default=None, + description="additional information related to service state", + alias="service_message", + ) + + model_config = ConfigDict( + ignored_types=(cached_property,), + json_schema_extra={ + "examples": [ + # legacy + { + "service_key": "simcore/services/dynamic/raw-graphs", + "service_version": "2.10.6", + "user_id": 1, + "project_id": "32fb4eb6-ab30-11ef-9ee4-0242ac140008", + "service_uuid": "0cd049ba-cd6b-4a12-b416-a50c9bc8e7bb", + "service_basepath": "/x/0cd049ba-cd6b-4a12-b416-a50c9bc8e7bb", + "service_host": "raw-graphs_0cd049ba-cd6b-4a12-b416-a50c9bc8e7bb", + "service_port": 4000, + "published_port": None, + "entry_point": "", + "service_state": "running", + "service_message": "", + }, + # new style + { + "service_key": "simcore/services/dynamic/jupyter-math", + "service_version": "3.0.3", + "user_id": 1, + "project_id": "32fb4eb6-ab30-11ef-9ee4-0242ac140008", + "service_uuid": "6e3cad3a-eb64-43de-b476-9ac3c413fd9c", + "boot_type": "V2", + "service_host": "dy-sidecar_6e3cad3a-eb64-43de-b476-9ac3c413fd9c", + "service_port": 8888, + "service_state": "running", + "service_message": "", + }, + ] + }, + ) + + @cached_property + def legacy_service_url(self) -> str: + return f"http://{self.host}:{self.internal_port}{self.basepath}" # NOSONAR diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/errors.py b/packages/models-library/src/models_library/api_schemas_directorv2/errors.py new file mode 100644 index 00000000000..ecf33eefd14 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/errors.py @@ -0,0 +1,15 @@ +from pydantic import BaseModel, Field + + +class Error(BaseModel): + code: str | None = Field(None, description="Server Exception") + + +class ErrorType(BaseModel): + message: str = Field(..., description="Error message") + errors: list[Error] | None = None + status: int = Field(..., description="Error code") + + +class ErrorEnveloped(BaseModel): + error: ErrorType diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/notifications.py b/packages/models-library/src/models_library/api_schemas_directorv2/notifications.py new file mode 100644 index 00000000000..b0bbe171912 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/notifications.py @@ -0,0 +1,8 @@ +from models_library.projects_nodes_io import NodeID +from models_library.wallets import WalletID +from pydantic import BaseModel + + +class ServiceNoMoreCredits(BaseModel): + node_id: NodeID + wallet_id: WalletID diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/services.py b/packages/models-library/src/models_library/api_schemas_directorv2/services.py new file mode 100644 index 00000000000..61a661720a7 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/services.py @@ -0,0 +1,111 @@ +from typing import Final 
+ +from pydantic import BaseModel, ConfigDict, Field, JsonValue, field_validator +from pydantic.config import JsonDict +from pydantic.types import ByteSize, NonNegativeInt + +from ..service_settings_labels import ContainerSpec + + +class ServiceBuildDetails(BaseModel): + build_date: str + vcs_ref: str + vcs_url: str + + +class NodeRequirements(BaseModel): + cpu: float = Field( + ..., + description="defines the required (maximum) CPU shares for running the services", + alias="CPU", + gt=0.0, + ) + gpu: NonNegativeInt | None = Field( + None, + description="defines the required (maximum) GPU for running the services", + alias="GPU", + validate_default=True, + ) + ram: ByteSize = Field( + ..., + description="defines the required (maximum) amount of RAM for running the services", + alias="RAM", + ) + vram: ByteSize | None = Field( + default=None, + description="defines the required (maximum) amount of VRAM for running the services", + alias="VRAM", + validate_default=True, + ) + + @field_validator("vram", "gpu", mode="before") + @classmethod + def check_0_is_none(cls, v): + if v == 0: + v = None + return v + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"CPU": 1.0, "RAM": 4194304}, + {"CPU": 1.0, "GPU": 1, "RAM": 4194304}, + { + "CPU": 1.0, + "RAM": 4194304, + }, + ] + } + ) + + +class ServiceExtras(BaseModel): + node_requirements: NodeRequirements + service_build_details: ServiceBuildDetails | None = None + container_spec: ContainerSpec | None = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + + node_requirements_examples = NodeRequirements.model_json_schema()["examples"] + + examples: list[JsonValue] = [ + {"node_requirements": node_example} + for node_example in node_requirements_examples + ] + examples += [ + { + "node_requirements": node_example, + "service_build_details": { + "build_date": "2021-08-13T12:56:28Z", + "vcs_ref": "8251ade", + "vcs_url": "git@github.com:ITISFoundation/osparc-simcore.git", + }, + } + for node_example in node_requirements_examples + ] + examples += [ + { + "node_requirements": node_example, + "service_build_details": { + "build_date": "2021-08-13T12:56:28Z", + "vcs_ref": "8251ade", + "vcs_url": "git@github.com:ITISFoundation/osparc-simcore.git", + }, + "container_spec": {"Command": ["run", "subcommand"]}, + } + for node_example in node_requirements_examples + ] + + schema.update({"examples": examples}) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME: Final[NonNegativeInt] = 89 + + +DYNAMIC_SIDECAR_SERVICE_PREFIX: Final[str] = "dy-sidecar" +DYNAMIC_PROXY_SERVICE_PREFIX: Final[str] = "dy-proxy" diff --git a/packages/models-library/src/models_library/api_schemas_directorv2/socketio.py b/packages/models-library/src/models_library/api_schemas_directorv2/socketio.py new file mode 100644 index 00000000000..b368d7606a3 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_directorv2/socketio.py @@ -0,0 +1,3 @@ +from typing import Final + +SOCKET_IO_SERVICE_NO_MORE_CREDITS_EVENT: Final[str] = "serviceNoMoreCredits" diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/__init__.py b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/__init__.py new file mode 100644 index 00000000000..70a4f1247ba --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import 
TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +DYNAMIC_SCHEDULER_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("dynamic-scheduler") diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/dynamic_services.py b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/dynamic_services.py new file mode 100644 index 00000000000..c8324f0bca0 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/dynamic_services.py @@ -0,0 +1,61 @@ +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceCreate +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.resource_tracker import HardwareInfo, PricingInfo +from models_library.services_resources import ServiceResourcesDictHelpers +from models_library.users import UserID +from models_library.wallets import WalletInfo +from pydantic import BaseModel, ConfigDict +from pydantic.config import JsonDict + + +class DynamicServiceStart(DynamicServiceCreate): + request_dns: str + request_scheme: str + simcore_user_agent: str + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "product_name": "osparc", + "product_api_base_url": "https://api.local", + "can_save": True, + "user_id": 234, + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + "service_key": "simcore/services/dynamic/3dviewer", + "service_version": "2.4.5", + "service_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "request_dns": "some.local", + "request_scheme": "http", + "simcore_user_agent": "", + "service_resources": ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "wallet_info": WalletInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "pricing_info": PricingInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "hardware_info": HardwareInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + } + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + +class DynamicServiceStop(BaseModel): + user_id: UserID + project_id: ProjectID + node_id: NodeID + simcore_user_agent: str + save_state: bool + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "user_id": 234, + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + "node_id": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "simcore_user_agent": "", + "save_state": True, + } + } + ) diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/socketio.py b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/socketio.py new file mode 100644 index 00000000000..89a493a56cc --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_scheduler/socketio.py @@ -0,0 +1,3 @@ +from typing import Final + +SOCKET_IO_SERVICE_STATUS_EVENT: Final[str] = "serviceStatus" diff --git a/services/catalog/src/simcore_service_catalog/db/__init__.py b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/db/__init__.py rename to packages/models-library/src/models_library/api_schemas_dynamic_sidecar/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/containers.py 
b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/containers.py new file mode 100644 index 00000000000..74569078af4 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/containers.py @@ -0,0 +1,20 @@ +from typing import TypeAlias + +from pydantic import BaseModel, ConfigDict, NonNegativeFloat + + +class ActivityInfo(BaseModel): + seconds_inactive: NonNegativeFloat + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"seconds_inactive": 0}, + {"seconds_inactive": 100}, + ] + } + ) + + +ActivityInfoOrNone: TypeAlias = ActivityInfo | None + +DockerComposeYamlStr: TypeAlias = str diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/ports.py b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/ports.py new file mode 100644 index 00000000000..01214a39537 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/ports.py @@ -0,0 +1,35 @@ +from enum import auto + +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.utils.enums import StrAutoEnum +from pydantic import BaseModel + + +class OutputStatus(StrAutoEnum): + UPLOAD_STARTED = auto() + UPLOAD_WAS_ABORTED = auto() + UPLOAD_FINISHED_SUCCESSFULLY = auto() + UPLOAD_FINISHED_WITH_ERROR = auto() + + +class InputStatus(StrAutoEnum): + DOWNLOAD_STARTED = auto() + DOWNLOAD_WAS_ABORTED = auto() + DOWNLOAD_FINISHED_SUCCESSFULLY = auto() + DOWNLOAD_FINISHED_WITH_ERROR = auto() + + +class _PortStatusCommon(BaseModel): + project_id: ProjectID + node_id: NodeID + port_key: ServicePortKey + + +class OutputPortStatus(_PortStatusCommon): + status: OutputStatus + + +class InputPortSatus(_PortStatusCommon): + status: InputStatus diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/socketio.py b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/socketio.py new file mode 100644 index 00000000000..93e34a1682b --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/socketio.py @@ -0,0 +1,5 @@ +from typing import Final + +SOCKET_IO_SERVICE_DISK_USAGE_EVENT: Final[str] = "serviceDiskUsage" +SOCKET_IO_STATE_OUTPUT_PORTS_EVENT: Final[str] = "stateOutputPorts" +SOCKET_IO_STATE_INPUT_PORTS_EVENT: Final[str] = "stateInputPorts" diff --git a/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/telemetry.py b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/telemetry.py new file mode 100644 index 00000000000..25b71df9e0f --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_dynamic_sidecar/telemetry.py @@ -0,0 +1,100 @@ +from abc import abstractmethod +from enum import auto +from typing import Any, Final, Protocol + +from pydantic import ( + BaseModel, + ByteSize, + Field, + NonNegativeFloat, + NonNegativeInt, + model_validator, +) + +from ..projects_nodes_io import NodeID +from ..utils.enums import StrAutoEnum + +_EPSILON: Final[NonNegativeFloat] = 1e-16 + + +class MountPathCategory(StrAutoEnum): + HOST = auto() + STATES_VOLUMES = auto() + INPUTS_VOLUMES = auto() + OUTPUTS_VOLUMES = auto() + + +class SDiskUsageProtocol(Protocol): + @property + @abstractmethod + def total(self) -> int: + ... + + @property + @abstractmethod + def used(self) -> int: + ... + + @property + @abstractmethod + def free(self) -> int: + ... 
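Illustrative sketch (not part of the patch): building one of the dynamic-sidecar port-status payloads defined in ports.py above; the UUIDs are borrowed from the examples in this patch and the port key is invented.

from models_library.api_schemas_dynamic_sidecar.ports import OutputPortStatus, OutputStatus

# invented identifiers; ProjectID/NodeID accept UUID strings via pydantic coercion
port_status = OutputPortStatus(
    project_id="32fb4eb6-ab30-11ef-9ee4-0242ac140008",
    node_id="6e3cad3a-eb64-43de-b476-9ac3c413fd9c",
    port_key="output_1",
    status=OutputStatus.UPLOAD_STARTED,
)
print(port_status.model_dump(mode="json"))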
+ + @property + @abstractmethod + def percent(self) -> float: + ... + + +def _get_percent(used: float, total: float) -> float: + return round(used * 100 / (total + _EPSILON), 2) + + +class DiskUsage(BaseModel): + used: ByteSize = Field(description="used space") + free: ByteSize = Field(description="remaining space") + + total: ByteSize = Field(description="total space = free + used") + used_percent: float = Field( + ge=0.00, + le=100.00, + description="Percent of used space relative to the total space", + ) + + @model_validator(mode="before") + @classmethod + def _check_total(cls, values: dict[str, Any]) -> dict[str, Any]: + total = values["total"] + free = values["free"] + used = values["used"] + if total != free + used: + msg = f"{total=} is different than the sum of {free=}+{used=} => sum={free+used}" + raise ValueError(msg) + return values + + @classmethod + def from_efs_guardian( + cls, used: NonNegativeInt, total: NonNegativeInt + ) -> "DiskUsage": + free = total - used + return cls( + used=ByteSize(used), + free=ByteSize(free), + total=ByteSize(total), + used_percent=_get_percent(used, total), + ) + + @classmethod + def from_ps_util_disk_usage( + cls, ps_util_disk_usage: SDiskUsageProtocol + ) -> "DiskUsage": + total = ps_util_disk_usage.free + ps_util_disk_usage.used + return cls.from_efs_guardian(ps_util_disk_usage.used, total) + + def __hash__(self): + return hash((self.used, self.free, self.total, self.used_percent)) + + +class ServiceDiskUsage(BaseModel): + node_id: NodeID + usage: dict[MountPathCategory, DiskUsage] diff --git a/packages/models-library/src/models_library/api_schemas_efs_guardian/__init__.py b/packages/models-library/src/models_library/api_schemas_efs_guardian/__init__.py new file mode 100644 index 00000000000..f47a9a3f8d3 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_efs_guardian/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +EFS_GUARDIAN_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("efs-guardian") diff --git a/services/catalog/src/simcore_service_catalog/models/domain/__init__.py b/packages/models-library/src/models_library/api_schemas_invitations/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/models/domain/__init__.py rename to packages/models-library/src/models_library/api_schemas_invitations/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_invitations/invitations.py b/packages/models-library/src/models_library/api_schemas_invitations/invitations.py new file mode 100644 index 00000000000..8c5fd85d2e8 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_invitations/invitations.py @@ -0,0 +1,49 @@ +from typing import Any + +from pydantic import BaseModel, ConfigDict, Field, HttpUrl + +from ..invitations import InvitationContent, InvitationInputs +from ..products import ProductName + +_INPUTS_EXAMPLE: dict[str, Any] = { + "issuer": "issuerid", + "guest": "invitedguest@company.com", + "trial_account_days": 2, +} + + +class ApiInvitationInputs(InvitationInputs): + model_config = ConfigDict(json_schema_extra={"example": _INPUTS_EXAMPLE}) + + +class ApiInvitationContent(InvitationContent): + + product: ProductName = Field( + ..., description="This invitations can only be used for this product." 
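Illustrative sketch (not part of the patch): DiskUsage.from_ps_util_disk_usage accepts any object satisfying SDiskUsageProtocol, for instance the named tuple returned by psutil.disk_usage (assuming psutil is installed); note that total is recomputed as used + free, which the model validator then re-checks.

import psutil  # assumption: psutil is available; its disk_usage() exposes total/used/free/percent

from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage

usage = DiskUsage.from_ps_util_disk_usage(psutil.disk_usage("/"))
assert usage.total == usage.used + usage.free  # guaranteed by the model validator
print(usage.used_percent)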
+ ) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + **_INPUTS_EXAMPLE, + "product": "osparc", + "created": "2023-01-11 13:11:47.293595", + } + } + ) + + +class ApiInvitationContentAndLink(ApiInvitationContent): + invitation_url: HttpUrl = Field(..., description="Invitation link") + model_config = ConfigDict( + json_schema_extra={ + "example": { + **ApiInvitationContent.model_config["json_schema_extra"]["example"], # type: ignore[index,dict-item] + "invitation_url": "https://foo.com/#/registration?invitation=1234", + } + } + ) + + +class ApiEncryptedInvitation(BaseModel): + invitation_url: HttpUrl = Field(..., description="Invitation link") diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/__init__.py b/packages/models-library/src/models_library/api_schemas_long_running_tasks/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/models/schemas/__init__.py rename to packages/models-library/src/models_library/api_schemas_long_running_tasks/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_long_running_tasks/base.py b/packages/models-library/src/models_library/api_schemas_long_running_tasks/base.py new file mode 100644 index 00000000000..a3bb93813dc --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_long_running_tasks/base.py @@ -0,0 +1,50 @@ +import logging +from typing import Annotated, TypeAlias + +from pydantic import BaseModel, Field, field_validator, validate_call + +_logger = logging.getLogger(__name__) + +TaskId = str + +ProgressMessage: TypeAlias = str + +ProgressPercent: TypeAlias = Annotated[float, Field(ge=0.0, le=1.0)] + + +class TaskProgress(BaseModel): + """ + Helps the user to keep track of the progress. 
Progress is expected to be + defined as a float bound between 0.0 and 1.0 + """ + + task_id: TaskId | None = Field(default=None) + message: ProgressMessage = Field(default="") + percent: ProgressPercent = Field(default=0.0) + + @validate_call + def update( + self, + *, + message: ProgressMessage | None = None, + percent: ProgressPercent | None = None, + ) -> None: + """`percent` must be between 0.0 and 1.0 otherwise ValueError is raised""" + if message: + self.message = message + if percent: + if not (0.0 <= percent <= 1.0): + msg = f"percent={percent!r} must be in range [0.0, 1.0]" + raise ValueError(msg) + self.percent = percent + + _logger.debug("Progress update: %s", f"{self}") + + @classmethod + def create(cls, task_id: TaskId | None = None) -> "TaskProgress": + return cls(task_id=task_id) + + @field_validator("percent") + @classmethod + def round_value_to_3_digit(cls, v): + return round(v, 3) diff --git a/packages/models-library/src/models_library/api_schemas_long_running_tasks/tasks.py b/packages/models-library/src/models_library/api_schemas_long_running_tasks/tasks.py new file mode 100644 index 00000000000..acd73831b22 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_long_running_tasks/tasks.py @@ -0,0 +1,31 @@ +import urllib.parse +from datetime import datetime +from typing import Any + +from pydantic import BaseModel, field_validator + +from .base import TaskId, TaskProgress + + +class TaskStatus(BaseModel): + task_progress: TaskProgress + done: bool + started: datetime | None + + +class TaskResult(BaseModel): + result: Any | None + error: Any | None + + +class TaskGet(BaseModel): + task_id: TaskId + task_name: str + status_href: str + result_href: str + abort_href: str + + @field_validator("task_name") + @classmethod + def unquote_str(cls, v) -> str: + return urllib.parse.unquote(v) diff --git a/packages/models-library/src/models_library/api_schemas_notifications/__init__.py b/packages/models-library/src/models_library/api_schemas_notifications/__init__.py new file mode 100644 index 00000000000..dfa868dc522 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_notifications/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +NOTIFICATIONS_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("notifications") diff --git a/packages/models-library/src/models_library/api_schemas_payments/__init__.py b/packages/models-library/src/models_library/api_schemas_payments/__init__.py new file mode 100644 index 00000000000..73928d6ccd7 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_payments/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +PAYMENTS_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter(RPCNamespace).validate_python( + "payments" +) diff --git a/packages/models-library/src/models_library/api_schemas_payments/errors.py b/packages/models-library/src/models_library/api_schemas_payments/errors.py new file mode 100644 index 00000000000..362482772f7 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_payments/errors.py @@ -0,0 +1,65 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class _BaseRpcApiError(OsparcErrorMixin, ValueError): + @classmethod + def get_full_class_name(cls) -> str: + # Can be used as unique code identifier + return 
f"{cls.__module__}.{cls.__name__}" + + +# +# service-wide errors +# + + +class PaymentServiceUnavailableError(_BaseRpcApiError): + msg_template = "Payments are currently unavailable: {human_readable_detail}" + + +# +# payment transactions errors +# + + +class PaymentsError(_BaseRpcApiError): + msg_template = "Error in payment transaction '{payment_id}'" + + +class PaymentNotFoundError(PaymentsError): + msg_template = "Payment transaction '{payment_id}' was not found" + + +class PaymentAlreadyExistsError(PaymentsError): + msg_template = "Payment transaction '{payment_id}' was already initialized" + + +class PaymentAlreadyAckedError(PaymentsError): + msg_template = "Payment transaction '{payment_id}' cannot be changes since it was already closed." + + +# +# payment-methods errors +# + + +class PaymentsMethodsError(_BaseRpcApiError): + ... + + +class PaymentMethodNotFoundError(PaymentsMethodsError): + msg_template = "The specified payment method '{payment_method_id}' does not exist" + + +class PaymentMethodAlreadyAckedError(PaymentsMethodsError): + msg_template = ( + "Cannot create payment-method '{payment_method_id}' since it was already closed" + ) + + +class PaymentMethodUniqueViolationError(PaymentsMethodsError): + msg_template = "Payment method '{payment_method_id}' aready exists" + + +class InvalidPaymentMethodError(PaymentsMethodsError): + msg_template = "Invalid payment method '{payment_method_id}'" diff --git a/packages/models-library/src/models_library/api_schemas_payments/socketio.py b/packages/models-library/src/models_library/api_schemas_payments/socketio.py new file mode 100644 index 00000000000..73753f34e46 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_payments/socketio.py @@ -0,0 +1,4 @@ +from typing import Final + +SOCKET_IO_PAYMENT_COMPLETED_EVENT: Final[str] = "paymentCompleted" +SOCKET_IO_PAYMENT_METHOD_ACKED_EVENT: Final[str] = "paymentMethodAcknowledged" diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/__init__.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/__init__.py new file mode 100644 index 00000000000..d32b474edf6 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +RESOURCE_USAGE_TRACKER_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("resource-usage-tracker") diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/credit_transactions.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/credit_transactions.py new file mode 100644 index 00000000000..db235ce8094 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/credit_transactions.py @@ -0,0 +1,36 @@ +from datetime import datetime +from decimal import Decimal +from typing import Annotated + +from pydantic import BaseModel, BeforeValidator, PlainSerializer + +from ..products import ProductName +from ..resource_tracker import CreditTransactionId +from ..users import UserID +from ..wallets import WalletID + + +class WalletTotalCredits(BaseModel): + wallet_id: WalletID + available_osparc_credits: Annotated[ + Decimal, + BeforeValidator(lambda x: round(x, 2)), + PlainSerializer(float, return_type=float, when_used="json"), + ] + + +class CreditTransactionCreateBody(BaseModel): + 
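Illustrative sketch (not part of the patch): minimal usage of the TaskProgress helper from api_schemas_long_running_tasks/base.py; the task id is invented.

from models_library.api_schemas_long_running_tasks.base import TaskProgress

progress = TaskProgress.create(task_id="hypothetical-task-id")
progress.update(message="processing inputs", percent=0.5)  # percent must stay within [0.0, 1.0]
assert progress.percent == 0.5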
product_name: ProductName + wallet_id: WalletID + wallet_name: str + user_id: UserID + user_email: str + osparc_credits: Decimal + payment_transaction_id: str + created_at: datetime + + +class CreditTransactionCreated(BaseModel): + """Response Create Credit Transaction V1 Credit Transactions Post""" + + credit_transaction_id: CreditTransactionId diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_checkouts.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_checkouts.py new file mode 100644 index 00000000000..8257aa35186 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_checkouts.py @@ -0,0 +1,53 @@ +from datetime import datetime +from typing import NamedTuple + +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict, PositiveInt + + +class LicensedItemCheckoutGet(BaseModel): + licensed_item_checkout_id: LicensedItemCheckoutID + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + user_id: UserID + user_email: str + product_name: ProductName + service_run_id: ServiceRunID + started_at: datetime + stopped_at: datetime | None + num_of_seats: int + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "licensed_item_checkout_id": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "licensed_item_id": "303942ef-6d31-4ba8-afbe-dbb1fce2a953", + "key": "Duke", + "version": "1.0.0", + "wallet_id": 1, + "user_id": 1, + "user_email": "test@test.com", + "product_name": "osparc", + "service_run_id": "run_1", + "started_at": "2023-01-11 13:11:47.293595", + "stopped_at": "2023-01-11 13:11:47.293595", + "num_of_seats": 1, + } + ] + } + ) + + +class LicensedItemsCheckoutsPage(NamedTuple): + items: list[LicensedItemCheckoutGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_purchases.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_purchases.py new file mode 100644 index 00000000000..e9ee9e4ae67 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/licensed_items_purchases.py @@ -0,0 +1,62 @@ +from datetime import datetime +from decimal import Decimal +from typing import NamedTuple + +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker import PricingUnitCostId +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, +) +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict, PositiveInt + + +class LicensedItemPurchaseGet(BaseModel): + licensed_item_purchase_id: LicensedItemPurchaseID + product_name: ProductName + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + wallet_name: str + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + 
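Illustrative sketch (not part of the patch): the Annotated field on WalletTotalCredits (a few hunks above) rounds incoming credits to two decimals before validation and serializes them as a plain JSON number; the wallet id and amount are invented.

from decimal import Decimal

from models_library.api_schemas_resource_usage_tracker.credit_transactions import (
    WalletTotalCredits,
)

credits_ = WalletTotalCredits(wallet_id=1, available_osparc_credits=Decimal("12.3456"))
assert credits_.available_osparc_credits == Decimal("12.35")  # BeforeValidator rounds to 2 decimals
print(credits_.model_dump_json())  # PlainSerializer emits the amount as a float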
start_at: datetime + expire_at: datetime + num_of_seats: int + purchased_by_user: UserID + user_email: str + purchased_at: datetime + modified: datetime + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "licensed_item_purchase_id": "beb16d18-d57d-44aa-a638-9727fa4a72ef", + "product_name": "osparc", + "licensed_item_id": "303942ef-6d31-4ba8-afbe-dbb1fce2a953", + "key": "Duke", + "version": "1.0.0", + "wallet_id": 1, + "wallet_name": "My Wallet", + "pricing_unit_cost_id": 1, + "pricing_unit_cost": 10, + "start_at": "2023-01-11 13:11:47.293595", + "expire_at": "2023-01-11 13:11:47.293595", + "num_of_seats": 1, + "purchased_by_user": 1, + "user_email": "test@test.com", + "purchased_at": "2023-01-11 13:11:47.293595", + "modified": "2023-01-11 13:11:47.293595", + } + ] + } + ) + + +class LicensedItemsPurchasesPage(NamedTuple): + items: list[LicensedItemPurchaseGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/pricing_plans.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/pricing_plans.py new file mode 100644 index 00000000000..08696b5b61c --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/pricing_plans.py @@ -0,0 +1,168 @@ +from datetime import datetime +from decimal import Decimal +from typing import NamedTuple + +from pydantic import BaseModel, ConfigDict, PositiveInt, model_validator + +from ..resource_tracker import ( + HardwareInfo, + PricingPlanClassification, + PricingPlanId, + PricingUnitCostId, + PricingUnitId, + UnitExtraInfoLicense, + UnitExtraInfoTier, +) +from ..services_types import ServiceKey, ServiceVersion + + +class RutPricingUnitGet(BaseModel): + pricing_unit_id: PricingUnitId + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + current_cost_per_unit: Decimal + current_cost_per_unit_id: PricingUnitCostId + default: bool + specific_info: HardwareInfo + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_unit_id": 1, + "unit_name": "SMALL", + "unit_extra_info": UnitExtraInfoTier.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "current_cost_per_unit": 5.7, + "current_cost_per_unit_id": 1, + "default": True, + "specific_info": HardwareInfo.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + }, + { + "pricing_unit_id": 1, + "unit_name": "SMALL", + "unit_extra_info": UnitExtraInfoTier.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "current_cost_per_unit": 5.7, + "current_cost_per_unit_id": 1, + "default": True, + "specific_info": HardwareInfo.model_config["json_schema_extra"]["examples"][1], # type: ignore [index] + }, + { + "pricing_unit_id": 2, + "unit_name": "5 seats", + "unit_extra_info": UnitExtraInfoLicense.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "current_cost_per_unit": 10.5, + "current_cost_per_unit_id": 2, + "default": False, + "specific_info": HardwareInfo.model_config["json_schema_extra"]["examples"][1], # type: ignore [index] + }, + ] + } + ) + + +class RutPricingPlanGet(BaseModel): + pricing_plan_id: PricingPlanId + display_name: str + description: str + classification: PricingPlanClassification + created_at: datetime + pricing_plan_key: str + pricing_units: list[RutPricingUnitGet] | None + is_active: bool + + @model_validator(mode="after") + def ensure_classification_matches_extra_info(self): + """Enforce that all 
PricingUnitGet.unit_extra_info match the plan's classification.""" + if not self.pricing_units: + return self # No units to check + + for unit in self.pricing_units: + if ( + self.classification == PricingPlanClassification.TIER + and not isinstance(unit.unit_extra_info, UnitExtraInfoTier) + ): + error_message = ( + "For TIER classification, unit_extra_info must be UnitExtraInfoTier" + ) + raise ValueError(error_message) + if ( + self.classification == PricingPlanClassification.LICENSE + and not isinstance(unit.unit_extra_info, UnitExtraInfoLicense) + ): + error_message = "For LICENSE classification, unit_extra_info must be UnitExtraInfoLicense" + raise ValueError(error_message) + return self + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "display_name": "Pricing Plan for Sleeper", + "description": "Special Pricing Plan for Sleeper", + "classification": "TIER", + "created_at": "2023-01-11 13:11:47.293595", + "pricing_plan_key": "pricing-plan-sleeper", + "pricing_units": [ + RutPricingUnitGet.model_config["json_schema_extra"]["examples"][ # type: ignore [index] + 0 # type: ignore [index] + ] + ], + "is_active": True, + }, + { + "pricing_plan_id": 1, + "display_name": "Pricing Plan for Sleeper", + "description": "Special Pricing Plan for Sleeper", + "classification": "TIER", + "created_at": "2023-01-11 13:11:47.293595", + "pricing_plan_key": "pricing-plan-sleeper", + "pricing_units": [ + RutPricingUnitGet.model_config["json_schema_extra"]["examples"][ # type: ignore [index] + 1 # type: ignore [index] + ] + ], + "is_active": True, + }, + { + "pricing_plan_id": 2, + "display_name": "VIP model A", + "description": "Special Pricing Plan for VIP", + "classification": "LICENSE", + "created_at": "2023-01-11 13:11:47.293595", + "pricing_plan_key": "vip-model-a", + "pricing_units": [ + RutPricingUnitGet.model_config["json_schema_extra"]["examples"][ # type: ignore [index] + 2 # type: ignore [index] + ] + ], + "is_active": True, + }, + ] + } + ) + + +class RutPricingPlanPage(NamedTuple): + items: list[RutPricingPlanGet] + total: PositiveInt + + +class PricingPlanToServiceGet(BaseModel): + pricing_plan_id: PricingPlanId + service_key: ServiceKey + service_version: ServiceVersion + created: datetime + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "service_key": "simcore/services/comp/itis/sleeper", + "service_version": "2.0.2", + "created": "2023-01-11 13:11:47.293595", + } + ] + } + ) diff --git a/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/service_runs.py b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/service_runs.py new file mode 100644 index 00000000000..e16ba7ce108 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_resource_usage_tracker/service_runs.py @@ -0,0 +1,53 @@ +from datetime import datetime +from decimal import Decimal +from typing import NamedTuple + +from pydantic import BaseModel, PositiveInt + +from ..projects import ProjectID +from ..projects_nodes_io import NodeID +from ..resource_tracker import CreditTransactionStatus, ServiceRunStatus +from ..services import ServiceKey, ServiceVersion +from ..services_types import ServiceRunID +from ..users import UserID +from ..wallets import WalletID + + +class ServiceRunGet(BaseModel): + service_run_id: ServiceRunID + wallet_id: WalletID | None + wallet_name: str | None + user_id: UserID + user_email: str + project_id: ProjectID + project_name: str + 
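Illustrative sketch (not part of the patch, and assuming the embedded schema examples validate as-is): the model_validator above ties the plan classification to the concrete unit_extra_info type, which can be exercised by round-tripping the model's own first example (a TIER plan).

from models_library.api_schemas_resource_usage_tracker.pricing_plans import RutPricingPlanGet

# reuse the first example shipped in the model's own json_schema_extra
tier_example = RutPricingPlanGet.model_config["json_schema_extra"]["examples"][0]  # type: ignore[index]
plan = RutPricingPlanGet.model_validate(tier_example)
assert plan.is_active
assert plan.pricing_units is not None and len(plan.pricing_units) == 1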
project_tags: list[str] + node_id: NodeID + node_name: str + root_parent_project_id: ProjectID + root_parent_project_name: str + service_key: ServiceKey + service_version: ServiceVersion + service_type: str + started_at: datetime + stopped_at: datetime | None + service_run_status: ServiceRunStatus + # Cost in credits + credit_cost: Decimal | None + transaction_status: CreditTransactionStatus | None + + +class ServiceRunPage(NamedTuple): + items: list[ServiceRunGet] + total: PositiveInt + + +class OsparcCreditsAggregatedByServiceGet(BaseModel): + osparc_credits: Decimal + service_key: ServiceKey + running_time_in_hours: Decimal + + +class OsparcCreditsAggregatedUsagesPage(NamedTuple): + items: list[OsparcCreditsAggregatedByServiceGet] + total: PositiveInt diff --git a/services/catalog/src/simcore_service_catalog/services/__init__.py b/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/__init__.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/services/__init__.py rename to packages/models-library/src/models_library/api_schemas_rpc_async_jobs/__init__.py diff --git a/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/async_jobs.py b/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/async_jobs.py new file mode 100644 index 00000000000..3b19513ca36 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/async_jobs.py @@ -0,0 +1,40 @@ +from typing import Annotated, Any, TypeAlias +from uuid import UUID + +from pydantic import BaseModel, StringConstraints + +from ..products import ProductName +from ..progress_bar import ProgressReport +from ..users import UserID + +AsyncJobId: TypeAlias = UUID +AsyncJobName: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, min_length=1) +] + + +class AsyncJobStatus(BaseModel): + job_id: AsyncJobId + progress: ProgressReport + done: bool + + +class AsyncJobResult(BaseModel): + result: Any + + +class AsyncJobGet(BaseModel): + job_id: AsyncJobId + job_name: AsyncJobName + + +class AsyncJobAbort(BaseModel): + result: bool + job_id: AsyncJobId + + +class AsyncJobNameData(BaseModel): + """Data for controlling access to an async job""" + + product_name: ProductName + user_id: UserID diff --git a/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/exceptions.py b/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/exceptions.py new file mode 100644 index 00000000000..7399b6ff303 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_rpc_async_jobs/exceptions.py @@ -0,0 +1,31 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class BaseAsyncjobRpcError(OsparcErrorMixin, RuntimeError): + pass + + +class JobSchedulerError(BaseAsyncjobRpcError): + msg_template: str = "Celery exception: {exc}" + + +class JobMissingError(BaseAsyncjobRpcError): + msg_template: str = "Job {job_id} does not exist" + + +class JobStatusError(BaseAsyncjobRpcError): + msg_template: str = "Could not get status of job {job_id}" + + +class JobNotDoneError(BaseAsyncjobRpcError): + msg_template: str = "Job {job_id} not done" + + +class JobAbortedError(BaseAsyncjobRpcError): + msg_template: str = "Job {job_id} aborted" + + +class JobError(BaseAsyncjobRpcError): + msg_template: str = ( + "Job '{job_id}' failed with exception type '{exc_type}' and message: {exc_msg}" + ) diff --git a/packages/models-library/src/models_library/api_schemas_storage.py 
b/packages/models-library/src/models_library/api_schemas_storage.py deleted file mode 100644 index 81303b25c54..00000000000 --- a/packages/models-library/src/models_library/api_schemas_storage.py +++ /dev/null @@ -1,299 +0,0 @@ -""" - Models used in storage API: - - Specifically services/storage/src/simcore_service_storage/api/v0/openapi.yaml#/components/schemas - - IMPORTANT: DO NOT COUPLE these schemas until storage is refactored -""" - -import re -from datetime import datetime -from enum import Enum -from typing import Any, Optional, Pattern, Union -from uuid import UUID - -from models_library.projects_nodes_io import ( - LocationID, - LocationName, - NodeID, - SimcoreS3FileID, - StorageFileID, -) -from pydantic import ( - BaseModel, - ByteSize, - ConstrainedStr, - Extra, - Field, - PositiveInt, - root_validator, - validator, -) -from pydantic.networks import AnyUrl - -from .basic_regex import DATCORE_DATASET_NAME_RE, S3_BUCKET_NAME_RE -from .generics import ListModel - -ETag = str - - -class S3BucketName(ConstrainedStr): - regex: Optional[Pattern[str]] = re.compile(S3_BUCKET_NAME_RE) - - -class DatCoreDatasetName(ConstrainedStr): - regex: Optional[Pattern[str]] = re.compile(DATCORE_DATASET_NAME_RE) - - -# / -class HealthCheck(BaseModel): - name: Optional[str] - status: Optional[str] - api_version: Optional[str] - version: Optional[str] - - -# /locations - - -class FileLocation(BaseModel): - name: LocationName - id: LocationID - - class Config: - extra = Extra.forbid - schema_extra = { - "examples": [{"name": "simcore.s3", "id": 0}, {"name": "datcore", "id": 1}] - } - - -FileLocationArray = ListModel[FileLocation] - -# /locations/{location_id}/datasets - - -class DatasetMetaDataGet(BaseModel): - dataset_id: Union[UUID, DatCoreDatasetName] - display_name: str - - class Config: - extra = Extra.forbid - orm_mode = True - schema_extra = { - "examples": [ - # simcore dataset - { - "dataset_id": "74a84992-8c99-47de-b88a-311c068055ea", - "display_name": "api", - }, - { - "dataset_id": "1c46752c-b096-11ea-a3c4-02420a00392e", - "display_name": "Octave JupyterLab", - }, - { - "dataset_id": "2de04d1a-f346-11ea-9c22-02420a00085a", - "display_name": "Sleepers", - }, - # datcore datasets - { - "dataset_id": "N:dataset:be862eb8-861e-4b36-afc3-997329dd02bf", - "display_name": "simcore-testing-bucket", - }, - { - "dataset_id": "N:dataset:9ad8adb0-8ea2-4be6-bc45-ecbec7546393", - "display_name": "YetAnotherTest", - }, - ] - } - - -# /locations/{location_id}/files/metadata: -# /locations/{location_id}/files/{file_id}/metadata: -class FileMetaDataGet(BaseModel): - # Used by frontend - file_uuid: str = Field( - description="NOT a unique ID, like (api|uuid)/uuid/file_name or DATCORE folder structure", - ) - location_id: LocationID = Field(..., description="Storage location") - project_name: Optional[str] = Field( - default=None, - description="optional project name, used by frontend to display path", - ) - node_name: Optional[str] = Field( - default=None, - description="optional node name, used by frontend to display path", - ) - file_name: str = Field(..., description="Display name for a file") - file_id: StorageFileID = Field( - ..., - description="THIS IS the unique ID for the file. 
either (api|project_id)/node_id/file_name.ext for S3 and N:package:UUID for datcore", - ) - created_at: datetime - last_modified: datetime - file_size: ByteSize = Field(-1, description="File size in bytes (-1 means invalid)") - entity_tag: Optional[ETag] = Field( - default=None, - description="Entity tag (or ETag), represents a specific version of the file, None if invalid upload or datcore", - ) - is_soft_link: bool = Field( - False, - description="If true, this file is a soft link." - "i.e. is another entry with the same object_name", - ) - - @validator("location_id", pre=True) - @classmethod - def ensure_location_is_integer(cls, v): - if v is not None: - return int(v) - return v - - class Config: - extra = Extra.forbid - orm_mode = True - schema_extra = { - "examples": [ - # typical S3 entry - { - "created_at": "2020-06-17 12:28:55.705340", - "entity_tag": "8711cf258714b2de5498f5a5ef48cc7b", - "file_id": "1c46752c-b096-11ea-a3c4-02420a00392e/e603724d-4af1-52a1-b866-0d4b792f8c4a/work.zip", - "file_name": "work.zip", - "file_size": 17866343, - "file_uuid": "1c46752c-b096-11ea-a3c4-02420a00392e/e603724d-4af1-52a1-b866-0d4b792f8c4a/work.zip", - "is_soft_link": False, - "last_modified": "2020-06-22 13:48:13.398000+00:00", - "location_id": 0, - "node_name": "JupyterLab Octave", - "project_name": "Octave JupyterLab", - }, - # api entry (not soft link) - { - "created_at": "2020-06-17 12:28:55.705340", - "entity_tag": "8711cf258714b2de5498f5a5ef48cc7b", - "file_id": "api/7b6b4e3d-39ae-3559-8765-4f815a49984e/tmpf_qatpzx", - "file_name": "tmpf_qatpzx", - "file_size": 86, - "file_uuid": "api/7b6b4e3d-39ae-3559-8765-4f815a49984e/tmpf_qatpzx", - "is_soft_link": False, - "last_modified": "2020-06-22 13:48:13.398000+00:00", - "location_id": 0, - "node_name": None, - "project_name": None, - }, - # api entry (soft link) - { - "created_at": "2020-06-17 12:28:55.705340", - "entity_tag": "36aa3644f526655a6f557207e4fd25b8", - "file_id": "api/6f788ad9-0ad8-3d0d-9722-72f08c24a212/output_data.json", - "file_name": "output_data.json", - "file_size": 183, - "file_uuid": "api/6f788ad9-0ad8-3d0d-9722-72f08c24a212/output_data.json", - "is_soft_link": True, - "last_modified": "2020-06-22 13:48:13.398000+00:00", - "location_id": 0, - "node_name": None, - "project_name": None, - }, - # datcore entry - { - "created_at": "2020-05-28T15:48:34.386302+00:00", - "entity_tag": None, - "file_id": "N:package:ce145b61-7e4f-470b-a113-033653e86d3d", - "file_name": "templatetemplate.json", - "file_size": 238, - "file_uuid": "Kember Cardiac Nerve Model/templatetemplate.json", - "is_soft_link": False, - "last_modified": "2020-05-28T15:48:37.507387+00:00", - "location_id": 1, - "node_name": None, - "project_name": None, - }, - ] - } - - -class FileMetaDataArray(BaseModel): - __root__: list[FileMetaDataGet] = [] - - -# /locations/{location_id}/files/{file_id} - - -class LinkType(str, Enum): - PRESIGNED = "PRESIGNED" - S3 = "S3" - - -class PresignedLink(BaseModel): - link: AnyUrl - - -class FileUploadLinks(BaseModel): - abort_upload: AnyUrl - complete_upload: AnyUrl - - -class FileUploadSchema(BaseModel): - chunk_size: ByteSize - urls: list[AnyUrl] - links: FileUploadLinks - - -# /locations/{location_id}/files/{file_id}:complete -class UploadedPart(BaseModel): - number: PositiveInt - e_tag: ETag - - -class FileUploadCompletionBody(BaseModel): - parts: list[UploadedPart] - - -class FileUploadCompleteLinks(BaseModel): - state: AnyUrl - - -class FileUploadCompleteResponse(BaseModel): - links: FileUploadCompleteLinks - - -# 
/locations/{location_id}/files/{file_id}:complete/futures/{future_id} -class FileUploadCompleteState(Enum): - OK = "ok" - NOK = "nok" - - -class FileUploadCompleteFutureResponse(BaseModel): - state: FileUploadCompleteState - e_tag: Optional[ETag] = Field(default=None) - - -# /simcore-s3/ - - -class FoldersBody(BaseModel): - source: dict[str, Any] = Field(default_factory=dict) - destination: dict[str, Any] = Field(default_factory=dict) - nodes_map: dict[NodeID, NodeID] = Field(default_factory=dict) - - @root_validator() - @classmethod - def ensure_consistent_entries(cls, values): - source_node_keys = ( - NodeID(n) for n in values["source"].get("workbench", {}).keys() - ) - if set(source_node_keys) != set(values["nodes_map"].keys()): - raise ValueError("source project nodes do not fit with nodes_map entries") - destination_node_keys = ( - NodeID(n) for n in values["destination"].get("workbench", {}).keys() - ) - if set(destination_node_keys) != set(values["nodes_map"].values()): - raise ValueError( - "destination project nodes do not fit with nodes_map values" - ) - return values - - -class SoftCopyBody(BaseModel): - link_id: SimcoreS3FileID diff --git a/packages/models-library/src/models_library/api_schemas_storage/__init__.py b/packages/models-library/src/models_library/api_schemas_storage/__init__.py new file mode 100644 index 00000000000..912aa218e54 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_storage/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +STORAGE_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter(RPCNamespace).validate_python( + "storage" +) diff --git a/packages/models-library/src/models_library/api_schemas_storage/export_data_async_jobs.py b/packages/models-library/src/models_library/api_schemas_storage/export_data_async_jobs.py new file mode 100644 index 00000000000..8e482f49be4 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_storage/export_data_async_jobs.py @@ -0,0 +1,19 @@ +# pylint: disable=R6301 + +from common_library.errors_classes import OsparcErrorMixin + +### Exceptions + + +class StorageRpcBaseError(OsparcErrorMixin, RuntimeError): + pass + + +class InvalidFileIdentifierError(StorageRpcBaseError): + msg_template: str = "Could not find the file {file_id}" + + +class AccessRightError(StorageRpcBaseError): + msg_template: str = ( + "User {user_id} does not have access to file {file_id} with location {location_id}" + ) diff --git a/packages/models-library/src/models_library/api_schemas_storage/storage_schemas.py b/packages/models-library/src/models_library/api_schemas_storage/storage_schemas.py new file mode 100644 index 00000000000..000db167a10 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_storage/storage_schemas.py @@ -0,0 +1,492 @@ +""" +Models used in storage API: + +Specifically services/storage/src/simcore_service_storage/api/v0/openapi.yaml#/components/schemas + +IMPORTANT: DO NOT COUPLE these schemas until storage is refactored +""" + +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import Annotated, Any, Final, Literal, Self, TypeAlias +from uuid import UUID + +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + PositiveInt, + RootModel, + StringConstraints, + field_validator, + model_validator, +) +from pydantic.config import JsonDict +from pydantic.networks import AnyUrl + +from ..basic_regex import ( + 
DATCORE_COLLECTION_NAME_RE, + DATCORE_DATASET_NAME_RE, + DATCORE_FILE_ID_RE, + S3_BUCKET_NAME_RE, +) +from ..basic_types import SHA256Str +from ..generics import ListModel +from ..projects import ProjectID +from ..projects_nodes_io import ( + LocationID, + LocationName, + NodeID, + SimcoreS3FileID, + StorageFileID, +) +from ..users import UserID + +ETag: TypeAlias = str + +S3BucketName: TypeAlias = Annotated[str, StringConstraints(pattern=S3_BUCKET_NAME_RE)] + +DatCoreDatasetName: TypeAlias = Annotated[ + str, StringConstraints(pattern=DATCORE_DATASET_NAME_RE) +] +DatCoreCollectionName: TypeAlias = Annotated[ + str, StringConstraints(pattern=DATCORE_COLLECTION_NAME_RE) +] +DatCorePackageName: TypeAlias = Annotated[ + str, StringConstraints(pattern=DATCORE_FILE_ID_RE) +] + + +# / +class HealthCheck(BaseModel): + name: str | None + status: str | None + api_version: str | None + version: str | None + + +# /locations +class FileLocation(BaseModel): + name: LocationName + id: LocationID + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + {"name": "simcore.s3", "id": 0}, + {"name": "datcore", "id": 1}, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", + json_schema_extra=_update_json_schema_extra, + ) + + +FileLocationArray: TypeAlias = ListModel[FileLocation] + + +# /locations/{location_id}/datasets +class DatasetMetaDataGet(BaseModel): + dataset_id: UUID | DatCoreDatasetName + display_name: str + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # simcore dataset + { + "dataset_id": "74a84992-8c99-47de-b88a-311c068055ea", + "display_name": "api", + }, + { + "dataset_id": "1c46752c-b096-11ea-a3c4-02420a00392e", + "display_name": "Octave JupyterLab", + }, + { + "dataset_id": "2de04d1a-f346-11ea-9c22-02420a00085a", + "display_name": "Sleepers", + }, + # datcore datasets + { + "dataset_id": "N:dataset:be862eb8-861e-4b36-afc3-997329dd02bf", + "display_name": "simcore-testing-bucket", + }, + { + "dataset_id": "N:dataset:9ad8adb0-8ea2-4be6-bc45-ecbec7546393", + "display_name": "YetAnotherTest", + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", + from_attributes=True, + json_schema_extra=_update_json_schema_extra, + ) + + +UNDEFINED_SIZE_TYPE: TypeAlias = Literal[-1] +UNDEFINED_SIZE: UNDEFINED_SIZE_TYPE = -1 + + +class FileMetaDataGetv010(BaseModel): + file_uuid: str + location_id: LocationID + location: LocationName + bucket_name: str + object_name: str + project_id: ProjectID | None + project_name: str | None + node_id: NodeID | None + node_name: str | None + file_name: str + user_id: UserID | None + user_name: str | None + + model_config = ConfigDict(extra="forbid", frozen=True) + + +class FileMetaDataGet(BaseModel): + # Used by frontend + file_uuid: str = Field( + description="NOT a unique ID, like (api|uuid)/uuid/file_name or DATCORE folder structure", + ) + location_id: LocationID = Field(..., description="Storage location") + project_name: str | None = Field( + default=None, + description="optional project name, used by frontend to display path", + ) + node_name: str | None = Field( + default=None, + description="optional node name, used by frontend to display path", + ) + file_name: str = Field(..., description="Display name for a file") + file_id: StorageFileID = Field( + ..., + description="THIS IS the unique ID for the file. 
either (api|project_id)/node_id/file_name.ext for S3 and N:package:UUID for datcore", + ) + created_at: datetime + last_modified: datetime + file_size: UNDEFINED_SIZE_TYPE | ByteSize = Field( + default=UNDEFINED_SIZE, description="File size in bytes (-1 means invalid)" + ) + entity_tag: ETag | None = Field( + default=None, + description="Entity tag (or ETag), represents a specific version of the file, None if invalid upload or datcore", + ) + is_soft_link: bool = Field( + default=False, + description="If true, this file is a soft link." + "i.e. is another entry with the same object_name", + ) + is_directory: bool = Field(default=False, description="if True this is a directory") + sha256_checksum: SHA256Str | None = Field( + default=None, + description="SHA256 message digest of the file content. Main purpose: cheap lookup.", + ) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # typical S3 entry + { + "created_at": "2020-06-17 12:28:55.705340", + "entity_tag": "8711cf258714b2de5498f5a5ef48cc7b", + "file_id": "1c46752c-b096-11ea-a3c4-02420a00392e/e603724d-4af1-52a1-b866-0d4b792f8c4a/work.zip", + "file_name": "work.zip", + "file_size": 17866343, + "file_uuid": "1c46752c-b096-11ea-a3c4-02420a00392e/e603724d-4af1-52a1-b866-0d4b792f8c4a/work.zip", + "is_soft_link": False, + "last_modified": "2020-06-22 13:48:13.398000+00:00", + "location_id": 0, + "node_name": "JupyterLab Octave", + "project_name": "Octave JupyterLab", + }, + # typical directory entry + { + "created_at": "2020-06-17 12:28:55.705340", + "entity_tag": "8711cf258714b2de5498f5a5ef48cc7b", + "file_id": "9a759caa-9890-4537-8c26-8edefb7a4d7c/be165f45-ddbf-4911-a04d-bc0b885914ef/workspace", + "file_name": "workspace", + "file_size": -1, + "file_uuid": "9a759caa-9890-4537-8c26-8edefb7a4d7c/be165f45-ddbf-4911-a04d-bc0b885914ef/workspace", + "is_soft_link": False, + "last_modified": "2020-06-22 13:48:13.398000+00:00", + "location_id": 0, + "node_name": None, + "project_name": None, + "is_directory": True, + }, + # api entry (not soft link) + { + "created_at": "2020-06-17 12:28:55.705340", + "entity_tag": "8711cf258714b2de5498f5a5ef48cc7b", + "file_id": "api/7b6b4e3d-39ae-3559-8765-4f815a49984e/tmpf_qatpzx", + "file_name": "tmpf_qatpzx", + "file_size": 86, + "file_uuid": "api/7b6b4e3d-39ae-3559-8765-4f815a49984e/tmpf_qatpzx", + "is_soft_link": False, + "last_modified": "2020-06-22 13:48:13.398000+00:00", + "location_id": 0, + "node_name": None, + "project_name": None, + }, + # api entry (soft link) + { + "created_at": "2020-06-17 12:28:55.705340", + "entity_tag": "36aa3644f526655a6f557207e4fd25b8", + "file_id": "api/6f788ad9-0ad8-3d0d-9722-72f08c24a212/output_data.json", + "file_name": "output_data.json", + "file_size": 183, + "file_uuid": "api/6f788ad9-0ad8-3d0d-9722-72f08c24a212/output_data.json", + "is_soft_link": True, + "last_modified": "2020-06-22 13:48:13.398000+00:00", + "location_id": 0, + "node_name": None, + "project_name": None, + }, + # datcore entry + { + "created_at": "2020-05-28T15:48:34.386302+00:00", + "entity_tag": None, + "file_id": "N:package:ce145b61-7e4f-470b-a113-033653e86d3d", + "file_name": "templatetemplate.json", + "file_size": 238, + "file_uuid": "Kember Cardiac Nerve Model/templatetemplate.json", + "is_soft_link": False, + "last_modified": "2020-05-28T15:48:37.507387+00:00", + "location_id": 1, + "node_name": None, + "project_name": None, + }, + ] + } + ) + + model_config = ConfigDict( + extra="ignore", + from_attributes=True, + 
json_schema_extra=_update_json_schema_extra, + ) + + @field_validator("location_id", mode="before") + @classmethod + def ensure_location_is_integer(cls, v): + if v is not None: + return int(v) + return v + + +class FileMetaDataArray(RootModel[list[FileMetaDataGet]]): + root: list[FileMetaDataGet] = Field(default_factory=list) + + +class LinkType(str, Enum): + PRESIGNED = "PRESIGNED" + S3 = "S3" + + +class PresignedLink(BaseModel): + link: AnyUrl + + +class FileUploadLinks(BaseModel): + abort_upload: AnyUrl + complete_upload: AnyUrl + + +class FileUploadSchema(BaseModel): + chunk_size: ByteSize + urls: list[AnyUrl] + links: FileUploadLinks + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # typical S3 entry + { + "chunk_size": "10000000", + "urls": [ + "https://s3.amazonaws.com/bucket-name/key-name?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Expires=1698298164&Signature=WObYM%2F%2B4t7O3%2FZS3Kegb%2Bc4%3D", + ], + "links": { + "abort_upload": "https://storage.com:3021/bucket-name/key-name:abort", + "complete_upload": "https://storage.com:3021/bucket-name/key-name:complete", + }, + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", json_schema_extra=_update_json_schema_extra + ) + + +class TableSynchronisation(BaseModel): + dry_run: bool | None = None + fire_and_forget: bool | None = None + removed: list[str] + + +# /locations/{location_id}/files/{file_id}:complete +class UploadedPart(BaseModel): + number: PositiveInt + e_tag: ETag + + +class FileUploadCompletionBody(BaseModel): + parts: list[UploadedPart] + + @field_validator("parts") + @classmethod + def ensure_sorted(cls, value: list[UploadedPart]) -> list[UploadedPart]: + return sorted(value, key=lambda uploaded_part: uploaded_part.number) + + +class FileUploadCompleteLinks(BaseModel): + state: AnyUrl + + +class FileUploadCompleteResponse(BaseModel): + links: FileUploadCompleteLinks + + +# /locations/{location_id}/files/{file_id}:complete/futures/{future_id} +class FileUploadCompleteState(Enum): + OK = "ok" + NOK = "nok" + + +class FileUploadCompleteFutureResponse(BaseModel): + state: FileUploadCompleteState + e_tag: ETag | None = Field(default=None) + + +# /simcore-s3/ + + +class FoldersBody(BaseModel): + source: Annotated[dict[str, Any], Field(default_factory=dict)] + destination: Annotated[dict[str, Any], Field(default_factory=dict)] + nodes_map: Annotated[dict[NodeID, NodeID], Field(default_factory=dict)] + + @model_validator(mode="after") + def ensure_consistent_entries(self: Self) -> Self: + source_node_keys = (NodeID(n) for n in self.source.get("workbench", {})) + if set(source_node_keys) != set(self.nodes_map.keys()): + msg = "source project nodes do not fit with nodes_map entries" + raise ValueError(msg) + destination_node_keys = ( + NodeID(n) for n in self.destination.get("workbench", {}) + ) + if set(destination_node_keys) != set(self.nodes_map.values()): + msg = "destination project nodes do not fit with nodes_map values" + raise ValueError(msg) + return self + + +class SoftCopyBody(BaseModel): + link_id: SimcoreS3FileID + + +DEFAULT_NUMBER_OF_PATHS_PER_PAGE: Final[int] = 50 +MAX_NUMBER_OF_PATHS_PER_PAGE: Final[int] = 1000 + + +class PathMetaDataGet(BaseModel): + path: Annotated[Path, Field(description="the path to the current path")] + display_path: Annotated[ + Path, + Field( + description="the path to display with UUID replaced (URL Encoded by parts as names may contain '/')" + ), + ] + + file_meta_data: Annotated[ + FileMetaDataGet | None, + 
Field(description="if filled, this is the file meta data of the s3 object"), + ] = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # ls no filter + { + "path": "f8da77a9-24b9-4eab-aee7-1f0608da1e3e", + "display_path": "my amazing project", + }, + # ls f8da77a9-24b9-4eab-aee7-1f0608da1e3e + { + "path": "f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7", + "display_path": "my amazing project/awesome node", + }, + # ls f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7 + { + "path": "f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs", + "display_path": "my amazing project/awesome node/outputs", + }, + # ls f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs + { + "path": "f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs/output5", + "display_path": "my amazing project/awesome node/outputs/output5", + }, + # ls f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs/output_5 + { + "path": f"f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs/output5/{FileMetaDataGet.model_json_schema()['examples'][0]['file_name']}", + "display_path": f"my amazing project/awesome node/outputs/output5/{FileMetaDataGet.model_json_schema()['examples'][0]['file_name']}", + "file_meta_data": FileMetaDataGet.model_json_schema()[ + "examples" + ][0], + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", json_schema_extra=_update_json_schema_extra + ) + + +class PathTotalSizeCreate(BaseModel): + path: Path + size: ByteSize + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # a folder + { + "path": "f8da77a9-24b9-4eab-aee7-1f0608da1e3e", + "size": 15728640, + }, + # 1 file + { + "path": f"f8da77a9-24b9-4eab-aee7-1f0608da1e3e/2f94f80f-633e-4dfa-a983-226b7babe3d7/outputs/output5/{FileMetaDataGet.model_json_schema()['examples'][0]['file_name']}", + "size": 1024, + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", json_schema_extra=_update_json_schema_extra + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/__init__.py b/packages/models-library/src/models_library/api_schemas_webserver/__init__.py new file mode 100644 index 00000000000..c95f68ab78c --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/__init__.py @@ -0,0 +1,9 @@ +from typing import Final + +from pydantic import TypeAdapter + +from ..rabbitmq_basic_types import RPCNamespace + +WEBSERVER_RPC_NAMESPACE: Final[RPCNamespace] = TypeAdapter( + RPCNamespace +).validate_python("webserver") diff --git a/packages/models-library/src/models_library/api_schemas_webserver/_base.py b/packages/models-library/src/models_library/api_schemas_webserver/_base.py new file mode 100644 index 00000000000..4dfcf1473dd --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/_base.py @@ -0,0 +1,87 @@ +""" + Base model classes for schemas in OpenAPI specs (OAS) for this service + +""" + +from typing import Any + +from pydantic import BaseModel, ConfigDict + +from ..utils.change_case import snake_to_camel + + +class EmptyModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + def to_domain_model(self) -> dict[str, Any]: + return self.model_dump( + exclude_unset=True, + by_alias=True, + exclude_none=True, + ) + + +class 
InputSchemaWithoutCamelCase(BaseModel): + model_config = ConfigDict( + populate_by_name=False, + extra="ignore", # Non-strict inputs policy: Used to prune extra fields + frozen=True, + ) + + +class InputSchema(BaseModel): + model_config = ConfigDict( + **InputSchemaWithoutCamelCase.model_config, + alias_generator=snake_to_camel, + ) + + +class OutputSchemaWithoutCamelCase(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + extra="ignore", + frozen=True, + ) + + +class OutputSchema(BaseModel): + model_config = ConfigDict( + alias_generator=snake_to_camel, + populate_by_name=True, + extra="ignore", # Used to prune extra fields from internal data + frozen=True, + ) + + def data( + self, + *, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + **kwargs + ) -> dict[str, Any]: + """Helper function to get envelope's data as a dict""" + return self.model_dump( + by_alias=True, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + **kwargs, + ) + + def data_json( + self, + *, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + **kwargs + ) -> str: + """Helper function to get envelope's data as a json str""" + return self.model_dump_json( + by_alias=True, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + **kwargs, + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/activity.py b/packages/models-library/src/models_library/api_schemas_webserver/activity.py new file mode 100644 index 00000000000..ce1683bed78 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/activity.py @@ -0,0 +1,24 @@ +from typing import TypeAlias + +from pydantic import BaseModel, PositiveFloat + +from ..projects_nodes_io import NodeID + + +class Stats(BaseModel): + cpuUsage: PositiveFloat + memUsage: PositiveFloat + + +class Limits(BaseModel): + cpus: PositiveFloat + mem: PositiveFloat + + +class Activity(BaseModel): + stats: Stats + limits: Limits + queued: bool | None = None # TODO: review since it is NOT filled + + +ActivityStatusDict: TypeAlias = dict[NodeID, Activity] diff --git a/packages/models-library/src/models_library/api_schemas_webserver/auth.py b/packages/models-library/src/models_library/api_schemas_webserver/auth.py new file mode 100644 index 00000000000..697867d93b8 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/auth.py @@ -0,0 +1,155 @@ +from datetime import timedelta +from typing import Annotated, Any + +from models_library.basic_types import IDStr +from pydantic import AliasGenerator, ConfigDict, Field, HttpUrl, SecretStr +from pydantic.alias_generators import to_camel + +from ..emails import LowerCaseEmailStr +from ._base import InputSchema, OutputSchema + + +class AccountRequestInfo(InputSchema): + form: dict[str, Any] + captcha: str + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + # NOTE: this is just informative.
The format of the form is defined + # currently in the front-end and it might change + # SEE image in https://github.com/ITISFoundation/osparc-simcore/pull/5378 + json_schema_extra={ + "example": { + "form": { + "firstName": "James", + "lastName": "Maxwel", + "email": "maxwel@email.com", + "phone": "+1 123456789", + "company": "EM Com", + "address": "Infinite Loop", + "city": "Washington", + "postalCode": "98001", + "country": "USA", + "application": "Antenna_Design", + "description": "Description of something", + "hear": "Search_Engine", + "privacyPolicy": True, + "eula": True, + }, + "captcha": "A12B34", + } + }, + ) + + +class UnregisterCheck(InputSchema): + email: LowerCaseEmailStr + password: SecretStr + + +# +# API keys +# + + +class ApiKeyCreateRequest(InputSchema): + display_name: Annotated[str, Field(..., min_length=3)] + expiration: Annotated[ + timedelta | None, + Field( + None, + description="Time delta from creation time to expiration. If None, then it does not expire.", + ), + ] + + model_config = ConfigDict( + alias_generator=AliasGenerator( + validation_alias=to_camel, + ), + from_attributes=True, + json_schema_extra={ + "examples": [ + { + "displayName": "test-api-forever", + }, + { + "displayName": "test-api-for-one-day", + "expiration": 60 * 60 * 24, + }, + { + "displayName": "test-api-for-another-day", + "expiration": "24:00:00", + }, + ] + }, + ) + + +class ApiKeyCreateResponse(OutputSchema): + id: IDStr + display_name: Annotated[str, Field(..., min_length=3)] + expiration: Annotated[ + timedelta | None, + Field( + None, + description="Time delta from creation time to expiration. If None, then it does not expire.", + ), + ] + api_base_url: HttpUrl | None = None + api_key: str + api_secret: str + + model_config = ConfigDict( + alias_generator=AliasGenerator( + serialization_alias=to_camel, + ), + from_attributes=True, + json_schema_extra={ + "examples": [ + { + "id": "42", + "display_name": "test-api-forever", + "api_base_url": "http://api.osparc.io/v0", # NOSONAR + "api_key": "key", + "api_secret": "secret", + }, + { + "id": "48", + "display_name": "test-api-for-one-day", + "expiration": 60 * 60 * 24, + "api_base_url": "http://api.sim4life.io/v0", # NOSONAR + "api_key": "key", + "api_secret": "secret", + }, + { + "id": "54", + "display_name": "test-api-for-another-day", + "expiration": "24:00:00", + "api_base_url": "http://api.osparc-master.io/v0", # NOSONAR + "api_key": "key", + "api_secret": "secret", + }, + ] + }, + ) + + +class ApiKeyGet(OutputSchema): + id: IDStr + display_name: Annotated[str, Field(..., min_length=3)] + + model_config = ConfigDict( + alias_generator=AliasGenerator( + serialization_alias=to_camel, + ), + from_attributes=True, + json_schema_extra={ + "examples": [ + { + "id": "42", + "display_name": "myapi", + }, + ] + }, + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/catalog.py b/packages/models-library/src/models_library/api_schemas_webserver/catalog.py new file mode 100644 index 00000000000..7b490ad338e --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/catalog.py @@ -0,0 +1,272 @@ +from typing import Annotated, TypeAlias + +from pydantic import ConfigDict, Field +from pydantic.config import JsonDict +from pydantic.main import BaseModel + +from ..api_schemas_catalog import services as api_schemas_catalog_services +from ..services_io import ServiceInput, ServiceOutput +from ..services_types import ServicePortKey +from ..utils.change_case import snake_to_camel +from ._base import 
InputSchema, OutputSchema + +ServiceInputKey: TypeAlias = ServicePortKey +ServiceOutputKey: TypeAlias = ServicePortKey + + +class _BaseCommonApiExtension(BaseModel): + unit_long: str | None = Field( + None, + description="Long name of the unit for display (html-compatible), if available", + ) + unit_short: str | None = Field( + None, + description="Short name for the unit for display (html-compatible), if available", + ) + + model_config = ConfigDict( + alias_generator=snake_to_camel, populate_by_name=True, extra="forbid" + ) + + +class ServiceInputGet(ServiceInput, _BaseCommonApiExtension): + """Extends fields of api_schemas_catalog.services.ServiceGet.inputs[*]""" + + key_id: Annotated[ + ServiceInputKey, Field(description="Unique name identifier for this input") + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "displayOrder": 2, + "label": "Sleep Time", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": {"type": "TextArea", "details": {"minHeight": 1}}, + "keyId": "input_2", + "unitLong": "seconds", + "unitShort": "sec", + }, + "examples": [ + { + "label": "Acceleration", + "description": "acceleration with units", + "type": "ref_contentSchema", + "contentSchema": { + "title": "Acceleration", + "type": "number", + "x_unit": "m/s**2", + }, + "keyId": "input_1", + "unitLong": "meter/second3", + "unitShort": "m/s3", + } + ], + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class ServiceOutputGet(ServiceOutput, _BaseCommonApiExtension): + """Extends fields of api_schemas_catalog.services.ServiceGet.outputs[*]""" + + key_id: Annotated[ + ServiceOutputKey, Field(description="Unique name identifier for this output") + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "displayOrder": 2, + "label": "Time Slept", + "description": "Time the service waited before completion", + "type": "number", + "unit": "second", + "unitLong": "seconds", + "unitShort": "sec", + "keyId": "output_2", + } + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +ServiceInputsGetDict: TypeAlias = dict[ServicePortKey, ServiceInputGet] +ServiceOutputsGetDict: TypeAlias = dict[ServicePortKey, ServiceOutputGet] +ServiceResourcesGet: TypeAlias = api_schemas_catalog_services.ServiceResourcesGet + + +class CatalogLatestServiceGet(api_schemas_catalog_services.LatestServiceGet): + inputs: ServiceInputsGetDict # type: ignore[assignment] + outputs: ServiceOutputsGetDict # type: ignore[assignment] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + base_example = ( + api_schemas_catalog_services.LatestServiceGet.model_json_schema()[ + "examples" + ][0] + ) + + schema.update( + { + "example": { + **base_example, + "inputs": { + "input_1": { + "displayOrder": 1, + "label": "File with int number", + "description": "Pick a file containing only one integer", + "type": "data:text/plain", + "fileToKeyMap": {"single_number.txt": "input_1"}, + "keyId": "input_1", + }, + "input_2": { + "unitLong": "second", + "unitShort": "s", + "label": "Sleep interval", + "description": "Choose an amount of time to sleep in range [0:]", + "keyId": "input_2", + "displayOrder": 2, + "type": "ref_contentSchema", + "contentSchema": { + "title": "Sleep interval", + "type": "integer", + "x_unit": "second", + "minimum": 0, + }, + "defaultValue":
2, + }, + "input_3": { + "displayOrder": 3, + "label": "Fail after sleep", + "description": "If set to true will cause service to fail after it sleeps", + "type": "boolean", + "defaultValue": False, + "keyId": "input_3", + }, + "input_4": { + "unitLong": "meter", + "unitShort": "m", + "label": "Distance to bed", + "description": "It will first walk the distance to bed", + "keyId": "input_4", + "displayOrder": 4, + "type": "ref_contentSchema", + "contentSchema": { + "title": "Distance to bed", + "type": "integer", + "x_unit": "meter", + }, + "defaultValue": 0, + }, + "input_5": { + "unitLong": "byte", + "unitShort": "B", + "label": "Dream (or nightmare) of the night", + "description": "Defines the size of the dream that will be generated [0:]", + "keyId": "input_5", + "displayOrder": 5, + "type": "ref_contentSchema", + "contentSchema": { + "title": "Dream of the night", + "type": "integer", + "x_unit": "byte", + "minimum": 0, + }, + "defaultValue": 0, + }, + }, + "outputs": { + "output_1": { + "displayOrder": 1, + "label": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + "type": "data:text/plain", + "fileToKeyMap": {"single_number.txt": "output_1"}, + "keyId": "output_1", + }, + "output_2": { + "unitLong": "second", + "unitShort": "s", + "label": "Random sleep interval", + "description": "Interval is generated in range [1-9]", + "keyId": "output_2", + "displayOrder": 2, + "type": "ref_contentSchema", + "contentSchema": { + "title": "Random sleep interval", + "type": "integer", + "x_unit": "second", + }, + }, + "output_3": { + "displayOrder": 3, + "label": "Dream output", + "description": "Contains some random data representing a dream", + "type": "data:text/plain", + "fileToKeyMap": {"dream.txt": "output_3"}, + "keyId": "output_3", + }, + }, + } + } + ) + + model_config = ConfigDict( + **OutputSchema.model_config, + json_schema_extra=_update_json_schema_extra, + ) + + +class CatalogServiceGet(api_schemas_catalog_services.ServiceGetV2): + # pylint: disable=too-many-ancestors + inputs: Annotated[ # type: ignore[assignment] + ServiceInputsGetDict, Field(description="inputs with extended information") + ] + outputs: Annotated[ # type: ignore[assignment] + ServiceOutputsGetDict, Field(description="outputs with extended information") + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + **api_schemas_catalog_services.ServiceGetV2.model_json_schema()[ + "examples" + ][0], + "inputs": { + f"input{i}": example + for i, example in enumerate( + ServiceInputGet.model_json_schema()["examples"] + ) + }, + "outputs": { + "outFile": ServiceOutputGet.model_json_schema()["example"] + }, + } + } + ) + + model_config = ConfigDict( + **OutputSchema.model_config, + json_schema_extra=_update_json_schema_extra, + ) + + +class CatalogServiceUpdate(api_schemas_catalog_services.ServiceUpdateV2): + model_config = InputSchema.model_config diff --git a/packages/models-library/src/models_library/api_schemas_webserver/computations.py b/packages/models-library/src/models_library/api_schemas_webserver/computations.py new file mode 100644 index 00000000000..0cd3d993b6d --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/computations.py @@ -0,0 +1,155 @@ +from datetime import datetime +from decimal import Decimal +from typing import Annotated, Any + +from common_library.basic_types import DEFAULT_FACTORY +from pydantic import ( + AnyUrl, + BaseModel, + ConfigDict, + Field, +) + +from 
..api_schemas_directorv2.computations import ( + ComputationGet as _DirectorV2ComputationGet, +) +from ..basic_types import IDStr +from ..projects import CommitID, ProjectID +from ..projects_nodes_io import NodeID +from ..projects_state import RunningState +from ..rest_ordering import OrderBy, create_ordering_query_model_class +from ..rest_pagination import PageQueryParameters +from ._base import ( + InputSchemaWithoutCamelCase, + OutputSchema, + OutputSchemaWithoutCamelCase, +) + + +class ComputationPathParams(BaseModel): + project_id: ProjectID + + +class ComputationGet(_DirectorV2ComputationGet, OutputSchemaWithoutCamelCase): + # NOTE: this is a copy of the same class in models_library.api_schemas_directorv2 + # but it is used in a different context (webserver) + # and we need to add the `OutputSchema` mixin + # so that it can be used as a response model in FastAPI + pass + + +class ComputationStart(InputSchemaWithoutCamelCase): + force_restart: bool = False + subgraph: Annotated[ + set[str], Field(default_factory=set, json_schema_extra={"default": []}) + ] = DEFAULT_FACTORY + + +class ComputationStarted(OutputSchemaWithoutCamelCase): + pipeline_id: Annotated[ + ProjectID, Field(description="ID for created pipeline (=project identifier)") + ] + ref_ids: Annotated[ + list[CommitID], + Field( + default_factory=list, + description="Checkpoints IDs for created pipeline", + json_schema_extra={"default": []}, + ), + ] = DEFAULT_FACTORY + + +### Computation Run + + +ComputationRunListOrderParams = create_ordering_query_model_class( + ordering_fields={ + "submitted_at", + "started_at", + "ended_at", + "state", + }, + default=OrderBy(field=IDStr("submitted_at")), + ordering_fields_api_to_column_map={ + "submitted_at": "created", + "started_at": "started", + "ended_at": "ended", + }, +) + + +class ComputationRunListQueryParams( + PageQueryParameters, + ComputationRunListOrderParams, # type: ignore[misc, valid-type] +): ... 
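[Editor's aside - an illustrative sketch, not part of this patch] The query-parameter classes above compose pagination (PageQueryParameters) with an ordering model built by create_ordering_query_model_class, which also maps public API field names to internal DB column names. The self-contained pydantic sketch below only mirrors that pattern; the field names (limit, offset, order_by_field, order_by_direction), the OrderDirection enum and the helper method are assumptions made for this sketch and do not reproduce the actual models_library helpers.

from enum import Enum
from typing import Literal

from pydantic import BaseModel, NonNegativeInt


class OrderDirection(str, Enum):
    ASC = "asc"
    DESC = "desc"


# assumed API-name -> DB-column mapping, mirroring ordering_fields_api_to_column_map above
_API_TO_COLUMN = {"submitted_at": "created", "started_at": "started", "ended_at": "ended"}


class SketchRunListQueryParams(BaseModel):
    # pagination fields (names are assumptions for this sketch)
    limit: NonNegativeInt = 20
    offset: NonNegativeInt = 0
    # ordering is restricted to the public API field names used above
    order_by_field: Literal["submitted_at", "started_at", "ended_at", "state"] = "submitted_at"
    order_by_direction: OrderDirection = OrderDirection.DESC

    def order_by_column(self) -> str:
        # translate the public field name into the internal column name used for the DB query
        return _API_TO_COLUMN.get(self.order_by_field, self.order_by_field)


# usage: parsed query parameters expose the column to sort on
params = SketchRunListQueryParams(order_by_field="started_at")
assert params.order_by_column() == "started"
[End of aside]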
+ + +class ComputationRunIterationsLatestListQueryParams(ComputationRunListQueryParams): + filter_only_running: bool = Field( + default=False, + description="If true, only running computations are returned", + ) + + +class ComputationRunIterationsListQueryParams(ComputationRunListQueryParams): + include_children: bool = Field( + default=False, + description="If true, all computational runs of the project and its children are returned (Currently supported only for root projects)", + ) + + +class ComputationRunRestGet(OutputSchema): + project_uuid: ProjectID + iteration: int + state: RunningState + info: dict[str, Any] + submitted_at: datetime + started_at: datetime | None + ended_at: datetime | None + root_project_name: str + project_custom_metadata: dict[str, Any] + + +class ComputationRunPathParams(BaseModel): + project_id: ProjectID + model_config = ConfigDict(populate_by_name=True, extra="forbid") + + +### Computation Task + + +class ComputationTaskPathParams(BaseModel): + project_id: ProjectID + model_config = ConfigDict(populate_by_name=True, extra="forbid") + + +ComputationTaskListOrderParams = create_ordering_query_model_class( + ordering_fields={ + "started_at", + }, + default=OrderBy(field=IDStr("started_at")), + ordering_fields_api_to_column_map={"started_at": "start"}, +) + + +class ComputationTaskListQueryParams( + PageQueryParameters, + ComputationTaskListOrderParams, # type: ignore[misc, valid-type] +): + include_children: bool = Field( + default=False, + description="If true, all tasks of the project and its children are returned (Currently supported only for root projects)", + ) + + +class ComputationTaskRestGet(OutputSchema): + project_uuid: ProjectID + node_id: NodeID + state: RunningState + progress: float + image: dict[str, Any] + started_at: datetime | None + ended_at: datetime | None + log_download_link: AnyUrl | None + node_name: str + osparc_credits: Decimal | None diff --git a/packages/models-library/src/models_library/api_schemas_webserver/folders_v2.py b/packages/models-library/src/models_library/api_schemas_webserver/folders_v2.py new file mode 100644 index 00000000000..88333f0b0d9 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/folders_v2.py @@ -0,0 +1,77 @@ +from datetime import datetime +from typing import Annotated, Self + +from pydantic import ConfigDict, Field, field_validator + +from ..access_rights import AccessRights +from ..basic_types import IDStr +from ..folders import FolderDB, FolderID +from ..groups import GroupID +from ..utils.common_validators import null_or_none_str_to_none_validator +from ..workspaces import WorkspaceID +from ._base import InputSchema, OutputSchema + + +class FolderGet(OutputSchema): + folder_id: FolderID + parent_folder_id: FolderID | None = None + name: str + + created_at: datetime + modified_at: datetime + trashed_at: datetime | None + trashed_by: Annotated[ + GroupID | None, Field(description="The primary gid of the user who trashed") + ] + owner: GroupID + workspace_id: WorkspaceID | None + my_access_rights: AccessRights + + @classmethod + def from_domain_model( + cls, + folder_db: FolderDB, + trashed_by_primary_gid: GroupID | None, + user_folder_access_rights: AccessRights, + ) -> Self: + if (folder_db.trashed_by is None) ^ (trashed_by_primary_gid is None): + msg = f"Incompatible inputs: {folder_db.trashed_by=} but not {trashed_by_primary_gid=}" + raise ValueError(msg) + + return cls( + folder_id=folder_db.folder_id, + parent_folder_id=folder_db.parent_folder_id, + 
name=folder_db.name, + created_at=folder_db.created, + modified_at=folder_db.modified, + trashed_at=folder_db.trashed, + trashed_by=trashed_by_primary_gid, + owner=folder_db.created_by_gid, + workspace_id=folder_db.workspace_id, + my_access_rights=user_folder_access_rights, + ) + + +class FolderCreateBodyParams(InputSchema): + name: IDStr + parent_folder_id: FolderID | None = None + workspace_id: WorkspaceID | None = None + model_config = ConfigDict(extra="forbid") + + _null_or_none_str_to_none_validator = field_validator( + "parent_folder_id", mode="before" + )(null_or_none_str_to_none_validator) + + _null_or_none_str_to_none_validator2 = field_validator( + "workspace_id", mode="before" + )(null_or_none_str_to_none_validator) + + +class FolderReplaceBodyParams(InputSchema): + name: IDStr + parent_folder_id: FolderID | None = None + model_config = ConfigDict(extra="forbid") + + _null_or_none_str_to_none_validator = field_validator( + "parent_folder_id", mode="before" + )(null_or_none_str_to_none_validator) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/functions.py b/packages/models-library/src/models_library/api_schemas_webserver/functions.py new file mode 100644 index 00000000000..7e1e4b99a35 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/functions.py @@ -0,0 +1,127 @@ +from typing import Annotated, TypeAlias + +from pydantic import Field + +from ..functions import ( + Function, + FunctionBase, + FunctionClass, + FunctionClassSpecificData, + FunctionID, + FunctionIDNotFoundError, + FunctionInputs, + FunctionInputSchema, + FunctionInputsList, + FunctionInputsValidationError, + FunctionJob, + FunctionJobClassSpecificData, + FunctionJobCollection, + FunctionJobCollectionID, + FunctionJobCollectionIDNotFoundError, + FunctionJobCollectionsListFilters, + FunctionJobCollectionStatus, + FunctionJobID, + FunctionJobIDNotFoundError, + FunctionJobStatus, + FunctionOutputs, + FunctionOutputSchema, + FunctionSchemaClass, + JSONFunctionInputSchema, + JSONFunctionOutputSchema, + ProjectFunction, + ProjectFunctionJob, + RegisteredFunction, + RegisteredFunctionBase, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, + RegisteredProjectFunction, + RegisteredProjectFunctionJob, + RegisteredSolverFunction, + SolverFunction, + SolverFunctionJob, + UnsupportedFunctionClassError, + UnsupportedFunctionFunctionJobClassCombinationError, +) +from ._base import InputSchema, OutputSchema + +__all__ = [ + "Function", + "FunctionBase", + "FunctionClass", + "FunctionClassSpecificData", + "FunctionClassSpecificData", + "FunctionID", + "FunctionID", + "FunctionIDNotFoundError", + "FunctionIDNotFoundError", + "FunctionInputSchema", + "FunctionInputs", + "FunctionInputs", + "FunctionInputsList", + "FunctionInputsList", + "FunctionInputsValidationError", + "FunctionInputsValidationError", + "FunctionJob", + "FunctionJobClassSpecificData", + "FunctionJobClassSpecificData", + "FunctionJobCollection", + "FunctionJobCollectionID", + "FunctionJobCollectionID", + "FunctionJobCollectionIDNotFoundError", + "FunctionJobCollectionIDNotFoundError", + "FunctionJobCollectionStatus", + "FunctionJobCollectionStatus", + "FunctionJobCollectionsListFilters", + "FunctionJobID", + "FunctionJobID", + "FunctionJobIDNotFoundError", + "FunctionJobIDNotFoundError", + "FunctionJobStatus", + "FunctionJobStatus", + "FunctionOutputSchema", + "FunctionOutputs", + "FunctionSchemaClass", + "FunctionToRegister", + "FunctionToRegister", + "JSONFunctionInputSchema", + 
"JSONFunctionOutputSchema", + "ProjectFunction", + "ProjectFunctionJob", + "RegisteredFunction", + "RegisteredFunctionBase", + "RegisteredFunctionGet", + "RegisteredFunctionJob", + "RegisteredFunctionJobCollection", + "RegisteredProjectFunction", + "RegisteredProjectFunctionGet", + "RegisteredProjectFunctionJob", + "RegisteredSolverFunction", + "RegisteredSolverFunctionGet", + "SolverFunction", + "SolverFunctionJob", + "UnsupportedFunctionClassError", + "UnsupportedFunctionFunctionJobClassCombinationError", +] + + +class RegisteredSolverFunctionGet(RegisteredSolverFunction, OutputSchema): ... + + +class RegisteredProjectFunctionGet(RegisteredProjectFunction, OutputSchema): ... + + +class SolverFunctionToRegister(SolverFunction, InputSchema): ... + + +class ProjectFunctionToRegister(ProjectFunction, InputSchema): ... + + +FunctionToRegister: TypeAlias = Annotated[ + ProjectFunctionToRegister | SolverFunctionToRegister, + Field(discriminator="function_class"), +] + +RegisteredFunctionGet: TypeAlias = Annotated[ + RegisteredProjectFunctionGet | RegisteredSolverFunctionGet, + Field(discriminator="function_class"), +] diff --git a/packages/models-library/src/models_library/api_schemas_webserver/groups.py b/packages/models-library/src/models_library/api_schemas_webserver/groups.py new file mode 100644 index 00000000000..643c66b817a --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/groups.py @@ -0,0 +1,385 @@ +from contextlib import suppress +from typing import Annotated, Self, TypeVar + +from common_library.basic_types import DEFAULT_FACTORY +from common_library.dict_tools import remap_keys +from pydantic import ( + AnyHttpUrl, + AnyUrl, + BaseModel, + ConfigDict, + Field, + TypeAdapter, + ValidationError, + field_validator, + model_validator, +) +from pydantic.config import JsonDict + +from ..emails import LowerCaseEmailStr +from ..groups import ( + EVERYONE_GROUP_ID, + AccessRightsDict, + Group, + GroupID, + GroupMember, + GroupsByTypeTuple, + StandardGroupCreate, + StandardGroupUpdate, +) +from ..users import UserID, UserNameID +from ..utils.common_validators import create__check_only_one_is_set__root_validator +from ._base import InputSchema, OutputSchema, OutputSchemaWithoutCamelCase + +S = TypeVar("S", bound=BaseModel) + + +class GroupAccessRights(BaseModel): + """ + defines acesss rights for the user + """ + + read: bool + write: bool + delete: bool + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"read": True, "write": False, "delete": False}, + {"read": True, "write": True, "delete": False}, + {"read": True, "write": True, "delete": True}, + ] + } + ) + + +class GroupGet(OutputSchema): + gid: GroupID = Field(..., description="the group ID") + label: str = Field(..., description="the group name") + description: str = Field(..., description="the group description") + thumbnail: AnyUrl | None = Field( + default=None, description="url to the group thumbnail" + ) + access_rights: GroupAccessRights = Field(..., alias="accessRights") + + inclusion_rules: Annotated[ + dict[str, str], + Field( + default_factory=dict, + alias="inclusionRules", + deprecated=True, + ), + ] = DEFAULT_FACTORY + + @classmethod + def from_domain_model(cls, group: Group, access_rights: AccessRightsDict) -> Self: + # Adapts these domain models into this schema + return cls.model_validate( + { + **remap_keys( + group.model_dump( + include={ + "gid", + "name", + "description", + "thumbnail", + }, + exclude={ + "inclusion_rules", # deprecated + }, + 
exclude_unset=True, + by_alias=False, + ), + rename={ + "name": "label", + }, + ), + "access_rights": access_rights, + } + ) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "gid": "27", + "label": "A user", + "description": "A very special user", + "thumbnail": "https://placekitten.com/10/10", + "accessRights": {"read": True, "write": False, "delete": False}, + }, + { + "gid": 1, + "label": "ITIS Foundation", + "description": "The Foundation for Research on Information Technologies in Society", + "accessRights": {"read": True, "write": False, "delete": False}, + }, + { + "gid": "1", + "label": "All", + "description": "Open to all users", + "accessRights": {"read": True, "write": True, "delete": True}, + }, + { + "gid": 5, + "label": "SPARCi", + "description": "Stimulating Peripheral Activity to Relieve Conditions", + "thumbnail": "https://placekitten.com/15/15", + "accessRights": {"read": True, "write": True, "delete": True}, + }, + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + @field_validator("thumbnail", mode="before") + @classmethod + def _sanitize_legacy_data(cls, v): + if v: + # Enforces null if thumbnail is not valid URL or empty + with suppress(ValidationError): + return TypeAdapter(AnyHttpUrl).validate_python(v) + return None + + +class GroupCreate(InputSchema): + label: str + description: str + thumbnail: AnyUrl | None = None + + def to_domain_model(self) -> StandardGroupCreate: + data = remap_keys( + self.model_dump( + mode="json", + # NOTE: intentionally inclusion_rules are not exposed to the REST api + include={"label", "description", "thumbnail"}, + exclude_unset=True, + ), + rename={"label": "name"}, + ) + return StandardGroupCreate(**data) + + +class GroupUpdate(InputSchema): + label: str | None = None + description: str | None = None + thumbnail: AnyUrl | None = None + + def to_domain_model(self) -> StandardGroupUpdate: + data = remap_keys( + self.model_dump( + mode="json", + # NOTE: intentionally inclusion_rules are not exposed to the REST api + include={"label", "description", "thumbnail"}, + exclude_unset=True, + ), + rename={"label": "name"}, + ) + return StandardGroupUpdate(**data) + + +class MyGroupsGet(OutputSchema): + me: GroupGet + organizations: list[GroupGet] | None = None + all: GroupGet + product: GroupGet | None = None + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "me": { + "gid": "27", + "label": "A user", + "description": "A very special user", + "accessRights": {"read": True, "write": True, "delete": True}, + }, + "organizations": [ + { + "gid": "15", + "label": "ITIS Foundation", + "description": "The Foundation for Research on Information Technologies in Society", + "accessRights": { + "read": True, + "write": False, + "delete": False, + }, + }, + { + "gid": "16", + "label": "Blue Fundation", + "description": "Some foundation", + "accessRights": { + "read": True, + "write": False, + "delete": False, + }, + }, + ], + "all": { + "gid": EVERYONE_GROUP_ID, + "label": "All", + "description": "Open to all users", + "accessRights": {"read": True, "write": False, "delete": False}, + }, + } + } + ) + + @classmethod + def from_domain_model( + cls, + groups_by_type: GroupsByTypeTuple, + my_product_group: tuple[Group, AccessRightsDict] | None, + ) -> Self: + assert groups_by_type.primary # nosec + assert groups_by_type.everyone # nosec + + return cls( + me=GroupGet.from_domain_model(*groups_by_type.primary), + organizations=[ + 
GroupGet.from_domain_model(*gi) for gi in groups_by_type.standard + ], + all=GroupGet.from_domain_model(*groups_by_type.everyone), + product=( + GroupGet.from_domain_model(*my_product_group) + if my_product_group + else None + ), + ) + + +class GroupUserGet(OutputSchemaWithoutCamelCase): + + id: Annotated[UserID | None, Field(description="the user's id")] = None + user_name: Annotated[ + UserNameID | None, Field(alias="userName", description="None if private") + ] = None + gid: Annotated[ + GroupID | None, + Field(description="the user primary gid"), + ] = None + + login: Annotated[ + LowerCaseEmailStr | None, + Field(description="the user's email or None if private"), + ] = None + first_name: Annotated[str | None, Field(description="None if private")] = None + last_name: Annotated[str | None, Field(description="None if private")] = None + gravatar_id: Annotated[ + str | None, Field(description="the user gravatar id hash", deprecated=True) + ] = None + + # Access Rights + access_rights: Annotated[ + GroupAccessRights | None, + Field( + alias="accessRights", + description="If group is standard, these are the access rights of the user to it. " + "None if primary group.", + ), + ] = None + + model_config = ConfigDict( + populate_by_name=True, + json_schema_extra={ + "example": { + "id": "1", + "userName": "mrsmith", + "login": "mr.smith@matrix.com", + "first_name": "Mr", + "last_name": "Smith", + "gravatar_id": "a1af5c6ecc38e81f29695f01d6ceb540", + "gid": "3", + "accessRights": { + "read": True, + "write": False, + "delete": False, + }, + }, + "examples": [ + # unique member on a primary group with two different privacy settings + { + "id": "16", + "userName": "mrprivate", + "gid": "55", + }, + # very private user + { + "id": "6", + "gid": "55", + }, + { + "id": "56", + "userName": "mrpublic", + "login": "mrpublic@email.me", + "first_name": "Mr", + "last_name": "Public", + "gid": "42", + }, + ], + }, + ) + + @classmethod + def from_domain_model(cls, user: GroupMember) -> Self: + return cls.model_validate( + { + "id": user.id, + "user_name": user.name, + "login": user.email, + "first_name": user.first_name, + "last_name": user.last_name, + "gid": user.primary_gid, + "access_rights": user.access_rights, + } + ) + + +class GroupUserAdd(InputSchema): + """ + Identify the user with either `email` or `uid` (only one). + """ + + uid: UserID | None = None + user_name: Annotated[UserNameID | None, Field(alias="userName")] = None + email: Annotated[ + LowerCaseEmailStr | None, + Field( + description="Accessible only if the user has opted to share their email in privacy settings" + ), + ] = None + + _check_uid_or_email = model_validator(mode="after")( + create__check_only_one_is_set__root_validator(["uid", "email", "user_name"]) + ) + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"uid": 42}, + {"email": "foo@email.com"}, + ] + } + ) + + +class GroupUserUpdate(InputSchema): + # NOTE: since it is a single item, it is required. Partial updates + # of individual attributes are not yet supported, e.g.
{read: False} + access_rights: GroupAccessRights + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "accessRights": { + "read": True, + "write": False, + "delete": False, + }, + } + } + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/licensed_items.py b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items.py new file mode 100644 index 00000000000..616c81a1859 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items.py @@ -0,0 +1,197 @@ +from datetime import date, datetime +from typing import Literal, NamedTuple, NotRequired, Self, cast + +from models_library.basic_types import IDStr +from models_library.resource_tracker import PricingPlanId +from pydantic import BaseModel, ConfigDict, HttpUrl, PositiveInt +from pydantic.config import JsonDict +from typing_extensions import TypedDict + +from ..licenses import ( + VIP_DETAILS_EXAMPLE, + FeaturesDict, + LicensedItem, + LicensedItemID, + LicensedItemKey, + LicensedItemVersion, + LicensedResourceType, +) +from ._base import OutputSchema + +# RPC + + +class LicensedResourceSourceFeaturesDict(TypedDict): + age: NotRequired[str] + date: date + ethnicity: NotRequired[str] + functionality: NotRequired[str] + height: NotRequired[str] + name: NotRequired[str] + sex: NotRequired[str] + species: NotRequired[str] + version: NotRequired[str] + weight: NotRequired[str] + + +class LicensedResourceSource(BaseModel): + id: int + description: str + thumbnail: str + features: LicensedResourceSourceFeaturesDict + doi: str | None + license_key: str + license_version: str + protection: Literal["Code", "PayPal"] + available_from_url: HttpUrl | None + + +class LicensedResource(BaseModel): + source: LicensedResourceSource + category_id: IDStr + category_display: str + terms_of_use_url: HttpUrl | None = None + + +class LicensedItemRpcGet(BaseModel): + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + display_name: str + licensed_resource_type: LicensedResourceType + licensed_resources: list[LicensedResource] + pricing_plan_id: PricingPlanId + is_hidden_on_market: bool + created_at: datetime + modified_at: datetime + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "licensed_item_id": "0362b88b-91f8-4b41-867c-35544ad1f7a1", + "key": "Duke", + "version": "1.0.0", + "display_name": "best-model", + "licensed_resource_type": f"{LicensedResourceType.VIP_MODEL}", + "licensed_resources": [ + { + "source": cast(JsonDict, VIP_DETAILS_EXAMPLE), + "category_id": "HumanWholeBody", + "category_display": "Humans", + "terms_of_use_url": None, + } + ], + "pricing_plan_id": "15", + "is_hidden_on_market": False, + "created_at": "2024-12-12 09:59:26.422140", + "modified_at": "2024-12-12 09:59:26.422140", + } + ] + }, + ) + + +class LicensedItemRpcGetPage(NamedTuple): + items: list[LicensedItemRpcGet] + total: PositiveInt + + +# Rest + + +class _ItisVipRestData(OutputSchema): + id: int + description: str + thumbnail: str + features: FeaturesDict # NOTE: here there is a bit of coupling with domain model + doi: str | None + license_version: str + + +class _ItisVipResourceRestData(OutputSchema): + source: _ItisVipRestData + + +class LicensedItemRestGet(OutputSchema): + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + + display_name: str + licensed_resource_type: LicensedResourceType + licensed_resources: list[_ItisVipResourceRestData] + pricing_plan_id: PricingPlanId + + 
category_id: IDStr + category_display: str + category_icon: HttpUrl | None = None # NOTE: Placeholder until provide @odeimaiz + terms_of_use_url: HttpUrl | None = None # NOTE: Placeholder until provided @mguidon + + created_at: datetime + modified_at: datetime + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "licensedItemId": "0362b88b-91f8-4b41-867c-35544ad1f7a1", + "key": "Duke", + "version": "1.0.0", + "displayName": "my best model", + "licensedResourceType": f"{LicensedResourceType.VIP_MODEL}", + "licensedResources": [ + cast( + JsonDict, + { + "source": {**VIP_DETAILS_EXAMPLE, "doi": doi}, + }, + ) + ], + "pricingPlanId": "15", + "categoryId": "HumanWholeBody", + "categoryDisplay": "Humans", + "createdAt": "2024-12-12 09:59:26.422140", + "modifiedAt": "2024-12-12 09:59:26.422140", + } + for doi in ["10.1000/xyz123", None] + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + @classmethod + def from_domain_model(cls, item: LicensedItem) -> Self: + return cls.model_validate( + { + **item.model_dump( + include={ + "licensed_item_id", + "key", + "version", + "display_name", + "licensed_resource_type", + "pricing_plan_id", + "created_at", + "modified_at", + }, + exclude_unset=True, + ), + "licensed_resources": [ + _ItisVipResourceRestData(**x) for x in item.licensed_resources + ], + "category_id": item.licensed_resources[0]["category_id"], + "category_display": item.licensed_resources[0]["category_display"], + "terms_of_use_url": item.licensed_resources[0].get( + "terms_of_use_url", None + ), + } + ) + + +class LicensedItemRestGetPage(NamedTuple): + items: list[LicensedItemRestGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_checkouts.py b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_checkouts.py new file mode 100644 index 00000000000..38e1f11ba28 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_checkouts.py @@ -0,0 +1,72 @@ +from datetime import datetime +from typing import NamedTuple + +from models_library.emails import LowerCaseEmailStr +from pydantic import BaseModel, ConfigDict, PositiveInt + +from ..licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from ..products import ProductName +from ..resource_tracker_licensed_items_checkouts import LicensedItemCheckoutID +from ..users import UserID +from ..wallets import WalletID +from ._base import OutputSchema + +# RPC + + +class LicensedItemCheckoutRpcGet(BaseModel): + licensed_item_checkout_id: LicensedItemCheckoutID + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + user_id: UserID + product_name: ProductName + started_at: datetime + stopped_at: datetime | None + num_of_seats: int + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "licensed_item_checkout_id": "633ef980-6f3e-4b1a-989a-bd77bf9a5d6b", + "licensed_item_id": "0362b88b-91f8-4b41-867c-35544ad1f7a1", + "key": "Duke", + "version": "1.0.0", + "wallet_id": 6, + "user_id": 27845, + "product_name": "osparc", + "started_at": "2024-12-12 09:59:26.422140", + "stopped_at": "2024-12-12 09:59:26.423540", + "num_of_seats": 78, + } + ] + } + ) + + +class LicensedItemCheckoutRpcGetPage(NamedTuple): + items: list[LicensedItemCheckoutRpcGet] + total: PositiveInt + + +# Rest + + +class LicensedItemCheckoutRestGet(OutputSchema): + 
licensed_item_checkout_id: LicensedItemCheckoutID + licensed_item_id: LicensedItemID + key: str + version: str + wallet_id: WalletID + user_id: UserID + user_email: LowerCaseEmailStr + product_name: ProductName + started_at: datetime + stopped_at: datetime | None + num_of_seats: int + + +class LicensedItemCheckoutRestGetPage(NamedTuple): + items: list[LicensedItemCheckoutRestGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_purchases.py b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_purchases.py new file mode 100644 index 00000000000..139df916b25 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/licensed_items_purchases.py @@ -0,0 +1,37 @@ +from datetime import datetime +from decimal import Decimal +from typing import NamedTuple + +from models_library.emails import LowerCaseEmailStr +from pydantic import PositiveInt + +from ..licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from ..products import ProductName +from ..resource_tracker import PricingUnitCostId +from ..resource_tracker_licensed_items_purchases import LicensedItemPurchaseID +from ..users import UserID +from ..wallets import WalletID +from ._base import OutputSchema + + +class LicensedItemPurchaseGet(OutputSchema): + licensed_item_purchase_id: LicensedItemPurchaseID + product_name: ProductName + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + start_at: datetime + expire_at: datetime + num_of_seats: int + purchased_by_user: UserID + user_email: LowerCaseEmailStr + purchased_at: datetime + modified_at: datetime + + +class LicensedItemPurchaseGetPage(NamedTuple): + items: list[LicensedItemPurchaseGet] + total: PositiveInt diff --git a/packages/models-library/src/models_library/api_schemas_webserver/permalinks.py b/packages/models-library/src/models_library/api_schemas_webserver/permalinks.py new file mode 100644 index 00000000000..f409d9d70df --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/permalinks.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel, HttpUrl + + +class ProjectPermalink(BaseModel): + url: HttpUrl + is_public: bool diff --git a/packages/models-library/src/models_library/api_schemas_webserver/products.py b/packages/models-library/src/models_library/api_schemas_webserver/products.py new file mode 100644 index 00000000000..61f03a2c5e9 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/products.py @@ -0,0 +1,181 @@ +from datetime import datetime +from decimal import Decimal +from typing import Annotated, Any, TypeAlias + +from common_library.basic_types import DEFAULT_FACTORY +from pydantic import ( + BaseModel, + ConfigDict, + Field, + HttpUrl, + NonNegativeFloat, + NonNegativeInt, + PlainSerializer, + PositiveInt, +) +from pydantic.config import JsonDict + +from ..basic_types import IDStr, NonNegativeDecimal +from ..emails import LowerCaseEmailStr +from ..products import ProductName +from ._base import InputSchema, OutputSchema + + +class CreditResultRpcGet(BaseModel): + product_name: ProductName + credit_amount: Decimal + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "product_name": "s4l", + "credit_amount": Decimal("15.5"), # type: ignore[dict-item] + }, + ] + } + ) + + model_config = 
ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class CreditPriceGet(OutputSchema): + product_name: str + usd_per_credit: Annotated[ + Annotated[ + NonNegativeDecimal, + PlainSerializer(float, return_type=NonNegativeFloat, when_used="json"), + ] + | None, + Field( + description="Price of a credit in USD. " + "If None, then this product's price is UNDEFINED", + ), + ] + + min_payment_amount_usd: Annotated[ + NonNegativeInt | None, + Field( + description="Minimum amount (inclusive) in USD that can be paid for this product. " + "Can be None if this product's price is UNDEFINED", + ), + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "productName": "osparc", + "usdPerCredit": None, + "minPaymentAmountUsd": None, + }, + { + "productName": "osparc", + "usdPerCredit": "10", + "minPaymentAmountUsd": "10", + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class ProductTemplateGet(OutputSchema): + id_: Annotated[IDStr, Field(alias="id")] + content: str + + +class ProductGet(OutputSchema): + name: ProductName + display_name: str + short_name: Annotated[ + str | None, Field(description="Short display name for SMS") + ] = None + + vendor: Annotated[dict | None, Field(description="vendor attributes")] = None + issues: Annotated[ + list[dict] | None, Field(description="Reference to issues tracker") + ] = None + manuals: Annotated[list[dict] | None, Field(description="List of manuals")] = None + support: Annotated[ + list[dict] | None, Field(description="List of support resources") + ] = None + + login_settings: dict + max_open_studies_per_user: PositiveInt | None + is_payment_enabled: bool + credits_per_usd: NonNegativeDecimal | None + + templates: Annotated[ + list[ProductTemplateGet], + Field( + description="List of templates available to this product for communications (e.g.
emails, sms, etc)", + default_factory=list, + ), + ] = DEFAULT_FACTORY + + +class ProductUIGet(OutputSchema): + product_name: ProductName + ui: Annotated[ + dict[str, Any], + Field(description="Front-end owned ui product configuration"), + ] + + +ExtraCreditsUsdRangeInt: TypeAlias = Annotated[int, Field(ge=0, lt=500)] + + +class InvitationGenerate(InputSchema): + guest: LowerCaseEmailStr + trial_account_days: PositiveInt | None = None + extra_credits_in_usd: ExtraCreditsUsdRangeInt | None = None + + +class InvitationGenerated(OutputSchema): + product_name: ProductName + issuer: str + guest: LowerCaseEmailStr + trial_account_days: PositiveInt | None = None + extra_credits_in_usd: PositiveInt | None = None + created: datetime + invitation_link: HttpUrl + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "productName": "osparc", + "issuer": "john.doe", + "guest": "guest@example.com", + "trialAccountDays": 7, + "extraCreditsInUsd": 30, + "created": "2023-09-27T15:30:00", + "invitationLink": "https://example.com/invitation#1234", + }, + # w/o optional + { + "productName": "osparc", + "issuer": "john.doe@email.com", + "guest": "guest@example.com", + "created": "2023-09-27T15:30:00", + "invitationLink": "https://example.com/invitation#1234", + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects.py b/packages/models-library/src/models_library/api_schemas_webserver/projects.py new file mode 100644 index 00000000000..2b15e052944 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects.py @@ -0,0 +1,278 @@ +"""rest API schema models for projects + + +SEE rationale in https://fastapi.tiangolo.com/tutorial/extra-models/#multiple-models + +""" + +import copy +from datetime import datetime +from typing import Annotated, Any, Literal, Self, TypeAlias + +from common_library.basic_types import DEFAULT_FACTORY +from common_library.dict_tools import remap_keys +from pydantic import ( + BeforeValidator, + ConfigDict, + Field, + HttpUrl, + PlainSerializer, + field_validator, +) +from pydantic.config import JsonDict + +from ..api_schemas_long_running_tasks.tasks import TaskGet +from ..basic_types import LongTruncatedStr, ShortTruncatedStr +from ..emails import LowerCaseEmailStr +from ..folders import FolderID +from ..groups import GroupID +from ..projects import ( + ClassifierID, + DateTimeStr, + NodesDict, + ProjectID, + ProjectTemplateType, + ProjectType, +) +from ..projects_access import AccessRights, GroupIDStr +from ..projects_state import ProjectState +from ..utils._original_fastapi_encoders import jsonable_encoder +from ..utils.common_validators import ( + empty_str_to_none_pre_validator, + none_to_empty_str_pre_validator, + null_or_none_str_to_none_validator, +) +from ..workspaces import WorkspaceID +from ._base import EmptyModel, InputSchema, OutputSchema +from .permalinks import ProjectPermalink +from .projects_ui import StudyUI + + +class ProjectCreateNew(InputSchema): + uuid: ProjectID | None = None # NOTE: suggested uuid! but could be different! 
+ + # display + name: str + description: str | None = None + thumbnail: HttpUrl | None = None + + workbench: NodesDict + + access_rights: dict[GroupIDStr, AccessRights] + + tags: Annotated[list[int], Field(default_factory=list)] = DEFAULT_FACTORY + classifiers: Annotated[list[ClassifierID], Field(default_factory=list)] = ( + DEFAULT_FACTORY + ) + + ui: StudyUI | None = None + + workspace_id: WorkspaceID | None = None + folder_id: FolderID | None = None + + _empty_is_none = field_validator("uuid", "thumbnail", "description", mode="before")( + empty_str_to_none_pre_validator + ) + + _null_or_none_to_none = field_validator("workspace_id", "folder_id", mode="before")( + null_or_none_str_to_none_validator + ) + + def to_domain_model(self) -> dict[str, Any]: + return self.model_dump( + exclude_unset=True, + by_alias=True, + exclude_none=True, + ) + + +# NOTE: based on OVERRIDABLE_DOCUMENT_KEYS +class ProjectCopyOverride(InputSchema): + name: str + description: str | None = None + thumbnail: HttpUrl | None = None + prj_owner: LowerCaseEmailStr + + _empty_is_none = field_validator("thumbnail", mode="before")( + empty_str_to_none_pre_validator + ) + + def to_domain_model(self) -> dict[str, Any]: + return self.model_dump( + exclude_unset=True, + by_alias=True, + exclude_none=True, + ) + + +class ProjectGet(OutputSchema): + uuid: ProjectID + + # display + name: str + description: str + thumbnail: HttpUrl | Literal[""] + + type: ProjectType + template_type: ProjectTemplateType | None + + workbench: NodesDict + + prj_owner: LowerCaseEmailStr + access_rights: dict[GroupIDStr, AccessRights] + + # state + creation_date: DateTimeStr + last_change_date: DateTimeStr + state: ProjectState | None = None + trashed_at: datetime | None + trashed_by: Annotated[ + GroupID | None, Field(description="The primary gid of the user who trashed") + ] + + # labeling + tags: list[int] + classifiers: Annotated[ + list[ClassifierID], + Field(default_factory=list, json_schema_extra={"default": []}), + ] = DEFAULT_FACTORY + + quality: Annotated[ + dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}}) + ] = DEFAULT_FACTORY + + # front-end + ui: EmptyModel | StudyUI | None = None + dev: dict | None + + permalink: ProjectPermalink | None = None + + workspace_id: WorkspaceID | None + folder_id: FolderID | None + + _empty_description = field_validator("description", mode="before")( + none_to_empty_str_pre_validator + ) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + examples=[ + { + "uuid": "a8b0f384-bd08-4793-ab25-65d5a755f4b6", + "name": "My Project", + "description": "This is a sample project", + "thumbnail": "https://example.com/thumbnail.png", + "type": "STANDARD", + "template_type": None, + "workbench": {}, + "prj_owner": "user@email.com", + "access_rights": {}, + "trashed_at": None, + "trashed_by": None, + "dev": {}, + "tags": [], + "workspace_id": None, + "folder_id": None, + "creation_date": "2023-01-01T00:00:00Z", + "last_change_date": "2023-01-02T00:00:00Z", + } + ] + ) + + model_config = ConfigDict(frozen=False, json_schema_extra=_update_json_schema_extra) + + @classmethod + def from_domain_model(cls, project_data: dict[str, Any]) -> Self: + trimmed_data = copy.deepcopy(project_data) + # NOTE: project_data["trashed_by"] is a UserID + # NOTE: project_data["trashed_by_primary_gid"] is a GroupID + trimmed_data.pop("trashed_by", None) + trimmed_data.pop("trashedBy", None) + + return cls.model_validate( + remap_keys( + trimmed_data, + rename={ + 
"trashed": "trashed_at", + "trashed_by_primary_gid": "trashed_by", + "trashedByPrimaryGid": "trashedBy", + }, + ) + ) + + +TaskProjectGet: TypeAlias = TaskGet + + +class ProjectListItem(ProjectGet): ... + + +class ProjectReplace(InputSchema): + uuid: ProjectID + + name: ShortTruncatedStr + description: LongTruncatedStr + thumbnail: Annotated[ + HttpUrl | None, + BeforeValidator(empty_str_to_none_pre_validator), + ] = None + + creation_date: DateTimeStr + last_change_date: DateTimeStr + workbench: NodesDict + access_rights: dict[GroupIDStr, AccessRights] + + tags: Annotated[ + list[int] | None, Field(default_factory=list, json_schema_extra={"default": []}) + ] = DEFAULT_FACTORY + + classifiers: Annotated[ + list[ClassifierID] | None, + Field(default_factory=list, json_schema_extra={"default": []}), + ] = DEFAULT_FACTORY + + ui: StudyUI | None = None + + quality: Annotated[ + dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}}) + ] = DEFAULT_FACTORY + + +class ProjectPatch(InputSchema): + name: ShortTruncatedStr | None = None + description: LongTruncatedStr | None = None + thumbnail: Annotated[ + HttpUrl | None, + BeforeValidator(empty_str_to_none_pre_validator), + PlainSerializer(lambda x: str(x) if x is not None else None), + ] = None + + access_rights: dict[GroupIDStr, AccessRights] | None = None + classifiers: list[ClassifierID] | None = None + dev: dict | None = None + ui: Annotated[ + StudyUI | None, + BeforeValidator(empty_str_to_none_pre_validator), + PlainSerializer( + lambda obj: jsonable_encoder( + obj, exclude_unset=True, by_alias=False + ) # For the sake of backward compatibility + ), + ] = None + quality: dict[str, Any] | None = None + template_type: ProjectTemplateType | None = None + + def to_domain_model(self) -> dict[str, Any]: + return self.model_dump(exclude_unset=True, by_alias=False) + + +__all__: tuple[str, ...] 
= ( + "EmptyModel", + "ProjectCopyOverride", + "ProjectCreateNew", + "ProjectGet", + "ProjectListItem", + "ProjectReplace", + "TaskProjectGet", +) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_access_rights.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_access_rights.py new file mode 100644 index 00000000000..6e1fa7be49e --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_access_rights.py @@ -0,0 +1,55 @@ +from typing import Annotated, Self + +from models_library.groups import GroupID +from models_library.projects import ProjectID +from pydantic import ( + BaseModel, + ConfigDict, + EmailStr, + Field, + HttpUrl, + StringConstraints, + model_validator, +) + +from ..access_rights import AccessRights +from ._base import InputSchema, OutputSchema + + +class ProjectsGroupsPathParams(BaseModel): + project_id: ProjectID + group_id: GroupID + + model_config = ConfigDict(extra="forbid") + + +class ProjectsGroupsBodyParams(InputSchema): + read: bool + write: bool + delete: bool + + +class ProjectShare(InputSchema): + sharee_email: EmailStr + sharer_message: Annotated[ + str, + StringConstraints(max_length=500, strip_whitespace=True), + Field(description="An optional message from sharer to sharee"), + ] = "" + + # Sharing access rights + read: bool + write: bool + delete: bool + + @model_validator(mode="after") + def _validate_access_rights(self) -> Self: + AccessRights.model_construct( + read=self.read, write=self.write, delete=self.delete + ).verify_access_integrity() + return self + + +class ProjectShareAccepted(OutputSchema): + sharee_email: EmailStr + confirmation_link: HttpUrl diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_conversations.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_conversations.py new file mode 100644 index 00000000000..b3d7a3c2590 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_conversations.py @@ -0,0 +1,76 @@ +from datetime import datetime +from typing import Annotated, Self + +from pydantic import Field + +from ..conversations import ( + ConversationGetDB, + ConversationID, + ConversationMessageGetDB, + ConversationMessageID, + ConversationMessageType, + ConversationType, +) +from ..groups import GroupID +from ..products import ProductName +from ..projects import ProjectID +from ._base import InputSchema, OutputSchema + +### PROJECT CONVERSATION ------------------------------------------------------------------- + + +class ConversationRestGet(OutputSchema): + conversation_id: ConversationID + product_name: ProductName + name: Annotated[str, Field(max_length=50)] + project_uuid: ProjectID | None + user_group_id: GroupID + type: ConversationType + created: datetime + modified: datetime + + @classmethod + def from_domain_model(cls, domain: ConversationGetDB) -> Self: + return cls( + conversation_id=domain.conversation_id, + product_name=domain.product_name, + name=domain.name, + project_uuid=domain.project_uuid, + user_group_id=domain.user_group_id, + type=domain.type, + created=domain.created, + modified=domain.modified, + ) + + +class ConversationPatch(InputSchema): + name: str | None = None + + +### PROJECT CONVERSATION MESSAGES --------------------------------------------------------------- + + +class ConversationMessageRestGet(OutputSchema): + message_id: ConversationMessageID + conversation_id: ConversationID + user_group_id: GroupID + 
content: Annotated[str, Field(max_length=4096)] + type: ConversationMessageType + created: datetime + modified: datetime + + @classmethod + def from_domain_model(cls, domain: ConversationMessageGetDB) -> Self: + return cls( + message_id=domain.message_id, + conversation_id=domain.conversation_id, + user_group_id=domain.user_group_id, + content=domain.content, + type=domain.type, + created=domain.created, + modified=domain.modified, + ) + + +class ConversationMessagePatch(InputSchema): + content: str | None = None diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_metadata.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_metadata.py new file mode 100644 index 00000000000..c108dcd2fc2 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_metadata.py @@ -0,0 +1,21 @@ +from typing import TypeAlias + +from pydantic import Field, StrictBool, StrictFloat, StrictInt + +from ..projects import ProjectID +from ._base import InputSchema, OutputSchema + +# Limits metadata values +MetaValueType: TypeAlias = StrictBool | StrictInt | StrictFloat | str +MetadataDict: TypeAlias = dict[str, MetaValueType] + + +class ProjectMetadataGet(OutputSchema): + project_uuid: ProjectID + custom: MetadataDict = Field( + default_factory=dict, description="Custom key-value map" + ) + + +class ProjectMetadataUpdate(InputSchema): + custom: MetadataDict diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py new file mode 100644 index 00000000000..a8932553201 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes.py @@ -0,0 +1,220 @@ +# mypy: disable-error-code=truthy-function +from typing import Annotated, Any, Literal, TypeAlias + +from models_library.groups import GroupID +from models_library.projects import ProjectID +from models_library.services_history import ServiceRelease +from pydantic import ConfigDict, Field + +from ..access_rights import ExecutableAccessRights +from ..api_schemas_directorv2.dynamic_services import RetrieveDataOut +from ..basic_types import PortInt +from ..projects_nodes import InputID, InputsDict, PartialNode +from ..projects_nodes_io import NodeID +from ..services import ServiceKey, ServicePortKey, ServiceVersion +from ..services_enums import ServiceState +from ..services_resources import ServiceResourcesDict +from ._base import InputSchemaWithoutCamelCase, OutputSchema + +assert ServiceResourcesDict # nosec +__all__: tuple[str, ...] 
= ("ServiceResourcesDict",) + + +class NodeCreate(InputSchemaWithoutCamelCase): + service_key: ServiceKey + service_version: ServiceVersion + service_id: str | None = None + + +BootOptions: TypeAlias = dict + + +class NodePatch(InputSchemaWithoutCamelCase): + service_key: Annotated[ + ServiceKey | None, + Field(alias="key"), + ] = None + service_version: Annotated[ + ServiceVersion | None, + Field(alias="version"), + ] = None + label: str | None = None + inputs: Annotated[ + InputsDict, Field(default_factory=dict, json_schema_extra={"default": {}}) + ] + inputs_required: Annotated[ + list[InputID] | None, + Field(alias="inputsRequired"), + ] = None + input_nodes: Annotated[ + list[NodeID] | None, + Field(alias="inputNodes"), + ] = None + progress: Annotated[ + float | None, + Field( + # NOTE: it is used by frontend for File Picker progress + ge=0, + le=100, + ), + ] = None + boot_options: Annotated[BootOptions | None, Field(alias="bootOptions")] = None + outputs: dict[str, Any] | None = ( + None # NOTE: it is used by frontend for File Picker + ) + + def to_domain_model(self) -> PartialNode: + data = self.model_dump( + exclude_unset=True, + by_alias=True, + ) + return PartialNode.model_construct(**data) + + +class NodeCreated(OutputSchema): + node_id: NodeID + + +class NodeGet(OutputSchema): + published_port: PortInt | None = Field( + ..., + description="The ports where the service provides its interface", + ) + entry_point: str | None = Field( + None, + description="The entry point where the service provides its interface if specified", + ) + service_uuid: str = Field( + ..., + description="The UUID attached to this service", + ) + service_key: ServiceKey = Field( + ..., + description="distinctive name for the node based on the docker registry path", + examples=[ + "simcore/services/comp/itis/sleeper", + "simcore/services/dynamic/3dviewer", + ], + ) + service_version: ServiceVersion = Field( + ..., description="semantic version number", examples=["1.0.0", "0.0.1"] + ) + service_host: str = Field( + ..., + description="service host name within the network", + ) + service_port: PortInt = Field( + ..., description="port to access the service within the network" + ) + service_basepath: str | None = Field( + "", + description="different base path where current service is mounted otherwise defaults to root", + ) + service_state: ServiceState = Field( + ..., + description="the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start\n", + ) + service_message: str | None = Field( + None, + description="the service message", + ) + user_id: str = Field(..., description="the user that started the service") + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + # computational + { + "published_port": 30000, + "entrypoint": "/the/entry/point/is/here", + "service_uuid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "service_key": "simcore/services/comp/itis/sleeper", + "service_version": "1.2.3", + "service_host": "jupyter_E1O2E-LAH", + "service_port": 8081, + "service_basepath": "/x/E1O2E-LAH", + "service_state": "pending", + "service_message": "no suitable node (insufficient resources on 1 node)", + "user_id": "123", + }, + # dynamic + { + "published_port": 30000, + "entrypoint": "/the/entry/point/is/here", + "service_uuid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", 
+ "service_key": "simcore/services/dynamic/some-dynamic-service", + "service_version": "1.2.3", + "service_host": "jupyter_E1O2E-LAH", + "service_port": 8081, + "service_basepath": "/x/E1O2E-LAH", + "service_state": "pending", + "service_message": "no suitable node (insufficient resources on 1 node)", + "user_id": "123", + }, + ] + } + ) + + +class NodeGetIdle(OutputSchema): + service_state: Literal["idle"] + service_uuid: NodeID + + @classmethod + def from_node_id(cls, node_id: NodeID) -> "NodeGetIdle": + return cls(service_state="idle", service_uuid=node_id) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "service_uuid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "service_state": "idle", + } + } + ) + + +class NodeGetUnknown(OutputSchema): + service_state: Literal["unknown"] + service_uuid: NodeID + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "service_uuid": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "service_state": "unknown", + } + } + ) + + @classmethod + def from_node_id(cls, node_id: NodeID) -> "NodeGetUnknown": + return cls(service_state="unknown", service_uuid=node_id) + + +class NodeOutputs(InputSchemaWithoutCamelCase): + outputs: dict[str, Any] + + +class NodeRetrieve(InputSchemaWithoutCamelCase): + port_keys: list[ServicePortKey] = [] + + +class NodeRetrieved(RetrieveDataOut): + model_config = OutputSchema.model_config + + +class NodeServiceGet(OutputSchema): + key: ServiceKey + release: ServiceRelease + owner: Annotated[ + GroupID | None, + Field( + description="Service owner primary group id or None if ownership still not defined" + ), + ] + my_access_rights: ExecutableAccessRights + + +class ProjectNodeServicesGet(OutputSchema): + project_uuid: ProjectID + services: list[NodeServiceGet] diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes_ui.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes_ui.py new file mode 100644 index 00000000000..a1e3b7755b1 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_nodes_ui.py @@ -0,0 +1,14 @@ +from typing import Annotated, TypeAlias + +from pydantic import BaseModel, ConfigDict, Field, PlainSerializer +from pydantic_extra_types.color import Color + +from ..projects_nodes_layout import Position + +PositionUI: TypeAlias = Position + + +class MarkerUI(BaseModel): + color: Annotated[Color, PlainSerializer(Color.as_hex), Field(...)] + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_ports.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_ports.py new file mode 100644 index 00000000000..6582542525b --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_ports.py @@ -0,0 +1,30 @@ +from typing import Any + +from pydantic import BaseModel, Field + +from ..projects_nodes_io import NodeID +from ._base import InputSchemaWithoutCamelCase, OutputSchema + + +class _ProjectIOBase(BaseModel): + key: NodeID = Field( + ..., + description="Project port's unique identifer. 
Same as the UUID of the associated port node", + ) + value: Any = Field(..., description="Value assigned to this i/o port") + + +class ProjectInputUpdate(_ProjectIOBase): + model_config = InputSchemaWithoutCamelCase.model_config + + +class ProjectInputGet(OutputSchema, _ProjectIOBase): + label: str + + model_config = InputSchemaWithoutCamelCase.model_config + + +class ProjectOutputGet(_ProjectIOBase): + label: str + + model_config = OutputSchema.model_config diff --git a/services/api-server/tools/templates/schemas.py.jinja2 b/packages/models-library/src/models_library/api_schemas_webserver/projects_tags.py similarity index 100% rename from services/api-server/tools/templates/schemas.py.jinja2 rename to packages/models-library/src/models_library/api_schemas_webserver/projects_tags.py diff --git a/packages/models-library/src/models_library/api_schemas_webserver/projects_ui.py b/packages/models-library/src/models_library/api_schemas_webserver/projects_ui.py new file mode 100644 index 00000000000..9bbb92f447c --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/projects_ui.py @@ -0,0 +1,184 @@ +""" +Models Front-end UI +""" + +from typing import Annotated, Literal, NotRequired + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + HttpUrl, + PlainSerializer, + field_validator, +) +from pydantic.config import JsonDict +from pydantic_extra_types.color import Color +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + +from ..projects_nodes_io import NodeID, NodeIDStr +from ..utils.common_validators import empty_str_to_none_pre_validator +from ._base import OutputSchema +from .projects_nodes_ui import MarkerUI, PositionUI + + +class WorkbenchUI(BaseModel): + position: Annotated[ + PositionUI, + Field(description="The node position in the workbench"), + ] + marker: MarkerUI | None = None + + model_config = ConfigDict(extra="forbid") + + +class SlideshowUI(TypedDict): + position: int + instructions: NotRequired[str | None] # Instructions about what to do in this step + + +class AnnotationUI(BaseModel): + type: Literal["note", "rect", "text"] + color: Annotated[Color, PlainSerializer(Color.as_hex)] + attributes: Annotated[dict, Field(description="svg attributes")] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "type": "note", + "color": "#FFFF00", + "attributes": { + "x": 415, + "y": 100, + "width": 117, + "height": 26, + "destinataryGid": 4, + "text": "ToDo", + }, + }, + { + "type": "rect", + "color": "#FF0000", + "attributes": {"x": 415, "y": 100, "width": 117, "height": 26}, + }, + { + "type": "text", + "color": "#0000FF", + "attributes": {"x": 415, "y": 100, "text": "Hey!"}, + }, + ] + }, + ) + + model_config = ConfigDict( + extra="forbid", json_schema_extra=_update_json_schema_extra + ) + + +class StudyUI(OutputSchema): + # Model fully controlled by the UI and stored under `projects.ui` + icon: HttpUrl | None = None + + workbench: dict[NodeIDStr, WorkbenchUI] | None = None + slideshow: dict[NodeIDStr, SlideshowUI] | None = None + current_node_id: NodeID | None = None + annotations: dict[NodeIDStr, AnnotationUI] | None = None + template_type: Literal["hypertool"] | None = None + + _empty_is_none = field_validator("*", mode="before")( + empty_str_to_none_pre_validator + ) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "workbench": { + 
"801407c9-abb1-400d-ac49-35b0b2334a34": { + "position": {"x": 250, "y": 100} + } + } + }, + { + "icon": "https://cdn-icons-png.flaticon.com/512/25/25231.png", + "mode": "app", + "slideshow": { + "4b3345e5-861f-47b0-8b52-a4508449be79": { + "position": 1, + "instructions": None, + }, + "eaeee3dc-9ae1-4bf6-827e-798fd7cad848": { + "position": 0, + "instructions": None, + }, + }, + "workbench": { + "4b3345e5-861f-47b0-8b52-a4508449be79": { + "position": {"x": 460, "y": 260} + }, + "eaeee3dc-9ae1-4bf6-827e-798fd7cad848": { + "position": {"x": 220, "y": 600} + }, + }, + "annotations": { + "4375ae62-76ce-42a4-9cea-608a2ba74762": { + "type": "rect", + "color": "#650cff", + "attributes": { + "x": 79, + "y": 194, + "width": "320", + "height": "364", + }, + }, + "52567518-cedc-47e0-ad7f-6989fb8c5649": { + "type": "note", + "color": "#ffff01", + "attributes": { + "x": 151, + "y": 376, + "text": "ll", + "recipientGid": None, + }, + }, + "764a17c8-36d7-4865-a5cb-db9b4f82ce80": { + "type": "note", + "color": "#650cff", + "attributes": { + "x": 169, + "y": 19, + "text": "yeah m", + "recipientGid": 20630, + }, + }, + "cf94f068-259c-4192-89f9-b2a56d51249c": { + "type": "text", + "color": "#e9aeab", + "attributes": { + "x": 119, + "y": 223, + "text": "pppoo", + "color": "#E9AEAB", + "fontSize": 12, + }, + }, + }, + "current_node_id": "4b3345e5-861f-47b0-8b52-a4508449be79", + "template_type": "hypertool", + }, + ] + } + ) + + model_config = ConfigDict( + extra="allow", + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/resource_usage.py b/packages/models-library/src/models_library/api_schemas_webserver/resource_usage.py new file mode 100644 index 00000000000..78e0c005abc --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/resource_usage.py @@ -0,0 +1,156 @@ +from datetime import datetime +from decimal import Decimal + +from pydantic import BaseModel, ConfigDict, Field + +from ..projects import ProjectID +from ..projects_nodes_io import NodeID +from ..resource_tracker import ( + CreditTransactionStatus, + HardwareInfo, + PricingPlanClassification, + PricingPlanId, + PricingUnitCostUpdate, + PricingUnitId, + ServiceRunStatus, + SpecificInfo, + UnitExtraInfoLicense, + UnitExtraInfoTier, +) +from ..services import ServiceKey, ServiceVersion +from ..services_types import ServiceRunID +from ..users import UserID +from ..wallets import WalletID +from ._base import InputSchema, OutputSchema + +# Frontend API + + +class ServiceRunGet( + BaseModel +): # NOTE: this is already in use so I didnt modify inheritance from OutputSchema + service_run_id: ServiceRunID + wallet_id: WalletID | None + wallet_name: str | None + user_id: UserID + user_email: str + project_id: ProjectID + project_name: str + project_tags: list[str] + node_id: NodeID + node_name: str + root_parent_project_id: ProjectID + root_parent_project_name: str + service_key: ServiceKey + service_version: ServiceVersion + service_type: str + started_at: datetime + stopped_at: datetime | None + service_run_status: ServiceRunStatus + # Cost in credits + credit_cost: Decimal | None + transaction_status: CreditTransactionStatus | None + + +class PricingUnitGet(OutputSchema): + pricing_unit_id: PricingUnitId + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + current_cost_per_unit: Decimal + default: bool + + +class PricingPlanGet(OutputSchema): + pricing_plan_id: PricingPlanId + display_name: str + 
description: str + classification: PricingPlanClassification + created_at: datetime + pricing_plan_key: str + pricing_units: list[PricingUnitGet] | None + is_active: bool + + +## Admin Pricing Plan and Unit + + +class PricingUnitAdminGet(PricingUnitGet): + specific_info: HardwareInfo + + +class PricingPlanAdminGet(OutputSchema): + pricing_plan_id: PricingPlanId + display_name: str + description: str + classification: PricingPlanClassification + created_at: datetime + pricing_plan_key: str + pricing_units: list[PricingUnitAdminGet] | None + is_active: bool + + +class PricingPlanToServiceAdminGet(OutputSchema): + pricing_plan_id: PricingPlanId + service_key: ServiceKey + service_version: ServiceVersion + created: datetime + + +class CreatePricingPlanBodyParams(InputSchema): + display_name: str + description: str + classification: PricingPlanClassification + pricing_plan_key: str + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + ) + + +class UpdatePricingPlanBodyParams(InputSchema): + display_name: str + description: str + is_active: bool + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + ) + + +class CreatePricingUnitBodyParams(InputSchema): + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + default: bool + specific_info: SpecificInfo + cost_per_unit: Decimal + comment: str + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + ) + + +class UpdatePricingUnitBodyParams(InputSchema): + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + default: bool + specific_info: SpecificInfo + pricing_unit_cost_update: PricingUnitCostUpdate | None = Field(default=None) + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + ) + + +class ConnectServiceToPricingPlanBodyParams(InputSchema): + service_key: ServiceKey + service_version: ServiceVersion + + model_config = ConfigDict( + str_strip_whitespace=True, + str_max_length=200, + ) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/socketio.py b/packages/models-library/src/models_library/api_schemas_webserver/socketio.py new file mode 100644 index 00000000000..6e3f987198a --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/socketio.py @@ -0,0 +1,17 @@ +from ..basic_types import IDStr +from ..groups import GroupID +from ..users import UserID + + +class SocketIORoomStr(IDStr): + @classmethod + def from_socket_id(cls, socket_id: str) -> "SocketIORoomStr": + return cls(socket_id) + + @classmethod + def from_group_id(cls, group_id: GroupID) -> "SocketIORoomStr": + return cls(f"group:{group_id}") + + @classmethod + def from_user_id(cls, user_id: UserID) -> "SocketIORoomStr": + return cls(f"user:{user_id}") diff --git a/packages/models-library/src/models_library/api_schemas_webserver/storage.py b/packages/models-library/src/models_library/api_schemas_webserver/storage.py new file mode 100644 index 00000000000..8460493348c --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/storage.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Annotated + +from pydantic import BaseModel, Field + +from ..api_schemas_storage.storage_schemas import ( + DEFAULT_NUMBER_OF_PATHS_PER_PAGE, + MAX_NUMBER_OF_PATHS_PER_PAGE, +) +from ..projects_nodes_io import LocationID +from ..rest_pagination import CursorQueryParameters +from ._base import InputSchema + + +class StorageLocationPathParams(BaseModel): + 
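+    # NOTE: LocationID selects which storage backend ("location") the path below refers to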
location_id: LocationID + + +class StoragePathComputeSizeParams(StorageLocationPathParams): + path: Path + + +class ListPathsQueryParams(InputSchema, CursorQueryParameters): + file_filter: Path | None = None + + size: Annotated[ + int, + Field( + description="maximum number of items to return (pagination)", + ge=1, + lt=MAX_NUMBER_OF_PATHS_PER_PAGE, + ), + ] = DEFAULT_NUMBER_OF_PATHS_PER_PAGE + + +class BatchDeletePathsBodyParams(InputSchema): + paths: set[Path] + + +PathToExport = Path + + +class DataExportPost(InputSchema): + paths: list[PathToExport] diff --git a/packages/models-library/src/models_library/api_schemas_webserver/users.py b/packages/models-library/src/models_library/api_schemas_webserver/users.py new file mode 100644 index 00000000000..ed102bf746a --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/users.py @@ -0,0 +1,374 @@ +import re +from datetime import date, datetime +from enum import Enum +from typing import Annotated, Any, Literal, Self + +import annotated_types +from common_library.basic_types import DEFAULT_FACTORY +from common_library.dict_tools import remap_keys +from common_library.users_enums import AccountRequestStatus, UserStatus +from models_library.groups import AccessRightsDict +from models_library.rest_filters import Filters +from models_library.rest_pagination import PageQueryParameters +from pydantic import ( + ConfigDict, + EmailStr, + Field, + StringConstraints, + ValidationInfo, + field_validator, +) +from pydantic.config import JsonDict + +from ..basic_types import IDStr +from ..emails import LowerCaseEmailStr +from ..groups import AccessRightsDict, Group, GroupID, GroupsByTypeTuple +from ..products import ProductName +from ..rest_base import RequestParameters +from ..users import ( + FirstNameStr, + LastNameStr, + MyProfile, + UserID, + UserNameID, + UserPermission, + UserThirdPartyToken, +) +from ._base import ( + InputSchema, + InputSchemaWithoutCamelCase, + OutputSchema, + OutputSchemaWithoutCamelCase, +) +from .groups import MyGroupsGet +from .products import InvitationGenerate +from .users_preferences import AggregatedPreferences + +# +# MY PROFILE +# + + +class MyProfilePrivacyGet(OutputSchema): + hide_username: bool + hide_fullname: bool + hide_email: bool + + +class MyProfilePrivacyPatch(InputSchema): + hide_username: bool | None = None + hide_fullname: bool | None = None + hide_email: bool | None = None + + +class MyProfileGet(OutputSchemaWithoutCamelCase): + id: UserID + user_name: Annotated[ + IDStr, Field(description="Unique username identifier", alias="userName") + ] + first_name: FirstNameStr | None = None + last_name: LastNameStr | None = None + login: LowerCaseEmailStr + + role: Literal["ANONYMOUS", "GUEST", "USER", "TESTER", "PRODUCT_OWNER", "ADMIN"] + groups: MyGroupsGet | None = None + gravatar_id: Annotated[str | None, Field(deprecated=True)] = None + + expiration_date: Annotated[ + date | None, + Field( + description="If user has a trial account, it sets the expiration date, otherwise None", + alias="expirationDate", + ), + ] = None + + privacy: MyProfilePrivacyGet + preferences: AggregatedPreferences + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "id": 42, + "login": "bla@foo.com", + "userName": "bla42", + "role": "admin", # pre + "expirationDate": "2022-09-14", # optional + "preferences": {}, + "privacy": { + "hide_username": 0, + "hide_fullname": 0, + "hide_email": 1, + }, + }, + ] + } + ) + + model_config = ConfigDict( 
+ # NOTE: old models have an hybrid between snake and camel cases! + # Should be unified at some point + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) + + @field_validator("role", mode="before") + @classmethod + def _to_upper_string(cls, v): + if isinstance(v, str): + return v.upper() + if isinstance(v, Enum): + return v.name.upper() + return v + + @classmethod + def from_domain_model( + cls, + my_profile: MyProfile, + my_groups_by_type: GroupsByTypeTuple, + my_product_group: tuple[Group, AccessRightsDict] | None, + my_preferences: AggregatedPreferences, + ) -> Self: + data = remap_keys( + my_profile.model_dump( + include={ + "id", + "user_name", + "first_name", + "last_name", + "email", + "role", + "privacy", + "expiration_date", + }, + exclude_unset=True, + ), + rename={"email": "login"}, + ) + return cls( + **data, + groups=MyGroupsGet.from_domain_model(my_groups_by_type, my_product_group), + preferences=my_preferences, + ) + + +class MyProfilePatch(InputSchemaWithoutCamelCase): + first_name: FirstNameStr | None = None + last_name: LastNameStr | None = None + user_name: Annotated[IDStr | None, Field(alias="userName", min_length=4)] = None + + privacy: MyProfilePrivacyPatch | None = None + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "first_name": "Pedro", + "last_name": "Crespo", + } + } + ) + + @field_validator("user_name") + @classmethod + def _validate_user_name(cls, value: str): + # Ensure valid characters (alphanumeric + . _ -) + if not re.match(r"^[a-zA-Z][a-zA-Z0-9._-]*$", value): + msg = f"Username '{value}' must start with a letter and can only contain letters, numbers and '_', '.' or '-'." + raise ValueError(msg) + + # Ensure no consecutive special characters + if re.search(r"[_.-]{2,}", value): + msg = f"Username '{value}' cannot contain consecutive special characters like '__'." + raise ValueError(msg) + + # Ensure it doesn't end with a special character + if {value[0], value[-1]}.intersection({"_", "-", "."}): + msg = f"Username '{value}' cannot end with a special character." + raise ValueError(msg) + + # Check reserved words (example list; extend as needed) + reserved_words = { + "admin", + "root", + "system", + "null", + "undefined", + "support", + "moderator", + # NOTE: add here extra via env vars + } + if any(w in value.lower() for w in reserved_words): + msg = f"Username '{value}' cannot be used." + raise ValueError(msg) + + return value + + +# +# USER +# + + +class UsersGetParams(RequestParameters): + user_id: UserID + + +class UsersSearch(InputSchema): + match_: Annotated[ + str, + StringConstraints(strip_whitespace=True, min_length=1, max_length=80), + Field( + description="Search string to match with usernames and public profiles (e.g. emails, first/last name)", + alias="match", + ), + ] + limit: Annotated[int, annotated_types.Interval(ge=1, le=50)] = 10 + + +class UserGet(OutputSchema): + # Public profile of a user subject to its privacy settings + user_id: UserID + group_id: GroupID + user_name: UserNameID | None = None + first_name: str | None = None + last_name: str | None = None + email: EmailStr | None = None + + @classmethod + def from_domain_model(cls, data): + return cls.model_validate(data, from_attributes=True) + + +class UsersForAdminListFilter(Filters): + # 1. account_request_status: PENDING, REJECTED, APPROVED + # 2. 
If APPROVED AND user uses the invitation link, then when user is registered, + # it can be in any of these statuses: + # CONFIRMATION_PENDING, ACTIVE, EXPIRED, BANNED, DELETED + # + review_status: Literal["PENDING", "REVIEWED"] | None = None + + model_config = ConfigDict(extra="forbid") + + +class UsersAccountListQueryParams(UsersForAdminListFilter, PageQueryParameters): ... + + +class UserAccountApprove(InputSchema): + email: EmailStr + invitation: InvitationGenerate | None = None + + +class UserAccountReject(InputSchema): + email: EmailStr + + +class UserAccountSearchQueryParams(RequestParameters): + email: Annotated[ + str, + Field( + min_length=3, + max_length=200, + description="complete or glob pattern for an email", + ), + ] + + +class UserAccountGet(OutputSchema): + # ONLY for admins + first_name: str | None + last_name: str | None + email: LowerCaseEmailStr + institution: str | None + phone: str | None + address: str | None + city: str | None + state: Annotated[str | None, Field(description="State, province, canton, ...")] + postal_code: str | None + country: str | None + extras: Annotated[ + dict[str, Any], + Field( + default_factory=dict, + description="Keeps extra information provided in the request form", + ), + ] = DEFAULT_FACTORY + + # pre-registration + pre_registration_id: int | None + invited_by: str | None = None + account_request_status: AccountRequestStatus | None + account_request_reviewed_by: UserID | None = None + account_request_reviewed_at: datetime | None = None + + # user status + registered: bool + status: UserStatus | None + products: Annotated[ + list[ProductName] | None, + Field( + description="List of products this users is included or None if fields is unset", + ), + ] = None + + @field_validator("status") + @classmethod + def _consistency_check(cls, v, info: ValidationInfo): + registered = info.data["registered"] + status = v + if not registered and status is not None: + msg = f"{registered=} and {status=} is not allowed" + raise ValueError(msg) + return v + + +# +# THIRD-PARTY TOKENS +# + + +class MyTokenCreate(InputSchemaWithoutCamelCase): + service: Annotated[ + IDStr, + Field(description="uniquely identifies the service where this token is used"), + ] + token_key: IDStr + token_secret: IDStr + + def to_domain_model(self) -> UserThirdPartyToken: + return UserThirdPartyToken( + service=self.service, + token_key=self.token_key, + token_secret=self.token_secret, + ) + + +class MyTokenGet(OutputSchemaWithoutCamelCase): + service: IDStr + token_key: IDStr + token_secret: Annotated[ + IDStr | None, Field(deprecated=True, description="Will be removed") + ] = None + + @classmethod + def from_domain_model(cls, token: UserThirdPartyToken) -> Self: + return cls( + service=token.service, # type: ignore[arg-type] + token_key=token.token_key, # type: ignore[arg-type] + token_secret=None, + ) + + +# +# PERMISSIONS +# + + +class MyPermissionGet(OutputSchema): + name: str + allowed: bool + + @classmethod + def from_domain_model(cls, permission: UserPermission) -> Self: + return cls(name=permission.name, allowed=permission.allowed) diff --git a/packages/models-library/src/models_library/api_schemas_webserver/users_preferences.py b/packages/models-library/src/models_library/api_schemas_webserver/users_preferences.py new file mode 100644 index 00000000000..e5789cafbdd --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/users_preferences.py @@ -0,0 +1,22 @@ +from typing import Any, TypeAlias + +from pydantic import BaseModel, Field + +from 
..user_preferences import PreferenceIdentifier +from ._base import InputSchema, OutputSchema + + +class Preference(OutputSchema): + default_value: Any = Field(default=..., description="used by the frontend") + value: Any = Field(default=..., description="preference value") + + +AggregatedPreferences: TypeAlias = dict[PreferenceIdentifier, Preference] + + +class PatchRequestBody(InputSchema): + value: Any + + +class PatchPathParams(BaseModel): + preference_id: PreferenceIdentifier diff --git a/packages/models-library/src/models_library/api_schemas_webserver/wallets.py b/packages/models-library/src/models_library/api_schemas_webserver/wallets.py new file mode 100644 index 00000000000..464d0a5c1f9 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/wallets.py @@ -0,0 +1,250 @@ +from datetime import datetime +from decimal import Decimal +from typing import Literal, TypeAlias + +from pydantic import ConfigDict, Field, HttpUrl, ValidationInfo, field_validator + +from ..basic_types import AmountDecimal, IDStr, NonNegativeDecimal +from ..groups import GroupID +from ..wallets import WalletID, WalletStatus +from ._base import InputSchema, OutputSchema + + +class WalletGet(OutputSchema): + wallet_id: WalletID + name: IDStr + description: str | None = None + owner: GroupID + thumbnail: str | None = None + status: WalletStatus + created: datetime + modified: datetime + + model_config = ConfigDict( + from_attributes=True, + frozen=False, + json_schema_extra={ + "examples": [ + { + "wallet_id": 1, + "name": "My wallet", + "description": "My description", + "owner": 1, + "thumbnail": "https://example.com/payment-method/form", + "status": "ACTIVE", + "created": "2024-03-25T00:00:00", + "modified": "2024-03-25T00:00:00", + } + ] + }, + ) + + +class WalletGetWithAvailableCredits(WalletGet): + available_credits: Decimal + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + **WalletGet.model_config["json_schema_extra"]["examples"][0], # type: ignore + "available_credits": 10.5, + } + ] + } + ) + + +class WalletGetPermissions(WalletGet): + read: bool + write: bool + delete: bool + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + **WalletGet.model_config["json_schema_extra"]["examples"][0], # type: ignore + "read": True, + "write": True, + "delete": True, + } + ] + } + ) + + +class CreateWalletBodyParams(InputSchema): + name: str + description: str | None = None + thumbnail: str | None = None + + +class PutWalletBodyParams(InputSchema): + name: str + description: str | None + thumbnail: str | None = None + status: WalletStatus + + +# +# Payments to top-up credits in wallets +# + +# NOTE: that these can be UUIDs (or not) +PaymentID: TypeAlias = IDStr +PaymentMethodID: TypeAlias = IDStr + + +class CreateWalletPayment(InputSchema): + price_dollars: AmountDecimal + comment: str | None = Field(default=None, max_length=100) + + +class WalletPaymentInitiated(OutputSchema): + payment_id: PaymentID + payment_form_url: HttpUrl | None = Field( + default=None, + description="Link to external site that holds the payment submission form." + "None if no prompt step is required (e.g. 
pre-selected credit card)", + ) + + +class PaymentTransaction(OutputSchema): + payment_id: PaymentID + price_dollars: Decimal + wallet_id: WalletID + osparc_credits: Decimal + comment: str | None = Field(default=None) + created_at: datetime + completed_at: datetime | None + # SEE PaymentTransactionState enum + state: Literal["PENDING", "SUCCESS", "FAILED", "CANCELED"] = Field( + ..., alias="completedStatus" + ) + state_message: str | None = Field(default=None) + invoice_url: HttpUrl | None = Field(default=None) + + +class PaymentMethodInitiated(OutputSchema): + wallet_id: WalletID + payment_method_id: PaymentMethodID + payment_method_form_url: HttpUrl = Field( + ..., description="Link to external site that holds the payment submission form" + ) + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "wallet_id": 1, + "payment_method_id": "pm_0987654321", + "payment_method_form_url": "https://example.com/payment-method/form", + } + ] + } + ) + + +class PaymentMethodTransaction(OutputSchema): + # Used ONLY in socketio interface + wallet_id: WalletID + payment_method_id: PaymentMethodID + state: Literal["PENDING", "SUCCESS", "FAILED", "CANCELED"] + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "walletId": 1, + "paymentMethodId": "pm_0987654321", + "state": "SUCCESS", + } + ] + } + ) + + +class PaymentMethodGet(OutputSchema): + idr: PaymentMethodID + wallet_id: WalletID + card_holder_name: str | None = None + card_number_masked: str | None = None + card_type: str | None = None + expiration_month: int | None = None + expiration_year: int | None = None + created: datetime + auto_recharge: bool = Field( + default=False, + description="If true, this payment-method is used for auto-recharge", + ) + + model_config = ConfigDict( + frozen=False, + json_schema_extra={ + "examples": [ + { + "idr": "pm_1234567890", + "walletId": 1, + "cardHolderName": "John Doe", + "cardNumberMasked": "**** **** **** 1234", + "cardType": "Visa", + "expirationMonth": 10, + "expirationYear": 2025, + "created": "2023-09-13T15:30:00Z", + "autoRecharge": "False", + }, + { + "idr": "pm_1234567890", + "walletId": 3, + "created": "2024-09-13T15:30:00Z", + "autoRecharge": "False", + }, + ], + }, + ) + + +# +# Auto-recharge mechanism associated to a wallet +# + + +class GetWalletAutoRecharge(OutputSchema): + enabled: bool = Field( + default=False, + description="Enables/disables auto-recharge trigger in this wallet", + ) + payment_method_id: PaymentMethodID | None = Field( + ..., + description="Payment method in the wallet used to perform the auto-recharge payments or None if still undefined", + ) + min_balance_in_credits: NonNegativeDecimal = Field( + ..., + description="Minimum balance in credits that triggers an auto-recharge [Read only]", + ) + top_up_amount_in_usd: NonNegativeDecimal = Field( + ..., + description="Amount in USD payed when auto-recharge condition is satisfied", + ) + monthly_limit_in_usd: NonNegativeDecimal | None = Field( + ..., + description="Maximum amount in USD charged within a natural month." 
+ "None indicates no limit.", + ) + + +class ReplaceWalletAutoRecharge(InputSchema): + enabled: bool + payment_method_id: PaymentMethodID + top_up_amount_in_usd: NonNegativeDecimal + monthly_limit_in_usd: NonNegativeDecimal | None + + @field_validator("monthly_limit_in_usd") + @classmethod + def _monthly_limit_greater_than_top_up(cls, v, info: ValidationInfo): + top_up = info.data["top_up_amount_in_usd"] + if v is not None and v < top_up: + msg = "Monthly limit ({v} USD) should be greater than top up amount ({top_up} USD)" + raise ValueError(msg) + return v diff --git a/packages/models-library/src/models_library/api_schemas_webserver/workspaces.py b/packages/models-library/src/models_library/api_schemas_webserver/workspaces.py new file mode 100644 index 00000000000..1305af4f345 --- /dev/null +++ b/packages/models-library/src/models_library/api_schemas_webserver/workspaces.py @@ -0,0 +1,56 @@ +from datetime import datetime +from typing import Annotated, Self + +from pydantic import ConfigDict, Field + +from ..access_rights import AccessRights +from ..basic_types import IDStr +from ..groups import GroupID +from ..workspaces import UserWorkspaceWithAccessRights, WorkspaceID +from ._base import InputSchema, OutputSchema + + +class WorkspaceGet(OutputSchema): + workspace_id: WorkspaceID + name: str + description: str | None + thumbnail: str | None + created_at: datetime + modified_at: datetime + trashed_at: datetime | None + trashed_by: Annotated[ + GroupID | None, Field(description="The primary gid of the user who trashed") + ] + my_access_rights: AccessRights + access_rights: dict[GroupID, AccessRights] + + @classmethod + def from_domain_model(cls, wks: UserWorkspaceWithAccessRights) -> Self: + return cls( + workspace_id=wks.workspace_id, + name=wks.name, + description=wks.description, + thumbnail=wks.thumbnail, + created_at=wks.created, + modified_at=wks.modified, + trashed_at=wks.trashed, + trashed_by=wks.trashed_by_primary_gid if wks.trashed else None, + my_access_rights=wks.my_access_rights, + access_rights=wks.access_rights, + ) + + +class WorkspaceCreateBodyParams(InputSchema): + name: str + description: str | None = None + thumbnail: str | None = None + + model_config = ConfigDict(extra="forbid") + + +class WorkspaceReplaceBodyParams(InputSchema): + name: IDStr + description: str | None = None + thumbnail: str | None = None + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/app_diagnostics.py b/packages/models-library/src/models_library/app_diagnostics.py index ca38769699b..dee4dc726cc 100644 --- a/packages/models-library/src/models_library/app_diagnostics.py +++ b/packages/models-library/src/models_library/app_diagnostics.py @@ -1,25 +1,38 @@ -from typing import Any, Optional +from typing import Annotated, Any +from common_library.basic_types import DEFAULT_FACTORY from pydantic import AnyUrl, BaseModel, Field class AppStatusCheck(BaseModel): app_name: str = Field(..., description="Application name") version: str = Field(..., description="Application's version") - services: dict[str, Any] = Field( - default={}, description="Other backend services connected from this service" - ) + services: Annotated[ + dict[str, Any], + Field( + default_factory=dict, + description="Other backend services connected from this service", + json_schema_extra={"default": {}}, + ), + ] = DEFAULT_FACTORY - sessions: Optional[dict[str, Any]] = Field( - default={}, - description="Client sessions info. 
If single session per app, then is denoted as main", - ) + sessions: Annotated[ + dict[str, Any] | None, + Field( + default_factory=dict, + description="Client sessions info. If single session per app, then is denoted as main", + json_schema_extra={"default": {}}, + ), + ] = DEFAULT_FACTORY - url: Optional[AnyUrl] = Field( + url: AnyUrl | None = Field( default=None, description="Link to current resource", ) - diagnostics_url: Optional[AnyUrl] = Field( - default=None, - description="Link to diagnostics report sub-resource. This MIGHT take some time to compute", - ) + + diagnostics_url: Annotated[ + AnyUrl | None, + Field( + description="Link to diagnostics report sub-resource. This MIGHT take some time to compute", + ), + ] = None diff --git a/packages/models-library/src/models_library/authentification.py b/packages/models-library/src/models_library/authentification.py new file mode 100644 index 00000000000..7125e89a5ac --- /dev/null +++ b/packages/models-library/src/models_library/authentification.py @@ -0,0 +1,9 @@ +from enum import auto + +from .utils.enums import StrAutoEnum + + +class TwoFactorAuthentificationMethod(StrAutoEnum): + SMS = auto() + EMAIL = auto() + DISABLED = auto() diff --git a/packages/models-library/src/models_library/basic_regex.py b/packages/models-library/src/models_library/basic_regex.py index 72561063206..5df967c1128 100644 --- a/packages/models-library/src/models_library/basic_regex.py +++ b/packages/models-library/src/models_library/basic_regex.py @@ -1,9 +1,10 @@ -""" Regular expressions patterns to build pydantic contrained strings +"""Regular expressions patterns to build pydantic contrained strings - - Variants of the patterns with 'Named Groups' captured are suffixed with NG_RE +- Variants of the patterns with 'Named Groups' captured are suffixed with NG_RE - SEE tests_basic_regex.py for examples +SEE tests_basic_regex.py for examples """ + # TODO: for every pattern we should have a formatter function # NOTE: some sites to manualy check ideas # https://regex101.com/ @@ -24,7 +25,7 @@ # python-like version -VERSION_RE = r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$" +SIMPLE_VERSION_RE = r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$" # Semantic version # SEE https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string @@ -45,14 +46,20 @@ ) # Storage basic file ID -SIMCORE_S3_FILE_ID_RE = rf"^(api|({UUID_RE_BASE}))\/({UUID_RE_BASE})\/(.+)$" +SIMCORE_S3_FILE_ID_RE = rf"^(exports\/\d+\/{UUID_RE_BASE}\.zip)|((api|({UUID_RE_BASE}))\/({UUID_RE_BASE})\/(.+)$)" + + +SIMCORE_S3_DIRECTORY_ID_RE = rf"^({UUID_RE_BASE})\/({UUID_RE_BASE})\/(.+)\/$" # S3 - AWS bucket names [https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html] -S3_BUCKET_NAME_RE = r"(?!(^xn--|-s3alias$))^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$" +S3_BUCKET_NAME_RE = re.compile( + r"^(?!xn--)[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$(?(?:(?:(?:[a-zA-Z0-9-]+\.)+[a-zA-Z0-9-]+(?::\d+)?)|[a-zA-Z0-9-]+:\d+))?(?:/)?(?P(?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])(?::(?P[\w][\w.-]{0,126}[\w]))?(?P\@sha256:[a-fA-F0-9]{64})?$" + # NOTE: https://docs.docker.com/engine/reference/commandline/tag/#description + r"^(?:(?P[a-z0-9-]+(?:\.[a-z0-9-]+)+(?::\d+)?|[a-z0-9-]+:\d+)/)?" + r"(?P(?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])" + r"(?::(?P[\w][\w.-]{0,127}))?" 
+ r"(?P\@sha256:[a-fA-F0-9]{32,64})?$" ) + +PROPERTY_KEY_RE = r"^[-_a-zA-Z0-9]+$" # TODO: PC->* it would be advisable to have this "variable friendly" (see VARIABLE_NAME_RE) diff --git a/packages/models-library/src/models_library/basic_types.py b/packages/models-library/src/models_library/basic_types.py index dbb906a5bfa..fe367a04a29 100644 --- a/packages/models-library/src/models_library/basic_types.py +++ b/packages/models-library/src/models_library/basic_types.py @@ -1,68 +1,201 @@ +from decimal import Decimal from enum import Enum +from re import Pattern +from typing import Annotated, ClassVar, Final, TypeAlias -from pydantic import HttpUrl, PositiveInt, conint, constr +import annotated_types +from common_library.basic_types import BootModeEnum, BuildTargetEnum, LogLevel +from pydantic import Field, HttpUrl, PositiveInt, StringConstraints +from pydantic_core import core_schema -from .basic_regex import UUID_RE, VERSION_RE +from .basic_regex import ( + PROPERTY_KEY_RE, + SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS, + SIMPLE_VERSION_RE, + UUID_RE, +) +from .utils.common_validators import trim_string_before + +assert issubclass(LogLevel, Enum) # nosec +assert issubclass(BootModeEnum, Enum) # nosec +assert issubclass(BuildTargetEnum, Enum) # nosec + +__all__: tuple[str, ...] = ( + "BootModeEnum", + "BuildTargetEnum", + "LogLevel", +) + + +NonNegativeDecimal: TypeAlias = Annotated[Decimal, Field(ge=0)] + +PositiveDecimal: TypeAlias = Annotated[Decimal, Field(gt=0)] + +# Used for amounts like credits or dollars +# NOTE: upper limit to avoid https://github.com/ITISFoundation/appmotion-exchange/issues/2 +# NOTE: do not contraint in decimal places. Too strong validation error rather Decimal.quantize +# before passing the value +AmountDecimal: TypeAlias = Annotated[Decimal, Field(gt=0, lt=1e6)] # port number range -PortInt = conint(gt=0, lt=65535) +PortInt: TypeAlias = Annotated[int, Field(gt=0, lt=65535)] + + +# https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Registered_ports +RegisteredPortInt: TypeAlias = Annotated[int, Field(gt=1024, lt=65535)] + # e.g. 'v5' -VersionTag = constr(regex=r"^v\d$") +VersionTag: TypeAlias = Annotated[str, StringConstraints(pattern=r"^v\d$")] + +VersionStr: TypeAlias = Annotated[str, StringConstraints(pattern=SIMPLE_VERSION_RE)] -# e.g. '1.23.11' or '2.1.0-rc2' -VersionStr = constr(regex=VERSION_RE) +# e.g. '1.23.11' or '2.1.0-rc2' or not 0.1.0-alpha (see test_SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS) +SemanticVersionStr: TypeAlias = Annotated[ + str, StringConstraints(pattern=SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS) +] # checksums -SHA1Str = constr(regex=r"^[a-fA-F0-9]{40}$") -MD5Str = constr(regex=r"^[a-fA-F0-9]{32}$") +# sha1sum path/to/file +SHA1Str: TypeAlias = Annotated[str, StringConstraints(pattern=r"^[a-fA-F0-9]{40}$")] + +# sha256sum path/to/file +SHA256Str: TypeAlias = Annotated[str, StringConstraints(pattern=r"^[a-fA-F0-9]{64}$")] + +# md5sum path/to/file +MD5Str: TypeAlias = Annotated[str, StringConstraints(pattern=r"^[a-fA-F0-9]{32}$")] # env var -EnvVarKey = constr(regex=r"[a-zA-Z][a-azA-Z0-9_]*") +EnvVarKey: TypeAlias = Annotated[str, StringConstraints(pattern=r"^[a-zA-Z]\w*")] # e.g. '5c833a78-1af3-43a7-9ed7-6a63b188f4d8' -UUIDStr = constr(regex=UUID_RE) +UUIDStr: TypeAlias = Annotated[str, StringConstraints(pattern=UUID_RE)] -# auto-incremented primary-key IDs -IdInt = PrimaryKeyInt = PositiveInt -# https e.g. 
https://techterms.com/definition/https -class HttpSecureUrl(HttpUrl): - allowed_schemes = {"https"} +SafeQueryStr: TypeAlias = Annotated[ + str, + StringConstraints( + max_length=512, # Reasonable limit for query parameters to avoid overflows + strip_whitespace=True, + ), + annotated_types.doc( + """ + A string that is safe to use in URLs and query parameters. + """, + ), +] -class LogLevel(str, Enum): - DEBUG = "DEBUG" - INFO = "INFO" - WARNING = "WARNING" - ERROR = "ERROR" +# non-empty bounded string used as identifier +# e.g. "123" or "name_123" or "fa327c73-52d8-462a-9267-84eeaf0f90e3" but NOT "" +_ELLIPSIS_CHAR: Final[str] = "..." -class BootModeEnum(str, Enum): +class ConstrainedStr(str): + """Emulates pydantic's v1 constrained types + + DEPRECATED: Use instead Annotated[str, StringConstraints(...)] """ - Values taken by SC_BOOT_MODE environment variable - set in Dockerfile and used during docker/boot.sh + + pattern: str | Pattern[str] | None = None + min_length: int | None = None + max_length: int | None = None + strip_whitespace: bool = False + curtail_length: int | None = None + + @classmethod + def _validate(cls, __input_value: str) -> str: + if cls.curtail_length and len(__input_value) > cls.curtail_length: + __input_value = __input_value[: cls.curtail_length] + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__(cls, _source_type, _handler): + return core_schema.no_info_after_validator_function( + cls._validate, + core_schema.str_schema( + pattern=cls.pattern, + min_length=cls.min_length, + max_length=cls.max_length, + strip_whitespace=cls.strip_whitespace, + ), + ) + + +class IDStr(ConstrainedStr): + """Non-empty bounded string used as identifier + + DEPRECATED: Use instead Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=100)] """ - DEFAULT = "default" - LOCAL = "local-development" - DEBUG = "debug-ptvsd" - PRODUCTION = "production" - DEVELOPMENT = "development" + strip_whitespace = True + min_length = 1 + max_length = 100 + + @staticmethod + def concatenate(*args: "IDStr", link_char: str = " ") -> "IDStr": + result = link_char.join(args).strip() + assert IDStr.min_length # nosec + assert IDStr.max_length # nosec + if len(result) > IDStr.max_length: + if IDStr.max_length > len(_ELLIPSIS_CHAR): + result = ( + result[: IDStr.max_length - len(_ELLIPSIS_CHAR)].rstrip() + + _ELLIPSIS_CHAR + ) + else: + result = _ELLIPSIS_CHAR[0] * IDStr.max_length + if len(result) < IDStr.min_length: + msg = f"IDStr.concatenate: result is too short: {result}" + raise ValueError(msg) + return IDStr(result) + + +_SHORT_TRUNCATED_STR_MAX_LENGTH: Final[int] = 600 +ShortTruncatedStr: TypeAlias = Annotated[ + str, + StringConstraints(strip_whitespace=True), + trim_string_before(max_length=_SHORT_TRUNCATED_STR_MAX_LENGTH), + annotated_types.doc( + """ + A truncated string used to input e.g. titles or display names. + Strips whitespaces and truncate strings that exceed the specified characters limit (curtail_length). + Ensures that the **input** data length to the API is controlled and prevents exceeding large inputs silently, + i.e. without raising errors. 
+ """ + # SEE https://github.com/ITISFoundation/osparc-simcore/pull/5989#discussion_r1650506583 + ), +] + +_LONG_TRUNCATED_STR_MAX_LENGTH: Final[int] = 65536 # same as github description +LongTruncatedStr: TypeAlias = Annotated[ + str, + StringConstraints(strip_whitespace=True), + trim_string_before(max_length=_LONG_TRUNCATED_STR_MAX_LENGTH), + annotated_types.doc( + """ + A truncated string used to input e.g. descriptions or summaries. + Strips whitespaces and truncate strings that exceed the specified characters limit (curtail_length). + Ensures that the **input** data length to the API is controlled and prevents exceeding large inputs silently, + i.e. without raising errors. + """ + ), +] - def is_devel_mode(self) -> bool: - """returns True if this boot mode is used for development""" - return self in (self.DEBUG, self.DEVELOPMENT, self.LOCAL) +# auto-incremented primary-key IDs +IdInt: TypeAlias = PositiveInt +PrimaryKeyInt: TypeAlias = PositiveInt + + +# https e.g. https://techterms.com/definition/https +class HttpSecureUrl(HttpUrl): + allowed_schemes: ClassVar[set[str]] = {"https"} + + +class HttpUrlWithCustomMinLength(HttpUrl): + # Overwriting min length to be back compatible when generating OAS + min_length = 0 -class BuildTargetEnum(str, Enum): - """ - Values taken by SC_BUILD_TARGET environment variable - set in Dockerfile that defines the stage targeted in the - docker image build - """ - BUILD = "build" - CACHE = "cache" - PRODUCTION = "production" - DEVELOPMENT = "development" +KeyIDStr = Annotated[str, StringConstraints(pattern=PROPERTY_KEY_RE)] diff --git a/packages/models-library/src/models_library/boot_options.py b/packages/models-library/src/models_library/boot_options.py index 35ca89ebbab..8b26f70c210 100644 --- a/packages/models-library/src/models_library/boot_options.py +++ b/packages/models-library/src/models_library/boot_options.py @@ -1,7 +1,7 @@ -from typing import Dict - -from pydantic import BaseModel, validator -from typing_extensions import TypedDict +from pydantic import BaseModel, ConfigDict, ValidationInfo, field_validator +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) from .basic_types import EnvVarKey @@ -15,20 +15,19 @@ class BootOption(BaseModel): label: str description: str default: str - items: Dict[str, BootChoice] + items: dict[str, BootChoice] - @validator("items") + @field_validator("items") @classmethod - def ensure_default_included(cls, v, values): - default = values["default"] + def ensure_default_included(cls, v, info: ValidationInfo): + default = info.data["default"] if default not in v: - raise ValueError( - f"Expected default={default} to be present a key of items={v}" - ) + msg = f"Expected default={default} to be present a key of items={v}" + raise ValueError(msg) return v - class Config: - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "examples": [ { "label": "Boot mode", @@ -62,6 +61,7 @@ class Config: }, ] } + ) -BootOptions = Dict[EnvVarKey, BootOption] +BootOptions = dict[EnvVarKey, BootOption] diff --git a/packages/models-library/src/models_library/bytes_iters.py b/packages/models-library/src/models_library/bytes_iters.py new file mode 100644 index 00000000000..5ec9bb961f3 --- /dev/null +++ b/packages/models-library/src/models_library/bytes_iters.py @@ -0,0 +1,9 @@ +from collections.abc import AsyncIterable, Callable +from typing import TypeAlias + +from pydantic import ByteSize + +BytesIter: TypeAlias = AsyncIterable[bytes] + 
+BytesIterCallable: TypeAlias = Callable[[], BytesIter] +DataSize: TypeAlias = ByteSize diff --git a/packages/models-library/src/models_library/callbacks_mapping.py b/packages/models-library/src/models_library/callbacks_mapping.py new file mode 100644 index 00000000000..475ff3c823a --- /dev/null +++ b/packages/models-library/src/models_library/callbacks_mapping.py @@ -0,0 +1,97 @@ +from collections.abc import Sequence +from typing import Annotated, Final + +from pydantic import BaseModel, ConfigDict, Field, NonNegativeFloat, field_validator + +INACTIVITY_TIMEOUT_CAP: Final[NonNegativeFloat] = 5 +TIMEOUT_MIN: Final[NonNegativeFloat] = 1 + + +class UserServiceCommand(BaseModel): + service: str = Field( + ..., description="name of the docker-compose service in the docker-compose spec" + ) + command: str | Sequence[str] = Field(..., description="command to run in container") + timeout: NonNegativeFloat = Field( + ..., description="after this interval the command will be timed-out" + ) + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ + "examples": [ + {"service": "rt-web", "command": "ls", "timeout": 1}, + {"service": "s4l-core", "command": ["ls", "-lah"], "timeout": 1}, + ] + }, + ) + + +class CallbacksMapping(BaseModel): + metrics: UserServiceCommand | None = Field( + None, + description="command to recover prometheus metrics from a specific user service", + ) + before_shutdown: Annotated[ + list[UserServiceCommand], + Field( + default_factory=list, + description=( + "commands to run before shutting down the user services" + "commands get executed first to last, multiple commands for the same" + "user services are allowed" + ), + ), + ] + inactivity: UserServiceCommand | None = Field( + None, + description=( + "command used to figure out for how much time the " + "user service(s) were inactive for" + ), + ) + + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ + "examples": [ + { + # empty validates + }, + { + "metrics": None, + "before_shutdown": [], + }, + {"metrics": UserServiceCommand.model_config["json_schema_extra"]["examples"][0]}, # type: ignore [index] + { + "metrics": UserServiceCommand.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "before_shutdown": [ + UserServiceCommand.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + UserServiceCommand.model_config["json_schema_extra"]["examples"][1], # type: ignore [index] + ], + }, + { + "metrics": UserServiceCommand.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "before_shutdown": [ + UserServiceCommand.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + UserServiceCommand.model_config["json_schema_extra"]["examples"][1], # type: ignore [index] + ], + "inactivity": UserServiceCommand.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + }, + ] + }, + ) + + @field_validator("inactivity") + @classmethod + def ensure_inactivity_timeout_is_capped( + cls, v: UserServiceCommand + ) -> UserServiceCommand: + if v is not None and ( + v.timeout < TIMEOUT_MIN or v.timeout > INACTIVITY_TIMEOUT_CAP + ): + msg = ( + f"Constraint not respected for inactivity timeout={v.timeout}: " + f"interval=({TIMEOUT_MIN}, {INACTIVITY_TIMEOUT_CAP})" + ) + raise ValueError(msg) + return v diff --git a/packages/models-library/src/models_library/clusters.py b/packages/models-library/src/models_library/clusters.py index c4387161de0..14f3b77b390 100644 --- a/packages/models-library/src/models_library/clusters.py 
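# A quick sketch of the inactivity-timeout rule enforced by callbacks_mapping.py above:
# timeouts must lie within [TIMEOUT_MIN, INACTIVITY_TIMEOUT_CAP] seconds. Illustrative
# only; assumes the models_library package from this diff is importable.
from models_library.callbacks_mapping import CallbacksMapping
from pydantic import ValidationError

# a timeout inside the allowed window (1 to 5 seconds) validates fine
ok = CallbacksMapping.model_validate(
    {"inactivity": {"service": "rt-web", "command": "ls", "timeout": 2}}
)
assert ok.inactivity is not None

# a timeout above INACTIVITY_TIMEOUT_CAP is rejected by the field validator
try:
    CallbacksMapping.model_validate(
        {"inactivity": {"service": "rt-web", "command": "ls", "timeout": 10}}
    )
except ValidationError as err:
    print(err)  # "Constraint not respected for inactivity timeout=10.0: ..."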
+++ b/packages/models-library/src/models_library/clusters.py @@ -1,197 +1,117 @@ -from typing import Dict, Final, Literal, Optional, Union +from enum import auto +from pathlib import Path +from typing import Literal, TypeAlias -from pydantic import AnyUrl, BaseModel, Extra, Field, HttpUrl, SecretStr, root_validator +from pydantic import AnyUrl, BaseModel, ConfigDict, Field, HttpUrl, field_validator +from pydantic.config import JsonDict from pydantic.types import NonNegativeInt -from simcore_postgres_database.models.clusters import ClusterType -from .users import GroupID +from .groups import GroupID +from .utils.common_validators import create_enums_pre_validator +from .utils.enums import StrAutoEnum -class ClusterAccessRights(BaseModel): - read: bool = Field(..., description="allows to run pipelines on that cluster") - write: bool = Field(..., description="allows to modify the cluster") - delete: bool = Field(..., description="allows to delete a cluster") +class ClusterTypeInModel(StrAutoEnum): + # This enum contains more types than its equivalent to `simcore_postgres_database.models.clusters.ClusterType` + # SEE models-library/tests/test__pydantic_models_and_enums.py + AWS = auto() + ON_PREMISE = auto() + ON_DEMAND = auto() - class Config: - extra = Extra.forbid +class _AuthenticationBase(BaseModel): + type: str -CLUSTER_ADMIN_RIGHTS = ClusterAccessRights(read=True, write=True, delete=True) -CLUSTER_MANAGER_RIGHTS = ClusterAccessRights(read=True, write=True, delete=False) -CLUSTER_USER_RIGHTS = ClusterAccessRights(read=True, write=False, delete=False) -CLUSTER_NO_RIGHTS = ClusterAccessRights(read=False, write=False, delete=False) - + model_config = ConfigDict(frozen=True, extra="forbid") -class BaseAuthentication(BaseModel): - type: str - class Config: - extra = Extra.forbid - - -class SimpleAuthentication(BaseAuthentication): - type: Literal["simple"] = "simple" - username: str - password: SecretStr - - class Config(BaseAuthentication.Config): - schema_extra = { - "examples": [ - { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - ] - } - - -class KerberosAuthentication(BaseAuthentication): - type: Literal["kerberos"] = "kerberos" - # NOTE: the entries here still need to be defined - class Config(BaseAuthentication.Config): - schema_extra = { - "examples": [ - { - "type": "kerberos", - }, - ] - } - - -class JupyterHubTokenAuthentication(BaseAuthentication): - type: Literal["jupyterhub"] = "jupyterhub" - api_token: str - - class Config(BaseAuthentication.Config): - schema_extra = { - "examples": [ - {"type": "jupyterhub", "api_token": "some_jupyterhub_token"}, - ] - } - - -class NoAuthentication(BaseAuthentication): +class NoAuthentication(_AuthenticationBase): type: Literal["none"] = "none" + model_config = ConfigDict(json_schema_extra={"examples": [{"type": "none"}]}) + + +class TLSAuthentication(_AuthenticationBase): + type: Literal["tls"] = "tls" + tls_ca_file: Path + tls_client_cert: Path + tls_client_key: Path + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "type": "tls", + "tls_ca_file": "/path/to/ca_file", + "tls_client_cert": "/path/to/cert_file", + "tls_client_key": "/path/to/key_file", + }, + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) -InternalClusterAuthentication = NoAuthentication -ExternalClusterAuthentication = Union[ - SimpleAuthentication, KerberosAuthentication, JupyterHubTokenAuthentication -] -ClusterAuthentication = Union[ 
- ExternalClusterAuthentication, - InternalClusterAuthentication, -] + +ClusterAuthentication: TypeAlias = NoAuthentication | TLSAuthentication class BaseCluster(BaseModel): name: str = Field(..., description="The human readable name of the cluster") - description: Optional[str] = None - type: ClusterType + type: ClusterTypeInModel owner: GroupID - thumbnail: Optional[HttpUrl] = Field( - None, + thumbnail: HttpUrl | None = Field( + default=None, description="url to the image describing this cluster", examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"], + validate_default=True, ) endpoint: AnyUrl authentication: ClusterAuthentication = Field( - ..., description="Dask gateway authentication" + ..., description="Dask gateway authentication", discriminator="type" + ) + _from_equivalent_enums = field_validator("type", mode="before")( + create_enums_pre_validator(ClusterTypeInModel) ) - access_rights: Dict[GroupID, ClusterAccessRights] = Field(default_factory=dict) - - class Config: - extra = Extra.forbid - use_enum_values = True - -ClusterID = NonNegativeInt -DEFAULT_CLUSTER_ID: Final[NonNegativeInt] = 0 + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "name": "My awesome cluster", + "type": ClusterTypeInModel.ON_PREMISE, + "owner": 12, + "endpoint": "https://registry.osparc-development.fake.dev", + "authentication": { + "type": "tls", + "tls_ca_file": "/path/to/ca_file", + "tls_client_cert": "/path/to/cert_file", + "tls_client_key": "/path/to/key_file", + }, + }, + { + "name": "My AWS cluster", + "type": ClusterTypeInModel.AWS, + "owner": 154, + "endpoint": "https://registry.osparc-development.fake.dev", + "authentication": { + "type": "tls", + "tls_ca_file": "/path/to/ca_file", + "tls_client_cert": "/path/to/cert_file", + "tls_client_key": "/path/to/key_file", + }, + }, + ] + } + ) + model_config = ConfigDict( + use_enum_values=True, json_schema_extra=_update_json_schema_extra + ) -class Cluster(BaseCluster): - id: ClusterID = Field(..., description="The cluster ID") - class Config(BaseCluster.Config): - schema_extra = { - "examples": [ - { - "id": DEFAULT_CLUSTER_ID, - "name": "The default cluster", - "type": ClusterType.ON_PREMISE, - "owner": 1456, - "endpoint": "tcp://default-dask-scheduler:8786", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "id": 432, - "name": "My awesome cluster", - "type": ClusterType.ON_PREMISE, - "owner": 12, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "id": 432546, - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": ClusterType.AWS, - "owner": 154, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": {"type": "kerberos"}, - "access_rights": { - 154: CLUSTER_ADMIN_RIGHTS, - 12: CLUSTER_MANAGER_RIGHTS, - 7899: CLUSTER_USER_RIGHTS, - }, - }, - { - "id": 325436, - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": ClusterType.AWS, - "owner": 2321, - "endpoint": "https://registry.osparc-development.fake2.dev", - "authentication": { - "type": "jupyterhub", - "api_token": "some_fake_token", - }, - "access_rights": { - 154: CLUSTER_ADMIN_RIGHTS, - 12: CLUSTER_MANAGER_RIGHTS, - 7899: CLUSTER_USER_RIGHTS, - }, - }, - ] - } - - @root_validator(pre=True) - @classmethod - def 
check_owner_has_access_rights(cls, values): - is_default_cluster = bool(values["id"] == DEFAULT_CLUSTER_ID) - owner_gid = values["owner"] - - # check owner is in the access rights, if not add it - access_rights = values.get("access_rights", values.get("accessRights", {})) - if owner_gid not in access_rights: - access_rights[owner_gid] = ( - CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS - ) - # check owner has the expected access - if access_rights[owner_gid] != ( - CLUSTER_USER_RIGHTS if is_default_cluster else CLUSTER_ADMIN_RIGHTS - ): - raise ValueError( - f"the cluster owner access rights are incorrectly set: {access_rights[owner_gid]}" - ) - values["access_rights"] = access_rights - return values +ClusterID: TypeAlias = NonNegativeInt diff --git a/packages/models-library/src/models_library/computations.py b/packages/models-library/src/models_library/computations.py new file mode 100644 index 00000000000..6b88aff83ad --- /dev/null +++ b/packages/models-library/src/models_library/computations.py @@ -0,0 +1,38 @@ +from datetime import datetime +from decimal import Decimal +from typing import Any + +from pydantic import AnyUrl, BaseModel + +from .projects import ProjectID +from .projects_nodes_io import NodeID +from .projects_state import RunningState + + +class ComputationTaskWithAttributes(BaseModel): + project_uuid: ProjectID + node_id: NodeID + state: RunningState + progress: float + image: dict[str, Any] + started_at: datetime | None + ended_at: datetime | None + log_download_link: AnyUrl | None + + # Attributes added by the webserver + node_name: str + osparc_credits: Decimal | None + + +class ComputationRunWithAttributes(BaseModel): + project_uuid: ProjectID + iteration: int + state: RunningState + info: dict[str, Any] + submitted_at: datetime + started_at: datetime | None + ended_at: datetime | None + + # Attributes added by the webserver + root_project_name: str + project_custom_metadata: dict[str, Any] diff --git a/packages/models-library/src/models_library/conversations.py b/packages/models-library/src/models_library/conversations.py new file mode 100644 index 00000000000..5d33a0fcd45 --- /dev/null +++ b/packages/models-library/src/models_library/conversations.py @@ -0,0 +1,70 @@ +from datetime import datetime +from enum import auto +from typing import TypeAlias +from uuid import UUID + +from models_library.groups import GroupID +from models_library.projects import ProjectID +from pydantic import BaseModel, ConfigDict + +from .products import ProductName +from .utils.enums import StrAutoEnum + +ConversationID: TypeAlias = UUID +ConversationMessageID: TypeAlias = UUID + + +class ConversationType(StrAutoEnum): + PROJECT_STATIC = auto() # Static conversation for the project + PROJECT_ANNOTATION = ( + auto() # Something like sticky note, can be located anywhere in the pipeline UI + ) + + +class ConversationMessageType(StrAutoEnum): + MESSAGE = auto() + NOTIFICATION = ( + auto() # Special type of message used for storing notifications in the conversation + ) + + +# +# DB +# + + +class ConversationGetDB(BaseModel): + conversation_id: ConversationID + product_name: ProductName + name: str + project_uuid: ProjectID | None + user_group_id: GroupID + type: ConversationType + + # states + created: datetime + modified: datetime + + model_config = ConfigDict(from_attributes=True) + + +class ConversationMessageGetDB(BaseModel): + message_id: ConversationMessageID + conversation_id: ConversationID + user_group_id: GroupID + content: str + type: ConversationMessageType + + 
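# Side note on clusters.py above: the authentication field is now a union of
# NoAuthentication | TLSAuthentication discriminated on "type". A small, illustrative
# validation sketch (assuming this package is importable):
from models_library.clusters import (
    ClusterAuthentication,
    NoAuthentication,
    TLSAuthentication,
)
from pydantic import TypeAdapter

auth_adapter = TypeAdapter(ClusterAuthentication)

# "type": "none" resolves to the NoAuthentication model
assert isinstance(auth_adapter.validate_python({"type": "none"}), NoAuthentication)

# "type": "tls" resolves to TLSAuthentication, with the paths coerced to pathlib.Path
tls = auth_adapter.validate_python(
    {
        "type": "tls",
        "tls_ca_file": "/path/to/ca_file",
        "tls_client_cert": "/path/to/cert_file",
        "tls_client_key": "/path/to/key_file",
    }
)
assert isinstance(tls, TLSAuthentication)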
# states + created: datetime + modified: datetime + + model_config = ConfigDict(from_attributes=True) + + +class ConversationPatchDB(BaseModel): + name: str | None = None + + +class ConversationMessagePatchDB(BaseModel): + content: str | None = None diff --git a/packages/models-library/src/models_library/docker.py b/packages/models-library/src/models_library/docker.py index dfc94fbe90c..ae23ba7eec4 100644 --- a/packages/models-library/src/models_library/docker.py +++ b/packages/models-library/src/models_library/docker.py @@ -1,70 +1,239 @@ +import contextlib import re -import warnings -from typing import Any, Optional +from typing import Annotated, Any, Final, TypeAlias -from models_library.generated_models.docker_rest_api import Task -from models_library.products import ProductName -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.users import UserID -from pydantic import BaseModel, ConstrainedStr, Field, root_validator +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + StringConstraints, + TypeAdapter, + ValidationError, + model_validator, +) from .basic_regex import DOCKER_GENERIC_TAG_KEY_RE, DOCKER_LABEL_KEY_REGEX +from .basic_types import ConstrainedStr +from .generated_models.docker_rest_api import Task +from .products import ProductName +from .projects import ProjectID +from .projects_nodes_io import NodeID +from .users import UserID class DockerLabelKey(ConstrainedStr): # NOTE: https://docs.docker.com/config/labels-custom-metadata/#key-format-recommendations # good practice: use reverse DNS notation - regex: Optional[re.Pattern[str]] = DOCKER_LABEL_KEY_REGEX + pattern = DOCKER_LABEL_KEY_REGEX + + @classmethod + def from_key(cls, key: str) -> "DockerLabelKey": + return cls(key.lower().replace("_", "-")) + + +# NOTE: https://docs.docker.com/engine/reference/commandline/tag/#description +DockerGenericTag: TypeAlias = Annotated[ + str, StringConstraints(pattern=DOCKER_GENERIC_TAG_KEY_RE) +] + +DockerPlacementConstraint: TypeAlias = Annotated[ + str, + StringConstraints( + strip_whitespace=True, + pattern=re.compile( + r"^(?!-)(?![.])(?!.*--)(?!.*[.][.])[a-zA-Z0-9.-]*(? DockerLabelKey: + return DockerLabelKey( + f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}{key.replace('_', '-').lower()}" + ) -class SimcoreServiceDockerLabelKeys(BaseModel): - # NOTE: in a next PR, this should be moved to packages models-library and used - # all over, and aliases should use io.simcore.service.* - # https://github.com/ITISFoundation/osparc-simcore/issues/3638 +class StandardSimcoreDockerLabels(BaseModel): + """ + Represents the standard label on oSparc created containers (not yet services) + In order to create this object in code, please use model_construct() method! 
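# A short, illustrative sketch of the two label-key helpers added in docker.py above.
# The runtime prefix value is assumed from the StandardSimcoreDockerLabels examples
# further down ("io.simcore.runtime.").
from models_library.docker import DockerLabelKey, to_simcore_runtime_docker_label_key

# from_key normalizes a python-style key into a docker-friendly label key
assert DockerLabelKey.from_key("MEMORY_LIMIT") == "memory-limit"

# to_simcore_runtime_docker_label_key additionally prepends the runtime namespace,
# e.g. "io.simcore.runtime.memory-limit" (prefix assumed from the examples below)
print(to_simcore_runtime_docker_label_key("memory_limit"))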
+ """ - user_id: UserID = Field(..., alias="user_id") - project_id: ProjectID = Field(..., alias="study_id") - node_id: NodeID = Field(..., alias="uuid") + user_id: UserID = Field(..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}user-id") # type: ignore[literal-required] + project_id: ProjectID = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}project-id" + ) + node_id: NodeID = Field(..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}node-id") # type: ignore[literal-required] - product_name: ProductName - simcore_user_agent: str + product_name: ProductName = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}product-name" + ) + simcore_user_agent: str = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}simcore-user-agent" + ) - @root_validator(pre=True) + swarm_stack_name: str = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}swarm-stack-name" + ) + + memory_limit: ByteSize = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}memory-limit" + ) + cpu_limit: float = Field( # type: ignore[literal-required] + ..., alias=f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}cpu-limit" + ) + + @model_validator(mode="before") @classmethod - def ensure_defaults(cls, values: dict[str, Any]) -> dict[str, Any]: - warnings.warn( - ( - "Once https://github.com/ITISFoundation/osparc-simcore/pull/3990 " - "reaches production this entire root_validator function " - "can be safely removed. Please check " - "https://github.com/ITISFoundation/osparc-simcore/issues/3996" - ), - DeprecationWarning, - stacklevel=2, - ) - if values.get("product_name", None) is None: - values["product_name"] = "opsarc" - if values.get("simcore_user_agent", None) is None: - values["simcore_user_agent"] = "" + def _backwards_compatibility(cls, values: dict[str, Any]) -> dict[str, Any]: + # NOTE: this is necessary for dy-sidecar and legacy service until they are adjusted + if mapped_values := { + _BACKWARDS_COMPATIBILITY_SIMCORE_RUNTIME_DOCKER_LABELS_MAP[k]: v + for k, v in values.items() + if k in _BACKWARDS_COMPATIBILITY_SIMCORE_RUNTIME_DOCKER_LABELS_MAP + }: + # these values were sometimes omitted, so let's provide some defaults + for key in ["product-name", "simcore-user-agent", "swarm-stack-name"]: + mapped_values.setdefault( + f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}{key}", + _UNDEFINED_LABEL_VALUE_STR, + ) + + mapped_values.setdefault( + f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}memory-limit", + values.get("memory_limit", _UNDEFINED_LABEL_VALUE_INT), + ) + + def _convert_nano_cpus_to_cpus(nano_cpu: str) -> str: + with contextlib.suppress(ValidationError): + return f"{TypeAdapter(float).validate_python(nano_cpu) / (1.0 * 10**9):.2f}" + return _UNDEFINED_LABEL_VALUE_INT + + mapped_values.setdefault( + f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}cpu-limit", + values.get( + "cpu_limit", + _convert_nano_cpus_to_cpus( + values.get( + "nano_cpus_limit", + _UNDEFINED_LABEL_VALUE_INT, + ) + ), + ), + ) + return mapped_values return values - def to_docker_labels(self) -> dict[str, str]: + def to_simcore_runtime_docker_labels(self) -> dict[DockerLabelKey, str]: """returns a dictionary of strings as required by docker""" - std_export = self.dict(by_alias=True) - return {k: f"{v}" for k, v in sorted(std_export.items())} + return { + to_simcore_runtime_docker_label_key(k): f"{v}" + for k, v in sorted(self.model_dump().items()) + } @classmethod 
- def from_docker_task(cls, docker_task: Task) -> "SimcoreServiceDockerLabelKeys": - assert docker_task.Spec # nosec - assert docker_task.Spec.ContainerSpec # nosec - task_labels = docker_task.Spec.ContainerSpec.Labels or {} - return cls.parse_obj(task_labels) - - class Config: - allow_population_by_field_name = True + def from_docker_task(cls, docker_task: Task) -> "StandardSimcoreDockerLabels": + assert docker_task.spec # nosec + assert docker_task.spec.container_spec # nosec + task_labels = docker_task.spec.container_spec.labels or {} + return cls.model_validate(task_labels) + + model_config = ConfigDict( + populate_by_name=True, + json_schema_extra={ + "examples": [ + # legacy service labels + { + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "swarm_stack_name": "devel-simcore", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # legacy container labels + { + "mem_limit": "1073741824", + "nano_cpus_limit": "4000000000", + "node_id": "1f963626-66e1-43f1-a777-33955c08b909", + "simcore_user_agent": "puppeteer", + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "swarm_stack_name": "devel-simcore", + "user_id": "5", + }, + # dy-sidecar service labels + { + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "swarm_stack_name": "devel-simcore", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # dy-sidecar container labels + { + "mem_limit": "1073741824", + "nano_cpus_limit": "4000000000", + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # dy-proxy service labels + { + "dynamic-type": "dynamic-sidecar", + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "swarm_stack_name": "devel-simcore", + "type": "dependency-v2", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # dy-proxy container labels + { + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # dy-sidecar user-services labels + { + "product_name": "osparc", + "simcore_user_agent": "puppeteer", + "study_id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "user_id": "5", + "uuid": "1f963626-66e1-43f1-a777-33955c08b909", + }, + # modern both dynamic-sidecar services and computational services + { + "io.simcore.runtime.cpu-limit": "2.4", + "io.simcore.runtime.memory-limit": "1073741824", + "io.simcore.runtime.node-id": "1f963626-66e1-43f1-a777-33955c08b909", + "io.simcore.runtime.product-name": "osparc", + "io.simcore.runtime.project-id": "29f393fc-1410-47b3-b4b9-61dfce21a2a6", + "io.simcore.runtime.simcore-user-agent": "puppeteer", + "io.simcore.runtime.swarm-stack-name": "devel-osparc", + "io.simcore.runtime.user-id": "5", + }, + ] + }, + ) + + +DockerNodeID: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, pattern=re.compile(r"[a-zA-Z0-9]")) +] diff --git a/packages/models-library/src/models_library/emails.py b/packages/models-library/src/models_library/emails.py index 80996eed76f..72835f4c754 100644 --- a/packages/models-library/src/models_library/emails.py +++ b/packages/models-library/src/models_library/emails.py @@ -1,7 +1,5 @@ -from pydantic import EmailStr +from typing import Annotated, TypeAlias +from pydantic import AfterValidator, EmailStr -class LowerCaseEmailStr(EmailStr): - @classmethod - def validate(cls, value: str) -> str: - return super().validate(value).lower() +LowerCaseEmailStr: TypeAlias = Annotated[str, EmailStr, AfterValidator(str.lower)] diff --git 
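# A one-line check (illustrative, assuming this package is importable) of the reworked
# LowerCaseEmailStr from emails.py above: EmailStr validates the address and
# AfterValidator(str.lower) then normalizes its case.
from models_library.emails import LowerCaseEmailStr
from pydantic import TypeAdapter

adapter = TypeAdapter(LowerCaseEmailStr)
assert adapter.validate_python("Jane.Doe@Example.COM") == "jane.doe@example.com"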
a/packages/models-library/src/models_library/errors.py b/packages/models-library/src/models_library/errors.py index 505ad1dc316..d1498f63474 100644 --- a/packages/models-library/src/models_library/errors.py +++ b/packages/models-library/src/models_library/errors.py @@ -1,6 +1,10 @@ -from typing import Any, TypedDict, Union +from typing import Any -Loc = tuple[Union[int, str], ...] +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + +Loc = tuple[int | str, ...] class _ErrorDictRequired(TypedDict): @@ -30,6 +34,12 @@ class ErrorDict(_ErrorDictRequired, total=False): ctx: dict[str, Any] +RABBITMQ_CLIENT_UNHEALTHY_MSG = "RabbitMQ cannot be reached!" +POSRGRES_DATABASE_UNHEALTHY_MSG = "Postgres cannot be reached!" +REDIS_CLIENT_UNHEALTHY_MSG = "Redis cannot be reached!" +DOCKER_API_PROXY_UNHEALTHY_MSG = "docker-api-proxy cannot be reached!" + + # NOTE: Here we do not just import as 'from pydantic.error_wrappers import ErrorDict' # because that only works if TYPE_CHECKING=True. __all__ = ("ErrorDict",) diff --git a/packages/models-library/src/models_library/folders.py b/packages/models-library/src/models_library/folders.py new file mode 100644 index 00000000000..0333b463b34 --- /dev/null +++ b/packages/models-library/src/models_library/folders.py @@ -0,0 +1,66 @@ +from datetime import datetime +from enum import auto +from typing import NamedTuple, TypeAlias + +from pydantic import BaseModel, ConfigDict, PositiveInt, ValidationInfo, field_validator + +from .access_rights import AccessRights +from .groups import GroupID +from .users import UserID +from .utils.enums import StrAutoEnum +from .workspaces import WorkspaceID + +FolderID: TypeAlias = PositiveInt + + +class FolderScope(StrAutoEnum): + ROOT = auto() + SPECIFIC = auto() + ALL = auto() + + +class FolderQuery(BaseModel): + folder_scope: FolderScope + folder_id: PositiveInt | None = None + + @field_validator("folder_id", mode="before") + @classmethod + def validate_folder_id(cls, value, info: ValidationInfo): + scope = info.data.get("folder_scope") + if scope == FolderScope.SPECIFIC and value is None: + msg = "folder_id must be provided when folder_scope is SPECIFIC." + raise ValueError(msg) + if scope != FolderScope.SPECIFIC and value is not None: + msg = "folder_id should be None when folder_scope is not SPECIFIC." + raise ValueError(msg) + return value + + +class FolderDB(BaseModel): + folder_id: FolderID + name: str + parent_folder_id: FolderID | None + + created_by_gid: GroupID + created: datetime + modified: datetime + + trashed: datetime | None + trashed_by: UserID | None + trashed_explicitly: bool + + user_id: UserID | None # owner? 
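# To make the folder-scope rules above concrete, a brief illustrative sketch of the
# FolderQuery validator: SPECIFIC requires a folder_id, other scopes must leave it unset.
from models_library.folders import FolderQuery, FolderScope
from pydantic import ValidationError

FolderQuery(folder_scope=FolderScope.SPECIFIC, folder_id=42)  # ok
FolderQuery(folder_scope=FolderScope.ROOT)  # ok, folder_id stays None

try:
    FolderQuery(folder_scope=FolderScope.ROOT, folder_id=42)  # rejected
except ValidationError as err:
    print(err)  # "folder_id should be None when folder_scope is not SPECIFIC."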
+ workspace_id: WorkspaceID | None + model_config = ConfigDict(from_attributes=True) + + +class UserFolder(FolderDB): + my_access_rights: AccessRights + + model_config = ConfigDict(from_attributes=True) + + +class FolderTuple(NamedTuple): + folder_db: FolderDB + trashed_by_primary_gid: GroupID | None + my_access_rights: AccessRights diff --git a/packages/models-library/src/models_library/function_services_catalog/_key_labels.py b/packages/models-library/src/models_library/function_services_catalog/_key_labels.py index 408515bacea..599b2998d2e 100644 --- a/packages/models-library/src/models_library/function_services_catalog/_key_labels.py +++ b/packages/models-library/src/models_library/function_services_catalog/_key_labels.py @@ -1,10 +1,11 @@ from typing import Final from ..services import ServiceKey +from ..services_constants import FRONTEND_SERVICE_KEY_PREFIX # NOTE: due to legacy reasons, the name remains with 'frontend' in it but # it now refers to a more general group: function sections that contains front-end services as well -FUNCTION_SERVICE_KEY_PREFIX: Final[str] = "simcore/services/frontend" +FUNCTION_SERVICE_KEY_PREFIX: Final[str] = FRONTEND_SERVICE_KEY_PREFIX def is_function_service(service_key: ServiceKey) -> bool: diff --git a/packages/models-library/src/models_library/function_services_catalog/_registry.py b/packages/models-library/src/models_library/function_services_catalog/_registry.py index ad1d5635308..dc006e9fb43 100644 --- a/packages/models-library/src/models_library/function_services_catalog/_registry.py +++ b/packages/models-library/src/models_library/function_services_catalog/_registry.py @@ -20,7 +20,7 @@ probes, ) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) catalog = FunctionServices(settings=FunctionServiceSettings()) diff --git a/packages/models-library/src/models_library/function_services_catalog/_settings.py b/packages/models-library/src/models_library/function_services_catalog/_settings.py index 3ca4260d8ce..b55fc09b9d2 100644 --- a/packages/models-library/src/models_library/function_services_catalog/_settings.py +++ b/packages/models-library/src/models_library/function_services_catalog/_settings.py @@ -1,11 +1,12 @@ import json import os -from pydantic import BaseSettings +from common_library.json_serialization import json_loads +from pydantic_settings import BaseSettings # Expects env var: FUNCTION_SERVICES_AUTHORS='{"OM":{"name": ...}, "EN":{...} }' try: - AUTHORS = json.loads(os.environ.get("FUNCTION_SERVICES_AUTHORS", "{}")) + AUTHORS = json_loads(os.environ.get("FUNCTION_SERVICES_AUTHORS", "{}")) except json.decoder.JSONDecodeError: AUTHORS = {} diff --git a/packages/models-library/src/models_library/function_services_catalog/_utils.py b/packages/models-library/src/models_library/function_services_catalog/_utils.py index 26eaa13e6c8..a58a524d094 100644 --- a/packages/models-library/src/models_library/function_services_catalog/_utils.py +++ b/packages/models-library/src/models_library/function_services_catalog/_utils.py @@ -1,12 +1,12 @@ import logging +from collections.abc import Callable, Iterator from dataclasses import dataclass -from typing import Callable, Dict, Iterator, Optional, Tuple from urllib.parse import quote -from ..services import Author, ServiceDockerData, ServiceKey, ServiceVersion +from ..services import Author, ServiceKey, ServiceMetaDataPublished, ServiceVersion from ._settings import AUTHORS, FunctionServiceSettings -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) _DEFAULT 
= { @@ -14,9 +14,10 @@ "email": "unknown@osparc.io", "affiliation": "unknown", } -EN = Author.parse_obj(AUTHORS.get("EN", _DEFAULT)) -OM = Author.parse_obj(AUTHORS.get("OM", _DEFAULT)) -PC = Author.parse_obj(AUTHORS.get("PC", _DEFAULT)) +EN = Author.model_validate(AUTHORS.get("EN", _DEFAULT)) +OM = Author.model_validate(AUTHORS.get("OM", _DEFAULT)) +PC = Author.model_validate(AUTHORS.get("PC", _DEFAULT)) +WVG = Author.model_validate(AUTHORS.get("WVG", _DEFAULT)) def create_fake_thumbnail_url(label: str) -> str: @@ -29,33 +30,35 @@ class ServiceNotFound(KeyError): @dataclass class _Record: - meta: ServiceDockerData - implementation: Optional[Callable] = None + meta: ServiceMetaDataPublished + implementation: Callable | None = None is_under_development: bool = False class FunctionServices: """Used to register a collection of function services""" - def __init__(self, settings: Optional[FunctionServiceSettings] = None): - self._functions: Dict[Tuple[ServiceKey, ServiceVersion], _Record] = {} + def __init__(self, settings: FunctionServiceSettings | None = None): + self._functions: dict[tuple[ServiceKey, ServiceVersion], _Record] = {} self.settings = settings def add( self, - meta: ServiceDockerData, - implementation: Optional[Callable] = None, + meta: ServiceMetaDataPublished, + implementation: Callable | None = None, is_under_development: bool = False, ): """ raises ValueError """ - if not isinstance(meta, ServiceDockerData): - raise ValueError(f"Expected ServiceDockerData, got {type(meta)}") + if not isinstance(meta, ServiceMetaDataPublished): + msg = f"Expected ServiceDockerData, got {type(meta)}" + raise ValueError(msg) # ensure unique if (meta.key, meta.version) in self._functions: - raise ValueError(f"{(meta.key, meta.version)} is already registered") + msg = f"{meta.key, meta.version} is already registered" + raise ValueError(msg) # TODO: ensure callable signature fits metadata @@ -77,19 +80,23 @@ def _skip_dev(self): skip = not self.settings.is_dev_feature_enabled() return skip - def _items(self) -> Iterator[Tuple[Tuple[ServiceKey, ServiceVersion], _Record]]: + def _items( + self, + ) -> Iterator[tuple[tuple[ServiceKey, ServiceVersion], _Record]]: skip_dev = self._skip_dev() for key, value in self._functions.items(): if value.is_under_development and skip_dev: continue yield key, value - def iter_metadata(self) -> Iterator[ServiceDockerData]: + def iter_metadata(self) -> Iterator[ServiceMetaDataPublished]: """WARNING: this function might skip services marked as 'under development'""" for _, f in self._items(): yield f.meta - def iter_services_key_version(self) -> Iterator[Tuple[ServiceKey, ServiceVersion]]: + def iter_services_key_version( + self, + ) -> Iterator[tuple[ServiceKey, ServiceVersion]]: """WARNING: this function might skip services makred as 'under development'""" for kv, f in self._items(): assert kv == (f.meta.key, f.meta.version) # nosec @@ -97,26 +104,24 @@ def iter_services_key_version(self) -> Iterator[Tuple[ServiceKey, ServiceVersion def get_implementation( self, service_key: ServiceKey, service_version: ServiceVersion - ) -> Optional[Callable]: + ) -> Callable | None: """raises ServiceNotFound""" try: func = self._functions[(service_key, service_version)] except KeyError as err: - raise ServiceNotFound( - f"{service_key}:{service_version} not found in registry" - ) from err + msg = f"{service_key}:{service_version} not found in registry" + raise ServiceNotFound(msg) from err return func.implementation def get_metadata( self, service_key: ServiceKey, service_version: 
ServiceVersion - ) -> ServiceDockerData: + ) -> ServiceMetaDataPublished: """raises ServiceNotFound""" try: func = self._functions[(service_key, service_version)] except KeyError as err: - raise ServiceNotFound( - f"{service_key}:{service_version} not found in registry" - ) from err + msg = f"{service_key}:{service_version} not found in registry" + raise ServiceNotFound(msg) from err return func.meta def __len__(self): diff --git a/packages/models-library/src/models_library/function_services_catalog/api.py b/packages/models-library/src/models_library/function_services_catalog/api.py index ca75107d587..0b99e4d6682 100644 --- a/packages/models-library/src/models_library/function_services_catalog/api.py +++ b/packages/models-library/src/models_library/function_services_catalog/api.py @@ -1,3 +1,4 @@ +# mypy: disable-error-code=truthy-function """ Factory to build catalog of i/o metadata for functions implemented in the front-end @@ -6,27 +7,33 @@ director2->catalog, it was decided to share as a library """ -from typing import Iterator, Tuple +from collections.abc import Iterator -from ..services import ServiceDockerData +from ..services import ServiceMetaDataPublished from ._key_labels import is_function_service, is_iterator_service from ._registry import catalog +from .services.parameters import is_parameter_service +from .services.probes import is_probe_service assert catalog # nosec assert is_iterator_service # nosec +assert is_parameter_service # nosec +assert is_probe_service # nosec -def iter_service_docker_data() -> Iterator[ServiceDockerData]: +def iter_service_docker_data() -> Iterator[ServiceMetaDataPublished]: for meta_obj in catalog.iter_metadata(): # NOTE: the originals are this way not modified from outside - copied_meta_obj = meta_obj.copy(deep=True) + copied_meta_obj = meta_obj.model_copy(deep=True) assert is_function_service(copied_meta_obj.key) # nosec yield copied_meta_obj -__all__: Tuple[str, ...] = ( +__all__: tuple[str, ...] = ( "catalog", "is_function_service", "is_iterator_service", + "is_parameter_service", + "is_probe_service", "iter_service_docker_data", ) diff --git a/packages/models-library/src/models_library/function_services_catalog/services/demo_units.py b/packages/models-library/src/models_library/function_services_catalog/services/demo_units.py index 3e739d14b6e..44bd30e0899 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/demo_units.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/demo_units.py @@ -1,10 +1,10 @@ from ...services import ( - LATEST_INTEGRATION_VERSION, - ServiceDockerData, ServiceInput, + ServiceMetaDataPublished, ServiceOutput, ServiceType, ) +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import OM, PC, FunctionServices, create_fake_thumbnail_url @@ -15,7 +15,7 @@ # If this assumption cannot be guaranteed anymore the test must be updated. 
# -META = ServiceDockerData.parse_obj( +META = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/data-iterator/demo-units", diff --git a/packages/models-library/src/models_library/function_services_catalog/services/file_picker.py b/packages/models-library/src/models_library/function_services_catalog/services/file_picker.py index 8062b36fa56..2245a8ba3ff 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/file_picker.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/file_picker.py @@ -1,10 +1,14 @@ from typing import Final -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...services import ( + LATEST_INTEGRATION_VERSION, + ServiceMetaDataPublished, + ServiceType, +) from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import OM, FunctionServices -META: Final = ServiceDockerData.parse_obj( +META: Final = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/file-picker", diff --git a/packages/models-library/src/models_library/function_services_catalog/services/iter_range.py b/packages/models-library/src/models_library/function_services_catalog/services/iter_range.py index a64d92d29eb..d59e37735e8 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/iter_range.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/iter_range.py @@ -1,15 +1,18 @@ -from typing import Iterator, Optional +from collections.abc import Iterator -from ...projects_nodes import OutputsDict -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...projects_nodes import OutputID, OutputsDict +from ...services import ServiceMetaDataPublished, ServiceType +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import OM, FunctionServices, create_fake_thumbnail_url -def create_metadata(type_name: str, prefix: Optional[str] = None) -> ServiceDockerData: +def create_metadata( + type_name: str, prefix: str | None = None +) -> ServiceMetaDataPublished: prefix = prefix or type_name LABEL = f"{type_name.capitalize()} iterator" - return ServiceDockerData.parse_obj( + return ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/data-iterator/{prefix}-range", @@ -56,15 +59,14 @@ def create_metadata(type_name: str, prefix: Optional[str] = None) -> ServiceDock def _linspace_func( linspace_start: int = 0, linspace_stop: int = 1, linspace_step: int = 1 ) -> Iterator[int]: - for value in range(linspace_start, linspace_stop, linspace_step): - yield value + yield from range(linspace_start, linspace_stop, linspace_step) def _linspace_generator(**kwargs) -> Iterator[OutputsDict]: # Maps generator with iterable outputs. 
# Can have non-iterable outputs as well for value in _linspace_func(**kwargs): - yield {"out_1": value} + yield {OutputID("out_1"): value} services = FunctionServices() diff --git a/packages/models-library/src/models_library/function_services_catalog/services/iter_sensitivity.py b/packages/models-library/src/models_library/function_services_catalog/services/iter_sensitivity.py index de99306ac60..cfe170bc7f4 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/iter_sensitivity.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/iter_sensitivity.py @@ -1,17 +1,22 @@ +from collections.abc import Iterator from copy import deepcopy -from typing import Any, Dict, Iterator, List, Tuple +from typing import Any -from pydantic import schema_of +from pydantic import TypeAdapter -from ...projects_nodes import OutputsDict -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...projects_nodes import OutputID, OutputsDict +from ...services import ServiceMetaDataPublished, ServiceType +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import EN, OM, FunctionServices, create_fake_thumbnail_url -LIST_NUMBERS_SCHEMA: Dict[str, Any] = schema_of(List[float], title="list[number]") +LIST_NUMBERS_SCHEMA: dict[str, Any] = { + **TypeAdapter(list[float]).json_schema(), + "title": "list[number]", +} -META = ServiceDockerData.parse_obj( +META = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/data-iterator/sensitivity", @@ -66,11 +71,10 @@ def eval_sensitivity( *, - paramrefs: List[float], - paramdiff: List[float], + paramrefs: list[float], + paramdiff: list[float], diff_or_fact: bool, -) -> Iterator[Tuple[int, List[float], List[float]]]: - +) -> Iterator[tuple[int, list[float], list[float]]]: # This code runs in the backend assert len(paramrefs) == len(paramdiff) # nosec @@ -95,12 +99,16 @@ def eval_sensitivity( def _sensitivity_generator( - paramrefs: List[float], paramdiff: List[float], diff_or_fact: bool + paramrefs: list[float], paramdiff: list[float], diff_or_fact: bool ) -> Iterator[OutputsDict]: for i, paramtestplus, paramtestminus in eval_sensitivity( paramrefs=paramrefs, paramdiff=paramdiff, diff_or_fact=diff_or_fact ): - yield {"out_1": i, "out_2": paramtestplus, "out_3": paramtestminus} + yield { + OutputID("out_1"): i, + OutputID("out_2"): paramtestplus, + OutputID("out_3"): paramtestminus, + } services = FunctionServices() diff --git a/packages/models-library/src/models_library/function_services_catalog/services/nodes_group.py b/packages/models-library/src/models_library/function_services_catalog/services/nodes_group.py index c14ff00efb3..40adb28f342 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/nodes_group.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/nodes_group.py @@ -1,4 +1,5 @@ -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...services import ServiceMetaDataPublished, ServiceType +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import OM, FunctionServices @@ -6,7 +7,7 @@ # NOTE: DO not mistake with simcore/services/frontend/nodes-group/macros/ # which needs to be redefined. 
# -META = ServiceDockerData.parse_obj( +META = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/nodes-group", diff --git a/packages/models-library/src/models_library/function_services_catalog/services/parameters.py b/packages/models-library/src/models_library/function_services_catalog/services/parameters.py index 4d63883f66e..d62a4a88dfb 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/parameters.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/parameters.py @@ -1,31 +1,26 @@ -from typing import Optional +from typing import Final -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...services import ServiceMetaDataPublished, ServiceType +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX from .._utils import OM, FunctionServices, create_fake_thumbnail_url -def create_metadata( - output_type: str, output_name: Optional[str] = None -) -> ServiceDockerData: +def _create_metadata(type_name: str) -> ServiceMetaDataPublished: """ Represents a parameter (e.g. "x":5) in a study This is a parametrized node (or param-node in short) """ - LABEL = output_name or f"{output_type.capitalize()} Parameter" - DESCRIPTION = f"Parameter of type {output_type}" - output_name = output_name or "out_1" - - meta = ServiceDockerData.parse_obj( + meta = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, - "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/parameter/{output_type}", + "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/parameter/{type_name}", "version": "1.0.0", "type": ServiceType.FRONTEND, - "name": LABEL, - "description": DESCRIPTION, - "thumbnail": create_fake_thumbnail_url(f"{output_type}"), + "name": f"{type_name.capitalize()} parameter", + "description": f"Produces a {type_name} value at its outputs", + "thumbnail": create_fake_thumbnail_url(f"{type_name}"), "authors": [ OM, ], @@ -33,9 +28,9 @@ def create_metadata( "inputs": {}, "outputs": { "out_1": { - "label": output_name, - "description": DESCRIPTION, - "type": output_type, + "label": f"{type_name}_source", + "description": f"Input {type_name} value", + "type": type_name, } }, } @@ -46,18 +41,18 @@ def create_metadata( return meta -META_NUMBER, META_BOOL, META_INT, META_STR = [ - create_metadata(output_type=t) for t in ("number", "boolean", "integer", "string") -] - -META_ARRAY = ServiceDockerData.parse_obj( +META_NUMBER: Final = _create_metadata(type_name="number") +META_BOOL: Final = _create_metadata(type_name="boolean") +META_INT: Final = _create_metadata(type_name="integer") +META_STR: Final = _create_metadata(type_name="string") +META_ARRAY: Final = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/parameter/array", "version": "1.0.0", "type": ServiceType.FRONTEND, "name": "Array Parameter", - "description": "Parameter of type array", + "description": "Array of numbers", "thumbnail": create_fake_thumbnail_url("array"), "authors": [ OM, @@ -80,6 +75,11 @@ def create_metadata( ) +def is_parameter_service(service_key: str) -> bool: + return service_key.startswith(f"{FUNCTION_SERVICE_KEY_PREFIX}/parameter/") + + services = FunctionServices() for m in (META_NUMBER, META_BOOL, META_INT, META_STR, META_ARRAY): + assert is_parameter_service(m.key) # nosec services.add(meta=m) diff 
--git a/packages/models-library/src/models_library/function_services_catalog/services/probes.py b/packages/models-library/src/models_library/function_services_catalog/services/probes.py index c41f957523d..4c710a90ade 100644 --- a/packages/models-library/src/models_library/function_services_catalog/services/probes.py +++ b/packages/models-library/src/models_library/function_services_catalog/services/probes.py @@ -1,22 +1,20 @@ -from typing import Optional +from typing import Final -from ...services import LATEST_INTEGRATION_VERSION, ServiceDockerData, ServiceType +from ...services import ServiceMetaDataPublished, ServiceType +from ...services_constants import LATEST_INTEGRATION_VERSION from .._key_labels import FUNCTION_SERVICE_KEY_PREFIX -from .._utils import OM, FunctionServices, create_fake_thumbnail_url +from .._utils import OM, WVG, FunctionServices, create_fake_thumbnail_url -def create_metadata(type_name: str, prefix: Optional[str] = None) -> ServiceDockerData: - prefix = prefix or type_name - LABEL = f"{type_name.capitalize()} probe" - - return ServiceDockerData.parse_obj( +def _create_metadata(type_name: str) -> ServiceMetaDataPublished: + obj: ServiceMetaDataPublished = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, - "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/iterator-consumer/probe/{prefix}", + "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/iterator-consumer/probe/{type_name}", "version": "1.0.0", "type": ServiceType.FRONTEND, - "name": LABEL, - "description": f"Probes its input for {type_name} values", + "name": f"{type_name.capitalize()} probe", + "description": f"Captures {type_name} values at its inputs", "thumbnail": create_fake_thumbnail_url(f"{type_name}"), "authors": [ OM, @@ -24,22 +22,23 @@ def create_metadata(type_name: str, prefix: Optional[str] = None) -> ServiceDock "contact": OM.email, "inputs": { "in_1": { - "label": f"{type_name} Probe", - "description": f"Captures {type_name} values attached to it", - "defaultValue": 0, + "label": f"{type_name}_probe", + "description": f"Output {type_name} value", + # NOTE: no default provided to input probes "type": type_name, } }, "outputs": {}, } ) + return obj -META_NUMBER, META_BOOL, META_INT, META_STR = [ - create_metadata(t) for t in ("number", "boolean", "integer", "string") -] - -META_ARRAY = ServiceDockerData.parse_obj( +META_NUMBER: Final = _create_metadata("number") +META_BOOL: Final = _create_metadata("boolean") +META_INT: Final = _create_metadata("integer") +META_STR: Final = _create_metadata("string") +META_ARRAY: Final = ServiceMetaDataPublished.model_validate( { "integration-version": LATEST_INTEGRATION_VERSION, "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/iterator-consumer/probe/array", @@ -68,7 +67,38 @@ def create_metadata(type_name: str, prefix: Optional[str] = None) -> ServiceDock } ) +META_FILE: Final = ServiceMetaDataPublished.model_validate( + { + "integration-version": LATEST_INTEGRATION_VERSION, + "key": f"{FUNCTION_SERVICE_KEY_PREFIX}/iterator-consumer/probe/file", + "version": "1.0.0", + "type": ServiceType.FRONTEND, + "name": "File probe", + "description": "Probes its input for files", + "thumbnail": create_fake_thumbnail_url("file"), + "authors": [ + WVG, + ], + "contact": WVG.email, + "inputs": { + "in_1": { + "label": "file", + "description": "file", + "type": "data:*/*", + } + }, + "outputs": {}, + } +) + + +def is_probe_service(service_key: str) -> bool: + return service_key.startswith( + f"{FUNCTION_SERVICE_KEY_PREFIX}/iterator-consumer/probe/" + ) + services = 
FunctionServices() -for m in (META_NUMBER, META_BOOL, META_INT, META_STR, META_ARRAY): +for m in (META_NUMBER, META_BOOL, META_INT, META_STR, META_ARRAY, META_FILE): + assert is_probe_service(m.key) # nosec services.add(meta=m) diff --git a/packages/models-library/src/models_library/functions.py b/packages/models-library/src/models_library/functions.py new file mode 100644 index 00000000000..6dd02d86f35 --- /dev/null +++ b/packages/models-library/src/models_library/functions.py @@ -0,0 +1,288 @@ +from collections.abc import Mapping +from enum import Enum +from typing import Annotated, Any, Literal, TypeAlias +from uuid import UUID + +from common_library.errors_classes import OsparcErrorMixin +from models_library import projects +from models_library.basic_regex import UUID_RE_BASE +from models_library.basic_types import ConstrainedStr +from models_library.services_types import ServiceKey, ServiceVersion +from pydantic import BaseModel, Field + +from .projects import ProjectID + +FunctionID: TypeAlias = UUID +FunctionJobID: TypeAlias = UUID +FileID: TypeAlias = UUID + +InputTypes: TypeAlias = FileID | float | int | bool | str | list + + +class FunctionSchemaClass(str, Enum): + json_schema = "application/schema+json" + + +class FunctionSchemaBase(BaseModel): + schema_content: Any | None = Field(default=None) + schema_class: FunctionSchemaClass + + +class JSONFunctionSchema(FunctionSchemaBase): + schema_content: Mapping[str, Any] = Field( + default={}, description="JSON Schema", title="JSON Schema" + ) # json-schema library defines a schema as Mapping[str, Any] + schema_class: FunctionSchemaClass = FunctionSchemaClass.json_schema + + +class JSONFunctionInputSchema(JSONFunctionSchema): + schema_class: Literal[FunctionSchemaClass.json_schema] = ( + FunctionSchemaClass.json_schema + ) + + +class JSONFunctionOutputSchema(JSONFunctionSchema): + schema_class: Literal[FunctionSchemaClass.json_schema] = ( + FunctionSchemaClass.json_schema + ) + + +FunctionInputSchema: TypeAlias = Annotated[ + JSONFunctionInputSchema, + Field(discriminator="schema_class"), +] + +FunctionOutputSchema: TypeAlias = Annotated[ + JSONFunctionOutputSchema, + Field(discriminator="schema_class"), +] + + +class FunctionClass(str, Enum): + PROJECT = "PROJECT" + SOLVER = "SOLVER" + PYTHON_CODE = "PYTHON_CODE" + + +FunctionClassSpecificData: TypeAlias = dict[str, Any] +FunctionJobClassSpecificData: TypeAlias = FunctionClassSpecificData + + +# NOTE, use InputTypes here, but api is throwing weird errors and asking for dict for elements +# see here https://github.com/ITISFoundation/osparc-simcore/issues/7659 +FunctionInputs: TypeAlias = dict[str, Any] | None + +FunctionInputsList: TypeAlias = list[FunctionInputs] + +FunctionOutputs: TypeAlias = dict[str, Any] | None + +FunctionOutputsLogfile: TypeAlias = Any + + +class FunctionBase(BaseModel): + function_class: FunctionClass + title: str = "" + description: str = "" + input_schema: FunctionInputSchema + output_schema: FunctionOutputSchema + default_inputs: FunctionInputs + + +class RegisteredFunctionBase(FunctionBase): + uid: FunctionID + + +class ProjectFunction(FunctionBase): + function_class: Literal[FunctionClass.PROJECT] = FunctionClass.PROJECT + project_id: ProjectID + + +class RegisteredProjectFunction(ProjectFunction, RegisteredFunctionBase): + pass + + +SolverJobID: TypeAlias = UUID + + +class SolverFunction(FunctionBase): + function_class: Literal[FunctionClass.SOLVER] = FunctionClass.SOLVER + solver_key: ServiceKey + solver_version: ServiceVersion + + +class 
RegisteredSolverFunction(SolverFunction, RegisteredFunctionBase): + pass + + +class PythonCodeFunction(FunctionBase): + function_class: Literal[FunctionClass.PYTHON_CODE] = FunctionClass.PYTHON_CODE + code_url: str + + +class RegisteredPythonCodeFunction(PythonCodeFunction, RegisteredFunctionBase): + pass + + +Function: TypeAlias = Annotated[ + ProjectFunction | PythonCodeFunction | SolverFunction, + Field(discriminator="function_class"), +] +RegisteredFunction: TypeAlias = Annotated[ + RegisteredProjectFunction | RegisteredPythonCodeFunction | RegisteredSolverFunction, + Field(discriminator="function_class"), +] + +FunctionJobCollectionID: TypeAlias = projects.ProjectID + + +class FunctionJobBase(BaseModel): + title: str = "" + description: str = "" + function_uid: FunctionID + inputs: FunctionInputs + outputs: FunctionOutputs + function_class: FunctionClass + + +class RegisteredFunctionJobBase(FunctionJobBase): + uid: FunctionJobID + + +class ProjectFunctionJob(FunctionJobBase): + function_class: Literal[FunctionClass.PROJECT] = FunctionClass.PROJECT + project_job_id: ProjectID + + +class RegisteredProjectFunctionJob(ProjectFunctionJob, RegisteredFunctionJobBase): + pass + + +class SolverFunctionJob(FunctionJobBase): + function_class: Literal[FunctionClass.SOLVER] = FunctionClass.SOLVER + solver_job_id: ProjectID + + +class RegisteredSolverFunctionJob(SolverFunctionJob, RegisteredFunctionJobBase): + pass + + +class PythonCodeFunctionJob(FunctionJobBase): + function_class: Literal[FunctionClass.PYTHON_CODE] = FunctionClass.PYTHON_CODE + + +class RegisteredPythonCodeFunctionJob(PythonCodeFunctionJob, RegisteredFunctionJobBase): + pass + + +FunctionJob: TypeAlias = Annotated[ + ProjectFunctionJob | PythonCodeFunctionJob | SolverFunctionJob, + Field(discriminator="function_class"), +] + +RegisteredFunctionJob: TypeAlias = Annotated[ + RegisteredProjectFunctionJob + | RegisteredPythonCodeFunctionJob + | RegisteredSolverFunctionJob, + Field(discriminator="function_class"), +] + + +class FunctionJobStatus(BaseModel): + status: str + + +class FunctionJobCollection(BaseModel): + """Model for a collection of function jobs""" + + title: str = "" + description: str = "" + job_ids: list[FunctionJobID] = [] + + +class RegisteredFunctionJobCollection(FunctionJobCollection): + uid: FunctionJobCollectionID + + +class FunctionJobCollectionStatus(BaseModel): + status: list[str] + + +class FunctionBaseError(OsparcErrorMixin, Exception): + pass + + +class FunctionIDNotFoundError(FunctionBaseError): + msg_template: str = "Function {function_id} not found" + + +class FunctionJobIDNotFoundError(FunctionBaseError): + msg_template: str = "Function job {function_job_id} not found" + + +class FunctionJobCollectionIDNotFoundError(FunctionBaseError): + msg_template: str = "Function job collection {function_job_collection_id} not found" + + +class UnsupportedFunctionClassError(FunctionBaseError): + msg_template: str = "Function class {function_class} is not supported" + + +class UnsupportedFunctionJobClassError(FunctionBaseError): + msg_template: str = "Function job class {function_job_class} is not supported" + + +class UnsupportedFunctionFunctionJobClassCombinationError(FunctionBaseError): + msg_template: str = ( + "Function class {function_class} and function job class {function_job_class} combination is not supported" + ) + + +class FunctionInputsValidationError(FunctionBaseError): + msg_template: str = "Function inputs validation failed: {error}" + + +class FunctionJobDB(BaseModel): + function_uuid: FunctionID + 
title: str = "" + description: str = "" + inputs: FunctionInputs + outputs: FunctionOutputs + class_specific_data: FunctionJobClassSpecificData + function_class: FunctionClass + + +class RegisteredFunctionJobDB(FunctionJobDB): + uuid: FunctionJobID + + +class FunctionDB(BaseModel): + function_class: FunctionClass + title: str = "" + description: str = "" + input_schema: FunctionInputSchema + output_schema: FunctionOutputSchema + default_inputs: FunctionInputs + class_specific_data: FunctionClassSpecificData + + +class RegisteredFunctionDB(FunctionDB): + uuid: FunctionID + + +class FunctionJobCollectionDB(BaseModel): + title: str = "" + description: str = "" + + +class RegisteredFunctionJobCollectionDB(FunctionJobCollectionDB): + uuid: FunctionJobCollectionID + + +class FunctionIDString(ConstrainedStr): + pattern = UUID_RE_BASE + + +class FunctionJobCollectionsListFilters(BaseModel): + """Filters for listing function job collections""" + + has_function_id: FunctionIDString | None = None diff --git a/packages/models-library/src/models_library/generated_models/docker_rest_api.py b/packages/models-library/src/models_library/generated_models/docker_rest_api.py index d4facc10e20..961628fffc5 100644 --- a/packages/models-library/src/models_library/generated_models/docker_rest_api.py +++ b/packages/models-library/src/models_library/generated_models/docker_rest_api.py @@ -1,18 +1,21 @@ # generated by datamodel-codegen: -# filename: https://docs.docker.com/engine/api/v1.41.yaml -# timestamp: 2022-11-28T14:56:37+00:00 +# filename: https://docs.docker.com/reference/api/engine/version/v1.41.yaml +# timestamp: 2024-12-03T18:55:58+00:00 from __future__ import annotations from datetime import datetime from enum import Enum -from typing import Any, Optional +from typing import Annotated, Any -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field, RootModel -class Model(BaseModel): - __root__: Any +class Model(RootModel[Any]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Any class Type(str, Enum): @@ -26,17 +29,28 @@ class Port(BaseModel): An open port on a container """ - IP: Optional[str] = Field( - None, description="Host IP address that the container's port is mapped to" + model_config = ConfigDict( + populate_by_name=True, ) - PrivatePort: int = Field(..., description="Port on the container") - PublicPort: Optional[int] = Field(None, description="Port exposed on the host") - Type: Type + ip: Annotated[ + str | None, + Field( + alias="IP", + description="Host IP address that the container's port is mapped to", + ), + ] = None + private_port: Annotated[ + int, Field(alias="PrivatePort", description="Port on the container") + ] + public_port: Annotated[ + int | None, Field(alias="PublicPort", description="Port exposed on the host") + ] = None + type: Annotated[Type, Field(alias="Type")] class Type1(str, Enum): """ - The mount type: + The mount type: - `bind` a mount of a file or directory from the host into the container. - `volume` a docker volume with the given `Name`. @@ -53,51 +67,78 @@ class Type1(str, Enum): class MountPoint(BaseModel): """ - MountPoint represents a mount point configuration inside the container. + MountPoint represents a mount point configuration inside the container. This is used for reporting the mountpoints in use by a container. 
""" - Type: Optional[Type1] = Field( - None, - description="The mount type:\n\n- `bind` a mount of a file or directory from the host into the container.\n- `volume` a docker volume with the given `Name`.\n- `tmpfs` a `tmpfs`.\n- `npipe` a named pipe from the host into the container.\n", - example="volume", - ) - Name: Optional[str] = Field( - None, - description="Name is the name reference to the underlying data defined by `Source`\ne.g., the volume name.\n", - example="myvolume", - ) - Source: Optional[str] = Field( - None, - description="Source location of the mount.\n\nFor volumes, this contains the storage location of the volume (within\n`/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains\nthe source (host) part of the bind-mount. For `tmpfs` mount points, this\nfield is empty.\n", - example="/var/lib/docker/volumes/myvolume/_data", - ) - Destination: Optional[str] = Field( - None, - description="Destination is the path relative to the container root (`/`) where\nthe `Source` is mounted inside the container.\n", - example="/usr/share/nginx/html/", - ) - Driver: Optional[str] = Field( - None, - description="Driver is the volume driver used to create the volume (if it is a volume).\n", - example="local", - ) - Mode: Optional[str] = Field( - None, - description='Mode is a comma separated list of options supplied by the user when\ncreating the bind/volume mount.\n\nThe default is platform-specific (`"z"` on Linux, empty on Windows).\n', - example="z", - ) - RW: Optional[bool] = Field( - None, - description="Whether the mount is mounted writable (read-write).\n", - example=True, - ) - Propagation: Optional[str] = Field( - None, - description="Propagation describes how mounts are propagated from the host into the\nmount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)\nfor details. This field is not used on Windows.\n", - example="", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Type1 | None, + Field( + alias="Type", + description="The mount type:\n\n- `bind` a mount of a file or directory from the host into the container.\n- `volume` a docker volume with the given `Name`.\n- `tmpfs` a `tmpfs`.\n- `npipe` a named pipe from the host into the container.\n", + examples=["volume"], + ), + ] = None + name: Annotated[ + str | None, + Field( + alias="Name", + description="Name is the name reference to the underlying data defined by `Source`\ne.g., the volume name.\n", + examples=["myvolume"], + ), + ] = None + source: Annotated[ + str | None, + Field( + alias="Source", + description="Source location of the mount.\n\nFor volumes, this contains the storage location of the volume (within\n`/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains\nthe source (host) part of the bind-mount. 
For `tmpfs` mount points, this\nfield is empty.\n", + examples=["/var/lib/docker/volumes/myvolume/_data"], + ), + ] = None + destination: Annotated[ + str | None, + Field( + alias="Destination", + description="Destination is the path relative to the container root (`/`) where\nthe `Source` is mounted inside the container.\n", + examples=["/usr/share/nginx/html/"], + ), + ] = None + driver: Annotated[ + str | None, + Field( + alias="Driver", + description="Driver is the volume driver used to create the volume (if it is a volume).\n", + examples=["local"], + ), + ] = None + mode: Annotated[ + str | None, + Field( + alias="Mode", + description='Mode is a comma separated list of options supplied by the user when\ncreating the bind/volume mount.\n\nThe default is platform-specific (`"z"` on Linux, empty on Windows).\n', + examples=["z"], + ), + ] = None + rw: Annotated[ + bool | None, + Field( + alias="RW", + description="Whether the mount is mounted writable (read-write).\n", + examples=[True], + ), + ] = None + propagation: Annotated[ + str | None, + Field( + alias="Propagation", + description="Propagation describes how mounts are propagated from the host into the\nmount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)\nfor details. This field is not used on Windows.\n", + examples=[""], + ), + ] = None class DeviceMapping(BaseModel): @@ -105,9 +146,12 @@ class DeviceMapping(BaseModel): A device mapping between the host and container """ - PathOnHost: Optional[str] = None - PathInContainer: Optional[str] = None - CgroupPermissions: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + path_on_host: Annotated[str | None, Field(alias="PathOnHost")] = None + path_in_container: Annotated[str | None, Field(alias="PathInContainer")] = None + cgroup_permissions: Annotated[str | None, Field(alias="CgroupPermissions")] = None class DeviceRequest(BaseModel): @@ -115,25 +159,58 @@ class DeviceRequest(BaseModel): A request for devices to be sent to device drivers """ - Driver: Optional[str] = Field(None, example="nvidia") - Count: Optional[int] = Field(None, example=-1) - DeviceIDs: Optional[list[str]] = Field( - None, example=["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - ) - Capabilities: Optional[list[list[str]]] = Field( - None, - description="A list of capabilities; an OR list of AND lists of capabilities.\n", - example=[["gpu", "nvidia", "compute"]], - ) - Options: Optional[dict[str, str]] = Field( - None, - description="Driver-specific options, specified as a key/value pairs. These options\nare passed directly to the driver.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + driver: Annotated[str | None, Field(alias="Driver", examples=["nvidia"])] = None + count: Annotated[int | None, Field(alias="Count", examples=[-1])] = None + device_i_ds: Annotated[ + list[str] | None, + Field( + alias="DeviceIDs", + examples=[["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]], + ), + ] = None + capabilities: Annotated[ + list[list[str]] | None, + Field( + alias="Capabilities", + description="A list of capabilities; an OR list of AND lists of capabilities.\n", + examples=[[["gpu", "nvidia", "compute"]]], + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="Driver-specific options, specified as a key/value pairs. 
These options\nare passed directly to the driver.\n", + ), + ] = None class ThrottleDevice(BaseModel): - Path: Optional[str] = Field(None, description="Device path") - Rate: Optional[int] = Field(None, description="Rate", ge=0) + model_config = ConfigDict( + populate_by_name=True, + ) + path: Annotated[str | None, Field(alias="Path", description="Device path")] = None + rate: Annotated[int | None, Field(alias="Rate", description="Rate", ge=0)] = None + + +class Type2(str, Enum): + """ + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + + """ + + bind = "bind" + volume = "volume" + tmpfs = "tmpfs" + npipe = "npipe" class Propagation(str, Enum): @@ -154,13 +231,20 @@ class BindOptions(BaseModel): Optional configuration for the `bind` type. """ - Propagation: Optional[Propagation] = Field( - None, - description="A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.", - ) - NonRecursive: Optional[bool] = Field( - False, description="Disable recursive bind mount." + model_config = ConfigDict( + populate_by_name=True, ) + propagation: Annotated[ + Propagation | None, + Field( + alias="Propagation", + description="A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`.", + ), + ] = None + non_recursive: Annotated[ + bool | None, + Field(alias="NonRecursive", description="Disable recursive bind mount."), + ] = False class DriverConfig(BaseModel): @@ -168,12 +252,19 @@ class DriverConfig(BaseModel): Map of driver specific options """ - Name: Optional[str] = Field( - None, description="Name of the driver to use to create the volume." - ) - Options: Optional[dict[str, str]] = Field( - None, description="key/value map of driver specific options." + model_config = ConfigDict( + populate_by_name=True, ) + name: Annotated[ + str | None, + Field( + alias="Name", description="Name of the driver to use to create the volume." + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field(alias="Options", description="key/value map of driver specific options."), + ] = None class VolumeOptions(BaseModel): @@ -181,15 +272,21 @@ class VolumeOptions(BaseModel): Optional configuration for the `volume` type. """ - NoCopy: Optional[bool] = Field( - False, description="Populate volume with data from the target." - ) - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." 
- ) - DriverConfig: Optional[DriverConfig] = Field( - None, description="Map of driver specific options" + model_config = ConfigDict( + populate_by_name=True, ) + no_copy: Annotated[ + bool | None, + Field(alias="NoCopy", description="Populate volume with data from the target."), + ] = False + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + driver_config: Annotated[ + DriverConfig | None, + Field(alias="DriverConfig", description="Map of driver specific options"), + ] = None class TmpfsOptions(BaseModel): @@ -197,44 +294,80 @@ class TmpfsOptions(BaseModel): Optional configuration for the `tmpfs` type. """ - SizeBytes: Optional[int] = Field( - None, description="The size for the tmpfs mount in bytes." - ) - Mode: Optional[int] = Field( - None, description="The permission mode for the tmpfs mount in an integer." + model_config = ConfigDict( + populate_by_name=True, ) + size_bytes: Annotated[ + int | None, + Field(alias="SizeBytes", description="The size for the tmpfs mount in bytes."), + ] = None + mode: Annotated[ + int | None, + Field( + alias="Mode", + description="The permission mode for the tmpfs mount in an integer.", + ), + ] = None class Mount(BaseModel): - Target: Optional[str] = Field(None, description="Container path.") - Source: Optional[str] = Field( - None, description="Mount source (e.g. a volume name, a host path)." - ) - Type: Optional[Type1] = Field( - None, - description="The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.\n", - ) - ReadOnly: Optional[bool] = Field( - None, description="Whether the mount should be read-only." - ) - Consistency: Optional[str] = Field( - None, - description="The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.", - ) - BindOptions: Optional[BindOptions] = Field( - None, description="Optional configuration for the `bind` type." - ) - VolumeOptions: Optional[VolumeOptions] = Field( - None, description="Optional configuration for the `volume` type." - ) - TmpfsOptions: Optional[TmpfsOptions] = Field( - None, description="Optional configuration for the `tmpfs` type." - ) + model_config = ConfigDict( + populate_by_name=True, + ) + target: Annotated[ + str | None, Field(alias="Target", description="Container path.") + ] = None + source: Annotated[ + str | None, + Field( + alias="Source", + description="Mount source (e.g. a volume name, a host path).", + ), + ] = None + type: Annotated[ + Type2 | None, + Field( + alias="Type", + description="The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. 
Must exist prior to creating the container.\n", + ), + ] = None + read_only: Annotated[ + bool | None, + Field(alias="ReadOnly", description="Whether the mount should be read-only."), + ] = None + consistency: Annotated[ + str | None, + Field( + alias="Consistency", + description="The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`.", + ), + ] = None + bind_options: Annotated[ + BindOptions | None, + Field( + alias="BindOptions", + description="Optional configuration for the `bind` type.", + ), + ] = None + volume_options: Annotated[ + VolumeOptions | None, + Field( + alias="VolumeOptions", + description="Optional configuration for the `volume` type.", + ), + ] = None + tmpfs_options: Annotated[ + TmpfsOptions | None, + Field( + alias="TmpfsOptions", + description="Optional configuration for the `tmpfs` type.", + ), + ] = None class Name(str, Enum): """ - - Empty string means not to restart + - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container @@ -242,7 +375,7 @@ class Name(str, Enum): """ - _ = "" + field_ = "" no = "no" always = "always" unless_stopped = "unless-stopped" @@ -251,7 +384,7 @@ class Name(str, Enum): class RestartPolicy(BaseModel): """ - The behavior to apply when the container exits. The default is not to + The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is @@ -259,25 +392,42 @@ class RestartPolicy(BaseModel): """ - Name: Optional[Name] = Field( - None, - description="- Empty string means not to restart\n- `no` Do not automatically restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n", - ) - MaximumRetryCount: Optional[int] = Field( - None, - description="If `on-failure` is used, the number of times to retry before giving up.\n", + model_config = ConfigDict( + populate_by_name=True, ) + name: Annotated[ + Name | None, + Field( + alias="Name", + description="- Empty string means not to restart\n- `no` Do not automatically restart\n- `always` Always restart\n- `unless-stopped` Restart always except when the user has manually stopped the container\n- `on-failure` Restart only when the container exit code is non-zero\n", + ), + ] = None + maximum_retry_count: Annotated[ + int | None, + Field( + alias="MaximumRetryCount", + description="If `on-failure` is used, the number of times to retry before giving up.\n", + ), + ] = None class BlkioWeightDeviceItem(BaseModel): - Path: Optional[str] = None - Weight: Optional[int] = Field(None, ge=0) + model_config = ConfigDict( + populate_by_name=True, + ) + path: Annotated[str | None, Field(alias="Path")] = None + weight: Annotated[int | None, Field(alias="Weight", ge=0)] = None class Ulimit(BaseModel): - Name: Optional[str] = Field(None, description="Name of ulimit") - Soft: Optional[int] = Field(None, description="Soft limit") - Hard: Optional[int] = Field(None, description="Hard limit") + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, Field(alias="Name", description="Name of ulimit") + ] = None + soft: Annotated[int | None, Field(alias="Soft", description="Soft limit")] = None + hard: Annotated[int | None, Field(alias="Hard", description="Hard limit")] = None class 
Resources(BaseModel): @@ -285,126 +435,228 @@ class Resources(BaseModel): A container's resources (cgroups config, ulimits, etc) """ - CpuShares: Optional[int] = Field( - None, - description="An integer value representing this container's relative CPU weight\nversus other containers.\n", - ) - Memory: Optional[int] = Field(0, description="Memory limit in bytes.") - CgroupParent: Optional[str] = Field( - None, - description="Path to `cgroups` under which the container's `cgroup` is created. If\nthe path is not absolute, the path is considered to be relative to the\n`cgroups` path of the init process. Cgroups are created if they do not\nalready exist.\n", - ) - BlkioWeight: Optional[int] = Field( - None, description="Block IO weight (relative weight).", ge=0, le=1000 - ) - BlkioWeightDevice: Optional[list[BlkioWeightDeviceItem]] = Field( - None, - description='Block IO weight (relative device weight) in the form:\n\n```\n[{"Path": "device_path", "Weight": weight}]\n```\n', - ) - BlkioDeviceReadBps: Optional[list[ThrottleDevice]] = Field( - None, - description='Limit read rate (bytes per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', - ) - BlkioDeviceWriteBps: Optional[list[ThrottleDevice]] = Field( - None, - description='Limit write rate (bytes per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', - ) - BlkioDeviceReadIOps: Optional[list[ThrottleDevice]] = Field( - None, - description='Limit read rate (IO per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', - ) - BlkioDeviceWriteIOps: Optional[list[ThrottleDevice]] = Field( - None, - description='Limit write rate (IO per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', - ) - CpuPeriod: Optional[int] = Field( - None, description="The length of a CPU period in microseconds." - ) - CpuQuota: Optional[int] = Field( - None, - description="Microseconds of CPU time that the container can get in a CPU period.\n", - ) - CpuRealtimePeriod: Optional[int] = Field( - None, - description="The length of a CPU real-time period in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n", - ) - CpuRealtimeRuntime: Optional[int] = Field( - None, - description="The length of a CPU real-time runtime in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n", - ) - CpusetCpus: Optional[str] = Field( - None, - description="CPUs in which to allow execution (e.g., `0-3`, `0,1`).\n", - example="0-3", - ) - CpusetMems: Optional[str] = Field( - None, - description="Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only\neffective on NUMA systems.\n", - ) - Devices: Optional[list[DeviceMapping]] = Field( - None, description="A list of devices to add to the container." - ) - DeviceCgroupRules: Optional[list[str]] = Field( - None, description="a list of cgroup rules to apply to the container" - ) - DeviceRequests: Optional[list[DeviceRequest]] = Field( - None, - description="A list of requests for devices to be sent to device drivers.\n", - ) - KernelMemory: Optional[int] = Field( - None, - description="Kernel memory limit in bytes.\n\n


\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n", - example=209715200, - ) - KernelMemoryTCP: Optional[int] = Field( - None, description="Hard limit for kernel TCP buffer memory (in bytes)." - ) - MemoryReservation: Optional[int] = Field( - None, description="Memory soft limit in bytes." - ) - MemorySwap: Optional[int] = Field( - None, - description="Total memory limit (memory + swap). Set as `-1` to enable unlimited\nswap.\n", - ) - MemorySwappiness: Optional[int] = Field( - None, - description="Tune a container's memory swappiness behavior. Accepts an integer\nbetween 0 and 100.\n", - ge=0, - le=100, - ) - NanoCpus: Optional[int] = Field( - None, description="CPU quota in units of 10-9 CPUs." - ) - OomKillDisable: Optional[bool] = Field( - None, description="Disable OOM Killer for the container." - ) - Init: Optional[bool] = Field( - None, - description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n", - ) - PidsLimit: Optional[int] = Field( - None, - description="Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`\nto not change.\n", - ) - Ulimits: Optional[list[Ulimit]] = Field( - None, - description='A list of resource limits to set in the container. For example:\n\n```\n{"Name": "nofile", "Soft": 1024, "Hard": 2048}\n```\n', - ) - CpuCount: Optional[int] = Field( - None, - description="The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n", - ) - CpuPercent: Optional[int] = Field( - None, - description="The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n", - ) - IOMaximumIOps: Optional[int] = Field( - None, description="Maximum IOps for the container system drive (Windows only)" - ) - IOMaximumBandwidth: Optional[int] = Field( - None, - description="Maximum IO in bytes per second for the container system drive\n(Windows only).\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + cpu_shares: Annotated[ + int | None, + Field( + alias="CpuShares", + description="An integer value representing this container's relative CPU weight\nversus other containers.\n", + ), + ] = None + memory: Annotated[ + int | None, Field(alias="Memory", description="Memory limit in bytes.") + ] = 0 + cgroup_parent: Annotated[ + str | None, + Field( + alias="CgroupParent", + description="Path to `cgroups` under which the container's `cgroup` is created. If\nthe path is not absolute, the path is considered to be relative to the\n`cgroups` path of the init process. 
Cgroups are created if they do not\nalready exist.\n", + ), + ] = None + blkio_weight: Annotated[ + int | None, + Field( + alias="BlkioWeight", + description="Block IO weight (relative weight).", + ge=0, + le=1000, + ), + ] = None + blkio_weight_device: Annotated[ + list[BlkioWeightDeviceItem] | None, + Field( + alias="BlkioWeightDevice", + description='Block IO weight (relative device weight) in the form:\n\n```\n[{"Path": "device_path", "Weight": weight}]\n```\n', + ), + ] = None + blkio_device_read_bps: Annotated[ + list[ThrottleDevice] | None, + Field( + alias="BlkioDeviceReadBps", + description='Limit read rate (bytes per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', + ), + ] = None + blkio_device_write_bps: Annotated[ + list[ThrottleDevice] | None, + Field( + alias="BlkioDeviceWriteBps", + description='Limit write rate (bytes per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', + ), + ] = None + blkio_device_read_i_ops: Annotated[ + list[ThrottleDevice] | None, + Field( + alias="BlkioDeviceReadIOps", + description='Limit read rate (IO per second) from a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', + ), + ] = None + blkio_device_write_i_ops: Annotated[ + list[ThrottleDevice] | None, + Field( + alias="BlkioDeviceWriteIOps", + description='Limit write rate (IO per second) to a device, in the form:\n\n```\n[{"Path": "device_path", "Rate": rate}]\n```\n', + ), + ] = None + cpu_period: Annotated[ + int | None, + Field( + alias="CpuPeriod", description="The length of a CPU period in microseconds." + ), + ] = None + cpu_quota: Annotated[ + int | None, + Field( + alias="CpuQuota", + description="Microseconds of CPU time that the container can get in a CPU period.\n", + ), + ] = None + cpu_realtime_period: Annotated[ + int | None, + Field( + alias="CpuRealtimePeriod", + description="The length of a CPU real-time period in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n", + ), + ] = None + cpu_realtime_runtime: Annotated[ + int | None, + Field( + alias="CpuRealtimeRuntime", + description="The length of a CPU real-time runtime in microseconds. Set to 0 to\nallocate no time allocated to real-time tasks.\n", + ), + ] = None + cpuset_cpus: Annotated[ + str | None, + Field( + alias="CpusetCpus", + description="CPUs in which to allow execution (e.g., `0-3`, `0,1`).\n", + examples=["0-3"], + ), + ] = None + cpuset_mems: Annotated[ + str | None, + Field( + alias="CpusetMems", + description="Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only\neffective on NUMA systems.\n", + ), + ] = None + devices: Annotated[ + list[DeviceMapping] | None, + Field( + alias="Devices", description="A list of devices to add to the container." + ), + ] = None + device_cgroup_rules: Annotated[ + list[str] | None, + Field( + alias="DeviceCgroupRules", + description="a list of cgroup rules to apply to the container", + ), + ] = None + device_requests: Annotated[ + list[DeviceRequest] | None, + Field( + alias="DeviceRequests", + description="A list of requests for devices to be sent to device drivers.\n", + ), + ] = None + kernel_memory: Annotated[ + int | None, + Field( + alias="KernelMemory", + description="Kernel memory limit in bytes.\n\n


\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n", + examples=[209715200], + ), + ] = None + kernel_memory_tcp: Annotated[ + int | None, + Field( + alias="KernelMemoryTCP", + description="Hard limit for kernel TCP buffer memory (in bytes).", + ), + ] = None + memory_reservation: Annotated[ + int | None, + Field(alias="MemoryReservation", description="Memory soft limit in bytes."), + ] = None + memory_swap: Annotated[ + int | None, + Field( + alias="MemorySwap", + description="Total memory limit (memory + swap). Set as `-1` to enable unlimited\nswap.\n", + ), + ] = None + memory_swappiness: Annotated[ + int | None, + Field( + alias="MemorySwappiness", + description="Tune a container's memory swappiness behavior. Accepts an integer\nbetween 0 and 100.\n", + ge=0, + le=100, + ), + ] = None + nano_cpus: Annotated[ + int | None, + Field( + alias="NanoCpus", description="CPU quota in units of 10-9 CPUs." + ), + ] = None + oom_kill_disable: Annotated[ + bool | None, + Field( + alias="OomKillDisable", description="Disable OOM Killer for the container." + ), + ] = None + init: Annotated[ + bool | None, + Field( + alias="Init", + description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n", + ), + ] = None + pids_limit: Annotated[ + int | None, + Field( + alias="PidsLimit", + description="Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`\nto not change.\n", + ), + ] = None + ulimits: Annotated[ + list[Ulimit] | None, + Field( + alias="Ulimits", + description='A list of resource limits to set in the container. For example:\n\n```\n{"Name": "nofile", "Soft": 1024, "Hard": 2048}\n```\n', + ), + ] = None + cpu_count: Annotated[ + int | None, + Field( + alias="CpuCount", + description="The number of usable CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n", + ), + ] = None + cpu_percent: Annotated[ + int | None, + Field( + alias="CpuPercent", + description="The usable percentage of the available CPUs (Windows only).\n\nOn Windows Server containers, the processor resource controls are\nmutually exclusive. The order of precedence is `CPUCount` first, then\n`CPUShares`, and `CPUPercent` last.\n", + ), + ] = None + io_maximum_i_ops: Annotated[ + int | None, + Field( + alias="IOMaximumIOps", + description="Maximum IOps for the container system drive (Windows only)", + ), + ] = None + io_maximum_bandwidth: Annotated[ + int | None, + Field( + alias="IOMaximumBandwidth", + description="Maximum IO in bytes per second for the container system drive\n(Windows only).\n", + ), + ] = None class Limit(BaseModel): @@ -413,46 +665,76 @@ class Limit(BaseModel): """ - NanoCPUs: Optional[int] = Field(None, example=4000000000) - MemoryBytes: Optional[int] = Field(None, example=8272408576) - Pids: Optional[int] = Field( - 0, - description="Limits the maximum number of PIDs in the container. 
Set `0` for unlimited.\n", - example=100, + model_config = ConfigDict( + populate_by_name=True, ) + nano_cp_us: Annotated[ + int | None, Field(alias="NanoCPUs", examples=[4000000000]) + ] = None + memory_bytes: Annotated[ + int | None, Field(alias="MemoryBytes", examples=[8272408576]) + ] = None + pids: Annotated[ + int | None, + Field( + alias="Pids", + description="Limits the maximum number of PIDs in the container. Set `0` for unlimited.\n", + examples=[100], + ), + ] = 0 class NamedResourceSpec(BaseModel): - Kind: Optional[str] = None - Value: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + kind: Annotated[str | None, Field(alias="Kind")] = None + value: Annotated[str | None, Field(alias="Value")] = None class DiscreteResourceSpec(BaseModel): - Kind: Optional[str] = None - Value: Optional[int] = None + model_config = ConfigDict( + populate_by_name=True, + ) + kind: Annotated[str | None, Field(alias="Kind")] = None + value: Annotated[int | None, Field(alias="Value")] = None class GenericResource(BaseModel): - NamedResourceSpec: Optional[NamedResourceSpec] = None - DiscreteResourceSpec: Optional[DiscreteResourceSpec] = None + model_config = ConfigDict( + populate_by_name=True, + ) + named_resource_spec: Annotated[ + NamedResourceSpec | None, Field(alias="NamedResourceSpec") + ] = None + discrete_resource_spec: Annotated[ + DiscreteResourceSpec | None, Field(alias="DiscreteResourceSpec") + ] = None -class GenericResources(BaseModel): +class GenericResources(RootModel[list[GenericResource]]): """ - User-defined resources can be either Integer resources (e.g, `SSD=3`) or + User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). """ - __root__: list[GenericResource] = Field( - ..., - description="User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`).\n", - example=[ - {"DiscreteResourceSpec": {"Kind": "SSD", "Value": 3}}, - {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID1"}}, - {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID2"}}, - ], + model_config = ConfigDict( + populate_by_name=True, ) + root: Annotated[ + list[GenericResource], + Field( + description="User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`).\n", + examples=[ + [ + {"DiscreteResourceSpec": {"Kind": "SSD", "Value": 3}}, + {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID1"}}, + {"NamedResourceSpec": {"Kind": "GPU", "Value": "UUID2"}}, + ] + ], + ), + ] class HealthConfig(BaseModel): @@ -460,31 +742,49 @@ class HealthConfig(BaseModel): A test to perform to check that the container is healthy. """ - Test: Optional[list[str]] = Field( - None, - description='The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `["NONE"]` disable healthcheck\n- `["CMD", args...]` exec arguments directly\n- `["CMD-SHELL", command]` run command with system\'s default shell\n', - ) - Interval: Optional[int] = Field( - None, - description="The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n", - ) - Timeout: Optional[int] = Field( - None, - description="The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n", - ) - Retries: Optional[int] = Field( - None, - description="The number of consecutive failures needed to consider a container as\nunhealthy. 
0 means inherit.\n", - ) - StartPeriod: Optional[int] = Field( - None, - description="Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + test: Annotated[ + list[str] | None, + Field( + alias="Test", + description='The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `["NONE"]` disable healthcheck\n- `["CMD", args...]` exec arguments directly\n- `["CMD-SHELL", command]` run command with system\'s default shell\n', + ), + ] = None + interval: Annotated[ + int | None, + Field( + alias="Interval", + description="The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n", + ), + ] = None + timeout: Annotated[ + int | None, + Field( + alias="Timeout", + description="The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n", + ), + ] = None + retries: Annotated[ + int | None, + Field( + alias="Retries", + description="The number of consecutive failures needed to consider a container as\nunhealthy. 0 means inherit.\n", + ), + ] = None + start_period: Annotated[ + int | None, + Field( + alias="StartPeriod", + description="Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n", + ), + ] = None class Status(str, Enum): """ - Status is one of `none`, `starting`, `healthy` or `unhealthy` + Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready @@ -505,22 +805,36 @@ class HealthcheckResult(BaseModel): """ - Start: Optional[datetime] = Field( - None, - description="Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2020-01-04T10:44:24.496525531Z", - ) - End: Optional[str] = Field( - None, - description="Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2020-01-04T10:45:21.364524523Z", - ) - ExitCode: Optional[int] = Field( - None, - description="ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n", - example=0, - ) - Output: Optional[str] = Field(None, description="Output from last check") + model_config = ConfigDict( + populate_by_name=True, + ) + start: Annotated[ + datetime | None, + Field( + alias="Start", + description="Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2020-01-04T10:44:24.496525531Z"], + ), + ] = None + end: Annotated[ + str | None, + Field( + alias="End", + description="Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2020-01-04T10:45:21.364524523Z"], + ), + ] = None + exit_code: Annotated[ + int | None, + Field( + alias="ExitCode", + description="ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n", + examples=[0], + ), + ] = None + output: Annotated[ + str | None, Field(alias="Output", description="Output from last 
check") + ] = None class Type3(str, Enum): @@ -540,13 +854,16 @@ class LogConfig(BaseModel): The logging configuration for this container """ - Type: Optional[Type3] = None - Config_: Optional[dict[str, str]] = Field(None, alias="Config") + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[Type3 | None, Field(alias="Type")] = None + config: Annotated[dict[str, str] | None, Field(alias="Config")] = None class CgroupnsMode(str, Enum): """ - cgroup namespace mode for the container. Possible values are: + cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace @@ -560,8 +877,11 @@ class CgroupnsMode(str, Enum): host = "host" -class ConsoleSizeItem(BaseModel): - __root__: int = Field(..., ge=0) +class ConsoleSizeItem(RootModel[int]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: Annotated[int, Field(ge=0)] class Isolation(str, Enum): @@ -577,7 +897,7 @@ class Isolation(str, Enum): class ContainerConfig(BaseModel): """ - Configuration for a container that is portable between hosts. + Configuration for a container that is portable between hosts. When used as `ContainerConfig` field in an image, `ContainerConfig` is an optional field containing the configuration of the container that was last @@ -588,102 +908,390 @@ class ContainerConfig(BaseModel): """ - Hostname: Optional[str] = Field( - None, - description="The hostname to use for the container, as a valid RFC 1123 hostname.\n", - example="439f4e91bd1d", - ) - Domainname: Optional[str] = Field( - None, description="The domain name to use for the container.\n" - ) - User: Optional[str] = Field( - None, description="The user that commands are run as inside the container." - ) - AttachStdin: Optional[bool] = Field( - False, description="Whether to attach to `stdin`." - ) - AttachStdout: Optional[bool] = Field( - True, description="Whether to attach to `stdout`." - ) - AttachStderr: Optional[bool] = Field( - True, description="Whether to attach to `stderr`." - ) - ExposedPorts: Optional[dict[str, dict[str, Any]]] = Field( - None, - description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n', - example={"80/tcp": {}, "443/tcp": {}}, - ) - Tty: Optional[bool] = Field( - False, - description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n", - ) - OpenStdin: Optional[bool] = Field(False, description="Open `stdin`") - StdinOnce: Optional[bool] = Field( - False, description="Close `stdin` after one attached client disconnects" - ) - Env: Optional[list[str]] = Field( - None, - description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. 
A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n', - example=["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"], - ) - Cmd: Optional[list[str]] = Field( - None, - description="Command to run specified as a string or an array of strings.\n", - example=["/bin/sh"], - ) - Healthcheck: Optional[HealthConfig] = None - ArgsEscaped: Optional[bool] = Field( - False, description="Command is already escaped (Windows only)", example=False - ) - Image: Optional[str] = Field( - None, - description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n", - example="example-image:1.0", - ) - Volumes: Optional[dict[str, dict[str, Any]]] = Field( - None, - description="An object mapping mount point paths inside the container to empty\nobjects.\n", - ) - WorkingDir: Optional[str] = Field( - None, - description="The working directory for commands to run in.", - example="/public/", - ) - Entrypoint: Optional[list[str]] = Field( - None, - description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n', - example=[], - ) - NetworkDisabled: Optional[bool] = Field( - None, description="Disable networking for the container." - ) - MacAddress: Optional[str] = Field(None, description="MAC address of the container.") - OnBuild: Optional[list[str]] = Field( - None, - description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n", - example=[], - ) - Labels: Optional[dict[str, str]] = Field( - None, - description="User-defined key/value metadata.", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) - StopSignal: Optional[str] = Field( - None, - description="Signal to stop a container as a string or unsigned integer.\n", - example="SIGTERM", - ) - StopTimeout: Optional[int] = Field( - 10, description="Timeout to stop a container in seconds." 
- ) - Shell: Optional[list[str]] = Field( - None, - description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n", - example=["/bin/sh", "-c"], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + hostname: Annotated[ + str | None, + Field( + alias="Hostname", + description="The hostname to use for the container, as a valid RFC 1123 hostname.\n", + examples=["439f4e91bd1d"], + ), + ] = None + domainname: Annotated[ + str | None, + Field( + alias="Domainname", + description="The domain name to use for the container.\n", + ), + ] = None + user: Annotated[ + str | None, + Field( + alias="User", + description="The user that commands are run as inside the container.", + ), + ] = None + attach_stdin: Annotated[ + bool | None, + Field(alias="AttachStdin", description="Whether to attach to `stdin`."), + ] = False + attach_stdout: Annotated[ + bool | None, + Field(alias="AttachStdout", description="Whether to attach to `stdout`."), + ] = True + attach_stderr: Annotated[ + bool | None, + Field(alias="AttachStderr", description="Whether to attach to `stderr`."), + ] = True + exposed_ports: Annotated[ + dict[str, dict[str, Any]] | None, + Field( + alias="ExposedPorts", + description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n', + examples=[{"80/tcp": {}, "443/tcp": {}}], + ), + ] = None + tty: Annotated[ + bool | None, + Field( + alias="Tty", + description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n", + ), + ] = False + open_stdin: Annotated[ + bool | None, Field(alias="OpenStdin", description="Open `stdin`") + ] = False + stdin_once: Annotated[ + bool | None, + Field( + alias="StdinOnce", + description="Close `stdin` after one attached client disconnects", + ), + ] = False + env: Annotated[ + list[str] | None, + Field( + alias="Env", + description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. 
A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n', + examples=[ + ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + ], + ), + ] = None + cmd: Annotated[ + list[str] | None, + Field( + alias="Cmd", + description="Command to run specified as a string or an array of strings.\n", + examples=[["/bin/sh"]], + ), + ] = None + healthcheck: Annotated[HealthConfig | None, Field(alias="Healthcheck")] = None + args_escaped: Annotated[ + bool | None, + Field( + alias="ArgsEscaped", + description="Command is already escaped (Windows only)", + examples=[False], + ), + ] = False + image: Annotated[ + str | None, + Field( + alias="Image", + description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n", + examples=["example-image:1.0"], + ), + ] = None + volumes: Annotated[ + dict[str, dict[str, Any]] | None, + Field( + alias="Volumes", + description="An object mapping mount point paths inside the container to empty\nobjects.\n", + ), + ] = None + working_dir: Annotated[ + str | None, + Field( + alias="WorkingDir", + description="The working directory for commands to run in.", + examples=["/public/"], + ), + ] = None + entrypoint: Annotated[ + list[str] | None, + Field( + alias="Entrypoint", + description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n', + examples=[[]], + ), + ] = None + network_disabled: Annotated[ + bool | None, + Field( + alias="NetworkDisabled", description="Disable networking for the container." + ), + ] = None + mac_address: Annotated[ + str | None, + Field(alias="MacAddress", description="MAC address of the container."), + ] = None + on_build: Annotated[ + list[str] | None, + Field( + alias="OnBuild", + description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n", + examples=[[]], + ), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None + stop_signal: Annotated[ + str | None, + Field( + alias="StopSignal", + description="Signal to stop a container as a string or unsigned integer.\n", + examples=["SIGTERM"], + ), + ] = None + stop_timeout: Annotated[ + int | None, + Field( + alias="StopTimeout", description="Timeout to stop a container in seconds." + ), + ] = 10 + shell: Annotated[ + list[str] | None, + Field( + alias="Shell", + description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n", + examples=[["/bin/sh", "-c"]], + ), + ] = None + + +class ImageConfig(BaseModel): + """ + Configuration of the image. These fields are used as defaults + when starting a container from the image. + + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + hostname: Annotated[ + str | None, + Field( + alias="Hostname", + description="The hostname to use for the container, as a valid RFC 1123 hostname.\n\n


\n\n> **Note**: this field is always empty and must not be used.\n", + examples=[""], + ), + ] = None + domainname: Annotated[ + str | None, + Field( + alias="Domainname", + description="The domain name to use for the container.\n\n


\n\n> **Note**: this field is always empty and must not be used.\n", + examples=[""], + ), + ] = None + user: Annotated[ + str | None, + Field( + alias="User", + description="The user that commands are run as inside the container.", + examples=["web:web"], + ), + ] = None + attach_stdin: Annotated[ + bool | None, + Field( + alias="AttachStdin", + description="Whether to attach to `stdin`.\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + attach_stdout: Annotated[ + bool | None, + Field( + alias="AttachStdout", + description="Whether to attach to `stdout`.\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + attach_stderr: Annotated[ + bool | None, + Field( + alias="AttachStderr", + description="Whether to attach to `stderr`.\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + exposed_ports: Annotated[ + dict[str, dict[str, Any]] | None, + Field( + alias="ExposedPorts", + description='An object mapping ports to an empty object in the form:\n\n`{"/": {}}`\n', + examples=[{"80/tcp": {}, "443/tcp": {}}], + ), + ] = None + tty: Annotated[ + bool | None, + Field( + alias="Tty", + description="Attach standard streams to a TTY, including `stdin` if it is not closed.\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + open_stdin: Annotated[ + bool | None, + Field( + alias="OpenStdin", + description="Open `stdin`\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + stdin_once: Annotated[ + bool | None, + Field( + alias="StdinOnce", + description="Close `stdin` after one attached client disconnects.\n\n


\n\n> **Note**: this field is always false and must not be used.\n", + examples=[False], + ), + ] = False + env: Annotated[ + list[str] | None, + Field( + alias="Env", + description='A list of environment variables to set inside the container in the\nform `["VAR=value", ...]`. A variable without `=` is removed from the\nenvironment, rather than to have an empty value.\n', + examples=[ + ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + ], + ), + ] = None + cmd: Annotated[ + list[str] | None, + Field( + alias="Cmd", + description="Command to run specified as a string or an array of strings.\n", + examples=[["/bin/sh"]], + ), + ] = None + healthcheck: Annotated[HealthConfig | None, Field(alias="Healthcheck")] = None + args_escaped: Annotated[ + bool | None, + Field( + alias="ArgsEscaped", + description="Command is already escaped (Windows only)", + examples=[False], + ), + ] = False + image: Annotated[ + str | None, + Field( + alias="Image", + description="The name (or reference) of the image to use when creating the container,\nor which was used when the container was created.\n\n


\n\n> **Note**: this field is always empty and must not be used.\n", + examples=[""], + ), + ] = "" + volumes: Annotated[ + dict[str, dict[str, Any]] | None, + Field( + alias="Volumes", + description="An object mapping mount point paths inside the container to empty\nobjects.\n", + examples=[{"/app/data": {}, "/app/config": {}}], + ), + ] = None + working_dir: Annotated[ + str | None, + Field( + alias="WorkingDir", + description="The working directory for commands to run in.", + examples=["/public/"], + ), + ] = None + entrypoint: Annotated[ + list[str] | None, + Field( + alias="Entrypoint", + description='The entry point for the container as a string or an array of strings.\n\nIf the array consists of exactly one empty string (`[""]`) then the\nentry point is reset to system default (i.e., the entry point used by\ndocker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).\n', + examples=[[]], + ), + ] = None + network_disabled: Annotated[ + bool | None, + Field( + alias="NetworkDisabled", + description="Disable networking for the container.\n\n


\n\n> **Note**: this field is always omitted and must not be used.\n", + examples=[False], + ), + ] = False + mac_address: Annotated[ + str | None, + Field( + alias="MacAddress", + description="MAC address of the container.\n\n


\n\n> **Note**: this field is always omitted and must not be used.\n", + examples=[""], + ), + ] = "" + on_build: Annotated[ + list[str] | None, + Field( + alias="OnBuild", + description="`ONBUILD` metadata that were defined in the image's `Dockerfile`.\n", + examples=[[]], + ), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None + stop_signal: Annotated[ + str | None, + Field( + alias="StopSignal", + description="Signal to stop a container as a string or unsigned integer.\n", + examples=["SIGTERM"], + ), + ] = None + stop_timeout: Annotated[ + int | None, + Field( + alias="StopTimeout", + description="Timeout to stop a container in seconds.\n\n


\n\n> **Note**: this field is always omitted and must not be used.\n", + ), + ] = 10 + shell: Annotated[ + list[str] | None, + Field( + alias="Shell", + description="Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.\n", + examples=[["/bin/sh", "-c"]], + ), + ] = None class Address(BaseModel): @@ -691,95 +1299,120 @@ class Address(BaseModel): Address represents an IPv4 or IPv6 IP address. """ - Addr: Optional[str] = Field(None, description="IP address.") - PrefixLen: Optional[int] = Field(None, description="Mask length of the IP address.") - - -class PortMap(BaseModel): - """ - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - - """ - - pass - - class Config: - extra = Extra.allow + model_config = ConfigDict( + populate_by_name=True, + ) + addr: Annotated[str | None, Field(alias="Addr", description="IP address.")] = None + prefix_len: Annotated[ + int | None, + Field(alias="PrefixLen", description="Mask length of the IP address."), + ] = None class PortBinding(BaseModel): """ - PortBinding represents a binding between a host IP address and a host + PortBinding represents a binding between a host IP address and a host port. """ - HostIp: Optional[str] = Field( - None, - description="Host IP address that the container's port is mapped to.", - example="127.0.0.1", - ) - HostPort: Optional[str] = Field( - None, - description="Host port number that the container's port is mapped to.", - example="4443", + model_config = ConfigDict( + populate_by_name=True, ) + host_ip: Annotated[ + str | None, + Field( + alias="HostIp", + description="Host IP address that the container's port is mapped to.", + examples=["127.0.0.1"], + ), + ] = None + host_port: Annotated[ + str | None, + Field( + alias="HostPort", + description="Host port number that the container's port is mapped to.", + examples=["4443"], + ), + ] = None class GraphDriverData(BaseModel): """ - Information about the storage driver used to store the container's and + Information about the storage driver used to store the container's and image's filesystem. 
""" - Name: str = Field( - ..., description="Name of the storage driver.", example="overlay2" - ) - Data: dict[str, str] = Field( - ..., - description="Low-level storage metadata, provided as key/value pairs.\n\nThis information is driver-specific, and depends on the storage-driver\nin use, and should be used for informational purposes only.\n", - example={ - "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", - "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", - "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work", - }, - ) - - -class RootFS(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str, + Field( + alias="Name", + description="Name of the storage driver.", + examples=["overlay2"], + ), + ] + data: Annotated[ + dict[str, str], + Field( + alias="Data", + description="Low-level storage metadata, provided as key/value pairs.\n\nThis information is driver-specific, and depends on the storage-driver\nin use, and should be used for informational purposes only.\n", + examples=[ + { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work", + } + ], + ), + ] + + +class RootFs(BaseModel): """ Information about the image's RootFS, including the layer IDs. """ - Type: str = Field(..., example="layers") - Layers: Optional[list[str]] = Field( - None, - example=[ - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", - ], + model_config = ConfigDict( + populate_by_name=True, ) + type: Annotated[str, Field(alias="Type", examples=["layers"])] + layers: Annotated[ + list[str] | None, + Field( + alias="Layers", + examples=[ + [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + ] + ], + ), + ] = None class Metadata(BaseModel): """ - Additional metadata of the image in the local cache. This information + Additional metadata of the image in the local cache. This information is local to the daemon, and not part of the image itself. 
""" - LastTagTime: Optional[str] = Field( - None, - description="Date and time at which the image was last tagged in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n\nThis information is only available if the image was tagged locally,\nand omitted otherwise.\n", - example="2022-02-28T14:40:02.623929178Z", + model_config = ConfigDict( + populate_by_name=True, ) + last_tag_time: Annotated[ + str | None, + Field( + alias="LastTagTime", + description="Date and time at which the image was last tagged in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n\nThis information is only available if the image was tagged locally,\nand omitted otherwise.\n", + examples=["2022-02-28T14:40:02.623929178Z"], + ), + ] = None class ImageInspect(BaseModel): @@ -788,184 +1421,295 @@ class ImageInspect(BaseModel): """ - Id: Optional[str] = Field( - None, - description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n", - example="sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710", - ) - RepoTags: Optional[list[str]] = Field( - None, - description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same imagem and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n', - example=[ - "example:1.0", - "example:latest", - "example:stable", - "internal.registry.example.com:5000/example:1.0", - ], - ) - RepoDigests: Optional[list[str]] = Field( - None, - description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n", - example=[ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", - ], - ) - Parent: Optional[str] = Field( - None, - description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. 
This field\nis empty if the image was pulled from an image registry.\n", - example="", - ) - Comment: Optional[str] = Field( - None, - description="Optional message that was set when committing or importing the image.\n", - example="", - ) - Created: Optional[str] = Field( - None, - description="Date and time at which the image was created, formatted in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2022-02-04T21:20:12.497794809Z", - ) - Container: Optional[str] = Field( - None, - description="The ID of the container that was used to create the image.\n\nDepending on how the image was created, this field may be empty.\n", - example="65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735", - ) - ContainerConfig: Optional[ContainerConfig] = None - DockerVersion: Optional[str] = Field( - None, - description="The version of Docker that was used to build the image.\n\nDepending on how the image was created, this field may be empty.\n", - example="20.10.7", - ) - Author: Optional[str] = Field( - None, - description="Name of the author that was specified when committing the image, or as\nspecified through MAINTAINER (deprecated) in the Dockerfile.\n", - example="", - ) - Config_: Optional[ContainerConfig] = Field(None, alias="Config") - Architecture: Optional[str] = Field( - None, - description="Hardware CPU architecture that the image runs on.\n", - example="arm", - ) - Variant: Optional[str] = Field( - None, - description="CPU architecture variant (presently ARM-only).\n", - example="v7", - ) - Os: Optional[str] = Field( - None, - description="Operating System the image is built to run on.\n", - example="linux", - ) - OsVersion: Optional[str] = Field( - None, - description="Operating System version the image is built to run on (especially\nfor Windows).\n", - example="", - ) - Size: Optional[int] = Field( - None, - description="Total size of the image including all layers it is composed of.\n", - example=1239828, - ) - VirtualSize: Optional[int] = Field( - None, - description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n", - example=1239828, - ) - GraphDriver: Optional[GraphDriverData] = None - RootFS: Optional[RootFS] = Field( - None, - description="Information about the image's RootFS, including the layer IDs.\n", - ) - Metadata: Optional[Metadata] = Field( - None, - description="Additional metadata of the image in the local cache. 
This information\nis local to the daemon, and not part of the image itself.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="Id", + description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n", + examples=[ + "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ], + ), + ] = None + repo_tags: Annotated[ + list[str] | None, + Field( + alias="RepoTags", + description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n', + examples=[ + [ + "example:1.0", + "example:latest", + "example:stable", + "internal.registry.example.com:5000/example:1.0", + ] + ], + ), + ] = None + repo_digests: Annotated[ + list[str] | None, + Field( + alias="RepoDigests", + description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n", + examples=[ + [ + "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + ] + ], + ), + ] = None + parent: Annotated[ + str | None, + Field( + alias="Parent", + description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. 
This field\nis empty if the image was pulled from an image registry.\n", + examples=[""], + ), + ] = None + comment: Annotated[ + str | None, + Field( + alias="Comment", + description="Optional message that was set when committing or importing the image.\n", + examples=[""], + ), + ] = None + created: Annotated[ + str | None, + Field( + alias="Created", + description="Date and time at which the image was created, formatted in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2022-02-04T21:20:12.497794809Z"], + ), + ] = None + container: Annotated[ + str | None, + Field( + alias="Container", + description="The ID of the container that was used to create the image.\n\nDepending on how the image was created, this field may be empty.\n", + examples=[ + "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" + ], + ), + ] = None + container_config: Annotated[ + ContainerConfig | None, Field(alias="ContainerConfig") + ] = None + docker_version: Annotated[ + str | None, + Field( + alias="DockerVersion", + description="The version of Docker that was used to build the image.\n\nDepending on how the image was created, this field may be empty.\n", + examples=["20.10.7"], + ), + ] = None + author: Annotated[ + str | None, + Field( + alias="Author", + description="Name of the author that was specified when committing the image, or as\nspecified through MAINTAINER (deprecated) in the Dockerfile.\n", + examples=[""], + ), + ] = None + config: Annotated[ImageConfig | None, Field(alias="Config")] = None + architecture: Annotated[ + str | None, + Field( + alias="Architecture", + description="Hardware CPU architecture that the image runs on.\n", + examples=["arm"], + ), + ] = None + variant: Annotated[ + str | None, + Field( + alias="Variant", + description="CPU architecture variant (presently ARM-only).\n", + examples=["v7"], + ), + ] = None + os: Annotated[ + str | None, + Field( + alias="Os", + description="Operating System the image is built to run on.\n", + examples=["linux"], + ), + ] = None + os_version: Annotated[ + str | None, + Field( + alias="OsVersion", + description="Operating System version the image is built to run on (especially\nfor Windows).\n", + examples=[""], + ), + ] = None + size: Annotated[ + int | None, + Field( + alias="Size", + description="Total size of the image including all layers it is composed of.\n", + examples=[1239828], + ), + ] = None + virtual_size: Annotated[ + int | None, + Field( + alias="VirtualSize", + description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n", + examples=[1239828], + ), + ] = None + graph_driver: Annotated[GraphDriverData | None, Field(alias="GraphDriver")] = None + root_fs: Annotated[ + RootFs | None, + Field( + alias="RootFS", + description="Information about the image's RootFS, including the layer IDs.\n", + ), + ] = None + metadata: Annotated[ + Metadata | None, + Field( + alias="Metadata", + description="Additional metadata of the image in the local cache. 
This information\nis local to the daemon, and not part of the image itself.\n", + ), + ] = None class ImageSummary(BaseModel): - Id: str = Field( - ..., - description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n", - example="sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710", - ) - ParentId: str = Field( - ..., - description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n", - example="", - ) - RepoTags: list[str] = Field( - ..., - description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same imagem and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n', - example=[ - "example:1.0", - "example:latest", - "example:stable", - "internal.registry.example.com:5000/example:1.0", - ], - ) - RepoDigests: list[str] = Field( - ..., - description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n", - example=[ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", - ], - ) - Created: int = Field( - ..., - description="Date and time at which the image was created as a Unix timestamp\n(number of seconds sinds EPOCH).\n", - example="1644009612", - ) - Size: int = Field( - ..., - description="Total size of the image including all layers it is composed of.\n", - example=172064416, - ) - SharedSize: int = Field( - ..., - description="Total size of image layers that are shared between this image and other\nimages.\n\nThis size is not calculated by default. `-1` indicates that the value\nhas not been set / calculated.\n", - example=1239828, - ) - VirtualSize: int = Field( - ..., - description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n", - example=172064416, - ) - Labels: dict[str, str] = Field( - ..., - description="User-defined key/value metadata.", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) - Containers: int = Field( - ..., - description="Number of containers using this image. Includes both stopped and running\ncontainers.\n\nThis size is not calculated by default, and depends on which API endpoint\nis used. 
`-1` indicates that the value has not been set / calculated.\n", - example=2, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str, + Field( + alias="Id", + description="ID is the content-addressable ID of an image.\n\nThis identifier is a content-addressable digest calculated from the\nimage's configuration (which includes the digests of layers used by\nthe image).\n\nNote that this digest differs from the `RepoDigests` below, which\nholds digests of image manifests that reference the image.\n", + examples=[ + "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ], + ), + ] + parent_id: Annotated[ + str, + Field( + alias="ParentId", + description="ID of the parent image.\n\nDepending on how the image was created, this field may be empty and\nis only set for images that were built/created locally. This field\nis empty if the image was pulled from an image registry.\n", + examples=[""], + ), + ] + repo_tags: Annotated[ + list[str], + Field( + alias="RepoTags", + description='List of image names/tags in the local image cache that reference this\nimage.\n\nMultiple image tags can refer to the same image, and this list may be\nempty if no tags reference the image, in which case the image is\n"untagged", in which case it can still be referenced by its ID.\n', + examples=[ + [ + "example:1.0", + "example:latest", + "example:stable", + "internal.registry.example.com:5000/example:1.0", + ] + ], + ), + ] + repo_digests: Annotated[ + list[str], + Field( + alias="RepoDigests", + description="List of content-addressable digests of locally available image manifests\nthat the image is referenced from. Multiple manifests can refer to the\nsame image.\n\nThese digests are usually only available if the image was either pulled\nfrom a registry, or if the image was pushed to a registry, which is when\nthe manifest is generated and its digest calculated.\n", + examples=[ + [ + "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + ] + ], + ), + ] + created: Annotated[ + int, + Field( + alias="Created", + description="Date and time at which the image was created as a Unix timestamp\n(number of seconds since EPOCH).\n", + examples=["1644009612"], + ), + ] + size: Annotated[ + int, + Field( + alias="Size", + description="Total size of the image including all layers it is composed of.\n", + examples=[172064416], + ), + ] + shared_size: Annotated[ + int, + Field( + alias="SharedSize", + description="Total size of image layers that are shared between this image and other\nimages.\n\nThis size is not calculated by default. `-1` indicates that the value\nhas not been set / calculated.\n", + examples=[1239828], + ), + ] + virtual_size: Annotated[ + int, + Field( + alias="VirtualSize", + description="Total size of the image including all layers it is composed of.\n\nIn versions of Docker before v1.10, this field was calculated from\nthe image itself and all of its parent images. 
Docker v1.10 and up\nstore images self-contained, and no longer use a parent-chain, making\nthis field an equivalent of the Size field.\n\nThis field is kept for backward compatibility, but may be removed in\na future version of the API.\n", + examples=[172064416], + ), + ] + labels: Annotated[ + dict[str, str], + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] + containers: Annotated[ + int, + Field( + alias="Containers", + description="Number of containers using this image. Includes both stopped and running\ncontainers.\n\nThis size is not calculated by default, and depends on which API endpoint\nis used. `-1` indicates that the value has not been set / calculated.\n", + examples=[2], + ), + ] class AuthConfig(BaseModel): - username: Optional[str] = None - password: Optional[str] = None - email: Optional[str] = None - serveraddress: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + username: str | None = None + password: str | None = None + email: str | None = None + serveraddress: str | None = None class ProcessConfig(BaseModel): - privileged: Optional[bool] = None - user: Optional[str] = None - tty: Optional[bool] = None - entrypoint: Optional[str] = None - arguments: Optional[list[str]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + privileged: bool | None = None + user: str | None = None + tty: bool | None = None + entrypoint: str | None = None + arguments: list[str] | None = None class Scope(str, Enum): """ - The level at which the volume exists. Either `global` for cluster-wide, + The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. """ @@ -976,65 +1720,105 @@ class Scope(str, Enum): class UsageData(BaseModel): """ - Usage details about the volume. This information is used by the + Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. """ - Size: int = Field( - ..., - description='Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `"local"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` ("not available")\n', - ) - RefCount: int = Field( - ..., - description="The number of containers referencing this volume. This field\nis set to `-1` if the reference-count is not available.\n", + model_config = ConfigDict( + populate_by_name=True, ) + size: Annotated[ + int, + Field( + alias="Size", + description='Amount of disk space used by the volume (in bytes). This information\nis only available for volumes created with the `"local"` volume\ndriver. For volumes created with other volume drivers, this field\nis set to `-1` ("not available")\n', + ), + ] + ref_count: Annotated[ + int, + Field( + alias="RefCount", + description="The number of containers referencing this volume. 
This field\nis set to `-1` if the reference-count is not available.\n", + ), + ] class Volume(BaseModel): - Name: str = Field(..., description="Name of the volume.", example="tardis") - Driver: str = Field( - ..., - description="Name of the volume driver used by the volume.", - example="custom", - ) - Mountpoint: str = Field( - ..., - description="Mount path of the volume on the host.", - example="/var/lib/docker/volumes/tardis", - ) - CreatedAt: Optional[str] = Field( - None, - description="Date/Time the volume was created.", - example="2016-06-07T20:31:11.853781916Z", - ) - Status: Optional[dict[str, dict[str, Any]]] = Field( - None, - description='Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{"key":"value","key2":"value2"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n', - example={"hello": "world"}, - ) - Labels: dict[str, str] = Field( - ..., - description="User-defined key/value metadata.", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) - Scope: Scope = Field( - ..., - description="The level at which the volume exists. Either `global` for cluster-wide,\nor `local` for machine level.\n", - example="local", - ) - Options: dict[str, str] = Field( - ..., - description="The driver specific options used when creating the volume.\n", - example={"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}, - ) - UsageData: Optional[UsageData] = Field( - None, - description="Usage details about the volume. This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str, Field(alias="Name", description="Name of the volume.", examples=["tardis"]) + ] + driver: Annotated[ + str, + Field( + alias="Driver", + description="Name of the volume driver used by the volume.", + examples=["custom"], + ), + ] + mountpoint: Annotated[ + str, + Field( + alias="Mountpoint", + description="Mount path of the volume on the host.", + examples=["/var/lib/docker/volumes/tardis"], + ), + ] + created_at: Annotated[ + str | None, + Field( + alias="CreatedAt", + description="Date/Time the volume was created.", + examples=["2016-06-07T20:31:11.853781916Z"], + ), + ] = None + status: Annotated[ + dict[str, dict[str, Any]] | None, + Field( + alias="Status", + description='Low-level details about the volume, provided by the volume driver.\nDetails are returned as a map with key/value pairs:\n`{"key":"value","key2":"value2"}`.\n\nThe `Status` field is optional, and is omitted if the volume driver\ndoes not support this feature.\n', + examples=[{"hello": "world"}], + ), + ] = None + labels: Annotated[ + dict[str, str], + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] + scope: Annotated[ + Scope, + Field( + alias="Scope", + description="The level at which the volume exists. 
Either `global` for cluster-wide,\nor `local` for machine level.\n", + examples=["local"], + ), + ] + options: Annotated[ + dict[str, str], + Field( + alias="Options", + description="The driver specific options used when creating the volume.\n", + examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}], + ), + ] + usage_data: Annotated[ + UsageData | None, + Field( + alias="UsageData", + description="Usage details about the volume. This information is used by the\n`GET /system/df` endpoint, and omitted in other endpoints.\n", + ), + ] = None class VolumeConfig(BaseModel): @@ -1042,42 +1826,157 @@ class VolumeConfig(BaseModel): Volume configuration """ - Name: Optional[str] = Field( - None, - description="The new volume's name. If not specified, Docker generates a name.\n", - example="tardis", - ) - Driver: Optional[str] = Field( - "local", description="Name of the volume driver to use.", example="custom" - ) - DriverOpts: Optional[dict[str, str]] = Field( - None, - description="A mapping of driver options and values. These options are\npassed directly to the driver and are driver specific.\n", - example={"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}, - ) - Labels: Optional[dict[str, str]] = Field( - None, - description="User-defined key/value metadata.", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field( + alias="Name", + description="The new volume's name. If not specified, Docker generates a name.\n", + examples=["tardis"], + ), + ] = None + driver: Annotated[ + str | None, + Field( + alias="Driver", + description="Name of the volume driver to use.", + examples=["custom"], + ), + ] = "local" + driver_opts: Annotated[ + dict[str, str] | None, + Field( + alias="DriverOpts", + description="A mapping of driver options and values. These options are\npassed directly to the driver and are driver specific.\n", + examples=[{"device": "tmpfs", "o": "size=100m,uid=1000", "type": "tmpfs"}], + ), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None + + +class VolumeListResponse(BaseModel): + """ + Volume list response + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + volumes: Annotated[ + list[Volume] | None, Field(alias="Volumes", description="List of volumes") + ] = None + warnings: Annotated[ + list[str] | None, + Field( + alias="Warnings", + description="Warnings that occurred when fetching the list of volumes.\n", + examples=[[]], + ), + ] = None + + +class ConfigReference(BaseModel): + """ + The config-only network source to provide the configuration for + this network. + + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + network: Annotated[ + str | None, + Field( + alias="Network", + description="The name of the config-only network that provides the network's\nconfiguration. The specified network must be an existing config-only\nnetwork. 
Only network names are allowed, not network IDs.\n", + examples=["config_only_network_01"], + ), + ] = None class IPAMConfig(BaseModel): - Subnet: Optional[str] = None - IPRange: Optional[str] = None - Gateway: Optional[str] = None - AuxiliaryAddresses: Optional[dict[str, str]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + subnet: Annotated[ + str | None, Field(alias="Subnet", examples=["172.20.0.0/16"]) + ] = None + ip_range: Annotated[ + str | None, Field(alias="IPRange", examples=["172.20.10.0/24"]) + ] = None + gateway: Annotated[ + str | None, Field(alias="Gateway", examples=["172.20.10.11"]) + ] = None + auxiliary_addresses: Annotated[ + dict[str, str] | None, Field(alias="AuxiliaryAddresses") + ] = None class NetworkContainer(BaseModel): - Name: Optional[str] = None - EndpointID: Optional[str] = None - MacAddress: Optional[str] = None - IPv4Address: Optional[str] = None - IPv6Address: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str | None, Field(alias="Name", examples=["container_1"])] = None + endpoint_id: Annotated[ + str | None, + Field( + alias="EndpointID", + examples=[ + "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + ], + ), + ] = None + mac_address: Annotated[ + str | None, Field(alias="MacAddress", examples=["02:42:ac:13:00:02"]) + ] = None + i_pv4_address: Annotated[ + str | None, Field(alias="IPv4Address", examples=["172.19.0.2/16"]) + ] = None + i_pv6_address: Annotated[ + str | None, Field(alias="IPv6Address", examples=[""]) + ] = None + + +class PeerInfo(BaseModel): + """ + PeerInfo represents one peer of an overlay network. + + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field( + alias="Name", + description="ID of the peer-node in the Swarm cluster.", + examples=["6869d7c1732b"], + ), + ] = None + ip: Annotated[ + str | None, + Field( + alias="IP", + description="IP-address of the peer-node in the Swarm cluster.", + examples=["10.133.77.91"], + ), + ] = None class Type4(str, Enum): @@ -1100,46 +1999,80 @@ class BuildCache(BaseModel): """ - ID: Optional[str] = Field( - None, - description="Unique ID of the build cache record.\n", - example="ndlpt0hhvkqcdfkputsk4cq9c", - ) - Parent: Optional[str] = Field( - None, - description="ID of the parent build cache record.\n", - example="hw53o5aio51xtltp5xjp8v7fx", - ) - Type: Optional[Type4] = Field( - None, description="Cache record type.\n", example="regular" - ) - Description: Optional[str] = Field( - None, - description="Description of the build-step that produced the build cache.\n", - example="mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache", - ) - InUse: Optional[bool] = Field( - None, description="Indicates if the build cache is in use.\n", example=False - ) - Shared: Optional[bool] = Field( - None, description="Indicates if the build cache is shared.\n", example=True - ) - Size: Optional[int] = Field( - None, - description="Amount of disk space used by the build cache (in bytes).\n", - example=51, - ) - CreatedAt: Optional[str] = Field( - None, - description="Date and time at which the build cache was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2016-08-18T10:44:24.496525531Z", - ) - LastUsedAt: Optional[str] = Field( - None, - description="Date and time at which the build cache was last used in\n[RFC 
3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2017-08-09T07:09:37.632105588Z", - ) - UsageCount: Optional[int] = Field(None, example=26) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="ID", + description="Unique ID of the build cache record.\n", + examples=["ndlpt0hhvkqcdfkputsk4cq9c"], + ), + ] = None + parent: Annotated[ + str | None, + Field( + alias="Parent", + description="ID of the parent build cache record.\n", + examples=["hw53o5aio51xtltp5xjp8v7fx"], + ), + ] = None + type: Annotated[ + Type4 | None, + Field(alias="Type", description="Cache record type.\n", examples=["regular"]), + ] = None + description: Annotated[ + str | None, + Field( + alias="Description", + description="Description of the build-step that produced the build cache.\n", + examples=[ + "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + ], + ), + ] = None + in_use: Annotated[ + bool | None, + Field( + alias="InUse", + description="Indicates if the build cache is in use.\n", + examples=[False], + ), + ] = None + shared: Annotated[ + bool | None, + Field( + alias="Shared", + description="Indicates if the build cache is shared.\n", + examples=[True], + ), + ] = None + size: Annotated[ + int | None, + Field( + alias="Size", + description="Amount of disk space used by the build cache (in bytes).\n", + examples=[51], + ), + ] = None + created_at: Annotated[ + str | None, + Field( + alias="CreatedAt", + description="Date and time at which the build cache was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2016-08-18T10:44:24.496525531Z"], + ), + ] = None + last_used_at: Annotated[ + str | None, + Field( + alias="LastUsedAt", + description="Date and time at which the build cache was last used in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2017-08-09T07:09:37.632105588Z"], + ), + ] = None + usage_count: Annotated[int | None, Field(alias="UsageCount", examples=[26])] = None class ImageID(BaseModel): @@ -1147,17 +2080,26 @@ class ImageID(BaseModel): Image ID or Digest """ - ID: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[str | None, Field(alias="ID")] = None class ErrorDetail(BaseModel): - code: Optional[int] = None - message: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + code: int | None = None + message: str | None = None class ProgressDetail(BaseModel): - current: Optional[int] = None - total: Optional[int] = None + model_config = ConfigDict( + populate_by_name=True, + ) + current: int | None = None + total: int | None = None class ErrorResponse(BaseModel): @@ -1165,7 +2107,10 @@ class ErrorResponse(BaseModel): Represents an error. 
""" - message: str = Field(..., description="The error message.") + model_config = ConfigDict( + populate_by_name=True, + ) + message: Annotated[str, Field(description="The error message.")] class IdResponse(BaseModel): @@ -1173,7 +2118,12 @@ class IdResponse(BaseModel): Response to an API call that returns just an Id """ - Id: str = Field(..., description="The id of the newly created object.") + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str, Field(alias="Id", description="The id of the newly created object.") + ] class EndpointIPAMConfig(BaseModel): @@ -1182,53 +2132,81 @@ class EndpointIPAMConfig(BaseModel): """ - IPv4Address: Optional[str] = Field(None, example="172.20.30.33") - IPv6Address: Optional[str] = Field(None, example="2001:db8:abcd::3033") - LinkLocalIPs: Optional[list[str]] = Field( - None, example=["169.254.34.68", "fe80::3468"] + model_config = ConfigDict( + populate_by_name=True, ) + i_pv4_address: Annotated[ + str | None, Field(alias="IPv4Address", examples=["172.20.30.33"]) + ] = None + i_pv6_address: Annotated[ + str | None, Field(alias="IPv6Address", examples=["2001:db8:abcd::3033"]) + ] = None + link_local_i_ps: Annotated[ + list[str] | None, + Field(alias="LinkLocalIPs", examples=[["169.254.34.68", "fe80::3468"]]), + ] = None class PluginMount(BaseModel): - Name: str = Field(..., example="some-mount") - Description: str = Field(..., example="This is a mount that's used by the plugin.") - Settable: list[str] - Source: str = Field(..., example="/var/lib/docker/plugins/") - Destination: str = Field(..., example="/mnt/state") - Type: str = Field(..., example="bind") - Options: list[str] = Field(..., example=["rbind", "rw"]) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str, Field(alias="Name", examples=["some-mount"])] + description: Annotated[ + str, + Field( + alias="Description", examples=["This is a mount that's used by the plugin."] + ), + ] + settable: Annotated[list[str], Field(alias="Settable")] + source: Annotated[str, Field(alias="Source", examples=["/var/lib/docker/plugins/"])] + destination: Annotated[str, Field(alias="Destination", examples=["/mnt/state"])] + type: Annotated[str, Field(alias="Type", examples=["bind"])] + options: Annotated[list[str], Field(alias="Options", examples=[["rbind", "rw"]])] class PluginDevice(BaseModel): - Name: str - Description: str - Settable: list[str] - Path: str = Field(..., example="/dev/fuse") + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str, Field(alias="Name")] + description: Annotated[str, Field(alias="Description")] + settable: Annotated[list[str], Field(alias="Settable")] + path: Annotated[str, Field(alias="Path", examples=["/dev/fuse"])] class PluginEnv(BaseModel): - Name: str - Description: str - Settable: list[str] - Value: str + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str, Field(alias="Name")] + description: Annotated[str, Field(alias="Description")] + settable: Annotated[list[str], Field(alias="Settable")] + value: Annotated[str, Field(alias="Value")] class PluginInterfaceType(BaseModel): - Prefix: str - Capability: str - Version: str + model_config = ConfigDict( + populate_by_name=True, + ) + prefix: Annotated[str, Field(alias="Prefix")] + capability: Annotated[str, Field(alias="Capability")] + version: Annotated[str, Field(alias="Version")] class PluginPrivilege(BaseModel): """ - Describes a permission the user has to accept upon installing + Describes a permission the user has to 
accept upon installing the plugin. """ - Name: Optional[str] = Field(None, example="network") - Description: Optional[str] = None - Value: Optional[list[str]] = Field(None, example=["host"]) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str | None, Field(alias="Name", examples=["network"])] = None + description: Annotated[str | None, Field(alias="Description")] = None + value: Annotated[list[str] | None, Field(alias="Value", examples=[["host"]])] = None class Settings(BaseModel): @@ -1236,10 +2214,13 @@ class Settings(BaseModel): Settings that can be modified by users. """ - Mounts: list[PluginMount] - Env: list[str] = Field(..., example=["DEBUG=0"]) - Args: list[str] - Devices: list[PluginDevice] + model_config = ConfigDict( + populate_by_name=True, + ) + mounts: Annotated[list[PluginMount], Field(alias="Mounts")] + env: Annotated[list[str], Field(alias="Env", examples=[["DEBUG=0"]])] + args: Annotated[list[str], Field(alias="Args")] + devices: Annotated[list[PluginDevice], Field(alias="Devices")] class ProtocolScheme(str, Enum): @@ -1247,7 +2228,7 @@ class ProtocolScheme(str, Enum): Protocol to use for clients connecting to the plugin. """ - _ = "" + field_ = "" moby_plugins_http_v1 = "moby.plugins.http/v1" @@ -1256,46 +2237,79 @@ class Interface(BaseModel): The interface between Docker and the plugin """ - Types: list[PluginInterfaceType] = Field(..., example=["docker.volumedriver/1.0"]) - Socket: str = Field(..., example="plugins.sock") - ProtocolScheme: Optional[ProtocolScheme] = Field( - None, - description="Protocol to use for clients connecting to the plugin.", - example="some.protocol/v1.0", + model_config = ConfigDict( + populate_by_name=True, ) + types: Annotated[ + list[PluginInterfaceType], + Field(alias="Types", examples=[["docker.volumedriver/1.0"]]), + ] + socket: Annotated[str, Field(alias="Socket", examples=["plugins.sock"])] + protocol_scheme: Annotated[ + ProtocolScheme | None, + Field( + alias="ProtocolScheme", + description="Protocol to use for clients connecting to the plugin.", + examples=["some.protocol/v1.0"], + ), + ] = None class User(BaseModel): - UID: Optional[int] = Field(None, example=1000) - GID: Optional[int] = Field(None, example=1000) + model_config = ConfigDict( + populate_by_name=True, + ) + uid: Annotated[int | None, Field(alias="UID", examples=[1000])] = None + gid: Annotated[int | None, Field(alias="GID", examples=[1000])] = None class Network1(BaseModel): - Type: str = Field(..., example="host") + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[str, Field(alias="Type", examples=["host"])] class Linux(BaseModel): - Capabilities: list[str] = Field(..., example=["CAP_SYS_ADMIN", "CAP_SYSLOG"]) - AllowAllDevices: bool = Field(..., example=False) - Devices: list[PluginDevice] + model_config = ConfigDict( + populate_by_name=True, + ) + capabilities: Annotated[ + list[str], + Field(alias="Capabilities", examples=[["CAP_SYS_ADMIN", "CAP_SYSLOG"]]), + ] + allow_all_devices: Annotated[bool, Field(alias="AllowAllDevices", examples=[False])] + devices: Annotated[list[PluginDevice], Field(alias="Devices")] class Args(BaseModel): - Name: str = Field(..., example="args") - Description: str = Field(..., example="command line arguments") - Settable: list[str] - Value: list[str] + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str, Field(alias="Name", examples=["args"])] + description: Annotated[ + str, Field(alias="Description", examples=["command line arguments"]) + ] + settable: 
Annotated[list[str], Field(alias="Settable")] + value: Annotated[list[str], Field(alias="Value")] class Rootfs(BaseModel): - type: Optional[str] = Field(None, example="layers") - diff_ids: Optional[list[str]] = Field( - None, - example=[ - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887", - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8", - ], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[str | None, Field(examples=["layers"])] = None + diff_ids: Annotated[ + list[str] | None, + Field( + examples=[ + [ + "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887", + "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8", + ] + ] + ), + ] = None class Config(BaseModel): @@ -1303,40 +2317,67 @@ class Config(BaseModel): The config of a plugin. """ - DockerVersion: Optional[str] = Field( - None, - description="Docker Version used to create the plugin", - example="17.06.0-ce", - ) - Description: str = Field(..., example="A sample volume plugin for Docker") - Documentation: str = Field(..., example="/engine/extend/plugins/") - Interface: Interface = Field( - ..., description="The interface between Docker and the plugin" - ) - Entrypoint: list[str] = Field( - ..., example=["/usr/bin/sample-volume-plugin", "/data"] - ) - WorkDir: str = Field(..., example="/bin/") - User: Optional[User] = None - Network: Network1 - Linux: Linux - PropagatedMount: str = Field(..., example="/mnt/volumes") - IpcHost: bool = Field(..., example=False) - PidHost: bool = Field(..., example=False) - Mounts: list[PluginMount] - Env: list[PluginEnv] = Field( - ..., - example=[ - { - "Name": "DEBUG", - "Description": "If set, prints debug messages", - "Settable": None, - "Value": "0", - } - ], - ) - Args: Args - rootfs: Optional[Rootfs] = None + model_config = ConfigDict( + populate_by_name=True, + ) + docker_version: Annotated[ + str | None, + Field( + alias="DockerVersion", + description="Docker Version used to create the plugin", + examples=["17.06.0-ce"], + ), + ] = None + description: Annotated[ + str, Field(alias="Description", examples=["A sample volume plugin for Docker"]) + ] + documentation: Annotated[ + str, + Field( + alias="Documentation", + examples=["https://docs.docker.com/engine/extend/plugins/"], + ), + ] + interface: Annotated[ + Interface, + Field( + alias="Interface", description="The interface between Docker and the plugin" + ), + ] + entrypoint: Annotated[ + list[str], + Field( + alias="Entrypoint", examples=[["/usr/bin/sample-volume-plugin", "/data"]] + ), + ] + work_dir: Annotated[str, Field(alias="WorkDir", examples=["/bin/"])] + user: Annotated[User | None, Field(alias="User")] = None + network: Annotated[Network1, Field(alias="Network")] + linux: Annotated[Linux, Field(alias="Linux")] + propagated_mount: Annotated[ + str, Field(alias="PropagatedMount", examples=["/mnt/volumes"]) + ] + ipc_host: Annotated[bool, Field(alias="IpcHost", examples=[False])] + pid_host: Annotated[bool, Field(alias="PidHost", examples=[False])] + mounts: Annotated[list[PluginMount], Field(alias="Mounts")] + env: Annotated[ + list[PluginEnv], + Field( + alias="Env", + examples=[ + [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": None, + "Value": "0", + } + ] + ], + ), + ] + args: Annotated[Args, Field(alias="Args")] + rootfs: Rootfs | None = None class Plugin(BaseModel): @@ -1344,29 +2385,49 @@ class Plugin(BaseModel): A plugin for the Engine API """ - Id: 
Optional[str] = Field( - None, example="5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - ) - Name: str = Field(..., example="tiborvass/sample-volume-plugin") - Enabled: bool = Field( - ..., - description="True if the plugin is running. False if the plugin is not running, only installed.", - example=True, - ) - Settings: Settings = Field( - ..., description="Settings that can be modified by users." - ) - PluginReference: Optional[str] = Field( - None, - description="plugin remote reference used to push/pull the plugin", - example="localhost:5000/tiborvass/sample-volume-plugin:latest", - ) - Config_: Config = Field(..., alias="Config", description="The config of a plugin.") + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="Id", + examples=[ + "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + ], + ), + ] = None + name: Annotated[ + str, Field(alias="Name", examples=["tiborvass/sample-volume-plugin"]) + ] + enabled: Annotated[ + bool, + Field( + alias="Enabled", + description="True if the plugin is running. False if the plugin is not running, only installed.", + examples=[True], + ), + ] + settings: Annotated[ + Settings, + Field(alias="Settings", description="Settings that can be modified by users."), + ] + plugin_reference: Annotated[ + str | None, + Field( + alias="PluginReference", + description="plugin remote reference used to push/pull the plugin", + examples=["localhost:5000/tiborvass/sample-volume-plugin:latest"], + ), + ] = None + config: Annotated[ + Config, Field(alias="Config", description="The config of a plugin.") + ] class ObjectVersion(BaseModel): """ - The version number of the object such as node, service, etc. This is needed + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. The client must send the version number along with the modified specification when updating these objects. @@ -1379,7 +2440,10 @@ class ObjectVersion(BaseModel): """ - Index: Optional[int] = Field(None, example=373531) + model_config = ConfigDict( + populate_by_name=True, + ) + index: Annotated[int | None, Field(alias="Index", examples=[373531])] = None class Role(str, Enum): @@ -1402,18 +2466,29 @@ class Availability(str, Enum): class NodeSpec(BaseModel): - Name: Optional[str] = Field( - None, description="Name for the node.", example="my-node" - ) - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." 
- ) - Role: Optional[Role] = Field( - None, description="Role of the node.", example="manager" - ) - Availability: Optional[Availability] = Field( - None, description="Availability of the node.", example="active" - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field(alias="Name", description="Name for the node.", examples=["my-node"]), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + role: Annotated[ + Role | None, + Field(alias="Role", description="Role of the node.", examples=["manager"]), + ] = None + availability: Annotated[ + Availability | None, + Field( + alias="Availability", + description="Availability of the node.", + examples=["active"], + ), + ] = None class Platform(BaseModel): @@ -1422,21 +2497,33 @@ class Platform(BaseModel): """ - Architecture: Optional[str] = Field( - None, - description="Architecture represents the hardware architecture (for example,\n`x86_64`).\n", - example="x86_64", - ) - OS: Optional[str] = Field( - None, - description="OS represents the Operating System (for example, `linux` or `windows`).\n", - example="linux", + model_config = ConfigDict( + populate_by_name=True, ) + architecture: Annotated[ + str | None, + Field( + alias="Architecture", + description="Architecture represents the hardware architecture (for example,\n`x86_64`).\n", + examples=["x86_64"], + ), + ] = None + os: Annotated[ + str | None, + Field( + alias="OS", + description="OS represents the Operating System (for example, `linux` or `windows`).\n", + examples=["linux"], + ), + ] = None class Plugin1(BaseModel): - Type: Optional[str] = None - Name: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[str | None, Field(alias="Type")] = None + name: Annotated[str | None, Field(alias="Name")] = None class EngineDescription(BaseModel): @@ -1444,51 +2531,75 @@ class EngineDescription(BaseModel): EngineDescription provides information about an engine. 
""" - EngineVersion: Optional[str] = Field(None, example="17.06.0") - Labels: Optional[dict[str, str]] = Field(None, example={"foo": "bar"}) - Plugins: Optional[list[Plugin1]] = Field( - None, - example=[ - {"Type": "Log", "Name": "awslogs"}, - {"Type": "Log", "Name": "fluentd"}, - {"Type": "Log", "Name": "gcplogs"}, - {"Type": "Log", "Name": "gelf"}, - {"Type": "Log", "Name": "journald"}, - {"Type": "Log", "Name": "json-file"}, - {"Type": "Log", "Name": "logentries"}, - {"Type": "Log", "Name": "splunk"}, - {"Type": "Log", "Name": "syslog"}, - {"Type": "Network", "Name": "bridge"}, - {"Type": "Network", "Name": "host"}, - {"Type": "Network", "Name": "ipvlan"}, - {"Type": "Network", "Name": "macvlan"}, - {"Type": "Network", "Name": "null"}, - {"Type": "Network", "Name": "overlay"}, - {"Type": "Volume", "Name": "local"}, - {"Type": "Volume", "Name": "localhost:5000/vieux/sshfs:latest"}, - {"Type": "Volume", "Name": "vieux/sshfs:latest"}, - ], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + engine_version: Annotated[ + str | None, Field(alias="EngineVersion", examples=["17.06.0"]) + ] = None + labels: Annotated[ + dict[str, str] | None, Field(alias="Labels", examples=[{"foo": "bar"}]) + ] = None + plugins: Annotated[ + list[Plugin1] | None, + Field( + alias="Plugins", + examples=[ + [ + {"Type": "Log", "Name": "awslogs"}, + {"Type": "Log", "Name": "fluentd"}, + {"Type": "Log", "Name": "gcplogs"}, + {"Type": "Log", "Name": "gelf"}, + {"Type": "Log", "Name": "journald"}, + {"Type": "Log", "Name": "json-file"}, + {"Type": "Log", "Name": "splunk"}, + {"Type": "Log", "Name": "syslog"}, + {"Type": "Network", "Name": "bridge"}, + {"Type": "Network", "Name": "host"}, + {"Type": "Network", "Name": "ipvlan"}, + {"Type": "Network", "Name": "macvlan"}, + {"Type": "Network", "Name": "null"}, + {"Type": "Network", "Name": "overlay"}, + {"Type": "Volume", "Name": "local"}, + {"Type": "Volume", "Name": "localhost:5000/vieux/sshfs:latest"}, + {"Type": "Volume", "Name": "vieux/sshfs:latest"}, + ] + ], + ), + ] = None class TLSInfo(BaseModel): """ - Information about the issuer of leaf TLS certificates and the trusted root + Information about the issuer of leaf TLS certificates and the trusted root CA certificate. """ - TrustRoot: Optional[str] = Field( - None, - description="The root CA certificate(s) that are used to validate leaf TLS\ncertificates.\n", - ) - CertIssuerSubject: Optional[str] = Field( - None, description="The base64-url-safe-encoded raw subject bytes of the issuer." - ) - CertIssuerPublicKey: Optional[str] = Field( - None, - description="The base64-url-safe-encoded raw public key bytes of the issuer.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + trust_root: Annotated[ + str | None, + Field( + alias="TrustRoot", + description="The root CA certificate(s) that are used to validate leaf TLS\ncertificates.\n", + ), + ] = None + cert_issuer_subject: Annotated[ + str | None, + Field( + alias="CertIssuerSubject", + description="The base64-url-safe-encoded raw subject bytes of the issuer.", + ), + ] = None + cert_issuer_public_key: Annotated[ + str | None, + Field( + alias="CertIssuerPublicKey", + description="The base64-url-safe-encoded raw public key bytes of the issuer.\n", + ), + ] = None class NodeState(str, Enum): @@ -1517,11 +2628,17 @@ class Orchestration(BaseModel): Orchestration configuration. """ - TaskHistoryRetentionLimit: Optional[int] = Field( - None, - description="The number of historic tasks to keep per instance or node. 
If\nnegative, never remove completed or failed tasks.\n", - example=10, + model_config = ConfigDict( + populate_by_name=True, ) + task_history_retention_limit: Annotated[ + int | None, + Field( + alias="TaskHistoryRetentionLimit", + description="The number of historic tasks to keep per instance or node. If\nnegative, never remove completed or failed tasks.\n", + examples=[10], + ), + ] = None class Raft(BaseModel): @@ -1529,28 +2646,48 @@ class Raft(BaseModel): Raft configuration. """ - SnapshotInterval: Optional[int] = Field( - None, description="The number of log entries between snapshots.", example=10000 - ) - KeepOldSnapshots: Optional[int] = Field( - None, - description="The number of snapshots to keep beyond the current snapshot.\n", - ) - LogEntriesForSlowFollowers: Optional[int] = Field( - None, - description="The number of log entries to keep around to sync up slow followers\nafter a snapshot is created.\n", - example=500, - ) - ElectionTick: Optional[int] = Field( - None, - description="The number of ticks that a follower will wait for a message from\nthe leader before becoming a candidate and starting an election.\n`ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n", - example=3, - ) - HeartbeatTick: Optional[int] = Field( - None, - description="The number of ticks between heartbeats. Every HeartbeatTick ticks,\nthe leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n", - example=1, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + snapshot_interval: Annotated[ + int | None, + Field( + alias="SnapshotInterval", + description="The number of log entries between snapshots.", + examples=[10000], + ), + ] = None + keep_old_snapshots: Annotated[ + int | None, + Field( + alias="KeepOldSnapshots", + description="The number of snapshots to keep beyond the current snapshot.\n", + ), + ] = None + log_entries_for_slow_followers: Annotated[ + int | None, + Field( + alias="LogEntriesForSlowFollowers", + description="The number of log entries to keep around to sync up slow followers\nafter a snapshot is created.\n", + examples=[500], + ), + ] = None + election_tick: Annotated[ + int | None, + Field( + alias="ElectionTick", + description="The number of ticks that a follower will wait for a message from\nthe leader before becoming a candidate and starting an election.\n`ElectionTick` must be greater than `HeartbeatTick`.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n", + examples=[3], + ), + ] = None + heartbeat_tick: Annotated[ + int | None, + Field( + alias="HeartbeatTick", + description="The number of ticks between heartbeats. Every HeartbeatTick ticks,\nthe leader will send a heartbeat to the followers.\n\nA tick currently defaults to one second, so these translate\ndirectly to seconds currently, but this is NOT guaranteed.\n", + examples=[1], + ), + ] = None class Dispatcher(BaseModel): @@ -1558,16 +2695,22 @@ class Dispatcher(BaseModel): Dispatcher configuration. 
""" - HeartbeatPeriod: Optional[int] = Field( - None, - description="The delay for an agent to send a heartbeat to the dispatcher.\n", - example=5000000000, + model_config = ConfigDict( + populate_by_name=True, ) + heartbeat_period: Annotated[ + int | None, + Field( + alias="HeartbeatPeriod", + description="The delay for an agent to send a heartbeat to the dispatcher.\n", + examples=[5000000000], + ), + ] = None class Protocol(str, Enum): """ - Protocol for communication with the external CA (currently + Protocol for communication with the external CA (currently only `cfssl` is supported). """ @@ -1576,49 +2719,83 @@ class Protocol(str, Enum): class ExternalCA(BaseModel): - Protocol: Optional[Protocol] = Field( - Protocol.cfssl, - description="Protocol for communication with the external CA (currently\nonly `cfssl` is supported).\n", - ) - URL: Optional[str] = Field( - None, description="URL where certificate signing requests should be sent.\n" - ) - Options: Optional[dict[str, str]] = Field( - None, - description="An object with key/value pairs that are interpreted as\nprotocol-specific options for the external CA driver.\n", - ) - CACert: Optional[str] = Field( - None, - description="The root CA certificate (in PEM format) this external CA uses\nto issue TLS certificates (assumed to be to the current swarm\nroot CA certificate if not provided).\n", - ) - - -class CAConfig(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + protocol: Annotated[ + Protocol | None, + Field( + alias="Protocol", + description="Protocol for communication with the external CA (currently\nonly `cfssl` is supported).\n", + ), + ] = Protocol.cfssl + url: Annotated[ + str | None, + Field( + alias="URL", + description="URL where certificate signing requests should be sent.\n", + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="An object with key/value pairs that are interpreted as\nprotocol-specific options for the external CA driver.\n", + ), + ] = None + ca_cert: Annotated[ + str | None, + Field( + alias="CACert", + description="The root CA certificate (in PEM format) this external CA uses\nto issue TLS certificates (assumed to be to the current swarm\nroot CA certificate if not provided).\n", + ), + ] = None + + +class CaConfig(BaseModel): """ CA configuration. 
""" - NodeCertExpiry: Optional[int] = Field( - None, - description="The duration node certificates are issued for.", - example=7776000000000000, - ) - ExternalCAs: Optional[list[ExternalCA]] = Field( - None, - description="Configuration for forwarding signing requests to an external\ncertificate authority.\n", - ) - SigningCACert: Optional[str] = Field( - None, - description="The desired signing CA certificate for all swarm node TLS leaf\ncertificates, in PEM format.\n", - ) - SigningCAKey: Optional[str] = Field( - None, - description="The desired signing CA key for all swarm node TLS leaf certificates,\nin PEM format.\n", - ) - ForceRotate: Optional[int] = Field( - None, - description="An integer whose purpose is to force swarm to generate a new\nsigning CA certificate and key, if none have been specified in\n`SigningCACert` and `SigningCAKey`\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + node_cert_expiry: Annotated[ + int | None, + Field( + alias="NodeCertExpiry", + description="The duration node certificates are issued for.", + examples=[7776000000000000], + ), + ] = None + external_c_as: Annotated[ + list[ExternalCA] | None, + Field( + alias="ExternalCAs", + description="Configuration for forwarding signing requests to an external\ncertificate authority.\n", + ), + ] = None + signing_ca_cert: Annotated[ + str | None, + Field( + alias="SigningCACert", + description="The desired signing CA certificate for all swarm node TLS leaf\ncertificates, in PEM format.\n", + ), + ] = None + signing_ca_key: Annotated[ + str | None, + Field( + alias="SigningCAKey", + description="The desired signing CA key for all swarm node TLS leaf certificates,\nin PEM format.\n", + ), + ] = None + force_rotate: Annotated[ + int | None, + Field( + alias="ForceRotate", + description="An integer whose purpose is to force swarm to generate a new\nsigning CA certificate and key, if none have been specified in\n`SigningCACert` and `SigningCAKey`\n", + ), + ] = None class EncryptionConfig(BaseModel): @@ -1626,16 +2803,22 @@ class EncryptionConfig(BaseModel): Parameters related to encryption-at-rest. """ - AutoLockManagers: Optional[bool] = Field( - None, - description="If set, generate a key and use it to lock data stored on the\nmanagers.\n", - example=False, + model_config = ConfigDict( + populate_by_name=True, ) + auto_lock_managers: Annotated[ + bool | None, + Field( + alias="AutoLockManagers", + description="If set, generate a key and use it to lock data stored on the\nmanagers.\n", + examples=[False], + ), + ] = None class LogDriver(BaseModel): """ - The log driver to use for tasks created in the orchestrator if + The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. 
Existing tasks continue @@ -1643,16 +2826,25 @@ class LogDriver(BaseModel): """ - Name: Optional[str] = Field( - None, - description="The log driver to use as a default for new tasks.\n", - example="json-file", - ) - Options: Optional[dict[str, str]] = Field( - None, - description="Driver-specific options for the selectd log driver, specified\nas key/value pairs.\n", - example={"max-file": "10", "max-size": "100m"}, + model_config = ConfigDict( + populate_by_name=True, ) + name: Annotated[ + str | None, + Field( + alias="Name", + description="The log driver to use as a default for new tasks.\n", + examples=["json-file"], + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="Driver-specific options for the selected log driver, specified\nas key/value pairs.\n", + examples=[{"max-file": "10", "max-size": "100m"}], + ), + ] = None class TaskDefaults(BaseModel): @@ -1660,10 +2852,16 @@ class TaskDefaults(BaseModel): Defaults for creating tasks in this cluster. """ - LogDriver: Optional[LogDriver] = Field( - None, - description="The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n", + model_config = ConfigDict( + populate_by_name=True, ) + log_driver: Annotated[ + LogDriver | None, + Field( + alias="LogDriver", + description="The log driver to use for tasks created in the orchestrator if\nunspecified by a service.\n\nUpdating this value only affects new tasks. Existing tasks continue\nto use their previously configured log driver until recreated.\n", + ), + ] = None class SwarmSpec(BaseModel): @@ -1671,76 +2869,125 @@ class SwarmSpec(BaseModel): User modifiable swarm configuration. """ - Name: Optional[str] = Field( - None, description="Name of the swarm.", example="default" - ) - Labels: Optional[dict[str, str]] = Field( - None, - description="User-defined key/value metadata.", - example={ - "com.example.corp.type": "production", - "com.example.corp.department": "engineering", - }, - ) - Orchestration: Optional[Orchestration] = Field( - None, description="Orchestration configuration." - ) - Raft: Optional[Raft] = Field(None, description="Raft configuration.") - Dispatcher: Optional[Dispatcher] = Field( - None, description="Dispatcher configuration." - ) - CAConfig: Optional[CAConfig] = Field(None, description="CA configuration.") - EncryptionConfig: Optional[EncryptionConfig] = Field( - None, description="Parameters related to encryption-at-rest." - ) - TaskDefaults: Optional[TaskDefaults] = Field( - None, description="Defaults for creating tasks in this cluster." 
- ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field(alias="Name", description="Name of the swarm.", examples=["default"]), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.corp.type": "production", + "com.example.corp.department": "engineering", + } + ], + ), + ] = None + orchestration: Annotated[ + Orchestration | None, + Field(alias="Orchestration", description="Orchestration configuration."), + ] = None + raft: Annotated[ + Raft | None, Field(alias="Raft", description="Raft configuration.") + ] = None + dispatcher: Annotated[ + Dispatcher | None, + Field(alias="Dispatcher", description="Dispatcher configuration."), + ] = None + ca_config: Annotated[ + CaConfig | None, Field(alias="CAConfig", description="CA configuration.") + ] = None + encryption_config: Annotated[ + EncryptionConfig | None, + Field( + alias="EncryptionConfig", + description="Parameters related to encryption-at-rest.", + ), + ] = None + task_defaults: Annotated[ + TaskDefaults | None, + Field( + alias="TaskDefaults", + description="Defaults for creating tasks in this cluster.", + ), + ] = None class ClusterInfo(BaseModel): """ - ClusterInfo represents information about the swarm as is returned by the + ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. """ - ID: Optional[str] = Field( - None, description="The ID of the swarm.", example="abajmipo7b4xz5ip2nrla6b11" - ) - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = Field( - None, - description="Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2016-08-18T10:44:24.496525531Z", - ) - UpdatedAt: Optional[str] = Field( - None, - description="Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2017-08-09T07:09:37.632105588Z", - ) - Spec: Optional[SwarmSpec] = None - TLSInfo: Optional[TLSInfo] = None - RootRotationInProgress: Optional[bool] = Field( - None, - description="Whether there is currently a root CA rotation in progress for the swarm\n", - example=False, - ) - DataPathPort: Optional[int] = Field( - 4789, - description="DataPathPort specifies the data path port number for data traffic.\nAcceptable port range is 1024 to 49151.\nIf no port is set or is set to 0, the default port (4789) is used.\n", - example=4789, - ) - DefaultAddrPool: Optional[list[str]] = Field( - None, - description="Default Address Pool specifies default subnet pools for global scope\nnetworks.\n", - ) - SubnetSize: Optional[int] = Field( - 24, - description="SubnetSize specifies the subnet size of the networks created from the\ndefault subnet pool.\n", - example=24, - le=29, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="ID", + description="The ID of the swarm.", + examples=["abajmipo7b4xz5ip2nrla6b11"], + ), + ] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[ + str | None, + Field( + alias="CreatedAt", + description="Date and time at which the swarm was initialised in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2016-08-18T10:44:24.496525531Z"], + ), + ] = None + updated_at: Annotated[ + str 
| None, + Field( + alias="UpdatedAt", + description="Date and time at which the swarm was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2017-08-09T07:09:37.632105588Z"], + ), + ] = None + spec: Annotated[SwarmSpec | None, Field(alias="Spec")] = None + tls_info: Annotated[TLSInfo | None, Field(alias="TLSInfo")] = None + root_rotation_in_progress: Annotated[ + bool | None, + Field( + alias="RootRotationInProgress", + description="Whether there is currently a root CA rotation in progress for the swarm\n", + examples=[False], + ), + ] = None + data_path_port: Annotated[ + int | None, + Field( + alias="DataPathPort", + description="DataPathPort specifies the data path port number for data traffic.\nAcceptable port range is 1024 to 49151.\nIf no port is set or is set to 0, the default port (4789) is used.\n", + examples=[4789], + ), + ] = 4789 + default_addr_pool: Annotated[ + list[str] | None, + Field( + alias="DefaultAddrPool", + description="Default Address Pool specifies default subnet pools for global scope\nnetworks.\n", + ), + ] = None + subnet_size: Annotated[ + int | None, + Field( + alias="SubnetSize", + description="SubnetSize specifies the subnet size of the networks created from the\ndefault subnet pool.\n", + examples=[24], + le=29, + ), + ] = 24 class JoinTokens(BaseModel): @@ -1749,25 +2996,41 @@ class JoinTokens(BaseModel): """ - Worker: Optional[str] = Field( - None, - description="The token workers can use to join the swarm.\n", - example="SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", - ) - Manager: Optional[str] = Field( - None, - description="The token managers can use to join the swarm.\n", - example="SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + worker: Annotated[ + str | None, + Field( + alias="Worker", + description="The token workers can use to join the swarm.\n", + examples=[ + "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + ], + ), + ] = None + manager: Annotated[ + str | None, + Field( + alias="Manager", + description="The token managers can use to join the swarm.\n", + examples=[ + "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + ], + ), + ] = None class Swarm(ClusterInfo): - JoinTokens: Optional[JoinTokens] = None + model_config = ConfigDict( + populate_by_name=True, + ) + join_tokens: Annotated[JoinTokens | None, Field(alias="JoinTokens")] = None class PluginSpec(BaseModel): """ - Plugin spec for the service. *(Experimental release only.)* + Plugin spec for the service. *(Experimental release only.)*
@@ -1778,16 +3041,24 @@ class PluginSpec(BaseModel): """ - Name: Optional[str] = Field( - None, description="The name or 'alias' to use for the plugin." - ) - Remote: Optional[str] = Field( - None, description="The plugin image reference to use." + model_config = ConfigDict( + populate_by_name=True, ) - Disabled: Optional[bool] = Field( - None, description="Disable the plugin once scheduled." - ) - PluginPrivilege: Optional[list[PluginPrivilege]] = None + name: Annotated[ + str | None, + Field(alias="Name", description="The name or 'alias' to use for the plugin."), + ] = None + remote: Annotated[ + str | None, + Field(alias="Remote", description="The plugin image reference to use."), + ] = None + disabled: Annotated[ + bool | None, + Field(alias="Disabled", description="Disable the plugin once scheduled."), + ] = None + plugin_privilege: Annotated[ + list[PluginPrivilege] | None, Field(alias="PluginPrivilege") + ] = None class CredentialSpec(BaseModel): @@ -1795,33 +3066,57 @@ class CredentialSpec(BaseModel): CredentialSpec for managed service account (Windows only) """ - Config_: Optional[str] = Field( - None, - alias="Config", - description="Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", - example="0bt9dmxjvjiqermk6xrop3ekq", - ) - File: Optional[str] = Field( - None, - description="Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", - example="spec.json", - ) - Registry: Optional[str] = Field( - None, - description="Load credential spec from this value in the Windows\nregistry. The specified registry value must be located in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", - ) - - -class SELinuxContext(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + config: Annotated[ + str | None, + Field( + alias="Config", + description="Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", + examples=["0bt9dmxjvjiqermk6xrop3ekq"], + ), + ] = None + file: Annotated[ + str | None, + Field( + alias="File", + description="Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", + examples=["spec.json"], + ), + ] = None + registry: Annotated[ + str | None, + Field( + alias="Registry", + description="Load credential spec from this value in the Windows\nregistry. The specified registry value must be located in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", + ), + ] = None + + +class SeLinuxContext(BaseModel): """ SELinux labels of the container """ - Disable: Optional[bool] = Field(None, description="Disable SELinux") - User: Optional[str] = Field(None, description="SELinux user label") - Role: Optional[str] = Field(None, description="SELinux role label") - Type: Optional[str] = Field(None, description="SELinux type label") - Level: Optional[str] = Field(None, description="SELinux level label") + model_config = ConfigDict( + populate_by_name=True, + ) + disable: Annotated[ + bool | None, Field(alias="Disable", description="Disable SELinux") + ] = None + user: Annotated[ + str | None, Field(alias="User", description="SELinux user label") + ] = None + role: Annotated[ + str | None, Field(alias="Role", description="SELinux role label") + ] = None + type: Annotated[ + str | None, Field(alias="Type", description="SELinux type label") + ] = None + level: Annotated[ + str | None, Field(alias="Level", description="SELinux level label") + ] = None class Privileges(BaseModel): @@ -1829,31 +3124,47 @@ class Privileges(BaseModel): Security options for the container """ - CredentialSpec: Optional[CredentialSpec] = Field( - None, description="CredentialSpec for managed service account (Windows only)" - ) - SELinuxContext: Optional[SELinuxContext] = Field( - None, description="SELinux labels of the container" + model_config = ConfigDict( + populate_by_name=True, ) + credential_spec: Annotated[ + CredentialSpec | None, + Field( + alias="CredentialSpec", + description="CredentialSpec for managed service account (Windows only)", + ), + ] = None + se_linux_context: Annotated[ + SeLinuxContext | None, + Field(alias="SELinuxContext", description="SELinux labels of the container"), + ] = None -class DNSConfig(BaseModel): +class DnsConfig(BaseModel): """ - Specification for DNS related configurations in resolver configuration + Specification for DNS related configurations in resolver configuration file (`resolv.conf`). """ - Nameservers: Optional[list[str]] = Field( - None, description="The IP addresses of the name servers." - ) - Search: Optional[list[str]] = Field( - None, description="A search list for host-name lookup." - ) - Options: Optional[list[str]] = Field( - None, - description="A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n", + model_config = ConfigDict( + populate_by_name=True, ) + nameservers: Annotated[ + list[str] | None, + Field(alias="Nameservers", description="The IP addresses of the name servers."), + ] = None + search: Annotated[ + list[str] | None, + Field(alias="Search", description="A search list for host-name lookup."), + ] = None + options: Annotated[ + list[str] | None, + Field( + alias="Options", + description="A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n", + ), + ] = None class File(BaseModel): @@ -1862,34 +3173,58 @@ class File(BaseModel): """ - Name: Optional[str] = Field( - None, description="Name represents the final filename in the filesystem.\n" - ) - UID: Optional[str] = Field(None, description="UID represents the file UID.") - GID: Optional[str] = Field(None, description="GID represents the file GID.") - Mode: Optional[int] = Field( - None, description="Mode represents the FileMode of the file." 
+ model_config = ConfigDict( + populate_by_name=True, ) + name: Annotated[ + str | None, + Field( + alias="Name", + description="Name represents the final filename in the filesystem.\n", + ), + ] = None + uid: Annotated[ + str | None, Field(alias="UID", description="UID represents the file UID.") + ] = None + gid: Annotated[ + str | None, Field(alias="GID", description="GID represents the file GID.") + ] = None + mode: Annotated[ + int | None, + Field(alias="Mode", description="Mode represents the FileMode of the file."), + ] = None class Secret(BaseModel): - File: Optional[File] = Field( - None, - description="File represents a specific target that is backed by a file.\n", - ) - SecretID: Optional[str] = Field( - None, - description="SecretID represents the ID of the specific secret that we're\nreferencing.\n", - ) - SecretName: Optional[str] = Field( - None, - description="SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n", - ) - - -class File1(File): + model_config = ConfigDict( + populate_by_name=True, + ) + file: Annotated[ + File | None, + Field( + alias="File", + description="File represents a specific target that is backed by a file.\n", + ), + ] = None + secret_id: Annotated[ + str | None, + Field( + alias="SecretID", + description="SecretID represents the ID of the specific secret that we're\nreferencing.\n", + ), + ] = None + secret_name: Annotated[ + str | None, + Field( + alias="SecretName", + description="SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n", + ), + ] = None + + +class File1(BaseModel): """ - File represents a specific target that is backed by a file. + File represents a specific target that is backed by a file.
@@ -1897,35 +3232,77 @@ class File1(File): """ - pass + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field( + alias="Name", + description="Name represents the final filename in the filesystem.\n", + ), + ] = None + uid: Annotated[ + str | None, Field(alias="UID", description="UID represents the file UID.") + ] = None + gid: Annotated[ + str | None, Field(alias="GID", description="GID represents the file GID.") + ] = None + mode: Annotated[ + int | None, + Field(alias="Mode", description="Mode represents the FileMode of the file."), + ] = None class Config1(BaseModel): - File: Optional[File1] = Field( - None, - description="File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive\n", - ) - Runtime: Optional[dict[str, Any]] = Field( - None, - description="Runtime represents a target that is not mounted into the\ncontainer but is used by the task\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually\n> exclusive\n", - ) - ConfigID: Optional[str] = Field( - None, - description="ConfigID represents the ID of the specific config that we're\nreferencing.\n", - ) - ConfigName: Optional[str] = Field( - None, - description="ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + file: Annotated[ + File1 | None, + Field( + alias="File", + description="File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive\n", + ), + ] = None + runtime: Annotated[ + dict[str, Any] | None, + Field( + alias="Runtime", + description="Runtime represents a target that is not mounted into the\ncontainer but is used by the task\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually\n> exclusive\n", + ), + ] = None + config_id: Annotated[ + str | None, + Field( + alias="ConfigID", + description="ConfigID represents the ID of the specific config that we're\nreferencing.\n", + ), + ] = None + config_name: Annotated[ + str | None, + Field( + alias="ConfigName", + description="ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n", + ), + ] = None + + +class Isolation1(str, Enum): + """ + Isolation technology of the containers running the service. + (Windows only) + """ -class Ulimit1(Ulimit): - pass + default = "default" + process = "process" + hyperv = "hyperv" class ContainerSpec(BaseModel): """ - Container spec for the service. + Container spec for the service.
@@ -1936,98 +3313,166 @@ class ContainerSpec(BaseModel): """ - Image: Optional[str] = Field( - None, description="The image name to use for the container" - ) - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value data." - ) - Command: Optional[list[str]] = Field( - None, description="The command to be run in the image." - ) - Args: Optional[list[str]] = Field(None, description="Arguments to the command.") - Hostname: Optional[str] = Field( - None, - description="The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n", - ) - Env: Optional[list[str]] = Field( - None, description="A list of environment variables in the form `VAR=value`.\n" - ) - Dir: Optional[str] = Field( - None, description="The working directory for commands to run in." - ) - User: Optional[str] = Field(None, description="The user inside the container.") - Groups: Optional[list[str]] = Field( - None, - description="A list of additional groups that the container process will run as.\n", - ) - Privileges: Optional[Privileges] = Field( - None, description="Security options for the container" - ) - TTY: Optional[bool] = Field( - None, description="Whether a pseudo-TTY should be allocated." - ) - OpenStdin: Optional[bool] = Field(None, description="Open `stdin`") - ReadOnly: Optional[bool] = Field( - None, description="Mount the container's root filesystem as read only." - ) - Mounts: Optional[list[Mount]] = Field( - None, - description="Specification for mounts to be added to containers created as part\nof the service.\n", - ) - StopSignal: Optional[str] = Field(None, description="Signal to stop the container.") - StopGracePeriod: Optional[int] = Field( - None, - description="Amount of time to wait for the container to terminate before\nforcefully killing it.\n", - ) - HealthCheck: Optional[HealthConfig] = None - Hosts: Optional[list[str]] = Field( - None, - description="A list of hostname/IP mappings to add to the container's `hosts`\nfile. The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n", - ) - DNSConfig: Optional[DNSConfig] = Field( - None, - description="Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n", - ) - Secrets: Optional[list[Secret]] = Field( - None, - description="Secrets contains references to zero or more secrets that will be\nexposed to the service.\n", - ) - Configs: Optional[list[Config1]] = Field( - None, - description="Configs contains references to zero or more configs that will be\nexposed to the service.\n", - ) - Isolation: Optional[Isolation] = Field( - None, - description="Isolation technology of the containers running the service.\n(Windows only)\n", - ) - Init: Optional[bool] = Field( - None, - description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n", - ) - Sysctls: Optional[dict[str, str]] = Field( - None, - description="Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. 
Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n", - ) - CapabilityAdd: Optional[list[str]] = Field( - None, - description="A list of kernel capabilities to add to the default set\nfor the container.\n", - example=["CAP_NET_RAW", "CAP_SYS_ADMIN", "CAP_SYS_CHROOT", "CAP_SYSLOG"], - ) - CapabilityDrop: Optional[list[str]] = Field( - None, - description="A list of kernel capabilities to drop from the default set\nfor the container.\n", - example=["CAP_NET_RAW"], - ) - Ulimits: Optional[list[Ulimit1]] = Field( - None, - description='A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"\n', - ) + model_config = ConfigDict( + populate_by_name=True, + ) + image: Annotated[ + str | None, + Field(alias="Image", description="The image name to use for the container"), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value data."), + ] = None + command: Annotated[ + list[str] | None, + Field(alias="Command", description="The command to be run in the image."), + ] = None + args: Annotated[ + list[str] | None, Field(alias="Args", description="Arguments to the command.") + ] = None + hostname: Annotated[ + str | None, + Field( + alias="Hostname", + description="The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n", + ), + ] = None + env: Annotated[ + list[str] | None, + Field( + alias="Env", + description="A list of environment variables in the form `VAR=value`.\n", + ), + ] = None + dir: Annotated[ + str | None, + Field(alias="Dir", description="The working directory for commands to run in."), + ] = None + user: Annotated[ + str | None, Field(alias="User", description="The user inside the container.") + ] = None + groups: Annotated[ + list[str] | None, + Field( + alias="Groups", + description="A list of additional groups that the container process will run as.\n", + ), + ] = None + privileges: Annotated[ + Privileges | None, + Field(alias="Privileges", description="Security options for the container"), + ] = None + tty: Annotated[ + bool | None, + Field(alias="TTY", description="Whether a pseudo-TTY should be allocated."), + ] = None + open_stdin: Annotated[ + bool | None, Field(alias="OpenStdin", description="Open `stdin`") + ] = None + read_only: Annotated[ + bool | None, + Field( + alias="ReadOnly", + description="Mount the container's root filesystem as read only.", + ), + ] = None + mounts: Annotated[ + list[Mount] | None, + Field( + alias="Mounts", + description="Specification for mounts to be added to containers created as part\nof the service.\n", + ), + ] = None + stop_signal: Annotated[ + str | None, + Field(alias="StopSignal", description="Signal to stop the container."), + ] = None + stop_grace_period: Annotated[ + int | None, + Field( + alias="StopGracePeriod", + description="Amount of time to wait for the container to terminate before\nforcefully killing it.\n", + ), + ] = None + health_check: Annotated[HealthConfig | None, Field(alias="HealthCheck")] = None + hosts: Annotated[ + list[str] | None, + Field( + alias="Hosts", + description="A list of hostname/IP mappings to add to the container's `hosts`\nfile. 
The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n", + ), + ] = None + dns_config: Annotated[ + DnsConfig | None, + Field( + alias="DNSConfig", + description="Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n", + ), + ] = None + secrets: Annotated[ + list[Secret] | None, + Field( + alias="Secrets", + description="Secrets contains references to zero or more secrets that will be\nexposed to the service.\n", + ), + ] = None + configs: Annotated[ + list[Config1] | None, + Field( + alias="Configs", + description="Configs contains references to zero or more configs that will be\nexposed to the service.\n", + ), + ] = None + isolation: Annotated[ + Isolation1 | None, + Field( + alias="Isolation", + description="Isolation technology of the containers running the service.\n(Windows only)\n", + ), + ] = None + init: Annotated[ + bool | None, + Field( + alias="Init", + description="Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n", + ), + ] = None + sysctls: Annotated[ + dict[str, str] | None, + Field( + alias="Sysctls", + description="Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n", + ), + ] = None + capability_add: Annotated[ + list[str] | None, + Field( + alias="CapabilityAdd", + description="A list of kernel capabilities to add to the default set\nfor the container.\n", + examples=[["CAP_NET_RAW", "CAP_SYS_ADMIN", "CAP_SYS_CHROOT", "CAP_SYSLOG"]], + ), + ] = None + capability_drop: Annotated[ + list[str] | None, + Field( + alias="CapabilityDrop", + description="A list of kernel capabilities to drop from the default set\nfor the container.\n", + examples=[["CAP_NET_RAW"]], + ), + ] = None + ulimits: Annotated[ + list[Ulimit] | None, + Field( + alias="Ulimits", + description='A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"\n', + ), + ] = None class NetworkAttachmentSpec(BaseModel): """ - Read-only spec type for non-swarm containers attached to swarm overlay + Read-only spec type for non-swarm containers attached to swarm overlay networks.
@@ -2039,9 +3484,16 @@ class NetworkAttachmentSpec(BaseModel): """ - ContainerID: Optional[str] = Field( - None, description="ID of the container represented by this task" + model_config = ConfigDict( + populate_by_name=True, ) + container_id: Annotated[ + str | None, + Field( + alias="ContainerID", + description="ID of the container represented by this task", + ), + ] = None class Condition(str, Enum): @@ -2056,73 +3508,118 @@ class Condition(str, Enum): class RestartPolicy1(BaseModel): """ - Specification for the restart policy which applies to containers + Specification for the restart policy which applies to containers created as part of this service. """ - Condition: Optional[Condition] = Field(None, description="Condition for restart.") - Delay: Optional[int] = Field(None, description="Delay between restart attempts.") - MaxAttempts: Optional[int] = Field( - 0, - description="Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n", - ) - Window: Optional[int] = Field( - 0, - description="Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + condition: Annotated[ + Condition | None, Field(alias="Condition", description="Condition for restart.") + ] = None + delay: Annotated[ + int | None, Field(alias="Delay", description="Delay between restart attempts.") + ] = None + max_attempts: Annotated[ + int | None, + Field( + alias="MaxAttempts", + description="Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n", + ), + ] = 0 + window: Annotated[ + int | None, + Field( + alias="Window", + description="Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n", + ), + ] = 0 class Spread(BaseModel): - SpreadDescriptor: Optional[str] = Field( - None, description="label descriptor, such as `engine.labels.az`.\n" + model_config = ConfigDict( + populate_by_name=True, ) + spread_descriptor: Annotated[ + str | None, + Field( + alias="SpreadDescriptor", + description="label descriptor, such as `engine.labels.az`.\n", + ), + ] = None class Preference(BaseModel): - Spread: Optional[Spread] = None + model_config = ConfigDict( + populate_by_name=True, + ) + spread: Annotated[Spread | None, Field(alias="Spread")] = None class Placement(BaseModel): - Constraints: Optional[list[str]] = Field( - None, - description="An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. 
Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n", - example=[ - "node.hostname!=node3.corp.example.com", - "node.role!=manager", - "node.labels.type==production", - "node.platform.os==linux", - "node.platform.arch==x86_64", - ], - ) - Preferences: Optional[list[Preference]] = Field( - None, - description="Preferences provide a way to make the scheduler aware of factors\nsuch as topology. They are provided in order from highest to\nlowest precedence.\n", - example=[ - {"Spread": {"SpreadDescriptor": "node.labels.datacenter"}}, - {"Spread": {"SpreadDescriptor": "node.labels.rack"}}, - ], - ) - MaxReplicas: Optional[int] = Field( - 0, - description="Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n", - ) - Platforms: Optional[list[Platform]] = Field( - None, - description="Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + constraints: Annotated[ + list[str] | None, + Field( + alias="Constraints", + description="An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n", + examples=[ + [ + "node.hostname!=node3.corp.example.com", + "node.role!=manager", + "node.labels.type==production", + "node.platform.os==linux", + "node.platform.arch==x86_64", + ] + ], + ), + ] = None + preferences: Annotated[ + list[Preference] | None, + Field( + alias="Preferences", + description="Preferences provide a way to make the scheduler aware of factors\nsuch as topology. They are provided in order from highest to\nlowest precedence.\n", + examples=[ + [ + {"Spread": {"SpreadDescriptor": "node.labels.datacenter"}}, + {"Spread": {"SpreadDescriptor": "node.labels.rack"}}, + ] + ], + ), + ] = None + max_replicas: Annotated[ + int | None, + Field( + alias="MaxReplicas", + description="Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n", + ), + ] = 0 + platforms: Annotated[ + list[Platform] | None, + Field( + alias="Platforms", + description="Platforms stores all the platforms that the service's image can\nrun on. 
This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n", + ), + ] = None class LogDriver1(BaseModel): """ - Specifies the log driver to use for tasks created from this spec. If + Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. """ - Name: Optional[str] = None - Options: Optional[dict[str, str]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str | None, Field(alias="Name")] = None + options: Annotated[dict[str, str] | None, Field(alias="Options")] = None class TaskState(str, Enum): @@ -2144,37 +3641,58 @@ class TaskState(str, Enum): class ContainerStatus(BaseModel): - ContainerID: Optional[str] = None - PID: Optional[int] = None - ExitCode: Optional[int] = None + model_config = ConfigDict( + populate_by_name=True, + ) + container_id: Annotated[str | None, Field(alias="ContainerID")] = None + pid: Annotated[int | None, Field(alias="PID")] = None + exit_code: Annotated[int | None, Field(alias="ExitCode")] = None class Status1(BaseModel): - Timestamp: Optional[str] = None - State: Optional[TaskState] = None - Message: Optional[str] = None - Err: Optional[str] = None - ContainerStatus: Optional[ContainerStatus] = None + model_config = ConfigDict( + populate_by_name=True, + ) + timestamp: Annotated[str | None, Field(alias="Timestamp")] = None + state: Annotated[TaskState | None, Field(alias="State")] = None + message: Annotated[str | None, Field(alias="Message")] = None + err: Annotated[str | None, Field(alias="Err")] = None + container_status: Annotated[ + ContainerStatus | None, Field(alias="ContainerStatus") + ] = None class Replicated(BaseModel): - Replicas: Optional[int] = None + model_config = ConfigDict( + populate_by_name=True, + ) + replicas: Annotated[int | None, Field(alias="Replicas")] = None class ReplicatedJob(BaseModel): """ - The mode used for services with a finite number of tasks that run + The mode used for services with a finite number of tasks that run to a completed state. """ - MaxConcurrent: Optional[int] = Field( - 1, description="The maximum number of replicas to run simultaneously.\n" - ) - TotalCompletions: Optional[int] = Field( - None, - description="The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n", + model_config = ConfigDict( + populate_by_name=True, ) + max_concurrent: Annotated[ + int | None, + Field( + alias="MaxConcurrent", + description="The maximum number of replicas to run simultaneously.\n", + ), + ] = 1 + total_completions: Annotated[ + int | None, + Field( + alias="TotalCompletions", + description="The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n", + ), + ] = None class Mode(BaseModel): @@ -2182,21 +3700,30 @@ class Mode(BaseModel): Scheduling mode for the service. 
""" - Replicated: Optional[Replicated] = None - Global: Optional[dict[str, Any]] = None - ReplicatedJob: Optional[ReplicatedJob] = Field( - None, - description="The mode used for services with a finite number of tasks that run\nto a completed state.\n", - ) - GlobalJob: Optional[dict[str, Any]] = Field( - None, - description="The mode used for services which run a task to the completed state\non each valid node.\n", + model_config = ConfigDict( + populate_by_name=True, ) + replicated: Annotated[Replicated | None, Field(alias="Replicated")] = None + global_: Annotated[dict[str, Any] | None, Field(alias="Global")] = None + replicated_job: Annotated[ + ReplicatedJob | None, + Field( + alias="ReplicatedJob", + description="The mode used for services with a finite number of tasks that run\nto a completed state.\n", + ), + ] = None + global_job: Annotated[ + dict[str, Any] | None, + Field( + alias="GlobalJob", + description="The mode used for services which run a task to the completed state\non each valid node.\n", + ), + ] = None class FailureAction(str, Enum): """ - Action to take if an updated task fails to run, or stops running + Action to take if an updated task fails to run, or stops running during the update. """ @@ -2208,7 +3735,7 @@ class FailureAction(str, Enum): class Order(str, Enum): """ - The order of operations when rolling out an updated task. Either + The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. @@ -2223,34 +3750,55 @@ class UpdateConfig(BaseModel): Specification for the update strategy of the service. """ - Parallelism: Optional[int] = Field( - None, - description="Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n", - ) - Delay: Optional[int] = Field( - None, description="Amount of time between updates, in nanoseconds." - ) - FailureAction: Optional[FailureAction] = Field( - None, - description="Action to take if an updated task fails to run, or stops running\nduring the update.\n", - ) - Monitor: Optional[int] = Field( - None, - description="Amount of time to monitor each updated task for failures, in\nnanoseconds.\n", - ) - MaxFailureRatio: Optional[float] = Field( - 0, - description="The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", - ) - Order: Optional[Order] = Field( - None, - description="The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + parallelism: Annotated[ + int | None, + Field( + alias="Parallelism", + description="Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n", + ), + ] = None + delay: Annotated[ + int | None, + Field( + alias="Delay", description="Amount of time between updates, in nanoseconds." 
+ ), + ] = None + failure_action: Annotated[ + FailureAction | None, + Field( + alias="FailureAction", + description="Action to take if an updated task fails to run, or stops running\nduring the update.\n", + ), + ] = None + monitor: Annotated[ + int | None, + Field( + alias="Monitor", + description="Amount of time to monitor each updated task for failures, in\nnanoseconds.\n", + ), + ] = None + max_failure_ratio: Annotated[ + float | None, + Field( + alias="MaxFailureRatio", + description="The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", + ), + ] = 0 + order: Annotated[ + Order | None, + Field( + alias="Order", + description="The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n", + ), + ] = None class FailureAction1(str, Enum): """ - Action to take if an rolled back task fails to run, or stops + Action to take if an rolled back task fails to run, or stops running during the rollback. """ @@ -2259,40 +3807,73 @@ class FailureAction1(str, Enum): pause = "pause" +class Order1(str, Enum): + """ + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + + """ + + stop_first = "stop-first" + start_first = "start-first" + + class RollbackConfig(BaseModel): """ Specification for the rollback strategy of the service. """ - Parallelism: Optional[int] = Field( - None, - description="Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n", - ) - Delay: Optional[int] = Field( - None, - description="Amount of time between rollback iterations, in nanoseconds.\n", - ) - FailureAction: Optional[FailureAction1] = Field( - None, - description="Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n", - ) - Monitor: Optional[int] = Field( - None, - description="Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n", - ) - MaxFailureRatio: Optional[float] = Field( - 0, - description="The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", - ) - Order: Optional[Order] = Field( - None, - description="The order of operations when rolling back a task. 
Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + parallelism: Annotated[ + int | None, + Field( + alias="Parallelism", + description="Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n", + ), + ] = None + delay: Annotated[ + int | None, + Field( + alias="Delay", + description="Amount of time between rollback iterations, in nanoseconds.\n", + ), + ] = None + failure_action: Annotated[ + FailureAction1 | None, + Field( + alias="FailureAction", + description="Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n", + ), + ] = None + monitor: Annotated[ + int | None, + Field( + alias="Monitor", + description="Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n", + ), + ] = None + max_failure_ratio: Annotated[ + float | None, + Field( + alias="MaxFailureRatio", + description="The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", + ), + ] = 0 + order: Annotated[ + Order1 | None, + Field( + alias="Order", + description="The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n", + ), + ] = None class PublishMode(str, Enum): """ - The mode in which port is published. + The mode in which port is published.
@@ -2309,19 +3890,27 @@ class PublishMode(str, Enum): class EndpointPortConfig(BaseModel): - Name: Optional[str] = None - Protocol: Optional[Type] = None - TargetPort: Optional[int] = Field( - None, description="The port inside the container." - ) - PublishedPort: Optional[int] = Field( - None, description="The port on the swarm hosts." - ) - PublishMode: Optional[PublishMode] = Field( - PublishMode.ingress, - description='The mode in which port is published.\n\n
\n\n- "ingress" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- "host" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running.\n', - example="ingress", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str | None, Field(alias="Name")] = None + protocol: Annotated[Type | None, Field(alias="Protocol")] = None + target_port: Annotated[ + int | None, + Field(alias="TargetPort", description="The port inside the container."), + ] = None + published_port: Annotated[ + int | None, + Field(alias="PublishedPort", description="The port on the swarm hosts."), + ] = None + publish_mode: Annotated[ + PublishMode | None, + Field( + alias="PublishMode", + description='The mode in which port is published.\n\n
\n\n- "ingress" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- "host" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running.\n', + examples=["ingress"], + ), + ] = PublishMode.ingress class Mode1(str, Enum): @@ -2339,25 +3928,40 @@ class EndpointSpec(BaseModel): Properties that can be configured to access and load balance a service. """ - Mode: Optional[Mode1] = Field( - Mode1.vip, - description="The mode of resolution to use for internal load balancing between tasks.\n", - ) - Ports: Optional[list[EndpointPortConfig]] = Field( - None, - description="List of exposed ports that this service is accessible on from the\noutside. Ports can only be provided if `vip` resolution mode is used.\n", + model_config = ConfigDict( + populate_by_name=True, ) + mode: Annotated[ + Mode1 | None, + Field( + alias="Mode", + description="The mode of resolution to use for internal load balancing between tasks.\n", + ), + ] = Mode1.vip + ports: Annotated[ + list[EndpointPortConfig] | None, + Field( + alias="Ports", + description="List of exposed ports that this service is accessible on from the\noutside. Ports can only be provided if `vip` resolution mode is used.\n", + ), + ] = None class VirtualIP(BaseModel): - NetworkID: Optional[str] = None - Addr: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + network_id: Annotated[str | None, Field(alias="NetworkID")] = None + addr: Annotated[str | None, Field(alias="Addr")] = None class Endpoint(BaseModel): - Spec: Optional[EndpointSpec] = None - Ports: Optional[list[EndpointPortConfig]] = None - VirtualIPs: Optional[list[VirtualIP]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + spec: Annotated[EndpointSpec | None, Field(alias="Spec")] = None + ports: Annotated[list[EndpointPortConfig] | None, Field(alias="Ports")] = None + virtual_i_ps: Annotated[list[VirtualIP] | None, Field(alias="VirtualIPs")] = None class State(str, Enum): @@ -2371,69 +3975,109 @@ class UpdateStatus(BaseModel): The status of a service update. """ - State: Optional[State] = None - StartedAt: Optional[str] = None - CompletedAt: Optional[str] = None - Message: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + state: Annotated[State | None, Field(alias="State")] = None + started_at: Annotated[str | None, Field(alias="StartedAt")] = None + completed_at: Annotated[str | None, Field(alias="CompletedAt")] = None + message: Annotated[str | None, Field(alias="Message")] = None class ServiceStatus(BaseModel): """ - The status of the service's tasks. Provided only when requested as + The status of the service's tasks. Provided only when requested as part of a ServiceList operation. """ - RunningTasks: Optional[int] = Field( - None, - description="The number of tasks for the service currently in the Running state.\n", - example=7, - ) - DesiredTasks: Optional[int] = Field( - None, - description="The number of tasks for the service desired to be running.\nFor replicated services, this is the replica count from the\nservice spec. 
For global services, this is computed by taking\ncount of all tasks for the service with a Desired State other\nthan Shutdown.\n", - example=10, - ) - CompletedTasks: Optional[int] = Field( - None, - description="The number of tasks for a job that are in the Completed state.\nThis field must be cross-referenced with the service type, as the\nvalue of 0 may mean the service is not in a job mode, or it may\nmean the job-mode service has no tasks yet Completed.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + running_tasks: Annotated[ + int | None, + Field( + alias="RunningTasks", + description="The number of tasks for the service currently in the Running state.\n", + examples=[7], + ), + ] = None + desired_tasks: Annotated[ + int | None, + Field( + alias="DesiredTasks", + description="The number of tasks for the service desired to be running.\nFor replicated services, this is the replica count from the\nservice spec. For global services, this is computed by taking\ncount of all tasks for the service with a Desired State other\nthan Shutdown.\n", + examples=[10], + ), + ] = None + completed_tasks: Annotated[ + int | None, + Field( + alias="CompletedTasks", + description="The number of tasks for a job that are in the Completed state.\nThis field must be cross-referenced with the service type, as the\nvalue of 0 may mean the service is not in a job mode, or it may\nmean the job-mode service has no tasks yet Completed.\n", + ), + ] = None class JobStatus(BaseModel): """ - The status of the service when it is in one of ReplicatedJob or + The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. """ - JobIteration: Optional[ObjectVersion] = Field( - None, - description='JobIteration is a value increased each time a Job is executed,\nsuccessfully or otherwise. "Executed", in this case, means the\njob as a whole has been started, not that an individual Task has\nbeen launched. A job is "Executed" when its ServiceSpec is\nupdated. JobIteration can be used to disambiguate Tasks belonging\nto different executions of a job. Though JobIteration will\nincrease with each subsequent execution, it may not necessarily\nincrease by 1, and so JobIteration should not be used to\n', - ) - LastExecution: Optional[str] = Field( - None, - description="The last time, as observed by the server, that this job was\nstarted.\n", + model_config = ConfigDict( + populate_by_name=True, ) + job_iteration: Annotated[ + ObjectVersion | None, + Field( + alias="JobIteration", + description='JobIteration is a value increased each time a Job is executed,\nsuccessfully or otherwise. "Executed", in this case, means the\njob as a whole has been started, not that an individual Task has\nbeen launched. A job is "Executed" when its ServiceSpec is\nupdated. JobIteration can be used to disambiguate Tasks belonging\nto different executions of a job. 
Though JobIteration will\nincrease with each subsequent execution, it may not necessarily\nincrease by 1, and so JobIteration should not be used to\n', + ), + ] = None + last_execution: Annotated[ + str | None, + Field( + alias="LastExecution", + description="The last time, as observed by the server, that this job was\nstarted.\n", + ), + ] = None class ImageDeleteResponseItem(BaseModel): - Untagged: Optional[str] = Field( - None, description="The image ID of an image that was untagged" - ) - Deleted: Optional[str] = Field( - None, description="The image ID of an image that was deleted" + model_config = ConfigDict( + populate_by_name=True, ) + untagged: Annotated[ + str | None, + Field( + alias="Untagged", description="The image ID of an image that was untagged" + ), + ] = None + deleted: Annotated[ + str | None, + Field(alias="Deleted", description="The image ID of an image that was deleted"), + ] = None class ServiceUpdateResponse(BaseModel): - Warnings: Optional[list[str]] = Field(None, description="Optional warning messages") + model_config = ConfigDict( + populate_by_name=True, + ) + warnings: Annotated[ + list[str] | None, + Field(alias="Warnings", description="Optional warning messages"), + ] = None class HostConfig1(BaseModel): - NetworkMode: Optional[str] = None + model_config = ConfigDict( + populate_by_name=True, + ) + network_mode: Annotated[str | None, Field(alias="NetworkMode")] = None class Driver(BaseModel): @@ -2441,76 +4085,134 @@ class Driver(BaseModel): Driver represents a driver (network, logging, secrets). """ - Name: str = Field(..., description="Name of the driver.", example="some-driver") - Options: Optional[dict[str, str]] = Field( - None, - description="Key/value map of driver-specific options.", - example={ - "OptionA": "value for driver-specific option A", - "OptionB": "value for driver-specific option B", - }, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str, + Field( + alias="Name", description="Name of the driver.", examples=["some-driver"] + ), + ] + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="Key/value map of driver-specific options.", + examples=[ + { + "OptionA": "value for driver-specific option A", + "OptionB": "value for driver-specific option B", + } + ], + ), + ] = None class SecretSpec(BaseModel): - Name: Optional[str] = Field(None, description="User-defined name of the secret.") - Labels: Optional[dict[str, str]] = Field( - None, - description="User-defined key/value metadata.", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) - Data: Optional[str] = Field( - None, - description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n", - example="", - ) - Driver: Optional[Driver] = Field( - None, - description="Name of the secrets driver used to fetch the secret's value from an\nexternal secret store.\n", - ) - Templating: Optional[Driver] = Field( - None, - description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. 
If no driver is set, no templating is used.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, Field(alias="Name", description="User-defined name of the secret.") + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None + data: Annotated[ + str | None, + Field( + alias="Data", + description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\ndata to store as secret.\n\nThis field is only used to _create_ a secret, and is not returned by\nother endpoints.\n", + examples=[""], + ), + ] = None + driver: Annotated[ + Driver | None, + Field( + alias="Driver", + description="Name of the secrets driver used to fetch the secret's value from an\nexternal secret store.\n", + ), + ] = None + templating: Annotated[ + Driver | None, + Field( + alias="Templating", + description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n", + ), + ] = None class Secret1(BaseModel): - ID: Optional[str] = Field(None, example="blt1owaxmitz71s9v5zh81zun") - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = Field(None, example="2017-07-20T13:55:28.678958722Z") - UpdatedAt: Optional[str] = Field(None, example="2017-07-20T13:55:28.678958722Z") - Spec: Optional[SecretSpec] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, Field(alias="ID", examples=["blt1owaxmitz71s9v5zh81zun"]) + ] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[ + str | None, + Field(alias="CreatedAt", examples=["2017-07-20T13:55:28.678958722Z"]), + ] = None + updated_at: Annotated[ + str | None, + Field(alias="UpdatedAt", examples=["2017-07-20T13:55:28.678958722Z"]), + ] = None + spec: Annotated[SecretSpec | None, Field(alias="Spec")] = None class ConfigSpec(BaseModel): - Name: Optional[str] = Field(None, description="User-defined name of the config.") - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." - ) - Data: Optional[str] = Field( - None, - description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\nconfig data.\n", - ) - Templating: Optional[Driver] = Field( - None, - description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. If no driver is set, no templating is used.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, Field(alias="Name", description="User-defined name of the config.") + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + data: Annotated[ + str | None, + Field( + alias="Data", + description="Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))\nconfig data.\n", + ), + ] = None + templating: Annotated[ + Driver | None, + Field( + alias="Templating", + description="Templating driver, if applicable\n\nTemplating controls whether and how to evaluate the config payload as\na template. 
If no driver is set, no templating is used.\n", + ), + ] = None class Config2(BaseModel): - ID: Optional[str] = None - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = None - UpdatedAt: Optional[str] = None - Spec: Optional[ConfigSpec] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[str | None, Field(alias="ID")] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[str | None, Field(alias="CreatedAt")] = None + updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None + spec: Annotated[ConfigSpec | None, Field(alias="Spec")] = None class Status2(str, Enum): """ - String representation of the container state. Can be one of "created", + String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". """ @@ -2529,22 +4231,44 @@ class ContainerWaitExitError(BaseModel): container waiting error, if any """ - Message: Optional[str] = Field(None, description="Details of an error") + model_config = ConfigDict( + populate_by_name=True, + ) + message: Annotated[ + str | None, Field(alias="Message", description="Details of an error") + ] = None class Platform1(BaseModel): - Name: str + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[str, Field(alias="Name")] class Component(BaseModel): - Name: str = Field(..., description="Name of the component\n", example="Engine") - Version: str = Field( - ..., description="Version of the component\n", example="19.03.12" - ) - Details: Optional[dict[str, Any]] = Field( - None, - description="Key/value pairs of strings with additional information about the\ncomponent. These values are intended for informational purposes\nonly, and their content is not defined, and not part of the API\nspecification.\n\nThese messages can be printed by the client as information to the user.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str, + Field(alias="Name", description="Name of the component\n", examples=["Engine"]), + ] + version: Annotated[ + str, + Field( + alias="Version", + description="Version of the component\n", + examples=["19.03.12"], + ), + ] + details: Annotated[ + dict[str, Any] | None, + Field( + alias="Details", + description="Key/value pairs of strings with additional information about the\ncomponent. 
These values are intended for informational purposes\nonly, and their content is not defined, and not part of the API\nspecification.\n\nThese messages can be printed by the client as information to the user.\n", + ), + ] = None class SystemVersion(BaseModel): @@ -2553,58 +4277,94 @@ class SystemVersion(BaseModel): """ - Platform: Optional[Platform1] = None - Components: Optional[list[Component]] = Field( - None, description="Information about system components\n" - ) - Version: Optional[str] = Field( - None, description="The version of the daemon", example="19.03.12" - ) - ApiVersion: Optional[str] = Field( - None, - description="The default (and highest) API version that is supported by the daemon\n", - example="1.40", - ) - MinAPIVersion: Optional[str] = Field( - None, - description="The minimum API version that is supported by the daemon\n", - example="1.12", - ) - GitCommit: Optional[str] = Field( - None, - description="The Git commit of the source code that was used to build the daemon\n", - example="48a66213fe", - ) - GoVersion: Optional[str] = Field( - None, - description="The version Go used to compile the daemon, and the version of the Go\nruntime in use.\n", - example="go1.13.14", - ) - Os: Optional[str] = Field( - None, - description='The operating system that the daemon is running on ("linux" or "windows")\n', - example="linux", - ) - Arch: Optional[str] = Field( - None, - description="The architecture that the daemon is running on\n", - example="amd64", - ) - KernelVersion: Optional[str] = Field( - None, - description="The kernel version (`uname -r`) that the daemon is running on.\n\nThis field is omitted when empty.\n", - example="4.19.76-linuxkit", - ) - Experimental: Optional[bool] = Field( - None, - description="Indicates if the daemon is started with experimental features enabled.\n\nThis field is omitted when empty / false.\n", - example=True, - ) - BuildTime: Optional[str] = Field( - None, - description="The date and time that the daemon was compiled.\n", - example="2020-06-22T15:49:27.000000000+00:00", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + platform: Annotated[Platform1 | None, Field(alias="Platform")] = None + components: Annotated[ + list[Component] | None, + Field(alias="Components", description="Information about system components\n"), + ] = None + version: Annotated[ + str | None, + Field( + alias="Version", + description="The version of the daemon", + examples=["19.03.12"], + ), + ] = None + api_version: Annotated[ + str | None, + Field( + alias="ApiVersion", + description="The default (and highest) API version that is supported by the daemon\n", + examples=["1.40"], + ), + ] = None + min_api_version: Annotated[ + str | None, + Field( + alias="MinAPIVersion", + description="The minimum API version that is supported by the daemon\n", + examples=["1.12"], + ), + ] = None + git_commit: Annotated[ + str | None, + Field( + alias="GitCommit", + description="The Git commit of the source code that was used to build the daemon\n", + examples=["48a66213fe"], + ), + ] = None + go_version: Annotated[ + str | None, + Field( + alias="GoVersion", + description="The version Go used to compile the daemon, and the version of the Go\nruntime in use.\n", + examples=["go1.13.14"], + ), + ] = None + os: Annotated[ + str | None, + Field( + alias="Os", + description='The operating system that the daemon is running on ("linux" or "windows")\n', + examples=["linux"], + ), + ] = None + arch: Annotated[ + str | None, + Field( + alias="Arch", + description="The 
architecture that the daemon is running on\n", + examples=["amd64"], + ), + ] = None + kernel_version: Annotated[ + str | None, + Field( + alias="KernelVersion", + description="The kernel version (`uname -r`) that the daemon is running on.\n\nThis field is omitted when empty.\n", + examples=["4.19.76-linuxkit"], + ), + ] = None + experimental: Annotated[ + bool | None, + Field( + alias="Experimental", + description="Indicates if the daemon is started with experimental features enabled.\n\nThis field is omitted when empty / false.\n", + examples=[True], + ), + ] = None + build_time: Annotated[ + str | None, + Field( + alias="BuildTime", + description="The date and time that the daemon was compiled.\n", + examples=["2020-06-22T15:49:27.000000000+00:00"], + ), + ] = None class CgroupDriver(str, Enum): @@ -2630,7 +4390,7 @@ class CgroupVersion(str, Enum): class Isolation2(str, Enum): """ - Represents the isolation technology to use as a default for containers. + Represents the isolation technology to use as a default for containers. The supported values are platform-specific. If no isolation value is specified on daemon start, on Windows client, @@ -2646,15 +4406,26 @@ class Isolation2(str, Enum): class DefaultAddressPool(BaseModel): - Base: Optional[str] = Field( - None, description="The network address in CIDR format", example="10.10.0.0/16" - ) - Size: Optional[int] = Field(None, description="The network pool size", example="24") + model_config = ConfigDict( + populate_by_name=True, + ) + base: Annotated[ + str | None, + Field( + alias="Base", + description="The network address in CIDR format", + examples=["10.10.0.0/16"], + ), + ] = None + size: Annotated[ + int | None, + Field(alias="Size", description="The network pool size", examples=["24"]), + ] = None class PluginsInfo(BaseModel): """ - Available plugins per type. + Available plugins per type.
@@ -2664,36 +4435,52 @@ class PluginsInfo(BaseModel): """ - Volume: Optional[list[str]] = Field( - None, - description="Names of available volume-drivers, and network-driver plugins.", - example=["local"], - ) - Network: Optional[list[str]] = Field( - None, - description="Names of available network-drivers, and network-driver plugins.", - example=["bridge", "host", "ipvlan", "macvlan", "null", "overlay"], - ) - Authorization: Optional[list[str]] = Field( - None, - description="Names of available authorization plugins.", - example=["img-authz-plugin", "hbm"], - ) - Log: Optional[list[str]] = Field( - None, - description="Names of available logging-drivers, and logging-driver plugins.", - example=[ - "awslogs", - "fluentd", - "gcplogs", - "gelf", - "journald", - "json-file", - "logentries", - "splunk", - "syslog", - ], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + volume: Annotated[ + list[str] | None, + Field( + alias="Volume", + description="Names of available volume-drivers, and network-driver plugins.", + examples=[["local"]], + ), + ] = None + network: Annotated[ + list[str] | None, + Field( + alias="Network", + description="Names of available network-drivers, and network-driver plugins.", + examples=[["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]], + ), + ] = None + authorization: Annotated[ + list[str] | None, + Field( + alias="Authorization", + description="Names of available authorization plugins.", + examples=[["img-authz-plugin", "hbm"]], + ), + ] = None + log: Annotated[ + list[str] | None, + Field( + alias="Log", + description="Names of available logging-drivers, and logging-driver plugins.", + examples=[ + [ + "awslogs", + "fluentd", + "gcplogs", + "gelf", + "journald", + "json-file", + "splunk", + "syslog", + ] + ], + ), + ] = None class IndexInfo(BaseModel): @@ -2701,35 +4488,52 @@ class IndexInfo(BaseModel): IndexInfo contains information about a registry. """ - Name: Optional[str] = Field( - None, - description='Name of the registry, such as "docker.io".\n', - example="docker.io", - ) - Mirrors: Optional[list[str]] = Field( - None, - description="List of mirrors, expressed as URIs.\n", - example=[ - "https://hub-mirror.corp.example.com:5000/", - "https://registry-2.docker.io/", - "https://registry-3.docker.io/", - ], - ) - Secure: Optional[bool] = Field( - None, - description="Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. 
For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n", - example=True, - ) - Official: Optional[bool] = Field( - None, - description="Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n", - example=True, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field( + alias="Name", + description='Name of the registry, such as "docker.io".\n', + examples=["docker.io"], + ), + ] = None + mirrors: Annotated[ + list[str] | None, + Field( + alias="Mirrors", + description="List of mirrors, expressed as URIs.\n", + examples=[ + [ + "https://hub-mirror.corp.example.com:5000/", + "https://registry-2.docker.io/", + "https://registry-3.docker.io/", + ] + ], + ), + ] = None + secure: Annotated[ + bool | None, + Field( + alias="Secure", + description="Indicates if the registry is part of the list of insecure\nregistries.\n\nIf `false`, the registry is insecure. Insecure registries accept\nun-encrypted (HTTP) and/or untrusted (HTTPS with certificates from\nunknown CAs) communication.\n\n> **Warning**: Insecure registries can be useful when running a local\n> registry. However, because its use creates security vulnerabilities\n> it should ONLY be enabled for testing purposes. For increased\n> security, users should add their CA to their system's list of\n> trusted CAs instead of enabling this option.\n", + examples=[True], + ), + ] = None + official: Annotated[ + bool | None, + Field( + alias="Official", + description="Indicates whether this is an official registry (i.e., Docker Hub / docker.io)\n", + examples=[True], + ), + ] = None class Runtime(BaseModel): """ - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI @@ -2738,36 +4542,53 @@ class Runtime(BaseModel): """ - path: Optional[str] = Field( - None, - description="Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n", - example="/usr/local/bin/my-oci-runtime", - ) - runtimeArgs: Optional[list[str]] = Field( - None, - description="List of command-line arguments to pass to the runtime when invoked.\n", - example=["--debug", "--systemd-cgroup=false"], + model_config = ConfigDict( + populate_by_name=True, ) + path: Annotated[ + str | None, + Field( + description="Name and, optional, path, of the OCI executable binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n", + examples=["/usr/local/bin/my-oci-runtime"], + ), + ] = None + runtime_args: Annotated[ + list[str] | None, + Field( + alias="runtimeArgs", + description="List of command-line arguments to pass to the runtime when invoked.\n", + examples=[["--debug", "--systemd-cgroup=false"]], + ), + ] = None class Commit(BaseModel): """ - Commit holds the Git-commit (SHA1) that a binary was built from, as + Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. 
""" - ID: Optional[str] = Field( - None, - description="Actual commit ID of external tool.", - example="cfb82a876ecc11b5ca0977d1733adbe58599088a", - ) - Expected: Optional[str] = Field( - None, - description="Commit ID of external tool expected by dockerd as set at build time.\n", - example="2d41c047c83e09a6d61d464906feb2a2f3c52aa4", + model_config = ConfigDict( + populate_by_name=True, ) + id: Annotated[ + str | None, + Field( + alias="ID", + description="Actual commit ID of external tool.", + examples=["cfb82a876ecc11b5ca0977d1733adbe58599088a"], + ), + ] = None + expected: Annotated[ + str | None, + Field( + alias="Expected", + description="Commit ID of external tool expected by dockerd as set at build time.\n", + examples=["2d41c047c83e09a6d61d464906feb2a2f3c52aa4"], + ), + ] = None class LocalNodeState(str, Enum): @@ -2775,7 +4596,7 @@ class LocalNodeState(str, Enum): Current local status of this node. """ - _ = "" + field_ = "" inactive = "inactive" pending = "pending" active = "active" @@ -2788,12 +4609,23 @@ class PeerNode(BaseModel): Represents a peer-node in the swarm """ - NodeID: Optional[str] = Field( - None, description="Unique identifier of for this node in the swarm." - ) - Addr: Optional[str] = Field( - None, description="IP address and ports at which this node can be reached.\n" + model_config = ConfigDict( + populate_by_name=True, ) + node_id: Annotated[ + str | None, + Field( + alias="NodeID", + description="Unique identifier of for this node in the swarm.", + ), + ] = None + addr: Annotated[ + str | None, + Field( + alias="Addr", + description="IP address and ports at which this node can be reached.\n", + ), + ] = None class NetworkAttachmentConfig(BaseModel): @@ -2802,40 +4634,66 @@ class NetworkAttachmentConfig(BaseModel): """ - Target: Optional[str] = Field( - None, - description="The target network for attachment. Must be a network name or ID.\n", - ) - Aliases: Optional[list[str]] = Field( - None, - description="Discoverable alternate names for the service on this network.\n", - ) - DriverOpts: Optional[dict[str, str]] = Field( - None, description="Driver attachment options for the network target.\n" - ) + model_config = ConfigDict( + populate_by_name=True, + ) + target: Annotated[ + str | None, + Field( + alias="Target", + description="The target network for attachment. Must be a network name or ID.\n", + ), + ] = None + aliases: Annotated[ + list[str] | None, + Field( + alias="Aliases", + description="Discoverable alternate names for the service on this network.\n", + ), + ] = None + driver_opts: Annotated[ + dict[str, str] | None, + Field( + alias="DriverOpts", + description="Driver attachment options for the network target.\n", + ), + ] = None class EventActor(BaseModel): """ - Actor describes something that generates events, like a container, network, + Actor describes something that generates events, like a container, network, or a volume. 
""" - ID: Optional[str] = Field( - None, - description="The ID of the object emitting the event", - example="ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", - ) - Attributes: Optional[dict[str, str]] = Field( - None, - description="Various key/value attributes of the object, depending on its type.\n", - example={ - "com.example.some-label": "some-label-value", - "image": "alpine:latest", - "name": "my-container", - }, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="ID", + description="The ID of the object emitting the event", + examples=[ + "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + ], + ), + ] = None + attributes: Annotated[ + dict[str, str] | None, + Field( + alias="Attributes", + description="Various key/value attributes of the object, depending on its type.\n", + examples=[ + { + "com.example.some-label": "some-label-value", + "image": "alpine:latest", + "name": "my-container", + } + ], + ), + ] = None class Type5(str, Enum): @@ -2858,7 +4716,7 @@ class Type5(str, Enum): class Scope1(str, Enum): """ - Scope of the event. Engine events are `local` scope. Cluster (Swarm) + Scope of the event. Engine events are `local` scope. Cluster (Swarm) events are `swarm` scope. """ @@ -2873,108 +4731,162 @@ class SystemEventsResponse(BaseModel): """ - Type: Optional[Type5] = Field( - None, description="The type of object emitting the event", example="container" - ) - Action: Optional[str] = Field( - None, description="The type of event", example="create" - ) - Actor: Optional[EventActor] = None - scope: Optional[Scope1] = Field( - None, - description="Scope of the event. Engine events are `local` scope. Cluster (Swarm)\nevents are `swarm` scope.\n", - ) - time: Optional[int] = Field( - None, description="Timestamp of event", example=1629574695 - ) - timeNano: Optional[int] = Field( - None, - description="Timestamp of event, with nanosecond accuracy", - example=1629574695515050031, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Type5 | None, + Field( + alias="Type", + description="The type of object emitting the event", + examples=["container"], + ), + ] = None + action: Annotated[ + str | None, + Field(alias="Action", description="The type of event", examples=["create"]), + ] = None + actor: Annotated[EventActor | None, Field(alias="Actor")] = None + scope: Annotated[ + Scope1 | None, + Field( + description="Scope of the event. Engine events are `local` scope. Cluster (Swarm)\nevents are `swarm` scope.\n" + ), + ] = None + time: Annotated[ + int | None, Field(description="Timestamp of event", examples=[1629574695]) + ] = None + time_nano: Annotated[ + int | None, + Field( + alias="timeNano", + description="Timestamp of event, with nanosecond accuracy", + examples=[1629574695515050031], + ), + ] = None class OCIDescriptor(BaseModel): """ - A descriptor struct containing digest, media type, and size, as defined in + A descriptor struct containing digest, media type, and size, as defined in the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). 
""" - mediaType: Optional[str] = Field( - None, - description="The media type of the object this schema refers to.\n", - example="application/vnd.docker.distribution.manifest.v2+json", - ) - digest: Optional[str] = Field( - None, - description="The digest of the targeted content.\n", - example="sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", - ) - size: Optional[int] = Field( - None, description="The size in bytes of the blob.\n", example=3987495 - ) + model_config = ConfigDict( + populate_by_name=True, + ) + media_type: Annotated[ + str | None, + Field( + alias="mediaType", + description="The media type of the object this schema refers to.\n", + examples=["application/vnd.docker.distribution.manifest.v2+json"], + ), + ] = None + digest: Annotated[ + str | None, + Field( + description="The digest of the targeted content.\n", + examples=[ + "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + ], + ), + ] = None + size: Annotated[ + int | None, + Field(description="The size in bytes of the blob.\n", examples=[3987495]), + ] = None class OCIPlatform(BaseModel): """ - Describes the platform which the image in the manifest runs on, as defined + Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). """ - architecture: Optional[str] = Field( - None, - description="The CPU architecture, for example `amd64` or `ppc64`.\n", - example="arm", - ) - os: Optional[str] = Field( - None, - description="The operating system, for example `linux` or `windows`.\n", - example="windows", - ) - os_version: Optional[str] = Field( - None, - alias="os.version", - description="Optional field specifying the operating system version, for example on\nWindows `10.0.19041.1165`.\n", - example="10.0.19041.1165", - ) - os_features: Optional[list[str]] = Field( - None, - alias="os.features", - description="Optional field specifying an array of strings, each listing a required\nOS feature (for example on Windows `win32k`).\n", - example=["win32k"], - ) - variant: Optional[str] = Field( - None, - description="Optional field specifying a variant of the CPU, for example `v7` to\nspecify ARMv7 when architecture is `arm`.\n", - example="v7", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + architecture: Annotated[ + str | None, + Field( + description="The CPU architecture, for example `amd64` or `ppc64`.\n", + examples=["arm"], + ), + ] = None + os: Annotated[ + str | None, + Field( + description="The operating system, for example `linux` or `windows`.\n", + examples=["windows"], + ), + ] = None + os_version: Annotated[ + str | None, + Field( + alias="os.version", + description="Optional field specifying the operating system version, for example on\nWindows `10.0.19041.1165`.\n", + examples=["10.0.19041.1165"], + ), + ] = None + os_features: Annotated[ + list[str] | None, + Field( + alias="os.features", + description="Optional field specifying an array of strings, each listing a required\nOS feature (for example on Windows `win32k`).\n", + examples=[["win32k"]], + ), + ] = None + variant: Annotated[ + str | None, + Field( + description="Optional field specifying a variant of the CPU, for example `v7` to\nspecify ARMv7 when architecture is `arm`.\n", + examples=["v7"], + ), + ] = None class DistributionInspectResponse(BaseModel): """ - Describes the result obtained from contacting the registry to retrieve + Describes the result obtained 
from contacting the registry to retrieve image metadata. """ - Descriptor: OCIDescriptor - Platforms: list[OCIPlatform] = Field( - ..., description="An array containing all platforms supported by the image.\n" + model_config = ConfigDict( + populate_by_name=True, ) + descriptor: Annotated[OCIDescriptor, Field(alias="Descriptor")] + platforms: Annotated[ + list[OCIPlatform], + Field( + alias="Platforms", + description="An array containing all platforms supported by the image.\n", + ), + ] class ResourceObject(BaseModel): """ - An object describing the resources which can be advertised by a node and + An object describing the resources which can be advertised by a node and requested by a task. """ - NanoCPUs: Optional[int] = Field(None, example=4000000000) - MemoryBytes: Optional[int] = Field(None, example=8272408576) - GenericResources: Optional[GenericResources] = None + model_config = ConfigDict( + populate_by_name=True, + ) + nano_cp_us: Annotated[ + int | None, Field(alias="NanoCPUs", examples=[4000000000]) + ] = None + memory_bytes: Annotated[ + int | None, Field(alias="MemoryBytes", examples=[8272408576]) + ] = None + generic_resources: Annotated[ + GenericResources | None, Field(alias="GenericResources") + ] = None class Health(BaseModel): @@ -2983,199 +4895,120 @@ class Health(BaseModel): """ - Status: Optional[Status] = Field( - None, - description='Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- "none" Indicates there is no healthcheck\n- "starting" Starting indicates that the container is not yet ready\n- "healthy" Healthy indicates that the container is running correctly\n- "unhealthy" Unhealthy indicates that the container has a problem\n', - example="healthy", - ) - FailingStreak: Optional[int] = Field( - None, - description="FailingStreak is the number of consecutive failures", - example=0, - ) - Log: Optional[list[HealthcheckResult]] = Field( - None, description="Log contains the last few results (oldest first)\n" - ) + model_config = ConfigDict( + populate_by_name=True, + ) + status: Annotated[ + Status | None, + Field( + alias="Status", + description='Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- "none" Indicates there is no healthcheck\n- "starting" Starting indicates that the container is not yet ready\n- "healthy" Healthy indicates that the container is running correctly\n- "unhealthy" Unhealthy indicates that the container has a problem\n', + examples=["healthy"], + ), + ] = None + failing_streak: Annotated[ + int | None, + Field( + alias="FailingStreak", + description="FailingStreak is the number of consecutive failures", + examples=[0], + ), + ] = None + log: Annotated[ + list[HealthcheckResult] | None, + Field( + alias="Log", + description="Log contains the last few results (oldest first)\n", + ), + ] = None + + +class PortMap(RootModel[dict[str, list[PortBinding]] | None]): + """ + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. -class HostConfig(Resources): - """ - Container configuration that depends on the host we are running on """ - Binds: Optional[list[str]] = Field( - None, - description="A list of volume bindings for this container. Each volume binding\nis a string in one of these forms:\n\n- `host-src:container-dest[:options]` to bind-mount a host path\n into the container. 
Both `host-src`, and `container-dest` must\n be an _absolute_ path.\n- `volume-name:container-dest[:options]` to bind-mount a volume\n managed by a volume driver into the container. `container-dest`\n must be an _absolute_ path.\n\n`options` is an optional, comma-delimited list of:\n\n- `nocopy` disables automatic copying of data from the container\n path to the volume. The `nocopy` flag only applies to named volumes.\n- `[ro|rw]` mounts a volume read-only or read-write, respectively.\n If omitted or set to `rw`, volumes are mounted read-write.\n- `[z|Z]` applies SELinux labels to allow or deny multiple containers\n to read and write to the same volume.\n - `z`: a _shared_ content label is applied to the content. This\n label indicates that multiple containers can share the volume\n content, for both reading and writing.\n - `Z`: a _private unshared_ label is applied to the content.\n This label indicates that only the current container can use\n a private volume. Labeling systems such as SELinux require\n proper labels to be placed on volume content that is mounted\n into a container. Without a label, the security system can\n prevent a container's processes from using the content. By\n default, the labels set by the host operating system are not\n modified.\n- `[[r]shared|[r]slave|[r]private]` specifies mount\n [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).\n This only applies to bind-mounted volumes, not internal volumes\n or named volumes. Mount propagation requires the source mount\n point (the location where the source directory is mounted in the\n host operating system) to have the correct propagation properties.\n For shared volumes, the source mount point must be set to `shared`.\n For slave volumes, the mount must be set to either `shared` or\n `slave`.\n", - ) - ContainerIDFile: Optional[str] = Field( - None, description="Path to a file where the container ID is written" - ) - LogConfig: Optional[LogConfig] = Field( - None, description="The logging configuration for this container" - ) - NetworkMode: Optional[str] = Field( - None, - description="Network mode to use for this container. Supported standard values\nare: `bridge`, `host`, `none`, and `container:`. Any\nother value is taken as a custom network's name to which this\ncontainer should connect to.\n", - ) - PortBindings: Optional[PortMap] = None - RestartPolicy: Optional[RestartPolicy] = None - AutoRemove: Optional[bool] = Field( - None, - description="Automatically remove the container when the container's process\nexits. This has no effect if `RestartPolicy` is set.\n", - ) - VolumeDriver: Optional[str] = Field( - None, description="Driver that this container uses to mount volumes." - ) - VolumesFrom: Optional[list[str]] = Field( - None, - description="A list of volumes to inherit from another container, specified in\nthe form `[:]`.\n", - ) - Mounts: Optional[list[Mount]] = Field( - None, description="Specification for mounts to be added to the container.\n" - ) - CapAdd: Optional[list[str]] = Field( - None, - description="A list of kernel capabilities to add to the container. Conflicts\nwith option 'Capabilities'.\n", - ) - CapDrop: Optional[list[str]] = Field( - None, - description="A list of kernel capabilities to drop from the container. Conflicts\nwith option 'Capabilities'.\n", - ) - CgroupnsMode: Optional[CgroupnsMode] = Field( - None, - description='cgroup namespace mode for the container. 
Possible values are:\n\n- `"private"`: the container runs in its own private cgroup namespace\n- `"host"`: use the host system\'s cgroup namespace\n\nIf not specified, the daemon default is used, which can either be `"private"`\nor `"host"`, depending on daemon version, kernel support and configuration.\n', - ) - Dns: Optional[list[str]] = Field( - None, description="A list of DNS servers for the container to use." - ) - DnsOptions: Optional[list[str]] = Field(None, description="A list of DNS options.") - DnsSearch: Optional[list[str]] = Field( - None, description="A list of DNS search domains." - ) - ExtraHosts: Optional[list[str]] = Field( - None, - description='A list of hostnames/IP mappings to add to the container\'s `/etc/hosts`\nfile. Specified in the form `["hostname:IP"]`.\n', - ) - GroupAdd: Optional[list[str]] = Field( - None, - description="A list of additional groups that the container process will run as.\n", - ) - IpcMode: Optional[str] = Field( - None, - description='IPC sharing mode for the container. Possible values are:\n\n- `"none"`: own private IPC namespace, with /dev/shm not mounted\n- `"private"`: own private IPC namespace\n- `"shareable"`: own private IPC namespace, with a possibility to share it with other containers\n- `"container:"`: join another (shareable) container\'s IPC namespace\n- `"host"`: use the host system\'s IPC namespace\n\nIf not specified, daemon default is used, which can either be `"private"`\nor `"shareable"`, depending on daemon version and configuration.\n', - ) - Cgroup: Optional[str] = Field(None, description="Cgroup to use for the container.") - Links: Optional[list[str]] = Field( - None, - description="A list of links for the container in the form `container_name:alias`.\n", - ) - OomScoreAdj: Optional[int] = Field( - None, - description="An integer value containing the score given to the container in\norder to tune OOM killer preferences.\n", - example=500, - ) - PidMode: Optional[str] = Field( - None, - description='Set the PID (Process) Namespace mode for the container. It can be\neither:\n\n- `"container:"`: joins another container\'s PID namespace\n- `"host"`: use the host\'s PID namespace inside the container\n', - ) - Privileged: Optional[bool] = Field( - None, description="Gives the container full access to the host." - ) - PublishAllPorts: Optional[bool] = Field( - None, - description="Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when\nthe container starts. The allocated port might be changed when\nrestarting the container.\n\nThe port is selected from the ephemeral port range that depends on\nthe kernel. For example, on Linux the range is defined by\n`/proc/sys/net/ipv4/ip_local_port_range`.\n", - ) - ReadonlyRootfs: Optional[bool] = Field( - None, description="Mount the container's root filesystem as read only." - ) - SecurityOpt: Optional[list[str]] = Field( - None, - description="A list of string values to customize labels for MLS systems, such\nas SELinux.\n", - ) - StorageOpt: Optional[dict[str, str]] = Field( - None, - description='Storage driver options for this container, in the form `{"size": "120G"}`.\n', - ) - Tmpfs: Optional[dict[str, str]] = Field( - None, - description='A map of container directories which should be replaced by tmpfs\nmounts, and their corresponding mount options. 
For example:\n\n```\n{ "/run": "rw,noexec,nosuid,size=65536k" }\n```\n', - ) - UTSMode: Optional[str] = Field( - None, description="UTS namespace to use for the container." - ) - UsernsMode: Optional[str] = Field( - None, - description="Sets the usernamespace mode for the container when usernamespace\nremapping option is enabled.\n", - ) - ShmSize: Optional[int] = Field( - None, - description="Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.\n", - ge=0, - ) - Sysctls: Optional[dict[str, str]] = Field( - None, - description='A list of kernel parameters (sysctls) to set in the container.\nFor example:\n\n```\n{"net.ipv4.ip_forward": "1"}\n```\n', - ) - Runtime: Optional[str] = Field( - None, description="Runtime to use with this container." - ) - ConsoleSize: Optional[list[ConsoleSizeItem]] = Field( - None, - description="Initial console size, as an `[height, width]` array. (Windows only)\n", - max_items=2, - min_items=2, - ) - Isolation: Optional[Isolation] = Field( - None, description="Isolation technology of the container. (Windows only)\n" - ) - MaskedPaths: Optional[list[str]] = Field( - None, - description="The list of paths to be masked inside the container (this overrides\nthe default set of paths).\n", - ) - ReadonlyPaths: Optional[list[str]] = Field( - None, - description="The list of paths to be set as read-only inside the container\n(this overrides the default set of paths).\n", + model_config = ConfigDict( + populate_by_name=True, ) + root: dict[str, list[PortBinding]] | None = None class IPAM(BaseModel): - Driver: Optional[str] = Field( - "default", description="Name of the IPAM driver to use." - ) - Config_: Optional[list[IPAMConfig]] = Field( - None, - alias="Config", - description='List of IPAM configuration options, specified as a map:\n\n```\n{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }\n```\n', - ) - Options: Optional[dict[str, str]] = Field( - None, description="Driver-specific options, specified as a map." 
- ) + model_config = ConfigDict( + populate_by_name=True, + ) + driver: Annotated[ + str | None, + Field( + alias="Driver", + description="Name of the IPAM driver to use.", + examples=["default"], + ), + ] = "default" + config: Annotated[ + list[IPAMConfig] | None, + Field( + alias="Config", + description='List of IPAM configuration options, specified as a map:\n\n```\n{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }\n```\n', + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="Driver-specific options, specified as a map.", + examples=[{"foo": "bar"}], + ), + ] = None class BuildInfo(BaseModel): - id: Optional[str] = None - stream: Optional[str] = None - error: Optional[str] = None - errorDetail: Optional[ErrorDetail] = None - status: Optional[str] = None - progress: Optional[str] = None - progressDetail: Optional[ProgressDetail] = None - aux: Optional[ImageID] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: str | None = None + stream: str | None = None + error: str | None = None + error_detail: Annotated[ErrorDetail | None, Field(alias="errorDetail")] = None + status: str | None = None + progress: str | None = None + progress_detail: Annotated[ + ProgressDetail | None, Field(alias="progressDetail") + ] = None + aux: ImageID | None = None class CreateImageInfo(BaseModel): - id: Optional[str] = None - error: Optional[str] = None - status: Optional[str] = None - progress: Optional[str] = None - progressDetail: Optional[ProgressDetail] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: str | None = None + error: str | None = None + error_detail: Annotated[ErrorDetail | None, Field(alias="errorDetail")] = None + status: str | None = None + progress: str | None = None + progress_detail: Annotated[ + ProgressDetail | None, Field(alias="progressDetail") + ] = None class PushImageInfo(BaseModel): - error: Optional[str] = None - status: Optional[str] = None - progress: Optional[str] = None - progressDetail: Optional[ProgressDetail] = None + model_config = ConfigDict( + populate_by_name=True, + ) + error: str | None = None + status: str | None = None + progress: str | None = None + progress_detail: Annotated[ + ProgressDetail | None, Field(alias="progressDetail") + ] = None class EndpointSettings(BaseModel): @@ -3183,110 +5016,187 @@ class EndpointSettings(BaseModel): Configuration for a network endpoint. 
""" - IPAMConfig: Optional[EndpointIPAMConfig] = None - Links: Optional[list[str]] = Field(None, example=["container_1", "container_2"]) - Aliases: Optional[list[str]] = Field(None, example=["server_x", "server_y"]) - NetworkID: Optional[str] = Field( - None, - description="Unique ID of the network.\n", - example="08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a", - ) - EndpointID: Optional[str] = Field( - None, - description="Unique ID for the service endpoint in a Sandbox.\n", - example="b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b", - ) - Gateway: Optional[str] = Field( - None, description="Gateway address for this network.\n", example="172.17.0.1" - ) - IPAddress: Optional[str] = Field( - None, description="IPv4 address.\n", example="172.17.0.4" - ) - IPPrefixLen: Optional[int] = Field( - None, description="Mask length of the IPv4 address.\n", example=16 - ) - IPv6Gateway: Optional[str] = Field( - None, description="IPv6 gateway address.\n", example="2001:db8:2::100" - ) - GlobalIPv6Address: Optional[str] = Field( - None, description="Global IPv6 address.\n", example="2001:db8::5689" - ) - GlobalIPv6PrefixLen: Optional[int] = Field( - None, description="Mask length of the global IPv6 address.\n", example=64 - ) - MacAddress: Optional[str] = Field( - None, - description="MAC address for the endpoint on this network.\n", - example="02:42:ac:11:00:04", - ) - DriverOpts: Optional[dict[str, str]] = Field( - None, - description="DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n", - example={ - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value", - }, - ) + model_config = ConfigDict( + populate_by_name=True, + ) + ipam_config: Annotated[EndpointIPAMConfig | None, Field(alias="IPAMConfig")] = None + links: Annotated[ + list[str] | None, + Field(alias="Links", examples=[["container_1", "container_2"]]), + ] = None + aliases: Annotated[ + list[str] | None, Field(alias="Aliases", examples=[["server_x", "server_y"]]) + ] = None + network_id: Annotated[ + str | None, + Field( + alias="NetworkID", + description="Unique ID of the network.\n", + examples=[ + "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + ], + ), + ] = None + endpoint_id: Annotated[ + str | None, + Field( + alias="EndpointID", + description="Unique ID for the service endpoint in a Sandbox.\n", + examples=[ + "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + ], + ), + ] = None + gateway: Annotated[ + str | None, + Field( + alias="Gateway", + description="Gateway address for this network.\n", + examples=["172.17.0.1"], + ), + ] = None + ip_address: Annotated[ + str | None, + Field( + alias="IPAddress", description="IPv4 address.\n", examples=["172.17.0.4"] + ), + ] = None + ip_prefix_len: Annotated[ + int | None, + Field( + alias="IPPrefixLen", + description="Mask length of the IPv4 address.\n", + examples=[16], + ), + ] = None + i_pv6_gateway: Annotated[ + str | None, + Field( + alias="IPv6Gateway", + description="IPv6 gateway address.\n", + examples=["2001:db8:2::100"], + ), + ] = None + global_i_pv6_address: Annotated[ + str | None, + Field( + alias="GlobalIPv6Address", + description="Global IPv6 address.\n", + examples=["2001:db8::5689"], + ), + ] = None + global_i_pv6_prefix_len: Annotated[ + int | None, + Field( + alias="GlobalIPv6PrefixLen", + description="Mask length of the global IPv6 address.\n", + examples=[64], + ), + ] = None + 
mac_address: Annotated[ + str | None, + Field( + alias="MacAddress", + description="MAC address for the endpoint on this network.\n", + examples=["02:42:ac:11:00:04"], + ), + ] = None + driver_opts: Annotated[ + dict[str, str] | None, + Field( + alias="DriverOpts", + description="DriverOpts is a mapping of driver options and values. These options\nare passed directly to the driver and are driver specific.\n", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None class NodeDescription(BaseModel): """ - NodeDescription encapsulates the properties of the Node as reported by the + NodeDescription encapsulates the properties of the Node as reported by the agent. """ - Hostname: Optional[str] = Field(None, example="bf3067039e47") - Platform: Optional[Platform] = None - Resources: Optional[ResourceObject] = None - Engine: Optional[EngineDescription] = None - TLSInfo: Optional[TLSInfo] = None + model_config = ConfigDict( + populate_by_name=True, + ) + hostname: Annotated[ + str | None, Field(alias="Hostname", examples=["bf3067039e47"]) + ] = None + platform: Annotated[Platform | None, Field(alias="Platform")] = None + resources: Annotated[ResourceObject | None, Field(alias="Resources")] = None + engine: Annotated[EngineDescription | None, Field(alias="Engine")] = None + tls_info: Annotated[TLSInfo | None, Field(alias="TLSInfo")] = None class NodeStatus(BaseModel): """ - NodeStatus represents the status of a node. + NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. """ - State: Optional[NodeState] = None - Message: Optional[str] = Field(None, example="") - Addr: Optional[str] = Field( - None, description="IP address of the node.", example="172.17.0.2" + model_config = ConfigDict( + populate_by_name=True, ) + state: Annotated[NodeState | None, Field(alias="State")] = None + message: Annotated[str | None, Field(alias="Message", examples=[""])] = None + addr: Annotated[ + str | None, + Field( + alias="Addr", description="IP address of the node.", examples=["172.17.0.2"] + ), + ] = None class ManagerStatus(BaseModel): """ - ManagerStatus represents the status of a manager. + ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. """ - Leader: Optional[bool] = Field(False, example=True) - Reachability: Optional[Reachability] = None - Addr: Optional[str] = Field( - None, - description="The IP address and port at which the manager is reachable.\n", - example="10.0.0.46:2377", + model_config = ConfigDict( + populate_by_name=True, ) + leader: Annotated[bool | None, Field(alias="Leader", examples=[True])] = False + reachability: Annotated[Reachability | None, Field(alias="Reachability")] = None + addr: Annotated[ + str | None, + Field( + alias="Addr", + description="The IP address and port at which the manager is reachable.\n", + examples=["10.0.0.46:2377"], + ), + ] = None class Resources1(BaseModel): """ - Resource requirements which apply to each individual container created + Resource requirements which apply to each individual container created as part of the service. """ - Limits: Optional[Limit] = Field(None, description="Define resources limits.") - Reservations: Optional[ResourceObject] = Field( - None, description="Define resources reservation." 
+ model_config = ConfigDict( + populate_by_name=True, ) + limits: Annotated[ + Limit | None, Field(alias="Limits", description="Define resources limits.") + ] = None + reservations: Annotated[ + ResourceObject | None, + Field(alias="Reservations", description="Define resources reservation."), + ] = None class TaskSpec(BaseModel): @@ -3294,68 +5204,116 @@ class TaskSpec(BaseModel): User modifiable task configuration. """ - PluginSpec: Optional[PluginSpec] = Field( - None, - description="Plugin spec for the service. *(Experimental release only.)*\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", - ) - ContainerSpec: Optional[ContainerSpec] = Field( - None, - description="Container spec for the service.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", - ) - NetworkAttachmentSpec: Optional[NetworkAttachmentSpec] = Field( - None, - description="Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", - ) - Resources: Optional[Resources1] = Field( - None, - description="Resource requirements which apply to each individual container created\nas part of the service.\n", - ) - RestartPolicy: Optional[RestartPolicy1] = Field( - None, - description="Specification for the restart policy which applies to containers\ncreated as part of this service.\n", - ) - Placement: Optional[Placement] = None - ForceUpdate: Optional[int] = Field( - None, - description="A counter that triggers an update even if no relevant parameters have\nbeen changed.\n", - ) - Runtime: Optional[str] = Field( - None, - description="Runtime is the type of runtime specified for the task executor.\n", - ) - Networks: Optional[list[NetworkAttachmentConfig]] = Field( - None, description="Specifies which networks the service should attach to." - ) - LogDriver: Optional[LogDriver1] = Field( - None, - description="Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + plugin_spec: Annotated[ + PluginSpec | None, + Field( + alias="PluginSpec", + description="Plugin spec for the service. *(Experimental release only.)*\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", + ), + ] = None + container_spec: Annotated[ + ContainerSpec | None, + Field( + alias="ContainerSpec", + description="Container spec for the service.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", + ), + ] = None + network_attachment_spec: Annotated[ + NetworkAttachmentSpec | None, + Field( + alias="NetworkAttachmentSpec", + description="Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n", + ), + ] = None + resources: Annotated[ + Resources1 | None, + Field( + alias="Resources", + description="Resource requirements which apply to each individual container created\nas part of the service.\n", + ), + ] = None + restart_policy: Annotated[ + RestartPolicy1 | None, + Field( + alias="RestartPolicy", + description="Specification for the restart policy which applies to containers\ncreated as part of this service.\n", + ), + ] = None + placement: Annotated[Placement | None, Field(alias="Placement")] = None + force_update: Annotated[ + int | None, + Field( + alias="ForceUpdate", + description="A counter that triggers an update even if no relevant parameters have\nbeen changed.\n", + ), + ] = None + runtime: Annotated[ + str | None, + Field( + alias="Runtime", + description="Runtime is the type of runtime specified for the task executor.\n", + ), + ] = None + networks: Annotated[ + list[NetworkAttachmentConfig] | None, + Field( + alias="Networks", + description="Specifies which networks the service should attach to.", + ), + ] = None + log_driver: Annotated[ + LogDriver1 | None, + Field( + alias="LogDriver", + description="Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n", + ), + ] = None class Task(BaseModel): - ID: Optional[str] = Field(None, description="The ID of the task.") - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = None - UpdatedAt: Optional[str] = None - Name: Optional[str] = Field(None, description="Name of the task.") - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." - ) - Spec: Optional[TaskSpec] = None - ServiceID: Optional[str] = Field( - None, description="The ID of the service this task is part of." - ) - Slot: Optional[int] = None - NodeID: Optional[str] = Field( - None, description="The ID of the node that this task is on." - ) - AssignedGenericResources: Optional[GenericResources] = None - Status: Optional[Status1] = None - DesiredState: Optional[TaskState] = None - JobIteration: Optional[ObjectVersion] = Field( - None, - description="If the Service this Task belongs to is a job-mode service, contains\nthe JobIteration of the Service this Task was created for. Absent if\nthe Task was created for a Replicated or Global Service.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, Field(alias="ID", description="The ID of the task.") + ] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[str | None, Field(alias="CreatedAt")] = None + updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None + name: Annotated[ + str | None, Field(alias="Name", description="Name of the task.") + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + spec: Annotated[TaskSpec | None, Field(alias="Spec")] = None + service_id: Annotated[ + str | None, + Field( + alias="ServiceID", description="The ID of the service this task is part of." 
+ ), + ] = None + slot: Annotated[int | None, Field(alias="Slot")] = None + node_id: Annotated[ + str | None, + Field(alias="NodeID", description="The ID of the node that this task is on."), + ] = None + assigned_generic_resources: Annotated[ + GenericResources | None, Field(alias="AssignedGenericResources") + ] = None + status: Annotated[Status1 | None, Field(alias="Status")] = None + desired_state: Annotated[TaskState | None, Field(alias="DesiredState")] = None + job_iteration: Annotated[ + ObjectVersion | None, + Field( + alias="JobIteration", + description="If the Service this Task belongs to is a job-mode service, contains\nthe JobIteration of the Service this Task was created for. Absent if\nthe Task was created for a Replicated or Global Service.\n", + ), + ] = None class ServiceSpec(BaseModel): @@ -3363,42 +5321,72 @@ class ServiceSpec(BaseModel): User modifiable configuration for a service. """ - Name: Optional[str] = Field(None, description="Name of the service.") - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." - ) - TaskTemplate: Optional[TaskSpec] = None - Mode: Optional[Mode] = Field(None, description="Scheduling mode for the service.") - UpdateConfig: Optional[UpdateConfig] = Field( - None, description="Specification for the update strategy of the service." - ) - RollbackConfig: Optional[RollbackConfig] = Field( - None, description="Specification for the rollback strategy of the service." - ) - Networks: Optional[list[NetworkAttachmentConfig]] = Field( - None, description="Specifies which networks the service should attach to." - ) - EndpointSpec: Optional[EndpointSpec] = None + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, Field(alias="Name", description="Name of the service.") + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + task_template: Annotated[TaskSpec | None, Field(alias="TaskTemplate")] = None + mode: Annotated[ + Mode | None, Field(alias="Mode", description="Scheduling mode for the service.") + ] = None + update_config: Annotated[ + UpdateConfig | None, + Field( + alias="UpdateConfig", + description="Specification for the update strategy of the service.", + ), + ] = None + rollback_config: Annotated[ + RollbackConfig | None, + Field( + alias="RollbackConfig", + description="Specification for the rollback strategy of the service.", + ), + ] = None + networks: Annotated[ + list[NetworkAttachmentConfig] | None, + Field( + alias="Networks", + description="Specifies which networks the service should attach to.", + ), + ] = None + endpoint_spec: Annotated[EndpointSpec | None, Field(alias="EndpointSpec")] = None class Service(BaseModel): - ID: Optional[str] = None - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = None - UpdatedAt: Optional[str] = None - Spec: Optional[ServiceSpec] = None - Endpoint: Optional[Endpoint] = None - UpdateStatus: Optional[UpdateStatus] = Field( - None, description="The status of a service update." - ) - ServiceStatus: Optional[ServiceStatus] = Field( - None, - description="The status of the service's tasks. Provided only when requested as\npart of a ServiceList operation.\n", - ) - JobStatus: Optional[JobStatus] = Field( - None, - description="The status of the service when it is in one of ReplicatedJob or\nGlobalJob modes. Absent on Replicated and Global mode services. 
The\nJobIteration is an ObjectVersion, but unlike the Service's version,\ndoes not need to be sent with an update request.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[str | None, Field(alias="ID")] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[str | None, Field(alias="CreatedAt")] = None + updated_at: Annotated[str | None, Field(alias="UpdatedAt")] = None + spec: Annotated[ServiceSpec | None, Field(alias="Spec")] = None + endpoint: Annotated[Endpoint | None, Field(alias="Endpoint")] = None + update_status: Annotated[ + UpdateStatus | None, + Field(alias="UpdateStatus", description="The status of a service update."), + ] = None + service_status: Annotated[ + ServiceStatus | None, + Field( + alias="ServiceStatus", + description="The status of the service's tasks. Provided only when requested as\npart of a ServiceList operation.\n", + ), + ] = None + job_status: Annotated[ + JobStatus | None, + Field( + alias="JobStatus", + description="The status of the service when it is in one of ReplicatedJob or\nGlobalJob modes. Absent on Replicated and Global mode services. The\nJobIteration is an ObjectVersion, but unlike the Service's version,\ndoes not need to be sent with an update request.\n", + ), + ] = None class NetworkSettings1(BaseModel): @@ -3406,98 +5394,177 @@ class NetworkSettings1(BaseModel): A summary of the container's network settings """ - Networks: Optional[dict[str, EndpointSettings]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + networks: Annotated[ + dict[str, EndpointSettings] | None, Field(alias="Networks") + ] = None class ContainerSummary(BaseModel): - Id: Optional[str] = Field(None, description="The ID of this container") - Names: Optional[list[str]] = Field( - None, description="The names that this container has been given" - ) - Image: Optional[str] = Field( - None, description="The name of the image used when creating this container" - ) - ImageID: Optional[str] = Field( - None, description="The ID of the image that this container was created from" - ) - Command: Optional[str] = Field( - None, description="Command to run when starting the container" - ) - Created: Optional[int] = Field(None, description="When the container was created") - Ports: Optional[list[Port]] = Field( - None, description="The ports exposed by this container" - ) - SizeRw: Optional[int] = Field( - None, - description="The size of files that have been created or changed by this container", - ) - SizeRootFs: Optional[int] = Field( - None, description="The total size of all the files in this container" - ) - Labels: Optional[dict[str, str]] = Field( - None, description="User-defined key/value metadata." - ) - State: Optional[str] = Field( - None, description="The state of this container (e.g. `Exited`)" - ) - Status: Optional[str] = Field( - None, - description="Additional human-readable status of this container (e.g. 
`Exit 0`)", - ) - HostConfig: Optional[HostConfig1] = None - NetworkSettings: Optional[NetworkSettings1] = Field( - None, description="A summary of the container's network settings" - ) - Mounts: Optional[list[MountPoint]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, Field(alias="Id", description="The ID of this container") + ] = None + names: Annotated[ + list[str] | None, + Field( + alias="Names", description="The names that this container has been given" + ), + ] = None + image: Annotated[ + str | None, + Field( + alias="Image", + description="The name of the image used when creating this container", + ), + ] = None + image_id: Annotated[ + str | None, + Field( + alias="ImageID", + description="The ID of the image that this container was created from", + ), + ] = None + command: Annotated[ + str | None, + Field( + alias="Command", description="Command to run when starting the container" + ), + ] = None + created: Annotated[ + int | None, Field(alias="Created", description="When the container was created") + ] = None + ports: Annotated[ + list[Port] | None, + Field(alias="Ports", description="The ports exposed by this container"), + ] = None + size_rw: Annotated[ + int | None, + Field( + alias="SizeRw", + description="The size of files that have been created or changed by this container", + ), + ] = None + size_root_fs: Annotated[ + int | None, + Field( + alias="SizeRootFs", + description="The total size of all the files in this container", + ), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field(alias="Labels", description="User-defined key/value metadata."), + ] = None + state: Annotated[ + str | None, + Field(alias="State", description="The state of this container (e.g. `Exited`)"), + ] = None + status: Annotated[ + str | None, + Field( + alias="Status", + description="Additional human-readable status of this container (e.g. `Exit 0`)", + ), + ] = None + host_config: Annotated[HostConfig1 | None, Field(alias="HostConfig")] = None + network_settings: Annotated[ + NetworkSettings1 | None, + Field( + alias="NetworkSettings", + description="A summary of the container's network settings", + ), + ] = None + mounts: Annotated[list[MountPoint] | None, Field(alias="Mounts")] = None class ContainerState(BaseModel): """ - ContainerState stores container's running state. It's part of ContainerJSONBase + ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. """ - Status: Optional[Status2] = Field( - None, - description='String representation of the container state. Can be one of "created",\n"running", "paused", "restarting", "removing", "exited", or "dead".\n', - example="running", - ) - Running: Optional[bool] = Field( - None, - description='Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. 
As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container\'s state is "running".\n', - example=True, - ) - Paused: Optional[bool] = Field( - None, description="Whether this container is paused.", example=False - ) - Restarting: Optional[bool] = Field( - None, description="Whether this container is restarting.", example=False - ) - OOMKilled: Optional[bool] = Field( - None, - description="Whether this container has been killed because it ran out of memory.\n", - example=False, - ) - Dead: Optional[bool] = Field(None, example=False) - Pid: Optional[int] = Field( - None, description="The process ID of this container", example=1234 - ) - ExitCode: Optional[int] = Field( - None, description="The last exit code of this container", example=0 - ) - Error: Optional[str] = None - StartedAt: Optional[str] = Field( - None, - description="The time when this container was last started.", - example="2020-01-06T09:06:59.461876391Z", - ) - FinishedAt: Optional[str] = Field( - None, - description="The time when this container last exited.", - example="2020-01-06T09:07:59.461876391Z", - ) - Health: Optional[Health] = None + model_config = ConfigDict( + populate_by_name=True, + ) + status: Annotated[ + Status2 | None, + Field( + alias="Status", + description='String representation of the container state. Can be one of "created",\n"running", "paused", "restarting", "removing", "exited", or "dead".\n', + examples=["running"], + ), + ] = None + running: Annotated[ + bool | None, + Field( + alias="Running", + description='Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. 
As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container\'s state is "running".\n', + examples=[True], + ), + ] = None + paused: Annotated[ + bool | None, + Field( + alias="Paused", + description="Whether this container is paused.", + examples=[False], + ), + ] = None + restarting: Annotated[ + bool | None, + Field( + alias="Restarting", + description="Whether this container is restarting.", + examples=[False], + ), + ] = None + oom_killed: Annotated[ + bool | None, + Field( + alias="OOMKilled", + description="Whether this container has been killed because it ran out of memory.\n", + examples=[False], + ), + ] = None + dead: Annotated[bool | None, Field(alias="Dead", examples=[False])] = None + pid: Annotated[ + int | None, + Field( + alias="Pid", description="The process ID of this container", examples=[1234] + ), + ] = None + exit_code: Annotated[ + int | None, + Field( + alias="ExitCode", + description="The last exit code of this container", + examples=[0], + ), + ] = None + error: Annotated[str | None, Field(alias="Error")] = None + started_at: Annotated[ + str | None, + Field( + alias="StartedAt", + description="The time when this container was last started.", + examples=["2020-01-06T09:06:59.461876391Z"], + ), + ] = None + finished_at: Annotated[ + str | None, + Field( + alias="FinishedAt", + description="The time when this container last exited.", + examples=["2020-01-06T09:07:59.461876391Z"], + ), + ] = None + health: Annotated[Health | None, Field(alias="Health")] = None class ContainerWaitResponse(BaseModel): @@ -3505,8 +5572,13 @@ class ContainerWaitResponse(BaseModel): OK response to ContainerWait operation """ - StatusCode: int = Field(..., description="Exit code of the container") - Error: Optional[ContainerWaitExitError] = None + model_config = ConfigDict( + populate_by_name=True, + ) + status_code: Annotated[ + int, Field(alias="StatusCode", description="Exit code of the container") + ] + error: Annotated[ContainerWaitExitError | None, Field(alias="Error")] = None class RegistryServiceConfig(BaseModel): @@ -3515,61 +5587,85 @@ class RegistryServiceConfig(BaseModel): """ - AllowNondistributableArtifactsCIDRs: Optional[list[str]] = Field( - None, - description="List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. 
Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n", - example=["::1/128", "127.0.0.0/8"], - ) - AllowNondistributableArtifactsHostnames: Optional[list[str]] = Field( - None, - description="List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n", - example=[ - "registry.internal.corp.example.com:3000", - "[2001:db8:a0b:12f0::1]:443", - ], - ) - InsecureRegistryCIDRs: Optional[list[str]] = Field( - None, - description="List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration override this behavior, insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. 
For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n", - example=["::1/128", "127.0.0.0/8"], - ) - IndexConfigs: Optional[dict[str, IndexInfo]] = Field( - None, - example={ - "127.0.0.1:5000": { - "Name": "127.0.0.1:5000", - "Mirrors": [], - "Secure": False, - "Official": False, - }, - "[2001:db8:a0b:12f0::1]:80": { - "Name": "[2001:db8:a0b:12f0::1]:80", - "Mirrors": [], - "Secure": False, - "Official": False, - }, - "docker.io": { - "Name": "docker.io", - "Mirrors": ["https://hub-mirror.corp.example.com:5000/"], - "Secure": True, - "Official": True, - }, - "registry.internal.corp.example.com:3000": { - "Name": "registry.internal.corp.example.com:3000", - "Mirrors": [], - "Secure": False, - "Official": False, - }, - }, - ) - Mirrors: Optional[list[str]] = Field( - None, - description="List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n", - example=[ - "https://hub-mirror.corp.example.com:5000/", - "https://[2001:db8:a0b:12f0::1]/", - ], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + allow_nondistributable_artifacts_cid_rs: Annotated[ + list[str] | None, + Field( + alias="AllowNondistributableArtifactsCIDRs", + description="List of IP ranges to which nondistributable artifacts can be pushed,\nusing the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior, and enables the daemon to\npush nondistributable artifacts to all registries whose resolved IP\naddress is within the subnet described by the CIDR syntax.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n", + examples=[["::1/128", "127.0.0.0/8"]], + ), + ] = None + allow_nondistributable_artifacts_hostnames: Annotated[ + list[str] | None, + Field( + alias="AllowNondistributableArtifactsHostnames", + description="List of registry hostnames to which nondistributable artifacts can be\npushed, using the format `[:]` or `[:]`.\n\nSome images (for example, Windows base images) contain artifacts\nwhose distribution is restricted by license. When these images are\npushed to a registry, restricted artifacts are not included.\n\nThis configuration override this behavior for the specified\nregistries.\n\nThis option is useful when pushing images containing\nnondistributable artifacts to a registry on an air-gapped network so\nhosts on that network can pull the images without connecting to\nanother server.\n\n> **Warning**: Nondistributable artifacts typically have restrictions\n> on how and where they can be distributed and shared. 
Only use this\n> feature to push artifacts to private registries and ensure that you\n> are in compliance with any terms that cover redistributing\n> nondistributable artifacts.\n", + examples=[ + [ + "registry.internal.corp.example.com:3000", + "[2001:db8:a0b:12f0::1]:443", + ] + ], + ), + ] = None + insecure_registry_cid_rs: Annotated[ + list[str] | None, + Field( + alias="InsecureRegistryCIDRs", + description="List of IP ranges of insecure registries, using the CIDR syntax\n([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries\naccept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates\nfrom unknown CAs) communication.\n\nBy default, local registries (`127.0.0.0/8`) are configured as\ninsecure. All other registries are secure. Communicating with an\ninsecure registry is not possible if the daemon assumes that registry\nis secure.\n\nThis configuration override this behavior, insecure communication with\nregistries whose resolved IP address is within the subnet described by\nthe CIDR syntax.\n\nRegistries can also be marked insecure by hostname. Those registries\nare listed under `IndexConfigs` and have their `Secure` field set to\n`false`.\n\n> **Warning**: Using this option can be useful when running a local\n> registry, but introduces security vulnerabilities. This option\n> should therefore ONLY be used for testing purposes. For increased\n> security, users should add their CA to their system's list of trusted\n> CAs instead of enabling this option.\n", + examples=[["::1/128", "127.0.0.0/8"]], + ), + ] = None + index_configs: Annotated[ + dict[str, IndexInfo] | None, + Field( + alias="IndexConfigs", + examples=[ + { + "127.0.0.1:5000": { + "Name": "127.0.0.1:5000", + "Mirrors": [], + "Secure": False, + "Official": False, + }, + "[2001:db8:a0b:12f0::1]:80": { + "Name": "[2001:db8:a0b:12f0::1]:80", + "Mirrors": [], + "Secure": False, + "Official": False, + }, + "docker.io": { + "Name": "docker.io", + "Mirrors": ["https://hub-mirror.corp.example.com:5000/"], + "Secure": True, + "Official": True, + }, + "registry.internal.corp.example.com:3000": { + "Name": "registry.internal.corp.example.com:3000", + "Mirrors": [], + "Secure": False, + "Official": False, + }, + } + ], + ), + ] = None + mirrors: Annotated[ + list[str] | None, + Field( + alias="Mirrors", + description="List of registry URLs that act as a mirror for the official\n(`docker.io`) registry.\n", + examples=[ + [ + "https://hub-mirror.corp.example.com:5000/", + "https://[2001:db8:a0b:12f0::1]/", + ] + ], + ), + ] = None class SwarmInfo(BaseModel): @@ -3578,50 +5674,336 @@ class SwarmInfo(BaseModel): """ - NodeID: Optional[str] = Field( - "", - description="Unique identifier of for this node in the swarm.", - example="k67qz4598weg5unwwffg6z1m1", - ) - NodeAddr: Optional[str] = Field( - "", - description="IP address at which this node can be reached by other nodes in the\nswarm.\n", - example="10.0.0.46", - ) - LocalNodeState: Optional[LocalNodeState] = "" - ControlAvailable: Optional[bool] = Field(False, example=True) - Error: Optional[str] = "" - RemoteManagers: Optional[list[PeerNode]] = Field( - None, - description="List of ID's and addresses of other managers in the swarm.\n", - example=[ - {"NodeID": "71izy0goik036k48jg985xnds", "Addr": "10.0.0.158:2377"}, - {"NodeID": "79y6h1o4gv8n120drcprv5nmc", "Addr": "10.0.0.159:2377"}, - {"NodeID": "k67qz4598weg5unwwffg6z1m1", "Addr": "10.0.0.46:2377"}, - ], - ) - Nodes: Optional[int] = Field( - None, description="Total number of nodes in the swarm.", example=4 
- ) - Managers: Optional[int] = Field( - None, description="Total number of managers in the swarm.", example=3 - ) - Cluster: Optional[ClusterInfo] = None + model_config = ConfigDict( + populate_by_name=True, + ) + node_id: Annotated[ + str | None, + Field( + alias="NodeID", + description="Unique identifier of for this node in the swarm.", + examples=["k67qz4598weg5unwwffg6z1m1"], + ), + ] = "" + node_addr: Annotated[ + str | None, + Field( + alias="NodeAddr", + description="IP address at which this node can be reached by other nodes in the\nswarm.\n", + examples=["10.0.0.46"], + ), + ] = "" + local_node_state: Annotated[ + LocalNodeState | None, Field(alias="LocalNodeState") + ] = "" # type: ignore[assignment] + control_available: Annotated[ + bool | None, Field(alias="ControlAvailable", examples=[True]) + ] = False + error: Annotated[str | None, Field(alias="Error")] = "" + remote_managers: Annotated[ + list[PeerNode] | None, + Field( + alias="RemoteManagers", + description="List of ID's and addresses of other managers in the swarm.\n", + examples=[ + [ + {"NodeID": "71izy0goik036k48jg985xnds", "Addr": "10.0.0.158:2377"}, + {"NodeID": "79y6h1o4gv8n120drcprv5nmc", "Addr": "10.0.0.159:2377"}, + {"NodeID": "k67qz4598weg5unwwffg6z1m1", "Addr": "10.0.0.46:2377"}, + ] + ], + ), + ] = None + nodes: Annotated[ + int | None, + Field( + alias="Nodes", + description="Total number of nodes in the swarm.", + examples=[4], + ), + ] = None + managers: Annotated[ + int | None, + Field( + alias="Managers", + description="Total number of managers in the swarm.", + examples=[3], + ), + ] = None + cluster: Annotated[ClusterInfo | None, Field(alias="Cluster")] = None + + +class HostConfig(Resources): + """ + Container configuration that depends on the host we are running on + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + binds: Annotated[ + list[str] | None, + Field( + alias="Binds", + description="A list of volume bindings for this container. Each volume binding\nis a string in one of these forms:\n\n- `host-src:container-dest[:options]` to bind-mount a host path\n into the container. Both `host-src`, and `container-dest` must\n be an _absolute_ path.\n- `volume-name:container-dest[:options]` to bind-mount a volume\n managed by a volume driver into the container. `container-dest`\n must be an _absolute_ path.\n\n`options` is an optional, comma-delimited list of:\n\n- `nocopy` disables automatic copying of data from the container\n path to the volume. The `nocopy` flag only applies to named volumes.\n- `[ro|rw]` mounts a volume read-only or read-write, respectively.\n If omitted or set to `rw`, volumes are mounted read-write.\n- `[z|Z]` applies SELinux labels to allow or deny multiple containers\n to read and write to the same volume.\n - `z`: a _shared_ content label is applied to the content. This\n label indicates that multiple containers can share the volume\n content, for both reading and writing.\n - `Z`: a _private unshared_ label is applied to the content.\n This label indicates that only the current container can use\n a private volume. Labeling systems such as SELinux require\n proper labels to be placed on volume content that is mounted\n into a container. Without a label, the security system can\n prevent a container's processes from using the content. 
By\n default, the labels set by the host operating system are not\n modified.\n- `[[r]shared|[r]slave|[r]private]` specifies mount\n [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).\n This only applies to bind-mounted volumes, not internal volumes\n or named volumes. Mount propagation requires the source mount\n point (the location where the source directory is mounted in the\n host operating system) to have the correct propagation properties.\n For shared volumes, the source mount point must be set to `shared`.\n For slave volumes, the mount must be set to either `shared` or\n `slave`.\n", + ), + ] = None + container_id_file: Annotated[ + str | None, + Field( + alias="ContainerIDFile", + description="Path to a file where the container ID is written", + ), + ] = None + log_config: Annotated[ + LogConfig | None, + Field( + alias="LogConfig", + description="The logging configuration for this container", + ), + ] = None + network_mode: Annotated[ + str | None, + Field( + alias="NetworkMode", + description="Network mode to use for this container. Supported standard values\nare: `bridge`, `host`, `none`, and `container:`. Any\nother value is taken as a custom network's name to which this\ncontainer should connect to.\n", + ), + ] = None + port_bindings: Annotated[PortMap | None, Field(alias="PortBindings")] = None + restart_policy: Annotated[RestartPolicy | None, Field(alias="RestartPolicy")] = None + auto_remove: Annotated[ + bool | None, + Field( + alias="AutoRemove", + description="Automatically remove the container when the container's process\nexits. This has no effect if `RestartPolicy` is set.\n", + ), + ] = None + volume_driver: Annotated[ + str | None, + Field( + alias="VolumeDriver", + description="Driver that this container uses to mount volumes.", + ), + ] = None + volumes_from: Annotated[ + list[str] | None, + Field( + alias="VolumesFrom", + description="A list of volumes to inherit from another container, specified in\nthe form `[:]`.\n", + ), + ] = None + mounts: Annotated[ + list[Mount] | None, + Field( + alias="Mounts", + description="Specification for mounts to be added to the container.\n", + ), + ] = None + cap_add: Annotated[ + list[str] | None, + Field( + alias="CapAdd", + description="A list of kernel capabilities to add to the container. Conflicts\nwith option 'Capabilities'.\n", + ), + ] = None + cap_drop: Annotated[ + list[str] | None, + Field( + alias="CapDrop", + description="A list of kernel capabilities to drop from the container. Conflicts\nwith option 'Capabilities'.\n", + ), + ] = None + cgroupns_mode: Annotated[ + CgroupnsMode | None, + Field( + alias="CgroupnsMode", + description='cgroup namespace mode for the container. Possible values are:\n\n- `"private"`: the container runs in its own private cgroup namespace\n- `"host"`: use the host system\'s cgroup namespace\n\nIf not specified, the daemon default is used, which can either be `"private"`\nor `"host"`, depending on daemon version, kernel support and configuration.\n', + ), + ] = None + dns: Annotated[ + list[str] | None, + Field( + alias="Dns", description="A list of DNS servers for the container to use." 
+ ), + ] = None + dns_options: Annotated[ + list[str] | None, + Field(alias="DnsOptions", description="A list of DNS options."), + ] = None + dns_search: Annotated[ + list[str] | None, + Field(alias="DnsSearch", description="A list of DNS search domains."), + ] = None + extra_hosts: Annotated[ + list[str] | None, + Field( + alias="ExtraHosts", + description='A list of hostnames/IP mappings to add to the container\'s `/etc/hosts`\nfile. Specified in the form `["hostname:IP"]`.\n', + ), + ] = None + group_add: Annotated[ + list[str] | None, + Field( + alias="GroupAdd", + description="A list of additional groups that the container process will run as.\n", + ), + ] = None + ipc_mode: Annotated[ + str | None, + Field( + alias="IpcMode", + description='IPC sharing mode for the container. Possible values are:\n\n- `"none"`: own private IPC namespace, with /dev/shm not mounted\n- `"private"`: own private IPC namespace\n- `"shareable"`: own private IPC namespace, with a possibility to share it with other containers\n- `"container:"`: join another (shareable) container\'s IPC namespace\n- `"host"`: use the host system\'s IPC namespace\n\nIf not specified, daemon default is used, which can either be `"private"`\nor `"shareable"`, depending on daemon version and configuration.\n', + ), + ] = None + cgroup: Annotated[ + str | None, + Field(alias="Cgroup", description="Cgroup to use for the container."), + ] = None + links: Annotated[ + list[str] | None, + Field( + alias="Links", + description="A list of links for the container in the form `container_name:alias`.\n", + ), + ] = None + oom_score_adj: Annotated[ + int | None, + Field( + alias="OomScoreAdj", + description="An integer value containing the score given to the container in\norder to tune OOM killer preferences.\n", + examples=[500], + ), + ] = None + pid_mode: Annotated[ + str | None, + Field( + alias="PidMode", + description='Set the PID (Process) Namespace mode for the container. It can be\neither:\n\n- `"container:"`: joins another container\'s PID namespace\n- `"host"`: use the host\'s PID namespace inside the container\n', + ), + ] = None + privileged: Annotated[ + bool | None, + Field( + alias="Privileged", + description="Gives the container full access to the host.", + ), + ] = None + publish_all_ports: Annotated[ + bool | None, + Field( + alias="PublishAllPorts", + description="Allocates an ephemeral host port for all of a container's\nexposed ports.\n\nPorts are de-allocated when the container stops and allocated when\nthe container starts. The allocated port might be changed when\nrestarting the container.\n\nThe port is selected from the ephemeral port range that depends on\nthe kernel. For example, on Linux the range is defined by\n`/proc/sys/net/ipv4/ip_local_port_range`.\n", + ), + ] = None + readonly_rootfs: Annotated[ + bool | None, + Field( + alias="ReadonlyRootfs", + description="Mount the container's root filesystem as read only.", + ), + ] = None + security_opt: Annotated[ + list[str] | None, + Field( + alias="SecurityOpt", + description="A list of string values to customize labels for MLS systems, such\nas SELinux.\n", + ), + ] = None + storage_opt: Annotated[ + dict[str, str] | None, + Field( + alias="StorageOpt", + description='Storage driver options for this container, in the form `{"size": "120G"}`.\n', + ), + ] = None + tmpfs: Annotated[ + dict[str, str] | None, + Field( + alias="Tmpfs", + description='A map of container directories which should be replaced by tmpfs\nmounts, and their corresponding mount options. 
For example:\n\n```\n{ "/run": "rw,noexec,nosuid,size=65536k" }\n```\n', + ), + ] = None + uts_mode: Annotated[ + str | None, + Field(alias="UTSMode", description="UTS namespace to use for the container."), + ] = None + userns_mode: Annotated[ + str | None, + Field( + alias="UsernsMode", + description="Sets the usernamespace mode for the container when usernamespace\nremapping option is enabled.\n", + ), + ] = None + shm_size: Annotated[ + int | None, + Field( + alias="ShmSize", + description="Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.\n", + ge=0, + ), + ] = None + sysctls: Annotated[ + dict[str, str] | None, + Field( + alias="Sysctls", + description='A list of kernel parameters (sysctls) to set in the container.\nFor example:\n\n```\n{"net.ipv4.ip_forward": "1"}\n```\n', + ), + ] = None + runtime: Annotated[ + str | None, + Field(alias="Runtime", description="Runtime to use with this container."), + ] = None + console_size: Annotated[ + list[ConsoleSizeItem] | None, + Field( + alias="ConsoleSize", + description="Initial console size, as an `[height, width]` array. (Windows only)\n", + max_length=2, + min_length=2, + ), + ] = None + isolation: Annotated[ + Isolation | None, + Field( + alias="Isolation", + description="Isolation technology of the container. (Windows only)\n", + ), + ] = None + masked_paths: Annotated[ + list[str] | None, + Field( + alias="MaskedPaths", + description="The list of paths to be masked inside the container (this overrides\nthe default set of paths).\n", + ), + ] = None + readonly_paths: Annotated[ + list[str] | None, + Field( + alias="ReadonlyPaths", + description="The list of paths to be set as read-only inside the container\n(this overrides the default set of paths).\n", + ), + ] = None class NetworkingConfig(BaseModel): """ - NetworkingConfig represents the container's networking configuration for + NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. 
""" - EndpointsConfig: Optional[dict[str, EndpointSettings]] = Field( - None, - description="A mapping of network name to endpoint configuration for that network.\n", + model_config = ConfigDict( + populate_by_name=True, ) + endpoints_config: Annotated[ + dict[str, EndpointSettings] | None, + Field( + alias="EndpointsConfig", + description="A mapping of network name to endpoint configuration for that network.\n", + ), + ] = None class NetworkSettings(BaseModel): @@ -3629,400 +6011,798 @@ class NetworkSettings(BaseModel): NetworkSettings exposes the network settings in the API """ - Bridge: Optional[str] = Field( - None, - description="Name of the network'a bridge (for example, `docker0`).", - example="docker0", - ) - SandboxID: Optional[str] = Field( - None, - description="SandboxID uniquely represents a container's network stack.", - example="9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3", - ) - HairpinMode: Optional[bool] = Field( - None, - description="Indicates if hairpin NAT should be enabled on the virtual interface.\n", - example=False, - ) - LinkLocalIPv6Address: Optional[str] = Field( - None, - description="IPv6 unicast address using the link-local prefix.", - example="fe80::42:acff:fe11:1", - ) - LinkLocalIPv6PrefixLen: Optional[int] = Field( - None, description="Prefix length of the IPv6 unicast address.", example="64" - ) - Ports: Optional[PortMap] = None - SandboxKey: Optional[str] = Field( - None, - description="SandboxKey identifies the sandbox", - example="/var/run/docker/netns/8ab54b426c38", - ) - SecondaryIPAddresses: Optional[list[Address]] = Field(None, description="") - SecondaryIPv6Addresses: Optional[list[Address]] = Field(None, description="") - EndpointID: Optional[str] = Field( - None, - description='EndpointID uniquely represents a service endpoint in a Sandbox.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b", - ) - Gateway: Optional[str] = Field( - None, - description='Gateway address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="172.17.0.1", - ) - GlobalIPv6Address: Optional[str] = Field( - None, - description='Global IPv6 address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="2001:db8::5689", - ) - GlobalIPv6PrefixLen: Optional[int] = Field( - None, - description='Mask length of the global IPv6 address.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example=64, - ) - IPAddress: Optional[str] = Field( - None, - description='IPv4 address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="172.17.0.4", - ) - IPPrefixLen: Optional[int] = Field( - None, - description='Mask length of the IPv4 address.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example=16, - ) - IPv6Gateway: Optional[str] = Field( - None, - description='IPv6 gateway address for this network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="2001:db8:2::100", - ) - MacAddress: Optional[str] = Field( - None, - description='MAC address for the container on the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', - example="02:42:ac:11:00:04", - ) - Networks: Optional[dict[str, EndpointSettings]] = Field( - None, - description="Information about all networks that the container is connected to.\n", - ) + model_config = ConfigDict( + populate_by_name=True, + ) + bridge: Annotated[ + str | None, + Field( + alias="Bridge", + description="Name of the network's bridge (for example, `docker0`).", + examples=["docker0"], + ), + ] = None + sandbox_id: Annotated[ + str | None, + Field( + alias="SandboxID", + description="SandboxID uniquely represents a container's network stack.", + examples=[ + "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + ], + ), + ] = None + hairpin_mode: Annotated[ + bool | None, + Field( + alias="HairpinMode", + description="Indicates if hairpin NAT should be enabled on the virtual interface.\n", + examples=[False], + ), + ] = None + link_local_i_pv6_address: Annotated[ + str | None, + Field( + alias="LinkLocalIPv6Address", + description="IPv6 unicast address using the link-local prefix.", + examples=["fe80::42:acff:fe11:1"], + ), + ] = None + link_local_i_pv6_prefix_len: Annotated[ + int | None, + Field( + alias="LinkLocalIPv6PrefixLen", + description="Prefix length of the IPv6 unicast address.", + examples=["64"], + ), + ] = None + ports: Annotated[PortMap | None, Field(alias="Ports")] = None + sandbox_key: Annotated[ + str | None, + Field( + alias="SandboxKey", + description="SandboxKey identifies the sandbox", + examples=["/var/run/docker/netns/8ab54b426c38"], + ), + ] = None + secondary_ip_addresses: Annotated[ + list[Address] | None, Field(alias="SecondaryIPAddresses", description="") + ] = None + secondary_i_pv6_addresses: Annotated[ + list[Address] | None, Field(alias="SecondaryIPv6Addresses", description="") + ] = None + endpoint_id: Annotated[ + str | None, + Field( + alias="EndpointID", + description='EndpointID uniquely represents a service endpoint in a Sandbox.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=[ + "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + ], + ), + ] = None + gateway: Annotated[ + str | None, + Field( + alias="Gateway", + description='Gateway address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=["172.17.0.1"], + ), + ] = None + global_i_pv6_address: Annotated[ + str | None, + Field( + alias="GlobalIPv6Address", + description='Global IPv6 address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=["2001:db8::5689"], + ), + ] = None + global_i_pv6_prefix_len: Annotated[ + int | None, + Field( + alias="GlobalIPv6PrefixLen", + description='Mask length of the global IPv6 address.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=[64], + ), + ] = None + ip_address: Annotated[ + str | None, + Field( + alias="IPAddress", + description='IPv4 address for the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=["172.17.0.4"], + ), + ] = None + ip_prefix_len: Annotated[ + int | None, + Field( + alias="IPPrefixLen", + description='Mask length of the IPv4 address.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=[16], + ), + ] = None + i_pv6_gateway: Annotated[ + str | None, + Field( + alias="IPv6Gateway", + description='IPv6 gateway address for this network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=["2001:db8:2::100"], + ), + ] = None + mac_address: Annotated[ + str | None, + Field( + alias="MacAddress", + description='MAC address for the container on the default "bridge" network.\n\n
\n\n> **Deprecated**: This field is only propagated when attached to the\n> default "bridge" network. Use the information from the "bridge"\n> network inside the `Networks` map instead, which contains the same\n> information. This field was deprecated in Docker 1.9 and is scheduled\n> to be removed in Docker 17.12.0\n', + examples=["02:42:ac:11:00:04"], + ), + ] = None + networks: Annotated[ + dict[str, EndpointSettings] | None, + Field( + alias="Networks", + description="Information about all networks that the container is connected to.\n", + ), + ] = None class Network(BaseModel): - Name: Optional[str] = None - Id: Optional[str] = None - Created: Optional[str] = None - Scope: Optional[str] = None - Driver: Optional[str] = None - EnableIPv6: Optional[bool] = None - IPAM: Optional[IPAM] = None - Internal: Optional[bool] = None - Attachable: Optional[bool] = None - Ingress: Optional[bool] = None - Containers: Optional[dict[str, NetworkContainer]] = None - Options: Optional[dict[str, str]] = None - Labels: Optional[dict[str, str]] = None + model_config = ConfigDict( + populate_by_name=True, + ) + name: Annotated[ + str | None, + Field( + alias="Name", description="Name of the network.\n", examples=["my_network"] + ), + ] = None + id: Annotated[ + str | None, + Field( + alias="Id", + description="ID that uniquely identifies a network on a single machine.\n", + examples=[ + "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + ], + ), + ] = None + created: Annotated[ + str | None, + Field( + alias="Created", + description="Date and time at which the network was created in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2016-10-19T04:33:30.360899459Z"], + ), + ] = None + scope: Annotated[ + str | None, + Field( + alias="Scope", + description="The level at which the network exists (e.g. `swarm` for cluster-wide\nor `local` for machine level)\n", + examples=["local"], + ), + ] = None + driver: Annotated[ + str | None, + Field( + alias="Driver", + description="The name of the driver used to create the network (e.g. `bridge`,\n`overlay`).\n", + examples=["overlay"], + ), + ] = None + enable_i_pv6: Annotated[ + bool | None, + Field( + alias="EnableIPv6", + description="Whether the network was created with IPv6 enabled.\n", + examples=[False], + ), + ] = None + ipam: Annotated[IPAM | None, Field(alias="IPAM")] = None + internal: Annotated[ + bool | None, + Field( + alias="Internal", + description="Whether the network is created to only allow internal networking\nconnectivity.\n", + examples=[False], + ), + ] = False + attachable: Annotated[ + bool | None, + Field( + alias="Attachable", + description="Whether a global / swarm scope network is manually attachable by regular\ncontainers from workers in swarm mode.\n", + examples=[False], + ), + ] = False + ingress: Annotated[ + bool | None, + Field( + alias="Ingress", + description="Whether the network is providing the routing-mesh for the swarm cluster.\n", + examples=[False], + ), + ] = False + config_from: Annotated[ConfigReference | None, Field(alias="ConfigFrom")] = None + config_only: Annotated[ + bool | None, + Field( + alias="ConfigOnly", + description="Whether the network is a config-only network. Config-only networks are\nplaceholder networks for network configurations to be used by other\nnetworks. 
Config-only networks cannot be used directly to run containers\nor services.\n", + ), + ] = False + containers: Annotated[ + dict[str, NetworkContainer] | None, + Field( + alias="Containers", + description="Contains endpoints attached to the network.\n", + examples=[ + { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "", + } + } + ], + ), + ] = None + options: Annotated[ + dict[str, str] | None, + Field( + alias="Options", + description="Network-specific options uses when creating the network.\n", + examples=[ + { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500", + } + ], + ), + ] = None + labels: Annotated[ + dict[str, str] | None, + Field( + alias="Labels", + description="User-defined key/value metadata.", + examples=[ + { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value", + } + ], + ), + ] = None + peers: Annotated[ + list[PeerInfo] | None, + Field( + alias="Peers", + description="List of peer nodes for an overlay network. This field is only present\nfor overlay networks, and omitted for other network types.\n", + ), + ] = None class Node(BaseModel): - ID: Optional[str] = Field(None, example="24ifsmvkjbyhk") - Version: Optional[ObjectVersion] = None - CreatedAt: Optional[str] = Field( - None, - description="Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2016-08-18T10:44:24.496525531Z", - ) - UpdatedAt: Optional[str] = Field( - None, - description="Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", - example="2017-08-09T07:09:37.632105588Z", - ) - Spec: Optional[NodeSpec] = None - Description: Optional[NodeDescription] = None - Status: Optional[NodeStatus] = None - ManagerStatus: Optional[ManagerStatus] = None + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[str | None, Field(alias="ID", examples=["24ifsmvkjbyhk"])] = None + version: Annotated[ObjectVersion | None, Field(alias="Version")] = None + created_at: Annotated[ + str | None, + Field( + alias="CreatedAt", + description="Date and time at which the node was added to the swarm in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2016-08-18T10:44:24.496525531Z"], + ), + ] = None + updated_at: Annotated[ + str | None, + Field( + alias="UpdatedAt", + description="Date and time at which the node was last updated in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n", + examples=["2017-08-09T07:09:37.632105588Z"], + ), + ] = None + spec: Annotated[NodeSpec | None, Field(alias="Spec")] = None + description: Annotated[NodeDescription | None, Field(alias="Description")] = None + status: Annotated[NodeStatus | None, Field(alias="Status")] = None + manager_status: Annotated[ManagerStatus | None, Field(alias="ManagerStatus")] = None class SystemInfo(BaseModel): - ID: Optional[str] = Field( - None, - description="Unique identifier of the daemon.\n\n
\n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n", - example="7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", - ) - Containers: Optional[int] = Field( - None, description="Total number of containers on the host.", example=14 - ) - ContainersRunning: Optional[int] = Field( - None, description='Number of containers with status `"running"`.\n', example=3 - ) - ContainersPaused: Optional[int] = Field( - None, description='Number of containers with status `"paused"`.\n', example=1 - ) - ContainersStopped: Optional[int] = Field( - None, description='Number of containers with status `"stopped"`.\n', example=10 - ) - Images: Optional[int] = Field( - None, - description="Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n", - example=508, - ) - Driver: Optional[str] = Field( - None, description="Name of the storage driver in use.", example="overlay2" - ) - DriverStatus: Optional[list[list[str]]] = Field( - None, - description='Information specific to the storage driver, provided as\n"label" / "value" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent with the output of `docker info` on the command\nline.\n\n
\n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n', - example=[ - ["Backing Filesystem", "extfs"], - ["Supports d_type", "true"], - ["Native Overlay Diff", "true"], - ], - ) - DockerRootDir: Optional[str] = Field( - None, - description="Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n", - example="/var/lib/docker", - ) - Plugins: Optional[PluginsInfo] = None - MemoryLimit: Optional[bool] = Field( - None, - description="Indicates if the host has memory limit support enabled.", - example=True, - ) - SwapLimit: Optional[bool] = Field( - None, - description="Indicates if the host has memory swap limit support enabled.", - example=True, - ) - KernelMemory: Optional[bool] = Field( - None, - description="Indicates if the host has kernel memory limit support enabled.\n\n
\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n", - example=True, - ) - KernelMemoryTCP: Optional[bool] = Field( - None, - description="Indicates if the host has kernel memory TCP limit support enabled.\n\nKernel memory TCP limits are not supported when using cgroups v2, which\ndoes not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.\n", - example=True, - ) - CpuCfsPeriod: Optional[bool] = Field( - None, - description="Indicates if CPU CFS(Completely Fair Scheduler) period is supported by\nthe host.\n", - example=True, - ) - CpuCfsQuota: Optional[bool] = Field( - None, - description="Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by\nthe host.\n", - example=True, - ) - CPUShares: Optional[bool] = Field( - None, - description="Indicates if CPU Shares limiting is supported by the host.\n", - example=True, - ) - CPUSet: Optional[bool] = Field( - None, - description="Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n", - example=True, - ) - PidsLimit: Optional[bool] = Field( - None, - description="Indicates if the host kernel has PID limit support enabled.", - example=True, - ) - OomKillDisable: Optional[bool] = Field( - None, description="Indicates if OOM killer disable is supported on the host." - ) - IPv4Forwarding: Optional[bool] = Field( - None, description="Indicates IPv4 forwarding is enabled.", example=True - ) - BridgeNfIptables: Optional[bool] = Field( - None, - description="Indicates if `bridge-nf-call-iptables` is available on the host.", - example=True, - ) - BridgeNfIp6tables: Optional[bool] = Field( - None, - description="Indicates if `bridge-nf-call-ip6tables` is available on the host.", - example=True, - ) - Debug: Optional[bool] = Field( - None, - description="Indicates if the daemon is running in debug-mode / with debug-level\nlogging enabled.\n", - example=True, - ) - NFd: Optional[int] = Field( - None, - description="The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n", - example=64, - ) - NGoroutines: Optional[int] = Field( - None, - description="The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n", - example=174, - ) - SystemTime: Optional[str] = Field( - None, - description="Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n", - example="2017-08-08T20:28:29.06202363Z", - ) - LoggingDriver: Optional[str] = Field( - None, description="The logging driver to use as a default for new containers.\n" - ) - CgroupDriver: Optional[CgroupDriver] = Field( - CgroupDriver.cgroupfs, - description="The driver to use for managing cgroups.\n", - example="cgroupfs", - ) - CgroupVersion: Optional[CgroupVersion] = Field( - CgroupVersion.field_1, description="The version of the cgroup.\n", example="1" - ) - NEventsListener: Optional[int] = Field( - None, description="Number of event listeners subscribed.", example=30 - ) - KernelVersion: Optional[str] = Field( - None, - description='Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. 
On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.\n', - example="4.9.38-moby", - ) - OperatingSystem: Optional[str] = Field( - None, - description='Name of the host\'s operating system, for example: "Ubuntu 16.04.2 LTS"\nor "Windows Server 2016 Datacenter"\n', - example="Alpine Linux v3.5", - ) - OSVersion: Optional[str] = Field( - None, - description="Version of the host's operating system\n\n
\n\n> **Note**: The information returned in this field, including its\n> very existence, and the formatting of values, should not be considered\n> stable, and may change without notice.\n", - example="16.04", - ) - OSType: Optional[str] = Field( - None, - description='Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are "linux" and "windows". A full list of\npossible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n', - example="linux", - ) - Architecture: Optional[str] = Field( - None, - description="Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).\n", - example="x86_64", - ) - NCPU: Optional[int] = Field( - None, - description="The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n", - example=4, - ) - MemTotal: Optional[int] = Field( - None, - description="Total amount of physical memory available on the host, in bytes.\n", - example=2095882240, - ) - IndexServerAddress: Optional[str] = Field( - "https://index.docker.io/v1/", - description="Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n", - example="https://index.docker.io/v1/", - ) - RegistryConfig: Optional[RegistryServiceConfig] = None - GenericResources: Optional[GenericResources] = None - HttpProxy: Optional[str] = Field( - None, - description="HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n", - example="http://xxxxx:xxxxx@proxy.corp.example.com:8080", - ) - HttpsProxy: Optional[str] = Field( - None, - description="HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n", - example="https://xxxxx:xxxxx@proxy.corp.example.com:4443", - ) - NoProxy: Optional[str] = Field( - None, - description="Comma-separated list of domain extensions for which no proxy should be\nused. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n", - example="*.local, 169.254/16", - ) - Name: Optional[str] = Field( - None, description="Hostname of the host.", example="node5.corp.example.com" - ) - Labels: Optional[list[str]] = Field( - None, - description="User-defined labels (key/value metadata) as set on the daemon.\n\n
\n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n", - example=["storage=ssd", "production"], - ) - ExperimentalBuild: Optional[bool] = Field( - None, - description="Indicates if experimental features are enabled on the daemon.\n", - example=True, - ) - ServerVersion: Optional[str] = Field( - None, - description="Version string of the daemon.\n\n> **Note**: the [standalone Swarm API](/swarm/swarm-api/)\n> returns the Swarm version instead of the daemon version, for example\n> `swarm/1.2.8`.\n", - example="17.06.0-ce", - ) - ClusterStore: Optional[str] = Field( - None, - description="URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n", - example="consul://consul.corp.example.com:8600/some/path", - ) - ClusterAdvertise: Optional[str] = Field( - None, - description="The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n", - example="node5.corp.example.com:8000", - ) - Runtimes: Optional[dict[str, Runtime]] = Field( - {"runc": {"path": "runc"}}, - description='List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the "name" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. Additional\nruntimes can be configured by the user and will be listed here.\n', - example={ - "runc": {"path": "runc"}, - "runc-master": {"path": "/go/bin/runc"}, - "custom": { - "path": "/usr/local/bin/my-oci-runtime", - "runtimeArgs": ["--debug", "--systemd-cgroup=false"], - }, - }, - ) - DefaultRuntime: Optional[str] = Field( - "runc", - description="Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n", - example="runc", - ) - Swarm: Optional[SwarmInfo] = None - LiveRestoreEnabled: Optional[bool] = Field( - False, - description="Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n", - example=False, - ) - Isolation: Optional[Isolation2] = Field( - Isolation2.default, - description="Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n", - ) - InitBinary: Optional[str] = Field( - None, - description="Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n", - example="docker-init", - ) - ContainerdCommit: Optional[Commit] = None - RuncCommit: Optional[Commit] = None - InitCommit: Optional[Commit] = None - SecurityOptions: Optional[list[str]] = Field( - None, - description="List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, user-namespaces (userns), and rootless.\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n", - example=[ - "name=apparmor", - "name=seccomp,profile=default", - "name=selinux", - "name=userns", - "name=rootless", - ], - ) - ProductLicense: Optional[str] = Field( - None, - description="Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as number of nodes, and expiration are included.\n", - example="Community Engine", - ) - DefaultAddressPools: Optional[list[DefaultAddressPool]] = Field( - None, - description='List of custom default address pools for local networks, which can be\nspecified in the daemon.json file or dockerd option.\n\nExample: a Base "10.10.0.0/16" with Size 24 will define the set of 256\n10.10.[0-255].0/24 address pools.\n', - ) - Warnings: Optional[list[str]] = Field( - None, - description="List of 
warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n", - example=[ - "WARNING: No memory limit support", - "WARNING: bridge-nf-call-iptables is disabled", - "WARNING: bridge-nf-call-ip6tables is disabled", - ], - ) + model_config = ConfigDict( + populate_by_name=True, + ) + id: Annotated[ + str | None, + Field( + alias="ID", + description="Unique identifier of the daemon.\n\n
\n\n> **Note**: The format of the ID itself is not part of the API, and\n> should not be considered stable.\n", + examples=["7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"], + ), + ] = None + containers: Annotated[ + int | None, + Field( + alias="Containers", + description="Total number of containers on the host.", + examples=[14], + ), + ] = None + containers_running: Annotated[ + int | None, + Field( + alias="ContainersRunning", + description='Number of containers with status `"running"`.\n', + examples=[3], + ), + ] = None + containers_paused: Annotated[ + int | None, + Field( + alias="ContainersPaused", + description='Number of containers with status `"paused"`.\n', + examples=[1], + ), + ] = None + containers_stopped: Annotated[ + int | None, + Field( + alias="ContainersStopped", + description='Number of containers with status `"stopped"`.\n', + examples=[10], + ), + ] = None + images: Annotated[ + int | None, + Field( + alias="Images", + description="Total number of images on the host.\n\nBoth _tagged_ and _untagged_ (dangling) images are counted.\n", + examples=[508], + ), + ] = None + driver: Annotated[ + str | None, + Field( + alias="Driver", + description="Name of the storage driver in use.", + examples=["overlay2"], + ), + ] = None + driver_status: Annotated[ + list[list[str]] | None, + Field( + alias="DriverStatus", + description='Information specific to the storage driver, provided as\n"label" / "value" pairs.\n\nThis information is provided by the storage driver, and formatted\nin a way consistent with the output of `docker info` on the command\nline.\n\n
\n\n> **Note**: The information returned in this field, including the\n> formatting of values and labels, should not be considered stable,\n> and may change without notice.\n', + examples=[ + [ + ["Backing Filesystem", "extfs"], + ["Supports d_type", "true"], + ["Native Overlay Diff", "true"], + ] + ], + ), + ] = None + docker_root_dir: Annotated[ + str | None, + Field( + alias="DockerRootDir", + description="Root directory of persistent Docker state.\n\nDefaults to `/var/lib/docker` on Linux, and `C:\\ProgramData\\docker`\non Windows.\n", + examples=["/var/lib/docker"], + ), + ] = None + plugins: Annotated[PluginsInfo | None, Field(alias="Plugins")] = None + memory_limit: Annotated[ + bool | None, + Field( + alias="MemoryLimit", + description="Indicates if the host has memory limit support enabled.", + examples=[True], + ), + ] = None + swap_limit: Annotated[ + bool | None, + Field( + alias="SwapLimit", + description="Indicates if the host has memory swap limit support enabled.", + examples=[True], + ), + ] = None + kernel_memory: Annotated[ + bool | None, + Field( + alias="KernelMemory", + description="Indicates if the host has kernel memory limit support enabled.\n\n
\n\n> **Deprecated**: This field is deprecated as the kernel 5.4 deprecated\n> `kmem.limit_in_bytes`.\n", + examples=[True], + ), + ] = None + kernel_memory_tcp: Annotated[ + bool | None, + Field( + alias="KernelMemoryTCP", + description="Indicates if the host has kernel memory TCP limit support enabled.\n\nKernel memory TCP limits are not supported when using cgroups v2, which\ndoes not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup.\n", + examples=[True], + ), + ] = None + cpu_cfs_period: Annotated[ + bool | None, + Field( + alias="CpuCfsPeriod", + description="Indicates if CPU CFS(Completely Fair Scheduler) period is supported by\nthe host.\n", + examples=[True], + ), + ] = None + cpu_cfs_quota: Annotated[ + bool | None, + Field( + alias="CpuCfsQuota", + description="Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by\nthe host.\n", + examples=[True], + ), + ] = None + cpu_shares: Annotated[ + bool | None, + Field( + alias="CPUShares", + description="Indicates if CPU Shares limiting is supported by the host.\n", + examples=[True], + ), + ] = None + cpu_set: Annotated[ + bool | None, + Field( + alias="CPUSet", + description="Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.\n\nSee [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)\n", + examples=[True], + ), + ] = None + pids_limit: Annotated[ + bool | None, + Field( + alias="PidsLimit", + description="Indicates if the host kernel has PID limit support enabled.", + examples=[True], + ), + ] = None + oom_kill_disable: Annotated[ + bool | None, + Field( + alias="OomKillDisable", + description="Indicates if OOM killer disable is supported on the host.", + ), + ] = None + i_pv4_forwarding: Annotated[ + bool | None, + Field( + alias="IPv4Forwarding", + description="Indicates IPv4 forwarding is enabled.", + examples=[True], + ), + ] = None + bridge_nf_iptables: Annotated[ + bool | None, + Field( + alias="BridgeNfIptables", + description="Indicates if `bridge-nf-call-iptables` is available on the host.", + examples=[True], + ), + ] = None + bridge_nf_ip6tables: Annotated[ + bool | None, + Field( + alias="BridgeNfIp6tables", + description="Indicates if `bridge-nf-call-ip6tables` is available on the host.", + examples=[True], + ), + ] = None + debug: Annotated[ + bool | None, + Field( + alias="Debug", + description="Indicates if the daemon is running in debug-mode / with debug-level\nlogging enabled.\n", + examples=[True], + ), + ] = None + n_fd: Annotated[ + int | None, + Field( + alias="NFd", + description="The total number of file Descriptors in use by the daemon process.\n\nThis information is only returned if debug-mode is enabled.\n", + examples=[64], + ), + ] = None + n_goroutines: Annotated[ + int | None, + Field( + alias="NGoroutines", + description="The number of goroutines that currently exist.\n\nThis information is only returned if debug-mode is enabled.\n", + examples=[174], + ), + ] = None + system_time: Annotated[ + str | None, + Field( + alias="SystemTime", + description="Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat with nano-seconds.\n", + examples=["2017-08-08T20:28:29.06202363Z"], + ), + ] = None + logging_driver: Annotated[ + str | None, + Field( + alias="LoggingDriver", + description="The logging driver to use as a default for new containers.\n", + ), + ] = None + cgroup_driver: Annotated[ + CgroupDriver | None, + Field( + alias="CgroupDriver", + description="The driver to use for managing cgroups.\n", + 
examples=["cgroupfs"], + ), + ] = CgroupDriver.cgroupfs + cgroup_version: Annotated[ + CgroupVersion | None, + Field( + alias="CgroupVersion", + description="The version of the cgroup.\n", + examples=["1"], + ), + ] = CgroupVersion.field_1 + n_events_listener: Annotated[ + int | None, + Field( + alias="NEventsListener", + description="Number of event listeners subscribed.", + examples=[30], + ), + ] = None + kernel_version: Annotated[ + str | None, + Field( + alias="KernelVersion", + description='Kernel version of the host.\n\nOn Linux, this information obtained from `uname`. On Windows this\ninformation is queried from the HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\\nregistry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.\n', + examples=["4.9.38-moby"], + ), + ] = None + operating_system: Annotated[ + str | None, + Field( + alias="OperatingSystem", + description='Name of the host\'s operating system, for example: "Ubuntu 16.04.2 LTS"\nor "Windows Server 2016 Datacenter"\n', + examples=["Alpine Linux v3.5"], + ), + ] = None + os_version: Annotated[ + str | None, + Field( + alias="OSVersion", + description="Version of the host's operating system\n\n
\n\n> **Note**: The information returned in this field, including its\n> very existence, and the formatting of values, should not be considered\n> stable, and may change without notice.\n", + examples=["16.04"], + ), + ] = None + os_type: Annotated[ + str | None, + Field( + alias="OSType", + description='Generic type of the operating system of the host, as returned by the\nGo runtime (`GOOS`).\n\nCurrently returned values are "linux" and "windows". A full list of\npossible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n', + examples=["linux"], + ), + ] = None + architecture: Annotated[ + str | None, + Field( + alias="Architecture", + description="Hardware architecture of the host, as returned by the Go runtime\n(`GOARCH`).\n\nA full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).\n", + examples=["x86_64"], + ), + ] = None + ncpu: Annotated[ + int | None, + Field( + alias="NCPU", + description="The number of logical CPUs usable by the daemon.\n\nThe number of available CPUs is checked by querying the operating\nsystem when the daemon starts. Changes to operating system CPU\nallocation after the daemon is started are not reflected.\n", + examples=[4], + ), + ] = None + mem_total: Annotated[ + int | None, + Field( + alias="MemTotal", + description="Total amount of physical memory available on the host, in bytes.\n", + examples=[2095882240], + ), + ] = None + index_server_address: Annotated[ + str | None, + Field( + alias="IndexServerAddress", + description="Address / URL of the index server that is used for image search,\nand as a default for user authentication for Docker Hub and Docker Cloud.\n", + examples=["https://index.docker.io/v1/"], + ), + ] = "https://index.docker.io/v1/" + registry_config: Annotated[ + RegistryServiceConfig | None, Field(alias="RegistryConfig") + ] = None + generic_resources: Annotated[ + GenericResources | None, Field(alias="GenericResources") + ] = None + http_proxy: Annotated[ + str | None, + Field( + alias="HttpProxy", + description="HTTP-proxy configured for the daemon. This value is obtained from the\n[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n", + examples=["http://xxxxx:xxxxx@proxy.corp.example.com:8080"], + ), + ] = None + https_proxy: Annotated[ + str | None, + Field( + alias="HttpsProxy", + description="HTTPS-proxy configured for the daemon. This value is obtained from the\n[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.\nCredentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL\nare masked in the API response.\n\nContainers do not automatically inherit this configuration.\n", + examples=["https://xxxxx:xxxxx@proxy.corp.example.com:4443"], + ), + ] = None + no_proxy: Annotated[ + str | None, + Field( + alias="NoProxy", + description="Comma-separated list of domain extensions for which no proxy should be\nused. 
This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)\nenvironment variable.\n\nContainers do not automatically inherit this configuration.\n", + examples=["*.local, 169.254/16"], + ), + ] = None + name: Annotated[ + str | None, + Field( + alias="Name", + description="Hostname of the host.", + examples=["node5.corp.example.com"], + ), + ] = None + labels: Annotated[ + list[str] | None, + Field( + alias="Labels", + description="User-defined labels (key/value metadata) as set on the daemon.\n\n
\n\n> **Note**: When part of a Swarm, nodes can both have _daemon_ labels,\n> set through the daemon configuration, and _node_ labels, set from a\n> manager node in the Swarm. Node labels are not included in this\n> field. Node labels can be retrieved using the `/nodes/(id)` endpoint\n> on a manager node in the Swarm.\n", + examples=[["storage=ssd", "production"]], + ), + ] = None + experimental_build: Annotated[ + bool | None, + Field( + alias="ExperimentalBuild", + description="Indicates if experimental features are enabled on the daemon.\n", + examples=[True], + ), + ] = None + server_version: Annotated[ + str | None, + Field( + alias="ServerVersion", + description="Version string of the daemon.\n", + examples=["20.10.25"], + ), + ] = None + cluster_store: Annotated[ + str | None, + Field( + alias="ClusterStore", + description="URL of the distributed storage backend.\n\n\nThe storage backend is used for multihost networking (to store\nnetwork and endpoint information) and by the node discovery mechanism.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n", + examples=["consul://consul.corp.example.com:8600/some/path"], + ), + ] = None + cluster_advertise: Annotated[ + str | None, + Field( + alias="ClusterAdvertise", + description="The network endpoint that the Engine advertises for the purpose of\nnode discovery. ClusterAdvertise is a `host:port` combination on which\nthe daemon is reachable by other hosts.\n\n
\n\n> **Deprecated**: This field is only propagated when using standalone Swarm\n> mode, and overlay networking using an external k/v store. Overlay\n> networks with Swarm mode enabled use the built-in raft store, and\n> this field will be empty.\n", + examples=["node5.corp.example.com:8000"], + ), + ] = None + runtimes: Annotated[ + dict[str, Runtime] | None, + Field( + alias="Runtimes", + description='List of [OCI compliant](https://github.com/opencontainers/runtime-spec)\nruntimes configured on the daemon. Keys hold the "name" used to\nreference the runtime.\n\nThe Docker daemon relies on an OCI compliant runtime (invoked via the\n`containerd` daemon) as its interface to the Linux kernel namespaces,\ncgroups, and SELinux.\n\nThe default runtime is `runc`, and automatically configured. Additional\nruntimes can be configured by the user and will be listed here.\n', + examples=[ + { + "runc": {"path": "runc"}, + "runc-master": {"path": "/go/bin/runc"}, + "custom": { + "path": "/usr/local/bin/my-oci-runtime", + "runtimeArgs": ["--debug", "--systemd-cgroup=false"], + }, + } + ], + ), + ] = { + "runc": {"path": "runc"} # type: ignore[dict-item] + } + default_runtime: Annotated[ + str | None, + Field( + alias="DefaultRuntime", + description="Name of the default OCI runtime that is used when starting containers.\n\nThe default can be overridden per-container at create time.\n", + examples=["runc"], + ), + ] = "runc" + swarm: Annotated[SwarmInfo | None, Field(alias="Swarm")] = None + live_restore_enabled: Annotated[ + bool | None, + Field( + alias="LiveRestoreEnabled", + description="Indicates if live restore is enabled.\n\nIf enabled, containers are kept running when the daemon is shutdown\nor upon daemon start if running containers are detected.\n", + examples=[False], + ), + ] = False + isolation: Annotated[ + Isolation2 | None, + Field( + alias="Isolation", + description="Represents the isolation technology to use as a default for containers.\nThe supported values are platform-specific.\n\nIf no isolation value is specified on daemon start, on Windows client,\nthe default is `hyperv`, and on Windows server, the default is `process`.\n\nThis option is currently not used on other platforms.\n", + ), + ] = Isolation2.default + init_binary: Annotated[ + str | None, + Field( + alias="InitBinary", + description="Name and, optional, path of the `docker-init` binary.\n\nIf the path is omitted, the daemon searches the host's `$PATH` for the\nbinary and uses the first result.\n", + examples=["docker-init"], + ), + ] = None + containerd_commit: Annotated[Commit | None, Field(alias="ContainerdCommit")] = None + runc_commit: Annotated[Commit | None, Field(alias="RuncCommit")] = None + init_commit: Annotated[Commit | None, Field(alias="InitCommit")] = None + security_options: Annotated[ + list[str] | None, + Field( + alias="SecurityOptions", + description="List of security features that are enabled on the daemon, such as\napparmor, seccomp, SELinux, user-namespaces (userns), and rootless.\n\nAdditional configuration options for each security feature may\nbe present, and are included as a comma-separated list of key/value\npairs.\n", + examples=[ + [ + "name=apparmor", + "name=seccomp,profile=default", + "name=selinux", + "name=userns", + "name=rootless", + ] + ], + ), + ] = None + product_license: Annotated[ + str | None, + Field( + alias="ProductLicense", + description="Reports a summary of the product license on the daemon.\n\nIf a commercial license has been applied to the daemon, information\nsuch as 
number of nodes, and expiration are included.\n", + examples=["Community Engine"], + ), + ] = None + default_address_pools: Annotated[ + list[DefaultAddressPool] | None, + Field( + alias="DefaultAddressPools", + description='List of custom default address pools for local networks, which can be\nspecified in the daemon.json file or dockerd option.\n\nExample: a Base "10.10.0.0/16" with Size 24 will define the set of 256\n10.10.[0-255].0/24 address pools.\n', + ), + ] = None + warnings: Annotated[ + list[str] | None, + Field( + alias="Warnings", + description="List of warnings / informational messages about missing features, or\nissues related to the daemon configuration.\n\nThese messages can be printed by the client as information to the user.\n", + examples=[ + [ + "WARNING: No memory limit support", + "WARNING: bridge-nf-call-iptables is disabled", + "WARNING: bridge-nf-call-ip6tables is disabled", + ] + ], + ), + ] = None diff --git a/packages/models-library/src/models_library/generics.py b/packages/models-library/src/models_library/generics.py index f14b441389d..753510d088b 100644 --- a/packages/models-library/src/models_library/generics.py +++ b/packages/models-library/src/models_library/generics.py @@ -1,85 +1,66 @@ -from typing import ( - Any, - Dict, - Generic, - ItemsView, - Iterable, - Iterator, - KeysView, - List, - Optional, - TypeVar, - ValuesView, -) - -from pydantic import validator -from pydantic.generics import GenericModel +from collections.abc import ItemsView, Iterable, Iterator, KeysView, ValuesView +from typing import Any, Generic, TypeVar + +from pydantic import BaseModel, RootModel DictKey = TypeVar("DictKey") DictValue = TypeVar("DictValue") -class DictModel(GenericModel, Generic[DictKey, DictValue]): - __root__: Dict[DictKey, DictValue] +class DictModel(RootModel[dict[DictKey, DictValue]], Generic[DictKey, DictValue]): + root: dict[DictKey, DictValue] def __getitem__(self, k: DictKey) -> DictValue: - return self.__root__.__getitem__(k) + return self.root.__getitem__(k) def __setitem__(self, k: DictKey, v: DictValue) -> None: - self.__root__.__setitem__(k, v) + self.root.__setitem__(k, v) def items(self) -> ItemsView[DictKey, DictValue]: - return self.__root__.items() + return self.root.items() def keys(self) -> KeysView[DictKey]: - return self.__root__.keys() + return self.root.keys() def values(self) -> ValuesView[DictValue]: - return self.__root__.values() + return self.root.values() def update(self, *s: Iterable[tuple[DictKey, DictValue]]) -> None: - return self.__root__.update(*s) + return self.root.update(*s) - def __iter__(self) -> Iterator[DictKey]: - return self.__root__.__iter__() + def __iter__(self) -> Iterator[DictKey]: # type: ignore + return self.root.__iter__() - def get(self, key: DictKey, default: Optional[DictValue] = None): - return self.__root__.get(key, default) + def get(self, key: DictKey, default: DictValue | None = None): + return self.root.get(key, default) def setdefault(self, key: DictKey, default: DictValue): - return self.__root__.setdefault(key, default) + return self.root.setdefault(key, default) def __len__(self) -> int: - return self.__root__.__len__() + return self.root.__len__() DataT = TypeVar("DataT") -class ListModel(GenericModel, Generic[DataT]): - __root__: List[DataT] +class ListModel(RootModel[list[DataT]], Generic[DataT]): + root: list[DataT] def __iter__(self): - return iter(self.__root__) + return iter(self.root) def __getitem__(self, item): - return self.__root__[item] + return self.root[item] def __len__(self): - 
return len(self.__root__) - + return len(self.root) -class Envelope(GenericModel, Generic[DataT]): - data: Optional[DataT] = None - error: Optional[Any] = None - @classmethod - def parse_data(cls, obj): - return cls.parse_obj({"data": obj}) +class Envelope(BaseModel, Generic[DataT]): + data: DataT | None = None + error: Any | None = None - @validator("data", pre=True) @classmethod - def empty_dict_is_none(cls, v): - if v == {}: - return None - return v + def from_data(cls, obj: Any) -> "Envelope": + return cls.model_validate({"data": obj}) diff --git a/packages/models-library/src/models_library/groups.py b/packages/models-library/src/models_library/groups.py new file mode 100644 index 00000000000..d35b1de7dcc --- /dev/null +++ b/packages/models-library/src/models_library/groups.py @@ -0,0 +1,148 @@ +from typing import Annotated, Final, NamedTuple, TypeAlias + +from common_library.basic_types import DEFAULT_FACTORY +from common_library.groups_enums import GroupType as GroupType +from pydantic import BaseModel, ConfigDict, EmailStr, Field, field_validator +from pydantic.config import JsonDict +from pydantic.types import PositiveInt +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + +from .users import UserID, UserNameID +from .utils.common_validators import create_enums_pre_validator + +EVERYONE_GROUP_ID: Final[int] = 1 + +GroupID: TypeAlias = PositiveInt + +__all__: tuple[str, ...] = ("GroupType",) + + +class Group(BaseModel): + gid: PositiveInt + name: str + description: str + group_type: Annotated[GroupType, Field(alias="type")] + thumbnail: str | None + + inclusion_rules: Annotated[ + dict[str, str], + Field( + default_factory=dict, + ), + ] = DEFAULT_FACTORY + + _from_equivalent_enums = field_validator("group_type", mode="before")( + create_enums_pre_validator(GroupType) + ) + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "gid": 1, + "name": "Everyone", + "type": "everyone", + "description": "all users", + "thumbnail": None, + }, + { + "gid": 2, + "name": "User", + "description": "primary group", + "type": "primary", + "thumbnail": None, + }, + { + "gid": 3, + "name": "Organization", + "description": "standard group", + "type": "standard", + "thumbnail": None, + "inclusionRules": {}, + }, + { + "gid": 4, + "name": "Product", + "description": "standard group for products", + "type": "standard", + "thumbnail": None, + }, + ] + } + ) + + model_config = ConfigDict( + populate_by_name=True, json_schema_extra=_update_json_schema_extra + ) + + +class AccessRightsDict(TypedDict): + read: bool + write: bool + delete: bool + + +GroupInfoTuple: TypeAlias = tuple[Group, AccessRightsDict] + + +class GroupsByTypeTuple(NamedTuple): + primary: GroupInfoTuple | None + standard: list[GroupInfoTuple] + everyone: GroupInfoTuple | None + + +class GroupMember(BaseModel): + # identifiers + id: UserID + primary_gid: GroupID + + # private profile + name: UserNameID | None + email: EmailStr | None + first_name: str | None + last_name: str | None + + # group access + access_rights: AccessRightsDict | None = None + + model_config = ConfigDict(from_attributes=True) + + +class StandardGroupCreate(BaseModel): + name: str + description: str | None = None + thumbnail: str | None = None + inclusion_rules: Annotated[ + dict[str, str], + Field( + default_factory=dict, + description="Maps user's column and regular expression", + ), + ] = DEFAULT_FACTORY + + +class 
StandardGroupUpdate(BaseModel): + name: str | None = None + description: str | None = None + thumbnail: str | None = None + inclusion_rules: dict[str, str] | None = None + + +class GroupAtDB(Group): + # NOTE: deprecate and use `Group` instead + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "example": { + "gid": 218, + "name": "Friends group", + "description": "Joey, Ross, Rachel, Monica, Phoeby and Chandler", + "type": "standard", + "thumbnail": "https://image.flaticon.com/icons/png/512/23/23374.png", + } + }, + ) diff --git a/packages/models-library/src/models_library/healthchecks.py b/packages/models-library/src/models_library/healthchecks.py new file mode 100644 index 00000000000..6e78b31a5dd --- /dev/null +++ b/packages/models-library/src/models_library/healthchecks.py @@ -0,0 +1,28 @@ +# +# healthcheck models for liveness probes and readiness probes +# +# SEE https://medium.com/polarsquad/how-should-i-answer-a-health-check-aa1fcf6e858e +# SEE https://docs.docker.com/engine/reference/builder/#healthcheck + + +from datetime import timedelta +from typing import TypeAlias + +from pydantic import BaseModel + + +class IsResponsive(BaseModel): + elapsed: timedelta # time elapsed to respond + + def __bool__(self) -> bool: + return True + + +class IsNonResponsive(BaseModel): + reason: str + + def __bool__(self) -> bool: + return False + + +LivenessResult: TypeAlias = IsResponsive | IsNonResponsive diff --git a/packages/models-library/src/models_library/invitations.py b/packages/models-library/src/models_library/invitations.py new file mode 100644 index 00000000000..595c09b6012 --- /dev/null +++ b/packages/models-library/src/models_library/invitations.py @@ -0,0 +1,65 @@ +from datetime import datetime, timezone +from typing import Final + +from pydantic import BaseModel, EmailStr, Field, PositiveInt, field_validator + +from .products import ProductName + +_MAX_LEN: Final = 40 + + +class InvitationInputs(BaseModel): + """Input data necessary to create an invitation""" + + issuer: str = Field( + ..., + description="Identifies who issued the invitation. E.g. an email, a service name etc. NOTE: it will be trimmed if exceeds maximum", + min_length=1, + max_length=_MAX_LEN, + ) + guest: EmailStr = Field( + ..., + description="Invitee's email. Note that the registration can ONLY be used with this email", + ) + trial_account_days: PositiveInt | None = Field( + default=None, + description="If set, this invitation will activate a trial account." 
+ "Sets the number of days from creation until the account expires", + ) + extra_credits_in_usd: PositiveInt | None = Field( + default=None, + description="If set, the account's primary wallet will add extra credits corresponding to this ammount in USD", + ) + product: ProductName | None = Field( + default=None, + description="If None, it will use INVITATIONS_DEFAULT_PRODUCT", + ) + + @field_validator("issuer", mode="before") + @classmethod + def trim_long_issuers_to_max_length(cls, v): + if v and isinstance(v, str): + return v[:_MAX_LEN] + return v + + +class InvitationContent(InvitationInputs): + """Data in an invitation""" + + # avoid using default to mark exactly the time + created: datetime = Field(..., description="Timestamp for creation") + + def as_invitation_inputs(self) -> InvitationInputs: + return self.model_validate(self.model_dump(exclude={"created"})) # copy excluding "created" + + @classmethod + def create_from_inputs( + cls, invitation_inputs: InvitationInputs, default_product: ProductName + ) -> "InvitationContent": + + kwargs = invitation_inputs.model_dump(exclude_none=True) + kwargs.setdefault("product", default_product) + return cls( + created=datetime.now(tz=timezone.utc), + **kwargs, + ) diff --git a/packages/models-library/src/models_library/licenses.py b/packages/models-library/src/models_library/licenses.py new file mode 100644 index 00000000000..b65b7f9d6fe --- /dev/null +++ b/packages/models-library/src/models_library/licenses.py @@ -0,0 +1,172 @@ +from datetime import date, datetime +from enum import auto +from typing import Annotated, Any, NamedTuple, NewType, NotRequired, TypeAlias, cast +from uuid import UUID + +from models_library.resource_tracker import PricingPlanId +from pydantic import BaseModel, ConfigDict, PositiveInt, StringConstraints +from pydantic.config import JsonDict +from typing_extensions import TypedDict + +from .products import ProductName +from .resource_tracker import PricingPlanId +from .utils.enums import StrAutoEnum + +LicensedItemID: TypeAlias = UUID +LicensedResourceID: TypeAlias = UUID + +LICENSED_ITEM_VERSION_RE = r"^\d+\.\d+\.\d+$" +LicensedItemKey = NewType("LicensedItemKey", str) +LicensedItemVersion = Annotated[ + str, StringConstraints(pattern=LICENSED_ITEM_VERSION_RE) +] + + +class LicensedResourceType(StrAutoEnum): + VIP_MODEL = auto() + + +_VIP_FEATURES_EXAMPLE = { + # NOTE: this view is how it would be after parsed and validated + "age": "34 years", + "date": "2015-03-01", + "ethnicity": "Caucasian", + "functionality": "Static", + "height": "1.77 m", + "name": "Duke", + "sex": "Male", + "version": "V2.0", + "weight": "70.2 Kg", + # other + "additional_field": "allowed", +} + + +class FeaturesDict(TypedDict): + # keep alphabetical + age: NotRequired[str] + date: date + ethnicity: NotRequired[str] + functionality: NotRequired[str] + height: NotRequired[str] + name: NotRequired[str] + sex: NotRequired[str] + species: NotRequired[str] + version: NotRequired[str] + weight: NotRequired[str] + + +VIP_DETAILS_EXAMPLE = { + "id": 1, + "description": "A detailed description of the VIP model", + "thumbnail": "https://example.com/thumbnail.jpg", + "features": _VIP_FEATURES_EXAMPLE, + "doi": "10.1000/xyz123", + "license_key": "ABC123XYZ", + "license_version": "1.0", + "protection": "Code", + "available_from_url": "https://example.com/download", + "additional_field": "trimmed if rest", +} + + +# +# DB +# + + +class LicensedItemDB(BaseModel): + licensed_item_id: LicensedItemID + display_name: str + + key: LicensedItemKey + version: 
LicensedItemVersion + licensed_resource_type: LicensedResourceType + + pricing_plan_id: PricingPlanId + product_name: ProductName + is_hidden_on_market: bool + + # states + created: datetime + modified: datetime + + model_config = ConfigDict(from_attributes=True) + + +class LicensedItemPatchDB(BaseModel): + display_name: str | None = None + pricing_plan_id: PricingPlanId | None = None + + +class LicensedResourceDB(BaseModel): + licensed_resource_id: LicensedResourceID + display_name: str + + licensed_resource_name: str + licensed_resource_type: LicensedResourceType + licensed_resource_data: dict[str, Any] | None + priority: int + + # states + created: datetime + modified: datetime + trashed: datetime | None + + model_config = ConfigDict(from_attributes=True) + + +class LicensedResourcePatchDB(BaseModel): + display_name: str | None = None + licensed_resource_name: str | None = None + trash: bool | None = None + + +class LicensedItem(BaseModel): + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + display_name: str + licensed_resource_type: LicensedResourceType + licensed_resources: list[dict[str, Any]] + pricing_plan_id: PricingPlanId + is_hidden_on_market: bool + created_at: datetime + modified_at: datetime + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "licensed_item_id": "0362b88b-91f8-4b41-867c-35544ad1f7a1", + "key": "Duke", + "version": "1.0.0", + "display_name": "my best model", + "licensed_resource_type": f"{LicensedResourceType.VIP_MODEL}", + "licensed_resources": [ + cast( + JsonDict, + { + "category_id": "HumanWholeBody", + "category_display": "Humans", + "source": VIP_DETAILS_EXAMPLE, + }, + ) + ], + "pricing_plan_id": "15", + "is_hidden_on_market": False, + "created_at": "2024-12-12 09:59:26.422140", + "modified_at": "2024-12-12 09:59:26.422140", + } + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + +class LicensedItemPage(NamedTuple): + total: PositiveInt + items: list[LicensedItem] diff --git a/packages/models-library/src/models_library/osparc_variable_identifier.py b/packages/models-library/src/models_library/osparc_variable_identifier.py new file mode 100644 index 00000000000..80a8e6d0fc0 --- /dev/null +++ b/packages/models-library/src/models_library/osparc_variable_identifier.py @@ -0,0 +1,138 @@ +from copy import deepcopy +from typing import Any, TypeVar + +from common_library.errors_classes import OsparcErrorMixin +from models_library.basic_types import ConstrainedStr + +from pydantic import BaseModel + +from .utils.string_substitution import OSPARC_IDENTIFIER_PREFIX + +T = TypeVar("T") + + +class OsparcVariableIdentifier(ConstrainedStr): + # NOTE: To allow parametrized value, set the type to Union[OsparcVariableIdentifier, ...] 
+ # NOTE: When dealing with str types, to avoid unexpected behavior, the following + # order is suggested `OsparcVariableIdentifier | str` + # NOTE: in below regex `{`` and `}` are respectively escaped with `{{` and `}}` + pattern = ( + rf"^\${{1,2}}(?:\{{)?{OSPARC_IDENTIFIER_PREFIX}[A-Za-z0-9_]+(?:\}})?(:-.+)?$" + ) + + def _get_without_template_markers(self) -> str: + # $VAR + # ${VAR} + # ${VAR:-} + # ${VAR:-default} + # ${VAR:-{}} + return ( + self.removeprefix("$$") + .removeprefix("$") + .removeprefix("{") + .removesuffix("}") + ) + + @property + def name(self) -> str: + return self._get_without_template_markers().split(":-")[0] + + @property + def default_value(self) -> str | None: + parts = self._get_without_template_markers().split(":-") + return parts[1] if len(parts) > 1 else None + + +class UnresolvedOsparcVariableIdentifierError(OsparcErrorMixin, TypeError): + msg_template = "Provided argument is unresolved: value={value}" + + +def raise_if_unresolved(var: OsparcVariableIdentifier | T) -> T: + """Raise error or return original value + + Use like below to make linters play nice. + ``` + def example_func(par: OsparcVariableIdentifier | int) -> None: + _ = 12 + check_if_unresolved(par) + ``` + + Raises: + TypeError: if the the OsparcVariableIdentifier was unresolved + """ + if isinstance(var, OsparcVariableIdentifier): + raise UnresolvedOsparcVariableIdentifierError(value=var) + return var + + +def replace_osparc_variable_identifier( # noqa: C901 + obj: T, osparc_variables: dict[str, Any] +) -> T: + """Replaces mostly in place an instance of `OsparcVariableIdentifier` with the + value provided inside `osparc_variables`. + + NOTE: when using make sure that `obj` is of type `BaseModel` or + `OsparcVariableIdentifier` otherwise it will nto work as intended. + + NOTE: if the provided `obj` is instance of OsparcVariableIdentifier in place + replacement cannot be done. You need to assign it to the previous handler. + + To be safe, always use like so: + ``` + to_replace_obj = replace_osparc_variable_identifier(to_replace_obj) + + Or like so: + ``` + obj.to_replace_attribute = replace_osparc_variable_identifier(obj.to_replace_attribute) + ``` + """ + + if isinstance(obj, OsparcVariableIdentifier): + if obj.name in osparc_variables: + return deepcopy(osparc_variables[obj.name]) # type: ignore + if obj.default_value is not None: + return deepcopy(obj.default_value) # type: ignore + elif isinstance(obj, dict): + for key, value in obj.items(): + obj[key] = replace_osparc_variable_identifier(value, osparc_variables) + elif isinstance(obj, BaseModel): + for key, value in obj.__dict__.items(): + obj.__dict__[key] = replace_osparc_variable_identifier( + value, osparc_variables + ) + elif isinstance(obj, list): + for i, item in enumerate(obj): + obj[i] = replace_osparc_variable_identifier(item, osparc_variables) + elif isinstance(obj, tuple): + new_tuple = tuple( + replace_osparc_variable_identifier(item, osparc_variables) for item in obj + ) + obj = new_tuple # type: ignore + elif isinstance(obj, set): + new_set = { + replace_osparc_variable_identifier(item, osparc_variables) for item in obj + } + obj = new_set # type: ignore + return obj + + +def raise_if_unresolved_osparc_variable_identifier_found(obj: Any) -> None: + """ + NOTE: when using make sure that `obj` is of type `BaseModel` or + `OsparcVariableIdentifier` otherwise it will nto work as intended. 
+ + Raises: + UnresolvedOsparcVariableIdentifierError: if not all instances of + `OsparcVariableIdentifier` were replaced + """ + if isinstance(obj, OsparcVariableIdentifier): + raise_if_unresolved(obj) + elif isinstance(obj, dict): + for key, value in obj.items(): + raise_if_unresolved_osparc_variable_identifier_found(key) + raise_if_unresolved_osparc_variable_identifier_found(value) + elif isinstance(obj, BaseModel): + for value in obj.__dict__.values(): + raise_if_unresolved_osparc_variable_identifier_found(value) + elif isinstance(obj, list | tuple | set): + for item in obj: + raise_if_unresolved_osparc_variable_identifier_found(item) diff --git a/packages/models-library/src/models_library/payments.py b/packages/models-library/src/models_library/payments.py new file mode 100644 index 00000000000..ff704ab7d2e --- /dev/null +++ b/packages/models-library/src/models_library/payments.py @@ -0,0 +1,65 @@ +from decimal import Decimal +from typing import TypeAlias + +from pydantic import BaseModel, ConfigDict, Field, field_validator + +from .emails import LowerCaseEmailStr +from .products import StripePriceID, StripeTaxRateID + +StripeInvoiceID: TypeAlias = str + + +class UserInvoiceAddress(BaseModel): + line1: str | None = None + state: str | None = None + postal_code: str | None = None + city: str | None = None + country: str = Field( + ..., + description="Currently validated in webserver via pycountry library. Two letter country code alpha_2 expected.", + ) + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "line1": None, + "state": None, + "postal_code": None, + "city": None, + "country": "CH", + }, + ] + } + ) + + @field_validator("*", mode="before") + @classmethod + def parse_empty_string_as_null(cls, v): + if isinstance(v, str) and len(v.strip()) == 0: + return None + return v + + +class InvoiceDataGet(BaseModel): + credit_amount: Decimal + stripe_price_id: StripePriceID + stripe_tax_rate_id: StripeTaxRateID + user_invoice_address: UserInvoiceAddress + user_display_name: str + user_email: LowerCaseEmailStr + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "credit_amount": Decimal(15.5), # type: ignore[dict-item] + "stripe_price_id": "stripe-price-id", + "stripe_tax_rate_id": "stripe-tax-rate-id", + "user_invoice_address": UserInvoiceAddress.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "user_display_name": "My Name", + "user_email": "email@example.itis", + }, + ] + } + ) diff --git a/packages/models-library/src/models_library/products.py b/packages/models-library/src/models_library/products.py index e7f714848f5..d9f25a000f5 100644 --- a/packages/models-library/src/models_library/products.py +++ b/packages/models-library/src/models_library/products.py @@ -1 +1,5 @@ -ProductName = str +from typing import TypeAlias + +ProductName: TypeAlias = str +StripePriceID: TypeAlias = str +StripeTaxRateID: TypeAlias = str diff --git a/packages/models-library/src/models_library/progress_bar.py b/packages/models-library/src/models_library/progress_bar.py new file mode 100644 index 00000000000..ad8130570e5 --- /dev/null +++ b/packages/models-library/src/models_library/progress_bar.py @@ -0,0 +1,103 @@ +from typing import Literal, TypeAlias + +from pydantic import BaseModel, ConfigDict + +# NOTE: keep a list of possible unit, and please use correct official unit names +ProgressUnit: TypeAlias = Literal["Byte"] + + +class ProgressStructuredMessage(BaseModel): + description: str + current: float + total: int + unit: str | None = 
None + sub: "ProgressStructuredMessage | None" = None + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "description": "some description", + "current": 12.2, + "total": 123, + }, + { + "description": "some description", + "current": 12.2, + "total": 123, + "unit": "Byte", + }, + { + "description": "downloading", + "current": 2.0, + "total": 5, + "sub": { + "description": "port 2", + "current": 12.2, + "total": 123, + "unit": "Byte", + }, + }, + ] + } + ) + + +UNITLESS = None + + +class ProgressReport(BaseModel): + actual_value: float + total: float = 1.0 + attempt: int = 0 + unit: ProgressUnit | None = UNITLESS + message: ProgressStructuredMessage | None = None + + @property + def percent_value(self) -> float: + if self.total != 0: + return max(min(self.actual_value / self.total, 1.0), 0.0) + return 0 + + def _recursive_compose_message(self, struct_msg: ProgressStructuredMessage) -> str: + msg = f"{struct_msg.description}" + if struct_msg.sub: + return f"{msg}/{self._recursive_compose_message(struct_msg.sub)}" + msg = f"{msg} {struct_msg.current} / {struct_msg.total}" + return f"{msg} {struct_msg.unit}" if struct_msg.unit is not UNITLESS else msg + + @property + def composed_message(self) -> str: + msg = f"{self.actual_value} / {self.total}" + msg = f"{msg} {self.unit}" if self.unit is not UNITLESS else msg + if self.message: + msg = f"{self.message.description} ({msg})" + if self.message.sub: + msg = f"{msg}/{self._recursive_compose_message(self.message.sub)}" + + return msg + + model_config = ConfigDict( + frozen=True, + json_schema_extra={ + "examples": [ + # typical percent progress (no units) + { + "actual_value": 0.3, + "total": 1.0, + }, + # typical byte progress + { + "actual_value": 128.5, + "total": 1024.0, + "unit": "Byte", + }, + # typical progress with sub progresses + { + "actual_value": 0.3, + "total": 1.0, + "message": ProgressStructuredMessage.model_config["json_schema_extra"]["examples"][2], # type: ignore [index] + }, + ] + }, + ) diff --git a/packages/models-library/src/models_library/projects.py b/packages/models-library/src/models_library/projects.py index f4a02fa1683..0c4dd0884b9 100644 --- a/packages/models-library/src/models_library/projects.py +++ b/packages/models-library/src/models_library/projects.py @@ -1,104 +1,150 @@ """ - Models a study's project document +Models a study's project document """ -from copy import deepcopy + from datetime import datetime from enum import Enum -from typing import Any, Optional +from typing import Annotated, Any, Final, TypeAlias from uuid import UUID -from pydantic import BaseModel, Extra, Field, HttpUrl, constr, validator +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import ConstrainedStr +from models_library.folders import FolderID +from models_library.workspaces import WorkspaceID +from pydantic import ( + BaseModel, + ConfigDict, + Field, + HttpUrl, + StringConstraints, + field_validator, +) from .basic_regex import DATE_RE, UUID_RE_BASE from .emails import LowerCaseEmailStr +from .groups import GroupID from .projects_access import AccessRights, GroupIDStr from .projects_nodes import Node from .projects_nodes_io import NodeIDStr from .projects_state import ProjectState -from .projects_ui import StudyUI +from .users import UserID +from .utils.common_validators import ( + empty_str_to_none_pre_validator, + none_to_empty_str_pre_validator, +) +from .utils.enums import StrAutoEnum + +ProjectID: TypeAlias = UUID +CommitID: TypeAlias = int +ClassifierID: 
TypeAlias = str + +NodesDict: TypeAlias = dict[NodeIDStr, Node] +_DATETIME_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%S.%fZ" -ProjectID = UUID -ProjectIDStr = constr(regex=UUID_RE_BASE) -ClassifierID = str +ProjectIDStr: TypeAlias = Annotated[str, StringConstraints(pattern=UUID_RE_BASE)] -# TODO: for some reason class Workbench(BaseModel): __root__= does not work as I thought ... investigate! -Workbench = dict[NodeIDStr, Node] + +class DateTimeStr(ConstrainedStr): + pattern = DATE_RE + + @classmethod + def to_datetime(cls, s: "DateTimeStr"): + return datetime.strptime(s, _DATETIME_FORMAT) # NOTE: careful this is in sync with packages/postgres-database/src/simcore_postgres_database/models/projects.py!!! class ProjectType(str, Enum): - """ - template: template project - standard: standard project - """ - TEMPLATE = "TEMPLATE" STANDARD = "STANDARD" +class ProjectTemplateType(StrAutoEnum): + TEMPLATE = "TEMPLATE" + TUTORIAL = "TUTORIAL" + HYPERTOOL = "HYPERTOOL" + + class BaseProjectModel(BaseModel): # Description of the project - uuid: ProjectID = Field( - ..., - description="project unique identifier", - examples=[ - "07640335-a91f-468c-ab69-a374fa82078d", - "9bcf8feb-c1b1-41b6-b201-639cd6ccdba8", - ], - ) - name: str = Field( - ..., description="project name", examples=["Temporal Distortion Simulator"] - ) - description: str = Field( - ..., - description="longer one-line description about the project", - examples=["Dabbling in temporal transitions ..."], - ) - thumbnail: Optional[HttpUrl] = Field( - ..., - description="url of the project thumbnail", - examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"], - ) - - creation_date: datetime = Field(...) - last_change_date: datetime = Field(...) + uuid: Annotated[ + ProjectID, + Field( + description="project unique identifier", + examples=[ + "07640335-a91f-468c-ab69-a374fa82078d", + "9bcf8feb-c1b1-41b6-b201-639cd6ccdba8", + ], + ), + ] + + name: Annotated[ + str, + Field(description="project name", examples=["Temporal Distortion Simulator"]), + ] + description: Annotated[ + str, + Field( + description="longer one-line description about the project", + examples=["Dabbling in temporal transitions ..."], + ), + ] + thumbnail: Annotated[ + HttpUrl | None, + Field( + description="url of the project thumbnail", + examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"], + ), + ] + + creation_date: datetime + last_change_date: datetime # Pipeline of nodes (SEE projects_nodes.py) - workbench: Workbench = Field(..., description="Project's pipeline") + workbench: Annotated[NodesDict, Field(description="Project's pipeline")] - @validator("thumbnail", always=True, pre=True) - @classmethod - def convert_empty_str_to_none(cls, v): - if isinstance(v, str) and v == "": - return None - return v + # validators + _empty_thumbnail_is_none = field_validator("thumbnail", mode="before")( + empty_str_to_none_pre_validator + ) + + _none_description_is_empty = field_validator("description", mode="before")( + none_to_empty_str_pre_validator + ) class ProjectAtDB(BaseProjectModel): # Model used to READ from database - id: int = Field(..., description="The table primary index") + id: Annotated[int, Field(description="The table primary index")] - project_type: ProjectType = Field(..., alias="type", description="The project type") + project_type: Annotated[ + ProjectType, Field(alias="type", description="The project type") + ] + template_type: Annotated[ + ProjectTemplateType | None, + Field( + examples=["TEMPLATE", "TUTORIAL", "HYPERTOOL", None], + ), + ] - 
prj_owner: Optional[int] = Field(..., description="The project owner id") + prj_owner: Annotated[int | None, Field(description="The project owner id")] - published: Optional[bool] = Field( - False, description="Defines if a study is available publicly" - ) + published: Annotated[ + bool | None, + Field(description="Defines if a study is available publicly"), + ] = False - @validator("project_type", pre=True) + @field_validator("project_type", mode="before") @classmethod - def convert_sql_alchemy_enum(cls, v): + def _convert_sql_alchemy_enum(cls, v): if isinstance(v, Enum): return v.value return v - class Config: - orm_mode = True - use_enum_values = True - allow_population_by_field_name = True + model_config = ConfigDict( + from_attributes=True, use_enum_values=True, populate_by_name=True + ) class Project(BaseProjectModel): @@ -106,68 +152,103 @@ class Project(BaseProjectModel): # NOT for usage with DB!! # Ownership and Access (SEE projects_access.py) - prj_owner: LowerCaseEmailStr = Field( - ..., description="user email", alias="prjOwner" - ) - - # Timestamps TODO: should we use datetime?? - creation_date: str = Field( - ..., - regex=DATE_RE, - description="project creation date", - examples=["2018-07-01T11:13:43Z"], - alias="creationDate", - ) - last_change_date: str = Field( - ..., - regex=DATE_RE, - description="last save date", - examples=["2018-07-01T11:13:43Z"], - alias="lastChangeDate", - ) - access_rights: dict[GroupIDStr, AccessRights] = Field( - ..., - description="object containing the GroupID as key and read/write/execution permissions as value", - alias="accessRights", - ) - - # Classification - tags: Optional[list[int]] = [] - classifiers: Optional[list[ClassifierID]] = Field( - default_factory=list, - description="Contains the reference to the project classifiers", - examples=["some:id:to:a:classifier"], - ) + prj_owner: Annotated[ + LowerCaseEmailStr, Field(description="user email", alias="prjOwner") + ] + access_rights: Annotated[ + dict[GroupIDStr, AccessRights], + Field( + description="object containing the GroupID as key and read/write/execution permissions as value", + alias="accessRights", + ), + ] + + # Lifecycle + creation_date: Annotated[ # type: ignore[assignment] + DateTimeStr, + Field( + description="project creation date", + examples=["2018-07-01T11:13:43Z"], + alias="creationDate", + ), + ] + last_change_date: Annotated[ # type: ignore[assignment] + DateTimeStr, + Field( + description="last save date", + examples=["2018-07-01T11:13:43Z"], + alias="lastChangeDate", + ), + ] # Project state (SEE projects_state.py) - state: Optional[ProjectState] = None - - # UI front-end setup (SEE projects_ui.py) - ui: Optional[StudyUI] = None - - # Quality - quality: dict[str, Any] = Field( - default_factory=dict, - description="stores the study quality assessment", - ) - - # Dev only - dev: Optional[dict] = Field( - default=None, description="object used for development purposes only" + state: ProjectState | None = None + + # Type of project + type: Annotated[ + ProjectType, + Field( + description="The project type", + examples=["TEMPLATE", "STANDARD"], + ), + ] + template_type: Annotated[ + ProjectTemplateType | None, + Field( + alias="templateType", + examples=["TEMPLATE", "TUTORIAL", "HYPERTOOL", None], + ), + ] + + # UI front-end fields (SEE projects_ui.py) + ui: dict[str, Any] | None = None + dev: dict[str, Any] | None = None + + # Parenthood + workspace_id: Annotated[ + WorkspaceID | None, + Field( + description="To which workspace project belongs. 
If None, belongs to private user workspace.", + alias="workspaceId", + ), + ] = None + + folder_id: Annotated[ + FolderID | None, + Field( + description="To which folder project belongs. If None, belongs to root folder.", + alias="folderId", + ), + ] = None + + # trash state + trashed: datetime | None = None + trashed_by: Annotated[UserID | None, Field(alias="trashedBy")] = None + trashed_by_primary_gid: Annotated[ + GroupID | None, Field(alias="trashedByPrimaryGid") + ] = None + trashed_explicitly: Annotated[bool, Field(alias="trashedExplicitly")] = False + + # Labeling + tags: Annotated[list[int] | None, Field(default_factory=list)] = DEFAULT_FACTORY + classifiers: Annotated[ + list[ClassifierID] | None, + Field( + default_factory=list, + description="Contains the reference to the project classifiers", + examples=["some:id:to:a:classifier"], + ), + ] = DEFAULT_FACTORY + quality: Annotated[ + dict[str, Any], + Field( + default_factory=dict, + description="stores the study quality assessment", + ), + ] = DEFAULT_FACTORY + + model_config = ConfigDict( + # NOTE: this is a security measure until we get rid of the ProjectDict variants + extra="forbid", + populate_by_name=True, ) - - class Config: - description = "Document that stores metadata, pipeline and UI setup of a study" - title = "osparc-simcore project" - extra = Extra.forbid - - @staticmethod - def schema_extra(schema: dict, _model: "Project"): - # pylint: disable=unsubscriptable-object - - # Patch to allow jsonschema nullable - # SEE https://github.com/samuelcolvin/pydantic/issues/990#issuecomment-645961530 - state_pydantic_schema = deepcopy(schema["properties"]["state"]) - schema["properties"]["state"] = { - "anyOf": [{"type": "null"}, state_pydantic_schema] - } diff --git a/packages/models-library/src/models_library/projects_access.py b/packages/models-library/src/models_library/projects_access.py index b7fe3593f60..a1e4db0cc31 100644 --- a/packages/models-library/src/models_library/projects_access.py +++ b/packages/models-library/src/models_library/projects_access.py @@ -4,10 +4,15 @@ from enum import Enum -from pydantic import BaseModel, Extra, Field, constr +from pydantic import BaseModel, ConfigDict, Field from pydantic.types import PositiveInt -GroupIDStr = constr(regex=r"^\S+$") +from .basic_types import IDStr +from .users import FirstNameStr, LastNameStr + + +class GroupIDStr(IDStr): + ... 
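# Minimal, self-contained sketch of the Annotated + Field(default_factory=...)
# pattern used in Project above. DEFAULT_FACTORY (from common_library.basic_types)
# is assumed to be a plain sentinel whose only job is to keep type checkers happy
# about the assignment; at runtime Pydantic builds the default from the factory.
from typing import Annotated, Any

from pydantic import BaseModel, Field


class _ProjectLabelsSketch(BaseModel):  # hypothetical model, for illustration only
    tags: Annotated[list[int], Field(default_factory=list)]
    quality: Annotated[dict[str, Any], Field(default_factory=dict)]


sketch = _ProjectLabelsSketch()
assert sketch.tags == []
assert sketch.quality == {}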
class AccessEnum(str, Enum): @@ -17,22 +22,26 @@ class AccessEnum(str, Enum): class AccessRights(BaseModel): - read: bool = Field(..., description="gives read access") - write: bool = Field(..., description="gives write access") - delete: bool = Field(..., description="gives deletion rights") + read: bool = Field(..., description="has read access") + write: bool = Field(..., description="has write access") + delete: bool = Field(..., description="has deletion rights") - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") class Owner(BaseModel): - user_id: PositiveInt = Field( - ..., - description="Owner's identifier when registered in the user's database table", - examples=[2], + user_id: PositiveInt = Field(..., description="Owner's user id") + first_name: FirstNameStr | None = Field(..., description="Owner's first name") + last_name: LastNameStr | None = Field(..., description="Owner's last name") + + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ + "examples": [ + # NOTE: None and empty string are both defining an undefined value + {"user_id": 1, "first_name": None, "last_name": None}, + {"user_id": 2, "first_name": "", "last_name": ""}, + {"user_id": 3, "first_name": "John", "last_name": "Smith"}, + ] + }, ) - first_name: str = Field(..., description="Owner first name", examples=["John"]) - last_name: str = Field(..., description="Owner last name", examples=["Smith"]) - - class Config: - extra = Extra.forbid diff --git a/packages/models-library/src/models_library/projects_comments.py b/packages/models-library/src/models_library/projects_comments.py new file mode 100644 index 00000000000..88937d83d78 --- /dev/null +++ b/packages/models-library/src/models_library/projects_comments.py @@ -0,0 +1,40 @@ +from datetime import datetime +from typing import TypeAlias + +from pydantic import BaseModel, ConfigDict, Field, PositiveInt + +from .projects import ProjectID +from .users import UserID + +CommentID: TypeAlias = PositiveInt + + +class _ProjectsCommentsBase(BaseModel): + comment_id: CommentID = Field( + ..., description="Primary key, identifies the comment" + ) + project_uuid: ProjectID = Field(..., description="project reference for this table") + user_id: UserID = Field( + ..., + description="user reference for this table", + ) + contents: str = Field( + ..., + description="Contents of the comment", + ) + created: datetime = Field( + ..., + description="Timestamp on creation", + ) + modified: datetime = Field( + ..., + description="Timestamp with last update", + ) + + +class ProjectsCommentsDB(_ProjectsCommentsBase): + model_config = ConfigDict(extra="forbid") + + +class ProjectsCommentsAPI(_ProjectsCommentsBase): + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/projects_networks.py b/packages/models-library/src/models_library/projects_networks.py index 3ec096c3c6f..ee255dd80ff 100644 --- a/packages/models-library/src/models_library/projects_networks.py +++ b/packages/models-library/src/models_library/projects_networks.py @@ -1,15 +1,20 @@ -from models_library.projects import ProjectID -from pydantic import BaseModel, Field, constr +import re +from typing import Annotated, Final, TypeAlias + +from pydantic import BaseModel, ConfigDict, Field, StringConstraints from .generics import DictModel +from .projects import ProjectID from .projects_nodes_io import NodeIDStr -SERVICE_NETWORK_RE = r"^[a-zA-Z]([a-zA-Z0-9_-]{0,63})$" +SERVICE_NETWORK_RE: Final[re.Pattern] = 
re.compile(r"^[a-zA-Z]([a-zA-Z0-9_-]{0,63})$") + +PROJECT_NETWORK_PREFIX: Final[str] = "prj-ntwrk" -PROJECT_NETWORK_PREFIX = "prj-ntwrk" -DockerNetworkName = constr(regex=SERVICE_NETWORK_RE) -DockerNetworkAlias = constr(regex=SERVICE_NETWORK_RE) +DockerNetworkName: TypeAlias = Annotated[str, StringConstraints(pattern=SERVICE_NETWORK_RE)] + +DockerNetworkAlias: TypeAlias = Annotated[str, StringConstraints(pattern=SERVICE_NETWORK_RE)] class ContainerAliases(DictModel[NodeIDStr, DockerNetworkAlias]): @@ -17,8 +22,8 @@ class ContainerAliases(DictModel[NodeIDStr, DockerNetworkAlias]): class NetworksWithAliases(DictModel[DockerNetworkName, ContainerAliases]): - class Config: - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "examples": [ { "network_one": { @@ -28,6 +33,7 @@ class Config: }, ] } + ) class ProjectsNetworks(BaseModel): @@ -39,10 +45,9 @@ class ProjectsNetworks(BaseModel): "is given a user defined alias by which it is identified on the network." ), ) - - class Config: - orm_mode = True - schema_extra = { + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ "example": { "project_uuid": "ec5cdfea-f24e-4aa1-83b8-6dccfdc8cf4d", "networks_with_aliases": { @@ -52,4 +57,5 @@ class Config: } }, } - } + }, + ) diff --git a/packages/models-library/src/models_library/projects_nodes.py b/packages/models-library/src/models_library/projects_nodes.py index 8d3c11c1b48..66683369f35 100644 --- a/packages/models-library/src/models_library/projects_nodes.py +++ b/packages/models-library/src/models_library/projects_nodes.py @@ -1,24 +1,25 @@ """ - Models Node as a central element in a project's pipeline +Models Node as a central element in a project's pipeline """ -from copy import deepcopy -from typing import Any, Optional, Union +from typing import Annotated, Any, TypeAlias, Union +from common_library.basic_types import DEFAULT_FACTORY from pydantic import ( BaseModel, - Extra, + ConfigDict, Field, HttpUrl, Json, StrictBool, StrictFloat, StrictInt, - constr, - validator, + StringConstraints, + field_validator, ) +from pydantic.config import JsonDict -from .basic_types import EnvVarKey +from .basic_types import EnvVarKey, KeyIDStr from .projects_access import AccessEnum from .projects_nodes_io import ( DatCoreFileLink, @@ -27,57 +28,86 @@ PortLink, SimCoreFileLink, ) -from .projects_nodes_ui import Position +from .projects_nodes_layout import Position from .projects_state import RunningState -from .services import PROPERTY_KEY_RE, ServiceKey, ServiceVersion - -# NOTE: WARNING the order here matters +from .services import ServiceKey, ServiceVersion InputTypes = Union[ + # NOTE: WARNING the order in Union[*] below matters! StrictBool, StrictInt, StrictFloat, - Json, # FIXME: remove if OM sends object/array. create project does NOT use pydantic + Json, str, PortLink, - Union[SimCoreFileLink, DatCoreFileLink], # *FileLink to service + SimCoreFileLink | DatCoreFileLink, # *FileLink to service DownloadLink, - Union[list[Any], dict[str, Any]], # arrays | object + list[Any] | dict[str, Any], # arrays | object ] OutputTypes = Union[ + # NOTE: WARNING the order in Union[*] below matters! 
StrictBool, StrictInt, StrictFloat, - Json, # TODO: remove when OM sends object/array instead of json-formatted strings + Json, str, - Union[SimCoreFileLink, DatCoreFileLink], # *FileLink to service + SimCoreFileLink | DatCoreFileLink, # *FileLink to service DownloadLink, - Union[list[Any], dict[str, Any]], # arrays | object + list[Any] | dict[str, Any], # arrays | object +] + + +InputID: TypeAlias = KeyIDStr +OutputID: TypeAlias = KeyIDStr + +# union_mode="smart" by default for Pydantic>=2: https://docs.pydantic.dev/latest/concepts/unions/#union-modes +InputsDict: TypeAlias = dict[ + InputID, Annotated[InputTypes, Field(union_mode="left_to_right")] +] +OutputsDict: TypeAlias = dict[ + OutputID, Annotated[OutputTypes, Field(union_mode="left_to_right")] ] -InputID = OutputID = constr(regex=PROPERTY_KEY_RE) -InputsDict = dict[InputID, InputTypes] -OutputsDict = dict[OutputID, OutputTypes] -UnitStr = constr(strip_whitespace=True) +UnitStr: TypeAlias = Annotated[str, StringConstraints(strip_whitespace=True)] class NodeState(BaseModel): - modified: bool = Field( - default=True, description="true if the node's outputs need to be re-computed" - ) - dependencies: set[NodeID] = Field( - default_factory=set, - description="contains the node inputs dependencies if they need to be computed first", - ) - current_status: RunningState = Field( - default=RunningState.NOT_STARTED, - description="the node's current state", - alias="currentStatus", - ) + modified: Annotated[ + bool, + Field( + description="true if the node's outputs need to be re-computed", + ), + ] = True + + dependencies: Annotated[ + set[NodeID], + Field( + default_factory=set, + description="contains the node inputs dependencies if they need to be computed first", + ), + ] = DEFAULT_FACTORY + + current_status: Annotated[ + RunningState, + Field( + description="the node's current state", + alias="currentStatus", + ), + ] = RunningState.NOT_STARTED - class Config: - extra = Extra.forbid - schema_extra = { + progress: Annotated[ + float | None, + Field( + ge=0.0, + le=1.0, + description="current progress of the task if available (None if not started or not a computational task)", + ), + ] = 0 + + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + json_schema_extra={ "examples": [ { "modified": True, @@ -95,133 +125,270 @@ class Config: "currentStatus": "SUCCESS", }, ] - } + }, + ) + + +def _convert_old_enum_name(v) -> RunningState: + if v == "FAILURE": + return RunningState.FAILED + return RunningState(v) class Node(BaseModel): - key: ServiceKey = Field( - ..., - description="distinctive name for the node based on the docker registry path", - examples=[ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer", - "simcore/services/frontend/file-picker", - ], - ) - version: ServiceVersion = Field( - ..., - description="semantic version number of the node", - examples=["1.0.0", "0.0.1"], - ) - label: str = Field( - ..., description="The short name of the node", examples=["JupyterLab"] - ) - progress: Optional[float] = Field( - default=None, ge=0, le=100, description="the node progress value" - ) - thumbnail: Optional[HttpUrl] = Field( - default=None, - description="url of the latest screenshot of the node", - examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"], - ) + key: Annotated[ + ServiceKey, + Field( + description="distinctive name for the node based on the docker registry path", + examples=[ + "simcore/services/comp/itis/sleeper", + "simcore/services/dynamic/3dviewer", + 
"simcore/services/frontend/file-picker", + ], + ), + ] + version: Annotated[ + ServiceVersion, + Field( + description="semantic version number of the node", + examples=["1.0.0", "0.0.1"], + ), + ] + label: Annotated[ + str, + Field(description="The short name of the node", examples=["JupyterLab"]), + ] + progress: Annotated[ + float | None, + Field( + ge=0, + le=100, + description="the node progress value (deprecated in DB, still used for API only)", + deprecated=True, + ), + ] = None + + thumbnail: Annotated[ + str | HttpUrl | None, + Field( + description="url of the latest screenshot of the node", + examples=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"], + ), + ] = None # RUN HASH - run_hash: Optional[str] = Field( - default=None, - description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated", - alias="runHash", - ) + run_hash: Annotated[ + str | None, + Field( + description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated", + alias="runHash", + ), + ] = None # INPUT PORTS --- - inputs: Optional[InputsDict] = Field( - default_factory=dict, description="values of input properties" - ) - inputs_units: Optional[dict[InputID, UnitStr]] = Field( - default=None, - description="Overrides default unit (if any) defined in the service for each port", - alias="inputsUnits", - ) - input_access: Optional[dict[InputID, AccessEnum]] = Field( - default=None, - description="map with key - access level pairs", - alias="inputAccess", - ) - input_nodes: Optional[list[NodeID]] = Field( - default_factory=list, - description="node IDs of where the node is connected to", - alias="inputNodes", - ) + inputs: Annotated[ + InputsDict | None, + Field(default_factory=dict, description="values of input properties"), + ] = DEFAULT_FACTORY + + inputs_required: Annotated[ + list[InputID], + Field( + default_factory=list, + description="Defines inputs that are required in order to run the service", + alias="inputsRequired", + ), + ] = DEFAULT_FACTORY + + inputs_units: Annotated[ + dict[InputID, UnitStr] | None, + Field( + description="Overrides default unit (if any) defined in the service for each port", + alias="inputsUnits", + ), + ] = None + + input_access: Annotated[ + dict[InputID, AccessEnum] | None, + Field( + description="map with key - access level pairs", + alias="inputAccess", + ), + ] = None + + input_nodes: Annotated[ + list[NodeID] | None, + Field( + default_factory=list, + description="node IDs of where the node is connected to", + alias="inputNodes", + ), + ] = DEFAULT_FACTORY # OUTPUT PORTS --- - outputs: Optional[OutputsDict] = Field( - default_factory=dict, description="values of output properties" - ) - output_node: Optional[bool] = Field( - default=None, deprecated=True, alias="outputNode" - ) - output_nodes: Optional[list[NodeID]] = Field( - default=None, - description="Used in group-nodes. Node IDs of those connected to the output", - alias="outputNodes", - ) + outputs: Annotated[ + OutputsDict | None, + Field(default_factory=dict, description="values of output properties"), + ] = DEFAULT_FACTORY - parent: Optional[NodeID] = Field( - default=None, - description="Parent's (group-nodes') node ID s. 
Used to group", + output_node: Annotated[bool | None, Field(deprecated=True, alias="outputNode")] = ( + None ) - position: Optional[Position] = Field( - default=None, - deprecated=True, - description="Use projects_ui.WorkbenchUI.position instead", - ) + output_nodes: Annotated[ + list[NodeID] | None, + Field( + description="Used in group-nodes. Node IDs of those connected to the output", + alias="outputNodes", + ), + ] = None - state: Optional[NodeState] = Field( - default_factory=NodeState, description="The node's state object" - ) + parent: Annotated[ + NodeID | None, + Field( + description="Parent's (group-nodes') node ID s. Used to group", + ), + ] = None - boot_options: Optional[dict[EnvVarKey, str]] = Field( - default=None, - alias="bootOptions", - description=( - "Some services provide alternative parameters to be injected at boot time. " - "The user selection should be stored here, and it will overwrite the " - "services's defaults." + position: Annotated[ + Position | None, + Field( + deprecated=True, + description="Use projects_ui.WorkbenchUI.position instead", ), - ) + ] = None + + state: Annotated[ + NodeState | None, + Field(default_factory=NodeState, description="The node's state object"), + ] = DEFAULT_FACTORY + + boot_options: Annotated[ + dict[EnvVarKey, str] | None, + Field( + alias="bootOptions", + description=( + "Some services provide alternative parameters to be injected at boot time. " + "The user selection should be stored here, and it will overwrite the " + "services's defaults." + ), + ), + ] = None - @validator("thumbnail", pre=True) + @field_validator("thumbnail", mode="before") @classmethod - def convert_empty_str_to_none(cls, v): + def _convert_empty_str_to_none(cls, v): if isinstance(v, str) and v == "": return None return v + @field_validator("state", mode="before") @classmethod - def convert_old_enum_name(cls, v) -> RunningState: - if v == "FAILURE": - return RunningState.FAILED - return v - - @validator("state", pre=True) - @classmethod - def convert_from_enum(cls, v): + def _convert_from_enum(cls, v): if isinstance(v, str): + # the old version of state was a enum of RunningState - running_state_value = cls.convert_old_enum_name(v) - return NodeState(currentStatus=running_state_value) + running_state_value = _convert_old_enum_name(v) + return NodeState(current_status=running_state_value) return v - class Config: - extra = Extra.forbid - - # NOTE: exporting without this trick does not make runHash as nullable. 
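# Sketch of why the member order of InputTypes/OutputTypes matters and why
# union_mode="left_to_right" is pinned in InputsDict/OutputsDict above: with
# left-to-right matching a boolean stays a boolean instead of being captured by
# a later, more permissive union member.
from typing import Annotated, Union

from pydantic import Field, StrictBool, StrictInt, TypeAdapter

_LeftToRight = Annotated[
    Union[StrictBool, StrictInt, str], Field(union_mode="left_to_right")
]

assert TypeAdapter(_LeftToRight).validate_python(True) is True
assert TypeAdapter(_LeftToRight).validate_python(3) == 3
assert TypeAdapter(_LeftToRight).validate_python("out_1") == "out_1"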
- # It is a Pydantic issue see https://github.com/samuelcolvin/pydantic/issues/1270 - @staticmethod - def schema_extra(schema, _model: "Node"): - # NOTE: the variant with anyOf[{type: null}, { other }] is compatible with OpenAPI - # The other as type = [null, other] is only jsonschema compatible - for prop_name in ["parent", "runHash"]: - if prop_name in schema.get("properties", {}): - was = deepcopy(schema["properties"][prop_name]) - schema["properties"][prop_name] = {"anyOf": [{"type": "null"}, was]} + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # Minimal example with only required fields + { + "key": "simcore/services/comp/no_ports", + "version": "1.0.0", + "label": "Sleep", + }, + # Complete example with optional fields + { + "key": "simcore/services/comp/only_inputs", + "version": "1.0.0", + "label": "Only INputs", + "inputs": { + "input_1": 1, + "input_2": 2, + "input_3": 3, + }, + }, + # Complete example with optional fields + { + "key": "simcore/services/comp/only_outputs", + "version": "1.0.0", + "label": "Only Outputs", + "outputs": { + "output_1": 1, + "output_2": 2, + "output_3": 3, + }, + }, + # Example with all possible input and output types + { + "key": "simcore/services/comp/itis/all-types", + "version": "1.0.0", + "label": "All Types Demo", + "inputs": { + "boolean_input": True, + "integer_input": 42, + "float_input": 3.14159, + "string_input": "text value", + "json_input": {"key": "value", "nested": {"data": 123}}, + "port_link_input": { + "nodeUuid": "f2700a54-adcf-45d4-9881-01ec30fd75a2", + "output": "out_1", + }, + "simcore_file_link": { + "store": "simcore.s3", + "path": "123e4567-e89b-12d3-a456-426614174000/test.csv", + }, + "datcore_file_link": { + "store": "datcore", + "dataset": "N:dataset:123", + "path": "path/to/file.txt", + }, + "download_link": { + "downloadLink": "https://example.com/downloadable/file.txt" + }, + "array_input": [1, 2, 3, 4, 5], + "object_input": {"name": "test", "value": 42}, + }, + "outputs": { + "boolean_output": False, + "integer_output": 100, + "float_output": 2.71828, + "string_output": "result text", + "json_output": {"status": "success", "data": [1, 2, 3]}, + "simcore_file_output": { + "store": "simcore.s3", + "path": "987e6543-e21b-12d3-a456-426614174000/result.csv", + }, + "datcore_file_output": { + "store": "datcore", + "dataset": "N:dataset:456", + "path": "results/output.txt", + }, + "download_link_output": { + "downloadLink": "https://example.com/results/download.txt" + }, + "array_output": ["a", "b", "c", "d"], + "object_output": {"status": "complete", "count": 42}, + }, + }, + ], + } + ) + + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) + + +class PartialNode(Node): + key: Annotated[ServiceKey, Field(default=None)] + version: Annotated[ServiceVersion, Field(default=None)] + label: Annotated[str, Field(default=None)] diff --git a/packages/models-library/src/models_library/projects_nodes_io.py b/packages/models-library/src/models_library/projects_nodes_io.py index d10aadd786b..90fdf141278 100644 --- a/packages/models-library/src/models_library/projects_nodes_io.py +++ b/packages/models-library/src/models_library/projects_nodes_io.py @@ -1,43 +1,96 @@ """ - Link models used at i/o port nodes: - - Link to files: - - Generic: DownloadLink - - At Custom Service: SimCoreFileLink, DatCoreFileLink - - Link to another port: PortLink +Link models used at i/o port nodes: + - Link to files: + - 
Generic: DownloadLink + - At Custom Service: SimCoreFileLink, DatCoreFileLink + - Link to another port: PortLink """ -import re from pathlib import Path -from typing import Optional, Pattern, Union +from typing import Annotated, TypeAlias from uuid import UUID -from pydantic import AnyUrl, BaseModel, ConstrainedStr, Extra, Field, validator - -from .basic_regex import DATCORE_FILE_ID_RE, SIMCORE_S3_FILE_ID_RE, UUID_RE -from .services import PROPERTY_KEY_RE +from models_library.basic_types import ConstrainedStr, KeyIDStr +from pydantic import ( + AnyUrl, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + StringConstraints, + TypeAdapter, + ValidationInfo, + field_validator, +) + +from .basic_regex import ( + DATCORE_FILE_ID_RE, + SIMCORE_S3_DIRECTORY_ID_RE, + SIMCORE_S3_FILE_ID_RE, + UUID_RE, +) NodeID = UUID +UUIDStr: TypeAlias = Annotated[str, StringConstraints(pattern=UUID_RE)] + +NodeIDStr: TypeAlias = UUIDStr + +LocationID: TypeAlias = int +LocationName: TypeAlias = str + -class UUIDStr(ConstrainedStr): - regex: Optional[Pattern[str]] = re.compile(UUID_RE) +SimcoreS3FileID: TypeAlias = Annotated[ + str, StringConstraints(pattern=SIMCORE_S3_FILE_ID_RE) +] -NodeIDStr = UUIDStr +class SimcoreS3DirectoryID(ConstrainedStr): + """ + A simcore directory has the following structure: + `{project_id}/{node_id}/simcore-dir-name/` + """ -LocationID = int -LocationName = str + pattern: str = SIMCORE_S3_DIRECTORY_ID_RE + @staticmethod + def _get_parent(s3_object: str, *, parent_index: int) -> str: + # NOTE: s3_object, sometimes is a directory, in that case + # append a fake file so that the parent count still works + if s3_object.endswith("/"): + s3_object += "__placeholder_file_when_s3_object_is_a_directory__" + + parents: list[Path] = list(Path(s3_object).parents) + try: + return f"{parents[-parent_index]}" + except IndexError as err: + msg = ( + f"'{s3_object}' does not have enough parents, " + f"expected {parent_index} found {parents}" + ) + raise ValueError(msg) from err + + @classmethod + def _validate(cls, __input_value: str) -> str: + value = super()._validate(__input_value) + value = value.rstrip("/") + parent = cls._get_parent(value, parent_index=3) -class SimcoreS3FileID(ConstrainedStr): - regex: Optional[Pattern[str]] = re.compile(SIMCORE_S3_FILE_ID_RE) + directory_candidate = value.strip(parent) + if "/" in directory_candidate: + msg = f"Not allowed subdirectory found in '{directory_candidate}'" + raise ValueError(msg) + return f"{value}/" + @classmethod + def from_simcore_s3_object(cls, s3_object: str) -> "SimcoreS3DirectoryID": + parent_path: str = cls._get_parent(s3_object, parent_index=4) + return TypeAdapter(cls).validate_python(f"{parent_path}/") -class DatCoreFileID(ConstrainedStr): - regex: Optional[Pattern[str]] = re.compile(DATCORE_FILE_ID_RE) +DatCoreFileID: TypeAlias = Annotated[str, StringConstraints(pattern=DATCORE_FILE_ID_RE)] -StorageFileID = Union[SimcoreS3FileID, DatCoreFileID] +StorageFileID: TypeAlias = SimcoreS3FileID | DatCoreFileID class PortLink(BaseModel): @@ -48,15 +101,13 @@ class PortLink(BaseModel): description="The node to get the port output from", alias="nodeUuid", ) - output: str = Field( + output: KeyIDStr = Field( ..., description="The port key in the node given by nodeUuid", - regex=PROPERTY_KEY_RE, ) - - class Config: - extra = Extra.forbid - schema_extra = { + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ # minimal { @@ -64,25 +115,28 @@ class Config: "output": "out_2", } ], - } + }, + ) class 
DownloadLink(BaseModel): """I/O port type to hold a generic download link to a file (e.g. S3 pre-signed link, etc)""" - download_link: AnyUrl = Field(..., alias="downloadLink") - label: Optional[str] = Field(default=None, description="Display name") - - class Config: - extra = Extra.forbid - schema_extra = { + download_link: Annotated[ + str, BeforeValidator(lambda x: str(TypeAdapter(AnyUrl).validate_python(x))) + ] = Field(..., alias="downloadLink") + label: str | None = Field(default=None, description="Display name") + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ # minimal { "downloadLink": "https://fakeimg.pl/250x100/", } ], - } + }, + ) ## CUSTOM STORAGE SERVICES ----------- @@ -92,25 +146,26 @@ class BaseFileLink(BaseModel): store: LocationID = Field( ..., description="The store identifier: 0 for simcore S3, 1 for datcore", + validate_default=True, ) path: StorageFileID = Field( ..., description="The path to the file in the storage provider domain", + union_mode="left_to_right", ) - label: Optional[str] = Field( - default=None, - description="The real file name", + label: str | None = Field( + default=None, description="The real file name", validate_default=True ) - e_tag: Optional[str] = Field( + e_tag: str | None = Field( default=None, description="Entity tag that uniquely represents the file. The method to generate the tag is not specified (black box).", alias="eTag", ) - @validator("store", pre=True) + @field_validator("store", mode="before") @classmethod def legacy_enforce_str_to_int(cls, v): # SEE example 'legacy: store as string' @@ -118,42 +173,43 @@ def legacy_enforce_str_to_int(cls, v): return int(v) return v + model_config = ConfigDict(populate_by_name=True) + class SimCoreFileLink(BaseFileLink): """I/O port type to hold a link to a file in simcore S3 storage""" - dataset: Optional[str] = Field( + dataset: str | None = Field( default=None, - deprecated=True - # TODO: Remove with storage refactoring + deprecated=True, ) - @validator("store", always=True) + @field_validator("store") @classmethod def check_discriminator(cls, v): """Used as discriminator to cast to this class""" if v != 0: - raise ValueError(f"SimCore store identifier must be set to 0, got {v}") + msg = f"SimCore store identifier must be set to 0, got {v}" + raise ValueError(msg) return 0 - @validator("label", always=True, pre=True) + @field_validator("label", mode="before") @classmethod - def pre_fill_label_with_filename_ext(cls, v, values): - if v is None and "path" in values: - return Path(values["path"]).name + def pre_fill_label_with_filename_ext(cls, v, info: ValidationInfo): + if v is None and "path" in info.data: + return Path(info.data["path"]).name return v - class Config: - extra = Extra.forbid - schema_extra = { - # a project file - "example": { - "store": 0, - "path": "94453a6a-c8d4-52b3-a22d-ccbf81f8d636/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", - "eTag": "859fda0cb82fc4acb4686510a172d9a9-1", - "label": "input.txt", - }, + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ + { + "store": 0, + "path": "94453a6a-c8d4-52b3-a22d-ccbf81f8d636/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "eTag": "859fda0cb82fc4acb4686510a172d9a9-1", + "label": "input.txt", + }, # legacy: store as string (SEE incident https://git.speag.com/oSparc/e2e-testing/-/issues/1) { "store": "0", @@ -171,7 +227,8 @@ class Config: "path": "94453a6a-c8d4-52b3-a22d-ccbf81f8d636/d4442ca4-23fd-5b6b-ba6d-0b75f711c109/y_1D.txt", }, ], - } + }, + ) class 
DatCoreFileLink(BaseFileLink): @@ -187,26 +244,27 @@ class DatCoreFileLink(BaseFileLink): description="Unique identifier to access the dataset on datcore (REQUIRED for datcore)", ) - @validator("store", always=True) + @field_validator("store") @classmethod def check_discriminator(cls, v): """Used as discriminator to cast to this class""" if v != 1: - raise ValueError(f"DatCore store must be set to 1, got {v}") + msg = f"DatCore store must be set to 1, got {v}" + raise ValueError(msg) return 1 - class Config: - extra = Extra.forbid - schema_extra = { - "example": { - # minimal - "store": 1, - "dataset": "N:dataset:ea2325d8-46d7-4fbd-a644-30f6433070b4", - "path": "N:package:32df09ba-e8d6-46da-bd54-f696157de6ce", - "label": "initial_WTstates", - }, + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ + { + # minimal + "store": 1, + "dataset": "N:dataset:ea2325d8-46d7-4fbd-a644-30f6433070b4", + "path": "N:package:32df09ba-e8d6-46da-bd54-f696157de6ce", + "label": "initial_WTstates", + }, # with store id as str { "store": 1, @@ -215,8 +273,9 @@ class Config: "label": "initial_WTstates", }, ], - } + }, + ) # Bundles all model links to a file vs PortLink -LinkToFileTypes = Union[SimCoreFileLink, DatCoreFileLink, DownloadLink] +LinkToFileTypes = SimCoreFileLink | DatCoreFileLink | DownloadLink diff --git a/packages/models-library/src/models_library/projects_nodes_layout.py b/packages/models-library/src/models_library/projects_nodes_layout.py new file mode 100644 index 00000000000..774a2355c95 --- /dev/null +++ b/packages/models-library/src/models_library/projects_nodes_layout.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel, ConfigDict, Field + + +class Position(BaseModel): + x: int = Field(..., description="The x position", examples=[["12"]]) + y: int = Field(..., description="The y position", examples=[["15"]]) + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/projects_nodes_ui.py b/packages/models-library/src/models_library/projects_nodes_ui.py deleted file mode 100644 index aa55332ccba..00000000000 --- a/packages/models-library/src/models_library/projects_nodes_ui.py +++ /dev/null @@ -1,21 +0,0 @@ -""" - Models node UI (legacy model, use instead projects.ui.py) -""" - -from pydantic import BaseModel, Extra, Field -from pydantic.color import Color - - -class Position(BaseModel): - x: int = Field(..., description="The x position", example=["12"]) - y: int = Field(..., description="The y position", example=["15"]) - - class Config: - extra = Extra.forbid - - -class Marker(BaseModel): - color: Color = Field(...) 
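# Sketch of the validator-migration pattern used in SimCoreFileLink above: the
# Pydantic v1 `values` dict becomes `info.data` on a ValidationInfo argument, and
# validate_default=True keeps the default-None label eligible for pre-filling.
from pathlib import Path

from pydantic import BaseModel, Field, ValidationInfo, field_validator


class _FileLinkSketch(BaseModel):  # hypothetical model, for illustration only
    path: str
    label: str | None = Field(default=None, validate_default=True)

    @field_validator("label", mode="before")
    @classmethod
    def _default_label_from_filename(cls, v, info: ValidationInfo):
        if v is None and "path" in info.data:
            return Path(info.data["path"]).name
        return v


assert _FileLinkSketch(path="some-project-uuid/some-node-uuid/input.txt").label == "input.txt"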
- - class Config: - extra = Extra.forbid diff --git a/packages/models-library/src/models_library/projects_pipeline.py b/packages/models-library/src/models_library/projects_pipeline.py index 0b6d29e2440..50db902d624 100644 --- a/packages/models-library/src/models_library/projects_pipeline.py +++ b/packages/models-library/src/models_library/projects_pipeline.py @@ -1,98 +1,130 @@ -from typing import Dict, List, Optional +import datetime +from typing import TypeAlias from uuid import UUID -from pydantic import BaseModel, Field, PositiveInt +import arrow +from pydantic import BaseModel, ConfigDict, Field, PositiveInt +from pydantic.config import JsonDict -from .clusters import ClusterID -from .projects_nodes import NodeID, NodeState +from .projects_nodes import NodeState +from .projects_nodes_io import NodeID from .projects_state import RunningState class PipelineDetails(BaseModel): - adjacency_list: Dict[NodeID, List[NodeID]] = Field( + adjacency_list: dict[NodeID, list[NodeID]] = Field( ..., description="The adjacency list of the current pipeline in terms of {NodeID: [successor NodeID]}", ) - node_states: Dict[NodeID, NodeState] = Field( + progress: float | None = Field( + ..., + ge=0, + le=1.0, + description="the progress of the pipeline (None if there are no computational tasks)", + ) + node_states: dict[NodeID, NodeState] = Field( ..., description="The states of each of the computational nodes in the pipeline" ) -TaskID = UUID +TaskID: TypeAlias = UUID class ComputationTask(BaseModel): id: TaskID = Field(..., description="the id of the computation task") state: RunningState = Field(..., description="the state of the computational task") - result: Optional[str] = Field( - None, description="the result of the computational task" - ) + result: str | None = Field(None, description="the result of the computational task") pipeline_details: PipelineDetails = Field( ..., description="the details of the generated pipeline" ) - iteration: Optional[PositiveInt] = Field( + iteration: PositiveInt | None = Field( ..., description="the iteration id of the computation task (none if no task ran yet)", ) - cluster_id: Optional[ClusterID] = Field( + started: datetime.datetime | None = Field( + ..., + description="the timestamp when the computation was started or None if not started yet", + ) + stopped: datetime.datetime | None = Field( + ..., + description="the timestamp when the computation was stopped or None if not started nor stopped yet", + ) + submitted: datetime.datetime | None = Field( ..., - description="the cluster on which the computaional task runs/ran (none if no task ran yet)", + description="task last modification timestamp or None if the there is no task", ) - class Config: - schema_extra = { - "examples": [ - { - "id": "42838344-03de-4ce2-8d93-589a5dcdfd05", - "state": "PUBLISHED", - "pipeline_details": { - "adjacency_list": { - "2fb4808a-e403-4a46-b52c-892560d27862": [], - "19a40c7b-0a40-458a-92df-c77a5df7c886": [ - "2fb4808a-e403-4a46-b52c-892560d27862" - ], - }, - "node_states": { - "2fb4808a-e403-4a46-b52c-892560d27862": { - "modified": True, - "dependencies": [], - }, - "19a40c7b-0a40-458a-92df-c77a5df7c886": { - "modified": False, - "dependencies": [ + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "id": "42838344-03de-4ce2-8d93-589a5dcdfd05", + "state": "PUBLISHED", + "pipeline_details": { + "adjacency_list": { + "2fb4808a-e403-4a46-b52c-892560d27862": [], + "19a40c7b-0a40-458a-92df-c77a5df7c886": [ 
"2fb4808a-e403-4a46-b52c-892560d27862" ], }, + "node_states": { + "2fb4808a-e403-4a46-b52c-892560d27862": { + "modified": True, + "dependencies": [], + "progress": 0.0, + }, + "19a40c7b-0a40-458a-92df-c77a5df7c886": { + "modified": False, + "dependencies": [ + "2fb4808a-e403-4a46-b52c-892560d27862" + ], + "progress": 0.0, + }, + }, + "progress": 0.0, }, + "iteration": None, + "started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item] + "stopped": None, + "submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item] }, - "iteration": None, - "cluster_id": None, - }, - { - "id": "f81d7994-9ccc-4c95-8c32-aa70d6bbb1b0", - "state": "SUCCESS", - "pipeline_details": { - "adjacency_list": { - "2fb4808a-e403-4a46-b52c-892560d27862": [], - "19a40c7b-0a40-458a-92df-c77a5df7c886": [ - "2fb4808a-e403-4a46-b52c-892560d27862" - ], - }, - "node_states": { - "2fb4808a-e403-4a46-b52c-892560d27862": { - "modified": False, - "dependencies": [], - }, - "19a40c7b-0a40-458a-92df-c77a5df7c886": { - "modified": False, - "dependencies": [ + { + "id": "f81d7994-9ccc-4c95-8c32-aa70d6bbb1b0", + "state": "SUCCESS", + "pipeline_details": { + "adjacency_list": { + "2fb4808a-e403-4a46-b52c-892560d27862": [], + "19a40c7b-0a40-458a-92df-c77a5df7c886": [ "2fb4808a-e403-4a46-b52c-892560d27862" ], }, + "node_states": { + "2fb4808a-e403-4a46-b52c-892560d27862": { + "modified": False, + "dependencies": [], + "progress": 1.0, + }, + "19a40c7b-0a40-458a-92df-c77a5df7c886": { + "modified": False, + "dependencies": [ + "2fb4808a-e403-4a46-b52c-892560d27862" + ], + "progress": 1.0, + }, + }, + "progress": 1.0, }, + "iteration": 2, + "started": arrow.utcnow().shift(minutes=-50).datetime, # type: ignore[dict-item] + "stopped": arrow.utcnow().shift(minutes=-20).datetime, # type: ignore[dict-item] + "submitted": arrow.utcnow().shift(hours=-1).datetime, # type: ignore[dict-item] }, - "iteration": 2, - "cluster_id": 0, - }, - ] - } + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) diff --git a/packages/models-library/src/models_library/projects_state.py b/packages/models-library/src/models_library/projects_state.py index 587b2296523..cef15bce5b5 100644 --- a/packages/models-library/src/models_library/projects_state.py +++ b/packages/models-library/src/models_library/projects_state.py @@ -1,11 +1,18 @@ """ - Models both project and node states +Models both project and node states """ from enum import Enum, unique -from typing import Optional +from typing import Annotated -from pydantic import BaseModel, Extra, Field, validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + ValidationInfo, + field_validator, + model_validator, +) from .projects_access import Owner @@ -21,19 +28,25 @@ class RunningState(str, Enum): PUBLISHED = "PUBLISHED" NOT_STARTED = "NOT_STARTED" PENDING = "PENDING" + WAITING_FOR_RESOURCES = "WAITING_FOR_RESOURCES" STARTED = "STARTED" - RETRY = "RETRY" SUCCESS = "SUCCESS" FAILED = "FAILED" ABORTED = "ABORTED" + WAITING_FOR_CLUSTER = "WAITING_FOR_CLUSTER" - def is_running(self) -> bool: - return self in ( + @staticmethod + def list_running_states() -> list["RunningState"]: + return [ RunningState.PUBLISHED, RunningState.PENDING, + RunningState.WAITING_FOR_RESOURCES, RunningState.STARTED, - RunningState.RETRY, - ) + RunningState.WAITING_FOR_CLUSTER, + ] + + def is_running(self) -> bool: + return self in self.list_running_states() @unique @@ -50,19 +63,21 @@ class ProjectStatus(str, Enum): EXPORTING = "EXPORTING" OPENING = 
"OPENING" OPENED = "OPENED" + MAINTAINING = "MAINTAINING" class ProjectLocked(BaseModel): value: bool = Field(..., description="True if the project is locked") - owner: Optional[Owner] = Field( - default=None, description="If locked, the user that owns the lock" - ) status: ProjectStatus = Field(..., description="The status of the project") - - class Config: - extra = Extra.forbid - use_enum_values = True - schema_extra = { + owner: Owner | None = Field( + default=None, + description="If locked, the user that owns the lock", + validate_default=True, + ) + model_config = ConfigDict( + extra="forbid", + use_enum_values=True, + json_schema_extra={ "examples": [ {"value": False, "status": ProjectStatus.CLOSED}, { @@ -75,27 +90,36 @@ class Config: }, }, ] - } + }, + ) - @validator("owner", pre=True, always=True) + @field_validator("status", mode="after") @classmethod - def check_not_null(cls, v, values): - if values["value"] is True and v is None: - raise ValueError("value cannot be None when project is locked") + def check_status_compatible(cls, v, info: ValidationInfo): + if info.data["value"] is False and v not in ["CLOSED", "OPENED"]: + msg = f"status is set to {v} and lock is set to {info.data['value']}!" + raise ValueError(msg) + if info.data["value"] is True and v == "CLOSED": + msg = f"status is set to {v} and lock is set to {info.data['value']}!" + raise ValueError(msg) return v - @validator("status", always=True) + @model_validator(mode="before") @classmethod - def check_status_compatible(cls, v, values): - if values["value"] is False and v not in ["CLOSED", "OPENED"]: - raise ValueError( - f"status is set to {v} and lock is set to {values['value']}!" - ) - if values["value"] is True and v == "CLOSED": - raise ValueError( - f"status is set to {v} and lock is set to {values['value']}!" - ) - return v + def check_owner_compatible(cls, values): + if ( + values["value"] is True + and values.get("owner") is None + and values["status"] + in [ + status.value + for status in ProjectStatus + if status != ProjectStatus.MAINTAINING + ] + ): + msg = "Owner must be specified when the project is not in the 'MAINTAINING' status." 
+ raise ValueError(msg) + return values class ProjectRunningState(BaseModel): @@ -103,13 +127,11 @@ class ProjectRunningState(BaseModel): ..., description="The running state of the project", examples=["STARTED"] ) - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") class ProjectState(BaseModel): - locked: ProjectLocked = Field(..., description="The project lock state") + locked: Annotated[ProjectLocked, Field(..., description="The project lock state")] state: ProjectRunningState = Field(..., description="The project running state") - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/projects_ui.py b/packages/models-library/src/models_library/projects_ui.py deleted file mode 100644 index 5eed8c1b1b2..00000000000 --- a/packages/models-library/src/models_library/projects_ui.py +++ /dev/null @@ -1,60 +0,0 @@ -""" - Models Front-end UI -""" - -from typing import Literal, Optional, TypedDict - -from pydantic import BaseModel, Extra, Field -from pydantic.color import Color - -from .projects_nodes_io import NodeID, NodeIDStr -from .projects_nodes_ui import Marker, Position - - -class WorkbenchUI(BaseModel): - position: Position = Field(..., description="The node position in the workbench") - marker: Optional[Marker] = None - - class Config: - extra = Extra.forbid - - -class _SlideshowRequired(TypedDict): - position: int - - -class Slideshow(_SlideshowRequired, total=False): - instructions: Optional[str] # "Instructions about what to do in this step" - - -class Annotation(BaseModel): - type: Literal["rect", "text"] = Field(...) - color: Color = Field(...) - attributes: dict = Field(..., description="svg attributes") - - class Config: - extra = Extra.forbid - schema_extra = { - "examples": [ - { - "type": "rect", - "color": "#FF0000", - "attributes": {"x": 415, "y": 100, "width": 117, "height": 26}, - }, - { - "type": "text", - "color": "#0000FF", - "attributes": {"x": 415, "y": 100, "text": "Hey!"}, - }, - ] - } - - -class StudyUI(BaseModel): - workbench: Optional[dict[NodeIDStr, WorkbenchUI]] = None - slideshow: Optional[dict[NodeIDStr, Slideshow]] = None - current_node_id: Optional[NodeID] = Field(default=None, alias="currentNodeId") - annotations: Optional[dict[NodeIDStr, Annotation]] = None - - class Config: - extra = Extra.allow diff --git a/services/catalog/src/simcore_service_catalog/core/errors.py b/packages/models-library/src/models_library/py.typed similarity index 100% rename from services/catalog/src/simcore_service_catalog/core/errors.py rename to packages/models-library/src/models_library/py.typed diff --git a/packages/models-library/src/models_library/rabbitmq_basic_types.py b/packages/models-library/src/models_library/rabbitmq_basic_types.py new file mode 100644 index 00000000000..c1602f9d418 --- /dev/null +++ b/packages/models-library/src/models_library/rabbitmq_basic_types.py @@ -0,0 +1,29 @@ +from typing import Annotated, Final, TypeAlias + +from models_library.basic_types import ConstrainedStr +from pydantic import StringConstraints, TypeAdapter + +REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS: Final[str] = r"^[\w\-\.]*$" + + +class RPCNamespace(ConstrainedStr): + pattern = REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS + min_length: int = 1 + max_length: int = 252 + + @classmethod + def from_entries(cls, entries: dict[str, str]) -> "RPCNamespace": + """ + Given a list of entries creates a namespace to be used in declaring the rabbitmq queue. 
+ Keeping this to a predefined length + """ + composed_string = "-".join(f"{k}_{v}" for k, v in sorted(entries.items())) + return TypeAdapter(cls).validate_python(composed_string) + + +RPCMethodName: TypeAlias = Annotated[ + str, + StringConstraints( + pattern=REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS, min_length=1, max_length=252 + ), +] diff --git a/packages/models-library/src/models_library/rabbitmq_messages.py b/packages/models-library/src/models_library/rabbitmq_messages.py index e4cda73e93a..44d2b7ddc9a 100644 --- a/packages/models-library/src/models_library/rabbitmq_messages.py +++ b/packages/models-library/src/models_library/rabbitmq_messages.py @@ -1,27 +1,53 @@ +import datetime import logging -from enum import Enum, auto -from typing import Any, Literal, Optional - -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.projects_state import RunningState -from models_library.users import UserID -from models_library.utils.enums import StrAutoEnum +from abc import abstractmethod +from decimal import Decimal +from enum import Enum, IntEnum, auto +from typing import Any, Literal, TypeAlias + +import arrow from pydantic import BaseModel, Field -from pydantic.types import NonNegativeFloat + +from .products import ProductName +from .progress_bar import ProgressReport +from .projects import ProjectID +from .projects_nodes_io import NodeID +from .projects_state import RunningState +from .services import ServiceKey, ServiceType, ServiceVersion +from .services_resources import ServiceResourcesDict +from .services_types import ServiceRunID +from .users import UserID +from .utils.enums import StrAutoEnum +from .wallets import WalletID + +LogLevelInt: TypeAlias = int +LogMessageStr: TypeAlias = str class RabbitEventMessageType(str, Enum): + __slots__ = () + RELOAD_IFRAME = "RELOAD_IFRAME" class RabbitMessageBase(BaseModel): - channel_name: str = Field(..., const=True) + channel_name: str @classmethod def get_channel_name(cls) -> str: # NOTE: this returns the channel type name - return cls.__fields__["channel_name"].default + name: str = cls.model_fields["channel_name"].default + return name + + @abstractmethod + def routing_key(self) -> str | None: + """this is used to define the topic of the message + + :return: the topic or None (NOTE: None will implicitly use a FANOUT exchange) + """ + + def body(self) -> bytes: + return self.model_dump_json().encode() class ProjectMessageBase(BaseModel): @@ -34,15 +60,22 @@ class NodeMessageBase(ProjectMessageBase): class LoggerRabbitMessage(RabbitMessageBase, NodeMessageBase): - channel_name: Literal["simcore.services.logs"] = "simcore.services.logs" - messages: list[str] - log_level: int = logging.INFO + channel_name: Literal["simcore.services.logs.v2"] = "simcore.services.logs.v2" + node_id: NodeID | None # type: ignore[assignment] + messages: list[LogMessageStr] + log_level: LogLevelInt = logging.INFO + + def routing_key(self) -> str: + return f"{self.project_id}.{self.log_level}" class EventRabbitMessage(RabbitMessageBase, NodeMessageBase): channel_name: Literal["simcore.services.events"] = "simcore.services.events" action: RabbitEventMessageType + def routing_key(self) -> str | None: + return None + class ProgressType(StrAutoEnum): COMPUTATION_RUNNING = auto() # NOTE: this is the original only progress report @@ -53,6 +86,7 @@ class ProgressType(StrAutoEnum): SERVICE_OUTPUTS_PULLING = auto() SERVICE_STATE_PULLING = auto() SERVICE_IMAGES_PULLING = auto() + SERVICE_CONTAINERS_STARTING = auto() 
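# Usage sketch for RPCNamespace.from_entries above (import path assumes this diff
# is applied): entries are sorted by key and joined as "<key>_<value>", so the
# resulting namespace is deterministic regardless of the input dict ordering.
from models_library.rabbitmq_basic_types import RPCNamespace

namespace = RPCNamespace.from_entries({"service": "catalog", "product": "osparc"})
assert namespace == "product_osparc-service_catalog"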
SERVICE_STATE_PUSHING = auto() SERVICE_OUTPUTS_PUSHING = auto() @@ -61,41 +95,53 @@ class ProgressType(StrAutoEnum): class ProgressMessageMixin(RabbitMessageBase): - channel_name: Literal["simcore.services.progress"] = "simcore.services.progress" + channel_name: Literal["simcore.services.progress.v2"] = ( + "simcore.services.progress.v2" + ) progress_type: ProgressType = ( ProgressType.COMPUTATION_RUNNING ) # NOTE: backwards compatible - progress: NonNegativeFloat + report: ProgressReport + + def routing_key(self) -> str | None: + return None class ProgressRabbitMessageNode(ProgressMessageMixin, NodeMessageBase): - ... + def routing_key(self) -> str | None: + return f"{self.project_id}.{self.node_id}" class ProgressRabbitMessageProject(ProgressMessageMixin, ProjectMessageBase): - ... + def routing_key(self) -> str | None: + return f"{self.project_id}.all_nodes" class InstrumentationRabbitMessage(RabbitMessageBase, NodeMessageBase): - channel_name: Literal[ + channel_name: Literal["simcore.services.instrumentation"] = ( "simcore.services.instrumentation" - ] = "simcore.services.instrumentation" + ) metrics: str service_uuid: NodeID service_type: str service_key: str service_tag: str - result: Optional[RunningState] = None + result: RunningState | None = None + simcore_user_agent: str + + def routing_key(self) -> str | None: + return None class _RabbitAutoscalingBaseMessage(RabbitMessageBase): - channel_name: Literal["io.simcore.autoscaling"] = Field( - default="io.simcore.autoscaling", const=True - ) + channel_name: Literal["io.simcore.autoscaling"] = "io.simcore.autoscaling" origin: str = Field( ..., description="autoscaling app type, in case there would be more than one" ) + def routing_key(self) -> str | None: + return None + class RabbitAutoscalingStatusMessage(_RabbitAutoscalingBaseMessage): nodes_total: int = Field( @@ -122,3 +168,151 @@ class RabbitAutoscalingStatusMessage(_RabbitAutoscalingBaseMessage): instances_running: int = Field( ..., description="the number of EC2 instances currently in running state in AWS" ) + + +class RabbitResourceTrackingMessageType(StrAutoEnum): + TRACKING_STARTED = auto() + TRACKING_HEARTBEAT = auto() + TRACKING_STOPPED = auto() + + +class RabbitResourceTrackingBaseMessage(RabbitMessageBase): + channel_name: Literal["io.simcore.service.tracking"] = "io.simcore.service.tracking" + + service_run_id: ServiceRunID = Field( + ..., description="uniquely identitifies the service run" + ) + created_at: datetime.datetime = Field( + default_factory=lambda: arrow.utcnow().datetime, + description="message creation datetime", + ) + + def routing_key(self) -> str | None: + return None + + +class DynamicServiceRunningMessage(RabbitMessageBase): + channel_name: Literal["io.simcore.service.dynamic-service-running"] = Field( + default="io.simcore.service.dynamic-service-running" + ) + + project_id: ProjectID + node_id: NodeID + user_id: UserID + product_name: ProductName | None + created_at: datetime.datetime = Field( + default_factory=lambda: arrow.utcnow().datetime, + description="message creation datetime", + ) + + def routing_key(self) -> str | None: + return None + + +class RabbitResourceTrackingStartedMessage(RabbitResourceTrackingBaseMessage): + message_type: Literal[RabbitResourceTrackingMessageType.TRACKING_STARTED] = ( + RabbitResourceTrackingMessageType.TRACKING_STARTED + ) + + wallet_id: WalletID | None + wallet_name: str | None + + pricing_plan_id: int | None + pricing_unit_id: int | None + pricing_unit_cost_id: int | None + + product_name: str + 
simcore_user_agent: str + + user_id: UserID + user_email: str + + project_id: ProjectID + project_name: str + + node_id: NodeID + node_name: str + + parent_project_id: ProjectID + root_parent_project_id: ProjectID + root_parent_project_name: str + + parent_node_id: NodeID + root_parent_node_id: NodeID + + service_key: ServiceKey + service_version: ServiceVersion + service_type: ServiceType + service_resources: ServiceResourcesDict + service_additional_metadata: dict[str, Any] = Field( + default_factory=dict, description="service additional 'free' metadata" + ) + + +class RabbitResourceTrackingHeartbeatMessage(RabbitResourceTrackingBaseMessage): + message_type: Literal[RabbitResourceTrackingMessageType.TRACKING_HEARTBEAT] = ( + RabbitResourceTrackingMessageType.TRACKING_HEARTBEAT + ) + + +class SimcorePlatformStatus(StrAutoEnum): + OK = auto() + BAD = auto() + + +class RabbitResourceTrackingStoppedMessage(RabbitResourceTrackingBaseMessage): + message_type: Literal[RabbitResourceTrackingMessageType.TRACKING_STOPPED] = ( + RabbitResourceTrackingMessageType.TRACKING_STOPPED + ) + + simcore_platform_status: SimcorePlatformStatus = Field( + ..., + description=f"{SimcorePlatformStatus.BAD} if simcore failed to run the service properly", + ) + + +RabbitResourceTrackingMessages: TypeAlias = ( + RabbitResourceTrackingStartedMessage + | RabbitResourceTrackingStoppedMessage + | RabbitResourceTrackingHeartbeatMessage +) + + +class WalletCreditsMessage(RabbitMessageBase): + channel_name: Literal["io.simcore.service.wallets"] = "io.simcore.service.wallets" + created_at: datetime.datetime = Field( + default_factory=lambda: arrow.utcnow().datetime, + description="message creation datetime", + ) + wallet_id: WalletID + credits: Decimal + product_name: ProductName + + def routing_key(self) -> str | None: + return f"{self.wallet_id}" + + +class CreditsLimit(IntEnum): + OUT_OF_CREDITS = 0 + + +class WalletCreditsLimitReachedMessage(RabbitMessageBase): + channel_name: Literal["io.simcore.service.wallets-credit-limit-reached"] = ( + "io.simcore.service.wallets-credit-limit-reached" + ) + created_at: datetime.datetime = Field( + default_factory=lambda: arrow.utcnow().datetime, + description="message creation datetime", + ) + service_run_id: str = Field( + ..., description="uniquely identitifies the service run" + ) + user_id: UserID + project_id: ProjectID + node_id: NodeID + wallet_id: WalletID + credits: Decimal + credits_limit: CreditsLimit + + def routing_key(self) -> str | None: + return f"{self.wallet_id}.{self.credits_limit}" diff --git a/packages/models-library/src/models_library/resource_tracker.py b/packages/models-library/src/models_library/resource_tracker.py new file mode 100644 index 00000000000..a1d27cd74eb --- /dev/null +++ b/packages/models-library/src/models_library/resource_tracker.py @@ -0,0 +1,354 @@ +import logging +from datetime import datetime, timezone +from decimal import Decimal +from enum import IntEnum, auto +from typing import NamedTuple, TypeAlias + +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + Field, + NonNegativeInt, + PositiveInt, + field_validator, +) + +from .products import ProductName +from .rest_filters import Filters +from .utils.enums import StrAutoEnum + +_logger = logging.getLogger(__name__) + +PricingPlanId: TypeAlias = PositiveInt +PricingUnitId: TypeAlias = PositiveInt +PricingUnitCostId: TypeAlias = PositiveInt +CreditTransactionId: TypeAlias = PositiveInt + + +class ResourceTrackerServiceType(StrAutoEnum): + COMPUTATIONAL_SERVICE = auto() + 
DYNAMIC_SERVICE = auto() + + +class ServiceRunStatus(StrAutoEnum): + RUNNING = auto() + SUCCESS = auto() + ERROR = auto() + + +class CreditTransactionStatus(StrAutoEnum): + # Represents the possible statuses of a credit transaction. + + PENDING = auto() + # The transaction is pending and has not yet been finalized. + # Example: During the running of a service, the transaction remains in the Pending state until the service is stopped. + + BILLED = auto() + # The transaction has been successfully billed. + + IN_DEBT = auto() + # The transaction is marked as in debt. + # Example: This occurs when a computational job continues to run even though the user does not have sufficient credits in their wallet. + + NOT_BILLED = auto() + # The transaction will not be billed. + # Example: This status is used when there is an issue on our side, and we decide not to bill the user. + + REQUIRES_MANUAL_REVIEW = auto() + # The transaction requires manual review due to potential issues. + # NOTE: This status is currently not in use. + + +class CreditClassification(StrAutoEnum): + # Represents the different types of credit classifications. + + ADD_WALLET_TOP_UP = auto() + # Indicates that credits have been added to the user's wallet through a top-up. + # Example: The user adds funds to their wallet to increase their available credits. + + DEDUCT_SERVICE_RUN = auto() + # Represents a deduction from the user's wallet due to the costs of running a computational or dynamic service. + # Example: Credits are deducted when the user runs a simulation. + + DEDUCT_LICENSE_PURCHASE = auto() + # Represents a deduction from the user's wallet for purchasing a license. + # Example: The user purchases a license to access premium features such as VIP models. + + ADD_WALLET_EXCHANGE = auto() + # Represents the addition of credits to the user's wallet through an exchange. + # Example: Credits are added due to credit exchange between wallets. + + DEDUCT_WALLET_EXCHANGE = auto() + # Represents a deduction of credits from the user's wallet through an exchange. + # Example: Credits are deducted due to credit exchange between wallets. 
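
The credit lifecycle encoded above (a transaction opens as PENDING while a service runs and is later settled as BILLED, IN_DEBT or NOT_BILLED, with CreditClassification recording why credits moved) is consumed by other pydantic models in the resource tracker. A minimal sketch of how these enums are typically embedded and serialized; `_CreditTransactionDraft` is a hypothetical name used only for illustration, and the printed values assume `StrAutoEnum` members take their own member name as string value:

    from pydantic import BaseModel

    from models_library.resource_tracker import (
        CreditClassification,
        CreditTransactionStatus,
    )


    class _CreditTransactionDraft(BaseModel):
        # hypothetical helper model, not part of this diff
        status: CreditTransactionStatus
        classification: CreditClassification


    draft = _CreditTransactionDraft(
        status=CreditTransactionStatus.PENDING,
        classification=CreditClassification.DEDUCT_SERVICE_RUN,
    )
    # StrAutoEnum members are string-valued, so they dump as plain strings
    print(draft.model_dump(mode="json"))
    # assuming auto() maps members to their names:
    # {'status': 'PENDING', 'classification': 'DEDUCT_SERVICE_RUN'}
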
+ + +class PricingPlanClassification(StrAutoEnum): + TIER = auto() + LICENSE = auto() + + +class PricingInfo(BaseModel): + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + pricing_unit_cost_id: PricingUnitCostId + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"pricing_plan_id": 1, "pricing_unit_id": 1, "pricing_unit_cost_id": 1} + ] + } + ) + + +class HardwareInfo(BaseModel): + aws_ec2_instances: list[str] + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"aws_ec2_instances": ["c6a.4xlarge"]}, + {"aws_ec2_instances": []}, + ] + } + ) + + @field_validator("aws_ec2_instances") + @classmethod + def warn_if_too_many_instances_are_present(cls, v: list[str]) -> list[str]: + if len(v) > 1: + msg = f"Only 1 entry is supported at the moment, received {v}" + raise ValueError(msg) + return v + + +class PricingAndHardwareInfoTuple(NamedTuple): + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + current_cost_per_unit_id: PricingUnitCostId + aws_ec2_instances: list[str] + + +class PricingPlanAndUnitIdsTuple(NamedTuple): + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + + +# Filtering for listing service runs/usages + + +class StartedAt(BaseModel): + from_: datetime | None = Field(None, alias="from") + until: datetime | None = Field(None) + + model_config = ConfigDict(populate_by_name=True) + + @field_validator("from_", mode="before") + @classmethod + def parse_from_filter(cls, v): + """Parse the filters field.""" + if v: + if isinstance(v, datetime): + return v + try: + from_ = datetime.strptime(v, "%Y-%m-%d").replace(tzinfo=timezone.utc) + except Exception as exc: + msg = "'from' value must be provided in proper format ." + raise ValueError(msg) from exc + return from_ + return v + + @field_validator("until", mode="before") + @classmethod + def parse_until_filter(cls, v): + """Parse the filters field.""" + if v: + if isinstance(v, datetime): + return v + try: + until = datetime.strptime(v, "%Y-%m-%d").replace(tzinfo=timezone.utc) + except Exception as exc: + msg = "'until' value must be provided in proper format ." + raise ValueError(msg) from exc + return until + return v + + +class ServiceResourceUsagesFilters(Filters): + started_at: StartedAt + + +## Pricing Plans + + +class PricingPlanCreate(BaseModel): + product_name: ProductName + display_name: str + description: str + classification: PricingPlanClassification + pricing_plan_key: str + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "product_name": "osparc", + "display_name": "My pricing plan", + "description": "This is general pricing plan", + "classification": PricingPlanClassification.TIER, + "pricing_plan_key": "my-unique-pricing-plan", + } + ] + } + ) + + +class PricingPlanUpdate(BaseModel): + pricing_plan_id: PricingPlanId + display_name: str + description: str + is_active: bool + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "display_name": "My pricing plan", + "description": "This is general pricing plan", + "is_active": True, + } + ] + } + ) + + +## Pricing Units + + +class SpecificInfo(HardwareInfo): + """Custom information that is not propagated to the frontend. For example can be used + to store aws ec2 instance type.""" + + +class UnitExtraInfoTier(BaseModel): + """Custom information that is propagated to the frontend. 
Defined fields are mandatory.""" + + CPU: NonNegativeInt + RAM: ByteSize + VRAM: ByteSize + + model_config = ConfigDict( + populate_by_name=True, + extra="allow", + json_schema_extra={ + "examples": [ + { + "CPU": 32, + "RAM": 64, + "VRAM": 0, + "SSD": 600, + "custom key": "custom value", + } + ] + }, + ) + + +class UnitExtraInfoLicense(BaseModel): + """Custom information that is propagated to the frontend. Defined fields are mandatory.""" + + num_of_seats: NonNegativeInt + + model_config = ConfigDict( + populate_by_name=True, + extra="allow", + json_schema_extra={ + "examples": [ + { + "num_of_seats": 5, + "custom key": "custom value", + } + ] + }, + ) + + +class PricingUnitWithCostCreate(BaseModel): + pricing_plan_id: PricingPlanId + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + default: bool + specific_info: SpecificInfo + cost_per_unit: Decimal + comment: str + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "unit_name": "My pricing plan", + "unit_extra_info": UnitExtraInfoTier.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "default": True, + "specific_info": {"aws_ec2_instances": ["t3.medium"]}, + "cost_per_unit": 10, + "comment": "This pricing unit was create by Foo", + } + ] + } + ) + + +class PricingUnitCostUpdate(BaseModel): + cost_per_unit: Decimal + comment: str + + +class PricingUnitWithCostUpdate(BaseModel): + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + default: bool + specific_info: SpecificInfo + pricing_unit_cost_update: PricingUnitCostUpdate | None + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "pricing_unit_id": 1, + "unit_name": "My pricing plan", + "unit_extra_info": UnitExtraInfoTier.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "default": True, + "specific_info": {"aws_ec2_instances": ["t3.medium"]}, + "pricing_unit_cost_update": { + "cost_per_unit": 10, + "comment": "This pricing unit was updated by Foo", + }, + }, + { + "pricing_plan_id": 1, + "pricing_unit_id": 1, + "unit_name": "My pricing plan", + "unit_extra_info": UnitExtraInfoTier.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "default": True, + "specific_info": {"aws_ec2_instances": ["t3.medium"]}, + "pricing_unit_cost_update": None, + }, + ] + } + ) + + +class ServicesAggregatedUsagesType(StrAutoEnum): + services = "services" + + +class ServicesAggregatedUsagesTimePeriod(IntEnum): + ONE_DAY = 1 + ONE_WEEK = 7 + ONE_MONTH = 30 diff --git a/packages/models-library/src/models_library/resource_tracker_licensed_items_checkouts.py b/packages/models-library/src/models_library/resource_tracker_licensed_items_checkouts.py new file mode 100644 index 00000000000..cd09440b822 --- /dev/null +++ b/packages/models-library/src/models_library/resource_tracker_licensed_items_checkouts.py @@ -0,0 +1,4 @@ +from typing import TypeAlias +from uuid import UUID + +LicensedItemCheckoutID: TypeAlias = UUID diff --git a/packages/models-library/src/models_library/resource_tracker_licensed_items_purchases.py b/packages/models-library/src/models_library/resource_tracker_licensed_items_purchases.py new file mode 100644 index 00000000000..1ea79606965 --- /dev/null +++ b/packages/models-library/src/models_library/resource_tracker_licensed_items_purchases.py @@ -0,0 +1,35 @@ +from datetime import datetime +from decimal import 
Decimal +from typing import TypeAlias +from uuid import UUID + +from pydantic import BaseModel, ConfigDict + +from .licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from .products import ProductName +from .resource_tracker import PricingPlanId, PricingUnitCostId, PricingUnitId +from .users import UserID +from .wallets import WalletID + +LicensedItemPurchaseID: TypeAlias = UUID + + +class LicensedItemsPurchasesCreate(BaseModel): + product_name: ProductName + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + wallet_name: str + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + start_at: datetime + expire_at: datetime + num_of_seats: int + purchased_by_user: UserID + user_email: str + purchased_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/packages/models-library/src/models_library/rest_base.py b/packages/models-library/src/models_library/rest_base.py new file mode 100644 index 00000000000..372b5139ce5 --- /dev/null +++ b/packages/models-library/src/models_library/rest_base.py @@ -0,0 +1,18 @@ +from pydantic import BaseModel, ConfigDict + + +class RequestParameters(BaseModel): + """ + Base model for any type of request parameters, + i.e. context, path, query, headers + """ + + def as_params(self, **export_options) -> dict[str, str]: + data = self.model_dump(**export_options) + return {k: f"{v}" for k, v in data.items()} + + +class StrictRequestParameters(RequestParameters): + """Use a base class for context, path and query parameters""" + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/rest_error.py b/packages/models-library/src/models_library/rest_error.py new file mode 100644 index 00000000000..71cc1b877b6 --- /dev/null +++ b/packages/models-library/src/models_library/rest_error.py @@ -0,0 +1,128 @@ +from dataclasses import dataclass +from typing import Annotated + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.generics import Envelope +from pydantic import BaseModel, ConfigDict, Field + +from .basic_types import IDStr, LogLevel + + +class Log(BaseModel): + level: Annotated[LogLevel | None, Field(description="log level")] = LogLevel.INFO + message: str = Field( + ..., + description="log message. If logger is USER, then it MUST be human readable", + ) + logger: str | None = Field( + None, description="name of the logger receiving this message" + ) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "message": "Hi there, Mr user", + "level": "INFO", + "logger": "user-logger", + } + } + ) + + +class ErrorItem(BaseModel): + code: str = Field( + ..., + description="Typically the name of the exception that produced it otherwise some known error code", + ) + message: str = Field(..., description="Error message specific to this item") + resource: str | None = Field( + None, description="API resource affected by this error" + ) + field: str | None = Field(None, description="Specific field within the resource") + + +@dataclass +class LogMessageType: + # NOTE: deprecated! + message: str + level: str = "INFO" + logger: str = "user" + + +@dataclass +class ErrorItemType: + # NOTE: deprecated! 
+ code: str + message: str + resource: str | None + field: str | None + + @classmethod + def from_error(cls, err: BaseException): + return cls( + code=err.__class__.__name__, message=str(err), resource=None, field=None + ) + + +class ErrorGet(BaseModel): + message: Annotated[ + str, + Field( + min_length=5, + description="Message displayed to the user", + ), + ] + support_id: Annotated[ + IDStr | None, + Field(description="ID to track the incident during support", alias="supportId"), + ] = None + status: int + + # NOTE: The fields blow are DEPRECATED. Still here to keep compatibilty with front-end until updated + errors: Annotated[ + list[ErrorItemType], + Field(deprecated=True, default_factory=list, json_schema_extra={"default": []}), + ] = DEFAULT_FACTORY + logs: Annotated[ + list[LogMessageType], + Field(deprecated=True, default_factory=list, json_schema_extra={"default": []}), + ] = DEFAULT_FACTORY + + model_config = ConfigDict( + populate_by_name=True, + extra="ignore", # Used to prune extra fields from internal data + frozen=True, + json_schema_extra={ + "examples": [ + { + "message": "Sorry you do not have sufficient access rights for product", + "status": 401, + }, + { + "message": "Opps this error was unexpected. We are working on that!", + "supportId": "OEC:12346789", + "status": 500, + }, + ] + }, + ) + + +class EnvelopedError(Envelope[None]): + error: ErrorGet + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + {"error": {"message": "display error message here", "status": 401}}, + { + "error": { + "message": "failure", + "supportId": "OEC:123455", + "status": 500, + }, + "data": None, + }, + ] + }, + ) diff --git a/packages/models-library/src/models_library/rest_filters.py b/packages/models-library/src/models_library/rest_filters.py new file mode 100644 index 00000000000..c6edfb730ad --- /dev/null +++ b/packages/models-library/src/models_library/rest_filters.py @@ -0,0 +1,25 @@ +from typing import Annotated, Generic, TypeVar + +from pydantic import BaseModel, BeforeValidator, Field + +from .utils.common_validators import parse_json_pre_validator + + +class Filters(BaseModel): + """ + Encoded as JSON. Each available filter can have its own logic (should be well documented) + Inspired by Docker API https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList. 
+ """ + + +# Custom filter +FilterT = TypeVar("FilterT", bound=Filters) + + +class FiltersQueryParameters(BaseModel, Generic[FilterT]): + filters: Annotated[ + FilterT | None, BeforeValidator(parse_json_pre_validator) + ] = Field( # pylint: disable=unsubscriptable-object + default=None, + description="Custom filter query parameter encoded as JSON", + ) diff --git a/packages/models-library/src/models_library/rest_ordering.py b/packages/models-library/src/models_library/rest_ordering.py new file mode 100644 index 00000000000..eb2a5adf5f4 --- /dev/null +++ b/packages/models-library/src/models_library/rest_ordering.py @@ -0,0 +1,120 @@ +from enum import Enum +from typing import Annotated + +from common_library.json_serialization import json_dumps +from pydantic import BaseModel, BeforeValidator, ConfigDict, Field, field_validator + +from .basic_types import IDStr +from .rest_base import RequestParameters +from .utils.common_validators import parse_json_pre_validator + + +class OrderDirection(str, Enum): + ASC = "asc" + DESC = "desc" + + +class OrderBy(BaseModel): + # Based on https://google.aip.dev/132#ordering + field: IDStr = Field(..., description="field name identifier") + direction: OrderDirection = Field( + default=OrderDirection.ASC, + description=( + f"As [A,B,C,...] if `{OrderDirection.ASC.value}`" + f" or [Z,Y,X, ...] if `{OrderDirection.DESC.value}`" + ), + ) + + +class _BaseOrderQueryParams(RequestParameters): + order_by: OrderBy + + +def create_ordering_query_model_class( + *, + ordering_fields: set[str], + default: OrderBy, + ordering_fields_api_to_column_map: dict[str, str] | None = None, +) -> type[_BaseOrderQueryParams]: + """Factory to create an uniform model used as ordering parameters in a query + + Arguments: + ordering_fields -- A set of valid fields that can be used for ordering. + These should correspond to API field names. + default -- The default ordering configuration to be applied if no explicit + ordering is provided + + Keyword Arguments: + ordering_fields_api_to_column_map -- A mapping of API field names to + database column names. If provided, fields specified in the API + will be automatically translated to their corresponding database + column names for seamless integration with database queries. + """ + _ordering_fields_api_to_column_map = ordering_fields_api_to_column_map or {} + + assert set(_ordering_fields_api_to_column_map.keys()).issubset( # nosec + ordering_fields + ) + + assert default.field in ordering_fields # nosec + + msg_field_options = "|".join(sorted(ordering_fields)) + msg_direction_options = "|".join(sorted(OrderDirection)) + + class _OrderBy(OrderBy): + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ + "examples": [ + { + "field": next(iter(ordering_fields)), + "direction": OrderDirection.DESC.value, + } + ] + }, + # Necessary to run _check_ordering_field_and_map in defaults and assignments + validate_assignment=True, + validate_default=True, + ) + + @field_validator("field", mode="before") + @classmethod + def _check_ordering_field_and_map(cls, v): + if v not in ordering_fields: + msg = ( + f"We do not support ordering by provided field '{v}'. " + f"Fields supported are {msg_field_options}." 
+ ) + raise ValueError(msg) + + # API field name -> DB column_name conversion + return _ordering_fields_api_to_column_map.get(v) or v + + assert "json_schema_extra" in _OrderBy.model_config # nosec + assert isinstance(_OrderBy.model_config["json_schema_extra"], dict) # nosec + assert isinstance( # nosec + _OrderBy.model_config["json_schema_extra"]["examples"], list + ) + order_by_example = _OrderBy.model_config["json_schema_extra"]["examples"][0] + order_by_example_json = json_dumps(order_by_example) + assert _OrderBy.model_validate(order_by_example), "Example is invalid" # nosec + + converted_default = _OrderBy.model_validate( + # NOTE: enforces ordering_fields_api_to_column_map + default.model_dump() + ) + + class _OrderQueryParams(_BaseOrderQueryParams): + order_by: Annotated[ + _OrderBy, BeforeValidator(parse_json_pre_validator) + ] = Field( + default=converted_default, + description=( + f"Order by field (`{msg_field_options}`) and direction (`{msg_direction_options}`). " + f"The default sorting order is `{json_dumps(default)}`." + ), + examples=[order_by_example], + json_schema_extra={"example_json": order_by_example_json}, + ) + + return _OrderQueryParams diff --git a/packages/models-library/src/models_library/rest_pagination.py b/packages/models-library/src/models_library/rest_pagination.py index 597ddaa1777..2158d6ba411 100644 --- a/packages/models-library/src/models_library/rest_pagination.py +++ b/packages/models-library/src/models_library/rest_pagination.py @@ -1,131 +1,165 @@ -from typing import Generic, List, Optional, TypeVar +from typing import Annotated, Final, Generic, TypeAlias, TypeVar from pydantic import ( AnyHttpUrl, BaseModel, - Extra, + BeforeValidator, + ConfigDict, Field, NonNegativeInt, PositiveInt, - validator, + TypeAdapter, + ValidationInfo, + field_validator, ) -from pydantic.generics import GenericModel -DEFAULT_NUMBER_OF_ITEMS_PER_PAGE = 20 +from .rest_base import RequestParameters +from .utils.common_validators import none_to_empty_list_pre_validator + +# Default limit values +# - Using same values across all pagination entrypoints simplifies +# interconnecting paginated calls +MINIMUM_NUMBER_OF_ITEMS_PER_PAGE: Final[int] = 1 +MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE: Final[int] = 50 + +PageLimitInt: TypeAlias = Annotated[ + int, + Field( + ge=MINIMUM_NUMBER_OF_ITEMS_PER_PAGE, + le=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, + description="The maximum number of items to return in a single page.", + ), +] +PageOffsetInt: TypeAlias = Annotated[ + int, + Field( + ge=0, + description="The number of items to skip before starting to collect the items for the current pag", + ), +] +PageTotalCount: TypeAlias = NonNegativeInt + + +DEFAULT_NUMBER_OF_ITEMS_PER_PAGE: Final[PageLimitInt] = TypeAdapter( + PageLimitInt +).validate_python(20) + + +class CursorQueryParameters(RequestParameters): + """Query parameters for Cursor-Based Pagination + + SEE https://uriyyo-fastapi-pagination.netlify.app/learn/pagination/techniques/#cursor-based-pagination + """ + + size: PageLimitInt = Field( + default=DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + description="maximum number of items to return (pagination)", + ) + cursor: Annotated[ + str | None, + Field( + description="unique identifier that represent the position in the dataset" + ), + ] = None + + +class PageQueryParameters(RequestParameters): + """Query parameters for Limit-Offset Pagination + + SEE https://uriyyo-fastapi-pagination.netlify.app/learn/pagination/techniques/#limit-offset-pagination + """ + + limit: PageLimitInt = 
DEFAULT_NUMBER_OF_ITEMS_PER_PAGE + offset: PageOffsetInt = 0 class PageMetaInfoLimitOffset(BaseModel): limit: PositiveInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE - total: NonNegativeInt + total: PageTotalCount offset: NonNegativeInt = 0 count: NonNegativeInt - @validator("offset") + @field_validator("offset") @classmethod - def check_offset(cls, v, values): - if v > 0 and v >= values["total"]: - raise ValueError( - f"offset {v} cannot be equal or bigger than total {values['total']}, please check" - ) + def _check_offset(cls, v, info: ValidationInfo): + if v > 0 and v >= info.data["total"]: + msg = f"offset {v} cannot be equal or bigger than total {info.data['total']}, please check" + raise ValueError(msg) return v - @validator("count") + @field_validator("count") @classmethod - def check_count(cls, v, values): - if v > values["limit"]: - raise ValueError( - f"count {v} bigger than limit {values['limit']}, please check" - ) - if v > values["total"]: - raise ValueError( - f"count {v} bigger than expected total {values['total']}, please check" - ) - if "offset" in values and (values["offset"] + v) > values["total"]: - raise ValueError( - f"offset {values['offset']} + count {v} is bigger than allowed total {values['total']}, please check" - ) + def _check_count(cls, v, info: ValidationInfo): + if v > info.data["limit"]: + msg = f"count {v} bigger than limit {info.data['limit']}, please check" + raise ValueError(msg) + if v > info.data["total"]: + msg = f"count {v} bigger than expected total {info.data['total']}, please check" + raise ValueError(msg) + if "offset" in info.data and (info.data["offset"] + v) > info.data["total"]: + msg = f"offset {info.data['offset']} + count {v} is bigger than allowed total {info.data['total']}, please check" + raise ValueError(msg) return v - class Config: - extra = Extra.forbid - - schema_extra = { + model_config = ConfigDict( + extra="forbid", + json_schema_extra={ "examples": [ {"total": 7, "count": 4, "limit": 4, "offset": 0}, ] - } + }, + ) + +RefT = TypeVar("RefT") -class PageLinks(BaseModel): - self: AnyHttpUrl - first: AnyHttpUrl - prev: Optional[AnyHttpUrl] - next: Optional[AnyHttpUrl] - last: AnyHttpUrl - class Config: - extra = Extra.forbid +class PageRefs(BaseModel, Generic[RefT]): + self: RefT + first: RefT + prev: RefT | None + next: RefT | None + last: RefT + + model_config = ConfigDict(extra="forbid") + + +class PageLinks( + PageRefs[ + Annotated[ + str, + BeforeValidator(lambda x: str(TypeAdapter(AnyHttpUrl).validate_python(x))), + ] + ] +): ... 
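
The `_check_offset` and `_check_count` validators above keep the page metadata self-consistent: `count` may never exceed `limit` or `total`, and `offset + count` may never overrun `total`. A short sketch of the behaviour a caller can expect (assuming `models_library.rest_pagination` from this package is importable):

    from pydantic import ValidationError

    from models_library.rest_pagination import PageMetaInfoLimitOffset

    # consistent metadata: first page of 7 items, 4 of them returned
    meta = PageMetaInfoLimitOffset(total=7, count=4, limit=4, offset=0)
    assert meta.count == 4

    # inconsistent metadata is rejected by the validators shown above
    try:
        PageMetaInfoLimitOffset(total=7, count=4, limit=4, offset=5)
    except ValidationError as err:
        print(err)  # "... offset 5 + count 4 is bigger than allowed total 7, please check"
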
ItemT = TypeVar("ItemT") -class Page(GenericModel, Generic[ItemT]): +class Page(BaseModel, Generic[ItemT]): """ Paginated response model of ItemTs """ meta: PageMetaInfoLimitOffset = Field(alias="_meta") links: PageLinks = Field(alias="_links") - data: List[ItemT] + data: list[ItemT] - @validator("data", pre=True) - @classmethod - def convert_none_to_empty_list(cls, v): - if v is None: - v = [] - return v + _none_is_empty = field_validator("data", mode="before")( + none_to_empty_list_pre_validator + ) - @validator("data") + @field_validator("data") @classmethod - def check_data_compatible_with_meta(cls, v, values): - if "meta" not in values: + def _check_data_compatible_with_meta(cls, v, info: ValidationInfo): + if "meta" not in info.data: # if the validation failed in meta this happens - raise ValueError("meta not in values") - if len(v) != values["meta"].count: - raise ValueError( - f"container size [{len(v)}] must be equal to count [{values['meta'].count}]" - ) + msg = "meta not in values" + raise ValueError(msg) + if len(v) != info.data["meta"].count: + msg = f"container size [{len(v)}] must be equal to count [{info.data['meta'].count}]" + raise ValueError(msg) return v - class Config: - extra = Extra.forbid - - schema_extra = { - "examples": [ - # first page Page[str] - { - "_meta": {"total": 7, "count": 4, "limit": 4, "offset": 0}, - "_links": { - "self": "http://osparc.io/v2/listing?offset=0&limit=4", - "first": "http://osparc.io/v2/listing?offset=0&limit=4", - "prev": None, - "next": "http://osparc.io/v2/listing?offset=1&limit=4", - "last": "http://osparc.io/v2/listing?offset=1&limit=4", - }, - "data": ["data 1", "data 2", "data 3", "data 4"], - }, - # second and last page - { - "_meta": {"total": 7, "count": 3, "limit": 4, "offset": 1}, - "_links": { - "self": "http://osparc.io/v2/listing?offset=1&limit=4", - "first": "http://osparc.io/v2/listing?offset=0&limit=4", - "prev": "http://osparc.io/v2/listing?offset=0&limit=4", - "next": None, - "last": "http://osparc.io/v2/listing?offset=1&limit=4", - }, - "data": ["data 5", "data 6", "data 7"], - }, - ] - } + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/rest_pagination_utils.py b/packages/models-library/src/models_library/rest_pagination_utils.py index 9774349c898..1bd952cfd12 100644 --- a/packages/models-library/src/models_library/rest_pagination_utils.py +++ b/packages/models-library/src/models_library/rest_pagination_utils.py @@ -1,5 +1,10 @@ from math import ceil -from typing import Any, Dict, List, Protocol, TypedDict, Union, runtime_checkable +from typing import Any, Protocol, runtime_checkable + +from pydantic import AnyHttpUrl, TypeAdapter +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) from .rest_pagination import PageLinks, PageMetaInfoLimitOffset @@ -26,26 +31,29 @@ def replace_query_params(self, **kwargs: Any) -> "_StarletteURL": ... 
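
`Page` is now a plain `BaseModel` generic (pydantic v2) rather than a `GenericModel`, but it is parametrized and validated the same way; only the call changes from `parse_obj` to `model_validate`. A short sketch reusing the first-page example that previously lived in `schema_extra`:

    from models_library.rest_pagination import Page

    first_page = {
        "_meta": {"total": 7, "count": 4, "limit": 4, "offset": 0},
        "_links": {
            "self": "http://osparc.io/v2/listing?offset=0&limit=4",
            "first": "http://osparc.io/v2/listing?offset=0&limit=4",
            "prev": None,
            "next": "http://osparc.io/v2/listing?offset=1&limit=4",
            "last": "http://osparc.io/v2/listing?offset=1&limit=4",
        },
        "data": ["data 1", "data 2", "data 3", "data 4"],
    }

    page = Page[str].model_validate(first_page)
    assert page.meta.total == 7
    assert page.links.next is not None
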
-_URLType = Union[_YarlURL, _StarletteURL] +_URLType = _YarlURL | _StarletteURL -def _replace_query(url: _URLType, query: Dict[str, Any]): +def _replace_query(url: _URLType, query: dict[str, Any]) -> str: """This helper function ensures query replacement works with both""" + new_url: _URLType | _StarletteURL if isinstance(url, _YarlURL): new_url = url.update_query(query) else: new_url = url.replace_query_params(**query) - return f"{new_url}" + + new_url_str = f"{new_url}" + return f"{TypeAdapter(AnyHttpUrl).validate_python(new_url_str)}" class PageDict(TypedDict): _meta: Any _links: Any - data: List[Any] + data: list[Any] def paginate_data( - chunk: List[Any], + chunk: list[Any], *, request_url: _URLType, total: int, @@ -57,15 +65,19 @@ def paginate_data( Usage: obj: PageDict = paginate_data( ... ) - model = Page[MyModelItem].parse_obj(obj) + model = Page[MyModelItem].model_validate(obj) raises ValidationError """ last_page = ceil(total / limit) - 1 + data = [ + item.model_dump() if hasattr(item, "model_dump") else item for item in chunk + ] + return PageDict( _meta=PageMetaInfoLimitOffset( - total=total, count=len(chunk), limit=limit, offset=offset + total=total, count=len(data), limit=limit, offset=offset ), _links=PageLinks( self=_replace_query(request_url, {"offset": offset, "limit": limit}), @@ -85,5 +97,5 @@ def paginate_data( request_url, {"offset": last_page * limit, "limit": limit} ), ), - data=chunk, + data=data, ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/__init__.py b/packages/models-library/src/models_library/rpc/__init__.py similarity index 100% rename from services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/__init__.py rename to packages/models-library/src/models_library/rpc/__init__.py diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/__init__.py b/packages/models-library/src/models_library/rpc/webserver/__init__.py similarity index 100% rename from services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/__init__.py rename to packages/models-library/src/models_library/rpc/webserver/__init__.py diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/__init__.py b/packages/models-library/src/models_library/rpc/webserver/auth/__init__.py similarity index 100% rename from services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/__init__.py rename to packages/models-library/src/models_library/rpc/webserver/auth/__init__.py diff --git a/packages/models-library/src/models_library/rpc/webserver/auth/api_keys.py b/packages/models-library/src/models_library/rpc/webserver/auth/api_keys.py new file mode 100644 index 00000000000..80d248d9045 --- /dev/null +++ b/packages/models-library/src/models_library/rpc/webserver/auth/api_keys.py @@ -0,0 +1,58 @@ +import datetime as dt +import hashlib +import re +import secrets +import string +from typing import Annotated, Final + +from models_library.basic_types import IDStr +from pydantic import BaseModel, ConfigDict, Field + +_PUNCTUATION_REGEX = re.compile( + pattern="[" + re.escape(string.punctuation.replace("_", "")) + "]" +) + +_KEY_LEN: Final = 10 +_SECRET_LEN: Final = 20 + + +def generate_unique_api_key(name: str, length: int = _KEY_LEN) -> str: + prefix = _PUNCTUATION_REGEX.sub("_", name[:5]) + hashed = hashlib.sha256(name.encode()).hexdigest() + return f"{prefix}_{hashed[:length]}" + + +def generate_api_key_and_secret(name: str): + api_key = generate_unique_api_key(name) + 
api_secret = secrets.token_hex(_SECRET_LEN) + return api_key, api_secret + + +class ApiKeyCreate(BaseModel): + display_name: Annotated[str, Field(..., min_length=3)] + expiration: dt.timedelta | None = None + + model_config = ConfigDict( + from_attributes=True, + ) + + +class ApiKeyGet(BaseModel): + id: IDStr + display_name: Annotated[str, Field(..., min_length=3)] + api_key: str | None = None + api_secret: str | None = None + + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ + { + "id": "42", + "display_name": "test-api-forever", + "api_key": "key", + "api_secret": "secret", + }, + ] + }, + ) diff --git a/packages/models-library/src/models_library/rpc/webserver/projects.py b/packages/models-library/src/models_library/rpc/webserver/projects.py new file mode 100644 index 00000000000..d1bfff34213 --- /dev/null +++ b/packages/models-library/src/models_library/rpc/webserver/projects.py @@ -0,0 +1,143 @@ +from datetime import datetime +from typing import Annotated, TypeAlias +from uuid import uuid4 + +from pydantic import BaseModel, ConfigDict, Field +from pydantic.config import JsonDict + +from ...projects import NodesDict, ProjectID +from ...projects_nodes import Node +from ...rpc_pagination import PageRpc + + +class MetadataFilterItem(BaseModel): + name: str + pattern: str + + +class ListProjectsMarkedAsJobRpcFilters(BaseModel): + """Filters model for the list_projects_marked_as_jobs RPC. + + NOTE: Filters models are used to validate all possible filters in an API early on, + particularly to ensure compatibility and prevent conflicts between different filters. + """ + + job_parent_resource_name_prefix: str | None = None + + any_custom_metadata: Annotated[ + list[MetadataFilterItem] | None, + Field(description="Searchs for matches of any of the custom metadata fields"), + ] = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "job_parent_resource_name_prefix": "solvers/solver123", + "any_custom_metadata": [ + { + "name": "solver_type", + "pattern": "FEM", + }, + { + "name": "mesh_cells", + "pattern": "1*", + }, + ], + }, + { + "any_custom_metadata": [ + { + "name": "solver_type", + "pattern": "*CFD*", + } + ], + }, + {"job_parent_resource_name_prefix": "solvers/solver123"}, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class ProjectJobRpcGet(BaseModel): + """ + Minimal information about a project that (for now) will fullfill + the needs of the api-server. 
Specifically, the fields needed in + project to call create_job_from_project + """ + + uuid: Annotated[ + ProjectID, + Field(description="project unique identifier"), + ] + name: Annotated[ + str, + Field(description="project display name"), + ] + description: str + + workbench: NodesDict + + # timestamps + created_at: datetime + modified_at: datetime + + # Specific to jobs + job_parent_resource_name: str + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + nodes_examples = Node.model_json_schema()["examples"] + schema.update( + { + "examples": [ + { + "uuid": "12345678-1234-5678-1234-123456789012", + "name": "A solver job", + "description": "A description of a solver job with a single node", + "workbench": {f"{uuid4()}": n for n in nodes_examples[2:3]}, + "created_at": "2023-01-01T00:00:00Z", + "modified_at": "2023-01-01T00:00:00Z", + "job_parent_resource_name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2", + }, + { + "uuid": "00000000-1234-5678-1234-123456789012", + "name": "A study job", + "description": "A description of a study job with many node", + "workbench": {f"{uuid4()}": n for n in nodes_examples}, + "created_at": "2023-02-01T00:00:00Z", + "modified_at": "2023-02-01T00:00:00Z", + "job_parent_resource_name": "studies/96642f2a-a72c-11ef-8776-02420a00087d", + }, + { + "uuid": "00000000-0000-5678-1234-123456789012", + "name": "A program job", + "description": "A program of a solver job with a single node", + "workbench": {f"{uuid4()}": n for n in nodes_examples[2:3]}, + "created_at": "2023-03-01T00:00:00Z", + "modified_at": "2023-03-01T00:00:00Z", + "job_parent_resource_name": "program/simcore%2Fservices%2Fdynamic%2Fjupyter/releases/5.0.2", + }, + ] + } + ) + + model_config = ConfigDict( + extra="forbid", + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) + + +PageRpcProjectJobRpcGet: TypeAlias = PageRpc[ + # WARNING: keep this definition in models_library and not in the RPC interface + # otherwise the metaclass PageRpc[*] will create *different* classes in server/client side + # and will fail to serialize/deserialize these parameters when transmitted/received + ProjectJobRpcGet +] diff --git a/packages/models-library/src/models_library/rpc_filters.py b/packages/models-library/src/models_library/rpc_filters.py new file mode 100644 index 00000000000..ffc7c77f1c6 --- /dev/null +++ b/packages/models-library/src/models_library/rpc_filters.py @@ -0,0 +1,5 @@ +from .rest_filters import Filters + +__all__: tuple[str, ...] = ("Filters",) + +# nopycln:file diff --git a/packages/models-library/src/models_library/rpc_pagination.py b/packages/models-library/src/models_library/rpc_pagination.py new file mode 100644 index 00000000000..f1aecabab81 --- /dev/null +++ b/packages/models-library/src/models_library/rpc_pagination.py @@ -0,0 +1,77 @@ +# mypy: disable-error-code=truthy-function +from math import ceil +from typing import Any, Generic + +from pydantic import ConfigDict, Field + +from .rest_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, + ItemT, + Page, + PageLimitInt, + PageMetaInfoLimitOffset, + PageQueryParameters, + PageRefs, +) + +assert DEFAULT_NUMBER_OF_ITEMS_PER_PAGE # nosec +assert MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE # nosec +assert PageLimitInt # nosec + +__all__: tuple[str, ...] 
= ( + "DEFAULT_NUMBER_OF_ITEMS_PER_PAGE", + "MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE", + "PageLimitInt", + "PageMetaInfoLimitOffset", +) + + +class PageRefsParams(PageRefs[PageQueryParameters]): + @classmethod + def create(cls, total: int, limit: int, offset: int) -> "PageRefsParams": + last_page = ceil(total / limit) - 1 if total > 0 else 0 + return cls.model_validate( + { + "self": {"offset": offset, "limit": limit}, + "first": {"offset": 0, "limit": limit}, + "prev": ( + {"offset": max(offset - limit, 0), "limit": limit} + if offset > 0 and total > 0 + else None + ), + "next": ( + { + "offset": min(offset + limit, last_page * limit), + "limit": limit, + } + if offset < (last_page * limit) and total > 0 + else None + ), + "last": {"offset": last_page * limit, "limit": limit}, + } + ) + + +class PageRpc(Page[ItemT], Generic[ItemT]): + + links: PageRefsParams = Field(alias="_links") # type: ignore + + @classmethod + def create( + cls, + chunk: list[Any], + *, + total: int, + limit: int, + offset: int, + ) -> "PageRpc": + return cls( + _meta=PageMetaInfoLimitOffset( + total=total, count=len(chunk), limit=limit, offset=offset + ), + _links=PageRefsParams.create(total=total, limit=limit, offset=offset), + data=chunk, + ) + + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/service_settings_labels.py b/packages/models-library/src/models_library/service_settings_labels.py index be1ffd21223..b3e1956caba 100644 --- a/packages/models-library/src/models_library/service_settings_labels.py +++ b/packages/models-library/src/models_library/service_settings_labels.py @@ -1,33 +1,37 @@ # pylint: disable=unsubscriptable-object -import json from enum import Enum from functools import cached_property from pathlib import Path -from typing import Any, Final, Iterator, Literal, Optional, Union +from typing import Annotated, Any, Literal, TypeAlias +from common_library.basic_types import DEFAULT_FACTORY +from common_library.json_serialization import json_dumps from pydantic import ( BaseModel, - Extra, + ByteSize, + ConfigDict, Field, Json, PrivateAttr, - root_validator, - validator, + TypeAdapter, + ValidationError, + ValidationInfo, + field_validator, + model_validator, ) +from pydantic.config import JsonDict -from .basic_types import PortInt +from .callbacks_mapping import CallbacksMapping from .generics import ListModel +from .service_settings_nat_rule import NATRule from .services_resources import DEFAULT_SINGLE_SERVICE_NAME -# Cloudflare DNS server address -DEFAULT_DNS_SERVER_ADDRESS: Final[str] = "1.1.1.1" # NOSONAR -DEFAULT_DNS_SERVER_PORT: Final[PortInt] = 53 - - -class _BaseConfig: - extra = Extra.forbid - keep_untouched = (cached_property,) +_BaseConfig = ConfigDict( + extra="forbid", + arbitrary_types_allowed=True, + ignored_types=(cached_property,), +) class ContainerSpec(BaseModel): @@ -35,359 +39,561 @@ class ContainerSpec(BaseModel): request body: TaskTemplate -> ContainerSpec """ - command: list[str] = Field( - alias="Command", - description="Used to override the container's command", - # NOTE: currently constraint to our use cases. Might mitigate some security issues. - min_items=1, - max_items=2, - ) + command: Annotated[ + list[str], + Field( + alias="Command", + description="Used to override the container's command", + # NOTE: currently constraint to our use cases. Might mitigate some security issues. 
+ min_length=1, + max_length=2, + ), + ] + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + {"Command": ["executable"]}, + {"Command": ["executable", "subcommand"]}, + {"Command": ["ofs", "linear-regression"]}, + ] + } + ) - class Config(_BaseConfig): - schema_extra = { - "examples": [ - {"Command": ["executable"]}, - {"Command": ["executable", "subcommand"]}, - {"Command": ["ofs", "linear-regression"]}, - ] - } + model_config = _BaseConfig | ConfigDict(json_schema_extra=_update_json_schema_extra) class SimcoreServiceSettingLabelEntry(BaseModel): - """These values are used to build the request body of https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate + """Content of "simcore.service.settings" label + + These values are used to build the request body of https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate Specifically the section under ``TaskTemplate`` """ _destination_containers: list[str] = PrivateAttr() - name: str = Field(..., description="The name of the service setting") - setting_type: Literal[ - "string", - "int", - "integer", - "number", - "object", - "ContainerSpec", - "Resources", - ] = Field( - ..., - description="The type of the service setting (follows Docker REST API naming scheme)", - alias="type", - ) - value: Any = Field( - ..., - description="The value of the service setting (shall follow Docker REST API scheme for services", - ) + name: Annotated[str, Field(description="The name of the service setting")] + + setting_type: Annotated[ + Literal[ + "string", + "int", + "integer", + "number", + "object", + "ContainerSpec", + "Resources", + ], + Field( + description="The type of the service setting (follows Docker REST API naming scheme)", + alias="type", + ), + ] + + value: Annotated[ + Any, + Field( + description="The value of the service setting (shall follow Docker REST API scheme for services", + ), + ] + + def set_destination_containers(self, value: list[str]) -> None: + # NOTE: private attributes cannot be transformed into properties + # since it conflicts with pydantic's internals which treats them + # as fields + self._destination_containers = value - @validator("setting_type", pre=True) + def get_destination_containers(self) -> list[str]: + # NOTE: private attributes cannot be transformed into properties + # since it conflicts with pydantic's internals which treats them + # as fields + return self._destination_containers + + @field_validator("setting_type", mode="before") @classmethod - def ensure_backwards_compatible_setting_type(cls, v): + def _ensure_backwards_compatible_setting_type(cls, v): if v == "resources": # renamed in the latest version as return "Resources" return v - class Config(_BaseConfig): - schema_extra = { - "examples": [ - # constraints - { - "name": "constraints", - "type": "string", - "value": ["node.platform.os == linux"], - }, - # SEE service_settings_labels.py::ContainerSpec - { - "name": "ContainerSpec", - "type": "ContainerSpec", - "value": {"Command": ["run"]}, - }, - # SEE services_resources.py::ResourceValue - { - "name": "Resources", - "type": "Resources", - "value": { - "Limits": {"NanoCPUs": 4000000000, "MemoryBytes": 17179869184}, - "Reservations": { - "NanoCPUs": 100000000, - "MemoryBytes": 536870912, - "GenericResources": [ - {"DiscreteResourceSpec": {"Kind": "VRAM", "Value": 1}} - ], + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # constraints + { + "name": "constraints", 
+ "type": "string", + "value": ["node.platform.os == linux"], + }, + # SEE service_settings_labels.py::ContainerSpec + { + "name": "ContainerSpec", + "type": "ContainerSpec", + "value": {"Command": ["run"]}, + }, + # SEE services_resources.py::ResourceValue + { + "name": "Resources", + "type": "Resources", + "value": { + "Limits": { + "NanoCPUs": 4000000000, + "MemoryBytes": 17179869184, + }, + "Reservations": { + "NanoCPUs": 100000000, + "MemoryBytes": 536870912, + "GenericResources": [ + { + "DiscreteResourceSpec": { + "Kind": "VRAM", + "Value": 1, + } + } + ], + }, }, }, - }, - # mounts - { - "name": "mount", - "type": "object", - "value": [ - { - "ReadOnly": True, - "Source": "/tmp/.X11-unix", # nosec - "Target": "/tmp/.X11-unix", # nosec - "Type": "bind", - } - ], - }, - # environments - {"name": "env", "type": "string", "value": ["DISPLAY=:0"]}, - # SEE 'simcore.service.settings' label annotations for simcore/services/dynamic/jupyter-octave-python-math:1.6.5 - {"name": "ports", "type": "int", "value": 8888}, - { - "name": "resources", - "type": "resources", - "value": { - "Limits": {"NanoCPUs": 4000000000, "MemoryBytes": 8589934592} + # mounts + { + "name": "mount", + "type": "object", + "value": [ + { + "ReadOnly": True, + "Source": "/tmp/.X11-unix", # nosec # noqa: S108 + "Target": "/tmp/.X11-unix", # nosec # noqa: S108 + "Type": "bind", + } + ], }, - }, - ] - } + # environments + {"name": "env", "type": "string", "value": ["DISPLAY=:0"]}, + # SEE 'simcore.service.settings' label annotations for simcore/services/dynamic/jupyter-octave-python-math:1.6.5 + {"name": "ports", "type": "int", "value": 8888}, + { + "name": "resources", + "type": "resources", + "value": { + "Limits": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8589934592, + } + }, + }, + ] + } + ) + + model_config = _BaseConfig | ConfigDict( + populate_by_name=True, json_schema_extra=_update_json_schema_extra + ) SimcoreServiceSettingsLabel = ListModel[SimcoreServiceSettingLabelEntry] -class PathMappingsLabel(BaseModel): - inputs_path: Path = Field( - ..., description="folder path where the service expects all the inputs" - ) - outputs_path: Path = Field( - ..., - description="folder path where the service is expected to provide all its outputs", - ) - state_paths: list[Path] = Field( - [], - description="optional list of paths which contents need to be persisted", - ) +class LegacyState(BaseModel): + old_state_path: Path + new_state_path: Path - state_exclude: Optional[set[str]] = Field( - None, - description="optional list unix shell rules used to exclude files from the state", - ) - class Config(_BaseConfig): - schema_extra = { - "example": { - "outputs_path": "/tmp/outputs", # nosec - "inputs_path": "/tmp/inputs", # nosec - "state_paths": ["/tmp/save_1", "/tmp_save_2"], # nosec - "state_exclude": ["/tmp/strip_me/*", "*.py"], # nosec - } - } +class PathMappingsLabel(BaseModel): + """Content of "simcore.service.paths-mapping" label""" + inputs_path: Annotated[ + Path, Field(description="folder path where the service expects all the inputs") + ] -ComposeSpecLabel = dict[str, Any] + outputs_path: Annotated[ + Path, + Field( + description="folder path where the service is expected to provide all its outputs", + ), + ] + state_paths: Annotated[ + list[Path], + Field( + description="optional list of paths which contents need to be persisted", + default_factory=list, + ), + ] = DEFAULT_FACTORY -class RestartPolicy(str, Enum): - NO_RESTART = "no-restart" - ON_INPUTS_DOWNLOADED = "on-inputs-downloaded" + state_exclude: 
Annotated[ + set[str] | None, + Field( + description="optional list unix shell rules used to exclude files from the state", + ), + ] = None + + volume_size_limits: Annotated[ + dict[str, str] | None, + Field( + description=( + "Apply volume size limits to entries in: `inputs_path`, `outputs_path` " + "and `state_paths`. Limits must be parsable by Pydantic's ByteSize." + ), + ), + ] = None + + legacy_state: Annotated[ + LegacyState | None, + Field( + description=( + "if present, the service needs to first try to download the legacy state" + "coming from a different path." + ), + ), + ] = None + @field_validator("legacy_state") + @classmethod + def _validate_legacy_state( + cls, v: LegacyState | None, info: ValidationInfo + ) -> LegacyState | None: + if v is None: + return v -class _PortRange(BaseModel): - """`lower` and `upper` are included""" + state_paths: list[Path] = info.data.get("state_paths", []) + if v.new_state_path not in state_paths: + msg = f"legacy_state={v} not found in state_paths={state_paths}" + raise ValueError(msg) - lower: PortInt - upper: PortInt + return v - @validator("upper") + @field_validator("volume_size_limits") @classmethod - def lower_less_than_upper(cls, v, values) -> PortInt: - upper = v - lower: Optional[PortInt] = values.get("lower") - if lower is None or lower >= upper: - raise ValueError(f"Condition not satisfied: {lower=} < {upper=}") - return v + def _validate_volume_limits(cls, v, info: ValidationInfo) -> str | None: + if v is None: + return v + + for path_str, size_str in v.items(): + # checks that format is correct + try: + TypeAdapter(ByteSize).validate_python(size_str) + except ValidationError as e: + msg = f"Provided size='{size_str}' contains invalid charactes: {e!s}" + raise ValueError(msg) from e + + inputs_path: Path | None = info.data.get("inputs_path") + outputs_path: Path | None = info.data.get("outputs_path") + state_paths: list[Path] | None = info.data.get("state_paths") + path = Path(path_str) + if not ( + path in (inputs_path, outputs_path) + or (state_paths is not None and path in state_paths) + ): + msg = f"path={path!r} not found in inputs_path={inputs_path!r}, outputs_path={outputs_path!r}, state_paths={state_paths!r}" + raise ValueError(msg) + output: str | None = v + return output + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "outputs_path": "/tmp/outputs", # noqa: S108 nosec + "inputs_path": "/tmp/inputs", # noqa: S108 nosec + "state_paths": [ + "/tmp/save_1", # noqa: S108 nosec + "/tmp_save_2", # noqa: S108 nosec + ], + "state_exclude": ["/tmp/strip_me/*"], # noqa: S108 nosec + }, + { + "outputs_path": "/t_out", + "inputs_path": "/t_inp", + "state_paths": [ + "/s", + "/s0", + "/s1", + "/s2", + "/s3", + "/i_have_no_limit", + ], + "volume_size_limits": { + "/s": "1", + "/s0": "1m", + "/s1": "1kib", + "/s2": "1TIB", + "/s3": "1G", + "/t_out": "12", + "/t_inp": "1EIB", + }, + }, + { + "outputs_path": "/tmp/outputs", # noqa: S108 nosec + "inputs_path": "/tmp/inputs", # noqa: S108 nosec + "state_paths": [ + "/tmp/save_1", # noqa: S108 nosec + "/tmp_save_2", # noqa: S108 nosec + ], + "state_exclude": ["/tmp/strip_me/*"], # noqa: S108 nosec + "legacy_state": { + "old_state_path": "/tmp/save_1_legacy", # noqa: S108 nosec + "new_state_path": "/tmp/save_1", # noqa: S108 nosec + }, + }, + ] + } + ) + model_config = _BaseConfig | ConfigDict(json_schema_extra=_update_json_schema_extra) -class DNSResolver(BaseModel): - address: str = Field( - ..., description="this is 
not an url address is derived from IP address" - ) - port: PortInt - - class Config(_BaseConfig): - extra = Extra.allow - schema_extra = { - "examples": [ - {"address": "1.1.1.1", "port": 53}, # NOSONAR - {"address": "ns1.example.com", "port": 53}, - ] - } +ComposeSpecLabelDict: TypeAlias = dict[str, Any] -class NATRule(BaseModel): - hostname: str - tcp_ports: list[Union[_PortRange, PortInt]] - dns_resolver: DNSResolver = Field( - default_factory=lambda: DNSResolver( - address=DEFAULT_DNS_SERVER_ADDRESS, port=DEFAULT_DNS_SERVER_PORT - ), - description="specify a DNS resolver address and port", - ) - def iter_tcp_ports(self) -> Iterator[PortInt]: - for port in self.tcp_ports: - if type(port) == _PortRange: - yield from range(port.lower, port.upper + 1) - else: - yield port +class RestartPolicy(str, Enum): + """Content of "simcore.service.restart-policy" label""" + + NO_RESTART = "no-restart" + ON_INPUTS_DOWNLOADED = "on-inputs-downloaded" class DynamicSidecarServiceLabels(BaseModel): - paths_mapping: Optional[Json[PathMappingsLabel]] = Field( - None, - alias="simcore.service.paths-mapping", - description=( - "json encoded, determines how the folders are mapped in " - "the service. Required by dynamic-sidecar." + """All "simcore.service.*" labels including keys""" + + paths_mapping: Annotated[ + Json[PathMappingsLabel] | None, + Field( + alias="simcore.service.paths-mapping", + description=( + "json encoded, determines how the folders are mapped in " + "the service. Required by dynamic-sidecar." + ), ), - ) - - compose_spec: Optional[Json[ComposeSpecLabel]] = Field( - None, - alias="simcore.service.compose-spec", - description=( - "json encoded docker-compose specifications. see " - "https://docs.docker.com/compose/compose-file/, " - "only used by dynamic-sidecar." + ] = None + + compose_spec: Annotated[ + Json[ComposeSpecLabelDict | None] | None, + Field( + alias="simcore.service.compose-spec", + description=( + "json encoded docker-compose specifications. see " + "https://docs.docker.com/compose/compose-file/, " + "only used by dynamic-sidecar." + ), ), - ) - container_http_entry: Optional[str] = Field( - None, - alias="simcore.service.container-http-entrypoint", - description=( - "When a docker-compose specifications is provided, " - "the container where the traffic must flow has to be " - "specified. Required by dynamic-sidecar when " - "compose_spec is set." + ] = None + + container_http_entry: Annotated[ + str | None, + Field( + alias="simcore.service.container-http-entrypoint", + description=( + "When a docker-compose specifications is provided, " + "the container where the traffic must flow has to be " + "specified. Required by dynamic-sidecar when " + "compose_spec is set." + ), + validate_default=True, ), - ) - - restart_policy: RestartPolicy = Field( - RestartPolicy.NO_RESTART, - alias="simcore.service.restart-policy", - description=( - "the dynamic-sidecar can restart all running containers " - "on certain events. Supported events:\n" - "- `no-restart` default\n" - "- `on-inputs-downloaded` after inputs are downloaded\n" + ] = None + + user_preferences_path: Annotated[ + Path | None, + Field( + alias="simcore.service.user-preferences-path", + description=( + "path where the user user preferences folder " + "will be mounted in the user services" + ), ), - ) + ] = None + + restart_policy: Annotated[ + RestartPolicy, + Field( + alias="simcore.service.restart-policy", + description=( + "the dynamic-sidecar can restart all running containers " + "on certain events. 
Supported events:\n" + "- `no-restart` default\n" + "- `on-inputs-downloaded` after inputs are downloaded\n" + ), + ), + ] = RestartPolicy.NO_RESTART - containers_allowed_outgoing_permit_list: Optional[ - Json[dict[str, list[NATRule]]] - ] = Field( - None, - alias="simcore.service.containers-allowed-outgoing-permit-list", - description="allow internet access to certain domain names and ports per container", - ) + containers_allowed_outgoing_permit_list: Annotated[ + None | (Json[dict[str, list[NATRule]]]), + Field( + alias="simcore.service.containers-allowed-outgoing-permit-list", + description="allow internet access to certain domain names and ports per container", + ), + ] = None - containers_allowed_outgoing_internet: Optional[Json[set[str]]] = Field( - None, - alias="simcore.service.containers-allowed-outgoing-internet", - description="allow complete internet access to containers in here", - ) + containers_allowed_outgoing_internet: Annotated[ + Json[set[str]] | None, + Field( + alias="simcore.service.containers-allowed-outgoing-internet", + description="allow complete internet access to containers in here", + ), + ] = None + + callbacks_mapping: Annotated[ + Json[CallbacksMapping] | None, + Field( + alias="simcore.service.callbacks-mapping", + description="exposes callbacks from user services to the sidecar", + default_factory=CallbacksMapping, + ), + ] = DEFAULT_FACTORY @cached_property def needs_dynamic_sidecar(self) -> bool: """if paths mapping is present the service needs to be ran via dynamic-sidecar""" return self.paths_mapping is not None - @validator("container_http_entry", always=True) + @field_validator("container_http_entry") @classmethod - def compose_spec_requires_container_http_entry(cls, v, values) -> Optional[str]: + def _compose_spec_requires_container_http_entry( + cls, v, info: ValidationInfo + ) -> str | None: v = None if v == "" else v - if v is None and values.get("compose_spec") is not None: - raise ValueError( - "Field `container_http_entry` must be defined but is missing" - ) - if v is not None and values.get("compose_spec") is None: - raise ValueError( - "`container_http_entry` not allowed if `compose_spec` is missing" - ) - return v - - @validator("containers_allowed_outgoing_permit_list") + if v is None and info.data.get("compose_spec") is not None: + msg = "Field `container_http_entry` must be defined but is missing" + raise ValueError(msg) + if v is not None and info.data.get("compose_spec") is None: + msg = "`container_http_entry` not allowed if `compose_spec` is missing" + raise ValueError(msg) + return f"{v}" if v else v + + @field_validator("containers_allowed_outgoing_permit_list") @classmethod - def _containers_allowed_outgoing_permit_list_in_compose_spec(cls, v, values): + def _containers_allowed_outgoing_permit_list_in_compose_spec( + cls, v, info: ValidationInfo + ): if v is None: return v - compose_spec: Optional[dict] = values.get("compose_spec") + compose_spec: dict | None = info.data.get("compose_spec") if compose_spec is None: keys = set(v.keys()) if len(keys) != 1 or DEFAULT_SINGLE_SERVICE_NAME not in keys: - raise ValueError( - f"Expected only one entry '{DEFAULT_SINGLE_SERVICE_NAME}' not '{keys.pop()}'" - ) + err_msg = f"Expected only one entry '{DEFAULT_SINGLE_SERVICE_NAME}' not '{keys.pop()}'" + raise ValueError(err_msg) else: containers_in_compose_spec = set(compose_spec["services"].keys()) - for container in v.keys(): + for container in v: if container not in containers_in_compose_spec: - raise ValueError( - f"Trying to permit list 
{container=} which was not found in {compose_spec=}" - ) + err_msg = f"Trying to permit list {container=} which was not found in {compose_spec=}" + raise ValueError(err_msg) return v - @validator("containers_allowed_outgoing_internet") + @field_validator("containers_allowed_outgoing_internet") @classmethod - def _containers_allowed_outgoing_internet_in_compose_spec(cls, v, values): + def _containers_allowed_outgoing_internet_in_compose_spec( + cls, v, info: ValidationInfo + ): if v is None: - return v + return None - compose_spec: Optional[dict] = values.get("compose_spec") + compose_spec: dict | None = info.data.get("compose_spec") if compose_spec is None: if {DEFAULT_SINGLE_SERVICE_NAME} != v: - raise ValueError( + err_msg = ( f"Expected only 1 entry '{DEFAULT_SINGLE_SERVICE_NAME}' not '{v}'" ) + raise ValueError(err_msg) else: containers_in_compose_spec = set(compose_spec["services"].keys()) for container in v: if container not in containers_in_compose_spec: - raise ValueError(f"{container=} not found in {compose_spec=}") + err_msg = f"{container=} not found in {compose_spec=}" + raise ValueError(err_msg) return v - @root_validator + @field_validator("callbacks_mapping") @classmethod - def not_allowed_in_both_specs(cls, values): + def _ensure_callbacks_mapping_container_names_defined_in_compose_spec( + cls, v: CallbacksMapping, info: ValidationInfo + ): + if v is None: + return {} + + defined_services: set[str] = {x.service for x in v.before_shutdown} + if v.metrics: + defined_services.add(v.metrics.service) + + if len(defined_services) == 0: + return v + + compose_spec: dict | None = info.data.get("compose_spec") + if compose_spec is None: + if {DEFAULT_SINGLE_SERVICE_NAME} != defined_services: + err_msg = f"Expected only 1 entry '{DEFAULT_SINGLE_SERVICE_NAME}' not '{defined_services}'" + raise ValueError(err_msg) + else: + containers_in_compose_spec = set(compose_spec["services"].keys()) + for service_name in defined_services: + if service_name not in containers_in_compose_spec: + err_msg = f"{service_name=} not found in {compose_spec=}" + raise ValueError(err_msg) + return v + + @field_validator("user_preferences_path", mode="before") + @classmethod + def _deserialize_from_json(cls, v): + return f"{v}".removeprefix('"').removesuffix('"') if v else None + + @field_validator("user_preferences_path") + @classmethod + def _user_preferences_path_no_included_in_other_volumes( + cls, v: CallbacksMapping, info: ValidationInfo + ): + paths_mapping: PathMappingsLabel | None = info.data.get("paths_mapping", None) + if paths_mapping is None: + return v + + for test_path in [ + paths_mapping.inputs_path, + paths_mapping.outputs_path, + *paths_mapping.state_paths, + ]: + if f"{test_path}".startswith(f"{v}"): + msg = f"user_preferences_path={v} cannot be a subpath of {test_path}" + raise ValueError(msg) + return v + + @model_validator(mode="after") + def _not_allowed_in_both_specs(self): match_keys = { "containers_allowed_outgoing_internet", "containers_allowed_outgoing_permit_list", } - if match_keys & set(values.keys()) != match_keys: - raise ValueError( - f"Expected the following keys {match_keys} to be present {values=}" - ) + if match_keys & set(self.model_fields) != match_keys: + err_msg = f"Expected the following keys {match_keys} to be present {self.model_fields=}" + raise ValueError(err_msg) - containers_allowed_outgoing_internet = values[ - "containers_allowed_outgoing_internet" - ] - containers_allowed_outgoing_permit_list = values[ - "containers_allowed_outgoing_permit_list" - ] if ( - 
containers_allowed_outgoing_internet is None - or containers_allowed_outgoing_permit_list is None + self.containers_allowed_outgoing_internet is None + or self.containers_allowed_outgoing_permit_list is None ): - return values + return self - common_containers = set(containers_allowed_outgoing_internet) & set( - containers_allowed_outgoing_permit_list.keys() + common_containers = set(self.containers_allowed_outgoing_internet) & set( + self.containers_allowed_outgoing_permit_list.keys() ) if len(common_containers) > 0: - raise ValueError( + err_msg = ( f"Not allowed {common_containers=} detected between " "`containers-allowed-outgoing-permit-list` and " "`containers-allowed-outgoing-internet`." ) + raise ValueError(err_msg) - return values + return self - class Config(_BaseConfig): - pass + model_config = _BaseConfig class SimcoreServiceLabels(DynamicSidecarServiceLabels): @@ -406,68 +612,101 @@ class SimcoreServiceLabels(DynamicSidecarServiceLabels): spec will be generated before starting the service. """ - settings: Json[SimcoreServiceSettingsLabel] = Field( - ..., - alias="simcore.service.settings", - description=( - "Json encoded. Contains setting like environment variables and " - "resource constraints which are required by the service. " - "Should be compatible with Docker REST API." + settings: Annotated[ + Json[SimcoreServiceSettingsLabel], + Field( + alias="simcore.service.settings", + description=( + "Json encoded. Contains setting like environment variables and " + "resource constraints which are required by the service. " + "Should be compatible with Docker REST API." + ), + default_factory=lambda: SimcoreServiceSettingsLabel.model_validate([]), ), - ) - - class Config(_BaseConfig): - extra = Extra.allow - schema_extra = { - "examples": [ - # WARNING: do not change order. Used in tests! - # legacy service - { - "simcore.service.settings": json.dumps( - SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] - ) - }, - # dynamic-service - { - "simcore.service.settings": json.dumps( - SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] - ), - "simcore.service.paths-mapping": json.dumps( - PathMappingsLabel.Config.schema_extra["example"] - ), - "simcore.service.restart-policy": RestartPolicy.NO_RESTART.value, - }, - # dynamic-service with compose spec - { - "simcore.service.settings": json.dumps( - SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] - ), - "simcore.service.paths-mapping": json.dumps( - PathMappingsLabel.Config.schema_extra["example"] - ), - "simcore.service.compose-spec": json.dumps( - { - "version": "2.3", - "services": { - "rt-web": { - "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life:${SERVICE_VERSION}", - "init": True, - "depends_on": ["s4l-core"], - }, - "s4l-core": { - "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core:${SERVICE_VERSION}", - "runtime": "nvidia", - "init": True, - "environment": ["DISPLAY=${DISPLAY}"], - "volumes": [ - "/tmp/.X11-unix:/tmp/.X11-unix" # nosec - ], + ] = DEFAULT_FACTORY + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # WARNING: do not change order. Used in tests! 
+ # legacy service + { + "simcore.service.settings": json_dumps( + SimcoreServiceSettingLabelEntry.model_json_schema()[ + "examples" + ] + ) + }, + # dynamic-service + { + "simcore.service.settings": json_dumps( + SimcoreServiceSettingLabelEntry.model_json_schema()[ + "examples" + ] + ), + "simcore.service.paths-mapping": json_dumps( + PathMappingsLabel.model_json_schema()["examples"][0] + ), + "simcore.service.restart-policy": RestartPolicy.NO_RESTART.value, + "simcore.service.callbacks-mapping": json_dumps( + { + "metrics": { + "service": DEFAULT_SINGLE_SERVICE_NAME, + "command": "ls", + "timeout": 1, + } + } + ), + "simcore.service.user-preferences-path": json_dumps( + "/tmp/path_to_preferences" # noqa: S108 + ), + }, + # dynamic-service with compose spec + { + "simcore.service.settings": json_dumps( + SimcoreServiceSettingLabelEntry.model_json_schema()[ + "examples" + ] + ), + "simcore.service.paths-mapping": json_dumps( + PathMappingsLabel.model_json_schema()["examples"][0], + ), + "simcore.service.compose-spec": json_dumps( + { + "version": "2.3", + "services": { + "rt-web": { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life:${SERVICE_VERSION}", + "init": True, + "depends_on": ["s4l-core"], + "storage_opt": {"size": "10M"}, + }, + "s4l-core": { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core:${SERVICE_VERSION}", + "runtime": "nvidia", + "storage_opt": {"size": "5G"}, + "init": True, + "environment": ["DISPLAY=${DISPLAY}"], + "volumes": [ + "/tmp/.X11-unix:/tmp/.X11-unix" # nosec # noqa: S108 + ], + }, }, - }, - } - ), - "simcore.service.container-http-entrypoint": "rt-web", - "simcore.service.restart-policy": RestartPolicy.ON_INPUTS_DOWNLOADED.value, - }, - ] - } + } + ), + "simcore.service.container-http-entrypoint": "rt-web", + "simcore.service.restart-policy": RestartPolicy.ON_INPUTS_DOWNLOADED.value, + "simcore.service.callbacks-mapping": json_dumps( + CallbacksMapping.model_json_schema()["examples"][3] + ), + }, + ] + }, + ) + + model_config = _BaseConfig | ConfigDict( + extra="allow", + json_schema_extra=_update_json_schema_extra, + ) diff --git a/packages/models-library/src/models_library/service_settings_nat_rule.py b/packages/models-library/src/models_library/service_settings_nat_rule.py new file mode 100644 index 00000000000..1f50b62f503 --- /dev/null +++ b/packages/models-library/src/models_library/service_settings_nat_rule.py @@ -0,0 +1,84 @@ +from collections.abc import Generator +from typing import Final + +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, ValidationInfo, field_validator + +from .basic_types import PortInt +from .osparc_variable_identifier import OsparcVariableIdentifier, raise_if_unresolved + +# Cloudflare DNS server address +DEFAULT_DNS_SERVER_ADDRESS: Final[str] = "1.1.1.1" # NOSONAR +DEFAULT_DNS_SERVER_PORT: Final[PortInt] = TypeAdapter(PortInt).validate_python(53) + + +class _PortRange(BaseModel): + """`lower` and `upper` are included""" + + lower: PortInt | OsparcVariableIdentifier + upper: PortInt | OsparcVariableIdentifier + + @field_validator("upper") + @classmethod + def lower_less_than_upper(cls, v, info: ValidationInfo) -> PortInt: + if isinstance(v, OsparcVariableIdentifier): + return v # type: ignore # bypass validation if unresolved + + upper = v + lower: PortInt | OsparcVariableIdentifier | None = info.data.get("lower") + + if lower and isinstance(lower, OsparcVariableIdentifier): + return v # type: ignore # bypass validation if unresolved + + if lower is None or lower >= upper: + msg = 
f"Condition not satisfied: lower={lower!r} < upper={upper!r}" + raise ValueError(msg) + return PortInt(v) + + model_config = ConfigDict(arbitrary_types_allowed=True, validate_assignment=True) + + +class DNSResolver(BaseModel): + address: OsparcVariableIdentifier | str = Field( + ..., description="this is not an url address is derived from IP address" + ) + port: PortInt | OsparcVariableIdentifier + + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + extra="allow", + json_schema_extra={ + "examples": [ + {"address": "1.1.1.1", "port": 53}, # NOSONAR + {"address": "ns1.example.com", "port": 53}, + ] + }, + ) + + +class NATRule(BaseModel): + """Content of "simcore.service.containers-allowed-outgoing-permit-list" label""" + + hostname: OsparcVariableIdentifier | str + tcp_ports: list[PortInt | OsparcVariableIdentifier | _PortRange] + dns_resolver: DNSResolver = Field( + default_factory=lambda: DNSResolver( + address=DEFAULT_DNS_SERVER_ADDRESS, port=DEFAULT_DNS_SERVER_PORT + ), + description="specify a DNS resolver address and port", + ) + + model_config = ConfigDict(arbitrary_types_allowed=True, validate_assignment=True) + + def iter_tcp_ports(self) -> Generator[PortInt, None, None]: + for port in self.tcp_ports: + if isinstance(port, _PortRange): + yield from ( + PortInt(i) + for i in range( + raise_if_unresolved(port.lower), + raise_if_unresolved(port.upper) + 1, + ) + ) + else: + yield raise_if_unresolved(port) diff --git a/packages/models-library/src/models_library/services.py b/packages/models-library/src/models_library/services.py index 1a02f3fba11..cd20682f52d 100644 --- a/packages/models-library/src/models_library/services.py +++ b/packages/models-library/src/models_library/services.py @@ -1,608 +1,35 @@ -""" - -NOTE: to dump json-schema from CLI use - python -c "from models_library.services import ServiceDockerData as cls; print(cls.schema_json(indent=2))" > services-schema.json -""" - -import re -from datetime import datetime -from enum import Enum -from typing import Any, Optional, Union -from uuid import UUID - -from pydantic import ( - BaseModel, - ConstrainedStr, - Extra, - Field, - HttpUrl, - StrictBool, - StrictFloat, - StrictInt, - constr, - validator, -) - -from .basic_regex import VERSION_RE from .boot_options import BootOption, BootOptions -from .emails import LowerCaseEmailStr -from .services_constants import FILENAME_RE, PROPERTY_TYPE_RE -from .services_ui import Widget -from .utils.json_schema import ( - InvalidJsonSchema, - any_ref_key, - jsonschema_validate_schema, +from .services_authoring import Author, Badge +from .services_base import ServiceKeyVersion +from .services_constants import LATEST_INTEGRATION_VERSION +from .services_enums import ServiceType +from .services_io import BaseServiceIOModel, ServiceInput, ServiceOutput +from .services_metadata_published import ServiceInputsDict, ServiceMetaDataPublished +from .services_types import ( + DynamicServiceKey, + ServiceKey, + ServicePortKey, + ServiceRunID, + ServiceVersion, ) -# CONSTANTS ------------------------------------------- -# NOTE: move to _constants.py: SEE https://github.com/ITISFoundation/osparc-simcore/issues/3486 -# NOTE: needs to end with / !! 
-SERVICE_KEY_RE = r"^(simcore)/(services)/(comp|dynamic|frontend)(/[\w/-]+)+$" - -DYNAMIC_SERVICE_KEY_RE = r"^(simcore)/(services)/dynamic(/[\w/-]+)+$" -DYNAMIC_SERVICE_KEY_FORMAT = "simcore/services/dynamic/{service_name}" - -COMPUTATIONAL_SERVICE_KEY_RE = r"^(simcore)/(services)/comp(/[\w/-]+)+$" -COMPUTATIONAL_SERVICE_KEY_FORMAT = "simcore/services/comp/{service_name}" - -KEY_RE = SERVICE_KEY_RE # TODO: deprecate this global constant by SERVICE_KEY_RE - -PROPERTY_KEY_RE = r"^[-_a-zA-Z0-9]+$" # TODO: PC->* it would be advisable to have this "variable friendly" (see VARIABLE_NAME_RE) - -LATEST_INTEGRATION_VERSION = "1.0.0" - -# CONSTRAINT TYPES ------------------------------------------- - -ServicePortKey = constr(regex=PROPERTY_KEY_RE) -FileName = constr(regex=FILENAME_RE) - - -class ServiceKey(ConstrainedStr): - regex = re.compile(SERVICE_KEY_RE) - - -class ServiceVersion(ConstrainedStr): - regex = re.compile(VERSION_RE) - - -RunID = UUID - - -class ServiceType(str, Enum): - COMPUTATIONAL = "computational" - DYNAMIC = "dynamic" - FRONTEND = "frontend" - BACKEND = "backend" - - -# TODO: create a flags enum that accounts for every column -# -# | service name | defininition | implementation | runs | ``ServiceType`` | | -# | --------------- | ------------ | -------------- | ----------------------- | ----------------------------- | --------------- | -# | ``file-picker`` | BE | FE | FE | ``ServiceType.FRONTEND`` | function | -# | ``isolve`` | DI-labels | DI | Dask-BE (own container) | ``ServiceType.COMPUTATIONAL`` | container | -# | ``jupyter-*`` | DI-labels | DI | DySC-BE (own container) | ``ServiceType.DYNAMIC`` | container | -# | ``iterator-*`` | BE | BE | BE (webserver) | ``ServiceType.BACKEND`` | function | -# | ``pyfun-*`` | BE | BE | Dask-BE (dask-sidecar) | ``ServiceType.COMPUTATIONAL`` | function | -# -# -# where FE (front-end), DI (docker image), Dask/DySC (dask/dynamic sidecar), BE (backend). 
- - -# MODELS ------------------------------------------- -class Badge(BaseModel): - name: str = Field( - ..., - description="Name of the subject", - examples=["travis-ci", "coverals.io", "github.io"], - ) - image: HttpUrl = Field( - ..., - description="Url to the badge", - examples=[ - "https://travis-ci.org/ITISFoundation/osparc-simcore.svg?branch=master", - "https://coveralls.io/repos/github/ITISFoundation/osparc-simcore/badge.svg?branch=master", - "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation", - ], - ) - url: HttpUrl = Field( - ..., - description="Link to the status", - examples=[ - "https://travis-ci.org/ITISFoundation/osparc-simcore 'State of CI: build, test and pushing images'", - "https://coveralls.io/github/ITISFoundation/osparc-simcore?branch=master 'Test coverage'", - "https://itisfoundation.github.io/", - ], - ) - - class Config: - extra = Extra.forbid - - -class Author(BaseModel): - name: str = Field(..., description="Name of the author", example="Jim Knopf") - email: LowerCaseEmailStr = Field( - ..., - examples=["sun@sense.eight", "deleen@minbar.bab"], - description="Email address", - ) - affiliation: Optional[str] = Field( - None, examples=["Sense8", "Babylon 5"], description="Affiliation of the author" - ) - - class Config: - extra = Extra.forbid - - -class BaseServiceIOModel(BaseModel): - """ - Base class for service input/outputs - """ - - ## management - - ### human readable descriptors - display_order: Optional[float] = Field( - None, - alias="displayOrder", - deprecated=True, - description="DEPRECATED: new display order is taken from the item position. This will be removed.", - ) - - label: str = Field(..., description="short name for the property", example="Age") - description: str = Field( - ..., - description="description of the property", - example="Age in seconds since 1970", - ) - - # mathematical and physics descriptors - property_type: str = Field( - ..., - alias="type", - description="data type expected on this input glob matching for data type is allowed", - examples=[ - "number", - "boolean", - "data:*/*", - "data:text/*", - "data:[image/jpeg,image/png]", - "data:application/json", - "data:application/json;schema=https://my-schema/not/really/schema.json", - "data:application/vnd.ms-excel", - "data:text/plain", - "data:application/hdf5", - "data:application/edu.ucdavis@ceclancy.xyz", - ], - regex=PROPERTY_TYPE_RE, - ) - - content_schema: Optional[dict[str, Any]] = Field( - None, - description="jsonschema of this input/output. Required when type='ref_contentSchema'", - alias="contentSchema", - ) - - # value - file_to_key_map: Optional[dict[FileName, ServicePortKey]] = Field( - None, - alias="fileToKeyMap", - description="Place the data associated with the named keys in files", - examples=[{"dir/input1.txt": "key_1", "dir33/input2.txt": "key2"}], - ) - - # TODO: should deprecate since content_schema include units - unit: Optional[str] = Field( - None, - description="Units, when it refers to a physical quantity", - ) - - class Config: - extra = Extra.forbid - - @validator("content_schema") - @classmethod - def check_type_is_set_to_schema(cls, v, values): - if v is not None: - if (ptype := values["property_type"]) != "ref_contentSchema": - raise ValueError( - "content_schema is defined but set the wrong type." - f"Expected type=ref_contentSchema but got ={ptype}." 
- ) - return v - - @validator("content_schema") - @classmethod - def check_valid_json_schema(cls, v): - if v is not None: - try: - jsonschema_validate_schema(schema=v) - - if any_ref_key(v): - # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3030 - raise ValueError("Schemas with $ref are still not supported") - - except InvalidJsonSchema as err: - failed_path = "->".join(map(str, err.path)) - raise ValueError( - f"Invalid json-schema at {failed_path}: {err.message}" - ) from err - return v - - @classmethod - def _from_json_schema_base_implementation( - cls, port_schema: dict[str, Any] - ) -> dict[str, Any]: - description = port_schema.pop("description", port_schema["title"]) - data = { - "label": port_schema["title"], - "description": description, - "type": "ref_contentSchema", - "contentSchema": port_schema, - } - return data - - -class ServiceInput(BaseServiceIOModel): - """ - Metadata on a service input port - """ - - # TODO: should deprecate since content_schema include defaults as well - default_value: Optional[Union[StrictBool, StrictInt, StrictFloat, str]] = Field( - None, alias="defaultValue", examples=["Dog", True] - ) - - widget: Optional[Widget] = Field( - None, - description="custom widget to use instead of the default one determined from the data-type", - ) - - class Config(BaseServiceIOModel.Config): - schema_extra = { - "examples": [ - # file-wo-widget: - { - "displayOrder": 1, - "label": "Input files - file-wo-widget", - "description": "Files downloaded from service connected at the input", - "type": "data:*/*", - }, - # v2 - { - "displayOrder": 2, - "label": "Sleep Time - v2", - "description": "Time to wait before completion", - "type": "number", - "defaultValue": 0, - "unit": "second", - "widget": {"type": "TextArea", "details": {"minHeight": 3}}, - }, - # latest: - { - "label": "Sleep Time - latest", - "description": "Time to wait before completion", - "type": "number", - "defaultValue": 0, - "unit": "second", - "widget": {"type": "TextArea", "details": {"minHeight": 3}}, - }, - { - "label": "array_numbers", - "description": "Some array of numbers", - "type": "ref_contentSchema", - "contentSchema": { - "title": "list[number]", - "type": "array", - "items": {"type": "number"}, - }, - }, - { - "label": "my_object", - "description": "Some object", - "type": "ref_contentSchema", - "contentSchema": { - "title": "an object named A", - "type": "object", - "properties": { - "i": {"title": "Int", "type": "integer", "default": 3}, - "b": {"title": "Bool", "type": "boolean"}, - "s": {"title": "Str", "type": "string"}, - }, - "required": ["b", "s"], - }, - }, - ], - } - - @classmethod - def from_json_schema(cls, port_schema: dict[str, Any]) -> "ServiceInput": - """Creates input port model from a json-schema""" - data = cls._from_json_schema_base_implementation(port_schema) - return cls.parse_obj(data) - - -class ServiceOutput(BaseServiceIOModel): - widget: Optional[Widget] = Field( - None, - description="custom widget to use instead of the default one determined from the data-type", - deprecated=True, - ) - - class Config(BaseServiceIOModel.Config): - schema_extra = { - "examples": [ - { - "displayOrder": 2, - "label": "Time Slept", - "description": "Time the service waited before completion", - "type": "number", - }, - { - "displayOrder": 2, - "label": "Time Slept - units", - "description": "Time the service waited before completion", - "type": "number", - "unit": "second", - }, - { - "label": "Time Slept - w/o displayorder", - "description": "Time the service waited 
before completion", - "type": "number", - "unit": "second", - }, - { - "label": "Output file 1", - "displayOrder": 4.0, - "description": "Output file uploaded from the outputs folder", - "type": "data:*/*", - }, - ] - } - - @classmethod - def from_json_schema(cls, port_schema: dict[str, Any]) -> "ServiceOutput": - """Creates output port model from a json-schema""" - data = cls._from_json_schema_base_implementation(port_schema) - return cls.parse_obj(data) - - -class ServiceKeyVersion(BaseModel): - """This pair uniquely identifies a services""" - - key: str = Field( - ..., - description="distinctive name for the node based on the docker registry path", - regex=KEY_RE, - examples=[ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer", - ], - ) - version: str = Field( - ..., - description="service version number", - regex=VERSION_RE, - examples=["1.0.0", "0.0.1"], - ) - - -class _BaseServiceCommonDataModel(BaseModel): - name: str = Field( - ..., - description="short, human readable name for the node", - example="Fast Counter", - ) - thumbnail: Optional[HttpUrl] = Field( - None, - description="url to the thumbnail", - examples=[ - "https://user-images.githubusercontent.com/32800795/61083844-ff48fb00-a42c-11e9-8e63-fa2d709c8baf.png" - ], - ) - description: str = Field( - ..., - description="human readable description of the purpose of the node", - examples=[ - "Our best node type", - "The mother of all nodes, makes your numbers shine!", - ], - ) - - @validator("thumbnail", pre=True, always=False) - @classmethod - def validate_thumbnail(cls, value): # pylint: disable=no-self-argument,no-self-use - if value == "": - return None - return value - - -ServiceInputsDict = dict[ServicePortKey, ServiceInput] -ServiceOutputsDict = dict[ServicePortKey, ServiceOutput] - - -class ServiceDockerData(ServiceKeyVersion, _BaseServiceCommonDataModel): - """ - Static metadata for a service injected in the image labels - - This is one to one with node-meta-v0.0.1.json - """ - - integration_version: Optional[str] = Field( - None, - alias="integration-version", - description="integration version number", - regex=VERSION_RE, - examples=["1.0.0"], - ) - service_type: ServiceType = Field( - ..., - alias="type", - description="service type", - examples=["computational"], - ) - - badges: Optional[list[Badge]] = Field(None) - - authors: list[Author] = Field(..., min_items=1) - contact: LowerCaseEmailStr = Field( - ..., - description="email to correspond to the authors about the node", - examples=["lab@net.flix"], - ) - inputs: Optional[ServiceInputsDict] = Field( - ..., description="definition of the inputs of this node" - ) - outputs: Optional[ServiceOutputsDict] = Field( - ..., description="definition of the outputs of this node" - ) - - boot_options: Optional[BootOptions] = Field( - None, - alias="boot-options", - description="Service defined boot options. 
These get injected in the service as env variables.", - ) - - class Config: - description = "Description of a simcore node 'class' with input and output" - extra = Extra.forbid - - schema_extra = { - "examples": [ - { - "name": "oSparc Python Runner", - "key": "simcore/services/comp/osparc-python-runner", - "type": "computational", - "integration-version": "1.0.0", - "version": "1.7.0", - "description": "oSparc Python Runner", - "contact": "smith@company.com", - "authors": [ - { - "name": "John Smith", - "email": "smith@company.com", - "affiliation": "Company", - }, - { - "name": "Richard Brown", - "email": "brown@uni.edu", - "affiliation": "University", - }, - ], - "inputs": { - "input_1": { - "displayOrder": 1, - "label": "Input data", - "description": "Any code, requirements or data file", - "type": "data:*/*", - } - }, - "outputs": { - "output_1": { - "displayOrder": 1, - "label": "Output data", - "description": "All data produced by the script is zipped as output_data.zip", - "type": "data:*/*", - "fileToKeyMap": {"output_data.zip": "output_1"}, - } - }, - }, - # latest - { - "name": "oSparc Python Runner", - "key": "simcore/services/comp/osparc-python-runner", - "type": "computational", - "integration-version": "1.0.0", - "version": "1.7.0", - "description": "oSparc Python Runner with boot options", - "contact": "smith@company.com", - "authors": [ - { - "name": "John Smith", - "email": "smith@company.com", - "affiliation": "Company", - }, - { - "name": "Richard Brown", - "email": "brown@uni.edu", - "affiliation": "University", - }, - ], - "inputs": { - "input_1": { - "label": "Input data", - "description": "Any code, requirements or data file", - "type": "data:*/*", - } - }, - "outputs": { - "output_1": { - "label": "Output data", - "description": "All data produced by the script is zipped as output_data.zip", - "type": "data:*/*", - "fileToKeyMap": {"output_data.zip": "output_1"}, - } - }, - "boot-options": { - "example_service_defined_boot_mode": BootOption.Config.schema_extra[ - "examples" - ][ - 0 - ], - "example_service_defined_theme_selection": BootOption.Config.schema_extra[ - "examples" - ][ - 1 - ], - }, - }, - ] - } - - -class ServiceMetaData(_BaseServiceCommonDataModel): - # Overrides all fields of _BaseServiceCommonDataModel: - # - for a partial update all members must be Optional - # FIXME: if API entry needs a schema to allow partial updates (e.g. patch/put), - # it should be implemented with a different model e.g. ServiceMetaDataUpdate - # - - name: Optional[str] - thumbnail: Optional[HttpUrl] - description: Optional[str] - deprecated: Optional[datetime] = Field( - default=None, - description="If filled with a date, then the service is to be deprecated at that date (e.g. cannot start anymore)", - ) - - # user-defined metatada - classifiers: Optional[list[str]] - quality: dict[str, Any] = {} - - class Config: - schema_extra = { - "example": { - "key": "simcore/services/dynamic/sim4life", - "version": "1.0.9", - "name": "sim4life", - "description": "s4l web", - "thumbnail": "http://thumbnailit.org/image", - "quality": { - "enabled": True, - "tsr_target": { - f"r{n:02d}": {"level": 4, "references": ""} - for n in range(1, 11) - }, - "annotations": { - "vandv": "", - "limitations": "", - "certificationLink": "", - "certificationStatus": "Uncertified", - }, - "tsr_current": { - f"r{n:02d}": {"level": 0, "references": ""} - for n in range(1, 11) - }, - }, - } - } +__all__: tuple[str, ...] 
= ( + "Author", + "Badge", + "BaseServiceIOModel", + "BootOption", + "BootOptions", + "DynamicServiceKey", + "LATEST_INTEGRATION_VERSION", + "ServiceInput", + "ServiceInputsDict", + "ServiceKey", + "ServiceKeyVersion", + "ServiceMetaDataPublished", + "ServiceOutput", + "ServicePortKey", + "ServiceRunID", + "ServiceType", + "ServiceVersion", +) +# nopycln: file diff --git a/packages/models-library/src/models_library/services_access.py b/packages/models-library/src/models_library/services_access.py index c8daca69255..4c450684700 100644 --- a/packages/models-library/src/models_library/services_access.py +++ b/packages/models-library/src/models_library/services_access.py @@ -1,27 +1,40 @@ """Service access rights models """ -from typing import Optional -from pydantic import BaseModel, Field -from pydantic.types import PositiveInt +from typing import Annotated -GroupId = PositiveInt +from pydantic import BaseModel, ConfigDict, Field + +from .groups import GroupID +from .utils.change_case import snake_to_camel class ServiceGroupAccessRights(BaseModel): - execute_access: bool = Field( - default=False, - description="defines whether the group can execute the service", - ) - write_access: bool = Field( - default=False, description="defines whether the group can modify the service" + execute_access: Annotated[ + bool, Field(description="defines whether the group can execute the service") + ] = False + write_access: Annotated[ + bool, Field(description="defines whether the group can modify the service") + ] = False + + +class ServiceGroupAccessRightsV2(BaseModel): + execute: bool = False + write: bool = False + + model_config = ConfigDict( + alias_generator=snake_to_camel, + populate_by_name=True, + extra="forbid", ) class ServiceAccessRights(BaseModel): - access_rights: Optional[dict[GroupId, ServiceGroupAccessRights]] = Field( - None, - alias="accessRights", - description="service access rights per group id", - ) + access_rights: Annotated[ + dict[GroupID, ServiceGroupAccessRights] | None, + Field( + alias="accessRights", + description="service access rights per group id", + ), + ] = None diff --git a/packages/models-library/src/models_library/services_authoring.py b/packages/models-library/src/models_library/services_authoring.py new file mode 100644 index 00000000000..05b5197994c --- /dev/null +++ b/packages/models-library/src/models_library/services_authoring.py @@ -0,0 +1,54 @@ +from pydantic import BaseModel, ConfigDict, Field, HttpUrl + +from .emails import LowerCaseEmailStr + + +class Badge(BaseModel): + name: str = Field( + ..., + description="Name of the subject", + ) + image: HttpUrl = Field( + ..., + description="Url to the badge", + ) + url: HttpUrl = Field( + ..., + description="Link to the status", + ) + model_config = ConfigDict( + json_schema_extra={ + "example": { + "name": "osparc.io", + "image": "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation", + "url": "https://itisfoundation.github.io/", + } + } + ) + + +class Author(BaseModel): + name: str = Field( + ..., + description="Name of the author", + ) + email: LowerCaseEmailStr = Field( + ..., + description="Email address", + ) + affiliation: str | None = Field(None) + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "name": "Jim Knopf", + "email": "deleen@minbar.bab", + "affiliation": "Babylon 5", + }, + { + "name": "John Smith", + "email": "smith@acme.com", + }, + ] + } + ) diff --git a/packages/models-library/src/models_library/services_base.py 
b/packages/models-library/src/models_library/services_base.py new file mode 100644 index 00000000000..8e989b39ec9 --- /dev/null +++ b/packages/models-library/src/models_library/services_base.py @@ -0,0 +1,75 @@ +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field, HttpUrl, field_validator + +from .services_types import ServiceKey, ServiceVersion +from .utils.common_validators import empty_str_to_none_pre_validator + + +class ServiceKeyVersion(BaseModel): + """Service `key-version` pair uniquely identifies a service""" + + key: Annotated[ + ServiceKey, + Field( + ..., + description="distinctive name for the node based on the docker registry path", + ), + ] + version: Annotated[ + ServiceVersion, + Field( + description="service version number", + ), + ] + + model_config = ConfigDict(frozen=True) + + +class ServiceBaseDisplay(BaseModel): + name: Annotated[ + str, + Field( + description="Display name: short, human readable name for the node", + examples=["Fast Counter"], + ), + ] + thumbnail: Annotated[ + str | None, + Field( + description="URL to the service thumbnail", + validate_default=True, + ), + ] = None + icon: Annotated[ + HttpUrl | None, + Field(description="URL to the service icon"), + ] = None + description: Annotated[ + str, + Field( + description="human readable description of the purpose of the node", + examples=[ + "Our best node type", + "The mother of all nodes, makes your numbers shine!", + ], + ), + ] + description_ui: Annotated[ + bool, + Field( + description="A flag to enable the `description` to be presented as a single web page (=true) or in another structured format (default=false)." + ), + ] = False + version_display: Annotated[ + str | None, + Field( + description="A user-friendly or marketing name for the release." + "This can be used to reference the release in a more readable and recognizable format, such as 'Matterhorn Release,' 'Spring Update,' or 'Holiday Edition.' " + "This name is not used for version comparison but is useful for communication and documentation purposes." 
+ ), + ] = None + + _empty_is_none = field_validator( + "icon", "thumbnail", "version_display", mode="before" + )(empty_str_to_none_pre_validator) diff --git a/packages/models-library/src/models_library/services_constants.py b/packages/models-library/src/models_library/services_constants.py index 8b5b21d8b53..c3779791bd8 100644 --- a/packages/models-library/src/models_library/services_constants.py +++ b/packages/models-library/src/models_library/services_constants.py @@ -1,13 +1,27 @@ -# -# NOTE: https://github.com/ITISFoundation/osparc-simcore/issues/3486 -# - -PROPERTY_TYPE_RE = r"^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$" -PROPERTY_TYPE_TO_PYTHON_TYPE_MAP = { - "integer": int, - "number": float, - "boolean": bool, - "string": str, -} - -FILENAME_RE = r".+" +from types import MappingProxyType +from typing import Final + +from .services_enums import ServiceType + +LATEST_INTEGRATION_VERSION: Final[str] = "1.0.0" + +ANY_FILETYPE: Final[str] = "data:*/*" + +SERVICE_TYPE_TO_NAME_MAP = MappingProxyType( + { + ServiceType.COMPUTATIONAL: "comp", + ServiceType.DYNAMIC: "dynamic", + ServiceType.FRONTEND: "frontend", + } +) + + +def _create_key_prefix(service_type: ServiceType) -> str: + return f"simcore/services/{SERVICE_TYPE_TO_NAME_MAP[service_type]}" + + +COMPUTATIONAL_SERVICE_KEY_PREFIX: Final[str] = _create_key_prefix( + ServiceType.COMPUTATIONAL +) +DYNAMIC_SERVICE_KEY_PREFIX: Final[str] = _create_key_prefix(ServiceType.DYNAMIC) +FRONTEND_SERVICE_KEY_PREFIX: Final[str] = _create_key_prefix(ServiceType.FRONTEND) diff --git a/packages/models-library/src/models_library/services_creation.py b/packages/models-library/src/models_library/services_creation.py new file mode 100644 index 00000000000..5abb8c9e4d2 --- /dev/null +++ b/packages/models-library/src/models_library/services_creation.py @@ -0,0 +1,48 @@ +from typing import Any + +from pydantic import BaseModel, ConfigDict, TypeAdapter + +from .services_resources import ServiceResourcesDict +from .services_types import ServiceKey, ServiceVersion +from .wallets import WalletID + + +class CreateServiceMetricsAdditionalParams(BaseModel): + wallet_id: WalletID | None + wallet_name: str | None + pricing_plan_id: int | None + pricing_unit_id: int | None + pricing_unit_cost_id: int | None + product_name: str + simcore_user_agent: str + user_email: str + project_name: str + node_name: str + service_key: ServiceKey + service_version: ServiceVersion + service_resources: ServiceResourcesDict + service_additional_metadata: dict[str, Any] + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "wallet_id": 1, + "wallet_name": "a private wallet for me", + "pricing_plan_id": 1, + "pricing_unit_id": 1, + "pricing_unit_detail_id": 1, + "product_name": "osparc", + "simcore_user_agent": "undefined", + "user_email": "test@test.com", + "project_name": "_!New Study", + "node_name": "the service of a lifetime _ *!", + "service_key": TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/test" + ), + "service_version": TypeAdapter(ServiceVersion).validate_python("0.0.1"), + "service_resources": {}, + "service_additional_metadata": {}, + "pricing_unit_cost_id": None, + } + } + ) diff --git a/packages/models-library/src/models_library/services_db.py b/packages/models-library/src/models_library/services_db.py deleted file mode 100644 index bdfbd96a832..00000000000 --- a/packages/models-library/src/models_library/services_db.py +++ /dev/null @@ -1,78 +0,0 @@ -""" - 
-NOTE: to dump json-schema from CLI use - python -c "from models_library.services import ServiceDockerData as cls; print(cls.schema_json(indent=2))" > services-schema.json -""" -from typing import Optional - -from pydantic import Field -from pydantic.types import PositiveInt - -from .services import ServiceKeyVersion, ServiceMetaData -from .services_access import ServiceGroupAccessRights - -# ------------------------------------------------------------------- -# Databases models -# - table services_meta_data -# - table services_access_rights - - -class ServiceMetaDataAtDB(ServiceKeyVersion, ServiceMetaData): - # for a partial update all members must be Optional - classifiers: Optional[list[str]] = Field([]) - owner: Optional[PositiveInt] - - class Config: - orm_mode = True - schema_extra = { - "example": { - "key": "simcore/services/dynamic/sim4life", - "version": "1.0.9", - "owner": 8, - "name": "sim4life", - "description": "s4l web", - "thumbnail": "http://thumbnailit.org/image", - "created": "2021-01-18 12:46:57.7315", - "modified": "2021-01-19 12:45:00", - "deprecated": "2099-01-19 12:45:00", - "quality": { - "enabled": True, - "tsr_target": { - f"r{n:02d}": {"level": 4, "references": ""} - for n in range(1, 11) - }, - "annotations": { - "vandv": "", - "limitations": "", - "certificationLink": "", - "certificationStatus": "Uncertified", - }, - "tsr_current": { - f"r{n:02d}": {"level": 0, "references": ""} - for n in range(1, 11) - }, - }, - } - } - - -class ServiceAccessRightsAtDB(ServiceKeyVersion, ServiceGroupAccessRights): - gid: PositiveInt = Field(..., description="defines the group id", example=1) - product_name: str = Field( - ..., description="defines the product name", example="osparc" - ) - - class Config: - orm_mode = True - schema_extra = { - "example": { - "key": "simcore/services/dynamic/sim4life", - "version": "1.0.9", - "gid": 8, - "execute_access": True, - "write_access": True, - "product_name": "osparc", - "created": "2021-01-18 12:46:57.7315", - "modified": "2021-01-19 12:45:00", - } - } diff --git a/packages/models-library/src/models_library/services_enums.py b/packages/models-library/src/models_library/services_enums.py new file mode 100644 index 00000000000..ec5414218e3 --- /dev/null +++ b/packages/models-library/src/models_library/services_enums.py @@ -0,0 +1,68 @@ +import functools +from enum import Enum, unique + + +@unique +class ServiceBootType(str, Enum): + V0 = "V0" + V2 = "V2" + + +@functools.total_ordering +@unique +class ServiceState(Enum): + FAILED = "failed" + + PENDING = "pending" + PULLING = "pulling" + STARTING = "starting" + RUNNING = "running" + + STOPPING = "stopping" + + COMPLETE = "complete" + IDLE = "idle" + + def __lt__(self, other): + if self.__class__ is other.__class__: + comparison_order = ServiceState.comparison_order() + self_index = comparison_order[self] + other_index = comparison_order[other] + return self_index < other_index + return NotImplemented + + @staticmethod + @functools.lru_cache(maxsize=2) + def comparison_order() -> dict["ServiceState", int]: + """States are comparable to supportmin() on a list of ServiceState""" + return { + ServiceState.FAILED: 0, + ServiceState.PENDING: 1, + ServiceState.PULLING: 2, + ServiceState.STARTING: 3, + ServiceState.RUNNING: 4, + ServiceState.STOPPING: 5, + ServiceState.COMPLETE: 6, + ServiceState.IDLE: 7, + } + + +class ServiceType(str, Enum): + COMPUTATIONAL = "computational" + DYNAMIC = "dynamic" + FRONTEND = "frontend" + BACKEND = "backend" + + +# NOTE on services: +# +# | service name | 
defininition | implementation | runs | ``ServiceType`` | | +# | --------------- | ------------ | -------------- | ----------------------- | ----------------------------- | --------------- | +# | ``file-picker`` | BE | FE | FE | ``ServiceType.FRONTEND`` | function | +# | ``isolve`` | DI-labels | DI | Dask-BE (own container) | ``ServiceType.COMPUTATIONAL`` | container | +# | ``jupyter-*`` | DI-labels | DI | DySC-BE (own container) | ``ServiceType.DYNAMIC`` | container | +# | ``iterator-*`` | BE | BE | BE (webserver) | ``ServiceType.BACKEND`` | function | +# | ``pyfun-*`` | BE | BE | Dask-BE (dask-sidecar) | ``ServiceType.COMPUTATIONAL`` | function | +# +# +# where FE (front-end), DI (docker image), Dask/DySC (dask/dynamic sidecar), BE (backend). diff --git a/packages/models-library/src/models_library/services_history.py b/packages/models-library/src/models_library/services_history.py new file mode 100644 index 00000000000..91ed08fbe4b --- /dev/null +++ b/packages/models-library/src/models_library/services_history.py @@ -0,0 +1,73 @@ +from datetime import datetime +from typing import Annotated, TypeAlias + +from pydantic import BaseModel, ConfigDict, Field + +from .services_types import ServiceKey, ServiceVersion +from .utils.change_case import snake_to_camel + + +class CompatibleService(BaseModel): + key: Annotated[ + ServiceKey | None, + Field( + description="If None, it refer to current service. Used only for inter-service compatibility" + ), + ] = None + version: ServiceVersion + + +class Compatibility(BaseModel): + can_update_to: Annotated[ + CompatibleService, Field(description="Latest compatible service at this moment") + ] + + model_config = ConfigDict(alias_generator=snake_to_camel, populate_by_name=True) + + +class ServiceRelease(BaseModel): + version: ServiceVersion + version_display: Annotated[ + str | None, Field(description="If None, then display `version`") + ] = None + released: Annotated[ + datetime | None, + Field(description="When provided, it indicates the release timestamp"), + ] = None + retired: Annotated[ + datetime | None, + Field( + description="whether this service is planned to be retired. If None, the service is still active. 
If now dict[str, Any]: + description = port_schema.pop("description", port_schema["title"]) + return { + "label": port_schema["title"], + "description": description, + "type": "ref_contentSchema", + "contentSchema": port_schema, + } + + +class ServiceInput(BaseServiceIOModel): + """ + Metadata on a service input port + """ + + default_value: StrictBool | StrictInt | StrictFloat | str | None = Field( + None, + alias="defaultValue", + examples=["Dog", True], + deprecated=True, # Use content_schema defaults instead + ) + + widget: Widget | None = Field( + None, + description="custom widget to use instead of the default one determined from the data-type", + ) + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + # file-wo-widget: + { + "displayOrder": 1, + "label": "Input files - file-wo-widget", + "description": "Files downloaded from service connected at the input", + "type": ANY_FILETYPE, + }, + # v2 + { + "displayOrder": 2, + "label": "Sleep Time - v2", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": {"type": "TextArea", "details": {"minHeight": 3}}, + }, + # latest: + { + "label": "Sleep Time - latest", + "description": "Time to wait before completion", + "type": "number", + "defaultValue": 0, + "unit": "second", + "widget": {"type": "TextArea", "details": {"minHeight": 3}}, + }, + { + "label": "array_numbers", + "description": "Some array of numbers", + "type": "ref_contentSchema", + "contentSchema": { + "title": "list[number]", + "type": "array", + "items": {"type": "number"}, + }, + }, + { + "label": "my_object", + "description": "Some object", + "type": "ref_contentSchema", + "contentSchema": { + "title": "an object named A", + "type": "object", + "properties": { + "i": {"title": "Int", "type": "integer", "default": 3}, + "b": {"title": "Bool", "type": "boolean"}, + "s": {"title": "Str", "type": "string"}, + }, + "required": ["b", "s"], + }, + }, + ], + }, + ) + + @classmethod + def from_json_schema(cls, port_schema: dict[str, Any]) -> "ServiceInput": + """Creates input port model from a json-schema""" + data = cls._from_json_schema_base_implementation(port_schema) + return cls.model_validate(data) + + +class ServiceOutput(BaseServiceIOModel): + widget: Widget | None = Field( + None, + description="custom widget to use instead of the default one determined from the data-type", + deprecated=True, + ) + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "displayOrder": 2, + "label": "Time Slept", + "description": "Time the service waited before completion", + "type": "number", + }, + { + "displayOrder": 2, + "label": "Time Slept - units", + "description": "Time with units", + "type": "number", + "unit": "second", + }, + { + "label": "Time Slept - w/o displayorder", + "description": "Time without display order", + "type": "number", + "unit": "second", + }, + { + "label": "Output file 1", + "displayOrder": 4.0, + "description": "Output file uploaded from the outputs folder", + "type": ANY_FILETYPE, + }, + ] + }, + ) + + @classmethod + def from_json_schema(cls, port_schema: dict[str, Any]) -> "ServiceOutput": + """Creates output port model from a json-schema""" + data = cls._from_json_schema_base_implementation(port_schema) + return cls.model_validate(data) diff --git a/packages/models-library/src/models_library/services_metadata_editable.py b/packages/models-library/src/models_library/services_metadata_editable.py new file mode 100644 index 00000000000..c0acd484eb0 --- 
/dev/null +++ b/packages/models-library/src/models_library/services_metadata_editable.py @@ -0,0 +1,78 @@ +# mypy: disable-error-code=truthy-function +from datetime import datetime +from typing import Annotated, Any + +from common_library.basic_types import DEFAULT_FACTORY +from pydantic import ConfigDict, Field, HttpUrl +from pydantic.config import JsonDict + +from .services_base import ServiceBaseDisplay +from .services_constants import LATEST_INTEGRATION_VERSION +from .services_enums import ServiceType +from .services_types import DynamicServiceKey, ServiceKey, ServiceVersion + +assert DynamicServiceKey # nosec +assert LATEST_INTEGRATION_VERSION # nosec +assert ServiceKey # nosec +assert ServiceType # nosec +assert ServiceVersion # nosec + + +class ServiceMetaDataEditable(ServiceBaseDisplay): + # Overrides ServiceBaseDisplay fields to Optional for a partial update + name: str | None # type: ignore[assignment] + thumbnail: str | None + icon: HttpUrl | None + description: str | None # type: ignore[assignment] + description_ui: bool = False + version_display: str | None = None + + # Below fields only in the database ---- + deprecated: Annotated[ + datetime | None, + Field( + description="Owner can set the date to retire the service. Three possibilities:" + "If None, the service is marked as `published`;" + "If now=deprecated, the service is retired", + ), + ] = None + classifiers: list[str] | None + quality: Annotated[ + dict[str, Any], Field(default_factory=dict, json_schema_extra={"default": {}}) + ] = DEFAULT_FACTORY + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "key": "simcore/services/dynamic/sim4life", + "version": "1.0.9", + "name": "sim4life", + "description": "s4l web", + "thumbnail": "https://thumbnailit.org/image", + "icon": "https://cdn-icons-png.flaticon.com/512/25/25231.png", + "quality": { + "enabled": True, + "tsr_target": { + f"r{n:02d}": {"level": 4, "references": ""} + for n in range(1, 11) + }, + "annotations": { + "vandv": "", + "limitations": "", + "certificationLink": "", + "certificationStatus": "Uncertified", + }, + "tsr_current": { + f"r{n:02d}": {"level": 0, "references": ""} + for n in range(1, 11) + }, + }, + "classifiers": [], + } + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) diff --git a/packages/models-library/src/models_library/services_metadata_published.py b/packages/models-library/src/models_library/services_metadata_published.py new file mode 100644 index 00000000000..51fba05b7f4 --- /dev/null +++ b/packages/models-library/src/models_library/services_metadata_published.py @@ -0,0 +1,176 @@ +from datetime import datetime +from typing import Final, TypeAlias + +from pydantic import ConfigDict, Field, NonNegativeInt + +from .basic_types import SemanticVersionStr +from .boot_options import BootOption, BootOptions +from .emails import LowerCaseEmailStr +from .services_authoring import Author, Badge +from .services_base import ServiceBaseDisplay, ServiceKeyVersion +from .services_constants import ANY_FILETYPE +from .services_enums import ServiceType +from .services_io import ServiceInput, ServiceOutput +from .services_types import ServicePortKey + +ServiceInputsDict: TypeAlias = dict[ServicePortKey, ServiceInput] +ServiceOutputsDict: TypeAlias = dict[ServicePortKey, ServiceOutput] + + +_EXAMPLE: Final = { + "name": "oSparc Python Runner", + "key": "simcore/services/comp/osparc-python-runner", + "type": "computational", + "integration-version": "1.0.0", + 
"progress_regexp": "^(?:\\[?PROGRESS\\]?:?)?\\s*(?P[0-1]?\\.\\d+|\\d+\\s*(?P%))", + "version": "1.7.0", + "description": "oSparc Python Runner", + "contact": "smith@company.com", + "authors": [ + { + "name": "John Smith", + "email": "smith@company.com", + "affiliation": "Company", + }, + { + "name": "Richard Brown", + "email": "brown@uni.edu", + "affiliation": "University", + }, + ], + "inputs": { + "input_1": { + "displayOrder": 1, + "label": "Input data", + "description": "Any code, requirements or data file", + "type": ANY_FILETYPE, + } + }, + "outputs": { + "output_1": { + "displayOrder": 1, + "label": "Output data", + "description": "All data produced by the script is zipped as output_data.zip", + "type": ANY_FILETYPE, + "fileToKeyMap": {"output_data.zip": "output_1"}, + } + }, +} + +_EXAMPLE_W_BOOT_OPTIONS_AND_NO_DISPLAY_ORDER = { + **_EXAMPLE, + "description": "oSparc Python Runner with boot options", + "inputs": { + "input_1": { + "label": "Input data", + "description": "Any code, requirements or data file", + "type": ANY_FILETYPE, + } + }, + "outputs": { + "output_1": { + "label": "Output data", + "description": "All data produced by the script is zipped as output_data.zip", + "type": ANY_FILETYPE, + "fileToKeyMap": {"output_data.zip": "output_1"}, + } + }, + "boot-options": { + "example_service_defined_boot_mode": BootOption.model_config["json_schema_extra"]["examples"][0], # type: ignore [index] + "example_service_defined_theme_selection": BootOption.model_config["json_schema_extra"]["examples"][1], # type: ignore [index] + }, + "min-visible-inputs": 2, +} + + +class ServiceMetaDataPublished(ServiceKeyVersion, ServiceBaseDisplay): + """ + Service metadata at publication time + + - read-only (can only be changed overwriting the image labels in the registry) + - base metaddata + - injected in the image labels + + NOTE: This model is serialized in .osparc/metadata.yml and in the labels of the docker image + """ + + release_date: datetime | None = Field( + None, + description="A timestamp when the specific version of the service was released." + " This field helps in tracking the timeline of releases and understanding the sequence of updates." + " A timestamp string should be formatted as YYYY-MM-DD[T]HH:MM[:SS[.ffffff]][Z or [Β±]HH[:]MM]", + ) + + integration_version: SemanticVersionStr | None = Field( + None, + alias="integration-version", + description="This version is used to maintain backward compatibility when there are changes in the way a service is integrated into the framework", + ) + + service_type: ServiceType = Field( + ..., + alias="type", + description="service type", + examples=["computational"], + ) + + badges: list[Badge] | None = Field(None, deprecated=True) + + authors: list[Author] = Field(..., min_length=1) + contact: LowerCaseEmailStr = Field( + ..., + description="email to correspond to the authors about the node", + examples=["lab@net.flix"], + ) + inputs: ServiceInputsDict | None = Field( + ..., description="definition of the inputs of this node" + ) + outputs: ServiceOutputsDict | None = Field( + ..., description="definition of the outputs of this node" + ) + + boot_options: BootOptions | None = Field( + None, + alias="boot-options", + description="Service defined boot options. These get injected in the service as env variables.", + ) + + min_visible_inputs: NonNegativeInt | None = Field( + None, + alias="min-visible-inputs", + description=( + "The number of 'data type inputs' displayed by default in the UI. 
" + "When None all 'data type inputs' are displayed." + ), + ) + + progress_regexp: str | None = Field( + None, + alias="progress_regexp", + description="regexp pattern for detecting computational service's progress", + ) + + # SEE https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys + image_digest: str | None = Field( + None, + description="Image manifest digest. Note that this is NOT injected as an image label", + ) + + model_config = ConfigDict( + extra="forbid", + frozen=False, + populate_by_name=True, + json_schema_extra={ + "examples": [ + _EXAMPLE, # type: ignore[list-item] + _EXAMPLE_W_BOOT_OPTIONS_AND_NO_DISPLAY_ORDER, # type: ignore[list-item] + # latest + { + **_EXAMPLE_W_BOOT_OPTIONS_AND_NO_DISPLAY_ORDER, # type: ignore[dict-item] + "version_display": "Matterhorn Release", + "description_ui": True, + "release_date": "2024-05-31T13:45:30", + }, + ] + }, + ) diff --git a/packages/models-library/src/models_library/services_regex.py b/packages/models-library/src/models_library/services_regex.py new file mode 100644 index 00000000000..08154982df4 --- /dev/null +++ b/packages/models-library/src/models_library/services_regex.py @@ -0,0 +1,78 @@ +import re +from types import MappingProxyType +from typing import Final + +from .services_constants import ( + COMPUTATIONAL_SERVICE_KEY_PREFIX, + DYNAMIC_SERVICE_KEY_PREFIX, + FRONTEND_SERVICE_KEY_PREFIX, + SERVICE_TYPE_TO_NAME_MAP, +) +from .services_enums import ServiceType + +PROPERTY_TYPE_RE = r"^(number|integer|boolean|string|ref_contentSchema|data:([^/\s,]+/[^/\s,]+|\[[^/\s,]+/[^/\s,]+(,[^/\s]+/[^/,\s]+)*\]))$" +PROPERTY_TYPE_TO_PYTHON_TYPE_MAP = { + "integer": int, + "number": float, + "boolean": bool, + "string": str, +} + +FILENAME_RE = r".+" + +# e.g. simcore/services/comp/opencor +SERVICE_KEY_RE: Final[re.Pattern[str]] = re.compile( + r"^simcore/services/" + rf"(?P({ '|'.join(SERVICE_TYPE_TO_NAME_MAP.values()) }))/" + r"(?P[a-z0-9][a-z0-9_.-]*/)*" + r"(?P[a-z0-9-_]+[a-z0-9])$" +) + +# e.g. 
simcore%2Fservices%2Fcomp%2Fopencor +SERVICE_ENCODED_KEY_RE: Final[re.Pattern[str]] = re.compile( + r"^simcore%2Fservices%2F" + rf"(?P({'|'.join(SERVICE_TYPE_TO_NAME_MAP.values())}))%2F" + r"(?P[a-z0-9][a-z0-9_.-]*%2F)*" + r"(?P[a-z0-9-_]+[a-z0-9])$" +) + + +def _create_key_regex(service_type: ServiceType) -> re.Pattern[str]: + return re.compile( + rf"^simcore/services/{SERVICE_TYPE_TO_NAME_MAP[service_type]}/" + r"(?P[a-z0-9][a-z0-9_.-]*/)*" + r"(?P[a-z0-9-_]+[a-z0-9])$" + ) + + +def _create_key_format(service_type: ServiceType) -> str: + return f"simcore/services/{SERVICE_TYPE_TO_NAME_MAP[service_type]}/{{service_name}}" + + +COMPUTATIONAL_SERVICE_KEY_RE: Final[re.Pattern[str]] = _create_key_regex( + ServiceType.COMPUTATIONAL +) +COMPUTATIONAL_SERVICE_KEY_FORMAT: Final[str] = _create_key_format( + ServiceType.COMPUTATIONAL +) + +DYNAMIC_SERVICE_KEY_RE: Final[re.Pattern[str]] = _create_key_regex(ServiceType.DYNAMIC) +DYNAMIC_SERVICE_KEY_FORMAT: Final[str] = _create_key_format(ServiceType.DYNAMIC) + +FRONTEND_SERVICE_KEY_RE: Final[re.Pattern[str]] = _create_key_regex( + ServiceType.FRONTEND +) +FRONTEND_SERVICE_KEY_FORMAT: Final[str] = _create_key_format(ServiceType.FRONTEND) + + +SERVICE_TYPE_TO_PREFIX_MAP = MappingProxyType( + { + ServiceType.COMPUTATIONAL: COMPUTATIONAL_SERVICE_KEY_PREFIX, + ServiceType.DYNAMIC: DYNAMIC_SERVICE_KEY_PREFIX, + ServiceType.FRONTEND: FRONTEND_SERVICE_KEY_PREFIX, + } +) + +assert all( # nosec + not prefix.endswith("/") for prefix in SERVICE_TYPE_TO_PREFIX_MAP.values() +), "Service type prefixes must not end with '/'" diff --git a/packages/models-library/src/models_library/services_resources.py b/packages/models-library/src/models_library/services_resources.py index ac7514cf659..175c56f968a 100644 --- a/packages/models-library/src/models_library/services_resources.py +++ b/packages/models-library/src/models_library/services_resources.py @@ -1,48 +1,46 @@ -import logging from enum import auto -from typing import Any, Final, Optional, Union +from typing import Any, Final, TypeAlias -from models_library.docker import DockerGenericTag -from models_library.utils.enums import StrAutoEnum from pydantic import ( BaseModel, ByteSize, + ConfigDict, Field, StrictFloat, StrictInt, - parse_obj_as, - root_validator, + TypeAdapter, + model_validator, ) +from .docker import DockerGenericTag +from .utils.enums import StrAutoEnum from .utils.fastapi_encoders import jsonable_encoder -logger = logging.getLogger(__name__) - - ResourceName = str # NOTE: replace hard coded `container` with function which can # extract the name from the `service_key` or `registry_address/service_key` -DEFAULT_SINGLE_SERVICE_NAME: Final[DockerGenericTag] = parse_obj_as( - DockerGenericTag, "container" -) +DEFAULT_SINGLE_SERVICE_NAME: Final[DockerGenericTag] = TypeAdapter( + DockerGenericTag +).validate_python("container") -MEMORY_50MB: Final[int] = parse_obj_as(ByteSize, "50mib") -MEMORY_250MB: Final[int] = parse_obj_as(ByteSize, "250mib") -MEMORY_1GB: Final[int] = parse_obj_as(ByteSize, "1gib") +MEMORY_50MB: Final[int] = TypeAdapter(ByteSize).validate_python("50mib") +MEMORY_250MB: Final[int] = TypeAdapter(ByteSize).validate_python("250mib") +MEMORY_1GB: Final[int] = TypeAdapter(ByteSize).validate_python("1gib") GIGA: Final[float] = 1e9 CPU_10_PERCENT: Final[int] = int(0.1 * GIGA) CPU_100_PERCENT: Final[int] = int(1 * GIGA) -class ResourceValue(BaseModel): - limit: Union[StrictInt, StrictFloat, str] - reservation: Union[StrictInt, StrictFloat, str] +class ResourceValue(BaseModel, 
validate_assignment=True): + limit: StrictInt | StrictFloat | str + reservation: StrictInt | StrictFloat | str - @root_validator() + @model_validator(mode="before") @classmethod - def ensure_limits_are_equal_or_above_reservations(cls, values): + def _ensure_limits_are_equal_or_above_reservations(cls, values): + # WARNING: this does not validate ON-ASSIGNMENT! if isinstance(values["reservation"], str): # in case of string, the limit is the same as the reservation values["limit"] = values["reservation"] @@ -53,11 +51,14 @@ def ensure_limits_are_equal_or_above_reservations(cls, values): return values - class Config: - validate_assignment = True + def set_reservation_same_as_limit(self) -> None: + self.reservation = self.limit + + def set_value(self, value: StrictInt | StrictFloat | str) -> None: + self.limit = self.reservation = value -ResourcesDict = dict[ResourceName, ResourceValue] +ResourcesDict: TypeAlias = dict[ResourceName, ResourceValue] class BootMode(StrAutoEnum): @@ -82,8 +83,12 @@ class ImageResources(BaseModel): description="describe how a service shall be booted, using CPU, MPI, openMP or GPU", ) - class Config: - schema_extra = { + def set_reservation_same_as_limit(self) -> None: + for resource in self.resources.values(): + resource.set_reservation_same_as_limit() + + model_config = ConfigDict( + json_schema_extra={ "example": { "image": "simcore/service/dynamic/pretty-intense:1.0.0", "resources": { @@ -98,9 +103,10 @@ class Config: }, } } + ) -ServiceResourcesDict = dict[DockerGenericTag, ImageResources] +ServiceResourcesDict: TypeAlias = dict[DockerGenericTag, ImageResources] class ServiceResourcesDictHelpers: @@ -108,12 +114,11 @@ class ServiceResourcesDictHelpers: def create_from_single_service( image: DockerGenericTag, resources: ResourcesDict, - boot_modes: Optional[list[BootMode]] = None, + boot_modes: list[BootMode] | None = None, ) -> ServiceResourcesDict: if boot_modes is None: boot_modes = [BootMode.CPU] - return parse_obj_as( - ServiceResourcesDict, + return TypeAdapter(ServiceResourcesDict).validate_python( { DEFAULT_SINGLE_SERVICE_NAME: { "image": image, @@ -127,10 +132,11 @@ def create_from_single_service( def create_jsonable( service_resources: ServiceResourcesDict, ) -> dict[DockerGenericTag, Any]: - return jsonable_encoder(service_resources) + output: dict[DockerGenericTag, Any] = jsonable_encoder(service_resources) + return output - class Config: - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "examples": [ # no compose spec (majority of services) { @@ -139,8 +145,10 @@ class Config: "resources": { "CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": TypeAdapter(ByteSize).validate_python( + "2Gib" + ), }, }, "boot_modes": [BootMode.CPU], @@ -170,8 +178,10 @@ class Config: "resources": { "CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": TypeAdapter(ByteSize).validate_python( + "2Gib" + ), }, }, "boot_modes": [BootMode.CPU], @@ -184,8 +194,10 @@ class Config: "resources": { "CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": 
TypeAdapter(ByteSize).validate_python( + "2Gib" + ), }, }, "boot_modes": [BootMode.CPU], @@ -195,8 +207,10 @@ class Config: "resources": { "CPU": {"limit": 0.1, "reservation": 0.1}, "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": TypeAdapter(ByteSize).validate_python( + "2Gib" + ), }, }, "boot_modes": [BootMode.CPU], @@ -204,3 +218,4 @@ class Config: }, ] } + ) diff --git a/packages/models-library/src/models_library/services_types.py b/packages/models-library/src/models_library/services_types.py new file mode 100644 index 00000000000..b6689fdf888 --- /dev/null +++ b/packages/models-library/src/models_library/services_types.py @@ -0,0 +1,101 @@ +from typing import TYPE_CHECKING, Annotated, Any, Self, TypeAlias +from uuid import uuid4 + +import arrow +from pydantic import ( + GetCoreSchemaHandler, + PositiveInt, + StringConstraints, + ValidationInfo, +) +from pydantic_core import CoreSchema, core_schema + +from .basic_regex import PROPERTY_KEY_RE, SIMPLE_VERSION_RE +from .projects_nodes_io import NodeID +from .services_regex import ( + COMPUTATIONAL_SERVICE_KEY_RE, + DYNAMIC_SERVICE_KEY_RE, + FILENAME_RE, + SERVICE_ENCODED_KEY_RE, + SERVICE_KEY_RE, +) +from .users import UserID + +if TYPE_CHECKING: + from .projects import ProjectID + +ServicePortKey: TypeAlias = Annotated[str, StringConstraints(pattern=PROPERTY_KEY_RE)] + +FileName: TypeAlias = Annotated[str, StringConstraints(pattern=FILENAME_RE)] + +ServiceKey: TypeAlias = Annotated[str, StringConstraints(pattern=SERVICE_KEY_RE)] + +ServiceKeyEncoded: TypeAlias = Annotated[ + str, StringConstraints(pattern=SERVICE_ENCODED_KEY_RE) +] + +DynamicServiceKey: TypeAlias = Annotated[ + str, StringConstraints(pattern=DYNAMIC_SERVICE_KEY_RE) +] + +ComputationalServiceKey: TypeAlias = Annotated[ + str, StringConstraints(pattern=COMPUTATIONAL_SERVICE_KEY_RE) +] + +ServiceVersion: TypeAlias = Annotated[str, StringConstraints(pattern=SIMPLE_VERSION_RE)] + + +class ServiceRunID(str): + """ + Used to assign a unique identifier to the run of a service. + + Example usage: + The dynamic-sidecar uses this to distinguish between current + and old volumes for different runs. + Avoids overwriting data that left dropped on the node (due to an error) + and gives the osparc-agent an opportunity to back it up. + The resource-usage-tracker tracker uses these RunIDs to keep track of + resource usage from computational and dynamic services. 
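# --- illustrative sketch (not repo code) --------------------------------------
# The Annotated[str, StringConstraints(...)] aliases above (ServiceKey,
# ServiceVersion, ...) are plain strings whose pattern is only checked when
# validated. Minimal sketch with a made-up alias `_SimpleVersion` and a
# made-up pattern; the real SIMPLE_VERSION_RE lives in models_library.basic_regex.
from typing import Annotated

from pydantic import StringConstraints, TypeAdapter, ValidationError

_SimpleVersion = Annotated[str, StringConstraints(pattern=r"^\d+\.\d+\.\d+$")]

assert TypeAdapter(_SimpleVersion).validate_python("1.7.0") == "1.7.0"

try:
    TypeAdapter(_SimpleVersion).validate_python("not-a-version")
    raise AssertionError("expected a ValidationError")
except ValidationError:
    pass  # constraint violations surface only at validation time
# ------------------------------------------------------------------------------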
+ """ + + __slots__ = () + + @classmethod + def get_resource_tracking_run_id_for_dynamic(cls) -> Self: + """used for dynamic services""" + # NOTE: there was a legacy version of this RunID + # legacy version: + # '0ac3ed64-665b-42d2-95f7-e59e0db34242' + # current version: + # '1690203099_0ac3ed64-665b-42d2-95f7-e59e0db34242' + utc_int_timestamp: int = arrow.utcnow().int_timestamp + run_id_format = f"{utc_int_timestamp}_{uuid4()}" + return cls(run_id_format) + + @classmethod + def get_resource_tracking_run_id_for_computational( + cls, + user_id: UserID, + project_id: "ProjectID", + node_id: NodeID, + iteration: PositiveInt, + ) -> Self: + """used by computational services""" + return cls(f"comp_{user_id}_{project_id}_{node_id}_{iteration}") + + @classmethod + def __get_pydantic_core_schema__( + cls, + source_type: Any, # pylint:disable=unused-argument + handler: GetCoreSchemaHandler, + ) -> CoreSchema: + return core_schema.no_info_after_validator_function(cls, handler(str)) + + @classmethod + def validate(cls, v: "ServiceRunID | str", _: ValidationInfo) -> "ServiceRunID": + if isinstance(v, cls): + return v + if isinstance(v, str): + return cls(v) + msg = f"Invalid value for RunID: {v}" + raise TypeError(msg) diff --git a/packages/models-library/src/models_library/services_ui.py b/packages/models-library/src/models_library/services_ui.py index 18dbd0b7469..055fa58fd7b 100644 --- a/packages/models-library/src/models_library/services_ui.py +++ b/packages/models-library/src/models_library/services_ui.py @@ -1,7 +1,6 @@ from enum import Enum -from typing import List, Union -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field from pydantic.types import PositiveInt @@ -15,30 +14,26 @@ class TextArea(BaseModel): ..., alias="minHeight", description="minimum Height of the textarea" ) - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") class Structure(BaseModel): - key: Union[str, bool, float] + key: str | bool | float label: str - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") class SelectBox(BaseModel): - structure: List[Structure] = Field(..., min_items=1) + structure: list[Structure] = Field(..., min_length=1) - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") class Widget(BaseModel): widget_type: WidgetType = Field( ..., alias="type", description="type of the property" ) - details: Union[TextArea, SelectBox] + details: TextArea | SelectBox - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") diff --git a/packages/models-library/src/models_library/shared_user_preferences.py b/packages/models-library/src/models_library/shared_user_preferences.py new file mode 100644 index 00000000000..57f291999ab --- /dev/null +++ b/packages/models-library/src/models_library/shared_user_preferences.py @@ -0,0 +1,6 @@ +from .user_preferences import FrontendUserPreference + + +class AllowMetricsCollectionFrontendUserPreference(FrontendUserPreference): + preference_identifier: str = "allowMetricsCollection" + value: bool = True diff --git a/packages/models-library/src/models_library/sidecar_volumes.py b/packages/models-library/src/models_library/sidecar_volumes.py new file mode 100644 index 00000000000..0e9bcf16c0b --- /dev/null +++ b/packages/models-library/src/models_library/sidecar_volumes.py @@ -0,0 +1,55 @@ +from datetime import datetime +from enum import auto + +import arrow +from pydantic import BaseModel, Field + +from .utils.enums import 
StrAutoEnum + + +class VolumeCategory(StrAutoEnum): + """ + These uniquely identify volumes which are mounted by + the dynamic-sidecar and user services. + + This is primarily used to keep track of the status of + each individual volume on the volumes. + + The status is ingested by the agent and processed + when the volume is removed. + """ + + # contains data relative to output ports + OUTPUTS = auto() + + # contains data relative to input ports + INPUTS = auto() + + # contains files which represent the state of the service + # usually the user's workspace + STATES = auto() + + # contains dynamic-sidecar data required to maintain state + # between restarts + SHARED_STORE = auto() + + +class VolumeStatus(StrAutoEnum): + """ + Used by the agent to figure out what to do with the data + present on the volume. + """ + + CONTENT_NEEDS_TO_BE_SAVED = auto() + CONTENT_WAS_SAVED = auto() + CONTENT_NO_SAVE_REQUIRED = auto() + + +class VolumeState(BaseModel): + status: VolumeStatus + last_changed: datetime = Field(default_factory=lambda: arrow.utcnow().datetime) + + def __eq__(self, other: object) -> bool: + # only include status for equality last_changed is not important + is_equal: bool = self.status == getattr(other, "status", None) + return is_equal diff --git a/packages/models-library/src/models_library/socketio.py b/packages/models-library/src/models_library/socketio.py new file mode 100644 index 00000000000..abc5cf92c1b --- /dev/null +++ b/packages/models-library/src/models_library/socketio.py @@ -0,0 +1,10 @@ +from typing import Any + +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + + +class SocketMessageDict(TypedDict): + event_type: str + data: dict[str, Any] diff --git a/packages/models-library/src/models_library/trash.py b/packages/models-library/src/models_library/trash.py new file mode 100644 index 00000000000..306787ab60f --- /dev/null +++ b/packages/models-library/src/models_library/trash.py @@ -0,0 +1,7 @@ +from pydantic import BaseModel, Field + + +class RemoveQueryParams(BaseModel): + force: bool = Field( + default=False, description="Force removal (even if resource is active)" + ) diff --git a/packages/models-library/src/models_library/user_preferences.py b/packages/models-library/src/models_library/user_preferences.py new file mode 100644 index 00000000000..435fd972cb5 --- /dev/null +++ b/packages/models-library/src/models_library/user_preferences.py @@ -0,0 +1,136 @@ +from enum import auto +from typing import Annotated, Any, ClassVar, Literal, TypeAlias + +from common_library.pydantic_fields_extension import get_type +from pydantic import BaseModel, Field +from pydantic._internal._model_construction import ModelMetaclass +from pydantic.fields import FieldInfo + +from .services import ServiceKey, ServiceVersion +from .utils.enums import StrAutoEnum + + +class _AutoRegisterMeta(ModelMetaclass): + registered_user_preference_classes: ClassVar[dict[str, type]] = {} + + def __new__(cls, name, bases, attrs, *args, **kwargs): + new_class = super().__new__(cls, name, bases, attrs, *args, **kwargs) + + if name != cls.__name__: + if name in cls.registered_user_preference_classes: + msg = ( + f"Class named '{name}' was already defined at " + f"{cls.registered_user_preference_classes[name]}." + " Please choose a different class name!" 
+ ) + raise TypeError(msg) + cls.registered_user_preference_classes[name] = new_class + + return new_class + + +PreferenceName: TypeAlias = str +PreferenceIdentifier: TypeAlias = str + + +class _ExtendedBaseModel(BaseModel, metaclass=_AutoRegisterMeta): + ... + + +class PreferenceType(StrAutoEnum): + FRONTEND = auto() + USER_SERVICE = auto() + + +class NoPreferenceFoundError(RuntimeError): + def __init__(self, preference_name) -> None: + self.preference_name = preference_name + super().__init__(f"No preference class found for provided {preference_name=}") + + +class _BaseUserPreferenceModel(_ExtendedBaseModel): + preference_type: PreferenceType = Field( + ..., description="distinguish between the types of preferences" + ) + + value: Any = Field(..., description="value of the preference") + + @classmethod + def get_preference_class_from_name( + cls, preference_name: PreferenceName + ) -> type["_BaseUserPreferenceModel"]: + preference_class: type[ + "_BaseUserPreferenceModel" + ] | None = cls.registered_user_preference_classes.get(preference_name, None) + if preference_class is None: + raise NoPreferenceFoundError(preference_name) + return preference_class + + @classmethod + def get_preference_name(cls) -> PreferenceName: + # NOTE: this will be `unique` among all subclasses. + # No class inherited from this one, can be defined using the same name, + # even if the context is different. + return cls.__name__ + + @classmethod + def get_default_value(cls) -> Any: + value_field: FieldInfo = dict(cls.model_fields)["value"] + + return ( + value_field.default_factory() # type: ignore[call-arg] + if callable(value_field.default_factory) + else value_field.default + ) + + +class FrontendUserPreference(_BaseUserPreferenceModel): + preference_type: Literal[PreferenceType.FRONTEND] = PreferenceType.FRONTEND + + preference_identifier: PreferenceIdentifier = Field( + ..., description="used by the frontend" + ) + + value: Any + + def to_db(self) -> dict: + return self.model_dump(exclude={"preference_identifier", "preference_type"}) + + @classmethod + def update_preference_default_value(cls, new_default: Any) -> None: + # pylint: disable=unsubscriptable-object + expected_type = get_type(cls.model_fields["value"]) + detected_type = type(new_default) + if expected_type != detected_type: + msg = ( + f"Error, {cls.__name__} {expected_type=} differs from {detected_type=}" + ) + raise TypeError(msg) + + if cls.model_fields["value"].default is None: + cls.model_fields["value"].default_factory = lambda: new_default + else: + cls.model_fields["value"].default = new_default + cls.model_fields["value"].default_factory = None + + cls.model_rebuild(force=True) + + +class UserServiceUserPreference(_BaseUserPreferenceModel): + preference_type: Literal[PreferenceType.USER_SERVICE] = PreferenceType.USER_SERVICE + + service_key: ServiceKey = Field( + ..., description="the service which manages the preferences" + ) + service_version: ServiceVersion = Field( + ..., description="version of the service which manages the preference" + ) + + def to_db(self) -> dict: + return self.model_dump(exclude={"preference_type"}) + + +AnyUserPreference: TypeAlias = Annotated[ + FrontendUserPreference | UserServiceUserPreference, + Field(discriminator="preference_type"), +] diff --git a/packages/models-library/src/models_library/users.py b/packages/models-library/src/models_library/users.py index 9847e22c809..3b8d4344b4b 100644 --- a/packages/models-library/src/models_library/users.py +++ b/packages/models-library/src/models_library/users.py 
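# --- illustrative sketch (not repo code) --------------------------------------
# AnyUserPreference above is a discriminated union keyed on `preference_type`.
# Minimal sketch with made-up models (`_FrontendPref`, `_UserServicePref`)
# showing how the Literal discriminator routes a plain dict to the right class;
# assumes pydantic v2 only.
from typing import Annotated, Literal, TypeAlias

from pydantic import BaseModel, Field, TypeAdapter


class _FrontendPref(BaseModel):
    preference_type: Literal["FRONTEND"] = "FRONTEND"
    value: int = 0


class _UserServicePref(BaseModel):
    preference_type: Literal["USER_SERVICE"] = "USER_SERVICE"
    service_key: str


_AnyPref: TypeAlias = Annotated[
    _FrontendPref | _UserServicePref, Field(discriminator="preference_type")
]

parsed = TypeAdapter(_AnyPref).validate_python(
    {"preference_type": "USER_SERVICE", "service_key": "simcore/services/dynamic/x"}
)
assert isinstance(parsed, _UserServicePref)  # discriminator picked the right model
# ------------------------------------------------------------------------------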
@@ -1,4 +1,111 @@ -from pydantic import PositiveInt +import datetime +from typing import Annotated, TypeAlias -UserID = PositiveInt -GroupID = PositiveInt +from common_library.users_enums import UserRole +from models_library.basic_types import IDStr +from pydantic import BaseModel, ConfigDict, Field, PositiveInt, StringConstraints +from pydantic.config import JsonDict +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + +from .emails import LowerCaseEmailStr + +UserID: TypeAlias = PositiveInt +UserNameID: TypeAlias = IDStr + + +FirstNameStr: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, max_length=255) +] + +LastNameStr: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, max_length=255) +] + + +class PrivacyDict(TypedDict): + hide_username: bool + hide_fullname: bool + hide_email: bool + + +class MyProfile(BaseModel): + id: UserID + user_name: UserNameID + first_name: str | None + last_name: str | None + email: LowerCaseEmailStr + role: UserRole + privacy: PrivacyDict + expiration_date: datetime.date | None = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "id": 1, + "email": "PtN5Ab0uv@guest-at-osparc.io", + "user_name": "PtN5Ab0uv", + "first_name": "PtN5Ab0uv", + "last_name": "", + "role": "GUEST", + "privacy": { + "hide_email": True, + "hide_fullname": False, + "hide_username": False, + }, + } + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + +class UserBillingDetails(BaseModel): + first_name: str | None + last_name: str | None + institution: str | None + address: str | None + city: str | None + state: str | None = Field(description="State, province, canton, ...") + country: str # Required for taxes + postal_code: str | None + phone: str | None + + model_config = ConfigDict(from_attributes=True) + + +# +# THIRD-PARTY TOKENS +# + + +class UserThirdPartyToken(BaseModel): + """ + Tokens used to access third-party services connected to osparc (e.g. 
pennsieve, scicrunch, etc) + """ + + service: str + token_key: str + token_secret: str | None = None + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "service": "github-api-v1", + "token_key": "5f21abf5-c596-47b7-bfd1-c0e436ef1107", + } + } + ) + + +# +# PERMISSIONS +# + + +class UserPermission(BaseModel): + name: str + allowed: bool diff --git a/packages/models-library/src/models_library/utils/_original_fastapi_encoders.py b/packages/models-library/src/models_library/utils/_original_fastapi_encoders.py index b51096b6bbe..5eac7c1b2f1 100644 --- a/packages/models-library/src/models_library/utils/_original_fastapi_encoders.py +++ b/packages/models-library/src/models_library/utils/_original_fastapi_encoders.py @@ -1,30 +1,35 @@ # pylint: disable-all -# nopycln: file + # # wget https://raw.githubusercontent.com/tiangolo/fastapi/master/fastapi/encoders.py --output-document=_original_fastapi_encoders # import dataclasses -from collections import defaultdict +from collections import defaultdict, deque from enum import Enum from pathlib import PurePath from types import GeneratorType -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Union, get_origin +from common_library.json_serialization import ENCODERS_BY_TYPE from pydantic import BaseModel -from pydantic.json import ENCODERS_BY_TYPE +from pydantic_core import PydanticUndefined, PydanticUndefinedType +from typing_extensions import Annotated, Doc + +Undefined = PydanticUndefined +UndefinedType = PydanticUndefinedType -SetIntStr = Set[Union[int, str]] -DictIntStrAny = Dict[Union[int, str], Any] +IncEx = Union[set[int], set[str], dict[int, Any], dict[str, Any]] def generate_encoders_by_class_tuples( - type_encoder_map: Dict[Any, Callable[[Any], Any]] -) -> Dict[Callable[[Any], Any], Tuple[Any, ...]]: - encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict( + type_encoder_map: dict[Any, Callable[[Any], Any]], +) -> dict[Callable[[Any], Any], tuple[Any, ...]]: + encoders_by_class_tuples: dict[Callable[[Any], Any], tuple[Any, ...]] = defaultdict( tuple ) for type_, encoder in type_encoder_map.items(): - encoders_by_class_tuples[encoder] += (type_,) + if get_origin(type_) is not Annotated: + encoders_by_class_tuples[encoder] += (type_,) return encoders_by_class_tuples @@ -32,16 +37,107 @@ def generate_encoders_by_class_tuples( def jsonable_encoder( - obj: Any, - include: Optional[Union[SetIntStr, DictIntStrAny]] = None, - exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None, - by_alias: bool = True, - exclude_unset: bool = False, - exclude_defaults: bool = False, - exclude_none: bool = False, - custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None, - sqlalchemy_safe: bool = True, + obj: Annotated[ + Any, + Doc( + """ + The input object to convert to JSON. + """ + ), + ], + include: Annotated[ + IncEx | None, + Doc( + """ + Pydantic's `include` parameter, passed to Pydantic models to set the + fields to include. + """ + ), + ] = None, + exclude: Annotated[ + IncEx | None, + Doc( + """ + Pydantic's `exclude` parameter, passed to Pydantic models to set the + fields to exclude. + """ + ), + ] = None, + by_alias: Annotated[ + bool, + Doc( + """ + Pydantic's `by_alias` parameter, passed to Pydantic models to define if + the output should use the alias names (when provided) or the Python + attribute names. 
In an API, if you set an alias, it's probably because you + want to use it in the result, so you probably want to leave this set to + `True`. + """ + ), + ] = True, + exclude_unset: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_unset` parameter, passed to Pydantic models to define + if it should exclude from the output the fields that were not explicitly + set (and that only had their default values). + """ + ), + ] = False, + exclude_defaults: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_defaults` parameter, passed to Pydantic models to define + if it should exclude from the output the fields that had the same default + value, even when they were explicitly set. + """ + ), + ] = False, + exclude_none: Annotated[ + bool, + Doc( + """ + Pydantic's `exclude_none` parameter, passed to Pydantic models to define + if it should exclude from the output any fields that have a `None` value. + """ + ), + ] = False, + custom_encoder: Annotated[ + dict[Any, Callable[[Any], Any]] | None, + Doc( + """ + Pydantic's `custom_encoder` parameter, passed to Pydantic models to define + a custom encoder. + """ + ), + ] = None, + sqlalchemy_safe: Annotated[ + bool, + Doc( + """ + Exclude from the output any fields that start with the name `_sa`. + + This is mainly a hack for compatibility with SQLAlchemy objects, they + store internal SQLAlchemy-specific state in attributes named with `_sa`, + and those objects can't (and shouldn't be) serialized to JSON. + """ + ), + ] = True, ) -> Any: + """ + Convert any object to something that can be encoded in JSON. + + This is used internally by FastAPI to make sure anything you return can be + encoded as JSON before it is sent to the client. + + You can also use it yourself, for example to convert objects before saving them + in a database that supports only JSON. + + Read more about it in the + [FastAPI docs for JSON Compatible Encoder](https://fastapi.tiangolo.com/tutorial/encoder/). 
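# --- illustrative sketch (not repo code) --------------------------------------
# Hedged usage example of jsonable_encoder (here imported from
# fastapi.encoders, the function this module vendors): non-JSON types such as
# datetime or UUID become JSON-compatible values. Assumes fastapi is
# installed; the `_Event` model is made up for illustration.
from datetime import datetime
from uuid import UUID

from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel


class _Event(BaseModel):
    id: UUID
    created: datetime


event = _Event(
    id=UUID("0ac3ed64-665b-42d2-95f7-e59e0db34242"),
    created=datetime(2024, 5, 31, 13, 45, 30),
)
payload = jsonable_encoder(event)
assert payload["id"] == "0ac3ed64-665b-42d2-95f7-e59e0db34242"  # UUID -> str
assert isinstance(payload["created"], str)  # datetime -> ISO string
# ------------------------------------------------------------------------------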
+ """ custom_encoder = custom_encoder or {} if custom_encoder: if type(obj) in custom_encoder: @@ -55,10 +151,9 @@ def jsonable_encoder( if exclude is not None and not isinstance(exclude, (set, dict)): exclude = set(exclude) if isinstance(obj, BaseModel): - encoder = getattr(obj.__config__, "json_encoders", {}) - if custom_encoder: - encoder.update(custom_encoder) - obj_dict = obj.dict( + obj_dict = BaseModel.model_dump( + obj, + mode="json", include=include, exclude=exclude, by_alias=by_alias, @@ -72,11 +167,10 @@ def jsonable_encoder( obj_dict, exclude_none=exclude_none, exclude_defaults=exclude_defaults, - custom_encoder=encoder, sqlalchemy_safe=sqlalchemy_safe, ) if dataclasses.is_dataclass(obj): - obj_dict = dataclasses.asdict(obj) + obj_dict = dataclasses.asdict(obj) # type: ignore[arg-type] return jsonable_encoder( obj_dict, include=include, @@ -94,6 +188,8 @@ def jsonable_encoder( return str(obj) if isinstance(obj, (str, int, float, type(None))): return obj + if isinstance(obj, UndefinedType): + return None if isinstance(obj, dict): encoded_dict = {} allowed_keys = set(obj.keys()) @@ -129,7 +225,7 @@ def jsonable_encoder( ) encoded_dict[encoded_key] = encoded_value return encoded_dict - if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): + if isinstance(obj, (list, set, frozenset, GeneratorType, tuple, deque)): encoded_list = [] for item in obj: encoded_list.append( @@ -156,13 +252,13 @@ def jsonable_encoder( try: data = dict(obj) except Exception as e: - errors: List[Exception] = [] + errors: list[Exception] = [] errors.append(e) try: data = vars(obj) except Exception as e: errors.append(e) - raise ValueError(errors) + raise ValueError(errors) from e return jsonable_encoder( data, include=include, diff --git a/packages/models-library/src/models_library/utils/change_case.py b/packages/models-library/src/models_library/utils/change_case.py index fa35fa1d879..9cba0145d44 100644 --- a/packages/models-library/src/models_library/utils/change_case.py +++ b/packages/models-library/src/models_library/utils/change_case.py @@ -1,21 +1,21 @@ -""" String convesion +"""String convesion Example of usage in pydantic: [...] 
- class Config: - extra = Extra.forbid - alias_generator = snake_to_camel # <-------- - json_loads = orjson.loads - json_dumps = json_dumps + model_config = ConfigDict( + alias_generator=snake_to_camel, # <-- note + ) """ + # Partially taken from https://github.com/autoferrit/python-change-case/blob/master/change_case/change_case.py#L131 import re +from typing import Final -_underscorer1 = re.compile(r"(.)([A-Z][a-z]+)") -_underscorer2 = re.compile(r"([a-z0-9])([A-Z])") +_UNDERSCORER1: Final = re.compile(r"(.)([A-Z][a-z]+)") +_UNDERSCORER2: Final = re.compile(r"([a-z0-9])([A-Z])") def snake_to_camel(subject: str) -> str: @@ -39,5 +39,5 @@ def snake_to_upper_camel(subject: str) -> str: def camel_to_snake(subject: str) -> str: - subbed = _underscorer1.sub(r"\1_\2", subject) - return _underscorer2.sub(r"\1_\2", subbed).lower() + subbed = _UNDERSCORER1.sub(r"\1_\2", subject) + return _UNDERSCORER2.sub(r"\1_\2", subbed).lower() diff --git a/packages/models-library/src/models_library/utils/common_validators.py b/packages/models-library/src/models_library/utils/common_validators.py new file mode 100644 index 00000000000..c55db09c5f5 --- /dev/null +++ b/packages/models-library/src/models_library/utils/common_validators.py @@ -0,0 +1,147 @@ +"""Reusable validators + + Example: + + from pydantic import BaseModel, validator + from models_library.utils.common_validators import empty_str_to_none_pre_validator + + class MyModel(BaseModel): + thumbnail: str | None + + _empty_is_none = validator("thumbnail", mode="before")( + empty_str_to_none_pre_validator + ) + +SEE https://docs.pydantic.dev/usage/validators/#reuse-validators +""" + +import enum +import functools +import operator +from typing import Any + +from common_library.json_serialization import json_loads +from orjson import JSONDecodeError +from pydantic import BaseModel, BeforeValidator +from pydantic.alias_generators import to_camel + + +def trim_string_before(max_length: int) -> BeforeValidator: + def _trim(value: str): + if isinstance(value, str): + return value[:max_length] + return value + + return BeforeValidator(_trim) + + +def empty_str_to_none_pre_validator(value: Any): + if isinstance(value, str) and value.strip() == "": + return None + return value + + +def none_to_empty_str_pre_validator(value: Any): + if value is None: + return "" + return value + + +def none_to_empty_list_pre_validator(value: Any): + if value is None: + return [] + return value + + +def parse_json_pre_validator(value: Any): + if isinstance(value, str): + try: + return json_loads(value) + except JSONDecodeError as err: + msg = f"Invalid JSON {value=}: {err}" + raise ValueError(msg) from err + return value + + +def create_enums_pre_validator(enum_cls: type[enum.Enum]): + """Enables parsing enums from equivalent enums + + SEE test__pydantic_models_and_enumps.py for more details + """ + + def _validator(value: Any): + if value and not isinstance(value, enum_cls) and isinstance(value, enum.Enum): + return value.value + return value + + return _validator + + +def ensure_unique_list_values_validator(list_data: list) -> list: + if len(list_data) != len(set(list_data)): + msg = f"List values must be unique, provided: {list_data}" + raise ValueError(msg) + return list_data + + +def ensure_unique_dict_values_validator(dict_data: dict) -> dict: + if len(dict_data) != len(set(dict_data.values())): + msg = f"Dictionary values must be unique, provided: {dict_data}" + raise ValueError(msg) + return dict_data + + +def null_or_none_str_to_none_validator(value: Any): + if 
isinstance(value, str) and value.lower() in ("null", "none"): + return None + return value + + +def create__check_only_one_is_set__root_validator( + mutually_exclusive_field_names: list[str], +): + """Ensure exactly one and only one of the alternatives is set + + NOTE: a field is considered here `unset` when it is `not None`. When None + is used to indicate something else, please do not use this validator. + + This is useful when you want to give the client alternative + ways to set the same thing e.g. set the user by email or id or username + and each of those has a different field + + NOTE: Alternatevely, the previous example can also be solved using a + single field as `user: Email | UserID | UserName` + + SEE test_uid_or_email_are_set.py for more details + """ + + def _validator(cls: type[BaseModel], values): + assert set(mutually_exclusive_field_names).issubset( # nosec + cls.model_fields + ), f"Invalid {mutually_exclusive_field_names=} passed in the factory arguments" + got = { + field_name: getattr(values, field_name) + for field_name in mutually_exclusive_field_names + } + + if not functools.reduce(operator.xor, (v is not None for v in got.values())): + msg = f"Either { ' or '.join(got.keys()) } must be set, but not both. Got {got}" + raise ValueError(msg) + return values + + return _validator + + +def to_camel_recursive(data: dict[str, Any]) -> dict[str, Any]: + """Recursively convert dictionary keys to camelCase""" + if not isinstance(data, dict): + return data # Return as-is if it's not a dictionary + + new_dict = {} + for key, value in data.items(): + new_key = to_camel(key) # Convert key to camelCase + if isinstance(value, dict): + new_dict[new_key] = to_camel_recursive(value) # Recursive call for dicts + else: + new_dict[new_key] = value + return new_dict diff --git a/packages/models-library/src/models_library/utils/converters.py b/packages/models-library/src/models_library/utils/converters.py deleted file mode 100644 index 14a2cfec6fa..00000000000 --- a/packages/models-library/src/models_library/utils/converters.py +++ /dev/null @@ -1,2 +0,0 @@ -def to_snake_case(string: str) -> str: - return "".join(["_" + i.lower() if i.isupper() else i for i in string]).lstrip("_") diff --git a/packages/models-library/src/models_library/utils/database_models_factory.py b/packages/models-library/src/models_library/utils/database_models_factory.py deleted file mode 100644 index b98395e7139..00000000000 --- a/packages/models-library/src/models_library/utils/database_models_factory.py +++ /dev/null @@ -1,171 +0,0 @@ -""" Automatic creation of pydantic model classes from a sqlalchemy table - -SEE: Copied and adapted from https://github.com/tiangolo/pydantic-sqlalchemy/blob/master/pydantic_sqlalchemy/main.py -""" - -import json -import warnings -from datetime import datetime -from typing import Any, Callable, Container, Optional -from uuid import UUID - -import sqlalchemy as sa -import sqlalchemy.sql.functions -from pydantic import BaseConfig, BaseModel, Field, create_model -from pydantic.types import NonNegativeInt -from sqlalchemy import null -from sqlalchemy.sql.schema import Column - -warnings.warn( - "This is still a concept under development. " - "Currently only inteded for testing. " - "DO NOT USE in production.", - category=UserWarning, -) - - -class OrmConfig(BaseConfig): - orm_mode = True - - -_RESERVED = { - "schema", - # e.g. Field name "schema" shadows a BaseModel attribute; use a different field name with "alias='schema'". 
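# --- illustrative sketch (not repo code) --------------------------------------
# The helpers in common_validators above are attached to individual fields.
# Minimal sketch (pydantic v2 only, made-up model `_Card`) combining a
# BeforeValidator built like trim_string_before with an empty-string-to-None
# pre-validator; both are re-implemented inline so the snippet is self-contained.
from typing import Annotated, Any

from pydantic import BaseModel, BeforeValidator


def _trim_before(max_length: int) -> BeforeValidator:
    def _trim(value: Any):
        return value[:max_length] if isinstance(value, str) else value

    return BeforeValidator(_trim)


def _empty_str_to_none(value: Any):
    return None if isinstance(value, str) and value.strip() == "" else value


class _Card(BaseModel):
    title: Annotated[str, _trim_before(10)]
    thumbnail: Annotated[str | None, BeforeValidator(_empty_str_to_none)] = None


card = _Card(title="a very long service title", thumbnail="")
assert card.title == "a very lon"  # trimmed to 10 characters before validation
assert card.thumbnail is None  # empty string coerced to None
# ------------------------------------------------------------------------------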
-} - - -def _eval_defaults( - column: Column, pydantic_type: type, *, include_server_defaults: bool = True -): - """ - Uses some heuristics to determine the default value/factory produced - parsing both the client and the server (if include_server_defaults==True) defaults - in the sa model. - """ - default: Optional[Any] = None - default_factory: Optional[Callable] = None - - if ( - column.default is None - and (include_server_defaults and column.server_default is None) - and not column.nullable - ): - default = ... - - if column.default and column.default.is_scalar: - assert not column.default.is_server_default # nosec - default = column.default.arg - - if include_server_defaults and column.server_default: - assert column.server_default.is_server_default # nosec - # - # FIXME: Map server's DefaultClauses to correct values - # Heuristics based on test against all our tables - # - if pydantic_type: - if issubclass(pydantic_type, list): - assert column.server_default.arg == "{}" # nosec - default_factory = list - elif issubclass(pydantic_type, dict): - assert column.server_default.arg.text.endswith("::jsonb") # nosec - default = json.loads( - column.server_default.arg.text.replace("::jsonb", "").replace( - "'", "" - ) - ) - elif issubclass(pydantic_type, datetime): - assert isinstance( # nosec - column.server_default.arg, - (type(null()), sqlalchemy.sql.functions.now), - ) - default_factory = datetime.now - return default, default_factory - - -PolicyCallable = Callable[[Column, Any, type], tuple[Any, type]] - - -def eval_name_policy(column: Column, default: Any, pydantic_type: type): - """All string columns including 'uuid' in their name are set as UUIDs""" - new_default, new_pydantic_type = default, pydantic_type - if "uuid" in str(column.name).split("_") and pydantic_type == str: - new_pydantic_type = UUID - if isinstance(default, str): - new_default = UUID(default) - return new_default, new_pydantic_type - - -DEFAULT_EXTRA_POLICIES = [ - eval_name_policy, -] - - -def create_pydantic_model_from_sa_table( - table: sa.Table, - *, - config: type = OrmConfig, - exclude: Optional[Container[str]] = None, - include_server_defaults: bool = False, - extra_policies: Optional[list[PolicyCallable]] = None, -) -> type[BaseModel]: - - fields = {} - exclude = exclude or [] - extra_policies = extra_policies or DEFAULT_EXTRA_POLICIES - - for column in table.columns: - name = str(column.name) - - if name in exclude: - continue - - field_args: dict[str, Any] = {} - - if name in _RESERVED: - field_args["alias"] = name - name = f"{table.name.lower()}_{name}" - - # type --- - pydantic_type: Optional[type] = None - if hasattr(column.type, "impl"): - if hasattr(column.type.impl, "python_type"): - pydantic_type = column.type.impl.python_type - elif hasattr(column.type, "python_type"): - pydantic_type = column.type.python_type - - assert pydantic_type, f"Could not infer pydantic_type for {column}" # nosec - - # big integer primary keys - if column.primary_key and issubclass(pydantic_type, int): - pydantic_type = NonNegativeInt - - # default ---- - default, default_factory = _eval_defaults( - column, pydantic_type, include_server_defaults=include_server_defaults - ) - - # Policies based on naming conventions - # - # TODO: implement it as a pluggable policy class. 
- # Base policy class is abstract interface - # and user can add as many in a given order in the arguments - # - for apply_policy in extra_policies: - default, pydantic_type = apply_policy(column, default, pydantic_type) - - if default_factory: - field_args["default_factory"] = default_factory - else: - field_args["default"] = default - - if hasattr(column, "doc") and column.doc: - field_args["description"] = column.doc - - fields[name] = (pydantic_type, Field(**field_args)) - - # create domain models from db-schemas - pydantic_model = create_model( - table.name.capitalize(), __config__=config, **fields # type: ignore - ) - assert issubclass(pydantic_model, BaseModel) # nosec - return pydantic_model diff --git a/packages/models-library/src/models_library/utils/docker_compose.py b/packages/models-library/src/models_library/utils/docker_compose.py index e74c0945740..cec056f02b3 100644 --- a/packages/models-library/src/models_library/utils/docker_compose.py +++ b/packages/models-library/src/models_library/utils/docker_compose.py @@ -1,6 +1,7 @@ import yaml -from .string_substitution import SubstitutionsDict, TemplateText +from ..service_settings_labels import ComposeSpecLabelDict +from .string_substitution import SubstitutionsDict, TextTemplate # Notes on below env var names: # - SIMCORE_REGISTRY will be replaced by the url of the simcore docker registry @@ -17,7 +18,7 @@ def replace_env_vars_in_compose_spec( - service_spec: "ComposeSpecLabel", + service_spec: ComposeSpecLabelDict, *, replace_simcore_registry: str, replace_service_version: str, @@ -29,7 +30,7 @@ def replace_env_vars_in_compose_spec( content: str = yaml.safe_dump(service_spec) - template = TemplateText(content) + template = TextTemplate(content) substitutions = SubstitutionsDict( { "SERVICE_VERSION": replace_service_version, diff --git a/packages/models-library/src/models_library/utils/enums.py b/packages/models-library/src/models_library/utils/enums.py index b8e624ea3be..7f0ff7eaf48 100644 --- a/packages/models-library/src/models_library/utils/enums.py +++ b/packages/models-library/src/models_library/utils/enums.py @@ -1,8 +1,27 @@ -from enum import Enum, unique +import inspect +from enum import Enum, StrEnum, unique +from typing import Any @unique -class StrAutoEnum(str, Enum): +class StrAutoEnum(StrEnum): @staticmethod def _generate_next_value_(name, start, count, last_values): return name.upper() + + +def enum_to_dict(enum_cls: type[Enum]) -> dict[str, Any]: + return {m.name: m.value for m in enum_cls} + + +def are_equivalent_enums(enum_cls1: type[Enum], enum_cls2: type[Enum]) -> bool: + assert inspect.isclass(enum_cls1) # nosec + assert issubclass(enum_cls1, Enum) # nosec + assert inspect.isclass(enum_cls2) # nosec + assert issubclass(enum_cls2, Enum) # nosec + + try: + return enum_to_dict(enum_cls1) == enum_to_dict(enum_cls2) + + except (AttributeError, TypeError): + return False diff --git a/packages/models-library/src/models_library/utils/fastapi_encoders.py b/packages/models-library/src/models_library/utils/fastapi_encoders.py index d42ce159fa9..bf7775de0ba 100644 --- a/packages/models-library/src/models_library/utils/fastapi_encoders.py +++ b/packages/models-library/src/models_library/utils/fastapi_encoders.py @@ -7,10 +7,9 @@ # try: - from fastapi.encoders import jsonable_encoder + from fastapi.encoders import jsonable_encoder # type: ignore[import-not-found] except ImportError: # for aiohttp-only services - # Taken 'as is' from https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py # to be used in 
aiohttp-based services w/o having to install fastapi # @@ -20,3 +19,5 @@ from ._original_fastapi_encoders import jsonable_encoder servicelib_jsonable_encoder = jsonable_encoder # alias + +__all__ = ("jsonable_encoder",) diff --git a/packages/models-library/src/models_library/utils/json_schema.py b/packages/models-library/src/models_library/utils/json_schema.py index f6c381c1f57..1c5afc4ca55 100644 --- a/packages/models-library/src/models_library/utils/json_schema.py +++ b/packages/models-library/src/models_library/utils/json_schema.py @@ -11,7 +11,7 @@ from collections.abc import Sequence from contextlib import suppress from copy import deepcopy -from typing import Any, Dict, Tuple +from typing import Any import jsonschema from jsonschema import validators @@ -35,13 +35,12 @@ def set_defaults(validator, properties, instance, schema): if "default" in subschema: instance.setdefault(prop, subschema["default"]) - for error in validate_properties( + yield from validate_properties( validator, properties, instance, schema, - ): - yield error + ) return validators.extend( validator_class, @@ -54,7 +53,7 @@ def set_defaults(validator, properties, instance, schema): def jsonschema_validate_data( - instance: Any, schema: Dict[str, Any], *, return_with_default: bool = False + instance: Any, schema: dict[str, Any], *, return_with_default: bool = False ): """Checks whether data satisfies schema contract @@ -72,20 +71,20 @@ def jsonschema_validate_data( return out -def jsonschema_validate_schema(schema: Dict[str, Any]): +def jsonschema_validate_schema(schema: dict[str, Any]): """Checks whether schema is a valid json-schema :raises InvalidJsonSchema """ with suppress(jsonschema.ValidationError): - dummy_data = {} + dummy_data: dict = {} validators.validate(instance=dummy_data, schema=schema) return schema def any_ref_key(obj): if isinstance(obj, dict): - return "$ref" in obj.keys() or any_ref_key(tuple(obj.values())) + return "$ref" in obj or any_ref_key(tuple(obj.values())) if isinstance(obj, Sequence) and not isinstance(obj, str): return any(any_ref_key(v) for v in obj) @@ -93,7 +92,7 @@ def any_ref_key(obj): return False -__all__: Tuple[str, ...] = ( +__all__: tuple[str, ...] = ( "any_ref_key", "InvalidJsonSchema", "jsonschema_validate_data", diff --git a/packages/models-library/src/models_library/utils/labels_annotations.py b/packages/models-library/src/models_library/utils/labels_annotations.py new file mode 100644 index 00000000000..e08f66e71cb --- /dev/null +++ b/packages/models-library/src/models_library/utils/labels_annotations.py @@ -0,0 +1,69 @@ +"""Image labels annotations + +osparc expects the service configuration (in short: config) attached to the service's image as label annotations. +This module defines how this config is serialized/deserialized to/from docker labels annotations +""" + +from json.decoder import JSONDecodeError +from typing import Any, TypeAlias + +from common_library.json_serialization import json_dumps, json_loads + +LabelsAnnotationsDict: TypeAlias = dict[str, str | float | bool | None] + +# SEE https://docs.docker.com/config/labels-custom-metadata/#label-keys-and-values +# "Authors of third-party tools should prefix each label key with the reverse DNS notation of a +# domain they own, such as com.example.some-label "" +# FIXME: review and define a z43-wide inverse DNS e.g. 
swiss.z43 +OSPARC_LABEL_PREFIXES = ( + "io.simcore", + "simcore.service", +) + + +def to_labels( + config: dict[str, Any], *, prefix_key: str, trim_key_head: bool = True +) -> LabelsAnnotationsDict: + """converts config into labels annotations""" + + # FIXME: null is loaded as 'null' string value? is that correct? json -> None upon deserialization? + labels: LabelsAnnotationsDict = {} + for key, value in config.items(): + if trim_key_head: + if isinstance(value, str): + # Avoids double quotes, i.e. '"${VERSION}"' + label = value + else: + label = json_dumps(value, sort_keys=False) + else: + label = json_dumps({key: value}, sort_keys=False) + + # NOTE: docker-compose env var interpolation gets confused with schema's '$ref' and + # will replace it '$ref' with an empty string. + if isinstance(label, str) and "$ref" in label: + label = label.replace("$ref", "$$ref") + + labels[f"{prefix_key}.{key}"] = label + + return labels + + +def from_labels( + labels: LabelsAnnotationsDict, *, prefix_key: str, trim_key_head: bool = True +) -> dict[str, Any]: + """convert labels annotations into config""" + config: dict[str, Any] = {} + for key, label in labels.items(): + if key.startswith(f"{prefix_key}."): + try: + value = json_loads(label) + except JSONDecodeError: + value = label + + if not trim_key_head: + if isinstance(value, dict): + config.update(value) + else: + config[key.replace(f"{prefix_key}.", "")] = value + + return config diff --git a/packages/models-library/src/models_library/utils/misc.py b/packages/models-library/src/models_library/utils/misc.py deleted file mode 100644 index c06dede6056..00000000000 --- a/packages/models-library/src/models_library/utils/misc.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Any, Dict, List, Type, Union - -from pydantic import BaseModel -from pydantic.config import SchemaExtraCallable - - -def extract_examples(model_cls: Type[BaseModel]) -> List[Dict[str, Any]]: - """Extracts examples from pydantic classes""" - - examples = [] - - schema_extra: Union[ - Dict[str, Any], SchemaExtraCallable - ] = model_cls.__config__.schema_extra - - if isinstance(schema_extra, dict): - # NOTE: Sometimes an example (singular) mistaken - # by exampleS. The assertions below should - # help catching this error while running tests - - examples = schema_extra.get("examples", []) - assert isinstance(examples, list) # nosec - - if example := schema_extra.get("example"): - assert not isinstance(example, list) # nosec - examples.append(example) - - # TODO: treat SchemaExtraCallable case (so far we only have one example) - # TODO: extract examples from single fields and compose model? 
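# --- illustrative sketch (not repo code) --------------------------------------
# to_labels/from_labels above store the service config as JSON strings under
# prefixed docker-label keys. Reduced, self-contained round-trip sketch using
# plain json; the prefix and key names come from examples in this diff, but
# the functions below are simplified stand-ins, not the repo implementation.
import json
from typing import Any

_PREFIX = "io.simcore"


def _to_labels(config: dict[str, Any]) -> dict[str, str]:
    return {f"{_PREFIX}.{key}": json.dumps(value) for key, value in config.items()}


def _from_labels(labels: dict[str, str]) -> dict[str, Any]:
    return {
        key.removeprefix(f"{_PREFIX}."): json.loads(value)
        for key, value in labels.items()
        if key.startswith(f"{_PREFIX}.")
    }


config = {"name": "oSparc Python Runner", "min-visible-inputs": 2}
assert _from_labels(_to_labels(config)) == config  # lossless round trip
# ------------------------------------------------------------------------------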
- - return examples diff --git a/packages/models-library/src/models_library/utils/nodes.py b/packages/models-library/src/models_library/utils/nodes.py index 1953531a0e3..fd542768f83 100644 --- a/packages/models-library/src/models_library/utils/nodes.py +++ b/packages/models-library/src/models_library/utils/nodes.py @@ -1,26 +1,26 @@ import hashlib import json import logging +from collections.abc import Callable, Coroutine from copy import deepcopy -from typing import Any, Callable, Coroutine, Dict +from typing import Any -from pydantic import BaseModel +from pydantic import BaseModel, TypeAdapter from ..projects import Project -from ..projects_nodes import NodeID -from ..projects_nodes_io import PortLink +from ..projects_nodes_io import NodeID, PortLink, UUIDStr -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) def project_node_io_payload_cb( project: Project, -) -> Callable[[NodeID], Coroutine[Any, Any, Dict[str, Any]]]: +) -> Callable[[NodeID], Coroutine[Any, Any, dict[str, Any]]]: """callback fct to use together with compute_node_hash when a Project as input""" - async def node_io_payload_cb(node_id: NodeID) -> Dict[str, Any]: - node_io_payload = {"inputs": None, "outputs": None} - node = project.workbench.get(str(node_id)) + async def node_io_payload_cb(node_id: NodeID) -> dict[str, Any]: + node_io_payload: dict[str, Any] = {"inputs": None, "outputs": None} + node = project.workbench.get(TypeAdapter(UUIDStr).validate_python(node_id)) if node: node_io_payload = {"inputs": node.inputs, "outputs": node.outputs} @@ -31,13 +31,13 @@ async def node_io_payload_cb(node_id: NodeID) -> Dict[str, Any]: async def compute_node_hash( node_id: NodeID, - get_node_io_payload_cb: Callable[[NodeID], Coroutine[Any, Any, Dict[str, Any]]], + get_node_io_payload_cb: Callable[[NodeID], Coroutine[Any, Any, dict[str, Any]]], ) -> str: # resolve the port links if any and get only the payload node_payload = deepcopy(await get_node_io_payload_cb(node_id)) assert all(k in node_payload for k in ["inputs", "outputs"]) # nosec - resolved_payload = {} + resolved_payload: dict[str, Any] = {} for port_type in ["inputs", "outputs"]: port_type_payloads = node_payload.get(port_type) @@ -58,13 +58,15 @@ async def compute_node_hash( # ensure we do not get pydantic types for hashing here, only jsoneable stuff if isinstance(payload, BaseModel): - payload = payload.dict(by_alias=True, exclude_unset=True) + payload = payload.model_dump(by_alias=True, exclude_unset=True) # remove the payload if it is null and it was resolved if payload is not None: resolved_payload[port_type][port_key] = payload - # now create the hash + # WARNING: Here we cannot change to json_serialization.json_dumps because if would create a different dump string and therefore a different hash + # typically test_node_ports_v2_serialization_v2.py::test_dump will fail if you do this change. 
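# --- illustrative sketch (not repo code) --------------------------------------
# compute_node_hash (continued just below) hashes the resolved inputs/outputs
# payload with the standard-library json dump using sort_keys=True, so the
# digest is stable regardless of key order. Tiny self-contained sketch of that
# hashing step only; the payload values are made up.
import hashlib
import json

payload = {"inputs": {"x": 1, "y": "data"}, "outputs": None}
reordered = {"outputs": None, "inputs": {"y": "data", "x": 1}}


def _digest(data) -> str:
    block = json.dumps(data, sort_keys=True).encode("utf-8")
    return hashlib.sha256(block).hexdigest()


assert _digest(payload) == _digest(reordered)  # key order does not matter
# ------------------------------------------------------------------------------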
+ # NOTE that these hashes might have been already stored elsewhere block_string = json.dumps(resolved_payload, sort_keys=True).encode("utf-8") raw_hash = hashlib.sha256(block_string) return raw_hash.hexdigest() diff --git a/packages/models-library/src/models_library/utils/pydantic_models_factory.py b/packages/models-library/src/models_library/utils/pydantic_models_factory.py deleted file mode 100644 index 4361364902a..00000000000 --- a/packages/models-library/src/models_library/utils/pydantic_models_factory.py +++ /dev/null @@ -1,212 +0,0 @@ -""" Collection of functions to create BaseModel subclasses - - -Maintaining large models representing a resource can be challenging, specially when every interface (e.g. i/o rest API, i/o db, ...) -needs a slightly different variation of the original domain model. For instance, assume we want to implement an API with CRUD -routes on a resource R. This needs similar models for the request bodies and response payloads to represent R. A careful analysis -reveals that these models are all basically variants that include/exclude fields and/or changes constraints on them (e.g. read-only, -nullable, optional/required, etc). - -These variants are typically achived by splitting common fields into smaller models and using inheritance to compose them back -and/or override constraints. Nonetheless, this approach can be very tedious to maintain: it is very verbose and difficult to -see the final model layout. In addition, new variants that exclude fields will force to redesign how all models were split in -the first place. - -In order to overcome these drawbacks, we propose here a functional approach based on a model's factory that can "copy the -necessary parts" from a "reference" model and build a new pydantic model that can be either used as new base or as is. - -The design should remain as close to pydantic's conventions as possible to reduce maintenance costs -since we are aware that future releases of pydantic will address part of the features we implement here -(e.g. exclude fields) - -Usage of these tools are demonstrated in packages/models-library/tests/test_utils_pydantic_models_factory.py -""" -import json -import warnings -from typing import Dict, Iterable, Optional, Set, Tuple, Type - -from pydantic import BaseModel, create_model, validator -from pydantic.fields import ModelField, Undefined -from pydantic.main import BaseConfig - -warnings.warn( - "This is still a concept under development. " - "SEE https://github.com/ITISFoundation/osparc-simcore/issues/2725" - "Currently only inteded for testing. " - "DO NOT USE in production.", - category=UserWarning, -) - - -def collect_fields_attrs(model_cls: Type[BaseModel]) -> Dict[str, Dict[str, str]]: - """ - - >>> class MyModel(BaseModel): - ... x : int - ... 
- >>> print(json.dumps(collect_fields_attrs(MyModel), indent=1)) - { - "x": { - "type_": " - int", - "outer_type_": " - int", - "sub_fields": "None", - "key_field": "None", - "validators": "[\" - int_validator\"]", - "pre_validators": "None", - "post_validators": "None", - "default": "None", - "default_factory": "None", - "required": "True", - "model_config": " - Config", - "name": "x", - "alias": "x", - "has_alias": "False", - "field_info": "default=PydanticUndefined extra={}", - "validate_always": "False", - "allow_none": "False", - "shape": "1", - "class_validators": "{}", - "parse_json": "False" - } - } - """ - - def _stringify(obj): - if callable(obj): - obj_str = f"{getattr(obj, '__class__', None)} - {obj.__name__}" - elif isinstance(obj, dict): - obj_str = json.dumps( - {f"{key}": _stringify(value) for key, value in obj.items()} - ) - elif isinstance(obj, list): - obj_str = json.dumps([_stringify(item) for item in obj]) - - else: - obj_str = f"{obj}" - if "object" in obj_str: - obj_str = obj_str.split("object")[0] - - assert obj_str # nosec - return obj_str - - return { - field.name: { - attr_name: _stringify(getattr(field, attr_name)) - for attr_name in ModelField.__slots__ - } - for field in model_cls.__fields__.values() - } - - -def _eval_selection( - model_fields: Iterable[ModelField], - include: Optional[Set[str]], - exclude: Optional[Set[str]], - exclude_optionals: bool, -) -> Set[str]: - # TODO: use dict for deep include/exclude! SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/ - - if include is None: - include = set(f.name for f in model_fields) - if exclude is None: - exclude = set() - if exclude_optionals: - exclude = exclude.union( - set(f.name for f in model_fields if f.required == False) - ) - - selection = include - exclude - return selection - - -def _extract_field_definitions( - model_cls: Type[BaseModel], - *, - include: Optional[Set[str]], - exclude: Optional[Set[str]], - exclude_optionals: bool, - set_all_optional: bool, -) -> Dict[str, Tuple]: - """ - Returns field_definitions: fields of the model in the format - `=(, )` or `=`, - e.g. - `foobar=(str, ...)` or `foobar=123`, - - or, for complex use-cases, in the format - `=`, - e.g. 
- `foo=Field(default_factory=datetime.utcnow, alias='bar')` - - """ - field_names = _eval_selection( - model_cls.__fields__.values(), include, exclude, exclude_optionals - ) - field_definitions = {} - - field: ModelField - - for field in model_cls.__fields__.values(): - if field.name in field_names: - field_definitions[field.name] = ( - # - field.type_ if field.type_ == field.outer_type_ else field.outer_type_, - # - field.default - or field.default_factory - or (None if set_all_optional or not field.required else Undefined), - ) - return field_definitions - - -def copy_model( - reference_cls: Type[BaseModel], - *, - name: str = None, - include: Optional[Set[str]] = None, - exclude: Optional[Set[str]] = None, - exclude_optionals: bool = False, - as_update_model: bool = False, - skip_validators: bool = False, - __config__: Type[BaseConfig] = None, -) -> Type[BaseModel]: - """ - Creates a clone of `reference_cls` with a different name and a subset of fields - - - skip_validators: when data source is already validated, there is not need to use these - validators - """ - name = name or f"_Base{reference_cls.__name__.upper()}" - - # FIELDS - fields_definitions = _extract_field_definitions( - reference_cls, - exclude=exclude, - include=include, - exclude_optionals=exclude_optionals, - set_all_optional=as_update_model, - ) - - # VALIDATORS - - validators_funs: Dict[str, classmethod] = {} - # A dict of method names and @validator class methods - # SEE example in https://pydantic-docs.helpmanual.io/usage/models/#dynamic-model-creation - if not skip_validators and reference_cls != BaseModel: - for n, vals in reference_cls.__validators__.items(): - for i, v in enumerate(vals): - validators_funs[f"{n}_validator_{i}"] = validator(n, allow_reuse=True)( - v.func - ) - - new_model_cls = create_model( - name, - __config__=__config__, - __base__=BaseModel, - __module__=reference_cls.__module__, - __validators__=validators_funs, - **fields_definitions, - ) - - return new_model_cls diff --git a/packages/models-library/src/models_library/utils/pydantic_tools_extension.py b/packages/models-library/src/models_library/utils/pydantic_tools_extension.py new file mode 100644 index 00000000000..81112418c56 --- /dev/null +++ b/packages/models-library/src/models_library/utils/pydantic_tools_extension.py @@ -0,0 +1,12 @@ +from typing import TypeVar + +from pydantic import TypeAdapter, ValidationError + +T = TypeVar("T") + + +def parse_obj_or_none(type_: type[T], obj) -> T | None: + try: + return TypeAdapter(type_).validate_python(obj) + except ValidationError: + return None diff --git a/packages/models-library/src/models_library/utils/services_io.py b/packages/models-library/src/models_library/utils/services_io.py index 67225495e49..f7dbce8dcea 100644 --- a/packages/models-library/src/models_library/utils/services_io.py +++ b/packages/models-library/src/models_library/utils/services_io.py @@ -1,34 +1,39 @@ import mimetypes from copy import deepcopy -from typing import Any, Literal, Optional, Union +from typing import Any, Literal -from pydantic import schema_of +from pydantic import TypeAdapter from ..services import ServiceInput, ServiceOutput -from ..services_constants import PROPERTY_TYPE_TO_PYTHON_TYPE_MAP +from ..services_regex import PROPERTY_TYPE_TO_PYTHON_TYPE_MAP PortKindStr = Literal["input", "output"] JsonSchemaDict = dict[str, Any] + _PROPERTY_TYPE_TO_SCHEMAS = { - property_type: schema_of(python_type, title=property_type.capitalize()) + property_type: { + **TypeAdapter(python_type).json_schema(), + "title": 
property_type.capitalize(), + } for property_type, python_type in PROPERTY_TYPE_TO_PYTHON_TYPE_MAP.items() } -def guess_media_type(io: Union[ServiceInput, ServiceOutput]) -> str: +def guess_media_type(io: ServiceInput | ServiceOutput) -> str: # SEE https://docs.python.org/3/library/mimetypes.html # SEE https://www.iana.org/assignments/media-types/media-types.xhtml media_type = io.property_type.removeprefix("data:") if media_type == "*/*" and io.file_to_key_map: - filename = list(io.file_to_key_map.keys())[0] - media_type, _ = mimetypes.guess_type(filename) - if media_type is None: - media_type = "*/*" + filename = next(iter(io.file_to_key_map.keys())) + guessed_media_type, _ = mimetypes.guess_type(filename) + if guessed_media_type is None: + return "*/*" + return guessed_media_type return media_type -def update_schema_doc(schema: dict[str, Any], port: Union[ServiceInput, ServiceOutput]): +def update_schema_doc(schema: dict[str, Any], port: ServiceInput | ServiceOutput): schema["title"] = port.label if port.label != port.description: schema["description"] = port.description @@ -36,8 +41,8 @@ def update_schema_doc(schema: dict[str, Any], port: Union[ServiceInput, ServiceO def get_service_io_json_schema( - port: Union[ServiceInput, ServiceOutput] -) -> Optional[JsonSchemaDict]: + port: ServiceInput | ServiceOutput, +) -> JsonSchemaDict | None: """Get json-schema for a i/o service For legacy metadata with property_type = integer, etc ... , it applies a conversion diff --git a/packages/models-library/src/models_library/utils/specs_substitution.py b/packages/models-library/src/models_library/utils/specs_substitution.py new file mode 100644 index 00000000000..d1278d69912 --- /dev/null +++ b/packages/models-library/src/models_library/utils/specs_substitution.py @@ -0,0 +1,132 @@ +from typing import Any, NamedTuple, TypeAlias, cast + +from common_library.errors_classes import OsparcErrorMixin +from common_library.json_serialization import json_dumps, json_loads +from pydantic import StrictBool, StrictFloat, StrictInt + +from .string_substitution import ( + SubstitutionsDict, + TextTemplate, + substitute_all_legacy_identifiers, +) + +# This constraint on substitution values is to avoid +# deserialization issues on the TextTemplate substitution! +SubstitutionValue: TypeAlias = StrictBool | StrictInt | StrictFloat | str + + +class IdentifierSubstitutionError(OsparcErrorMixin, KeyError): + msg_template: str = ( + "Was not able to substitute identifier " + "'{name}'. It was not found in: {substitutions}" + ) + + +class _EnvVarData(NamedTuple): + substitution_identifier: str + identifier_name: str + default_value: Any | None + + +class SpecsSubstitutionsResolver: + """ + Resolve specs dict by substituting identifiers + + 'specs' is defined here as dict[str, Any]. E.g. a docker-compose.yml loaded as a dict are 'specs'. 
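A minimal usage sketch of the SpecsSubstitutionsResolver added above, assuming it is driven as its methods suggest (get_identifiers, set_substitutions, run). The compose-like keys and identifier names are invented for illustration; the ':-' fallback syntax follows the extension described further down in string_substitution.py.

from models_library.utils.specs_substitution import SpecsSubstitutionsResolver

specs = {  # hypothetical docker-compose-like specs
    "services": {
        "jupyter": {
            "environment": [
                "DISPLAY=${OSPARC_VARIABLE_DISPLAY}",
                "PORT=${OSPARC_VARIABLE_PORT:-8888}",  # ':-' provides a fallback value
            ]
        }
    }
}

resolver = SpecsSubstitutionsResolver(specs, upgrade=False)
print(resolver.get_identifiers())  # identifiers found in the specs, in order of appearance

# only DISPLAY is provided; PORT falls back to its declared default
resolver.set_substitutions({"OSPARC_VARIABLE_DISPLAY": ":0"})
new_specs = resolver.run()  # safe=True by default: unresolved identifiers are left as-is
print(new_specs["services"]["jupyter"]["environment"])  # expected: ['DISPLAY=:0', 'PORT=8888']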
+ + """ + + def __init__(self, specs: dict[str, Any], *, upgrade: bool): + self._template = self._create_text_template(specs, upgrade=upgrade) + self._substitutions: SubstitutionsDict = SubstitutionsDict() + + @classmethod + def _create_text_template( + cls, specs: dict[str, Any], *, upgrade: bool + ) -> TextTemplate: + # convert to yaml (less symbols as in json) + service_spec_str: str = json_dumps(specs) + + if upgrade: # legacy + service_spec_str = substitute_all_legacy_identifiers(service_spec_str) + + # template + template = TextTemplate(service_spec_str) + assert template.is_valid() # nosec + + return template + + def get_identifiers(self) -> list[str]: + """lists identifiers in specs in order of apperance. Can have repetitions""" + output: list[str] = self._template.get_identifiers() + return output + + def get_replaced(self) -> set[str]: + return self._substitutions.used + + @property + def substitutions(self): + return self._substitutions + + def set_substitutions( + self, mappings: dict[str, SubstitutionValue] + ) -> SubstitutionsDict: + """ + NOTE: ONLY targets identifiers declared in the specs + NOTE:`${identifier:-a_default_value}` will replace the identifier with `a_default_value` + if not provided + """ + + required_identifiers = self.get_identifiers() + + required_identifiers_with_defaults: list[_EnvVarData] = [] + for identifier in required_identifiers: + parts = identifier.split(":-", maxsplit=1) + required_identifiers_with_defaults.append( + _EnvVarData( + substitution_identifier=identifier, + identifier_name=parts[0], + default_value=( + parts[1] if len(parts) == 2 else None # noqa: PLR2004 + ), + ) + ) + + resolved_identifiers: dict[str, str] = {} + for env_var_data in required_identifiers_with_defaults: + if env_var_data.identifier_name in mappings: + resolved_identifiers[env_var_data.substitution_identifier] = cast( + str, mappings[env_var_data.identifier_name] + ) + # NOTE: default is used only if not found in the provided substitutions + elif env_var_data.default_value is not None: + resolved_identifiers[ + env_var_data.substitution_identifier + ] = env_var_data.default_value + + # picks only needed for substitution + self._substitutions = SubstitutionsDict(resolved_identifiers) + return self._substitutions + + def run(self, *, safe: bool = True) -> dict[str, Any]: + """ + Keyword Arguments: + safe -- if False will raise an error if not all identifiers + are substituted (default: {True}) + + Raises: + IdentifierSubstitutionError: when identifier is not found and safe is False + """ + try: + new_specs_txt: str = ( + self._template.safe_substitute(self._substitutions) + if safe + else self._template.substitute(self._substitutions) + ) + new_specs = json_loads(new_specs_txt) + assert isinstance(new_specs, dict) # nosec + return new_specs + except KeyError as e: + raise IdentifierSubstitutionError( + name=e.args[0], substitutions=self._substitutions + ) from e diff --git a/packages/models-library/src/models_library/utils/string_substitution.py b/packages/models-library/src/models_library/utils/string_substitution.py index e30ecc09c6d..e9a3a6d71d2 100644 --- a/packages/models-library/src/models_library/utils/string_substitution.py +++ b/packages/models-library/src/models_library/utils/string_substitution.py @@ -5,9 +5,9 @@ import sys from collections import UserDict from string import Template -from typing import Any +from typing import Any, Final -OSPARC_IDENTIFIER_PREFIX = "OSPARC_ENVIRONMENT" +OSPARC_IDENTIFIER_PREFIX: Final[str] = "OSPARC_VARIABLE_" def 
upgrade_identifier(identifier: str) -> str: @@ -18,7 +18,8 @@ def upgrade_identifier(identifier: str) -> str: identifier = re.sub(r"[.-]", "_", identifier) identifier = identifier.upper() if not identifier.startswith(OSPARC_IDENTIFIER_PREFIX): - identifier = f"{OSPARC_IDENTIFIER_PREFIX}_{identifier}" + assert OSPARC_IDENTIFIER_PREFIX.endswith("_") # nosec + identifier = OSPARC_IDENTIFIER_PREFIX + identifier return identifier @@ -29,7 +30,7 @@ def upgrade_identifier(identifier: str) -> str: def substitute_all_legacy_identifiers(text: str) -> str: """Substitutes all legacy identifiers found in the text by the new format expected in TemplateText - For instance: '%%this-identifier%%' will be substituted by '$OSPARC_ENVIRONMENT_THIS_IDENTIFIER' + For instance: '%%this-identifier%%' will be substituted by '$OSPARC_VARIABLE_THIS_IDENTIFIER' """ def _upgrade(match): @@ -40,7 +41,7 @@ def _upgrade(match): return re.sub(_LEGACY_IDENTIFIER_RE_PATTERN, _upgrade, text) -class TemplateText(Template): +class TextTemplate(Template): """Template strings support `$`-based substitutions, using the following rules: - `$$` is an escape; it is replaced with a single `$`. @@ -51,9 +52,14 @@ class TemplateText(Template): - `${identifier}` is equivalent to `$identifier`. It is required when valid identifier characters follow the placeholder but are not part of the placeholder, such as `"${noun}ification"`. + EXTENSION: + - `${identifier:-a_default_value}` is now also supported. + SEE https://docs.python.org/3/library/string.html#template-strings """ + idpattern = r"(?a:[_a-z][_a-z0-9]*)(?::-(.*?))?" + if sys.version_info < (3, 11): # Backports methods added in py 3.11 # NOTE: Keep it compatible with multiple version @@ -69,9 +75,8 @@ def is_valid(self): ): # If all the groups are None, there must be # another group we're not expecting - raise ValueError( - "Unrecognized named group in pattern", self.pattern - ) + msg = "Unrecognized named group in pattern" + raise ValueError(msg, self.pattern) return True def get_identifiers(self): @@ -88,9 +93,8 @@ def get_identifiers(self): ): # If all the groups are None, there must be # another group we're not expecting - raise ValueError( - "Unrecognized named group in pattern", self.pattern - ) + msg = "Unrecognized named group in pattern" + raise ValueError(msg, self.pattern) return ids @@ -109,4 +113,4 @@ def __getitem__(self, key) -> Any: @property def unused(self): - return {key for key in self.keys() if key not in self.used} + return {key for key in self if key not in self.used} diff --git a/packages/models-library/src/models_library/wallets.py b/packages/models-library/src/models_library/wallets.py new file mode 100644 index 00000000000..29d12226972 --- /dev/null +++ b/packages/models-library/src/models_library/wallets.py @@ -0,0 +1,69 @@ +from datetime import datetime +from decimal import Decimal +from enum import auto +from typing import TypeAlias + +from pydantic import BaseModel, ConfigDict, Field, PositiveInt + +from .utils.enums import StrAutoEnum + +WalletID: TypeAlias = PositiveInt + + +class WalletStatus(StrAutoEnum): + ACTIVE = auto() + INACTIVE = auto() + + +class WalletInfo(BaseModel): + wallet_id: WalletID + wallet_name: str + wallet_credit_amount: Decimal + + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "wallet_id": 1, + "wallet_name": "My Wallet", + "wallet_credit_amount": Decimal(10), # type: ignore[dict-item] + } + ] + } + ) + + +ZERO_CREDITS = Decimal(0) + +# +# DB +# + + +class WalletDB(BaseModel): + wallet_id: WalletID + 
name: str + description: str | None + owner: PositiveInt = Field( + ..., + description="GID of the group that owns this wallet", + ) + thumbnail: str | None + status: WalletStatus = Field( + ..., + description="Wallet status (ACTIVE or INACTIVE)", + ) + created: datetime = Field( + ..., + description="Timestamp on creation", + ) + modified: datetime = Field( + ..., + description="Timestamp of last modification", + ) + + +class UserWalletDB(WalletDB): + read: bool + write: bool + delete: bool diff --git a/packages/models-library/src/models_library/workspaces.py b/packages/models-library/src/models_library/workspaces.py new file mode 100644 index 00000000000..ca31304a869 --- /dev/null +++ b/packages/models-library/src/models_library/workspaces.py @@ -0,0 +1,82 @@ +from datetime import datetime +from enum import auto +from typing import TypeAlias + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PositiveInt, + ValidationInfo, + field_validator, +) + +from .access_rights import AccessRights +from .groups import GroupID +from .users import UserID +from .utils.enums import StrAutoEnum + +WorkspaceID: TypeAlias = PositiveInt + + +class WorkspaceScope(StrAutoEnum): + PRIVATE = auto() + SHARED = auto() + ALL = auto() + + +class WorkspaceQuery(BaseModel): + workspace_scope: WorkspaceScope + workspace_id: PositiveInt | None = None + + @field_validator("workspace_id", mode="before") + @classmethod + def _validate_workspace_id(cls, value, info: ValidationInfo): + scope = info.data.get("workspace_scope") + if scope == WorkspaceScope.SHARED and value is None: + msg = f"workspace_id must be provided when workspace_scope is SHARED. Got {scope=}, {value=}" + raise ValueError(msg) + + if scope != WorkspaceScope.SHARED and value is not None: + msg = f"workspace_id should be None when workspace_scope is not SHARED. 
Got {scope=}, {value=}" + raise ValueError(msg) + return value + + +class Workspace(BaseModel): + workspace_id: WorkspaceID + name: str + description: str | None + owner_primary_gid: GroupID = Field( + ..., + description="GID of the group that owns this wallet", + ) + thumbnail: str | None + created: datetime = Field( + ..., + description="Timestamp on creation", + ) + modified: datetime = Field( + ..., + description="Timestamp of last modification", + ) + trashed: datetime | None + trashed_by: UserID | None + trashed_by_primary_gid: GroupID | None = None + + model_config = ConfigDict(from_attributes=True) + + +class UserWorkspaceWithAccessRights(Workspace): + my_access_rights: AccessRights + access_rights: dict[GroupID, AccessRights] + + model_config = ConfigDict(from_attributes=True) + + +class WorkspaceUpdates(BaseModel): + name: str | None = None + description: str | None = None + thumbnail: str | None = None + trashed: datetime | None = None + trashed_by: UserID | None = None diff --git a/packages/models-library/tests/conftest.py b/packages/models-library/tests/conftest.py index 7044dae9ad1..8bf433b901d 100644 --- a/packages/models-library/tests/conftest.py +++ b/packages/models-library/tests/conftest.py @@ -9,6 +9,7 @@ import pytest pytest_plugins = [ + "pytest_simcore.faker_projects_data", "pytest_simcore.pydantic_models", "pytest_simcore.pytest_global_environs", "pytest_simcore.repository_paths", @@ -31,3 +32,11 @@ def project_slug_dir() -> Path: assert folder.exists() assert any(folder.glob("src/models_library")) return folder + + +@pytest.fixture +def tests_data_dir(project_tests_dir: Path) -> Path: + path = project_tests_dir / "data" + assert path.exists() + assert path.is_dir() + return path diff --git a/packages/models-library/tests/test__models_examples.py b/packages/models-library/tests/test__models_examples.py index 7c0adedd935..482f586df7c 100644 --- a/packages/models-library/tests/test__models_examples.py +++ b/packages/models-library/tests/test__models_examples.py @@ -1,59 +1,32 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - import json -from contextlib import suppress -from importlib import import_module -from inspect import getmembers, isclass -from pathlib import Path -from typing import Any, Iterable, Optional, Set, Tuple, Type +from itertools import chain +from typing import Any import models_library import pytest -from models_library.utils.misc import extract_examples -from pydantic import BaseModel, NonNegativeInt -from pydantic.json import pydantic_encoder - - -def iter_model_cls_examples( - exclude: Optional[Set] = None, -) -> Iterable[Tuple[str, Type[BaseModel], NonNegativeInt, Any]]: - def _is_model_cls(cls) -> bool: - with suppress(TypeError): - # NOTE: issubclass( dict[models_library.services.ConstrainedStrValue, models_library.services.ServiceInput] ) raises TypeError - return cls is not BaseModel and isclass(cls) and issubclass(cls, BaseModel) - return False - - exclude = exclude or set() +from models_library.rest_pagination import Page +from models_library.rpc_pagination import PageRpc +from pydantic import BaseModel +from pytest_simcore.examples.models_library import PAGE_EXAMPLES, RPC_PAGE_EXAMPLES +from pytest_simcore.pydantic_models import ( + ModelExample, + iter_examples, + walk_model_examples_in_package, +) - for filepath in Path(models_library.__file__).resolve().parent.glob("*.py"): - if not filepath.name.startswith("_"): - mod = import_module(f"models_library.{filepath.stem}") 
- for name, model_cls in getmembers(mod, _is_model_cls): - if name in exclude: - continue - # NOTE: this is part of utils.misc and is tested here - examples = extract_examples(model_cls) - for index, example in enumerate(examples): - yield (name, model_cls, index, example) +GENERIC_EXAMPLES: list[ModelExample] = [ + *iter_examples(model_cls=Page[str], examples=PAGE_EXAMPLES), + *iter_examples(model_cls=PageRpc[str], examples=RPC_PAGE_EXAMPLES), +] @pytest.mark.parametrize( - "class_name, model_cls, example_index, test_example", iter_model_cls_examples() + "model_cls, example_name, example_data", + chain(GENERIC_EXAMPLES, walk_model_examples_in_package(models_library)), ) -def test_all_module_model_examples( - class_name: str, - model_cls: Type[BaseModel], - example_index: NonNegativeInt, - test_example: Any, +def test_all_models_library_models_config_examples( + model_cls: type[BaseModel], example_name: int, example_data: Any ): - """Automatically collects all BaseModel subclasses having examples and tests them against schemas""" - print( - f"test {example_index=} for {class_name=}:\n", - json.dumps(test_example, default=pydantic_encoder, indent=2), - "---", - ) - model_instance = model_cls.parse_obj(test_example) - assert isinstance(model_instance, model_cls) + assert model_cls.model_validate( + example_data + ), f"Failed {example_name} : {json.dumps(example_data)}" diff --git a/packages/models-library/tests/test__models_fit_schemas.py b/packages/models-library/tests/test__models_fit_schemas.py index aed9f354b34..7b940696e8c 100644 --- a/packages/models-library/tests/test__models_fit_schemas.py +++ b/packages/models-library/tests/test__models_fit_schemas.py @@ -2,18 +2,21 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name # pylint:disable=protected-access -import json -from typing import Callable +from collections.abc import Callable import pytest from models_library.projects import Project -from models_library.services import ServiceDockerData +from models_library.services import ServiceMetaDataPublished from pydantic.main import BaseModel +@pytest.mark.skip(reason="waiting for PC PR") @pytest.mark.parametrize( "pydantic_model, original_json_schema", - [(ServiceDockerData, "node-meta-v0.0.1.json"), (Project, "project-v0.0.1.json")], + [ + (ServiceMetaDataPublished, "node-meta-v0.0.1-pydantic.json"), + (Project, "project-v0.0.1-pydantic.json"), + ], ) def test_generated_schema_same_as_original( pydantic_model: BaseModel, @@ -24,7 +27,7 @@ def test_generated_schema_same_as_original( # TODO: create instead a fixture that returns a Callable and do these checks # on separate test_* files that follow the same package submodule's hierarchy # - generated_schema = json.loads(pydantic_model.schema_json(indent=2)) + generated_schema = pydantic_model.model_json_schema() original_schema = json_schema_dict(original_json_schema) # NOTE: A change is considered an addition when the destination schema has become more permissive relative to the source schema. For example {"type": "string"} -> {"type": ["string", "number"]}. 
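To make the NOTE above concrete, here is a small illustration (not part of the patch) of what "more permissive" means, using the third-party jsonschema package purely for demonstration; the example instances are made up.

from jsonschema import ValidationError, validate

source = {"type": "string"}
destination = {"type": ["string", "number"]}  # accepts everything `source` accepts, plus numbers

validate(instance="hello", schema=source)       # accepted by the source schema ...
validate(instance="hello", schema=destination)  # ... and still accepted by the destination

validate(instance=3, schema=destination)        # the destination additionally accepts numbers
try:
    validate(instance=3, schema=source)         # ... which the stricter source rejects
except ValidationError:
    pass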
diff --git a/packages/models-library/tests/test__pydantic_models.py b/packages/models-library/tests/test__pydantic_models.py index 5399a05fd09..1d9dc14e3b4 100644 --- a/packages/models-library/tests/test__pydantic_models.py +++ b/packages/models-library/tests/test__pydantic_models.py @@ -6,13 +6,15 @@ """ -from typing import List, Union, get_args, get_origin +from typing import Any, Union, get_args, get_origin import pytest +from common_library.json_serialization import json_dumps from models_library.projects_nodes import InputTypes, OutputTypes from models_library.projects_nodes_io import SimCoreFileLink -from pydantic import BaseModel, ValidationError, schema_json_of +from pydantic import BaseModel, Field, TypeAdapter, ValidationError from pydantic.types import Json +from pydantic.version import version_short # NOTE: pydantic at a glance (just a few key features): # @@ -36,7 +38,9 @@ class ArgumentAnnotation(BaseModel): data_schema: Json # notice that this is a raw string! - jsonschema_of_x = schema_json_of(List[int], title="schema[x]") + jsonschema_of_x = json_dumps( + {**TypeAdapter(list[int]).json_schema(), "title": "schema[x]"} + ) assert isinstance(jsonschema_of_x, str) x_annotation = ArgumentAnnotation(name="x", data_schema=jsonschema_of_x) @@ -49,7 +53,7 @@ class ArgumentAnnotation(BaseModel): "items": {"type": "integer"}, } - assert x_annotation.dict() == { + assert x_annotation.model_dump() == { "name": "x", "data_schema": { "title": "schema[x]", @@ -63,29 +67,34 @@ class ArgumentAnnotation(BaseModel): # # the constructor would expect a raw string but we produced a nested dict with pytest.raises(ValidationError) as exc_info: - ArgumentAnnotation(**x_annotation.dict()) + ArgumentAnnotation(**x_annotation.model_dump()) assert exc_info.value.errors()[0] == { + "input": {"items": {"type": "integer"}, "title": "schema[x]", "type": "array"}, "loc": ("data_schema",), - "msg": "JSON object must be str, bytes or bytearray", - "type": "type_error.json", + "msg": "JSON input should be string, bytes or bytearray", + "type": "json_type", + "url": f"https://errors.pydantic.dev/{version_short()}/v/json_type", } with pytest.raises(ValidationError) as exc_info: ArgumentAnnotation(name="foo", data_schema="invalid-json") assert exc_info.value.errors()[0] == { + "ctx": {"error": "expected value at line 1 column 1"}, + "input": "invalid-json", "loc": ("data_schema",), - "msg": "Invalid JSON", - "type": "value_error.json", + "msg": "Invalid JSON: expected value at line 1 column 1", + "type": "json_invalid", + "url": f"https://errors.pydantic.dev/{version_short()}/v/json_invalid", } def test_union_types_coercion(): # SEE https://pydantic-docs.helpmanual.io/usage/types/#unions class Func(BaseModel): - input: InputTypes - output: OutputTypes + input: InputTypes = Field(union_mode="left_to_right") + output: OutputTypes = Field(union_mode="left_to_right") assert get_origin(InputTypes) is Union assert get_origin(OutputTypes) is Union @@ -94,70 +103,109 @@ class Func(BaseModel): # NOTE: it is recommended that, when defining Union annotations, the most specific type is included first and followed by less specific types. 
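A minimal sketch of the union_mode="left_to_right" behaviour that the test above relies on, reflecting my reading of pydantic v2's union handling; the model and field names are invented.

from pydantic import BaseModel, Field


class LeftToRight(BaseModel):
    value: int | str = Field(union_mode="left_to_right")


class Smart(BaseModel):
    value: int | str  # pydantic-v2 default: "smart" union


# left_to_right tries the members in declaration order, so the string "42"
# is coerced by the first member (int) ...
assert LeftToRight(value="42").value == 42
# ... whereas smart mode prefers the exact type match and keeps the string.
assert Smart(value="42").value == "42"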
# - assert Func.schema()["properties"]["input"] == { + assert Func.model_json_schema()["properties"]["input"] == { "title": "Input", "anyOf": [ {"type": "boolean"}, {"type": "integer"}, {"type": "number"}, - {"format": "json-string", "type": "string"}, + { + "contentMediaType": "application/json", + "contentSchema": {}, + "type": "string", + }, {"type": "string"}, - {"$ref": "#/definitions/PortLink"}, - {"$ref": "#/definitions/SimCoreFileLink"}, - {"$ref": "#/definitions/DatCoreFileLink"}, - {"$ref": "#/definitions/DownloadLink"}, + {"$ref": "#/$defs/PortLink"}, + {"$ref": "#/$defs/SimCoreFileLink"}, + {"$ref": "#/$defs/DatCoreFileLink"}, + {"$ref": "#/$defs/DownloadLink"}, {"type": "array", "items": {}}, {"type": "object"}, ], } # integers ------------------------ - model = Func.parse_obj({"input": "0", "output": 1}) - print(model.json(indent=1)) + model = Func.model_validate({"input": "0", "output": 1}) + print(model.model_dump_json(indent=1)) assert model.input == 0 assert model.output == 1 # numbers and bool ------------------------ - model = Func.parse_obj({"input": "0.5", "output": "false"}) - print(model.json(indent=1)) + model = Func.model_validate({"input": "0.5", "output": "false"}) + print(model.model_dump_json(indent=1)) assert model.input == 0.5 - assert model.output == False + assert model.output is False # (undefined) json string vs string ------------------------ - model = Func.parse_obj( + model = Func.model_validate( { "input": '{"w": 42, "z": false}', # NOTE: this is a raw json string "output": "some/path/or/string", } ) - print(model.json(indent=1)) + print(model.model_dump_json(indent=1)) assert model.input == {"w": 42, "z": False} assert model.output == "some/path/or/string" - # (undefined) json string vs SimCoreFileLink.dict() ------------ - MINIMAL = 1 + # (undefined) json string vs SimCoreFileLink.model_dump() ------------ + MINIMAL = 2 # <--- index of the example with the minimum required fields assert SimCoreFileLink in get_args(OutputTypes) - example = SimCoreFileLink.parse_obj( - SimCoreFileLink.Config.schema_extra["examples"][MINIMAL] + example = SimCoreFileLink.model_validate( + SimCoreFileLink.model_config["json_schema_extra"]["examples"][MINIMAL] ) - model = Func.parse_obj( + model = Func.model_validate( { "input": '{"w": 42, "z": false}', - "output": example.dict( + "output": example.model_dump( exclude_unset=True ), # NOTE: this is NOT a raw json string } ) - print(model.json(indent=1)) + print(model.model_dump_json(indent=1)) assert model.input == {"w": 42, "z": False} assert model.output == example assert isinstance(model.output, SimCoreFileLink) # json array and objects - model = Func.parse_obj({"input": {"w": 42, "z": False}, "output": [1, 2, 3, None]}) - print(model.json(indent=1)) + model = Func.model_validate( + {"input": {"w": 42, "z": False}, "output": [1, 2, 3, None]} + ) + print(model.model_dump_json(indent=1)) assert model.input == {"w": 42, "z": False} assert model.output == [1, 2, 3, None] + + +def test_nullable_fields_from_pydantic_v1(): + # Tests issue found during migration. Pydantic v1 would default to None all nullable fields when they were not **explicitly** set with `...` as required + # SEE https://github.com/ITISFoundation/osparc-simcore/pull/6751 + class MyModel(BaseModel): + # pydanticv1 would add a default to fields set as nullable + nullable_required: str | None # <--- This was default to =None in pydantic 1 !!! + nullable_required_with_hyphen: str | None = Field(default=...) 
+ nullable_optional: str | None = None + + # but with non-nullable "required" worked both ways + non_nullable_required: int + non_nullable_required_with_hyphen: int = Field(default=...) + non_nullable_optional: int = 42 + + data: dict[str, Any] = { + "nullable_required_with_hyphen": "foo", + "non_nullable_required_with_hyphen": 1, + "non_nullable_required": 2, + } + + with pytest.raises(ValidationError) as err_info: + MyModel.model_validate(data) + + assert err_info.value.error_count() == 1 + error = err_info.value.errors()[0] + assert error["type"] == "missing" + assert error["loc"] == ("nullable_required",) + + data["nullable_required"] = None + model = MyModel.model_validate(data) + assert model.model_dump(exclude_unset=True) == data diff --git a/packages/models-library/tests/test__pydantic_models_and_enums.py b/packages/models-library/tests/test__pydantic_models_and_enums.py new file mode 100644 index 00000000000..00c67c32c9b --- /dev/null +++ b/packages/models-library/tests/test__pydantic_models_and_enums.py @@ -0,0 +1,126 @@ +from enum import Enum, StrEnum, unique + +import pytest +from models_library.utils.enums import are_equivalent_enums, enum_to_dict +from pydantic import BaseModel, TypeAdapter, ValidationError + + +# +# Enum Color1 is **equivalent** to enum Color2 but not equal +# +@unique +class Color1(Enum): + RED = "RED" + + +@unique +class Color2(Enum): + RED = "RED" + + +def test_equivalent_enums_are_not_strictly_equal(): + assert Color1 != Color2 + + assert enum_to_dict(Color1) == enum_to_dict(Color2) + + assert are_equivalent_enums(Color1, Color2) + assert are_equivalent_enums(Color1, Color1) + + +# +# Here two equivalent enum BUT of type str-enum +# +# SEE from models_library.utils.enums.AutoStrEnum +# SEE https://docs.pydantic.dev/dev-v2/usage/types/enums/ +# + + +@unique +class ColorStrAndEnum1(StrEnum): + RED = "RED" + + +@unique +class ColorStrAndEnum2(StrEnum): + RED = "RED" + + +def test_enums_vs_strenums(): + # here are the differences + assert f"{Color1.RED}" == "Color1.RED" + assert f"{ColorStrAndEnum1.RED}" == "RED" + + assert Color1.RED != "RED" + assert ColorStrAndEnum1.RED == "RED" + + assert Color1.RED != ColorStrAndEnum1.RED + + # here are the analogies + assert Color1.RED.name == "RED" + assert ColorStrAndEnum1.RED.name == "RED" + + assert Color1.RED.value == "RED" + assert ColorStrAndEnum1.RED.value == "RED" + + +def test_enums_and_strenums_are_equivalent(): + + assert are_equivalent_enums(Color1, ColorStrAndEnum1) + assert are_equivalent_enums(Color2, ColorStrAndEnum2) + assert are_equivalent_enums(Color1, ColorStrAndEnum2) + + +class Model(BaseModel): + color: Color1 + + +def test_parsing_enums_in_pydantic(): + + model = TypeAdapter(Model).validate_python({"color": Color1.RED}) + assert model.color == Color1.RED + + # Can parse from STRING + model = TypeAdapter(Model).validate_python({"color": "RED"}) + assert model.color == Color1.RED + + # Can **NOT** parse from equilalent enum + with pytest.raises(ValidationError): + TypeAdapter(Model).validate_python({"color": Color2.RED}) + + +class ModelStrAndEnum(BaseModel): + color: ColorStrAndEnum1 + + +def test_parsing_strenum_in_pydantic(): + assert are_equivalent_enums(Color1, ColorStrAndEnum1) + + model = TypeAdapter(ModelStrAndEnum).validate_python( + {"color": ColorStrAndEnum1.RED} + ) + assert model.color == ColorStrAndEnum1.RED + + # Can parse from string + model = TypeAdapter(ModelStrAndEnum).validate_python({"color": "RED"}) + assert model.color == ColorStrAndEnum1.RED + + # **CAN** parse other 
equivalent str-enum + # Using str-enums allow you to parse from equivalent enums! + TypeAdapter(ModelStrAndEnum).validate_python({"color": ColorStrAndEnum2.RED}) + + +def test_parsing_str_and_enum_in_pydantic(): + + # Can still NOT parse equivalent enum(-only) + # with pytest.raises(ValidationError): + # TypeAdapter(ModelStrAndEnum).validate_python({"color": Color1.RED}) + + # And the opposite? NO!!! + with pytest.raises(ValidationError): + TypeAdapter(Color1).validate_python({"color": ColorStrAndEnum1.RED}) + + with pytest.raises(ValidationError): + TypeAdapter(Color1).validate_python({"color": ColorStrAndEnum2.RED}) + + # CONCLUSION: we need a validator to pre-process inputs ! + # SEE models_library.utils.common_validators diff --git a/packages/models-library/tests/test_api_schemas__common.py b/packages/models-library/tests/test_api_schemas__common.py new file mode 100644 index 00000000000..444460f9dc2 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas__common.py @@ -0,0 +1,21 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import http + +import pytest +from models_library.api_schemas__common.errors import DefaultApiError + + +@pytest.mark.parametrize("code", [e.value for e in http.HTTPStatus if e.value >= 400]) +def test_create_default_api_error_from_status_code(code: int): + + error = DefaultApiError.from_status_code(code) + assert error.name == f"{code}" + assert error.detail + + assert DefaultApiError.from_status_code(code, detail="FOO").detail == "FOO" diff --git a/packages/models-library/tests/test_api_schemas_catalog.py b/packages/models-library/tests/test_api_schemas_catalog.py new file mode 100644 index 00000000000..280f74f2ee8 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_catalog.py @@ -0,0 +1,65 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.services import ServiceInput + + +def test_service_port_with_file(): + + io = ServiceInput.model_validate( + { + "displayOrder": 1, + "label": "Input files", + "description": "Files downloaded from service connected at the input", + "type": "data:*/*", # < --- generic mimetype! + "fileToKeyMap": { + "single_number.txt": "input_1" + }, # <-- provides a file with an extension + } + ) + + port = ServicePortGet.from_domain_model("input", "input_1", io).model_dump( + exclude_unset=True + ) + + assert port == { + "key": "input_1", + "kind": "input", + "content_media_type": "text/plain", # <-- deduced from extension + "content_schema": { + "type": "string", + "title": "Input files", + "description": "Files downloaded from service connected at the input", + }, + } + + +def test_service_port_with_boolean(): + + io = ServiceInput.model_validate( + { + "displayOrder": 3, + "label": "Same title and description is more usual than you might think", + "description": "Same title and description is more usual than you might think", # <- same label and description! 
+ "type": "boolean", + "defaultValue": False, # <- has a default + } + ) + + port = ServicePortGet.from_domain_model("input", "input_1", io).model_dump( + exclude_unset=True + ) + + assert port == { + "key": "input_1", + "kind": "input", + # "content_media_type": None, # <-- no content media + "content_schema": { + "type": "boolean", + "title": "Same title and description is more usual than you might think", # <-- no description + "default": False, # <-- + }, + } diff --git a/packages/models-library/tests/test_api_schemas_dynamic_sidecar_telemetry.py b/packages/models-library/tests/test_api_schemas_dynamic_sidecar_telemetry.py new file mode 100644 index 00000000000..d5ffc459397 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_dynamic_sidecar_telemetry.py @@ -0,0 +1,62 @@ +import psutil +import pytest +from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage +from psutil._common import sdiskusage +from pydantic import ByteSize, ValidationError + + +def _assert_same_value(ps_util_disk_usage: sdiskusage) -> None: + disk_usage = DiskUsage.from_ps_util_disk_usage(ps_util_disk_usage) + assert disk_usage.used == ps_util_disk_usage.used + assert disk_usage.free == ps_util_disk_usage.free + assert disk_usage.used_percent == pytest.approx(ps_util_disk_usage.percent, abs=1e3) + + +@pytest.mark.parametrize( + "ps_util_disk_usage", + [ + sdiskusage(total=77851254784, used=58336940032, free=19497537536, percent=74.9), + sdiskusage(total=77851254784, used=58573619200, free=19260858368, percent=75.3), + sdiskusage(total=77851254784, used=58573529088, free=19260948480, percent=75.3), + sdiskusage(total=77851254784, used=58573664256, free=19260813312, percent=75.3), + ], +) +def test_disk_usage_regression_cases(ps_util_disk_usage: sdiskusage): + _assert_same_value(ps_util_disk_usage) + + +def test_disk_usage(): + ps_util_disk_usage = psutil.disk_usage("/") + _assert_same_value(ps_util_disk_usage) + + +def test_from_efs_guardian_constructor(): + result = DiskUsage.from_efs_guardian(10, 100) + assert result.used == ByteSize(10) + assert result.free == ByteSize(90) + assert result.total == ByteSize(100) + assert result.used_percent == 10 + + +def test_failing_validation(): + with pytest.raises(ValidationError) as exc: + assert DiskUsage.from_efs_guardian(100, 10) + + assert "free" in f"{exc.value}" + assert "input_value=-90" in f"{exc.value}" + + with pytest.raises(ValidationError) as exc: + assert DiskUsage( + used=-10, # type: ignore + free=ByteSize(10), + total=ByteSize(0), + used_percent=-10, + ) + assert "used" in f"{exc.value}" + assert "input_value=-10" in f"{exc.value}" + + with pytest.raises(ValidationError) as exc: + DiskUsage( + used=ByteSize(10), free=ByteSize(10), total=ByteSize(21), used_percent=0 + ) + assert "is different than the sum of" in f"{exc.value}" diff --git a/packages/models-library/tests/test_api_schemas_storage.py b/packages/models-library/tests/test_api_schemas_storage.py deleted file mode 100644 index 07349d39f92..00000000000 --- a/packages/models-library/tests/test_api_schemas_storage.py +++ /dev/null @@ -1,23 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import pytest -from models_library.api_schemas_storage import ( - DatasetMetaDataGet, - FileLocation, - FileMetaDataGet, -) - - -@pytest.mark.parametrize( - "model_cls", (FileLocation, FileMetaDataGet, DatasetMetaDataGet) -) -def test_storage_api_models_examples(model_cls): - examples = 
model_cls.Config.schema_extra["examples"] - - for index, example in enumerate(examples): - print(f"{index:-^10}:\n", example) - - model_instance = model_cls(**example) - assert model_instance diff --git a/packages/models-library/tests/test_api_schemas_webserver_groups.py b/packages/models-library/tests/test_api_schemas_webserver_groups.py new file mode 100644 index 00000000000..c254ef40cf4 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_webserver_groups.py @@ -0,0 +1,27 @@ +from typing import Any + +import pytest +from models_library.api_schemas_webserver.groups import GroupUserAdd +from pydantic import ValidationError + +unset = object() + + +@pytest.mark.parametrize("uid", [1, None, unset]) +@pytest.mark.parametrize("email", ["user@foo.com", None, unset]) +def test_uid_or_email_are_set(uid: Any, email: Any): + kwargs = {} + if uid != unset: + kwargs["uid"] = uid + if email != unset: + kwargs["email"] = email + + none_are_defined = kwargs.get("uid") is None and kwargs.get("email") is None + both_are_defined = kwargs.get("uid") is not None and kwargs.get("email") is not None + + if none_are_defined or both_are_defined: + with pytest.raises(ValidationError, match="not both"): + GroupUserAdd(**kwargs) + else: + got = GroupUserAdd(**kwargs) + assert bool(got.email) ^ bool(got.uid) diff --git a/packages/models-library/tests/test_api_schemas_webserver_projects.py b/packages/models-library/tests/test_api_schemas_webserver_projects.py new file mode 100644 index 00000000000..acd7a5fa443 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_webserver_projects.py @@ -0,0 +1,89 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +from models_library.api_schemas_webserver.projects import ( + ProjectCreateNew, + ProjectGet, + ProjectListItem, + ProjectReplace, + TaskProjectGet, +) +from models_library.generics import Envelope +from models_library.rest_pagination import Page +from pydantic import TypeAdapter +from pytest_simcore.simcore_webserver_projects_rest_api import ( + CREATE_FROM_SERVICE, + CREATE_FROM_TEMPLATE, + CREATE_FROM_TEMPLATE__TASK_RESULT, + GET_PROJECT, + LIST_PROJECTS, + NEW_PROJECT, + REPLACE_PROJECT, + REPLACE_PROJECT_ON_MODIFIED, + HttpApiCallCapture, +) + + +@pytest.mark.parametrize( + "api_call", + [NEW_PROJECT, CREATE_FROM_SERVICE, CREATE_FROM_TEMPLATE], + ids=lambda c: c.name, +) +def test_create_project_schemas(api_call: HttpApiCallCapture): + request_payload = ProjectCreateNew.model_validate(api_call.request_payload) + assert request_payload + + response_body = TypeAdapter( + Envelope[ProjectGet] | Envelope[TaskProjectGet] + ).validate_python(api_call.response_body) + assert response_body + + +@pytest.mark.parametrize( + "api_call", + [LIST_PROJECTS], + ids=lambda c: c.name, +) +def test_list_project_schemas(api_call: HttpApiCallCapture): + assert api_call.request_payload is None + + response_body = TypeAdapter(Page[ProjectListItem]).validate_python( + api_call.response_body + ) + assert response_body + + +@pytest.mark.parametrize( + "api_call", + [GET_PROJECT, CREATE_FROM_TEMPLATE__TASK_RESULT], + ids=lambda c: c.name, +) +def test_get_project_schemas(api_call: HttpApiCallCapture): + # NOTE: that response_body here is the exported values + # and therefore ProjectGet has to be implemented in such a way that + # can also parse exported values! (e.g. 
Json does not allow that, or ocassionaly exclude_none) + response_body = TypeAdapter(Envelope[ProjectGet]).validate_python( + api_call.response_body + ) + assert response_body + + +@pytest.mark.parametrize( + "api_call", + [REPLACE_PROJECT, REPLACE_PROJECT_ON_MODIFIED], + ids=lambda c: c.name, +) +def test_replace_project_schemas(api_call: HttpApiCallCapture): + request_payload = TypeAdapter(ProjectReplace).validate_python( + api_call.request_payload + ) + assert request_payload + + response_body = TypeAdapter(Envelope[ProjectGet]).validate_python( + api_call.response_body + ) + assert response_body diff --git a/packages/models-library/tests/test_api_schemas_webserver_socketio.py b/packages/models-library/tests/test_api_schemas_webserver_socketio.py new file mode 100644 index 00000000000..a78ebea2432 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_webserver_socketio.py @@ -0,0 +1,28 @@ +# pylint:disable=redefined-outer-name + +import pytest +from faker import Faker +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.groups import GroupID +from models_library.users import UserID + + +@pytest.fixture +def user_id(faker: Faker) -> UserID: + return UserID(faker.pyint()) + + +@pytest.fixture +def group_id(faker: Faker) -> GroupID: + return GroupID(faker.pyint()) + + +@pytest.fixture +def socket_id(faker: Faker) -> str: + return faker.pystr() + + +def test_socketio_room(user_id: UserID, group_id: GroupID, socket_id: str): + assert SocketIORoomStr.from_user_id(user_id) == f"user:{user_id}" + assert SocketIORoomStr.from_group_id(group_id) == f"group:{group_id}" + assert SocketIORoomStr.from_socket_id(socket_id) == socket_id diff --git a/packages/models-library/tests/test_api_schemas_webserver_users.py b/packages/models-library/tests/test_api_schemas_webserver_users.py new file mode 100644 index 00000000000..afefb91c481 --- /dev/null +++ b/packages/models-library/tests/test_api_schemas_webserver_users.py @@ -0,0 +1,81 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from copy import deepcopy + +import pytest +from common_library.users_enums import UserRole +from models_library.api_schemas_webserver.users import ( + MyProfileGet, + MyProfilePatch, +) +from pydantic import ValidationError + + +@pytest.mark.parametrize("user_role", [u.name for u in UserRole]) +def test_profile_get_role(user_role: str): + for example in MyProfileGet.model_json_schema()["examples"]: + data = deepcopy(example) + data["role"] = user_role + m1 = MyProfileGet(**data) + + data["role"] = UserRole(user_role) + m2 = MyProfileGet(**data) + assert m1 == m2 + + +def test_my_profile_patch_username_min_len(): + # minimum length username is 4 + with pytest.raises(ValidationError) as err_info: + MyProfilePatch.model_validate({"userName": "abc"}) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == "too_short" + + MyProfilePatch.model_validate({"userName": "abcd"}) # OK + + +def test_my_profile_patch_username_valid_characters(): + # Ensure valid characters (alphanumeric + . 
_ -) + with pytest.raises(ValidationError, match="start with a letter") as err_info: + MyProfilePatch.model_validate({"userName": "1234"}) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == "value_error" + + MyProfilePatch.model_validate({"userName": "u1234"}) # OK + + +def test_my_profile_patch_username_special_characters(): + # Ensure no consecutive special characters + with pytest.raises( + ValidationError, match="consecutive special characters" + ) as err_info: + MyProfilePatch.model_validate({"userName": "u1__234"}) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == "value_error" + + MyProfilePatch.model_validate({"userName": "u1_234"}) # OK + + # Ensure it doesn't end with a special character + with pytest.raises(ValidationError, match="end with") as err_info: + MyProfilePatch.model_validate({"userName": "u1234_"}) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == "value_error" + + MyProfilePatch.model_validate({"userName": "u1_234"}) # OK + + +def test_my_profile_patch_username_reserved_words(): + # Check reserved words (example list; extend as needed) + with pytest.raises(ValidationError, match="cannot be used") as err_info: + MyProfilePatch.model_validate({"userName": "admin"}) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == "value_error" + + MyProfilePatch.model_validate({"userName": "midas"}) # OK diff --git a/packages/models-library/tests/test_app_diagnostics.py b/packages/models-library/tests/test_app_diagnostics.py new file mode 100644 index 00000000000..0e5f5864eae --- /dev/null +++ b/packages/models-library/tests/test_app_diagnostics.py @@ -0,0 +1,18 @@ +from models_library.app_diagnostics import AppStatusCheck + + +def test_annotated_defaults_and_default_factories(): + + model = AppStatusCheck(app_name="foo", version="1.2.3") + assert model.app_name == "foo" + assert model.version == "1.2.3" + + # checks default_factory + assert model.services == {} + assert model.sessions == {} + + # checks default inside Annotated[, Field(default=None, ...)] + assert model.url is None + + # checks default outside Annotated + assert model.diagnostics_url is None diff --git a/packages/models-library/tests/test_basic_regex.py b/packages/models-library/tests/test_basic_regex.py index e0d42f76cb9..e935776f385 100644 --- a/packages/models-library/tests/test_basic_regex.py +++ b/packages/models-library/tests/test_basic_regex.py @@ -6,19 +6,22 @@ import keyword import re +from collections.abc import Sequence from datetime import datetime -from typing import Any, Optional, Pattern, Sequence, Union +from re import Pattern +from typing import Any import pytest from models_library.basic_regex import ( DATE_RE, + DOCKER_GENERIC_TAG_KEY_RE, DOCKER_LABEL_KEY_REGEX, PUBLIC_VARIABLE_NAME_RE, SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS, SEMANTIC_VERSION_RE_W_NAMED_GROUPS, + SIMPLE_VERSION_RE, TWILIO_ALPHANUMERIC_SENDER_ID_RE, UUID_RE, - VERSION_RE, ) from packaging.version import Version @@ -28,12 +31,12 @@ def assert_match_and_get_capture( - regex_or_str: Union[str, Pattern[str]], + regex_or_str: str | Pattern[str], test_str: str, expected: Any, *, - group_names: Optional[tuple[str]] = None, -) -> Optional[Sequence]: + group_names: tuple[str] | None = None, +) -> Sequence | None: match = re.match(regex_or_str, test_str) if expected is INVALID: assert match is None @@ -59,8 +62,8 @@ def assert_match_and_get_capture( ("2.1.0-rc2", ("2", ".0", "0", 
"-rc2", "rc2", None, None, None, None)), ], ) -def test_VERSION_RE(version_str, expected): - assert_match_and_get_capture(VERSION_RE, version_str, expected) +def test_SIMPLE_VERSION_RE(version_str, expected): + assert_match_and_get_capture(SIMPLE_VERSION_RE, version_str, expected) # Many taken from https://regex101.com/r/Ly7O1x/3/ @@ -338,3 +341,95 @@ def test_TWILIO_ALPHANUMERIC_SENDER_ID_RE(sample, expected): ) def test_DOCKER_LABEL_KEY_REGEX(sample, expected): assert_match_and_get_capture(DOCKER_LABEL_KEY_REGEX, sample, expected) + + +@pytest.mark.parametrize( + "sample, expected", + [ + ("fedora/httpd:version1.0", VALID), + ("fedora/httpd:version1.0.test", VALID), + ("itisfoundation/dynamic-sidecar:release-latest", VALID), + ("simcore/service/comp/itis/sleepers:2.0.2", VALID), + ("registry.osparc.io/simcore/service/comp/itis/sleepers:2.0.2", VALID), + ("nginx:2.0.2", VALID), + ("envoyproxy/envoy:v1.25-latest", VALID), + ("myregistryhost:5000/fedora/httpd:version1.0", VALID), + ("alpine", VALID), + ("alpine:latest", VALID), + ("localhost/latest", VALID), + ("library/alpine", VALID), + ("localhost:1234/test", VALID), + ("test:1234/blaboon", VALID), + ("alpine:3.7", VALID), + ("docker.example.edu/gmr/alpine:3.7", VALID), + ( + "docker.example.com:5000/gmr/alpine@sha256:5a156ff125e5a12ac7ff43ee5120fa249cf62248337b6d04abc574c8", + VALID, + ), + ("docker.example.co.uk/gmr/alpine/test2:latest", VALID), + ("registry.dobby.org/dobby/dobby-servers/arthound:2019-08-08", VALID), + ("owasp/zap:3.8.0", VALID), + ("registry.dobby.co/dobby/dobby-servers/github-run:2021-10-04", VALID), + ("docker.elastic.co/kibana/kibana:7.6.2", VALID), + ("registry.dobby.org/dobby/dobby-servers/lerphound:latest", VALID), + ("registry.dobby.org/dobby/dobby-servers/marbletown-poc:2021-03-29", VALID), + ("marbles/marbles:v0.38.1", VALID), + ( + "registry.dobby.org/dobby/dobby-servers/loophole@sha256:5a156ff125e5a12ac7ff43ee5120fa249cf62248337b6d04abc574c8", + VALID, + ), + ("sonatype/nexon:3.30.0", VALID), + ("prom/node-exporter:v1.1.1", VALID), + ( + "sosedoff/pgweb@sha256:5a156ff125e5a12ac7ff43ee5120fa249cf62248337b6d04abc574c8", + VALID, + ), + ("sosedoff/pgweb:latest", VALID), + ("registry.dobby.org/dobby/dobby-servers/arpeggio:2021-06-01", VALID), + ("registry.dobby.org/dobby/antique-penguin:release-production", VALID), + ("dalprodictus/halcon:6.7.5", VALID), + ("antigua/antigua:v31", VALID), + ("weblate/weblate:4.7.2-1", VALID), + ("redis:4.0.01-alpine", VALID), + ("registry.dobby.com/dobby/dobby-servers/github-run:latest", VALID), + ("portainer/portainer:latest", VALID), + ( + "registry:2@sha256:5a156ff125e5a12ac7fdec2b90b7e2ae5120fa249cf62248337b6d04abc574c8", + VALID, + ), + ("localhost/test", VALID), + ("test:1234/bla", VALID), + ("docker.example.com/gmr/alpine:3.7", VALID), + ("docker.example.com/gmr/alpine/test2:3.7", VALID), + ("docker.example.com/gmr/alpine/test2/test3:3.7", VALID), + ("docker.example.com:5000/gmr/alpine:latest", VALID), + ( + "docker.example.com:5000/gmr/alpine:latest@sha256:5ae13221a775e9ded1d00f4dd6a3ad869ed1d662eb8cf81cb1fc2ba06f2b7172", + VALID, + ), + ( + "docker.example.com:5000/gmr/alpine/test2:latest@sha256:5ae13221a775e9ded1d00f4dd6a3ad869ed1d662eb8cf81cb1fc2ba06f2b7172", + VALID, + ), + ( + "docker.example.com/gmr/alpine/test2:latest@sha256:5ae13221a775e9ded1d00f4dd6a3ad869ed1d662eb8cf81cb1fc2ba06f2b7172", + VALID, + ), + ( + "docker.example.com/gmr/alpine/test2@sha256:5ae13221a775e9ded1d00f4dd6a3ad869ed1d662eb8cf81cb1fc2ba06f2b7172", + VALID, + ), + 
("myregist_ryhost:5000/fedora/httpd:version1.0", INVALID), # undescrore + ("myregistryhost:5000/fe_dora/http_d:ver_sion1.0", VALID), + ("myregistryHOST:5000/fedora/httpd:version1.0", INVALID), # upper case + ("myregistryhost:5000/fedora/httpd:-version1.0", INVALID), # tag starts with - + ("myregistryhost:5000/fedora/httpd:.version1.0", INVALID), # tag starts with . + ( + "simcore/services/dynamic/some/sub/folder/my_service-key:123.456.3214@sha256:2aef165ab4f30fbb109e88959271d8b57489790ea13a77d27c02d8adb8feb20f", + VALID, + ), + ], + ids=lambda d: f"{d if isinstance(d, str) else ('INVALID' if d is INVALID else 'VALID')}", +) +def test_DOCKER_GENERIC_TAG_KEY_RE(sample, expected): + assert_match_and_get_capture(DOCKER_GENERIC_TAG_KEY_RE, sample, expected) diff --git a/packages/models-library/tests/test_basic_types.py b/packages/models-library/tests/test_basic_types.py index 5f8317eab73..adf7fe5ecb3 100644 --- a/packages/models-library/tests/test_basic_types.py +++ b/packages/models-library/tests/test_basic_types.py @@ -1,18 +1,104 @@ +from typing import NamedTuple + import pytest -from faker import Faker -from models_library.basic_types import UUIDStr -from pydantic import ValidationError -from pydantic.tools import parse_obj_as +from models_library.basic_types import ( + _SHORT_TRUNCATED_STR_MAX_LENGTH, + EnvVarKey, + IDStr, + MD5Str, + SHA1Str, + ShortTruncatedStr, + UUIDStr, + VersionTag, +) +from pydantic import TypeAdapter, ValidationError + + +class _Example(NamedTuple): + constr: type[str] + good: str + bad: str + +_EXAMPLES = [ + _Example(constr=VersionTag, good="v5", bad="v5.2"), + _Example( + constr=SHA1Str, + good="74e56e8a00c1ac4797eb15ada9affea692d48b25", + bad="d2cbbd98-d0f8-4de1-864e-b390713194eb", + ), # sha1sum .pylintrc + _Example( + constr=MD5Str, + good="3461a73124b5e63a1a0d912bc239cc94", + bad="d2cbbd98-d0f8-4de1-864e-b390713194eb", + ), # md5sum .pylintrc + _Example(constr=EnvVarKey, good="env_VAR", bad="12envar"), + _Example( + constr=UUIDStr, + good="d2cbbd98-d0f8-4de1-864e-b390713194eb", + bad="123456-is-not-an-uuid", + ), + _Example( + constr=IDStr, + good="d2cbbd98-d0f8-4de1-864e-b390713194eb", # as an uuid + bad="", # empty string not allowed + ), +] -@pytest.mark.skip(reason="DEV: testing parse_obj_as") -def test_parse_uuid_as_a_string(faker: Faker): - expected_uuid = faker.uuid4() - got_uuid = parse_obj_as(UUIDStr, expected_uuid) +@pytest.mark.parametrize( + "constraint_str_type,sample", + [(p.constr, p.good) for p in _EXAMPLES], +) +def test_constrained_str_succeeds(constraint_str_type: type[str], sample: str): + assert TypeAdapter(constraint_str_type).validate_python(sample) == sample - assert isinstance(got_uuid, str) - assert got_uuid == expected_uuid +@pytest.mark.parametrize( + "constraint_str_type,sample", + [(p.constr, p.bad) for p in _EXAMPLES], +) +def test_constrained_str_fails(constraint_str_type: type[str], sample: str): with pytest.raises(ValidationError): - parse_obj_as(UUIDStr, "123456-is-not-an-uuid") + TypeAdapter(constraint_str_type).validate_python(sample) + + +def test_string_identifier_constraint_type(): + + # strip spaces + assert ( + TypeAdapter(IDStr).validate_python(" 123 trim spaces ") == "123 trim spaces" + ) + + # limited to 100! 
+ TypeAdapter(IDStr).validate_python("X" * IDStr.max_length) + with pytest.raises(ValidationError): + TypeAdapter(IDStr).validate_python("X" * (IDStr.max_length + 1)) + + +def test_short_truncated_string(): + curtail_length = _SHORT_TRUNCATED_STR_MAX_LENGTH + assert ( + TypeAdapter(ShortTruncatedStr).validate_python("X" * curtail_length) + == "X" * curtail_length + ), "Max length string should remain intact" + + assert ( + TypeAdapter(ShortTruncatedStr).validate_python("X" * (curtail_length + 1)) + == "X" * curtail_length + ), "Overlong string should be truncated exactly to max length" + + assert ( + TypeAdapter(ShortTruncatedStr).validate_python("X" * (curtail_length + 100)) + == "X" * curtail_length + ), "Much longer string should still truncate to exact max length" + + # below limit + assert TypeAdapter(ShortTruncatedStr).validate_python( + "X" * (curtail_length - 1) + ) == "X" * (curtail_length - 1), "Under-length string should not be modified" + + # spaces are trimmed + assert ( + TypeAdapter(ShortTruncatedStr).validate_python(" " * (curtail_length + 1)) == "" + ), "Only-whitespace string should become empty string" diff --git a/packages/models-library/tests/test_callbacks_mapping.py b/packages/models-library/tests/test_callbacks_mapping.py new file mode 100644 index 00000000000..e39db6367ad --- /dev/null +++ b/packages/models-library/tests/test_callbacks_mapping.py @@ -0,0 +1,29 @@ +from typing import Any + +import pytest +from models_library.callbacks_mapping import ( + INACTIVITY_TIMEOUT_CAP, + TIMEOUT_MIN, + CallbacksMapping, +) +from pydantic import TypeAdapter, ValidationError + + +def _format_with_timeout(timeout: float) -> dict[str, Any]: + return {"inactivity": {"service": "a-service", "command": "", "timeout": timeout}} + + +def test_inactivity_time_out_is_max_capped(): + for in_bounds in [ + TIMEOUT_MIN, + TIMEOUT_MIN + 1, + INACTIVITY_TIMEOUT_CAP - 1, + INACTIVITY_TIMEOUT_CAP, + ]: + TypeAdapter(CallbacksMapping).validate_python(_format_with_timeout(in_bounds)) + + for out_of_bounds in [INACTIVITY_TIMEOUT_CAP + 1, TIMEOUT_MIN - 1]: + with pytest.raises(ValidationError): + TypeAdapter(CallbacksMapping).validate_python( + _format_with_timeout(out_of_bounds) + ) diff --git a/packages/models-library/tests/test_clusters.py b/packages/models-library/tests/test_clusters.py deleted file mode 100644 index d4d9b4b4c13..00000000000 --- a/packages/models-library/tests/test_clusters.py +++ /dev/null @@ -1,84 +0,0 @@ -from copy import deepcopy -from typing import Any, Dict, Type - -import pytest -from faker import Faker -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - DEFAULT_CLUSTER_ID, - Cluster, -) -from pydantic import BaseModel, ValidationError - - -@pytest.mark.parametrize( - "model_cls", - (Cluster,), -) -def test_cluster_access_rights_correctly_created_when_owner_access_rights_not_present( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - owner_gid = modified_example["owner"] - # remove the owner from the access rights if any - modified_example.get("access_rights", {}).pop(owner_gid, None) - - instance = model_cls(**modified_example) - if instance.id != DEFAULT_CLUSTER_ID: - assert instance.access_rights[owner_gid] == CLUSTER_ADMIN_RIGHTS # type: ignore - else: - assert instance.access_rights[owner_gid] == CLUSTER_USER_RIGHTS # type: ignore - - -@pytest.mark.parametrize( - "model_cls", - (Cluster,), 
-) -def test_cluster_fails_when_owner_has_no_admin_rights_unless_default_cluster( - model_cls: Type[BaseModel], - model_cls_examples: Dict[str, Dict[str, Any]], - faker: Faker, -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - modified_example["id"] = faker.pyint(min_value=1) - owner_gid = modified_example["owner"] - # ensure there are access rights - modified_example.setdefault("access_rights", {}) - # set the owner with manager rights - modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - # set the owner with user rights - modified_example["access_rights"][owner_gid] = CLUSTER_USER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - -@pytest.mark.parametrize( - "model_cls", - (Cluster,), -) -def test_cluster_fails_when_owner_has_no_user_rights_if_default_cluster( - model_cls: Type[BaseModel], - model_cls_examples: Dict[str, Dict[str, Any]], -): - for example in model_cls_examples.values(): - modified_example = deepcopy(example) - modified_example["id"] = DEFAULT_CLUSTER_ID - owner_gid = modified_example["owner"] - # ensure there are access rights - modified_example.setdefault("access_rights", {}) - # set the owner with manager rights - modified_example["access_rights"][owner_gid] = CLUSTER_MANAGER_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) - - # set the owner with user rights - modified_example["access_rights"][owner_gid] = CLUSTER_ADMIN_RIGHTS - with pytest.raises(ValidationError): - model_cls(**modified_example) diff --git a/packages/models-library/tests/test_docker.py b/packages/models-library/tests/test_docker.py index 066502c03a0..ae1c636a9e2 100644 --- a/packages/models-library/tests/test_docker.py +++ b/packages/models-library/tests/test_docker.py @@ -4,15 +4,17 @@ from typing import Any +from uuid import UUID import pytest from faker import Faker from models_library.docker import ( + _SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX, DockerGenericTag, DockerLabelKey, - SimcoreServiceDockerLabelKeys, + StandardSimcoreDockerLabels, ) -from pydantic import ValidationError, parse_obj_as +from pydantic import ByteSize, TypeAdapter, ValidationError _faker = Faker() @@ -31,7 +33,6 @@ ("com.hey_hey.hello", False), ("node.labels.standard_worker", False), ("Node.labels.standard_worker", False), - ("Node.labels.standard_worker", False), ("Node.labels.standardworker", False), ("node.labels.standardworker", True), ("io.simcore.auto-scaler", True), @@ -40,11 +41,11 @@ def test_docker_label_key(label_key: str, valid: bool): # NOTE: https://docs.docker.com/config/labels-custom-metadata/#key-format-recommendations if valid: - instance = parse_obj_as(DockerLabelKey, label_key) + instance = TypeAdapter(DockerLabelKey).validate_python(label_key) assert instance else: with pytest.raises(ValidationError): - parse_obj_as(DockerLabelKey, label_key) + TypeAdapter(DockerLabelKey).validate_python(label_key) @pytest.mark.parametrize( @@ -83,55 +84,56 @@ def test_docker_label_key(label_key: str, valid: bool): True, ), ( - "registry:5000/si.m--c_ore/services/1234/jupyter-smash:AUPPER_CASE_TAG_IS_OK_2.3.4", - True, - ), - ( - f"registry:5000/si.m--c_ore/services/1234/jupyter-smash:{'A'*128}", + f"registry:5000/si.m--c_ore/services/1234/jupyter-smash:{'A' * 128}", True, ), ( - f"registry:5000/si.m--c_ore/services/1234/jupyter-smash:{'A'*129}", + f"registry:5000/si.m--c_ore/services/1234/jupyter-smash:{'A' * 129}", False, ), ), ) 
def test_docker_generic_tag(image_name: str, valid: bool): if valid: - instance = parse_obj_as(DockerGenericTag, image_name) + instance = TypeAdapter(DockerGenericTag).validate_python(image_name) assert instance else: with pytest.raises(ValidationError): - parse_obj_as(DockerGenericTag, image_name) + TypeAdapter(DockerGenericTag).validate_python(image_name) @pytest.mark.parametrize( "obj_data", - [ - pytest.param( - { - "user_id": _faker.pyint(), - "project_id": _faker.uuid4(), - "node_id": _faker.uuid4(), - }, - id="parse_existing_service_labels", - ), - pytest.param( - { - "user_id": _faker.pyint(), - "project_id": _faker.uuid4(), - "node_id": _faker.uuid4(), - "product": "test_p", - "simcore_user_agent": "a-test-puppet", - }, - id="parse_new_service_labels", - ), - ], + StandardSimcoreDockerLabels.model_config["json_schema_extra"]["examples"], + ids=str, ) def test_simcore_service_docker_label_keys(obj_data: dict[str, Any]): - simcore_service_docker_label_keys = SimcoreServiceDockerLabelKeys.parse_obj( + simcore_service_docker_label_keys = StandardSimcoreDockerLabels.model_validate( obj_data ) - exported_dict = simcore_service_docker_label_keys.to_docker_labels() - assert all(isinstance(v, str) for v in exported_dict.values()) - assert parse_obj_as(SimcoreServiceDockerLabelKeys, exported_dict) + exported_dict = simcore_service_docker_label_keys.to_simcore_runtime_docker_labels() + assert all( + isinstance(v, str) for v in exported_dict.values() + ), "docker labels must be strings!" + assert all( + key.startswith(_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX) for key in exported_dict + ) + re_imported_docker_label_keys = TypeAdapter( + StandardSimcoreDockerLabels + ).validate_python(exported_dict) + assert re_imported_docker_label_keys + assert simcore_service_docker_label_keys == re_imported_docker_label_keys + + +def test_simcore_service_docker_label_keys_construction(): + simcore_service_docker_label_keys = StandardSimcoreDockerLabels( + user_id=8268, + project_id=UUID("5ea24ce0-0e4d-4ee6-a3f1-e4799752a684"), + node_id=UUID("c17c6279-23c6-412f-8826-867323a7711a"), + product_name="osparc", + simcore_user_agent="oePqmjQbZndJghceKRJR", + swarm_stack_name="UNDEFINED_DOCKER_LABEL", # NOTE: there is currently no need for this label in the comp backend + memory_limit=ByteSize(23424324), + cpu_limit=1.0, + ) + assert simcore_service_docker_label_keys.cpu_limit == 1.0 diff --git a/packages/models-library/tests/test_emails.py b/packages/models-library/tests/test_emails.py index 6958dc6ad28..f2b431c55d3 100644 --- a/packages/models-library/tests/test_emails.py +++ b/packages/models-library/tests/test_emails.py @@ -1,15 +1,21 @@ import pytest from models_library.emails import LowerCaseEmailStr -from pydantic import BaseModel +from pydantic import BaseModel, ValidationError + + +class Profile(BaseModel): + email: LowerCaseEmailStr @pytest.mark.parametrize( "email_input", ["bla@gmail.com", "BlA@gMaIL.com", "BLA@GMAIL.COM"] ) def test_lowercase_email(email_input: str): - class Profile(BaseModel): - email: LowerCaseEmailStr - data = Profile(email=email_input) assert data.email == "bla@gmail.com" + +@pytest.mark.parametrize("email_input", ["blagmail.com", "BlA@.com", "bLA@", ""]) +def test_malformed_email(email_input: str): + with pytest.raises(ValidationError): + Profile(email=email_input) diff --git a/packages/models-library/tests/test_errors.py b/packages/models-library/tests/test_errors.py index bbdf47a452c..82cf979e463 100644 --- a/packages/models-library/tests/test_errors.py +++ 
b/packages/models-library/tests/test_errors.py @@ -3,23 +3,20 @@ # pylint: disable=unused-variable -from typing import List - import pytest from models_library.errors import ErrorDict -from pydantic import BaseModel, ValidationError, conint - - -class B(BaseModel): - y: List[int] - - -class A(BaseModel): - x: conint(ge=2) - b: B +from pydantic import BaseModel, Field, ValidationError +from pydantic.version import version_short +from typing_extensions import Annotated def test_pydantic_error_dict(): + class B(BaseModel): + y: list[int] + + class A(BaseModel): + x: Annotated[int, Field(ge=2)] + b: B with pytest.raises(ValidationError) as exc_info: A(x=-1, b={"y": [0, "wrong"]}) @@ -27,7 +24,7 @@ def test_pydantic_error_dict(): assert isinstance(exc_info.value, ValidationError) # demos ValidationError.errors() work - errors: List[ErrorDict] = exc_info.value.errors() + errors: list[ErrorDict] = exc_info.value.errors() assert len(errors) == 2 # checks ErrorDict interface @@ -39,13 +36,15 @@ def _copy(d, exclude): return {k: v for k, v in d.items() if k not in exclude} assert _copy(errors[0], exclude={"msg"}) == { + "ctx": {"ge": 2}, + "input": -1, "loc": ("x",), - # "msg": "ensure this value is...equal to 2", - "type": "value_error.number.not_ge", - "ctx": {"limit_value": 2}, + "type": "greater_than_equal", + "url": f"https://errors.pydantic.dev/{version_short()}/v/greater_than_equal", } assert _copy(errors[1], exclude={"msg"}) == { + "input": "wrong", "loc": ("b", "y", 1), - # "msg": "value is not a valid integer", - "type": "type_error.integer", + "type": "int_parsing", + "url": f"https://errors.pydantic.dev/{version_short()}/v/int_parsing", } diff --git a/packages/models-library/tests/test_function_services_catalog.py b/packages/models-library/tests/test_function_services_catalog.py index 8602767a582..b5f0c21b0bc 100644 --- a/packages/models-library/tests/test_function_services_catalog.py +++ b/packages/models-library/tests/test_function_services_catalog.py @@ -15,15 +15,14 @@ is_function_service, iter_service_docker_data, ) -from models_library.services import ServiceDockerData -from pytest import MonkeyPatch +from models_library.services import ServiceMetaDataPublished @pytest.mark.parametrize( "image_metadata", iter_service_docker_data(), ids=lambda obj: obj.name ) def test_create_frontend_services_metadata(image_metadata): - assert isinstance(image_metadata, ServiceDockerData) + assert isinstance(image_metadata, ServiceMetaDataPublished) assert is_function_service(image_metadata.key) @@ -32,7 +31,7 @@ def test_catalog_frontend_services_registry(): registry = {(s.key, s.version): s for s in iter_service_docker_data()} for s in registry.values(): - print(s.json(exclude_unset=True, indent=1)) + print(s.model_dump_json(exclude_unset=True, indent=1)) # one version per front-end service? 
versions_per_service = defaultdict(list) @@ -42,7 +41,7 @@ def test_catalog_frontend_services_registry(): assert not any(len(v) > 1 for v in versions_per_service.values()) -def test_catalog_registry(monkeypatch: MonkeyPatch): +def test_catalog_registry(monkeypatch: pytest.MonkeyPatch): assert catalog._functions assert catalog.settings diff --git a/packages/models-library/tests/test_generics.py b/packages/models-library/tests/test_generics.py index cfb908f985a..f94436f1214 100644 --- a/packages/models-library/tests/test_generics.py +++ b/packages/models-library/tests/test_generics.py @@ -1,9 +1,17 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + from pathlib import Path from typing import Any import pytest from faker import Faker from models_library.generics import DictModel, Envelope +from pydantic import BaseModel, ValidationError +from pydantic.version import version_short def test_dict_base_model(): @@ -12,21 +20,21 @@ def test_dict_base_model(): "another key": "a string value", "yet another key": Path("some_path"), } - some_instance = DictModel[str, Any].parse_obj(some_dict) + some_instance = DictModel[str, Any].model_validate(some_dict) assert some_instance # test some typical dict methods assert len(some_instance) == 3 - for k, k2 in zip(some_dict, some_instance): + for k, k2 in zip(some_dict, some_instance, strict=False): assert k == k2 - for k, k2 in zip(some_dict.keys(), some_instance.keys()): + for k, k2 in zip(some_dict.keys(), some_instance.keys(), strict=False): assert k == k2 - for v, v2 in zip(some_dict.values(), some_instance.values()): + for v, v2 in zip(some_dict.values(), some_instance.values(), strict=False): assert v == v2 - for i, i2 in zip(some_dict.items(), some_instance.items()): + for i, i2 in zip(some_dict.items(), some_instance.items(), strict=False): assert i == i2 assert some_instance.get("a key") == 123 @@ -39,20 +47,95 @@ def test_dict_base_model(): assert some_instance["a new key"] == 23 -def test_data_enveloped(faker: Faker): - some_enveloped_string = Envelope[str]() - assert some_enveloped_string - assert not some_enveloped_string.data - assert not some_enveloped_string.error - - random_float = faker.pyfloat() - some_enveloped_float = Envelope[float](data=random_float) - assert some_enveloped_float - assert some_enveloped_float.data == random_float - assert not some_enveloped_float.error - +def test_enveloped_error_str(faker: Faker): random_text = faker.text() some_enveloped_bool = Envelope[bool](error=random_text) assert some_enveloped_bool assert not some_enveloped_bool.data assert some_enveloped_bool.error == random_text + + +@pytest.fixture +def builtin_value(faker: Faker, builtin_type: type) -> Any: + return { + "str": faker.pystr(), + "float": faker.pyfloat(), + "int": faker.pyint(), + "bool": faker.pybool(), + "dict": faker.pydict(), + "tuple": faker.pytuple(), + "set": faker.pyset(), + }[builtin_type.__name__] + + +@pytest.mark.parametrize( + "builtin_type", [str, float, int, bool, tuple, set], ids=lambda x: x.__name__ +) +def test_enveloped_data_builtin(builtin_type: type, builtin_value: Any): + # constructors + envelope = Envelope[builtin_type](data=builtin_value) + + assert envelope == Envelope[builtin_type].from_data(builtin_value) + + # exports + assert envelope.model_dump(exclude_unset=True, exclude_none=True) == { + "data": builtin_value + } + assert envelope.model_dump() == {"data": builtin_value, "error": None} + + +def 
test_enveloped_data_model(): + class User(BaseModel): + idr: int + name: str = "Jane Doe" + + enveloped = Envelope[User](data={"idr": 3}) + + assert isinstance(enveloped.data, User) + assert enveloped.model_dump(exclude_unset=True, exclude_none=True) == { + "data": {"idr": 3} + } + + +def test_enveloped_data_dict(): + # error + with pytest.raises(ValidationError) as err_info: + Envelope[dict](data="not-a-dict") + + error: ValidationError = err_info.value + assert error.errors() == [ + { + "input": "not-a-dict", + "loc": ("data",), + "msg": "Input should be a valid dictionary", + "type": "dict_type", + "url": f"https://errors.pydantic.dev/{version_short()}/v/dict_type", + } + ] + + # empty dict + enveloped = Envelope[dict](data={}) + assert enveloped.data == {} + assert enveloped.error is None + + +def test_enveloped_data_list(): + # error + with pytest.raises(ValidationError) as err_info: + Envelope[list](data="not-a-list") + + error: ValidationError = err_info.value + assert error.errors() == [ + { + "input": "not-a-list", + "loc": ("data",), + "msg": "Input should be a valid list", + "type": "list_type", + "url": f"https://errors.pydantic.dev/{version_short()}/v/list_type", + } + ] + + # empty list + enveloped = Envelope[list](data=[]) + assert enveloped.data == [] + assert enveloped.error is None diff --git a/packages/models-library/tests/test_licenses.py b/packages/models-library/tests/test_licenses.py new file mode 100644 index 00000000000..67e4bff9c11 --- /dev/null +++ b/packages/models-library/tests/test_licenses.py @@ -0,0 +1,38 @@ +from models_library.api_schemas_webserver.licensed_items import LicensedItemRestGet +from models_library.licenses import LicensedItem +from pydantic import ConfigDict + + +def test_licensed_item_from_domain_model(): + for example in LicensedItem.model_json_schema()["examples"]: + item = LicensedItem.model_validate(example) + + got = LicensedItemRestGet.from_domain_model(item) + + assert item.display_name == got.display_name + + # nullable doi + assert ( + got.licensed_resources[0].source.doi + == item.licensed_resources[0]["source"]["doi"] + ) + + # date is required + assert got.licensed_resources[0].source.features["date"] + + # id is required + assert ( + got.licensed_resources[0].source.id + == item.licensed_resources[0]["source"]["id"] + ) + + # checks unset fields + assert "category_icon" not in got.licensed_resources[0].model_fields_set + + +def test_strict_check_of_examples(): + class TestLicensedItemRestGet(LicensedItemRestGet): + model_config = ConfigDict(extra="forbid") + + for example in LicensedItemRestGet.model_json_schema()["examples"]: + TestLicensedItemRestGet.model_validate(example) diff --git a/packages/models-library/tests/test_osparc_variable_identifier.py b/packages/models-library/tests/test_osparc_variable_identifier.py new file mode 100644 index 00000000000..cb23b19f60a --- /dev/null +++ b/packages/models-library/tests/test_osparc_variable_identifier.py @@ -0,0 +1,167 @@ +# pylint: disable=redefined-outer-name + +from typing import Any + +import pytest +from models_library.osparc_variable_identifier import ( + OsparcVariableIdentifier, + UnresolvedOsparcVariableIdentifierError, + raise_if_unresolved, + raise_if_unresolved_osparc_variable_identifier_found, + replace_osparc_variable_identifier, +) +from pydantic import BaseModel, TypeAdapter, ValidationError + +VALID_IDENTIFIERS: list[str] = [ + "$OSPARC_VARIABLE_One121_", + "$OSPARC_VARIABLE_121Asdasd_", + "$OSPARC_VARIABLE_1212aaS_", + "${OSPARC_VARIABLE_ONE}", + 
"${OSPARC_VARIABLE_1}", + "${OSPARC_VARIABLE_1:-default_value}", + "${OSPARC_VARIABLE_1:-{}}", + "${OSPARC_VARIABLE_1:-}", + "$$OSPARC_VARIABLE_One121_", + "$$OSPARC_VARIABLE_121Asdasd_", + "$$OSPARC_VARIABLE_1212aaS_", + "$${OSPARC_VARIABLE_ONE}", + "$${OSPARC_VARIABLE_1}", + "$${OSPARC_VARIABLE_1:-default_value}", + "$${OSPARC_VARIABLE_1:-{}}", + "$${OSPARC_VARIABLE_1:-}", +] + +INVALID_IDENTIFIERS: list[str] = [ + "${OSPARC_VARIABLE_1:default_value}", + "${OSPARC_VARIABLE_1:{}}", + "${OSPARC_VARIABLE_1:}", + "${OSPARC_VARIABLE_1-default_value}", + "${OSPARC_VARIABLE_1-{}}", + "${OSPARC_VARIABLE_1-}", +] + + +_OSPARC_VARIABLE_IDENTIFIER_ADAPTER: TypeAdapter[ + OsparcVariableIdentifier +] = TypeAdapter(OsparcVariableIdentifier) + + +@pytest.fixture(params=VALID_IDENTIFIERS) +def osparc_variable_identifier_str(request: pytest.FixtureRequest) -> str: + return request.param + + +@pytest.fixture +def identifier( + osparc_variable_identifier_str: str, +) -> OsparcVariableIdentifier: + return _OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python( + osparc_variable_identifier_str + ) + + +@pytest.mark.parametrize("invalid_var_name", INVALID_IDENTIFIERS) +def test_osparc_variable_identifier_does_not_validate(invalid_var_name: str): + with pytest.raises(ValidationError): + _OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python(invalid_var_name) + + +def test_raise_if_unresolved(identifier: OsparcVariableIdentifier): + def example_func(par: OsparcVariableIdentifier | int) -> None: + _ = 12 + raise_if_unresolved(par) + + example_func(1) + + with pytest.raises(UnresolvedOsparcVariableIdentifierError): + example_func(identifier) + + +class Example(BaseModel): + nested_objects: OsparcVariableIdentifier | str + + +@pytest.mark.parametrize( + "object_template", + [ + _OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python("$OSPARC_VARIABLE_1"), + [_OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python("$OSPARC_VARIABLE_1")], + (_OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python("$OSPARC_VARIABLE_1"),), + {_OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python("$OSPARC_VARIABLE_1")}, + { + "test": _OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python( + "$OSPARC_VARIABLE_1" + ) + }, + Example( + nested_objects=_OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python( + "$OSPARC_VARIABLE_1" + ) + ), + ], +) +def test_raise_if_unresolved_osparc_variable_identifier_found(object_template: Any): + with pytest.raises(UnresolvedOsparcVariableIdentifierError): + raise_if_unresolved_osparc_variable_identifier_found(object_template) + + replaced = replace_osparc_variable_identifier( + object_template, {"OSPARC_VARIABLE_1": "1"} + ) + raise_if_unresolved_osparc_variable_identifier_found(replaced) + assert "OSPARC_VARIABLE_1" not in f"{replaced}" + + +@pytest.mark.parametrize( + "str_identifier, expected_osparc_variable_name, expected_default_value", + list( + zip( + VALID_IDENTIFIERS, + [ + "OSPARC_VARIABLE_One121_", + "OSPARC_VARIABLE_121Asdasd_", + "OSPARC_VARIABLE_1212aaS_", + "OSPARC_VARIABLE_ONE", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_One121_", + "OSPARC_VARIABLE_121Asdasd_", + "OSPARC_VARIABLE_1212aaS_", + "OSPARC_VARIABLE_ONE", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + "OSPARC_VARIABLE_1", + ], + [ + None, + None, + None, + None, + None, + "default_value", + "{}", + "", + None, + None, + None, + None, + None, + "default_value", + "{}", + "", + ], + strict=True, + ) + ), +) +def test_osparc_variable_name_and_default_value( + 
str_identifier: str, + expected_osparc_variable_name: str, + expected_default_value: str | None, +): + osparc_variable_identifer = _OSPARC_VARIABLE_IDENTIFIER_ADAPTER.validate_python( + str_identifier + ) + assert osparc_variable_identifer.name == expected_osparc_variable_name + assert osparc_variable_identifer.default_value == expected_default_value diff --git a/packages/models-library/tests/test_project_networks.py b/packages/models-library/tests/test_project_networks.py index 85465159c15..a929ac2a0aa 100644 --- a/packages/models-library/tests/test_project_networks.py +++ b/packages/models-library/tests/test_project_networks.py @@ -1,7 +1,4 @@ # pylint: disable=redefined-outer-name -import json -from pprint import pformat -from typing import Any, Dict, Type from uuid import UUID import pytest @@ -9,27 +6,8 @@ DockerNetworkAlias, DockerNetworkName, NetworksWithAliases, - ProjectsNetworks, ) -from pydantic import BaseModel, ValidationError, parse_obj_as - - -@pytest.mark.parametrize( - "model_cls", - ( - ProjectsNetworks, - NetworksWithAliases, - ), -) -def test_service_settings_model_examples( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] -) -> None: - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - - model_instance = model_cls.parse_obj(example) - assert json.loads(model_instance.json()) == example - assert model_instance.json() == json.dumps(example) +from pydantic import TypeAdapter, ValidationError @pytest.mark.parametrize( @@ -40,8 +18,8 @@ def test_service_settings_model_examples( {"shr-ntwrk_5c743ad2-8fdb-11ec-bb3a-02420a000008_default": {}}, ], ) -def test_networks_with_aliases_ok(valid_example: Dict) -> None: - assert NetworksWithAliases.parse_obj(valid_example) +def test_networks_with_aliases_ok(valid_example: dict) -> None: + assert NetworksWithAliases.model_validate(valid_example) @pytest.mark.parametrize( @@ -59,28 +37,28 @@ def test_networks_with_aliases_ok(valid_example: Dict) -> None: {"i_am_ok": {"5057e2c1-d392-4d31-b5c8-19f3db780390": "1_I_AM_INVALID"}}, ], ) -def test_networks_with_aliases_fail(invalid_example: Dict) -> None: +def test_networks_with_aliases_fail(invalid_example: dict) -> None: with pytest.raises(ValidationError): - assert NetworksWithAliases.parse_obj(invalid_example) + assert NetworksWithAliases.model_validate(invalid_example) @pytest.mark.parametrize("network_name", ["a", "ok", "a_", "A_", "a1", "a-"]) def test_projects_networks_validation(network_name: str) -> None: - assert parse_obj_as(DockerNetworkName, network_name) == network_name - assert parse_obj_as(DockerNetworkAlias, network_name) == network_name + assert TypeAdapter(DockerNetworkName).validate_python(network_name) == network_name + assert TypeAdapter(DockerNetworkAlias).validate_python(network_name) == network_name @pytest.mark.parametrize("network_name", ["", "1", "-", "_"]) def test_projects_networks_validation_fails(network_name: str) -> None: with pytest.raises(ValidationError): - parse_obj_as(DockerNetworkName, network_name) + TypeAdapter(DockerNetworkName).validate_python(network_name) with pytest.raises(ValidationError): - parse_obj_as(DockerNetworkAlias, network_name) + TypeAdapter(DockerNetworkAlias).validate_python(network_name) def test_class_constructors_fail() -> None: with pytest.raises(ValidationError): - NetworksWithAliases.parse_obj( + NetworksWithAliases.model_validate( { "ok-netowrk_naeme": { UUID( diff --git a/packages/models-library/tests/test_project_nodes.py 
b/packages/models-library/tests/test_project_nodes.py index bbdb2079dee..54a5d14bf24 100644 --- a/packages/models-library/tests/test_project_nodes.py +++ b/packages/models-library/tests/test_project_nodes.py @@ -1,8 +1,9 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument +# pylint:disable=no-member # pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +# pylint:disable=unused-variable -from typing import Any, Dict +from typing import Any import pytest from models_library.projects_nodes import Node @@ -10,20 +11,21 @@ @pytest.fixture() -def minimal_node_data_sample() -> Dict[str, Any]: - return dict( - key="simcore/services/dynamic/3dviewer", - version="1.3.0-alpha", - label="3D viewer human message", - ) +def minimal_node_data_sample() -> dict[str, Any]: + return { + "key": "simcore/services/dynamic/3dviewer", + "version": "1.3.0-alpha", + "label": "3D viewer human message", + } -def test_create_minimal_node(minimal_node_data_sample: Dict[str, Any]): +def test_create_minimal_node(minimal_node_data_sample: dict[str, Any]): node = Node(**minimal_node_data_sample) # a nice way to see how the simplest node looks like assert node.inputs == {} assert node.outputs == {} + assert node.state is not None assert node.state.current_status == RunningState.NOT_STARTED assert node.state.modified is True assert node.state.dependencies == set() @@ -31,11 +33,11 @@ def test_create_minimal_node(minimal_node_data_sample: Dict[str, Any]): assert node.parent is None assert node.progress is None - assert node.dict(exclude_unset=True) == minimal_node_data_sample + assert node.model_dump(exclude_unset=True) == minimal_node_data_sample def test_create_minimal_node_with_new_data_type( - minimal_node_data_sample: Dict[str, Any] + minimal_node_data_sample: dict[str, Any] ): old_node_data = minimal_node_data_sample # found some old data with this aspect @@ -57,7 +59,7 @@ def test_create_minimal_node_with_new_data_type( assert node.state.dependencies == set() -def test_backwards_compatibility_node_data(minimal_node_data_sample: Dict[str, Any]): +def test_backwards_compatibility_node_data(minimal_node_data_sample: dict[str, Any]): old_node_data = minimal_node_data_sample # found some old data with this aspect old_node_data.update({"thumbnail": "", "state": "FAILURE"}) @@ -69,4 +71,4 @@ def test_backwards_compatibility_node_data(minimal_node_data_sample: Dict[str, A assert node.state.modified is True assert node.state.dependencies == set() - assert node.dict(exclude_unset=True) != old_node_data + assert node.model_dump(exclude_unset=True) != old_node_data diff --git a/packages/models-library/tests/test_project_nodes_io.py b/packages/models-library/tests/test_project_nodes_io.py index 0fbe03f2980..056f5cc7b46 100644 --- a/packages/models-library/tests/test_project_nodes_io.py +++ b/packages/models-library/tests/test_project_nodes_io.py @@ -2,33 +2,40 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -from pprint import pformat -from typing import Any, Dict -from uuid import uuid4 +from typing import Any, Final +from uuid import UUID import pytest +from faker import Faker from models_library.projects_nodes import Node, PortLink -from models_library.projects_nodes_io import DatCoreFileLink, SimCoreFileLink +from models_library.projects_nodes_io import ( + DatCoreFileLink, + SimCoreFileLink, + SimcoreS3DirectoryID, + SimcoreS3FileID, +) +from models_library.users import UserID +from pydantic import TypeAdapter, ValidationError + +UUID_0: Final[str] = 
f"{UUID(int=0)}" +USER_ID_0: Final[UserID] = 0 @pytest.fixture() -def minimal_simcore_file_link() -> Dict[str, Any]: - return dict( - store=0, - path=f"{uuid4()}/{uuid4()}/file.ext", - ) +def minimal_simcore_file_link(faker: Faker) -> dict[str, Any]: + return {"store": 0, "path": f"{faker.uuid4()}/{faker.uuid4()}/file.ext"} -def test_simcore_file_link_default_label(minimal_simcore_file_link: Dict[str, Any]): +def test_simcore_file_link_default_label(minimal_simcore_file_link: dict[str, Any]): simcore_file_link = SimCoreFileLink(**minimal_simcore_file_link) assert simcore_file_link.store == minimal_simcore_file_link["store"] assert simcore_file_link.path == minimal_simcore_file_link["path"] assert simcore_file_link.label == "file.ext" - assert simcore_file_link.e_tag == None + assert simcore_file_link.e_tag is None -def test_simcore_file_link_with_label(minimal_simcore_file_link: Dict[str, Any]): +def test_simcore_file_link_with_label(minimal_simcore_file_link: dict[str, Any]): old_link = minimal_simcore_file_link old_link.update({"label": "some new label that is amazing"}) simcore_file_link = SimCoreFileLink(**old_link) @@ -36,18 +43,7 @@ def test_simcore_file_link_with_label(minimal_simcore_file_link: Dict[str, Any]) assert simcore_file_link.store == minimal_simcore_file_link["store"] assert simcore_file_link.path == minimal_simcore_file_link["path"] assert simcore_file_link.label == "some new label that is amazing" - assert simcore_file_link.e_tag == None - - -@pytest.mark.parametrize("model_cls", (SimCoreFileLink, DatCoreFileLink)) -def test_project_nodes_io_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - - model_instance = model_cls(**example) - - assert model_instance, f"Failed with {name}" - print(name, ":", model_instance) + assert simcore_file_link.e_tag is None def test_store_discriminator(): @@ -106,9 +102,15 @@ def test_store_discriminator(): }, } - datacore_node = Node.parse_obj(workbench["89f95b67-a2a3-4215-a794-2356684deb61"]) - rawgraph_node = Node.parse_obj(workbench["88119776-e869-4df2-a529-4aae9d9fa35c"]) - simcore_node = Node.parse_obj(workbench["75c1707c-ec1c-49ac-a7bf-af6af9088f38"]) + datacore_node = Node.model_validate( + workbench["89f95b67-a2a3-4215-a794-2356684deb61"] + ) + rawgraph_node = Node.model_validate( + workbench["88119776-e869-4df2-a529-4aae9d9fa35c"] + ) + simcore_node = Node.model_validate( + workbench["75c1707c-ec1c-49ac-a7bf-af6af9088f38"] + ) # must cast to the right subclass within project_nodes.py's InputTypes and OutputTypes unions assert datacore_node.outputs @@ -117,3 +119,82 @@ def test_store_discriminator(): assert isinstance(simcore_node.outputs["outFile"], SimCoreFileLink) assert rawgraph_node.inputs assert isinstance(rawgraph_node.inputs["input_1"], PortLink) + + +def test_simcore_s3_directory_id(): + # the only allowed path is the following + result = TypeAdapter(SimcoreS3DirectoryID).validate_python( + f"{UUID_0}/{UUID_0}/ok-simcore-dir/" + ) + assert result == f"{UUID_0}/{UUID_0}/ok-simcore-dir/" + + # re-parsing must work the same thing works + assert TypeAdapter(SimcoreS3DirectoryID).validate_python(result) + + # all below are not allowed + for invalid_path in ( + f"{UUID_0}/{UUID_0}/a-file", + f"{UUID_0}/{UUID_0}/a-dir/a-file", + ): + with pytest.raises(ValidationError): + TypeAdapter(SimcoreS3DirectoryID).validate_python(invalid_path) + + with pytest.raises(ValidationError, match="Not allowed subdirectory found in"): + 
TypeAdapter(SimcoreS3DirectoryID).validate_python( + f"{UUID_0}/{UUID_0}/a-dir/a-subdir/" + ) + + +@pytest.mark.parametrize( + "s3_object, expected", + [ + ( + f"{UUID_0}/{UUID_0}/just-a-dir/", + f"{UUID_0}/{UUID_0}/just-a-dir/", + ), + ( + f"{UUID_0}/{UUID_0}/a-dir/a-file", + f"{UUID_0}/{UUID_0}/a-dir/", + ), + ( + f"{UUID_0}/{UUID_0}/a-dir/another-dir/a-file", + f"{UUID_0}/{UUID_0}/a-dir/", + ), + ( + f"{UUID_0}/{UUID_0}/a-dir/a/b/c/d/e/f/g/h/file.py", + f"{UUID_0}/{UUID_0}/a-dir/", + ), + ], +) +def test_simcore_s3_directory_id_from_simcore_s3_file_id(s3_object: str, expected: str): + result = SimcoreS3DirectoryID.from_simcore_s3_object(s3_object) + assert f"{result}" == expected + + +def test_simcore_s3_directory_get_parent(): + # pylint: disable=protected-access + + with pytest.raises(ValueError, match="does not have enough parents, expected 4"): + SimcoreS3DirectoryID._get_parent("hello/object", parent_index=4) # noqa SLF001 + + with pytest.raises(ValueError, match="does not have enough parents, expected 4"): + SimcoreS3DirectoryID._get_parent("hello/object/", parent_index=4) # noqa SLF001 + with pytest.raises(ValueError, match="does not have enough parents, expected 4"): + SimcoreS3DirectoryID._get_parent( # noqa SLF001 + "/hello/object/", parent_index=4 + ) + + +@pytest.mark.parametrize( + "object_key", + [ + f"api/{UUID_0}/some-random-file.png", + f"exports/{USER_ID_0}/{UUID_0}.zip", + f"{UUID_0}/{UUID_0}/some-random-file.png", + f"api/{UUID_0}/some-path/some-random-file.png", + f"{UUID_0}/{UUID_0}/some-path/some-random-file.png", + ], +) +def test_simcore_s3_file_id_accepted_patterns(object_key: str): + file_id = TypeAdapter(SimcoreS3FileID).validate_python(object_key) + assert f"{file_id}" == object_key diff --git a/packages/models-library/tests/test_projects.py b/packages/models-library/tests/test_projects.py index a8384257090..86514df2da2 100644 --- a/packages/models-library/tests/test_projects.py +++ b/packages/models-library/tests/test_projects.py @@ -3,15 +3,17 @@ # pylint:disable=redefined-outer-name from copy import deepcopy -from typing import Any, Dict +from typing import Any import pytest from faker import Faker +from models_library.api_schemas_webserver.projects import ProjectPatch +from models_library.basic_types import _LONG_TRUNCATED_STR_MAX_LENGTH from models_library.projects import Project @pytest.fixture() -def minimal_project(faker: Faker) -> Dict[str, Any]: +def minimal_project(faker: Faker) -> dict[str, Any]: # API request body payload return { "uuid": faker.uuid4(), @@ -23,30 +25,37 @@ def minimal_project(faker: Faker) -> Dict[str, Any]: "creationDate": "2019-05-24T10:36:57.813Z", "lastChangeDate": "2019-05-24T10:36:57.813Z", "workbench": {}, + "type": "STANDARD", + "templateType": None, } -def test_project_minimal_model(minimal_project: Dict[str, Any]): - project = Project.parse_obj(minimal_project) +def test_project_minimal_model(minimal_project: dict[str, Any]): + project = Project.model_validate(minimal_project) assert project - assert project.thumbnail == None + assert project.thumbnail is None -def test_project_with_thumbnail_as_empty_string(minimal_project: Dict[str, Any]): +def test_project_with_thumbnail_as_empty_string(minimal_project: dict[str, Any]): thumbnail_empty_string = deepcopy(minimal_project) thumbnail_empty_string.update({"thumbnail": ""}) - project = Project.parse_obj(thumbnail_empty_string) + project = Project.model_validate(thumbnail_empty_string) assert project - assert project.thumbnail == None + assert project.thumbnail is None 
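Reviewer note: the recurring change across these test diffs is the pydantic v1 -> v2 API migration (`parse_obj_as`, `parse_obj`, `.dict()` and `.json()` replaced by `TypeAdapter(...).validate_python`, `model_validate`, `model_dump` and `model_dump_json`). As a quick reference, here is a minimal, self-contained sketch of that mapping; the `Item` model below is a toy example and not part of models-library:

```python
# Minimal sketch of the pydantic v1 -> v2 calls used throughout these tests.
# `Item` is a toy model for illustration only (not part of models-library).
from pydantic import BaseModel, TypeAdapter


class Item(BaseModel):
    name: str
    count: int = 0


data = {"name": "demo"}

# v1: parse_obj_as(Item, data)    -> v2: TypeAdapter(Item).validate_python(data)
item = TypeAdapter(Item).validate_python(data)

# v1: Item.parse_obj(data)        -> v2: Item.model_validate(data)
assert item == Item.model_validate(data)

# v1: item.dict() / item.json()   -> v2: item.model_dump() / item.model_dump_json()
assert item.model_dump() == {"name": "demo", "count": 0}
assert item.model_dump_json() == '{"name":"demo","count":0}'
```

The same pattern covers the schema-example lookups further down, where `Model.Config.schema_extra` becomes `Model.model_config["json_schema_extra"]`.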
-def test_project_type_in_models_package_same_as_in_postgres_database_package(): - from models_library.projects import ProjectType as ml_project_type - from simcore_postgres_database.models.projects import ProjectType as pg_project_type +def test_project_patch_truncates_description(): + # NOTE: checks https://github.com/ITISFoundation/osparc-simcore/issues/5988 + len_truncated = _LONG_TRUNCATED_STR_MAX_LENGTH - # pylint: disable=no-member - assert ( - ml_project_type.__members__.keys() == pg_project_type.__members__.keys() - ), f"The enum in models_library package and postgres package shall have the same values. models_pck: {ml_project_type.__members__}, postgres_pck: {pg_project_type.__members__}" + long_description = "X" * (len_truncated + 10) + assert len(long_description) > len_truncated + + update = ProjectPatch(description=long_description) + assert len(update.description) == len_truncated + + short_description = "X" + update = ProjectPatch(description=short_description) + assert len(update.description) == len(short_description) diff --git a/packages/models-library/tests/test_projects_nodes_ui.py b/packages/models-library/tests/test_projects_nodes_ui.py new file mode 100644 index 00000000000..25ce45b446d --- /dev/null +++ b/packages/models-library/tests/test_projects_nodes_ui.py @@ -0,0 +1,11 @@ +import pytest +from models_library.api_schemas_webserver.projects_nodes_ui import MarkerUI +from pydantic_extra_types.color import Color + + +@pytest.mark.parametrize( + "color_str,expected_color_str", [("#b7e28d", "#b7e28d"), ("Cyan", "#0ff")] +) +def test_marker_color_serialized_to_hex(color_str, expected_color_str): + m = MarkerUI(color=Color(color_str)) + assert m.model_dump_json() == f'{{"color":"{expected_color_str}"}}' diff --git a/packages/models-library/tests/test_projects_pipeline.py b/packages/models-library/tests/test_projects_pipeline.py deleted file mode 100644 index 0cbf054eaca..00000000000 --- a/packages/models-library/tests/test_projects_pipeline.py +++ /dev/null @@ -1,23 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -from pprint import pformat -from typing import Dict, Type - -import pytest -from models_library.projects_pipeline import ComputationTask -from pydantic import BaseModel - - -@pytest.mark.parametrize( - "model_cls", - (ComputationTask,), -) -def test_computation_task_model_examples( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict] -): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" diff --git a/packages/models-library/tests/test_projects_state.py b/packages/models-library/tests/test_projects_state.py index d4b73b689a1..08493f9f3b1 100644 --- a/packages/models-library/tests/test_projects_state.py +++ b/packages/models-library/tests/test_projects_state.py @@ -1,24 +1,15 @@ -from pprint import pformat - import pytest from models_library.projects_state import ProjectLocked, ProjectStatus -@pytest.mark.parametrize( - "model_cls", - (ProjectLocked,), -) -def test_projects_state_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - def test_project_locked_with_missing_owner_raises(): with pytest.raises(ValueError): - ProjectLocked(**{"value": True, "status": ProjectStatus.OPENED}) - 
ProjectLocked.parse_obj({"value": False, "status": ProjectStatus.OPENED}) + ProjectLocked(value=True, status=ProjectStatus.OPENED) + ProjectLocked.model_validate({"value": False, "status": ProjectStatus.OPENED}) + + +def test_project_locked_with_missing_owner_ok_during_maintaining(): + ProjectLocked.model_validate({"value": True, "status": ProjectStatus.MAINTAINING}) @pytest.mark.parametrize( @@ -32,4 +23,4 @@ def test_project_locked_with_missing_owner_raises(): ) def test_project_locked_with_allowed_values(lock: bool, status: ProjectStatus): with pytest.raises(ValueError): - ProjectLocked.parse_obj({"value": lock, "status": status}) + ProjectLocked.model_validate({"value": lock, "status": status}) diff --git a/packages/models-library/tests/test_projects_ui.py b/packages/models-library/tests/test_projects_ui.py new file mode 100644 index 00000000000..99ede58e231 --- /dev/null +++ b/packages/models-library/tests/test_projects_ui.py @@ -0,0 +1,14 @@ +import pytest +from models_library.api_schemas_webserver.projects_ui import AnnotationUI +from pydantic_extra_types.color import Color + + +@pytest.mark.parametrize( + "color_str,expected_color_str", [("#b7e28d", "#b7e28d"), ("Cyan", "#0ff")] +) +def test_annotation_color_serialized_to_hex(color_str, expected_color_str): + m = AnnotationUI(type="text", color=Color(color_str), attributes={}) + assert ( + m.model_dump_json() + == f'{{"type":"text","color":"{expected_color_str}","attributes":{{}}}}' + ) diff --git a/packages/models-library/tests/test_rabbit_messages.py b/packages/models-library/tests/test_rabbit_messages.py index 7819568b9f1..a07f9deccaf 100644 --- a/packages/models-library/tests/test_rabbit_messages.py +++ b/packages/models-library/tests/test_rabbit_messages.py @@ -1,13 +1,12 @@ -from typing import Union - import pytest from faker import Faker +from models_library.progress_bar import ProgressReport from models_library.rabbitmq_messages import ( ProgressRabbitMessageNode, ProgressRabbitMessageProject, ProgressType, ) -from pydantic import parse_raw_as +from pydantic import TypeAdapter faker = Faker() @@ -17,34 +16,29 @@ [ pytest.param( ProgressRabbitMessageNode( - **{ - "project_id": faker.uuid4(cast_to=None), - "user_id": faker.uuid4(cast_to=None), - "node_id": faker.uuid4(cast_to=None), - "progress_type": ProgressType.SERVICE_OUTPUTS_PULLING, - "progress": 0.4, - } - ).json(), + project_id=faker.uuid4(cast_to=None), + user_id=faker.pyint(min_value=1), + node_id=faker.uuid4(cast_to=None), + progress_type=ProgressType.SERVICE_OUTPUTS_PULLING, + report=ProgressReport(actual_value=0.4, total=1), + ).model_dump_json(), ProgressRabbitMessageNode, id="node_progress", ), pytest.param( ProgressRabbitMessageProject( - **{ - "project_id": faker.uuid4(cast_to=None), - "user_id": faker.uuid4(cast_to=None), - "progress_type": ProgressType.PROJECT_CLOSING, - "progress": 0.4, - } - ).json(), + project_id=faker.uuid4(cast_to=None), + user_id=faker.pyint(min_value=1), + progress_type=ProgressType.PROJECT_CLOSING, + report=ProgressReport(actual_value=0.4, total=1), + ).model_dump_json(), ProgressRabbitMessageProject, id="project_progress", ), ], ) async def test_raw_message_parsing(raw_data: str, class_type: type): - result = parse_raw_as( - Union[ProgressRabbitMessageNode, ProgressRabbitMessageProject], - raw_data, - ) - assert type(result) == class_type + result = TypeAdapter( + ProgressRabbitMessageNode | ProgressRabbitMessageProject + ).validate_json(raw_data) + assert type(result) is class_type diff --git 
a/packages/models-library/tests/test_resource_tracker.py b/packages/models-library/tests/test_resource_tracker.py new file mode 100644 index 00000000000..5871c9b8f73 --- /dev/null +++ b/packages/models-library/tests/test_resource_tracker.py @@ -0,0 +1,19 @@ +import pytest +from models_library.resource_tracker import HardwareInfo +from pydantic import ValidationError + + +@pytest.mark.parametrize( + "aws_ec2_instances, raises_error", + [ + (["1", "2"], True), + (["1"], False), + ([], False), + ], +) +def test_hardware_info_warning(aws_ec2_instances: list[str], raises_error: bool): + if raises_error: + with pytest.raises(ValidationError, match="Only 1 entry is supported"): + HardwareInfo(aws_ec2_instances=aws_ec2_instances) + else: + HardwareInfo(aws_ec2_instances=aws_ec2_instances) diff --git a/packages/models-library/tests/test_rest_filters.py b/packages/models-library/tests/test_rest_filters.py new file mode 100644 index 00000000000..1b470fc1767 --- /dev/null +++ b/packages/models-library/tests/test_rest_filters.py @@ -0,0 +1,63 @@ +import logging + +import pytest +from models_library.rest_filters import Filters, FiltersQueryParameters +from pydantic import ConfigDict, ValidationError + + +# 1. create filter model +class CustomFilter(Filters): + is_trashed: bool | None = None + is_hidden: bool | None = None + + +class CustomFilterStrict(CustomFilter): + model_config = ConfigDict(extra="forbid") + + +def test_custom_filter_query_parameters(): + + # 2. use generic as query parameters + logging.info( + "json schema is for the query \n %s", + FiltersQueryParameters[CustomFilter].model_json_schema(), + ) + + # let's set only is_trashed and leave is_hidden unset + custom_filter = CustomFilter(is_trashed=True) + assert custom_filter.model_dump_json() == '{"is_trashed":true,"is_hidden":null}' + + # defaults to None (optional) + query_param = FiltersQueryParameters[CustomFilter]() + assert query_param.filters is None + + +@pytest.mark.parametrize( + "url_query_value,expects", + [ + ('{"is_trashed": true, "is_hidden": null}', CustomFilter(is_trashed=True)), + ('{"is_trashed": true}', CustomFilter(is_trashed=True)), + (None, None), + ], +) +def test_valid_filter_queries( + url_query_value: str | None, expects: CustomFilter | None +): + query_param = FiltersQueryParameters[CustomFilter](filters=url_query_value) + assert query_param.filters == expects + + +def test_invalid_filter_query_is_ignored(): + # NOTE: invalid filters get ignored!
+ url_query_value = '{"undefined_filter": true, "is_hidden": true}' + + query_param = FiltersQueryParameters[CustomFilter](filters=url_query_value) + assert query_param.filters == CustomFilter(is_hidden=True) + + +def test_invalid_filter_query_fails(): + # with pydantic1 this used to not pass but now passes + url_query_value = '{"undefined_filter": true, "is_hidden": true}' + + with pytest.raises(ValidationError): + FiltersQueryParameters[CustomFilterStrict](filters=url_query_value) diff --git a/packages/models-library/tests/test_rest_ordering.py b/packages/models-library/tests/test_rest_ordering.py new file mode 100644 index 00000000000..f7b9673c059 --- /dev/null +++ b/packages/models-library/tests/test_rest_ordering.py @@ -0,0 +1,239 @@ +import pickle + +import pytest +from common_library.json_serialization import json_dumps +from models_library.basic_types import IDStr +from models_library.rest_ordering import ( + OrderBy, + OrderDirection, + create_ordering_query_model_class, +) +from pydantic import ( + BaseModel, + ConfigDict, + Field, + Json, + TypeAdapter, + ValidationError, + field_validator, +) + + +class ReferenceOrderQueryParamsClass(BaseModel): + # NOTE: this class is a copy of `FolderListSortParams` from + # services/web/server/src/simcore_service_webserver/folders/_models.py + # and used as a reference in these tests to ensure the same functionality + + # pylint: disable=unsubscriptable-object + order_by: Json[OrderBy] = Field( + default=OrderBy(field=IDStr("modified_at"), direction=OrderDirection.DESC), + description="Order by field (modified_at|name|description) and direction (asc|desc). The default sorting order is ascending.", + json_schema_extra={"examples": ['{"field": "name", "direction": "desc"}']}, + ) + + @field_validator("order_by", check_fields=False) + @classmethod + def _validate_order_by_field(cls, v): + if v.field not in { + "modified_at", + "name", + "description", + }: + msg = f"We do not support ordering by provided field {v.field}" + raise ValueError(msg) + if v.field == "modified_at": + v.field = "modified_column" + return v + + model_config = ConfigDict( + extra="forbid", + ) + + +@pytest.mark.xfail( + reason="create_ordering_query_model_class.._OrderBy is still not pickable" +) +def test_pickle_ordering_query_model_class(): + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"name", "description"}, + default=OrderBy(field=IDStr("name"), direction=OrderDirection.DESC), + ) + + data = {"order_by": {"field": "name", "direction": "asc"}} + query_model = OrderQueryParamsModel.model_validate(data) + + # https://docs.pydantic.dev/latest/concepts/serialization/#pickledumpsmodel + expected = query_model.order_by + + # see https://github.com/ITISFoundation/osparc-simcore/pull/6828 + # FAILURE: raises `AttributeError: Can't pickle local object 'create_ordering_query_model_class.._OrderBy'` + data = pickle.dumps(expected) + + loaded = pickle.loads(data) + assert loaded == expected + + +def test_conversion_order_by_from_query_to_domain_model(): + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified_at", "name", "description"}, + default=OrderBy(field=IDStr("modified_at"), direction=OrderDirection.DESC), + ) + + # normal + data = {"order_by": {"field": "modified_at", "direction": "asc"}} + query_model = OrderQueryParamsModel.model_validate(data) + + expected_data = data["order_by"] + + assert type(query_model.order_by) is not OrderBy + assert isinstance(query_model.order_by, OrderBy) + + # NOTE: This 
does NOT convert to OrderBy but has correct data + order_by = TypeAdapter(OrderBy).validate_python( + query_model.order_by, from_attributes=True + ) + assert type(order_by) is not OrderBy + assert order_by.model_dump(mode="json") == expected_data + + order_by = OrderBy.model_validate(query_model.order_by.model_dump()) + assert type(order_by) is OrderBy + assert order_by.model_dump(mode="json") == expected_data + + # NOTE: This does NOT convert to OrderBy but has correct data + order_by = OrderBy.model_validate(query_model.order_by, from_attributes=True) + assert type(order_by) is not OrderBy + assert order_by.model_dump(mode="json") == expected_data + + order_by = OrderBy(**query_model.order_by.model_dump()) + assert type(order_by) is OrderBy + assert order_by.model_dump(mode="json") == expected_data + + # we should use this !!! + order_by = OrderBy.model_construct(**query_model.order_by.model_dump()) + assert type(order_by) is OrderBy + assert order_by.model_dump(mode="json") == expected_data + + +def test_ordering_query_model_class_factory(): + BaseOrderingQueryModel = create_ordering_query_model_class( + ordering_fields={"modified_at", "name", "description"}, + default=OrderBy(field=IDStr("modified_at"), direction=OrderDirection.DESC), + ordering_fields_api_to_column_map={"modified_at": "modified_column"}, + ) + + # inherits to add extra post-validator + class OrderQueryParamsModel(BaseOrderingQueryModel): + ... + + # normal + data = {"order_by": {"field": "modified_at", "direction": "asc"}} + model = OrderQueryParamsModel.model_validate(data) + + assert model.order_by + assert model.order_by.model_dump() == { + "field": "modified_column", + "direction": "asc", + } + + # test against reference + expected = ReferenceOrderQueryParamsClass.model_validate( + {"order_by": json_dumps({"field": "modified_at", "direction": "asc"})} + ) + assert expected.model_dump() == model.model_dump() + + +def test_ordering_query_model_class__fails_with_invalid_fields(): + + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified", "name", "description"}, + default=OrderBy(field=IDStr("modified"), direction=OrderDirection.DESC), + ) + + # fails with invalid field to sort + with pytest.raises(ValidationError) as err_info: + OrderQueryParamsModel.model_validate({"order_by": {"field": "INVALID"}}) + + error = err_info.value.errors()[0] + + assert error["type"] == "value_error" + assert "INVALID" in error["msg"] + assert error["loc"] == ("order_by", "field") + + +def test_ordering_query_model_class__fails_with_invalid_direction(): + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified", "name", "description"}, + default=OrderBy(field=IDStr("modified"), direction=OrderDirection.DESC), + ) + + with pytest.raises(ValidationError) as err_info: + OrderQueryParamsModel.model_validate( + {"order_by": {"field": "modified", "direction": "INVALID"}} + ) + + error = err_info.value.errors()[0] + + assert error["type"] == "enum" + assert error["loc"] == ("order_by", "direction") + + +def test_ordering_query_model_class__defaults(): + + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified", "name", "description"}, + default=OrderBy(field=IDStr("modified"), direction=OrderDirection.DESC), + ordering_fields_api_to_column_map={"modified": "modified_at"}, + ) + + # checks all defaults + model = OrderQueryParamsModel() + assert model.order_by is not None + assert ( + model.order_by.field == "modified_at" # pylint: 
disable=no-member + ) # NOTE that this was mapped! + assert model.order_by.direction is OrderDirection.DESC # pylint: disable=no-member + + # partial defaults + model = OrderQueryParamsModel.model_validate({"order_by": {"field": "name"}}) + assert model.order_by + assert model.order_by.field == "name" + assert model.order_by.direction == OrderBy.model_fields.get("direction").default + + # direction alone is invalid + with pytest.raises(ValidationError) as err_info: + OrderQueryParamsModel.model_validate({"order_by": {"direction": "asc"}}) + + error = err_info.value.errors()[0] + assert error["loc"] == ("order_by", "field") + assert error["type"] == "missing" + + +def test_ordering_query_model_with_map(): + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified", "name", "description"}, + default=OrderBy(field=IDStr("modified"), direction=OrderDirection.DESC), + ordering_fields_api_to_column_map={"modified": "some_db_column_name"}, + ) + + model = OrderQueryParamsModel.model_validate({"order_by": {"field": "modified"}}) + assert model.order_by + assert model.order_by.field == "some_db_column_name" + + +def test_ordering_query_parse_json_pre_validator(): + + OrderQueryParamsModel = create_ordering_query_model_class( + ordering_fields={"modified", "name"}, + default=OrderBy(field=IDStr("modified"), direction=OrderDirection.DESC), + ) + + bad_json_value = ",invalid json" + with pytest.raises(ValidationError) as err_info: + OrderQueryParamsModel.model_validate({"order_by": bad_json_value}) + + exc = err_info.value + assert exc.error_count() == 1 + error = exc.errors()[0] + assert error["loc"] == ("order_by",) + assert error["type"] == "value_error" + assert error["input"] == bad_json_value diff --git a/packages/models-library/tests/test_rest_pagination.py b/packages/models-library/tests/test_rest_pagination.py index a9da9db2f1b..85669e4a6de 100644 --- a/packages/models-library/tests/test_rest_pagination.py +++ b/packages/models-library/tests/test_rest_pagination.py @@ -2,12 +2,21 @@ import pytest from models_library.rest_pagination import Page, PageMetaInfoLimitOffset -from pydantic.main import BaseModel +from pydantic import BaseModel, ValidationError +from pytest_simcore.examples.models_library import PAGE_EXAMPLES -@pytest.mark.parametrize("cls_model", [Page[str], PageMetaInfoLimitOffset]) -def test_page_response_limit_offset_models(cls_model: BaseModel): - examples = cls_model.Config.schema_extra["examples"] +@pytest.mark.parametrize( + "cls_model, examples", + [ + (Page[str], PAGE_EXAMPLES), + ( + PageMetaInfoLimitOffset, + PageMetaInfoLimitOffset.model_config["json_schema_extra"]["examples"], + ), + ], +) +def test_page_response_limit_offset_models(cls_model: BaseModel, examples: list[dict]): for index, example in enumerate(examples): print(f"{index:-^10}:\n", example) @@ -17,7 +26,7 @@ def test_page_response_limit_offset_models(cls_model: BaseModel): def test_invalid_offset(): - with pytest.raises(ValueError): + with pytest.raises(ValidationError): PageMetaInfoLimitOffset(limit=6, total=5, offset=5, count=2) @@ -30,19 +39,19 @@ def test_invalid_offset(): ], ) def test_invalid_count(count: int, offset: int): - with pytest.raises(ValueError): + with pytest.raises(ValidationError): PageMetaInfoLimitOffset(limit=6, total=5, offset=offset, count=count) def test_data_size_does_not_fit_count(): - example = deepcopy(Page[str].Config.schema_extra["examples"][0]) + example = deepcopy(PAGE_EXAMPLES[0]) example["_meta"]["count"] = len(example["data"]) - 1 - with 
pytest.raises(ValueError): + with pytest.raises(ValidationError): Page[str](**example) def test_empty_data_is_converted_to_list(): - example = deepcopy(Page[str].Config.schema_extra["examples"][0]) + example = deepcopy(PAGE_EXAMPLES[0]) example["data"] = None example["_meta"]["count"] = 0 model_instance = Page[str](**example) diff --git a/packages/models-library/tests/test_rest_pagination_utils.py b/packages/models-library/tests/test_rest_pagination_utils.py index f9887a1bf71..acaf6bc9d5c 100644 --- a/packages/models-library/tests/test_rest_pagination_utils.py +++ b/packages/models-library/tests/test_rest_pagination_utils.py @@ -41,7 +41,7 @@ def test_paginating_data(base_url): ) assert data_obj - model_instance = Page[int].parse_obj(data_obj) + model_instance = Page[int].model_validate(data_obj) assert model_instance assert model_instance.meta == PageMetaInfoLimitOffset( total=total_number_of_items, count=len(data_chunk), limit=limit, offset=offset @@ -75,7 +75,7 @@ def test_paginating_data(base_url): offset += len(data_chunk) assert model_instance.links.next is not None - data_obj: PageDict = paginate_data( + data_obj: PageDict = paginate_data( # type: ignore[no-redef] data_chunk, request_url=URL(model_instance.links.next), total=total_number_of_items, @@ -83,7 +83,7 @@ def test_paginating_data(base_url): offset=offset, ) - model_instance = Page[int].parse_obj(data_obj) + model_instance = Page[int].model_validate(data_obj) assert model_instance assert model_instance.meta == PageMetaInfoLimitOffset( total=total_number_of_items, @@ -127,7 +127,7 @@ def test_paginating_data(base_url): assert offset == last_chunk_offset assert model_instance.links.next is not None - data_obj: PageDict = paginate_data( + data_obj: PageDict = paginate_data( # type: ignore[no-redef] data_chunk, request_url=URL(model_instance.links.next), total=total_number_of_items, @@ -136,7 +136,7 @@ def test_paginating_data(base_url): ) assert data_obj - model_instance = Page[int].parse_obj(data_obj) + model_instance = Page[int].model_validate(data_obj) assert model_instance assert model_instance.meta == PageMetaInfoLimitOffset( diff --git a/packages/models-library/tests/test_rpc_pagination.py b/packages/models-library/tests/test_rpc_pagination.py new file mode 100644 index 00000000000..b8f78c737e5 --- /dev/null +++ b/packages/models-library/tests/test_rpc_pagination.py @@ -0,0 +1,18 @@ +from typing import Any + +import pytest +from models_library.rpc_pagination import PageRpc +from pytest_simcore.examples.models_library import RPC_PAGE_EXAMPLES + + +@pytest.mark.parametrize("example", RPC_PAGE_EXAMPLES) +def test_create_page_rpc(example: dict[str, Any]): + + expected = PageRpc.model_validate(example) + + assert PageRpc[str].create( + expected.data, + total=expected.meta.total, + limit=expected.meta.limit, + offset=expected.meta.offset, + ) diff --git a/packages/models-library/tests/test_service_resources.py b/packages/models-library/tests/test_service_resources.py index 34300d6b277..2bc0ccf7483 100644 --- a/packages/models-library/tests/test_service_resources.py +++ b/packages/models-library/tests/test_service_resources.py @@ -13,7 +13,7 @@ ServiceResourcesDict, ServiceResourcesDictHelpers, ) -from pydantic import parse_obj_as +from pydantic import TypeAdapter @pytest.mark.parametrize( @@ -27,19 +27,19 @@ ), ) def test_compose_image(example: str) -> None: - parse_obj_as(DockerGenericTag, example) + TypeAdapter(DockerGenericTag).validate_python(example) @pytest.fixture def resources_dict() -> ResourcesDict: - return 
parse_obj_as( - ResourcesDict, ImageResources.Config.schema_extra["example"]["resources"] + return TypeAdapter(ResourcesDict).validate_python( + ImageResources.model_config["json_schema_extra"]["example"]["resources"] ) @pytest.fixture def compose_image() -> DockerGenericTag: - return parse_obj_as(DockerGenericTag, "image:latest") + return TypeAdapter(DockerGenericTag).validate_python("image:latest") def _ensure_resource_value_is_an_object(data: ResourcesDict) -> None: @@ -56,21 +56,21 @@ def test_resources_dict_parsed_as_expected(resources_dict: ResourcesDict) -> Non def test_image_resources_parsed_as_expected() -> None: - result: ImageResources = ImageResources.parse_obj( - ImageResources.Config.schema_extra["example"] + result: ImageResources = ImageResources.model_validate( + ImageResources.model_config["json_schema_extra"]["example"] ) _ensure_resource_value_is_an_object(result.resources) assert type(result) == ImageResources - result: ImageResources = parse_obj_as( - ImageResources, ImageResources.Config.schema_extra["example"] + result: ImageResources = TypeAdapter(ImageResources).validate_python( + ImageResources.model_config["json_schema_extra"]["example"] ) assert type(result) == ImageResources _ensure_resource_value_is_an_object(result.resources) @pytest.mark.parametrize( - "example", ServiceResourcesDictHelpers.Config.schema_extra["examples"] + "example", ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"] ) def test_service_resource_parsed_as_expected( example: dict[DockerGenericTag, Any], compose_image: DockerGenericTag @@ -81,30 +81,30 @@ def _assert_service_resources_dict( assert type(service_resources_dict) == dict print(service_resources_dict) - for _, image_resources in service_resources_dict.items(): + for image_resources in service_resources_dict.values(): _ensure_resource_value_is_an_object(image_resources.resources) - service_resources_dict: ServiceResourcesDict = parse_obj_as( - ServiceResourcesDict, example - ) + service_resources_dict: ServiceResourcesDict = TypeAdapter( + ServiceResourcesDict + ).validate_python(example) _assert_service_resources_dict(service_resources_dict) for image_resources in example.values(): service_resources_dict_from_single_service = ( ServiceResourcesDictHelpers.create_from_single_service( image=compose_image, - resources=ImageResources.parse_obj(image_resources).resources, + resources=ImageResources.model_validate(image_resources).resources, ) ) _assert_service_resources_dict(service_resources_dict_from_single_service) @pytest.mark.parametrize( - "example", ServiceResourcesDictHelpers.Config.schema_extra["examples"] + "example", ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"] ) def test_create_jsonable_dict(example: dict[DockerGenericTag, Any]) -> None: - service_resources_dict: ServiceResourcesDict = parse_obj_as( - ServiceResourcesDict, example - ) + service_resources_dict: ServiceResourcesDict = TypeAdapter( + ServiceResourcesDict + ).validate_python(example) result = ServiceResourcesDictHelpers.create_jsonable(service_resources_dict) assert example == result diff --git a/packages/models-library/tests/test_service_settings_labels.py b/packages/models-library/tests/test_service_settings_labels.py index 941a62dddba..c056902d8e9 100644 --- a/packages/models-library/tests/test_service_settings_labels.py +++ b/packages/models-library/tests/test_service_settings_labels.py @@ -2,144 +2,147 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name + import json -from 
collections import namedtuple from copy import deepcopy -from pprint import pformat -from typing import Any +from typing import Any, Final, NamedTuple import pytest +from common_library.json_serialization import json_dumps +from models_library.basic_types import PortInt +from models_library.osparc_variable_identifier import ( + OsparcVariableIdentifier, + replace_osparc_variable_identifier, +) from models_library.service_settings_labels import ( - DEFAULT_DNS_SERVER_ADDRESS, - DEFAULT_DNS_SERVER_PORT, - DNSResolver, + ComposeSpecLabelDict, DynamicSidecarServiceLabels, NATRule, PathMappingsLabel, SimcoreServiceLabels, SimcoreServiceSettingLabelEntry, SimcoreServiceSettingsLabel, +) +from models_library.service_settings_nat_rule import ( + DEFAULT_DNS_SERVER_ADDRESS, + DEFAULT_DNS_SERVER_PORT, + DNSResolver, _PortRange, ) from models_library.services_resources import DEFAULT_SINGLE_SERVICE_NAME -from pydantic import BaseModel, ValidationError - -SimcoreServiceExample = namedtuple( - "SimcoreServiceExample", "example, items, uses_dynamic_sidecar, id" +from models_library.utils.string_substitution import TextTemplate +from pydantic import BaseModel, TypeAdapter, ValidationError +from pytest_simcore.pydantic_models import ( + assert_validation_model, + iter_model_examples_in_class, ) -SIMCORE_SERVICE_EXAMPLES = [ - SimcoreServiceExample( - example=SimcoreServiceLabels.Config.schema_extra["examples"][0], +class _Parametrization(NamedTuple): + example: dict[str, Any] + items: int + uses_dynamic_sidecar: bool + + +SIMCORE_SERVICE_EXAMPLES = { + "legacy": _Parametrization( + example=SimcoreServiceLabels.model_json_schema()["examples"][0], items=1, uses_dynamic_sidecar=False, - id="legacy", ), - SimcoreServiceExample( - example=SimcoreServiceLabels.Config.schema_extra["examples"][1], - items=3, + "dynamic-service": _Parametrization( + example=SimcoreServiceLabels.model_json_schema()["examples"][1], + items=5, uses_dynamic_sidecar=True, - id="dynamic-service", ), - SimcoreServiceExample( - example=SimcoreServiceLabels.Config.schema_extra["examples"][2], - items=5, + "dynamic-service-with-compose-spec": _Parametrization( + example=SimcoreServiceLabels.model_json_schema()["examples"][2], + items=6, uses_dynamic_sidecar=True, - id="dynamic-service-with-compose-spec", ), -] +} @pytest.mark.parametrize( "example, items, uses_dynamic_sidecar", - [(x.example, x.items, x.uses_dynamic_sidecar) for x in SIMCORE_SERVICE_EXAMPLES], - ids=[x.id for x in SIMCORE_SERVICE_EXAMPLES], + list(SIMCORE_SERVICE_EXAMPLES.values()), + ids=list(SIMCORE_SERVICE_EXAMPLES.keys()), ) -def test_simcore_service_labels( - example: dict, items: int, uses_dynamic_sidecar: bool -) -> None: - simcore_service_labels = SimcoreServiceLabels.parse_obj(example) +def test_simcore_service_labels(example: dict, items: int, uses_dynamic_sidecar: bool): + simcore_service_labels = SimcoreServiceLabels.model_validate(example) assert simcore_service_labels - assert len(simcore_service_labels.dict(exclude_unset=True)) == items + assert len(simcore_service_labels.model_dump(exclude_unset=True)) == items assert simcore_service_labels.needs_dynamic_sidecar == uses_dynamic_sidecar -def test_service_settings() -> None: - simcore_settings_settings_label = SimcoreServiceSettingsLabel.parse_obj( - SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] +def test_service_settings(): + simcore_settings_settings_label = SimcoreServiceSettingsLabel.model_validate( + SimcoreServiceSettingLabelEntry.model_json_schema()["examples"] ) assert 
simcore_settings_settings_label assert len(simcore_settings_settings_label) == len( - SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] + SimcoreServiceSettingLabelEntry.model_json_schema()["examples"] ) assert simcore_settings_settings_label[0] # ensure private attribute assignment for service_setting in simcore_settings_settings_label: # pylint: disable=protected-access - service_setting._destination_containers = ["random_value1", "random_value2"] + service_setting.set_destination_containers(["random_value1", "random_value2"]) @pytest.mark.parametrize( - "model_cls", - ( - SimcoreServiceSettingLabelEntry, - SimcoreServiceSettingsLabel, - SimcoreServiceLabels, - ), + "model_cls, example_name, example_data", + iter_model_examples_in_class(SimcoreServiceLabels), ) -def test_service_settings_model_examples( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -) -> None: - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" +def test_correctly_detect_dynamic_sidecar_boot( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + model_instance = assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) -@pytest.mark.parametrize( - "model_cls", - (SimcoreServiceLabels,), -) -def test_correctly_detect_dynamic_sidecar_boot( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -) -> None: - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance.needs_dynamic_sidecar == ( - "simcore.service.paths-mapping" in example - ) + assert isinstance(model_instance, SimcoreServiceLabels) + assert model_instance.callbacks_mapping is not None + assert model_instance.needs_dynamic_sidecar == ( + "simcore.service.paths-mapping" in example_data + ) -def test_raises_error_if_http_entrypoint_is_missing() -> None: +def test_raises_error_if_http_entrypoint_is_missing(): simcore_service_labels: dict[str, Any] = deepcopy( - SimcoreServiceLabels.Config.schema_extra["examples"][2] + SimcoreServiceLabels.model_json_schema()["examples"][2] ) del simcore_service_labels["simcore.service.container-http-entrypoint"] - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 SimcoreServiceLabels(**simcore_service_labels) -def test_path_mappings_none_state_paths() -> None: - sample_data = deepcopy(PathMappingsLabel.Config.schema_extra["example"]) +def test_path_mappings_none_state_paths(): + sample_data = deepcopy(PathMappingsLabel.model_json_schema()["examples"][0]) sample_data["state_paths"] = None with pytest.raises(ValidationError): PathMappingsLabel(**sample_data) -def test_path_mappings_json_encoding() -> None: - example = PathMappingsLabel.Config.schema_extra["example"] - path_mappings = PathMappingsLabel.parse_obj(example) - print(path_mappings) - assert PathMappingsLabel.parse_raw(path_mappings.json()) == path_mappings +def test_path_mappings_json_encoding(): + for example in PathMappingsLabel.model_json_schema()["examples"]: + path_mappings = PathMappingsLabel.model_validate(example) + print(path_mappings) + assert ( + PathMappingsLabel.model_validate_json(path_mappings.model_dump_json()) + == path_mappings + ) -def test_simcore_services_labels_compose_spec_null_container_http_entry_provided() -> None: - sample_data = 
deepcopy(SimcoreServiceLabels.Config.schema_extra["examples"][2]) +def test_simcore_services_labels_compose_spec_null_container_http_entry_provided(): + sample_data: dict[str, Any] = deepcopy( + SimcoreServiceLabels.model_json_schema()["examples"][2] + ) + assert sample_data["simcore.service.container-http-entrypoint"] sample_data["simcore.service.compose-spec"] = None @@ -147,35 +150,65 @@ def test_simcore_services_labels_compose_spec_null_container_http_entry_provided SimcoreServiceLabels(**sample_data) -def test_raises_error_wrong_restart_policy() -> None: +def test_raises_error_wrong_restart_policy(): simcore_service_labels: dict[str, Any] = deepcopy( - SimcoreServiceLabels.Config.schema_extra["examples"][2] + SimcoreServiceLabels.model_json_schema()["examples"][2] ) simcore_service_labels["simcore.service.restart-policy"] = "__not_a_valid_policy__" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 SimcoreServiceLabels(**simcore_service_labels) +def test_path_mappings_label_unsupported_size_constraints(): + with pytest.raises(ValidationError) as exec_into: + PathMappingsLabel.model_validate( + { + "outputs_path": "/ok_input_path", + "inputs_path": "/ok_output_path", + "state_paths": [], + "volume_size_limits": {"/ok_input_path": "1d"}, + }, + ) + assert "Provided size='1d' contains invalid charactes:" in f"{exec_into.value}" + + +def test_path_mappings_label_defining_constraing_on_missing_path(): + with pytest.raises(ValidationError) as exec_into: + PathMappingsLabel.model_validate( + { + "outputs_path": "/ok_input_path", + "inputs_path": "/ok_output_path", + "state_paths": [], + "volume_size_limits": {"/path_is_missing_from_above": "1"}, + }, + ) + assert ( + "path=PosixPath('/path_is_missing_from_above') not found in" + in f"{exec_into.value}" + ) + + +PORT_1: Final[PortInt] = TypeAdapter(PortInt).validate_python(1) +PORT_3: Final[PortInt] = TypeAdapter(PortInt).validate_python(3) +PORT_20: Final[PortInt] = TypeAdapter(PortInt).validate_python(20) +PORT_99: Final[PortInt] = TypeAdapter(PortInt).validate_python(99) + + def test_port_range(): with pytest.raises(ValidationError): - _PortRange(lower=1, upper=1) + _PortRange(lower=PORT_1, upper=PORT_1) with pytest.raises(ValidationError): - _PortRange(lower=20, upper=1) + _PortRange(lower=PORT_20, upper=PORT_1) - assert _PortRange(lower=1, upper=2) + assert _PortRange(lower=PORT_1, upper=PORT_20) def test_host_permit_list_policy(): host_permit_list_policy = NATRule( - hostname="hostname", - tcp_ports=[ - _PortRange(lower=1, upper=3), - 99, - ], + hostname="hostname", tcp_ports=[_PortRange(lower=PORT_1, upper=PORT_3), PORT_99] ) - assert set(host_permit_list_policy.iter_tcp_ports()) == {1, 2, 3, 99} @@ -236,7 +269,7 @@ def test_container_outgoing_permit_list_and_container_allow_internet_with_compos "simcore.service.container-http-entrypoint": container_name_1, } - instance = DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + instance = DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert ( instance.containers_allowed_outgoing_permit_list[container_name_1][0] == expected_host_permit_list_policy @@ -265,7 +298,9 @@ def test_container_outgoing_permit_list_and_container_allow_internet_without_com ) }, ): - assert DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + assert TypeAdapter(DynamicSidecarServiceLabels).validate_json( + json.dumps(dict_data) + ) def test_container_allow_internet_no_compose_spec_not_ok(): @@ -273,7 +308,7 @@ def 
test_container_allow_internet_no_compose_spec_not_ok(): "simcore.service.containers-allowed-outgoing-internet": json.dumps(["hoho"]), } with pytest.raises(ValidationError) as exec_info: - assert DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + assert DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert "Expected only 1 entry 'container' not '{'hoho'}" in f"{exec_info.value}" @@ -286,7 +321,7 @@ def test_container_allow_internet_compose_spec_not_ok(): "simcore.service.containers-allowed-outgoing-internet": json.dumps(["hoho"]), } with pytest.raises(ValidationError) as exec_info: - assert DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + assert DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert f"container='hoho' not found in {compose_spec=}" in f"{exec_info.value}" @@ -305,7 +340,7 @@ def test_container_outgoing_permit_list_no_compose_spec_not_ok(): ), } with pytest.raises(ValidationError) as exec_info: - assert DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + assert DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert ( f"Expected only one entry '{DEFAULT_SINGLE_SERVICE_NAME}' not 'container_name'" in f"{exec_info.value}" @@ -329,7 +364,7 @@ def test_container_outgoing_permit_list_compose_spec_not_ok(): "simcore.service.compose-spec": json.dumps(compose_spec), } with pytest.raises(ValidationError) as exec_info: - assert DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + assert DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert ( f"Trying to permit list container='container_name' which was not found in {compose_spec=}" in f"{exec_info.value}" @@ -352,9 +387,229 @@ def test_not_allowed_in_both_permit_list_and_outgoing_internet(): } with pytest.raises(ValidationError) as exec_info: - DynamicSidecarServiceLabels.parse_raw(json.dumps(dict_data)) + DynamicSidecarServiceLabels.model_validate_json(json.dumps(dict_data)) assert ( f"Not allowed common_containers={{'{container_name}'}} detected" in f"{exec_info.value}" ) + + +@pytest.fixture +def vendor_environments() -> dict[str, Any]: + return { + "OSPARC_VARIABLE_VENDOR_SECRET_DNS_RESOLVER_ADDRESS": "172.0.0.1", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOSTNAME": "license.com", + "OSPARC_VARIABLE_VENDOR_SECRET_DNS_RESOLVER_PORT": 1234, + "OSPARC_VARIABLE_VENDOR_SECRET_LICENCE_HOSTNAME": "hostname", + "OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS": [ + 1, + 2, + 3, + 4, + ], + "OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_1": 1, + "OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_2": 2, + "OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_3": 3, + "OSPARC_VARIABLE_OS_TYPE_LINUX": "linux", + } + + +@pytest.fixture +def service_labels() -> dict[str, str]: + return { + "simcore.service.paths-mapping": json.dumps( + { + "inputs_path": "/tmp/inputs", # noqa: S108 + "outputs_path": "/tmp/outputs", # noqa: S108 + "state_paths": ["/tmp/save_1", "/tmp_save_2"], # noqa: S108 + "state_exclude": ["/tmp/strip_me/*"], # noqa: S108 + } + ), + "simcore.service.compose-spec": json.dumps( + { + "version": "2.3", + "services": { + "rt-web": { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life:${SERVICE_VERSION}", + "init": True, + "depends_on": ["s4l-core"], + }, + "s4l-core": { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core:${SERVICE_VERSION}", + "runtime": "nvidia", + "init": True, + "environment": ["DISPLAY=${DISPLAY}"], + "volumes": ["/tmp/.X11-unix:/tmp/.X11-unix"], # noqa: S108 + }, + }, + } + 
), + "simcore.service.container-http-entrypoint": "rt-web", + "simcore.service.restart-policy": "on-inputs-downloaded", + "simcore.service.containers-allowed-outgoing-permit-list": json.dumps( + { + "s4l-core": [ + { + "hostname": "${OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOSTNAME}", + "tcp_ports": [ + "$OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_1", + "$OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_2", + 3, + ], + "dns_resolver": { + "address": "$OSPARC_VARIABLE_VENDOR_SECRET_DNS_RESOLVER_ADDRESS", + "port": "$OSPARC_VARIABLE_VENDOR_SECRET_DNS_RESOLVER_PORT", + }, + } + ] + } + ), + "simcore.service.settings": json.dumps( + [ + { + "name": "constraints", + "type": "string", + "value": ["node.platform.os == $OSPARC_VARIABLE_OS_TYPE_LINUX"], + }, + { + "name": "ContainerSpec", + "type": "ContainerSpec", + "value": {"Command": ["run"]}, + }, + { + "name": "Resources", + "type": "Resources", + "value": { + "Limits": {"NanoCPUs": 4000000000, "MemoryBytes": 17179869184}, + "Reservations": { + "NanoCPUs": 100000000, + "MemoryBytes": 536870912, + "GenericResources": [ + {"DiscreteResourceSpec": {"Kind": "VRAM", "Value": 1}} + ], + }, + }, + }, + { + "name": "mount", + "type": "object", + "value": [ + { + "ReadOnly": True, + "Source": "/tmp/.X11-unix", # noqa: S108 + "Target": "/tmp/.X11-unix", # noqa: S108 + "Type": "bind", + } + ], + }, + { + "name": "env", + "type": "string", + "value": ["DISPLAY=${DISPLAY}"], + }, + { + "name": "ports", + "type": "int", + "value": 8888, + }, + { + "name": "resources", + "type": "Resources", + "value": { + "Limits": {"NanoCPUs": 4000000000, "MemoryBytes": 8589934592} + }, + }, + ] + ), + } + + +def test_can_parse_labels_with_osparc_identifiers( + vendor_environments: dict[str, Any], service_labels: dict[str, str] +): + # can load OSPARC_VARIABLE_ identifiers!! 
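+ # Descriptive note (editorial, grounded in the fixture above): the service_labels fixture
+ # references placeholders such as "${OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOSTNAME}";
+ # validation is expected to accept these as OsparcVariableIdentifier values rather than reject them.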
+ service_meta = SimcoreServiceLabels.model_validate(service_labels) + + assert service_meta.containers_allowed_outgoing_permit_list + nat_rule: NATRule = service_meta.containers_allowed_outgoing_permit_list[ + "s4l-core" + ][0] + assert nat_rule.hostname == TypeAdapter(OsparcVariableIdentifier).validate_python( + "${OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOSTNAME}", + ) + assert nat_rule.tcp_ports == [ + TypeAdapter(OsparcVariableIdentifier).validate_python( + "$OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_1", + ), + TypeAdapter(OsparcVariableIdentifier).validate_python( + "$OSPARC_VARIABLE_VENDOR_SECRET_TCP_PORTS_2", + ), + 3, + ] + + service_meta = replace_osparc_variable_identifier(service_meta, vendor_environments) + service_meta_str = service_meta.model_dump_json() + + not_replaced_vars = {"OSPARC_VARIABLE_OS_TYPE_LINUX"} + + for osparc_variable_name in vendor_environments: + if osparc_variable_name in not_replaced_vars: + continue + assert osparc_variable_name not in service_meta_str + + service_meta_str = service_meta.model_dump_json( + include={"containers_allowed_outgoing_permit_list"} + ) + + assert "$" not in service_meta_str + + +def test_resolving_some_service_labels_at_load_time( + vendor_environments: dict[str, Any], service_labels: dict[str, str] +): + print(json.dumps(service_labels, indent=1)) + service_meta = SimcoreServiceLabels.model_validate(service_labels) + + # NOTE: replacing all OsparcVariableIdentifier instances nested inside objects + # this also does a partial replacement if there is no entry inside the vendor_environments + # mapped to that name + replace_osparc_variable_identifier(service_meta, vendor_environments) + + for attribute_name, pydantic_model in ( + ("compose_spec", ComposeSpecLabelDict), + ("settings", SimcoreServiceSettingsLabel), + ): + to_serialize = getattr(service_meta, attribute_name) + template = TextTemplate(json_dumps(to_serialize)) + assert template.is_valid() + resolved_label: str = template.safe_substitute(vendor_environments) + to_restore = TypeAdapter(pydantic_model).validate_json(resolved_label) + setattr(service_meta, attribute_name, to_restore) + + print(json.dumps(service_labels, indent=1)) + + # NOTE: that this model needs all values to be resolved before parsing them + # otherwise it might fail!! 
The question is whether these values can be resolved at this point + # NOTE: vendor values are in the database and therefore are available at this point + labels = SimcoreServiceLabels.model_validate(service_labels) + + print("After", labels.model_dump_json(indent=1)) + formatted_json = service_meta.model_dump_json(indent=1) + print("After", formatted_json) + for entry in vendor_environments: + print(entry) + assert entry not in formatted_json + + +def test_user_preferences_path_is_part_of_exiting_volume(): + labels_data = { + "simcore.service.paths-mapping": json.dumps( + PathMappingsLabel.model_json_schema()["examples"][0] + ), + "simcore.service.user-preferences-path": json.dumps( + "/tmp/outputs" # noqa: S108 + ), + } + with pytest.raises(ValidationError, match="user_preferences_path=/tmp/outputs"): + assert DynamicSidecarServiceLabels.model_validate_json(json.dumps(labels_data)) diff --git a/packages/models-library/tests/test_service_settings_nat_rule.py b/packages/models-library/tests/test_service_settings_nat_rule.py new file mode 100644 index 00000000000..c6f9f05497c --- /dev/null +++ b/packages/models-library/tests/test_service_settings_nat_rule.py @@ -0,0 +1,169 @@ +from itertools import combinations +from typing import Any + +import pytest +from _pytest.mark.structures import ParameterSet +from models_library.osparc_variable_identifier import ( + OsparcVariableIdentifier, + UnresolvedOsparcVariableIdentifierError, + replace_osparc_variable_identifier, +) +from models_library.service_settings_nat_rule import NATRule +from pydantic import TypeAdapter + +SUPPORTED_TEMPLATES: set[str] = { + "$OSPARC_VARIABLE_%s", + "${OSPARC_VARIABLE_%s}", + "${OSPARC_VARIABLE_%s:-%s}", +} + + +def _format(template: str, *, name: str, default: str) -> str: + try: + return template % (name, default) + except TypeError: + return template % (name) + + +SERIALIZED_NAT_RULES: list[ParameterSet] = [ + pytest.param( + { + "hostname": _format(template, name="hostname", default=""), + "tcp_ports": [ + _format(template, name="p1", default="12"), + { + "lower": _format(template, name="port_low", default="10"), + "upper": _format(template, name="port_high", default="11"), + }, + 8000, + ], + "dns_resolver": { + "address": _format(template, name="dns_address", default="some"), + "port": _format(template, name="dns_port", default="11111"), + }, + }, + id=template, + ) + for template in SUPPORTED_TEMPLATES +] + + +def _all_combinations_from_list( + elements: list[Any], +) -> list[tuple[Any, ...]]: + result: list[tuple[Any, ...]] = [] + for group_size in range(1, len(elements) + 1): + for combination in combinations(elements, group_size): + result.append(combination) # noqa: PERF402 + return result + + +def _all_combinations_from_dict(data: dict[Any, Any]) -> list[dict[Any, Any]]: + return [dict(c) for c in _all_combinations_from_list(list(data.items()))] + + +@pytest.mark.parametrize("nat_rule_dict", SERIALIZED_NAT_RULES) +@pytest.mark.parametrize( + "osparc_variables", + _all_combinations_from_dict( + { + "OSPARC_VARIABLE_hostname": "a-host-name", + "OSPARC_VARIABLE_p1": 12, + "OSPARC_VARIABLE_port_low": 10, + "OSPARC_VARIABLE_port_high": 12, + "OSPARC_VARIABLE_dns_address": "dms.local", + "OSPARC_VARIABLE_dns_port": 44, + } + ), +) +def test_nat_rule_with_osparc_variable_identifier( + nat_rule_dict: dict[str, Any], osparc_variables: dict[str, Any] +): + nat_rule = TypeAdapter(NATRule).validate_python(nat_rule_dict) + + with pytest.raises(UnresolvedOsparcVariableIdentifierError): + list(nat_rule.iter_tcp_ports()) 
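+ # the rule still holds unresolved OsparcVariableIdentifier placeholders (e.g. in tcp_ports), hence the expected error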
+ + # NOTE: values are mostly replaced in place unless it's used as first level + replace_osparc_variable_identifier(nat_rule, osparc_variables) + + nat_rule_str = nat_rule.model_dump_json() + for osparc_variable_name in osparc_variables: + assert osparc_variable_name not in nat_rule_str + + # when all env vars are converted now it works + if len(osparc_variables) == 6: + assert list(nat_rule.iter_tcp_ports()) + + +@pytest.mark.parametrize( + "replace_with_value", + [ + "a_string", + 1, + True, + {"a_set"}, + {"a": "dict"}, + ("a", "tuple"), + ], +) +def test_______(replace_with_value: Any): + a_var = TypeAdapter(OsparcVariableIdentifier).validate_python( + "$OSPARC_VARIABLE_some_var" + ) + assert isinstance(a_var, OsparcVariableIdentifier) + + replaced_var = replace_osparc_variable_identifier( + a_var, {"OSPARC_VARIABLE_some_var": replace_with_value} + ) + # NOTE: after replacement the original reference still points + assert isinstance(a_var, OsparcVariableIdentifier) + assert replaced_var == replace_with_value + + +@pytest.mark.parametrize( + "var_template", + ["$OSPARC_VARIABLE_a", "${OSPARC_VARIABLE_a}", "${OSPARC_VARIABLE_a:-%s}"], +) +@pytest.mark.parametrize( + "default_value", ["", "a", "1", "1.1", "aa", "$", "$$$$", "[]", "{}"] +) +@pytest.mark.parametrize( + "replace_with_value", + [ + "a_string", + 1, + True, + {"a_set"}, + {"a": "dict"}, + ("a", "tuple"), + ], +) +@pytest.mark.parametrize("replace_with_default", [True, False]) +def test_replace_an_instance_of_osparc_variable_identifier( + var_template: str, + default_value: str, + replace_with_value: Any, + replace_with_default: bool, +): + identifier_has_default = False + try: + formatted_template = var_template % default_value + identifier_has_default = True + except TypeError: + formatted_template = var_template + + a_var = TypeAdapter(OsparcVariableIdentifier).validate_python(formatted_template) + assert isinstance(a_var, OsparcVariableIdentifier) + + replace_with_identifier_default = identifier_has_default and replace_with_default + replacement_content = ( + {} if replace_with_identifier_default else {a_var.name: replace_with_value} + ) + replaced_var = replace_osparc_variable_identifier(a_var, replacement_content) + # NOTE: after replacement the original reference still points + assert isinstance(a_var, OsparcVariableIdentifier) + if replace_with_identifier_default: + assert replaced_var == default_value + else: + assert replaced_var == replace_with_value diff --git a/packages/models-library/tests/test_services.py b/packages/models-library/tests/test_services.py index 7cba14f9725..c7b7562eaa6 100644 --- a/packages/models-library/tests/test_services.py +++ b/packages/models-library/tests/test_services.py @@ -3,56 +3,53 @@ # pylint:disable=redefined-outer-name import re +import urllib.parse +from collections.abc import Callable from copy import deepcopy -from pprint import pformat -from typing import Any, Callable, Dict, List +from typing import Any import pytest -from models_library.basic_regex import VERSION_RE -from models_library.services import ( +from models_library.basic_regex import SIMPLE_VERSION_RE +from models_library.services import BootOption, ServiceMetaDataPublished +from models_library.services_base import ServiceBaseDisplay +from models_library.services_regex import ( COMPUTATIONAL_SERVICE_KEY_FORMAT, DYNAMIC_SERVICE_KEY_FORMAT, + SERVICE_ENCODED_KEY_RE, SERVICE_KEY_RE, - BootOption, - ServiceDockerData, - ServiceInput, - ServiceMetaData, - ServiceOutput, - _BaseServiceCommonDataModel, ) -from 
models_library.services_db import ServiceAccessRightsAtDB, ServiceMetaDataAtDB @pytest.fixture() -def minimal_service_common_data() -> Dict[str, Any]: - return dict( - name="this is a nice sample service", - description="this is the description of the service", - ) +def minimal_service_common_data() -> dict[str, Any]: + return { + "name": "this is a nice sample service", + "description": "this is the description of the service", + } def test_create_minimal_service_common_data( - minimal_service_common_data: Dict[str, Any] + minimal_service_common_data: dict[str, Any] ): - service = _BaseServiceCommonDataModel(**minimal_service_common_data) + service = ServiceBaseDisplay(**minimal_service_common_data) assert service.name == minimal_service_common_data["name"] assert service.description == minimal_service_common_data["description"] - assert service.thumbnail == None + assert service.thumbnail is None -def test_node_with_empty_thumbnail(minimal_service_common_data: Dict[str, Any]): +def test_node_with_empty_thumbnail(minimal_service_common_data: dict[str, Any]): service_data = minimal_service_common_data service_data.update({"thumbnail": ""}) - service = _BaseServiceCommonDataModel(**minimal_service_common_data) + service = ServiceBaseDisplay(**minimal_service_common_data) assert service.name == minimal_service_common_data["name"] assert service.description == minimal_service_common_data["description"] - assert service.thumbnail == None + assert service.thumbnail is None -def test_node_with_thumbnail(minimal_service_common_data: Dict[str, Any]): +def test_node_with_thumbnail(minimal_service_common_data: dict[str, Any]): service_data = minimal_service_common_data service_data.update( { @@ -60,7 +57,7 @@ def test_node_with_thumbnail(minimal_service_common_data: Dict[str, Any]): } ) - service = _BaseServiceCommonDataModel(**minimal_service_common_data) + service = ServiceBaseDisplay(**minimal_service_common_data) assert service.name == minimal_service_common_data["name"] assert service.description == minimal_service_common_data["description"] @@ -70,21 +67,7 @@ def test_node_with_thumbnail(minimal_service_common_data: Dict[str, Any]): ) -@pytest.mark.parametrize( - "model_cls", - ( - ServiceInput, - ServiceOutput, - BootOption, - ), -) -def test_service_models_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - +@pytest.mark.parametrize("pattern", (SERVICE_KEY_RE, SERVICE_ENCODED_KEY_RE)) @pytest.mark.parametrize( "service_key", [ @@ -115,8 +98,6 @@ def test_service_models_examples(model_cls, model_cls_examples): "simcore/services/comp/usf-simrun", "simcore/services/dynamic/3d-viewer", "simcore/services/dynamic/3d-viewer-gpu", - "simcore/services/dynamic/3d-viewer", - "simcore/services/dynamic/3d-viewer-gpu", "simcore/services/dynamic/bornstein-viewer", "simcore/services/dynamic/btl-pc", "simcore/services/dynamic/cc-0d-viewer", @@ -134,19 +115,16 @@ def test_service_models_examples(model_cls, model_cls_examples): "simcore/services/dynamic/raw-graphs-table", "simcore/services/dynamic/tissue-properties", ], + ids=str, ) -@pytest.mark.parametrize( - "regex_pattern", - [SERVICE_KEY_RE, r"^(simcore)/(services)/(comp|dynamic|frontend)(/[^\s/]+)+$"], - ids=["pattern_with_w", "pattern_with_s"], -) -def test_service_key_regex_patterns(service_key: str, regex_pattern: str): - match = re.match(regex_pattern, service_key) +def 
test_SERVICE_KEY_RE(service_key: str, pattern: re.Pattern): + if pattern == SERVICE_ENCODED_KEY_RE: + service_key = urllib.parse.quote(service_key, safe="") + + match = re.match(pattern, service_key) assert match - assert match.group(1) == "simcore" - assert match.group(2) == "services" - assert match.group(3) in ["comp", "dynamic", "frontend"] + assert match.group("type") in ["comp", "dynamic", "frontend"] assert match.group(4) is not None # tests formatters @@ -163,41 +141,32 @@ def test_service_key_regex_patterns(service_key: str, regex_pattern: str): new_service_key = DYNAMIC_SERVICE_KEY_FORMAT.format(service_name=service_name) if new_service_key: - new_match = re.match(regex_pattern, new_service_key) + new_match = re.match(pattern, new_service_key) assert new_match assert new_match.groups() == match.groups() -@pytest.mark.parametrize( - "model_cls", - (ServiceAccessRightsAtDB, ServiceMetaDataAtDB, ServiceMetaData, ServiceDockerData), -) -def test_services_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - +@pytest.mark.skip(reason="will be disabled by PC") @pytest.mark.parametrize( "python_regex_pattern, json_schema_file_name, json_schema_entry_paths", [ - (SERVICE_KEY_RE, "project-v0.0.1.json", ["key"]), - (VERSION_RE, "project-v0.0.1.json", ["version"]), - (VERSION_RE, "node-meta-v0.0.1.json", ["version"]), - (SERVICE_KEY_RE, "node-meta-v0.0.1.json", ["key"]), + (SERVICE_KEY_RE, "project-v0.0.1-pydantic.json", ["key"]), + (SIMPLE_VERSION_RE, "project-v0.0.1-pydantic.json", ["version"]), + (SIMPLE_VERSION_RE, "node-meta-v0.0.1-pydantic.json", ["version"]), + (SERVICE_KEY_RE, "node-meta-v0.0.1-pydantic.json", ["key"]), ], ) def test_same_regex_patterns_in_jsonschema_and_python( python_regex_pattern: str, json_schema_file_name: str, - json_schema_entry_paths: List[str], + json_schema_entry_paths: list[str], json_schema_dict: Callable, ): # read file in json_schema_config = json_schema_dict(json_schema_file_name) + # go to keys - def _find_pattern_entry(obj: Dict[str, Any], key: str) -> Any: + def _find_pattern_entry(obj: dict[str, Any], key: str) -> Any: if key in obj: return obj[key]["pattern"] for v in obj.values(): @@ -213,7 +182,36 @@ def _find_pattern_entry(obj: Dict[str, Any], key: str) -> Any: def test_boot_option_wrong_default() -> None: - for example in [deepcopy(x) for x in BootOption.Config.schema_extra["examples"]]: + for example in [deepcopy(x) for x in BootOption.model_config["json_schema_extra"]["examples"]]: with pytest.raises(ValueError): example["default"] = "__undefined__" assert BootOption(**example) + + +# NOTE: do not add items to this list, you are wrong to do so! 
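+# These entries are grandfathered label names that already use dashes; any new field must use a
+# snake_case alias so it maps cleanly to io.simcore.* labels (see the convention test below).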
+FIELD_NAME_EXCEPTIONS: set[str] = { + "integration-version", + "boot-options", + "min-visible-inputs", +} + + +def test_service_docker_data_labels_convesion(): + # tests that no future fields have "dashed names" + # we want labels to look like io.simcore.a_label_property + convension_breaking_fields: set[tuple[str, str]] = set() + + fields_with_aliases: list[tuple[str, str]] = [ + (name, info.alias) for name, info in ServiceMetaDataPublished.model_fields.items() + if info.alias is not None + ] + + for name, alias in fields_with_aliases: + if alias in FIELD_NAME_EXCEPTIONS: + continue + # check dashes and uppercase + if alias.lower() != alias or "-" in alias: + convension_breaking_fields.add((name, alias)) + assert ( + len(convension_breaking_fields) == 0 + ), "You are no longer allowed to add labels with dashes in them. All lables should be snake cased!" diff --git a/packages/models-library/tests/test_services_io.py b/packages/models-library/tests/test_services_io.py index fac0a2ca8a9..6f794eff85a 100644 --- a/packages/models-library/tests/test_services_io.py +++ b/packages/models-library/tests/test_services_io.py @@ -5,33 +5,32 @@ from pathlib import Path import yaml -from models_library.services import ServiceDockerData, ServiceInput +from common_library.json_serialization import json_dumps +from models_library.services import ServiceInput, ServiceMetaDataPublished from pint import Unit, UnitRegistry -def test_service_port_units(project_tests_dir: Path): +def test_service_port_units(tests_data_dir: Path): ureg = UnitRegistry() - data = yaml.safe_load( - (project_tests_dir / "data" / "metadata-sleeper-2.0.2.yaml").read_text() - ) - print(ServiceDockerData.schema_json(indent=2)) + data = yaml.safe_load((tests_data_dir / "metadata-sleeper-2.0.2.yaml").read_text()) + print(json_dumps(ServiceMetaDataPublished.model_json_schema(), indent=2)) - service_meta = ServiceDockerData.parse_obj(data) + service_meta = ServiceMetaDataPublished.model_validate(data) assert service_meta.inputs for input_nameid, input_meta in service_meta.inputs.items(): assert input_nameid # validation - valid_unit: Unit = ureg.parse_units(input_meta.unit) + # WARNING: pint>=0.21 parse_units(None) raises!!! + valid_unit: Unit = ureg.parse_units(input_meta.unit or "") assert isinstance(valid_unit, Unit) assert valid_unit.dimensionless def test_build_input_ports_from_json_schemas(): - # builds ServiceInput using json-schema port_meta = ServiceInput.from_json_schema( port_schema={ diff --git a/packages/models-library/tests/test_services_resources.py b/packages/models-library/tests/test_services_resources.py new file mode 100644 index 00000000000..3bc4c83c0ec --- /dev/null +++ b/packages/models-library/tests/test_services_resources.py @@ -0,0 +1,21 @@ +import pytest +from models_library.services_resources import ResourceValue + + +@pytest.mark.xfail() +def test_reservation_is_cap_by_limit_on_assigment_pydantic_2_bug(): + + res = ResourceValue(limit=10, reservation=30) + assert res.limit == 10 + assert res.reservation == 10 + + # https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict.validate_assignment + # before-validators DO NOT work on Assignment!!! 
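+ # (so the assignment below is not re-validated and the reservation is not capped back to the limit,
+ # which is why this test is marked xfail)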
+ # SEE https://github.com/pydantic/pydantic/issues/7105 + res.reservation = 30 + assert res.reservation == 10 + + # update here is not validated neither + # + # res.model_copy(update={"reservation": 30}) + # diff --git a/packages/models-library/tests/test_services_types.py b/packages/models-library/tests/test_services_types.py new file mode 100644 index 00000000000..206c531a78f --- /dev/null +++ b/packages/models-library/tests/test_services_types.py @@ -0,0 +1,40 @@ +import pytest +from models_library.projects import ProjectID +from models_library.projects_nodes import NodeID +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from pydantic import PositiveInt + + +@pytest.mark.parametrize( + "user_id, project_id, node_id, iteration, expected_result", + [ + ( + 2, + ProjectID("e08356e4-eb74-49e9-b769-2c26e34c61d9"), + NodeID("a08356e4-eb74-49e9-b769-2c26e34c61d1"), + 5, + "comp_2_e08356e4-eb74-49e9-b769-2c26e34c61d9_a08356e4-eb74-49e9-b769-2c26e34c61d1_5", + ) + ], +) +def test_run_id_get_resource_tracking_run_id( + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + iteration: PositiveInt, + expected_result: str, +): + resource_tracking_service_run_id = ( + ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, project_id, node_id, iteration + ) + ) + assert isinstance(resource_tracking_service_run_id, ServiceRunID) + assert resource_tracking_service_run_id == expected_result + + +def test_get_resource_tracking_run_id_for_dynamic(): + assert isinstance( + ServiceRunID.get_resource_tracking_run_id_for_dynamic(), ServiceRunID + ) diff --git a/packages/models-library/tests/test_sidecar_volumes.py b/packages/models-library/tests/test_sidecar_volumes.py new file mode 100644 index 00000000000..b6de8518d11 --- /dev/null +++ b/packages/models-library/tests/test_sidecar_volumes.py @@ -0,0 +1,17 @@ +# pylint: disable=redefined-outer-name + +import pytest +from models_library.sidecar_volumes import VolumeState, VolumeStatus + + +@pytest.fixture(params=VolumeStatus) +def status(request: pytest.FixtureRequest) -> VolumeStatus: + return request.param + + +def test_volume_state_equality_does_not_use_last_changed(status: VolumeStatus): + # NOTE: `last_changed` is initialized with the utc datetime + # at the moment of the creation of the object. 
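+ # Two instances created at slightly different moments must therefore still compare equal,
+ # i.e. equality ignores `last_changed`.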
+ assert VolumeState(status=status) == VolumeState(status=status) + schema_property_count = len(VolumeState.model_json_schema()["properties"]) + assert len(VolumeState(status=status).model_dump()) == schema_property_count diff --git a/packages/models-library/tests/test_user_preferences.py b/packages/models-library/tests/test_user_preferences.py new file mode 100644 index 00000000000..edac734f0c7 --- /dev/null +++ b/packages/models-library/tests/test_user_preferences.py @@ -0,0 +1,154 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from collections.abc import Iterator +from pathlib import Path +from typing import Any + +import pytest +from models_library.services import ServiceKey, ServiceVersion +from models_library.user_preferences import ( + FrontendUserPreference, + NoPreferenceFoundError, + PreferenceType, + UserServiceUserPreference, + _AutoRegisterMeta, + _BaseUserPreferenceModel, +) +from pydantic import TypeAdapter + +_SERVICE_KEY_AND_VERSION_SAMPLES: list[tuple[ServiceKey, ServiceVersion]] = [ + ( + TypeAdapter(ServiceKey).validate_python("simcore/services/comp/something-1231"), + TypeAdapter(ServiceVersion).validate_python("0.0.1"), + ), + ( + TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/something-1231" + ), + TypeAdapter(ServiceVersion).validate_python("0.0.1"), + ), + ( + TypeAdapter(ServiceKey).validate_python( + "simcore/services/frontend/something-1231" + ), + TypeAdapter(ServiceVersion).validate_python("0.0.1"), + ), +] + + +@pytest.fixture(params=[None, 1, 1.0, "str", {"a": "dict"}, ["a", "list"]]) +def value(request: pytest.FixtureRequest) -> Any: + return request.param + + +@pytest.fixture +def mock_file_path() -> Path: + return Path("/a/file/path") + + +def _get_base_user_preferences_data( + preference_type: PreferenceType, value: Any +) -> dict[str, Any]: + return {"preference_type": preference_type, "value": value} + + +@pytest.mark.parametrize("preference_type", PreferenceType) +def test_base_user_preference_model(value: Any, preference_type: PreferenceType): + base_data = _get_base_user_preferences_data( + preference_type=preference_type, value=value + ) + assert TypeAdapter(_BaseUserPreferenceModel).validate_python(base_data) + + +def test_frontend_preferences(value: Any): + base_data = _get_base_user_preferences_data( + preference_type=PreferenceType.FRONTEND, value=value + ) + + base_data.update({"preference_identifier": "pref-name"}) + # check serialization + frontend_preference = TypeAdapter(FrontendUserPreference).validate_python(base_data) + assert set(frontend_preference.to_db().keys()) == {"value"} + + +def test_user_service_preferences(value: Any, mock_file_path: Path): + base_data = _get_base_user_preferences_data( + preference_type=PreferenceType.USER_SERVICE, value=value + ) + service_key, service_version = _SERVICE_KEY_AND_VERSION_SAMPLES[0] + base_data.update( + { + "service_key": service_key, + "service_version": service_version, + "file_path": mock_file_path, + } + ) + instance = TypeAdapter(UserServiceUserPreference).validate_python(base_data) + assert set(instance.to_db().keys()) == { + "value", + "service_key", + "service_version", + } + + +@pytest.fixture +def unregister_defined_classes() -> Iterator[None]: + yield + # pylint: disable=protected-access + _AutoRegisterMeta.registered_user_preference_classes.pop("Pref1", None) + + +def test__frontend__user_preference(value: Any, unregister_defined_classes: None): + pref1 = FrontendUserPreference.model_validate( + {"preference_identifier": 
"pref_id", "value": value} + ) + assert isinstance(pref1, FrontendUserPreference) + + +@pytest.mark.parametrize( + "service_key, service_version", _SERVICE_KEY_AND_VERSION_SAMPLES +) +def test__user_service__user_preference( + value: Any, + service_key: ServiceKey, + service_version: ServiceVersion, + mock_file_path: Path, + unregister_defined_classes: None, +): + pref1 = UserServiceUserPreference.model_validate( + { + "value": value, + "service_key": service_key, + "service_version": service_version, + } + ) + assert isinstance(pref1, UserServiceUserPreference) + + # NOTE: these will be stored as bytes, + # check bytes serialization/deserialization + pref1_as_bytes = pref1.model_dump_json().encode() + new_instance = UserServiceUserPreference.model_validate_json(pref1_as_bytes) + assert new_instance == pref1 + + +def test_redefine_class_with_same_name_is_not_allowed(unregister_defined_classes: None): + # pylint: disable=unused-variable + def def_class_1(): + class APreference(_BaseUserPreferenceModel): + ... + + def def_class_2(): + class APreference(_BaseUserPreferenceModel): + ... + + def_class_1() + with pytest.raises(TypeError, match="was already defined"): + def_class_2() + + +def test_get_preference_class_from_name_not_found(): + with pytest.raises(NoPreferenceFoundError, match="No preference class found"): + _BaseUserPreferenceModel.get_preference_class_from_name( + "__missing_preference_name__" + ) diff --git a/packages/models-library/tests/test_users.py b/packages/models-library/tests/test_users.py new file mode 100644 index 00000000000..4c9d2756934 --- /dev/null +++ b/packages/models-library/tests/test_users.py @@ -0,0 +1,27 @@ +from models_library.api_schemas_webserver.users import MyProfileGet +from models_library.api_schemas_webserver.users_preferences import Preference +from models_library.groups import AccessRightsDict, Group, GroupsByTypeTuple +from models_library.users import MyProfile +from pydantic import TypeAdapter + + +def test_adapter_from_model_to_schema(): + my_profile = MyProfile.model_validate(MyProfile.model_json_schema()["example"]) + + groups = TypeAdapter(list[Group]).validate_python( + Group.model_json_schema()["examples"] + ) + + ar = AccessRightsDict(read=False, write=False, delete=False) + + my_groups_by_type = GroupsByTypeTuple( + primary=(groups[1], ar), standard=[(groups[2], ar)], everyone=(groups[0], ar) + ) + my_product_group = groups[-1], AccessRightsDict( + read=False, write=False, delete=False + ) + my_preferences = {"foo": Preference(default_value=3, value=1)} + + MyProfileGet.from_domain_model( + my_profile, my_groups_by_type, my_product_group, my_preferences + ) diff --git a/packages/models-library/tests/test_utils_common_validators.py b/packages/models-library/tests/test_utils_common_validators.py new file mode 100644 index 00000000000..5212f5d5bab --- /dev/null +++ b/packages/models-library/tests/test_utils_common_validators.py @@ -0,0 +1,157 @@ +from enum import Enum +from typing import Annotated + +import pytest +from models_library.utils.common_validators import ( + create_enums_pre_validator, + empty_str_to_none_pre_validator, + none_to_empty_str_pre_validator, + null_or_none_str_to_none_validator, + trim_string_before, +) +from pydantic import BaseModel, StringConstraints, ValidationError, field_validator + + +def test_enums_pre_validator(): + class Enum1(Enum): + RED = "RED" + + class Model(BaseModel): + color: Enum1 + + class ModelWithPreValidator(BaseModel): + color: Enum1 + + _from_equivalent_enums = field_validator("color", 
mode="before")( + create_enums_pre_validator(Enum1) + ) + + # with Enum1 + model = Model(color=Enum1.RED) + # See: https://docs.pydantic.dev/latest/migration/#changes-to-pydanticbasemodel + assert ModelWithPreValidator(color=Enum1.RED).model_dump() == model.model_dump() + + # with Enum2 + class Enum2(Enum): + RED = "RED" + + with pytest.raises(ValidationError): + Model(color=Enum2.RED) + + # See: https://docs.pydantic.dev/latest/migration/#changes-to-pydanticbasemodel + assert ModelWithPreValidator(color=Enum2.RED).model_dump() == model.model_dump() + + +def test_empty_str_to_none_pre_validator(): + class Model(BaseModel): + nullable_message: str | None + + _empty_is_none = field_validator("nullable_message", mode="before")( + empty_str_to_none_pre_validator + ) + + model = Model.model_validate({"nullable_message": None}) + assert model == Model.model_validate({"nullable_message": ""}) + + +def test_none_to_empty_str_pre_validator(): + class Model(BaseModel): + message: str + + _none_is_empty = field_validator("message", mode="before")( + none_to_empty_str_pre_validator + ) + + model = Model.model_validate({"message": ""}) + assert model == Model.model_validate({"message": None}) + + +def test_null_or_none_str_to_none_validator(): + class Model(BaseModel): + message: str | None + + _null_or_none_str_to_none_validator = field_validator("message", mode="before")( + null_or_none_str_to_none_validator + ) + + model = Model.model_validate({"message": "none"}) + assert model == Model.model_validate({"message": None}) + + model = Model.model_validate({"message": "null"}) + assert model == Model.model_validate({"message": None}) + + model = Model.model_validate({"message": "NoNe"}) + assert model == Model.model_validate({"message": None}) + + model = Model.model_validate({"message": "NuLl"}) + assert model == Model.model_validate({"message": None}) + + model = Model.model_validate({"message": None}) + assert model == Model.model_validate({"message": None}) + + model = Model.model_validate({"message": ""}) + assert model == Model.model_validate({"message": ""}) + + +def test_trim_string_before(): + max_length = 10 + + class ModelWithTrim(BaseModel): + text: Annotated[str, trim_string_before(max_length=max_length)] + + # Test with string shorter than max_length + short_text = "Short" + model = ModelWithTrim(text=short_text) + assert model.text == short_text + + # Test with string equal to max_length + exact_text = "1234567890" # 10 characters + model = ModelWithTrim(text=exact_text) + assert model.text == exact_text + + # Test with string longer than max_length + long_text = "This is a very long text that should be trimmed" + model = ModelWithTrim(text=long_text) + assert model.text == long_text[:max_length] + assert len(model.text) == max_length + + # Test with non-string value (should be left unchanged) + class ModelWithTrimOptional(BaseModel): + text: Annotated[str | None, trim_string_before(max_length=max_length)] + + model = ModelWithTrimOptional(text=None) + assert model.text is None + + +def test_trim_string_before_with_string_constraints(): + max_length = 10 + + class ModelWithTrimAndConstraints(BaseModel): + text: Annotated[ + str | None, + StringConstraints( + max_length=max_length + ), # NOTE: order does not matter for validation but has an effect in the openapi schema + trim_string_before(max_length=max_length), + ] + + # Check that the OpenAPI schema contains the string constraint + schema = ModelWithTrimAndConstraints.model_json_schema() + assert schema["properties"]["text"] == { + 
"anyOf": [{"maxLength": max_length, "type": "string"}, {"type": "null"}], + "title": "Text", + } + + # Test with string longer than max_length + # This should pass because trim_string_before runs first and trims the input + # before StringConstraints validation happens + long_text = "This is a very long text that should be trimmed" + model = ModelWithTrimAndConstraints(text=long_text) + assert model.text is not None + assert model.text == long_text[:max_length] + assert len(model.text) == max_length + + # Test with string exactly at max_length + exact_text = "1234567890" # 10 characters + model = ModelWithTrimAndConstraints(text=exact_text) + assert model.text == exact_text diff --git a/packages/models-library/tests/test_utils_database_models_factory.py b/packages/models-library/tests/test_utils_database_models_factory.py deleted file mode 100644 index 9e7a5a6ed17..00000000000 --- a/packages/models-library/tests/test_utils_database_models_factory.py +++ /dev/null @@ -1,32 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import pytest -from models_library.utils.database_models_factory import ( - create_pydantic_model_from_sa_table, -) -from pydantic import BaseModel - -# pylint: disable=wildcard-import -# pylint: disable=unused-wildcard-import -from simcore_postgres_database.models import * - -# pylint: enable=wildcard-import -# pylint: enable=unused-wildcard-import -from simcore_postgres_database.models.base import metadata - - -@pytest.mark.parametrize("table_cls", metadata.tables.values(), ids=lambda t: t.name) -def test_table_to_pydantic_models(table_cls): - - PydanticOrm = create_pydantic_model_from_sa_table( - table=table_cls, include_server_defaults=True - ) - assert issubclass(PydanticOrm, BaseModel) - - print(PydanticOrm.schema_json(indent=2)) - - # TODO: create fakes automatically? 
SEE packages/pytest-simcore/src/pytest_simcore/helpers/rawdata_fakers.py - # instance = PydanticModelAtDB.create_fake(**overrides) - # assert issubclass(instance, PydanticModelAtDB) diff --git a/packages/models-library/tests/test_utils_docker_compose.py b/packages/models-library/tests/test_utils_docker_compose.py index 15674d4b6bc..357016e7b87 100644 --- a/packages/models-library/tests/test_utils_docker_compose.py +++ b/packages/models-library/tests/test_utils_docker_compose.py @@ -1,4 +1,8 @@ # pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + import pytest import yaml @@ -10,7 +14,7 @@ @pytest.fixture() -def service_spec() -> "ComposeSpecLabel": +def docker_compose_spec() -> "ComposeSpecLabelDict": return { "version": "2.3", "services": { @@ -32,25 +36,27 @@ def service_spec() -> "ComposeSpecLabel": @pytest.fixture() def simcore_registry() -> str: - return "mock_reg" + return "mock_docker_registry_base_name" @pytest.fixture() def service_version() -> str: - return "mock_reg" + return "1.2.3" def test_replace_env_vars_in_compose_spec( - service_spec: "ComposeSpecLabel", simcore_registry: str, service_version: str + docker_compose_spec: "ComposeSpecLabelDict", + simcore_registry: str, + service_version: str, ) -> None: stringified_service_spec: str = replace_env_vars_in_compose_spec( - service_spec, + docker_compose_spec, replace_simcore_registry=simcore_registry, replace_service_version=service_version, ) test_replaced_spec = ( - yaml.safe_dump(service_spec) + yaml.safe_dump(docker_compose_spec) .replace(MATCH_SERVICE_VERSION, service_version) .replace(MATCH_SIMCORE_REGISTRY, simcore_registry) ) diff --git a/packages/models-library/tests/test_utils_enums.py b/packages/models-library/tests/test_utils_enums.py index c0d5ed6be8c..5eece557e05 100644 --- a/packages/models-library/tests/test_utils_enums.py +++ b/packages/models-library/tests/test_utils_enums.py @@ -11,4 +11,4 @@ class _Ordinal(StrAutoEnum): def test_strautoenum(): - assert list(f"{n}" for n in _Ordinal) == ["NORTH", "EAST", "SOUTH", "WEST"] + assert [f"{n}" for n in _Ordinal] == ["NORTH", "EAST", "SOUTH", "WEST"] diff --git a/packages/models-library/tests/test_utils_fastapi_encoders.py b/packages/models-library/tests/test_utils_fastapi_encoders.py index bf0a19bfb47..ecd046af24e 100644 --- a/packages/models-library/tests/test_utils_fastapi_encoders.py +++ b/packages/models-library/tests/test_utils_fastapi_encoders.py @@ -4,37 +4,25 @@ # pylint: disable=too-many-arguments import json -from typing import Any from uuid import uuid4 -import pytest +from common_library.json_serialization import json_dumps from faker import Faker from models_library.utils.fastapi_encoders import servicelib_jsonable_encoder -from pydantic.json import pydantic_encoder - - -def servicelib__json_serialization__json_dumps(obj: Any, **kwargs): - # Analogous to 'servicelib.json_serialization.json_dumps' - return json.dumps(obj, default=pydantic_encoder, **kwargs) def test_using_uuids_as_keys(faker: Faker): - uuid_key = uuid4() - with pytest.raises(TypeError): - # IMPORTANT NOTE: we cannot serialize UUID objects as keys. - # We have to convert them to strings but then the class information is lost upon deserialization i.e. it is not reversable! - # NOTE: This could potentially be solved using 'orjson' !! 
- # - servicelib__json_serialization__json_dumps({uuid_key: "value"}, indent=1) + # this was previously failing + assert json_dumps({uuid_key: "value"}, indent=1) - # use encoder + # uuid keys now serialize without raising to the expected format string data = servicelib_jsonable_encoder({uuid_key: "value"}) assert data == {f"{uuid_key}": "value"} # serialize w/o raising - dumped_data = servicelib__json_serialization__json_dumps(data, indent=1) + dumped_data = json_dumps(data, indent=1) # deserialize w/o raising loaded_data = json.loads(dumped_data) diff --git a/packages/models-library/tests/test_utils_json_serialization.py b/packages/models-library/tests/test_utils_json_serialization.py new file mode 100644 index 00000000000..a229c16b75d --- /dev/null +++ b/packages/models-library/tests/test_utils_json_serialization.py @@ -0,0 +1,48 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from copy import deepcopy +from typing import Any +from uuid import uuid4 + +import pytest +from common_library.json_serialization import json_dumps, json_loads +from faker import Faker +from models_library.utils.fastapi_encoders import jsonable_encoder + + +@pytest.fixture +def fake_data_dict(faker: Faker) -> dict[str, Any]: + data = { + "uuid_as_UUID": faker.uuid4(cast_to=None), + "uuid_as_str": faker.uuid4(), + "int": faker.pyint(), + "float": faker.pyfloat(), + "str": faker.pystr(), + "dict": faker.pydict(), + "list": faker.pylist(), + } + data["object"] = deepcopy(data) + return data + + +def test_serialization_of_uuids(fake_data_dict: dict[str, Any]): + # NOTE: UUID serialization/deserialization is asymmetric. + # We should eventually fix this, perhaps by adding a corresponding decoder. + + uuid_obj = uuid4() + assert json_dumps(uuid_obj) == f'"{uuid_obj}"' + + obj = {"ids": [uuid4() for _ in range(3)]} + dump = json_dumps(obj) + assert json_loads(dump) == jsonable_encoder(obj) + + +def test_serialization_of_nested_dicts(fake_data_dict: dict[str, Any]): + + obj = {"data": fake_data_dict, "ids": [uuid4() for _ in range(3)]} + + dump = json_dumps(obj) + assert json_loads(dump) == jsonable_encoder(obj) diff --git a/packages/models-library/tests/test_utils_labels_annotations.py b/packages/models-library/tests/test_utils_labels_annotations.py new file mode 100644 index 00000000000..68509a43a37 --- /dev/null +++ b/packages/models-library/tests/test_utils_labels_annotations.py @@ -0,0 +1,37 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pathlib import Path +from typing import Any + +import pytest +import yaml +from models_library.utils.labels_annotations import from_labels, to_labels + + +@pytest.fixture +def metadata_config(tests_data_dir: Path): + config = yaml.safe_load( + (tests_data_dir / "metadata-sleeper-2.0.2.yaml").read_text() + ) + # adds some env-vars + # FIXME: if version is set as '1.0' then pydantic will resolve it as a float!!
+ config.update({"schema-version": "1.0.0", "build-date": "${BUILD_DATE}"}) + return config + + +@pytest.mark.parametrize("trim_key_head", [True, False]) +def test_to_and_from_labels(metadata_config: dict[str, Any], trim_key_head: bool): + + metadata_labels = to_labels( + metadata_config, prefix_key="swiss.itisfoundation", trim_key_head=trim_key_head + ) + print(f"\n{trim_key_head=:*^100}") + + assert all(key.startswith("swiss.itisfoundation.") for key in metadata_labels) + + got_config = from_labels( + metadata_labels, prefix_key="swiss.itisfoundation", trim_key_head=trim_key_head + ) + assert got_config == metadata_config diff --git a/packages/models-library/tests/test_utils_nodes.py b/packages/models-library/tests/test_utils_nodes.py index 736dc8bab6f..a41595ec568 100644 --- a/packages/models-library/tests/test_utils_nodes.py +++ b/packages/models-library/tests/test_utils_nodes.py @@ -2,7 +2,7 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -from typing import Any, Dict +from typing import Any from uuid import uuid4 import pytest @@ -14,13 +14,6 @@ SimcoreS3FileID, ) from models_library.utils.nodes import compute_node_hash -from pydantic import AnyUrl, parse_obj_as - - -@pytest.fixture() -def node_id() -> NodeID: - return uuid4() - ANOTHER_NODE_ID = uuid4() ANOTHER_NODE_OUTPUT_KEY = "the_output_link" @@ -45,9 +38,7 @@ def node_id() -> NodeID: "input_bool": True, "input_string": "string", "input_downloadlink": DownloadLink( - downloadLink=parse_obj_as( - AnyUrl, "http://httpbin.org/image/jpeg" - ) + downloadLink="http://httpbin.org/image/jpeg" ), "input_simcorelink": SimCoreFileLink( store=0, @@ -77,9 +68,9 @@ def node_id() -> NodeID: ], ) async def test_compute_node_hash( - node_id: NodeID, node_payload: Dict[str, Any], expected_hash: str + node_id: NodeID, node_payload: dict[str, Any], expected_hash: str ): - async def get_node_io_payload_cb(some_node_id: NodeID) -> Dict[str, Any]: + async def get_node_io_payload_cb(some_node_id: NodeID) -> dict[str, Any]: assert some_node_id in [node_id, ANOTHER_NODE_ID] return node_payload if some_node_id == node_id else ANOTHER_NODE_PAYLOAD diff --git a/packages/models-library/tests/test_utils_pydantic_models_factory.py b/packages/models-library/tests/test_utils_pydantic_models_factory.py deleted file mode 100644 index 16a2abbdb3a..00000000000 --- a/packages/models-library/tests/test_utils_pydantic_models_factory.py +++ /dev/null @@ -1,301 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import json -from typing import Callable, Optional - -import pytest -from faker import Faker -from models_library.generics import Envelope -from models_library.rest_pagination import Page -from models_library.rest_pagination_utils import PageDict, paginate_data -from models_library.utils.pydantic_models_factory import ( - collect_fields_attrs, - copy_model, -) -from pydantic import BaseModel, validator -from pydantic.types import PositiveInt -from yarl import URL - - -def assert_same_fields(model_cls, reference_model_cls): - - got_fields = collect_fields_attrs(model_cls) - expected_fields = collect_fields_attrs(reference_model_cls) - - assert set(got_fields.keys()) == set(expected_fields.keys()) - - # FIXME: can be tmp used to debug but cannot compare uuids of - assert got_fields == expected_fields - - -def _trim_descriptions(schema: dict): - data = {} - for key in schema: - if key not in ("description", "title"): - value = 
schema[key] - if isinstance(value, dict): - value = _trim_descriptions(value) - data[key] = value - return data - - -def _validators_factory() -> Callable: - """Common validator functions""" - - def name_must_contain_space(v): - if " " not in v: - raise ValueError("must contain a space") - return v.title() - - def passwords_match(v, values, **kwargs): - pasword = values.get("password") - if pasword is None: - raise ValueError("reference password missing") - - if v != pasword: - raise ValueError("passwords do not match") - return v - - def username_alphanumeric(v): - assert v.isalnum(), "must be alphanumeric" - return v - - _map = { - "display_name": name_must_contain_space, - "username": username_alphanumeric, - "password2": passwords_match, - } - - def _create(field_name) -> classmethod: - return validator(field_name, allow_reuse=True)(_map[field_name]) - - return _create - - -create_validator_for = _validators_factory() - -# -# NOTE: Rationale of this test-suite -# -# Below we represent different views of a 'user' resource represented with -# different models depending on the context. We have a domain model 'User', that -# is used to exchange internally in the business logic, as well as different -# views used in the request body (e.g. 'UserCreate', 'UserUpdate', ...) or response payload -# (e.g. 'UserGet', 'UserListItem', ...) for CRUD entrypoints in an API. -# -# Note that every context demands not only a different set of fields but also -# different constraints. Those will depend on nature of the parsed data sources -# as well as the guarantees defined on the data captured in the model. -# -# This approach should be applicable to any resource but we find that -# 'user' is a good use case that naturally incorporates many of the variants -# that we have typically encountered. -# -# All these variants have many features in common so the idea is to implement a minimalistic -# policy-based tools that can safely compose them all. -# -# Good examples on how to use model polices can be found -# in https://fastapi-crudrouter.awtkns.com/schemas or -# in https://fastapi.tiangolo.com/tutorial/body-updates/#body-updates -# -# -class User(BaseModel): - """Domain model""" - - id: PositiveInt - display_name: str - username: str - password_hash: str - - # validators when model created in code - _name_must_contain_space = create_validator_for("display_name") - _username_alphanumeric = create_validator_for("username") - - -class UserCreate(BaseModel): - """in -> Model for body of POST /users""" - - display_name: str - username: str - password: str - password2: str - - # parses json-body from Create request - _name_must_contain_space = create_validator_for("display_name") - _username_alphanumeric = create_validator_for("username") - _passwords_match = create_validator_for("password2") - - -class UserUpdate(BaseModel): - """in -> Model for body of PATCH /users/{id}""" - - display_name: Optional[str] - username: Optional[str] - password: Optional[str] - password2: Optional[str] - - # parses json-body from Update request - _name_must_contain_space = create_validator_for("display_name") - _username_alphanumeric = create_validator_for("username") - _passwords_match = create_validator_for("password2") - - -# Model for body of PUT /users/{id} -UserReplace = UserCreate - - -class UserGet(BaseModel): - """<- out Detailed model for response in GET /users/{id}""" - - id: PositiveInt - display_name: str - username: str - - # parses from User (i.e. 
validated domain model) - - -class UserListItem(BaseModel): - """<- out Item model for response in GET /users - - Usage: Page[UserListItem] - """ - - id: PositiveInt - username: str - - # parses from User - - -@pytest.fixture -def fake_user(faker: Faker) -> User: - """a fake domain model of a User resource""" - return User( - id=faker.pyint(min_value=1), - display_name=faker.name(), - username=faker.user_name(), - password_hash=faker.md5(), - ) - - -def test_build_UserCreate_model(): - # In UserCreate, we exclude the primary key - _BaseUserCreate = copy_model( - User, name="_BaseUserCreate", exclude={"id", "password_hash"} - ) - - # With the new base, we have everything in User (including validators) - # except for the primary key, then we just need to extend it to include - # the second password - class _UserCreate(_BaseUserCreate): - """in -> Model for body of POST /users""" - - password: str - password2: str - _passwords_match = create_validator_for("password2") - - assert _trim_descriptions(UserCreate.schema()) == _trim_descriptions( - _UserCreate.schema() - ) - - assert_same_fields(_UserCreate, UserCreate) - - -def test_build_UserUpdate_model(faker: Faker): - # model for request body Update method https://google.aip.dev/134 (PATCH) - - # in UserUpdate, is as UserCreate but all optional - _UserUpdate = copy_model(UserCreate, name="UserUpdate", as_update_model=True) - - assert _trim_descriptions(UserUpdate.schema()) == _trim_descriptions( - _UserUpdate.schema() - ) - # - # SEE insight on how to partially update a model - # in https://fastapi.tiangolo.com/tutorial/body-updates/#partial-updates-with-patch - # - - update_change_display = _UserUpdate(display_name=faker.name()) - update_reset_password = _UserUpdate(password="secret", password2="secret") - update_username = _UserUpdate(username=faker.user_name()) - - -def test_build_UserReplace_model(): - # model for request body Replace method https://google.aip.dev/134 - - # Replace is like create but w/o primary key (if it would be defined on the client) - class _UserReplace(copy_model(User, exclude={"id", "password_hash"})): - password: str - password2: str - _passwords_match = create_validator_for("password2") - - assert _trim_descriptions(UserReplace.schema()) == _trim_descriptions( - _UserReplace.schema() - ) - # - # SEE insights on how to replace a model in - # https://fastapi.tiangolo.com/tutorial/body-updates/#update-replacing-with-put - # - - -def test_build_UserGet_model(fake_user: User): - # model for response payload of Get method https://google.aip.dev/131 - - # if the source is User domain model, then the data - # is already guaranteed (and we could skip validators) - # or alternative use UserGet.construct() - # - _UserGet = copy_model( - User, - name="UserGet", - exclude={"password_hash"}, - skip_validators=True, - ) - - assert _trim_descriptions(UserGet.schema()) == _trim_descriptions(_UserGet.schema()) - - payload_user: dict = ( - Envelope[_UserGet].parse_data(fake_user).dict(exclude_unset=True) - ) - - # NOTE: this would be the solid way to get a jsonable dict ... but requires fastapi! 
- # from fastapi.encoders import jsonable_encoder - # jsonable_encoder(payload_user) - # - print(json.dumps(payload_user, indent=1)) - - -def test_build_UserListItem_model(fake_user: User, faker: Faker): - # model for response payload of List method https://google.aip.dev/132) - - # Typically a light version of the Get model - _UserListItem = copy_model( - UserGet, - name="UserListItem", - exclude={"display_name"}, - skip_validators=True, - ) - - assert _trim_descriptions(UserListItem.schema()) == _trim_descriptions( - _UserListItem.schema() - ) - - # to build the pagination model, simply apply the Page generic - assert _trim_descriptions(Page[_UserListItem].schema()) == _trim_descriptions( - Page[UserListItem].schema() - ) - - # parse stored data - item_user = _UserListItem.parse_obj(fake_user).dict(exclude_unset=True) - - page: PageDict = paginate_data( - chunk=[item_user], - request_url=URL(faker.url()).with_path("/users"), - total=100, - limit=1, - offset=0, - ) - page_users = Page[_UserListItem].parse_obj(page) - print(page_users.json(indent=2, exclude_unset=True)) diff --git a/packages/models-library/tests/test_utils_pydantic_tools_extension.py b/packages/models-library/tests/test_utils_pydantic_tools_extension.py new file mode 100644 index 00000000000..174233c7adb --- /dev/null +++ b/packages/models-library/tests/test_utils_pydantic_tools_extension.py @@ -0,0 +1,45 @@ +from models_library.utils.pydantic_tools_extension import parse_obj_or_none +from pydantic import BaseModel, Field, StrictInt + + +class MyModel(BaseModel): + a: int + b: int | None = Field(...) + c: int = 42 + d: int | None = None + e: int = Field(default=324, description="optional non-nullable") + + +def test_schema(): + assert MyModel.model_json_schema() == { + "title": "MyModel", + "type": "object", + "properties": { + "a": {"title": "A", "type": "integer"}, + "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "B"}, + "c": {"title": "C", "default": 42, "type": "integer"}, + "d": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "title": "D", + }, + "e": { + "default": 324, + "title": "E", + "type": "integer", + "description": "optional non-nullable", + }, + }, + "required": ["a", "b"], + } + + +def test_only_required(): + model = MyModel(a=1, b=2) + assert model.model_dump() == {"a": 1, "b": 2, "c": 42, "d": None, "e": 324} + assert model.model_dump(exclude_unset=True) == {"a": 1, "b": 2} + + +def test_parse_obj_or_none(): + assert parse_obj_or_none(StrictInt, 42) == 42 + assert parse_obj_or_none(StrictInt, 3.14) is None diff --git a/packages/models-library/tests/test_utils_service_io.py b/packages/models-library/tests/test_utils_service_io.py index 041dc1b598b..7ef8d4070a0 100644 --- a/packages/models-library/tests/test_utils_service_io.py +++ b/packages/models-library/tests/test_utils_service_io.py @@ -6,38 +6,41 @@ import itertools import json +import re import sys +from collections.abc import Iterable +from contextlib import suppress from pathlib import Path -from typing import Union import pytest +from models_library.basic_regex import SIMPLE_VERSION_RE from models_library.services import ServiceInput, ServiceOutput, ServicePortKey from models_library.utils.json_schema import jsonschema_validate_schema from models_library.utils.services_io import get_service_io_json_schema -from pydantic import parse_obj_as +from pydantic import TypeAdapter example_inputs_labels = [ - e for e in ServiceInput.Config.schema_extra["examples"] if e["label"] + e for e in 
ServiceInput.model_config["json_schema_extra"]["examples"] if e["label"] ] example_outputs_labels = [ - e for e in ServiceOutput.Config.schema_extra["examples"] if e["label"] + e for e in ServiceOutput.model_config["json_schema_extra"]["examples"] if e["label"] ] @pytest.fixture(params=example_inputs_labels + example_outputs_labels) -def service_port(request: pytest.FixtureRequest) -> Union[ServiceInput, ServiceOutput]: +def service_port(request: pytest.FixtureRequest) -> ServiceInput | ServiceOutput: try: index = example_inputs_labels.index(request.param) - example = ServiceInput.Config.schema_extra["examples"][index] - return ServiceInput.parse_obj(example) + example = ServiceInput.model_config["json_schema_extra"]["examples"][index] + return ServiceInput.model_validate(example) except ValueError: index = example_outputs_labels.index(request.param) - example = ServiceOutput.Config.schema_extra["examples"][index] - return ServiceOutput.parse_obj(example) + example = ServiceOutput.model_config["json_schema_extra"]["examples"][index] + return ServiceOutput.model_validate(example) -def test_get_schema_from_port(service_port: Union[ServiceInput, ServiceOutput]): - print(service_port.json(indent=2)) +def test_get_schema_from_port(service_port: ServiceInput | ServiceOutput): + print(service_port.model_dump_json(indent=2)) # get schema = get_service_io_json_schema(service_port) @@ -55,7 +58,7 @@ def test_get_schema_from_port(service_port: Union[ServiceInput, ServiceOutput]): TEST_DATA_FOLDER = CURRENT_DIR / "data" -@pytest.mark.diagnostics +@pytest.mark.diagnostics() @pytest.mark.parametrize( "metadata_path", TEST_DATA_FOLDER.rglob("metadata*.json"), @@ -70,8 +73,12 @@ def test_against_service_metadata_configs(metadata_path: Path): meta = json.loads(metadata_path.read_text()) - inputs = parse_obj_as(dict[ServicePortKey, ServiceInput], meta["inputs"]) - outputs = parse_obj_as(dict[ServicePortKey, ServiceOutput], meta["outputs"]) + inputs = TypeAdapter(dict[ServicePortKey, ServiceInput]).validate_python( + meta["inputs"] + ) + outputs = TypeAdapter(dict[ServicePortKey, ServiceOutput]).validate_python( + meta["outputs"] + ) for port in itertools.chain(inputs.values(), outputs.values()): schema = get_service_io_json_schema(port) @@ -82,3 +89,37 @@ def test_against_service_metadata_configs(metadata_path: Path): assert schema # check valid jsons-schema jsonschema_validate_schema(schema) + + +assert SIMPLE_VERSION_RE[0] == "^" +assert SIMPLE_VERSION_RE[-1] == "$" +_VERSION_SEARCH_RE = re.compile(SIMPLE_VERSION_RE[1:-1]) # without $ and ^ + + +def _iter_main_services() -> Iterable[Path]: + """NOTE: Filters the main service when there is a group + of services behind a node. 
+ """ + for p in TEST_DATA_FOLDER.rglob("metadata-*.json"): + with suppress(Exception): + meta = json.loads(p.read_text()) + if (meta.get("type") == "computational") or meta.get( + "service.container-http-entrypoint" + ): + yield p + + +@pytest.mark.diagnostics() +@pytest.mark.parametrize( + "metadata_path", + (p for p in _iter_main_services() if "latest" not in p.name), + ids=lambda p: f"{p.parent.name}/{p.name}", +) +def test_service_metadata_has_same_version_as_tag(metadata_path: Path): + meta = json.loads(metadata_path.read_text()) + + # metadata-M.m.b.json + match = _VERSION_SEARCH_RE.search(metadata_path.name) + assert match, f"tag {metadata_path.name} is not a version" + version_in_tag = match.group() + assert meta["version"] == version_in_tag diff --git a/packages/models-library/tests/test_utils_specs_substitution.py b/packages/models-library/tests/test_utils_specs_substitution.py new file mode 100644 index 00000000000..c523271bd2a --- /dev/null +++ b/packages/models-library/tests/test_utils_specs_substitution.py @@ -0,0 +1,218 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Any + +import pytest +import yaml +from models_library.utils.specs_substitution import ( + IdentifierSubstitutionError, + SpecsSubstitutionsResolver, + SubstitutionValue, +) +from pydantic import TypeAdapter + + +@pytest.fixture() +def simcore_registry() -> str: + return "mock_registry_basename" + + +@pytest.fixture() +def service_version() -> str: + return "1.2.3" + + +@pytest.fixture() +def available_osparc_variables( + simcore_registry: str, + service_version: str, +) -> dict[str, SubstitutionValue]: + osparc_vendor_variables = { + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOST": "product_a-server", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_PRIMARY_PORT": 1, + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_SECONDARY_PORT": 2, + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_IP": "1.1.1.1", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_PORT": "21", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE": "license.txt", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE_PRODUCT1": "license-p1.txt", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE_PRODUCT2": "license-p2.txt", + "OSPARC_VARIABLE_VENDOR_SECRET_LIST": "[1, 2, 3]", + "OSPARC_VARIABLE__WITH_BRACES": "has_a_value", + } + + environs = { + **osparc_vendor_variables, + "SIMCORE_REGISTRY": simcore_registry, + "SERVICE_VERSION": service_version, + "DISPLAY": "True", + } + return TypeAdapter(dict[str, SubstitutionValue]).validate_python(environs) + + +@pytest.mark.parametrize( + "service_name,service_spec,expected_service_spec", + [ + ( + "other_service", + { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/other_service:${SERVICE_VERSION}", + "init": True, + "depends_on": ["this_service"], + }, + { + "depends_on": ["this_service"], + "image": "mock_registry_basename/simcore/services/dynamic/other_service:1.2.3", + "init": True, + }, + ), + ( + "this_service", + { + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/this_service:${SERVICE_VERSION}", + "runtime": "nvidia", + "init": True, + "environment": [ + "DISPLAY=${DISPLAY}", + "SOME_LIST=$OSPARC_VARIABLE_VENDOR_SECRET_LIST", + "MY_LICENSE=$OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE", + "USING_EMPTY_DEFAULT=${OSPARC_VARIABLE__EMPTY_DEFAULT:-}", + "USING_DEFAULT=${OSPARC_VARIABLE__WITH_A_DEFAULT:-{}}", + "RESOLVES_EXTERNALLY=${OSPARC_VARIABLE__WITH_BRACES}", + 
"OVERWRITING_DEFAULT_BECAUSE_RESOLVES_EXTERNALLY=${OSPARC_VARIABLE__WITH_BRACES:-ignore_default}", + ], + "volumes": ["/tmp/.X11-unix:/tmp/.X11-unix"], + }, + { + "environment": [ + "DISPLAY=True", + "SOME_LIST=[1, 2, 3]", + "MY_LICENSE=license.txt", + "USING_EMPTY_DEFAULT=", + "USING_DEFAULT={}", + "RESOLVES_EXTERNALLY=has_a_value", + "OVERWRITING_DEFAULT_BECAUSE_RESOLVES_EXTERNALLY=has_a_value", + ], + "image": "mock_registry_basename/simcore/services/dynamic/this_service:1.2.3", + "init": True, + "runtime": "nvidia", + "volumes": ["/tmp/.X11-unix:/tmp/.X11-unix"], + }, + ), + ], +) +def test_substitutions_in_compose_spec( + available_osparc_variables: dict[str, SubstitutionValue], + service_name: str, + service_spec: dict[str, Any], + expected_service_spec: dict[str, Any], +): + specs_resolver = SpecsSubstitutionsResolver(service_spec, upgrade=True) + + identifiers_requested = specs_resolver.get_identifiers() + + substitutions = specs_resolver.set_substitutions(available_osparc_variables) + assert substitutions is specs_resolver.substitutions + + assert set(identifiers_requested) == set(substitutions.keys()) + + new_service_spec = specs_resolver.run() + + assert not substitutions.unused + assert substitutions.used == set(identifiers_requested) + + new_service_spec_text = yaml.safe_dump(new_service_spec) + + assert ( + "$" not in new_service_spec_text + ), f"All should be replaced in '{service_name}': {substitutions.used}" + + assert new_service_spec == expected_service_spec + + +def test_nothing_to_substitute(): + original_spec = {"x": 33, "y": {"z": True}} + + specs_resolver = SpecsSubstitutionsResolver(original_spec, upgrade=False) + + # no substitutions + assert specs_resolver.run() == original_spec + + +def test_no_identifier_present( + available_osparc_variables: dict[str, SubstitutionValue] +): + original_spec = {"x": 33, "y": {"z": True}, "foo": "$UNREGISTERED_ID"} + + specs_resolver = SpecsSubstitutionsResolver(original_spec, upgrade=False) + + assert specs_resolver.get_identifiers() == ["UNREGISTERED_ID"] + assert specs_resolver.set_substitutions(available_osparc_variables) == {} + + # no substitutions + assert specs_resolver.run() == original_spec + + +@pytest.mark.parametrize("var_template", ["$VAR", "${VAR}", "${VAR:-%s}"]) +@pytest.mark.parametrize("value", ["", "a", "1", "1.1", "aa", "$", "$$$$", "[]", "{}"]) +def test_specs_substitutions_resolver_various_cases(var_template: str, value: str): + env_includes_default_value = False + try: + formatted_template = var_template % value + env_includes_default_value = True + except TypeError: + formatted_template = var_template + + input_dict = {"key": f"{formatted_template}"} + text_template = SpecsSubstitutionsResolver(input_dict, upgrade=True) + + replace_with: dict[str, Any] = ( + {} + if env_includes_default_value + else {i: value for i in text_template.get_identifiers()} + ) + + text_template.set_substitutions(replace_with) + replaced_dict = text_template.run() + + assert input_dict != replaced_dict + assert replaced_dict["key"] == value + + +def test_safe_unsafe_substitution(): + input_dict = {"key": "$VAR"} + text_template = SpecsSubstitutionsResolver(input_dict, upgrade=True) + + # var is found + replace_with: dict[str, Any] = {"VAR": "a_value"} + text_template.set_substitutions(replace_with) + replaced_dict = text_template.run(safe=True) + assert replaced_dict == {"key": "a_value"} + + # var is not found and not replaced without raising an error + text_template.set_substitutions({}) + replaced_dict = 
text_template.run(safe=True) + assert replaced_dict == {"key": "$VAR"} + + # when var is not replace with safe=False an error will be raised + with pytest.raises( + IdentifierSubstitutionError, match="Was not able to substitute identifier" + ): + text_template.run(safe=False) + + +def test_substitution_with_defaults_and_same_var_name(): + input_dict = {"k1": "${VAR:-v1}", "k2": "${VAR:-v2}"} + text_template = SpecsSubstitutionsResolver(input_dict, upgrade=True) + + # with a provided value + text_template.set_substitutions({"VAR": "a_value"}) + replaced_dict = text_template.run() + assert replaced_dict == {"k1": "a_value", "k2": "a_value"} + + # using defaults + text_template.set_substitutions({}) + replaced_dict = text_template.run() + assert replaced_dict == {"k1": "v1", "k2": "v2"} diff --git a/packages/models-library/tests/test_utils_string_substitution.py b/packages/models-library/tests/test_utils_string_substitution.py index 7978112cb65..cfc627ae819 100644 --- a/packages/models-library/tests/test_utils_string_substitution.py +++ b/packages/models-library/tests/test_utils_string_substitution.py @@ -3,16 +3,19 @@ # pylint: disable=unused-variable # pylint: disable=too-many-arguments +import json import sys from pathlib import Path +from textwrap import dedent import pytest from models_library.utils.string_substitution import ( SubstitutionsDict, - TemplateText, + TextTemplate, substitute_all_legacy_identifiers, upgrade_identifier, ) +from pytest_simcore.helpers.monkeypatch_envs import load_dotenv @pytest.mark.parametrize( @@ -20,15 +23,15 @@ [ ( "%%container_name.sym-server%%", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_SYM_SERVER", + "OSPARC_VARIABLE_CONTAINER_NAME_SYM_SERVER", ), ( "%service_uuid%", - "OSPARC_ENVIRONMENT_SERVICE_UUID", + "OSPARC_VARIABLE_SERVICE_UUID", ), ( "$SERVICE_VERSION", - "OSPARC_ENVIRONMENT_SERVICE_VERSION", + "OSPARC_VARIABLE_SERVICE_VERSION", ), ], ) @@ -37,7 +40,6 @@ def test_upgrade_identifiers(legacy: str, expected: str): def test_substitution_with_new_and_legacy_identifiers(): - stringified_config = """ compose_spec: service-one: @@ -48,26 +50,26 @@ def test_substitution_with_new_and_legacy_identifiers(): - SYM_SERVER_HOSTNAME=%%container_name.sym-server%% - APP_HOSTNAME=%%container_name.dsistudio-app%% - APP_HOSTNAME=some-prefix_%service_uuid% - - MY_LICENSE_FILE=${OSPARC_ENVIRONMENT_VENDOR_LICENSE_FILE} - - MY_PRODUCT=$OSPARC_ENVIRONMENT_CURRENT_PRODUCT - - MY_EMAIL=$OSPARC_ENVIRONMENT_USER_EMAIL + - MY_LICENSE_FILE=${OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE} + - MY_PRODUCT=$OSPARC_VARIABLE_CURRENT_PRODUCT + - MY_EMAIL=$OSPARC_VARIABLE_USER_EMAIL - AS_VOILA=1 - DISPLAY1=$${KEEP_SINCE_IT_USES_DOLLAR_ESCAPE_SIGN} - DISPLAY2=${KEEP_SINCE_IT_WAS_EXCLUDED_FROM_SUBSTITUTIONS} containers-allowed-outgoing-permit-list: s4l-core: - - hostname: $OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_HOST - tcp_ports: [$OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_PRIMARY_PORT, $OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_SECONDARY_PORT] + - hostname: $OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOST + tcp_ports: [$OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_PRIMARY_PORT, $OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_SECONDARY_PORT] dns_resolver: - address: $OSPARC_ENVIRONMENT_VENDOR_LICENSE_DNS_RESOLVER_IP - port: $OSPARC_ENVIRONMENT_VENDOR_LICENSE_DNS_RESOLVER_PORT + address: $OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_IP + port: $OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_PORT containers-allowed-outgoing-internet: - s4l-core-stream """ stringified_config = 
substitute_all_legacy_identifiers(stringified_config) - template = TemplateText(stringified_config) + template = TextTemplate(stringified_config) assert template.is_valid() identifiers = template.get_identifiers() @@ -75,19 +77,19 @@ def test_substitution_with_new_and_legacy_identifiers(): "SIMCORE_REGISTRY", "SERVICE_VERSION", # NOTE: these identifier names were upgraded from legacy - "OSPARC_ENVIRONMENT_CONTAINER_NAME_SYM_SERVER", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_DSISTUDIO_APP", - "OSPARC_ENVIRONMENT_SERVICE_UUID", + "OSPARC_VARIABLE_CONTAINER_NAME_SYM_SERVER", + "OSPARC_VARIABLE_CONTAINER_NAME_DSISTUDIO_APP", + "OSPARC_VARIABLE_SERVICE_UUID", # ----- - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_FILE", - "OSPARC_ENVIRONMENT_CURRENT_PRODUCT", - "OSPARC_ENVIRONMENT_USER_EMAIL", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_FILE", + "OSPARC_VARIABLE_CURRENT_PRODUCT", + "OSPARC_VARIABLE_USER_EMAIL", "KEEP_SINCE_IT_WAS_EXCLUDED_FROM_SUBSTITUTIONS", - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_HOST", - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_PRIMARY_PORT", - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_SERVER_SECONDARY_PORT", - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_DNS_RESOLVER_IP", - "OSPARC_ENVIRONMENT_VENDOR_LICENSE_DNS_RESOLVER_PORT", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_HOST", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_PRIMARY_PORT", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER_SECONDARY_PORT", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_IP", + "OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_DNS_RESOLVER_PORT", ] # prepare substitutions map {id: value, ...} @@ -142,31 +144,30 @@ def test_substitution_with_new_and_legacy_identifiers(): # Some fo the supported identifiers KNOWN_IDENTIFIERS = { "DISPLAY", # NOTE: this might be a mistake! - "OSPARC_ENVIRONMENT_CONTAINER_NAME_DSISTUDIO_APP", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_FSL_APP", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_ISEG_APP", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_S4L_CORE", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_SCT_LABEL_UTILS_APP", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_SPINAL_CORD_TOOLBOX_APP", - "OSPARC_ENVIRONMENT_CONTAINER_NAME_SYM_SERVER", - "OSPARC_ENVIRONMENT_SERVICE_UUID", + "OSPARC_VARIABLE_CONTAINER_NAME_DSISTUDIO_APP", + "OSPARC_VARIABLE_CONTAINER_NAME_FSL_APP", + "OSPARC_VARIABLE_CONTAINER_NAME_ISEG_APP", + "OSPARC_VARIABLE_CONTAINER_NAME_S4L_CORE", + "OSPARC_VARIABLE_CONTAINER_NAME_SCT_LABEL_UTILS_APP", + "OSPARC_VARIABLE_CONTAINER_NAME_SPINAL_CORD_TOOLBOX_APP", + "OSPARC_VARIABLE_CONTAINER_NAME_SYM_SERVER", + "OSPARC_VARIABLE_SERVICE_UUID", "SERVICE_VERSION", "SIMCORE_REGISTRY", } -@pytest.mark.diagnostics +@pytest.mark.diagnostics() @pytest.mark.parametrize( "metadata_path", TEST_DATA_FOLDER.rglob("metadata*.json"), ids=lambda p: f"{p.parent.name}/{p.name}", ) def test_substitution_against_service_metadata_configs(metadata_path: Path): - meta_str = metadata_path.read_text() meta_str = substitute_all_legacy_identifiers(meta_str) - template = TemplateText(meta_str) + template = TextTemplate(meta_str) assert template.is_valid() found = template.get_identifiers() @@ -174,3 +175,76 @@ def test_substitution_against_service_metadata_configs(metadata_path: Path): assert all( identifier in KNOWN_IDENTIFIERS for identifier in found ), f"some identifiers in {found} are new and therefore potentially unsupported" + + +def test_template_substitution_on_envfiles(): + envfile_template = dedent( + """ + x=$VALUE1 + y=$VALUE2 + """ + ) + template = TextTemplate(envfile_template) + assert set(template.get_identifiers()) == {"VALUE1", 
"VALUE2"} + + # NOTE how it casts string to to int + assert template.substitute({"VALUE1": "3", "VALUE2": 3}) == dedent( + """ + x=3 + y=3 + """ + ) + + # NOTE does not cast if it is in a container + assert template.substitute({"VALUE1": ["3", "4"], "VALUE2": [3, 4]}) == dedent( + """ + x=['3', '4'] + y=[3, 4] + """ + ) + + # deserialized AFTER substitution in envfile template + deserialize = load_dotenv(template.substitute({"VALUE1": "3", "VALUE2": 3})) + assert deserialize == { + "x": "3", + "y": "3", + } + + deserialize = load_dotenv( + template.substitute({"VALUE1": ["3", "4"], "VALUE2": [3, 4]}) + ) + assert deserialize == { + "x": "['3', '4']", + "y": "[3, 4]", + } + + +def test_template_substitution_on_jsondumps(): + # NOTE: compare with test_template_substitution_on_envfiles + + json_template = {"x": "$VALUE1", "y": "$VALUE2"} + json_dumps_template = json.dumps(json_template) # LIKE image labels! + + # NOTE: that here we are enforcing the values to be strings! + assert json_dumps_template == '{"x": "$VALUE1", "y": "$VALUE2"}' + + template = TextTemplate(json_dumps_template) + assert set(template.get_identifiers()) == {"VALUE1", "VALUE2"} + + # NOTE how it casts string to str + deserialized = json.loads(template.substitute({"VALUE1": "3", "VALUE2": 3})) + + assert deserialized == { + "x": "3", + "y": "3", # <--- NOTE cast to str! + } + + # NOTE does not cast if it is in a container + deserialized = json.loads( + template.substitute({"VALUE1": ["3", "4"], "VALUE2": [3, 4]}) + ) + + assert deserialized == { + "x": "['3', '4']", + "y": "[3, 4]", + } diff --git a/packages/notifications-library/Makefile b/packages/notifications-library/Makefile new file mode 100644 index 00000000000..57f80644979 --- /dev/null +++ b/packages/notifications-library/Makefile @@ -0,0 +1,49 @@ +# +# Targets for DEVELOPMENT of aws Library +# +include ../../scripts/common.Makefile +include ../../scripts/common-package.Makefile + +.PHONY: requirements +requirements: ## compiles pip requirements (.in -> .txt) + @$(MAKE_C) requirements reqs + + +.PHONY: install-dev install-prod install-ci +install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode + # installing in $(subst install-,,$@) mode + @uv pip sync requirements/$(subst install-,,$@).txt + + +.PHONY: tests tests-ci +tests: ## runs unit tests + # running unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov=notifications_library \ + --durations=10 \ + --exitfirst \ + --failed-first \ + --pdb \ + -vv \ + $(CURDIR)/tests + +tests-ci: ## runs unit tests + # running unit tests + @pytest \ + --asyncio-mode=auto \ + --color=yes \ + --cov-append \ + --cov-config=../../.coveragerc \ + --cov-report=term-missing \ + --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ + --cov=notifications_library \ + --durations=10 \ + --log-date-format="%Y-%m-%d %H:%M:%S" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ + --verbose \ + $(CURDIR)/tests diff --git a/packages/notifications-library/README.md b/packages/notifications-library/README.md new file mode 100644 index 00000000000..ba7c355b51e --- /dev/null +++ b/packages/notifications-library/README.md @@ -0,0 +1,6 @@ +# simcore Notifications Library + +Utilities and message templates for users notifications + +## Rationale + - Initial design https://github.com/ITISFoundation/osparc-simcore/pull/5333 diff --git a/packages/notifications-library/VERSION 
b/packages/notifications-library/VERSION new file mode 100644 index 00000000000..6e8bf73aa55 --- /dev/null +++ b/packages/notifications-library/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/packages/notifications-library/requirements/Makefile b/packages/notifications-library/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/packages/notifications-library/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. _migration.txt: _base.txt diff --git a/packages/notifications-library/requirements/_base.in b/packages/notifications-library/requirements/_base.in new file mode 100644 index 00000000000..047005b4a39 --- /dev/null +++ b/packages/notifications-library/requirements/_base.in @@ -0,0 +1,12 @@ +# +# Specifies third-party dependencies for 'notifications-library' +# +--constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in + +aiofiles +aiosmtplib +Jinja2 diff --git a/packages/notifications-library/requirements/_base.txt b/packages/notifications-library/requirements/_base.txt new file mode 100644 index 00000000000..59793cd02df --- /dev/null +++ b/packages/notifications-library/requirements/_base.txt @@ -0,0 +1,205 @@ +aiofiles==24.1.0 + # via -r requirements/_base.in +aiosmtplib==4.0.0 + # via -r requirements/_base.in +alembic==1.14.1 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +arrow==1.3.0 + # via -r requirements/../../../packages/models-library/requirements/_base.in +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.1.0 + # via + # jsonschema + # referencing +click==8.1.8 + # via typer +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via pydantic +greenlet==3.1.1 + # via sqlalchemy +idna==3.10 + # via + # email-validator + # yarl +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.5 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +jsonschema==4.23.0 + # via -r requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via yarl +opentelemetry-api==1.30.0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asyncpg + # opentelemetry-semantic-conventions +opentelemetry-instrumentation==0.51b0 + # via opentelemetry-instrumentation-asyncpg +opentelemetry-instrumentation-asyncpg==0.51b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asyncpg +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via opentelemetry-instrumentation +propcache==0.3.0 + # via yarl +psycopg2-binary==2.9.10 + # via sqlalchemy +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via pydantic-settings +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +rich==13.9.4 + # via + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.23.1 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +typer==0.15.2 + # via -r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 + # via + # alembic + # pydantic + # pydantic-core + # pydantic-extra-types + # typer +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation +yarl==1.18.3 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/notifications-library/requirements/_test.in b/packages/notifications-library/requirements/_test.in new file mode 100644 index 00000000000..005795b87e7 --- /dev/null +++ b/packages/notifications-library/requirements/_test.in @@ -0,0 +1,27 @@ +# +# Specifies dependencies required to run 'models-library' +# +--constraint ../../../requirements/constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. 
+# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + +aiodocker +coverage +docker +faker +pytest +pytest-asyncio +pytest-cov +pytest-icdiff +pytest-instafail +pytest-mock +pytest-runner +pytest-sugar +python-dotenv +pyyaml +sqlalchemy[mypy] +tenacity +types-aiofiles diff --git a/packages/notifications-library/requirements/_test.txt b/packages/notifications-library/requirements/_test.txt new file mode 100644 index 00000000000..404a02002ce --- /dev/null +++ b/packages/notifications-library/requirements/_test.txt @@ -0,0 +1,130 @@ +aiodocker==0.24.0 + # via -r requirements/_test.in +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiosignal==1.3.2 + # via aiohttp +attrs==25.1.0 + # via + # -c requirements/_base.txt + # aiohttp +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +docker==7.1.0 + # via -r requirements/_test.in +faker==36.1.1 + # via -r requirements/_test.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +greenlet==3.1.1 + # via + # -c requirements/_base.txt + # sqlalchemy +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 + # via + # -c requirements/_base.txt + # requests + # yarl +iniconfig==2.0.0 + # via pytest +multidict==6.1.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +packaging==24.2 + # via + # -c requirements/_base.txt + # pytest + # pytest-sugar +pluggy==1.5.0 + # via pytest +pprintpp==0.4.0 + # via pytest-icdiff +propcache==0.3.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-icdiff + # pytest-instafail + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-icdiff==0.9 + # via -r requirements/_test.in +pytest-instafail==0.5.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +requests==2.32.3 + # via docker +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +tenacity==9.0.0 + # via -r requirements/_test.in +termcolor==2.5.0 + # via pytest-sugar +types-aiofiles==24.1.0.20241221 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # mypy + # sqlalchemy2-stubs +tzdata==2025.1 + # via faker +urllib3==2.3.0 + # via + # -c requirements/../../../requirements/constraints.txt + # docker + # requests +yarl==1.18.3 + # via + # -c requirements/_base.txt + # aiohttp diff --git a/packages/notifications-library/requirements/_tools.in b/packages/notifications-library/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/packages/notifications-library/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint 
../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/packages/notifications-library/requirements/_tools.txt b/packages/notifications-library/requirements/_tools.txt new file mode 100644 index 00000000000..b5ed94588ea --- /dev/null +++ b/packages/notifications-library/requirements/_tools.txt @@ -0,0 +1,85 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_base.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_test.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/packages/notifications-library/requirements/ci.txt b/packages/notifications-library/requirements/ci.txt new file mode 100644 index 00000000000..105d6a514b3 --- /dev/null +++ b/packages/notifications-library/requirements/ci.txt @@ -0,0 +1,23 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'models-library' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-common-library @ ../common-library/ +simcore-models-library @ ../models-library/ +simcore-postgres-database @ ../postgres-database/ +pytest-simcore @ ../pytest-simcore/ +simcore-settings-library @ ../settings-library/ + + +# current module +simcore-notifications-library @ . 
diff --git a/packages/notifications-library/requirements/dev.txt b/packages/notifications-library/requirements/dev.txt new file mode 100644 index 00000000000..0a010051348 --- /dev/null +++ b/packages/notifications-library/requirements/dev.txt @@ -0,0 +1,22 @@ +# Shortcut to install all packages needed to develop 'models-library' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../common-library/ +--editable ../models-library/ +--editable ../postgres-database/ +--editable ../pytest-simcore/ +--editable ../settings-library/ + +# current module +--editable . diff --git a/packages/notifications-library/setup.cfg b/packages/notifications-library/setup.cfg new file mode 100644 index 00000000000..d0afa4c1e60 --- /dev/null +++ b/packages/notifications-library/setup.cfg @@ -0,0 +1,23 @@ +[bumpversion] +current_version = 0.1.0 +commit = True +message = packages/notifications-library version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[bdist_wheel] +universal = 1 + +[aliases] +test = pytest + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/packages/notifications-library/setup.py b/packages/notifications-library/setup.py new file mode 100644 index 00000000000..104f665d3a3 --- /dev/null +++ b/packages/notifications-library/setup.py @@ -0,0 +1,71 @@ +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +INSTALL_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.in") + | { + "simcore-models-library", + "simcore-postgres-database", + "simcore-settings-library", + } +) # WEAK requirements + +TEST_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_test.txt") | {"pytest-simcore"} +) # STRICT requirements + + +SETUP = { + "name": "simcore-notifications-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Pedro Crespo-Valero (pcrespov)", + "description": "simcore library for user notifications e.g. 
emails, sms, etc", + "python_requires": ">=3.10", + "classifiers": [ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python :: 3.10", + ], + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_dir": {"": "src"}, + "include_package_data": True, + "package_data": { + "": [ + "py.typed", + "templates/**/*.jinja2", + "templates/**/*.html", + "templates/**/*.txt", + ] + }, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} + + +if __name__ == "__main__": + setup(**SETUP) diff --git a/packages/notifications-library/src/notifications_library/__init__.py b/packages/notifications-library/src/notifications_library/__init__.py new file mode 100644 index 00000000000..122ae8dcd88 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/__init__.py @@ -0,0 +1,3 @@ +from importlib.metadata import version + +__version__: str = version("simcore-notifications-library") diff --git a/packages/notifications-library/src/notifications_library/_email.py b/packages/notifications-library/src/notifications_library/_email.py new file mode 100644 index 00000000000..54845ef3703 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_email.py @@ -0,0 +1,88 @@ +import logging +import mimetypes +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from email.headerregistry import Address +from email.message import EmailMessage + +from aiosmtplib import SMTP +from settings_library.email import EmailProtocol, SMTPSettings + +_logger = logging.getLogger(__name__) + + +def compose_email( + from_: Address, + to: Address, + subject: str, + content_text: str, + content_html: str | None = None, + reply_to: Address | None = None, + bcc: Address | None = None, +) -> EmailMessage: + msg = EmailMessage() + msg["From"] = from_ + msg["To"] = to + if reply_to: + msg["Reply-To"] = reply_to + if bcc: + msg["Bcc"] = bcc + + msg["Subject"] = subject + + msg.set_content(content_text) + if content_html: + msg.add_alternative(content_html, subtype="html") + return msg + + +def _guess_file_type(file_name: str) -> tuple[str, str]: + """ + Guess the MIME type based on the file name extension. + """ + mimetype, _encoding = mimetypes.guess_type(file_name) + if mimetype: + maintype, subtype = mimetype.split("/", maxsplit=1) + else: + maintype, subtype = "application", "octet-stream" + return maintype, subtype + + +def add_attachments(msg: EmailMessage, attachments: list[tuple[bytes, str]]): + for file_data, file_name in attachments: + # Use the filename to guess the file type + maintype, subtype = _guess_file_type(file_name) + + # Add the attachment + msg.add_attachment( + file_data, + filename=file_name, + maintype=maintype, + subtype=subtype, + ) + + +@asynccontextmanager +async def create_email_session( + settings: SMTPSettings, +) -> AsyncIterator[SMTP]: + async with SMTP( + hostname=settings.SMTP_HOST, + port=settings.SMTP_PORT, + # FROM https://aiosmtplib.readthedocs.io/en/stable/usage.html#starttls-connections + # By default, if the server advertises STARTTLS support, aiosmtplib will upgrade the connection automatically. 
+ # Setting use_tls=True for STARTTLS servers will typically result in a connection error + # To opt out of STARTTLS on connect, pass start_tls=False. + # NOTE: for that reason TLS and STARTLS are mutally exclusive + use_tls=settings.SMTP_PROTOCOL == EmailProtocol.TLS, + start_tls=settings.SMTP_PROTOCOL == EmailProtocol.STARTTLS, + ) as smtp: + if settings.has_credentials: + assert settings.SMTP_USERNAME # nosec + assert settings.SMTP_PASSWORD # nosec + await smtp.login( + settings.SMTP_USERNAME, + settings.SMTP_PASSWORD.get_secret_value(), + ) + + yield smtp diff --git a/packages/notifications-library/src/notifications_library/_email_render.py b/packages/notifications-library/src/notifications_library/_email_render.py new file mode 100644 index 00000000000..a462778a234 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_email_render.py @@ -0,0 +1,62 @@ +import logging +from email.headerregistry import Address +from typing import NamedTuple + +from jinja2 import Environment +from jinja2.exceptions import TemplateNotFound + +from ._models import ProductData, UserData + +_logger = logging.getLogger(__name__) + + +class EmailPartsTuple(NamedTuple): + subject: str + text_content: str + html_content: str | None + + +def get_user_address( + user: UserData, +) -> Address: + return Address( + display_name=f"{user.first_name} {user.last_name}", + addr_spec=user.email, + ) + + +def get_support_address(product: ProductData) -> Address: + return Address( + display_name=f"{product.display_name} support", + addr_spec=product.support_email, + ) + + +def render_email_parts( + env: Environment, + event_name: str, + *, + user: UserData, + product: ProductData, + **other_data, +) -> EmailPartsTuple: + + data = other_data | {"user": user, "product": product} + + # NOTE: assumes template convention! 
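+ # e.g. event_name="on_payed" resolves "on_payed.email.subject.txt",
+ # "on_payed.email.content.txt" and, when present, "on_payed.email.content.html"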
+ subject = env.get_template(f"{event_name}.email.subject.txt").render(data) + + # Body + text_template = env.get_template(f"{event_name}.email.content.txt") + text_content = text_template.render(data) + + try: + html_template = env.get_template(f"{event_name}.email.content.html") + html_content = html_template.render(data) + except TemplateNotFound as err: + _logger.debug("Event %s has no html template: %s", event_name, err) + html_content = None + + return EmailPartsTuple( + subject=subject, text_content=text_content, html_content=html_content + ) diff --git a/packages/notifications-library/src/notifications_library/_models.py b/packages/notifications-library/src/notifications_library/_models.py new file mode 100644 index 00000000000..c7087cf7f7e --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_models.py @@ -0,0 +1,49 @@ +from dataclasses import dataclass + +from models_library.products import ProductName + +# +# *Data are models used for rendering +# + + +@dataclass(frozen=True) +class JinjaTemplateDbGet: + product_name: ProductName + name: str + content: str + + +@dataclass(frozen=True) +class UserData: + user_name: str + first_name: str + last_name: str + email: str + + +@dataclass(frozen=True) +class SharerData: + user_name: str + message: str + + +@dataclass(frozen=True) +class ProductUIData: + project_alias: str + logo_url: str | None = ( + None # default_logo = "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/static-webserver/client/source/resource/osparc/osparc-white.svg" in base.html + ) + strong_color: str | None = ( + None # default_strong_color = "rgb(131, 0, 191)" in base.html + ) + + +@dataclass(frozen=True) +class ProductData: + product_name: ProductName + display_name: str + vendor_display_inline: str + support_email: str + homepage_url: str | None # default_homepage = "https://osparc.io/" in base.html + ui: ProductUIData diff --git a/packages/notifications-library/src/notifications_library/_render.py b/packages/notifications-library/src/notifications_library/_render.py new file mode 100644 index 00000000000..4be7ba3225c --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_render.py @@ -0,0 +1,26 @@ +import logging +from pathlib import Path + +import notifications_library +from jinja2 import Environment, FileSystemLoader, PackageLoader, select_autoescape + +_logger = logging.getLogger(__name__) + + +def create_render_environment_from_notifications_library(**kwargs) -> Environment: + return Environment( + loader=PackageLoader(notifications_library.__name__, "templates"), + autoescape=select_autoescape(["html", "xml"]), + **kwargs + ) + + +def create_render_environment_from_folder(top_dir: Path) -> Environment: + assert top_dir.exists() # nosec + assert top_dir.is_dir() # nosec + return Environment( + loader=FileSystemLoader(top_dir), + autoescape=select_autoescape( + ["html", "xml"], + ), + ) diff --git a/packages/notifications-library/src/notifications_library/_repository.py b/packages/notifications-library/src/notifications_library/_repository.py new file mode 100644 index 00000000000..bb211fa82db --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_repository.py @@ -0,0 +1,84 @@ +from collections.abc import AsyncIterable + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.users import UserID +from simcore_postgres_database.models.jinja2_templates import jinja2_templates +from 
simcore_postgres_database.models.products_to_templates import products_to_templates +from simcore_postgres_database.models.users import users +from simcore_postgres_database.utils_repos import pass_or_acquire_connection +from sqlalchemy.ext.asyncio import AsyncEngine + +from ._models import ( + JinjaTemplateDbGet, + UserData, +) + + +class _BaseRepo: + def __init__(self, db_engine: AsyncEngine): + assert db_engine is not None # nosec + self.db_engine = db_engine + + +class UsersRepo(_BaseRepo): + async def get_user_data(self, user_id: UserID) -> UserData: + query = sa.select( + # NOTE: careful! privacy applies here! + users.c.name, + users.c.first_name, + users.c.last_name, + users.c.email, + ).where(users.c.id == user_id) + async with pass_or_acquire_connection(self.db_engine) as conn: + result = await conn.execute(query) + row = result.one_or_none() + + if row is None: + msg = f"User not found {user_id=}" + raise ValueError(msg) + + return UserData( + user_name=row.name, + first_name=row.first_name, + last_name=row.last_name, + email=row.email, + ) + + +class TemplatesRepo(_BaseRepo): + async def iter_email_templates( + self, product_name: ProductName + ) -> AsyncIterable[JinjaTemplateDbGet]: + async with pass_or_acquire_connection(self.db_engine) as conn: + async for row in await conn.stream( + sa.select( + jinja2_templates.c.name, + jinja2_templates.c.content, + ) + .select_from(products_to_templates.join(jinja2_templates)) + .where( + (products_to_templates.c.product_name == product_name) + & (jinja2_templates.c.name.ilike("%.email.%")) + ) + ): + yield JinjaTemplateDbGet( + product_name=product_name, name=row.name, content=row.content + ) + + async def iter_product_templates( + self, product_name: ProductName + ) -> AsyncIterable[JinjaTemplateDbGet]: + async with pass_or_acquire_connection(self.db_engine) as conn: + async for row in await conn.stream( + sa.select( + products_to_templates.c.product_name, + jinja2_templates.c.name, + jinja2_templates.c.content, + ) + .select_from(products_to_templates.join(jinja2_templates)) + .where(products_to_templates.c.product_name == product_name) + ): + yield JinjaTemplateDbGet( + product_name=row.product_name, name=row.name, content=row.template + ) diff --git a/packages/notifications-library/src/notifications_library/_templates.py b/packages/notifications-library/src/notifications_library/_templates.py new file mode 100644 index 00000000000..5857bd01589 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/_templates.py @@ -0,0 +1,119 @@ +import importlib.resources +import logging +import os +import shutil +from pathlib import Path +from typing import NamedTuple + +import aiofiles +import notifications_library +from aiofiles.os import wrap as sync_to_async +from models_library.products import ProductName + +from ._repository import TemplatesRepo + +_logger = logging.getLogger(__name__) + + +_templates = importlib.resources.files(notifications_library.__name__).joinpath( + "templates" +) +_templates_dir = Path(os.fspath(_templates)) # type:ignore + +# Templates are organised as: +# +# - named-templates: have a hierarchical names used to identify the event, provider (e.g. email, sms), +# part of the message (e.g. subject, content) and format (e.g. html or txt). (see test__templates.py) +# - generic: are used in other templates (can be seen as "templates of templates") +# +# e.g. 
base.html is a generic template vs on_payed.email.content.html that is a named template + + +class NamedTemplateTuple(NamedTuple): + # Named templates are named as "{event_name}.{provider}.{part}.{format}" + event: str + media: str + part: str + ext: str + + +_TEMPLATE_NAME_SEPARATOR = "." + + +def split_template_name(template_name: str) -> NamedTemplateTuple: + return NamedTemplateTuple(*template_name.split(_TEMPLATE_NAME_SEPARATOR)) + + +def get_default_named_templates( + event: str = "*", media: str = "*", part: str = "*", ext: str = "*" +) -> dict[str, Path]: + pattern = _TEMPLATE_NAME_SEPARATOR.join([event, media, part, ext]) + return {p.name: p for p in _templates_dir.glob(pattern)} + + +def _print_tree(top: Path, indent=0, prefix="", **print_kwargs): + prefix = indent * " " + prefix + if top.is_file(): + file_size = f"{top.stat().st_size}B" + entry = f"{top.name:<50}{file_size}" + print(prefix + entry, **print_kwargs) # noqa: T201 + elif top.is_dir(): + children = sorted(top.iterdir()) + entry = f"{top.name} {len(children)}" + print(prefix + entry, **print_kwargs) # noqa: T201 + for child in children[:-1]: + _print_tree(child, indent + 1, "β”œβ”€β”€ ", **print_kwargs) + if children: + _print_tree(children[-1], indent + 1, "└── ", **print_kwargs) + + +_aioshutil_copy = sync_to_async(shutil.copy) + + +async def _copy_files(src: Path, dst: Path): + for p in src.iterdir(): + if p.is_file(): + await _aioshutil_copy(p, dst / p.name, follow_symlinks=False) + + +async def consolidate_templates( + new_dir: Path, product_names: list[ProductName], repo: TemplatesRepo +): + """Consolidates all templates in new_dir folder for each product + + Builds a structure under new_dir and dump all templates (T) for each product (P) with the following + precedence rules: + 1. T found in *database* associated to P in products_to_templates.join(jinja2_templates), otherwise + 2. found in notifications_library/templates/P/T *file*, otherwise + 3. found in notifications_library/T *file* + + After consolidation, the tree dir would have the follow structure + new_dir: + product_1: + template1 + ... + product_2: + template1 + ... 
+ + """ + assert new_dir.is_dir() # nosec + + for product_name in product_names: + product_folder = new_dir / product_name + product_folder.mkdir(parents=True, exist_ok=True) + + # takes common as defaults + await _copy_files(_templates_dir, product_folder) + + # overrides with customs in-place + if (_templates_dir / product_name).exists(): + await _copy_files(_templates_dir / product_name, product_folder) + + # overrides with customs in database + async for custom_template in repo.iter_product_templates(product_name): + assert custom_template.product_name == product_name # nosec + + template_path = product_folder / custom_template.name + async with aiofiles.open(template_path, "wt") as fh: + await fh.write(custom_template.content) diff --git a/packages/notifications-library/src/notifications_library/errors.py b/packages/notifications-library/src/notifications_library/errors.py new file mode 100644 index 00000000000..9c250909f21 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/errors.py @@ -0,0 +1,9 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class NotifierError(OsparcErrorMixin, Exception): + pass + + +class TemplatesNotFoundError(NotifierError): + msg_template = "Could not find {templates}" diff --git a/packages/notifications-library/src/notifications_library/payments.py b/packages/notifications-library/src/notifications_library/payments.py new file mode 100644 index 00000000000..13192889a63 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/payments.py @@ -0,0 +1,47 @@ +""" Groups notifications on payments events + +""" + +import logging +from dataclasses import dataclass + +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodTransaction, + PaymentTransaction, +) +from models_library.users import UserID + +from ._templates import get_default_named_templates + +_logger = logging.getLogger(__name__) + + +ON_PAYED_EVENT_EMAIL_TEMPLATES = { + "base.html", +} | set(get_default_named_templates(event="on_payed", media="email")) + + +@dataclass +class PaymentData: + price_dollars: str + osparc_credits: str + invoice_url: str + invoice_pdf_url: str + + +async def notify_payment_completed( + user_id: UserID, + payment: PaymentTransaction, +): + assert user_id # nosec + assert payment # nosec + raise NotImplementedError + + +async def notify_payment_method_acked( + user_id: UserID, + payment_method: PaymentMethodTransaction, +): + assert user_id # nosec + assert payment_method # nosec + raise NotImplementedError diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/schemas/__init__.py b/packages/notifications-library/src/notifications_library/py.typed similarity index 100% rename from services/datcore-adapter/src/simcore_service_datcore_adapter/models/schemas/__init__.py rename to packages/notifications-library/src/notifications_library/py.typed diff --git a/packages/notifications-library/src/notifications_library/templates/base.html b/packages/notifications-library/src/notifications_library/templates/base.html new file mode 100644 index 00000000000..7bb67679bfb --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/base.html @@ -0,0 +1,110 @@ +{% set default_strong_color = "rgb(131, 0, 191)" %} +{% set default_logo = "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/static-webserver/client/source/resource/osparc/osparc-black.svg" %} +{% set default_homepage = "https://osparc.io/" %} + + + + + + +{% 
block title %}{% endblock %} + + + + + + diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.html new file mode 100644 index 00000000000..3a7c0f8a48c --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.html @@ -0,0 +1,23 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_account_approved.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+ +

+ Thank you for your interest in {{ product.display_name }}. We are pleased to provide you with a one-time invitation link to register on the platform. +

+ +

+ Click here to access the registration page. +

+ +

+ Please follow the on-screen information and proceed with your registration. If any problem should occur, please feel free to contact us at {{ product.support_email }}. +

+ +

Enjoy {{ product.display_name }}!

+ +

Best regards,

+ +

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.txt new file mode 100644 index 00000000000..13fa87c5915 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.content.txt @@ -0,0 +1,13 @@ +Dear {{ user.first_name or user.user_name }}, + +Thank you for your interest in {{ product.display_name }} . We are pleased to provide you with a one-time invitation link to register on the platform. + +Click {{ link }} to access the registration page. + +Please follow the on-screen information and proceed with your registration. If any problem should occur, please feel free to contact us at {{ product.support_email }}. + +Enjoy {{ product.display_name }} ! + +Best regards, + +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.subject.txt new file mode 100644 index 00000000000..2e1c9474f9b --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_approved.email.subject.txt @@ -0,0 +1 @@ +Your Registration Request for {{ product.display_name }} Has Been Accepted diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.html new file mode 100644 index 00000000000..abe3482435d --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.html @@ -0,0 +1,17 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_account_rejected.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+ +

+ Thank you for your interest in {{ product.display_name }}. Unfortunately, your registration is not eligible for participation in this program. +

+ +

+ In case of further interest in this or other solutions we might offer, please contact us at {{ product.support_email }}. +

+ +

Best regards,

+ +

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.txt new file mode 100644 index 00000000000..74094386114 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.content.txt @@ -0,0 +1,8 @@ +Dear {{ user.first_name or user.user_name }}, + +Thank you for your interest in {{ product.display_name }}. Unfortunately, your registration is not eligible for participation in this program. + +In case of further interest in this or other solutions we might offer, please check contact us at {{ product.support_email }}. + +Best regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.subject.txt new file mode 100644 index 00000000000..6ff381db88f --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_rejected.email.subject.txt @@ -0,0 +1 @@ +Your Registration Request for {{ product.display_name }} Has Been Denied diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.html new file mode 100644 index 00000000000..ae26bed2bf4 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.html @@ -0,0 +1,32 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_account_requested.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear Support team, + +

+ We have received the following request form for an account in {{ product.display_name }} from {{ host }}: +

+ +
+{{ dumps(request_form) }}
+
+ +

+ Some details about the requested product follow: +

+ +
+{{ dumps(product_info) }}
+
+ +

+ To validate the request form, here you can get further info on this request: +

+ +
+{{ dumps(ipinfo) }}
+
+ +

TIP: Use https://ipinfo.io/ to find out more information on the IP.

+ +{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt new file mode 100644 index 00000000000..0eb9d7d4a64 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.content.txt @@ -0,0 +1,15 @@ +Dear Support team, + +We have received the following request form for an account in {{ product.display_name }} from **{{ host }}**: + +{{ dumps(request_form) }} + +Some details about the requested product follow: + +{{ dumps(product_info) }} + +To validate the request form, here you can get further info on this request: + +{{ dumps(ipinfo) }} + +TIP: Use https://ipinfo.io/ to find out more information on the IP. diff --git a/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.subject.txt new file mode 100644 index 00000000000..8691a173291 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_account_requested.email.subject.txt @@ -0,0 +1 @@ +Request for an Account in {{ product.display_name }}. diff --git a/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.html new file mode 100644 index 00000000000..bc328c4cc01 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.html @@ -0,0 +1,21 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_change_email.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+

+ You have requested to change the email address linked to your {{ product.display_name }} account. + Please follow the on-screen information in the link below: +

+ +

+ + +

+ +

Please don't hesitate to contact us at {{ product.support_email }} if you need further help. +

+

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.txt new file mode 100644 index 00000000000..8f837cee41d --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.content.txt @@ -0,0 +1,10 @@ +Dear {{ user.first_name or user.user_name }}, + +You have requested to change the email address linked to your {{ product.display_name }} account. Please follow the on-screen information in the link below: + +{{ link }} + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_change_email.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.subject.txt new file mode 100644 index 00000000000..8dde292c232 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_change_email.email.subject.txt @@ -0,0 +1 @@ +Change of Your Sign-In Email Address on {{ host }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.html new file mode 100644 index 00000000000..7b88926ef60 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.html @@ -0,0 +1,14 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_new_code.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+

Please find below the Two-factor Authentication sign-in code for your {{ host }} account.

+ +

+ Your code is: {{ code }} +

+ +

Please don't hesitate to contact us at {{ product.support_email }} if you need further help.

+

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.txt new file mode 100644 index 00000000000..07c54d9f01f --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.content.txt @@ -0,0 +1,10 @@ +Dear {{ user.first_name or user.user_name }}, + +Please find below the Two-factor Authentication sign-in code for your {{ host }} account. + +Your code is: {{ code }} + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_code.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.subject.txt new file mode 100644 index 00000000000..11d7bc18b61 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_code.email.subject.txt @@ -0,0 +1 @@ +{{ code }} is Your 2FA Sign-In Code for {{ host }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.html new file mode 100644 index 00000000000..1b5b5998a9a --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.html @@ -0,0 +1,17 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_new_invitation.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+

Thank you for your interest in {{ product.display_name }}. We are pleased to provide you with a one-time invitation link to register on the platform.

+

+ + +

+

Please follow the on-screen information and proceed with your registration.

+

If any problem should occur, please feel free to contact us at {{ product.support_email }}.

+

Enjoy {{ product.display_name }}!

+

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.txt new file mode 100644 index 00000000000..3cd0ded7186 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.content.txt @@ -0,0 +1,14 @@ +Dear {{ user.first_name or user.user_name }}, + +Thank you for your interest in {{ product.display_name }}. We are pleased to provide you with a one-time invitation link to register on the platform. + +Click here to access the registration page: {{ link }} + +Please follow the on-screen information and proceed with your registration. + +If any problem should occur, please feel free to contact us at {{ product.support_email }}. + +Enjoy {{ product.display_name }}! + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.subject.txt new file mode 100644 index 00000000000..3bedf0fb8cc --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_new_invitation.email.subject.txt @@ -0,0 +1 @@ +Request for an Account in {{ product.display_name }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.html new file mode 100644 index 00000000000..c7ffbee377c --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.html @@ -0,0 +1,15 @@ +{% extends 'base.html' %} + +{% block title %} {% include 'on_payed.email.subject.txt' %} {% endblock %} + +{% block content %} + +

Dear {{ user.first_name or user.user_name }},

+

We are delighted to confirm the successful processing of your payment of {{ payment.price_dollars }} USD for the purchase of {{ payment.osparc_credits }} credits. + The credits have been added to your {{ product.display_name }} account, and you are all set to utilize them.

+

For more details you can view or download your receipt.

+

Please don't hesitate to contact us at {{ product.support_email }} if you need further help.

+

Best Regards,

+

The {{ product.display_name }} Team

+ +{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.txt new file mode 100644 index 00000000000..4e0609d2018 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_payed.email.content.txt @@ -0,0 +1,10 @@ +Dear {{ user.first_name or user.user_name }}, + +We are delighted to confirm the successful processing of your payment of {{ payment.price_dollars }} USD for the purchase of {{ payment.osparc_credits }} credits. The credits have been added to your {{ product.display_name }} account, and you are all set to utilize them. + +For more details you can view or download your receipt: {{ payment.invoice_url }}. + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_payed.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_payed.email.subject.txt new file mode 100644 index 00000000000..5d75a7394c9 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_payed.email.subject.txt @@ -0,0 +1 @@ +Your Payment {{ payment.price_dollars }} USD for {{ payment.osparc_credits }} Credits Was Successful diff --git a/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.html new file mode 100644 index 00000000000..9c64a687e12 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.html @@ -0,0 +1,15 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_registered.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+

Thank you for your interest in {{ product.display_name }}. You have successfully registered for {{ host }}.

+

Please activate your account via the link below:

+

+ +

+

Please don't hesitate to contact us at {{ product.support_email }} if you need further help.

+

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.txt new file mode 100644 index 00000000000..31d29aeae73 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_registered.email.content.txt @@ -0,0 +1,11 @@ +Dear {{ user.first_name or user.user_name }}, + +Thank you for your interest in {{ product.display_name }}. You have successfully registered for {{ host }}. + +Please activate your account via the link below: +{{ link }} + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_registered.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_registered.email.subject.txt new file mode 100644 index 00000000000..a3e891b7f92 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_registered.email.subject.txt @@ -0,0 +1 @@ +Welcome to {{ product.display_name }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.html new file mode 100644 index 00000000000..12d73ee939d --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.html @@ -0,0 +1,27 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_reset_password.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+ +

A request to reset your {{ host }} password has been made.

+{% if success %} + +

To complete the process, please click the link below:

+

+ +

+ +{% else %} + +

It could not be completed due to the following reason:

+

{{ reason }}

+ +{% endif %} + +

If you did not request this, please contact us immediately at {{ product.support_email }} for security reasons.

+ +

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.txt new file mode 100644 index 00000000000..f19749194e8 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.content.txt @@ -0,0 +1,14 @@ +Dear {{ user.first_name or user.user_name }}, + +A request to reset your {{ host }} password has been made. +{% if success %} +To complete the process, please click the link below: +{{ link }} +{% else %} +It could not be completed due to the following reason: +{{ reason }} +{% endif %} +If you did not request this, please contact us immediately at {{ product.support_email }} for security reasons. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.subject.txt new file mode 100644 index 00000000000..8f3c3d22811 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_reset_password.email.subject.txt @@ -0,0 +1 @@ +Reset Password on {{ host }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html new file mode 100644 index 00000000000..2bfd9404271 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.html @@ -0,0 +1,29 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_share_project.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+ +

Great news! {{ sharer.user_name }} has shared a {{ product.ui.project_alias }} with you on {{ product.display_name }}.

+ +

To view the {{ product.ui.project_alias }} and accept the sharing, follow the link below:

+ +

+{% if sharer.message %} +

+

{{ sharer.message }}

+ +
+{% else %} + +{% endif %} +

+ +

Please don't hesitate to contact us at {{ product.support_email }} if you need further help.

+ +

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt new file mode 100644 index 00000000000..2fae91408f5 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.content.txt @@ -0,0 +1,13 @@ +Dear {{ user.first_name or user.user_name }}, + +Great news! {{ sharer.user_name }} has shared a {{ product.ui.project_alias }} with you on {{ product.display_name }}. + +To view the {{ product.ui.project_alias }} and accept the sharing, follow below: + +{{ sharer.message }} +{{ accept_link }} + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt new file mode 100644 index 00000000000..0a7f2157a39 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_share_project.email.subject.txt @@ -0,0 +1 @@ +A {{ product.ui.project_alias }} was shared with you on {{ host }} diff --git a/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.html b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.html new file mode 100644 index 00000000000..49cee1f4906 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.html @@ -0,0 +1,26 @@ +{% extends 'base.html' %} +{% block title %} {% include 'on_unregister.email.subject.txt' %} {% endblock %} +{% block content %} +

Dear {{ user.first_name or user.user_name }},

+

+ We have received your account closure request, and we want to say thank you for being a part of our platform. While + we're sad to see you go, we respect your decision. +

+

+ Your studies and data will be securely retained for {{ retention_days }} days. + Within that period, if you ever decide to return, you can reactivate your account by sending us an email to {{ + product.support_email }}. Afterwards, it will be completely deleted from our system. +

+ +

+ Our support team is here to help with anything you need. Please feel free to contact us at {{ product.support_email }} + in case of questions or any required assistance. +

+ +

+ We wish you all the best in your future endeavors. +

+ +

Best Regards,

+

The {{ product.display_name }} Team

+{% endblock %} diff --git a/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.txt b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.txt new file mode 100644 index 00000000000..0b45d4afc66 --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.content.txt @@ -0,0 +1,12 @@ +Dear {{ user.first_name or user.user_name }}, + +We have received your account closure request, and we want to say thank you for being a part of our platform. While we're sad to see you go, we respect your decision. + +Your studies and data will be securely retained for {{ retention_days }} days. Within that period, if you ever decide to return, you can reactivate your account by sending us an email to {{ product.support_email }}. Afterwards, it will be completely deleted from our system. + +Our support team is here to help with anything you need. Please feel free to contact us at {{ product.support_email }} in case of questions or any required assistance. + +We wish you all the best in your future endeavors. + +Best Regards, +The {{ product.display_name }} Team diff --git a/packages/notifications-library/src/notifications_library/templates/on_unregister.email.subject.txt b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.subject.txt new file mode 100644 index 00000000000..a756735bf7c --- /dev/null +++ b/packages/notifications-library/src/notifications_library/templates/on_unregister.email.subject.txt @@ -0,0 +1 @@ +Closing Your Account in {{ host }} diff --git a/packages/notifications-library/tests/conftest.py b/packages/notifications-library/tests/conftest.py new file mode 100644 index 00000000000..006b7ed1a7b --- /dev/null +++ b/packages/notifications-library/tests/conftest.py @@ -0,0 +1,109 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from pathlib import Path +from typing import Any + +import notifications_library +import pytest +from faker import Faker +from models_library.products import ProductName +from notifications_library._models import ( + ProductData, + ProductUIData, + SharerData, + UserData, +) +from notifications_library.payments import PaymentData +from pydantic import EmailStr +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_postgres_database.models.products import Vendor + +pytest_plugins = [ + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_payments_data", + "pytest_simcore.faker_products_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.postgres_service", + "pytest_simcore.repository_paths", +] + + +@pytest.fixture(scope="session") +def package_dir() -> Path: + pdir = Path(notifications_library.__file__).resolve().parent + assert pdir.exists() + return pdir + + +@pytest.fixture(scope="session") +def external_envfile_dict(external_envfile_dict: EnvVarsDict) -> EnvVarsDict: + if external_envfile_dict: + assert "PAYMENTS_GATEWAY_API_SECRET" in external_envfile_dict + assert "PAYMENTS_GATEWAY_URL" in external_envfile_dict + return external_envfile_dict + + +# +# mock data for templates +# + + +@pytest.fixture +def product_data( + product_name: ProductName, + product: dict[str, Any], +) -> ProductData: + vendor: Vendor = product["vendor"] + + vendor_ui = vendor.get("ui", {}) + + product_ui = 
ProductUIData( + logo_url=vendor_ui.get("logo_url"), + strong_color=vendor_ui.get("strong_color"), + project_alias=vendor_ui["project_alias"], + ) + + return ProductData( # type: ignore + product_name=product_name, + display_name=product["display_name"], + vendor_display_inline=f"{vendor.get('name','')}, {vendor.get('address','')}", + support_email=product["support_email"], + homepage_url=vendor.get("url"), + ui=product_ui, + ) + + +@pytest.fixture +def user_data( + user_name: str, user_email: EmailStr, user_first_name: str, user_last_name: str +) -> UserData: + return UserData( + user_name=user_name, + first_name=user_first_name, + last_name=user_last_name, + email=user_email, + ) + + +@pytest.fixture +def sharer_data(user_name: str, faker: Faker) -> SharerData: + return SharerData( + user_name=user_name, + message=faker.random_element(elements=(faker.sentence(), "")), + ) + + +@pytest.fixture +def payment_data(successful_transaction: dict[str, Any]) -> PaymentData: + return PaymentData( + price_dollars=successful_transaction["price_dollars"], + osparc_credits=successful_transaction["osparc_credits"], + invoice_url=successful_transaction["invoice_url"], + invoice_pdf_url=successful_transaction["invoice_pdf_url"], + ) diff --git a/packages/notifications-library/tests/email/conftest.py b/packages/notifications-library/tests/email/conftest.py new file mode 100644 index 00000000000..f0f47f0e3d2 --- /dev/null +++ b/packages/notifications-library/tests/email/conftest.py @@ -0,0 +1,32 @@ +from unittest.mock import MagicMock + +import pytest +from pydantic import EmailStr +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + env_devel_dict: EnvVarsDict, + external_envfile_dict: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **env_devel_dict, + **external_envfile_dict, + }, + ) + + +@pytest.fixture +def smtp_mock_or_none( + mocker: MockerFixture, is_external_user_email: EmailStr | None, user_email: EmailStr +) -> MagicMock | None: + if not is_external_user_email: + return mocker.patch("notifications_library._email.SMTP") + print("🚨 Emails might be sent to", f"{user_email=}") + return None diff --git a/packages/notifications-library/tests/email/test_email_events.py b/packages/notifications-library/tests/email/test_email_events.py new file mode 100644 index 00000000000..52cc84eeedd --- /dev/null +++ b/packages/notifications-library/tests/email/test_email_events.py @@ -0,0 +1,321 @@ +""" +These tests can be run against external configuration + +cd packages/notifications-library +make install-dev + +pytest \ + --external-envfile=.my-env \ + --faker-support-email=support@email.com \ + --faker-user-email=my@email.com \ + tests/email + +""" + +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +# pylint: disable=too-many-return-statements + + +import functools +import json +from dataclasses import asdict +from pathlib import Path +from typing import Any +from unittest.mock import AsyncMock, MagicMock + +import pytest +from faker import Faker +from jinja2 import StrictUndefined +from models_library.api_schemas_webserver.auth import AccountRequestInfo +from models_library.products import ProductName +from models_library.utils.fastapi_encoders import jsonable_encoder +from 
notifications_library._email import ( + add_attachments, + compose_email, + create_email_session, +) +from notifications_library._email_render import ( + get_support_address, + get_user_address, + render_email_parts, +) +from notifications_library._models import ProductData, SharerData, UserData +from notifications_library._render import ( + create_render_environment_from_notifications_library, +) +from notifications_library.payments import PaymentData +from pydantic import EmailStr +from pydantic.json import pydantic_encoder +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.email import SMTPSettings + + +def _safe_json_dumps(obj: Any, **kwargs): + return json.dumps(jsonable_encoder(obj), default=pydantic_encoder, **kwargs) + + +@pytest.fixture +def ipinfo(faker: Faker) -> dict[str, Any]: + return { + "x-real-ip": faker.ipv4(), + "x-forwarded-for": faker.ipv4(), + "peername": faker.ipv4(), + } + + +@pytest.fixture +def request_form(faker: Faker) -> dict[str, Any]: + return AccountRequestInfo.model_validate( + AccountRequestInfo.model_json_schema()["example"] + ).model_dump() + + +@pytest.fixture +def event_extra_data( # noqa: PLR0911 + event_name: str, + faker: Faker, + product_name: ProductName, + payment_data: PaymentData, + product: dict[str, Any], + request_form: dict[str, Any], + ipinfo: dict[str, Any], +) -> dict[str, Any]: + + code = faker.pystr_format(string_format="######", letters="") + host_url = f"https://{product_name}.io" + + match event_name: + case "on_account_requested": + return { + "host": host_url, + "name": "support-team", + "product_info": { + k: product.get(k) + for k in ( + "name", + "display_name", + "vendor", + "is_payment_enabled", + ) + } + | {"is_payment_enabled": faker.pybool()}, + "request_form": request_form, + "ipinfo": ipinfo, + "dumps": functools.partial(_safe_json_dumps, indent=1), + } + case "on_account_rejected": + return { + "host": host_url, + } + case "on_account_approved": + return { + "host": host_url, + "link": f"{host_url}?invitation={code}", + } + case "on_change_email": + return { + "host": host_url, + "link": f"{host_url}?change-email={code}", + } + case "on_new_code": + return { + "host": host_url, + "code": code, + } + case "on_new_invitation": + return { + "link": f"{host_url}?invitation={code}", + } + case "on_payed": + return { + "payment": payment_data, + } + case "on_registered": + return { + "host": host_url, + "link": f"{host_url}?registration={code}", + } + case "on_reset_password": + return { + "host": host_url, + "success": faker.pybool(), + "reason": faker.sentence(), + "link": f"{host_url}?reset-password={code}", + } + case "on_share_project": + return { + "host": host_url, + "resource_alias": "Project", + "sharer": SharerData( + user_name=faker.name(), + message=faker.random_element(elements=(faker.sentence(), "")), + ), + "accept_link": f"{host_url}?code={code}", + } + case "on_unregister": + return { + "host": host_url, + "retention_days": 30, + } + + case _: + return {} + + +@pytest.fixture +def event_attachments(event_name: str, faker: Faker) -> list[tuple[bytes, str]]: + attachments = [] + match event_name: + case "on_payed": + # Create a fake PDF-like byte content and its filename + file_name = "test-payed-invoice.pdf" + # Simulate generating PDF data. 
+ fake_pdf_content = faker.text().encode("utf-8") + attachments.append((fake_pdf_content, file_name)) + + return attachments + + +@pytest.mark.parametrize( + "event_name", + [ + "on_account_approved", + "on_account_requested", + "on_account_rejected", + "on_change_email", + "on_new_code", + "on_new_invitation", + "on_payed", + "on_registered", + "on_reset_password", + "on_share_project", + "on_unregister", + ], +) +async def test_email_event( + app_environment: EnvVarsDict, + smtp_mock_or_none: MagicMock | None, + user_data: UserData, + user_email: EmailStr, + sharer_data: SharerData | None, + product_data: ProductData, + product_name: ProductName, + event_name: str, + event_extra_data: dict[str, Any], + event_attachments: list[tuple[bytes, str]], + tmp_path: Path, +): + assert user_data.email == user_email + assert product_data.product_name == product_name + + event_extra_data = event_extra_data | (asdict(sharer_data) if sharer_data else {}) + + parts = render_email_parts( + env=create_render_environment_from_notifications_library( + undefined=StrictUndefined + ), + event_name=event_name, + user=user_data, + product=product_data, + # extras + **event_extra_data, + ) + + from_ = get_support_address(product_data) + to = get_user_address(user_data) + + assert from_.addr_spec == product_data.support_email + assert to.addr_spec == user_email + + msg = compose_email( + from_, + to, + subject=parts.subject, + content_text=parts.text_content, + content_html=parts.html_content, + ) + if event_attachments: + add_attachments(msg, event_attachments) + + # keep copy for comparison + dump_path = tmp_path / event_name + if parts.html_content: + p = dump_path.with_suffix(".html") + p.write_text(parts.html_content) + if parts.text_content: + p = dump_path.with_suffix(".txt") + p.write_text(parts.text_content) + + async with create_email_session(settings=SMTPSettings.create_from_envs()) as smtp: + await smtp.send_message(msg) + + # check email was sent + if smtp_mock_or_none: + assert smtp_mock_or_none.called + assert isinstance(smtp, AsyncMock) + assert smtp.login.called + assert smtp.send_message.called + + +@pytest.mark.parametrize( + "event_name", + [ + "on_account_requested", + ], +) +async def test_email_with_reply_to( + app_environment: EnvVarsDict, + smtp_mock_or_none: MagicMock | None, + user_data: UserData, + user_email: EmailStr, + support_email: EmailStr, + product_data: ProductData, + event_name: str, + event_extra_data: dict[str, Any], +): + if smtp_mock_or_none is None: + pytest.skip( + reason="Skipping to avoid spamming issue-tracker system." + "Remove this only for manual exploratory testing." 
+ ) + + parts = render_email_parts( + env=create_render_environment_from_notifications_library( + undefined=StrictUndefined + ), + event_name=event_name, + user=user_data, + product=product_data, + # extras + **event_extra_data, + ) + + from_ = get_support_address(product_data) + to = get_support_address(product_data) + reply_to = get_user_address(user_data) + + assert user_email == reply_to.addr_spec + assert from_.addr_spec == to.addr_spec + assert to.addr_spec == support_email + + msg = compose_email( + from_, + to, + subject=parts.subject, + content_text=parts.text_content, + content_html=parts.html_content, + reply_to=reply_to, + ) + + async with create_email_session(settings=SMTPSettings.create_from_envs()) as smtp: + await smtp.send_message(msg) + + # check email was sent + if smtp_mock_or_none: + assert smtp_mock_or_none.called + assert isinstance(smtp, AsyncMock) + assert smtp.login.called + assert smtp.send_message.called diff --git a/packages/notifications-library/tests/test__render.py b/packages/notifications-library/tests/test__render.py new file mode 100644 index 00000000000..cef9dd277d4 --- /dev/null +++ b/packages/notifications-library/tests/test__render.py @@ -0,0 +1,34 @@ +import shutil +from pathlib import Path + +from models_library.products import ProductName +from notifications_library._models import ProductData +from notifications_library._render import ( + create_render_environment_from_folder, + create_render_environment_from_notifications_library, +) +from notifications_library._templates import _print_tree, _templates_dir + + +def test_render_env_from_folder( + tmp_path: Path, product_name: ProductName, product_data: ProductData +): + + pkg_env = create_render_environment_from_notifications_library() + + top_dir = tmp_path / "consolidated" + top_dir.mkdir() + + product_name_dir = top_dir / product_name + shutil.copytree(_templates_dir, product_name_dir) + shutil.copytree(_templates_dir, top_dir / "osparc") + + _print_tree(top_dir) + + consolidated_env = create_render_environment_from_folder(top_dir) + + product_template = consolidated_env.get_template(f"{product_name}/base.html") + common_template = pkg_env.get_template("base.html") + + data = {"product": product_data} + assert product_template.render(data) == common_template.render(data) diff --git a/packages/notifications-library/tests/test__templates.py b/packages/notifications-library/tests/test__templates.py new file mode 100644 index 00000000000..58bce016289 --- /dev/null +++ b/packages/notifications-library/tests/test__templates.py @@ -0,0 +1,55 @@ +from pathlib import Path + +import pytest +from notifications_library._templates import ( + _templates_dir, + get_default_named_templates, + split_template_name, +) + + +@pytest.mark.parametrize( + "event_name", + [ + "on_account_requested", + "on_change_email", + "on_new_code", + "on_new_invitation", + "on_payed", + "on_registered", + "on_reset_password", + "on_share_project", + "on_unregister", + ], +) +def test_email_templates_are_complete(event_name: str): + + event_templates = set(get_default_named_templates(event=event_name, media="email")) + + assert event_templates + + with_html = { + f"{event_name}.email.{suffix}" + for suffix in ["subject.txt", "content.html", "content.txt"] + } + without_html = { + f"{event_name}.email.{suffix}" for suffix in ["subject.txt", "content.txt"] + } + + assert event_templates in (with_html, without_html) + + +@pytest.mark.parametrize( + "template_name,template_path", get_default_named_templates().items() +) +def 
test_named_templates(template_name: str, template_path: Path): + + parts = split_template_name(template_name) + assert get_default_named_templates(*parts) == {template_name: template_path} + + +def test_generic_templates(): + assert (_templates_dir / "base.html").exists() + + with pytest.raises(TypeError): + split_template_name("base.html") diff --git a/packages/notifications-library/tests/with_db/conftest.py b/packages/notifications-library/tests/with_db/conftest.py new file mode 100644 index 00000000000..9dda5da676d --- /dev/null +++ b/packages/notifications-library/tests/with_db/conftest.py @@ -0,0 +1,193 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator +from typing import Any + +import pytest +import sqlalchemy as sa +from models_library.basic_types import IDStr +from models_library.groups import GroupID +from models_library.products import ProductName +from models_library.users import UserID +from notifications_library._templates import get_default_named_templates +from pydantic import validate_call +from simcore_postgres_database.models.jinja2_templates import jinja2_templates +from simcore_postgres_database.models.payments_transactions import payments_transactions +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.products_to_templates import products_to_templates +from simcore_postgres_database.models.users import users +from sqlalchemy.engine.row import Row +from sqlalchemy.ext.asyncio.engine import AsyncEngine + + +async def _insert_and_get_row( + conn, table: sa.Table, values: dict[str, Any], pk_col: sa.Column, pk_value: Any +) -> Row: + result = await conn.execute(table.insert().values(**values).returning(pk_col)) + row = result.first() + assert getattr(row, pk_col.name) == pk_value + + result = await conn.execute(sa.select(table).where(pk_col == pk_value)) + return result.first() + + +async def _delete_row(conn, table, pk_col: sa.Column, pk_value: Any) -> None: + await conn.execute(table.delete().where(pk_col == pk_value)) + + +@pytest.fixture +async def user( + sqlalchemy_async_engine: AsyncEngine, + user: dict[str, Any], + user_id: UserID, +) -> AsyncIterator[dict[str, Any]]: + """Overrides pytest_simcore.faker_users_data.user + and injects a user in db + """ + assert user_id == user["id"] + pk_args = users.c.id, user["id"] + + # NOTE: creation of primary group and setting `groupid`` is automatically triggered after creation of user by postgres + async with sqlalchemy_async_engine.begin() as conn: + row: Row = await _insert_and_get_row(conn, users, user, *pk_args) + + yield row._asdict() + + async with sqlalchemy_async_engine.begin() as conn: + await _delete_row(conn, users, *pk_args) + + +@pytest.fixture +def user_primary_group_id(user: dict[str, Any]) -> GroupID: + # Overrides `user_primary_group_id` since new user triggers an automatic creation of a primary group + return user["primary_gid"] + + +@pytest.fixture +async def product( + sqlalchemy_async_engine: AsyncEngine, product: dict[str, Any] +) -> AsyncIterator[dict[str, Any]]: + """Overrides pytest_simcore.faker_products_data.product + and injects product in db + + """ + # NOTE: this fixture ignores products' group-id but it is fine for this test context + assert product["group_id"] is None + + # NOTE: osparc product is already in db. 
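Each with_db fixture repeats the same insert/yield/delete dance around `_insert_and_get_row` and `_delete_row`. A possible consolidation, not in the diff, packages it once as an async context manager:

```python
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Any

import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncEngine


@asynccontextmanager
async def _row_in_db(
    engine: AsyncEngine,
    table: sa.Table,
    values: dict[str, Any],
    pk_col: sa.Column,
    pk_value: Any,
) -> AsyncIterator[dict[str, Any]]:
    # insert + read back, as _insert_and_get_row does
    async with engine.begin() as conn:
        await conn.execute(table.insert().values(**values))
        result = await conn.execute(sa.select(table).where(pk_col == pk_value))
        row = result.first()
        assert row is not None
    try:
        yield row._asdict()
    finally:
        # cleanup, as _delete_row does
        async with engine.begin() as conn:
            await conn.execute(table.delete().where(pk_col == pk_value))
```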
This is another product + assert product["name"] != "osparc" + + pk_args = products.c.name, product["name"] + + async with sqlalchemy_async_engine.begin() as conn: + row: Row = await _insert_and_get_row(conn, products, product, *pk_args) + + yield row._asdict() + + async with sqlalchemy_async_engine.begin() as conn: + await _delete_row(conn, products, *pk_args) + + +@pytest.fixture +async def products_names( + sqlalchemy_async_engine: AsyncEngine, product: dict[str, Any] +) -> list[ProductName]: + # overrides + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute(sa.select(products.c.name)) + all_product_names = [row.name for row in result.fetchall()] + assert product["name"] in all_product_names + return all_product_names + + +@pytest.fixture +async def successful_transaction( + sqlalchemy_async_engine: AsyncEngine, successful_transaction: dict[str, Any] +) -> AsyncIterator[dict[str, Any]]: + """Overrides pytest_simcore.faker_payments_data.successful_transaction + and injects transaction in db + """ + pk_args = payments_transactions.c.payment_id, successful_transaction["payment_id"] + + async with sqlalchemy_async_engine.begin() as conn: + row: Row = await _insert_and_get_row( + conn, payments_transactions, successful_transaction, *pk_args + ) + + yield row._asdict() + + async with sqlalchemy_async_engine.begin() as conn: + await _delete_row(conn, payments_transactions, *pk_args) + + +@pytest.fixture +def email_template_mark() -> str: + return f"Added by {__name__}:email_templates fixture" + + +@pytest.fixture +async def email_templates( + sqlalchemy_async_engine: AsyncEngine, email_template_mark: str +) -> AsyncIterator[dict[str, Any]]: + all_templates = {"other.html": f"Fake template {email_template_mark}"} + + # only subjects are overriden in db + subject_templates = get_default_named_templates(media="email", part="subject") + for name, path in subject_templates.items(): + assert "subject" in name + all_templates[name] = f"{email_template_mark} {path.read_text()}" + + async with sqlalchemy_async_engine.begin() as conn: + pk_to_row = { + pk_value: await _insert_and_get_row( + conn, + jinja2_templates, + {"name": pk_value, "content": content}, + jinja2_templates.c.name, + pk_value, + ) + for pk_value, content in all_templates.items() + } + + yield pk_to_row + + async with sqlalchemy_async_engine.begin() as conn: + for pk_value in pk_to_row: + await _delete_row(conn, jinja2_templates, jinja2_templates.c.name, pk_value) + + +@pytest.fixture +def set_template_to_product( + sqlalchemy_async_engine: AsyncEngine, product: dict[str, Any] +): + # NOTE: needs all fixture products in db + @validate_call + async def _(template_name: IDStr, product_name: ProductName) -> None: + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + products_to_templates.insert().values( + product_name=product_name, template_name=template_name + ) + ) + + return _ + + +@pytest.fixture +def unset_template_to_product(sqlalchemy_async_engine: AsyncEngine): + @validate_call + async def _(template_name: IDStr, product_name: ProductName) -> None: + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + products_to_templates.delete().where( + (products_to_templates.c.product_name == product_name) + & (products_to_templates.c.template_name == template_name) + ) + ) + + return _ diff --git a/packages/notifications-library/tests/with_db/test__repository.py b/packages/notifications-library/tests/with_db/test__repository.py new file mode 100644 index 
00000000000..0d009ba4d25 --- /dev/null +++ b/packages/notifications-library/tests/with_db/test__repository.py @@ -0,0 +1,53 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Callable, Coroutine +from typing import Any + +from models_library.products import ProductName +from models_library.users import UserID +from notifications_library._models import UserData +from notifications_library._repository import TemplatesRepo, UsersRepo +from sqlalchemy.ext.asyncio.engine import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_user_data_repo( + sqlalchemy_async_engine: AsyncEngine, + user: dict, + user_id: UserID, + user_data: UserData, +): + assert user["id"] == user_id + + repo = UsersRepo(sqlalchemy_async_engine) + got = await repo.get_user_data(user_id) + assert got == user_data + + +async def test_templates_repo( + sqlalchemy_async_engine: AsyncEngine, + email_templates: dict[str, Any], + email_template_mark: dict, + product_name: ProductName, + set_template_to_product: Callable[[str, ProductName], Coroutine], +): + repo = TemplatesRepo(sqlalchemy_async_engine) + + one_template_name = next(_ for _ in email_templates if "email" in _) + await set_template_to_product(one_template_name, product_name) + + async for template in repo.iter_email_templates(product_name): + assert template.name in email_templates + assert email_template_mark in template.content + assert template.name == one_template_name diff --git a/packages/notifications-library/tests/with_db/test__templates_consolidation.py b/packages/notifications-library/tests/with_db/test__templates_consolidation.py new file mode 100644 index 00000000000..ab7179eb98e --- /dev/null +++ b/packages/notifications-library/tests/with_db/test__templates_consolidation.py @@ -0,0 +1,24 @@ +from pathlib import Path + +from notifications_library._repository import TemplatesRepo +from notifications_library._templates import _print_tree, consolidate_templates +from sqlalchemy.ext.asyncio.engine import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_templates_consolidation( + tmp_path: Path, sqlalchemy_async_engine: AsyncEngine, products_names: list[str] +): + new_templates_dir = tmp_path / "all-templates" + new_templates_dir.mkdir() + + repo = TemplatesRepo(sqlalchemy_async_engine) + await consolidate_templates(new_templates_dir, products_names, repo) + + _print_tree(new_templates_dir) diff --git a/packages/postgres-database/Makefile b/packages/postgres-database/Makefile index 896a1521a90..35f3024fa98 100644 --- a/packages/postgres-database/Makefile +++ b/packages/postgres-database/Makefile @@ -13,7 +13,7 @@ requirements: ## compiles pip requirements (.in -> .txt) .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests tests-ci @@ -41,6 +41,7 @@ tests-ci: ## runs unit tests [ci-mode] --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ 
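`TemplatesRepo.iter_email_templates`, exercised with `async for` in `test__repository.py` above, is the kind of repo method that is commonly written as an async generator streaming rows from the server; a sketch under that assumption, not the repo's actual implementation:

```python
from collections.abc import AsyncIterator

from sqlalchemy.engine.row import Row
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.sql import Select


async def _iter_rows(engine: AsyncEngine, stmt: Select) -> AsyncIterator[Row]:
    # stream rows from the server instead of fetching them all at once
    async with engine.connect() as conn:
        result = await conn.stream(stmt)
        async for row in result:
            yield row
```

A caller would then write, for example, `async for row in _iter_rows(engine, sa.select(jinja2_templates)): ...`.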
--cov=simcore_postgres_database \ --durations=10 \ --log-date-format="%Y-%m-%d %H:%M:%S" \ @@ -121,14 +122,31 @@ migrate: $(DOT_ENV_FILE) ## basic migration update (use ONLY for development pur .PHONY: up-pg down-pg up-prod down-prod docker-compose-configs = $(wildcard tests/docker-compose*.yml) -up-pg up-prod: $(docker-compose-configs) ## starts pg server - docker-compose -f tests/docker-compose.yml $(if $(findstring -prod,$@),-f tests/docker-compose.prod.yml,) up -d +up-pg up-prod: $(docker-compose-configs) ## starts pg server. Optionally, you can use POSTGRES_DATA_VOLUME to pass name of the volume to mount + docker compose -f tests/docker-compose.yml $(if $(findstring -prod,$@),-f tests/docker-compose.prod.yml,) up -d down-pg down-prod: $(docker-compose-configs) ## stops pg server - docker-compose -f tests/docker-compose.yml $(if $(findstring -prod,$@),-f tests/docker-compose.prod.yml,) down + docker compose -f tests/docker-compose.yml $(if $(findstring -prod,$@),-f tests/docker-compose.prod.yml,) down -.PHONY: auto-doc -auto-doc: ## Creates entity relationship diagram (ERD) defined under ``simcore_postgres_database.models`` +.PHONY: doc-erd +doc-erd: ## Creates entity relationship diagram (ERD) defined under ``simcore_postgres_database.models`` $(MAKE) --directory=scripts/erd run + + + +.PHONY: merge-heads check-multiple-heads +check-multiple-heads: ## checks that multiple postgress heads exist + @cd src/simcore_postgres_database; \ + nheads=$$(alembic heads | wc -l); \ + echo "Number of postgress heads: $${nheads}"; \ + if [ "$${nheads}" -lt "2" ]; then \ + exit 1; \ + fi + +merge-heads: check-multiple-heads ## merges multiple postgress heads + @cd src/simcore_postgres_database; \ + merge_msg="merge "$$(alembic heads | sed 's/(head)//g' | tr '\n' ' '); \ + echo "$${merge_msg}"; \ + alembic merge heads -m "$${merge_msg}" diff --git a/packages/postgres-database/VERSION b/packages/postgres-database/VERSION index 8f0916f768f..d9df1bbc0c7 100644 --- a/packages/postgres-database/VERSION +++ b/packages/postgres-database/VERSION @@ -1 +1 @@ -0.5.0 +0.11.0 diff --git a/packages/postgres-database/doc/database-models.md b/packages/postgres-database/doc/database-models.md index 48b61ea4dbd..e7bbe4eaf48 100644 --- a/packages/postgres-database/doc/database-models.md +++ b/packages/postgres-database/doc/database-models.md @@ -25,6 +25,6 @@ cd packages/postgres-database make install-dev cd scripts -pip install eralchemy +uv pip install eralchemy python create_erd.py ``` diff --git a/packages/postgres-database/docker/Dockerfile b/packages/postgres-database/docker/Dockerfile index a7c7c0bffab..004cfed5185 100644 --- a/packages/postgres-database/docker/Dockerfile +++ b/packages/postgres-database/docker/Dockerfile @@ -1,4 +1,9 @@ -FROM python:3.6-slim as base +# syntax=docker/dockerfile:1 +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# we docker image is built based on debian +FROM python:${PYTHON_VERSION}-slim-bookworm AS base LABEL maintainer=sanderegg @@ -12,21 +17,23 @@ ENV PYTHONDONTWRITEBYTECODE=1 \ ENV PATH="${VIRTUAL_ENV}/bin:$PATH" -FROM base as build +FROM base AS build RUN apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - git \ + build-essential \ + git \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed packages may be 
moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools @@ -34,14 +41,17 @@ ARG GIT_BRANCH ARG GIT_REPOSITORY RUN git clone --single-branch --branch ${GIT_BRANCH} ${GIT_REPOSITORY} osparc-simcore\ - && pip install osparc-simcore/packages/postgres-database[migration] + && uv pip install osparc-simcore/packages/postgres-database[migration] -FROM base as production +FROM base AS production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu - +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # bring installed package without build tools COPY --from=build ${VIRTUAL_ENV} ${VIRTUAL_ENV} COPY entrypoint.bash /home/entrypoint.bash diff --git a/packages/postgres-database/docker/Makefile b/packages/postgres-database/docker/Makefile index 8ab8a17c010..d6864030bae 100644 --- a/packages/postgres-database/docker/Makefile +++ b/packages/postgres-database/docker/Makefile @@ -23,7 +23,8 @@ SIMCORE_NETWORK ?= $(SWARM_STACK_NAME)_default .PHONY: build build: ## build postgres migration image # use GIT_REPOSITORY='$(GIT_REPOSITORY)' GIT_BRANCH='$(GIT_BRANCH)' - @docker build --file Dockerfile \ + @docker buildx build --file Dockerfile \ + --load \ --tag $(MIGRATER_TAG_NAME) \ --build-arg GIT_REPOSITORY=$(GIT_REPOSITORY) \ --build-arg GIT_BRANCH=$(GIT_BRANCH) \ diff --git a/packages/postgres-database/requirements/_base.in b/packages/postgres-database/requirements/_base.in index bfc39c603e7..c5aa128b710 100644 --- a/packages/postgres-database/requirements/_base.in +++ b/packages/postgres-database/requirements/_base.in @@ -3,8 +3,10 @@ # --constraint ../../../requirements/constraints.txt --constraint ./constraints.txt - +--requirement ../../../packages/common-library/requirements/_base.in alembic -sqlalchemy[postgresql_psycopg2binary] +opentelemetry-instrumentation-asyncpg +pydantic +sqlalchemy[postgresql_psycopg2binary,postgresql_asyncpg] # SEE extras in https://github.com/sqlalchemy/sqlalchemy/blob/main/setup.cfg#L43 yarl diff --git a/packages/postgres-database/requirements/_base.txt b/packages/postgres-database/requirements/_base.txt index fbb40b1aecd..b16bdd318cf 100644 --- a/packages/postgres-database/requirements/_base.txt +++ b/packages/postgres-database/requirements/_base.txt @@ -1,28 +1,80 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -alembic==1.9.4 +alembic==1.14.1 # via -r requirements/_base.in -greenlet==2.0.2 +annotated-types==0.7.0 + # via pydantic +asyncpg==0.30.0 # via sqlalchemy -idna==3.4 +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +greenlet==3.1.1 + # via sqlalchemy +idna==3.10 # via yarl -mako==1.2.4 +importlib-metadata==8.5.0 + # via opentelemetry-api +mako==1.3.9 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.2 +markupsafe==3.0.2 # via mako -multidict==6.0.4 +multidict==6.1.0 + # via yarl +opentelemetry-api==1.30.0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asyncpg + # 
opentelemetry-semantic-conventions +opentelemetry-instrumentation==0.51b0 + # via opentelemetry-instrumentation-asyncpg +opentelemetry-instrumentation-asyncpg==0.51b0 + # via -r requirements/_base.in +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asyncpg +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via opentelemetry-instrumentation +propcache==0.3.0 # via yarl -psycopg2-binary==2.9.5 +psycopg2-binary==2.9.10 # via sqlalchemy -sqlalchemy==1.4.46 +pydantic==2.10.6 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/_base.in + # pydantic-extra-types +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via -r requirements/../../../packages/common-library/requirements/_base.in +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/_base.in # alembic -yarl==1.8.2 +typing-extensions==4.12.2 + # via + # alembic + # pydantic + # pydantic-core + # pydantic-extra-types +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation +yarl==1.18.3 # via -r requirements/_base.in +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/postgres-database/requirements/_migration.in b/packages/postgres-database/requirements/_migration.in index 87474a49666..cde03004450 100644 --- a/packages/postgres-database/requirements/_migration.in +++ b/packages/postgres-database/requirements/_migration.in @@ -6,11 +6,6 @@ --constraint ../../../requirements/constraints.txt --constraint _base.txt -# There are incompatible versions in the resolved dependencies: -# websocket-client==1.1.0 (from -c requirements/_migration.txt (line 51)) -# websocket-client<1,>=0.32.0 (from docker-compose==1.29.1->-c requirements/../../../requirements/constraints.txt (line 25)) -websocket-client<1,>=0.32.0 - # ---------------------- alembic diff --git a/packages/postgres-database/requirements/_migration.txt b/packages/postgres-database/requirements/_migration.txt index 312748ca284..a9f890849e7 100644 --- a/packages/postgres-database/requirements/_migration.txt +++ b/packages/postgres-database/requirements/_migration.txt @@ -1,53 +1,50 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_migration.txt --resolver=backtracking --strip-extras requirements/_migration.in -# -alembic==1.9.4 - # via -r requirements/_migration.in -certifi==2022.12.7 - # via requests -charset-normalizer==3.0.1 +alembic==1.14.1 + # via + # -c requirements/_base.txt + # -r requirements/_migration.in +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 # via requests -click==8.1.3 +click==8.1.8 # via -r requirements/_migration.in -docker==6.0.1 +docker==7.1.0 # via -r requirements/_migration.in -greenlet==2.0.2 +greenlet==3.1.1 # via # -c requirements/_base.txt # sqlalchemy -idna==3.4 +idna==3.10 # via # -c 
requirements/_base.txt # requests -mako==1.2.4 +mako==1.3.9 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # alembic -markupsafe==2.1.2 +markupsafe==3.0.2 # via # -c requirements/_base.txt # mako -packaging==23.0 +requests==2.32.3 # via docker -requests==2.28.2 - # via docker -six==1.16.0 - # via websocket-client -sqlalchemy==1.4.46 +sqlalchemy==1.4.54 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # alembic -tenacity==8.2.2 +tenacity==9.0.0 # via -r requirements/_migration.in -urllib3==1.26.14 +typing-extensions==4.12.2 # via - # -r requirements/_migration.in - # docker - # requests -websocket-client==0.59.0 + # -c requirements/_base.txt + # alembic +urllib3==2.3.0 # via + # -c requirements/../../../requirements/constraints.txt # -r requirements/_migration.in # docker + # requests diff --git a/packages/postgres-database/requirements/_test.in b/packages/postgres-database/requirements/_test.in index d6a8959df72..d0b7af019df 100644 --- a/packages/postgres-database/requirements/_test.in +++ b/packages/postgres-database/requirements/_test.in @@ -10,13 +10,16 @@ --constraint _migration.txt aiopg[sa] +arrow coverage -coveralls faker pytest -pytest-aiohttp # incompatible with pytest-asyncio. See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-asyncio pytest-cov pytest-docker pytest-instafail pytest-runner pyyaml +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types-docker +types-psycopg2 diff --git a/packages/postgres-database/requirements/_test.txt b/packages/postgres-database/requirements/_test.txt index cb56b19ab82..da15f704e3e 100644 --- a/packages/postgres-database/requirements/_test.txt +++ b/packages/postgres-database/requirements/_test.txt @@ -1,124 +1,92 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 - # via - # -c requirements/../../../requirements/constraints.txt - # pytest-aiohttp aiopg==1.4.0 # via -r requirements/_test.in -aiosignal==1.3.1 - # via aiohttp -async-timeout==4.0.2 - # via - # aiohttp - # aiopg -attrs==22.2.0 - # via - # aiohttp - # pytest - # pytest-docker -certifi==2022.12.7 - # via - # -c requirements/_migration.txt - # requests -charset-normalizer==3.0.1 - # via - # -c requirements/_migration.txt - # aiohttp - # requests -coverage==6.5.0 +arrow==1.3.0 + # via -r requirements/_test.in +async-timeout==4.0.3 + # via aiopg +attrs==25.1.0 + # via pytest-docker +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 +faker==36.1.1 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 - # via -r requirements/_test.in -frozenlist==1.3.3 - # via - # aiohttp - # aiosignal -greenlet==2.0.2 +greenlet==3.1.1 # via + # -c requirements/_base.txt # -c requirements/_migration.txt # sqlalchemy -idna==3.4 - # via - # -c requirements/_migration.txt - # requests - # yarl iniconfig==2.0.0 # via pytest -multidict==6.0.4 +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +packaging==24.2 # via # -c requirements/_base.txt - # aiohttp - # yarl -packaging==23.0 - # via - # -c requirements/_migration.txt # pytest -pluggy==1.0.0 +pluggy==1.5.0 # via pytest -psycopg2-binary==2.9.5 +psycopg2-binary==2.9.10 # via # 
-c requirements/_base.txt # aiopg # sqlalchemy -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-docker # pytest-instafail -pytest-aiohttp==1.0.4 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-docker==1.0.1 +pytest-docker==3.2.0 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dateutil==2.8.2 - # via faker -pyyaml==6.0 - # via -r requirements/_test.in -requests==2.28.2 +python-dateutil==2.9.0.post0 + # via arrow +pyyaml==6.0.2 # via - # -c requirements/_migration.txt - # coveralls -six==1.16.0 - # via - # -c requirements/_migration.txt - # python-dateutil -sqlalchemy==1.4.46 + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +six==1.17.0 + # via python-dateutil +sqlalchemy==1.4.54 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_migration.txt + # -r requirements/_test.in # aiopg -tomli==2.0.1 - # via - # coverage - # pytest -urllib3==1.26.14 +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +types-docker==7.1.0.20241229 + # via -r requirements/_test.in +types-psycopg2==2.9.21.20250121 + # via -r requirements/_test.in +types-python-dateutil==2.9.0.20241206 + # via arrow +types-requests==2.32.0.20250301 + # via types-docker +typing-extensions==4.12.2 # via + # -c requirements/_base.txt # -c requirements/_migration.txt - # requests -yarl==1.8.2 + # mypy + # sqlalchemy2-stubs +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via - # -c requirements/_base.txt - # aiohttp + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_migration.txt + # types-docker + # types-requests diff --git a/packages/postgres-database/requirements/_tools.txt b/packages/postgres-database/requirements/_tools.txt index 47a32043c56..f896126c0b0 100644 --- a/packages/postgres-database/requirements/_tools.txt +++ b/packages/postgres-database/requirements/_tools.txt @@ -1,88 +1,84 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 
+pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==6.0 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # pre-commit -tomli==2.0.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/postgres-database/requirements/ci.txt b/packages/postgres-database/requirements/ci.txt index b12bf394c2e..25346140a41 100644 --- a/packages/postgres-database/requirements/ci.txt +++ b/packages/postgres-database/requirements/ci.txt @@ -10,9 +10,11 @@ --requirement _base.txt --requirement _migration.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages ---editable ../../packages/pytest-simcore/ +simcore-common-library @ ../common-library/ +pytest-simcore @ ../pytest-simcore/ # current module -. +simcore-postgres-database @ . diff --git a/packages/postgres-database/requirements/dev.txt b/packages/postgres-database/requirements/dev.txt index 8136f1a48b5..095f8383b2a 100644 --- a/packages/postgres-database/requirements/dev.txt +++ b/packages/postgres-database/requirements/dev.txt @@ -13,7 +13,9 @@ --requirement _tools.txt # installs this repo's packages ---editable ../../packages/pytest-simcore/ +--editable ../common-library/ +--editable ../pytest-simcore/ + # current module --editable . diff --git a/packages/postgres-database/requirements/prod.txt b/packages/postgres-database/requirements/prod.txt index 20e0f06f33c..ba22361fcc3 100644 --- a/packages/postgres-database/requirements/prod.txt +++ b/packages/postgres-database/requirements/prod.txt @@ -8,4 +8,5 @@ --requirement _base.txt --requirement _migration.txt -. +simcore-common-library @ ../common-library/ +simcore-postgres-database @ . 
diff --git a/packages/postgres-database/scripts/erd/Dockerfile b/packages/postgres-database/scripts/erd/Dockerfile index abb61e39b9e..a80732357fc 100644 --- a/packages/postgres-database/scripts/erd/Dockerfile +++ b/packages/postgres-database/scripts/erd/Dockerfile @@ -1,5 +1,11 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# we docker image is built based on debian +FROM python:${PYTHON_VERSION}-slim-bookworm AS base RUN apt-get update \ && apt-get -y install --no-install-recommends\ @@ -14,14 +20,14 @@ RUN apt-get update \ && apt-get clean -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools # devenv -RUN pip install --no-cache-dir \ - pyparsing \ - pydot \ - sqlalchemy_schemadisplay +COPY requirements.txt requirements.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements.txt diff --git a/packages/postgres-database/scripts/erd/Makefile b/packages/postgres-database/scripts/erd/Makefile index b896d82e56d..c4adb54d2cb 100644 --- a/packages/postgres-database/scripts/erd/Makefile +++ b/packages/postgres-database/scripts/erd/Makefile @@ -3,7 +3,7 @@ # .DEFAULT_GOAL := help -PYTHON_VERSION=3.9.12 +PYTHON_VERSION=3.10.14 # locations REPODIR := $(shell git rev-parse --show-toplevel) @@ -20,7 +20,8 @@ IMAGE_NAME:=local/postgres-database-scripts-erd:${PYTHON_VERSION} # SEE https://medium.com/faun/set-current-host-user-for-docker-container-4e521cef9ffc .PHONY: build build build-nc: ## builds tooling image ${IMAGE_NAME} - docker build $(if $(findstring -nc,$@),--no-cache,) \ + docker buildx build $(if $(findstring -nc,$@),--no-cache,) \ + --load \ --build-arg PYTHON_VERSION="${PYTHON_VERSION}" \ --tag ${IMAGE_NAME} . @@ -49,7 +50,7 @@ run: build ## Runs upgrade in a container [WARNING! UNDER DEV. 
USE CAREFULY] --user=$(shell id -u):$(shell id -g) \ --entrypoint=/bin/bash \ ${IMAGE_NAME} \ - -c "pip install -e .; python scripts/erd/main.py" + -c "pip install -e .; python scripts/erd/cli.py" diff --git a/packages/postgres-database/scripts/erd/cli.py b/packages/postgres-database/scripts/erd/cli.py new file mode 100644 index 00000000000..0adb3bcab0a --- /dev/null +++ b/packages/postgres-database/scripts/erd/cli.py @@ -0,0 +1,85 @@ +# +# ERD (Entity Relationship Diagram) is used to visualize these relationships +# +# - Uses sqlalchemy_schemadisplay which is maintained by sqlalchemy +# - DROPPED 'eralchemy' since it fails with latest version and is not maintained anymore +# +# SEE https://github.com/sqlalchemy/sqlalchemy/wiki/SchemaDisplay +# SEE https://github.com/Alexis-benoist/eralchemy + +# pylint: disable=wildcard-import +# pylint: disable=unused-wildcard-import + + +import argparse +import importlib +import logging +from pathlib import Path +from typing import Any, Final + +import simcore_postgres_database.models +from simcore_postgres_database.models.base import metadata +from sqlalchemy_schemadisplay import create_schema_graph + +logging.basicConfig(level=logging.INFO) + +_models_folder: Final = Path(simcore_postgres_database.models.__file__).parent +# imports all models to fill "metadata" +for p in _models_folder.glob("*.py"): + if not p.name.startswith("__"): + importlib.import_module( + f"simcore_postgres_database.models.{p.name.removesuffix('.py')}" + ) + + +def create_erd(image_path: Path, include_table_names: list[str] | None = None): + """ + create the pydot graph object by autoloading all tables via a bound metadata object + """ + + kwargs: dict[str, Any] = { + "show_datatypes": True, # The image would get nasty big if we'd show the datatypes + "show_indexes": False, # ditto for indexes + "rankdir": "LR", # From left to right (instead of top to bottom) + "concentrate": False, # Don't try to join the relation lines together + } + + if include_table_names: + kwargs["tables"] = [metadata.tables[t] for t in include_table_names] + else: + kwargs["metadata"] = metadata + + graph = create_schema_graph(**kwargs) + + # pylint: disable=no-member + graph.write_svg(f'{image_path.with_suffix(".svg")}') + graph.write_png(f'{image_path.with_suffix(".png")}') + return image_path + + +def main(): + parser = argparse.ArgumentParser( + description="Creates file with an Entity Relationship Diagram (ERD) of simcore_postgres_database.models" + ) + parser.add_argument( + "--output", + help="Path to erd image (.svg, .png)", + default="postgres-database-erd-ignore.svg", + ) + parser.add_argument( + "--include", + nargs="*", + help="List of table names to include in the ERD e.g. 
'--include projects projects_nodes'", + default=None, + ) + args = parser.parse_args() + + created_path = create_erd( + image_path=Path(args.output), + include_table_names=args.include, + ) + logging.info("Created %s", f"{created_path=}") + + +if __name__ == "__main__": + main() diff --git a/packages/postgres-database/scripts/erd/main.py b/packages/postgres-database/scripts/erd/main.py deleted file mode 100644 index 199455938bf..00000000000 --- a/packages/postgres-database/scripts/erd/main.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# ERD (Entity Relationship Diagram) is used to visualize these relationships -# -# - Using sqlalchemy_schemadisplay which is maintained by sqlalchemy -# - Already tried 'eralchemy' but fails with latest version and not maintained anymore -# -# SEE https://github.com/sqlalchemy/sqlalchemy/wiki/SchemaDisplay -# SEE https://github.com/Alexis-benoist/eralchemy - -# pylint: disable=wildcard-import -# pylint: disable=unused-wildcard-import - -from pathlib import Path -from typing import Any, Optional - -from simcore_postgres_database.models import * # registers all schemas in metadata -from simcore_postgres_database.models.base import metadata -from sqlalchemy_schemadisplay import create_schema_graph - - -def create_erd(image_path: Path, tables: Optional[list[str]] = None): - """ - create the pydot graph object by autoloading all tables via a bound metadata object - """ - - kwargs: dict[str, Any] = dict( - show_datatypes=True, # The image would get nasty big if we'd show the datatypes - show_indexes=False, # ditto for indexes - rankdir="LR", # From left to right (instead of top to bottom) - concentrate=False, # Don't try to join the relation lines together - ) - - if tables: - kwargs["tables"] = [metadata.tables[t] for t in tables] - else: - kwargs["metadata"] = metadata - - graph = create_schema_graph(kwargs) - # pylint: disable=no-member - graph.write_svg(f'{image_path.with_suffix(".svg")}') - graph.write_png(f'{image_path.with_suffix(".png")}') - - -if __name__ == "__main__": - path = Path("postgres-database-erd-ignore.svg") - create_erd(path) - print("Created", path) diff --git a/packages/postgres-database/scripts/erd/requirements.txt b/packages/postgres-database/scripts/erd/requirements.txt new file mode 100644 index 00000000000..05c904f7b8b --- /dev/null +++ b/packages/postgres-database/scripts/erd/requirements.txt @@ -0,0 +1,6 @@ +setuptools>=45 +packaging>=20.9 +pyparsing +pydot +SQLAlchemy<2.0 +sqlalchemy_schemadisplay diff --git a/packages/postgres-database/setup.cfg b/packages/postgres-database/setup.cfg index 367d255c11d..90cbaba8af1 100644 --- a/packages/postgres-database/setup.cfg +++ b/packages/postgres-database/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.5.0 +current_version = 0.11.0 commit = True message = packages/postgres-database version: {current_version} β†’ {new_version} tag = False @@ -9,5 +9,13 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto -markers = +asyncio_default_fixture_loop_scope = function +addopts = -W error::sqlalchemy.exc.SAWarning +markers = + acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/packages/postgres-database/setup.py b/packages/postgres-database/setup.py index d8c50dff82f..a205de2f501 100644 --- a/packages/postgres-database/setup.py +++ b/packages/postgres-database/setup.py @@ -30,44 +30,46 @@ def read_reqs(reqs_path: Path) -> set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-postgres-database", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author="Pedro Crespo (pcrespov)", - description="Database models served by the simcore 'postgres' core service", +SETUP = { + "name": "simcore-postgres-database", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Pedro Crespo (pcrespov)", + "description": "Database models served by the simcore 'postgres' core service", + "python_requires": ">=3.10", # Get tags from https://pypi.org/classifiers/ - classifiers=[ + "classifiers": [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], - long_description=Path(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - packages=find_packages(where="src"), - package_dir={"": "src"}, - test_suite="tests", - install_requires=INSTALL_REQUIREMENTS, - tests_require=TEST_REQUIREMENTS, - extras_require={"migration": MIGRATION_REQUIREMENTS, "test": TEST_REQUIREMENTS}, - include_package_data=True, - package_data={ + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "packages": find_packages(where="src"), + "package_dir": {"": "src"}, + "test_suite": "tests", + "install_requires": INSTALL_REQUIREMENTS, + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"migration": MIGRATION_REQUIREMENTS, "test": TEST_REQUIREMENTS}, + "include_package_data": True, + "package_data": { "": [ + "py.typed", "*.ini", "migration/*.py", "migration/*.mako", "migration/versions/*.py", ] }, - entry_points={ + "entry_points": { "console_scripts": [ - "simcore-postgres-database=simcore_postgres_database.cli:main", + "simcore-service-postgres-database=simcore_postgres_database.cli:main", "sc-pg=simcore_postgres_database.cli:main", ] }, - zip_safe=False, -) + "zip_safe": False, +} if __name__ == "__main__": setup(**SETUP) diff --git a/packages/postgres-database/src/simcore_postgres_database/__init__.py b/packages/postgres-database/src/simcore_postgres_database/__init__.py index 1183dac2571..9a34ac80a31 100644 --- a/packages/postgres-database/src/simcore_postgres_database/__init__.py +++ b/packages/postgres-database/src/simcore_postgres_database/__init__.py @@ -1,12 +1,14 @@ -from typing import Tuple - -import pkg_resources +from importlib.metadata import version from . import storage_models, webserver_models from .models.base import metadata -__version__: str = pkg_resources.get_distribution("simcore-postgres-database").version +__version__: str = version("simcore-postgres-database") -__all__: tuple[str, ...] = ("metadata", "webserver_models", "storage_models") +__all__: tuple[str, ...] 
= ( + "metadata", + "webserver_models", + "storage_models", +) # nopycln: file diff --git a/packages/postgres-database/src/simcore_postgres_database/_protocols.py b/packages/postgres-database/src/simcore_postgres_database/_protocols.py new file mode 100644 index 00000000000..5e712fd3403 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/_protocols.py @@ -0,0 +1,49 @@ +"""Common protocols to annotate equivalent connections: + - sqlalchemy.ext.asyncio.AsyncConnection + - aiopg.sa.connection.SAConnection + +Purpose: to reduce dependency with aiopg (expected full migration to asyncpg) +""" + +from collections.abc import Awaitable +from typing import Any, Protocol, TypeAlias, TypeVar + +from sqlalchemy.sql import Executable + +# Type for query results +Result = TypeVar("Result") + +# Type alias for methods that can be either async or sync +MaybeCoro: TypeAlias = Awaitable[Result] | Result + + +class ResultProxy(Protocol): + """Protocol for query result objects from both engines + + Handles both aiopg's async methods and SQLAlchemy asyncpg's sync methods. + This is temporary until we fully migrate to asyncpg. + """ + + def fetchall(self) -> MaybeCoro[list[Any]]: ... + def fetchone(self) -> MaybeCoro[Any | None]: ... + def first(self) -> MaybeCoro[Any | None]: ... + + +class DBConnection(Protocol): + """Protocol to account for both aiopg and SQLAlchemy async connections""" + + async def scalar( + self, + statement: Executable, + parameters: dict[str, Any] | None = None, + *, + execution_options: dict[str, Any] | None = None, + ) -> Any: ... + + async def execute( + self, + statement: Executable, + parameters: dict[str, Any] | None = None, + *, + execution_options: dict[str, Any] | None = None, + ) -> ResultProxy: ... diff --git a/packages/postgres-database/src/simcore_postgres_database/aiopg_errors.py b/packages/postgres-database/src/simcore_postgres_database/aiopg_errors.py new file mode 100644 index 00000000000..730d6f630ac --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/aiopg_errors.py @@ -0,0 +1,65 @@ +"""aiopg errors + +WARNING: these errors are not raised by asyncpg. Therefore all code using new sqlalchemy.ext.asyncio + MUST use instead import sqlalchemy.exc exceptions!!!! + +StandardError +|__ Warning +|__ Error + |__ InterfaceError + |__ DatabaseError + |__ DataError + |__ OperationalError + |__ IntegrityError + |__ InternalError + |__ ProgrammingError + |__ NotSupportedError + +- aiopg reuses DBAPI exceptions + SEE https://aiopg.readthedocs.io/en/stable/core.html?highlight=Exception#exceptions + SEE http://initd.org/psycopg/docs/module.html#dbapi-exceptions + SEE https://www.postgresql.org/docs/current/errcodes-appendix.html +""" + +# NOTE: psycopg2.errors are created dynamically +# pylint: disable=no-name-in-module +from psycopg2 import ( + DatabaseError, + DataError, +) +from psycopg2 import Error as DBAPIError +from psycopg2 import ( + IntegrityError, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + ProgrammingError, +) +from psycopg2.errors import ( + CheckViolation, + ForeignKeyViolation, + InvalidTextRepresentation, + NotNullViolation, + UniqueViolation, +) + +assert issubclass(UniqueViolation, IntegrityError) # nosec + +__all__: tuple[str, ...] 
= ( + "CheckViolation", + "DatabaseError", + "DataError", + "DBAPIError", + "ForeignKeyViolation", + "IntegrityError", + "InterfaceError", + "InternalError", + "InvalidTextRepresentation", + "NotNullViolation", + "NotSupportedError", + "OperationalError", + "ProgrammingError", + "UniqueViolation", +) +# nopycln: file diff --git a/packages/postgres-database/src/simcore_postgres_database/cli.py b/packages/postgres-database/src/simcore_postgres_database/cli.py index 58ee27da34d..5fa3cf22025 100644 --- a/packages/postgres-database/src/simcore_postgres_database/cli.py +++ b/packages/postgres-database/src/simcore_postgres_database/cli.py @@ -1,15 +1,18 @@ """ command line interface for migration """ + # pylint: disable=wildcard-import # pylint: disable=unused-wildcard-import +# nopycln: file + import json import json.decoder import logging import os from logging.config import fileConfig -from typing import Optional +from pathlib import Path import alembic.command import click @@ -40,10 +43,17 @@ log = logging.getLogger("root") -if __name__ == "__main__": - # swallows up all log messages from tests - # only enable it during cli invocation - fileConfig(DEFAULT_INI) + +class PostgresNotFoundError(RuntimeError): + def __init__(self) -> None: + super().__init__("Postgres db was not discover") + + +class DiscoverConfigMissingError(ValueError): + def __init__(self, extra="") -> None: + super().__init__( + f"Missing discovery config file {extra}. Check for errors in discovery logs to find more details" + ) @click.group() @@ -57,7 +67,7 @@ def main(): @click.option("--host") @click.option("--port", type=int) @click.option("--database", "-d") -def discover(**cli_inputs) -> Optional[dict]: +def discover(**cli_inputs) -> dict | None: """Discovers databases and caches configs in ~/.simcore_postgres_database.json (except if --no-cache)""" # NOTE: Do not add defaults to user, password so we get a chance to ping urls # TODO: if multiple candidates online, then query user to select @@ -95,7 +105,7 @@ def _test_swarm() -> dict: for test in [_test_cached, _test_env, _test_swarm]: try: - click.echo("-> {0.__name__}: {0.__doc__}".format(test)) + click.echo(f"-> {test.__name__}: {test.__doc__}") cfg: dict = test() cfg.update(cli_cfg) # CLI always overrides @@ -104,12 +114,10 @@ def _test_swarm() -> dict: click.echo(f"ping {test.__name__}: {hide_url_pass(url)} ...") raise_if_not_responsive(url, verbose=False) - print("Saving config ") click.echo(f"Saving config at {DISCOVERED_CACHE}: {hide_dict_pass(cfg)}") - with open(DISCOVERED_CACHE, "wt") as fh: + with Path(DISCOVERED_CACHE).open("w") as fh: json.dump(cfg, fh, sort_keys=True, indent=4) - print("Saving config at ") click.secho( f"{test.__name__} succeeded: {hide_url_pass(url)} is online", blink=False, @@ -119,7 +127,7 @@ def _test_swarm() -> dict: return cfg - except Exception as err: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except # noqa: PERF203 inline_msg = str(err).replace("\n", ". ") click.echo(f"<- {test.__name__} failed : {inline_msg}") @@ -152,19 +160,20 @@ def clean(): @main.command() def upgrade_and_close(): """Used in migration service program to discover, upgrade and close""" - + assert discover.callback # nosec for attempt in Retrying(wait=wait_fixed(5), after=after_log(log, logging.ERROR)): with attempt: if not discover.callback(): - raise Exception("Postgres db was not discover") # pylint: disable=broad-exception-raised + raise PostgresNotFoundError - # FIXME: if database is not stampped!? 
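The new `_protocols.DBConnection` and the renamed `aiopg_errors` module are easiest to see side by side: helpers typed against the protocol accept either an aiopg `SAConnection` or a SQLAlchemy `AsyncConnection`, and, as the module docstring warns, code on the asyncpg stack must catch `sqlalchemy.exc` exceptions instead. A sketch only; the duplicate-insert scenario is illustrative:

```python
import sqlalchemy as sa
import sqlalchemy.exc
from simcore_postgres_database._protocols import DBConnection
from simcore_postgres_database.aiopg_errors import UniqueViolation
from simcore_postgres_database.models.users import users


async def get_user_email(conn: DBConnection, user_id: int) -> str | None:
    # the same call works for aiopg's SAConnection and sqlalchemy's AsyncConnection
    return await conn.scalar(sa.select(users.c.email).where(users.c.id == user_id))


async def create_user_ignoring_duplicates(conn: DBConnection, **values) -> None:
    try:
        await conn.execute(users.insert().values(**values))
    except sqlalchemy.exc.IntegrityError:
        # raised by the sqlalchemy.ext.asyncio / asyncpg stack, e.g. duplicate email
        pass
    except UniqueViolation:
        # raised only by the aiopg / psycopg2 stack (see the module docstring above)
        pass
```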
try: + assert info.callback # nosec info.callback() + assert upgrade.callback # nosec upgrade.callback(revision="head") info.callback() except Exception: # pylint: disable=broad-except - log.exception("Unable to upgrade") + log.exception("Unable to upgrade to head. Skipping ...") click.echo("I did my job here. Bye!") @@ -192,7 +201,8 @@ def review(message): rev_id=None, ) else: - raise ValueError("Missing config") + msg = "while auto-generating new review" + raise DiscoverConfigMissingError(extra=msg) @main.command() @@ -216,7 +226,8 @@ def upgrade(revision): if config: alembic.command.upgrade(config, revision, sql=False, tag=None) else: - raise ValueError("Missing config") + msg = "while upgrading" + raise DiscoverConfigMissingError(extra=msg) @main.command() @@ -240,7 +251,8 @@ def downgrade(revision): if config: alembic.command.downgrade(config, str(revision), sql=False, tag=None) else: - raise ValueError("Missing config") + msg = "while downgrading" + raise DiscoverConfigMissingError(extra=msg) @main.command() @@ -252,4 +264,11 @@ def stamp(revision): if config: alembic.command.stamp(config, revision, sql=False, tag=None) else: - raise ValueError("Missing config") + msg = "while stamping" + raise DiscoverConfigMissingError(extra=msg) + + +if __name__ == "__main__": + # swallows up all log messages from tests + # only enable it during cli invocation + fileConfig(DEFAULT_INI) diff --git a/packages/postgres-database/src/simcore_postgres_database/constants.py b/packages/postgres-database/src/simcore_postgres_database/constants.py new file mode 100644 index 00000000000..69eb5203fa3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/constants.py @@ -0,0 +1,9 @@ +from decimal import Decimal +from typing import Final + +# NOTE: this is sync with DECIMAL_PLACES@ packages/models-library/src/models_library/basic_types.py using test_postgres_and_models_library_same_decimal_places_constant +DECIMAL_PLACES: Final = 2 + +# NOTE: Constant used in the exp argument of quantize to convert decimals as: Decimal().quantize(QUANTIZE_EXP_ARG) +# See https://docs.python.org/3/library/decimal.html#decimal.Decimal.quantize +QUANTIZE_EXP_ARG: Final = Decimal(f"1e-{DECIMAL_PLACES}") diff --git a/packages/postgres-database/src/simcore_postgres_database/errors.py b/packages/postgres-database/src/simcore_postgres_database/errors.py deleted file mode 100644 index 9a3b996c153..00000000000 --- a/packages/postgres-database/src/simcore_postgres_database/errors.py +++ /dev/null @@ -1,65 +0,0 @@ -""" aiopg errors - - StandardError - |__ Warning - |__ Error - |__ InterfaceError - |__ DatabaseError - |__ DataError - |__ OperationalError - |__ IntegrityError - |__ InternalError - |__ ProgrammingError - |__ NotSupportedError - - - aiopg reuses DBAPI exceptions - SEE https://aiopg.readthedocs.io/en/stable/core.html?highlight=Exception#exceptions - SEE http://initd.org/psycopg/docs/module.html#dbapi-exceptions -""" -# NOTE: psycopg2.errors are created dynamically -# pylint: disable=no-name-in-module -from psycopg2 import DatabaseError, DataError -from psycopg2 import Error as DBAPIError -from psycopg2 import ( - IntegrityError, - InterfaceError, - InternalError, - NotSupportedError, - OperationalError, - ProgrammingError, -) -from psycopg2.errors import ( - CheckViolation, - ForeignKeyViolation, - NotNullViolation, - UniqueViolation, -) - -assert issubclass(UniqueViolation, IntegrityError) # nosec - -# TODO: see 
https://stackoverflow.com/questions/58740043/how-do-i-catch-a-psycopg2-errors-uniqueviolation-error-in-a-python-flask-app -# from sqlalchemy.exc import IntegrityError -# -# from psycopg2.errors import UniqueViolation -# -# try: -# s.commit() -# except IntegrityError as e: -# assert isinstance(e.orig, UniqueViolation) - - -__all__: tuple[str, ...] = ( - "CheckViolation", - "DatabaseError", - "DataError", - "DBAPIError", - "ForeignKeyViolation", - "IntegrityError", - "InterfaceError", - "InternalError", - "NotNullViolation", - "NotSupportedError", - "OperationalError", - "ProgrammingError", -) -# nopycln: file diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/README.md b/packages/postgres-database/src/simcore_postgres_database/migration/README.md index f33850b585e..000d27e90ac 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/README.md +++ b/packages/postgres-database/src/simcore_postgres_database/migration/README.md @@ -80,7 +80,7 @@ We create a revision script for the change by using the local db as follows: ```bash pip install -r packages/postgres-database/requirements/dev.txt # install sc-pg package -docker-compose -f services/docker-compose.yml -f services/docker-compose-ops.yml up adminer # bring db and ui up +docker compose -f services/docker-compose.yml -f services/docker-compose-ops.yml up adminer # bring db and ui up docker ps # find the published port for the db sc-pg discover -u scu -p adminadmin --port=5432 # discover the db sc-pg info # what revision are we at? diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/env.py b/packages/postgres-database/src/simcore_postgres_database/migration/env.py index b4ed815e55a..febfee09bbc 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/env.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/env.py @@ -1,9 +1,8 @@ from logging.config import fileConfig from alembic import context -from sqlalchemy import engine_from_config, pool - from simcore_postgres_database.settings import target_metadatas +from sqlalchemy import engine_from_config, pool # this is the Alembic Config object, which provides # access to the values within the .ini file in use. 
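The `merge-heads` and `check-multiple-heads` Makefile targets shown earlier shell out to the alembic CLI; the same check-and-merge can be expressed with the programmatic API that `cli.py` already uses. A sketch, with the `alembic.ini` path being illustrative:

```python
from alembic import command
from alembic.config import Config
from alembic.script import ScriptDirectory


def merge_heads_if_needed(ini_path: str = "alembic.ini") -> None:
    cfg = Config(ini_path)
    heads = ScriptDirectory.from_config(cfg).get_heads()
    if len(heads) > 1:
        # equivalent to: alembic merge heads -m "merge <head1> <head2> ..."
        command.merge(cfg, revisions=heads, message=f"merge {' '.join(heads)}")
```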
@@ -15,6 +14,7 @@ if __name__ == "__main__": # swallows up all log messages from tests # only enable it during cli invocation + assert config.config_file_name is not None # nosec fileConfig(config.config_file_name) # add your model's MetaData object here diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/009c81406676_add_project_running_state.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/009c81406676_add_project_running_state.py index b3437caefbb..d709ba22924 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/009c81406676_add_project_running_state.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/009c81406676_add_project_running_state.py @@ -5,6 +5,7 @@ Create Date: 2020-10-12 08:38:40.807576+00:00 """ + import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import postgresql @@ -87,7 +88,7 @@ def upgrade(): FAILED: "FAILED", } [ - op.execute( + op.execute( # type: ignore[func-returns-value] # no reason to change now sa.DDL( f""" UPDATE comp_pipeline @@ -99,7 +100,7 @@ def upgrade(): for old, new in migration_map.items() ] [ - op.execute( + op.execute( # type: ignore[func-returns-value] # no reason to change now sa.DDL( f""" UPDATE comp_tasks @@ -146,7 +147,7 @@ def downgrade(): "FAILED": FAILED, } [ - op.execute( + op.execute( # type: ignore[func-returns-value] # no reason to change now sa.DDL( f""" UPDATE comp_pipeline @@ -158,7 +159,7 @@ def downgrade(): for old, new in migration_map.items() ] [ - op.execute( + op.execute( # type: ignore[func-returns-value] # no reason to change now sa.DDL( f""" UPDATE comp_tasks diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/056ed0eb1ba6_new_version_display_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/056ed0eb1ba6_new_version_display_column.py new file mode 100644 index 00000000000..eccf9fcea2f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/056ed0eb1ba6_new_version_display_column.py @@ -0,0 +1,29 @@ +"""new version_display column + +Revision ID: 056ed0eb1ba6 +Revises: d0e56c2d0a0d +Create Date: 2024-07-18 19:06:50.142232+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "056ed0eb1ba6" +down_revision = "d0e56c2d0a0d" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "services_meta_data", sa.Column("version_display", sa.String(), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("services_meta_data", "version_display") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/061607911a22_drop_projects_version_control.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/061607911a22_drop_projects_version_control.py new file mode 100644 index 00000000000..d7f6f4dc9ec --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/061607911a22_drop_projects_version_control.py @@ -0,0 +1,234 @@ +"""drop projects_version_control + +Revision ID: 061607911a22 +Revises: 3fe27ff48f73 +Create Date: 2025-02-06 19:28:49.918139+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "061607911a22" +down_revision = "3fe27ff48f73" +branch_labels = None +depends_on = None + + +def upgrade(): + op.drop_table("projects_vc_heads") + op.drop_table("projects_vc_branches") + op.drop_table("projects_vc_tags") + op.drop_table("projects_vc_commits") + op.drop_table("projects_vc_snapshots") + op.drop_table("projects_vc_repos") + + +def downgrade(): + + op.create_table( + "projects_vc_snapshots", + sa.Column("checksum", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "content", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + autoincrement=False, + nullable=False, + ), + sa.PrimaryKeyConstraint("checksum", name="projects_vc_snapshots_pkey"), + postgresql_ignore_search_path=False, + ) + + op.create_table( + "projects_vc_repos", + sa.Column( + "id", + sa.BIGINT(), + server_default=sa.text("nextval('projects_vc_repos_id_seq'::regclass)"), + autoincrement=True, + nullable=False, + ), + sa.Column("project_uuid", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("project_checksum", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_vc_repos_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="projects_vc_repos_pkey"), + sa.UniqueConstraint("project_uuid", name="projects_vc_repos_project_uuid_key"), + postgresql_ignore_search_path=False, + ) + + op.create_table( + "projects_vc_commits", + sa.Column( + "id", + sa.BIGINT(), + server_default=sa.text("nextval('projects_vc_commits_id_seq'::regclass)"), + autoincrement=True, + nullable=False, + ), + sa.Column("repo_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("parent_commit_id", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column( + "snapshot_checksum", sa.VARCHAR(), autoincrement=False, nullable=False + ), + sa.Column("message", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["parent_commit_id"], + ["projects_vc_commits.id"], + name="fk_projects_vc_commits_parent_commit_id", + onupdate="CASCADE", + ), + sa.ForeignKeyConstraint( + ["repo_id"], + ["projects_vc_repos.id"], + name="fk_projects_vc_commits_repo_id", + onupdate="CASCADE", + 
ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["snapshot_checksum"], + ["projects_vc_snapshots.checksum"], + name="fk_projects_vc_commits_snapshot_checksum", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("id", name="projects_vc_commits_pkey"), + postgresql_ignore_search_path=False, + ) + + op.create_table( + "projects_vc_branches", + sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False), + sa.Column("repo_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("head_commit_id", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["head_commit_id"], + ["projects_vc_commits.id"], + name="fk_projects_vc_branches_head_commit_id", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.ForeignKeyConstraint( + ["repo_id"], + ["projects_vc_repos.id"], + name="projects_vc_branches_repo_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="projects_vc_branches_pkey"), + sa.UniqueConstraint("name", "repo_id", name="repo_branch_uniqueness"), + ) + + op.create_table( + "projects_vc_tags", + sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False), + sa.Column("repo_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("commit_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("message", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("hidden", sa.BOOLEAN(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["commit_id"], + ["projects_vc_commits.id"], + name="fk_projects_vc_tags_commit_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["repo_id"], + ["projects_vc_repos.id"], + name="fk_projects_vc_tags_repo_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="projects_vc_tags_pkey"), + sa.UniqueConstraint("name", "repo_id", name="repo_tag_uniqueness"), + ) + + op.create_table( + "projects_vc_heads", + sa.Column("repo_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("head_branch_id", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["head_branch_id"], + ["projects_vc_branches.id"], + name="fk_projects_vc_heads_head_branch_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["repo_id"], + ["projects_vc_repos.id"], + name="projects_vc_branches_repo_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("repo_id", name="projects_vc_heads_pkey"), + sa.UniqueConstraint( + "head_branch_id", name="projects_vc_heads_head_branch_id_key" + ), + ) diff --git 
a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ad000429e3d_rm_two_factor_enabled_col_in_users.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ad000429e3d_rm_two_factor_enabled_col_in_users.py new file mode 100644 index 00000000000..145793e2bf6 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ad000429e3d_rm_two_factor_enabled_col_in_users.py @@ -0,0 +1,36 @@ +"""rm two_factor_enabled col in users + +Revision ID: 0ad000429e3d +Revises: 215b2cac1dbc +Create Date: 2023-11-21 18:30:35.437738+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0ad000429e3d" +down_revision = "215b2cac1dbc" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("users", "two_factor_enabled") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "users", + sa.Column( + "two_factor_enabled", + sa.BOOLEAN(), + server_default=sa.text("true"), + autoincrement=False, + nullable=False, + ), + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0c084cb1091c_add_progress_to_comp_tasks.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0c084cb1091c_add_progress_to_comp_tasks.py new file mode 100644 index 00000000000..6c13987346a --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0c084cb1091c_add_progress_to_comp_tasks.py @@ -0,0 +1,30 @@ +"""add progress to comp_tasks + +Revision ID: 0c084cb1091c +Revises: 432aa859098b +Create Date: 2023-05-05 08:00:18.951040+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0c084cb1091c" +down_revision = "432aa859098b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_tasks", + sa.Column("progress", sa.Numeric(precision=3, scale=2), nullable=True), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("comp_tasks", "progress") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0cdf095b10fe_adding_node_uuid_instance_cpu_memory_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0cdf095b10fe_adding_node_uuid_instance_cpu_memory_.py new file mode 100644 index 00000000000..dddf8531ac4 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0cdf095b10fe_adding_node_uuid_instance_cpu_memory_.py @@ -0,0 +1,106 @@ +"""adding node_uuid, instance, cpu/memory limits and indexes to resource tracker container table + +Revision ID: 0cdf095b10fe +Revises: 52b5c2466605 +Create Date: 2023-07-03 14:55:20.464906+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0cdf095b10fe" +down_revision = "52b5c2466605" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "resource_tracker_container", + sa.Column("node_uuid", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_container", + sa.Column("node_label", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_container", sa.Column("instance", sa.String(), nullable=True) + ) + op.add_column( + "resource_tracker_container", + sa.Column("service_settings_limit_nano_cpus", sa.BigInteger(), nullable=True), + ) + op.add_column( + "resource_tracker_container", + sa.Column( + "service_settings_limit_memory_bytes", sa.BigInteger(), nullable=True + ), + ) + op.add_column( + "resource_tracker_container", + sa.Column("project_name", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_container", + sa.Column("user_email", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_container", + sa.Column("service_key", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_container", + sa.Column("service_version", sa.String(), nullable=False), + ) + op.create_index( + op.f("ix_resource_tracker_container_product_name"), + "resource_tracker_container", + ["product_name"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_container_prometheus_last_scraped"), + "resource_tracker_container", + ["prometheus_last_scraped"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_container_user_id"), + "resource_tracker_container", + ["user_id"], + unique=False, + ) + op.drop_column("resource_tracker_container", "image") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_container", + sa.Column("image", sa.VARCHAR(), autoincrement=False, nullable=True), + ) + op.drop_index( + op.f("ix_resource_tracker_container_user_id"), + table_name="resource_tracker_container", + ) + op.drop_index( + op.f("ix_resource_tracker_container_prometheus_last_scraped"), + table_name="resource_tracker_container", + ) + op.drop_index( + op.f("ix_resource_tracker_container_product_name"), + table_name="resource_tracker_container", + ) + op.drop_column("resource_tracker_container", "service_version") + op.drop_column("resource_tracker_container", "service_key") + op.drop_column("resource_tracker_container", "user_email") + op.drop_column("resource_tracker_container", "project_name") + op.drop_column("resource_tracker_container", "service_settings_limit_memory_bytes") + op.drop_column("resource_tracker_container", "service_settings_limit_nano_cpus") + op.drop_column("resource_tracker_container", "instance") + op.drop_column("resource_tracker_container", "node_label") + op.drop_column("resource_tracker_container", "node_uuid") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d52976dc616_add_conversations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d52976dc616_add_conversations.py new file mode 100644 index 00000000000..e4f12f72dd5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d52976dc616_add_conversations.py @@ -0,0 +1,140 @@ +"""add conversations + +Revision ID: 0d52976dc616 +Revises: 742123f0933a +Create Date: 2025-04-28 11:19:02.029533+00:00 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "0d52976dc616" +down_revision = "742123f0933a" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "conversations", + sa.Column( + "conversation_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("name", sa.String(), nullable=False), + sa.Column("project_uuid", sa.String(), nullable=True), + sa.Column("user_group_id", sa.BigInteger(), nullable=True), + sa.Column( + "type", + sa.Enum("PROJECT_STATIC", "PROJECT_ANNOTATION", name="conversationtype"), + nullable=True, + ), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_conversations_product_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_conversations_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_group_id"], + ["groups.gid"], + name="fk_conversations_user_primary_gid", + ondelete="SET NULL", + ), + sa.PrimaryKeyConstraint("conversation_id"), + ) + op.create_index( + op.f("ix_conversations_project_uuid"), + "conversations", + ["project_uuid"], + unique=False, + ) + op.create_table( + "conversation_messages", + sa.Column( + "message_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("conversation_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("user_group_id", sa.BigInteger(), nullable=True), + sa.Column("content", sa.String(), nullable=False), + sa.Column( + "type", + sa.Enum("MESSAGE", "NOTIFICATION", name="conversationmessagetype"), + nullable=True, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["conversation_id"], + ["conversations.conversation_id"], + name="fk_conversation_messages_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_group_id"], + ["groups.gid"], + name="fk_conversation_messages_user_primary_gid", + ondelete="SET NULL", + ), + sa.PrimaryKeyConstraint("message_id"), + ) + op.create_index( + op.f("ix_conversation_messages_conversation_id"), + "conversation_messages", + ["conversation_id"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index( + op.f("ix_conversation_messages_conversation_id"), + table_name="conversation_messages", + ) + op.drop_table("conversation_messages") + op.drop_index(op.f("ix_conversations_project_uuid"), table_name="conversations") + op.drop_table("conversations") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d85bd35bdaa_add_optional_project_parents.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d85bd35bdaa_add_optional_project_parents.py new file mode 100644 index 00000000000..10a59cd6f27 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0d85bd35bdaa_add_optional_project_parents.py @@ -0,0 +1,70 @@ +"""add_optional_project_parents + +Revision ID: 0d85bd35bdaa +Revises: baf0ee1c37dc +Create Date: 2024-05-30 09:37:39.234834+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0d85bd35bdaa" +down_revision = "baf0ee1c37dc" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "projects_metadata", + sa.Column("parent_project_uuid", sa.String(), nullable=True), + ) + op.add_column( + "projects_metadata", sa.Column("parent_node_id", sa.String(), nullable=True) + ) + op.add_column( + "projects_metadata", + sa.Column("root_parent_project_uuid", sa.String(), nullable=True), + ) + op.add_column( + "projects_metadata", + sa.Column("root_parent_node_id", sa.String(), nullable=True), + ) + op.create_foreign_key( + "fk_projects_metadata_parent_node_id", + "projects_metadata", + "projects_nodes", + ["parent_project_uuid", "parent_node_id"], + ["project_uuid", "node_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + op.create_foreign_key( + "fk_projects_metadata_root_parent_node_id", + "projects_metadata", + "projects_nodes", + ["root_parent_project_uuid", "root_parent_node_id"], + ["project_uuid", "node_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_projects_metadata_root_parent_node_id", + "projects_metadata", + type_="foreignkey", + ) + op.drop_constraint( + "fk_projects_metadata_parent_node_id", "projects_metadata", type_="foreignkey" + ) + op.drop_column("projects_metadata", "root_parent_node_id") + op.drop_column("projects_metadata", "root_parent_project_uuid") + op.drop_column("projects_metadata", "parent_node_id") + op.drop_column("projects_metadata", "parent_project_uuid") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ed9f6eabeba_add_foreign_key_credit_transactions.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ed9f6eabeba_add_foreign_key_credit_transactions.py new file mode 100644 index 00000000000..11e1dd486e4 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/0ed9f6eabeba_add_foreign_key_credit_transactions.py @@ -0,0 +1,48 @@ +"""add foreign key credit transactions + +Revision ID: 0ed9f6eabeba +Revises: 95d0932aaa83 +Create Date: 2024-04-23 15:21:33.906701+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "0ed9f6eabeba" +down_revision = "95d0932aaa83" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_foreign_key( + "resource_tracker_credit_trans_fkey", + "resource_tracker_credit_transactions", + "resource_tracker_service_runs", + ["product_name", "service_run_id"], + ["product_name", "service_run_id"], + onupdate="CASCADE", + ondelete="RESTRICT", + ) + op.create_index( + op.f("ix_resource_tracker_service_runs_started_at"), + "resource_tracker_service_runs", + ["started_at"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_resource_tracker_service_runs_started_at"), + table_name="resource_tracker_service_runs", + ) + op.drop_constraint( + "resource_tracker_credit_trans_fkey", + "resource_tracker_credit_transactions", + type_="foreignkey", + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/10729e07000d_improve_foreign_key_dependencies.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/10729e07000d_improve_foreign_key_dependencies.py new file mode 100644 index 00000000000..16bfc82acd8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/10729e07000d_improve_foreign_key_dependencies.py @@ -0,0 +1,110 @@ +"""improve foreign key dependencies + +Revision ID: 10729e07000d +Revises: 47ca7335e146 +Create Date: 2024-09-24 07:52:20.253076+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "10729e07000d" +down_revision = "47ca7335e146" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_foreign_key( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + "services_meta_data", + ["service_key", "service_version"], + ["key", "version"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.drop_index( + "ix_resource_tracker_pricing_plans_product_name", + table_name="resource_tracker_pricing_plans", + ) + op.create_foreign_key( + "fk_rut_pricing_plans_product_name", + "resource_tracker_pricing_plans", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.create_foreign_key( + "fk_resource_tracker_pricing_units_costs_pricing_plan_id", + "resource_tracker_pricing_unit_costs", + "resource_tracker_pricing_plans", + ["pricing_plan_id"], + ["pricing_plan_id"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.create_foreign_key( + "fk_resource_tracker_pricing_units_costs_pricing_unit_id", + "resource_tracker_pricing_unit_costs", + "resource_tracker_pricing_units", + ["pricing_unit_id"], + ["pricing_unit_id"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.create_foreign_key( + "fk_wallets_product_name", + "wallets", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.create_foreign_key( + "fk_workspaces_product_name", + "workspaces", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint("fk_workspaces_product_name", "workspaces", type_="foreignkey") + op.drop_constraint("fk_wallets_product_name", "wallets", type_="foreignkey") + op.drop_constraint( + "fk_resource_tracker_pricing_units_costs_pricing_unit_id", + "resource_tracker_pricing_unit_costs", + type_="foreignkey", + ) + op.drop_constraint( + "fk_resource_tracker_pricing_units_costs_pricing_plan_id", + "resource_tracker_pricing_unit_costs", + type_="foreignkey", + ) + op.drop_constraint( + "fk_rut_pricing_plans_product_name", + "resource_tracker_pricing_plans", + type_="foreignkey", + ) + op.create_index( + "ix_resource_tracker_pricing_plans_product_name", + "resource_tracker_pricing_plans", + ["product_name"], + unique=False, + ) + op.drop_constraint( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + type_="foreignkey", + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/10b293fdcd56_alters_product_login_settings.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/10b293fdcd56_alters_product_login_settings.py index 9129670a975..a429d377742 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/10b293fdcd56_alters_product_login_settings.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/10b293fdcd56_alters_product_login_settings.py @@ -5,6 +5,7 @@ Create Date: 2023-01-14 21:12:56.182870+00:00 """ + import json import sqlalchemy as sa @@ -21,15 +22,17 @@ def upgrade(): # Reassign items from two_factor_enabled -> LOGIN_2FA_REQUIRED conn = op.get_bind() - rows = conn.execute("SELECT name, login_settings FROM products").fetchall() + rows = conn.execute(sa.DDL("SELECT name, login_settings FROM products")).fetchall() for row in rows: data = row["login_settings"] or {} if "two_factor_enabled" in data: data["LOGIN_2FA_REQUIRED"] = data.pop("two_factor_enabled") data = json.dumps(data) conn.execute( - "UPDATE products SET login_settings = '{}' WHERE name = '{}'".format( # nosec - data, row["name"] + sa.DDL( + "UPDATE products SET login_settings = '{}' WHERE name = '{}'".format( # nosec + data, row["name"] + ) ) ) @@ -38,14 +41,14 @@ def upgrade(): "products", "login_settings", server_default=sa.text("'{}'::jsonb"), - existing_server_default=sa.text("'{\"two_factor_enabled\": false}'::jsonb"), + existing_server_default=sa.text("'{\"two_factor_enabled\": false}'::jsonb"), # type: ignore[arg-type] ) def downgrade(): # Reassign items from LOGIN_2FA_REQUIRED -> two_factor_enabled=false conn = op.get_bind() - rows = conn.execute("SELECT name, login_settings FROM products").fetchall() + rows = conn.execute(sa.DDL("SELECT name, login_settings FROM products")).fetchall() for row in rows: data = row["login_settings"] or {} data["two_factor_enabled"] = data.pop( @@ -53,8 +56,10 @@ def downgrade(): ) # back to default data = json.dumps(data) conn.execute( - "UPDATE products SET login_settings = '{}' WHERE name = '{}'".format( # nosec - data, row["name"] + sa.DDL( + "UPDATE products SET login_settings = '{}' WHERE name = '{}'".format( # nosec + data, row["name"] + ) ) ) @@ -63,6 +68,6 @@ def downgrade(): "products", "login_settings", existing_type=postgresql.JSONB(astext_type=sa.Text()), - existing_server_default=sa.text("'{}'::jsonb"), + existing_server_default=sa.text("'{}'::jsonb"), # type: ignore[arg-type] server_default=sa.text("'{\"two_factor_enabled\": false}'::jsonb"), ) diff --git 
a/packages/postgres-database/src/simcore_postgres_database/migration/versions/163b11424cb1_convert_empty_str_to_null.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/163b11424cb1_convert_empty_str_to_null.py new file mode 100644 index 00000000000..781aa64ab01 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/163b11424cb1_convert_empty_str_to_null.py @@ -0,0 +1,52 @@ +"""enforce null + +Revision ID: 163b11424cb1 +Revises: a8d336ca9379 +Create Date: 2025-02-24 12:44:10.538469+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "163b11424cb1" +down_revision = "a8d336ca9379" +branch_labels = None +depends_on = None + + +def upgrade(): + + # SEE https://github.com/ITISFoundation/osparc-simcore/pull/7268 + + op.execute( + sa.DDL( + """ + UPDATE services_meta_data + SET thumbnail = NULL + WHERE thumbnail = ''; + """ + ) + ) + op.execute( + sa.DDL( + """ + UPDATE services_meta_data + SET version_display = NULL + WHERE version_display = ''; + """ + ) + ) + op.execute( + """ + UPDATE services_meta_data + SET icon = NULL + WHERE icon = ''; + """ + ) + + +def downgrade(): + """ + Nothing to be done here + """ diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/19f3d9085636_create_project_to_groups_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/19f3d9085636_create_project_to_groups_table.py new file mode 100644 index 00000000000..7465620a7f9 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/19f3d9085636_create_project_to_groups_table.py @@ -0,0 +1,138 @@ +"""create project_to_groups table + +Revision ID: 19f3d9085636 +Revises: d1fafda96f4c +Create Date: 2024-07-12 07:23:52.049378+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "19f3d9085636" +down_revision = "d1fafda96f4c" +branch_labels = None +depends_on = None + + +# ------------------------ TRIGGERS +new_project_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS project_creation on projects; +CREATE TRIGGER project_creation +AFTER INSERT ON projects + FOR EACH ROW + EXECUTE PROCEDURE set_project_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_project_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_project_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + -- Fetch the group_id based on the owner from the other table + SELECT u.primary_gid INTO group_id + FROM users u + WHERE u.id = NEW.prj_owner + LIMIT 1; + + IF group_id IS NOT NULL THEN + IF TG_OP = 'INSERT' THEN + INSERT INTO "project_to_groups" ("gid", "project_uuid", "read", "write", "delete") VALUES (group_id, NEW.uuid, TRUE, TRUE, TRUE); + END IF; + END IF; + + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "project_to_groups", + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column("gid", sa.BigInteger(), nullable=False), + sa.Column( + "read", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "write", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "delete", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_project_to_groups_gid_groups", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_project_to_groups_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("project_uuid", "gid"), + ) + op.create_index( + op.f("ix_project_to_groups_project_uuid"), + "project_to_groups", + ["project_uuid"], + unique=False, + ) + # ### end Alembic commands ### + + # Migration of access rights from projects table to new project_to_groups table + op.execute( + sa.DDL( + """ + INSERT INTO project_to_groups + select + projects.uuid as project_uuid, + CAST(js.key as bigint) as gid, + CAST(js.value ->> 'read' as bool) as read, + CAST(js.value ->> 'write' as bool) as write, + CAST(js.value ->> 'delete' as bool) as delete, + CURRENT_TIMESTAMP as created, + CURRENT_TIMESTAMP as modified + from projects, + json_each(projects.access_rights::json) AS js; + """ + ) + ) + + op.execute(assign_project_access_rights_to_owner_group_procedure) + op.execute(new_project_trigger) + + +def downgrade(): + op.execute(new_project_trigger) + op.execute(assign_project_access_rights_to_owner_group_procedure) + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_project_to_groups_project_uuid"), table_name="project_to_groups" + ) + op.drop_table("project_to_groups") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/1bc517536e0a_add_product_owners_email_col.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1bc517536e0a_add_product_owners_email_col.py new file mode 100644 index 00000000000..7d6f45919e4 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1bc517536e0a_add_product_owners_email_col.py @@ -0,0 +1,29 @@ +"""Add product owners email col + +Revision ID: 1bc517536e0a +Revises: 5f88b513cd4c +Create Date: 2025-01-29 10:05:58.254306+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "1bc517536e0a" +down_revision = "5f88b513cd4c" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "products", sa.Column("product_owners_email", sa.String(), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("products", "product_owners_email") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/1c069f85d5fd_add_index_to_checksum.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1c069f85d5fd_add_index_to_checksum.py new file mode 100644 index 00000000000..63764630a0c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1c069f85d5fd_add_index_to_checksum.py @@ -0,0 +1,33 @@ +"""add index to checksum + +Revision ID: 1c069f85d5fd +Revises: b13ca15c7ef8 +Create Date: 2024-04-26 11:46:55.745033+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "1c069f85d5fd" +down_revision = "b13ca15c7ef8" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index( + op.f("ix_file_meta_data_sha256_checksum"), + "file_meta_data", + ["sha256_checksum"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_file_meta_data_sha256_checksum"), table_name="file_meta_data" + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/1e3c9c804fec_set_privacy_hide_email_to_true.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1e3c9c804fec_set_privacy_hide_email_to_true.py new file mode 100644 index 00000000000..58e1115a1bf --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/1e3c9c804fec_set_privacy_hide_email_to_true.py @@ -0,0 +1,33 @@ +"""set privacy_hide_email to true. Reverts "set privacy_hide_email to false temporarily" (5e27063c3ac9) + +Revision ID: 1e3c9c804fec +Revises: d31c23845017 +Create Date: 2025-01-03 10:16:58.531083+00:00 + +""" +from alembic import op +from sqlalchemy.sql import expression + +# revision identifiers, used by Alembic. +revision = "1e3c9c804fec" +down_revision = "d31c23845017" +branch_labels = None +depends_on = None + + +def upgrade(): + # server_default of privacy_hide_email to true + with op.batch_alter_table("users") as batch_op: + batch_op.alter_column("privacy_hide_email", server_default=expression.true()) + + # Reset all to default: Revert existing values in the database to true + op.execute("UPDATE users SET privacy_hide_email = true") + + +def downgrade(): + # Change the server_default of privacy_hide_email to false + with op.batch_alter_table("users") as batch_op: + batch_op.alter_column("privacy_hide_email", server_default=expression.false()) + + # Reset all to default: Update existing values in the database + op.execute("UPDATE users SET privacy_hide_email = false") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/20d60d2663ad_add_db_extensions.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/20d60d2663ad_add_db_extensions.py new file mode 100644 index 00000000000..ee0f164d6f5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/20d60d2663ad_add_db_extensions.py @@ -0,0 +1,50 @@ +"""add_db_extensions + +Revision ID: 20d60d2663ad +Revises: f3a5484fe05d +Create Date: 2024-03-04 14:52:51.535716+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "20d60d2663ad" +down_revision = "f3a5484fe05d" +branch_labels = None +depends_on = None + + +def upgrade(): + # Check if the extension exists before attempting to create it + op.execute( + """ + DO + $$ + BEGIN + IF EXISTS(SELECT * FROM pg_available_extensions WHERE name = 'aws_commons') THEN + -- Create the extension + CREATE EXTENSION if not exists aws_commons; + END IF; + END + $$; + """ + ) + op.execute( + """ + DO + $$ + BEGIN + IF EXISTS(SELECT * FROM pg_available_extensions WHERE name = 'aws_s3') THEN + -- Create the extension + CREATE EXTENSION if not exists aws_s3; + END IF; + END + $$; + """ + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + pass + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/215b2cac1dbc_new_two_factor_enabled_user_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/215b2cac1dbc_new_two_factor_enabled_user_column.py new file mode 100644 index 00000000000..981432c75a1 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/215b2cac1dbc_new_two_factor_enabled_user_column.py @@ -0,0 +1,35 @@ +"""new two_factor_enabled user column + +Revision ID: 215b2cac1dbc +Revises: 22404057a50c +Create Date: 2023-11-21 14:42:42.170235+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "215b2cac1dbc" +down_revision = "22404057a50c" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "users", + sa.Column( + "two_factor_enabled", + sa.Boolean(), + server_default=sa.text("true"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("users", "two_factor_enabled") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/21699ee569a7_added_folders_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/21699ee569a7_added_folders_tables.py new file mode 100644 index 00000000000..416f3fc914f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/21699ee569a7_added_folders_tables.py @@ -0,0 +1,137 @@ +"""added folders tables + +Revision ID: 21699ee569a7 +Revises: 056ed0eb1ba6 +Create Date: 2024-07-30 13:38:57.694754+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "21699ee569a7" +down_revision = "056ed0eb1ba6" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "folders", + sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("description", sa.String(), server_default="", nullable=False), + sa.Column("created_by", sa.BigInteger(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["created_by"], + ["groups.gid"], + name="fk_folders_to_groups_gid", + ondelete="SET NULL", + ), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "folders_access_rights", + sa.Column("folder_id", sa.BigInteger(), nullable=False), + sa.Column("gid", sa.BigInteger(), nullable=False), + sa.Column("traversal_parent_id", sa.BigInteger(), nullable=True), + sa.Column("original_parent_id", sa.BigInteger(), nullable=True), + sa.Column("read", sa.Boolean(), nullable=False), + sa.Column("write", sa.Boolean(), nullable=False), + sa.Column("delete", sa.Boolean(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["folder_id"], + ["folders.id"], + name="fk_folders_access_rights_to_folders_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_folders_access_rights_to_groups_gid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["original_parent_id"], + ["folders.id"], + name="fk_folders_to_folders_id_via_original_parent_id", + ondelete="SET NULL", + ), + sa.ForeignKeyConstraint( + ["traversal_parent_id"], + ["folders.id"], + name="fk_folders_to_folders_id_via_traversal_parent_id", + ondelete="SET NULL", + ), + sa.PrimaryKeyConstraint("folder_id", "gid", name="folders_access_rights_pk"), + ) + op.create_table( + "folders_to_projects", + sa.Column("folder_id", sa.BigInteger(), nullable=False), + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["folder_id"], + ["folders.id"], + name="fk_folders_to_projects_to_folders_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_folders_to_projects_to_projects_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "folder_id", "project_uuid", name="projects_to_folder_pk" + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_table("folders_to_projects") + op.drop_table("folders_access_rights") + op.drop_table("folders") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/22404057a50c_resource_tracker_service_runs_helpers_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/22404057a50c_resource_tracker_service_runs_helpers_.py new file mode 100644 index 00000000000..6319bfb80fe --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/22404057a50c_resource_tracker_service_runs_helpers_.py @@ -0,0 +1,40 @@ +"""resource_tracker_service_runs helpers for missing heartbeat + +Revision ID: 22404057a50c +Revises: d0d544695487 +Create Date: 2023-10-25 19:17:29.928871+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "22404057a50c" +down_revision = "d0d544695487" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_service_runs", + sa.Column("service_run_status_msg", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column( + "missed_heartbeat_counter", + sa.SmallInteger(), + server_default="0", + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("resource_tracker_service_runs", "missed_heartbeat_counter") + op.drop_column("resource_tracker_service_runs", "service_run_status_msg") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/278daef7e99d_remove_whole_row_in_payload.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/278daef7e99d_remove_whole_row_in_payload.py new file mode 100644 index 00000000000..bd8f730a4b2 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/278daef7e99d_remove_whole_row_in_payload.py @@ -0,0 +1,134 @@ +"""remove whole row in payload + +Revision ID: 278daef7e99d +Revises: 4e7d8719855b +Create Date: 2025-05-22 21:22:11.084001+00:00 + +""" + +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "278daef7e99d" +down_revision = "4e7d8719855b" +branch_labels = None +depends_on = None + +DB_PROCEDURE_NAME: Final[str] = "notify_comp_tasks_changed" +DB_TRIGGER_NAME: Final[str] = f"{DB_PROCEDURE_NAME}_event" +DB_CHANNEL_NAME: Final[str] = "comp_tasks_output_events" + + +def upgrade(): + drop_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks; +""" + ) + + task_output_changed_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {DB_PROCEDURE_NAME}() RETURNS TRIGGER AS $$ + DECLARE + record RECORD; + payload JSON; + changes JSONB; + BEGIN + IF (TG_OP = 'DELETE') THEN + record = OLD; + ELSE + record = NEW; + END IF; + + SELECT jsonb_agg(pre.key ORDER BY pre.key) INTO changes + FROM jsonb_each(to_jsonb(OLD)) AS pre, jsonb_each(to_jsonb(NEW)) AS post + WHERE pre.key = post.key AND pre.value IS DISTINCT FROM post.value; + + payload = json_build_object( + 'table', TG_TABLE_NAME, + 'changes', changes, + 'action', TG_OP, + 'task_id', record.task_id, + 'project_id', record.project_id, + 'node_id', record.node_id + ); + + PERFORM pg_notify('{DB_CHANNEL_NAME}', payload::text); + + RETURN NULL; + END; +$$ LANGUAGE plpgsql; +""" + ) + + task_output_changed_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks; +CREATE TRIGGER {DB_TRIGGER_NAME} +AFTER UPDATE OF outputs,state ON comp_tasks + FOR EACH ROW + WHEN ((OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb OR OLD.state IS DISTINCT FROM NEW.state)) + EXECUTE PROCEDURE {DB_PROCEDURE_NAME}(); +""" + ) + + op.execute(drop_trigger) + op.execute(task_output_changed_procedure) + op.execute(task_output_changed_trigger) + + +def downgrade(): + drop_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks; +""" + ) + + task_output_changed_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {DB_PROCEDURE_NAME}() RETURNS TRIGGER AS $$ + DECLARE + record RECORD; + payload JSON; + changes JSONB; + BEGIN + IF (TG_OP = 'DELETE') THEN + record = OLD; + ELSE + record = NEW; + END IF; + + SELECT jsonb_agg(pre.key ORDER BY pre.key) INTO changes + FROM jsonb_each(to_jsonb(OLD)) AS pre, jsonb_each(to_jsonb(NEW)) AS post + WHERE pre.key = post.key AND pre.value IS DISTINCT FROM post.value; + + payload = json_build_object('table', TG_TABLE_NAME, + 'changes', changes, + 'action', TG_OP, + 'data', row_to_json(record)); + + PERFORM pg_notify('{DB_CHANNEL_NAME}', payload::text); + + RETURN NULL; + END; +$$ LANGUAGE plpgsql; +""" + ) + + task_output_changed_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks; +CREATE TRIGGER {DB_TRIGGER_NAME} +AFTER UPDATE OF outputs,state ON comp_tasks + FOR EACH ROW + WHEN ((OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb OR OLD.state IS DISTINCT FROM NEW.state)) + EXECUTE PROCEDURE {DB_PROCEDURE_NAME}(); +""" + ) + + op.execute(drop_trigger) + op.execute(task_output_changed_procedure) + op.execute(task_output_changed_trigger) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/2a4b4167e088_product_name_column_in_api_keys_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2a4b4167e088_product_name_column_in_api_keys_table.py new file mode 100644 index 00000000000..657459fc3a5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2a4b4167e088_product_name_column_in_api_keys_table.py @@ -0,0 +1,54 @@ +"""product_name column in api_keys table + +Revision ID: 2a4b4167e088 +Revises: be0dece4e67c 
+Create Date: 2023-10-26 06:53:52.079499+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "2a4b4167e088" +down_revision = "be0dece4e67c" +branch_labels = None +depends_on = None + + +def _find_default_product_name_or_none(conn): + query = sa.text("SELECT name FROM products ORDER BY priority LIMIT 1") + result = conn.execute(query) + row = result.fetchone() + return row[0] if row else None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("api_keys", sa.Column("product_name", sa.String(), nullable=True)) + op.create_foreign_key( + "fk_api_keys_product_name", + "api_keys", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + + conn = op.get_bind() + default_product = _find_default_product_name_or_none(conn) + if default_product: + op.execute(sa.DDL(f"UPDATE api_keys SET product_name = '{default_product}'")) + + # make it non nullable now + op.alter_column( + "api_keys", "product_name", existing_type=sa.String(), nullable=False + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint("fk_api_keys_product_name", "api_keys", type_="foreignkey") + op.drop_column("api_keys", "product_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/2cd329e47ea1_add_use_on_demand_clusters_in_comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2cd329e47ea1_add_use_on_demand_clusters_in_comp_runs.py new file mode 100644 index 00000000000..8dc5d5308be --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2cd329e47ea1_add_use_on_demand_clusters_in_comp_runs.py @@ -0,0 +1,58 @@ +"""add use on demand clusters in comp_runs + +Revision ID: 2cd329e47ea1 +Revises: 763666c698fb +Create Date: 2023-09-04 06:57:51.291084+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "2cd329e47ea1" +down_revision = "f53806935760" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_runs", sa.Column("use_on_demand_clusters", sa.Boolean(), nullable=True) + ) + # ### end Alembic commands ### + op.execute( + sa.DDL( + "UPDATE comp_runs SET use_on_demand_clusters = false WHERE use_on_demand_clusters IS NULL" + ) + ) + + op.alter_column( + "comp_runs", + "use_on_demand_clusters", + existing_type=sa.Boolean(), + nullable=False, + ) + + # new statetype + op.execute( + sa.DDL("ALTER TYPE statetype ADD VALUE IF NOT EXISTS 'WAITING_FOR_CLUSTER'") + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("comp_runs", "use_on_demand_clusters") + # ### end Alembic commands ### + + # no need to downgrade the enum type, postgres does not allow to just remove a type + # instead the tables that use it are updated + op.execute( + sa.DDL( + """ +UPDATE comp_tasks SET state = 'PUBLISHED' WHERE state = 'WAITING_FOR_CLUSTER'; +UPDATE comp_pipeline SET state = 'PUBLISHED' WHERE state = 'WAITING_FOR_CLUSTER'; +UPDATE comp_runs SET result = 'PUBLISHED' WHERE result = 'WAITING_FOR_CLUSTER'; + """ + ) + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/2dda922a3261_adding_additional_changes_to_service_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2dda922a3261_adding_additional_changes_to_service_.py new file mode 100644 index 00000000000..429b2344229 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/2dda922a3261_adding_additional_changes_to_service_.py @@ -0,0 +1,66 @@ +"""adding additional changes to service_run table + +Revision ID: 2dda922a3261 +Revises: 77fce87f1474 +Create Date: 2023-08-29 12:01:59.892710+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "2dda922a3261" +down_revision = "77fce87f1474" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "resource_tracker_pricing_details", + "cost_per_unit", + existing_type=sa.NUMERIC(precision=3, scale=2), + nullable=True, + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column( + "pricing_detail_cost_per_unit", + sa.Numeric(precision=15, scale=2), + nullable=True, + ), + ) + op.create_index( + op.f("ix_resource_tracker_service_runs_user_id"), + "resource_tracker_service_runs", + ["user_id"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_service_runs_wallet_id"), + "resource_tracker_service_runs", + ["wallet_id"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_resource_tracker_service_runs_wallet_id"), + table_name="resource_tracker_service_runs", + ) + op.drop_index( + op.f("ix_resource_tracker_service_runs_user_id"), + table_name="resource_tracker_service_runs", + ) + op.drop_column("resource_tracker_service_runs", "pricing_detail_cost_per_unit") + op.alter_column( + "resource_tracker_pricing_details", + "cost_per_unit", + existing_type=sa.NUMERIC(precision=3, scale=2), + nullable=True, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/307017ee1a49_add_deprecated_submit_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/307017ee1a49_add_deprecated_submit_column.py new file mode 100644 index 00000000000..a93d032b8e8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/307017ee1a49_add_deprecated_submit_column.py @@ -0,0 +1,28 @@ +"""add deprecated submit column + +Revision ID: 307017ee1a49 +Revises: 1e3c9c804fec +Create Date: 2025-01-06 12:53:51.604189+00:00 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '307017ee1a49' +down_revision = '1e3c9c804fec' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column('comp_tasks', sa.Column('submit', sa.DateTime(timezone=True), server_default=sa.text("'1900-01-01T00:00:00Z'::timestamptz"), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('comp_tasks', 'submit') + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/30e8b752e33e_phone_not_unique.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/30e8b752e33e_phone_not_unique.py new file mode 100644 index 00000000000..2ea3b3612e1 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/30e8b752e33e_phone_not_unique.py @@ -0,0 +1,26 @@ +"""phone not-unique + +Revision ID: 30e8b752e33e +Revises: c1d0e98cd289 +Create Date: 2024-03-11 12:21:35.856004+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "30e8b752e33e" +down_revision = "c1d0e98cd289" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint("user_phone_unique_constraint", "users", type_="unique") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_unique_constraint("user_phone_unique_constraint", "users", ["phone"]) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/35724106de75_new_users_details_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/35724106de75_new_users_details_table.py new file mode 100644 index 00000000000..2e64c2dc072 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/35724106de75_new_users_details_table.py @@ -0,0 +1,96 @@ +"""new users_pre_registration_details table + +Revision ID: 35724106de75 +Revises: 20d60d2663ad +Create Date: 2024-03-05 13:13:37.921956+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "35724106de75" +down_revision = "20d60d2663ad" +branch_labels = None +depends_on = None + + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "users_pre_registration_details" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database + +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "users_pre_registration_details", + sa.Column("user_id", sa.Integer(), nullable=True), + sa.Column("pre_email", sa.String(), nullable=False), + sa.Column("pre_first_name", sa.String(), nullable=True), + sa.Column("pre_last_name", sa.String(), nullable=True), + sa.Column("pre_phone", sa.String(), nullable=True), + sa.Column("company_name", sa.String(), nullable=True), + sa.Column("address", sa.String(), nullable=True), + sa.Column("city", sa.String(), nullable=True), + sa.Column("state", sa.String(), nullable=True), + sa.Column("country", sa.String(), nullable=True), + sa.Column("postal_code", sa.String(), nullable=True), + sa.Column("created_by", sa.Integer(), nullable=True), + sa.Column( + "created", sa.DateTime(), server_default=sa.text("now()"), nullable=False + ), + sa.Column( + "modified", sa.DateTime(), server_default=sa.text("now()"), nullable=False + ), + sa.ForeignKeyConstraint( + ["created_by"], ["users.id"], onupdate="CASCADE", ondelete="SET NULL" + ), + sa.ForeignKeyConstraint( + ["user_id"], ["users.id"], onupdate="CASCADE", ondelete="CASCADE" + ), + sa.UniqueConstraint("pre_email"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("users_pre_registration_details") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/3810966d1534_adding_pricing_and_harware_info_to_comp_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/3810966d1534_adding_pricing_and_harware_info_to_comp_.py new file mode 100644 index 00000000000..b15524ce9de --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/3810966d1534_adding_pricing_and_harware_info_to_comp_.py @@ -0,0 +1,40 @@ +"""adding pricing and harware info to comp_tasks + +Revision ID: 3810966d1534 +Revises: 5c62b190e124 +Create Date: 2023-10-17 14:35:21.032940+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "3810966d1534" +down_revision = "5c62b190e124" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_tasks", + sa.Column( + "pricing_info", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + ) + op.add_column( + "comp_tasks", + sa.Column( + "hardware_info", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("comp_tasks", "hardware_info") + op.drop_column("comp_tasks", "pricing_info") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/381336fa8001_add_product_name_to_licensed_item_to_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/381336fa8001_add_product_name_to_licensed_item_to_.py new file mode 100644 index 00000000000..c5a2137e0a3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/381336fa8001_add_product_name_to_licensed_item_to_.py @@ -0,0 +1,75 @@ +"""add product name to licensed item to resource table + +Revision ID: 381336fa8001 +Revises: d84edab53761 +Create Date: 2025-02-25 13:37:19.861701+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "381336fa8001" +down_revision = "d84edab53761" +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column( + "licensed_item_to_resource", + sa.Column("product_name", sa.String(), nullable=True), + ) + + ### Added Manually --> + op.execute( + """ + UPDATE licensed_item_to_resource + SET product_name = 's4l' + WHERE product_name IS NULL + """ + ) + op.alter_column("licensed_item_to_resource", "product_name", nullable=False) + ### <-- Added Manually + + op.drop_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + type_="unique", + ) + op.create_unique_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + ["product_name", "licensed_resource_id"], + ) + op.create_foreign_key( + "fk_licensed_item_to_resource_product_name", + "licensed_item_to_resource", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_licensed_item_to_resource_product_name", + "licensed_item_to_resource", + type_="foreignkey", + ) + op.drop_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + type_="unique", + ) + op.create_unique_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + ["licensed_resource_id"], + ) + op.drop_column("licensed_item_to_resource", "product_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/38c9ac332c58_new_user_privacy_columns.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/38c9ac332c58_new_user_privacy_columns.py new file mode 100644 index 00000000000..4d3e141e769 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/38c9ac332c58_new_user_privacy_columns.py @@ -0,0 +1,45 @@ +"""new user privacy columns + +Revision ID: 38c9ac332c58 +Revises: e5555076ef50 +Create Date: 2024-12-05 14:29:27.739650+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "38c9ac332c58" +down_revision = "e5555076ef50" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "users", + sa.Column( + "privacy_hide_fullname", + sa.Boolean(), + server_default=sa.text("true"), + nullable=False, + ), + ) + op.add_column( + "users", + sa.Column( + "privacy_hide_email", + sa.Boolean(), + server_default=sa.text("true"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("users", "privacy_hide_email") + op.drop_column("users", "privacy_hide_fullname") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/38fe651b4196_add_override_services_specifications.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/38fe651b4196_add_override_services_specifications.py new file mode 100644 index 00000000000..6100cefa980 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/38fe651b4196_add_override_services_specifications.py @@ -0,0 +1,54 @@ +"""add override_services_specifications + +Revision ID: 38fe651b4196 +Revises: 417f9eb848ce +Create Date: 2023-06-23 11:37:04.833354+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "38fe651b4196" +down_revision = "417f9eb848ce" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "groups_extra_properties", + sa.Column( + "override_services_specifications", + sa.Boolean(), + server_default=sa.text("false"), + nullable=True, + ), + ) + # ### end Alembic commands ### + groups_extra_properties_table = sa.table( + "groups_extra_properties", + sa.column("group_id"), + sa.column("node_id"), + sa.column("created"), + sa.column("modified"), + sa.column("override_services_specifications"), + ) + + # default to false + op.execute( + groups_extra_properties_table.update().values( + override_services_specifications=False + ) + ) + # # set to non nullable + op.alter_column( + "groups_extra_properties", "override_services_specifications", nullable=False + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("groups_extra_properties", "override_services_specifications") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/392a86f2e446_all_users_must_have_a_product.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/392a86f2e446_all_users_must_have_a_product.py new file mode 100644 index 00000000000..45bc2c3945f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/392a86f2e446_all_users_must_have_a_product.py @@ -0,0 +1,50 @@ +"""all users must have a product + +Revision ID: 392a86f2e446 +Revises: 0ad000429e3d +Create Date: 2024-01-09 15:14:11.504329+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "392a86f2e446" +down_revision = "0ad000429e3d" +branch_labels = None +depends_on = None + + +def upgrade(): + # If a user has NO associated product groups, assign them a default product + migration_query = sa.text( + """ + INSERT INTO user_to_groups (uid, gid) + SELECT u.id, ( + SELECT p.group_id + FROM products p + WHERE p.group_id IS NOT NULL + ORDER BY p.priority ASC + LIMIT 1 + ) AS default_group_id + FROM users u + WHERE u.id NOT IN ( + SELECT utg.uid + FROM user_to_groups utg + WHERE utg.gid IN ( + SELECT p.group_id + FROM products p + ) + ) + AND u.status = 'ACTIVE'; + """ + ) + + # Execute the migration query + conn = op.get_bind() + conn.execute(migration_query) + + +def downgrade(): + # Define the downgrade logic if needed + pass diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/3fe27ff48f73_new_icon_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/3fe27ff48f73_new_icon_table.py new file mode 100644 index 00000000000..3899ddb9787 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/3fe27ff48f73_new_icon_table.py @@ -0,0 +1,27 @@ +"""new icon table + +Revision ID: 3fe27ff48f73 +Revises: 611f956aa3e3 +Create Date: 2025-02-05 16:50:02.419293+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "3fe27ff48f73" +down_revision = "611f956aa3e3" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("services_meta_data", sa.Column("icon", sa.String(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("services_meta_data", "icon") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/417f9eb848ce_adding_study_id_to_resource_tracking_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/417f9eb848ce_adding_study_id_to_resource_tracking_.py new file mode 100644 index 00000000000..c3a877b9113 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/417f9eb848ce_adding_study_id_to_resource_tracking_.py @@ -0,0 +1,30 @@ +"""adding study id to resource-tracking-container table + +Revision ID: 417f9eb848ce +Revises: add0afaaf728 +Create Date: 2023-06-23 14:15:20.721005+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "417f9eb848ce" +down_revision = "add0afaaf728" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_container", + sa.Column("project_uuid", sa.String(), nullable=False), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("resource_tracker_container", "project_uuid") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/432aa859098b_rm_services_latest_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/432aa859098b_rm_services_latest_table.py new file mode 100644 index 00000000000..6e05e33b1f6 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/432aa859098b_rm_services_latest_table.py @@ -0,0 +1,39 @@ +"""rm services_latest table + +Revision ID: 432aa859098b +Revises: 9b97b12cfe47 +Create Date: 2023-04-03 16:58:44.438678+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "432aa859098b" +down_revision = "9b97b12cfe47" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("services_latest") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "services_latest", + sa.Column("key", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("version", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.ForeignKeyConstraint( + ["key", "version"], + ["services_meta_data.key", "services_meta_data.version"], + name="services_latest_key_version_fkey", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("key", name="services_latest_pk"), + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/47ca7335e146_remove_old_folders.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/47ca7335e146_remove_old_folders.py new file mode 100644 index 00000000000..63fb1a29923 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/47ca7335e146_remove_old_folders.py @@ -0,0 +1,169 @@ +"""remove old folders + +Revision ID: 47ca7335e146 +Revises: 9f381dcb9b95 +Create Date: 2024-09-17 11:54:39.600025+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "47ca7335e146" +down_revision = "9f381dcb9b95" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("folders_to_projects") + op.drop_table("folders_access_rights") + op.drop_table("folders") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "folders", + sa.Column( + "id", + sa.BIGINT(), + server_default=sa.text("nextval('folders_id_seq'::regclass)"), + autoincrement=True, + nullable=False, + ), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "description", + sa.VARCHAR(), + server_default=sa.text("''::character varying"), + autoincrement=False, + nullable=False, + ), + sa.Column("created_by", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column("product_name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.ForeignKeyConstraint( + ["created_by"], + ["groups.gid"], + name="fk_folders_to_groups_gid", + ondelete="SET NULL", + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_folders_to_products_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id", name="folders_pkey"), + postgresql_ignore_search_path=False, + ) + op.create_table( + "folders_access_rights", + sa.Column("folder_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("gid", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column( + "traversal_parent_id", sa.BIGINT(), autoincrement=False, nullable=True + ), + sa.Column( + "original_parent_id", sa.BIGINT(), autoincrement=False, nullable=True + ), + sa.Column("read", sa.BOOLEAN(), autoincrement=False, nullable=False), + sa.Column("write", sa.BOOLEAN(), autoincrement=False, nullable=False), + sa.Column("delete", sa.BOOLEAN(), autoincrement=False, nullable=False), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["folder_id"], + ["folders.id"], + name="fk_folders_access_rights_to_folders_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_folders_access_rights_to_groups_gid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["original_parent_id"], + ["folders.id"], + name="fk_folders_to_folders_id_via_original_parent_id", + ondelete="SET NULL", + ), + sa.ForeignKeyConstraint( + ["traversal_parent_id"], + ["folders.id"], + name="fk_folders_to_folders_id_via_traversal_parent_id", + ondelete="SET NULL", + ), + sa.PrimaryKeyConstraint("folder_id", "gid", name="folders_access_rights_pk"), + ) + op.create_table( + "folders_to_projects", + sa.Column("folder_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("project_uuid", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["folder_id"], + ["folders.id"], + name="fk_folders_to_projects_to_folders_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + 
name="fk_folders_to_projects_to_projects_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "folder_id", "project_uuid", name="projects_to_folder_pk" + ), + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/481d5b472721_add_parent_fields_to_rut.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/481d5b472721_add_parent_fields_to_rut.py new file mode 100644 index 00000000000..8458cf73442 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/481d5b472721_add_parent_fields_to_rut.py @@ -0,0 +1,120 @@ +"""add_parent_fields_to_rut + +Revision ID: 481d5b472721 +Revises: 0d85bd35bdaa +Create Date: 2024-06-03 08:58:35.086686+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "481d5b472721" +down_revision = "0d85bd35bdaa" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_service_runs", + sa.Column("parent_project_id", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("root_parent_project_id", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("root_parent_project_name", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("parent_node_id", sa.String(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("root_parent_node_id", sa.String(), nullable=True), + ) + + # Populate new columns with values from the existing column + op.execute( + sa.DDL( + f""" + + UPDATE resource_tracker_service_runs + SET parent_project_id = project_id, + root_parent_project_id = project_id, + root_parent_project_name = project_name, + parent_node_id = node_id, + root_parent_node_id = node_id + """ + ) + ) + + # Make newly created column non-nullable + op.alter_column( + "resource_tracker_service_runs", + "parent_project_id", + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "root_parent_project_id", + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "root_parent_project_name", + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "parent_node_id", + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "root_parent_node_id", + nullable=False, + ) + + # Make already existing columns non-nullable + op.alter_column( + "resource_tracker_service_runs", + "project_name", + existing_type=sa.VARCHAR(), + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "node_name", + existing_type=sa.VARCHAR(), + nullable=False, + ) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.alter_column( + "resource_tracker_service_runs", + "node_name", + existing_type=sa.VARCHAR(), + nullable=True, + ) + op.alter_column( + "resource_tracker_service_runs", + "project_name", + existing_type=sa.VARCHAR(), + nullable=True, + ) + op.drop_column("resource_tracker_service_runs", "root_parent_node_id") + op.drop_column("resource_tracker_service_runs", "parent_node_id") + op.drop_column("resource_tracker_service_runs", "root_parent_project_name") + op.drop_column("resource_tracker_service_runs", "root_parent_project_id") + op.drop_column("resource_tracker_service_runs", "parent_project_id") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/48604dfdc5f4_new_projects_to_job_map.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/48604dfdc5f4_new_projects_to_job_map.py new file mode 100644 index 00000000000..5f742464ddd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/48604dfdc5f4_new_projects_to_job_map.py @@ -0,0 +1,62 @@ +"""new projects to job map + +Revision ID: 48604dfdc5f4 +Revises: 8403acca8759 +Create Date: 2025-03-26 12:00:14.763439+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "48604dfdc5f4" +down_revision = "8403acca8759" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "projects_to_jobs", + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column( + "job_parent_resource_name", + sa.String(), + nullable=False, + doc="Prefix for the job resource name. For example, if the relative resource name is shelves/shelf1/books/book2, the parent resource name is shelves/shelf1.", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_to_jobs_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "project_uuid", + "job_parent_resource_name", + name="uq_projects_to_jobs_project_uuid_job_parent_resource_name", + ), + ) + + # Populate the new table + op.execute( + sa.text( + r""" +INSERT INTO projects_to_jobs (project_uuid, job_parent_resource_name) +SELECT + uuid AS project_uuid, + regexp_replace(name, '/jobs/.+$', '', 'g') AS job_parent_resource_name -- trim /jobs/.+$ +FROM projects +WHERE name ~* '^solvers/.+/jobs/.+$' OR name ~* '^studies/.+/jobs/.+$'; + """ + ) + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("projects_to_jobs") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/4a0f4efe8c86_add_invoice_id_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4a0f4efe8c86_add_invoice_id_column.py new file mode 100644 index 00000000000..01a403733ef --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4a0f4efe8c86_add_invoice_id_column.py @@ -0,0 +1,30 @@ +"""add invoice id column + +Revision ID: 4a0f4efe8c86 +Revises: 5b37c3bc99af +Create Date: 2024-03-21 10:27:45.493789+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "4a0f4efe8c86" +down_revision = "5b37c3bc99af" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "payments_transactions", + sa.Column("stripe_invoice_id", sa.String(), nullable=True), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("payments_transactions", "stripe_invoice_id") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/4d007819e61a_add_license_type_to_pricing_plan.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4d007819e61a_add_license_type_to_pricing_plan.py new file mode 100644 index 00000000000..03b117ca485 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4d007819e61a_add_license_type_to_pricing_plan.py @@ -0,0 +1,25 @@ +"""add LICENSE type to pricing plan + +Revision ID: 4d007819e61a +Revises: 38c9ac332c58 +Create Date: 2024-12-09 14:25:45.024814+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "4d007819e61a" +down_revision = "38c9ac332c58" +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute(sa.DDL("ALTER TYPE pricingplanclassification ADD VALUE 'LICENSE'")) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + pass + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/4e7d8719855b_add_index_to_projects_metadata.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4e7d8719855b_add_index_to_projects_metadata.py new file mode 100644 index 00000000000..b4961eb7306 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4e7d8719855b_add_index_to_projects_metadata.py @@ -0,0 +1,34 @@ +"""add index to projects_metadata + +Revision ID: 4e7d8719855b +Revises: ba9c4816a31b +Create Date: 2025-05-21 11:48:34.062860+00:00 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "4e7d8719855b" +down_revision = "ba9c4816a31b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index( + "idx_projects_metadata_root_parent_project_uuid", + "projects_metadata", + ["root_parent_project_uuid"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "idx_projects_metadata_root_parent_project_uuid", table_name="projects_metadata" + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f31760a63ba_add_data_to_licensed_items.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f31760a63ba_add_data_to_licensed_items.py new file mode 100644 index 00000000000..94acfc1df24 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/4f31760a63ba_add_data_to_licensed_items.py @@ -0,0 +1,96 @@ +"""add data to licensed_items + +Revision ID: 4f31760a63ba +Revises: 1bc517536e0a +Create Date: 2025-01-29 16:51:16.453069+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "4f31760a63ba" +down_revision = "1bc517536e0a" +branch_labels = None +depends_on = None + + +def upgrade(): + + with op.batch_alter_table("licensed_items") as batch_op: + batch_op.alter_column( + "name", + new_column_name="licensed_resource_name", + existing_type=sa.String(), + nullable=False, + ) + batch_op.alter_column( + "pricing_plan_id", + existing_type=sa.Integer(), + nullable=True, + ) + batch_op.alter_column( + "product_name", + existing_type=sa.String(), + nullable=True, + ) + + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_items", + sa.Column( + "licensed_resource_data", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + ) + op.add_column( + "licensed_items", + sa.Column( + "trashed", + sa.DateTime(timezone=True), + nullable=True, + comment="The date and time when the licensed_item was marked as trashed. Null if the licensed_item has not been trashed [default].", + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("licensed_items", "trashed") + op.drop_column("licensed_items", "licensed_resource_data") + # ### end Alembic commands ### + + # Delete rows with null values in pricing_plan_id and product_name + op.execute( + sa.DDL( + """ + DELETE FROM licensed_items + WHERE pricing_plan_id IS NULL OR product_name IS NULL; + """ + ) + ) + print( + "Warning: Rows with null values in pricing_plan_id or product_name have been deleted." + ) + + with op.batch_alter_table("licensed_items") as batch_op: + + batch_op.alter_column( + "product_name", + existing_type=sa.String(), + nullable=False, + ) + batch_op.alter_column( + "pricing_plan_id", + existing_type=sa.Integer(), + nullable=False, + ) + batch_op.alter_column( + "licensed_resource_name", + new_column_name="name", + existing_type=sa.String(), + nullable=False, + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52a0e8148dd5_remove_submit_timestamp.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52a0e8148dd5_remove_submit_timestamp.py new file mode 100644 index 00000000000..8589578abe7 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52a0e8148dd5_remove_submit_timestamp.py @@ -0,0 +1,28 @@ +"""remove submit timestamp + +Revision ID: 52a0e8148dd5 +Revises: 77ac824a77ff +Create Date: 2024-12-16 14:55:15.114923+00:00 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '52a0e8148dd5' +down_revision = '77ac824a77ff' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('comp_tasks', 'submit') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column('comp_tasks', sa.Column('submit', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=True)) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py new file mode 100644 index 00000000000..403f571d0fa --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py @@ -0,0 +1,97 @@ +"""remove services_limitations + +Revision ID: 52b5c2466605 +Revises: 38fe651b4196 +Create Date: 2023-06-27 15:24:13.207340+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "52b5c2466605" +down_revision = "38fe651b4196" +branch_labels = None +depends_on = None + + +modified_timestamp_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS trigger_auto_update on services_limitations; +CREATE TRIGGER trigger_auto_update +BEFORE INSERT OR UPDATE ON services_limitations +FOR EACH ROW EXECUTE PROCEDURE services_limitations_auto_update_modified(); + """ +) + +update_modified_timestamp_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION services_limitations_auto_update_modified() +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index("idx_unique_gid_cluster_id_null", table_name="services_limitations") + op.drop_table("services_limitations") + # ### end Alembic commands ### + # custom + op.execute("DROP FUNCTION services_limitations_auto_update_modified();") + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "services_limitations", + sa.Column("gid", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("cluster_id", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column("ram", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column("cpu", sa.NUMERIC(), autoincrement=False, nullable=True), + sa.Column("vram", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column("gpu", sa.INTEGER(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["cluster_id"], + ["clusters.id"], + name="fk_services_limitations_to_clusters_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_services_limitations_to_groups_gid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("gid", "cluster_id", name="gid_cluster_id_uniqueness"), + ) + op.create_index( + "idx_unique_gid_cluster_id_null", "services_limitations", ["gid"], unique=False + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52cf00912ad9_project_comments_minor_improvements.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52cf00912ad9_project_comments_minor_improvements.py new file mode 100644 index 00000000000..aa66671897d --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52cf00912ad9_project_comments_minor_improvements.py @@ -0,0 +1,42 @@ +"""project comments minor improvements + +Revision ID: 52cf00912ad9 +Revises: af5de00bf4cf +Create Date: 2023-06-21 13:05:19.531115+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "52cf00912ad9" +down_revision = "af5de00bf4cf" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "projects_comments", "user_id", existing_type=sa.BIGINT(), nullable=True + ) + op.create_foreign_key( + "fk_projects_comments_user_id", + "projects_comments", + "users", + ["user_id"], + ["id"], + ondelete="SET NULL", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint( + "fk_projects_comments_user_id", "projects_comments", type_="foreignkey" + ) + op.alter_column( + "projects_comments", "user_id", existing_type=sa.BIGINT(), nullable=False + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/53e095260441_add_projects_access_rights_and_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/53e095260441_add_projects_access_rights_and_.py index 65b1f6516b4..c27d0faa73f 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/53e095260441_add_projects_access_rights_and_.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/53e095260441_add_projects_access_rights_and_.py @@ -7,6 +7,7 @@ """ import sqlalchemy as sa from alembic import op +from sqlalchemy.dialects.postgresql import JSONB # revision identifiers, used by Alembic. revision = "53e095260441" @@ -21,7 +22,7 @@ def upgrade(): "projects", sa.Column( "access_rights", - sa.dialects.postgresql.JSONB(), + JSONB, nullable=False, server_default=sa.text("'{}'::jsonb"), ), diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/542e6ee8a8ea_new_payments_autorecharge_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/542e6ee8a8ea_new_payments_autorecharge_table.py new file mode 100644 index 00000000000..fc43b119900 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/542e6ee8a8ea_new_payments_autorecharge_table.py @@ -0,0 +1,110 @@ +"""new payments autorecharge table + +Revision ID: 542e6ee8a8ea +Revises: f613247f5bb1 +Create Date: 2023-10-10 08:22:23.941072+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "542e6ee8a8ea" +down_revision = "f613247f5bb1" +branch_labels = None +depends_on = None + + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "payments_autorecharge" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "payments_autorecharge", + sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column( + "enabled", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column("primary_payment_method_id", sa.String(), nullable=False), + sa.Column( + "min_balance_in_usd", + sa.Numeric(scale=2), + server_default=sa.text("0"), + nullable=False, + ), + sa.Column("top_up_amount_in_usd", sa.Numeric(scale=2), nullable=False), + sa.Column("top_up_countdown", sa.Integer(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.CheckConstraint( + "(top_up_countdown >= 0) OR (top_up_countdown IS NULL)", + name="check_top_up_countdown_nonnegative", + ), + sa.ForeignKeyConstraint( + ["primary_payment_method_id"], + ["payments_methods.payment_method_id"], + name="fk_payments_autorecharge_primary_payment_method_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("primary_payment_method_id"), + sa.UniqueConstraint("wallet_id"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("payments_autorecharge") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5649397a81bf_add_checksum_to_database.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5649397a81bf_add_checksum_to_database.py new file mode 100644 index 00000000000..fc1612667ca --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5649397a81bf_add_checksum_to_database.py @@ -0,0 +1,35 @@ +"""add checksum to database + +Revision ID: 5649397a81bf +Revises: e3334cced752 +Create Date: 2023-09-20 08:39:58.776281+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "5649397a81bf" +down_revision = "e3334cced752" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "file_meta_data", + sa.Column( + "sha256_checksum", + sa.String(), + server_default=sa.text("NULL"), + nullable=True, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("file_meta_data", "sha256_checksum") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/57ab8c419ca6_project_nodes_modification_creation_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/57ab8c419ca6_project_nodes_modification_creation_.py new file mode 100644 index 00000000000..13773757821 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/57ab8c419ca6_project_nodes_modification_creation_.py @@ -0,0 +1,75 @@ +"""project_nodes modification + creation projects_node_to_pricing_unit + +Revision ID: 57ab8c419ca6 +Revises: b102946c8134 +Create Date: 2023-10-05 18:26:26.018893+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "57ab8c419ca6" +down_revision = "b102946c8134" +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute("ALTER TABLE projects_nodes DROP CONSTRAINT projects_nodes_pkey") + op.execute( + "ALTER TABLE projects_nodes ADD COLUMN project_node_id SERIAL PRIMARY KEY" + ) + op.execute( + "ALTER TABLE projects_nodes ADD CONSTRAINT projects_nodes__node_project UNIQUE (node_id, project_uuid)" + ) + + op.create_index( + op.f("ix_projects_nodes_node_id"), "projects_nodes", ["node_id"], unique=False + ) + op.create_index( + op.f("ix_projects_nodes_project_uuid"), + "projects_nodes", + ["project_uuid"], + unique=False, + ) + + op.create_table( + "projects_node_to_pricing_unit", + sa.Column("project_node_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_unit_id", sa.BigInteger(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["project_node_id"], + ["projects_nodes.project_node_id"], + name="fk_projects_nodes__project_node_to_pricing_unit__uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("project_node_id"), + ) + + +def downgrade(): + op.drop_table("projects_node_to_pricing_unit") + + op.drop_index(op.f("ix_projects_nodes_project_uuid"), table_name="projects_nodes") + op.drop_index(op.f("ix_projects_nodes_node_id"), table_name="projects_nodes") + + op.execute("ALTER TABLE projects_nodes DROP CONSTRAINT projects_nodes_pkey") + op.execute( + "ALTER TABLE projects_nodes DROP CONSTRAINT projects_nodes__node_project" + ) + op.execute("ALTER TABLE projects_nodes ADD PRIMARY KEY (node_id, project_uuid)") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/58b24613c3f7_add_metadata_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/58b24613c3f7_add_metadata_column.py new file mode 100644 index 00000000000..09c49c5f065 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/58b24613c3f7_add_metadata_column.py @@ -0,0 +1,31 @@ +"""add metadata column + +Revision ID: 58b24613c3f7 +Revises: 0cdf095b10fe +Create Date: 2023-06-29 15:48:37.110117+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "58b24613c3f7" +down_revision = "0cdf095b10fe" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_runs", + sa.Column("metadata", postgresql.JSONB(astext_type=sa.Text()), nullable=True), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("comp_runs", "metadata") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5ad02358751a_project_and_folder_trash_columns.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5ad02358751a_project_and_folder_trash_columns.py new file mode 100644 index 00000000000..2cd8adb00f0 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5ad02358751a_project_and_folder_trash_columns.py @@ -0,0 +1,73 @@ +"""project and folder trash columns + +Revision ID: 5ad02358751a +Revises: fce5d231e16d +Create Date: 2024-11-07 17:14:01.094583+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "5ad02358751a" +down_revision = "fce5d231e16d" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "folders_v2", + sa.Column( + "trashed_at", + sa.DateTime(timezone=True), + nullable=True, + comment="The date and time when the folder was marked as trashed.Null if the folder has not been trashed [default].", + ), + ) + op.add_column( + "folders_v2", + sa.Column( + "trashed_explicitly", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + comment="Indicates whether the folder was explicitly trashed by the user (true) or inherited its trashed status from a parent (false) [default].", + ), + ) + op.add_column( + "projects", + sa.Column( + "trashed_explicitly", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + comment="Indicates whether the project was explicitly trashed by the user (true) or inherited its trashed status from a parent (false) [default].", + ), + ) + op.alter_column( + "projects", + "trashed_at", + existing_type=postgresql.TIMESTAMP(timezone=True), + comment="The date and time when the project was marked as trashed. Null if the project has not been trashed [default].", + existing_nullable=True, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "projects", + "trashed_at", + existing_type=postgresql.TIMESTAMP(timezone=True), + comment=None, + existing_comment="The date and time when the project was marked as trashed. 
Null if the project has not been trashed [default].", + existing_nullable=True, + ) + op.drop_column("projects", "trashed_explicitly") + op.drop_column("folders_v2", "trashed_explicitly") + op.drop_column("folders_v2", "trashed_at") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b37c3bc99af_new_min_payment_amount_usd_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b37c3bc99af_new_min_payment_amount_usd_column.py new file mode 100644 index 00000000000..0ba4e985060 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5b37c3bc99af_new_min_payment_amount_usd_column.py @@ -0,0 +1,35 @@ +"""new min_payment_amount_usd column + +Revision ID: 5b37c3bc99af +Revises: 75f4afdd7a58 +Create Date: 2024-03-18 13:37:48.270611+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "5b37c3bc99af" +down_revision = "75f4afdd7a58" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "products_prices", + sa.Column( + "min_payment_amount_usd", + sa.Numeric(scale=2), + server_default=sa.text("10.00"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("products_prices", "min_payment_amount_usd") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5c62b190e124_migration_of_aws_ec2_instances_data_in_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5c62b190e124_migration_of_aws_ec2_instances_data_in_.py new file mode 100644 index 00000000000..ee51f46b103 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5c62b190e124_migration_of_aws_ec2_instances_data_in_.py @@ -0,0 +1,32 @@ +"""migration of aws_ec2_instances data in pricing units + +Revision ID: 5c62b190e124 +Revises: 7777d181dc1f +Create Date: 2023-10-17 05:15:29.780925+00:00 + +""" +from alembic import op +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) + +# revision identifiers, used by Alembic. +revision = "5c62b190e124" +down_revision = "7777d181dc1f" +branch_labels = None +depends_on = None + + +def upgrade(): + # One time migration to populate specific info with some reasonable value, it will be changed manually based on concrete needs + op.execute( + resource_tracker_pricing_units.update().values( + specific_info={"aws_ec2_instances": ["t3.medium"]} + ) + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + pass + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e27063c3ac9_set_privacy_hide_email_to_false_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e27063c3ac9_set_privacy_hide_email_to_false_.py new file mode 100644 index 00000000000..2381193baeb --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e27063c3ac9_set_privacy_hide_email_to_false_.py @@ -0,0 +1,34 @@ +"""set privacy_hide_email to false temporarily + +Revision ID: 5e27063c3ac9 +Revises: 4d007819e61a +Create Date: 2024-12-10 15:50:48.024204+00:00 + +""" +from alembic import op +from sqlalchemy.sql import expression + +# revision identifiers, used by Alembic. +revision = "5e27063c3ac9" +down_revision = "4d007819e61a" +branch_labels = None +depends_on = None + + +def upgrade(): + # Change the server_default of privacy_hide_email to false + with op.batch_alter_table("users") as batch_op: + batch_op.alter_column("privacy_hide_email", server_default=expression.false()) + + # Reset all to default: Update existing values in the database + op.execute("UPDATE users SET privacy_hide_email = false") + + +def downgrade(): + + # Revert the server_default of privacy_hide_email to true + with op.batch_alter_table("users") as batch_op: + batch_op.alter_column("privacy_hide_email", server_default=expression.true()) + + # Reset all to default: Revert existing values in the database to true + op.execute("UPDATE users SET privacy_hide_email = true") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e43b5ec7604_licensed_resources_add_priority_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e43b5ec7604_licensed_resources_add_priority_column.py new file mode 100644 index 00000000000..2505a68e37f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5e43b5ec7604_licensed_resources_add_priority_column.py @@ -0,0 +1,30 @@ +"""licensed_resources add priority column + +Revision ID: 5e43b5ec7604 +Revises: e8ffc0c96336 +Create Date: 2025-02-18 12:24:49.105989+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "5e43b5ec7604" +down_revision = "e8ffc0c96336" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_resources", + sa.Column("priority", sa.SmallInteger(), server_default="0", nullable=False), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("licensed_resources", "priority") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/5f88b513cd4c_add_user_email_col_to_purchases.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5f88b513cd4c_add_user_email_col_to_purchases.py new file mode 100644 index 00000000000..0ab593a294e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/5f88b513cd4c_add_user_email_col_to_purchases.py @@ -0,0 +1,30 @@ +"""add user email col to purchases + +Revision ID: 5f88b513cd4c +Revises: ecd4eadaa781 +Create Date: 2025-01-22 15:08:17.729337+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "5f88b513cd4c" +down_revision = "ecd4eadaa781" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("user_email", sa.String(), nullable=True), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("resource_tracker_licensed_items_purchases", "user_email") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/611f956aa3e3_licensed_items_checkout_purchase_email_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/611f956aa3e3_licensed_items_checkout_purchase_email_.py new file mode 100644 index 00000000000..9aab17c2231 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/611f956aa3e3_licensed_items_checkout_purchase_email_.py @@ -0,0 +1,49 @@ +"""licensed items checkout/purchase email mandatory + +Revision ID: 611f956aa3e3 +Revises: e71ea59858f4 +Create Date: 2025-02-07 12:47:56.235193+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "611f956aa3e3" +down_revision = "e71ea59858f4" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "resource_tracker_licensed_items_checkouts", + "user_email", + existing_type=sa.VARCHAR(), + nullable=False, + ) + op.alter_column( + "resource_tracker_licensed_items_purchases", + "user_email", + existing_type=sa.VARCHAR(), + nullable=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "resource_tracker_licensed_items_purchases", + "user_email", + existing_type=sa.VARCHAR(), + nullable=True, + ) + op.alter_column( + "resource_tracker_licensed_items_checkouts", + "user_email", + existing_type=sa.VARCHAR(), + nullable=True, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/617e0ecaf602_added_product_name_to_folders.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/617e0ecaf602_added_product_name_to_folders.py new file mode 100644 index 00000000000..3fab89f27bd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/617e0ecaf602_added_product_name_to_folders.py @@ -0,0 +1,37 @@ +"""added product name to folders + +Revision ID: 617e0ecaf602 +Revises: 21699ee569a7 +Create Date: 2024-07-31 08:26:46.073511+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "617e0ecaf602" +down_revision = "21699ee569a7" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("folders", sa.Column("product_name", sa.String(), nullable=False)) + op.create_foreign_key( + "fk_folders_to_products_name", + "folders", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint("fk_folders_to_products_name", "folders", type_="foreignkey") + op.drop_column("folders", "product_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/61fa093c21bb_adding_product_name_to_wallets.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/61fa093c21bb_adding_product_name_to_wallets.py new file mode 100644 index 00000000000..71a130de3dc --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/61fa093c21bb_adding_product_name_to_wallets.py @@ -0,0 +1,34 @@ +"""adding product_name to wallets + +Revision ID: 61fa093c21bb +Revises: ae72826e75fc +Create Date: 2023-09-20 14:42:10.661569+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "61fa093c21bb" +down_revision = "ae72826e75fc" +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column("wallets", sa.Column("product_name", sa.String(), nullable=True)) + op.execute( + sa.DDL("UPDATE wallets SET product_name = 'osparc' WHERE product_name IS NULL") + ) + op.alter_column( + "wallets", + "product_name", + existing_type=sa.String(), + nullable=False, + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("wallets", "product_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/624a029738b8_new_payments_method_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/624a029738b8_new_payments_method_table.py new file mode 100644 index 00000000000..fb93fc7755f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/624a029738b8_new_payments_method_table.py @@ -0,0 +1,114 @@ +"""New payments_method table + +Revision ID: 624a029738b8 +Revises: e7b3d381efe4 +Create Date: 2023-09-13 15:05:41.094403+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "624a029738b8" +down_revision = "e7b3d381efe4" +branch_labels = None +depends_on = None + + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "payments_methods" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "payments_methods", + sa.Column("payment_method_id", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("initiated_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "state", + sa.Enum( + "PENDING", + "SUCCESS", + "FAILED", + "CANCELED", + name="initpromptackflowstate", + ), + nullable=False, + server_default="PENDING", + ), + sa.Column("state_message", sa.Text(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("payment_method_id"), + ) + op.create_index( + op.f("ix_payments_methods_user_id"), + "payments_methods", + ["user_id"], + unique=False, + ) + op.create_index( + op.f("ix_payments_methods_wallet_id"), + "payments_methods", + ["wallet_id"], + unique=False, + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f("ix_payments_methods_wallet_id"), table_name="payments_methods") + op.drop_index(op.f("ix_payments_methods_user_id"), table_name="payments_methods") + op.drop_table("payments_methods") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/68777fdf9539_add_licensed_resources.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/68777fdf9539_add_licensed_resources.py new file mode 100644 index 00000000000..745e7a2e74d --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/68777fdf9539_add_licensed_resources.py @@ -0,0 +1,95 @@ +"""add licensed resources + +Revision ID: 68777fdf9539 +Revises: 061607911a22 +Create Date: 2025-02-09 10:24:50.533653+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "68777fdf9539" +down_revision = "061607911a22" +branch_labels = None +depends_on = None + + +# Reuse the existing Enum type +licensed_resource_type = postgresql.ENUM( + "VIP_MODEL", name="licensedresourcetype", create_type=False +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "licensed_resources", + sa.Column( + "licensed_resource_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("display_name", sa.String(), nullable=False), + sa.Column("licensed_resource_name", sa.String(), nullable=False), + sa.Column( + "licensed_resource_type", + licensed_resource_type, # Reuse existing Enum instead of redefining it + nullable=False, + ), + sa.Column( + "licensed_resource_data", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "trashed", + sa.DateTime(timezone=True), + nullable=True, + comment="The date and time when the licensed_resources was marked as trashed. Null if the licensed_resources has not been trashed [default].", + ), + sa.PrimaryKeyConstraint("licensed_resource_id"), + sa.UniqueConstraint( + "licensed_resource_name", + "licensed_resource_type", + name="uq_licensed_resource_name_type2", + ), + ) + # ### end Alembic commands ### + + # Migration of licensed resources from licensed_items table to new licensed_resources table + op.execute( + sa.DDL( + """ + INSERT INTO licensed_resources (display_name, licensed_resource_name, licensed_resource_type, licensed_resource_data, created, modified) + SELECT + display_name, + licensed_resource_name, + licensed_resource_type, + licensed_resource_data, + CURRENT_TIMESTAMP as created, + CURRENT_TIMESTAMP as modified + FROM licensed_items + """ + ) + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("licensed_resources") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/6da4357ce10f_add_heartbeat_timestamps.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6da4357ce10f_add_heartbeat_timestamps.py new file mode 100644 index 00000000000..d58259bfc0e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6da4357ce10f_add_heartbeat_timestamps.py @@ -0,0 +1,76 @@ +"""add heartbeat timestamps + +Revision ID: 6da4357ce10f +Revises: 9b33ef4c690a +Create Date: 2023-08-07 06:31:14.681513+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "6da4357ce10f" +down_revision = "9b33ef4c690a" +branch_labels = None +depends_on = None + + +modified_timestamp_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS trigger_auto_update on comp_tasks; +CREATE TRIGGER trigger_auto_update +BEFORE INSERT OR UPDATE ON comp_tasks +FOR EACH ROW EXECUTE PROCEDURE comp_tasks_auto_update_modified(); + """ +) + +update_modified_timestamp_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION comp_tasks_auto_update_modified() +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "comp_tasks", + sa.Column("last_heartbeat", sa.DateTime(timezone=True), nullable=True), + ) + op.add_column( + "comp_tasks", + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + ) + op.add_column( + "comp_tasks", + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + ) + # ### end Alembic commands ### + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + op.execute(sa.DDL("DROP TRIGGER IF EXISTS trigger_auto_update on comp_tasks;")) + op.execute(sa.DDL("DROP FUNCTION comp_tasks_auto_update_modified();")) + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("comp_tasks", "modified") + op.drop_column("comp_tasks", "created") + op.drop_column("comp_tasks", "last_heartbeat") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e91067932f2_adding_resource_tracker_container_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e91067932f2_adding_resource_tracker_container_table.py new file mode 100644 index 00000000000..29db67d4af6 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e91067932f2_adding_resource_tracker_container_table.py @@ -0,0 +1,57 @@ +"""adding resource tracker container table + +Revision ID: 6e91067932f2 +Revises: 52cf00912ad9 +Create Date: 2023-06-21 14:12:40.292816+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "6e91067932f2" +down_revision = "52cf00912ad9" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "resource_tracker_container", + sa.Column("container_id", sa.String(), nullable=False), + sa.Column("image", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column( + "service_settings_reservation_nano_cpus", sa.BigInteger(), nullable=True + ), + sa.Column( + "service_settings_reservation_memory_bytes", sa.BigInteger(), nullable=True + ), + sa.Column( + "service_settings_reservation_additional_info", + postgresql.JSONB(astext_type=sa.Text()), + nullable=False, + ), + sa.Column("container_cpu_usage_seconds_total", sa.Float(), nullable=False), + sa.Column("prometheus_created", sa.DateTime(timezone=True), nullable=False), + sa.Column( + "prometheus_last_scraped", sa.DateTime(timezone=True), nullable=False + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("container_id", name="resource_tracker_container_pkey"), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
###
+    op.drop_table("resource_tracker_container")
+    # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e9f34338072_updates_user_roles.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e9f34338072_updates_user_roles.py
new file mode 100644
index 00000000000..4956503c512
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/6e9f34338072_updates_user_roles.py
@@ -0,0 +1,31 @@
+"""Updates user roles
+
+Revision ID: 6e9f34338072
+Revises: 7c552b906888
+Create Date: 2023-09-26 12:29:23.376889+00:00
+
+"""
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "6e9f34338072"
+down_revision = "7c552b906888"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    op.execute("ALTER TYPE userrole ADD VALUE 'PRODUCT_OWNER'")
+
+
+def downgrade():
+    # NOTE: Downgrading newly added enum values requires re-building the entire enum!
+    op.execute("ALTER TYPE userrole RENAME TO userrole_old")
+    op.execute(
+        "CREATE TYPE userrole AS ENUM('ANONYMOUS', 'GUEST', 'USER', 'TESTER', 'ADMIN')"
+    )
+    op.execute(
+        "ALTER TABLE users ALTER COLUMN role TYPE userrole USING "
+        "role::text::userrole"
+    )
+    op.execute("DROP TYPE userrole_old")
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/71ea254837b0_remove_projects_to_projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/71ea254837b0_remove_projects_to_projects_nodes.py
new file mode 100644
index 00000000000..883b6e48379
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/71ea254837b0_remove_projects_to_projects_nodes.py
@@ -0,0 +1,136 @@
+"""remove projects_to_projects_nodes
+
+Revision ID: 71ea254837b0
+Revises: c8f072c72adc
+Create Date: 2023-06-19 13:52:39.161616+00:00
+
+"""
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "71ea254837b0"
+down_revision = "c8f072c72adc"
+branch_labels = None
+depends_on = None
+
+
+# TRIGGERS -----------------
+drop_projects_to_projects_nodes_deleted_trigger = sa.DDL(
+    "DROP TRIGGER IF EXISTS entry_deleted on projects;"
+)
+projects_to_projects_nodes_deleted_trigger = sa.DDL(
+    """
+DROP TRIGGER IF EXISTS entry_deleted on projects;
+CREATE TRIGGER entry_deleted
+AFTER DELETE ON projects
+FOR EACH ROW
+EXECUTE FUNCTION delete_orphaned_project_nodes();
+    """
+)
+
+# PROCEDURES -------------------
+drop_delete_orphaned_project_nodes_procedure = sa.DDL(
+    "DROP FUNCTION delete_orphaned_project_nodes();"
+)
+delete_orphaned_project_nodes_procedure = sa.DDL(
+    """
+CREATE OR REPLACE FUNCTION delete_orphaned_project_nodes()
+RETURNS TRIGGER AS $$
+BEGIN
+    DELETE FROM projects_nodes
+    WHERE NOT EXISTS (
+        SELECT 1 FROM projects_to_projects_nodes
+        WHERE projects_to_projects_nodes.node_id = projects_nodes.node_id
+    );
+    RETURN NULL;
+END;
+$$ LANGUAGE plpgsql;
+    """
+)
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust!
### + op.drop_table("projects_to_projects_nodes") + op.add_column( + "projects_nodes", sa.Column("project_uuid", sa.String(), nullable=False) + ) + # custom + op.drop_constraint("projects_nodes_pkey", "projects_nodes", type_="primary") + op.create_primary_key( + "projects_nodes_pkey", + "projects_nodes", + ["project_uuid", "node_id"], + ) + op.create_foreign_key( + "fk_projects_to_projects_nodes_to_projects_uuid", + "projects_nodes", + "projects", + ["project_uuid"], + ["uuid"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + + # custom + op.execute(drop_projects_to_projects_nodes_deleted_trigger) + op.execute(drop_delete_orphaned_project_nodes_procedure) + + +def downgrade(): + # custom + op.drop_constraint("projects_nodes_pkey", "projects_nodes", type_="primary") + op.create_primary_key("projects_nodes_pkey", "projects_nodes", ["node_id"]) + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_projects_to_projects_nodes_to_projects_uuid", + "projects_nodes", + type_="foreignkey", + ) + op.drop_column("projects_nodes", "project_uuid") + op.create_table( + "projects_to_projects_nodes", + sa.Column("project_uuid", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("node_id", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["node_id"], + ["projects_nodes.node_id"], + name="fk_projects_to_projects_nodes_to_projects_nodes_node_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_to_projects_nodes_to_projects_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "project_uuid", + "node_id", + name="projects_to_projects_nodes_project_uuid_node_id_key", + ), + ) + # ### end Alembic commands ### + + # custom + op.execute(delete_orphaned_project_nodes_procedure) + op.execute(projects_to_projects_nodes_deleted_trigger) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/742123f0933a_hash_exising_api_secret_data.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/742123f0933a_hash_exising_api_secret_data.py new file mode 100644 index 00000000000..e223c24a615 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/742123f0933a_hash_exising_api_secret_data.py @@ -0,0 +1,28 @@ +"""hash existing api_secret data + +Revision ID: 742123f0933a +Revises: b0c988e3f348 +Create Date: 2025-03-13 09:39:43.895529+00:00 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "742123f0933a" +down_revision = "b0c988e3f348" +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute( + """ + UPDATE api_keys + SET api_secret = crypt(api_secret, gen_salt('bf', 10)) + """ + ) + + +def downgrade(): + pass diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/75f4afdd7a58_user_details_extras_and_rename_col.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/75f4afdd7a58_user_details_extras_and_rename_col.py new file mode 100644 index 00000000000..48387f3a84b --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/75f4afdd7a58_user_details_extras_and_rename_col.py @@ -0,0 +1,44 @@ +"""user_details extras and rename col + +Revision ID: 75f4afdd7a58 +Revises: 30e8b752e33e +Create Date: 2024-03-15 15:19:38.076627+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "75f4afdd7a58" +down_revision = "30e8b752e33e" +branch_labels = None +depends_on = None + + +def upgrade(): + op.alter_column( + "users_pre_registration_details", + "company_name", + new_column_name="institution", + existing_type=sa.String(), + ) + op.add_column( + "users_pre_registration_details", + sa.Column( + "extras", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=True, + ), + ) + + +def downgrade(): + op.alter_column( + "users_pre_registration_details", + "institution", + new_column_name="company_name", + existing_type=sa.String(), + ) + op.drop_column("users_pre_registration_details", "extras") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7604e65e2f83_renamed_study_tags_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7604e65e2f83_renamed_study_tags_table.py new file mode 100644 index 00000000000..69b44c3f40c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7604e65e2f83_renamed_study_tags_table.py @@ -0,0 +1,29 @@ +"""renamed study_tags table + +Revision ID: 7604e65e2f83 +Revises: 617e0ecaf602 +Create Date: 2024-08-23 12:03:59.328670+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "7604e65e2f83" +down_revision = "617e0ecaf602" +branch_labels = None +depends_on = None + + +def upgrade(): + op.rename_table("study_tags", "projects_tags") + + # Rename the column from study_id to project_id in the renamed table + op.alter_column("projects_tags", "study_id", new_column_name="project_id") + + +def downgrade(): + # Reverse the column rename from project_id to study_id + op.alter_column("projects_tags", "project_id", new_column_name="study_id") + + # Reverse the table rename from projects_tags to study_tags + op.rename_table("projects_tags", "study_tags") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/763666c698fb_resource_tracker_refactoring.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/763666c698fb_resource_tracker_refactoring.py new file mode 100644 index 00000000000..dede73cc51b --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/763666c698fb_resource_tracker_refactoring.py @@ -0,0 +1,140 @@ +"""resource tracker refactoring + +Revision ID: 763666c698fb +Revises: 2dda922a3261 +Create Date: 2023-09-05 09:44:50.592134+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '763666c698fb' +down_revision = '2dda922a3261' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + + # Resource tracker credit transactions (NEW) + op.create_table('resource_tracker_credit_transactions', + sa.Column('transaction_id', sa.BigInteger(), nullable=False), + sa.Column('product_name', sa.String(), nullable=False), + sa.Column('wallet_id', sa.BigInteger(), nullable=False), + sa.Column('wallet_name', sa.String(), nullable=False), + sa.Column('pricing_plan_id', sa.BigInteger(), nullable=True), + sa.Column('pricing_detail_id', sa.BigInteger(), nullable=True), + sa.Column('user_id', sa.BigInteger(), nullable=False), + sa.Column('user_email', sa.String(), nullable=False), + sa.Column('osparc_credits', sa.Numeric(scale=2), nullable=False), + sa.Column('transaction_status', sa.Enum('PENDING', 'BILLED', 'NOT_BILLED', 'REQUIRES_MANUAL_REVIEW', name='credittransactionstatus'), nullable=True), + sa.Column('transaction_classification', sa.Enum('ADD_WALLET_TOP_UP', 'DEDUCT_SERVICE_RUN', name='credittransactionclassification'), nullable=True), + sa.Column('service_run_id', sa.String(), nullable=True), + sa.Column('payment_transaction_id', sa.String(), nullable=True), + sa.Column('created', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False), + sa.Column('last_heartbeat_at', sa.DateTime(timezone=True), nullable=False), + sa.Column('modified', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False), + sa.PrimaryKeyConstraint('transaction_id') + ) + op.create_index(op.f('ix_resource_tracker_credit_transactions_product_name'), 'resource_tracker_credit_transactions', ['product_name'], unique=False) + op.create_index(op.f('ix_resource_tracker_credit_transactions_service_run_id'), 'resource_tracker_credit_transactions', ['service_run_id'], unique=False) + op.create_index(op.f('ix_resource_tracker_credit_transactions_transaction_classification'), 'resource_tracker_credit_transactions', ['transaction_classification'], unique=False) + op.create_index(op.f('ix_resource_tracker_credit_transactions_transaction_status'), 'resource_tracker_credit_transactions', 
['transaction_status'], unique=False)
+    op.create_index(op.f('ix_resource_tracker_credit_transactions_wallet_id'), 'resource_tracker_credit_transactions', ['wallet_id'], unique=False)
+
+    # Resource tracker wallets credit transactions (OLD)
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_product_name', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_service_run_id', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_transac_110f', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_transac_e117', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_wallet_id', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_index('ix_resource_tracker_wallets_credit_transactions_wallet_name', table_name='resource_tracker_wallets_credit_transactions')
+    op.drop_table('resource_tracker_wallets_credit_transactions')
+
+    sa.Enum(name="transactionbillingstatus").drop(op.get_bind(), checkfirst=False)
+    sa.Enum(name="transactionclassification").drop(op.get_bind(), checkfirst=False)
+
+    # Resource tracker pricing details
+    op.add_column('resource_tracker_pricing_details', sa.Column('simcore_default', sa.Boolean(), nullable=False))
+    # op.alter_column('resource_tracker_pricing_details', 'cost_per_unit',
+    #            existing_type=sa.NUMERIC(precision=3, scale=2),
+    #            nullable=False)
+    op.drop_column('resource_tracker_pricing_details', 'cost_per_unit')
+    op.add_column('resource_tracker_pricing_details', sa.Column('cost_per_unit', sa.NUMERIC(scale=2), nullable=False))
+    op.drop_index('ix_resource_tracker_pricing_details_unit_name', table_name='resource_tracker_pricing_details')
+    op.create_index(op.f('ix_resource_tracker_pricing_details_valid_to'), 'resource_tracker_pricing_details', ['valid_to'], unique=False)
+
+    # Resource tracker pricing plan to service
+    op.add_column('resource_tracker_pricing_plan_to_service', sa.Column('product', sa.String(), nullable=False))
+    op.drop_constraint('resource_tracker_pricing_plan_to_service__service_unique_key', 'resource_tracker_pricing_plan_to_service', type_='unique')
+    op.create_unique_constraint('rut_pricing_plan_to_service__service_product_unique_key', 'resource_tracker_pricing_plan_to_service', ['service_key', 'service_version', 'product'])
+
+    # Resource tracker pricing plans
+    op.drop_index('ix_resource_tracker_pricing_plans_name', table_name='resource_tracker_pricing_plans')
+
+    # Resource tracker service runs
+    op.drop_column('resource_tracker_service_runs', 'pricing_detail_cost_per_unit')
+    op.add_column('resource_tracker_service_runs', sa.Column('pricing_detail_cost_per_unit', sa.NUMERIC(scale=2), nullable=True))
+
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+
+    # Resource tracker pricing plans name
+    op.create_index('ix_resource_tracker_pricing_plans_name', 'resource_tracker_pricing_plans', ['name'], unique=False)
+
+    # Resource tracker pricing plan to service
+    op.drop_constraint('rut_pricing_plan_to_service__service_product_unique_key', 'resource_tracker_pricing_plan_to_service', type_='unique')
+    op.create_unique_constraint('resource_tracker_pricing_plan_to_service__service_unique_key', 'resource_tracker_pricing_plan_to_service', ['service_key', 'service_version'])
+    op.drop_column('resource_tracker_pricing_plan_to_service', 'product')
+
+    # Resource tracker pricing details
+    op.drop_index(op.f('ix_resource_tracker_pricing_details_valid_to'), table_name='resource_tracker_pricing_details')
+    op.create_index('ix_resource_tracker_pricing_details_unit_name', 'resource_tracker_pricing_details', ['unit_name'], unique=False)
+    op.drop_column('resource_tracker_pricing_details', 'cost_per_unit')
+    op.add_column('resource_tracker_pricing_details', sa.Column('cost_per_unit', sa.NUMERIC(precision=3, scale=2), nullable=False))
+    op.drop_column('resource_tracker_pricing_details', 'simcore_default')
+
+    # Resource tracker wallets credit transactions
+    op.create_table('resource_tracker_wallets_credit_transactions',
+    sa.Column('transaction_id', sa.BIGINT(), autoincrement=True, nullable=False),
+    sa.Column('product_name', sa.VARCHAR(), autoincrement=False, nullable=False),
+    sa.Column('wallet_id', sa.BIGINT(), autoincrement=False, nullable=False),
+    sa.Column('wallet_name', sa.VARCHAR(), autoincrement=False, nullable=False),
+    sa.Column('user_id', sa.BIGINT(), autoincrement=False, nullable=False),
+    sa.Column('user_email', sa.VARCHAR(), autoincrement=False, nullable=False),
+    sa.Column('credits', sa.NUMERIC(precision=3, scale=2), autoincrement=False, nullable=False),
+    sa.Column('transaction_status', postgresql.ENUM('PENDING', 'BILLED', 'NOT_BILLED', 'REQUIRES_MANUAL_REVIEW', name='transactionbillingstatus'), autoincrement=False, nullable=True),
+    sa.Column('transaction_classification', postgresql.ENUM('ADD_WALLET_TOP_UP', 'DEDUCT_SERVICE_RUN', name='transactionclassification'), autoincrement=False, nullable=True),
+    sa.Column('service_run_id', sa.BIGINT(), autoincrement=False, nullable=True),
+    sa.Column('modified', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('now()'), autoincrement=False, nullable=False),
+    sa.PrimaryKeyConstraint('transaction_id', name='resource_tracker_wallets_credit_transactions_pkey')
+    )
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_wallet_name', 'resource_tracker_wallets_credit_transactions', ['wallet_name'], unique=False)
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_wallet_id', 'resource_tracker_wallets_credit_transactions', ['wallet_id'], unique=False)
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_transac_e117', 'resource_tracker_wallets_credit_transactions', ['transaction_status'], unique=False)
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_transac_110f', 'resource_tracker_wallets_credit_transactions', ['transaction_classification'], unique=False)
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_service_run_id', 'resource_tracker_wallets_credit_transactions', ['service_run_id'], unique=False)
+    op.create_index('ix_resource_tracker_wallets_credit_transactions_product_name', 'resource_tracker_wallets_credit_transactions', ['product_name'], unique=False)
+
+    # Resource tracker credit transactions
+
op.drop_index(op.f('ix_resource_tracker_credit_transactions_wallet_id'), table_name='resource_tracker_credit_transactions') + op.drop_index(op.f('ix_resource_tracker_credit_transactions_transaction_status'), table_name='resource_tracker_credit_transactions') + op.drop_index(op.f('ix_resource_tracker_credit_transactions_transaction_classification'), table_name='resource_tracker_credit_transactions') + op.drop_index(op.f('ix_resource_tracker_credit_transactions_service_run_id'), table_name='resource_tracker_credit_transactions') + op.drop_index(op.f('ix_resource_tracker_credit_transactions_product_name'), table_name='resource_tracker_credit_transactions') + op.drop_table('resource_tracker_credit_transactions') + + sa.Enum(name="credittransactionstatus").drop(op.get_bind(), checkfirst=False) + sa.Enum(name="credittransactionclassification").drop(op.get_bind(), checkfirst=False) + + # Resource tracker service runs + op.drop_column('resource_tracker_service_runs', 'pricing_detail_cost_per_unit') + op.add_column('resource_tracker_service_runs', sa.Column('pricing_detail_cost_per_unit', sa.NUMERIC(precision=15, scale=2), nullable=True)) + + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/76d106b243c3_add_product_name_to_services_vendor_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/76d106b243c3_add_product_name_to_services_vendor_.py new file mode 100644 index 00000000000..8bf92adc91c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/76d106b243c3_add_product_name_to_services_vendor_.py @@ -0,0 +1,57 @@ +"""add product_name to services_vendor_secrets + +Revision ID: 76d106b243c3 +Revises: 3810966d1534 +Create Date: 2023-10-19 14:28:50.637834+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "76d106b243c3" +down_revision = "3810966d1534" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "services_vendor_secrets", + sa.Column("product_name", sa.String(), server_default="osparc", nullable=False), + ) + op.create_foreign_key( + "fk_services_name_products", + "services_vendor_secrets", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + + op.drop_constraint( + "services_vendor_secrets_pk", "services_vendor_secrets", type_="primary" + ) + op.create_primary_key( + "services_vendor_secrets_pk", + "services_vendor_secrets", + ["service_key", "service_base_version", "product_name"], + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint( + "fk_services_name_products", "services_vendor_secrets", type_="foreignkey" + ) + op.drop_column("services_vendor_secrets", "product_name") + # ### end Alembic commands ### + + op.create_primary_key( + "services_vendor_secrets_pk", + "services_vendor_secrets", + ["service_key", "service_base_version"], + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7777d181dc1f_new_invoice_url_column_in_payments_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7777d181dc1f_new_invoice_url_column_in_payments_.py new file mode 100644 index 00000000000..a8c0d080201 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7777d181dc1f_new_invoice_url_column_in_payments_.py @@ -0,0 +1,29 @@ +"""new invoice_url column in payments_transaction table + +Revision ID: 7777d181dc1f +Revises: 542e6ee8a8ea +Create Date: 2023-10-16 16:19:26.657533+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "7777d181dc1f" +down_revision = "542e6ee8a8ea" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "payments_transactions", sa.Column("invoice_url", sa.String(), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("payments_transactions", "invoice_url") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/77ac824a77ff_add_cols_to_licensed_items_purchases_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/77ac824a77ff_add_cols_to_licensed_items_purchases_.py new file mode 100644 index 00000000000..d829ece7e7a --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/77ac824a77ff_add_cols_to_licensed_items_purchases_.py @@ -0,0 +1,38 @@ +"""add cols to licensed_items_purchases table 3 + +Revision ID: 77ac824a77ff +Revises: d68b8128c23b +Create Date: 2024-12-10 16:42:14.041313+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "77ac824a77ff" +down_revision = "d68b8128c23b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_credit_transactions", + sa.Column( + "licensed_item_purchase_id", postgresql.UUID(as_uuid=True), nullable=True + ), + ) + # ### end Alembic commands ### + op.execute( + sa.DDL( + "ALTER TYPE credittransactionclassification ADD VALUE 'DEDUCT_LICENSE_PURCHASE'" + ) + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("resource_tracker_credit_transactions", "licensed_item_purchase_id") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/77fce87f1474_removing_resource_usage_containers_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/77fce87f1474_removing_resource_usage_containers_table.py new file mode 100644 index 00000000000..220968f4374 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/77fce87f1474_removing_resource_usage_containers_table.py @@ -0,0 +1,118 @@ +"""removing resource usage containers table + +Revision ID: 77fce87f1474 +Revises: fa26ab3555c8 +Create Date: 2023-08-26 14:08:26.154412+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "77fce87f1474" +down_revision = "fa26ab3555c8" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "ix_resource_tracker_container_product_name", + table_name="resource_tracker_container", + ) + op.drop_index( + "ix_resource_tracker_container_prometheus_last_scraped", + table_name="resource_tracker_container", + ) + op.drop_index( + "ix_resource_tracker_container_user_id", table_name="resource_tracker_container" + ) + op.drop_table("resource_tracker_container") + # ### end Alembic commands ### + sa.Enum(name="containerclassification").drop(op.get_bind(), checkfirst=False) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "resource_tracker_container", + sa.Column("container_id", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("product_name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "service_settings_reservation_additional_info", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + sa.Column( + "container_cpu_usage_seconds_total", + postgresql.DOUBLE_PRECISION(precision=53), + autoincrement=False, + nullable=False, + ), + sa.Column( + "prometheus_created", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=False, + ), + sa.Column( + "prometheus_last_scraped", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column("project_uuid", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("node_uuid", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("node_label", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("instance", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("project_name", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("user_email", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("service_key", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("service_version", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "cpu_limit", + sa.NUMERIC(precision=3, scale=2), + autoincrement=False, + nullable=False, + ), + sa.Column("memory_limit", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column( + "classification", + postgresql.ENUM( + "DYNAMIC_SIDECAR", 
"USER_SERVICE", name="containerclassification" + ), + autoincrement=False, + nullable=True, + ), + sa.PrimaryKeyConstraint("container_id", name="resource_tracker_container_pkey"), + ) + op.create_index( + "ix_resource_tracker_container_user_id", + "resource_tracker_container", + ["user_id"], + unique=False, + ) + op.create_index( + "ix_resource_tracker_container_prometheus_last_scraped", + "resource_tracker_container", + ["prometheus_last_scraped"], + unique=False, + ) + op.create_index( + "ix_resource_tracker_container_product_name", + "resource_tracker_container", + ["product_name"], + unique=False, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/78f24aaf3f78_new_products_ui_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/78f24aaf3f78_new_products_ui_column.py new file mode 100644 index 00000000000..3c36394729d --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/78f24aaf3f78_new_products_ui_column.py @@ -0,0 +1,36 @@ +"""new products ui column + +Revision ID: 78f24aaf3f78 +Revises: 68777fdf9539 +Create Date: 2025-02-12 16:06:09.815111+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "78f24aaf3f78" +down_revision = "68777fdf9539" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "products", + sa.Column( + "ui", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("products", "ui") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7994074c4d98_remove_cluster_to_groups.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7994074c4d98_remove_cluster_to_groups.py new file mode 100644 index 00000000000..404e537d5fa --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7994074c4d98_remove_cluster_to_groups.py @@ -0,0 +1,85 @@ +"""remove cluster_to_groups + +Revision ID: 7994074c4d98 +Revises: 381336fa8001 +Create Date: 2025-03-17 14:19:54.675073+00:00 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "7994074c4d98" +down_revision = "381336fa8001" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("cluster_to_groups") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "cluster_to_groups", + sa.Column("cluster_id", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column("gid", sa.BIGINT(), autoincrement=False, nullable=True), + sa.Column( + "read", + sa.BOOLEAN(), + server_default=sa.text("false"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "write", + sa.BOOLEAN(), + server_default=sa.text("false"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "delete", + sa.BOOLEAN(), + server_default=sa.text("false"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["cluster_id"], + ["clusters.id"], + name="fk_cluster_to_groups_id_clusters", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_cluster_to_groups_gid_groups", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint( + "cluster_id", "gid", name="cluster_to_groups_cluster_id_gid_key" + ), + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py new file mode 100644 index 00000000000..fe56f4c548f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7ad64e963e0f_add_timezone_comp_tasks.py @@ -0,0 +1,68 @@ +"""add_timezone_comp_tasks + +Revision ID: 7ad64e963e0f +Revises: b7f23f6d8aa2 +Create Date: 2024-11-27 22:28:51.898433+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "7ad64e963e0f" +down_revision = "b7f23f6d8aa2" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "comp_tasks", + "submit", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + op.alter_column( + "comp_tasks", + "start", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + op.alter_column( + "comp_tasks", + "end", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.alter_column( + "comp_tasks", + "end", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "comp_tasks", + "start", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "comp_tasks", + "submit", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7c552b906888_merge_61fa093c21bb_e108d9f673fe.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7c552b906888_merge_61fa093c21bb_e108d9f673fe.py new file mode 100644 index 00000000000..eac8ebb11c3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7c552b906888_merge_61fa093c21bb_e108d9f673fe.py @@ -0,0 +1,24 @@ +"""merge 61fa093c21bb e108d9f673fe + +Revision ID: 7c552b906888 +Revises: 61fa093c21bb, e108d9f673fe +Create Date: 2023-09-25 12:00:46.168477+00:00 + +""" + + +# revision identifiers, used by Alembic. +revision = "7c552b906888" +down_revision = ("61fa093c21bb", "e108d9f673fe") +branch_labels = None +depends_on = None + + +def upgrade(): + """Alembic left this function empty""" + pass + + +def downgrade(): + """Alembic left this function empty""" + pass diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/7d1c6425a51d_modify_licensed_items_db.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7d1c6425a51d_modify_licensed_items_db.py new file mode 100644 index 00000000000..4d720f20e18 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/7d1c6425a51d_modify_licensed_items_db.py @@ -0,0 +1,34 @@ +"""modify licensed items DB + +Revision ID: 7d1c6425a51d +Revises: 4f31760a63ba +Create Date: 2025-01-30 17:32:31.969343+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "7d1c6425a51d" +down_revision = "4f31760a63ba" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_items", sa.Column("display_name", sa.String(), nullable=False) + ) + op.drop_column("licensed_items", "license_key") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_items", + sa.Column("license_key", sa.VARCHAR(), autoincrement=False, nullable=True), + ) + op.drop_column("licensed_items", "display_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/8403acca8759_new_users_privacy_hide_username_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8403acca8759_new_users_privacy_hide_username_column.py new file mode 100644 index 00000000000..91e5a72207f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8403acca8759_new_users_privacy_hide_username_column.py @@ -0,0 +1,36 @@ +"""new users.privacy_hide_username column + +Revision ID: 8403acca8759 +Revises: f7f3c835f38a +Create Date: 2025-03-20 14:08:48.321587+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "8403acca8759" +down_revision = "f7f3c835f38a" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "users", + sa.Column( + "privacy_hide_username", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("users", "privacy_hide_username") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/8a742f3efdd9_new_tags_priority_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8a742f3efdd9_new_tags_priority_column.py new file mode 100644 index 00000000000..abede70a281 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8a742f3efdd9_new_tags_priority_column.py @@ -0,0 +1,27 @@ +"""new tags priority column + +Revision ID: 8a742f3efdd9 +Revises: 10729e07000d +Create Date: 2024-10-02 15:23:27.446241+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "8a742f3efdd9" +down_revision = "10729e07000d" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("tags", sa.Column("priority", sa.Integer(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("tags", "priority") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/8bfe65a5e294_add_cancellation_mark.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8bfe65a5e294_add_cancellation_mark.py new file mode 100644 index 00000000000..ecbe20b40e8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8bfe65a5e294_add_cancellation_mark.py @@ -0,0 +1,29 @@ +"""add cancellation mark + +Revision ID: 8bfe65a5e294 +Revises: 5ad02358751a +Create Date: 2024-11-08 14:40:59.266181+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "8bfe65a5e294" +down_revision = "5ad02358751a" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_runs", sa.Column("cancelled", sa.DateTime(timezone=True), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("comp_runs", "cancelled") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/8e1f83486be7_enhance_projects_tags_for_rut.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8e1f83486be7_enhance_projects_tags_for_rut.py new file mode 100644 index 00000000000..6c0d6608185 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8e1f83486be7_enhance_projects_tags_for_rut.py @@ -0,0 +1,90 @@ +"""enhance projects_tags for RUT + +Revision ID: 8e1f83486be7 +Revises: 8bfe65a5e294 +Create Date: 2024-11-15 09:12:57.789183+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "8e1f83486be7" +down_revision = "8bfe65a5e294" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "projects_tags", sa.Column("project_uuid_for_rut", sa.String(), nullable=True) + ) + + # Migrate + op.execute( + sa.DDL( + """ + UPDATE projects_tags + SET project_uuid_for_rut = projects.uuid + FROM projects + WHERE projects_tags.project_id = projects.id; + """ + ) + ) + + op.alter_column( + "projects_tags", + "project_uuid_for_rut", + existing_type=sa.String(), + nullable=False, + ) + op.alter_column( + "projects_tags", "project_id", existing_type=sa.BIGINT(), nullable=True + ) + op.drop_constraint( + "study_tags_study_id_tag_id_key", "projects_tags", type_="unique" + ) + op.create_unique_constraint( + "project_tags_project_uuid_unique", + "projects_tags", + ["project_uuid_for_rut", "tag_id"], + ) + op.drop_constraint("study_tags_study_id_fkey", "projects_tags", type_="foreignkey") + op.create_foreign_key( + "project_tags_project_id_fkey", + "projects_tags", + "projects", + ["project_id"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "project_tags_project_id_fkey", "projects_tags", type_="foreignkey" + ) + op.create_foreign_key( + "study_tags_study_id_fkey", + "projects_tags", + "projects", + ["project_id"], + ["id"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.drop_constraint( + "project_tags_project_uuid_unique", "projects_tags", type_="unique" + ) + op.create_unique_constraint( + "study_tags_study_id_tag_id_key", "projects_tags", ["project_id", "tag_id"] + ) + op.alter_column( + "projects_tags", "project_id", existing_type=sa.BIGINT(), nullable=False + ) + op.drop_column("projects_tags", "project_uuid_for_rut") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/8fa15c4c3977_add_cols_to_licensed_items_purchases_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8fa15c4c3977_add_cols_to_licensed_items_purchases_.py new file mode 100644 index 00000000000..6f425116490 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/8fa15c4c3977_add_cols_to_licensed_items_purchases_.py @@ -0,0 +1,45 @@ +"""add cols to licensed_items_purchases table + +Revision ID: 8fa15c4c3977 +Revises: 5e27063c3ac9 +Create Date: 2024-12-10 06:42:23.319239+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "8fa15c4c3977" +down_revision = "5e27063c3ac9" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("wallet_name", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("pricing_unit_cost_id", sa.BigInteger(), nullable=False), + ) + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("pricing_unit_cost", sa.Numeric(scale=2), nullable=True), + ) + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("num_of_seats", sa.SmallInteger(), nullable=False), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("resource_tracker_licensed_items_purchases", "num_of_seats") + op.drop_column("resource_tracker_licensed_items_purchases", "pricing_unit_cost") + op.drop_column("resource_tracker_licensed_items_purchases", "pricing_unit_cost_id") + op.drop_column("resource_tracker_licensed_items_purchases", "wallet_name") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/926c3eb2254e_new_description_ui_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/926c3eb2254e_new_description_ui_column.py new file mode 100644 index 00000000000..940f9213ed2 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/926c3eb2254e_new_description_ui_column.py @@ -0,0 +1,35 @@ +"""new description_ui column + +Revision ID: 926c3eb2254e +Revises: feca36c8e18f +Create Date: 2024-09-02 21:25:06.042365+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "926c3eb2254e" +down_revision = "feca36c8e18f" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "services_meta_data", + sa.Column( + "description_ui", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("services_meta_data", "description_ui") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/95d0932aaa83_add_invoice_pdf_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/95d0932aaa83_add_invoice_pdf_column.py new file mode 100644 index 00000000000..d0fb1767ae1 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/95d0932aaa83_add_invoice_pdf_column.py @@ -0,0 +1,30 @@ +"""add invoice pdf column + +Revision ID: 95d0932aaa83 +Revises: 4a0f4efe8c86 +Create Date: 2024-04-17 11:36:46.653089+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "95d0932aaa83" +down_revision = "4a0f4efe8c86" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "payments_transactions", + sa.Column("invoice_pdf_url", sa.String(), nullable=True), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("payments_transactions", "invoice_pdf_url") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b33ef4c690a_adding_wallets_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b33ef4c690a_adding_wallets_tables.py new file mode 100644 index 00000000000..bb5ebc1050f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b33ef4c690a_adding_wallets_tables.py @@ -0,0 +1,173 @@ +"""adding wallets tables + +Revision ID: 9b33ef4c690a +Revises: afc752d10a6c +Create Date: 2023-07-31 11:40:38.764685+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "9b33ef4c690a" +down_revision = "afc752d10a6c" +branch_labels = None +depends_on = None + + +# ------------------------ TRIGGERS +wallet_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS wallet_modification on wallets; +CREATE TRIGGER wallet_modification +AFTER INSERT ON wallets + FOR EACH ROW + EXECUTE PROCEDURE set_wallet_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_wallet_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_wallet_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO "wallet_to_groups" ("gid", "wallet_id", "read", "write", "delete") VALUES (NEW.owner, NEW.wallet_id, TRUE, TRUE, TRUE); + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "wallets", + sa.Column("wallet_id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("description", sa.String(), nullable=True), + sa.Column("owner", sa.BigInteger(), nullable=False), + sa.Column("thumbnail", sa.String(), nullable=True), + sa.Column( + "status", sa.Enum("ACTIVE", "INACTIVE", name="walletstatus"), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["owner"], + ["groups.gid"], + name="fk_wallets_gid_groups", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("wallet_id"), + ) + op.create_table( + "wallet_to_groups", + sa.Column("wallet_id", sa.BigInteger(), nullable=True), + sa.Column("gid", sa.BigInteger(), nullable=True), + sa.Column( + "read", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "write", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "delete", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_wallet_to_groups_gid_groups", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["wallet_id"], + ["wallets.wallet_id"], + name="fk_wallet_to_groups_id_wallets", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("wallet_id", "gid"), + ) + op.create_table( + "projects_to_wallet", + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_comments_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["wallet_id"], + ["wallets.wallet_id"], + name="fk_projects_wallet_wallets_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("project_uuid"), + ) + op.create_index( + 
op.f("ix_projects_to_wallet_project_uuid"), + "projects_to_wallet", + ["project_uuid"], + unique=False, + ) + # ### end Alembic commands ### + op.execute(assign_wallet_access_rights_to_owner_group_procedure) + op.execute(wallet_trigger) + + +def downgrade(): + op.execute(wallet_trigger) + op.execute(assign_wallet_access_rights_to_owner_group_procedure) + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_projects_to_wallet_project_uuid"), table_name="projects_to_wallet" + ) + op.drop_table("projects_to_wallet") + op.drop_table("wallet_to_groups") + op.drop_table("wallets") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b97b12cfe47_set_cluster_id_to_null.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b97b12cfe47_set_cluster_id_to_null.py new file mode 100644 index 00000000000..0dc5f4ce341 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9b97b12cfe47_set_cluster_id_to_null.py @@ -0,0 +1,47 @@ +"""set cluster id to null + +Revision ID: 9b97b12cfe47 +Revises: 9014ae5fd6e5 +Create Date: 2023-03-28 10:20:20.670233+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "9b97b12cfe47" +down_revision = "9014ae5fd6e5" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_comp_runs_cluster_id_clusters", "comp_runs", type_="foreignkey" + ) + op.create_foreign_key( + "fk_comp_runs_cluster_id_clusters", + "comp_runs", + "clusters", + ["cluster_id"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_comp_runs_cluster_id_clusters", "comp_runs", type_="foreignkey" + ) + op.create_foreign_key( + "fk_comp_runs_cluster_id_clusters", + "comp_runs", + "clusters", + ["cluster_id"], + ["id"], + onupdate="CASCADE", + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/9f381dcb9b95_add_workspaces_and_folders_v2.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9f381dcb9b95_add_workspaces_and_folders_v2.py new file mode 100644 index 00000000000..7302c3e22b1 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/9f381dcb9b95_add_workspaces_and_folders_v2.py @@ -0,0 +1,238 @@ +"""add_workspaces_and_folders_v2 + +Revision ID: 9f381dcb9b95 +Revises: 926c3eb2254e +Create Date: 2024-09-03 05:49:16.581965+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "9f381dcb9b95" +down_revision = "926c3eb2254e" +branch_labels = None +depends_on = None + + +# ------------------------ TRIGGERS +new_workspace_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS workspace_modification on workspaces; +CREATE TRIGGER workspace_modification +AFTER INSERT ON workspaces + FOR EACH ROW + EXECUTE PROCEDURE set_workspace_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_workspace_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_workspace_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO "workspaces_access_rights" ("gid", "workspace_id", "read", "write", "delete") VALUES (NEW.owner_primary_gid, NEW.workspace_id, TRUE, TRUE, TRUE); + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "workspaces", + sa.Column("workspace_id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("description", sa.String(), nullable=True), + sa.Column("thumbnail", sa.String(), nullable=True), + sa.Column("owner_primary_gid", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["owner_primary_gid"], + ["groups.gid"], + name="fk_workspaces_gid_groups", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("workspace_id"), + ) + op.create_table( + "folders_v2", + sa.Column("folder_id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("parent_folder_id", sa.BigInteger(), nullable=True), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=True), + sa.Column("workspace_id", sa.BigInteger(), nullable=True), + sa.Column("created_by_gid", sa.BigInteger(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["created_by_gid"], + ["groups.gid"], + name="fk_new_folders_to_groups_gid", + ondelete="SET NULL", + ), + sa.ForeignKeyConstraint( + ["parent_folder_id"], + ["folders_v2.folder_id"], + name="fk_new_folders_to_folders_id", + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_new_folders_to_products_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + name="fk_folders_to_user_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspaces.workspace_id"], + name="fk_folders_to_workspace_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("folder_id"), + ) + op.create_table( + "workspaces_access_rights", + sa.Column("workspace_id", sa.BigInteger(), nullable=True), + sa.Column("gid", sa.BigInteger(), nullable=True), + sa.Column( + "read", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "write", sa.Boolean(), 
server_default=sa.text("false"), nullable=False + ), + sa.Column( + "delete", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_workspaces_access_rights_gid_groups", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspaces.workspace_id"], + name="fk_workspaces_access_rights_id_workspaces", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("workspace_id", "gid"), + ) + op.create_table( + "projects_to_folders", + sa.Column("project_uuid", sa.String(), nullable=True), + sa.Column("folder_id", sa.BigInteger(), nullable=True), + sa.Column("user_id", sa.BigInteger(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["folder_id"], + ["folders_v2.folder_id"], + name="fk_projects_to_folders_to_folders_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_to_folders_to_projects_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + name="fk_projects_to_folders_to_user_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("project_uuid", "folder_id", "user_id"), + ) + op.add_column("projects", sa.Column("workspace_id", sa.BigInteger(), nullable=True)) + op.create_foreign_key( + "fk_projects_to_workspaces_id", + "projects", + "workspaces", + ["workspace_id"], + ["workspace_id"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + # ### end Alembic commands ### + op.execute(assign_workspace_access_rights_to_owner_group_procedure) + op.execute(new_workspace_trigger) + + +def downgrade(): + op.execute(new_workspace_trigger) + op.execute(assign_workspace_access_rights_to_owner_group_procedure) + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint("fk_projects_to_workspaces_id", "projects", type_="foreignkey") + op.drop_column("projects", "workspace_id") + op.drop_table("projects_to_folders") + op.drop_table("workspaces_access_rights") + op.drop_table("folders_v2") + op.drop_table("workspaces") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a3a58471b0f1_add_credit_transaction_classification_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a3a58471b0f1_add_credit_transaction_classification_.py new file mode 100644 index 00000000000..cef7f00e6bc --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a3a58471b0f1_add_credit_transaction_classification_.py @@ -0,0 +1,35 @@ +"""add credit transaction classification enums + +Revision ID: a3a58471b0f1 +Revises: f19905923355 +Create Date: 2025-01-14 13:44:05.025647+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "a3a58471b0f1" +down_revision = "f19905923355" +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute(sa.DDL("ALTER TYPE credittransactionstatus ADD VALUE 'IN_DEBT'")) + op.execute( + sa.DDL( + "ALTER TYPE credittransactionclassification ADD VALUE 'ADD_WALLET_EXCHANGE'" + ) + ) + op.execute( + sa.DDL( + "ALTER TYPE credittransactionclassification ADD VALUE 'DEDUCT_WALLET_EXCHANGE'" + ) + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + pass + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a53c3c153bc8_modify_licensed_items_resources_db.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a53c3c153bc8_modify_licensed_items_resources_db.py new file mode 100644 index 00000000000..3f07cd80eba --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a53c3c153bc8_modify_licensed_items_resources_db.py @@ -0,0 +1,159 @@ +"""modify licensed items/resources DB + +Revision ID: a53c3c153bc8 +Revises: 78f24aaf3f78 +Create Date: 2025-02-13 10:13:32.817207+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "a53c3c153bc8" +down_revision = "78f24aaf3f78" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "licensed_item_to_resource", + sa.Column("licensed_item_id", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column( + "licensed_resource_id", postgresql.UUID(as_uuid=True), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["licensed_item_id"], + ["licensed_items.licensed_item_id"], + name="fk_licensed_item_to_resource_licensed_item_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["licensed_resource_id"], + ["licensed_resources.licensed_resource_id"], + name="fk_licensed_item_to_resource_licensed_resource_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + ) + op.add_column("licensed_items", sa.Column("key", sa.String(), nullable=False)) + op.add_column("licensed_items", sa.Column("version", sa.String(), nullable=False)) + op.alter_column( + "licensed_items", "pricing_plan_id", existing_type=sa.BIGINT(), nullable=False + ) + op.alter_column( + "licensed_items", "product_name", existing_type=sa.VARCHAR(), nullable=False + ) + op.drop_constraint( + "uq_licensed_resource_name_type", "licensed_items", type_="unique" + ) + op.create_index( + "idx_licensed_items_key_version", + "licensed_items", + ["key", "version"], + unique=True, + ) + op.drop_column("licensed_items", "licensed_resource_data") + op.drop_column("licensed_items", "trashed") + op.drop_column("licensed_items", "licensed_resource_name") + op.add_column( + "resource_tracker_licensed_items_checkouts", + sa.Column("key", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_licensed_items_checkouts", + sa.Column("version", sa.String(), nullable=False), + ) + op.create_index( + "idx_licensed_items_checkouts_key_version", + "resource_tracker_licensed_items_checkouts", + ["key", "version"], + unique=False, + ) + op.add_column( + 
"resource_tracker_licensed_items_purchases", + sa.Column("key", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("version", sa.String(), nullable=False), + ) + op.create_index( + "idx_licensed_items_purchases_key_version", + "resource_tracker_licensed_items_purchases", + ["key", "version"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "idx_licensed_items_purchases_key_version", + table_name="resource_tracker_licensed_items_purchases", + ) + op.drop_column("resource_tracker_licensed_items_purchases", "version") + op.drop_column("resource_tracker_licensed_items_purchases", "key") + op.drop_index( + "idx_licensed_items_checkouts_key_version", + table_name="resource_tracker_licensed_items_checkouts", + ) + op.drop_column("resource_tracker_licensed_items_checkouts", "version") + op.drop_column("resource_tracker_licensed_items_checkouts", "key") + op.add_column( + "licensed_items", + sa.Column( + "licensed_resource_name", sa.VARCHAR(), autoincrement=False, nullable=False + ), + ) + op.add_column( + "licensed_items", + sa.Column( + "trashed", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=True, + comment="The date and time when the licensed_item was marked as trashed. Null if the licensed_item has not been trashed [default].", + ), + ) + op.add_column( + "licensed_items", + sa.Column( + "licensed_resource_data", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=True, + ), + ) + op.drop_index("idx_licensed_items_key_version", table_name="licensed_items") + op.create_unique_constraint( + "uq_licensed_resource_name_type", + "licensed_items", + ["licensed_resource_name", "licensed_resource_type"], + ) + op.alter_column( + "licensed_items", "product_name", existing_type=sa.VARCHAR(), nullable=True + ) + op.alter_column( + "licensed_items", "pricing_plan_id", existing_type=sa.BIGINT(), nullable=True + ) + op.drop_column("licensed_items", "version") + op.drop_column("licensed_items", "key") + op.drop_table("licensed_item_to_resource") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8762d5d43ae_add_is_directory_to_file_meta_data_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8762d5d43ae_add_is_directory_to_file_meta_data_table.py new file mode 100644 index 00000000000..8670153b464 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8762d5d43ae_add_is_directory_to_file_meta_data_table.py @@ -0,0 +1,35 @@ +"""add_is_directory_to_file_meta_data_table + +Revision ID: a8762d5d43ae +Revises: f3285aff5e84 +Create Date: 2023-07-10 14:00:48.388395+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "a8762d5d43ae" +down_revision = "f3285aff5e84" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "file_meta_data", + sa.Column( + "is_directory", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("file_meta_data", "is_directory") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8d336ca9379_idx_licensed_items_key_version_product.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8d336ca9379_idx_licensed_items_key_version_product.py new file mode 100644 index 00000000000..830b8221ec5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8d336ca9379_idx_licensed_items_key_version_product.py @@ -0,0 +1,38 @@ +"""idx licensed items - key/version/product + +Revision ID: a8d336ca9379 +Revises: 5e43b5ec7604 +Create Date: 2025-02-21 14:29:42.575724+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "a8d336ca9379" +down_revision = "5e43b5ec7604" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index("idx_licensed_items_key_version", table_name="licensed_items") + op.create_index( + "idx_licensed_items_key_version_product", + "licensed_items", + ["key", "version", "product_name"], + unique=True, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index("idx_licensed_items_key_version_product", table_name="licensed_items") + op.create_index( + "idx_licensed_items_key_version", + "licensed_items", + ["key", "version"], + unique=True, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8f0bacbbaef_product_issue_cols_nullable.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8f0bacbbaef_product_issue_cols_nullable.py index ee73e5fea4d..d8c6f9e747b 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8f0bacbbaef_product_issue_cols_nullable.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/a8f0bacbbaef_product_issue_cols_nullable.py @@ -5,6 +5,7 @@ Create Date: 2022-08-24 13:33:30.104287+00:00 """ + import sqlalchemy as sa from alembic import op @@ -22,7 +23,7 @@ def upgrade(): "issues_login_url", existing_type=sa.VARCHAR(), nullable=True, - existing_server_default=sa.text( + existing_server_default=sa.text( # type: ignore[arg-type] "'https://github.com/ITISFoundation/osparc-simcore/issues'::character varying" ), ) @@ -31,7 +32,7 @@ def upgrade(): "issues_new_url", existing_type=sa.VARCHAR(), nullable=True, - existing_server_default=sa.text( + existing_server_default=sa.text( # type: ignore[arg-type] "'https://github.com/ITISFoundation/osparc-simcore/issues/new'::character varying" ), ) @@ -45,7 +46,7 @@ def downgrade(): "issues_new_url", existing_type=sa.VARCHAR(), nullable=False, - existing_server_default=sa.text( + existing_server_default=sa.text( # type: ignore[arg-type] "'https://github.com/ITISFoundation/osparc-simcore/issues/new'::character varying" ), ) @@ -54,7 +55,7 @@ def downgrade(): "issues_login_url", existing_type=sa.VARCHAR(), nullable=False, - existing_server_default=sa.text( + existing_server_default=sa.text( # type: ignore[arg-type] "'https://github.com/ITISFoundation/osparc-simcore/issues'::character varying" ), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/aa6da21a0055_rename_usages_to_checkouts.py 
b/packages/postgres-database/src/simcore_postgres_database/migration/versions/aa6da21a0055_rename_usages_to_checkouts.py new file mode 100644 index 00000000000..882be09dd2c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/aa6da21a0055_rename_usages_to_checkouts.py @@ -0,0 +1,134 @@ +"""rename usages to checkouts + +Revision ID: aa6da21a0055 +Revises: 52a0e8148dd5 +Create Date: 2024-12-17 13:47:09.304574+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "aa6da21a0055" +down_revision = "52a0e8148dd5" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "resource_tracker_licensed_items_checkouts", + sa.Column( + "licensed_item_checkout_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("licensed_item_id", postgresql.UUID(as_uuid=True), nullable=True), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("user_email", sa.String(), nullable=True), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("service_run_id", sa.String(), nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("stopped_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("num_of_seats", sa.SmallInteger(), nullable=False), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["product_name", "service_run_id"], + [ + "resource_tracker_service_runs.product_name", + "resource_tracker_service_runs.service_run_id", + ], + name="resource_tracker_license_checkouts_service_run_id_fkey", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("licensed_item_checkout_id"), + ) + op.create_index( + op.f("ix_resource_tracker_licensed_items_checkouts_wallet_id"), + "resource_tracker_licensed_items_checkouts", + ["wallet_id"], + unique=False, + ) + op.drop_index( + "ix_resource_tracker_licensed_items_usage_wallet_id", + table_name="resource_tracker_licensed_items_usage", + ) + op.drop_table("resource_tracker_licensed_items_usage") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "resource_tracker_licensed_items_usage", + sa.Column( + "licensed_item_usage_id", + postgresql.UUID(), + server_default=sa.text("gen_random_uuid()"), + autoincrement=False, + nullable=False, + ), + sa.Column("wallet_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("user_email", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("product_name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("service_run_id", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "started_at", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=False, + ), + sa.Column( + "stopped_at", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=True, + ), + sa.Column("num_of_seats", sa.SMALLINT(), autoincrement=False, nullable=False), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "licensed_item_id", postgresql.UUID(), autoincrement=False, nullable=False + ), + sa.ForeignKeyConstraint( + ["product_name", "service_run_id"], + [ + "resource_tracker_service_runs.product_name", + "resource_tracker_service_runs.service_run_id", + ], + name="resource_tracker_license_checkouts_service_run_id_fkey", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint( + "licensed_item_usage_id", name="resource_tracker_licensed_items_usage_pkey" + ), + ) + op.create_index( + "ix_resource_tracker_licensed_items_usage_wallet_id", + "resource_tracker_licensed_items_usage", + ["wallet_id"], + unique=False, + ) + op.drop_index( + op.f("ix_resource_tracker_licensed_items_checkouts_wallet_id"), + table_name="resource_tracker_licensed_items_checkouts", + ) + op.drop_table("resource_tracker_licensed_items_checkouts") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/add0afaaf728_migrate_projects_workbench_step1.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/add0afaaf728_migrate_projects_workbench_step1.py new file mode 100644 index 00000000000..d0f01968ebe --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/add0afaaf728_migrate_projects_workbench_step1.py @@ -0,0 +1,58 @@ +"""migrate projects workbench step1 + +Revision ID: add0afaaf728 +Revises: 6e91067932f2 +Create Date: 2023-06-22 14:45:38.827559+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "add0afaaf728" +down_revision = "6e91067932f2" +branch_labels = None +depends_on = None + + +def upgrade(): + projects_table = sa.table( + "projects", + sa.column("uuid"), + sa.column("workbench"), + sa.column("creation_date"), + sa.column("last_change_date"), + ) + projects_nodes_table = sa.table( + "projects_nodes", + sa.column("project_uuid"), + sa.column("node_id"), + sa.column("created"), + sa.column("modified"), + ) + + connection = op.get_bind() + + for project_uuid, workbench, creation_date, last_change_date in connection.execute( + projects_table.select() + ): + for node_id in workbench.keys(): + connection.execute( + projects_nodes_table.insert().values( + project_uuid=project_uuid, + node_id=node_id, + created=creation_date, + modified=last_change_date, + ) + ) + + +def downgrade(): + projects_nodes_table = sa.table( + "projects_nodes", + sa.column("project_uuid"), + sa.column("node_id"), + ) + connection = op.get_bind() + + connection.execute(projects_nodes_table.delete()) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ae72826e75fc_resource_tracker_pricing_plan_to_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ae72826e75fc_resource_tracker_pricing_plan_to_.py new file mode 100644 index 00000000000..1e5eda69617 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ae72826e75fc_resource_tracker_pricing_plan_to_.py @@ -0,0 +1,34 @@ +"""resource tracker pricing plan to service remo unique constrain + +Revision ID: ae72826e75fc +Revises: e3334cced752 +Create Date: 2023-09-18 17:25:32.499378+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "ae72826e75fc" +down_revision = "e3334cced752" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "rut_pricing_plan_to_service__service_product_unique_key", + "resource_tracker_pricing_plan_to_service", + type_="unique", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_unique_constraint( + "rut_pricing_plan_to_service__service_product_unique_key", + "resource_tracker_pricing_plan_to_service", + ["service_key", "service_version", "product"], + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/af5de00bf4cf_new_projects_comments_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/af5de00bf4cf_new_projects_comments_table.py new file mode 100644 index 00000000000..13f9bf120bd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/af5de00bf4cf_new_projects_comments_table.py @@ -0,0 +1,62 @@ +"""new project comments table + +Revision ID: af5de00bf4cf +Revises: 71ea254837b0 +Create Date: 2023-06-19 08:41:09.835411+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "af5de00bf4cf" +down_revision = "71ea254837b0" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "projects_comments", + sa.Column("comment_id", sa.BigInteger(), autoincrement=True, nullable=False), + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("contents", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_comments_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("comment_id", name="projects_comments_pkey"), + ) + op.create_index( + op.f("ix_projects_comments_project_uuid"), + "projects_comments", + ["project_uuid"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_projects_comments_project_uuid"), table_name="projects_comments" + ) + op.drop_table("projects_comments") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/afc752d10a6c_add_waiting_for_resources_enum_field.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/afc752d10a6c_add_waiting_for_resources_enum_field.py new file mode 100644 index 00000000000..da7aff96689 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/afc752d10a6c_add_waiting_for_resources_enum_field.py @@ -0,0 +1,47 @@ +"""add waiting for resources enum field + +Revision ID: afc752d10a6c +Revises: ef931143b7cd +Create Date: 2023-07-26 13:20:10.928108+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "afc752d10a6c" +down_revision = "ef931143b7cd" +branch_labels = None +depends_on = None + + +def upgrade(): + # Step 1: Check if the new value already exists before attempting to add it + enum_type_name = "statetype" + new_value = "WAITING_FOR_RESOURCES" + + conn = op.get_bind() + result = conn.execute( + sa.DDL( + f"SELECT * FROM pg_enum WHERE enumtypid = (SELECT oid FROM pg_type WHERE typname = '{enum_type_name}') AND enumlabel = '{new_value}'" + ) + ) + value_exists = result.fetchone() is not None + + if not value_exists: + # Step 1: Use ALTER TYPE to add the new value to the existing enum + op.execute(sa.DDL(f"ALTER TYPE {enum_type_name} ADD VALUE '{new_value}'")) + + +def downgrade(): + # no need to downgrade the enum type, postgres does not allow to just remove a type + # instead the tables that use it are updated + op.execute( + sa.DDL( + """ +UPDATE comp_tasks SET state = 'PENDING' WHERE state = 'WAITING_FOR_RESOURCES'; +UPDATE comp_pipeline SET state = 'PENDING' WHERE state = 'WAITING_FOR_RESOURCES'; +UPDATE comp_runs SET result = 'PENDING' WHERE result = 'WAITING_FOR_RESOURCES'; + """ + ) + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b0c988e3f348_add_index_to_api_key_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b0c988e3f348_add_index_to_api_key_column.py new file mode 100644 index 00000000000..378d595c071 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b0c988e3f348_add_index_to_api_key_column.py @@ -0,0 +1,23 @@ +"""add index to api_key column + +Revision ID: b0c988e3f348 +Revises: f65f7786cd4b +Create Date: 2025-03-13 08:53:05.722855+00:00 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "b0c988e3f348" +down_revision = "f65f7786cd4b" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_index(op.f("ix_api_keys_api_key"), "api_keys", ["api_key"], unique=False) + + +def downgrade(): + op.drop_index(op.f("ix_api_keys_api_key"), table_name="api_keys") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b102946c8134_changes_in_pricing_plans.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b102946c8134_changes_in_pricing_plans.py new file mode 100644 index 00000000000..607bc364bf8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b102946c8134_changes_in_pricing_plans.py @@ -0,0 +1,294 @@ +"""changes in pricing plans + +Revision ID: b102946c8134 +Revises: 6e9f34338072 +Create Date: 2023-10-01 12:50:08.671566+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "b102946c8134" +down_revision = "6e9f34338072" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + + # CREATE RESOURCE_TRACKER_PRICING_UNIT_COSTS + op.create_table( + "resource_tracker_pricing_unit_costs", + sa.Column("pricing_unit_cost_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_plan_key", sa.String(), nullable=False), + sa.Column("pricing_unit_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_unit_name", sa.String(), nullable=False), + sa.Column("cost_per_unit", sa.Numeric(scale=2), nullable=False), + sa.Column("valid_from", sa.DateTime(timezone=True), nullable=False), + sa.Column("valid_to", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "specific_info", postgresql.JSONB(astext_type=sa.Text()), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column("comment", sa.String(), nullable=True), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("pricing_unit_cost_id"), + ) + op.create_index( + op.f("ix_resource_tracker_pricing_unit_costs_pricing_plan_id"), + "resource_tracker_pricing_unit_costs", + ["pricing_plan_id"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_pricing_unit_costs_pricing_unit_id"), + "resource_tracker_pricing_unit_costs", + ["pricing_unit_id"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_pricing_unit_costs_valid_to"), + "resource_tracker_pricing_unit_costs", + ["valid_to"], + unique=False, + ) + + # CREATE RESOURCE_TRACKER_PRICING_UNITS + op.create_table( + "resource_tracker_pricing_units", + sa.Column("pricing_unit_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("unit_name", sa.String(), nullable=False), + sa.Column("default", sa.Boolean(), nullable=False), + sa.Column( + "specific_info", postgresql.JSONB(astext_type=sa.Text()), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["pricing_plan_id"], + ["resource_tracker_pricing_plans.pricing_plan_id"], + name="fk_resource_tracker_pricing_units_pricing_plan_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("pricing_unit_id"), + sa.UniqueConstraint( + "pricing_plan_id", "unit_name", name="pricing_plan_and_unit_constrain_key" + ), + ) + op.create_index( + op.f("ix_resource_tracker_pricing_units_pricing_plan_id"), + "resource_tracker_pricing_units", + ["pricing_plan_id"], + unique=False, + ) + + # DROP RESOURCE_TRACKER_PRICING_DETAILS + op.drop_index( + "ix_resource_tracker_pricing_details_pricing_plan_id", + table_name="resource_tracker_pricing_details", + ) + op.drop_index( + "ix_resource_tracker_pricing_details_valid_to", + table_name="resource_tracker_pricing_details", + ) + op.drop_table("resource_tracker_pricing_details") + + # MODIFY RESOURCE_TRACKER_CREDIT_TRANSACTIONS + op.add_column( + "resource_tracker_credit_transactions", + sa.Column("pricing_unit_id", sa.BigInteger(), nullable=True), + ) + op.add_column( + "resource_tracker_credit_transactions", + sa.Column("pricing_unit_cost_id", sa.BigInteger(), nullable=True), + ) + op.drop_column("resource_tracker_credit_transactions", "pricing_detail_id") + + # MODIFY RESOURCE_TRACKER_PRICING_PLAN_TO_SERVICE + op.add_column( 
+ "resource_tracker_pricing_plan_to_service", + sa.Column("service_default_plan", sa.Boolean(), nullable=False), + ) + op.drop_column("resource_tracker_pricing_plan_to_service", "product") + + # MODIFY RESOURCE_TRACKER_PRICING_PLANS + op.add_column( + "resource_tracker_pricing_plans", + sa.Column("display_name", sa.String(), nullable=False), + ) + op.add_column( + "resource_tracker_pricing_plans", + sa.Column("pricing_plan_key", sa.String(), nullable=False), + ) + op.create_unique_constraint( + "pricing_plans_pricing_plan_key", + "resource_tracker_pricing_plans", + ["product_name", "pricing_plan_key"], + ) + op.drop_column("resource_tracker_pricing_plans", "name") + + # MODIFY RESOURCE_TRACKER_SERVICE_RUNS + op.add_column( + "resource_tracker_service_runs", + sa.Column("pricing_unit_id", sa.BigInteger(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("pricing_unit_cost_id", sa.BigInteger(), nullable=True), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("pricing_unit_cost", sa.Numeric(scale=2), nullable=True), + ) + op.drop_column("resource_tracker_service_runs", "pricing_detail_id") + op.drop_column("resource_tracker_service_runs", "pricing_detail_cost_per_unit") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_service_runs", + sa.Column( + "pricing_detail_cost_per_unit", + sa.NUMERIC(), + autoincrement=False, + nullable=True, + ), + ) + op.add_column( + "resource_tracker_service_runs", + sa.Column("pricing_detail_id", sa.BIGINT(), autoincrement=False, nullable=True), + ) + op.drop_column("resource_tracker_service_runs", "pricing_unit_cost") + op.drop_column("resource_tracker_service_runs", "pricing_unit_cost_id") + op.drop_column("resource_tracker_service_runs", "pricing_unit_id") + op.add_column( + "resource_tracker_pricing_plans", + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=False), + ) + op.drop_constraint( + "pricing_plans_pricing_plan_key", + "resource_tracker_pricing_plans", + type_="unique", + ) + op.drop_column("resource_tracker_pricing_plans", "pricing_plan_key") + op.drop_column("resource_tracker_pricing_plans", "display_name") + op.add_column( + "resource_tracker_pricing_plan_to_service", + sa.Column("product", sa.VARCHAR(), autoincrement=False, nullable=False), + ) + op.drop_column("resource_tracker_pricing_plan_to_service", "service_default_plan") + op.add_column( + "resource_tracker_credit_transactions", + sa.Column("pricing_detail_id", sa.BIGINT(), autoincrement=False, nullable=True), + ) + op.drop_column("resource_tracker_credit_transactions", "pricing_unit_cost_id") + op.drop_column("resource_tracker_credit_transactions", "pricing_unit_id") + op.create_table( + "resource_tracker_pricing_details", + sa.Column("pricing_detail_id", sa.BIGINT(), autoincrement=True, nullable=False), + sa.Column("pricing_plan_id", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("unit_name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "valid_from", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=False, + ), + sa.Column( + "valid_to", + postgresql.TIMESTAMP(timezone=True), + autoincrement=False, + nullable=True, + ), + sa.Column( + "specific_info", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + sa.Column( + "created", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + 
autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(timezone=True), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column("simcore_default", sa.BOOLEAN(), autoincrement=False, nullable=False), + sa.Column("cost_per_unit", sa.NUMERIC(), autoincrement=False, nullable=False), + sa.ForeignKeyConstraint( + ["pricing_plan_id"], + ["resource_tracker_pricing_plans.pricing_plan_id"], + name="fk_resource_tracker_pricing_details_pricing_plan_id", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint( + "pricing_detail_id", name="resource_tracker_pricing_details_pkey" + ), + ) + op.create_index( + "ix_resource_tracker_pricing_details_valid_to", + "resource_tracker_pricing_details", + ["valid_to"], + unique=False, + ) + op.create_index( + "ix_resource_tracker_pricing_details_pricing_plan_id", + "resource_tracker_pricing_details", + ["pricing_plan_id"], + unique=False, + ) + op.drop_index( + op.f("ix_resource_tracker_pricing_units_pricing_plan_id"), + table_name="resource_tracker_pricing_units", + ) + op.drop_table("resource_tracker_pricing_units") + op.drop_index( + op.f("ix_resource_tracker_pricing_unit_costs_valid_to"), + table_name="resource_tracker_pricing_unit_costs", + ) + op.drop_index( + op.f("ix_resource_tracker_pricing_unit_costs_pricing_unit_id"), + table_name="resource_tracker_pricing_unit_costs", + ) + op.drop_index( + op.f("ix_resource_tracker_pricing_unit_costs_pricing_plan_id"), + table_name="resource_tracker_pricing_unit_costs", + ) + op.drop_table("resource_tracker_pricing_unit_costs") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b13ca15c7ef8_add_indices_to_file_meta_data_and_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b13ca15c7ef8_add_indices_to_file_meta_data_and_.py new file mode 100644 index 00000000000..72d02792ab2 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b13ca15c7ef8_add_indices_to_file_meta_data_and_.py @@ -0,0 +1,46 @@ +"""add indices to file_meta_data and projects tables + +Revision ID: b13ca15c7ef8 +Revises: 0ed9f6eabeba +Create Date: 2024-04-26 08:11:52.163445+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "b13ca15c7ef8" +down_revision = "0ed9f6eabeba" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index( + op.f("ix_file_meta_data_is_directory"), + "file_meta_data", + ["is_directory"], + unique=False, + ) + op.create_index( + op.f("ix_file_meta_data_project_id"), + "file_meta_data", + ["project_id"], + unique=False, + ) + op.create_index( + op.f("ix_file_meta_data_user_id"), "file_meta_data", ["user_id"], unique=False + ) + op.create_index( + op.f("ix_projects_prj_owner"), "projects", ["prj_owner"], unique=False + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index(op.f("ix_projects_prj_owner"), table_name="projects") + op.drop_index(op.f("ix_file_meta_data_user_id"), table_name="file_meta_data") + op.drop_index(op.f("ix_file_meta_data_project_id"), table_name="file_meta_data") + op.drop_index(op.f("ix_file_meta_data_is_directory"), table_name="file_meta_data") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b39f2dc87ccd_add_templatetype_to_projects.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b39f2dc87ccd_add_templatetype_to_projects.py new file mode 100644 index 00000000000..61cbcb8496f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b39f2dc87ccd_add_templatetype_to_projects.py @@ -0,0 +1,44 @@ +"""add templateType to projects + +Revision ID: b39f2dc87ccd +Revises: fc1701bb7e93 +Create Date: 2025-05-14 11:59:27.033449+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "b39f2dc87ccd" +down_revision = "fc1701bb7e93" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + # Create enum type first + project_template_type = sa.Enum( + "TEMPLATE", "TUTORIAL", "HYPERTOOL", name="projecttemplatetype" + ) + project_template_type.create(op.get_bind()) + + op.add_column( + "projects", + sa.Column( + "template_type", + project_template_type, + nullable=True, + default=None, + ), + ) + # ### end Alembic commands ### + op.execute("UPDATE projects SET template_type='TEMPLATE' WHERE type='TEMPLATE'") + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("projects", "template_type") + # ### end Alembic commands ### + sa.Enum(name="projecttemplatetype").drop(op.get_bind()) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b4e1886bff95_new_service_vendor_secrets.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b4e1886bff95_new_service_vendor_secrets.py new file mode 100644 index 00000000000..77f3a4ec8c0 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b4e1886bff95_new_service_vendor_secrets.py @@ -0,0 +1,57 @@ +"""New service vendor secrets + +Revision ID: b4e1886bff95 +Revises: 0c084cb1091c +Create Date: 2023-06-01 15:41:23.571011+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "b4e1886bff95" +down_revision = "0c084cb1091c" +branch_labels = None +depends_on = None + + +def upgrade(): + + op.create_table( + "services_vendor_secrets", + sa.Column("service_key", sa.String(), nullable=False), + sa.Column("service_base_version", sa.String(), nullable=False), + sa.Column( + "secrets_map", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=False, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["service_key", "service_base_version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "service_key", "service_base_version", name="services_vendor_secrets_pk" + ), + ) + + +def downgrade(): + + op.drop_table("services_vendor_secrets") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py new file mode 100644 index 00000000000..b1e5bc9f30c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/b7f23f6d8aa2_added_distributed_comp_scheduler.py @@ -0,0 +1,33 @@ +"""added_distributed_comp_scheduler + +Revision ID: b7f23f6d8aa2 +Revises: c9db8bf5091e +Create Date: 2024-11-26 17:06:27.053774+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "b7f23f6d8aa2" +down_revision = "c9db8bf5091e" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "comp_runs", sa.Column("scheduled", sa.DateTime(timezone=True), nullable=True) + ) + op.add_column( + "comp_runs", sa.Column("processed", sa.DateTime(timezone=True), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("comp_runs", "processed") + op.drop_column("comp_runs", "scheduled") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ba9c4816a31b_new_pre_registration_columns.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ba9c4816a31b_new_pre_registration_columns.py new file mode 100644 index 00000000000..6bf5169538c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ba9c4816a31b_new_pre_registration_columns.py @@ -0,0 +1,140 @@ +"""new pre-registration columns + +Revision ID: ba9c4816a31b +Revises: b39f2dc87ccd +Create Date: 2025-05-19 15:21:40.182354+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "ba9c4816a31b" +down_revision = "b39f2dc87ccd" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + # Create the enum type first before using it + account_request_status = sa.Enum( + "PENDING", "APPROVED", "REJECTED", name="accountrequeststatus" + ) + account_request_status.create(op.get_bind(), checkfirst=True) + + op.add_column( + "users_pre_registration_details", + sa.Column( + "id", + sa.BigInteger(), + sa.Identity(always=False, start=1, cycle=False), + nullable=False, + ), + ) + op.add_column( + "users_pre_registration_details", + sa.Column( + "account_request_status", + account_request_status, # Use the created enum type + server_default="PENDING", # Simply use the string value as default + nullable=False, + ), + ) + op.add_column( + "users_pre_registration_details", + sa.Column( + "account_request_reviewed_by", + sa.Integer(), + nullable=True, + ), + ) + op.add_column( + "users_pre_registration_details", + sa.Column( + "account_request_reviewed_at", + sa.DateTime(timezone=True), + nullable=True, + ), + ) + op.add_column( + "users_pre_registration_details", + sa.Column("product_name", sa.String(), nullable=True), + ) + op.drop_constraint( + "users_pre_registration_details_pre_email_key", + "users_pre_registration_details", + type_="unique", + ) + op.create_foreign_key( + "fk_users_pre_registration_details_product_name", + "users_pre_registration_details", + "products", + ["product_name"], + ["name"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # Add foreign key for account_request_reviewed_by + op.create_foreign_key( + "fk_users_pre_registration_reviewed_by_user_id", + "users_pre_registration_details", + "users", + ["account_request_reviewed_by"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # Set primary key on id column + op.create_primary_key( + "users_pre_registration_details_pk", + "users_pre_registration_details", + ["id"], + ) + # Add composite unique constraint on pre_email and product_name + op.create_unique_constraint( + "users_pre_registration_details_pre_email_product_name_key", + "users_pre_registration_details", + ["pre_email", "product_name"], + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + # Drop the composite unique constraint + op.drop_constraint( + "users_pre_registration_details_pre_email_product_name_key", + "users_pre_registration_details", + type_="unique", + ) + op.drop_constraint( + "users_pre_registration_details_pk", + "users_pre_registration_details", + type_="primary", + ) + op.drop_constraint( + "fk_users_pre_registration_reviewed_by_user_id", + "users_pre_registration_details", + type_="foreignkey", + ) + op.drop_constraint( + "fk_users_pre_registration_details_product_name", + "users_pre_registration_details", + type_="foreignkey", + ) + op.create_unique_constraint( + "users_pre_registration_details_pre_email_key", + "users_pre_registration_details", + ["pre_email"], + ) + op.drop_column("users_pre_registration_details", "product_name") + op.drop_column("users_pre_registration_details", "account_request_reviewed_at") + op.drop_column("users_pre_registration_details", "account_request_reviewed_by") + op.drop_column("users_pre_registration_details", "account_request_status") + op.drop_column("users_pre_registration_details", "id") + + # Drop the enum type in downgrade + sa.Enum(name="accountrequeststatus").drop(op.get_bind(), checkfirst=True) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/baf0ee1c37dc_service_runs_credit_transaction_indexes.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/baf0ee1c37dc_service_runs_credit_transaction_indexes.py new file mode 100644 index 00000000000..5ff2f0709d8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/baf0ee1c37dc_service_runs_credit_transaction_indexes.py @@ -0,0 +1,58 @@ +"""service runs/credit_transaction indexes + +Revision ID: baf0ee1c37dc +Revises: 1c069f85d5fd +Create Date: 2024-04-29 15:42:52.926095+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "baf0ee1c37dc" +down_revision = "1c069f85d5fd" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "ix_resource_tracker_credit_transactions_product_name", + table_name="resource_tracker_credit_transactions", + ) + op.drop_index( + "ix_resource_tracker_credit_transactions_transaction_cla_7bac", + table_name="resource_tracker_credit_transactions", + ) + op.create_index( + "ix_resource_tracker_credit_transactions_status_running", + "resource_tracker_service_runs", + ["service_run_status"], + unique=False, + postgresql_where=sa.text("service_run_status = 'RUNNING'"), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index( + "ix_resource_tracker_credit_transactions_status_running", + table_name="resource_tracker_service_runs", + postgresql_where=sa.text("service_run_status = 'RUNNING'"), + ) + op.create_index( + "ix_resource_tracker_credit_transactions_transaction_cla_7bac", + "resource_tracker_credit_transactions", + ["transaction_classification"], + unique=False, + ) + op.create_index( + "ix_resource_tracker_credit_transactions_product_name", + "resource_tracker_credit_transactions", + ["product_name"], + unique=False, + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/be0dece4e67c_rm_created_ip_and_add_deleted_user_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/be0dece4e67c_rm_created_ip_and_add_deleted_user_.py new file mode 100644 index 00000000000..f53c722a7cd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/be0dece4e67c_rm_created_ip_and_add_deleted_user_.py @@ -0,0 +1,51 @@ +"""rm created_ip and add DELETED user status + +Revision ID: be0dece4e67c +Revises: 76d106b243c3 +Create Date: 2023-10-23 17:36:46.349925+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "be0dece4e67c" +down_revision = "76d106b243c3" +branch_labels = None +depends_on = None + +column_name = "status" +enum_typename = "userstatus" +new_value = "DELETED" + + +def upgrade(): + # SEE https://medium.com/makimo-tech-blog/upgrading-postgresqls-enum-type-with-sqlalchemy-using-alembic-migration-881af1e30abe + + with op.get_context().autocommit_block(): + op.execute(f"ALTER TYPE {enum_typename} ADD VALUE '{new_value}'") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("users", "created_ip") + # ### end Alembic commands ### + + +def downgrade(): + + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "users", + sa.Column("created_ip", sa.VARCHAR(), autoincrement=False, nullable=True), + ) + # ### end Alembic commands ### + + # NOTE: Downgrade new updates requires re-building the entire enum! + op.execute(f"ALTER TYPE {enum_typename} RENAME TO {enum_typename}_old") + op.execute( + f"CREATE TYPE {enum_typename} AS ENUM('CONFIRMATION_PENDING', 'ACTIVE', 'BANNED')" + ) + op.execute( + f"ALTER TABLE users ALTER COLUMN {column_name} TYPE {enum_typename} USING " + f"{column_name}::text::{enum_typename}" + ) + op.execute(f"DROP TYPE {enum_typename}_old") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c1d0e98cd289_adding_stripe_fields_to_product_prices.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c1d0e98cd289_adding_stripe_fields_to_product_prices.py new file mode 100644 index 00000000000..ca05b89bbb8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c1d0e98cd289_adding_stripe_fields_to_product_prices.py @@ -0,0 +1,56 @@ +"""adding stripe fields to product prices + +Revision ID: c1d0e98cd289 +Revises: 35724106de75 +Create Date: 2024-03-01 14:00:03.634947+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "c1d0e98cd289" +down_revision = "35724106de75" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "products_prices", + sa.Column( + "stripe_price_id", + sa.String(), + server_default="stripe price id missing!!", + nullable=False, + ), + ) + op.add_column( + "products_prices", + sa.Column( + "stripe_tax_rate_id", + sa.String(), + server_default="stripe tax rate id missing!!", + nullable=False, + ), + ) + # ### end Alembic commands ### + + op.alter_column( + "products_prices", + "stripe_price_id", + server_default=None, + ) + op.alter_column( + "products_prices", + "stripe_tax_rate_id", + server_default=None, + ) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("products_prices", "stripe_tax_rate_id") + op.drop_column("products_prices", "stripe_price_id") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c2d3acc313e1_adds_admin_to_userrole_enum.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c2d3acc313e1_adds_admin_to_userrole_enum.py index 7e4c950e9b5..fd82d0904ad 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c2d3acc313e1_adds_admin_to_userrole_enum.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c2d3acc313e1_adds_admin_to_userrole_enum.py @@ -23,6 +23,7 @@ def upgrade(): def downgrade(): + # NOTE: Downgrade new updates requires re-building the entire enum! op.execute("ALTER TYPE userrole RENAME TO userrole_old") op.execute("CREATE TYPE userrole AS ENUM('ANONYMOUS', 'GUEST', 'USER', 'TESTER')") op.execute( diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c4245e9e0f72_payment_transactions_states.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c4245e9e0f72_payment_transactions_states.py new file mode 100644 index 00000000000..29f79271d61 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c4245e9e0f72_payment_transactions_states.py @@ -0,0 +1,101 @@ +"""payment transactions states + +Revision ID: c4245e9e0f72 +Revises: fc6ea424f586 +Create Date: 2023-09-07 16:00:26.832441+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "c4245e9e0f72" +down_revision = "fc6ea424f586" +branch_labels = None +depends_on = None + + +def upgrade(): + connection = op.get_bind() + payment_transaction_state = postgresql.ENUM( + "PENDING", + "SUCCESS", + "FAILED", + "CANCELED", + name="paymenttransactionstate", + ) + payment_transaction_state.create(connection) + + op.add_column( + "payments_transactions", + sa.Column( + "state", + sa.Enum( + "PENDING", + "SUCCESS", + "FAILED", + "CANCELED", + name="paymenttransactionstate", + ), + nullable=False, + server_default="PENDING", + ), + ) + op.add_column( + "payments_transactions", sa.Column("state_message", sa.Text(), nullable=True) + ) + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET state = 'SUCCESS' WHERE success = true" + ) + ) + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET state = 'FAILED' WHERE success = false" + ) + ) + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET state = 'PENDING' WHERE success IS NULL" + ) + ) + connection.execute( + sa.DDL("UPDATE payments_transactions SET state_message = errors") + ) + + op.drop_column("payments_transactions", "success") + op.drop_column("payments_transactions", "errors") + + +def downgrade(): + op.add_column( + "payments_transactions", + sa.Column("errors", sa.TEXT(), autoincrement=False, nullable=True), + ) + op.add_column( + "payments_transactions", + sa.Column("success", sa.BOOLEAN(), autoincrement=False, nullable=True), + ) + + connection = op.get_bind() + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET success = true WHERE state = 'SUCCESS'" + ) + ) + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET success = false WHERE completed_at IS NOT NULL AND state != 'SUCCESS'" + ) + ) + connection.execute( + sa.DDL( + "UPDATE payments_transactions SET success = NULL WHERE completed_at IS NULL AND state != 'SUCCESS'" + ) + ) + + op.drop_column("payments_transactions", "state_message") + op.drop_column("payments_transactions", "state") + + sa.Enum(name="paymenttransactionstate").drop(connection, checkfirst=False) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c8f072c72adc_add_projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c8f072c72adc_add_projects_nodes.py new file mode 100644 index 00000000000..b4d1e48dd68 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c8f072c72adc_add_projects_nodes.py @@ -0,0 +1,167 @@ +"""add projects nodes + +Revision ID: c8f072c72adc +Revises: e0a2557dec27 +Create Date: 2023-06-13 16:26:26.920891+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "c8f072c72adc" +down_revision = "e0a2557dec27" +branch_labels = None +depends_on = None + +# TRIGGERS ----------------- +drop_projects_to_projects_nodes_deleted_trigger = sa.DDL( + "DROP TRIGGER IF EXISTS entry_deleted on projects;" +) +projects_to_projects_nodes_deleted_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS entry_deleted on projects; +CREATE TRIGGER entry_deleted +AFTER DELETE ON projects +FOR EACH ROW +EXECUTE FUNCTION delete_orphaned_project_nodes(); + """ +) +drop_modified_timestamp_trigger = sa.DDL( + "DROP TRIGGER IF EXISTS trigger_auto_update on projects_nodes;" + "DROP TRIGGER IF EXISTS trigger_auto_update on projects_to_projects_nodes;" +) +modified_timestamp_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS trigger_auto_update on projects_nodes; +CREATE TRIGGER trigger_auto_update +BEFORE INSERT OR UPDATE ON projects_nodes +FOR EACH ROW EXECUTE PROCEDURE projects_nodes_auto_update_modified(); +DROP TRIGGER IF EXISTS trigger_auto_update on projects_to_projects_nodes; +CREATE TRIGGER trigger_auto_update +BEFORE INSERT OR UPDATE ON projects_to_projects_nodes +FOR EACH ROW EXECUTE PROCEDURE projects_to_projects_nodes_auto_update_modified(); + """ +) + +# PROCEDURES ------------------- +drop_delete_orphaned_project_nodes_procedure = sa.DDL( + "DROP FUNCTION delete_orphaned_project_nodes();" +) +delete_orphaned_project_nodes_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION delete_orphaned_project_nodes() +RETURNS TRIGGER AS $$ +BEGIN + DELETE FROM projects_nodes + WHERE NOT EXISTS ( + SELECT 1 FROM projects_to_projects_nodes + WHERE projects_to_projects_nodes.node_id = projects_nodes.node_id + ); + RETURN NULL; +END; +$$ LANGUAGE plpgsql; + """ +) +drop_update_modified_timestamp_procedure = sa.DDL( + "DROP FUNCTION projects_nodes_auto_update_modified();" + "DROP FUNCTION projects_to_projects_nodes_auto_update_modified();" +) +update_modified_timestamp_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION projects_nodes_auto_update_modified() +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION projects_to_projects_nodes_auto_update_modified() +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "projects_nodes", + sa.Column("node_id", sa.String(), nullable=False), + sa.Column( + "required_resources", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=False, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("node_id"), + ) + op.create_table( + "projects_to_projects_nodes", + sa.Column("project_uuid", sa.String(), nullable=True), + sa.Column("node_id", sa.String(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["node_id"], + ["projects_nodes.node_id"], + name="fk_projects_to_projects_nodes_to_projects_nodes_node_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_to_projects_nodes_to_projects_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("project_uuid", "node_id"), + ) + # ### end Alembic commands ### + + # custom + op.execute(delete_orphaned_project_nodes_procedure) + op.execute(projects_to_projects_nodes_deleted_trigger) + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(drop_projects_to_projects_nodes_deleted_trigger) + op.execute(drop_delete_orphaned_project_nodes_procedure) + op.execute(drop_modified_timestamp_trigger) + op.execute(drop_update_modified_timestamp_procedure) + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("projects_to_projects_nodes") + op.drop_table("projects_nodes") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/c9db8bf5091e_trash_columns_in_workspaces.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c9db8bf5091e_trash_columns_in_workspaces.py new file mode 100644 index 00000000000..b61a9e21009 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/c9db8bf5091e_trash_columns_in_workspaces.py @@ -0,0 +1,57 @@ +"""trash columns in workspaces + +Revision ID: c9db8bf5091e +Revises: 8e1f83486be7 +Create Date: 2024-11-20 16:42:43.784855+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "c9db8bf5091e" +down_revision = "8e1f83486be7" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "workspaces", + sa.Column( + "trashed", + sa.DateTime(timezone=True), + nullable=True, + comment="The date and time when the workspace was marked as trashed. 
Null if the workspace has not been trashed [default].", + ), + ) + op.add_column( + "workspaces", + sa.Column( + "trashed_by", + sa.BigInteger(), + nullable=True, + comment="User who trashed the workspace, or null if not trashed or user is unknown.", + ), + ) + op.create_foreign_key( + "fk_workspace_trashed_by_user_id", + "workspaces", + "users", + ["trashed_by"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_workspace_trashed_by_user_id", "workspaces", type_="foreignkey" + ) + op.drop_column("workspaces", "trashed_by") + op.drop_column("workspaces", "trashed") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/cf8f743fd0b7_add_indexes.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/cf8f743fd0b7_add_indexes.py new file mode 100644 index 00000000000..40163061862 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/cf8f743fd0b7_add_indexes.py @@ -0,0 +1,90 @@ +"""add indexes + +Revision ID: cf8f743fd0b7 +Revises: 48604dfdc5f4 +Create Date: 2025-04-04 09:46:38.853675+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "cf8f743fd0b7" +down_revision = "48604dfdc5f4" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index( + "idx_project_to_groups_gid", "project_to_groups", ["gid"], unique=False + ) + op.create_index( + "idx_projects_last_change_date_desc", + "projects", + ["last_change_date"], + unique=False, + postgresql_using="btree", + postgresql_ops={"last_change_date": "DESC"}, + ) + op.create_index( + "ix_projects_partial_type", + "projects", + ["type"], + unique=False, + postgresql_where=sa.text("type = 'TEMPLATE'"), + ) + op.create_index( + "idx_project_to_folders_project_uuid", + "projects_to_folders", + ["project_uuid"], + unique=False, + ) + op.create_index( + "idx_project_to_folders_user_id", + "projects_to_folders", + ["user_id"], + unique=False, + ) + op.create_index( + "idx_projects_to_products_product_name", + "projects_to_products", + ["product_name"], + unique=False, + ) + op.create_index( + "idx_workspaces_access_rights_gid", + "workspaces_access_rights", + ["gid"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_index( + "idx_workspaces_access_rights_gid", table_name="workspaces_access_rights" + ) + op.drop_index( + "idx_projects_to_products_product_name", table_name="projects_to_products" + ) + op.drop_index("idx_project_to_folders_user_id", table_name="projects_to_folders") + op.drop_index( + "idx_project_to_folders_project_uuid", table_name="projects_to_folders" + ) + op.drop_index( + "ix_projects_partial_type", + table_name="projects", + postgresql_where=sa.text("type = 'TEMPLATE'"), + ) + op.drop_index( + "idx_projects_last_change_date_desc", + table_name="projects", + postgresql_using="btree", + postgresql_ops={"last_change_date": "DESC"}, + ) + op.drop_index("idx_project_to_groups_gid", table_name="project_to_groups") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0d544695487_rm_cols_in_payments_autorecharge.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0d544695487_rm_cols_in_payments_autorecharge.py new file mode 100644 index 00000000000..cd6fbd4323c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0d544695487_rm_cols_in_payments_autorecharge.py @@ -0,0 +1,46 @@ +"""rm cols in payments_autorecharge + +Revision ID: d0d544695487 +Revises: 2a4b4167e088 +Create Date: 2023-10-27 18:29:46.409910+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "d0d544695487" +down_revision = "2a4b4167e088" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "payments_autorecharge", + sa.Column("monthly_limit_in_usd", sa.Numeric(scale=2), nullable=True), + ) + op.drop_column("payments_autorecharge", "top_up_countdown") + op.drop_column("payments_autorecharge", "min_balance_in_usd") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "payments_autorecharge", + sa.Column( + "min_balance_in_usd", + sa.NUMERIC(), + server_default=sa.text("0"), + autoincrement=False, + nullable=False, + ), + ) + op.add_column( + "payments_autorecharge", + sa.Column("top_up_countdown", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.drop_column("payments_autorecharge", "monthly_limit_in_usd") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0e56c2d0a0d_new_services_comp_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0e56c2d0a0d_new_services_comp_table.py new file mode 100644 index 00000000000..160919d3e9c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d0e56c2d0a0d_new_services_comp_table.py @@ -0,0 +1,97 @@ +"""new services_comp table + +Revision ID: d0e56c2d0a0d +Revises: 19f3d9085636 +Create Date: 2024-07-17 16:15:49.970615+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "d0e56c2d0a0d" +down_revision = "19f3d9085636" +branch_labels = None +depends_on = None + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "services_compatibility" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "services_compatibility", + sa.Column("key", sa.String(), nullable=False), + sa.Column("version", sa.String(), nullable=False), + sa.Column( + "custom_policy", postgresql.JSONB(astext_type=sa.Text()), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column("modified_by", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["key", "version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["modified_by"], ["users.id"], onupdate="CASCADE", ondelete="SET NULL" + ), + sa.PrimaryKeyConstraint("key", "version", name="services_compatibility_pk"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("services_compatibility") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d1fafda96f4c_rm_dags_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d1fafda96f4c_rm_dags_table.py new file mode 100644 index 00000000000..45066eb3916 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d1fafda96f4c_rm_dags_table.py @@ -0,0 +1,49 @@ +"""rm dags table + +Revision ID: d1fafda96f4c +Revises: 481d5b472721 +Create Date: 2024-07-09 14:02:31.583952+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "d1fafda96f4c" +down_revision = "481d5b472721" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index("ix_dags_contact", table_name="dags") + op.drop_index("ix_dags_id", table_name="dags") + op.drop_index("ix_dags_key", table_name="dags") + op.drop_table("dags") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "dags", + sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False), + sa.Column("key", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("version", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("description", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column("contact", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "workbench", + postgresql.JSON(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + sa.PrimaryKeyConstraint("id", name="dags_pkey"), + ) + op.create_index("ix_dags_key", "dags", ["key"], unique=False) + op.create_index("ix_dags_id", "dags", ["id"], unique=False) + op.create_index("ix_dags_contact", "dags", ["contact"], unique=False) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d31c23845017_add_license_key.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d31c23845017_add_license_key.py new file mode 100644 index 00000000000..59856c49d52 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d31c23845017_add_license_key.py @@ -0,0 +1,29 @@ +"""add license key + +Revision ID: d31c23845017 +Revises: aa6da21a0055 +Create Date: 2024-12-18 11:11:52.644534+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "d31c23845017" +down_revision = "aa6da21a0055" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_items", sa.Column("license_key", sa.String(), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("licensed_items", "license_key") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d68b8128c23b_add_cols_to_licensed_items_purchases_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d68b8128c23b_add_cols_to_licensed_items_purchases_.py new file mode 100644 index 00000000000..da729aec544 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d68b8128c23b_add_cols_to_licensed_items_purchases_.py @@ -0,0 +1,28 @@ +"""add cols to licensed_items_purchases table 2 + +Revision ID: d68b8128c23b +Revises: 8fa15c4c3977 +Create Date: 2024-12-10 10:24:28.071216+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "d68b8128c23b" +down_revision = "8fa15c4c3977" +branch_labels = None +depends_on = None + + +def upgrade(): + op.drop_column("resource_tracker_licensed_items_purchases", "licensed_item_id") + op.add_column( + "resource_tracker_licensed_items_purchases", + sa.Column("licensed_item_id", postgresql.UUID(as_uuid=True), nullable=False), + ) + + +def downgrade(): + ... 
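+    # NOTE: downgrade is intentionally a no-op: upgrade() drops and re-creates
+    # `licensed_item_id`, so the previous column values cannot be restored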
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/d84edab53761_add_restriction_ondelete.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d84edab53761_add_restriction_ondelete.py new file mode 100644 index 00000000000..f8f79b258a8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/d84edab53761_add_restriction_ondelete.py @@ -0,0 +1,62 @@ +"""add restriction ondelete + +Revision ID: d84edab53761 +Revises: 163b11424cb1 +Create Date: 2025-02-25 09:18:14.541874+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "d84edab53761" +down_revision = "163b11424cb1" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_unique_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + ["licensed_resource_id"], + ) + op.drop_constraint( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + type_="foreignkey", + ) + op.create_foreign_key( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + "services_meta_data", + ["service_key", "service_version"], + ["key", "version"], + onupdate="CASCADE", + ondelete="RESTRICT", + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + type_="foreignkey", + ) + op.create_foreign_key( + "fk_rut_pricing_plan_to_service_key_and_version", + "resource_tracker_pricing_plan_to_service", + "services_meta_data", + ["service_key", "service_version"], + ["key", "version"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.drop_constraint( + "uq_licensed_item_to_resource_resource_id", + "licensed_item_to_resource", + type_="unique", + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/da8abd0d8e42_add_comp_runs_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/da8abd0d8e42_add_comp_runs_table.py index b462bf84043..892ec72c184 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/da8abd0d8e42_add_comp_runs_table.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/da8abd0d8e42_add_comp_runs_table.py @@ -7,6 +7,7 @@ """ import sqlalchemy as sa from alembic import op +from sqlalchemy.dialects.postgresql import ENUM # revision identifiers, used by Alembic. 
revision = "da8abd0d8e42" @@ -31,7 +32,7 @@ def upgrade(): sa.Column("iteration", sa.BigInteger(), autoincrement=False, nullable=False), sa.Column( "result", - sa.dialects.postgresql.ENUM( + ENUM( "NOT_STARTED", "PUBLISHED", "PENDING", diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py new file mode 100644 index 00000000000..3d3d6c6896a --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e05bdc5b3c7b_add_timezone_comp_runs.py @@ -0,0 +1,87 @@ +"""add_timezone_comp_runs + +Revision ID: e05bdc5b3c7b +Revises: 7ad64e963e0f +Create Date: 2024-11-27 22:51:21.112336+00:00 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "e05bdc5b3c7b" +down_revision = "7ad64e963e0f" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "comp_runs", + "created", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=False, + existing_server_default="now()", + ) + op.alter_column( + "comp_runs", + "modified", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=False, + existing_server_default="now()", + ) + op.alter_column( + "comp_runs", + "started", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + op.alter_column( + "comp_runs", + "ended", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "comp_runs", + "ended", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "comp_runs", + "started", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "comp_runs", + "modified", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=False, + existing_server_default="now()", + ) + op.alter_column( + "comp_runs", + "created", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=False, + existing_server_default="now()", + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py new file mode 100644 index 00000000000..769dbd72250 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py @@ -0,0 +1,113 @@ +"""add services limitations + +Revision ID: e0a2557dec27 +Revises: b4e1886bff95 +Create Date: 2023-06-08 08:03:44.715899+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "e0a2557dec27" +down_revision = "b4e1886bff95" +branch_labels = None +depends_on = None + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "services_limitations" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "services_limitations", + sa.Column("gid", sa.BigInteger(), nullable=False), + sa.Column("cluster_id", sa.BigInteger(), nullable=True), + sa.Column("ram", sa.BigInteger(), nullable=True), + sa.Column("cpu", sa.Numeric(), nullable=True), + sa.Column("vram", sa.BigInteger(), nullable=True), + sa.Column("gpu", sa.Integer(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["cluster_id"], + ["clusters.id"], + name="fk_services_limitations_to_clusters_id", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["gid"], + ["groups.gid"], + name="fk_services_limitations_to_groups_gid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("gid", "cluster_id", name="gid_cluster_id_uniqueness"), + ) + op.create_index( + "idx_unique_gid_cluster_id_null", + "services_limitations", + ["gid"], + unique=True, + postgresql_where=sa.text("cluster_id IS NULL"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "idx_unique_gid_cluster_id_null", + table_name="services_limitations", + postgresql_where=sa.text("cluster_id IS NULL"), + ) + op.drop_table("services_limitations") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e108d9f673fe_merge_5649397a81bf_ae72826e75fc.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e108d9f673fe_merge_5649397a81bf_ae72826e75fc.py new file mode 100644 index 00000000000..64c8b851ec3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e108d9f673fe_merge_5649397a81bf_ae72826e75fc.py @@ -0,0 +1,24 @@ +"""merge 5649397a81bf ae72826e75fc + +Revision ID: e108d9f673fe +Revises: 5649397a81bf, ae72826e75fc +Create Date: 2023-09-20 13:29:25.971362+00:00 + +""" + + +# revision identifiers, used by Alembic. 
+revision = "e108d9f673fe" +down_revision = ("5649397a81bf", "ae72826e75fc") +branch_labels = None +depends_on = None + + +def upgrade(): + """alembic left this empty""" + pass + + +def downgrade(): + """alembic left this empty""" + pass diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e3334cced752_new_products_prices_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e3334cced752_new_products_prices_table.py new file mode 100644 index 00000000000..96a968a3c78 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e3334cced752_new_products_prices_table.py @@ -0,0 +1,48 @@ +"""new products_prices table + +Revision ID: e3334cced752 +Revises: 624a029738b8 +Create Date: 2023-09-15 00:25:18.116227+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "e3334cced752" +down_revision = "624a029738b8" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "products_prices", + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("usd_per_credit", sa.Numeric(scale=2), nullable=False), + sa.Column("comment", sa.String(), nullable=False), + sa.Column( + "valid_from", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.CheckConstraint( + "usd_per_credit >= 0", name="non_negative_usd_per_credit_constraint" + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_products_prices_product_name", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("products_prices") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e5555076ef50_add_license_db_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e5555076ef50_add_license_db_tables.py new file mode 100644 index 00000000000..abdfafefdf7 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e5555076ef50_add_license_db_tables.py @@ -0,0 +1,171 @@ +"""add license db tables + +Revision ID: e5555076ef50 +Revises: e05bdc5b3c7b +Create Date: 2024-12-05 10:57:16.867891+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "e5555076ef50" +down_revision = "e05bdc5b3c7b" +branch_labels = None +depends_on = None + + +def upgrade(): + # CREATE EXTENSION pgcrypto; + op.execute( + """ + DO + $$ + BEGIN + IF EXISTS(SELECT * FROM pg_available_extensions WHERE name = 'pgcrypto') THEN + -- Create the extension + CREATE EXTENSION if not exists pgcrypto; + END IF; + END + $$; + """ + ) + + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "resource_tracker_licensed_items_purchases", + sa.Column( + "licensed_item_purchase_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("licensed_item_id", sa.BigInteger(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column( + "start_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "expire_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column("purchased_by_user", sa.BigInteger(), nullable=False), + sa.Column( + "purchased_at", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("licensed_item_purchase_id"), + ) + op.create_table( + "resource_tracker_licensed_items_usage", + sa.Column( + "licensed_item_usage_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("licensed_item_id", sa.String(), nullable=True), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("user_email", sa.String(), nullable=True), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("service_run_id", sa.String(), nullable=True), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("stopped_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("num_of_seats", sa.SmallInteger(), nullable=False), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["product_name", "service_run_id"], + [ + "resource_tracker_service_runs.product_name", + "resource_tracker_service_runs.service_run_id", + ], + name="resource_tracker_license_checkouts_service_run_id_fkey", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("licensed_item_usage_id"), + ) + op.create_index( + op.f("ix_resource_tracker_licensed_items_usage_wallet_id"), + "resource_tracker_licensed_items_usage", + ["wallet_id"], + unique=False, + ) + op.create_table( + "licensed_items", + sa.Column( + "licensed_item_id", + postgresql.UUID(as_uuid=True), + server_default=sa.text("gen_random_uuid()"), + nullable=False, + ), + sa.Column("name", sa.String(), nullable=False), + sa.Column( + "licensed_resource_type", + sa.Enum("VIP_MODEL", name="licensedresourcetype"), + nullable=False, + ), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["pricing_plan_id"], + ["resource_tracker_pricing_plans.pricing_plan_id"], + name="fk_resource_tracker_license_packages_pricing_plan_id", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_resource_tracker_license_packages_product_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("licensed_item_id"), + ) + # ### end Alembic commands ### + + +def 
downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_table("licensed_items")
+    op.drop_index(
+        op.f("ix_resource_tracker_licensed_items_usage_wallet_id"),
+        table_name="resource_tracker_licensed_items_usage",
+    )
+    op.drop_table("resource_tracker_licensed_items_usage")
+    op.drop_table("resource_tracker_licensed_items_purchases")
+    # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e71ea59858f4_add_uniqu_constraint_in_licensed_items.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e71ea59858f4_add_uniqu_constraint_in_licensed_items.py
new file mode 100644
index 00000000000..3af7ff911f8
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e71ea59858f4_add_uniqu_constraint_in_licensed_items.py
@@ -0,0 +1,32 @@
+"""add uniqu constraint in licensed_items
+
+Revision ID: e71ea59858f4
+Revises: 7d1c6425a51d
+Create Date: 2025-01-30 18:42:15.192968+00:00
+
+"""
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "e71ea59858f4"
+down_revision = "7d1c6425a51d"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_unique_constraint(
+        "uq_licensed_resource_name_type",
+        "licensed_items",
+        ["licensed_resource_name", "licensed_resource_type"],
+    )
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_constraint(
+        "uq_licensed_resource_name_type", "licensed_items", type_="unique"
+    )
+    # ### end Alembic commands ###
diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e7b3d381efe4_add_use_on_demand_clusters_column_in_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e7b3d381efe4_add_use_on_demand_clusters_column_in_.py
new file mode 100644
index 00000000000..af940228d97
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e7b3d381efe4_add_use_on_demand_clusters_column_in_.py
@@ -0,0 +1,43 @@
+"""add use_on_demand_clusters column in groups_extra_properties
+
+Revision ID: e7b3d381efe4
+Revises: 2cd329e47ea1
+Create Date: 2023-09-12 09:41:51.287118+00:00
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "e7b3d381efe4"
+down_revision = "2cd329e47ea1"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column(
+        "groups_extra_properties",
+        sa.Column(
+            "use_on_demand_clusters",
+            sa.Boolean(),
+            server_default=sa.text("false"),
+            nullable=False,
+        ),
+    )
+    op.create_unique_constraint(
+        "group_id_product_name_uniqueness",
+        "groups_extra_properties",
+        ["group_id", "product_name"],
+    )
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust!
### + op.drop_constraint( + "group_id_product_name_uniqueness", "groups_extra_properties", type_="unique" + ) + op.drop_column("groups_extra_properties", "use_on_demand_clusters") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8057a4a7bb0_new_services_tags_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8057a4a7bb0_new_services_tags_table.py new file mode 100644 index 00000000000..62998ed021f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8057a4a7bb0_new_services_tags_table.py @@ -0,0 +1,44 @@ +"""new services_tags table + +Revision ID: e8057a4a7bb0 +Revises: 7604e65e2f83 +Create Date: 2024-08-23 12:12:32.883771+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "e8057a4a7bb0" +down_revision = "7604e65e2f83" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "services_tags", + sa.Column("service_key", sa.String(), nullable=False), + sa.Column("service_version", sa.String(), nullable=False), + sa.Column("tag_id", sa.BigInteger(), nullable=False), + sa.ForeignKeyConstraint( + ["service_key", "service_version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["tag_id"], ["tags.id"], onupdate="CASCADE", ondelete="CASCADE" + ), + sa.UniqueConstraint( + "service_key", "service_version", "tag_id", name="services_tags_uc" + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("services_tags") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8ffc0c96336_add_is_hidden_on_market_field.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8ffc0c96336_add_is_hidden_on_market_field.py new file mode 100644 index 00000000000..6a32dec314d --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e8ffc0c96336_add_is_hidden_on_market_field.py @@ -0,0 +1,35 @@ +"""add is_hidden_on_market field + +Revision ID: e8ffc0c96336 +Revises: a53c3c153bc8 +Create Date: 2025-02-13 18:05:42.851252+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "e8ffc0c96336" +down_revision = "a53c3c153bc8" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "licensed_items", + sa.Column( + "is_hidden_on_market", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("licensed_items", "is_hidden_on_market") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/e987caaec81b_adding_credit_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e987caaec81b_adding_credit_tables.py new file mode 100644 index 00000000000..c86cf00011c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/e987caaec81b_adding_credit_tables.py @@ -0,0 +1,325 @@ +"""adding credit tables + +Revision ID: e987caaec81b +Revises: 6da4357ce10f +Create Date: 2023-08-09 16:59:21.001729+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "e987caaec81b" +down_revision = "6da4357ce10f" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "resource_tracker_pricing_plans", + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("description", sa.String(), server_default="", nullable=False), + sa.Column( + "classification", + sa.Enum("TIER", name="pricingplanclassification"), + nullable=False, + ), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("pricing_plan_id"), + ) + op.create_index( + op.f("ix_resource_tracker_pricing_plans_name"), + "resource_tracker_pricing_plans", + ["name"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_pricing_plans_product_name"), + "resource_tracker_pricing_plans", + ["product_name"], + unique=False, + ) + op.create_table( + "resource_tracker_service_runs", + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("service_run_id", sa.String(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("wallet_name", sa.String(), nullable=True), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_detail_id", sa.BigInteger(), nullable=False), + sa.Column("simcore_user_agent", sa.String(), nullable=True), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("user_email", sa.String(), nullable=True), + sa.Column("project_id", sa.String(), nullable=False), + sa.Column("project_name", sa.String(), nullable=True), + sa.Column("node_id", sa.String(), nullable=False), + sa.Column("node_name", sa.String(), nullable=True), + sa.Column("service_key", sa.String(), nullable=False), + sa.Column("service_version", sa.String(), nullable=False), + sa.Column( + "service_type", + sa.Enum( + "COMPUTATIONAL_SERVICE", + "DYNAMIC_SERVICE", + name="resourcetrackerservicetype", + ), + nullable=False, + ), + sa.Column( + "service_resources", postgresql.JSONB(astext_type=sa.Text()), nullable=False + ), + sa.Column( + "service_additional_metadata", + postgresql.JSONB(astext_type=sa.Text()), + nullable=False, + ), + sa.Column("started_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("stopped_at", sa.DateTime(timezone=True), nullable=False), + sa.Column( + "service_run_status", + sa.Enum( + "RUNNING", "SUCCESS", "ERROR", 
name="resourcetrackerservicerunstatus" + ), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("product_name", "service_run_id"), + ) + op.create_table( + "resource_tracker_wallets_credit_transactions", + sa.Column("transaction_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("wallet_name", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("user_email", sa.String(), nullable=False), + sa.Column("credits", sa.Numeric(precision=3, scale=2), nullable=False), + sa.Column( + "transaction_status", + sa.Enum( + "PENDING", + "BILLED", + "NOT_BILLED", + "REQUIRES_MANUAL_REVIEW", + name="transactionbillingstatus", + ), + nullable=True, + ), + sa.Column( + "transaction_classification", + sa.Enum( + "ADD_WALLET_TOP_UP", + "DEDUCT_SERVICE_RUN", + name="transactionclassification", + ), + nullable=True, + ), + sa.Column("service_run_id", sa.BigInteger(), nullable=True), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("transaction_id"), + ) + op.create_index( + op.f("ix_resource_tracker_wallets_credit_transactions_product_name"), + "resource_tracker_wallets_credit_transactions", + ["product_name"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_wallets_credit_transactions_service_run_id"), + "resource_tracker_wallets_credit_transactions", + ["service_run_id"], + unique=False, + ) + op.create_index( + op.f( + "ix_resource_tracker_wallets_credit_transactions_transaction_classification" + ), + "resource_tracker_wallets_credit_transactions", + ["transaction_classification"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_wallets_credit_transactions_transaction_status"), + "resource_tracker_wallets_credit_transactions", + ["transaction_status"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_wallets_credit_transactions_wallet_id"), + "resource_tracker_wallets_credit_transactions", + ["wallet_id"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_wallets_credit_transactions_wallet_name"), + "resource_tracker_wallets_credit_transactions", + ["wallet_name"], + unique=False, + ) + op.create_table( + "resource_tracker_pricing_details", + sa.Column("pricing_detail_id", sa.BigInteger(), nullable=False), + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("unit_name", sa.String(), nullable=False), + sa.Column("cost_per_unit", sa.Numeric(precision=3, scale=2), nullable=False), + sa.Column("valid_from", sa.DateTime(timezone=True), nullable=False), + sa.Column("valid_to", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "specific_info", postgresql.JSONB(astext_type=sa.Text()), nullable=False + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["pricing_plan_id"], + ["resource_tracker_pricing_plans.pricing_plan_id"], + name="fk_resource_tracker_pricing_details_pricing_plan_id", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("pricing_detail_id"), + ) + op.create_index( + 
op.f("ix_resource_tracker_pricing_details_pricing_plan_id"), + "resource_tracker_pricing_details", + ["pricing_plan_id"], + unique=False, + ) + op.create_index( + op.f("ix_resource_tracker_pricing_details_unit_name"), + "resource_tracker_pricing_details", + ["unit_name"], + unique=False, + ) + op.create_table( + "resource_tracker_pricing_plan_to_service", + sa.Column("pricing_plan_id", sa.BigInteger(), nullable=False), + sa.Column("service_key", sa.String(), nullable=False), + sa.Column("service_version", sa.String(), nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["pricing_plan_id"], + ["resource_tracker_pricing_plans.pricing_plan_id"], + name="fk_resource_tracker_pricing_details_pricing_plan_id", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.UniqueConstraint( + "service_key", + "service_version", + name="resource_tracker_pricing_plan_to_service__service_unique_key", + ), + ) + op.create_index( + op.f("ix_resource_tracker_pricing_plan_to_service_pricing_plan_id"), + "resource_tracker_pricing_plan_to_service", + ["pricing_plan_id"], + unique=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_resource_tracker_pricing_plan_to_service_pricing_plan_id"), + table_name="resource_tracker_pricing_plan_to_service", + ) + op.drop_table("resource_tracker_pricing_plan_to_service") + op.drop_index( + op.f("ix_resource_tracker_pricing_details_unit_name"), + table_name="resource_tracker_pricing_details", + ) + op.drop_index( + op.f("ix_resource_tracker_pricing_details_pricing_plan_id"), + table_name="resource_tracker_pricing_details", + ) + op.drop_table("resource_tracker_pricing_details") + op.drop_index( + op.f("ix_resource_tracker_wallets_credit_transactions_wallet_name"), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_index( + op.f("ix_resource_tracker_wallets_credit_transactions_wallet_id"), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_index( + op.f("ix_resource_tracker_wallets_credit_transactions_transaction_status"), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_index( + op.f( + "ix_resource_tracker_wallets_credit_transactions_transaction_classification" + ), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_index( + op.f("ix_resource_tracker_wallets_credit_transactions_service_run_id"), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_index( + op.f("ix_resource_tracker_wallets_credit_transactions_product_name"), + table_name="resource_tracker_wallets_credit_transactions", + ) + op.drop_table("resource_tracker_wallets_credit_transactions") + op.drop_table("resource_tracker_service_runs") + op.drop_index( + op.f("ix_resource_tracker_pricing_plans_product_name"), + table_name="resource_tracker_pricing_plans", + ) + op.drop_index( + op.f("ix_resource_tracker_pricing_plans_name"), + table_name="resource_tracker_pricing_plans", + ) + op.drop_table("resource_tracker_pricing_plans") + + sa.Enum(name="transactionbillingstatus").drop(op.get_bind(), checkfirst=False) + sa.Enum(name="transactionclassification").drop(op.get_bind(), checkfirst=False) + # ### end Alembic commands ### diff --git 
a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ea3952fe5a0e_add_enable_efs_to_group_extra_properties.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ea3952fe5a0e_add_enable_efs_to_group_extra_properties.py new file mode 100644 index 00000000000..7f66f3b3830 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ea3952fe5a0e_add_enable_efs_to_group_extra_properties.py @@ -0,0 +1,32 @@ +"""add `enable_efs` to group extra properties + +Revision ID: ea3952fe5a0e +Revises: 8a742f3efdd9 +Create Date: 2024-10-07 06:24:42.464942+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "ea3952fe5a0e" +down_revision = "8a742f3efdd9" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "groups_extra_properties", + sa.Column( + "enable_efs", sa.Boolean(), server_default=sa.text("false"), nullable=False + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("groups_extra_properties", "enable_efs") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ecd4eadaa781_extract_workbench_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ecd4eadaa781_extract_workbench_column.py new file mode 100644 index 00000000000..310a551f7e8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ecd4eadaa781_extract_workbench_column.py @@ -0,0 +1,227 @@ +"""extract workbench column + +Revision ID: ecd4eadaa781 +Revises: a3a58471b0f1 +Create Date: 2025-01-21 13:13:18.256109+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "ecd4eadaa781" +down_revision = "a3a58471b0f1" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "projects_nodes", + sa.Column( + "key", + sa.String(), + nullable=True, + comment="Distinctive name (based on the Docker registry path)", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "version", sa.String(), nullable=True, comment="Semantic version number" + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "label", sa.String(), nullable=True, comment="Short name used for display" + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "progress", sa.Numeric(), nullable=True, comment="Progress value (0-100)" + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "thumbnail", + sa.String(), + nullable=True, + comment="Url of the latest screenshot", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "input_access", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Map with key - access level pairs", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "input_nodes", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="IDs of the nodes where is connected to", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "inputs", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Input properties values", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "inputs_required", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Required input IDs", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "inputs_units", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Input units", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "output_nodes", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Node IDs of those connected to the output", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "outputs", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Output properties values", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "run_hash", + sa.String(), + nullable=True, + comment="HEX digest of the resolved inputs + outputs hash at the time when the last outputs were generated", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "state", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="State", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "parent", + sa.String(), + nullable=True, + comment="Parent's (group-nodes) node ID", + ), + ) + op.add_column( + "projects_nodes", + sa.Column( + "boot_options", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Some services provide alternative parameters to be injected at boot time.The user selection should be stored here, and it will overwrite the services's defaults", + ), + ) + # ### end Alembic commands ### + + op.execute( + """ +UPDATE projects_nodes +SET key = subquery.key, + version = subquery.version, + label = subquery.label, + progress = subquery.progress::numeric, + thumbnail = subquery.thumbnail, + input_access = subquery.input_access::jsonb, + input_nodes = subquery.input_nodes::jsonb, + inputs = subquery.inputs::jsonb, + inputs_required = subquery.inputs_required::jsonb, + inputs_units = subquery.inputs_units::jsonb, + output_nodes = subquery.output_nodes::jsonb, + outputs = subquery.outputs::jsonb, + run_hash = subquery.run_hash, + state = subquery.state::jsonb, + parent = subquery.parent, + boot_options = subquery.boot_options::jsonb +FROM ( + SELECT + projects.uuid AS project_id, + js.key AS node_id, + js.value::jsonb 
->> 'key' AS key, + js.value::jsonb ->> 'label' AS label, + js.value::jsonb ->> 'version' AS version, + (js.value::jsonb ->> 'progress')::numeric AS progress, + js.value::jsonb ->> 'thumbnail' AS thumbnail, + js.value::jsonb ->> 'inputAccess' AS input_access, + js.value::jsonb ->> 'inputNodes' AS input_nodes, + js.value::jsonb ->> 'inputs' AS inputs, + js.value::jsonb ->> 'inputsRequired' AS inputs_required, + js.value::jsonb ->> 'inputsUnits' AS inputs_units, + js.value::jsonb ->> 'outputNodes' AS output_nodes, + js.value::jsonb ->> 'outputs' AS outputs, + js.value::jsonb ->> 'runHash' AS run_hash, + js.value::jsonb ->> 'state' AS state, + js.value::jsonb ->> 'parent' AS parent, + js.value::jsonb ->> 'bootOptions' AS boot_options + FROM projects, + json_each(projects.workbench) AS js +) AS subquery +WHERE projects_nodes.project_uuid = subquery.project_id +AND projects_nodes.node_id = subquery.node_id; +""" + ) + op.alter_column("projects_nodes", "key", nullable=False) + op.alter_column("projects_nodes", "version", nullable=False) + op.alter_column("projects_nodes", "label", nullable=False) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("projects_nodes", "boot_options") + op.drop_column("projects_nodes", "parent") + op.drop_column("projects_nodes", "state") + op.drop_column("projects_nodes", "run_hash") + op.drop_column("projects_nodes", "outputs") + op.drop_column("projects_nodes", "output_nodes") + op.drop_column("projects_nodes", "inputs_units") + op.drop_column("projects_nodes", "inputs_required") + op.drop_column("projects_nodes", "inputs") + op.drop_column("projects_nodes", "input_nodes") + op.drop_column("projects_nodes", "input_access") + op.drop_column("projects_nodes", "thumbnail") + op.drop_column("projects_nodes", "progress") + op.drop_column("projects_nodes", "label") + op.drop_column("projects_nodes", "version") + op.drop_column("projects_nodes", "key") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ef931143b7cd_refactoring_of_resource_tracker.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ef931143b7cd_refactoring_of_resource_tracker.py new file mode 100644 index 00000000000..1b7fcf001bf --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ef931143b7cd_refactoring_of_resource_tracker.py @@ -0,0 +1,96 @@ +"""refactoring of resource_tracker_container table + +Revision ID: ef931143b7cd +Revises: a8762d5d43ae +Create Date: 2023-07-11 14:37:57.455348+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "ef931143b7cd" +down_revision = "a8762d5d43ae" +branch_labels = None +depends_on = None + + +def upgrade(): + container_classification_enum = postgresql.ENUM( + "DYNAMIC_SIDECAR", "USER_SERVICE", name="containerclassification" + ) + container_classification_enum.create(op.get_bind()) + + op.execute("DELETE FROM resource_tracker_container;") + + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "resource_tracker_container", + sa.Column("cpu_limit", sa.Numeric(precision=3, scale=2), nullable=False), + ) + op.add_column( + "resource_tracker_container", + sa.Column("memory_limit", sa.BigInteger(), nullable=False), + ) + op.add_column( + "resource_tracker_container", + sa.Column( + "classification", + sa.Enum("DYNAMIC_SIDECAR", "USER_SERVICE", name="containerclassification"), + nullable=True, + ), + ) + op.drop_column( + "resource_tracker_container", "service_settings_reservation_nano_cpus" + ) + op.drop_column("resource_tracker_container", "service_settings_limit_nano_cpus") + op.drop_column("resource_tracker_container", "service_settings_limit_memory_bytes") + op.drop_column( + "resource_tracker_container", "service_settings_reservation_memory_bytes" + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "resource_tracker_container", + sa.Column( + "service_settings_reservation_memory_bytes", + sa.BIGINT(), + autoincrement=False, + nullable=True, + ), + ) + op.add_column( + "resource_tracker_container", + sa.Column( + "service_settings_limit_memory_bytes", + sa.BIGINT(), + autoincrement=False, + nullable=True, + ), + ) + op.add_column( + "resource_tracker_container", + sa.Column( + "service_settings_limit_nano_cpus", + sa.BIGINT(), + autoincrement=False, + nullable=True, + ), + ) + op.add_column( + "resource_tracker_container", + sa.Column( + "service_settings_reservation_nano_cpus", + sa.BIGINT(), + autoincrement=False, + nullable=True, + ), + ) + op.drop_column("resource_tracker_container", "classification") + op.drop_column("resource_tracker_container", "memory_limit") + op.drop_column("resource_tracker_container", "cpu_limit") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f19905923355_adds_trashed_by_column.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f19905923355_adds_trashed_by_column.py new file mode 100644 index 00000000000..0f7de458791 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f19905923355_adds_trashed_by_column.py @@ -0,0 +1,84 @@ +"""Adds trashed by column + +Revision ID: f19905923355 +Revises: 307017ee1a49 +Create Date: 2025-01-10 16:43:21.559138+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "f19905923355" +down_revision = "307017ee1a49" +branch_labels = None +depends_on = None + + +def upgrade(): + + with op.batch_alter_table("folders_v2") as batch_op: + batch_op.alter_column( + "trashed_at", + new_column_name="trashed", + comment="The date and time when the folders was marked as trashed. Null if the folders has not been trashed [default].", + ) + batch_op.add_column( + sa.Column( + "trashed_by", + sa.BigInteger(), + nullable=True, + comment="User who trashed the folders, or null if not trashed or user is unknown.", + ) + ) + batch_op.create_foreign_key( + "fk_folders_trashed_by_user_id", + "users", + ["trashed_by"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + + with op.batch_alter_table("projects") as batch_op: + batch_op.alter_column( + "trashed_at", + new_column_name="trashed", + comment="The date and time when the projects was marked as trashed. 
Null if the projects has not been trashed [default].", + ) + batch_op.add_column( + sa.Column( + "trashed_by", + sa.BigInteger(), + nullable=True, + comment="User who trashed the projects, or null if not trashed or user is unknown.", + ) + ) + batch_op.create_foreign_key( + "fk_projects_trashed_by_user_id", + "users", + ["trashed_by"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + + +def downgrade(): + with op.batch_alter_table("projects") as batch_op: + batch_op.drop_constraint("fk_projects_trashed_by_user_id", type_="foreignkey") + batch_op.drop_column("trashed_by") + batch_op.alter_column( + "trashed", + new_column_name="trashed_at", + comment="The date and time when the project was marked as trashed. Null if the project has not been trashed [default].", + ) + + with op.batch_alter_table("folders_v2") as batch_op: + batch_op.drop_constraint("fk_folders_trashed_by_user_id", type_="foreignkey") + batch_op.drop_column("trashed_by") + batch_op.alter_column( + "trashed", + new_column_name="trashed_at", + comment="The date and time when the folder was marked as trashed. Null if the folder has not been trashed [default].", + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f20f4c9fca71_added_enable_telemetry_option_to_groups_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f20f4c9fca71_added_enable_telemetry_option_to_groups_.py new file mode 100644 index 00000000000..18d66b72a2e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f20f4c9fca71_added_enable_telemetry_option_to_groups_.py @@ -0,0 +1,35 @@ +"""added enable telemetry option to groups extra properties + +Revision ID: f20f4c9fca71 +Revises: f9f9a650bf4b +Create Date: 2024-01-19 14:11:16.354169+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "f20f4c9fca71" +down_revision = "f9f9a650bf4b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "groups_extra_properties", + sa.Column( + "enable_telemetry", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("groups_extra_properties", "enable_telemetry") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3285aff5e84_new_projects_metadata_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3285aff5e84_new_projects_metadata_table.py new file mode 100644 index 00000000000..0e4dc98204e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3285aff5e84_new_projects_metadata_table.py @@ -0,0 +1,97 @@ +"""new projects_metadata table + +Revision ID: f3285aff5e84 +Revises: 58b24613c3f7 +Create Date: 2023-07-05 15:06:56.003418+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. 
+revision = "f3285aff5e84" +down_revision = "58b24613c3f7" +branch_labels = None +depends_on = None + + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "projects_metadata" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "projects_metadata", + sa.Column("project_uuid", sa.String(), nullable=False), + sa.Column( + "custom", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=False, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["project_uuid"], + ["projects.uuid"], + name="fk_projects_metadata_project_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("project_uuid"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("projects_metadata") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3a5484fe05d_products_to_templates_map_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3a5484fe05d_products_to_templates_map_table.py new file mode 100644 index 00000000000..59a0543e740 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f3a5484fe05d_products_to_templates_map_table.py @@ -0,0 +1,93 @@ +"""products_to_templates map table + +Revision ID: f3a5484fe05d +Revises: f20f4c9fca71 +Create Date: 2024-02-21 19:33:48.169810+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "f3a5484fe05d" +down_revision = "f20f4c9fca71" +branch_labels = None +depends_on = None + + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "products_to_templates" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database + +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "products_to_templates", + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("template_name", sa.String(), nullable=True), + sa.Column( + "created", sa.DateTime(), server_default=sa.text("now()"), nullable=False + ), + sa.Column( + "modified", sa.DateTime(), server_default=sa.text("now()"), nullable=False + ), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_products_to_templates_product_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["template_name"], + ["jinja2_templates.name"], + name="fk_products_to_templates_template_name", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.UniqueConstraint("product_name", "template_name"), + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("products_to_templates") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f53806935760_added_user_prefereces_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f53806935760_added_user_prefereces_tables.py new file mode 100644 index 00000000000..ef99f22bf39 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f53806935760_added_user_prefereces_tables.py @@ -0,0 +1,81 @@ +"""added_user_prefereces_tables + +Revision ID: f53806935760 +Revises: c4245e9e0f72 +Create Date: 2023-09-08 13:41:07.591760+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "f53806935760" +down_revision = "c4245e9e0f72" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "user_preferences_frontend", + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("preference_name", sa.String(), nullable=False), + sa.Column("payload", sa.JSON(), nullable=False), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_user_preferences_frontend_name_products", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + name="fk_user_preferences_frontend_id_users", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "user_id", + "product_name", + "preference_name", + name="user_preferences_frontend_pk", + ), + ) + op.create_table( + "user_preferences_user_service", + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("preference_name", sa.String(), nullable=False), + sa.Column("payload", sa.LargeBinary(), nullable=False), + sa.ForeignKeyConstraint( + ["product_name"], + ["products.name"], + name="fk_user_preferences_user_service_name_products", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.id"], + name="fk_user_preferences_user_service_id_users", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "user_id", + "product_name", + "preference_name", + name="user_preferences_user_service_pk", + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("user_preferences_user_service") + op.drop_table("user_preferences_frontend") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f613247f5bb1_refactor_pricing_units_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f613247f5bb1_refactor_pricing_units_table.py new file mode 100644 index 00000000000..418fcddd2fc --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f613247f5bb1_refactor_pricing_units_table.py @@ -0,0 +1,46 @@ +"""refactor pricing units table + +Revision ID: f613247f5bb1 +Revises: 57ab8c419ca6 +Create Date: 2023-10-07 15:13:38.557368+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "f613247f5bb1" +down_revision = "57ab8c419ca6" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("resource_tracker_pricing_unit_costs", "specific_info") + op.add_column( + "resource_tracker_pricing_units", + sa.Column( + "unit_extra_info", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + nullable=False, + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("resource_tracker_pricing_units", "unit_extra_info") + op.add_column( + "resource_tracker_pricing_unit_costs", + sa.Column( + "specific_info", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + ) + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f65f7786cd4b_add_indexes_to_comp_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f65f7786cd4b_add_indexes_to_comp_tables.py new file mode 100644 index 00000000000..49c5aae9c92 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f65f7786cd4b_add_indexes_to_comp_tables.py @@ -0,0 +1,53 @@ +"""add indexes to comp tables + +Revision ID: f65f7786cd4b +Revises: cf8f743fd0b7 +Create Date: 2025-04-17 12:44:27.577984+00:00 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "f65f7786cd4b" +down_revision = "cf8f743fd0b7" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index("ix_comp_runs_user_id", "comp_runs", ["user_id"], unique=False) + op.create_index( + "ix_comp_tasks_project_id", "comp_tasks", ["project_id"], unique=False + ) + op.drop_index("idx_projects_last_change_date_desc", table_name="projects") + op.create_index( + "idx_projects_last_change_date_desc", + "projects", + ["last_change_date"], + unique=False, + postgresql_using="btree", + postgresql_ops={"last_change_date": "DESC"}, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + "idx_projects_last_change_date_desc", + table_name="projects", + postgresql_using="btree", + postgresql_ops={"last_change_date": "DESC"}, + ) + op.create_index( + "idx_projects_last_change_date_desc", + "projects", + [sa.text("last_change_date DESC")], + unique=False, + ) + op.drop_index("ix_comp_tasks_project_id", table_name="comp_tasks") + op.drop_index("ix_comp_runs_user_id", table_name="comp_runs") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f7f3c835f38a_remove_clusters.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f7f3c835f38a_remove_clusters.py new file mode 100644 index 00000000000..875cdf21124 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f7f3c835f38a_remove_clusters.py @@ -0,0 +1,120 @@ +"""remove clusters + +Revision ID: f7f3c835f38a +Revises: 7994074c4d98 +Create Date: 2025-03-17 14:26:58.117504+00:00 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "f7f3c835f38a" +down_revision = "7994074c4d98" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint( + "fk_comp_runs_cluster_id_clusters", "comp_runs", type_="foreignkey" + ) + op.drop_column("comp_runs", "cluster_id") + op.drop_table("clusters") + op.execute("DROP TRIGGER IF EXISTS cluster_modification on clusters;") + op.execute("DROP FUNCTION set_cluster_to_owner_group() CASCADE") + op.execute("DROP TYPE IF EXISTS clustertype") + # ### end Alembic commands ### + + +new_cluster_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS cluster_modification on clusters; +CREATE TRIGGER cluster_modification +AFTER INSERT ON clusters + FOR EACH ROW + EXECUTE PROCEDURE set_cluster_to_owner_group(); +""" +) +assign_cluster_access_rights_to_owner_group_procedure_new = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_cluster_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO "cluster_to_groups" ("gid", "cluster_id", "read", "write", "delete") VALUES (NEW.owner, NEW.id, TRUE, TRUE, TRUE); + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.execute(sa.DDL("DROP TRIGGER IF EXISTS cluster_modification on clusters;")) + op.execute("DROP TYPE IF EXISTS clustertype") + op.create_table( + "clusters", + sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("description", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "type", + postgresql.ENUM("AWS", "ON_PREMISE", name="clustertype"), + autoincrement=False, + nullable=False, + ), + sa.Column("owner", sa.BIGINT(), autoincrement=False, nullable=False), + sa.Column("thumbnail", sa.VARCHAR(), autoincrement=False, nullable=True), + sa.Column( + "created", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column( + "modified", + postgresql.TIMESTAMP(), + server_default=sa.text("now()"), + autoincrement=False, + nullable=False, + ), + sa.Column("endpoint", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "authentication", + postgresql.JSONB(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + sa.ForeignKeyConstraint( + ["owner"], + ["groups.gid"], + name="fk_clusters_gid_groups", + onupdate="CASCADE", + ondelete="RESTRICT", + ), + sa.PrimaryKeyConstraint("id", name="clusters_pkey"), + ) + + op.add_column( + "comp_runs", + sa.Column("cluster_id", sa.BIGINT(), autoincrement=False, nullable=True), + ) + op.create_foreign_key( + "fk_comp_runs_cluster_id_clusters", + "comp_runs", + "clusters", + ["cluster_id"], + ["id"], + onupdate="CASCADE", + ondelete="SET NULL", + ) + # ### end Alembic commands ### + op.execute(assign_cluster_access_rights_to_owner_group_procedure_new) + op.execute(new_cluster_trigger) diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/f9f9a650bf4b_new_user_name_cols.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f9f9a650bf4b_new_user_name_cols.py new file mode 100644 index 00000000000..ec1a1671124 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/f9f9a650bf4b_new_user_name_cols.py @@ -0,0 +1,77 @@ +"""new user name cols + +Revision ID: f9f9a650bf4b +Revises: 392a86f2e446 +Create Date: 2024-01-12 06:29:40.364669+00:00 + +""" +import re +import secrets +import string + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, 
used by Alembic. +revision = "f9f9a650bf4b" +down_revision = "392a86f2e446" +branch_labels = None +depends_on = None + +SEPARATOR = "." # Based on this info UserNameConverter: 'first_name.lastname' + + +def upgrade(): + # new columns + op.add_column("users", sa.Column("first_name", sa.String(), nullable=True)) + op.add_column("users", sa.Column("last_name", sa.String(), nullable=True)) + + # fill new and update existing + connection = op.get_bind() + result = connection.execute(sa.text("SELECT id, name FROM users")) + + used = set() + + for user_id, name in result: + # from name -> generate name + new_name = re.sub(r"[^a-zA-Z0-9]", "", name).lower() + while new_name in used: + new_name += f"{''.join(secrets.choice(string.digits) for _ in range(4))}" + + # from name -> create first_name, last_name + parts = name.split(SEPARATOR, 1) + first_name = parts[0].capitalize() + last_name = parts[1].capitalize() if len(parts) == 2 else None + + query = sa.text( + "UPDATE users SET first_name=:first, last_name=:last, name=:uname WHERE id=:id" + ) + values = { + "first": first_name, + "last": last_name, + "id": user_id, + "uname": new_name, + } + + connection.execute(query, values) + used.add(new_name) + + op.create_unique_constraint("user_name_ukey", "users", ["name"]) + + +def downgrade(): + connection = op.get_bind() + op.drop_constraint("user_name_ukey", "users", type_="unique") + + result = connection.execute(sa.text("SELECT id, first_name, last_name FROM users")) + + for user_id, first_name, last_name in result: + name = f"{first_name or ''}.{last_name or ''}".strip(".") + connection.execute( + sa.text("UPDATE users SET name=:name WHERE id=:id"), + {"name": name, "id": user_id}, + ) + + # delete + op.drop_column("users", "last_name") + op.drop_column("users", "first_name") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/fa26ab3555c8_adding_last_heartbeat_at_to_service_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fa26ab3555c8_adding_last_heartbeat_at_to_service_.py new file mode 100644 index 00000000000..59c844d2bb3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fa26ab3555c8_adding_last_heartbeat_at_to_service_.py @@ -0,0 +1,79 @@ +"""adding last_heartbeat_at to service_runs table + making nullable columns + +Revision ID: fa26ab3555c8 +Revises: e987caaec81b +Create Date: 2023-08-26 11:21:17.272272+00:00 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "fa26ab3555c8" +down_revision = "e987caaec81b" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column( + "resource_tracker_service_runs", + sa.Column("last_heartbeat_at", sa.DateTime(timezone=True), nullable=False), + ) + op.alter_column( + "resource_tracker_service_runs", + "wallet_id", + existing_type=sa.BIGINT(), + nullable=True, + ) + op.alter_column( + "resource_tracker_service_runs", + "pricing_plan_id", + existing_type=sa.BIGINT(), + nullable=True, + ) + op.alter_column( + "resource_tracker_service_runs", + "pricing_detail_id", + existing_type=sa.BIGINT(), + nullable=True, + ) + op.alter_column( + "resource_tracker_service_runs", + "stopped_at", + existing_type=postgresql.TIMESTAMP(timezone=True), + nullable=True, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "resource_tracker_service_runs", + "stopped_at", + existing_type=postgresql.TIMESTAMP(timezone=True), + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "pricing_detail_id", + existing_type=sa.BIGINT(), + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "pricing_plan_id", + existing_type=sa.BIGINT(), + nullable=False, + ) + op.alter_column( + "resource_tracker_service_runs", + "wallet_id", + existing_type=sa.BIGINT(), + nullable=False, + ) + op.drop_column("resource_tracker_service_runs", "last_heartbeat_at") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc1701bb7e93_add_function_tables.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc1701bb7e93_add_function_tables.py new file mode 100644 index 00000000000..c0d29336e3e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc1701bb7e93_add_function_tables.py @@ -0,0 +1,197 @@ +"""Add function tables + +Revision ID: fc1701bb7e93 +Revises: 0d52976dc616 +Create Date: 2025-05-15 08:41:44.106941+00:00 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "fc1701bb7e93" +down_revision = "0d52976dc616" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "funcapi_function_job_collections", + sa.Column("uuid", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("title", sa.String(), nullable=True), + sa.Column("description", sa.String(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("uuid", name="funcapi_function_job_collections_pk"), + ) + op.create_index( + op.f("ix_funcapi_function_job_collections_uuid"), + "funcapi_function_job_collections", + ["uuid"], + unique=False, + ) + op.create_table( + "funcapi_functions", + sa.Column("uuid", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("title", sa.String(), nullable=True), + sa.Column("function_class", sa.String(), nullable=True), + sa.Column("description", sa.String(), nullable=True), + sa.Column( + "input_schema", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + sa.Column( + "output_schema", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + sa.Column( + "system_tags", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + sa.Column("user_tags", postgresql.JSONB(astext_type=sa.Text()), nullable=True), + sa.Column( + "class_specific_data", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + sa.Column( + "default_inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("uuid", name="funcapi_functions_pk"), + ) + op.create_index( + op.f("ix_funcapi_functions_uuid"), "funcapi_functions", ["uuid"], unique=False + ) + op.create_table( + "funcapi_function_jobs", + sa.Column("uuid", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("title", sa.String(), nullable=True), + sa.Column("description", sa.String(), nullable=True), + sa.Column("function_uuid", postgresql.UUID(as_uuid=True), nullable=False), + sa.Column("function_class", sa.String(), nullable=True), + sa.Column("status", sa.String(), nullable=True), + sa.Column("inputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True), + sa.Column("outputs", postgresql.JSONB(astext_type=sa.Text()), nullable=True), + sa.Column( + "class_specific_data", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + ), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["function_uuid"], + ["funcapi_functions.uuid"], + name="fk_function_jobs_to_function_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint("uuid", name="funcapi_function_jobs_pk"), + ) + op.create_index( + op.f("ix_funcapi_function_jobs_function_uuid"), + "funcapi_function_jobs", + ["function_uuid"], + unique=False, + ) + op.create_index( + op.f("ix_funcapi_function_jobs_uuid"), + "funcapi_function_jobs", + ["uuid"], + unique=False, + ) + op.create_table( + "funcapi_function_job_collections_to_function_jobs", + sa.Column( + "function_job_collection_uuid", + postgresql.UUID(as_uuid=True), + nullable=False, + ), + sa.Column("function_job_uuid", postgresql.UUID(as_uuid=True), 
nullable=False), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.ForeignKeyConstraint( + ["function_job_collection_uuid"], + ["funcapi_function_job_collections.uuid"], + name="fk_func_job_coll_to_func_jobs_to_func_job_coll_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["function_job_uuid"], + ["funcapi_function_jobs.uuid"], + name="fk_func_job_coll_to_func_jobs_to_func_job_uuid", + onupdate="CASCADE", + ondelete="CASCADE", + ), + sa.PrimaryKeyConstraint( + "function_job_collection_uuid", + "function_job_uuid", + name="funcapi_function_job_collections_to_function_jobs_pk", + ), + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table("funcapi_function_job_collections_to_function_jobs") + op.drop_index( + op.f("ix_funcapi_function_jobs_uuid"), table_name="funcapi_function_jobs" + ) + op.drop_index( + op.f("ix_funcapi_function_jobs_function_uuid"), + table_name="funcapi_function_jobs", + ) + op.drop_table("funcapi_function_jobs") + op.drop_index(op.f("ix_funcapi_functions_uuid"), table_name="funcapi_functions") + op.drop_table("funcapi_functions") + op.drop_index( + op.f("ix_funcapi_function_job_collections_uuid"), + table_name="funcapi_function_job_collections", + ) + op.drop_table("funcapi_function_job_collections") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc6ea424f586_new_payments_transactions_table.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc6ea424f586_new_payments_transactions_table.py new file mode 100644 index 00000000000..dd3e852d472 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fc6ea424f586_new_payments_transactions_table.py @@ -0,0 +1,112 @@ +"""new payments_transactions table + +Revision ID: fc6ea424f586 +Revises: 763666c698fb +Create Date: 2023-09-04 14:13:28.201570+00:00 + +""" +from typing import Final + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "fc6ea424f586" +down_revision = "763666c698fb" +branch_labels = None +depends_on = None + +# auto-update modified +# TRIGGERS ------------------------ +_TABLE_NAME: Final[str] = "payments_transactions" +_TRIGGER_NAME: Final[str] = "trigger_auto_update" # NOTE: scoped on table +_PROCEDURE_NAME: Final[ + str +] = f"{_TABLE_NAME}_auto_update_modified()" # NOTE: scoped on database +modified_timestamp_trigger = sa.DDL( + f""" +DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME}; +CREATE TRIGGER {_TRIGGER_NAME} +BEFORE INSERT OR UPDATE ON {_TABLE_NAME} +FOR EACH ROW EXECUTE PROCEDURE {_PROCEDURE_NAME}; + """ +) + +# PROCEDURES ------------------------ +update_modified_timestamp_procedure = sa.DDL( + f""" +CREATE OR REPLACE FUNCTION {_PROCEDURE_NAME} +RETURNS TRIGGER AS $$ +BEGIN + NEW.modified := current_timestamp; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + """ +) + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "payments_transactions", + sa.Column("payment_id", sa.String(), nullable=False), + sa.Column("price_dollars", sa.Numeric(scale=2), nullable=False), + sa.Column("osparc_credits", sa.Numeric(scale=2), nullable=False), + sa.Column("product_name", sa.String(), nullable=False), + sa.Column("user_id", sa.BigInteger(), nullable=False), + sa.Column("user_email", sa.String(), nullable=False), + sa.Column("wallet_id", sa.BigInteger(), nullable=False), + sa.Column("comment", sa.Text(), nullable=True), + sa.Column("initiated_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("success", sa.Boolean(), nullable=True), + sa.Column("errors", sa.Text(), nullable=True), + sa.Column( + "created", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.Column( + "modified", + sa.DateTime(timezone=True), + server_default=sa.text("now()"), + nullable=False, + ), + sa.PrimaryKeyConstraint("payment_id"), + ) + op.create_index( + op.f("ix_payments_transactions_user_id"), + "payments_transactions", + ["user_id"], + unique=False, + ) + op.create_index( + op.f("ix_payments_transactions_wallet_id"), + "payments_transactions", + ["wallet_id"], + unique=False, + ) + # ### end Alembic commands ### + + # custom + op.execute(update_modified_timestamp_procedure) + op.execute(modified_timestamp_trigger) + + +def downgrade(): + + # custom + op.execute(f"DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {_TABLE_NAME};") + op.execute(f"DROP FUNCTION {_PROCEDURE_NAME};") + + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_payments_transactions_wallet_id"), table_name="payments_transactions" + ) + op.drop_index( + op.f("ix_payments_transactions_user_id"), table_name="payments_transactions" + ) + op.drop_table("payments_transactions") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/fce5d231e16d_new_projects_trashed_at.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fce5d231e16d_new_projects_trashed_at.py new file mode 100644 index 00000000000..200013e92ec --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/fce5d231e16d_new_projects_trashed_at.py @@ -0,0 +1,29 @@ +"""new projects trashed_at + +Revision ID: fce5d231e16d +Revises: ea3952fe5a0e +Create Date: 2024-10-23 14:32:32.350937+00:00 + +""" +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "fce5d231e16d" +down_revision = "ea3952fe5a0e" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "projects", sa.Column("trashed_at", sa.DateTime(timezone=True), nullable=True) + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("projects", "trashed_at") + # ### end Alembic commands ### diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/feca36c8e18f_rename_tags_to_groups.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/feca36c8e18f_rename_tags_to_groups.py new file mode 100644 index 00000000000..a473492f632 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/feca36c8e18f_rename_tags_to_groups.py @@ -0,0 +1,23 @@ +"""rename tags_to_groups + +Revision ID: feca36c8e18f +Revises: e8057a4a7bb0 +Create Date: 2024-08-23 12:30:56.650085+00:00 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "feca36c8e18f" +down_revision = "e8057a4a7bb0" +branch_labels = None +depends_on = None + + +def upgrade(): + op.rename_table("tags_to_groups", "tags_access_rights") + + +def downgrade(): + # Reverse the table rename from projects_tags to study_tags + op.rename_table("tags_access_rights", "tags_to_groups") diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ff33dcf20b44_adding_column_product_name_to_groups_.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ff33dcf20b44_adding_column_product_name_to_groups_.py index 4e6d432799a..56c122941d1 100644 --- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/ff33dcf20b44_adding_column_product_name_to_groups_.py +++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/ff33dcf20b44_adding_column_product_name_to_groups_.py @@ -20,7 +20,7 @@ def upgrade(): conn = op.get_bind() default_product_name = conn.scalar( - "SELECT name from products ORDER BY priority LIMIT 1" + sa.DDL("SELECT name from products ORDER BY priority LIMIT 1") ) op.add_column( diff --git a/packages/postgres-database/src/simcore_postgres_database/models/_common.py b/packages/postgres-database/src/simcore_postgres_database/models/_common.py index 313812a0c01..47bfeb6ebf0 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/_common.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/_common.py @@ -1,22 +1,143 @@ +from typing import Final + import sqlalchemy as sa +from ..constants import DECIMAL_PLACES + + +class RefActions: + """Referential actions for `ON UPDATE`, `ON DELETE`""" + + # SEE https://docs.sqlalchemy.org/en/20/core/constraints.html#on-update-on-delete + CASCADE: Final[str] = "CASCADE" + SET_NULL: Final[str] = "SET NULL" + SET_DEFAULT: Final[str] = "SET DEFAULT" + RESTRICT: Final[str] = "RESTRICT" + NO_ACTION: Final[str] = "NO ACTION" + -def column_created_datetime() -> sa.Column: +def column_created_datetime(*, timezone: bool = True) -> sa.Column: return sa.Column( "created", - sa.DateTime(), + sa.DateTime(timezone=timezone), nullable=False, server_default=sa.sql.func.now(), doc="Timestamp auto-generated upon creation", ) -def column_modified_datetime() -> sa.Column: +def column_modified_datetime(*, timezone: bool = True) -> sa.Column: return sa.Column( "modified", - sa.DateTime(), + sa.DateTime(timezone=timezone), nullable=False, server_default=sa.sql.func.now(), onupdate=sa.sql.func.now(), doc="Timestamp with last row update", ) + + +def column_created_by_user( + *, users_table: sa.Table, required: bool = False +) -> sa.Column: + return sa.Column( + "created_by", + sa.Integer, + sa.ForeignKey( + users_table.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + ), + 
nullable=not required, + doc="Who created this row at `created`", + ) + + +def column_modified_by_user( + *, users_table: sa.Table, required: bool = False +) -> sa.Column: + return sa.Column( + "modified_by", + sa.Integer, + sa.ForeignKey( + users_table.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + ), + nullable=not required, + doc="Who modified this row at `modified`", + ) + + +def column_trashed_datetime(resource_name: str) -> sa.Column: + return sa.Column( + "trashed", + sa.DateTime(timezone=True), + nullable=True, + comment=f"The date and time when the {resource_name} was marked as trashed. " + f"Null if the {resource_name} has not been trashed [default].", + ) + + +def column_trashed_by_user(resource_name: str, users_table: sa.Table) -> sa.Column: + return sa.Column( + "trashed_by", + sa.BigInteger, + sa.ForeignKey( + users_table.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name=f"fk_{resource_name}_trashed_by_user_id", + ), + nullable=True, + comment=f"User who trashed the {resource_name}, or null if not trashed or user is unknown.", + ) + + +_TRIGGER_NAME: Final[str] = "auto_update_modified_timestamp" + + +def register_modified_datetime_auto_update_trigger(table: sa.Table) -> None: + """registers a trigger/procedure couple in order to ensure auto + update of the 'modified' timestamp column when a row is modified. + + NOTE: Add a *hard-coded* version in the alembic migration code!!! + SEE https://github.com/ITISFoundation/osparc-simcore/blob/78bc54e5815e8be5a8ed6a08a7bbe5591bbd2bd9/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py + + + Arguments: + table -- the table to add the auto-trigger to + """ + assert "modified" in table.columns # nosec + + # NOTE: scoped on database + procedure_name: Final[str] = f"{table.name}_auto_update_modified_timestamp()" + + # TRIGGER + modified_timestamp_trigger = sa.DDL( + f""" + DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {table.name}; + CREATE TRIGGER {_TRIGGER_NAME} + BEFORE INSERT OR UPDATE ON {table.name} + FOR EACH ROW EXECUTE PROCEDURE {procedure_name}; + """ + ) + # PROCEDURE + update_modified_timestamp_procedure = sa.DDL( + f""" + CREATE OR REPLACE FUNCTION {procedure_name} + RETURNS TRIGGER AS $$ + BEGIN + NEW.modified := current_timestamp; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + """ + ) + + # REGISTER THEM PROCEDURES/TRIGGERS + sa.event.listen(table, "after_create", update_modified_timestamp_procedure) + sa.event.listen(table, "after_create", modified_timestamp_trigger) + + +NUMERIC_KWARGS = {"scale": DECIMAL_PLACES} diff --git a/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py b/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py index 7f12e87eefe..2c3f12eca3a 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/api_keys.py @@ -1,4 +1,4 @@ -""" API keys to access public API +"""API keys to access public API These keys grant the client authorization to the API resources @@ -10,35 +10,66 @@ +--------+ +---------------+ """ + import sqlalchemy as sa from sqlalchemy.sql import func +from ._common import RefActions from .base import metadata from .users import users api_keys = sa.Table( "api_keys", metadata, - sa.Column("id", sa.BigInteger(), nullable=False, primary_key=True), - sa.Column("display_name", sa.String(), nullable=False), + sa.Column( + "id", + 
sa.BigInteger(), + nullable=False, + primary_key=True, + doc="Primary key identifier", + ), + sa.Column( + "display_name", + sa.String(), + nullable=False, + doc="Human readable name. Unique for each user. SEE unique constraint below", + ), sa.Column( "user_id", sa.BigInteger(), - sa.ForeignKey(users.c.id, ondelete="CASCADE"), + sa.ForeignKey(users.c.id, ondelete=RefActions.CASCADE), nullable=False, + doc="Identified user", + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_api_keys_product_name", + ), + nullable=False, + doc="Identified product", + ), + sa.Column("api_key", sa.String(), nullable=False, index=True), + sa.Column( + "api_secret", + sa.String(), + nullable=False, + doc="API key secret, hashed using blowfish algorithm", ), - sa.Column("api_key", sa.String(), nullable=False), - sa.Column("api_secret", sa.String(), nullable=False), sa.Column( "created", sa.DateTime(), - nullable=False, + nullable=False, # WARNING: still not updated to correct utc server_default=func.now(), doc="Timestamp auto-generated upon creation", ), sa.Column( "expires_at", - sa.DateTime(), + sa.DateTime(), # WARNING: still not updated to correct utc nullable=True, doc="Sets the expiration date for this api-key." "If set to NULL then the key does not expire.", diff --git a/packages/postgres-database/src/simcore_postgres_database/models/base.py b/packages/postgres-database/src/simcore_postgres_database/models/base.py index bf66f1731e1..cf4e28001bc 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/base.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/base.py @@ -3,10 +3,14 @@ - Collects all table's schemas - Metadata object needed to explicitly define table schemas """ + +from typing import cast + import sqlalchemy.orm +from sqlalchemy.ext.declarative import DeclarativeMeta # DO NOT inheriting from _base. 
Use instead explicit table definitions # See https://docs.sqlalchemy.org/en/latest/orm/mapping_styles.html#classical-mappings -Base = sqlalchemy.orm.declarative_base() +Base = cast(DeclarativeMeta, sqlalchemy.orm.declarative_base()) metadata = Base.metadata diff --git a/packages/postgres-database/src/simcore_postgres_database/models/classifiers.py b/packages/postgres-database/src/simcore_postgres_database/models/classifiers.py index 238c7b04f8c..7e4a2cf39df 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/classifiers.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/classifiers.py @@ -11,6 +11,7 @@ from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.sql import func +from ._common import RefActions from .base import metadata group_classifiers = sa.Table( @@ -32,8 +33,8 @@ sa.ForeignKey( "groups.gid", name="fk_group_classifiers_gid_to_groups_gid", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), unique=True, # Every Group can ONLY have one set of classifiers ), diff --git a/packages/postgres-database/src/simcore_postgres_database/models/cluster_to_groups.py b/packages/postgres-database/src/simcore_postgres_database/models/cluster_to_groups.py deleted file mode 100644 index dc56f7d7998..00000000000 --- a/packages/postgres-database/src/simcore_postgres_database/models/cluster_to_groups.py +++ /dev/null @@ -1,70 +0,0 @@ -import sqlalchemy as sa -from sqlalchemy.sql import expression, func - -from .base import metadata - -cluster_to_groups = sa.Table( - "cluster_to_groups", - metadata, - sa.Column( - "cluster_id", - sa.BigInteger, - sa.ForeignKey( - "clusters.id", - name="fk_cluster_to_groups_id_clusters", - onupdate="CASCADE", - ondelete="CASCADE", - ), - doc="Cluster unique ID", - ), - sa.Column( - "gid", - sa.BigInteger, - sa.ForeignKey( - "groups.gid", - name="fk_cluster_to_groups_gid_groups", - onupdate="CASCADE", - ondelete="CASCADE", - ), - doc="Group unique IDentifier", - ), - # Access Rights flags --- - sa.Column( - "read", - sa.Boolean, - nullable=False, - server_default=expression.false(), - doc="If true, group can use the cluster", - ), - sa.Column( - "write", - sa.Boolean, - nullable=False, - server_default=expression.false(), - doc="If true, group can modify the cluster", - ), - sa.Column( - "delete", - sa.Boolean, - nullable=False, - server_default=expression.false(), - doc="If true, group can delete the cluster", - ), - # ----- - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Timestamp auto-generated upon creation", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp with last row update", - ), - sa.UniqueConstraint("cluster_id", "gid"), -) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/clusters.py b/packages/postgres-database/src/simcore_postgres_database/models/clusters.py deleted file mode 100644 index bd0050311d0..00000000000 --- a/packages/postgres-database/src/simcore_postgres_database/models/clusters.py +++ /dev/null @@ -1,112 +0,0 @@ -from enum import Enum - -import sqlalchemy as sa -from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.sql import func - -from .base import metadata - -# FIXME: Needs some endpoint/credentials to access the cluster - - -class ClusterType(Enum): - AWS = "AWS" - ON_PREMISE = "ON_PREMISE" - - -clusters = sa.Table( - "clusters", - metadata, - 
sa.Column( - "id", - sa.BigInteger, - nullable=False, - primary_key=True, - doc="Clusters index", - ), - sa.Column("name", sa.String, nullable=False, doc="Display name"), - sa.Column("description", sa.String, nullable=True, doc="Short description"), - sa.Column( - "type", - sa.Enum(ClusterType), - nullable=False, - doc="Classification of the cluster", - ), - sa.Column( - "owner", - sa.BigInteger, - sa.ForeignKey( - "groups.gid", - name="fk_clusters_gid_groups", - onupdate="CASCADE", - ondelete="RESTRICT", - ), - nullable=False, - doc="Identifier of the group that owns this cluster", - ), - sa.Column( - "thumbnail", - sa.String, - nullable=True, - doc="Link to image as to cluster thumbnail", - ), - sa.Column("endpoint", sa.String, nullable=False, doc="URL to access the cluster"), - sa.Column( - "authentication", - JSONB, - nullable=False, - doc="Authentication options (can be any of simple password, kerberos or jupyterhub" - ", for details see https://gateway.dask.org/authentication.html#", - ), - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Timestamp auto-generated upon creation", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), # this will auto-update on modification - doc="Timestamp with last update", - ), -) - -# ------------------------ TRIGGERS -new_cluster_trigger = sa.DDL( - """ -DROP TRIGGER IF EXISTS cluster_modification on clusters; -CREATE TRIGGER cluster_modification -AFTER INSERT ON clusters - FOR EACH ROW - EXECUTE PROCEDURE set_cluster_to_owner_group(); -""" -) - - -# --------------------------- PROCEDURES -assign_cluster_access_rights_to_owner_group_procedure = sa.DDL( - """ -CREATE OR REPLACE FUNCTION set_cluster_to_owner_group() RETURNS TRIGGER AS $$ -DECLARE - group_id BIGINT; -BEGIN - IF TG_OP = 'INSERT' THEN - INSERT INTO "cluster_to_groups" ("gid", "cluster_id", "read", "write", "delete") VALUES (NEW.owner, NEW.id, TRUE, TRUE, TRUE); - END IF; - RETURN NULL; -END; $$ LANGUAGE 'plpgsql'; - """ -) - -sa.event.listen( - clusters, "after_create", assign_cluster_access_rights_to_owner_group_procedure -) -sa.event.listen( - clusters, - "after_create", - new_cluster_trigger, -) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py index 05f7607da93..a4e5645860c 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_pipeline.py @@ -22,6 +22,8 @@ class StateType(enum.Enum): SUCCESS = "SUCCESS" FAILED = "FAILED" ABORTED = "ABORTED" + WAITING_FOR_RESOURCES = "WAITING_FOR_RESOURCES" + WAITING_FOR_CLUSTER = "WAITING_FOR_CLUSTER" def _new_uuid(): diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py index ef523775c37..af14196d184 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_runs.py @@ -1,9 +1,9 @@ -""" Computational Runs Table +"""Computational Runs Table""" -""" import sqlalchemy as sa -from sqlalchemy.sql import func +from sqlalchemy.dialects.postgresql import JSONB +from ._common import RefActions, column_created_datetime, column_modified_datetime from .base import metadata from .comp_pipeline 
import StateType @@ -25,8 +25,8 @@ sa.ForeignKey( "projects.uuid", name="fk_comp_runs_project_uuid_projects", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), nullable=False, doc="The project uuid with which the run entry is associated", @@ -37,23 +37,12 @@ sa.ForeignKey( "users.id", name="fk_comp_runs_user_id_users", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), nullable=False, doc="The user id with which the run entry is associated", ), - sa.Column( - "cluster_id", - sa.BigInteger(), - sa.ForeignKey( - "clusters.id", - name="fk_comp_runs_cluster_id_clusters", - onupdate="CASCADE", - ), - nullable=True, - doc="The cluster id on which the run entry is associated, if NULL or 0 uses the default", - ), sa.Column( "iteration", sa.BigInteger, @@ -69,33 +58,46 @@ doc="The result of the run entry", ), # dag node id and class - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="When the run entry was created", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), # this will auto-update on modification - doc="When the run entry was last modified", - ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), # utc timestamps for submission/start/end sa.Column( "started", - sa.DateTime, + sa.DateTime(timezone=True), nullable=True, doc="When the run was started", ), sa.Column( "ended", - sa.DateTime, + sa.DateTime(timezone=True), nullable=True, doc="When the run was finished", ), + sa.Column( + "cancelled", + sa.DateTime(timezone=True), + nullable=True, + doc="If filled, when cancellation was requested", + ), + sa.Column( + "scheduled", + sa.DateTime(timezone=True), + nullable=True, + doc="last time the pipeline was scheduled to be processed", + ), + sa.Column( + "processed", + sa.DateTime(timezone=True), + nullable=True, + doc="last time the pipeline was actually processed", + ), + sa.Column("metadata", JSONB, nullable=True, doc="the run optional metadata"), + sa.Column( + "use_on_demand_clusters", + sa.Boolean(), + nullable=False, + doc="the run uses on demand clusters", + ), sa.UniqueConstraint("project_uuid", "user_id", "iteration"), + sa.Index("ix_comp_runs_user_id", "user_id"), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py b/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py index c1fd22b7893..3af09bfaa01 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/comp_tasks.py @@ -1,11 +1,15 @@ -""" Computational Tasks Table +"""Computational Tasks Table""" -""" import enum import sqlalchemy as sa from sqlalchemy.dialects import postgresql +from ._common import ( + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) from .base import metadata from .comp_pipeline import StateType @@ -56,7 +60,7 @@ class NodeClass(enum.Enum): sa.Enum(StateType), nullable=False, server_default=StateType.NOT_STARTED.value, - doc="Current state in the task lifecicle", + doc="Current state in the task lifecycle", ), sa.Column( "errors", @@ -65,13 +69,51 @@ class NodeClass(enum.Enum): doc="List[models_library.errors.ErrorDict] with error information" " for a failing state, otherwise set to None", ), + sa.Column( + "progress", + 
sa.Numeric(precision=3, scale=2), # numbers between 0.00 and 1.00 + nullable=True, + doc="current progress of the task if available", + ), + sa.Column( + "start", sa.DateTime(timezone=True), doc="UTC timestamp when task started" + ), + sa.Column( + "end", sa.DateTime(timezone=True), doc="UTC timestamp for task completion" + ), + sa.Column( + "last_heartbeat", + sa.DateTime(timezone=True), + doc="UTC timestamp for last task running check", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Column( + "pricing_info", + postgresql.JSONB, + nullable=True, + doc="Billing information of this task", + ), + sa.Column( + "hardware_info", + postgresql.JSONB, + nullable=True, + doc="Hardware information of this task", + ), + # deprecated columns must be kept due to legacy services # utc timestamps for submission/start/end - sa.Column("submit", sa.DateTime, doc="UTC timestamp for task submission"), - sa.Column("start", sa.DateTime, doc="UTC timestamp when task started"), - sa.Column("end", sa.DateTime, doc="UTC timestamp for task completion"), + sa.Column( + "submit", + sa.DateTime(timezone=True), + server_default=sa.text("'1900-01-01T00:00:00Z'::timestamptz"), + doc="[DEPRECATED unused but kept for legacy services and must be filled with a default value of 1 January 1900]", + ), + # ------ sa.UniqueConstraint("project_id", "node_id", name="project_node_uniqueness"), + sa.Index("ix_comp_tasks_project_id", "project_id"), ) +register_modified_datetime_auto_update_trigger(comp_tasks) DB_PROCEDURE_NAME: str = "notify_comp_tasks_changed" DB_TRIGGER_NAME: str = f"{DB_PROCEDURE_NAME}_event" @@ -110,10 +152,14 @@ class NodeClass(enum.Enum): FROM jsonb_each(to_jsonb(OLD)) AS pre, jsonb_each(to_jsonb(NEW)) AS post WHERE pre.key = post.key AND pre.value IS DISTINCT FROM post.value; - payload = json_build_object('table', TG_TABLE_NAME, - 'changes', changes, - 'action', TG_OP, - 'data', row_to_json(record)); + payload = json_build_object( + 'table', TG_TABLE_NAME, + 'changes', changes, + 'action', TG_OP, + 'task_id', record.task_id, + 'project_id', record.project_id, + 'node_id', record.node_id + ); PERFORM pg_notify('{DB_CHANNEL_NAME}', payload::text); diff --git a/packages/postgres-database/src/simcore_postgres_database/models/confirmations.py b/packages/postgres-database/src/simcore_postgres_database/models/confirmations.py index caa781481f5..6fd56e8c8e0 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/confirmations.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/confirmations.py @@ -8,15 +8,15 @@ import enum import sqlalchemy as sa -from sqlalchemy.sql import func +from ._common import RefActions from .base import metadata from .users import users class ConfirmationAction(enum.Enum): REGISTRATION = "REGISTRATION" - RESET_PASSWORD = "RESET_PASSWORD" + RESET_PASSWORD = "RESET_PASSWORD" # noqa: S105 CHANGE_EMAIL = "CHANGE_EMAIL" INVITATION = "INVITATION" @@ -49,15 +49,18 @@ class ConfirmationAction(enum.Enum): ), sa.Column( "created_at", - sa.DateTime, + sa.DateTime(), nullable=False, - server_default=func.now(), + # NOTE: that here it would be convenient to have a server_default=now()! doc="Creation date of this code." "Can be used as reference to determine the expiration date. 
SEE ${ACTION}_CONFIRMATION_LIFETIME", ), # constraints ---------------- sa.PrimaryKeyConstraint("code", name="confirmation_code"), sa.ForeignKeyConstraint( - ["user_id"], [users.c.id], name="user_confirmation_fkey", ondelete="CASCADE" + ["user_id"], + [users.c.id], + name="user_confirmation_fkey", + ondelete=RefActions.CASCADE, ), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/conversation_messages.py b/packages/postgres-database/src/simcore_postgres_database/models/conversation_messages.py new file mode 100644 index 00000000000..c26805be087 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/conversation_messages.py @@ -0,0 +1,68 @@ +import enum + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .conversations import conversations +from .groups import groups + + +class ConversationMessageType(enum.Enum): + MESSAGE = "MESSAGE" + NOTIFICATION = "NOTIFICATION" # Special type of message used for storing notifications in the conversation + + +conversation_messages = sa.Table( + "conversation_messages", + metadata, + sa.Column( + "message_id", + UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "conversation_id", + UUID(as_uuid=True), + sa.ForeignKey( + conversations.c.conversation_id, + name="fk_conversation_messages_project_uuid", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + index=True, + nullable=False, + ), + # NOTE: if the user primary group ID gets deleted, it sets to null which should be interpreted as "unknown" user + sa.Column( + "user_group_id", + sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + name="fk_conversation_messages_user_primary_gid", + ondelete=RefActions.SET_NULL, + ), + doc="user primary group ID who created the message", + nullable=True, + ), + sa.Column( + "content", + sa.String, + nullable=False, + ), + sa.Column( + "type", + sa.Enum(ConversationMessageType), + doc="Classification of the node associated to this task", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + # indexes + sa.Index( + "idx_conversation_messages_created_desc", + sa.desc("created"), + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/conversations.py b/packages/postgres-database/src/simcore_postgres_database/models/conversations.py new file mode 100644 index 00000000000..3072d91dda5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/conversations.py @@ -0,0 +1,75 @@ +import enum + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .groups import groups +from .projects import projects + + +class ConversationType(enum.Enum): + PROJECT_STATIC = "PROJECT_STATIC" # Static conversation for the project + PROJECT_ANNOTATION = "PROJECT_ANNOTATION" # Something like sticky note, can be located anywhere in the pipeline UI + + +conversations = sa.Table( + "conversations", + metadata, + sa.Column( + "conversation_id", + UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "name", + sa.String, + nullable=False, + ), + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + 
projects.c.uuid, + name="fk_projects_conversations_project_uuid", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + index=True, + nullable=True, + ), + # NOTE: if the user primary group ID gets deleted, it sets to null which should be interpreted as "unknown" user + sa.Column( + "user_group_id", + sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + name="fk_conversations_user_primary_gid", + ondelete=RefActions.SET_NULL, + ), + doc="user primary group ID who created the message", + nullable=True, + ), + sa.Column( + "type", + sa.Enum(ConversationType), + doc="Classification of the node associated to this task", + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_conversations_product_name", + ), + nullable=False, + doc="Product name identifier. If None, then the item is not exposed", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/direct_acyclic_graphs.py b/packages/postgres-database/src/simcore_postgres_database/models/direct_acyclic_graphs.py deleted file mode 100644 index 16955ebf155..00000000000 --- a/packages/postgres-database/src/simcore_postgres_database/models/direct_acyclic_graphs.py +++ /dev/null @@ -1,24 +0,0 @@ -import sqlalchemy as sa -from sqlalchemy import Column, Integer, String - -from .base import Base - - -class DAG(Base): - """Table with Directed Acyclic Graphs - - Managed by the catalog's service - """ - - __tablename__ = "dags" - - id = Column(Integer, primary_key=True, index=True) - key = Column(String, index=True) - version = Column(String) - name = Column(String, nullable=False) - description = Column(String, nullable=True) - contact = Column(String, index=True) - workbench = Column(sa.JSON, nullable=False) - - -dags = DAG.__table__ diff --git a/packages/postgres-database/src/simcore_postgres_database/models/file_meta_data.py b/packages/postgres-database/src/simcore_postgres_database/models/file_meta_data.py index 0101bdcb51a..9ece039863f 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/file_meta_data.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/file_meta_data.py @@ -9,9 +9,9 @@ sa.Column("location", sa.String()), sa.Column("bucket_name", sa.String()), sa.Column("object_name", sa.String()), - sa.Column("project_id", sa.String()), + sa.Column("project_id", sa.String(), index=True), sa.Column("node_id", sa.String()), - sa.Column("user_id", sa.String()), + sa.Column("user_id", sa.String(), index=True), sa.Column("file_id", sa.String(), primary_key=True), sa.Column("created_at", sa.String()), sa.Column("last_modified", sa.String()), @@ -40,4 +40,20 @@ sa.Column( "upload_expires_at", sa.DateTime(), nullable=True, doc="Timestamp of expiration" ), + sa.Column( + "is_directory", + sa.Boolean(), + nullable=False, + server_default=sa.text("false"), + doc="Set True when file_id is a directory", + index=True, + ), + sa.Column( + "sha256_checksum", + sa.String(), + nullable=True, + server_default=sa.null(), + doc="SHA256 checksum of the file content", + index=True, + ), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/folders_v2.py b/packages/postgres-database/src/simcore_postgres_database/models/folders_v2.py new file mode 100644 index 00000000000..eebfd2079f8 --- /dev/null +++ 
b/packages/postgres-database/src/simcore_postgres_database/models/folders_v2.py @@ -0,0 +1,95 @@ +import sqlalchemy as sa +from sqlalchemy.sql import expression + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + column_trashed_by_user, + column_trashed_datetime, +) +from .base import metadata +from .users import users +from .workspaces import workspaces + +folders_v2 = sa.Table( + "folders_v2", + metadata, + sa.Column( + "folder_id", + sa.BigInteger, + nullable=False, + autoincrement=True, + primary_key=True, + ), + sa.Column( + "name", + sa.String, + nullable=False, + doc="name of the folder", + ), + sa.Column( + "parent_folder_id", + sa.BigInteger, + sa.ForeignKey( + "folders_v2.folder_id", + name="fk_new_folders_to_folders_id", + ), + nullable=True, + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_new_folders_to_products_name", + ), + nullable=False, + ), + sa.Column( + "user_id", + sa.BigInteger, + sa.ForeignKey( + "users.id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_folders_to_user_id", + ), + nullable=True, + ), + sa.Column( + "workspace_id", + sa.BigInteger, + sa.ForeignKey( + workspaces.c.workspace_id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_folders_to_workspace_id", + ), + nullable=True, + ), + sa.Column( + "created_by_gid", + sa.BigInteger, + sa.ForeignKey( + "groups.gid", + name="fk_new_folders_to_groups_gid", + ondelete=RefActions.SET_NULL, + ), + nullable=True, + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + column_trashed_datetime("folders"), + column_trashed_by_user("folders", users_table=users), + sa.Column( + "trashed_explicitly", + sa.Boolean, + nullable=False, + server_default=expression.false(), + comment="Indicates whether the folder was explicitly trashed by the user (true)" + " or inherited its trashed status from a parent (false) [default].", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_table.py b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_table.py new file mode 100644 index 00000000000..e2c199d84dd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_table.py @@ -0,0 +1,36 @@ +import uuid + +import sqlalchemy as sa +from simcore_postgres_database.models._common import ( + column_created_datetime, + column_modified_datetime, +) +from sqlalchemy.dialects.postgresql import UUID + +from .base import metadata + +function_job_collections_table = sa.Table( + "funcapi_function_job_collections", + metadata, + sa.Column( + "uuid", + UUID(as_uuid=True), + default=uuid.uuid4, + primary_key=True, + index=True, + doc="Unique id of the function job collection", + ), + sa.Column( + "title", + sa.String, + doc="Title of the function job collection", + ), + sa.Column( + "description", + sa.String, + doc="Description of the function job collection", + ), + column_created_datetime(), + column_modified_datetime(), + sa.PrimaryKeyConstraint("uuid", name="funcapi_function_job_collections_pk"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_to_function_jobs_table.py 
b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_to_function_jobs_table.py new file mode 100644 index 00000000000..ed8ec97249e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_job_collections_to_function_jobs_table.py @@ -0,0 +1,45 @@ +"""Functions table + +- List of functions served by the simcore platform +""" + +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .funcapi_function_job_collections_table import function_job_collections_table +from .funcapi_function_jobs_table import function_jobs_table + +function_job_collections_to_function_jobs_table = sa.Table( + "funcapi_function_job_collections_to_function_jobs", + metadata, + sa.Column( + "function_job_collection_uuid", + sa.ForeignKey( + function_job_collections_table.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_func_job_coll_to_func_jobs_to_func_job_coll_uuid", + ), + nullable=False, + doc="Unique identifier of the function job collection", + ), + sa.Column( + "function_job_uuid", + sa.ForeignKey( + function_jobs_table.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_func_job_coll_to_func_jobs_to_func_job_uuid", + ), + nullable=False, + doc="Unique identifier of the function job", + ), + column_created_datetime(), + column_modified_datetime(), + sa.PrimaryKeyConstraint( + "function_job_collection_uuid", + "function_job_uuid", + name="funcapi_function_job_collections_to_function_jobs_pk", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_jobs_table.py b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_jobs_table.py new file mode 100644 index 00000000000..a33b214fe00 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_function_jobs_table.py @@ -0,0 +1,77 @@ +"""Function jobs table + +- List of function jobs served by the simcore platform +""" + +import uuid + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .funcapi_functions_table import functions_table + +function_jobs_table = sa.Table( + "funcapi_function_jobs", + metadata, + sa.Column( + "uuid", + UUID(as_uuid=True), + primary_key=True, + index=True, + default=uuid.uuid4, + doc="Unique id of the function job", + ), + sa.Column( + "title", + sa.String, + doc="Name of the function job", + ), + sa.Column( + "description", + sa.String, + doc="Description of the function job", + ), + sa.Column( + "function_uuid", + sa.ForeignKey( + functions_table.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_function_jobs_to_function_uuid", + ), + nullable=False, + index=True, + doc="Unique identifier of the function", + ), + sa.Column( + "function_class", + sa.String, + doc="Class of the function", + ), + sa.Column( + "status", + sa.String, + doc="Status of the function job", + ), + sa.Column( + "inputs", + JSONB, + doc="Inputs of the function job", + ), + sa.Column( + "outputs", + JSONB, + doc="Outputs of the function job", + ), + sa.Column( + "class_specific_data", + JSONB, + nullable=True, + doc="Fields specific for a function class", + ), + column_created_datetime(), + column_modified_datetime(), + sa.PrimaryKeyConstraint("uuid", 
name="funcapi_function_jobs_pk"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_table.py b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_table.py new file mode 100644 index 00000000000..6d570463b9e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/funcapi_functions_table.py @@ -0,0 +1,80 @@ +"""Functions table + +- List of functions served by the simcore platform +""" + +import uuid + +import sqlalchemy as sa +from simcore_postgres_database.models._common import ( + column_created_datetime, + column_modified_datetime, +) +from sqlalchemy.dialects.postgresql import JSONB, UUID + +from .base import metadata + +functions_table = sa.Table( + "funcapi_functions", + metadata, + sa.Column( + "uuid", + UUID(as_uuid=True), + primary_key=True, + index=True, + default=uuid.uuid4, + doc="Unique id of the function", + ), + sa.Column( + "title", + sa.String, + doc="Name of the function", + ), + sa.Column( + "function_class", + sa.String, + doc="Class of the function", + ), + sa.Column( + "description", + sa.String, + doc="Description of the function", + ), + sa.Column( + "input_schema", + JSONB, + doc="Input schema of the function", + ), + sa.Column( + "output_schema", + JSONB, + doc="Output schema of the function", + ), + sa.Column( + "system_tags", + JSONB, + nullable=True, + doc="System-level tags of the function", + ), + sa.Column( + "user_tags", + JSONB, + nullable=True, + doc="User-level tags of the function", + ), + sa.Column( + "class_specific_data", + JSONB, + nullable=True, + doc="Fields specific for a function class", + ), + sa.Column( + "default_inputs", + JSONB, + nullable=True, + doc="Default inputs of the function", + ), + column_created_datetime(), + column_modified_datetime(), + sa.PrimaryKeyConstraint("uuid", name="funcapi_functions_pk"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/groups.py b/packages/postgres-database/src/simcore_postgres_database/models/groups.py index 0aec758a6c6..940e1a78769 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/groups.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/groups.py @@ -4,26 +4,16 @@ - Groups have a ID, name and a list of users that belong to the group """ -import enum import sqlalchemy as sa +from common_library.groups_enums import GroupType from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.sql import func +from ._common import RefActions from .base import metadata - -class GroupType(enum.Enum): - """ - standard: standard group, e.g. any group that is not a primary group or special group such as the everyone group - primary: primary group, e.g. the primary group is the user own defined group that typically only contain the user (same as in linux) - everyone: the only group for all users - """ - - STANDARD = "standard" - PRIMARY = "primary" - EVERYONE = "everyone" - +__all__: tuple[str, ...] 
= ("GroupType",) groups = sa.Table( "groups", @@ -86,8 +76,8 @@ class GroupType(enum.Enum): sa.ForeignKey( "users.id", name="fk_user_to_groups_id_users", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), doc="User unique IDentifier", ), @@ -97,8 +87,8 @@ class GroupType(enum.Enum): sa.ForeignKey( "groups.gid", name="fk_user_to_groups_gid_groups", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), doc="Group unique IDentifier", ), diff --git a/packages/postgres-database/src/simcore_postgres_database/models/groups_extra_properties.py b/packages/postgres-database/src/simcore_postgres_database/models/groups_extra_properties.py index 384349eb238..e25a1bd3b2b 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/groups_extra_properties.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/groups_extra_properties.py @@ -1,21 +1,19 @@ import sqlalchemy as sa -from ._common import column_created_datetime, column_modified_datetime +from ._common import RefActions, column_created_datetime, column_modified_datetime from .base import metadata -# -# groups_extra_properties: Maps internet access permissions to groups -# groups_extra_properties = sa.Table( "groups_extra_properties", + # groups_extra_properties: Maps internet access permissions to groups metadata, sa.Column( "group_id", sa.BigInteger, sa.ForeignKey( "groups.gid", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, name="fk_groups_extra_properties_to_group_group_id", ), nullable=False, @@ -26,8 +24,8 @@ sa.VARCHAR, sa.ForeignKey( "products.name", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, name="fk_groups_extra_properties_to_products_name", ), nullable=False, @@ -42,7 +40,38 @@ "If a user is part of this group, it's " "service can access the internet.", ), + sa.Column( + "override_services_specifications", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="allows group to override default service specifications.", + ), + sa.Column( + "use_on_demand_clusters", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="If true, group will use on-demand clusters", + ), + sa.Column( + "enable_telemetry", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="If true, will send telemetry for new style dynamic services to frontend", + ), + sa.Column( + "enable_efs", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="If true, will mount efs distributed file system when dynamic services starts", + ), + sa.UniqueConstraint( + "group_id", "product_name", name="group_id_product_name_uniqueness" + ), # TIME STAMPS ---- - column_created_datetime(), - column_modified_datetime(), + column_created_datetime(timezone=False), + column_modified_datetime(timezone=False), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/licensed_item_to_resource.py b/packages/postgres-database/src/simcore_postgres_database/models/licensed_item_to_resource.py new file mode 100644 index 00000000000..81141bc132a --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/licensed_item_to_resource.py @@ -0,0 +1,60 @@ +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from ._common import RefActions, 
column_created_datetime, column_modified_datetime +from .base import metadata + +licensed_item_to_resource = sa.Table( + "licensed_item_to_resource", + metadata, + sa.Column( + "licensed_item_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey( + "licensed_items.licensed_item_id", + name="fk_licensed_item_to_resource_licensed_item_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + ), + sa.Column( + "licensed_resource_id", + postgresql.UUID(as_uuid=True), + sa.ForeignKey( + "licensed_resources.licensed_resource_id", + name="fk_licensed_item_to_resource_licensed_resource_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_licensed_item_to_resource_product_name", + ), + nullable=False, + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + ######### + sa.PrimaryKeyConstraint( + "licensed_item_id", + "licensed_resource_id", + name="pk_licensed_item_to_resource_item_and_resource_id", + ), + # NOTE: Currently, there is a constraint that a resource item ID cannot be in multiple licensed items. + # The reason is that the license key and license version coming from the internal license server are part of the licensed resource domain. + # Sim4Life performs a mapping on their side, where the license key and version are mapped to a licensed item. + # If this constraint is broken, the mapping logic in Sim4Life might break. + sa.UniqueConstraint( + "product_name", + "licensed_resource_id", + name="uq_licensed_item_to_resource_resource_id", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/licensed_items.py b/packages/postgres-database/src/simcore_postgres_database/models/licensed_items.py new file mode 100644 index 00000000000..34003d9b042 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/licensed_items.py @@ -0,0 +1,88 @@ +""" resource_tracker_service_runs table +""" + +import enum + +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + + +class LicensedResourceType(str, enum.Enum): + VIP_MODEL = "VIP_MODEL" + + +licensed_items = sa.Table( + "licensed_items", + metadata, + sa.Column( + "licensed_item_id", + postgresql.UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "key", + sa.String, + nullable=False, + ), + sa.Column( + "version", + sa.String, + nullable=False, + ), + sa.Column( + "display_name", + sa.String, + nullable=False, + doc="Display name for front-end", + ), + sa.Column( + "licensed_resource_type", + sa.Enum(LicensedResourceType), + nullable=False, + doc="Resource type, ex. VIP_MODEL", + ), + sa.Column( + "pricing_plan_id", + sa.BigInteger, + sa.ForeignKey( + "resource_tracker_pricing_plans.pricing_plan_id", + name="fk_resource_tracker_license_packages_pricing_plan_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + nullable=False, + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_resource_tracker_license_packages_product_name", + ), + nullable=False, + doc="Product name identifier. 
If None, then the item is not exposed", + ), + sa.Column( + "is_hidden_on_market", + sa.Boolean(), + nullable=False, + server_default=sa.text("false"), + doc="If true, the item is not listed on the market. (Public API might want to see all of them, even if they are not listed on the Market)", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Index( + "idx_licensed_items_key_version_product", + "key", + "version", + "product_name", + unique=True, + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/licensed_resources.py b/packages/postgres-database/src/simcore_postgres_database/models/licensed_resources.py new file mode 100644 index 00000000000..52747e9668e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/licensed_resources.py @@ -0,0 +1,65 @@ +""" resource_tracker_service_runs table +""" + + +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from ._common import ( + column_created_datetime, + column_modified_datetime, + column_trashed_datetime, +) +from .base import metadata +from .licensed_items import LicensedResourceType + +licensed_resources = sa.Table( + "licensed_resources", + metadata, + sa.Column( + "licensed_resource_id", + postgresql.UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "display_name", + sa.String, + nullable=False, + doc="Display name for front-end", + ), + sa.Column( + "licensed_resource_name", + sa.String, + nullable=False, + doc="Resource name identifier", + ), + sa.Column( + "licensed_resource_type", + sa.Enum(LicensedResourceType), + nullable=False, + doc="Resource type, ex. VIP_MODEL", + ), + sa.Column( + "licensed_resource_data", + postgresql.JSONB, + nullable=True, + doc="Resource metadata. Used for read-only purposes", + ), + sa.Column( + "priority", + sa.SmallInteger, + nullable=False, + server_default="0", + doc="Used for sorting 0 (first) > 1 (second) > 2 (third) (ex. if we want to manually adjust how it is presented in the Market)", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + column_trashed_datetime("licensed_resources"), + sa.UniqueConstraint( + "licensed_resource_name", + "licensed_resource_type", + name="uq_licensed_resource_name_type2", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/payments_autorecharge.py b/packages/postgres-database/src/simcore_postgres_database/models/payments_autorecharge.py new file mode 100644 index 00000000000..df30251c50c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/payments_autorecharge.py @@ -0,0 +1,84 @@ +import sqlalchemy as sa + +from ._common import ( + NUMERIC_KWARGS, + RefActions, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .payments_methods import payments_methods + +# +# NOTE: +# - This table was designed to work in an isolated database that contains only payments_* tables +# - Specifies autorecharge settings for each wallet, including a minimum balance and primary payment methods. 
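A minimal usage sketch (editorial illustration under assumptions, not code from this changeset): it shows how a service could read a wallet's auto-recharge settings from this table with SQLAlchemy Core. The helper name get_wallet_autorecharge and the connection object conn are illustrative assumptions only.

    import sqlalchemy as sa

    from simcore_postgres_database.models.payments_autorecharge import (
        payments_autorecharge,
    )


    def get_wallet_autorecharge(conn: sa.engine.Connection, wallet_id: int):
        # wallet_id is declared unique in this table, so at most one row can match
        stmt = sa.select(payments_autorecharge).where(
            payments_autorecharge.c.wallet_id == wallet_id
        )
        return conn.execute(stmt).one_or_none()

Because the table deliberately carries no foreign key to a wallets table, checking that wallet_id actually exists is left to the service layer.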
+ +payments_autorecharge = sa.Table( + "payments_autorecharge", + metadata, + sa.Column( + "id", + sa.BigInteger, + primary_key=True, + autoincrement=True, + nullable=False, + doc="Unique payment-automation identifier", + ), + sa.Column( + "wallet_id", + sa.BigInteger, + # NOTE: cannot use foreign-key because it would require a link to wallets table + nullable=False, + doc="Wallet associated to the auto-recharge", + unique=True, + ), + # + # Recharge Limits and Controls + # + sa.Column( + "enabled", + sa.Boolean, + nullable=False, + server_default=sa.false(), + doc="If true, the auto-recharge is enabled on this wallet", + ), + sa.Column( + "primary_payment_method_id", + sa.String, + sa.ForeignKey( + payments_methods.c.payment_method_id, + name="fk_payments_autorecharge_primary_payment_method_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + unique=True, + doc="Primary payment method selected for auto-recharge or None if unassigned", + # NOTE: Initially we thought 'ondelete=SET NULL' but it would require nullability and therefore dropping uniqueness + # Not to mention the state where 'enabled=True' and 'primary_payment_method_id=None'. Finally we decided to fully + # delete the line which will result in wallet default introduced by the api-layer. The only disadvantage is that + # the user would lose his previous settings. + ), + sa.Column( + "top_up_amount_in_usd", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="[Required] Increase in USD when balance reaches minimum balance threshold", + ), + sa.Column( + "monthly_limit_in_usd", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=True, + server_default=None, + doc="[Optional] Maximum amount in USD charged within a natural month" + "If None, indicates no limit", + ), + # time-stamps + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), +) + + +register_modified_datetime_auto_update_trigger(payments_autorecharge) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/payments_methods.py b/packages/postgres-database/src/simcore_postgres_database/models/payments_methods.py new file mode 100644 index 00000000000..3aabc3f992c --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/payments_methods.py @@ -0,0 +1,89 @@ +import enum + +import sqlalchemy as sa + +from ._common import ( + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata + + +@enum.unique +class InitPromptAckFlowState(str, enum.Enum): + PENDING = "PENDING" # initiated + SUCCESS = "SUCCESS" # completed (ack) with success + FAILED = "FAILED" # failed + CANCELED = "CANCELED" # explicitly aborted by user + + +# +# NOTE: +# - This table was designed to work in an isolated database. For that reason +# we do not use ForeignKeys to establish relations with other tables (e.g. user_id). +# - Payment methods are owned by a user and associated to a wallet. When the same CC is added +# in the framework by different users, the gateway will produce different payment_method_id for each +# of them (VERIFY assumption) +# - A payment method is unique, i.e. only one per wallet and user. 
For the moment, we intentionally avoid the +# possibility of associating a payment method to more than one wallet to avoid complexity +# +payments_methods = sa.Table( + "payments_methods", + metadata, + sa.Column( + "payment_method_id", + sa.String, + nullable=False, + primary_key=True, + doc="Unique identifier of the payment method provided by payment gateway", + ), + sa.Column( + "user_id", + sa.BigInteger, + nullable=False, + doc="Unique identifier of the user", + index=True, + ), + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + doc="Unique identifier of the wallet owned by the user", + index=True, + ), + # + # States of Init-Prompt-Ack flow + # + sa.Column( + "initiated_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamps init step of the flow", + ), + sa.Column( + "completed_at", + sa.DateTime(timezone=True), + nullable=True, + doc="Timestamps ack step of the flow", + ), + sa.Column( + "state", + sa.Enum(InitPromptAckFlowState), + nullable=False, + default=InitPromptAckFlowState.PENDING, + doc="Current state of this row in the flow ", + ), + sa.Column( + "state_message", + sa.Text, + nullable=True, + doc="State message with details on the state e.g. failure messages", + ), + # time-stamps + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), +) + + +register_modified_datetime_auto_update_trigger(payments_methods) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/payments_transactions.py b/packages/postgres-database/src/simcore_postgres_database/models/payments_transactions.py new file mode 100644 index 00000000000..21916b0615b --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/payments_transactions.py @@ -0,0 +1,145 @@ +from enum import StrEnum, unique + +import sqlalchemy as sa + +from ._common import ( + NUMERIC_KWARGS, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata + + +@unique +class PaymentTransactionState(StrEnum): + PENDING = "PENDING" # payment initiated + SUCCESS = "SUCCESS" # payment completed with success + FAILED = "FAILED" # payment failed + CANCELED = "CANCELED" # payment explicitly aborted by user + + def is_completed(self) -> bool: + return self != self.PENDING + + def is_acknowledged(self) -> bool: + return self in (self.SUCCESS, self.FAILED) + + +# +# NOTE: +# - This table was designed to work in an isolated database. For that reason +# we do not use ForeignKeys to establish relations with other tables (e.g. user_id, product_name, etc). +# +payments_transactions = sa.Table( + "payments_transactions", + metadata, + sa.Column( + "payment_id", + sa.String, + nullable=False, + primary_key=True, + doc="Identifier of the payment provided by payment gateway", + ), + sa.Column( + "price_dollars", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="Total amount of the transaction (in dollars). E.g. 1234.12 $", + ), + # + # Concept/Info + # + sa.Column( + "osparc_credits", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="Amount of credits that will be added to the wallet_id " + "once the transaction completes successfully." + "E.g. 
1234.12 credits", + ), + sa.Column( + "product_name", + sa.String, + nullable=False, + doc="Product name from which the transaction took place", + ), + sa.Column( + "user_id", + sa.BigInteger, + nullable=False, + doc="User unique identifier", + index=True, + ), + sa.Column( + "user_email", + sa.String, + nullable=False, + doc="User email at the time of the transaction", + ), + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + doc="Wallet identifier owned by the user", + index=True, + ), + sa.Column( + "comment", + sa.Text, + nullable=True, + doc="Extra comment on this payment (optional)", + ), + sa.Column( + "invoice_url", + sa.String, + nullable=True, + doc="Link to invoice of this transaction. Available when completed", + ), + sa.Column( + "stripe_invoice_id", + sa.String, + nullable=True, + doc="Invoice ID of invoice of this transaction. Available when completed", + ), + sa.Column( + "invoice_pdf_url", + sa.String, + nullable=True, + doc="Link to invoice PDF. Available when completed", + ), + # + # States + # + sa.Column( + "initiated_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamps when transaction initiated (successful response to /init)", + ), + sa.Column( + "completed_at", + sa.DateTime(timezone=True), + nullable=True, + doc="Timestamps when transaction completed (payment acked or cancelled)", + ), + sa.Column( + "state", + sa.Enum(PaymentTransactionState), + nullable=False, + default=PaymentTransactionState.PENDING, + doc="A transaction goes through multiple states. " + "When initiated state=PENDING and is completed with SUCCESS/FAILURE/CANCELED", + ), + sa.Column( + "state_message", + sa.Text, + nullable=True, + doc="State message with details on the state e.g. failure messages", + ), + # timestamps for this row + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), +) + + +register_modified_datetime_auto_update_trigger(payments_transactions) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/products.py b/packages/postgres-database/src/simcore_postgres_database/models/products.py index 2558956641f..bdb8e080d23 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/products.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/products.py @@ -1,17 +1,21 @@ -""" Products table +"""Products table - - List of products served by the simcore platform - - Products have a name and an associated host (defined by a regex) - - Every product has a front-end with exactly the same name +- List of products served by the simcore platform +- Products have a name and an associated host (defined by a regex) +- Every product has a front-end with exactly the same name """ -import json -from typing import Literal, TypedDict +from typing import Literal import sqlalchemy as sa +from common_library.json_serialization import json_dumps from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.sql import func +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) +from ._common import RefActions from .base import metadata from .groups import groups from .jinja2_templates import jinja2_templates @@ -25,17 +29,31 @@ # +class VendorUI(TypedDict, total=True): + logo_url: str # vendor logo url + strong_color: str # vendor main color + project_alias: str # project alias for the product (e.g. "project" or "study") + + class Vendor(TypedDict, total=False): """ Brand information about the vendor E.g. 
company name, address, copyright, etc. """ - name: str - copyright: str - url: str + name: str # e.g. IT'IS Foundation + address: str # e.g. Zeughausstrasse 43, 8004 Zurich, Switzerland + copyright: str # copyright message + + url: str # vendor website license_url: str # Which are the license terms? (if applies) + invitation_url: str # How to request a trial invitation? (if applies) + invitation_form: bool # If True, it takes precedence over invitation_url and asks the FE to show the form (if defined) + + release_notes_url_template: str # a template url where `{vtag}` will be replaced, eg: "https://example.com/{vtag}.md" + + ui: VendorUI class IssueTracker(TypedDict, total=True): @@ -96,7 +114,7 @@ class ProductLoginSettingsDict(TypedDict, total=False): # NOTE: defaults affects migration!! LOGIN_SETTINGS_DEFAULT = ProductLoginSettingsDict() # = {} -_LOGIN_SETTINGS_SERVER_DEFAULT = json.dumps(LOGIN_SETTINGS_DEFAULT) +_LOGIN_SETTINGS_SERVER_DEFAULT = json_dumps(LOGIN_SETTINGS_DEFAULT) # @@ -136,6 +154,7 @@ class ProductLoginSettingsDict(TypedDict, total=False): nullable=False, doc="Regular expression that matches product hostname from an url string", ), + # EMAILS -------------------- sa.Column( "support_email", sa.String, @@ -144,6 +163,12 @@ class ProductLoginSettingsDict(TypedDict, total=False): doc="Support email for this product" 'Therefore smtp_sender = f"{display_name} support <{support_email}>"', ), + sa.Column( + "product_owners_email", + sa.String, + nullable=True, + doc="Alternative support email directed to POs only (e.g. for account request, sales, etc)", + ), sa.Column( "twilio_messaging_sid", sa.String, @@ -184,18 +209,26 @@ class ProductLoginSettingsDict(TypedDict, total=False): doc="Overrides simcore_service_webserver.login.settings.LoginSettings." "SEE LoginSettingsForProduct", ), + sa.Column( + "ui", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Front-end owned UI configuration", + ), sa.Column( "registration_email_template", sa.String, sa.ForeignKey( jinja2_templates.c.name, name="fk_jinja2_templates_name", - ondelete="SET NULL", - onupdate="CASCADE", + ondelete=RefActions.SET_NULL, + onupdate=RefActions.CASCADE, ), nullable=True, doc="Custom jinja2 template for registration email", ), + # lifecycle sa.Column( "created", sa.DateTime(), @@ -229,8 +262,8 @@ class ProductLoginSettingsDict(TypedDict, total=False): sa.ForeignKey( groups.c.gid, name="fk_products_group_id", - ondelete="SET NULL", - onupdate="CASCADE", + ondelete=RefActions.SET_NULL, + onupdate=RefActions.CASCADE, ), unique=True, nullable=True, diff --git a/packages/postgres-database/src/simcore_postgres_database/models/products_prices.py b/packages/postgres-database/src/simcore_postgres_database/models/products_prices.py new file mode 100644 index 00000000000..b0b652cd310 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/products_prices.py @@ -0,0 +1,73 @@ +import sqlalchemy as sa + +from ._common import NUMERIC_KWARGS, RefActions +from .base import metadata +from .products import products + +# +# - Every product has an authorized price +# - The price is valid from the creation date until a new price is created +# - No rows are deleted! 
+# - If a product has no price, it is assumed zero +# + +products_prices = sa.Table( + "products_prices", + metadata, + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + products.c.name, + name="fk_products_prices_product_name", + ondelete=RefActions.RESTRICT, + onupdate=RefActions.CASCADE, + ), + nullable=False, + doc="Product name", + ), + sa.Column( + "usd_per_credit", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="Price in USD/credit >=0. Must be in sync with Stripe product price (stripe_price_id column in this table).", + ), + sa.Column( + "min_payment_amount_usd", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + server_default=sa.text("10.00"), + doc="Minimum amount in USD that can be paid for this product.", + ), + sa.Column( + "comment", + sa.String, + nullable=False, + doc="For the moment a comment on the product owner (PO) who authorized this price", + ), + sa.Column( + "valid_from", + sa.DateTime(timezone=True), + nullable=False, + server_default=sa.sql.func.now(), + doc="Timestamp auto-generated upon creation", + ), + sa.Column( + "stripe_price_id", + sa.String, + nullable=False, + doc="Stripe product price must be in sync with usd_per_credit rate field in this table. Currently created manually in Stripe", + ), + sa.Column( + "stripe_tax_rate_id", + sa.String, + nullable=False, + doc="Stripe tax rate ID associated to this product. Currently created manually in Stripe", + ), + sa.CheckConstraint( + "usd_per_credit >= 0", name="non_negative_usd_per_credit_constraint" + ), +) + + +__all__: tuple[str, ...] = ("NUMERIC_KWARGS",) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/products_to_templates.py b/packages/postgres-database/src/simcore_postgres_database/models/products_to_templates.py new file mode 100644 index 00000000000..44115660735 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/products_to_templates.py @@ -0,0 +1,44 @@ +import sqlalchemy as sa + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .jinja2_templates import jinja2_templates + +products_to_templates = sa.Table( + "products_to_templates", + metadata, + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_products_to_templates_product_name", + ), + nullable=False, + ), + sa.Column( + "template_name", + sa.String, + sa.ForeignKey( + jinja2_templates.c.name, + name="fk_products_to_templates_template_name", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + nullable=True, + doc="Custom jinja2 template", + ), + # TIME STAMPS ---- + column_created_datetime(timezone=False), + column_modified_datetime(timezone=False), + sa.UniqueConstraint("product_name", "template_name"), +) + +register_modified_datetime_auto_update_trigger(products_to_templates) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/project_to_groups.py b/packages/postgres-database/src/simcore_postgres_database/models/project_to_groups.py new file mode 100644 index 00000000000..4ae75fa4036 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/project_to_groups.py @@ -0,0 +1,64 @@ +import sqlalchemy as sa +from sqlalchemy.sql import expression + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base 
import metadata +from .groups import groups +from .projects import projects + +project_to_groups = sa.Table( + "project_to_groups", + metadata, + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + name="fk_project_to_groups_project_uuid", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + index=True, + nullable=False, + doc="project reference for this table", + ), + sa.Column( + "gid", + sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + name="fk_project_to_groups_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + doc="Group unique IDentifier", + ), + # Access Rights flags --- + sa.Column( + "read", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can open the project", + ), + sa.Column( + "write", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can modify the project", + ), + sa.Column( + "delete", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can delete the project", + ), + # ----- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.UniqueConstraint("project_uuid", "gid"), + sa.Index("idx_project_to_groups_gid", "gid"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects.py b/packages/postgres-database/src/simcore_postgres_database/models/projects.py index 1d066ba575c..7af3c09fc65 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/projects.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects.py @@ -1,27 +1,27 @@ -""" Projects table +"""Projects table""" - - Every row fits a project document schemed as api/specs/webserver/v0/components/schemas/project-v0.0.1.json - -""" import enum import sqlalchemy as sa from sqlalchemy.dialects.postgresql import ARRAY, JSONB -from sqlalchemy.sql import func +from sqlalchemy.sql import expression, func +from ._common import RefActions, column_trashed_by_user, column_trashed_datetime from .base import metadata +from .users import users class ProjectType(enum.Enum): - """ - template: template project - standard: standard project - """ - TEMPLATE = "TEMPLATE" STANDARD = "STANDARD" +class ProjectTemplateType(str, enum.Enum): + TEMPLATE = "TEMPLATE" + TUTORIAL = "TUTORIAL" + HYPERTOOL = "HYPERTOOL" + + projects = sa.Table( "projects", metadata, @@ -35,6 +35,13 @@ class ProjectType(enum.Enum): default=ProjectType.STANDARD, doc="Either standard or template types", ), + sa.Column( + "template_type", + sa.Enum(ProjectTemplateType), + nullable=True, + default=None, + doc="None if type is STANDARD, otherwise it is one of the ProjectTemplateType", + ), sa.Column( "uuid", sa.String, @@ -42,6 +49,7 @@ class ProjectType(enum.Enum): unique=True, doc="Unique global identifier", ), + # DISPLAY ---------------------------- sa.Column( "name", sa.String, @@ -54,47 +62,47 @@ class ProjectType(enum.Enum): nullable=True, doc="Markdown-compatible display description", ), - sa.Column("thumbnail", sa.String, nullable=True, doc="Link to thumbnail image"), + sa.Column( + "thumbnail", + sa.String, + nullable=True, + doc="Link to thumbnail image", + ), + # OWNERSHIP ---------------------------- sa.Column( "prj_owner", sa.BigInteger, sa.ForeignKey( "users.id", name="fk_projects_prj_owner_users", - onupdate="CASCADE", - ondelete="RESTRICT", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, ), nullable=True, doc="Project's 
owner", + index=True, ), + # PARENTHOOD ---------------------------- sa.Column( - "creation_date", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Timestamp on creation", - ), - sa.Column( - "last_change_date", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp with last update", - ), - sa.Column( - "access_rights", - JSONB, - nullable=False, - server_default=sa.text("'{}'::jsonb"), - doc="Read/write/delete access rights of each group (gid) on this project", + "workspace_id", + sa.BigInteger, + sa.ForeignKey( + "workspaces.workspace_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_to_workspaces_id", + ), + nullable=True, + default=None, ), + # CHILDREN/CONTENT-------------------------- sa.Column( "workbench", sa.JSON, nullable=False, doc="Pipeline with the project's workflow. Schema in models_library.projects.Workbench", ), + # FRONT-END ---------------------------- sa.Column( "ui", JSONB, @@ -102,13 +110,6 @@ class ProjectType(enum.Enum): server_default=sa.text("'{}'::jsonb"), doc="UI components. Schema in models_library.projects_ui", ), - sa.Column( - "classifiers", - ARRAY(sa.String, dimensions=1), - nullable=False, - server_default="{}", - doc="A list of standard labels to classify this project", - ), sa.Column( "dev", JSONB, @@ -116,13 +117,7 @@ class ProjectType(enum.Enum): server_default=sa.text("'{}'::jsonb"), doc="Free JSON to use as sandbox. Development only", ), - sa.Column( - "quality", - JSONB, - nullable=False, - server_default=sa.text("'{}'::jsonb"), - doc="Free JSON with quality assesment based on TSR", - ), + # FLAGS ---------------------------- sa.Column( "published", sa.Boolean, @@ -137,4 +132,112 @@ class ProjectType(enum.Enum): default=False, doc="If true, the project is by default not listed in the API", ), + # LIFECYCLE ---------------------------- + sa.Column( + "creation_date", + sa.DateTime(), + nullable=False, + server_default=func.now(), + doc="Timestamp on creation", + ), + sa.Column( + "last_change_date", + sa.DateTime(), + nullable=False, + server_default=func.now(), + onupdate=func.now(), + doc="Timestamp with last update", + ), + column_trashed_datetime("projects"), + column_trashed_by_user("projects", users_table=users), + sa.Column( + "trashed_explicitly", + sa.Boolean, + nullable=False, + server_default=expression.false(), + comment="Indicates whether the project was explicitly trashed by the user (true)" + " or inherited its trashed status from a parent (false) [default].", + ), + # TAGGING ---------------------------- + sa.Column( + "classifiers", + ARRAY(sa.String, dimensions=1), + nullable=False, + server_default="{}", + # NOTE: I found this strange but + # https://stackoverflow.com/questions/30933266/empty-array-as-postgresql-array-column-default-value + doc="A list of standard labels to classify this project", + ), + sa.Column( + "quality", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Free JSON with quality assesment based on TSR", + ), + # DEPRECATED ---------------------------- + sa.Column( + "access_rights", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="DEPRECATED: Read/write/delete access rights of each group (gid) on this project", + ), + ### INDEXES ---------------------------- + sa.Index( + "idx_projects_last_change_date_desc", + sa.desc("last_change_date"), + ), +) + +# We define the partial index +sa.Index( + "ix_projects_partial_type", + projects.c.type, + 
postgresql_where=(projects.c.type == ProjectType.TEMPLATE), +) + + +# ------------------------ TRIGGERS +new_project_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS project_creation on projects; +CREATE TRIGGER project_creation +AFTER INSERT ON projects + FOR EACH ROW + EXECUTE PROCEDURE set_project_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_project_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_project_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + -- Fetch the group_id based on the owner from the other table + SELECT u.primary_gid INTO group_id + FROM users u + WHERE u.id = NEW.prj_owner + LIMIT 1; + + IF group_id IS NOT NULL THEN + IF TG_OP = 'INSERT' THEN + INSERT INTO "project_to_groups" ("gid", "project_uuid", "read", "write", "delete") VALUES (group_id, NEW.uuid, TRUE, TRUE, TRUE); + END IF; + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + +sa.event.listen( + projects, "after_create", assign_project_access_rights_to_owner_group_procedure +) +sa.event.listen( + projects, + "after_create", + new_project_trigger, ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_comments.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_comments.py new file mode 100644 index 00000000000..919b143bff3 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_comments.py @@ -0,0 +1,53 @@ +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .projects import projects +from .users import users + +projects_comments = sa.Table( + "projects_comments", + metadata, + sa.Column( + "comment_id", + sa.BigInteger, + nullable=False, + autoincrement=True, + primary_key=True, + doc="Primary key, identifies the comment", + ), + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + name="fk_projects_comments_project_uuid", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + index=True, + nullable=False, + doc="project reference for this table", + ), + # NOTE: if the user gets deleted, it sets to null which should be interpreted as "unknown" user + sa.Column( + "user_id", + sa.BigInteger, + sa.ForeignKey( + users.c.id, + name="fk_projects_comments_user_id", + ondelete=RefActions.SET_NULL, + ), + doc="user who created the comment", + nullable=True, + ), + sa.Column( + "contents", + sa.String, + nullable=False, + doc="Content of the comment", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.PrimaryKeyConstraint("comment_id", name="projects_comments_pkey"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_metadata.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_metadata.py new file mode 100644 index 00000000000..3e6ed034a96 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_metadata.py @@ -0,0 +1,110 @@ +""" +These tables were designed to be controled by projects-plugin in +the webserver's service +""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .projects import projects +from .projects_nodes import 
projects_nodes + +projects_metadata = sa.Table( + "projects_metadata", + # + # Keeps "third-party" metadata attached to a project + # + # CUSTOM metadata: + # These SHOULD NOT be actual properties of the project (e.g. uuid, name etc) + # but rather information attached by third-parties that "decorate" or qualify + # a project resource + # + # project genealogy: + # a project might be created via the public API, in which case it might be created + # 1. directly, as usual + # 2. via a parent project/node combination (think jupyter/sim4life job creating a bunch of jobs) + # 3. via a parent project/node that ran as a computation ("3rd generation" project, there is no limits to the number of generations) + # + # in cases 2., 3. the parent_project_uuid is the direct parent project, and parent_node_id is the direct node parent as + # a specific node is defined by a project AND a node (since node IDs are non unique) + # + # in cases 2., 3. the root_parent_project_uuid is the very first parent project, and root_parent_node_id is the very first parent node + # + metadata, + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_metadata_project_uuid", + ), + nullable=False, + primary_key=True, + doc="The project unique identifier", + ), + sa.Column( + "custom", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Reserved for the user to store custom metadata", + ), + sa.Column( + "parent_project_uuid", + sa.String, + nullable=True, + doc="If applicable the parent project UUID of this project (the node that ran the public API to start this project_uuid lives in a project with UUID parent_project_uuid)", + ), + sa.Column( + "parent_node_id", + sa.String, + nullable=True, + doc="If applicable the parent node UUID of this project (the node that ran the public API to start this project_uuid lives in a node with ID parent_node_id)", + ), + sa.Column( + "root_parent_project_uuid", + sa.String, + nullable=True, + doc="If applicable the root parent project UUID of this project (the root project UUID in which the root node created the very first child)", + ), + sa.Column( + "root_parent_node_id", + sa.String, + nullable=True, + doc="If applicable the root parent node UUID of this project (the root node ID of the node that created the very first child)", + ), + # TIME STAMPS ----ß + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.PrimaryKeyConstraint("project_uuid"), + sa.ForeignKeyConstraint( + ("parent_project_uuid", "parent_node_id"), + (projects_nodes.c.project_uuid, projects_nodes.c.node_id), + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name="fk_projects_metadata_parent_node_id", + ), + sa.ForeignKeyConstraint( + ("root_parent_project_uuid", "root_parent_node_id"), + (projects_nodes.c.project_uuid, projects_nodes.c.node_id), + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name="fk_projects_metadata_root_parent_node_id", + ), + ####### + sa.Index( + "idx_projects_metadata_root_parent_project_uuid", "root_parent_project_uuid" + ), +) + + +register_modified_datetime_auto_update_trigger(projects_metadata) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_networks.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_networks.py index efac321a539..905c1aa4cf0 100644 --- 
a/packages/postgres-database/src/simcore_postgres_database/models/projects_networks.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_networks.py @@ -1,6 +1,7 @@ import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB +from ._common import RefActions from .base import metadata from .projects import projects @@ -13,8 +14,8 @@ sa.ForeignKey( projects.c.uuid, name="fk_projects_networks_project_uuid_projects", - ondelete="CASCADE", - onupdate="CASCADE", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, ), primary_key=True, doc="project reference and primary key for this table", diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_node_to_pricing_unit.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_node_to_pricing_unit.py new file mode 100644 index 00000000000..903466c3f93 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_node_to_pricing_unit.py @@ -0,0 +1,52 @@ +""" Groups table + + - List of groups in the framework + - Groups have a ID, name and a list of users that belong to the group +""" + +import sqlalchemy as sa + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .projects_nodes import projects_nodes + +projects_node_to_pricing_unit = sa.Table( + "projects_node_to_pricing_unit", + metadata, + sa.Column( + "project_node_id", + sa.BIGINT, + sa.ForeignKey( + projects_nodes.c.project_node_id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_nodes__project_node_to_pricing_unit__uuid", + ), + nullable=False, + doc="The project node unique identifier", + ), + sa.Column( + "pricing_plan_id", + sa.BigInteger, + nullable=False, + doc="The pricing plan unique identifier", + ), + sa.Column( + "pricing_unit_id", + sa.BigInteger, + nullable=False, + doc="The pricing unit unique identifier", + ), + # TIME STAMPS ---- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.UniqueConstraint("project_node_id"), +) + + +register_modified_datetime_auto_update_trigger(projects_nodes) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_nodes.py new file mode 100644 index 00000000000..a5991e7f9db --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_nodes.py @@ -0,0 +1,160 @@ +""" Groups table + + - List of groups in the framework + - Groups have a ID, name and a list of users that belong to the group +""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .projects import projects + +projects_nodes = sa.Table( + "projects_nodes", + metadata, + sa.Column( + "project_node_id", + sa.Integer, + nullable=False, + autoincrement=True, + primary_key=True, + doc="Index of the project node", + ), + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_to_projects_nodes_to_projects_uuid", + ), + nullable=False, + index=True, + doc="Unique identifier of the project", + ), + sa.Column( + "node_id", + 
sa.String, + nullable=False, + index=True, + doc="Unique identifier of the node", + ), + sa.Column( + "required_resources", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Required resources", + ), + # TIME STAMPS ---- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Column( + "key", + sa.String, + nullable=False, + comment="Distinctive name (based on the Docker registry path)", + ), + sa.Column( + "version", + sa.String, + nullable=False, + comment="Semantic version number", + ), + sa.Column( + "label", + sa.String, + nullable=False, + comment="Short name used for display", + ), + sa.Column( + "progress", + sa.Numeric, + nullable=True, + comment="Progress value (0-100)", + ), + sa.Column( + "thumbnail", + sa.String, + nullable=True, + comment="Url of the latest screenshot", + ), + sa.Column( + "input_access", + JSONB, + nullable=True, + comment="Map with key - access level pairs", + ), + sa.Column( + "input_nodes", + JSONB, # Array + nullable=True, + comment="IDs of the nodes where is connected to", + ), + sa.Column( + "inputs", + JSONB, + nullable=True, + comment="Input properties values", + ), + sa.Column( + "inputs_required", + JSONB, # Array + nullable=True, + comment="Required input IDs", + ), + sa.Column( + "inputs_units", + JSONB, + nullable=True, + comment="Input units", + ), + sa.Column( + "output_nodes", + JSONB, # Array + nullable=True, + comment="Node IDs of those connected to the output", + ), + sa.Column( + "outputs", + JSONB, + nullable=True, + comment="Output properties values", + ), + sa.Column( + "run_hash", + sa.String, + nullable=True, + comment="HEX digest of the resolved inputs + outputs hash at the time when the last outputs were generated", + ), + sa.Column( + "state", + JSONB, + nullable=True, + comment="State", + ), + sa.Column( + "parent", + sa.String, + nullable=True, + comment="Parent's (group-nodes) node ID", + ), + sa.Column( + "boot_options", + JSONB, + nullable=True, + comment="Some services provide alternative parameters to be injected at boot time." + "The user selection should be stored here, and it will overwrite the services's defaults", + ), + sa.UniqueConstraint("project_uuid", "node_id"), +) + +register_modified_datetime_auto_update_trigger(projects_nodes) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_tags.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_tags.py new file mode 100644 index 00000000000..3507fc8d239 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_tags.py @@ -0,0 +1,42 @@ +import sqlalchemy as sa + +from ._common import RefActions +from .base import metadata +from .projects import projects +from .tags import tags + +projects_tags = sa.Table( + # + # Tags associated to a project (many-to-many relation) + # + "projects_tags", + metadata, + sa.Column( + "project_id", + sa.BigInteger, + sa.ForeignKey( + projects.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name="project_tags_project_id_fkey", + ), + nullable=True, # <-- NULL means that project was deleted + doc="NOTE that project.c.id != project.c.uuid. If project is deleted, we do not delete project in this table, we just set this column to NULL. Why? 
Because the `project_uuid_for_rut` is still used by resource usage tracker", + ), + sa.Column( + "tag_id", + sa.BigInteger, + sa.ForeignKey( + tags.c.id, onupdate=RefActions.CASCADE, ondelete=RefActions.CASCADE + ), + nullable=False, + ), + sa.Column( + "project_uuid_for_rut", + sa.String, + nullable=False, + ), + sa.UniqueConstraint( + "project_uuid_for_rut", "tag_id", name="project_tags_project_uuid_unique" + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_folders.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_folders.py new file mode 100644 index 00000000000..34c24221936 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_folders.py @@ -0,0 +1,47 @@ +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .folders_v2 import folders_v2 + +projects_to_folders = sa.Table( + "projects_to_folders", + metadata, + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + "projects.uuid", + name="fk_projects_to_folders_to_projects_uuid", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + ), + sa.Column( + "folder_id", + sa.BigInteger, + sa.ForeignKey( + folders_v2.c.folder_id, + name="fk_projects_to_folders_to_folders_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + ), + sa.Column( + "user_id", + sa.BigInteger, + sa.ForeignKey( + "users.id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_to_folders_to_user_id", + ), + nullable=True, + doc="If private workspace then user id is filled, otherwise its null", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.UniqueConstraint("project_uuid", "folder_id", "user_id"), + sa.Index("idx_project_to_folders_project_uuid", "project_uuid"), + sa.Index("idx_project_to_folders_user_id", "user_id"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py new file mode 100644 index 00000000000..4f3859fb36e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_jobs.py @@ -0,0 +1,37 @@ +import sqlalchemy as sa + +from ._common import RefActions +from .base import metadata +from .projects import projects + +projects_to_jobs = sa.Table( + # Maps projects used as jobs in the public-api + "projects_to_jobs", + metadata, + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_projects_to_jobs_project_uuid", + ), + nullable=False, + doc="Foreign key to projects.uuid", + ), + sa.Column( + "job_parent_resource_name", + sa.String, + nullable=False, + doc="Prefix for the job resource name use in the public-api. 
For example, if " + "the relative resource name is shelves/shelf1/jobs/job2, " + "the parent resource name is shelves/shelf1.", + ), + # Composite key (project_uuid, job_parent_resource_name) uniquely identifies very row + sa.UniqueConstraint( + "project_uuid", + "job_parent_resource_name", + name="uq_projects_to_jobs_project_uuid_job_parent_resource_name", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_products.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_products.py index 3faaa96d3ec..0d8429ab3ee 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_products.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_products.py @@ -1,6 +1,6 @@ import sqlalchemy as sa -from ._common import column_created_datetime, column_modified_datetime +from ._common import RefActions, column_created_datetime, column_modified_datetime from .base import metadata projects_to_products = sa.Table( @@ -11,8 +11,8 @@ sa.String, sa.ForeignKey( "projects.uuid", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, name="fk_projects_to_products_product_uuid", ), nullable=False, @@ -23,15 +23,16 @@ sa.String, sa.ForeignKey( "products.name", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, name="fk_projects_to_products_product_name", ), nullable=False, doc="Products unique name", ), # TIME STAMPS ---- - column_created_datetime(), - column_modified_datetime(), + column_created_datetime(timezone=False), + column_modified_datetime(timezone=False), sa.UniqueConstraint("project_uuid", "product_name"), + sa.Index("idx_projects_to_products_product_name", "product_name"), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_to_wallet.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_wallet.py new file mode 100644 index 00000000000..74e7a7ef635 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/projects_to_wallet.py @@ -0,0 +1,38 @@ +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .projects import projects +from .wallets import wallets + +projects_to_wallet = sa.Table( + "projects_to_wallet", + metadata, + sa.Column( + "project_uuid", + sa.String, + sa.ForeignKey( + projects.c.uuid, + name="fk_projects_comments_project_uuid", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + index=True, + primary_key=True, + nullable=False, + doc="project reference for this table", + ), + sa.Column( + "wallet_id", + sa.BigInteger, + sa.ForeignKey( + wallets.c.wallet_id, + name="fk_projects_wallet_wallets_id", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + nullable=False, + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/projects_version_control.py b/packages/postgres-database/src/simcore_postgres_database/models/projects_version_control.py deleted file mode 100644 index 8a22ab1f616..00000000000 --- a/packages/postgres-database/src/simcore_postgres_database/models/projects_version_control.py +++ /dev/null @@ -1,331 +0,0 @@ -# -# TODO: create template to produce these tables over another table other than project -# - -import sqlalchemy as 
sa -from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.sql import func - -from .base import metadata -from .projects import projects - -# REPOSITORES -# -# Projects under version-control are assigned a repository -# - keeps information of the current branch to recover HEAD ref -# - when repo is deleted, all project_vc_* get deleted -# - -projects_vc_repos = sa.Table( - "projects_vc_repos", - metadata, - sa.Column( - "id", - sa.BigInteger, - nullable=False, - primary_key=True, - doc="Global vc repo identifier index", - ), - sa.Column( - "project_uuid", - sa.String, - sa.ForeignKey( - projects.c.uuid, - name="fk_projects_vc_repos_project_uuid", - ondelete="CASCADE", # if project is deleted, all references in project_vc_* tables are deleted except for projects_vc_snapshots. - onupdate="CASCADE", - ), - nullable=False, - unique=True, - doc="Project under version control" - "Used as a working copy (WC) to produce/checkout snapshots.", - ), - sa.Column( - "project_checksum", - sa.String, - nullable=True, - doc="SHA-1 checksum of current working copy." - "Used as a cache mechanism stored at 'modified'" - "or to detect changes in state due to race conditions", - ), - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Creation timestamp for this row", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp for last changes", - ), -) - - -projects_vc_snapshots = sa.Table( - "projects_vc_snapshots", - metadata, - sa.Column( - "checksum", - sa.String, - primary_key=True, - nullable=False, - doc="SHA-1 checksum of snapshot." - "The columns projects_vc_repos.project_checksum and projects_vc_repos.snapshot_checksum " - "are both checksums of the same entity (i.e. a project) in two different states, " - "namely the project's WC and some snapshot respectively.", - ), - sa.Column( - "content", - JSONB, - nullable=False, - server_default=sa.text("'{}'::jsonb"), - doc="snapshot content", - ), -) - - -# -# COMMITS -# -# - should NEVER be modified explicitly after creation -# - commits are inter-related. WARNING with deletion -# -# SEE https://git-scm.com/book/en/v2/Git-Internals-Git-References - -projects_vc_commits = sa.Table( - "projects_vc_commits", - metadata, - sa.Column( - "id", - sa.BigInteger, - nullable=False, - primary_key=True, - doc="Global identifier throughout all repository's commits", - ), - sa.Column( - "repo_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_repos.c.id, - name="fk_projects_vc_commits_repo_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - nullable=False, - doc="Repository to which this commit belongs", - ), - sa.Column( - "parent_commit_id", - sa.BigInteger, - sa.ForeignKey( - "projects_vc_commits.id", - name="fk_projects_vc_commits_parent_commit_id", - onupdate="CASCADE", - ), - nullable=True, - doc="Preceding commit", - ), - sa.Column( - "snapshot_checksum", - sa.String, - sa.ForeignKey( - projects_vc_snapshots.c.checksum, - name="fk_projects_vc_commits_snapshot_checksum", - ondelete="RESTRICT", - onupdate="CASCADE", - ), - nullable=False, - doc="SHA-1 checksum of snapshot." 
- "Used as revision/commit identifier since it is unique per repo", - ), - sa.Column("message", sa.String, doc="Commit message"), - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Timestamp for this snapshot", - ), -) - - -# -# head/TAGS -# -# SEE https://git-scm.com/book/en/v2/Git-Internals-Git-References - -projects_vc_tags = sa.Table( - "projects_vc_tags", - metadata, - sa.Column( - "id", - sa.BigInteger, - nullable=False, - primary_key=True, - doc="Global identifier throughout all repositories tags", - ), - sa.Column( - "repo_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_repos.c.id, - name="fk_projects_vc_tags_repo_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - nullable=False, - doc="Repository to which this commit belongs", - ), - sa.Column( - "commit_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_commits.c.id, - name="fk_projects_vc_tags_commit_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - nullable=False, - doc="Points to the tagged commit", - ), - sa.Column("name", sa.String, doc="Tag display name"), - sa.Column("message", sa.String, doc="Tag annotation"), - sa.Column( - "hidden", - sa.Boolean, - default=False, - doc="Skipped by default from tag listings." - "Normally intended for internal use tags", - ), - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Creation timestamp", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp for last changes", - ), - # CONSTRAINTS -------------- - sa.UniqueConstraint("name", "repo_id", name="repo_tag_uniqueness"), -) - - -# -# head/BRANCHES -# -# SEE https://git-scm.com/book/en/v2/Git-Internals-Git-References - -projects_vc_branches = sa.Table( - "projects_vc_branches", - metadata, - sa.Column( - "id", - sa.BigInteger, - nullable=False, - primary_key=True, - doc="Global identifier throughout all repositories branches", - ), - sa.Column( - "repo_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_repos.c.id, - name="projects_vc_branches_repo_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - nullable=False, - doc="Repository to which this branch belongs", - ), - sa.Column( - "head_commit_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_commits.c.id, - name="fk_projects_vc_branches_head_commit_id", - ondelete="RESTRICT", - onupdate="CASCADE", - ), - nullable=True, - doc="Points to the head commit of this branch" "Null heads are detached", - ), - sa.Column("name", sa.String, default="main", doc="Branch display name"), - sa.Column( - "created", - sa.DateTime(), - nullable=False, - server_default=func.now(), - doc="Creation timestamp", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp for last changes", - ), - # CONSTRAINTS -------------- - sa.UniqueConstraint("name", "repo_id", name="repo_branch_uniqueness"), -) - - -# -# HEADS -# -# - the last commit in a given repo, also called the HEAD reference -# - added in an association table to avoid circular dependency between projects_vc_repos and projects_vc_branches -# -# SEE https://git-scm.com/book/en/v2/Git-Internals-Git-References - -projects_vc_heads = sa.Table( - "projects_vc_heads", - metadata, - sa.Column( - "repo_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_repos.c.id, - name="projects_vc_branches_repo_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - primary_key=True, - 
nullable=False, - doc="Repository to which this branch belongs", - ), - sa.Column( - "head_branch_id", - sa.BigInteger, - sa.ForeignKey( - projects_vc_branches.c.id, - name="fk_projects_vc_heads_head_branch_id", - ondelete="CASCADE", - onupdate="CASCADE", - ), - unique=True, - nullable=True, - doc="Points to the current branch that holds the HEAD" - "Null is used for detached head", - ), - sa.Column( - "modified", - sa.DateTime(), - nullable=False, - server_default=func.now(), - onupdate=func.now(), - doc="Timestamp for last changes on head branch", - ), -) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_credit_transactions.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_credit_transactions.py new file mode 100644 index 00000000000..70a3e1ed1ac --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_credit_transactions.py @@ -0,0 +1,152 @@ +""" Wallets credit transaction + - Basically this table is balance sheet of each wallet. +""" +import enum + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from ._common import ( + NUMERIC_KWARGS, + RefActions, + column_created_datetime, + column_modified_datetime, +) +from .base import metadata + + +class CreditTransactionStatus(str, enum.Enum): + PENDING = "PENDING" + BILLED = "BILLED" + IN_DEBT = "IN_DEBT" + NOT_BILLED = "NOT_BILLED" + REQUIRES_MANUAL_REVIEW = "REQUIRES_MANUAL_REVIEW" + + +class CreditTransactionClassification(str, enum.Enum): + ADD_WALLET_TOP_UP = "ADD_WALLET_TOP_UP" # user top up credits + DEDUCT_SERVICE_RUN = ( + "DEDUCT_SERVICE_RUN" # computational/dynamic service run costs) + ) + DEDUCT_LICENSE_PURCHASE = "DEDUCT_LICENSE_PURCHASE" + ADD_WALLET_EXCHANGE = "ADD_WALLET_EXCHANGE" + DEDUCT_WALLET_EXCHANGE = "DEDUCT_WALLET_EXCHANGE" + + +resource_tracker_credit_transactions = sa.Table( + "resource_tracker_credit_transactions", + metadata, + sa.Column( + "transaction_id", + sa.BigInteger, + nullable=False, + primary_key=True, + doc="Identifier index", + ), + sa.Column( + "product_name", + sa.String, + nullable=False, + doc="Product name", + ), + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + doc="Wallet id", + index=True, + ), + sa.Column( + "wallet_name", + sa.String, + nullable=False, + doc="Wallet name", + ), + sa.Column( + "pricing_plan_id", + sa.BigInteger, + nullable=True, + doc="Pricing plan", + ), + sa.Column( + "pricing_unit_id", + sa.BigInteger, + nullable=True, + doc="Pricing detail", + ), + sa.Column( + "pricing_unit_cost_id", + sa.BigInteger, + nullable=True, + doc="Pricing detail", + ), + sa.Column( + "user_id", + sa.BigInteger, + nullable=False, + doc="User id", + ), + sa.Column( + "user_email", + sa.String, + nullable=False, + doc="User email", + ), + sa.Column( + "osparc_credits", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="Credits", + ), + sa.Column( + "transaction_status", + sa.Enum(CreditTransactionStatus), + nullable=True, + doc="Transaction status, ex. PENDING, BILLED, NOT_BILLED, REQUIRES_MANUAL_REVIEW", + index=True, + ), + sa.Column( + "transaction_classification", + sa.Enum(CreditTransactionClassification), + nullable=True, + doc="Transaction classification, ex. 
ADD_WALLET_TOP_UP, DEDUCT_SERVICE_RUN", + ), + sa.Column( + "service_run_id", + sa.String, + nullable=True, + doc="Service run id connected with this transaction", + index=True, + ), + sa.Column( + "payment_transaction_id", + sa.String, + nullable=True, + doc="Payment transaction id connected with this transaction", + ), + sa.Column( + "licensed_item_purchase_id", + UUID(as_uuid=True), + nullable=True, + doc="Licensed item purchase id connected with this transaction", + ), + column_created_datetime(timezone=True), + sa.Column( + "last_heartbeat_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamp when was the last heartbeat", + ), + column_modified_datetime(timezone=True), + # --------------------------- + sa.ForeignKeyConstraint( + ["product_name", "service_run_id"], + [ + "resource_tracker_service_runs.product_name", + "resource_tracker_service_runs.service_run_id", + ], + name="resource_tracker_credit_trans_fkey", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_checkouts.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_checkouts.py new file mode 100644 index 00000000000..91da1539372 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_checkouts.py @@ -0,0 +1,87 @@ +""" resource_tracker_service_runs table +""" + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from ._common import RefActions, column_modified_datetime +from .base import metadata + +resource_tracker_licensed_items_checkouts = sa.Table( + "resource_tracker_licensed_items_checkouts", + metadata, + sa.Column( + "licensed_item_checkout_id", + UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "licensed_item_id", + UUID(as_uuid=True), + nullable=True, + ), + sa.Column( + "key", + sa.String, + nullable=False, + ), + sa.Column( + "version", + sa.String, + nullable=False, + ), + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + index=True, + ), + sa.Column( + "user_id", + sa.BigInteger, + nullable=False, + ), + sa.Column( + "user_email", + sa.String, + nullable=False, + ), + sa.Column("product_name", sa.String, nullable=False, doc="Product name"), + sa.Column( + "service_run_id", + sa.String, + nullable=True, + ), + sa.Column( + "started_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamp when the service was started", + ), + sa.Column( + "stopped_at", + sa.DateTime(timezone=True), + nullable=True, + doc="Timestamp when the service was stopped", + ), + sa.Column( + "num_of_seats", + sa.SmallInteger, + nullable=False, + ), + column_modified_datetime(timezone=True), + # --------------------------- + sa.ForeignKeyConstraint( + ["product_name", "service_run_id"], + [ + "resource_tracker_service_runs.product_name", + "resource_tracker_service_runs.service_run_id", + ], + name="resource_tracker_license_checkouts_service_run_id_fkey", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + sa.Index("idx_licensed_items_checkouts_key_version", "key", "version"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_purchases.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_purchases.py new file mode 100644 index 
00000000000..8e09f322c73 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_licensed_items_purchases.py @@ -0,0 +1,98 @@ +""" resource_tracker_service_runs table +""" + + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import UUID + +from ._common import NUMERIC_KWARGS, column_modified_datetime +from .base import metadata + +resource_tracker_licensed_items_purchases = sa.Table( + "resource_tracker_licensed_items_purchases", + metadata, + sa.Column( + "licensed_item_purchase_id", + UUID(as_uuid=True), + nullable=False, + primary_key=True, + server_default=sa.text("gen_random_uuid()"), + ), + sa.Column( + "product_name", + sa.String, + nullable=False, + doc="Product name", + ), + sa.Column( + "licensed_item_id", + UUID(as_uuid=True), + nullable=False, + ), + sa.Column( + "key", + sa.String, + nullable=False, + ), + sa.Column( + "version", + sa.String, + nullable=False, + ), + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + ), + sa.Column( + "wallet_name", + sa.String, + nullable=False, + ), + sa.Column( + "pricing_unit_cost_id", + sa.BigInteger, + nullable=False, + ), + sa.Column( + "pricing_unit_cost", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=True, + doc="Pricing unit cost used for billing purposes", + ), + sa.Column( + "start_at", + sa.DateTime(timezone=True), + nullable=False, + server_default=sa.sql.func.now(), + ), + sa.Column( + "expire_at", + sa.DateTime(timezone=True), + nullable=False, + server_default=sa.sql.func.now(), + ), + sa.Column( + "num_of_seats", + sa.SmallInteger, + nullable=False, + ), + sa.Column( + "purchased_by_user", + sa.BigInteger, + nullable=False, + ), + sa.Column( + "user_email", + sa.String, + nullable=False, + ), + sa.Column( + "purchased_at", + sa.DateTime(timezone=True), + nullable=False, + server_default=sa.sql.func.now(), + ), + column_modified_datetime(timezone=True), + sa.Index("idx_licensed_items_purchases_key_version", "key", "version"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plan_to_service.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plan_to_service.py new file mode 100644 index 00000000000..b802b6724c4 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plan_to_service.py @@ -0,0 +1,55 @@ +""" Pricing plan to credits table + - Usecase: when client wants to ask for pricing plan for a concrete service +""" + +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + +resource_tracker_pricing_plan_to_service = sa.Table( + "resource_tracker_pricing_plan_to_service", + metadata, + sa.Column( + "pricing_plan_id", + sa.BigInteger, + sa.ForeignKey( + "resource_tracker_pricing_plans.pricing_plan_id", + name="fk_resource_tracker_pricing_details_pricing_plan_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + nullable=False, + doc="Identifier index", + index=True, + ), + sa.Column( + "service_key", + sa.String, + nullable=False, + doc="Hierarchical identifier of the service e.g. 
simcore/services/dynamic/my-super-service", + ), + sa.Column( + "service_version", + sa.String, + nullable=False, + doc="MAJOR.MINOR.PATCH semantic versioning (see https://semver.org)", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Column( + "service_default_plan", + sa.Boolean(), + nullable=False, + default=False, + doc="Option to mark default pricing plan for the service (ex. when there are more pricing plans for the same service)", + ), + # --------------------------- + sa.ForeignKeyConstraint( + ["service_key", "service_version"], + ["services_meta_data.key", "services_meta_data.version"], + name="fk_rut_pricing_plan_to_service_key_and_version", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plans.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plans.py new file mode 100644 index 00000000000..70c9ec53e03 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_plans.py @@ -0,0 +1,84 @@ +""" Pricing plan table +""" +import enum + +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + + +class PricingPlanClassification(str, enum.Enum): + """ + These are our custom pricing plan classifications, each of them can have different behaviour. + Potentional examples: + - TIER + - STORAGE + - CPU_HOUR + """ + + TIER = "TIER" + LICENSE = "LICENSE" + + +resource_tracker_pricing_plans = sa.Table( + "resource_tracker_pricing_plans", + metadata, + sa.Column( + "pricing_plan_id", + sa.BigInteger, + nullable=False, + primary_key=True, + doc="Identifier index", + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_rut_pricing_plans_product_name", + ), + nullable=False, + doc="Products unique name", + ), + sa.Column( + "display_name", + sa.String, + nullable=False, + doc="Name of the pricing plan, ex. DYNAMIC_SERVICES_TIERS, CPU_HOURS, STORAGE", + ), + sa.Column( + "description", + sa.String, + nullable=False, + server_default="", + doc="Description of the pricing plan", + ), + sa.Column( + "classification", + sa.Enum(PricingPlanClassification), + nullable=False, + doc="Pricing plan classification, ex. tier, storage, cpu_hour. 
Each classification can have different behaviour.", + ), + sa.Column( + "is_active", + sa.Boolean, + nullable=False, + doc="Is the pricing plan active", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Column( + "pricing_plan_key", + sa.String, + nullable=False, + default=False, + doc="Unique human readable pricing plan key that might be used for integration", + ), + # --------------------------- + sa.UniqueConstraint( + "product_name", "pricing_plan_key", name="pricing_plans_pricing_plan_key" + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_unit_costs.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_unit_costs.py new file mode 100644 index 00000000000..7a4c4e5f6a1 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_unit_costs.py @@ -0,0 +1,92 @@ +""" Pricing details table + - each pricing plan table can have multiple units. These units are stored in the + pricing details table with their unit cost. Each unit cost (row in this table) has + id which uniquely defines the prices at this point of the time. We always store whole + history and do not update the rows of this table. +""" +import sqlalchemy as sa + +from ._common import ( + NUMERIC_KWARGS, + RefActions, + column_created_datetime, + column_modified_datetime, +) +from .base import metadata + +resource_tracker_pricing_unit_costs = sa.Table( + "resource_tracker_pricing_unit_costs", + metadata, + sa.Column( + "pricing_unit_cost_id", + sa.BigInteger, + nullable=False, + primary_key=True, + doc="Identifier index", + ), + sa.Column( + "pricing_plan_id", + sa.BigInteger, + sa.ForeignKey( + "resource_tracker_pricing_plans.pricing_plan_id", + name="fk_resource_tracker_pricing_units_costs_pricing_plan_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + doc="Foreign key to pricing plan", + index=True, + ), + sa.Column( + "pricing_plan_key", + sa.String, + nullable=False, + doc="Parent pricing key (storing for historical reasons)", + ), + sa.Column( + "pricing_unit_id", + sa.BigInteger, + sa.ForeignKey( + "resource_tracker_pricing_units.pricing_unit_id", + name="fk_resource_tracker_pricing_units_costs_pricing_unit_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + doc="Foreign key to pricing unit", + index=True, + ), + sa.Column( + "pricing_unit_name", + sa.String, + nullable=False, + doc="Parent pricing unit name (storing for historical reasons)", + ), + sa.Column( + "cost_per_unit", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=False, + doc="The cost per unit of the pricing plan in credits.", + ), + sa.Column( + "valid_from", + sa.DateTime(timezone=True), + nullable=False, + doc="From when the pricing unit is active", + ), + sa.Column( + "valid_to", + sa.DateTime(timezone=True), + nullable=True, + doc="To when the pricing unit was active, if null it is still active", + index=True, + ), + column_created_datetime(timezone=True), + sa.Column( + "comment", + sa.String, + nullable=True, + doc="Option to store comment", + ), + column_modified_datetime(timezone=True), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_units.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_units.py new file mode 100644 index 00000000000..aecbc1d07e1 --- /dev/null +++ 
b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_pricing_units.py @@ -0,0 +1,71 @@ +""" Pricing details table + - each pricing plan table can have multiple units. These units are stored in the + pricing details table with their unit cost. Each unit cost (row in this table) has + id which uniquely defines the prices at this point of the time. We always store whole + history and do not update the rows of this table. +""" +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + +resource_tracker_pricing_units = sa.Table( + "resource_tracker_pricing_units", + metadata, + sa.Column( + "pricing_unit_id", + sa.BigInteger, + nullable=False, + primary_key=True, + doc="Identifier index", + ), + sa.Column( + "pricing_plan_id", + sa.BigInteger, + sa.ForeignKey( + "resource_tracker_pricing_plans.pricing_plan_id", + name="fk_resource_tracker_pricing_units_pricing_plan_id", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=False, + doc="Foreign key to pricing plan", + index=True, + ), + sa.Column( + "unit_name", + sa.String, + nullable=False, + doc="The custom name of the pricing plan, ex. SMALL, MEDIUM, LARGE", + ), + sa.Column( + "unit_extra_info", + JSONB, + nullable=False, + default="'{}'::jsonb", + doc="Additional public information about pricing unit, ex. more detail description or how many CPUs there are.", + ), + sa.Column( + "default", + sa.Boolean(), + nullable=False, + default=False, + doc="Option to mark default pricing plan by creator", + ), + sa.Column( + "specific_info", + JSONB, + nullable=False, + default="'{}'::jsonb", + doc="Specific internal info of the pricing unit, ex. 
for tiers we can store in which EC2 instance type we run the service.", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + # --------------------------- + sa.UniqueConstraint( + "pricing_plan_id", + "unit_name", + name="pricing_plan_and_unit_constrain_key", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_service_runs.py b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_service_runs.py new file mode 100644 index 00000000000..33eddcb9fc7 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/resource_tracker_service_runs.py @@ -0,0 +1,238 @@ +""" resource_tracker_service_runs table +""" +import enum + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB + +from ._common import NUMERIC_KWARGS, column_modified_datetime +from .base import metadata + + +class ResourceTrackerServiceType(str, enum.Enum): + COMPUTATIONAL_SERVICE = "COMPUTATIONAL_SERVICE" + DYNAMIC_SERVICE = "DYNAMIC_SERVICE" + + +class ResourceTrackerServiceRunStatus(str, enum.Enum): + RUNNING = "RUNNING" + SUCCESS = "SUCCESS" + ERROR = "ERROR" + + +resource_tracker_service_runs = sa.Table( + "resource_tracker_service_runs", + metadata, + # Primary keys + sa.Column( + "product_name", sa.String, nullable=False, doc="Product name", primary_key=True + ), + sa.Column( + "service_run_id", + sa.String, + nullable=False, + doc="Refers to the unique service_run_id provided by the director-v2/dynamic-sidecars.", + primary_key=True, + ), + # Wallet fields + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=True, + doc="We want to store the wallet id for tracking/billing purposes and be sure it stays there even when the wallet is deleted (that's also reason why we do not introduce foreign key)", + index=True, + ), + sa.Column( + "wallet_name", + sa.String, + nullable=True, + doc="We want to store the wallet name for tracking/billing purposes and be sure it stays there even when the wallet is deleted (that's also reason why we do not introduce foreign key)", + ), + # Pricing fields + sa.Column( + "pricing_plan_id", + sa.BigInteger, + nullable=True, + doc="Pricing plan id for billing purposes", + ), + sa.Column( + "pricing_unit_id", + sa.BigInteger, + nullable=True, + doc="Pricing unit id for billing purposes", + ), + sa.Column( + "pricing_unit_cost_id", + sa.BigInteger, + nullable=True, + doc="Pricing unit cost id for billing purposes", + ), + sa.Column( + "pricing_unit_cost", + sa.Numeric(**NUMERIC_KWARGS), # type: ignore + nullable=True, + doc="Pricing unit cost used for billing purposes", + ), + # User agent field + sa.Column( + "simcore_user_agent", + sa.String, + nullable=True, + doc="Information about whether it is Puppeteer or not", + ), + # User fields + sa.Column( + "user_id", + sa.BigInteger, + nullable=False, + doc="We want to store the user id for tracking/billing purposes and be sure it stays there even when the user is deleted (that's also reason why we do not introduce foreign key)", + index=True, + ), + sa.Column( + "user_email", + sa.String, + nullable=True, + doc="we want to store the email for tracking/billing purposes and be sure it stays there even when the user is deleted (that's also reason why we do not introduce foreign key)", + ), + # Project fields + sa.Column( + "project_id", # UUID + sa.String, + nullable=False, + doc="We want to store the project id for tracking/billing purposes and be sure it stays there even when the project is 
deleted (that's also reason why we do not introduce foreign key)", + ), + sa.Column( + "project_name", + sa.String, + nullable=False, + doc="we want to store the project name for tracking/billing purposes and be sure it stays there even when the project is deleted (that's also reason why we do not introduce foreign key)", + ), + # Node fields + sa.Column( + "node_id", # UUID + sa.String, + nullable=False, + doc="We want to store the node id for tracking/billing purposes and be sure it stays there even when the node is deleted (that's also reason why we do not introduce foreign key)", + ), + sa.Column( + "node_name", + sa.String, + nullable=False, + doc="we want to store the node/service name/label for tracking/billing purposes and be sure it stays there even when the node is deleted.", + ), + # Project/Node parent fields + sa.Column( + "parent_project_id", # UUID + sa.String, + nullable=False, + doc="If a user starts computational jobs via a dynamic service, a new project is created in the backend. This newly created project is considered a child project, and the project from which it was created is the parent project. We want to store the parent project ID for tracking and billing purposes, and ensure it remains even when the node is deleted. This is also the reason why we do not introduce a foreign key.", + ), + sa.Column( + "root_parent_project_id", # UUID + sa.String, + nullable=False, + doc="Similar to the parent project concept, we are flexible enough to allow multiple nested computational jobs, which create multiple nested projects. For this reason, we keep the parent project ID, so we know from which project the user started their computation.", + ), + sa.Column( + "root_parent_project_name", + sa.String, + nullable=False, + doc="We want to store the root parent project name for tracking/billing purposes.", + ), + sa.Column( + "parent_node_id", # UUID + sa.String, + nullable=False, + doc="Since each project can have multiple nodes, similar to the parent project concept, we also store the parent node..", + ), + sa.Column( + "root_parent_node_id", # UUID + sa.String, + nullable=False, + doc="Since each project can have multiple nodes, similar to the root parent project concept, we also store the root parent node.", + ), + # Service fields + sa.Column( + "service_key", + sa.String, + nullable=False, + doc="Service Key", + ), + sa.Column( + "service_version", + sa.String, + nullable=False, + doc="Service Version", + ), + sa.Column( + "service_type", + sa.Enum(ResourceTrackerServiceType), + nullable=False, + doc="Service type, ex. COMPUTATIONAL, DYNAMIC", + ), + sa.Column( + "service_resources", + JSONB, + nullable=False, + default="'{}'::jsonb", + doc="Service aresources, ex. 
cpu, gpu, memory, ...", + ), + sa.Column( + "service_additional_metadata", + JSONB, + nullable=False, + default="'{}'::jsonb", + doc="Service additional metadata.", + ), + # Run timestamps + sa.Column( + "started_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamp when the service was started", + index=True, + ), + sa.Column( + "stopped_at", + sa.DateTime(timezone=True), + nullable=True, + doc="Timestamp when the service was stopped", + ), + # Run status + sa.Column( + "service_run_status", # Partial index was defined bellow + sa.Enum(ResourceTrackerServiceRunStatus), + nullable=False, + ), + column_modified_datetime(timezone=True), + # Last Heartbeat + sa.Column( + "last_heartbeat_at", + sa.DateTime(timezone=True), + nullable=False, + doc="Timestamp when was the last heartbeat", + ), + sa.Column( + "service_run_status_msg", + sa.String, + nullable=True, + doc="Custom message/comment, for example to help understand root cause of the error during investigation", + ), + sa.Column( + "missed_heartbeat_counter", + sa.SmallInteger, + nullable=False, + default=0, + doc="How many heartbeat checks have been missed", + ), +) + +# We define the partial index +sa.Index( + "ix_resource_tracker_credit_transactions_status_running", + resource_tracker_service_runs.c.service_run_status, + postgresql_where=( + resource_tracker_service_runs.c.service_run_status + == ResourceTrackerServiceRunStatus.RUNNING + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services.py b/packages/postgres-database/src/simcore_postgres_database/models/services.py index 9e8b56bf5c2..ec12f0f3ca8 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/services.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/services.py @@ -1,25 +1,20 @@ -""" Services table - - - List of 3rd party services in the framework - - Services have a key, version, and access rights defined by group ids -""" - import sqlalchemy as sa -from sqlalchemy import null from sqlalchemy.dialects.postgresql import ARRAY, JSONB -from sqlalchemy.sql import expression, func +from sqlalchemy.sql import expression +from ._common import RefActions from .base import metadata -# -# Combines properties as -# - service identifier: key, version -# - overridable properties of the service metadata defined upon publication (injected in the image labels) -# - extra properties assigned during its lifetime (e.g. deprecated, quality, etc) - services_meta_data = sa.Table( + # + # Combines properties as + # - service identifier: key, version + # - overridable properties of the service metadata defined upon publication (injected in the image labels) + # - extra properties assigned during its lifetime (e.g. deprecated, quality, etc) + # "services_meta_data", metadata, + # PRIMARY KEY ---------------------------- sa.Column( "key", sa.String, @@ -30,85 +25,109 @@ "version", sa.String, nullable=False, - doc="MAJOR.MINOR.PATCH semantic versioning (see https://semver.org)", + doc="Service version. 
See format in ServiceVersion", ), + # OWNERSHIP ---------------------------- sa.Column( "owner", sa.BigInteger, sa.ForeignKey( "groups.gid", name="fk_services_meta_data_gid_groups", - onupdate="CASCADE", - ondelete="RESTRICT", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, ), nullable=True, - doc="Identifier of the group that owns this service", + doc="Identifier of the group that owns this service (editable)", ), + # DISPLAY ---------------------------- sa.Column( "name", sa.String, nullable=False, - doc="Display label", + doc="Display label (editable)", ), sa.Column( "description", sa.String, nullable=False, - doc="Markdown-compatible description", + doc="Markdown-compatible description (editable). SEE `description_ui`", + ), + sa.Column( + "description_ui", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="A flag that determines how the `description` column is rendered in the UI (editable)" + "Specifically, it indicates whether the `description` should be presented as a single web page (=true) or in another structured format (default=false)." + "This field is primarily used by the front-end of the application to decide on the presentation style of the service's metadata.", ), sa.Column( "thumbnail", sa.String, nullable=True, - doc="Link to image to us as service thumbnail", + doc="Link to image to us as service thumbnail (editable)", ), + sa.Column( + "icon", + sa.String, + nullable=True, + doc="Link to icon (editable)", + ), + sa.Column( + "version_display", + sa.String, + nullable=True, + doc="A user-friendly or version of the inner software e.g. Matterhorn 2.3 (editable)", + ), + # TAGGING ----------------------------- sa.Column( "classifiers", ARRAY(sa.String, dimensions=1), nullable=False, server_default="{}", - doc="List of standard labels that describe this service (see classifiers table)", + doc="List of standard labels that describe this service (see classifiers table) (editable) ", + ), + sa.Column( + "quality", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Free JSON with quality assesment based on TSR (editable)", ), + # LIFECYCLE ---------------------------- sa.Column( "created", sa.DateTime(), nullable=False, - server_default=func.now(), + server_default=sa.func.now(), doc="Timestamp on creation", ), sa.Column( "modified", sa.DateTime(), nullable=False, - server_default=func.now(), - onupdate=func.now(), + server_default=sa.func.now(), + onupdate=sa.func.now(), doc="Timestamp with last update", ), sa.Column( "deprecated", sa.DateTime(), nullable=True, - server_default=null(), - doc="Timestamp when the service is retired." - "A fixed time before this date, service is marked as deprecated", - ), - sa.Column( - "quality", - JSONB, - nullable=False, - server_default=sa.text("'{}'::jsonb"), - doc="Free JSON with quality assesment based on TSR", + server_default=sa.null(), + doc="Timestamp when the service is retired (editable)." 
+ "A fixed time before this date, service is marked as deprecated.", ), sa.PrimaryKeyConstraint("key", "version", name="services_meta_data_pk"), ) -# -# Defines access rights (execute_access, write_access) on a service (key) -# for a given group (gid) on a product (project_name) -# - services_access_rights = sa.Table( + # + # Defines access rights (execute_access, write_access) on a service (key) + # for a given group (gid) on a product (project_name) + # "services_access_rights", metadata, sa.Column( @@ -129,24 +148,24 @@ sa.ForeignKey( "groups.gid", name="fk_services_gid_groups", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), - doc="Group Identifier", + doc="Group Identifier of user that get these access-rights", ), - # Access Rights flags --- + # ACCESS RIGHTS FLAGS --------------------------------------- sa.Column( "execute_access", sa.Boolean, nullable=False, - server_default=expression.false(), + server_default=sa.false(), doc="If true, group can execute the service", ), sa.Column( "write_access", sa.Boolean, nullable=False, - server_default=expression.false(), + server_default=sa.false(), doc="If true, group can modify the service", ), # ----- @@ -156,64 +175,34 @@ sa.ForeignKey( "products.name", name="fk_services_name_products", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), doc="Product Identifier", ), + # LIFECYCLE ---------------------------- sa.Column( "created", sa.DateTime(), nullable=False, - server_default=func.now(), + server_default=sa.func.now(), doc="Timestamp of creation", ), sa.Column( "modified", sa.DateTime(), nullable=False, - server_default=func.now(), - onupdate=func.now(), + server_default=sa.func.now(), + onupdate=sa.func.now(), doc="Timestamp on last update", ), sa.ForeignKeyConstraint( ["key", "version"], ["services_meta_data.key", "services_meta_data.version"], - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), sa.PrimaryKeyConstraint( "key", "version", "gid", "product_name", name="services_access_pk" ), ) - - -# NOTE: this table will be removed and probably converted into a view of agregated results on services -# SEE https://github.com/ITISFoundation/osparc-simcore/issues/4032 - -services_latest = sa.Table( - "services_latest", - metadata, - sa.Column( - "key", - sa.String, - nullable=False, - doc="Hierarchical identifier of the service e.g. 
simcore/services/dynamic/my-super-service", - ), - sa.Column( - "version", - sa.String, - nullable=False, - doc="latest MAJOR.MINOR.PATCH semantic version of the service (key)", - ), - # - # NOTE: might want to drop some of the columns in service_meta_data coming from image tags and keep a record only for the latest service - # - sa.ForeignKeyConstraint( - ["key", "version"], - ["services_meta_data.key", "services_meta_data.version"], - onupdate="CASCADE", - ondelete="CASCADE", - ), - sa.PrimaryKeyConstraint("key", name="services_latest_pk"), -) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_compatibility.py b/packages/postgres-database/src/simcore_postgres_database/models/services_compatibility.py new file mode 100644 index 00000000000..d151b665885 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/services_compatibility.py @@ -0,0 +1,69 @@ +""" Services table + + - List of 3rd party services in the framework + - Services have a key, version, and access rights defined by group ids +""" + + +import sqlalchemy as sa +import typing_extensions +from sqlalchemy.dialects.postgresql import JSONB +from typing_extensions import NotRequired, Required + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_by_user, + column_modified_datetime, +) +from .base import metadata +from .users import users + + +class CompatiblePolicyDict(typing_extensions.TypedDict, total=False): + # SpecifierSet e.g. ~=0.9 + # SEE https://packaging.python.org/en/latest/specifications/version-specifiers/#id5 + versions_specifier: Required[str] + # Only necessary if key!=PolicySpecifierDict.key + other_service_key: NotRequired[str | None] + + +services_compatibility = sa.Table( + # + # CUSTOM COMPATIBILITY POLICIES + # Otherwise default compatibility policy is employed. + # + "services_compatibility", + metadata, + sa.Column( + "key", + sa.String, + nullable=False, + doc="Service Key Identifier", + ), + sa.Column( + "version", + sa.String, + nullable=False, + doc="Service version", + ), + sa.Column( + "custom_policy", + JSONB, + nullable=False, + doc="PolicySpecifierDict with custom policy", + ), + # Traceability, i.e. when + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + # Traceability, i.e. 
who + column_modified_by_user(users_table=users, required=True), + # Constraints + sa.ForeignKeyConstraint( + ["key", "version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + sa.PrimaryKeyConstraint("key", "version", name="services_compatibility_pk"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_consume_filetypes.py b/packages/postgres-database/src/simcore_postgres_database/models/services_consume_filetypes.py index efbae9b9d32..65c6c8546b3 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/services_consume_filetypes.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/services_consume_filetypes.py @@ -7,6 +7,7 @@ """ import sqlalchemy as sa +from ._common import RefActions from .base import metadata # @@ -30,7 +31,7 @@ "service_version", sa.String, nullable=False, - doc="Version part of a $key:$version service resource name", + doc="Defines the minimum version (included) of this version from which this information applies", ), sa.Column( "service_display_name", @@ -75,8 +76,8 @@ sa.ForeignKeyConstraint( ["service_key", "service_version"], ["services_meta_data.key", "services_meta_data.version"], - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), # This table stores services (key:version) that consume filetype by AT LEAST one input_port # if more ports can consume, then it should only be added once in this table diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_environments.py b/packages/postgres-database/src/simcore_postgres_database/models/services_environments.py new file mode 100644 index 00000000000..498191b7267 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/services_environments.py @@ -0,0 +1,73 @@ +from typing import Final + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + +# Intentionally includes the term "SECRET" to avoid leaking this value on a public domain +VENDOR_SECRET_PREFIX: Final[str] = "OSPARC_VARIABLE_VENDOR_SECRET_" + + +services_vendor_secrets = sa.Table( + "services_vendor_secrets", + # + # - A secret is an environment value passed to the service at runtime + # - A vendor can associate secrets (e.g. 
a license code) to any of the services it owns + # - secrets_map + # - keys should be prefixed with OSPARC_VARIABLE_VENDOR_SECRET_ (can still normalize on read) + # - values might be encrypted + # + metadata, + sa.Column( + "service_key", + sa.String, + doc="A single environment is allowed per service", + ), + sa.Column( + "service_base_version", + sa.String, + doc="Defines the minimum version (included) from which these secrets apply", + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + name="fk_services_name_products", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + # NOTE: since this is part of the primary key this is required + # NOTE: an alternative would be to not use this as a primary key + server_default="osparc", + doc="Product Identifier", + ), + sa.Column( + "secrets_map", + JSONB, + nullable=False, + server_default=sa.text("'{}'::jsonb"), + doc="Maps OSPARC_VARIABLE_VENDOR_SECRET_* identifiers to a secret value (could be encrypted) " + "that can be replaced at runtime if found in the compose-specs", + ), + # TIME STAMPS ---- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + # CONSTRAINTS -- + sa.ForeignKeyConstraint( + ["service_key", "service_base_version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + # NOTE: this might be a problem: if a version in the metadata is deleted, + # all versions above will take the secret_map for the previous one. + ), + sa.PrimaryKeyConstraint( + "service_key", + "service_base_version", + "product_name", + name="services_vendor_secrets_pk", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_specifications.py b/packages/postgres-database/src/simcore_postgres_database/models/services_specifications.py index 40776119023..452be5e25f0 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/services_specifications.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/services_specifications.py @@ -8,6 +8,7 @@ import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB +from ._common import RefActions from .base import metadata services_specifications = sa.Table( @@ -19,15 +20,20 @@ nullable=False, doc="Service Key Identifier", ), - sa.Column("service_version", sa.String, nullable=False, doc="Service version"), + sa.Column( + "service_version", + sa.String, + nullable=False, + doc="Service version", + ), sa.Column( "gid", sa.BigInteger, sa.ForeignKey( "groups.gid", name="fk_services_specifications_gid_groups", - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), doc="Group Identifier", ), @@ -47,8 +53,8 @@ sa.ForeignKeyConstraint( ["service_key", "service_version"], ["services_meta_data.key", "services_meta_data.version"], - onupdate="CASCADE", - ondelete="CASCADE", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, ), # This table stores services (key:version) that consume filetype by AT LEAST one input_port # if more ports can consume, then it should only be added once in this table diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_tags.py b/packages/postgres-database/src/simcore_postgres_database/models/services_tags.py new file mode 100644 index 00000000000..083ea9f2807 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/services_tags.py @@ -0,0 +1,46 @@ 
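The services_vendor_secrets table above keeps a secrets_map whose identifiers are expected to carry the OSPARC_VARIABLE_VENDOR_SECRET_ prefix, and the comment notes that keys can still be normalized on read. A minimal read-side sketch under those assumptions (the helper name and connection handling are illustrative, not part of this change):

import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncConnection

from simcore_postgres_database.models.services_environments import (
    VENDOR_SECRET_PREFIX,
    services_vendor_secrets,
)


async def get_vendor_secrets(
    conn: AsyncConnection,
    product_name: str,
    service_key: str,
    service_base_version: str,
) -> dict[str, str]:
    # single row addressed by the (service_key, service_base_version, product_name) primary key
    secrets_map = await conn.scalar(
        sa.select(services_vendor_secrets.c.secrets_map).where(
            (services_vendor_secrets.c.service_key == service_key)
            & (services_vendor_secrets.c.service_base_version == service_base_version)
            & (services_vendor_secrets.c.product_name == product_name)
        )
    )
    # normalize keys on read so every identifier carries the expected prefix
    return {
        (key if key.startswith(VENDOR_SECRET_PREFIX) else f"{VENDOR_SECRET_PREFIX}{key.upper()}"): value
        for key, value in (secrets_map or {}).items()
    }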
+import sqlalchemy as sa + +from ._common import RefActions +from .base import metadata +from .tags import tags + +services_tags = sa.Table( + # + # Tags assigned to a service (many-to-many relation) + # + "services_tags", + metadata, + # Service + sa.Column( + "service_key", + sa.String, + nullable=False, + doc="Key name identifier for the service, without specifiying its versions", + ), + sa.Column( + "service_version", + sa.String, + nullable=False, + doc="Version of the service. Combined with 'service_key', it forms a unique identifier for this service.", + ), + # Tag + sa.Column( + "tag_id", + sa.BigInteger, + sa.ForeignKey( + tags.c.id, onupdate=RefActions.CASCADE, ondelete=RefActions.CASCADE + ), + nullable=False, + doc="Identifier of the tag assigned to this specific service (service_key, service_version).", + ), + # Constraints + sa.ForeignKeyConstraint( + ["service_key", "service_version"], + ["services_meta_data.key", "services_meta_data.version"], + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + sa.UniqueConstraint( + "service_key", "service_version", "tag_id", name="services_tags_uc" + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/tags.py b/packages/postgres-database/src/simcore_postgres_database/models/tags.py index 0806e268435..da7c788e02d 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/tags.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/tags.py @@ -1,13 +1,12 @@ import sqlalchemy as sa -from ._common import column_created_datetime, column_modified_datetime from .base import metadata -# -# tags: a way to mark any entity (e.g. a project, ...) -# this can be used to perform operations as filter, select, compare, etc -# tags = sa.Table( + # + # A way to mark any entity (e.g. a project, ...) + # this can be used to perform operations as filter, select, compare, etc + # "tags", metadata, sa.Column( @@ -15,106 +14,30 @@ sa.BigInteger(), nullable=False, primary_key=True, + doc="Unique identifier for each tag.", ), - sa.Column( - "name", - sa.String(), - nullable=False, - doc="display name", - ), + sa.Column("name", sa.String(), nullable=False, doc="The display name of the tag."), sa.Column( "description", sa.String(), nullable=True, - doc="description displayed", + doc="A brief description displayed for the tag.", ), sa.Column( "color", sa.String(), nullable=False, - doc="Hex color (see https://www.color-hex.com/)", + doc="Hexadecimal color code representing the tag (e.g., #FF5733).", ), -) - - -# -# tags_to_groups: Maps tags with groups to define the level of access -# of a group (group_id) for the corresponding tag (tag_id) -# -tags_to_groups = sa.Table( - "tags_to_groups", - metadata, sa.Column( - "tag_id", - sa.BigInteger(), - sa.ForeignKey( - tags.c.id, - onupdate="CASCADE", - ondelete="CASCADE", - name="fk_tag_to_group_tag_id", - ), - nullable=False, - doc="Tag unique ID", - ), - sa.Column( - "group_id", - sa.BigInteger, - sa.ForeignKey( - "groups.gid", - onupdate="CASCADE", - ondelete="CASCADE", - name="fk_tag_to_group_group_id", + "priority", + sa.Integer(), + nullable=True, + doc=( + "Explicit ordering priority when displaying tags. " + "Tags with a lower value are displayed first. " + "If NULL, tags are considered to have the lowest priority and " + "are displayed after non-NULL values, ordered by their ID (reflecting creation order)." 
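The priority semantics documented just above (lower values first, NULL treated as lowest priority and then ordered by id) translate directly into an ORDER BY; a small sketch, not part of this change:

import sqlalchemy as sa

from simcore_postgres_database.models.tags import tags

# tags in display order: lower priority first, NULL priorities last, then by id (creation order)
list_tags_in_display_order = sa.select(tags).order_by(
    sa.nullslast(tags.c.priority.asc()),
    tags.c.id.asc(),
)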
), - nullable=False, - doc="Group unique ID", - ), - # ACCESS RIGHTS --- - sa.Column( - "read", - sa.Boolean(), - nullable=False, - server_default=sa.sql.expression.true(), - doc="If true, group can *read* a tag." - "This column can be used to set the tag invisible", - ), - sa.Column( - "write", - sa.Boolean(), - nullable=False, - server_default=sa.sql.expression.false(), - doc="If true, group can *create* and *update* a tag", - ), - sa.Column( - "delete", - sa.Boolean(), - nullable=False, - server_default=sa.sql.expression.false(), - doc="If true, group can *delete* the tag", - ), - # TIME STAMPS ---- - column_created_datetime(), - column_modified_datetime(), - sa.UniqueConstraint("tag_id", "group_id"), -) - - -# -# study_tags: projects marked with tags -# -study_tags = sa.Table( - "study_tags", - metadata, - sa.Column( - "study_id", - sa.BigInteger, - sa.ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"), - nullable=False, - ), - sa.Column( - "tag_id", - sa.BigInteger, - sa.ForeignKey("tags.id", onupdate="CASCADE", ondelete="CASCADE"), - nullable=False, ), - sa.UniqueConstraint("study_id", "tag_id"), ) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/tags_access_rights.py b/packages/postgres-database/src/simcore_postgres_database/models/tags_access_rights.py new file mode 100644 index 00000000000..b818c975817 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/tags_access_rights.py @@ -0,0 +1,68 @@ +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .groups import groups +from .tags import tags + +tags_access_rights = sa.Table( + # + # Maps tags with groups to define the level of access rights + # of a group (group_id) for the corresponding tag (tag_id) + # + "tags_access_rights", + metadata, + sa.Column( + "tag_id", + sa.BigInteger(), + sa.ForeignKey( + tags.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_tag_to_group_tag_id", + ), + nullable=False, + doc="References the unique identifier of the tag that these access rights apply to.", + ), + sa.Column( + "group_id", + sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_tag_to_group_group_id", + ), + nullable=False, + doc="References the unique identifier of the group that has access rights to the tag.", + ), + # ACCESS RIGHTS --- + sa.Column( + "read", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.true(), + doc="Indicates whether the group has permission to view the tag. " + "A value of 'True' allows the group to access the tag's details.", + ), + sa.Column( + "write", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="Indicates whether the group has permission to modify the tag. " + "A value of 'True' grants write access to the group.", + ), + sa.Column( + "delete", + sa.Boolean(), + nullable=False, + server_default=sa.sql.expression.false(), + doc="Indicates whether the group has permission to delete the tag. 
" + "A value of 'True' allows the group to remove the tag.", + ), + # TIME STAMPS ---- + column_created_datetime(timezone=False), + column_modified_datetime(timezone=False), + sa.UniqueConstraint("tag_id", "group_id"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/user_preferences.py b/packages/postgres-database/src/simcore_postgres_database/models/user_preferences.py new file mode 100644 index 00000000000..e380cd23b94 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/user_preferences.py @@ -0,0 +1,83 @@ +import sqlalchemy as sa + +from ._common import RefActions +from .base import metadata +from .products import products +from .users import users + + +def _user_id_column(fk_name: str) -> sa.Column: + return sa.Column( + "user_id", + sa.BigInteger, + sa.ForeignKey( + users.c.id, + name=fk_name, + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + nullable=False, + ) + + +def _product_name_column(fk_name: str) -> sa.Column: + return sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + products.c.name, + name=fk_name, + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, + ), + nullable=False, + ) + + +def _preference_name_column() -> sa.Column: + return sa.Column( + "preference_name", + sa.String, + nullable=False, + ) + + +user_preferences_frontend = sa.Table( + "user_preferences_frontend", + metadata, + _user_id_column("fk_user_preferences_frontend_id_users"), + _product_name_column("fk_user_preferences_frontend_name_products"), + _preference_name_column(), + sa.Column( + "payload", + sa.JSON, + nullable=False, + doc="preference content encoded as json", + ), + sa.PrimaryKeyConstraint( + "user_id", + "product_name", + "preference_name", + name="user_preferences_frontend_pk", + ), +) + +user_preferences_user_service = sa.Table( + "user_preferences_user_service", + metadata, + _user_id_column("fk_user_preferences_user_service_id_users"), + _product_name_column("fk_user_preferences_user_service_name_products"), + _preference_name_column(), + sa.Column( + "payload", + sa.LargeBinary, + nullable=False, + doc="preference content encoded as bytes", + ), + sa.PrimaryKeyConstraint( + "user_id", + "product_name", + "preference_name", + name="user_preferences_user_service_pk", + ), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/user_to_projects.py b/packages/postgres-database/src/simcore_postgres_database/models/user_to_projects.py index 45147bef610..4a66e0be611 100644 --- a/packages/postgres-database/src/simcore_postgres_database/models/user_to_projects.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/user_to_projects.py @@ -1,5 +1,6 @@ import sqlalchemy as sa +from ._common import RefActions from .base import metadata from .projects import projects from .users import users @@ -15,8 +16,8 @@ sa.ForeignKey( users.c.id, name="fk_user_to_projects_id_users", - ondelete="CASCADE", - onupdate="CASCADE", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, ), nullable=False, ), @@ -26,8 +27,8 @@ sa.ForeignKey( projects.c.id, name="fk_user_to_projects_id_projects", - ondelete="CASCADE", - onupdate="CASCADE", + ondelete=RefActions.CASCADE, + onupdate=RefActions.CASCADE, ), nullable=False, ), diff --git a/packages/postgres-database/src/simcore_postgres_database/models/users.py b/packages/postgres-database/src/simcore_postgres_database/models/users.py index b2ef8831229..7be2161ff86 100644 --- 
a/packages/postgres-database/src/simcore_postgres_database/models/users.py +++ b/packages/postgres-database/src/simcore_postgres_database/models/users.py @@ -1,141 +1,137 @@ -""" Users table - - - List of users in the framework - - Users they have a role within the framework that provides - them different access levels to it -""" -from enum import Enum -from functools import total_ordering -from typing import Final, NamedTuple - import sqlalchemy as sa -from sqlalchemy.sql import func +from common_library.users_enums import UserRole, UserStatus +from sqlalchemy.sql import expression +from ._common import RefActions from .base import metadata -_USER_ROLE_TO_LEVEL = { - "ANONYMOUS": 0, - "GUEST": 10, - "USER": 20, - "TESTER": 30, - "ADMIN": 100, -} - - -@total_ordering -class UserRole(Enum): - """SORTED enumeration of user roles - - A role defines a set of privileges the user can perform - Roles are sorted from lower to highest privileges - USER is the role assigned by default A user with a higher/lower role is denoted super/infra user - - ANONYMOUS : The user is not logged in - GUEST : Temporary user with very limited access. Main used for demos and for a limited amount of time - USER : Registered user. Basic permissions to use the platform [default] - TESTER : Upgraded user. First level of super-user with privileges to test the framework. - Can use everything but does not have an effect in other users or actual data - ADMIN : Framework admin. - - See security_access.py - """ - - ANONYMOUS = "ANONYMOUS" - GUEST = "GUEST" - USER = "USER" - TESTER = "TESTER" - ADMIN = "ADMIN" - - @property - def privilege_level(self) -> int: - return _USER_ROLE_TO_LEVEL[self.name] - - def __lt__(self, other: "UserRole") -> bool: - if self.__class__ is other.__class__: - return self.privilege_level < other.privilege_level - return NotImplemented - - -class UserStatus(Enum): - """ - pending: user registered but not confirmed - active: user is confirmed and can use the platform - expired: user is not authorized because it expired after a trial period - banned: user is not authorized - """ - - CONFIRMATION_PENDING = "PENDING" - ACTIVE = "ACTIVE" - EXPIRED = "EXPIRED" - BANNED = "BANNED" - +__all__: tuple[str, ...] = ( + "UserRole", + "UserStatus", +) users = sa.Table( "users", metadata, + # + # User Identifiers ------------------ + # sa.Column( "id", - sa.BigInteger, + sa.BigInteger(), nullable=False, - doc="Primary key for user identifier", + doc="Primary key index for user identifier", ), sa.Column( "name", - sa.String, + sa.String(), nullable=False, - doc="Display name. NOTE: this is NOT a user name since uniqueness is NOT guaranteed", + doc="username is a unique short user friendly identifier e.g. pcrespov, sanderegg, GitHK, ..." + "This identifier **is public**.", + ), + sa.Column( + "primary_gid", + sa.BigInteger(), + sa.ForeignKey( + "groups.gid", + name="fk_users_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + doc="User's group ID", + ), + # + # User Information ------------------ + # + sa.Column( + "first_name", + sa.String(), + doc="User's first name", + ), + sa.Column( + "last_name", + sa.String(), + doc="User's last/family name", ), sa.Column( "email", - sa.String, + sa.String(), nullable=False, - doc="User email is used as username since it is a unique human-readable identifier", + doc="Validated email", ), sa.Column( "phone", - sa.String, + sa.String(), nullable=True, # since 2FA can be configured optional - doc="Confirmed user phone used e.g. 
to send a code for a two-factor-authentication", + doc="Confirmed user phone used e.g. to send a code for a two-factor-authentication." + "NOTE: new policy (NK) is that the same phone can be reused therefore it does not has to be unique", ), - sa.Column("password_hash", sa.String, nullable=False), + # + # User Secrets ------------------ + # sa.Column( - "primary_gid", - sa.BigInteger, - sa.ForeignKey( - "groups.gid", - name="fk_users_gid_groups", - onupdate="CASCADE", - ondelete="RESTRICT", - ), - doc="User's group ID", + "password_hash", + sa.String(), + nullable=False, + doc="Hashed password", ), + # + # User Account ------------------ + # sa.Column( "status", sa.Enum(UserStatus), nullable=False, default=UserStatus.CONFIRMATION_PENDING, - doc="Status of the user account. SEE UserStatus", + doc="Current status of the user's account", ), sa.Column( "role", sa.Enum(UserRole), nullable=False, default=UserRole.USER, - doc="Use for role-base authorization", + doc="Used for role-base authorization", + ), + # + # User Privacy Rules ------------------ + # + sa.Column( + "privacy_hide_username", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, it hides users.name to others", + ), + sa.Column( + "privacy_hide_fullname", + sa.Boolean, + nullable=False, + server_default=expression.true(), + doc="If true, it hides users.first_name, users.last_name to others", ), + sa.Column( + "privacy_hide_email", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, it hides users.email to others", + ), + # + # Timestamps --------------- + # sa.Column( "created_at", sa.DateTime(), nullable=False, - server_default=func.now(), + server_default=sa.func.now(), doc="Registration timestamp", ), sa.Column( "modified", sa.DateTime(), nullable=False, - server_default=func.now(), - onupdate=func.now(), # this will auto-update on modification + server_default=sa.func.now(), + onupdate=sa.func.now(), # this will auto-update on modification doc="Last modification timestamp", ), sa.Column( @@ -145,67 +141,13 @@ class UserStatus(Enum): doc="Sets the expiration date for trial accounts." "If set to NULL then the account does not expire.", ), - sa.Column( - "created_ip", - sa.String(), - nullable=True, - doc="User IP from which use was created", - ), # --------------------------- sa.PrimaryKeyConstraint("id", name="user_pkey"), + sa.UniqueConstraint("name", name="user_name_ukey"), sa.UniqueConstraint("email", name="user_login_key"), - sa.UniqueConstraint( - "phone", - name="user_phone_unique_constraint", - # NOTE: that cannot use same phone for two user accounts - ), ) -class FullNameTuple(NamedTuple): - first_name: str - last_name: str - - -class UserNameConverter: - """Helper functions to convert full-name to name in both directions""" - - # - # CONVENTION: Instead of having first and last name in the database - # we collapse it in the column name as 'first_name.lastname'. - # - # NOTE: there is a plan to change this https://github.com/ITISFoundation/osparc-simcore/issues/1574 - SEPARATOR: Final[str] = "." 
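The privacy_hide_* flags added to the users table above are plain booleans; enforcing them is left to the reading side. A purely illustrative sketch (assumed query shape, not taken from this change) of masking fields in a public listing:

import sqlalchemy as sa

from simcore_postgres_database.models.users import users

# expose username/email only when the corresponding privacy flag is off
public_users_query = sa.select(
    users.c.id,
    sa.case(
        (users.c.privacy_hide_username.is_(False), users.c.name),
        else_=sa.null(),
    ).label("username"),
    sa.case(
        (users.c.privacy_hide_email.is_(False), users.c.email),
        else_=sa.null(),
    ).label("email"),
)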
- TOKEN: Final[str] = "#" - - @classmethod - def get_full_name(cls, name: str) -> FullNameTuple: - """Parses value from users.name and returns separated full and last name in a tuple""" - first_name, last_name = name, "" - - if cls.SEPARATOR in name: - first_name, last_name = name.split(cls.SEPARATOR, maxsplit=1) - - return FullNameTuple( - first_name.replace(cls.TOKEN, cls.SEPARATOR), - last_name.replace(cls.TOKEN, cls.SEPARATOR), - ) - - @classmethod - def _safe_string(cls, value: str) -> str: - # removes any possible token in value (unlikely) - value = value.replace(cls.TOKEN, "") - # substitutes matching separators symbol with an alternative - return value.replace(cls.SEPARATOR, cls.TOKEN) - - @classmethod - def get_name(cls, first_name: str, last_name: str) -> str: - """Composes value for users.name column""" - return ( - cls._safe_string(first_name) + cls.SEPARATOR + cls._safe_string(last_name) - ) - - # ------------------------ TRIGGERS new_user_trigger = sa.DDL( @@ -231,7 +173,7 @@ def get_name(cls, first_name: str, last_name: str) -> str: INSERT INTO "groups" ("name", "description", "type") VALUES (NEW.name, 'primary group', 'PRIMARY') RETURNING gid INTO group_id; INSERT INTO "user_to_groups" ("uid", "gid") VALUES (NEW.id, group_id); UPDATE "users" SET "primary_gid" = group_id WHERE "id" = NEW.id; - -- set everyone goup + -- set everyone group INSERT INTO "user_to_groups" ("uid", "gid") VALUES (NEW.id, (SELECT "gid" FROM "groups" WHERE "type" = 'EVERYONE')); ELSIF TG_OP = 'UPDATE' THEN UPDATE "groups" SET "name" = NEW.name WHERE "gid" = NEW.primary_gid; @@ -243,7 +185,11 @@ def get_name(cls, first_name: str, last_name: str) -> str: """ ) -sa.event.listen(users, "after_create", set_user_groups_procedure) +sa.event.listen( + users, + "after_create", + set_user_groups_procedure, +) sa.event.listen( users, "after_create", diff --git a/packages/postgres-database/src/simcore_postgres_database/models/users_details.py b/packages/postgres-database/src/simcore_postgres_database/models/users_details.py new file mode 100644 index 00000000000..cf58af8187e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/users_details.py @@ -0,0 +1,130 @@ +import sqlalchemy as sa +from common_library.users_enums import AccountRequestStatus +from sqlalchemy.dialects import postgresql + +from ._common import ( + RefActions, + column_created_by_user, + column_created_datetime, + column_modified_datetime, + register_modified_datetime_auto_update_trigger, +) +from .base import metadata +from .products import products # Import the products table +from .users import users + +users_pre_registration_details = sa.Table( + "users_pre_registration_details", + # + # Provides extra attributes for a user that either not required or that are provided before the user is created. + # The latter state is denoted as "pre-registration" and specific attributes in this state are prefixed with `pre_`. Therefore, + # a row can be added in this table during pre-registration i.e. even before the `users` row exists. + # + metadata, + sa.Column( + "id", + sa.BigInteger, + sa.Identity(start=1, cycle=False), + primary_key=True, + doc="Primary key for the pre-registration entry", + ), + sa.Column( + "user_id", + sa.Integer, + sa.ForeignKey( + users.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + nullable=True, + doc="None if row was added during pre-registration or join column with `users` after registration", + ), + # Pre-registration columns: i.e. 
fields copied to `users` upon registration + sa.Column( + "pre_email", + sa.String(), + nullable=False, + doc="Email of the user on pre-registration (copied to users.email upon registration)", + ), + sa.Column( + "pre_first_name", + sa.String(), + doc="First name on pre-registration (copied to users.first_name upon registration)", + ), + sa.Column( + "pre_last_name", + sa.String(), + doc="Last name on pre-registration (copied to users.last_name upon registration)", + ), + sa.Column( + "pre_phone", + sa.String(), + doc="Phone provided on pre-registration" + "NOTE: this is not copied upon registration since it needs to be confirmed", + ), + # Account Request + sa.Column( + "account_request_status", + sa.Enum(AccountRequestStatus), + nullable=False, + server_default=AccountRequestStatus.PENDING.value, + doc="Status of review for the account request", + ), + sa.Column( + "account_request_reviewed_by", + sa.Integer, + sa.ForeignKey( + users.c.id, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name="fk_users_pre_registration_reviewed_by_user_id", + ), + nullable=True, + doc="Tracks who approved or rejected the account request", + ), + sa.Column( + "account_request_reviewed_at", + sa.DateTime(timezone=True), + nullable=True, + doc="Timestamp when the account request was reviewed", + ), + # Product the user is requesting access to + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + products.c.name, + onupdate=RefActions.CASCADE, + ondelete=RefActions.SET_NULL, + name="fk_users_pre_registration_details_product_name", + ), + nullable=True, + doc="Product that the user is requesting an account for", + ), + # Billable address columns: + sa.Column("institution", sa.String(), doc="the name of a company or university"), + sa.Column("address", sa.String()), + sa.Column("city", sa.String()), + sa.Column("state", sa.String()), + sa.Column("country", sa.String()), + sa.Column("postal_code", sa.String()), + sa.Column( + "extras", + postgresql.JSONB(astext_type=sa.Text()), + server_default=sa.text("'{}'::jsonb"), + doc="Extra information provided in the form but still not defined as a column.", + ), + # Other related users + column_created_by_user(users_table=users, required=False), + column_created_datetime(timezone=False), + column_modified_datetime(timezone=False), + # CONSTRAINTS: + # Composite unique constraint to ensure a user can only have one pre-registration per product + sa.UniqueConstraint( + "pre_email", + "product_name", + name="users_pre_registration_details_pre_email_product_name_key", + ), +) + +register_modified_datetime_auto_update_trigger(users_pre_registration_details) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/wallet_to_groups.py b/packages/postgres-database/src/simcore_postgres_database/models/wallet_to_groups.py new file mode 100644 index 00000000000..7679b5f5285 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/wallet_to_groups.py @@ -0,0 +1,60 @@ +import sqlalchemy as sa +from sqlalchemy.sql import expression + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .groups import groups +from .wallets import wallets + +wallet_to_groups = sa.Table( + "wallet_to_groups", + metadata, + sa.Column( + "wallet_id", + sa.BigInteger, + sa.ForeignKey( + wallets.c.wallet_id, + name="fk_wallet_to_groups_id_wallets", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + doc="Wallet unique ID", + ), + sa.Column( + "gid", + 
sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + name="fk_wallet_to_groups_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + doc="Group unique identifier", + ), + # Access Rights flags --- + sa.Column( + "read", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can use the wallet", + ), + sa.Column( + "write", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can modify the wallet", + ), + sa.Column( + "delete", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can delete the wallet", + ), + # ----- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.UniqueConstraint("wallet_id", "gid"), +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/wallets.py b/packages/postgres-database/src/simcore_postgres_database/models/wallets.py new file mode 100644 index 00000000000..27fa821a6bf --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/wallets.py @@ -0,0 +1,101 @@ +import enum + +import sqlalchemy as sa + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata + + +class WalletStatus(str, enum.Enum): + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + + +wallets = sa.Table( + "wallets", + metadata, + sa.Column( + "wallet_id", + sa.BigInteger, + nullable=False, + autoincrement=True, + primary_key=True, + doc="Wallet index", + ), + sa.Column("name", sa.String, nullable=False, doc="Display name"), + sa.Column("description", sa.String, nullable=True, doc="Short description"), + sa.Column( + "owner", + sa.BigInteger, + sa.ForeignKey( + "groups.gid", + name="fk_wallets_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + nullable=False, + doc="Identifier of the group that owns this wallet (Should be just PRIMARY GROUP)", + ), + sa.Column( + "thumbnail", + sa.String, + nullable=True, + doc="Link to image used as the wallet thumbnail", + ), + sa.Column( + "status", + sa.Enum(WalletStatus), + nullable=False, + doc="Status of the wallet: ACTIVE or INACTIVE", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_wallets_product_name", + ), + nullable=False, + doc="Products unique name", + ), +) + +# ------------------------ TRIGGERS +new_wallet_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS wallet_modification on wallets; +CREATE TRIGGER wallet_modification +AFTER INSERT ON wallets + FOR EACH ROW + EXECUTE PROCEDURE set_wallet_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_wallet_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_wallet_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO "wallet_to_groups" ("gid", "wallet_id", "read", "write", "delete") VALUES (NEW.owner, NEW.wallet_id, TRUE, TRUE, TRUE); + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + +sa.event.listen( + wallets, "after_create", assign_wallet_access_rights_to_owner_group_procedure +) +sa.event.listen( + wallets, + "after_create", + new_wallet_trigger, +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/workspaces.py
b/packages/postgres-database/src/simcore_postgres_database/models/workspaces.py new file mode 100644 index 00000000000..756bbe9642e --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/workspaces.py @@ -0,0 +1,97 @@ +import sqlalchemy as sa + +from ._common import ( + RefActions, + column_created_datetime, + column_modified_datetime, + column_trashed_by_user, + column_trashed_datetime, +) +from .base import metadata +from .users import users + +workspaces = sa.Table( + "workspaces", + metadata, + sa.Column( + "workspace_id", + sa.BigInteger, + nullable=False, + autoincrement=True, + primary_key=True, + doc="Workspace index", + ), + sa.Column("name", sa.String, nullable=False, doc="Display name"), + sa.Column("description", sa.String, nullable=True, doc="Short description"), + sa.Column( + "thumbnail", + sa.String, + nullable=True, + doc="Link to image as to workspace thumbnail", + ), + sa.Column( + "owner_primary_gid", + sa.BigInteger, + sa.ForeignKey( + "groups.gid", + name="fk_workspaces_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.RESTRICT, + ), + nullable=False, + doc="Identifier of the group that owns this workspace (Should be just PRIMARY GROUP)", + ), + sa.Column( + "product_name", + sa.String, + sa.ForeignKey( + "products.name", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + name="fk_workspaces_product_name", + ), + nullable=False, + doc="Products unique name", + ), + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + column_trashed_datetime("workspace"), + column_trashed_by_user("workspace", users_table=users), +) + + +# ------------------------ TRIGGERS +new_workspace_trigger = sa.DDL( + """ +DROP TRIGGER IF EXISTS workspace_modification on workspaces; +CREATE TRIGGER workspace_modification +AFTER INSERT ON workspaces + FOR EACH ROW + EXECUTE PROCEDURE set_workspace_to_owner_group(); +""" +) + + +# --------------------------- PROCEDURES +assign_workspace_access_rights_to_owner_group_procedure = sa.DDL( + """ +CREATE OR REPLACE FUNCTION set_workspace_to_owner_group() RETURNS TRIGGER AS $$ +DECLARE + group_id BIGINT; +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO "workspaces_access_rights" ("gid", "workspace_id", "read", "write", "delete") VALUES (NEW.owner_primary_gid, NEW.workspace_id, TRUE, TRUE, TRUE); + END IF; + RETURN NULL; +END; $$ LANGUAGE 'plpgsql'; + """ +) + +sa.event.listen( + workspaces, "after_create", assign_workspace_access_rights_to_owner_group_procedure +) +sa.event.listen( + workspaces, + "after_create", + new_workspace_trigger, +) diff --git a/packages/postgres-database/src/simcore_postgres_database/models/workspaces_access_rights.py b/packages/postgres-database/src/simcore_postgres_database/models/workspaces_access_rights.py new file mode 100644 index 00000000000..6bc88d07338 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/models/workspaces_access_rights.py @@ -0,0 +1,61 @@ +import sqlalchemy as sa +from sqlalchemy.sql import expression + +from ._common import RefActions, column_created_datetime, column_modified_datetime +from .base import metadata +from .groups import groups +from .workspaces import workspaces + +workspaces_access_rights = sa.Table( + "workspaces_access_rights", + metadata, + sa.Column( + "workspace_id", + sa.BigInteger, + sa.ForeignKey( + workspaces.c.workspace_id, + name="fk_workspaces_access_rights_id_workspaces", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + doc="Workspace unique ID", 
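Given the AFTER INSERT trigger and the set_workspace_to_owner_group() procedure defined above, creating a workspace only requires inserting the workspaces row; the access-rights row for the owner's primary group is filled in by the database. A sketch under that assumption (helper name and connection handling are illustrative):

from sqlalchemy.ext.asyncio import AsyncConnection

from simcore_postgres_database.models.workspaces import workspaces


async def create_workspace(
    conn: AsyncConnection, name: str, owner_primary_gid: int, product_name: str
) -> int:
    # the trigger then inserts (gid=owner_primary_gid, read/write/delete=TRUE) into workspaces_access_rights
    return await conn.scalar(
        workspaces.insert()
        .values(name=name, owner_primary_gid=owner_primary_gid, product_name=product_name)
        .returning(workspaces.c.workspace_id)
    )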
+ ), + sa.Column( + "gid", + sa.BigInteger, + sa.ForeignKey( + groups.c.gid, + name="fk_workspaces_access_rights_gid_groups", + onupdate=RefActions.CASCADE, + ondelete=RefActions.CASCADE, + ), + doc="Group unique IDentifier", + ), + # Access Rights flags --- + sa.Column( + "read", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can use the workspace", + ), + sa.Column( + "write", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can modify the workspace", + ), + sa.Column( + "delete", + sa.Boolean, + nullable=False, + server_default=expression.false(), + doc="If true, group can delete the workspace", + ), + # ----- + column_created_datetime(timezone=True), + column_modified_datetime(timezone=True), + sa.UniqueConstraint("workspace_id", "gid"), + sa.Index("idx_workspaces_access_rights_gid", "gid"), +) diff --git a/services/director-v2/src/simcore_service_director_v2/models/domains/__init__.py b/packages/postgres-database/src/simcore_postgres_database/py.typed similarity index 100% rename from services/director-v2/src/simcore_service_director_v2/models/domains/__init__.py rename to packages/postgres-database/src/simcore_postgres_database/py.typed diff --git a/packages/postgres-database/src/simcore_postgres_database/utils.py b/packages/postgres-database/src/simcore_postgres_database/utils.py index 0551c6a7067..4d8a52cdf40 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils.py @@ -1,8 +1,8 @@ import re from copy import deepcopy -from typing import Optional, Union import sqlalchemy as sa +from sqlalchemy.dialects import postgresql from sqlalchemy.engine import Engine from yarl import URL @@ -15,26 +15,25 @@ def build_url( user: str = "", password: str = "", host: str = "127.0.0.1", - port: int = 5432, + port: int | str = 5432, **_kwargs, ) -> URL: """ Safe build pg url as 'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}' """ - dsn = URL.build( + return URL.build( scheme="postgresql+psycopg2", user=user, password=password, host=host, - port=port, + port=int(port), path=f"/{database}", ) # _kwargs allows expand on larger dicts without raising exceptions - return dsn def create_tables(dsn: URL): - engine: Optional[Engine] = None + engine: Engine | None = None try: engine = sa.create_engine(str(dsn)) assert engine # nosec @@ -46,7 +45,7 @@ def create_tables(dsn: URL): def raise_if_not_responsive(dsn: URL, *, verbose=False): """Checks whether database is responsive, otherwise it throws exception""" - engine: Optional[Engine] = None + engine: Engine | None = None try: engine = sa.create_engine( str(dsn), echo=verbose, echo_pool=verbose, pool_timeout=5 @@ -62,7 +61,7 @@ def raise_if_not_responsive(dsn: URL, *, verbose=False): _URL_PASS_RE = re.compile(r":(\w+)@") -def hide_url_pass(url: Union[str, URL]) -> str: +def hide_url_pass(url: str | URL) -> str: return _URL_PASS_RE.sub(":********@", str(url)) @@ -71,6 +70,14 @@ def hide_dict_pass(data: dict) -> dict: for key in data_clone: if "pass" in key: data_clone[key] = "*" * 8 - elif "url" == key: + elif key == "url": data_clone[key] = hide_url_pass(data[key]) return data_clone + + +def as_postgres_sql_query_str(statement) -> str: + compiled = statement.compile( + compile_kwargs={"literal_binds": True}, + dialect=postgresql.dialect(), # type: ignore[misc] + ) + return f"{compiled}" diff --git 
a/packages/postgres-database/src/simcore_postgres_database/utils_aiopg.py b/packages/postgres-database/src/simcore_postgres_database/utils_aiopg.py index 46086c268b0..d0ed2883361 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_aiopg.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_aiopg.py @@ -39,6 +39,5 @@ async def raise_if_migration_not_ready(engine: Engine): version_num = await conn.scalar('SELECT "version_num" FROM "alembic_version"') head_version_num = get_current_head() if version_num != head_version_num: - raise DBMigrationError( - f"Migration is incomplete, expected {head_version_num} but got {version_num}" - ) + msg = f"Migration is incomplete, expected {head_version_num} but got {version_num}" + raise DBMigrationError(msg) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_aiopg_orm.py b/packages/postgres-database/src/simcore_postgres_database/utils_aiopg_orm.py index 86cfbf9c513..4a582769d3e 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_aiopg_orm.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_aiopg_orm.py @@ -6,11 +6,12 @@ - the new async sqlalchemy ORM https://docs.sqlalchemy.org/en/14/orm/ - https://piccolo-orm.readthedocs.io/en/latest/index.html """ + # pylint: disable=no-value-for-parameter import functools import operator -from typing import Generic, Optional, TypeVar, Union +from typing import Any, Generic, TypeVar, cast import sqlalchemy as sa from aiopg.sa.connection import SAConnection @@ -25,7 +26,7 @@ RowUId = TypeVar("RowUId", int, str) # typically id or uuid -def _normalize(names: Union[str, list[str], None]) -> list[str]: +def _normalize(names: str | list[str] | None) -> list[str]: if not names: return [] if isinstance(names, str): @@ -37,6 +38,8 @@ def _normalize(names: Union[str, list[str], None]) -> list[str]: ALL_COLUMNS = f"{__name__}.ALL_COLUMNS" PRIMARY_KEY = f"{__name__}.PRIMARY_KEY" +QueryT = TypeVar("QueryT", bound=UpdateBase) + class BaseOrm(Generic[RowUId]): def __init__( @@ -44,8 +47,8 @@ def __init__( table: sa.Table, connection: SAConnection, *, - readonly: Optional[set] = None, - writeonce: Optional[set] = None, + readonly: set | None = None, + writeonce: set | None = None, ): """ :param readonly: read-only columns typically created in the server side, defaults to None @@ -57,19 +60,20 @@ def __init__( self._writeonce: set = writeonce or set() # row selection logic - self._where_clause = None + self._where_clause: Any = None try: self._primary_key: Column = next(c for c in table.columns if c.primary_key) # FIXME: how can I compare a concrete with a generic type?? 
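For context, a minimal usage sketch of this helper (the subclass and helper function are illustrative; set_filter and fetch are defined further below):

from aiopg.sa.connection import SAConnection

from simcore_postgres_database.models.users import users
from simcore_postgres_database.utils_aiopg_orm import BaseOrm


class UsersOrm(BaseOrm[int]):
    def __init__(self, connection: SAConnection):
        # id/created_at/modified are produced server-side, hence marked read-only here
        super().__init__(users, connection, readonly={"id", "created_at", "modified"})


async def get_user_email(connection: SAConnection, user_id: int) -> str | None:
    users_orm = UsersOrm(connection)
    row = await users_orm.set_filter(rowid=user_id).fetch(["email"])
    return row.email if row else None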
# assert self._primary_key.type.python_type == RowUId # nosec except StopIteration as e: - raise ValueError(f"Table {table.name} MUST define a primary key") from e + msg = f"Table {table.name} MUST define a primary key" + raise ValueError(msg) from e self._table = table def _compose_select_query( self, - columns: Union[str, list[str]], + columns: str | list[str], ) -> Select: column_names: list[str] = _normalize(columns) @@ -82,13 +86,13 @@ def _compose_select_query( ] ) else: - query = sa.select([self._table.c[name] for name in column_names]) + query = sa.select(*[self._table.c[name] for name in column_names]) return query def _append_returning( - self, columns: Union[str, list[str]], query: UpdateBase - ) -> tuple[UpdateBase, bool]: + self, columns: str | list[str], query: QueryT + ) -> tuple[QueryT, bool]: column_names: list[str] = _normalize(columns) is_scalar: bool = len(column_names) == 1 @@ -111,20 +115,22 @@ def _append_returning( def _check_access_rights(access: set, values: dict) -> None: not_allowed: set[str] = access.intersection(values.keys()) if not_allowed: - raise ValueError(f"Columns {not_allowed} are read-only") + msg = f"Columns {not_allowed} are read-only" + raise ValueError(msg) @property def columns(self) -> ImmutableColumnCollection: return self._table.columns - def set_filter(self, rowid: Optional[RowUId] = None, **unique_id) -> "BaseOrm": + def set_filter(self, rowid: RowUId | None = None, **unique_id) -> "BaseOrm": """ Sets default for read operations either by passing a row identifier or a filter """ if unique_id and rowid: - raise ValueError("Either identifier or unique condition but not both") + msg = "Either identifier or unique condition but not both" + raise ValueError(msg) - if rowid: + if rowid is not None: self._where_clause = self._primary_key == rowid elif unique_id: self._where_clause = functools.reduce( @@ -135,9 +141,8 @@ def set_filter(self, rowid: Optional[RowUId] = None, **unique_id) -> "BaseOrm": ), ) if not self.is_filter_set(): - raise ValueError( - "Either identifier or unique condition required. None provided" - ) + msg = "Either identifier or unique condition required. 
None provided" + raise ValueError(msg) return self def clear_filter(self) -> None: @@ -149,10 +154,10 @@ def is_filter_set(self) -> bool: async def fetch( self, - returning_cols: Union[str, list[str]] = ALL_COLUMNS, + returning_cols: str | list[str] = ALL_COLUMNS, *, - rowid: Optional[RowUId] = None, - ) -> Optional[RowProxy]: + rowid: RowUId | None = None, + ) -> RowProxy | None: query = self._compose_select_query(returning_cols) if rowid: # overrides pinned row @@ -162,14 +167,13 @@ async def fetch( query = query.where(self._where_clause) result: ResultProxy = await self._conn.execute(query) - row: Optional[RowProxy] = await result.first() + row: RowProxy | None = await result.first() return row async def fetch_all( self, - returning_cols: Union[str, list[str]] = ALL_COLUMNS, + returning_cols: str | list[str] = ALL_COLUMNS, ) -> list[RowProxy]: - query = self._compose_select_query(returning_cols) if self.is_filter_set(): assert self._where_clause is not None # nosec @@ -181,10 +185,10 @@ async def fetch_all( async def fetch_page( self, - returning_cols: Union[str, list[str]] = ALL_COLUMNS, + returning_cols: str | list[str] = ALL_COLUMNS, *, offset: int, - limit: Optional[int] = None, + limit: int | None = None, sort_by=None, ) -> tuple[list[RowProxy], int]: """Support for paginated fetchall @@ -209,7 +213,7 @@ async def fetch_page( if offset > 0 or limit is not None: # eval total count if pagination options enabled total_count = await self._conn.scalar( - query.with_only_columns([func.count()]) + query.with_only_columns(func.count()) .select_from(self._table) .order_by(None) ) @@ -231,8 +235,8 @@ async def fetch_page( return rows, total_count async def update( - self, returning_cols: Union[str, list[str]] = PRIMARY_KEY, **values - ) -> Union[RowUId, RowProxy, None]: + self, returning_cols: str | list[str] = PRIMARY_KEY, **values + ) -> RowUId | RowProxy | None: self._check_access_rights(self._readonly, values) self._check_access_rights(self._writeonce, values) @@ -243,23 +247,23 @@ async def update( query, is_scalar = self._append_returning(returning_cols, query) if is_scalar: - return await self._conn.scalar(query) + return cast(RowUId, await self._conn.scalar(query)) result: ResultProxy = await self._conn.execute(query) - row: Optional[RowProxy] = await result.first() + row: RowProxy | None = await result.first() return row async def insert( - self, returning_cols: Union[str, list[str]] = PRIMARY_KEY, **values - ) -> Union[RowUId, RowProxy, None]: + self, returning_cols: str | list[str] = PRIMARY_KEY, **values + ) -> RowUId | RowProxy | None: self._check_access_rights(self._readonly, values) query: Insert = self._table.insert().values(**values) query, is_scalar = self._append_returning(returning_cols, query) if is_scalar: - return await self._conn.scalar(query) + return cast(RowUId, await self._conn.scalar(query)) result: ResultProxy = await self._conn.execute(query) - row: Optional[RowProxy] = await result.first() + row: RowProxy | None = await result.first() return row diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_aiosqlalchemy.py b/packages/postgres-database/src/simcore_postgres_database/utils_aiosqlalchemy.py index 078d076d90d..5cfaef1a219 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_aiosqlalchemy.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_aiosqlalchemy.py @@ -1,14 +1,19 @@ -from typing import Any +from typing import Any, TypeAlias, TypeVar import sqlalchemy as sa +import sqlalchemy.exc as 
sql_exc +from common_library.errors_classes import OsparcErrorMixin +from sqlalchemy.dialects.postgresql.asyncpg import AsyncAdapt_asyncpg_dbapi from sqlalchemy.ext.asyncio import AsyncEngine from .utils_migration import get_current_head -async def get_pg_engine_stateinfo(engine: AsyncEngine) -> dict[str, Any]: +async def get_pg_engine_stateinfo(engine: AsyncEngine) -> dict[str, str]: + checkedin = engine.pool.checkedin() # type: ignore + checkedout = engine.pool.checkedout() # type: ignore return { - "current pool connections": f"{engine.pool.checkedin()=},{engine.pool.checkedout()=}", + "current pool connections": f"{checkedin=},{checkedout=}", } @@ -16,7 +21,7 @@ class DBMigrationError(RuntimeError): pass -async def raise_if_migration_not_ready(engine: AsyncEngine): +async def raise_if_migration_not_ready(engine: AsyncEngine) -> None: """Ensures db migration is complete :raises DBMigrationError @@ -27,6 +32,51 @@ async def raise_if_migration_not_ready(engine: AsyncEngine): ) head_version_num = get_current_head() if version_num != head_version_num: - raise DBMigrationError( - f"Migration is incomplete, expected {head_version_num} but got {version_num}" - ) + msg = f"Migration is incomplete, expected {head_version_num} but got {version_num}" + raise DBMigrationError(msg) + + +AsyncpgSQLState: TypeAlias = str +ErrorT = TypeVar("ErrorT", bound=OsparcErrorMixin) +ErrorKwars: TypeAlias = dict[str, Any] + + +def map_db_exception( + exception: Exception, + exception_map: dict[AsyncpgSQLState, tuple[type[ErrorT], ErrorKwars]], + default_exception: type[ErrorT] | None = None, +) -> ErrorT | Exception: + """Maps SQLAlchemy database exceptions to domain-specific exceptions. + + This function inspects SQLAlchemy and asyncpg exceptions to identify the error type + by checking pgcodes or error messages, and converts them to appropriate domain exceptions. 
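+
+    A minimal usage sketch (``MyUniqueError`` stands for a hypothetical domain error
+    deriving from ``OsparcErrorMixin``; ``my_table`` is a placeholder table; ``sql_exc``
+    is this module's alias for ``sqlalchemy.exc``; the pattern mirrors the callers in
+    ``utils_projects_metadata`` and ``utils_projects_nodes``)::
+
+        try:
+            await conn.execute(my_table.insert().values(name="foo"))
+        except sql_exc.IntegrityError as exc:
+            raise map_db_exception(
+                exc,
+                {
+                    asyncpg.exceptions.UniqueViolationError.sqlstate: (
+                        MyUniqueError,
+                        {"name": "foo"},
+                    ),
+                },
+            ) from exc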
+ + Args: + exception: The original exception from SQLAlchemy or the database driver + exception_map: Dictionary mapping pgcode + default_exception: Exception class to use if no matching error is found + + Returns: + Domain-specific exception instance or the original exception if no mapping found + and no default_exception provided + """ + pgcode = None + + # Handle SQLAlchemy wrapped exceptions + if isinstance(exception, sql_exc.IntegrityError) and hasattr(exception, "orig"): + orig_error = exception.orig + # Handle asyncpg adapter exceptions + if isinstance(orig_error, AsyncAdapt_asyncpg_dbapi.IntegrityError) and hasattr( + orig_error, "pgcode" + ): + assert hasattr(orig_error, "pgcode") # nosec + pgcode = orig_error.pgcode + + # Match by pgcode if available + if pgcode: + for key, (exc_class, params) in exception_map.items(): + if key == pgcode: + return exc_class(**params) + + # If no match found, return default exception or original + return default_exception() if default_exception else exception diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_cli.py b/packages/postgres-database/src/simcore_postgres_database/utils_cli.py index 5e2325a1d3d..b5866f94962 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_cli.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_cli.py @@ -2,9 +2,10 @@ import json.decoder import logging import os +from collections.abc import Callable from copy import deepcopy from functools import wraps -from typing import Callable, Final, Optional +from typing import Final import click import docker.client @@ -26,8 +27,7 @@ def decorator(func: Callable): @wraps(func) def wrapper(*args, **kargs): try: - res = func(*args, **kargs) - return res + return func(*args, **kargs) except RuntimeError as err: log.info( "%s failed: %s", @@ -91,8 +91,8 @@ def reset_cache(): def get_alembic_config_from_cache( - force_cfg: Optional[dict] = None, -) -> Optional[AlembicConfig]: + force_cfg: dict | None = None, +) -> AlembicConfig | None: """ Creates alembic config from cfg or cache @@ -101,10 +101,7 @@ def get_alembic_config_from_cache( # build url try: - if force_cfg: - cfg = force_cfg - else: - cfg = load_cache(raise_if_error=True) + cfg = force_cfg if force_cfg else load_cache(raise_if_error=True) url = build_url(**cfg) except Exception: # pylint: disable=broad-except diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_groups_extra_properties.py b/packages/postgres-database/src/simcore_postgres_database/utils_groups_extra_properties.py new file mode 100644 index 00000000000..b1cb32abf9f --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_groups_extra_properties.py @@ -0,0 +1,186 @@ +import datetime +import logging +import warnings +from collections.abc import Callable +from dataclasses import dataclass, fields +from typing import Any + +import sqlalchemy as sa +from common_library.async_tools import maybe_await +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ._protocols import DBConnection +from .models.groups import GroupType, groups, user_to_groups +from .models.groups_extra_properties import groups_extra_properties +from .utils_models import FromRowMixin +from .utils_repos import pass_or_acquire_connection + +_logger = logging.getLogger(__name__) + +_WARNING_FMSG = ( + f"{__name__}.{{}} uses aiopg which has been deprecated in this repo. Use {{}} instead. 
" + "SEE https://github.com/ITISFoundation/osparc-simcore/issues/4529" +) + + +class GroupExtraPropertiesError(Exception): ... + + +class GroupExtraPropertiesNotFoundError(GroupExtraPropertiesError): ... + + +@dataclass(frozen=True, slots=True, kw_only=True) +class GroupExtraProperties(FromRowMixin): + group_id: int + product_name: str + internet_access: bool + override_services_specifications: bool + use_on_demand_clusters: bool + enable_telemetry: bool + created: datetime.datetime + modified: datetime.datetime + enable_efs: bool + + +def _list_table_entries_ordered_by_group_type_stmt(user_id: int, product_name: str): + return ( + sa.select( + groups_extra_properties, + groups.c.type, + sa.case( + # NOTE: the ordering is important for the aggregation afterwards + (groups.c.type == GroupType.EVERYONE, sa.literal(3)), + (groups.c.type == GroupType.STANDARD, sa.literal(2)), + (groups.c.type == GroupType.PRIMARY, sa.literal(1)), + else_=sa.literal(4), + ).label("type_order"), + ) + .select_from( + sa.join( + sa.join( + groups_extra_properties, + user_to_groups, + groups_extra_properties.c.group_id == user_to_groups.c.gid, + ), + groups, + groups_extra_properties.c.group_id == groups.c.gid, + ) + ) + .where( + (groups_extra_properties.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + ) + .alias() + ) + + +def _merge_extra_properties_booleans( + instance1: GroupExtraProperties, instance2: GroupExtraProperties +) -> GroupExtraProperties: + merged_properties: dict[str, Any] = {} + for field in fields(instance1): + value1 = getattr(instance1, field.name) + value2 = getattr(instance2, field.name) + + if isinstance(value1, bool): + merged_properties[field.name] = value1 or value2 + else: + merged_properties[field.name] = value1 + return GroupExtraProperties(**merged_properties) # pylint: disable=missing-kwoa + + +@dataclass(frozen=True, slots=True, kw_only=True) +class GroupExtraPropertiesRepo: + @staticmethod + def _get_stmt(gid: int, product_name: str): + return sa.select(groups_extra_properties).where( + (groups_extra_properties.c.group_id == gid) + & (groups_extra_properties.c.product_name == product_name) + ) + + @staticmethod + async def get_v2( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + gid: int, + product_name: str, + ) -> GroupExtraProperties: + async with pass_or_acquire_connection(engine, connection) as conn: + query = GroupExtraPropertiesRepo._get_stmt(gid, product_name) + result = await conn.stream(query) + assert result # nosec + if row := await result.first(): + return GroupExtraProperties.from_row(row) + msg = f"Properties for group {gid} not found" + raise GroupExtraPropertiesNotFoundError(msg) + + @staticmethod + def _aggregate( + rows, user_id, product_name, from_row: Callable + ) -> GroupExtraProperties: + merged_standard_extra_properties = None + for row in rows: + group_extra_properties: GroupExtraProperties = from_row(row) + match row.type: + case GroupType.PRIMARY: + # this always has highest priority + return group_extra_properties + case GroupType.STANDARD: + if merged_standard_extra_properties: + merged_standard_extra_properties = ( + _merge_extra_properties_booleans( + merged_standard_extra_properties, + group_extra_properties, + ) + ) + else: + merged_standard_extra_properties = group_extra_properties + case GroupType.EVERYONE: + # if there are standard properties, they take precedence + return ( + merged_standard_extra_properties + if merged_standard_extra_properties + else group_extra_properties + ) + case _: + 
_logger.warning( + "Unexpected GroupType found in %s db table! Please adapt code here!", + groups_extra_properties.name, + ) + if merged_standard_extra_properties: + return merged_standard_extra_properties + msg = f"Properties for user {user_id} in {product_name} not found" + raise GroupExtraPropertiesNotFoundError(msg) + + @staticmethod + async def get_aggregated_properties_for_user( + connection: DBConnection, + *, + user_id: int, + product_name: str, + ) -> GroupExtraProperties: + warnings.warn( + _WARNING_FMSG.format( + "get_aggregated_properties_for_user", + "get_aggregated_properties_for_user_v2", + ), + DeprecationWarning, + stacklevel=1, + ) + + list_stmt = _list_table_entries_ordered_by_group_type_stmt( + user_id=user_id, product_name=product_name + ) + + result = await connection.execute( + sa.select(list_stmt).order_by(list_stmt.c.type_order) + ) + assert result # nosec + + rows = await maybe_await(result.fetchall()) + assert isinstance(rows, list) # nosec + + return GroupExtraPropertiesRepo._aggregate( + rows, user_id, product_name, GroupExtraProperties.from_row + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_migration.py b/packages/postgres-database/src/simcore_postgres_database/utils_migration.py index c03e6325d41..01779ae5ed1 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_migration.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_migration.py @@ -3,9 +3,8 @@ """ import sys from pathlib import Path -from typing import Final, Optional +from typing import Final -from alembic import __version__ as __alembic_version__ from alembic.config import Config as AlembicConfig from alembic.script.base import ScriptDirectory @@ -34,9 +33,10 @@ def get_current_head() -> RevisionID: config = create_basic_config() script: ScriptDirectory = ScriptDirectory.from_config(config) - head: Optional[str] = script.get_current_head() + head: str | None = script.get_current_head() if not head: - raise RuntimeError(f"Cannot find head revision in {script}") + msg = f"Cannot find head revision in {script}" + raise RuntimeError(msg) return head diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_models.py b/packages/postgres-database/src/simcore_postgres_database/utils_models.py new file mode 100644 index 00000000000..2d0a4b3a7f5 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_models.py @@ -0,0 +1,30 @@ +from collections.abc import Mapping +from dataclasses import fields, is_dataclass +from typing import Any, TypeVar + +from sqlalchemy.engine.row import Row + +ModelType = TypeVar("ModelType") + + +class FromRowMixin: + """Mixin to allow instance construction from database row objects""" + + @classmethod + def from_row(cls: type[ModelType], row: Any) -> ModelType: + """Creates an instance from a database row. + + Supports both Row objects and mapping-like objects. 
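+
+        A minimal sketch (``UserSummary`` is a hypothetical dataclass; ``users`` is the
+        table from ``.models.users``; ``sa`` and ``conn`` are ``sqlalchemy`` and an
+        ``AsyncConnection``)::
+
+            @dataclass(frozen=True, slots=True, kw_only=True)
+            class UserSummary(FromRowMixin):
+                id: int
+                name: str
+
+            result = await conn.stream(sa.select(users.c.id, users.c.name))
+            if row := await result.first():
+                user = UserSummary.from_row(row)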
+ """ + assert is_dataclass(cls) # nosec + + if isinstance(row, Row): + mapping = row._asdict() + elif isinstance(row, Mapping): + mapping = row + else: + msg = f"Row must be a Row or Mapping type, got {type(row)}" + raise TypeError(msg) + + field_names = [f.name for f in fields(cls)] + return cls(**{k: v for k, v in mapping.items() if k in field_names}) # type: ignore[return-value] diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_ordering.py b/packages/postgres-database/src/simcore_postgres_database/utils_ordering.py new file mode 100644 index 00000000000..4791692bd63 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_ordering.py @@ -0,0 +1,19 @@ +from enum import Enum +from typing import TypedDict + + +class OrderDirection(str, Enum): + ASC = "asc" + DESC = "desc" + + +class OrderByDict(TypedDict): + field: str + direction: OrderDirection + + +# Example usage +order_by_example: OrderByDict = { + "field": "example_field", + "direction": OrderDirection.ASC, +} diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_payments.py b/packages/postgres-database/src/simcore_postgres_database/utils_payments.py new file mode 100644 index 00000000000..de4db3abe11 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_payments.py @@ -0,0 +1,167 @@ +import datetime +import logging +from dataclasses import dataclass +from decimal import Decimal +from typing import Final, TypeAlias + +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import ResultProxy, RowProxy + +from . import aiopg_errors +from .models.payments_transactions import PaymentTransactionState, payments_transactions + +_logger = logging.getLogger(__name__) + + +PaymentID: TypeAlias = str +PaymentTransactionRow: TypeAlias = RowProxy + + +UNSET: Final[str] = "__UNSET__" + + +@dataclass +class PaymentFailure: + payment_id: str + + def __bool__(self): + return False + + +class PaymentAlreadyExists(PaymentFailure): ... + + +class PaymentNotFound(PaymentFailure): ... + + +class PaymentAlreadyAcked(PaymentFailure): ... 
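+
+
+# NOTE: each failure class above evaluates to False (see PaymentFailure.__bool__),
+# so callers can branch on the returned value without isinstance checks.
+# Illustrative sketch only -- the keyword values below are placeholders:
+#
+#   result = await insert_init_payment_transaction(connection, payment_id="pay_123", ...)
+#   if not result:
+#       assert isinstance(result, PaymentAlreadyExists)  # duplicate payment_id
+#   else:
+#       payment_id: PaymentID = result  # plain str on success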
+ + +async def insert_init_payment_transaction( + connection: SAConnection, + *, + payment_id: str, + price_dollars: Decimal, + osparc_credits: Decimal, + product_name: str, + user_id: int, + user_email: str, + wallet_id: int, + comment: str | None, + initiated_at: datetime.datetime, +) -> PaymentID | PaymentAlreadyExists: + """Annotates 'init' transaction in the database""" + try: + await connection.execute( + payments_transactions.insert().values( + payment_id=payment_id, + price_dollars=price_dollars, + osparc_credits=osparc_credits, + product_name=product_name, + user_id=user_id, + user_email=user_email, + wallet_id=wallet_id, + comment=comment, + initiated_at=initiated_at, + ) + ) + except aiopg_errors.UniqueViolation: + return PaymentAlreadyExists(payment_id) + + return payment_id + + +async def update_payment_transaction_state( + connection: SAConnection, + *, + payment_id: str, + completion_state: PaymentTransactionState, + state_message: str | None = None, + invoice_url: str | None = UNSET, +) -> PaymentTransactionRow | PaymentNotFound | PaymentAlreadyAcked: + """ACKs payment by updating state with SUCCESS, ...""" + if completion_state == PaymentTransactionState.PENDING: + msg = f"cannot update state with {completion_state=} since it is already initiated" + raise ValueError(msg) + + optional: dict[str, str | None] = {} + if state_message: + optional["state_message"] = state_message + + if completion_state == PaymentTransactionState.SUCCESS and invoice_url is None: + _logger.warning( + "Payment %s completed as %s without invoice (%s)", + payment_id, + state_message, + f"{invoice_url=}", + ) + + if invoice_url != UNSET: + optional["invoice_url"] = invoice_url + + async with connection.begin(): + row = await ( + await connection.execute( + sa.select( + payments_transactions.c.initiated_at, + payments_transactions.c.completed_at, + ) + .where(payments_transactions.c.payment_id == payment_id) + .with_for_update() + ) + ).fetchone() + + if row is None: + return PaymentNotFound(payment_id=payment_id) + + if row.completed_at is not None: + assert row.initiated_at < row.completed_at # nosec + return PaymentAlreadyAcked(payment_id=payment_id) + + assert row.initiated_at # nosec + + result = await connection.execute( + payments_transactions.update() + .values(completed_at=sa.func.now(), state=completion_state, **optional) + .where(payments_transactions.c.payment_id == payment_id) + .returning(sa.literal_column("*")) + ) + row = await result.first() + assert row, "execute above should have caught this" # nosec + assert isinstance(row, RowProxy) # nosec + return row + + +async def get_user_payments_transactions( + connection: SAConnection, + *, + user_id: int, + offset: int | None = None, + limit: int | None = None, +) -> tuple[int, list[PaymentTransactionRow]]: + total_number_of_items = await connection.scalar( + sa.select(sa.func.count()) + .select_from(payments_transactions) + .where(payments_transactions.c.user_id == user_id) + ) + assert total_number_of_items is not None # nosec + + # NOTE: what if between these two calls there are new rows? 
can we get this in an atomic call?Γ₯ + stmt = ( + payments_transactions.select() + .where(payments_transactions.c.user_id == user_id) + .order_by(payments_transactions.c.created.desc()) + ) # newest first + + if offset is not None: + # psycopg2.errors.InvalidRowCountInResultOffsetClause: OFFSET must not be negative + stmt = stmt.offset(offset) + + if limit is not None: + # InvalidRowCountInLimitClause: LIMIT must not be negative + stmt = stmt.limit(limit) + + result: ResultProxy = await connection.execute(stmt) + rows = await result.fetchall() or [] + return total_number_of_items, rows diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_payments_autorecharge.py b/packages/postgres-database/src/simcore_postgres_database/utils_payments_autorecharge.py new file mode 100644 index 00000000000..c8e482f26ee --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_payments_autorecharge.py @@ -0,0 +1,80 @@ +import sqlalchemy as sa +from simcore_postgres_database.models.payments_autorecharge import payments_autorecharge +from simcore_postgres_database.models.payments_methods import ( + InitPromptAckFlowState, + payments_methods, +) +from sqlalchemy.dialects.postgresql import insert as pg_insert + + +class AutoRechargeStmts: + @staticmethod + def is_valid_payment_method(user_id, wallet_id, payment_method_id) -> sa.sql.Select: + return sa.select(payments_methods.c.payment_method_id).where( + (payments_methods.c.user_id == user_id) + & (payments_methods.c.wallet_id == wallet_id) + & (payments_methods.c.payment_method_id == payment_method_id) + & (payments_methods.c.state == InitPromptAckFlowState.SUCCESS) + ) + + @staticmethod + def get_wallet_autorecharge(wallet_id) -> sa.sql.Select: + return ( + sa.select( + payments_autorecharge.c.id.label("payments_autorecharge_id"), + payments_methods.c.user_id, + payments_methods.c.wallet_id, + payments_autorecharge.c.primary_payment_method_id, + payments_autorecharge.c.enabled, + payments_autorecharge.c.top_up_amount_in_usd, + payments_autorecharge.c.monthly_limit_in_usd, + ) + .select_from( + payments_methods.join( + payments_autorecharge, + (payments_methods.c.wallet_id == payments_autorecharge.c.wallet_id) + & ( + payments_methods.c.payment_method_id + == payments_autorecharge.c.primary_payment_method_id + ), + ) + ) + .where( + (payments_methods.c.wallet_id == wallet_id) + & (payments_methods.c.state == InitPromptAckFlowState.SUCCESS) + ) + ) + + @staticmethod + def upsert_wallet_autorecharge( + *, + wallet_id, + enabled, + primary_payment_method_id, + top_up_amount_in_usd, + monthly_limit_in_usd, + ): + # using this primary payment-method, create an autorecharge + # NOTE: requires the entire + values = { + "wallet_id": wallet_id, + "enabled": enabled, + "primary_payment_method_id": primary_payment_method_id, + "top_up_amount_in_usd": top_up_amount_in_usd, + "monthly_limit_in_usd": monthly_limit_in_usd, + } + + insert_stmt = pg_insert(payments_autorecharge).values(**values) + return insert_stmt.on_conflict_do_update( + index_elements=[payments_autorecharge.c.wallet_id], + set_=values, + ).returning(sa.literal_column("*")) + + @staticmethod + def update_wallet_autorecharge(wallet_id, **values) -> sa.sql.Update: + return ( + payments_autorecharge.update() + .values(**values) + .where(payments_autorecharge.c.wallet_id == wallet_id) + .returning(payments_autorecharge.c.id) + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_products.py 
b/packages/postgres-database/src/simcore_postgres_database/utils_products.py index b05e699b8a4..dba8caf074b 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_products.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_products.py @@ -1,10 +1,7 @@ -""" Common functions to access products table - -""" - -from typing import Any, Optional, Protocol +"""Common functions to access products table""" import sqlalchemy as sa +from sqlalchemy.ext.asyncio import AsyncConnection from .models.groups import GroupType, groups from .models.products import products @@ -13,80 +10,64 @@ _GroupID = int -class _DBConnection(Protocol): - # Prototype to account for aiopg and asyncio connection classes, i.e. - # from aiopg.sa.connection import SAConnection - # from sqlalchemy.ext.asyncio import AsyncConnection - async def scalar(self, *args, **kwargs): - ... - - -class _AiopgConnection(Protocol): - # Prototype to account for aiopg-only (this protocol avoids import <-> installation) - async def scalar(self, *args, **kwargs) -> Any: - ... +class EmptyProductsError(ValueError): ... - async def execute(self, *args, **kwargs): - ... - async def begin(self): - ... - - -async def get_default_product_name(conn: _DBConnection) -> str: +async def get_default_product_name(conn: AsyncConnection) -> str: """The first row in the table is considered as the default product :: raises ValueError if undefined """ product_name = await conn.scalar( - sa.select([products.c.name]).order_by(products.c.priority) + sa.select(products.c.name).order_by(products.c.priority) ) if not product_name: - raise ValueError("No product defined in database") + msg = "No product was defined in database. Upon construction, at least one product is added but there are none." + raise EmptyProductsError(msg) assert isinstance(product_name, str) # nosec return product_name -async def get_product_group_id( - connection: _DBConnection, product_name: str -) -> Optional[_GroupID]: +async def get_product_group_id_or_none( + connection: AsyncConnection, product_name: str +) -> _GroupID | None: group_id = await connection.scalar( - sa.select([products.c.group_id]).where(products.c.name == product_name) + sa.select(products.c.group_id).where(products.c.name == product_name) ) return None if group_id is None else _GroupID(group_id) async def get_or_create_product_group( - connection: _AiopgConnection, product_name: str + conn: AsyncConnection, product_name: str ) -> _GroupID: - """ - Returns group_id of a product. Creates it if undefined - """ - async with connection.begin(): - group_id = await connection.scalar( - sa.select([products.c.group_id]) - .where(products.c.name == product_name) - .with_for_update(read=True) - # a `FOR SHARE` lock: locks changes in the product until transaction is done. - # Read might return in None, but it is OK - ) - if group_id is None: - group_id = await connection.scalar( - groups.insert() - .values( - name=product_name, - description=f"{product_name} product group", - type=GroupType.STANDARD, - ) - .returning(groups.c.gid) + # + # NOTE: Separated so it can be used in asyncpg and aiopg environs while both + # coexist + # + group_id: int | None = await conn.scalar( + sa.select(products.c.group_id) + .where(products.c.name == product_name) + .with_for_update(read=True) + # a `FOR SHARE` lock: locks changes in the product until transaction is done. 
+ # Read might return in None, but it is OK + ) + if group_id is None: + group_id = await conn.scalar( + groups.insert() + .values( + name=product_name, + description=f"{product_name} product group", + type=GroupType.STANDARD, ) - assert group_id # nosec + .returning(groups.c.gid) + ) + assert group_id # nosec - await connection.execute( - products.update() - .where(products.c.name == product_name) - .values(group_id=group_id) - ) + await conn.execute( + products.update() + .where(products.c.name == product_name) + .values(group_id=group_id) + ) - return _GroupID(group_id) + return group_id diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_products_prices.py b/packages/postgres-database/src/simcore_postgres_database/utils_products_prices.py new file mode 100644 index 00000000000..549bcd116e2 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_products_prices.py @@ -0,0 +1,66 @@ +from decimal import Decimal +from typing import NamedTuple, TypeAlias + +import sqlalchemy as sa +from sqlalchemy.ext.asyncio import AsyncConnection + +from .constants import QUANTIZE_EXP_ARG +from .models.products_prices import products_prices + +StripePriceID: TypeAlias = str +StripeTaxRateID: TypeAlias = str + + +class ProductPriceInfo(NamedTuple): + usd_per_credit: Decimal + min_payment_amount_usd: Decimal + + +async def get_product_latest_price_info_or_none( + conn: AsyncConnection, product_name: str +) -> ProductPriceInfo | None: + """If the product is not billable, it returns None""" + # newest price of a product + result = await conn.execute( + sa.select( + products_prices.c.usd_per_credit, + products_prices.c.min_payment_amount_usd, + ) + .where(products_prices.c.product_name == product_name) + .order_by(sa.desc(products_prices.c.valid_from)) + .limit(1) + ) + row = result.one_or_none() + + if row and row.usd_per_credit is not None: + assert row.min_payment_amount_usd is not None # nosec + return ProductPriceInfo( + usd_per_credit=Decimal(row.usd_per_credit).quantize(QUANTIZE_EXP_ARG), + min_payment_amount_usd=Decimal(row.min_payment_amount_usd).quantize( + QUANTIZE_EXP_ARG + ), + ) + return None + + +async def get_product_latest_stripe_info_or_none( + conn: AsyncConnection, product_name: str +) -> tuple[StripePriceID, StripeTaxRateID] | None: + # Stripe info of a product for latest price + result = await conn.execute( + sa.select( + products_prices.c.stripe_price_id, + products_prices.c.stripe_tax_rate_id, + ) + .where(products_prices.c.product_name == product_name) + .order_by(sa.desc(products_prices.c.valid_from)) + .limit(1) + ) + + row = result.one_or_none() + return (row.stripe_price_id, row.stripe_tax_rate_id) if row else None + + +async def is_payment_enabled(conn: AsyncConnection, product_name: str) -> bool: + p = await get_product_latest_price_info_or_none(conn, product_name=product_name) + return bool(p) # zero or None is disabled diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects.py new file mode 100644 index 00000000000..577f9441004 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects.py @@ -0,0 +1,41 @@ +import uuid +from datetime import UTC, datetime + +import sqlalchemy as sa +from common_library.errors_classes import OsparcErrorMixin +from pydantic import TypeAdapter +from sqlalchemy.ext.asyncio import AsyncConnection + +from .models.projects import projects +from .utils_repos import 
transaction_context + + +class DBBaseProjectError(OsparcErrorMixin, Exception): + msg_template: str = "Project utils unexpected error" + + +class DBProjectNotFoundError(DBBaseProjectError): + msg_template: str = "Project project_uuid={project_uuid!r} not found" + + +class ProjectsRepo: + def __init__(self, engine): + self.engine = engine + + async def get_project_last_change_date( + self, + project_uuid: uuid.UUID, + *, + connection: AsyncConnection | None = None, + ) -> datetime: + async with transaction_context(self.engine, connection) as conn: + get_stmt = sa.select(projects.c.last_change_date).where( + projects.c.uuid == f"{project_uuid}" + ) + + result = await conn.execute(get_stmt) + row = result.first() + if row is None: + raise DBProjectNotFoundError(project_uuid=project_uuid) + date = TypeAdapter(datetime).validate_python(row[0]) + return date.replace(tzinfo=UTC) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py new file mode 100644 index 00000000000..6451f1156ba --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects_metadata.py @@ -0,0 +1,282 @@ +import datetime +import uuid +from typing import Any + +import asyncpg # type: ignore[import-untyped] +import sqlalchemy as sa +import sqlalchemy.exc as sa_exc +from common_library.async_tools import maybe_await +from common_library.errors_classes import OsparcErrorMixin +from pydantic import BaseModel, ConfigDict +from sqlalchemy.dialects.postgresql import insert as pg_insert + +from ._protocols import DBConnection +from .aiopg_errors import ForeignKeyViolation +from .models.projects import projects +from .models.projects_metadata import projects_metadata +from .utils_aiosqlalchemy import map_db_exception + +# +# Errors +# + + +class BaseProjectsMetadataError(OsparcErrorMixin, RuntimeError): + msg_template: str = "Project metadata unexpected error" + + +class DBProjectNotFoundError(BaseProjectsMetadataError): + msg_template: str = "Project project_uuid={project_uuid!r} not found" + + +class DBProjectInvalidAncestorsError(BaseProjectsMetadataError): + msg_template: str = ( + "Projects metadata invalid ancestors given (both must be set or none)" + ) + + +class DBProjectInvalidParentProjectError(BaseProjectsMetadataError): + msg_template: str = ( + "Project project_uuid={project_uuid!r} has invalid parent project uuid={parent_project_uuid!r}" + ) + + +class DBProjectInvalidParentNodeError(BaseProjectsMetadataError): + msg_template: str = ( + "Project project_uuid={project_uuid!r} has invalid parent project uuid={parent_node_id!r}" + ) + + +# +# Data +# + + +class ProjectMetadata(BaseModel): + custom: dict[str, Any] | None + created: datetime.datetime | None + modified: datetime.datetime | None + parent_project_uuid: uuid.UUID | None + parent_node_id: uuid.UUID | None + root_parent_project_uuid: uuid.UUID | None + root_parent_node_id: uuid.UUID | None + model_config = ConfigDict(frozen=True, from_attributes=True) + + +# +# Helpers +# + + +async def get(connection: DBConnection, project_uuid: uuid.UUID) -> ProjectMetadata: + """ + Raises: + DBProjectNotFoundError: project not found + + """ + # JOIN LEFT OUTER + get_stmt = ( + sa.select( + projects.c.uuid, + projects_metadata.c.custom, + projects_metadata.c.created, + projects_metadata.c.modified, + projects_metadata.c.parent_project_uuid, + projects_metadata.c.parent_node_id, + projects_metadata.c.root_parent_project_uuid, + 
projects_metadata.c.root_parent_node_id, + ) + .select_from( + sa.join( + projects, + projects_metadata, + projects.c.uuid == projects_metadata.c.project_uuid, + isouter=True, + ) + ) + .where(projects.c.uuid == f"{project_uuid}") + ) + result = await connection.execute(get_stmt) + row = await maybe_await(result.first()) + if row is None: + raise DBProjectNotFoundError(project_uuid=project_uuid) + return ProjectMetadata.model_validate(row) + + +def _check_valid_ancestors_combination( + project_uuid: uuid.UUID, + parent_project_uuid: uuid.UUID | None, + parent_node_id: uuid.UUID | None, +) -> None: + if project_uuid == parent_project_uuid: + raise DBProjectInvalidAncestorsError + if parent_project_uuid is not None and parent_node_id is None: + raise DBProjectInvalidAncestorsError + if parent_project_uuid is None and parent_node_id is not None: + raise DBProjectInvalidAncestorsError + + +async def _project_has_any_child( + connection: DBConnection, project_uuid: uuid.UUID +) -> bool: + get_stmt = sa.select(projects_metadata.c.project_uuid).where( + projects_metadata.c.parent_project_uuid == f"{project_uuid}" + ) + if await connection.scalar(get_stmt) is not None: + return True + return False + + +async def _compute_root_parent_from_parent( + connection: DBConnection, + *, + project_uuid: uuid.UUID, + parent_project_uuid: uuid.UUID | None, + parent_node_id: uuid.UUID | None, +) -> tuple[uuid.UUID | None, uuid.UUID | None]: + if parent_project_uuid is None and parent_node_id is None: + return None, None + + try: + assert parent_project_uuid is not None # nosec + parent_project_metadata = await get(connection, parent_project_uuid) + if parent_project_metadata.root_parent_project_uuid is not None: + assert parent_project_metadata.root_parent_node_id is not None # nosec + return ( + parent_project_metadata.root_parent_project_uuid, + parent_project_metadata.root_parent_node_id, + ) + # that means this is the root already + return parent_project_uuid, parent_node_id + except DBProjectNotFoundError as err: + raise DBProjectInvalidParentProjectError( + project_uuid=project_uuid, parent_project_uuid=parent_project_uuid + ) from err + + +async def set_project_ancestors( + connection: DBConnection, + *, + project_uuid: uuid.UUID, + parent_project_uuid: uuid.UUID | None, + parent_node_id: uuid.UUID | None, +) -> ProjectMetadata: + """ + Raises: + NotImplementedError: if you touch ancestry of a project that has children + DBProjectInvalidAncestorsError: if you pass invalid parents + DBProjectInvalidParentProjectError: the parent_project_uuid is invalid + DBProjectInvalidParentNodeError: the parent_node_ID is invalid + DBProjectNotFoundError: the project_uuid is not found + """ + _check_valid_ancestors_combination( + project_uuid, parent_project_uuid, parent_node_id + ) + if await _project_has_any_child(connection, project_uuid): + msg = "Cannot set ancestors for a project with children" + raise NotImplementedError(msg) + ( + root_parent_project_uuid, + root_parent_node_id, + ) = await _compute_root_parent_from_parent( + connection, + project_uuid=project_uuid, + parent_project_uuid=parent_project_uuid, + parent_node_id=parent_node_id, + ) + data = { + "project_uuid": f"{project_uuid}", + "parent_project_uuid": ( + f"{parent_project_uuid}" if parent_project_uuid is not None else None + ), + "parent_node_id": f"{parent_node_id}" if parent_node_id is not None else None, + "root_parent_project_uuid": ( + f"{root_parent_project_uuid}" + if root_parent_project_uuid is not None + else None + ), + 
"root_parent_node_id": ( + f"{root_parent_node_id}" if root_parent_node_id is not None else None + ), + } + insert_stmt = pg_insert(projects_metadata).values(**data) + upsert_stmt = insert_stmt.on_conflict_do_update( + index_elements=[projects_metadata.c.project_uuid], + set_=data, + ).returning(sa.literal_column("*")) + + try: + result = await connection.execute(upsert_stmt) + row = await maybe_await(result.first()) + assert row # nosec + return ProjectMetadata.model_validate(row) + + except ForeignKeyViolation as err: + assert err.pgerror is not None # nosec # noqa: PT017 + if "fk_projects_metadata_parent_node_id" in err.pgerror: + raise DBProjectInvalidParentNodeError( + project_uuid=project_uuid, parent_node_id=parent_node_id + ) from err + + raise DBProjectNotFoundError(project_uuid=project_uuid) from err + except sa_exc.IntegrityError as exc: + if "fk_projects_metadata_parent_node_id" in exc.args[0]: + raise map_db_exception( + exc, + { + asyncpg.ForeignKeyViolationError.sqlstate: ( + DBProjectInvalidParentNodeError, + { + "project_uuid": project_uuid, + "parent_node_id": parent_node_id, + }, + ), + }, + ) from exc + raise map_db_exception( + exc, + { + asyncpg.ForeignKeyViolationError.sqlstate: ( + DBProjectNotFoundError, + { + "project_uuid": project_uuid, + }, + ), + }, + ) from exc + + +async def set_project_custom_metadata( + connection: DBConnection, + *, + project_uuid: uuid.UUID, + custom_metadata: dict[str, Any], +) -> ProjectMetadata: + data = { + "project_uuid": f"{project_uuid}", + "custom": custom_metadata, + } + insert_stmt = pg_insert(projects_metadata).values(**data) + upsert_stmt = insert_stmt.on_conflict_do_update( + index_elements=[projects_metadata.c.project_uuid], + set_=data, + ).returning(sa.literal_column("*")) + + try: + result = await connection.execute(upsert_stmt) + row = await maybe_await(result.first()) + assert row # nosec + return ProjectMetadata.model_validate(row) + + except ForeignKeyViolation as err: + raise DBProjectNotFoundError(project_uuid=project_uuid) from err + except sa_exc.IntegrityError as exc: + raise map_db_exception( + exc, + { + asyncpg.exceptions.ForeignKeyViolationError.sqlstate: ( + DBProjectNotFoundError, + {"project_uuid": project_uuid}, + ), + }, + ) from exc diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py new file mode 100644 index 00000000000..6fc72990b30 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_projects_nodes.py @@ -0,0 +1,326 @@ +import datetime +import uuid +from dataclasses import dataclass +from typing import Any + +import asyncpg.exceptions # type: ignore[import-untyped] +import sqlalchemy +import sqlalchemy.exc +from common_library.async_tools import maybe_await +from common_library.errors_classes import OsparcErrorMixin +from pydantic import BaseModel, ConfigDict, Field +from simcore_postgres_database.utils_aiosqlalchemy import map_db_exception +from sqlalchemy.dialects.postgresql import insert as pg_insert + +from ._protocols import DBConnection +from .aiopg_errors import ForeignKeyViolation, UniqueViolation +from .models.projects_node_to_pricing_unit import projects_node_to_pricing_unit +from .models.projects_nodes import projects_nodes + + +# +# Errors +# +class BaseProjectNodesError(OsparcErrorMixin, RuntimeError): + msg_template: str = "Project nodes unexpected error" + + +class ProjectNodesProjectNotFoundError(BaseProjectNodesError): + 
msg_template: str = "Project {project_uuid} not found" + + +class ProjectNodesNodeNotFoundError(BaseProjectNodesError): + msg_template: str = "Node {node_id!r} from project {project_uuid!r} not found" + + +class ProjectNodesNonUniqueNodeFoundError(BaseProjectNodesError): + msg_template: str = ( + "Multiple project found containing node {node_id}. TIP: misuse, the same node ID was found in several projects." + ) + + +class ProjectNodesDuplicateNodeError(BaseProjectNodesError): + msg_template: str = ( + "Project node already exists, you cannot have 2x the same node in the same project." + ) + + +class ProjectNodeCreate(BaseModel): + node_id: uuid.UUID + required_resources: dict[str, Any] = Field(default_factory=dict) + key: str + version: str + label: str + progress: float | None = None + thumbnail: str | None = None + input_access: dict[str, Any] | None = None + input_nodes: list[str] | None = None + inputs: dict[str, Any] | None = None + inputs_units: dict[str, Any] | None = None + output_nodes: list[str] | None = None + outputs: dict[str, Any] | None = None + run_hash: str | None = None + state: dict[str, Any] | None = None + parent: str | None = None + boot_options: dict[str, Any] | None = None + + @classmethod + def get_field_names(cls, *, exclude: set[str]) -> set[str]: + return cls.model_fields.keys() - exclude + + model_config = ConfigDict(frozen=True) + + +class ProjectNode(ProjectNodeCreate): + created: datetime.datetime + modified: datetime.datetime + + model_config = ConfigDict(from_attributes=True) + + +@dataclass(frozen=True, kw_only=True) +class ProjectNodesRepo: + project_uuid: uuid.UUID + + async def add( + self, + connection: DBConnection, + *, + nodes: list[ProjectNodeCreate], + ) -> list[ProjectNode]: + """Creates a new entry in *projects_nodes* table + + NOTE: Do not use this in an asyncio.gather call as this will fail! + + Raises: + ProjectNodesProjectNotFound: in case the project_uuid does not exist + ProjectNodesDuplicateNode: in case the node already exists + ProjectsNodesNodeNotFound: in case the node does not exist + + """ + if not nodes: + return [] + insert_stmt = ( + projects_nodes.insert() + .values( + [ + { + "project_uuid": f"{self.project_uuid}", + **node.model_dump(exclude_unset=True, mode="json"), + } + for node in nodes + ] + ) + .returning( + *[ + c + for c in projects_nodes.columns + if c is not projects_nodes.c.project_uuid + ] + ) + ) + + try: + result = await connection.execute(insert_stmt) + assert result # nosec + rows = await maybe_await(result.fetchall()) + assert isinstance(rows, list) # nosec + return [ProjectNode.model_validate(r) for r in rows] + except ForeignKeyViolation as exc: + # this happens when the project does not exist, as we first check the node exists + raise ProjectNodesProjectNotFoundError( + project_uuid=self.project_uuid + ) from exc + except UniqueViolation as exc: + # this happens if the node already exists on creation + raise ProjectNodesDuplicateNodeError from exc + except sqlalchemy.exc.IntegrityError as exc: + raise map_db_exception( + exc, + { + asyncpg.exceptions.UniqueViolationError.sqlstate: ( + ProjectNodesDuplicateNodeError, + {"project_uuid": self.project_uuid}, + ), + asyncpg.exceptions.ForeignKeyViolationError.sqlstate: ( + ProjectNodesProjectNotFoundError, + {"project_uuid": self.project_uuid}, + ), + }, + ) from exc + + async def list(self, connection: DBConnection) -> list[ProjectNode]: + """list the nodes in the current project + + NOTE: Do not use this in an asyncio.gather call as this will fail! 
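+
+        A minimal sketch (``project_uuid`` and ``connection`` are assumed to be
+        provided by the caller)::
+
+            repo = ProjectNodesRepo(project_uuid=project_uuid)
+            nodes: list[ProjectNode] = await repo.list(connection)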
+ """ + list_stmt = sqlalchemy.select( + *[ + c + for c in projects_nodes.columns + if c is not projects_nodes.c.project_uuid + ] + ).where(projects_nodes.c.project_uuid == f"{self.project_uuid}") + result = await connection.execute(list_stmt) + assert result # nosec + rows = await maybe_await(result.fetchall()) + assert isinstance(rows, list) # nosec + return [ProjectNode.model_validate(row) for row in rows] + + async def get(self, connection: DBConnection, *, node_id: uuid.UUID) -> ProjectNode: + """get a node in the current project + + NOTE: Do not use this in an asyncio.gather call as this will fail! + + Raises: + ProjectsNodesNodeNotFound: _description_ + """ + + get_stmt = sqlalchemy.select( + *[c for c in projects_nodes.c if c is not projects_nodes.c.project_uuid] + ).where( + (projects_nodes.c.project_uuid == f"{self.project_uuid}") + & (projects_nodes.c.node_id == f"{node_id}") + ) + + result = await connection.execute(get_stmt) + assert result # nosec + row = await maybe_await(result.first()) + if row is None: + raise ProjectNodesNodeNotFoundError( + project_uuid=self.project_uuid, node_id=node_id + ) + assert row # nosec + return ProjectNode.model_validate(row) + + async def update( + self, connection: DBConnection, *, node_id: uuid.UUID, **values + ) -> ProjectNode: + """update a node in the current project + + NOTE: Do not use this in an asyncio.gather call as this will fail! + + Raises: + ProjectsNodesNodeNotFound: _description_ + """ + update_stmt = ( + projects_nodes.update() + .values(**values) + .where( + (projects_nodes.c.project_uuid == f"{self.project_uuid}") + & (projects_nodes.c.node_id == f"{node_id}") + ) + .returning( + *[c for c in projects_nodes.c if c is not projects_nodes.c.project_uuid] + ) + ) + result = await connection.execute(update_stmt) + row = await maybe_await(result.first()) + if not row: + raise ProjectNodesNodeNotFoundError( + project_uuid=self.project_uuid, node_id=node_id + ) + assert row # nosec + return ProjectNode.model_validate(row) + + async def delete(self, connection: DBConnection, *, node_id: uuid.UUID) -> None: + """delete a node in the current project + + NOTE: Do not use this in an asyncio.gather call as this will fail! + + Raises: + Nothing special + """ + delete_stmt = sqlalchemy.delete(projects_nodes).where( + (projects_nodes.c.project_uuid == f"{self.project_uuid}") + & (projects_nodes.c.node_id == f"{node_id}") + ) + await connection.execute(delete_stmt) + + async def get_project_node_pricing_unit_id( + self, connection: DBConnection, *, node_uuid: uuid.UUID + ) -> tuple | None: + """get a pricing unit that is connected to the project node or None if there is non connected + + NOTE: Do not use this in an asyncio.gather call as this will fail! 
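+
+        Returns a ``(pricing_plan_id, pricing_unit_id)`` tuple or ``None``. A minimal
+        sketch (``node_uuid`` is assumed to be provided by the caller)::
+
+            if pricing := await repo.get_project_node_pricing_unit_id(
+                connection, node_uuid=node_uuid
+            ):
+                pricing_plan_id, pricing_unit_id = pricing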
+ """ + result = await connection.execute( + sqlalchemy.select( + projects_node_to_pricing_unit.c.pricing_plan_id, + projects_node_to_pricing_unit.c.pricing_unit_id, + ) + .select_from( + projects_nodes.join( + projects_node_to_pricing_unit, + projects_nodes.c.project_node_id + == projects_node_to_pricing_unit.c.project_node_id, + ) + ) + .where( + (projects_nodes.c.project_uuid == f"{self.project_uuid}") + & (projects_nodes.c.node_id == f"{node_uuid}") + ) + ) + row = await maybe_await(result.fetchone()) + if row is not None: + return (row.pricing_plan_id, row.pricing_unit_id) # type: ignore[union-attr] + return None + + async def connect_pricing_unit_to_project_node( + self, + connection: DBConnection, + *, + node_uuid: uuid.UUID, + pricing_plan_id: int, + pricing_unit_id: int, + ) -> None: + result = await connection.scalar( + sqlalchemy.select(projects_nodes.c.project_node_id).where( + (projects_nodes.c.project_uuid == f"{self.project_uuid}") + & (projects_nodes.c.node_id == f"{node_uuid}") + ) + ) + project_node_id = int(result) if result else 0 + + insert_stmt = pg_insert(projects_node_to_pricing_unit).values( + project_node_id=project_node_id, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + created=sqlalchemy.func.now(), + modified=sqlalchemy.func.now(), + ) + on_update_stmt = insert_stmt.on_conflict_do_update( + index_elements=[ + projects_node_to_pricing_unit.c.project_node_id, + ], + set_={ + "pricing_plan_id": insert_stmt.excluded.pricing_plan_id, + "pricing_unit_id": insert_stmt.excluded.pricing_unit_id, + "modified": sqlalchemy.func.now(), + }, + ) + await connection.execute(on_update_stmt) + + @staticmethod + async def get_project_id_from_node_id( + connection: DBConnection, *, node_id: uuid.UUID + ) -> uuid.UUID: + """ + WARNING: this function should not be used! it has a flaw! a Node ID is not unique and there can + be more than one project linked to it. + + Raises: + ProjectNodesNodeNotFound: if no node_id found + ProjectNodesNonUniqueNodeFoundError: there are multiple projects that contain that node + """ + get_stmt = sqlalchemy.select(projects_nodes.c.project_uuid).where( + projects_nodes.c.node_id == f"{node_id}" + ) + result = await connection.execute(get_stmt) + project_ids = await maybe_await(result.fetchall()) + assert isinstance(project_ids, list) # nosec + if not project_ids: + raise ProjectNodesNodeNotFoundError(project_uuid=None, node_id=node_id) + if len(project_ids) > 1: + raise ProjectNodesNonUniqueNodeFoundError(node_id=node_id) + return uuid.UUID(project_ids[0].project_uuid) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_repos.py b/packages/postgres-database/src/simcore_postgres_database/utils_repos.py new file mode 100644 index 00000000000..7f3fd9283a8 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_repos.py @@ -0,0 +1,87 @@ +import logging +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from typing import TypeVar + +import sqlalchemy as sa +from pydantic import BaseModel +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +_logger = logging.getLogger(__name__) + + +@asynccontextmanager +async def pass_or_acquire_connection( + engine: AsyncEngine, connection: AsyncConnection | None = None +) -> AsyncIterator[AsyncConnection]: + """ + When to use: For READ operations! 
+ It ensures that a connection is available for use within the context, + either by using an existing connection passed as a parameter or by acquiring a new one from the engine. + + The caller must manage the lifecycle of any connection explicitly passed in, but the function handles the + cleanup for connections it creates itself. + + This function **does not open new transactions** and therefore is recommended only for read-only database operations. + """ + # NOTE: When connection is passed, the engine is actually not needed + # NOTE: Creator is responsible of closing connection + is_connection_created = connection is None + if is_connection_created: + connection = await engine.connect() + try: + assert connection # nosec + yield connection + finally: + assert connection # nosec + assert not connection.closed # nosec + if is_connection_created and connection: + await connection.close() + + +@asynccontextmanager +async def transaction_context( + engine: AsyncEngine, connection: AsyncConnection | None = None +) -> AsyncIterator[AsyncConnection]: + """ + When to use: For WRITE operations! + This function manages the database connection and ensures that a transaction context is established for write operations. + It supports both outer and nested transactions, providing flexibility for scenarios where transactions may already exist in the calling context. + """ + async with pass_or_acquire_connection(engine, connection) as conn: + if conn.in_transaction(): + async with conn.begin_nested(): # inner transaction (savepoint) + yield conn + else: + try: + async with conn.begin(): # outer transaction (savepoint) + yield conn + finally: + assert not conn.closed # nosec + assert not conn.in_transaction() # nosec + + +SQLModel = TypeVar( + # Towards using https://sqlmodel.tiangolo.com/#create-a-sqlmodel-model + "SQLModel", + bound=BaseModel, +) + + +def get_columns_from_db_model( + table: sa.Table, model_cls: type[SQLModel] +) -> list[sa.Column]: + """ + Usage example: + + query = sa.select( get_columns_from_db_model(project, ProjectDB) ) + + or + + query = ( + project.insert(). + # ... 
+ .returning(*get_columns_from_db_model(project, ProjectDB)) + ) + """ + return [table.columns[field_name] for field_name in model_cls.model_fields] diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_services.py b/packages/postgres-database/src/simcore_postgres_database/utils_services.py new file mode 100644 index 00000000000..a3f6a1a796d --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_services.py @@ -0,0 +1,26 @@ +import sqlalchemy as sa +import sqlalchemy.sql +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER + +from .models.services import services_meta_data + + +def create_select_latest_services_query( + column_version_label: str = "latest", +) -> sqlalchemy.sql.Select: + """ + Returns select query of service_meta_data table with columns 'key' and 'latest' (=version) + """ + assert issubclass(INTEGER, sa.Integer) # nosec + + return sa.select( + services_meta_data.c.key, + sa.func.array_to_string( + sa.func.max( + sa.func.string_to_array(services_meta_data.c.version, ".").cast( + ARRAY(INTEGER) + ) + ), + ".", + ).label(column_version_label), + ).group_by(services_meta_data.c.key) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_services_environments.py b/packages/postgres-database/src/simcore_postgres_database/utils_services_environments.py new file mode 100644 index 00000000000..17c709fe9a0 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_services_environments.py @@ -0,0 +1,78 @@ +from typing import Final, TypeAlias + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER + +from ._protocols import DBConnection +from .models.services_environments import VENDOR_SECRET_PREFIX, services_vendor_secrets + +# This constraint is to avoid deserialization issues after substitution! 
+VendorSecret: TypeAlias = bool | int | float | str + +LATEST: Final[str] = "latest" + + +async def get_vendor_secrets( + conn: DBConnection, + product_name: str, # NOTE: ProductName as defined in models_library + vendor_service_key: str, # NOTE: ServiceKey is defined in models_library + vendor_service_version: str = LATEST, # NOTE: ServiceVersion is defined in models_library + *, + normalize_names: bool = True, +) -> dict[str, VendorSecret]: + def _version(column_or_value): + # converts version value string to array[integer] that can be compared + return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) + + query = sa.select(services_vendor_secrets.c.secrets_map) + + if vendor_service_version == LATEST: + latest_version = sa.select( + sa.func.array_to_string( + sa.func.max(_version(services_vendor_secrets.c.service_base_version)), + ".", + ) + ).where(services_vendor_secrets.c.service_key == vendor_service_key) + + query = query.where( + (services_vendor_secrets.c.product_name == product_name) + & (services_vendor_secrets.c.service_key == vendor_service_key) + & ( + services_vendor_secrets.c.service_base_version + == latest_version.scalar_subquery() + ) + ) + else: + assert len([int(p) for p in vendor_service_version.split(".")]) == 3 # nosec + + query = ( + query.where( + (services_vendor_secrets.c.product_name == product_name) + & (services_vendor_secrets.c.service_key == vendor_service_key) + & ( + _version(services_vendor_secrets.c.service_base_version) + <= _version(vendor_service_version) + ) + ) + .order_by(_version(services_vendor_secrets.c.service_base_version).desc()) + .limit(1) + ) + + secrets_map = await conn.scalar(query) + secrets: dict[str, VendorSecret] = {} + + if secrets_map is not None: + secrets = dict(secrets_map) + + if secrets_map and normalize_names: + for key in list(secrets.keys()): + if not key.startswith(VENDOR_SECRET_PREFIX): + secrets[VENDOR_SECRET_PREFIX + key.upper()] = secrets.pop(key) + + assert all(key.startswith(VENDOR_SECRET_PREFIX) for key in secrets) # nosec + + assert all( # nosec + isinstance(value, bool | int | str | float) for value in secrets.values() + ) + + return secrets diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_sql.py b/packages/postgres-database/src/simcore_postgres_database/utils_sql.py new file mode 100644 index 00000000000..e3d4e1438af --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_sql.py @@ -0,0 +1,6 @@ +def assemble_array_groups(user_group_ids: list[int]) -> str: + return ( + "array[]::text[]" + if len(user_group_ids) == 0 + else f"""array[{', '.join(f"'{group_id}'" for group_id in user_group_ids)}]""" + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_tags.py b/packages/postgres-database/src/simcore_postgres_database/utils_tags.py index 5f2e265a758..14b283b7157 100644 --- a/packages/postgres-database/src/simcore_postgres_database/utils_tags.py +++ b/packages/postgres-database/src/simcore_postgres_database/utils_tags.py @@ -1,30 +1,39 @@ """ Repository pattern, errors and data structures for models.tags """ - -import functools -import itertools -from dataclasses import dataclass -from typing import Optional, TypedDict - -import sqlalchemy as sa -from aiopg.sa.connection import SAConnection -from simcore_postgres_database.models.groups import user_to_groups -from simcore_postgres_database.models.tags import tags, tags_to_groups -from simcore_postgres_database.models.users import users +from common_library.errors_classes 
import OsparcErrorMixin +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine +from typing_extensions import TypedDict + +from .utils_repos import pass_or_acquire_connection, transaction_context +from .utils_tags_sql import ( + TagAccessRightsDict, + count_groups_with_given_access_rights_stmt, + create_tag_stmt, + delete_tag_access_rights_stmt, + delete_tag_stmt, + get_tag_stmt, + has_access_rights_stmt, + list_tag_group_access_stmt, + list_tags_stmt, + update_tag_stmt, + upsert_tags_access_rights_stmt, +) + +__all__: tuple[str, ...] = ("TagAccessRightsDict",) # # Errors # -class BaseTagError(Exception): - pass +class _BaseTagError(OsparcErrorMixin, Exception): + msg_template = "Tag repo error on tag {tag_id}" -class TagNotFoundError(BaseTagError): +class TagNotFoundError(_BaseTagError): pass -class TagOperationNotAllowed(BaseTagError): # maps to AccessForbidden +class TagOperationNotAllowedError(_BaseTagError): # maps to AccessForbidden pass @@ -33,23 +42,6 @@ class TagOperationNotAllowed(BaseTagError): # maps to AccessForbidden # -_TAG_COLUMNS = [ - tags.c.id, - tags.c.name, - tags.c.description, - tags.c.color, -] - -_ACCESS_COLUMNS = [ - tags_to_groups.c.read, - tags_to_groups.c.write, - tags_to_groups.c.delete, -] - - -_COLUMNS = _TAG_COLUMNS + _ACCESS_COLUMNS - - class TagDict(TypedDict, total=True): id: int name: str @@ -61,76 +53,30 @@ class TagDict(TypedDict, total=True): delete: bool -@dataclass(frozen=True) class TagsRepo: - user_id: int - - def _join_user_groups_tag( - self, - access_condition, - tag_id: int, - ): - j = user_to_groups.join( - tags_to_groups, - (user_to_groups.c.uid == self.user_id) - & (user_to_groups.c.gid == tags_to_groups.c.group_id) - & (access_condition) - & (tags_to_groups.c.tag_id == tag_id), - ) - return j - - def _join_user_to_given_tag(self, access_condition, tag_id: int): - j = self._join_user_groups_tag( - access_condition=access_condition, - tag_id=tag_id, - ).join(tags) - return j - - def _join_user_to_tags( - self, - access_condition, - ): - j = user_to_groups.join( - tags_to_groups, - (user_to_groups.c.uid == self.user_id) - & (user_to_groups.c.gid == tags_to_groups.c.group_id) - & (access_condition), - ).join(tags) - return j + def __init__(self, engine: AsyncEngine): + self.engine = engine async def access_count( self, - conn: SAConnection, - tag_id: int, + connection: AsyncConnection | None = None, *, - read: Optional[bool] = None, - write: Optional[bool] = None, - delete: Optional[bool] = None, + user_id: int, + tag_id: int, + read: bool | None = None, + write: bool | None = None, + delete: bool | None = None, ) -> int: """ Returns 0 if tag does not match access Returns >0 if it does and represents the number of groups granting this access to the user """ - access = [] - if read is not None: - access.append(tags_to_groups.c.read == read) - if write is not None: - access.append(tags_to_groups.c.write == write) - if delete is not None: - access.append(tags_to_groups.c.delete == delete) - - if not access: - raise ValueError("Undefined access") - - j = self._join_user_groups_tag( - access_condition=functools.reduce(sa.and_, access), - tag_id=tag_id, - ) - stmt = sa.select(sa.func.count(user_to_groups.c.uid)).select_from(j) - - # The number of occurrences of the user_id = how many groups are giving this access permission - permissions_count: Optional[int] = await conn.scalar(stmt) - return permissions_count if permissions_count else 0 + async with pass_or_acquire_connection(self.engine, connection) as conn: + count_stmt = 
count_groups_with_given_access_rights_stmt( + user_id=user_id, tag_id=tag_id, read=read, write=write, delete=delete + ) + permissions_count: int | None = await conn.scalar(count_stmt) + return permissions_count if permissions_count else 0 # # CRUD operations @@ -138,127 +84,238 @@ async def access_count( async def create( self, - conn: SAConnection, + connection: AsyncConnection | None = None, *, + user_id: int, name: str, color: str, - description: Optional[str] = None, # =nullable + description: str | None = None, # =nullable read: bool = True, write: bool = True, delete: bool = True, + priority: int | None = None, ) -> TagDict: - values = {"name": name, "color": color} + """Creates tag and defaults to full access rights to `user_id`""" + values: dict[str, str | int] = { + "name": name, + "color": color, + } if description: values["description"] = description + if priority is not None: + values["priority"] = priority - async with conn.begin(): + async with transaction_context(self.engine, connection) as conn: # insert new tag - insert_stmt = tags.insert().values(**values).returning(*_TAG_COLUMNS) + insert_stmt = create_tag_stmt(**values) result = await conn.execute(insert_stmt) - tag = await result.first() + tag = result.first() assert tag # nosec # take tag ownership - scalar_subq = ( - sa.select(users.c.primary_gid) - .where(users.c.id == self.user_id) - .scalar_subquery() + access_stmt = upsert_tags_access_rights_stmt( + tag_id=tag.id, + user_id=user_id, + read=read, + write=write, + delete=delete, ) - result = await conn.execute( - tags_to_groups.insert() - .values( - tag_id=tag.id, - group_id=scalar_subq, - read=read, - write=write, - delete=delete, - ) - .returning(*_ACCESS_COLUMNS) + result = await conn.execute(access_stmt) + access = result.first() + assert access # nosec + + return TagDict( + id=tag.id, + name=tag.name, + description=tag.description, + color=tag.color, + read=access.read, + write=access.write, + delete=access.delete, ) - access = await result.first() - assert access - - return TagDict(itertools.chain(tag.items(), access.items())) # type: ignore - - async def list(self, conn: SAConnection) -> list[TagDict]: - select_stmt = ( - sa.select(_COLUMNS) - .select_from(self._join_user_to_tags(tags_to_groups.c.read == True)) - .order_by(tags.c.id) - ) - - return [TagDict(row.items()) async for row in conn.execute(select_stmt)] # type: ignore - - async def get(self, conn: SAConnection, tag_id: int) -> TagDict: - select_stmt = sa.select(_COLUMNS).select_from( - self._join_user_to_given_tag(tags_to_groups.c.read == True, tag_id=tag_id) - ) - - result = await conn.execute(select_stmt) - row = await result.first() - if not row: - raise TagNotFoundError( - f"{tag_id=} not found: either no access or does not exists" + + async def list_all( + self, + connection: AsyncConnection | None = None, + *, + user_id: int, + ) -> list[TagDict]: + async with pass_or_acquire_connection(self.engine, connection) as conn: + stmt_list = list_tags_stmt(user_id=user_id) + result = await conn.stream(stmt_list) + return [ + TagDict( + id=row.id, + name=row.name, + description=row.description, + color=row.color, + read=row.read, + write=row.write, + delete=row.delete, + ) + async for row in result + ] + + async def get( + self, + connection: AsyncConnection | None = None, + *, + user_id: int, + tag_id: int, + ) -> TagDict: + stmt_get = get_tag_stmt(user_id=user_id, tag_id=tag_id) + async with pass_or_acquire_connection(self.engine, connection) as conn: + result = await conn.execute(stmt_get) + row = 
result.first() + if not row: + raise TagNotFoundError(operation="get", tag_id=tag_id, user_id=user_id) + return TagDict( + id=row.id, + name=row.name, + description=row.description, + color=row.color, + read=row.read, + write=row.write, + delete=row.delete, ) - return TagDict(row.items()) # type: ignore async def update( self, - conn: SAConnection, + connection: AsyncConnection | None = None, + *, + user_id: int, tag_id: int, **fields, ) -> TagDict: + async with transaction_context(self.engine, connection) as conn: + updates = { + name: value + for name, value in fields.items() + if name in {"name", "color", "description", "priority"} + } + + if not updates: + # no updates == get + return await self.get(conn, user_id=user_id, tag_id=tag_id) + + update_stmt = update_tag_stmt(user_id=user_id, tag_id=tag_id, **updates) + result = await conn.execute(update_stmt) + row = result.first() + if not row: + raise TagOperationNotAllowedError( + operation="update", tag_id=tag_id, user_id=user_id + ) - updates = { - name: value - for name, value in fields.items() - if name in {"name", "color", "description"} - } + return TagDict( + id=row.id, + name=row.name, + description=row.description, + color=row.color, + read=row.read, + write=row.write, + delete=row.delete, + ) - if not updates: - # no updates == get - return await self.get(conn, tag_id=tag_id) + async def delete( + self, + connection: AsyncConnection | None = None, + *, + user_id: int, + tag_id: int, + ) -> None: + stmt_delete = delete_tag_stmt(user_id=user_id, tag_id=tag_id) + async with transaction_context(self.engine, connection) as conn: + deleted = await conn.scalar(stmt_delete) + if not deleted: + raise TagOperationNotAllowedError( + operation="delete", tag_id=tag_id, user_id=user_id + ) - update_stmt = ( - tags.update() - .where(tags.c.id == tag_id) - .where( - (tags.c.id == tags_to_groups.c.tag_id) - & (tags_to_groups.c.write == True) - ) - .where( - (tags_to_groups.c.group_id == user_to_groups.c.gid) - & (user_to_groups.c.uid == self.user_id) - ) - .values(**updates) - .returning(*_COLUMNS) - ) - - result = await conn.execute(update_stmt) - row = await result.first() - if not row: - raise TagOperationNotAllowed( - f"{tag_id=} not updated: either no access or not found" + # + # ACCESS RIGHTS + # + + async def has_access_rights( + self, + connection: AsyncConnection | None = None, + *, + user_id: int, + tag_id: int, + read: bool = False, + write: bool = False, + delete: bool = False, + ) -> bool: + async with pass_or_acquire_connection(self.engine, connection) as conn: + group_id_or_none = await conn.scalar( + has_access_rights_stmt( + tag_id=tag_id, + caller_user_id=user_id, + read=read, + write=write, + delete=delete, + ) ) + return bool(group_id_or_none) - return TagDict(row.items()) # type: ignore + async def list_access_rights( + self, + connection: AsyncConnection | None = None, + *, + tag_id: int, + ) -> list[TagAccessRightsDict]: + async with pass_or_acquire_connection(self.engine, connection) as conn: + result = await conn.execute(list_tag_group_access_stmt(tag_id=tag_id)) + return [ + TagAccessRightsDict( + tag_id=row.tag_id, + group_id=row.group_id, + read=row.read, + write=row.write, + delete=row.delete, + ) + for row in result.fetchall() + ] - async def delete(self, conn: SAConnection, tag_id: int) -> None: - delete_stmt = ( - tags.delete() - .where(tags.c.id == tag_id) - .where( - (tags_to_groups.c.tag_id == tag_id) & (tags_to_groups.c.delete == True) + async def create_or_update_access_rights( + self, + connection: 
AsyncConnection | None = None, + *, + tag_id: int, + group_id: int, + read: bool, + write: bool, + delete: bool, + ) -> TagAccessRightsDict: + async with transaction_context(self.engine, connection) as conn: + result = await conn.execute( + upsert_tags_access_rights_stmt( + tag_id=tag_id, + group_id=group_id, + read=read, + write=write, + delete=delete, + ) ) - .where( - (tags_to_groups.c.group_id == user_to_groups.c.gid) - & (user_to_groups.c.uid == self.user_id) + row = result.first() + assert row is not None + + return TagAccessRightsDict( + tag_id=row.tag_id, + group_id=row.group_id, + read=row.read, + write=row.write, + delete=row.delete, ) - .returning(tags_to_groups.c.delete) - ) - deleted = await conn.scalar(delete_stmt) - if not deleted: - raise TagOperationNotAllowed( - f"Could not delete {tag_id=}. Not found or insuficient access." + async def delete_access_rights( + self, + connection: AsyncConnection | None = None, + *, + tag_id: int, + group_id: int, + ) -> bool: + async with transaction_context(self.engine, connection) as conn: + deleted: bool = await conn.scalar( + delete_tag_access_rights_stmt(tag_id=tag_id, group_id=group_id) ) + return deleted diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_tags_sql.py b/packages/postgres-database/src/simcore_postgres_database/utils_tags_sql.py new file mode 100644 index 00000000000..d34b2fa8844 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_tags_sql.py @@ -0,0 +1,336 @@ +import functools +from uuid import UUID + +import sqlalchemy as sa +from simcore_postgres_database.models.groups import user_to_groups +from simcore_postgres_database.models.projects_tags import projects_tags +from simcore_postgres_database.models.services_tags import services_tags +from simcore_postgres_database.models.tags import tags +from simcore_postgres_database.models.tags_access_rights import tags_access_rights +from simcore_postgres_database.models.users import users +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.sql.selectable import ScalarSelect +from typing_extensions import TypedDict + +_TAG_COLUMNS = [ + tags.c.id, + tags.c.name, + tags.c.description, + tags.c.color, +] + +_ACCESS_RIGHTS_COLUMNS = [ + tags_access_rights.c.read, + tags_access_rights.c.write, + tags_access_rights.c.delete, +] + + +def _join_user_groups_tag(*, access_condition, tag_id: int, user_id: int): + return user_to_groups.join( + tags_access_rights, + (user_to_groups.c.uid == user_id) + & (user_to_groups.c.gid == tags_access_rights.c.group_id) + & (access_condition) + & (tags_access_rights.c.tag_id == tag_id), + ) + + +def _join_user_to_given_tag(*, access_condition, tag_id: int, user_id: int): + return _join_user_groups_tag( + access_condition=access_condition, + tag_id=tag_id, + user_id=user_id, + ).join(tags) + + +def _join_user_to_tags(*, access_condition, user_id: int): + return user_to_groups.join( + tags_access_rights, + (user_to_groups.c.uid == user_id) + & (user_to_groups.c.gid == tags_access_rights.c.group_id) + & (access_condition), + ).join(tags) + + +def get_tag_stmt( + user_id: int, + tag_id: int, +): + return ( + sa.select( + *_TAG_COLUMNS, + # aggregation ensures MOST PERMISSIVE policy of access-rights + sa.func.bool_or(tags_access_rights.c.read).label("read"), + sa.func.bool_or(tags_access_rights.c.write).label("write"), + sa.func.bool_or(tags_access_rights.c.delete).label("delete"), + ) + .select_from( + _join_user_to_given_tag( + 
access_condition=tags_access_rights.c.read.is_(True), + tag_id=tag_id, + user_id=user_id, + ) + ) + .group_by(tags.c.id) + ) + + +def list_tags_stmt(*, user_id: int): + return ( + sa.select( + *_TAG_COLUMNS, + # aggregation ensures MOST PERMISSIVE policy of access-rights + sa.func.bool_or(tags_access_rights.c.read).label("read"), + sa.func.bool_or(tags_access_rights.c.write).label("write"), + sa.func.bool_or(tags_access_rights.c.delete).label("delete"), + ) + .select_from( + _join_user_to_tags( + access_condition=tags_access_rights.c.read.is_(True), + user_id=user_id, + ) + ) + .group_by(tags.c.id) # makes it tag.id uniqueness + .order_by(tags.c.priority.nulls_last()) + .order_by(tags.c.id) + ) + + +def create_tag_stmt(**values): + return tags.insert().values(**values).returning(*_TAG_COLUMNS) + + +def count_groups_with_given_access_rights_stmt( + *, + user_id: int, + tag_id: int, + read: bool | None, + write: bool | None, + delete: bool | None, +): + """ + How many groups (from this user_id) are given EXACTLY these access permissions + """ + access = [] + if read is not None: + access.append(tags_access_rights.c.read == read) + if write is not None: + access.append(tags_access_rights.c.write == write) + if delete is not None: + access.append(tags_access_rights.c.delete == delete) + + if not access: + msg = "Undefined access" + raise ValueError(msg) + + j = _join_user_groups_tag( + access_condition=functools.reduce(sa.and_, access), + user_id=user_id, + tag_id=tag_id, + ) + return sa.select(sa.func.count(user_to_groups.c.uid)).select_from(j) + + +def update_tag_stmt(*, user_id: int, tag_id: int, **updates): + return ( + tags.update() + .where(tags.c.id == tag_id) + .where( + (tags.c.id == tags_access_rights.c.tag_id) + & (tags_access_rights.c.write.is_(True)) + ) + .where( + (tags_access_rights.c.group_id == user_to_groups.c.gid) + & (user_to_groups.c.uid == user_id) + ) + .values(**updates) + .returning(*_TAG_COLUMNS, *_ACCESS_RIGHTS_COLUMNS) + ) + + +def delete_tag_stmt(*, user_id: int, tag_id: int): + return ( + tags.delete() + .where(tags.c.id == tag_id) + .where( + (tags_access_rights.c.tag_id == tag_id) + & (tags_access_rights.c.delete.is_(True)) + ) + .where( + (tags_access_rights.c.group_id == user_to_groups.c.gid) + & (user_to_groups.c.uid == user_id) + ) + .returning(tags_access_rights.c.delete) + ) + + +# +# ACCESS RIGHTS +# + +_TAG_ACCESS_RIGHTS_COLS = [ + tags_access_rights.c.tag_id, + tags_access_rights.c.group_id, + *_ACCESS_RIGHTS_COLUMNS, +] + + +class TagAccessRightsDict(TypedDict): + tag_id: int + group_id: int + # access rights + read: bool + write: bool + delete: bool + + +def has_access_rights_stmt( + *, + tag_id: int, + caller_user_id: int | None = None, + caller_group_id: int | None = None, + read: bool = False, + write: bool = False, + delete: bool = False, +): + conditions = [] + + # caller + if caller_user_id is not None: + group_condition = ( + tags_access_rights.c.group_id + == sa.select(users.c.primary_gid) + .where(users.c.id == caller_user_id) + .scalar_subquery() + ) + elif caller_group_id is not None: + group_condition = tags_access_rights.c.group_id == caller_group_id + else: + msg = "Either caller_user_id or caller_group_id must be provided." 
+ raise ValueError(msg) + + conditions.append(group_condition) + + # access-right + if read: + conditions.append(tags_access_rights.c.read.is_(True)) + if write: + conditions.append(tags_access_rights.c.write.is_(True)) + if delete: + conditions.append(tags_access_rights.c.delete.is_(True)) + + return sa.select(tags_access_rights.c.group_id).where( + sa.and_( + tags_access_rights.c.tag_id == tag_id, + *conditions, + ) + ) + + +def list_tag_group_access_stmt(*, tag_id: int): + return sa.select(*_TAG_ACCESS_RIGHTS_COLS).where( + tags_access_rights.c.tag_id == tag_id + ) + + +def upsert_tags_access_rights_stmt( + *, + tag_id: int, + group_id: int | None = None, + user_id: int | None = None, + read: bool, + write: bool, + delete: bool, +): + assert not (user_id is None and group_id is None) # nosec + assert not (user_id is not None and group_id is not None) # nosec + + target_group_id: int | ScalarSelect + + if user_id: + assert not group_id # nosec + target_group_id = ( + sa.select(users.c.primary_gid) + .where(users.c.id == user_id) + .scalar_subquery() + ) + else: + assert group_id # nosec + target_group_id = group_id + + return ( + pg_insert(tags_access_rights) + .values( + tag_id=tag_id, + group_id=target_group_id, + read=read, + write=write, + delete=delete, + ) + .on_conflict_do_update( + index_elements=["tag_id", "group_id"], + set_={"read": read, "write": write, "delete": delete}, + ) + .returning(*_TAG_ACCESS_RIGHTS_COLS) + ) + + +def delete_tag_access_rights_stmt(*, tag_id: int, group_id: int): + return ( + sa.delete(tags_access_rights) + .where( + (tags_access_rights.c.tag_id == tag_id) + & (tags_access_rights.c.group_id == group_id) + ) + .returning(tags_access_rights.c.tag_id.is_not(None)) + ) + + +# +# PROJECT TAGS +# + + +def get_tags_for_project_stmt(*, project_index: int): + return sa.select(projects_tags.c.tag_id).where( + projects_tags.c.project_id == project_index + ) + + +def add_tag_to_project_stmt( + *, project_index: int, tag_id: int, project_uuid_for_rut: UUID +): + return ( + pg_insert(projects_tags) + .values( + project_id=project_index, + tag_id=tag_id, + project_uuid_for_rut=f"{project_uuid_for_rut}", + ) + .on_conflict_do_nothing() + ) + + +# +# SERVICE TAGS +# + + +def get_tags_for_services_stmt(*, key: str, version: str): + return sa.select(services_tags.c.tag_id).where( + (services_tags.c.service_key == key) + & (services_tags.c.service_version == version) + ) + + +def add_tag_to_services_stmt(*, key: str, version: str, tag_id: int): + return ( + pg_insert(services_tags) + .values( + service_key=key, + service_version=version, + tag_id=tag_id, + ) + .on_conflict_do_nothing() + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_user_preferences.py b/packages/postgres-database/src/simcore_postgres_database/utils_user_preferences.py new file mode 100644 index 00000000000..4e803d44752 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_user_preferences.py @@ -0,0 +1,72 @@ +from typing import Any + +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncConnection + +from .models.user_preferences import ( + user_preferences_frontend, + user_preferences_user_service, +) + + +class CouldNotCreateOrUpdateUserPreferenceError(Exception): ... 
+ + +class BasePreferencesRepo: + model: sa.Table + + @classmethod + async def save( + cls, + conn: AsyncConnection, + *, + user_id: int, + product_name: str, + preference_name: str, + payload: Any, + ) -> None: + data: dict[str, Any] = { + "user_id": user_id, + "product_name": product_name, + "preference_name": preference_name, + "payload": payload, + } + + insert_stmt = pg_insert(cls.model).values(**data) + upsert_stmt = insert_stmt.on_conflict_do_update( + index_elements=[ + cls.model.c.user_id, + cls.model.c.product_name, + cls.model.c.preference_name, + ], + set_=data, + ).returning(sa.literal_column("*")) + + await conn.execute(upsert_stmt) + + @classmethod + async def load( + cls, + conn: AsyncConnection, + *, + user_id: int, + product_name: str, + preference_name: Any, + ) -> Any | None: + payload: Any | None = await conn.scalar( + sa.select(cls.model.c.payload).where( + cls.model.c.user_id == user_id, + cls.model.c.product_name == product_name, + cls.model.c.preference_name == preference_name, + ) + ) + return payload + + +class FrontendUserPreferencesRepo(BasePreferencesRepo): + model = user_preferences_frontend + + +class UserServicesUserPreferencesRepo(BasePreferencesRepo): + model = user_preferences_user_service diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_users.py b/packages/postgres-database/src/simcore_postgres_database/utils_users.py new file mode 100644 index 00000000000..587f90ee504 --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_users.py @@ -0,0 +1,279 @@ +"""Free functions, repository pattern, errors and data structures for the users resource +i.e. models.users main table and all its relations +""" + +import re +import secrets +import string +from datetime import datetime +from typing import Any, Final + +import sqlalchemy as sa +from common_library.async_tools import maybe_await +from sqlalchemy import Column + +from ._protocols import DBConnection +from .aiopg_errors import UniqueViolation +from .models.users import UserRole, UserStatus, users +from .models.users_details import users_pre_registration_details + + +class BaseUserRepoError(Exception): + pass + + +class UserNotFoundInRepoError(BaseUserRepoError): + pass + + +# NOTE: see MyProfilePatch.user_name +MIN_USERNAME_LEN: Final[int] = 4 + + +def _generate_random_chars(length: int = MIN_USERNAME_LEN) -> str: + """returns `length` random digit character""" + return "".join(secrets.choice(string.digits) for _ in range(length)) + + +def _generate_username_from_email(email: str) -> str: + username = email.split("@")[0] + + # Remove any non-alphanumeric characters and convert to lowercase + username = re.sub(r"[^a-zA-Z0-9]", "", username).lower() + + # Ensure the username is at least 4 characters long + if len(username) < MIN_USERNAME_LEN: + username += _generate_random_chars(length=MIN_USERNAME_LEN - len(username)) + + return username + + +def generate_alternative_username(username: str) -> str: + return f"{username}_{_generate_random_chars()}" + + +class UsersRepo: + @staticmethod + async def new_user( + conn: DBConnection, + email: str, + password_hash: str, + status: UserStatus, + expires_at: datetime | None, + ) -> Any: + data: dict[str, Any] = { + "name": _generate_username_from_email(email), + "email": email, + "password_hash": password_hash, + "status": status, + "role": UserRole.USER, + "expires_at": expires_at, + } + + user_id = None + while user_id is None: + try: + user_id = await conn.scalar( + 
users.insert().values(**data).returning(users.c.id) + ) + except UniqueViolation: + data["name"] = generate_alternative_username(data["name"]) + + result = await conn.execute( + sa.select( + users.c.id, + users.c.name, + users.c.email, + users.c.role, + users.c.status, + ).where(users.c.id == user_id) + ) + return await maybe_await(result.first()) + + @staticmethod + async def link_and_update_user_from_pre_registration( + conn: DBConnection, + *, + new_user_id: int, + new_user_email: str, + update_user: bool = True, + ) -> None: + """After a user is created, it can be associated with information provided during invitation + + WARNING: Use ONLY upon new user creation. It might override user_details.user_id, users.first_name, users.last_name etc. if already applied or if changes happen in the users table + """ + assert new_user_email # nosec + assert new_user_id > 0 # nosec + + # link both tables first + result = await conn.execute( + users_pre_registration_details.update() + .where(users_pre_registration_details.c.pre_email == new_user_email) + .values(user_id=new_user_id) + ) + + if update_user: + # COPIES some pre-registration details to the users table + pre_columns = ( + users_pre_registration_details.c.pre_first_name, + users_pre_registration_details.c.pre_last_name, + # NOTE: pre_phone is not copied since it has to be validated. Otherwise, if + # phone is wrong, currently user won't be able to login! + ) + + assert {c.name for c in pre_columns} == { # nosec + c.name + for c in users_pre_registration_details.columns + if c + not in ( + users_pre_registration_details.c.pre_email, + users_pre_registration_details.c.pre_phone, + ) + and c.name.startswith("pre_") + }, "Different pre-cols detected. This code might need an update" + + result = await conn.execute( + sa.select(*pre_columns).where( + users_pre_registration_details.c.pre_email == new_user_email + ) + ) + if pre_registration_details_data := result.first(): + # NOTE: could have many products! which to use?
+ await conn.execute( + users.update() + .where(users.c.id == new_user_id) + .values( + first_name=pre_registration_details_data.pre_first_name, # type: ignore[union-attr] + last_name=pre_registration_details_data.pre_last_name, # type: ignore[union-attr] + ) + ) + + @staticmethod + def get_billing_details_query(user_id: int): + return ( + sa.select( + users.c.first_name, + users.c.last_name, + users_pre_registration_details.c.institution, + users_pre_registration_details.c.address, + users_pre_registration_details.c.city, + users_pre_registration_details.c.state, + users_pre_registration_details.c.country, + users_pre_registration_details.c.postal_code, + users.c.phone, + ) + .select_from( + users.join( + users_pre_registration_details, + users.c.id == users_pre_registration_details.c.user_id, + ) + ) + .where(users.c.id == user_id) + ) + + @staticmethod + async def get_billing_details(conn: DBConnection, user_id: int) -> Any | None: + result = await conn.execute( + UsersRepo.get_billing_details_query(user_id=user_id) + ) + return await maybe_await(result.fetchone()) + + @staticmethod + async def get_role(conn: DBConnection, user_id: int) -> UserRole: + value: UserRole | None = await conn.scalar( + sa.select(users.c.role).where(users.c.id == user_id) + ) + if value: + assert isinstance(value, UserRole) # nosec + return UserRole(value) + + raise UserNotFoundInRepoError + + @staticmethod + async def get_email(conn: DBConnection, user_id: int) -> str: + value: str | None = await conn.scalar( + sa.select(users.c.email).where(users.c.id == user_id) + ) + if value: + assert isinstance(value, str) # nosec + return value + + raise UserNotFoundInRepoError + + @staticmethod + async def get_active_user_email(conn: DBConnection, user_id: int) -> str: + value: str | None = await conn.scalar( + sa.select(users.c.email).where( + (users.c.status == UserStatus.ACTIVE) & (users.c.id == user_id) + ) + ) + if value is not None: + assert isinstance(value, str) # nosec + return value + + raise UserNotFoundInRepoError + + @staticmethod + async def is_email_used(conn: DBConnection, email: str) -> bool: + email = email.lower() + + registered = await conn.scalar( + sa.select(users.c.id).where(users.c.email == email) + ) + if registered: + return True + + pre_registered = await conn.scalar( + sa.select(users_pre_registration_details.c.user_id).where( + users_pre_registration_details.c.pre_email == email + ) + ) + return bool(pre_registered) + + +# +# Privacy settings +# + + +def is_private(hide_attribute: Column, caller_id: int): + return hide_attribute.is_(True) & (users.c.id != caller_id) + + +def is_public(hide_attribute: Column, caller_id: int): + return hide_attribute.is_(False) | (users.c.id == caller_id) + + +def visible_user_profile_cols(caller_id: int, *, username_label: str): + """Returns user profile columns with visibility constraints applied based on privacy settings.""" + return ( + sa.case( + ( + is_private(users.c.privacy_hide_username, caller_id), + None, + ), + else_=users.c.name, + ).label(username_label), + sa.case( + ( + is_private(users.c.privacy_hide_email, caller_id), + None, + ), + else_=users.c.email, + ).label("email"), + sa.case( + ( + is_private(users.c.privacy_hide_fullname, caller_id), + None, + ), + else_=users.c.first_name, + ).label("first_name"), + sa.case( + ( + is_private(users.c.privacy_hide_fullname, caller_id), + None, + ), + else_=users.c.last_name, + ).label("last_name"), + ) diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_workspaces_sql.py 
b/packages/postgres-database/src/simcore_postgres_database/utils_workspaces_sql.py new file mode 100644 index 00000000000..05b24d969bd --- /dev/null +++ b/packages/postgres-database/src/simcore_postgres_database/utils_workspaces_sql.py @@ -0,0 +1,30 @@ +from simcore_postgres_database.models.groups import user_to_groups +from simcore_postgres_database.models.workspaces_access_rights import ( + workspaces_access_rights, +) +from sqlalchemy import func +from sqlalchemy.dialects.postgresql import BOOLEAN, INTEGER +from sqlalchemy.sql import Subquery, select + + +def create_my_workspace_access_rights_subquery(user_id: int) -> Subquery: + return ( + select( + workspaces_access_rights.c.workspace_id, + func.json_build_object( + "read", + func.max(workspaces_access_rights.c.read.cast(INTEGER)).cast(BOOLEAN), + "write", + func.max(workspaces_access_rights.c.write.cast(INTEGER)).cast(BOOLEAN), + "delete", + func.max(workspaces_access_rights.c.delete.cast(INTEGER)).cast(BOOLEAN), + ).label("my_access_rights"), + ) + .select_from( + workspaces_access_rights.join( + user_to_groups, user_to_groups.c.gid == workspaces_access_rights.c.gid + ) + ) + .where(user_to_groups.c.uid == user_id) + .group_by(workspaces_access_rights.c.workspace_id) + ).subquery("my_workspace_access_rights_subquery") diff --git a/packages/postgres-database/src/simcore_postgres_database/webserver_models.py b/packages/postgres-database/src/simcore_postgres_database/webserver_models.py index 5eefe798fc0..84fe833df19 100644 --- a/packages/postgres-database/src/simcore_postgres_database/webserver_models.py +++ b/packages/postgres-database/src/simcore_postgres_database/webserver_models.py @@ -1,9 +1,10 @@ -""" Facade for webserver service +"""Facade for webserver service - Facade to direct access to models in the database by - the webserver service +Facade to direct access to models in the database by +the webserver service """ + from .models.api_keys import api_keys from .models.classifiers import group_classifiers from .models.comp_pipeline import StateType, comp_pipeline @@ -11,9 +12,12 @@ from .models.confirmations import ConfirmationAction, confirmations from .models.groups import GroupType, groups, user_to_groups from .models.products import products -from .models.projects import ProjectType, projects +from .models.projects import ProjectTemplateType, ProjectType, projects +from .models.projects_nodes import projects_nodes +from .models.projects_tags import projects_tags +from .models.projects_to_wallet import projects_to_wallet from .models.scicrunch_resources import scicrunch_resources -from .models.tags import study_tags, tags +from .models.tags import tags from .models.tokens import tokens from .models.users import UserRole, UserStatus, users @@ -30,15 +34,18 @@ "NodeClass", "products", "projects", + "projects_nodes", "ProjectType", + "ProjectTemplateType", "scicrunch_resources", "StateType", - "study_tags", + "projects_tags", "tags", "tokens", "user_to_groups", "UserRole", "users", "UserStatus", + "projects_to_wallet", ) # nopycln: file diff --git a/packages/postgres-database/tests/conftest.py b/packages/postgres-database/tests/conftest.py index 24fd876c0b6..fdac39729b6 100644 --- a/packages/postgres-database/tests/conftest.py +++ b/packages/postgres-database/tests/conftest.py @@ -3,36 +3,54 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from typing import AsyncIterator, Awaitable, Callable, Iterator, Optional, Union +import uuid +import warnings +from collections.abc import AsyncIterator, Awaitable, 
Callable, Iterable, Iterator +from pathlib import Path import aiopg.sa -import aiopg.sa.exc import pytest +import simcore_postgres_database.cli import sqlalchemy as sa +import sqlalchemy.engine import yaml from aiopg.sa.connection import SAConnection from aiopg.sa.engine import Engine from aiopg.sa.result import ResultProxy, RowProxy -from pytest_simcore.helpers.rawdata_fakers import random_group, random_user +from faker import Faker +from pytest_simcore.helpers import postgres_tools +from pytest_simcore.helpers.faker_factories import ( + random_group, + random_project, + random_user, +) +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.projects import projects +from simcore_postgres_database.utils_projects_nodes import ( + ProjectNode, + ProjectNodeCreate, + ProjectNodesRepo, +) from simcore_postgres_database.webserver_models import ( GroupType, groups, user_to_groups, users, ) -from sqlalchemy import literal_column +from sqlalchemy.engine.row import Row +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine, create_async_engine pytest_plugins = [ - "pytest_simcore.repository_paths", "pytest_simcore.pytest_global_environs", + "pytest_simcore.repository_paths", ] @pytest.fixture(scope="session") def postgres_service(docker_services, docker_ip, docker_compose_file) -> str: - """Deploys docker-compose and postgres service is responsive""" + """Deploys postgres and checks the service is responsive""" # container environment - with open(docker_compose_file) as fh: + with Path.open(docker_compose_file) as fh: config = yaml.safe_load(fh) environ = config["services"]["postgres"]["environment"] @@ -53,17 +71,35 @@ def postgres_service(docker_services, docker_ip, docker_compose_file) -> str: return dsn -@pytest.fixture -def make_engine( - postgres_service: str, -) -> Callable[[bool], Union[Awaitable[Engine], sa.engine.base.Engine]]: - dsn = postgres_service +@pytest.fixture(scope="session") +def sync_engine(postgres_service: str) -> Iterable[sqlalchemy.engine.Engine]: + _engine: sqlalchemy.engine.Engine = sa.create_engine(url=postgres_service) + yield _engine + _engine.dispose() + - def _make(is_async=True) -> Union[Awaitable[Engine], sa.engine.base.Engine]: - engine = aiopg.sa.create_engine(dsn) if is_async else sa.create_engine(dsn) +@pytest.fixture +def _make_asyncpg_engine(postgres_service: str) -> Callable[[bool], AsyncEngine]: + # NOTE: the user is responsible for `await engine.dispose()` + dsn = postgres_service.replace("postgresql://", "postgresql+asyncpg://") + minsize = 1 + maxsize = 50 + + def _(echo: bool): + engine: AsyncEngine = create_async_engine( + dsn, + pool_size=minsize, + max_overflow=maxsize - minsize, + connect_args={ + "server_settings": {"application_name": "postgres_database_tests"} + }, + pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects + future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released + echo=echo, + ) return engine - return _make + return _ def is_postgres_responsive(dsn) -> bool: @@ -72,7 +108,7 @@ def is_postgres_responsive(dsn) -> bool: engine = sa.create_engine(dsn) conn = engine.connect() conn.close() - except sa.exc.OperationalError: + except sa.exc.OperationalError: # type: ignore return False return True @@ -84,94 +120,153 @@ def db_metadata() -> sa.MetaData: return metadata -@pytest.fixture +@pytest.fixture(params=["sqlModels", "alembicMigration"]) def pg_sa_engine( - make_engine: Callable, db_metadata: sa.MetaData -) ->
Iterator[sa.engine.Engine]: + sync_engine: sqlalchemy.engine.Engine, + db_metadata: sa.MetaData, + request: pytest.FixtureRequest, +) -> Iterator[sqlalchemy.engine.Engine]: """ Runs migration to create tables and return a sqlalchemy engine + + NOTE: use this fixture to ensure pg db: + - up, + - responsive, + - init (w/ tables) and/or migrated """ # NOTE: Using migration to upgrade/downgrade is not # such a great idea since these tests are used while developing # the tables, i.e. when no migration mechanism are in place # Best is therefore to start from scratch and delete all at # the end - sync_engine = make_engine(is_async=False) # NOTE: ALL is deleted before db_metadata.drop_all(sync_engine) - db_metadata.create_all(sync_engine) + if request.param == "sqlModels": + db_metadata.create_all(sync_engine) + else: + assert simcore_postgres_database.cli.discover.callback + assert simcore_postgres_database.cli.upgrade.callback + dsn = sync_engine.url + simcore_postgres_database.cli.discover.callback( + user=dsn.username, + password=dsn.password, + host=dsn.host, + database=dsn.database, + port=dsn.port, + ) + simcore_postgres_database.cli.upgrade.callback("head") yield sync_engine - # NOTE: ALL is deleted after - db_metadata.drop_all(sync_engine) - sync_engine.dispose() + postgres_tools.force_drop_all_tables(sync_engine) @pytest.fixture -async def pg_engine( - pg_sa_engine: sa.engine.Engine, make_engine: Callable +async def aiopg_engine( + pg_sa_engine: sqlalchemy.engine.Engine, + postgres_service: str, ) -> AsyncIterator[Engine]: """ Return an aiopg.sa engine connected to a responsive and migrated pg database """ - async_engine = await make_engine(is_async=True) - - yield async_engine + # first start sync + assert pg_sa_engine.url.database + assert postgres_service.endswith(pg_sa_engine.url.database) + + warnings.warn( + "The 'aiopg_engine' is deprecated since we are replacing `aiopg` library by `sqlalchemy.ext.asyncio`." + "SEE https://github.com/ITISFoundation/osparc-simcore/issues/4529. 
" + "Please use 'asyncpg_engine' instead.", + DeprecationWarning, + stacklevel=2, + ) - # closes async-engine connections and terminates - async_engine.close() - await async_engine.wait_closed() - async_engine.terminate() + async with aiopg.sa.create_engine( + dsn=f"{postgres_service}?application_name=aiopg_engine", + ) as aiopg_sa_engine: + yield aiopg_sa_engine @pytest.fixture -async def connection(pg_engine: Engine) -> AsyncIterator[SAConnection]: +async def connection(aiopg_engine: Engine) -> AsyncIterator[SAConnection]: """Returns an aiopg.sa connection from an engine to a fully furnished and ready pg database""" - async with pg_engine.acquire() as _conn: + async with aiopg_engine.acquire() as _conn: yield _conn +@pytest.fixture +async def asyncpg_engine( # <-- WE SHOULD USE THIS ONE + is_pdb_enabled: bool, + pg_sa_engine: sqlalchemy.engine.Engine, + _make_asyncpg_engine: Callable[[bool], AsyncEngine], +) -> AsyncIterator[AsyncEngine]: + assert ( + pg_sa_engine + ), "Ensures pg db up, responsive, init (w/ tables) and/or migrated" + + _apg_engine = _make_asyncpg_engine(is_pdb_enabled) + + yield _apg_engine + + await _apg_engine.dispose() + + +@pytest.fixture(params=["aiopg", "asyncpg"]) +async def connection_factory( + request: pytest.FixtureRequest, + aiopg_engine: Engine, + asyncpg_engine: AsyncEngine, +) -> AsyncIterator[SAConnection | AsyncConnection]: + """Returns an aiopg.sa connection or an asyncpg connection from an engine to a fully furnished and ready pg database""" + if request.param == "aiopg": + async with aiopg_engine.acquire() as conn: + yield conn + else: + async with asyncpg_engine.connect() as conn: + # NOTE: this is the default in aiopg so we use the same here to make the tests run + await conn.execution_options(isolation_level="AUTOCOMMIT") + yield conn + + # # FACTORY FIXTURES # @pytest.fixture -def create_fake_group( - make_engine: Callable[[bool], Union[Awaitable[Engine], sa.engine.base.Engine]] -) -> Iterator[Callable]: +def create_fake_group(sync_engine: sqlalchemy.engine.Engine) -> Iterator[Callable]: """factory to create standard group""" created_ids = [] - async def _create_group(conn: SAConnection, **overrides) -> RowProxy: + async def _creator(conn: SAConnection, **overrides) -> RowProxy: + if "type" not in overrides: + overrides["type"] = GroupType.STANDARD result: ResultProxy = await conn.execute( groups.insert() - .values(**random_group(type=GroupType.STANDARD, **overrides)) - .returning(literal_column("*")) + .values(**random_group(**overrides)) + .returning(sa.literal_column("*")) ) group = await result.fetchone() assert group created_ids.append(group.gid) return group - yield _create_group + yield _creator - sync_engine = make_engine(is_async=False) - sync_engine.execute(groups.delete().where(groups.c.gid.in_(created_ids))) + assert isinstance(sync_engine, sqlalchemy.engine.Engine) + with sync_engine.begin() as conn: + conn.execute(sa.delete(groups).where(groups.c.gid.in_(created_ids))) @pytest.fixture -def create_fake_user( - make_engine: Callable[[bool], Union[Awaitable[Engine], sa.engine.base.Engine]] -) -> Iterator[Callable]: +def create_fake_user(sync_engine: sqlalchemy.engine.Engine) -> Iterator[Callable]: """factory to create a user w/ or w/o a standard group""" created_ids = [] - async def _create_user( - conn, group: Optional[RowProxy] = None, **overrides + async def _creator( + conn: SAConnection, group: RowProxy | None = None, **overrides ) -> RowProxy: user_id = await conn.scalar( 
users.insert().values(**random_user(**overrides)).returning(users.c.id) @@ -196,7 +291,80 @@ async def _create_user( assert result return user - yield _create_user + yield _creator - sync_engine = make_engine(is_async=False) - sync_engine.execute(users.delete().where(users.c.id.in_(created_ids))) + assert isinstance(sync_engine, sqlalchemy.engine.Engine) + with sync_engine.begin() as conn: + conn.execute(users.delete().where(users.c.id.in_(created_ids))) + + +@pytest.fixture +async def create_fake_project( + aiopg_engine: Engine, +) -> AsyncIterator[Callable[..., Awaitable[RowProxy]]]: + created_project_uuids = [] + + async def _creator(conn, user: RowProxy, **overrides) -> RowProxy: + prj_to_insert = random_project(prj_owner=user.id, **overrides) + result = await conn.execute( + projects.insert().values(**prj_to_insert).returning(projects) + ) + assert result + new_project = await result.first() + assert new_project + created_project_uuids.append(new_project.uuid) + return new_project + + yield _creator + + async with aiopg_engine.acquire() as conn: + await conn.execute( + projects.delete().where(projects.c.uuid.in_(created_project_uuids)) + ) + + +@pytest.fixture +async def create_fake_projects_node( + connection: aiopg.sa.connection.SAConnection, + faker: Faker, +) -> Callable[[uuid.UUID], Awaitable[ProjectNode]]: + async def _creator(project_uuid: uuid.UUID) -> ProjectNode: + fake_node = ProjectNodeCreate( + node_id=uuid.uuid4(), + required_resources=faker.pydict(allowed_types=(str,)), + key=faker.pystr(), + version=faker.pystr(), + label=faker.pystr(), + ) + repo = ProjectNodesRepo(project_uuid=project_uuid) + created_nodes = await repo.add(connection, nodes=[fake_node]) + assert created_nodes + return created_nodes[0] + + return _creator + + +@pytest.fixture +async def create_fake_product( + asyncpg_engine: AsyncEngine, +) -> AsyncIterator[Callable[[str], Awaitable[Row]]]: + created_product_names = set() + + async def _creator(product_name: str) -> Row: + async with asyncpg_engine.begin() as connection: + result = await connection.execute( + sa.insert(products) + .values(name=product_name, host_regex=".*") + .returning(sa.literal_column("*")) + ) + assert result + row = result.one() + created_product_names.add(row.name) + return row + + yield _creator + + async with asyncpg_engine.begin() as conn: + await conn.execute( + products.delete().where(products.c.name.in_(created_product_names)) + ) diff --git a/packages/postgres-database/tests/docker-compose.prod.yml b/packages/postgres-database/tests/docker-compose.prod.yml index f7bbde389d1..cb0c8ffbe53 100644 --- a/packages/postgres-database/tests/docker-compose.prod.yml +++ b/packages/postgres-database/tests/docker-compose.prod.yml @@ -1,4 +1,3 @@ -version: "3.8" services: postgres: volumes: @@ -6,3 +5,4 @@ services: volumes: postgres_data: name: ${POSTGRES_DATA_VOLUME} + external: true diff --git a/packages/postgres-database/tests/docker-compose.yml b/packages/postgres-database/tests/docker-compose.yml index e411e6441d9..cfaa1c2b9ba 100644 --- a/packages/postgres-database/tests/docker-compose.yml +++ b/packages/postgres-database/tests/docker-compose.yml @@ -1,7 +1,6 @@ -version: "3.8" services: postgres: - image: "postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce" + image: "postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc" init: true environment: POSTGRES_USER: test @@ -42,7 +41,7 @@ services: # - net.ipv4.tcp_keepalive_probes=9 # - 
net.ipv4.tcp_keepalive_time=600 adminer: - image: adminer:4.8.0 + image: adminer:4.8.1 init: true environment: - ADMINER_DEFAULT_SERVER=postgres diff --git a/packages/postgres-database/tests/products/conftest.py b/packages/postgres-database/tests/products/conftest.py index aa963f32d9c..168ba260e18 100644 --- a/packages/postgres-database/tests/products/conftest.py +++ b/packages/postgres-database/tests/products/conftest.py @@ -4,15 +4,17 @@ # pylint: disable=unused-argument -from typing import Callable +from collections.abc import Callable import pytest -from aiopg.sa.exc import ResourceClosedError +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_product from simcore_postgres_database.webserver_models import products +from sqlalchemy.dialects.postgresql import insert as pg_insert @pytest.fixture -def products_regex() -> dict: +def products_regex() -> dict[str, str]: return { "s4l": r"(^s4l[\.-])|(^sim4life\.)", "osparc": r"^osparc.", @@ -21,24 +23,40 @@ def products_regex() -> dict: @pytest.fixture -def make_products_table( - products_regex: dict, -) -> Callable: +def products_names(products_regex: dict[str, str]) -> list[str]: + return list(products_regex) + + +@pytest.fixture +def make_products_table(products_regex: dict[str, str], faker: Faker) -> Callable: async def _make(conn) -> None: for n, (name, regex) in enumerate(products_regex.items()): + result = await conn.execute( - products.insert().values( - name=name, - display_name=f"Product {name.capitalize()}", - short_name=name[:3].lower(), - host_regex=regex, - priority=n, + pg_insert(products) + .values( + **random_product( + fake=faker, + name=name, + display_name=f"Product {name.capitalize()}", + short_name=name[:3].lower(), + host_regex=regex, + priority=n, + ) + ) + .on_conflict_do_update( + # osparc might be already injected as default! 
+ index_elements=[products.c.name], + set_={ + "display_name": f"Product {name.capitalize()}", + "short_name": name[:3].lower(), + "host_regex": regex, + "priority": n, + }, ) ) - assert result.closed + assert not result.closed assert not result.returns_rows - with pytest.raises(ResourceClosedError): - await result.scalar() return _make diff --git a/packages/postgres-database/tests/products/test_models_products.py b/packages/postgres-database/tests/products/test_models_products.py index 1183c51a6a1..1f34fab7aa4 100644 --- a/packages/postgres-database/tests/products/test_models_products.py +++ b/packages/postgres-database/tests/products/test_models_products.py @@ -3,15 +3,10 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument - -import json +from collections.abc import Callable from pathlib import Path -from pprint import pprint -from typing import Callable import sqlalchemy as sa -from aiopg.sa.engine import Engine -from aiopg.sa.result import ResultProxy, RowProxy from simcore_postgres_database.models.jinja2_templates import jinja2_templates from simcore_postgres_database.models.products import ( EmailFeedback, @@ -22,42 +17,38 @@ WebFeedback, ) from simcore_postgres_database.webserver_models import products +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncEngine async def test_load_products( - pg_engine: Engine, make_products_table: Callable, products_regex: dict + asyncpg_engine: AsyncEngine, make_products_table: Callable, products_regex: dict ): exclude = { products.c.created, products.c.modified, } - async with pg_engine.acquire() as conn: + async with asyncpg_engine.connect() as conn: await make_products_table(conn) - stmt = sa.select([c for c in products.columns if c not in exclude]) - result: ResultProxy = await conn.execute(stmt) - assert result.returns_rows - - rows: list[RowProxy] = await result.fetchall() + stmt = sa.select(*[c for c in products.columns if c not in exclude]) + result = await conn.execute(stmt) + rows = result.fetchall() assert rows - assert { - row[products.c.name]: row[products.c.host_regex] for row in rows - } == products_regex + assert {row.name: row.host_regex for row in rows} == products_regex async def test_jinja2_templates_table( - pg_engine: Engine, osparc_simcore_services_dir: Path + asyncpg_engine: AsyncEngine, osparc_simcore_services_dir: Path ): - templates_common_dir = ( osparc_simcore_services_dir / "web/server/src/simcore_service_webserver/templates/common" ) - async with pg_engine.acquire() as conn: - + async with asyncpg_engine.connect() as conn: templates = [] # templates table for p in templates_common_dir.glob("*.jinja2"): @@ -92,19 +83,23 @@ async def test_jinja2_templates_table( ]: # aiopg doesn't support executemany!! 
await conn.execute( - products.insert().values(**params), + pg_insert(products) + .values(**params) + .on_conflict_do_update( + index_elements=[products.c.name], + set_=params, + ), ) # prints those products having customized templates j = products.join(jinja2_templates) stmt = sa.select( - [products.c.name, jinja2_templates.c.name, products.c.short_name] + products.c.name, jinja2_templates.c.name, products.c.short_name ).select_from(j) - result: ResultProxy = await conn.execute(stmt) - assert result.rowcount == 2 - rows = await result.fetchall() - assert sorted(r.as_tuple() for r in rows) == sorted( + result = await conn.execute(stmt) + rows = result.fetchall() + assert sorted(tuple(r) for r in rows) == sorted( [ ("osparc", "registration_email.jinja2", "osparc"), ("s4l", "registration_email.jinja2", "s4l web"), @@ -113,7 +108,7 @@ async def test_jinja2_templates_table( assert ( await conn.scalar( - sa.select([jinja2_templates.c.content]) + sa.select(jinja2_templates.c.content) .select_from(j) .where(products.c.name == "s4l") ) @@ -122,7 +117,7 @@ async def test_jinja2_templates_table( assert ( await conn.scalar( - sa.select([jinja2_templates.c.content]) + sa.select(jinja2_templates.c.content) .select_from(j) .where(products.c.name == "tis") ) @@ -131,7 +126,7 @@ async def test_jinja2_templates_table( async def test_insert_select_product( - pg_engine: Engine, + asyncpg_engine: AsyncEngine, ): osparc_product = { "name": "osparc", @@ -168,21 +163,23 @@ async def test_insert_select_product( ], } - print(json.dumps(osparc_product)) - - async with pg_engine.acquire() as conn: + async with asyncpg_engine.begin() as conn: # writes - stmt = products.insert().values(**osparc_product).returning(products.c.name) + stmt = ( + pg_insert(products) + .values(**osparc_product) + .on_conflict_do_update( + index_elements=[products.c.name], set_=osparc_product + ) + .returning(products.c.name) + ) name = await conn.scalar(stmt) # reads stmt = sa.select(products).where(products.c.name == name) - row = await (await conn.execute(stmt)).fetchone() - print(row) + row = (await conn.execute(stmt)).one_or_none() assert row - pprint(dict(**row)) - assert row.manuals assert row.manuals == osparc_product["manuals"] diff --git a/packages/postgres-database/tests/products/test_products_to_templates.py b/packages/postgres-database/tests/products/test_products_to_templates.py new file mode 100644 index 00000000000..9a78aaba94c --- /dev/null +++ b/packages/postgres-database/tests/products/test_products_to_templates.py @@ -0,0 +1,146 @@ +# pylint: disable=no-name-in-module +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + + +import shutil +from collections.abc import Callable +from pathlib import Path + +import pytest +import sqlalchemy as sa +from faker import Faker +from simcore_postgres_database.models.jinja2_templates import jinja2_templates +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.products_to_templates import products_to_templates +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncEngine + + +@pytest.fixture +def templates_names(faker: Faker) -> list[str]: + return [faker.file_name(extension="html") for _ in range(3)] + + +@pytest.fixture +def templates_dir( + tmp_path: Path, products_names: list[str], templates_names: list[str] +) -> Path: + templates_path = tmp_path / "templates" + + # common keeps default templates + (templates_path / 
"common").mkdir(parents=True) + for template_name in templates_names: + (templates_path / "common" / template_name).write_text( + "Fake template for 'common'" + ) + + # only odd products have the first template + for product_name in products_names[1::2]: + (templates_path / product_name).mkdir(parents=True) + (templates_path / product_name / templates_names[0]).write_text( + f"Fake template for {product_name=}" + ) + + return templates_path + + +@pytest.fixture +async def product_templates_in_db( + asyncpg_engine: AsyncEngine, + make_products_table: Callable, + products_names: list[str], + templates_names: list[str], +): + async with asyncpg_engine.begin() as conn: + await make_products_table(conn) + + # one version of all tempaltes + for template_name in templates_names: + await conn.execute( + jinja2_templates.insert().values( + name=template_name, content="fake template in database" + ) + ) + + # only even products have templates + for product_name in products_names[0::2]: + await conn.execute( + products_to_templates.insert().values( + template_name=template_name, product_name=product_name + ) + ) + + +async def test_export_and_import_table( + asyncpg_engine: AsyncEngine, + product_templates_in_db: None, +): + + async with asyncpg_engine.connect() as connection: + exported_values = [] + excluded_names = {"created", "modified", "group_id"} + result = await connection.stream( + sa.select(*(c for c in products.c if c.name not in excluded_names)) + ) + async for row in result: + assert row + exported_values.append(row._asdict()) + + # now just upsert them + for values in exported_values: + values["display_name"] += "-changed" + await connection.execute( + pg_insert(products) + .values(**values) + .on_conflict_do_update(index_elements=[products.c.name], set_=values) + ) + + +async def test_create_templates_products_folder( + asyncpg_engine: AsyncEngine, + templates_dir: Path, + products_names: list[str], + tmp_path: Path, + templates_names: list[str], + product_templates_in_db: None, +): + download_path = tmp_path / "downloaded" / "templates" + assert templates_dir != download_path + + for product_name in products_names: + product_folder = download_path / product_name + product_folder.mkdir(parents=True, exist_ok=True) + + # takes common as defaults + for p in (templates_dir / "common").iterdir(): + if p.is_file(): + shutil.copy(p, product_folder / p.name, follow_symlinks=False) + + # overrides with customs in-place + if (templates_dir / product_name).exists(): + for p in (templates_dir / product_name).iterdir(): + if p.is_file(): + shutil.copy(p, product_folder / p.name, follow_symlinks=False) + + # overrides if with files in database + async with asyncpg_engine.connect() as conn: + result = await conn.stream( + sa.select( + products_to_templates.c.product_name, + jinja2_templates.c.name, + jinja2_templates.c.content, + ) + .select_from(products_to_templates.join(jinja2_templates)) + .where(products_to_templates.c.product_name == product_name) + ) + + async for row in result: + assert row + template_path = product_folder / row.name + template_path.write_text(row.content) + + assert sorted( + product_folder / template_name for template_name in templates_names + ) == sorted(product_folder.rglob("*.*")) diff --git a/packages/postgres-database/tests/products/test_utils_products.py b/packages/postgres-database/tests/products/test_utils_products.py index ef38374535d..b25ffbc0ccf 100644 --- a/packages/postgres-database/tests/products/test_utils_products.py +++ 
b/packages/postgres-database/tests/products/test_utils_products.py @@ -3,43 +3,45 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument - -import asyncio -from typing import Callable +from collections.abc import Callable import pytest import sqlalchemy as sa -from aiopg.sa.engine import Engine from simcore_postgres_database.models.groups import GroupType, groups from simcore_postgres_database.models.products import products from simcore_postgres_database.utils_products import ( + EmptyProductsError, get_default_product_name, get_or_create_product_group, - get_product_group_id, + get_product_group_id_or_none, ) +from sqlalchemy.ext.asyncio import AsyncEngine -async def test_default_product(pg_engine: Engine, make_products_table: Callable): - async with pg_engine.acquire() as conn: +async def test_default_product( + asyncpg_engine: AsyncEngine, make_products_table: Callable +): + async with asyncpg_engine.begin() as conn: await make_products_table(conn) default_product = await get_default_product_name(conn) assert default_product == "s4l" -async def test_default_product_undefined(pg_engine: Engine): - async with pg_engine.acquire() as conn: - with pytest.raises(ValueError): +@pytest.mark.parametrize("pg_sa_engine", ["sqlModels"], indirect=True) +async def test_default_product_undefined(asyncpg_engine: AsyncEngine): + async with asyncpg_engine.connect() as conn: + with pytest.raises(EmptyProductsError): await get_default_product_name(conn) async def test_get_or_create_group_product( - pg_engine: Engine, make_products_table: Callable + asyncpg_engine: AsyncEngine, make_products_table: Callable ): - async with pg_engine.acquire() as conn: + async with asyncpg_engine.connect() as conn: await make_products_table(conn) - async for product_row in await conn.execute( - sa.select([products.c.name, products.c.group_id]).order_by( + async for product_row in await conn.stream( + sa.select(products.c.name, products.c.group_id).order_by( products.c.priority ) ): @@ -56,8 +58,7 @@ async def test_get_or_create_group_product( result = await conn.execute( groups.select().where(groups.c.gid == product_group_id) ) - assert result.rowcount == 1 - product_group = await result.first() + product_group = result.one() # check product's group assert product_group.type == GroupType.STANDARD @@ -77,9 +78,9 @@ async def test_get_or_create_group_product( result = await conn.execute( groups.select().where(groups.c.name == product_row.name) ) - assert result.rowcount == 1 + assert result.one() - assert product_group_id == await get_product_group_id( + assert product_group_id == await get_product_group_id_or_none( conn, product_name=product_row.name ) @@ -87,40 +88,14 @@ async def test_get_or_create_group_product( await conn.execute( groups.update().where(groups.c.gid == product_group_id).values(gid=1000) ) - product_group_id = await get_product_group_id( + product_group_id = await get_product_group_id_or_none( conn, product_name=product_row.name ) assert product_group_id == 1000 # if group is DELETED -> product.group_id=null await conn.execute(groups.delete().where(groups.c.gid == product_group_id)) - product_group_id = await get_product_group_id( + product_group_id = await get_product_group_id_or_none( conn, product_name=product_row.name ) assert product_group_id is None - - -async def test_get_or_create_group_product_concurrent( - pg_engine: Engine, make_products_table: Callable -): - async with pg_engine.acquire() as conn: - await make_products_table(conn) - - async def 
_auto_create_products_groups(): - async with pg_engine.acquire() as conn: - async for product_row in await conn.execute( - sa.select([products.c.name, products.c.group_id]).order_by( - products.c.priority - ) - ): - # get or create - product_group_id = await get_or_create_product_group( - conn, product_name=product_row.name - ) - return product_group_id - - tasks = [asyncio.create_task(_auto_create_products_groups()) for _ in range(5)] - - results = await asyncio.gather(*tasks) - - assert all(res == results[0] for res in results[1:]) diff --git a/packages/postgres-database/tests/projects/conftest.py b/packages/postgres-database/tests/projects/conftest.py deleted file mode 100644 index 705e5abc20d..00000000000 --- a/packages/postgres-database/tests/projects/conftest.py +++ /dev/null @@ -1,56 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -from typing import Optional - -import pytest -from aiopg.sa.connection import SAConnection -from aiopg.sa.engine import Engine -from aiopg.sa.result import ResultProxy, RowProxy -from pytest_simcore.helpers.rawdata_fakers import random_project, random_user -from simcore_postgres_database.models.projects import projects -from simcore_postgres_database.models.users import users - -USERNAME = f"{__name__}.me" -PARENT_PROJECT_NAME = f"{__name__}.parent" - - -@pytest.fixture -async def user(pg_engine: Engine) -> RowProxy: - # some user - async with pg_engine.acquire() as conn: - result: Optional[ResultProxy] = await conn.execute( - users.insert().values(**random_user(name=USERNAME)).returning(users) - ) - assert result.rowcount == 1 - - _user: Optional[RowProxy] = await result.first() - assert _user - assert _user.name == USERNAME - return _user - - -@pytest.fixture -async def project(pg_engine: Engine, user: RowProxy) -> RowProxy: - # a user's project - async with pg_engine.acquire() as conn: - result: Optional[ResultProxy] = await conn.execute( - projects.insert() - .values(**random_project(prj_owner=user.id, name=PARENT_PROJECT_NAME)) - .returning(projects) - ) - assert result.rowcount == 1 - - _project: Optional[RowProxy] = await result.first() - assert _project - assert _project.name == PARENT_PROJECT_NAME - return _project - - -@pytest.fixture -async def conn(pg_engine: Engine) -> SAConnection: - async with pg_engine.acquire() as conn: - yield conn diff --git a/packages/postgres-database/tests/projects/test_projects_version_control.py b/packages/postgres-database/tests/projects/test_projects_version_control.py deleted file mode 100644 index 8c33a95a8b9..00000000000 --- a/packages/postgres-database/tests/projects/test_projects_version_control.py +++ /dev/null @@ -1,288 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-statements - -import hashlib -import json -from typing import Any, Optional -from uuid import UUID, uuid3 - -import pytest -from aiopg.sa.connection import SAConnection -from aiopg.sa.result import RowProxy -from simcore_postgres_database.models.projects import projects -from simcore_postgres_database.models.projects_version_control import ( - projects_vc_branches, - projects_vc_commits, - projects_vc_heads, - projects_vc_repos, - projects_vc_snapshots, - projects_vc_tags, -) -from simcore_postgres_database.utils_aiopg_orm import BaseOrm - - -class ReposOrm(BaseOrm[int]): - def __init__(self, connection: SAConnection): - 
super().__init__( - projects_vc_repos, - connection, - readonly={"id", "created", "modified"}, - ) - - -class BranchesOrm(BaseOrm[int]): - def __init__(self, connection: SAConnection): - super().__init__( - projects_vc_branches, - connection, - readonly={"id", "created", "modified"}, - ) - - -class CommitsOrm(BaseOrm[int]): - def __init__(self, connection: SAConnection): - super().__init__( - projects_vc_commits, - connection, - readonly={"id", "created", "modified"}, - # pylint: disable=no-member - writeonce=set(projects_vc_commits.columns.keys()), - ) - - -class TagsOrm(BaseOrm[int]): - def __init__(self, connection: SAConnection): - super().__init__( - projects_vc_tags, - connection, - readonly={"id", "created", "modified"}, - ) - - -class ProjectsOrm(BaseOrm[str]): - def __init__(self, connection: SAConnection): - super().__init__( - projects, - connection, - readonly={"id", "creation_date", "last_change_date"}, - writeonce={"uuid"}, - ) - - -class SnapshotsOrm(BaseOrm[str]): - def __init__(self, connection: SAConnection): - super().__init__( - projects_vc_snapshots, - connection, - writeonce={"checksum"}, # TODO: all? cannot delete snapshots? - ) - - -class HeadsOrm(BaseOrm[int]): - def __init__(self, connection: SAConnection): - super().__init__( - projects_vc_heads, - connection, - writeonce={"repo_id"}, - ) - - -# ------------- - - -def eval_checksum(workbench: dict[str, Any]): - # FIXME: prototype - block_string = json.dumps(workbench, sort_keys=True).encode("utf-8") - raw_hash = hashlib.sha256(block_string) - return raw_hash.hexdigest() - - -def eval_snapshot_uuid(repo: RowProxy, commit: RowProxy) -> UUID: - assert repo.id == commit.repo_id # nosec - return uuid3(UUID(repo.project_uuid), f"{repo.id}.{commit.snapshot_checksum}") - - -async def add_snapshot( - project_wc: RowProxy, checksum: str, repo: RowProxy, conn: SAConnection -) -> str: - snapshot_orm = SnapshotsOrm(conn) - snapshot_checksum = checksum - row_id = await snapshot_orm.insert( - checksum=checksum, - content={"workbench": project_wc.workbench, "ui": project_wc.ui}, - ) - assert row_id == checksum - return checksum - - -async def test_basic_workflow(project: RowProxy, conn: SAConnection): - - # git init - async with conn.begin(): - # create repo - repo_orm = ReposOrm(conn) - repo_id = await repo_orm.insert(project_uuid=project.uuid) - assert repo_id is not None - assert isinstance(repo_id, int) - - repo_orm.set_filter(rowid=repo_id) - repo = await repo_orm.fetch() - assert repo - assert repo.project_uuid == project.uuid - assert repo.project_checksum is None - assert repo.created == repo.modified - - # create main branch - branches_orm = BranchesOrm(conn) - branch_id = await branches_orm.insert(repo_id=repo.id) - assert branch_id is not None - assert isinstance(branch_id, int) - - branches_orm.set_filter(rowid=branch_id) - main_branch: Optional[RowProxy] = await branches_orm.fetch() - assert main_branch - assert main_branch.name == "main", "Expected 'main' as default branch" - assert main_branch.head_commit_id is None, "still not assigned" - assert main_branch.created == main_branch.modified - - # assign head branch - heads_orm = HeadsOrm(conn) - await heads_orm.insert(repo_id=repo.id, head_branch_id=branch_id) - - heads_orm.set_filter(rowid=repo.id) - head = await heads_orm.fetch() - assert head - - # - # create first commit -- TODO: separate tests - - # fetch a *full copy* of the project (WC) - repo = await repo_orm.fetch("id project_uuid project_checksum") - assert repo - - project_orm = 
ProjectsOrm(conn).set_filter(uuid=repo.project_uuid) - project_wc = await project_orm.fetch() - assert project_wc - assert project == project_wc - - # call external function to compute checksum - checksum = eval_checksum(project_wc.workbench) - assert repo.project_checksum != checksum - - # take snapshot <=> git add & commit - async with conn.begin(): - - snapshot_checksum = await add_snapshot(project_wc, checksum, repo, conn) - - # get HEAD = repo.branch_id -> .head_commit_id - assert head.repo_id == repo.id - branches_orm.set_filter(head.head_branch_id) - branch = await branches_orm.fetch("head_commit_id name") - assert branch - assert branch.name == "main" - assert branch.head_commit_id is None, "First commit" - - # create commit - commits_orm = CommitsOrm(conn) - commit_id = await commits_orm.insert( - repo_id=repo.id, - parent_commit_id=branch.head_commit_id, - snapshot_checksum=snapshot_checksum, - message="first commit", - ) - assert commit_id - assert isinstance(commit_id, int) - - # update branch head - await branches_orm.update(head_commit_id=commit_id) - - # update checksum cache - await repo_orm.update(project_checksum=snapshot_checksum) - - # log history - commits = await commits_orm.fetch_all() - assert len(commits) == 1 - assert commits[0].id == commit_id - - # tag - tag_orm = TagsOrm(conn) - tag_id = await tag_orm.insert( - repo_id=repo.id, - commit_id=commit_id, - name="v1", - ) - assert tag_id is not None - assert isinstance(tag_id, int) - - tag = await tag_orm.fetch(rowid=tag_id) - assert tag - assert tag.name == "v1" - - ############# NEW COMMIT ##################### - - # user add some changes - repo = await repo_orm.fetch() - assert repo - - project_orm.set_filter(uuid=repo.project_uuid) - assert project_orm.is_filter_set() - - await project_orm.update( - workbench={ - "node": { - "input": 3, - } - } - ) - - project_wc = await project_orm.fetch("workbench ui") - assert project_wc - assert project.workbench != project_wc.workbench - - # get HEAD = repo.branch_id -> .head_commit_id - head = await heads_orm.fetch() - assert head - branch = await branches_orm.fetch("head_commit_id", rowid=head.head_branch_id) - assert branch - # TODO: get subquery ... 
and compose - head_commit = await commits_orm.fetch(rowid=branch.head_commit_id) - assert head_commit - - # compare checksums between wc and HEAD - checksum = eval_checksum(project_wc.workbench) - assert head_commit.snapshot_checksum != checksum - - # updates wc checksum cache - await repo_orm.update(project_checksum=checksum) - - # take snapshot = add & commit - async with conn.begin(): - snapshot_uuid: str = await add_snapshot(project_wc, checksum, repo, conn) - - commit_id = await commits_orm.insert( - repo_id=head_commit.repo_id, - parent_commit_id=head_commit.id, - snapshot_checksum=checksum, - message="second commit", - ) - assert commit_id - assert isinstance(commit_id, int) - - # update branch head - await branches_orm.update(head_commit_id=commit_id) - - # log history - commits = await commits_orm.fetch_all() - assert len(commits) == 2 - assert commits[1].id == commit_id - - ############# CHECKOUT TO TAG ##################### - - -@pytest.mark.skip(reason="DEV") -def test_concurrency(): - # several repos - # several threads - assert False diff --git a/packages/postgres-database/tests/test_classifiers.py b/packages/postgres-database/tests/test_classifiers.py index af09f73c44d..8e8e0eba24c 100644 --- a/packages/postgres-database/tests/test_classifiers.py +++ b/packages/postgres-database/tests/test_classifiers.py @@ -10,7 +10,7 @@ import pytest import sqlalchemy as sa from aiopg.sa.engine import Engine -from pytest_simcore.helpers.rawdata_fakers import random_group +from pytest_simcore.helpers.faker_factories import random_group from simcore_postgres_database.models.classifiers import group_classifiers from simcore_postgres_database.models.groups import groups from sqlalchemy import func, literal_column @@ -38,11 +38,10 @@ def classifiers_bundle(web_client_resource_folder: Path) -> dict: async def test_operations_on_group_classifiers( - pg_engine: Engine, classifiers_bundle: dict + aiopg_engine: Engine, classifiers_bundle: dict ): # NOTE: mostly for TDD - async with pg_engine.acquire() as conn: - + async with aiopg_engine.acquire() as conn: # creates a group stmt = ( groups.insert() @@ -66,9 +65,7 @@ async def test_operations_on_group_classifiers( # get bundle in one query bundle = await conn.scalar( - sa.select([group_classifiers.c.bundle]).where( - group_classifiers.c.gid == gid - ) + sa.select(group_classifiers.c.bundle).where(group_classifiers.c.gid == gid) ) assert bundle assert classifiers_bundle == bundle @@ -83,7 +80,7 @@ async def test_operations_on_group_classifiers( groups_count = await conn.scalar(sa.select(func.count(groups.c.gid))) classifiers_count = await conn.scalar( - sa.select([func.count()]).select_from(group_classifiers) + sa.select(func.count()).select_from(group_classifiers) ) assert ( @@ -96,8 +93,6 @@ async def test_operations_on_group_classifiers( # no bundle bundle = await conn.scalar( - sa.select([group_classifiers.c.bundle]).where( - group_classifiers.c.gid == gid - ) + sa.select(group_classifiers.c.bundle).where(group_classifiers.c.gid == gid) ) assert bundle is None diff --git a/packages/postgres-database/tests/test_clusters.py b/packages/postgres-database/tests/test_clusters.py deleted file mode 100644 index b3fac60ff43..00000000000 --- a/packages/postgres-database/tests/test_clusters.py +++ /dev/null @@ -1,138 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -from typing import AsyncIterable, Awaitable, Callable - -import pytest -import 
sqlalchemy as sa -from aiopg.sa.engine import Engine -from aiopg.sa.result import ResultProxy -from faker import Faker -from pytest_simcore.helpers.rawdata_fakers import random_user -from simcore_postgres_database.errors import ForeignKeyViolation, NotNullViolation -from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups -from simcore_postgres_database.models.clusters import ClusterType, clusters -from simcore_postgres_database.models.users import users - - -@pytest.fixture(scope="function") -async def user_id(pg_engine: Engine) -> AsyncIterable[int]: - async with pg_engine.acquire() as conn: - # a 'me' user - uid = await conn.scalar( - users.insert().values(**(random_user())).returning(users.c.id) - ) - assert uid is not None - yield uid - # cleanup - async with pg_engine.acquire() as conn: - # a 'me' user - uid = await conn.execute(users.delete().where(users.c.id == uid)) - - -@pytest.fixture -async def user_group_id(pg_engine: Engine, user_id: int) -> int: - async with pg_engine.acquire() as conn: - primary_gid = await conn.scalar( - sa.select([users.c.primary_gid]).where(users.c.id == user_id) - ) - assert primary_gid is not None - return primary_gid - - -@pytest.fixture -async def create_cluster( - pg_engine: Engine, faker: Faker -) -> AsyncIterable[Callable[..., Awaitable[int]]]: - cluster_ids = [] - - async def creator(**overrides) -> int: - insert_values = { - "name": "default cluster name", - "type": ClusterType.ON_PREMISE, - "description": None, - "endpoint": faker.domain_name(), - "authentication": faker.pydict(value_types=[str]), - } - insert_values.update(overrides) - async with pg_engine.acquire() as conn: - cluster_id = await conn.scalar( - clusters.insert().values(**insert_values).returning(clusters.c.id) - ) - cluster_ids.append(cluster_id) - assert cluster_id - return cluster_id - - yield creator - - # cleanup - async with pg_engine.acquire() as conn: - await conn.execute(clusters.delete().where(clusters.c.id.in_(cluster_ids))) - - -async def test_cluster_without_owner_forbidden( - create_cluster: Callable[..., Awaitable[int]] -): - with pytest.raises(NotNullViolation): - await create_cluster() - - -async def test_can_create_cluster_with_owner( - user_group_id: int, create_cluster: Callable[..., Awaitable[int]] -): - aws_cluster_id = await create_cluster( - name="test AWS cluster", type=ClusterType.AWS, owner=user_group_id - ) - assert aws_cluster_id > 0 - on_premise_cluster = await create_cluster( - name="test on premise cluster", - type=ClusterType.ON_PREMISE, - owner=user_group_id, - ) - assert on_premise_cluster > 0 - assert on_premise_cluster != aws_cluster_id - - -async def test_cannot_remove_owner_that_owns_cluster( - pg_engine: Engine, - user_id: int, - user_group_id: int, - create_cluster: Callable[..., Awaitable[int]], -): - cluster_id = await create_cluster(owner=user_group_id) - # now try removing the user - async with pg_engine.acquire() as conn: - with pytest.raises(ForeignKeyViolation): - await conn.execute(users.delete().where(users.c.id == user_id)) - - # now remove the cluster - async with pg_engine.acquire() as conn: - await conn.execute(clusters.delete().where(clusters.c.id == cluster_id)) - - # removing the user should work now - async with pg_engine.acquire() as conn: - await conn.execute(users.delete().where(users.c.id == user_id)) - - -async def test_cluster_owner_has_all_rights( - pg_engine: Engine, - user_group_id: int, - create_cluster: Callable[..., Awaitable[int]], -): - cluster_id = await 
create_cluster(owner=user_group_id) - - async with pg_engine.acquire() as conn: - result: ResultProxy = await conn.execute( - cluster_to_groups.select().where( - cluster_to_groups.c.cluster_id == cluster_id - ) - ) - - assert result.rowcount == 1 - row = await result.fetchone() - assert row is not None - - assert row.read == True - assert row.write == True - assert row.delete == True diff --git a/packages/postgres-database/tests/test_comp_tasks.py b/packages/postgres-database/tests/test_comp_tasks.py index cc4d30e442c..4759e074dc9 100644 --- a/packages/postgres-database/tests/test_comp_tasks.py +++ b/packages/postgres-database/tests/test_comp_tasks.py @@ -5,10 +5,10 @@ import asyncio import json +from collections.abc import AsyncIterator import pytest from aiopg.sa.engine import Engine, SAConnection -from aiopg.sa.result import RowProxy from simcore_postgres_database.models.comp_pipeline import StateType from simcore_postgres_database.models.comp_tasks import ( DB_CHANNEL_NAME, @@ -19,15 +19,18 @@ @pytest.fixture() -async def db_connection(pg_engine: Engine) -> SAConnection: - async with pg_engine.acquire() as conn: +async def db_connection(aiopg_engine: Engine) -> AsyncIterator[SAConnection]: + async with aiopg_engine.acquire() as conn: yield conn @pytest.fixture() -async def db_notification_queue(db_connection: SAConnection) -> asyncio.Queue: +async def db_notification_queue( + db_connection: SAConnection, +) -> AsyncIterator[asyncio.Queue]: listen_query = f"LISTEN {DB_CHANNEL_NAME};" await db_connection.execute(listen_query) + assert db_connection.connection notifications_queue: asyncio.Queue = db_connection.connection.notifies assert notifications_queue.empty() yield notifications_queue @@ -48,14 +51,15 @@ async def task( .values(outputs=json.dumps({}), node_class=task_class) .returning(literal_column("*")) ) - row: RowProxy = await result.fetchone() + row = await result.fetchone() + assert row task = dict(row) assert ( db_notification_queue.empty() ), "database triggered change although it should only trigger on updates!" - yield task + return task async def _assert_notification_queue_status( @@ -65,13 +69,20 @@ async def _assert_notification_queue_status( assert not notification_queue.empty() tasks = [] - for n in range(num_exp_messages): + for _ in range(num_exp_messages): msg = await notification_queue.get() assert msg, "notification msg from postgres is empty!" 
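# each NOTIFY payload received on the LISTEN channel is a JSON document describing the changed comp_tasks row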
task_data = json.loads(msg.payload) - - for k in ["table", "changes", "action", "data"]: + expected_keys = [ + "task_id", + "project_id", + "node_id", + "changes", + "action", + "table", + ] + for k in expected_keys: assert k in task_data, f"invalid structure, expected [{k}] in {task_data}" tasks.append(task_data) @@ -106,21 +117,28 @@ async def test_listen_query( db_connection, task, outputs=updated_output, state=StateType.ABORTED ) tasks = await _assert_notification_queue_status(db_notification_queue, 1) - assert tasks[0]["changes"] == ["outputs", "state"] + assert tasks[0]["changes"] == ["modified", "outputs", "state"] + assert tasks[0]["action"] == "UPDATE" + assert tasks[0]["table"] == "comp_tasks" + assert tasks[0]["task_id"] == task["task_id"] + assert tasks[0]["project_id"] == task["project_id"] + assert tasks[0]["node_id"] == task["node_id"] + assert ( - tasks[0]["data"]["outputs"] == updated_output - ), f"the data received from the database is {tasks[0]}, expected new output is {updated_output}" + "data" not in tasks[0] + ), "data is not expected in the notification payload anymore" # setting the exact same data twice triggers only ONCE updated_output = {"some new stuff": "it is newer"} await _update_comp_task_with(db_connection, task, outputs=updated_output) await _update_comp_task_with(db_connection, task, outputs=updated_output) tasks = await _assert_notification_queue_status(db_notification_queue, 1) - assert tasks[0]["changes"] == ["outputs"] - assert ( - tasks[0]["data"]["outputs"] == updated_output - ), f"the data received from the database is {tasks[0]}, expected new output is {updated_output}" - + assert tasks[0]["changes"] == ["modified", "outputs"] + assert tasks[0]["action"] == "UPDATE" + assert tasks[0]["table"] == "comp_tasks" + assert tasks[0]["task_id"] == task["task_id"] + assert tasks[0]["project_id"] == task["project_id"] + assert tasks[0]["node_id"] == task["node_id"] # updating a number of times with different stuff comes out in FIFO order NUM_CALLS = 20 update_outputs = [] @@ -132,7 +150,10 @@ async def test_listen_query( tasks = await _assert_notification_queue_status(db_notification_queue, NUM_CALLS) for n, output in enumerate(update_outputs): - assert tasks[n]["changes"] == ["outputs"] - assert ( - tasks[n]["data"]["outputs"] == output - ), f"the data received from the database is {tasks[n]}, expected new output is {output}" + assert output + assert tasks[n]["changes"] == ["modified", "outputs"] + assert tasks[n]["action"] == "UPDATE" + assert tasks[n]["table"] == "comp_tasks" + assert tasks[n]["task_id"] == task["task_id"] + assert tasks[n]["project_id"] == task["project_id"] + assert tasks[n]["node_id"] == task["node_id"] diff --git a/packages/postgres-database/tests/test_delete_projects_and_users.py b/packages/postgres-database/tests/test_delete_projects_and_users.py index e1cc94e52d5..4c380156066 100644 --- a/packages/postgres-database/tests/test_delete_projects_and_users.py +++ b/packages/postgres-database/tests/test_delete_projects_and_users.py @@ -9,15 +9,14 @@ from aiopg.sa.engine import Engine from aiopg.sa.result import ResultProxy, RowProxy from psycopg2.errors import ForeignKeyViolation -from pytest_simcore.helpers.rawdata_fakers import random_project, random_user +from pytest_simcore.helpers.faker_factories import random_project, random_user from simcore_postgres_database.webserver_models import projects, users from sqlalchemy import func @pytest.fixture -async def engine(pg_engine: Engine): - - async with pg_engine.acquire() as conn: +async 
def engine(aiopg_engine: Engine): + async with aiopg_engine.acquire() as conn: await conn.execute(users.insert().values(**random_user(name="A"))) await conn.execute(users.insert().values(**random_user())) await conn.execute(users.insert().values(**random_user())) @@ -28,13 +27,12 @@ async def engine(pg_engine: Engine): with pytest.raises(ForeignKeyViolation): await conn.execute(projects.insert().values(**random_project(prj_owner=4))) - yield pg_engine + return aiopg_engine @pytest.mark.skip(reason="sandbox for dev purposes") async def test_insert_user(engine): async with engine.acquire() as conn: - # execute + scalar res: ResultProxy = await conn.execute( users.insert().values(**random_user(name="FOO")) @@ -84,16 +82,16 @@ async def test_insert_user(engine): async def test_count_users(engine): async with engine.acquire() as conn: - users_count = await conn.scalar(sa.select([func.count()]).select_from(users)) + users_count = await conn.scalar(sa.select(func.count()).select_from(users)) assert users_count == 3 users_count = await conn.scalar( - sa.select([sa.func.count()]).where(users.c.name == "A") + sa.select(sa.func.count()).where(users.c.name == "A") ) assert users_count == 1 users_count = await conn.scalar( - sa.select([sa.func.count()]).where(users.c.name == "UNKNOWN NAME") + sa.select(sa.func.count()).where(users.c.name == "UNKNOWN NAME") ) assert users_count == 0 diff --git a/packages/postgres-database/tests/test_groups.py b/packages/postgres-database/tests/test_groups.py deleted file mode 100644 index 8e6a2ccec4e..00000000000 --- a/packages/postgres-database/tests/test_groups.py +++ /dev/null @@ -1,257 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from typing import AsyncIterator, Awaitable, Callable, Optional, Union - -import aiopg.sa.exc -import pytest -import sqlalchemy as sa -from aiopg.sa.connection import SAConnection -from aiopg.sa.engine import Engine -from aiopg.sa.result import ResultProxy, RowProxy -from psycopg2.errors import ForeignKeyViolation, RaiseException, UniqueViolation -from pytest_simcore.helpers.rawdata_fakers import random_user -from simcore_postgres_database.models.base import metadata -from simcore_postgres_database.webserver_models import ( - GroupType, - groups, - user_to_groups, - users, -) -from sqlalchemy import func, literal_column, select - - -@pytest.fixture -async def connection( - make_engine: Callable[[bool], Union[Awaitable[Engine], sa.engine.base.Engine]] -) -> AsyncIterator[SAConnection]: - engine = await make_engine() - sync_engine = make_engine(is_async=False) - metadata.drop_all(sync_engine) - metadata.create_all(sync_engine) - - async with engine.acquire() as conn: - yield conn - - metadata.drop_all(sync_engine) - - -async def test_user_group_uniqueness( - connection: SAConnection, - create_fake_group: Callable, - create_fake_user: Callable, -): - - rory_group = await create_fake_group( - connection, name="Rory Storm and the Hurricanes" - ) - ringo = await create_fake_user(connection, name="Ringo", group=rory_group) - # test unique user/group pair - with pytest.raises(UniqueViolation, match="user_to_groups_uid_gid_key"): - await connection.execute( - user_to_groups.insert().values(uid=ringo.id, gid=rory_group.gid) - ) - - # Checks implementation of simcore_service_webserver/groups_api.py:get_group_from_gid - res: ResultProxy = await connection.execute( - groups.select().where(groups.c.gid == rory_group.gid) - ) - - the_one: 
Optional[RowProxy] = await res.first() - assert the_one.type == the_one["type"] - - with pytest.raises(aiopg.sa.exc.ResourceClosedError): - await res.fetchone() - - -async def test_all_group( - connection: SAConnection, -): - # now check the only available group is the all group - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == 1 - - result = await connection.execute( - groups.select().where(groups.c.type == GroupType.EVERYONE) - ) - all_group_gid = (await result.fetchone()).gid - assert all_group_gid == 1 # it's the first group so it gets a 1 - # try removing the all group - with pytest.raises(RaiseException): - await connection.execute(groups.delete().where(groups.c.gid == all_group_gid)) - - # check adding a user is automatically added to the all group - result = await connection.execute( - users.insert().values(**random_user()).returning(literal_column("*")) - ) - user: RowProxy = await result.fetchone() - - result = await connection.execute( - user_to_groups.select().where(user_to_groups.c.gid == all_group_gid) - ) - user_to_groups_row: RowProxy = await result.fetchone() - assert user_to_groups_row.uid == user.id - assert user_to_groups_row.gid == all_group_gid - - # try removing the all group - with pytest.raises(RaiseException): - await connection.execute(groups.delete().where(groups.c.gid == all_group_gid)) - - # remove the user now - await connection.execute(users.delete().where(users.c.id == user.id)) - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 0 - - # check the all group still exists - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == 1 - result = await connection.execute( - groups.select().where(groups.c.type == GroupType.EVERYONE) - ) - all_group_gid = (await result.fetchone()).gid - assert all_group_gid == 1 # it's the first group so it gets a 1 - - -async def test_own_group( - connection: SAConnection, -): - result = await connection.execute( - users.insert().values(**random_user()).returning(literal_column("*")) - ) - user: RowProxy = await result.fetchone() - assert not user.primary_gid - - # now fetch the same user that shall have a primary group set by the db - result = await connection.execute(users.select().where(users.c.id == user.id)) - user: RowProxy = await result.fetchone() - assert user.primary_gid - - # now check there is a primary group - result = await connection.execute( - groups.select().where(groups.c.type == GroupType.PRIMARY) - ) - primary_group: RowProxy = await result.fetchone() - assert primary_group.gid == user.primary_gid - - groups_count = await connection.scalar( - select([func.count(groups.c.gid)]).where(groups.c.gid == user.primary_gid) - ) - assert groups_count == 1 - - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == 2 # own group + all group - - # try removing the primary group - with pytest.raises(ForeignKeyViolation): - await connection.execute( - groups.delete().where(groups.c.gid == user.primary_gid) - ) - - # now remove the users should remove the primary group - await connection.execute(users.delete().where(users.c.id == user.id)) - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 0 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == 1 # the all group is still around - 
relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count) - - -async def test_group( - connection: SAConnection, - create_fake_group: Callable, - create_fake_user: Callable, -): - rory_group = await create_fake_group( - connection, name="Rory Storm and the Hurricanes" - ) - quarrymen_group = await create_fake_group(connection, name="The Quarrymen") - await create_fake_user(connection, name="John", group=quarrymen_group) - await create_fake_user(connection, name="Paul", group=quarrymen_group) - await create_fake_user(connection, name="Georges", group=quarrymen_group) - pete = await create_fake_user(connection, name="Pete", group=quarrymen_group) - ringo = await create_fake_user(connection, name="Ringo", group=rory_group) - - # rationale: following linux user/group system, each user has its own group (primary group) + whatever other group (secondary groups) - # check DB contents - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 5 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == ( - users_count + 2 + 1 - ) # user primary groups, other groups, all group - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count + users_count) - - # change group name - result = await connection.execute( - groups.update() - .where(groups.c.gid == quarrymen_group.gid) - .values(name="The Beatles") - .returning(literal_column("*")) - ) - beatles_group = await result.fetchone() - assert beatles_group.modified > quarrymen_group.modified - - # delete 1 user - await connection.execute(users.delete().where(users.c.id == pete.id)) - - # check DB contents - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 4 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == (users_count + 2 + 1) - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count + users_count) - - # add one user to another group - await connection.execute( - user_to_groups.insert().values(uid=ringo.id, gid=beatles_group.gid) - ) - - # check DB contents - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 4 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == (users_count + 2 + 1) - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count + 1 + users_count) - - # delete 1 group - await connection.execute(groups.delete().where(groups.c.gid == rory_group.gid)) - - # check DB contents - users_count = await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 4 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == (users_count + 1 + 1) - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count + users_count) - - # delete the other group - await connection.execute(groups.delete().where(groups.c.gid == beatles_group.gid)) - - # check DB contents - users_count = 
await connection.scalar(select([func.count()]).select_from(users)) - assert users_count == 4 - groups_count = await connection.scalar(select([func.count()]).select_from(groups)) - assert groups_count == (users_count + 0 + 1) - relations_count = await connection.scalar( - select([func.count()]).select_from(user_to_groups) - ) - assert relations_count == (users_count + users_count) diff --git a/packages/postgres-database/tests/test_models_api_keys.py b/packages/postgres-database/tests/test_models_api_keys.py new file mode 100644 index 00000000000..d8863f9ac74 --- /dev/null +++ b/packages/postgres-database/tests/test_models_api_keys.py @@ -0,0 +1,97 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import AsyncIterable + +import pytest +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from pytest_simcore.helpers.faker_factories import ( + random_api_auth, + random_product, + random_user, +) +from simcore_postgres_database.models.api_keys import api_keys +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.users import users + + +@pytest.fixture
+async def user_id(connection: SAConnection) -> AsyncIterable[int]: + uid = await connection.scalar( + users.insert().values(random_user()).returning(users.c.id) + ) + assert uid + yield uid + + await connection.execute(users.delete().where(users.c.id == uid)) + + +@pytest.fixture +async def product_name(connection: SAConnection) -> AsyncIterable[str]: + name = await connection.scalar( + products.insert() + .values(random_product(name="s4l", group_id=None)) + .returning(products.c.name) + ) + assert name + yield name + + await connection.execute(products.delete().where(products.c.name == name)) + + +async def test_create_and_delete_api_key( + connection: SAConnection, user_id: int, product_name: str +): + apikey_id = await connection.scalar( + api_keys.insert() + .values(**random_api_auth(product_name, user_id)) + .returning(api_keys.c.id) + ) + + assert apikey_id + assert apikey_id >= 1 + + await connection.execute(api_keys.delete().where(api_keys.c.id == apikey_id)) + + +@pytest.fixture +async def session_auth( + connection: SAConnection, user_id: int, product_name: str +) -> AsyncIterable[RowProxy]: + # user_id under product_name creates an api-key+secret and + # uses it to authenticate a session. 
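+ # (the api-server later presents this same api_key/api_secret pair to resolve user_id and product_name)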
+ result = await connection.execute( + api_keys.insert() + .values(**random_api_auth(product_name, user_id)) + .returning(sa.literal_column("*")) + ) + row = await result.fetchone() + assert row + + yield row + + await connection.execute(api_keys.delete().where(api_keys.c.id == row.id)) + + +async def test_get_session_identity_for_api_server( + connection: SAConnection, user_id: int, product_name: str, session_auth: RowProxy +): + # NOTE: preview of what needs to implement api-server to authenticate and + # authorize a session + # + result = await connection.execute( + sa.select(api_keys.c.user_id, api_keys.c.product_name,).where( + (api_keys.c.api_key == session_auth.api_key) + & (api_keys.c.api_secret == session_auth.api_secret), + ) + ) + row = await result.fetchone() + assert row + + # session identity + assert row.user_id == user_id + assert row.product_name == product_name diff --git a/packages/postgres-database/tests/test_models_groups.py b/packages/postgres-database/tests/test_models_groups.py new file mode 100644 index 00000000000..6ce8a77c4cc --- /dev/null +++ b/packages/postgres-database/tests/test_models_groups.py @@ -0,0 +1,237 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import Callable + +import aiopg.sa.exc +import pytest +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import ResultProxy, RowProxy +from psycopg2.errors import ForeignKeyViolation, RaiseException, UniqueViolation +from pytest_simcore.helpers.faker_factories import random_user +from simcore_postgres_database.webserver_models import ( + GroupType, + groups, + user_to_groups, + users, +) +from sqlalchemy import func, literal_column, select + + +async def test_user_group_uniqueness( + connection: SAConnection, + create_fake_group: Callable, + create_fake_user: Callable, +): + rory_group = await create_fake_group( + connection, name="Rory Storm and the Hurricanes" + ) + ringo = await create_fake_user(connection, name="Ringo", group=rory_group) + # test unique user/group pair + with pytest.raises(UniqueViolation, match="user_to_groups_uid_gid_key"): + await connection.execute( + user_to_groups.insert().values(uid=ringo.id, gid=rory_group.gid) + ) + + # Checks implementation of simcore_service_webserver/groups_api.py:get_group_from_gid + res: ResultProxy = await connection.execute( + groups.select().where(groups.c.gid == rory_group.gid) + ) + + the_one: RowProxy | None = await res.first() + assert the_one.type == the_one["type"] + + with pytest.raises(aiopg.sa.exc.ResourceClosedError): + await res.fetchone() + + +async def test_all_group( + connection: SAConnection, +): + # now check the only available group is the all group + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == 1 + + result = await connection.execute( + groups.select().where(groups.c.type == GroupType.EVERYONE) + ) + all_group_gid = (await result.fetchone()).gid + assert all_group_gid == 1 # it's the first group so it gets a 1 + # try removing the all group + with pytest.raises(RaiseException): + await connection.execute(groups.delete().where(groups.c.gid == all_group_gid)) + + # check adding a user is automatically added to the all group + result = await connection.execute( + users.insert().values(**random_user()).returning(literal_column("*")) + ) + user: RowProxy = await result.fetchone() + + result = await connection.execute( + 
user_to_groups.select().where(user_to_groups.c.gid == all_group_gid) + ) + user_to_groups_row: RowProxy = await result.fetchone() + assert user_to_groups_row.uid == user.id + assert user_to_groups_row.gid == all_group_gid + + # try removing the all group + with pytest.raises(RaiseException): + await connection.execute(groups.delete().where(groups.c.gid == all_group_gid)) + + # remove the user now + await connection.execute(users.delete().where(users.c.id == user.id)) + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 0 + + # check the all group still exists + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == 1 + result = await connection.execute( + groups.select().where(groups.c.type == GroupType.EVERYONE) + ) + all_group_gid = (await result.fetchone()).gid + assert all_group_gid == 1 # it's the first group so it gets a 1 + + +async def test_own_group( + connection: SAConnection, +): + result = await connection.execute( + users.insert().values(**random_user()).returning(literal_column("*")) + ) + user: RowProxy = await result.fetchone() + assert not user.primary_gid + + # now fetch the same user that shall have a primary group set by the db + result = await connection.execute(users.select().where(users.c.id == user.id)) + user: RowProxy = await result.fetchone() + assert user.primary_gid + + # now check there is a primary group + result = await connection.execute( + groups.select().where(groups.c.type == GroupType.PRIMARY) + ) + primary_group: RowProxy = await result.fetchone() + assert primary_group.gid == user.primary_gid + + groups_count = await connection.scalar( + select(func.count(groups.c.gid)).where(groups.c.gid == user.primary_gid) + ) + assert groups_count == 1 + + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == 2 # own group + all group + + # try removing the primary group + with pytest.raises(ForeignKeyViolation): + await connection.execute( + groups.delete().where(groups.c.gid == user.primary_gid) + ) + + # now remove the users should remove the primary group + await connection.execute(users.delete().where(users.c.id == user.id)) + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 0 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == 1 # the all group is still around + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count) + + +async def test_group( + connection: SAConnection, + create_fake_group: Callable, + create_fake_user: Callable, +): + rory_group = await create_fake_group( + connection, name="Rory Storm and the Hurricanes" + ) + quarrymen_group = await create_fake_group(connection, name="The Quarrymen") + await create_fake_user(connection, name="John", group=quarrymen_group) + await create_fake_user(connection, name="Paul", group=quarrymen_group) + await create_fake_user(connection, name="Georges", group=quarrymen_group) + pete = await create_fake_user(connection, name="Pete", group=quarrymen_group) + ringo = await create_fake_user(connection, name="Ringo", group=rory_group) + + # rationale: following linux user/group system, each user has its own group (primary group) + whatever other group (secondary groups) + # check DB contents + users_count = await 
connection.scalar(select(func.count()).select_from(users)) + assert users_count == 5 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == ( + users_count + 2 + 1 + ) # user primary groups, other groups, all group + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count + users_count) + + # change group name + result = await connection.execute( + groups.update() + .where(groups.c.gid == quarrymen_group.gid) + .values(name="The Beatles") + .returning(literal_column("*")) + ) + beatles_group = await result.fetchone() + assert beatles_group.modified > quarrymen_group.modified + + # delete 1 user + await connection.execute(users.delete().where(users.c.id == pete.id)) + + # check DB contents + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 4 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == (users_count + 2 + 1) + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count + users_count) + + # add one user to another group + await connection.execute( + user_to_groups.insert().values(uid=ringo.id, gid=beatles_group.gid) + ) + + # check DB contents + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 4 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == (users_count + 2 + 1) + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count + 1 + users_count) + + # delete 1 group + await connection.execute(groups.delete().where(groups.c.gid == rory_group.gid)) + + # check DB contents + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 4 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == (users_count + 1 + 1) + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count + users_count) + + # delete the other group + await connection.execute(groups.delete().where(groups.c.gid == beatles_group.gid)) + + # check DB contents + users_count = await connection.scalar(select(func.count()).select_from(users)) + assert users_count == 4 + groups_count = await connection.scalar(select(func.count()).select_from(groups)) + assert groups_count == (users_count + 0 + 1) + relations_count = await connection.scalar( + select(func.count()).select_from(user_to_groups) + ) + assert relations_count == (users_count + users_count) diff --git a/packages/postgres-database/tests/test_models_payments_methods.py b/packages/postgres-database/tests/test_models_payments_methods.py new file mode 100644 index 00000000000..cb5b14ee70e --- /dev/null +++ b/packages/postgres-database/tests/test_models_payments_methods.py @@ -0,0 +1,74 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from faker import Faker +from pytest_simcore.helpers.faker_factories import 
random_payment_method +from simcore_postgres_database.aiopg_errors import UniqueViolation +from simcore_postgres_database.models.payments_methods import ( + InitPromptAckFlowState, + payments_methods, +) + + +@pytest.fixture +def payment_method_id(faker: Faker) -> str: + return "5495BF38-4A98-430C-A028-19E4585ADFC7" + + +async def test_create_payment_method( + connection: SAConnection, + payment_method_id: str, +): + init_values = random_payment_method(payment_method_id=payment_method_id) + await connection.execute(payments_methods.insert().values(**init_values)) + + # unique payment_method_id + with pytest.raises(UniqueViolation) as err_info: + await connection.execute(payments_methods.insert().values(**init_values)) + error = err_info.value + assert "payment_method_id" in f"{error}" + + # Create payment-method for another entity + for n in range(2): + # every user has its own wallet + wallet_id = init_values["wallet_id"] + n + user_id = init_values["user_id"] + n + for _ in range(3): # payments to wallet_id by user_id + await connection.execute( + payments_methods.insert().values( + **random_payment_method(wallet_id=wallet_id, user_id=user_id) + ) + ) + + # list payment methods in wallet_id (user_id) + result = await connection.execute( + sa.select(payments_methods).where( + (payments_methods.c.wallet_id == init_values["wallet_id"]) + & ( + payments_methods.c.user_id == init_values["user_id"] + ) # ensures ownership + & (payments_methods.c.state == InitPromptAckFlowState.PENDING) + ) + ) + rows = await result.fetchall() + assert rows + assert len(rows) == 1 + 3 + + # get payment-method wallet_id / payment_method_id + result = await connection.execute( + sa.select(payments_methods).where( + (payments_methods.c.payment_method_id == init_values["payment_method_id"]) + & (payments_methods.c.wallet_id == init_values["wallet_id"]) + ) + ) + row: RowProxy | None = await result.fetchone() + assert row is not None + + # a payment-method added by a user and associated to a wallet diff --git a/packages/postgres-database/tests/test_models_payments_transactions.py b/packages/postgres-database/tests/test_models_payments_transactions.py new file mode 100644 index 00000000000..6dde13b1abe --- /dev/null +++ b/packages/postgres-database/tests/test_models_payments_transactions.py @@ -0,0 +1,246 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unexpected-keyword-arg +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import decimal +from collections.abc import Callable +from typing import Any + +import pytest +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_payment_transaction, utcnow +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, + payments_transactions, +) +from simcore_postgres_database.utils_payments import ( + PaymentAlreadyAcked, + PaymentNotFound, + PaymentTransactionRow, + get_user_payments_transactions, + insert_init_payment_transaction, + update_payment_transaction_state, +) + + +async def test_numerics_precission_and_scale(connection: SAConnection): + # https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Numeric + # precision: This parameter specifies the total number of digits that can be stored, both before and after the decimal point. 
+ # scale: This parameter specifies the number of digits that can be stored to the right of the decimal point. + + for order_of_magnitude in range(8): + expected = 10**order_of_magnitude + 0.123 + got = await connection.scalar( + payments_transactions.insert() + .values(**random_payment_transaction(price_dollars=expected)) + .returning(payments_transactions.c.price_dollars) + ) + assert isinstance(got, decimal.Decimal) + assert float(got) == expected + + +def _remove_not_required(data: dict[str, Any]) -> dict[str, Any]: + for to_remove in ( + "completed_at", + "invoice_url", + "invoice_pdf_url", + "state", + "state_message", + "stripe_invoice_id", + ): + data.pop(to_remove) + return data + + +@pytest.fixture +def init_transaction(connection: SAConnection): + async def _init(payment_id: str): + # get payment_id from payment-gateway + values = _remove_not_required(random_payment_transaction(payment_id=payment_id)) + + # init successful: set timestamp + values["initiated_at"] = utcnow() + + # insert + ok = await insert_init_payment_transaction(connection, **values) + assert ok + + return values + + return _init + + +@pytest.fixture +def payment_id() -> str: + return "5495BF38-4A98-430C-A028-19E4585ADFC7" + + +async def test_init_transaction_sets_it_as_pending( + connection: SAConnection, init_transaction: Callable, payment_id: str +): + values = await init_transaction(payment_id) + assert values["payment_id"] == payment_id + + # check init-ed but not completed! + result = await connection.execute( + sa.select( + payments_transactions.c.completed_at, + payments_transactions.c.state, + payments_transactions.c.state_message, + ).where(payments_transactions.c.payment_id == payment_id) + ) + row: RowProxy | None = await result.fetchone() + assert row is not None + + # tests that defaults are right? 
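+ # i.e. a freshly init-ed transaction defaults to PENDING, with no completed_at and no state_message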
+ assert dict(row.items()) == { + "completed_at": None, + "state": PaymentTransactionState.PENDING, + "state_message": None, + } + + +@pytest.fixture +def invoice_url(faker: Faker, expected_state: PaymentTransactionState) -> str | None: + if expected_state == PaymentTransactionState.SUCCESS: + return faker.url() + return None + + +@pytest.mark.parametrize( + "expected_state,expected_message", + [ + ( + state, + None if state is PaymentTransactionState.SUCCESS else f"with {state}", + ) + for state in [ + PaymentTransactionState.SUCCESS, + PaymentTransactionState.FAILED, + PaymentTransactionState.CANCELED, + ] + ], +) +async def test_complete_transaction( + connection: SAConnection, + init_transaction: Callable, + payment_id: str, + expected_state: PaymentTransactionState, + expected_message: str | None, + invoice_url: str | None, +): + await init_transaction(payment_id) + + payment_row = await update_payment_transaction_state( + connection, + payment_id=payment_id, + completion_state=expected_state, + state_message=expected_message, + invoice_url=invoice_url, + ) + + assert isinstance(payment_row, PaymentTransactionRow) + assert payment_row.state_message == expected_message + assert payment_row.state == expected_state + assert payment_row.initiated_at < payment_row.completed_at + assert PaymentTransactionState(payment_row.state).is_completed() + + +async def test_update_transaction_failures_and_exceptions( + connection: SAConnection, + init_transaction: Callable, + payment_id: str, +): + kwargs = { + "connection": connection, + "payment_id": payment_id, + "completion_state": PaymentTransactionState.SUCCESS, + } + + ok = await update_payment_transaction_state(**kwargs) + assert isinstance(ok, PaymentNotFound) + assert not ok + + # init & complete + await init_transaction(payment_id) + ok = await update_payment_transaction_state(**kwargs) + assert isinstance(ok, PaymentTransactionRow) + assert ok + + # repeat -> fails + ok = await update_payment_transaction_state(**kwargs) + assert isinstance(ok, PaymentAlreadyAcked) + assert not ok + + with pytest.raises(ValueError): + kwargs.update(completion_state=PaymentTransactionState.PENDING) + await update_payment_transaction_state(**kwargs) + + +@pytest.fixture +def user_id() -> int: + return 1 + + +@pytest.fixture +def create_fake_user_transactions(connection: SAConnection, user_id: int) -> Callable: + async def _go(expected_total=5): + payment_ids = [] + for _ in range(expected_total): + values = _remove_not_required(random_payment_transaction(user_id=user_id)) + + payment_id = await insert_init_payment_transaction(connection, **values) + assert payment_id + payment_ids.append(payment_id) + + return payment_ids + + return _go + + +async def test_get_user_payments_transactions( + connection: SAConnection, create_fake_user_transactions: Callable, user_id: int +): + expected_payments_ids = await create_fake_user_transactions() + expected_total = len(expected_payments_ids) + + # test offset and limit defaults + total, rows = await get_user_payments_transactions(connection, user_id=user_id) + assert total == expected_total + assert [r.payment_id for r in rows] == expected_payments_ids[::-1], "newest first" + + +async def test_get_user_payments_transactions_with_pagination_options( + connection: SAConnection, create_fake_user_transactions: Callable, user_id: int +): + expected_payments_ids = await create_fake_user_transactions() + expected_total = len(expected_payments_ids) + + # test offset, limit + offset = int(expected_total / 4) + limit = 
int(expected_total / 2) + + total, rows = await get_user_payments_transactions( + connection, user_id=user_id, limit=limit, offset=offset + ) + assert total == expected_total + assert [r.payment_id for r in rows] == expected_payments_ids[::-1][ + offset : (offset + limit) + ], "newest first" + + # test offset>=expected_total? + total, rows = await get_user_payments_transactions( + connection, user_id=user_id, offset=expected_total + ) + assert not rows + + # test limit==0? + total, rows = await get_user_payments_transactions( + connection, user_id=user_id, limit=0 + ) + assert not rows diff --git a/packages/postgres-database/tests/test_models_products_prices.py b/packages/postgres-database/tests/test_models_products_prices.py new file mode 100644 index 00000000000..406158af0bf --- /dev/null +++ b/packages/postgres-database/tests/test_models_products_prices.py @@ -0,0 +1,271 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from collections.abc import AsyncIterator + +import pytest +import sqlalchemy as sa +import sqlalchemy.exc +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_product +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.products_prices import products_prices +from simcore_postgres_database.utils_products_prices import ( + get_product_latest_price_info_or_none, + get_product_latest_stripe_info_or_none, + is_payment_enabled, +) +from sqlalchemy.engine.row import Row +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + + +@pytest.fixture +async def connection(asyncpg_engine: AsyncEngine) -> AsyncIterator[AsyncConnection]: + async with asyncpg_engine.connect() as conn: + isolation_level = await conn.get_isolation_level() + assert isolation_level == "READ COMMITTED" + yield conn + + +@pytest.fixture +async def fake_product(connection: AsyncConnection) -> Row: + result = await connection.execute( + products.insert() + .values(random_product(name="tip", group_id=None)) + .returning(sa.literal_column("*")), + ) + await connection.commit() + + async with connection.begin(): + result = await connection.execute( + products.insert() + .values(random_product(name="s4l", group_id=None)) + .returning(sa.literal_column("*")), + ) + + return result.one() + + +async def test_creating_product_prices( + asyncpg_engine: AsyncEngine, + connection: AsyncConnection, + fake_product: Row, + faker: Faker, +): + # a price per product + async with connection.begin(): + result = await connection.execute( + products_prices.insert() + .values( + product_name=fake_product.name, + usd_per_credit=100, + comment="PO Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + .returning(sa.literal_column("*")), + ) + got = result.one() + assert got + + # insert still NOT committed but can read from this connection + read_query = sa.select(products_prices).where( + products_prices.c.product_name == fake_product.name + ) + result = await connection.execute(read_query) + assert result.one()._asdict() == got._asdict() + + assert connection.in_transaction() is True + + # cannot read from other connection though + async with asyncpg_engine.connect() as other_connection: + result = await other_connection.execute(read_query) + assert result.one_or_none() is None + + # AFTER commit + assert connection.in_transaction() is False + async with asyncpg_engine.connect() as yet_another_connection: + result 
= await yet_another_connection.execute(read_query) + assert result.one()._asdict() == got._asdict() + + +async def test_non_negative_price_not_allowed( + connection: AsyncConnection, fake_product: Row, faker: Faker +): + + assert not connection.in_transaction() + + # WRITE: negative price not allowed + with pytest.raises(sqlalchemy.exc.IntegrityError) as exc_info: + await connection.execute( + products_prices.insert().values( + product_name=fake_product.name, + usd_per_credit=-100, # <----- NEGATIVE + comment="PO Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + ) + + assert exc_info.value + assert connection.in_transaction() + await connection.rollback() + assert not connection.in_transaction() + + # WRITE: zero price is allowed + result = await connection.execute( + products_prices.insert() + .values( + product_name=fake_product.name, + usd_per_credit=0, # <----- ZERO + comment="PO Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + .returning("*") + ) + + assert result.one() + + assert connection.in_transaction() + await connection.commit() + assert not connection.in_transaction() + + with pytest.raises(sqlalchemy.exc.ResourceClosedError): + # can only get result once! + assert result.one() + + # READ + result = await connection.execute(sa.select(products_prices)) + assert connection.in_transaction() + + assert result.one() + with pytest.raises(sqlalchemy.exc.ResourceClosedError): + # can only get result once! + assert result.one() + + +async def test_delete_price_constraints( + connection: AsyncConnection, fake_product: Row, faker: Faker +): + # products_prices + async with connection.begin(): + await connection.execute( + products_prices.insert().values( + product_name=fake_product.name, + usd_per_credit=10, + comment="PO Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + ) + + # BAD DELETE: + # should not be able to delete a product w/o deleting price first + async with connection.begin(): + with pytest.raises(sqlalchemy.exc.IntegrityError, match="delete") as exc_info: + await connection.execute(products.delete()) + + # NOTE: that asyncpg.exceptions are converted to sqlalchemy.exc + # sqlalchemy.exc.IntegrityError: (sqlalchemy.dialects.postgresql.asyncpg.IntegrityError) : + assert "asyncpg.exceptions.ForeignKeyViolationError" in exc_info.value.args[0] + + # GOOD DELETE: this is the correct way to delete + async with connection.begin(): + await connection.execute(products_prices.delete()) + await connection.execute(products.delete()) + + +async def test_get_product_latest_price_or_none( + connection: AsyncConnection, fake_product: Row, faker: Faker +): + # undefined product + assert ( + await get_product_latest_price_info_or_none( + connection, product_name="undefined" + ) + is None + ) + + assert await is_payment_enabled(connection, product_name="undefined") is False + + # defined product but undefined price + assert ( + await get_product_latest_price_info_or_none( + connection, product_name=fake_product.name + ) + is None + ) + + assert await is_payment_enabled(connection, product_name=fake_product.name) is False + + +async def test_price_history_of_a_product( + connection: AsyncConnection, fake_product: Row, faker: Faker +): + # initial price + async with connection.begin(): + await connection.execute( + products_prices.insert().values( + product_name=fake_product.name, + usd_per_credit=1, + comment="PO Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + ) + + # new 
price + async with connection.begin(): + await connection.execute( + products_prices.insert().values( + product_name=fake_product.name, + usd_per_credit=2, + comment="Update by Mr X", + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + ) + ) + + # latest is 2 USD! + assert await get_product_latest_price_info_or_none( + connection, product_name=fake_product.name + ) == (2, 10) + + assert await is_payment_enabled(connection, product_name=fake_product.name) is True + + +async def test_get_product_latest_stripe_info( + connection: AsyncConnection, fake_product: Row, faker: Faker +): + stripe_price_id_value = faker.word() + stripe_tax_rate_id_value = faker.word() + + # products_prices + async with connection.begin(): + await connection.execute( + products_prices.insert().values( + product_name=fake_product.name, + usd_per_credit=10, + comment="PO Mr X", + stripe_price_id=stripe_price_id_value, + stripe_tax_rate_id=stripe_tax_rate_id_value, + ) + ) + + # undefined product + undefined_product_stripe_info = await get_product_latest_stripe_info_or_none( + connection, product_name="undefined" + ) + assert undefined_product_stripe_info is None + + # defined product + product_stripe_info = await get_product_latest_stripe_info_or_none( + connection, product_name=fake_product.name + ) + assert product_stripe_info + assert product_stripe_info[0] == stripe_price_id_value + assert product_stripe_info[1] == stripe_tax_rate_id_value diff --git a/packages/postgres-database/tests/test_models_projects_to_jobs.py b/packages/postgres-database/tests/test_models_projects_to_jobs.py new file mode 100644 index 00000000000..d6f2879694d --- /dev/null +++ b/packages/postgres-database/tests/test_models_projects_to_jobs.py @@ -0,0 +1,152 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import Iterator + +import pytest +import simcore_postgres_database.cli +import sqlalchemy as sa +import sqlalchemy.engine +import sqlalchemy.exc +from faker import Faker +from pytest_simcore.helpers import postgres_tools +from pytest_simcore.helpers.faker_factories import random_project, random_user +from simcore_postgres_database.models.projects import projects +from simcore_postgres_database.models.projects_to_jobs import projects_to_jobs +from simcore_postgres_database.models.users import users + + +@pytest.fixture +def sync_engine( + sync_engine: sqlalchemy.engine.Engine, db_metadata: sa.MetaData +) -> Iterator[sqlalchemy.engine.Engine]: + # EXTENDS sync_engine fixture to include cleanup and prepare migration + + # cleanup tables + db_metadata.drop_all(sync_engine) + + # prepare migration upgrade + assert simcore_postgres_database.cli.discover.callback + assert simcore_postgres_database.cli.upgrade.callback + + dsn = sync_engine.url + simcore_postgres_database.cli.discover.callback( + user=dsn.username, + password=dsn.password, + host=dsn.host, + database=dsn.database, + port=dsn.port, + ) + + yield sync_engine + + # cleanup tables + postgres_tools.force_drop_all_tables(sync_engine) + + +def test_populate_projects_to_jobs_during_migration( + sync_engine: sqlalchemy.engine.Engine, faker: Faker +): + assert simcore_postgres_database.cli.discover.callback + assert simcore_postgres_database.cli.upgrade.callback + + # UPGRADE just one before 48604dfdc5f4_new_projects_to_job_map.py + simcore_postgres_database.cli.upgrade.callback("8403acca8759") + + with sync_engine.connect() as conn: + + # Ensure the
projects_to_jobs table does NOT exist yet + with pytest.raises(sqlalchemy.exc.ProgrammingError) as exc_info: + conn.execute( + sa.select(sa.func.count()).select_from(projects_to_jobs) + ).scalar() + assert "psycopg2.errors.UndefinedTable" in f"{exc_info.value}" + + # INSERT data (emulates data in-place) + user_data = random_user( + faker, name="test_populate_projects_to_jobs_during_migration" + ) + stmt = users.insert().values(**user_data).returning(users.c.id) + result = conn.execute(stmt) + user_id = result.scalar() + + SPACES = " " * 3 + projects_data = [ + random_project( + faker, + uuid="cd03450c-4c17-4c2c-85fd-0d951d7dcd5a", + name="solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.2.1/jobs/cd03450c-4c17-4c2c-85fd-0d951d7dcd5a", + description=( + "Study associated to solver job:" + """{ + "id": "cd03450c-4c17-4c2c-85fd-0d951d7dcd5a", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.2.1/jobs/cd03450c-4c2c-85fd-0d951d7dcd5a", + "inputs_checksum": "015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a", + "created_at": "2025-01-27T13:12:58.676564Z" + } + """ + ), + prj_owner=user_id, + ), + random_project( + faker, + uuid="bf204942-007b-11ef-befd-0242ac114f07", + name=f"studies/4b7a704a-007a-11ef-befd-0242ac114f07/jobs/bf204942-007b-11ef-befd-0242ac114f07{SPACES}", + description="Valid project 2", + prj_owner=user_id, + ), + random_project( + faker, + uuid="33333333-3333-3333-3333-333333333333", + name="invalid/project/name", + description="Invalid project", + prj_owner=user_id, + ), + ] + for prj in projects_data: + conn.execute(sa.insert(projects).values(prj)) + + # MIGRATE UPGRADE: this should populate + simcore_postgres_database.cli.upgrade.callback("head") + + with sync_engine.connect() as conn: + # Query the projects_to_jobs table + result = conn.execute( + sa.select( + projects_to_jobs.c.project_uuid, + projects_to_jobs.c.job_parent_resource_name, + ) + ).fetchall() + + # Assert only valid projects are added + assert len(result) == 2 + assert ( + "cd03450c-4c17-4c2c-85fd-0d951d7dcd5a", + "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.2.1", + ) in result + assert ( + "bf204942-007b-11ef-befd-0242ac114f07", + "studies/4b7a704a-007a-11ef-befd-0242ac114f07", + ) in result + + # Query project name and description for projects also in projects_to_jobs + result = conn.execute( + sa.select( + projects.c.name, + projects.c.uuid, + projects_to_jobs.c.job_parent_resource_name, + ).select_from( + projects.join( + projects_to_jobs, projects.c.uuid == projects_to_jobs.c.project_uuid + ) + ) + ).fetchall() + + # Print or assert the result as needed + for project_name, project_uuid, job_parent_resource_name in result: + assert ( + f"{job_parent_resource_name}/jobs/{project_uuid}" + == project_name.strip() + ) diff --git a/packages/postgres-database/tests/test_models_tags.py b/packages/postgres-database/tests/test_models_tags.py index 8a9caf9aa2d..7b129e5edc7 100644 --- a/packages/postgres-database/tests/test_models_tags.py +++ b/packages/postgres-database/tests/test_models_tags.py @@ -6,8 +6,9 @@ import pytest import sqlalchemy as sa +from simcore_postgres_database.models._common import RefActions from simcore_postgres_database.models.base import metadata -from simcore_postgres_database.models.tags import tags_to_groups +from simcore_postgres_database.models.tags_access_rights import tags_access_rights from simcore_postgres_database.models.users import users @@ -23,7 +24,9 @@ def test_migration_downgrade_script(): sa.Column( 
"user_id", sa.BigInteger, - sa.ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"), + sa.ForeignKey( + "users.id", onupdate=RefActions.CASCADE, ondelete=RefActions.CASCADE + ), nullable=False, ), sa.Column("name", sa.String, nullable=False), @@ -31,12 +34,14 @@ def test_migration_downgrade_script(): sa.Column("color", sa.String, nullable=False), ) - j = users.join(tags_to_groups, tags_to_groups.c.group_id == users.c.primary_gid) + j = users.join( + tags_access_rights, tags_access_rights.c.group_id == users.c.primary_gid + ) scalar_subq = ( sa.select(users.c.id) .select_from(j) - .where(old_tags.c.id == tags_to_groups.c.tag_id) + .where(old_tags.c.id == tags_access_rights.c.tag_id) .scalar_subquery() ) @@ -44,6 +49,6 @@ def test_migration_downgrade_script(): assert str(update_stmt).split("\n") == [ "UPDATE old_tags SET user_id=(SELECT users.id ", - "FROM users JOIN tags_to_groups ON tags_to_groups.group_id = users.primary_gid ", - "WHERE old_tags.id = tags_to_groups.tag_id)", + "FROM users JOIN tags_access_rights ON tags_access_rights.group_id = users.primary_gid ", + "WHERE old_tags.id = tags_access_rights.tag_id)", ] diff --git a/packages/postgres-database/tests/test_services__meta_data.py b/packages/postgres-database/tests/test_services__meta_data.py deleted file mode 100644 index a4fa923fd97..00000000000 --- a/packages/postgres-database/tests/test_services__meta_data.py +++ /dev/null @@ -1,271 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -import random -from typing import NamedTuple - -import pytest -import sqlalchemy as sa -from faker import Faker -from packaging.version import Version -from pytest_simcore.helpers.rawdata_fakers import random_group -from simcore_postgres_database.models.groups import GroupType, groups -from simcore_postgres_database.models.products import products -from simcore_postgres_database.models.services import ( - services_access_rights, - services_latest, - services_meta_data, -) -from simcore_postgres_database.models.services_consume_filetypes import ( - services_consume_filetypes, -) -from sqlalchemy.dialects.postgresql import insert as pg_insert - - -class ServicesFixture(NamedTuple): - expected_latest: dict - num_services: int - expected_public_service: dict - - -@pytest.fixture -def services_fixture(faker: Faker, pg_sa_engine: sa.engine.Engine) -> ServicesFixture: - # fake metadata from image - # emulate background - # - inject to database - # - create permissions - - expected_latest = {} - - with pg_sa_engine.connect() as conn: - # need PRODUCT - product_name = conn.execute( - products.insert() - .values( - name="osparc", - display_name="Product Osparc", - short_name="osparc", - host_regex=r"^osparc.", - priority=0, - ) - .returning(products.c.name) - ).scalar() - - # need GROUPS - product_gid = conn.execute( - groups.insert() - .values(**random_group(type=GroupType.STANDARD, name="osparc group")) - .returning(groups.c.gid) - ).scalar() - everyone_gid = conn.execute( - sa.select(groups.c.gid).where(groups.c.type == GroupType.EVERYONE) - ).scalar() - - assert product_gid != everyone_gid - - # fill w/ different versions - num_services = 3 - expected_public_service = {} - - for service_index in range(num_services): - service_name = faker.name() - key = f"simcore/services/dynamic/{service_name.lower().replace(' ','')}" - - expected_latest[key] = "0.0.0" - - num_versions = 4 - for _ in range(num_versions): - version = 
faker.numerify("%#.%#.%##") - if Version(expected_latest[key]) < Version(version): - expected_latest[key] = version - - query = services_meta_data.insert().values( - key=key, - version=version, - name=service_name, - description=faker.sentence(), - thumbnail=faker.image_url(120, 120), - classifiers=faker.random_choices(elements=("osparc", "nih", "foo")) - if service_index % 2 - else [], - ) - conn.execute(query) - - # services_access_rights = everyone - query = services_access_rights.insert().values( - key=key, - version=version, - gid=everyone_gid, - execute_access=True, - write_access=False, - product_name=product_name, - ) - - # services_consume_filetypes - num_filetypes = random.randint(0, 4) - for i, filetype in enumerate( - faker.uri_extension().removeprefix(".") - for _ in range(num_filetypes) - ): - is_public = random.choice([True, False]) - query = services_consume_filetypes.insert().values( - service_key=key, - service_version=version, - service_display_name=service_name, - service_input_port=f"input_{i}", - filetype=filetype.upper(), - is_guest_allowed=is_public, - ) - - if is_public: - expected_public_service = {"key": key, "version": version} - - conn.execute(query) - return ServicesFixture( - expected_latest=expected_latest, - num_services=num_services, - expected_public_service=expected_public_service, - ) - - -def test_trial_queries_for_service_metadata( - services_fixture: ServicesFixture, pg_sa_engine: sa.engine.Engine -): - # check if service exists and whether is public or not - with pg_sa_engine.connect() as conn: - query = sa.select( - services_consume_filetypes.c.service_key, - sa.func.array_agg( - sa.func.distinct(services_consume_filetypes.c.filetype) - ).label("file_extensions"), - ).group_by(services_consume_filetypes.c.service_key) - - rows: list = conn.execute(query).fetchall() - print(rows) - - with pg_sa_engine.connect() as conn: - query = ( - sa.select( - services_consume_filetypes.c.service_key, - sa.text( - "array_to_string(MAX(string_to_array(version, '.')::int[]), '.') AS latest_version" - ), - sa.func.array_agg( - sa.func.distinct(services_consume_filetypes.c.filetype) - ).label("file_extensions"), - ) - .select_from( - services_meta_data.join( - services_consume_filetypes, - services_meta_data.c.key - == services_consume_filetypes.c.service_key, - ) - ) - .group_by(services_consume_filetypes.c.service_key) - ) - - rows: list = conn.execute(query).fetchall() - - with pg_sa_engine.connect() as conn: - # Select query for latest - latest_select_query = sa.select( - services_meta_data.c.key, - sa.text( - "array_to_string(MAX(string_to_array(version, '.')::int[]), '.') AS version" - ), - # sa.func.max( sa.func.string_to_array(services_meta_data.c.version, ".").cast(sa.ARRAY(sa.Integer)).alias("latest_version") ) - ).group_by(services_meta_data.c.key) - - print(latest_select_query) - rows: list = conn.execute(latest_select_query).fetchall() - - assert len(rows) == services_fixture.num_services - assert set(services_fixture.expected_latest.items()) == set(rows) - - # Insert from select query (kept for reference) - def _insert_latest(): - ins = services_latest.insert().from_select( - [services_latest.c.key, services_latest.c.version], latest_select_query - ) - print(ins) - - result = conn.execute(ins) # fills services_latest the first time - print(result) - - # Upsert from fetched value (alternative 1 - kept for reference) - def _upsert_with_fetched_values(): - for row in rows: - data = dict(row.items()) - upsert_query = ( - pg_insert(services_latest) - 
.values(**data) - .on_conflict_do_update( - index_elements=[ - services_latest.c.key, - ], - set_=dict(version=data["version"]), - ) - ) - - conn.execute(upsert_query) - - # Upsert from subquery (alternative 2) - query = pg_insert(services_latest).from_select( - [services_latest.c.key, services_latest.c.version], latest_select_query - ) - upsert_query = query.on_conflict_do_update( - index_elements=[ - services_latest.c.key, - ], - set_=dict(version=query.excluded.version), - ) - conn.execute(upsert_query) - - latest_values = conn.execute(services_latest.select()).fetchall() - assert latest_values == rows - - # list latest services - - with pg_sa_engine.connect() as conn: - query = sa.select( - services_meta_data.c.key, - services_meta_data.c.version, - services_access_rights.c.gid, - services_access_rights.c.execute_access, - services_access_rights.c.write_access, - services_access_rights.c.product_name, - ).select_from( - services_latest.join( - services_meta_data, - (services_meta_data.c.key == services_latest.c.key) - & (services_meta_data.c.version == services_latest.c.version), - ).join( - services_access_rights, - (services_meta_data.c.key == services_access_rights.c.key) - & (services_meta_data.c.version == services_access_rights.c.version), - ) - ) - print(query) - - query1 = query.where(services_meta_data.c.classifiers.contains(["osparc"])) - - query2 = query.where( - sa.func.array_length(services_meta_data.c.classifiers, 1) > 0 - ) - - # list services with gid=1 (x=1, w=0) and with type dynamic and classifier include osparc - query3 = query.where( - services_latest.c.key.like("simcore/services/dynamic/%%") - & services_meta_data.c.classifiers.contains(["osparc"]) - & (services_access_rights.c.gid == 1) - & (services_access_rights.c.execute_access == True) - ) - - for n, query in enumerate([query1, query2, query3]): - print("query", n, "-----") - rows = conn.execute(query).fetchall() - assert len(rows) <= services_fixture.num_services - print(rows) diff --git a/packages/postgres-database/tests/test_services_consume_filetypes.py b/packages/postgres-database/tests/test_services_consume_filetypes.py index f1530b764f6..efe0a083c6f 100644 --- a/packages/postgres-database/tests/test_services_consume_filetypes.py +++ b/packages/postgres-database/tests/test_services_consume_filetypes.py @@ -4,18 +4,18 @@ # pylint: disable=no-value-for-parameter -from typing import Callable +from collections.abc import Callable import pytest import sqlalchemy as sa from aiopg.sa.connection import SAConnection from aiopg.sa.exc import ResourceClosedError from aiopg.sa.result import ResultProxy, RowProxy -from pytest_simcore.helpers.utils_services import ( +from pytest_simcore.helpers.webserver_fake_services_data import ( FAKE_FILE_CONSUMER_SERVICES, list_supported_filetypes, ) -from simcore_postgres_database.errors import CheckViolation +from simcore_postgres_database.aiopg_errors import CheckViolation from simcore_postgres_database.models.services import services_meta_data from simcore_postgres_database.models.services_consume_filetypes import ( services_consume_filetypes, @@ -25,9 +25,7 @@ @pytest.fixture def make_table() -> Callable: async def _make(connection: SAConnection): - for service in FAKE_FILE_CONSUMER_SERVICES: - await connection.execute( services_meta_data.insert().values( key=service["key"], @@ -38,7 +36,7 @@ async def _make(connection: SAConnection): ) for n, consumable in enumerate(service["consumes"]): - filetype, port, *_ = consumable.split(":") + ["input_1"] + filetype, port, *_ = 
[*consumable.split(":"), "input_1"] result: ResultProxy = await connection.execute( services_consume_filetypes.insert().values( @@ -60,10 +58,16 @@ async def _make(connection: SAConnection): @pytest.fixture -async def connection(connection: SAConnection, make_table: Callable): +async def connection( + aiopg_engine: sa.engine.Engine, connection: SAConnection, make_table: Callable +): + assert aiopg_engine + # NOTE: do not remove th pg_engine, or the test will fail as pytest + # cannot set the parameters in the fixture + # EXTENDS await make_table(connection) - yield connection + return connection async def test_check_constraint(connection: SAConnection): @@ -78,6 +82,7 @@ async def test_check_constraint(connection: SAConnection): error = error_info.value assert error.pgcode == "23514" + assert error.pgerror assert "ck_filetype_is_upper" in error.pgerror @@ -109,9 +114,7 @@ async def test_get_supported_filetypes(connection: SAConnection): stmt = ( sa.select( - [ - services_consume_filetypes.c.filetype, - ] + services_consume_filetypes.c.filetype, ) .where( services_consume_filetypes.c.service_key @@ -122,7 +125,8 @@ async def test_get_supported_filetypes(connection: SAConnection): ) result: ResultProxy = await connection.execute(stmt) - rows: list[RowProxy] = await result.fetchall() + rows = await result.fetchall() + assert rows is not None assert [v for row in rows for v in row.values()] == ["DCM", "S4LCACHEDATA"] @@ -131,16 +135,15 @@ async def test_list_supported_filetypes(connection: SAConnection): stmt = ( sa.select( - [ - services_consume_filetypes.c.filetype, - ] + services_consume_filetypes.c.filetype, ) .order_by(services_consume_filetypes.c.filetype) .distinct() ) result: ResultProxy = await connection.execute(stmt) - rows: list[RowProxy] = await result.fetchall() + rows = await result.fetchall() + assert rows is not None assert [v for row in rows for v in row.values()] == list_supported_filetypes() @@ -155,11 +158,9 @@ async def test_contraints(connection: SAConnection): stmt = ( sa.select( - [ - sa.func.count(services_consume_filetypes.c.service_key).label( - "num_services" - ), - ] + sa.func.count(services_consume_filetypes.c.service_key).label( + "num_services" + ), ) .where(services_consume_filetypes.c.filetype == "DCM") .scalar_subquery() diff --git a/packages/postgres-database/tests/test_uniqueness_in_comp_tasks.py b/packages/postgres-database/tests/test_uniqueness_in_comp_tasks.py index 0d62b5f306d..2935a0de45a 100644 --- a/packages/postgres-database/tests/test_uniqueness_in_comp_tasks.py +++ b/packages/postgres-database/tests/test_uniqueness_in_comp_tasks.py @@ -1,14 +1,19 @@ -# pylint:disable=no-value-for-parameter -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments import json +from collections.abc import AsyncIterator +import aiopg.sa.engine +import aiopg.sa.exc import pytest import sqlalchemy as sa +import sqlalchemy.engine from psycopg2.errors import UniqueViolation # pylint: disable=no-name-in-module -from pytest_simcore.helpers.rawdata_fakers import fake_pipeline, fake_task_factory +from pytest_simcore.helpers import postgres_tools +from pytest_simcore.helpers.faker_factories import fake_pipeline, fake_task_factory from simcore_postgres_database.models.base import metadata from simcore_postgres_database.webserver_models import comp_pipeline, comp_tasks @@ -16,14 
+21,15 @@ @pytest.fixture -async def engine(make_engine): +async def engine( + sync_engine: sqlalchemy.engine.Engine, + aiopg_engine: aiopg.sa.engine.Engine, +) -> AsyncIterator[aiopg.sa.engine.Engine]: - engine = await make_engine() - sync_engine = make_engine(is_async=False) metadata.drop_all(sync_engine) metadata.create_all(sync_engine) - async with engine.acquire() as conn: + async with aiopg_engine.acquire() as conn: await conn.execute( comp_pipeline.insert().values(**fake_pipeline(project_id="PA")) ) @@ -31,16 +37,13 @@ async def engine(make_engine): comp_pipeline.insert().values(**fake_pipeline(project_id="PB")) ) - yield engine + yield aiopg_engine - engine.close() - await engine.wait_closed() + postgres_tools.force_drop_all_tables(sync_engine) async def test_unique_project_node_pairs(engine): - async with engine.acquire() as conn: - task_id = await conn.scalar( comp_tasks.insert().values(**fake_task(project_id="PA", node_id="N1")) ) @@ -61,7 +64,7 @@ async def test_unique_project_node_pairs(engine): ) task_inputs = await conn.scalar( - sa.select([comp_tasks.c.inputs]).where( + sa.select(comp_tasks.c.inputs).where( sa.and_( comp_tasks.c.project_id == "PB", comp_tasks.c.node_id == "N2", diff --git a/packages/postgres-database/tests/test_users.py b/packages/postgres-database/tests/test_users.py index b8fb8e9c2c2..8bfe2814ada 100644 --- a/packages/postgres-database/tests/test_users.py +++ b/packages/postgres-database/tests/test_users.py @@ -4,132 +4,187 @@ # pylint: disable=unused-variable from datetime import datetime, timedelta -from typing import Optional import pytest import sqlalchemy as sa -from aiopg.sa.engine import Engine +from aiopg.sa.connection import SAConnection from aiopg.sa.result import ResultProxy, RowProxy -from pytest_simcore.helpers.rawdata_fakers import random_user -from simcore_postgres_database.models.users import ( - _USER_ROLE_TO_LEVEL, - FullNameTuple, - UserNameConverter, - UserRole, - UserStatus, - users, +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_user +from simcore_postgres_database.aiopg_errors import ( + InvalidTextRepresentation, + UniqueViolation, +) +from simcore_postgres_database.models.users import UserRole, UserStatus, users +from simcore_postgres_database.utils_users import ( + UsersRepo, + _generate_username_from_email, + generate_alternative_username, ) from sqlalchemy.sql import func -def test_user_role_to_level_map_in_sync(): - # If fails, then update _USER_ROLE_TO_LEVEL map - assert set(_USER_ROLE_TO_LEVEL.keys()) == set(UserRole.__members__.keys()) - - -def test_user_role_comparison(): - - assert UserRole.ANONYMOUS < UserRole.ADMIN - assert UserRole.GUEST < UserRole.ADMIN - assert UserRole.USER < UserRole.ADMIN - assert UserRole.TESTER < UserRole.ADMIN - assert UserRole.ADMIN == UserRole.ADMIN - - assert UserRole.ANONYMOUS < UserRole.TESTER - assert UserRole.GUEST < UserRole.TESTER - assert UserRole.USER < UserRole.TESTER - assert UserRole.TESTER == UserRole.TESTER - assert UserRole.ADMIN > UserRole.TESTER +@pytest.fixture +async def clean_users_db_table(connection: SAConnection): + yield + await connection.execute(users.delete()) - assert UserRole.ANONYMOUS < UserRole.USER - assert UserRole.GUEST < UserRole.USER - assert UserRole.USER == UserRole.USER - assert UserRole.TESTER > UserRole.USER - assert UserRole.ADMIN > UserRole.USER - assert UserRole.ANONYMOUS < UserRole.GUEST - assert UserRole.GUEST == UserRole.GUEST - assert UserRole.USER > UserRole.GUEST - assert UserRole.TESTER > UserRole.GUEST - 
assert UserRole.ADMIN > UserRole.GUEST +async def test_user_status_as_pending( + connection: SAConnection, faker: Faker, clean_users_db_table: None +): + """Checks a bug where the expression - assert UserRole.ANONYMOUS == UserRole.ANONYMOUS - assert UserRole.GUEST > UserRole.ANONYMOUS - assert UserRole.USER > UserRole.ANONYMOUS - assert UserRole.TESTER > UserRole.ANONYMOUS - assert UserRole.ADMIN > UserRole.ANONYMOUS + `user_status = UserStatus(user["status"])` - # < and > - assert UserRole.TESTER < UserRole.ADMIN - assert UserRole.ADMIN > UserRole.TESTER + raise ValueError because **before** this change `UserStatus.CONFIRMATION_PENDING.value == "PENDING"` + """ + # after changing to UserStatus.CONFIRMATION_PENDING == "CONFIRMATION_PENDING" + with pytest.raises(ValueError): # noqa: PT011 + assert UserStatus("PENDING") == UserStatus.CONFIRMATION_PENDING - # >=, == and <= - assert UserRole.TESTER <= UserRole.ADMIN - assert UserRole.ADMIN >= UserRole.TESTER + assert UserStatus("CONFIRMATION_PENDING") == UserStatus.CONFIRMATION_PENDING + assert UserStatus.CONFIRMATION_PENDING.value == "CONFIRMATION_PENDING" + assert UserStatus.CONFIRMATION_PENDING == "CONFIRMATION_PENDING" + assert str(UserStatus.CONFIRMATION_PENDING) == "UserStatus.CONFIRMATION_PENDING" - assert UserRole.ADMIN <= UserRole.ADMIN - assert UserRole.ADMIN == UserRole.ADMIN - - -async def test_trial_accounts(pg_engine: Engine): - EXPIRATION_INTERVAL = timedelta(minutes=5) - - async with pg_engine.acquire() as conn: - - # creates trial user - client_now = datetime.utcnow() - user_id: Optional[int] = await conn.scalar( - users.insert() - .values( - **random_user( - status=UserStatus.ACTIVE, - # Using some magic from sqlachemy ... - expires_at=func.now() + EXPIRATION_INTERVAL, - ) - ) - .returning(users.c.id) - ) - assert user_id + # tests that the database never stores the word "PENDING" + data = random_user(faker, status="PENDING") + assert data["status"] == "PENDING" + with pytest.raises(InvalidTextRepresentation) as err_info: + await connection.execute(users.insert().values(data)) - # check expiration date - result: ResultProxy = await conn.execute( - sa.select([users.c.status, users.c.created_at, users.c.expires_at]).where( - users.c.id == user_id - ) - ) - row: Optional[RowProxy] = await result.first() - assert row - assert row.created_at - client_now < timedelta( - minutes=1 - ), "Difference between server and client now should not differ much" - assert row.expires_at - row.created_at == EXPIRATION_INTERVAL - assert row.status == UserStatus.ACTIVE - - # sets user as expired - await conn.execute( - users.update() - .values(status=UserStatus.EXPIRED) - .where(users.c.id == user_id) - ) + assert 'invalid input value for enum userstatus: "PENDING"' in f"{err_info.value}" @pytest.mark.parametrize( - "first_name,last_name", + "status_value", [ - ("Erdem", "Ofli"), - ("", "Ofli"), - ("Erdem", ""), - ("Dr. 
Erdem", "Ofli"), - ("Erdem", "Ofli PhD."), + UserStatus.CONFIRMATION_PENDING, + "CONFIRMATION_PENDING", ], ) -def test_user_name_conversions(first_name: str, last_name: str): - - # as 'update_user_profile' - full_name = FullNameTuple(first_name, last_name) - - # gets name - name = UserNameConverter.get_name(**full_name._asdict()) +async def test_user_status_inserted_as_enum_or_int( + status_value: UserStatus | str, + connection: SAConnection, + faker: Faker, + clean_users_db_table: None, +): + # insert as `status_value` + data = random_user(faker, status=status_value) + assert data["status"] == status_value + user_id = await connection.scalar(users.insert().values(data).returning(users.c.id)) + + # get as UserStatus.CONFIRMATION_PENDING + user = await ( + await connection.execute(users.select().where(users.c.id == user_id)) + ).first() + assert user + + assert UserStatus(user.status) == UserStatus.CONFIRMATION_PENDING + assert user.status == UserStatus.CONFIRMATION_PENDING + + +async def test_unique_username( + connection: SAConnection, faker: Faker, clean_users_db_table: None +): + data = random_user( + faker, + status=UserStatus.ACTIVE, + name="pcrespov", + email="p@email.com", + first_name="Pedro", + last_name="Crespo Valero", + ) + user_id = await connection.scalar(users.insert().values(data).returning(users.c.id)) + user = await ( + await connection.execute(users.select().where(users.c.id == user_id)) + ).first() + assert user + + assert user.id == user_id + assert user.name == "pcrespov" + + # same name fails + data["email"] = faker.email() + with pytest.raises(UniqueViolation): + await connection.scalar(users.insert().values(data).returning(users.c.id)) + + # generate new name + data["name"] = _generate_username_from_email(user.email) + data["email"] = faker.email() + await connection.scalar(users.insert().values(data).returning(users.c.id)) + + # and another one + data["name"] = generate_alternative_username(data["name"]) + data["email"] = faker.email() + await connection.scalar(users.insert().values(data).returning(users.c.id)) + + +async def test_new_user( + connection: SAConnection, faker: Faker, clean_users_db_table: None +): + data = { + "email": faker.email(), + "password_hash": "foo", + "status": UserStatus.ACTIVE, + "expires_at": datetime.utcnow(), + } + new_user = await UsersRepo.new_user(connection, **data) + + assert new_user.email == data["email"] + assert new_user.status == data["status"] + assert new_user.role == UserRole.USER + + other_email = f"{new_user.name}@other-domain.com" + assert _generate_username_from_email(other_email) == new_user.name + other_data = {**data, "email": other_email} + + other_user = await UsersRepo.new_user(connection, **other_data) + assert other_user.email != new_user.email + assert other_user.name != new_user.name + + assert await UsersRepo.get_email(connection, other_user.id) == other_user.email + assert await UsersRepo.get_role(connection, other_user.id) == other_user.role + assert ( + await UsersRepo.get_active_user_email(connection, other_user.id) + == other_user.email + ) + + +async def test_trial_accounts(connection: SAConnection, clean_users_db_table: None): + EXPIRATION_INTERVAL = timedelta(minutes=5) - # back to full_name - assert UserNameConverter.get_full_name(name) == full_name + # creates trial user + client_now = datetime.utcnow() + user_id: int | None = await connection.scalar( + users.insert() + .values( + **random_user( + status=UserStatus.ACTIVE, + # Using some magic from sqlachemy ... 
+ expires_at=func.now() + EXPIRATION_INTERVAL, + ) + ) + .returning(users.c.id) + ) + assert user_id + + # check expiration date + result: ResultProxy = await connection.execute( + sa.select(users.c.status, users.c.created_at, users.c.expires_at).where( + users.c.id == user_id + ) + ) + row: RowProxy | None = await result.first() + assert row + assert row.created_at - client_now < timedelta( + minutes=1 + ), "Difference between server and client now should not differ much" + assert row.expires_at - row.created_at == EXPIRATION_INTERVAL + assert row.status == UserStatus.ACTIVE + + # sets user as expired + await connection.execute( + users.update().values(status=UserStatus.EXPIRED).where(users.c.id == user_id) + ) diff --git a/packages/postgres-database/tests/test_users_details.py b/packages/postgres-database/tests/test_users_details.py new file mode 100644 index 00000000000..e4b6bfeb70f --- /dev/null +++ b/packages/postgres-database/tests/test_users_details.py @@ -0,0 +1,519 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import AsyncIterable, AsyncIterator +from contextlib import AsyncExitStack +from dataclasses import dataclass +from typing import Any, Protocol, Self + +import pytest +import sqlalchemy as sa +from common_library.groups_enums import GroupType +from common_library.users_enums import AccountRequestStatus +from faker import Faker +from pytest_simcore.helpers.faker_factories import ( + random_group, + random_pre_registration_details, + random_product, + random_user, +) +from pytest_simcore.helpers.postgres_tools import ( + insert_and_get_row_lifespan, +) +from simcore_postgres_database.models.groups import groups +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.users import UserRole, UserStatus, users +from simcore_postgres_database.models.users_details import ( + users_pre_registration_details, +) +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from simcore_postgres_database.utils_users import UsersRepo +from sqlalchemy.ext.asyncio import AsyncEngine + + +class CreateProductCallable(Protocol): + """Callable that creates a product and returns its row.""" + + async def __call__(self, name: str) -> dict[str, Any]: ... + + +@pytest.fixture +async def product_factory( + faker: Faker, + asyncpg_engine: AsyncEngine, +) -> AsyncIterator[CreateProductCallable]: + """Fixture that yields a factory function to create products. + + All products created with this factory will be automatically cleaned up when the test ends. + """ + async with AsyncExitStack() as exit_stack: + + async def _create_product(name: str) -> dict[str, Any]: + # 1. create a product group + product_group_row = await exit_stack.enter_async_context( + insert_and_get_row_lifespan( + asyncpg_engine, + table=groups, + values=random_group(fake=faker, type=GroupType.STANDARD.name), + pk_col=groups.c.gid, + ) + ) + + # 2. 
create the product using that group + product_name = name or faker.pystr(min_chars=3, max_chars=10) + return await exit_stack.enter_async_context( + insert_and_get_row_lifespan( + asyncpg_engine, + table=products, + values=random_product( + fake=faker, + name=product_name, + group_id=int(product_group_row["gid"]), + ), + pk_col=products.c.name, + ) + ) + + yield _create_product + + +@pytest.fixture +async def product(product_factory: CreateProductCallable) -> dict[str, Any]: + """Returns a single product for backward compatibility.""" + return await product_factory("s4l") + + +@pytest.fixture +async def product_owner_user( + faker: Faker, + asyncpg_engine: AsyncEngine, +) -> AsyncIterable[dict[str, Any]]: + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + asyncpg_engine, + table=users, + values=random_user( + faker, + email="po-user@email.com", + name="po-user-fixture", + role=UserRole.PRODUCT_OWNER, + ), + pk_col=users.c.id, + ) as row: + yield row + + +@dataclass +class UserAddress: + """Model for user address information from database records.""" + + line1: str | None + state: str | None + postal_code: str | None + city: str | None + country: str + + @classmethod + def create_from_db(cls, row) -> Self: + parts = ( + getattr(row, col_name) + for col_name in ("institution", "address") + if getattr(row, col_name) + ) + return cls( + line1=". ".join(parts), + state=row.state, + postal_code=row.postal_code, + city=row.city, + country=row.country, + ) + + +@pytest.fixture +async def pre_registered_user( + asyncpg_engine: AsyncEngine, + faker: Faker, + product_owner_user: dict[str, Any], + product: dict[str, Any], +) -> tuple[str, dict[str, Any]]: + """Creates a pre-registered user and returns the email and registration data.""" + product_name = product["name"] + fake_pre_registration_data = random_pre_registration_details( + faker, + pre_email="pre-registered@user.com", + created_by=product_owner_user["id"], + product_name=product_name, + ) + + async with transaction_context(asyncpg_engine) as connection: + pre_email = await connection.scalar( + sa.insert(users_pre_registration_details) + .values(**fake_pre_registration_data) + .returning(users_pre_registration_details.c.pre_email) + ) + + assert pre_email == fake_pre_registration_data["pre_email"] + return pre_email, fake_pre_registration_data + + +async def test_user_requests_account_and_is_approved( + asyncpg_engine: AsyncEngine, + faker: Faker, + product_owner_user: dict[str, Any], + product: dict[str, Any], +): + product_name = product["name"] + + # 1. User request an account + interested_user_email = "interested@user.com" + + async with transaction_context(asyncpg_engine) as connection: + pre_email = await connection.scalar( + sa.insert(users_pre_registration_details) + .values( + **random_pre_registration_details( + faker, + pre_email=interested_user_email, + product_name=product_name, + ) + ) + .returning(users_pre_registration_details.c.pre_email) + ) + assert pre_email is not None + assert pre_email == interested_user_email + + # 2. PO approves the account request + async with transaction_context(asyncpg_engine) as connection: + await connection.execute( + users_pre_registration_details.update() + .where(users_pre_registration_details.c.pre_email == pre_email) + .values( + account_request_status=AccountRequestStatus.APPROVED, + account_request_reviewed_by=product_owner_user["id"], + account_request_reviewed_at=sa.func.now(), + ) + ) + + # 3. 
Verify approval was recorded + async with pass_or_acquire_connection(asyncpg_engine) as connection: + result = await connection.execute( + sa.select( + users_pre_registration_details.c.account_request_status, + users_pre_registration_details.c.account_request_reviewed_by, + users_pre_registration_details.c.account_request_reviewed_at, + ).where(users_pre_registration_details.c.pre_email == pre_email) + ) + approval_record = result.one() + assert approval_record.account_request_status == AccountRequestStatus.APPROVED + assert approval_record.account_request_reviewed_by == product_owner_user["id"] + assert approval_record.account_request_reviewed_at is not None + + +@pytest.mark.acceptance_test( + "pre-registration link creation in https://github.com/ITISFoundation/osparc-simcore/issues/5138" +) +async def test_create_pre_registration_link( + asyncpg_engine: AsyncEngine, + faker: Faker, + product_owner_user: dict[str, Any], + product: dict[str, Any], +): + """Test that a PO can create a pre-registration link for a user.""" + product_name = product["name"] + + # PO creates a pre-registration and sends an email with the invitation link + fake_pre_registration_data = random_pre_registration_details( + faker, + pre_email="interested@user.com", + created_by=product_owner_user["id"], + product_name=product_name, + ) + + async with transaction_context(asyncpg_engine) as connection: + pre_email = await connection.scalar( + sa.insert(users_pre_registration_details) + .values(**fake_pre_registration_data) + .returning(users_pre_registration_details.c.pre_email) + ) + + assert pre_email is not None + assert pre_email == fake_pre_registration_data["pre_email"] + + +@pytest.mark.acceptance_test( + "pre-registration user creation in https://github.com/ITISFoundation/osparc-simcore/issues/5138" +) +async def test_create_and_link_user_from_pre_registration( + asyncpg_engine: AsyncEngine, + pre_registered_user: tuple[str, dict[str, Any]], +): + """Test that a user can be created from a pre-registration link and is linked properly.""" + pre_email, _ = pre_registered_user + + # Invitation link is clicked and the user is created and linked to the pre-registration + async with transaction_context(asyncpg_engine) as connection: + # user gets created + new_user = await UsersRepo.new_user( + connection, + email=pre_email, + password_hash="123456", # noqa: S106 + status=UserStatus.ACTIVE, + expires_at=None, + ) + await UsersRepo.link_and_update_user_from_pre_registration( + connection, new_user_id=new_user.id, new_user_email=new_user.email + ) + + # Verify the user was created and linked + async with pass_or_acquire_connection(asyncpg_engine) as connection: + result = await connection.execute( + sa.select(users_pre_registration_details.c.user_id).where( + users_pre_registration_details.c.pre_email == pre_email + ) + ) + user_id = result.scalar() + assert user_id == new_user.id + + +@pytest.mark.acceptance_test( + "pre-registration billing info in https://github.com/ITISFoundation/osparc-simcore/issues/5138" +) +async def test_get_billing_details_from_pre_registration( + asyncpg_engine: AsyncEngine, + pre_registered_user: tuple[str, dict[str, Any]], +): + """Test that billing details can be retrieved from pre-registration data.""" + pre_email, fake_pre_registration_data = pre_registered_user + + # Create the user + async with transaction_context(asyncpg_engine) as connection: + new_user = await UsersRepo.new_user( + connection, + email=pre_email, + password_hash="123456", # noqa: S106 + status=UserStatus.ACTIVE, + 
expires_at=None, + ) + await UsersRepo.link_and_update_user_from_pre_registration( + connection, new_user_id=new_user.id, new_user_email=new_user.email + ) + + # Get billing details + async with pass_or_acquire_connection(asyncpg_engine) as connection: + invoice_data = await UsersRepo.get_billing_details( + connection, user_id=new_user.id + ) + assert invoice_data is not None + + # Test UserAddress model conversion + user_address = UserAddress.create_from_db(invoice_data) + + # Verify address fields match the pre-registration data + assert user_address.line1 + assert user_address.state == fake_pre_registration_data["state"] + assert user_address.postal_code == fake_pre_registration_data["postal_code"] + assert user_address.country == fake_pre_registration_data["country"] + + +@pytest.mark.acceptance_test( + "pre-registration user update in https://github.com/ITISFoundation/osparc-simcore/issues/5138" +) +async def test_update_user_from_pre_registration( + asyncpg_engine: AsyncEngine, + pre_registered_user: tuple[str, dict[str, Any]], +): + """Test that pre-registration details override manual updates when re-linking.""" + pre_email, _ = pre_registered_user + + # Create the user and link to pre-registration + async with transaction_context(asyncpg_engine) as connection: + new_user = await UsersRepo.new_user( + connection, + email=pre_email, + password_hash="123456", # noqa: S106 + status=UserStatus.ACTIVE, + expires_at=None, + ) + await UsersRepo.link_and_update_user_from_pre_registration( + connection, new_user_id=new_user.id, new_user_email=new_user.email + ) + + # Update the user manually + async with transaction_context(asyncpg_engine) as connection: + result = await connection.execute( + users.update() + .values(first_name="My New Name") + .where(users.c.id == new_user.id) + .returning("*") + ) + updated_user = result.one() + + assert updated_user + assert updated_user.first_name == "My New Name" + assert updated_user.id == new_user.id + + # Re-link the user to pre-registration, which should override manual updates + async with transaction_context(asyncpg_engine) as connection: + await UsersRepo.link_and_update_user_from_pre_registration( + connection, new_user_id=new_user.id, new_user_email=new_user.email + ) + + result = await connection.execute( + users.select().where(users.c.id == new_user.id) + ) + current_user = result.one() + assert current_user + + # Verify that the manual updates were overridden + assert current_user.first_name != updated_user.first_name + + +async def test_user_preregisters_for_multiple_products_with_different_outcomes( + asyncpg_engine: AsyncEngine, + faker: Faker, + product_owner_user: dict[str, Any], + product_factory: CreateProductCallable, +): + """Test scenario where a user pre-registers for multiple products and gets different approval outcomes.""" + # Create two products + product1 = await product_factory("s4l") + product2 = await product_factory("tip") + + # User email for pre-registration + user_email = "multi-product-user@example.com" + + # User pre-registers for both products + async with transaction_context(asyncpg_engine) as connection: + # Pre-register for product1 + await connection.execute( + sa.insert(users_pre_registration_details).values( + **random_pre_registration_details( + faker, + pre_email=user_email, + product_name=product1["name"], + ) + ) + ) + + # Pre-register for product2 + await connection.execute( + sa.insert(users_pre_registration_details).values( + **random_pre_registration_details( + faker, + pre_email=user_email, + 
product_name=product2["name"], + ) + ) + ) + + # Verify both pre-registrations were created + async with pass_or_acquire_connection(asyncpg_engine) as connection: + result = await connection.execute( + sa.select( + users_pre_registration_details.c.pre_email, + users_pre_registration_details.c.product_name, + users_pre_registration_details.c.account_request_status, + ) + .where(users_pre_registration_details.c.pre_email == user_email) + .order_by(users_pre_registration_details.c.product_name) + ) + + registrations = result.fetchall() + assert len(registrations) == 2 + assert all( + reg.account_request_status == AccountRequestStatus.PENDING + for reg in registrations + ) + + # 2. PO approves and rejects the requests + async with transaction_context(asyncpg_engine) as connection: + # PO approves the request for product1 + await connection.execute( + users_pre_registration_details.update() + .where( + (users_pre_registration_details.c.pre_email == user_email) + & (users_pre_registration_details.c.product_name == product1["name"]) + ) + .values( + account_request_status=AccountRequestStatus.APPROVED, + account_request_reviewed_by=product_owner_user["id"], + account_request_reviewed_at=sa.func.now(), + ) + ) + + # PO rejects the request for product2 + await connection.execute( + users_pre_registration_details.update() + .where( + (users_pre_registration_details.c.pre_email == user_email) + & (users_pre_registration_details.c.product_name == product2["name"]) + ) + .values( + account_request_status=AccountRequestStatus.REJECTED, + account_request_reviewed_by=product_owner_user["id"], + account_request_reviewed_at=sa.func.now(), + ) + ) + + # Verify the status updates + async with pass_or_acquire_connection(asyncpg_engine) as connection: + result = await connection.execute( + sa.select( + users_pre_registration_details.c.product_name, + users_pre_registration_details.c.account_request_status, + users_pre_registration_details.c.account_request_reviewed_by, + users_pre_registration_details.c.account_request_reviewed_at, + ) + .where(users_pre_registration_details.c.pre_email == user_email) + .order_by(users_pre_registration_details.c.created) + ) + + registrations = result.fetchall() + assert len(registrations) == 2 + + # Check product1 was approved + assert registrations[0].product_name == product1["name"] + assert registrations[0].account_request_status == AccountRequestStatus.APPROVED + assert registrations[0].account_request_reviewed_by == product_owner_user["id"] + assert registrations[0].account_request_reviewed_at is not None + + # Check product2 was rejected + assert registrations[1].product_name == product2["name"] + assert registrations[1].account_request_status == AccountRequestStatus.REJECTED + assert registrations[1].account_request_reviewed_by == product_owner_user["id"] + assert registrations[1].account_request_reviewed_at is not None + + # 3.Now create a user account with the approved pre-registration + async with transaction_context(asyncpg_engine) as connection: + new_user = await UsersRepo.new_user( + connection, + email=user_email, + password_hash="123456", # noqa: S106 + status=UserStatus.ACTIVE, + expires_at=None, + ) + await UsersRepo.link_and_update_user_from_pre_registration( + connection, new_user_id=new_user.id, new_user_email=new_user.email + ) + + # Verify both pre-registrations are linked to the new user + async with pass_or_acquire_connection(asyncpg_engine) as connection: + result = await connection.execute( + sa.select( + 
users_pre_registration_details.c.product_name, + users_pre_registration_details.c.account_request_status, + users_pre_registration_details.c.user_id, + ) + .where(users_pre_registration_details.c.pre_email == user_email) + .order_by(users_pre_registration_details.c.product_name) + ) + + registrations = result.fetchall() + assert len(registrations) == 2 + + # Both registrations should be linked to the same user, regardless of approval status + assert all(reg.user_id == new_user.id for reg in registrations) diff --git a/packages/postgres-database/tests/test_utils.py b/packages/postgres-database/tests/test_utils.py index 2d0ce6cc014..910ab84149b 100644 --- a/packages/postgres-database/tests/test_utils.py +++ b/packages/postgres-database/tests/test_utils.py @@ -3,7 +3,13 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from simcore_postgres_database.utils import hide_dict_pass, hide_url_pass +import sqlalchemy as sa +from simcore_postgres_database.models.users import users +from simcore_postgres_database.utils import ( + as_postgres_sql_query_str, + hide_dict_pass, + hide_url_pass, +) from yarl import URL @@ -21,3 +27,13 @@ def test_hide_dict_pass(): "pass": "********", "password": "********", } + + +def test_as_postgres_sql_query_str(): + + assert ( + as_postgres_sql_query_str( + sa.select(users.c.name).where(users.c.id == 1) + ).replace("\n", "") + == "SELECT users.name FROM users WHERE users.id = 1" + ) diff --git a/packages/postgres-database/tests/test_utils_aiopg_orm.py b/packages/postgres-database/tests/test_utils_aiopg_orm.py index 456c23fb54c..2905a3f3a87 100644 --- a/packages/postgres-database/tests/test_utils_aiopg_orm.py +++ b/packages/postgres-database/tests/test_utils_aiopg_orm.py @@ -4,8 +4,8 @@ # pylint: disable=unused-variable +from collections.abc import Iterator from datetime import datetime -from typing import Iterator import pytest from aiopg.sa.connection import SAConnection @@ -16,12 +16,12 @@ @pytest.fixture -async def fake_scicrunch_ids(pg_engine: Engine) -> list[str]: - row1 = dict(rrid="RRID:foo", name="foo", description="fooing") - row2 = dict(rrid="RRID:bar", name="bar", description="barring") +async def fake_scicrunch_ids(aiopg_engine: Engine) -> list[str]: + row1 = {"rrid": "RRID:foo", "name": "foo", "description": "fooing"} + row2 = {"rrid": "RRID:bar", "name": "bar", "description": "barring"} row_ids = [] - async with pg_engine.acquire() as conn: + async with aiopg_engine.acquire() as conn: for row in (row1, row2): row_id = await conn.scalar( scicrunch_resources.insert() @@ -35,7 +35,7 @@ async def fake_scicrunch_ids(pg_engine: Engine) -> list[str]: @pytest.fixture() -async def scicrunch_orm(pg_engine: Engine) -> Iterator[BaseOrm[str]]: +async def scicrunch_orm(aiopg_engine: Engine) -> Iterator[BaseOrm[str]]: # This is a table without dependencies and therefore easy to use as fixture class ScicrunchOrm(BaseOrm[str]): def __init__(self, connection: SAConnection): @@ -46,7 +46,7 @@ def __init__(self, connection: SAConnection): writeonce={"rrid"}, ) - async with pg_engine.acquire() as conn: + async with aiopg_engine.acquire() as conn: orm_obj = ScicrunchOrm(conn) yield orm_obj @@ -299,3 +299,4 @@ async def test_rowproxy(scicrunch_orm: BaseOrm[str], fake_scicrunch_ids: list[st # to list[dict]: warning ... sometimes rows are None when in first() or fetchone()... 
list_of_dicts = [dict(row.items()) for row in rows if row] + assert list_of_dicts diff --git a/packages/postgres-database/tests/test_utils_groups_extra_properties.py b/packages/postgres-database/tests/test_utils_groups_extra_properties.py new file mode 100644 index 00000000000..05de0f67809 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_groups_extra_properties.py @@ -0,0 +1,367 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import random +from collections.abc import AsyncIterator, Awaitable, Callable + +import aiopg.sa +import pytest +import sqlalchemy +from aiopg.sa.result import RowProxy +from faker import Faker +from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups +from simcore_postgres_database.models.groups_extra_properties import ( + groups_extra_properties, +) +from simcore_postgres_database.utils_groups_extra_properties import ( + GroupExtraProperties, + GroupExtraPropertiesNotFoundError, + GroupExtraPropertiesRepo, +) +from sqlalchemy import literal_column +from sqlalchemy.engine.row import Row +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + + +async def test_get_raises_if_not_found( + asyncpg_engine: AsyncEngine, + faker: Faker, +): + with pytest.raises(GroupExtraPropertiesNotFoundError): + await GroupExtraPropertiesRepo.get_v2( + asyncpg_engine, + gid=faker.pyint(min_value=1), + product_name=faker.pystr(), + ) + + +@pytest.fixture +async def registered_user( + connection: aiopg.sa.connection.SAConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], +): + return await create_fake_user(connection) + + +@pytest.fixture +def product_name(faker: Faker) -> str: + return faker.pystr() + + +@pytest.fixture +async def create_fake_group_extra_properties( + connection: aiopg.sa.connection.SAConnection, +) -> AsyncIterator[Callable[..., Awaitable[GroupExtraProperties]]]: + created_properties = [] + + async def _creator( + gid: int, product_name: str, **group_extra_properties_kwars + ) -> GroupExtraProperties: + result = await connection.execute( + sqlalchemy.insert(groups_extra_properties) + .values( + group_id=gid, product_name=product_name, **group_extra_properties_kwars + ) + .returning(literal_column("*")) + ) + assert result + row = await result.first() + assert row + properties = GroupExtraProperties.from_row(row) + created_properties.append((properties.group_id, properties.product_name)) + return properties + + yield _creator + + for group_id, product_name in created_properties: + await connection.execute( + sqlalchemy.delete(groups_extra_properties).where( + (groups_extra_properties.c.group_id == group_id) + & (groups_extra_properties.c.product_name == product_name) + ) + ) + + +async def test_get_v2( + asyncpg_engine: AsyncEngine, + registered_user: RowProxy, + product_name: str, + create_fake_product: Callable[[str], Awaitable[Row]], + create_fake_group_extra_properties: Callable[..., Awaitable[GroupExtraProperties]], +): + with pytest.raises(GroupExtraPropertiesNotFoundError): + await GroupExtraPropertiesRepo.get_v2( + asyncpg_engine, gid=registered_user.primary_gid, product_name=product_name + ) + + await create_fake_product(product_name) + created_extra_properties = await create_fake_group_extra_properties( + registered_user.primary_gid, product_name + ) + received_extra_properties = await GroupExtraPropertiesRepo.get_v2( + asyncpg_engine, gid=registered_user.primary_gid, 
product_name=product_name + ) + assert created_extra_properties == received_extra_properties + + +@pytest.fixture +async def everyone_group_id(connection: aiopg.sa.connection.SAConnection) -> int: + result = await connection.scalar( + sqlalchemy.select(groups.c.gid).where(groups.c.type == GroupType.EVERYONE) + ) + assert result + return result + + +async def test_get_aggregated_properties_for_user_with_no_entries_raises( + connection_factory: aiopg.sa.connection.SAConnection | AsyncConnection, + product_name: str, + registered_user: RowProxy, +): + with pytest.raises(GroupExtraPropertiesNotFoundError): + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + + +async def _add_user_to_group( + connection: aiopg.sa.connection.SAConnection | AsyncConnection, + *, + user_id: int, + group_id: int, +) -> None: + await connection.execute( + sqlalchemy.insert(user_to_groups).values(uid=user_id, gid=group_id) + ) + + +async def test_get_aggregated_properties_for_user_returns_properties_in_expected_priority( + connection: aiopg.sa.connection.SAConnection, + connection_factory: aiopg.sa.connection.SAConnection | AsyncConnection, + product_name: str, + registered_user: RowProxy, + create_fake_product: Callable[[str], Awaitable[Row]], + create_fake_group: Callable[..., Awaitable[RowProxy]], + create_fake_group_extra_properties: Callable[..., Awaitable[GroupExtraProperties]], + everyone_group_id: int, +): + await create_fake_product(product_name) + await create_fake_product(f"{product_name}_additional_just_for_fun") + + # let's create a few groups + created_groups = [await create_fake_group(connection) for _ in range(5)] + + # create a specific extra properties for group everyone + everyone_group_extra_properties = await create_fake_group_extra_properties( + everyone_group_id, product_name + ) + + # this should return the everyone group properties + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == everyone_group_extra_properties + + # let's add the user in these groups + for group in created_groups: + await _add_user_to_group( + connection_factory, user_id=registered_user.id, group_id=group.gid + ) + + # this changes nothing + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == everyone_group_extra_properties + + # now create some extra properties + standard_group_extra_properties = [ + await create_fake_group_extra_properties(group.gid, product_name) + for group in created_groups + ] + + # this returns the last properties created + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties != everyone_group_extra_properties + assert aggregated_group_properties == standard_group_extra_properties[0] + + # now create some personal extra properties + personal_group_extra_properties = await create_fake_group_extra_properties( + registered_user.primary_gid, product_name + ) + # this now returns the primary properties + aggregated_group_properties = ( + await 
GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == personal_group_extra_properties + + +async def test_get_aggregated_properties_for_user_returns_properties_in_expected_priority_without_everyone_group( + connection: aiopg.sa.connection.SAConnection, + connection_factory: aiopg.sa.connection.SAConnection | AsyncConnection, + product_name: str, + registered_user: RowProxy, + create_fake_product: Callable[[str], Awaitable[Row]], + create_fake_group: Callable[..., Awaitable[RowProxy]], + create_fake_group_extra_properties: Callable[..., Awaitable[GroupExtraProperties]], + everyone_group_id: int, +): + await create_fake_product(product_name) + await create_fake_product(f"{product_name}_additional_just_for_fun") + + # let's create a few groups + created_groups = [await create_fake_group(connection) for _ in range(5)] + # let's add the user in these groups + for group in created_groups: + await _add_user_to_group( + connection_factory, user_id=registered_user.id, group_id=group.gid + ) + + # now create some extra properties + standard_group_extra_properties = [ + await create_fake_group_extra_properties(group.gid, product_name) + for group in created_groups + ] + + # this returns the last properties created + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == standard_group_extra_properties[0] + + # now create some personal extra properties + personal_group_extra_properties = await create_fake_group_extra_properties( + registered_user.primary_gid, product_name + ) + # this now returns the primary properties + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == personal_group_extra_properties + + +async def test_get_aggregated_properties_for_user_returns_property_values_as_truthy_if_one_of_them_is( + connection: aiopg.sa.connection.SAConnection, + connection_factory: aiopg.sa.connection.SAConnection | AsyncConnection, + product_name: str, + registered_user: RowProxy, + create_fake_product: Callable[[str], Awaitable[Row]], + create_fake_group: Callable[..., Awaitable[RowProxy]], + create_fake_group_extra_properties: Callable[..., Awaitable[GroupExtraProperties]], + everyone_group_id: int, +): + await create_fake_product(product_name) + await create_fake_product(f"{product_name}_additional_just_for_fun") + + # create a specific extra properties for group that disallow everything + everyone_group_extra_properties = await create_fake_group_extra_properties( + everyone_group_id, + product_name, + internet_access=False, + override_services_specifications=False, + use_on_demand_clusters=False, + ) + # this should return the everyone group properties + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties == everyone_group_extra_properties + + # now we create some standard groups and add the user to them and make everything false for now + standard_groups = [await create_fake_group(connection) for _ in range(5)] + for group in standard_groups: + await 
create_fake_group_extra_properties( + group.gid, + product_name, + internet_access=False, + override_services_specifications=False, + use_on_demand_clusters=False, + ) + await _add_user_to_group( + connection_factory, user_id=registered_user.id, group_id=group.gid + ) + + # now we still should not have any of these value Truthy + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties.internet_access is False + assert aggregated_group_properties.override_services_specifications is False + assert aggregated_group_properties.use_on_demand_clusters is False + + # let's change one of these standard groups + random_standard_group = random.choice(standard_groups) # noqa: S311 + result = await connection.execute( + groups_extra_properties.update() + .where(groups_extra_properties.c.group_id == random_standard_group.gid) + .values(internet_access=True) + ) + assert result.rowcount == 1 + + # now we should have internet access + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties.internet_access is True + assert aggregated_group_properties.override_services_specifications is False + assert aggregated_group_properties.use_on_demand_clusters is False + + # let's change another one of these standard groups + random_standard_group = random.choice(standard_groups) # noqa: S311 + result = await connection.execute( + groups_extra_properties.update() + .where(groups_extra_properties.c.group_id == random_standard_group.gid) + .values(override_services_specifications=True) + ) + assert result.rowcount == 1 + + # now we should have internet access and service override + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties.internet_access is True + assert aggregated_group_properties.override_services_specifications is True + assert aggregated_group_properties.use_on_demand_clusters is False + + # and we can deny it again by setting a primary extra property + # now create some personal extra properties + personal_group_extra_properties = await create_fake_group_extra_properties( + registered_user.primary_gid, + product_name, + internet_access=False, + use_on_demand_clusters=True, + ) + assert personal_group_extra_properties + + aggregated_group_properties = ( + await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + connection_factory, user_id=registered_user.id, product_name=product_name + ) + ) + assert aggregated_group_properties.internet_access is False + assert aggregated_group_properties.override_services_specifications is False + assert aggregated_group_properties.use_on_demand_clusters is True diff --git a/packages/postgres-database/tests/test_utils_migration.py b/packages/postgres-database/tests/test_utils_migration.py index ec066f445af..d75006badd6 100644 --- a/packages/postgres-database/tests/test_utils_migration.py +++ b/packages/postgres-database/tests/test_utils_migration.py @@ -3,9 +3,13 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable + import pytest +import simcore_postgres_database.cli +import sqlalchemy.engine from alembic.script.revision import MultipleHeads 
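The aggregation tests above encode the resolution order: the user's personal (primary-gid) entry wins over standard-group entries, standard groups win over the EVERYONE group, and among standard groups a boolean flag is truthy if any of them enables it. A usage sketch under the same assumptions as the tests (the helper name is illustrative):

    from simcore_postgres_database.utils_groups_extra_properties import (
        GroupExtraPropertiesRepo,
    )


    async def user_has_internet_access(connection, *, user_id: int, product_name: str) -> bool:
        # connection: aiopg SAConnection or SQLAlchemy AsyncConnection, as in the tests
        props = await GroupExtraPropertiesRepo.get_aggregated_properties_for_user(
            connection, user_id=user_id, product_name=product_name
        )
        # personal entry > standard groups (OR-ed flags) > everyone group
        return props.internet_access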
from simcore_postgres_database.utils_migration import get_current_head +from sqlalchemy import inspect def test_migration_has_no_branches(): @@ -17,3 +21,30 @@ def test_migration_has_no_branches(): pytest.fail( f"This project migration expected a single head (i.e. no branches): {err}" ) + + +def test_migration_upgrade_downgrade(sync_engine: sqlalchemy.engine.Engine): + + assert sync_engine + assert simcore_postgres_database.cli.discover.callback + assert simcore_postgres_database.cli.upgrade.callback + dsn = sync_engine.url + # upgrade... + simcore_postgres_database.cli.discover.callback( + user=dsn.username, + password=dsn.password, + host=dsn.host, + database=dsn.database, + port=dsn.port, + ) + simcore_postgres_database.cli.upgrade.callback("head") + # downgrade... + assert simcore_postgres_database.cli.downgrade.callback + assert simcore_postgres_database.cli.clean.callback + simcore_postgres_database.cli.downgrade.callback("base") + simcore_postgres_database.cli.clean.callback() # just cleans discover cache + inspector = inspect(sync_engine) + + assert inspector.get_table_names() == [ + "alembic_version" + ], "Only the alembic table should remain, please check!!!" diff --git a/packages/postgres-database/tests/test_utils_payments_autorecharge.py b/packages/postgres-database/tests/test_utils_payments_autorecharge.py new file mode 100644 index 00000000000..1746b8720cc --- /dev/null +++ b/packages/postgres-database/tests/test_utils_payments_autorecharge.py @@ -0,0 +1,137 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import datetime +from typing import TypeAlias + +import pytest +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_payment_method, utcnow +from simcore_postgres_database.models.payments_methods import ( + InitPromptAckFlowState, + payments_methods, +) +from simcore_postgres_database.utils_payments_autorecharge import AutoRechargeStmts + +# +# HELPERS +# + + +async def _get_auto_recharge(connection, wallet_id) -> RowProxy | None: + # has recharge trigger? 
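The upgrade/downgrade test above drives the package's click commands directly through their callbacks. The same round-trip could be scripted roughly as below; only the calls visible in the test are reused, and the connection settings are placeholders:

    import simcore_postgres_database.cli as pg_cli

    # placeholder credentials; replace with the real DSN parts
    pg_cli.discover.callback(
        user="scu", password="secret", host="localhost", database="simcoredb", port=5432
    )
    pg_cli.upgrade.callback("head")    # apply all migrations
    pg_cli.downgrade.callback("base")  # revert them again
    pg_cli.clean.callback()            # clears the cached discovery settings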
+ stmt = AutoRechargeStmts.get_wallet_autorecharge(wallet_id) + result = await connection.execute(stmt) + return await result.first() + + +async def _is_valid_payment_method( + connection, user_id, wallet_id, payment_method_id +) -> bool: + + stmt = AutoRechargeStmts.is_valid_payment_method( + user_id, wallet_id, payment_method_id + ) + primary_payment_method_id = await connection.scalar(stmt) + return primary_payment_method_id == payment_method_id + + +async def _upsert_autorecharge( + connection, + wallet_id, + enabled, + primary_payment_method_id, + top_up_amount_in_usd, + monthly_limit_in_usd, +) -> RowProxy: + # using this primary payment-method, create an autorecharge + # NOTE: requires the entire + stmt = AutoRechargeStmts.upsert_wallet_autorecharge( + wallet_id=wallet_id, + enabled=enabled, + primary_payment_method_id=primary_payment_method_id, + top_up_amount_in_usd=top_up_amount_in_usd, + monthly_limit_in_usd=monthly_limit_in_usd, + ) + row = await (await connection.execute(stmt)).first() + assert row + return row + + +async def _update_autorecharge(connection, wallet_id, **settings) -> int | None: + stmt = AutoRechargeStmts.update_wallet_autorecharge(wallet_id, **settings) + return await connection.scalar(stmt) + + +PaymentMethodRow: TypeAlias = RowProxy + + +@pytest.fixture +async def payment_method(connection: SAConnection, faker: Faker) -> PaymentMethodRow: + payment_method_id = faker.uuid4().upper() + + raw_payment_method = random_payment_method( + payment_method_id=payment_method_id, + initiated_at=utcnow(), + completed_at=utcnow() + datetime.timedelta(seconds=1), + state=InitPromptAckFlowState.SUCCESS, + ) + result = await connection.execute( + payments_methods.insert() + .values(**raw_payment_method) + .returning(sa.literal_column("*")) + ) + row = await result.first() + assert row + assert row.payment_method_id == payment_method_id + wallet_id = row.wallet_id + user_id = row.user_id + + assert await _is_valid_payment_method( + connection, user_id, wallet_id, payment_method_id + ) + return row + + +async def test_payments_automation_workflow( + connection: SAConnection, payment_method: PaymentMethodRow +): + payment_method_id = payment_method.payment_method_id + wallet_id = payment_method.wallet_id + + # get + auto_recharge = await _get_auto_recharge(connection, wallet_id) + assert auto_recharge is None + + # new + await _upsert_autorecharge( + connection, + wallet_id, + enabled=True, + primary_payment_method_id=payment_method_id, + top_up_amount_in_usd=100, + monthly_limit_in_usd=None, + ) + + auto_recharge = await _get_auto_recharge(connection, wallet_id) + assert auto_recharge is not None + assert auto_recharge.primary_payment_method_id == payment_method_id + assert auto_recharge.enabled is True + + # upsert: deactivate countdown + auto_recharge = await _upsert_autorecharge( + connection, + wallet_id, + enabled=True, + primary_payment_method_id=payment_method_id, + top_up_amount_in_usd=100, + monthly_limit_in_usd=10000, # <---- + ) + assert auto_recharge.monthly_limit_in_usd == 10000 + + await _update_autorecharge(connection, wallet_id, monthly_limit_in_usd=None) diff --git a/packages/postgres-database/tests/test_utils_projects.py b/packages/postgres-database/tests/test_utils_projects.py new file mode 100644 index 00000000000..be4cde5f180 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_projects.py @@ -0,0 +1,90 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: 
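The autorecharge helpers above are thin wrappers around AutoRechargeStmts, which only builds statements; the caller executes them on an aiopg connection. Roughly, under the same assumptions as the test (the function name is illustrative):

    from simcore_postgres_database.utils_payments_autorecharge import AutoRechargeStmts


    async def enable_autorecharge(connection, *, wallet_id: int, payment_method_id: str):
        # AutoRechargeStmts builds the statement; the caller executes it on an aiopg connection
        stmt = AutoRechargeStmts.upsert_wallet_autorecharge(
            wallet_id=wallet_id,
            enabled=True,
            primary_payment_method_id=payment_method_id,
            top_up_amount_in_usd=100,
            monthly_limit_in_usd=None,  # None == no monthly cap, as in the test
        )
        result = await connection.execute(stmt)
        return await result.first()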
disable=too-many-arguments +import uuid +from collections.abc import AsyncIterator, Awaitable, Callable +from datetime import UTC, datetime +from typing import Any + +import pytest +import sqlalchemy as sa +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from faker import Faker +from pydantic import TypeAdapter +from simcore_postgres_database.models.projects import projects +from simcore_postgres_database.utils_projects import ( + DBProjectNotFoundError, + ProjectsRepo, +) +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.ext.asyncio import AsyncEngine + + +async def _delete_project(connection: SAConnection, project_uuid: uuid.UUID) -> None: + result = await connection.execute( + sa.delete(projects).where(projects.c.uuid == f"{project_uuid}") + ) + assert result.rowcount == 1 + + +@pytest.fixture +async def registered_user( + connection: SAConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], +) -> RowProxy: + user = await create_fake_user(connection) + assert user + return user + + +@pytest.fixture +async def registered_project( + connection: SAConnection, + registered_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], +) -> AsyncIterator[dict[str, Any]]: + project = await create_fake_project(connection, registered_user) + assert project + + yield dict(project) + + await _delete_project(connection, project["uuid"]) + + +@pytest.mark.parametrize("expected", (datetime.now(tz=UTC), None)) +async def test_get_project_trashed_column_can_be_converted_to_datetime( + asyncpg_engine: AsyncEngine, registered_project: dict, expected: datetime | None +): + project_id = registered_project["uuid"] + + async with transaction_context(asyncpg_engine) as conn: + result = await conn.execute( + projects.update() + .values(trashed=expected) + .where(projects.c.uuid == project_id) + .returning(sa.literal_column("*")) + ) + + row = result.fetchone() + + assert row + trashed = TypeAdapter(datetime | None).validate_python(row.trashed) + assert trashed == expected + + +async def test_get_project_last_change_date( + asyncpg_engine: AsyncEngine, registered_project: dict, faker: Faker +): + projects_repo = ProjectsRepo(asyncpg_engine) + + project_last_change_date = await projects_repo.get_project_last_change_date( + project_uuid=registered_project["uuid"] + ) + assert isinstance(project_last_change_date, datetime) + + with pytest.raises(DBProjectNotFoundError): + await projects_repo.get_project_last_change_date( + project_uuid=faker.uuid4() # <-- Non existing uuid in DB + ) diff --git a/packages/postgres-database/tests/test_utils_projects_metadata.py b/packages/postgres-database/tests/test_utils_projects_metadata.py new file mode 100644 index 00000000000..117530287dc --- /dev/null +++ b/packages/postgres-database/tests/test_utils_projects_metadata.py @@ -0,0 +1,393 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import uuid +from collections.abc import Awaitable, Callable +from uuid import UUID + +import pytest +from aiopg.sa.connection import SAConnection +from aiopg.sa.result import RowProxy +from faker import Faker +from simcore_postgres_database import utils_projects_metadata +from simcore_postgres_database.utils_projects_metadata import ( + DBProjectInvalidAncestorsError, + DBProjectInvalidParentNodeError, + DBProjectInvalidParentProjectError, + DBProjectNotFoundError, +) +from 
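ProjectsRepo in the test above is constructed from the asyncpg engine and raises DBProjectNotFoundError for unknown uuids. A minimal sketch of that call; the wrapper and its None fallback are illustrative:

    from datetime import datetime

    from simcore_postgres_database.utils_projects import (
        DBProjectNotFoundError,
        ProjectsRepo,
    )
    from sqlalchemy.ext.asyncio import AsyncEngine


    async def last_change_or_none(engine: AsyncEngine, project_uuid: str) -> datetime | None:
        # hypothetical wrapper around the repo call exercised in the test
        repo = ProjectsRepo(engine)
        try:
            return await repo.get_project_last_change_date(project_uuid=project_uuid)
        except DBProjectNotFoundError:
            return None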
simcore_postgres_database.utils_projects_nodes import ProjectNode +from sqlalchemy.ext.asyncio import AsyncConnection + + +@pytest.fixture +async def fake_user( + connection: SAConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], +) -> RowProxy: + user: RowProxy = await create_fake_user(connection, name=f"user.{__name__}") + return user + + +@pytest.fixture +async def fake_project( + connection: SAConnection, + fake_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_nodes: Callable[..., Awaitable[RowProxy]], +) -> RowProxy: + project: RowProxy = await create_fake_project(connection, fake_user, hidden=True) + await create_fake_nodes(project) + return project + + +@pytest.mark.acceptance_test( + "For https://github.com/ITISFoundation/osparc-simcore/issues/4313" +) +async def test_set_project_custom_metadata( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], + create_fake_project: Callable[..., Awaitable[RowProxy]], + faker: Faker, +): + user: RowProxy = await create_fake_user(connection) + project: RowProxy = await create_fake_project(connection, user, hidden=True) + + # subresource is attached to parent + user_metadata = {"float": 3.14, "int": 42, "string": "foo", "bool": True} + random_project_uuid = faker.uuid4(cast_to=None) + assert isinstance(random_project_uuid, UUID) + with pytest.raises(DBProjectNotFoundError): + await utils_projects_metadata.get( + connection_factory, project_uuid=random_project_uuid + ) + + with pytest.raises(DBProjectNotFoundError): + await utils_projects_metadata.set_project_custom_metadata( + connection_factory, + project_uuid=random_project_uuid, + custom_metadata=user_metadata, + ) + + project_metadata = await utils_projects_metadata.get( + connection_factory, project_uuid=project["uuid"] + ) + assert project_metadata is not None + assert project_metadata.custom is None + assert project_metadata.parent_project_uuid is None + assert project_metadata.parent_node_id is None + assert project_metadata.root_parent_project_uuid is None + assert project_metadata.root_parent_node_id is None + + got = await utils_projects_metadata.set_project_custom_metadata( + connection_factory, + project_uuid=project["uuid"], + custom_metadata=user_metadata, + ) + assert got.custom + assert got.parent_project_uuid is None + assert got.parent_node_id is None + assert got.root_parent_project_uuid is None + assert got.root_parent_node_id is None + assert user_metadata == got.custom + + project_metadata = await utils_projects_metadata.get( + connection_factory, project_uuid=project["uuid"] + ) + assert project_metadata is not None + assert project_metadata == got + + got_after_update = await utils_projects_metadata.set_project_custom_metadata( + connection_factory, + project_uuid=project["uuid"], + custom_metadata={}, + ) + assert got_after_update.custom == {} + assert got.modified + assert got_after_update.modified + assert got.modified < got_after_update.modified + + +async def test_set_project_ancestors_with_invalid_parents( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[[uuid.UUID], Awaitable[ProjectNode]], + faker: Faker, +): + user: RowProxy = await create_fake_user(connection) + project: RowProxy = await create_fake_project(connection, user, hidden=True) + 
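The custom-metadata test above shows the two module-level calls used for writing and reading project metadata. A condensed sketch, assuming a project uuid that already exists; the metadata payload is illustrative:

    from simcore_postgres_database import utils_projects_metadata


    async def tag_project(connection, project_uuid) -> dict:
        # connection: aiopg SAConnection or SQLAlchemy AsyncConnection, as in the test
        await utils_projects_metadata.set_project_custom_metadata(
            connection,
            project_uuid=project_uuid,
            custom_metadata={"reviewed": True, "priority": 1},  # illustrative payload
        )
        metadata = await utils_projects_metadata.get(connection, project_uuid=project_uuid)
        return metadata.custom or {}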
project_node = await create_fake_projects_node(project["uuid"]) + + # this is empty + project_metadata = await utils_projects_metadata.get( + connection_factory, project_uuid=project["uuid"] + ) + assert project_metadata is not None + assert project_metadata.custom is None + assert project_metadata.parent_project_uuid is None + assert project_metadata.parent_node_id is None + assert project_metadata.root_parent_project_uuid is None + assert project_metadata.root_parent_node_id is None + + random_project_uuid = faker.uuid4(cast_to=None) + assert isinstance(random_project_uuid, UUID) + random_node_id = faker.uuid4(cast_to=None) + assert isinstance(random_node_id, UUID) + + # invalid project + with pytest.raises(DBProjectNotFoundError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=random_project_uuid, + parent_project_uuid=None, + parent_node_id=None, + ) + + # test invalid combinations + with pytest.raises(DBProjectInvalidAncestorsError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=random_project_uuid, + parent_node_id=None, + ) + with pytest.raises(DBProjectInvalidAncestorsError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=None, + parent_node_id=random_node_id, + ) + + # valid combination with invalid project/node + with pytest.raises(DBProjectInvalidParentProjectError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=random_project_uuid, + parent_node_id=random_node_id, + ) + + # these would make it a parent of itself which is forbiden + with pytest.raises(DBProjectInvalidAncestorsError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=project["uuid"], + parent_node_id=random_node_id, + ) + + with pytest.raises(DBProjectInvalidAncestorsError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=project["uuid"], + parent_node_id=project_node.node_id, + ) + + # + another_project = await create_fake_project(connection, user, hidden=False) + another_project_node = await create_fake_projects_node(another_project["uuid"]) + with pytest.raises(DBProjectInvalidParentNodeError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=another_project["uuid"], + parent_project_uuid=project["uuid"], + parent_node_id=random_node_id, + ) + + with pytest.raises(DBProjectInvalidParentProjectError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=another_project["uuid"], + parent_project_uuid=random_project_uuid, + parent_node_id=project_node.node_id, + ) + + # mix a node from one project and a parent project + yet_another_project = await create_fake_project(connection, user, hidden=False) + with pytest.raises(DBProjectInvalidParentNodeError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=yet_another_project["uuid"], + parent_project_uuid=project["uuid"], + parent_node_id=another_project_node.node_id, + ) + + with pytest.raises(DBProjectInvalidParentNodeError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=yet_another_project["uuid"], + 
parent_project_uuid=another_project["uuid"], + parent_node_id=project_node.node_id, + ) + + +async def test_set_project_ancestors( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[[uuid.UUID], Awaitable[ProjectNode]], +): + user: RowProxy = await create_fake_user(connection) + + # create grand-parent + grand_parent_project = await create_fake_project(connection, user, hidden=False) + grand_parent_node = await create_fake_projects_node(grand_parent_project["uuid"]) + + # create parent + parent_project = await create_fake_project(connection, user, hidden=False) + parent_node = await create_fake_projects_node(parent_project["uuid"]) + + # create child + child_project: RowProxy = await create_fake_project(connection, user, hidden=True) + + # set ancestry, first the parents + updated_parent_metadata = await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=parent_project["uuid"], + parent_project_uuid=grand_parent_project["uuid"], + parent_node_id=grand_parent_node.node_id, + ) + assert updated_parent_metadata.parent_project_uuid == uuid.UUID( + grand_parent_project["uuid"] + ) + assert updated_parent_metadata.parent_node_id == grand_parent_node.node_id + assert updated_parent_metadata.root_parent_project_uuid == uuid.UUID( + grand_parent_project["uuid"] + ) + assert updated_parent_metadata.root_parent_node_id == grand_parent_node.node_id + + # then the child + updated_child_metadata = await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=child_project["uuid"], + parent_project_uuid=parent_project["uuid"], + parent_node_id=parent_node.node_id, + ) + assert updated_child_metadata.parent_project_uuid == uuid.UUID( + parent_project["uuid"] + ) + assert updated_child_metadata.parent_node_id == parent_node.node_id + assert updated_child_metadata.root_parent_project_uuid == uuid.UUID( + grand_parent_project["uuid"] + ) + assert updated_child_metadata.root_parent_node_id == grand_parent_node.node_id + + # check properly updated + returned_project_metadata = await utils_projects_metadata.get( + connection_factory, project_uuid=child_project["uuid"] + ) + assert returned_project_metadata == updated_child_metadata + + # remove the child + updated_child_metadata = await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=child_project["uuid"], + parent_project_uuid=None, + parent_node_id=None, + ) + assert updated_child_metadata.parent_project_uuid is None + assert updated_child_metadata.parent_node_id is None + assert updated_child_metadata.root_parent_project_uuid is None + assert updated_child_metadata.root_parent_node_id is None + + +async def _create_child_project( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[[uuid.UUID], Awaitable[ProjectNode]], + parent_project: RowProxy | None, + parent_node: ProjectNode | None, +) -> tuple[RowProxy, ProjectNode]: + project = await create_fake_project(connection, user, hidden=False) + node = await create_fake_projects_node(project["uuid"]) + if parent_project and parent_node: + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=project["uuid"], + parent_project_uuid=parent_project["uuid"], + 
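test_set_project_ancestors above wires a grand-parent, parent and child chain and shows that the root_parent_* fields are derived automatically. The same linking step, sketched with illustrative names:

    from simcore_postgres_database import utils_projects_metadata


    async def link_child_to_parent(connection, *, child_uuid, parent_uuid, parent_node_id):
        # parent uuid and node id must be given together (or both None to detach the child)
        updated = await utils_projects_metadata.set_project_ancestors(
            connection,
            project_uuid=child_uuid,
            parent_project_uuid=parent_uuid,
            parent_node_id=parent_node_id,
        )
        # root_parent_* is resolved by the utility from the existing ancestry
        return updated.root_parent_project_uuid, updated.root_parent_node_id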
parent_node_id=parent_node.node_id, + ) + return project, node + + +@pytest.fixture +async def create_projects_genealogy( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[[uuid.UUID], Awaitable[ProjectNode]], +) -> Callable[[RowProxy], Awaitable[list[tuple[RowProxy, ProjectNode]]]]: + async def _(user: RowProxy) -> list[tuple[RowProxy, ProjectNode]]: + ancestors: list[tuple[RowProxy, ProjectNode]] = [] + + ancestor_project = await create_fake_project(connection, user, hidden=False) + ancestor_node = await create_fake_projects_node(ancestor_project["uuid"]) + ancestors.append((ancestor_project, ancestor_node)) + + for _ in range(13): + child_project, child_node = await _create_child_project( + connection, + connection_factory, + user, + create_fake_project, + create_fake_projects_node, + ancestor_project, + ancestor_node, + ) + ancestor_project = child_project + ancestor_node = child_node + ancestors.append((child_project, child_node)) + + return ancestors + + return _ + + +async def test_not_implemented_use_cases( + connection: SAConnection, + connection_factory: SAConnection | AsyncConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[[uuid.UUID], Awaitable[ProjectNode]], + create_projects_genealogy: Callable[ + [RowProxy], Awaitable[list[tuple[RowProxy, ProjectNode]]] + ], +): + """This will tests use-cases that are currently not implemented and that are expected to fail with an exception + Basically any project with children cannot have a change in its genealogy anymore. yes children are sacred. + If you still want to change them you need to go first via the children. 
+ """ + user = await create_fake_user(connection) + # add a missing parent to an already existing chain of parent-children + ancestors = await create_projects_genealogy(user) + missing_parent_project = await create_fake_project(connection, user) + missing_parent_node = await create_fake_projects_node( + missing_parent_project["uuid"] + ) + + with pytest.raises(NotImplementedError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=ancestors[0][0]["uuid"], + parent_project_uuid=missing_parent_project["uuid"], + parent_node_id=missing_parent_node.node_id, + ) + + # modifying a parent-child relationship in the middle of the genealogy is also not implemented + with pytest.raises(NotImplementedError): + await utils_projects_metadata.set_project_ancestors( + connection_factory, + project_uuid=ancestors[3][0]["uuid"], + parent_project_uuid=missing_parent_project["uuid"], + parent_node_id=missing_parent_node.node_id, + ) diff --git a/packages/postgres-database/tests/test_utils_projects_nodes.py b/packages/postgres-database/tests/test_utils_projects_nodes.py new file mode 100644 index 00000000000..9251fec2357 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_projects_nodes.py @@ -0,0 +1,431 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +import asyncio +import random +import uuid +from collections.abc import Awaitable, Callable +from random import randint +from typing import Any + +import pytest +import sqlalchemy +from aiopg.sa.connection import SAConnection +from aiopg.sa.engine import Engine +from aiopg.sa.result import RowProxy +from common_library.async_tools import maybe_await +from faker import Faker +from simcore_postgres_database.models.projects import projects +from simcore_postgres_database.models.projects_nodes import projects_nodes +from simcore_postgres_database.utils_projects_nodes import ( + ProjectNodeCreate, + ProjectNodesDuplicateNodeError, + ProjectNodesNodeNotFoundError, + ProjectNodesNonUniqueNodeFoundError, + ProjectNodesProjectNotFoundError, + ProjectNodesRepo, +) +from sqlalchemy.ext.asyncio import AsyncConnection + +# NOTE: Temporary usage of connection_factory until asyncpg is used + + +async def _delete_project( + connection_factory: SAConnection, project_uuid: uuid.UUID +) -> None: + result = await connection_factory.execute( + sqlalchemy.delete(projects).where(projects.c.uuid == f"{project_uuid}") + ) + assert result.rowcount == 1 + + +@pytest.fixture +async def registered_user( + connection: SAConnection, + create_fake_user: Callable[..., Awaitable[RowProxy]], +) -> RowProxy: + user = await create_fake_user(connection) + assert user + return user + + +@pytest.fixture +async def registered_project( + connection: SAConnection, + registered_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], +) -> dict[str, Any]: + project = await create_fake_project(connection, registered_user) + assert project + return dict(project) + + +@pytest.fixture +def projects_nodes_repo_of_invalid_project(faker: Faker) -> ProjectNodesRepo: + invalid_project_uuid = faker.uuid4(cast_to=None) + assert isinstance(invalid_project_uuid, uuid.UUID) + repo = ProjectNodesRepo(project_uuid=invalid_project_uuid) + assert repo + return repo + + +@pytest.fixture +def projects_nodes_repo(registered_project: dict[str, Any]) -> ProjectNodesRepo: + repo = ProjectNodesRepo(project_uuid=registered_project["uuid"]) + assert repo + 
return repo + + +@pytest.fixture +def create_fake_projects_node( + faker: Faker, +) -> Callable[..., ProjectNodeCreate]: + def _creator() -> ProjectNodeCreate: + node = ProjectNodeCreate( + node_id=uuid.uuid4(), + required_resources=faker.pydict(allowed_types=(str,)), + key=faker.pystr(), + version=faker.pystr(), + label=faker.pystr(), + ) + assert node + return node + + return _creator + + +async def test_create_projects_nodes_raises_if_project_not_found( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo_of_invalid_project: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + with pytest.raises(ProjectNodesProjectNotFoundError): + await projects_nodes_repo_of_invalid_project.add( + connection_factory, + nodes=[create_fake_projects_node()], + ) + + +async def test_create_projects_nodes( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + assert await projects_nodes_repo.add(connection_factory, nodes=[]) == [] + + new_nodes = await projects_nodes_repo.add( + connection_factory, + nodes=[create_fake_projects_node()], + ) + assert new_nodes + assert len(new_nodes) == 1 + assert new_nodes[0] + + +async def test_create_twice_same_projects_nodes_raises( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + node_create = create_fake_projects_node() + new_nodes = await projects_nodes_repo.add(connection_factory, nodes=[node_create]) + assert new_nodes + assert len(new_nodes) == 1 + with pytest.raises(ProjectNodesDuplicateNodeError): + await projects_nodes_repo.add( + connection_factory, + nodes=[node_create], + ) + + +async def test_list_project_nodes_of_invalid_project_returns_nothing( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo_of_invalid_project: ProjectNodesRepo, +): + nodes = await projects_nodes_repo_of_invalid_project.list(connection_factory) + assert nodes == [] + + +async def test_list_project_nodes( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + nodes = await projects_nodes_repo.list(connection_factory) + assert nodes == [] + + created_nodes = await projects_nodes_repo.add( + connection_factory, + nodes=[ + create_fake_projects_node() for _ in range(randint(3, 12)) # noqa: S311 + ], + ) + + nodes = await projects_nodes_repo.list(connection_factory) + assert nodes + assert len(nodes) == len(created_nodes) + + +async def test_get_project_node_of_invalid_project_raises( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo_of_invalid_project: ProjectNodesRepo, +): + with pytest.raises(ProjectNodesNodeNotFoundError): + await projects_nodes_repo_of_invalid_project.get( + connection_factory, node_id=uuid.uuid4() + ) + + +async def test_get_project_node_of_empty_project_raises( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, +): + with pytest.raises(ProjectNodesNodeNotFoundError): + await projects_nodes_repo.get(connection_factory, node_id=uuid.uuid4()) + + +async def test_get_project_node( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + new_nodes = await projects_nodes_repo.add( + 
connection_factory, nodes=[create_fake_projects_node()] + ) + assert len(new_nodes) == 1 + assert new_nodes[0] + + received_node = await projects_nodes_repo.get( + connection_factory, node_id=new_nodes[0].node_id + ) + + assert received_node == new_nodes[0] + + +async def test_update_project_node_of_invalid_node_raises( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], + faker: Faker, +): + new_nodes = await projects_nodes_repo.add( + connection_factory, + nodes=[create_fake_projects_node()], + ) + assert len(new_nodes) == 1 + assert new_nodes[0] + assert new_nodes[0].created == new_nodes[0].modified + with pytest.raises(ProjectNodesNodeNotFoundError): + await projects_nodes_repo.update( + connection_factory, + node_id=uuid.uuid4(), + required_resources={faker.pystr(): faker.pyint()}, + ) + + +async def test_update_project_node( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], + faker: Faker, +): + new_nodes = await projects_nodes_repo.add( + connection_factory, nodes=[create_fake_projects_node()] + ) + assert len(new_nodes) == 1 + assert new_nodes[0] + assert new_nodes[0].created == new_nodes[0].modified + required_resources = {faker.pystr(): faker.pyint()} + updated_node = await projects_nodes_repo.update( + connection_factory, + node_id=new_nodes[0].node_id, + required_resources=required_resources, + ) + assert updated_node + assert updated_node != new_nodes + assert updated_node.modified > new_nodes[0].modified + assert updated_node.created == new_nodes[0].created + assert updated_node.required_resources == required_resources + + +async def test_delete_invalid_node_does_nothing( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo_of_invalid_project: ProjectNodesRepo, +): + await projects_nodes_repo_of_invalid_project.delete( + connection_factory, node_id=uuid.uuid4() + ) + + +async def test_delete_node( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + new_nodes = await projects_nodes_repo.add( + connection_factory, + nodes=[create_fake_projects_node()], + ) + assert len(new_nodes) == 1 + assert new_nodes[0] + + received_node = await projects_nodes_repo.get( + connection_factory, node_id=new_nodes[0].node_id + ) + assert received_node == new_nodes[0] + await projects_nodes_repo.delete(connection_factory, node_id=new_nodes[0].node_id) + + with pytest.raises(ProjectNodesNodeNotFoundError): + await projects_nodes_repo.get(connection_factory, node_id=new_nodes[0].node_id) + + +async def test_delete_project_delete_all_nodes( + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + # create a project node + new_nodes = await projects_nodes_repo.add( + connection_factory, + nodes=[create_fake_projects_node()], + ) + assert len(new_nodes) == 1 + assert new_nodes[0] + received_node = await projects_nodes_repo.get( + connection_factory, node_id=new_nodes[0].node_id + ) + assert received_node == new_nodes[0] + + # now delete the project from the projects table + await _delete_project(connection_factory, projects_nodes_repo.project_uuid) + + # the project cannot be found anymore (the link in projects_to_projects_nodes is auto-removed) + with 
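The node tests above exercise ProjectNodesRepo end to end. The basic life cycle looks roughly like this sketch, where the service key, version, label and resources are illustrative values only:

    import uuid

    from simcore_postgres_database.utils_projects_nodes import (
        ProjectNodeCreate,
        ProjectNodesRepo,
    )


    async def add_and_update_node(connection, project_uuid: uuid.UUID):
        repo = ProjectNodesRepo(project_uuid=project_uuid)
        (node,) = await repo.add(
            connection,
            nodes=[
                ProjectNodeCreate(
                    node_id=uuid.uuid4(),
                    required_resources={},
                    key="simcore/services/dynamic/example",  # illustrative service key
                    version="1.0.0",
                    label="example node",
                )
            ],
        )
        # update and return the stored node; delete(node_id=...) would remove it again
        return await repo.update(
            connection, node_id=node.node_id, required_resources={"CPU": 1}
        )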
pytest.raises(ProjectNodesNodeNotFoundError): + await projects_nodes_repo.get(connection_factory, node_id=new_nodes[0].node_id) + + # the underlying projects_nodes should also be gone, thanks to migration + result = await connection_factory.execute( + sqlalchemy.select(projects_nodes).where( + projects_nodes.c.node_id == f"{new_nodes[0].node_id}" + ) + ) + assert result + row = await maybe_await(result.first()) + assert row is None + + +@pytest.mark.parametrize("num_concurrent_workflows", [1, 250]) +async def test_multiple_creation_deletion_of_nodes( + aiopg_engine: Engine, + registered_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[..., ProjectNodeCreate], + num_concurrent_workflows: int, +): + NUM_NODES = 11 + + async def _workflow() -> None: + async with aiopg_engine.acquire() as connection: + project = await create_fake_project(connection, registered_user) + projects_nodes_repo = ProjectNodesRepo(project_uuid=project.uuid) + + await projects_nodes_repo.add( + connection, + nodes=[create_fake_projects_node() for _ in range(NUM_NODES)], + ) + list_nodes = await projects_nodes_repo.list(connection) + assert list_nodes + assert len(list_nodes) == NUM_NODES + await projects_nodes_repo.delete( + connection, + node_id=random.choice(list_nodes).node_id, # noqa: S311 + ) + list_nodes = await projects_nodes_repo.list(connection) + assert list_nodes + assert len(list_nodes) == (NUM_NODES - 1) + await _delete_project(connection, project_uuid=project.uuid) + + await asyncio.gather(*(_workflow() for _ in range(num_concurrent_workflows))) + + +async def test_get_project_id_from_node_id( + aiopg_engine: Engine, + connection_factory: SAConnection | AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + registered_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + NUM_NODES = 11 + + async def _workflow() -> dict[uuid.UUID, list[uuid.UUID]]: + async with aiopg_engine.acquire() as connection: + project = await create_fake_project(connection, registered_user) + projects_nodes_repo = ProjectNodesRepo(project_uuid=project.uuid) + + list_of_nodes = await projects_nodes_repo.add( + connection, + nodes=[create_fake_projects_node() for _ in range(NUM_NODES)], + ) + + return {uuid.UUID(project["uuid"]): [node.node_id for node in list_of_nodes]} + + # create some projects + list_of_project_id_node_ids_map = await asyncio.gather( + *(_workflow() for _ in range(10)) + ) + + for project_id_to_node_ids_map in list_of_project_id_node_ids_map: + project_id = next(iter(project_id_to_node_ids_map)) + random_node_id = random.choice( # noqa: S311 + project_id_to_node_ids_map[project_id] + ) + received_project_id = await ProjectNodesRepo.get_project_id_from_node_id( + connection_factory, node_id=random_node_id + ) + assert received_project_id == next(iter(project_id_to_node_ids_map)) + + +async def test_get_project_id_from_node_id_raises_for_invalid_node_id( + connection_factory: SAConnection | AsyncConnection, + faker: Faker, +): + random_uuid = faker.uuid4(cast_to=None) + assert isinstance(random_uuid, uuid.UUID) + with pytest.raises(ProjectNodesNodeNotFoundError): + await ProjectNodesRepo.get_project_id_from_node_id( + connection_factory, node_id=random_uuid + ) + + +async def test_get_project_id_from_node_id_raises_if_multiple_projects_with_same_node_id_exist( + aiopg_engine: Engine, + connection: SAConnection, + connection_factory: SAConnection | 
AsyncConnection, + projects_nodes_repo: ProjectNodesRepo, + registered_user: RowProxy, + create_fake_project: Callable[..., Awaitable[RowProxy]], + create_fake_projects_node: Callable[..., ProjectNodeCreate], +): + project1 = await create_fake_project(connection, registered_user) + project1_repo = ProjectNodesRepo(project_uuid=project1.uuid) + + project2 = await create_fake_project(connection, registered_user) + project2_repo = ProjectNodesRepo(project_uuid=project2.uuid) + + shared_node = create_fake_projects_node() + + project1_nodes = await project1_repo.add(connection_factory, nodes=[shared_node]) + assert len(project1_nodes) == 1 + project2_nodes = await project2_repo.add(connection_factory, nodes=[shared_node]) + assert len(project2_nodes) == 1 + assert project1_nodes[0].model_dump( + include=ProjectNodeCreate.get_field_names(exclude={"created", "modified"}) + ) == project2_nodes[0].model_dump( + include=ProjectNodeCreate.get_field_names(exclude={"created", "modified"}) + ) + with pytest.raises(ProjectNodesNonUniqueNodeFoundError): + await ProjectNodesRepo.get_project_id_from_node_id( + connection_factory, node_id=project1_nodes[0].node_id + ) diff --git a/packages/postgres-database/tests/test_utils_repos.py b/packages/postgres-database/tests/test_utils_repos.py new file mode 100644 index 00000000000..be100df2ef1 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_repos.py @@ -0,0 +1,213 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Any, NamedTuple + +import pytest +import sqlalchemy as sa +from simcore_postgres_database.models.tags import tags +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.exc import IntegrityError +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + + +async def test_sa_transactions(asyncpg_engine: AsyncEngine): + # + # SEE https://docs.sqlalchemy.org/en/20/orm/extensions/asyncio.html#synopsis-core + # + + # READ query + total_count_query = sa.select(sa.func.count()).select_from(tags) + + # WRITE queries + query1 = ( + tags.insert().values(id=2, name="query1", color="blue").returning(tags.c.id) + ) + query11 = ( + tags.insert().values(id=3, name="query11", color="blue").returning(tags.c.id) + ) + query12 = ( + tags.insert().values(id=5, name="query12", color="blue").returning(tags.c.id) + ) + query2 = ( + tags.insert().values(id=6, name="query2", color="blue").returning(tags.c.id) + ) + query2 = ( + tags.insert().values(id=7, name="query2", color="blue").returning(tags.c.id) + ) + + async with asyncpg_engine.connect() as conn, conn.begin(): # starts transaction (savepoint) + + result = await conn.execute(query1) + assert result.scalar() == 2 + + total_count = (await conn.execute(total_count_query)).scalar() + assert total_count == 1 + + rows = (await conn.execute(tags.select().where(tags.c.id == 2))).fetchall() + assert rows + assert rows[0].id == 2 + + async with conn.begin_nested(): # savepoint + await conn.execute(query11) + + with pytest.raises(IntegrityError): + async with conn.begin_nested(): # savepoint + await conn.execute(query11) + + await conn.execute(query12) + + total_count = (await conn.execute(total_count_query)).scalar() + assert total_count == 3 # since query11 (second time) reverted! 
+ + await conn.execute(query2) + + total_count = (await conn.execute(total_count_query)).scalar() + assert total_count == 4 + + +class _PageTuple(NamedTuple): + total_count: int + rows: list[dict[str, Any]] + + +class OneResourceRepoDemo: + # This is a PROTOTYPE of how one could implement a generic + # repo that provides CRUD operations providing a given table + def __init__(self, engine: AsyncEngine, table: sa.Table): + if "id" not in table.columns: + msg = "id column expected" + raise ValueError(msg) + self.table = table + + self.engine = engine + + async def create(self, connection: AsyncConnection | None = None, **kwargs) -> int: + async with transaction_context(self.engine, connection) as conn: + result = await conn.execute(self.table.insert().values(**kwargs)) + assert result # nosec + return result.inserted_primary_key[0] + + async def get_by_id( + self, + connection: AsyncConnection | None = None, + *, + row_id: int, + ) -> dict[str, Any] | None: + async with pass_or_acquire_connection(self.engine, connection) as conn: + result = await conn.execute( + sa.select(self.table).where(self.table.c.id == row_id) + ) + row = result.mappings().fetchone() + return dict(row) if row else None + + async def get_page( + self, + connection: AsyncConnection | None = None, + *, + limit: int, + offset: int = 0, + ) -> _PageTuple: + async with pass_or_acquire_connection(self.engine, connection) as conn: + # Compute total count + total_count_query = sa.select(sa.func.count()).select_from(self.table) + total_count_result = await conn.execute(total_count_query) + total_count = total_count_result.scalar() + + # Fetch paginated results + query = sa.select(self.table).limit(limit).offset(offset) + result = await conn.execute(query) + rows = [dict(row) for row in result.mappings().fetchall()] + + return _PageTuple(total_count=total_count or 0, rows=rows) + + async def update( + self, + connection: AsyncConnection | None = None, + *, + row_id: int, + **values, + ) -> bool: + async with transaction_context(self.engine, connection) as conn: + result = await conn.execute( + self.table.update().where(self.table.c.id == row_id).values(**values) + ) + return result.rowcount > 0 + + async def delete( + self, + connection: AsyncConnection | None = None, + *, + row_id: int, + ) -> bool: + async with transaction_context(self.engine, connection) as conn: + result = await conn.execute( + self.table.delete().where(self.table.c.id == row_id) + ) + return result.rowcount > 0 + + +async def test_oneresourcerepodemo_prototype(asyncpg_engine: AsyncEngine): + + tags_repo = OneResourceRepoDemo(engine=asyncpg_engine, table=tags) + + # create + tag_id = await tags_repo.create(name="cyan tag", color="cyan") + assert tag_id > 0 + + # get, list + tag = await tags_repo.get_by_id(row_id=tag_id) + assert tag + + page = await tags_repo.get_page(limit=10) + assert page.total_count == 1 + assert page.rows == [tag] + + # update + ok = await tags_repo.update(row_id=tag_id, name="changed name") + assert ok + + updated_tag = await tags_repo.get_by_id(row_id=tag_id) + assert updated_tag + assert updated_tag["name"] != tag["name"] + + # delete + ok = await tags_repo.delete(row_id=tag_id) + assert ok + + assert not await tags_repo.get_by_id(row_id=tag_id) + + +async def test_transaction_context(asyncpg_engine: AsyncEngine): + # (1) Using transaction_context and fails + fake_error_msg = "some error" + + def _something_raises_here(): + raise RuntimeError(fake_error_msg) + + tags_repo = OneResourceRepoDemo(engine=asyncpg_engine, table=tags) + + # 
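The OneResourceRepoDemo prototype above captures the intended pattern: read methods use pass_or_acquire_connection, write methods use transaction_context, and both accept an optional outer connection so several calls can share one transaction. A pared-down sketch of that shape, with illustrative function names:

    import sqlalchemy as sa
    from simcore_postgres_database.models.tags import tags
    from simcore_postgres_database.utils_repos import (
        pass_or_acquire_connection,
        transaction_context,
    )
    from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine


    async def count_tags(engine: AsyncEngine, connection: AsyncConnection | None = None) -> int:
        # read-only: reuses the given connection or acquires a fresh one
        async with pass_or_acquire_connection(engine, connection) as conn:
            result = await conn.execute(sa.select(sa.func.count()).select_from(tags))
            return result.scalar() or 0


    async def rename_tag(
        engine: AsyncEngine, tag_id: int, name: str, connection: AsyncConnection | None = None
    ) -> None:
        # write: commits on exit, or joins the caller's transaction if one is passed in
        async with transaction_context(engine, connection) as conn:
            await conn.execute(tags.update().where(tags.c.id == tag_id).values(name=name))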
using external transaction_context: commits upon __aexit__ + async with transaction_context(asyncpg_engine) as conn: + await tags_repo.create(conn, name="cyan tag", color="cyan") + await tags_repo.create(conn, name="red tag", color="red") + assert (await tags_repo.get_page(conn, limit=10, offset=0)).total_count == 2 + + # using internal: auto-commit + await tags_repo.create(name="red tag", color="red") + assert (await tags_repo.get_page(limit=10, offset=0)).total_count == 3 + + # auto-rollback + with pytest.raises(RuntimeError, match=fake_error_msg): # noqa: PT012 + async with transaction_context(asyncpg_engine) as conn: + await tags_repo.create(conn, name="violet tag", color="violet") + assert (await tags_repo.get_page(conn, limit=10, offset=0)).total_count == 4 + _something_raises_here() + + assert (await tags_repo.get_page(limit=10, offset=0)).total_count == 3 diff --git a/packages/postgres-database/tests/test_utils_services.py b/packages/postgres-database/tests/test_utils_services.py new file mode 100644 index 00000000000..70b102fea70 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_services.py @@ -0,0 +1,339 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from typing import Any, NamedTuple + +import pytest +import sqlalchemy as sa +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_group +from simcore_postgres_database.models.groups import GroupType, groups +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.services import ( + services_access_rights, + services_meta_data, +) +from simcore_postgres_database.models.services_consume_filetypes import ( + services_consume_filetypes, +) +from simcore_postgres_database.utils_services import create_select_latest_services_query +from sqlalchemy.dialects.postgresql import INTEGER +from sqlalchemy.dialects.postgresql import insert as pg_insert + + +class RandomServiceFactory: + def __init__(self, faker: Faker): + self._faker = faker + self._cache = {} # meta + + def random_service_meta_data(self, **overrides) -> dict[str, Any]: + # + # NOTE: if overrides keys are wrong it will fail later as + # `sqlalchemy.exc.CompileError: Unconsumed column names: product_name` + # + name_suffix = self._faker.name().lower().replace(" ", "") + version = self._faker.numerify("%#.%#.#") + owner_gid = 1 # everybody + + row = { + "key": f"simcore/service/dynamic/{name_suffix}", + "version": version, + "owner": owner_gid, + "name": f"service {name_suffix}", + "description": self._faker.sentence(), + "thumbnail": self._faker.image_url(120, 120), + "classifiers": self._faker.random_elements( + elements=("RRID:SCR_018997", "RRID:SCR_019001", "RRID:Addgene_44362"), + unique=True, + ), + "quality": {}, + } + row.update(overrides) + + self._cache = row + + return row + + def random_service_access_rights(self, **overrides) -> dict[str, Any]: + default_value = self._get_service_meta_data() + row = { + "key": default_value["key"], + "version": default_value["version"], + "gid": default_value["owner"], + "execute_access": True, + "write_access": True, + "product_name": "osparc", + } + row.update(overrides) + + return row + + def random_service_consume_filetypes( + self, port_index: int = 1, **overrides + ) -> dict[str, Any]: + default_value = self._get_service_meta_data() + + row = { + "service_key": default_value["key"], + "service_version": default_value["version"], + 
"service_display_name": default_value["name"], + "service_input_port": f"input_{port_index}", + "filetype": self._faker.uri_extension().removeprefix(".").upper(), + "is_guest_allowed": bool(port_index % 2), + } + + row.update(overrides) + return row + + def _get_service_meta_data(self): + if not self._cache: + msg = "Run first random_service_meta_data(*)" + raise ValueError(msg) + return self._cache + + def reset(self): + self._cache = {} + + +class ServiceInserted(NamedTuple): + metadata: dict[str, Any] + access: list[dict[str, Any]] + filetypes: list[dict[str, Any]] + + +def execute_insert_service( + conn, + meta_data_values: dict[str, Any], + access_rights_values: list[dict[str, Any]], + filetypes_values: list[dict[str, Any]], +) -> ServiceInserted: + query = services_meta_data.insert().values(**meta_data_values) + conn.execute(query) + + for values in access_rights_values: + query = services_access_rights.insert().values(**values) + conn.execute(query) + + for values in filetypes_values: + query = services_consume_filetypes.insert().values(**values) + conn.execute(query) + + inserted = ServiceInserted( + metadata=meta_data_values, + access=access_rights_values, + filetypes=filetypes_values, + ) + print(inserted) + return inserted + + +ServiceKeyStr = str +ServiceVersionStr = str + + +class ServicesFixture(NamedTuple): + expected_latest: set[tuple[ServiceKeyStr, ServiceVersionStr]] + num_services: int + + +@pytest.fixture +def services_fixture(faker: Faker, pg_sa_engine: sa.engine.Engine) -> ServicesFixture: + expected_latest = set() + num_services = 0 + + with pg_sa_engine.begin() as conn: + # PRODUCT + osparc_product = { + "name": "osparc", + "display_name": "Product Osparc", + "short_name": "osparc", + "host_regex": r"^osparc.", + "priority": 0, + } + product_name = conn.execute( + pg_insert(products) + .values(**osparc_product) + .on_conflict_do_update( + index_elements=[products.c.name], set_=osparc_product + ) + .returning(products.c.name) + ).scalar() + + # GROUPS + product_gid = conn.execute( + groups.insert() + .values(**random_group(type=GroupType.STANDARD, name="osparc group")) + .returning(groups.c.gid) + ).scalar() + + everyone_gid = conn.execute( + sa.select(groups.c.gid).where(groups.c.type == GroupType.EVERYONE) + ).scalar() + + assert product_gid != everyone_gid + + # SERVICE /one + service_factory = RandomServiceFactory(faker=faker) + service_latest = "10.2.33" + for version in ("1.0.0", "10.1.0", service_latest, "10.2.2"): + service = execute_insert_service( + conn, + service_factory.random_service_meta_data( + key="simcore/service/dynamic/one", + version=version, + owner=everyone_gid, + ), + [ + service_factory.random_service_access_rights( + product_name=product_name + ) + ], + [ + service_factory.random_service_consume_filetypes( + port_index=1, + ) + ], + ) + + num_services += 1 + + if version == service_latest: + expected_latest.add( + (service.metadata["key"], service.metadata["version"]) + ) + + # SERVICE /two + service = execute_insert_service( + conn, + service_factory.random_service_meta_data( + key="simcore/service/dynamic/two", + version="1.2.3", + owner=product_gid, + ), + [ + service_factory.random_service_access_rights( + product_name=product_name, + execute_access=True, + write_access=False, + ) + ], + [ + service_factory.random_service_consume_filetypes(port_index=1), + service_factory.random_service_consume_filetypes(port_index=2), + ], + ) + num_services += 1 + expected_latest.add((service.metadata["key"], service.metadata["version"])) + + return 
ServicesFixture( + expected_latest=expected_latest, + num_services=num_services, + ) + + +def test_select_latest_services( + services_fixture: ServicesFixture, pg_sa_engine: sa.engine.Engine +): + assert issubclass(INTEGER, sa.Integer) + + lts = create_select_latest_services_query().alias("lts") + + stmt = sa.select(lts.c.key, lts.c.latest, services_meta_data.c.name).select_from( + lts.join( + services_meta_data, + (services_meta_data.c.key == lts.c.key) + & (services_meta_data.c.version == lts.c.latest), + ) + ) + + with pg_sa_engine.connect() as conn: + latest_services: list = conn.execute(stmt).fetchall() + assert { + (s.key, s.latest) for s in latest_services + } == services_fixture.expected_latest + + +def test_trial_queries_for_service_metadata( + services_fixture: ServicesFixture, pg_sa_engine: sa.engine.Engine +): + # check if service exists and whether is public or not + with pg_sa_engine.connect() as conn: + query = sa.select( + services_consume_filetypes.c.service_key, + sa.func.array_agg( + sa.func.distinct(services_consume_filetypes.c.filetype) + ).label("file_extensions"), + ).group_by(services_consume_filetypes.c.service_key) + + rows: list = conn.execute(query).fetchall() + print(rows) + + with pg_sa_engine.connect() as conn: + query = ( + sa.select( + services_consume_filetypes.c.service_key, + sa.text( + "array_to_string(MAX(string_to_array(version, '.')::int[]), '.') AS latest_version" + ), + sa.func.array_agg( + sa.func.distinct(services_consume_filetypes.c.filetype) + ).label("file_extensions"), + ) + .select_from( + services_meta_data.join( + services_consume_filetypes, + services_meta_data.c.key + == services_consume_filetypes.c.service_key, + ) + ) + .group_by(services_consume_filetypes.c.service_key) + ) + + rows: list = conn.execute(query).fetchall() + + # list latest services + services_latest = create_select_latest_services_query().alias("services_latest") + + with pg_sa_engine.connect() as conn: + query = sa.select( + services_meta_data.c.key, + services_meta_data.c.version, + services_access_rights.c.gid, + services_access_rights.c.execute_access, + services_access_rights.c.write_access, + services_access_rights.c.product_name, + ).select_from( + services_latest.join( + services_meta_data, + (services_meta_data.c.key == services_latest.c.key) + & (services_meta_data.c.version == services_latest.c.latest), + ).join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version), + ) + ) + print(query) + + query1 = query.where(services_meta_data.c.classifiers.contains(["osparc"])) + + query2 = query.where( + sa.func.array_length(services_meta_data.c.classifiers, 1) > 0 + ) + + # list services with gid=1 (x=1, w=0) and with type dynamic and classifier include osparc + query3 = query.where( + services_latest.c.key.like("simcore/services/dynamic/%%") + & services_meta_data.c.classifiers.contains(["osparc"]) + & (services_access_rights.c.gid == 1) + & (services_access_rights.c.execute_access.is_(True)) + ) + + for n, query in enumerate([query1, query2, query3]): + print("query", n, "-----") + rows = conn.execute(query).fetchall() + assert len(rows) <= services_fixture.num_services + print(rows) diff --git a/packages/postgres-database/tests/test_utils_services_environments.py b/packages/postgres-database/tests/test_utils_services_environments.py new file mode 100644 index 00000000000..0a58704d3f8 --- /dev/null +++ 
b/packages/postgres-database/tests/test_utils_services_environments.py @@ -0,0 +1,180 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import random +from typing import Any, NamedTuple, TypeAlias + +import pytest +from aiopg.sa.connection import SAConnection +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.services import services_meta_data +from simcore_postgres_database.models.services_environments import ( + VENDOR_SECRET_PREFIX, + services_vendor_secrets, +) +from simcore_postgres_database.utils_services_environments import get_vendor_secrets + + +@pytest.fixture +def vendor_services() -> list[str]: + return [f"simcore/services/dynamic/vendor/some_service_{i}" for i in range(10)] + + +class ExpectedSecrets(NamedTuple): + old_secrets: dict[str, Any] + new_secrets: dict[str, Any] + + +MappedExpectedSecretes: TypeAlias = dict[str, ExpectedSecrets] + + +@pytest.fixture +async def product_name(connection: SAConnection) -> str: + a_product_name = "a_prod" + await connection.execute( + products.insert().values(name=a_product_name, host_regex="") + ) + yield a_product_name + await connection.execute(products.delete()) + + +@pytest.fixture +async def expected_secrets( + connection: SAConnection, vendor_services: list[str], product_name: str +) -> MappedExpectedSecretes: + expected_secrets: MappedExpectedSecretes = {} + for k, vendor_service in enumerate(vendor_services): + # We define old and new secrets + old_secrets = { + VENDOR_SECRET_PREFIX + f"LICENSE_SERVER_HOST{k}": "product_a-server", + VENDOR_SECRET_PREFIX + f"LICENSE_SERVER_PRIMARY_PORT{k}": 1, + VENDOR_SECRET_PREFIX + f"LICENSE_SERVER_SECONDARY_PORT{k}": 2, + } + new_secrets = { + **old_secrets, + VENDOR_SECRET_PREFIX + f"LICENSE_DNS_RESOLVER_IP{k}": "1.1.1.1", + VENDOR_SECRET_PREFIX + f"LICENSE_DNS_RESOLVER_PORT{k}": "21", + VENDOR_SECRET_PREFIX + f"LICENSE_FILE{k}": "license.txt", + VENDOR_SECRET_PREFIX + f"LICENSE_FILE_PRODUCT1{k}": "license-p1.txt", + VENDOR_SECRET_PREFIX + f"LICENSE_FILE_PRODUCT2{k}": "license-p2.txt", + VENDOR_SECRET_PREFIX + f"LIST{k}": "[1, 2, 3]", + } + expected_secrets[vendor_service] = ExpectedSecrets(old_secrets, new_secrets) + + # 'other-service' + await connection.execute( + services_meta_data.insert().values( + key=f"{vendor_service}_other", + version="1.0.0", + name="other-service", + description="Some other service from a vendor", + ) + ) + + # Some versions of 'some_service' + for version, description in [ + ("0.0.1", "This has no secrets"), + ("0.0.2", "This has old_secrets"), # defined old_secrets + ("0.1.0", "This should inherit old_secrets"), + ("1.0.0", "This has new_secrets"), # defined new_secrets + ("1.2.0", "Latest version inherits new_secrets"), + ]: + await connection.execute( + services_meta_data.insert().values( + key=vendor_service, + version=version, + name="some-service", + description=description, + ) + ) + + await connection.execute( + services_vendor_secrets.insert().values( + service_key=vendor_service, + service_base_version="0.0.2", + secrets_map=old_secrets, + product_name=product_name, + ) + ) + + await connection.execute( + # a vendor exposes these environs to its services to everybody + services_vendor_secrets.insert().values( + service_key=vendor_service, + service_base_version="1.0.0", # valid from + secrets_map={ + ( + key.removeprefix(VENDOR_SECRET_PREFIX) + if bool(random.getrandbits(1)) + else key + ): value 
+ for key, value in new_secrets.items() + }, + product_name=product_name, + ) + ) + + return expected_secrets + + +def test_vendor_secret_prefix_must_end_with_underscore(): + assert VENDOR_SECRET_PREFIX.endswith("_") # should allow + + +async def test_get_latest_service_vendor_secrets( + connection: SAConnection, + vendor_services: list[str], + expected_secrets: MappedExpectedSecretes, + product_name: str, +): + # latest i.e. 1.2.0 + for vendor_service in vendor_services: + assert ( + await get_vendor_secrets(connection, product_name, vendor_service) + == expected_secrets[vendor_service].new_secrets + ) + + +@pytest.mark.parametrize( + "service_version,expected_result", + [ + ("0.0.1", "Empty"), + ("0.0.2", "Old"), + ("0.1.0", "Old"), + ("1.0.0", "New"), + ("1.2.0", "New"), + ], +) +async def test_get_service_vendor_secrets( + connection: SAConnection, + vendor_services: list[str], + expected_secrets: MappedExpectedSecretes, + service_version: str, + expected_result: str, + product_name: str, +): + # ("0.0.1", "This has no secrets"), + # ("0.0.2", "This has old_secrets"), # defined old_secrets + # ("0.1.0", "This should inherit old_secrets"), + # ("1.0.0", "This has new_secrets"), # defined new_secrets + # ("1.2.0", "Latest version inherits new_secrets"), + + for vendor_service in vendor_services: + match expected_result: + case "Empty": + expected = {} + case "Old": + expected = expected_secrets[vendor_service].old_secrets + case "New": + expected = expected_secrets[vendor_service].new_secrets + case _: + pytest.fail(f"{expected_result} not considered") + + assert ( + await get_vendor_secrets( + connection, product_name, vendor_service, service_version + ) + == expected + ) diff --git a/packages/postgres-database/tests/test_utils_tags.py b/packages/postgres-database/tests/test_utils_tags.py index 65930e7e683..e8a8fee9df4 100644 --- a/packages/postgres-database/tests/test_utils_tags.py +++ b/packages/postgres-database/tests/test_utils_tags.py @@ -3,20 +3,35 @@ # pylint: disable=unused-variable # pylint: disable=too-many-arguments -from typing import Any, Awaitable, Callable +from collections.abc import Awaitable, Callable +from typing import Any import pytest import sqlalchemy as sa from aiopg.sa.connection import SAConnection from aiopg.sa.result import RowProxy -from pytest_simcore.helpers.utils_tags import create_tag, create_tag_access -from simcore_postgres_database.models.tags import tags_to_groups +from pytest_simcore.helpers.postgres_tags import create_tag, create_tag_access +from simcore_postgres_database.models.tags_access_rights import tags_access_rights from simcore_postgres_database.models.users import UserRole, UserStatus +from simcore_postgres_database.utils import as_postgres_sql_query_str from simcore_postgres_database.utils_tags import ( TagNotFoundError, - TagOperationNotAllowed, + TagOperationNotAllowedError, TagsRepo, ) +from simcore_postgres_database.utils_tags_sql import ( + add_tag_to_project_stmt, + add_tag_to_services_stmt, + create_tag_stmt, + delete_tag_stmt, + get_tag_stmt, + get_tags_for_project_stmt, + get_tags_for_services_stmt, + list_tags_stmt, + update_tag_stmt, + upsert_tags_access_rights_stmt, +) +from sqlalchemy.ext.asyncio import AsyncEngine @pytest.fixture @@ -54,16 +69,19 @@ async def other_user( create_fake_user: Callable[[SAConnection, RowProxy, Any], RowProxy], connection: SAConnection, ) -> RowProxy: - user_ = await create_fake_user( + return await create_fake_user( connection, status=UserStatus.ACTIVE, role=UserRole.USER, ) - return user_ async 
def test_tags_access_with_primary_groups( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): conn = connection @@ -90,22 +108,29 @@ async def test_tags_access_with_primary_groups( ), ] - tags_repo = TagsRepo(user_id=user.id) + tags_repo = TagsRepo(asyncpg_engine) # repo has access assert ( - await tags_repo.access_count(conn, tag_id, read=True, write=True, delete=True) + await tags_repo.access_count( + user_id=user.id, tag_id=tag_id, read=True, write=True, delete=True + ) == 1 ) - assert await tags_repo.access_count(conn, tag_id, read=True, write=True) == 1 - assert await tags_repo.access_count(conn, tag_id, read=True) == 1 - assert await tags_repo.access_count(conn, tag_id, write=True) == 1 + assert ( + await tags_repo.access_count( + user_id=user.id, tag_id=tag_id, read=True, write=True + ) + == 1 + ) + assert await tags_repo.access_count(user_id=user.id, tag_id=tag_id, read=True) == 1 + assert await tags_repo.access_count(user_id=user.id, tag_id=tag_id, write=True) == 1 # changing access conditions assert ( await tags_repo.access_count( - conn, - tag_id, + user_id=user.id, + tag_id=tag_id, read=True, write=True, delete=False, # <--- @@ -116,15 +141,20 @@ async def test_tags_access_with_primary_groups( # user will have NO access to other user's tags even matching access rights assert ( await tags_repo.access_count( - conn, other_tag_id, read=True, write=True, delete=True + user_id=user.id, tag_id=other_tag_id, read=True, write=True, delete=True ) == 0 ) async def test_tags_access_with_multiple_groups( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): + conn = connection (tag_id, other_tag_id, group_tag_id, everyone_tag_id) = [ @@ -170,30 +200,58 @@ async def test_tags_access_with_multiple_groups( ), ] - tags_repo = TagsRepo(user_id=user.id) - other_repo = TagsRepo(user_id=other_user.id) + tags_repo = TagsRepo(asyncpg_engine) + other_repo = TagsRepo(asyncpg_engine) # tag_id assert ( - await tags_repo.access_count(conn, tag_id, read=True, write=True, delete=True) + await tags_repo.access_count( + user_id=user.id, tag_id=tag_id, read=True, write=True, delete=True + ) == 1 ) assert ( - await other_repo.access_count(conn, tag_id, read=True, write=True, delete=True) + await other_repo.access_count( + user_id=other_user.id, tag_id=tag_id, read=True, write=True, delete=True + ) == 0 ) # other_tag_id - assert await tags_repo.access_count(conn, other_tag_id, read=True) == 0 - assert await other_repo.access_count(conn, other_tag_id, read=True) == 1 + assert ( + await tags_repo.access_count(user_id=user.id, tag_id=other_tag_id, read=True) + == 0 + ) + assert ( + await other_repo.access_count( + user_id=other_user.id, tag_id=other_tag_id, read=True + ) + == 1 + ) # group_tag_id - assert await tags_repo.access_count(conn, group_tag_id, read=True) == 1 - assert await other_repo.access_count(conn, group_tag_id, read=True) == 0 + assert ( + await tags_repo.access_count(user_id=user.id, tag_id=group_tag_id, read=True) + == 1 + ) + assert ( + await other_repo.access_count( + user_id=other_user.id, tag_id=group_tag_id, read=True + ) + == 0 + ) # everyone_tag_id - assert await tags_repo.access_count(conn, everyone_tag_id, read=True) == 1 - assert await other_repo.access_count(conn, everyone_tag_id, 
read=True) == 1 + assert ( + await tags_repo.access_count(user_id=user.id, tag_id=everyone_tag_id, read=True) + == 1 + ) + assert ( + await other_repo.access_count( + user_id=other_user.id, tag_id=everyone_tag_id, read=True + ) + == 1 + ) # now group adds read for all tags for t in (tag_id, other_tag_id, everyone_tag_id): @@ -206,19 +264,29 @@ async def test_tags_access_with_multiple_groups( delete=False, ) - assert await tags_repo.access_count(conn, tag_id, read=True) == 2 - assert await tags_repo.access_count(conn, other_tag_id, read=True) == 1 - assert await tags_repo.access_count(conn, everyone_tag_id, read=True) == 2 + assert await tags_repo.access_count(user_id=user.id, tag_id=tag_id, read=True) == 2 + assert ( + await tags_repo.access_count(user_id=user.id, tag_id=other_tag_id, read=True) + == 1 + ) + assert ( + await tags_repo.access_count(user_id=user.id, tag_id=everyone_tag_id, read=True) + == 2 + ) async def test_tags_repo_list_and_get( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): conn = connection - tags_repo = TagsRepo(user_id=user.id) + tags_repo = TagsRepo(asyncpg_engine) # (1) no tags - listed_tags = await tags_repo.list(conn) + listed_tags = await tags_repo.list_all(user_id=user.id) assert not listed_tags # (2) one tag @@ -235,7 +303,7 @@ async def test_tags_repo_list_and_get( ) ] - listed_tags = await tags_repo.list(conn) + listed_tags = await tags_repo.list_all(user_id=user.id) assert listed_tags assert [t["id"] for t in listed_tags] == expected_tags_ids @@ -253,7 +321,7 @@ async def test_tags_repo_list_and_get( ) ) - listed_tags = await tags_repo.list(conn) + listed_tags = await tags_repo.list_all(user_id=user.id) assert {t["id"] for t in listed_tags} == set(expected_tags_ids) # (4) add another tag from a differnt user @@ -270,11 +338,11 @@ async def test_tags_repo_list_and_get( # same as before prev_listed_tags = listed_tags - listed_tags = await tags_repo.list(conn) + listed_tags = await tags_repo.list_all(user_id=user.id) assert listed_tags == prev_listed_tags # (5) add a global tag - tag_id = await create_tag( + await create_tag( conn, name="TG", description="tag for EVERYBODY", @@ -285,7 +353,7 @@ async def test_tags_repo_list_and_get( delete=False, ) - listed_tags = await tags_repo.list(conn) + listed_tags = await tags_repo.list_all(user_id=user.id) assert listed_tags == [ { "id": 1, @@ -316,8 +384,8 @@ async def test_tags_repo_list_and_get( }, ] - other_repo = TagsRepo(user_id=other_user.id) - assert await other_repo.list(conn) == [ + other_repo = TagsRepo(asyncpg_engine) + assert await other_repo.list_all(user_id=other_user.id) == [ { "id": 3, "name": "T3", @@ -339,7 +407,7 @@ async def test_tags_repo_list_and_get( ] # exclusive to user - assert await tags_repo.get(conn, tag_id=2) == { + assert await tags_repo.get(user_id=user.id, tag_id=2) == { "id": 2, "name": "T2", "description": "tag via std group", @@ -351,9 +419,9 @@ async def test_tags_repo_list_and_get( # exclusive ot other user with pytest.raises(TagNotFoundError): - assert await tags_repo.get(conn, tag_id=3) + assert await tags_repo.get(user_id=user.id, tag_id=3) - assert await other_repo.get(conn, tag_id=3) == { + assert await other_repo.get(user_id=other_user.id, tag_id=3) == { "id": 3, "name": "T3", "description": "tag for 2", @@ -364,14 +432,71 @@ async def test_tags_repo_list_and_get( } # a common tag - assert await tags_repo.get(conn, 
tag_id=4) == await other_repo.get(conn, tag_id=4) + assert await tags_repo.get(user_id=user.id, tag_id=4) == await other_repo.get( + user_id=user.id, tag_id=4 + ) + + +async def test_tags_repo_uniquely_list_or_get_shared_tags( + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, +): + conn = connection + tags_repo = TagsRepo(asyncpg_engine) + + # (1) create a tag which cannot be written + expected_tag_id = await create_tag( + conn, + name="T1", + description=f"tag for {user.id}", + color="blue", + group_id=user.primary_gid, + read=True, + write=False, # <-- cannot write + delete=True, + ) + + got = await tags_repo.get(user_id=user.id, tag_id=expected_tag_id) + assert got + assert got["id"] == expected_tag_id + assert got["read"] is True + assert got["write"] is False # <-- + assert got["delete"] is True + + # (2) share with standard group + await create_tag_access( + conn, + tag_id=expected_tag_id, + group_id=group.gid, + read=True, + write=True, # <-- group can write + delete=False, + ) + + # checks that the aggregation is the MOST permissive + # checks that user_id now has full access via its primary and its standard group + got = await tags_repo.get(user_id=user.id, tag_id=expected_tag_id) + assert got + assert got["id"] == expected_tag_id + assert got["read"] is True + assert got["write"] is True # <-- + assert got["delete"] is True + + user_tags = await tags_repo.list_all(user_id=user.id) + assert user_tags == [got] async def test_tags_repo_update( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): conn = connection - tags_repo = TagsRepo(user_id=user.id) + tags_repo = TagsRepo(asyncpg_engine) # Tags with different access rights readonly_tid, readwrite_tid, other_tid = [ @@ -407,11 +532,13 @@ ), ] - with pytest.raises(TagOperationNotAllowed): - await tags_repo.update(conn, tag_id=readonly_tid, description="modified") + with pytest.raises(TagOperationNotAllowedError): + await tags_repo.update( + user_id=user.id, tag_id=readonly_tid, description="modified" + ) assert await tags_repo.update( - conn, tag_id=readwrite_tid, description="modified" + user_id=user.id, tag_id=readwrite_tid, description="modified" ) == { "id": readwrite_tid, "name": "T2", @@ -422,15 +549,21 @@ "delete": False, } - with pytest.raises(TagOperationNotAllowed): - await tags_repo.update(conn, tag_id=other_tid, description="modified") + with pytest.raises(TagOperationNotAllowedError): + await tags_repo.update( + user_id=user.id, tag_id=other_tid, description="modified" + ) async def test_tags_repo_delete( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): conn = connection - tags_repo = TagsRepo(user_id=user.id) + tags_repo = TagsRepo(asyncpg_engine) # Tags with different access rights readonly_tid, delete_tid, other_tid = [ @@ -467,35 +600,36 @@ ] # cannot delete - with pytest.raises(TagOperationNotAllowed): - await tags_repo.delete(conn, tag_id=readonly_tid) + with pytest.raises(TagOperationNotAllowedError): + await tags_repo.delete(user_id=user.id, tag_id=readonly_tid) # can delete - await tags_repo.get(conn, tag_id=delete_tid) - await tags_repo.delete(conn, 
tag_id=delete_tid) + await tags_repo.get(user_id=user.id, tag_id=delete_tid) + await tags_repo.delete(user_id=user.id, tag_id=delete_tid) with pytest.raises(TagNotFoundError): - await tags_repo.get(conn, tag_id=delete_tid) + await tags_repo.get(user_id=user.id, tag_id=delete_tid) # cannot delete - with pytest.raises(TagOperationNotAllowed): - await tags_repo.delete(conn, tag_id=other_tid) + with pytest.raises(TagOperationNotAllowedError): + await tags_repo.delete(user_id=user.id, tag_id=other_tid) async def test_tags_repo_create( - connection: SAConnection, user: RowProxy, group: RowProxy, other_user: RowProxy + asyncpg_engine: AsyncEngine, + connection: SAConnection, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, ): conn = connection - tags_repo = TagsRepo(user_id=user.id) + tags_repo = TagsRepo(asyncpg_engine) tag_1 = await tags_repo.create( - conn, + user_id=user.id, name="T1", description="my first tag", color="pink", - read=True, - write=True, - delete=True, ) assert tag_1 == { "id": 1, @@ -510,9 +644,194 @@ async def test_tags_repo_create( # assigned primary group assert ( await conn.scalar( - sa.select([tags_to_groups.c.group_id]).where( - tags_to_groups.c.tag_id == tag_1["id"] + sa.select(tags_access_rights.c.group_id).where( + tags_access_rights.c.tag_id == tag_1["id"] ) ) == user.primary_gid ) + + # Checks defaults to full ownership + assert await tags_repo.has_access_rights( + user_id=user.id, + tag_id=tag_1["id"], + read=True, + write=True, + delete=True, + ) + + +async def test_tags_repo_access_rights( + asyncpg_engine: AsyncEngine, + user: RowProxy, + group: RowProxy, + other_user: RowProxy, +): + tags_repo = TagsRepo(asyncpg_engine) + tag = await tags_repo.create( + user_id=user.id, + name="T1", + description="my first tag", + color="pink", + ) + + # check ownership + tag_accesses = await tags_repo.list_access_rights(tag_id=tag["id"]) + assert len(tag_accesses) == 1 + user_access = tag_accesses[0] + assert user_access == { + "group_id": user.primary_gid, + "tag_id": tag["id"], + "read": True, + "write": True, + "delete": True, + } + + assert await tags_repo.has_access_rights( + user_id=user.id, + tag_id=tag["id"], + read=True, + write=True, + delete=True, + ) + + # CREATE access for other_user + other_user_access = await tags_repo.create_or_update_access_rights( + tag_id=tag["id"], + group_id=other_user.primary_gid, + read=True, + write=False, + delete=False, + ) + + assert not await tags_repo.has_access_rights( + user_id=other_user.id, + tag_id=tag["id"], + read=user_access["read"], + write=user_access["write"], + delete=user_access["delete"], + ) + + assert await tags_repo.has_access_rights( + user_id=other_user.id, + tag_id=tag["id"], + read=other_user_access["read"], + write=other_user_access["write"], + delete=other_user_access["delete"], + ) + + tag_accesses = await tags_repo.list_access_rights(tag_id=tag["id"]) + assert len(tag_accesses) == 2 + + # UPDATE access + updated_access = await tags_repo.create_or_update_access_rights( + tag_id=tag["id"], + group_id=other_user.primary_gid, + read=False, # <-- + write=False, + delete=False, + ) + assert updated_access != other_user_access + + # checks partial + assert await tags_repo.has_access_rights( + user_id=other_user.id, + tag_id=tag["id"], + read=False, + ) + + assert not await tags_repo.has_access_rights( + user_id=other_user.id, tag_id=tag["id"], write=True + ) + + # DELETE access to other-user + await tags_repo.delete_access_rights( + tag_id=tag["id"], + group_id=other_user.primary_gid, + ) + + 
tag_accesses = await tags_repo.list_access_rights(tag_id=tag["id"]) + assert len(tag_accesses) == 1 + + +def test_building_tags_sql_statements(): + def _check(func_smt, **kwargs): + print(f"{func_smt.__name__:*^100}") + stmt = func_smt(**kwargs) + print() + print(as_postgres_sql_query_str(stmt)) + print() + + # some data + product_name = "osparc" + user_id = 425 # 4 + tag_id = 4 + project_index = 1 + project_uuid = "106f8b4b-ffb6-459a-a27b-981c779e6d3f" + service_key = "simcore/services/comp/isolve" + service_version = "2.0.85" + + _check( + list_tags_stmt, + user_id=user_id, + ) + + _check( + get_tag_stmt, + user_id=user_id, + tag_id=tag_id, + ) + + _check( + create_tag_stmt, + name="foo", + description="description", + ) + + _check( + upsert_tags_access_rights_stmt, + tag_id=tag_id, + user_id=user_id, + read=True, + write=True, + delete=True, + ) + + _check( + update_tag_stmt, + user_id=user_id, + tag_id=tag_id, + # updates + name="foo", + ) + + _check( + delete_tag_stmt, + user_id=user_id, + tag_id=tag_id, + ) + + _check( + get_tags_for_project_stmt, + project_index=project_index, + ) + + _check( + get_tags_for_services_stmt, + key=service_key, + version=service_version, + ) + + _check( + add_tag_to_project_stmt, + project_index=project_index, + tag_id=tag_id, + project_uuid_for_rut=project_uuid, + ) + + _check( + add_tag_to_services_stmt, + key=service_key, + version=service_version, + tag_id=tag_id, + ) diff --git a/packages/postgres-database/tests/test_utils_user_preferences.py b/packages/postgres-database/tests/test_utils_user_preferences.py new file mode 100644 index 00000000000..0f70dcccf20 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_user_preferences.py @@ -0,0 +1,298 @@ +# pylint: disable=inconsistent-return-statements +# pylint: disable=redefined-outer-name + +from collections.abc import Awaitable, Callable +from typing import Any + +import pytest +from faker import Faker +from pytest_simcore.helpers.faker_factories import random_user +from simcore_postgres_database.models.users import UserRole, users +from simcore_postgres_database.utils_user_preferences import ( + BasePreferencesRepo, + FrontendUserPreferencesRepo, + UserServicesUserPreferencesRepo, +) +from sqlalchemy.engine.row import Row +from sqlalchemy.ext.asyncio import AsyncEngine + + +@pytest.fixture +def preference_one() -> str: + return "pref_one" + + +@pytest.fixture +def preference_two() -> str: + return "pref_two" + + +@pytest.fixture +async def product_name( + create_fake_product: Callable[[str], Awaitable[Row]], +) -> str: + product = await create_fake_product("fake-product") + return product[0] + + +@pytest.fixture(params=[FrontendUserPreferencesRepo, UserServicesUserPreferencesRepo]) +def preference_repo(request: pytest.FixtureRequest) -> type[BasePreferencesRepo]: + return request.param + + +async def _assert_save_get_preference( + asyncpg_engine: AsyncEngine, + preference_repo: type[BasePreferencesRepo], + *, + user_id: int, + preference_name: str, + product_name: str, + payload: Any, +) -> None: + async with asyncpg_engine.begin() as connection: + await preference_repo.save( + connection, + user_id=user_id, + preference_name=preference_name, + product_name=product_name, + payload=payload, + ) + async with asyncpg_engine.connect() as connection: + get_res_2: Any | None = await preference_repo.load( + connection, + user_id=user_id, + preference_name=preference_name, + product_name=product_name, + ) + assert get_res_2 is not None + assert get_res_2 == payload + + +async def 
_assert_preference_not_saved( + asyncpg_engine: AsyncEngine, + preference_repo: type[BasePreferencesRepo], + *, + user_id: int, + preference_name: str, + product_name: str, +) -> None: + async with asyncpg_engine.connect() as connection: + not_found: Any | None = await preference_repo.load( + connection, + user_id=user_id, + preference_name=preference_name, + product_name=product_name, + ) + assert not_found is None + + +def _get_random_payload( + faker: Faker, preference_repo: type[BasePreferencesRepo] +) -> Any: + if preference_repo == FrontendUserPreferencesRepo: + return {faker.pystr(): faker.pystr()} + if preference_repo == UserServicesUserPreferencesRepo: + return faker.pystr(max_chars=10000).encode() + + pytest.fail(f"Did not define a case for {preference_repo=}. Please add one.") + + +async def _create_user_id(asyncpg_engine: AsyncEngine, faker: Faker) -> int: + data = random_user(role=faker.random_element(elements=UserRole)) + async with asyncpg_engine.begin() as connection: + user_id = await connection.scalar( + users.insert().values(**data).returning(users.c.id) + ) + assert user_id + return user_id + + +async def test_user_preference_repo_workflow( + asyncpg_engine: AsyncEngine, + preference_repo: type[BasePreferencesRepo], + preference_one: str, + product_name: str, + faker: Faker, +): + user_id = await _create_user_id(asyncpg_engine, faker) + # preference is not saved + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_name, + ) + + payload_1 = _get_random_payload(faker, preference_repo) + payload_2 = _get_random_payload(faker, preference_repo) + assert payload_1 != payload_2 + + # store the preference for the first time + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_name, + payload=payload_1, + ) + + # updating the preference still works + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_name, + payload=payload_2, + ) + + +async def test__same_preference_name_product_name__different_users( + asyncpg_engine: AsyncEngine, + preference_repo: type[BasePreferencesRepo], + preference_one: str, + product_name: str, + faker: Faker, +): + user_id_1 = await _create_user_id(asyncpg_engine, faker) + user_id_2 = await _create_user_id(asyncpg_engine, faker) + + payload_1 = _get_random_payload(faker, preference_repo) + payload_2 = _get_random_payload(faker, preference_repo) + assert payload_1 != payload_2 + + # save preference for first user + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id_1, + preference_name=preference_one, + product_name=product_name, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id_1, + preference_name=preference_one, + product_name=product_name, + payload=payload_1, + ) + + # save preference for second user + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id_2, + preference_name=preference_one, + product_name=product_name, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id_2, + preference_name=preference_one, + product_name=product_name, + payload=payload_2, + ) + + +async def test__same_user_preference_name__different_product_name( + asyncpg_engine: AsyncEngine, + create_fake_product: 
Callable[[str], Awaitable[Row]], + preference_repo: type[BasePreferencesRepo], + preference_one: str, + faker: Faker, +): + product_1 = (await create_fake_product("fake-product-1"))[0] + product_2 = (await create_fake_product("fake-product-2"))[0] + + user_id = await _create_user_id(asyncpg_engine, faker) + + payload_1 = _get_random_payload(faker, preference_repo) + payload_2 = _get_random_payload(faker, preference_repo) + assert payload_1 != payload_2 + + # save for first product_name + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_1, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_1, + payload=payload_1, + ) + + # save for second product_name + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_2, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_2, + payload=payload_2, + ) + + +async def test__same_product_name_user__different_preference_name( + asyncpg_engine: AsyncEngine, + preference_repo: type[BasePreferencesRepo], + preference_one: str, + preference_two: str, + product_name: str, + faker: Faker, +): + user_id = await _create_user_id(asyncpg_engine, faker) + + payload_1 = _get_random_payload(faker, preference_repo) + payload_2 = _get_random_payload(faker, preference_repo) + assert payload_1 != payload_2 + + # save first preference + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_name, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_one, + product_name=product_name, + payload=payload_1, + ) + + # save second preference + await _assert_preference_not_saved( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_two, + product_name=product_name, + ) + await _assert_save_get_preference( + asyncpg_engine, + preference_repo, + user_id=user_id, + preference_name=preference_two, + product_name=product_name, + payload=payload_2, + ) diff --git a/packages/postgres-database/tests/test_utils_users.py b/packages/postgres-database/tests/test_utils_users.py new file mode 100644 index 00000000000..d4a7039f1f3 --- /dev/null +++ b/packages/postgres-database/tests/test_utils_users.py @@ -0,0 +1,51 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterable +from typing import Any + +import pytest +from faker import Faker +from pytest_simcore.helpers.faker_factories import ( + random_user, +) +from pytest_simcore.helpers.postgres_tools import ( + insert_and_get_row_lifespan, +) +from simcore_postgres_database.models.users import UserRole, users +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, +) +from simcore_postgres_database.utils_users import UserNotFoundInRepoError, UsersRepo +from sqlalchemy.ext.asyncio import AsyncEngine + + +@pytest.fixture +async def user( + faker: Faker, + asyncpg_engine: AsyncEngine, +) -> AsyncIterable[dict[str, Any]]: + async with insert_and_get_row_lifespan( # 
pylint:disable=contextmanager-generator-missing-cleanup + asyncpg_engine, + table=users, + values=random_user( + faker, + role=faker.random_element(elements=UserRole), + ), + pk_col=users.c.id, + ) as row: + yield row + + +async def test_users_repo_get(asyncpg_engine: AsyncEngine, user: dict[str, Any]): + repo = UsersRepo() + + async with pass_or_acquire_connection(asyncpg_engine) as connection: + assert await repo.get_email(connection, user_id=user["id"]) == user["email"] + assert await repo.get_role(connection, user_id=user["id"]) == user["role"] + + with pytest.raises(UserNotFoundInRepoError): + await repo.get_role(connection, user_id=55) diff --git a/packages/pytest-simcore/pyproject.toml b/packages/pytest-simcore/pyproject.toml new file mode 100644 index 00000000000..c62cb00c684 --- /dev/null +++ b/packages/pytest-simcore/pyproject.toml @@ -0,0 +1,13 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "pytest-simcore" +version = "0.1.0" +requires-python = ">=3.10" +dependencies = [ + "fastapi[standard]>=0.115.12", + "python-socketio>=5.12.1", + "uvicorn>=0.34.0", +] diff --git a/packages/pytest-simcore/requirements/coverage.txt b/packages/pytest-simcore/requirements/coverage.txt deleted file mode 100644 index 5d48901d416..00000000000 --- a/packages/pytest-simcore/requirements/coverage.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -# Include these tools for test coverage -# -pytest-cov -coveralls -coverage -codecov diff --git a/packages/pytest-simcore/requirements/mocks.txt b/packages/pytest-simcore/requirements/mocks.txt deleted file mode 100644 index 34ade1358be..00000000000 --- a/packages/pytest-simcore/requirements/mocks.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -# Include this libraries for mocking -# - -pytest-mock -faker diff --git a/packages/pytest-simcore/requirements/tests_base.txt b/packages/pytest-simcore/requirements/tests_base.txt deleted file mode 100644 index 5285b8dab9d..00000000000 --- a/packages/pytest-simcore/requirements/tests_base.txt +++ /dev/null @@ -1,20 +0,0 @@ -# -# Essential testing libraries and some additional plugins -# -pytest - -# enhances tests logs -pytest-icdiff -pytest-instafail -pytest-sugar - -# enhances test runs -pytest-xdist -pytest-runner # TODO: check if REQUIRED ANYMORE ??? - -# adds functionality -pytest-lazy-fixture - - -# support testing features built on frameworks -# pytest-docker diff --git a/packages/pytest-simcore/requirements/tests_w_aiohttp.txt b/packages/pytest-simcore/requirements/tests_w_aiohttp.txt deleted file mode 100644 index c20d7bd04c1..00000000000 --- a/packages/pytest-simcore/requirements/tests_w_aiohttp.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -# Requirements to test aiohttp-specific services -# -pytest-aiohttp # incompatible with pytest-asyncio. 
See https://github.com/pytest-dev/pytest-asyncio/issues/76 - -# fixtures -aioresponses diff --git a/packages/pytest-simcore/requirements/tests_w_fastapi.txt b/packages/pytest-simcore/requirements/tests_w_fastapi.txt deleted file mode 100644 index 3a2513e2e19..00000000000 --- a/packages/pytest-simcore/requirements/tests_w_fastapi.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -# Requirements to test fastapi-specific services -# - -# fixtures -asgi_lifespan diff --git a/packages/pytest-simcore/setup.py b/packages/pytest-simcore/setup.py index 61874d47ea8..caf77e2ade9 100644 --- a/packages/pytest-simcore/setup.py +++ b/packages/pytest-simcore/setup.py @@ -5,26 +5,27 @@ CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent -SETUP = dict( - name="pytest-simcore", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "name": "pytest-simcore", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": ", ".join( ( "Pedro Crespo-Valero (pcrespov)", "Sylvain Anderegg (sanderegg)", ) ), - description="pytest plugin with fixtures and test helpers for osparc-simcore repo modules", - py_modules=["pytest_simcore"], + "description": "pytest plugin with fixtures and test helpers for osparc-simcore repo modules", + "py_modules": ["pytest_simcore"], # WARNING: this is used in frozen services as well !!!! - python_requires=">=3.6", - install_requires=["pytest>=3.5.0"], - extras_require={ + "python_requires": ">=3.10", + "install_requires": ["pytest>=3.5.0"], + "extras_require": { "all": [ "aio-pika", "aiohttp", "aioredis", "docker", + "moto[server]", "python-socketio", "PyYAML", "sqlalchemy[postgresql_psycopg2binary]", @@ -32,9 +33,9 @@ "yarl", ], }, - packages=find_packages(where="src"), - package_dir={"": "src"}, - classifiers=[ + "packages": find_packages(where="src"), + "package_dir": {"": "src"}, + "classifiers": [ "Development Status :: 4 - Beta", "Framework :: Pytest", "Intended Audience :: Developers", @@ -42,8 +43,8 @@ "Operating System :: OS Independent", "License :: OSI Approved :: MIT License", ], - entry_points={"pytest11": ["simcore = pytest_simcore"]}, -) + "entry_points": {"pytest11": ["simcore = pytest_simcore"]}, +} if __name__ == "__main__": diff --git a/packages/pytest-simcore/src/pytest_simcore/__init__.py b/packages/pytest-simcore/src/pytest_simcore/__init__.py index b0019fbe786..8716d997ef2 100644 --- a/packages/pytest-simcore/src/pytest_simcore/__init__.py +++ b/packages/pytest-simcore/src/pytest_simcore/__init__.py @@ -1,17 +1,32 @@ # Collection of tests fixtures for integration testing -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution("pytest-simcore").version +import pytest +# NOTE: this ensures that assertion printouts are nicely formated and complete see https://lorepirri.com/pytest-register-assert-rewrite.html +pytest.register_assert_rewrite("pytest_simcore.helpers") -def pytest_addoption(parser): - group = parser.getgroup("simcore") - group.addoption( +__version__: str = version("pytest-simcore") + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore", description="pytest-simcore options") + simcore_group.addoption( "--keep-docker-up", action="store_true", default=False, help="Keep stack/registry up after fixtures closes", ) - # DUMMY - parser.addini("HELLO", "Dummy pytest.ini setting") + +@pytest.fixture(scope="session") +def keep_docker_up(request: pytest.FixtureRequest) -> bool: + flag: bool = 
bool(request.config.getoption(name="--keep-docker-up", default=False)) + return flag + + +@pytest.fixture +def is_pdb_enabled(request: pytest.FixtureRequest): + """Returns true if tests are set to use interactive debugger, i.e. --pdb""" + options = request.config.option + return options.usepdb diff --git a/packages/pytest-simcore/src/pytest_simcore/aioresponses_mocker.py b/packages/pytest-simcore/src/pytest_simcore/aioresponses_mocker.py index 11beaa7e325..a81a87acfea 100644 --- a/packages/pytest-simcore/src/pytest_simcore/aioresponses_mocker.py +++ b/packages/pytest-simcore/src/pytest_simcore/aioresponses_mocker.py @@ -1,7 +1,9 @@ +from collections.abc import Iterator + import pytest -from aioresponses import aioresponses as AioResponsesMock +from aioresponses import aioresponses as AioResponsesMock # noqa: N812 -from .helpers.utils_docker import get_localhost_ip +from .helpers.host import get_localhost_ip # WARNING: any request done through the client will go through aioresponses. It is # unfortunate but that means any valid request (like calling the test server) prefix must be set as passthrough. @@ -14,7 +16,7 @@ @pytest.fixture -def aioresponses_mocker() -> AioResponsesMock: +def aioresponses_mocker() -> Iterator[AioResponsesMock]: """Generick aioresponses mock SEE https://github.com/pnuckowski/aioresponses diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py b/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py new file mode 100644 index 00000000000..f971ef9b8f7 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/aws_ec2_service.py @@ -0,0 +1,149 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +import contextlib +import datetime +import random +from collections.abc import AsyncIterator, Callable +from typing import cast + +import aioboto3 +import pytest +from aiobotocore.session import ClientCreatorContext +from aws_library.ec2 import EC2InstanceData, Resources +from faker import Faker +from pydantic import ByteSize +from settings_library.ec2 import EC2Settings +from types_aiobotocore_ec2.client import EC2Client + + +@pytest.fixture +async def ec2_client( + ec2_settings: EC2Settings, +) -> AsyncIterator[EC2Client]: + session = aioboto3.Session() + exit_stack = contextlib.AsyncExitStack() + session_client = session.client( + "ec2", + endpoint_url=ec2_settings.EC2_ENDPOINT, + aws_access_key_id=ec2_settings.EC2_ACCESS_KEY_ID, + aws_secret_access_key=ec2_settings.EC2_SECRET_ACCESS_KEY, + region_name=ec2_settings.EC2_REGION_NAME, + ) + assert isinstance(session_client, ClientCreatorContext) + ec2_client = cast(EC2Client, await exit_stack.enter_async_context(session_client)) + + yield ec2_client + + await exit_stack.aclose() + + +@pytest.fixture(scope="session") +def vpc_cidr_block() -> str: + return "10.0.0.0/16" + + +@pytest.fixture +async def aws_vpc_id( + ec2_client: EC2Client, + vpc_cidr_block: str, +) -> AsyncIterator[str]: + vpc = await ec2_client.create_vpc( + CidrBlock=vpc_cidr_block, + ) + vpc_id = vpc["Vpc"]["VpcId"] # type: ignore + print(f"--> Created Vpc in AWS with {vpc_id=}") + yield vpc_id + + await ec2_client.delete_vpc(VpcId=vpc_id) + print(f"<-- Deleted Vpc in AWS with {vpc_id=}") + + +@pytest.fixture(scope="session") +def subnet_cidr_block() -> str: + return "10.0.1.0/24" + + +@pytest.fixture +async def aws_subnet_id( + aws_vpc_id: str, + ec2_client: EC2Client, + subnet_cidr_block: str, +) -> AsyncIterator[str]: + subnet = await ec2_client.create_subnet( + 
CidrBlock=subnet_cidr_block, VpcId=aws_vpc_id + ) + assert "Subnet" in subnet + assert "SubnetId" in subnet["Subnet"] + subnet_id = subnet["Subnet"]["SubnetId"] + print(f"--> Created Subnet in AWS with {subnet_id=}") + + yield subnet_id + + # all the instances in the subnet must be terminated before that works + instances_in_subnet = await ec2_client.describe_instances( + Filters=[{"Name": "subnet-id", "Values": [subnet_id]}] + ) + if instances_in_subnet["Reservations"]: + print(f"--> terminating {len(instances_in_subnet)} instances in subnet") + await ec2_client.terminate_instances( + InstanceIds=[ + instance["Instances"][0]["InstanceId"] # type: ignore + for instance in instances_in_subnet["Reservations"] + ] + ) + print(f"<-- terminated {len(instances_in_subnet)} instances in subnet") + + await ec2_client.delete_subnet(SubnetId=subnet_id) + subnets = await ec2_client.describe_subnets() + print(f"<-- Deleted Subnet in AWS with {subnet_id=}") + print(f"current {subnets=}") + + +@pytest.fixture +async def aws_security_group_id( + faker: Faker, + aws_vpc_id: str, + ec2_client: EC2Client, +) -> AsyncIterator[str]: + security_group = await ec2_client.create_security_group( + Description=faker.text(), GroupName=faker.pystr(), VpcId=aws_vpc_id + ) + security_group_id = security_group["GroupId"] + print(f"--> Created Security Group in AWS with {security_group_id=}") + yield security_group_id + await ec2_client.delete_security_group(GroupId=security_group_id) + print(f"<-- Deleted Security Group in AWS with {security_group_id=}") + + +@pytest.fixture +async def aws_ami_id( + ec2_client: EC2Client, +) -> str: + images = await ec2_client.describe_images() + image = random.choice(images["Images"]) # noqa: S311 + assert "ImageId" in image + return image["ImageId"] + + +@pytest.fixture +def fake_ec2_instance_data(faker: Faker) -> Callable[..., EC2InstanceData]: + def _creator(**overrides) -> EC2InstanceData: + return EC2InstanceData( + **( + { + "launch_time": faker.date_time(tzinfo=datetime.timezone.utc), + "id": faker.uuid4(), + "aws_private_dns": f"ip-{faker.ipv4().replace('.', '-')}.ec2.internal", + "aws_public_ip": faker.ipv4(), + "type": faker.pystr(), + "state": faker.pystr(), + "resources": Resources(cpus=4.0, ram=ByteSize(1024 * 1024)), + "tags": faker.pydict(allowed_types=(str,)), + } + | overrides + ) + ) + + return _creator diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_iam_service.py b/packages/pytest-simcore/src/pytest_simcore/aws_iam_service.py new file mode 100644 index 00000000000..c5d8e692d3a --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/aws_iam_service.py @@ -0,0 +1,57 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +import contextlib +import logging +from collections.abc import AsyncIterator +from typing import cast + +import aioboto3 +import pytest +from aiobotocore.session import ClientCreatorContext +from faker import Faker +from settings_library.ec2 import EC2Settings +from types_aiobotocore_iam.client import IAMClient + +from .helpers.logging_tools import log_context + + +@pytest.fixture +async def iam_client( + ec2_settings: EC2Settings, +) -> AsyncIterator[IAMClient]: + session = aioboto3.Session() + exit_stack = contextlib.AsyncExitStack() + session_client = session.client( + "iam", + endpoint_url=ec2_settings.EC2_ENDPOINT, + aws_access_key_id=ec2_settings.EC2_ACCESS_KEY_ID, + aws_secret_access_key=ec2_settings.EC2_SECRET_ACCESS_KEY, + region_name=ec2_settings.EC2_REGION_NAME, + ) 
+ assert isinstance(session_client, ClientCreatorContext) + iam_client = cast(IAMClient, await exit_stack.enter_async_context(session_client)) + + yield iam_client + + await exit_stack.aclose() + + +@pytest.fixture +async def aws_instance_profile( + iam_client: IAMClient, faker: Faker +) -> AsyncIterator[str]: + + profile = await iam_client.create_instance_profile( + InstanceProfileName=faker.pystr(), + ) + profile_arn = profile["InstanceProfile"]["Arn"] + with log_context( + logging.INFO, msg=f"Created InstanceProfile in AWS with {profile_arn=}" + ): + yield profile_arn + + await iam_client.delete_instance_profile( + InstanceProfileName=profile["InstanceProfile"]["InstanceProfileName"] + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_s3_service.py b/packages/pytest-simcore/src/pytest_simcore/aws_s3_service.py new file mode 100644 index 00000000000..893c04e75f2 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/aws_s3_service.py @@ -0,0 +1,101 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + + +import contextlib +import typing + +import aioboto3 +import pytest +from aiobotocore.session import ClientCreatorContext +from botocore.client import Config +from settings_library.s3 import S3Settings +from types_aiobotocore_s3 import S3Client + + +@pytest.fixture +def s3_settings() -> S3Settings: + return S3Settings.create_from_envs() + + +@pytest.fixture +async def s3_client(s3_settings: S3Settings) -> typing.AsyncIterator[S3Client]: + session = aioboto3.Session() + exit_stack = contextlib.AsyncExitStack() + session_client = session.client( + "s3", + endpoint_url=f"{s3_settings.S3_ENDPOINT}" if s3_settings.S3_ENDPOINT else None, + aws_access_key_id=s3_settings.S3_ACCESS_KEY, + aws_secret_access_key=s3_settings.S3_SECRET_KEY, + region_name=s3_settings.S3_REGION, + config=Config(signature_version="s3v4"), + ) + assert isinstance(session_client, ClientCreatorContext) + client = typing.cast(S3Client, await exit_stack.enter_async_context(session_client)) # type: ignore[arg-type] + + yield client + + await exit_stack.aclose() + + +async def _empty_bucket(s3_client: S3Client, bucket_name: str) -> None: + # List object versions + response = await s3_client.list_object_versions(Bucket=bucket_name) + + # Delete all object versions + for version in response.get("Versions", []): + assert "Key" in version + assert "VersionId" in version + await s3_client.delete_object( + Bucket=bucket_name, Key=version["Key"], VersionId=version["VersionId"] + ) + + # Delete all delete markers + for marker in response.get("DeleteMarkers", []): + assert "Key" in marker + assert "VersionId" in marker + await s3_client.delete_object( + Bucket=bucket_name, Key=marker["Key"], VersionId=marker["VersionId"] + ) + + # Delete remaining objects in the bucket + response = await s3_client.list_objects(Bucket=bucket_name) + for obj in response.get("Contents", []): + assert "Key" in obj + await s3_client.delete_object(Bucket=bucket_name, Key=obj["Key"]) + + +@pytest.fixture +async def s3_bucket( + s3_settings: S3Settings, s3_client: S3Client +) -> typing.AsyncIterator[str]: + bucket_name = s3_settings.S3_BUCKET_NAME + + response = await s3_client.list_buckets() + bucket_exists = bucket_name in [ + bucket_struct.get("Name") for bucket_struct in response["Buckets"] + ] + if bucket_exists: + await _empty_bucket(s3_client, bucket_name) + + if not bucket_exists: + await s3_client.create_bucket(Bucket=bucket_name) + response = await s3_client.list_buckets() 
+ assert response["Buckets"] + assert bucket_name in [ + bucket_struct.get("Name") for bucket_struct in response["Buckets"] + ], f"failed creating {bucket_name}" + + yield bucket_name + + await _empty_bucket(s3_client, bucket_name) + + +@pytest.fixture +async def with_bucket_versioning_enabled(s3_client: S3Client, s3_bucket: str) -> str: + await s3_client.put_bucket_versioning( + Bucket=s3_bucket, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) + return s3_bucket diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_server.py b/packages/pytest-simcore/src/pytest_simcore/aws_server.py new file mode 100644 index 00000000000..74f007973c5 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/aws_server.py @@ -0,0 +1,139 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +from collections.abc import Iterator +from unittest import mock + +import pytest +import requests +from aiohttp.test_utils import unused_port +from faker import Faker +from models_library.utils.fastapi_encoders import jsonable_encoder +from moto.server import ThreadedMotoServer +from pydantic import SecretStr +from pytest_mock.plugin import MockerFixture +from settings_library.basic_types import IDStr +from settings_library.ec2 import EC2Settings +from settings_library.s3 import S3Settings +from settings_library.ssm import SSMSettings + +from .helpers.host import get_localhost_ip +from .helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from .helpers.moto import patched_aiobotocore_make_api_call + + +@pytest.fixture(scope="module") +def mocked_aws_server() -> Iterator[ThreadedMotoServer]: + """creates a moto-server that emulates AWS services in place + NOTE: Never use a bucket with underscores it fails!! 
+ """ + server = ThreadedMotoServer(ip_address=get_localhost_ip(), port=unused_port()) + # pylint: disable=protected-access + print( + f"--> started mock AWS server on {server._ip_address}:{server._port}" # noqa: SLF001 + ) + print( + f"--> Dashboard available on [http://{server._ip_address}:{server._port}/moto-api/]" # noqa: SLF001 + ) + server.start() + yield server + server.stop() + print( + f"<-- stopped mock AWS server on {server._ip_address}:{server._port}" # noqa: SLF001 + ) + + +@pytest.fixture +def reset_aws_server_state(mocked_aws_server: ThreadedMotoServer) -> Iterator[None]: + # NOTE: reset_aws_server_state [http://docs.getmoto.org/en/latest/docs/server_mode.html#reset-api] + yield + # pylint: disable=protected-access + requests.post( + f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}/moto-api/reset", # noqa: SLF001 + timeout=10, + ) + print( + f"<-- cleaned mock AWS server on {mocked_aws_server._ip_address}:{mocked_aws_server._port}" # noqa: SLF001 + ) + + +@pytest.fixture +def mocked_ec2_server_settings( + mocked_aws_server: ThreadedMotoServer, + reset_aws_server_state: None, +) -> EC2Settings: + return EC2Settings( + EC2_ACCESS_KEY_ID="xxx", + EC2_ENDPOINT=f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # pylint: disable=protected-access # noqa: SLF001 + EC2_SECRET_ACCESS_KEY="xxx", # noqa: S106 + ) + + +@pytest.fixture +def mocked_ec2_server_envs( + mocked_ec2_server_settings: EC2Settings, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + changed_envs: EnvVarsDict = mocked_ec2_server_settings.model_dump() + return setenvs_from_dict(monkeypatch, {**changed_envs}) + + +@pytest.fixture +def with_patched_ssm_server( + mocker: MockerFixture, external_envfile_dict: EnvVarsDict +) -> mock.Mock: + if external_envfile_dict: + # NOTE: we run against AWS. 
so no need to mock + return mock.Mock() + return mocker.patch( + "aiobotocore.client.AioBaseClient._make_api_call", + side_effect=patched_aiobotocore_make_api_call, + autospec=True, + ) + + +@pytest.fixture +def mocked_ssm_server_settings( + mocked_aws_server: ThreadedMotoServer, + with_patched_ssm_server: mock.Mock, + reset_aws_server_state: None, +) -> SSMSettings: + return SSMSettings( + SSM_ACCESS_KEY_ID=SecretStr("xxx"), + SSM_ENDPOINT=f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # type: ignore[arg-type] # pylint: disable=protected-access # noqa: SLF001 + SSM_SECRET_ACCESS_KEY=SecretStr("xxx"), + ) + + +@pytest.fixture +def mocked_ssm_server_envs( + mocked_ssm_server_settings: SSMSettings, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + changed_envs: EnvVarsDict = jsonable_encoder(mocked_ssm_server_settings) + return setenvs_from_dict(monkeypatch, {**changed_envs}) + + +@pytest.fixture +def mocked_s3_server_settings( + mocked_aws_server: ThreadedMotoServer, reset_aws_server_state: None, faker: Faker +) -> S3Settings: + return S3Settings( + S3_ACCESS_KEY=IDStr("xxx"), + S3_ENDPOINT=f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # type: ignore[arg-type] # pylint: disable=protected-access # noqa: SLF001 + S3_SECRET_KEY=IDStr("xxx"), + S3_BUCKET_NAME=IDStr(f"pytest{faker.pystr().lower()}"), + S3_REGION=IDStr("us-east-1"), + ) + + +@pytest.fixture +def mocked_s3_server_envs( + mocked_s3_server_settings: S3Settings, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + changed_envs: EnvVarsDict = mocked_s3_server_settings.model_dump( + mode="json", exclude_unset=True + ) + return setenvs_from_dict(monkeypatch, {**changed_envs}) diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_services.py b/packages/pytest-simcore/src/pytest_simcore/aws_services.py deleted file mode 100644 index b96bbfd6c98..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/aws_services.py +++ /dev/null @@ -1,83 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import asyncio -from typing import AsyncIterator, Iterator - -import pytest -from aiobotocore.session import get_session -from aiohttp.test_utils import unused_port -from moto.server import ThreadedMotoServer -from pytest_simcore.helpers.utils_docker import get_localhost_ip - - -@pytest.fixture(scope="module") -def mocked_s3_server() -> Iterator[ThreadedMotoServer]: - """creates a moto-server that emulates AWS services in place - NOTE: Never use a bucket with underscores it fails!! 
- """ - server = ThreadedMotoServer(ip_address=get_localhost_ip(), port=unused_port()) - # pylint: disable=protected-access - print(f"--> started mock S3 server on {server._ip_address}:{server._port}") - print( - f"--> Dashboard available on [http://{server._ip_address}:{server._port}/moto-api/]" - ) - server.start() - yield server - server.stop() - print(f"<-- stopped mock S3 server on {server._ip_address}:{server._port}") - - -async def _clean_bucket_content(aiobotore_s3_client, bucket: str): - response = await aiobotore_s3_client.list_objects_v2(Bucket=bucket) - while response["KeyCount"] > 0: - await aiobotore_s3_client.delete_objects( - Bucket=bucket, - Delete={ - "Objects": [ - {"Key": obj["Key"]} for obj in response["Contents"] if "Key" in obj - ] - }, - ) - response = await aiobotore_s3_client.list_objects_v2(Bucket=bucket) - - -async def _remove_all_buckets(aiobotore_s3_client): - response = await aiobotore_s3_client.list_buckets() - bucket_names = [ - bucket["Name"] for bucket in response["Buckets"] if "Name" in bucket - ] - await asyncio.gather( - *(_clean_bucket_content(aiobotore_s3_client, bucket) for bucket in bucket_names) - ) - await asyncio.gather( - *(aiobotore_s3_client.delete_bucket(Bucket=bucket) for bucket in bucket_names) - ) - - -@pytest.fixture -async def mocked_s3_server_envs( - mocked_s3_server: ThreadedMotoServer, monkeypatch: pytest.MonkeyPatch -) -> AsyncIterator[None]: - monkeypatch.setenv("S3_SECURE", "false") - monkeypatch.setenv( - "S3_ENDPOINT", - f"{mocked_s3_server._ip_address}:{mocked_s3_server._port}", # pylint: disable=protected-access - ) - monkeypatch.setenv("S3_ACCESS_KEY", "xxx") - monkeypatch.setenv("S3_SECRET_KEY", "xxx") - monkeypatch.setenv("S3_BUCKET_NAME", "pytestbucket") - - yield - - # cleanup the buckets - session = get_session() - async with session.create_client( - "s3", - endpoint_url=f"http://{mocked_s3_server._ip_address}:{mocked_s3_server._port}", # pylint: disable=protected-access - aws_secret_access_key="xxx", - aws_access_key_id="xxx", - ) as client: - await _remove_all_buckets(client) diff --git a/packages/pytest-simcore/src/pytest_simcore/aws_ssm_service.py b/packages/pytest-simcore/src/pytest_simcore/aws_ssm_service.py new file mode 100644 index 00000000000..7e2844ea5d1 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/aws_ssm_service.py @@ -0,0 +1,36 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +import contextlib +from collections.abc import AsyncIterator +from typing import cast + +import aioboto3 +import pytest +from aiobotocore.session import ClientCreatorContext +from pytest_mock.plugin import MockerFixture +from settings_library.ssm import SSMSettings +from types_aiobotocore_ssm.client import SSMClient + + +@pytest.fixture +async def ssm_client( + mocked_ssm_server_settings: SSMSettings, + mocker: MockerFixture, +) -> AsyncIterator[SSMClient]: + session = aioboto3.Session() + exit_stack = contextlib.AsyncExitStack() + session_client = session.client( + "ssm", + endpoint_url=f"{mocked_ssm_server_settings.SSM_ENDPOINT}", + aws_access_key_id=mocked_ssm_server_settings.SSM_ACCESS_KEY_ID.get_secret_value(), + aws_secret_access_key=mocked_ssm_server_settings.SSM_SECRET_ACCESS_KEY.get_secret_value(), + region_name=mocked_ssm_server_settings.SSM_REGION_NAME, + ) + assert isinstance(session_client, ClientCreatorContext) + ec2_client = cast(SSMClient, await exit_stack.enter_async_context(session_client)) + + yield ec2_client + + await 
exit_stack.aclose() diff --git a/packages/pytest-simcore/src/pytest_simcore/cli_runner.py b/packages/pytest-simcore/src/pytest_simcore/cli_runner.py index 0c643102b65..81dbbfdd98e 100644 --- a/packages/pytest-simcore/src/pytest_simcore/cli_runner.py +++ b/packages/pytest-simcore/src/pytest_simcore/cli_runner.py @@ -6,7 +6,7 @@ # Based on https://github.com/Stranger6667/pytest-click -from typing import Iterator +from collections.abc import Iterator import pytest from typer.testing import CliRunner diff --git a/packages/pytest-simcore/src/pytest_simcore/dask_scheduler.py b/packages/pytest-simcore/src/pytest_simcore/dask_scheduler.py new file mode 100644 index 00000000000..54fdd021223 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/dask_scheduler.py @@ -0,0 +1,123 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterable, AsyncIterator, Callable +from typing import Any + +import distributed +import pytest +from yarl import URL + + +@pytest.fixture +def dask_workers_config() -> dict[str, Any]: + return { + "cpu-worker": { + "cls": distributed.Worker, + "options": { + "nthreads": 2, + "resources": {"CPU": 2, "RAM": 48e9}, + "preload": ( + "dask_task_models_library.plugins.task_life_cycle_worker_plugin", + ), + }, + }, + "gpu-worker": { + "cls": distributed.Worker, + "options": { + "nthreads": 1, + "resources": { + "CPU": 1, + "GPU": 1, + "RAM": 48e9, + }, + "preload": ( + "dask_task_models_library.plugins.task_life_cycle_worker_plugin", + ), + }, + }, + "large-ram-worker": { + "cls": distributed.Worker, + "options": { + "nthreads": 1, + "resources": { + "CPU": 8, + "RAM": 768e9, + }, + "preload": ( + "dask_task_models_library.plugins.task_life_cycle_worker_plugin", + ), + }, + }, + } + + +@pytest.fixture +def dask_scheduler_config( + unused_tcp_port_factory: Callable, +) -> dict[str, Any]: + return { + "cls": distributed.Scheduler, + "options": { + "port": unused_tcp_port_factory(), + "dashboard_address": f":{unused_tcp_port_factory()}", + "preload": ( + "dask_task_models_library.plugins.task_life_cycle_scheduler_plugin", + ), + }, + } + + +@pytest.fixture +async def dask_spec_local_cluster( + monkeypatch: pytest.MonkeyPatch, + dask_workers_config: dict[str, Any], + dask_scheduler_config: dict[str, Any], +) -> AsyncIterator[distributed.SpecCluster]: + # in this mode we can precisely create a specific cluster + + async with distributed.SpecCluster( + workers=dask_workers_config, + scheduler=dask_scheduler_config, + asynchronous=True, + name="pytest_dask_spec_local_cluster", + ) as cluster: + print("Cluster dashboard link: ", cluster.dashboard_link) + scheduler_address = URL(cluster.scheduler_address) + monkeypatch.setenv( + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", + f"{scheduler_address or 'invalid'}", + ) + yield cluster + + +@pytest.fixture +async def dask_local_cluster_without_workers( + monkeypatch: pytest.MonkeyPatch, + dask_scheduler_config: dict[str, Any], +) -> AsyncIterable[distributed.SpecCluster]: + # in this mode we can precisely create a specific cluster + + async with distributed.SpecCluster( + scheduler=dask_scheduler_config, + asynchronous=True, + name="pytest_dask_local_cluster_without_workers", + ) as cluster: + scheduler_address = URL(cluster.scheduler_address) + monkeypatch.setenv( + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", + f"{scheduler_address or 'invalid'}", + ) + yield cluster + + +@pytest.fixture +async def dask_spec_cluster_client( + 
dask_spec_local_cluster: distributed.SpecCluster, +) -> AsyncIterator[distributed.Client]: + async with distributed.Client( + dask_spec_local_cluster.scheduler_address, asynchronous=True + ) as client: + yield client diff --git a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py index 98323734514..15f28daf316 100644 --- a/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py +++ b/packages/pytest-simcore/src/pytest_simcore/db_entries_mocks.py @@ -1,16 +1,30 @@ -from typing import Any, Callable, Iterator +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-value-for-parameter + +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator +from typing import Any from uuid import uuid4 import pytest import sqlalchemy as sa from faker import Faker -from models_library.projects import ProjectAtDB +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID +from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline +from simcore_postgres_database.models.comp_tasks import comp_tasks from simcore_postgres_database.models.projects import ProjectType, projects from simcore_postgres_database.models.users import UserRole, UserStatus, users +from simcore_postgres_database.utils_projects_nodes import ( + ProjectNodeCreate, + ProjectNodesRepo, +) +from sqlalchemy.ext.asyncio import AsyncEngine @pytest.fixture() -def registered_user( +def create_registered_user( postgres_db: sa.engine.Engine, faker: Faker ) -> Iterator[Callable[..., dict]]: created_user_ids = [] @@ -33,13 +47,13 @@ def creator(**user_kwargs) -> dict[str, Any]: ) # this is needed to get the primary_gid correctly result = con.execute( - sa.select([users]).where(users.c.id == user_config["id"]) + sa.select(users).where(users.c.id == user_config["id"]) ) user = result.first() assert user print(f"--> created {user=}") created_user_ids.append(user["id"]) - return dict(user) + return dict(user._asdict()) yield creator @@ -49,12 +63,17 @@ def creator(**user_kwargs) -> dict[str, Any]: @pytest.fixture -def project( - postgres_db: sa.engine.Engine, faker: Faker -) -> Iterator[Callable[..., ProjectAtDB]]: +async def project( + sqlalchemy_async_engine: AsyncEngine, faker: Faker +) -> AsyncIterator[Callable[..., Awaitable[ProjectAtDB]]]: created_project_ids: list[str] = [] - def creator(user: dict[str, Any], **overrides) -> ProjectAtDB: + async def creator( + user: dict[str, Any], + *, + project_nodes_overrides: dict[str, Any] | None = None, + **project_overrides, + ) -> ProjectAtDB: project_uuid = uuid4() print(f"Created new project with uuid={project_uuid}") project_config = { @@ -67,15 +86,32 @@ def creator(user: dict[str, Any], **overrides) -> ProjectAtDB: "thumbnail": "", "workbench": {}, } - project_config.update(**overrides) - with postgres_db.connect() as con: - result = con.execute( + project_config.update(**project_overrides) + async with sqlalchemy_async_engine.connect() as con, con.begin(): + result = await con.execute( projects.insert() .values(**project_config) .returning(sa.literal_column("*")) ) - inserted_project = ProjectAtDB.parse_obj(result.first()) + inserted_project = ProjectAtDB.model_validate(result.one()) + project_nodes_repo = ProjectNodesRepo(project_uuid=project_uuid) + # NOTE: currently no resources is passed until it becomes necessary + default_node_config = { + 
"required_resources": {}, + "key": faker.pystr(), + "version": faker.pystr(), + "label": faker.pystr(), + } + if project_nodes_overrides: + default_node_config.update(project_nodes_overrides) + await project_nodes_repo.add( + con, + nodes=[ + ProjectNodeCreate(node_id=NodeID(node_id), **default_node_config) + for node_id in inserted_project.workbench + ], + ) print(f"--> created {inserted_project=}") created_project_ids.append(f"{inserted_project.uuid}") return inserted_project @@ -83,6 +119,67 @@ def creator(user: dict[str, Any], **overrides) -> ProjectAtDB: yield creator # cleanup - with postgres_db.connect() as con: - con.execute(projects.delete().where(projects.c.uuid.in_(created_project_ids))) + async with sqlalchemy_async_engine.begin() as con: + await con.execute( + projects.delete().where(projects.c.uuid.in_(created_project_ids)) + ) print(f"<-- delete projects {created_project_ids=}") + + +@pytest.fixture +def pipeline(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., dict[str, Any]]]: + created_pipeline_ids: list[str] = [] + + def creator(**pipeline_kwargs) -> dict[str, Any]: + pipeline_config = { + "project_id": f"{uuid4()}", + "dag_adjacency_list": {}, + "state": StateType.NOT_STARTED, + } + pipeline_config.update(**pipeline_kwargs) + with postgres_db.connect() as conn: + result = conn.execute( + comp_pipeline.insert() + .values(**pipeline_config) + .returning(sa.literal_column("*")) + ) + row = result.one() + new_pipeline = row._asdict() + created_pipeline_ids.append(new_pipeline["project_id"]) + return new_pipeline + + yield creator + + # cleanup + with postgres_db.connect() as conn: + conn.execute( + comp_pipeline.delete().where( + comp_pipeline.c.project_id.in_(created_pipeline_ids) + ) + ) + + +@pytest.fixture +def comp_task(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., dict[str, Any]]]: + created_task_ids: list[int] = [] + + def creator(project_id: ProjectID, **task_kwargs) -> dict[str, Any]: + task_config = {"project_id": f"{project_id}"} | task_kwargs + with postgres_db.connect() as conn: + result = conn.execute( + comp_tasks.insert() + .values(**task_config) + .returning(sa.literal_column("*")) + ) + row = result.one() + new_task = row._asdict() + created_task_ids.append(new_task["task_id"]) + return new_task + + yield creator + + # cleanup + with postgres_db.connect() as conn: + conn.execute( + comp_tasks.delete().where(comp_tasks.c.task_id.in_(created_task_ids)) + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/dev_vendors_compose.py b/packages/pytest-simcore/src/pytest_simcore/dev_vendors_compose.py new file mode 100644 index 00000000000..178e125b279 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/dev_vendors_compose.py @@ -0,0 +1,27 @@ +from pathlib import Path +from typing import Any + +import pytest + +from .helpers.docker import run_docker_compose_config + + +@pytest.fixture(scope="module") +def dev_vendors_docker_compose( + osparc_simcore_root_dir: Path, + osparc_simcore_scripts_dir: Path, + env_file_for_testing: Path, + temp_folder: Path, +) -> dict[str, Any]: + docker_compose_path = ( + osparc_simcore_root_dir / "services" / "docker-compose-dev-vendors.yml" + ) + assert docker_compose_path.exists() + + return run_docker_compose_config( + project_dir=osparc_simcore_root_dir / "services", + scripts_dir=osparc_simcore_scripts_dir, + docker_compose_paths=docker_compose_path, + env_file_path=env_file_for_testing, + destination_path=temp_folder / "ops_docker_compose.yml", + ) diff --git 
a/packages/pytest-simcore/src/pytest_simcore/disk_usage_monitoring.py b/packages/pytest-simcore/src/pytest_simcore/disk_usage_monitoring.py
new file mode 100644
index 00000000000..c4ebece8cb6
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/disk_usage_monitoring.py
@@ -0,0 +1,102 @@
+import logging
+import shutil
+
+import pytest
+
+_logger = logging.getLogger(__name__)
+_DEFAULT_THRESHOLD_MB = 512
+
+
+def pytest_addoption(parser: pytest.Parser):
+    simcore_group = parser.getgroup("simcore")
+    simcore_group.addoption(
+        "--disk-usage", action="store_true", help="Enable disk usage monitoring"
+    )
+    simcore_group.addoption(
+        "--disk-usage-threshold",
+        action="store",
+        type=float,
+        default=_DEFAULT_THRESHOLD_MB,
+        help=f"Set the threshold for disk usage increase in Megabytes. No warning if the increase is below this value. [default={_DEFAULT_THRESHOLD_MB}]",
+    )
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_configure(config):
+    """Check if the disk usage monitoring is enabled and register the plugin."""
+    if config.getoption("--disk-usage"):
+        config.pluginmanager.register(DiskUsagePlugin(config), "disk_usage_plugin")
+
+
+class DiskUsagePlugin:
+    """
+    The purpose of this plugin is to monitor disk usage during test execution, identifying tests
+    that do not properly clean up resources. This helps prevent potential issues when running
+    continuous integration (CI) pipelines on external systems, such as GitHub Actions.
+
+    The plugin is activated by using the `--disk-usage` option, and
+    it can be configured with a custom threshold using the `--disk-usage-threshold` option.
+
+    Warnings are generated if disk usage increases beyond the specified threshold,
+    allowing for targeted investigation of resource management
+    in specific tests, modules, or the entire test session.
+
+    As an example, the CI in gh-actions reported this:
+    XMinioStorageFull: Storage backend has reached its minimum free drive threshold. Please delete a few objects to proceed.
+    """
+
+    def __init__(self, config):
+        self._threshold_mb = config.getoption("--disk-usage-threshold")
+
+    @staticmethod
+    def _get_disk_usage():
+        return shutil.disk_usage("/").used
+
+    def _log_disk_usage_increase(
+        self, initial_usage: int, final_usage: int, scope_name: str
+    ):
+        if final_usage > initial_usage:
+            # NOTE: disk_usage reports bytes; convert before comparing with the MB threshold
+            increase_mb = (final_usage - initial_usage) / (1024 * 1024)
+
+            if increase_mb >= self._threshold_mb:
+                msg = (
+                    f"Disk usage increased by {increase_mb:.2f} MB during {scope_name}."
+ ) + _logger.warning(msg) + + @pytest.fixture(scope="session", autouse=True) + def monitor_session_disk_usage(self): + """SESSION-level fixture to monitor overall disk usage.""" + initial_usage = self._get_disk_usage() + + yield + + final_usage = self._get_disk_usage() + self._log_disk_usage_increase(initial_usage, final_usage, "this session") + + @pytest.fixture(scope="module", autouse=True) + def monitor_module_disk_usage(self, request): + """MODULE-level fixture to monitor disk usage before and after each module.""" + initial_usage = self._get_disk_usage() + + yield + + final_usage = self._get_disk_usage() + module_name = request.module.__name__ + self._log_disk_usage_increase( + initial_usage, final_usage, f"the module '{module_name}'" + ) + + @pytest.fixture(autouse=True) + def monitor_test_disk_usage(self, request): + """FUNCTION-level fixture to monitor disk usage before and after each test.""" + initial_usage = self._get_disk_usage() + + yield + + final_usage = self._get_disk_usage() + test_name = request.node.name + self._log_disk_usage_increase( + initial_usage, final_usage, f"the test '{test_name}'" + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/docker.py b/packages/pytest-simcore/src/pytest_simcore/docker.py new file mode 100644 index 00000000000..9ca883ec6c8 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/docker.py @@ -0,0 +1,58 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable + +import asyncio +import contextlib +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager + +import aiodocker +import pytest + + +@pytest.fixture +async def async_docker_client() -> AsyncIterator[aiodocker.Docker]: + async with aiodocker.Docker() as docker_client: + yield docker_client + + +@contextlib.asynccontextmanager +async def _pause_docker_container( + async_docker_client: aiodocker.Docker, container_name: str +) -> AsyncIterator[None]: + containers = await async_docker_client.containers.list( + filters={"name": [f"{container_name}."]} + ) + assert ( + containers + ), f"Failed to pause container {container_name=}, because it was not found" + + await asyncio.gather(*(c.pause() for c in containers)) + # refresh + container_attrs = await asyncio.gather(*(c.show() for c in containers)) + for container_status in container_attrs: + assert container_status["State"]["Status"] == "paused" + + yield + + await asyncio.gather(*(c.unpause() for c in containers)) + # refresh + container_attrs = await asyncio.gather(*(c.show() for c in containers)) + for container_status in container_attrs: + assert container_status["State"]["Status"] == "running" + # NOTE: container takes some time to start + + +@pytest.fixture +async def paused_container() -> Callable[[str], AbstractAsyncContextManager[None]]: + @contextlib.asynccontextmanager + async def _(container_name: str) -> AsyncIterator[None]: + async with aiodocker.Docker() as docker_client, _pause_docker_container( + docker_client, container_name + ): + yield None + + return _ diff --git a/packages/pytest-simcore/src/pytest_simcore/docker_api_proxy.py b/packages/pytest-simcore/src/pytest_simcore/docker_api_proxy.py new file mode 100644 index 00000000000..0a0bf4f05bb --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/docker_api_proxy.py @@ -0,0 +1,64 @@ +import logging + +import pytest +from aiohttp import BasicAuth, ClientSession, ClientTimeout 
+from pydantic import TypeAdapter
+from settings_library.docker_api_proxy import DockerApiProxysettings
+from tenacity import before_sleep_log, retry, stop_after_delay, wait_fixed
+
+from .helpers.docker import get_service_published_port
+from .helpers.host import get_localhost_ip
+from .helpers.typing_env import EnvVarsDict
+
+_logger = logging.getLogger(__name__)
+
+
+@retry(
+    wait=wait_fixed(1),
+    stop=stop_after_delay(10),
+    before_sleep=before_sleep_log(_logger, logging.INFO),
+    reraise=True,
+)
+async def _wait_till_docker_api_proxy_is_responsive(
+    settings: DockerApiProxysettings,
+) -> None:
+    async with ClientSession(
+        timeout=ClientTimeout(total=1),
+        auth=BasicAuth(
+            settings.DOCKER_API_PROXY_USER,
+            settings.DOCKER_API_PROXY_PASSWORD.get_secret_value(),
+        ),
+    ) as client:
+        response = await client.get(f"{settings.base_url}/version")
+        assert response.status == 200, await response.text()
+
+
+@pytest.fixture
+async def docker_api_proxy_settings(
+    docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict
+) -> DockerApiProxysettings:
+    """Returns the settings of the docker-api-proxy service that is up and responsive"""
+
+    prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"]
+    assert f"{prefix}_docker-api-proxy" in docker_stack["services"]
+
+    published_port = get_service_published_port(
+        "docker-api-proxy", int(env_vars_for_docker_compose["DOCKER_API_PROXY_PORT"])
+    )
+
+    settings = TypeAdapter(DockerApiProxysettings).validate_python(
+        {
+            "DOCKER_API_PROXY_HOST": get_localhost_ip(),
+            "DOCKER_API_PROXY_PORT": published_port,
+            "DOCKER_API_PROXY_USER": env_vars_for_docker_compose[
+                "DOCKER_API_PROXY_USER"
+            ],
+            "DOCKER_API_PROXY_PASSWORD": env_vars_for_docker_compose[
+                "DOCKER_API_PROXY_PASSWORD"
+            ],
+        }
+    )
+
+    await _wait_till_docker_api_proxy_is_responsive(settings)
+
+    return settings
diff --git a/packages/pytest-simcore/src/pytest_simcore/docker_compose.py b/packages/pytest-simcore/src/pytest_simcore/docker_compose.py
index 820216a3cab..61207aa61a5 100644
--- a/packages/pytest-simcore/src/pytest_simcore/docker_compose.py
+++ b/packages/pytest-simcore/src/pytest_simcore/docker_compose.py
@@ -2,53 +2,62 @@
 # pylint: disable=unused-argument
 # pylint: disable=unused-variable
 
-""" Fixtures to create docker-compose.yaml configuration files (as in Makefile)
+"""Fixtures to create docker-compose.yaml configuration files (as in Makefile)
 
-  - Basically runs `docker-compose config
-  - Services in stack can be selected using 'core_services_selection', 'ops_services_selection' fixtures
+- Basically runs `docker compose config
+- Services in stack can be selected using 'core_services_selection', 'ops_services_selection' fixtures
 
 """
 
-import json
+import logging
 import os
 import re
 import shutil
-import subprocess
-import sys
+from collections.abc import Iterator
 from copy import deepcopy
 from pathlib import Path
-from typing import Any, Iterator
+from typing import Any
 
 import pytest
 import yaml
-from dotenv import dotenv_values, set_key
-from pytest import ExitCode, MonkeyPatch
+from dotenv import dotenv_values
 
 from .helpers import (
     FIXTURE_CONFIG_CORE_SERVICES_SELECTION,
     FIXTURE_CONFIG_OPS_SERVICES_SELECTION,
 )
-from .helpers.constants import HEADER_STR
+from .helpers.docker import run_docker_compose_config, save_docker_infos
+from .helpers.host import get_localhost_ip
 from .helpers.typing_env import EnvVarsDict
-from .helpers.utils_docker import (
-    get_localhost_ip,
-    run_docker_compose_config,
-    save_docker_infos,
-)
+
+_logger = logging.getLogger(__name__)
+
+
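To make the selection mechanism described in the module docstring above concrete, here is a minimal sketch of how an integration-test module is expected to opt into these fixtures. The plugin list, the selected service names and the assertion are illustrative assumptions (they are not taken from this changeset), and the test assumes pytest-asyncio runs in auto mode:

    # conftest.py (sketch)
    pytest_plugins = [
        "pytest_simcore.docker_compose",
        "pytest_simcore.docker_swarm",
    ]

    # test_example.py (sketch): module-level lists picked up by the
    # 'core_services_selection' / 'ops_services_selection' fixtures
    core_services_selection = ["postgres", "rabbit"]  # assumed core service names
    ops_services_selection = ["minio"]  # assumed ops service name


    async def test_core_services_are_deployed(docker_stack: dict):
        # docker_stack exposes the names of the services deployed from the filtered compose files
        assert any("postgres" in service_name for service_name in docker_stack["services"])
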
+@pytest.fixture(scope="module") +def temp_folder( + request: pytest.FixtureRequest, tmp_path_factory: pytest.TempPathFactory +) -> Path: + """**Module scoped** temporary folder""" + prefix = __name__.replace(".", "_") + return tmp_path_factory.mktemp( + basename=f"{prefix}_temp_folder_{request.module.__name__}", numbered=True + ) @pytest.fixture(scope="session") -def testing_environ_vars(env_devel_file: Path) -> EnvVarsDict: - """ - Loads and extends .env-devel returning - all environment variables key=value +def env_vars_for_docker_compose(env_devel_file: Path) -> EnvVarsDict: """ - env_devel_unresolved = dotenv_values(env_devel_file, verbose=True, interpolate=True) + Loads and extends .env-devel returning all environment variables key=value - # get from environ if applicable - env_devel: EnvVarsDict = { - key: os.environ.get(key, value) for key, value in env_devel_unresolved.items() - } + + NOTE: that these are then env-vars used in the services started in the + integration tests! + """ + env_devel = dotenv_values( + env_devel_file, + verbose=True, + interpolate=True, # NOTE: This resolves expressions as VAR=$ENVVAR + ) # These are overrides to .env-devel or an extension to them env_devel["LOG_LEVEL"] = "DEBUG" @@ -60,7 +69,6 @@ def testing_environ_vars(env_devel_file: Path) -> EnvVarsDict: env_devel["REGISTRY_PW"] = "" env_devel["REGISTRY_AUTH"] = "False" - # CAREFUL! FIXME: monkeypatch autouse ?? env_devel["SWARM_STACK_NAME"] = "pytest-simcore" env_devel.setdefault( "SWARM_STACK_NAME_NO_HYPHEN", env_devel["SWARM_STACK_NAME"].replace("-", "_") @@ -68,50 +76,95 @@ def testing_environ_vars(env_devel_file: Path) -> EnvVarsDict: env_devel[ "AIOCACHE_DISABLE" - ] = "1" # ensure that aio-caches are disabled for testing [https://aiocache.readthedocs.io/en/latest/testing.html] + # ensure that aio-caches are disabled for testing [https://aiocache.readthedocs.io/en/latest/testing.html] + ] = "1" env_devel[ "CATALOG_BACKGROUND_TASK_REST_TIME" - ] = "1" # ensure catalog refreshes services access rights fast + # ensure catalog refreshes services access rights fast + ] = "1" + + # TRACING + # NOTE: should go away with pydantic v2 + env_devel["TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT"] = "null" + env_devel["TRACING_OPENTELEMETRY_COLLECTOR_PORT"] = "null" + # DIRECTOR env_devel["DIRECTOR_REGISTRY_CACHING"] = "False" + # NOTE: this will make TracingSettings fail and therefore the default factory of every *_TRACING field will be set to None + + # NOTE: DIRECTOR_DEFAULT_MAX_* used for integration-tests that include `director` service + env_devel["DIRECTOR_DEFAULT_MAX_MEMORY"] = "268435456" + env_devel["DIRECTOR_DEFAULT_MAX_NANO_CPUS"] = "10000000" + env_devel["DIRECTOR_LOGLEVEL"] = "DEBUG" + env_devel["REGISTRY_PATH"] = "127.0.0.1:5000" + env_devel.setdefault("DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS", "") - env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_ID", "") - env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME", "") - env_devel.setdefault("DIRECTOR_SELF_SIGNED_SSL_FILENAME", "") env_devel["API_SERVER_DEV_FEATURES_ENABLED"] = "1" - if not "DOCKER_REGISTRY" in os.environ: + if "DOCKER_REGISTRY" not in os.environ: env_devel["DOCKER_REGISTRY"] = "local" - if not "DOCKER_IMAGE_TAG" in os.environ: + if "DOCKER_IMAGE_TAG" not in os.environ: env_devel["DOCKER_IMAGE_TAG"] = "production" - return env_devel + # ensure we do not use the bucket of simcore or so + env_devel["S3_BUCKET_NAME"] = "pytestbucket" + + # ensure OpenTelemetry is not enabled + env_devel |= { + tracing_setting: "null" + for 
tracing_setting in ( + "AGENT_TRACING", + "API_SERVER_TRACING", + "AUTOSCALING_TRACING", + "CATALOG_TRACING", + "CLUSTERS_KEEPER_TRACING", + "DATCORE_ADAPTER_TRACING", + "DIRECTOR_TRACING", + "DIRECTOR_V2_TRACING", + "DYNAMIC_SCHEDULER_TRACING", + "EFS_GUARDIAN_TRACING", + "INVITATIONS_TRACING", + "PAYMENTS_TRACING", + "RESOURCE_USAGE_TRACKER_TRACING", + "STORAGE_TRACING", + "WB_DB_EL_TRACING", + "WB_GC_TRACING", + "WEBSERVER_TRACING", + ) + } + + return {key: value for key, value in env_devel.items() if value is not None} @pytest.fixture(scope="module") -def env_file_for_testing( - testing_environ_vars: dict[str, str], +def env_file_for_docker_compose( temp_folder: Path, + env_vars_for_docker_compose: EnvVarsDict, osparc_simcore_root_dir: Path, ) -> Iterator[Path]: """Dumps all the environment variables into an $(temp_folder)/.env.test file - Pass path as argument in 'docker-compose --env-file ... ' + Pass path as argument in 'docker compose --env-file ... ' """ # SEE: # https://docs.docker.com/compose/env-file/ # https://docs.docker.com/compose/environment-variables/#the-env-file env_test_path = temp_folder / ".env.test" - with env_test_path.open("wt") as fh: print( f"# Auto-generated from env_file_for_testing in {__file__}", file=fh, ) - for key in sorted(testing_environ_vars.keys()): - print(f"{key}={testing_environ_vars[key]}", file=fh) + for key, value in sorted(env_vars_for_docker_compose.items()): + # NOTE: python-dotenv parses JSON encoded strings correctly, but + # writing them back shows an issue. if the original ENV is something like MY_ENV='{"correct": "encodedjson"}' + # it goes to MY_ENV={"incorrect": "encodedjson"}! + if value.startswith(("{", "[")) and value.endswith(("}", "]")): + print(f"{key}='{value}'", file=fh) + else: + print(f"{key}={value}", file=fh) # # WARNING: since compose files have references to ../.env we MUST create .env @@ -133,7 +186,7 @@ def env_file_for_testing( def simcore_docker_compose( osparc_simcore_root_dir: Path, osparc_simcore_scripts_dir: Path, - env_file_for_testing: Path, + env_file_for_docker_compose: Path, temp_folder: Path, ) -> dict[str, Any]: """Resolves docker-compose for simcore stack in local host @@ -143,9 +196,9 @@ def simcore_docker_compose( COMPOSE_FILENAMES = ["docker-compose.yml", "docker-compose.local.yml"] # ensures .env at git_root_dir - assert env_file_for_testing.exists() + assert env_file_for_docker_compose.exists() - # target docker-compose path + # target docker compose path docker_compose_paths = [ osparc_simcore_root_dir / "services" / filename for filename in COMPOSE_FILENAMES @@ -154,91 +207,42 @@ def simcore_docker_compose( docker_compose_path.exists() for docker_compose_path in docker_compose_paths ) - compose_specs = run_docker_compose_config( + return run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", scripts_dir=osparc_simcore_scripts_dir, docker_compose_paths=docker_compose_paths, - env_file_path=env_file_for_testing, + env_file_path=env_file_for_docker_compose, destination_path=temp_folder / "simcore_docker_compose.yml", ) - # NOTE: do not add indent. 
Copy&Paste log into editor instead - print( - HEADER_STR.format("simcore docker-compose"), - json.dumps(compose_specs), - HEADER_STR.format("-"), - ) - return compose_specs - - -@pytest.fixture(scope="module") -def inject_filestash_config_path( - osparc_simcore_scripts_dir: Path, - monkeypatch_module: MonkeyPatch, - env_file_for_testing: Path, -) -> None: - create_filestash_config_py = ( - osparc_simcore_scripts_dir / "filestash" / "create_config.py" - ) - - # ensures .env at git_root_dir, which will be used as current directory - assert env_file_for_testing.exists() - env_values = dotenv_values(env_file_for_testing) - - process = subprocess.run( - ["python3", f"{create_filestash_config_py}"], - shell=False, - check=True, - stdout=subprocess.PIPE, - env=env_values, - ) - filestash_config_json_path = Path(process.stdout.decode("utf-8").strip()) - assert filestash_config_json_path.exists() - - set_key( - env_file_for_testing, - "TMP_PATH_TO_FILESTASH_CONFIG", - f"{filestash_config_json_path}", - ) - monkeypatch_module.setenv( - "TMP_PATH_TO_FILESTASH_CONFIG", f"{filestash_config_json_path}" - ) @pytest.fixture(scope="module") def ops_docker_compose( osparc_simcore_root_dir: Path, osparc_simcore_scripts_dir: Path, - env_file_for_testing: Path, + env_file_for_docker_compose: Path, temp_folder: Path, - inject_filestash_config_path: None, ) -> dict[str, Any]: """Filters only services in docker-compose-ops.yml and returns yaml data Produces same as `make .stack-ops.yml` in a temporary folder """ # ensures .env at git_root_dir, which will be used as current directory - assert env_file_for_testing.exists() + assert env_file_for_docker_compose.exists() - # target docker-compose path + # target docker compose path docker_compose_path = ( osparc_simcore_root_dir / "services" / "docker-compose-ops.yml" ) assert docker_compose_path.exists() - compose_specs = run_docker_compose_config( + return run_docker_compose_config( project_dir=osparc_simcore_root_dir / "services", scripts_dir=osparc_simcore_scripts_dir, docker_compose_paths=docker_compose_path, - env_file_path=env_file_for_testing, + env_file_path=env_file_for_docker_compose, destination_path=temp_folder / "ops_docker_compose.yml", ) - # NOTE: do not add indent. 
Copy&Paste log into editor instead - print( - HEADER_STR.format("ops docker-compose"), - json.dumps(compose_specs), - HEADER_STR.format("-"), - ) - return compose_specs @pytest.fixture(scope="module") @@ -258,7 +262,7 @@ def core_docker_compose_file( ) -> Path: """A compose with a selection of services from simcore_docker_compose - Creates a docker-compose config file for every stack of services in 'core_services_selection' module variable + Creates a docker compose config file for every stack of services in 'core_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "simcore_docker_compose.filtered.yml") @@ -267,14 +271,18 @@ def core_docker_compose_file( core_services_selection, simcore_docker_compose, docker_compose_path ) + _logger.info( + "Content of '%s':\n%s", + docker_compose_path, + docker_compose_path.read_text(), + ) return docker_compose_path @pytest.fixture(scope="module") def ops_services_selection(request) -> list[str]: """Selection of services from the ops stack""" - ops_services = getattr(request.module, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, []) - return ops_services + return getattr(request.module, FIXTURE_CONFIG_OPS_SERVICES_SELECTION, []) @pytest.fixture(scope="module") @@ -283,16 +291,17 @@ def ops_docker_compose_file( ) -> Path: """A compose with a selection of services from ops_docker_compose - Creates a docker-compose config file for every stack of services in 'ops_services_selection' module variable + Creates a docker compose config file for every stack of services in 'ops_services_selection' module variable File is created in a temp folder """ docker_compose_path = Path(temp_folder / "ops_docker_compose.filtered.yml") # these services are useless when running in the CI - ops_view_only_services = ["adminer", "redis-commander", "portainer", "filestash"] + ops_view_only_services = ["adminer", "redis-commander", "portainer"] if "CI" in os.environ: - print( - f"WARNING: Services such as {ops_view_only_services!r} are removed from the stack when running in the CI" + _logger.info( + "Note that services such as '%s' are removed from the stack when running in the CI", + ops_view_only_services, ) ops_services_selection = list( filter( @@ -304,6 +313,11 @@ def ops_docker_compose_file( ops_services_selection, ops_docker_compose, docker_compose_path ) + _logger.info( + "Content of '%s':\n%s", + docker_compose_path, + docker_compose_path.read_text(), + ) return docker_compose_path @@ -321,7 +335,9 @@ def _save_docker_logs_to_folder(failed_test_directory: Path): @pytest.hookimpl() -def pytest_exception_interact(node, call, report): +def pytest_exception_interact( + node, call: pytest.CallInfo[Any], report: pytest.CollectReport +): # get the node root dir (guaranteed to exist) root_directory: Path = Path(node.config.rootdir) failed_test_directory = root_directory / "test_failures" / node.name @@ -329,8 +345,8 @@ def pytest_exception_interact(node, call, report): @pytest.hookimpl() -def pytest_sessionfinish(session: pytest.Session, exitstatus: ExitCode) -> None: - if exitstatus == ExitCode.TESTS_FAILED: +def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None: + if exitstatus == pytest.ExitCode.TESTS_FAILED: root_directory: Path = Path(session.fspath) failed_test_directory = root_directory / "test_failures" / session.name _save_docker_logs_to_folder(failed_test_directory) @@ -339,7 +355,7 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus: ExitCode) -> None: def 
_minio_fix(service_environs: dict) -> dict: """this hack ensures that S3 is accessed from the host at all time, thus pre-signed links work.""" if "S3_ENDPOINT" in service_environs: - service_environs["S3_ENDPOINT"] = f"{get_localhost_ip()}:9001" + service_environs["S3_ENDPOINT"] = f"http://{get_localhost_ip()}:9001" return service_environs @@ -349,7 +365,7 @@ def _escape_cpus(serialized_yaml: str) -> str: # below is equivalent to the following sed operation fixes above issue # `sed -E "s/cpus: ([0-9\\.]+)/cpus: '\\1'/"` # remove when this issues is fixed, this will most likely occur - # when upgrading the version of docker-compose + # when upgrading the version of docker compose return re.sub( pattern=r"cpus: (\d+\.\d+|\d+)", repl="cpus: '\\1'", string=serialized_yaml @@ -375,16 +391,12 @@ def _filter_services_and_dump( if "environment" in service: service["environment"] = _minio_fix(service["environment"]) + if name == "postgres": + # NOTE: # -c fsync=off is not recommended for production as this disable writing to disk https://pythonspeed.com/articles/faster-db-tests/ + service["command"] += ["-c", "fsync=off"] + # updates current docker-compose (also versioned ... do not change by hand) with docker_compose_path.open("wt") as fh: - if "TRAVIS" in os.environ: - # in travis we do not have access to file - print(f"{str(docker_compose_path):-^100}") - yaml.dump(content, sys.stdout, default_flow_style=False) - print("-" * 100) - else: - # locally we have access to file - print(f"Saving config to '{docker_compose_path}'") yaml.dump(content, fh, default_flow_style=False) docker_compose_path.write_text(_escape_cpus(docker_compose_path.read_text())) diff --git a/packages/pytest-simcore/src/pytest_simcore/docker_registry.py b/packages/pytest-simcore/src/pytest_simcore/docker_registry.py index b071f5bac35..84b4d1e4b24 100644 --- a/packages/pytest-simcore/src/pytest_simcore/docker_registry.py +++ b/packages/pytest-simcore/src/pytest_simcore/docker_registry.py @@ -1,20 +1,26 @@ # pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name +import asyncio import json import logging import os import time +from collections.abc import Awaitable, Callable, Iterator from copy import deepcopy from pathlib import Path -from typing import Any, Callable, Iterator, Optional +from typing import Any +import aiodocker import docker import jsonschema import pytest import tenacity +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.docker_registry import RegistrySettings -from .helpers.utils_docker import get_localhost_ip +from .helpers.host import get_localhost_ip log = logging.getLogger(__name__) @@ -93,6 +99,28 @@ def docker_registry(keep_docker_up: bool) -> Iterator[str]: time.sleep(1) +@pytest.fixture +def external_registry_settings( + external_envfile_dict: EnvVarsDict, +) -> RegistrySettings | None: + if external_envfile_dict: + config = { + field: external_envfile_dict.get(field, None) + for field in RegistrySettings.model_fields + } + return RegistrySettings.model_validate(config) + return None + + +@pytest.fixture +def registry_settings( + docker_registry: str, external_registry_settings: RegistrySettings | None +) -> RegistrySettings: + if external_registry_settings: + return external_registry_settings + return RegistrySettings.create_from_envs() + + @tenacity.retry( wait=tenacity.wait_fixed(2), stop=tenacity.stop_after_delay(20), @@ -113,7 +141,7 @@ def _pull_push_service( 
tag: str, new_registry: str, node_meta_schema: dict, - owner_email: Optional[str] = None, + owner_email: str | None = None, ) -> dict[str, Any]: client = docker.from_env() # pull image from original location @@ -125,7 +153,7 @@ def _pull_push_service( image_labels: dict = dict(image.labels) if owner_email: - print("Overriding labels to take ownership as %s ...", owner_email) + print(f"Overriding labels to take ownership as {owner_email} ...") # By overriding these labels, user owner_email gets ownership of the service # and the catalog service automatically gives full access rights for testing it # otherwise it does not even get read rights @@ -189,7 +217,7 @@ def docker_registry_image_injector( docker_registry: str, node_meta_schema: dict ) -> Callable[..., dict[str, Any]]: def inject_image( - source_image_repo: str, source_image_tag: str, owner_email: Optional[str] = None + source_image_repo: str, source_image_tag: str, owner_email: str | None = None ): return _pull_push_service( source_image_repo, @@ -202,7 +230,7 @@ def inject_image( return inject_image -@pytest.fixture(scope="function") +@pytest.fixture def osparc_service( docker_registry: str, node_meta_schema: dict, service_repo: str, service_tag: str ) -> dict[str, Any]: @@ -233,8 +261,8 @@ def jupyter_service(docker_registry: str, node_meta_schema: dict) -> dict[str, A ) -@pytest.fixture(scope="session", params=["2.0.4"]) -def dy_static_file_server_version(request): +@pytest.fixture(scope="session", params=["2.0.7"]) +def dy_static_file_server_version(request: pytest.FixtureRequest): return request.param @@ -284,3 +312,27 @@ def dy_static_file_server_dynamic_sidecar_compose_spec_service( docker_registry, node_meta_schema, ) + + +@pytest.fixture +def remove_images_from_host() -> Callable[[list[str]], Awaitable[None]]: + async def _cleaner(images: list[str]) -> None: + with log_context( + logging.INFO, msg=(f"removing {images=}", f"removed {images=}") + ): + async with aiodocker.Docker() as client: + delete_results = await asyncio.gather( + *(client.images.delete(image, force=True) for image in images), + return_exceptions=True, + ) + assert delete_results + # confirm they are gone + inspect_results = await asyncio.gather( + *(client.images.inspect(image) for image in images), + return_exceptions=True, + ) + assert all( + isinstance(r, aiodocker.DockerError) for r in inspect_results + ) + + return _cleaner diff --git a/packages/pytest-simcore/src/pytest_simcore/docker_swarm.py b/packages/pytest-simcore/src/pytest_simcore/docker_swarm.py index b5bb2f35b53..90026b90e60 100644 --- a/packages/pytest-simcore/src/pytest_simcore/docker_swarm.py +++ b/packages/pytest-simcore/src/pytest_simcore/docker_swarm.py @@ -7,29 +7,33 @@ import json import logging import subprocess +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator from contextlib import suppress from pathlib import Path -from typing import Any, Iterator +from typing import Any +import aiodocker import docker import pytest +import pytest_asyncio import yaml +from common_library.dict_tools import copy_from_dict from docker.errors import APIError -from tenacity import Retrying, TryAgain, retry +from faker import Faker +from tenacity import AsyncRetrying, Retrying, TryAgain, retry from tenacity.before_sleep import before_sleep_log from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed, wait_random_exponential from .helpers.constants import HEADER_STR, MINUTE +from .helpers.host import 
get_localhost_ip from .helpers.typing_env import EnvVarsDict -from .helpers.utils_dict import copy_from_dict -from .helpers.utils_docker import get_localhost_ip log = logging.getLogger(__name__) -class _ResourceStillNotRemoved(Exception): +class _ResourceStillNotRemovedError(Exception): pass @@ -37,19 +41,19 @@ def _is_docker_swarm_init(docker_client: docker.client.DockerClient) -> bool: try: docker_client.swarm.reload() inspect_result = docker_client.swarm.attrs - assert type(inspect_result) == dict - except APIError as error: + assert isinstance(inspect_result, dict) + except APIError: return False return True @retry( - wait=wait_fixed(5), + wait=wait_fixed(1), stop=stop_after_delay(8 * MINUTE), - before_sleep=before_sleep_log(log, logging.WARNING), + before_sleep=before_sleep_log(log, logging.INFO), reraise=True, ) -def assert_service_is_running(service): +def assert_service_is_running(service) -> None: """Checks that a number of tasks of this service are in running state""" def _get(obj: dict[str, Any], dotted_key: str, default=None) -> Any: @@ -101,7 +105,6 @@ def _fetch_and_print_services( print(HEADER_STR.format(f"docker services running {extra_title}")) for service_obj in docker_client.services.list(): - tasks = {} service = {} with suppress(Exception): @@ -146,14 +149,9 @@ def docker_client() -> Iterator[docker.client.DockerClient]: client.close() -@pytest.fixture(scope="session") -def keep_docker_up(request) -> bool: - return request.config.getoption("--keep-docker-up") - - @pytest.fixture(scope="module") def docker_swarm( - docker_client: docker.client.DockerClient, keep_docker_up: Iterator[bool] + docker_client: docker.client.DockerClient, keep_docker_up: bool ) -> Iterator[None]: """inits docker swarm""" @@ -179,19 +177,89 @@ def docker_swarm( assert _is_docker_swarm_init(docker_client) is keep_docker_up -@pytest.fixture(scope="module") -def docker_stack( +@retry( + wait=wait_fixed(0.3), + retry=retry_if_exception_type(AssertionError), + stop=stop_after_delay(30), +) +def _wait_for_migration_service_to_be_removed( + docker_client: docker.client.DockerClient, +) -> None: + for service in docker_client.services.list(): + if "migration" in service.name: # type: ignore + raise TryAgain + + +def _force_remove_migration_service(docker_client: docker.client.DockerClient) -> None: + for migration_service in ( + service + for service in docker_client.services.list() + if "migration" in service.name # type: ignore + ): + print( + "WARNING: migration service detected before updating stack, it will be force-removed now and re-deployed to ensure DB update" + ) + migration_service.remove() # type: ignore + _wait_for_migration_service_to_be_removed(docker_client) + print(f"forced updated {migration_service.name}.") # type: ignore + + +def _deploy_stack(compose_file: Path, stack_name: str) -> None: + for attempt in Retrying( + stop=stop_after_delay(60), + wait=wait_random_exponential(max=5), + retry=retry_if_exception_type(TryAgain), + reraise=True, + ): + with attempt: + try: + cmd = [ + "docker", + "stack", + "deploy", + "--with-registry-auth", + "--compose-file", + f"{compose_file.name}", + f"{stack_name}", + ] + subprocess.run( + cmd, + check=True, + cwd=compose_file.parent, + capture_output=True, + ) + except subprocess.CalledProcessError as err: + if b"update out of sequence" in err.stderr: + raise TryAgain from err + pytest.fail( + reason=f"deploying docker_stack failed: {err.cmd=}, {err.returncode=}, {err.stdout=}, {err.stderr=}\nTIP: frequent failure is due to a corrupt .env file: 
Delete .env and .env.bak" + ) + + +def _make_dask_sidecar_certificates(simcore_service_folder: Path) -> None: + dask_sidecar_root_folder = simcore_service_folder / "dask-sidecar" + subprocess.run( + ["make", "certificates"], # noqa: S607 + cwd=dask_sidecar_root_folder, + check=True, + capture_output=True, + ) + + +@pytest_asyncio.fixture(scope="module", loop_scope="module") +async def docker_stack( + osparc_simcore_services_dir: Path, docker_swarm: None, docker_client: docker.client.DockerClient, core_docker_compose_file: Path, ops_docker_compose_file: Path, keep_docker_up: bool, - testing_environ_vars: EnvVarsDict, -) -> Iterator[dict]: + env_vars_for_docker_compose: EnvVarsDict, +) -> AsyncIterator[dict]: """deploys core and ops stacks and returns as soon as all are running""" # WARNING: keep prefix "pytest-" in stack names - core_stack_name = testing_environ_vars["SWARM_STACK_NAME"] + core_stack_name = env_vars_for_docker_compose["SWARM_STACK_NAME"] ops_stack_name = "pytest-ops" assert core_stack_name @@ -211,54 +279,12 @@ def docker_stack( # NOTE: if the migration service was already running prior to this call it must # be force updated so that it does its job. else it remains and tests will fail - for migration_service in ( - service - for service in docker_client.services.list() - if "migration" in service.name # type: ignore - ): - print( - "WARNING: migration service detected before updating stack, it will be force-updated" - ) - migration_service.force_update() # type: ignore - print(f"forced updated {migration_service.name}.") # type: ignore - + _force_remove_migration_service(docker_client) + _make_dask_sidecar_certificates(osparc_simcore_services_dir) # make up-version stacks_deployed: dict[str, dict] = {} for key, stack_name, compose_file in stacks: - for attempt in Retrying( - stop=stop_after_delay(60), - wait=wait_random_exponential(max=5), - retry=retry_if_exception_type(TryAgain), - reraise=True, - ): - with attempt: - try: - subprocess.run( - [ - "docker", - "stack", - "deploy", - "--with-registry-auth", - "--compose-file", - f"{compose_file.name}", - f"{stack_name}", - ], - check=True, - cwd=compose_file.parent, - capture_output=True, - ) - except subprocess.CalledProcessError as err: - if b"update out of sequence" in err.stderr: - raise TryAgain from err - print( - "docker_stack failed", - f"{' '.join(err.cmd)}", - f"returncode={err.returncode}", - f"stdout={err.stdout}", - f"stderr={err.stderr}", - "\nTIP: frequent failure is due to a corrupt .env file: Delete .env and .env.bak", - ) - raise + _deploy_stack(compose_file, stack_name) stacks_deployed[key] = { "name": stack_name, @@ -281,9 +307,14 @@ async def _check_all_services_are_running(): return_when=asyncio.FIRST_EXCEPTION, ) assert done, f"no services ready, they all failed! 
[{pending}]" + + for future in done: + if exc := future.exception(): + raise exc + assert not pending, f"some service did not start correctly [{pending}]" - asyncio.run(_check_all_services_are_running()) + await _check_all_services_are_running() finally: _fetch_and_print_services(docker_client, "[BEFORE TEST]") @@ -293,7 +324,7 @@ async def _check_all_services_are_running(): "services": [service.name for service in docker_client.services.list()], # type: ignore } - ## TEAR DOWN ---------------------- + # TEAR DOWN ---------------------- _fetch_and_print_services(docker_client, "[AFTER TEST]") @@ -318,7 +349,6 @@ async def _check_all_services_are_running(): stacks.reverse() for _, stack, _ in stacks: - try: subprocess.run( f"docker stack remove {stack}".split(" "), @@ -345,7 +375,7 @@ async def _check_all_services_are_running(): for attempt in Retrying( wait=wait_fixed(2), stop=stop_after_delay(3 * MINUTE), - before_sleep=before_sleep_log(log, logging.WARNING), + before_sleep=before_sleep_log(log, logging.INFO), reraise=True, ): with attempt: @@ -359,8 +389,45 @@ async def _check_all_services_are_running(): for resource in pending: resource.remove(force=True) - raise _ResourceStillNotRemoved( - f"Waiting for {len(pending)} {resource_name} to shutdown: {pending}." - ) + msg = f"Waiting for {len(pending)} {resource_name} to shutdown: {pending}." + raise _ResourceStillNotRemovedError(msg) _fetch_and_print_services(docker_client, "[AFTER REMOVED]") + + +@pytest.fixture +async def docker_network( + docker_swarm: None, + async_docker_client: aiodocker.Docker, + faker: Faker, +) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]: + networks = [] + + async def _network_creator(**network_config_kwargs) -> dict[str, Any]: + network = await async_docker_client.networks.create( + config={"Name": faker.uuid4(), "Driver": "overlay"} | network_config_kwargs + ) + assert network + print(f"--> created network {network=}") + networks.append(network) + return await network.show() + + yield _network_creator + + # wait until all networks are really gone + async def _wait_for_network_deletion(network: aiodocker.docker.DockerNetwork): + network_name = (await network.show())["Name"] + await network.delete() + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) + ): + with attempt: + print(f"<-- waiting for network '{network_name}' deletion...") + list_of_network_names = [ + n["Name"] for n in await async_docker_client.networks.list() + ] + assert network_name not in list_of_network_names + print(f"<-- network '{network_name}' deleted") + + print(f"<-- removing all networks {networks=}") + await asyncio.gather(*[_wait_for_network_deletion(network) for network in networks]) diff --git a/packages/pytest-simcore/src/pytest_simcore/environment_configs.py b/packages/pytest-simcore/src/pytest_simcore/environment_configs.py index 215240a5566..6495f1f7cc1 100644 --- a/packages/pytest-simcore/src/pytest_simcore/environment_configs.py +++ b/packages/pytest-simcore/src/pytest_simcore/environment_configs.py @@ -3,26 +3,125 @@ # pylint: disable=unused-variable +import re from pathlib import Path +from typing import Any import pytest -from pytest import MonkeyPatch +from .helpers.monkeypatch_envs import load_dotenv, setenvs_from_dict from .helpers.typing_env import EnvVarsDict -from .helpers.utils_envs import load_dotenv, setenvs_from_dict + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore") + simcore_group.addoption( + 
"--external-envfile", + action="store", + type=Path, + default=None, + help="Path to an env file. Consider passing a link to repo configs, i.e. `ln -s /path/to/osparc-ops-config/repo.config`", + ) + + +@pytest.fixture(scope="session") +def external_envfile_dict(request: pytest.FixtureRequest) -> EnvVarsDict: + """ + If a file under test folder prefixed with `.env-secret` is present, + then this fixture captures it. + + This technique allows reusing the same tests to check against + external development/production servers + """ + envs = {} + if envfile := request.config.getoption("--external-envfile"): + print("🚨 EXTERNAL `envfile` option detected. Loading", envfile, "...") + + assert isinstance(envfile, Path) + assert envfile.exists() + assert envfile.is_file() + + envs = load_dotenv(envfile) + + return envs + + +@pytest.fixture(scope="session") +def skip_if_external_envfile_dict(external_envfile_dict: EnvVarsDict) -> None: + if not external_envfile_dict: + pytest.skip(reason="Skipping test since external-envfile is not set") @pytest.fixture(scope="session") def env_devel_dict(env_devel_file: Path) -> EnvVarsDict: assert env_devel_file.exists() assert env_devel_file.name == ".env-devel" - envs = load_dotenv(env_devel_file, verbose=True, interpolate=True) - return envs + return load_dotenv(env_devel_file, verbose=True, interpolate=True) -@pytest.fixture(scope="function") +@pytest.fixture def mock_env_devel_environment( - env_devel_dict: EnvVarsDict, monkeypatch: MonkeyPatch + env_devel_dict: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return setenvs_from_dict(monkeypatch, {**env_devel_dict}) + + +# +# ENVIRONMENT IN A SERVICE +# + + +@pytest.fixture(scope="session") +def service_name(project_slug_dir: Path) -> str: + """ + project_slug_dir MUST be defined on root's conftest.py + """ + return project_slug_dir.name + + +@pytest.fixture(scope="session") +def services_docker_compose_dict(services_docker_compose_file: Path) -> EnvVarsDict: + # NOTE: By keeping import here, this library is ONLY required when the fixture is used + import yaml + + content = yaml.safe_load(services_docker_compose_file.read_text()) + assert "services" in content + return content + + +@pytest.fixture +def docker_compose_service_environment_dict( + services_docker_compose_dict: dict[str, Any], + env_devel_dict: EnvVarsDict, + service_name: str, + env_devel_file: Path, ) -> EnvVarsDict: - envs = setenvs_from_dict(monkeypatch, env_devel_dict) + """Returns env vars dict from the docker-compose `environment` section + + - env_devel_dict in environment_configs plugin + - service_name needs to be defined + """ + service = services_docker_compose_dict["services"][service_name] + + def _substitute(key, value) -> tuple[str, str]: + if m := re.match(r"\${([^{}:-]\w+)", value): + expected_env_var = m.group(1) + try: + # NOTE: if this raises, then the RHS env-vars in the docker-compose are + # not defined in the env-devel + if value := env_devel_dict[expected_env_var]: + return key, value + except KeyError: + pytest.fail( + f"{expected_env_var} is not defined in '{env_devel_file}' but it " + f"is used as a rhs variable in the docker-compose services[{service_name}].environment[{key}]" + ) + return key, value + + envs: EnvVarsDict = {} + for key, value in service.get("environment", {}).items(): + if found := _substitute(key, value): + _, new_value = found + envs[key] = new_value + return envs diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/__init__.py 
b/packages/pytest-simcore/src/pytest_simcore/examples/__init__.py similarity index 100% rename from services/director-v2/src/simcore_service_director_v2/models/schemas/__init__.py rename to packages/pytest-simcore/src/pytest_simcore/examples/__init__.py diff --git a/packages/pytest-simcore/src/pytest_simcore/examples/models_library.py b/packages/pytest-simcore/src/pytest_simcore/examples/models_library.py new file mode 100644 index 00000000000..cbd0f18d8ff --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/examples/models_library.py @@ -0,0 +1,79 @@ +from typing import Final + +PAGE_EXAMPLES: Final[list[dict]] = [ + # first page Page[str] + { + "_meta": {"total": 7, "count": 4, "limit": 4, "offset": 0}, + "_links": { + "self": "https://osparc.io/v2/listing?offset=0&limit=4", + "first": "https://osparc.io/v2/listing?offset=0&limit=4", + "prev": None, + "next": "https://osparc.io/v2/listing?offset=1&limit=4", + "last": "https://osparc.io/v2/listing?offset=1&limit=4", + }, + "data": ["data 1", "data 2", "data 3", "data 4"], + }, + # second and last page + { + "_meta": {"total": 7, "count": 3, "limit": 4, "offset": 1}, + "_links": { + "self": "https://osparc.io/v2/listing?offset=1&limit=4", + "first": "https://osparc.io/v2/listing?offset=0&limit=4", + "prev": "https://osparc.io/v2/listing?offset=0&limit=4", + "next": None, + "last": "https://osparc.io/v2/listing?offset=1&limit=4", + }, + "data": ["data 5", "data 6", "data 7"], + }, + # empty page + { + "_meta": {"total": 0, "count": 0, "limit": 4, "offset": 0}, + "_links": { + "self": "https://osparc.io/v2/listing?offset=0&limit=4", + "first": "https://osparc.io/v2/listing?offset=0&limit=4", + "prev": None, + "next": None, + "last": "https://osparc.io/v2/listing?offset=0&limit=4", + }, + "data": [], + }, +] + +RPC_PAGE_EXAMPLES: Final[list[dict]] = [ + # first page Page[str] + { + "_meta": {"total": 7, "count": 4, "limit": 4, "offset": 0}, + "_links": { + "self": {"offset": 0, "limit": 4}, + "first": {"offset": 0, "limit": 4}, + "prev": None, + "next": {"offset": 1, "limit": 4}, + "last": {"offset": 1, "limit": 4}, + }, + "data": ["data 1", "data 2", "data 3", "data 4"], + }, + # second and last page + { + "_meta": {"total": 7, "count": 3, "limit": 4, "offset": 1}, + "_links": { + "self": {"offset": 1, "limit": 4}, + "first": {"offset": 0, "limit": 4}, + "prev": {"offset": 0, "limit": 4}, + "next": None, + "last": {"offset": 1, "limit": 4}, + }, + "data": ["data 5", "data 6", "data 7"], + }, + # empty page + { + "_meta": {"total": 0, "count": 0, "limit": 4, "offset": 0}, + "_links": { + "self": {"offset": 0, "limit": 4}, + "first": {"offset": 0, "limit": 4}, + "prev": None, + "next": None, + "last": {"offset": 0, "limit": 4}, + }, + "data": [], + }, +] diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_payments_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_payments_data.py new file mode 100644 index 00000000000..3f4058b72e9 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/faker_payments_data.py @@ -0,0 +1,88 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +""" + +Needs including pytest_plugins = [ + "pytest_simcore.faker_products_data", + "pytest_simcore.faker_users_data", +] + + +""" + +from datetime import datetime, timedelta, timezone +from typing import Any + +import pytest +from faker import Faker +from models_library.basic_types import IDStr +from 
models_library.payments import StripeInvoiceID +from models_library.products import ProductName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, HttpUrl, TypeAdapter +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, +) + +from .helpers.faker_factories import random_payment_transaction + + +@pytest.fixture +def wallet_id(faker: Faker) -> WalletID: + return TypeAdapter(WalletID).validate_python(faker.pyint()) + + +@pytest.fixture +def wallet_name(faker: Faker) -> IDStr: + return TypeAdapter(IDStr).validate_python(f"wallet-{faker.word()}") + + +@pytest.fixture +def invoice_url(faker: Faker) -> str: + return faker.image_url() + + +@pytest.fixture +def invoice_pdf_url(faker: Faker) -> str: + return faker.image_url() + + +@pytest.fixture +def stripe_invoice_id(faker: Faker) -> StripeInvoiceID: + return TypeAdapter(StripeInvoiceID).validate_python(f"in_{faker.word()}") + + +@pytest.fixture +def successful_transaction( + faker: Faker, + wallet_id: WalletID, + user_email: EmailStr, + user_id: UserID, + product_name: ProductName, + invoice_url: HttpUrl, + invoice_pdf_url: HttpUrl, + stripe_invoice_id: StripeInvoiceID, +) -> dict[str, Any]: + + initiated_at = datetime.now(tz=timezone.utc) + return random_payment_transaction( + payment_id=f"pt_{faker.pyint()}", + price_dollars=faker.pydecimal(positive=True, right_digits=2, left_digits=4), + state=PaymentTransactionState.SUCCESS, + initiated_at=initiated_at, + completed_at=initiated_at + timedelta(seconds=10), + osparc_credits=faker.pydecimal(positive=True, right_digits=2, left_digits=4), + product_name=product_name, + user_id=user_id, + user_email=user_email, + wallet_id=wallet_id, + comment=f"fake fixture in {__name__}.successful_transaction", + invoice_url=invoice_url, + invoice_pdf_url=invoice_pdf_url, + stripe_invoice_id=stripe_invoice_id, + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py new file mode 100644 index 00000000000..e55c1e489f0 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/faker_products_data.py @@ -0,0 +1,82 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +""" + Fixtures to produce fake data for a product: + - it is self-consistent + - granular customization by overriding fixtures +""" + +from typing import Any + +import pytest +from faker import Faker +from models_library.products import ProductName, StripePriceID, StripeTaxRateID +from pydantic import EmailStr, TypeAdapter + +from .helpers.faker_factories import random_product + +_MESSAGE = ( + "If set, it overrides the fake value of `{}` fixture." 
+ " Can be handy when interacting with external/real APIs" +) + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore") + simcore_group.addoption( + "--faker-support-email", + action="store", + type=str, + default=None, + help=_MESSAGE.format("support_email"), + ) + simcore_group.addoption( + "--faker-bcc-email", + action="store", + type=str, + default=None, + help=_MESSAGE.format("bcc_email"), + ) + + +@pytest.fixture +def product_name() -> ProductName: + return ProductName("thetestproduct") + + +@pytest.fixture +def support_email( + request: pytest.FixtureRequest, product_name: ProductName +) -> EmailStr: + return TypeAdapter(EmailStr).validate_python( + request.config.getoption("--faker-support-email", default=None) + or f"support@{product_name}.info", + ) + + +@pytest.fixture +def bcc_email(request: pytest.FixtureRequest, product_name: ProductName) -> EmailStr: + return TypeAdapter(EmailStr).validate_python( + request.config.getoption("--faker-bcc-email", default=None) + or f"finance@{product_name}-department.info", + ) + + +@pytest.fixture +def product( + faker: Faker, product_name: ProductName, support_email: EmailStr +) -> dict[str, Any]: + return random_product(name=product_name, support_email=support_email, fake=faker) + + +@pytest.fixture +def product_price_stripe_price_id(faker: Faker) -> StripePriceID: + return StripePriceID(faker.word()) + + +@pytest.fixture +def product_price_stripe_tax_rate_id(faker: Faker) -> StripeTaxRateID: + return StripeTaxRateID(faker.word()) diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_projects_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_projects_data.py new file mode 100644 index 00000000000..09f8a8b75e8 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/faker_projects_data.py @@ -0,0 +1,57 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +""" + Fixtures to produce fake data for a project: + - it is self-consistent + - granular customization by overriding fixtures +""" + + +from typing import Any + +import pytest +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.faker_factories import random_project + +_MESSAGE = ( + "If set, it overrides the fake value of `{}` fixture." 
+ " Can be handy when interacting with external/real APIs" +) + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore") + simcore_group.addoption( + "--faker-project-id", + action="store", + type=str, + default=None, + help=_MESSAGE.format("project_id"), + ) + + +@pytest.fixture +def project_id(faker: Faker, request: pytest.FixtureRequest) -> ProjectID: + return TypeAdapter(ProjectID).validate_python( + request.config.getoption("--faker-project-id", default=None) or faker.uuid4(), + ) + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return TypeAdapter(NodeID).validate_python(faker.uuid4()) + + +@pytest.fixture +def project_data( + faker: Faker, + project_id: ProjectID, + user_id: UserID, +) -> dict[str, Any]: + return random_project(fake=faker, uuid=f"{project_id}", prj_owner=user_id) diff --git a/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py b/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py new file mode 100644 index 00000000000..4e59b6db93a --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/faker_users_data.py @@ -0,0 +1,136 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +""" + Fixtures to produce fake data for a user: + - it is self-consistent + - granular customization by overriding fixtures +""" + +from typing import Any + +import pytest +from faker import Faker +from models_library.basic_types import IDStr +from models_library.users import UserID +from pydantic import EmailStr, TypeAdapter + +from .helpers.faker_factories import DEFAULT_TEST_PASSWORD, random_user + +_MESSAGE = ( + "If set, it overrides the fake value of `{}` fixture." + " Can be handy when interacting with external/real APIs" +) + + +_FAKE_USER_EMAIL_OPTION = "--faker-user-email" + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore") + simcore_group.addoption( + "--faker-user-id", + action="store", + type=int, + default=None, + help=_MESSAGE.format("user_id"), + ) + simcore_group.addoption( + _FAKE_USER_EMAIL_OPTION, + action="store", + type=str, + default=None, + help=_MESSAGE.format("user_email"), + ) + simcore_group.addoption( + "--faker-user-api-key", + action="store", + type=str, + default=None, + help=_MESSAGE.format("user_api_key"), + ) + simcore_group.addoption( + "--faker-user-api-secret", + action="store", + type=str, + default=None, + help=_MESSAGE.format("user_api_secret"), + ) + + +@pytest.fixture +def user_id(faker: Faker, request: pytest.FixtureRequest) -> UserID: + return TypeAdapter(UserID).validate_python( + request.config.getoption("--faker-user-id", default=None) or faker.pyint(), + ) + + +@pytest.fixture(scope="session") +def is_external_user_email(request: pytest.FixtureRequest) -> bool: + return bool(request.config.getoption(_FAKE_USER_EMAIL_OPTION, default=None)) + + +@pytest.fixture +def user_email(faker: Faker, request: pytest.FixtureRequest) -> EmailStr: + return TypeAdapter(EmailStr).validate_python( + request.config.getoption(_FAKE_USER_EMAIL_OPTION, default=None) + or faker.email(), + ) + + +@pytest.fixture +def user_first_name(faker: Faker) -> str: + return faker.first_name() + + +@pytest.fixture +def user_last_name(faker: Faker) -> str: + return faker.last_name() + + +@pytest.fixture +def user_name(user_email: str) -> IDStr: + return TypeAdapter(IDStr).validate_python(user_email.split("@")[0]) + + +@pytest.fixture +def user_password(faker: Faker) -> 
str: + return faker.password(length=len(DEFAULT_TEST_PASSWORD)) + + +@pytest.fixture +def user_api_key(user_name: str, request: pytest.FixtureRequest) -> str: + return str( + request.config.getoption("--faker-user-api-key", default=None) + or f"api-key-{user_name}" + ) + + +@pytest.fixture +def user_api_secret(user_password: str, request: pytest.FixtureRequest) -> str: + return str( + request.config.getoption("--faker-user-api-secret", default=None) + or f"api-secret-{user_password}" + ) + + +@pytest.fixture +def user( + faker: Faker, + user_id: UserID, + user_email: EmailStr, + user_first_name: str, + user_last_name: str, + user_name: IDStr, + user_password: str, +) -> dict[str, Any]: + return random_user( + id=user_id, + email=user_email, + name=user_name, + first_name=user_first_name, + last_name=user_last_name, + password=user_password, + fake=faker, + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/file_extra.py b/packages/pytest-simcore/src/pytest_simcore/file_extra.py index f544757fbfc..b50e96d8f6c 100644 --- a/packages/pytest-simcore/src/pytest_simcore/file_extra.py +++ b/packages/pytest-simcore/src/pytest_simcore/file_extra.py @@ -1,15 +1,32 @@ +import logging +from collections.abc import Callable, Iterable, Iterator from pathlib import Path -from typing import Callable, Optional import pytest from faker import Faker -from pydantic import ByteSize +from pydantic import ByteSize, NonNegativeInt + +from .helpers.logging_tools import log_context @pytest.fixture -def create_file_of_size(tmp_path: Path, faker: Faker) -> Callable[[ByteSize], Path]: - # NOTE: cleanup is done by tmp_path fixture - def _creator(size: ByteSize, name: Optional[str] = None) -> Path: +def fake_file_name(tmp_path: Path, faker: Faker) -> Iterable[Path]: + file = tmp_path / faker.file_name() + + yield file + + if file.exists(): + file.unlink() + assert not file.exists() + + +@pytest.fixture +def create_file_of_size( + tmp_path: Path, faker: Faker +) -> Iterator[Callable[[ByteSize], Path]]: + created_files = [] + + def _creator(size: ByteSize, name: str | None = None) -> Path: file: Path = tmp_path / (name or faker.file_name()) if not file.parent.exists(): file.parent.mkdir(parents=True) @@ -19,6 +36,83 @@ def _creator(size: ByteSize, name: Optional[str] = None) -> Path: assert file.exists() assert file.stat().st_size == size + created_files.append(file) return file - return _creator + yield _creator + + for file in created_files: + if file.exists(): + file.unlink() + assert not file.exists() + + +def _create_random_content( + faker: Faker, + *, + base_dir: Path, + file_min_size: ByteSize, + file_max_size: ByteSize, + remaining_size: ByteSize, + depth: NonNegativeInt | None, +) -> ByteSize: + if remaining_size <= 0: + return remaining_size + + file_size = ByteSize( + faker.pyint( + min_value=min(file_min_size, remaining_size), + max_value=min(remaining_size, file_max_size), + ) + ) + if depth is None: + depth = faker.pyint(0, 5) + file_path = base_dir / f"{faker.unique.file_path(depth=depth, absolute=False)}" + file_path.parent.mkdir(parents=True, exist_ok=True) + assert not file_path.exists() + with file_path.open("wb") as fp: + fp.write(f"I am a {file_size.human_readable()} file".encode()) + fp.truncate(file_size) + assert file_path.exists() + + return ByteSize(remaining_size - file_size) + + +@pytest.fixture +def create_folder_of_size_with_multiple_files( + tmp_path: Path, faker: Faker +) -> Callable[[ByteSize, ByteSize, ByteSize, Path | None], Path]: + def _create_folder_of_size_with_multiple_files( 
+ directory_size: ByteSize, + file_min_size: ByteSize, + file_max_size: ByteSize, + working_directory: Path | None, + depth: NonNegativeInt | None = None, + ) -> Path: + # Helper function to create random files and directories + assert file_min_size > 0 + assert file_min_size <= file_max_size + + # Recursively create content in the temporary directory + folder_path = working_directory or tmp_path + remaining_size = directory_size + with log_context( + logging.INFO, + msg=f"creating {directory_size.human_readable()} of random files " + f"(up to {file_max_size.human_readable()}) in {folder_path}", + ) as ctx: + num_files_created = 0 + while remaining_size > 0: + remaining_size = _create_random_content( + faker, + base_dir=folder_path, + file_min_size=file_min_size, + file_max_size=file_max_size, + remaining_size=remaining_size, + depth=depth, + ) + num_files_created += 1 + ctx.logger.info("created %s files", num_files_created) + return folder_path + + return _create_folder_of_size_with_multiple_files diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py b/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py new file mode 100644 index 00000000000..fc931cbebd5 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/assert_checks.py @@ -0,0 +1,99 @@ +"""Extends assertions for testing""" + +from http import HTTPStatus +from pprint import pformat + +from aiohttp import ClientResponse +from servicelib.aiohttp import status +from servicelib.rest_responses import unwrap_envelope +from servicelib.status_codes_utils import get_code_display_name, is_error + + +async def assert_status( + response: ClientResponse, + expected_status_code: int, + expected_msg: str | None = None, + expected_error_code: str | None = None, + *, + include_meta: bool | None = False, + include_links: bool | None = False, +) -> tuple[dict, ...]: + """ + Asserts for enveloped responses + """ + # raises ValueError if cannot be converted + expected_status_code = HTTPStatus(expected_status_code) + + # reponse + json_response = await response.json() + data, error = unwrap_envelope(json_response) + + assert response.status == expected_status_code, ( + f"Expected: {get_code_display_name(expected_status_code)} : {expected_msg or ''}" + f"Got: {response.status}:\n" + f" - data :{pformat(data)}\n" + f" - error:{pformat(error)}\n)" + ) + + if is_error(expected_status_code): + _do_assert_error( + data, error, expected_status_code, expected_msg, expected_error_code + ) + + elif expected_status_code == status.HTTP_204_NO_CONTENT: + assert not data, pformat(data) + assert not error, pformat(error) + else: + # with a 200, data may still be empty so we cannot 'assert data is not None' + # SEE https://medium.com/@santhoshkumarkrishna/http-get-rest-api-no-content-404-vs-204-vs-200-6dd869e3af1d + assert not error, pformat(error) + + if expected_msg: + assert expected_msg in data["message"] + + return_value = ( + data, + error, + ) + if include_meta: + return_value += (json_response.get("_meta"),) + if include_links: + return_value += (json_response.get("_links"),) + return return_value + + +async def assert_error( + response: ClientResponse, + expected_status_code: int, + expected_msg: str | None = None, +): + data, error = unwrap_envelope(await response.json()) + return _do_assert_error(data, error, expected_status_code, expected_msg) + + +def _do_assert_error( + data, + error, + expected_status_code: int, + expected_msg: str | None = None, + expected_error_code: str | None = None, +): + 
assert not data, pformat(data) + assert error, pformat(error) + + assert is_error(expected_status_code) + + # New versions of the error models might not have this attribute + details = error.get("errors", []) + + if expected_msg: + assert details + messages = [e["message"] for e in details] + assert expected_msg in messages + + if expected_error_code: + assert details + codes = [e["code"] for e in details] + assert expected_error_code in codes + + return data, error diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py b/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py new file mode 100644 index 00000000000..2d6c278d92c --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/autoscaling.py @@ -0,0 +1,73 @@ +from collections.abc import Callable + +import arrow +from aws_library.ec2 import EC2InstanceData +from models_library.generated_models.docker_rest_api import ( + Availability, + Node, + NodeState, +) +from pytest_mock import MockType +from simcore_service_autoscaling.models import AssociatedInstance, Cluster +from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY, + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, +) + + +def assert_cluster_state( + spied_cluster_analysis: MockType, *, expected_calls: int, expected_num_machines: int +) -> Cluster: + assert spied_cluster_analysis.call_count == expected_calls + + assert isinstance(spied_cluster_analysis.spy_return, Cluster) + assert ( + spied_cluster_analysis.spy_return.total_number_of_machines() + == expected_num_machines + ) + print("current cluster state:", spied_cluster_analysis.spy_return) + cluster = spied_cluster_analysis.spy_return + spied_cluster_analysis.reset_mock() + return cluster + + +def create_fake_association( + create_fake_node: Callable[..., Node], + drained_machine_id: str | None, + terminating_machine_id: str | None, +): + fake_node_to_instance_map = {} + + async def _fake_node_creator( + _nodes: list[Node], ec2_instances: list[EC2InstanceData] + ) -> tuple[list[AssociatedInstance], list[EC2InstanceData]]: + def _create_fake_node_with_labels(instance: EC2InstanceData) -> Node: + if instance not in fake_node_to_instance_map: + fake_node = create_fake_node() + assert fake_node.spec + fake_node.spec.availability = Availability.active + assert fake_node.status + fake_node.status.state = NodeState.ready + assert fake_node.spec.labels + fake_node.spec.labels |= { + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: arrow.utcnow().isoformat(), + _OSPARC_SERVICE_READY_LABEL_KEY: ( + "true" if instance.id != drained_machine_id else "false" + ), + } + if instance.id == terminating_machine_id: + fake_node.spec.labels |= { + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY: arrow.utcnow().isoformat() + } + fake_node_to_instance_map[instance] = fake_node + return fake_node_to_instance_map[instance] + + associated_instances = [ + AssociatedInstance(node=_create_fake_node_with_labels(i), ec2_instance=i) + for i in ec2_instances + ] + + return associated_instances, [] + + return _fake_node_creator diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/aws_ec2.py b/packages/pytest-simcore/src/pytest_simcore/helpers/aws_ec2.py new file mode 100644 index 00000000000..5f16fefc801 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/aws_ec2.py @@ -0,0 +1,215 @@ +import base64 +from collections.abc import Sequence + +from common_library.json_serialization import json_dumps +from 
models_library.docker import DockerGenericTag +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.type_defs import ( + FilterTypeDef, + InstanceTypeDef, + ReservationTypeDef, + TagTypeDef, +) + + +async def assert_autoscaled_computational_ec2_instances( + ec2_client: EC2Client, + *, + expected_num_reservations: int, + expected_num_instances: int, + expected_instance_type: InstanceTypeType, + expected_instance_state: InstanceStateNameType, + expected_additional_tag_keys: list[str], +) -> list[InstanceTypeDef]: + return await assert_ec2_instances( + ec2_client, + expected_num_reservations=expected_num_reservations, + expected_num_instances=expected_num_instances, + expected_instance_type=expected_instance_type, + expected_instance_state=expected_instance_state, + expected_instance_tag_keys=[ + "io.simcore.autoscaling.dask-scheduler_url", + "user_id", + "wallet_id", + *expected_additional_tag_keys, + ], + expected_user_data=["docker swarm join"], + ) + + +async def assert_autoscaled_dynamic_ec2_instances( + ec2_client: EC2Client, + *, + expected_num_reservations: int, + expected_num_instances: int, + expected_instance_type: InstanceTypeType, + expected_instance_state: InstanceStateNameType, + expected_additional_tag_keys: list[str], + instance_filters: Sequence[FilterTypeDef] | None, + expected_user_data: list[str] | None = None, + check_reservation_index: int | None = None, +) -> list[InstanceTypeDef]: + if expected_user_data is None: + expected_user_data = ["docker swarm join"] + return await assert_ec2_instances( + ec2_client, + expected_num_reservations=expected_num_reservations, + expected_num_instances=expected_num_instances, + expected_instance_type=expected_instance_type, + expected_instance_state=expected_instance_state, + expected_instance_tag_keys=[ + "io.simcore.autoscaling.monitored_nodes_labels", + "io.simcore.autoscaling.monitored_services_labels", + *expected_additional_tag_keys, + ], + expected_user_data=expected_user_data, + instance_filters=instance_filters, + check_reservation_index=check_reservation_index, + ) + + +async def assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client: EC2Client, + *, + expected_num_reservations: int, + expected_num_instances: int, + expected_instance_type: InstanceTypeType, + expected_instance_state: InstanceStateNameType, + expected_additional_tag_keys: list[str], + expected_pre_pulled_images: list[DockerGenericTag] | None, + instance_filters: Sequence[FilterTypeDef] | None, + check_reservation_index: int | None = None, +) -> list[InstanceTypeDef]: + return await assert_ec2_instances( + ec2_client, + expected_num_reservations=expected_num_reservations, + expected_num_instances=expected_num_instances, + expected_instance_type=expected_instance_type, + expected_instance_state=expected_instance_state, + expected_instance_tag_keys=[ + "io.simcore.autoscaling.monitored_nodes_labels", + "io.simcore.autoscaling.monitored_services_labels", + "io.simcore.autoscaling.buffer_machine", + *expected_additional_tag_keys, + ], + expected_pre_pulled_images=expected_pre_pulled_images, + expected_user_data=[], + instance_filters=instance_filters, + check_reservation_index=check_reservation_index, + ) + + +async def _assert_reservation( + ec2_client: EC2Client, + reservation: ReservationTypeDef, + *, + expected_num_instances: int, + expected_instance_type: InstanceTypeType, + expected_instance_state: InstanceStateNameType, + 
expected_instance_tag_keys: list[str], + expected_user_data: list[str], + expected_pre_pulled_images: list[DockerGenericTag] | None, +) -> list[InstanceTypeDef]: + list_instances: list[InstanceTypeDef] = [] + assert "Instances" in reservation + assert ( + len(reservation["Instances"]) == expected_num_instances + ), f"expected {expected_num_instances}, found {len(reservation['Instances'])}" + for instance in reservation["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == expected_instance_type + assert "Tags" in instance + assert instance["Tags"] + expected_tag_keys = { + *expected_instance_tag_keys, + "io.simcore.autoscaling.version", + "Name", + } + instance_tag_keys = {tag["Key"] for tag in instance["Tags"] if "Key" in tag} + assert instance_tag_keys == expected_tag_keys + + if expected_pre_pulled_images is None: + assert "io.simcore.autoscaling.pre_pulled_images" not in instance_tag_keys + else: + assert "io.simcore.autoscaling.pre_pulled_images" in instance_tag_keys + + def _by_pre_pull_image(ec2_tag: TagTypeDef) -> bool: + assert "Key" in ec2_tag + return ec2_tag["Key"] == "io.simcore.autoscaling.pre_pulled_images" + + instance_pre_pulled_images_aws_tag = next( + iter(filter(_by_pre_pull_image, instance["Tags"])) + ) + assert "Value" in instance_pre_pulled_images_aws_tag + assert ( + instance_pre_pulled_images_aws_tag["Value"] + == f"{json_dumps(expected_pre_pulled_images)}" + ) + + assert "PrivateDnsName" in instance + instance_private_dns_name = instance["PrivateDnsName"] + if expected_instance_state not in ["terminated"]: + # NOTE: moto behaves here differently than AWS by still returning an IP which does not really make sense + assert instance_private_dns_name.endswith(".ec2.internal") + assert "State" in instance + state = instance["State"] + assert "Name" in state + assert state["Name"] == expected_instance_state + + assert "InstanceId" in instance + user_data = await ec2_client.describe_instance_attribute( + Attribute="userData", InstanceId=instance["InstanceId"] + ) + assert "UserData" in user_data + assert "Value" in user_data["UserData"] + user_data = base64.b64decode(user_data["UserData"]["Value"]).decode() + for user_data_string in expected_user_data: + assert user_data.count(user_data_string) == 1 + list_instances.append(instance) + return list_instances + + +async def assert_ec2_instances( + ec2_client: EC2Client, + *, + expected_num_reservations: int, + expected_num_instances: int, + expected_instance_type: InstanceTypeType, + expected_instance_state: InstanceStateNameType, + expected_instance_tag_keys: list[str], + expected_user_data: list[str], + expected_pre_pulled_images: list[DockerGenericTag] | None = None, + instance_filters: Sequence[FilterTypeDef] | None = None, + check_reservation_index: int | None = None, +) -> list[InstanceTypeDef]: + all_instances = await ec2_client.describe_instances(Filters=instance_filters or []) + assert len(all_instances["Reservations"]) == expected_num_reservations + if check_reservation_index is not None: + assert check_reservation_index < len(all_instances["Reservations"]) + reservation = all_instances["Reservations"][check_reservation_index] + return await _assert_reservation( + ec2_client, + reservation, + expected_num_instances=expected_num_instances, + expected_instance_type=expected_instance_type, + expected_instance_state=expected_instance_state, + expected_instance_tag_keys=expected_instance_tag_keys, + expected_user_data=expected_user_data, + 
expected_pre_pulled_images=expected_pre_pulled_images, + ) + list_instances: list[InstanceTypeDef] = [] + for reservation in all_instances["Reservations"]: + list_instances.extend( + await _assert_reservation( + ec2_client, + reservation, + expected_num_instances=expected_num_instances, + expected_instance_type=expected_instance_type, + expected_instance_state=expected_instance_state, + expected_instance_tag_keys=expected_instance_tag_keys, + expected_user_data=expected_user_data, + expected_pre_pulled_images=expected_pre_pulled_images, + ) + ) + return list_instances diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_rpc_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_rpc_server.py new file mode 100644 index 00000000000..aba75624983 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_rpc_server.py @@ -0,0 +1,291 @@ +# pylint: disable=no-self-use +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import fnmatch +from dataclasses import dataclass + +from models_library.api_schemas_catalog.services import ( + LatestServiceGet, + ServiceGetV2, + ServiceListFilters, + ServiceSummary, + ServiceUpdateV2, +) +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.products import ProductName +from models_library.rest_pagination import PageOffsetInt +from models_library.rpc_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + PageLimitInt, + PageRpc, +) +from models_library.services_enums import ServiceType +from models_library.services_history import ServiceRelease +from models_library.services_regex import ( + COMPUTATIONAL_SERVICE_KEY_RE, + DYNAMIC_SERVICE_KEY_RE, +) +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter, validate_call +from pytest_mock import MockType +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + +assert ServiceListFilters.model_json_schema()["properties"].keys() == { + "service_type", + "service_key_pattern", + "version_display_pattern", +}, ( + "ServiceListFilters is expected to only have the key 'service_type'. " + "Please update the mock if the schema changes." 
+) + + +class CatalogRpcSideEffects: + # pylint: disable=no-self-use + @validate_call(config={"arbitrary_types_allowed": True}) + async def list_services_paginated( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, + ): + assert rpc_client + assert product_name + assert user_id + + services_list = TypeAdapter(list[LatestServiceGet]).validate_python( + LatestServiceGet.model_json_schema()["examples"], + ) + if filters: + + filtered_services = [] + for src in services_list: + # Match service type if specified + if filters.service_type and src.service_type != filters.service_type: + continue + + # Match service key pattern if specified + if filters.service_key_pattern and not fnmatch.fnmatch( + src.key, filters.service_key_pattern + ): + continue + + # Match version display pattern if specified + if filters.version_display_pattern and ( + src.version_display is None + or not fnmatch.fnmatch( + src.version_display, filters.version_display_pattern + ) + ): + continue + + filtered_services.append(src) + + services_list = filtered_services + + total_count = len(services_list) + + return PageRpc[LatestServiceGet].create( + services_list[offset : offset + limit], + total=total_count, + limit=limit, + offset=offset, + ) + + @validate_call(config={"arbitrary_types_allowed": True}) + async def get_service( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + ): + assert rpc_client + assert product_name + assert user_id + + got = ServiceGetV2.model_validate( + ServiceGetV2.model_json_schema()["examples"][0] + ) + got.version = service_version + got.key = service_key + + if DYNAMIC_SERVICE_KEY_RE.match(got.key): + got.service_type = ServiceType.DYNAMIC + elif COMPUTATIONAL_SERVICE_KEY_RE.match(got.key): + got.service_type = ServiceType.COMPUTATIONAL + else: + msg = "Service type not recognized. 
Please extend the mock yourself" + raise RuntimeError(msg) + + return got + + @validate_call(config={"arbitrary_types_allowed": True}) + async def update_service( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + update: ServiceUpdateV2, + ) -> ServiceGetV2: + assert rpc_client + assert product_name + assert user_id + + got = ServiceGetV2.model_validate( + ServiceGetV2.model_json_schema()["examples"][0] + ) + got.version = service_version + got.key = service_key + return got.model_copy(update=update.model_dump(exclude_unset=True)) + + @validate_call(config={"arbitrary_types_allowed": True}) + async def list_my_service_history_latest_first( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, + ) -> PageRpc[ServiceRelease]: + + assert rpc_client + assert product_name + assert user_id + assert service_key + assert filters is None, "filters not mocked yet" + + items = TypeAdapter(list[ServiceRelease]).validate_python( + ServiceRelease.model_json_schema()["examples"], + ) + total_count = len(items) + + return PageRpc[ServiceRelease].create( + items[offset : offset + limit], + total=total_count, + limit=limit, + offset=offset, + ) + + @validate_call(config={"arbitrary_types_allowed": True}) + async def get_service_ports( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + ) -> list[ServicePortGet]: + assert rpc_client + assert product_name + assert user_id + assert service_key + assert service_version + + return TypeAdapter(list[ServicePortGet]).validate_python( + ServicePortGet.model_json_schema()["examples"], + ) + + @validate_call(config={"arbitrary_types_allowed": True}) + async def list_all_services_summaries_paginated( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, + ): + assert rpc_client + assert product_name + assert user_id + + service_summaries = TypeAdapter(list[ServiceSummary]).validate_python( + ServiceSummary.model_json_schema()["examples"], + ) + if filters: + filtered_summaries = [] + for summary in service_summaries: + # Match service type if specified + if ( + filters.service_type + and { + ServiceType.COMPUTATIONAL: "/comp/", + ServiceType.DYNAMIC: "/dynamic/", + }[filters.service_type] + not in summary.key + ): + continue + + # Match service key pattern if specified + if filters.service_key_pattern and not fnmatch.fnmatch( + summary.key, filters.service_key_pattern + ): + continue + + # Match version display pattern if specified + if filters.version_display_pattern and ( + summary.version_display is None + or not fnmatch.fnmatch( + summary.version_display, filters.version_display_pattern + ) + ): + continue + + filtered_summaries.append(summary) + + service_summaries = filtered_summaries + + total_count = len(service_summaries) + + return PageRpc[ServiceSummary].create( + service_summaries[offset : offset + limit], + total=total_count, + limit=limit, + offset=offset, + ) + + +@dataclass +class ZeroListingCatalogRpcSideEffects: + """Catalog RPC mocks 
that return empty lists""" + + async def list_services_paginated(self, *args, **kwargs): ... + async def get_service(self, *args, **kwargs): ... + async def update_service(self, *args, **kwargs): ... + async def get_service_ports(self, *args, **kwargs): ... + async def list_my_service_history_latest_first(self, *args, **kwargs): + return PageRpc[ServiceRelease].create( + [], + total=0, + limit=DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset=0, + ) + + async def list_all_services_summaries_paginated(self, *args, **kwargs): + return PageRpc[ServiceSummary].create( + [], + total=0, + limit=DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset=0, + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_services.py b/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_services.py new file mode 100644 index 00000000000..5456afb3186 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/catalog_services.py @@ -0,0 +1,43 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from datetime import datetime +from typing import Any, Protocol + +from models_library.products import ProductName + + +class CreateFakeServiceDataCallable(Protocol): + """Signature for services/catalog/tests/unit/with_dbs/conftest.py::create_fake_service_data""" + + def __call__( + self, + key, + version, + team_access: str | None = None, + everyone_access: str | None = None, + product: ProductName = "osparc", + # DB overrides + deprecated: datetime | None = None, # DB column + version_display: str | None = None, # DB column + ) -> tuple[dict[str, Any], ...]: # type: ignore + """ + Returns a fake factory that creates catalog DATA that can be used to fill + both services_meta_data and services_access_rights tables + + + Example: + fake_service, *fake_access_rights = create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "0.0.1", + team_access="xw", + everyone_access="x", + product=target_product, + ), + + owner_access, team_access, everyone_access = fake_access_rights + + """ diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/comparing.py b/packages/pytest-simcore/src/pytest_simcore/helpers/comparing.py new file mode 100644 index 00000000000..31d2e986806 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/comparing.py @@ -0,0 +1,61 @@ +import asyncio +import hashlib +from concurrent.futures import ProcessPoolExecutor +from pathlib import Path +from typing import TypeAlias + +import aiofiles +from servicelib.file_utils import create_sha256_checksum + +_FilesInfo: TypeAlias = dict[str, Path] + + +def get_relative_to(folder: Path, file: Path) -> str: + return f"{file.relative_to(folder)}" + + +async def assert_same_file_content(path_1: Path, path_2: Path) -> None: + async with aiofiles.open(path_1, "rb") as f1, aiofiles.open(path_2, "rb") as f2: + checksum_1 = await create_sha256_checksum(f1) + checksum_2 = await create_sha256_checksum(f2) + assert checksum_1 == checksum_2 + + +def get_files_info_from_path(folder: Path) -> _FilesInfo: + return {get_relative_to(folder, f): f for f in folder.rglob("*") if f.is_file()} + + +def compute_hash(file_path: Path) -> tuple[Path, str]: + with Path.open(file_path, "rb") as file_to_hash: + file_hash = hashlib.md5() # noqa: S324 + chunk = file_to_hash.read(8192) + while chunk: + file_hash.update(chunk) + chunk = file_to_hash.read(8192) + + return file_path, file_hash.hexdigest() + + +async 
def compute_hashes(file_paths: list[Path]) -> dict[Path, str]:
+    """given a list of files, computes their hashes on a process pool"""
+
+    loop = asyncio.get_event_loop()
+
+    with ProcessPoolExecutor() as process_pool_executor:
+        tasks = [
+            loop.run_in_executor(process_pool_executor, compute_hash, file_path)
+            for file_path in file_paths
+        ]
+        # pylint: disable=unnecessary-comprehension
+        # NOTE: compute_hash returns a (Path, str) tuple, so this maps list[tuple[Path, str]] to dict[Path, str]
+        return dict(await asyncio.gather(*tasks))
+
+
+async def assert_same_contents(file_info1: _FilesInfo, file_info2: _FilesInfo) -> None:
+    assert set(file_info1.keys()) == set(file_info2.keys())
+
+    hashes_1 = await compute_hashes(list(file_info1.values()))
+    hashes_2 = await compute_hashes(list(file_info2.values()))
+
+    for key in file_info1:
+        assert hashes_1[file_info1[key]] == hashes_2[file_info2[key]]
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/constants.py b/packages/pytest-simcore/src/pytest_simcore/helpers/constants.py
index cd6edbf427f..5d517b9a071 100644
--- a/packages/pytest-simcore/src/pytest_simcore/helpers/constants.py
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/constants.py
@@ -7,4 +7,4 @@


 # string templates
-HEADER_STR: str = "{:-^50}\n"
+HEADER_STR: str = "{:-^100}\n"
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/deprecated_environs.py b/packages/pytest-simcore/src/pytest_simcore/helpers/deprecated_environs.py
new file mode 100644
index 00000000000..efd19ff37e2
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/deprecated_environs.py
@@ -0,0 +1,125 @@
+""" Utils to deal with environment variables (environs in short)
+
+"""
+import re
+import warnings
+from pathlib import Path
+
+import yaml
+
+from .typing_env import EnvVarsDict
+
+VARIABLE_SUBSTITUTION = re.compile(r"\$\{(\w+)(?:(:{0,1}[-?]{0,1})(.*))?\}$")
+
+warnings.warn(
+    f"{__name__} is deprecated, use instead pytest_simcore.helpers.utils_envs",
+    DeprecationWarning,
+)
+
+
+def _load_env(file_handler) -> dict:
+    """Deserializes an environment file like .env-devel and
+    returns a key-value map of the environment
+
+    Analogous to json.load
+    """
+    PATTERN_ENVIRON_EQUAL = re.compile(r"^(\w+)=(.*)$")
+    # Works even for `POSTGRES_EXPORTER_DATA_SOURCE_NAME=postgresql://simcore:simcore@postgres:5432/simcoredb?sslmode=disable`
+
+    environ = {}
+    for line in file_handler:
+        m = PATTERN_ENVIRON_EQUAL.match(line)
+        if m:
+            key, value = m.groups()
+            environ[key] = str(value)
+    return environ
+
+
+def replace_environs_in_docker_compose_service(
+    service_section: dict,
+    docker_compose_dir: Path,
+    host_environ: dict = None,
+    *,
+    use_env_devel=True,
+):
+    """Resolves environs in a docker-compose service section:
+    drops any reference to env_file and sets all
+    environs in the 'environment' section
+
+    NOTE: service_section gets modified!
+
+    SEE https://docs.docker.com/compose/environment-variables/
+    """
+    service_environ = {}
+
+    # environment defined in env_file
+    env_files: list[str] = service_section.pop("env_file", [])
+    for env_file in env_files:
+        if env_file.endswith(".env") and use_env_devel:
+            env_file += "-devel"
+
+        env_file_path = (docker_compose_dir / env_file).resolve()
+        with env_file_path.open() as fh:
+            file_environ = _load_env(fh)
+            service_environ.update(file_environ)
+
+    # explicit environment [overrides env_file]
+    environ_items = service_section.get("environment", [])
+    if environ_items and isinstance(environ_items, list):
+        for item in environ_items:
+            key, value = item.split("=")
+            m = VARIABLE_SUBSTITUTION.match(value)
+            if m:  # There is a variable as value in docker-compose
+                envkey = m.groups()[0]  # Variable name
+                if len(m.groups()) == 3:  # There is a default value
+                    default_value = m.groups()[2]
+                if envkey in host_environ:
+                    value = host_environ[envkey]  # Use host environ
+                    if default_value and len(value) == 0 and m.groups()[1] == ":-":
+                        value = default_value  # Unless it is empty and default exists
+                elif default_value:
+                    value = default_value  # Use default if exists
+
+            service_environ[key] = value
+
+    service_section["environment"] = service_environ
+
+
+def eval_service_environ(
+    docker_compose_path: Path,
+    service_name: str,
+    host_environ: dict = None,
+    image_environ: dict = None,
+    *,
+    use_env_devel=True,
+) -> EnvVarsDict:
+    """Deduces the environment a service gets when it runs in a stack, from its configuration
+
+    :param docker_compose_path: path to stack configuration
+    :type docker_compose_path: Path
+    :param service_name: service name as defined in docker-compose file
+    :type service_name: str
+    :param host_environ: environs in host when stack is started, defaults to None
+    :type host_environ: Dict, optional
+    :param image_environ: environs set in Dockerfile, defaults to None
+    :type image_environ: Dict, optional
+    :rtype: Dict
+    """
+    docker_compose_dir = docker_compose_path.parent.resolve()
+    with docker_compose_path.open() as f:
+        content = yaml.safe_load(f)
+
+    service = content["services"][service_name]
+    replace_environs_in_docker_compose_service(
+        service, docker_compose_dir, host_environ, use_env_devel=use_env_devel
+    )
+
+    host_environ = host_environ or {}
+    image_environ = image_environ or {}
+
+    # Environ expected in a running service
+    service_environ: EnvVarsDict = {}
+    service_environ.update(image_environ)
+    service_environ.update(service["environment"])
+    return service_environ
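For illustration, a minimal usage sketch of `eval_service_environ` above (editorial aside, not part of the patch; the compose path, service name and environ values are made-up placeholders):

    from pathlib import Path

    service_env = eval_service_environ(
        docker_compose_path=Path("services/docker-compose.yml"),  # placeholder path
        service_name="webserver",  # placeholder service name
        host_environ={"POSTGRES_HOST": "localhost"},  # what the host exports at deploy time
        image_environ={"SC_BUILD_TARGET": "production"},  # placeholder Dockerfile env
        use_env_devel=True,  # resolves `.env` env_file references to `.env-devel`
    )
    # the result merges the image env, the env_file(s) and the explicit `environment:` section
    assert isinstance(service_env, dict)
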
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/docker.py b/packages/pytest-simcore/src/pytest_simcore/helpers/docker.py
new file mode 100644
index 00000000000..dcccfa55141
--- /dev/null
+++ b/packages/pytest-simcore/src/pytest_simcore/helpers/docker.py
@@ -0,0 +1,274 @@
+import json
+import logging
+import os
+import re
+import subprocess
+from enum import Enum
+from pathlib import Path
+from typing import Any
+
+import docker
+import yaml
+from tenacity import retry
+from tenacity.after import after_log
+from tenacity.stop import stop_after_attempt
+from tenacity.wait import wait_fixed
+
+
+# NOTE: CANNOT use models_library.generated_models.docker_rest_api.Status2 because some of the
+# packages tests installations do not include this library!!
+class ContainerStatus(str, Enum):
+    """
+    String representation of the container state.
Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + + """ + + # SEE https://docs.docker.com/engine/api/v1.42/#tag/Container/operation/ContainerList + + created = "created" + running = "running" + paused = "paused" + restarting = "restarting" + removing = "removing" + exited = "exited" + dead = "dead" + + +_COLOR_ENCODING_RE = re.compile(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]") +_MAX_PATH_CHAR_LEN_ALLOWED = 260 +_kFILENAME_TOO_LONG = 36 +_NORMPATH_COUNT = 0 + + +log = logging.getLogger(__name__) + + +@retry( + wait=wait_fixed(2), + stop=stop_after_attempt(10), + after=after_log(log, logging.WARNING), +) +def get_service_published_port( + service_name: str, target_ports: list[int] | int | None = None +) -> str: + # WARNING: ENSURE that service name exposes a port in + # Dockerfile file or docker-compose config file + + # NOTE: retries since services can take some time to start + client = docker.from_env() + + services = [s for s in client.services.list() if str(s.name).endswith(service_name)] + if not services: + msg = ( + f"Cannot find published port for service '{service_name}'." + "Probably services still not started." + ) + raise RuntimeError(msg) + + service_ports = services[0].attrs["Endpoint"].get("Ports") + if not service_ports: + msg = ( + f"Cannot find published port for service '{service_name}' in endpoint." + "Probably services still not started." + ) + raise RuntimeError(msg) + + published_port = None + msg = ", ".join( + f"{p.get('TargetPort')} -> {p.get('PublishedPort')}" for p in service_ports + ) + + if target_ports is None: + if len(service_ports) > 1: + log.warning( + "Multiple ports published in service '%s': %s. Defaulting to first", + service_name, + msg, + ) + published_port = service_ports[0]["PublishedPort"] + + else: + ports_to_look_for: list = ( + [target_ports] if isinstance(target_ports, int | str) else target_ports + ) + + for target_port in ports_to_look_for: + target_port = int(target_port) + for p in service_ports: + if p["TargetPort"] == target_port: + published_port = p["PublishedPort"] + break + + if published_port is None: + msg = f"Cannot find published port for {target_ports}. 
Got {msg}" + raise RuntimeError(msg) + + return str(published_port) + + +def run_docker_compose_config( + docker_compose_paths: list[Path] | Path, + scripts_dir: Path, + project_dir: Path, + env_file_path: Path, + destination_path: Path | None = None, +) -> dict: + """Runs docker compose config to validate and resolve a compose file configuration + + - Composes all configurations passed in 'docker_compose_paths' + - Takes 'project_dir' as current working directory to resolve relative paths in the docker-compose correctly + - All environments are interpolated from a custom env-file at 'env_file_path' + - Saves resolved output config to 'destination_path' (if given) + """ + + if not isinstance(docker_compose_paths, list): + docker_compose_paths = [ + docker_compose_paths, + ] + + assert project_dir.exists(), "Invalid file '{project_dir}'" + + for docker_compose_path in docker_compose_paths: + assert str(docker_compose_path.resolve()).startswith(str(project_dir.resolve())) + + assert env_file_path.exists(), "Invalid file '{env_file_path}'" + + if destination_path: + assert destination_path.suffix in [ + ".yml", + ".yaml", + ], "Expected yaml/yml file as destination path" + + # https://docs.docker.com/compose/environment-variables/#using-the---env-file--option + bash_options = [ + "-e", + str(env_file_path), # Custom environment variables + ] + + # Specify an alternate compose files + # - When you use multiple Compose files, all paths in the files are relative to the first configuration file specified with -f. + # You can use the --project-directory option to override this base path. + for docker_compose_path in docker_compose_paths: + bash_options += [os.path.relpath(docker_compose_path, project_dir)] + + # SEE https://docs.docker.com/compose/reference/config/ + docker_compose_path = scripts_dir / "docker" / "docker-stack-config.bash" + assert docker_compose_path.exists() + args = [f"{docker_compose_path}", *bash_options] + print(" ".join(args)) + + process = subprocess.run( + args, + cwd=project_dir, + capture_output=True, + check=True, + env=None, # NOTE: Do not use since since we pass all necessary env vars via --env-file option of docker compose + ) + + compose_file_str = process.stdout.decode("utf-8") + compose_file: dict[str, Any] = yaml.safe_load(compose_file_str) + + if destination_path: + # + # NOTE: This step could be avoided and reading instead from stdout + # but prefer to have a file that stays after the test in a tmp folder + # and can be used later for debugging + # + destination_path.parent.mkdir(parents=True, exist_ok=True) + destination_path.write_text(compose_file_str) + + return compose_file + + +def shorten_path(filename: str) -> Path: + # These paths are composed using test name hierarchies + # when the test is parametrized, it uses the str of the + # object as id which could result in path that goes over + # allowed limit (260 characters). + # This helper function tries to normalize the path + # Another possibility would be that the path has some + # problematic characters but so far we did not find any case ... 
+ global _NORMPATH_COUNT # pylint: disable=global-statement + + if len(filename) > _MAX_PATH_CHAR_LEN_ALLOWED: + _NORMPATH_COUNT += 1 + path = Path(filename) + if path.is_dir(): + limit = _MAX_PATH_CHAR_LEN_ALLOWED - 60 + filename = filename[:limit] + f"{_NORMPATH_COUNT}" + elif path.is_file(): + limit = _MAX_PATH_CHAR_LEN_ALLOWED - 10 + filename = filename[:limit] + f"{_NORMPATH_COUNT}{path.suffix}" + + return Path(filename) + + +# actions/upload-artifact@v2: +# Invalid characters for artifact paths include: +# Double quote ", Colon :, Less than <, Greater than >, Vertical bar |, Asterisk *, +# Question mark ?, Carriage return \r, Line feed \n +BANNED_CHARS_FOR_ARTIFACTS = re.compile(r'["\:><|\*\?]') + + +def safe_artifact_name(name: str) -> str: + return BANNED_CHARS_FOR_ARTIFACTS.sub("_", name) + + +def save_docker_infos(destination_dir: Path): + client = docker.from_env() + + # Includes stop containers, which might be e.g. failing tasks + all_containers = client.containers.list(all=True) + + destination_dir = Path(safe_artifact_name(f"{destination_dir}")) + + if all_containers: + try: + destination_dir.mkdir(parents=True, exist_ok=True) + + except OSError as err: + if err.errno == _kFILENAME_TOO_LONG: + destination_dir = shorten_path(err.filename) + destination_dir.mkdir(parents=True, exist_ok=True) + + for container in all_containers: + try: + container_name = safe_artifact_name(container.name) + + # logs w/o coloring characters + logs: str = container.logs(timestamps=True, tail=1000).decode() + + try: + (destination_dir / f"{container_name}.log").write_text( + _COLOR_ENCODING_RE.sub("", logs) + ) + + except OSError as err: + if err.errno == _kFILENAME_TOO_LONG: + shorten_path(err.filename).write_text( + _COLOR_ENCODING_RE.sub("", logs) + ) + + # inspect attrs + try: + (destination_dir / f"{container_name}.json").write_text( + json.dumps(container.attrs, indent=2) + ) + except OSError as err: + if err.errno == _kFILENAME_TOO_LONG: + shorten_path(err.filename).write_text( + json.dumps(container.attrs, indent=2) + ) + + except Exception as err: # pylint: disable=broad-except # noqa: PERF203 + if container.status != ContainerStatus.created: + print( + f"Error while dumping {container.name=}, {container.status=}.\n\t{err=}" + ) + + print( + "\n\t", + f"wrote docker log and json files for {len(all_containers)} containers in ", + destination_dir, + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/docker_registry.py b/packages/pytest-simcore/src/pytest_simcore/helpers/docker_registry.py new file mode 100644 index 00000000000..8dea7a6191c --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/docker_registry.py @@ -0,0 +1,201 @@ +""" Helper to request data from docker-registry + + +NOTE: this could be used as draft for https://github.com/ITISFoundation/osparc-simcore/issues/2165 +""" + + +import json +import os +import re +import sys +from collections.abc import Iterator +from contextlib import suppress +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +import httpx + + +@dataclass +class RegistryConfig: + url: str + auth: tuple[str, str] + + +RepoName = str +RepoTag = str + + +class Registry: + # SEE https://docs.docker.com/registry/spec/api + # SEE https://github.com/moby/moby/issues/9015 + + def __init__(self, **data): + data.setdefault("url", f'https://{os.environ.get("REGISTRY_URL")}') + data.setdefault( + "auth", (os.environ.get("REGISTRY_USER"), os.environ.get("REGISTRY_PW")) + ) + self.data = RegistryConfig(**data) 
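For completeness (editor's note, not part of this patch): the registry helper above either picks up `REGISTRY_URL`, `REGISTRY_USER` and `REGISTRY_PW` from the environment or is configured explicitly. A minimal sketch with placeholder host and credentials:

```python
# Minimal sketch; registry host and credentials are placeholders
from pytest_simcore.helpers.docker_registry import Registry

registry = Registry(
    url="https://registry.example.com",
    auth=("testuser", "testpassword"),
)
registry.api_version_check()  # raises httpx.HTTPStatusError if /v2/ is not reachable
for repo in registry.iter_repositories(limit=50):  # paginated via the Link header
    print(repo, registry.list_tags(repo_name=repo))
```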
+ + def __str__(self) -> str: + return f"" + + def api_version_check(self): + # https://docs.docker.com/registry/spec/api/#api-version-check + + r = httpx.get(f"{self.data.url}/v2/", auth=self.data.auth) + r.raise_for_status() + + def iter_repositories(self, limit: int = 100) -> Iterator[RepoName]: + def _req(**kwargs): + r = httpx.get(auth=self.data.auth, **kwargs) + r.raise_for_status() + + yield from r.json()["repositories"] + + if link := r.headers.get("Link"): + # until the Link header is no longer set in the response + # SEE https://docs.docker.com/registry/spec/api/#pagination-1 + # ex=.g. '; rel="next"' + if m := re.match(r'<([^><]+)>;\s+rel=([\w"]+)', link): + next_page = m.group(1) + yield from _req(url=next_page) + + assert limit > 0 + query = {"n": limit} + + yield from _req(url=f"{self.data.url}/v2/_catalog", params=query) + + def get_digest(self, repo_name: str, repo_reference: str) -> str: + r = httpx.head( + f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", + auth=self.data.auth, + ) + r.raise_for_status() + assert r.status_code == 200 + return r.headers["Docker-Content-Digest"] + + def check_manifest(self, repo_name: RepoName, repo_reference: str) -> bool: + r = httpx.head( + f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", + auth=self.data.auth, + ) + if r.status_code == 400: + return False + # some other error? + r.raise_for_status() + return True + + def list_tags(self, repo_name: RepoName) -> list[RepoTag]: + r = httpx.get( + f"{self.data.url}/v2/{repo_name}/tags/list", + auth=self.data.auth, + ) + r.raise_for_status() + data = r.json() + assert data["name"] == repo_name + return data["tags"] + + def get_manifest(self, repo_name: str, repo_reference: str): + r = httpx.get( + f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", + auth=self.data.auth, + ) + r.raise_for_status() + + # manifest formats and their content types: https://docs.docker.com/registry/spec/manifest-v2-1/, + # see format https://github.com/moby/moby/issues/8093 + return r.json() + + +def get_labels(image_v1: str) -> dict[str, Any]: + """image_v1: v1 compatible string encoded json for each layer""" + return json.loads(image_v1).get("config", {}).get("Labels", {}) + + +def extract_metadata(labels: dict[str, Any]) -> dict[str, Any]: + """Creates a metadata object from 'io.simcore.*' labels such as + { + "name": "foo" + "version": "1.2.3" + } + """ + meta = {} + for key in labels: + if key.startswith("io.simcore."): + meta.update(**json.loads(labels[key])) + return meta + + +def extract_extra_service_metadata(labels: dict[str, Any]) -> dict[str, Any]: + """Creates a metadata object 'simcore.service.*' labels such as + { + "service.settings": { ... } + "service.value": 42 + } + """ + meta = {} + for key in labels: + if key.startswith("simcore.service."): + value = labels[key].strip() + with suppress(json.decoder.JSONDecodeError): + # ignore e.g. 
key=value where value is a raw name + value = json.loads(value) + + meta.update(**{key.removeprefix("simcore."): value}) + return meta + + +SKIP, SUCCESS, FAILED = "[skip]", "[ok]", "[failed]" + + +def download_all_registry_metadata(dest_dir: Path, **kwargs): + registry = Registry(**kwargs) + + print("Starting", registry) + + count = 0 + for repo in registry.iter_repositories(limit=500): + + # list tags + try: + tags = registry.list_tags(repo_name=repo) + except httpx.HTTPStatusError as err: + print(f"Failed to get tags from {repo=}", err, FAILED) + continue + + # get manifest + folder = dest_dir / Path(repo) + folder.mkdir(parents=True, exist_ok=True) + for tag in tags: + path = folder / f"metadata-{tag}.json" + if not path.exists(): + try: + manifest = registry.get_manifest(repo_name=repo, repo_reference=tag) + + labels = get_labels( + image_v1=manifest["history"][0]["v1Compatibility"] + ) + + meta = extract_metadata(labels) + meta.update(extract_extra_service_metadata(labels)) + + with path.open("wt") as fh: + json.dump(meta, fh, indent=1) + print("downloaded", path, SUCCESS) + count += 1 + except Exception as err: # pylint: disable=broad-except + print("Failed", path, err, FAILED) + path.unlink(missing_ok=True) + else: + print("found", path, SKIP) + count += 1 + print("\nDownloaded", count, "metadata files from", registry.data.url) + + +if __name__ == "__main__": + + dest = Path(sys.argv[1] if len(sys.argv) > 1 else ".") + download_all_registry_metadata(dest_dir=dest) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_catalog.py b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_catalog.py deleted file mode 100644 index f89b869d3d1..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_catalog.py +++ /dev/null @@ -1,113 +0,0 @@ -def create_service_out(**overrides): - # FIXME: should change when schema changes - - obj = { - "name": "Fast Counter", - "key": "simcore/service/dynanic/itis/sim4life" - if overrides.get("type") == "dynamic" - else "simcore/services/comp/itis/sleeper", - "version": "1.0.0", - "integration-version": "1.0.0", - "type": "computational", - "authors": [ - { - "name": "Jim Knopf", - "email": ["sun@sense.eight", "deleen@minbar.bab"], - "affiliation": ["Sense8", "Babylon 5"], - } - ], - "contact": "lab@net.flix", - "inputs": {}, - "outputs": {}, - "owner": "user@example.com", - } - obj.update(**overrides) - return obj - - -def create_service_out2(**overrides): - # - # Creates fake from here - # - # https://github.com/ITISFoundation/osparc-simcore/blob/master/services/catalog/src/simcore_service_catalog/models/schemas/services.py - # - # docker exec -it $(docker ps --filter="ancestor=local/catalog:development" -q) - # python -c "from simcore_service_catalog.models.schemas.services import ServiceOut;print(ServiceOut.schema_json(indent=2))" > services/catalog/ignore-schema.json - # put file in https://json-schema-faker.js.org/ and get fake output - # - - DATA = { - "name": "officia", - "description": "sunt elit", - "key": "simcore/services/dynamic/xO/WAn/1-/$meZpaVN)/t_&[Q0/TC7Wn#y'j/MilxW/kTtV_{ dict[str, Any]: + """ + Fakes https://docs.docker.com/compose/compose-file/compose-file-v3/ + + """ + faker = Faker() + + docker_compose = { + "version": "3", + "services": {}, + } + + # SEE https://faker.readthedocs.io/en/master/providers/baseprovider.html?highlight=random + + for _ in _range(faker, num_services, max_=4): + service_name, service = generate_fake_service_specs(faker) + + docker_compose["services"][service_name] = 
service + + return docker_compose + + +def generate_fake_service_specs(faker: Faker) -> tuple[str, dict[str, Any]]: + service_name = faker.word() + service = { + "image": faker.word(), + "environment": {faker.word(): faker.word() for _ in _range(faker, max_=10)}, + "ports": [ + f"{faker.random_int(1000, 9999)}:{faker.random_int(1000, 9999)}" + for _ in _range(faker) + ], + } + return service_name, service diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py new file mode 100644 index 00000000000..5aeb6c3be66 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_factories.py @@ -0,0 +1,543 @@ +""" +Collection of functions that create fake raw data that can be used +to populate postgres DATABASE, create datasets with consistent values, etc + +Built on top of the idea of Faker library (https://faker.readthedocs.io/en/master/), +that generate fake data to bootstrap a database, fill-in stress tests, anonymize data ... +etc + +NOTE: all outputs MUST be Dict-like or built-in data structures that fit at least +required fields in postgres_database.models tables or pydantic models. + +NOTE: to reduce coupling, please import simcore_postgres_database inside of the functions +""" + +import itertools +import json +from collections.abc import Callable +from datetime import UTC, datetime, timedelta +from typing import Any, Final +from uuid import uuid4 + +import arrow +import faker +from faker import Faker + +DEFAULT_FAKER: Final = faker.Faker() + + +def random_icon_url(fake: Faker): + return fake.image_url(width=16, height=16) + + +def random_thumbnail_url(fake: Faker): + return fake.image_url(width=32, height=32) + + +def _compute_hash(password: str) -> str: + try: + # 'passlib' will be used only if already installed. + # This way we do not force all modules to install + # it only for testing. 
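+        # For illustration: with 'passlib' installed the value returned below looks
+        # like "$5$rounds=1000$<salt>$<checksum>" (sha256_crypt), while the fallback
+        # branch returns a plain 56-character hexadecimal sha224 digest.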
+ import passlib.hash + + return passlib.hash.sha256_crypt.using(rounds=1000).hash(password) + + except ImportError: + # if 'passlib' is not installed, we will use a library + # from the python distribution for convenience + import hashlib + + return hashlib.sha224(password.encode("ascii")).hexdigest() + + +DEFAULT_TEST_PASSWORD = "password-with-at-least-12-characters" # noqa: S105 +_DEFAULT_HASH = _compute_hash(DEFAULT_TEST_PASSWORD) + + +def random_user( + fake: Faker = DEFAULT_FAKER, password: str | None = None, **overrides +) -> dict[str, Any]: + from simcore_postgres_database.models.users import users + from simcore_postgres_database.webserver_models import UserStatus + + assert set(overrides.keys()).issubset({c.name for c in users.columns}) + + data = { + # NOTE: ensures user name is unique to avoid flaky tests + "name": f"{fake.user_name()}_{fake.uuid4()}", + "email": f"{fake.uuid4()}_{fake.email().lower()}", + "password_hash": _DEFAULT_HASH, + "status": UserStatus.ACTIVE, + } + + assert set(data.keys()).issubset({c.name for c in users.columns}) + + # transform password in hash + if password: + assert len(password) >= 12 + overrides["password_hash"] = _compute_hash(password) + + data.update(overrides) + return data + + +def random_pre_registration_details( + fake: Faker = DEFAULT_FAKER, + *, + # foreign keys + user_id: int | None = None, + created_by: int | None = None, + product_name: str | None = None, + account_request_reviewed_by: int | None = None, + **overrides, +): + from simcore_postgres_database.models.users_details import ( + users_pre_registration_details, + ) + + assert set(overrides.keys()).issubset( + {c.name for c in users_pre_registration_details.columns} + ) + + data = { + "user_id": user_id, + "pre_first_name": fake.first_name(), + "pre_last_name": fake.last_name(), + "pre_email": fake.email(), + "pre_phone": fake.phone_number(), + "institution": fake.company(), + "address": fake.address().replace("\n", ", "), + "city": fake.city(), + "state": fake.state(), + "country": fake.country(), + "postal_code": fake.postcode(), + "extras": { + "application": fake.word(), + "description": fake.sentence(), + "hear": fake.word(), + "privacyPolicy": True, + "eula": True, + "ipinfo": {"x-real-ip": "127.0.0.1"}, + }, + "product_name": product_name, + "created_by": created_by, # user id + "account_request_reviewed_by": account_request_reviewed_by, + } + + assert set(data.keys()).issubset( + {c.name for c in users_pre_registration_details.columns} + ) + + data.update(overrides) + return data + + +def random_project(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]: + """Generates random fake data projects DATABASE table""" + from simcore_postgres_database.models.projects import projects + + data = { + "uuid": fake.uuid4(), + "name": fake.word(), + "description": fake.sentence(), + "prj_owner": fake.pyint(), + "thumbnail": fake.image_url(width=120, height=120), + "access_rights": {}, + "workbench": {}, + "published": False, + } + + icon = fake.random_element([random_icon_url(fake), None]) # nullable + if icon: + data["ui"] = {"icon": icon} + + assert set(data.keys()).issubset({c.name for c in projects.columns}) + + data.update(overrides) + return data + + +def random_group(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]: + from simcore_postgres_database.models.groups import groups + from simcore_postgres_database.webserver_models import GroupType + + data = { + "name": fake.company(), + "description": fake.text(), + "type": GroupType.STANDARD.name, + } + + 
assert set(data.keys()).issubset({c.name for c in groups.columns}) # nosec + + data.update(overrides) + return data + + +def _get_comp_pipeline_test_states(): + from simcore_postgres_database.models.comp_pipeline import StateType + + return [ + StateType.NOT_STARTED, + StateType.PENDING, + StateType.RUNNING, + StateType.SUCCESS, + StateType.FAILED, + ] + + +def fake_pipeline(fake: Faker = DEFAULT_FAKER, **overrides) -> dict[str, Any]: + data = { + "dag_adjacency_list": json.dumps({}), + "state": fake.random_element(_get_comp_pipeline_test_states()), + } + data.update(overrides) + return data + + +def fake_task_factory( + first_internal_id=1, + fake: Faker = DEFAULT_FAKER, +) -> Callable: + # Each new instance of fake_task will get a copy + _index_in_sequence = itertools.count(start=first_internal_id) + + def fake_task(**overrides) -> dict[str, Any]: + t0 = arrow.utcnow().datetime + data = { + "project_id": uuid4(), + "node_id": uuid4(), + "job_id": uuid4(), + "internal_id": next(_index_in_sequence), + "schema": json.dumps({}), + "inputs": json.dumps({}), + "outputs": json.dumps({}), + "image": json.dumps({}), + "state": fake.random_element(_get_comp_pipeline_test_states()), + "start": t0 + timedelta(seconds=1), + "end": t0 + timedelta(minutes=5), + } + + data.update(overrides) + return data + + return fake_task + + +def random_product( + *, + group_id: int | None = None, + registration_email_template: str | None = None, + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + """ + + Foreign keys are: + - group_id: product group ID. SEE get_or_create_product_group to produce `group_id` + - registration_email_template + """ + from simcore_postgres_database.models.products import Vendor, VendorUI, products + + name = overrides.get("name") + suffix = fake.unique.word() if name is None else name + + data = { + "name": f"prd_{suffix}", + "display_name": suffix.capitalize().replace("_", " "), + "short_name": suffix[:4], + "host_regex": r"[a-zA-Z0-9]+\.com", + "support_email": f"support@{suffix}.io", + "product_owners_email": fake.random_element( + elements=[f"product-owners@{suffix}.io", None] + ), + "twilio_messaging_sid": fake.random_element( + elements=(None, f"{fake.uuid4()}"[:34]) + ), + "vendor": Vendor( + name=fake.company(), + copyright=fake.company_suffix(), + url=fake.url(), + license_url=fake.url(), + invitation_url=fake.url(), + invitation_form=fake.boolean(), + address=fake.address().replace("\n", ". 
"), + ui=VendorUI( + logo_url="https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/static-webserver/client/source/resource/osparc/osparc-black.svg", + strong_color=fake.color(), + project_alias=fake.random_element(elements=["project", "study"]), + ), + ), + "registration_email_template": registration_email_template, + "created": fake.date_time_this_decade(), + "modified": fake.date_time_this_decade(), + "priority": fake.pyint(0, 10), + "max_open_studies_per_user": fake.pyint(1, 10), + "group_id": group_id, + } + + if ui := fake.random_element( + [ + None, + # Examples from https://github.com/itisfoundation/osparc-simcore/blob/1dcd369717959348099cc6241822a1f0aff0382c/services/static-webserver/client/source/resource/osparc/new_studies.json + { + "categories": [ + {"id": "precomputed", "title": "Precomputed"}, + { + "id": "personalized", + "title": "Personalized", + "description": fake.sentence(), + }, + ] + }, + ] + ): + data.update(ui=ui) + + assert set(data.keys()).issubset({c.name for c in products.columns}) + data.update(overrides) + return data + + +def random_product_price( + *, product_name: str, fake: Faker = DEFAULT_FAKER, **overrides +) -> dict[str, Any]: + from simcore_postgres_database.models.products_prices import products_prices + + data = { + "product_name": product_name, + "usd_per_credit": fake.pydecimal(left_digits=2, right_digits=2, positive=True), + "min_payment_amount_usd": fake.pydecimal( + left_digits=2, right_digits=2, positive=True + ), + "comment": fake.sentence(), + "valid_from": fake.date_time_this_decade(), + "stripe_price_id": fake.uuid4(), + "stripe_tax_rate_id": fake.uuid4(), + } + + assert set(data.keys()).issubset({c.name for c in products_prices.columns}) + + data.update(overrides) + return data + + +def utcnow() -> datetime: + return datetime.now(tz=UTC) + + +def random_payment_method( + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + from simcore_postgres_database.models.payments_methods import ( + InitPromptAckFlowState, + payments_methods, + ) + + data = { + "payment_method_id": fake.uuid4(), + "user_id": fake.pyint(), + "wallet_id": fake.pyint(), + "initiated_at": utcnow(), + "state": InitPromptAckFlowState.PENDING, + "completed_at": None, + } + # state is not added on purpose + assert set(data.keys()).issubset({c.name for c in payments_methods.columns}) + + data.update(overrides) + return data + + +def random_payment_transaction( + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + """Generates Metadata + concept/info (excludes state)""" + from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, + payments_transactions, + ) + + # initiated + data = { + "payment_id": fake.uuid4(), + "price_dollars": "123456.78", + "osparc_credits": "123456.78", + "product_name": "osparc", + "user_id": fake.pyint(), + "user_email": fake.email().lower(), + "wallet_id": 1, + "comment": "Free starting credits", + "initiated_at": utcnow(), + "state": PaymentTransactionState.PENDING, + "completed_at": None, + "invoice_url": None, + "stripe_invoice_id": None, + "invoice_pdf_url": None, + "state_message": None, + } + # state is not added on purpose + assert set(data.keys()).issubset({c.name for c in payments_transactions.columns}) + + data.update(overrides) + return data + + +def random_payment_autorecharge( + primary_payment_method_id: str = "UNDEFINED__", + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + from 
simcore_postgres_database.models.payments_autorecharge import ( + payments_autorecharge, + ) + + if primary_payment_method_id == "UNDEFINED__": + primary_payment_method_id = fake.uuid4() + + data = { + "wallet_id": fake.pyint(), + "enabled": True, + "primary_payment_method_id": primary_payment_method_id, + "top_up_amount_in_usd": 100, + "monthly_limit_in_usd": 1000, + } + assert set(data.keys()).issubset({c.name for c in payments_autorecharge.columns}) + + data.update(overrides) + return data + + +def random_api_auth( + product_name: str, user_id: int, fake: Faker = DEFAULT_FAKER, **overrides +) -> dict[str, Any]: + from simcore_postgres_database.models.api_keys import api_keys + + data = { + "display_name": fake.word(), + "product_name": product_name, + "user_id": user_id, + "api_key": fake.password(), + "api_secret": fake.password(), + "expires_at": None, + } + assert set(data.keys()).issubset({c.name for c in api_keys.columns}) # nosec + data.update(**overrides) + return data + + +def random_payment_method_view( + fake: Faker = DEFAULT_FAKER, **overrides +) -> dict[str, Any]: + # Produces data for GetPaymentMethod + data = { + "id": fake.uuid4(), + "card_holder_name": fake.name(), + "card_number_masked": f"**** **** **** {fake.credit_card_number()[:4]}", + "card_type": fake.credit_card_provider(), + "expiration_month": fake.random_int(min=1, max=12), + "expiration_year": fake.future_date().year, + "created": utcnow(), + } + assert set(overrides.keys()).issubset(data.keys()) + data.update(**overrides) + return data + + +def random_service_meta_data( + owner_primary_gid: int | None = None, + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + from simcore_postgres_database.models.services import services_meta_data + + _version = ".".join([str(fake.pyint()) for _ in range(3)]) + _name = fake.name() + + data: dict[str, Any] = { + # required + "key": f"simcore/services/{fake.random_element(['dynamic', 'computational'])}/{_name}", + "version": _version, + "name": f"the-{_name}-service", # display + "description": fake.sentence(), + # optional + "description_ui": fake.pybool(), + "owner": owner_primary_gid, + "thumbnail": fake.random_element( + [random_thumbnail_url(fake), None] + ), # nullable + "icon": fake.random_element([random_icon_url(fake), None]), # nullable + "version_display": fake.random_element([f"v{_version}", None]), # nullable + "classifiers": [], # has default + "quality": {}, # has default + "deprecated": None, # nullable + } + + assert set(data.keys()).issubset( # nosec + {c.name for c in services_meta_data.columns} + ) + + data.update(**overrides) + return data + + +def random_service_access_rights( + key: str, + version: str, + gid: int, + product_name: str, + fake: Faker = DEFAULT_FAKER, + **overrides, +) -> dict[str, Any]: + from simcore_postgres_database.models.services import services_access_rights + + data: dict[str, Any] = { + # required + "key": key, + "version": version, + "gid": gid, + "execute_access": fake.pybool(), + "write_access": fake.pybool(), + "product_name": product_name, + } + + assert set(data.keys()).issubset( # nosec + {c.name for c in services_access_rights.columns} + ) + + data.update(**overrides) + return data + + +def random_itis_vip_available_download_item( + identifier: int, + fake: Faker = DEFAULT_FAKER, + features_functionality: str = "Posable", + **overrides, +): + features_str = ( + "{" + f"name: {fake.name()} Right Hand," # w/o spaces + f" version: V{fake.pyint()}.0, " # w/ x2 spaces + f"sex: Male, age: 8 years," # w/o 
spaces + f"date: {fake.date()} , " # w/ x2 spaces prefix, x1 space suffix + f"ethnicity: Caucasian, functionality: {features_functionality} " + "}" + ) + + data = { + "ID": identifier, + "Description": fake.sentence(), + "Thumbnail": fake.image_url(), + "Features": features_str, + "DOI": fake.bothify(text="10.####/ViP#####-##-#"), + "LicenseKey": fake.bothify(text="MODEL_????_V#"), + "LicenseVersion": fake.bothify(text="V#.0"), + "Protection": fake.random_element(elements=["Code", "PayPal"]), + "AvailableFromURL": fake.random_element(elements=[None, fake.url()]), + } + + data.update(**overrides) + return data diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_webserver.py b/packages/pytest-simcore/src/pytest_simcore/helpers/faker_webserver.py deleted file mode 100644 index 70978fb7979..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/faker_webserver.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Any, Final - -# Web-server API responses of /projects/{project_id}/metadata/ports -# as reponses in this mock. SEE services/web/server/tests/unit/with_dbs/02/test_projects_ports_handlers.py -# NOTE: this could be added as examples in the OAS but for the moment we want to avoid overloading openapi.yml -# in the web-server. -PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA: Final[list[dict[str, Any]]] = [ - { - "key": "38a0d401-af4b-4ea7-ab4c-5005c712a546", - "kind": "input", - "content_schema": { - "description": "Parameter of type integer", - "title": "X", - "type": "integer", - }, - }, - { - "key": "fc48252a-9dbb-4e07-bf9a-7af65a18f612", - "kind": "input", - "content_schema": { - "description": "Parameter of type integer", - "title": "Z", - "type": "integer", - }, - }, - { - "key": "7bf0741f-bae4-410b-b662-fc34b47c27c9", - "kind": "input", - "content_schema": { - "description": "Parameter of type boolean", - "title": "on", - "type": "boolean", - }, - }, - { - "key": "09fd512e-0768-44ca-81fa-0cecab74ec1a", - "kind": "output", - "content_schema": { - "default": 0, - "description": "Captures integer values attached to it", - "title": "Random sleep interval_2", - "type": "integer", - }, - }, - { - "key": "76f607b4-8761-4f96-824d-cab670bc45f5", - "kind": "output", - "content_schema": { - "default": 0, - "description": "Captures integer values attached to it", - "title": "Random sleep interval", - "type": "integer", - }, - }, -] diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/fastapi.py b/packages/pytest-simcore/src/pytest_simcore/helpers/fastapi.py new file mode 100644 index 00000000000..02e0d2d4ad5 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/fastapi.py @@ -0,0 +1,11 @@ +import httpx +from fastapi import FastAPI +from yarl import URL + + +def url_from_operation_id( + client: httpx.AsyncClient, app: FastAPI, operation_id: str, **path_params +) -> URL: + return URL(f"{client.base_url}").with_path( + app.url_path_for(operation_id, **path_params) + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/host.py b/packages/pytest-simcore/src/pytest_simcore/helpers/host.py new file mode 100644 index 00000000000..e259053694e --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/host.py @@ -0,0 +1,14 @@ +import socket + + +def get_localhost_ip(default="127.0.0.1") -> str: + """Return the IP address for localhost""" + local_ip = default + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + # doesn't even have to be reachable + s.connect(("10.255.255.255", 1)) + local_ip = s.getsockname()[0] + 
finally: + s.close() + return f"{local_ip}" diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_assert_checks.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_assert_checks.py new file mode 100644 index 00000000000..4443241192e --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_assert_checks.py @@ -0,0 +1,89 @@ +"""Extends assertions for testing""" + +import re +from http import HTTPStatus +from pprint import pformat +from typing import Any, TypeVar + +import httpx +from models_library.generics import Envelope +from pydantic import TypeAdapter +from servicelib.aiohttp import status +from servicelib.status_codes_utils import get_code_display_name, is_error + +T = TypeVar("T") + + +def assert_status( + response: httpx.Response, + expected_status_code: int, + response_model: type[T] | None, + *, + expected_msg: str | None = None, + expect_envelope: bool = True, +) -> tuple[T | None, Any]: + """ + Asserts for enveloped responses + """ + # raises ValueError if cannot be converted + expected_status_code = HTTPStatus(expected_status_code) + + assert ( + response.status_code == expected_status_code + ), f"received {response.status_code}: {response.text}, expected {get_code_display_name(expected_status_code)}" + + # response + if expected_status_code == status.HTTP_204_NO_CONTENT: + assert response.text == "" + return None, None + if expect_envelope: + validated_response = TypeAdapter(Envelope[response_model]).validate_json( + response.text + ) + data = validated_response.data + error = validated_response.error + if is_error(expected_status_code): + _do_assert_error( + data, + error, + expected_status_code, + expected_msg, + ) + else: + assert data is not None + return data, error + + if is_error(expected_status_code): + msg = "If you need it implement it" + raise NotImplementedError(msg) + + data = TypeAdapter(response_model).validate_json(response.text) + return data, None + + +def _do_assert_error( + data, + error, + expected_status_code: int, + expected_msg: list[str] | str | list[re.Pattern[str]] | re.Pattern[str] | None, +) -> None: + assert not data, pformat(data) + assert error, pformat(error) + + assert is_error(expected_status_code) + + details = error.get("errors", []) + assert isinstance(details, list) + + if expected_msg: + assert details is not None + # find the expected msg are in the details + if isinstance(expected_msg, list): + list_expected_msg = expected_msg + else: + list_expected_msg = [expected_msg] + + for msg in list_expected_msg: + assert any( + re.search(msg, e) for e in details + ), f"could not find {msg=} in {details=}" diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_errors.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_errors.py new file mode 100644 index 00000000000..bb3e4f16edf --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_errors.py @@ -0,0 +1,15 @@ +class CaptureProcessingError(Exception): + # base for all the exceptions in this submodule + pass + + +class VerbNotInPathError(CaptureProcessingError): + pass + + +class PathNotInOpenApiSpecError(CaptureProcessingError): + pass + + +class OpenApiSpecError(CaptureProcessingError): + pass diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_models.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_models.py new file mode 100644 index 00000000000..60e048f56d3 --- /dev/null +++ 
b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_models.py @@ -0,0 +1,93 @@ +import json +from http import HTTPStatus +from pathlib import Path +from typing import Any, Literal, Protocol + +import httpx +import respx +from fastapi import status +from pydantic import BaseModel, Field + +from .httpx_calls_capture_openapi import enhance_path_description_from_openapi_spec +from .httpx_calls_capture_parameters import PathDescription + + +class HttpApiCallCaptureModel(BaseModel): + """ + Captures relevant information of a call to the http api + """ + + name: str + description: str + method: Literal["GET", "PUT", "POST", "PATCH", "DELETE"] + host: str + path: PathDescription | str + query: str | None = None + request_payload: dict[str, Any] | None = None + response_body: list[Any] | dict[str, Any] | None = None + status_code: HTTPStatus = Field(default=status.HTTP_200_OK) + + @classmethod + def create_from_response( + cls, + response: httpx.Response, + *, + name: str, + description: str = "", + enhance_from_openapi_specs: bool = True, + ) -> "HttpApiCallCaptureModel": + request = response.request + + path: PathDescription | str + if enhance_from_openapi_specs: + path = enhance_path_description_from_openapi_spec(response) + else: + path = response.request.url.path + + return cls( + name=name, + description=description or f"{request}", + method=request.method, + host=request.url.host, + path=path, + query=request.url.query.decode() or None, + request_payload=json.loads(request.content.decode()) + if request.content + else None, + response_body=response.json() if response.content else None, + status_code=HTTPStatus(response.status_code), + ) + + def __str__(self) -> str: + return f"{self.description: self.request_desc}" + + @property + def request_desc(self) -> str: + return f"{self.method} {self.path}" + + def as_response(self) -> httpx.Response: + return httpx.Response(status_code=self.status_code, json=self.response_body) + + +def get_captured_model(name: str, response: httpx.Response) -> HttpApiCallCaptureModel: + return HttpApiCallCaptureModel.create_from_response(response, name=name) + + +class SideEffectCallback(Protocol): + def __call__( + self, + request: httpx.Request, + kwargs: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + ... + + +class CreateRespxMockCallback(Protocol): + def __call__( + self, + respx_mocks: list[respx.MockRouter], + capture_path: Path, + side_effects_callbacks: list[SideEffectCallback], + ) -> list[respx.MockRouter]: + ... 
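To make the two protocols above concrete (editor's sketch, not part of this patch): a capture file is assumed to hold a JSON list of `HttpApiCallCaptureModel` entries, and each entry can be turned into a respx route that replays the recorded response; the helper name below is made up.

```python
# Sketch: build respx routes from a capture file (file layout and helper name assumed)
import json
from pathlib import Path

import respx

from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel


def mock_routes_from_capture_file(
    router: respx.MockRouter, capture_file: Path
) -> respx.MockRouter:
    for item in json.loads(capture_file.read_text()):
        capture = HttpApiCallCaptureModel.model_validate(item)
        # a captured path is either a plain string or a PathDescription with parameters
        path_pattern = (
            capture.path
            if isinstance(capture.path, str)
            else capture.path.to_path_regex()
        )
        router.request(
            capture.method, host=capture.host, path__regex=path_pattern
        ).mock(return_value=capture.as_response())
    return router
```

The `SideEffectCallback` protocol is the hook for cases where the replayed response must depend on the incoming request rather than being a fixed recording.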
diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_openapi.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_openapi.py new file mode 100644 index 00000000000..177b1330e36 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_openapi.py @@ -0,0 +1,140 @@ +from contextlib import suppress +from pathlib import Path +from typing import Any, Final, Literal +from urllib.parse import unquote + +import httpx +import jsonref +from pydantic import TypeAdapter, ValidationError +from settings_library.catalog import CatalogSettings +from settings_library.director_v2 import DirectorV2Settings +from settings_library.storage import StorageSettings +from settings_library.webserver import WebServerSettings + +from .httpx_calls_capture_errors import ( + OpenApiSpecError, + PathNotInOpenApiSpecError, + VerbNotInPathError, +) +from .httpx_calls_capture_parameters import ( + CapturedParameter, + CapturedParameterSchema, + PathDescription, +) + +assert CapturedParameterSchema # nosec + + +_AIOHTTP_PATH: Final[str] = "/dev/doc/swagger.json" +_FASTAPI_PATH: Final[str] = "/api/{}/openapi.json" + +ServiceHostNames = Literal[ + "storage", + "catalog", + "webserver", + "director-v2", +] + +settings_classes: Final = [ + ("STORAGE", StorageSettings, _AIOHTTP_PATH), + ("CATALOG", CatalogSettings, _FASTAPI_PATH), + ("WEBSERVER", WebServerSettings, _AIOHTTP_PATH), + ("DIRECTOR_V2", DirectorV2Settings, _FASTAPI_PATH), +] + + +def _get_openapi_specs(url: httpx.URL) -> dict[str, Any]: + openapi_url = None + target = (url.host, url.port) + + for prefix, cls, openapi_path in settings_classes: + with suppress(ValidationError): + settings = cls.create_from_envs() + base_url = httpx.URL(settings.base_url) + if (base_url.host, base_url.port) == target: + vtag = getattr(settings, f"{prefix}_VTAG") + openapi_url = settings.base_url + openapi_path.format(vtag) + break + + if not openapi_url: + msg = f"{url=} has not been added yet to the testing system. 
Please do so yourself" + raise OpenApiSpecError(msg) + + response = httpx.get(openapi_url) + response.raise_for_status() + + if not response.content: + msg = f"Cannot retrieve OAS from {openapi_url=}" + raise RuntimeError(msg) + openapi_spec = jsonref.loads(response.read().decode("utf8")) + + assert isinstance(openapi_spec, dict) + return openapi_spec + + +def _get_params( + openapi_spec: dict[str, Any], path: str, method: str | None = None +) -> set[CapturedParameter]: + """Returns all parameters for the method associated with a given resource (and optionally also a given method)""" + endpoints: dict[str, Any] | None + if (endpoints := openapi_spec["paths"].get(path)) is None: + msg = f"{path} was not in the openapi specification" + raise PathNotInOpenApiSpecError(msg) + all_params: list[CapturedParameter] = [] + for verb in [method] if method is not None else list(endpoints): + if (verb_spec := endpoints.get(verb)) is None: + msg = f"the verb '{verb}' was not available in '{path}' in {openapi_spec}" + raise VerbNotInPathError(msg) + if (params := verb_spec.get("parameters")) is None: + continue + all_params += TypeAdapter(list[CapturedParameter]).validate_python(params) + return set(all_params) + + +def _determine_path( + openapi_spec: dict[str, Any], response_path: Path +) -> PathDescription: + def parts(p: str) -> tuple[str, ...]: + all_parts: list[str] = sum((elm.split("/") for elm in p.split(":")), start=[]) + return tuple(part for part in all_parts if len(part) > 0) + + for p in openapi_spec["paths"]: + openapi_parts: tuple[str, ...] = tuple(parts(p)) + response_parts: tuple[str, ...] = tuple(parts(f"{response_path}")) + if len(openapi_parts) != len(response_parts): + continue + path_params = { + param.name: param for param in _get_params(openapi_spec, p) if param.is_path + } + if (len(path_params) == 0) and (openapi_parts == response_parts): + return PathDescription( + path=str(response_path), path_parameters=list(path_params.values()) + ) + path_param_indices: tuple[int, ...] 
= tuple( + openapi_parts.index("{" + name + "}") for name in path_params + ) + if tuple( + elm for ii, elm in enumerate(openapi_parts) if ii not in path_param_indices + ) != tuple( + elm for ii, elm in enumerate(response_parts) if ii not in path_param_indices + ): + continue + path_param_indices_iter = iter(path_param_indices) + for key in path_params: + ii = next(path_param_indices_iter) + path_params[key].response_value = unquote(response_path.parts[ii]) + return PathDescription( + path=p, + path_parameters=list(path_params.values()), + ) + msg = f"Could not find a path matching {response_path} in " + raise PathNotInOpenApiSpecError(msg) + + +def enhance_path_description_from_openapi_spec( + response: httpx.Response, +) -> PathDescription: + openapi_spec: dict[str, Any] = _get_openapi_specs(response.url) + return _determine_path( + openapi_spec, Path(response.request.url.raw_path.decode("utf8").split("?")[0]) + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py new file mode 100644 index 00000000000..a58544a59f0 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_calls_capture_parameters.py @@ -0,0 +1,146 @@ +from typing import Annotated, Literal + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from .httpx_calls_capture_errors import OpenApiSpecError + + +class CapturedParameterSchema(BaseModel): + title: str | None = None + type_: Literal["str", "int", "float", "bool", "null"] | None = Field( + None, alias="type" + ) + pattern: str | None = None + format_: Literal["uuid"] | None = Field(None, alias="format") + exclusiveMinimum: bool | None = None + minimum: int | float | None = None + anyOf: list["CapturedParameterSchema"] | None = None + allOf: list["CapturedParameterSchema"] | None = None + oneOf: list["CapturedParameterSchema"] | None = None + + model_config = ConfigDict(validate_default=True, populate_by_name=True) + + @field_validator("type_", mode="before") + @classmethod + def preprocess_type_(cls, val): + if val == "string": + val = "str" + if val == "integer": + val = "int" + if val == "boolean": + val = "bool" + return val + + @model_validator(mode="after") + @classmethod + def check_compatibility(cls, values): + type_ = values.type_ + pattern = values.pattern + format_ = values.format_ + anyOf = values.anyOf + allOf = values.allOf + oneOf = values.oneOf + if not any([type_, oneOf, anyOf, allOf]): + type_ = "str" # this default is introduced because we have started using json query params in the webserver + values.type_ = type_ + if type_ != "str" and any([pattern, format_]): + msg = f"For {type_=} both {pattern=} and {format_=} must be None" + raise ValueError(msg) + + def _check_no_recursion(v: list["CapturedParameterSchema"]): + if v is not None and not all( + elm.anyOf is None and elm.oneOf is None and elm.allOf is None + for elm in v + ): + msg = "For simplicity we only allow top level schema have oneOf, anyOf or allOf" + raise ValueError(msg) + + _check_no_recursion(anyOf) + _check_no_recursion(allOf) + _check_no_recursion(oneOf) + return values + + @property + def regex_pattern(self) -> str: + # first deal with recursive types: + if self.oneOf: + msg = "Current version cannot compute regex patterns in case of oneOf. Please go ahead and implement it yourself." 
+ raise NotImplementedError(msg) + if self.anyOf is not None: + return "|".join( + [ + elm.regex_pattern + for elm in self.anyOf # pylint:disable=not-an-iterable + ] + ) + if self.allOf is not None: + return "&".join( + [ + elm.regex_pattern + for elm in self.allOf # pylint:disable=not-an-iterable + ] + ) + + # now deal with non-recursive cases + pattern: str | None = None + if self.pattern is not None: + pattern = str(self.pattern).removeprefix("^").removesuffix("$") + elif self.type_ == "int": + pattern = r"[-+]?\d+" + elif self.type_ == "float": + pattern = r"[+-]?\d+(?:\.\d+)?" + elif self.type_ == "str": + if self.format_ == "uuid": + pattern = r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-(1|3|4|5)[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + else: + pattern = r"[^/]*" # should match any string not containing "/" + if pattern is None: + msg = f"Encountered invalid {self.type_=} and {self.format_=} combination" + raise OpenApiSpecError(msg) + return pattern + + +class CapturedParameter(BaseModel): + in_: Literal["path", "header", "query"] = Field(..., alias="in") + name: str + required: bool + schema_: Annotated[CapturedParameterSchema, Field(..., alias="schema")] + response_value: str | None = ( + None # attribute for storing the params value in a concrete response + ) + model_config = ConfigDict(validate_default=True, populate_by_name=True) + + def __hash__(self): + return hash( + self.name + self.in_ + ) # it is assumed name is unique within a given path + + def __eq__(self, other): + return self.name == other.name and self.in_ == other.in_ + + @property + def is_path(self) -> bool: + return self.in_ == "path" + + @property + def is_header(self) -> bool: + return self.in_ == "header" + + @property + def is_query(self) -> bool: + return self.in_ == "query" + + @property + def respx_lookup(self) -> str: + return rf"(?P<{self.name}>{self.schema_.regex_pattern})" + + +class PathDescription(BaseModel): + path: str + path_parameters: list[CapturedParameter] + + def to_path_regex(self) -> str: + path_regex: str = f"{self.path}" + for param in self.path_parameters: + path_regex = path_regex.replace("{" + param.name + "}", param.respx_lookup) + return path_regex diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_client_base_dev.py b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_client_base_dev.py new file mode 100644 index 00000000000..9a36d4cc020 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/httpx_client_base_dev.py @@ -0,0 +1,73 @@ +import json +import logging +from pathlib import Path + +import httpx +from fastapi.encoders import jsonable_encoder +from httpx._types import URLTypes +from jsonschema import ValidationError +from pydantic import TypeAdapter + +from .httpx_calls_capture_errors import CaptureProcessingError +from .httpx_calls_capture_models import HttpApiCallCaptureModel, get_captured_model + +_logger = logging.getLogger(__name__) + + +_HTTP_API_CALL_CAPTURE_MODEL_ADAPTER: TypeAdapter[ + list[HttpApiCallCaptureModel] +] = TypeAdapter(list[HttpApiCallCaptureModel]) + + +class AsyncClientCaptureWrapper(httpx.AsyncClient): + """ + Adds captures mechanism + """ + + def __init__(self, capture_file: Path, **async_clint_kwargs): + super().__init__(**async_clint_kwargs) + if capture_file.is_file(): + assert capture_file.name.endswith( # nosec + ".json" + ), "The capture file should be a json file" + self._capture_file: Path = capture_file + + async def request(self, method: str, url: URLTypes, **kwargs): + response: httpx.Response = await 
super().request(method, url, **kwargs) + + capture_name = f"{method} {url}" + _logger.info("Capturing %s ... [might be slow]", capture_name) + try: + capture: HttpApiCallCaptureModel = get_captured_model( + name=capture_name, response=response + ) + if ( + not self._capture_file.is_file() + or self._capture_file.read_text().strip() == "" + ): + self._capture_file.write_text("[]") + + serialized_captures: list[ + HttpApiCallCaptureModel + ] = _HTTP_API_CALL_CAPTURE_MODEL_ADAPTER.validate_json( + self._capture_file.read_text() + ) + serialized_captures.append(capture) + self._capture_file.write_text( + json.dumps( + jsonable_encoder( + serialized_captures, + # NOTE: reduces file size by relying on defaults + exclude_unset=True, + exclude_defaults=True, + ), + indent=1, + ) + ) + except (CaptureProcessingError, ValidationError, httpx.RequestError): + _logger.exception( + "Unexpected failure with %s", + capture_name, + stack_info=True, + ) + return response diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py b/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py new file mode 100644 index 00000000000..c12649c1c3d --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/logging_tools.py @@ -0,0 +1,170 @@ +import datetime +import logging +from collections.abc import Iterator +from contextlib import contextmanager +from dataclasses import dataclass, field +from types import SimpleNamespace +from typing import TypeAlias + + +def _timedelta_as_minute_second_ms(delta: datetime.timedelta) -> str: + total_seconds = delta.total_seconds() + minutes, rem_seconds = divmod(abs(total_seconds), 60) + seconds, milliseconds = divmod(rem_seconds, 1) + result = "" + + if int(minutes) != 0: + result += f"{int(minutes)}m " + + if int(seconds) != 0: + result += f"{int(seconds)}s " + + if int(milliseconds * 1000) != 0: + result += f"{int(milliseconds * 1000)}ms" + if not result: + result = "<1ms" + + sign = "-" if total_seconds < 0 else "" + + return f"{sign}{result.strip()}" + + +class DynamicIndentFormatter(logging.Formatter): + indent_char: str = " " + _cls_indent_level: int = 0 + _instance_indent_level: int = 0 + + def __init__(self, *args, **kwargs): + fmt = args[0] if args else None + dynamic_fmt = fmt or "%(asctime)s %(levelname)s %(message)s" + assert "message" in dynamic_fmt + super().__init__(dynamic_fmt, *args, **kwargs) + + def format(self, record) -> str: + original_message = record.msg + record.msg = f"{self.indent_char * self._cls_indent_level}{self.indent_char * self._instance_indent_level}{original_message}" + result = super().format(record) + record.msg = original_message + return result + + @classmethod + def cls_increase_indent(cls) -> None: + cls._cls_indent_level += 1 + + @classmethod + def cls_decrease_indent(cls) -> None: + cls._cls_indent_level = max(0, cls._cls_indent_level - 1) + + def increase_indent(self) -> None: + self._instance_indent_level += 1 + + def decrease_indent(self) -> None: + self._instance_indent_level = max(0, self._instance_indent_level - 1) + + @classmethod + def setup(cls, logger: logging.Logger) -> None: + _formatter = DynamicIndentFormatter() + _handler = logging.StreamHandler() + _handler.setFormatter(_formatter) + logger.addHandler(_handler) + logger.setLevel(logging.INFO) + + +test_logger = logging.getLogger(__name__) +DynamicIndentFormatter.setup(test_logger) + + +@dataclass +class ContextMessages: + starting: str + done: str + raised: str = field(default="") + + def __post_init__(self): + if not 
self.raised: + self.raised = f"{self.done} [with error]" + + +LogLevelInt: TypeAlias = int +LogMessageStr: TypeAlias = str + + +@contextmanager +def _increased_logger_indent(logger: logging.Logger) -> Iterator[None]: + try: + if formatter := next( + ( + h.formatter + for h in logger.handlers + if isinstance(h.formatter, DynamicIndentFormatter) + ), + None, + ): + formatter.increase_indent() + yield + finally: + if formatter := next( + ( + h.formatter + for h in logger.handlers + if isinstance(h.formatter, DynamicIndentFormatter) + ), + None, + ): + formatter.decrease_indent() + + +@contextmanager +def log_context( + level: LogLevelInt, + msg: LogMessageStr | tuple | ContextMessages, + *args, + logger: logging.Logger = test_logger, + **kwargs, +) -> Iterator[SimpleNamespace]: + # NOTE: Preserves original signature of a logger https://docs.python.org/3/library/logging.html#logging.Logger.log + # NOTE: To add more info to the logs e.g. times, user_id etc prefer using formatting instead of adding more here + + if isinstance(msg, str): + ctx_msg = ContextMessages( + starting=f"-> {msg} starting ...", + done=f"<- {msg} done", + raised=f"! {msg} raised", + ) + elif isinstance(msg, tuple): + ctx_msg = ContextMessages(*msg) + else: + ctx_msg = msg + + started_time = datetime.datetime.now(tz=datetime.UTC) + try: + DynamicIndentFormatter.cls_increase_indent() + + logger.log(level, ctx_msg.starting, *args, **kwargs) + with _increased_logger_indent(logger): + yield SimpleNamespace(logger=logger, messages=ctx_msg) + elapsed_time = datetime.datetime.now(tz=datetime.UTC) - started_time + done_message = ( + f"{ctx_msg.done} ({_timedelta_as_minute_second_ms(elapsed_time)})" + ) + logger.log( + level, + done_message, + *args, + **kwargs, + ) + + except: + elapsed_time = datetime.datetime.now(tz=datetime.UTC) - started_time + error_message = ( + f"{ctx_msg.raised} ({_timedelta_as_minute_second_ms(elapsed_time)})" + ) + logger.exception( + error_message, + *args, + **kwargs, + ) + raise + + finally: + DynamicIndentFormatter.cls_decrease_indent() diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py b/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py new file mode 100644 index 00000000000..d8135614430 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/monkeypatch_envs.py @@ -0,0 +1,101 @@ +""" +.env (dotenv) files (or envfile) +""" + +import os +from io import StringIO +from pathlib import Path + +import dotenv +import pytest + +from .typing_env import EnvVarsDict, EnvVarsIterable + +# +# monkeypatch using dict +# + + +def setenvs_from_dict( + monkeypatch: pytest.MonkeyPatch, envs: dict[str, str | bool] +) -> EnvVarsDict: + env_vars = {} + + for key, value in envs.items(): + assert isinstance(key, str) + assert value is not None, f"{key=},{value=}" + + v = value + + if isinstance(value, bool): + v = "true" if value else "false" + + if isinstance(value, int | float): + v = f"{value}" + + assert isinstance(v, str), ( + "caller MUST explicitly stringify values since some cannot be done automatically" + f"e.g. json-like values. 
Check {key=},{value=}" + ) + + monkeypatch.setenv(key, v) + env_vars[key] = v + + return env_vars + + +def load_dotenv(envfile_content_or_path: Path | str, **options) -> EnvVarsDict: + """Convenient wrapper around dotenv.dotenv_values""" + kwargs = options.copy() + if isinstance(envfile_content_or_path, Path): + # path + kwargs["dotenv_path"] = envfile_content_or_path + else: + assert isinstance(envfile_content_or_path, str) + # content + kwargs["stream"] = StringIO(envfile_content_or_path) + + return {k: v or "" for k, v in dotenv.dotenv_values(**kwargs).items()} + + +def delenvs_from_dict( + monkeypatch: pytest.MonkeyPatch, + envs: EnvVarsIterable, + *, + raising: bool = True, +) -> None: + for key in envs: + assert isinstance(key, str) + monkeypatch.delenv(key, raising) + + +# +# monkeypath using envfiles ('.env' and also denoted as dotfiles) +# + + +def setenvs_from_envfile( + monkeypatch: pytest.MonkeyPatch, content_or_path: str | Path, **dotenv_kwags +) -> EnvVarsDict: + """Batch monkeypatch.setenv(...) on all env vars in an envfile""" + envs = load_dotenv(content_or_path, **dotenv_kwags) + setenvs_from_dict(monkeypatch, envs) + + assert all(env in os.environ for env in envs) + return envs + + +def delenvs_from_envfile( + monkeypatch: pytest.MonkeyPatch, + content_or_path: str | Path, + *, + raising: bool = True, + **dotenv_kwags, +) -> EnvVarsDict: + """Batch monkeypatch.delenv(...) on all env vars in an envfile""" + envs = load_dotenv(content_or_path, **dotenv_kwags) + for key in envs: + monkeypatch.delenv(key, raising=raising) + + assert all(env not in os.environ for env in envs) + return envs diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/moto.py b/packages/pytest-simcore/src/pytest_simcore/helpers/moto.py new file mode 100644 index 00000000000..65c589ba1b9 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/moto.py @@ -0,0 +1,70 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + + +import warnings +from copy import deepcopy +from typing import Any + +import aiobotocore.client + +# Original botocore _make_api_call function +orig = aiobotocore.client.AioBaseClient._make_api_call # noqa: SLF001 + + +def _patch_send_command(self, operation_name, api_params) -> Any: + # NOTE: send_command is not completely patched by moto, therefore we need this specific mock + # https://docs.getmoto.org/en/latest/docs/services/patching_other_services.html + # this might change with new versions of moto + warnings.warn( + "moto is missing SendCommand mock with InstanceIds as Targets, therefore it is manually mocked." 
+ " TIP: periodically check if it gets updated https://docs.getmoto.org/en/latest/docs/services/ssm.html#ssm", + UserWarning, + stacklevel=1, + ) + + assert "Targets" in api_params, "Targets is missing in the API call" + assert ( + len(api_params["Targets"]) == 1 + ), "Targets for patched SendCommand should have only one item" + target_data = api_params["Targets"][0] + assert "Key" in target_data + assert "Values" in target_data + target_key = target_data["Key"] + assert ( + target_key == "InstanceIds" + ), "Targets for patched SendCommand should have InstanceIds as key" + instance_ids = target_data["Values"] + new_api_params = deepcopy(api_params) + new_api_params.pop("Targets") + new_api_params["InstanceIds"] = instance_ids + return orig(self, operation_name, new_api_params) + + +def _patch_describe_instance_information( + self, operation_name, api_params +) -> dict[str, Any]: + warnings.warn( + "moto is missing the describe_instance_information function, therefore it is manually mocked." + "TIP: periodically check if it gets updated https://docs.getmoto.org/en/latest/docs/services/ssm.html#ssm", + UserWarning, + stacklevel=1, + ) + return {"InstanceInformationList": [{"PingStatus": "Online"}]} + + +# Mocked aiobotocore _make_api_call function +async def patched_aiobotocore_make_api_call(self, operation_name, api_params): + # For example for the Access Analyzer service + # As you can see the operation_name has the list_analyzers snake_case form but + # we are using the ListAnalyzers form. + # Rationale -> https://github.com/boto/botocore/blob/develop/botocore/client.py#L810:L816 + if operation_name == "SendCommand": + return await _patch_send_command(self, operation_name, api_params) + if operation_name == "DescribeInstanceInformation": + return _patch_describe_instance_information(self, operation_name, api_params) + + # If we don't want to patch the API call + return await orig(self, operation_name, api_params) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/parametrizations.py b/packages/pytest-simcore/src/pytest_simcore/helpers/parametrizations.py new file mode 100644 index 00000000000..6eae044643b --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/parametrizations.py @@ -0,0 +1,13 @@ +import pytest +from _pytest.mark.structures import ParameterSet +from pydantic import ByteSize, TypeAdapter + + +def byte_size_ids(val) -> str | None: + if isinstance(val, ByteSize): + return val.human_readable() + return None + + +def parametrized_file_size(size_str: str) -> ParameterSet: + return pytest.param(TypeAdapter(ByteSize).validate_python(size_str), id=size_str) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py new file mode 100644 index 00000000000..64b86cba7bb --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright.py @@ -0,0 +1,719 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-instance-attributes + +import contextlib +import json +import logging +import re +import typing +from collections import defaultdict +from collections.abc import Generator +from dataclasses import dataclass, field +from datetime import UTC, datetime, timedelta +from enum import Enum, unique +from typing import Any, Final + +import arrow +import pytest +from playwright._impl._sync_base import EventContextManager +from playwright.sync_api import ( + APIRequestContext, +) +from 
playwright.sync_api import Error as PlaywrightError +from playwright.sync_api import ( + FrameLocator, + Locator, + Page, + Request, +) +from playwright.sync_api import TimeoutError as PlaywrightTimeoutError +from playwright.sync_api import ( + WebSocket, +) +from pydantic import AnyUrl, TypeAdapter +from tenacity import ( + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, + stop_after_delay, + wait_exponential, + wait_fixed, +) + +from .logging_tools import log_context + +_logger = logging.getLogger(__name__) + + +SECOND: Final[int] = 1000 +MINUTE: Final[int] = 60 * SECOND +NODE_START_REQUEST_PATTERN: Final[re.Pattern[str]] = re.compile( + r"/projects/[^/]+/nodes/[^:]+:start" +) + + +@unique +class RunningState(str, Enum): + # NOTE: this is a duplicate of models-library/project_states.py + # It must remain as such until that module is pydantic V2 compatible + """State of execution of a project's computational workflow + + SEE StateType for task state + """ + + UNKNOWN = "UNKNOWN" + PUBLISHED = "PUBLISHED" + NOT_STARTED = "NOT_STARTED" + PENDING = "PENDING" + WAITING_FOR_RESOURCES = "WAITING_FOR_RESOURCES" + STARTED = "STARTED" + SUCCESS = "SUCCESS" + FAILED = "FAILED" + ABORTED = "ABORTED" + WAITING_FOR_CLUSTER = "WAITING_FOR_CLUSTER" + + def is_running(self) -> bool: + return self in ( + RunningState.PUBLISHED, + RunningState.PENDING, + RunningState.WAITING_FOR_RESOURCES, + RunningState.STARTED, + RunningState.WAITING_FOR_CLUSTER, + ) + + +@unique +class NodeProgressType(str, Enum): + # NOTE: this is a partial duplicate of models_library/rabbitmq_messages.py + # It must remain as such until that module is pydantic V2 compatible + CLUSTER_UP_SCALING = "CLUSTER_UP_SCALING" + SIDECARS_PULLING = "SIDECARS_PULLING" + SERVICE_INPUTS_PULLING = "SERVICE_INPUTS_PULLING" + SERVICE_OUTPUTS_PULLING = "SERVICE_OUTPUTS_PULLING" + SERVICE_STATE_PULLING = "SERVICE_STATE_PULLING" + SERVICE_IMAGES_PULLING = "SERVICE_IMAGES_PULLING" + SERVICE_CONTAINERS_STARTING = "SERVICE_CONTAINERS_STARTING" + SERVICE_STATE_PUSHING = "SERVICE_STATE_PUSHING" + SERVICE_OUTPUTS_PUSHING = "SERVICE_OUTPUTS_PUSHING" + PROJECT_CLOSING = "PROJECT_CLOSING" + + @classmethod + def required_types_for_started_service(cls) -> set["NodeProgressType"]: + return { + NodeProgressType.SERVICE_INPUTS_PULLING, + NodeProgressType.SIDECARS_PULLING, + NodeProgressType.SERVICE_OUTPUTS_PULLING, + NodeProgressType.SERVICE_STATE_PULLING, + NodeProgressType.SERVICE_IMAGES_PULLING, + NodeProgressType.SERVICE_CONTAINERS_STARTING, + } + + +class ServiceType(str, Enum): + DYNAMIC = "DYNAMIC" + COMPUTATIONAL = "COMPUTATIONAL" + + +class _OSparcMessages(str, Enum): + NODE_UPDATED = "nodeUpdated" + NODE_PROGRESS = "nodeProgress" + PROJECT_STATE_UPDATED = "projectStateUpdated" + SERVICE_DISK_USAGE = "serviceDiskUsage" + WALLET_OSPARC_CREDITS_UPDATED = "walletOsparcCreditsUpdated" + LOGGER = "logger" + SERVICE_STATUS = "serviceStatus" + + +@dataclass(frozen=True, slots=True, kw_only=True) +class AutoRegisteredUser: + user_email: str + password: str + + +@dataclass(frozen=True, slots=True, kw_only=True) +class SocketIOEvent: + name: str + obj: dict[str, Any] + + def to_json(self) -> str: + return json.dumps({"name": self.name, "obj": self.obj}) + + +SOCKETIO_MESSAGE_PREFIX: Final[str] = "42" + + +@dataclass +class RobustWebSocket: + page: Page + ws: WebSocket + _registered_events: list[tuple[str, typing.Callable | None]] = field( + default_factory=list + ) + _num_reconnections: int = 0 + auto_reconnect: bool = True + + def 
__post_init__(self) -> None: + self._configure_websocket_events() + + def _configure_websocket_events(self) -> None: + with log_context( + logging.INFO, + msg="handle websocket message (set to --log-cli-level=DEBUG level if you wanna see all of them)", + ) as ctx: + + def on_framesent(payload: str | bytes) -> None: + ctx.logger.debug("⬇️ Frame sent: %s", payload) + + def on_framereceived(payload: str | bytes) -> None: + ctx.logger.debug("⬆️ Frame received: %s", payload) + + def on_close(_: WebSocket) -> None: + if self.auto_reconnect: + ctx.logger.warning("⚠️ WebSocket closed. Attempting to reconnect...") + self._attempt_reconnect(ctx.logger) + else: + ctx.logger.warning("⚠️ WebSocket closed.") + + def on_socketerror(error_msg: str) -> None: + ctx.logger.error("❌ WebSocket error: %s", error_msg) + + # Attach core event listeners + self.ws.on("framesent", on_framesent) + self.ws.on("framereceived", on_framereceived) + self.ws.on("close", on_close) + self.ws.on("socketerror", on_socketerror) + + @retry( + stop=stop_after_attempt(3), + wait=wait_exponential( + multiplier=1, + max=10, + ), + reraise=True, + before_sleep=before_sleep_log(_logger, logging.WARNING), + ) + def _attempt_reconnect(self, logger: logging.Logger) -> None: + """ + Attempt to reconnect the WebSocket and restore event listeners. + """ + with self.page.expect_websocket(timeout=5000) as ws_info: + assert not ws_info.value.is_closed() + + self.ws = ws_info.value + self._num_reconnections += 1 + logger.info( + "πŸ”„ Reconnected to WebSocket successfully. Number of reconnections: %s", + self._num_reconnections, + ) + self._configure_websocket_events() + # Re-register all custom event listeners + for event, predicate in self._registered_events: + self.ws.expect_event(event, predicate) + + def expect_event( + self, + event: str, + predicate: typing.Callable | None = None, + *, + timeout: float | None = None, + ) -> EventContextManager: + """ + Register an event listener with support for reconnection. 
+ """ + output = self.ws.expect_event(event, predicate, timeout=timeout) + self._registered_events.append((event, predicate)) + return output + + +def decode_socketio_42_message(message: str) -> SocketIOEvent: + data = json.loads(message.removeprefix(SOCKETIO_MESSAGE_PREFIX)) + return SocketIOEvent(name=data[0], obj=data[1]) + + +def retrieve_project_state_from_decoded_message(event: SocketIOEvent) -> RunningState: + assert event.name == _OSparcMessages.PROJECT_STATE_UPDATED.value + assert "data" in event.obj + assert "state" in event.obj["data"] + assert "value" in event.obj["data"]["state"] + return RunningState(event.obj["data"]["state"]["value"]) + + +@dataclass(frozen=True, slots=True, kw_only=True) +class NodeProgressEvent: + node_id: str + progress_type: NodeProgressType + current_progress: float + total_progress: float + + +def retrieve_node_progress_from_decoded_message( + event: SocketIOEvent, +) -> NodeProgressEvent: + assert event.name == _OSparcMessages.NODE_PROGRESS.value + assert "progress_type" in event.obj + assert "progress_report" in event.obj + return NodeProgressEvent( + node_id=event.obj["node_id"], + progress_type=NodeProgressType(event.obj["progress_type"]), + current_progress=float(event.obj["progress_report"]["actual_value"]), + total_progress=float(event.obj["progress_report"]["total"]), + ) + + +@dataclass +class SocketIOProjectClosedWaiter: + logger: logging.Logger + + def __call__(self, message: str) -> bool: + # socket.io encodes messages like so + # https://stackoverflow.com/questions/24564877/what-do-these-numbers-mean-in-socket-io-payload + if message.startswith(SOCKETIO_MESSAGE_PREFIX): + decoded_message = decode_socketio_42_message(message) + if ( + (decoded_message.name == _OSparcMessages.PROJECT_STATE_UPDATED.value) + and (decoded_message.obj["data"]["locked"]["status"] == "CLOSED") + and (decoded_message.obj["data"]["locked"]["value"] is False) + ): + self.logger.info("project successfully closed") + return True + + return False + + +@dataclass +class SocketIOProjectStateUpdatedWaiter: + expected_states: tuple[RunningState, ...] 
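A quick, standalone sketch of how the socket.io helpers above fit together: frames arrive as the string "42" followed by a JSON array of [event_name, payload]. The frame below is fabricated for illustration, the import path is inferred from this file's location, and the waiter's __call__ predicate is defined just below.

from pytest_simcore.helpers.playwright import (
    RunningState,
    SocketIOProjectStateUpdatedWaiter,
    decode_socketio_42_message,
    retrieve_project_state_from_decoded_message,
)

# fabricated PROJECT_STATE_UPDATED frame, shaped like the webserver's socket.io payload
sample_frame = '42["projectStateUpdated", {"data": {"state": {"value": "STARTED"}}}]'

event = decode_socketio_42_message(sample_frame)
assert event.name == "projectStateUpdated"
assert retrieve_project_state_from_decoded_message(event) is RunningState.STARTED

# the waiter is a plain predicate over raw frames, usable with
# RobustWebSocket.expect_event("framereceived", waiter, timeout=...)
waiter = SocketIOProjectStateUpdatedWaiter(expected_states=(RunningState.STARTED,))
assert waiter(sample_frame)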
+ + def __call__(self, message: str) -> bool: + with log_context(logging.DEBUG, msg=f"handling websocket {message=}"): + # socket.io encodes messages like so + # https://stackoverflow.com/questions/24564877/what-do-these-numbers-mean-in-socket-io-payload + if message.startswith(SOCKETIO_MESSAGE_PREFIX): + decoded_message = decode_socketio_42_message(message) + if decoded_message.name == _OSparcMessages.PROJECT_STATE_UPDATED.value: + return ( + retrieve_project_state_from_decoded_message(decoded_message) + in self.expected_states + ) + + return False + + +@dataclass +class SocketIOOsparcMessagePrinter: + include_logger_messages: bool = False + + def __call__(self, message: str) -> None: + osparc_messages = [_.value for _ in _OSparcMessages] + if not self.include_logger_messages: + osparc_messages.pop(osparc_messages.index(_OSparcMessages.LOGGER.value)) + + if message.startswith(SOCKETIO_MESSAGE_PREFIX): + decoded_message: SocketIOEvent = decode_socketio_42_message(message) + if decoded_message.name in osparc_messages: + print("WS Message:", decoded_message.name, decoded_message.obj) + + +_FAIL_FAST_DYNAMIC_SERVICE_STATES: Final[tuple[str, ...]] = ("idle", "failed") +_SERVICE_ROOT_POINT_STATUS_TIMEOUT: Final[timedelta] = timedelta(seconds=30) + + +def _get_service_url( + node_id: str, product_url: AnyUrl, *, is_legacy_service: bool +) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + f"{product_url.scheme}://{product_url.host}/x/{node_id}" + if is_legacy_service + else f"{product_url.scheme}://{node_id}.services.{product_url.host}" + ) + + +def _check_service_endpoint( + node_id: str, + *, + api_request_context: APIRequestContext, + logger: logging.Logger, + product_url: AnyUrl, + is_legacy_service: bool, +) -> bool: + # NOTE: we might have missed some websocket messages, and we check if the service is ready + service_url = _get_service_url( + node_id, product_url, is_legacy_service=is_legacy_service + ) + + with log_context( + logging.INFO, + "Check service endpoint: %s", + service_url, + ): + response = None + + try: + response = api_request_context.get( + f"{service_url}", + timeout=_SERVICE_ROOT_POINT_STATUS_TIMEOUT.total_seconds() * SECOND, + ) + except (PlaywrightTimeoutError, TimeoutError): + logger.exception( + "❌ Timed-out requesting service endpoint after %ds ❌", + _SERVICE_ROOT_POINT_STATUS_TIMEOUT, + ) + except PlaywrightError: + logger.exception("Failed to request service endpoint") + else: + # NOTE: 502,503 are acceptable if the service is not yet ready (traefik still setting up) + if response.status in (502, 503): + logger.info("⏳ service not ready yet %s ⏳", f"{response.status=}") + return False + if response.status > 400: + logger.error( + "❌ service responded with error: %s:%s ❌", + f"{response.status}", + f"{response.text()}", + ) + return False + + if response.status <= 400: + # NOTE: If the response status is less than 400, it means that the service is ready (There are some services that respond with a 3XX) + logger.info( + "βœ… Service ready!! 
responded with %s βœ…", f"{response.status=}" + ) + return True + return False + + +_SOCKET_IO_NODE_PROGRESS_WAITER_MAX_IDLE_TIMEOUT: Final[timedelta] = timedelta( + seconds=60 +) + + +@dataclass +class SocketIONodeProgressCompleteWaiter: + node_id: str + logger: logging.Logger + max_idle_timeout: timedelta = _SOCKET_IO_NODE_PROGRESS_WAITER_MAX_IDLE_TIMEOUT + _current_progress: dict[NodeProgressType, float] = field( + default_factory=defaultdict + ) + _last_progress_time: datetime = field(default_factory=lambda: datetime.now(tz=UTC)) + _received_messages: list[SocketIOEvent] = field(default_factory=list) + _result: bool = False + + def __call__(self, message: str) -> bool: + # socket.io encodes messages like so + # https://stackoverflow.com/questions/24564877/what-do-these-numbers-mean-in-socket-io-payload + if message.startswith(SOCKETIO_MESSAGE_PREFIX): + decoded_message = decode_socketio_42_message(message) + self._received_messages.append(decoded_message) + if ( + (decoded_message.name == _OSparcMessages.SERVICE_STATUS.value) + and (decoded_message.obj["service_uuid"] == self.node_id) + and ( + decoded_message.obj["service_state"] + in _FAIL_FAST_DYNAMIC_SERVICE_STATES + ) + ): + # NOTE: this is a fail fast for dynamic services that fail to start + self.logger.error( + "❌ node %s failed with state %s, failing fast ❌", + self.node_id, + decoded_message.obj["service_state"], + ) + self._result = False + return True + if decoded_message.name == _OSparcMessages.NODE_PROGRESS.value: + node_progress_event = retrieve_node_progress_from_decoded_message( + decoded_message + ) + if node_progress_event.node_id == self.node_id: + new_progress = ( + node_progress_event.current_progress + / node_progress_event.total_progress + ) + self._last_progress_time = datetime.now(UTC) + if ( + node_progress_event.progress_type not in self._current_progress + ) or ( + new_progress + != self._current_progress[node_progress_event.progress_type] + ): + self._current_progress[node_progress_event.progress_type] = ( + new_progress + ) + + self.logger.info( + "Current startup progress [expected %d types]: %s", + len(NodeProgressType.required_types_for_started_service()), + f"{json.dumps({k: round(v, 2) for k, v in self._current_progress.items()})}", + ) + + done = self._completed_successfully() + if done: + self._result = True # NOTE: might have failed but it is not sure. so we set the result to True + self.logger.info("βœ… Service start completed successfully!! βœ…") + return done + + time_since_last_progress = datetime.now(UTC) - self._last_progress_time + if time_since_last_progress > self.max_idle_timeout: + self.logger.warning( + "⚠️ %s passed since the last received progress message. 
" + "The service %s might be stuck, or we missed some messages ⚠️", + time_since_last_progress, + self.node_id, + ) + self._result = True + return True + + return False + + def _completed_successfully(self) -> bool: + return all( + progress_type in self._current_progress + for progress_type in NodeProgressType.required_types_for_started_service() + ) and all( + round(progress, 1) == 1.0 for progress in self._current_progress.values() + ) + + @property + def success(self) -> bool: + return self._result + + +def wait_for_service_endpoint_responding( + node_id: str, + *, + api_request_context: APIRequestContext, + product_url: AnyUrl, + is_legacy_service: bool, + timeout: int = 30 * SECOND, +) -> None: + """emulates the frontend polling for the service endpoint until it responds with 2xx/3xx""" + + @retry( + retry=retry_if_exception_type(AssertionError), + wait=wait_fixed(1), + stop=stop_after_delay(timeout / 1000), + before_sleep=before_sleep_log(_logger, logging.INFO), + reraise=True, + ) + def _retry_check_service_endpoint(logger: logging.Logger) -> None: + is_service_ready = _check_service_endpoint( + node_id, + api_request_context=api_request_context, + logger=logger, + product_url=product_url, + is_legacy_service=is_legacy_service, + ) + assert is_service_ready, "❌ the service failed starting! ❌" + + with log_context( + logging.INFO, msg=f"wait for service endpoint to be ready ({timeout=})" + ) as ctx: + _retry_check_service_endpoint(ctx.logger) + + +_FAIL_FAST_COMPUTATIONAL_STATES: Final[tuple[RunningState, ...]] = ( + RunningState.FAILED, + RunningState.ABORTED, +) + + +def wait_for_pipeline_state( + current_state: RunningState, + *, + websocket: RobustWebSocket, + if_in_states: tuple[RunningState, ...], + expected_states: tuple[RunningState, ...], + timeout_ms: int, +) -> RunningState: + if current_state in if_in_states: + with log_context( + logging.INFO, + msg=( + f"pipeline is in {current_state=}, waiting for one of {expected_states=}", + f"pipeline is now in {current_state=}", + ), + ): + waiter = SocketIOProjectStateUpdatedWaiter( + expected_states=expected_states + _FAIL_FAST_COMPUTATIONAL_STATES + ) + with websocket.expect_event( + "framereceived", waiter, timeout=timeout_ms + ) as event: + current_state = retrieve_project_state_from_decoded_message( + decode_socketio_42_message(event.value) + ) + if ( + current_state in _FAIL_FAST_COMPUTATIONAL_STATES + and current_state not in expected_states + ): + pytest.fail( + f"❌ Pipeline failed with state {current_state}. 
Expected one of {expected_states} ❌" + ) + return current_state + + +def _node_started_predicate(request: Request) -> bool: + return bool( + re.search(NODE_START_REQUEST_PATTERN, request.url) + and request.method.upper() == "POST" + ) + + +def _trigger_service_start(page: Page, node_id: str) -> None: + with ( + log_context(logging.INFO, msg="trigger start button"), + page.expect_request(_node_started_predicate, timeout=35 * SECOND), + ): + page.get_by_test_id(f"Start_{node_id}").click() + + +@dataclass(slots=True, kw_only=True) +class ServiceRunning: + iframe_locator: FrameLocator | None + + +_MIN_TIMEOUT_WAITING_FOR_SERVICE_ENDPOINT: Final[int] = 30 * SECOND + + +@contextlib.contextmanager +def expected_service_running( + *, + page: Page, + node_id: str, + websocket: RobustWebSocket, + timeout: int, + press_start_button: bool, + product_url: AnyUrl, + is_service_legacy: bool, +) -> Generator[ServiceRunning, None, None]: + started = arrow.utcnow() + with contextlib.ExitStack() as stack: + ctx = stack.enter_context( + log_context( + logging.INFO, + msg=f"Waiting for node to run. Timeout: {timeout}", + ) + ) + + if is_service_legacy: + waiter = None + ctx.logger.info( + "⚠️ Legacy service detected. We are skipping websocket messages in this case! ⚠️" + ) + else: + waiter = SocketIONodeProgressCompleteWaiter( + node_id=node_id, + logger=ctx.logger, + max_idle_timeout=min( + _SOCKET_IO_NODE_PROGRESS_WAITER_MAX_IDLE_TIMEOUT, + timedelta(seconds=timeout / 1000 - 10), + ), + ) + stack.enter_context( + websocket.expect_event("framereceived", waiter, timeout=timeout) + ) + service_running = ServiceRunning(iframe_locator=None) + if press_start_button: + _trigger_service_start(page, node_id) + yield service_running + + elapsed_time = arrow.utcnow() - started + if waiter and not waiter.success: + pytest.fail("❌ Service failed starting! ❌") + + wait_for_service_endpoint_responding( + node_id, + api_request_context=page.request, + product_url=product_url, + is_legacy_service=is_service_legacy, + timeout=max( + timeout - int(elapsed_time.total_seconds() * SECOND), + _MIN_TIMEOUT_WAITING_FOR_SERVICE_ENDPOINT, + ), + ) + service_running.iframe_locator = page.frame_locator( + f'[osparc-test-id="iframe_{node_id}"]' + ) + + +def wait_for_service_running( + *, + page: Page, + node_id: str, + websocket: RobustWebSocket, + timeout: int, + press_start_button: bool, + product_url: AnyUrl, + is_service_legacy: bool, +) -> FrameLocator: + """NOTE: if the service was already started this will not work as some of the required websocket events will not be emitted again + In which case this will need further adjutment""" + + started = arrow.utcnow() + with contextlib.ExitStack() as stack: + ctx = stack.enter_context( + log_context( + logging.INFO, + msg=f"Waiting for node to run. Timeout: {timeout}", + ) + ) + if is_service_legacy: + waiter = None + ctx.logger.info( + "⚠️ Legacy service detected. We are skipping websocket messages in this case! ⚠️" + ) + else: + waiter = SocketIONodeProgressCompleteWaiter( + node_id=node_id, + logger=ctx.logger, + max_idle_timeout=min( + _SOCKET_IO_NODE_PROGRESS_WAITER_MAX_IDLE_TIMEOUT, + timedelta(seconds=timeout / 1000 - 10), + ), + ) + stack.enter_context( + websocket.expect_event("framereceived", waiter, timeout=timeout) + ) + if press_start_button: + _trigger_service_start(page, node_id) + elapsed_time = arrow.utcnow() - started + + if waiter and not waiter.success: + pytest.fail("❌ Service failed starting! 
❌") + wait_for_service_endpoint_responding( + node_id, + api_request_context=page.request, + product_url=product_url, + is_legacy_service=is_service_legacy, + timeout=max( + timeout - int(elapsed_time.total_seconds() * SECOND), + _MIN_TIMEOUT_WAITING_FOR_SERVICE_ENDPOINT, + ), + ) + return page.frame_locator(f'[osparc-test-id="iframe_{node_id}"]') + + +def app_mode_trigger_next_app(page: Page) -> None: + with ( + log_context(logging.INFO, msg="triggering next app"), + page.expect_request(_node_started_predicate), + ): + # Move to next step (this auto starts the next service) + page.get_by_test_id("AppMode_NextBtn").click() + + +def wait_for_label_text( + page: Page, locator: str, substring: str, timeout: int = 10000 +) -> Locator: + page.locator(locator).wait_for(state="visible", timeout=timeout) + + page.wait_for_function( + f"() => document.querySelector('{locator}').innerText.includes('{substring}')", + timeout=timeout, + ) + + return page.locator(locator) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/playwright_sim4life.py b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright_sim4life.py new file mode 100644 index 00000000000..19afcabf3ee --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/playwright_sim4life.py @@ -0,0 +1,182 @@ +import datetime +import logging +import re +from dataclasses import dataclass +from typing import Final, TypedDict + +import arrow +from playwright.sync_api import FrameLocator, Page, WebSocket, expect +from pydantic import AnyUrl, ByteSize, TypeAdapter # pylint: disable=no-name-in-module + +from .logging_tools import log_context +from .playwright import ( + MINUTE, + SECOND, + SOCKETIO_MESSAGE_PREFIX, + RobustWebSocket, + SocketIOEvent, + decode_socketio_42_message, + wait_for_service_running, +) + +_S4L_STREAMING_ESTABLISHMENT_MIN_WAITING_TIME: Final[int] = 5 * SECOND +_S4L_STREAMING_ESTABLISHMENT_MAX_TIME: Final[int] = 30 * SECOND +_S4L_SOCKETIO_REGEX: Final[re.Pattern] = re.compile( + r"^(?P[^:]+)://(?P[^\.]+)\.services\.(?P[^\/]+)\/socket\.io\/.+$" +) +_EC2_STARTUP_MAX_WAIT_TIME: Final[int] = 1 * MINUTE +_S4L_MAX_STARTUP_TIME: Final[int] = 2 * MINUTE +_S4L_DOCKER_PULLING_MAX_TIME: Final[int] = 10 * MINUTE +_S4L_AUTOSCALED_MAX_STARTUP_TIME: Final[int] = ( + _EC2_STARTUP_MAX_WAIT_TIME + _S4L_DOCKER_PULLING_MAX_TIME + _S4L_MAX_STARTUP_TIME +) +_S4L_STARTUP_SCREEN_MAX_TIME: Final[int] = 45 * SECOND +_S4L_COPY_WORKSPACE_TIME: Final[int] = 60 * SECOND + + +@dataclass(kw_only=True) +class S4LWaitForWebsocket: + logger: logging.Logger + + def __call__(self, new_websocket: WebSocket) -> bool: + if re.match(_S4L_SOCKETIO_REGEX, new_websocket.url): + self.logger.info("found S4L websocket!") + return True + + return False + + +@dataclass(kw_only=True) +class _S4LSocketIOCheckBitRateIncreasesMessagePrinter: + min_waiting_time_before_checking_bitrate: datetime.timedelta + logger: logging.Logger + _initial_bit_rate: float = 0 + _initial_bit_rate_time: datetime.datetime = arrow.utcnow().datetime + + def __call__(self, message: str) -> bool: + if message.startswith(SOCKETIO_MESSAGE_PREFIX): + decoded_message: SocketIOEvent = decode_socketio_42_message(message) + if ( + decoded_message.name == "server.video_stream.bitrate_data" + and "bitrate" in decoded_message.obj + ): + current_bit_rate = decoded_message.obj["bitrate"] + if self._initial_bit_rate == 0: + self._initial_bit_rate = current_bit_rate + self._initial_bit_rate_time = arrow.utcnow().datetime + self.logger.info( + "%s", + 
f"{TypeAdapter(ByteSize).validate_python(self._initial_bit_rate).human_readable()}/s at {self._initial_bit_rate_time.isoformat()}", + ) + return False + + # NOTE: MaG says the value might also go down, but it shall definitely change, + # if this code proves unsafe we should change it. + if "bitrate" in decoded_message.obj: + self.logger.info( + "bitrate: %s", + f"{TypeAdapter(ByteSize).validate_python(current_bit_rate).human_readable()}/s", + ) + elapsed_time = arrow.utcnow().datetime - self._initial_bit_rate_time + if ( + elapsed_time > self.min_waiting_time_before_checking_bitrate + and "bitrate" in decoded_message.obj + ): + current_bit_rate = decoded_message.obj["bitrate"] + bitrate_test = bool(self._initial_bit_rate != current_bit_rate) + self.logger.info( + "%s", + f"{TypeAdapter(ByteSize).validate_python(current_bit_rate).human_readable()}/s after {elapsed_time=}: {'good!' if bitrate_test else 'failed! bitrate did not change! TIP: talk with MaG about underwater cables!'}", + ) + return bitrate_test + + return False + + +class WaitForS4LDict(TypedDict): + websocket: WebSocket + iframe: FrameLocator + + +def wait_for_launched_s4l( + page: Page, + node_id, + log_in_and_out: RobustWebSocket, + *, + autoscaled: bool, + copy_workspace: bool, + product_url: AnyUrl, + is_service_legacy: bool, +) -> WaitForS4LDict: + with log_context(logging.INFO, "launch S4L") as ctx: + predicate = S4LWaitForWebsocket(logger=ctx.logger) + with page.expect_websocket( + predicate, + timeout=_S4L_STARTUP_SCREEN_MAX_TIME + + ( + _S4L_AUTOSCALED_MAX_STARTUP_TIME + if autoscaled + else _S4L_MAX_STARTUP_TIME + ) + + (_S4L_COPY_WORKSPACE_TIME if copy_workspace else 0) + + 10 * SECOND, + ) as ws_info: + s4l_iframe = wait_for_service_running( + page=page, + node_id=node_id, + websocket=log_in_and_out, + timeout=( + _S4L_AUTOSCALED_MAX_STARTUP_TIME + if autoscaled + else _S4L_MAX_STARTUP_TIME + ) + + (_S4L_COPY_WORKSPACE_TIME if copy_workspace else 0), + press_start_button=False, + product_url=product_url, + is_service_legacy=is_service_legacy, + ) + s4l_websocket = ws_info.value + ctx.logger.info("acquired S4L websocket!") + return { + "websocket": s4l_websocket, + "iframe": s4l_iframe, + } + + +def interact_with_s4l(page: Page, s4l_iframe: FrameLocator) -> None: + # Wait until grid is shown + # NOTE: the startup screen should disappear very fast after the websocket was acquired + with log_context(logging.INFO, "Interact with S4l"): + s4l_iframe.get_by_test_id("tree-item-Grid").nth(0).click() + page.wait_for_timeout(3000) + + +def check_video_streaming( + page: Page, s4l_iframe: FrameLocator, s4l_websocket: WebSocket +) -> None: + assert ( + _S4L_STREAMING_ESTABLISHMENT_MIN_WAITING_TIME + < _S4L_STREAMING_ESTABLISHMENT_MAX_TIME + ) + with log_context(logging.INFO, "Check videostreaming works") as ctx: + waiter = _S4LSocketIOCheckBitRateIncreasesMessagePrinter( + min_waiting_time_before_checking_bitrate=datetime.timedelta( + milliseconds=_S4L_STREAMING_ESTABLISHMENT_MIN_WAITING_TIME, + ), + logger=ctx.logger, + ) + with s4l_websocket.expect_event( + "framereceived", + waiter, + timeout=_S4L_STREAMING_ESTABLISHMENT_MAX_TIME, + ): + ... + + expect( + s4l_iframe.locator("video"), + "videostreaming is not established. 
" + "TIP: if using playwright integrated open source chromIUM, " + "webkit or firefox this is expected, switch to chrome/msedge!!", + ).to_be_visible() + s4l_iframe.locator("video").click() + page.wait_for_timeout(3000) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tags.py b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tags.py new file mode 100644 index 00000000000..0514369b50e --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tags.py @@ -0,0 +1,54 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from aiopg.sa.connection import SAConnection +from simcore_postgres_database.models.tags import tags +from simcore_postgres_database.models.tags_access_rights import tags_access_rights + + +async def create_tag_access( + conn: SAConnection, + *, + tag_id, + group_id, + read, + write, + delete, +) -> int: + await conn.execute( + tags_access_rights.insert().values( + tag_id=tag_id, group_id=group_id, read=read, write=write, delete=delete + ) + ) + return tag_id + + +async def create_tag( + conn: SAConnection, + *, + name, + description, + color, + group_id, + read, + write, + delete, +) -> int: + """helper to create a tab by inserting rows in two different tables""" + tag_id = await conn.scalar( + tags.insert() + .values(name=name, description=description, color=color) + .returning(tags.c.id) + ) + assert tag_id + await create_tag_access( + conn, tag_id=tag_id, group_id=group_id, read=read, write=write, delete=delete + ) + return tag_id + + +async def delete_tag(conn: SAConnection, tag_id: int): + await conn.execute(tags.delete().where(tags.c.id == tag_id)) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py new file mode 100644 index 00000000000..1e854e8b687 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/postgres_tools.py @@ -0,0 +1,190 @@ +from collections.abc import AsyncIterator, Iterator +from contextlib import asynccontextmanager, contextmanager +from typing import Any, TypedDict + +import simcore_postgres_database.cli +import sqlalchemy as sa +from psycopg2 import OperationalError +from simcore_postgres_database.models.base import metadata +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + + +class PostgresTestConfig(TypedDict): + user: str + password: str + database: str + host: str + port: str + + +def force_drop_all_tables(sa_sync_engine: sa.engine.Engine): + with sa_sync_engine.begin() as conn: + conn.execute(sa.DDL("DROP TABLE IF EXISTS alembic_version")) + conn.execute( + # NOTE: terminates all open transactions before droping all tables + # This solves https://github.com/ITISFoundation/osparc-simcore/issues/7008 + sa.DDL( + "SELECT pg_terminate_backend(pid) " + "FROM pg_stat_activity " + "WHERE state = 'idle in transaction';" + ) + ) + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1776 + metadata.drop_all(bind=conn) + + +@contextmanager +def migrated_pg_tables_context( + postgres_config: PostgresTestConfig, +) -> Iterator[PostgresTestConfig]: + """ + Within the context, tables are created and dropped + using migration upgrade/downgrade routines + """ + + dsn = "postgresql://{user}:{password}@{host}:{port}/{database}".format( + **postgres_config + ) + + assert simcore_postgres_database.cli.discover.callback + assert 
simcore_postgres_database.cli.upgrade.callback + + simcore_postgres_database.cli.discover.callback(**postgres_config) + simcore_postgres_database.cli.upgrade.callback("head") + + yield postgres_config + + # downgrades database to zero --- + # + # NOTE: This step CANNOT be avoided since it would leave the db in an invalid state + # E.g. 'alembic_version' table is not deleted and keeps head version or routines + # like 'notify_comp_tasks_changed' remain undeleted + # + assert simcore_postgres_database.cli.downgrade.callback + assert simcore_postgres_database.cli.clean.callback + + simcore_postgres_database.cli.downgrade.callback("base") + simcore_postgres_database.cli.clean.callback() # just cleans discover cache + + try: + sync_engine = sa.create_engine(dsn) + force_drop_all_tables(sync_engine) + finally: + sync_engine.dispose() + + +def is_postgres_responsive(url) -> bool: + """Check if something responds to ``url``""" + try: + sync_engine = sa.create_engine(url) + conn = sync_engine.connect() + conn.close() + except OperationalError: + return False + return True + + +async def _async_insert_and_get_row( + conn: AsyncConnection, + table: sa.Table, + values: dict[str, Any], + pk_col: sa.Column, + pk_value: Any | None = None, +) -> sa.engine.Row: + result = await conn.execute(table.insert().values(**values).returning(pk_col)) + row = result.one() + + # Get the pk_value from the row if not provided + if pk_value is None: + pk_value = getattr(row, pk_col.name) + else: + # NOTE: DO NO USE row[pk_col] since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9) + assert getattr(row, pk_col.name) == pk_value + + result = await conn.execute(sa.select(table).where(pk_col == pk_value)) + return result.one() + + +def _sync_insert_and_get_row( + conn: sa.engine.Connection, + table: sa.Table, + values: dict[str, Any], + pk_col: sa.Column, + pk_value: Any | None = None, +) -> sa.engine.Row: + result = conn.execute(table.insert().values(**values).returning(pk_col)) + row = result.one() + + # Get the pk_value from the row if not provided + if pk_value is None: + pk_value = getattr(row, pk_col.name) + else: + # NOTE: DO NO USE row[pk_col] since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9) + assert getattr(row, pk_col.name) == pk_value + + result = conn.execute(sa.select(table).where(pk_col == pk_value)) + return result.one() + + +@asynccontextmanager +async def insert_and_get_row_lifespan( + sqlalchemy_async_engine: AsyncEngine, + *, + table: sa.Table, + values: dict[str, Any], + pk_col: sa.Column, + pk_value: Any | None = None, +) -> AsyncIterator[dict[str, Any]]: + # SETUP: insert & get + async with sqlalchemy_async_engine.begin() as conn: + row = await _async_insert_and_get_row( + conn, table=table, values=values, pk_col=pk_col, pk_value=pk_value + ) + # If pk_value was None, get it from the row for deletion later + if pk_value is None: + pk_value = getattr(row, pk_col.name) + + assert row + + # NOTE: DO NO USE dict(row) since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9) + # pylint: disable=protected-access + yield row._asdict() + + # TEAD-DOWN: delete row + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute(table.delete().where(pk_col == pk_value)) + + +@contextmanager +def sync_insert_and_get_row_lifespan( + sqlalchemy_sync_engine: sa.engine.Engine, + *, + table: sa.Table, + values: dict[str, Any], + pk_col: sa.Column, + pk_value: Any | None 
= None, +) -> Iterator[dict[str, Any]]: + """sync version of insert_and_get_row_lifespan. + + TIP: more convenient for **module-scope fixtures** that setup the + database tables before the app starts since it does not require an `event_loop` + fixture (which is funcition-scoped ) + """ + # SETUP: insert & get + with sqlalchemy_sync_engine.begin() as conn: + row = _sync_insert_and_get_row( + conn, table=table, values=values, pk_col=pk_col, pk_value=pk_value + ) + # If pk_value was None, get it from the row for deletion later + if pk_value is None: + pk_value = getattr(row, pk_col.name) + + assert row + + # NOTE: DO NO USE dict(row) since you will get a deprecation error (Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9) + # pylint: disable=protected-access + yield row._asdict() + + # TEARDOWN: delete row + with sqlalchemy_sync_engine.begin() as conn: + conn.execute(table.delete().where(pk_col == pk_value)) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/pydantic_extension.py b/packages/pytest-simcore/src/pytest_simcore/helpers/pydantic_extension.py new file mode 100644 index 00000000000..c1252ed8bb4 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/pydantic_extension.py @@ -0,0 +1,34 @@ +from pydantic import SecretStr + + +def _mask(value): + """ + Mask the password, showing only the first and last characters + or *** if very short passwords + """ + if len(value) > 2: + masked_value = value[0] + "*" * (len(value) - 2) + value[-1] + else: + # In case of very short passwords + masked_value = "*" * len(value) + return masked_value + + +def _hash(value): + """Uses hash number to mask the password""" + return f"hash:{hash(value)}" + + +class Secret4TestsStr(SecretStr): + """Prints a hint of the secret + TIP: Can be handy for testing + """ + + def _display(self) -> str | bytes: + # SEE overrides _SecretBase._display + value = self.get_secret_value() + return _mask(value) if value else "" + + +assert str(Secret4TestsStr("123456890")) == "1*******0" +assert "1*******0" in repr(Secret4TestsStr("123456890")) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/rawdata_fakers.py b/packages/pytest-simcore/src/pytest_simcore/helpers/rawdata_fakers.py deleted file mode 100644 index 3f370f0b301..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/rawdata_fakers.py +++ /dev/null @@ -1,140 +0,0 @@ -""" - Collection of functions that create fake raw data that can be used - to populate postgres DATABASE, create datasets with consistent values, etc - - Built on top of the idea of Faker library (https://faker.readthedocs.io/en/master/), - that generate fake data to bootstrap a database, fill-in stress tests, anonymize data ... - etc - - NOTE: all outputs MUST be Dict-like or built-in data structures that fit at least - required fields in postgres_database.models tables or pydantic models. 
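For context, a hedged usage sketch of sync_insert_and_get_row_lifespan from postgres_tools.py above: it suits module-scoped fixtures that seed one row before the app under test starts and remove it on teardown. The example table and the sync_pg_engine fixture are hypothetical, named here only to illustrate the intended shape.

from collections.abc import Iterator

import pytest
import sqlalchemy as sa

from pytest_simcore.helpers.postgres_tools import sync_insert_and_get_row_lifespan

# hypothetical table, for illustration only
_metadata = sa.MetaData()
_example_tags = sa.Table(
    "example_tags",
    _metadata,
    sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
    sa.Column("name", sa.String, nullable=False),
)


@pytest.fixture(scope="module")
def seeded_tag(sync_pg_engine: sa.engine.Engine) -> Iterator[dict]:
    # setup inserts the row and yields it as a plain dict; teardown deletes it again
    with sync_insert_and_get_row_lifespan(
        sync_pg_engine,
        table=_example_tags,
        values={"name": "fixture-tag"},
        pk_col=_example_tags.c.id,
    ) as row:
        yield row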
-""" - - -import itertools -import json -import random -from datetime import datetime, timedelta -from typing import Any, Callable, Final -from uuid import uuid4 - -import faker -from simcore_postgres_database.models.comp_pipeline import StateType -from simcore_postgres_database.models.projects import projects -from simcore_postgres_database.models.users import users -from simcore_postgres_database.webserver_models import GroupType, UserStatus - -STATES = [ - StateType.NOT_STARTED, - StateType.PENDING, - StateType.RUNNING, - StateType.SUCCESS, - StateType.FAILED, -] - - -FAKE: Final = faker.Faker() - - -def _compute_hash(password: str) -> str: - try: - # 'passlib' will be used only if already installed. - # This way we do not force all modules to install - # it only for testing. - import passlib.hash - - return passlib.hash.sha256_crypt.using(rounds=1000).hash(password) - - except ImportError: - # if 'passlib' is not installed, we will use a library - # from the python distribution for convenience - import hashlib - - return hashlib.sha224(password.encode("ascii")).hexdigest() - - -_DEFAULT_HASH = _compute_hash("secret") - - -def random_user(**overrides) -> dict[str, Any]: - data = dict( - name=FAKE.user_name(), - email=FAKE.email().lower(), - password_hash=_DEFAULT_HASH, - status=UserStatus.ACTIVE, - created_ip=FAKE.ipv4(), - ) - assert set(data.keys()).issubset({c.name for c in users.columns}) # nosec - - # transform password in hash - password = overrides.pop("password", None) - if password: - overrides["password_hash"] = _compute_hash(password) - - data.update(overrides) - return data - - -def random_project(**overrides) -> dict[str, Any]: - """Generates random fake data projects DATABASE table""" - data = dict( - uuid=FAKE.uuid4(), - name=FAKE.word(), - description=FAKE.sentence(), - prj_owner=FAKE.pyint(), - thumbnail=FAKE.image_url(width=120, height=120), - access_rights={}, - workbench={}, - published=False, - ) - assert set(data.keys()).issubset({c.name for c in projects.columns}) # nosec - - data.update(overrides) - return data - - -def random_group(**overrides) -> dict[str, Any]: - data = dict( - name=FAKE.company(), - description=FAKE.text(), - type=GroupType.STANDARD.name, - ) - data.update(overrides) - return data - - -def fake_pipeline(**overrides) -> dict[str, Any]: - data = dict( - dag_adjacency_list=json.dumps({}), - state=random.choice(STATES), - ) - data.update(overrides) - return data - - -def fake_task_factory(first_internal_id=1) -> Callable: - # Each new instance of fake_task will get a copy - _index_in_sequence = itertools.count(start=first_internal_id) - - def fake_task(**overrides) -> dict[str, Any]: - - t0 = datetime.utcnow() - data = dict( - project_id=uuid4(), - node_id=uuid4(), - job_id=uuid4(), - internal_id=next(_index_in_sequence), - schema=json.dumps({}), - inputs=json.dumps({}), - outputs=json.dumps({}), - image=json.dumps({}), - state=random.choice(STATES), - submit=t0, - start=t0 + timedelta(seconds=1), - end=t0 + timedelta(minutes=5), - ) - - data.update(overrides) - return data - - return fake_task diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/s3.py b/packages/pytest-simcore/src/pytest_simcore/helpers/s3.py new file mode 100644 index 00000000000..61d630d994c --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/s3.py @@ -0,0 +1,148 @@ +import logging +from collections.abc import Iterable +from pathlib import Path +from typing import Final + +import aiofiles +import httpx +import orjson +from aws_library.s3 import 
MultiPartUploadLinks +from fastapi import status +from models_library.api_schemas_storage.storage_schemas import ( + ETag, + FileUploadSchema, + UploadedPart, +) +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.utils import limited_as_completed, logged_gather +from types_aiobotocore_s3 import S3Client + +from .logging_tools import log_context + +_SENDER_CHUNK_SIZE: Final[int] = TypeAdapter(ByteSize).validate_python("16Mib") + + +async def _file_sender( + file: Path, *, offset: int, bytes_to_send: int, raise_while_uploading: bool +): + chunk_size = _SENDER_CHUNK_SIZE + if raise_while_uploading: + # to ensure we can raise before it is done + chunk_size = min(_SENDER_CHUNK_SIZE, int(file.stat().st_size / 3)) + async with aiofiles.open(file, "rb") as f: + await f.seek(offset) + num_read_bytes = 0 + while chunk := await f.read(min(chunk_size, bytes_to_send - num_read_bytes)): + num_read_bytes += len(chunk) + yield chunk + if raise_while_uploading: + msg = "we were asked to raise here!" + raise RuntimeError(msg) + + +async def upload_file_part( + session: httpx.AsyncClient, + file: Path, + part_index: int, + file_offset: int, + this_file_chunk_size: int, + num_parts: int, + upload_url: AnyUrl, + *, + raise_while_uploading: bool = False, +) -> tuple[int, ETag]: + print( + f"--> uploading {this_file_chunk_size=} of {file=}, [{part_index + 1}/{num_parts}]..." + ) + response = await session.put( + str(upload_url), + content=_file_sender( + file, + offset=file_offset, + bytes_to_send=this_file_chunk_size, + raise_while_uploading=raise_while_uploading, + ), + headers={ + "Content-Length": f"{this_file_chunk_size}", + }, + ) + response.raise_for_status() + # NOTE: the response from minio does not contain a json body + assert response.status_code == status.HTTP_200_OK + assert response.headers + assert "Etag" in response.headers + received_e_tag = orjson.loads(response.headers["Etag"]) + print( + f"--> completed upload {this_file_chunk_size=} of {file=}, [{part_index + 1}/{num_parts}], {received_e_tag=}" + ) + return (part_index, received_e_tag) + + +async def upload_file_to_presigned_link( + file: Path, file_upload_link: FileUploadSchema | MultiPartUploadLinks +) -> list[UploadedPart]: + file_size = file.stat().st_size + + with log_context(logging.INFO, msg=f"uploading {file} via {file_upload_link=}"): + async with httpx.AsyncClient() as session: + file_chunk_size = int(file_upload_link.chunk_size) + num_urls = len(file_upload_link.urls) + last_chunk_size = file_size - file_chunk_size * (num_urls - 1) + upload_tasks = [] + for index, upload_url in enumerate(file_upload_link.urls): + this_file_chunk_size = ( + file_chunk_size if (index + 1) < num_urls else last_chunk_size + ) + upload_tasks.append( + upload_file_part( + session, + file, + index, + index * file_chunk_size, + this_file_chunk_size, + num_urls, + upload_url, + ) + ) + results = await logged_gather(*upload_tasks, max_concurrency=20) + return [UploadedPart(number=index + 1, e_tag=e_tag) for index, e_tag in results] + + +async def delete_all_object_versions( + s3_client: S3Client, bucket: str, keys: Iterable[str] +) -> None: + objects_to_delete = [] + + bucket_versioning = await s3_client.get_bucket_versioning(Bucket=bucket) + if "Status" in bucket_versioning and bucket_versioning["Status"] == "Enabled": + # NOTE: using gather here kills the moto server + all_versions = [ + await v + async for v in limited_as_completed( + ( + s3_client.list_object_versions(Bucket=bucket, Prefix=key) + for key in keys + ), + limit=10, + ) + 
] + + for versions in all_versions: + # Collect all version IDs and delete markers + objects_to_delete.extend( + {"Key": version["Key"], "VersionId": version["VersionId"]} + for version in versions.get("Versions", []) + ) + + objects_to_delete.extend( + {"Key": marker["Key"], "VersionId": marker["VersionId"]} + for marker in versions.get("DeleteMarkers", []) + ) + else: + # NOTE: this is way faster + objects_to_delete = [{"Key": key} for key in keys] + # Delete all versions and delete markers + if objects_to_delete: + await s3_client.delete_objects( + Bucket=bucket, Delete={"Objects": objects_to_delete} + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_scrunch_citations.py b/packages/pytest-simcore/src/pytest_simcore/helpers/scrunch_citations.py similarity index 100% rename from packages/pytest-simcore/src/pytest_simcore/helpers/utils_scrunch_citations.py rename to packages/pytest-simcore/src/pytest_simcore/helpers/scrunch_citations.py diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/storage.py b/packages/pytest-simcore/src/pytest_simcore/helpers/storage.py new file mode 100644 index 00000000000..b07006a24dc --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/storage.py @@ -0,0 +1,11 @@ +from collections.abc import Callable + +from yarl import URL + + +def replace_storage_endpoint(host: str, port: int) -> Callable[[str], str]: + def _(url: str) -> str: + url_obj = URL(url).with_host(host).with_port(port) + return f"{url_obj}" + + return _ diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils.py b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils.py new file mode 100644 index 00000000000..39c8e2d91d7 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils.py @@ -0,0 +1,47 @@ +import logging +from dataclasses import dataclass +from pathlib import Path +from typing import Any, TypedDict + +import sqlalchemy as sa +from faker import Faker +from models_library.basic_types import SHA256Str +from pydantic import ByteSize +from simcore_postgres_database.storage_models import projects +from sqlalchemy.ext.asyncio import AsyncEngine + +log = logging.getLogger(__name__) + + +async def get_updated_project( + sqlalchemy_async_engine: AsyncEngine, project_id: str +) -> dict[str, Any]: + async with sqlalchemy_async_engine.connect() as conn: + result = await conn.execute( + sa.select(projects).where(projects.c.uuid == project_id) + ) + row = result.one() + return row._asdict() + + +class FileIDDict(TypedDict): + path: Path + sha256_checksum: SHA256Str + + +@dataclass(frozen=True, kw_only=True, slots=True) +class ProjectWithFilesParams: + num_nodes: int + allowed_file_sizes: tuple[ByteSize, ...] + workspace_files_count: int + allowed_file_checksums: tuple[SHA256Str, ...] 
= None # type: ignore # NOTE: OK for testing + + def __post_init__(self): + if self.allowed_file_checksums is None: + # generate some random checksums for the corresponding file sizes + faker = Faker() + checksums = tuple(faker.sha256() for _ in self.allowed_file_sizes) + object.__setattr__(self, "allowed_file_checksums", checksums) + + def __repr__(self) -> str: + return f"ProjectWithFilesParams: #nodes={self.num_nodes}, file sizes={[_.human_readable() for _ in self.allowed_file_sizes]}" diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_file_meta_data.py b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_file_meta_data.py new file mode 100644 index 00000000000..c5566d7030e --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_file_meta_data.py @@ -0,0 +1,57 @@ +from aws_library.s3 import UploadID +from models_library.basic_types import SHA256Str +from models_library.projects_nodes_io import StorageFileID +from simcore_postgres_database.storage_models import file_meta_data +from sqlalchemy.ext.asyncio import AsyncEngine + + +async def assert_file_meta_data_in_db( + sqlalchemy_async_engine: AsyncEngine, + *, + file_id: StorageFileID, + expected_entry_exists: bool, + expected_file_size: int | None, + expected_upload_id: bool | None, + expected_upload_expiration_date: bool | None, + expected_sha256_checksum: SHA256Str | None, +) -> UploadID | None: + if expected_entry_exists and expected_file_size is None: + assert True, "Invalid usage of assertion, expected_file_size cannot be None" + + async with sqlalchemy_async_engine.connect() as conn: + result = await conn.execute( + file_meta_data.select().where(file_meta_data.c.file_id == f"{file_id}") + ) + db_data = result.fetchall() + assert db_data is not None + assert len(db_data) == (1 if expected_entry_exists else 0), ( + f"{file_id} was not found!" + if expected_entry_exists + else f"{file_id} should not exist" + ) + upload_id = None + if expected_entry_exists: + row = db_data[0] + assert ( + row.file_size == expected_file_size + ), f"entry in file_meta_data was not initialized correctly, size should be set to {expected_file_size}" + if expected_upload_id: + assert ( + row.upload_id is not None + ), "multipart upload shall have an upload_id, it is missing!" + else: + assert ( + row.upload_id is None + ), "single file upload should not have an upload_id" + if expected_upload_expiration_date: + assert row.upload_expires_at, "no upload expiration date!" 
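For context, a hedged sketch of how ProjectWithFilesParams above might feed pytest.mark.parametrize; the file size is invented, and when no checksums are given __post_init__ fills allowed_file_checksums with one random checksum per allowed size.

import pytest
from pydantic import ByteSize, TypeAdapter

from pytest_simcore.helpers.storage_utils import ProjectWithFilesParams


@pytest.mark.parametrize(
    "project_params",
    [
        ProjectWithFilesParams(
            num_nodes=3,
            allowed_file_sizes=(TypeAdapter(ByteSize).validate_python("100KiB"),),
            workspace_files_count=0,
        )
    ],
    ids=str,
)
def test_project_fixture_shape(project_params: ProjectWithFilesParams):
    # one auto-generated checksum per allowed file size
    assert len(project_params.allowed_file_checksums) == len(
        project_params.allowed_file_sizes
    )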
+ else: + assert row.upload_expires_at is None, "expiration date should be NULL" + if expected_sha256_checksum: + assert ( + SHA256Str(row.sha256_checksum) == expected_sha256_checksum + ), "invalid sha256_checksum" + else: + assert row.sha256_checksum is None, "expected sha256_checksum was None" + upload_id = row.upload_id + return upload_id diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_project.py b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_project.py new file mode 100644 index 00000000000..ad4535c9d70 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/storage_utils_project.py @@ -0,0 +1,44 @@ +import uuid as uuidlib +from copy import deepcopy +from typing import Any + +from models_library.projects_nodes_io import NodeIDStr + + +def clone_project_data( + project: dict, +) -> tuple[dict[str, Any], dict[NodeIDStr, NodeIDStr]]: + project_copy = deepcopy(project) + + # Update project id + # NOTE: this can be re-assigned by dbapi if not unique + project_copy_uuid = uuidlib.uuid4() # random project id + project_copy["uuid"] = str(project_copy_uuid) + project_copy.pop("id", None) + project_copy["name"] = f"{project['name']}-copy" + + # Workbench nodes shall be unique within the project context + def _create_new_node_uuid(old_uuid: NodeIDStr) -> NodeIDStr: + return NodeIDStr(uuidlib.uuid5(project_copy_uuid, old_uuid)) + + nodes_map = {} + for node_uuid in project.get("workbench", {}): + nodes_map[node_uuid] = _create_new_node_uuid(node_uuid) + + def _replace_uuids(node): + if isinstance(node, str): + node = nodes_map.get(node, node) + elif isinstance(node, list): + node = [_replace_uuids(item) for item in node] + elif isinstance(node, dict): + _frozen_items = tuple(node.items()) + for key, value in _frozen_items: + if key in nodes_map: + new_key = nodes_map[key] + node[new_key] = node.pop(key) + key = new_key + node[key] = _replace_uuids(value) + return node + + project_copy["workbench"] = _replace_uuids(project_copy.get("workbench", {})) + return project_copy, nodes_map diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/typing_env.py b/packages/pytest-simcore/src/pytest_simcore/helpers/typing_env.py index cffca75f7c9..fd9dd81f778 100644 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/typing_env.py +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/typing_env.py @@ -1,4 +1,8 @@ -EnvVarsDict = dict[str, str] +from collections.abc import Iterable +from typing import TypeAlias + +EnvVarsDict: TypeAlias = dict[str, str] +EnvVarsIterable: TypeAlias = Iterable[str] # SEE packages/pytest-simcore/tests/test_helpers_utils_envs.py diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/typing_public_api.py b/packages/pytest-simcore/src/pytest_simcore/helpers/typing_public_api.py new file mode 100644 index 00000000000..e3d0716fb8f --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/typing_public_api.py @@ -0,0 +1,33 @@ +from typing import Any, Literal, TypeAlias, TypedDict + +ServiceNameStr: TypeAlias = str + + +class ComposeSpecDict(TypedDict): + version: str + services: dict[str, Any] + + +class StackDict(TypedDict): + name: str + compose: ComposeSpecDict + + +class StacksDeployedDict(TypedDict): + stacks: dict[Literal["core", "ops"], StackDict] + services: list[ServiceNameStr] + + +class RegisteredUserDict(TypedDict): + first_name: str + last_name: str + email: str + password: str + api_key: str + api_secret: str + + +class ServiceInfoDict(TypedDict): + name: str + version: str 
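For context, a small sketch of what clone_project_data above guarantees, using a fabricated one-node project; the import path is inferred from the file location.

from pytest_simcore.helpers.storage_utils_project import clone_project_data

# fabricated one-node project whose node references its own id in an input
node_id = "00000000-0000-0000-0000-000000000001"
project = {
    "uuid": "11111111-1111-1111-1111-111111111111",
    "name": "demo",
    "workbench": {node_id: {"inputs": {"in_1": node_id}}},
}

clone, nodes_map = clone_project_data(project)

assert clone["name"] == "demo-copy"
assert clone["uuid"] != project["uuid"]
new_node_id = nodes_map[node_id]
# workbench keys and any string values holding old node ids are remapped consistently
assert set(clone["workbench"]) == {new_node_id}
assert clone["workbench"][new_node_id]["inputs"]["in_1"] == new_node_id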
+ schema: dict[str, Any] diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_assert.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_assert.py deleted file mode 100644 index 5208c8c4fe0..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_assert.py +++ /dev/null @@ -1,87 +0,0 @@ -""" Extends assertions for testing - -""" -from pprint import pformat -from typing import Optional - -from aiohttp import ClientResponse -from aiohttp.web import HTTPError, HTTPException, HTTPInternalServerError, HTTPNoContent -from servicelib.aiohttp.rest_responses import unwrap_envelope - - -async def assert_status( - response: ClientResponse, - expected_cls: type[HTTPException], - expected_msg: Optional[str] = None, - expected_error_code: Optional[str] = None, - include_meta: Optional[bool] = False, - include_links: Optional[bool] = False, -) -> tuple[dict, ...]: - """ - Asserts for enveloped responses - """ - json_response = await response.json() - data, error = unwrap_envelope(json_response) - assert response.status == expected_cls.status_code, ( - f"received {response.status}: ({data},{error})" - f", expected {expected_cls.status_code} : {expected_msg or ''}" - ) - - if issubclass(expected_cls, HTTPError): - do_assert_error(data, error, expected_cls, expected_msg, expected_error_code) - - elif issubclass(expected_cls, HTTPNoContent): - assert not data, pformat(data) - assert not error, pformat(error) - else: - # with a 200, data may still be empty see - # https://medium.com/@santhoshkumarkrishna/http-get-rest-api-no-content-404-vs-204-vs-200-6dd869e3af1d - # assert data is not None, pformat(data) - assert not error, pformat(error) - - if expected_msg: - assert expected_msg in data["message"] - - return_value = ( - data, - error, - ) - if include_meta: - return_value += (json_response.get("_meta"),) - if include_links: - return_value += (json_response.get("_links"),) - return return_value - - -async def assert_error( - response: ClientResponse, - expected_cls: type[HTTPException], - expected_msg: Optional[str] = None, -): - data, error = unwrap_envelope(await response.json()) - return do_assert_error(data, error, expected_cls, expected_msg) - - -def do_assert_error( - data, - error, - expected_cls: type[HTTPException], - expected_msg: Optional[str] = None, - expected_error_code: Optional[str] = None, -): - assert not data, pformat(data) - assert error, pformat(error) - - assert len(error["errors"]) == 1 - - err = error["errors"][0] - if expected_msg: - assert expected_msg in err["message"] - - if expected_error_code: - assert expected_error_code == err["code"] - elif expected_cls != HTTPInternalServerError: - # otherwise, code is exactly the name of the Exception class - assert expected_cls.__name__ == err["code"] - - return data, error diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_dict.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_dict.py deleted file mode 100644 index 02d451b7da1..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_dict.py +++ /dev/null @@ -1,55 +0,0 @@ -""" Utils to operate with dicts """ - -from copy import deepcopy -from typing import Any, Mapping, Optional, Union - -ConfigDict = dict[str, Any] - - -def get_from_dict(obj: Mapping[str, Any], dotted_key: str, default=None) -> Any: - keys = dotted_key.split(".") - value = obj - for key in keys[:-1]: - value = value.get(key, {}) - return value.get(keys[-1], default) - - -def copy_from_dict_ex(data: dict[str, Any], exclude: 
set[str]) -> dict[str, Any]: - # NOTE: to be refactored by someone and merged with the next method - return {k: v for k, v in data.items() if k not in exclude} - - -def copy_from_dict( - data: dict[str, Any], - *, - include: Optional[Union[set, dict]] = None, - deep: bool = False -): - # - # Analogous to advanced includes from pydantic exports - # https://pydantic-docs.helpmanual.io/usage/exporting_models/#advanced-include-and-exclude - # - - if include is None: - return deepcopy(data) if deep else data.copy() - - if include == ...: - return deepcopy(data) if deep else data.copy() - - if isinstance(include, set): - return {key: data[key] for key in include} - - assert isinstance(include, dict) # nosec - - return { - key: copy_from_dict(data[key], include=include[key], deep=deep) - for key in include - } - - -def update_dict(obj: dict, **updates): - for key, update_value in updates.items(): - if callable(update_value): - update_value = update_value(obj[key]) - obj.update({key: update_value}) - return obj diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker.py deleted file mode 100644 index c985ea258cd..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker.py +++ /dev/null @@ -1,281 +0,0 @@ -import json -import logging -import os -import re -import socket -import subprocess -from pathlib import Path -from typing import Any, Optional, Union - -import docker -import yaml -from tenacity import retry -from tenacity.after import after_log -from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed - -COLOR_ENCODING_RE = re.compile(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]") -MAX_PATH_CHAR_LEN_ALLOWED = 260 -kFILENAME_TOO_LONG = 36 -_NORMPATH_COUNT = 0 - - -log = logging.getLogger(__name__) - - -def get_localhost_ip(default="127.0.0.1") -> str: - """Return the IP address for localhost""" - local_ip = default - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # doesn't even have to be reachable - s.connect(("10.255.255.255", 1)) - local_ip = s.getsockname()[0] - finally: - s.close() - return local_ip - - -@retry( - wait=wait_fixed(2), - stop=stop_after_attempt(10), - after=after_log(log, logging.WARNING), -) -def get_service_published_port( - service_name: str, target_ports: Optional[Union[list[int], int]] = None -) -> str: - # WARNING: ENSURE that service name exposes a port in - # Dockerfile file or docker-compose config file - - # NOTE: retries since services can take some time to start - client = docker.from_env() - - services = [s for s in client.services.list() if str(s.name).endswith(service_name)] - if not services: - raise RuntimeError( - f"Cannot find published port for service '{service_name}'." - "Probably services still not started." - ) - - service_ports = services[0].attrs["Endpoint"].get("Ports") - if not service_ports: - raise RuntimeError( - f"Cannot find published port for service '{service_name}' in endpoint." - "Probably services still not started." - ) - - published_port = None - msg = ", ".join( - f"{p.get('TargetPort')} -> {p.get('PublishedPort')}" for p in service_ports - ) - - if target_ports is None: - if len(service_ports) > 1: - log.warning( - "Multiple ports published in service '%s': %s. 
Defaulting to first", - service_name, - msg, - ) - published_port = service_ports[0]["PublishedPort"] - - else: - ports_to_look_for: list = ( - [target_ports] if isinstance(target_ports, (int, str)) else target_ports - ) - - for target_port in ports_to_look_for: - target_port = int(target_port) - for p in service_ports: - if p["TargetPort"] == target_port: - published_port = p["PublishedPort"] - break - - if published_port is None: - raise RuntimeError(f"Cannot find published port for {target_ports}. Got {msg}") - - return str(published_port) - - -def run_docker_compose_config( - docker_compose_paths: Union[list[Path], Path], - scripts_dir: Path, - project_dir: Path, - env_file_path: Path, - destination_path: Optional[Path] = None, -) -> dict: - """Runs docker-compose config to validate and resolve a compose file configuration - - - Composes all configurations passed in 'docker_compose_paths' - - Takes 'project_dir' as current working directory to resolve relative paths in the docker-compose correctly - - All environments are interpolated from a custom env-file at 'env_file_path' - - Saves resolved output config to 'destination_path' (if given) - """ - - if not isinstance(docker_compose_paths, list): - docker_compose_paths = [ - docker_compose_paths, - ] - - assert project_dir.exists(), "Invalid file '{project_dir}'" - - for docker_compose_path in docker_compose_paths: - assert str(docker_compose_path.resolve()).startswith(str(project_dir.resolve())) - - assert env_file_path.exists(), "Invalid file '{env_file_path}'" - - if destination_path: - assert destination_path.suffix in [ - ".yml", - ".yaml", - ], "Expected yaml/yml file as destination path" - - # SEE https://docs.docker.com/compose/reference/ - - global_options = [ - "-p", - str(project_dir), # Specify an alternate working directory - ] - # https://docs.docker.com/compose/environment-variables/#using-the---env-file--option - global_options += [ - "-e", - str(env_file_path), # Custom environment variables - ] - - # Specify an alternate compose files - # - When you use multiple Compose files, all paths in the files are relative to the first configuration file specified with -f. - # You can use the --project-directory option to override this base path. - for docker_compose_path in docker_compose_paths: - global_options += [os.path.relpath(docker_compose_path, project_dir)] - - # SEE https://docs.docker.com/compose/reference/config/ - docker_compose_path = scripts_dir / "docker" / "docker-compose-config.bash" - assert docker_compose_path.exists() - - cmd = [f"{docker_compose_path}"] + global_options - print(" ".join(cmd)) - - process = subprocess.run( - cmd, - shell=False, - check=True, - cwd=project_dir, - stdout=subprocess.PIPE, - ) - - compose_file_str = process.stdout.decode("utf-8") - compose_file: dict[str, Any] = yaml.safe_load(compose_file_str) - - def _remove_top_level_name_attribute_generated_by_compose_v2( - compose: dict[str, Any] - ) -> dict[str, Any]: - """docker compose V2 CLI config adds a top level name attribute - https://docs.docker.com/compose/compose-file/#name-top-level-element - but it is incompatible with docker stack deploy... 
- """ - compose.pop("name", None) - return compose - - compose_file = _remove_top_level_name_attribute_generated_by_compose_v2( - compose_file - ) - - if destination_path: - # - # NOTE: This step could be avoided and reading instead from stdout - # but prefer to have a file that stays after the test in a tmp folder - # and can be used later for debugging - # - destination_path.parent.mkdir(parents=True, exist_ok=True) - destination_path.write_text(compose_file_str) - - return compose_file - - -def shorten_path(filename: str) -> Path: - # These paths are composed using test name hierarchies - # when the test is parametrized, it uses the str of the - # object as id which could result in path that goes over - # allowed limit (260 characters). - # This helper function tries to normalize the path - # Another possibility would be that the path has some - # problematic characters but so far we did not find any case ... - global _NORMPATH_COUNT # pylint: disable=global-statement - - if len(filename) > MAX_PATH_CHAR_LEN_ALLOWED: - _NORMPATH_COUNT += 1 - path = Path(filename) - if path.is_dir(): - limit = MAX_PATH_CHAR_LEN_ALLOWED - 60 - filename = filename[:limit] + f"{_NORMPATH_COUNT}" - elif path.is_file(): - limit = MAX_PATH_CHAR_LEN_ALLOWED - 10 - filename = filename[:limit] + f"{_NORMPATH_COUNT}{path.suffix}" - - return Path(filename) - - -# actions/upload-artifact@v2: -# Invalid characters for artifact paths include: -# Double quote ", Colon :, Less than <, Greater than >, Vertical bar |, Asterisk *, -# Question mark ?, Carriage return \r, Line feed \n -BANNED_CHARS_FOR_ARTIFACTS = re.compile(r'["\:><|\*\?]') - - -def safe_artifact_name(name: str) -> str: - return BANNED_CHARS_FOR_ARTIFACTS.sub("_", name) - - -def save_docker_infos(destination_path: Path): - client = docker.from_env() - - # Includes stop containers, which might be e.g. failing tasks - all_containers = client.containers.list(all=True) - - destination_path = Path(safe_artifact_name(f"{destination_path}")) - - if all_containers: - try: - destination_path.mkdir(parents=True, exist_ok=True) - - except OSError as err: - if err.errno == kFILENAME_TOO_LONG: - destination_path = shorten_path(err.filename) - destination_path.mkdir(parents=True, exist_ok=True) - - for container in all_containers: - try: - container_name = safe_artifact_name(container.name) - - # logs w/o coloring characters - logs: str = container.logs(timestamps=True, tail=1000).decode() - - try: - (destination_path / f"{container_name}.log").write_text( - COLOR_ENCODING_RE.sub("", logs) - ) - - except OSError as err: - if err.errno == kFILENAME_TOO_LONG: - shorten_path(err.filename).write_text( - COLOR_ENCODING_RE.sub("", logs) - ) - - # inspect attrs - try: - (destination_path / f"{container_name}.json").write_text( - json.dumps(container.attrs, indent=2) - ) - except OSError as err: - if err.errno == kFILENAME_TOO_LONG: - shorten_path(err.filename).write_text( - json.dumps(container.attrs, indent=2) - ) - - except Exception as err: # pylint: disable=broad-except - print(f"Unexpected failure while dumping {container}." 
f"Details {err}") - - print( - "\n\t", - f"wrote docker log and json files for {len(all_containers)} containers in ", - destination_path, - ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker_registry.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker_registry.py deleted file mode 100644 index 86531639c1c..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_docker_registry.py +++ /dev/null @@ -1,203 +0,0 @@ -""" Helper to request data from docker-registry - - -NOTE: this could be used as draft for https://github.com/ITISFoundation/osparc-simcore/issues/2165 -""" - - -import json -import os -import re -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Iterator - -import httpx - - -@dataclass -class RegistryConfig: - url: str - auth: tuple[str, str] - - -RepoName = str -RepoTag = str - - -class Registry: - # SEE https://docs.docker.com/registry/spec/api - # SEE https://github.com/moby/moby/issues/9015 - - def __init__(self, **data): - data.setdefault("url", f'https://{os.environ.get("REGISTRY_URL")}') - data.setdefault( - "auth", (os.environ.get("REGISTRY_USER"), os.environ.get("REGISTRY_PW")) - ) - self.data = RegistryConfig(**data) - - def __str__(self) -> str: - return f"" - - def api_version_check(self): - # https://docs.docker.com/registry/spec/api/#api-version-check - - r = httpx.get(f"{self.data.url}/v2/", auth=self.data.auth) - r.raise_for_status() - - def iter_repositories(self, limit: int = 100) -> Iterator[RepoName]: - def _req(**kwargs): - r = httpx.get(auth=self.data.auth, **kwargs) - r.raise_for_status() - - yield from r.json()["repositories"] - - if link := r.headers.get("Link"): - # until the Link header is no longer set in the response - # SEE https://docs.docker.com/registry/spec/api/#pagination-1 - # ex=.g. '; rel="next"' - if m := re.match(r'<([^><]+)>;\s+rel=([\w"]+)', link): - next_page = m.group(1) - yield from _req(url=next_page) - - assert limit > 0 - query = {"n": limit} - - yield from _req(url=f"{self.data.url}/v2/_catalog", params=query) - - def get_digest(self, repo_name: str, repo_reference: str) -> str: - r = httpx.head( - f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", - auth=self.data.auth, - ) - r.raise_for_status() - assert r.status_code == 200 - digest = r.headers["Docker-Content-Digest"] - return digest - - def check_manifest(self, repo_name: RepoName, repo_reference: str) -> bool: - r = httpx.head( - f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", - auth=self.data.auth, - ) - if r.status_code == 400: - return False - # some other error? 
- r.raise_for_status() - return True - - def list_tags(self, repo_name: RepoName) -> list[RepoTag]: - r = httpx.get( - f"{self.data.url}/v2/{repo_name}/tags/list", - auth=self.data.auth, - ) - r.raise_for_status() - data = r.json() - assert data["name"] == repo_name - return data["tags"] - - def get_manifest(self, repo_name: str, repo_reference: str): - r = httpx.get( - f"{self.data.url}/v2/{repo_name}/manifests/{repo_reference}", - auth=self.data.auth, - ) - r.raise_for_status() - - # manifest formats and their content types: https://docs.docker.com/registry/spec/manifest-v2-1/, - # see format https://github.com/moby/moby/issues/8093 - manifest = r.json() - return manifest - - -def get_labels(image_v1: str) -> dict[str, Any]: - """image_v1: v1 compatible string encoded json for each layer""" - labels = json.loads(image_v1).get("config", {}).get("Labels", {}) - return labels - - -def extract_metadata(labels: dict[str, Any]) -> dict[str, Any]: - """Creates a metadata object from 'io.simcore.*' labels such as - { - "name": "foo" - "version": "1.2.3" - } - """ - meta = {} - for key in labels: - if key.startswith("io.simcore."): - meta.update(**json.loads(labels[key])) - return meta - - -def extract_extra_service_metadata(labels: dict[str, Any]) -> dict[str, Any]: - """Creates a metadata object 'simcore.service.*' labels such as - { - "service.settings": { ... } - "service.value": 42 - } - """ - meta = {} - for key in labels: - if key.startswith("simcore.service."): - value = labels[key].strip() - try: - value = json.loads(value) - except json.decoder.JSONDecodeError: - # e.g. key=value where value is a raw name - pass - meta.update(**{key.removeprefix("simcore."): value}) - return meta - - -SKIP, SUCCESS, FAILED = "[skip]", "[ok]", "[failed]" - - -def download_all_registry_metadata(dest_dir: Path): - registry = Registry() - - print("Starting", registry) - - count = 0 - for repo in registry.iter_repositories(limit=500): - - # list tags - try: - tags = registry.list_tags(repo_name=repo) - except httpx.HTTPStatusError as err: - print(f"Failed to get tags from {repo=}", err, FAILED) - continue - - # get manifest - folder = dest_dir / Path(repo) - folder.mkdir(parents=True, exist_ok=True) - for tag in tags: - path = folder / f"metadata-{tag}.json" - if not path.exists(): - try: - manifest = registry.get_manifest(repo_name=repo, repo_reference=tag) - - labels = get_labels( - image_v1=manifest["history"][0]["v1Compatibility"] - ) - - meta = extract_metadata(labels) - meta.update(extract_extra_service_metadata(labels)) - - with path.open("wt") as fh: - json.dump(meta, fh, indent=1) - print("downloaded", path, SUCCESS) - count += 1 - except Exception as err: # pylint: disable=broad-except - print("Failed", path, err, FAILED) - path.unlink(missing_ok=True) - else: - print("found", path, SKIP) - count += 1 - print("\nDownloaded", count, "metadata files from", registry.data.url) - - -if __name__ == "__main__": - import sys - - dest = Path(sys.argv[1]) - download_all_registry_metadata(dest_dir=dest) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_environs.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_environs.py deleted file mode 100644 index 0d18b2a42a0..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_environs.py +++ /dev/null @@ -1,146 +0,0 @@ -""" Utils to deal with environment variables (environs in short) - -""" -import re -import warnings -from copy import deepcopy -from pathlib import Path - -import yaml - -from .typing_env import 
EnvVarsDict - -VARIABLE_SUBSTITUTION = re.compile(r"\$\{(\w+)(?:(:{0,1}[-?]{0,1})(.*))?\}$") - -warnings.warn( - f"{__name__} is deprecated, use instead pytest_simcore.helpers.utils_envs", - DeprecationWarning, -) - - -def _load_env(file_handler) -> dict: - """Deserializes an environment file like .env-devel and - returns a key-value map of the environment - - Analogous to json.load - """ - PATTERN_ENVIRON_EQUAL = re.compile(r"^(\w+)=(.*)$") - # Works even for `POSTGRES_EXPORTER_DATA_SOURCE_NAME=postgresql://simcore:simcore@postgres:5432/simcoredb?sslmode=disable` - - environ = {} - for line in file_handler: - m = PATTERN_ENVIRON_EQUAL.match(line) - if m: - key, value = m.groups() - environ[key] = str(value) - return environ - - -def eval_environs_in_docker_compose( - docker_compose: dict, - docker_compose_dir: Path, - host_environ: dict = None, - *, - use_env_devel=True, -): - """Resolves environments in docker compose and sets them under 'environment' section - - TODO: deprecated. Use instead docker-compose config in services/web/server/tests/integration/fixtures/docker_compose.py - SEE https://docs.docker.com/compose/environment-variables/ - """ - content = deepcopy(docker_compose) - for _name, service in content["services"].items(): - replace_environs_in_docker_compose_service( - service, docker_compose_dir, host_environ, use_env_devel=use_env_devel - ) - return content - - -def replace_environs_in_docker_compose_service( - service_section: dict, - docker_compose_dir: Path, - host_environ: dict = None, - *, - use_env_devel=True, -): - """Resolves environments in docker-compose's service section, - drops any reference to env_file and sets all - environs 'environment' section - - NOTE: service_section gets modified! - - SEE https://docs.docker.com/compose/environment-variables/ - """ - service_environ = {} - - # environment defined in env_file - env_files: list[str] = service_section.pop("env_file", []) - for env_file in env_files: - if env_file.endswith(".env") and use_env_devel: - env_file += "-devel" - - env_file_path = (docker_compose_dir / env_file).resolve() - with env_file_path.open() as fh: - file_environ = _load_env(fh) - service_environ.update(file_environ) - - # explicit environment [overrides env_file] - environ_items = service_section.get("environment", []) - if environ_items and isinstance(environ_items, list): - for item in environ_items: - key, value = item.split("=") - m = VARIABLE_SUBSTITUTION.match(value) - if m: # There is a variable as value in docker-compose - envkey = m.groups()[0] # Variable name - if len(m.groups()) == 3: # There is a default value - default_value = m.groups()[2] - if envkey in host_environ: - value = host_environ[envkey] # Use host environ - if default_value and len(value) == 0 and m.groups()[1] == ":-": - value = default_value # Unless it is empty and default exists - elif default_value: - value = default_value # Use default if exists - - service_environ[key] = value - - service_section["environment"] = service_environ - - -def eval_service_environ( - docker_compose_path: Path, - service_name: str, - host_environ: dict = None, - image_environ: dict = None, - *, - use_env_devel=True, -) -> EnvVarsDict: - """Deduces a service environment with it runs in a stack from confirmation - - :param docker_compose_path: path to stack configuration - :type docker_compose_path: Path - :param service_name: service name as defined in docker-compose file - :type service_name: str - :param host_environ: environs in host when stack is started, defaults to None - 
:type host_environ: Dict, optional - :param image_environ: environs set in Dockerfile, defaults to None - :type image_environ: Dict, optional - :param image_environ: environs set in Dockerfile, defaults to None - :rtype: Dict - """ - docker_compose_dir = docker_compose_path.parent.resolve() - with docker_compose_path.open() as f: - content = yaml.safe_load(f) - - service = content["services"][service_name] - replace_environs_in_docker_compose_service( - service, docker_compose_dir, host_environ, use_env_devel=use_env_devel - ) - - host_environ = host_environ or {} - image_environ = image_environ or {} - - # Environ expected in a running service - service_environ: EnvVarsDict = {} - service_environ.update(image_environ) - service_environ.update(service["environment"]) - return service_environ diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_envs.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_envs.py deleted file mode 100644 index 73313fc8a15..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_envs.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -.env (dotenv) files (or envfile) -""" - -import os -from copy import deepcopy -from io import StringIO -from pathlib import Path -from typing import Union - -import dotenv -from pytest import MonkeyPatch - -from .typing_env import EnvVarsDict - -# -# monkeypatch using dict -# - - -def setenvs_from_dict(monkeypatch: MonkeyPatch, envs: EnvVarsDict) -> EnvVarsDict: - for key, value in envs.items(): - assert isinstance(key, str) - assert value is not None # None keys cannot be is defined w/o value - assert isinstance( - value, str - ), "client MUST explicitly stringify values since some cannot be done automatically e.g. json-like values" - - monkeypatch.setenv(key, value) - return deepcopy(envs) - - -def load_dotenv(envfile_content_or_path: Union[Path, str], **options) -> EnvVarsDict: - """Convenient wrapper around dotenv.dotenv_values""" - kwargs = options.copy() - if isinstance(envfile_content_or_path, Path): - # path - kwargs["dotenv_path"] = envfile_content_or_path - else: - assert isinstance(envfile_content_or_path, str) - # content - kwargs["stream"] = StringIO(envfile_content_or_path) - - return {k: v or "" for k, v in dotenv.dotenv_values(**kwargs).items()} - - -# -# monkeypath using envfiles ('.env' and also denoted as dotfiles) -# - - -def setenvs_from_envfile( - monkeypatch: MonkeyPatch, content_or_path: Union[str, Path], **dotenv_kwags -) -> EnvVarsDict: - """Batch monkeypatch.setenv(...) on all env vars in an envfile""" - envs = load_dotenv(content_or_path, **dotenv_kwags) - setenvs_from_dict(monkeypatch, envs) - - assert all(env in os.environ for env in envs) - return envs - - -def delenvs_from_envfile( - monkeypatch: MonkeyPatch, - content_or_path: Union[str, Path], - raising: bool, - **dotenv_kwags -) -> EnvVarsDict: - """Batch monkeypatch.delenv(...) 
on all env vars in an envfile""" - envs = load_dotenv(content_or_path, **dotenv_kwags) - for key in envs.keys(): - monkeypatch.delenv(key, raising=raising) - - assert all(env not in os.environ for env in envs) - return envs diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py deleted file mode 100644 index 304b264b695..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py +++ /dev/null @@ -1,154 +0,0 @@ -import re -from datetime import datetime -from typing import Optional, TypedDict - -from aiohttp import web -from aiohttp.test_utils import TestClient -from simcore_service_webserver.db_models import UserRole, UserStatus -from simcore_service_webserver.login._constants import MSG_LOGGED_IN -from simcore_service_webserver.login._registration import create_invitation_token -from simcore_service_webserver.login.storage import AsyncpgStorage, get_plugin_storage -from yarl import URL - -from .rawdata_fakers import FAKE, random_user -from .utils_assert import assert_status - - -# WARNING: DO NOT use UserDict is already in https://docs.python.org/3/library/collections.html#collections.UserDictclass UserRowDict(TypedDict): -# NOTE: this is modified dict version of packages/postgres-database/src/simcore_postgres_database/models/users.py for testing purposes -class _UserInfoDictRequired(TypedDict, total=True): - id: int - name: str - email: str - primary_gid: str - raw_password: str - status: UserStatus - role: UserRole - - -class UserInfoDict(_UserInfoDictRequired, total=False): - created_at: datetime - created_ip: int - password_hash: str - - -TEST_MARKS = re.compile(r"TEST (\w+):(.*)") - - -def parse_test_marks(text): - """Checs for marks as - - TEST name:123123 - TEST link:some-value - """ - marks = {} - for m in TEST_MARKS.finditer(text): - key, value = m.groups() - marks[key] = value.strip() - return marks - - -def parse_link(text): - link = parse_test_marks(text)["link"] - return URL(link).path - - -async def create_fake_user(db: AsyncpgStorage, data=None) -> UserInfoDict: - """Creates a fake user and inserts it in the users table in the database""" - data = data or {} - data.setdefault("password", "secret") - data.setdefault("status", UserStatus.ACTIVE.name) - data.setdefault("role", UserRole.USER.name) - params = random_user(**data) - - user = await db.create_user(params) - user["raw_password"] = data["password"] - return user - - -async def log_client_in( - client: TestClient, user_data=None, *, enable_check=True -) -> UserInfoDict: - # creates user directly in db - assert client.app - db: AsyncpgStorage = get_plugin_storage(client.app) - - user = await create_fake_user(db, user_data) - - # login - url = client.app.router["auth_login"].url_for() - reponse = await client.post( - str(url), - json={ - "email": user["email"], - "password": user["raw_password"], - }, - ) - - if enable_check: - await assert_status(reponse, web.HTTPOk, MSG_LOGGED_IN) - - return user - - -class NewUser: - def __init__(self, params=None, app: Optional[web.Application] = None): - self.params = params - self.user = None - assert app - self.db = get_plugin_storage(app) - - async def __aenter__(self): - self.user = await create_fake_user(self.db, self.params) - return self.user - - async def __aexit__(self, *args): - await self.db.delete_user(self.user) - - -class LoggedUser(NewUser): - def __init__(self, client, params=None, *, check_if_succeeds=True): - super().__init__(params, client.app) - 
self.client = client - self.enable_check = check_if_succeeds - - async def __aenter__(self) -> UserInfoDict: - self.user = await log_client_in( - self.client, self.params, enable_check=self.enable_check - ) - return self.user - - -class NewInvitation(NewUser): - def __init__( - self, - client: TestClient, - guest_email: Optional[str] = None, - host: Optional[dict] = None, - trial_days: Optional[int] = None, - ): - assert client.app - super().__init__(params=host, app=client.app) - self.client = client - self.tag = f"Created by {guest_email or FAKE.email()}" - self.confirmation = None - self.trial_days = trial_days - - async def __aenter__(self) -> "NewInvitation": - # creates host user - assert self.client.app - db: AsyncpgStorage = get_plugin_storage(self.client.app) - self.user = await create_fake_user(db, self.params) - - self.confirmation = await create_invitation_token( - self.db, - user_id=self.user["id"], - user_email=self.user["email"], - tag=self.tag, - trial_days=self.trial_days, - ) - return self - - async def __aexit__(self, *args): - if await self.db.get_confirmation(self.confirmation): - await self.db.delete_confirmation(self.confirmation) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_parametrizations.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_parametrizations.py deleted file mode 100644 index 8b0b539eb58..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_parametrizations.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Optional - -from pydantic import ByteSize - - -def byte_size_ids(val) -> Optional[str]: - if isinstance(val, ByteSize): - return val.human_readable() - return None diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_postgres.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_postgres.py deleted file mode 100644 index 8d0ec7ed7e6..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_postgres.py +++ /dev/null @@ -1,55 +0,0 @@ -from contextlib import contextmanager -from copy import deepcopy -from typing import Any, Iterator - -import simcore_postgres_database.cli -import sqlalchemy as sa -from simcore_postgres_database.models.base import metadata - - -@contextmanager -def migrated_pg_tables_context( - postgres_config: dict[str, str], -) -> Iterator[dict[str, Any]]: - """ - Within the context, tables are created and dropped - using migration upgrade/downgrade routines - """ - - cfg = deepcopy(postgres_config) - cfg.update( - dsn="postgresql://{user}:{password}@{host}:{port}/{database}".format( - **postgres_config - ) - ) - - simcore_postgres_database.cli.discover.callback(**postgres_config) - simcore_postgres_database.cli.upgrade.callback("head") - - yield cfg - - # downgrades database to zero --- - # - # NOTE: This step CANNOT be avoided since it would leave the db in an invalid state - # E.g. 
'alembic_version' table is not deleted and keeps head version or routines - # like 'notify_comp_tasks_changed' remain undeleted - # - simcore_postgres_database.cli.downgrade.callback("base") - simcore_postgres_database.cli.clean.callback() # just cleans discover cache - - # FIXME: migration downgrade fails to remove User types - # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1776 - # Added drop_all as tmp fix - postgres_engine = sa.create_engine(cfg["dsn"]) - metadata.drop_all(bind=postgres_engine) - - -def is_postgres_responsive(url) -> bool: - """Check if something responds to ``url``""" - try: - engine = sa.create_engine(url) - conn = engine.connect() - conn.close() - except sa.exc.OperationalError: - return False - return True diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py deleted file mode 100644 index ce1431811dd..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_projects.py +++ /dev/null @@ -1,169 +0,0 @@ -""" helpers to manage the projects's database and produce fixtures/mockup data for testing - -""" -# pylint: disable=no-value-for-parameter - -import json -import uuid as uuidlib -from pathlib import Path -from typing import Any, Optional - -from aiohttp import web -from aiohttp.test_utils import TestClient -from simcore_service_webserver.projects.project_models import ProjectDict -from simcore_service_webserver.projects.projects_db import ( - APP_PROJECT_DBAPI, - ProjectDBAPI, -) -from simcore_service_webserver.projects.projects_db_utils import DB_EXCLUSIVE_COLUMNS -from simcore_service_webserver.utils import now_str - -from .utils_assert import assert_status - - -def empty_project_data(): - return { - "uuid": f"project-{uuidlib.uuid4()}", - "name": "Empty name", - "description": "some description of an empty project", - "prjOwner": "I'm the empty project owner, hi!", - "creationDate": now_str(), - "lastChangeDate": now_str(), - "thumbnail": "", - "workbench": {}, - } - - -async def create_project( - app: web.Application, - params_override: Optional[dict[str, Any]] = None, - user_id: Optional[int] = None, - *, - product_name: str, - default_project_json: Optional[Path] = None, - force_uuid: bool = False, - as_template: bool = False, -) -> ProjectDict: - """Injects new project in database for user or as template - - :param params_override: predefined project properties (except for non-writeable e.g. 
uuid), defaults to None - :type params_override: Dict, optional - :param user_id: assigns this project to user or template project if None, defaults to None - :type user_id: int, optional - :return: schema-compliant project - :rtype: Dict - """ - params_override = params_override or {} - - project_data = {} - if default_project_json is not None: - # uses default_project_json as base - assert default_project_json.exists(), f"{default_project_json}" - project_data = json.loads(default_project_json.read_text()) - - project_data.update(params_override) - - db: ProjectDBAPI = app[APP_PROJECT_DBAPI] - - new_project = await db.insert_project( - project_data, - user_id, - product_name=product_name, - force_project_uuid=force_uuid, - force_as_template=as_template, - ) - try: - uuidlib.UUID(str(project_data["uuid"])) - assert new_project["uuid"] == project_data["uuid"] - except (ValueError, AssertionError): - # in that case the uuid gets replaced - assert new_project["uuid"] != project_data["uuid"] - project_data["uuid"] = new_project["uuid"] - - for key in DB_EXCLUSIVE_COLUMNS: - project_data.pop(key, None) - - return new_project - - -async def delete_all_projects(app: web.Application): - from simcore_postgres_database.webserver_models import projects - - db = app[APP_PROJECT_DBAPI] - async with db.engine.acquire() as conn: - query = projects.delete() - await conn.execute(query) - - -class NewProject: - def __init__( - self, - params_override: Optional[dict] = None, - app: Optional[web.Application] = None, - clear_all: bool = True, - user_id: Optional[int] = None, - *, - product_name: str, - tests_data_dir: Path, - force_uuid: bool = False, - as_template: bool = False, - ): - assert app # nosec - - self.params_override = params_override - self.user_id = user_id - self.product_name = product_name - self.app = app - self.prj = {} - self.clear_all = clear_all - self.force_uuid = force_uuid - self.tests_data_dir = tests_data_dir - self.as_template = as_template - - assert tests_data_dir.exists() - assert tests_data_dir.is_dir() - - if not self.clear_all: - # TODO: add delete_project. 
Deleting a single project implies having to delete as well all dependencies created - raise ValueError( - "UNDER DEVELOPMENT: Currently can only delete all projects " - ) - - async def __aenter__(self) -> ProjectDict: - assert self.app # nosec - - self.prj = await create_project( - self.app, - self.params_override, - self.user_id, - product_name=self.product_name, - force_uuid=self.force_uuid, - default_project_json=self.tests_data_dir / "fake-project.json", - as_template=self.as_template, - ) - return self.prj - - async def __aexit__(self, *args): - assert self.app # nosec - if self.clear_all: - await delete_all_projects(self.app) - - -async def assert_get_same_project( - client: TestClient, - project: ProjectDict, - expected: type[web.HTTPException], - api_vtag="/v0", -) -> dict: - # GET /v0/projects/{project_id} - - # with a project owned by user - assert client.app - url = client.app.router["get_project"].url_for(project_id=project["uuid"]) - assert str(url) == f"{api_vtag}/projects/{project['uuid']}" - resp = await client.get(f"{url}") - data, error = await assert_status(resp, expected) - - if not error: - assert data == project - return data diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_rate_limit.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_rate_limit.py deleted file mode 100644 index 03e3c987d92..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_rate_limit.py +++ /dev/null @@ -1,186 +0,0 @@ -import asyncio -import logging -import math -import time -from functools import wraps -from typing import Awaitable - -from aiohttp import ClientResponse, ClientSession, ClientTimeout - -log = logging.getLogger() - - -def function_duration(func): - @wraps(func) - async def wrapper(*args, **kwargs): - start = time.time() - result = await func(*args, **kwargs) - end = time.time() - elapsed = end - start - log.info("Function '%s' execution took '%0.2f' seconds", func.__name__, elapsed) - return result - - return wrapper - - -def is_rate_limit_reached(result: ClientResponse) -> bool: - return "Retry-After" in result.headers - - -async def get_request_result( - client: ClientSession, endpoint_to_check: str -) -> ClientResponse: - result = await client.get(endpoint_to_check) - log.debug("%s\n%s\n%s", result, await result.text(), dict(result.headers)) - return result - - -async def assert_burst_request( - client: ClientSession, - endpoint_to_check: str, - burst: int, -): - functions = [get_request_result(client, endpoint_to_check) for x in range(burst)] - results = await asyncio.gather(*functions) - for result in results: - assert is_rate_limit_reached(result) is False - - -@function_duration -async def assert_burst_rate_limit( - endpoint_to_check: str, average: int, period_sec: int, burst: int -) -> float: - """ - Runs 2 burst sequences with a pause in between and expects for the - next result to fail. 
- """ - - max_rate = period_sec / average - # sleeping 2 times the burst window - burst_window = period_sec / burst - sleep_internval = 2 * burst_window - - log.info( - "Sleeping params: burst_window=%s, sleep_interval=%s, max_rate=%s", - burst_window, - sleep_internval, - max_rate, - ) - - timeout = ClientTimeout(total=10, connect=1, sock_connect=1) - async with ClientSession(timeout=timeout) as client: - - # check can burst in timeframe - await assert_burst_request( - client=client, endpoint_to_check=endpoint_to_check, burst=burst - ) - - log.info("First burst finished") - - await asyncio.sleep(sleep_internval) - - # check that burst in timeframe is ok - await assert_burst_request( - client=client, endpoint_to_check=endpoint_to_check, burst=burst - ) - - log.info("Second burst finished") - - # check that another request after the burst fails - result = await get_request_result(client, endpoint_to_check) - assert is_rate_limit_reached(result) is True - - return sleep_internval - - -@function_duration -async def assert_steady_rate_in_5_seconds( - endpoint_to_check: str, average: int, period_sec: int, **_ -) -> float: - """Creates a requests at a continuous rate without considering burst limits""" - # run tests for at least 5 seconds - max_rate = period_sec / average # reqs/ sec - requests_to_make = int(math.ceil(max_rate * 5)) - - sleep_interval = max_rate - - log.info( - "Steady rate params: sleep_interval=%s, max_rate=%s, requests_to_make=%s", - sleep_interval, - max_rate, - requests_to_make, - ) - - timeout = ClientTimeout(total=10, connect=1, sock_connect=1) - async with ClientSession(timeout=timeout) as client: - - for i in range(requests_to_make): - log.info("Request %s", i) - result = await get_request_result(client, endpoint_to_check) - assert is_rate_limit_reached(result) is False - log.info("Sleeping for %s s", sleep_interval) - await asyncio.sleep(sleep_interval) - - return sleep_interval - - -CHECKS_TO_RUN: list[Awaitable] = [ - assert_steady_rate_in_5_seconds, - assert_burst_rate_limit, -] - - -@function_duration -async def run_rate_limit_configuration_checks( - endpoint_to_check: str, average: int = 0, period_sec: int = 1, burst: int = 1 -): - """ - Runner to start all the checks for the firewall configuration - - All tests mut return the period to sleep before the next test can start. 
- - All defaults are taken from Traefik's docs - SEE https://doc.traefik.io/traefik/middlewares/ratelimit/ - """ - - log.warning( - "Runtime will vary based on the rate limit configuration of the service\n" - ) - - for awaitable in CHECKS_TO_RUN: - log.info("<<<< Starting test '%s'...", awaitable.__name__) - sleep_before_next_test = await awaitable( - endpoint_to_check=endpoint_to_check, - average=average, - period_sec=period_sec, - burst=burst, - ) - log.info(">>>> Finished testing '%s'\n", awaitable.__name__) - - log.info(">>>> Sleeping '%s' seconds before next test", sleep_before_next_test) - await asyncio.sleep(sleep_before_next_test) - - log.info("All tests completed") - - -if __name__ == "__main__": - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s %(levelname)s %(threadName)s [%(name)s] %(message)s", - ) - - # How to use, the below parameters are derived from the following labels: - # - traefik.http.middlewares.ratelimit-${SWARM_STACK_NAME}_api-server.ratelimit.average=1 - # - traefik.http.middlewares.ratelimit-${SWARM_STACK_NAME}_api-server.ratelimit.period=1m - # - traefik.http.middlewares.ratelimit-${SWARM_STACK_NAME}_api-server.ratelimit.burst=10 - # Will result in: average=1, period_sec=60, burst=10 - # WARNING: in the above example the test will run for 5 hours :\ - - asyncio.get_event_loop().run_until_complete( - run_rate_limit_configuration_checks( - endpoint_to_check="http://localhost:10081/", - average=1, - period_sec=60, - burst=10, - ) - ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tags.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tags.py deleted file mode 100644 index e83c6b03b05..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tags.py +++ /dev/null @@ -1,53 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -from aiopg.sa.connection import SAConnection -from simcore_postgres_database.models.tags import tags, tags_to_groups - - -async def create_tag_access( - conn: SAConnection, - *, - tag_id, - group_id, - read, - write, - delete, -) -> int: - await conn.execute( - tags_to_groups.insert().values( - tag_id=tag_id, group_id=group_id, read=read, write=write, delete=delete - ) - ) - return tag_id - - -async def create_tag( - conn: SAConnection, - *, - name, - description, - color, - group_id, - read, - write, - delete, -) -> int: - """helper to create a tab by inserting rows in two different tables""" - tag_id = await conn.scalar( - tags.insert() - .values(name=name, description=description, color=color) - .returning(tags.c.id) - ) - assert tag_id - await create_tag_access( - conn, tag_id=tag_id, group_id=group_id, read=read, write=write, delete=delete - ) - return tag_id - - -async def delete_tag(conn: SAConnection, tag_id: int): - await conn.execute(tags.delete().where(tags.c.id == tag_id)) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tokens.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tokens.py deleted file mode 100644 index 12287a7e0f7..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_tokens.py +++ /dev/null @@ -1,80 +0,0 @@ -# pylint: disable=E1120 - -import json -import random -from functools import reduce - -import sqlalchemy as sa -from servicelib.common_aiopg_utils import DSN -from simcore_service_webserver.db_models import metadata, tokens, users -from simcore_service_webserver.login.utils import 
get_random_string -from sqlalchemy import JSON, String, cast -from sqlalchemy.sql import and_ # , or_, not_ - - -def create_db_tables(**kargs): - url = DSN.format(**kargs) - engine = sa.create_engine(url, isolation_level="AUTOCOMMIT") - metadata.create_all(bind=engine, tables=[users, tokens], checkfirst=True) - engine.dispose() - return url - - -async def create_token_in_db(engine, **data): - # TODO change by faker? - params = { - "user_id": random.randint(0, 3), - "token_service": get_random_string(5), - "token_data": { - "token_secret": get_random_string(3), - "token_key": get_random_string(4), - }, - } - params.update(data) - - async with engine.acquire() as conn: - stmt = tokens.insert().values(**params) - result = await conn.execute(stmt) - row = await result.first() - return dict(row) - - -async def get_token_from_db( - engine, *, token_id=None, user_id=None, token_service=None, token_data=None -): - async with engine.acquire() as conn: - expr = to_expression( - token_id=token_id, - user_id=user_id, - token_service=token_service, - token_data=token_data, - ) - stmt = sa.select([tokens]).where(expr) - result = await conn.execute(stmt) - row = await result.first() - return dict(row) if row else None - - -async def delete_token_from_db(engine, *, token_id): - expr = tokens.c.token_id == token_id - async with engine.acquire() as conn: - stmt = tokens.delete().where(expr) - await conn.execute(stmt) - - -async def delete_all_tokens_from_db(engine): - async with engine.acquire() as conn: - await conn.execute(tokens.delete()) - - -def to_expression(**params): - expressions = [] - for key, value in params.items(): - if value is not None: - statement = ( - (cast(getattr(tokens.c, key), String) == json.dumps(value)) - if isinstance(getattr(tokens.c, key).type, JSON) - else (getattr(tokens.c, key) == value) - ) - expressions.append(statement) - return reduce(and_, expressions) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_webserver_unit_with_db.py b/packages/pytest-simcore/src/pytest_simcore/helpers/utils_webserver_unit_with_db.py deleted file mode 100644 index 295fa8ae9ce..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_webserver_unit_with_db.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import NamedTuple, Union -from unittest import mock - -from aiohttp import web -from servicelib.aiohttp.web_exceptions_extension import HTTPLocked -from simcore_service_webserver.security_roles import UserRole - - -class ExpectedResponse(NamedTuple): - """ - Stores respons status to an API request in function of the user - - e.g. 
for a request that normally returns OK, a non-authorized user - will have no access, therefore ExpectedResponse.ok = HTTPUnauthorized - """ - - ok: Union[type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPOk]] - created: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPCreated] - ] - no_content: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPNoContent] - ] - not_found: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPNotFound] - ] - forbidden: Union[ - type[web.HTTPUnauthorized], - type[web.HTTPForbidden], - ] - locked: Union[type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[HTTPLocked]] - accepted: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPAccepted] - ] - unprocessable: Union[ - type[web.HTTPUnauthorized], - type[web.HTTPForbidden], - type[web.HTTPUnprocessableEntity], - ] - not_acceptable: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPNotAcceptable] - ] - conflict: Union[ - type[web.HTTPUnauthorized], type[web.HTTPForbidden], type[web.HTTPConflict] - ] - - def __str__(self) -> str: - # pylint: disable=no-member - items = ",".join(f"{k}={v.__name__}" for k, v in self._asdict().items()) - return f"{self.__class__.__name__}({items})" - - -def standard_role_response() -> tuple[str, list[tuple[UserRole, ExpectedResponse]]]: - return ( - "user_role,expected", - [ - ( - UserRole.ANONYMOUS, - ExpectedResponse( - ok=web.HTTPUnauthorized, - created=web.HTTPUnauthorized, - no_content=web.HTTPUnauthorized, - not_found=web.HTTPUnauthorized, - forbidden=web.HTTPUnauthorized, - locked=web.HTTPUnauthorized, - accepted=web.HTTPUnauthorized, - unprocessable=web.HTTPUnauthorized, - not_acceptable=web.HTTPUnauthorized, - conflict=web.HTTPUnauthorized, - ), - ), - ( - UserRole.GUEST, - ExpectedResponse( - ok=web.HTTPForbidden, - created=web.HTTPForbidden, - no_content=web.HTTPForbidden, - not_found=web.HTTPForbidden, - forbidden=web.HTTPForbidden, - locked=web.HTTPForbidden, - accepted=web.HTTPForbidden, - unprocessable=web.HTTPForbidden, - not_acceptable=web.HTTPForbidden, - conflict=web.HTTPForbidden, - ), - ), - ( - UserRole.USER, - ExpectedResponse( - ok=web.HTTPOk, - created=web.HTTPCreated, - no_content=web.HTTPNoContent, - not_found=web.HTTPNotFound, - forbidden=web.HTTPForbidden, - locked=HTTPLocked, - accepted=web.HTTPAccepted, - unprocessable=web.HTTPUnprocessableEntity, - not_acceptable=web.HTTPNotAcceptable, - conflict=web.HTTPConflict, - ), - ), - ( - UserRole.TESTER, - ExpectedResponse( - ok=web.HTTPOk, - created=web.HTTPCreated, - no_content=web.HTTPNoContent, - not_found=web.HTTPNotFound, - forbidden=web.HTTPForbidden, - locked=HTTPLocked, - accepted=web.HTTPAccepted, - unprocessable=web.HTTPUnprocessableEntity, - not_acceptable=web.HTTPNotAcceptable, - conflict=web.HTTPConflict, - ), - ), - ], - ) - - -class MockedStorageSubsystem(NamedTuple): - copy_data_folders_from_project: mock.MagicMock - delete_project: mock.MagicMock - delete_node: mock.MagicMock - get_project_total_size_simcore_s3: mock.MagicMock diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_ports_data.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_ports_data.py new file mode 100644 index 00000000000..dd4a7933d42 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_ports_data.py @@ -0,0 +1,53 @@ +from typing import Any, Final + +# Web-server API responses of 
/projects/{project_id}/metadata/ports +# as reponses in this mock. SEE services/web/server/tests/unit/with_dbs/02/test__handlers__ports.py +# NOTE: this could be added as examples in the OAS but for the moment we want to avoid overloading openapi.yml +# in the web-server. +PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA: Final[list[dict[str, Any]]] = [ + { + "key": "38a0d401-af4b-4ea7-ab4c-5005c712a546", + "kind": "input", + "content_schema": { + "description": "Input integer value", + "title": "X", + "type": "integer", + }, + }, + { + "key": "fc48252a-9dbb-4e07-bf9a-7af65a18f612", + "kind": "input", + "content_schema": { + "description": "Input integer value", + "title": "Z", + "type": "integer", + }, + }, + { + "key": "7bf0741f-bae4-410b-b662-fc34b47c27c9", + "kind": "input", + "content_schema": { + "description": "Input boolean value", + "title": "on", + "type": "boolean", + }, + }, + { + "key": "09fd512e-0768-44ca-81fa-0cecab74ec1a", + "kind": "output", + "content_schema": { + "description": "Output integer value", + "title": "Random sleep interval_2", + "type": "integer", + }, + }, + { + "key": "76f607b4-8761-4f96-824d-cab670bc45f5", + "kind": "output", + "content_schema": { + "description": "Output integer value", + "title": "Random sleep interval", + "type": "integer", + }, + }, +] diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/utils_services.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py similarity index 100% rename from packages/pytest-simcore/src/pytest_simcore/helpers/utils_services.py rename to packages/pytest-simcore/src/pytest_simcore/helpers/webserver_fake_services_data.py diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py new file mode 100644 index 00000000000..d055e3a110c --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_login.py @@ -0,0 +1,252 @@ +import contextlib +import re +from collections.abc import AsyncIterator +from datetime import datetime +from typing import Any, TypedDict + +from aiohttp import web +from aiohttp.test_utils import TestClient +from models_library.users import UserID +from servicelib.aiohttp import status +from simcore_service_webserver.db.models import UserRole, UserStatus +from simcore_service_webserver.groups.api import auto_add_user_to_product_group +from simcore_service_webserver.login._constants import MSG_LOGGED_IN +from simcore_service_webserver.login._invitations_service import create_invitation_token +from simcore_service_webserver.login._login_repository_legacy import ( + AsyncpgStorage, + get_plugin_storage, +) +from simcore_service_webserver.products.products_service import list_products +from simcore_service_webserver.security.api import clean_auth_policy_cache +from yarl import URL + +from .assert_checks import assert_status +from .faker_factories import DEFAULT_FAKER, DEFAULT_TEST_PASSWORD, random_user + + +# WARNING: DO NOT use UserDict is already in https://docs.python.org/3/library/collections.html#collections.UserDictclass UserRowDict(TypedDict): +# NOTE: this is modified dict version of packages/postgres-database/src/simcore_postgres_database/models/users.py for testing purposes +class _UserInfoDictRequired(TypedDict, total=True): + id: int + name: str + email: str + primary_gid: str + raw_password: str + status: UserStatus + role: UserRole + + +class UserInfoDict(_UserInfoDictRequired, total=False): + created_at: datetime + password_hash: 
str + first_name: str + last_name: str + phone: str + + +TEST_MARKS = re.compile(r"TEST (\w+):(.*)") + + +def parse_test_marks(text): + """Checs for marks as + + TEST name:123123 + TEST link:some-value + """ + marks = {} + for m in TEST_MARKS.finditer(text): + key, value = m.groups() + marks[key] = value.strip() + return marks + + +def parse_link(text): + link = parse_test_marks(text)["link"] + return URL(link).path + + +async def _create_user(app: web.Application, data=None) -> UserInfoDict: + db: AsyncpgStorage = get_plugin_storage(app) + + # create + data = data or {} + data.setdefault("status", UserStatus.ACTIVE.name) + data.setdefault("role", UserRole.USER.name) + data.setdefault("password", DEFAULT_TEST_PASSWORD) + user = await db.create_user(random_user(**data)) + + # get + user = await db.get_user({"id": user["id"]}) + assert "first_name" in user + assert "last_name" in user + + # adds extras + extras = {"raw_password": data["password"]} + + return UserInfoDict( + **{ + key: user[key] + for key in [ + "id", + "name", + "email", + "primary_gid", + "status", + "role", + "created_at", + "password_hash", + "first_name", + "last_name", + "phone", + ] + }, + **extras, + ) + + +async def _register_user_in_default_product(app: web.Application, user_id: UserID): + products = list_products(app) + assert products + product_name = products[0].name + + return await auto_add_user_to_product_group(app, user_id, product_name=product_name) + + +async def _create_account( + app: web.Application, + user_data: dict[str, Any] | None = None, +) -> UserInfoDict: + # users, groups in db + user = await _create_user(app, user_data) + # user has default product + await _register_user_in_default_product(app, user_id=user["id"]) + return user + + +async def log_client_in( + client: TestClient, + user_data: dict[str, Any] | None = None, + *, + enable_check=True, +) -> UserInfoDict: + assert client.app + + # create account + user = await _create_account(client.app, user_data=user_data) + + # login + url = client.app.router["auth_login"].url_for() + reponse = await client.post( + str(url), + json={ + "email": user["email"], + "password": user["raw_password"], + }, + ) + + if enable_check: + await assert_status(reponse, status.HTTP_200_OK, MSG_LOGGED_IN) + + return user + + +class NewUser: + def __init__( + self, + user_data: dict[str, Any] | None = None, + app: web.Application | None = None, + ): + self.user_data = user_data + self.user = None + assert app + self.db = get_plugin_storage(app) + self.app = app + + async def __aenter__(self) -> UserInfoDict: + self.user = await _create_account(self.app, self.user_data) + return self.user + + async def __aexit__(self, *args): + await self.db.delete_user(self.user) + + +class LoggedUser(NewUser): + def __init__(self, client: TestClient, user_data=None, *, check_if_succeeds=True): + super().__init__(user_data, client.app) + self.client = client + self.enable_check = check_if_succeeds + assert self.client.app + + async def __aenter__(self) -> UserInfoDict: + self.user = await log_client_in( + self.client, self.user_data, enable_check=self.enable_check + ) + return self.user + + async def __aexit__(self, *args): + assert self.client.app + # NOTE: cache key is based on an email. 
If the email is + # reused during the test, then it creates quite some noise + await clean_auth_policy_cache(self.client.app) + return await super().__aexit__(*args) + + +@contextlib.asynccontextmanager +async def switch_client_session_to( + client: TestClient, user: UserInfoDict +) -> AsyncIterator[TestClient]: + assert client.app + + await client.post(f'{client.app.router["auth_logout"].url_for()}') + # sometimes 4xx if user already logged out. Ignore + + resp = await client.post( + f'{client.app.router["auth_login"].url_for()}', + json={ + "email": user["email"], + "password": user["raw_password"], + }, + ) + await assert_status(resp, status.HTTP_200_OK) + + yield client + + resp = await client.post(f'{client.app.router["auth_logout"].url_for()}') + await assert_status(resp, status.HTTP_200_OK) + + +class NewInvitation(NewUser): + def __init__( + self, + client: TestClient, + guest_email: str | None = None, + host: dict | None = None, + trial_days: int | None = None, + extra_credits_in_usd: int | None = None, + ): + assert client.app + super().__init__(user_data=host, app=client.app) + self.client = client + self.tag = f"Created by {guest_email or DEFAULT_FAKER.email()}" + self.confirmation = None + self.trial_days = trial_days + self.extra_credits_in_usd = extra_credits_in_usd + + async def __aenter__(self) -> "NewInvitation": + # creates host user + assert self.client.app + self.user = await _create_user(self.client.app, self.user_data) + + self.confirmation = await create_invitation_token( + self.db, + user_id=self.user["id"], + user_email=self.user["email"], + tag=self.tag, + trial_days=self.trial_days, + extra_credits_in_usd=self.extra_credits_in_usd, + ) + return self + + async def __aexit__(self, *args): + if await self.db.get_confirmation(self.confirmation): + await self.db.delete_confirmation(self.confirmation) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_parametrizations.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_parametrizations.py new file mode 100644 index 00000000000..6422122f4f4 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_parametrizations.py @@ -0,0 +1,122 @@ +from typing import NamedTuple +from unittest import mock + +from servicelib.aiohttp import status +from servicelib.status_codes_utils import get_code_display_name +from simcore_postgres_database.models.users import UserRole + + +class ExpectedResponse(NamedTuple): + """ + Stores respons status to an API request in function of the user + + e.g. 
for a request that normally returns OK, a non-authorized user + will have no access, therefore ExpectedResponse.ok = HTTPUnauthorized + """ + + ok: int + created: int + no_content: int + not_found: int + forbidden: int + locked: int + accepted: int + unprocessable: int + not_acceptable: int + conflict: int + + def __str__(self) -> str: + # pylint: disable=no-member + items = ", ".join( + f"{k}={get_code_display_name(c)}" for k, c in self._asdict().items() + ) + return f"{self.__class__.__name__}({items})" + + +def standard_role_response() -> tuple[str, list[tuple[UserRole, ExpectedResponse]]]: + return ( + "user_role,expected", + [ + ( + UserRole.ANONYMOUS, + ExpectedResponse( + ok=status.HTTP_401_UNAUTHORIZED, + created=status.HTTP_401_UNAUTHORIZED, + no_content=status.HTTP_401_UNAUTHORIZED, + not_found=status.HTTP_401_UNAUTHORIZED, + forbidden=status.HTTP_401_UNAUTHORIZED, + locked=status.HTTP_401_UNAUTHORIZED, + accepted=status.HTTP_401_UNAUTHORIZED, + unprocessable=status.HTTP_401_UNAUTHORIZED, + not_acceptable=status.HTTP_401_UNAUTHORIZED, + conflict=status.HTTP_401_UNAUTHORIZED, + ), + ), + ( + UserRole.GUEST, + ExpectedResponse( + ok=status.HTTP_403_FORBIDDEN, + created=status.HTTP_403_FORBIDDEN, + no_content=status.HTTP_403_FORBIDDEN, + not_found=status.HTTP_403_FORBIDDEN, + forbidden=status.HTTP_403_FORBIDDEN, + locked=status.HTTP_403_FORBIDDEN, + accepted=status.HTTP_403_FORBIDDEN, + unprocessable=status.HTTP_403_FORBIDDEN, + not_acceptable=status.HTTP_403_FORBIDDEN, + conflict=status.HTTP_403_FORBIDDEN, + ), + ), + ( + UserRole.USER, + ExpectedResponse( + ok=status.HTTP_200_OK, + created=status.HTTP_201_CREATED, + no_content=status.HTTP_204_NO_CONTENT, + not_found=status.HTTP_404_NOT_FOUND, + forbidden=status.HTTP_403_FORBIDDEN, + locked=status.HTTP_423_LOCKED, + accepted=status.HTTP_202_ACCEPTED, + unprocessable=status.HTTP_422_UNPROCESSABLE_ENTITY, + not_acceptable=status.HTTP_406_NOT_ACCEPTABLE, + conflict=status.HTTP_409_CONFLICT, + ), + ), + ( + UserRole.TESTER, + ExpectedResponse( + ok=status.HTTP_200_OK, + created=status.HTTP_201_CREATED, + no_content=status.HTTP_204_NO_CONTENT, + not_found=status.HTTP_404_NOT_FOUND, + forbidden=status.HTTP_403_FORBIDDEN, + locked=status.HTTP_423_LOCKED, + accepted=status.HTTP_202_ACCEPTED, + unprocessable=status.HTTP_422_UNPROCESSABLE_ENTITY, + not_acceptable=status.HTTP_406_NOT_ACCEPTABLE, + conflict=status.HTTP_409_CONFLICT, + ), + ), + ], + ) + + +def standard_user_role_response() -> ( + tuple[str, list[tuple[UserRole, ExpectedResponse]]] +): + all_roles = standard_role_response() + return ( + all_roles[0], + [ + (user_role, response) + for user_role, response in all_roles[1] + if user_role in [UserRole.USER] + ], + ) + + +class MockedStorageSubsystem(NamedTuple): + copy_data_folders_from_project: mock.MagicMock + delete_project: mock.MagicMock + delete_node: mock.MagicMock + get_project_total_size_simcore_s3: mock.MagicMock diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py new file mode 100644 index 00000000000..99ee393f394 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_projects.py @@ -0,0 +1,204 @@ +"""helpers to manage the projects's database and produce fixtures/mockup data for testing""" + +# pylint: disable=no-value-for-parameter + +import json +import uuid as uuidlib +from http import HTTPStatus +from pathlib import Path +from typing import Any + +from aiohttp import web +from 
aiohttp.test_utils import TestClient +from common_library.dict_tools import remap_keys +from models_library.projects_nodes_io import NodeID +from models_library.services_resources import ServiceResourcesDictHelpers +from simcore_postgres_database.utils_projects_nodes import ProjectNodeCreate +from simcore_service_webserver.projects._groups_repository import ( + update_or_insert_project_group, +) +from simcore_service_webserver.projects._projects_repository_legacy import ( + APP_PROJECT_DBAPI, + ProjectDBAPI, +) +from simcore_service_webserver.projects._projects_repository_legacy_utils import ( + DB_EXCLUSIVE_COLUMNS, +) +from simcore_service_webserver.projects.models import ProjectDict +from simcore_service_webserver.utils import now_str + +from .assert_checks import assert_status + + +def empty_project_data(): + return { + "uuid": f"project-{uuidlib.uuid4()}", + "name": "Empty name", + "description": "some description of an empty project", + "prjOwner": "I'm the empty project owner, hi!", + "creationDate": now_str(), + "lastChangeDate": now_str(), + "thumbnail": "", + "workbench": {}, + } + + +async def create_project( + app: web.Application, + params_override: dict[str, Any] | None = None, + user_id: int | None = None, + *, + product_name: str, + default_project_json: Path | None = None, + force_uuid: bool = False, + as_template: bool = False, +) -> ProjectDict: + """Injects new project in database for user or as template + + :param params_override: predefined project properties (except for non-writeable e.g. uuid), defaults to None + :type params_override: Dict, optional + :param user_id: assigns this project to user or template project if None, defaults to None + :type user_id: int, optional + :return: schema-compliant project + :rtype: Dict + """ + params_override = params_override or {} + + project_data = {} + if default_project_json is not None: + # uses default_project_json as base + assert default_project_json.exists(), f"{default_project_json}" + project_data = json.loads(default_project_json.read_text()) + + project_data.update(params_override) + + db: ProjectDBAPI = app[APP_PROJECT_DBAPI] + + new_project = await db.insert_project( + project_data, + user_id, + product_name=product_name, + force_project_uuid=force_uuid, + force_as_template=as_template, + # NOTE: fake initial resources until more is needed + project_nodes={ + NodeID(node_id): ProjectNodeCreate( + node_id=NodeID(node_id), + required_resources=ServiceResourcesDictHelpers.model_config[ + "json_schema_extra" + ]["examples"][0], + key=node_info.get("key"), + version=node_info.get("version"), + label=node_info.get("label"), + ) + for node_id, node_info in project_data.get("workbench", {}).items() + }, + ) + + if params_override and ( + params_override.get("access_rights") or params_override.get("accessRights") + ): + _access_rights = params_override.get("access_rights", {}) | params_override.get( + "accessRights", {} + ) + for group_id, permissions in _access_rights.items(): + await update_or_insert_project_group( + app, + project_id=new_project["uuid"], + group_id=int(group_id), + read=permissions["read"], + write=permissions["write"], + delete=permissions["delete"], + ) + + try: + uuidlib.UUID(str(project_data["uuid"])) + assert new_project["uuid"] == project_data["uuid"] + except (ValueError, AssertionError): + # in that case the uuid gets replaced + assert new_project["uuid"] != project_data["uuid"] + project_data["uuid"] = new_project["uuid"] + + for key in DB_EXCLUSIVE_COLUMNS: + project_data.pop(key, None) + + 
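+    # NOTE: the DB row uses the column name "trashed"; expose it under the
+    # camelCase key "trashedAt" that callers of this helper compare against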
new_project: ProjectDict = remap_keys( + new_project, + rename={"trashed": "trashedAt"}, + ) + return new_project + + +async def delete_all_projects(app: web.Application): + from simcore_postgres_database.webserver_models import projects + + db = app[APP_PROJECT_DBAPI] + async with db.engine.acquire() as conn: + query = projects.delete() + await conn.execute(query) + + +class NewProject: + def __init__( + self, + params_override: dict | None = None, + app: web.Application | None = None, + *, + user_id: int, + product_name: str, + tests_data_dir: Path, + force_uuid: bool = False, + as_template: bool = False, + ): + assert app # nosec + + self.params_override = params_override + self.user_id = user_id + self.product_name = product_name + self.app = app + self.prj = {} + self.force_uuid = force_uuid + self.tests_data_dir = tests_data_dir + self.as_template = as_template + + assert tests_data_dir.exists() + assert tests_data_dir.is_dir() + + async def __aenter__(self) -> ProjectDict: + assert self.app # nosec + + self.prj = await create_project( + self.app, + self.params_override, + self.user_id, + product_name=self.product_name, + force_uuid=self.force_uuid, + default_project_json=self.tests_data_dir / "fake-project.json", + as_template=self.as_template, + ) + + return self.prj + + async def __aexit__(self, *args): + assert self.app # nosec + await delete_all_projects(self.app) + + +async def assert_get_same_project( + client: TestClient, + project: ProjectDict, + expected: HTTPStatus, + api_vtag="/v0", +) -> dict: + # GET /v0/projects/{project_id} + + # with a project owned by user + assert client.app + url = client.app.router["get_project"].url_for(project_id=project["uuid"]) + assert str(url) == f"{api_vtag}/projects/{project['uuid']}" + resp = await client.get(f"{url}") + data, error = await assert_status(resp, expected) + + if not error: + assert data == {k: project[k] for k in data} + return data diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py new file mode 100644 index 00000000000..17d8051d096 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_rpc_server.py @@ -0,0 +1,86 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.rest_pagination import PageOffsetInt +from models_library.rpc.webserver.projects import ( + ListProjectsMarkedAsJobRpcFilters, + PageRpcProjectJobRpcGet, + ProjectJobRpcGet, +) +from models_library.rpc_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + PageLimitInt, +) +from models_library.users import UserID +from pydantic import TypeAdapter, validate_call +from pytest_mock import MockType +from servicelib.rabbitmq import RabbitMQRPCClient + + +class WebserverRpcSideEffects: + # pylint: disable=no-self-use + + @validate_call(config={"arbitrary_types_allowed": True}) + async def mark_project_as_job( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + project_uuid: ProjectID, + job_parent_resource_name: str, + ) -> None: + assert rpc_client + + assert not job_parent_resource_name.startswith("/") # nosec + assert "/" in job_parent_resource_name # nosec + assert not job_parent_resource_name.endswith("/") # nosec + 
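+        # i.e. job_parent_resource_name is expected to be a relative resource path:
+        # it contains at least one "/" and has no leading or trailing slash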
+ assert product_name + assert user_id + + TypeAdapter(ProjectID).validate_python(project_uuid) + + @validate_call(config={"arbitrary_types_allowed": True}) + async def list_projects_marked_as_jobs( + self, + rpc_client: RabbitMQRPCClient | MockType, + *, + product_name: ProductName, + user_id: UserID, + # pagination + offset: PageOffsetInt = 0, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + filters: ListProjectsMarkedAsJobRpcFilters | None = None, + ) -> PageRpcProjectJobRpcGet: + assert rpc_client + assert product_name + assert user_id + + if filters and filters.job_parent_resource_name_prefix: + assert not filters.job_parent_resource_name_prefix.startswith("/") + assert not filters.job_parent_resource_name_prefix.endswith("%") + assert not filters.job_parent_resource_name_prefix.startswith("%") + + items = [ + item + for item in ProjectJobRpcGet.model_json_schema()["examples"] + if filters is None + or filters.job_parent_resource_name_prefix is None + or item.get("job_parent_resource_name").startswith( + filters.job_parent_resource_name_prefix + ) + ] + + return PageRpcProjectJobRpcGet.create( + items[offset : offset + limit], + total=len(items), + limit=limit, + offset=offset, + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_tokens.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_tokens.py new file mode 100644 index 00000000000..cce4b12710e --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_tokens.py @@ -0,0 +1,80 @@ +# pylint: disable=E1120 + +import json +import random +from functools import reduce + +import sqlalchemy as sa +from servicelib.common_aiopg_utils import DSN +from servicelib.utils_secrets import generate_password +from simcore_service_webserver.db.models import metadata, tokens, users +from sqlalchemy import JSON, String, cast +from sqlalchemy.sql import and_ # , or_, not_ + + +def create_db_tables(**kargs): + url = DSN.format(**kargs) + engine = sa.create_engine(url, isolation_level="AUTOCOMMIT") + metadata.create_all(bind=engine, tables=[users, tokens], checkfirst=True) + engine.dispose() + return url + + +async def create_token_in_db(engine, **data): + # TODO change by faker? 
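+    # the values below are randomized fake-token defaults; any of them can be
+    # overridden by the caller through **data (params.update(data) is applied last)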
+ params = { + "user_id": random.randint(0, 3), + "token_service": generate_password(5), + "token_data": { + "token_secret": generate_password(3), + "token_key": generate_password(4), + }, + } + params.update(data) + + async with engine.acquire() as conn: + stmt = tokens.insert().values(**params) + result = await conn.execute(stmt) + row = await result.first() + return dict(row) + + +async def get_token_from_db( + engine, *, token_id=None, user_id=None, token_service=None, token_data=None +): + async with engine.acquire() as conn: + expr = to_expression( + token_id=token_id, + user_id=user_id, + token_service=token_service, + token_data=token_data, + ) + stmt = sa.select(tokens).where(expr) + result = await conn.execute(stmt) + row = await result.first() + return dict(row) if row else None + + +async def delete_token_from_db(engine, *, token_id): + expr = tokens.c.token_id == token_id + async with engine.acquire() as conn: + stmt = tokens.delete().where(expr) + await conn.execute(stmt) + + +async def delete_all_tokens_from_db(engine): + async with engine.acquire() as conn: + await conn.execute(tokens.delete()) + + +def to_expression(**params): + expressions = [] + for key, value in params.items(): + if value is not None: + statement = ( + (cast(getattr(tokens.c, key), String) == json.dumps(value)) + if isinstance(getattr(tokens.c, key).type, JSON) + else (getattr(tokens.c, key) == value) + ) + expressions.append(statement) + return reduce(and_, expressions) diff --git a/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py new file mode 100644 index 00000000000..1dbe5ebeb42 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/helpers/webserver_workspaces.py @@ -0,0 +1,43 @@ +import sqlalchemy as sa +from aiohttp import web +from models_library.groups import GroupID +from models_library.workspaces import WorkspaceID +from simcore_postgres_database.models.workspaces_access_rights import ( + workspaces_access_rights, +) +from simcore_service_webserver.db.plugin import get_database_engine +from sqlalchemy.dialects.postgresql import insert as pg_insert + + +async def update_or_insert_workspace_group( + app: web.Application, + workspace_id: WorkspaceID, + group_id: GroupID, + *, + read: bool, + write: bool, + delete: bool, +) -> None: + async with get_database_engine(app).acquire() as conn: + insert_stmt = pg_insert(workspaces_access_rights).values( + workspace_id=workspace_id, + gid=group_id, + read=read, + write=write, + delete=delete, + created=sa.func.now(), + modified=sa.func.now(), + ) + on_update_stmt = insert_stmt.on_conflict_do_update( + index_elements=[ + workspaces_access_rights.c.workspace_id, + workspaces_access_rights.c.gid, + ], + set_={ + "read": insert_stmt.excluded.read, + "write": insert_stmt.excluded.write, + "delete": insert_stmt.excluded.delete, + "modified": sa.func.now(), + }, + ) + await conn.execute(on_update_stmt) diff --git a/packages/pytest-simcore/src/pytest_simcore/httpbin_service.py b/packages/pytest-simcore/src/pytest_simcore/httpbin_service.py new file mode 100644 index 00000000000..6bc71929eb3 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/httpbin_service.py @@ -0,0 +1,64 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import logging +from contextlib import suppress +from typing import Iterable + +import aiohttp.test_utils +import 
docker +import pytest +import requests +import requests.exceptions +from docker.errors import APIError +from pydantic import HttpUrl, TypeAdapter +from tenacity import retry +from tenacity.after import after_log +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from .helpers.host import get_localhost_ip + + +@pytest.fixture(scope="session") +def httpbin_base_url() -> Iterable[HttpUrl]: + """ + Implemented as a fixture since it cannot rely on full availability of https://httpbin.org/ during testing + """ + ip_address = get_localhost_ip() + port = aiohttp.test_utils.unused_port() + base_url = f"http://{ip_address}:{port}" + + client = docker.from_env() + container_name = "httpbin-fixture" + try: + container = client.containers.run( + image="kennethreitz/httpbin", + ports={80: port}, + name=container_name, + detach=True, + ) + print(container) + + @retry( + wait=wait_fixed(1), + retry=retry_if_exception_type(requests.exceptions.RequestException), + stop=stop_after_delay(15), + after=after_log(logging.getLogger(), logging.DEBUG), + ) + def _wait_until_httpbin_is_responsive(): + r = requests.get(f"{base_url}/get", timeout=2) + r.raise_for_status() + + _wait_until_httpbin_is_responsive() + + yield TypeAdapter(HttpUrl).validate_python(base_url) + + finally: + with suppress(APIError): + container = client.containers.get(container_name) + container.remove(force=True) diff --git a/packages/pytest-simcore/src/pytest_simcore/httpx_calls_capture.py b/packages/pytest-simcore/src/pytest_simcore/httpx_calls_capture.py new file mode 100644 index 00000000000..d8cd056c115 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/httpx_calls_capture.py @@ -0,0 +1,280 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +""" +The pytest_simcore.httpx_calls_capture module provides fixtures to capture the calls made by instances of httpx.AsyncClient +when interacting with a real backend. These captures can then be used to create a respx.MockRouter, which emulates the backend while running +your tests. + +This module ensures a reliable reproduction and maintenance of mock responses that reflect the real backend environments used for testing. + +## Setting Up the Module and Spy in Your Test Suite (once) +- Include 'pytest_simcore.httpx_calls_capture' in your `pytest_plugins`. +- Implement `create_httpx_async_client_spy_if_enabled("module.name.httpx.AsyncClient")` within your codebase. + +## Creating Mock Captures (every time you want to create/update the mock) +- Initialize the real backend. +- Execute tests using the command: `pytest --spy-httpx-calls-enabled=true --spy-httpx-calls-capture-path="my-captures.json"`. +- Terminate the real backend once testing is complete. + +## Configuring Tests with Mock Captures (once) +- Transfer `my-captures.json` to the `tests/mocks` directory. +- Utilize `create_respx_mock_from_capture(..., capture_path=".../my-captures.json", ...)` to automatically generate a mock for your tests. + +## Utilizing Mocks (normal test runs) +- Conduct your tests without enabling the spy, i.e., do not use the `--spy-httpx-calls-enabled` flag. 
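+
+## Putting It Together (illustrative sketch only; the spied module path, base_url,
+## capture directory and the `project_tests_dir` fixture below are placeholders)
+
+    # conftest.py
+    pytest_plugins = ["pytest_simcore.httpx_calls_capture"]
+
+    # test_example.py
+    import respx
+
+    def test_with_backend_mock(
+        create_httpx_async_client_spy_if_enabled,
+        create_respx_mock_from_capture,
+        project_tests_dir,  # hypothetical fixture pointing to this test-suite's folder
+    ):
+        # spies httpx.AsyncClient only when --spy-httpx-calls-enabled=true
+        create_httpx_async_client_spy_if_enabled("my_service.clients.httpx.AsyncClient")
+
+        # replays the stored capture when the spy is disabled (normal test runs)
+        create_respx_mock_from_capture(
+            respx_mocks=[respx.mock(base_url="http://my-backend:8000")],  # base_url must be set
+            capture_path=project_tests_dir / "mocks" / "my-captures.json",
+            side_effects_callbacks=[],  # or one callback per captured call
+        )
+        # ... exercise the code that uses httpx.AsyncClient ...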
+ + +""" + +import json +from collections.abc import Callable +from pathlib import Path +from typing import Any, get_args + +import httpx +import pytest +import respx +import yaml +from pydantic import TypeAdapter +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.docker import get_service_published_port +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.httpx_client_base_dev import AsyncClientCaptureWrapper +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict + +from .helpers.httpx_calls_capture_models import ( + CreateRespxMockCallback, + HttpApiCallCaptureModel, + PathDescription, + SideEffectCallback, +) +from .helpers.httpx_calls_capture_openapi import ServiceHostNames + +_DEFAULT_CAPTURE_PATHNAME = "spy-httpx-calls-capture-path.json" + + +def pytest_addoption(parser: pytest.Parser): + simcore_group = parser.getgroup("simcore") + simcore_group.addoption( + "--spy-httpx-calls-enabled", + action="store", + type=bool, + default=False, + help="If set, it activates a capture mechanism while the tests is running that can be used to generate mock data in respx", + ) + simcore_group.addoption( + "--spy-httpx-calls-capture-path", + action="store", + type=Path, + default=None, + help=f"Path to json file to store capture calls from httpx clients during the tests. Otherwise using a temporary path named {_DEFAULT_CAPTURE_PATHNAME}", + ) + + +@pytest.fixture(scope="session") +def spy_httpx_calls_enabled(request: pytest.FixtureRequest) -> bool: + return bool(request.config.getoption("--spy-httpx-calls-enabled")) + + +@pytest.fixture(scope="session") +def spy_httpx_calls_capture_path( + request: pytest.FixtureRequest, tmp_path_factory: pytest.TempPathFactory +) -> Path: + if capture_path := request.config.getoption("--spy-httpx-calls-capture-path"): + assert isinstance(capture_path, Path) + else: + capture_path = ( + tmp_path_factory.mktemp("session_fixture_spy_httpx_calls_capture_path") + / _DEFAULT_CAPTURE_PATHNAME + ) + assert capture_path.suffix == ".json" + capture_path.touch() + return capture_path.resolve() + + +@pytest.fixture(scope="session") +def services_mocks_enabled(spy_httpx_calls_enabled: bool) -> bool: + """Flag to indicate when to enable/disable service mocks when httpx calls are disabled/enabled""" + return not spy_httpx_calls_enabled + + +@pytest.fixture +def create_httpx_async_client_spy_if_enabled( + mocker: MockerFixture, + spy_httpx_calls_enabled: bool, + spy_httpx_calls_capture_path: Path, +) -> Callable[[str], MockType | None]: + + assert spy_httpx_calls_capture_path + + def _(spy_target: str) -> MockType | None: + + assert spy_target + assert isinstance(spy_target, str) + assert spy_target.endswith( + "AsyncClient" + ), "Expects AsyncClient instance as spy target" + + if spy_httpx_calls_enabled: + print( + f"🚨 Spying httpx calls of '{spy_target}'.", + f"Saving captures dumped at '{spy_httpx_calls_capture_path}'", + " ...", + ) + + def _wrapper(*args, **kwargs): + assert not args, "AsyncClient should be called only with key-arguments" + return AsyncClientCaptureWrapper( + capture_file=spy_httpx_calls_capture_path, **kwargs + ) + + spy: MockType = mocker.patch(spy_target, side_effect=_wrapper) + spy.httpx_calls_capture_path = spy_httpx_calls_capture_path + + # TODO: respx.api.stop(clear=False, reset=False) + + return spy + return None + + return _ + + +@pytest.fixture +def backend_env_vars_overrides( + services_mocks_enabled: bool, + osparc_simcore_root_dir: Path, +) -> EnvVarsDict: + """If 
--spy_httpx_calls_enabled=true, then it returns the env vars (i.e. host and port) pointing to the **REAL** back-end services + , otherwise it returns an empty dict + """ + overrides = {} + if not services_mocks_enabled: + try: + content = yaml.safe_load( + (osparc_simcore_root_dir / ".stack-simcore-production.yml").read_text() + ) + except FileNotFoundError as err: + pytest.fail( + f"Cannot run --spy_httpx_calls_enabled=true without deploying osparc-simcore locally\n. TIP: run `make prod-up`\n{err}" + ) + + for name in get_args(ServiceHostNames): + prefix = name.replace("-", "_").upper() + for ports in content["services"][name]["ports"]: + target = ports["target"] + if target in (8000, 8080): + published = get_service_published_port(f"simcore_{name}", target) + overrides[f"{prefix}_HOST"] = get_localhost_ip() + overrides[f"{prefix}_PORT"] = str(published) + return overrides + + +class _CaptureSideEffect: + def __init__( + self, + capture: HttpApiCallCaptureModel, + side_effect: SideEffectCallback | None, + ): + self._capture = capture + self._side_effect_callback = side_effect + + def __call__(self, request: httpx.Request, **kwargs) -> httpx.Response: + capture = self._capture + assert isinstance(capture.path, PathDescription) + status_code: int = capture.status_code + response_body: dict[str, Any] | list | None = capture.response_body + assert {param.name for param in capture.path.path_parameters} == set( + kwargs.keys() + ) + if self._side_effect_callback: + response_body = self._side_effect_callback(request, kwargs, capture) + return httpx.Response(status_code=status_code, json=response_body) + + +@pytest.fixture +def create_respx_mock_from_capture( + services_mocks_enabled: bool, +) -> CreateRespxMockCallback: + """Creates a respx.MockRouter from httpx calls captures in capture_path **ONLY** + if spy_httpx_calls_enabled=False otherwise it skips this fixture + """ + + # NOTE: multiple improvements on this function planed in https://github.com/ITISFoundation/osparc-simcore/issues/5705 + def _( + respx_mocks: list[respx.MockRouter], + capture_path: Path, + side_effects_callbacks: list[SideEffectCallback], + ) -> list[respx.MockRouter]: + + assert capture_path.is_file() + assert capture_path.suffix == ".json" + + if services_mocks_enabled: + captures: list[HttpApiCallCaptureModel] = TypeAdapter( + list[HttpApiCallCaptureModel] + ).validate_python(json.loads(capture_path.read_text())) + + if len(side_effects_callbacks) > 0: + assert len(side_effects_callbacks) == len(captures) + + assert isinstance(respx_mocks, list) + for respx_router_mock in respx_mocks: + assert ( + respx_router_mock._bases + ), "the base_url must be set before the fixture is extended" + respx_router_mock._assert_all_mocked = services_mocks_enabled + + def _get_correct_mock_router_for_capture( + respx_mock: list[respx.MockRouter], capture: HttpApiCallCaptureModel + ) -> respx.MockRouter: + for router in respx_mock: + if capture.host == router._bases["host"].value: + return router + msg = f"Missing respx.MockRouter for capture with {capture.host}" + raise RuntimeError(msg) + + side_effects: list[_CaptureSideEffect] = [] + for ii, capture in enumerate(captures): + url_path: PathDescription | str = capture.path + assert isinstance(url_path, PathDescription) + + # path + path_regex = url_path.to_path_regex() + + # response + side_effect = _CaptureSideEffect( + capture=capture, + side_effect=( + side_effects_callbacks[ii] + if len(side_effects_callbacks) + else None + ), + ) + + respx_router_mock = 
_get_correct_mock_router_for_capture( + respx_mocks, capture + ) + r = respx_router_mock.request( + capture.method.upper(), + url=None, + path__regex=f"^{path_regex}$", + ).mock(side_effect=side_effect) + + assert r.side_effect == side_effect + side_effects.append(side_effect) + else: + # Disabling mocks since it will use real API + for respx_router_mock in respx_mocks: + # SEE https://github.com/pcrespov/sandbox-python/blob/f650aad57aced304aac9d0ad56c00723d2274ad0/respx-lib/test_disable_mock.py + respx_router_mock.stop() + + print( + f"πŸ”Š Disabling mocks respx.MockRouter from {capture_path.name} since --spy-httpx-calls-enabled=true" + ) + + return respx_mocks + + return _ diff --git a/packages/pytest-simcore/src/pytest_simcore/hypothesis_type_strategies.py b/packages/pytest-simcore/src/pytest_simcore/hypothesis_type_strategies.py index ad80ab57774..788a0d36dab 100644 --- a/packages/pytest-simcore/src/pytest_simcore/hypothesis_type_strategies.py +++ b/packages/pytest-simcore/src/pytest_simcore/hypothesis_type_strategies.py @@ -1,9 +1,13 @@ from hypothesis import provisional from hypothesis import strategies as st -from pydantic import AnyHttpUrl, AnyUrl, HttpUrl +from hypothesis.strategies import composite +from pydantic import TypeAdapter +from pydantic_core import Url -# FIXME: For now it seems the pydantic hypothesis plugin does not provide strategies for these types. -# therefore we currently provide it -st.register_type_strategy(AnyUrl, provisional.urls()) -st.register_type_strategy(HttpUrl, provisional.urls()) -st.register_type_strategy(AnyHttpUrl, provisional.urls()) + +@composite +def url_strategy(draw): + return TypeAdapter(Url).validate_python(draw(provisional.urls())) + + +st.register_type_strategy(Url, url_strategy()) diff --git a/packages/pytest-simcore/src/pytest_simcore/minio_service.py b/packages/pytest-simcore/src/pytest_simcore/minio_service.py index c24c3d639dc..d91183d3a28 100644 --- a/packages/pytest-simcore/src/pytest_simcore/minio_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/minio_service.py @@ -1,110 +1,40 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import logging -from typing import Any, Iterator - -import pytest -from minio import Minio -from minio.datatypes import Object -from minio.deleteobjects import DeleteError, DeleteObject -from pydantic import parse_obj_as -from pytest import MonkeyPatch -from tenacity import Retrying -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed - -from .helpers.utils_docker import get_localhost_ip, get_service_published_port - -log = logging.getLogger(__name__) - - -def _ensure_remove_bucket(client: Minio, bucket_name: str): - if client.bucket_exists(bucket_name): - # remove content - objs: Iterator[Object] = client.list_objects( - bucket_name, prefix=None, recursive=True - ) - - # FIXME: minio 7.1.0 does NOT remove all objects!? 
Added in requirements/constraints.txt - to_delete = [DeleteObject(o.object_name) for o in objs] - errors: Iterator[DeleteError] = client.remove_objects(bucket_name, to_delete) - - list_of_errors = list(errors) - assert not any(list_of_errors), list(list_of_errors) - - # remove bucket - client.remove_bucket(bucket_name) - - assert not client.bucket_exists(bucket_name) - - -@pytest.fixture(scope="module") -def minio_config( - docker_stack: dict, testing_environ_vars: dict, monkeypatch_module: MonkeyPatch -) -> dict[str, Any]: - assert "pytest-ops_minio" in docker_stack["services"] - - config = { - "client": { - "endpoint": f"{get_localhost_ip()}:{get_service_published_port('minio')}", - "access_key": testing_environ_vars["S3_ACCESS_KEY"], - "secret_key": testing_environ_vars["S3_SECRET_KEY"], - "secure": parse_obj_as(bool, testing_environ_vars["S3_SECURE"]), - }, - "bucket_name": testing_environ_vars["S3_BUCKET_NAME"], - } - - # nodeports takes its configuration from env variables - for key, value in config["client"].items(): - monkeypatch_module.setenv(f"S3_{key.upper()}", str(value)) - - monkeypatch_module.setenv("S3_SECURE", testing_environ_vars["S3_SECURE"]) - monkeypatch_module.setenv("S3_BUCKET_NAME", config["bucket_name"]) - - return config - - -@pytest.fixture(scope="module") -def minio_service(minio_config: dict[str, str]) -> Iterator[Minio]: - - client = Minio(**minio_config["client"]) - - for attempt in Retrying( - wait=wait_fixed(5), - stop=stop_after_attempt(60), - before_sleep=before_sleep_log(log, logging.WARNING), - reraise=True, - ): - with attempt: - # TODO: improve as https://docs.min.io/docs/minio-monitoring-guide.html - if not client.bucket_exists("pytest"): - client.make_bucket("pytest") - client.remove_bucket("pytest") - - bucket_name = minio_config["bucket_name"] - - # cleans up in case a failing tests left this bucket - _ensure_remove_bucket(client, bucket_name) - - client.make_bucket(bucket_name) - assert client.bucket_exists(bucket_name) - - yield client - - # cleanup upon tear-down - _ensure_remove_bucket(client, bucket_name) - - -@pytest.fixture -def bucket(minio_config: dict[str, str], minio_service: Minio) -> Iterator[str]: - bucket_name = minio_config["bucket_name"] - - _ensure_remove_bucket(minio_service, bucket_name) - minio_service.make_bucket(bucket_name) - - yield bucket_name - - _ensure_remove_bucket(minio_service, bucket_name) +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + +import pytest +from faker import Faker +from pydantic import AnyHttpUrl, TypeAdapter +from pytest_simcore.helpers.docker import get_service_published_port +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.s3 import S3Settings + + +@pytest.fixture +def minio_s3_settings( + docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict, faker: Faker +) -> S3Settings: + assert "pytest-ops_minio" in docker_stack["services"] + + return S3Settings( + S3_ACCESS_KEY=env_vars_for_docker_compose["S3_ACCESS_KEY"], + S3_SECRET_KEY=env_vars_for_docker_compose["S3_SECRET_KEY"], + S3_ENDPOINT=TypeAdapter(AnyHttpUrl).validate_python( + f"http://{get_localhost_ip()}:{get_service_published_port('minio')}" + ), + S3_BUCKET_NAME=env_vars_for_docker_compose["S3_BUCKET_NAME"], + S3_REGION="us-east-1", + ) + + +@pytest.fixture +def minio_s3_settings_envs( + minio_s3_settings: S3Settings, 
+ monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + changed_envs: EnvVarsDict = minio_s3_settings.model_dump( + mode="json", exclude_unset=True + ) + return setenvs_from_dict(monkeypatch, changed_envs) diff --git a/packages/pytest-simcore/src/pytest_simcore/monkeypatch_extra.py b/packages/pytest-simcore/src/pytest_simcore/monkeypatch_extra.py deleted file mode 100644 index 23cdc565642..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/monkeypatch_extra.py +++ /dev/null @@ -1,35 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import warnings -from typing import Iterator - -import pytest -from pytest import FixtureRequest, MonkeyPatch - -warnings.warn( - f"{__name__} is deprecated, we highly recommend to use pytest.monkeypatch at function-scope level." - "Large scopes lead to complex problems during tests", - DeprecationWarning, -) -# Some extras to overcome https://github.com/pytest-dev/pytest/issues/363 -# SEE https://github.com/pytest-dev/pytest/issues/363#issuecomment-289830794 - - -@pytest.fixture(scope="session") -def monkeypatch_session(request: FixtureRequest) -> Iterator[MonkeyPatch]: - assert request.scope == "session" - - mpatch_session = MonkeyPatch() - yield mpatch_session - mpatch_session.undo() - - -@pytest.fixture(scope="module") -def monkeypatch_module(request: FixtureRequest) -> Iterator[MonkeyPatch]: - assert request.scope == "module" - - mpatch_module = MonkeyPatch() - yield mpatch_module - mpatch_module.undo() diff --git a/packages/pytest-simcore/src/pytest_simcore/openapi_specs.py b/packages/pytest-simcore/src/pytest_simcore/openapi_specs.py new file mode 100644 index 00000000000..fd9e6b878d6 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/openapi_specs.py @@ -0,0 +1,113 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import json +from collections.abc import Callable +from copy import deepcopy +from pathlib import Path +from typing import Any, NamedTuple + +import jsonref +import pytest +import yaml + +try: + from aiohttp import web + + has_aiohttp = True +except ImportError: + has_aiohttp = False + + +class Entrypoint(NamedTuple): + name: str + method: str + path: str + + +@pytest.fixture(scope="session") +def openapi_specs_path() -> Path: + # NOTE: cannot be defined as a session scope because it is designed to be overriden + pytest.fail(reason="Must be overriden in caller test suite") + + +def _load(file: Path, base_uri: str = "") -> dict: + match file.suffix: + case ".yaml" | ".yml": + loaded = yaml.safe_load(file.read_text()) + case "json": + loaded = json.loads(file.read_text()) + case _: + msg = f"Expect yaml or json, got {file.suffix}" + raise ValueError(msg) + + # SEE https://jsonref.readthedocs.io/en/latest/#lazy-load-and-load-on-repr + data: dict = jsonref.replace_refs( # type: ignore + loaded, + base_uri=base_uri, + lazy_load=True, # this data will be iterated + merge_props=False, + ) + return data + + +@pytest.fixture(scope="session") +def openapi_specs(openapi_specs_path: Path) -> dict[str, Any]: + assert openapi_specs_path.is_file() + openapi: dict[str, Any] = _load( + openapi_specs_path, base_uri=openapi_specs_path.as_uri() + ) + return deepcopy(openapi) + + +@pytest.fixture(scope="session") +def openapi_specs_entrypoints( + openapi_specs: dict, +) -> set[Entrypoint]: + entrypoints: set[Entrypoint] = set() + + # openapi-specifications, i.e. 
"contract" + for path, path_obj in openapi_specs["paths"].items(): + for operation, operation_obj in path_obj.items(): + entrypoints.add( + Entrypoint( + method=operation.upper(), + path=path, + name=operation_obj["operationId"], + ) + ) + return entrypoints + + +if has_aiohttp: + + @pytest.fixture + def create_aiohttp_app_rest_entrypoints() -> Callable[ + [web.Application], set[Entrypoint] + ]: + def _(app: web.Application): + entrypoints: set[Entrypoint] = set() + + # app routes, i.e. "exposed" + for resource_name, resource in app.router.named_resources().items(): + resource_path = resource.canonical + for route in resource: + assert route.name == resource_name + assert route.resource + assert route.name is not None + + if route.method == "HEAD": + continue + + entrypoints.add( + Entrypoint( + method=route.method, + path=resource_path, + name=route.name, + ) + ) + return entrypoints + + return _ diff --git a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py index 447e4674d0e..19d9247e8ea 100644 --- a/packages/pytest-simcore/src/pytest_simcore/postgres_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/postgres_service.py @@ -2,40 +2,41 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from typing import AsyncIterator, Final, Iterator, TypedDict +import json +import warnings +from collections.abc import AsyncIterator, Iterator +from typing import Final +import docker import pytest import sqlalchemy as sa import tenacity -from servicelib.json_serialization import json_dumps -from sqlalchemy.orm import sessionmaker +from sqlalchemy.ext.asyncio import AsyncEngine from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -from .helpers.utils_docker import get_localhost_ip, get_service_published_port -from .helpers.utils_postgres import migrated_pg_tables_context +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip +from .helpers.monkeypatch_envs import setenvs_from_dict +from .helpers.postgres_tools import PostgresTestConfig, migrated_pg_tables_context +from .helpers.typing_env import EnvVarsDict -TEMPLATE_DB_TO_RESTORE = "template_simcore_db" +_TEMPLATE_DB_TO_RESTORE = "template_simcore_db" -class PostgresTestConfig(TypedDict): - user: str - password: str - database: str - host: str - port: str - - -def execute_queries( +def _execute_queries( postgres_engine: sa.engine.Engine, sql_statements: list[str], + *, ignore_errors: bool = False, ) -> None: """runs the queries in the list in order""" - with postgres_engine.connect() as con: + with postgres_engine.connect() as connection: for statement in sql_statements: try: - con.execution_options(autocommit=True).execute(statement) + with connection.begin(): + connection.execute(statement) + except Exception as e: # pylint: disable=broad-except # when running tests initially the TEMPLATE_DB_TO_RESTORE dose not exist and will cause an error # which can safely be ignored. 
The debug message is here to catch future errors which and @@ -43,7 +44,7 @@ def execute_queries( print(f"SQL error which can be ignored {e}") -def create_template_db( +def _create_template_db( postgres_dsn: PostgresTestConfig, postgres_engine: sa.engine.Engine ) -> None: # create a template db, the removal is necessary to allow for the usage of --keep-docker-up @@ -52,30 +53,30 @@ def create_template_db( f""" SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '{postgres_dsn["database"]}' AND pid <> pg_backend_pid(); - """, + """, # noqa: S608 # drop template database - f"ALTER DATABASE {TEMPLATE_DB_TO_RESTORE} is_template false;", - f"DROP DATABASE {TEMPLATE_DB_TO_RESTORE};", + f"ALTER DATABASE {_TEMPLATE_DB_TO_RESTORE} is_template false;", + f"DROP DATABASE {_TEMPLATE_DB_TO_RESTORE};", # create template database """ CREATE DATABASE {template_db} WITH TEMPLATE {original_db} OWNER {db_user}; """.format( - template_db=TEMPLATE_DB_TO_RESTORE, + template_db=_TEMPLATE_DB_TO_RESTORE, original_db=postgres_dsn["database"], db_user=postgres_dsn["user"], ), ] - execute_queries(postgres_engine, queries, ignore_errors=True) + _execute_queries(postgres_engine, queries, ignore_errors=True) -def drop_template_db(postgres_engine: sa.engine.Engine) -> None: +def _drop_template_db(postgres_engine: sa.engine.Engine) -> None: # remove the template db queries = [ # drop template database - f"ALTER DATABASE {TEMPLATE_DB_TO_RESTORE} is_template false;", - f"DROP DATABASE {TEMPLATE_DB_TO_RESTORE};", + f"ALTER DATABASE {_TEMPLATE_DB_TO_RESTORE} is_template false;", + f"DROP DATABASE {_TEMPLATE_DB_TO_RESTORE};", ] - execute_queries(postgres_engine, queries) + _execute_queries(postgres_engine, queries) @pytest.fixture(scope="module") @@ -84,9 +85,9 @@ def postgres_with_template_db( postgres_dsn: PostgresTestConfig, postgres_engine: sa.engine.Engine, ) -> Iterator[sa.engine.Engine]: - create_template_db(postgres_dsn, postgres_engine) + _create_template_db(postgres_dsn, postgres_engine) yield postgres_engine - drop_template_db(postgres_engine) + _drop_template_db(postgres_engine) @pytest.fixture @@ -120,27 +121,29 @@ def database_from_template_before_each_function( f""" SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '{postgres_dsn["database"]}'; - """, + """, # noqa: S608 # drop database f"DROP DATABASE {postgres_dsn['database']};", # create from template database - f"CREATE DATABASE {postgres_dsn['database']} TEMPLATE template_simcore_db;", + f"CREATE DATABASE {postgres_dsn['database']} TEMPLATE {_TEMPLATE_DB_TO_RESTORE};", ] - execute_queries(drop_db_engine, queries) + _execute_queries(drop_db_engine, queries) @pytest.fixture(scope="module") -def postgres_dsn(docker_stack: dict, testing_environ_vars: dict) -> PostgresTestConfig: +def postgres_dsn( + docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict +) -> PostgresTestConfig: assert "pytest-simcore_postgres" in docker_stack["services"] pg_config: PostgresTestConfig = { - "user": testing_environ_vars["POSTGRES_USER"], - "password": testing_environ_vars["POSTGRES_PASSWORD"], - "database": testing_environ_vars["POSTGRES_DB"], + "user": env_vars_for_docker_compose["POSTGRES_USER"], + "password": env_vars_for_docker_compose["POSTGRES_PASSWORD"], + "database": env_vars_for_docker_compose["POSTGRES_DB"], "host": get_localhost_ip(), "port": get_service_published_port( - "postgres", testing_environ_vars["POSTGRES_PORT"] + "postgres", 
env_vars_for_docker_compose["POSTGRES_PORT"] ), } @@ -157,6 +160,8 @@ def postgres_engine(postgres_dsn: PostgresTestConfig) -> Iterator[sa.engine.Engi ) engine = sa.create_engine(dsn, isolation_level="AUTOCOMMIT") + assert isinstance(engine, sa.engine.Engine) # nosec + # Attempts until responsive for attempt in tenacity.Retrying( wait=wait_fixed(1), @@ -167,9 +172,9 @@ def postgres_engine(postgres_dsn: PostgresTestConfig) -> Iterator[sa.engine.Engi print( f"--> Connecting to {dsn}, attempt {attempt.retry_state.attempt_number}..." ) - with engine.connect() as conn: + with engine.connect(): print( - f"Connection to {dsn} succeeded [{json_dumps(attempt.retry_state.retry_object.statistics)}]" + f"Connection to {dsn} succeeded [{json.dumps(attempt.retry_state.retry_object.statistics)}]" ) yield engine @@ -188,14 +193,18 @@ def postgres_dsn_url(postgres_dsn: PostgresTestConfig) -> str: def postgres_db( postgres_dsn: PostgresTestConfig, postgres_engine: sa.engine.Engine, + docker_client: docker.DockerClient, ) -> Iterator[sa.engine.Engine]: - """An postgres database init with empty tables and an sqlalchemy engine connected to it""" + """ + A postgres database init with empty tables + and an sqlalchemy engine connected to it + """ with migrated_pg_tables_context(postgres_dsn.copy()): yield postgres_engine -@pytest.fixture(scope="function") +@pytest.fixture async def aiopg_engine( postgres_db: sa.engine.Engine, ) -> AsyncIterator: @@ -204,6 +213,12 @@ async def aiopg_engine( engine = await create_engine(str(postgres_db.url)) + warnings.warn( + "The 'aiopg_engine' fixture is deprecated and will be removed in a future release. " + "Please use 'asyncpg_engine' fixture instead.", + DeprecationWarning, + stacklevel=2, + ) yield engine if engine: @@ -211,10 +226,10 @@ async def aiopg_engine( await engine.wait_closed() -@pytest.fixture(scope="function") +@pytest.fixture async def sqlalchemy_async_engine( postgres_db: sa.engine.Engine, -) -> AsyncIterator: +) -> AsyncIterator[AsyncEngine]: # NOTE: prevent having to import this if latest sqlalchemy not installed from sqlalchemy.ext.asyncio import create_async_engine @@ -227,29 +242,24 @@ async def sqlalchemy_async_engine( await engine.dispose() -@pytest.fixture(scope="function") +@pytest.fixture +def postgres_env_vars_dict(postgres_dsn: PostgresTestConfig) -> EnvVarsDict: + return { + "POSTGRES_USER": postgres_dsn["user"], + "POSTGRES_PASSWORD": postgres_dsn["password"], + "POSTGRES_DB": postgres_dsn["database"], + "POSTGRES_HOST": postgres_dsn["host"], + "POSTGRES_PORT": f"{postgres_dsn['port']}", + "POSTGRES_ENDPOINT": f"{postgres_dsn['host']}:{postgres_dsn['port']}", + } + + +@pytest.fixture def postgres_host_config( - postgres_dsn: PostgresTestConfig, monkeypatch + postgres_dsn: PostgresTestConfig, + postgres_env_vars_dict: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, ) -> PostgresTestConfig: """sets postgres env vars and returns config""" - monkeypatch.setenv("POSTGRES_USER", postgres_dsn["user"]) - monkeypatch.setenv("POSTGRES_PASSWORD", postgres_dsn["password"]) - monkeypatch.setenv("POSTGRES_DB", postgres_dsn["database"]) - monkeypatch.setenv("POSTGRES_HOST", postgres_dsn["host"]) - monkeypatch.setenv("POSTGRES_PORT", str(postgres_dsn["port"])) - monkeypatch.setenv( - "POSTGRES_ENDPOINT", f"{postgres_dsn['host']}:{postgres_dsn['port']}" - ) + setenvs_from_dict(monkeypatch, postgres_env_vars_dict) return postgres_dsn - - -@pytest.fixture(scope="module") -def postgres_session(postgres_db: sa.engine.Engine) -> Iterator[sa.orm.session.Session]: - 
from sqlalchemy.orm.session import Session - - Session_cls = sessionmaker(postgres_db) - session: Session = Session_cls() - - yield session - - session.close() # pylint: disable=no-member diff --git a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py index 2270b5be438..e8691a10724 100644 --- a/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py +++ b/packages/pytest-simcore/src/pytest_simcore/pydantic_models.py @@ -1,9 +1,17 @@ import copy +import importlib import inspect -from typing import Any, Iterator, NamedTuple +import itertools +import pkgutil +import warnings +from collections.abc import Iterator +from contextlib import suppress +from types import ModuleType +from typing import Any, NamedTuple, TypeVar import pytest -from pydantic import BaseModel +from common_library.json_serialization import json_dumps +from pydantic import BaseModel, ValidationError def is_strict_inner(outer_cls: type, inner_cls: type) -> bool: @@ -29,48 +37,129 @@ class ModelExample(NamedTuple): example_data: Any -def iter_model_examples_in_module(module: object) -> Iterator[ModelExample]: - """Iterates on all examples defined as BaseModelClass.Config.schema_extra["example"] +def iter_examples( + *, model_cls: type[BaseModel], examples: list[Any] +) -> Iterator[ModelExample]: + for k, data in enumerate(examples): + yield ModelExample( + model_cls=model_cls, example_name=f"example_{k}", example_data=data + ) + + +def walk_model_examples_in_package(package: ModuleType) -> Iterator[ModelExample]: + """Walks recursively all sub-modules and collects BaseModel.Config examples""" + assert inspect.ismodule(package) + + yield from itertools.chain( + *( + iter_model_examples_in_module(importlib.import_module(submodule.name)) + for submodule in pkgutil.walk_packages( + package.__path__, + package.__name__ + ".", + ) + ) + ) +def iter_model_examples_in_module(module: object) -> Iterator[ModelExample]: + """Iterates on all examples defined as BaseModelClass.model_json_schema()["example"] + Usage: + import some_package.some_module @pytest.mark.parametrize( "model_cls, example_name, example_data", - iter_examples(simcore_service_webserver.storage_schemas), + iter_model_examples_in_module(some_package.some_module), ) def test_model_examples( - model_cls: type[BaseModel], example_name: int, example_data: Any + model_cls: type[BaseModel], example_name: str, example_data: Any ): - print(example_name, ":", json.dumps(example_data)) - assert model_cls.parse_obj(example_data) + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) + """ + + def _is_model_cls(obj) -> bool: + with suppress(TypeError): + # NOTE: issubclass( dict[models_library.services.ConstrainedStrValue, models_library.services.ServiceInput] ) raises TypeError + is_parametrized = False + if hasattr(obj, "__parameters__"): + is_parametrized = len(obj.__parameters__) == 0 + return ( + obj is not BaseModel + and inspect.isclass(obj) + and issubclass(obj, BaseModel) + and not is_parametrized + ) + return False + assert inspect.ismodule(module) - for model_name, model_cls in inspect.getmembers( - module, lambda obj: inspect.isclass(obj) and issubclass(obj, BaseModel) - ): - assert model_name # nosec - if ( - (config_cls := getattr(model_cls, "Config")) - and inspect.isclass(config_cls) - and is_strict_inner(model_cls, config_cls) - and (schema_extra := getattr(config_cls, "schema_extra", {})) + for model_name, model_cls in 
inspect.getmembers(module, _is_model_cls): + + yield from iter_model_examples_in_class(model_cls, model_name) + + +def iter_model_examples_in_class( + model_cls: type[BaseModel], model_name: str | None = None +) -> Iterator[ModelExample]: + """Iterates on all examples within a base model class + + Usage: + + @pytest.mark.parametrize( + "model_cls, example_name, example_data", + iter_model_examples_in_class(SomeModelClass), + ) + def test_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any ): - if "example" in schema_extra: - yield ModelExample( - model_cls=model_cls, - example_name="example", - example_data=schema_extra["example"], - ) - - elif "examples" in schema_extra: - for index, example in enumerate(schema_extra["examples"]): - yield ModelExample( - model_cls=model_cls, - example_name=f"examples_{index}", - example_data=example, - ) + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) + + """ + assert issubclass(model_cls, BaseModel) # nosec + + if model_name is None: + model_name = f"{model_cls.__module__}.{model_cls.__name__}" + + schema = model_cls.model_json_schema() + + if example := schema.get("example"): + yield ModelExample( + model_cls=model_cls, + example_name=f"{model_name}_example", + example_data=example, + ) + + if many_examples := schema.get("examples"): + for index, example in enumerate(many_examples): + yield ModelExample( + model_cls=model_cls, + example_name=f"{model_name}_examples_{index}", + example_data=example, + ) + + +TBaseModel = TypeVar("TBaseModel", bound=BaseModel) + + +def assert_validation_model( + model_cls: type[TBaseModel], example_name: str, example_data: Any +) -> TBaseModel: + try: + model_instance = model_cls.model_validate(example_data) + except ValidationError as err: + pytest.fail( + f"{example_name} is invalid {model_cls.__module__}.{model_cls.__name__}:" + f"\n{json_dumps(example_data, indent=1)}" + f"\nError: {err}" + ) + + assert isinstance(model_instance, model_cls) + return model_instance ## PYDANTIC MODELS & SCHEMAS ----------------------------------------------------- @@ -81,7 +170,12 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]: """ Extracts examples from pydantic model class Config """ - + warnings.warn( + "The 'model_cls_examples' fixture is deprecated and will be removed in a future version. " + "Please use 'iter_model_example_in_class' or 'iter_model_examples_in_module' as an alternative.", + DeprecationWarning, + stacklevel=2, + ) # Use by defining model_cls as test parametrization assert model_cls, ( f"Testing against a {model_cls} model that has NO examples. Add them in Config class. " @@ -89,8 +183,10 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]: "SEE https://pydantic-docs.helpmanual.io/usage/schema/#schema-customization" ) + json_schema: dict = model_cls.model_json_schema() + # checks exampleS setup in schema_extra - examples_list = copy.deepcopy(model_cls.Config.schema_extra.get("examples", [])) + examples_list = copy.deepcopy(json_schema.get("examples", [])) assert isinstance(examples_list, list), ( "OpenAPI and json-schema differ regarding the format for exampleS." "The former is a dict and the latter an array. 
" @@ -99,15 +195,12 @@ def model_cls_examples(model_cls: type[BaseModel]) -> dict[str, dict[str, Any]]: "SEE https://swagger.io/docs/specification/adding-examples/" ) - # check example in schema_extra - example = copy.deepcopy(model_cls.Config.schema_extra.get("example")) - # collect all examples and creates fixture -> {example-name: example, ...} examples = { - f"{model_cls.__name__}.example[{index}]": example - for index, example in enumerate(examples_list) + f"{model_cls.__name__}.example[{index}]": example_ + for index, example_ in enumerate(examples_list) } - if example: + if example := copy.deepcopy(json_schema.get("example")): examples[f"{model_cls.__name__}.example"] = example return examples diff --git a/packages/pytest-simcore/src/pytest_simcore/pytest_global_environs.py b/packages/pytest-simcore/src/pytest_simcore/pytest_global_environs.py index 4293a955ecb..9d9dc0ae598 100644 --- a/packages/pytest-simcore/src/pytest_simcore/pytest_global_environs.py +++ b/packages/pytest-simcore/src/pytest_simcore/pytest_global_environs.py @@ -2,5 +2,7 @@ @pytest.fixture(autouse=True) -def sqlalchemy_2_0_warnings(monkeypatch): +def sqlalchemy_2_0_warnings(monkeypatch: pytest.MonkeyPatch): + # Major Migration Guide: https://docs.sqlalchemy.org/en/20/changelog/migration_20.html monkeypatch.setenv("SQLALCHEMY_WARN_20", "1") + monkeypatch.setenv("LOG_FORMAT_LOCAL_DEV_ENABLED", "True") diff --git a/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py b/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py index 08a2b043e81..c42075704b0 100644 --- a/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/rabbit_service.py @@ -1,58 +1,66 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable +# pylint: disable=protected-access import asyncio import logging -import os -import socket -from typing import Any, AsyncIterator, Optional +from collections.abc import AsyncIterator, Awaitable, Callable import aio_pika import pytest import tenacity -from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq import QueueName, RabbitMQClient, RabbitMQRPCClient from settings_library.rabbit import RabbitSettings from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed -from .helpers.utils_docker import get_localhost_ip, get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip +from .helpers.typing_env import EnvVarsDict -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) @tenacity.retry( wait=wait_fixed(5), stop=stop_after_attempt(60), - before_sleep=before_sleep_log(log, logging.INFO), + before_sleep=before_sleep_log(_logger, logging.INFO), reraise=True, ) async def wait_till_rabbit_responsive(url: str) -> None: - connection = await aio_pika.connect(url) - await connection.close() + async with await aio_pika.connect(url): + ... 
@pytest.fixture -async def rabbit_settings( - docker_stack: dict, testing_environ_vars: dict # stack is up -) -> RabbitSettings: - """Returns the settings of a rabbit service that is up and responsive""" - - prefix = testing_environ_vars["SWARM_STACK_NAME"] +def rabbit_env_vars_dict( + docker_stack: dict, + env_vars_for_docker_compose: EnvVarsDict, +) -> EnvVarsDict: + prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"] assert f"{prefix}_rabbit" in docker_stack["services"] - port = get_service_published_port("rabbit", testing_environ_vars["RABBIT_PORT"]) - - settings = RabbitSettings( - RABBIT_USER=testing_environ_vars["RABBIT_USER"], - RABBIT_PASSWORD=testing_environ_vars["RABBIT_PASSWORD"], - RABBIT_HOST=get_localhost_ip(), - RABBIT_PORT=int(port), + port = get_service_published_port( + "rabbit", int(env_vars_for_docker_compose["RABBIT_PORT"]) ) - await wait_till_rabbit_responsive(settings.dsn) + return { + "RABBIT_USER": env_vars_for_docker_compose["RABBIT_USER"], + "RABBIT_PASSWORD": env_vars_for_docker_compose["RABBIT_PASSWORD"], + "RABBIT_HOST": get_localhost_ip(), + "RABBIT_PORT": f"{port}", + "RABBIT_SECURE": env_vars_for_docker_compose["RABBIT_SECURE"], + } + +@pytest.fixture +async def rabbit_settings(rabbit_env_vars_dict: EnvVarsDict) -> RabbitSettings: + """Returns the settings of a rabbit service that is up and responsive""" + + settings = RabbitSettings.model_validate(rabbit_env_vars_dict) + await wait_till_rabbit_responsive(settings.dsn) return settings @@ -65,8 +73,9 @@ async def rabbit_service( NOTE: Use this fixture to setup client app """ monkeypatch.setenv("RABBIT_HOST", rabbit_settings.RABBIT_HOST) - monkeypatch.setenv("RABBIT_PORT", str(rabbit_settings.RABBIT_PORT)) + monkeypatch.setenv("RABBIT_PORT", f"{rabbit_settings.RABBIT_PORT}") monkeypatch.setenv("RABBIT_USER", rabbit_settings.RABBIT_USER) + monkeypatch.setenv("RABBIT_SECURE", f"{rabbit_settings.RABBIT_SECURE}") monkeypatch.setenv( "RABBIT_PASSWORD", rabbit_settings.RABBIT_PASSWORD.get_secret_value() ) @@ -75,57 +84,69 @@ async def rabbit_service( @pytest.fixture -async def rabbit_connection( - rabbit_settings: RabbitSettings, -) -> AsyncIterator[aio_pika.abc.AbstractConnection]: - def _reconnect_callback(): - pytest.fail("rabbit reconnected") - - def _connection_close_callback(sender: Any, exc: Optional[BaseException] = None): - if exc and not isinstance(exc, asyncio.CancelledError): - pytest.fail(f"rabbit connection closed with exception {exc} from {sender}!") - print("<-- connection closed") - - # create connection - # NOTE: to show the connection name in the rabbitMQ UI see there - # https://www.bountysource.com/issues/89342433-setting-custom-connection-name-via-client_properties-doesn-t-work-when-connecting-using-an-amqp-url - connection = await aio_pika.connect_robust( - rabbit_settings.dsn + f"?name={__name__}_{socket.gethostname()}_{os.getpid()}", - client_properties={"connection_name": "pytest read connection"}, - ) - assert connection - assert not connection.is_closed - connection.reconnect_callbacks.add(_reconnect_callback) - connection.close_callbacks.add(_connection_close_callback) - - yield connection - # close connection - await connection.close() - assert connection.is_closed +async def create_rabbitmq_client( + rabbit_service: RabbitSettings, +) -> AsyncIterator[Callable[[str], RabbitMQClient]]: + created_clients: list[RabbitMQClient] = [] + + def _creator(client_name: str, *, heartbeat: int = 60) -> RabbitMQClient: + # pylint: disable=protected-access + client = RabbitMQClient( + 
f"pytest_{client_name}", rabbit_service, heartbeat=heartbeat + ) + assert client + assert client._connection_pool # noqa: SLF001 + assert not client._connection_pool.is_closed # noqa: SLF001 + assert client._channel_pool # noqa: SLF001 + assert not client._channel_pool.is_closed # noqa: SLF001 + assert client.client_name == f"pytest_{client_name}" + assert client.settings == rabbit_service + created_clients.append(client) + return client + + yield _creator + + # cleanup, properly close the clients + await asyncio.gather(*(client.close() for client in created_clients)) @pytest.fixture -async def rabbit_channel( - rabbit_connection: aio_pika.abc.AbstractConnection, -) -> AsyncIterator[aio_pika.abc.AbstractChannel]: - def _channel_close_callback(sender: Any, exc: Optional[BaseException] = None): - if exc and not isinstance(exc, asyncio.CancelledError): - pytest.fail(f"rabbit channel closed with exception {exc} from {sender}!") - print("<-- rabbit channel closed") - - # create channel - async with rabbit_connection.channel() as channel: - print("--> rabbit channel created") - channel.close_callbacks.add(_channel_close_callback) - yield channel - assert channel.is_closed +async def rabbitmq_rpc_client( + rabbit_service: RabbitSettings, +) -> AsyncIterator[Callable[[str], Awaitable[RabbitMQRPCClient]]]: + created_clients = [] + + async def _creator(client_name: str, *, heartbeat: int = 60) -> RabbitMQRPCClient: + client = await RabbitMQRPCClient.create( + client_name=f"pytest_{client_name}", + settings=rabbit_service, + heartbeat=heartbeat, + ) + assert client + assert client.client_name == f"pytest_{client_name}" + assert client.settings == rabbit_service + created_clients.append(client) + return client + + yield _creator + # cleanup, properly close the clients + await asyncio.gather(*(client.close() for client in created_clients)) @pytest.fixture -async def rabbit_client( - rabbit_settings: RabbitSettings, -) -> AsyncIterator[RabbitMQClient]: - client = RabbitMQClient("pytest", settings=rabbit_settings) - assert client - yield client - await client.close() +async def ensure_parametrized_queue_is_empty( + create_rabbitmq_client: Callable[[str], RabbitMQClient], queue_name: QueueName +) -> AsyncIterator[None]: + rabbitmq_client = create_rabbitmq_client("pytest-purger") + + async def _queue_messages_purger() -> None: + assert rabbitmq_client._channel_pool # noqa: SLF001 + async with rabbitmq_client._channel_pool.acquire() as channel: # noqa: SLF001 + assert isinstance(channel, aio_pika.RobustChannel) + queue = await channel.get_queue(queue_name) + await queue.purge() + + await _queue_messages_purger() + yield + # cleanup + await _queue_messages_purger() diff --git a/packages/pytest-simcore/src/pytest_simcore/redis_service.py b/packages/pytest-simcore/src/pytest_simcore/redis_service.py index 8194020d202..05aec86a234 100644 --- a/packages/pytest-simcore/src/pytest_simcore/redis_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/redis_service.py @@ -3,18 +3,23 @@ # pylint:disable=redefined-outer-name import logging -from typing import AsyncIterator, Union +from collections.abc import AsyncIterator +from datetime import timedelta import pytest import tenacity +from pytest_mock import MockerFixture from redis.asyncio import Redis, from_url -from settings_library.redis import RedisSettings +from settings_library.basic_types import PortInt +from settings_library.redis import RedisDatabase, RedisSettings from tenacity.before_sleep import before_sleep_log from tenacity.stop import 
stop_after_delay from tenacity.wait import wait_fixed from yarl import URL -from .helpers.utils_docker import get_localhost_ip, get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip +from .helpers.typing_env import EnvVarsDict log = logging.getLogger(__name__) @@ -22,24 +27,28 @@ @pytest.fixture async def redis_settings( docker_stack: dict, # stack is up - testing_environ_vars: dict, + env_vars_for_docker_compose: EnvVarsDict, ) -> RedisSettings: """Returns the settings of a redis service that is up and responsive""" - prefix = testing_environ_vars["SWARM_STACK_NAME"] + prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"] assert f"{prefix}_redis" in docker_stack["services"] port = get_service_published_port( - "simcore_redis", testing_environ_vars["REDIS_PORT"] + "simcore_redis", int(env_vars_for_docker_compose["REDIS_PORT"]) ) # test runner is running on the host computer - settings = RedisSettings(REDIS_HOST=get_localhost_ip(), REDIS_PORT=int(port)) - await wait_till_redis_responsive(settings.dsn_resources) + settings = RedisSettings( + REDIS_HOST=get_localhost_ip(), + REDIS_PORT=PortInt(port), + REDIS_PASSWORD=env_vars_for_docker_compose["REDIS_PASSWORD"], + ) + await wait_till_redis_responsive(settings.build_redis_dsn(RedisDatabase.RESOURCES)) return settings -@pytest.fixture(scope="function") +@pytest.fixture() def redis_service( redis_settings: RedisSettings, monkeypatch: pytest.MonkeyPatch, @@ -50,35 +59,44 @@ def redis_service( """ monkeypatch.setenv("REDIS_HOST", redis_settings.REDIS_HOST) monkeypatch.setenv("REDIS_PORT", str(redis_settings.REDIS_PORT)) + monkeypatch.setenv( + "REDIS_PASSWORD", redis_settings.REDIS_PASSWORD.get_secret_value() + ) return redis_settings -@pytest.fixture(scope="function") +@pytest.fixture() async def redis_client( redis_settings: RedisSettings, ) -> AsyncIterator[Redis]: """Creates a redis client to communicate with a redis service ready""" client = from_url( - redis_settings.dsn_resources, encoding="utf-8", decode_responses=True + redis_settings.build_redis_dsn(RedisDatabase.RESOURCES), + encoding="utf-8", + decode_responses=True, ) yield client await client.flushall() - await client.close(close_connection_pool=True) + await client.aclose(close_connection_pool=True) -@pytest.fixture(scope="function") +@pytest.fixture() async def redis_locks_client( redis_settings: RedisSettings, ) -> AsyncIterator[Redis]: """Creates a redis client to communicate with a redis service ready""" - client = from_url(redis_settings.dsn_locks, encoding="utf-8", decode_responses=True) + client = from_url( + redis_settings.build_redis_dsn(RedisDatabase.LOCKS), + encoding="utf-8", + decode_responses=True, + ) yield client await client.flushall() - await client.close(close_connection_pool=True) + await client.aclose(close_connection_pool=True) @tenacity.retry( @@ -87,11 +105,19 @@ async def redis_locks_client( before_sleep=before_sleep_log(log, logging.INFO), reraise=True, ) -async def wait_till_redis_responsive(redis_url: Union[URL, str]) -> None: +async def wait_till_redis_responsive(redis_url: URL | str) -> None: client = from_url(f"{redis_url}", encoding="utf-8", decode_responses=True) - try: if not await client.ping(): - raise ConnectionError(f"{redis_url=} not available") + msg = f"{redis_url=} not available" + raise ConnectionError(msg) finally: - await client.close(close_connection_pool=True) + await client.aclose(close_connection_pool=True) + + +@pytest.fixture +def 
mock_redis_socket_timeout(mocker: MockerFixture) -> None: + # lowered to allow CI to properly shutdown RedisClientSDK instances + mocker.patch( + "servicelib.redis._client.DEFAULT_SOCKET_TIMEOUT", timedelta(seconds=0.25) + ) diff --git a/packages/pytest-simcore/src/pytest_simcore/repository_paths.py b/packages/pytest-simcore/src/pytest_simcore/repository_paths.py index 0c1c470699d..6112cef627b 100644 --- a/packages/pytest-simcore/src/pytest_simcore/repository_paths.py +++ b/packages/pytest-simcore/src/pytest_simcore/repository_paths.py @@ -4,37 +4,39 @@ import pytest -CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent -WILDCARD = "packages/pytest-simcore/src/pytest_simcore/__init__.py" -ROOT = Path("/") +_CURRENT_DIR = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +) +_WILDCARD = "packages/pytest-simcore/src/pytest_simcore/__init__.py" +_ROOT = Path("/") @pytest.fixture(scope="session") -def osparc_simcore_root_dir(request) -> Path: +def osparc_simcore_root_dir(request: pytest.FixtureRequest) -> Path: """osparc-simcore repo root dir""" test_dir = Path(request.session.fspath) # expected test dir in simcore - root_dir = CURRENT_DIR - for start_dir in (CURRENT_DIR, test_dir): + root_dir = _CURRENT_DIR + for start_dir in (_CURRENT_DIR, test_dir): root_dir = start_dir - while not any(root_dir.glob(WILDCARD)) and root_dir != ROOT: + while not any(root_dir.glob(_WILDCARD)) and root_dir != _ROOT: root_dir = root_dir.parent - if root_dir != ROOT: + if root_dir != _ROOT: break msg = f"'{root_dir}' does not look like the git root directory of osparc-simcore" - assert root_dir != ROOT, msg + assert root_dir != _ROOT, msg assert root_dir.exists(), msg - assert any(root_dir.glob(WILDCARD)), msg + assert any(root_dir.glob(_WILDCARD)), msg assert any(root_dir.glob(".git")), msg return root_dir @pytest.fixture(scope="session") -def osparc_simcore_services_dir(osparc_simcore_root_dir) -> Path: +def osparc_simcore_services_dir(osparc_simcore_root_dir: Path) -> Path: """Path to osparc-simcore/services folder""" services_dir = osparc_simcore_root_dir / "services" assert services_dir.exists() @@ -77,7 +79,7 @@ def env_devel_file(osparc_simcore_root_dir: Path) -> Path: @pytest.fixture(scope="session") -def services_docker_compose_file(services_dir): +def services_docker_compose_file(services_dir: Path) -> Path: dcpath = services_dir / "docker-compose.yml" assert dcpath.exists() return dcpath diff --git a/packages/pytest-simcore/src/pytest_simcore/schemas.py b/packages/pytest-simcore/src/pytest_simcore/schemas.py index 8bdfecb9af8..7bb9a6b0e41 100644 --- a/packages/pytest-simcore/src/pytest_simcore/schemas.py +++ b/packages/pytest-simcore/src/pytest_simcore/schemas.py @@ -1,26 +1,23 @@ # pylint:disable=redefined-outer-name import json -import logging import subprocess from pathlib import Path from typing import Any, Callable, Iterable import pytest -log = logging.getLogger(__name__) - @pytest.fixture(scope="session") def common_schemas_specs_dir(osparc_simcore_root_dir: Path) -> Path: - specs_dir = osparc_simcore_root_dir / "api" / "specs" / "common" / "schemas" + specs_dir = osparc_simcore_root_dir / "api" / "specs" / "director" / "schemas" assert specs_dir.exists() return specs_dir @pytest.fixture(scope="session") def node_meta_schema_file(common_schemas_specs_dir: Path) -> Path: - node_meta_file = common_schemas_specs_dir / "node-meta-v0.0.1.json" + node_meta_file = common_schemas_specs_dir / "node-meta-v0.0.1-pydantic.json" assert 
node_meta_file.exists() return node_meta_file @@ -106,6 +103,10 @@ def _run_diff(schema_lhs: dict, schema_rhs: dict) -> subprocess.CompletedProcess schema_rhs_path = tmpdir / "schema_rhs.json" schema_rhs_path.write_text(json.dumps(schema_rhs, indent=1)) + # NOTE: When debugging the differences, as of now both schemas come from + # pydantic model, now it is possible to visually compare the difference. To do so, + # just dereference the current pydantic schema. + return subprocess.run( [json_diff_script, schema_lhs_path, schema_rhs_path], stdout=subprocess.PIPE, diff --git a/packages/pytest-simcore/src/pytest_simcore/service_environs.py b/packages/pytest-simcore/src/pytest_simcore/service_environs.py deleted file mode 100644 index 266744d3131..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/service_environs.py +++ /dev/null @@ -1,20 +0,0 @@ -# pylint: disable=redefined-outer-name -"""these are fixtures for when running unit tests - """ -from pathlib import Path - -import pytest -from dotenv import load_dotenv - - -@pytest.fixture(scope="session") -def service_env_file(project_slug_dir: Path) -> Path: - env_devel_path = project_slug_dir / ".env-devel" - assert env_devel_path.exists() - return env_devel_path - - -@pytest.fixture(scope="session", autouse=True) -def service_test_environ(service_env_file: Path) -> None: - """this fixtures overload the environ with unit testing only variables""" - load_dotenv(service_env_file, verbose=True) diff --git a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py index 676b1b48553..a001bb1d5d8 100644 --- a/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py +++ b/packages/pytest-simcore/src/pytest_simcore/services_api_mocks_for_aiohttp_clients.py @@ -2,35 +2,40 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -import json -import random import re from pathlib import Path from typing import Any +from urllib.parse import urlparse, urlunparse import pytest -from aiohttp import web from aioresponses import aioresponses as AioResponsesMock from aioresponses.core import CallbackResult -from models_library.api_schemas_storage import ( +from faker import Faker +from models_library.api_schemas_directorv2.computations import ( + ComputationGet as DirectorV2ComputationGet, +) +from models_library.api_schemas_storage.storage_schemas import ( FileMetaDataGet, + FileUploadCompleteFutureResponse, + FileUploadCompleteResponse, + FileUploadCompleteState, FileUploadLinks, FileUploadSchema, LinkType, PresignedLink, ) -from models_library.clusters import Cluster from models_library.generics import Envelope +from models_library.projects_pipeline import ComputationTask from models_library.projects_state import RunningState from models_library.utils.fastapi_encoders import jsonable_encoder -from pydantic import AnyUrl, ByteSize, parse_obj_as +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.aiohttp import status from yarl import URL pytest_plugins = [ "pytest_simcore.aioresponses_mocker", ] - # The adjacency list is defined as a dictionary with the key to the node and its list of successors FULL_PROJECT_PIPELINE_ADJACENCY: dict[str, list[str]] = { "62bca361-8594-48c8-875e-b8577e868aec": [ @@ -70,14 +75,13 @@ def create_computation_cb(url, **kwargs) -> CallbackResult: - assert "json" in kwargs, f"missing body in call to {url}" body = kwargs["json"] for param in ["user_id", 
"project_id"]: assert param in body, f"{param} is missing from body: {body}" state = ( RunningState.PUBLISHED - if "start_pipeline" in body and body["start_pipeline"] + if body.get("start_pipeline") else RunningState.NOT_STARTED ) pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY @@ -91,7 +95,7 @@ def create_computation_cb(url, **kwargs) -> CallbackResult: "62237c33-8d6c-4709-aa92-c3cf693dd6d2", "0bdf824f-57cb-4e38-949e-fd12c184f000", ] - node_states[node_id] = {"state": {"modified": True, "dependencies": []}} + node_states[node_id] = {"modified": True, "dependencies": []} node_states["62237c33-8d6c-4709-aa92-c3cf693dd6d2"] = { "modified": True, "dependencies": ["2f493631-30b4-4ad8-90f2-a74e4b46fe73"], @@ -104,17 +108,27 @@ def create_computation_cb(url, **kwargs) -> CallbackResult: ], } - return CallbackResult( - status=201, - # NOTE: aioresponses uses json.dump which does NOT encode serialization of UUIDs - payload={ - "id": str(kwargs["json"]["project_id"]), + json_schema = ComputationTask.model_json_schema() + assert isinstance(json_schema["examples"], list) + assert isinstance(json_schema["examples"][0], dict) + computation: dict[str, Any] = json_schema["examples"][0].copy() + computation.update( + { + "id": f"{kwargs['json']['project_id']}", "state": state, "pipeline_details": { "adjacency_list": pipeline, "node_states": node_states, + "progress": 0, }, - }, + } + ) + returned_computation = ComputationTask.model_validate(computation) + + return CallbackResult( + status=201, + # NOTE: aioresponses uses json.dump which does NOT encode serialization of UUIDs + payload=jsonable_encoder(returned_computation), ) @@ -123,88 +137,27 @@ def get_computation_cb(url, **kwargs) -> CallbackResult: pipeline: dict[str, list[str]] = FULL_PROJECT_PIPELINE_ADJACENCY node_states = FULL_PROJECT_NODE_STATES - return CallbackResult( - status=200, - payload={ + json_schema = DirectorV2ComputationGet.model_json_schema() + assert isinstance(json_schema["examples"], list) + assert isinstance(json_schema["examples"][0], dict) + + computation: dict[str, Any] = json_schema["examples"][0].copy() + computation.update( + { "id": Path(url.path).name, "state": state, "pipeline_details": { "adjacency_list": pipeline, "node_states": node_states, + "progress": 0, }, - "iteration": 2, - "cluster_id": 23, - }, - ) - - -def create_cluster_cb(url, **kwargs) -> CallbackResult: - assert "json" in kwargs, f"missing body in call to {url}" - body = kwargs["json"] - assert url.query.get("user_id") - random_cluster = Cluster.parse_obj( - random.choice(Cluster.Config.schema_extra["examples"]) - ) - return CallbackResult( - status=201, payload=json.loads(random_cluster.json(by_alias=True)) - ) - - -def list_clusters_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - return CallbackResult( - status=200, - body=json.dumps( - [ - json.loads( - Cluster.parse_obj( - random.choice(Cluster.Config.schema_extra["examples"]) - ).json(by_alias=True) - ) - for _ in range(3) - ] - ), - ) - - -def get_cluster_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = url.path.split("/")[-1] - return CallbackResult( - status=200, - payload=json.loads( - Cluster.parse_obj( - { - **random.choice(Cluster.Config.schema_extra["examples"]), - **{"id": cluster_id}, - } - ).json(by_alias=True) - ), + } ) + returned_computation = DirectorV2ComputationGet.model_validate(computation) - -def get_cluster_details_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = 
url.path.split("/")[-1] return CallbackResult( status=200, - payload={"scheduler": {}, "cluster": {}, "dashboard_link": "some_faked_link"}, - ) - - -def patch_cluster_cb(url, **kwargs) -> CallbackResult: - assert url.query.get("user_id") - cluster_id = url.path.split("/")[-1] - return CallbackResult( - status=200, - payload=json.loads( - Cluster.parse_obj( - { - **random.choice(Cluster.Config.schema_extra["examples"]), - **{"id": cluster_id}, - } - ).json(by_alias=True) - ), + payload=jsonable_encoder(returned_computation), ) @@ -226,106 +179,33 @@ async def director_v2_service_mock( r"^http://[a-z\-_]*director-v2:[0-9]+/v2/computations/.*:stop$" ) delete_computation_pattern = get_computation_pattern - projects_networks_pattern = re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/dynamic_services/projects/.*/-/networks$" - ) get_services_pattern = re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/dynamic_services$" + r"^http://[a-z\-_]*director-v2:[0-9]+/v2/dynamic_services.*$" ) + aioresponses_mocker.get( - get_services_pattern, status=web.HTTPOk.status_code, repeat=True + get_services_pattern, status=status.HTTP_200_OK, repeat=True ) aioresponses_mocker.post( create_computation_pattern, callback=create_computation_cb, - status=web.HTTPCreated.status_code, + status=status.HTTP_201_CREATED, repeat=True, ) aioresponses_mocker.post( stop_computation_pattern, - status=web.HTTPAccepted.status_code, + status=status.HTTP_202_ACCEPTED, repeat=True, ) aioresponses_mocker.get( get_computation_pattern, - status=web.HTTPAccepted.status_code, + status=status.HTTP_202_ACCEPTED, callback=get_computation_cb, repeat=True, ) aioresponses_mocker.delete(delete_computation_pattern, status=204, repeat=True) - aioresponses_mocker.patch(projects_networks_pattern, status=204, repeat=True) - - # clusters - cluster_route_pattern = re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)?\?(\w+(?:=\w+)?\&?){1,}$" - ) - aioresponses_mocker.post( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=create_cluster_cb, - status=web.HTTPCreated.status_code, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=list_clusters_cb, - status=web.HTTPCreated.status_code, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=get_cluster_cb, - status=web.HTTPCreated.status_code, - repeat=True, - ) - - aioresponses_mocker.get( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters/[0-9]+/details\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=get_cluster_details_cb, - status=web.HTTPCreated.status_code, - repeat=True, - ) - - aioresponses_mocker.patch( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - callback=patch_cluster_cb, - status=web.HTTPCreated.status_code, - repeat=True, - ) - aioresponses_mocker.delete( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+)\?(\w+(?:=\w+)?\&?){1,}$" - ), - status=web.HTTPNoContent.status_code, - repeat=True, - ) - - aioresponses_mocker.post( - re.compile(r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters:ping$"), - status=web.HTTPNoContent.status_code, - repeat=True, - ) - - aioresponses_mocker.post( - re.compile( - r"^http://[a-z\-_]*director-v2:[0-9]+/v2/clusters(/[0-9]+):ping\?(\w+(?:=\w+)?\&?){1,}$" - ), - 
status=web.HTTPNoContent.status_code, - repeat=True, - ) return aioresponses_mocker @@ -337,7 +217,7 @@ def get_download_link_cb(url: URL, **kwargs) -> CallbackResult: link_type = kwargs["params"]["link_type"] scheme = {LinkType.PRESIGNED: "http", LinkType.S3: "s3"} return CallbackResult( - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload={"data": {"link": f"{scheme[link_type]}://{file_id}"}}, ) @@ -350,25 +230,27 @@ def get_upload_link_cb(url: URL, **kwargs) -> CallbackResult: scheme = {LinkType.PRESIGNED: "http", LinkType.S3: "s3"} if file_size := kwargs["params"].get("file_size") is not None: - + assert file_size upload_schema = FileUploadSchema( - chunk_size=parse_obj_as(ByteSize, "5GiB"), - urls=[parse_obj_as(AnyUrl, f"{scheme[link_type]}://{file_id}")], + chunk_size=TypeAdapter(ByteSize).validate_python("5GiB"), + urls=[ + TypeAdapter(AnyUrl).validate_python(f"{scheme[link_type]}://{file_id}") + ], links=FileUploadLinks( - abort_upload=parse_obj_as(AnyUrl, f"{url}:abort"), - complete_upload=parse_obj_as(AnyUrl, f"{url}:complete"), + abort_upload=TypeAdapter(AnyUrl).validate_python(f"{url}:abort"), + complete_upload=TypeAdapter(AnyUrl).validate_python(f"{url}:complete"), ), ) return CallbackResult( - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload={"data": jsonable_encoder(upload_schema)}, ) # version 1 returns a presigned link presigned_link = PresignedLink( - link=parse_obj_as(AnyUrl, f"{scheme[link_type]}://{file_id}") + link=TypeAdapter(AnyUrl).validate_python(f"{scheme[link_type]}://{file_id}") ) return CallbackResult( - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload={"data": jsonable_encoder(presigned_link)}, ) @@ -378,14 +260,14 @@ def list_file_meta_data_cb(url: URL, **kwargs) -> CallbackResult: assert "user_id" in kwargs["params"] assert "uuid_filter" in kwargs["params"] return CallbackResult( - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload=jsonable_encoder(Envelope[list[FileMetaDataGet]](data=[])), ) @pytest.fixture async def storage_v0_service_mock( - aioresponses_mocker: AioResponsesMock, + aioresponses_mocker: AioResponsesMock, faker: Faker ) -> AioResponsesMock: """mocks responses of storage API""" @@ -393,10 +275,8 @@ async def storage_v0_service_mock( r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/.+/metadata.+$" ) - get_upload_link_pattern = ( - get_download_link_pattern - ) = delete_file_pattern = re.compile( - r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files.+$" + get_upload_link_pattern = get_download_link_pattern = delete_file_pattern = ( + re.compile(r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files.+$") ) get_locations_link_pattern = re.compile( @@ -407,10 +287,22 @@ async def storage_v0_service_mock( r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/metadata.+$" ) + storage_complete_link = re.compile( + r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/.+complete" + ) + + storage_complete_link_futures = re.compile( + r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/.+complete/futures/.+" + ) + + storage_abort_link = re.compile( + r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/.+abort" + ) + aioresponses_mocker.get( get_file_metadata_pattern, - status=web.HTTPOk.status_code, - payload={"data": FileMetaDataGet.Config.schema_extra["examples"][0]}, + status=status.HTTP_200_OK, + payload={"data": FileMetaDataGet.model_json_schema()["examples"][0]}, repeat=True, ) aioresponses_mocker.get( @@ -424,15 +316,55 @@ 
async def storage_v0_service_mock( aioresponses_mocker.put( get_upload_link_pattern, callback=get_upload_link_cb, repeat=True ) - aioresponses_mocker.delete( - delete_file_pattern, status=web.HTTPNoContent.status_code - ) + aioresponses_mocker.delete(delete_file_pattern, status=status.HTTP_204_NO_CONTENT) aioresponses_mocker.get( get_locations_link_pattern, - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload={"data": [{"name": "simcore.s3", "id": 0}]}, repeat=True, ) + def generate_future_link(url, **kwargs): + parsed_url = urlparse(str(url)) + stripped_url = urlunparse( + (parsed_url.scheme, parsed_url.netloc, parsed_url.path, "", "", "") + ) + + payload: FileUploadCompleteResponse = TypeAdapter( + FileUploadCompleteResponse + ).validate_python( + { + "links": { + "state": stripped_url + ":complete/futures/" + str(faker.uuid4()) + }, + }, + ) + return CallbackResult( + status=status.HTTP_200_OK, + payload=jsonable_encoder( + Envelope[FileUploadCompleteResponse](data=payload) + ), + ) + + aioresponses_mocker.post(storage_complete_link, callback=generate_future_link) + + aioresponses_mocker.post( + storage_complete_link_futures, + status=status.HTTP_200_OK, + payload=jsonable_encoder( + Envelope[FileUploadCompleteFutureResponse]( + data=FileUploadCompleteFutureResponse( + state=FileUploadCompleteState.OK, + e_tag="07d1c1a4-b073-4be7-b022-f405d90e99aa", + ) + ) + ), + ) + + aioresponses_mocker.post( + storage_abort_link, + status=status.HTTP_200_OK, + ) + return aioresponses_mocker diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py index cc7f6370a56..507bb602e06 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_dask_service.py @@ -2,17 +2,24 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -from typing import Iterator +from collections.abc import Iterator +from dataclasses import dataclass +from pathlib import Path +import distributed import pytest from distributed import Client +from models_library.clusters import ClusterAuthentication, TLSAuthentication from pydantic import AnyUrl -from .helpers.utils_docker import get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip -@pytest.fixture(scope="function") -async def dask_scheduler_service(simcore_services_ready, monkeypatch) -> str: +@pytest.fixture +async def dask_scheduler_service( + simcore_services_ready: None, monkeypatch: pytest.MonkeyPatch +) -> str: # the dask scheduler has a UI for the dashboard and a secondary port for the API # simcore_services fixture already ensure the dask-scheduler is up and running dask_scheduler_api_port = get_service_published_port( @@ -20,17 +27,80 @@ async def dask_scheduler_service(simcore_services_ready, monkeypatch) -> str: ) # override the port monkeypatch.setenv("DASK_SCHEDULER_PORT", f"{dask_scheduler_api_port}") - return AnyUrl.build(scheme="tcp", host="127.0.0.1", port=dask_scheduler_api_port) + url = AnyUrl.build( + scheme="tls", host=get_localhost_ip(), port=int(dask_scheduler_api_port) + ) + return f"{url}" + + +@pytest.fixture +def dask_sidecar_dir(osparc_simcore_services_dir: Path) -> Path: + path = osparc_simcore_services_dir / "dask-sidecar" + assert path.exists() + return path + + +@pytest.fixture +def dask_backend_tls_certificates_dir(dask_sidecar_dir: Path) -> Path: + path = dask_sidecar_dir / 
".dask-certificates" + assert path.exists() + return path + + +@dataclass(frozen=True, slots=True, kw_only=True) +class _TLSCertificates: + tls_ca_file: Path + tls_cert_file: Path + tls_key_file: Path + +@pytest.fixture +def dask_backend_tls_certificates( + dask_backend_tls_certificates_dir, +) -> _TLSCertificates: + certs = _TLSCertificates( + tls_ca_file=dask_backend_tls_certificates_dir / "dask-cert.pem", + tls_cert_file=dask_backend_tls_certificates_dir / "dask-cert.pem", + tls_key_file=dask_backend_tls_certificates_dir / "dask-key.pem", + ) + assert certs.tls_ca_file.exists() + assert certs.tls_cert_file.exists() + assert certs.tls_key_file.exists() + return certs + + +@pytest.fixture +def dask_scheduler_auth( + dask_backend_tls_certificates: _TLSCertificates, +) -> ClusterAuthentication: + return TLSAuthentication( + tls_ca_file=dask_backend_tls_certificates.tls_ca_file, + tls_client_cert=dask_backend_tls_certificates.tls_cert_file, + tls_client_key=dask_backend_tls_certificates.tls_key_file, + ) + + +@pytest.fixture +def dask_client_security( + dask_backend_tls_certificates: _TLSCertificates, +) -> distributed.Security: + return distributed.Security( + tls_ca_file=f"{dask_backend_tls_certificates.tls_ca_file}", + tls_client_cert=f"{dask_backend_tls_certificates.tls_cert_file}", + tls_client_key=f"{dask_backend_tls_certificates.tls_key_file}", + require_encryption=True, + ) -@pytest.fixture(scope="function") -def dask_client(dask_scheduler_service: str) -> Iterator[Client]: - client = Client(dask_scheduler_service) +@pytest.fixture +def dask_client( + dask_scheduler_service: str, dask_client_security: distributed.Security +) -> Iterator[Client]: + client = Client(dask_scheduler_service, security=dask_client_security) yield client client.close() -@pytest.fixture(scope="function") +@pytest.fixture def dask_sidecar_service(dask_client: Client) -> None: dask_client.wait_for_workers(n_workers=1, timeout=30) diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_service_library_fixtures.py b/packages/pytest-simcore/src/pytest_simcore/simcore_service_library_fixtures.py index fc85ee7a690..90104625cf0 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_service_library_fixtures.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_service_library_fixtures.py @@ -1,4 +1,4 @@ -from typing import AsyncIterable +from collections.abc import AsyncIterable import pytest from servicelib.async_utils import cancel_sequential_workers diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_services.py b/packages/pytest-simcore/src/pytest_simcore/simcore_services.py index b864277bb28..2a4f6d2ff4d 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_services.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_services.py @@ -5,26 +5,30 @@ import asyncio import json import logging +import warnings +from collections.abc import Iterator from dataclasses import dataclass +from io import StringIO +from typing import Final import aiohttp import pytest from aiohttp.client import ClientTimeout -from pytest import MonkeyPatch -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_delay from tenacity.wait import wait_random from yarl import URL from .helpers.constants import MINUTE +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip from .helpers.typing_env import EnvVarsDict -from 
.helpers.utils_docker import get_localhost_ip, get_service_published_port log = logging.getLogger(__name__) -_SERVICES_TO_SKIP = { +_SERVICES_TO_SKIP: Final[set[str]] = { "agent", # global mode deploy (NO exposed ports, has http API) "dask-sidecar", # global mode deploy (NO exposed ports, **NO** http API) "migration", @@ -34,27 +38,45 @@ "static-webserver", "traefik", "whoami", + "sto-worker", + "sto-worker-cpu-bound", } # TODO: unify healthcheck policies see https://github.com/ITISFoundation/osparc-simcore/pull/2281 -SERVICE_PUBLISHED_PORT = {} -DEFAULT_SERVICE_HEALTHCHECK_ENTRYPOINT = "/v0/" -MAP_SERVICE_HEALTHCHECK_ENTRYPOINT = { +DEFAULT_SERVICE_HEALTHCHECK_ENTRYPOINT: Final[str] = "/v0/" +MAP_SERVICE_HEALTHCHECK_ENTRYPOINT: Final[dict[str, str]] = { "autoscaling": "/", + "clusters-keeper": "/", "dask-scheduler": "/health", + "notifications": "/", "datcore-adapter": "/v0/live", "director-v2": "/", + "dynamic-schdlr": "/", + "efs-guardian": "/", "invitations": "/", + "payments": "/", + "resource-usage-tracker": "/", + "docker-api-proxy": "/version", } -AIOHTTP_BASED_SERVICE_PORT: int = 8080 -FASTAPI_BASED_SERVICE_PORT: int = 8000 -DASK_SCHEDULER_SERVICE_PORT: int = 8787 +# some services require authentication to access their health-check endpoints +_BASE_AUTH_ENV_VARS: Final[dict[str, tuple[str, str]]] = { + "docker-api-proxy": ("DOCKER_API_PROXY_USER", "DOCKER_API_PROXY_PASSWORD"), +} + +_SERVICE_NAME_REPLACEMENTS: Final[dict[str, str]] = { + "dynamic-scheduler": "dynamic-schdlr", +} -_ONE_SEC_TIMEOUT = ClientTimeout(total=1) # type: ignore +AIOHTTP_BASED_SERVICE_PORT: Final[int] = 8080 +FASTAPI_BASED_SERVICE_PORT: Final[int] = 8000 +DASK_SCHEDULER_SERVICE_PORT: Final[int] = 8787 +DOCKER_API_PROXY_SERVICE_PORT: Final[int] = 8888 -async def wait_till_service_healthy(service_name: str, endpoint: URL): +_ONE_SEC_TIMEOUT: Final[ClientTimeout] = ClientTimeout(total=1) # type: ignore + +async def wait_till_service_healthy(service_name: str, endpoint: URL): log.info( "Connecting to %s", f"{service_name=} at {endpoint=}", @@ -68,14 +90,16 @@ async def wait_till_service_healthy(service_name: str, endpoint: URL): reraise=True, ): with attempt: - async with aiohttp.ClientSession(timeout=_ONE_SEC_TIMEOUT) as session: - async with session.get(endpoint) as response: - # NOTE: Health-check endpoint require only a status code 200 - # (see e.g. services/web/server/docker/healthcheck.py) - # regardless of the payload content - assert ( - response.status == 200 - ), f"Connection to {service_name=} at {endpoint=} failed with {response=}" + async with ( + aiohttp.ClientSession(timeout=_ONE_SEC_TIMEOUT) as session, + session.get(endpoint) as response, + ): + # NOTE: Health-check endpoint require only a status code 200 + # (see e.g. 
services/web/server/docker/healthcheck.py) + # regardless of the payload content + assert ( + response.status == 200 + ), f"Connection to {service_name=} at {endpoint=} failed with {response=}" log.info( "Connection to %s succeeded [%s]", @@ -92,25 +116,25 @@ class ServiceHealthcheckEndpoint: @classmethod def create(cls, service_name: str, baseurl): # TODO: unify healthcheck policies see https://github.com/ITISFoundation/osparc-simcore/pull/2281 - obj = cls( + return cls( name=service_name, url=URL( f"{baseurl}{MAP_SERVICE_HEALTHCHECK_ENTRYPOINT.get(service_name, DEFAULT_SERVICE_HEALTHCHECK_ENTRYPOINT)}" ), ) - return obj @pytest.fixture(scope="module") def services_endpoint( core_services_selection: list[str], docker_stack: dict, - testing_environ_vars: EnvVarsDict, + env_vars_for_docker_compose: EnvVarsDict, ) -> dict[str, URL]: services_endpoint = {} - stack_name = testing_environ_vars["SWARM_STACK_NAME"] + stack_name = env_vars_for_docker_compose["SWARM_STACK_NAME"] for service in core_services_selection: + service = _SERVICE_NAME_REPLACEMENTS.get(service, service) assert f"{stack_name}_{service}" in docker_stack["services"] full_service_name = f"{stack_name}_{service}" @@ -121,10 +145,19 @@ def services_endpoint( AIOHTTP_BASED_SERVICE_PORT, FASTAPI_BASED_SERVICE_PORT, DASK_SCHEDULER_SERVICE_PORT, + DOCKER_API_PROXY_SERVICE_PORT, ] - endpoint = URL( - f"http://{get_localhost_ip()}:{get_service_published_port(full_service_name, target_ports)}" - ) + if service in _BASE_AUTH_ENV_VARS: + user_env, password_env = _BASE_AUTH_ENV_VARS[service] + user = env_vars_for_docker_compose[user_env] + password = env_vars_for_docker_compose[password_env] + endpoint = URL( + f"http://{user}:{password}@{get_localhost_ip()}:{get_service_published_port(full_service_name, target_ports)}" + ) + else: + endpoint = URL( + f"http://{get_localhost_ip()}:{get_service_published_port(full_service_name, target_ports)}" + ) services_endpoint[service] = endpoint else: print(f"Collecting service endpoints: '{service}' skipped") @@ -132,16 +165,7 @@ def services_endpoint( return services_endpoint -@pytest.fixture(scope="module") -def simcore_services_ready( - services_endpoint: dict[str, URL], monkeypatch_module: MonkeyPatch -) -> None: - """ - - Waits for services in `core_services_selection` to be healthy - - Sets environment with these (host:port) endpoitns - - WARNING: not all services in the selection can be health-checked (see services_endpoint) - """ +async def _wait_for_services_ready(services_endpoint: dict[str, URL]) -> None: # Compose and log healthcheck url entpoints health_endpoints = [ @@ -149,9 +173,14 @@ def simcore_services_ready( for service_name, endpoint in services_endpoint.items() ] - print("Composing health-check endpoints for relevant stack's services:") - for h in health_endpoints: - print(f" - {h.name} -> {h.url}") + with StringIO() as buffer: + print( + "Composing health-check endpoints for relevant stack's services:", + file=buffer, + ) + for h in health_endpoints: + print(f" - {h.name} -> {h.url}", file=buffer) + log.info(buffer.getvalue()) async def _check_all_services_are_healthy(): await asyncio.gather( @@ -160,13 +189,57 @@ async def _check_all_services_are_healthy(): ) # check ready - asyncio.run(_check_all_services_are_healthy()) + await _check_all_services_are_healthy() + + +@pytest.fixture +async def simcore_services_ready( + services_endpoint: dict[str, URL], monkeypatch: pytest.MonkeyPatch +) -> None: + await _wait_for_services_ready(services_endpoint) + # patches environment 
variables with right host/port per service + for service, endpoint in services_endpoint.items(): + env_prefix = service.upper().replace("-", "_") + + assert endpoint.host + monkeypatch.setenv(f"{env_prefix}_HOST", endpoint.host) + monkeypatch.setenv(f"{env_prefix}_PORT", str(endpoint.port)) + + +@pytest.fixture(scope="module") +def _monkeypatch_module(request: pytest.FixtureRequest) -> Iterator[pytest.MonkeyPatch]: + # WARNING: Temporarily ONLY for simcore_services_ready_module + assert request.scope == "module" + + warnings.warn( + f"{__name__} is deprecated, we highly recommend to use pytest.monkeypatch at function-scope level." + "Large scopes lead to complex problems during tests", + DeprecationWarning, + stacklevel=1, + ) + # Some extras to overcome https://github.com/pytest-dev/pytest/issues/363 + # SEE https://github.com/pytest-dev/pytest/issues/363#issuecomment-289830794 + + mpatch_module = pytest.MonkeyPatch() + yield mpatch_module + mpatch_module.undo() + +@pytest.fixture(scope="module") +async def simcore_services_ready_module( + services_endpoint: dict[str, URL], _monkeypatch_module: pytest.MonkeyPatch +) -> None: + warnings.warn( + "This fixture uses deprecated monkeypatch_module fixturePlease do NOT use it!", + DeprecationWarning, + stacklevel=1, + ) + await _wait_for_services_ready(services_endpoint) # patches environment variables with right host/port per service for service, endpoint in services_endpoint.items(): env_prefix = service.upper().replace("-", "_") assert endpoint.host - monkeypatch_module.setenv(f"{env_prefix}_HOST", endpoint.host) - monkeypatch_module.setenv(f"{env_prefix}_PORT", str(endpoint.port)) + _monkeypatch_module.setenv(f"{env_prefix}_HOST", endpoint.host) + _monkeypatch_module.setenv(f"{env_prefix}_PORT", str(endpoint.port)) diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py new file mode 100644 index 00000000000..e897b9ced75 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_data_models.py @@ -0,0 +1,253 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterator, Awaitable, Callable +from contextlib import asynccontextmanager +from typing import Any + +import pytest +import sqlalchemy as sa +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import TypeAdapter +from simcore_postgres_database.models.project_to_groups import project_to_groups +from simcore_postgres_database.storage_models import projects, users +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from .helpers.faker_factories import DEFAULT_FAKER, random_project, random_user + + +@asynccontextmanager +async def _user_context( + sqlalchemy_async_engine: AsyncEngine, *, name: str +) -> AsyncIterator[UserID]: + # inject a random user in db + + # NOTE: Ideally this (and next fixture) should be done via webserver API but at this point + # in time, the webserver service would bring more dependencies to other services + # which would turn this test too complex. 
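The NOTE above motivates injecting test users straight into postgres rather than going through the webserver API. For orientation, a hypothetical consumer of the `user_id` fixture defined below could look like the following sketch (the test name and assertions are illustrative only; `users` and `sqlalchemy_async_engine` are the objects already used in this module):

# sketch of a test that consumes the `user_id` fixture defined below
import sqlalchemy as sa
from simcore_postgres_database.storage_models import users
from sqlalchemy.ext.asyncio import AsyncEngine


async def test_injected_user_exists(
    user_id: int,  # provided by the fixture below; cleaned up on teardown
    sqlalchemy_async_engine: AsyncEngine,
):
    async with sqlalchemy_async_engine.connect() as conn:
        result = await conn.execute(
            sa.select(users.c.id).where(users.c.id == user_id)
        )
        assert result.scalar_one() == user_id
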
+ + # pylint: disable=no-value-for-parameter + stmt = users.insert().values(**random_user(name=name)).returning(users.c.id) + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute(stmt) + row = result.one() + assert isinstance(row.id, int) + + try: + yield TypeAdapter(UserID).validate_python(row.id) + finally: + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute(users.delete().where(users.c.id == row.id)) + + +@pytest.fixture +async def user_id(sqlalchemy_async_engine: AsyncEngine) -> AsyncIterator[UserID]: + async with _user_context(sqlalchemy_async_engine, name="test-user") as new_user_id: + yield new_user_id + + +@pytest.fixture +async def other_user_id(sqlalchemy_async_engine: AsyncEngine) -> AsyncIterator[UserID]: + async with _user_context( + sqlalchemy_async_engine, name="test-other-user" + ) as new_user_id: + yield new_user_id + + +@pytest.fixture +async def create_project( + user_id: UserID, sqlalchemy_async_engine: AsyncEngine +) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]: + created_project_uuids = [] + + async def _creator(**kwargs) -> dict[str, Any]: + prj_config = {"prj_owner": user_id} + prj_config.update(kwargs) + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( + projects.insert() + .values(**random_project(DEFAULT_FAKER, **prj_config)) + .returning(sa.literal_column("*")) + ) + row = result.one() + created_project_uuids.append(row.uuid) + return dict(row._asdict()) + + yield _creator + # cleanup + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + projects.delete().where(projects.c.uuid.in_(created_project_uuids)) + ) + + +@pytest.fixture +async def create_project_access_rights( + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[Callable[[ProjectID, UserID, bool, bool, bool], Awaitable[None]]]: + _created = [] + + async def _creator( + project_id: ProjectID, user_id: UserID, read: bool, write: bool, delete: bool + ) -> None: + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( + project_to_groups.insert() + .values( + project_uuid=f"{project_id}", + gid=sa.select(users.c.primary_gid) + .where(users.c.id == user_id) + .scalar_subquery(), + read=read, + write=write, + delete=delete, + ) + .returning(sa.literal_column("*")) + ) + row = result.one() + _created.append((row.project_uuid, row.gid)) + + yield _creator + + # cleanup + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + project_to_groups.delete().where( + sa.or_( + *( + (project_to_groups.c.project_uuid == pid) + & (project_to_groups.c.gid == gid) + for pid, gid in _created + ) + ) + ) + ) + + +@pytest.fixture +async def project_id( + create_project: Callable[[], Awaitable[dict[str, Any]]], +) -> ProjectID: + project = await create_project() + return ProjectID(project["uuid"]) + + +@pytest.fixture +async def collaborator_id( + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[UserID]: + async with _user_context( + sqlalchemy_async_engine, name="collaborator" + ) as new_user_id: + yield TypeAdapter(UserID).validate_python(new_user_id) + + +@pytest.fixture +def share_with_collaborator( + sqlalchemy_async_engine: AsyncEngine, + collaborator_id: UserID, + user_id: UserID, + project_id: ProjectID, +) -> Callable[[], Awaitable[None]]: + async def _get_user_group(conn: AsyncConnection, query_user: int) -> int: + result = await conn.execute( + sa.select(users.c.primary_gid).where(users.c.id == query_user) + ) + row = 
result.fetchone() + assert row + primary_gid: int = row.primary_gid + return primary_gid + + async def _() -> None: + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( + sa.select(projects.c.access_rights).where( + projects.c.uuid == f"{project_id}" + ) + ) + row = result.fetchone() + assert row + access_rights: dict[str | int, Any] = row.access_rights + + access_rights[await _get_user_group(conn, user_id)] = { + "read": True, + "write": True, + "delete": True, + } + access_rights[await _get_user_group(conn, collaborator_id)] = { + "read": True, + "write": True, + "delete": False, + } + + await conn.execute( + projects.update() + .where(projects.c.uuid == f"{project_id}") + .values(access_rights=access_rights) + ) + + # project_to_groups needs to be updated + for group_id, permissions in access_rights.items(): + insert_stmt = pg_insert(project_to_groups).values( + project_uuid=f"{project_id}", + gid=int(group_id), + read=permissions["read"], + write=permissions["write"], + delete=permissions["delete"], + created=sa.func.now(), + modified=sa.func.now(), + ) + on_update_stmt = insert_stmt.on_conflict_do_update( + index_elements=[ + project_to_groups.c.project_uuid, + project_to_groups.c.gid, + ], + set_={ + "read": insert_stmt.excluded.read, + "write": insert_stmt.excluded.write, + "delete": insert_stmt.excluded.delete, + "modified": sa.func.now(), + }, + ) + await conn.execute(on_update_stmt) + + return _ + + +@pytest.fixture +async def create_project_node( + user_id: UserID, sqlalchemy_async_engine: AsyncEngine, faker: Faker +) -> Callable[..., Awaitable[NodeID]]: + async def _creator( + project_id: ProjectID, node_id: NodeID | None = None, **kwargs + ) -> NodeID: + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( + sa.select(projects.c.workbench).where( + projects.c.uuid == f"{project_id}" + ) + ) + row = result.fetchone() + assert row + project_workbench: dict[str, Any] = row.workbench + new_node_id = node_id or NodeID(f"{faker.uuid4()}") + node_data = { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "pytest_fake_node", + } + node_data.update(**kwargs) + project_workbench.update({f"{new_node_id}": node_data}) + await conn.execute( + projects.update() + .where(projects.c.uuid == f"{project_id}") + .values(workbench=project_workbench) + ) + return new_node_id + + return _creator diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_datcore_adapter.py b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_datcore_adapter.py new file mode 100644 index 00000000000..892d63060e0 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_datcore_adapter.py @@ -0,0 +1,66 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import re +from collections.abc import Iterator + +import httpx +import pytest +import respx +from faker import Faker +from fastapi_pagination import Page, Params +from pytest_simcore.helpers.host import get_localhost_ip +from servicelib.aiohttp import status +from simcore_service_storage.modules.datcore_adapter.datcore_adapter_settings import ( + DatcoreAdapterSettings, +) + + +@pytest.fixture +def datcore_adapter_service_mock(faker: Faker) -> Iterator[respx.MockRouter]: + dat_core_settings = DatcoreAdapterSettings.create_from_envs() + datcore_adapter_base_url = dat_core_settings.endpoint + # mock base endpoint + with respx.mock( + base_url=datcore_adapter_base_url, + 
assert_all_called=False, + assert_all_mocked=True, + ) as respx_mocker: + # NOTE: passthrough the locahost and the local ip + respx_mocker.route(host="127.0.0.1").pass_through() + respx_mocker.route(host=get_localhost_ip()).pass_through() + + respx_mocker.get("/user/profile", name="get_user_profile").respond( + status.HTTP_200_OK, json=faker.pydict(allowed_types=(str,)) + ) + respx_mocker.get( + re.compile(r"/datasets/(?P[^/]+)/files_legacy") + ).respond(status.HTTP_200_OK, json=[]) + list_datasets_re = re.compile(r"/datasets") + respx_mocker.get(list_datasets_re, name="list_datasets").respond( + status.HTTP_200_OK, + json=Page.create(items=[], params=Params(size=10), total=0).model_dump( + mode="json" + ), + ) + + def _create_download_link(request, file_id): + return httpx.Response( + status.HTTP_404_NOT_FOUND, + json={"error": f"{file_id} not found!"}, + ) + + respx_mocker.get( + re.compile(r"/files/(?P[^/]+)"), name="get_file_dowload_link" + ).mock(side_effect=_create_download_link) + + respx_mocker.get( + "/", + name="healthcheck", + ).respond(status.HTTP_200_OK, json={"message": "ok"}) + respx_mocker.get("", name="base_endpoint").respond( + status.HTTP_200_OK, json={"message": "root entrypoint"} + ) + + yield respx_mocker diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_service.py b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_service.py index 2e1c6f4dc86..02e3ddbc167 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_storage_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_storage_service.py @@ -2,28 +2,33 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name import os +from collections.abc import Callable, Iterable from copy import deepcopy -from typing import Callable, Iterable +from pathlib import Path import aiohttp import pytest import tenacity -from minio import Minio from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID, SimcoreS3FileID -from pydantic import parse_obj_as -from servicelib.minio_utils import MinioRetryPolicyUponInitialization +from pydantic import TypeAdapter +from pytest_mock import MockerFixture from yarl import URL -from .helpers.utils_docker import get_localhost_ip, get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.host import get_localhost_ip +from .helpers.storage import replace_storage_endpoint +from .helpers.typing_env import EnvVarsDict @pytest.fixture(scope="module") -def storage_endpoint(docker_stack: dict, testing_environ_vars: dict) -> Iterable[URL]: - prefix = testing_environ_vars["SWARM_STACK_NAME"] +def storage_endpoint( + docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict +) -> Iterable[URL]: + prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"] assert f"{prefix}_storage" in docker_stack["services"] - default_port = testing_environ_vars["STORAGE_ENDPOINT"].split(":")[1] + default_port = int(env_vars_for_docker_compose["STORAGE_ENDPOINT"].split(":")[1]) endpoint = ( f"{get_localhost_ip()}:{get_service_published_port('storage', default_port)}" ) @@ -38,31 +43,51 @@ def storage_endpoint(docker_stack: dict, testing_environ_vars: dict) -> Iterable os.environ = old_environ -@pytest.fixture(scope="function") +@pytest.fixture() async def storage_service( - minio_service: Minio, storage_endpoint: URL, docker_stack: dict + mocker: MockerFixture, storage_endpoint: URL, docker_stack: dict ) -> URL: await wait_till_storage_responsive(storage_endpoint) + # NOTE: Mock 
to ensure container IP agrees with host IP when testing + assert storage_endpoint.host is not None + assert storage_endpoint.port is not None + mocker.patch( + "simcore_sdk.node_ports_common._filemanager_utils._get_https_link_if_storage_secure", + replace_storage_endpoint(storage_endpoint.host, storage_endpoint.port), + ) + return storage_endpoint -# TODO: this can be used by ANY of the simcore services! -@tenacity.retry(**MinioRetryPolicyUponInitialization().kwargs) +@tenacity.retry( + wait=tenacity.wait_fixed(1), + stop=tenacity.stop_after_delay(30), + reraise=True, +) async def wait_till_storage_responsive(storage_endpoint: URL): - async with aiohttp.ClientSession() as session: - async with session.get(storage_endpoint.with_path("/v0/")) as resp: - assert resp.status == 200 - data = await resp.json() - assert "data" in data - assert data["data"] is not None + async with ( + aiohttp.ClientSession() as session, + session.get(storage_endpoint.with_path("/v0/")) as resp, + ): + assert resp.status == 200 + data = await resp.json() + assert "data" in data + assert data["data"] is not None @pytest.fixture def create_simcore_file_id() -> Callable[[ProjectID, NodeID, str], SimcoreS3FileID]: def _creator( - project_id: ProjectID, node_id: NodeID, file_name: str + project_id: ProjectID, + node_id: NodeID, + file_name: str, + file_base_path: Path | None = None, ) -> SimcoreS3FileID: - return parse_obj_as(SimcoreS3FileID, f"{project_id}/{node_id}/{file_name}") + s3_file_name = file_name + if file_base_path: + s3_file_name = f"{file_base_path / file_name}" + clean_path = Path(f"{project_id}/{node_id}/{s3_file_name}") + return TypeAdapter(SimcoreS3FileID).validate_python(f"{clean_path}") return _creator diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py new file mode 100644 index 00000000000..cc31177abce --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_groups_fixtures.py @@ -0,0 +1,160 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +""" + + Fixtures for groups + + NOTE: These fixtures are used in integration and unit tests +""" + + +from collections.abc import AsyncIterator +from typing import Any + +import pytest +from aiohttp import web +from aiohttp.test_utils import TestClient +from models_library.api_schemas_webserver.groups import GroupGet +from models_library.groups import GroupsByTypeTuple, StandardGroupCreate +from models_library.users import UserID +from pytest_simcore.helpers.webserver_login import NewUser, UserInfoDict +from simcore_service_webserver.groups._groups_service import ( + add_user_in_group, + create_standard_group, + delete_standard_group, + list_user_groups_with_read_access, +) + + +def _groupget_model_dump(group, access_rights) -> dict[str, Any]: + return GroupGet.from_domain_model(group, access_rights).model_dump( + mode="json", + by_alias=True, + exclude_unset=True, + ) + + +async def _create_organization( + app: web.Application, user_id: UserID, new_group: dict +) -> dict[str, Any]: + group, access_rights = await create_standard_group( + app, + user_id=user_id, + create=StandardGroupCreate.model_validate(new_group), + ) + return _groupget_model_dump(group=group, access_rights=access_rights) + + +# +# USER'S GROUPS FIXTURES +# + + +@pytest.fixture +async def standard_groups_owner( + client: TestClient, + logged_user: UserInfoDict, +) -> 
AsyncIterator[UserInfoDict]: + """ + standard_groups_owner creates TWO organizations and adds logged_user in them + """ + + assert client.app + # create a separate account to own standard groups + async with NewUser( + { + "name": f"{logged_user['name']}_groups_owner", + "role": "USER", + }, + client.app, + ) as owner_user: + + # creates two groups + sparc_group = await _create_organization( + app=client.app, + user_id=owner_user["id"], + new_group={ + "name": "SPARC", + "description": "Stimulating Peripheral Activity to Relieve Conditions", + "thumbnail": "https://commonfund.nih.gov/sites/default/files/sparc-image-homepage500px.png", + "inclusion_rules": {"email": r"@(sparc)+\.(io|com)$"}, + }, + ) + team_black_group = await _create_organization( + app=client.app, + user_id=owner_user["id"], + new_group={ + "name": "team Black", + "description": "THE incredible black team", + "thumbnail": None, + "inclusion_rules": {"email": r"@(black)+\.(io|com)$"}, + }, + ) + + # adds logged_user to sparc group + await add_user_in_group( + app=client.app, + user_id=owner_user["id"], + group_id=sparc_group["gid"], + new_by_user_id=logged_user["id"], + ) + + # adds logged_user to team-black group + await add_user_in_group( + app=client.app, + user_id=owner_user["id"], + group_id=team_black_group["gid"], + new_by_user_id=logged_user["id"], + ) + + yield owner_user + + # clean groups + await delete_standard_group( + client.app, user_id=owner_user["id"], group_id=sparc_group["gid"] + ) + await delete_standard_group( + client.app, user_id=owner_user["id"], group_id=team_black_group["gid"] + ) + + +@pytest.fixture +async def logged_user_groups_by_type( + client: TestClient, logged_user: UserInfoDict, standard_groups_owner: UserInfoDict +) -> GroupsByTypeTuple: + assert client.app + + assert logged_user["id"] != standard_groups_owner["id"] + + groups_by_type = await list_user_groups_with_read_access( + client.app, user_id=logged_user["id"] + ) + assert groups_by_type.primary + assert groups_by_type.everyone + return groups_by_type + + +@pytest.fixture +def primary_group( + logged_user_groups_by_type: GroupsByTypeTuple, +) -> dict[str, Any]: + """`logged_user`'s primary group""" + assert logged_user_groups_by_type.primary + return _groupget_model_dump(*logged_user_groups_by_type.primary) + + +@pytest.fixture +def standard_groups( + logged_user_groups_by_type: GroupsByTypeTuple, +) -> list[dict[str, Any]]: + """owned by `standard_groups_owner` and shared with `logged_user`""" + return [_groupget_model_dump(*sg) for sg in logged_user_groups_by_type.standard] + + +@pytest.fixture +def all_group( + logged_user_groups_by_type: GroupsByTypeTuple, +) -> dict[str, Any]: + assert logged_user_groups_by_type.everyone + return _groupget_model_dump(*logged_user_groups_by_type.everyone) diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py index 38a57d519e0..2533cad65dd 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_projects_rest_api.py @@ -6,7 +6,7 @@ from copy import deepcopy from dataclasses import dataclass from http import HTTPStatus -from typing import Any, Literal, Optional +from typing import Any, Literal import pytest @@ -17,11 +17,13 @@ class HttpApiCallCapture: Captures relevant information of a call to the http api """ + name: str description: str - method: Literal["GET", "PUT", 
"POST", "PATCH"] + method: Literal["GET", "PUT", "POST", "PATCH", "DELETE"] path: str - request_payload: Optional[dict[str, Any]] - response_body: Optional[dict[str, Any]] + query: str | None = None + request_payload: dict[str, Any] | None = None + response_body: dict[str, Any] | None = None status_code: HTTPStatus = HTTPStatus.OK def __str__(self) -> str: @@ -38,7 +40,9 @@ def request_desc(self) -> str: # This data can be obtained using the browser's developer tools # + NEW_PROJECT = HttpApiCallCapture( + name="NEW_PROJECT", description="Press 'New Project'", method="POST", path="/v0/projects", @@ -69,6 +73,13 @@ def request_desc(self) -> str: "locked": {"value": False, "status": "CLOSED"}, "state": {"value": "NOT_STARTED"}, }, + "dev": None, + "workspace_id": None, + "type": "STANDARD", + "templateType": None, + "folder_id": None, + "trashedAt": None, + "trashedBy": None, }, "error": None, }, @@ -77,6 +88,7 @@ def request_desc(self) -> str: GET_PROJECT = HttpApiCallCapture( + name="GET_PROJECT", description="Received newly created project", method="GET", path="/v0/projects/18f1938c-567d-11ec-b2f3-02420a000010", @@ -91,6 +103,10 @@ def request_desc(self) -> str: "creationDate": "2021-12-06T10:13:03.100Z", "lastChangeDate": "2021-12-06T10:13:03.100Z", "workbench": {}, + "workspaceId": 123, + "type": "STANDARD", + "templateType": None, + "folderId": 2, "accessRights": {"2": {"read": True, "write": True, "delete": True}}, "dev": {}, "classifiers": [], @@ -101,12 +117,17 @@ def request_desc(self) -> str: "locked": {"value": False, "status": "CLOSED"}, "state": {"value": "NOT_STARTED"}, }, + "workspace_id": None, + "folder_id": None, + "trashedAt": "2021-12-06T10:13:18.100Z", + "trashedBy": 3, } }, ) OPEN_PROJECT = HttpApiCallCapture( + name="OPEN_PROJECT", description="Open newly created project, i.e. 
project becomes active and dy-services are started", method="POST", path="/v0/projects/18f1938c-567d-11ec-b2f3-02420a000010:open", @@ -139,12 +160,19 @@ def request_desc(self) -> str: }, "state": {"value": "NOT_STARTED"}, }, + "workspace_id": None, + "type": "STANDARD", + "templateType": None, + "folder_id": None, + "trashedAt": None, + "trashedBy": None, } }, ) REPLACE_PROJECT = HttpApiCallCapture( + name="REPLACE_PROJECT", description="Saving periodically the project after modification (autosave)", method="PUT", path="/v0/projects/18f1938c-567d-11ec-b2f3-02420a000010", @@ -210,6 +238,8 @@ def request_desc(self) -> str: "creationDate": "2021-12-06T10:13:03.100Z", "lastChangeDate": "2021-12-06T10:13:07.347Z", "workbench": {}, + "type": "STANDARD", + "templateType": None, "accessRights": {"2": {"read": True, "write": True, "delete": True}}, "dev": {}, "classifiers": [], @@ -265,12 +295,17 @@ def request_desc(self) -> str: }, "state": {"value": "NOT_STARTED"}, }, + "workspace_id": None, + "folder_id": None, + "trashedAt": None, + "trashedBy": None, } }, ) REPLACE_PROJECT_ON_MODIFIED = HttpApiCallCapture( + name="REPLACE_PROJECT_ON_MODIFIED", description="After the user adds an iterator 1:3 and two sleepers, the project is saved", method="PUT", path="/v0/projects/18f1938c-567d-11ec-b2f3-02420a000010", @@ -454,6 +489,12 @@ def request_desc(self) -> str: }, "accessRights": {"2": {"read": True, "write": True, "delete": True}}, "dev": {}, + "workspace_id": None, + "type": "STANDARD", + "templateType": None, + "folder_id": None, + "trashedAt": None, + "trashedBy": None, "classifiers": [], "ui": { "mode": "workbench", @@ -523,6 +564,7 @@ def request_desc(self) -> str: RUN_PROJECT = HttpApiCallCapture( + name="RUN_PROJECT", description="User press run button", method="POST", path="/computations/18f1938c-567d-11ec-b2f3-02420a000010:start", @@ -538,6 +580,7 @@ def request_desc(self) -> str: CLOSE_PROJECT = HttpApiCallCapture( + name="CLOSE_PROJECT", description="Back to the dashboard, project closes", method="POST", path="/v0/projects/18f1938c-567d-11ec-b2f3-02420a000010:close", @@ -549,6 +592,7 @@ def request_desc(self) -> str: LIST_PROJECTS = HttpApiCallCapture( + name="LIST_PROJECTS", description="Open browser in ashboard and user gets all projects", method="POST", path="/v0/projects?type=user&offset=0&limit=10", @@ -649,6 +693,12 @@ def request_desc(self) -> str: }, "classifiers": [], "dev": {}, + "workspace_id": None, + "type": "STANDARD", + "templateType": None, + "folder_id": None, + "trashedAt": None, + "trashed_by": None, "quality": { "enabled": True, "tsr_target": { @@ -705,6 +755,298 @@ def request_desc(self) -> str: ) +CREATE_FROM_TEMPLATE = HttpApiCallCapture( + name="CREATE_FROM_TEMPLATE", + description="Click 'Sleeper study' card in Templates tab", + method="POST", + path="/v0/projects", + query="from_study=ee87ff60-4147-4381-bcb8-59d076dbc788", + request_payload={ + "uuid": "", + "name": "Sleepers", + "description": "5 sleepers interconnected", + "prjOwner": "", + "accessRights": {}, + "creationDate": "2023-04-13T10:12:13.197Z", + "lastChangeDate": "2023-04-13T10:12:13.197Z", + "thumbnail": "https://raw.githubusercontent.com/ITISFoundation/osparc-assets/main/assets/TheSoftWatches.jpg", + "workbench": {}, + }, + response_body={ + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13", + "task_name": "POST /v0/projects?from_study=ee87ff60-4147-4381-bcb8-59d076dbc788", + "status_href": 
"/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13", + "result_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13/result", + "abort_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13", + } + }, + status_code=HTTPStatus.ACCEPTED, # 202 +) + + +CREATE_FROM_TEMPLATE__TASK_STATUS = HttpApiCallCapture( + name="CREATE_FROM_TEMPLATE__TASK_STATUS", + description="status_href that follows from CREATE_FROM_TEMPLATE", + method="GET", + path="/v0/tasks/POST%20%2Fv0%2Fprojects%3Ffrom_study%3Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13", + response_body={ + "data": { + "task_progress": {"message": "creating new study...", "percent": 0.0}, + "done": False, + "started": "2023-04-13T10:16:45.602233", + } + }, + status_code=HTTPStatus.OK, # 200 +) + + +CREATE_FROM_TEMPLATE__TASK_RESULT = HttpApiCallCapture( + name="CREATE_FROM_TEMPLATE__TASK_RESULT", + description="status_href that follows from CREATE_FROM_TEMPLATE", + method="GET", + path="/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Dee87ff60-4147-4381-bcb8-59d076dbc788.261e4470-4132-47a3-82d1-7c38bed30e13/result", + response_body={ + "data": { + "uuid": "4c58409a-d9e4-11ed-9c9e-02420a0b755a", + "name": "Sleepers", + "description": "5 sleepers interconnected", + "thumbnail": "https://raw.githubusercontent.com/ITISFoundation/osparc-assets/main/assets/TheSoftWatches.jpg", + "creationDate": "2023-04-13T10:16:47.521Z", + "lastChangeDate": "2023-04-13T10:16:48.572Z", + "accessRights": {"4": {"read": True, "write": True, "delete": True}}, + "workbench": { + "f67a6277-b47f-5a17-9782-b9a92600e8c9": { + "key": "simcore/services/comp/itis/sleeper", + "version": "1.0.0", + "label": "sleeper 0", + "inputs": {"in_2": 2}, + "inputAccess": {"in_1": "Invisible", "in_2": "ReadOnly"}, + "inputNodes": [], + "outputNode": False, + "outputs": {}, + "progress": 0, + "thumbnail": "", + "position": {"x": 50, "y": 300}, + "state": { + "modified": True, + "dependencies": [], + "currentStatus": "NOT_STARTED", + }, + }, + "c898ccef-8ac9-5346-8e8b-99546c551d79": { + "key": "simcore/services/comp/itis/sleeper", + "version": "1.0.0", + "label": "sleeper 1", + "inputs": { + "in_1": { + "nodeUuid": "f67a6277-b47f-5a17-9782-b9a92600e8c9", + "output": "out_1", + }, + "in_2": 2, + }, + "inputNodes": ["f67a6277-b47f-5a17-9782-b9a92600e8c9"], + "outputNode": False, + "outputs": {}, + "progress": 0, + "thumbnail": "", + "position": {"x": 300, "y": 200}, + "state": { + "modified": True, + "dependencies": ["f67a6277-b47f-5a17-9782-b9a92600e8c9"], + "currentStatus": "NOT_STARTED", + }, + }, + "52a6c113-0615-55cd-b32f-5a8ead710562": { + "key": "simcore/services/comp/itis/sleeper", + "version": "1.0.0", + "label": "sleeper 2", + "inputs": { + "in_1": { + "nodeUuid": "c898ccef-8ac9-5346-8e8b-99546c551d79", + "output": "out_1", + }, + "in_2": { + "nodeUuid": "c898ccef-8ac9-5346-8e8b-99546c551d79", + "output": "out_2", + }, + }, + "inputNodes": ["c898ccef-8ac9-5346-8e8b-99546c551d79"], + "outputNode": False, + "outputs": {}, + "progress": 0, + "thumbnail": "", + "position": {"x": 550, "y": 200}, + "state": { + "modified": True, + "dependencies": ["c898ccef-8ac9-5346-8e8b-99546c551d79"], + "currentStatus": "NOT_STARTED", + }, + }, + "1a93a810-749f-58c4-9506-0be716268427": { + "key": 
"simcore/services/comp/itis/sleeper", + "version": "1.0.0", + "label": "sleeper 3", + "inputs": { + "in_2": { + "nodeUuid": "f67a6277-b47f-5a17-9782-b9a92600e8c9", + "output": "out_2", + } + }, + "inputNodes": ["f67a6277-b47f-5a17-9782-b9a92600e8c9"], + "outputNode": False, + "outputs": {}, + "progress": 0, + "thumbnail": "", + "position": {"x": 420, "y": 400}, + "state": { + "modified": True, + "dependencies": ["f67a6277-b47f-5a17-9782-b9a92600e8c9"], + "currentStatus": "NOT_STARTED", + }, + }, + "281f7845-f7ee-57a7-9b66-81931a30b254": { + "key": "simcore/services/comp/itis/sleeper", + "version": "1.0.0", + "label": "sleeper 4", + "inputs": { + "in_1": { + "nodeUuid": "52a6c113-0615-55cd-b32f-5a8ead710562", + "output": "out_1", + }, + "in_2": { + "nodeUuid": "1a93a810-749f-58c4-9506-0be716268427", + "output": "out_2", + }, + }, + "inputNodes": [ + "52a6c113-0615-55cd-b32f-5a8ead710562", + "1a93a810-749f-58c4-9506-0be716268427", + ], + "outputNode": False, + "outputs": {}, + "progress": 0, + "thumbnail": "", + "position": {"x": 800, "y": 300}, + "state": { + "modified": True, + "dependencies": [ + "1a93a810-749f-58c4-9506-0be716268427", + "52a6c113-0615-55cd-b32f-5a8ead710562", + ], + "currentStatus": "NOT_STARTED", + }, + }, + }, + "ui": { + "mode": "workbench", + "slideshow": {}, + "workbench": {}, + "currentNodeId": "", + }, + "classifiers": [], + "dev": {}, + "workspace_id": None, + "type": "STANDARD", + "templateType": None, + "folder_id": None, + "trashedAt": None, + "trashedBy": None, + "quality": { + "enabled": True, + "tsr_target": { + "r01": {"level": 4, "references": ""}, + "r02": {"level": 4, "references": ""}, + "r03": {"level": 4, "references": ""}, + "r04": {"level": 4, "references": ""}, + "r05": {"level": 4, "references": ""}, + "r06": {"level": 4, "references": ""}, + "r07": {"level": 4, "references": ""}, + "r08": {"level": 4, "references": ""}, + "r09": {"level": 4, "references": ""}, + "r10": {"level": 4, "references": ""}, + }, + "annotations": { + "vandv": "", + "limitations": "", + "certificationLink": "", + "certificationStatus": "Uncertified", + }, + "tsr_current": { + "r01": {"level": 0, "references": ""}, + "r02": {"level": 0, "references": ""}, + "r03": {"level": 0, "references": ""}, + "r04": {"level": 0, "references": ""}, + "r05": {"level": 0, "references": ""}, + "r06": {"level": 0, "references": ""}, + "r07": {"level": 0, "references": ""}, + "r08": {"level": 0, "references": ""}, + "r09": {"level": 0, "references": ""}, + "r10": {"level": 0, "references": ""}, + }, + }, + "prjOwner": "user@company.com", + "tags": [22], + "state": { + "locked": {"value": False, "status": "CLOSED"}, + "state": {"value": "NOT_STARTED"}, + }, + } + }, + status_code=HTTPStatus.CREATED, # 201 +) + +DELETE_PROJECT = HttpApiCallCapture( + name="DELETE_PROJECT", + description="Deletes a given study", + method="DELETE", + path="/v0/projects/4c58409a-d9e4-11ed-9c9e-02420a0b755a", + status_code=HTTPStatus.NO_CONTENT, # 204 +) + + +CREATE_FROM_SERVICE = HttpApiCallCapture( + name="CREATE_FROM_SERVICE", + description="Click 'Sleeper service' card in Services tab", + method="POST", + path="/v0/projects", + request_payload={ + "uuid": "", + "name": "sleeper", + "description": "", + "prjOwner": "", + "accessRights": {}, + "creationDate": "2023-04-12T17:47:22.551Z", + "lastChangeDate": "2023-04-12T17:47:22.551Z", + "thumbnail": "https://raw.githubusercontent.com/ITISFoundation/osparc-assets/main/assets/TheSoftWatches.jpg", + "workbench": { + "5ecf6ef9-7600-4ac2-abe5-c3a2cc714e32": { + 
"key": "simcore/services/comp/itis/sleeper", + "version": "2.1.4", + "label": "sleeper", + } + }, + "ui": { + "workbench": { + "5ecf6ef9-7600-4ac2-abe5-c3a2cc714e32": { + "position": {"x": 250, "y": 100} + } + } + }, + }, + response_body={ + "data": { + "task_id": "POST%20%2Fv0%2Fprojects.c81eb383-d5b7-4284-be34-36477530ac2e", + "task_name": "POST /v0/projects", + "status_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects.c81eb383-d5b7-4284-be34-36477530ac2e", + "result_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects.c81eb383-d5b7-4284-be34-36477530ac2e/result", + "abort_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects.c81eb383-d5b7-4284-be34-36477530ac2e", + } + }, + status_code=HTTPStatus.ACCEPTED, # 202 +) + + @pytest.fixture def project_workflow_captures() -> tuple[HttpApiCallCapture, ...]: return tuple(deepcopy(c) for c in SESSION_WORKFLOW) diff --git a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_service.py b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_service.py index 636de9e6852..b885b62232f 100644 --- a/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/simcore_webserver_service.py @@ -6,15 +6,18 @@ import aiohttp import pytest import tenacity -from servicelib.minio_utils import MinioRetryPolicyUponInitialization +from servicelib.minio_utils import ServiceRetryPolicyUponInitialization from yarl import URL -from .helpers.utils_docker import get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.typing_env import EnvVarsDict @pytest.fixture(scope="module") -def webserver_endpoint(docker_stack: dict, testing_environ_vars: dict) -> URL: - prefix = testing_environ_vars["SWARM_STACK_NAME"] +def webserver_endpoint( + docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict +) -> URL: + prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"] assert f"{prefix}_webserver" in docker_stack["services"] endpoint = f"127.0.0.1:{get_service_published_port('webserver', 8080)}" @@ -29,7 +32,7 @@ async def webserver_service(webserver_endpoint: URL, docker_stack: dict) -> URL: # TODO: this can be used by ANY of the simcore services! 
-@tenacity.retry(**MinioRetryPolicyUponInitialization().kwargs) +@tenacity.retry(**ServiceRetryPolicyUponInitialization().kwargs) async def wait_till_webserver_responsive(webserver_endpoint: URL): async with aiohttp.ClientSession() as session: async with session.get(webserver_endpoint.with_path("/v0/")) as resp: diff --git a/packages/pytest-simcore/src/pytest_simcore/socketio.py b/packages/pytest-simcore/src/pytest_simcore/socketio.py new file mode 100644 index 00000000000..fd1f21c24a8 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/socketio.py @@ -0,0 +1,132 @@ +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +from collections.abc import AsyncIterable, AsyncIterator, Callable +from contextlib import _AsyncGeneratorContextManager, asynccontextmanager +from unittest.mock import AsyncMock + +import pytest +import socketio +from aiohttp import web +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.users import UserID +from pytest_mock import MockerFixture +from servicelib.socketio_utils import cleanup_socketio_async_pubsub_manager +from settings_library.rabbit import RabbitSettings +from socketio import AsyncAioPikaManager, AsyncServer +from yarl import URL + + +@pytest.fixture +async def socketio_server_factory() -> ( + Callable[[RabbitSettings], _AsyncGeneratorContextManager[AsyncServer]] +): + @asynccontextmanager + async def _(rabbit_settings: RabbitSettings) -> AsyncIterator[AsyncServer]: + # Same configuration as simcore_service_webserver/socketio/server.py + server_manager = AsyncAioPikaManager(url=rabbit_settings.dsn) + + server = AsyncServer( + async_mode="aiohttp", engineio_logger=True, client_manager=server_manager + ) + + yield server + + await cleanup_socketio_async_pubsub_manager(server_manager) + + return _ + + +@pytest.fixture +async def socketio_server() -> AsyncIterable[AsyncServer]: + msg = "must be implemented in test" + raise NotImplementedError(msg) + + +@pytest.fixture +async def web_server( + socketio_server: AsyncServer, unused_tcp_port_factory: Callable[[], int] +) -> AsyncIterator[URL]: + """ + this emulates the webserver setup: socketio server with + an aiopika manager that attaches an aiohttp web app + """ + aiohttp_app = web.Application() + socketio_server.attach(aiohttp_app) + + server_port = unused_tcp_port_factory() + + runner = web.AppRunner(aiohttp_app) + await runner.setup() + + site = web.TCPSite(runner, "localhost", server_port) + await site.start() + + yield URL(f"http://localhost:{server_port}") + + await site.stop() + await runner.cleanup() + + +@pytest.fixture +async def server_url(web_server: URL) -> str: + return f'{web_server.with_path("/")}' + + +@pytest.fixture +def socketio_client_factory( + server_url: str, +) -> Callable[[], _AsyncGeneratorContextManager[socketio.AsyncClient]]: + @asynccontextmanager + async def _() -> AsyncIterator[socketio.AsyncClient]: + """This emulates a socketio client in the front-end""" + client = socketio.AsyncClient(logger=True, engineio_logger=True) + await client.connect(f"{server_url}", transports=["websocket"]) + + yield client + + await client.disconnect() + + return _ + + +@pytest.fixture +def room_name() -> SocketIORoomStr: + msg = "must be implemented in test" + raise NotImplementedError(msg) + + +@pytest.fixture +def socketio_server_events( + socketio_server: AsyncServer, + mocker: MockerFixture, + user_id: UserID, + room_name: SocketIORoomStr, +) -> dict[str, AsyncMock]: + # handlers + async def connect(sid: str, environ): 
+ print("connecting", sid) + await socketio_server.enter_room(sid, room_name) + + async def on_check(sid, data): + print("check", sid, data) + + async def disconnect(sid: str): + print("disconnecting", sid) + await socketio_server.leave_room(sid, room_name) + + # spies + spy_connect = mocker.AsyncMock(wraps=connect) + socketio_server.on("connect", spy_connect) + + spy_on_check = mocker.AsyncMock(wraps=on_check) + socketio_server.on("check", spy_on_check) + + spy_disconnect = mocker.AsyncMock(wraps=disconnect) + socketio_server.on("disconnect", spy_disconnect) + + return { + connect.__name__: spy_connect, + disconnect.__name__: spy_disconnect, + on_check.__name__: spy_on_check, + } diff --git a/packages/pytest-simcore/src/pytest_simcore/socketio_client.py b/packages/pytest-simcore/src/pytest_simcore/socketio_client.py new file mode 100644 index 00000000000..23b9ee0b190 --- /dev/null +++ b/packages/pytest-simcore/src/pytest_simcore/socketio_client.py @@ -0,0 +1,103 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import logging +from collections.abc import AsyncIterable, Awaitable, Callable +from uuid import uuid4 + +import pytest +import socketio +from aiohttp.test_utils import TestClient +from pytest_simcore.helpers.assert_checks import assert_status +from servicelib.aiohttp import status +from yarl import URL + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def client_session_id_factory() -> Callable[[], str]: + def _create() -> str: + return str(uuid4()) + + return _create + + +@pytest.fixture +def socketio_url_factory(client: TestClient) -> Callable[[TestClient | None], str]: + def _create(client_override: TestClient | None = None) -> str: + SOCKET_IO_PATH = "/socket.io/" + return str((client_override or client).make_url(SOCKET_IO_PATH)) + + return _create + + +@pytest.fixture +async def security_cookie_factory( + client: TestClient, +) -> Callable[[TestClient | None], Awaitable[str]]: + async def _create(client_override: TestClient | None = None) -> str: + # get the cookie by calling the root entrypoint + resp = await (client_override or client).get("/v0/") + data, error = await assert_status(resp, status.HTTP_200_OK) + assert data + assert not error + + return ( + resp.request_info.headers["Cookie"] + if "Cookie" in resp.request_info.headers + else "" + ) + + return _create + + +@pytest.fixture +async def socketio_client_factory( + socketio_url_factory: Callable, + security_cookie_factory: Callable, + client_session_id_factory: Callable, +) -> AsyncIterable[ + Callable[[str | None, TestClient | None], Awaitable[socketio.AsyncClient]] +]: + clients: list[socketio.AsyncClient] = [] + + async def _connect( + client_session_id: str | None = None, client: TestClient | None = None + ) -> socketio.AsyncClient: + if client_session_id is None: + client_session_id = client_session_id_factory() + + sio = socketio.AsyncClient(ssl_verify=False) + # enginio 3.10.0 introduced ssl verification + assert client_session_id + url = str( + URL(socketio_url_factory(client)).with_query( + {"client_session_id": client_session_id} + ) + ) + headers = {} + cookie = await security_cookie_factory(client) + if cookie: + # WARNING: engineio fails with empty cookies. Expects "key=value" + headers.update({"Cookie": cookie}) + + print(f"--> Connecting socketio client to {url} ...") + await sio.connect(url, headers=headers, wait_timeout=10) + assert sio.sid + print("... 
connection done") + clients.append(sio) + return sio + + yield _connect + + # cleans up clients produce by _connect(*) calls + for sio in clients: + if sio.connected: + print(f"<--Disconnecting socketio client {sio}") + await sio.disconnect() + await sio.wait() + print(f"... disconnection from {sio} done.") + assert not sio.connected + assert not sio.sid diff --git a/packages/pytest-simcore/src/pytest_simcore/tmp_path_extra.py b/packages/pytest-simcore/src/pytest_simcore/tmp_path_extra.py deleted file mode 100644 index 6ab98fccd6a..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/tmp_path_extra.py +++ /dev/null @@ -1,24 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -""" - Extends temp_path fixture - https://docs.pytest.org/en/6.2.x/tmpdir.html#the-tmp-path-fixture - - NOTE: use tmp_path instead of tmpdir - NOTE: default base temporary directory can be set as `pytest --basetemp=mydir` - -""" -from pathlib import Path - -import pytest -from pytest import FixtureRequest, TempPathFactory - - -@pytest.fixture(scope="module") -def temp_folder(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path: - """Module scoped temporary folder""" - prefix = __name__.replace(".", "_") - return tmp_path_factory.mktemp( - basename=f"{prefix}_temp_folder_{request.module.__name__}", numbered=True - ) diff --git a/packages/pytest-simcore/src/pytest_simcore/traefik_service.py b/packages/pytest-simcore/src/pytest_simcore/traefik_service.py index b72b26fea9b..881e9173185 100644 --- a/packages/pytest-simcore/src/pytest_simcore/traefik_service.py +++ b/packages/pytest-simcore/src/pytest_simcore/traefik_service.py @@ -6,20 +6,21 @@ import aiohttp import pytest import tenacity -from servicelib.minio_utils import MinioRetryPolicyUponInitialization +from servicelib.minio_utils import ServiceRetryPolicyUponInitialization from yarl import URL -from .helpers.utils_docker import get_service_published_port +from .helpers.docker import get_service_published_port +from .helpers.typing_env import EnvVarsDict @pytest.fixture(scope="module") def traefik_endpoints( - docker_stack: dict, testing_environ_vars: dict + docker_stack: dict, env_vars_for_docker_compose: EnvVarsDict ) -> tuple[URL, URL, URL]: """get the endpoint for the given simcore_service. NOTE: simcore_service defined as a parametrization """ - prefix = testing_environ_vars["SWARM_STACK_NAME"] + prefix = env_vars_for_docker_compose["SWARM_STACK_NAME"] assert f"{prefix}_traefik" in docker_stack["services"] traefik_api_endpoint = f"127.0.0.1:{get_service_published_port('traefik', 8080)}" @@ -32,17 +33,18 @@ def traefik_endpoints( ) -@pytest.fixture(scope="function") +@pytest.fixture() async def traefik_service( - loop, traefik_endpoints: tuple[URL, URL, URL], docker_stack: dict + traefik_endpoints: tuple[URL, URL, URL], + docker_stack: dict, ) -> tuple[URL, URL, URL]: traefik_api_endpoint, webserver_endpoint, apiserver_endpoint = traefik_endpoints await wait_till_traefik_responsive(traefik_api_endpoint) - yield traefik_endpoints + return traefik_endpoints # TODO: this can be used by ANY of the simcore services! 
-@tenacity.retry(**MinioRetryPolicyUponInitialization().kwargs) +@tenacity.retry(**ServiceRetryPolicyUponInitialization().kwargs) async def wait_till_traefik_responsive(api_endpoint: URL): async with aiohttp.ClientSession() as session: async with session.get(api_endpoint.with_path("/api/http/routers")) as resp: diff --git a/packages/pytest-simcore/src/pytest_simcore/websocket_client.py b/packages/pytest-simcore/src/pytest_simcore/websocket_client.py deleted file mode 100644 index ca787265bac..00000000000 --- a/packages/pytest-simcore/src/pytest_simcore/websocket_client.py +++ /dev/null @@ -1,105 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import logging -from typing import AsyncIterable, Awaitable, Callable, Optional -from uuid import uuid4 - -import pytest -import socketio -from aiohttp import web -from aiohttp.test_utils import TestClient -from pytest_simcore.helpers.utils_assert import assert_status -from yarl import URL - -logger = logging.getLogger(__name__) - - -@pytest.fixture() -def client_session_id_factory() -> Callable[[], str]: - def _create() -> str: - return str(uuid4()) - - return _create - - -@pytest.fixture() -def socketio_url_factory(client) -> Callable[[Optional[TestClient]], str]: - def _create(client_override: Optional[TestClient] = None) -> str: - SOCKET_IO_PATH = "/socket.io/" - return str((client_override or client).make_url(SOCKET_IO_PATH)) - - return _create - - -@pytest.fixture() -async def security_cookie_factory( - client: TestClient, -) -> Callable[[Optional[TestClient]], Awaitable[str]]: - async def _create(client_override: Optional[TestClient] = None) -> str: - # get the cookie by calling the root entrypoint - resp = await (client_override or client).get("/v0/") - data, error = await assert_status(resp, web.HTTPOk) - assert data - assert not error - - cookie = ( - resp.request_info.headers["Cookie"] - if "Cookie" in resp.request_info.headers - else "" - ) - return cookie - - return _create - - -@pytest.fixture() -async def socketio_client_factory( - socketio_url_factory: Callable, - security_cookie_factory: Callable, - client_session_id_factory: Callable, -) -> AsyncIterable[ - Callable[[Optional[str], Optional[TestClient]], Awaitable[socketio.AsyncClient]] -]: - clients: list[socketio.AsyncClient] = [] - - async def _connect( - client_session_id: Optional[str] = None, client: Optional[TestClient] = None - ) -> socketio.AsyncClient: - - if client_session_id is None: - client_session_id = client_session_id_factory() - - sio = socketio.AsyncClient(ssl_verify=False) - # enginio 3.10.0 introduced ssl verification - assert client_session_id - url = str( - URL(socketio_url_factory(client)).with_query( - {"client_session_id": client_session_id} - ) - ) - headers = {} - cookie = await security_cookie_factory(client) - if cookie: - # WARNING: engineio fails with empty cookies. Expects "key=value" - headers.update({"Cookie": cookie}) - - print(f"--> Connecting socketio client to {url} ...") - await sio.connect(url, headers=headers) - assert sio.sid - print("... connection done") - clients.append(sio) - return sio - - yield _connect - - # cleans up clients produce by _connect(*) calls - for sio in clients: - if sio.connected: - print(f"<--Disconnecting socketio client {sio}") - await sio.disconnect() - await sio.wait() - print(f"... 
disconnection from {sio} done.") - - assert not sio.sid diff --git a/packages/pytest-simcore/tests/conftest.py b/packages/pytest-simcore/tests/conftest.py index 019f9f79578..e69de29bb2d 100644 --- a/packages/pytest-simcore/tests/conftest.py +++ b/packages/pytest-simcore/tests/conftest.py @@ -1,16 +0,0 @@ -# pylint: disable=unused-import -# pylint: disable=broad-exception-raised - -pytest_plugins = "pytester" - - -try: - import pytest_sugar - - raise Exception( - "Cannot run these tests with this module installed: " - "pip uninstall pytest_sugar" - ) -except ImportError: - # GOOD - pass diff --git a/packages/pytest-simcore/tests/test_dev_vendors_compose.py b/packages/pytest-simcore/tests/test_dev_vendors_compose.py new file mode 100644 index 00000000000..2a0d2b17f21 --- /dev/null +++ b/packages/pytest-simcore/tests/test_dev_vendors_compose.py @@ -0,0 +1,40 @@ +import json +from typing import Final + +from settings_library.utils_session import DEFAULT_SESSION_COOKIE_NAME + +pytest_plugins = [ + "pytest_simcore.dev_vendors_compose", + "pytest_simcore.docker_compose", + "pytest_simcore.repository_paths", +] + + +_SERVICE_TO_MIDDLEWARE_MAPPING: Final[dict[str, str]] = { + "manual": "pytest-simcore_manual-auth" +} + + +def test_dev_vendors_docker_compose_auth_enabled( + dev_vendors_docker_compose: dict[str, str] +): + + assert isinstance(dev_vendors_docker_compose["services"], dict) + for service_name, service_spec in dev_vendors_docker_compose["services"].items(): + print( + f"Checking vendor service '{service_name}'\n{json.dumps(service_spec, indent=2)}" + ) + labels = service_spec["deploy"]["labels"] + + # NOTE: when adding a new service it should also be added to the mapping + auth_middleware_name = _SERVICE_TO_MIDDLEWARE_MAPPING[service_name] + + prefix = f"traefik.http.middlewares.{auth_middleware_name}.forwardauth" + + assert labels[f"{prefix}.trustForwardHeader"] == "true" + assert "http://webserver:8080/v0/auth:check" in labels[f"{prefix}.address"] + assert DEFAULT_SESSION_COOKIE_NAME in labels[f"{prefix}.authResponseHeaders"] + assert ( + auth_middleware_name + in labels["traefik.http.routers.pytest-simcore_manual.middlewares"] + ) diff --git a/packages/pytest-simcore/tests/test_docker_compose.py b/packages/pytest-simcore/tests/test_docker_compose.py index 0b46bf23488..36cba7adbb1 100644 --- a/packages/pytest-simcore/tests/test_docker_compose.py +++ b/packages/pytest-simcore/tests/test_docker_compose.py @@ -3,12 +3,11 @@ from textwrap import dedent import pytest -from pytest import FixtureRequest from pytest_simcore.docker_compose import _escape_cpus @pytest.fixture(params=[0.1, 1, 1.0, 100.2313131231]) -def number_of_cpus(request: FixtureRequest) -> float: +def number_of_cpus(request: pytest.FixtureRequest) -> float: return request.param diff --git a/packages/pytest-simcore/tests/test_helpers_utils_dict.py b/packages/pytest-simcore/tests/test_helpers_utils_dict.py deleted file mode 100644 index 524d6a44e30..00000000000 --- a/packages/pytest-simcore/tests/test_helpers_utils_dict.py +++ /dev/null @@ -1,155 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -import json -import sys - -import pytest -from pytest_simcore.helpers.typing_docker import TaskDict -from pytest_simcore.helpers.utils_dict import copy_from_dict, get_from_dict - - -@pytest.fixture -def data(): - return { - "ID": "3ifd79yhz2vpgu1iz43mf9m2d", - "Version": {"Index": 176}, - "CreatedAt": "2021-11-10T17:09:01.892109221Z", - "UpdatedAt": 
"2021-11-10T17:09:35.291164864Z", - "Labels": {}, - "Spec": { - "ContainerSpec": { - "Image": "local/api-server:production", - "Labels": {"com.docker.stack.namespace": "master-simcore"}, - "Hostname": "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}", - "Env": [ - "API_SERVER_DEV_FEATURES_ENABLED=1", - "BF_API_KEY=none", - "BF_API_SECRET=none", - ], - "Privileges": {"CredentialSpec": None, "SELinuxContext": None}, - "Init": True, - "Isolation": "default", - }, - "Resources": {}, - "Placement": {}, - "Networks": [ - {"Target": "roybucjnp44t561jvgy47dd14", "Aliases": ["api-server"]} - ], - "ForceUpdate": 0, - }, - "ServiceID": "77hyhjm6bqs81xp5g3e4ov7wv", - "Slot": 1, - "NodeID": "iz7unuzyzuxbpr80kzheskbbf", - "Status": { - "Timestamp": "2021-11-10T17:09:35.237847117Z", - "State": "running", - "Message": "started", - "ContainerStatus": { - "ContainerID": "8dadeb42eecbcb58295e0508c27c76d46f5106859af30276abcdcd4e4608f39c", - "PID": 1772378, - "ExitCode": 0, - }, - "PortStatus": {}, - }, - "DesiredState": "running", - "NetworksAttachments": [ - { - "Network": { - "ID": "q6ojghy5phzllv63cmwhorbhy", - "Version": {"Index": 6}, - "CreatedAt": "2021-11-10T17:08:36.840863313Z", - "UpdatedAt": "2021-11-10T17:08:36.846648842Z", - "Spec": { - "Name": "ingress", - "Labels": {}, - "DriverConfiguration": {}, - "Ingress": True, - "IPAMOptions": {"Driver": {}}, - "Scope": "swarm", - }, - "DriverState": { - "Name": "overlay", - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "4096" - }, - }, - "IPAMOptions": { - "Driver": {"Name": "default"}, - "Configs": [{"Subnet": "10.1.1.0/24", "Gateway": "10.1.1.1"}], - }, - }, - "Addresses": ["10.1.1.24/24"], - }, - { - "Network": { - "ID": "roybucjnp44t561jvgy47dd14", - "Version": {"Index": 14}, - "CreatedAt": "2021-11-10T17:08:37.532148857Z", - "UpdatedAt": "2021-11-10T17:08:37.533461228Z", - "Spec": { - "Name": "master-simcore_default", - "Labels": {"com.docker.stack.namespace": "master-simcore"}, - "DriverConfiguration": {"Name": "overlay"}, - "Attachable": True, - "Scope": "swarm", - }, - "DriverState": { - "Name": "overlay", - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "4098" - }, - }, - "IPAMOptions": { - "Driver": {"Name": "default"}, - "Configs": [{"Subnet": "10.0.1.0/24", "Gateway": "10.0.1.1"}], - }, - }, - "Addresses": ["10.1.1.1/24"], - }, - ], - } - - -def test_get_from_dict(data: TaskDict): - - assert get_from_dict(data, "Spec.ContainerSpec.Labels") == { - "com.docker.stack.namespace": "master-simcore" - } - # TODO: see that dotted keys cannot be used here, - assert get_from_dict(data, "Invalid.Invalid.Invalid", default=42) == 42 - - -def test_copy_from_dict(data: TaskDict): - - selected_data = copy_from_dict( - data, - include={ - "ID": ..., - "CreatedAt": ..., - "UpdatedAt": ..., - "Spec": {"ContainerSpec": {"Image"}}, - "Status": {"Timestamp", "State", "ContainerStatus"}, - "DesiredState": ..., - }, - ) - - print(json.dumps(selected_data, indent=2)) - - assert selected_data["ID"] == data["ID"] - assert ( - selected_data["Spec"]["ContainerSpec"]["Image"] - == data["Spec"]["ContainerSpec"]["Image"] - ) - assert selected_data["Status"]["State"] == data["Status"]["State"] - assert "Message" not in selected_data["Status"]["State"] - assert "Message" in data["Status"]["State"] - - -if __name__ == "__main__": - # NOTE: use in vscode "Run and Debug" -> select 'Python: Current File' - sys.exit( - pytest.main(["-vv", "-s", "--pdb", "--log-cli-level=WARNING", sys.argv[0]]) - ) diff --git 
a/packages/pytest-simcore/tests/test_helpers_utils_envs.py b/packages/pytest-simcore/tests/test_helpers_utils_envs.py index 7117bf99d60..458a72bb063 100644 --- a/packages/pytest-simcore/tests/test_helpers_utils_envs.py +++ b/packages/pytest-simcore/tests/test_helpers_utils_envs.py @@ -1,7 +1,7 @@ from pathlib import Path from textwrap import dedent -from pytest_simcore.helpers.utils_envs import EnvVarsDict, load_dotenv +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, load_dotenv def test_load_envfile(tmp_path: Path): diff --git a/packages/pytest-simcore/tests/test_robust_websocket.py b/packages/pytest-simcore/tests/test_robust_websocket.py new file mode 100644 index 00000000000..60b320f8782 --- /dev/null +++ b/packages/pytest-simcore/tests/test_robust_websocket.py @@ -0,0 +1,154 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=protected-access + + +import json +import logging +from threading import Thread + +import pytest +import socketio +import uvicorn +from fastapi import FastAPI +from playwright.sync_api import Page +from playwright.sync_api import WebSocket as PlaywrightWebSocket +from playwright.sync_api import sync_playwright +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.playwright import RobustWebSocket + +# FastAPI application setup +app = FastAPI() +sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*") +sio_app = socketio.ASGIApp(sio) +app.mount("/", sio_app) + + +@sio.event +async def connect(sid, environ): + print(f"Server: Client connected: {sid}") + + +@sio.event +async def disconnect(sid): + print(f"Server: Client disconnected: {sid}") + + +@sio.event +async def message(sid, data): + print(f"Server: Message received: {data}") + await sio.send(f"Echo: {data}") + + +@pytest.fixture(scope="module") +def fastapi_server(): + """Starts a FastAPI server in a separate thread.""" + server_thread = Thread( + target=uvicorn.run, + kwargs={ + "app": app, + "host": "127.0.0.1", + "port": 8000, + "log_level": "info", + }, + daemon=True, + ) + server_thread.start() + yield "http://127.0.0.1:8000" + # No explicit shutdown needed as the thread is daemonized + + +@pytest.fixture +def real_page() -> Page: + with sync_playwright() as playwright: + browser = playwright.chromium.launch(headless=True) + context = browser.new_context() + page = context.new_page() + yield page + browser.close() + + +def test_robust_websocket_with_socketio(real_page: Page, fastapi_server: str): + # Connect to the FastAPI server + server_url = f"{fastapi_server}" + real_page.goto(f"{fastapi_server}") # Simulate visiting the server + + # Load the socket.io client library in the browser context + real_page.evaluate( + """ + const script = document.createElement('script'); + script.src = "https://cdn.socket.io/4.5.4/socket.io.min.js"; + script.onload = () => console.log("Socket.IO client library loaded"); + document.head.appendChild(script); + """ + ) + + # Wait for the socket.io library to be available + real_page.wait_for_function("() => window.io !== undefined") + + # Establish a WebSocket connection using socket.io + with real_page.expect_websocket() as ws_info: + real_page.evaluate( + f""" + window.ws = io("{server_url}", {{ transports: ["websocket"] }}); + window.ws.on("connect", () => console.log("Connected to server")); + window.ws.on("message", (data) => console.log("Message received:", data)); + """ + ) # Open WebSocket in the browser + websocket: 
PlaywrightWebSocket = ws_info.value + + # Create a RobustWebSocket instance using the Playwright WebSocket + robust_ws = RobustWebSocket(page=real_page, ws=websocket) + + # Test sending and receiving messages + real_page.evaluate("window.ws.send('Hello')") # Send a message via WebSocket + with robust_ws.expect_event( + "framereceived", timeout=5000 + ) as frame_received_event: + raw_response = frame_received_event.value + # Decode the socket.io message format + assert raw_response.startswith("42"), "Invalid socket.io message format" + decoded_message = json.loads(raw_response[2:]) # Remove "42" prefix + assert decoded_message[0] == "message" + response = decoded_message[1] + assert response == "Echo: Hello" + + # Simulate a network issue by disabling and re-enabling the network + with log_context(logging.INFO, msg="Simulating network issue") as ctx: + ctx.logger.info("First network issue") + real_page.context.set_offline(True) # Disable network + real_page.wait_for_timeout( + 12000 + ) # Wait for 2 seconds to simulate network downtime + real_page.context.set_offline(False) # Re-enable network + real_page.wait_for_timeout( + 12000 + ) # Wait for 2 seconds to simulate network downtime + + ctx.logger.info("Second network issue") + real_page.context.set_offline(True) # Disable network + real_page.wait_for_timeout( + 2000 + ) # Wait for 2 seconds to simulate network downtime + real_page.context.set_offline(False) # Re-enable network + real_page.wait_for_timeout( + 2000 + ) # Wait for 2 seconds to simulate network downtime + + # Test sending and receiving messages after automatic reconnection + real_page.evaluate("window.ws.send('Reconnected')") # Send a message + with robust_ws.expect_event( + "framereceived", timeout=5000 + ) as frame_received_event: + raw_response = frame_received_event.value + # Decode the socket.io message format + assert raw_response.startswith("42"), "Invalid socket.io message format" + decoded_message = json.loads(raw_response[2:]) # Remove "42" prefix + assert decoded_message[0] == "message" + response = decoded_message[1] + assert response == "Echo: Reconnected" + + assert ( + robust_ws._num_reconnections == 2 + ), "Expected 2 restarts due to network issues" diff --git a/packages/pytest-simcore/uv.lock b/packages/pytest-simcore/uv.lock new file mode 100644 index 00000000000..57c794b678f --- /dev/null +++ b/packages/pytest-simcore/uv.lock @@ -0,0 +1,728 @@ +version = 1 +revision = 1 +requires-python = ">=3.11" + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 }, +] + +[[package]] +name = "anyio" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = 
"sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916 }, +] + +[[package]] +name = "bidict" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764 }, +] + +[[package]] +name = "certifi" +version = "2025.1.31" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, +] + +[[package]] +name = 
"email-validator" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521 }, +] + +[[package]] +name = "fastapi" +version = "0.115.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/55/ae499352d82338331ca1e28c7f4a63bfd09479b16395dce38cf50a39e2c2/fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681", size = 295236 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/b3/b51f09c2ba432a576fe63758bddc81f78f0c6309d9e5c10d194313bf021e/fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d", size = 95164 }, +] + +[package.optional-dependencies] +standard = [ + { name = "email-validator" }, + { name = "fastapi-cli", extra = ["standard"] }, + { name = "httpx" }, + { name = "jinja2" }, + { name = "python-multipart" }, + { name = "uvicorn", extra = ["standard"] }, +] + +[[package]] +name = "fastapi-cli" +version = "0.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "rich-toolkit" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fe/73/82a5831fbbf8ed75905bacf5b2d9d3dfd6f04d6968b29fe6f72a5ae9ceb1/fastapi_cli-0.0.7.tar.gz", hash = "sha256:02b3b65956f526412515907a0793c9094abd4bfb5457b389f645b0ea6ba3605e", size = 16753 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/e6/5daefc851b514ce2287d8f5d358ae4341089185f78f3217a69d0ce3a390c/fastapi_cli-0.0.7-py3-none-any.whl", hash = "sha256:d549368ff584b2804336c61f192d86ddea080c11255f375959627911944804f4", size = 10705 }, +] + +[package.optional-dependencies] +standard = [ + { name = "uvicorn", extra = ["standard"] }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httptools" +version = "0.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/9a/ce5e1f7e131522e6d3426e8e7a490b3a01f39a6696602e1c4f33f9e94277/httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c", size = 240639 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/26/bb526d4d14c2774fe07113ca1db7255737ffbb119315839af2065abfdac3/httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069", size = 199029 }, + { url = "https://files.pythonhosted.org/packages/a6/17/3e0d3e9b901c732987a45f4f94d4e2c62b89a041d93db89eafb262afd8d5/httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a", size = 103492 }, + { url = "https://files.pythonhosted.org/packages/b7/24/0fe235d7b69c42423c7698d086d4db96475f9b50b6ad26a718ef27a0bce6/httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975", size = 462891 }, + { url = "https://files.pythonhosted.org/packages/b1/2f/205d1f2a190b72da6ffb5f41a3736c26d6fa7871101212b15e9b5cd8f61d/httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636", size = 459788 }, + { url = "https://files.pythonhosted.org/packages/6e/4c/d09ce0eff09057a206a74575ae8f1e1e2f0364d20e2442224f9e6612c8b9/httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721", size = 433214 }, + { url = "https://files.pythonhosted.org/packages/3e/d2/84c9e23edbccc4a4c6f96a1b8d99dfd2350289e94f00e9ccc7aadde26fb5/httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988", size = 434120 }, + { url = "https://files.pythonhosted.org/packages/d0/46/4d8e7ba9581416de1c425b8264e2cadd201eb709ec1584c381f3e98f51c1/httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17", size = 88565 }, + { url = "https://files.pythonhosted.org/packages/bb/0e/d0b71465c66b9185f90a091ab36389a7352985fe857e352801c39d6127c8/httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2", size = 200683 }, + { url = "https://files.pythonhosted.org/packages/e2/b8/412a9bb28d0a8988de3296e01efa0bd62068b33856cdda47fe1b5e890954/httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44", size = 104337 }, + { url = "https://files.pythonhosted.org/packages/9b/01/6fb20be3196ffdc8eeec4e653bc2a275eca7f36634c86302242c4fbb2760/httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1", size = 508796 }, + { url = 
"https://files.pythonhosted.org/packages/f7/d8/b644c44acc1368938317d76ac991c9bba1166311880bcc0ac297cb9d6bd7/httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2", size = 510837 }, + { url = "https://files.pythonhosted.org/packages/52/d8/254d16a31d543073a0e57f1c329ca7378d8924e7e292eda72d0064987486/httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81", size = 485289 }, + { url = "https://files.pythonhosted.org/packages/5f/3c/4aee161b4b7a971660b8be71a92c24d6c64372c1ab3ae7f366b3680df20f/httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f", size = 489779 }, + { url = "https://files.pythonhosted.org/packages/12/b7/5cae71a8868e555f3f67a50ee7f673ce36eac970f029c0c5e9d584352961/httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970", size = 88634 }, + { url = "https://files.pythonhosted.org/packages/94/a3/9fe9ad23fd35f7de6b91eeb60848986058bd8b5a5c1e256f5860a160cc3e/httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660", size = 197214 }, + { url = "https://files.pythonhosted.org/packages/ea/d9/82d5e68bab783b632023f2fa31db20bebb4e89dfc4d2293945fd68484ee4/httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083", size = 102431 }, + { url = "https://files.pythonhosted.org/packages/96/c1/cb499655cbdbfb57b577734fde02f6fa0bbc3fe9fb4d87b742b512908dff/httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3", size = 473121 }, + { url = "https://files.pythonhosted.org/packages/af/71/ee32fd358f8a3bb199b03261f10921716990808a675d8160b5383487a317/httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071", size = 473805 }, + { url = "https://files.pythonhosted.org/packages/8a/0a/0d4df132bfca1507114198b766f1737d57580c9ad1cf93c1ff673e3387be/httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5", size = 448858 }, + { url = "https://files.pythonhosted.org/packages/1e/6a/787004fdef2cabea27bad1073bf6a33f2437b4dbd3b6fb4a9d71172b1c7c/httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0", size = 452042 }, + { url = "https://files.pythonhosted.org/packages/4d/dc/7decab5c404d1d2cdc1bb330b1bf70e83d6af0396fd4fc76fc60c0d522bf/httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8", size = 87682 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", 
size = 141406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, + { url = 
"https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, + { url = 
"https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "pydantic" +version = "2.11.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/2e/ca897f093ee6c5f3b0bee123ee4465c50e75431c3d5b6a3b44a47134e891/pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3", size = 785513 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/1d/407b29780a289868ed696d1616f4aad49d6388e5a77f567dcd2629dcd7b8/pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f", size = 443591 }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/19/ed6a078a5287aea7922de6841ef4c06157931622c89c2a47940837b5eecd/pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df", size = 434395 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/7f/c6298830cb780c46b4f46bb24298d01019ffa4d21769f39b908cd14bbd50/pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24", size = 2044224 }, + { url = "https://files.pythonhosted.org/packages/a8/65/6ab3a536776cad5343f625245bd38165d6663256ad43f3a200e5936afd6c/pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30", size = 1858845 }, + { url = "https://files.pythonhosted.org/packages/e9/15/9a22fd26ba5ee8c669d4b8c9c244238e940cd5d818649603ca81d1c69861/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595", size = 1910029 }, + { url = "https://files.pythonhosted.org/packages/d5/33/8cb1a62818974045086f55f604044bf35b9342900318f9a2a029a1bec460/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e", size = 1997784 }, + { url = "https://files.pythonhosted.org/packages/c0/ca/49958e4df7715c71773e1ea5be1c74544923d10319173264e6db122543f9/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a", size = 2141075 }, + { url = "https://files.pythonhosted.org/packages/7b/a6/0b3a167a9773c79ba834b959b4e18c3ae9216b8319bd8422792abc8a41b1/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505", size = 2745849 }, + { url = 
"https://files.pythonhosted.org/packages/0b/60/516484135173aa9e5861d7a0663dce82e4746d2e7f803627d8c25dfa5578/pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f", size = 2005794 }, + { url = "https://files.pythonhosted.org/packages/86/70/05b1eb77459ad47de00cf78ee003016da0cedf8b9170260488d7c21e9181/pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77", size = 2123237 }, + { url = "https://files.pythonhosted.org/packages/c7/57/12667a1409c04ae7dc95d3b43158948eb0368e9c790be8b095cb60611459/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961", size = 2086351 }, + { url = "https://files.pythonhosted.org/packages/57/61/cc6d1d1c1664b58fdd6ecc64c84366c34ec9b606aeb66cafab6f4088974c/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1", size = 2258914 }, + { url = "https://files.pythonhosted.org/packages/d1/0a/edb137176a1f5419b2ddee8bde6a0a548cfa3c74f657f63e56232df8de88/pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c", size = 2257385 }, + { url = "https://files.pythonhosted.org/packages/26/3c/48ca982d50e4b0e1d9954919c887bdc1c2b462801bf408613ccc641b3daa/pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896", size = 1923765 }, + { url = "https://files.pythonhosted.org/packages/33/cd/7ab70b99e5e21559f5de38a0928ea84e6f23fdef2b0d16a6feaf942b003c/pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83", size = 1950688 }, + { url = "https://files.pythonhosted.org/packages/4b/ae/db1fc237b82e2cacd379f63e3335748ab88b5adde98bf7544a1b1bd10a84/pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89", size = 1908185 }, + { url = "https://files.pythonhosted.org/packages/c8/ce/3cb22b07c29938f97ff5f5bb27521f95e2ebec399b882392deb68d6c440e/pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8", size = 2026640 }, + { url = "https://files.pythonhosted.org/packages/19/78/f381d643b12378fee782a72126ec5d793081ef03791c28a0fd542a5bee64/pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498", size = 1852649 }, + { url = "https://files.pythonhosted.org/packages/9d/2b/98a37b80b15aac9eb2c6cfc6dbd35e5058a352891c5cce3a8472d77665a6/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939", size = 1892472 }, + { url = "https://files.pythonhosted.org/packages/4e/d4/3c59514e0f55a161004792b9ff3039da52448f43f5834f905abef9db6e4a/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d", size = 1977509 }, + { url = 
"https://files.pythonhosted.org/packages/a9/b6/c2c7946ef70576f79a25db59a576bce088bdc5952d1b93c9789b091df716/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e", size = 2128702 }, + { url = "https://files.pythonhosted.org/packages/88/fe/65a880f81e3f2a974312b61f82a03d85528f89a010ce21ad92f109d94deb/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3", size = 2679428 }, + { url = "https://files.pythonhosted.org/packages/6f/ff/4459e4146afd0462fb483bb98aa2436d69c484737feaceba1341615fb0ac/pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d", size = 2008753 }, + { url = "https://files.pythonhosted.org/packages/7c/76/1c42e384e8d78452ededac8b583fe2550c84abfef83a0552e0e7478ccbc3/pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b", size = 2114849 }, + { url = "https://files.pythonhosted.org/packages/00/72/7d0cf05095c15f7ffe0eb78914b166d591c0eed72f294da68378da205101/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39", size = 2069541 }, + { url = "https://files.pythonhosted.org/packages/b3/69/94a514066bb7d8be499aa764926937409d2389c09be0b5107a970286ef81/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a", size = 2239225 }, + { url = "https://files.pythonhosted.org/packages/84/b0/e390071eadb44b41f4f54c3cef64d8bf5f9612c92686c9299eaa09e267e2/pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db", size = 2248373 }, + { url = "https://files.pythonhosted.org/packages/d6/b2/288b3579ffc07e92af66e2f1a11be3b056fe1214aab314748461f21a31c3/pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda", size = 1907034 }, + { url = "https://files.pythonhosted.org/packages/02/28/58442ad1c22b5b6742b992ba9518420235adced665513868f99a1c2638a5/pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4", size = 1956848 }, + { url = "https://files.pythonhosted.org/packages/a1/eb/f54809b51c7e2a1d9f439f158b8dd94359321abcc98767e16fc48ae5a77e/pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea", size = 1903986 }, + { url = "https://files.pythonhosted.org/packages/7a/24/eed3466a4308d79155f1cdd5c7432c80ddcc4530ba8623b79d5ced021641/pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a", size = 2033551 }, + { url = "https://files.pythonhosted.org/packages/ab/14/df54b1a0bc9b6ded9b758b73139d2c11b4e8eb43e8ab9c5847c0a2913ada/pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266", size = 1852785 }, + { url = 
"https://files.pythonhosted.org/packages/fa/96/e275f15ff3d34bb04b0125d9bc8848bf69f25d784d92a63676112451bfb9/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3", size = 1897758 }, + { url = "https://files.pythonhosted.org/packages/b7/d8/96bc536e975b69e3a924b507d2a19aedbf50b24e08c80fb00e35f9baaed8/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a", size = 1986109 }, + { url = "https://files.pythonhosted.org/packages/90/72/ab58e43ce7e900b88cb571ed057b2fcd0e95b708a2e0bed475b10130393e/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516", size = 2129159 }, + { url = "https://files.pythonhosted.org/packages/dc/3f/52d85781406886c6870ac995ec0ba7ccc028b530b0798c9080531b409fdb/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764", size = 2680222 }, + { url = "https://files.pythonhosted.org/packages/f4/56/6e2ef42f363a0eec0fd92f74a91e0ac48cd2e49b695aac1509ad81eee86a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d", size = 2006980 }, + { url = "https://files.pythonhosted.org/packages/4c/c0/604536c4379cc78359f9ee0aa319f4aedf6b652ec2854953f5a14fc38c5a/pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4", size = 2120840 }, + { url = "https://files.pythonhosted.org/packages/1f/46/9eb764814f508f0edfb291a0f75d10854d78113fa13900ce13729aaec3ae/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde", size = 2072518 }, + { url = "https://files.pythonhosted.org/packages/42/e3/fb6b2a732b82d1666fa6bf53e3627867ea3131c5f39f98ce92141e3e3dc1/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e", size = 2248025 }, + { url = "https://files.pythonhosted.org/packages/5c/9d/fbe8fe9d1aa4dac88723f10a921bc7418bd3378a567cb5e21193a3c48b43/pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd", size = 2254991 }, + { url = "https://files.pythonhosted.org/packages/aa/99/07e2237b8a66438d9b26482332cda99a9acccb58d284af7bc7c946a42fd3/pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f", size = 1915262 }, + { url = "https://files.pythonhosted.org/packages/8a/f4/e457a7849beeed1e5defbcf5051c6f7b3c91a0624dd31543a64fc9adcf52/pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40", size = 1956626 }, + { url = "https://files.pythonhosted.org/packages/20/d0/e8d567a7cff7b04e017ae164d98011f1e1894269fe8e90ea187a3cbfb562/pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523", size = 1909590 }, + { url = 
"https://files.pythonhosted.org/packages/ef/fd/24ea4302d7a527d672c5be06e17df16aabfb4e9fdc6e0b345c21580f3d2a/pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d", size = 1812963 }, + { url = "https://files.pythonhosted.org/packages/5f/95/4fbc2ecdeb5c1c53f1175a32d870250194eb2fdf6291b795ab08c8646d5d/pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c", size = 1986896 }, + { url = "https://files.pythonhosted.org/packages/71/ae/fe31e7f4a62431222d8f65a3bd02e3fa7e6026d154a00818e6d30520ea77/pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18", size = 1931810 }, + { url = "https://files.pythonhosted.org/packages/0b/76/1794e440c1801ed35415238d2c728f26cd12695df9057154ad768b7b991c/pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a", size = 2042858 }, + { url = "https://files.pythonhosted.org/packages/73/b4/9cd7b081fb0b1b4f8150507cd59d27b275c3e22ad60b35cb19ea0977d9b9/pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc", size = 1873745 }, + { url = "https://files.pythonhosted.org/packages/e1/d7/9ddb7575d4321e40d0363903c2576c8c0c3280ebea137777e5ab58d723e3/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b", size = 1904188 }, + { url = "https://files.pythonhosted.org/packages/d1/a8/3194ccfe461bb08da19377ebec8cb4f13c9bd82e13baebc53c5c7c39a029/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe", size = 2083479 }, + { url = "https://files.pythonhosted.org/packages/42/c7/84cb569555d7179ca0b3f838cef08f66f7089b54432f5b8599aac6e9533e/pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5", size = 2118415 }, + { url = "https://files.pythonhosted.org/packages/3b/67/72abb8c73e0837716afbb58a59cc9e3ae43d1aa8677f3b4bc72c16142716/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761", size = 2079623 }, + { url = "https://files.pythonhosted.org/packages/0b/cd/c59707e35a47ba4cbbf153c3f7c56420c58653b5801b055dc52cccc8e2dc/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850", size = 2250175 }, + { url = "https://files.pythonhosted.org/packages/84/32/e4325a6676b0bed32d5b084566ec86ed7fd1e9bcbfc49c578b1755bde920/pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544", size = 2254674 }, + { url = "https://files.pythonhosted.org/packages/12/6f/5596dc418f2e292ffc661d21931ab34591952e2843e7168ea5a52591f6ff/pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5", size = 2080951 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } 
+sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest-simcore" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "fastapi", extra = ["standard"] }, + { name = "python-socketio" }, + { name = "uvicorn" }, +] + +[package.metadata] +requires-dist = [ + { name = "fastapi", extras = ["standard"], specifier = ">=0.115.12" }, + { name = "python-socketio", specifier = ">=5.12.1" }, + { name = "uvicorn", specifier = ">=0.34.0" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256 }, +] + +[[package]] +name = "python-engineio" +version = "4.11.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "simple-websocket" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/e0/a9e0fe427ce7f1b7dbf9531fa00ffe4b557c4a7bc8e71891c115af123170/python_engineio-4.11.2.tar.gz", hash = "sha256:145bb0daceb904b4bb2d3eb2d93f7dbb7bb87a6a0c4f20a94cc8654dec977129", size = 91381 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/8f/978a0b913e3f8ad33a9a2fe204d32efe3d1ee34ecb1f2829c1cfbdd92082/python_engineio-4.11.2-py3-none-any.whl", hash = "sha256:f0971ac4c65accc489154fe12efd88f53ca8caf04754c46a66e85f5102ef22ad", size = 59239 }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546 }, +] + +[[package]] +name = "python-socketio" +version = "5.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bidict" }, + { name = "python-engineio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/d0/40ed38076e8aee94785d546d3e3a1cae393da5806a8530be877187e2875f/python_socketio-5.12.1.tar.gz", hash = "sha256:0299ff1f470b676c09c1bfab1dead25405077d227b2c13cf217a34dadc68ba9c", size = 119991 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/a3/c69806f30dd81df5a99d592e7db4c930c3a9b098555aa97b0eb866b20b11/python_socketio-5.12.1-py3-none-any.whl", hash = 
"sha256:24a0ea7cfff0e021eb28c68edbf7914ee4111bdf030b95e4d250c4dc9af7a386", size = 76947 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = 
"https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "rich" +version = "14.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229 }, +] + +[[package]] +name = "rich-toolkit" +version = "0.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/ea/13945d58d556a28dfb0f774ad5c8af759527390e59505a40d164bf8ce1ce/rich_toolkit-0.14.1.tar.gz", hash = "sha256:9248e2d087bfc01f3e4c5c8987e05f7fa744d00dd22fa2be3aa6e50255790b3f", size = 104416 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/e8/61c5b12d1567fdba41a6775db12a090d88b8305424ee7c47259c70d33cb4/rich_toolkit-0.14.1-py3-none-any.whl", hash = "sha256:dc92c0117d752446d04fdc828dbca5873bcded213a091a5d3742a2beec2e6559", size = 24177 }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 }, +] + +[[package]] +name = "simple-websocket" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wsproto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b0/d4/bfa032f961103eba93de583b161f0e6a5b63cebb8f2c7d0c6e6efe1e3d2e/simple_websocket-1.1.0.tar.gz", hash = "sha256:7939234e7aa067c534abdab3a9ed933ec9ce4691b0713c78acb195560aa52ae4", size = 17300 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/59/0782e51887ac6b07ffd1570e0364cf901ebc36345fea669969d2084baebb/simple_websocket-1.1.0-py3-none-any.whl", hash = 
"sha256:4af6069630a38ed6c561010f0e11a5bc0d4ca569b36306eb257cd9a192497c8c", size = 13842 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "starlette" +version = "0.46.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/1b/52b27f2e13ceedc79a908e29eac426a63465a1a01248e5f24aa36a62aeb3/starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230", size = 2580102 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 }, +] + +[[package]] +name = "typer" +version = "0.15.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/6f/3991f0f1c7fcb2df31aef28e0594d8d54b05393a0e4e34c65e475c2a5d41/typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5", size = 100711 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/fc/5b29fea8cee020515ca82cc68e3b8e1e34bb19a3535ad854cac9257b414c/typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc", size = 45061 }, +] + +[[package]] +name = "typing-extensions" +version = "4.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] + +[package.optional-dependencies] +standard = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "httptools" }, + { name = "python-dotenv" }, + { name = "pyyaml" }, + { name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, + { name = "watchfiles" }, + { name = "websockets" }, +] + +[[package]] +name = "uvloop" +version = "0.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/c0/854216d09d33c543f12a44b393c402e89a920b1a0a7dc634c42de91b9cf6/uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3", size = 2492741 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/57/a7/4cf0334105c1160dd6819f3297f8700fda7fc30ab4f61fbf3e725acbc7cc/uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8", size = 1447410 }, + { url = "https://files.pythonhosted.org/packages/8c/7c/1517b0bbc2dbe784b563d6ab54f2ef88c890fdad77232c98ed490aa07132/uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0", size = 805476 }, + { url = "https://files.pythonhosted.org/packages/ee/ea/0bfae1aceb82a503f358d8d2fa126ca9dbdb2ba9c7866974faec1cb5875c/uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e", size = 3960855 }, + { url = "https://files.pythonhosted.org/packages/8a/ca/0864176a649838b838f36d44bf31c451597ab363b60dc9e09c9630619d41/uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb", size = 3973185 }, + { url = "https://files.pythonhosted.org/packages/30/bf/08ad29979a936d63787ba47a540de2132169f140d54aa25bc8c3df3e67f4/uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6", size = 3820256 }, + { url = "https://files.pythonhosted.org/packages/da/e2/5cf6ef37e3daf2f06e651aae5ea108ad30df3cb269102678b61ebf1fdf42/uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d", size = 3937323 }, + { url = "https://files.pythonhosted.org/packages/8c/4c/03f93178830dc7ce8b4cdee1d36770d2f5ebb6f3d37d354e061eefc73545/uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c", size = 1471284 }, + { url = "https://files.pythonhosted.org/packages/43/3e/92c03f4d05e50f09251bd8b2b2b584a2a7f8fe600008bcc4523337abe676/uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2", size = 821349 }, + { url = 
"https://files.pythonhosted.org/packages/a6/ef/a02ec5da49909dbbfb1fd205a9a1ac4e88ea92dcae885e7c961847cd51e2/uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d", size = 4580089 }, + { url = "https://files.pythonhosted.org/packages/06/a7/b4e6a19925c900be9f98bec0a75e6e8f79bb53bdeb891916609ab3958967/uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc", size = 4693770 }, + { url = "https://files.pythonhosted.org/packages/ce/0c/f07435a18a4b94ce6bd0677d8319cd3de61f3a9eeb1e5f8ab4e8b5edfcb3/uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb", size = 4451321 }, + { url = "https://files.pythonhosted.org/packages/8f/eb/f7032be105877bcf924709c97b1bf3b90255b4ec251f9340cef912559f28/uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f", size = 4659022 }, + { url = "https://files.pythonhosted.org/packages/3f/8d/2cbef610ca21539f0f36e2b34da49302029e7c9f09acef0b1c3b5839412b/uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281", size = 1468123 }, + { url = "https://files.pythonhosted.org/packages/93/0d/b0038d5a469f94ed8f2b2fce2434a18396d8fbfb5da85a0a9781ebbdec14/uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af", size = 819325 }, + { url = "https://files.pythonhosted.org/packages/50/94/0a687f39e78c4c1e02e3272c6b2ccdb4e0085fda3b8352fecd0410ccf915/uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6", size = 4582806 }, + { url = "https://files.pythonhosted.org/packages/d2/19/f5b78616566ea68edd42aacaf645adbf71fbd83fc52281fba555dc27e3f1/uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816", size = 4701068 }, + { url = "https://files.pythonhosted.org/packages/47/57/66f061ee118f413cd22a656de622925097170b9380b30091b78ea0c6ea75/uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc", size = 4454428 }, + { url = "https://files.pythonhosted.org/packages/63/9a/0962b05b308494e3202d3f794a6e85abe471fe3cafdbcf95c2e8c713aabd/uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553", size = 4660018 }, +] + +[[package]] +name = "watchfiles" +version = "1.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/03/e2/8ed598c42057de7aa5d97c472254af4906ff0a59a66699d426fc9ef795d7/watchfiles-1.0.5.tar.gz", hash = "sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9", size = 94537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/f4/41b591f59021786ef517e1cdc3b510383551846703e03f204827854a96f8/watchfiles-1.0.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827", size = 405336 }, + { url = 
"https://files.pythonhosted.org/packages/ae/06/93789c135be4d6d0e4f63e96eea56dc54050b243eacc28439a26482b5235/watchfiles-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4", size = 395977 }, + { url = "https://files.pythonhosted.org/packages/d2/db/1cd89bd83728ca37054512d4d35ab69b5f12b8aa2ac9be3b0276b3bf06cc/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d", size = 455232 }, + { url = "https://files.pythonhosted.org/packages/40/90/d8a4d44ffe960517e487c9c04f77b06b8abf05eb680bed71c82b5f2cad62/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63", size = 459151 }, + { url = "https://files.pythonhosted.org/packages/6c/da/267a1546f26465dead1719caaba3ce660657f83c9d9c052ba98fb8856e13/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418", size = 489054 }, + { url = "https://files.pythonhosted.org/packages/b1/31/33850dfd5c6efb6f27d2465cc4c6b27c5a6f5ed53c6fa63b7263cf5f60f6/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9", size = 523955 }, + { url = "https://files.pythonhosted.org/packages/09/84/b7d7b67856efb183a421f1416b44ca975cb2ea6c4544827955dfb01f7dc2/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6", size = 502234 }, + { url = "https://files.pythonhosted.org/packages/71/87/6dc5ec6882a2254cfdd8b0718b684504e737273903b65d7338efaba08b52/watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25", size = 454750 }, + { url = "https://files.pythonhosted.org/packages/3d/6c/3786c50213451a0ad15170d091570d4a6554976cf0df19878002fc96075a/watchfiles-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5", size = 631591 }, + { url = "https://files.pythonhosted.org/packages/1b/b3/1427425ade4e359a0deacce01a47a26024b2ccdb53098f9d64d497f6684c/watchfiles-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01", size = 625370 }, + { url = "https://files.pythonhosted.org/packages/15/ba/f60e053b0b5b8145d682672024aa91370a29c5c921a88977eb565de34086/watchfiles-1.0.5-cp311-cp311-win32.whl", hash = "sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246", size = 277791 }, + { url = "https://files.pythonhosted.org/packages/50/ed/7603c4e164225c12c0d4e8700b64bb00e01a6c4eeea372292a3856be33a4/watchfiles-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096", size = 291622 }, + { url = "https://files.pythonhosted.org/packages/a2/c2/99bb7c96b4450e36877fde33690ded286ff555b5a5c1d925855d556968a1/watchfiles-1.0.5-cp311-cp311-win_arm64.whl", hash = "sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed", size = 283699 }, + { url = "https://files.pythonhosted.org/packages/2a/8c/4f0b9bdb75a1bfbd9c78fad7d8854369283f74fe7cf03eb16be77054536d/watchfiles-1.0.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2", size = 401511 }, + { url = "https://files.pythonhosted.org/packages/dc/4e/7e15825def77f8bd359b6d3f379f0c9dac4eb09dd4ddd58fd7d14127179c/watchfiles-1.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f", size = 392715 }, + { url = "https://files.pythonhosted.org/packages/58/65/b72fb817518728e08de5840d5d38571466c1b4a3f724d190cec909ee6f3f/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec", size = 454138 }, + { url = "https://files.pythonhosted.org/packages/3e/a4/86833fd2ea2e50ae28989f5950b5c3f91022d67092bfec08f8300d8b347b/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21", size = 458592 }, + { url = "https://files.pythonhosted.org/packages/38/7e/42cb8df8be9a37e50dd3a818816501cf7a20d635d76d6bd65aae3dbbff68/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512", size = 487532 }, + { url = "https://files.pythonhosted.org/packages/fc/fd/13d26721c85d7f3df6169d8b495fcac8ab0dc8f0945ebea8845de4681dab/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d", size = 522865 }, + { url = "https://files.pythonhosted.org/packages/a1/0d/7f9ae243c04e96c5455d111e21b09087d0eeaf9a1369e13a01c7d3d82478/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6", size = 499887 }, + { url = "https://files.pythonhosted.org/packages/8e/0f/a257766998e26aca4b3acf2ae97dff04b57071e991a510857d3799247c67/watchfiles-1.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234", size = 454498 }, + { url = "https://files.pythonhosted.org/packages/81/79/8bf142575a03e0af9c3d5f8bcae911ee6683ae93a625d349d4ecf4c8f7df/watchfiles-1.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2", size = 630663 }, + { url = "https://files.pythonhosted.org/packages/f1/80/abe2e79f610e45c63a70d271caea90c49bbf93eb00fa947fa9b803a1d51f/watchfiles-1.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663", size = 625410 }, + { url = "https://files.pythonhosted.org/packages/91/6f/bc7fbecb84a41a9069c2c6eb6319f7f7df113adf113e358c57fc1aff7ff5/watchfiles-1.0.5-cp312-cp312-win32.whl", hash = "sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249", size = 277965 }, + { url = "https://files.pythonhosted.org/packages/99/a5/bf1c297ea6649ec59e935ab311f63d8af5faa8f0b86993e3282b984263e3/watchfiles-1.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705", size = 291693 }, + { url = "https://files.pythonhosted.org/packages/7f/7b/fd01087cc21db5c47e5beae507b87965db341cce8a86f9eb12bf5219d4e0/watchfiles-1.0.5-cp312-cp312-win_arm64.whl", hash = "sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417", size = 283287 }, + { url = 
"https://files.pythonhosted.org/packages/c7/62/435766874b704f39b2fecd8395a29042db2b5ec4005bd34523415e9bd2e0/watchfiles-1.0.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d", size = 401531 }, + { url = "https://files.pythonhosted.org/packages/6e/a6/e52a02c05411b9cb02823e6797ef9bbba0bfaf1bb627da1634d44d8af833/watchfiles-1.0.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763", size = 392417 }, + { url = "https://files.pythonhosted.org/packages/3f/53/c4af6819770455932144e0109d4854437769672d7ad897e76e8e1673435d/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40", size = 453423 }, + { url = "https://files.pythonhosted.org/packages/cb/d1/8e88df58bbbf819b8bc5cfbacd3c79e01b40261cad0fc84d1e1ebd778a07/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563", size = 458185 }, + { url = "https://files.pythonhosted.org/packages/ff/70/fffaa11962dd5429e47e478a18736d4e42bec42404f5ee3b92ef1b87ad60/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04", size = 486696 }, + { url = "https://files.pythonhosted.org/packages/39/db/723c0328e8b3692d53eb273797d9a08be6ffb1d16f1c0ba2bdbdc2a3852c/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f", size = 522327 }, + { url = "https://files.pythonhosted.org/packages/cd/05/9fccc43c50c39a76b68343484b9da7b12d42d0859c37c61aec018c967a32/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a", size = 499741 }, + { url = "https://files.pythonhosted.org/packages/23/14/499e90c37fa518976782b10a18b18db9f55ea73ca14641615056f8194bb3/watchfiles-1.0.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827", size = 453995 }, + { url = "https://files.pythonhosted.org/packages/61/d9/f75d6840059320df5adecd2c687fbc18960a7f97b55c300d20f207d48aef/watchfiles-1.0.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a", size = 629693 }, + { url = "https://files.pythonhosted.org/packages/fc/17/180ca383f5061b61406477218c55d66ec118e6c0c51f02d8142895fcf0a9/watchfiles-1.0.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936", size = 624677 }, + { url = "https://files.pythonhosted.org/packages/bf/15/714d6ef307f803f236d69ee9d421763707899d6298d9f3183e55e366d9af/watchfiles-1.0.5-cp313-cp313-win32.whl", hash = "sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc", size = 277804 }, + { url = "https://files.pythonhosted.org/packages/a8/b4/c57b99518fadf431f3ef47a610839e46e5f8abf9814f969859d1c65c02c7/watchfiles-1.0.5-cp313-cp313-win_amd64.whl", hash = "sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11", size = 291087 }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423 }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082 }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330 }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878 }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883 }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252 }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521 }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958 }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918 }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388 }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828 }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437 }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096 }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332 }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152 }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096 }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523 }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790 }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165 }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160 }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395 }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841 }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440 }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098 }, + { url = 
"https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329 }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111 }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054 }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496 }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829 }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217 }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195 }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393 }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837 }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743 }, +] + +[[package]] +name = "wsproto" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/4a/44d3c295350d776427904d73c189e10aeae66d7f555bb2feee16d1e4ba5a/wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", size = 53425 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/58/e860788190eba3bcce367f74d29c4675466ce8dddfba85f7827588416f01/wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736", size = 24226 }, +] diff --git a/packages/service-integration/Dockerfile 
b/packages/service-integration/Dockerfile index 2eebf66a5b2..0f2a220b4cb 100644 --- a/packages/service-integration/Dockerfile +++ b/packages/service-integration/Dockerfile @@ -1,13 +1,30 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base LABEL maintainer=pcrespov -RUN set -eux \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ && apt-get update \ - && apt-get install -y \ + && apt-get install -y --no-install-recommends \ git \ - && rm -rf /var/lib/apt/lists/* \ + && apt-get clean -y \ # verify that the binary works && git --version @@ -28,52 +45,62 @@ RUN adduser \ # Sets utf-8 encoding for Python et al ENV LANG=C.UTF-8 + # Turns off writing .pyc files; superfluous on an ephemeral container. ENV PYTHONDONTWRITEBYTECODE=1 \ VIRTUAL_ENV=/home/scu/.venv +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + # Ensures that the python and pip executables used # in the image will be those from our virtualenv. ENV PATH="${VIRTUAL_ENV}/bin:$PATH" - # -------------------------- Build stage ------------------- -FROM base as build +FROM base AS build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip install --no-cache-dir --upgrade \ - pip~=23.0 \ - wheel \ - setuptools -WORKDIR /build -COPY --chown=scu:scu packages/models-library packages/models-library -COPY --chown=scu:scu packages/service-integration packages/service-integration +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + pip~=24.0 \ + wheel \ + setuptools +WORKDIR /build/packages/service-integration -# WARNING: keep synced with `make install-prod` (did not use it directly because if would require copying scripts/common.Makefile and other parts of the repo) -RUN cd packages/service-integration \ - && pip install --no-cache-dir -r requirements/_base.txt \ - && pip install --no-cache-dir ../models-library/ \ - && pip install --no-cache-dir . 
+RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=packages/service-integration,target=/build/packages/service-integration,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install \ + --requirement requirements/prod.txt \ + && uv pip list # -------------------------- Build stage ------------------- -FROM base as development +FROM base AS development # NOTE: this is necessary to allow to build development images but is the same as production here -FROM base as production +FROM base AS production + +WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu USER scu diff --git a/packages/service-integration/Makefile b/packages/service-integration/Makefile index ad12089fa46..669e356bd15 100644 --- a/packages/service-integration/Makefile +++ b/packages/service-integration/Makefile @@ -13,7 +13,7 @@ requirements: ## compiles pip requirements (.in -> .txt) .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests tests-ci @@ -39,6 +39,7 @@ tests-ci: ## runs unit tests [ci-mode] --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=service_integration \ --durations=10 \ --log-date-format="%Y-%m-%d %H:%M:%S" \ diff --git a/packages/service-integration/README.md b/packages/service-integration/README.md index b04e6b97d8d..b92b68b31c1 100644 --- a/packages/service-integration/README.md +++ b/packages/service-integration/README.md @@ -5,3 +5,11 @@ This is the o2sparc's service integration library or ``ooil`` in short SEE how is it used in Makefiles in https://github.com/ITISFoundation/cookiecutter-osparc-service + + +#### What is the .osparc folder and its content? +'osparc config' is a set of standard file forms (YAML) that the user fills in to describe how her service works and integrates with osparc. In short: + - config files are stored under the '.osparc/' folder in the root repo folder (analogous to other configs like .github, .vscode, etc.) + - configs are parsed and validated into pydantic models + - models can be serialized/deserialized into label annotations on images. This way, the config is attached to the service during its entire lifetime. + - the config should provide enough information about the service to allow building an image and running a container with a single command call.
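As a rough illustration of the round-trip described in the README section above, the sketch below loads one YAML form into a pydantic model and serializes it into image label annotations. The `ProjectConfig` model, the `metadata.yml` filename and the `org.example.osparc-config` label key are placeholder assumptions for illustration only; they do not reflect the actual models or label namespace used by ``ooil``.

```python
# Hypothetical sketch of the .osparc round-trip described above:
# YAML form -> validated pydantic model -> docker image label annotations.
# ProjectConfig, "metadata.yml" and the label key are illustrative placeholders.
from pathlib import Path

import yaml
from pydantic import BaseModel


class ProjectConfig(BaseModel):
    name: str
    version: str
    description: str = ""


def load_config(osparc_dir: Path) -> ProjectConfig:
    """Parse and validate one YAML form stored under the repo's .osparc/ folder."""
    data = yaml.safe_load((osparc_dir / "metadata.yml").read_text())
    return ProjectConfig.model_validate(data)


def to_labels(config: ProjectConfig) -> dict[str, str]:
    """Serialize the validated config into label annotations for the image."""
    return {"org.example.osparc-config": config.model_dump_json()}
```

Attaching the config as labels means any tool that can inspect the image can recover the validated configuration for the whole lifetime of the service, without needing access to the source repository.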
diff --git a/packages/service-integration/VERSION b/packages/service-integration/VERSION index 21e8796a09d..ee90284c27f 100644 --- a/packages/service-integration/VERSION +++ b/packages/service-integration/VERSION @@ -1 +1 @@ -1.0.3 +1.0.4 diff --git a/packages/service-integration/requirements/_base.in b/packages/service-integration/requirements/_base.in index a8955a4a9b5..213a27f4c13 100644 --- a/packages/service-integration/requirements/_base.in +++ b/packages/service-integration/requirements/_base.in @@ -3,11 +3,15 @@ # --constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in click +cookiecutter docker # pytest-plugin +jinja2_time jsonschema # pytest-plugin pytest # pytest-plugin pyyaml typer[all] +yarl diff --git a/packages/service-integration/requirements/_base.txt b/packages/service-integration/requirements/_base.txt index d564c4227f5..332dcc97001 100644 --- a/packages/service-integration/requirements/_base.txt +++ b/packages/service-integration/requirements/_base.txt @@ -1,75 +1,172 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -attrs==22.2.0 +annotated-types==0.7.0 + # via pydantic +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # cookiecutter + # jinja2-time +attrs==25.1.0 # via # jsonschema - # pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.0.1 + # referencing +binaryornot==0.4.4 + # via cookiecutter +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +chardet==5.2.0 + # via binaryornot +charset-normalizer==3.4.1 # via requests -click==8.1.3 +click==8.1.8 # via # -r requirements/_base.in + # cookiecutter # typer -colorama==0.4.6 - # via typer -commonmark==0.9.1 - # via rich -dnspython==2.3.0 +cookiecutter==2.6.0 + # via -r requirements/_base.in +dnspython==2.7.0 # via email-validator -docker==6.0.1 +docker==7.1.0 # via -r requirements/_base.in -email-validator==1.3.1 +email-validator==2.2.0 # via pydantic -exceptiongroup==1.1.0 - # via pytest -idna==3.4 +idna==3.10 # via # email-validator # requests + # yarl iniconfig==2.0.0 # via pytest -jsonschema==4.17.3 +jinja2==3.1.5 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # cookiecutter + # jinja2-time +jinja2-time==0.2.0 + # via -r requirements/_base.in +jsonschema==4.23.0 # via # -r requirements/../../../packages/models-library/requirements/_base.in # -r requirements/_base.in -packaging==23.0 +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich 
+markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via yarl +orjson==3.10.15 # via - # docker - # pytest -pluggy==1.0.0 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in +packaging==24.2 # via pytest -pydantic==1.10.2 - # via -r requirements/../../../packages/models-library/requirements/_base.in -pygments==2.14.0 +pluggy==1.5.0 + # via pytest +propcache==0.3.0 + # via yarl +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in +pygments==2.19.1 # via rich -pyrsistent==0.19.3 - # via jsonschema -pytest==7.2.1 - # via -r requirements/_base.in -pyyaml==6.0 +pytest==8.3.5 # via -r requirements/_base.in -requests==2.28.2 - # via docker -rich==12.6.0 - # via typer -shellingham==1.5.0.post1 +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via pydantic-settings +python-slugify==8.0.4 + # via cookiecutter +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in 
+ # cookiecutter +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via + # cookiecutter + # docker +rich==13.9.4 + # via + # cookiecutter + # typer +rpds-py==0.23.1 + # via + # jsonschema + # referencing +shellingham==1.5.4 # via typer -tomli==2.0.1 - # via pytest -typer==0.7.0 +six==1.17.0 + # via python-dateutil +text-unidecode==1.3 + # via python-slugify +typer==0.15.2 # via -r requirements/_base.in -typing-extensions==4.5.0 - # via pydantic -urllib3==1.26.14 +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via + # pydantic + # pydantic-core + # pydantic-extra-types + # typer +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # docker # requests -websocket-client==1.5.1 - # via docker +yarl==1.18.3 + # via -r requirements/_base.in diff --git a/packages/service-integration/requirements/_test.in b/packages/service-integration/requirements/_test.in index dc121aa8787..be3ff31cc6a 100644 --- a/packages/service-integration/requirements/_test.in +++ b/packages/service-integration/requirements/_test.in @@ -8,10 +8,13 @@ # --constraint _base.txt + coverage -coveralls pytest pytest-cov pytest-instafail pytest-runner pytest-sugar +types-docker +types-jsonschema +types-PyYAML diff --git a/packages/service-integration/requirements/_test.txt b/packages/service-integration/requirements/_test.txt index 433b5c95cbf..19f48613efa 100644 --- a/packages/service-integration/requirements/_test.txt +++ b/packages/service-integration/requirements/_test.txt @@ -1,77 +1,61 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -attrs==22.2.0 +attrs==25.1.0 # via # -c requirements/_base.txt - # pytest -certifi==2022.12.7 - # via - # -c requirements/_base.txt - # requests -charset-normalizer==3.0.1 - # via - # -c requirements/_base.txt - # requests -coverage==6.5.0 + # referencing +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via - # -c requirements/_base.txt - # pytest -idna==3.4 - # via - # -c requirements/_base.txt - # requests iniconfig==2.0.0 # via # -c requirements/_base.txt # pytest -packaging==23.0 +packaging==24.2 # via # -c requirements/_base.txt # pytest # pytest-sugar -pluggy==1.0.0 +pluggy==1.5.0 # via # -c requirements/_base.txt # pytest -pytest==7.2.1 +pytest==8.3.5 # via + # -c requirements/_base.txt # -r requirements/_test.in # pytest-cov # pytest-instafail # pytest-sugar -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r 
requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -requests==2.28.2 +referencing==0.35.1 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # coveralls -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 + # types-jsonschema +rpds-py==0.23.1 # via # -c requirements/_base.txt - # coverage - # pytest -urllib3==1.26.14 + # referencing +termcolor==2.5.0 + # via pytest-sugar +types-docker==7.1.0.20241229 + # via -r requirements/_test.in +types-jsonschema==4.23.0.20241208 + # via -r requirements/_test.in +types-pyyaml==6.0.12.20241230 + # via -r requirements/_test.in +types-requests==2.32.0.20250301 + # via types-docker +urllib3==2.3.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # requests + # types-docker + # types-requests diff --git a/packages/service-integration/requirements/_tools.txt b/packages/service-integration/requirements/_tools.txt index de18c0d826c..3b167383938 100644 --- a/packages/service-integration/requirements/_tools.txt +++ b/packages/service-integration/requirements/_tools.txt @@ -1,90 +1,81 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==6.0 +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # pre-commit -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via # -c 
requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/service-integration/requirements/ci.txt b/packages/service-integration/requirements/ci.txt index a15da8728f7..daa95fb5ef9 100644 --- a/packages/service-integration/requirements/ci.txt +++ b/packages/service-integration/requirements/ci.txt @@ -9,9 +9,11 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt -../models-library/ -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +pytest-simcore @ ../pytest-simcore # current module -. +simcore-service-integration @ . diff --git a/packages/service-integration/requirements/dev.txt b/packages/service-integration/requirements/dev.txt index 9e2af0f7124..bbe3d832532 100644 --- a/packages/service-integration/requirements/dev.txt +++ b/packages/service-integration/requirements/dev.txt @@ -11,6 +11,7 @@ --requirement _test.txt --requirement _tools.txt +--editable ../common-library/ --editable ../models-library/ --editable ../pytest-simcore/ diff --git a/packages/service-integration/requirements/prod.txt b/packages/service-integration/requirements/prod.txt index 3009f906b50..8cebc4d6898 100644 --- a/packages/service-integration/requirements/prod.txt +++ b/packages/service-integration/requirements/prod.txt @@ -9,7 +9,8 @@ # installs base + tests requirements --requirement _base.txt -../models-library/ +simcore-common-library @ ../common-library/ +simcore-models-library @ ../models-library # current module -. +simcore-service-integration @ . 
diff --git a/packages/service-integration/scripts/ooil.bash b/packages/service-integration/scripts/ooil.bash index 7e5eb116d17..b4527683ef1 100755 --- a/packages/service-integration/scripts/ooil.bash +++ b/packages/service-integration/scripts/ooil.bash @@ -6,7 +6,7 @@ set -o nounset set -o pipefail IFS=$'\n\t' -IMAGE_NAME="${DOCKER_REGISTRY:-itisfoundation}/service-integration:${OOIL_IMAGE_TAG:-master-github-latest}" +IMAGE_NAME="${DOCKER_REGISTRY:-local}/service-integration:${OOIL_IMAGE_TAG:-production}" WORKDIR="$(pwd)" # @@ -20,6 +20,7 @@ WORKDIR="$(pwd)" run() { docker run \ --rm \ + --tty \ --volume="/etc/group:/etc/group:ro" \ --volume="/etc/passwd:/etc/passwd:ro" \ --user="$(id --user "$USER")":"$(id --group "$USER")" \ diff --git a/packages/service-integration/setup.cfg b/packages/service-integration/setup.cfg index af7998eb1a3..77a398943ee 100644 --- a/packages/service-integration/setup.cfg +++ b/packages/service-integration/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.0.3 +current_version = 1.0.4 commit = True message = service-integration version: {current_version} → {new_version} tag = False @@ -12,3 +12,7 @@ universal = 1 [aliases] test = pytest + +[mypy] plugins = + pydantic.mypy diff --git a/packages/service-integration/setup.py b/packages/service-integration/setup.py index 2039f6af8ef..7ec5569a01f 100644 --- a/packages/service-integration/setup.py +++ b/packages/service-integration/setup.py @@ -43,10 +43,10 @@ def read_reqs(reqs_path: Path) -> set[str]: ) # STRICT requirements -SETUP = dict( - name="simcore-service-integration", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "name": "simcore-service-integration", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": ", ".join( ( "Pedro Crespo-Valero (pcrespov)", "Sylvain Anderegg (sanderegg)", @@ -54,40 +54,43 @@ def read_reqs(reqs_path: Path) -> set[str]: "Andrei Neagu (GitHK)", ) ), - description="Toolkit for service integration", - classifiers=[ + "description": "Toolkit for service integration", + "classifiers": [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Framework :: Pytest", ], - long_description=Path(CURRENT_DIR / "README.md").read_text(), - python_requires=">=3.6", # TODO: should we deal with ~=3.9??
- license="MIT license", - install_requires=INSTALL_REQUIREMENTS, - packages=find_packages(where="src"), - package_dir={"": "src"}, - include_package_data=True, - package_data={ + "long_description": Path(CURRENT_DIR / "README.md").read_text(), + "python_requires": ">=3.10", + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_dir": {"": "src"}, + "include_package_data": True, + "package_data": { "": [ + "py.typed", "service/tests/**/*.py", "service/tests/unit/*.py", ] }, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={}, - zip_safe=False, - entry_points={ + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {}, + "zip_safe": False, + "entry_points": { "console_scripts": [ - "osparc-service-integrator=service_integration.cli:app", "ooil=service_integration.cli:app", + "osparc-service-integrator=service_integration.cli:app", + "simcore-service=service_integration.cli:app", + "simcore-service-integrator=service_integration.cli:app", ], "pytest11": ["simcore_service_integration=service_integration.pytest_plugin"], }, -) +} if __name__ == "__main__": diff --git a/packages/service-integration/src/service_integration/_compose_spec_model_autogenerated.py b/packages/service-integration/src/service_integration/_compose_spec_model_autogenerated.py index d38cd9bef83..a0a5f295402 100644 --- a/packages/service-integration/src/service_integration/_compose_spec_model_autogenerated.py +++ b/packages/service-integration/src/service_integration/_compose_spec_model_autogenerated.py @@ -2,19 +2,16 @@ # filename: https://raw.githubusercontent.com/compose-spec/compose-spec/master/schema/compose-spec.json # timestamp: 2021-11-19T10:40:07+00:00 -# type: ignore - +# type:ignore from enum import Enum -from typing import Any, Optional, Union +from typing import Any, TypeAlias -from pydantic import BaseModel, Extra, Field, conint, constr +from pydantic import BaseModel, ConfigDict, Field, RootModel, StringConstraints +from typing_extensions import Annotated # MODIFICATIONS ------------------------------------------------------------------------- # -SCHEMA_URL = "http://json-schema.org/draft/2019-09/schema#" -SCHEMA_VERSION = SCHEMA_URL.split("/")[-2] - # "$schema": "http://json-schema.org/draft/2019-09/schema#", # # UserWarning: format of 'ports' not understood for 'number' - using default @@ -22,29 +19,29 @@ # UserWarning: format of 'duration' not understood for 'string' - using default # UserWarning: format of 'subnet_ip_address' not understood for 'string' - using default -PortInt = conint(gt=0, lt=65535) +# port number range +PortInt: TypeAlias = Annotated[int, Field(gt=0, lt=65535)] + # ---------------------------------------------------------------------------------------- class Configuration(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - source: Optional[str] = None - target: Optional[str] = None - uid: Optional[str] = None - gid: Optional[str] = None - mode: Optional[float] = None + source: str | None = None + target: str | None = None + uid: str | None = None + gid: str | None = None + mode: float | None = None class CredentialSpec(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - config: Optional[str] = None - file: Optional[str] = None - registry: Optional[str] = None + config: str | None = None + file: str | None = None + registry: str | None = None class Condition(Enum): @@ -54,37 
+51,35 @@ class Condition(Enum): class DependsOn(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") condition: Condition class Extend(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") service: str - file: Optional[str] = None + file: str | None = None class Logging(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - driver: Optional[str] = None - options: Optional[dict[constr(regex=r"^.+$"), Optional[Union[str, float]]]] = None + driver: str | None = None + options: dict[ + Annotated[str, StringConstraints(pattern=r"^.+$")], str | float | None + ] | None = None class Port(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - mode: Optional[str] = None - host_ip: Optional[str] = None - target: Optional[int] = None - published: Optional[int] = None - protocol: Optional[str] = None + mode: str | None = None + host_ip: str | None = None + target: int | None = None + published: int | None = None + protocol: str | None = None class PullPolicy(Enum): @@ -96,70 +91,63 @@ class PullPolicy(Enum): class Secret1(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - source: Optional[str] = None - target: Optional[str] = None - uid: Optional[str] = None - gid: Optional[str] = None - mode: Optional[float] = None + source: str | None = None + target: str | None = None + uid: str | None = None + gid: str | None = None + mode: float | None = None class Ulimit(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") hard: int soft: int class Bind(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - propagation: Optional[str] = None - create_host_path: Optional[bool] = None + propagation: str | None = None + create_host_path: bool | None = None class Volume2(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - nocopy: Optional[bool] = None + nocopy: bool | None = None class Tmpfs(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - size: Optional[Union[conint(ge=0), str]] = None + size: Annotated[int, Field(ge=0)] | str | None = None class Volume1(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") type: str - source: Optional[str] = None - target: Optional[str] = None - read_only: Optional[bool] = None - consistency: Optional[str] = None - bind: Optional[Bind] = None - volume: Optional[Volume2] = None - tmpfs: Optional[Tmpfs] = None + source: str | None = None + target: str | None = None + read_only: bool | None = None + consistency: str | None = None + bind: Bind | None = None + volume: Volume2 | None = None + tmpfs: Tmpfs | None = None class Healthcheck(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - disable: Optional[bool] = None - interval: Optional[str] = None - retries: Optional[float] = None - test: Optional[Union[str, list[str]]] = None - timeout: Optional[str] = None - start_period: Optional[str] = None + disable: bool | None = None + interval: str | None = None + retries: float | None = None + test: str | list[str] | None = None + timeout: str | None = None + start_period: str | None = None class Order(Enum): @@ -168,15 +156,14 @@ class Order(Enum): class RollbackConfig(BaseModel): - class Config: - extra = Extra.forbid + 
model_config = ConfigDict(extra="forbid") - parallelism: Optional[int] = None - delay: Optional[str] = None - failure_action: Optional[str] = None - monitor: Optional[str] = None - max_failure_ratio: Optional[float] = None - order: Optional[Order] = None + parallelism: int | None = None + delay: str | None = None + failure_action: str | None = None + monitor: str | None = None + max_failure_ratio: float | None = None + order: Order | None = None class Order1(Enum): @@ -185,371 +172,364 @@ class Order1(Enum): class UpdateConfig(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - parallelism: Optional[int] = None - delay: Optional[str] = None - failure_action: Optional[str] = None - monitor: Optional[str] = None - max_failure_ratio: Optional[float] = None - order: Optional[Order1] = None + parallelism: int | None = None + delay: str | None = None + failure_action: str | None = None + monitor: str | None = None + max_failure_ratio: float | None = None + order: Order1 | None = None class Limits(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - cpus: Optional[Union[float, str]] = None - memory: Optional[str] = None + cpus: float | str | None = None + memory: str | None = None class RestartPolicy(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - condition: Optional[str] = None - delay: Optional[str] = None - max_attempts: Optional[int] = None - window: Optional[str] = None + condition: str | None = None + delay: str | None = None + max_attempts: int | None = None + window: str | None = None class Preference(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - spread: Optional[str] = None + spread: str | None = None class Placement(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - constraints: Optional[list[str]] = None - preferences: Optional[list[Preference]] = None - max_replicas_per_node: Optional[int] = None + constraints: list[str] | None = None + preferences: list[Preference] | None = None + max_replicas_per_node: int | None = None class DiscreteResourceSpec(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - kind: Optional[str] = None - value: Optional[float] = None + kind: str | None = None + value: float | None = None class GenericResource(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - discrete_resource_spec: Optional[DiscreteResourceSpec] = None + discrete_resource_spec: DiscreteResourceSpec | None = None -class GenericResources(BaseModel): - __root__: list[GenericResource] +class GenericResources(RootModel): + root: list[GenericResource] class ConfigItem(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - subnet: Optional[str] = None - ip_range: Optional[str] = None - gateway: Optional[str] = None - aux_addresses: Optional[dict[constr(regex=r"^.+$"), str]] = None + subnet: str | None = None + ip_range: str | None = None + gateway: str | None = None + aux_addresses: dict[ + Annotated[str, StringConstraints(pattern=r"^.+$")], str + ] | None = None class Ipam(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - driver: Optional[str] = None - config: Optional[list[ConfigItem]] = None - options: Optional[dict[constr(regex=r"^.+$"), str]] = None + driver: str | None = None + config: 
list[ConfigItem] | None = None + options: dict[Annotated[str, StringConstraints(pattern=r"^.+$")], str] | None = None class External(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None + name: str | None = None class External1(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None + name: str | None = None class External2(BaseModel): - name: Optional[str] = None + name: str | None = None class External3(BaseModel): - name: Optional[str] = None + name: str | None = None -class ListOfStrings(BaseModel): - __root__: list[str] +class ListOfStrings(RootModel): + root: list[str] -class ListOrDict(BaseModel): - __root__: Union[ - dict[constr(regex=r".+"), Optional[Union[str, float, bool]]], list[str] - ] +class ListOrDict(RootModel): + root: ( + dict[ + Annotated[str, StringConstraints(pattern=r".+")], str | float | bool | None + ] + | list[str] + ) class BlkioLimit(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - path: Optional[str] = None - rate: Optional[Union[int, str]] = None + path: str | None = None + rate: int | str | None = None class BlkioWeight(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - path: Optional[str] = None - weight: Optional[int] = None + path: str | None = None + weight: int | None = None -class Constraints(BaseModel): - __root__: Any +class Constraints(RootModel): + root: Any = None class BuildItem(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - context: Optional[str] = None - dockerfile: Optional[str] = None - args: Optional[ListOrDict] = None - labels: Optional[ListOrDict] = None - cache_from: Optional[list[str]] = None - network: Optional[str] = None - target: Optional[str] = None - shm_size: Optional[Union[int, str]] = None - extra_hosts: Optional[ListOrDict] = None - isolation: Optional[str] = None + context: str | None = None + dockerfile: str | None = None + args: ListOrDict | None = None + labels: ListOrDict | None = None + cache_from: list[str] | None = None + network: str | None = None + target: str | None = None + shm_size: int | str | None = None + extra_hosts: ListOrDict | None = None + isolation: str | None = None class BlkioConfig(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - device_read_bps: Optional[list[BlkioLimit]] = None - device_read_iops: Optional[list[BlkioLimit]] = None - device_write_bps: Optional[list[BlkioLimit]] = None - device_write_iops: Optional[list[BlkioLimit]] = None - weight: Optional[int] = None - weight_device: Optional[list[BlkioWeight]] = None + device_read_bps: list[BlkioLimit] | None = None + device_read_iops: list[BlkioLimit] | None = None + device_write_bps: list[BlkioLimit] | None = None + device_write_iops: list[BlkioLimit] | None = None + weight: int | None = None + weight_device: list[BlkioWeight] | None = None class Network1(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - aliases: Optional[ListOfStrings] = None - ipv4_address: Optional[str] = None - ipv6_address: Optional[str] = None - link_local_ips: Optional[ListOfStrings] = None - priority: Optional[float] = None + aliases: ListOfStrings | None = None + ipv4_address: str | None = None + ipv6_address: str | None = None + link_local_ips: ListOfStrings | None = None + priority: float | None = 
None class Device(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - capabilities: Optional[ListOfStrings] = None - count: Optional[Union[str, int]] = None - device_ids: Optional[ListOfStrings] = None - driver: Optional[str] = None - options: Optional[ListOrDict] = None + capabilities: ListOfStrings | None = None + count: str | int | None = None + device_ids: ListOfStrings | None = None + driver: str | None = None + options: ListOrDict | None = None -class Devices(BaseModel): - __root__: list[Device] +class Devices(RootModel): + root: list[Device] class Network(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None - driver: Optional[str] = None - driver_opts: Optional[dict[constr(regex=r"^.+$"), Union[str, float]]] = None - ipam: Optional[Ipam] = None - external: Optional[External] = None - internal: Optional[bool] = None - enable_ipv6: Optional[bool] = None - attachable: Optional[bool] = None - labels: Optional[ListOrDict] = None + name: str | None = None + driver: str | None = None + driver_opts: dict[ + Annotated[str, StringConstraints(pattern=r"^.+$")], str | float + ] | None = None + ipam: Ipam | None = None + external: External | None = None + internal: bool | None = None + enable_ipv6: bool | None = None + attachable: bool | None = None + labels: ListOrDict | None = None class Volume(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None - driver: Optional[str] = None - driver_opts: Optional[dict[constr(regex=r"^.+$"), Union[str, float]]] = None - external: Optional[External1] = None - labels: Optional[ListOrDict] = None + name: str | None = None + driver: str | None = None + driver_opts: dict[ + Annotated[str, StringConstraints(pattern=r"^.+$")], str | float + ] | None = None + external: External1 | None = None + labels: ListOrDict | None = None class Secret(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None - file: Optional[str] = None - external: Optional[External2] = None - labels: Optional[ListOrDict] = None - driver: Optional[str] = None - driver_opts: Optional[dict[constr(regex=r"^.+$"), Union[str, float]]] = None - template_driver: Optional[str] = None + name: str | None = None + file: str | None = None + external: External2 | None = None + labels: ListOrDict | None = None + driver: str | None = None + driver_opts: dict[ + Annotated[str, StringConstraints(pattern=r"^.+$")], str | float + ] | None = None + template_driver: str | None = None class ComposeSpecConfig(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - name: Optional[str] = None - file: Optional[str] = None - external: Optional[External3] = None - labels: Optional[ListOrDict] = None - template_driver: Optional[str] = None + name: str | None = None + file: str | None = None + external: External3 | None = None + labels: ListOrDict | None = None + template_driver: str | None = None -class StringOrList(BaseModel): - __root__: Union[str, ListOfStrings] +class StringOrList(RootModel): + root: str | ListOfStrings class Reservations(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - cpus: Optional[Union[float, str]] = None - memory: Optional[str] = None - generic_resources: Optional[GenericResources] = None - devices: Optional[Devices] = None + cpus: float | str | None = None + 
memory: str | None = None + generic_resources: GenericResources | None = None + devices: Devices | None = None class Resources(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - limits: Optional[Limits] = None - reservations: Optional[Reservations] = None + limits: Limits | None = None + reservations: Reservations | None = None class Deployment(BaseModel): - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - mode: Optional[str] = None - endpoint_mode: Optional[str] = None - replicas: Optional[int] = None - labels: Optional[ListOrDict] = None - rollback_config: Optional[RollbackConfig] = None - update_config: Optional[UpdateConfig] = None - resources: Optional[Resources] = None - restart_policy: Optional[RestartPolicy] = None - placement: Optional[Placement] = None + mode: str | None = None + endpoint_mode: str | None = None + replicas: int | None = None + labels: ListOrDict | None = None + rollback_config: RollbackConfig | None = None + update_config: UpdateConfig | None = None + resources: Resources | None = None + restart_policy: RestartPolicy | None = None + placement: Placement | None = None class Service(BaseModel): - class Config: - extra = Extra.forbid - - deploy: Optional[Deployment] = None - build: Optional[Union[str, BuildItem]] = None - blkio_config: Optional[BlkioConfig] = None - cap_add: Optional[list[str]] = None - cap_drop: Optional[list[str]] = None - cgroup_parent: Optional[str] = None - command: Optional[Union[str, list[str]]] = None - configs: Optional[list[Union[str, Configuration]]] = None - container_name: Optional[str] = None - cpu_count: Optional[conint(ge=0)] = None - cpu_percent: Optional[conint(ge=0, le=100)] = None - cpu_shares: Optional[Union[float, str]] = None - cpu_quota: Optional[Union[float, str]] = None - cpu_period: Optional[Union[float, str]] = None - cpu_rt_period: Optional[Union[float, str]] = None - cpu_rt_runtime: Optional[Union[float, str]] = None - cpus: Optional[Union[float, str]] = None - cpuset: Optional[str] = None - credential_spec: Optional[CredentialSpec] = None - depends_on: Optional[ - Union[ListOfStrings, dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), DependsOn]] - ] = None - device_cgroup_rules: Optional[ListOfStrings] = None - devices: Optional[list[str]] = None - dns: Optional[StringOrList] = None - dns_opt: Optional[list[str]] = None - dns_search: Optional[StringOrList] = None - domainname: Optional[str] = None - entrypoint: Optional[Union[str, list[str]]] = None - env_file: Optional[StringOrList] = None - environment: Optional[ListOrDict] = None - expose: Optional[list[Union[str, float]]] = None - extends: Optional[Union[str, Extend]] = None - external_links: Optional[list[str]] = None - extra_hosts: Optional[ListOrDict] = None - group_add: Optional[list[Union[str, float]]] = None - healthcheck: Optional[Healthcheck] = None - hostname: Optional[str] = None - image: Optional[str] = None - init: Optional[bool] = None - ipc: Optional[str] = None - isolation: Optional[str] = None - labels: Optional[ListOrDict] = None - links: Optional[list[str]] = None - logging: Optional[Logging] = None - mac_address: Optional[str] = None - mem_limit: Optional[Union[float, str]] = None - mem_reservation: Optional[Union[str, int]] = None - mem_swappiness: Optional[int] = None - memswap_limit: Optional[Union[float, str]] = None - network_mode: Optional[str] = None - networks: Optional[ - Union[ - ListOfStrings, dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), Optional[Network1]] + 
model_config = ConfigDict(extra="forbid") + + deploy: Deployment | None = None + build: str | BuildItem | None = None + blkio_config: BlkioConfig | None = None + cap_add: list[str] | None = None + cap_drop: list[str] | None = None + cgroup_parent: str | None = None + command: str | list[str] | None = None + configs: list[str | Configuration] | None = None + container_name: str | None = None + cpu_count: Annotated[int, Field(ge=0)] | None = None + cpu_percent: Annotated[int, Field(ge=0, le=100)] | None = None + cpu_shares: float | str | None = None + cpu_quota: float | str | None = None + cpu_period: float | str | None = None + cpu_rt_period: float | str | None = None + cpu_rt_runtime: float | str | None = None + cpus: float | str | None = None + cpuset: str | None = None + credential_spec: CredentialSpec | None = None + depends_on: None | ( + ListOfStrings + | dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], DependsOn + ] + ) = None + device_cgroup_rules: ListOfStrings | None = None + devices: list[str] | None = None + dns: StringOrList | None = None + dns_opt: list[str] | None = None + dns_search: StringOrList | None = None + domainname: str | None = None + entrypoint: str | list[str] | None = None + env_file: StringOrList | None = None + environment: ListOrDict | None = None + expose: list[str | float] | None = None + extends: str | Extend | None = None + external_links: list[str] | None = None + extra_hosts: ListOrDict | None = None + group_add: list[str | float] | None = None + healthcheck: Healthcheck | None = None + hostname: str | None = None + image: str | None = None + init: bool | None = None + ipc: str | None = None + isolation: str | None = None + labels: ListOrDict | None = None + links: list[str] | None = None + logging: Logging | None = None + mac_address: str | None = None + mem_limit: float | str | None = None + mem_reservation: str | int | None = None + mem_swappiness: int | None = None + memswap_limit: float | str | None = None + network_mode: str | None = None + networks: None | ( + ListOfStrings + | dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], + Network1 | None, ] - ] = None - oom_kill_disable: Optional[bool] = None - oom_score_adj: Optional[conint(ge=-1000, le=1000)] = None - pid: Optional[Optional[str]] = None - pids_limit: Optional[Union[float, str]] = None - platform: Optional[str] = None - ports: Optional[list[Union[PortInt, str, Port]]] = None - privileged: Optional[bool] = None - profiles: Optional[ListOfStrings] = None - pull_policy: Optional[PullPolicy] = None - read_only: Optional[bool] = None - restart: Optional[str] = None - runtime: Optional[str] = None - scale: Optional[int] = None - security_opt: Optional[list[str]] = None - shm_size: Optional[Union[float, str]] = None - secrets: Optional[list[Union[str, Secret1]]] = None - sysctls: Optional[ListOrDict] = None - stdin_open: Optional[bool] = None - stop_grace_period: Optional[str] = None - stop_signal: Optional[str] = None - storage_opt: Optional[dict[str, Any]] = None - tmpfs: Optional[StringOrList] = None - tty: Optional[bool] = None - ulimits: Optional[dict[constr(regex=r"^[a-z]+$"), Union[int, Ulimit]]] = None - user: Optional[str] = None - userns_mode: Optional[str] = None - volumes: Optional[list[Union[str, Volume1]]] = None - volumes_from: Optional[list[str]] = None - working_dir: Optional[str] = None + ) = None + oom_kill_disable: bool | None = None + oom_score_adj: Annotated[int, Field(ge=-1000, le=1000)] | None = None + pid: str | None = None + 
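The regenerated compose-spec model above applies the same Pydantic v2 idioms throughout: `class Config: extra = Extra.forbid` becomes `model_config = ConfigDict(extra="forbid")`, `__root__` models become `RootModel` subclasses, `Optional[X]`/`Union[...]` become `X | None` unions, and `constr(regex=...)`/`conint(ge=...)` become `Annotated` types with `StringConstraints`/`Field`. A minimal, self-contained sketch of the pattern (the model names here are illustrative, not part of the generated spec):

```python
from typing import Annotated

from pydantic import BaseModel, ConfigDict, Field, RootModel, StringConstraints

ServiceName = Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")]


class DeviceSketch(BaseModel):
    model_config = ConfigDict(extra="forbid")  # was: class Config: extra = Extra.forbid

    driver: str | None = None                  # was: Optional[str]
    count: str | int | None = None             # was: Optional[Union[str, int]]
    cpu_percent: Annotated[int, Field(ge=0, le=100)] | None = None  # was: conint(ge=0, le=100)


class DevicesSketch(RootModel):                # was: __root__: list[Device]
    root: list[DeviceSketch]


class StackSketch(BaseModel):
    model_config = ConfigDict(extra="forbid")
    # was: dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), ...]
    services: dict[ServiceName, DeviceSketch] | None = None


print(StackSketch.model_validate({"services": {"web": {"driver": "nvidia", "count": 1}}}))
```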
pids_limit: float | str | None = None + platform: str | None = None + ports: list[PortInt | str | Port] | None = None + privileged: bool | None = None + profiles: ListOfStrings | None = None + pull_policy: PullPolicy | None = None + read_only: bool | None = None + restart: str | None = None + runtime: str | None = None + scale: int | None = None + security_opt: list[str] | None = None + shm_size: float | str | None = None + secrets: list[str | Secret1] | None = None + sysctls: ListOrDict | None = None + stdin_open: bool | None = None + stop_grace_period: str | None = None + stop_signal: str | None = None + storage_opt: dict[str, Any] | None = None + tmpfs: StringOrList | None = None + tty: bool | None = None + ulimits: dict[ + Annotated[str, StringConstraints(pattern=r"^[a-z]+$")], int | Ulimit + ] | None = None + user: str | None = None + userns_mode: str | None = None + volumes: list[str | Volume1] | None = None + volumes_from: list[str] | None = None + working_dir: str | None = None class ComposeSpecification(BaseModel): @@ -557,17 +537,27 @@ class ComposeSpecification(BaseModel): The Compose file is a YAML file defining a multi-containers based application. """ - class Config: - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") - version: Optional[str] = Field( + version: str | None = Field( None, description="Version of the Compose specification used. Tools not implementing required version MUST reject the configuration file.", ) - services: Optional[dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), Service]] = None - networks: Optional[dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), Network]] = None - volumes: Optional[dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), Volume]] = None - secrets: Optional[dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), Secret]] = None - configs: Optional[ - dict[constr(regex=r"^[a-zA-Z0-9._-]+$"), ComposeSpecConfig] - ] = None + services: dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], Service + ] | None = None + networks: dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], Network + ] | None = None + volumes: dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], Volume + ] | None = None + secrets: dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], Secret + ] | None = None + configs: None | ( + dict[ + Annotated[str, StringConstraints(pattern=r"^[a-zA-Z0-9._-]+$")], + ComposeSpecConfig, + ] + ) = None diff --git a/packages/service-integration/src/service_integration/_meta.py b/packages/service-integration/src/service_integration/_meta.py index 5bb808b8a43..edd84d14f86 100644 --- a/packages/service-integration/src/service_integration/_meta.py +++ b/packages/service-integration/src/service_integration/_meta.py @@ -1,11 +1,10 @@ -import pkg_resources -from models_library.services import LATEST_INTEGRATION_VERSION +from importlib.metadata import distribution, version -# TODO: replace pkg_resources with https://importlib-metadata.readthedocs.io/en/latest/index.html which is backported from py3.7 and 3.8 +from models_library.services_constants import LATEST_INTEGRATION_VERSION -current_distribution = pkg_resources.get_distribution("simcore-service-integration") -project_name: str = current_distribution.project_name -__version__ = current_distribution.version +current_distribution = distribution("simcore-service-integration") +project_name: str = current_distribution.metadata["Name"] +__version__ = version("simcore-service-integration") INTEGRATION_API_VERSION = "1.0.0" diff --git 
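The `_meta.py` rewrite above drops `pkg_resources` in favour of the standard-library `importlib.metadata`. A small sketch of the same lookup, reusing the distribution name from the module; the `PackageNotFoundError` fallback is an assumption for illustration and not part of the original code:

```python
from importlib.metadata import PackageNotFoundError, distribution, version

_DIST_NAME = "simcore-service-integration"  # distribution name as used above

try:
    _dist = distribution(_DIST_NAME)
    project_name: str = _dist.metadata["Name"]  # replaces pkg_resources' .project_name
    __version__: str = version(_DIST_NAME)      # replaces pkg_resources' .version
except PackageNotFoundError:
    # Hypothetical fallback when the package is not installed (not in the original module).
    project_name, __version__ = _DIST_NAME, "0.0.0"

print(project_name, __version__)
```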
a/packages/service-integration/src/service_integration/cli.py b/packages/service-integration/src/service_integration/cli.py deleted file mode 100644 index 50dd85eb035..00000000000 --- a/packages/service-integration/src/service_integration/cli.py +++ /dev/null @@ -1,61 +0,0 @@ -# Allows entrypoint via python -m as well - -from typing import Optional - -import rich -import typer - -from ._meta import __version__ -from .commands import compose, config, metadata, run_creator, test -from .settings import AppSettings - -app = typer.Typer() - - -def version_callback(value: bool): - if value: - rich.print(__version__) - raise typer.Exit() - - -@app.callback() -def main( - ctx: typer.Context, - version: Optional[bool] = typer.Option( - None, - "--version", - callback=version_callback, - is_eager=True, - ), - registry_name: Optional[str] = typer.Option( - None, - "--REGISTRY_NAME", - help="image registry name. Full url or prefix used as prefix in an image name", - ), - compose_version: Optional[str] = typer.Option( - None, - "--COMPOSE_VERSION", - help="version used for docker compose specification", - ), -): - """o2s2parc service integration library""" - assert version or not version # nosec - - overrides = {} - if registry_name: - overrides["REGISTRY_NAME"] = registry_name - - if compose_version: - overrides["COMPOSE_VERSION"] = compose_version - - ctx.settings = AppSettings(**overrides) - - -# new -app.command("compose")(compose.main) -app.command("config")(config.main) -app.command("test")(test.main) -# legacy -app.command("bump-version")(metadata.bump_version) -app.command("get-version")(metadata.get_version) -app.command("run-creator")(run_creator.main) diff --git a/packages/service-integration/src/service_integration/cli/__init__.py b/packages/service-integration/src/service_integration/cli/__init__.py new file mode 100644 index 00000000000..a146de5735d --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/__init__.py @@ -0,0 +1,78 @@ +# Allows entrypoint via python -m as well + +from typing import Annotated + +import rich +import typer + +from .._meta import __version__ +from ..settings import AppSettings +from . import _compose_spec, _metadata, _run_creator, _test +from ._config import config_app + +app = typer.Typer() + + +def _version_callback(value: bool) -> None: # noqa: FBT001 + if value: + rich.print(__version__) + raise typer.Exit + + +@app.callback() +def main( + ctx: typer.Context, + registry_name: ( + Annotated[ + str, + typer.Option( + "--REGISTRY_NAME", + help="image registry name. 
Full url or prefix used as prefix in an image name", + ), + ] + | None + ) = None, + compose_version: ( + Annotated[ + str, + typer.Option( + "--COMPOSE_VERSION", + help="version used for docker compose specification", + ), + ] + | None + ) = None, + version: Annotated[ # noqa: FBT002 + bool, + typer.Option( + "--version", + callback=_version_callback, + is_eager=True, + ), + ] = False, +): + """o2s2parc service Integration Library (OOIL in short)""" + assert isinstance(version, bool | None) # nosec + + overrides = {} + if registry_name: + overrides["REGISTRY_NAME"] = registry_name + + if compose_version: + overrides["COMPOSE_VERSION"] = compose_version + + # save states + ctx.settings = AppSettings.model_validate(overrides) # type: ignore[attr-defined] # pylint:disable=no-member + + +# +# REGISTER commands and/or sub-apps +# + +app.command("compose")(_compose_spec.create_compose) +app.add_typer(config_app, name="config", help="Manage osparc config files") +app.command("test")(_test.run_tests) +# legacy +app.command("bump-version")(_metadata.bump_version) +app.command("get-version")(_metadata.get_version) +app.command("run-creator")(_run_creator.run_creator) diff --git a/packages/service-integration/src/service_integration/cli/_compose_spec.py b/packages/service-integration/src/service_integration/cli/_compose_spec.py new file mode 100644 index 00000000000..448375d25cd --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/_compose_spec.py @@ -0,0 +1,231 @@ +import subprocess +from pathlib import Path +from typing import Annotated + +import arrow +import rich +import typer +import yaml +from models_library.utils.labels_annotations import LabelsAnnotationsDict, to_labels +from rich.console import Console +from yarl import URL + +from ..compose_spec_model import ComposeSpecification +from ..errors import UndefinedOciImageSpecError +from ..oci_image_spec import LS_LABEL_PREFIX, OCI_LABEL_PREFIX +from ..osparc_config import ( + OSPARC_CONFIG_DIRNAME, + DockerComposeOverwriteConfig, + MetadataConfig, + RuntimeConfig, +) +from ..osparc_image_specs import create_image_spec +from ..settings import AppSettings + +error_console = Console(stderr=True) + + +def _run_git(*args) -> str: + """:raises CalledProcessError""" + return subprocess.run( # nosec + ["git", *list(args)], + capture_output=True, + encoding="utf8", + check=True, + ).stdout.strip() + + +def _strip_credentials(url: str) -> str: + if (yarl_url := URL(url)) and yarl_url.is_absolute(): + stripped_url = URL(url).with_user(None).with_password(None) + return f"{stripped_url}" + return url + + +def _run_git_or_empty_string(*args) -> str: + try: + return _run_git(*args) + except FileNotFoundError as err: + error_console.print( + "WARNING: Defaulting label to emtpy string", + "since git is not installed or cannot be executed:", + err, + ) + except subprocess.CalledProcessError as err: + error_console.print( + "WARNING: Defaulting label to emtpy string", + "due to:", + err.stderr, + ) + return "" + + +def create_docker_compose_image_spec( + settings: AppSettings, + *, + meta_config_path: Path, + docker_compose_overwrite_path: Path | None = None, + service_config_path: Path | None = None, +) -> ComposeSpecification: + """Creates image compose-spec""" + + config_basedir = meta_config_path.parent + + # REQUIRED + meta_cfg = MetadataConfig.from_yaml(meta_config_path) + + # REQUIRED + if docker_compose_overwrite_path: + docker_compose_overwrite_cfg = DockerComposeOverwriteConfig.from_yaml( + docker_compose_overwrite_path + ) + 
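The new `_strip_credentials` helper above uses `yarl.URL` to drop any embedded user/token from the git remote URL before it is written into an image label; scp-style SSH remotes are not absolute URLs and pass through unchanged. A standalone sketch of that behaviour:

```python
from yarl import URL


def strip_credentials(url: str) -> str:
    # Only absolute URLs (i.e. with a host) carry userinfo that yarl can strip.
    if (parsed := URL(url)) and parsed.is_absolute():
        return str(parsed.with_user(None).with_password(None))
    return url


assert strip_credentials("https://user:token@github.com/org/repo.git") == "https://github.com/org/repo.git"
# scp-like git remotes are left untouched
assert strip_credentials("git@github.com:org/repo.git") == "git@github.com:org/repo.git"
```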
else: + docker_compose_overwrite_cfg = DockerComposeOverwriteConfig.create_default( + service_name=meta_cfg.service_name() + ) + + # OPTIONAL + runtime_cfg = None + if service_config_path: + try: + runtime_cfg = RuntimeConfig.from_yaml(service_config_path) + except FileNotFoundError: + rich.print("No runtime config found (optional), using default.") + + # OCI annotations (optional) + extra_labels: LabelsAnnotationsDict = {} + try: + oci_spec = yaml.safe_load( + (config_basedir / f"{OCI_LABEL_PREFIX}.yml").read_text() + ) + if not oci_spec: + raise UndefinedOciImageSpecError + + oci_labels = to_labels(oci_spec, prefix_key=OCI_LABEL_PREFIX) + extra_labels.update(oci_labels) + except (FileNotFoundError, UndefinedOciImageSpecError): + try: + # if not OCI, try label-schema + ls_spec = yaml.safe_load( + (config_basedir / f"{LS_LABEL_PREFIX}.yml").read_text() + ) + ls_labels = to_labels(ls_spec, prefix_key=LS_LABEL_PREFIX) + extra_labels.update(ls_labels) + except FileNotFoundError: + rich.print( + "No explicit config for OCI/label-schema found (optional), skipping OCI annotations." + ) + # add required labels + + # SEE https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys + # Format the datetime object as a string following RFC-3339 + rfc3339_format = arrow.now().format("YYYY-MM-DDTHH:mm:ssZ") + extra_labels[f"{LS_LABEL_PREFIX}.build-date"] = rfc3339_format + extra_labels[f"{LS_LABEL_PREFIX}.schema-version"] = "1.0" + + extra_labels[f"{LS_LABEL_PREFIX}.vcs-ref"] = _run_git_or_empty_string( + "rev-parse", "HEAD" + ) + extra_labels[f"{LS_LABEL_PREFIX}.vcs-url"] = _strip_credentials( + _run_git_or_empty_string("config", "--get", "remote.origin.url") + ) + + return create_image_spec( + settings, + meta_cfg, + docker_compose_overwrite_cfg, + runtime_cfg, + extra_labels=extra_labels, + ) + + +def create_compose( + ctx: typer.Context, + config_path: Annotated[ + Path, + typer.Option( + "-m", + "--metadata", + help="osparc config file or folder. 
" + "If the latter, it will scan for configs using the glob pattern 'config_path/**/metadata.yml' ", + ), + ] = Path(OSPARC_CONFIG_DIRNAME), + to_spec_file: Annotated[ + Path, + typer.Option( + "-f", + "--to-spec-file", + help="Output docker-compose image spec", + ), + ] = Path("docker-compose.yml"), +): + """Creates the docker image/runtime compose-spec file from an .osparc config""" + + if not config_path.exists(): + msg = "Invalid path to metadata file or folder" + raise typer.BadParameter(msg) + + if config_path.is_dir(): + # equivalent to 'basedir/**/metadata.yml' + basedir = config_path + config_pattern = "metadata.yml" + else: + # equivalent to 'config_path' + basedir = config_path.parent + config_pattern = config_path.name + + configs_kwargs_map: dict[str, dict[str, Path]] = {} + + for meta_config in sorted(basedir.rglob(config_pattern)): + config_name = meta_config.parent.name + configs_kwargs_map[config_name] = {} + + # load meta REQUIRED + configs_kwargs_map[config_name]["meta_config_path"] = meta_config + + # others OPTIONAL + for file_name, arg_name in ( + ("docker-compose.overwrite.yml", "docker_compose_overwrite_path"), + ("runtime.yml", "service_config_path"), + ): + file_path = meta_config.parent / file_name + if file_path.exists(): + configs_kwargs_map[config_name][arg_name] = file_path + + # warn about subfolders without metadata.yml + for subdir in filter(lambda p: p.is_dir(), basedir.rglob("*")): + if not (subdir / "metadata.yml").exists(): + relative_subdir = subdir.relative_to(basedir) + rich.print( + f"[warning] Subfolder '{relative_subdir}' does not contain a 'metadata.yml' file. Skipping." + ) + + if not configs_kwargs_map: + rich.print(f"[warning] No config files were found in '{config_path}'") + + # output + compose_spec_dict = {} + + assert ctx.parent # nosec + settings: AppSettings = ctx.parent.settings # type: ignore[attr-defined] # pylint:disable=no-member + + for n, config_name in enumerate(configs_kwargs_map): + nth_compose_spec = create_docker_compose_image_spec( + settings, **configs_kwargs_map[config_name] + ).model_dump(exclude_unset=True) + + if n == 0: + compose_spec_dict = nth_compose_spec + else: + # appends only services section! 
+ compose_spec_dict["services"].update(nth_compose_spec["services"]) + + to_spec_file.parent.mkdir(parents=True, exist_ok=True) + with to_spec_file.open("wt") as fh: + yaml.safe_dump( + compose_spec_dict, + fh, + default_flow_style=False, + sort_keys=False, + ) + rich.print(f"Created compose specs at '{to_spec_file.resolve()}'") diff --git a/packages/service-integration/src/service_integration/cli/_config.py b/packages/service-integration/src/service_integration/cli/_config.py new file mode 100644 index 00000000000..b342d002cbf --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/_config.py @@ -0,0 +1,144 @@ +from pathlib import Path +from typing import Annotated, Final + +import rich +import typer +import yaml +from common_library.json_serialization import json_loads +from models_library.utils.labels_annotations import LabelsAnnotationsDict +from pydantic import BaseModel + +from ..compose_spec_model import ComposeSpecification +from ..errors import InvalidLabelsError +from ..osparc_config import ( + OSPARC_CONFIG_COMPOSE_SPEC_NAME, + OSPARC_CONFIG_DIRNAME, + OSPARC_CONFIG_METADATA_NAME, + OSPARC_CONFIG_RUNTIME_NAME, + DockerComposeOverwriteConfig, + MetadataConfig, + RuntimeConfig, +) + + +def _get_labels_or_raise(build_labels) -> LabelsAnnotationsDict: + if isinstance(build_labels, list): + return dict(item.strip().split("=") for item in build_labels) + if isinstance(build_labels, dict): + return build_labels + if labels__root__ := build_labels.root: + assert isinstance(labels__root__, dict) # nosec + return labels__root__ + raise InvalidLabelsError(build_labels=build_labels) + + +def _create_config_from_compose_spec( + compose_spec_path: Path, + docker_compose_overwrite_path: Path, + metadata_path: Path, + service_specs_path: Path, +): + rich.print(f"Creating osparc config files from {compose_spec_path}") + + compose_spec = ComposeSpecification.model_validate( + yaml.safe_load(compose_spec_path.read_text()) + ) + + if compose_spec.services: + + has_multiple_services: Final[int] = len(compose_spec.services) + + def _save(service_name: str, filename: Path, model: BaseModel): + output_path = filename + if has_multiple_services: + output_path = filename.parent / service_name / filename.name + + output_path.parent.mkdir(parents=True, exist_ok=True) + rich.print(f"Creating {output_path} ...", end="") + + with output_path.open("wt") as fh: + data = json_loads( + model.model_dump_json(by_alias=True, exclude_none=True) + ) + yaml.safe_dump(data, fh, sort_keys=False) + + rich.print("DONE") + + for service_name in compose_spec.services: + try: + + if build_labels := compose_spec.services[ + service_name + ].build.labels: # AttributeError if build is str + + labels = _get_labels_or_raise(build_labels) + meta_cfg = MetadataConfig.from_labels_annotations(labels) + _save(service_name, metadata_path, meta_cfg) + + docker_compose_overwrite_cfg = ( + DockerComposeOverwriteConfig.create_default( + service_name=meta_cfg.service_name() + ) + ) + _save( + service_name, + docker_compose_overwrite_path, + docker_compose_overwrite_cfg, + ) + + runtime_cfg = RuntimeConfig.from_labels_annotations(labels) + _save(service_name, service_specs_path, runtime_cfg) + + except (AttributeError, TypeError, ValueError) as err: + rich.print( + f"WARNING: failure producing specs for {service_name}: {err}" + ) + + rich.print("osparc config files created") + + +config_app = typer.Typer() + + +@config_app.command(name="create") +def create_config( + from_spec_file: Annotated[ + Path, + typer.Option( + 
"-f", + "--from-spec-file", + help="docker-compose used to deduce osparc config", + ), + ] = Path("docker-compose.yml"), +): + """Creates osparc configuration folder from a complete docker compose-spec""" + config_dir = from_spec_file.parent / OSPARC_CONFIG_DIRNAME + project_cfg_path = config_dir / OSPARC_CONFIG_COMPOSE_SPEC_NAME + meta_cfg_path = config_dir / OSPARC_CONFIG_METADATA_NAME + runtime_cfg_path = config_dir / OSPARC_CONFIG_RUNTIME_NAME + + meta_cfg_path.parent.mkdir(parents=True, exist_ok=True) + runtime_cfg_path.parent.mkdir(parents=True, exist_ok=True) + rich.print(f"Creating {config_dir} from {from_spec_file} ...") + + _create_config_from_compose_spec( + from_spec_file, project_cfg_path, meta_cfg_path, runtime_cfg_path + ) + + +_COOKIECUTTER_GITHUB_URL = "gh:itisfoundation/cookiecutter-osparc-service" + + +@config_app.command(name="init") +def init_config( + template: Annotated[ + str, typer.Option(help="Github repo or path to the template") + ] = _COOKIECUTTER_GITHUB_URL, + checkout: ( + Annotated[str, typer.Option(help="Branch if different from main")] | None + ) = None, +): + """runs cookie-cutter""" + from cookiecutter.main import cookiecutter # type: ignore[import-untyped] + + cookiecutter(template, checkout=checkout) diff --git a/packages/service-integration/src/service_integration/cli/_metadata.py b/packages/service-integration/src/service_integration/cli/_metadata.py new file mode 100644 index 00000000000..a06504b3b99 --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/_metadata.py @@ -0,0 +1,79 @@ +from collections import OrderedDict +from enum import Enum +from pathlib import Path +from typing import Annotated + +import rich +import typer + +from ..osparc_config import OSPARC_CONFIG_DIRNAME, MetadataConfig +from ..versioning import bump_version_string +from ..yaml_utils import ordered_safe_dump, ordered_safe_load + + +class TargetVersionChoices(str, Enum): + INTEGRATION_VERSION = "integration-version" + SEMANTIC_VERSION = "version" + + +class UpgradeTags(str, Enum): + MAJOR = "major" + MINOR = "minor" + PATCH = "patch" + + +def bump_version( + upgrade: Annotated[UpgradeTags, typer.Option(case_sensitive=False)], + metadata_file: Annotated[ + Path, + typer.Option( + help="The metadata yaml file", + ), + ] = Path("metadata/metadata.yml"), + target_version: Annotated[ + TargetVersionChoices, typer.Argument() + ] = TargetVersionChoices.SEMANTIC_VERSION, +): + """Bumps target version in metadata (legacy)""" + # load + raw_data: OrderedDict = ordered_safe_load(metadata_file.read_text()) + + # parse and validate + metadata = MetadataConfig(**raw_data) + + # get + bump + set + attrname = target_version.replace("-", "_") + current_version: str = getattr(metadata, attrname) + raw_data[target_version] = new_version = bump_version_string( + current_version, upgrade + ) + + # dump to file (preserving order!) 
+ text = ordered_safe_dump(raw_data) + metadata_file.write_text(text) + rich.print(f"{target_version.title()} bumped: {current_version} β†’ {new_version}") + + +def get_version( + target_version: Annotated[ + TargetVersionChoices, typer.Argument() + ] = TargetVersionChoices.SEMANTIC_VERSION, + metadata_file: Annotated[ + Path, + typer.Option( + help="The metadata yaml file", + ), + ] = Path(f"{OSPARC_CONFIG_DIRNAME}/metadata.yml"), +): + """Prints to output requested version (legacy)""" + + # parse and validate + metadata = MetadataConfig.from_yaml(metadata_file) + + attrname = target_version.replace("-", "_") + current_version: str = getattr(metadata, attrname) + + # MUST have no new line so that we can produce a VERSION file with no extra new-line + # VERSION: $(METADATA) + # @simcore-service-integrator get-version --metadata-file $< > $@ + rich.print(current_version, end="") diff --git a/packages/service-integration/src/service_integration/cli/_run_creator.py b/packages/service-integration/src/service_integration/cli/_run_creator.py new file mode 100644 index 00000000000..8cadef194a4 --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/_run_creator.py @@ -0,0 +1,86 @@ +import stat +from pathlib import Path +from typing import Annotated + +import typer +import yaml + +from ..osparc_config import OSPARC_CONFIG_DIRNAME, OSPARC_CONFIG_METADATA_NAME + + +def get_input_config(metadata_file: Path) -> dict: + inputs = {} + with metadata_file.open() as fp: + metadata = yaml.safe_load(fp) + if "inputs" in metadata: + inputs = metadata["inputs"] + return inputs + + +def run_creator( + run_script_file_path: Annotated[ + Path, + typer.Option( + "--runscript", + help="Path to the run script ", + ), + ], + metadata_file: Annotated[ + Path, + typer.Option( + "--metadata", + help="The metadata yaml of the node", + ), + ] = Path(f"{OSPARC_CONFIG_DIRNAME}/{OSPARC_CONFIG_METADATA_NAME}"), +): + """Creates a sh script that uses jq tool to retrieve variables + to use in sh from a json file for use in an osparc service (legacy). + + Usage python run_creator --folder path/to/inputs.json --runscript path/to/put/the/script + + """ + + # generate variables for input + input_script = [ + """ +#!/bin/sh +#--------------------------------------------------------------- +# AUTO-GENERATED CODE, do not modify this will be overwritten!!! 
+#--------------------------------------------------------------- +# shell strict mode: +set -o errexit +set -o nounset +IFS=$(printf '\\n\\t') +cd "$(dirname "$0")" +json_input=$INPUT_FOLDER/inputs.json + """ + ] + input_config = get_input_config(metadata_file) + for input_key, input_value in input_config.items(): + input_key_upper = f"{input_key}".upper() + + if "data:" in input_value["type"]: + filename = input_key + if "fileToKeyMap" in input_value and len(input_value["fileToKeyMap"]) > 0: + filename, _ = next(iter(input_value["fileToKeyMap"].items())) + input_script.append(f"{input_key_upper}=$INPUT_FOLDER/{filename}") + input_script.append(f"export {input_key_upper}") + else: + input_script.append( + f"{input_key_upper}=$(< \"$json_input\" jq '.{input_key}')" + ) + input_script.append(f"export {input_key_upper}") + + input_script.extend( + [ + """ +exec execute.sh + """ + ] + ) + + # write shell script + shell_script = "\n".join(input_script) + run_script_file_path.write_text(shell_script) + st = run_script_file_path.stat() + run_script_file_path.chmod(st.st_mode | stat.S_IEXEC) diff --git a/packages/service-integration/src/service_integration/cli/_test.py b/packages/service-integration/src/service_integration/cli/_test.py new file mode 100644 index 00000000000..cb999b32307 --- /dev/null +++ b/packages/service-integration/src/service_integration/cli/_test.py @@ -0,0 +1,26 @@ +from pathlib import Path +from typing import Annotated + +import rich +import typer + +from ..service import pytest_runner + +test_app = typer.Typer() + + +@test_app.command("run") +def run_tests( + service_dir: Annotated[ + Path, typer.Argument(help="Root directory of the service under test") + ], +): + """Runs tests against service directory""" + + if not service_dir.exists(): + msg = "Invalid path to service directory" + raise typer.BadParameter(msg) + + rich.print(f"Testing '{service_dir.resolve()}' ...") + error_code = pytest_runner.main(service_dir=service_dir, extra_args=[]) + raise typer.Exit(code=error_code) diff --git a/packages/service-integration/src/service_integration/commands/compose.py b/packages/service-integration/src/service_integration/commands/compose.py deleted file mode 100644 index ae68a660df2..00000000000 --- a/packages/service-integration/src/service_integration/commands/compose.py +++ /dev/null @@ -1,207 +0,0 @@ -import subprocess -from datetime import datetime -from pathlib import Path -from typing import Optional - -import rich -import typer -import yaml -from rich.console import Console - -from ..compose_spec_model import ComposeSpecification -from ..labels_annotations import to_labels -from ..oci_image_spec import LS_LABEL_PREFIX, OCI_LABEL_PREFIX -from ..osparc_config import DockerComposeOverwriteCfg, MetaConfig, RuntimeConfig -from ..osparc_image_specs import create_image_spec -from ..settings import AppSettings - -error_console = Console(stderr=True) - - -def _run_git(*args) -> str: - """:raises CalledProcessError""" - return subprocess.run( - [ - "git", - ] - + list(args), - capture_output=True, - encoding="utf8", - check=True, - ).stdout.strip() - - -def _run_git_or_empty_string(*args) -> str: - try: - return _run_git(*args) - except FileNotFoundError as err: - error_console.print( - "WARNING: Defaulting label to emtpy string", - "since git is not installed or cannot be executed:", - err, - ) - except subprocess.CalledProcessError as err: - error_console.print( - "WARNING: Defaulting label to emtpy string", - "due to:", - err.stderr, - ) - return "" - - -def 
create_docker_compose_image_spec( - settings: AppSettings, - *, - meta_config_path: Path, - docker_compose_overwrite_path: Optional[Path] = None, - service_config_path: Optional[Path] = None, -) -> ComposeSpecification: - """Creates image compose-spec""" - - config_basedir = meta_config_path.parent - - # required - meta_cfg = MetaConfig.from_yaml(meta_config_path) - - # required - if docker_compose_overwrite_path: - docker_compose_overwrite_cfg = DockerComposeOverwriteCfg.from_yaml( - docker_compose_overwrite_path - ) - else: - docker_compose_overwrite_cfg = DockerComposeOverwriteCfg.create_default( - service_name=meta_cfg.service_name() - ) - - # optional - runtime_cfg = None - if service_config_path: - try: - # TODO: should include default? - runtime_cfg = RuntimeConfig.from_yaml(service_config_path) - except FileNotFoundError: - rich.print("No runtime config found (optional), using default.") - - # OCI annotations (optional) - extra_labels = {} - try: - oci_spec = yaml.safe_load( - (config_basedir / f"{OCI_LABEL_PREFIX}.yml").read_text() - ) - if not oci_spec: - raise ValueError("Undefined OCI image spec") - - oci_labels = to_labels(oci_spec, prefix_key=OCI_LABEL_PREFIX) - extra_labels.update(oci_labels) - except (FileNotFoundError, ValueError): - - try: - # if not OCI, try label-schema - ls_spec = yaml.safe_load( - (config_basedir / f"{LS_LABEL_PREFIX}.yml").read_text() - ) - ls_labels = to_labels(ls_spec, prefix_key=LS_LABEL_PREFIX) - extra_labels.update(ls_labels) - except FileNotFoundError: - rich.print( - "No explicit config for OCI/label-schema found (optional), skipping OCI annotations." - ) - # add required labels - extra_labels[f"{LS_LABEL_PREFIX}.build-date"] = datetime.utcnow().strftime( - "%Y-%m-%dT%H:%M:%SZ" - ) - extra_labels[f"{LS_LABEL_PREFIX}.schema-version"] = "1.0" - - extra_labels[f"{LS_LABEL_PREFIX}.vcs-ref"] = _run_git_or_empty_string( - "rev-parse", "HEAD" - ) - extra_labels[f"{LS_LABEL_PREFIX}.vcs-url"] = _run_git_or_empty_string( - "config", "--get", "remote.origin.url" - ) - - compose_spec = create_image_spec( - settings, - meta_cfg, - docker_compose_overwrite_cfg, - runtime_cfg, - extra_labels=extra_labels, - ) - - return compose_spec - - -def main( - ctx: typer.Context, - config_path: Path = typer.Option( - ".osparc", - "-m", - "--metadata", - help="osparc config file or folder. 
" - "If the latter, it will scan for configs using the glob pattern 'config_path/**/metadata.yml' ", - ), - to_spec_file: Path = typer.Option( - Path("docker-compose.yml"), - "-f", - "--to-spec-file", - help="Output docker-compose image spec", - ), -): - """create docker image/runtime compose-specs from an osparc config""" - - # TODO: all these MUST be replaced by osparc_config.ConfigFilesStructure - if not config_path.exists(): - raise typer.BadParameter("Invalid path to metadata file or folder") - - if config_path.is_dir(): - # equivalent to 'basedir/**/metadata.yml' - basedir = config_path - config_pattern = "metadata.yml" - else: - # equivalent to 'config_path' - basedir = config_path.parent - config_pattern = config_path.name - - configs_kwargs_map: dict[str, dict[str, Path]] = {} - - for meta_config in sorted(list(basedir.rglob(config_pattern))): - config_name = meta_config.parent.name - configs_kwargs_map[config_name] = {} - - # load meta [required] - configs_kwargs_map[config_name]["meta_config_path"] = meta_config - - # others [optional] - for file_name, arg_name in ( - ("docker-compose.overwrite.yml", "docker_compose_overwrite_path"), - ("runtime.yml", "service_config_path"), - ): - file_path = meta_config.parent / file_name - if file_path.exists(): - configs_kwargs_map[config_name][arg_name] = file_path - - if not configs_kwargs_map: - rich.print(f"[warning] No config files were found in '{config_path}'") - - # output - compose_spec_dict = {} - for n, config_name in enumerate(configs_kwargs_map): - nth_compose_spec = create_docker_compose_image_spec( - ctx.parent.settings, **configs_kwargs_map[config_name] - ).dict(exclude_unset=True) - - # FIXME: shaky! why first decides ?? - if n == 0: - compose_spec_dict = nth_compose_spec - else: - # appends only services section! 
- compose_spec_dict["services"].update(nth_compose_spec["services"]) - - to_spec_file.parent.mkdir(parents=True, exist_ok=True) - with to_spec_file.open("wt") as fh: - yaml.safe_dump( - compose_spec_dict, - fh, - default_flow_style=False, - sort_keys=False, - ) - rich.print(f"Created compose specs at '{to_spec_file.resolve()}'") diff --git a/packages/service-integration/src/service_integration/commands/config.py b/packages/service-integration/src/service_integration/commands/config.py deleted file mode 100644 index 8be3f9e7873..00000000000 --- a/packages/service-integration/src/service_integration/commands/config.py +++ /dev/null @@ -1,106 +0,0 @@ -import json -from pathlib import Path -from typing import Final - -import rich -import typer -import yaml -from pydantic import ValidationError -from pydantic.main import BaseModel - -from ..compose_spec_model import ComposeSpecification -from ..osparc_config import DockerComposeOverwriteCfg, MetaConfig, RuntimeConfig - - -def create_osparc_specs( - compose_spec_path: Path, - docker_compose_overwrite_path: Path = Path("docker-compose.overwrite.yml"), - metadata_path: Path = Path("metadata.yml"), - service_specs_path: Path = Path("runtime-spec.yml"), -): - rich.print(f"Creating osparc config files from {compose_spec_path}") - - compose_spec = ComposeSpecification.parse_obj( - yaml.safe_load(compose_spec_path.read_text()) - ) - - if compose_spec.services: - - has_multiple_services: Final[int] = len(compose_spec.services) - - def _save(service_name: str, filename: Path, model: BaseModel): - output_path = filename - if has_multiple_services: - output_path = filename.parent / service_name / filename.name - - output_path.parent.mkdir(parents=True, exist_ok=True) - rich.print(f"Creating {output_path} ...", end="") - - with output_path.open("wt") as fh: - data = json.loads(model.json(by_alias=True, exclude_none=True)) - yaml.safe_dump(data, fh, sort_keys=False) - - rich.print("DONE") - - for service_name in compose_spec.services: - try: - labels = compose_spec.services[service_name].build.labels - if labels: - if isinstance(labels, list): - labels: dict[str, str] = dict( - item.strip().split("=") for item in labels - ) - # TODO: there must be a better way for this ... 
- assert isinstance(labels.__root__, dict) # nosec - labels = labels.__root__ - - meta_cfg = MetaConfig.from_labels_annotations(labels) - _save(service_name, metadata_path, meta_cfg) - - docker_compose_overwrite_cfg = DockerComposeOverwriteCfg.create_default( - service_name=meta_cfg.service_name() - ) - _save( - service_name, - docker_compose_overwrite_path, - docker_compose_overwrite_cfg, - ) - - runtime_cfg = RuntimeConfig.from_labels_annotations(labels) - _save(service_name, service_specs_path, runtime_cfg) - - except (AttributeError, ValidationError, TypeError) as err: - rich.print( - f"WARNING: failure producing specs for {service_name}: {err}" - ) - - rich.print("osparc config files created") - - -def main( - from_spec_file: Path = typer.Option( - Path("docker-compose.yml"), - "-f", - "--from-spec-file", - help="docker-compose used to deduce osparc config", - ), -): - """Creates osparc config from complete docker compose-spec""" - # TODO: sync defaults among CLI commands - config_dir = from_spec_file.parent / ".osparc" - project_cfg_path = config_dir / "docker-compose.overwrite.yml" - meta_cfg_path = config_dir / "metadata.yml" - runtime_cfg_path = config_dir / "runtime.yml" - - meta_cfg_path.parent.mkdir(parents=True, exist_ok=True) - runtime_cfg_path.parent.mkdir(parents=True, exist_ok=True) - rich.print(f"Creating {config_dir} from {from_spec_file} ...") - - create_osparc_specs( - from_spec_file, project_cfg_path, meta_cfg_path, runtime_cfg_path - ) - - -if __name__ == "__main__": - # pylint: disable=no-value-for-parameter - main() diff --git a/packages/service-integration/src/service_integration/commands/metadata.py b/packages/service-integration/src/service_integration/commands/metadata.py deleted file mode 100644 index e7436487d62..00000000000 --- a/packages/service-integration/src/service_integration/commands/metadata.py +++ /dev/null @@ -1,75 +0,0 @@ -from collections import OrderedDict -from enum import Enum -from pathlib import Path - -import rich -import typer -import yaml -from models_library.services import ServiceDockerData - -from ..versioning import bump_version_string -from ..yaml_utils import ordered_safe_dump, ordered_safe_load - - -class TargetVersionChoices(str, Enum): - INTEGRATION_VERSION = "integration-version" - SEMANTIC_VERSION = "version" - - -class UpgradeTags(str, Enum): - MAJOR = "major" - MINOR = "minor" - PATCH = "patch" - - -def bump_version( - target_version: TargetVersionChoices = typer.Argument( - TargetVersionChoices.SEMANTIC_VERSION - ), - upgrade: UpgradeTags = typer.Option(..., case_sensitive=False), - metadata_file: Path = typer.Option( - "metadata/metadata.yml", - help="The metadata yaml file", - ), -): - """Bumps target version in metadata (legacy)""" - # load - raw_data: OrderedDict = ordered_safe_load(metadata_file.read_text()) - - # parse and validate - metadata = ServiceDockerData(**raw_data) - - # get + bump + set - attrname = target_version.replace("-", "_") - current_version: str = getattr(metadata, attrname) - raw_data[target_version] = new_version = bump_version_string( - current_version, upgrade - ) - - # dump to file (preserving order!) 
- text = ordered_safe_dump(raw_data) - metadata_file.write_text(text) - rich.print(f"{target_version.title()} bumped: {current_version} β†’ {new_version}") - - -def get_version( - target_version: TargetVersionChoices = typer.Argument( - TargetVersionChoices.SEMANTIC_VERSION - ), - metadata_file: Path = typer.Option( - ".osparc/metadata.yml", - help="The metadata yaml file", - ), -): - """Prints to output requested version (legacy)""" - - # parse and validate - metadata = ServiceDockerData(**yaml.safe_load(metadata_file.read_text())) - - attrname = target_version.replace("-", "_") - current_version: str = getattr(metadata, attrname) - - # MUST have no new line so that we can produce a VERSION file with no extra new-line - # VERSION: $(METADATA) - # @osparc-service-integrator get-version --metadata-file $< > $@ - rich.print(current_version, end="") diff --git a/packages/service-integration/src/service_integration/commands/run_creator.py b/packages/service-integration/src/service_integration/commands/run_creator.py deleted file mode 100644 index cfcb6a6b5fb..00000000000 --- a/packages/service-integration/src/service_integration/commands/run_creator.py +++ /dev/null @@ -1,84 +0,0 @@ -import stat -from pathlib import Path - -import typer -import yaml - - -def get_input_config(metadata_file: Path) -> dict: - inputs = {} - with metadata_file.open() as fp: - metadata = yaml.safe_load(fp) - if "inputs" in metadata: - inputs = metadata["inputs"] - return inputs - - -def main( - metadata_file: Path = typer.Option( - ".osparc/metadata.yml", - "--metadata", - help="The metadata yaml of the node", - ), - run_script_file_path: Path = typer.Option( - ..., - "--runscript", - help="Path to the run script ", - ), -): - """Creates a sh script that uses jq tool to retrieve variables - to use in sh from a json file for use in an osparc service (legacy). - - Usage python run_creator --folder path/to/inputs.json --runscript path/to/put/the/script - - """ - - # generate variables for input - input_script = [ - """ -#!/bin/sh -#--------------------------------------------------------------- -# AUTO-GENERATED CODE, do not modify this will be overwritten!!! 
-#--------------------------------------------------------------- -# shell strict mode: -set -o errexit -set -o nounset -IFS=$(printf '\\n\\t') -cd "$(dirname "$0")" -json_input=$INPUT_FOLDER/inputs.json - """ - ] - input_config = get_input_config(metadata_file) - for input_key, input_value in input_config.items(): - if "data:" in input_value["type"]: - filename = input_key - if "fileToKeyMap" in input_value and len(input_value["fileToKeyMap"]) > 0: - filename, _ = next(iter(input_value["fileToKeyMap"].items())) - input_script.append( - f"{str(input_key).upper()}=$INPUT_FOLDER/{str(filename)}" - ) - input_script.append(f"export {str(input_key).upper()}") - else: - input_script.append( - f"{str(input_key).upper()}=$(< \"$json_input\" jq '.{input_key}')" - ) - input_script.append(f"export {str(input_key).upper()}") - - input_script.extend( - [ - """ -exec execute.sh - """ - ] - ) - - # write shell script - shell_script = "\n".join(input_script) - run_script_file_path.write_text(shell_script) - st = run_script_file_path.stat() - run_script_file_path.chmod(st.st_mode | stat.S_IEXEC) - - -if __name__ == "__main__": - # pylint: disable=no-value-for-parameter - main() diff --git a/packages/service-integration/src/service_integration/commands/test.py b/packages/service-integration/src/service_integration/commands/test.py deleted file mode 100644 index b08c5a85c03..00000000000 --- a/packages/service-integration/src/service_integration/commands/test.py +++ /dev/null @@ -1,21 +0,0 @@ -from pathlib import Path - -import rich -import typer - -from ..service import pytest_runner - - -def main( - service_dir: Path = typer.Argument( - ..., help="Root directory of the service under test" - ), -): - """Runs tests against service directory""" - - if not service_dir.exists(): - raise typer.BadParameter("Invalid path to service directory") - - rich.print(f"Testing '{service_dir.resolve()}' ...") - error_code = pytest_runner.main(service_dir=service_dir, extra_args=[]) - raise typer.Exit(code=error_code) diff --git a/packages/service-integration/src/service_integration/compose_spec_model.py b/packages/service-integration/src/service_integration/compose_spec_model.py index bc47ea91129..5ee4ce6f29a 100644 --- a/packages/service-integration/src/service_integration/compose_spec_model.py +++ b/packages/service-integration/src/service_integration/compose_spec_model.py @@ -5,20 +5,26 @@ SEE Makefile recipe to produce _compose_spec_model_autogenerated.py """ -from ._compose_spec_model_autogenerated import ( - SCHEMA_VERSION, +from typing import TypeAlias + +from ._compose_spec_model_autogenerated import ( # type:ignore BuildItem, ComposeSpecification, + ListOrDict, Service, Volume1, ) -# Aliases -ServiceVolume = Volume1 +SCHEMA_URL = "https://json-schema.org/draft/2019-09/schema#" +SCHEMA_VERSION = SCHEMA_URL.split("/")[-2] + + +ServiceVolume: TypeAlias = Volume1 __all__: tuple[str, ...] 
= ( "BuildItem", "ComposeSpecification", + "ListOrDict", "SCHEMA_VERSION", "Service", "ServiceVolume", diff --git a/packages/service-integration/src/service_integration/errors.py b/packages/service-integration/src/service_integration/errors.py index 33ebe3eebfc..65521d36371 100644 --- a/packages/service-integration/src/service_integration/errors.py +++ b/packages/service-integration/src/service_integration/errors.py @@ -1,9 +1,17 @@ -from pydantic.errors import PydanticErrorMixin +from common_library.errors_classes import OsparcErrorMixin -class ServiceIntegrationError(PydanticErrorMixin, RuntimeError): +class ServiceIntegrationError(OsparcErrorMixin, RuntimeError): pass -class ConfigNotFound(ServiceIntegrationError): +class ConfigNotFoundError(ServiceIntegrationError): msg_template = "could not find any osparc config under {basedir}" + + +class UndefinedOciImageSpecError(ServiceIntegrationError): + ... + + +class InvalidLabelsError(OsparcErrorMixin, ValueError): + template_msg = "Invalid build labels {build_labels}" diff --git a/packages/service-integration/src/service_integration/labels_annotations.py b/packages/service-integration/src/service_integration/labels_annotations.py deleted file mode 100644 index 09ce0941268..00000000000 --- a/packages/service-integration/src/service_integration/labels_annotations.py +++ /dev/null @@ -1,65 +0,0 @@ -""" Image labels annotations - -osparc expects the service configuration (in short: config) attached to the service's image as label annotations. -This module defines how this config is serialized/deserialized to/from docker labels annotations -""" - -import json -from json.decoder import JSONDecodeError -from typing import Any - -from pydantic.json import pydantic_encoder - -LabelsAnnotationsDict = dict[str, str] - - -def _json_dumps(obj: Any, **kwargs) -> str: - return json.dumps(obj, default=pydantic_encoder, **kwargs) - - -def to_labels( - config: dict[str, Any], *, prefix_key: str, trim_key_head: bool = True -) -> LabelsAnnotationsDict: - """converts config into labels annotations""" - - # FIXME: null is loaded as 'null' string value? is that correct? json -> None upon deserialization? - labels = {} - for key, value in config.items(): - if trim_key_head: - if isinstance(value, str): - # Avoids double quotes, i.e. '"${VERSION}"' - label = value - else: - label = _json_dumps(value, sort_keys=False) - else: - label = _json_dumps({key: value}, sort_keys=False) - - # NOTE: docker-compose env var interpolation gets confused with schema's '$ref' and - # will replace it '$ref' with an empty string. 
- if isinstance(label, str) and "$ref" in label: - label = label.replace("$ref", "$$ref") - - labels[f"{prefix_key}.{key}"] = label - - return labels - - -def from_labels( - labels: LabelsAnnotationsDict, *, prefix_key: str, trim_key_head: bool = True -) -> dict[str, Any]: - """convert labels annotations into config""" - config: dict[str, Any] = {} - for key, label in labels.items(): - if key.startswith(f"{prefix_key}."): - try: - value = json.loads(label) - except JSONDecodeError: - value = label - - if not trim_key_head: - if isinstance(value, dict): - config.update(value) - else: - config[key.replace(f"{prefix_key}.", "")] = value - - return config diff --git a/packages/service-integration/src/service_integration/oci_image_spec.py b/packages/service-integration/src/service_integration/oci_image_spec.py index 494a45a1345..84f779150ff 100644 --- a/packages/service-integration/src/service_integration/oci_image_spec.py +++ b/packages/service-integration/src/service_integration/oci_image_spec.py @@ -7,15 +7,17 @@ import os from datetime import datetime -from typing import Any +from typing import Annotated, Any from models_library.basic_types import SHA1Str, VersionStr -from pydantic import BaseModel, Field -from pydantic.config import Extra +from models_library.utils.labels_annotations import ( + LabelsAnnotationsDict, + from_labels, + to_labels, +) +from pydantic import BaseModel, ConfigDict, Field from pydantic.networks import AnyUrl -from .labels_annotations import from_labels, to_labels - # # Prefix added to docker image labels using reverse DNS notations of a domain they own # SEE https://docs.docker.com/config/labels-custom-metadata/#key-format-recommendations @@ -38,84 +40,112 @@ } -class OciImageSpecAnnotations(BaseModel): - # TODO: review and polish constraints - - created: datetime = Field( - None, - description="date and time on which the image was built (string, date-time as defined by RFC 3339)", - ) - - authors: str = Field( - None, - description="contact details of the people or organization responsible for the image (freeform string)", - ) - - url: AnyUrl = Field( - None, description="URL to find more information on the image (string)" - ) - - documentation: AnyUrl = Field( - None, description="URL to get documentation on the image (string)" - ) +def _underscore_as_dot(field_name: str): + return field_name.replace("_", ".") - source: AnyUrl = Field( - None, description="URL to get source code for building the image (string)" - ) - version: VersionStr = Field( - None, - description="version of the packaged software" - "The version MAY match a label or tag in the source code repository" - "version MAY be Semantic versioning-compatible", - ) - revision: str = Field( - None, - description="Source control revision identifier for the packaged software.", - ) +class OciImageSpecAnnotations(BaseModel): + # TODO: review and polish constraints - vendor: str = Field( - None, description="Name of the distributing entity, organization or individual." 
- ) + created: Annotated[ + datetime | None, + Field( + description="date and time on which the image was built (string, date-time as defined by RFC 3339)", + ), + ] = None + + authors: Annotated[ + str | None, + Field( + description="contact details of the people or organization responsible for the image (freeform string)", + ), + ] = None + + url: Annotated[ + AnyUrl | None, + Field(None, description="URL to find more information on the image (string)"), + ] = None + + documentation: Annotated[ + AnyUrl | None, + Field(None, description="URL to get documentation on the image (string)"), + ] = None + + source: Annotated[ + AnyUrl | None, + Field( + None, description="URL to get source code for building the image (string)" + ), + ] = None + + version: Annotated[ + VersionStr | None, + Field( + description="version of the packaged software" + "The version MAY match a label or tag in the source code repository" + "version MAY be Semantic versioning-compatible", + ), + ] = None + revision: Annotated[ + str | None, + Field( + description="Source control revision identifier for the packaged software.", + ), + ] = None + vendor: Annotated[ + str | None, + Field( + description="Name of the distributing entity, organization or individual." + ), + ] = None # SEE https://spdx.dev/spdx-specification-21-web-version/#h.jxpfx0ykyb60 - licenses: str = Field( - "MIT", - description="License(s) under which contained software is distributed as an SPDX License Expression.", - ) - ref_name: str = Field( - None, - description="Name of the reference for a target (string).", + licenses: Annotated[ + str, + Field( + description="License(s) under which contained software is distributed as an SPDX License Expression.", + ), + ] = "MIT" + + ref_name: Annotated[ + str | None, + Field( + description="Name of the reference for a target (string).", + ), + ] = None + + title: Annotated[ + str | None, Field(description="Human-readable title of the image (string)") + ] = None + description: Annotated[ + str | None, + Field( + description="Human-readable description of the software packaged in the image (string)", + ), + ] = None + base_digest: Annotated[ + SHA1Str | None, + Field( + description="Digest of the image this image is based on (string)", + ), + ] = None + + model_config = ConfigDict( + alias_generator=_underscore_as_dot, populate_by_name=True, extra="forbid" ) - title: str = Field(None, description="Human-readable title of the image (string)") - description: str = Field( - None, - description="Human-readable description of the software packaged in the image (string)", - ) - base_digest: SHA1Str = Field( - None, - description="Digest of the image this image is based on (string)", - ) - - class Config: - alias_generator = lambda field_name: field_name.replace("_", ".") - allow_population_by_field_name = True - extra = Extra.forbid - @classmethod def from_labels_annotations( - cls, labels: dict[str, str] + cls, labels: LabelsAnnotationsDict ) -> "OciImageSpecAnnotations": data = from_labels(labels, prefix_key=OCI_LABEL_PREFIX, trim_key_head=False) - return cls.parse_obj(data) + return cls.model_validate(data) - def to_labels_annotations(self) -> dict[str, str]: - labels = to_labels( - self.dict(exclude_unset=True, by_alias=True, exclude_none=True), + def to_labels_annotations(self) -> LabelsAnnotationsDict: + return to_labels( + self.model_dump(exclude_unset=True, by_alias=True, exclude_none=True), prefix_key=OCI_LABEL_PREFIX, ) - return labels class LabelSchemaAnnotations(BaseModel): @@ -123,36 +153,35 @@ class 
LabelSchemaAnnotations(BaseModel): NOTE: DEPRECATED IN FAVOUR OF OCI IMAGE SPEC """ - schema_version: VersionStr = Field("1.0.0", alias="schema-version") + schema_version: Annotated[VersionStr, Field(alias="schema-version")] = "1.0.0" build_date: datetime vcs_ref: str vcs_url: AnyUrl - - class Config: - alias_generator = lambda field_name: field_name.replace("_", "-") - allow_population_by_field_name = True - extra = Extra.forbid + model_config = ConfigDict( + alias_generator=lambda field_name: field_name.replace("_", "-"), + populate_by_name=True, + extra="forbid", + ) @classmethod def create_from_env(cls) -> "LabelSchemaAnnotations": data = {} - for field_name in cls.__fields__: + for field_name in cls.model_fields.keys(): # noqa: SIM118 if value := os.environ.get(field_name.upper()): data[field_name] = value - return cls(**data) + return cls.model_validate(data) def to_oci_data(self) -> dict[str, Any]: """Collects data that be converted to OCI labels. WARNING: label-schema has be deprecated in favor of OCI image specs """ - convertable_data = self.dict( + convertable_data = self.model_dump( include=set(_TO_OCI.keys()), exclude_unset=True, exclude_none=True ) assert set(convertable_data.keys()).issubset( # nosec - set(self.__fields__.keys()) + set(self.model_fields.keys()) ) # nosec - oci_data = {_TO_OCI[key]: value for key, value in convertable_data.items()} - return oci_data + return {_TO_OCI[key]: value for key, value in convertable_data.items()} diff --git a/packages/service-integration/src/service_integration/osparc_config.py b/packages/service-integration/src/service_integration/osparc_config.py index 042332485e4..12413dcd130 100644 --- a/packages/service-integration/src/service_integration/osparc_config.py +++ b/packages/service-integration/src/service_integration/osparc_config.py @@ -1,4 +1,4 @@ -""" 'osparc config' is a set of stardard file forms (yaml) that the user fills to describe how his/her service works and +"""'osparc config' is a set of stardard file forms (yaml) that the user fills to describe how his/her service works and integrates with osparc. - config files are stored under '.osparc/' folder in the root repo folder (analogous to other configs like .github, .vscode, etc) @@ -8,74 +8,64 @@ - config should provide enough information about that context to allow - build an image - run an container - on a single command call. + on a single command call. 
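Both `OciImageSpecAnnotations` and `LabelSchemaAnnotations` above now carry a Pydantic v2 `ConfigDict` with an `alias_generator`, so snake_case fields are exported under their dotted (or dashed) label names while still being populatable by field name. A reduced sketch of that configuration, with field names chosen only for illustration:

```python
from pydantic import BaseModel, ConfigDict


def _underscore_as_dot(field_name: str) -> str:
    return field_name.replace("_", ".")


class AnnotationsSketch(BaseModel):
    model_config = ConfigDict(
        alias_generator=_underscore_as_dot,
        populate_by_name=True,  # allow AnnotationsSketch(ref_name=...) despite the alias
        extra="forbid",
    )

    ref_name: str | None = None
    base_digest: str | None = None


annotations = AnnotationsSketch(ref_name="v1.2.3")
# Dotted keys on export, matching the label naming convention
assert annotations.model_dump(by_alias=True, exclude_none=True) == {"ref.name": "v1.2.3"}
```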
- """ import logging from pathlib import Path -from typing import Any, Literal, NamedTuple, Optional +from typing import Annotated, Any, Final, Literal +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import SHA256Str +from models_library.callbacks_mapping import CallbacksMapping from models_library.service_settings_labels import ( ContainerSpec, DynamicSidecarServiceLabels, - NATRule, PathMappingsLabel, RestartPolicy, ) -from models_library.services import ( - COMPUTATIONAL_SERVICE_KEY_FORMAT, - DYNAMIC_SERVICE_KEY_FORMAT, - BootOptions, - ServiceDockerData, - ServiceType, +from models_library.service_settings_nat_rule import NATRule +from models_library.services import BootOptions, ServiceMetaDataPublished +from models_library.services_types import ServiceKey +from models_library.utils.labels_annotations import ( + OSPARC_LABEL_PREFIXES, + LabelsAnnotationsDict, + from_labels, + to_labels, +) +from pydantic import ( + ConfigDict, + NonNegativeInt, + TypeAdapter, + ValidationError, + ValidationInfo, + field_validator, + model_validator, ) -from pydantic import ValidationError -from pydantic.class_validators import root_validator, validator -from pydantic.config import Extra from pydantic.fields import Field from pydantic.main import BaseModel from .compose_spec_model import ComposeSpecification -from .errors import ConfigNotFound -from .labels_annotations import from_labels, to_labels from .settings import AppSettings from .yaml_utils import yaml_safe_load -logger = logging.getLogger(__name__) - -CONFIG_FOLDER_NAME = ".osparc" - - -SERVICE_KEY_FORMATS = { - ServiceType.COMPUTATIONAL: COMPUTATIONAL_SERVICE_KEY_FORMAT, - ServiceType.DYNAMIC: DYNAMIC_SERVICE_KEY_FORMAT, -} - -# SEE https://docs.docker.com/config/labels-custom-metadata/#label-keys-and-values -# "Authors of third-party tools should prefix each label key with the reverse DNS notation of a -# domain they own, such as com.example.some-label "" -# FIXME: review and define a z43-wide inverse DNS e.g. 
swiss.z43 -OSPARC_LABEL_PREFIXES = ( - "io.simcore", - "simcore.service", -) - +_logger = logging.getLogger(__name__) -## MODELS --------------------------------------------------------------------------------- -# -# Project config -> stored in repo's basedir/.osparc -# +OSPARC_CONFIG_DIRNAME: Final[str] = ".osparc" +OSPARC_CONFIG_COMPOSE_SPEC_NAME: Final[str] = "docker-compose.overwrite.yml" +OSPARC_CONFIG_METADATA_NAME: Final[str] = "metadata.yml" +OSPARC_CONFIG_RUNTIME_NAME: Final[str] = "runtime.yml" -class DockerComposeOverwriteCfg(ComposeSpecification): - """picks up configurations used to overwrite the docker-compuse output""" +class DockerComposeOverwriteConfig(ComposeSpecification): - """Content of docker-compose.overwrite.yml configuration file""" @classmethod def create_default( - cls, service_name: Optional[str] = None - ) -> "DockerComposeOverwriteCfg": - return cls.parse_obj( + cls, service_name: str | None = None + ) -> "DockerComposeOverwriteConfig": + model: "DockerComposeOverwriteConfig" = cls.model_validate( { "services": { service_name: { @@ -86,49 +76,58 @@ def create_default( } } ) + return model @classmethod - def from_yaml(cls, path: Path) -> "DockerComposeOverwriteCfg": + def from_yaml(cls, path: Path) -> "DockerComposeOverwriteConfig": with path.open() as fh: data = yaml_safe_load(fh) - return cls.parse_obj(data) + model: "DockerComposeOverwriteConfig" = cls.model_validate(data) + return model -class MetaConfig(ServiceDockerData): - """Details about general info and I/O configuration of the service +class MetadataConfig(ServiceMetaDataPublished): + """Content of metadata.yml configuration file + Details about general info and I/O configuration of the service Necessary for both image- and runtime-spec """ - @validator("contact") + image_digest: SHA256Str | None = Field( + None, + description="this is NOT a label, therefore it is EXCLUDED from export", + exclude=True, + ) + + @field_validator("contact") @classmethod - def check_contact_in_authors(cls, v, values): + def _check_contact_in_authors(cls, v, info: ValidationInfo): """catalog service relies on contact and author to define access rights""" - authors_emails = {author.email for author in values["authors"]} + authors_emails = {author.email for author in info.data["authors"]} if v not in authors_emails: - raise ValueError("Contact {v} must be registered as an author") + msg = f"Contact {v} must be registered as an author" + raise ValueError(msg) return v @classmethod - def from_yaml(cls, path: Path) -> "MetaConfig": + def from_yaml(cls, path: Path) -> "MetadataConfig": with path.open() as fh: data = yaml_safe_load(fh) - return cls.parse_obj(data) + return cls.model_validate(data) @classmethod - def from_labels_annotations(cls, labels: dict[str, str]) -> "MetaConfig": + def from_labels_annotations(cls, labels: LabelsAnnotationsDict) -> "MetadataConfig": data = from_labels( labels, prefix_key=OSPARC_LABEL_PREFIXES[0], trim_key_head=False ) - return cls.parse_obj(data) + return cls.model_validate(data) - def to_labels_annotations(self) -> dict[str, str]: - labels = to_labels( - self.dict(exclude_unset=True, by_alias=True, exclude_none=True), + def to_labels_annotations(self) -> LabelsAnnotationsDict: + return to_labels( + self.model_dump(exclude_unset=True, by_alias=True, exclude_none=True), prefix_key=OSPARC_LABEL_PREFIXES[0], trim_key_head=False, ) - return labels def service_name(self) -> str: """name used as key in the compose-spec services map""" @@ -141,7 +140,9 @@ def image_name(self, settings: AppSettings, 
registry="local") -> str: if registry in "dockerhub": # dockerhub allows only one-level names -> dot it # TODO: check this name is compatible with REGEX - service_path = service_path.replace("/", ".") + service_path = TypeAdapter(ServiceKey).validate_python( + service_path.replace("/", ".") + ) service_version = self.version return f"{registry_prefix}{service_path}:{service_version}" @@ -154,7 +155,13 @@ class SettingsItem(BaseModel): name: str = Field(..., description="The name of the service setting") type_: Literal[ - "string", "int", "integer", "number", "object", "ContainerSpec", "Resources" + "string", + "int", + "integer", + "number", + "object", + "ContainerSpec", + "Resources", ] = Field( ..., description="The type of the service setting (follows Docker REST API naming scheme)", @@ -165,19 +172,28 @@ class SettingsItem(BaseModel): description="The value of the service setting (shall follow Docker REST API scheme for services)", ) - @validator("value", pre=True) + @field_validator("type_", mode="before") @classmethod - def check_value_against_custom_types(cls, v, values): - if type_ := values.get("type_"): - if type_ == "ContainerSpec": - ContainerSpec.parse_obj(v) + def ensure_backwards_compatible_setting_type(cls, v): + if v == "resources": + # renamed in the latest version as "Resources" + return "Resources" + return v + + @field_validator("value", mode="before") + @classmethod + def check_value_against_custom_types(cls, v, info: ValidationInfo): + if (type_ := info.data.get("type_")) and type_ == "ContainerSpec": + ContainerSpec.model_validate(v) return v class ValidatingDynamicSidecarServiceLabels(DynamicSidecarServiceLabels): - class Config: - extra = Extra.allow - allow_population_by_field_name = True + model_config = ConfigDict(extra="allow", populate_by_name=True) + + +def _underscore_as_minus(field_name: str) -> str: + return field_name.replace("_", "-") class RuntimeConfig(BaseModel): @@ -186,108 +202,65 @@ class RuntimeConfig(BaseModel): Necessary for runtime-spec """ - compose_spec: Optional[ComposeSpecification] = None - container_http_entrypoint: Optional[str] = None + compose_spec: ComposeSpecification | None = None + container_http_entrypoint: str | None = None restart_policy: RestartPolicy = RestartPolicy.NO_RESTART - paths_mapping: Optional[PathMappingsLabel] = None - boot_options: BootOptions = None + callbacks_mapping: Annotated[ + CallbacksMapping | None, Field(default_factory=dict) + ] = DEFAULT_FACTORY + + paths_mapping: PathMappingsLabel | None = None - containers_allowed_outgoing_permit_list: Optional[dict[str, list[NATRule]]] = None + user_preferences_path: Path | None = None + boot_options: BootOptions | None = None + min_visible_inputs: NonNegativeInt | None = None - containers_allowed_outgoing_internet: Optional[set[str]] = None + containers_allowed_outgoing_permit_list: dict[str, list[NATRule]] | None = None - settings: list[SettingsItem] = [] + containers_allowed_outgoing_internet: set[str] | None = None + + settings: Annotated[list[SettingsItem], Field(default_factory=list)] = ( + DEFAULT_FACTORY + ) - @root_validator(pre=True) + @model_validator(mode="before") @classmethod - def ensure_compatibility(cls, v): + def _ensure_compatibility(cls, v): # NOTE: if changes are applied to `DynamicSidecarServiceLabels` # these are also validated when ooil runs. 
try: - ValidatingDynamicSidecarServiceLabels.parse_obj(v) - except ValidationError as e: - logger.exception( + ValidatingDynamicSidecarServiceLabels.model_validate(v) + except ValidationError: + _logger.exception( "Could not validate %s via %s", DynamicSidecarServiceLabels, ValidatingDynamicSidecarServiceLabels, ) - raise e + raise return v - class Config: - alias_generator = lambda field_name: field_name.replace("_", "-") - allow_population_by_field_name = True - extra = Extra.forbid + model_config = ConfigDict( + alias_generator=_underscore_as_minus, + populate_by_name=True, + extra="forbid", + ) @classmethod def from_yaml(cls, path: Path) -> "RuntimeConfig": with path.open() as fh: data = yaml_safe_load(fh) - return cls.parse_obj(data) + return cls.model_validate(data) @classmethod - def from_labels_annotations(cls, labels: dict[str, str]) -> "RuntimeConfig": + def from_labels_annotations(cls, labels: LabelsAnnotationsDict) -> "RuntimeConfig": data = from_labels(labels, prefix_key=OSPARC_LABEL_PREFIXES[1]) - return cls.parse_obj(data) + return cls.model_validate(data) - def to_labels_annotations(self) -> dict[str, str]: - labels = to_labels( - self.dict(exclude_unset=True, by_alias=True, exclude_none=True), + def to_labels_annotations(self) -> LabelsAnnotationsDict: + return to_labels( + self.model_dump(exclude_unset=True, by_alias=True, exclude_none=True), prefix_key=OSPARC_LABEL_PREFIXES[1], ) - return labels - - -## FILES ----------------------------------------------------------- - - -class ConfigFileDescriptor(NamedTuple): - glob_pattern: str - required: bool = True - - -class ConfigFilesStructure: - """ - Defines config file structure and how they - map to the models - """ - - FILES_GLOBS = { - DockerComposeOverwriteCfg.__name__: ConfigFileDescriptor( - glob_pattern="docker-compose.overwrite.y*ml", required=False - ), - MetaConfig.__name__: ConfigFileDescriptor(glob_pattern="metadata.y*ml"), - RuntimeConfig.__name__: ConfigFileDescriptor(glob_pattern="runtime.y*ml"), - } - - @staticmethod - def config_file_path(scope: Literal["user", "project"]) -> Path: - basedir = Path.cwd() # assumes project is in CWD - if scope == "user": - basedir = Path.home() - return basedir / ".osparc" / "service-integration.json" - - def search(self, start_dir: Path) -> dict[str, Path]: - """Tries to match of any of file layouts - and returns associated config files - """ - found = { - configtype: list(start_dir.rglob(pattern)) - for configtype, (pattern, required) in self.FILES_GLOBS.items() - if required - } - - if not found: - raise ConfigNotFound(basedir=start_dir) - - raise NotImplementedError("TODO") - - # TODO: - # scenarios: - # .osparc/meta, [runtime] - # .osparc/{service-name}/meta, [runtime] - - # metadata is required, runtime is optional? 
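Note on the refactored osparc_config module above: the renamed config classes map one-to-one onto the files of a service's `.osparc/` folder (metadata.yml, runtime.yml and the optional docker-compose.overwrite.yml), and the new OSPARC_CONFIG_* constants name that layout explicitly. Below is a minimal usage sketch; the `load_osparc_configs` helper and its existence checks are illustrative assumptions and not part of the package.

from pathlib import Path

from service_integration.osparc_config import (
    OSPARC_CONFIG_COMPOSE_SPEC_NAME,
    OSPARC_CONFIG_DIRNAME,
    OSPARC_CONFIG_METADATA_NAME,
    OSPARC_CONFIG_RUNTIME_NAME,
    DockerComposeOverwriteConfig,
    MetadataConfig,
    RuntimeConfig,
)


def load_osparc_configs(repo_dir: Path):
    # illustrative helper (not part of the package): loads the osparc config
    # files from <repo_dir>/.osparc using the constants defined in osparc_config.py
    config_dir = repo_dir / OSPARC_CONFIG_DIRNAME

    # metadata.yml: general info and I/O configuration of the service
    meta_cfg = MetadataConfig.from_yaml(config_dir / OSPARC_CONFIG_METADATA_NAME)

    # runtime.yml (optional): runtime settings validated by RuntimeConfig
    runtime_path = config_dir / OSPARC_CONFIG_RUNTIME_NAME
    runtime_cfg = RuntimeConfig.from_yaml(runtime_path) if runtime_path.exists() else None

    # docker-compose.overwrite.yml (optional): tweaks applied to the generated build spec
    overwrite_path = config_dir / OSPARC_CONFIG_COMPOSE_SPEC_NAME
    compose_overwrite_cfg = (
        DockerComposeOverwriteConfig.from_yaml(overwrite_path)
        if overwrite_path.exists()
        else DockerComposeOverwriteConfig.create_default(
            service_name=meta_cfg.service_name()
        )
    )

    return meta_cfg, runtime_cfg, compose_overwrite_cfg

The same three config objects are what create_image_spec in the file below consumes to assemble the image build labels.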
diff --git a/packages/service-integration/src/service_integration/osparc_image_specs.py b/packages/service-integration/src/service_integration/osparc_image_specs.py index 1bbeef343ea..a94d6dfc1fc 100644 --- a/packages/service-integration/src/service_integration/osparc_image_specs.py +++ b/packages/service-integration/src/service_integration/osparc_image_specs.py @@ -2,33 +2,34 @@ """ +from typing import Any -from typing import Optional - +from models_library.utils.labels_annotations import LabelsAnnotationsDict from service_integration.compose_spec_model import ( BuildItem, ComposeSpecification, + ListOrDict, Service, ) -from .osparc_config import DockerComposeOverwriteCfg, MetaConfig, RuntimeConfig +from .osparc_config import DockerComposeOverwriteConfig, MetadataConfig, RuntimeConfig from .settings import AppSettings def create_image_spec( settings: AppSettings, - meta_cfg: MetaConfig, - docker_compose_overwrite_cfg: DockerComposeOverwriteCfg, - runtime_cfg: Optional[RuntimeConfig] = None, + meta_cfg: MetadataConfig, + docker_compose_overwrite_cfg: DockerComposeOverwriteConfig, + runtime_cfg: RuntimeConfig | None = None, *, - extra_labels: dict[str, str] = None, + extra_labels: LabelsAnnotationsDict | None = None, **_context ) -> ComposeSpecification: """Creates the image-spec provided the osparc-config and a given context (e.g. development) - - the image-spec simplifies building an image to ``docker-compose build`` + - the image-spec simplifies building an image to ``docker compose build`` """ - labels = {**meta_cfg.to_labels_annotations()} + labels = meta_cfg.to_labels_annotations() if extra_labels: labels.update(extra_labels) if runtime_cfg: @@ -38,20 +39,26 @@ def create_image_spec( assert docker_compose_overwrite_cfg.services # nosec - if not docker_compose_overwrite_cfg.services[service_name].build.context: - docker_compose_overwrite_cfg.services[service_name].build.context = "./" + build = docker_compose_overwrite_cfg.services[service_name].build + assert isinstance(build, BuildItem) # nosec + if not build.context: + build.context = "./" - docker_compose_overwrite_cfg.services[service_name].build.labels = labels + build.labels = ListOrDict(root=labels) - overwrite_options = docker_compose_overwrite_cfg.services[service_name].build.dict( - exclude_none=True - ) + overwrite_options = build.model_dump(exclude_none=True, serialize_as_any=True) build_spec = BuildItem(**overwrite_options) - compose_spec = ComposeSpecification( + service_kwargs: dict[str, Any] = { + "image": meta_cfg.image_name(settings), + "build": build_spec, + } + if docker_compose_overwrite_cfg.services[service_name].depends_on: + service_kwargs["depends_on"] = docker_compose_overwrite_cfg.services[ + service_name + ].depends_on + + return ComposeSpecification( version=settings.COMPOSE_VERSION, - services={ - service_name: Service(image=meta_cfg.image_name(settings), build=build_spec) - }, + services={service_name: Service(**service_kwargs)}, ) - return compose_spec diff --git a/packages/service-integration/src/service_integration/osparc_runtime_specs.py b/packages/service-integration/src/service_integration/osparc_runtime_specs.py index a9ffc331f5f..56e33db0d79 100644 --- a/packages/service-integration/src/service_integration/osparc_runtime_specs.py +++ b/packages/service-integration/src/service_integration/osparc_runtime_specs.py @@ -5,6 +5,5 @@ # # -raise NotImplementedError( - "SEE prototype in packages/service-integration/tests/test_osparc_runtime_specs.py" -) +msg = "SEE prototype in 
packages/service-integration/tests/test_osparc_runtime_specs.py" +raise NotImplementedError(msg) diff --git a/services/director/src/simcore_service_director/rest/generated_code/__init__.py b/packages/service-integration/src/service_integration/py.typed similarity index 100% rename from services/director/src/simcore_service_director/rest/generated_code/__init__.py rename to packages/service-integration/src/service_integration/py.typed diff --git a/packages/service-integration/src/service_integration/pytest_plugin/docker_integration.py b/packages/service-integration/src/service_integration/pytest_plugin/docker_integration.py index adbc9004a37..ba2fd361f79 100644 --- a/packages/service-integration/src/service_integration/pytest_plugin/docker_integration.py +++ b/packages/service-integration/src/service_integration/pytest_plugin/docker_integration.py @@ -6,16 +6,19 @@ import json import os import shutil +import urllib.error import urllib.request +from collections.abc import Iterator from contextlib import suppress from pathlib import Path from pprint import pformat -from typing import Iterator, Optional +from typing import Any import docker import jsonschema import pytest import yaml +from common_library.json_serialization import json_loads from docker.errors import APIError from docker.models.containers import Container @@ -38,7 +41,8 @@ def docker_image_key(docker_client: docker.DockerClient, project_name: str) -> s for image in docker_client.images.list() if any(image_key in tag for tag in image.tags) ] - return docker_images[0].tags[0] + tag: str = docker_images[0].tags[0] + return tag @pytest.fixture @@ -64,27 +68,35 @@ def _is_gitlab_executor() -> bool: @pytest.fixture -def osparc_service_labels_jsonschema(tmp_path) -> dict: +def osparc_service_labels_jsonschema(tmp_path: Path) -> dict: def _download_url(url: str, file: Path): # Download the file from `url` and save it locally under `file_name`: with urllib.request.urlopen(url) as response, file.open("wb") as out_file: shutil.copyfileobj(response, out_file) assert file.exists() - url = "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/api/specs/common/schemas/node-meta-v0.0.1.json" - # TODO: Make sure this is installed with this package!!! 
- file_name = tmp_path / "service_label.json" - _download_url(url, file_name) + try: + _download_url( + "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/api/specs/common/schemas/node-meta-v0.0.1.json", + file_name, + ) + except urllib.error.URLError: + # New url after this commit + _download_url( + "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/api/specs/director/schemas/node-meta-v0.0.1.json", + file_name, + ) + with file_name.open() as fp: - json_schema = json.load(fp) + json_schema: dict = json.load(fp) return json_schema @pytest.fixture(scope="session") def metadata_labels(metadata_file: Path) -> dict: with metadata_file.open() as fp: - metadata = yaml.safe_load(fp) + metadata: dict = yaml.safe_load(fp) return metadata @@ -109,11 +121,10 @@ def host_folders(temporary_path: Path) -> dict: @pytest.fixture def container_variables() -> dict: # of type INPUT_FOLDER=/home/scu/data/input - env = { + return { f"{str(folder).upper()}_FOLDER": (_CONTAINER_FOLDER / folder).as_posix() for folder in _FOLDER_NAMES } - return env @pytest.fixture @@ -134,9 +145,9 @@ def docker_container( shutil.copytree(validation_folders["input"], host_folders["input"]) assert Path(host_folders["input"]).exists() # run the container (this may take some time) - container: Optional[Container] = None + container: Container | None = None try: - volumes = { + volumes: dict[str, Any] = { host_folders[folder]: { "bind": container_variables[f"{str(folder).upper()}_FOLDER"] } @@ -173,14 +184,16 @@ def docker_container( "The container stopped with exit code {}\n\n\ncommand:\n {}, \n\n\nlog:\n{}".format( exc.exit_status, exc.command, - pformat( - (container.logs(timestamps=True, tail=1000).decode("UTF-8")).split( - "\n" - ), - width=200, - ) - if container - else "", + ( + pformat( + ( + container.logs(timestamps=True, tail=1000).decode("UTF-8") + ).split("\n"), + width=200, + ) + if container + else "" + ), ) ) finally: @@ -194,7 +207,7 @@ def convert_to_simcore_labels(image_labels: dict) -> dict: io_simcore_labels = {} for key, value in image_labels.items(): if str(key).startswith("io.simcore."): - simcore_label = json.loads(value) + simcore_label = json_loads(value) simcore_keys = list(simcore_label.keys()) assert len(simcore_keys) == 1 simcore_key = simcore_keys[0] @@ -214,7 +227,7 @@ def assert_container_runs( list_of_files = [ x.name for x in validation_folders[folder].iterdir() - if not ".gitkeep" in x.name + if ".gitkeep" not in x.name ] for file_name in list_of_files: assert Path( @@ -234,14 +247,12 @@ def assert_container_runs( continue # test if the generated files are the ones expected list_of_files = [ - x.name for x in host_folders[folder].iterdir() if not ".gitkeep" in x.name + x.name for x in host_folders[folder].iterdir() if ".gitkeep" not in x.name ] for file_name in list_of_files: assert Path( validation_folders[folder] / file_name - ).exists(), "{} is not present in {}".format( - file_name, validation_folders[folder] - ) + ).exists(), f"{file_name} is not present in {validation_folders[folder]}" _, _, errors = filecmp.cmpfiles( host_folders[folder], validation_folders[folder], @@ -264,7 +275,7 @@ def assert_container_runs( for key, value in io_simcore_labels["outputs"].items(): assert "type" in value # rationale: files are on their own and other types are in inputs.json - if not "data:" in value["type"]: + if "data:" not in value["type"]: # check that keys are available assert key in output_cfg else: diff --git 
a/packages/service-integration/src/service_integration/pytest_plugin/folder_structure.py b/packages/service-integration/src/service_integration/pytest_plugin/folder_structure.py index dc1e57fbee4..ef87dbb5ceb 100644 --- a/packages/service-integration/src/service_integration/pytest_plugin/folder_structure.py +++ b/packages/service-integration/src/service_integration/pytest_plugin/folder_structure.py @@ -5,6 +5,8 @@ import pytest +from ..osparc_config import OSPARC_CONFIG_DIRNAME + @pytest.fixture(scope="session") def project_slug_dir(request: pytest.FixtureRequest) -> Path: @@ -15,7 +17,7 @@ def project_slug_dir(request: pytest.FixtureRequest) -> Path: assert isinstance(root_dir, Path) assert root_dir.exists() - assert any(root_dir.glob(".osparc")) + assert any(root_dir.glob(OSPARC_CONFIG_DIRNAME)) return root_dir @@ -44,21 +46,6 @@ def metadata_file(project_slug_dir: Path, request: pytest.FixtureRequest) -> Pat return metadata_file -def get_expected_files(docker_name: str) -> tuple[str, ...]: - return ( - ".cookiecutterrc", - ".dockerignore", - "metadata:metadata.yml", - f"docker/{docker_name}:entrypoint.sh", - f"docker/{docker_name}:Dockerfile", - "service.cli:execute.sh", - "docker-compose-build.yml", - "docker-compose-meta.yml", - "docker-compose.devel.yml", - "docker-compose.yml", - ) - - def assert_path_in_repo(expected_path: str, project_slug_dir: Path): if ":" in expected_path: diff --git a/packages/service-integration/src/service_integration/pytest_plugin/validation_data.py b/packages/service-integration/src/service_integration/pytest_plugin/validation_data.py index 47478534e00..e5cc87da4a2 100644 --- a/packages/service-integration/src/service_integration/pytest_plugin/validation_data.py +++ b/packages/service-integration/src/service_integration/pytest_plugin/validation_data.py @@ -3,8 +3,8 @@ # pylint: disable=unused-variable import json +from collections.abc import Iterator from pathlib import Path -from typing import Iterator, Optional import pytest import yaml @@ -33,7 +33,7 @@ def validation_folder(validation_dir: Path, port_type: str) -> Path: @pytest.fixture -def validation_cfg(validation_dir: Path, port_type: str) -> Optional[dict]: +def validation_cfg(validation_dir: Path, port_type: str) -> dict | None: validation_file = validation_dir / port_type / (f"{port_type}s.json") if validation_file.exists(): with validation_file.open() as fp: @@ -63,7 +63,7 @@ def assert_validation_data_follows_definition( assert "type" in value # rationale: files are on their own and other types are in inputs.json - if not "data:" in value["type"]: + if "data:" not in value["type"]: # check that keys are available assert key in validation_cfg, f"missing {key} in validation config file" else: @@ -93,13 +93,13 @@ def assert_validation_data_follows_definition( for key, value in validation_cfg.items(): # check the key is defined in the labels assert key in label_cfg - label2types = { + label2types: dict[str, type | tuple[type, ...]] = { "number": (float, int), "integer": int, "boolean": bool, "string": str, } - if not "data:" in label_cfg[key]["type"]: + if "data:" not in label_cfg[key]["type"]: # check the type is correct expected_type = label2types[label_cfg[key]["type"]] assert isinstance( diff --git a/packages/service-integration/src/service_integration/service/pytest_runner.py b/packages/service-integration/src/service_integration/service/pytest_runner.py index 9626c4d7be3..128bd2d1158 100644 --- a/packages/service-integration/src/service_integration/service/pytest_runner.py +++ 
b/packages/service-integration/src/service_integration/service/pytest_runner.py @@ -2,7 +2,6 @@ import sys import tempfile from pathlib import Path -from typing import Optional import pytest @@ -13,7 +12,7 @@ def main( - service_dir: Path, *, debug: bool = False, extra_args: Optional[list[str]] = None + service_dir: Path, *, debug: bool = False, extra_args: list[str] | None = None ) -> int: pytest_args = [ @@ -33,9 +32,9 @@ def main( pytest_args += extra_args logger.debug("Running 'pytest %s'", " ".join(pytest_args)) - exit_code = pytest.main(pytest_args) + exit_code: int | pytest.ExitCode = pytest.main(pytest_args) logger.debug("exit with code=%d", exit_code) - return exit_code + return int(exit_code) if __name__ == "__main__": diff --git a/packages/service-integration/src/service_integration/settings.py b/packages/service-integration/src/service_integration/settings.py index e93c33dbc0e..f8b977cc9a4 100644 --- a/packages/service-integration/src/service_integration/settings.py +++ b/packages/service-integration/src/service_integration/settings.py @@ -1,12 +1,11 @@ -from typing import Optional - -from pydantic import BaseModel, BaseSettings, Field, SecretStr +from pydantic import BaseModel, Field, SecretStr +from pydantic_settings import BaseSettings, SettingsConfigDict class Registry(BaseModel): url_or_prefix: str - user: Optional[str] = None - password: Optional[SecretStr] = None + user: str | None = None + password: SecretStr | None = None # NOTE: image names w/o a prefix default in dockerhub registry @@ -28,9 +27,9 @@ class AppSettings(BaseSettings): COMPOSE_VERSION: str = Field( "3.7", description="version of the docker-compose spec" ) - - class Config: - env_file_encoding = "utf-8" + model_config = SettingsConfigDict( + env_file_encoding="utf-8", + ) # TODO: load from ~/.osparc/service-integration.json or env file # TODO: add access to secrets diff --git a/packages/service-integration/src/service_integration/versioning.py b/packages/service-integration/src/service_integration/versioning.py index 4660defe101..0d7685a818f 100644 --- a/packages/service-integration/src/service_integration/versioning.py +++ b/packages/service-integration/src/service_integration/versioning.py @@ -1,12 +1,13 @@ from datetime import datetime +from typing import Annotated, TypeAlias from models_library.basic_regex import SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS from packaging.version import Version -from pydantic import BaseModel -from pydantic.fields import Field -from pydantic.types import constr +from pydantic import BaseModel, ConfigDict, Field, StringConstraints -SemanticVersionStr = constr(regex=SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS) +SemanticVersionStr: TypeAlias = Annotated[ + str, StringConstraints(pattern=SEMANTIC_VERSION_RE_W_CAPTURE_GROUPS) +] def bump_version_string(current_version: str, bump: str) -> str: @@ -18,7 +19,8 @@ def bump_version_string(current_version: str, bump: str) -> str: # CAN ONLY bump releases not pre/post/dev releases if version.is_devrelease or version.is_postrelease or version.is_prerelease: - raise NotImplementedError("Can only bump released versions") + msg = "Can only bump released versions" + raise NotImplementedError(msg) major, minor, patch = version.major, version.minor, version.micro if bump == "major": @@ -30,7 +32,6 @@ def bump_version_string(current_version: str, bump: str) -> str: return new_version -# TODO: from https://github.com/ITISFoundation/osparc-simcore/issues/2409 # ### versioning # a single version number does not suffice. 
Instead we should have a set of versions that describes "what is inside the container" # - service version (following semantic versioning): for the published service @@ -38,6 +39,7 @@ def bump_version_string(current_version: str, bump: str) -> str: # - executable name: the public name of the wrapped program (e.g. matlab) # - executable version: the version of the program (e.g. matlab 2020b) # - further libraries version dump (e.g. requirements.txt, etc) +# SEE from https://github.com/ITISFoundation/osparc-simcore/issues/2409 class ExecutableVersionInfo(BaseModel): @@ -48,8 +50,8 @@ class ExecutableVersionInfo(BaseModel): version: SemanticVersionStr released: datetime - class Config: - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "example": { "display_name": "SEMCAD X", "display_version": "Matterhorn Student Edition 1", @@ -59,6 +61,7 @@ class Config: "released": "2021-11-19T14:58:45.900979", } } + ) class ServiceVersionInfo(BaseModel): @@ -68,11 +71,12 @@ class ServiceVersionInfo(BaseModel): ) released: datetime = Field(..., description="Publication/release date") - class Config: - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "example": { "version": "1.0.0", # e.g. first time released as an osparc "integration_version": "2.1.0", "released": "2021-11-19T14:58:45.900979", } } + ) diff --git a/packages/service-integration/tests/conftest.py b/packages/service-integration/tests/conftest.py index a7ea31d17fb..2f148482ea5 100644 --- a/packages/service-integration/tests/conftest.py +++ b/packages/service-integration/tests/conftest.py @@ -4,8 +4,8 @@ import shutil import sys +from collections.abc import Callable from pathlib import Path -from typing import Callable import pytest import service_integration @@ -14,12 +14,13 @@ pytest_plugins = [ "pytest_simcore.pydantic_models", - "pytest_simcore.repository_paths", - "pytest_simcore.schemas", "pytest_simcore.pytest_global_environs", + "pytest_simcore.repository_paths", ] -CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +_CURRENT_DIR = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +) @pytest.fixture(scope="session") @@ -31,24 +32,27 @@ def package_dir() -> Path: @pytest.fixture(scope="session") def tests_data_dir() -> Path: - pdir = CURRENT_DIR / "data" + pdir = _CURRENT_DIR / "data" assert pdir.exists() return pdir @pytest.fixture -def project_file_path(tests_data_dir, tmp_path) -> Path: +def docker_compose_overwrite_path(tests_data_dir, tmp_path) -> Path: + name = "docker-compose.overwrite.yml" dst = shutil.copy( - src=tests_data_dir / "docker-compose.overwrite.yml", - dst=tmp_path / "docker-compose.overwrite.yml", + src=tests_data_dir / name, + dst=tmp_path / name, ) return Path(dst) @pytest.fixture def metadata_file_path(tests_data_dir, tmp_path) -> Path: + name = "metadata.yml" dst = shutil.copy( - src=tests_data_dir / "metadata.yml", dst=tmp_path / "metadata.yml" + src=tests_data_dir / name, + dst=tmp_path / name, ) return Path(dst) @@ -59,7 +63,7 @@ def run_program_with_args() -> Callable: runner = CliRunner() def _invoke(*cmd): - print("RUNNING", "osparc-service-integrator", cmd) + print("RUNNING", "simcore-service-integrator", cmd) print(runner.make_env()) return runner.invoke(cli.app, list(cmd)) diff --git a/packages/service-integration/tests/data/docker-compose-meta.yml b/packages/service-integration/tests/data/docker-compose-meta.yml index 79173852c31..53a99ca062b 100644 --- 
a/packages/service-integration/tests/data/docker-compose-meta.yml +++ b/packages/service-integration/tests/data/docker-compose-meta.yml @@ -83,7 +83,7 @@ services: simcore.service.container-http-entrypoint: rt-web-dy simcore.service.containers-allowed-outgoing-permit-list: '{"rocket-core": [{"hostname": "${rocket_LICENSE_SERVER}", "tcp_ports": [27000, 56625], "dns_resolver": {"address": "172.16.8.15", "port": 53}}]}' simcore.service.containers-allowed-outgoing-internet: '["sym-server"]' - simcore.service.paths-mapping: '{"outputs_path": "/home/smu/work/outputs", "inputs_path": "/home/smu/work/inputs", "state_paths": ["/home/smu/work/workspace"]}' + simcore.service.paths-mapping: '{"outputs_path": "/home/smu/work/outputs", "inputs_path": "/home/smu/work/inputs", "state_paths": ["/home/smu/work/workspace"], "volume_size_limits": {"/home/smu/work/workspace": "5G", "/home/smu/work/outputs": "10m", "/home/smu/work/inputs": "10M"}}' simcore.service.settings: '[{"name": "ports", "type": "int", "value": 80}, {"name": "Resources", "type": "Resources", "value": {"Limits": {"NanoCPUs": 1000000000}, "Reservations": {"NanoCPUs": 1000000000}}}, {"name": "env", "type": "string", "value": ["rocket_CORE_HOSTNAME=%%container_name.rocket-core%%"]}]' version: '3.7' diff --git a/packages/service-integration/tests/data/docker-compose.overwrite.yml b/packages/service-integration/tests/data/docker-compose.overwrite.yml index 2444dbef082..b55b5120790 100644 --- a/packages/service-integration/tests/data/docker-compose.overwrite.yml +++ b/packages/service-integration/tests/data/docker-compose.overwrite.yml @@ -1,4 +1,6 @@ services: osparc-python-runner: + depends_on: + - another-service build: dockerfile: Dockerfile diff --git a/packages/service-integration/tests/data/docker-compose_jupyter-math_ad51f53.yml b/packages/service-integration/tests/data/docker-compose_jupyter-math_ad51f53.yml index 54ac4a936e8..2ec8e943029 100644 --- a/packages/service-integration/tests/data/docker-compose_jupyter-math_ad51f53.yml +++ b/packages/service-integration/tests/data/docker-compose_jupyter-math_ad51f53.yml @@ -11,54 +11,24 @@ services: org.label-schema.schema-version: '1.0' org.label-schema.vcs-ref: ad51f53 org.label-schema.vcs-url: https://github.com/ITISFoundation/jupyter-math - io.simcore.name: '{"name": "JupyterLab Math (Python+Octave)"}' - io.simcore.thumbnail: '{"thumbnail": "https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/1024px-Jupyter_logo.svg.png"}' - io.simcore.description: '{"description": "[JupyterLab](https://jupyter.org/) + io.simcore.name: '{"name":"JupyterLab Math (Python+Octave)"}' + io.simcore.thumbnail: '{"thumbnail":"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/1024px-Jupyter_logo.svg.png"}' + io.simcore.description: '{"description":"[JupyterLab](https://jupyter.org/) with **math** kernels in [octave](https://www.gnu.org/software/octave/index) (largely compatible with Matlab code) and python. 
Python kernel includes popular math packages like [numpy](https://numpy.org/), [pandas](https://pandas.pydata.org/), [matplotlib](https://matplotlib.org/), [plotly](https://plotly.com/python/) and [scipy](https://www.scipy.org/) packages (see requirements.txt list)\n"}' - io.simcore.key: '{"key": "simcore/services/dynamic/jupyter-math"}' - io.simcore.version: '{"version": "2.0.8"}' - io.simcore.integration-version: '{"integration-version": "2.0.0"}' - io.simcore.type: '{"type": "dynamic"}' - io.simcore.authors: '{"authors": [{"name": "Pedro Crespo-Valero", "email": - "crespo@itis.swiss", "affiliation": "IT''IS Foundation"}, {"name": "Sylvain - Andereregg", "email": "anderegg@itis.swiss", "affiliation": "IT''IS Foundation"}, - {"name": "Sylvain Reboux", "email": "reboux@zmt.swiss", "affiliation": "Zurich - MedTech"}]}' - io.simcore.contact: '{"contact": "crespo@itis.swiss"}' - io.simcore.inputs: '{"inputs": {"input_1": {"displayOrder": 1.0, "label": - "input_files_1", "description": "Any input files. One or several files compressed - in a zip will be downloaded in an inputs folder.", "type": "data:*/*"}, - "input_2": {"displayOrder": 2.0, "label": "input_files_2", "description": - "Any input files. One or several files compressed in a zip will be downloaded - in an inputs folder.", "type": "data:*/*"}, "input_3": {"displayOrder": - 3.0, "label": "input_files_3", "description": "Any input files. One or several - files compressed in a zip will be downloaded in an inputs folder.", "type": - "data:*/*"}, "input_4": {"displayOrder": 4.0, "label": "input_files_4", - "description": "Any input files. One or several files compressed in a zip - will be downloaded in an inputs folder.", "type": "data:*/*"}}}' - io.simcore.outputs: '{"outputs": {"output_1": {"displayOrder": 1.0, "label": - "Output files 1", "description": "Output files uploaded from the outputs - folder", "type": "data:*/*"}, "output_2": {"displayOrder": 2.0, "label": - "Output files 2", "description": "Output files uploaded from the outputs - folder", "type": "data:*/*"}, "output_3": {"displayOrder": 3.0, "label": - "Output files 3", "description": "Output files uploaded from the outputs - folder", "type": "data:*/*"}, "output_4": {"displayOrder": 4.0, "label": - "Output files 4", "description": "Output files uploaded from the outputs - folder", "type": "data:*/*"}}}' - io.simcore.boot-options: '{"boot-options": {"boot_mode": {"label": "Boot mode", - "description": "Select boot type for the service", "default": "0", "items": - {"0": {"label": "JupyterLab", "description": "Display the JupyterLab interface - the default boot mode"}, "1": {"label": "Voila", "description": "To start - as Voila save a notebook as \"voila.ipynb\" in the root folder"}}}}}' + io.simcore.key: '{"key":"simcore/services/dynamic/jupyter-math"}' + io.simcore.version: '{"version":"2.0.8"}' + io.simcore.integration-version: '{"integration-version":"2.0.0"}' + io.simcore.type: '{"type":"dynamic"}' + io.simcore.authors: '{"authors":[{"name":"Pedro Crespo-Valero","email":"crespo@itis.swiss","affiliation":"IT''IS Foundation"},{"name":"Sylvain Andereregg","email":"anderegg@itis.swiss","affiliation":"IT''IS Foundation"},{"name":"Sylvain Reboux","email":"reboux@zmt.swiss","affiliation":"Zurich MedTech"}]}' + io.simcore.contact: '{"contact":"crespo@itis.swiss"}' + io.simcore.inputs: '{"inputs":{"input_1":{"displayOrder":1.0,"label":"input_files_1","description":"Any input files. 
One or several files compressed in a zip will be downloaded in an inputs folder.","type":"data:*/*"},"input_2":{"displayOrder":2.0,"label":"input_files_2","description":"Any input files. One or several files compressed in a zip will be downloaded in an inputs folder.","type":"data:*/*"},"input_3":{"displayOrder":3.0,"label":"input_files_3","description":"Any input files. One or several files compressed in a zip will be downloaded in an inputs folder.","type":"data:*/*"},"input_4":{"displayOrder":4.0,"label":"input_files_4","description":"Any input files. One or several files compressed in a zip will be downloaded in an inputs folder.","type":"data:*/*"}}}' + io.simcore.outputs: '{"outputs":{"output_1":{"displayOrder":1.0,"label":"Output files 1","description":"Output files uploaded from the outputs folder","type":"data:*/*"},"output_2":{"displayOrder":2.0,"label":"Output files 2","description":"Output files uploaded from the outputs folder","type":"data:*/*"},"output_3":{"displayOrder":3.0,"label":"Output files 3","description":"Output files uploaded from the outputs folder","type":"data:*/*"},"output_4":{"displayOrder":4.0,"label":"Output files 4","description":"Output files uploaded from the outputs folder","type":"data:*/*"}}}' + io.simcore.boot-options: '{"boot-options":{"boot_mode":{"label":"Boot mode","description":"Select boot type for the service","default":"0","items":{"0":{"label":"JupyterLab","description":"Display the JupyterLab interface the default boot mode"},"1":{"label":"Voila","description":"To start as Voila save a notebook as \"voila.ipynb\" in the root folder"}}}}}' simcore.service.restart-policy: no-restart - simcore.service.paths-mapping: '{"inputs_path": "/home/jovyan/work/inputs", - "outputs_path": "/home/jovyan/work/outputs", "state_paths": ["/home/jovyan/work/workspace"]}' - simcore.service.settings: '[{"name": "ports", "type": "int", "value": 8888}, - {"name": "constraints", "type": "string", "value": ["node.platform.os == - linux"]}, {"name": "Resources", "type": "Resources", "value": {"Limits": - {"NanoCPUs": 4000000000, "MemoryBytes": 17179869184}}}]' + simcore.service.paths-mapping: '{"inputs_path":"/home/jovyan/work/inputs","outputs_path":"/home/jovyan/work/outputs","state_paths":["/home/jovyan/work/workspace"]}' + simcore.service.settings: '[{"name":"ports","type":"int","value":8888},{"name":"constraints","type":"string","value":["node.platform.os == linux"]},{"name":"Resources","type":"Resources","value":{"Limits":{"NanoCPUs":4000000000,"MemoryBytes":17179869184}}}]' image: simcore/services/dynamic/jupyter-math:2.0.8 diff --git a/packages/service-integration/tests/data/metadata.yml b/packages/service-integration/tests/data/metadata.yml index 46c3ee99a5e..0b9ffc61cd8 100644 --- a/packages/service-integration/tests/data/metadata.yml +++ b/packages/service-integration/tests/data/metadata.yml @@ -1,9 +1,11 @@ -name: oSparc Python Runner +name: Sim4Life Python Runner key: simcore/services/dynamic/osparc-python-runner type: computational integration-version: 1.0.0 version: 1.1.0 -description: oSparc Python Runner +version_display: "Sim4Life Release V7.2" +release_date: "2024-05-31T13:45:30" +description: Python Runner with Sim4Life contact: sylvain@foo.com authors: - name: Mr X diff --git a/packages/service-integration/tests/test__usecase_jupytermath.py b/packages/service-integration/tests/test__usecase_jupytermath.py index c64c06d89d6..4b816a18198 100644 --- a/packages/service-integration/tests/test__usecase_jupytermath.py +++ 
b/packages/service-integration/tests/test__usecase_jupytermath.py @@ -7,12 +7,13 @@ import os import shutil import subprocess +from collections.abc import Callable, Iterable from pathlib import Path -from typing import Any, Callable, Iterable +from typing import Any import pytest import yaml -from pytest import TempPathFactory +from common_library.json_serialization import json_loads from service_integration import cli from typer.testing import CliRunner, Result @@ -52,7 +53,7 @@ def _download_git_commit(repository: str, commit_sha: str, directory: Path): @pytest.fixture(scope="module") -def jupytermath_repo(tmp_path_factory: TempPathFactory) -> Path: +def jupytermath_repo(tmp_path_factory: pytest.TempPathFactory) -> Path: print("Running git", _git_version()) tmp_path = tmp_path_factory.mktemp("jupytermath_repo") repo_dir = _download_git_commit( @@ -75,7 +76,7 @@ def run_program_in_repo(tmp_path: Path, jupytermath_repo: Path) -> Iterable[Call def _invoke(*cmd) -> tuple[Path, Result]: print( "RUNNING", - "osparc-service-integrator", + "simcore-service-integrator", " ".join(cmd), f"at {workdir=}", ) @@ -118,10 +119,9 @@ def compose_spec_reference(tests_data_dir: Path) -> dict[str, Any]: Digest: sha256:279a297b49f1fddb26289d205d4ba5acca1bb8e7bedadcfce00f821873935c03 Status: Downloaded newer image for itisfoundation/ci-service-integration-library:v1.0.1-dev-25 """ - compose_spec = yaml.safe_load( + return yaml.safe_load( (tests_data_dir / "docker-compose_jupyter-math_ad51f53.yml").read_text() ) - return compose_spec def test_ooil_compose_wo_arguments( @@ -179,4 +179,24 @@ def test_ooil_compose_wo_arguments( ] = compose_spec_reference["services"]["jupyter-math"]["build"]["labels"][ "org.label-schema.build-date" ] + + label_keys = compose_spec_reference["services"]["jupyter-math"]["build"][ + "labels" + ].keys() + + # NOTE: generally it is not a good idea to compare serialized values. It is difficult to debug + # when it fails and a failure is not always indicative of a real error, e.g. orjson serializes differently + # from json. 
+ for k in label_keys: + + got_label_value = compose_spec["services"]["jupyter-math"]["build"]["labels"][k] + expected_label_value = compose_spec_reference["services"]["jupyter-math"][ + "build" + ]["labels"][k] + if k.startswith("io.simcore"): + assert json_loads(got_label_value) == json_loads(expected_label_value) + assert ( + got_label_value == expected_label_value + ), f"label {k} got a different dump" + assert compose_spec == compose_spec_reference diff --git a/packages/service-integration/tests/test_cli.py b/packages/service-integration/tests/test_cli.py index aa2844823c3..4eee418ec14 100644 --- a/packages/service-integration/tests/test_cli.py +++ b/packages/service-integration/tests/test_cli.py @@ -1,4 +1,4 @@ -from typing import Callable +from collections.abc import Callable from service_integration import __version__ diff --git a/packages/service-integration/tests/test_cli__compose_spec.py b/packages/service-integration/tests/test_cli__compose_spec.py new file mode 100644 index 00000000000..5fe98689a14 --- /dev/null +++ b/packages/service-integration/tests/test_cli__compose_spec.py @@ -0,0 +1,28 @@ +import pytest +from service_integration.cli._compose_spec import _strip_credentials + + +@pytest.mark.parametrize( + "url, expected_url", + [ + ( + "schema.veshttps://user:password@example.com/some/repo.git", + "schema.veshttps://example.com/some/repo.git", + ), + ( + "https://user:password@example.com/some/repo.git", + "https://example.com/some/repo.git", + ), + ( + "ssh://user:password@example.com/some/repo.git", + "ssh://example.com/some/repo.git", + ), + ( + "git@git.speag.com:some/repo.git", + "git@git.speag.com:some/repo.git", + ), + ("any_str", "any_str"), + ], +) +def test__strip_credentials(url: str, expected_url: str): + assert _strip_credentials(url) == expected_url diff --git a/packages/service-integration/tests/test_command_compose.py b/packages/service-integration/tests/test_command_compose.py index 0f4979c4685..b3aa3cd78d7 100644 --- a/packages/service-integration/tests/test_command_compose.py +++ b/packages/service-integration/tests/test_command_compose.py @@ -3,57 +3,59 @@ # pylint: disable=unused-variable import os +import traceback +from collections.abc import Callable from pathlib import Path -from typing import Callable -import pytest import yaml +from click.testing import Result +from service_integration.compose_spec_model import ComposeSpecification +from service_integration.osparc_config import MetadataConfig -@pytest.fixture -def compose_file_path(metadata_file_path: Path) -> Path: - # TODO: should pass with non-existing docker-compose-meta.yml file - compose_file_path: Path = metadata_file_path.parent / "docker-compose-meta.yml" - assert not compose_file_path.exists() - - # minimal - compose_file_path.write_text( - yaml.dump({"services": {"osparc-python-runner": {"build": {"labels": {}}}}}) - ) - return compose_file_path +def _format_cli_error(result: Result) -> str: + assert result.exception + tb_message = "\n".join(traceback.format_tb(result.exception.__traceback__)) + return f"Below exception was raised by the cli:\n{tb_message}" def test_make_docker_compose_meta( run_program_with_args: Callable, - project_file_path: Path, + docker_compose_overwrite_path: Path, metadata_file_path: Path, - compose_file_path: Path, + tmp_path: Path, ): """ docker-compose-build.yml: $(metatada) # Injects metadata from $< as labels - osparc-service-integrator compose --metadata $< --to-spec-file $@ + simcore-service-integrator compose --metadata $< --to-spec-file $@ """ + 
target_compose_specs = tmp_path / "docker-compose.yml" + metadata_cfg = MetadataConfig.from_yaml(metadata_file_path) + result = run_program_with_args( "compose", "--metadata", str(metadata_file_path), "--to-spec-file", - compose_file_path, + target_compose_specs, ) - assert result.exit_code == os.EX_OK, result.output + assert result.exit_code == os.EX_OK, _format_cli_error(result) - assert compose_file_path.exists() + # produces a compose spec + assert target_compose_specs.exists() - compose_cfg = yaml.safe_load(compose_file_path.read_text()) - metadata_cfg = yaml.safe_load(metadata_file_path.read_text()) + # valid compose specs + compose_cfg = ComposeSpecification.model_validate( + yaml.safe_load(target_compose_specs.read_text()) + ) + assert compose_cfg.services - # TODO: compare labels vs metadata - service_name = metadata_cfg["key"].split("/")[-1] - compose_labels = compose_cfg["services"][service_name]["build"]["labels"] + # compose labels vs metadata fild + compose_labels = compose_cfg.services[metadata_cfg.service_name()].build.labels assert compose_labels - # schema of expected + assert isinstance(compose_labels.root, dict) - # deserialize content and should fit metadata_cfg + assert MetadataConfig.from_labels_annotations(compose_labels.root) == metadata_cfg diff --git a/packages/service-integration/tests/test_command_config.py b/packages/service-integration/tests/test_command_config.py index ea2b984aafe..f6243efd59f 100644 --- a/packages/service-integration/tests/test_command_config.py +++ b/packages/service-integration/tests/test_command_config.py @@ -3,11 +3,12 @@ # pylint: disable=unused-variable import os import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import pytest import yaml +from service_integration.osparc_config import OSPARC_CONFIG_DIRNAME @pytest.fixture @@ -21,11 +22,12 @@ def tmp_compose_spec(tests_data_dir: Path, tmp_path: Path): def test_create_new_osparc_config( run_program_with_args: Callable, tmp_compose_spec: Path ): - osparc_dir = tmp_compose_spec.parent / ".osparc" + osparc_dir = tmp_compose_spec.parent / OSPARC_CONFIG_DIRNAME assert not osparc_dir.exists() result = run_program_with_args( "config", + "create", "--from-spec-file", str(tmp_compose_spec), ) diff --git a/packages/service-integration/tests/test_command_metadata.py b/packages/service-integration/tests/test_command_metadata.py index 7c89a2cce1c..7204fc953c6 100644 --- a/packages/service-integration/tests/test_command_metadata.py +++ b/packages/service-integration/tests/test_command_metadata.py @@ -3,12 +3,12 @@ # pylint: disable=unused-variable import os +from collections.abc import Callable from pathlib import Path -from typing import Callable import pytest import yaml -from service_integration.commands.metadata import TargetVersionChoices +from service_integration.cli._metadata import TargetVersionChoices @pytest.fixture @@ -50,7 +50,7 @@ def test_make_version( As Makefile recipe: version-service-patch version-service-minor version-service-major: $(metatada) ## kernel/service versioning as patch - osparc-service-integrator bump-version --metadata-file $< --upgrade $(subst version-service-,,$@) + simcore-service-integrator bump-version --metadata-file $< --upgrade $(subst version-service-,,$@) """ # ensures current_metadata fixture worked as expected assert current_metadata[target_version] == current_version @@ -81,15 +81,15 @@ def test_make_version( "cmd,expected_output", [ ( - "osparc-service-integrator get-version --metadata-file 
tests/data/metadata.yml", + "simcore-service-integrator get-version --metadata-file tests/data/metadata.yml", "1.1.0", ), ( - "osparc-service-integrator get-version --metadata-file tests/data/metadata.yml integration-version", + "simcore-service-integrator get-version --metadata-file tests/data/metadata.yml integration-version", "1.0.0", ), ( - "osparc-service-integrator get-version --metadata-file tests/data/metadata.yml version", + "simcore-service-integrator get-version --metadata-file tests/data/metadata.yml version", "1.1.0", ), ], diff --git a/packages/service-integration/tests/test_command_run_creator.py b/packages/service-integration/tests/test_command_run_creator.py index 40dece4fd48..cc7445d876b 100644 --- a/packages/service-integration/tests/test_command_run_creator.py +++ b/packages/service-integration/tests/test_command_run_creator.py @@ -20,7 +20,7 @@ def test_make_service_cli_run(run_program_with_args, metadata_file_path: Path): """ service.cli/run: $(metatada) # Updates adapter script from metadata in $< - osparc-service-integrator run-creator --metadata $< --runscript $@ + simcore-service-integrator run-creator --metadata $< --runscript $@ """ run_script_path: Path = metadata_file_path.parent / "run" diff --git a/packages/service-integration/tests/test_compose_spec_model.py b/packages/service-integration/tests/test_compose_spec_model.py index 63cd0924c99..416dfbb8eef 100644 --- a/packages/service-integration/tests/test_compose_spec_model.py +++ b/packages/service-integration/tests/test_compose_spec_model.py @@ -9,7 +9,7 @@ def test_autogenerated_compose_spec_model(tests_data_dir: Path): docker_compose_path = tests_data_dir / "docker-compose-meta.yml" # tests if parses valid file - compose_spec = ComposeSpecification.parse_obj( + compose_spec = ComposeSpecification.model_validate( yaml.safe_load(docker_compose_path.read_text()) ) diff --git a/packages/service-integration/tests/test_labels_annotations.py b/packages/service-integration/tests/test_labels_annotations.py deleted file mode 100644 index 18f7bf1dcf3..00000000000 --- a/packages/service-integration/tests/test_labels_annotations.py +++ /dev/null @@ -1,37 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from pathlib import Path -from pprint import pprint -from typing import Any - -import pytest -import yaml -from service_integration.labels_annotations import from_labels, to_labels - - -@pytest.fixture -def metadata_config(tests_data_dir: Path): - config = yaml.safe_load((tests_data_dir / "metadata.yml").read_text()) - # adds some env-vars - # FIXME: if version is set as '1.0' then pydantic will resolve it as a float!! 
- config.update({"schema-version": "1.0.0", "build-date": "${BUILD_DATE}"}) - return config - - -@pytest.mark.parametrize("trim_key_head", (True, False)) -def test_to_and_from_labels(metadata_config: dict[str, Any], trim_key_head: bool): - - metadata_labels = to_labels( - metadata_config, prefix_key="swiss.itisfoundation", trim_key_head=trim_key_head - ) - print(f"\n{trim_key_head=:*^100}") - pprint(metadata_labels) - - assert all(key.startswith("swiss.itisfoundation.") for key in metadata_labels) - - got_config = from_labels( - metadata_labels, prefix_key="swiss.itisfoundation", trim_key_head=trim_key_head - ) - assert got_config == metadata_config diff --git a/packages/service-integration/tests/test_oci_image_spec.py b/packages/service-integration/tests/test_oci_image_spec.py index 4207c005199..641594c9966 100644 --- a/packages/service-integration/tests/test_oci_image_spec.py +++ b/packages/service-integration/tests/test_oci_image_spec.py @@ -8,7 +8,7 @@ LabelSchemaAnnotations, OciImageSpecAnnotations, ) -from service_integration.osparc_config import MetaConfig +from service_integration.osparc_config import MetadataConfig def test_label_schema_to_oci_conversion(monkeypatch): @@ -18,7 +18,7 @@ def test_label_schema_to_oci_conversion(monkeypatch): lsa = LabelSchemaAnnotations.create_from_env() - OciImageSpecAnnotations.parse_obj(lsa.to_oci_data()) + OciImageSpecAnnotations.model_validate(lsa.to_oci_data()) def test_create_annotations_from_metadata(tests_data_dir: Path): @@ -27,10 +27,10 @@ def test_create_annotations_from_metadata(tests_data_dir: Path): # recover from docker labels # - meta_cfg = MetaConfig.from_yaml(tests_data_dir / "metadata.yml") + meta_cfg = MetadataConfig.from_yaml(tests_data_dir / "metadata.yml") # map io_spec to OCI image-spec - oic_image_spec = OciImageSpecAnnotations( + OciImageSpecAnnotations( authors=", ".join([f"{a.name} ({a.email})" for a in meta_cfg.authors]) ) diff --git a/packages/service-integration/tests/test_osparc_config.py b/packages/service-integration/tests/test_osparc_config.py index 77348df499a..d22871b5c6a 100644 --- a/packages/service-integration/tests/test_osparc_config.py +++ b/packages/service-integration/tests/test_osparc_config.py @@ -10,7 +10,11 @@ import pytest import yaml from models_library.service_settings_labels import SimcoreServiceSettingLabelEntry -from service_integration.osparc_config import MetaConfig, RuntimeConfig, SettingsItem +from service_integration.osparc_config import ( + MetadataConfig, + RuntimeConfig, + SettingsItem, +) @pytest.fixture @@ -44,20 +48,23 @@ def labels(tests_data_dir: Path, labels_fixture_name: str) -> dict[str, str]: def test_load_from_labels( labels: dict[str, str], labels_fixture_name: str, tmp_path: Path ): - meta_cfg = MetaConfig.from_labels_annotations(labels) + meta_cfg = MetadataConfig.from_labels_annotations(labels) runtime_cfg = RuntimeConfig.from_labels_annotations(labels) + assert runtime_cfg.callbacks_mapping is not None - print(meta_cfg.json(exclude_unset=True, indent=2)) - print(runtime_cfg.json(exclude_unset=True, indent=2)) + print(meta_cfg.model_dump_json(exclude_unset=True, indent=2)) + print(runtime_cfg.model_dump_json(exclude_unset=True, indent=2)) # create yamls from config for model in (runtime_cfg, meta_cfg): config_path = ( tmp_path / f"{model.__class__.__name__.lower()}-{labels_fixture_name}.yml" ) - with open(config_path, "wt") as fh: + with open(config_path, "w") as fh: data = json.loads( - model.json(exclude_unset=True, by_alias=True, exclude_none=True) + 
model.model_dump_json( + exclude_unset=True, by_alias=True, exclude_none=True + ) ) yaml.safe_dump(data, fh, sort_keys=False) @@ -67,17 +74,17 @@ def test_load_from_labels( @pytest.mark.parametrize( - "example_data", SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"] + "example_data", + SimcoreServiceSettingLabelEntry.model_json_schema()["examples"], ) def test_settings_item_in_sync_with_service_settings_label( - example_data: dict[str, Any] + example_data: dict[str, Any], ): - print(pformat(example_data)) # First we parse with SimcoreServiceSettingLabelEntry since it also supports backwards compatibility # and will upgrade old version - example_model = SimcoreServiceSettingLabelEntry.parse_obj(example_data) + example_model = SimcoreServiceSettingLabelEntry.model_validate(example_data) # SettingsItem is exclusively for NEW labels, so it should not support backwards compatibility new_model = SettingsItem( @@ -87,4 +94,4 @@ def test_settings_item_in_sync_with_service_settings_label( ) # check back - SimcoreServiceSettingLabelEntry.parse_obj(new_model.dict(by_alias=True)) + SimcoreServiceSettingLabelEntry.model_validate(new_model.model_dump(by_alias=True)) diff --git a/packages/service-integration/tests/test_osparc_image_specs.py b/packages/service-integration/tests/test_osparc_image_specs.py index 1bd5d4deeca..6bec87425ad 100644 --- a/packages/service-integration/tests/test_osparc_image_specs.py +++ b/packages/service-integration/tests/test_osparc_image_specs.py @@ -2,6 +2,7 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable +import hashlib from pathlib import Path import pytest @@ -9,8 +10,8 @@ from pydantic import BaseModel from service_integration.compose_spec_model import BuildItem, Service from service_integration.osparc_config import ( - DockerComposeOverwriteCfg, - MetaConfig, + DockerComposeOverwriteConfig, + MetadataConfig, RuntimeConfig, ) from service_integration.osparc_image_specs import create_image_spec @@ -27,11 +28,12 @@ def test_create_image_spec_impl(tests_data_dir: Path, settings: AppSettings): # image-spec for devel, prod, ... 
# load & parse osparc configs - docker_compose_overwrite_cfg = DockerComposeOverwriteCfg.from_yaml( + docker_compose_overwrite_cfg = DockerComposeOverwriteConfig.from_yaml( tests_data_dir / "docker-compose.overwrite.yml" ) - meta_cfg = MetaConfig.from_yaml(tests_data_dir / "metadata-dynamic.yml") + meta_cfg = MetadataConfig.from_yaml(tests_data_dir / "metadata-dynamic.yml") runtime_cfg = RuntimeConfig.from_yaml(tests_data_dir / "runtime.yml") + assert runtime_cfg.callbacks_mapping is not None # assemble docker-compose build_spec = BuildItem( @@ -49,12 +51,22 @@ def test_create_image_spec_impl(tests_data_dir: Path, settings: AppSettings): assert compose_spec.services is not None assert isinstance(compose_spec.services, dict) - service_name = list(compose_spec.services.keys())[0] + service_name = next(iter(compose_spec.services.keys())) # pylint: disable=unsubscriptable-object assert isinstance(compose_spec.services[service_name], Service) build_spec = compose_spec.services[service_name].build assert build_spec assert isinstance(build_spec, BaseModel) - print(build_spec.json(exclude_unset=True, indent=2)) - print(yaml.safe_dump(compose_spec.dict(exclude_unset=True), sort_keys=False)) + print(build_spec.model_dump_json(exclude_unset=True, indent=2)) + print(yaml.safe_dump(compose_spec.model_dump(exclude_unset=True), sort_keys=False)) + + +def test_image_digest_is_not_a_label_annotation(tests_data_dir: Path): + meta_cfg = MetadataConfig.from_yaml(tests_data_dir / "metadata-dynamic.yml") + + assert meta_cfg.image_digest is None + meta_cfg.image_digest = hashlib.sha256(b"this is the image manifest").hexdigest() + + annotations = meta_cfg.to_labels_annotations() + assert not any("digest" in key for key in annotations) diff --git a/packages/service-integration/tests/test_osparc_runtime_specs.py b/packages/service-integration/tests/test_osparc_runtime_specs.py index b987ad8ed0a..153c85d27c4 100644 --- a/packages/service-integration/tests/test_osparc_runtime_specs.py +++ b/packages/service-integration/tests/test_osparc_runtime_specs.py @@ -12,14 +12,13 @@ def test_create_runtime_spec_impl(tests_data_dir: Path): - - # have spec on how to run -> assemble mounts, network etc of the compose -> ready to run with `docker-compose up` + # have spec on how to run -> assemble mounts, network etc of the compose -> ready to run with `docker compose up` # run-spec for devel, prod, etc ... osparc_spec: dict = yaml.safe_load((tests_data_dir / "runtime.yml").read_text()) - pm_spec1 = PathMappingsLabel.parse_obj(osparc_spec["paths-mapping"]) - pm_spec2 = PathMappingsLabel.parse_obj( + pm_spec1 = PathMappingsLabel.model_validate(osparc_spec["paths-mapping"]) + pm_spec2 = PathMappingsLabel.model_validate( { "outputs_path": "/outputs", "inputs_path": "/inputs", @@ -59,12 +58,12 @@ def test_create_runtime_spec_impl(tests_data_dir: Path): # FIXME: ensure all sources are different! (e.g. a/b/c and z/c have the same name!) 
- print(Service(volumes=volumes).json(exclude_unset=True, indent=2)) + print(Service(volumes=volumes).model_dump_json(exclude_unset=True, indent=2)) # TODO: _auto_map_to_service(osparc_spec["settings"]) data = {} for obj in osparc_spec["settings"]: - item = SettingsItem.parse_obj(obj) + item = SettingsItem.model_validate(obj) if item.name == "resources": # https://docs.docker.com/compose/compose-file/compose-file-v3/#resources @@ -86,9 +85,9 @@ def test_create_runtime_spec_impl(tests_data_dir: Path): data["deploy"] = {"placement": {"constraints": item.value}} else: - assert False, item + raise AssertionError(item) - print(Service(**data).json(exclude_unset=True, indent=2)) + print(Service(**data).model_dump_json(exclude_unset=True, indent=2)) def test_compatibility(): diff --git a/packages/service-integration/tests/test_versioning.py b/packages/service-integration/tests/test_versioning.py index a4172e62461..26fe2962a61 100644 --- a/packages/service-integration/tests/test_versioning.py +++ b/packages/service-integration/tests/test_versioning.py @@ -2,10 +2,17 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -import json + +import itertools +from typing import Any import pytest from packaging.version import Version +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + iter_model_examples_in_class, +) from service_integration.versioning import ( ExecutableVersionInfo, ServiceVersionInfo, @@ -20,13 +27,11 @@ def test_pep404_compare_versions(): assert Version("0.6a9dev") < Version("0.6a9") # same release but one is pre-release - assert ( - Version("2.1-rc2").release == Version("2.1").release - and Version("2.1-rc2").is_prerelease - ) + assert Version("2.1-rc2").release == Version("2.1").release + assert Version("2.1-rc2").is_prerelease -BUMP_PARAMS = [ +_BUMP_PARAMS = [ # "upgrade,current_version,new_version", ("patch", "1.1.1", "1.1.2"), ("minor", "1.1.1", "1.2.0"), @@ -36,7 +41,7 @@ def test_pep404_compare_versions(): @pytest.mark.parametrize( "bump,current_version,new_version", - BUMP_PARAMS, + _BUMP_PARAMS, ) def test_bump_version_string( bump: str, @@ -47,11 +52,15 @@ def test_bump_version_string( @pytest.mark.parametrize( - "model_cls", - (ExecutableVersionInfo, ServiceVersionInfo), + "model_cls, example_name, example_data", + itertools.chain( + iter_model_examples_in_class(ExecutableVersionInfo), + iter_model_examples_in_class(ServiceVersionInfo), + ), ) -def test_version_info_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", json.dumps(example, indent=1)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" +def test_version_info_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/packages/service-library/Makefile b/packages/service-library/Makefile index 26c0740b96c..edefcc08385 100644 --- a/packages/service-library/Makefile +++ b/packages/service-library/Makefile @@ -6,8 +6,8 @@ include ../../scripts/common.Makefile help: ## overwrites and calls common help function $(MAKE) --no-print-directory --file ../../scripts/common.Makefile $@ @echo - @echo '🚨 BEWARE there are issues with brakets in names `make test[aiohttp]` will raise errors:' - @echo 'βœ…` `make "test[aiohttp]"` works as intended' + @echo '🚨 BEWARE there are issues with brakets in names `make 
test-dev[aiohttp]` will raise errors:' + @echo 'βœ…` `make "test-dev[aiohttp]"` works as intended' @echo 'πŸ‘“ Please note: when INSTALLING, DEVELOPING and TESTING ' @echo @echo 'servicelib support for aiohttp extras is not included:' @@ -22,7 +22,7 @@ help: ## overwrites and calls common help function install-%: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt APP_PACKAGE_NAME=servicelib .PHONY: test-dev @@ -34,6 +34,7 @@ test-dev: ## runs unit tests in w/o extras --cov-config=$(CURDIR)/../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ --exitfirst \ @@ -55,6 +56,7 @@ test-dev[aiohttp]: ## runs unit common tests and aiohttp extras --cov-config=$(CURDIR)/../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ --exitfirst \ @@ -74,6 +76,7 @@ test-dev[fastapi]: ## runs unit common tests and fastapi extras --cov-config=$(CURDIR)/../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ --exitfirst \ @@ -92,6 +95,7 @@ test-dev[all]: ## runs unit tests w/ all extras --cov-config=$(CURDIR)/../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ --exitfirst \ @@ -103,7 +107,7 @@ test-dev[all]: ## runs unit tests w/ all extras .PHONY: test-ci[all] -test-ci[all]: ## runs unit tests w/ all extras +test-ci[all]: ## runs unit tests w/ all extras @pytest \ --asyncio-mode=auto \ --color=yes \ @@ -113,9 +117,10 @@ test-ci[all]: ## runs unit tests w/ all extras --cov-report=xml \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ + --junitxml=junit.xml -o junit_family=legacy \ --keep-docker-up \ --log-date-format="%Y-%m-%d %H:%M:%S" \ - --log-format="%(asctime)s %(levelname)s %(message)s" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ --verbose \ -m "not heavy_load" \ $(CURDIR)/tests diff --git a/packages/service-library/requirements/_aiohttp.in b/packages/service-library/requirements/_aiohttp.in index a9176a4ce93..c953f7546b1 100644 --- a/packages/service-library/requirements/_aiohttp.in +++ b/packages/service-library/requirements/_aiohttp.in @@ -3,13 +3,12 @@ # # ---constraint ./_base.in - aiohttp aiopg[sa] -aiozipkin attrs jsonschema -openapi-core==0.12.0 # frozen until https://github.com/ITISFoundation/osparc-simcore/pull/1396 is CLOSED +opentelemetry-instrumentation-aiohttp-client +opentelemetry-instrumentation-aiohttp-server +opentelemetry-instrumentation-aiopg prometheus_client werkzeug diff --git a/packages/service-library/requirements/_aiohttp.txt b/packages/service-library/requirements/_aiohttp.txt index e7451176d49..91d683a8999 100644 --- a/packages/service-library/requirements/_aiohttp.txt +++ b/packages/service-library/requirements/_aiohttp.txt @@ -1,89 +1,108 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_aiohttp.txt --resolver=backtracking --strip-extras requirements/_aiohttp.in -# -aiohttp==3.8.4 - # via - # -r requirements/_aiohttp.in - # aiozipkin +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # 
via -r requirements/_aiohttp.in aiopg==1.4.0 # via -r requirements/_aiohttp.in -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp -aiozipkin==1.1.1 - # via -r requirements/_aiohttp.in -async-timeout==4.0.2 - # via - # aiohttp - # aiopg -attrs==21.4.0 +async-timeout==4.0.3 + # via aiopg +attrs==25.1.0 # via # -r requirements/_aiohttp.in # aiohttp # jsonschema - # openapi-core -charset-normalizer==3.0.1 - # via aiohttp -frozenlist==1.3.3 + # referencing +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +frozenlist==1.5.0 # via # aiohttp # aiosignal -greenlet==2.0.2 +greenlet==3.1.1 # via sqlalchemy -idna==3.4 +idna==3.10 # via yarl -isodate==0.6.1 - # via openapi-core -jsonschema==3.2.0 - # via - # -r requirements/_aiohttp.in - # openapi-schema-validator - # openapi-spec-validator -lazy-object-proxy==1.9.0 - # via openapi-core -markupsafe==2.1.2 +importlib-metadata==8.5.0 + # via opentelemetry-api +jsonschema==4.23.0 + # via -r requirements/_aiohttp.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markupsafe==3.0.2 # via werkzeug -multidict==6.0.4 +multidict==6.1.0 # via # aiohttp # yarl -openapi-core==0.12.0 +opentelemetry-api==1.30.0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aiohttp-client + # opentelemetry-instrumentation-aiohttp-server + # opentelemetry-instrumentation-aiopg + # opentelemetry-instrumentation-dbapi + # opentelemetry-semantic-conventions +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aiohttp-client + # opentelemetry-instrumentation-aiohttp-server + # opentelemetry-instrumentation-aiopg + # opentelemetry-instrumentation-dbapi +opentelemetry-instrumentation-aiohttp-client==0.51b0 + # via -r requirements/_aiohttp.in +opentelemetry-instrumentation-aiohttp-server==0.51b0 + # via -r requirements/_aiohttp.in +opentelemetry-instrumentation-aiopg==0.51b0 # via -r requirements/_aiohttp.in -openapi-schema-validator==0.2.3 - # via openapi-spec-validator -openapi-spec-validator==0.4.0 +opentelemetry-instrumentation-dbapi==0.51b0 + # via opentelemetry-instrumentation-aiopg +opentelemetry-semantic-conventions==0.51b0 # via - # -c requirements/././constraints.txt - # openapi-core -prometheus-client==0.16.0 + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aiohttp-client + # opentelemetry-instrumentation-aiohttp-server + # opentelemetry-instrumentation-dbapi +opentelemetry-util-http==0.51b0 + # via + # opentelemetry-instrumentation-aiohttp-client + # opentelemetry-instrumentation-aiohttp-server +packaging==24.2 + # via opentelemetry-instrumentation +prometheus-client==0.21.1 # via -r requirements/_aiohttp.in -psycopg2-binary==2.9.5 +propcache==0.3.0 + # via + # aiohttp + # yarl +psycopg2-binary==2.9.10 # via # aiopg # sqlalchemy -pyrsistent==0.19.3 - # via jsonschema -pyyaml==5.4.1 - # via - # -c requirements/./_base.in - # openapi-spec-validator -six==1.16.0 +referencing==0.35.1 # via - # isodate # jsonschema - # openapi-core -sqlalchemy==1.4.46 + # jsonschema-specifications +rpds-py==0.23.1 # via - # -c requirements/./../../../requirements/constraints.txt - # aiopg -strict-rfc3339==0.7 - # via openapi-core -werkzeug==2.2.3 + # jsonschema + # referencing +sqlalchemy==1.4.54 + # via aiopg +werkzeug==3.1.3 # via -r requirements/_aiohttp.in -yarl==1.8.2 +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aiohttp-client + # opentelemetry-instrumentation-aiohttp-server + # 
opentelemetry-instrumentation-aiopg + # opentelemetry-instrumentation-dbapi +yarl==1.18.3 # via aiohttp - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/service-library/requirements/_base.in b/packages/service-library/requirements/_base.in index 0a47104d50d..24222e414b8 100644 --- a/packages/service-library/requirements/_base.in +++ b/packages/service-library/requirements/_base.in @@ -4,13 +4,32 @@ --constraint ../../../requirements/constraints.txt --constraint ./constraints.txt +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in + +aio-pika +aiocache aiodebug +aiodocker aiofiles -aio-pika arrow # date/time -redis +faststream +opentelemetry-api +opentelemetry-exporter-otlp +opentelemetry-instrumentation-aio-pika +opentelemetry-instrumentation-logging +opentelemetry-instrumentation-redis +opentelemetry-instrumentation-requests +opentelemetry-sdk +psutil pydantic pyinstrument pyyaml +redis +stream-zip tenacity +toolz tqdm +yarl diff --git a/packages/service-library/requirements/_base.txt b/packages/service-library/requirements/_base.txt index ed3c46d1319..4e3870a9ca3 100644 --- a/packages/service-library/requirements/_base.txt +++ b/packages/service-library/requirements/_base.txt @@ -1,48 +1,313 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==9.0.4 +aio-pika==9.5.5 + # via -r requirements/_base.in +aiocache==0.12.3 # via -r requirements/_base.in aiodebug==2.3.0 # via -r requirements/_base.in -aiofiles==23.1.0 +aiodocker==0.24.0 + # via -r requirements/_base.in +aiofiles==24.1.0 # via -r requirements/_base.in -aiormq==6.7.2 +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 # via aio-pika -arrow==1.2.3 - # via -r requirements/_base.in -async-timeout==4.0.2 - # via redis -idna==3.4 - # via yarl -multidict==6.0.4 - # via yarl -pamqp==3.2.1 +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 + # via + # fast-depends + # faststream +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/_base.in +attrs==25.1.0 + # via + # aiohttp + # jsonschema + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via typer +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +faststream==0.5.35 + # via -r requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.68.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +idna==3.10 + # via + # anyio + # email-validator + # requests + # yarl +importlib-metadata==8.5.0 + # via opentelemetry-api +jsonschema==4.23.0 + # via -r requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiohttp + # yarl +opentelemetry-api==1.30.0 + # via + # -r requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/_base.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk 
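The OpenTelemetry pins added above replace the old aiozipkin-based tracing dependencies. As rough orientation only (this is not the repo's actual wiring; the OTLP endpoint and service name below are assumptions), these packages are typically combined like this:

    from opentelemetry import trace
    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
    from opentelemetry.instrumentation.redis import RedisInstrumentor
    from opentelemetry.instrumentation.requests import RequestsInstrumentor
    from opentelemetry.sdk.resources import Resource
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    # one tracer provider per process, exporting spans over OTLP/HTTP
    provider = TracerProvider(
        resource=Resource.create({"service.name": "servicelib-demo"})
    )
    provider.add_span_processor(
        BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
    )
    trace.set_tracer_provider(provider)

    # instrumentors patch the respective client libraries globally
    RedisInstrumentor().instrument()
    RequestsInstrumentor().instrument()
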
+opentelemetry-util-http==0.51b0 + # via opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pydantic==1.10.2 +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 # via -r requirements/_base.in -pyinstrument==4.4.0 +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fast-depends + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via -r requirements/_base.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -pyyaml==5.4.1 +python-dotenv==1.1.0 + # via pydantic-settings +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==14.0.0 + # via + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.23.1 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +stream-zip==0.0.83 # via -r requirements/_base.in -redis==4.5.1 +tenacity==9.0.0 # via -r requirements/_base.in -six==1.16.0 - # via python-dateutil -tenacity==8.2.2 +toolz==1.0.0 # via -r requirements/_base.in -tqdm==4.64.1 +tqdm==4.67.1 # via 
-r requirements/_base.in -typing-extensions==4.5.0 +typer==0.16.0 + # via -r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug + # anyio + # faststream + # opentelemetry-sdk # pydantic -yarl==1.8.2 + # pydantic-core + # pydantic-extra-types + # typer +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via + # -r requirements/_base.in # aio-pika + # aiohttp # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/service-library/requirements/_fastapi.in b/packages/service-library/requirements/_fastapi.in index 48b55048e4c..3303e6043af 100644 --- a/packages/service-library/requirements/_fastapi.in +++ b/packages/service-library/requirements/_fastapi.in @@ -3,10 +3,10 @@ # # ---constraint ./_base.in - -fastapi -fastapi_contrib[jaegertracing] -httpx -uvicorn +fastapi[standard] +fastapi-lifespan-manager +httpx[http2] +opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-httpx +prometheus-client diff --git a/packages/service-library/requirements/_fastapi.txt b/packages/service-library/requirements/_fastapi.txt index 88b36f8add0..c6e5a29f597 100644 --- a/packages/service-library/requirements/_fastapi.txt +++ b/packages/service-library/requirements/_fastapi.txt @@ -1,69 +1,151 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_fastapi.txt --resolver=backtracking --strip-extras requirements/_fastapi.in -# -anyio==3.6.2 +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # httpcore + # httpx # starlette -certifi==2022.12.7 + # watchfiles +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +certifi==2025.1.31 # via # httpcore # httpx -click==8.1.3 - # via uvicorn -fastapi==0.92.0 +click==8.1.8 + # via + # rich-toolkit + # typer + # uvicorn +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via fastapi +fastapi==0.115.12 # via # -r requirements/_fastapi.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/_fastapi.in h11==0.14.0 # via # httpcore # uvicorn -httpcore==0.16.3 +h2==4.2.0 # via httpx -httpx==0.23.3 - # via -r requirements/_fastapi.in -idna==3.4 - # via - # anyio - # rfc3986 -jaeger-client==4.8.0 - # via fastapi-contrib -opentracing==2.4.0 - # via - # fastapi-contrib - # jaeger-client -pydantic==1.10.2 +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 
# via - # -c requirements/./_base.in + # -r requirements/_fastapi.in # fastapi -rfc3986==1.5.0 - # via httpx -six==1.16.0 - # via thrift -sniffio==1.3.0 +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio - # httpcore + # email-validator # httpx -starlette==0.25.0 +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 # via fastapi -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +opentelemetry-api==1.30.0 # via - # jaeger-client - # threadloop -typing-extensions==4.5.0 + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-semantic-conventions +opentelemetry-instrumentation==0.51b0 # via - # pydantic - # starlette -uvicorn==0.20.0 + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx +opentelemetry-instrumentation-asgi==0.51b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-fastapi==0.51b0 + # via -r requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.51b0 # via -r requirements/_fastapi.in +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx +opentelemetry-util-http==0.51b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx +packaging==24.2 + # via opentelemetry-instrumentation +prometheus-client==0.21.1 + # via -r requirements/_fastapi.in +pydantic==2.10.6 + # via fastapi +pydantic-core==2.27.2 + # via pydantic +pygments==2.19.1 + # via rich +python-dotenv==1.1.0 + # via uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via uvicorn +rich==14.0.0 + # via + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +shellingham==1.5.4 + # via typer +sniffio==1.3.1 + # via anyio +starlette==0.46.0 + # via fastapi +typer==0.16.0 + # via fastapi-cli +typing-extensions==4.12.2 + # via + # anyio + # fastapi + # pydantic + # pydantic-core + # rich-toolkit + # typer +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-httpx +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/service-library/requirements/_test.in b/packages/service-library/requirements/_test.in index 2b08c0655d3..239a389cbc0 100644 --- a/packages/service-library/requirements/_test.in +++ b/packages/service-library/requirements/_test.in @@ -11,21 +11,33 @@ --constraint _fastapi.txt # testing + asgi_lifespan +botocore coverage -coveralls docker faker flaky +numpy openapi-spec-validator +pillow +pip pytest -pytest-aiohttp # incompatible with pytest-asyncio. 
See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-aiohttp +pytest-asyncio pytest-benchmark pytest-cov pytest-docker +pytest-icdiff pytest-instafail pytest-mock pytest-runner pytest-sugar pytest-xdist python-dotenv +respx +sqlalchemy[mypy] +types_aiofiles +types_tqdm +types-psutil +types-psycopg2 diff --git a/packages/service-library/requirements/_test.txt b/packages/service-library/requirements/_test.txt index 3d8d4be40ea..af545a9da7a 100644 --- a/packages/service-library/requirements/_test.txt +++ b/packages/service-library/requirements/_test.txt @@ -1,100 +1,156 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 +aiohappyeyeballs==2.4.6 # via # -c requirements/_aiohttp.txt - # pytest-aiohttp -aiosignal==1.3.1 + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_aiohttp.txt - # aiohttp -asgi-lifespan==2.0.0 - # via -r requirements/_test.in -async-timeout==4.0.2 + # -c requirements/_base.txt + # pytest-aiohttp +aiosignal==1.3.2 # via # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # aiohttp -attrs==21.4.0 +anyio==4.8.0 + # via + # -c requirements/_base.txt + # -c requirements/_fastapi.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.1.0 # via # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # aiohttp # jsonschema - # pytest # pytest-docker -certifi==2022.12.7 + # referencing +botocore==1.38.1 + # via -r requirements/_test.in +certifi==2025.1.31 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_fastapi.txt + # httpcore + # httpx # requests -charset-normalizer==3.0.1 +charset-normalizer==3.4.1 # via - # -c requirements/_aiohttp.txt - # aiohttp + # -c requirements/_base.txt # requests -coverage==6.5.0 +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -docker==6.0.1 +docker==7.1.0 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -execnet==1.9.0 +execnet==2.1.1 # via pytest-xdist -faker==17.4.0 +faker==36.1.1 # via -r requirements/_test.in -flaky==3.7.0 +flaky==3.8.1 # via -r requirements/_test.in -frozenlist==1.3.3 +frozenlist==1.5.0 # via # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # aiohttp # aiosignal -idna==3.4 +greenlet==3.1.1 # via + # -c requirements/_aiohttp.txt + # sqlalchemy +h11==0.14.0 + # via + # -c requirements/_fastapi.txt + # httpcore +httpcore==1.0.7 + # via + # -c requirements/_fastapi.txt + # httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_fastapi.txt + # respx +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 + # via + # -c requirements/_aiohttp.txt + # -c requirements/_base.txt + # -c requirements/_fastapi.txt + # anyio + # httpx # requests # yarl iniconfig==2.0.0 # via pytest -jsonschema==3.2.0 +jmespath==1.0.1 + # via botocore +jsonschema==4.23.0 # via # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # openapi-schema-validator # openapi-spec-validator -multidict==6.0.4 +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2024.10.1 # via # -c requirements/_aiohttp.txt - # aiohttp - # yarl -openapi-schema-validator==0.2.3 + # -c 
requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +multidict==6.1.0 # via # -c requirements/_aiohttp.txt - # openapi-spec-validator -openapi-spec-validator==0.4.0 + # -c requirements/_base.txt + # aiohttp + # yarl +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +numpy==2.2.3 + # via -r requirements/_test.in +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 # via -r requirements/_test.in -packaging==23.0 +packaging==24.2 # via - # docker + # -c requirements/_aiohttp.txt + # -c requirements/_base.txt + # -c requirements/_fastapi.txt # pytest # pytest-sugar -pluggy==1.0.0 +pathable==0.4.4 + # via jsonschema-path +pillow==11.1.0 + # via -r requirements/_test.in +pip==25.0.1 + # via -r requirements/_test.in +pluggy==1.5.0 # via pytest -py-cpuinfo==9.0.0 - # via pytest-benchmark -pyrsistent==0.19.3 +pprintpp==0.4.0 + # via pytest-icdiff +propcache==0.3.0 # via # -c requirements/_aiohttp.txt - # jsonschema -pytest==7.2.1 + # -c requirements/_base.txt + # aiohttp + # yarl +py-cpuinfo==9.0.0 + # via pytest-benchmark +pytest==8.3.5 # via # -r requirements/_test.in # pytest-aiohttp @@ -102,70 +158,122 @@ pytest==7.2.1 # pytest-benchmark # pytest-cov # pytest-docker + # pytest-icdiff # pytest-instafail # pytest-mock # pytest-sugar # pytest-xdist -pytest-aiohttp==1.0.4 +pytest-aiohttp==1.1.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-benchmark==4.0.0 +pytest-asyncio==0.26.0 + # via + # -r requirements/_test.in + # pytest-aiohttp +pytest-benchmark==5.1.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-docker==3.2.0 # via -r requirements/_test.in -pytest-docker==1.0.1 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -pytest-xdist==3.2.0 +pytest-xdist==3.6.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt - # faker -python-dotenv==1.0.0 - # via -r requirements/_test.in -pyyaml==5.4.1 + # botocore +python-dotenv==1.1.0 + # via + # -c requirements/_base.txt + # -c requirements/_fastapi.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_fastapi.txt + # jsonschema-path +referencing==0.35.1 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_aiohttp.txt - # openapi-spec-validator -requests==2.28.2 + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +requests==2.32.3 # via - # coveralls + # -c requirements/_base.txt # docker -six==1.16.0 + # jsonschema-path +respx==0.22.0 + # via -r requirements/_test.in +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.23.1 # via - # -c requirements/_fastapi.txt + # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # jsonschema + # referencing +six==1.17.0 + # via + # -c requirements/_base.txt # python-dateutil -sniffio==1.3.0 + # rfc3339-validator +sniffio==1.3.1 # via + # -c requirements/_base.txt # -c requirements/_fastapi.txt + # anyio # 
asgi-lifespan -termcolor==2.2.0 +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_aiohttp.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +termcolor==2.5.0 # via pytest-sugar -tomli==2.0.1 +types-aiofiles==24.1.0.20241221 + # via -r requirements/_test.in +types-psutil==7.0.0.20250218 + # via -r requirements/_test.in +types-psycopg2==2.9.21.20250121 + # via -r requirements/_test.in +types-requests==2.32.0.20250301 + # via types-tqdm +types-tqdm==4.67.0.20250301 + # via -r requirements/_test.in +typing-extensions==4.12.2 # via - # coverage - # pytest -urllib3==1.26.14 + # -c requirements/_base.txt + # -c requirements/_fastapi.txt + # anyio + # mypy + # sqlalchemy2-stubs +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore # docker # requests -websocket-client==1.5.1 - # via docker -yarl==1.8.2 + # types-requests +yarl==1.18.3 # via # -c requirements/_aiohttp.txt + # -c requirements/_base.txt # aiohttp - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/packages/service-library/requirements/_tools.txt b/packages/service-library/requirements/_tools.txt index cce765c3795..985c2c3bc85 100644 --- a/packages/service-library/requirements/_tools.txt +++ b/packages/service-library/requirements/_tools.txt @@ -1,89 +1,88 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via + # -c requirements/_test.txt + # pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # pre-commit -tomli==2.0.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt 
- # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/service-library/requirements/ci.txt b/packages/service-library/requirements/ci.txt index 21133de95e0..2c748b3f860 100644 --- a/packages/service-library/requirements/ci.txt +++ b/packages/service-library/requirements/ci.txt @@ -9,10 +9,13 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../settings-library/ -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ +pytest-simcore @ ../pytest-simcore # current module -. +simcore-service-library @ . diff --git a/packages/service-library/requirements/ci[aiohttp].txt b/packages/service-library/requirements/ci[aiohttp].txt index 25a23441216..ee41e3b69a6 100644 --- a/packages/service-library/requirements/ci[aiohttp].txt +++ b/packages/service-library/requirements/ci[aiohttp].txt @@ -12,8 +12,10 @@ --requirement _test.txt # installs this repo's packages -../settings-library/ -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ +pytest-simcore @ ../pytest-simcore # current module -.[aiohttp] +simcore-service-library[aiohttp] @ . diff --git a/packages/service-library/requirements/ci[all].txt b/packages/service-library/requirements/ci[all].txt index 81e9018cc9f..f43ee95908f 100644 --- a/packages/service-library/requirements/ci[all].txt +++ b/packages/service-library/requirements/ci[all].txt @@ -13,8 +13,10 @@ --requirement _test.txt # installs this repo's packages -../settings-library/ -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ +pytest-simcore @ ../pytest-simcore # current module -.[all] +simcore-service-library[all] @ . diff --git a/packages/service-library/requirements/ci[fastapi].txt b/packages/service-library/requirements/ci[fastapi].txt index 2e9ac0f68a6..db051f4ef73 100644 --- a/packages/service-library/requirements/ci[fastapi].txt +++ b/packages/service-library/requirements/ci[fastapi].txt @@ -12,8 +12,10 @@ --requirement _test.txt # installs this repo's packages -../settings-library/ -../pytest-simcore/ +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ +pytest-simcore @ ../pytest-simcore # current module -.[fastapi] +simcore-service-library[fastapi] @ . diff --git a/packages/service-library/requirements/constraints.txt b/packages/service-library/requirements/constraints.txt index 2907c99a66b..e69de29bb2d 100644 --- a/packages/service-library/requirements/constraints.txt +++ b/packages/service-library/requirements/constraints.txt @@ -1,22 +0,0 @@ -# NOTE that these are constraints! i.e. 
not implicitly updated as the *.in requirements listings - -# attrs==22.1.0 (from -c requirements/_aiohttp.txt (line 22)) -# attrs>=19.2.0 (from pytest==7.1.3->-r requirements/_test.in (line 21)) -# attrs<22,>=19 (from pytest-docker==1.0.0->-r requirements/_test.in (line 24)) -attrs<22 - -# Generated by `openapi-core==0.12.0 # frozen until https://github.com/ITISFoundation/osparc-simcore/pull/1396 is CLOSED` -# pyyaml==5.4.1 (from -c requirements/_aiohttp.txt (line 67)) -# pyyaml==6.0 (from -c requirements/_base.txt (line 15)) -# pyyaml>=5.4 (from -c requirements/../../../requirements/constraints.txt (line 15)) -# PyYAML>=5.1 (from openapi-spec-validator==0.3.1->-r requirements/_test.in (line 30)) -pyyaml==5.4.1 - -# Not specified in openapi-core setup, but it breaks openapi-core==0.12.0 -# we have a very old version of openapi-core that is causing further troubles -# specifically when we want to have nullable objects -# It does not follow standard 3.0 correctly -# SEE how to specify nullable object in https://stackoverflow.com/questions/40920441/how-to-specify-a-property-can-be-null-or-a-reference-with-swagger - -openapi-spec-validator<0.5.0 -jsonschema<4.0 diff --git a/packages/service-library/requirements/dev.txt b/packages/service-library/requirements/dev.txt index e54aeb608c2..f814830c46b 100644 --- a/packages/service-library/requirements/dev.txt +++ b/packages/service-library/requirements/dev.txt @@ -12,8 +12,10 @@ --requirement _tools.txt # installs this repo's packages ---editable ../settings-library/ ---editable ../pytest-simcore/ +--editable ../common-library +--editable ../models-library +--editable ../settings-library +--editable ../pytest-simcore # current module --editable . diff --git a/packages/service-library/requirements/dev[aiohttp].txt b/packages/service-library/requirements/dev[aiohttp].txt index 298d255dd6a..87748e35d29 100644 --- a/packages/service-library/requirements/dev[aiohttp].txt +++ b/packages/service-library/requirements/dev[aiohttp].txt @@ -13,8 +13,10 @@ --requirement _tools.txt # installs this repo's packages +--editable ../common-library +--editable ../models-library/ --editable ../settings-library/ --editable ../pytest-simcore/ # current module ---editable ."[aiohttp]" +--editable .[aiohttp] diff --git a/packages/service-library/requirements/dev[all].txt b/packages/service-library/requirements/dev[all].txt index c9d4ba4cd94..8b23b6105c5 100644 --- a/packages/service-library/requirements/dev[all].txt +++ b/packages/service-library/requirements/dev[all].txt @@ -14,8 +14,10 @@ --requirement _tools.txt # installs this repo's packages +--editable ../common-library +--editable ../models-library/ --editable ../settings-library/ --editable ../pytest-simcore/ # current module ---editable ."[all]" +--editable .[all] diff --git a/packages/service-library/requirements/dev[fastapi].txt b/packages/service-library/requirements/dev[fastapi].txt index 126db34eb20..d66370d7904 100644 --- a/packages/service-library/requirements/dev[fastapi].txt +++ b/packages/service-library/requirements/dev[fastapi].txt @@ -13,8 +13,10 @@ --requirement _tools.txt # installs this repo's packages +--editable ../common-library +--editable ../models-library/ --editable ../settings-library/ --editable ../pytest-simcore/ # current module ---editable ."[fastapi]" +--editable .[fastapi] diff --git a/packages/service-library/setup.cfg b/packages/service-library/setup.cfg index c867ce9b8d7..874495da36b 100644 --- a/packages/service-library/setup.cfg +++ b/packages/service-library/setup.cfg @@ -16,6 
+16,13 @@ test = pytest [tool:pytest] addopts = --strict-markers asyncio_mode = auto +asyncio_default_fixture_loop_scope = function markers = testit: "marks test to run during development" performance_test: "performance test" + no_cleanup_check_rabbitmq_server_has_no_errors: "no check in rabbitmq logs" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/packages/service-library/setup.py b/packages/service-library/setup.py index f9c63e55ea4..8ab3bd24c47 100644 --- a/packages/service-library/setup.py +++ b/packages/service-library/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -30,25 +29,26 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = read_reqs(CURRENT_DIR / "requirements" / "_test.txt") -SETUP = dict( - name="simcore-service-library", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author="Pedro Crespo-Valero (pcrespov)", - description="Core service library for simcore (or servicelib)", - license="MIT license", - python_requires="~=3.9", - install_requires=tuple(PROD_REQUIREMENTS), - packages=find_packages(where="src"), - package_dir={"": "src"}, - test_suite="tests", - tests_require=tuple(TEST_REQUIREMENTS), - extras_require={ - "test": tuple(TEST_REQUIREMENTS), +SETUP = { + "name": "simcore-service-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Pedro Crespo-Valero (pcrespov)", + "description": "Core service library for simcore (or servicelib)", + "license": "MIT license", + "python_requires": ">=3.10", + "install_requires": tuple(PROD_REQUIREMENTS), + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "test_suite": "tests", + "tests_require": tuple(TEST_REQUIREMENTS), + "extras_require": { "aiohttp": tuple(AIOHTTP_REQUIREMENTS), "fastapi": tuple(FASTAPI_REQUIREMENTS), "all": tuple(AIOHTTP_REQUIREMENTS | FASTAPI_REQUIREMENTS), + "test": tuple(TEST_REQUIREMENTS), }, -) +} if __name__ == "__main__": diff --git a/packages/service-library/src/servicelib/__init__.py b/packages/service-library/src/servicelib/__init__.py index 073fcc30388..ee9e729caaf 100644 --- a/packages/service-library/src/servicelib/__init__.py +++ b/packages/service-library/src/servicelib/__init__.py @@ -2,6 +2,6 @@ """ -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution("simcore-service-library").version +__version__: str = version("simcore-service-library") diff --git a/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py b/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py index da83156a38f..e7b98347c31 100644 --- a/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py +++ b/packages/service-library/src/servicelib/aiohttp/aiopg_utils.py @@ -10,19 +10,18 @@ SEE for underlying psycopg: http://initd.org/psycopg/docs/module.html SEE for extra keywords: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS """ + # TODO: Towards implementing https://github.com/ITISFoundation/osparc-simcore/issues/1195 # TODO: deprecate this module. 
Move utils into retry_policies, simcore_postgres_database.utils_aiopg -import functools import logging -from typing import Optional import sqlalchemy as sa from aiohttp import web from aiopg.sa import Engine from psycopg2 import DatabaseError from psycopg2 import Error as DBAPIError -from tenacity import RetryCallState, retry +from tenacity import RetryCallState from tenacity.after import after_log from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt @@ -65,9 +64,9 @@ def init_pg_tables(dsn: DataSourceName, schema: sa.schema.MetaData): def raise_http_unavailable_error(retry_state: RetryCallState): - # TODO: mark incident on db to determine the quality of service. E.g. next time we do not stop. TIP: obj, query = retry_state.args; obj.app.register_incidents - - exc: DatabaseError = retry_state.outcome.exception() + assert retry_state.outcome # nosec + exc = retry_state.outcome.exception() + assert exc # nosec # StandardError # |__ Warning # |__ Error @@ -100,46 +99,16 @@ class PostgresRetryPolicyUponOperation: WAIT_SECS = 2 ATTEMPTS_COUNT = 3 - def __init__(self, logger: Optional[logging.Logger] = None): + def __init__(self, logger: logging.Logger | None = None): logger = logger or log - self.kwargs = dict( - retry=retry_if_exception_type(DatabaseError), - wait=wait_fixed(self.WAIT_SECS), - stop=stop_after_attempt(self.ATTEMPTS_COUNT), - after=after_log(logger, logging.WARNING), - retry_error_callback=raise_http_unavailable_error, - ) - - -# alias -postgres_service_retry_policy_kwargs = PostgresRetryPolicyUponOperation().kwargs - - -def retry_pg_api(func): - """Decorator to implement postgres service retry policy and - keep global statistics on service attempt fails - """ - # TODO: temporary. For the time being, use instead postgres_service_retry_policy_kwargs - _deco_func = retry(**postgres_service_retry_policy_kwargs)(func) - _total_retry_count = 0 - - @functools.wraps(func) - async def wrapper(*args, **kargs): - nonlocal _total_retry_count - try: - result = await _deco_func(*args, **kargs) - finally: - stats = _deco_func.retry.statistics - _total_retry_count += int(stats.get("attempt_number", 0)) - return result - - def total_retry_count(): - return _total_retry_count - - wrapper.retry = _deco_func.retry - wrapper.total_retry_count = total_retry_count - return wrapper + self.kwargs = { + "retry": retry_if_exception_type(DatabaseError), + "wait": wait_fixed(self.WAIT_SECS), + "stop": stop_after_attempt(self.ATTEMPTS_COUNT), + "after": after_log(logger, logging.WARNING), + "retry_error_callback": raise_http_unavailable_error, + } __all__ = ( diff --git a/packages/service-library/src/servicelib/aiohttp/application.py b/packages/service-library/src/servicelib/aiohttp/application.py index e7ea666f061..2f583cc06ee 100644 --- a/packages/service-library/src/servicelib/aiohttp/application.py +++ b/packages/service-library/src/servicelib/aiohttp/application.py @@ -1,60 +1,82 @@ import asyncio import logging -from typing import Dict, Optional from aiohttp import web from .application_keys import APP_CONFIG_KEY, APP_FIRE_AND_FORGET_TASKS_KEY from .client_session import persistent_client_session -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -async def startup_info(app: web.Application): - print(f"INFO: STARTING UP {app}...", flush=True) +async def _first_call_on_startup(app: web.Application): + # NOTE: name used in tests to mocker.spy + _logger.info("Starting %s ...", f"{app}") -async def shutdown_info(app: web.Application): - 
print(f"INFO: SHUTTING DOWN {app} ...", flush=True) +async def _first_call_on_cleanup(app: web.Application): + # NOTE: name used in tests to mocker.spy + _logger.info("Shutdown completed. Cleaning up %s ...", f"{app}") -async def stop_background_tasks(app: web.Application): - running_tasks = app[APP_FIRE_AND_FORGET_TASKS_KEY] - task: asyncio.Task - for task in running_tasks: +_MAX_WAIT_TIME_TO_CANCEL_SECONDS = 5 + + +async def _cancel_all_fire_and_forget_registered_tasks(app: web.Application): + registered_tasks: set[asyncio.Task] = app[APP_FIRE_AND_FORGET_TASKS_KEY] + for task in registered_tasks: task.cancel() + try: - MAX_WAIT_TIME_SECONDS = 5 results = await asyncio.wait_for( - asyncio.gather(*running_tasks, return_exceptions=True), - timeout=MAX_WAIT_TIME_SECONDS, + asyncio.gather(*registered_tasks, return_exceptions=True), + timeout=_MAX_WAIT_TIME_TO_CANCEL_SECONDS, ) if bad_results := list(filter(lambda r: isinstance(r, Exception), results)): - logger.error( + _logger.error( "Following observation tasks completed with an unexpected error:%s", f"{bad_results}", ) except asyncio.TimeoutError: - logger.error( - "Timed-out waiting for %s to complete. Action: Check why this is blocking", - f"{running_tasks=}", + _logger.exception( + "Timed-out waiting more than %s secs for %s to complete. Action: Check why this is blocking", + _MAX_WAIT_TIME_TO_CANCEL_SECONDS, + f"{registered_tasks=}", ) -def create_safe_application(config: Optional[Dict] = None) -> web.Application: +def create_safe_application(config: dict | None = None) -> web.Application: app = web.Application() # Ensures config entry app[APP_CONFIG_KEY] = config or {} app[APP_FIRE_AND_FORGET_TASKS_KEY] = set() - app.on_startup.append(startup_info) - app.on_cleanup.append(shutdown_info) + # Events are triggered as follows + # SEE https://docs.aiohttp.org/en/stable/web_advanced.html#aiohttp-web-signals + # + # cleanup_ctx[0].setup ---> begin of cleanup_ctx + # cleanup_ctx[1].setup. + # ... + # on_startup[0]. + # on_startup[1]. + # ... + # on_shutdown[0]. + # on_shutdown[1]. + # ... + # cleanup_ctx[1].teardown. + # cleanup_ctx[0].teardown <--- end of cleanup_ctx + # on_cleanup[0]. + # on_cleanup[1]. + # ... + # + app.on_startup.append(_first_call_on_startup) - # Ensures persistent client session - # NOTE: Ensures client session context is run first, + # NOTE: Ensures client session context is run first (setup), # then any further get_client_sesions will be correctly closed app.cleanup_ctx.append(persistent_client_session) - app.on_cleanup.append(stop_background_tasks) + + app.on_cleanup.append(_first_call_on_cleanup) + app.on_cleanup.append(_cancel_all_fire_and_forget_registered_tasks) return app diff --git a/packages/service-library/src/servicelib/aiohttp/application_keys.py b/packages/service-library/src/servicelib/aiohttp/application_keys.py index a1fda531849..3958c860cb0 100644 --- a/packages/service-library/src/servicelib/aiohttp/application_keys.py +++ b/packages/service-library/src/servicelib/aiohttp/application_keys.py @@ -19,17 +19,16 @@ # web.Application keys, i.e. 
app[APP_*_KEY] # APP_CONFIG_KEY: Final[str] = f"{__name__ }.config" -APP_JSONSCHEMA_SPECS_KEY: Final[str] = f"{__name__ }.jsonschema_specs" -APP_OPENAPI_SPECS_KEY: Final[str] = f"{__name__ }.openapi_specs" APP_SETTINGS_KEY: Final[str] = f"{__name__ }.settings" -APP_DB_ENGINE_KEY: Final[str] = f"{__name__ }.db_engine" +APP_AIOPG_ENGINE_KEY: Final[str] = f"{__name__ }.aiopg_engine" APP_CLIENT_SESSION_KEY: Final[str] = f"{__name__ }.session" APP_FIRE_AND_FORGET_TASKS_KEY: Final[str] = f"{__name__}.tasks" APP_RABBITMQ_CLIENT_KEY: Final[str] = f"{__name__}.rabbit_client" +APP_RABBITMQ_RPC_SERVER_KEY: Final[str] = f"{__name__}.rabbit_rpc_server" # # web.Response keys, i.e. app[RSP_*_KEY] diff --git a/packages/service-library/src/servicelib/aiohttp/application_setup.py b/packages/service-library/src/servicelib/aiohttp/application_setup.py index 26c499e19f6..0d52603f965 100644 --- a/packages/service-library/src/servicelib/aiohttp/application_setup.py +++ b/packages/service-library/src/servicelib/aiohttp/application_setup.py @@ -1,13 +1,17 @@ import functools import inspect import logging +from collections.abc import Callable from copy import deepcopy -from datetime import datetime from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple, TypedDict +from typing import Any, Protocol +import arrow from aiohttp import web -from pydantic import parse_obj_as +from pydantic import TypeAdapter +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) from .application_keys import APP_CONFIG_KEY, APP_SETTINGS_KEY @@ -36,7 +40,7 @@ class ModuleCategory(Enum): # ERRORS ------------------------------------------------------------------ -class SkipModuleSetup(Exception): +class SkipModuleSetupError(Exception): def __init__(self, *, reason) -> None: self.reason = reason super().__init__(reason) @@ -52,8 +56,8 @@ class DependencyError(ApplicationSetupError): class SetupMetadataDict(TypedDict): module_name: str - dependencies: List[str] - config_section: str + dependencies: list[str] + config_section: str | None config_enabled: str @@ -61,15 +65,19 @@ class SetupMetadataDict(TypedDict): def _parse_and_validate_arguments( - module_name, depends, config_section, config_enabled -) -> Tuple: + module_name: str, + depends: list[str] | None = None, + config_section: str | None = None, + config_enabled: str | None = None, +) -> tuple[str, list[str], str | None, str]: module_name = module_name.replace(".__init__", "") depends = depends or [] if config_section and config_enabled: - raise ValueError("Can only set config_section or config_enabled but not both") + msg = "Can only set config_section or config_enabled but not both" + raise ValueError(msg) - section = config_section or module_name.split(".")[-1] + section: str | None = config_section or module_name.split(".")[-1] if config_enabled is None: config_enabled = f"{section}.enabled" else: @@ -80,22 +88,23 @@ def _parse_and_validate_arguments( def _is_addon_enabled_from_config( - cfg: Dict[str, Any], dotted_section: str, section + cfg: dict[str, Any], dotted_section: str, section ) -> bool: try: - parts: List[str] = dotted_section.split(".") + parts: list[str] = dotted_section.split(".") # navigates app_config (cfg) searching for section searched_config = deepcopy(cfg) for part in parts: if section and part == "enabled": # if section exists, no need to explicitly enable it - return parse_obj_as(bool, searched_config.get(part, True)) + return 
TypeAdapter(bool).validate_python( + searched_config.get(part, True) + ) searched_config = searched_config[part] except KeyError as ee: - raise ApplicationSetupError( - f"Cannot find required option '{dotted_section}' in app config's section '{ee}'" - ) from ee + msg = f"Cannot find required option '{dotted_section}' in app config's section '{ee}'" + raise ApplicationSetupError(msg) from ee assert isinstance(searched_config, bool) # nosec return searched_config @@ -104,16 +113,14 @@ def _is_addon_enabled_from_config( def _get_app_settings_and_field_name( app: web.Application, arg_module_name: str, - arg_settings_name: Optional[str], + arg_settings_name: str | None, setup_func_name: str, logger: logging.Logger, -) -> Tuple[Optional[_ApplicationSettings], Optional[str]]: - - app_settings: Optional[_ApplicationSettings] = app.get(APP_SETTINGS_KEY) +) -> tuple[_ApplicationSettings | None, str | None]: + app_settings: _ApplicationSettings | None = app.get(APP_SETTINGS_KEY) settings_field_name = arg_settings_name if app_settings: - if not settings_field_name: # FIXME: hard-coded WEBSERVER_ temporary settings_field_name = f"WEBSERVER_{arg_module_name.split('.')[-1].upper()}" @@ -121,10 +128,8 @@ def _get_app_settings_and_field_name( logger.debug("Checking addon's %s ", f"{settings_field_name=}") if not hasattr(app_settings, settings_field_name): - raise ValueError( - f"Invalid option {arg_settings_name=} in module's setup {setup_func_name}. " - f"It must be a field in {app_settings.__class__}" - ) + msg = f"Invalid option arg_settings_name={arg_settings_name!r} in module's setup {setup_func_name}. It must be a field in {app_settings.__class__}" + raise ValueError(msg) return app_settings, settings_field_name @@ -140,16 +145,16 @@ def app_module_setup( module_name: str, category: ModuleCategory, *, - settings_name: Optional[str] = None, - depends: Optional[List[str]] = None, + settings_name: str | None = None, + depends: list[str] | None = None, logger: logging.Logger = log, # TODO: SEE https://github.com/ITISFoundation/osparc-simcore/issues/2008 # TODO: - settings_name becomes module_name!! # TODO: - plugin base should be aware of setup and settings -> model instead of function? # TODO: - depends mechanism will call registered setups List[Union[str, _SetupFunc]] # TODO: - deprecate config options - config_section: Optional[str] = None, - config_enabled: Optional[str] = None, + config_section: str | None = None, + config_enabled: str | None = None, ) -> Callable: """Decorator that marks a function as 'a setup function' for a given module in an application @@ -186,25 +191,24 @@ def setup(app: web.Application): ) def _decorate(setup_func: _SetupFunc): - if "setup" not in setup_func.__name__: logger.warning("Rename '%s' to contain 'setup'", setup_func.__name__) # metadata info def setup_metadata() -> SetupMetadataDict: - return { - "module_name": module_name, - "dependencies": depends, - "config_section": section, - "config_enabled": config_enabled, - } + return SetupMetadataDict( + module_name=module_name, + dependencies=depends, + config_section=section, + config_enabled=config_enabled, + ) # wrapper @functools.wraps(setup_func) def _wrapper(app: web.Application, *args, **kargs) -> bool: # pre-setup head_msg = f"Setup of {module_name}" - started = datetime.now() + started = arrow.utcnow() logger.info( "%s (%s, %s) started ... 
", head_msg, @@ -259,15 +263,13 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool: dep for dep in depends if not is_setup_completed(dep, app) ] if uninitialized: - raise DependencyError( - f"Cannot setup app module '{module_name}' because the " - f"following dependencies are still uninitialized: {uninitialized}" - ) + msg = f"Cannot setup app module '{module_name}' because the following dependencies are still uninitialized: {uninitialized}" + raise DependencyError(msg) # execution of setup try: if is_setup_completed(module_name, app): - raise SkipModuleSetup( + raise SkipModuleSetupError( # noqa: TRY301 reason=f"'{module_name}' was already initialized in {app}." " Setup can only be executed once per app." ) @@ -281,15 +283,15 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool: if completed: # registers completed setup app[APP_SETUP_COMPLETED_KEY].append(module_name) else: - raise SkipModuleSetup( + raise SkipModuleSetupError( # noqa: TRY301 reason="Undefined (setup function returned false)" ) - except SkipModuleSetup as exc: + except SkipModuleSetupError as exc: logger.info("Skipping '%s' setup: %s", module_name, exc.reason) completed = False - elapsed = datetime.now() - started + elapsed = arrow.utcnow() - started logger.info( "%s %s [Elapsed: %3.1f secs]", head_msg, @@ -298,8 +300,8 @@ def _wrapper(app: web.Application, *args, **kargs) -> bool: ) return completed - _wrapper.metadata = setup_metadata - _wrapper.mark_as_simcore_servicelib_setup_func = True + _wrapper.metadata = setup_metadata # type: ignore[attr-defined] + _wrapper.mark_as_simcore_servicelib_setup_func = True # type: ignore[attr-defined] # NOTE: this is added by functools.wraps decorated assert _wrapper.__wrapped__ == setup_func # nosec diff --git a/packages/service-library/src/servicelib/aiohttp/client_session.py b/packages/service-library/src/servicelib/aiohttp/client_session.py index fb5f3868650..40e49c76a4b 100644 --- a/packages/service-library/src/servicelib/aiohttp/client_session.py +++ b/packages/service-library/src/servicelib/aiohttp/client_session.py @@ -1,14 +1,9 @@ -""" async client session - - - SEE https://docs.aiohttp.org/en/latest/client_advanced.html#persistent-session -""" -import logging -from typing import Any, MutableMapping +from collections.abc import AsyncGenerator +from typing import cast from aiohttp import ClientSession, ClientTimeout, web +from common_library.json_serialization import json_dumps -from ..json_serialization import json_dumps from ..utils import ( get_http_client_request_aiohttp_connect_timeout, get_http_client_request_aiohttp_sock_connect_timeout, @@ -16,61 +11,40 @@ ) from .application_keys import APP_CLIENT_SESSION_KEY -log = logging.getLogger(__name__) - - -def get_client_session(app: MutableMapping[str, Any]) -> ClientSession: - """Lazy initialization of ClientSession - - Ensures unique session - """ - session = app.get(APP_CLIENT_SESSION_KEY) - if session is None or session.closed: - # it is important to have fast connection handshakes - # also requests should be as fast as possible - # some services are not that fast to reply - # Setting the time of a request using this client session to 5 seconds totals - timeout_settings = ClientTimeout( - total=get_http_client_request_total_timeout(), - connect=get_http_client_request_aiohttp_connect_timeout(), - sock_connect=get_http_client_request_aiohttp_sock_connect_timeout(), - ) # type: ignore - - app[APP_CLIENT_SESSION_KEY] = session = ClientSession( - timeout=timeout_settings, - json_serialize=json_dumps, - ) 
- return session - -async def persistent_client_session(app: web.Application): +async def persistent_client_session(app: web.Application) -> AsyncGenerator[None, None]: """Ensures a single client session per application IMPORTANT: Use this function ONLY in cleanup context , i.e. app.cleanup_ctx.append(persistent_client_session) - SEE https://docs.aiohttp.org/en/latest/client_advanced.html#aiohttp-persistent-session """ - # lazy creation and holds reference to session at this point - session = get_client_session(app) - log.info("Starting session %s", session) - - yield - - # closes held session - if session is not app.get(APP_CLIENT_SESSION_KEY): - log.error( - "Unexpected client session upon cleanup! expected %s, got %s", - session, - app.get(APP_CLIENT_SESSION_KEY), - ) - - await session.close() - assert session.closed # nosec - - -# FIXME: if get_client_session upon startup fails and session is NOT closed. Implement some kind of gracefull shutdonw https://docs.aiohttp.org/en/latest/client_advanced.html#graceful-shutdown -# TODO: add some tests - - -__all__ = ["APP_CLIENT_SESSION_KEY", "get_client_session", "persistent_client_session"] + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/4628 + + # ANE: it is important to have fast connection handshakes + # also requests should be as fast as possible + # some services are not that fast to reply + timeout_settings = ClientTimeout( + total=get_http_client_request_total_timeout(), + connect=get_http_client_request_aiohttp_connect_timeout(), + sock_connect=get_http_client_request_aiohttp_sock_connect_timeout(), + ) + + async with ClientSession( + timeout=timeout_settings, + json_serialize=json_dumps, + ) as session: + app[APP_CLIENT_SESSION_KEY] = session + yield + + +def get_client_session(app: web.Application) -> ClientSession: + """Refers to the one-and-only client in the app""" + assert APP_CLIENT_SESSION_KEY in app # nosec + return cast(ClientSession, app[APP_CLIENT_SESSION_KEY]) + + +__all__: tuple[str, ...] 
= (
+    "get_client_session",
+    "persistent_client_session",
+)
diff --git a/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py b/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py
new file mode 100644
index 00000000000..88b0338dadf
--- /dev/null
+++ b/packages/service-library/src/servicelib/aiohttp/db_asyncpg_engine.py
@@ -0,0 +1,72 @@
+"""
+Helpers on asyncpg specific for aiohttp
+
+SEE migration aiopg->asyncpg https://github.com/ITISFoundation/osparc-simcore/issues/4529
+"""
+
+import logging
+from typing import Final
+
+from aiohttp import web
+from settings_library.postgres import PostgresSettings
+from simcore_postgres_database.utils_aiosqlalchemy import (  # type: ignore[import-not-found] # this one is unclear
+    get_pg_engine_stateinfo,
+)
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+from ..db_asyncpg_utils import create_async_engine_and_database_ready
+from ..logging_utils import log_context
+
+APP_DB_ASYNC_ENGINE_KEY: Final[str] = f"{__name__ }.AsyncEngine"
+
+
+_logger = logging.getLogger(__name__)
+
+
+def _set_async_engine_to_app_state(app: web.Application, engine: AsyncEngine):
+    if exists := app.get(APP_DB_ASYNC_ENGINE_KEY, None):
+        msg = f"An instance of {type(exists)} already in app[{APP_DB_ASYNC_ENGINE_KEY}]={exists}"
+        raise ValueError(msg)
+
+    app[APP_DB_ASYNC_ENGINE_KEY] = engine
+    return get_async_engine(app)
+
+
+def get_async_engine(app: web.Application) -> AsyncEngine:
+    engine: AsyncEngine = app[APP_DB_ASYNC_ENGINE_KEY]
+    assert engine # nosec
+    return engine
+
+
+async def connect_to_db(app: web.Application, settings: PostgresSettings) -> None:
+    """
+    - db services up, data migrated and ready to use
+    - sets an engine in app state (use `get_async_engine(app)` to retrieve)
+    """
+    if settings.POSTGRES_CLIENT_NAME:
+        settings = settings.model_copy(
+            update={"POSTGRES_CLIENT_NAME": settings.POSTGRES_CLIENT_NAME + "-asyncpg"}
+        )
+
+    with log_context(
+        _logger,
+        logging.INFO,
+        "Connecting app[APP_DB_ASYNC_ENGINE_KEY] to postgres with %s",
+        f"{settings=}",
+    ):
+        engine = await create_async_engine_and_database_ready(settings)
+        _set_async_engine_to_app_state(app, engine)
+
+    _logger.info(
+        "app[APP_DB_ASYNC_ENGINE_KEY] ready : %s",
+        await get_pg_engine_stateinfo(engine),
+    )
+
+
+async def close_db_connection(app: web.Application) -> None:
+    engine = get_async_engine(app)
+    with log_context(
+        _logger, logging.DEBUG, f"app[APP_DB_ASYNC_ENGINE_KEY] disconnect of {engine}"
+    ):
+        if engine:
+            await engine.dispose()
diff --git a/packages/service-library/src/servicelib/aiohttp/dev_error_logger.py b/packages/service-library/src/servicelib/aiohttp/dev_error_logger.py
index 030f49a1b04..f97d8e704cc 100644
--- a/packages/service-library/src/servicelib/aiohttp/dev_error_logger.py
+++ b/packages/service-library/src/servicelib/aiohttp/dev_error_logger.py
@@ -2,9 +2,10 @@
 import traceback
 
 from aiohttp.web import Application, HTTPError, Request, middleware
-from servicelib.aiohttp.typing_extension import Handler, Middleware
 
-logger = logging.getLogger(__name__)
+from .typing_extension import Handler, Middleware
+
+_logger = logging.getLogger(__name__)
 
 _SEP = "|||"
 
@@ -23,14 +24,14 @@ async def middleware_handler(request: Request, handler: Handler):
                 "Traceback": "\n".join(traceback.format_tb(err.__traceback__)),
             }
             formatted_error = "".join(
-                [f"\n{_SEP}{k}{_SEP}\n{v}" for k, v in fields.items()]
+                [f"\n{_SEP}{k!r}{_SEP}\n{v!r}" for k, v in fields.items()]
             )
-            logger.debug("Error 
serialized to client:%s", formatted_error) - raise err + _logger.debug("Error serialized to client:%s", formatted_error) + raise return middleware_handler def setup_dev_error_logger(app: Application) -> None: - logger.info("Setting up dev_error_logger") + _logger.info("Setting up dev_error_logger") app.middlewares.append(_middleware_factory()) diff --git a/packages/service-library/src/servicelib/aiohttp/docker_utils.py b/packages/service-library/src/servicelib/aiohttp/docker_utils.py new file mode 100644 index 00000000000..8e9393e1e69 --- /dev/null +++ b/packages/service-library/src/servicelib/aiohttp/docker_utils.py @@ -0,0 +1,100 @@ +import logging + +import aiohttp +from models_library.docker import DockerGenericTag +from pydantic import TypeAdapter, ValidationError +from settings_library.docker_registry import RegistrySettings +from yarl import URL + +from ..aiohttp import status +from ..docker_utils import ( + DOCKER_HUB_HOST, + DockerImageManifestsV2, + DockerImageMultiArchManifestsV2, + get_image_complete_url, + get_image_name_and_tag, +) +from ..logging_utils import log_catch + +_logger = logging.getLogger(__name__) + + +async def retrieve_image_layer_information( + image: DockerGenericTag, registry_settings: RegistrySettings +) -> DockerImageManifestsV2 | None: + with log_catch(_logger, reraise=False): + async with aiohttp.ClientSession() as session: + image_complete_url = get_image_complete_url(image, registry_settings) + auth = None + if registry_settings.REGISTRY_URL in f"{image_complete_url}": + auth = aiohttp.BasicAuth( + login=registry_settings.REGISTRY_USER, + password=registry_settings.REGISTRY_PW.get_secret_value(), + ) + # NOTE: either of type ubuntu:latest or ubuntu@sha256:lksfdjlskfjsldkfj + docker_image_name, docker_image_tag = get_image_name_and_tag( + image_complete_url + ) + manifest_url = image_complete_url.with_path( + f"v2/{docker_image_name}/manifests/{docker_image_tag}" + ) + + headers = { + "Accept": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json" + } + if DOCKER_HUB_HOST in f"{image_complete_url}": + # we need the docker hub bearer code (https://stackoverflow.com/questions/57316115/get-manifest-of-a-public-docker-image-hosted-on-docker-hub-using-the-docker-regi) + bearer_url = URL("https://auth.docker.io/token").with_query( + { + "service": "registry.docker.io", + "scope": f"repository:{docker_image_name}:pull", + } + ) + async with session.get(bearer_url) as response: + response.raise_for_status() + assert response.status == status.HTTP_200_OK # nosec + bearer_code = (await response.json())["token"] + headers |= { + "Authorization": f"Bearer {bearer_code}", + } + + async with session.get( + manifest_url, headers=headers, auth=auth + ) as response: + # Check if the request was successful + response.raise_for_status() + assert response.status == status.HTTP_200_OK # nosec + + # if the image has multiple architectures + json_response = await response.json() + try: + multi_arch_manifests = TypeAdapter( + DockerImageMultiArchManifestsV2 + ).validate_python(json_response) + # find the correct platform + digest = "" + for manifest in multi_arch_manifests.manifests: + if ( + manifest.get("platform", {}).get("architecture") == "amd64" + and manifest.get("platform", {}).get("os") == "linux" + ): + digest = manifest["digest"] + break + manifest_url = image_complete_url.with_path( + f"v2/{docker_image_name}/manifests/{digest}" + ) + async with session.get( + manifest_url, headers=headers, auth=auth + ) as response: + 
response.raise_for_status() + assert response.status == status.HTTP_200_OK # nosec + json_response = await response.json() + return TypeAdapter(DockerImageManifestsV2).validate_python( + json_response + ) + + except ValidationError: + return TypeAdapter(DockerImageManifestsV2).validate_python( + json_response + ) + return None diff --git a/packages/service-library/src/servicelib/aiohttp/incidents.py b/packages/service-library/src/servicelib/aiohttp/incidents.py index 445c4976f9b..b723fe6da48 100644 --- a/packages/service-library/src/servicelib/aiohttp/incidents.py +++ b/packages/service-library/src/servicelib/aiohttp/incidents.py @@ -1,13 +1,11 @@ -from typing import Any, Callable, Generic, List, Optional, TypeVar - -import attr - -# UTILS --- +from collections.abc import Callable +from dataclasses import dataclass, field +from typing import Any, Generic, TypeVar ItemT = TypeVar("ItemT") -@attr.s(auto_attribs=True) +@dataclass class LimitedOrderedStack(Generic[ItemT]): """Container designed only to keep the most relevant items (i.e called max) and drop @@ -20,10 +18,10 @@ class LimitedOrderedStack(Generic[ItemT]): """ max_size: int = 100 - order_by: Optional[Callable[[ItemT], Any]] = None + order_by: Callable[[ItemT], Any] | None = None - _items: List[ItemT] = attr.ib(init=False, default=attr.Factory(list)) - _hits: int = attr.ib(init=False, default=0) + _items: list[ItemT] = field(default_factory=list, init=False) + _hits: int = field(default=0, init=False) def __len__(self) -> int: # called also for __bool__ @@ -38,13 +36,13 @@ def hits(self) -> int: return self._hits @property - def max_item(self) -> Optional[ItemT]: + def max_item(self) -> ItemT | None: if self._items: return self._items[0] return None @property - def min_item(self) -> Optional[ItemT]: + def min_item(self) -> ItemT | None: if self._items: return self._items[-1] return None @@ -54,7 +52,11 @@ def append(self, item: ItemT): self._hits += 1 # sort is based on the __lt__ defined in ItemT - self._items = sorted(self._items, key=self.order_by, reverse=True) + extras: dict[str, Any] = {} + if self.order_by is not None: + extras["key"] = self.order_by + self._items = sorted(self._items, reverse=True, **extras) + if len(self._items) > self.max_size: self._items.pop() # min is dropped @@ -62,11 +64,11 @@ def append(self, item: ItemT): # INCIDENT ISSUES --- -@attr.s(auto_attribs=True) +@dataclass class BaseIncident: msg: str -@attr.s(auto_attribs=True) +@dataclass class SlowCallback(BaseIncident): delay_secs: float diff --git a/packages/service-library/src/servicelib/aiohttp/jsonschema_specs.py b/packages/service-library/src/servicelib/aiohttp/jsonschema_specs.py deleted file mode 100644 index 8c58dd4f02e..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/jsonschema_specs.py +++ /dev/null @@ -1,56 +0,0 @@ -import json -from pathlib import Path -from typing import Dict - -from aiohttp import ClientSession -from jsonschema import ValidationError -from yarl import URL - -from .jsonschema_validation import validate_instance - - -def _load_from_path(filepath: Path) -> Dict: - with filepath.open() as f: - spec_dict = json.load(f) - return spec_dict - - -async def _load_from_url(session: ClientSession, url: URL) -> Dict: - async with session.get(url) as resp: - text = await resp.text() - spec_dict = json.loads(text) - return spec_dict - - -async def create_jsonschema_specs( - location: Path, session: ClientSession = None -) -> Dict: - """Loads specs from a given location (url or path), - validates them and returns a 
working instance - - If location is an url, the specs are loaded asyncronously - - Both location types (url and file) are intentionally managed - by the same function call to enforce developer always supporting - both options. Notice that the url location enforces - the consumer context to be asyncronous. - - :param location: url or path - :return: validated jsonschema specifications object - :rtype: Dict - """ - if URL(str(location)).host: - spec_dict = await _load_from_url(session, URL(location)) - else: - path = Path(location).expanduser().resolve() # pylint: disable=no-member - spec_dict = _load_from_path(path) - - try: - # will throw a SchemaError if the schema is bad. - # FIXME: validate_instance in this case logs an error when raising the exception! TMP patched adding log_errors flag - validate_instance(None, spec_dict, log_errors=False) - except ValidationError: - # no instance provided so it makes sense for a valid schema - pass - - return spec_dict diff --git a/packages/service-library/src/servicelib/aiohttp/jsonschema_validation.py b/packages/service-library/src/servicelib/aiohttp/jsonschema_validation.py deleted file mode 100644 index 5379f7e237e..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/jsonschema_validation.py +++ /dev/null @@ -1,21 +0,0 @@ -import logging -from typing import Dict - -from jsonschema import SchemaError, ValidationError, validate - -log = logging.getLogger(__name__) - - -def validate_instance(instance: Dict, schema: Dict, *, log_errors=True): - try: - validate(instance, schema) - except ValidationError: - if log_errors: - log.exception("%s\n%s\nNode validation error:", f"{instance}", f"{schema=}") - raise - except SchemaError: - if log_errors: - log.exception( - "%s\n%s\nSchema valdation error:", f"{instance}", f"{schema=}" - ) - raise diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py index 602ba5d4df7..b38004b3200 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_dependencies.py @@ -10,11 +10,13 @@ def get_tasks_manager(app: web.Application) -> TasksManager: - return app[APP_LONG_RUNNING_TASKS_MANAGER_KEY] + output: TasksManager = app[APP_LONG_RUNNING_TASKS_MANAGER_KEY] + return output def get_task_context(request: web.Request) -> dict[str, Any]: - return request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY] + output: dict[str, Any] = request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY] + return output def create_task_name_from_request(request: web.Request) -> str: diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py index d8fee91ac64..4534d7c951c 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_error_handlers.py @@ -1,24 +1,30 @@ +import logging + from aiohttp import web +from common_library.json_serialization import json_dumps -from ...json_serialization import json_dumps from ...long_running_tasks._errors import ( TaskCancelledError, TaskNotCompletedError, TaskNotFoundError, ) +_logger = logging.getLogger(__name__) + @web.middleware async def base_long_running_error_handler(request, handler): try: return await handler(request) except 
(TaskNotFoundError, TaskNotCompletedError) as exc: - error_fields = dict(code=exc.code, message=f"{exc}") + _logger.debug("", exc_info=True) + error_fields = {"code": exc.code, "message": f"{exc}"} raise web.HTTPNotFound( - reason=f"{json_dumps(error_fields)}", + text=f"{json_dumps(error_fields)}", ) from exc except TaskCancelledError as exc: # NOTE: only use-case would be accessing an already cancelled task # which should not happen, so we return a conflict - error_fields = dict(code=exc.code, message=f"{exc}") - raise web.HTTPConflict(reason=f"{json_dumps(error_fields)}") from exc + _logger.debug("", exc_info=True) + error_fields = {"code": exc.code, "message": f"{exc}"} + raise web.HTTPConflict(text=f"{json_dumps(error_fields)}") from exc diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py index 4ee69a46c84..1906c0bc93f 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_routes.py @@ -1,17 +1,18 @@ import logging +from typing import Any from aiohttp import web +from common_library.json_serialization import json_dumps from pydantic import BaseModel -from servicelib.aiohttp.requests_validation import parse_request_path_parameters_as +from servicelib.aiohttp import status -from ...json_serialization import json_dumps from ...long_running_tasks._errors import TaskNotCompletedError, TaskNotFoundError from ...long_running_tasks._models import TaskGet, TaskId, TaskStatus from ...long_running_tasks._task import TrackedTask -from ...mimetype_constants import MIMETYPE_APPLICATION_JSON +from ..requests_validation import parse_request_path_parameters_as from ._dependencies import get_task_context, get_tasks_manager -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) routes = web.RouteTableDef() @@ -57,7 +58,7 @@ async def get_task_status(request: web.Request) -> web.Response: @routes.get("/{task_id}/result", name="get_task_result") -async def get_task_result(request: web.Request) -> web.Response: +async def get_task_result(request: web.Request) -> web.Response | Any: path_params = parse_request_path_parameters_as(_PathParam, request) tasks_manager = get_tasks_manager(request.app) task_context = get_task_context(request) @@ -88,4 +89,12 @@ async def cancel_and_delete_task(request: web.Request) -> web.Response: tasks_manager = get_tasks_manager(request.app) task_context = get_task_context(request) await tasks_manager.remove_task(path_params.task_id, with_task_context=task_context) - raise web.HTTPNoContent(content_type=MIMETYPE_APPLICATION_JSON) + return web.json_response(status=status.HTTP_204_NO_CONTENT) + + +__all__: tuple[str, ...] 
= ( + "get_tasks_manager", + "TaskId", + "TaskGet", + "TaskStatus", +) diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py index 873cac812dc..d0c96699462 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/_server.py @@ -1,12 +1,14 @@ import asyncio import logging +from collections.abc import AsyncGenerator, Callable from functools import wraps -from typing import Any, AsyncGenerator, Callable +from typing import Any from aiohttp import web -from pydantic import PositiveFloat -from servicelib.json_serialization import json_dumps +from common_library.json_serialization import json_dumps +from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter +from ...aiohttp import status from ...long_running_tasks._models import TaskGet from ...long_running_tasks._task import ( TaskContext, @@ -15,6 +17,7 @@ start_task, ) from ..typing_extension import Handler +from . import _routes from ._constants import ( APP_LONG_RUNNING_TASKS_MANAGER_KEY, MINUTE, @@ -22,9 +25,8 @@ ) from ._dependencies import create_task_name_from_request, get_tasks_manager from ._error_handlers import base_long_running_error_handler -from ._routes import routes -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) def no_ops_decorator(handler: Handler): @@ -41,29 +43,38 @@ async def _wrap(request: web.Request): async def start_long_running_task( - request: web.Request, - task: TaskProtocol, + # NOTE: positional argument are suffixed with "_" to avoid name conflicts with "task_kwargs" keys + request_: web.Request, + task_: TaskProtocol, *, fire_and_forget: bool = False, task_context: TaskContext, **task_kwargs: Any, ) -> web.Response: - task_manager = get_tasks_manager(request.app) - task_name = create_task_name_from_request(request) + task_manager = get_tasks_manager(request_.app) + task_name = create_task_name_from_request(request_) task_id = None try: task_id = start_task( task_manager, - task, + task_, fire_and_forget=fire_and_forget, task_context=task_context, task_name=task_name, **task_kwargs, ) - status_url = request.app.router["get_task_status"].url_for(task_id=task_id) - result_url = request.app.router["get_task_result"].url_for(task_id=task_id) - abort_url = request.app.router["cancel_and_delete_task"].url_for( - task_id=task_id + assert request_.transport # nosec + ip_addr, port = request_.transport.get_extra_info( + "sockname" + ) # https://docs.python.org/3/library/asyncio-protocol.html#asyncio.BaseTransport.get_extra_info + status_url = TypeAdapter(AnyHttpUrl).validate_python( + f"http://{ip_addr}:{port}{request_.app.router['get_task_status'].url_for(task_id=task_id)}" # NOSONAR + ) + result_url = TypeAdapter(AnyHttpUrl).validate_python( + f"http://{ip_addr}:{port}{request_.app.router['get_task_result'].url_for(task_id=task_id)}" # NOSONAR + ) + abort_url = TypeAdapter(AnyHttpUrl).validate_python( + f"http://{ip_addr}:{port}{request_.app.router['cancel_and_delete_task'].url_for(task_id=task_id)}" # NOSONAR ) task_get = TaskGet( task_id=task_id, @@ -74,17 +85,36 @@ async def start_long_running_task( ) return web.json_response( data={"data": task_get}, - status=web.HTTPAccepted.status_code, + status=status.HTTP_202_ACCEPTED, dumps=json_dumps, ) except asyncio.CancelledError: # cancel the task, the client has disconnected if task_id: - task_manager = 
get_tasks_manager(request.app) + task_manager = get_tasks_manager(request_.app) await task_manager.cancel_task(task_id, with_task_context=None) raise +def _wrap_and_add_routes( + app: web.Application, + router_prefix: str, + handler_check_decorator: Callable, + task_request_context_decorator: Callable, +): + # add routing paths + for route in _routes.routes: + assert isinstance(route, web.RouteDef) # nosec + app.router.add_route( + method=route.method, + path=f"{router_prefix}{route.path}", + handler=handler_check_decorator( + task_request_context_decorator(route.handler) + ), + **route.kwargs, + ) + + def setup( app: web.Application, *, @@ -104,16 +134,7 @@ def setup( task is considered stale """ - async def on_startup(app: web.Application) -> AsyncGenerator[None, None]: - # add routing paths - for route in routes: - app.router.add_route( - route.method, # type: ignore - f"{router_prefix}{route.path}", # type: ignore - handler_check_decorator(task_request_context_decorator(route.handler)), # type: ignore - **route.kwargs, # type: ignore - ) - + async def on_cleanup_ctx(app: web.Application) -> AsyncGenerator[None, None]: # add components to state app[ APP_LONG_RUNNING_TASKS_MANAGER_KEY @@ -130,4 +151,12 @@ async def on_startup(app: web.Application) -> AsyncGenerator[None, None]: # cleanup await long_running_task_manager.close() - app.cleanup_ctx.append(on_startup) + # add routing (done at setup-time) + _wrap_and_add_routes( + app, + router_prefix=router_prefix, + handler_check_decorator=handler_check_decorator, + task_request_context_decorator=task_request_context_decorator, + ) + + app.cleanup_ctx.append(on_cleanup_ctx) diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py index 4695eb1d9f7..e29fabc87fe 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/client.py @@ -1,43 +1,41 @@ import asyncio -from dataclasses import dataclass -from typing import Any, AsyncGenerator, Coroutine, Final, Optional +import logging +from collections.abc import AsyncGenerator +from typing import Any -from aiohttp import ClientConnectionError, ClientSession, web -from pydantic import Json +from aiohttp import ClientConnectionError, ClientSession from tenacity import TryAgain, retry -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_random_exponential from yarl import URL -from ..rest_responses import unwrap_envelope +from ...long_running_tasks._constants import DEFAULT_POLL_INTERVAL_S, HOUR +from ...long_running_tasks._models import LRTask, RequestBody +from ...rest_responses import unwrap_envelope_if_required +from .. 
import status from .server import TaskGet, TaskId, TaskProgress, TaskStatus -RequestBody = Json +_logger = logging.getLogger(__name__) -_MINUTE: Final[int] = 60 -_HOUR: Final[int] = 60 * _MINUTE -_DEFAULT_POLL_INTERVAL_S: Final[float] = 1 -_DEFAULT_AIOHTTP_RETRY_POLICY = dict( - retry=retry_if_exception_type(ClientConnectionError), - wait=wait_random_exponential(max=20), - stop=stop_after_delay(60), - reraise=True, -) + +_DEFAULT_AIOHTTP_RETRY_POLICY: dict[str, Any] = { + "retry": retry_if_exception_type(ClientConnectionError), + "wait": wait_random_exponential(max=20), + "stop": stop_after_delay(60), + "reraise": True, + "before_sleep": before_sleep_log(_logger, logging.INFO), +} @retry(**_DEFAULT_AIOHTTP_RETRY_POLICY) -async def _start( - session: ClientSession, url: URL, json: Optional[RequestBody] -) -> TaskGet: +async def _start(session: ClientSession, url: URL, json: RequestBody | None) -> TaskGet: async with session.post(url, json=json) as response: response.raise_for_status() - data, error = unwrap_envelope(await response.json()) - assert not error # nosec - assert data is not None # nosec - task = TaskGet.parse_obj(data) - return task + data = unwrap_envelope_if_required(await response.json()) + return TaskGet.model_validate(data) @retry(**_DEFAULT_AIOHTTP_RETRY_POLICY) @@ -48,79 +46,53 @@ async def _wait_for_completion( client_timeout: int, ) -> AsyncGenerator[TaskProgress, None]: try: - async for attempt in AsyncRetrying( stop=stop_after_delay(client_timeout), reraise=True, retry=retry_if_exception_type(TryAgain), + before_sleep=before_sleep_log(_logger, logging.DEBUG), ): with attempt: async with session.get(status_url) as response: response.raise_for_status() - data, error = unwrap_envelope(await response.json()) - assert not error # nosec - assert data is not None # nosec - task_status = TaskStatus.parse_obj(data) + data = unwrap_envelope_if_required(await response.json()) + task_status = TaskStatus.model_validate(data) yield task_status.task_progress if not task_status.done: await asyncio.sleep( float( - response.headers.get( - "retry-after", _DEFAULT_POLL_INTERVAL_S - ) + response.headers.get("retry-after", DEFAULT_POLL_INTERVAL_S) ) ) - raise TryAgain( - f"{task_id=}, {task_status.started=} has " - f"status: '{task_status.task_progress.message}'" - f" {task_status.task_progress.percent}%" - ) + msg = f"{task_id=}, {task_status.started=} has status: '{task_status.task_progress.message}' {task_status.task_progress.percent}%" + raise TryAgain(msg) # noqa: TRY301 + except TryAgain as exc: # this is a timeout - raise asyncio.TimeoutError( - f"Long running task {task_id}, calling to {status_url} timed-out after {client_timeout} seconds" - ) from exc + msg = f"Long running task {task_id}, calling to {status_url} timed-out after {client_timeout} seconds" + raise TimeoutError(msg) from exc @retry(**_DEFAULT_AIOHTTP_RETRY_POLICY) async def _task_result(session: ClientSession, result_url: URL) -> Any: async with session.get(result_url) as response: response.raise_for_status() - if response.status != web.HTTPNoContent.status_code: - data, error = unwrap_envelope(await response.json()) - assert not error # nosec - assert data # nosec - return data + if response.status != status.HTTP_204_NO_CONTENT: + return unwrap_envelope_if_required(await response.json()) + return None @retry(**_DEFAULT_AIOHTTP_RETRY_POLICY) async def _abort_task(session: ClientSession, abort_url: URL) -> None: async with session.delete(abort_url) as response: response.raise_for_status() - data, error = 
unwrap_envelope(await response.json()) - assert not error # nosec - assert not data # nosec - - -@dataclass(frozen=True) -class LRTask: - progress: TaskProgress - _result: Optional[Coroutine[Any, Any, Any]] = None - - def done(self) -> bool: - return self._result is not None - - async def result(self) -> Any: - if not self._result: - raise ValueError("No result ready!") - return await self._result async def long_running_task_request( session: ClientSession, url: URL, - json: Optional[RequestBody] = None, - client_timeout: int = 1 * _HOUR, + json: RequestBody | None = None, + client_timeout: int = 1 * HOUR, ) -> AsyncGenerator[LRTask, None]: """Will use the passed `ClientSession` to call an oSparc long running task `url` passing `json` as request body. @@ -136,7 +108,7 @@ async def long_running_task_request( async for task_progress in _wait_for_completion( session, task.task_id, - url.with_path(task.status_href, encoded=True), + URL(task.status_href), client_timeout, ): last_progress = task_progress @@ -144,12 +116,13 @@ async def long_running_task_request( assert last_progress # nosec yield LRTask( progress=last_progress, - _result=_task_result( - session, url.with_path(task.result_href, encoded=True) - ), + _result=_task_result(session, URL(task.result_href)), ) - except (asyncio.CancelledError, asyncio.TimeoutError): + except (TimeoutError, asyncio.CancelledError): if task: - await _abort_task(session, url.with_path(task.abort_href, encoded=True)) + await _abort_task(session, URL(task.abort_href)) raise + + +__all__: tuple[str, ...] = ("LRTask",) diff --git a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py index b1766f8035f..55d1295c197 100644 --- a/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py +++ b/packages/service-library/src/servicelib/aiohttp/long_running_tasks/server.py @@ -5,6 +5,7 @@ The server only has to return a `TaskId` in the handler creating the long running task. """ + from ...long_running_tasks._errors import TaskAlreadyRunningError, TaskCancelledError from ...long_running_tasks._models import ProgressMessage, ProgressPercent from ...long_running_tasks._task import ( @@ -14,12 +15,17 @@ TasksManager, TaskStatus, ) -from ._dependencies import create_task_name_from_request, get_tasks_manager +from ._dependencies import ( + create_task_name_from_request, + get_task_context, + get_tasks_manager, +) from ._routes import TaskGet from ._server import setup, start_long_running_task __all__: tuple[str, ...] 
= ( "create_task_name_from_request", + "get_task_context", "get_tasks_manager", "ProgressMessage", "ProgressPercent", diff --git a/packages/service-library/src/servicelib/aiohttp/monitor_services.py b/packages/service-library/src/servicelib/aiohttp/monitor_services.py index 68a02dcc512..ad4c2d8fbbb 100644 --- a/packages/service-library/src/servicelib/aiohttp/monitor_services.py +++ b/packages/service-library/src/servicelib/aiohttp/monitor_services.py @@ -1,5 +1,4 @@ from enum import Enum -from typing import Union from aiohttp import web from prometheus_client import Counter @@ -23,38 +22,39 @@ # -kSERVICE_STARTED = f"{__name__}.services_started" -kSERVICE_STOPPED = f"{__name__}.services_stopped" +MONITOR_SERVICE_STARTED = f"{__name__}.services_started" +MONITOR_SERVICE_STOPPED = f"{__name__}.services_stopped" -SERVICE_STARTED_LABELS: list[str] = [ +MONITOR_SERVICE_STARTED_LABELS: list[str] = [ "service_key", "service_tag", + "simcore_user_agent", ] -SERVICE_STOPPED_LABELS: list[str] = [ +MONITOR_SERVICE_STOPPED_LABELS: list[str] = [ "service_key", "service_tag", "result", + "simcore_user_agent", ] def add_instrumentation( app: web.Application, reg: CollectorRegistry, app_name: str ) -> None: - - app[kSERVICE_STARTED] = Counter( + app[MONITOR_SERVICE_STARTED] = Counter( name="services_started_total", documentation="Counts the services started", - labelnames=SERVICE_STARTED_LABELS, + labelnames=MONITOR_SERVICE_STARTED_LABELS, namespace="simcore", subsystem=app_name, registry=reg, ) - app[kSERVICE_STOPPED] = Counter( + app[MONITOR_SERVICE_STOPPED] = Counter( name="services_stopped_total", documentation="Counts the services stopped", - labelnames=SERVICE_STOPPED_LABELS, + labelnames=MONITOR_SERVICE_STOPPED_LABELS, namespace="simcore", subsystem=app_name, registry=reg, @@ -71,10 +71,12 @@ def service_started( app: web.Application, service_key: str, service_tag: str, + simcore_user_agent: str, ) -> None: - app[kSERVICE_STARTED].labels( + app[MONITOR_SERVICE_STARTED].labels( service_key=service_key, service_tag=service_tag, + simcore_user_agent=simcore_user_agent, ).inc() @@ -83,10 +85,12 @@ def service_stopped( app: web.Application, service_key: str, service_tag: str, - result: Union[ServiceResult, str], + simcore_user_agent: str, + result: ServiceResult | str, ) -> None: - app[kSERVICE_STOPPED].labels( + app[MONITOR_SERVICE_STOPPED].labels( service_key=service_key, service_tag=service_tag, + simcore_user_agent=simcore_user_agent, result=result.name if isinstance(result, ServiceResult) else result, ).inc() diff --git a/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py b/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py index 833032d378e..24d7328d351 100644 --- a/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py +++ b/packages/service-library/src/servicelib/aiohttp/monitor_slow_callbacks.py @@ -1,30 +1,33 @@ import asyncio.events import sys import time -from typing import List from pyinstrument import Profiler -from .incidents import SlowCallback +from .incidents import LimitedOrderedStack, SlowCallback -def enable(slow_duration_secs: float, incidents: List[SlowCallback]) -> None: +def enable( + slow_duration_secs: float, incidents: LimitedOrderedStack[SlowCallback] +) -> None: """Based in from aiodebug Patches ``asyncio.events.Handle`` to report an incident every time a callback takes ``slow_duration_secs`` seconds or more to run. 
""" # pylint: disable=protected-access - from aiodebug.logging_compat import get_logger + from aiodebug.logging_compat import get_logger # type: ignore[import-untyped] - logger = get_logger(__name__) - _run = asyncio.events.Handle._run + aio_debug_logger = get_logger(__name__) + _run = asyncio.events.Handle._run # noqa: SLF001 + + profiler = Profiler(interval=slow_duration_secs, async_mode="disabled") def instrumented(self): # unsetting profiler, helps with development mode and tests sys.setprofile(None) - with Profiler(interval=slow_duration_secs) as profiler: + with profiler: t0 = time.monotonic() retval = _run(self) @@ -39,8 +42,10 @@ def instrumented(self): unicode=True, color=False, show_all=True ) incidents.append(SlowCallback(msg=profiler_result, delay_secs=dt)) - logger.warning("Executing took %.3f seconds\n%s", dt, profiler_result) + aio_debug_logger.warning( + "Executing took %.3f seconds\n%s", dt, profiler_result + ) return retval - asyncio.events.Handle._run = instrumented + asyncio.events.Handle._run = instrumented # type: ignore[method-assign] # noqa: SLF001 diff --git a/packages/service-library/src/servicelib/aiohttp/monitoring.py b/packages/service-library/src/servicelib/aiohttp/monitoring.py index 7bbed9c196b..84472c7e2f3 100644 --- a/packages/service-library/src/servicelib/aiohttp/monitoring.py +++ b/packages/service-library/src/servicelib/aiohttp/monitoring.py @@ -1,132 +1,47 @@ -""" Enables monitoring of some quantities needed for diagnostics - -""" +"""Enables monitoring of some quantities needed for diagnostics""" import asyncio import logging -import time -from typing import Awaitable, Callable, Final, Optional +from collections.abc import Awaitable, Callable +from time import perf_counter +from typing import Final -import prometheus_client from aiohttp import web -from prometheus_client import ( +from prometheus_client.openmetrics.exposition import ( CONTENT_TYPE_LATEST, - Counter, - Gauge, - GCCollector, - PlatformCollector, - ProcessCollector, - Summary, + generate_latest, ) from prometheus_client.registry import CollectorRegistry -from servicelib.aiohttp.typing_extension import Handler -from ..common_headers import X_SIMCORE_USER_AGENT +from ..common_headers import ( + UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE, + X_SIMCORE_USER_AGENT, +) from ..logging_utils import log_catch +from ..prometheus_metrics import ( + PrometheusMetrics, + get_prometheus_metrics, + record_request_metrics, + record_response_metrics, +) +from .typing_extension import Handler log = logging.getLogger(__name__) - -# -# CAUTION CAUTION CAUTION NOTE: -# Be very careful with metrics. pay attention to metrics cardinatity. -# Each time series takes about 3kb of overhead in Prometheus -# -# CAUTION: every unique combination of key-value label pairs represents a new time series -# -# If a metrics is not needed, don't add it!! It will collapse the application AND prometheus -# -# references: -# https://prometheus.io/docs/practices/naming/ -# https://www.robustperception.io/cardinality-is-key -# https://www.robustperception.io/why-does-prometheus-use-so-much-ram -# https://promcon.io/2019-munich/slides/containing-your-cardinality.pdf -# https://grafana.com/docs/grafana-cloud/how-do-i/control-prometheus-metrics-usage/usage-analysis-explore/ -# - - -# This creates the following basic metrics: -# # HELP process_virtual_memory_bytes Virtual memory size in bytes. 
-# # TYPE process_virtual_memory_bytes gauge -# process_virtual_memory_bytes 8.12425216e+08 -# # HELP process_resident_memory_bytes Resident memory size in bytes. -# # TYPE process_resident_memory_bytes gauge -# process_resident_memory_bytes 1.2986368e+08 -# # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. -# # TYPE process_start_time_seconds gauge -# process_start_time_seconds 1.6418063518e+09 -# # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. -# # TYPE process_cpu_seconds_total counter -# process_cpu_seconds_total 9.049999999999999 -# # HELP process_open_fds Number of open file descriptors. -# # TYPE process_open_fds gauge -# process_open_fds 29.0 -# # HELP process_max_fds Maximum number of open file descriptors. -# # TYPE process_max_fds gauge -# process_max_fds 1.048576e+06 -# # HELP python_info Python platform information -# # TYPE python_info gauge -# python_info{implementation="CPython",major="3",minor="8",patchlevel="10",version="3.9.12"} 1.0 -# # HELP python_gc_objects_collected_total Objects collected during gc -# # TYPE python_gc_objects_collected_total counter -# python_gc_objects_collected_total{generation="0"} 7328.0 -# python_gc_objects_collected_total{generation="1"} 614.0 -# python_gc_objects_collected_total{generation="2"} 0.0 -# # HELP python_gc_objects_uncollectable_total Uncollectable object found during GC -# # TYPE python_gc_objects_uncollectable_total counter -# python_gc_objects_uncollectable_total{generation="0"} 0.0 -# python_gc_objects_uncollectable_total{generation="1"} 0.0 -# python_gc_objects_uncollectable_total{generation="2"} 0.0 -# # HELP python_gc_collections_total Number of times this generation was collected -# # TYPE python_gc_collections_total counter -# python_gc_collections_total{generation="0"} 628.0 -# python_gc_collections_total{generation="1"} 57.0 -# python_gc_collections_total{generation="2"} 5.0 -# # HELP http_requests_total Total requests count -# # TYPE http_requests_total counter -# http_requests_total{app_name="simcore_service_webserver",endpoint="/v0/",http_status="200",method="GET"} 15.0 -# # HELP http_requests_created Total requests count -# # TYPE http_requests_created gauge -# http_requests_created{app_name="simcore_service_webserver",endpoint="/v0/",http_status="200",method="GET"} 1.6418063614890063e+09 -# # HELP http_in_flight_requests Number of requests in process -# # TYPE http_in_flight_requests gauge -# http_in_flight_requests{app_name="simcore_service_webserver",endpoint="/v0/",method="GET"} 0.0 -# http_in_flight_requests{app_name="simcore_service_webserver",endpoint="/metrics",method="GET"} 1.0 -# # HELP http_request_latency_seconds Time processing a request -# # TYPE http_request_latency_seconds summary -# http_request_latency_seconds_count{app_name="simcore_service_webserver",endpoint="/v0/",method="GET"} 15.0 -# http_request_latency_seconds_sum{app_name="simcore_service_webserver",endpoint="/v0/",method="GET"} 0.007384857000033662 -# http_request_latency_seconds_count{app_name="simcore_service_webserver",endpoint="/metrics",method="GET"} 0.0 -# http_request_latency_seconds_sum{app_name="simcore_service_webserver",endpoint="/metrics",method="GET"} 0.0 -# # HELP http_request_latency_seconds_created Time processing a request -# # TYPE http_request_latency_seconds_created gauge -# http_request_latency_seconds_created{app_name="simcore_service_webserver",endpoint="/v0/",method="GET"} 1.6418063614873598e+09 -# 
http_request_latency_seconds_created{app_name="simcore_service_webserver",endpoint="/metrics",method="GET"} 1.641806371709292e+09 - - -kREQUEST_COUNT = f"{__name__}.request_count" -kINFLIGHTREQUESTS = f"{__name__}.in_flight_requests" -kRESPONSELATENCY = f"{__name__}.in_response_latency" - -kCOLLECTOR_REGISTRY = f"{__name__}.collector_registry" -kPROCESS_COLLECTOR = f"{__name__}.collector_process" -kPLATFORM_COLLECTOR = f"{__name__}.collector_platform" -kGC_COLLECTOR = f"{__name__}.collector_gc" - -UNDEFINED_REGULAR_USER_AGENT: Final[str] = "undefined" +_PROMETHEUS_METRICS: Final[str] = f"{__name__}.prometheus_metrics" # noqa: N816 def get_collector_registry(app: web.Application) -> CollectorRegistry: - return app[kCOLLECTOR_REGISTRY] + metrics = app[_PROMETHEUS_METRICS] + assert isinstance(metrics, PrometheusMetrics) # nosec + return metrics.registry async def metrics_handler(request: web.Request): registry = get_collector_registry(request.app) # NOTE: Cannot use ProcessPoolExecutor because registry is not pickable - result = await request.loop.run_in_executor( - None, prometheus_client.generate_latest, registry - ) + result = await request.loop.run_in_executor(None, generate_latest, registry) response = web.Response(body=result) response.content_type = CONTENT_TYPE_LATEST return response @@ -138,43 +53,39 @@ async def metrics_handler(request: web.Request): def middleware_factory( app_name: str, - enter_middleware_cb: Optional[EnterMiddlewareCB], - exit_middleware_cb: Optional[ExitMiddlewareCB], + enter_middleware_cb: EnterMiddlewareCB | None, + exit_middleware_cb: ExitMiddlewareCB | None, ): @web.middleware async def middleware_handler(request: web.Request, handler: Handler): # See https://prometheus.io/docs/concepts/metric_types - log_exception = None + log_exception: BaseException | None = None resp: web.StreamResponse = web.HTTPInternalServerError( reason="Unexpected exception" ) - # NOTE: a canonical endpoint is `/v0/projects/{project_id}/node/{node_uuid}`` - # vs a resolved endpoint `/v0/projects/51e4bdf4-2cc7-43be-85a6-627a4c0afb77/nodes/51e4bdf4-2cc7-43be-85a6-627a4c0afb77` - # which would create way to many different endpoints for monitoring! canonical_endpoint = request.path if request.match_info.route.resource: canonical_endpoint = request.match_info.route.resource.canonical - start_time = time.time() + start_time = perf_counter() try: if enter_middleware_cb: with log_catch(logger=log, reraise=False): await enter_middleware_cb(request) - in_flight_gauge = request.app[kINFLIGHTREQUESTS] - response_summary = request.app[kRESPONSELATENCY] - - with in_flight_gauge.labels( - app_name, - request.method, - canonical_endpoint, - request.headers.get(X_SIMCORE_USER_AGENT, UNDEFINED_REGULAR_USER_AGENT), - ).track_inprogress(), response_summary.labels( - app_name, - request.method, - canonical_endpoint, - request.headers.get(X_SIMCORE_USER_AGENT, UNDEFINED_REGULAR_USER_AGENT), - ).time(): + metrics = request.app[_PROMETHEUS_METRICS] + assert isinstance(metrics, PrometheusMetrics) # nosec + + user_agent = request.headers.get( + X_SIMCORE_USER_AGENT, UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE + ) + + with record_request_metrics( + metrics=metrics, + method=request.method, + endpoint=canonical_endpoint, + user_agent=user_agent, + ): resp = await handler(request) assert isinstance( # nosec @@ -182,35 +93,34 @@ async def middleware_handler(request: web.Request, handler: Handler): ), "Forgot envelope middleware?" 
except web.HTTPServerError as exc: - # Transforms exception into response object and log exception resp = exc log_exception = exc + raise resp from exc except web.HTTPException as exc: - # Transforms non-HTTPServerError exceptions into response object resp = exc log_exception = None + raise resp from exc except asyncio.CancelledError as exc: - # Mostly for logging - resp = web.HTTPInternalServerError(reason=f"{exc}") + resp = web.HTTPInternalServerError(text=f"{exc}") log_exception = exc - raise + raise resp from exc except Exception as exc: # pylint: disable=broad-except - # Prevents issue #1025. - resp = web.HTTPInternalServerError(reason=f"{exc}") + resp = web.HTTPInternalServerError(text=f"{exc}") resp.__cause__ = exc log_exception = exc + raise resp from exc finally: - resp_time_secs: float = time.time() - start_time + response_latency_seconds = perf_counter() - start_time - # prometheus probes - request.app[kREQUEST_COUNT].labels( - app_name, - request.method, - canonical_endpoint, - resp.status, - request.headers.get(X_SIMCORE_USER_AGENT, UNDEFINED_REGULAR_USER_AGENT), - ).inc() + record_response_metrics( + metrics=metrics, + method=request.method, + endpoint=canonical_endpoint, + user_agent=user_agent, + http_status=resp.status, + response_latency_seconds=response_latency_seconds, + ) if exit_middleware_cb: with log_catch(logger=log, reraise=False): @@ -224,7 +134,7 @@ async def middleware_handler(request: web.Request, handler: Handler): request.remote, request.method, request.path, - resp_time_secs, + response_latency_seconds, resp.status, exc_info=log_exception, stack_info=True, @@ -232,8 +142,9 @@ async def middleware_handler(request: web.Request, handler: Handler): return resp - # adds identifier - middleware_handler.__middleware_name__ = f"{__name__}.monitor_{app_name}" + setattr( # noqa: B010 + middleware_handler, "__middleware_name__", f"{__name__}.monitor_{app_name}" + ) return middleware_handler @@ -242,51 +153,10 @@ def setup_monitoring( app: web.Application, app_name: str, *, - enter_middleware_cb: Optional[EnterMiddlewareCB] = None, - exit_middleware_cb: Optional[ExitMiddlewareCB] = None, - **app_info_kwargs, + enter_middleware_cb: EnterMiddlewareCB | None = None, + exit_middleware_cb: ExitMiddlewareCB | None = None, ): - # app-scope registry - target_info = {"application_name": app_name} - target_info.update(app_info_kwargs) - app[kCOLLECTOR_REGISTRY] = reg = CollectorRegistry( - auto_describe=False, target_info=target_info - ) - # automatically collects process metrics see [https://github.com/prometheus/client_python] - app[kPROCESS_COLLECTOR] = ProcessCollector(registry=reg) - # automatically collects python_info metrics see [https://github.com/prometheus/client_python] - app[kPLATFORM_COLLECTOR] = PlatformCollector(registry=reg) - # automatically collects python garbage collector metrics see [https://github.com/prometheus/client_python] - # prefixed with python_gc_ - app[kGC_COLLECTOR] = GCCollector(registry=reg) - - # Total number of requests processed - app[kREQUEST_COUNT] = Counter( - name="http_requests", - documentation="Total requests count", - labelnames=[ - "app_name", - "method", - "endpoint", - "http_status", - "simcore_user_agent", - ], - registry=reg, - ) - - app[kINFLIGHTREQUESTS] = Gauge( - name="http_in_flight_requests", - documentation="Number of requests in process", - labelnames=["app_name", "method", "endpoint", "simcore_user_agent"], - registry=reg, - ) - - app[kRESPONSELATENCY] = Summary( - name="http_request_latency_seconds", - 
documentation="Time processing a request", - labelnames=["app_name", "method", "endpoint", "simcore_user_agent"], - registry=reg, - ) + app[_PROMETHEUS_METRICS] = get_prometheus_metrics() # WARNING: ensure ERROR middleware is over this one # @@ -304,8 +174,7 @@ def setup_monitoring( # # ensures is first layer but cannot guarantee the order setup is applied - app.middlewares.insert( - 0, + app.middlewares.append( middleware_factory( app_name, enter_middleware_cb=enter_middleware_cb, diff --git a/packages/service-library/src/servicelib/aiohttp/observer.py b/packages/service-library/src/servicelib/aiohttp/observer.py new file mode 100644 index 00000000000..e0dfd6a579e --- /dev/null +++ b/packages/service-library/src/servicelib/aiohttp/observer.py @@ -0,0 +1,62 @@ +"""observer pattern module + +Allows loose coupling subject and an observer. +""" + +import logging +from collections import defaultdict +from collections.abc import Callable + +from aiohttp import web + +from ..utils import logged_gather + +log = logging.getLogger(__name__) + + +_APP_OBSERVER_EVENTS_REGISTRY_KEY = "{__name__}.event_registry" + + +class ObserverRegistryNotFoundError(RuntimeError): + ... + + +def setup_observer_registry(app: web.Application): + # only once + app.setdefault(_APP_OBSERVER_EVENTS_REGISTRY_KEY, defaultdict(list)) + + +def _get_registry(app: web.Application) -> defaultdict: + try: + registry: defaultdict = app[_APP_OBSERVER_EVENTS_REGISTRY_KEY] + return registry + except KeyError as err: + msg = "Could not find observer registry. TIP: initialize app with setup_observer_registry" + raise ObserverRegistryNotFoundError(msg) from err + + +def register_observer(app: web.Application, func: Callable, event: str): + _event_registry = _get_registry(app) + + if func not in _event_registry[event]: + log.debug("registering %s to event %s", func, event) + _event_registry[event].append(func) + + +def registed_observers_report(app: web.Application) -> str: + if _event_registry := app.get(_APP_OBSERVER_EVENTS_REGISTRY_KEY): + return "\n".join( + f" {event}->{len(funcs)} handles" + for event, funcs in _event_registry.items() + ) + return "No observers registry found in app" + + +async def emit(app: web.Application, event: str, *args, **kwargs): + _event_registry = _get_registry(app) + if not _event_registry[event]: + return + + coroutines = [observer(*args, **kwargs) for observer in _event_registry[event]] + # all coroutine called in // + await logged_gather(*coroutines) diff --git a/packages/service-library/src/servicelib/aiohttp/openapi.py b/packages/service-library/src/servicelib/aiohttp/openapi.py deleted file mode 100644 index d0f3e3a1ee5..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/openapi.py +++ /dev/null @@ -1,100 +0,0 @@ -""" Openapi specifications - - Facade for openapi functionality -""" -import warnings -from pathlib import Path -from typing import Dict, Tuple - -import openapi_core -import yaml -from aiohttp import ClientSession -from openapi_core.schema.exceptions import OpenAPIError, OpenAPIMappingError -from openapi_core.schema.specs.models import Spec -from yarl import URL - -# Supported version of openapi (last number indicates only editorial changes) -# TODO: ensure openapi_core.__version__ is up-to-date with OAI_VERSION -OAI_VERSION = "3.0.2" -OAI_VERSION_URL = ( - "https://github.com/OAI/OpenAPI-Specification/blob/master/versions/%s.md" - % OAI_VERSION -) - -# alias -OpenApiSpec = Spec - - -def get_base_path(specs: OpenApiSpec) -> str: - """Expected API basepath - - By 
convention, the API basepath indicates the major - version of the openapi specs - - :param specs: valid specifications - :type specs: OpenApiSpec - :return: /${MAJOR} - :rtype: str - """ - # TODO: guarantee this convention is true - return "/v" + specs.info.version.split(".")[0] - - -# TODO: _load_from_* is also found in jsonshema_specs -def _load_from_path(filepath: Path) -> Tuple[Dict, str]: - with filepath.open() as f: - spec_dict = yaml.safe_load(f) - return spec_dict, filepath.as_uri() - - -async def _load_from_url(session: ClientSession, url: URL) -> Tuple[Dict, str]: - async with session.get(url) as resp: - text = await resp.text() - spec_dict = yaml.safe_load(text) - return spec_dict, str(url) - - -async def create_openapi_specs(location, session: ClientSession = None) -> OpenApiSpec: - """Loads specs from a given location (url or path), - validates them and returns a working instance - - If location is an url, the specs are loaded asyncronously - - Both location types (url and file) are intentionally managed - by the same function call to enforce developer always supporting - both options. Notice that the url location enforces - the consumer context to be asyncronous. - - :param location: url or path - :return: validated openapi specifications object - :rtype: OpenApiSpec - """ - if URL(str(location)).host: - if session is None: - raise ValueError("Client session required in arguments") - spec_dict, spec_url = await _load_from_url(session, URL(location)) - else: - path = Path(location).expanduser().resolve() # pylint: disable=no-member - spec_dict, spec_url = _load_from_path(path) - - return openapi_core.create_spec(spec_dict, spec_url) - - -def create_specs(openapi_path: Path) -> OpenApiSpec: - warnings.warn("Use instead create_openapi_specs", category=DeprecationWarning) - - # TODO: spec_from_file and spec_from_url - with openapi_path.open() as f: - spec_dict = yaml.safe_load(f) - - spec = openapi_core.create_spec(spec_dict, spec_url=openapi_path.as_uri()) - return spec - - -__all__ = ( - "get_base_path", - "create_openapi_specs", - "OpenApiSpec", - "OpenAPIError", - "OpenAPIMappingError", -) diff --git a/packages/service-library/src/servicelib/aiohttp/openapi_servers.py b/packages/service-library/src/servicelib/aiohttp/openapi_servers.py deleted file mode 100644 index 7b965d7cb54..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/openapi_servers.py +++ /dev/null @@ -1,6 +0,0 @@ -def get_server(servers, url): - # Development server: http://{host}:{port}/{basePath} - for server in servers: - if server.url == url: - return server - raise ValueError("Cannot find server %s" % url) diff --git a/packages/service-library/src/servicelib/aiohttp/openapi_validation.py b/packages/service-library/src/servicelib/aiohttp/openapi_validation.py deleted file mode 100644 index fe245fb1ba8..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/openapi_validation.py +++ /dev/null @@ -1,92 +0,0 @@ -""" Implements openapi specs validation - -Based on openapi-core -""" - -import logging - -from aiohttp import web -from openapi_core import shortcuts -from openapi_core.schema.specs.models import Spec as OpenApiSpec -from openapi_core.validation.request.validators import RequestValidator -from openapi_core.validation.response.validators import ResponseValidator - -from .openapi_wrappers import ( - PARAMETERS_KEYS, - AiohttpOpenAPIRequest, - AiohttpOpenAPIResponse, -) - -logger = logging.getLogger(__name__) - - -PATH_KEY, QUERY_KEY, HEADER_KEY, COOKIE_KEY = PARAMETERS_KEYS - - 
-async def validate_request(request: web.Request, spec: OpenApiSpec): - """Validates aiohttp.web.Request against an opeapi specification - - Returns parameters dict, body object and list of errors (exceptions objects) - """ - req = await AiohttpOpenAPIRequest.create(request) - - validator = RequestValidator(spec) - result = validator.validate(req) - - return result.parameters, result.body, result.errors - - -async def validate_parameters(spec: OpenApiSpec, request: web.Request): - req = await AiohttpOpenAPIRequest.create(request) - return shortcuts.validate_parameters(spec, req) - - -async def validate_body(spec: OpenApiSpec, request: web.Request): - req = await AiohttpOpenAPIRequest.create(request) - return shortcuts.validate_body(spec, req) - - -async def validate_data(spec: OpenApiSpec, request, response: web.Response): - - if isinstance(request, web.Request): - req = await AiohttpOpenAPIRequest.create(request) - else: - # TODO: alternative MockRequest - # params = ['host_url', 'method', 'path'] - # opapi_request = MockRequest(*args) - - params = ["full_url_pattern", "method"] - assert all(hasattr(request, attr) for attr in params) # nosec - # TODO: if a dict with params, convert dict to dot operations! and reverse - - req = request - - res = await AiohttpOpenAPIResponse.create(response) - - validator = ResponseValidator(spec) - result = validator.validate(req, res) - - result.raise_for_errors() - - return result.data - - -async def validate_response( - spec: OpenApiSpec, request: web.Request, response: web.Response -): - """ - Validates server response against openapi specs - - Raises exceptions OpenAPIError, OpenAPIMappingError - """ - validator = ResponseValidator(spec) - - req = await AiohttpOpenAPIRequest.create(request) - res = AiohttpOpenAPIResponse( - response, response.text - ) # FIXME:ONLY IN SERVER side. Async in client! 
- result = validator.validate(req, res) - result.raise_for_errors() - - -__all__ = ("validate_request", "validate_data") diff --git a/packages/service-library/src/servicelib/aiohttp/openapi_wrappers.py b/packages/service-library/src/servicelib/aiohttp/openapi_wrappers.py deleted file mode 100644 index 1332a3b721d..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/openapi_wrappers.py +++ /dev/null @@ -1,132 +0,0 @@ -""" Implements BaseOpenAPIRequest and BaseOpenAPIResponse interfaces for aiohttp - -""" -import logging -import re - -from aiohttp import web -from openapi_core.wrappers.base import BaseOpenAPIRequest, BaseOpenAPIResponse -from werkzeug.datastructures import ImmutableMultiDict - -log = logging.getLogger(__name__) - -CAPTURES = re.compile(r"\(\?P<([_a-zA-Z][_a-zA-Z0-9]+)>(.[^)]+)\)") -PARAMETERS_KEYS = ("path", "query", "header", "cookie") -PATH_KEY, QUERY_KEY, HEADER_KEY, COOKIE_KEY = PARAMETERS_KEYS - - -class AiohttpOpenAPIRequest(BaseOpenAPIRequest): - wrappedcls = web.Request - - def __init__(self, request: web.Request, data: str): - self._request = request - self._body = data - - @staticmethod - async def create(request: web.Request): - data = await request.text() - return AiohttpOpenAPIRequest(request, data) - - @property - def host_url(self): - url = self._request.url.origin() - return str(url) - - @property - def path(self): - # [scheme:]//[user[:password]@]host[:port][/path][?query][#fragment] - return str(self._request.path_qs) - - @property - def method(self) -> str: - return self._request.method.lower() - - @property - def path_pattern(self): - # match_info is a UrlMappingMatchInfo(dict, AbstractMatchInfo interface) - match_info = self._request.match_info - info = match_info.get_info() - - # if PlainResource then - path_pattern = info.get("path") - - # if DynamicResource then whe need to undo the conversion to formatter and pattern - if not path_pattern: - formatter = info.get("formatter") - re_pattern = info.get("pattern").pattern - kargs = {} - # TODO: create a test with '/my/tokens/{service}/' - # TODO: create a test with '/my/tokens/{service:google|facebook}/' - # TODO: create a test with '/my/tokens/{identifier:\d+}/' - for key, value in CAPTURES.findall(re_pattern): - if value == "[^{}/]+": # = no re in pattern - kargs[key] = "{%s}" % (key) - else: - kargs[key] = "{%s:%s}" % (key, value) - path_pattern = formatter.format(**kargs) - - return path_pattern - - @property - def parameters(self): - rq = self._request - - # a dict view of arguments that matched the request - # TODO: not sure - view_args = dict(rq.match_info) - - # The parsed URL parameters (the part in the URL after the question mark). 
- # a multidict proxy that is conterted into list of (key,value) tuples - args = list(rq.query.items()) - - # case-insensitive multidict proxy with all headers - headers = rq.headers - - # A multidict of all request's cookies - cookies = rq.cookies - - kpath, kquery, kheader, kcookie = PARAMETERS_KEYS - return { - kpath: view_args or {}, - kquery: ImmutableMultiDict(args or []), - kheader: headers or {}, - kcookie: cookies or {}, - } - - @property - def body(self) -> str: - """Returns str with body content""" - return self._body - - @property - def mimetype(self) -> str: - """Read-only property with content part of Content-Type header""" - return self._request.content_type - - -class AiohttpOpenAPIResponse(BaseOpenAPIResponse): - wrappedcls = web.Response - - def __init__(self, response: web.Response, data: str): - self._response = response - self._text = data - - @staticmethod - async def create(response: web.Response): - text = await response.text() - return AiohttpOpenAPIResponse(response, text) - - @property - def body(self) -> str: - return self._text - - # FIXME: not part of BaseOpenAPIResponse but used in openapi-core - data = body - - @property - def status_code(self) -> int: - return self._response.status - - @property - def mimetype(self): - return self._response.content_type diff --git a/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py b/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py new file mode 100644 index 00000000000..eab7d1fc598 --- /dev/null +++ b/packages/service-library/src/servicelib/aiohttp/profiler_middleware.py @@ -0,0 +1,49 @@ +from aiohttp.web import HTTPInternalServerError, Request, StreamResponse, middleware +from servicelib.mimetype_constants import ( + MIMETYPE_APPLICATION_JSON, + MIMETYPE_APPLICATION_ND_JSON, +) + +from ..utils_profiling_middleware import _is_profiling, _profiler, append_profile + + +@middleware +async def profiling_middleware(request: Request, handler): + if request.headers.get("x-profile") is not None: + try: + if _profiler.is_running or (_profiler.last_session is not None): + raise HTTPInternalServerError( + reason="Profiler is already running. 
Only a single request can be profiled at any given time.", + headers={}, + ) + _profiler.reset() + _is_profiling.set(True) + + with _profiler: + response = await handler(request) + + if response.content_type != MIMETYPE_APPLICATION_JSON: + raise HTTPInternalServerError( + reason=f"Profiling middleware is not compatible with {response.content_type=}", + headers={}, + ) + + stream_response = StreamResponse( + status=response.status, + reason=response.reason, + headers=response.headers, + ) + stream_response.content_type = MIMETYPE_APPLICATION_ND_JSON + await stream_response.prepare(request) + await stream_response.write(response.body) + await stream_response.write( + append_profile( + "\n", _profiler.output_text(unicode=True, color=True, show_all=True) + ).encode() + ) + await stream_response.write_eof() + finally: + _profiler.reset() + return stream_response + + return await handler(request) diff --git a/packages/service-library/src/servicelib/aiohttp/requests_utils.py b/packages/service-library/src/servicelib/aiohttp/requests_utils.py deleted file mode 100644 index 8f47f0b3698..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/requests_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -from aiohttp import web - - -def get_request(*args, **kwargs) -> web.BaseRequest: - """Helper for handler function decorators to retrieve requests""" - request = kwargs.get("request", args[-1] if args else None) - if not isinstance(request, web.BaseRequest): - msg = ( - "Incorrect decorator usage. " - "Expecting `def handler(request)` " - "or `def handler(self, request)`." - ) - raise RuntimeError(msg) - return request diff --git a/packages/service-library/src/servicelib/aiohttp/requests_validation.py b/packages/service-library/src/servicelib/aiohttp/requests_validation.py index 526a4b9c1fa..d555e535fe7 100644 --- a/packages/service-library/src/servicelib/aiohttp/requests_validation.py +++ b/packages/service-library/src/servicelib/aiohttp/requests_validation.py @@ -1,31 +1,45 @@ -""" Parses and validation aiohttp requests against pydantic models +"""Parses and validation aiohttp requests against pydantic models -These functions are analogous to `pydantic.tools.parse_obj_as(model_class, obj)` for aiohttp's requests +Rationale: These functions follow an interface analogous to ``pydantic.tools``'s + + parse_obj_as(model_class, obj) + +but adapted to parse&validate path, query and body of an aiohttp's request """ import json.decoder +from collections.abc import Iterator from contextlib import contextmanager -from typing import Iterator, TypeVar, Union +from typing import TypeVar from aiohttp import web -from pydantic import BaseModel, ValidationError, parse_obj_as +from common_library.json_serialization import json_dumps +from pydantic import BaseModel, TypeAdapter, ValidationError -from ..json_serialization import json_dumps from ..mimetype_constants import MIMETYPE_APPLICATION_JSON +from . 
import status -ModelType = TypeVar("ModelType", bound=BaseModel) -ModelOrListType = TypeVar("ModelOrListType", bound=Union[BaseModel, list]) +ModelClass = TypeVar("ModelClass", bound=BaseModel) +ModelOrListOrDictType = TypeVar("ModelOrListOrDictType", bound=BaseModel | list | dict) @contextmanager def handle_validation_as_http_error( *, error_msg_template: str, resource_name: str, use_error_v1: bool ) -> Iterator[None]: + """Context manager to handle ValidationError and reraise them as HTTPUnprocessableEntity error + + Arguments: + error_msg_template -- _description_ + resource_name -- + use_error_v1 -- If True, it uses new error response + + Raises: + web.HTTPUnprocessableEntity: (422) raised from a ValidationError + """ - Transforms ValidationError into HTTP error - """ - try: + try: yield except ValidationError as err: @@ -56,7 +70,7 @@ def handle_validation_as_http_error( error_str = json_dumps( { "error": { - "status": web.HTTPUnprocessableEntity.status_code, + "status": status.HTTP_422_UNPROCESSABLE_ENTITY, "errors": errors, } } @@ -77,7 +91,7 @@ def handle_validation_as_http_error( reason=reason_msg, text=error_str, content_type=MIMETYPE_APPLICATION_JSON, - ) + ) from err # NOTE: @@ -89,33 +103,50 @@ def handle_validation_as_http_error( def parse_request_path_parameters_as( - parameters_schema: type[ModelType], + parameters_schema_cls: type[ModelClass], request: web.Request, *, use_enveloped_error_v1: bool = True, -) -> ModelType: +) -> ModelClass: """Parses path parameters from 'request' and validates against 'parameters_schema' - :raises HTTPUnprocessableEntity (422) if validation of parameters fail + + Keyword Arguments: + use_enveloped_error_v1 -- new enveloped error model (default: {True}) + + Raises: + web.HTTPUnprocessableEntity: (422) if validation of parameters fail + + Returns: + Validated model of path parameters """ + with handle_validation_as_http_error( error_msg_template="Invalid parameter/s '{failed}' in request path", resource_name=request.rel_url.path, use_error_v1=use_enveloped_error_v1, ): data = dict(request.match_info) - return parameters_schema.parse_obj(data) + return parameters_schema_cls.model_validate(data) def parse_request_query_parameters_as( - parameters_schema: type[ModelType], + parameters_schema_cls: type[ModelClass], request: web.Request, *, use_enveloped_error_v1: bool = True, -) -> ModelType: +) -> ModelClass: """Parses query parameters from 'request' and validates against 'parameters_schema' - :raises HTTPUnprocessableEntity (422) if validation of queries fail + + Keyword Arguments: + use_enveloped_error_v1 -- new enveloped error model (default: {True}) + + Raises: + web.HTTPUnprocessableEntity: (422) if validation of parameters fail + + Returns: + Validated model of query parameters """ with handle_validation_as_http_error( @@ -123,19 +154,48 @@ def parse_request_query_parameters_as( resource_name=request.rel_url.path, use_error_v1=use_enveloped_error_v1, ): + # NOTE: Currently, this does not take into consideration cases where there are multiple + # query parameters with the same key. However, we are not using such cases anywhere at the moment. 
data = dict(request.query) - return parameters_schema.parse_obj(data) + + if hasattr(parameters_schema_cls, "model_validate"): + return parameters_schema_cls.model_validate(data) + model: ModelClass = TypeAdapter(parameters_schema_cls).validate_python(data) + return model + + +def parse_request_headers_as( + parameters_schema_cls: type[ModelClass], + request: web.Request, + *, + use_enveloped_error_v1: bool = True, +) -> ModelClass: + with handle_validation_as_http_error( + error_msg_template="Invalid parameter/s '{failed}' in request headers", + resource_name=request.rel_url.path, + use_error_v1=use_enveloped_error_v1, + ): + data = dict(request.headers) + return parameters_schema_cls.model_validate(data) async def parse_request_body_as( - model_schema: type[ModelOrListType], + model_schema_cls: type[ModelOrListOrDictType], request: web.Request, *, use_enveloped_error_v1: bool = True, -) -> ModelOrListType: +) -> ModelOrListOrDictType: """Parses and validates request body against schema - :raises HTTPUnprocessableEntity (422), HTTPBadRequest(400) + Keyword Arguments: + use_enveloped_error_v1 -- new enveloped error model (default: {True}) + + Raises: + web.HTTPBadRequest: (400) if invalid json body + web.HTTPUnprocessableEntity: (422) if does not validates against schema + + Returns: + Validated model of request body """ with handle_validation_as_http_error( error_msg_template="Invalid field/s '{failed}' in request body", @@ -149,13 +209,13 @@ async def parse_request_body_as( try: body = await request.json() except json.decoder.JSONDecodeError as err: - raise web.HTTPBadRequest(reason=f"Invalid json in body: {err}") + raise web.HTTPBadRequest(text=f"Invalid json in body: {err}") from err - if hasattr(model_schema, "parse_obj"): + if hasattr(model_schema_cls, "model_validate"): # NOTE: model_schema can be 'list[T]' or 'dict[T]' which raise TypeError # with issubclass(model_schema, BaseModel) - assert issubclass(model_schema, BaseModel) # nosec - return model_schema.parse_obj(body) + assert issubclass(model_schema_cls, BaseModel) # nosec + return model_schema_cls.model_validate(body) # type: ignore [return-value] # used for model_schema like 'list[T]' or 'dict[T]' - return parse_obj_as(model_schema, body) + return TypeAdapter(model_schema_cls).validate_python(body) # type: ignore[no-any-return] diff --git a/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py b/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py index 4db627cc3e2..a936cb8bfd6 100644 --- a/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py +++ b/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py @@ -1,70 +1,75 @@ -""" rest - middlewares for error, enveloping and validation +"""rest - middlewares for error, enveloping and validation - SEE https://gist.github.com/amitripshtos/854da3f4217e3441e8fceea85b0cbd91 +SEE https://gist.github.com/amitripshtos/854da3f4217e3441e8fceea85b0cbd91 """ -import asyncio -import json + import logging -from typing import Awaitable, Callable, Union +from collections.abc import Awaitable, Callable +from typing import Any from aiohttp import web from aiohttp.web_request import Request from aiohttp.web_response import StreamResponse -from openapi_core.schema.exceptions import OpenAPIError +from common_library.error_codes import create_error_code +from common_library.json_serialization import json_dumps, json_loads +from models_library.rest_error import ErrorGet, ErrorItemType, LogMessageType +from ..logging_errors import 
create_troubleshotting_log_kwargs from ..mimetype_constants import MIMETYPE_APPLICATION_JSON +from ..rest_responses import is_enveloped_from_map, is_enveloped_from_text from ..utils import is_production_environ -from .rest_models import ErrorItemType, ErrorType, LogMessageType -from .rest_responses import ( - _DataType, - create_data_response, - create_error_response, - is_enveloped_from_map, - is_enveloped_from_text, - wrap_as_envelope, -) +from .rest_responses import create_data_response, create_http_error, wrap_as_envelope from .rest_utils import EnvelopeFactory -from .rest_validators import OpenApiValidator from .typing_extension import Handler, Middleware DEFAULT_API_VERSION = "v0" +_FMSG_INTERNAL_ERROR_USER_FRIENDLY = ( + "We apologize for the inconvenience. " + "The issue has been recorded, please report it if it persists." +) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) def is_api_request(request: web.Request, api_version: str) -> bool: base_path = "/" + api_version.lstrip("/") - return request.path.startswith(base_path) - + return bool(request.path.startswith(base_path)) -def error_middleware_factory(api_version: str, log_exceptions=True) -> Middleware: +def error_middleware_factory( # noqa: C901 + api_version: str, +) -> Middleware: _is_prod: bool = is_production_environ() def _process_and_raise_unexpected_error(request: web.BaseRequest, err: Exception): - resp = create_error_response( + error_code = create_error_code(err) + error_context: dict[str, Any] = { + "request.remote": f"{request.remote}", + "request.method": f"{request.method}", + "request.path": f"{request.path}", + } + + user_error_msg = _FMSG_INTERNAL_ERROR_USER_FRIENDLY + http_error = create_http_error( err, - "Unexpected Server error", + user_error_msg, web.HTTPInternalServerError, skip_internal_error_details=_is_prod, + error_code=error_code, ) - - if log_exceptions: - logger.error( - 'Unexpected server error "%s" from access: %s "%s %s". Responding with status %s', - type(err), - request.remote, - request.method, - request.path, - resp.status, - exc_info=err, - stack_info=True, + _logger.exception( + **create_troubleshotting_log_kwargs( + user_error_msg, + error=err, + error_context=error_context, + error_code=error_code, ) - raise resp + ) + raise http_error @web.middleware - async def _middleware_handler(request: web.Request, handler: Handler): + async def _middleware_handler(request: web.Request, handler: Handler): # noqa: C901 """ Ensure all error raised are properly enveloped and json responses """ @@ -73,126 +78,81 @@ async def _middleware_handler(request: web.Request, handler: Handler): # FIXME: review when to send info to client and when not! 
try: - response = await handler(request) - return response + return await handler(request) except web.HTTPError as err: - # TODO: differenciate between server/client error - if not err.reason: - err.set_status(err.status_code, reason="Unexpected error") err.content_type = MIMETYPE_APPLICATION_JSON if not err.text or not is_enveloped_from_text(err.text): - error = ErrorType( + error_message = err.text or err.reason or "Unexpected error" + error_model = ErrorGet( errors=[ ErrorItemType.from_error(err), ], status=err.status, logs=[ - LogMessageType(message=err.reason, level="ERROR"), + LogMessageType(message=error_message, level="ERROR"), ], - message=err.reason, + message=error_message, ) - err.text = EnvelopeFactory(error=error).as_text() + err.text = EnvelopeFactory(error=error_model).as_text() raise - except web.HTTPSuccessful as ex: - ex.content_type = MIMETYPE_APPLICATION_JSON - if ex.text: + except web.HTTPSuccessful as err: + err.content_type = MIMETYPE_APPLICATION_JSON + if err.text: try: - payload = json.loads(ex.text) + payload = json_loads(err.text) if not is_enveloped_from_map(payload): payload = wrap_as_envelope(data=payload) - ex.text = json.dumps(payload) - except Exception as err: # pylint: disable=broad-except - _process_and_raise_unexpected_error(request, err) - raise ex + err.text = json_dumps(payload) + except Exception as other_error: # pylint: disable=broad-except + _process_and_raise_unexpected_error(request, other_error) + raise - except web.HTTPRedirection as ex: - logger.debug("Redirected to %s", ex) + except web.HTTPRedirection as err: + _logger.debug("Redirected to %s", err) raise except NotImplementedError as err: - error_response = create_error_response( + http_error = create_http_error( err, - str(err), + f"{err}", web.HTTPNotImplemented, skip_internal_error_details=_is_prod, ) - raise error_response from err + raise http_error from err - except asyncio.TimeoutError as err: - error_response = create_error_response( + except TimeoutError as err: + http_error = create_http_error( err, f"{err}", web.HTTPGatewayTimeout, skip_internal_error_details=_is_prod, ) - raise error_response from err + raise http_error from err except Exception as err: # pylint: disable=broad-except _process_and_raise_unexpected_error(request, err) # adds identifier (mostly for debugging) - _middleware_handler.__middleware_name__ = f"{__name__}.error_{api_version}" - - return _middleware_handler - - -def validate_middleware_factory(api_version: str) -> Middleware: - @web.middleware - async def _middleware_handler(request: web.Request, handler: Handler): - """ - Validates requests against openapi specs and extracts body, params, etc ... - Validate response against openapi specs - """ - if not is_api_request(request, api_version): - return await handler(request) - - # TODO: move this outside! - RQ_VALIDATED_DATA_KEYS = ("validated-path", "validated-query", "validated-body") - - try: - validator = OpenApiValidator.create(request.app, api_version) - - # FIXME: if request is HTTPNotFound, it still goes through middlewares and then validator.check_request fails!!! - try: - path, query, body = await validator.check_request(request) - - # Injects validated - request["validated-path"] = path - request["validated-query"] = query - request["validated-body"] = body - - except OpenAPIError: - logger.debug("Failing openAPI specs", exc_info=1) - raise - - response = await handler(request) - - # FIXME: openapi-core fails to validate response when specs are in separate files! 
- validator.check_response(response) - - finally: - for k in RQ_VALIDATED_DATA_KEYS: - request.pop(k, None) - - return response - - # adds identifier (mostly for debugging) - _middleware_handler.__middleware_name__ = f"{__name__}.validate_{api_version}" + setattr( # noqa: B010 + _middleware_handler, "__middleware_name__", f"{__name__}.error_{api_version}" + ) return _middleware_handler -_ResponseOrBodyData = Union[StreamResponse, _DataType] +_ResponseOrBodyData = StreamResponse | Any HandlerFlexible = Callable[[Request], Awaitable[_ResponseOrBodyData]] MiddlewareFlexible = Callable[[Request, HandlerFlexible], Awaitable[StreamResponse]] -def envelope_middleware_factory(api_version: str) -> MiddlewareFlexible: +def envelope_middleware_factory( + api_version: str, +) -> Callable[..., Awaitable[StreamResponse]]: # FIXME: This data conversion is very error-prone. Use decorators instead! _is_prod: bool = is_production_environ() @@ -210,7 +170,7 @@ async def _middleware_handler( return resp # NOTE: the return values of this handler - resp: _ResponseOrBodyData = await handler(request) + resp = await handler(request) if isinstance(resp, web.FileResponse): return resp @@ -225,7 +185,9 @@ async def _middleware_handler( return resp # adds identifier (mostly for debugging) - _middleware_handler.__middleware_name__ = f"{__name__}.envelope_{api_version}" + setattr( # noqa: B010 + _middleware_handler, "__middleware_name__", f"{__name__}.envelope_{api_version}" + ) return _middleware_handler @@ -235,7 +197,4 @@ def append_rest_middlewares( ): """Helper that appends rest-middlewares in the correct order""" app.middlewares.append(error_middleware_factory(api_version)) - # FIXME: openapi-core fails to validate response when specs are in separate files! - # FIXME: disabled so webserver and storage do not get this issue - # app.middlewares.append(validate_middleware_factory(api_version)) app.middlewares.append(envelope_middleware_factory(api_version)) diff --git a/packages/service-library/src/servicelib/aiohttp/rest_models.py b/packages/service-library/src/servicelib/aiohttp/rest_models.py deleted file mode 100644 index f127b86b9dc..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/rest_models.py +++ /dev/null @@ -1,61 +0,0 @@ -""" rest - common schema models and classes - -UNDER DEVELOPMENT -""" -import typing -import warnings - -import attr - -warnings.warn("DO NOT USE IN PRODUCTION, STILL UNDER DEVELOPMENT") - -# NOTE: using these, optional and required fields are always transmitted! -# NOTE: make some attrs nullable by default!? 
- - -@attr.s(auto_attribs=True) -class LogMessageType: - message: str - level: str = "INFO" - logger: str = "user" - - -@attr.s(auto_attribs=True) -class ErrorItemType: - code: str - message: str - resource: str - field: str - - @classmethod - def from_error(cls, err: BaseException): - item = cls( - code=err.__class__.__name__, message=str(err), resource=None, field=None - ) - return item - - -@attr.s(auto_attribs=True) -class ErrorType: - logs: typing.List[LogMessageType] = attr.Factory(list) - errors: typing.List[ErrorItemType] = attr.Factory(list) - status: int = 400 - message: str = "Unexpected client error" - - -@attr.s(auto_attribs=True) -class FakeType: - path_value: str - query_value: str - body_value: typing.Dict[str, str] - - -@attr.s(auto_attribs=True) -class HealthCheckType: - name: str - status: str - api_version: str - version: str - - -# TODO: fix __all__ diff --git a/packages/service-library/src/servicelib/aiohttp/rest_oas.py b/packages/service-library/src/servicelib/aiohttp/rest_oas.py deleted file mode 100644 index 4cddc98a664..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/rest_oas.py +++ /dev/null @@ -1,26 +0,0 @@ -""" rest - Open API specifications - - -""" - -from aiohttp import web -from openapi_core.schema.specs.models import Spec - -from .application_keys import APP_OPENAPI_SPECS_KEY -from .openapi import create_specs - - -def set_specs(app: web.Application, specs: Spec) -> Spec: - # TODO consider the case of multiple versions of spec -> Dict[Spec] ?? - app[APP_OPENAPI_SPECS_KEY] = specs - return app[APP_OPENAPI_SPECS_KEY] - - -def get_specs(app: web.Application) -> Spec: - # TODO consider the case of multiple versions of spec -> Dict[Spec] ?? - return app[APP_OPENAPI_SPECS_KEY] - - -OpenApiSpec = Spec - -__all__ = ("set_specs", "get_specs", "OpenApiSpec", "create_specs") diff --git a/packages/service-library/src/servicelib/aiohttp/rest_responses.py b/packages/service-library/src/servicelib/aiohttp/rest_responses.py index a405419ebbe..08405fe54cf 100644 --- a/packages/service-library/src/servicelib/aiohttp/rest_responses.py +++ b/packages/service-library/src/servicelib/aiohttp/rest_responses.py @@ -1,102 +1,60 @@ -""" Utils to check, convert and compose server responses for the RESTApi +"""Utils to check, convert and compose server responses for the RESTApi""" -""" import inspect -import json -from typing import Any, Dict, List, Mapping, Optional, Tuple, Type, Union +from typing import Any -import attr from aiohttp import web, web_exceptions from aiohttp.web_exceptions import HTTPError, HTTPException +from common_library.error_codes import ErrorCodeStr +from common_library.json_serialization import json_dumps +from models_library.rest_error import ErrorGet, ErrorItemType -from ..json_serialization import json_dumps +from ..aiohttp.status import HTTP_200_OK from ..mimetype_constants import MIMETYPE_APPLICATION_JSON -from .rest_models import ErrorItemType, ErrorType, LogMessageType - -ENVELOPE_KEYS = ("data", "error") -OFFSET_PAGINATION_KEYS = ("_meta", "_links") - -JsonLikeModel = Union[Dict[str, Any], List[Dict[str, Any]]] - -_DataType = Union[str, Dict[str, Any], List[Any]] - - -def is_enveloped_from_map(payload: Mapping) -> bool: - return all(k in ENVELOPE_KEYS for k in payload.keys() if not str(k).startswith("_")) - - -def is_enveloped_from_text(text: str) -> bool: - try: - payload = json.loads(text) - except json.decoder.JSONDecodeError: - return False - return is_enveloped_from_map(payload) - - -def is_enveloped(payload: Union[Mapping, str]) 
-> bool: - # pylint: disable=isinstance-second-argument-not-valid-type - if isinstance(payload, Mapping): - return is_enveloped_from_map(payload) - if isinstance(payload, str): - return is_enveloped_from_text(text=payload) - return False +from ..rest_constants import RESPONSE_MODEL_POLICY +from ..rest_responses import is_enveloped +from ..status_codes_utils import get_code_description def wrap_as_envelope( - data: Optional[JsonLikeModel] = None, - error: Optional[JsonLikeModel] = None, - as_null: bool = True, -) -> Dict[str, Any]: - """ - as_null: if True, keys for null values are created and assigned to None - """ - payload = {} - if data or as_null: - payload["data"] = data - if error or as_null: - payload["error"] = error - return payload - - -def unwrap_envelope(payload: Dict[str, Any]) -> Tuple: - """ - Safe returns (data, error) tuple from a response payload - """ - return tuple(payload.get(k) for k in ENVELOPE_KEYS) if payload else (None, None) + data: Any = None, + error: Any = None, +) -> dict[str, Any]: + return {"data": data, "error": error} # RESPONSES FACTORIES ------------------------------- def create_data_response( - data: _DataType, *, skip_internal_error_details=False, status=web.HTTPOk.status_code + data: Any, *, skip_internal_error_details=False, status=HTTP_200_OK ) -> web.Response: response = None try: - if not is_enveloped(data): - payload = wrap_as_envelope(data) - else: - payload = data + payload = wrap_as_envelope(data) if not is_enveloped(data) else data response = web.json_response(payload, dumps=json_dumps, status=status) except (TypeError, ValueError) as err: - response = create_error_response( - [ - err, - ], - str(err), - web.HTTPInternalServerError, - skip_internal_error_details=skip_internal_error_details, + response = exception_to_response( + create_http_error( + [ + err, + ], + str(err), + web.HTTPInternalServerError, + skip_internal_error_details=skip_internal_error_details, + ) ) return response -def create_error_response( - errors: Union[List[Exception], Exception], - reason: Optional[str] = None, - http_error_cls: Type[HTTPError] = web.HTTPInternalServerError, +def create_http_error( + errors: list[Exception] | Exception, + reason: str | None = None, + http_error_cls: type[HTTPError] = web.HTTPInternalServerError, *, skip_internal_error_details: bool = False, + error_code: ErrorCodeStr | None = None, ) -> HTTPError: """ - Response body conforms OAS schema model @@ -106,45 +64,56 @@ def create_error_response( if not isinstance(errors, list): errors = [errors] - # TODO: guarantee no throw! - is_internal_error: bool = http_error_cls == web.HTTPInternalServerError + default_message = reason or get_code_description(http_error_cls.status_code) if is_internal_error and skip_internal_error_details: - error = ErrorType( - errors=[], - status=http_error_cls.status_code, + error = ErrorGet.model_validate( + { + "status": http_error_cls.status_code, + "message": default_message, + "support_id": error_code, + } ) else: - error = ErrorType( - errors=[ErrorItemType.from_error(err) for err in errors], - status=http_error_cls.status_code, + items = [ErrorItemType.from_error(err) for err in errors] + error = ErrorGet.model_validate( + { + "errors": items, # NOTE: deprecated! 
+ "status": http_error_cls.status_code, + "message": default_message, + "support_id": error_code, + } ) - payload = wrap_as_envelope(error=attr.asdict(error)) - - response = http_error_cls( - reason=reason, text=json_dumps(payload), content_type=MIMETYPE_APPLICATION_JSON + assert not http_error_cls.empty_body # nosec + payload = wrap_as_envelope( + error=error.model_dump(mode="json", **RESPONSE_MODEL_POLICY) ) - return response - + return http_error_cls( + reason=safe_status_message(reason), + text=json_dumps( + payload, + ), + content_type=MIMETYPE_APPLICATION_JSON, + ) -def create_log_response(msg: str, level: str) -> web.Response: - """Produces an enveloped response with a log message - Analogous to aiohttp's web.json_response - """ - # TODO: DEPRECATE - msg = LogMessageType(msg, level) - response = web.json_response( - data={"data": attr.asdict(msg), "error": None}, dumps=json_dumps +def exception_to_response(exc: HTTPError) -> web.Response: + # Returning web.HTTPException is deprecated so here we have a converter to a response + # so it can be used as + # SEE https://github.com/aio-libs/aiohttp/issues/2415 + return web.Response( + status=exc.status, + headers=exc.headers, + reason=exc.reason, + text=exc.text, ) - return response # Inverse map from code to HTTPException classes -def _collect_http_exceptions(exception_cls: Type[HTTPException] = HTTPException): +def _collect_http_exceptions(exception_cls: type[HTTPException] = HTTPException): def _pred(obj) -> bool: return ( inspect.isclass(obj) @@ -152,7 +121,7 @@ def _pred(obj) -> bool: and getattr(obj, "status_code", 0) > 0 ) - found: List[Tuple[str, Any]] = inspect.getmembers(web_exceptions, _pred) + found: list[tuple[str, Any]] = inspect.getmembers(web_exceptions, _pred) assert found # nosec http_statuses = {cls.status_code: cls for _, cls in found} @@ -161,14 +130,27 @@ def _pred(obj) -> bool: return http_statuses -_STATUS_CODE_TO_HTTP_ERRORS: Dict[int, Type[HTTPError]] = _collect_http_exceptions( - HTTPError -) +def safe_status_message(message: str | None, max_length: int = 50) -> str | None: + """ + Truncates a status-message (i.e. `reason` in HTTP errors) to a maximum length, replacing newlines with spaces. + If the message is longer than max_length, it will be truncated and "..." will be appended. -def get_http_error(status_code: int) -> Optional[Type[HTTPError]]: - """Returns aiohttp error class corresponding to a 4XX or 5XX status code + This prevents issues such as: + - `aiohttp.http_exceptions.LineTooLong`: 400, message: Got more than 8190 bytes when reading Status line is too long. + - Multiline not allowed in HTTP reason attribute (aiohttp now raises ValueError). - NOTICE that any non-error code (i.e. 2XX, 3XX and 4XX) will return None + See: + - When to use http status and/or text messages https://github.com/ITISFoundation/osparc-simcore/pull/7760 + - [RFC 9112, Section 4.1: HTTP/1.1 Message Syntax and Routing](https://datatracker.ietf.org/doc/html/rfc9112#section-4.1) (status line length limits) + - [RFC 9110, Section 15.5: Reason Phrase](https://datatracker.ietf.org/doc/html/rfc9110#section-15.5) (reason phrase definition) """ - return _STATUS_CODE_TO_HTTP_ERRORS.get(status_code) + if not message: + return None + + flat_message = message.replace("\n", " ") + if len(flat_message) <= max_length: + return flat_message + + # Truncate and add ellipsis + return flat_message[: max_length - 3] + "..." 
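A minimal usage sketch of the reworked helpers from rest_responses.py above (create_data_response, create_http_error, exception_to_response). The handler, its route and its payload are purely illustrative; only the helper names and signatures shown in the hunk above are assumed, imported through the package path of that file:

    from aiohttp import web

    from servicelib.aiohttp.rest_responses import (
        create_data_response,
        create_http_error,
        exception_to_response,
    )


    async def get_item(request: web.Request) -> web.Response:
        try:
            item_id = request.match_info["item_id"]  # hypothetical path parameter
            # success payloads are wrapped as {"data": ..., "error": None}
            return create_data_response({"id": item_id})
        except KeyError as err:
            # build an enveloped error and convert it into a plain web.Response,
            # since returning web.HTTPException instances is deprecated in aiohttp
            http_error = create_http_error(
                err,
                "Missing 'item_id' in request path",
                web.HTTPBadRequest,
            )
            return exception_to_response(http_error)

Registered as usual (e.g. app.router.add_get("/v0/items/{item_id}", get_item)), such a handler returns the same enveloped shape that the error and envelope middlewares in rest_middlewares.py expect.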
diff --git a/packages/service-library/src/servicelib/aiohttp/rest_routing.py b/packages/service-library/src/servicelib/aiohttp/rest_routing.py deleted file mode 100644 index e79a1c210de..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/rest_routing.py +++ /dev/null @@ -1,119 +0,0 @@ -""" rest - routes mapping based on oaspecs - - -See tests/test_rest_routing.py for example of usage -""" - -import inspect -import logging -from collections import namedtuple -from typing import Callable, Dict, Iterator, List, Mapping - -from aiohttp import web - -from .openapi import OpenApiSpec, get_base_path - -logger = logging.getLogger(__name__) - - -def has_handler_signature(fun) -> bool: - # TODO: last parameter is web.Request or called request? - return any( - param.annotation == web.Request - for name, param in inspect.signature(fun).parameters.items() - ) - - -def get_handlers_from_namespace(handlers_nsp) -> Dict: - """Gets all handlers in a namespace define by a class or a module""" - # TODO: Should search for function that are marked as "handlers". Similar to @pytest.fixtures?? - if inspect.ismodule(handlers_nsp): - predicate = lambda obj: inspect.isfunction(obj) and has_handler_signature(obj) - elif hasattr(handlers_nsp, "__class__"): - predicate = lambda obj: inspect.ismethod(obj) and has_handler_signature(obj) - else: - raise ValueError( - "Expected module or class as namespace, got %s" % type(handlers_nsp) - ) - - name_to_handler_map = dict(inspect.getmembers(handlers_nsp, predicate)) - return name_to_handler_map - - -PathOperation = namedtuple("PathOperation", "method path operation_id tags") - - -def iter_path_operations(specs: OpenApiSpec) -> Iterator[PathOperation]: - """Iterates paths in api specs returning tuple (method, path, operation_id, tags) - - NOTE: prepend API version as basepath to path url, e.g. /v0/my/path for path=/my/path - """ - base_path = get_base_path(specs) - - for url, path in specs.paths.items(): - for method, operation in path.operations.items(): - yield PathOperation( - method.upper(), base_path + url, operation.operation_id, operation.tags - ) - - -def map_handlers_with_operations( - handlers_map: Mapping[str, Callable], - operations_it: Iterator[PathOperation], - *, - strict: bool = True -) -> List[web.RouteDef]: - """Matches operation ids with handler names and returns a list of routes - - :param handlers_map: .See get_handlers_from_namespace - :type handlers_map: Mapping[str, Callable] - :param operations_it: iterates over specs operations. 
See iter_path_operations - :type operations_it: Iterator[PathOperation] - :param strict: it raises an error if either a handler or an operator was not mapped, defaults to True - :param strict: bool, optional - :raises ValueError: if not operations mapped - :raises RuntimeError: if not handlers mapped - :rtype: List[web.RouteDef] - """ - - handlers = dict(handlers_map) - routes = [] - for method, path, operation_id, _tags in operations_it: - handler = handlers.pop(operation_id, None) - if handler: - routes.append(web.route(method.upper(), path, handler, name=operation_id)) - elif strict: - raise ValueError("Cannot find any handler named {} ".format(operation_id)) - - if handlers and strict: - raise RuntimeError( - "{} handlers were not mapped to routes: {}".format( - len(handlers), handlers.keys() - ) - ) - - return routes - - -def create_routes_from_namespace( - specs: OpenApiSpec, handlers_nsp, *, strict: bool = True -) -> List[web.RouteDef]: - """Gets *all* available handlers and maps one-to-one to *all* specs routes - - :param specs: openapi spec object - :type specs: OpenApiSpec - :param handlers_nsp: class or module with handler functions - :param strict: ensures strict mapping, defaults to True - :param strict: bool, optional - :rtype: List[web.RouteDef] - """ - handlers = get_handlers_from_namespace(handlers_nsp) - - if not handlers and strict: - raise ValueError("No handlers found in %s" % handlers_nsp) - - routes = map_handlers_with_operations( - handlers, iter_path_operations(specs), strict=strict - ) - - return routes diff --git a/packages/service-library/src/servicelib/aiohttp/rest_utils.py b/packages/service-library/src/servicelib/aiohttp/rest_utils.py index 6f8886984b1..78fa58ac8ac 100644 --- a/packages/service-library/src/servicelib/aiohttp/rest_utils.py +++ b/packages/service-library/src/servicelib/aiohttp/rest_utils.py @@ -1,13 +1,6 @@ -import json -import warnings - -import attr from aiohttp import web - -from ..mimetype_constants import MIMETYPE_APPLICATION_JSON -from .openapi_validation import PATH_KEY, QUERY_KEY, validate_request -from .rest_models import ErrorItemType, ErrorType -from .rest_oas import get_specs +from aiohttp.web import RouteDef, RouteTableDef +from common_library.json_serialization import json_dumps class EnvelopeFactory: @@ -18,45 +11,30 @@ class EnvelopeFactory: """ def __init__(self, data=None, error=None): - enveloped = {"data": data, "error": error} - for key, value in enveloped.items(): - if value is not None and not isinstance(value, dict): - enveloped[key] = attr.asdict(value) - self._envelope = enveloped + self._envelope = {"data": data, "error": error} def as_dict(self) -> dict: return self._envelope def as_text(self) -> str: - return json.dumps(self.as_dict()) + return json_dumps(self.as_dict()) as_data = as_dict -async def extract_and_validate(request: web.Request): - """ - Extracts validated parameters in path, query and body +def set_default_route_names(routes: RouteTableDef): + """Usage: - Can raise '400 Bad Request': indicates that the server could not understand the request due to invalid syntax - See https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/400 + set_default_route_names(routes) + app.router.add_routes(routes) """ - warnings.warn( - "extract_and_validate is deprecated. 
Use instead servicelib.rest_utils.extract_and_validate", - DeprecationWarning, - ) + for r in routes: + if isinstance(r, RouteDef): + r.kwargs.setdefault("name", r.handler.__name__) + - spec = get_specs(request.app) - params, body, errors = await validate_request(request, spec) - - if errors: - error = ErrorType( - errors=[ErrorItemType.from_error(err) for err in errors], - status=web.HTTPBadRequest.status_code, - ) - raise web.HTTPBadRequest( - reason="Failed request validation against API specs", - text=EnvelopeFactory(error=error).as_text(), - content_type=MIMETYPE_APPLICATION_JSON, - ) - - return params[PATH_KEY], params[QUERY_KEY], body +def get_named_routes_as_message(app: web.Application) -> str: + return "\n".join( + f"\t{name}:{resource}" + for name, resource in app.router.named_resources().items() + ) diff --git a/packages/service-library/src/servicelib/aiohttp/rest_validators.py b/packages/service-library/src/servicelib/aiohttp/rest_validators.py deleted file mode 100644 index 9e75bf98250..00000000000 --- a/packages/service-library/src/servicelib/aiohttp/rest_validators.py +++ /dev/null @@ -1,67 +0,0 @@ -from aiohttp import web -from openapi_core.validation.request.validators import RequestValidator -from openapi_core.validation.response.validators import ResponseValidator - -from .openapi_wrappers import ( - PATH_KEY, - QUERY_KEY, - AiohttpOpenAPIRequest, - AiohttpOpenAPIResponse, -) -from .rest_oas import OpenApiSpec, get_specs -from .rest_responses import create_error_response - - -class OpenApiValidator: - """ - Used to validate data in the request->response cycle against openapi specs - """ - - @classmethod - def create(cls, app: web.Application, _version=""): - specs = get_specs(app) - # TODO: one per version! - return cls(specs) - - def __init__(self, spec: OpenApiSpec): - self._reqvtor = RequestValidator(spec, custom_formatters=None) - self._resvtor = ResponseValidator(spec, custom_formatters=None) - - # Current - self.current_request = None # wrapper request - - async def check_request(self, request: web.Request): - self.current_request = None - - rq = await AiohttpOpenAPIRequest.create(request) - result = self._reqvtor.validate(rq) - - # keeps current request and reuses in response - self.current_request = rq - - if result.errors: - err = create_error_response( - result.errors, - "Failed request validation against API specs", - web.HTTPBadRequest, - ) - raise err - - path, query = [result.parameters[k] for k in (PATH_KEY, QUERY_KEY)] - - return path, query, result.body - - def check_response(self, response: web.Response): - req = self.current_request - res = AiohttpOpenAPIResponse( - response, response.text - ) # FIXME:ONLY IN SERVER side. Async in client! 
- - result = self._resvtor.validate(req, res) - if result.errors: - err = create_error_response( - result.errors, - "Failed response validation against API specs", - web.HTTPServiceUnavailable, - ) - raise err diff --git a/packages/service-library/src/servicelib/aiohttp/status.py b/packages/service-library/src/servicelib/aiohttp/status.py new file mode 100644 index 00000000000..2a38913adcc --- /dev/null +++ b/packages/service-library/src/servicelib/aiohttp/status.py @@ -0,0 +1,148 @@ +""" +HTTP codes: `aiohttp` alternative to `fastapi.status` + +See HTTP Status Code Registry: +https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml + +And RFC 2324 - https://tools.ietf.org/html/rfc2324 + + +These constants include both name and code and are more +informative than those found in http.HTTPStatus or aiohttp status_code +""" + +# +# Copied from https://github.com/encode/starlette/blob/master/starlette/status.py +# + +from __future__ import annotations + +__all__ = ( + "HTTP_100_CONTINUE", + "HTTP_101_SWITCHING_PROTOCOLS", + "HTTP_102_PROCESSING", + "HTTP_103_EARLY_HINTS", + "HTTP_200_OK", + "HTTP_201_CREATED", + "HTTP_202_ACCEPTED", + "HTTP_203_NON_AUTHORITATIVE_INFORMATION", + "HTTP_204_NO_CONTENT", + "HTTP_205_RESET_CONTENT", + "HTTP_206_PARTIAL_CONTENT", + "HTTP_207_MULTI_STATUS", + "HTTP_208_ALREADY_REPORTED", + "HTTP_226_IM_USED", + "HTTP_300_MULTIPLE_CHOICES", + "HTTP_301_MOVED_PERMANENTLY", + "HTTP_302_FOUND", + "HTTP_303_SEE_OTHER", + "HTTP_304_NOT_MODIFIED", + "HTTP_305_USE_PROXY", + "HTTP_306_RESERVED", + "HTTP_307_TEMPORARY_REDIRECT", + "HTTP_308_PERMANENT_REDIRECT", + "HTTP_400_BAD_REQUEST", + "HTTP_401_UNAUTHORIZED", + "HTTP_402_PAYMENT_REQUIRED", + "HTTP_403_FORBIDDEN", + "HTTP_404_NOT_FOUND", + "HTTP_405_METHOD_NOT_ALLOWED", + "HTTP_406_NOT_ACCEPTABLE", + "HTTP_407_PROXY_AUTHENTICATION_REQUIRED", + "HTTP_408_REQUEST_TIMEOUT", + "HTTP_409_CONFLICT", + "HTTP_410_GONE", + "HTTP_411_LENGTH_REQUIRED", + "HTTP_412_PRECONDITION_FAILED", + "HTTP_413_REQUEST_ENTITY_TOO_LARGE", + "HTTP_414_REQUEST_URI_TOO_LONG", + "HTTP_415_UNSUPPORTED_MEDIA_TYPE", + "HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE", + "HTTP_417_EXPECTATION_FAILED", + "HTTP_418_IM_A_TEAPOT", + "HTTP_421_MISDIRECTED_REQUEST", + "HTTP_422_UNPROCESSABLE_ENTITY", + "HTTP_423_LOCKED", + "HTTP_424_FAILED_DEPENDENCY", + "HTTP_425_TOO_EARLY", + "HTTP_426_UPGRADE_REQUIRED", + "HTTP_428_PRECONDITION_REQUIRED", + "HTTP_429_TOO_MANY_REQUESTS", + "HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE", + "HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS", + "HTTP_500_INTERNAL_SERVER_ERROR", + "HTTP_501_NOT_IMPLEMENTED", + "HTTP_502_BAD_GATEWAY", + "HTTP_503_SERVICE_UNAVAILABLE", + "HTTP_504_GATEWAY_TIMEOUT", + "HTTP_505_HTTP_VERSION_NOT_SUPPORTED", + "HTTP_506_VARIANT_ALSO_NEGOTIATES", + "HTTP_507_INSUFFICIENT_STORAGE", + "HTTP_508_LOOP_DETECTED", + "HTTP_510_NOT_EXTENDED", + "HTTP_511_NETWORK_AUTHENTICATION_REQUIRED", +) + +HTTP_100_CONTINUE = 100 +HTTP_101_SWITCHING_PROTOCOLS = 101 +HTTP_102_PROCESSING = 102 +HTTP_103_EARLY_HINTS = 103 +HTTP_200_OK = 200 +HTTP_201_CREATED = 201 +HTTP_202_ACCEPTED = 202 +HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203 +HTTP_204_NO_CONTENT = 204 +HTTP_205_RESET_CONTENT = 205 +HTTP_206_PARTIAL_CONTENT = 206 +HTTP_207_MULTI_STATUS = 207 +HTTP_208_ALREADY_REPORTED = 208 +HTTP_226_IM_USED = 226 +HTTP_300_MULTIPLE_CHOICES = 300 +HTTP_301_MOVED_PERMANENTLY = 301 +HTTP_302_FOUND = 302 +HTTP_303_SEE_OTHER = 303 +HTTP_304_NOT_MODIFIED = 304 +HTTP_305_USE_PROXY = 305 +HTTP_306_RESERVED = 306 +HTTP_307_TEMPORARY_REDIRECT = 307 
+HTTP_308_PERMANENT_REDIRECT = 308 +HTTP_400_BAD_REQUEST = 400 +HTTP_401_UNAUTHORIZED = 401 +HTTP_402_PAYMENT_REQUIRED = 402 +HTTP_403_FORBIDDEN = 403 +HTTP_404_NOT_FOUND = 404 +HTTP_405_METHOD_NOT_ALLOWED = 405 +HTTP_406_NOT_ACCEPTABLE = 406 +HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407 +HTTP_408_REQUEST_TIMEOUT = 408 +HTTP_409_CONFLICT = 409 +HTTP_410_GONE = 410 +HTTP_411_LENGTH_REQUIRED = 411 +HTTP_412_PRECONDITION_FAILED = 412 +HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413 +HTTP_414_REQUEST_URI_TOO_LONG = 414 +HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415 +HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416 +HTTP_417_EXPECTATION_FAILED = 417 +HTTP_418_IM_A_TEAPOT = 418 +HTTP_421_MISDIRECTED_REQUEST = 421 +HTTP_422_UNPROCESSABLE_ENTITY = 422 +HTTP_423_LOCKED = 423 +HTTP_424_FAILED_DEPENDENCY = 424 +HTTP_425_TOO_EARLY = 425 +HTTP_426_UPGRADE_REQUIRED = 426 +HTTP_428_PRECONDITION_REQUIRED = 428 +HTTP_429_TOO_MANY_REQUESTS = 429 +HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 +HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS = 451 +HTTP_500_INTERNAL_SERVER_ERROR = 500 +HTTP_501_NOT_IMPLEMENTED = 501 +HTTP_502_BAD_GATEWAY = 502 +HTTP_503_SERVICE_UNAVAILABLE = 503 +HTTP_504_GATEWAY_TIMEOUT = 504 +HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505 +HTTP_506_VARIANT_ALSO_NEGOTIATES = 506 +HTTP_507_INSUFFICIENT_STORAGE = 507 +HTTP_508_LOOP_DETECTED = 508 +HTTP_510_NOT_EXTENDED = 510 +HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511 diff --git a/packages/service-library/src/servicelib/aiohttp/tracing.py b/packages/service-library/src/servicelib/aiohttp/tracing.py index 305da56c7cf..1e41aab20f0 100644 --- a/packages/service-library/src/servicelib/aiohttp/tracing.py +++ b/packages/service-library/src/servicelib/aiohttp/tracing.py @@ -1,74 +1,187 @@ -""" Adds aiohttp middleware for tracing using zipkin server instrumentation. 
+"""Adds aiohttp middleware for tracing using opentelemetry instrumentation.""" -""" import logging -from typing import Iterable, Optional, Union +from collections.abc import AsyncIterator, Callable -import aiozipkin as az from aiohttp import web -from aiohttp.web import AbstractRoute -from aiozipkin.aiohttp_helpers import ( - APP_AIOZIPKIN_KEY, - REQUEST_AIOZIPKIN_KEY, - middleware_maker, +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.http.trace_exporter import ( + OTLPSpanExporter as OTLPSpanExporterHTTP, ) +from opentelemetry.instrumentation.aiohttp_client import ( # pylint:disable=no-name-in-module + AioHttpClientInstrumentor, +) +from opentelemetry.instrumentation.aiohttp_server import ( + middleware as aiohttp_server_opentelemetry_middleware, # pylint:disable=no-name-in-module +) +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from servicelib.logging_utils import log_context +from settings_library.tracing import TracingSettings from yarl import URL -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) +try: + from opentelemetry.instrumentation.botocore import ( # type: ignore[import-not-found] + BotocoreInstrumentor, + ) + + HAS_BOTOCORE = True +except ImportError: + HAS_BOTOCORE = False +try: + from opentelemetry.instrumentation.aiopg import AiopgInstrumentor + + HAS_AIOPG = True +except ImportError: + HAS_AIOPG = False +try: + from opentelemetry.instrumentation.requests import RequestsInstrumentor + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False -def setup_tracing( +try: + from opentelemetry.instrumentation.aio_pika import AioPikaInstrumentor + + HAS_AIO_PIKA = True +except ImportError: + HAS_AIO_PIKA = False + + +def _startup( app: web.Application, - *, + tracing_settings: TracingSettings, service_name: str, - host: str, - port: int, - jaeger_base_url: Union[URL, str], - skip_routes: Optional[Iterable[AbstractRoute]] = None, -) -> bool: +) -> None: """ - Sets up this service for a distributed tracing system - using zipkin (https://zipkin.io/) and Jaeger (https://www.jaegertracing.io/) + Sets up this service for a distributed tracing system (opentelemetry) """ - zipkin_address = URL(f"{jaeger_base_url}") / "api/v2/spans" + _ = app + opentelemetry_collector_endpoint = ( + f"{tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT}" + ) + opentelemetry_collector_port = tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_PORT + if not opentelemetry_collector_endpoint and not opentelemetry_collector_port: + _logger.warning("Skipping opentelemetry tracing setup") + return + if not opentelemetry_collector_endpoint or not opentelemetry_collector_port: + msg = ( + "Variable opentelemetry_collector_endpoint " + f"[{tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT}] " + "or opentelemetry_collector_port " + f"[{tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_PORT}] " + "unset. Provide both or remove both." 
+ ) + raise RuntimeError(msg) + resource = Resource(attributes={"service.name": service_name}) + trace.set_tracer_provider(TracerProvider(resource=resource)) + tracer_provider: trace.TracerProvider = trace.get_tracer_provider() + + tracing_destination: str = ( + f"{URL(opentelemetry_collector_endpoint).with_port(opentelemetry_collector_port).with_path('/v1/traces')}" + ) - log.debug( - "Setting up tracing for %s at %s:%d -> %s", + _logger.info( + "Trying to connect service %s to tracing collector at %s.", service_name, - host, - port, - zipkin_address, + tracing_destination, ) - endpoint = az.create_endpoint(service_name, ipv4=host, port=port) + otlp_exporter = OTLPSpanExporterHTTP( + endpoint=tracing_destination, + ) - # TODO: move away from aiozipkin to OpenTelemetrySDK - # https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/asgi/asgi.html - # see issue [#2715](https://github.com/ITISFoundation/osparc-simcore/issues/2715) - # creates / closes tracer - async def _tracer_cleanup_context(app: web.Application): + # Add the span processor to the tracer provider + tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter)) # type: ignore[attr-defined] # https://github.com/open-telemetry/opentelemetry-python/issues/3713 + # Instrument aiohttp server + # Explanation for custom middleware call DK 10/2024: + # OpenTelemetry Aiohttp autoinstrumentation is meant to be used by only calling `AioHttpServerInstrumentor().instrument()` + # The call `AioHttpServerInstrumentor().instrument()` monkeypatches the __init__() of aiohttp's web.application() to inject the tracing middleware, in it's `__init__()`. + # In simcore, we want to switch tracing on or off using the simcore-settings-library. + # The simcore-settings library in turn depends on the instance of web.application(), i.e. the aiohttp webserver, to exist. So here we face a hen-and-egg problem. 
+ # At the time when the instrumentation should be configured, the instance of web.application already exists and the overwrite to the __init__() is never called + # + # Since the code that is provided (monkeypatched) in the __init__ that the opentelemetry-autoinstrumentation-library provides is only 4 lines, + # just adding a middleware, we are free to simply execute this "missed call" [since we can't call the monkeypatch'ed __init__()] in this following line: + app.middlewares.insert(0, aiohttp_server_opentelemetry_middleware) + # Code of the aiohttp server instrumentation: github.com/open-telemetry/opentelemetry-python-contrib/blob/eccb05c808a7d797ef5b6ecefed3590664426fbf/instrumentation/opentelemetry-instrumentation-aiohttp-server/src/opentelemetry/instrumentation/aiohttp_server/__init__.py#L246 + # For reference, the above statement was written for: + # - osparc-simcore 1.77.x + # - opentelemetry-api==1.27.0 + # - opentelemetry-instrumentation==0.48b0 - app[APP_AIOZIPKIN_KEY] = await az.create( - f"{zipkin_address}", endpoint, sample_rate=1.0 - ) + # Instrument aiohttp client + AioHttpClientInstrumentor().instrument() + if HAS_AIOPG: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add aio-pg opentelemetry autoinstrumentation...", + ): + AiopgInstrumentor().instrument() + if HAS_BOTOCORE: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add botocore opentelemetry autoinstrumentation...", + ): + BotocoreInstrumentor().instrument() + if HAS_REQUESTS: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add requests opentelemetry autoinstrumentation...", + ): + RequestsInstrumentor().instrument() - yield + if HAS_AIO_PIKA: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add aio_pika opentelemetry autoinstrumentation...", + ): + AioPikaInstrumentor().instrument() - if APP_AIOZIPKIN_KEY in app: - await app[APP_AIOZIPKIN_KEY].close() - app.cleanup_ctx.append(_tracer_cleanup_context) +def _shutdown() -> None: + """Uninstruments all opentelemetry instrumentors that were instrumented.""" + try: + AioHttpClientInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AioHttpClientInstrumentor") + if HAS_AIOPG: + try: + AiopgInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AiopgInstrumentor") + if HAS_BOTOCORE: + try: + BotocoreInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument BotocoreInstrumentor") + if HAS_REQUESTS: + try: + RequestsInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument RequestsInstrumentor") + if HAS_AIO_PIKA: + try: + AioPikaInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AioPikaInstrumentor") - # adds middleware to tag spans (when used, tracer should be ready) - m = middleware_maker( - skip_routes=skip_routes, - tracer_key=APP_AIOZIPKIN_KEY, - request_key=REQUEST_AIOZIPKIN_KEY, - ) - app.middlewares.append(m) - # # WARNING: adds a middleware that should be the outermost since - # # it expects stream responses while we allow data returns from a handler - # az.setup(app, tracer, skip_routes=skip_routes) +def get_tracing_lifespan( + app: web.Application, tracing_settings: TracingSettings, service_name: 
str +) -> Callable[[web.Application], AsyncIterator]: + _startup(app=app, tracing_settings=tracing_settings, service_name=service_name) + + async def tracing_lifespan(app: web.Application): + assert app # nosec + yield + _shutdown() - return True + return tracing_lifespan diff --git a/packages/service-library/src/servicelib/aiohttp/typing_extension.py b/packages/service-library/src/servicelib/aiohttp/typing_extension.py index ea9cce52844..6b3cab161ef 100644 --- a/packages/service-library/src/servicelib/aiohttp/typing_extension.py +++ b/packages/service-library/src/servicelib/aiohttp/typing_extension.py @@ -1,7 +1,24 @@ -from typing import Awaitable, Callable +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Any, TypeAlias -from aiohttp.web import Request, StreamResponse +from aiohttp import web +from aiohttp.typedefs import Handler -# Taken from aiohttp.web_middlewares import _Handler, _Middleware -Handler = Callable[[Request], Awaitable[StreamResponse]] -Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] +try: + from aiohttp.typedefs import Middleware +except ImportError: + # For older versions + # Taken from aiohttp.web_middlewares import _Handler, _Middleware + Middleware: TypeAlias = Callable[ # type: ignore[no-redef] + [web.Request, Handler], Awaitable[web.StreamResponse] + ] + + +__all__: tuple[str, ...] = ( + "Handler", + "Middleware", +) + + +HandlerAnyReturn: TypeAlias = Callable[[web.Request], Awaitable[Any]] +CleanupContextFunc: TypeAlias = Callable[[web.Application], AsyncIterator[None]] diff --git a/packages/service-library/src/servicelib/aiohttp/web_exceptions_extension.py b/packages/service-library/src/servicelib/aiohttp/web_exceptions_extension.py index be16e486bb4..eaf0cfc42a3 100644 --- a/packages/service-library/src/servicelib/aiohttp/web_exceptions_extension.py +++ b/packages/service-library/src/servicelib/aiohttp/web_exceptions_extension.py @@ -1,6 +1,85 @@ -from aiohttp.web_exceptions import HTTPClientError +""" Extends `aiohttp.web_exceptions` classes to match `status` codes + and adds helper functions. +""" +import inspect +from typing import Any, TypeVar -class HTTPLocked(HTTPClientError): +from aiohttp import web_exceptions +from aiohttp.web_exceptions import ( + HTTPClientError, + HTTPError, + HTTPException, + HTTPServerError, +) + +from . 
import status + +assert issubclass(HTTPError, HTTPException) # nosec + +# NOTE: these are the status codes that DO NOT have an aiohttp.HTTPException associated +STATUS_CODES_WITHOUT_AIOHTTP_EXCEPTION_CLASS = ( + status.HTTP_100_CONTINUE, + status.HTTP_101_SWITCHING_PROTOCOLS, + status.HTTP_102_PROCESSING, + status.HTTP_103_EARLY_HINTS, + status.HTTP_207_MULTI_STATUS, + status.HTTP_208_ALREADY_REPORTED, + status.HTTP_226_IM_USED, + status.HTTP_306_RESERVED, + status.HTTP_418_IM_A_TEAPOT, + status.HTTP_425_TOO_EARLY, +) + + +class HTTPLockedError(HTTPClientError): # pylint: disable=too-many-ancestors - status_code = 423 + status_code = status.HTTP_423_LOCKED + + +class HTTPLoopDetectedError(HTTPServerError): + # pylint: disable=too-many-ancestors + status_code = status.HTTP_508_LOOP_DETECTED + + +E = TypeVar("E", bound="HTTPException") + + +def get_all_aiohttp_http_exceptions( + base_http_exception_cls: type[E], +) -> dict[int, type[E]]: + # Inverse map from code to HTTPException classes + + def _pred(obj) -> bool: + return ( + inspect.isclass(obj) + and issubclass(obj, base_http_exception_cls) + and getattr(obj, "status_code", 0) > 0 + ) + + found: list[tuple[str, Any]] = inspect.getmembers(web_exceptions, _pred) + assert found # nosec + + status_to_http_exception_map = {cls.status_code: cls for _, cls in found} + assert len(status_to_http_exception_map) == len(found), "No duplicates" # nosec + + for cls in ( + HTTPLockedError, + HTTPLoopDetectedError, + ): + status_to_http_exception_map[cls.status_code] = cls + + return status_to_http_exception_map + + +_STATUS_CODE_TO_HTTP_ERRORS: dict[ + int, type[HTTPError] +] = get_all_aiohttp_http_exceptions(HTTPError) + + +def get_http_error_class_or_none(status_code: int) -> type[HTTPError] | None: + """Returns aiohttp error class corresponding to a 4XX or 5XX status code + + NOTE: any non-error code (i.e.
1XX, 2XX and 3XX) will return None + """ + return _STATUS_CODE_TO_HTTP_ERRORS.get(status_code) diff --git a/packages/service-library/src/servicelib/archiving_utils.py b/packages/service-library/src/servicelib/archiving_utils.py deleted file mode 100644 index e5cc3bbee72..00000000000 --- a/packages/service-library/src/servicelib/archiving_utils.py +++ /dev/null @@ -1,469 +0,0 @@ -import asyncio -import fnmatch -import functools -import logging -import types -import zipfile -from contextlib import AsyncExitStack, contextmanager -from functools import partial -from pathlib import Path -from typing import Awaitable, Callable, Final, Iterator, Optional - -import tqdm -from servicelib.logging_utils import log_catch -from servicelib.progress_bar import ProgressBarData -from tqdm.contrib.logging import logging_redirect_tqdm, tqdm_logging_redirect - -from .file_utils import remove_directory -from .pools import non_blocking_process_pool_executor, non_blocking_thread_pool_executor - -_MIN: Final[int] = 60 # secs -_MAX_UNARCHIVING_WORKER_COUNT: Final[int] = 2 -_CHUNK_SIZE: Final[int] = 1024 * 8 - -log = logging.getLogger(__name__) - - -class ArchiveError(Exception): - """ - Error raised while archiving or unarchiving - """ - - -def _human_readable_size(size, decimal_places=3): - human_readable_file_size = float(size) - unit = "B" - for t_unit in ["B", "KiB", "MiB", "GiB", "TiB"]: - if human_readable_file_size < 1024.0: - unit = t_unit - break - human_readable_file_size /= 1024.0 - - return f"{human_readable_file_size:.{decimal_places}f}{unit}" - - -def _compute_tqdm_miniters(byte_size: int) -> float: - """ensures tqdm minimal iteration is 1 %""" - return min(byte_size / 100.0, 1.0) - - -def _strip_undecodable_in_path(path: Path) -> Path: - return Path(str(path).encode(errors="replace").decode("utf-8")) - - -def _iter_files_to_compress( - dir_path: Path, exclude_patterns: Optional[set[str]] -) -> Iterator[Path]: - exclude_patterns = exclude_patterns if exclude_patterns else set() - for path in dir_path.rglob("*"): - if path.is_file() and not any( - fnmatch.fnmatch(f"{path}", x) for x in exclude_patterns - ): - yield path - - -def _strip_directory_from_path(input_path: Path, to_strip: Path) -> Path: - _to_strip = f"{str(to_strip)}/" - return Path(str(input_path).replace(_to_strip, "")) - - -class _FastZipFileReader(zipfile.ZipFile): - """ - Used to gain a speed boost of several orders of magnitude. - - When opening archives the `_RealGetContents` is called - generating the list of files contained in the zip archive. - This is done by the constructor. - - If the archive contains a very large amount, the file scan operation - can take up to seconds. This was observed with 10000+ files. - - When opening the zip file in the background worker the entire file - list generation can be skipped because the `zipfile.ZipFile.open` - is used passing `ZipInfo` object as file to decompress. - Using a `ZipInfo` object does nto require to have the list of - files contained in the archive.
- """ - - def _RealGetContents(self): - """method disabled""" - - -_TQDM_FILE_OPTIONS = dict( - unit="byte", - unit_scale=True, - unit_divisor=1024, - colour="yellow", - miniters=1, -) -_TQDM_MULTI_FILES_OPTIONS = _TQDM_FILE_OPTIONS | dict( - unit="file", - unit_divisor=1000, -) - - -def _zipfile_single_file_extract_worker( - zip_file_path: Path, - file_in_archive: zipfile.ZipInfo, - destination_folder: Path, - is_dir: bool, -) -> Path: - """Extracts file_in_archive from the archive zip_file_path -> destination_folder/file_in_archive - - Extracts in chunks to avoid memory pressure on zip/unzip - returns: a path to extracted file or directory - """ - with _FastZipFileReader(zip_file_path) as zf: - # assemble destination and ensure it exits - destination_path = destination_folder / file_in_archive.filename - - if is_dir: - destination_path.mkdir(parents=True, exist_ok=True) - return destination_path - desc = f"decompressing {zip_file_path}:{file_in_archive.filename} -> {destination_path}\n" - with zf.open(name=file_in_archive) as zip_fp, destination_path.open( - "wb" - ) as dest_fp, tqdm_logging_redirect( - total=file_in_archive.file_size, - desc=desc, - **( - _TQDM_FILE_OPTIONS - | dict(miniters=_compute_tqdm_miniters(file_in_archive.file_size)) - ), - ) as pbar: - while chunk := zip_fp.read(_CHUNK_SIZE): - dest_fp.write(chunk) - pbar.update(len(chunk)) - return destination_path - - -def _ensure_destination_subdirectories_exist( - zip_file_handler: zipfile.ZipFile, destination_folder: Path -) -> None: - # assemble full destination paths - full_destination_paths = { - destination_folder / entry.filename for entry in zip_file_handler.infolist() - } - # extract all possible subdirectories - subdirectories = {x.parent for x in full_destination_paths} - # create all subdirectories before extracting - for subdirectory in subdirectories: - Path(subdirectory).mkdir(parents=True, exist_ok=True) - - -async def unarchive_dir( - archive_to_extract: Path, - destination_folder: Path, - *, - max_workers: int = _MAX_UNARCHIVING_WORKER_COUNT, - progress_bar: Optional[ProgressBarData] = None, - log_cb: Optional[Callable[[str], Awaitable[None]]] = None, -) -> set[Path]: - """Extracts zipped file archive_to_extract to destination_folder, - preserving all relative files and folders inside the archive - - Returns a set with all the paths extracted from archive. 
It includes - all tree leafs, which might include files or empty folders - - - NOTE: ``destination_folder`` is fully deleted after error - - ::raise ArchiveError - """ - if not progress_bar: - progress_bar = ProgressBarData(steps=1) - async with AsyncExitStack() as zip_stack: - zip_file_handler = zip_stack.enter_context( - zipfile.ZipFile( # pylint: disable=consider-using-with - archive_to_extract, - mode="r", - ) - ) - zip_stack.enter_context(logging_redirect_tqdm()) - process_pool = zip_stack.enter_context( - non_blocking_process_pool_executor(max_workers=max_workers) - ) - - # running in process poll is not ideal for concurrency issues - # to avoid race conditions all subdirectories where files will be extracted need to exist - # creating them before the extraction is under way avoids the issue - # the following avoids race conditions while unzippin in parallel - _ensure_destination_subdirectories_exist( - zip_file_handler=zip_file_handler, - destination_folder=destination_folder, - ) - - futures: list[asyncio.Future] = [ - asyncio.get_event_loop().run_in_executor( - process_pool, - # --------- - _zipfile_single_file_extract_worker, - archive_to_extract, - zip_entry, - destination_folder, - zip_entry.is_dir(), - ) - for zip_entry in zip_file_handler.infolist() - ] - - try: - extracted_paths: list[Path] = [] - total_file_size = sum( - zip_entry.file_size for zip_entry in zip_file_handler.infolist() - ) - async with AsyncExitStack() as progress_stack: - sub_prog = await progress_stack.enter_async_context( - progress_bar.sub_progress(steps=total_file_size) - ) - tqdm_progress = progress_stack.enter_context( - tqdm.tqdm( - desc=f"decompressing {archive_to_extract} -> {destination_folder} [{len(futures)} file{'s' if len(futures) > 1 else ''}" - f"/{_human_readable_size(archive_to_extract.stat().st_size)}]\n", - total=total_file_size, - **_TQDM_MULTI_FILES_OPTIONS, - ) - ) - for future in asyncio.as_completed(futures): - extracted_path = await future - extracted_file_size = extracted_path.stat().st_size - if tqdm_progress.update(extracted_file_size) and log_cb: - with log_catch(log, reraise=False): - await log_cb(f"{tqdm_progress}") - await sub_prog.update(extracted_file_size) - extracted_paths.append(extracted_path) - - except Exception as err: - for f in futures: - f.cancel() - - # wait until all tasks are cancelled - await asyncio.wait( - futures, timeout=2 * _MIN, return_when=asyncio.ALL_COMPLETED - ) - - # now we can cleanup - if destination_folder.exists() and destination_folder.is_dir(): - await remove_directory(destination_folder, ignore_errors=True) - - raise ArchiveError( - f"Failed unarchiving {archive_to_extract} -> {destination_folder} due to {type(err)}." 
- f"Details: {err}" - ) from err - - - # NOTE: extracted_paths includes all tree leafs, which might include files and empty folders - return { - p - for p in extracted_paths - if p.is_file() or (p.is_dir() and not any(p.glob("*"))) - } - - -@contextmanager -def _progress_enabled_zip_write_handler( - zip_file_handler: zipfile.ZipFile, progress_bar: tqdm.tqdm -) -> Iterator[zipfile.ZipFile]: - """This function overrides the default zip write fct to allow to get progress using tqdm library""" - - def _write_with_progress( - original_write_fct, self, data, pbar # pylint: disable=unused-argument - ): - pbar.update(len(data)) - return original_write_fct(data) - - # Replace original write() with a wrapper to track progress - assert zip_file_handler.fp # nosec - old_write_method = zip_file_handler.fp.write - zip_file_handler.fp.write = types.MethodType( - partial(_write_with_progress, old_write_method, pbar=progress_bar), - zip_file_handler.fp, - ) - try: - yield zip_file_handler - finally: - zip_file_handler.fp.write = old_write_method - - -def _add_to_archive( - dir_to_compress: Path, - destination: Path, - compress: bool, - store_relative_path: bool, - update_progress, - loop, - exclude_patterns: Optional[set[str]] = None, -) -> None: - compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED - folder_size_bytes = sum( - file.stat().st_size - for file in _iter_files_to_compress(dir_to_compress, exclude_patterns) - ) - desc = f"compressing {dir_to_compress} -> {destination}" - with tqdm_logging_redirect( - desc=f"{desc}\n", - total=folder_size_bytes, - **( - _TQDM_FILE_OPTIONS - | dict(miniters=_compute_tqdm_miniters(folder_size_bytes)) - ), - ) as progress_bar, _progress_enabled_zip_write_handler( - zipfile.ZipFile(destination, "w", compression=compression), progress_bar - ) as zip_file_handler: - for file_to_add in _iter_files_to_compress(dir_to_compress, exclude_patterns): - progress_bar.set_description(f"{desc}/{file_to_add.name}\n") - file_name_in_archive = ( - _strip_directory_from_path(file_to_add, dir_to_compress) - if store_relative_path - else file_to_add - ) - - # because surrogates are not allowed in zip files, - # replacing them will ensure errors will not happen. - escaped_file_name_in_archive = _strip_undecodable_in_path( - file_name_in_archive - ) - - zip_file_handler.write(file_to_add, escaped_file_name_in_archive) - asyncio.run_coroutine_threadsafe( - update_progress(file_to_add.stat().st_size), loop - ) - - -async def _update_progress(prog: ProgressBarData, delta: float) -> None: - await prog.update(delta) - - -async def archive_dir( - dir_to_compress: Path, - destination: Path, - *, - compress: bool, - store_relative_path: bool, - exclude_patterns: Optional[set[str]] = None, - progress_bar: Optional[ProgressBarData] = None, -) -> None: - """ - When archiving, undecodable bytes in filenames will be escaped, - zipfile does not like them. - When unarchiveing, the **escaped version** of the file names - will be created. - - The **exclude_patterns** is a set of patterns created using - Unix shell-style wildcards to exclude files and directories. 
- - destination: Path deleted if errors - - ::raise ArchiveError - """ - if not progress_bar: - progress_bar = ProgressBarData(steps=1) - - async with AsyncExitStack() as stack: - - folder_size_bytes = sum( - file.stat().st_size - for file in _iter_files_to_compress(dir_to_compress, exclude_patterns) - ) - sub_progress = await stack.enter_async_context( - progress_bar.sub_progress(folder_size_bytes) - ) - thread_pool = stack.enter_context( - non_blocking_thread_pool_executor(max_workers=1) - ) - try: - await asyncio.get_event_loop().run_in_executor( - thread_pool, - # --------- - _add_to_archive, - dir_to_compress, - destination, - compress, - store_relative_path, - functools.partial(_update_progress, sub_progress), - asyncio.get_event_loop(), - exclude_patterns, - ) - except Exception as err: - if destination.is_file(): - destination.unlink(missing_ok=True) - - raise ArchiveError( - f"Failed archiving {dir_to_compress} -> {destination} due to {type(err)}." - f"Details: {err}" - ) from err - - except BaseException: - if destination.is_file(): - destination.unlink(missing_ok=True) - raise - - -def is_leaf_path(p: Path) -> bool: - """Tests whether a path corresponds to a file or empty folder, i.e. - some leaf item in a file-system tree structure - """ - return p.is_file() or (p.is_dir() and not any(p.glob("*"))) - - -class PrunableFolder: - """ - Use in conjunction with unarchive on the dest_dir to achieve - an update of a folder content without deleting updated files - - folder = PrunableFolder(target_dir) - - unarchived = await archive_dir(destination=target_dir, ... ) - - folder.prune(exclude=unarchived) - - """ - - def __init__(self, folder: Path): - self.basedir = folder - self.before_relpaths = set() - self.capture() - - def capture(self) -> None: - # captures leaf paths in folder at this moment - self.before_relpaths = { - p.relative_to(self.basedir) - for p in self.basedir.rglob("*") - if is_leaf_path(p) - } - - def prune(self, exclude: set[Path]) -> None: - """ - Deletes all paths in folder skipping the exclude set - """ - assert all(self.basedir in p.parents for p in exclude) # nosec - - after_relpaths = {p.relative_to(self.basedir) for p in exclude} - to_delete = self.before_relpaths.difference(after_relpaths) - - for p in to_delete: - path = self.basedir / p - assert path.exists() # nosec - - if path.is_file(): - path.unlink() - elif path.is_dir(): - try: - path.rmdir() - except OSError: - # prevents deleting non-empty folders - pass - - # second pass to delete empty folders - # after deleting files, some folders might have been left empty - for p in self.basedir.rglob("*"): - if p.is_dir() and p not in exclude and not any(p.glob("*")): - p.rmdir() - - -__all__ = ( - "archive_dir", - "ArchiveError", - "is_leaf_path", - "PrunableFolder", - "unarchive_dir", -) diff --git a/packages/service-library/src/servicelib/archiving_utils/__init__.py b/packages/service-library/src/servicelib/archiving_utils/__init__.py new file mode 100644 index 00000000000..ae6e3cdc80b --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/__init__.py @@ -0,0 +1,11 @@ +from ._errors import ArchiveError +from ._interface_7zip import archive_dir, unarchive_dir +from ._prunable_folder import PrunableFolder, is_leaf_path + +__all__ = ( + "archive_dir", + "ArchiveError", + "is_leaf_path", + "PrunableFolder", + "unarchive_dir", +) diff --git a/packages/service-library/src/servicelib/archiving_utils/_errors.py b/packages/service-library/src/servicelib/archiving_utils/_errors.py new file mode 
100644 index 00000000000..77c3910427d --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/_errors.py @@ -0,0 +1,21 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class ArchiveError(OsparcErrorMixin, Exception): + """base error class""" + + +class CouldNotFindValueError(ArchiveError): + msg_template = "Unexpected value for '{field_name}'. Should not be None" + + +class CouldNotRunCommandError(ArchiveError): + msg_template = "Command '{command}' failed with error:\n{command_output}" + + +class TableHeaderNotFoundError(ArchiveError): + msg_template = ( + "Excepted to detect a table header since files were detected " + "file_lines='{file_lines}'. " + "Command output:\n{command_output}" + ) diff --git a/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py b/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py new file mode 100644 index 00000000000..1e642895f1d --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/_interface_7zip.py @@ -0,0 +1,337 @@ +import asyncio +import asyncio.subprocess +import logging +import os +import re +import shlex +from collections.abc import Awaitable, Callable +from contextlib import AsyncExitStack +from pathlib import Path +from typing import Final + +import tqdm +from pydantic import NonNegativeInt +from servicelib.logging_utils import log_catch +from tqdm.contrib.logging import tqdm_logging_redirect + +from ..file_utils import shutil_move +from ..progress_bar import ProgressBarData +from ._errors import ( + CouldNotFindValueError, + CouldNotRunCommandError, + TableHeaderNotFoundError, +) +from ._tdqm_utils import ( + TQDM_FILE_OPTIONS, + TQDM_MULTI_FILES_OPTIONS, + compute_tqdm_miniters, + human_readable_size, +) +from ._utils import iter_files_to_compress + +_logger = logging.getLogger(__name__) + +_TOTAL_BYTES_RE: Final[re.Pattern] = re.compile(r" (\d+)\s*bytes") +_FILE_COUNT_RE: Final[re.Pattern] = re.compile(r" (\d+)\s*files") +_PROGRESS_FIND_PERCENT_RE: Final[re.Pattern] = re.compile(r" (?:100|\d?\d)% ") +_PROGRESS_EXTRACT_PERCENT_RE: Final[re.Pattern] = re.compile(r" (\d+)% ") +_ALL_DONE_RE: Final[re.Pattern] = re.compile(r"Everything is Ok", re.IGNORECASE) + +_TOKEN_TABLE_HEADER_START: Final[str] = "------------------- " + +_7ZIP_EXECUTABLE: Final[Path] = Path("/usr/bin/7z") + + +class _7ZipArchiveInfoParser: # noqa: N801 + def __init__(self) -> None: + self.total_bytes: NonNegativeInt | None = None + self.file_count: NonNegativeInt | None = None + + async def parse_chunk(self, chunk: str) -> None: + # search for ` NUMBER bytes ` -> set byte size + if self.total_bytes is None and (match := _TOTAL_BYTES_RE.search(chunk)): + self.total_bytes = int(match.group(1)) + + # search for ` NUMBER files` -> set file count + if self.file_count is None and (match := _FILE_COUNT_RE.search(chunk)): + self.file_count = int(match.group(1)) + + def get_parsed_values(self) -> tuple[NonNegativeInt, NonNegativeInt]: + if self.total_bytes is None: + raise CouldNotFindValueError(field_name="total_bytes") + + if self.file_count is None: + raise CouldNotFindValueError(field_name="file_count") + + return (self.total_bytes, self.file_count) + + +class _7ZipProgressParser: # noqa: N801 + def __init__( + self, progress_handler: Callable[[NonNegativeInt], Awaitable[None]] + ) -> None: + self.progress_handler = progress_handler + self.total_bytes: NonNegativeInt | None = None + + # in range 0% -> 100% + self.percent: NonNegativeInt | None = None + self.finished: bool = False 
+ self.finished_emitted: bool = False + + self.emitted_total: NonNegativeInt = 0 + + def _parse_progress(self, chunk: str) -> None: + # search for " NUMBER bytes" -> set byte size + if self.total_bytes is None and (match := _TOTAL_BYTES_RE.search(chunk)): + self.total_bytes = int(match.group(1)) + + # search for ` dd% ` -> update progress (as last entry inside the string) + if matches := _PROGRESS_FIND_PERCENT_RE.findall(chunk): # noqa: SIM102 + if percent_match := _PROGRESS_EXTRACT_PERCENT_RE.search(matches[-1]): + self.percent = int(percent_match.group(1)) + + # search for `Everything is Ok` -> set 100% and finish + if _ALL_DONE_RE.search(chunk): + self.finished = True + + async def parse_chunk(self, chunk: str) -> None: + self._parse_progress(chunk) + + if self.total_bytes is not None and self.percent is not None: + # total bytes decompressed + current_bytes_progress = int(self.percent * self.total_bytes / 100) + + # only emit an update if something changed since before + bytes_diff = current_bytes_progress - self.emitted_total + if self.emitted_total == 0 or bytes_diff > 0: + await self.progress_handler(bytes_diff) + + self.emitted_total = current_bytes_progress + + # if finished emit the remaining diff + if self.total_bytes and self.finished and not self.finished_emitted: + + await self.progress_handler(self.total_bytes - self.emitted_total) + self.finished_emitted = True + + +async def _stream_output_reader( + stream: asyncio.StreamReader, + *, + output_handler: Callable[[str], Awaitable[None]] | None, + chunk_size: NonNegativeInt = 16, + lookbehind_buffer_size: NonNegativeInt = 40, +) -> str: + # NOTE: content is not read line by line but chunk by chunk to avoid missing progress updates + # small chunks are read and of size `chunk_size` and a bigger chunk of + # size `lookbehind_buffer_size` + `chunk_size` is emitted + # The goal is to not split any important search in half, thus giving a chance to the + # `output_handlers` to properly handle it + + # NOTE: at the time of writing this, the biggest possible string to search for and capture would be: + # ~`9.1TiB` -> literally ` 9999999999999 bytes ` equal to 21 characters to capture, + # with the above defaults the "emitted chunk" is more than double that size + # There are no foreseeable issues due to the size of the inputs to be captured.
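    # Worked illustration (editorial sketch only, not part of this change; the sample
    # strings below are hypothetical 7z output): with chunk_size=16 a token such as
    # " 37% " can be split across two reads, e.g.
    #     previous chunk ends with ... "extracting  3"
    #     next chunk starts with ..... "7% 12 - data.bin"
    # and neither piece alone matches _PROGRESS_FIND_PERCENT_RE. Because each emission
    # is `lookbehind_buffer + chunk`, the tail of the previous read is prepended and the
    # handler sees "extracting  37% 12 - data.bin", where the token " 37% " is whole
    # again, so the progress update is not lost.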
+ + command_output = "" + lookbehind_buffer = "" + + while True: + read_chunk = await stream.read(chunk_size) + + if not read_chunk: + # process remaining buffer if any + if lookbehind_buffer and output_handler: + await output_handler(lookbehind_buffer) + break + + # `errors=replace`: avoids getting stuck when can't parse utf-8 + chunk = read_chunk.decode("utf-8", errors="replace") + + command_output += chunk + chunk_to_emit = lookbehind_buffer + chunk + lookbehind_buffer = chunk_to_emit[-lookbehind_buffer_size:] + + if output_handler: + await output_handler(chunk_to_emit) + + return command_output + + +async def _run_cli_command( + command: str, + *, + output_handler: Callable[[str], Awaitable[None]] | None = None, +) -> str: + """ + Raises: + ArchiveError: when it fails to execute the command + """ + + process = await asyncio.create_subprocess_shell( + command, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + assert process.stdout # nosec + + command_output, _ = await asyncio.gather( + asyncio.create_task( + _stream_output_reader(process.stdout, output_handler=output_handler) + ), + process.wait(), + ) + + if process.returncode != os.EX_OK: + raise CouldNotRunCommandError(command=command, command_output=command_output) + + return command_output + + +async def archive_dir( + dir_to_compress: Path, + destination: Path, + *, + compress: bool, + progress_bar: ProgressBarData | None = None, +) -> None: + if progress_bar is None: + progress_bar = ProgressBarData( + num_steps=1, description=f"compressing {dir_to_compress.name}" + ) + + options = " ".join( + [ + "a", # archive + "-tzip", # type of archive + "-bsp1", # used for parsing progress + f"-mx={9 if compress else 0}", # compression level + # guarantees archive reproducibility + "-r", # recurse into subdirectories if needed. 
+ "-mtm=off", # Don't store last modification time + "-mtc=off", # Don't store file creation time + "-mta=off", # Don't store file access time + ] + ) + command = f"{_7ZIP_EXECUTABLE} {options} {shlex.quote(f'{destination}')} {shlex.quote(f'{dir_to_compress}')}/*" + + folder_size_bytes = sum( + file.stat().st_size for file in iter_files_to_compress(dir_to_compress) + ) + + async with AsyncExitStack() as exit_stack: + sub_progress = await exit_stack.enter_async_context( + progress_bar.sub_progress(folder_size_bytes, description="...") + ) + + tqdm_progress = exit_stack.enter_context( + tqdm_logging_redirect( + desc=f"compressing {dir_to_compress} -> {destination}\n", + total=folder_size_bytes, + **( + TQDM_FILE_OPTIONS + | {"miniters": compute_tqdm_miniters(folder_size_bytes)} + ), + ) + ) + + async def _compressed_bytes(byte_progress: NonNegativeInt) -> None: + tqdm_progress.update(byte_progress) + await sub_progress.update(byte_progress) + + await _run_cli_command( + command, output_handler=_7ZipProgressParser(_compressed_bytes).parse_chunk + ) + + # 7zip automatically adds .zip extension if it's missing form the archive name + if not destination.exists(): + await shutil_move(f"{destination}.zip", destination) + + +def _is_folder(line: str) -> bool: + folder_attribute = line[20] + return folder_attribute == "D" + + +def _extract_file_names_from_archive(command_output: str) -> set[str]: + file_name_start: NonNegativeInt | None = None + + entries_lines: list[str] = [] + can_add_to_entries: bool = False + + # extract all lines containing files or folders + for line in command_output.splitlines(): + if line.startswith(_TOKEN_TABLE_HEADER_START): + file_name_start = line.rfind(" ") + 1 + can_add_to_entries = not can_add_to_entries + continue + + if can_add_to_entries: + entries_lines.append(line) + + file_lines: list[str] = [line for line in entries_lines if not _is_folder(line)] + + if file_lines and file_name_start is None: + raise TableHeaderNotFoundError( + file_lines=file_lines, command_output=command_output + ) + + return {line[file_name_start:] for line in file_lines} + + +async def unarchive_dir( + archive_to_extract: Path, + destination_folder: Path, + *, + progress_bar: ProgressBarData | None = None, + log_cb: Callable[[str], Awaitable[None]] | None = None, +) -> set[Path]: + if progress_bar is None: + progress_bar = ProgressBarData( + num_steps=1, description=f"extracting {archive_to_extract.name}" + ) + + # get archive information + archive_info_parser = _7ZipArchiveInfoParser() + list_output = await _run_cli_command( + f"{_7ZIP_EXECUTABLE} l {shlex.quote(f'{archive_to_extract}')}", + output_handler=archive_info_parser.parse_chunk, + ) + file_names_in_archive = _extract_file_names_from_archive(list_output) + total_bytes, file_count = archive_info_parser.get_parsed_values() + + async with AsyncExitStack() as exit_stack: + sub_prog = await exit_stack.enter_async_context( + progress_bar.sub_progress(steps=total_bytes, description="...") + ) + + tqdm_progress = exit_stack.enter_context( + tqdm.tqdm( + desc=f"decompressing {archive_to_extract} -> {destination_folder} [{file_count} file{'' if file_count == 1 else 's'}" + f"/{human_readable_size(archive_to_extract.stat().st_size)}]\n", + total=total_bytes, + **TQDM_MULTI_FILES_OPTIONS, + ) + ) + + # extract archive + async def _decompressed_bytes(byte_progress: NonNegativeInt) -> None: + if tqdm_progress.update(byte_progress) and log_cb: + with log_catch(_logger, reraise=False): + await log_cb(f"{tqdm_progress}") + await 
sub_prog.update(byte_progress) + + options = " ".join( + [ + "x", # extract + "-bsp1", # used for parsing progress + "-y", # reply yes to all + ] + ) + await _run_cli_command( + f"{_7ZIP_EXECUTABLE} {options} {shlex.quote(f'{archive_to_extract}')} -o{shlex.quote(f'{destination_folder}')}", + output_handler=_7ZipProgressParser(_decompressed_bytes).parse_chunk, + ) + + return {destination_folder / x for x in file_names_in_archive} diff --git a/packages/service-library/src/servicelib/archiving_utils/_prunable_folder.py b/packages/service-library/src/servicelib/archiving_utils/_prunable_folder.py new file mode 100644 index 00000000000..afa5279e2c1 --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/_prunable_folder.py @@ -0,0 +1,61 @@ +from contextlib import suppress +from pathlib import Path + + +def is_leaf_path(p: Path) -> bool: + """Tests whether a path corresponds to a file or empty folder, i.e. + some leaf item in a file-system tree structure + """ + return p.is_file() or (p.is_dir() and not any(p.glob("*"))) + + +class PrunableFolder: + """ + Use in conjunction with unarchive on the dest_dir to achieve + an update of a folder content without deleting updated files + + folder = PrunableFolder(target_dir) + + unarchived = await archive_dir(destination=target_dir, ... ) + + folder.prune(exclude=unarchived) + + """ + + def __init__(self, folder: Path): + self.basedir = folder + self.before_relpaths: set = set() + self.capture() + + def capture(self) -> None: + # captures leaf paths in folder at this moment + self.before_relpaths = { + p.relative_to(self.basedir) + for p in self.basedir.rglob("*") + if is_leaf_path(p) + } + + def prune(self, exclude: set[Path]) -> None: + """ + Deletes all paths in folder skipping the exclude set + """ + + after_relpaths = {p.relative_to(self.basedir) for p in exclude} + to_delete = self.before_relpaths.difference(after_relpaths) + + for p in to_delete: + path = self.basedir / p + assert path.exists() # nosec + + if path.is_file(): + path.unlink() + elif path.is_dir(): + # prevents deleting non-empty folders + with suppress(OSError): + path.rmdir() + + # second pass to delete empty folders + # after deleting files, some folders might have been left empty + for p in self.basedir.rglob("*"): + if p.is_dir() and p not in exclude and not any(p.glob("*")): + p.rmdir() diff --git a/packages/service-library/src/servicelib/archiving_utils/_tdqm_utils.py b/packages/service-library/src/servicelib/archiving_utils/_tdqm_utils.py new file mode 100644 index 00000000000..59cf32b92bc --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/_tdqm_utils.py @@ -0,0 +1,24 @@ +from typing import Final + +from pydantic import ByteSize, NonNegativeInt + +TQDM_FILE_OPTIONS: Final[dict] = { + "unit": "byte", + "unit_scale": True, + "unit_divisor": 1024, + "colour": "yellow", + "miniters": 1, +} +TQDM_MULTI_FILES_OPTIONS: Final[dict] = TQDM_FILE_OPTIONS | { + "unit": "file", + "unit_divisor": 1000, +} + + +def human_readable_size(size: NonNegativeInt) -> str: + return ByteSize(size).human_readable() + + +def compute_tqdm_miniters(byte_size: int) -> float: + """ensures tqdm minimal iteration is 1 %""" + return min(byte_size / 100.0, 1.0) diff --git a/packages/service-library/src/servicelib/archiving_utils/_utils.py b/packages/service-library/src/servicelib/archiving_utils/_utils.py new file mode 100644 index 00000000000..0d85001e190 --- /dev/null +++ b/packages/service-library/src/servicelib/archiving_utils/_utils.py @@ -0,0 +1,10 @@ +from 
collections.abc import Iterator +from pathlib import Path + + +def iter_files_to_compress(dir_path: Path) -> Iterator[Path]: + # NOTE: make sure to sort paths othrwise between different runs + # the zip will have a different structure and hash + for path in sorted(dir_path.rglob("*")): + if path.is_file(): + yield path diff --git a/packages/service-library/src/servicelib/async_utils.py b/packages/service-library/src/servicelib/async_utils.py index 7af74a70f78..c6466df0a70 100644 --- a/packages/service-library/src/servicelib/async_utils.py +++ b/packages/service-library/src/servicelib/async_utils.py @@ -1,12 +1,22 @@ import asyncio +import contextlib +import datetime import logging from collections import deque +from collections.abc import Awaitable, Callable, Coroutine from contextlib import suppress from dataclasses import dataclass from functools import wraps -from typing import TYPE_CHECKING, Any, Callable, Deque, Optional +from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar + +from . import tracing +from .utils_profiling_middleware import dont_profile, is_profiling, profile_context + +_logger = logging.getLogger(__name__) + +P = ParamSpec("P") +R = TypeVar("R") -logger = logging.getLogger(__name__) if TYPE_CHECKING: Queue = asyncio.Queue @@ -27,23 +37,42 @@ class Context: in_queue: asyncio.Queue out_queue: asyncio.Queue initialized: bool - task: Optional[asyncio.Task] = None + task: asyncio.Task | None = None + + +@dataclass +class QueueElement: + tracing_context: tracing.TracingContext + do_profile: bool = False + input: Awaitable | None = None + output: Any | None = None +# NOTE: If you get issues with event loop already closed error use ensure_run_in_sequence_context_is_empty fixture in your tests _sequential_jobs_contexts: dict[str, Context] = {} -async def cancel_sequential_workers() -> None: - """Signals all workers to close thus avoiding errors on shutdown""" - for context in _sequential_jobs_contexts.values(): +async def _safe_cancel(context: Context) -> None: + try: await context.in_queue.put(None) if context.task is not None: context.task.cancel() with suppress(asyncio.CancelledError): await context.task + except RuntimeError as e: + if "Event loop is closed" in f"{e}": + _logger.warning("event loop is closed and could not cancel %s", context) + else: + raise + + +async def cancel_sequential_workers() -> None: + """Signals all workers to close thus avoiding errors on shutdown""" + for context in _sequential_jobs_contexts.values(): + await _safe_cancel(context) _sequential_jobs_contexts.clear() - logger.info("All run_sequentially_in_context pending workers stopped") + _logger.info("All run_sequentially_in_context pending workers stopped") # NOTE: If you get funny mismatches with mypy in returned values it might be due to this decorator. @@ -52,7 +81,7 @@ async def cancel_sequential_workers() -> None: # SEE https://peps.python.org/pep-0612/ # def run_sequentially_in_context( - target_args: Optional[list[str]] = None, + target_args: list[str] | None = None, ) -> Callable: """All request to function with same calling context will be run sequentially. 
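# Minimal usage sketch (illustration only; the function name and arguments below are
# hypothetical and not part of this diff): calls that share the same value of the
# arguments listed in `target_args` are queued into one context and executed one after
# another, while calls with different values run in independent contexts.
@run_sequentially_in_context(target_args=["project_id"])
async def sync_project_state(project_id: str, user_id: int) -> None:
    ...  # two concurrent calls with the same project_id will never overlap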
@@ -91,30 +120,30 @@ async def func(param1, param2, param3): """ target_args = [] if target_args is None else target_args - def decorator(decorated_function: Callable[[Any], Optional[Any]]): + def decorator(decorated_function: Callable[[Any], Any | None]): def _get_context(args: Any, kwargs: dict) -> Context: arg_names = decorated_function.__code__.co_varnames[ : decorated_function.__code__.co_argcount ] - search_args = dict(zip(arg_names, args)) + search_args = dict(zip(arg_names, args, strict=False)) search_args.update(kwargs) - key_parts: Deque[str] = deque() + key_parts: deque[str] = deque() for arg in target_args: sub_args = arg.split(".") main_arg = sub_args[0] if main_arg not in search_args: - raise ValueError( + msg = ( f"Expected '{main_arg}' in '{decorated_function.__name__}'" f" arguments. Got '{search_args}'" ) + raise ValueError(msg) context_key = search_args[main_arg] for attribute in sub_args[1:]: potential_key = getattr(context_key, attribute) if not potential_key: - raise ValueError( - f"Expected '{attribute}' attribute in '{context_key.__name__}' arguments." - ) + msg = f"Expected '{attribute}' attribute in '{context_key.__name__}' arguments." + raise ValueError(msg) context_key = potential_key key_parts.append(f"{decorated_function.__name__}_{context_key}") @@ -138,17 +167,21 @@ async def wrapper(*args: Any, **kwargs: Any) -> Any: if not context.initialized: context.initialized = True - async def worker(in_q: Queue, out_q: Queue) -> None: + async def worker(in_q: Queue[QueueElement], out_q: Queue) -> None: while True: - awaitable = await in_q.get() + element = await in_q.get() in_q.task_done() - # check if requested to shutdown - if awaitable is None: - break - try: - result = await awaitable - except Exception as e: # pylint: disable=broad-except - result = e + with tracing.use_tracing_context(element.tracing_context): + # check if requested to shutdown + try: + do_profile = element.do_profile + awaitable = element.input + if awaitable is None: + break + with profile_context(do_profile): + result = await awaitable + except Exception as e: # pylint: disable=broad-except + result = e await out_q.put(result) logging.info( @@ -161,9 +194,16 @@ async def worker(in_q: Queue, out_q: Queue) -> None: worker(context.in_queue, context.out_queue) ) - await context.in_queue.put(decorated_function(*args, **kwargs)) # type: ignore + with dont_profile(): + # ensure profiler is disabled in order to capture profile of endpoint code + queue_input = QueueElement( + input=decorated_function(*args, **kwargs), + do_profile=is_profiling(), + tracing_context=tracing.get_context(), + ) + await context.in_queue.put(queue_input) + wrapped_result = await context.out_queue.get() - wrapped_result = await context.out_queue.get() if isinstance(wrapped_result, Exception): raise wrapped_result @@ -172,3 +212,40 @@ async def worker(in_q: Queue, out_q: Queue) -> None: return wrapper return decorator + + +def delayed_start( + delay: datetime.timedelta, +) -> Callable[ + [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]] +]: + def _decorator( + func: Callable[P, Coroutine[Any, Any, R]], + ) -> Callable[P, Coroutine[Any, Any, R]]: + @wraps(func) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + await asyncio.sleep(delay.total_seconds()) + return await func(*args, **kwargs) + + return _wrapper + + return _decorator + + +async def cancel_wait_task( + task: asyncio.Task, + *, + max_delay: float | None = None, +) -> None: + """Cancel a asyncio.Task and waits for it to finish. 
+ + :param task: task to be canceled + :param max_delay: duration (in seconds) to wait before giving + up the cancellation. If None it waits forever. + :raises TimeoutError: raised if cannot cancel the task. + """ + + task.cancel() + async with asyncio.timeout(max_delay): + with contextlib.suppress(asyncio.CancelledError): + await task diff --git a/packages/service-library/src/servicelib/background_task.py b/packages/service-library/src/servicelib/background_task.py index 81836a2af66..508f34b99ee 100644 --- a/packages/service-library/src/servicelib/background_task.py +++ b/packages/service-library/src/servicelib/background_task.py @@ -1,76 +1,121 @@ import asyncio import contextlib import datetime +import functools import logging -from typing import AsyncIterator, Awaitable, Callable, Final, Optional +from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine +from typing import Any, Final, ParamSpec, TypeVar -from servicelib.logging_utils import log_catch, log_context -from tenacity import TryAgain -from tenacity._asyncio import AsyncRetrying +from tenacity import TryAgain, before_sleep_log, retry, retry_if_exception_type from tenacity.wait import wait_fixed -logger = logging.getLogger(__name__) +from .async_utils import cancel_wait_task, delayed_start +from .logging_utils import log_catch, log_context + +_logger = logging.getLogger(__name__) _DEFAULT_STOP_TIMEOUT_S: Final[int] = 5 -async def _periodic_scheduled_task( - task: Callable[..., Awaitable[None]], +class SleepUsingAsyncioEvent: + """Sleep strategy that waits on an event to be set or sleeps.""" + + def __init__(self, event: "asyncio.Event") -> None: + self.event = event + + async def __call__(self, delay: float | None) -> None: + with contextlib.suppress(TimeoutError): + await asyncio.wait_for(self.event.wait(), timeout=delay) + self.event.clear() + + +P = ParamSpec("P") +R = TypeVar("R") + + +def periodic( *, interval: datetime.timedelta, - task_name: str, - **task_kwargs, -): - # NOTE: This retries forever unless cancelled - async for attempt in AsyncRetrying(wait=wait_fixed(interval.total_seconds())): - with attempt: - with log_context( - logger, - logging.DEBUG, - msg=f"Run {task_name}, {attempt.retry_state.attempt_number=}", - ), log_catch(logger): - await task(**task_kwargs) - - raise TryAgain() - - -async def start_periodic_task( + raise_on_error: bool = False, + early_wake_up_event: asyncio.Event | None = None, +) -> Callable[ + [Callable[P, Coroutine[Any, Any, None]]], Callable[P, Coroutine[Any, Any, None]] +]: + """Calls the function periodically with a given interval. + + Arguments: + interval -- the interval between calls + + Keyword Arguments: + raise_on_error -- If False the function will be retried indefinitely unless cancelled. + If True the function will be retried indefinitely unless cancelled + or an exception is raised. (default: {False}) + early_wake_up_event -- allows to awaken the function before the interval has passed. (default: {None}) + + Returns: + coroutine that will be called periodically (runs forever) + """ + + def _decorator( + func: Callable[P, Coroutine[Any, Any, None]], + ) -> Callable[P, Coroutine[Any, Any, None]]: + class _InternalTryAgain(TryAgain): + # Local exception to prevent reacting to similarTryAgain exceptions raised by the wrapped func + # e.g. when this decorators is used twice on the same function + ... 
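        # Usage sketch (illustration only; the names below are hypothetical and not
        # part of this change):
        #
        #     @periodic(interval=datetime.timedelta(seconds=30))
        #     async def _check_database_health() -> None:
        #         ...
        #
        #     task = asyncio.create_task(_check_database_health())
        #     ...
        #     await cancel_wait_task(task)  # runs every 30s until cancelled here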
+ + nap = ( + asyncio.sleep + if early_wake_up_event is None + else SleepUsingAsyncioEvent(early_wake_up_event) + ) + + @retry( + sleep=nap, + wait=wait_fixed(interval.total_seconds()), + reraise=True, + retry=( + retry_if_exception_type(_InternalTryAgain) + if raise_on_error + else retry_if_exception_type() + ), + before_sleep=before_sleep_log(_logger, logging.DEBUG), + ) + @functools.wraps(func) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> None: + with log_catch(_logger, reraise=True): + await func(*args, **kwargs) + raise _InternalTryAgain + + return _wrapper + + return _decorator + + +def create_periodic_task( task: Callable[..., Awaitable[None]], *, interval: datetime.timedelta, task_name: str, + raise_on_error: bool = False, + wait_before_running: datetime.timedelta = datetime.timedelta(0), + early_wake_up_event: asyncio.Event | None = None, **kwargs, ) -> asyncio.Task: - with log_context( - logger, logging.INFO, msg=f"create periodic background task '{task_name}'" - ): - return asyncio.create_task( - _periodic_scheduled_task( - task, - interval=interval, - task_name=task_name, - **kwargs, - ), - name=task_name, - ) - + @delayed_start(wait_before_running) + @periodic( + interval=interval, + raise_on_error=raise_on_error, + early_wake_up_event=early_wake_up_event, + ) + async def _() -> None: + await task(**kwargs) -async def stop_periodic_task( - asyncio_task: asyncio.Task, *, timeout: Optional[float] = None -) -> None: with log_context( - logger, - logging.INFO, - msg=f"cancel periodic background task '{asyncio_task.get_name()}'", + _logger, logging.DEBUG, msg=f"create periodic background task '{task_name}'" ): - asyncio_task.cancel() - _, pending = await asyncio.wait((asyncio_task,), timeout=timeout) - if pending: - logger.warning( - "periodic background task '%s' did not cancel properly and timed-out!", - f"{asyncio_task.get_name()}", - ) + return asyncio.create_task(_(), name=task_name) @contextlib.asynccontextmanager @@ -79,18 +124,22 @@ async def periodic_task( *, interval: datetime.timedelta, task_name: str, + stop_timeout: float = _DEFAULT_STOP_TIMEOUT_S, + raise_on_error: bool = False, **kwargs, ) -> AsyncIterator[asyncio.Task]: - asyncio_task = None + asyncio_task: asyncio.Task | None = None try: - asyncio_task = await start_periodic_task( - task, interval=interval, task_name=task_name, **kwargs + asyncio_task = create_periodic_task( + task, + interval=interval, + task_name=task_name, + raise_on_error=raise_on_error, + **kwargs, ) yield asyncio_task finally: if asyncio_task is not None: # NOTE: this stopping is shielded to prevent the cancellation to propagate # into the stopping procedure - await asyncio.shield( - stop_periodic_task(asyncio_task, timeout=_DEFAULT_STOP_TIMEOUT_S) - ) + await asyncio.shield(cancel_wait_task(asyncio_task, max_delay=stop_timeout)) diff --git a/packages/service-library/src/servicelib/background_task_utils.py b/packages/service-library/src/servicelib/background_task_utils.py new file mode 100644 index 00000000000..8313f642430 --- /dev/null +++ b/packages/service-library/src/servicelib/background_task_utils.py @@ -0,0 +1,58 @@ +import datetime +import functools +from collections.abc import Callable, Coroutine +from typing import Any, ParamSpec, TypeVar + +from servicelib.exception_utils import silence_exceptions +from servicelib.redis._errors import CouldNotAcquireLockError + +from .background_task import periodic +from .redis import RedisClientSDK, exclusive + +P = ParamSpec("P") +R = TypeVar("R") + + +def exclusive_periodic( + 
redis_client: RedisClientSDK | Callable[..., RedisClientSDK], + *, + task_interval: datetime.timedelta, + retry_after: datetime.timedelta = datetime.timedelta(seconds=1), +) -> Callable[ + [Callable[P, Coroutine[Any, Any, None]]], Callable[P, Coroutine[Any, Any, None]] +]: + """decorates a function to become exclusive and periodic. + + Arguments: + redis_client -- the Redis client (or a callable returning one) + task_interval -- the task interval, i.e. how often the task should run + retry_after -- in case the exclusive lock cannot be acquired or is lost, this is the retry interval + + Raises: + Nothing + + Returns: + Nothing, a periodic method does not return anything as it runs forever. + """ + + def _decorator( + coro: Callable[P, Coroutine[Any, Any, None]], + ) -> Callable[P, Coroutine[Any, Any, None]]: + @periodic(interval=retry_after) + @silence_exceptions( + # Replicas will raise CouldNotAcquireLockError + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/7574 + (CouldNotAcquireLockError,) + ) + @exclusive( + redis_client, + lock_key=f"lock:exclusive_periodic_task:{coro.__module__}.{coro.__name__}", + ) + @periodic(interval=task_interval) + @functools.wraps(coro) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> None: + return await coro(*args, **kwargs) + + return _wrapper + + return _decorator diff --git a/packages/service-library/src/servicelib/bytes_iters/__init__.py b/packages/service-library/src/servicelib/bytes_iters/__init__.py new file mode 100644 index 00000000000..9d4fb6704df --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/__init__.py @@ -0,0 +1,15 @@ +from ._constants import DEFAULT_READ_CHUNK_SIZE +from ._input import DiskStreamReader +from ._models import BytesStreamer +from ._output import DiskStreamWriter +from ._stream_zip import ArchiveEntries, ArchiveFileEntry, get_zip_bytes_iter + +__all__: tuple[str, ...]
= ( + "ArchiveEntries", + "ArchiveFileEntry", + "BytesStreamer", + "DEFAULT_READ_CHUNK_SIZE", + "DiskStreamReader", + "DiskStreamWriter", + "get_zip_bytes_iter", +) diff --git a/packages/service-library/src/servicelib/bytes_iters/_constants.py b/packages/service-library/src/servicelib/bytes_iters/_constants.py new file mode 100644 index 00000000000..d7259d34b7a --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/_constants.py @@ -0,0 +1,5 @@ +from typing import Final + +from pydantic import ByteSize, TypeAdapter + +DEFAULT_READ_CHUNK_SIZE: Final[int] = TypeAdapter(ByteSize).validate_python("1MiB") diff --git a/packages/service-library/src/servicelib/bytes_iters/_input.py b/packages/service-library/src/servicelib/bytes_iters/_input.py new file mode 100644 index 00000000000..becec0981fc --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/_input.py @@ -0,0 +1,25 @@ +from pathlib import Path + +import aiofiles +from models_library.bytes_iters import BytesIter, DataSize + +from ._constants import DEFAULT_READ_CHUNK_SIZE +from ._models import BytesStreamer + + +class DiskStreamReader: + def __init__(self, file_path: Path, *, chunk_size=DEFAULT_READ_CHUNK_SIZE): + self.file_path = file_path + self.chunk_size = chunk_size + + def get_bytes_streamer(self) -> BytesStreamer: + async def _() -> BytesIter: + async with aiofiles.open(self.file_path, "rb") as f: + while True: + chunk = await f.read(self.chunk_size) + if not chunk: + break + + yield chunk + + return BytesStreamer(DataSize(self.file_path.stat().st_size), _) diff --git a/packages/service-library/src/servicelib/bytes_iters/_models.py b/packages/service-library/src/servicelib/bytes_iters/_models.py new file mode 100644 index 00000000000..8f310c6a985 --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/_models.py @@ -0,0 +1,19 @@ +from dataclasses import dataclass + +from models_library.bytes_iters import BytesIter, BytesIterCallable, DataSize + +from ..progress_bar import ProgressBarData + + +@dataclass(frozen=True) +class BytesStreamer: + data_size: DataSize + bytes_iter_callable: BytesIterCallable + + async def with_progress_bytes_iter( + self, progress_bar: ProgressBarData + ) -> BytesIter: + async for chunk in self.bytes_iter_callable(): + if progress_bar.is_running(): + await progress_bar.update(len(chunk)) + yield chunk diff --git a/packages/service-library/src/servicelib/bytes_iters/_output.py b/packages/service-library/src/servicelib/bytes_iters/_output.py new file mode 100644 index 00000000000..9995ce4d33b --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/_output.py @@ -0,0 +1,29 @@ +from pathlib import Path + +import aiofiles +from models_library.bytes_iters import BytesIter + +from ..s3_utils import FileLikeBytesIterReader + + +class DiskStreamWriter: + def __init__(self, destination_path: Path): + self.destination_path = destination_path + + async def write_from_bytes_iter(self, stream: BytesIter) -> None: + async with aiofiles.open(self.destination_path, "wb") as f: + async for chunk in stream: + await f.write(chunk) + await f.flush() + + async def write_from_file_like( + self, file_like_reader: FileLikeBytesIterReader + ) -> None: + async with aiofiles.open(self.destination_path, "wb") as f: + while True: + chunk = await file_like_reader.read(100) + if not chunk: + break + + await f.write(chunk) + await f.flush() diff --git a/packages/service-library/src/servicelib/bytes_iters/_stream_zip.py 
b/packages/service-library/src/servicelib/bytes_iters/_stream_zip.py new file mode 100644 index 00000000000..4f80125c3e1 --- /dev/null +++ b/packages/service-library/src/servicelib/bytes_iters/_stream_zip.py @@ -0,0 +1,57 @@ +import logging +from collections.abc import AsyncIterable +from datetime import UTC, datetime +from stat import S_IFREG +from typing import TypeAlias + +from models_library.bytes_iters import BytesIter, DataSize +from stream_zip import ZIP_64, AsyncMemberFile, async_stream_zip + +from ..progress_bar import ProgressBarData +from ._models import BytesStreamer + +_logger = logging.getLogger(__name__) + +FileNameInArchive: TypeAlias = str +ArchiveFileEntry: TypeAlias = tuple[FileNameInArchive, BytesStreamer] +ArchiveEntries: TypeAlias = list[ArchiveFileEntry] + + +async def _member_files_iter( + archive_entries: ArchiveEntries, progress_bar: ProgressBarData +) -> AsyncIterable[AsyncMemberFile]: + for file_name, byte_streamer in archive_entries: + yield ( + file_name, + datetime.now(UTC), + S_IFREG | 0o600, + ZIP_64, + byte_streamer.with_progress_bytes_iter(progress_bar=progress_bar), + ) + + +async def get_zip_bytes_iter( + archive_entries: ArchiveEntries, + *, + progress_bar: ProgressBarData | None = None, + chunk_size: int, +) -> BytesIter: + # NOTE: this is CPU bound task, even though the loop is not blocked, + # the CPU is still used for compressing the content. + if progress_bar is None: + progress_bar = ProgressBarData(num_steps=1, description="zip archive stream") + + total_stream_lenth = DataSize( + sum(bytes_streamer.data_size for _, bytes_streamer in archive_entries) + ) + description = f"files: count={len(archive_entries)}, size={total_stream_lenth.human_readable()}" + + async with progress_bar.sub_progress( + steps=total_stream_lenth, description=description, progress_unit="Byte" + ) as sub_progress: + # NOTE: do not disable compression or the streams will be + # loaded fully in memory before yielding their content + async for chunk in async_stream_zip( + _member_files_iter(archive_entries, sub_progress), chunk_size=chunk_size + ): + yield chunk diff --git a/packages/service-library/src/servicelib/common_aiopg_utils.py b/packages/service-library/src/servicelib/common_aiopg_utils.py index 58b27b156ee..cf7c6aba40d 100644 --- a/packages/service-library/src/servicelib/common_aiopg_utils.py +++ b/packages/service-library/src/servicelib/common_aiopg_utils.py @@ -3,12 +3,17 @@ This module was necessary because simcore-sdk (an aiohttp-independent package) still needs some of the helpers here. 
""" + +import logging from dataclasses import asdict, dataclass -from typing import Optional import sqlalchemy as sa from aiopg.sa import create_engine +from .logging_utils import log_catch, log_context + +_logger = logging.getLogger(__name__) + DSN = "postgresql://{user}:{password}@{host}:{port}/{database}" @@ -21,9 +26,9 @@ class DataSourceName: port: int = 5432 # Attributes about the caller - application_name: Optional[str] = None + application_name: str | None = None - def to_uri(self, with_query=False) -> str: + def to_uri(self, *, with_query=False) -> str: uri = DSN.format(**asdict(self)) if with_query and self.application_name: uri += f"?application_name={self.application_name}" @@ -42,14 +47,25 @@ def create_pg_engine( assert engine.closed """ - aiopg_engine_context = create_engine( + return create_engine( dsn.to_uri(), application_name=dsn.application_name, minsize=minsize, maxsize=maxsize, **pool_kwargs, ) - return aiopg_engine_context + + +async def is_postgres_responsive_async(dsn: DataSourceName) -> bool: + is_responsive: bool = False + with log_catch(_logger, reraise=False), log_context( + _logger, logging.DEBUG, msg=f"checking Postgres connection at {dsn=}" + ): + async with create_engine(dsn): + _logger.debug("postgres connection established") + is_responsive = True + + return is_responsive def is_postgres_responsive(dsn: DataSourceName) -> bool: diff --git a/packages/service-library/src/servicelib/common_headers.py b/packages/service-library/src/servicelib/common_headers.py index 842e9d14986..430823fa776 100644 --- a/packages/service-library/src/servicelib/common_headers.py +++ b/packages/service-library/src/servicelib/common_headers.py @@ -1,6 +1,9 @@ from typing import Final +UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE: Final[str] = "undefined" X_DYNAMIC_SIDECAR_REQUEST_DNS: Final[str] = "X-Dynamic-Sidecar-Request-DNS" X_DYNAMIC_SIDECAR_REQUEST_SCHEME: Final[str] = "X-Dynamic-Sidecar-Request-Scheme" X_FORWARDED_PROTO: Final[str] = "X-Forwarded-Proto" +X_SIMCORE_PARENT_NODE_ID: Final[str] = "X-Simcore-Parent-Node-Id" +X_SIMCORE_PARENT_PROJECT_UUID: Final[str] = "X-Simcore-Parent-Project-Uuid" X_SIMCORE_USER_AGENT: Final[str] = "X-Simcore-User-Agent" diff --git a/packages/service-library/src/servicelib/db_asyncpg_utils.py b/packages/service-library/src/servicelib/db_asyncpg_utils.py new file mode 100644 index 00000000000..f9dfd27c2d8 --- /dev/null +++ b/packages/service-library/src/servicelib/db_asyncpg_utils.py @@ -0,0 +1,100 @@ +import contextlib +import logging +import time +from collections.abc import AsyncIterator +from datetime import timedelta + +from models_library.healthchecks import IsNonResponsive, IsResponsive, LivenessResult +from settings_library.postgres import PostgresSettings +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine +from tenacity import retry + +from .logging_utils import log_context +from .retry_policies import PostgresRetryPolicyUponInitialization + +_logger = logging.getLogger(__name__) + + +@retry(**PostgresRetryPolicyUponInitialization(_logger).kwargs) +async def create_async_engine_and_database_ready( + settings: PostgresSettings, +) -> AsyncEngine: + """ + - creates asyncio engine + - waits until db service is up + - waits until db data is migrated (i.e. 
ready to use) + - returns engine + """ + from simcore_postgres_database.utils_aiosqlalchemy import ( # type: ignore[import-not-found] # this on is unclear + raise_if_migration_not_ready, + ) + + server_settings = None + if settings.POSTGRES_CLIENT_NAME: + assert isinstance(settings.POSTGRES_CLIENT_NAME, str) # nosec + server_settings = { + "application_name": settings.POSTGRES_CLIENT_NAME, + } + + engine = create_async_engine( + settings.dsn_with_async_sqlalchemy, + pool_size=settings.POSTGRES_MINSIZE, + max_overflow=settings.POSTGRES_MAXSIZE - settings.POSTGRES_MINSIZE, + connect_args={"server_settings": server_settings}, + pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects + future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released + ) + + try: + await raise_if_migration_not_ready(engine) + except Exception as exc: + # NOTE: engine must be closed because retry will create a new engine + await engine.dispose() + exc.add_note("Failed during migration check. Created engine disposed.") + raise + + return engine + + +async def check_postgres_liveness(engine: AsyncEngine) -> LivenessResult: + try: + tic = time.time() + # test + async with engine.connect(): + ... + elapsed_time = time.time() - tic + return IsResponsive(elapsed=timedelta(seconds=elapsed_time)) + except SQLAlchemyError as err: + return IsNonResponsive(reason=f"{err}") + + +@contextlib.asynccontextmanager +async def with_async_pg_engine( + settings: PostgresSettings, +) -> AsyncIterator[AsyncEngine]: + """ + Creates an asyncpg engine and ensures it is properly closed after use. + """ + try: + with log_context( + _logger, + logging.DEBUG, + f"connection to db {settings.dsn_with_async_sqlalchemy}", + ): + server_settings = None + if settings.POSTGRES_CLIENT_NAME: + assert isinstance(settings.POSTGRES_CLIENT_NAME, str) + + engine = create_async_engine( + settings.dsn_with_async_sqlalchemy, + pool_size=settings.POSTGRES_MINSIZE, + max_overflow=settings.POSTGRES_MAXSIZE - settings.POSTGRES_MINSIZE, + connect_args={"server_settings": server_settings}, + pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects + future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released + ) + yield engine + finally: + with log_context(_logger, logging.DEBUG, f"db disconnect of {engine}"): + await engine.dispose() diff --git a/packages/service-library/src/servicelib/decorators.py b/packages/service-library/src/servicelib/decorators.py index e51d1fb4a3b..a0d3cae59ff 100644 --- a/packages/service-library/src/servicelib/decorators.py +++ b/packages/service-library/src/servicelib/decorators.py @@ -1,34 +1,34 @@ -""" General purpose decorators +"""General purpose decorators IMPORTANT: lowest level module I order to avoid cyclic dependences, please DO NOT IMPORT ANYTHING from . 
""" + import logging from copy import deepcopy from functools import wraps -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -def safe_return(if_fails_return=False, catch=None, logger=None): +def safe_return(if_fails_return=False, catch=None, logger=None): # noqa: FBT002 # defaults if catch is None: catch = (RuntimeError,) if logger is None: - logger = log + logger = _logger def decorate(func): @wraps(func) - def safe_func(*args, **kargs): + def safe_func(*args, **kwargs): try: - res = func(*args, **kargs) - return res + return func(*args, **kwargs) except catch as err: logger.info("%s failed: %s", func.__name__, str(err)) except Exception: # pylint: disable=broad-except logger.info("%s failed unexpectedly", func.__name__, exc_info=True) - return deepcopy(if_fails_return) # avoid issues with default mutables + return deepcopy(if_fails_return) # avoid issues with default mutable return safe_func diff --git a/packages/service-library/src/servicelib/deferred_tasks/__init__.py b/packages/service-library/src/servicelib/deferred_tasks/__init__.py new file mode 100644 index 00000000000..dd57b083810 --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/__init__.py @@ -0,0 +1,79 @@ +""" +# SEE original PR https://github.com/ITISFoundation/osparc-simcore/pull/5704 + +The `BaseDeferredHandler` is the interface to the user. +(**note:** "states" are defined in the diagram below in the rectangles) + + +- `get_retries` (used by state`Scheduled`) [default 1] {can br overwritten by the user}: + returns the max attempts to retry user code +- `get_timeout` (used by state`Scheduled`) [required] {MUST be implemented by user}: + timeout for running the user code +- `start` (called by the user) [required] {MUST be implemented by user}: + defines a nice entrypoint to start new tasks +- `on_created` (called after `start` executes) [optional] {can be overwritten by the user}: + provides a global identifier for the started task +- `run` (called by state `Worker`) [required] {MUST be implemented by user}: + code the user wants to run +- `on_result` (called by state `DeferredResult`) [required] {MUST be implemented by user}: + provides the result of an execution +- `on_finished_with_error` (called by state `FinishedWithError`) [optional] {can be overwritten by the user}: + react to execution error, only triggered if all retry attempts fail +- `cancel`: (called by the user) [optional]: + send a message to cancel the current task. A warning will be logged but no call to either + `on_result` or `on_finished_with_error` will occur. + + +## DeferredHandler lifecycle + +```mermaid +stateDiagram-v2 + * --> Scheduled: via [start] + ** --> ManuallyCancelled: via [cancel] + + ManuallyCancelled --> Worker: attempts to cancel task in + + Scheduled --> SubmitTask + SubmitTask --> Worker + + ErrorResult --> SubmitTask: try again + Worker --> ErrorResult: upon error + ErrorResult --> FinishedWithError: gives up when out of retries or if cancelled + Worker --> DeferredResult: success + + DeferredResult --> Β°: calls [on_result] + FinishedWithError --> °°: calls [on_finished_with_error] + Worker --> °°°: task cancelled +``` + +### States + +Used internally for scheduling the task's execution: + +- `Scheduled`: triggered by `start` and creates a schedule for the task +- `SubmitTask`: decreases retry counter +- `Worker`: checks if enough workers slots are available (can refuse task), creates from `run` code and saves the result. 
+- `ErrorResult`: checks if it can reschedule the task or gives up +- `FinishedWIthError`: logs error, invokes `on_finished_with_error` and removes the schedule +- `DeferredResult`: invokes `on_result` and removes the schedule +- `ManuallyCancelled`: sends message to all instances to cancel. The instance handling the task will cancel the task and remove the schedule +""" + +from ._base_deferred_handler import ( + BaseDeferredHandler, + DeferredContext, + GlobalsContext, + StartContext, +) +from ._deferred_manager import DeferredManager +from ._models import TaskResultError, TaskUID + +__all__: tuple[str, ...] = ( + "BaseDeferredHandler", + "DeferredContext", + "DeferredManager", + "GlobalsContext", + "StartContext", + "TaskResultError", + "TaskUID", +) diff --git a/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py b/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py new file mode 100644 index 00000000000..3c5110ef8f8 --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_base_deferred_handler.py @@ -0,0 +1,99 @@ +from abc import ABC, abstractmethod +from datetime import timedelta +from typing import Any, ClassVar, Generic, TypeAlias, TypeVar + +from pydantic import NonNegativeInt + +from ._models import ClassUniqueReference, TaskResultError, TaskUID + +ResultType = TypeVar("ResultType") + +StartContext: TypeAlias = dict[str, Any] +GlobalsContext: TypeAlias = dict[str, Any] + +# composed by merging `GlobalsContext` and `StartContext` +DeferredContext: TypeAlias = dict[str, Any] + + +class BaseDeferredHandler(ABC, Generic[ResultType]): + """Base class to define a deferred task.""" + + _SUBCLASSES: ClassVar[list[type["BaseDeferredHandler"]]] = [] + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + BaseDeferredHandler._SUBCLASSES.append(cls) + + @classmethod + def _get_class_unique_reference(cls) -> ClassUniqueReference: + """ + returns: a unique reference for this class (module and name) + """ + return f"{cls.__module__}.{cls.__name__}" + + @classmethod + async def get_retries(cls, context: DeferredContext) -> NonNegativeInt: + """ + returns: the amount of retries in case of error (default: 0) + + This is used only when ``run`` raises an error other than `asyncio.CancelledError`` and this + value is > 0. The code inside ``run`` will be retried. + + NOTE: if the process running the ``run`` code dies, it automatically gets + retried when the process is restarted or by another copy of the service. + """ + assert context # nosec + return 0 + + @classmethod + @abstractmethod + async def get_timeout(cls, context: DeferredContext) -> timedelta: + """return the timeout for the execution of `run`. + If ``run`` does not finish executing in time a timeout exception will be raised + """ + + @classmethod + @abstractmethod + async def start(cls, **kwargs) -> StartContext: + """ + Used to start a deferred. + These values will be passed to ``run`` when it's ran. 
+ """ + + @classmethod + async def on_created(cls, task_uid: TaskUID, context: DeferredContext) -> None: + """called after deferred was scheduled to run""" + + @classmethod + @abstractmethod + async def run(cls, context: DeferredContext) -> ResultType: + """code to be ran by a worker""" + + @classmethod + @abstractmethod + async def on_result(cls, result: ResultType, context: DeferredContext) -> None: + """called when ``run`` provided a successful result""" + + @classmethod + async def on_finished_with_error( + cls, error: TaskResultError, context: DeferredContext + ) -> None: + """ + called when ``run`` code raises an error + + NOTE: by design the default action is to do nothing + """ + + @classmethod + async def cancel(cls, task_uid: TaskUID) -> None: + """cancels a deferred""" + + @classmethod + @abstractmethod + async def is_present(cls, task_uid: TaskUID) -> bool: + """checks if deferred is still scheduled and has not finished + + Returns: + `True` while task execution is not finished + `False` if task is no longer present + """ diff --git a/packages/service-library/src/servicelib/deferred_tasks/_base_task_tracker.py b/packages/service-library/src/servicelib/deferred_tasks/_base_task_tracker.py new file mode 100644 index 00000000000..69b7ba3a4c8 --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_base_task_tracker.py @@ -0,0 +1,26 @@ +from abc import ABC, abstractmethod + +from ._models import TaskUID +from ._task_schedule import TaskScheduleModel + + +class BaseTaskTracker(ABC): + @abstractmethod + async def get_new_unique_identifier(self) -> TaskUID: + """provides a unique identifier for a new task""" + + @abstractmethod + async def get(self, task_uid: TaskUID) -> TaskScheduleModel | None: + """returns the given entry for provided task unique id""" + + @abstractmethod + async def save(self, task_uid: TaskUID, task_schedule: TaskScheduleModel) -> None: + """overwrites the entry at the given task unique id with the provided entry""" + + @abstractmethod + async def remove(self, task_uid: TaskUID) -> None: + """removes the entry for the provided task unique id""" + + @abstractmethod + async def all(self) -> list[TaskScheduleModel]: + """returns a list with all the currently existing entries""" diff --git a/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py b/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py new file mode 100644 index 00000000000..b49990a7834 --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_deferred_manager.py @@ -0,0 +1,627 @@ +import asyncio +import inspect +import logging +from collections.abc import Awaitable, Callable, Iterable +from datetime import timedelta +from enum import Enum +from typing import Any, Final + +import arrow +from faststream.exceptions import NackMessage, RejectMessage +from faststream.rabbit import ExchangeType, RabbitBroker, RabbitExchange, RabbitRouter +from pydantic import NonNegativeInt +from servicelib.logging_utils import log_catch, log_context +from servicelib.redis import RedisClientSDK +from settings_library.rabbit import RabbitSettings + +from ._base_deferred_handler import ( + BaseDeferredHandler, + DeferredContext, + GlobalsContext, + StartContext, +) +from ._base_task_tracker import BaseTaskTracker +from ._models import ( + ClassUniqueReference, + TaskResultCancelledError, + TaskResultError, + TaskResultSuccess, + TaskUID, +) +from ._redis_task_tracker import RedisTaskTracker +from ._task_schedule import TaskScheduleModel, TaskState 
+from ._utils import stop_retry_for_unintended_errors +from ._worker_tracker import WorkerTracker + +_logger = logging.getLogger(__name__) + +_DEFAULT_DEFERRED_MANAGER_WORKER_SLOTS: Final[NonNegativeInt] = 100 +_DEFAULT_DELAY_BEFORE_NACK: Final[timedelta] = timedelta(seconds=1) + + +class _FastStreamRabbitQueue(str, Enum): + SCHEDULED = "SCHEDULED" + SUBMIT_TASK = "SUBMIT_TASK" + WORKER = "WORKER" + + ERROR_RESULT = "ERROR_RESULT" + + FINISHED_WITH_ERROR = "FINISHED_WITH_ERROR" + DEFERRED_RESULT = "DEFERRED_RESULT" + MANUALLY_CANCELLED = "MANUALLY_CANCELLED" + + +def _get_queue_from_state(task_state: TaskState) -> _FastStreamRabbitQueue: + return _FastStreamRabbitQueue(task_state) + + +class _PatchStartDeferred: + def __init__( + self, + *, + class_unique_reference: ClassUniqueReference, + original_start: Callable[..., Awaitable[StartContext]], + manager_schedule_deferred: Callable[ + [ClassUniqueReference, StartContext], Awaitable[None] + ], + ): + self.class_unique_reference = class_unique_reference + self.original_start = original_start + self.manager_schedule_deferred = manager_schedule_deferred + + async def __call__(self, **kwargs) -> None: + result: StartContext = await self.original_start(**kwargs) + await self.manager_schedule_deferred(self.class_unique_reference, result) + + +class _PatchCancelDeferred: + def __init__( + self, + *, + original_cancel: Callable[[TaskUID], Awaitable[None]], + manager_cancel: Callable[[TaskUID], Awaitable[None]], + ) -> None: + self.original_cancel = original_cancel + self.manager_cancel = manager_cancel + + async def __call__(self, task_uid: TaskUID) -> None: + await self.manager_cancel(task_uid) + + +class _PatchIsPresent: + def __init__( + self, + *, + original_is_present: Callable[[TaskUID], Awaitable[bool]], + manager_is_present: Callable[[TaskUID], Awaitable[bool]], + ) -> None: + self.original_is_present = original_is_present + self.manager_is_present = manager_is_present + + async def __call__(self, task_uid: TaskUID) -> bool: + return await self.manager_is_present(task_uid) + + +def _log_state(task_state: TaskState, task_uid: TaskUID) -> None: + _logger.debug("Handling state '%s' for task_uid '%s'", task_state, task_uid) + + +def _raise_if_not_type(task_result: Any, expected_types: Iterable[type]) -> None: + if not isinstance(task_result, tuple(expected_types)): + msg = f"Unexpected '{task_result=}', should be one of {[x.__name__ for x in expected_types]}" + raise TypeError(msg) + + +class DeferredManager: # pylint:disable=too-many-instance-attributes + def __init__( + self, + rabbit_settings: RabbitSettings, + scheduler_redis_sdk: RedisClientSDK, + *, + globals_context: GlobalsContext, + max_workers: NonNegativeInt = _DEFAULT_DEFERRED_MANAGER_WORKER_SLOTS, + delay_when_requeuing_message: timedelta = _DEFAULT_DELAY_BEFORE_NACK, + ) -> None: + + self._task_tracker: BaseTaskTracker = RedisTaskTracker(scheduler_redis_sdk) + + self._worker_tracker = WorkerTracker(max_workers) + self.delay_when_requeuing_message = delay_when_requeuing_message + + self.globals_context = globals_context + + self._patched_deferred_handlers: dict[ + ClassUniqueReference, type[BaseDeferredHandler] + ] = {} + + self.broker: RabbitBroker = RabbitBroker( + rabbit_settings.dsn, log_level=logging.DEBUG + ) + self.router: RabbitRouter = RabbitRouter() + + # NOTE: do not move this to a function, must remain in constructor + # otherwise the calling_module will be this one instead of the actual one + calling_module = inspect.getmodule(inspect.stack()[1][0]) + assert 
calling_module # nosec + calling_module_name = calling_module.__name__ + + # NOTE: RabbitMQ queues and exchanges are prefix by this + self._global_resources_prefix = f"{calling_module_name}" + + self.common_exchange = RabbitExchange( + f"{self._global_resources_prefix}_common", type=ExchangeType.DIRECT + ) + self.cancellation_exchange = RabbitExchange( + f"{self._global_resources_prefix}_cancellation", type=ExchangeType.FANOUT + ) + + def patch_based_deferred_handlers(self) -> None: + """Allows subclasses of ``BaseDeferredHandler`` to be scheduled. + + NOTE: If a new subclass of ``BaseDeferredHandler`` was defined after + the call to ``Scheduler.setup()`` this should be called to allow for + scheduling. + """ + # pylint:disable=protected-access + for subclass in BaseDeferredHandler._SUBCLASSES: # noqa: SLF001 + class_unique_reference: ClassUniqueReference = ( + subclass._get_class_unique_reference() # noqa: SLF001 + ) + + if not isinstance(subclass.start, _PatchStartDeferred): + with log_context( + _logger, + logging.DEBUG, + f"Patch `start` for {class_unique_reference}", + ): + patched_start = _PatchStartDeferred( + class_unique_reference=class_unique_reference, + original_start=subclass.start, + manager_schedule_deferred=self.__start, + ) + subclass.start = patched_start # type: ignore + + if not isinstance(subclass.cancel, _PatchCancelDeferred): + with log_context( + _logger, + logging.DEBUG, + f"Patch `cancel` for {class_unique_reference}", + ): + patched_cancel = _PatchCancelDeferred( + original_cancel=subclass.cancel, + manager_cancel=self.__cancel, + ) + subclass.cancel = patched_cancel # type: ignore + + if not isinstance(subclass.is_present, _PatchIsPresent): + with log_context( + _logger, + logging.DEBUG, + f"Patch `is_present` for {class_unique_reference}", + ): + patched_is_present = _PatchIsPresent( + original_is_present=subclass.is_present, + manager_is_present=self.__is_present, + ) + subclass.is_present = patched_is_present # type: ignore + + self._patched_deferred_handlers[class_unique_reference] = subclass + + @classmethod + def un_patch_base_deferred_handlers(cls) -> None: + # pylint:disable=protected-access + for subclass in BaseDeferredHandler._SUBCLASSES: # noqa: SLF001 + class_unique_reference: ClassUniqueReference = ( + subclass._get_class_unique_reference() # noqa: SLF001 + ) + + if isinstance(subclass.start, _PatchStartDeferred): + with log_context( + _logger, + logging.DEBUG, + f"Remove `start` patch for {class_unique_reference}", + ): + subclass.start = subclass.start.original_start + + if isinstance(subclass.cancel, _PatchCancelDeferred): + with log_context( + _logger, + logging.DEBUG, + f"Remove `cancel` patch for {class_unique_reference}", + ): + subclass.cancel = ( # type: ignore + subclass.cancel.original_cancel # type: ignore + ) + + if isinstance(subclass.is_present, _PatchIsPresent): + with log_context( + _logger, + logging.DEBUG, + f"Remove `is_present` patch for {class_unique_reference}", + ): + subclass.is_present = ( # type: ignore + subclass.is_present.original_is_present # type: ignore + ) + + def _get_global_queue_name(self, queue_name: _FastStreamRabbitQueue) -> str: + return f"{self._global_resources_prefix}_{queue_name}" + + def __get_subclass( + self, class_unique_reference: ClassUniqueReference + ) -> type[BaseDeferredHandler]: + return self._patched_deferred_handlers[class_unique_reference] + + def __get_deferred_context(self, start_context: StartContext) -> DeferredContext: + return {**self.globals_context, **start_context} + + async def 
__publish_to_queue( + self, task_uid: TaskUID, queue: _FastStreamRabbitQueue + ) -> None: + await self.broker.publish( + task_uid, + queue=self._get_global_queue_name(queue), + exchange=( + self.cancellation_exchange + if queue == _FastStreamRabbitQueue.MANUALLY_CANCELLED + else self.common_exchange + ), + ) + + async def __start( + self, + class_unique_reference: ClassUniqueReference, + start_context: StartContext, + ) -> None: + """Assembles TaskSchedule stores it and starts the scheduling chain""" + # NOTE: this is used internally but triggered by when calling `BaseDeferredHandler.start` + + _logger.debug( + "Scheduling '%s' with payload '%s'", + class_unique_reference, + start_context, + ) + + task_uid = await self._task_tracker.get_new_unique_identifier() + subclass = self.__get_subclass(class_unique_reference) + deferred_context = self.__get_deferred_context(start_context) + + task_schedule = TaskScheduleModel( + timeout=await subclass.get_timeout(deferred_context), + execution_attempts=await subclass.get_retries(deferred_context) + 1, + class_unique_reference=class_unique_reference, + start_context=start_context, + state=TaskState.SCHEDULED, + ) + + with log_catch(_logger, reraise=False): + await subclass.on_created(task_uid, deferred_context) + + await self._task_tracker.save(task_uid, task_schedule) + _logger.debug("Scheduled task '%s' with entry: %s", task_uid, task_schedule) + await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.SCHEDULED) + + async def __get_task_schedule( + self, task_uid: TaskUID, *, expected_state: TaskState + ) -> TaskScheduleModel: + task_schedule = await self._task_tracker.get(task_uid) + + if task_schedule is None: + msg = f"Could not find a task_schedule for task_uid '{task_uid}'" + raise RuntimeError(msg) + + if ( + task_schedule.state != expected_state + and task_schedule.state == TaskState.MANUALLY_CANCELLED + ): + _logger.debug( + "Detected that task_uid '%s' was cancelled. 
Skipping processing of %s", + task_uid, + expected_state, + ) + # abandon message processing + raise RejectMessage + + if task_schedule.state != expected_state: + # NOTE: switching state is a two phase operation (commit to memory and trigger "next handler") + # if there is an interruption between committing to memory and triggering the "next handler" + # the old handler will be retried (this is by design and guarantees that the event chain is not interrupted) + # It is safe to skip this event handling and trigger the next one + + _logger.debug( + "Detected unexpected state '%s' for task '%s', should be: '%s'", + task_schedule.state, + task_uid, + expected_state, + ) + + await self.__publish_to_queue( + task_uid, _get_queue_from_state(task_schedule.state) + ) + raise RejectMessage + + return task_schedule + + @stop_retry_for_unintended_errors + async def _fs_handle_scheduled( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + + _log_state(TaskState.SCHEDULED, task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.SCHEDULED + ) + + task_schedule.state = TaskState.SUBMIT_TASK + await self._task_tracker.save(task_uid, task_schedule) + + await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.SUBMIT_TASK) + + @stop_retry_for_unintended_errors + async def _fs_handle_submit_task( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.SUBMIT_TASK, task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.SUBMIT_TASK + ) + task_schedule.execution_attempts -= 1 + task_schedule.state = TaskState.WORKER + await self._task_tracker.save(task_uid, task_schedule) + + await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.WORKER) + + @stop_retry_for_unintended_errors + async def _fs_handle_worker( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.WORKER, task_uid) + + if not self._worker_tracker.has_free_slots(): + # NOTE: puts the message back in rabbit for redelivery since this pool is currently busy + _logger.info("All workers in pool are busy, requeuing job for %s", task_uid) + # NOTE: due to a bug the message is resent to the same queue (same process) + # to avoid picking it up immediately add sme delay + # (for details see https://faststream.airt.ai/latest/rabbit/ack/#retries) + await asyncio.sleep(self.delay_when_requeuing_message.total_seconds()) + raise NackMessage + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.WORKER + ) + + async with self._worker_tracker: + with log_context( + _logger, + logging.DEBUG, + f"Worker handling task_uid '{task_uid}' for {task_schedule}", + ): + subclass = self.__get_subclass(task_schedule.class_unique_reference) + deferred_context = self.__get_deferred_context( + task_schedule.start_context + ) + task_schedule.result = await self._worker_tracker.handle_run( + subclass, task_uid, deferred_context, task_schedule.timeout + ) + + _logger.debug( + "Worker for task_uid '%s' produced result=%s", + task_uid, + f"{task_schedule.result}", + ) + + if isinstance(task_schedule.result, TaskResultSuccess): + task_schedule.state = TaskState.DEFERRED_RESULT + await self._task_tracker.save(task_uid, task_schedule) + await self.__publish_to_queue( + task_uid, _FastStreamRabbitQueue.DEFERRED_RESULT + ) + return + + if isinstance(task_schedule.result, TaskResultError | TaskResultCancelledError): + task_schedule.state = TaskState.ERROR_RESULT + 
await self._task_tracker.save(task_uid, task_schedule) + await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.ERROR_RESULT) + return + + msg = ( + f"Unexpected state, result type={type(task_schedule.result)} should be an instance " + f"of {TaskResultSuccess.__name__}, {TaskResultError.__name__} or {TaskResultCancelledError.__name__}" + ) + raise TypeError(msg) + + @stop_retry_for_unintended_errors + async def _fs_handle_error_result( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.ERROR_RESULT, task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.ERROR_RESULT + ) + _raise_if_not_type( + task_schedule.result, (TaskResultError, TaskResultCancelledError) + ) + + if task_schedule.execution_attempts > 0 and not isinstance( + task_schedule.result, TaskResultCancelledError + ): + _logger.debug("Schedule retry attempt for task_uid '%s'", task_uid) + # does not retry if task was cancelled + task_schedule.state = TaskState.SUBMIT_TASK + await self._task_tracker.save(task_uid, task_schedule) + await self.__publish_to_queue(task_uid, _FastStreamRabbitQueue.SUBMIT_TASK) + return + + task_schedule.state = TaskState.FINISHED_WITH_ERROR + await self._task_tracker.save(task_uid, task_schedule) + await self.__publish_to_queue( + task_uid, _FastStreamRabbitQueue.FINISHED_WITH_ERROR + ) + + async def __remove_task( + self, task_uid: TaskUID, task_schedule: TaskScheduleModel + ) -> None: + _logger.info( + "Finished handling of '%s' in %s", + task_schedule.class_unique_reference, + arrow.utcnow().datetime - task_schedule.time_started, + ) + _logger.debug("Removing task %s", task_uid) + await self._task_tracker.remove(task_uid) + + @stop_retry_for_unintended_errors + async def _fs_handle_finished_with_error( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.FINISHED_WITH_ERROR, task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.FINISHED_WITH_ERROR + ) + _raise_if_not_type( + task_schedule.result, (TaskResultError, TaskResultCancelledError) + ) + + if isinstance(task_schedule.result, TaskResultError): + _logger.error( + "Finished task_uid '%s' with error. 
See below for details.\n%s", + task_uid, + task_schedule.result.format_error(), + ) + subclass = self.__get_subclass(task_schedule.class_unique_reference) + deferred_context = self.__get_deferred_context(task_schedule.start_context) + with log_catch(_logger, reraise=False): + await subclass.on_finished_with_error( + task_schedule.result, deferred_context + ) + else: + _logger.debug("Task '%s' cancelled!", task_uid) + + await self.__remove_task(task_uid, task_schedule) + + @stop_retry_for_unintended_errors + async def _fs_handle_deferred_result( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.DEFERRED_RESULT, task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.DEFERRED_RESULT + ) + _raise_if_not_type(task_schedule.result, (TaskResultSuccess,)) + + subclass = self.__get_subclass(task_schedule.class_unique_reference) + deferred_context = self.__get_deferred_context(task_schedule.start_context) + assert isinstance(task_schedule.result, TaskResultSuccess) # nosec + + with log_catch(_logger, reraise=False): + await subclass.on_result(task_schedule.result.value, deferred_context) + + await self.__remove_task(task_uid, task_schedule) + + async def __cancel(self, task_uid: TaskUID) -> None: + task_schedule: TaskScheduleModel | None = await self._task_tracker.get(task_uid) + if task_schedule is None: + _logger.warning("No entry four to cancel found for task_uid '%s'", task_uid) + return + + _logger.info("Attempting to cancel task_uid '%s'", task_uid) + task_schedule.state = TaskState.MANUALLY_CANCELLED + await self._task_tracker.save(task_uid, task_schedule) + + await self.__publish_to_queue( + task_uid, _FastStreamRabbitQueue.MANUALLY_CANCELLED + ) + + @stop_retry_for_unintended_errors + async def _fs_handle_manually_cancelled( # pylint:disable=method-hidden + self, task_uid: TaskUID + ) -> None: + _log_state(TaskState.MANUALLY_CANCELLED, task_uid) + _logger.info("Attempting to cancel task_uid '%s'", task_uid) + + task_schedule = await self.__get_task_schedule( + task_uid, expected_state=TaskState.MANUALLY_CANCELLED + ) + + if task_schedule.state == TaskState.WORKER: + run_was_cancelled = self._worker_tracker.cancel_run(task_uid) + if not run_was_cancelled: + _logger.debug( + "Currently not handling task related to '%s'. Did not cancel it.", + task_uid, + ) + return + + _logger.info("Found and cancelled run for '%s'", task_uid) + await self.__remove_task(task_uid, task_schedule) + + async def __is_present(self, task_uid: TaskUID) -> bool: + task_schedule: TaskScheduleModel | None = await self._task_tracker.get(task_uid) + return task_schedule is not None + + def _register_subscribers(self) -> None: + # Registers subscribers at runtime instead of import time. + # Enables code reuse. 
+ + # pylint:disable=unexpected-keyword-arg + # pylint:disable=no-value-for-parameter + self._fs_handle_scheduled = self.router.subscriber( + queue=self._get_global_queue_name(_FastStreamRabbitQueue.SCHEDULED), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_scheduled) + + self._fs_handle_submit_task = self.router.subscriber( + queue=self._get_global_queue_name(_FastStreamRabbitQueue.SUBMIT_TASK), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_submit_task) + + self._fs_handle_worker = self.router.subscriber( + queue=self._get_global_queue_name(_FastStreamRabbitQueue.WORKER), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_worker) + + self._fs_handle_error_result = self.router.subscriber( + queue=self._get_global_queue_name(_FastStreamRabbitQueue.ERROR_RESULT), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_error_result) + + self._fs_handle_finished_with_error = self.router.subscriber( + queue=self._get_global_queue_name( + _FastStreamRabbitQueue.FINISHED_WITH_ERROR + ), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_finished_with_error) + + self._fs_handle_deferred_result = self.router.subscriber( + queue=self._get_global_queue_name(_FastStreamRabbitQueue.DEFERRED_RESULT), + exchange=self.common_exchange, + retry=True, + )(self._fs_handle_deferred_result) + + self._fs_handle_manually_cancelled = self.router.subscriber( + queue=self._get_global_queue_name( + _FastStreamRabbitQueue.MANUALLY_CANCELLED + ), + exchange=self.cancellation_exchange, + retry=True, + )(self._fs_handle_manually_cancelled) + + async def setup(self) -> None: + self._register_subscribers() + self.broker.include_router(self.router) + + self.patch_based_deferred_handlers() + + await self.broker.start() + + async def shutdown(self) -> None: + self.un_patch_base_deferred_handlers() + await self.broker.close() diff --git a/packages/service-library/src/servicelib/deferred_tasks/_models.py b/packages/service-library/src/servicelib/deferred_tasks/_models.py new file mode 100644 index 00000000000..22c3ad20c87 --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_models.py @@ -0,0 +1,31 @@ +from typing import Any, Literal, TypeAlias + +from models_library.basic_types import IDStr +from pydantic import BaseModel + +TaskUID: TypeAlias = IDStr # Unique identifier provided by the TaskTracker +ClassUniqueReference: TypeAlias = str + + +class TaskResultSuccess(BaseModel): + result_type: Literal["success"] = "success" + value: Any + + +class TaskResultError(BaseModel): + result_type: Literal["error"] = "error" + # serialized error from the worker + error: str + str_traceback: str + + def format_error(self) -> str: + return f"Execution raised '{self.error}':\n{self.str_traceback}" + + +class TaskResultCancelledError(BaseModel): + result_type: Literal["cancelled"] = "cancelled" + + +TaskExecutionResult: TypeAlias = ( + TaskResultSuccess | TaskResultError | TaskResultCancelledError +) diff --git a/packages/service-library/src/servicelib/deferred_tasks/_redis_task_tracker.py b/packages/service-library/src/servicelib/deferred_tasks/_redis_task_tracker.py new file mode 100644 index 00000000000..bbe45ccc39a --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_redis_task_tracker.py @@ -0,0 +1,59 @@ +import pickle +from typing import Final +from uuid import uuid4 + +from models_library.basic_types import IDStr +from pydantic import NonNegativeInt + +from ..redis import RedisClientSDK +from ..utils import 
logged_gather +from ._base_task_tracker import BaseTaskTracker +from ._models import TaskUID +from ._task_schedule import TaskScheduleModel + +_TASK_TRACKER_PREFIX: Final[str] = "mm:" +_MAX_REDIS_CONCURRENCY: Final[NonNegativeInt] = 10 + + +def _get_key(task_uid: TaskUID) -> str: + return f"{_TASK_TRACKER_PREFIX}{task_uid}" + + +class RedisTaskTracker(BaseTaskTracker): + def __init__(self, redis_client_sdk: RedisClientSDK) -> None: + self.redis_client_sdk = redis_client_sdk + + async def get_new_unique_identifier(self) -> TaskUID: + candidate_already_exists = True + while candidate_already_exists: + candidate = IDStr(f"{uuid4()}") + candidate_already_exists = ( + await self.redis_client_sdk.redis.get(_get_key(candidate)) is not None + ) + return TaskUID(candidate) + + async def _get_raw(self, redis_key: str) -> TaskScheduleModel | None: + found_data = await self.redis_client_sdk.redis.get(redis_key) + return None if found_data is None else pickle.loads(found_data) # noqa: S301 + + async def get(self, task_uid: TaskUID) -> TaskScheduleModel | None: + return await self._get_raw(_get_key(task_uid)) + + async def save(self, task_uid: TaskUID, task_schedule: TaskScheduleModel) -> None: + await self.redis_client_sdk.redis.set( + _get_key(task_uid), pickle.dumps(task_schedule) + ) + + async def remove(self, task_uid: TaskUID) -> None: + await self.redis_client_sdk.redis.delete(_get_key(task_uid)) + + async def all(self) -> list[TaskScheduleModel]: + return await logged_gather( + *[ + self._get_raw(x) + async for x in self.redis_client_sdk.redis.scan_iter( + match=f"{_TASK_TRACKER_PREFIX}*" + ) + ], + max_concurrency=_MAX_REDIS_CONCURRENCY, + ) diff --git a/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py b/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py new file mode 100644 index 00000000000..5a88b99568b --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_task_schedule.py @@ -0,0 +1,59 @@ +from datetime import datetime, timedelta +from enum import Enum + +import arrow +from pydantic import BaseModel, Field, NonNegativeInt + +from ._base_deferred_handler import StartContext +from ._models import ClassUniqueReference, TaskExecutionResult + + +class TaskState(str, Enum): + # entrypoint state + SCHEDULED = "SCHEDULED" + + SUBMIT_TASK = "SUBMIT_TASK" + WORKER = "WORKER" + ERROR_RESULT = "ERROR_RESULT" + + # end states + DEFERRED_RESULT = "DEFERRED_RESULT" + FINISHED_WITH_ERROR = "FINISHED_WITH_ERROR" + MANUALLY_CANCELLED = "MANUALLY_CANCELLED" + + +class TaskScheduleModel(BaseModel): + timeout: timedelta = Field( + ..., description="Amount of time after which the task execution will time out" + ) + class_unique_reference: ClassUniqueReference = Field( + ..., + description="reference to the class containing the code and handlers for the execution of the task", + ) + start_context: StartContext = Field( + ..., + description="data used to assemble the ``StartContext``", + ) + + state: TaskState = Field( + ..., description="represents the execution step of the task" + ) + + execution_attempts: NonNegativeInt = Field( + ..., + description="remaining attempts to run the code, only retries if this is > 0", + ) + + time_started: datetime = Field( + default_factory=lambda: arrow.utcnow().datetime, + description="time when task schedule was created, used for statistics", + ) + + result: TaskExecutionResult | None = Field( + default=None, + description=( + f"Populated by {TaskState.WORKER}. It always has a value after worker handles it." 
+ "Will be used " + ), + discriminator="result_type", + ) diff --git a/packages/service-library/src/servicelib/deferred_tasks/_utils.py b/packages/service-library/src/servicelib/deferred_tasks/_utils.py new file mode 100644 index 00000000000..b06c98329ad --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_utils.py @@ -0,0 +1,35 @@ +import functools +import logging + +from faststream.exceptions import FastStreamException, RejectMessage +from redis.exceptions import RedisError + +_logger = logging.getLogger(__name__) + + +def stop_retry_for_unintended_errors(func): + """ + Stops FastStream's retry chain when an unexpected error is raised (bug or otherwise). + This is especially important when the subscribers have ``retry=True``. + + Only propagate FastStream error that handle message acknowledgement. + """ + + @functools.wraps(func) + async def wrapper(*args, **kwargs): + try: + return await func(*args, **kwargs) + except Exception as e: + if isinstance(e, FastStreamException | RedisError): + # if there are issues with Redis or FastStream (core dependencies) + # message is always retried + raise + + msg = ( + "Error detected in user code. Aborting message retry. " + f"Please check code at: '{func.__module__}.{func.__name__}'" + ) + _logger.exception(msg) + raise RejectMessage from e + + return wrapper diff --git a/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py b/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py new file mode 100644 index 00000000000..bcf9ce5ec2a --- /dev/null +++ b/packages/service-library/src/servicelib/deferred_tasks/_worker_tracker.py @@ -0,0 +1,93 @@ +import asyncio +import traceback +from collections.abc import Coroutine +from datetime import timedelta +from typing import Any + +from pydantic import NonNegativeInt + +from ._base_deferred_handler import BaseDeferredHandler, DeferredContext +from ._models import ( + TaskExecutionResult, + TaskResultCancelledError, + TaskResultError, + TaskResultSuccess, + TaskUID, +) + + +def _format_exception(e: BaseException) -> str: + return f"{e.__class__.__module__}.{e.__class__.__name__}: {e}" + + +def _get_str_traceback(e: BaseException) -> str: + return "".join(traceback.format_tb(e.__traceback__)) + + +async def _get_task_with_timeout(coroutine: Coroutine, *, timeout: timedelta) -> Any: + return await asyncio.wait_for(coroutine, timeout=timeout.total_seconds()) + + +class WorkerTracker: + def __init__(self, max_worker_count: NonNegativeInt) -> None: + self._semaphore = asyncio.Semaphore(max_worker_count) + + self._tasks: dict[TaskUID, asyncio.Task] = {} + + def has_free_slots(self) -> bool: + return not self._semaphore.locked() + + async def handle_run( + self, + deferred_handler: type[BaseDeferredHandler], + task_uid: TaskUID, + deferred_context: DeferredContext, + timeout: timedelta, + ) -> TaskExecutionResult: + self._tasks[task_uid] = task = asyncio.create_task( + _get_task_with_timeout( + deferred_handler.run(deferred_context), timeout=timeout + ) + ) + + result_to_return: TaskExecutionResult + + try: + task_result = await task + result_to_return = TaskResultSuccess(value=task_result) + except asyncio.CancelledError: + result_to_return = TaskResultCancelledError() + except Exception as e: # pylint:disable=broad-exception-caught + result_to_return = TaskResultError( + error=_format_exception(e), + str_traceback=_get_str_traceback(e), + ) + + self._tasks.pop(task_uid, None) + + return result_to_return + + def cancel_run(self, task_uid: TaskUID) -> bool: 
+ """Attempts to cancel the a task. + It is important to note that the task might not be running in this instance. + + returns: True if it could cancel the task + """ + # if an associated task exists it cancels it + task: asyncio.Task | None = self._tasks.get(task_uid, None) + if task: + # NOTE: there is no need to await the task after cancelling it. + # It is already awaited, by ``handle_run, which handles + # it's result in case of cancellation. + # As a side effect it produces a RuntimeWarning coroutine: '...' was never awaited + # which cannot be suppressed + task.cancel() + return True + return False + + async def __aenter__(self) -> "WorkerTracker": + await self._semaphore.acquire() + return self + + async def __aexit__(self, *args): + self._semaphore.release() diff --git a/packages/service-library/src/servicelib/docker_constants.py b/packages/service-library/src/servicelib/docker_constants.py index 407458ab927..0f9695c55e8 100644 --- a/packages/service-library/src/servicelib/docker_constants.py +++ b/packages/service-library/src/servicelib/docker_constants.py @@ -7,6 +7,6 @@ # NOTE: since a user inside the docker-compose spec can define # their own networks, this name tries to be as unique as possible # NOTE: length is 11 character. When running -# `docker-compose up`, the network will result in having a 53 +# `docker compose up`, the network will result in having a 53 # character prefix in front. Max allowed network name is 64. DEFAULT_USER_SERVICES_NETWORK_NAME: Final[str] = "back----end" diff --git a/packages/service-library/src/servicelib/docker_utils.py b/packages/service-library/src/servicelib/docker_utils.py index c4d9ab81118..552a6d93604 100644 --- a/packages/service-library/src/servicelib/docker_utils.py +++ b/packages/service-library/src/servicelib/docker_utils.py @@ -1,13 +1,326 @@ +import asyncio +import logging +from collections.abc import Awaitable, Callable +from contextlib import AsyncExitStack +from dataclasses import dataclass from datetime import datetime +from functools import cached_property +from typing import Any, Final, Literal +import aiodocker import arrow +from models_library.docker import DockerGenericTag +from models_library.generated_models.docker_rest_api import ProgressDetail +from models_library.utils.change_case import snake_to_camel +from pydantic import ( + BaseModel, + ByteSize, + ConfigDict, + NonNegativeInt, + TypeAdapter, + ValidationError, +) +from settings_library.docker_registry import RegistrySettings +from tenacity import ( + retry, + retry_if_exception_type, + stop_after_attempt, + wait_random_exponential, +) +from yarl import URL + +from .logging_utils import LogLevelInt +from .progress_bar import ProgressBarData + +_logger = logging.getLogger(__name__) def to_datetime(docker_timestamp: str) -> datetime: # docker follows RFC3339Nano timestamp which is based on ISO 8601 # https://medium.easyread.co/understanding-about-rfc-3339-for-datetime-formatting-in-software-engineering-940aa5d5f68a - ## This is acceptable in ISO 8601 and RFC 3339 (with T) + # This is acceptable in ISO 8601 and RFC 3339 (with T) # 2019-10-12T07:20:50.52Z # This is only accepted in RFC 3339 (without T) # 2019-10-12 07:20:50.52Z - return arrow.get(docker_timestamp).datetime + dt: datetime = arrow.get(docker_timestamp).datetime + return dt + + +LogCB = Callable[[str, LogLevelInt], Awaitable[None]] + + +class DockerLayerSizeV2(BaseModel): + media_type: str + size: ByteSize + digest: str + model_config = ConfigDict( + frozen=True, + alias_generator=snake_to_camel, + 
populate_by_name=True, + ) + + +class DockerImageManifestsV2(BaseModel): + schema_version: Literal[2] + media_type: str + config: DockerLayerSizeV2 + layers: list[DockerLayerSizeV2] + model_config = ConfigDict( + ignored_types=(cached_property,), + frozen=True, + alias_generator=snake_to_camel, + populate_by_name=True, + ) + + @cached_property + def layers_total_size(self) -> ByteSize: + return TypeAdapter(ByteSize).validate_python( + sum(layer.size for layer in self.layers) + ) + + +class DockerImageMultiArchManifestsV2(BaseModel): + schema_version: Literal[2] + media_type: Literal["application/vnd.oci.image.index.v1+json"] + manifests: list[dict[str, Any]] + model_config = ConfigDict( + frozen=True, + alias_generator=snake_to_camel, + populate_by_name=True, + ) + + +class _DockerPullImage(BaseModel): + status: str + id: str | None = None + progress_detail: ProgressDetail | None = None + progress: str | None = None + model_config = ConfigDict( + frozen=True, + alias_generator=snake_to_camel, + populate_by_name=True, + ) + + +DOCKER_HUB_HOST: Final[str] = "registry-1.docker.io" + + +def _create_docker_hub_complete_url(image: DockerGenericTag) -> URL: + if len(image.split("/")) == 1: + # official image, add library + return URL(f"https://{DOCKER_HUB_HOST}/library/{image}") + return URL(f"https://{DOCKER_HUB_HOST}/{image}") + + +def get_image_complete_url( + image: DockerGenericTag, registry_settings: RegistrySettings +) -> URL: + if registry_settings.REGISTRY_URL and registry_settings.REGISTRY_URL in image: + # this is an image available in the private registry + return URL(f"http{'s' if registry_settings.REGISTRY_AUTH else ''}://{image}") + + # this is an external image, like nginx:latest or library/nginx:latest or quay.io/stuff, ... -> https + try: + # NOTE: entries like nginx:latest or ngingx:1.3 will raise an exception here + url = URL(f"https://{image}") + assert url.host # nosec + if not url.port or ("." 
not in f"{url.host}"): + # this is Dockerhub + official images are in /library + url = _create_docker_hub_complete_url(image) + except ValueError: + # this is Dockerhub with missing host + url = _create_docker_hub_complete_url(image) + return url + + +def get_image_name_and_tag(image_complete_url: URL) -> tuple[str, str]: + if "sha256" in f"{image_complete_url}": + parts = image_complete_url.path.split("@") + else: + parts = image_complete_url.path.split(":") + return parts[0].strip("/"), parts[1] + + +@dataclass +class _PulledStatus: + size: int + downloaded: int = 0 + extracted: int = 0 + + +async def _parse_pull_information( + parsed_progress: _DockerPullImage, *, layer_id_to_size: dict[str, _PulledStatus] +): + match parsed_progress.status.lower(): + case progress_status if any( + msg in progress_status + for msg in [ + "pulling from", + "pulling fs layer", + "waiting", + "digest: ", + ] + ): + # nothing to do here + pass + case "downloading": + assert parsed_progress.id # nosec + assert parsed_progress.progress_detail # nosec + assert parsed_progress.progress_detail.current # nosec + + layer_id_to_size.setdefault( + parsed_progress.id, + _PulledStatus(parsed_progress.progress_detail.total or 0), + ).downloaded = parsed_progress.progress_detail.current + case "verifying checksum" | "download complete": + assert parsed_progress.id # nosec + layer_id_to_size.setdefault( + parsed_progress.id, _PulledStatus(0) + ).downloaded = layer_id_to_size.setdefault( + parsed_progress.id, _PulledStatus(0) + ).size + case "extracting": + assert parsed_progress.id # nosec + assert parsed_progress.progress_detail # nosec + assert parsed_progress.progress_detail.current # nosec + layer_id_to_size.setdefault( + parsed_progress.id, + _PulledStatus(parsed_progress.progress_detail.total or 0), + ).extracted = parsed_progress.progress_detail.current + case "pull complete": + assert parsed_progress.id # nosec + layer_id_to_size.setdefault( + parsed_progress.id, _PulledStatus(0) + ).extracted = layer_id_to_size[parsed_progress.id].size + case "already exists": + assert parsed_progress.id # nosec + layer_id_to_size.setdefault( + parsed_progress.id, _PulledStatus(0) + ).extracted = layer_id_to_size[parsed_progress.id].size + layer_id_to_size.setdefault( + parsed_progress.id, _PulledStatus(0) + ).downloaded = layer_id_to_size[parsed_progress.id].size + case progress_status if any( + msg in progress_status + for msg in [ + "status: downloaded newer image for ", + "status: image is up to date for ", + ] + ): + for layer_pull_status in layer_id_to_size.values(): + layer_pull_status.downloaded = layer_pull_status.size + layer_pull_status.extracted = layer_pull_status.size + case _: + _logger.warning( + "unknown pull state: %s. Please check", + f"{parsed_progress=}", + ) + + +async def pull_image( + image: DockerGenericTag, + registry_settings: RegistrySettings, + progress_bar: ProgressBarData, + log_cb: LogCB, + image_information: DockerImageManifestsV2 | None, + *, + retry_upon_error_count: NonNegativeInt = 10, +) -> None: + """pull a docker image to the host machine. + + + Arguments: + image -- the docker image to pull + registry_settings -- registry settings + progress_bar -- the current progress bar + log_cb -- a callback function to send logs to + image_information -- the image layer information. If this is None, then no fine progress will be retrieved. + retry_upon_error_count -- number of tries if there is a TimeoutError. Usually cased by networking issues. 
+ """ + + registry_auth = None + if registry_settings.REGISTRY_URL and registry_settings.REGISTRY_URL in image: + registry_auth = { + "username": registry_settings.REGISTRY_USER, + "password": registry_settings.REGISTRY_PW.get_secret_value(), + } + image_short_name = image.split("/")[-1] + layer_id_to_size: dict[str, _PulledStatus] = {} + async with AsyncExitStack() as exit_stack: + # NOTE: docker pulls an image layer by layer + # NOTE: each layer is first downloaded, then extracted. Extraction usually takes about 2/3 of the time + # NOTE: so we compute the layer size x3 (1x for downloading, 2x for extracting) + if image_information: + layer_id_to_size = { + layer.digest.removeprefix("sha256:")[:12]: _PulledStatus(layer.size) + for layer in image_information.layers + } + else: + _logger.warning( + "pulling image without layer information for %s. Progress will be approximative. TIP: check why this happens", + f"{image=}", + ) + + client = await exit_stack.enter_async_context(aiodocker.Docker()) + + def _reset_progress_from_previous_attempt() -> None: + for pulled_status in layer_id_to_size.values(): + pulled_status.downloaded = 0 + pulled_status.extracted = 0 + + attempt: NonNegativeInt = 1 + + @retry( + wait=wait_random_exponential(), + stop=stop_after_attempt(retry_upon_error_count), + reraise=True, + retry=retry_if_exception_type(asyncio.TimeoutError), + ) + async def _pull_image_with_retry() -> None: + nonlocal attempt + if attempt > 1: + # for each attempt rest the progress + progress_bar.reset() + _reset_progress_from_previous_attempt() + attempt += 1 + + _logger.info("attempt '%s' trying to pull image='%s'", attempt, image) + + reported_progress = 0.0 + async for pull_progress in client.images.pull( + image, stream=True, auth=registry_auth + ): + try: + parsed_progress = TypeAdapter(_DockerPullImage).validate_python( + pull_progress + ) + except ValidationError: + _logger.exception( + "Unexpected error while validating '%s'. " + "TIP: This is probably an unforeseen pull status text that shall be added to the code. " + "The pulling process will still continue.", + f"{pull_progress=}", + ) + else: + await _parse_pull_information( + parsed_progress, layer_id_to_size=layer_id_to_size + ) + + # compute total progress + total_downloaded_size = sum( + layer.downloaded for layer in layer_id_to_size.values() + ) + total_extracted_size = sum( + layer.extracted for layer in layer_id_to_size.values() + ) + total_progress = (total_downloaded_size + total_extracted_size) / 2.0 + progress_to_report = total_progress - reported_progress + await progress_bar.update(progress_to_report) + reported_progress = total_progress + + await log_cb( + f"pulling {image_short_name}: {pull_progress}...", + logging.DEBUG, + ) + + await _pull_image_with_retry() diff --git a/packages/service-library/src/servicelib/error_codes.py b/packages/service-library/src/servicelib/error_codes.py deleted file mode 100644 index a87ed97e32d..00000000000 --- a/packages/service-library/src/servicelib/error_codes.py +++ /dev/null @@ -1,31 +0,0 @@ -""" osparc ERROR CODES (OEC) - Unique identifier of an exception instance - Intended to report a user about unexpected errors. 
- Unexpected exceptions can be traced by matching the - logged error code with that appeneded to the user-friendly message - -SEE test_error_codes for some use cases -""" - - -import re -from typing import TYPE_CHECKING - -from pydantic.tools import parse_obj_as -from pydantic.types import constr - -_LABEL = "OEC:{}" -_PATTERN = r"OEC:\d+" - -if TYPE_CHECKING: - ErrorCodeStr = str -else: - ErrorCodeStr = constr(strip_whitespace=True, regex=_PATTERN) - - -def create_error_code(exception: Exception) -> ErrorCodeStr: - return parse_obj_as(ErrorCodeStr, _LABEL.format(id(exception))) - - -def parse_error_code(obj) -> set[ErrorCodeStr]: - return set(re.findall(_PATTERN, f"{obj}")) diff --git a/packages/service-library/src/servicelib/exception_utils.py b/packages/service-library/src/servicelib/exception_utils.py index 0a11ab87040..2de33fd98e6 100644 --- a/packages/service-library/src/servicelib/exception_utils.py +++ b/packages/service-library/src/servicelib/exception_utils.py @@ -1,10 +1,15 @@ +import inspect import logging +from collections.abc import Callable from datetime import datetime -from typing import Optional +from functools import wraps +from typing import Any, Final, ParamSpec, TypeVar from pydantic import BaseModel, Field, NonNegativeFloat, PrivateAttr -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + +_SKIPS_MESSAGE: Final[str] = "skip(s) of exception" class DelayedExceptionHandler(BaseModel): @@ -36,7 +41,7 @@ class DelayedExceptionHandler(BaseModel): delayed_handler_external_service.else_reset() """ - _first_exception_skip: Optional[datetime] = PrivateAttr(None) + _first_exception_skip: datetime | None = PrivateAttr(None) _failure_counter: int = PrivateAttr(0) delay_for: NonNegativeFloat = Field( @@ -57,10 +62,42 @@ def try_to_raise(self, exception: BaseException) -> None: ).total_seconds() > self.delay_for: raise exception - # ignore if exception inside delay window - log.warning("%s skip(s) of exception: %s", self._failure_counter, exception) + _logger.debug("%s %s: %s", self._failure_counter, _SKIPS_MESSAGE, exception) def else_reset(self) -> None: """error no longer occurs reset tracking""" self._first_exception_skip = None self._failure_counter = 0 + + +P = ParamSpec("P") +R = TypeVar("R") + +F = TypeVar("F", bound=Callable[..., Any]) + + +def silence_exceptions(exceptions: tuple[type[BaseException], ...]) -> Callable[[F], F]: + def _decorator(func_or_coro: F) -> F: + + if inspect.iscoroutinefunction(func_or_coro): + + @wraps(func_or_coro) + async def _async_wrapper(*args, **kwargs) -> Any: + try: + assert inspect.iscoroutinefunction(func_or_coro) # nosec + return await func_or_coro(*args, **kwargs) + except exceptions: + return None + + return _async_wrapper # type: ignore[return-value] # decorators typing is hard + + @wraps(func_or_coro) + def _sync_wrapper(*args, **kwargs) -> Any: + try: + return func_or_coro(*args, **kwargs) + except exceptions: + return None + + return _sync_wrapper # type: ignore[return-value] # decorators typing is hard + + return _decorator diff --git a/packages/service-library/src/servicelib/fastapi/app_state.py b/packages/service-library/src/servicelib/fastapi/app_state.py new file mode 100644 index 00000000000..79e2bea4123 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/app_state.py @@ -0,0 +1,36 @@ +from typing import TypeVar + +from fastapi import FastAPI + +T = TypeVar("T", bound="SingletonInAppStateMixin") + + +class SingletonInAppStateMixin: + """ + Mixin to get, set and delete an instance of 
'self' from/to app.state + """ + + app_state_name: str # Name used in app.state.$(app_state_name) + frozen: bool = True # Will raise if set multiple times + + @classmethod + def get_from_app_state(cls: type[T], app: FastAPI) -> T: + return getattr(app.state, cls.app_state_name) # type:ignore[no-any-return] + + def set_to_app_state(self, app: FastAPI): + if (exists := getattr(app.state, self.app_state_name, None)) and self.frozen: + msg = f"An instance of {type(self)} already in app.state.{self.app_state_name}={exists}" + raise ValueError(msg) + + setattr(app.state, self.app_state_name, self) + return self.get_from_app_state(app) + + @classmethod + def pop_from_app_state(cls: type[T], app: FastAPI) -> T: + """ + Raises: + AttributeError: if instance is not in app.state + """ + old = cls.get_from_app_state(app) + delattr(app.state, cls.app_state_name) + return old diff --git a/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py b/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py new file mode 100644 index 00000000000..8116869af5d --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/cancellation_middleware.py @@ -0,0 +1,80 @@ +import asyncio +import logging +from typing import NoReturn + +from starlette.requests import Request +from starlette.types import ASGIApp, Message, Receive, Scope, Send + +from ..logging_utils import log_context + +_logger = logging.getLogger(__name__) + + +class _TerminateTaskGroupError(Exception): + pass + + +async def _message_poller( + request: Request, queue: asyncio.Queue, receive: Receive +) -> NoReturn: + while True: + message = await receive() + if message["type"] == "http.disconnect": + _logger.debug( + "client disconnected, terminating request to %s!", request.url + ) + raise _TerminateTaskGroupError + + # Puts the message in the queue + await queue.put(message) + + +async def _handler( + app: ASGIApp, scope: Scope, queue: asyncio.Queue[Message], send: Send +) -> None: + return await app(scope, queue.get, send) + + +class RequestCancellationMiddleware: + """ASGI Middleware to cancel server requests in case of client disconnection. + Reason: FastAPI-based (e.g. starlette) servers do not automatically cancel + server requests in case of client disconnection. This middleware will cancel + the server request in case of client disconnection via asyncio.CancelledError. + + WARNING: FastAPI BackgroundTasks will also get cancelled. Use with care. + TIP: use asyncio.Task in that case + """ + + def __init__(self, app: ASGIApp) -> None: + self.app = app + _logger.warning( + "CancellationMiddleware is in use, in case of client disconection, " + "FastAPI BackgroundTasks will be cancelled too!", + ) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": + await self.app(scope, receive, send) + return + + # Let's make a shared queue for the request messages + queue: asyncio.Queue[Message] = asyncio.Queue() + request = Request(scope) + + with log_context(_logger, logging.DEBUG, f"cancellable request {request.url}"): + try: + async with asyncio.TaskGroup() as tg: + handler_task = tg.create_task( + _handler(self.app, scope, queue, send) + ) + poller_task = tg.create_task( + _message_poller(request, queue, receive) + ) + await handler_task + poller_task.cancel() + except* _TerminateTaskGroupError: + if not handler_task.done(): + _logger.info( + "The client disconnected. 
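A minimal usage sketch for the SingletonInAppStateMixin introduced in app_state.py above; the TasksRegistry class and the setup/get helpers are illustrative placeholders, not part of this changeset:

from fastapi import FastAPI

from servicelib.fastapi.app_state import SingletonInAppStateMixin


class TasksRegistry(SingletonInAppStateMixin):
    app_state_name: str = "tasks_registry"  # key used in app.state (illustrative)

    def __init__(self) -> None:
        self.running_tasks: dict[str, str] = {}


def setup_tasks_registry(app: FastAPI) -> None:
    # a second call raises ValueError because frozen=True by default
    TasksRegistry().set_to_app_state(app)


def get_tasks_registry(app: FastAPI) -> TasksRegistry:
    return TasksRegistry.get_from_app_state(app)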
request to %s was cancelled.", + request.url, + ) diff --git a/packages/service-library/src/servicelib/fastapi/client_session.py b/packages/service-library/src/servicelib/fastapi/client_session.py new file mode 100644 index 00000000000..b92dcc2d525 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/client_session.py @@ -0,0 +1,33 @@ +import datetime + +import httpx +from fastapi import FastAPI + + +def setup_client_session( + app: FastAPI, + *, + default_timeout: datetime.timedelta = datetime.timedelta(seconds=20), + max_keepalive_connections: int = 20 +) -> None: + async def on_startup() -> None: + session = httpx.AsyncClient( + transport=httpx.AsyncHTTPTransport(http2=True), + limits=httpx.Limits(max_keepalive_connections=max_keepalive_connections), + timeout=default_timeout.total_seconds(), + ) + app.state.aiohttp_client_session = session + + async def on_shutdown() -> None: + session = app.state.aiohttp_client_session + assert isinstance(session, httpx.AsyncClient) # nosec + await session.aclose() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_client_session(app: FastAPI) -> httpx.AsyncClient: + session = app.state.aiohttp_client_session + assert isinstance(session, httpx.AsyncClient) # nosec + return session diff --git a/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py b/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py new file mode 100644 index 00000000000..8f472dc9b51 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/db_asyncpg_engine.py @@ -0,0 +1,47 @@ +import logging +import warnings + +from fastapi import FastAPI +from settings_library.postgres import PostgresSettings +from simcore_postgres_database.utils_aiosqlalchemy import ( # type: ignore[import-not-found] # this on is unclear + get_pg_engine_stateinfo, +) +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..db_asyncpg_utils import create_async_engine_and_database_ready +from ..logging_utils import log_context + +_logger = logging.getLogger(__name__) + + +async def connect_to_db(app: FastAPI, settings: PostgresSettings) -> None: + warnings.warn( + "The 'connect_to_db' function is deprecated and will be removed in a future release. 
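A hedged sketch of how a FastAPI service might combine the RequestCancellationMiddleware and the client-session helpers added above; the application factory, the route and the upstream URL are assumptions for illustration only:

import datetime

from fastapi import FastAPI, Request

from servicelib.fastapi.cancellation_middleware import RequestCancellationMiddleware
from servicelib.fastapi.client_session import get_client_session, setup_client_session


def create_app() -> FastAPI:
    app = FastAPI()

    # cancels the request handler as soon as the client disconnects
    app.add_middleware(RequestCancellationMiddleware)

    # registers startup/shutdown handlers that own a shared httpx.AsyncClient
    setup_client_session(app, default_timeout=datetime.timedelta(seconds=5))

    @app.get("/upstream-status")
    async def upstream_status(request: Request) -> dict[str, int]:
        session = get_client_session(request.app)
        response = await session.get("https://example.com")  # placeholder upstream
        return {"status_code": response.status_code}

    return app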
" + "Please use 'postgres_lifespan' instead for managing the database connection lifecycle.", + DeprecationWarning, + stacklevel=2, + ) + + with log_context( + _logger, + logging.DEBUG, + f"Connecting and migraging {settings.dsn_with_async_sqlalchemy}", + ): + engine = await create_async_engine_and_database_ready(settings) + + app.state.engine = engine + _logger.debug( + "Setup engine: %s", + await get_pg_engine_stateinfo(engine), + ) + + +async def close_db_connection(app: FastAPI) -> None: + with log_context(_logger, logging.DEBUG, f"db disconnect of {app.state.engine}"): + if engine := app.state.engine: + await engine.dispose() + + +def get_engine(app: FastAPI) -> AsyncEngine: + assert isinstance(app.state.engine, AsyncEngine) # nosec + return app.state.engine diff --git a/packages/service-library/src/servicelib/fastapi/dependencies.py b/packages/service-library/src/servicelib/fastapi/dependencies.py index 824da86fb4b..78550d3fd27 100644 --- a/packages/service-library/src/servicelib/fastapi/dependencies.py +++ b/packages/service-library/src/servicelib/fastapi/dependencies.py @@ -1,13 +1,20 @@ """ Common utils for api/dependencies """ -from typing import Any, Callable +from collections.abc import Callable +from typing import Any, cast -from fastapi import Request +from fastapi import FastAPI, Request -def get_reverse_url_mapper(request: Request) -> Callable: - def reverse_url_mapper(name: str, **path_params: Any) -> str: - return request.url_for(name, **path_params) +def get_app(request: Request) -> FastAPI: + return cast(FastAPI, request.app) - return reverse_url_mapper + +def get_reverse_url_mapper(request: Request) -> Callable[..., str]: + def _url_for(name: str, **path_params: Any) -> str: + # Analogous to https://docs.aiohttp.org/en/stable/web_quickstart.html#reverse-url-constructing-using-named-resources + url: str = f"{request.url_for(name, **path_params)}" + return url + + return _url_for diff --git a/packages/service-library/src/servicelib/fastapi/docker.py b/packages/service-library/src/servicelib/fastapi/docker.py new file mode 100644 index 00000000000..2f3694d472f --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/docker.py @@ -0,0 +1,75 @@ +import asyncio +import logging +from collections.abc import AsyncIterator +from contextlib import AsyncExitStack +from typing import Final + +import aiodocker +import aiohttp +import tenacity +from aiohttp import ClientSession +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from pydantic import NonNegativeInt +from settings_library.docker_api_proxy import DockerApiProxysettings + +_logger = logging.getLogger(__name__) + +_DEFAULT_DOCKER_API_PROXY_HEALTH_TIMEOUT: Final[NonNegativeInt] = 5 + + +_DOCKER_API_PROXY_SETTINGS: Final[str] = "docker_api_proxy_settings" + + +def create_remote_docker_client_input_state(settings: DockerApiProxysettings) -> State: + return {_DOCKER_API_PROXY_SETTINGS: settings} + + +async def remote_docker_client_lifespan( + app: FastAPI, state: State +) -> AsyncIterator[State]: + settings: DockerApiProxysettings = state[_DOCKER_API_PROXY_SETTINGS] + + async with AsyncExitStack() as exit_stack: + session = await exit_stack.enter_async_context( + ClientSession( + auth=aiohttp.BasicAuth( + login=settings.DOCKER_API_PROXY_USER, + password=settings.DOCKER_API_PROXY_PASSWORD.get_secret_value(), + ) + ) + ) + + app.state.remote_docker_client = await exit_stack.enter_async_context( + aiodocker.Docker(url=settings.base_url, session=session) + ) + + await 
wait_till_docker_api_proxy_is_responsive(app) + + # NOTE this has to be inside exit_stack scope + yield {} + + +@tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay(60), + before_sleep=tenacity.before_sleep_log(_logger, logging.WARNING), + reraise=True, +) +async def wait_till_docker_api_proxy_is_responsive(app: FastAPI) -> None: + await is_docker_api_proxy_ready(app) + + +async def is_docker_api_proxy_ready( + app: FastAPI, *, timeout=_DEFAULT_DOCKER_API_PROXY_HEALTH_TIMEOUT # noqa: ASYNC109 +) -> bool: + try: + await asyncio.wait_for(get_remote_docker_client(app).version(), timeout=timeout) + except (aiodocker.DockerError, TimeoutError): + return False + return True + + +def get_remote_docker_client(app: FastAPI) -> aiodocker.Docker: + assert isinstance(app.state.remote_docker_client, aiodocker.Docker) # nosec + return app.state.remote_docker_client diff --git a/packages/service-library/src/servicelib/fastapi/docker_utils.py b/packages/service-library/src/servicelib/fastapi/docker_utils.py new file mode 100644 index 00000000000..20900916963 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/docker_utils.py @@ -0,0 +1,151 @@ +import asyncio +import logging +from typing import Final + +import httpx +from models_library.docker import DockerGenericTag +from pydantic import ByteSize, TypeAdapter, ValidationError +from settings_library.docker_registry import RegistrySettings +from yarl import URL + +from ..aiohttp import status +from ..docker_utils import ( + DOCKER_HUB_HOST, + DockerImageManifestsV2, + DockerImageMultiArchManifestsV2, + LogCB, + get_image_complete_url, + get_image_name_and_tag, + pull_image, +) +from ..logging_utils import log_catch +from ..progress_bar import AsyncReportCB, ProgressBarData + +_DEFAULT_MIN_IMAGE_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python( + "200MiB" +) + +_logger = logging.getLogger(__name__) + + +async def retrieve_image_layer_information( + image: DockerGenericTag, registry_settings: RegistrySettings +) -> DockerImageManifestsV2 | None: + with log_catch(_logger, reraise=False): + async with httpx.AsyncClient() as client: + image_complete_url = get_image_complete_url(image, registry_settings) + auth = None + if registry_settings.REGISTRY_URL in f"{image_complete_url}": + auth = httpx.BasicAuth( + username=registry_settings.REGISTRY_USER, + password=registry_settings.REGISTRY_PW.get_secret_value(), + ) + # NOTE: either of type ubuntu:latest or ubuntu@sha256:lksfdjlskfjsldkfj + docker_image_name, docker_image_tag = get_image_name_and_tag( + image_complete_url + ) + manifest_url = image_complete_url.with_path( + f"v2/{docker_image_name}/manifests/{docker_image_tag}" + ) + + headers = { + "Accept": "application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json" + } + if DOCKER_HUB_HOST in f"{image_complete_url}": + # we need the docker hub bearer code (https://stackoverflow.com/questions/57316115/get-manifest-of-a-public-docker-image-hosted-on-docker-hub-using-the-docker-regi) + bearer_url = URL("https://auth.docker.io/token").with_query( + { + "service": "registry.docker.io", + "scope": f"repository:{docker_image_name}:pull", + } + ) + response = await client.get(f"{bearer_url}") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK # nosec + bearer_code = response.json()["token"] + headers |= { + "Authorization": f"Bearer {bearer_code}", + } + + response = await client.get(f"{manifest_url}", headers=headers, auth=auth) + # Check if the 
request was successful + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK # nosec + + # if the image has multiple architectures + json_response = response.json() + try: + multi_arch_manifests = TypeAdapter( + DockerImageMultiArchManifestsV2 + ).validate_python(json_response) + # find the correct platform + digest = "" + for manifest in multi_arch_manifests.manifests: + if ( + manifest.get("platform", {}).get("architecture") == "amd64" + and manifest.get("platform", {}).get("os") == "linux" + ): + digest = manifest["digest"] + break + manifest_url = image_complete_url.with_path( + f"v2/{docker_image_name}/manifests/{digest}" + ) + response = await client.get( + f"{manifest_url}", headers=headers, auth=auth + ) + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK # nosec + json_response = response.json() + return TypeAdapter(DockerImageManifestsV2).validate_python( + json_response + ) + + except ValidationError: + return TypeAdapter(DockerImageManifestsV2).validate_python( + json_response + ) + return None + + +async def pull_images( + images: set[DockerGenericTag], + registry_settings: RegistrySettings, + progress_cb: AsyncReportCB, + log_cb: LogCB, +) -> None: + images_layer_information = await asyncio.gather( + *[ + retrieve_image_layer_information(image, registry_settings) + for image in images + ] + ) + images_total_size = sum( + i.layers_total_size if i else _DEFAULT_MIN_IMAGE_SIZE + for i in images_layer_information + ) + + async with ProgressBarData( + num_steps=images_total_size, + progress_report_cb=progress_cb, + progress_unit="Byte", + description=f"pulling {len(images)} images", + ) as pbar: + + await asyncio.gather( + *[ + pull_image( + image, + registry_settings, + pbar, + log_cb, + ( + image_layer_info + if isinstance(image_layer_info, DockerImageManifestsV2) + else None + ), + ) + for image, image_layer_info in zip( + images, images_layer_information, strict=True + ) + ] + ) diff --git a/packages/service-library/src/servicelib/fastapi/errors.py b/packages/service-library/src/servicelib/fastapi/errors.py new file mode 100644 index 00000000000..139ed573fbe --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/errors.py @@ -0,0 +1,9 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class ApplicationRuntimeError(OsparcErrorMixin, RuntimeError): + pass + + +class ApplicationStateError(ApplicationRuntimeError): + msg_template: str = "Invalid app.state.{state}: {msg}" diff --git a/packages/service-library/src/servicelib/fastapi/exceptions_utils.py b/packages/service-library/src/servicelib/fastapi/exceptions_utils.py new file mode 100644 index 00000000000..bd5f18448b1 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/exceptions_utils.py @@ -0,0 +1,44 @@ +import logging + +from fastapi import HTTPException, Request, status +from fastapi.encoders import jsonable_encoder +from fastapi.responses import JSONResponse +from models_library.api_schemas__common.errors import DefaultApiError + +_logger = logging.getLogger(__name__) + + +# NOTE: https://www.starlette.io/exceptions/ +# Handled exceptions **do not represent error cases** ! +# - They are coerced into appropriate HTTP responses, which are then sent through the standard middleware stack. +# - By default the HTTPException class is used to manage any handled exceptions. 
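A minimal sketch, under stated assumptions, of how the pull_images helper defined above might be driven from service code; the image tags, the callback implementations and the shape of the progress-report argument are illustrative guesses, not part of this changeset:

import logging

from models_library.docker import DockerGenericTag
from pydantic import TypeAdapter
from settings_library.docker_registry import RegistrySettings

from servicelib.fastapi.docker_utils import pull_images

_logger = logging.getLogger(__name__)


async def _log_cb(message: str, log_level: int) -> None:
    # forwards docker-pull output to this service's logger
    _logger.log(log_level, message)


async def _progress_cb(report) -> None:
    # assumed to receive the report object emitted by ProgressBarData
    _logger.info("pull progress: %s", report)


async def prefetch_images(registry_settings: RegistrySettings) -> None:
    images = {
        TypeAdapter(DockerGenericTag).validate_python("nginx:latest"),
        TypeAdapter(DockerGenericTag).validate_python("redis:7"),
    }
    await pull_images(images, registry_settings, _progress_cb, _log_cb)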
+ + +async def http_exception_as_json_response( + request: Request, exc: Exception +) -> JSONResponse: + assert isinstance(exc, HTTPException) # nosec + assert request # nosec + + error = DefaultApiError.from_status_code(exc.status_code) + + error_detail = error.detail or "" + if exc.detail not in error_detail: + # starlette.exceptions.HTTPException defaults to a similar detail + error.detail = exc.detail + + return JSONResponse( + jsonable_encoder(error, exclude_none=True), status_code=exc.status_code + ) + + +async def handle_errors_as_500(request: Request, exc: Exception) -> JSONResponse: + assert request # nosec + assert isinstance(exc, Exception) # nosec + + error = DefaultApiError.from_status_code(status.HTTP_500_INTERNAL_SERVER_ERROR) + _logger.exception("Unhandled exception responded as %s", error) + return JSONResponse( + jsonable_encoder(error, exclude_none=True), + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/packages/service-library/src/servicelib/fastapi/http_client.py b/packages/service-library/src/servicelib/fastapi/http_client.py new file mode 100644 index 00000000000..9c431dee71e --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/http_client.py @@ -0,0 +1,83 @@ +import contextlib +import logging +from abc import ABC, abstractmethod + +import httpx +from fastapi import FastAPI +from models_library.healthchecks import IsNonResponsive, IsResponsive, LivenessResult + +from ..logging_utils import log_context + +_logger = logging.getLogger(__name__) + + +class HasClientInterface(ABC): + @property + @abstractmethod + def client(self) -> httpx.AsyncClient: + ... + + +class HasClientSetupInterface(ABC): + @abstractmethod + async def setup_client(self) -> None: + ... + + @abstractmethod + async def teardown_client(self) -> None: + ... 
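A short usage sketch for the two handlers defined above in exceptions_utils.py; the setup function name is an assumption, while app.add_exception_handler is the standard FastAPI registration call:

from fastapi import FastAPI, HTTPException

from servicelib.fastapi.exceptions_utils import (
    handle_errors_as_500,
    http_exception_as_json_response,
)


def setup_exception_handlers(app: FastAPI) -> None:
    # HTTPException is rendered as a DefaultApiError body with its own status code
    app.add_exception_handler(HTTPException, http_exception_as_json_response)
    # anything else becomes a logged 500 with a generic DefaultApiError body
    app.add_exception_handler(Exception, handle_errors_as_500)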
+ + +class BaseHTTPApi(HasClientSetupInterface): + def __init__(self, client: httpx.AsyncClient): + self._client = client + # Controls all resources lifespan in sync + self._exit_stack: contextlib.AsyncExitStack = contextlib.AsyncExitStack() + + @classmethod + def from_client_kwargs(cls, **kwargs): + return cls(client=httpx.AsyncClient(**kwargs)) + + @property + def client(self) -> httpx.AsyncClient: + return self._client + + async def setup_client(self) -> None: + with log_context(_logger, logging.INFO, "setup client"): + await self._exit_stack.enter_async_context(self.client) + + async def teardown_client(self) -> None: + with log_context(_logger, logging.INFO, "teardown client"): + await self._exit_stack.aclose() + + +class AttachLifespanMixin(HasClientSetupInterface): + def attach_lifespan_to(self, app: FastAPI) -> None: + app.add_event_handler("startup", self.setup_client) + app.add_event_handler("shutdown", self.teardown_client) + + +class HealthMixinMixin(HasClientInterface): + async def ping(self) -> bool: + """Check whether server is reachable""" + try: + await self.client.get("/") + return True + except httpx.RequestError: + return False + + async def is_healthy(self) -> bool: + """Service is reachable and ready""" + try: + response = await self.client.get("/") + response.raise_for_status() + return True + except httpx.HTTPError: + return False + + async def check_liveness(self) -> LivenessResult: + try: + response = await self.client.get("/") + return IsResponsive(elapsed=response.elapsed) + except httpx.RequestError as err: + return IsNonResponsive(reason=f"{err}") diff --git a/packages/service-library/src/servicelib/fastapi/http_client_thin.py b/packages/service-library/src/servicelib/fastapi/http_client_thin.py new file mode 100644 index 00000000000..e4806f88bcf --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/http_client_thin.py @@ -0,0 +1,237 @@ +import asyncio +import functools +import inspect +import logging +from collections.abc import Awaitable, Callable +from typing import Any + +from common_library.errors_classes import OsparcErrorMixin +from httpx import AsyncClient, ConnectError, HTTPError, PoolTimeout, Response +from httpx._types import TimeoutTypes, URLTypes +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from settings_library.tracing import TracingSettings +from tenacity import RetryCallState +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_exponential + +from .http_client import BaseHTTPApi + +_logger = logging.getLogger(__name__) + + +""" +Exception hierarchy: + +* BaseClientError + x BaseRequestError + + ClientHttpError + + UnexpectedStatusError +""" + + +class BaseClientError(OsparcErrorMixin, Exception): + """Used as based for all the raised errors""" + + msg_template: str = "{message}" + + +class BaseHttpClientError(BaseClientError): + """Base class to wrap all http related client errors""" + + +class ClientHttpError(BaseHttpClientError): + """used to captures all httpx.HttpError""" + + msg_template: str = "Received httpx.HTTPError: {error}" + + +class UnexpectedStatusError(BaseHttpClientError): + """raised when the status of the request is not the one it was expected""" + + msg_template: str = ( + "Expected status: {expecting}, got {response.status_code} for: {response.url}: " + "headers={response.headers}, body='{response.text}'" + ) + + +def 
_log_pool_status(client: AsyncClient, event_name: str) -> None: + # pylint: disable=protected-access + pool = client._transport._pool # type: ignore[attr-defined] # noqa: SLF001 + _logger.warning( + "Pool status @ '%s': requests(%s)=%s, connections(%s)=%s", + event_name.upper(), + len(pool._requests), # noqa: SLF001 + [ + (id(r), r.request.method, r.request.url, r.request.headers) + for r in pool._requests # noqa: SLF001 + ], + len(pool.connections), + [(id(c), c.__dict__) for c in pool.connections], + ) + + +def _after_log(log: logging.Logger) -> Callable[[RetryCallState], None]: + def log_it(retry_state: RetryCallState) -> None: + # pylint: disable=protected-access + + assert retry_state.outcome # nosec + e = retry_state.outcome.exception() + assert isinstance(e, HTTPError) # nosec + log.error( + "Request timed-out after %s attempts with an unexpected error: '%s':%s", + retry_state.attempt_number, + f"{e.request=}", + f"{e=}", + ) + + return log_it + + +def _assert_public_interface( + obj: object, extra_allowed_method_names: set[str] | None = None +) -> None: + # makes sure all user public defined methods return `httpx.Response` + + _allowed_names: set[str] = { + "setup_client", + "teardown_client", + "from_client_kwargs", + } + if extra_allowed_method_names: + _allowed_names |= extra_allowed_method_names + + public_methods = [ + t[1] + for t in inspect.getmembers(obj, predicate=inspect.ismethod) + if not (t[0].startswith("_") or t[0] in _allowed_names) + ] + + for method in public_methods: + signature = inspect.signature(method) + assert signature.return_annotation == Response, ( + f"{method=} should return an instance " + f"of {Response}, not '{signature.return_annotation}'!" + ) + + +def retry_on_errors( + total_retry_timeout_overwrite: float | None = None, +) -> Callable[..., Callable[..., Awaitable[Response]]]: + """ + Will retry the request on `ConnectError` and `PoolTimeout`. + Also wraps `httpx.HTTPError` + raises: + - `ClientHttpError` + """ + + def decorator( + request_func: Callable[..., Awaitable[Response]] + ) -> Callable[..., Awaitable[Response]]: + assert asyncio.iscoroutinefunction(request_func) + + @functools.wraps(request_func) + async def request_wrapper(zelf: "BaseThinClient", *args, **kwargs) -> Response: + # pylint: disable=protected-access + try: + async for attempt in AsyncRetrying( + stop=stop_after_delay( + total_retry_timeout_overwrite + if total_retry_timeout_overwrite + else zelf.total_retry_interval + ), + wait=wait_exponential(min=1), + retry=retry_if_exception_type((ConnectError, PoolTimeout)), + before_sleep=before_sleep_log(_logger, logging.WARNING), + after=_after_log(_logger), + reraise=True, + ): + with attempt: + return await request_func(zelf, *args, **kwargs) + except HTTPError as e: + if isinstance(e, PoolTimeout): + _log_pool_status(zelf.client, "pool timeout") + raise ClientHttpError(error=e) from e + + # NOTE: this satisfies mypy, both exceptions from retrial are HTTPError + msg = "Unexpected error!" 
+ raise NotImplementedError(msg) + + return request_wrapper + + return decorator + + +def expect_status( + expected_code: int, +) -> Callable[..., Callable[..., Awaitable[Response]]]: + """ + raises an `UnexpectedStatusError` if the request's status is different + from `expected_code` + NOTE: always apply after `retry_on_errors` + + raises: + - `UnexpectedStatusError` + - `ClientHttpError` + """ + + def decorator( + request_func: Callable[..., Awaitable[Response]] + ) -> Callable[..., Awaitable[Response]]: + assert asyncio.iscoroutinefunction(request_func) + + @functools.wraps(request_func) + async def request_wrapper(zelf: "BaseThinClient", *args, **kwargs) -> Response: + response = await request_func(zelf, *args, **kwargs) + if response.status_code != expected_code: + raise UnexpectedStatusError(response=response, expecting=expected_code) + + return response + + return request_wrapper + + return decorator + + +class BaseThinClient(BaseHTTPApi): + def __init__( + self, + *, + total_retry_interval: float, + tracing_settings: TracingSettings | None, + base_url: URLTypes | None = None, + default_http_client_timeout: TimeoutTypes | None = None, + extra_allowed_method_names: set[str] | None = None, + ) -> None: + _assert_public_interface(self, extra_allowed_method_names) + + self.total_retry_interval: float = total_retry_interval + + client_args: dict[str, Any] = { + # NOTE: the default httpx pool limit configurations look good + # https://www.python-httpx.org/advanced/#pool-limit-configuration + # instruct the remote uvicorn web server to close the connections + # https://www.uvicorn.org/server-behavior/#http-headers + "headers": { + "Connection": "Close", + } + } + if base_url: + client_args["base_url"] = base_url + if default_http_client_timeout: + client_args["timeout"] = default_http_client_timeout + + client = AsyncClient(**client_args) + if tracing_settings: + setup_httpx_client_tracing(client) + super().__init__(client=client) + + async def __aenter__(self): + await self.setup_client() + return self + + async def __aexit__(self, *args): + _log_pool_status(self.client, "before close") + await self.teardown_client() diff --git a/packages/service-library/src/servicelib/fastapi/http_error.py b/packages/service-library/src/servicelib/fastapi/http_error.py new file mode 100644 index 00000000000..2cc9814dc8f --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/http_error.py @@ -0,0 +1,133 @@ +import logging +from collections.abc import Awaitable, Callable +from typing import TypeVar + +from fastapi import FastAPI, HTTPException, status +from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import RequestValidationError +from fastapi.openapi.constants import REF_PREFIX +from fastapi.openapi.utils import validation_error_response_definition +from fastapi.requests import Request +from fastapi.responses import JSONResponse +from pydantic import ValidationError + +from ..logging_errors import create_troubleshotting_log_kwargs +from ..status_codes_utils import is_5xx_server_error + +validation_error_response_definition["properties"] = { + "errors": { + "title": "Validation errors", + "type": "array", + "items": {"$ref": f"{REF_PREFIX}ValidationError"}, + }, +} + + +TException = TypeVar("TException") + +_logger = logging.getLogger(__name__) + + +def make_http_error_handler_for_exception( + status_code: int, + exception_cls: type[TException], + *, + envelope_error: bool, + error_extractor: Callable[[TException], list[str]] | None = None, +) -> Callable[[Request, 
Exception], Awaitable[JSONResponse]]: + """ + Produces a handler for BaseException-type exceptions which converts them + into an error JSON response with a given status code + + SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions + """ + + async def _http_error_handler(request: Request, exc: Exception) -> JSONResponse: + assert isinstance(exc, exception_cls) # nosec + error_content = { + "errors": error_extractor(exc) if error_extractor else [f"{exc}"] + } + + if is_5xx_server_error(status_code): + _logger.exception( + create_troubleshotting_log_kwargs( + "Unexpected error happened in the Resource Usage Tracker. Please contact support.", + error=exc, + error_context={ + "request": request, + "request.method": f"{request.method}", + }, + ) + ) + + return JSONResponse( + content=jsonable_encoder( + {"error": error_content} if envelope_error else error_content + ), + status_code=status_code, + ) + + return _http_error_handler + + +def _request_validation_error_extractor( + validation_error: RequestValidationError, +) -> list[str]: + return [f"{e}" for e in validation_error.errors()] + + +def _make_default_http_error_handler( + *, envelope_error: bool +) -> Callable[[Request, Exception], Awaitable[JSONResponse]]: + async def _http_error_handler(_: Request, exc: Exception) -> JSONResponse: + assert isinstance(exc, HTTPException) + + error_content = {"errors": [exc.detail]} + + return JSONResponse( + content=jsonable_encoder( + {"error": error_content} if envelope_error else error_content + ), + status_code=exc.status_code, + ) + + return _http_error_handler + + +def set_app_default_http_error_handlers(app: FastAPI) -> None: + app.add_exception_handler( + HTTPException, _make_default_http_error_handler(envelope_error=True) + ) + + app.add_exception_handler( + RequestValidationError, + make_http_error_handler_for_exception( + status.HTTP_422_UNPROCESSABLE_ENTITY, + RequestValidationError, + envelope_error=True, + error_extractor=_request_validation_error_extractor, + ), + ) + + app.add_exception_handler( + ValidationError, + make_http_error_handler_for_exception( + status.HTTP_500_INTERNAL_SERVER_ERROR, + ValidationError, + envelope_error=True, + ), + ) + + # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy + app.add_exception_handler( + NotImplementedError, + make_http_error_handler_for_exception( + status.HTTP_501_NOT_IMPLEMENTED, NotImplementedError, envelope_error=True + ), + ) + app.add_exception_handler( + Exception, + make_http_error_handler_for_exception( + status.HTTP_500_INTERNAL_SERVER_ERROR, Exception, envelope_error=True + ), + ) diff --git a/packages/service-library/src/servicelib/fastapi/httpx_utils.py b/packages/service-library/src/servicelib/fastapi/httpx_utils.py new file mode 100644 index 00000000000..6018a9d01cd --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/httpx_utils.py @@ -0,0 +1,82 @@ +import httpx + +from ..utils_secrets import mask_sensitive_data + + +def _get_headers_safely(request: httpx.Request) -> dict[str, str]: + return mask_sensitive_data(dict(request.headers)) + + +def to_httpx_command( + request: httpx.Request, *, use_short_options: bool = True, multiline: bool = False +) -> str: + """Command with httpx CLI + + $ httpx --help + + NOTE: Particularly handy as an alternative to curl (e.g. 
when docker exec in osparc containers) + SEE https://www.python-httpx.org/ + """ + cmd = [ + "httpx", + ] + + # -m, --method METHOD + cmd.append(f'{"-m" if use_short_options else "--method"} {request.method}') + + # -c, --content TEXT Byte content to include in the request body. + if content := request.read().decode(): + cmd.append(f'{"-c" if use_short_options else "--content"} \'{content}\'') + + # -h, --headers ... Include additional HTTP headers in the request. + if headers := _get_headers_safely(request): + cmd.extend( + [ + f'{"-h" if use_short_options else "--headers"} "{name}" "{value}"' + for name, value in headers.items() + ] + ) + + cmd.append(f"{request.url}") + separator = " \\\n" if multiline else " " + return separator.join(cmd) + + +def to_curl_command( + request: httpx.Request, *, use_short_options: bool = True, multiline: bool = False +) -> str: + """Composes a curl command from a given request + + $ curl --help + + NOTE: Handy reproduce a request in a separate terminal (e.g. debugging) + """ + # Adapted from https://github.com/marcuxyz/curlify2/blob/master/curlify2/curlify.py + cmd = [ + "curl", + ] + + # https://curl.se/docs/manpage.html#-X + # -X, --request {method} + cmd.append(f'{"-X" if use_short_options else "--request"} {request.method}') + + # https://curl.se/docs/manpage.html#-H + # H, --header
Pass custom header(s) to server + if headers := _get_headers_safely(request): + cmd.extend( + [ + f'{"-H" if use_short_options else "--header"} "{k}: {v}"' + for k, v in headers.items() + ] + ) + + # https://curl.se/docs/manpage.html#-d + # -d, --data HTTP POST data + if body := request.read().decode(): + _d = "-d" if use_short_options else "--data" + cmd.append(f"{_d} '{body}'") + + cmd.append(f"{request.url}") + + separator = " \\\n" if multiline else " " + return separator.join(cmd) diff --git a/packages/service-library/src/servicelib/fastapi/lifespan_utils.py b/packages/service-library/src/servicelib/fastapi/lifespan_utils.py new file mode 100644 index 00000000000..4ccf0410930 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/lifespan_utils.py @@ -0,0 +1,77 @@ +import contextlib +from collections.abc import Iterator +from typing import Final + +from common_library.errors_classes import OsparcErrorMixin +from fastapi import FastAPI +from fastapi_lifespan_manager import State + +from ..logging_utils import log_context + + +class LifespanError(OsparcErrorMixin, RuntimeError): ... + + +class LifespanOnStartupError(LifespanError): + msg_template = "Failed during startup of {lifespan_name}" + + +class LifespanOnShutdownError(LifespanError): + msg_template = "Failed during shutdown of {lifespan_name}" + + +class LifespanAlreadyCalledError(LifespanError): + msg_template = "The lifespan '{lifespan_name}' has already been called." + + +class LifespanExpectedCalledError(LifespanError): + msg_template = "The lifespan '{lifespan_name}' was not called. Ensure it is properly configured and invoked." + + +_CALLED_LIFESPANS_KEY: Final[str] = "_CALLED_LIFESPANS" + + +def is_lifespan_called(state: State, lifespan_name: str) -> bool: + # NOTE: This assert is meant to catch a common mistake: + # The `lifespan` function should accept up to two *optional* positional arguments: (app: FastAPI, state: State). + # Valid signatures include: `()`, `(app)`, `(app, state)`, or even `(_, state)`. + # It's easy to accidentally swap or misplace these arguments. + assert not isinstance( # nosec + state, FastAPI + ), "Did you swap arguments? `lifespan(app, state)` expects (app: FastAPI, state: State)" + + called_lifespans = state.get(_CALLED_LIFESPANS_KEY, set()) + return lifespan_name in called_lifespans + + +def mark_lifespace_called(state: State, lifespan_name: str) -> State: + """Validates if a lifespan has already been called and records it in the state. + Raises LifespanAlreadyCalledError if the lifespan has already been called. + """ + if is_lifespan_called(state, lifespan_name): + raise LifespanAlreadyCalledError(lifespan_name=lifespan_name) + + called_lifespans = state.get(_CALLED_LIFESPANS_KEY, set()) + called_lifespans.add(lifespan_name) + return {_CALLED_LIFESPANS_KEY: called_lifespans} + + +def ensure_lifespan_called(state: State, lifespan_name: str) -> None: + """Ensures that a lifespan has been called. + Raises LifespanNotCalledError if the lifespan has not been called. 
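A small illustration of the httpx_utils helpers above; the request target and payload are made up for the example, and the printed command is only indicative:

import httpx

from servicelib.fastapi.httpx_utils import to_curl_command, to_httpx_command

request = httpx.Request(
    "POST",
    "http://catalog:8000/v0/services",  # hypothetical target
    json={"name": "demo"},
    headers={"x-simcore-user-agent": "debugging"},
)

# prints something like:
# curl -X POST -H "host: catalog:8000" ... -d '{"name": "demo"}' http://catalog:8000/v0/services
print(to_curl_command(request))
print(to_httpx_command(request, multiline=True))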
+ """ + if not is_lifespan_called(state, lifespan_name): + raise LifespanExpectedCalledError(lifespan_name=lifespan_name) + + +@contextlib.contextmanager +def lifespan_context( + logger, level, lifespan_name: str, state: State +) -> Iterator[State]: + """Helper context manager to log lifespan event and mark lifespan as called.""" + + with log_context(logger, level, lifespan_name): + # Check if lifespan has already been called + called_state = mark_lifespace_called(state, lifespan_name) + + yield called_state diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py index 3424d0c878c..a00c2417e49 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_client.py @@ -2,13 +2,13 @@ import functools import logging import warnings -from typing import Any, Awaitable, Callable, Final, Optional +from typing import Any, Awaitable, Callable, Final from fastapi import FastAPI, status from httpx import AsyncClient, HTTPError -from pydantic import AnyHttpUrl, PositiveFloat, parse_obj_as +from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter from tenacity import RetryCallState -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt from tenacity.wait import wait_exponential @@ -23,6 +23,7 @@ DEFAULT_HTTP_REQUESTS_TIMEOUT: Final[PositiveFloat] = 15 + logger = logging.getLogger(__name__) @@ -40,11 +41,7 @@ def log_it(retry_state: "RetryCallState") -> None: if retry_state.outcome.failed: ex = retry_state.outcome.exception() verb, value = "raised", f"{ex.__class__.__name__}: {ex}" - - if exc_info: - local_exc_info = retry_state.outcome.exception() - else: - local_exc_info = False + local_exc_info = exc_info else: verb, value = "returned", retry_state.outcome.result() local_exc_info = False # exc_info does not apply when no exception @@ -108,6 +105,9 @@ async def request_wrapper(zelf: "Client", *args, **kwargs) -> Any: with attempt: return await request_func(zelf, *args, **kwargs) + msg = "Unexpected" + raise RuntimeError(msg) + return request_wrapper @@ -117,7 +117,7 @@ class Client: status, result and/or cancel of a long running task. 
""" - def __init__(self, app: FastAPI, async_client: AsyncClient, base_url: AnyHttpUrl): + def __init__(self, app: FastAPI, async_client: AsyncClient, base_url: str): """ `app`: used byt the `Client` to recover the `ClientConfiguration` `async_client`: an AsyncClient instance used by `Client` @@ -129,17 +129,17 @@ def __init__(self, app: FastAPI, async_client: AsyncClient, base_url: AnyHttpUrl @property def _client_configuration(self) -> ClientConfiguration: - return self.app.state.long_running_client_configuration + output: ClientConfiguration = self.app.state.long_running_client_configuration + return output - def _get_url(self, path: str) -> AnyHttpUrl: - return parse_obj_as( - AnyHttpUrl, - f"{self._base_url}{self._client_configuration.router_prefix}{path}", - ) + def _get_url(self, path: str) -> str: + url_path = f"{self._client_configuration.router_prefix}{path}".lstrip("/") + url = TypeAdapter(AnyHttpUrl).validate_python(f"{self._base_url}{url_path}") + return f"{url}" @retry_on_http_errors async def get_task_status( - self, task_id: TaskId, *, timeout: Optional[PositiveFloat] = None + self, task_id: TaskId, *, timeout: PositiveFloat | None = None # noqa: ASYNC109 ) -> TaskStatus: timeout = timeout or self._client_configuration.default_timeout result = await self._async_client.get( @@ -154,12 +154,12 @@ async def get_task_status( body=result.text, ) - return TaskStatus.parse_obj(result.json()) + return TaskStatus.model_validate(result.json()) @retry_on_http_errors async def get_task_result( - self, task_id: TaskId, *, timeout: Optional[PositiveFloat] = None - ) -> Optional[Any]: + self, task_id: TaskId, *, timeout: PositiveFloat | None = None # noqa: ASYNC109 + ) -> Any | None: timeout = timeout or self._client_configuration.default_timeout result = await self._async_client.get( self._get_url(f"/task/{task_id}/result"), @@ -173,14 +173,14 @@ async def get_task_result( body=result.text, ) - task_result = TaskResult.parse_obj(result.json()) + task_result = TaskResult.model_validate(result.json()) if task_result.error is not None: raise TaskClientResultError(message=task_result.error) return task_result.result @retry_on_http_errors async def cancel_and_delete_task( - self, task_id: TaskId, *, timeout: Optional[PositiveFloat] = None + self, task_id: TaskId, *, timeout: PositiveFloat | None = None # noqa: ASYNC109 ) -> None: timeout = timeout or self._client_configuration.default_timeout result = await self._async_client.delete( diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py index 42b6c54f991..1b0f47449b3 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_context_manager.py @@ -1,7 +1,7 @@ import asyncio from asyncio.log import logger from contextlib import asynccontextmanager -from typing import Any, AsyncIterator, Final, Optional +from typing import Any, AsyncIterator, Final from pydantic import PositiveFloat @@ -30,18 +30,18 @@ class _ProgressManager: def __init__( self, - update_callback: Optional[ProgressCallback], + update_callback: ProgressCallback | None, ) -> None: self._callback = update_callback - self._last_message: Optional[ProgressMessage] = None - self._last_percent: Optional[ProgressPercent] = None + self._last_message: ProgressMessage | None = None + self._last_percent: ProgressPercent | None = None async def 
update( self, task_id: TaskId, *, - message: Optional[ProgressMessage] = None, - percent: Optional[ProgressPercent] = None, + message: ProgressMessage | None = None, + percent: ProgressPercent | None = None, ) -> None: if self._callback is None: return @@ -56,7 +56,11 @@ async def update( has_changes = True if has_changes: - await self._callback(self._last_message, self._last_percent, task_id) + await self._callback( + self._last_message or "", + self._last_percent, + task_id, + ) @asynccontextmanager @@ -65,9 +69,9 @@ async def periodic_task_result( task_id: TaskId, *, task_timeout: PositiveFloat, - progress_callback: Optional[ProgressCallback] = None, + progress_callback: ProgressCallback | None = None, status_poll_interval: PositiveFloat = 5, -) -> AsyncIterator[Optional[Any]]: +) -> AsyncIterator[Any | None]: """ A convenient wrapper around the Client. Polls for results and returns them once available. @@ -92,7 +96,7 @@ async def periodic_task_result( async def _status_update() -> TaskStatus: task_status: TaskStatus = await client.get_task_status(task_id) - logger.debug("Task status %s", task_status.json()) + logger.debug("Task status %s", task_status.model_dump_json()) await progress_manager.update( task_id=task_id, message=task_status.task_progress.message, @@ -100,21 +104,20 @@ async def _status_update() -> TaskStatus: ) return task_status - async def _wait_task_completion() -> None: + async def _wait_for_task_result() -> Any: task_status = await _status_update() while not task_status.done: await asyncio.sleep(status_poll_interval) task_status = await _status_update() - try: - await asyncio.wait_for(_wait_task_completion(), timeout=task_timeout) - - result: Optional[Any] = await client.get_task_result(task_id) + return await client.get_task_result(task_id) + try: + result = await asyncio.wait_for(_wait_for_task_result(), timeout=task_timeout) logger.debug("%s, %s", f"{task_id=}", f"{result=}") yield result - except asyncio.TimeoutError as e: + except TimeoutError as e: await client.cancel_and_delete_task(task_id) raise TaskClientTimeoutError( task_id=task_id, diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py index a79565fa7b0..937ddcf33d1 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_dependencies.py @@ -4,4 +4,5 @@ def get_tasks_manager(request: Request) -> TasksManager: - return request.app.state.long_running_task_manager + output: TasksManager = request.app.state.long_running_task_manager + return output diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py index ffb8ad23dec..e5f1ef7d9ee 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_error_handlers.py @@ -1,3 +1,5 @@ +import logging + from fastapi import status from fastapi.encoders import jsonable_encoder from starlette.requests import Request @@ -9,10 +11,13 @@ TaskNotFoundError, ) +_logger = logging.getLogger(__name__) + async def base_long_running_error_handler( _: Request, exception: BaseLongRunningError ) -> JSONResponse: + _logger.debug("%s", exception, stack_info=True) error_fields = dict(code=exception.code, 
message=f"{exception}") status_code = ( status.HTTP_404_NOT_FOUND diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py index 80d6fb5ceab..260a9e9d0b3 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_routes.py @@ -1,4 +1,6 @@ -from fastapi import APIRouter, Depends, Request, status +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, Query, Request, status from ...long_running_tasks._errors import TaskNotCompletedError, TaskNotFoundError from ...long_running_tasks._models import TaskGet, TaskId, TaskResult, TaskStatus @@ -12,7 +14,7 @@ @router.get("", response_model=list[TaskGet]) @cancel_on_disconnect async def list_tasks( - request: Request, tasks_manager: TasksManager = Depends(get_tasks_manager) + request: Request, tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)] ) -> list[TaskGet]: assert request # nosec return [ @@ -29,6 +31,7 @@ async def list_tasks( @router.get( "/{task_id}", + response_model=TaskStatus, responses={ status.HTTP_404_NOT_FOUND: {"description": "Task does not exist"}, }, @@ -37,7 +40,7 @@ async def list_tasks( async def get_task_status( request: Request, task_id: TaskId, - tasks_manager: TasksManager = Depends(get_tasks_manager), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], ) -> TaskStatus: assert request # nosec return tasks_manager.get_task_status(task_id=task_id, with_task_context=None) @@ -56,12 +59,17 @@ async def get_task_status( async def get_task_result( request: Request, task_id: TaskId, - tasks_manager: TasksManager = Depends(get_tasks_manager), -) -> TaskResult: + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + *, + return_exception: Annotated[bool, Query()] = False, +) -> TaskResult | Any: assert request # nosec # TODO: refactor this to use same as in https://github.com/ITISFoundation/osparc-simcore/issues/3265 try: - task_result = tasks_manager.get_task_result_old(task_id=task_id) + if return_exception: + task_result = tasks_manager.get_task_result(task_id, with_task_context=None) + else: + task_result = tasks_manager.get_task_result_old(task_id=task_id) await tasks_manager.remove_task( task_id, with_task_context=None, reraise_errors=False ) @@ -89,7 +97,7 @@ async def get_task_result( async def cancel_and_delete_task( request: Request, task_id: TaskId, - tasks_manager: TasksManager = Depends(get_tasks_manager), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], ) -> None: assert request # nosec await tasks_manager.remove_task(task_id, with_task_context=None) diff --git a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py index c5d7429f01a..e8306b6d187 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/_server.py @@ -50,4 +50,4 @@ async def on_shutdown() -> None: # add error handlers # NOTE: Exception handler can not be added during the on_startup script, otherwise not working correctly - app.add_exception_handler(BaseLongRunningError, base_long_running_error_handler) + app.add_exception_handler(BaseLongRunningError, base_long_running_error_handler) # type: ignore[arg-type] diff --git 
a/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py b/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py index 53ad39da916..c82bde0fe4e 100644 --- a/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py +++ b/packages/service-library/src/servicelib/fastapi/long_running_tasks/client.py @@ -2,28 +2,163 @@ Provides a convenient way to return the result given a TaskId. """ +import asyncio +import logging +from collections.abc import AsyncGenerator +from typing import Any + +import httpx +from fastapi import status +from models_library.api_schemas_long_running_tasks.base import TaskProgress +from models_library.api_schemas_long_running_tasks.tasks import TaskGet, TaskStatus +from tenacity import ( + AsyncRetrying, + TryAgain, + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_delay, + wait_random_exponential, +) +from yarl import URL + +from ...long_running_tasks._constants import DEFAULT_POLL_INTERVAL_S, HOUR from ...long_running_tasks._errors import TaskClientResultError from ...long_running_tasks._models import ( ClientConfiguration, + LRTask, ProgressCallback, ProgressMessage, ProgressPercent, + RequestBody, ) from ...long_running_tasks._task import TaskId, TaskResult +from ...rest_responses import unwrap_envelope_if_required from ._client import DEFAULT_HTTP_REQUESTS_TIMEOUT, Client, setup from ._context_manager import periodic_task_result +_logger = logging.getLogger(__name__) + + +_DEFAULT_FASTAPI_RETRY_POLICY: dict[str, Any] = { + "retry": retry_if_exception_type(httpx.RequestError), + "wait": wait_random_exponential(max=20), + "stop": stop_after_delay(60), + "reraise": True, + "before_sleep": before_sleep_log(_logger, logging.INFO), +} + + +@retry(**_DEFAULT_FASTAPI_RETRY_POLICY) +async def _start( + session: httpx.AsyncClient, url: URL, json: RequestBody | None +) -> TaskGet: + response = await session.post(f"{url}", json=json) + response.raise_for_status() + data = unwrap_envelope_if_required(response.json()) + return TaskGet.model_validate(data) + + +@retry(**_DEFAULT_FASTAPI_RETRY_POLICY) +async def _wait_for_completion( + session: httpx.AsyncClient, + task_id: TaskId, + status_url: URL, + client_timeout: int, +) -> AsyncGenerator[TaskProgress, None]: + try: + async for attempt in AsyncRetrying( + stop=stop_after_delay(client_timeout), + reraise=True, + retry=retry_if_exception_type(TryAgain), + before_sleep=before_sleep_log(_logger, logging.DEBUG), + ): + with attempt: + response = await session.get(f"{status_url}") + response.raise_for_status() + data = unwrap_envelope_if_required(response.json()) + task_status = TaskStatus.model_validate(data) + + yield task_status.task_progress + if not task_status.done: + await asyncio.sleep( + float( + response.headers.get("retry-after", DEFAULT_POLL_INTERVAL_S) + ) + ) + msg = f"{task_id=}, {task_status.started=} has status: '{task_status.task_progress.message}' {task_status.task_progress.percent}%" + raise TryAgain(msg) # noqa: TRY301 + + except TryAgain as exc: + # this is a timeout + msg = f"Long running task {task_id}, calling to {status_url} timed-out after {client_timeout} seconds" + raise TimeoutError(msg) from exc + + +@retry(**_DEFAULT_FASTAPI_RETRY_POLICY) +async def _task_result(session: httpx.AsyncClient, result_url: URL) -> Any: + response = await session.get(f"{result_url}", params={"return_exception": True}) + response.raise_for_status() + if response.status_code != status.HTTP_204_NO_CONTENT: + return 
unwrap_envelope_if_required(response.json()) + return None + + +@retry(**_DEFAULT_FASTAPI_RETRY_POLICY) +async def _abort_task(session: httpx.AsyncClient, abort_url: URL) -> None: + response = await session.delete(f"{abort_url}") + response.raise_for_status() + + +async def long_running_task_request( + session: httpx.AsyncClient, + url: URL, + json: RequestBody | None = None, + client_timeout: int = 1 * HOUR, +) -> AsyncGenerator[LRTask, None]: + """Will use the passed `httpx.AsyncClient` to call an oSparc long + running task `url` passing `json` as request body. + NOTE: this follows the usual aiohttp client syntax, and will raise the same errors + + Raises: + [https://docs.aiohttp.org/en/stable/client_reference.html#hierarchy-of-exceptions] + """ + task = None + try: + task = await _start(session, url, json) + last_progress = None + async for task_progress in _wait_for_completion( + session, + task.task_id, + URL(task.status_href), + client_timeout, + ): + last_progress = task_progress + yield LRTask(progress=task_progress) + assert last_progress # nosec + yield LRTask( + progress=last_progress, + _result=_task_result(session, URL(task.result_href)), + ) + + except (TimeoutError, asyncio.CancelledError): + if task: + await _abort_task(session, URL(task.abort_href)) + raise + + __all__: tuple[str, ...] = ( + "DEFAULT_HTTP_REQUESTS_TIMEOUT", "Client", "ClientConfiguration", - "DEFAULT_HTTP_REQUESTS_TIMEOUT", - "periodic_task_result", + "LRTask", "ProgressCallback", "ProgressMessage", "ProgressPercent", - "setup", "TaskClientResultError", "TaskId", "TaskResult", + "periodic_task_result", + "setup", ) # nopycln: file diff --git a/packages/service-library/src/servicelib/fastapi/monitoring.py b/packages/service-library/src/servicelib/fastapi/monitoring.py new file mode 100644 index 00000000000..32dd26f53d6 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/monitoring.py @@ -0,0 +1,147 @@ +# pylint: disable=protected-access + +import asyncio +import logging +from collections.abc import AsyncIterator +from time import perf_counter +from typing import Final + +from fastapi import FastAPI, Request, Response, status +from fastapi_lifespan_manager import State +from prometheus_client import CollectorRegistry +from prometheus_client.openmetrics.exposition import ( + CONTENT_TYPE_LATEST, + generate_latest, +) +from servicelib.prometheus_metrics import ( + PrometheusMetrics, + get_prometheus_metrics, + record_request_metrics, + record_response_metrics, +) +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.types import ASGIApp + +from ..common_headers import ( + UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE, + X_SIMCORE_USER_AGENT, +) + +_logger = logging.getLogger(__name__) +_PROMETHEUS_METRICS = "prometheus_metrics" + + +class PrometheusMiddleware(BaseHTTPMiddleware): + def __init__(self, app: ASGIApp, metrics: PrometheusMetrics): + super().__init__(app) + self.metrics = metrics + + async def dispatch( + self, request: Request, call_next: RequestResponseEndpoint + ) -> Response: + canonical_endpoint = request.url.path + + user_agent = request.headers.get( + X_SIMCORE_USER_AGENT, UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE + ) + + start_time = perf_counter() + try: + with record_request_metrics( + metrics=self.metrics, + method=request.method, + endpoint=canonical_endpoint, + user_agent=user_agent, + ): + response = await call_next(request) + status_code = response.status_code + + # path_params are not available before calling call_next + # 
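As a hedged illustration of the new long_running_task_request helper (the endpoint path, the payload and the LRTask done()/result() helpers are assumptions; they are not shown in this diff):

async def _request_example() -> None:
    async with httpx.AsyncClient(base_url="http://my-service:8000") as session:
        async for lr_task in long_running_task_request(
            session,
            URL("/v0/some-long-operation"),
            json={"x": 42},
            client_timeout=10 * 60,
        ):
            # each poll yields an LRTask carrying the latest TaskProgress
            print(f"progress: {lr_task.progress}")
            if lr_task.done():
                print("result:", await lr_task.result())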
https://github.com/encode/starlette/issues/685#issuecomment-550240999 + for k, v in request.path_params.items(): + key = "{" + k + "}" + canonical_endpoint = canonical_endpoint.replace(f"/{v}", f"/{key}") + except Exception: # pylint: disable=broad-except + # NOTE: The prometheus metrics middleware should be "outside" exception handling + # middleware. See https://fastapi.tiangolo.com/advanced/middleware/#adding-asgi-middlewares + status_code = status.HTTP_500_INTERNAL_SERVER_ERROR + raise + finally: + response_latency_seconds = perf_counter() - start_time + record_response_metrics( + metrics=self.metrics, + method=request.method, + endpoint=canonical_endpoint, + user_agent=user_agent, + http_status=status_code, + response_latency_seconds=response_latency_seconds, + ) + + return response + + +def initialize_prometheus_instrumentation(app: FastAPI) -> None: + metrics = get_prometheus_metrics() + app.state.prometheus_metrics = metrics + app.add_middleware(PrometheusMiddleware, metrics=metrics) + + +def _startup(app: FastAPI) -> None: + @app.get("/metrics", include_in_schema=False) + async def metrics_endpoint(request: Request) -> Response: + prometheus_metrics = request.app.state.prometheus_metrics + assert isinstance(prometheus_metrics, PrometheusMetrics) # nosec + + content = await asyncio.get_event_loop().run_in_executor( + None, generate_latest, prometheus_metrics.registry + ) + + return Response(content=content, headers={"Content-Type": CONTENT_TYPE_LATEST}) + + +def _shutdown(app: FastAPI) -> None: + prometheus_metrics = app.state.prometheus_metrics + assert isinstance(prometheus_metrics, PrometheusMetrics) # nosec + registry = prometheus_metrics.registry + for collector in list(registry._collector_to_names.keys()): # noqa: SLF001 + registry.unregister(collector) + + +def setup_prometheus_instrumentation(app: FastAPI) -> CollectorRegistry: + initialize_prometheus_instrumentation(app) + + async def _on_startup() -> None: + _startup(app) + + def _on_shutdown() -> None: + _shutdown(app) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + prometheus_metrics = app.state.prometheus_metrics + assert isinstance(prometheus_metrics, PrometheusMetrics) # nosec + + return prometheus_metrics.registry + + +_PROMETHEUS_INSTRUMENTATION_ENABLED: Final[str] = "prometheus_instrumentation_enabled" + + +def create_prometheus_instrumentationmain_input_state(*, enabled: bool) -> State: + return {_PROMETHEUS_INSTRUMENTATION_ENABLED: enabled} + + +async def prometheus_instrumentation_lifespan( + app: FastAPI, state: State +) -> AsyncIterator[State]: + # NOTE: requires ``initialize_prometheus_instrumentation`` to be called before the + # lifespan of the application runs, usually right after the ``FastAPI`` instance is created + + instrumentation_enabled = state.get(_PROMETHEUS_INSTRUMENTATION_ENABLED, False) + if instrumentation_enabled: + + _startup(app) + yield {} + if instrumentation_enabled: + _shutdown(app) diff --git a/packages/service-library/src/servicelib/fastapi/openapi.py b/packages/service-library/src/servicelib/fastapi/openapi.py index ad8a786b523..218d3301cc0 100644 --- a/packages/service-library/src/servicelib/fastapi/openapi.py +++ b/packages/service-library/src/servicelib/fastapi/openapi.py @@ -1,6 +1,7 @@ """ Common utils for core/application openapi specs """ +import re import types from typing import Any @@ -24,9 +25,9 @@ } -def get_common_oas_options(is_devel_mode: bool) -> dict[str, Any]: +def get_common_oas_options(*, is_devel_mode: bool)
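A minimal sketch of how the prometheus instrumentation helpers above might be wired into a service (the application name is an assumption):

from fastapi import FastAPI

app = FastAPI(title="my-service")
registry = setup_prometheus_instrumentation(app)
# after startup, GET /metrics serves the collected metrics (OpenMetrics text format)
# from the returned CollectorRegistry; per-request metrics are recorded by the
# PrometheusMiddleware installed above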
-> dict[str, Any]: """common OAS options for FastAPI constructor""" - servers = [ + servers: list[dict[str, Any]] = [ _OAS_DEFAULT_SERVER, ] if is_devel_mode: @@ -35,16 +36,16 @@ def get_common_oas_options(is_devel_mode: bool) -> dict[str, Any]: # SEE https://sonarcloud.io/project/security_hotspots?id=ITISFoundation_osparc-simcore&pullRequest=3165&hotspots=AYHPqDfX5LRQZ1Ko6y4- servers.append(_OAS_DEVELOPMENT_SERVER) - return dict( - servers=servers, - docs_url="/dev/doc", - redoc_url=None, # default disabled - ) + return { + "servers": servers, + "docs_url": "/dev/doc", + "redoc_url": None, # default disabled + } -def redefine_operation_id_in_router(router: APIRouter, operation_id_prefix: str): +def set_operation_id_as_handler_function_name(router: APIRouter): """ - Overrides default operation_ids assigning the same name as the handler functions and a prefix + Overrides default operation_ids assigning the same name as the handler function MUST be called only after all routes have been added. @@ -54,62 +55,101 @@ def redefine_operation_id_in_router(router: APIRouter, operation_id_prefix: str) for route in router.routes: if isinstance(route, APIRoute): assert isinstance(route.endpoint, types.FunctionType) # nosec - route.operation_id = ( - f"{operation_id_prefix}._{route.endpoint.__name__}_handler" - ) + route.operation_id = route.endpoint.__name__ # https://swagger.io/docs/specification/data-models/data-types/#numbers -SCHEMA_TO_PYTHON_TYPES = {"integer": int, "number": float} - -SKIP = ( +_SCHEMA_TO_PYTHON_TYPES = {"integer": int, "number": float} +_SKIP = ( "examples", # SEE openapi-standard: https://swagger.io/docs/specification/adding-examples/ # - exampleS are Dicts and not Lists - "patternProperties" + "patternProperties", # SEE Unsupported openapi-standard: https://swagger.io/docs/specification/data-models/keywords/?sbsearch=patternProperties # SEE https://github.com/OAI/OpenAPI-Specification/issues/687 # SEE https://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties ) +def _remove_named_groups(regex: str) -> str: + # Fixes structure error produced by named groups like + # ^simcore/services/comp/(?P[a-z0-9][a-z0-9_.-]*/)*(?P[a-z0-9-_]+[a-z0-9])$ + # into + # ^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$ + return re.sub(r"\(\?P<[^>]+>", "(", regex) + + +def _patch_node_properties(key: str, node: dict): + # Validation for URL is broken in the context of the license entry + # this helps to bypass validation and then replace with the correct value + if isinstance(key, str) and key.startswith("__PLACEHOLDER___KEY_"): + new_key = key.replace("__PLACEHOLDER___KEY_", "") + node[new_key] = node[key] + node.pop(key) + + # SEE openapi-standard: https://swagger.io/docs/specification/data-models/data-types/#range + if node_type := node.get("type"): + # SEE fastapi ISSUE: https://github.com/tiangolo/fastapi/issues/240 (test_openap.py::test_exclusive_min_openapi_issue ) + if key == "exclusiveMinimum": + cast_to_python = _SCHEMA_TO_PYTHON_TYPES[node_type] + node["minimum"] = cast_to_python(node[key]) + node["exclusiveMinimum"] = True + + elif key == "exclusiveMaximum": + cast_to_python = _SCHEMA_TO_PYTHON_TYPES[node_type] + node["maximum"] = cast_to_python(node[key]) + node["exclusiveMaximum"] = True + + elif key in ("minimum", "maximum"): + # NOTE: Security Audit Report: + # The property in question requires a value of the type integer, but the value you have defined does not match this. 
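For illustration, the rewrite performed by _remove_named_groups on the pattern quoted in its comment (the group names used here are assumed, since they are not preserved above):

assert _remove_named_groups(
    r"^simcore/services/comp/(?P<org>[a-z0-9][a-z0-9_.-]*/)*(?P<name>[a-z0-9-_]+[a-z0-9])$"
) == r"^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$"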
+ # SEE https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#dataTypeFormat + cast_to_python = _SCHEMA_TO_PYTHON_TYPES[node_type] + node[key] = cast_to_python(node[key]) + + elif key == "pattern" and node_type == "string": + node[key] = _remove_named_groups(regex=node[key]) + + elif key == "env_names": + # NOTE: `env_names` added by BaseCustomSettings types + # and is not compatible with OpenAPI specifications + node.pop("env_names") + + +def _patch(node: Any): + if isinstance(node, dict): + for key in list(node.keys()): + if key in _SKIP: + node.pop(key) + continue + + _patch_node_properties(key, node) + + # recursive + if key in node: # key could have been removed in _patch_node_properties + _patch(node[key]) + + elif isinstance(node, list): + for value in node: + # recursive + _patch(value) + + def patch_openapi_specs(app_openapi: dict[str, Any]): """Patches app.openapi with some fixes and osparc conventions Modifies fastapi auto-generated OAS to pass our openapi validation. """ - - def _patch(node): - if isinstance(node, dict): - for key in list(node.keys()): - # SEE fastapi ISSUE: https://github.com/tiangolo/fastapi/issues/240 (test_openap.py::test_exclusive_min_openapi_issue ) - # SEE openapi-standard: https://swagger.io/docs/specification/data-models/data-types/#range - if key == "exclusiveMinimum": - cast_to_python = SCHEMA_TO_PYTHON_TYPES[node["type"]] - node["minimum"] = cast_to_python(node[key]) - node["exclusiveMinimum"] = True - - elif key == "exclusiveMaximum": - cast_to_python = SCHEMA_TO_PYTHON_TYPES[node["type"]] - node["maximum"] = cast_to_python(node[key]) - node["exclusiveMaximum"] = True - - elif key in SKIP: - node.pop(key) - continue - - _patch(node[key]) - - elif isinstance(node, list): - for value in node: - _patch(value) - _patch(app_openapi) def override_fastapi_openapi_method(app: FastAPI): # pylint: disable=protected-access - app._original_openapi = types.MethodType(copy_func(app.openapi), app) # type: ignore + setattr( # noqa: B010 + app, + "_original_openapi", + types.MethodType(copy_func(app.openapi), app), + ) def _custom_openapi_method(self: FastAPI) -> dict: """Overrides FastAPI.openapi member function @@ -117,9 +157,46 @@ def _custom_openapi_method(self: FastAPI) -> dict: """ # NOTE: see fastapi.applications.py:FastApi.openapi(self) implementation if not self.openapi_schema: - self.openapi_schema = self._original_openapi() # type: ignore + self.openapi_schema = self._original_openapi() # type: ignore[attr-defined] + assert self.openapi_schema is not None # nosec patch_openapi_specs(self.openapi_schema) - return self.openapi_schema + output = self.openapi_schema + assert self.openapi_schema is not None # nosec + return output - app.openapi = types.MethodType(_custom_openapi_method, app) + setattr(app, "openapi", types.MethodType(_custom_openapi_method, app)) # noqa: B010 + + +def create_openapi_specs( + app: FastAPI, + *, + drop_fastapi_default_422: bool = True, + remove_main_sections: bool = True, +): + """ + Includes some patches used in the api/specs generators + """ + override_fastapi_openapi_method(app) + openapi = app.openapi() + + # Remove these sections + if remove_main_sections: + for section in ("info", "openapi"): + openapi.pop(section, None) + + schemas = openapi["components"]["schemas"] + for section in ("HTTPValidationError", "ValidationError"): + schemas.pop(section, None) + + # Removes default response 422 + if drop_fastapi_default_422: + for method_item in openapi.get("paths", {}).values(): + for param in 
method_item.values(): + # NOTE: If description is like this, + # it assumes it is the default HTTPValidationError from fastapi + if (e422 := param.get("responses", {}).get("422", None)) and e422.get( + "description" + ) == "Validation Error": + param.get("responses", {}).pop("422", None) + return openapi diff --git a/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py b/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py new file mode 100644 index 00000000000..319a7121896 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/postgres_lifespan.py @@ -0,0 +1,58 @@ +import asyncio +import logging +from collections.abc import AsyncIterator +from enum import Enum + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from settings_library.postgres import PostgresSettings +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..db_asyncpg_utils import create_async_engine_and_database_ready +from ..logging_utils import log_catch +from .lifespan_utils import LifespanOnStartupError, lifespan_context + +_logger = logging.getLogger(__name__) + + +class PostgresLifespanState(str, Enum): + POSTGRES_SETTINGS = "postgres_settings" + POSTGRES_ASYNC_ENGINE = "postgres.async_engine" + + +class PostgresConfigurationError(LifespanOnStartupError): + msg_template = "Invalid postgres settings [={settings}] on startup. Note that postgres cannot be disabled using settings" + + +def create_postgres_database_input_state(settings: PostgresSettings) -> State: + return {PostgresLifespanState.POSTGRES_SETTINGS: settings} + + +async def postgres_database_lifespan(_: FastAPI, state: State) -> AsyncIterator[State]: + + _lifespan_name = f"{__name__}.{postgres_database_lifespan.__name__}" + + with lifespan_context(_logger, logging.INFO, _lifespan_name, state) as called_state: + # Validate input state + settings = state[PostgresLifespanState.POSTGRES_SETTINGS] + + if settings is None or not isinstance(settings, PostgresSettings): + raise PostgresConfigurationError(settings=settings) + + assert isinstance(settings, PostgresSettings) # nosec + + # connect to database + async_engine: AsyncEngine = await create_async_engine_and_database_ready( + settings + ) + + try: + + yield { + PostgresLifespanState.POSTGRES_ASYNC_ENGINE: async_engine, + **called_state, + } + + finally: + with log_catch(_logger, reraise=False): + await asyncio.wait_for(async_engine.dispose(), timeout=10) diff --git a/packages/service-library/src/servicelib/fastapi/profiler.py b/packages/service-library/src/servicelib/fastapi/profiler.py new file mode 100644 index 00000000000..cb3e7c5c084 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/profiler.py @@ -0,0 +1,104 @@ +from typing import Any, Final + +from fastapi import FastAPI +from servicelib.aiohttp import status +from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON +from starlette.requests import Request +from starlette.types import ASGIApp, Receive, Scope, Send + +from ..utils_profiling_middleware import ( + _is_profiling, + _profiler, + append_profile, + check_response_headers, +) + + +def _is_last_response(response_headers: dict[bytes, bytes], message: dict[str, Any]): + if ( + content_type := response_headers.get(b"content-type") + ) and content_type == MIMETYPE_APPLICATION_JSON.encode(): + return True + if (more_body := message.get("more_body")) is not None: + return not more_body + msg = "Could not determine if last response" + raise RuntimeError(msg) + + +class ProfilerMiddleware: + """Following + 
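A hedged sketch of how the new postgres lifespan is fed and consumed (the wiring through fastapi_lifespan_manager and the settings source are assumptions):

from settings_library.postgres import PostgresSettings

# an earlier lifespan yields this dict as input state for postgres_database_lifespan
input_state = create_postgres_database_input_state(PostgresSettings.create_from_envs())
# i.e. {PostgresLifespanState.POSTGRES_SETTINGS: <PostgresSettings>}

# once postgres_database_lifespan has run, downstream lifespans can read
#   state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE]  # a ready sqlalchemy AsyncEngine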
https://www.starlette.io/middleware/#cleanup-and-error-handling + https://www.starlette.io/middleware/#reusing-starlette-components + https://fastapi.tiangolo.com/advanced/middleware/#advanced-middleware + """ + + def __init__(self, app: ASGIApp): + self._app = app + self._profile_header_trigger: Final[str] = "x-profile" + + async def __call__(self, scope: Scope, receive: Receive, send: Send): + if scope["type"] != "http": + await self._app(scope, receive, send) + return + + request: Request = Request(scope) + request_headers = dict(request.headers) + response_headers: dict[bytes, bytes] = {} + + if request_headers.get(self._profile_header_trigger) is None: + await self._app(scope, receive, send) + return + + if _profiler.is_running or (_profiler.last_session is not None): + response = { + "type": "http.response.start", + "status": status.HTTP_500_INTERNAL_SERVER_ERROR, + "headers": [ + (b"content-type", b"text/plain"), + ], + } + await send(response) + response_body = { + "type": "http.response.body", + "body": b"Profiler is already running. Only a single request can be profiled at any give time.", + } + await send(response_body) + return + + try: + request_headers.pop(self._profile_header_trigger) + scope["headers"] = [ + (k.encode("utf8"), v.encode("utf8")) for k, v in request_headers.items() + ] + _profiler.start() + _is_profiling.set(True) + + async def _send_wrapper(message): + if _is_profiling.get(): + nonlocal response_headers + if message["type"] == "http.response.start": + response_headers = dict(message.get("headers")) + message["headers"] = check_response_headers(response_headers) + elif message["type"] == "http.response.body": + if _is_last_response(response_headers, message): + _profiler.stop() + profile_text = _profiler.output_text( + unicode=True, color=True, show_all=True + ) + _profiler.reset() + message["body"] = append_profile( + message["body"].decode(), profile_text + ).encode() + else: + message["more_body"] = True + await send(message) + + await self._app(scope, receive, _send_wrapper) + + finally: + _profiler.reset() + + +def initialize_profiler(app: FastAPI) -> None: + # NOTE: this cannot be ran once the application is started + app.add_middleware(ProfilerMiddleware) diff --git a/packages/service-library/src/servicelib/fastapi/rabbitmq.py b/packages/service-library/src/servicelib/fastapi/rabbitmq.py new file mode 100644 index 00000000000..4f41526c3ab --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/rabbitmq.py @@ -0,0 +1,92 @@ +import logging +import warnings + +from fastapi import FastAPI +from models_library.rabbitmq_messages import RabbitMessageBase +from settings_library.rabbit import RabbitSettings + +from ..rabbitmq import RabbitMQClient +from ..rabbitmq._utils import wait_till_rabbitmq_responsive +from .errors import ApplicationStateError + +_logger = logging.getLogger(__name__) + + +def _create_client(app: FastAPI): + app.state.rabbitmq_client = RabbitMQClient( + client_name=app.state.rabbitmq_client_name, + settings=app.state.rabbitmq_settings, + ) + + +async def _remove_client(app: FastAPI): + await app.state.rabbitmq_client.close() + app.state.rabbitmq_client = None + + +async def connect(app: FastAPI): + assert app.state.rabbitmq_settings # nosec + await wait_till_rabbitmq_responsive(app.state.rabbitmq_settings.dsn) + _create_client(app) + + +async def disconnect(app: FastAPI): + if app.state.rabbitmq_client: + await _remove_client(app) + + +async def reconnect(app: FastAPI): + await disconnect(app) + await connect(app) + + +def 
setup_rabbit( + app: FastAPI, + *, + settings: RabbitSettings, + name: str, +) -> None: + """Sets up rabbit in a given app + + - Inits app.states for rabbitmq + - Creates a client to communicate with rabbitmq + + Arguments: + app -- fastapi app + settings -- Rabbit settings or if None, the connection to rabbit is not done upon startup + name -- name for the rmq client name + """ + warnings.warn( + "The 'setup_rabbit' function is deprecated and will be removed in a future release. " + "Please use 'rabbitmq_lifespan' for managing RabbitMQ connections.", + DeprecationWarning, + stacklevel=2, + ) + + app.state.rabbitmq_client = None # RabbitMQClient | None + app.state.rabbitmq_client_name = name + app.state.rabbitmq_settings = settings + + async def on_startup() -> None: + await connect(app) + + app.add_event_handler("startup", on_startup) + + async def on_shutdown() -> None: + await disconnect(app) + + app.add_event_handler("shutdown", on_shutdown) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + if not app.state.rabbitmq_client: + raise ApplicationStateError( + state="rabbitmq_client", + msg="Rabbitmq service unavailable. Check app settings", + ) + assert isinstance(rabbitmq_client := app.state.rabbitmq_client, RabbitMQClient) + return rabbitmq_client + + +async def post_message(app: FastAPI, message: RabbitMessageBase) -> None: + await get_rabbitmq_client(app).publish(message.channel_name, message) diff --git a/packages/service-library/src/servicelib/fastapi/rabbitmq_lifespan.py b/packages/service-library/src/servicelib/fastapi/rabbitmq_lifespan.py new file mode 100644 index 00000000000..180dbad800e --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/rabbitmq_lifespan.py @@ -0,0 +1,47 @@ +import logging +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from pydantic import BaseModel, ValidationError +from settings_library.rabbit import RabbitSettings + +from ..rabbitmq import wait_till_rabbitmq_responsive +from .lifespan_utils import ( + LifespanOnStartupError, + lifespan_context, +) + +_logger = logging.getLogger(__name__) + + +class RabbitMQConfigurationError(LifespanOnStartupError): + msg_template = "Invalid RabbitMQ config on startup : {validation_error}" + + +class RabbitMQLifespanState(BaseModel): + RABBIT_SETTINGS: RabbitSettings + + +async def rabbitmq_connectivity_lifespan( + _: FastAPI, state: State +) -> AsyncIterator[State]: + """Ensures RabbitMQ connectivity during lifespan. + + For creating clients, use additional lifespans like rabbitmq_rpc_client_context. 
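A minimal usage sketch for the (now deprecated) setup_rabbit helper together with post_message (reading the settings from the environment is an assumption):

from fastapi import FastAPI
from models_library.rabbitmq_messages import RabbitMessageBase
from settings_library.rabbit import RabbitSettings

app = FastAPI()
# emits a DeprecationWarning; new code should prefer the rabbitmq lifespan instead
setup_rabbit(app, settings=RabbitSettings.create_from_envs(), name="my-service")


async def _notify(message: RabbitMessageBase) -> None:
    # raises ApplicationStateError if the client was never set up
    await post_message(app, message)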
+ """ + _lifespan_name = f"{__name__}.{rabbitmq_connectivity_lifespan.__name__}" + + with lifespan_context(_logger, logging.INFO, _lifespan_name, state) as called_state: + + # Validate input state + try: + rabbit_state = RabbitMQLifespanState.model_validate(state) + rabbit_dsn_with_secrets = rabbit_state.RABBIT_SETTINGS.dsn + except ValidationError as exc: + raise RabbitMQConfigurationError(validation_error=exc, state=state) from exc + + # Wait for RabbitMQ to be responsive + await wait_till_rabbitmq_responsive(rabbit_dsn_with_secrets) + + yield {"RABBIT_CONNECTIVITY_LIFESPAN_NAME": _lifespan_name, **called_state} diff --git a/packages/service-library/src/servicelib/fastapi/redis_lifespan.py b/packages/service-library/src/servicelib/fastapi/redis_lifespan.py new file mode 100644 index 00000000000..b1ac98e9d6c --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/redis_lifespan.py @@ -0,0 +1,64 @@ +import asyncio +import logging +from collections.abc import AsyncIterator +from typing import Annotated + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from pydantic import BaseModel, StringConstraints, ValidationError +from settings_library.redis import RedisDatabase, RedisSettings + +from ..logging_utils import log_catch, log_context +from ..redis import RedisClientSDK +from .lifespan_utils import LifespanOnStartupError, lifespan_context + +_logger = logging.getLogger(__name__) + + +class RedisConfigurationError(LifespanOnStartupError): + msg_template = "Invalid redis config on startup : {validation_error}" + + +class RedisLifespanState(BaseModel): + REDIS_SETTINGS: RedisSettings + REDIS_CLIENT_NAME: Annotated[str, StringConstraints(min_length=3, max_length=32)] + REDIS_CLIENT_DB: RedisDatabase + + +async def redis_client_sdk_lifespan(_: FastAPI, state: State) -> AsyncIterator[State]: + _lifespan_name = f"{__name__}.{redis_client_sdk_lifespan.__name__}" + + with lifespan_context(_logger, logging.INFO, _lifespan_name, state) as called_state: + + # Validate input state + try: + redis_state = RedisLifespanState.model_validate(state) + redis_dsn_with_secrets = redis_state.REDIS_SETTINGS.build_redis_dsn( + redis_state.REDIS_CLIENT_DB + ) + except ValidationError as exc: + raise RedisConfigurationError(validation_error=exc, state=state) from exc + + # Setup client + with log_context( + _logger, + logging.INFO, + f"Creating redis client with name={redis_state.REDIS_CLIENT_NAME}", + ): + # NOTE: sdk integrats waiting until connection is ready + # and will raise an exception if it cannot connect + redis_client = RedisClientSDK( + redis_dsn_with_secrets, + client_name=redis_state.REDIS_CLIENT_NAME, + ) + + try: + yield {"REDIS_CLIENT_SDK": redis_client, **called_state} + finally: + # Teardown client + with log_catch(_logger, reraise=False): + await asyncio.wait_for( + redis_client.shutdown(), + # NOTE: shutdown already has a _HEALTHCHECK_TASK_TIMEOUT_S of 10s + timeout=20, + ) diff --git a/packages/service-library/src/servicelib/fastapi/requests_decorators.py b/packages/service-library/src/servicelib/fastapi/requests_decorators.py index a69942d61d1..ae5f1ea047c 100644 --- a/packages/service-library/src/servicelib/fastapi/requests_decorators.py +++ b/packages/service-library/src/servicelib/fastapi/requests_decorators.py @@ -13,7 +13,7 @@ class _HandlerWithRequestArg(Protocol): __name__: str - async def __call__(self, request: Request, *args: Any) -> Any: + async def __call__(self, request: Request, *args: Any, **kwargs: Any) -> Any: ... 
@@ -22,13 +22,11 @@ def _validate_signature(handler: _HandlerWithRequestArg): try: p = next(iter(inspect.signature(handler).parameters.values())) if p.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD or p.annotation != Request: - raise TypeError( - f"Invalid handler {handler.__name__} signature: first parameter must be a Request, got {p.annotation}" - ) + msg = f"Invalid handler {handler.__name__} signature: first parameter must be a Request, got {p.annotation}" + raise TypeError(msg) except StopIteration as e: - raise TypeError( - f"Invalid handler {handler.__name__} signature: first parameter must be a Request, got none" - ) from e + msg = f"Invalid handler {handler.__name__} signature: first parameter must be a Request, got none" + raise TypeError(msg) from e # diff --git a/packages/service-library/src/servicelib/fastapi/rest_pagination.py b/packages/service-library/src/servicelib/fastapi/rest_pagination.py new file mode 100644 index 00000000000..0a199152ace --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/rest_pagination.py @@ -0,0 +1,28 @@ +from typing import TypeAlias, TypeVar + +from fastapi import Query +from fastapi_pagination.cursor import CursorPage # type: ignore[import-not-found] +from fastapi_pagination.customization import ( # type: ignore[import-not-found] + CustomizedPage, + UseParamsFields, +) +from models_library.api_schemas_storage.storage_schemas import ( + DEFAULT_NUMBER_OF_PATHS_PER_PAGE, + MAX_NUMBER_OF_PATHS_PER_PAGE, +) + +_T = TypeVar("_T") + +CustomizedPathsCursorPage = CustomizedPage[ + CursorPage[_T], + # Customizes the maximum value to fit frontend needs + UseParamsFields( + size=Query( + DEFAULT_NUMBER_OF_PATHS_PER_PAGE, + ge=1, + le=MAX_NUMBER_OF_PATHS_PER_PAGE, + description="Page size", + ) + ), +] +CustomizedPathsCursorPageParams: TypeAlias = CustomizedPathsCursorPage.__params_type__ # type: ignore diff --git a/packages/service-library/src/servicelib/fastapi/timing_middleware.py b/packages/service-library/src/servicelib/fastapi/timing_middleware.py new file mode 100644 index 00000000000..7fe5e283814 --- /dev/null +++ b/packages/service-library/src/servicelib/fastapi/timing_middleware.py @@ -0,0 +1,15 @@ +import logging +import time + +from fastapi import Request + +_logger = logging.getLogger(__name__) + + +async def add_process_time_header(request: Request, call_next): + start_time = time.time() + response = await call_next(request) + process_time = time.time() - start_time + _logger.debug("time to process %.2fs", process_time) + response.headers["X-Process-Time"] = str(process_time) + return response diff --git a/packages/service-library/src/servicelib/fastapi/tracing.py b/packages/service-library/src/servicelib/fastapi/tracing.py index 3d8a3fd9d3a..5b2cba5434d 100644 --- a/packages/service-library/src/servicelib/fastapi/tracing.py +++ b/packages/service-library/src/servicelib/fastapi/tracing.py @@ -1,16 +1,218 @@ +"""Adds fastapi middleware for tracing using opentelemetry instrumentation.""" + +import logging +from collections.abc import AsyncIterator + from fastapi import FastAPI -from fastapi_contrib.conf import settings -from fastapi_contrib.tracing.middlewares import OpentracingMiddleware -from fastapi_contrib.tracing.utils import setup_opentracing +from fastapi_lifespan_manager import State +from httpx import AsyncClient, Client +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.http.trace_exporter import ( + OTLPSpanExporter as OTLPSpanExporterHTTP, +) +from opentelemetry.instrumentation.fastapi import 
FastAPIInstrumentor +from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from servicelib.logging_utils import log_context from settings_library.tracing import TracingSettings +from yarl import URL + +_logger = logging.getLogger(__name__) + +try: + from opentelemetry.instrumentation.asyncpg import ( # type: ignore[import-not-found] + AsyncPGInstrumentor, + ) + + HAS_ASYNCPG = True +except ImportError: + HAS_ASYNCPG = False + +try: + from opentelemetry.instrumentation.aiopg import AiopgInstrumentor + + HAS_AIOPG = True +except ImportError: + HAS_AIOPG = False + +try: + from opentelemetry.instrumentation.redis import RedisInstrumentor + + HAS_REDIS = True +except ImportError: + HAS_REDIS = False + +try: + from opentelemetry.instrumentation.botocore import ( # type: ignore[import-not-found] + BotocoreInstrumentor, + ) + + HAS_BOTOCORE = True +except ImportError: + HAS_BOTOCORE = False + +try: + from opentelemetry.instrumentation.requests import RequestsInstrumentor + + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + +try: + from opentelemetry.instrumentation.aio_pika.aio_pika_instrumentor import ( + AioPikaInstrumentor, + ) + + HAS_AIOPIKA_INSTRUMENTOR = True +except ImportError: + HAS_AIOPIKA_INSTRUMENTOR = False + + +def _startup(tracing_settings: TracingSettings, service_name: str) -> None: + if ( + not tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT + and not tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_PORT + ): + _logger.warning("Skipping opentelemetry tracing setup") + return + # Set up the tracer provider + resource = Resource(attributes={"service.name": service_name}) + trace.set_tracer_provider(TracerProvider(resource=resource)) + global_tracer_provider = trace.get_tracer_provider() + assert isinstance(global_tracer_provider, TracerProvider) # nosec + + opentelemetry_collector_endpoint: str = ( + f"{tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT}" + ) + + tracing_destination: str = ( + f"{URL(opentelemetry_collector_endpoint).with_port(tracing_settings.TRACING_OPENTELEMETRY_COLLECTOR_PORT).with_path('/v1/traces')}" + ) + + _logger.info( + "Trying to connect service %s to opentelemetry tracing collector at %s.", + service_name, + tracing_destination, + ) + # Configure OTLP exporter to send spans to the collector + otlp_exporter = OTLPSpanExporterHTTP(endpoint=tracing_destination) + span_processor = BatchSpanProcessor(otlp_exporter) + global_tracer_provider.add_span_processor(span_processor) + + if HAS_AIOPG: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add asyncpg opentelemetry autoinstrumentation...", + ): + AiopgInstrumentor().instrument() + if HAS_AIOPIKA_INSTRUMENTOR: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add aio_pika opentelemetry autoinstrumentation...", + ): + AioPikaInstrumentor().instrument() + if HAS_ASYNCPG: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add asyncpg opentelemetry autoinstrumentation...", + ): + AsyncPGInstrumentor().instrument() + if HAS_REDIS: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add redis opentelemetry autoinstrumentation...", + ): + RedisInstrumentor().instrument() + if HAS_BOTOCORE: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add botocore opentelemetry autoinstrumentation...", + ): + 
BotocoreInstrumentor().instrument() + if HAS_REQUESTS: + with log_context( + _logger, + logging.INFO, + msg="Attempting to add requests opentelemetry autoinstrumentation...", + ): + RequestsInstrumentor().instrument() + + +def _shutdown() -> None: + """Uninstruments all opentelemetry instrumentors that were instrumented.""" + FastAPIInstrumentor().uninstrument() + if HAS_AIOPG: + try: + AiopgInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AiopgInstrumentor") + if HAS_AIOPIKA_INSTRUMENTOR: + try: + AioPikaInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AioPikaInstrumentor") + if HAS_ASYNCPG: + try: + AsyncPGInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument AsyncPGInstrumentor") + if HAS_REDIS: + try: + RedisInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument RedisInstrumentor") + if HAS_BOTOCORE: + try: + BotocoreInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument BotocoreInstrumentor") + if HAS_REQUESTS: + try: + RequestsInstrumentor().uninstrument() + except Exception: # pylint:disable=broad-exception-caught + _logger.exception("Failed to uninstrument RequestsInstrumentor") + + +def initialize_fastapi_app_tracing(app: FastAPI): + FastAPIInstrumentor.instrument_app(app) + + +def setup_httpx_client_tracing(client: AsyncClient | Client): + HTTPXClientInstrumentor.instrument_client(client) + + +def setup_tracing( + app: FastAPI, tracing_settings: TracingSettings, service_name: str +) -> None: + # NOTE: This does not instrument the app itself. Call setup_fastapi_app_tracing to do that. + _startup(tracing_settings=tracing_settings, service_name=service_name) + + def _on_shutdown() -> None: + _shutdown() + + app.add_event_handler("shutdown", _on_shutdown) + + +def get_tracing_instrumentation_lifespan( + tracing_settings: TracingSettings, service_name: str +): + # NOTE: This lifespan does not instrument the app itself. Call setup_fastapi_app_tracing to do that. 
+ _startup(tracing_settings=tracing_settings, service_name=service_name) + + async def tracing_instrumentation_lifespan( + app: FastAPI, + ) -> AsyncIterator[State]: + assert app # nosec + yield {} -def setup_tracing(app: FastAPI, tracing_settings: TracingSettings): - async def start_app() -> None: - settings.service_name = tracing_settings.TRACING_CLIENT_NAME - settings.jaeger_host = tracing_settings.TRACING_THRIFT_COMPACT_ENDPOINT.host - settings.jaeger_port = tracing_settings.TRACING_THRIFT_COMPACT_ENDPOINT.port - setup_opentracing(app) - app.add_middleware(OpentracingMiddleware) + _shutdown() - app.add_event_handler("startup", start_app) + return tracing_instrumentation_lifespan diff --git a/packages/service-library/src/servicelib/file_utils.py b/packages/service-library/src/servicelib/file_utils.py index 26ba312d760..f41b210262a 100644 --- a/packages/service-library/src/servicelib/file_utils.py +++ b/packages/service-library/src/servicelib/file_utils.py @@ -1,16 +1,33 @@ import asyncio +import hashlib import shutil +from collections.abc import Iterator +from contextlib import contextmanager +from logging import Logger from pathlib import Path +from typing import Final, Protocol # https://docs.python.org/3/library/shutil.html#shutil.rmtree # https://docs.python.org/3/library/os.html#os.remove from aiofiles.os import remove from aiofiles.os import wrap as sync_to_async +from pydantic import ByteSize, TypeAdapter + +CHUNK_4KB: Final[ByteSize] = TypeAdapter(ByteSize).validate_python("4kb") # 4K blocks +CHUNK_8MB: Final[ByteSize] = TypeAdapter(ByteSize).validate_python( + "8MiB" +) # 8mIB blocks + + +class AsyncStream(Protocol): + async def read(self, size: int = -1) -> bytes: ... + _shutil_rmtree = sync_to_async(shutil.rmtree) +shutil_move = sync_to_async(shutil.move) -async def _rm(path: Path, ignore_errors: bool): +async def _rm(path: Path, *, ignore_errors: bool): """Removes file or directory""" try: await remove(path) @@ -19,10 +36,89 @@ async def _rm(path: Path, ignore_errors: bool): async def remove_directory( - path: Path, only_children: bool = False, ignore_errors: bool = False + path: Path, *, only_children: bool = False, ignore_errors: bool = False ) -> None: """Optional parameter allows to remove all children and keep directory""" if only_children: - await asyncio.gather(*[_rm(child, ignore_errors) for child in path.glob("*")]) + await asyncio.gather( + *[_rm(child, ignore_errors=ignore_errors) for child in path.glob("*")] + ) else: await _shutil_rmtree(path, ignore_errors=ignore_errors) + + +async def create_sha256_checksum( + async_stream: AsyncStream, *, chunk_size: ByteSize = CHUNK_8MB +) -> str: + """ + Usage: + import aiofiles + + async with aiofiles.open(path, mode="rb") as file: + sha256check = await create_sha256_checksum(file) + + SEE https://ant.apache.org/manual/Tasks/checksum.html + WARNING: bandit reports the use of insecure MD2, MD4, MD5, or SHA1 hash function. 
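For illustration, how the refactored tracing helpers might be wired together (the settings source and service name are assumptions):

import httpx
from fastapi import FastAPI
from settings_library.tracing import TracingSettings

app = FastAPI()
tracing_settings = TracingSettings.create_from_envs()

# configures the OTLP exporter and the optional auto-instrumentations
setup_tracing(app, tracing_settings=tracing_settings, service_name="my-service")
# instruments the FastAPI app itself (kept as a separate, explicit step)
initialize_fastapi_app_tracing(app)

# outgoing calls from an httpx client can be traced as well
client = httpx.AsyncClient()
setup_httpx_client_tracing(client)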
+ """ + sha256_hash = hashlib.sha256() # nosec + return await _eval_hash_async(async_stream, sha256_hash, chunk_size) + + +async def _eval_hash_async( + async_stream: AsyncStream, + hasher: "hashlib._Hash", + chunk_size: ByteSize, +) -> str: + while chunk := await async_stream.read(chunk_size): + hasher.update(chunk) + digest = hasher.hexdigest() + return f"{digest}" + + +def _get_file_properties(path: Path) -> tuple[float, int]: + stats = path.stat() + return stats.st_mtime, stats.st_size + + +def _get_directory_snapshot(path: Path) -> dict[str, tuple[float, int]]: + return { + f"{p.relative_to(path)}": _get_file_properties(p) + for p in path.rglob("*") + if p.is_file() + } + + +@contextmanager +def log_directory_changes(path: Path, logger: Logger, log_level: int) -> Iterator[None]: + before: dict[str, tuple[float, int]] = _get_directory_snapshot(path) + yield + after: dict[str, tuple[float, int]] = _get_directory_snapshot(path) + + after_keys: set[str] = set(after.keys()) + before_keys: set[str] = set(before.keys()) + common_keys = before_keys & after_keys + + added_elements = after_keys - before_keys + removed_elements = before_keys - after_keys + content_changed_elements = {x for x in common_keys if before[x] != after[x]} + + if added_elements or removed_elements or content_changed_elements: + logger.log(log_level, "File changes in path: '%s'", f"{path}") + if added_elements: + logger.log( + log_level, + "Files added:\n%s", + "\n".join([f"+ {x}" for x in sorted(added_elements)]), + ) + if removed_elements: + logger.log( + log_level, + "Files removed:\n%s", + "\n".join([f"- {x}" for x in sorted(removed_elements)]), + ) + if content_changed_elements: + logger.log( + log_level, + "File content changed:\n%s", + "\n".join([f"* {x}" for x in sorted(content_changed_elements)]), + ) diff --git a/packages/service-library/src/servicelib/functools_utils.py b/packages/service-library/src/servicelib/functools_utils.py index f94e7a8fd4a..80c975a466e 100644 --- a/packages/service-library/src/servicelib/functools_utils.py +++ b/packages/service-library/src/servicelib/functools_utils.py @@ -1,8 +1,10 @@ import functools import types +from collections.abc import Callable +from typing import Any, cast -def copy_func(f): +def copy_func(f: Callable[..., Any]) -> Callable[..., Any]: # SEE https://stackoverflow.com/questions/13503079/how-to-create-a-copy-of-a-python-function g = types.FunctionType( f.__code__, @@ -11,6 +13,6 @@ def copy_func(f): argdefs=f.__defaults__, closure=f.__closure__, ) - g = functools.update_wrapper(g, f) + g = cast(types.FunctionType, functools.update_wrapper(g, f)) g.__kwdefaults__ = f.__kwdefaults__ return g diff --git a/packages/service-library/src/servicelib/instrumentation.py b/packages/service-library/src/servicelib/instrumentation.py new file mode 100644 index 00000000000..002e1942853 --- /dev/null +++ b/packages/service-library/src/servicelib/instrumentation.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + +from prometheus_client import CollectorRegistry + + +@dataclass(slots=True, kw_only=True) +class MetricsBase: + subsystem: str + registry: CollectorRegistry + + +def get_metrics_namespace(application_name: str) -> str: + return application_name.replace("-", "_") diff --git a/packages/service-library/src/servicelib/json_serialization.py b/packages/service-library/src/servicelib/json_serialization.py deleted file mode 100644 index 13bcfdff567..00000000000 --- a/packages/service-library/src/servicelib/json_serialization.py +++ /dev/null @@ -1,15 +0,0 @@ -import json 
-from typing import Any - -from pydantic.json import pydantic_encoder - - -def json_dumps(obj: Any, **kwargs): - """json.dumps with rich encoder. - A big applause for pydantic authors here!!! - """ - return json.dumps(obj, default=pydantic_encoder, **kwargs) - - -# TODO: support for orjson -# TODO: support for ujson (fast but poor encoding, only for basic types) diff --git a/packages/service-library/src/servicelib/logging_errors.py b/packages/service-library/src/servicelib/logging_errors.py new file mode 100644 index 00000000000..2e6150acc39 --- /dev/null +++ b/packages/service-library/src/servicelib/logging_errors.py @@ -0,0 +1,95 @@ +import logging +from typing import Any, TypedDict + +from common_library.error_codes import ErrorCodeStr +from common_library.errors_classes import OsparcErrorMixin +from common_library.json_serialization import json_dumps, representation_encoder + +from .logging_utils import LogExtra, get_log_record_extra + +_logger = logging.getLogger(__name__) + + +def create_troubleshotting_log_message( + user_error_msg: str, + *, + error: BaseException, + error_code: ErrorCodeStr | None = None, + error_context: dict[str, Any] | None = None, + tip: str | None = None, +) -> str: + """Create a formatted message for _logger.exception(...) + + Arguments: + user_error_msg -- A user-friendly message to be displayed on the front-end explaining the issue in simple terms. + error -- the instance of the handled exception + error_code -- A unique error code (e.g., OEC or osparc-specific) to identify the type or source of the error for easier tracking. + error_context -- Additional context surrounding the exception, such as environment variables or function-specific data. This can be derived from exc.error_context() (relevant when using the OsparcErrorMixin) + tip -- Helpful suggestions or possible solutions explaining why the error may have occurred and how it could potentially be resolved + """ + debug_data = json_dumps( + { + "exception_type": f"{type(error)}", + "exception_details": f"{error}", + "error_code": error_code, + "context": error_context, + "tip": tip, + }, + default=representation_encoder, + indent=1, + ) + + return f"{user_error_msg}.\n{debug_data}" + + +class LogKwargs(TypedDict): + msg: str + extra: LogExtra | None + + +def create_troubleshotting_log_kwargs( + user_error_msg: str, + *, + error: BaseException, + error_code: ErrorCodeStr | None = None, + error_context: dict[str, Any] | None = None, + tip: str | None = None, +) -> LogKwargs: + """ + Creates a dictionary of logging arguments to be used with _log.exception for troubleshooting purposes. + + Usage: + + try: + ... + except MyException as exc + _logger.exception( + **create_troubleshotting_log_kwargs( + user_error_msg=frontend_msg, + exception=exc, + tip="Check row in `groups_extra_properties` for this product. 
It might be missing.", + ) + ) + + """ + # error-context + context = error_context or {} + if isinstance(error, OsparcErrorMixin): + context.update(error.error_context()) + + # compose as log message + log_msg = create_troubleshotting_log_message( + user_error_msg, + error=error, + error_code=error_code, + error_context=context, + tip=tip or getattr(error, "tip", None), + ) + + return { + "msg": log_msg, + "extra": get_log_record_extra( + error_code=error_code, + user_id=context.get("user_id", None), + ), + } diff --git a/packages/service-library/src/servicelib/logging_utils.py b/packages/service-library/src/servicelib/logging_utils.py index 187a7412eb8..7ef3bc28e94 100644 --- a/packages/service-library/src/servicelib/logging_utils.py +++ b/packages/service-library/src/servicelib/logging_utils.py @@ -4,17 +4,25 @@ SEE also https://github.com/Delgan/loguru for a future alternative """ + import asyncio import functools import logging -import os -import sys from asyncio import iscoroutinefunction +from collections.abc import Callable, Iterator from contextlib import contextmanager +from datetime import datetime from inspect import getframeinfo, stack -from typing import Callable, Optional +from pathlib import Path +from typing import Any, NotRequired, TypeAlias, TypedDict, TypeVar + +from settings_library.tracing import TracingSettings -log = logging.getLogger(__name__) +from .logging_utils_filtering import GeneralLogFilter, LoggerName, MessageSubstring +from .tracing import setup_log_tracing +from .utils_secrets import mask_sensitive_data + +_logger = logging.getLogger(__name__) BLACK = "\033[0;30m" @@ -46,56 +54,146 @@ } +class LogExtra(TypedDict): + log_uid: NotRequired[str] + log_oec: NotRequired[str] + + +def get_log_record_extra( + *, + user_id: int | str | None = None, + error_code: str | None = None, +) -> LogExtra | None: + extra: LogExtra = {} + + if user_id: + assert int(user_id) > 0 # nosec + extra["log_uid"] = f"{user_id}" + if error_code: + extra["log_oec"] = error_code + + return extra or None + + class CustomFormatter(logging.Formatter): """Custom Formatter does these 2 things: 1. Overrides 'funcName' with the value of 'func_name_override', if it exists. 2. Overrides 'filename' with the value of 'file_name_override', if it exists. 
""" - def format(self, record): + def __init__(self, fmt: str, *, log_format_local_dev_enabled: bool) -> None: + super().__init__(fmt) + self.log_format_local_dev_enabled = log_format_local_dev_enabled + + def format(self, record) -> str: if hasattr(record, "func_name_override"): record.funcName = record.func_name_override if hasattr(record, "file_name_override"): record.filename = record.file_name_override - # add color - levelname = record.levelname - if levelname in COLORS: - levelname_color = COLORS[levelname] + levelname + NORMAL - record.levelname = levelname_color - return super().format(record) + for name in LogExtra.__optional_keys__: # pylint: disable=no-member + if not hasattr(record, name): + setattr(record, name, None) + if self.log_format_local_dev_enabled: + levelname = record.levelname + if levelname in COLORS: + levelname_color = COLORS[levelname] + levelname + NORMAL + record.levelname = levelname_color + return super().format(record) -# SEE https://docs.python.org/3/library/logging.html#logrecord-attributes -DEFAULT_FORMATTING = "%(levelname)s: [%(asctime)s/%(processName)s] [%(name)s:%(funcName)s(%(lineno)d)] - %(message)s" + return super().format(record).replace("\n", "\\n") -def config_all_loggers(): +# SEE https://docs.python.org/3/library/logging.html#logrecord-attributes +DEFAULT_FORMATTING = ( + "log_level=%(levelname)s " + "| log_timestamp=%(asctime)s " + "| log_source=%(name)s:%(funcName)s(%(lineno)d) " + "| log_uid=%(log_uid)s " + "| log_oec=%(log_oec)s" + "| log_msg=%(message)s" +) +LOCAL_FORMATTING = "%(levelname)s: [%(asctime)s/%(processName)s] [%(name)s:%(funcName)s(%(lineno)d)] - %(message)s" + +# Graylog Grok pattern extractor: +# log_level=%{WORD:log_level} \| log_timestamp=%{TIMESTAMP_ISO8601:log_timestamp} \| log_source=%{DATA:log_source} \| (log_uid=%{WORD:log_uid} \| )?log_msg=%{GREEDYDATA:log_msg} + + +def config_all_loggers( + *, + log_format_local_dev_enabled: bool, + logger_filter_mapping: dict[LoggerName, list[MessageSubstring]], + tracing_settings: TracingSettings | None, +) -> None: """ Applies common configuration to ALL registered loggers """ the_manager: logging.Manager = logging.Logger.manager + root_logger = logging.getLogger() - loggers = [logging.getLogger()] + [ + loggers = [root_logger] + [ logging.getLogger(name) for name in the_manager.loggerDict ] + + fmt = DEFAULT_FORMATTING + if tracing_settings is not None: + fmt = ( + "log_level=%(levelname)s " + "| log_timestamp=%(asctime)s " + "| log_source=%(name)s:%(funcName)s(%(lineno)d) " + "| log_uid=%(log_uid)s " + "| log_oec=%(log_oec)s" + "| log_trace_id=%(otelTraceID)s " + "| log_span_id=%(otelSpanID)s " + "| log_resource.service.name=%(otelServiceName)s " + "| log_trace_sampled=%(otelTraceSampled)s] " + "| log_msg=%(message)s" + ) + setup_log_tracing(tracing_settings=tracing_settings) + if log_format_local_dev_enabled: + fmt = LOCAL_FORMATTING + if tracing_settings is not None: + fmt = ( + "%(levelname)s: [%(asctime)s/%(processName)s] " + "[log_trace_id=%(otelTraceID)s log_span_id=%(otelSpanID)s log_resource.service.name=%(otelServiceName)s log_trace_sampled=%(otelTraceSampled)s] " + "[%(name)s:%(funcName)s(%(lineno)d)] - %(message)s" + ) + for logger in loggers: - set_logging_handler(logger) + _set_logging_handler( + logger, fmt=fmt, log_format_local_dev_enabled=log_format_local_dev_enabled + ) + + for logger_name, filtered_routes in logger_filter_mapping.items(): + logger = logging.getLogger(logger_name) + # Check if the logger has any handlers or is in active use + if not 
logger.hasHandlers(): + _logger.warning( + "Logger %s does not have any handlers. Filter will not be added.", + logger_name, + ) + continue + log_filter = GeneralLogFilter(filtered_routes) + logger.addFilter(log_filter) -def set_logging_handler( + +def _set_logging_handler( logger: logging.Logger, - formatter_base: Optional[type[logging.Formatter]] = None, - fmt: str = DEFAULT_FORMATTING, + *, + fmt: str, + log_format_local_dev_enabled: bool, ) -> None: - if not formatter_base: - formatter_base = CustomFormatter - for handler in logger.handlers: - handler.setFormatter(formatter_base(fmt)) + handler.setFormatter( + CustomFormatter( + fmt, log_format_local_dev_enabled=log_format_local_dev_enabled + ) + ) -def test_logger_propagation(logger: logging.Logger): +def test_logger_propagation(logger: logging.Logger) -> None: """log propagation and levels can sometimes be daunting to get it right. This function uses the `logger`` passed as argument to log the same message at different levels @@ -103,7 +201,7 @@ def test_logger_propagation(logger: logging.Logger): This should help to visually test a given configuration USAGE: - from servicelib.logging_utils import test_logger_propagation + from .logging_utils import test_logger_propagation for n in ("aiohttp.access", "gunicorn.access"): test_logger_propagation(logging.getLogger(n)) """ @@ -115,11 +213,66 @@ def test_logger_propagation(logger: logging.Logger): logger.debug(msg, "debug") -def _log_arguments( +class LogExceptionsKwargsDict(TypedDict, total=True): + logger: logging.Logger + level: int + msg_prefix: str + exc_info: bool + stack_info: bool + + +@contextmanager +def log_exceptions( + logger: logging.Logger, + level: int, + msg_prefix: str = "", + *, + exc_info: bool = False, + stack_info: bool = False, +) -> Iterator[None]: + """If an exception is raised, it gets logged with level. + + NOTE that this does NOT suppress exceptions + + Example: logging exceptions raised a "section of code" for debugging purposes + + # raises + with log_exceptions(logger, logging.DEBUG): + # ... + resp.raise_for_status() + + # does NOT raises (NOTE: use composition of context managers) + with suppress(Exception), log_exceptions(logger, logging.DEBUG): + # ... + resp.raise_for_status() + """ + try: + yield + except asyncio.CancelledError: + msg = f"{msg_prefix} call cancelled ".strip() + logger.log(level, msg) + raise + except Exception as exc: # pylint: disable=broad-except + msg = f"{msg_prefix} raised {type(exc).__name__}: {exc}".strip() + logger.log( + level, + msg, + exc_info=exc_info, + stack_info=stack_info, + ) + raise + + +def _log_before_call( logger_obj: logging.Logger, level: int, func: Callable, *args, **kwargs ) -> dict[str, str]: + # NOTE: We should avoid logging arguments but in the meantime, we are trying to + # avoid exposing sensitive data in the logs. For `args` is more difficult. 
We could eventually + # deduced sensitivity based on the entropy of values but it is very costly + # SEE https://github.com/ITISFoundation/osparc-simcore/security/code-scanning/18 args_passed_in_function = [repr(a) for a in args] - kwargs_passed_in_function = [f"{k}={v!r}" for k, v in kwargs.items()] + masked_kwargs = mask_sensitive_data(kwargs) + kwargs_passed_in_function = [f"{k}={v!r}" for k, v in masked_kwargs.items()] # The lists of positional and keyword arguments is joined together to form final string formatted_arguments = ", ".join(args_passed_in_function + kwargs_passed_in_function) @@ -131,13 +284,15 @@ def _log_arguments( py_file_caller = getframeinfo(stack()[1][0]) extra_args = { "func_name_override": func.__name__, - "file_name_override": os.path.basename(py_file_caller.filename), + "file_name_override": Path(py_file_caller.filename).name, } # Before to the function execution, log function details. logger_obj.log( level, - "Arguments: %s - Begin function", + "%s:%s(%s) - Begin function", + func.__module__.split(".")[-1], + func.__name__, formatted_arguments, extra=extra_args, ) @@ -145,83 +300,162 @@ def _log_arguments( return extra_args -def log_decorator(logger=None, level: int = logging.DEBUG, log_traceback: bool = False): - # Build logger object - logger_obj = logger or log - - def log_decorator_info(func): - if iscoroutinefunction(func): - - @functools.wraps(func) - async def log_decorator_wrapper(*args, **kwargs): - extra_args = _log_arguments(logger_obj, level, func, *args, **kwargs) - try: - # log return value from the function - value = await func(*args, **kwargs) - logger_obj.log( - level, "Returned: - End function %r", value, extra=extra_args - ) - except: - # log exception if occurs in function - logger_obj.error( - "Exception: %s", - sys.exc_info()[1], - extra=extra_args, - exc_info=log_traceback, - ) - raise - # Return function value - return value - - else: - - @functools.wraps(func) - def log_decorator_wrapper(*args, **kwargs): - extra_args = _log_arguments(logger_obj, level, func, *args, **kwargs) - try: - # log return value from the function - value = func(*args, **kwargs) - logger_obj.log( - level, "Returned: - End function %r", value, extra=extra_args - ) - except: - # log exception if occurs in function - logger_obj.error( - "Exception: %s", - sys.exc_info()[1], - extra=extra_args, - exc_info=log_traceback, - ) - raise - # Return function value - return value - - # Return the pointer to the function - return log_decorator_wrapper - - return log_decorator_info +def _log_after_call( + logger_obj: logging.Logger, + level: int, + func: Callable, + result: Any, + extra_args: dict[str, str], +) -> None: + logger_obj.log( + level, + "%s:%s returned %r - End function", + func.__module__.split(".")[-1], + func.__name__, + result, + extra=extra_args, + ) + + +F = TypeVar("F", bound=Callable[..., Any]) + + +def log_decorator( + logger: logging.Logger | None, + level: int = logging.DEBUG, + *, + # NOTE: default defined by legacy: ANE defined full stack tracebacks + # on exceptions + exc_info: bool = True, + exc_stack_info: bool = True, +) -> Callable[[F], F]: + """Logs the decorated function: + - *before* its called + - input parameters + - *after* its called + - returned values *after* the decorated function is executed *or* + - raised exception (w/ or w/o traceback) + """ + logger_obj = logger or _logger + + def _decorator(func_or_coro: F) -> F: + + _log_exc_kwargs = LogExceptionsKwargsDict( + logger=logger_obj, + level=level, + 
msg_prefix=f"{func_or_coro.__name__}", + exc_info=exc_info, + stack_info=exc_stack_info, + ) + + if iscoroutinefunction(func_or_coro): + + @functools.wraps(func_or_coro) + async def _async_wrapper(*args: Any, **kwargs: Any) -> Any: + extra_args = _log_before_call( + logger_obj, level, func_or_coro, *args, **kwargs + ) + with log_exceptions(**_log_exc_kwargs): + result = await func_or_coro(*args, **kwargs) + _log_after_call(logger_obj, level, func_or_coro, result, extra_args) + return result + + return _async_wrapper # type: ignore[return-value] # decorators typing is hard + + @functools.wraps(func_or_coro) + def _sync_wrapper(*args: Any, **kwargs: Any) -> Any: + extra_args = _log_before_call( + logger_obj, level, func_or_coro, *args, **kwargs + ) + with log_exceptions(**_log_exc_kwargs): + result = func_or_coro(*args, **kwargs) + _log_after_call(logger_obj, level, func_or_coro, result, extra_args) + return result + + return _sync_wrapper # type: ignore[return-value] # decorators typing is hard + + return _decorator @contextmanager -def log_catch(logger: logging.Logger, reraise: bool = True): +def log_catch(logger: logging.Logger, *, reraise: bool = True) -> Iterator[None]: try: yield except asyncio.CancelledError: logger.debug("call was cancelled") raise except Exception as exc: # pylint: disable=broad-except - logger.error("Unhandled exception: %s", f"{exc}", exc_info=True) + logger.exception("Unhandled exception:") if reraise: raise exc from exc -un_capitalize = lambda s: s[:1].lower() + s[1:] if s else "" +LogLevelInt: TypeAlias = int +LogMessageStr: TypeAlias = str + + +def _un_capitalize(s: str) -> str: + return s[:1].lower() + s[1:] if s else "" @contextmanager -def log_context(logger: logging.Logger, level: int, msg: str, *args, **kwargs): +def log_context( + logger: logging.Logger, + level: LogLevelInt, + msg: LogMessageStr, + *args, + log_duration: bool = False, + extra: LogExtra | None = None, +): # NOTE: preserves original signature https://docs.python.org/3/library/logging.html#logging.Logger.log - msg = un_capitalize(msg.strip()) - logger.log(level, "Starting " + msg + " ...", *args, **kwargs) + start = datetime.now() # noqa: DTZ005 + msg = _un_capitalize(msg.strip()) + + kwargs: dict[str, Any] = {} + if extra: + kwargs["extra"] = extra + log_msg = f"Starting {msg} ..." 
+ + stackelvel = 3 # NOTE: 1 => log_context, 2 => contextlib, 3 => caller + logger.log(level, log_msg, *args, **kwargs, stacklevel=stackelvel) yield - logger.log(level, "Finished " + msg, *args, **kwargs) + duration = ( + f" in {(datetime.now() - start ).total_seconds()}s" # noqa: DTZ005 + if log_duration + else "" + ) + log_msg = f"Finished {msg}{duration}" + logger.log(level, log_msg, *args, **kwargs, stacklevel=stackelvel) + + +def guess_message_log_level(message: str) -> LogLevelInt: + lower_case_message = message.lower().strip() + if lower_case_message.startswith( + ( + "error", + "[error]", + "err", + "[err]", + "exception", + "[exception]", + "exc:", + "exc ", + "[exc]", + ) + ): + return logging.ERROR + if lower_case_message.startswith( + ( + "warning", + "[warning]", + "warn", + "[warn]", + ) + ): + return logging.WARNING + return logging.INFO + + +def set_parent_module_log_level(current_module: str, desired_log_level: int) -> None: + parent_module = ".".join(current_module.split(".")[:-1]) + logging.getLogger(parent_module).setLevel(desired_log_level) diff --git a/packages/service-library/src/servicelib/logging_utils_filtering.py b/packages/service-library/src/servicelib/logging_utils_filtering.py new file mode 100644 index 00000000000..8d40f501328 --- /dev/null +++ b/packages/service-library/src/servicelib/logging_utils_filtering.py @@ -0,0 +1,28 @@ +""" +This codes originates from this article + https://medium.com/swlh/add-log-decorators-to-your-python-project-84094f832181 + +SEE also https://github.com/Delgan/loguru for a future alternative +""" + +import logging +from typing import TypeAlias + +_logger = logging.getLogger(__name__) + +LoggerName: TypeAlias = str +MessageSubstring: TypeAlias = str + + +class GeneralLogFilter(logging.Filter): + def __init__(self, filtered_routes: list[str]) -> None: + super().__init__() + self.filtered_routes = filtered_routes + + def filter(self, record: logging.LogRecord) -> bool: + msg = record.getMessage() + + # Check if the filtered routes exists in the message + return not any( + filter_criteria in msg for filter_criteria in self.filtered_routes + ) diff --git a/packages/service-library/src/servicelib/long_running_tasks/_constants.py b/packages/service-library/src/servicelib/long_running_tasks/_constants.py new file mode 100644 index 00000000000..5cc87208a36 --- /dev/null +++ b/packages/service-library/src/servicelib/long_running_tasks/_constants.py @@ -0,0 +1,5 @@ +from typing import Final + +MINUTE: Final[int] = 60 # in secs +HOUR: Final[int] = 60 * MINUTE # in secs +DEFAULT_POLL_INTERVAL_S: Final[float] = 1 diff --git a/packages/service-library/src/servicelib/long_running_tasks/_errors.py b/packages/service-library/src/servicelib/long_running_tasks/_errors.py index 9b96b1776a5..44dc03157f2 100644 --- a/packages/service-library/src/servicelib/long_running_tasks/_errors.py +++ b/packages/service-library/src/servicelib/long_running_tasks/_errors.py @@ -1,15 +1,15 @@ -from pydantic.errors import PydanticErrorMixin +from common_library.errors_classes import OsparcErrorMixin -class BaseLongRunningError(PydanticErrorMixin, Exception): +class BaseLongRunningError(OsparcErrorMixin, Exception): """base exception for this module""" - code: str = "long_running_task.base_long_running_error" + code: str = "long_running_task.base_long_running_error" # type: ignore[assignment] class TaskAlreadyRunningError(BaseLongRunningError): code: str = "long_running_task.task_already_running" - msg_template: str = "{task_name} must be unique, found: 
'{managed_task}" + msg_template: str = "{task_name} must be unique, found: '{managed_task}'" class TaskNotFoundError(BaseLongRunningError): diff --git a/packages/service-library/src/servicelib/long_running_tasks/_models.py b/packages/service-library/src/servicelib/long_running_tasks/_models.py index d0a035b9e18..89fb8b1b399 100644 --- a/packages/service-library/src/servicelib/long_running_tasks/_models.py +++ b/packages/service-library/src/servicelib/long_running_tasks/_models.py @@ -1,67 +1,32 @@ -import logging -import urllib.parse +# mypy: disable-error-code=truthy-function from asyncio import Task +from collections.abc import Awaitable, Callable, Coroutine +from dataclasses import dataclass from datetime import datetime -from typing import Any, Awaitable, Callable, Coroutine, Optional - -from pydantic import ( - BaseModel, - Field, - PositiveFloat, - confloat, - validate_arguments, - validator, -) - -logger = logging.getLogger(__name__) - -TaskName = str -TaskId = str -TaskType = Callable[..., Coroutine[Any, Any, Any]] - -ProgressMessage = str -ProgressPercent = confloat(ge=0.0, le=1.0) -ProgressCallback = Callable[[ProgressMessage, ProgressPercent, TaskId], Awaitable[None]] - +from typing import Any, TypeAlias -class MarkOptions(BaseModel): - unique: bool = False - - -class TaskProgress(BaseModel): - """ - Helps the user to keep track of the progress. Progress is expected to be - defined as a float bound between 0.0 and 1.0 - """ - - message: ProgressMessage = Field(default="") - percent: ProgressPercent = Field(default=0.0) +from models_library.api_schemas_long_running_tasks.base import ( + ProgressMessage, + ProgressPercent, + TaskId, + TaskProgress, +) +from models_library.api_schemas_long_running_tasks.tasks import ( + TaskGet, + TaskResult, + TaskStatus, +) +from pydantic import BaseModel, ConfigDict, Field, PositiveFloat - @validate_arguments - def update( - self, - *, - message: Optional[ProgressMessage] = None, - percent: Optional[ProgressPercent] = None, - ) -> None: - """`percent` must be between 0.0 and 1.0 otherwise ValueError is raised""" - if message: - self.message = message - if percent: - if not (0.0 <= percent <= 1.0): - raise ValueError(f"{percent=} must be in range [0.0, 1.0]") - self.percent = percent +TaskName: TypeAlias = str - logger.debug("Progress update: %s", f"{self}") +TaskType: TypeAlias = Callable[..., Coroutine[Any, Any, Any]] - @classmethod - def create(cls) -> "TaskProgress": - return cls.parse_obj(dict(message="", percent=0.0)) +ProgressCallback: TypeAlias = Callable[ + [ProgressMessage, ProgressPercent | None, TaskId], Awaitable[None] +] - @validator("percent") - @classmethod - def round_value_to_3_digit(cls, v): - return round(v, 3) +RequestBody: TypeAlias = Any class TrackedTask(BaseModel): @@ -77,42 +42,50 @@ class TrackedTask(BaseModel): ) started: datetime = Field(default_factory=datetime.utcnow) - last_status_check: Optional[datetime] = Field( + last_status_check: datetime | None = Field( default=None, description=( "used to detect when if the task is not actively " "polled by the client who created it" ), ) + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) - class Config: - arbitrary_types_allowed = True +class ClientConfiguration(BaseModel): + router_prefix: str + default_timeout: PositiveFloat -class TaskStatus(BaseModel): - task_progress: TaskProgress - done: bool - started: datetime +@dataclass(frozen=True) +class LRTask: + progress: TaskProgress + _result: Coroutine[Any, Any, Any] | None = None -class 
TaskResult(BaseModel): - result: Optional[Any] - error: Optional[Any] + def done(self) -> bool: + return self._result is not None + async def result(self) -> Any: + if not self._result: + msg = "No result ready!" + raise ValueError(msg) + return await self._result -class ClientConfiguration(BaseModel): - router_prefix: str - default_timeout: PositiveFloat +# explicit export of models for api-schemas -class TaskGet(BaseModel): - task_id: TaskId - task_name: str - status_href: str - result_href: str - abort_href: str +assert TaskResult # nosec +assert TaskGet # nosec +assert TaskStatus # nosec - @validator("task_name") - @classmethod - def unquote_str(cls, v) -> str: - return urllib.parse.unquote(v) +__all__: tuple[str, ...] = ( + "ProgressMessage", + "ProgressPercent", + "TaskGet", + "TaskId", + "TaskProgress", + "TaskResult", + "TaskStatus", +) diff --git a/packages/service-library/src/servicelib/long_running_tasks/_task.py b/packages/service-library/src/servicelib/long_running_tasks/_task.py index 0834f3d54fb..641e78a96a8 100644 --- a/packages/service-library/src/servicelib/long_running_tasks/_task.py +++ b/packages/service-library/src/servicelib/long_running_tasks/_task.py @@ -6,9 +6,13 @@ from collections import deque from contextlib import suppress from datetime import datetime -from typing import Any, Optional, Protocol +from typing import Any, Protocol from uuid import uuid4 +from models_library.api_schemas_long_running_tasks.base import ( + ProgressPercent, + TaskProgress, +) from pydantic import PositiveFloat from ._errors import ( @@ -18,12 +22,12 @@ TaskNotCompletedError, TaskNotFoundError, ) -from ._models import TaskId, TaskName, TaskProgress, TaskResult, TaskStatus, TrackedTask +from ._models import TaskId, TaskName, TaskResult, TaskStatus, TrackedTask logger = logging.getLogger(__name__) -async def _await_task(task: asyncio.Task): +async def _await_task(task: asyncio.Task) -> None: await task @@ -119,14 +123,17 @@ async def _stale_tasks_monitor_worker(self) -> None: logger.warning( "Removing stale task '%s' with status '%s'", task_id, - self.get_task_status(task_id, with_task_context=None).json(), + self.get_task_status( + task_id, with_task_context=None + ).model_dump_json(), ) await self.remove_task( task_id, with_task_context=None, reraise_errors=False ) @staticmethod - def _create_task_id(task_name: TaskName) -> str: + def create_task_id(task_name: TaskName) -> str: + assert len(task_name) > 0 return f"{task_name}.{uuid4()}" def is_task_running(self, task_name: TaskName) -> bool: @@ -137,8 +144,8 @@ def is_task_running(self, task_name: TaskName) -> bool: managed_tasks_ids = list(self._tasks_groups[task_name].keys()) return len(managed_tasks_ids) > 0 - def list_tasks(self, with_task_context: Optional[TaskContext]) -> list[TrackedTask]: - tasks = [] + def list_tasks(self, with_task_context: TaskContext | None) -> list[TrackedTask]: + tasks: list[TrackedTask] = [] for task_group in self._tasks_groups.values(): if not with_task_context: tasks.extend(task_group.values()) @@ -158,10 +165,10 @@ def add_task( task: asyncio.Task, task_progress: TaskProgress, task_context: TaskContext, + task_id: TaskId, + *, fire_and_forget: bool, ) -> TrackedTask: - task_id = self._create_task_id(task_name) - if task_name not in self._tasks_groups: self._tasks_groups[task_name] = {} @@ -178,7 +185,7 @@ def add_task( return tracked_task def _get_tracked_task( - self, task_id: TaskId, with_task_context: Optional[TaskContext] + self, task_id: TaskId, with_task_context: TaskContext | None ) -> 
TrackedTask: for tasks in self._tasks_groups.values(): if task_id in tasks: @@ -191,7 +198,7 @@ def _get_tracked_task( raise TaskNotFoundError(task_id=task_id) def get_task_status( - self, task_id: TaskId, with_task_context: Optional[TaskContext] + self, task_id: TaskId, with_task_context: TaskContext | None ) -> TaskStatus: """ returns: the status of the task, along with updates @@ -205,16 +212,16 @@ def get_task_status( task = tracked_task.task done = task.done() - return TaskStatus.parse_obj( - dict( - task_progress=tracked_task.task_progress, - done=done, - started=tracked_task.started, - ) + return TaskStatus.model_validate( + { + "task_progress": tracked_task.task_progress, + "done": done, + "started": tracked_task.started, + } ) def get_task_result( - self, task_id: TaskId, with_task_context: Optional[TaskContext] + self, task_id: TaskId, with_task_context: TaskContext | None ) -> Any: """ returns: the result of the task @@ -245,6 +252,7 @@ def get_task_result_old(self, task_id: TaskId) -> TaskResult: if not tracked_task.task.done(): raise TaskNotCompletedError(task_id=task_id) + error: TaskExceptionError | TaskCancelledError try: exception = tracked_task.task.exception() if exception is not None: @@ -264,7 +272,7 @@ def get_task_result_old(self, task_id: TaskId) -> TaskResult: return TaskResult(result=tracked_task.task.result(), error=None) async def cancel_task( - self, task_id: TaskId, with_task_context: Optional[TaskContext] + self, task_id: TaskId, with_task_context: TaskContext | None ) -> None: """ cancels the task @@ -300,10 +308,7 @@ async def _cancel_tracked_task( task, task_id, reraise_errors=reraise_errors ) except Exception as e: # pylint:disable=broad-except - formatted_traceback = "".join( - # pylint: disable=protected-access,no-value-for-parameter,unexpected-keyword-arg - traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__) - ) + formatted_traceback = "".join(traceback.format_exception(e)) raise TaskExceptionError( task_id=task_id, exception=e, traceback=formatted_traceback ) from e @@ -311,7 +316,7 @@ async def _cancel_tracked_task( async def remove_task( self, task_id: TaskId, - with_task_context: Optional[TaskContext], + with_task_context: TaskContext | None, *, reraise_errors: bool = True, ) -> None: @@ -349,8 +354,7 @@ async def close(self) -> None: class TaskProtocol(Protocol): - # NOTE: when using **kwargs pyright complains. this might be a bug that should be fixed soon - async def __call__(self, task_progress: TaskProgress, *task_kwargs: Any) -> Any: + async def __call__(self, progress: TaskProgress, *args: Any, **kwargs: Any) -> Any: ... @property @@ -363,10 +367,10 @@ def start_task( task: TaskProtocol, *, unique: bool = False, - task_context: Optional[TaskContext] = None, - task_name: Optional[str] = None, + task_context: TaskContext | None = None, + task_name: str | None = None, fire_and_forget: bool = False, - **task_kwargs, + **task_kwargs: Any, ) -> TaskId: """ Creates a background task from an async function. 
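A minimal sketch of how a caller could drive `start_task` together with the `TasksManager` above. The import path, the task body, and the assumption that `start_task` receives the `TasksManager` as its first positional argument (suggested by the `tasks_manager.create_task_id(...)` call in its body) are illustrative, not taken verbatim from the code:

    import asyncio

    from models_library.api_schemas_long_running_tasks.base import ProgressPercent
    from servicelib.long_running_tasks._task import TaskProgress, TasksManager, start_task


    async def sleep_task(progress: TaskProgress, *, num_items: int) -> int:
        for i in range(num_items):
            await asyncio.sleep(0.1)  # placeholder for real work
            progress.update(
                message=f"processed {i + 1}/{num_items}",
                percent=ProgressPercent((i + 1) / num_items),
            )
        return num_items


    async def run_and_wait(manager: TasksManager) -> int:
        # keyword arguments are forwarded to the task via **task_kwargs
        task_id = start_task(manager, sleep_task, num_items=10)
        # poll until the task reports completion, then fetch its result
        while not manager.get_task_status(task_id, with_task_context=None).done:
            await asyncio.sleep(1)
        return manager.get_task_result(task_id, with_task_context=None)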
@@ -410,15 +414,16 @@ def start_task( ] raise TaskAlreadyRunningError(task_name=task_name, managed_task=managed_task) - task_progress = TaskProgress.create() + task_id = tasks_manager.create_task_id(task_name=task_name) + task_progress = TaskProgress.create(task_id=task_id) # bind the task with progress 0 and 1 async def _progress_task(progress: TaskProgress, handler: TaskProtocol): - progress.update(message="starting", percent=0) + progress.update(message="starting", percent=ProgressPercent(0)) try: return await handler(progress, **task_kwargs) finally: - progress.update(message="finished", percent=1) + progress.update(message="finished", percent=ProgressPercent(1)) async_task = asyncio.create_task( _progress_task(task_progress, task), name=f"{task_name}" @@ -430,6 +435,21 @@ async def _progress_task(progress: TaskProgress, handler: TaskProtocol): task_progress=task_progress, task_context=task_context or {}, fire_and_forget=fire_and_forget, + task_id=task_id, ) return tracked_task.task_id + + +__all__: tuple[str, ...] = ( + "TaskAlreadyRunningError", + "TaskCancelledError", + "TaskId", + "TasksManager", + "TaskProgress", + "TaskProtocol", + "TaskStatus", + "TaskResult", + "TrackedTask", + "TaskResult", +) diff --git a/packages/service-library/src/servicelib/mimetype_constants.py b/packages/service-library/src/servicelib/mimetype_constants.py index d423561830b..ffde9a3f7b3 100644 --- a/packages/service-library/src/servicelib/mimetype_constants.py +++ b/packages/service-library/src/servicelib/mimetype_constants.py @@ -7,8 +7,12 @@ SEE https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types """ +from typing import Final + # NOTE: mimetypes (https://docs.python.org/3/library/mimetypes.html) is already a module in python -MIMETYPE_APPLICATION_JSON = "application/json" -MIMETYPE_TEXT_PLAIN = "text/plain" -MIMETYPE_TEXT_HTML = "text/html" +MIMETYPE_APPLICATION_JSON: Final[str] = "application/json" +MIMETYPE_APPLICATION_ND_JSON: Final[str] = "application/x-ndjson" +MIMETYPE_APPLICATION_ZIP: Final[str] = "application/zip" +MIMETYPE_TEXT_HTML: Final[str] = "text/html" +MIMETYPE_TEXT_PLAIN: Final[str] = "text/plain" diff --git a/packages/service-library/src/servicelib/minio_utils.py b/packages/service-library/src/servicelib/minio_utils.py index 14e168828fa..53aedaa9e98 100644 --- a/packages/service-library/src/servicelib/minio_utils.py +++ b/packages/service-library/src/servicelib/minio_utils.py @@ -1,20 +1,17 @@ -# FIXME: move to settings-library or refactor - import logging -from typing import Optional from tenacity import before_sleep_log, stop_after_attempt, wait_fixed log = logging.getLogger(__name__) -class MinioRetryPolicyUponInitialization: +class ServiceRetryPolicyUponInitialization: """Retry policy upon service initialization""" WAIT_SECS = 2 ATTEMPTS_COUNT = 40 - def __init__(self, logger: Optional[logging.Logger] = None): + def __init__(self, logger: logging.Logger | None = None): logger = logger or log self.kwargs = dict( diff --git a/packages/service-library/src/servicelib/observer.py b/packages/service-library/src/servicelib/observer.py deleted file mode 100644 index a7e73188a9b..00000000000 --- a/packages/service-library/src/servicelib/observer.py +++ /dev/null @@ -1,38 +0,0 @@ -"""observer pattern module -Allows loose coupling subject and an observer. 
- -""" - -import logging -from collections import defaultdict -from functools import wraps - -from .utils import logged_gather - -log = logging.getLogger(__name__) - -event_registry = defaultdict(list) - - -async def emit(event: str, *args, **kwargs): - if not event_registry[event]: - return - - coroutines = [observer(*args, **kwargs) for observer in event_registry[event]] - # all coroutine called in // - await logged_gather(*coroutines) - - -def observe(event: str): - def decorator(func): - if func not in event_registry[event]: - log.debug("registering %s to event %s", func, event) - event_registry[event].append(func) - - @wraps(func) - def wrapped(*args, **kwargs): - return func(*args, **kwargs) - - return wrapped - - return decorator diff --git a/packages/service-library/src/servicelib/progress_bar.py b/packages/service-library/src/servicelib/progress_bar.py index 860ff1aa8cc..1f65e44790b 100644 --- a/packages/service-library/src/servicelib/progress_bar.py +++ b/packages/service-library/src/servicelib/progress_bar.py @@ -1,26 +1,52 @@ import asyncio import logging from dataclasses import dataclass, field -from typing import Optional, Protocol, runtime_checkable +from inspect import isawaitable +from typing import Final, Optional, Protocol, runtime_checkable -from servicelib.logging_utils import log_catch +from models_library.progress_bar import ( + ProgressReport, + ProgressStructuredMessage, + ProgressUnit, +) +from pydantic import TypeAdapter -logger = logging.getLogger(__name__) +from .logging_utils import log_catch + +_logger = logging.getLogger(__name__) +_MIN_PROGRESS_UPDATE_PERCENT: Final[float] = 0.01 +_INITIAL_VALUE: Final[float] = -1.0 +_FINAL_VALUE: Final[float] = 1.0 +_PROGRESS_ALREADY_REACGED_MAXIMUM: Final[str] = "Progress already reached maximum of" @runtime_checkable class AsyncReportCB(Protocol): - async def __call__(self, progress_value: float) -> None: - ... + async def __call__(self, report: ProgressReport) -> None: ... + + +@runtime_checkable +class ReportCB(Protocol): + def __call__(self, report: ProgressReport) -> None: ... -@dataclass -class ProgressBarData: +def _normalize_weights(steps: int, weights: list[float]) -> list[float]: + total = sum(weights) + if total == 0: + return [1] * steps + return [weight / total for weight in weights] + + +@dataclass(slots=True, kw_only=True) +class ProgressBarData: # pylint: disable=too-many-instance-attributes """A progress bar data allows to keep track of multiple progress(es) even in deeply nested processes. + BEWARE: Using weights AND concurrency is a recipe for disaster as the progress bar does not know which + sub progress finished. Concurrency may only be used with a single progress bar or with equal step weights!! + - Simple example: async def main_fct(): - async with ProgressBarData(steps=3) as root_progress_bar: + async with ProgressBarData(num_steps=3) as root_progress_bar: first_step() await root_progress_bar.update() second_step() @@ -31,7 +57,7 @@ async def main_fct(): - nested example: async def first_step(progress_bar: ProgressBarData): - async with progress_bar.sub_progress(steps=50) as sub_progress_bar: + async with progress_bar.sub_progress(num_steps=50) as sub_progress_bar: # we create a sub progress bar of 50 steps, that will be stacked into the root progress bar. # i.e. when the sub progress bar reaches 50, it will be equivalent of 1 step in the root progress bar. 
for n in range(50): @@ -40,26 +66,43 @@ async def first_step(progress_bar: ProgressBarData): async def main_fct(): - async with ProgressBarData(steps=3) as root_progress_bar: + async with ProgressBarData(num_steps=3) as root_progress_bar: await first_step(root_progress_bar) await second_step() await root_progress_bar.update() await third_step() """ - steps: int = field( + num_steps: int = field( metadata={"description": "Defines the number of steps in the progress bar"} ) - progress_report_cb: Optional[AsyncReportCB] = None - _continuous_progress_value: float = 0 - _children: list = field(default_factory=list) + step_weights: list[float] | None = field( + default=None, + metadata={ + "description": "Optionally defines the step relative weight (defaults to steps of equal weights)" + }, + ) + description: str = field(metadata={"description": "define the progress name"}) + progress_unit: ProgressUnit | None = None + progress_report_cb: AsyncReportCB | ReportCB | None = None + _current_steps: float = _INITIAL_VALUE + _current_attempt: int = 0 + _children: list["ProgressBarData"] = field(default_factory=list) _parent: Optional["ProgressBarData"] = None _continuous_value_lock: asyncio.Lock = field(init=False) - _last_report_value: float = 0 + _last_report_value: float = _INITIAL_VALUE def __post_init__(self) -> None: + if self.progress_unit is not None: + TypeAdapter(ProgressUnit).validate_python(self.progress_unit) self._continuous_value_lock = asyncio.Lock() - self.steps = max(1, self.steps) + self.num_steps = max(1, self.num_steps) + if self.step_weights: + if len(self.step_weights) != self.num_steps: + msg = f"{self.num_steps=} and {len(self.step_weights)} weights provided! Wrong usage of ProgressBarData" + raise RuntimeError(msg) + self.step_weights = _normalize_weights(self.num_steps, self.step_weights) + self.step_weights.append(0) # NOTE: needed to compute reports async def __aenter__(self) -> "ProgressBarData": await self.start() @@ -70,51 +113,118 @@ async def __aexit__(self, exc_type, exc_value, traceback) -> None: async def _update_parent(self, value: float) -> None: if self._parent: - await self._parent.update(value / self.steps) - - async def _report_external(self, value: float, force: bool = False) -> None: + await self._parent.update(value) + + def is_running(self) -> bool: + return self._current_steps < self.num_steps + + def compute_report_message_stuct(self) -> ProgressStructuredMessage: + self_report = ProgressStructuredMessage( + description=self.description, + current=self._current_steps, + total=self.num_steps, + unit=self.progress_unit, + sub=None, + ) + for child in self._children: + if child.is_running(): + self_report.sub = child.compute_report_message_stuct() + return self_report + + async def _report_external(self, value: float) -> None: if not self.progress_report_cb: return - with log_catch(logger, reraise=False): + with log_catch(_logger, reraise=False): # NOTE: only report if at least a percent was increased - if (force and value != self._last_report_value) or ( - ((value - self._last_report_value) / self.steps) > 0.01 - ): - await self.progress_report_cb(value / self.steps) + if ( + (value - self._last_report_value) > _MIN_PROGRESS_UPDATE_PERCENT + ) or value == _FINAL_VALUE: + # compute progress string + call = self.progress_report_cb( + ProgressReport( + # NOTE: here we convert back to actual value since this is possibly weighted + actual_value=value * self.num_steps, + total=self.num_steps, + attempt=self._current_attempt, + unit=self.progress_unit, + 
message=self.compute_report_message_stuct(), + ), + ) + if isawaitable(call): + await call self._last_report_value = value async def start(self) -> None: - await self._report_external(0, force=True) - - async def update(self, value: float = 1) -> None: + await self.set_(0) + + def _compute_progress(self, steps: float) -> float: + if not self.step_weights: + return steps / self.num_steps + weight_index = int(steps) + return ( + sum(self.step_weights[:weight_index]) + + steps % 1 * self.step_weights[weight_index] + ) + + async def update(self, steps: float = 1) -> None: + parent_update_value = 0.0 async with self._continuous_value_lock: - new_progress_value = self._continuous_progress_value + value - if new_progress_value > self.steps: - new_progress_value = round(new_progress_value) - if new_progress_value > self.steps: - logger.warning( + new_steps_value = self._current_steps + steps + if new_steps_value > self.num_steps: + new_steps_value = round(new_steps_value) + if new_steps_value > self.num_steps: + _logger.warning( "%s", - f"Progress already reached maximum of {self.steps=}, " - f"cause: {self._continuous_progress_value=} is updated by {value=}" + f"{_PROGRESS_ALREADY_REACGED_MAXIMUM} {self.num_steps=}, " + f"cause: {self._current_steps=} is updated by {steps=}" "TIP: sub progresses are not created correctly please check the stack trace", stack_info=True, ) - new_progress_value = self.steps - self._continuous_progress_value = new_progress_value - await self._update_parent(value) + new_steps_value = self.num_steps + + if new_steps_value == self._current_steps: + return + + new_progress_value = self._compute_progress(new_steps_value) + if self._current_steps != _INITIAL_VALUE: + old_progress_value = self._compute_progress(self._current_steps) + parent_update_value = new_progress_value - old_progress_value + self._current_steps = new_steps_value + + if parent_update_value: + await self._update_parent(parent_update_value) await self._report_external(new_progress_value) + def reset(self) -> None: + self._current_attempt += 1 + self._current_steps = _INITIAL_VALUE + self._last_report_value = _INITIAL_VALUE + + async def set_(self, new_value: float) -> None: + await self.update(new_value - self._current_steps) + async def finish(self) -> None: - await self.update(self.steps - self._continuous_progress_value) - await self._report_external(self.steps, force=True) - - def sub_progress(self, steps: int) -> "ProgressBarData": - if len(self._children) == self.steps: - raise RuntimeError( - "Too many sub progresses created already. Wrong usage of the progress bar" - ) - child = ProgressBarData(steps=steps, _parent=self) + _logger.debug("finishing %s", f"{self.num_steps} progress") + await self.set_(self.num_steps) + + def sub_progress( + self, + steps: int, + description: str, + step_weights: list[float] | None = None, + progress_unit: ProgressUnit | None = None, + ) -> "ProgressBarData": + if len(self._children) == self.num_steps: + msg = "Too many sub progresses created already. 
Wrong usage of the progress bar" + raise RuntimeError(msg) + child = ProgressBarData( + num_steps=steps, + description=description, + step_weights=step_weights, + progress_unit=progress_unit, + _parent=self, + ) self._children.append(child) return child diff --git a/packages/service-library/src/servicelib/prometheus_metrics.py b/packages/service-library/src/servicelib/prometheus_metrics.py new file mode 100644 index 00000000000..7f24b4e004b --- /dev/null +++ b/packages/service-library/src/servicelib/prometheus_metrics.py @@ -0,0 +1,143 @@ +from collections.abc import Iterator +from contextlib import contextmanager +from dataclasses import dataclass + +from opentelemetry import trace +from prometheus_client import ( + Counter, + Gauge, + GCCollector, + Histogram, + PlatformCollector, + ProcessCollector, +) +from prometheus_client.registry import CollectorRegistry + +# +# CAUTION CAUTION CAUTION NOTE: +# Be very careful with metrics. pay attention to metrics cardinatity. +# Each time series takes about 3kb of overhead in Prometheus +# +# CAUTION: every unique combination of key-value label pairs represents a new time series +# +# If a metrics is not needed, don't add it!! It will collapse the application AND prometheus +# +# references: +# https://prometheus.io/docs/practices/naming/ +# https://www.robustperception.io/cardinality-is-key +# https://www.robustperception.io/why-does-prometheus-use-so-much-ram +# https://promcon.io/2019-munich/slides/containing-your-cardinality.pdf +# https://grafana.com/docs/grafana-cloud/how-do-i/control-prometheus-metrics-usage/usage-analysis-explore/ +# + + +@dataclass +class PrometheusMetrics: + registry: CollectorRegistry + process_collector: ProcessCollector + platform_collector: PlatformCollector + gc_collector: GCCollector + request_count: Counter + in_flight_requests: Gauge + response_latency_with_labels: Histogram + + +def _get_exemplar() -> dict[str, str] | None: + current_span = trace.get_current_span() + if not current_span.is_recording(): + return None + trace_id = trace.format_trace_id(current_span.get_span_context().trace_id) + return {"TraceID": trace_id} + + +def get_prometheus_metrics() -> PrometheusMetrics: + # app-scope registry + registry = CollectorRegistry(auto_describe=False) + + # automatically collects process metrics + process_collector = ProcessCollector(registry=registry) + # automatically collects python_info metrics + platform_collector = PlatformCollector(registry=registry) + # automatically collects python garbage collector metrics + gc_collector = GCCollector(registry=registry) + + # Total number of requests processed + request_count = Counter( + name="http_requests", + documentation="Total requests count", + labelnames=[ + "method", + "endpoint", + "http_status", + "simcore_user_agent", + ], + registry=registry, + ) + + in_flight_requests = Gauge( + name="http_in_flight_requests", + documentation="Number of requests in process", + labelnames=["method", "endpoint", "simcore_user_agent"], + registry=registry, + ) + + response_latency_with_labels = Histogram( + name="http_request_latency_seconds_with_labels", + documentation="Time processing a request with detailed labels", + labelnames=["method", "endpoint", "simcore_user_agent"], + registry=registry, + buckets=(0.1, 1, 5, 10), + ) + + return PrometheusMetrics( + registry=registry, + process_collector=process_collector, + platform_collector=platform_collector, + gc_collector=gc_collector, + request_count=request_count, + in_flight_requests=in_flight_requests, + 
response_latency_with_labels=response_latency_with_labels, + ) + + +@contextmanager +def record_request_metrics( + *, + metrics: PrometheusMetrics, + method: str, + endpoint: str, + user_agent: str, +) -> Iterator[None]: + """ + Context manager to record Prometheus metrics for a request. + + Args: + metrics (PrometheusMetrics): The Prometheus metrics instance. + app_name (str): The application name. + method (str): The HTTP method. + endpoint (str): The canonical endpoint. + user_agent (str): The user agent header value. + """ + + with metrics.in_flight_requests.labels( + method, endpoint, user_agent + ).track_inprogress(): + yield + + +def record_response_metrics( + *, + metrics: PrometheusMetrics, + method: str, + endpoint: str, + user_agent: str, + http_status: int, + response_latency_seconds: float, +) -> None: + exemplar = _get_exemplar() + metrics.request_count.labels(method, endpoint, http_status, user_agent).inc( + exemplar=exemplar + ) + metrics.response_latency_with_labels.labels(method, endpoint, user_agent).observe( + amount=response_latency_seconds, exemplar=exemplar + ) diff --git a/services/director/tests/helpers/__init__.py b/packages/service-library/src/servicelib/py.typed similarity index 100% rename from services/director/tests/helpers/__init__.py rename to packages/service-library/src/servicelib/py.typed diff --git a/packages/service-library/src/servicelib/rabbitmq.py b/packages/service-library/src/servicelib/rabbitmq.py deleted file mode 100644 index 7f695c86303..00000000000 --- a/packages/service-library/src/servicelib/rabbitmq.py +++ /dev/null @@ -1,254 +0,0 @@ -import asyncio -import logging -import os -import socket -from dataclasses import dataclass, field -from typing import Any, Awaitable, Callable, Final, Optional - -import aio_pika -from aio_pika.exceptions import ChannelClosed -from aio_pika.patterns import RPC -from pydantic import PositiveInt -from servicelib.logging_utils import log_context -from settings_library.rabbit import RabbitSettings - -from .rabbitmq_errors import RemoteMethodNotRegisteredError, RPCNotInitializedError -from .rabbitmq_utils import RPCMethodName, RPCNamespace, RPCNamespacedMethodName - -log = logging.getLogger(__name__) - - -def _connection_close_callback(sender: Any, exc: Optional[BaseException]) -> None: - if exc: - if isinstance(exc, asyncio.CancelledError): - log.info("Rabbit connection was cancelled") - else: - log.error( - "Rabbit connection closed with exception from %s:%s", - sender, - exc, - ) - - -def _channel_close_callback(sender: Any, exc: Optional[BaseException]) -> None: - if exc: - if isinstance(exc, asyncio.CancelledError): - log.info("Rabbit channel was cancelled") - elif isinstance(exc, ChannelClosed): - log.info("%s", exc) - else: - log.error( - "Rabbit channel closed with exception from %s:%s", - sender, - exc, - ) - - -async def _get_connection( - rabbit_broker: str, connection_name: str -) -> aio_pika.abc.AbstractRobustConnection: - # NOTE: to show the connection name in the rabbitMQ UI see there - # https://www.bountysource.com/issues/89342433-setting-custom-connection-name-via-client_properties-doesn-t-work-when-connecting-using-an-amqp-url - # - url = f"{rabbit_broker}?name={connection_name}_{socket.gethostname()}_{os.getpid()}" - connection = await aio_pika.connect_robust( - url, client_properties={"connection_name": connection_name} - ) - connection.close_callbacks.add(_connection_close_callback) - return connection - - -MessageHandler = Callable[[Any], Awaitable[bool]] -Message = str - -_MINUTE: 
Final[int] = 60 -_RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_S: Final[int] = 15 * _MINUTE - - -@dataclass -class RabbitMQClient: - client_name: str - settings: RabbitSettings - _connection_pool: Optional[aio_pika.pool.Pool] = field(init=False, default=None) - _channel_pool: Optional[aio_pika.pool.Pool] = field(init=False, default=None) - - _rpc_connection: Optional[aio_pika.RobustConnection] = None - _rpc_channel: Optional[aio_pika.RobustChannel] = None - _rpc: Optional[RPC] = None - - def __post_init__(self): - # recommendations are 1 connection per process - self._connection_pool = aio_pika.pool.Pool( - _get_connection, self.settings.dsn, self.client_name, max_size=1 - ) - # channels are not thread safe, what about python? - self._channel_pool = aio_pika.pool.Pool(self._get_channel, max_size=10) - - async def rpc_initialize(self) -> None: - self._rpc_connection = await aio_pika.connect_robust( - self.settings.dsn, - client_properties={ - "connection_name": f"{self.client_name}.rpc.{socket.gethostname()}" - }, - ) - self._rpc_channel = await self._rpc_connection.channel() - - self._rpc = RPC(self._rpc_channel, host_exceptions=True) - await self._rpc.initialize() - - async def close(self) -> None: - with log_context(log, logging.INFO, msg="Closing connection to RabbitMQ"): - assert self._channel_pool # nosec - await self._channel_pool.close() - assert self._connection_pool # nosec - await self._connection_pool.close() - - # rpc is not always initialized - if self._rpc is not None: - await self._rpc.close() - if self._rpc_channel is not None: - await self._rpc_channel.close() - if self._rpc_connection is not None: - await self._rpc_connection.close() - - async def _get_channel(self) -> aio_pika.abc.AbstractChannel: - assert self._connection_pool # nosec - async with self._connection_pool.acquire() as connection: - connection: aio_pika.RobustConnection - channel = await connection.channel() - channel.close_callbacks.add(_channel_close_callback) - return channel - - async def ping(self) -> bool: - assert self._connection_pool # nosec - async with self._connection_pool.acquire() as connection: - connection: aio_pika.RobustConnection - return connection.connected.is_set() - - async def subscribe( - self, - exchange_name: str, - message_handler: MessageHandler, - *, - exclusive_queue: bool = True, - ) -> None: - """subscribe to exchange_name calling message_handler for every incoming message - - exclusive_queue: True means that every instance of this application will receive the incoming messages - - exclusive_queue: False means that only one instance of this application will reveice the incoming message - """ - assert self._channel_pool # nosec - async with self._channel_pool.acquire() as channel: - channel: aio_pika.RobustChannel - _DEFAULT_PREFETCH_VALUE = 10 # this value is set to the default for now - await channel.set_qos(_DEFAULT_PREFETCH_VALUE) - - exchange = await channel.declare_exchange( - exchange_name, aio_pika.ExchangeType.FANOUT, durable=True - ) - - # NOTE: durable=True makes the queue persistent between RabbitMQ restarts/crashes - # consumer/publisher must set the same configuration for same queue - # exclusive means that the queue is only available for THIS very client - # and will be deleted when the client disconnects - queue_parameters = { - "durable": True, - "exclusive": exclusive_queue, - "arguments": {"x-message-ttl": _RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_S}, - } - if not exclusive_queue: - # NOTE: setting a name will ensure multiple instance will take their data here - 
queue_parameters |= {"name": exchange_name} - queue = await channel.declare_queue(**queue_parameters) - await queue.bind(exchange) - - async def _on_message( - message: aio_pika.abc.AbstractIncomingMessage, - ) -> None: - async with message.process(requeue=True): - with log_context( - log, logging.DEBUG, msg=f"Message received {message}" - ): - if not await message_handler(message.body): - await message.nack() - - await queue.consume(_on_message) - - async def publish(self, exchange_name: str, message: Message) -> None: - assert self._channel_pool # nosec - async with self._channel_pool.acquire() as channel: - channel: aio_pika.RobustChannel - exchange = await channel.declare_exchange( - exchange_name, aio_pika.ExchangeType.FANOUT, durable=True - ) - await exchange.publish( - aio_pika.Message(message.encode()), - routing_key="", - ) - - async def rpc_request( - self, - namespace: RPCNamespace, - method_name: RPCMethodName, - *, - timeout_s: Optional[PositiveInt] = 5, - **kwargs: dict[str, Any], - ) -> Any: - """ - Call a remote registered `handler` by providing it's `namespace`, `method_name` - and `kwargs` containing the key value arguments expected by the remote `handler`. - - :raises asyncio.TimeoutError: when message expired - :raises CancelledError: when called :func:`RPC.cancel` - :raises RuntimeError: internal error - :raises RemoteMethodNotRegisteredError: when no handler was registered to the - `namespaced_method_name` - """ - - if not self._rpc: - raise RPCNotInitializedError() - - namespaced_method_name = RPCNamespacedMethodName.from_namespace_and_method( - namespace, method_name - ) - try: - queue_expiration_timeout = timeout_s - awaitable = self._rpc.call( - namespaced_method_name, - expiration=queue_expiration_timeout, - kwargs=kwargs, - ) - return await asyncio.wait_for(awaitable, timeout=timeout_s) - except aio_pika.MessageProcessError as e: - if e.args[0] == "Message has been returned": - raise RemoteMethodNotRegisteredError( - method_name=namespaced_method_name, incoming_message=e.args[1] - ) from e - raise e - - async def rpc_register_handler( - self, namespace: RPCNamespace, method_name: RPCMethodName, handler: Awaitable - ) -> None: - """ - Bind a local `handler` to a `namespace` and `method_name`. - The handler can be remotely called by providing the `namespace` and `method_name` - - NOTE: method_name could be computed from the handler, but by design, it is - left to the caller to do so. 
- """ - - if self._rpc is None: - raise RPCNotInitializedError() - - await self._rpc.register( - RPCNamespacedMethodName.from_namespace_and_method(namespace, method_name), - handler, - auto_delete=True, - ) - - async def rpc_unregister_handler(self, handler: Awaitable) -> None: - """Unbind a locally added `handler`""" - - if self._rpc is None: - raise RPCNotInitializedError() - - await self._rpc.unregister(handler) diff --git a/packages/service-library/src/servicelib/rabbitmq/__init__.py b/packages/service-library/src/servicelib/rabbitmq/__init__.py new file mode 100644 index 00000000000..ad67487cdd9 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/__init__.py @@ -0,0 +1,35 @@ +from models_library.rabbitmq_basic_types import RPCNamespace + +from ._client import RabbitMQClient +from ._client_rpc import RabbitMQRPCClient, rabbitmq_rpc_client_context +from ._constants import BIND_TO_ALL_TOPICS, RPC_REQUEST_DEFAULT_TIMEOUT_S +from ._errors import ( + RemoteMethodNotRegisteredError, + RPCInterfaceError, + RPCNotInitializedError, + RPCServerError, +) +from ._models import ConsumerTag, ExchangeName, QueueName +from ._rpc_router import RPCRouter +from ._utils import is_rabbitmq_responsive, wait_till_rabbitmq_responsive + +__all__: tuple[str, ...] = ( + "BIND_TO_ALL_TOPICS", + "RPC_REQUEST_DEFAULT_TIMEOUT_S", + "ConsumerTag", + "ExchangeName", + "QueueName", + "RPCInterfaceError", + "RPCNamespace", + "RPCNotInitializedError", + "RPCRouter", + "RPCServerError", + "RabbitMQClient", + "RabbitMQRPCClient", + "RemoteMethodNotRegisteredError", + "is_rabbitmq_responsive", + "rabbitmq_rpc_client_context", + "wait_till_rabbitmq_responsive", +) + +# nopycln: file diff --git a/packages/service-library/src/servicelib/rabbitmq/_client.py b/packages/service-library/src/servicelib/rabbitmq/_client.py new file mode 100644 index 00000000000..ccf1445c231 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_client.py @@ -0,0 +1,375 @@ +import asyncio +import logging +from dataclasses import dataclass, field +from functools import partial +from typing import Final +from uuid import uuid4 + +import aio_pika +from pydantic import NonNegativeInt + +from ..logging_utils import log_catch, log_context +from ._client_base import RabbitMQClientBase +from ._models import ( + ConsumerTag, + ExchangeName, + MessageHandler, + QueueName, + RabbitMessage, + TopicName, +) +from ._utils import ( + RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_MS, + declare_queue, + get_rabbitmq_client_unique_name, +) + +_logger = logging.getLogger(__name__) + + +_DEFAULT_PREFETCH_VALUE: Final[int] = 10 +_DEFAULT_RABBITMQ_EXECUTION_TIMEOUT_S: Final[int] = 5 +_HEADER_X_DEATH: Final[str] = "x-death" + +_DEFAULT_UNEXPECTED_ERROR_RETRY_DELAY_S: Final[float] = 1 +_DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS: Final[NonNegativeInt] = 15 + +_DELAYED_EXCHANGE_NAME: Final[ExchangeName] = ExchangeName("delayed_{exchange_name}") +_DELAYED_QUEUE_NAME: Final[ExchangeName] = ExchangeName("delayed_{queue_name}") + + +def _get_x_death_count(message: aio_pika.abc.AbstractIncomingMessage) -> int: + count: int = 0 + if (x_death := message.headers.get(_HEADER_X_DEATH, [])) and ( + isinstance(x_death, list) + and x_death + and isinstance(x_death[0], dict) + and "count" in x_death[0] + ): + + assert isinstance(x_death[0]["count"], int) # nosec + count = x_death[0]["count"] + + return count + + +async def _safe_nack( + message_handler: MessageHandler, + max_retries_upon_error: int, + message: aio_pika.abc.AbstractIncomingMessage, +) -> None: + count = 
_get_x_death_count(message) + if count < max_retries_upon_error: + _logger.warning( + ( + "Retry [%s/%s] for handler '%s', which raised " + "an unexpected error caused by message_id='%s'" + ), + count, + max_retries_upon_error, + message_handler, + message.message_id, + ) + # NOTE: puts message to the Dead Letter Exchange + await message.nack(requeue=False) + else: + _logger.exception( + "Handler '%s' is giving up on message '%s' with body '%s'", + message_handler, + message, + message.body, + ) + + +async def _on_message( + message_handler: MessageHandler, + max_retries_upon_error: int, + message: aio_pika.abc.AbstractIncomingMessage, +) -> None: + async with message.process(requeue=True, ignore_processed=True): + try: + with log_context( + _logger, + logging.DEBUG, + msg=f"Received message from {message.exchange=}, {message.routing_key=}", + ): + if not await message_handler(message.body): + await _safe_nack(message_handler, max_retries_upon_error, message) + except Exception: # pylint: disable=broad-exception-caught + _logger.exception("Exception raised when handling message") + with log_catch(_logger, reraise=False): + await _safe_nack(message_handler, max_retries_upon_error, message) + + +@dataclass +class RabbitMQClient(RabbitMQClientBase): + _connection_pool: aio_pika.pool.Pool | None = field(init=False, default=None) + _channel_pool: aio_pika.pool.Pool | None = field(init=False, default=None) + + def __post_init__(self) -> None: + # recommendations are 1 connection per process + self._connection_pool = aio_pika.pool.Pool( + self._get_connection, self.settings.dsn, self.client_name, max_size=1 + ) + # channels are not thread safe, what about python? + self._channel_pool = aio_pika.pool.Pool(self._get_channel, max_size=10) + + async def _get_connection( + self, rabbit_broker: str, connection_name: str + ) -> aio_pika.abc.AbstractRobustConnection: + # NOTE: to show the connection name in the rabbitMQ UI see there + # https://www.bountysource.com/issues/89342433-setting-custom-connection-name-via-client_properties-doesn-t-work-when-connecting-using-an-amqp-url + # + url = f"{rabbit_broker}?name={get_rabbitmq_client_unique_name(connection_name)}&heartbeat={self.heartbeat}" + connection = await aio_pika.connect_robust( + url, + client_properties={"connection_name": connection_name}, + timeout=_DEFAULT_RABBITMQ_EXECUTION_TIMEOUT_S, + ) + connection.close_callbacks.add(self._connection_close_callback) + return connection + + async def close(self) -> None: + with log_context( + _logger, + logging.INFO, + msg=f"{self.client_name} closing connection to RabbitMQ", + ): + assert self._channel_pool # nosec + await self._channel_pool.close() + assert self._connection_pool # nosec + await self._connection_pool.close() + + async def _get_channel(self) -> aio_pika.abc.AbstractChannel: + assert self._connection_pool # nosec + async with self._connection_pool.acquire() as connection: + channel: aio_pika.abc.AbstractChannel = await connection.channel() + channel.close_callbacks.add(self._channel_close_callback) + return channel + + async def _create_consumer_tag(self, exchange_name) -> ConsumerTag: + return ConsumerTag( + f"{get_rabbitmq_client_unique_name(self.client_name)}_{exchange_name}_{uuid4()}" + ) + + async def subscribe( + self, + exchange_name: ExchangeName, + message_handler: MessageHandler, + *, + exclusive_queue: bool = True, + non_exclusive_queue_name: str | None = None, + topics: list[str] | None = None, + message_ttl: NonNegativeInt = RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_MS, + 
unexpected_error_retry_delay_s: float = _DEFAULT_UNEXPECTED_ERROR_RETRY_DELAY_S, + unexpected_error_max_attempts: int = _DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS, + ) -> tuple[QueueName, ConsumerTag]: + """subscribe to exchange_name calling ``message_handler`` for every incoming message + - exclusive_queue: True means that every instance of this application will + receive the incoming messages + - exclusive_queue: False means that only one instance of this application will + reveice the incoming message + - non_exclusive_queue_name: if exclusive_queue is False, then this name will be used. If None + it will use the exchange_name. + + NOTE: ``message_ttl` is also a soft timeout: if the handler does not finish processing + the message before this is reached the message will be redelivered! + + specifying a topic will make the client declare a TOPIC type of RabbitMQ Exchange + instead of FANOUT + - a FANOUT exchange transmit messages to any connected queue regardless of + the routing key + - a TOPIC exchange transmit messages to any connected queue provided it is + bound with the message routing key + - topic = BIND_TO_ALL_TOPICS ("#") is equivalent to the FANOUT effect + - a queue bound with topic "director-v2.*" will receive any message that + uses a routing key such as "director-v2.event.service_started" + - a queue bound with topic "director-v2.event.specific_event" will only + receive messages with that exact routing key (same as DIRECT exchanges behavior) + + ``unexpected_error_max_attempts`` is the maximum amount of retries when the ``message_handler`` + raised an unexpected error or it returns `False` + ``unexpected_error_retry_delay_s`` time to wait between each retry when the ``message_handler`` + raised an unexpected error or it returns `False` + + Raises: + aio_pika.exceptions.ChannelPreconditionFailed: In case an existing exchange with + different type is used + Returns: + tuple of queue name and consumer tag mapping + """ + + assert self._channel_pool # nosec + async with self._channel_pool.acquire() as channel: + qos_value = 1 if exclusive_queue is False else _DEFAULT_PREFETCH_VALUE + await channel.set_qos(qos_value) + + exchange = await channel.declare_exchange( + exchange_name, + ( + aio_pika.ExchangeType.FANOUT + if topics is None + else aio_pika.ExchangeType.TOPIC + ), + durable=True, + timeout=_DEFAULT_RABBITMQ_EXECUTION_TIMEOUT_S, + ) + + # NOTE: durable=True makes the queue persistent between RabbitMQ restarts/crashes + # consumer/publisher must set the same configuration for same queue + # exclusive means that the queue is only available for THIS very client + # and will be deleted when the client disconnects + # NOTE what is a dead letter exchange, see https://www.rabbitmq.com/dlx.html + delayed_exchange_name = _DELAYED_EXCHANGE_NAME.format( + exchange_name=exchange_name + ) + queue = await declare_queue( + channel, + self.client_name, + non_exclusive_queue_name or exchange_name, + exclusive_queue=exclusive_queue, + message_ttl=message_ttl, + arguments={"x-dead-letter-exchange": delayed_exchange_name}, + ) + if topics is None: + await queue.bind(exchange, routing_key="") + else: + await asyncio.gather( + *(queue.bind(exchange, routing_key=topic) for topic in topics) + ) + + delayed_exchange = await channel.declare_exchange( + delayed_exchange_name, aio_pika.ExchangeType.FANOUT, durable=True + ) + delayed_queue_name = _DELAYED_QUEUE_NAME.format( + queue_name=non_exclusive_queue_name or exchange_name + ) + + delayed_queue = await declare_queue( + channel, + 
self.client_name, + delayed_queue_name, + exclusive_queue=exclusive_queue, + message_ttl=int(unexpected_error_retry_delay_s * 1000), + arguments={"x-dead-letter-exchange": exchange.name}, + ) + await delayed_queue.bind(delayed_exchange) + + consumer_tag = await self._create_consumer_tag(exchange_name) + await queue.consume( + partial(_on_message, message_handler, unexpected_error_max_attempts), + exclusive=exclusive_queue, + consumer_tag=consumer_tag, + ) + return queue.name, consumer_tag + + async def add_topics( + self, + exchange_name: ExchangeName, + *, + topics: list[TopicName], + ) -> None: + assert self._channel_pool # nosec + + async with self._channel_pool.acquire() as channel: + exchange = await channel.get_exchange(exchange_name) + queue = await declare_queue( + channel, + self.client_name, + exchange_name, + exclusive_queue=True, + arguments={ + "x-dead-letter-exchange": _DELAYED_EXCHANGE_NAME.format( + exchange_name=exchange_name + ) + }, + ) + + await asyncio.gather( + *(queue.bind(exchange, routing_key=topic) for topic in topics) + ) + + async def remove_topics( + self, + exchange_name: ExchangeName, + *, + topics: list[TopicName], + ) -> None: + assert self._channel_pool # nosec + async with self._channel_pool.acquire() as channel: + exchange = await channel.get_exchange(exchange_name) + queue = await declare_queue( + channel, + self.client_name, + exchange_name, + exclusive_queue=True, + arguments={ + "x-dead-letter-exchange": _DELAYED_EXCHANGE_NAME.format( + exchange_name=exchange_name + ) + }, + ) + + await asyncio.gather( + *(queue.unbind(exchange, routing_key=topic) for topic in topics), + ) + + async def unsubscribe( + self, + queue_name: QueueName, + ) -> None: + """This will delete the queue if there are no consumers left""" + assert self._connection_pool # nosec + if self._connection_pool.is_closed: + _logger.warning( + "Connection to RabbitMQ is already closed, skipping unsubscribe from queue..." + ) + return + assert self._channel_pool # nosec + async with self._channel_pool.acquire() as channel: + queue = await channel.get_queue(queue_name) + # NOTE: we force delete here + await queue.delete(if_unused=False, if_empty=False) + + async def publish( + self, exchange_name: ExchangeName, message: RabbitMessage + ) -> None: + """publish message in the exchange exchange_name. + specifying a topic will use a TOPIC type of RabbitMQ Exchange instead of FANOUT + + NOTE: changing the type of Exchange will create issues if the name is not changed! + """ + assert self._channel_pool # nosec + topic = message.routing_key() + + async with self._channel_pool.acquire() as channel: + exchange = await channel.declare_exchange( + exchange_name, + ( + aio_pika.ExchangeType.FANOUT + if topic is None + else aio_pika.ExchangeType.TOPIC + ), + durable=True, + timeout=_DEFAULT_RABBITMQ_EXECUTION_TIMEOUT_S, + ) + await exchange.publish( + aio_pika.Message(message.body()), + routing_key=message.routing_key() or "", + ) + + async def unsubscribe_consumer( + self, queue_name: QueueName, consumer_tag: ConsumerTag + ) -> None: + """This will only remove the consumers without deleting the queue""" + assert self._connection_pool # nosec + if self._connection_pool.is_closed: + _logger.warning( + "Connection to RabbitMQ is already closed, skipping unsubscribe consumers from queue..." 
+ ) + return + assert self._channel_pool # nosec + async with self._channel_pool.acquire() as channel: + assert isinstance(channel, aio_pika.RobustChannel) # nosec + queue = await channel.get_queue(queue_name) + await queue.cancel(consumer_tag) diff --git a/packages/service-library/src/servicelib/rabbitmq/_client_base.py b/packages/service-library/src/servicelib/rabbitmq/_client_base.py new file mode 100644 index 00000000000..69720659e50 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_client_base.py @@ -0,0 +1,73 @@ +import asyncio +import logging +from abc import abstractmethod +from dataclasses import dataclass +from typing import Any, Final + +import aio_pika +import aiormq +from servicelib.logging_utils import log_catch +from settings_library.rabbit import RabbitSettings + +_DEFAULT_RABBITMQ_SERVER_HEARTBEAT_S: Final[int] = 60 + +_logger = logging.getLogger(__name__) + + +@dataclass +class RabbitMQClientBase: + client_name: str + settings: RabbitSettings + heartbeat: int = _DEFAULT_RABBITMQ_SERVER_HEARTBEAT_S + + _healthy_state: bool = True + + def _connection_close_callback( + self, + sender: Any, # pylint: disable=unused-argument + exc: BaseException | None, + ) -> None: + if exc: + if isinstance(exc, asyncio.CancelledError): + _logger.info("Rabbit connection cancelled") + elif isinstance(exc, aiormq.exceptions.ConnectionClosed): + _logger.info("Rabbit connection closed: %s", exc) + else: + _logger.error( + "Rabbit connection closed with exception from %s:%s", + type(exc), + exc, + ) + self._healthy_state = False + + def _channel_close_callback( + self, + sender: Any, # pylint: disable=unused-argument # noqa: ARG002 + exc: BaseException | None, + ) -> None: + if exc: + if isinstance(exc, asyncio.CancelledError): + _logger.info("Rabbit channel cancelled") + elif isinstance(exc, aiormq.exceptions.ChannelClosed): + _logger.info("Rabbit channel closed") + else: + _logger.error( + "Rabbit channel closed with exception from %s:%s", + type(exc), + exc, + ) + self._healthy_state = False + + @property + def healthy(self) -> bool: + return self._healthy_state + + async def ping(self) -> bool: + with log_catch(_logger, reraise=False): + async with await aio_pika.connect(self.settings.dsn, timeout=1): + ... + return True + return False + + @abstractmethod + async def close(self) -> None: ... 
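The ``subscribe``/``publish`` interface documented above distinguishes FANOUT exchanges (no ``topics``) from TOPIC exchanges, and uses ``exclusive_queue`` to decide whether every running instance or only one instance of an application consumes each message. The following is a minimal usage sketch, not part of this PR: it assumes ``RabbitMQClient`` is re-exported by ``servicelib.rabbitmq``, that it can be instantiated directly with ``client_name`` and ``settings``, and that ``RabbitSettings.create_from_envs()`` is available; the exchange and client names are made up.

import asyncio
from typing import Any

from servicelib.rabbitmq import RabbitMQClient  # assumed re-export
from settings_library.rabbit import RabbitSettings


async def _on_event(data: Any) -> bool:
    # Returning True acknowledges the message. Returning False or raising sends it
    # to the delayed (dead-letter) exchange and it is redelivered after the retry delay.
    print(data)
    return True


async def main() -> None:
    settings = RabbitSettings.create_from_envs()  # assumes RABBIT_* env vars are set
    client = RabbitMQClient(client_name="demo-consumer", settings=settings)

    # TOPIC exchange: only messages whose routing key matches a binding are delivered
    queue_name, consumer_tag = await client.subscribe(
        "simcore.events",       # illustrative exchange name
        _on_event,
        exclusive_queue=False,  # only one instance handles each message
        topics=["director-v2.*"],
    )

    await asyncio.sleep(30)  # consume for a while

    await client.unsubscribe_consumer(queue_name, consumer_tag)
    await client.unsubscribe(queue_name)
    await client.close()


asyncio.run(main())

Publishing to the same exchange is symmetric via ``client.publish(exchange_name, message)`` with any object satisfying the ``RabbitMessage`` protocol shown further below.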
diff --git a/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py b/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py new file mode 100644 index 00000000000..53d9f132658 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py @@ -0,0 +1,175 @@ +import asyncio +import functools +import logging +from collections.abc import AsyncIterator, Callable +from contextlib import asynccontextmanager +from dataclasses import dataclass +from typing import Any + +import aio_pika +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from pydantic import PositiveInt +from settings_library.rabbit import RabbitSettings + +from ..logging_utils import log_context +from ._client_base import RabbitMQClientBase +from ._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S +from ._errors import RemoteMethodNotRegisteredError, RPCNotInitializedError +from ._models import RPCNamespacedMethodName +from ._rpc_router import RPCRouter +from ._utils import get_rabbitmq_client_unique_name + +_logger = logging.getLogger(__name__) + + +@dataclass +class RabbitMQRPCClient(RabbitMQClientBase): + _connection: aio_pika.abc.AbstractConnection | None = None + _channel: aio_pika.abc.AbstractChannel | None = None + _rpc: aio_pika.patterns.RPC | None = None + + @classmethod + async def create( + cls, *, client_name: str, settings: RabbitSettings, **kwargs + ) -> "RabbitMQRPCClient": + client = cls(client_name=client_name, settings=settings, **kwargs) + await client._rpc_initialize() + return client + + async def _rpc_initialize(self) -> None: + # NOTE: to show the connection name in the rabbitMQ UI see there + # https://www.bountysource.com/issues/89342433-setting-custom-connection-name-via-client_properties-doesn-t-work-when-connecting-using-an-amqp-url + # + connection_name = f"{get_rabbitmq_client_unique_name(self.client_name)}.rpc" + url = f"{self.settings.dsn}?name={connection_name}" + self._connection = await aio_pika.connect_robust( + url, + client_properties={"connection_name": connection_name}, + ) + self._channel = await self._connection.channel() + + self._rpc = aio_pika.patterns.RPC(self._channel) + await self._rpc.initialize() + + async def close(self) -> None: + with log_context( + _logger, + logging.INFO, + msg=f"{self.client_name} closing connection to RabbitMQ", + ): + # rpc is not always initialized + if self._rpc is not None: + await self._rpc.close() + if self._channel is not None: + await self._channel.close() + if self._connection is not None: + await self._connection.close() + + async def request( + self, + namespace: RPCNamespace, + method_name: RPCMethodName, + *, + timeout_s: PositiveInt | None = RPC_REQUEST_DEFAULT_TIMEOUT_S, + **kwargs, + ) -> Any: + """ + Call a remote registered `handler` by providing it's `namespace`, `method_name` + and `kwargs` containing the key value arguments expected by the remote `handler`. 
+ + :raises asyncio.TimeoutError: when message expired + :raises CancelledError: when called :func:`RPC.cancel` + :raises RuntimeError: internal error + :raises RemoteMethodNotRegisteredError: when no handler was registered to the + `namespaced_method_name` + """ + + if not self._rpc: + raise RPCNotInitializedError + + namespaced_method_name = RPCNamespacedMethodName.from_namespace_and_method( + namespace, method_name + ) + try: + queue_expiration_timeout = timeout_s + awaitable = self._rpc.call( + namespaced_method_name, + expiration=queue_expiration_timeout, + kwargs=kwargs, + ) + return await asyncio.wait_for(awaitable, timeout=timeout_s) + except aio_pika.MessageProcessError as e: + if e.args[0] == "Message has been returned": + raise RemoteMethodNotRegisteredError( + method_name=namespaced_method_name, incoming_message=e.args[1] + ) from e + raise + except ModuleNotFoundError as err: + # SEE https://github.com/ITISFoundation/osparc-simcore/blob/b1aee64ae207a6ed3e965ff7869c74a312109de7/services/catalog/src/simcore_service_catalog/api/rpc/_services.py#L41-L46 + err.msg += ( + "\nTIP: All i/o rpc parameters MUST be shared by client and server sides. " + "Careful with Generics instanciated on the server side. " + "Use instead a TypeAlias in a common library." + ) + raise + + async def register_handler( + self, + namespace: RPCNamespace, + method_name: RPCMethodName, + handler: Callable[..., Any], + ) -> None: + """ + Bind a local `handler` to a `namespace` and `method_name`. + The handler can be remotely called by providing the `namespace` and `method_name` + + NOTE: method_name could be computed from the handler, but by design, it is + left to the caller to do so. + """ + + if self._rpc is None: + raise RPCNotInitializedError + + await self._rpc.register( + RPCNamespacedMethodName.from_namespace_and_method(namespace, method_name), + handler, + auto_delete=True, + ) + + async def register_router( + self, + router: RPCRouter, + namespace: RPCNamespace, + *handler_args, + **handler_kwargs, + ) -> None: + for rpc_method_name, handler in router.routes.items(): + await self.register_handler( + namespace, + rpc_method_name, + functools.partial(handler, *handler_args, **handler_kwargs), + ) + + async def unregister_handler(self, handler: Callable[..., Any]) -> None: + """Unbind a locally added `handler`""" + + if self._rpc is None: + raise RPCNotInitializedError + + await self._rpc.unregister(handler) + + +@asynccontextmanager +async def rabbitmq_rpc_client_context( + rpc_client_name: str, settings: RabbitSettings, **kwargs +) -> AsyncIterator[RabbitMQRPCClient]: + """ + Adapter to create and close a RabbitMQRPCClient using an async context manager. 
+ """ + rpc_client = await RabbitMQRPCClient.create( + client_name=rpc_client_name, settings=settings, **kwargs + ) + try: + yield rpc_client + finally: + await rpc_client.close() diff --git a/packages/service-library/src/servicelib/rabbitmq/_constants.py b/packages/service-library/src/servicelib/rabbitmq/_constants.py new file mode 100644 index 00000000000..765ebee6e47 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_constants.py @@ -0,0 +1,7 @@ +from typing import Final + +from pydantic import PositiveInt + +BIND_TO_ALL_TOPICS: Final[str] = "#" +RPC_REQUEST_DEFAULT_TIMEOUT_S: Final[PositiveInt] = PositiveInt(5) +RPC_REMOTE_METHOD_TIMEOUT_S: Final[int] = 30 diff --git a/packages/service-library/src/servicelib/rabbitmq/_errors.py b/packages/service-library/src/servicelib/rabbitmq/_errors.py new file mode 100644 index 00000000000..ce58b62fd5c --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_errors.py @@ -0,0 +1,48 @@ +from typing import Final + +from common_library.errors_classes import OsparcErrorMixin + +_ERROR_PREFIX: Final[str] = "rabbitmq_error" + + +class BaseRPCError(OsparcErrorMixin, RuntimeError): ... + + +class RPCNotInitializedError(BaseRPCError): + code = f"{_ERROR_PREFIX}.not_started" # type: ignore[assignment] + msg_template = "Please check that the RabbitMQ RPC backend was initialized!" + + +class RemoteMethodNotRegisteredError(BaseRPCError): + code = f"{_ERROR_PREFIX}.remote_not_registered" # type: ignore[assignment] + msg_template = ( + "Could not find a remote method named: '{method_name}'. " + "Message from remote server was returned: {incoming_message}. " + ) + + +class RPCServerError(BaseRPCError): + msg_template = ( + "While running method '{method_name}' raised " + "'{exc_type}': '{exc_message}'\n{traceback}" + ) + + +class RPCInterfaceError(RPCServerError): + """ + Base class for RPC interface exceptions. + + Avoid using domain exceptions directly; if a one-to-one mapping is required, + prefer using the `from_domain_error` transformation function. + """ + + msg_template = "{domain_error_message} [{domain_error_code}]" + + @classmethod + def from_domain_error(cls, err: OsparcErrorMixin): + domain_err_ctx = err.error_context() + return cls( + domain_error_message=domain_err_ctx.pop("message"), + domain_error_code=domain_err_ctx.pop("code"), + **domain_err_ctx, # same context as domain + ) diff --git a/packages/service-library/src/servicelib/rabbitmq/_models.py b/packages/service-library/src/servicelib/rabbitmq/_models.py new file mode 100644 index 00000000000..cd674e526ff --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_models.py @@ -0,0 +1,36 @@ +from collections.abc import Awaitable, Callable +from typing import Any, Protocol, TypeAlias + +from models_library.basic_types import ConstrainedStr +from models_library.rabbitmq_basic_types import ( + REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS, + RPCMethodName, + RPCNamespace, +) +from pydantic import TypeAdapter + +MessageHandler = Callable[[Any], Awaitable[bool]] + +ExchangeName: TypeAlias = str +QueueName: TypeAlias = str +ConsumerTag: TypeAlias = str +TopicName: TypeAlias = str + + +class RabbitMessage(Protocol): + def body(self) -> bytes: ... + + def routing_key(self) -> str | None: ... 
+ + +class RPCNamespacedMethodName(ConstrainedStr): + min_length: int = 1 + max_length: int = 255 + pattern: str = REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS + + @classmethod + def from_namespace_and_method( + cls, namespace: RPCNamespace, method_name: RPCMethodName + ) -> "RPCNamespacedMethodName": + namespaced_method_name = f"{namespace}.{method_name}" + return TypeAdapter(cls).validate_python(namespaced_method_name) diff --git a/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py b/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py new file mode 100644 index 00000000000..49cab08f79b --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_rpc_router.py @@ -0,0 +1,85 @@ +import asyncio +import functools +import logging +import traceback +from collections.abc import Callable +from dataclasses import dataclass, field +from typing import Any, TypeVar + +from models_library.rabbitmq_basic_types import RPCMethodName + +from ..logging_utils import log_context +from ._errors import RPCServerError + +DecoratedCallable = TypeVar("DecoratedCallable", bound=Callable[..., Any]) + +# NOTE: this is equivalent to http access logs +_logger = logging.getLogger("rpc.access") + + +def _create_func_msg(func, args: tuple[Any, ...], kwargs: dict[str, Any]) -> str: + msg = f"{func.__name__}(" + + if args_msg := ", ".join(map(str, args)): + msg += args_msg + + if kwargs_msg := ", ".join({f"{name}={value}" for name, value in kwargs.items()}): + if args: + msg += ", " + msg += kwargs_msg + + return f"{msg})" + + +@dataclass +class RPCRouter: + routes: dict[RPCMethodName, Callable] = field(default_factory=dict) + + def expose( + self, + *, + reraise_if_error_type: tuple[type[Exception], ...] | None = None, + ) -> Callable[[DecoratedCallable], DecoratedCallable]: + def _decorator(func: DecoratedCallable) -> DecoratedCallable: + @functools.wraps(func) + async def _wrapper(*args, **kwargs): + with log_context( + # NOTE: this is intentionally analogous to the http access log traces. + # To change log-level use getLogger("rpc.access").set_level(...) + _logger, + logging.INFO, + msg=f"RPC call {_create_func_msg(func, args, kwargs)}", + log_duration=True, + ): + try: + return await func(*args, **kwargs) + + except asyncio.CancelledError: + _logger.debug("Call %s was cancelled", func.__name__) + raise + + except Exception as exc: # pylint: disable=broad-except + if reraise_if_error_type and isinstance( + exc, reraise_if_error_type + ): + raise + + _logger.exception( + "Unhandled exception on the rpc-server side." + " Re-raising as RPCServerError." 
+ ) + # NOTE: we do not return internal exceptions over RPC + formatted_traceback = "\n".join( + traceback.format_tb(exc.__traceback__) + ) + raise RPCServerError( + method_name=func.__name__, + exc_type=f"{exc.__class__.__module__}.{exc.__class__.__name__}", + exc_message=f"{exc}", + traceback=f"{formatted_traceback}", + ) from None + + self.routes[RPCMethodName(func.__name__)] = _wrapper + return func + + return _decorator diff --git a/packages/service-library/src/servicelib/rabbitmq/_utils.py b/packages/service-library/src/servicelib/rabbitmq/_utils.py new file mode 100644 index 00000000000..404adb1b652 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/_utils.py @@ -0,0 +1,107 @@ +import logging +import os +import socket +from typing import Any, Final + +import aio_pika +import psutil +from aiormq.exceptions import ChannelPreconditionFailed +from pydantic import NonNegativeInt +from tenacity import retry +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from ..logging_utils import log_context +from ._models import QueueName + +_logger = logging.getLogger(__file__) + + +_MINUTE: Final[int] = 60 + +RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_MS: Final[int] = 15 * _MINUTE * 1000 + + +class RabbitMQRetryPolicyUponInitialization: + """Retry policy upon service initialization""" + + def __init__(self, logger: logging.Logger | None = None) -> None: + logger = logger or _logger + + self.kwargs: dict[str, Any] = { + "wait": wait_fixed(2), + "stop": stop_after_delay(3 * _MINUTE), + "before_sleep": before_sleep_log(logger, logging.WARNING), + "reraise": True, + } + + +async def is_rabbitmq_responsive(url: str) -> bool: + """True if responsive or raises an error""" + with log_context( + _logger, logging.INFO, msg=f"checking RabbitMQ connection at {url=}" + ): + async with await aio_pika.connect(url): + _logger.info("rabbitmq connection established") + return True + + +@retry(**RabbitMQRetryPolicyUponInitialization().kwargs) +async def wait_till_rabbitmq_responsive(url: str) -> bool: + """waits for rabbitmq to become responsive""" + return await is_rabbitmq_responsive(url) + + +def get_rabbitmq_client_unique_name(base_name: str) -> str: + # NOTE: The prefix below will change every time the process restarts. + # Why is this necessary? + # 1. The codebase relies on this behavior; without it, subscribers and consumers will fail. + # 2. It allows the web server to be restarted seamlessly during [re]deployments. 
prefix_create_time = f"{psutil.Process(os.getpid()).create_time()}".strip(".")[-6:] + + return f"{base_name}_{socket.gethostname()}_{prefix_create_time}" + + +async def declare_queue( + channel: aio_pika.RobustChannel, + client_name: str, + queue_name: QueueName, + *, + exclusive_queue: bool, + arguments: dict[str, Any] | None = None, + message_ttl: NonNegativeInt = RABBIT_QUEUE_MESSAGE_DEFAULT_TTL_MS, +) -> aio_pika.abc.AbstractRobustQueue: + default_arguments = {"x-message-ttl": message_ttl} + if arguments is not None: + default_arguments.update(arguments) + queue_parameters: dict[str, Any] = { + "durable": True, + "exclusive": exclusive_queue, + "arguments": default_arguments, + "name": f"{get_rabbitmq_client_unique_name(client_name)}_{queue_name}_exclusive", + } + if not exclusive_queue: + # NOTE: setting a name ensures that multiple instances will consume from the same queue + queue_parameters |= {"name": queue_name} + + # NOTE: if the line below raises something similar to ``ChannelPreconditionFailed: PRECONDITION_FAILED`` + # most likely someone changed the declaration parameters of the queue (arguments etc...) + # Safest way to deal with it: + # 1. check whether there are any messages for the existing queue in rabbitmq + # 2. NO messages -> delete queue + # 3. Found messages: + # - save messages + # - delete queue + # - restore messages + # Why is this the safest? An example: + # 1. a user bought $1000 worth of credits + # 2. for some reason the resource usage tracker is unavailable and the message is stuck in the queue + # 3. if the queue is deleted, the action related to this transaction will be lost + try: + return await channel.declare_queue(**queue_parameters) + except ChannelPreconditionFailed: + _logger.exception( + "Most likely the rabbit queue parameters have changed. See notes above to fix!"
+ ) + raise diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/__init__.py similarity index 100% rename from services/osparc-gateway-server/src/osparc_gateway_server/backend/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/__init__.py diff --git a/services/storage/src/simcore_service_storage/datcore_adapter/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/__init__.py similarity index 100% rename from services/storage/src/simcore_service_storage/datcore_adapter/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py new file mode 100644 index 00000000000..2049f0a409f --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/containers.py @@ -0,0 +1,37 @@ +import logging +from datetime import timedelta +from typing import Final + +from models_library.docker import DockerNodeID +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from pydantic import NonNegativeInt, TypeAdapter +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + +_REQUEST_TIMEOUT: Final[NonNegativeInt] = int(timedelta(minutes=60).total_seconds()) + + +@log_decorator(_logger, level=logging.DEBUG) +async def force_container_cleanup( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + docker_node_id: DockerNodeID, + swarm_stack_name: str, + node_id: NodeID, +) -> None: + result = await rabbitmq_rpc_client.request( + RPCNamespace.from_entries( + { + "service": "agent", + "docker_node_id": docker_node_id, + "swarm_stack_name": swarm_stack_name, + } + ), + TypeAdapter(RPCMethodName).validate_python("force_container_cleanup"), + node_id=node_id, + timeout_s=_REQUEST_TIMEOUT, + ) + assert result is None # nosec diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/errors.py new file mode 100644 index 00000000000..b297004e283 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/errors.py @@ -0,0 +1,12 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class BaseAgentRPCError(OsparcErrorMixin, Exception): + ... 
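The RPC pieces introduced above (``RPCRouter.expose``, ``register_router`` and ``request``) combine into a full remote-call round trip. A sketch under the assumption that ``RabbitMQRPCClient`` and ``RPCRouter`` are re-exported by ``servicelib.rabbitmq`` and that both sides share the same RabbitMQ settings; the ``demo`` namespace and the ``add`` method are illustrative only:

import asyncio

from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
from pydantic import TypeAdapter
from servicelib.rabbitmq import RabbitMQRPCClient, RPCRouter  # assumed re-exports
from settings_library.rabbit import RabbitSettings

router = RPCRouter()


@router.expose()
async def add(x: int, y: int) -> int:
    # unexpected exceptions raised here reach the caller as RPCServerError,
    # unless whitelisted via expose(reraise_if_error_type=...)
    return x + y


async def main() -> None:
    settings = RabbitSettings.create_from_envs()  # assumes RABBIT_* env vars are set
    namespace = RPCNamespace.from_entries({"service": "demo"})  # illustrative entries

    server = await RabbitMQRPCClient.create(client_name="demo-server", settings=settings)
    await server.register_router(router, namespace)

    client = await RabbitMQRPCClient.create(client_name="demo-client", settings=settings)
    result = await client.request(
        namespace,
        TypeAdapter(RPCMethodName).validate_python("add"),
        x=1,
        y=2,
        timeout_s=5,
    )
    assert result == 3

    await client.close()
    await server.close()


asyncio.run(main())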
+ + +class NoServiceVolumesFoundRPCError(BaseAgentRPCError): + msg_template: str = ( + "Could not detect any unused volumes after waiting '{period}' seconds for " + "volumes to be released after closing all container for service='{node_id}'" + ) diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py new file mode 100644 index 00000000000..41cf2ffd8b8 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/agent/volumes.py @@ -0,0 +1,62 @@ +import logging +from datetime import timedelta +from typing import Final + +from models_library.docker import DockerNodeID +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from pydantic import NonNegativeInt, TypeAdapter +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + +_REQUEST_TIMEOUT: Final[NonNegativeInt] = int(timedelta(minutes=60).total_seconds()) + + +@log_decorator(_logger, level=logging.DEBUG) +async def remove_volumes_without_backup_for_service( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + docker_node_id: DockerNodeID, + swarm_stack_name: str, + node_id: NodeID, +) -> None: + result = await rabbitmq_rpc_client.request( + RPCNamespace.from_entries( + { + "service": "agent", + "docker_node_id": docker_node_id, + "swarm_stack_name": swarm_stack_name, + } + ), + TypeAdapter(RPCMethodName).validate_python( + "remove_volumes_without_backup_for_service" + ), + node_id=node_id, + timeout_s=_REQUEST_TIMEOUT, + ) + assert result is None # nosec + + +@log_decorator(_logger, level=logging.DEBUG) +async def backup_and_remove_volumes_for_all_services( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + docker_node_id: DockerNodeID, + swarm_stack_name: str, +) -> None: + result = await rabbitmq_rpc_client.request( + RPCNamespace.from_entries( + { + "service": "agent", + "docker_node_id": docker_node_id, + "swarm_stack_name": swarm_stack_name, + } + ), + TypeAdapter(RPCMethodName).validate_python( + "backup_and_remove_volumes_for_all_services" + ), + timeout_s=_REQUEST_TIMEOUT, + ) + assert result is None # nosec diff --git a/services/storage/tests/fixtures/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/__init__.py similarity index 100% rename from services/storage/tests/fixtures/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py new file mode 100644 index 00000000000..f6e1954c936 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/async_jobs/async_jobs.py @@ -0,0 +1,275 @@ +import datetime +import logging +from asyncio import CancelledError +from collections.abc import AsyncGenerator, Awaitable +from typing import Any, Final + +from attr import dataclass +from models_library.api_schemas_rpc_async_jobs.async_jobs import ( + AsyncJobGet, + AsyncJobId, + AsyncJobNameData, + AsyncJobResult, + AsyncJobStatus, +) +from models_library.api_schemas_rpc_async_jobs.exceptions import JobMissingError +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from pydantic import NonNegativeInt, TypeAdapter 
+from tenacity import ( + AsyncRetrying, + TryAgain, + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, + stop_after_delay, + wait_fixed, + wait_random_exponential, +) + +from ....rabbitmq import RemoteMethodNotRegisteredError +from ... import RabbitMQRPCClient + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 30 +_DEFAULT_POLL_INTERVAL_S: Final[float] = 0.1 + +_logger = logging.getLogger(__name__) + + +async def cancel( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + job_id: AsyncJobId, + job_id_data: AsyncJobNameData, +) -> None: + await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("cancel"), + job_id=job_id, + job_id_data=job_id_data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + + +async def status( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + job_id: AsyncJobId, + job_id_data: AsyncJobNameData, +) -> AsyncJobStatus: + _result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("status"), + job_id=job_id, + job_id_data=job_id_data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(_result, AsyncJobStatus) + return _result + + +async def result( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + job_id: AsyncJobId, + job_id_data: AsyncJobNameData, +) -> AsyncJobResult: + _result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("result"), + job_id=job_id, + job_id_data=job_id_data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(_result, AsyncJobResult) + return _result + + +async def list_jobs( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + filter_: str, + job_id_data: AsyncJobNameData, +) -> list[AsyncJobGet]: + _result: list[AsyncJobGet] = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("list_jobs"), + filter_=filter_, + job_id_data=job_id_data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + return _result + + +async def submit( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + method_name: str, + job_id_data: AsyncJobNameData, + **kwargs, +) -> AsyncJobGet: + _result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python(method_name), + job_id_data=job_id_data, + **kwargs, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(_result, AsyncJobGet) # nosec + return _result + + +_DEFAULT_RPC_RETRY_POLICY: dict[str, Any] = { + "retry": retry_if_exception_type((RemoteMethodNotRegisteredError,)), + "wait": wait_random_exponential(max=20), + "stop": stop_after_attempt(30), + "reraise": True, + "before_sleep": before_sleep_log(_logger, logging.WARNING), +} + + +@retry(**_DEFAULT_RPC_RETRY_POLICY) +async def _wait_for_completion( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + method_name: RPCMethodName, + job_id: AsyncJobId, + job_id_data: AsyncJobNameData, + client_timeout: datetime.timedelta, +) -> AsyncGenerator[AsyncJobStatus, None]: + try: + async for attempt in AsyncRetrying( + stop=stop_after_delay(client_timeout.total_seconds()), + reraise=True, + retry=retry_if_exception_type((TryAgain, JobMissingError)), + before_sleep=before_sleep_log(_logger, logging.DEBUG), + wait=wait_fixed(_DEFAULT_POLL_INTERVAL_S), + ): + with attempt: + job_status = await status( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + job_id=job_id, + 
job_id_data=job_id_data, + ) + yield job_status + if not job_status.done: + msg = f"{job_status.job_id=}: '{job_status.progress=}'" + raise TryAgain(msg) # noqa: TRY301 + + except TryAgain as exc: + # this is a timeout + msg = f"Async job {job_id=}, calling to '{method_name}' timed-out after {client_timeout}" + raise TimeoutError(msg) from exc + + +@dataclass(frozen=True) +class AsyncJobComposedResult: + status: AsyncJobStatus + _result: Awaitable[Any] | None = None + + @property + def done(self) -> bool: + return self._result is not None + + async def result(self) -> Any: + if not self._result: + msg = "No result ready!" + raise ValueError(msg) + return await self._result + + +async def wait_and_get_result( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + method_name: str, + job_id: AsyncJobId, + job_id_data: AsyncJobNameData, + client_timeout: datetime.timedelta, +) -> AsyncGenerator[AsyncJobComposedResult, None]: + """when a job is already submitted this will wait for its completion + and return the composed result""" + try: + job_status = None + async for job_status in _wait_for_completion( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + method_name=method_name, + job_id=job_id, + job_id_data=job_id_data, + client_timeout=client_timeout, + ): + assert job_status is not None # nosec + yield AsyncJobComposedResult(job_status) + + # return the result + if job_status: + yield AsyncJobComposedResult( + job_status, + result( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + job_id=job_id, + job_id_data=job_id_data, + ), + ) + except (TimeoutError, CancelledError) as error: + try: + await cancel( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + job_id=job_id, + job_id_data=job_id_data, + ) + except Exception as exc: + raise exc from error # NOSONAR + raise + + +async def submit_and_wait( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + rpc_namespace: RPCNamespace, + method_name: str, + job_id_data: AsyncJobNameData, + client_timeout: datetime.timedelta, + **kwargs, +) -> AsyncGenerator[AsyncJobComposedResult, None]: + async_job_rpc_get = None + try: + async_job_rpc_get = await submit( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + method_name=method_name, + job_id_data=job_id_data, + **kwargs, + ) + except (TimeoutError, CancelledError) as error: + if async_job_rpc_get is not None: + try: + await cancel( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + job_id=async_job_rpc_get.job_id, + job_id_data=job_id_data, + ) + except Exception as exc: + raise exc from error + raise + + async for wait_and_ in wait_and_get_result( + rabbitmq_rpc_client, + rpc_namespace=rpc_namespace, + method_name=method_name, + job_id=async_job_rpc_get.job_id, + job_id_data=job_id_data, + client_timeout=client_timeout, + ): + yield wait_and_ diff --git a/services/storage/tests/helpers/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/__init__.py similarity index 100% rename from services/storage/tests/helpers/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/errors.py new file mode 100644 index 00000000000..906bc641665 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/errors.py @@ -0,0 +1,21 @@ +from common_library.errors_classes import OsparcErrorMixin + 
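``submit_and_wait`` above is an async generator: it yields intermediate ``AsyncJobComposedResult`` items carrying only the job status and, once the job is done, a final item whose ``result()`` can be awaited; on timeout or cancellation it tries to cancel the remote job. A usage sketch; the ``storage`` namespace entry, the ``export_data`` method name and the ``AsyncJobNameData`` constructor fields are assumptions made for illustration:

import datetime

from models_library.api_schemas_rpc_async_jobs.async_jobs import AsyncJobNameData
from models_library.rabbitmq_basic_types import RPCNamespace
from servicelib.rabbitmq import RabbitMQRPCClient
from servicelib.rabbitmq.rpc_interfaces.async_jobs.async_jobs import submit_and_wait


async def run_export(rpc_client: RabbitMQRPCClient, user_id: int, product_name: str) -> None:
    async for composed in submit_and_wait(
        rpc_client,
        rpc_namespace=RPCNamespace.from_entries({"service": "storage"}),  # illustrative
        method_name="export_data",  # must be exposed on the remote side
        job_id_data=AsyncJobNameData(user_id=user_id, product_name=product_name),  # field names assumed
        client_timeout=datetime.timedelta(minutes=5),
    ):
        if composed.done:
            print("job finished with:", await composed.result())
        else:
            print("still running, progress:", composed.status.progress)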
+ +class CatalogApiBaseError(OsparcErrorMixin, Exception): + pass + + +class CatalogInconsistentError(CatalogApiBaseError): + msg_template = "Catalog is inconsistent: The following services are in the database but missing in the registry manifest {missing_services}" + + +class CatalogItemNotFoundError(CatalogApiBaseError): + msg_template = "{name} was not found" + + +class CatalogForbiddenError(CatalogApiBaseError): + msg_template = "Insufficient access rights for {name}" + + +class CatalogNotAvailableError(CatalogApiBaseError): + msg_template = "Catalog service failed unexpectedly" diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/services.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/services.py new file mode 100644 index 00000000000..7ac19275fb6 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/catalog/services.py @@ -0,0 +1,308 @@ +"""RPC client-side for the RPC server at the catalog service + +In this interface (and all below), the context of the caller is passed in the following arguments: +- `user_id` is intended to be the caller's identifier. Do not pass a user_id that is not the caller's! + - Ideally this could be injected by an authentication layer (as in the rest API) + but for now we are passing it as an argument. +- `product_name` is the name of the product in the caller's context as well + +""" + +import logging +from typing import cast + +from models_library.api_schemas_catalog import CATALOG_RPC_NAMESPACE +from models_library.api_schemas_catalog.services import ( + LatestServiceGet, + MyServiceGet, + PageRpcLatestServiceGet, + PageRpcServiceRelease, + PageRpcServiceSummary, + ServiceGetV2, + ServiceListFilters, + ServiceRelease, + ServiceSummary, + ServiceUpdateV2, +) +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rest_pagination import PageOffsetInt +from models_library.rpc_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + PageLimitInt, + PageRpc, +) +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter, validate_call + +from ....logging_utils import log_decorator +from ..._client_rpc import RabbitMQRPCClient +from ..._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S + +_logger = logging.getLogger(__name__) + + +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_services_paginated( # pylint: disable=too-many-arguments + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcLatestServiceGet: + """ + Raises: + ValidationError: on invalid arguments + CatalogForbiddenError: no access-rights to list services + """ + + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_services_paginated"), + product_name=product_name, + user_id=user_id, + limit=limit, + offset=offset, + filters=filters, + timeout_s=40 * RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert ( # nosec + TypeAdapter(PageRpc[LatestServiceGet]).validate_python(result) is not None + ) + return cast(PageRpc[LatestServiceGet], result) + + +@validate_call(config={"arbitrary_types_allowed": True})
+@log_decorator(_logger, level=logging.DEBUG) +async def get_service( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> ServiceGetV2: + """ + Raises: + ValidationError: on invalid arguments + CatalogItemNotFoundError: service not found in catalog + CatalogForbiddenError: not access rights to read this service + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_service"), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + timeout_s=4 * RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + assert TypeAdapter(ServiceGetV2).validate_python(result) is not None # nosec + return cast(ServiceGetV2, result) + + +@validate_call(config={"arbitrary_types_allowed": True}) +@log_decorator(_logger, level=logging.DEBUG) +async def update_service( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + update: ServiceUpdateV2, +) -> ServiceGetV2: + """Updates editable fields of a service + + Raises: + ValidationError: on invalid arguments + CatalogItemNotFoundError: service not found in catalog + CatalogForbiddenError: not access rights to read this service + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("update_service"), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + update=update, + ) + assert TypeAdapter(ServiceGetV2).validate_python(result) is not None # nosec + return cast(ServiceGetV2, result) + + +@validate_call(config={"arbitrary_types_allowed": True}) +@log_decorator(_logger, level=logging.DEBUG) +async def check_for_service( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> None: + """ + + Raises: + ValidationError: on invalid arguments + CatalogItemNotFoundError: service not found in catalog + CatalogForbiddenError: not access rights to read this service + """ + await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("check_for_service"), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + + +@validate_call(config={"arbitrary_types_allowed": True}) +@log_decorator(_logger, level=logging.DEBUG) +async def batch_get_my_services( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + ids: list[ + tuple[ + ServiceKey, + ServiceVersion, + ] + ], +) -> list[MyServiceGet]: + """ + Raises: + ValidationError: on invalid arguments + CatalogForbiddenError: no access-rights to list services + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("batch_get_my_services"), + product_name=product_name, + user_id=user_id, + ids=ids, + timeout_s=40 * RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + assert TypeAdapter(list[MyServiceGet]).validate_python(result) is not None # nosec + return cast(list[MyServiceGet], result) + + +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_my_service_history_latest_first( # pylint: disable=too-many-arguments + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + limit: PageLimitInt = 
DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcServiceRelease: + """ + Sorts service releases by version (latest first) + Raises: + ValidationError: on invalid arguments + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python( + "list_my_service_history_latest_first" + ), + product_name=product_name, + user_id=user_id, + service_key=service_key, + limit=limit, + offset=offset, + filters=filters, + ) + assert ( # nosec + TypeAdapter(PageRpcServiceRelease).validate_python(result) is not None + ) + return cast(PageRpc[ServiceRelease], result) + + +@validate_call(config={"arbitrary_types_allowed": True}) +@log_decorator(_logger, level=logging.DEBUG) +async def get_service_ports( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[ServicePortGet]: + """Gets service ports (inputs and outputs) for a specific service version + + Raises: + ValidationError: on invalid arguments + CatalogItemNotFoundError: service not found in catalog + CatalogForbiddenError: not access rights to read this service + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_service_ports"), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + assert ( + TypeAdapter(list[ServicePortGet]).validate_python(result) is not None + ) # nosec + return cast(list[ServicePortGet], result) + + +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_all_services_summaries_paginated( # pylint: disable=too-many-arguments + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcServiceSummary: + """Lists all services with pagination, including all versions of each service. + + Returns a lightweight summary view of services for better performance. 
+ + Args: + rpc_client: RPC client instance + product_name: Product name + user_id: User ID + limit: Maximum number of items to return + offset: Number of items to skip + filters: Optional filters to apply + + Returns: + Paginated list of all services as summaries + + Raises: + ValidationError: on invalid arguments + CatalogForbiddenError: no access-rights to list services + """ + result = await rpc_client.request( + CATALOG_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python( + list_all_services_summaries_paginated.__name__ + ), + product_name=product_name, + user_id=user_id, + limit=limit, + offset=offset, + filters=filters, + timeout_s=40 * RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert ( + TypeAdapter(PageRpc[ServiceSummary]).validate_python(result) is not None + ) # nosec + return cast(PageRpc[ServiceSummary], result) diff --git a/services/web/server/src/simcore_service_webserver/clusters/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/__init__.py similarity index 100% rename from services/web/server/src/simcore_service_webserver/clusters/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py new file mode 100644 index 00000000000..ada0c66d26d --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/clusters.py @@ -0,0 +1,40 @@ +from typing import Final + +from aiocache import cached # type: ignore[import-untyped] +from models_library.api_schemas_clusters_keeper import CLUSTERS_KEEPER_RPC_NAMESPACE +from models_library.api_schemas_clusters_keeper.clusters import OnDemandCluster +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID +from models_library.wallets import WalletID + +from ....async_utils import run_sequentially_in_context +from ..._client_rpc import RabbitMQRPCClient +from ..._constants import RPC_REMOTE_METHOD_TIMEOUT_S + +_TTL_CACHE_ON_CLUSTERS_S: Final[int] = 5 + + +@run_sequentially_in_context(target_args=["user_id", "wallet_id"]) +@cached( + ttl=_TTL_CACHE_ON_CLUSTERS_S, + key_builder=lambda f, *_args, **kwargs: f"{f.__name__}_{kwargs['user_id']}_{kwargs['wallet_id']}", +) +async def get_or_create_cluster( + client: RabbitMQRPCClient, *, user_id: UserID, wallet_id: WalletID | None +) -> OnDemandCluster: + """**Remote method** + + Raises: + RPCServerError -- if anything happens remotely + """ + # NOTE: we tend to have burst of calls for the same cluster + # the 1st decorator ensure all of these go 1 by 1 + # the 2nd decorator ensure that many calls in a short time will return quickly the same value + on_demand_cluster: OnDemandCluster = await client.request( + CLUSTERS_KEEPER_RPC_NAMESPACE, + RPCMethodName("get_or_create_cluster"), + timeout_s=RPC_REMOTE_METHOD_TIMEOUT_S, + user_id=user_id, + wallet_id=wallet_id, + ) + return on_demand_cluster diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/ec2_instances.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/ec2_instances.py new file mode 100644 index 00000000000..9358c60f7ca --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/clusters_keeper/ec2_instances.py @@ -0,0 +1,26 @@ +from typing import Literal + +from 
models_library.api_schemas_clusters_keeper import CLUSTERS_KEEPER_RPC_NAMESPACE +from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet +from models_library.rabbitmq_basic_types import RPCMethodName + +from ..._client_rpc import RabbitMQRPCClient +from ..._constants import RPC_REMOTE_METHOD_TIMEOUT_S + + +async def get_instance_type_details( + client: RabbitMQRPCClient, *, instance_type_names: set[str] | Literal["ALL"] +) -> list[EC2InstanceTypeGet]: + """**Remote method** + + Raises: + RPCServerError -- if anything happens remotely + + """ + instance_types: list[EC2InstanceTypeGet] = await client.request( + CLUSTERS_KEEPER_RPC_NAMESPACE, + RPCMethodName("get_instance_type_details"), + timeout_s=RPC_REMOTE_METHOD_TIMEOUT_S, + instance_type_names=instance_type_names, + ) + return instance_types diff --git a/services/web/server/src/simcore_service_webserver/director/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/__init__.py similarity index 100% rename from services/web/server/src/simcore_service_webserver/director/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py new file mode 100644 index 00000000000..a24ed19aba9 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/director_v2/computations.py @@ -0,0 +1,116 @@ +# pylint: disable=too-many-arguments +import logging +from typing import Final + +from models_library.api_schemas_directorv2 import ( + DIRECTOR_V2_RPC_NAMESPACE, +) +from models_library.api_schemas_directorv2.comp_runs import ( + ComputationRunRpcGetPage, + ComputationTaskRpcGetPage, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rest_ordering import OrderBy +from models_library.users import UserID +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ... 
import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_computations_latest_iteration_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + # filters + filter_only_running: bool = False, + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationRunRpcGetPage: + result = await rabbitmq_rpc_client.request( + DIRECTOR_V2_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + "list_computations_latest_iteration_page" + ), + product_name=product_name, + user_id=user_id, + filter_only_running=filter_only_running, + offset=offset, + limit=limit, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, ComputationRunRpcGetPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_computations_iterations_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + project_ids: list[ProjectID], + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationRunRpcGetPage: + result = await rabbitmq_rpc_client.request( + DIRECTOR_V2_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("list_computations_iterations_page"), + product_name=product_name, + user_id=user_id, + project_ids=project_ids, + offset=offset, + limit=limit, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, ComputationRunRpcGetPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_computations_latest_iteration_tasks_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + project_ids: list[ProjectID], + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationTaskRpcGetPage: + result = await rabbitmq_rpc_client.request( + DIRECTOR_V2_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + "list_computations_latest_iteration_tasks_page" + ), + product_name=product_name, + user_id=user_id, + project_ids=project_ids, + offset=offset, + limit=limit, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, ComputationTaskRpcGetPage) # nosec + return result diff --git a/services/web/server/src/simcore_service_webserver/exporter/formatters/sds/xlsx/templates/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/__init__.py similarity index 100% rename from services/web/server/src/simcore_service_webserver/exporter/formatters/sds/xlsx/templates/__init__.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/errors.py new file mode 100644 index 00000000000..6d7bf2a722c --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/errors.py @@ -0,0 +1,13 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class BaseDynamicSchedulerRPCError(OsparcErrorMixin, Exception): + ... 
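The paginated director-v2 helpers above (e.g. ``list_computations_latest_iteration_page``) take ``offset``/``limit`` and return a page object. A paging-loop sketch; it assumes, without checking the actual schema, that ``ComputationRunRpcGetPage`` exposes ``items`` and ``total`` fields:

from collections.abc import AsyncIterator
from typing import Any

from models_library.products import ProductName
from models_library.users import UserID
from servicelib.rabbitmq import RabbitMQRPCClient
from servicelib.rabbitmq.rpc_interfaces.director_v2.computations import (
    list_computations_latest_iteration_page,
)


async def iter_latest_computations(
    rpc_client: RabbitMQRPCClient, *, product_name: ProductName, user_id: UserID
) -> AsyncIterator[Any]:
    offset, limit = 0, 20
    while True:
        page = await list_computations_latest_iteration_page(
            rpc_client,
            product_name=product_name,
            user_id=user_id,
            offset=offset,
            limit=limit,
        )
        # `items` / `total` are assumed field names on the page model
        for run in page.items:
            yield run
        offset += limit
        if offset >= page.total:
            break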
+ + +class ServiceWaitingForManualInterventionError(BaseDynamicSchedulerRPCError): + msg_template = "Service {node_id} waiting for manual intervention" + + +class ServiceWasNotFoundError(BaseDynamicSchedulerRPCError): + msg_template = "Service {node_id} was not found" diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py new file mode 100644 index 00000000000..fb3276ae670 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_scheduler/services.py @@ -0,0 +1,166 @@ +import logging +from typing import Final + +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_scheduler import DYNAMIC_SCHEDULER_RPC_NAMESPACE +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import NonNegativeInt, TypeAdapter +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + +# the webserver's director-v2 plugin internally uses a +# 20 second default timeout for all HTTP calls +DEFAULT_LEGACY_WB_TO_DV2_HTTP_REQUESTS_TIMEOUT_S: Final[NonNegativeInt] = 20 + +# make sure RPC calls time out after the HTTP requests +# from dynamic-scheduler to director-v2 time out +_RPC_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = int( + DEFAULT_LEGACY_WB_TO_DV2_HTTP_REQUESTS_TIMEOUT_S * 2 +) + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_tracked_dynamic_services( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID | None = None, + project_id: ProjectID | None = None, +) -> list[DynamicServiceGet]: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("list_tracked_dynamic_services"), + user_id=user_id, + project_id=project_id, + timeout_s=_RPC_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, list) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_service_status( + rabbitmq_rpc_client: RabbitMQRPCClient, *, node_id: NodeID +) -> NodeGetIdle | DynamicServiceGet | NodeGet: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_service_status"), + node_id=node_id, + timeout_s=_RPC_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, NodeGetIdle | DynamicServiceGet | NodeGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def run_dynamic_service( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + dynamic_service_start: DynamicServiceStart, +) -> DynamicServiceGet | NodeGet: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("run_dynamic_service"), + dynamic_service_start=dynamic_service_start, + 
timeout_s=_RPC_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, DynamicServiceGet | NodeGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def stop_dynamic_service( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + dynamic_service_stop: DynamicServiceStop, + timeout_s: NonNegativeInt, +) -> None: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("stop_dynamic_service"), + dynamic_service_stop=dynamic_service_stop, + timeout_s=timeout_s, + ) + assert result is None # nosec + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_project_inactivity( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + project_id: ProjectID, + max_inactivity_seconds: NonNegativeInt, +) -> GetProjectInactivityResponse: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_project_inactivity"), + project_id=project_id, + max_inactivity_seconds=max_inactivity_seconds, + timeout_s=_RPC_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, GetProjectInactivityResponse) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def restart_user_services( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + node_id: NodeID, + timeout_s: NonNegativeInt, +) -> None: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("restart_user_services"), + node_id=node_id, + timeout_s=timeout_s, + ) + assert result is None # nosec + + +@log_decorator(_logger, level=logging.DEBUG) +async def retrieve_inputs( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + node_id: NodeID, + port_keys: list[ServicePortKey], + timeout_s: NonNegativeInt, +) -> RetrieveDataOutEnveloped: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("retrieve_inputs"), + node_id=node_id, + port_keys=port_keys, + timeout_s=timeout_s, + ) + assert isinstance(result, RetrieveDataOutEnveloped) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_projects_networks( + rabbitmq_rpc_client: RabbitMQRPCClient, *, project_id: ProjectID +) -> None: + result = await rabbitmq_rpc_client.request( + DYNAMIC_SCHEDULER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("update_projects_networks"), + project_id=project_id, + timeout_s=_RPC_DEFAULT_TIMEOUT_S, + ) + assert result is None # nosec diff --git a/services/osparc-gateway-server/requirements/constraints.txt b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/__init__.py similarity index 100% rename from services/osparc-gateway-server/requirements/constraints.txt rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/_utils.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/_utils.py new file mode 100644 index 00000000000..6a1aa3cf6ab --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/_utils.py @@ -0,0 +1,6 @@ +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCNamespace + + +def get_rpc_namespace(node_id: NodeID) -> RPCNamespace: + return RPCNamespace.from_entries({"service": "dy-sidecar", "node_id": f"{node_id}"}) diff --git 
a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk.py new file mode 100644 index 00000000000..b069efbdbd2 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk.py @@ -0,0 +1,25 @@ +import logging + +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName +from pydantic import TypeAdapter + +from ....logging_utils import log_decorator +from ... import RabbitMQRPCClient +from ._utils import get_rpc_namespace + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def free_reserved_disk_space( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + node_id: NodeID, +) -> None: + rpc_namespace = get_rpc_namespace(node_id) + result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("free_reserved_disk_space"), + ) + assert result is None # nosec diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk_usage.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk_usage.py new file mode 100644 index 00000000000..3782b4f9dcd --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/disk_usage.py @@ -0,0 +1,28 @@ +import logging + +from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName +from pydantic import TypeAdapter + +from ....logging_utils import log_decorator +from ... import RabbitMQRPCClient +from ._utils import get_rpc_namespace + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_disk_usage( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + node_id: NodeID, + usage: dict[str, DiskUsage], +) -> None: + rpc_namespace = get_rpc_namespace(node_id) + result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("update_disk_usage"), + usage=usage, + ) + assert result is None # nosec diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py new file mode 100644 index 00000000000..00fb9e78d72 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/dynamic_sidecar/volumes.py @@ -0,0 +1,30 @@ +import logging + +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from pydantic import TypeAdapter + +from ....logging_utils import log_decorator +from ... 
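
The dynamic-sidecar interfaces differ from the service-wide ones in that each call targets a single sidecar: the RPC namespace is derived from the node ID via get_rpc_namespace, so callers never compose it themselves. A short sketch of the disk interface, assuming a connected RabbitMQRPCClient (hypothetical, not part of this diff):

    from models_library.projects_nodes_io import NodeID
    from servicelib.rabbitmq import RabbitMQRPCClient
    from servicelib.rabbitmq.rpc_interfaces.dynamic_sidecar import disk


    async def reclaim_reserved_space(rpc_client: RabbitMQRPCClient, *, node_id: NodeID) -> None:
        # the helper resolves the per-node "dy-sidecar" namespace internally,
        # so callers only need the node ID of the target sidecar
        await disk.free_reserved_disk_space(rpc_client, node_id=node_id)
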
import RabbitMQRPCClient +from ._utils import get_rpc_namespace + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def save_volume_state( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + node_id: NodeID, + status: VolumeStatus, + category: VolumeCategory, +) -> None: + rpc_namespace = get_rpc_namespace(node_id) + result = await rabbitmq_rpc_client.request( + rpc_namespace, + TypeAdapter(RPCMethodName).validate_python("save_volume_state"), + status=status, + category=category, + ) + assert result is None # nosec diff --git a/services/osparc-gateway-server/tests/unit/test_osparc.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/efs_guardian/__init__.py similarity index 100% rename from services/osparc-gateway-server/tests/unit/test_osparc.py rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/efs_guardian/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/efs_guardian/efs_manager.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/efs_guardian/efs_manager.py new file mode 100644 index 00000000000..ec05906b1ef --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/efs_guardian/efs_manager.py @@ -0,0 +1,36 @@ +import logging +from pathlib import Path +from typing import Final + +from models_library.api_schemas_efs_guardian import EFS_GUARDIAN_RPC_NAMESPACE +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_basic_types import RPCMethodName +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + + +@log_decorator(_logger, level=logging.DEBUG) +async def create_project_specific_data_dir( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + project_id: ProjectID, + node_id: NodeID, + storage_directory_name: str, +) -> Path: + output: Path = await rabbitmq_rpc_client.request( + EFS_GUARDIAN_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("create_project_specific_data_dir"), + project_id=project_id, + node_id=node_id, + storage_directory_name=storage_directory_name, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + return output diff --git a/tests/performance/locust_report/.gitkeep b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/__init__.py similarity index 100% rename from tests/performance/locust_report/.gitkeep rename to packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/__init__.py diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/credit_transactions.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/credit_transactions.py new file mode 100644 index 00000000000..17740881bb6 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/credit_transactions.py @@ -0,0 +1,104 @@ +import logging +from decimal import Decimal +from typing import Final + +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreateBody, + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects 
import ProjectID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker import CreditTransactionStatus +from models_library.services_types import ServiceRunID +from models_library.wallets import WalletID +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_wallet_total_credits( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + wallet_id: WalletID, +) -> WalletTotalCredits: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_wallet_total_credits"), + product_name=product_name, + wallet_id=wallet_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, WalletTotalCredits) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_project_wallet_total_credits( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + wallet_id: WalletID, + project_id: ProjectID, + transaction_status: CreditTransactionStatus | None = None, +) -> WalletTotalCredits: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_project_wallet_total_credits"), + product_name=product_name, + wallet_id=wallet_id, + project_id=project_id, + transaction_status=transaction_status, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, WalletTotalCredits) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def pay_project_debt( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + project_id: ProjectID, + current_wallet_transaction: CreditTransactionCreateBody, + new_wallet_transaction: CreditTransactionCreateBody, +) -> None: + await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("pay_project_debt"), + project_id=project_id, + current_wallet_transaction=current_wallet_transaction, + new_wallet_transaction=new_wallet_transaction, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_transaction_current_credits_by_service_run_id( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + service_run_id: ServiceRunID, +) -> Decimal: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + "get_transaction_current_credits_by_service_run_id" + ), + service_run_id=service_run_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, Decimal) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/errors.py new file mode 100644 index 00000000000..ab11d42deeb --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/errors.py @@ -0,0 +1,49 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class LicensesBaseError(OsparcErrorMixin, Exception): ... + + +class NotEnoughAvailableSeatsError(LicensesBaseError): + msg_template = "Not enough available seats. 
Current available seats {available_num_of_seats} for license item {license_item_id}" + + +class CanNotCheckoutNotEnoughAvailableSeatsError(LicensesBaseError): + msg_template = "Can not checkout license item {licensed_item_id} with num of seats {num_of_seats}. Currently available seats {available_num_of_seats}" + + +class CanNotCheckoutServiceIsNotRunningError(LicensesBaseError): + msg_template = "Can not checkout license item {licensed_item_id} as dynamic service is not running. Current service {service_run}" + + +class LicensedItemCheckoutNotFoundError(LicensesBaseError): + msg_template = "Licensed item checkout {licensed_item_checkout_id} not found." + + +LICENSES_ERRORS = ( + NotEnoughAvailableSeatsError, + CanNotCheckoutNotEnoughAvailableSeatsError, + CanNotCheckoutServiceIsNotRunningError, + LicensedItemCheckoutNotFoundError, +) + + +### Transaction Error + + +class WalletTransactionError(OsparcErrorMixin, Exception): + msg_template = "{msg}" + + +class CreditTransactionNotFoundError(OsparcErrorMixin, Exception): + msg_template = "Credit transaction for service run id {service_run_id} not found." + + +### Pricing Plans Error + + +class PricingPlanBaseError(OsparcErrorMixin, Exception): ... + + +class PricingUnitDuplicationError(PricingPlanBaseError): + msg_template = "Pricing unit with that name already exists in given product." diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_checkouts.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_checkouts.py new file mode 100644 index 00000000000..5203fb9d2d5 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_checkouts.py @@ -0,0 +1,130 @@ +import logging +from typing import Final + +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.licensed_items_checkouts import ( + LicensedItemCheckoutGet, + LicensedItemsCheckoutsPage, +) +from models_library.basic_types import IDStr +from models_library.licenses import LicensedItemID +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.rest_ordering import OrderBy +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ... 
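
The license-related errors above are grouped in LICENSES_ERRORS so a caller can catch the whole family in a single except clause. A small sketch, assuming OsparcErrorMixin renders msg_template from the constructor keyword arguments (as the PydanticErrorMixin-based errors it replaces did); the values are made up:

    from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import (
        LICENSES_ERRORS,
        NotEnoughAvailableSeatsError,
    )


    def handle_checkout_failure() -> None:
        try:
            # keyword arguments are expected to fill the msg_template placeholders
            raise NotEnoughAvailableSeatsError(
                license_item_id="some-item-id", available_num_of_seats=0
            )
        except LICENSES_ERRORS as err:  # catches any error of the license family
            print(f"checkout rejected: {err}")
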
import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 30 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_licensed_item_checkout( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + licensed_item_checkout_id: LicensedItemCheckoutID, +) -> LicensedItemCheckoutGet: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_licensed_item_checkout"), + product_name=product_name, + licensed_item_checkout_id=licensed_item_checkout_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemCheckoutGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_licensed_items_checkouts_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: int = 0, + limit: int = 20, + order_by: OrderBy | None = None, +) -> LicensedItemsCheckoutsPage: + """ + Default order_by field is "started_at" + """ + if order_by is None: + order_by = OrderBy(field=IDStr("started_at")) + + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_licensed_items_checkouts_page"), + product_name=product_name, + filter_wallet_id=filter_wallet_id, + limit=limit, + offset=offset, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemsCheckoutsPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def checkout_licensed_item( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + licensed_item_id: LicensedItemID, + key: str, + version: str, + wallet_id: WalletID, + product_name: ProductName, + num_of_seats: int, + service_run_id: ServiceRunID, + user_id: UserID, + user_email: str, +) -> LicensedItemCheckoutGet: + result: LicensedItemCheckoutGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("checkout_licensed_item"), + licensed_item_id=licensed_item_id, + key=key, + version=version, + wallet_id=wallet_id, + product_name=product_name, + num_of_seats=num_of_seats, + service_run_id=service_run_id, + user_id=user_id, + user_email=user_email, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemCheckoutGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def release_licensed_item( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + licensed_item_checkout_id: LicensedItemCheckoutID, + product_name: ProductName, +) -> LicensedItemCheckoutGet: + result: LicensedItemCheckoutGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("release_licensed_item"), + licensed_item_checkout_id=licensed_item_checkout_id, + product_name=product_name, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemCheckoutGet) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_purchases.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_purchases.py new file mode 100644 index 00000000000..125dbe655a0 --- /dev/null +++ 
b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/licensed_items_purchases.py @@ -0,0 +1,92 @@ +import logging +from typing import Final + +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.licensed_items_purchases import ( + LicensedItemPurchaseGet, + LicensedItemsPurchasesPage, +) +from models_library.basic_types import IDStr +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, + LicensedItemsPurchasesCreate, +) +from models_library.rest_ordering import OrderBy +from models_library.wallets import WalletID +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 30 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_licensed_items_purchases_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + wallet_id: WalletID, + offset: int = 0, + limit: int = 20, + order_by: OrderBy | None = None, +) -> LicensedItemsPurchasesPage: + """ + Default order_by field is "purchased_at" + """ + if order_by is None: + order_by = OrderBy(field=IDStr("purchased_at")) + + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_licensed_items_purchases_page"), + product_name=product_name, + wallet_id=wallet_id, + limit=limit, + offset=offset, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemsPurchasesPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_licensed_item_purchase( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + licensed_item_purchase_id: LicensedItemPurchaseID, +) -> LicensedItemPurchaseGet: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_licensed_item_purchase"), + product_name=product_name, + licensed_item_purchase_id=licensed_item_purchase_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemPurchaseGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def create_licensed_item_purchase( + rabbitmq_rpc_client: RabbitMQRPCClient, *, data: LicensedItemsPurchasesCreate +) -> LicensedItemPurchaseGet: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("create_licensed_item_purchase"), + data=data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, LicensedItemPurchaseGet) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_plans.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_plans.py new file mode 100644 index 00000000000..4faa6fa3f0c --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_plans.py @@ -0,0 +1,149 @@ +import logging +from typing import Final + +from 
models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + PricingPlanToServiceGet, + RutPricingPlanGet, + RutPricingPlanPage, +) +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker import ( + PricingPlanCreate, + PricingPlanId, + PricingPlanUpdate, +) +from models_library.services import ServiceKey, ServiceVersion +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_pricing_plan( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> RutPricingPlanGet: + result: RutPricingPlanGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_pricing_plan"), + product_name=product_name, + pricing_plan_id=pricing_plan_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingPlanGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_pricing_plans_without_pricing_units( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + exclude_inactive: bool = True, + # pagination + offset: int = 0, + limit: int = 20, +) -> RutPricingPlanPage: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + "list_pricing_plans_without_pricing_units" + ), + product_name=product_name, + exclude_inactive=exclude_inactive, + offset=offset, + limit=limit, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingPlanPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def create_pricing_plan( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + data: PricingPlanCreate, +) -> RutPricingPlanGet: + result: RutPricingPlanGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("create_pricing_plan"), + data=data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingPlanGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_pricing_plan( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + data: PricingPlanUpdate, +) -> RutPricingPlanGet: + result: RutPricingPlanGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("update_pricing_plan"), + product_name=product_name, + data=data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingPlanGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_connected_services_to_pricing_plan_by_pricing_plan( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> list[PricingPlanToServiceGet]: + result: RutPricingPlanGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + 
"list_connected_services_to_pricing_plan_by_pricing_plan" + ), + product_name=product_name, + pricing_plan_id=pricing_plan_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, list) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def connect_service_to_pricing_plan( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> PricingPlanToServiceGet: + result: RutPricingPlanGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("connect_service_to_pricing_plan"), + product_name=product_name, + pricing_plan_id=pricing_plan_id, + service_key=service_key, + service_version=service_version, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, PricingPlanToServiceGet) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_units.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_units.py new file mode 100644 index 00000000000..09a47be8281 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/pricing_units.py @@ -0,0 +1,84 @@ +import logging +from typing import Final + +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingUnitGet, +) +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker import ( + PricingPlanId, + PricingUnitId, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, +) +from pydantic import NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_pricing_unit( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, +) -> RutPricingUnitGet: + result: RutPricingUnitGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_pricing_unit"), + product_name=product_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingUnitGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def create_pricing_unit( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + data: PricingUnitWithCostCreate, +) -> RutPricingUnitGet: + result: RutPricingUnitGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("create_pricing_unit"), + product_name=product_name, + data=data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingUnitGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_pricing_unit( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + data: PricingUnitWithCostUpdate, +) -> 
RutPricingUnitGet: + result: RutPricingUnitGet = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("update_pricing_unit"), + product_name=product_name, + data=data, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, RutPricingUnitGet) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/service_runs.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/service_runs.py new file mode 100644 index 00000000000..0f1191ebe4f --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/resource_usage_tracker/service_runs.py @@ -0,0 +1,128 @@ +# pylint: disable=too-many-arguments +import logging +from typing import Final + +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + OsparcCreditsAggregatedUsagesPage, + ServiceRunPage, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker import ( + CreditTransactionStatus, + ServiceResourceUsagesFilters, + ServicesAggregatedUsagesTimePeriod, + ServicesAggregatedUsagesType, +) +from models_library.rest_ordering import OrderBy +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import AnyUrl, NonNegativeInt, TypeAdapter + +from ....logging_utils import log_decorator +from ....rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +_DEFAULT_TIMEOUT_S: Final[NonNegativeInt] = 20 + +_RPC_METHOD_NAME_ADAPTER: TypeAdapter[RPCMethodName] = TypeAdapter(RPCMethodName) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_service_run_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + filters: ServiceResourceUsagesFilters | None = None, + transaction_status: CreditTransactionStatus | None = None, + project_id: ProjectID | None = None, + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ServiceRunPage: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("get_service_run_page"), + user_id=user_id, + product_name=product_name, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + filters=filters, + transaction_status=transaction_status, + project_id=project_id, + offset=offset, + limit=limit, + order_by=order_by, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, ServiceRunPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_osparc_credits_aggregated_usages_page( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + aggregated_by: ServicesAggregatedUsagesType, + time_period: ServicesAggregatedUsagesTimePeriod, + limit: int = 20, + offset: int = 0, + wallet_id: WalletID, + access_all_wallet_usage: bool = False, +) -> OsparcCreditsAggregatedUsagesPage: + result = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python( + "get_osparc_credits_aggregated_usages_page" + ), 
+ user_id=user_id, + product_name=product_name, + limit=limit, + offset=offset, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + aggregated_by=aggregated_by, + time_period=time_period, + timeout_s=60, + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def export_service_runs( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + order_by: OrderBy | None = None, + filters: ServiceResourceUsagesFilters | None = None, +) -> AnyUrl: + result: AnyUrl = await rabbitmq_rpc_client.request( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, + _RPC_METHOD_NAME_ADAPTER.validate_python("export_service_runs"), + user_id=user_id, + product_name=product_name, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + order_by=order_by, + filters=filters, + timeout_s=_DEFAULT_TIMEOUT_S, + ) + assert isinstance(result, AnyUrl) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py new file mode 100644 index 00000000000..c1049bfc1bb --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/paths.py @@ -0,0 +1,54 @@ +from pathlib import Path + +from models_library.api_schemas_rpc_async_jobs.async_jobs import ( + AsyncJobGet, + AsyncJobNameData, +) +from models_library.api_schemas_storage import STORAGE_RPC_NAMESPACE +from models_library.products import ProductName +from models_library.projects_nodes_io import LocationID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID + +from ..._client_rpc import RabbitMQRPCClient +from ..async_jobs.async_jobs import submit + + +async def compute_path_size( + client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + location_id: LocationID, + path: Path, +) -> tuple[AsyncJobGet, AsyncJobNameData]: + job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name) + async_job_rpc_get = await submit( + rabbitmq_rpc_client=client, + rpc_namespace=STORAGE_RPC_NAMESPACE, + method_name=RPCMethodName("compute_path_size"), + job_id_data=job_id_data, + location_id=location_id, + path=path, + ) + return async_job_rpc_get, job_id_data + + +async def delete_paths( + client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + location_id: LocationID, + paths: set[Path], +) -> tuple[AsyncJobGet, AsyncJobNameData]: + job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name) + async_job_rpc_get = await submit( + rabbitmq_rpc_client=client, + rpc_namespace=STORAGE_RPC_NAMESPACE, + method_name=RPCMethodName("delete_paths"), + job_id_data=job_id_data, + location_id=location_id, + paths=paths, + ) + return async_job_rpc_get, job_id_data diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py new file mode 100644 index 00000000000..df78448a575 --- /dev/null +++ 
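
The storage path helpers above do not return the computed value directly: they submit an async job through the generic async-jobs RPC interface and hand back the (AsyncJobGet, AsyncJobNameData) pair that the caller later uses to track and collect the result. A usage sketch for compute_path_size, assuming the client and identifiers already exist (hypothetical):

    from pathlib import Path

    from models_library.api_schemas_rpc_async_jobs.async_jobs import (
        AsyncJobGet,
        AsyncJobNameData,
    )
    from models_library.products import ProductName
    from models_library.projects_nodes_io import LocationID
    from models_library.users import UserID
    from servicelib.rabbitmq import RabbitMQRPCClient
    from servicelib.rabbitmq.rpc_interfaces.storage.paths import compute_path_size


    async def request_path_size(
        rpc_client: RabbitMQRPCClient,
        *,
        user_id: UserID,
        product_name: ProductName,
        location_id: LocationID,
        path: Path,
    ) -> tuple[AsyncJobGet, AsyncJobNameData]:
        # only submits the job; progress and the final size are retrieved later
        # through the async-jobs RPC interface using the returned handles
        return await compute_path_size(
            rpc_client,
            user_id=user_id,
            product_name=product_name,
            location_id=location_id,
            path=path,
        )
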
b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/storage/simcore_s3.py @@ -0,0 +1,50 @@ +from models_library.api_schemas_rpc_async_jobs.async_jobs import ( + AsyncJobGet, + AsyncJobNameData, +) +from models_library.api_schemas_storage import STORAGE_RPC_NAMESPACE +from models_library.api_schemas_storage.storage_schemas import FoldersBody +from models_library.api_schemas_webserver.storage import PathToExport +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID +from pydantic import TypeAdapter + +from ... import RabbitMQRPCClient +from ..async_jobs.async_jobs import submit + + +async def copy_folders_from_project( + client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + body: FoldersBody, +) -> tuple[AsyncJobGet, AsyncJobNameData]: + job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name) + async_job_rpc_get = await submit( + rabbitmq_rpc_client=client, + rpc_namespace=STORAGE_RPC_NAMESPACE, + method_name=RPCMethodName("copy_folders_from_project"), + job_id_data=job_id_data, + body=body, + ) + return async_job_rpc_get, job_id_data + + +async def start_export_data( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: ProductName, + paths_to_export: list[PathToExport], +) -> tuple[AsyncJobGet, AsyncJobNameData]: + job_id_data = AsyncJobNameData(user_id=user_id, product_name=product_name) + async_job_rpc_get = await submit( + rabbitmq_rpc_client, + rpc_namespace=STORAGE_RPC_NAMESPACE, + method_name=TypeAdapter(RPCMethodName).validate_python("start_export_data"), + job_id_data=job_id_data, + paths_to_export=paths_to_export, + ) + return async_job_rpc_get, job_id_data diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py new file mode 100644 index 00000000000..0358a0e3b6a --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/auth/api_keys.py @@ -0,0 +1,69 @@ +import logging + +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.basic_types import IDStr +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rpc.webserver.auth.api_keys import ApiKeyCreate, ApiKeyGet +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def create_api_key( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: str, + api_key: ApiKeyCreate, +) -> ApiKeyGet: + result: ApiKeyGet = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("create_api_key"), + user_id=user_id, + product_name=product_name, + 
display_name=api_key.display_name, + expiration=api_key.expiration, + ) + assert isinstance(result, ApiKeyGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_api_key( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: str, + api_key_id: IDStr, +) -> ApiKeyGet: + result: ApiKeyGet = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_api_key"), + user_id=user_id, + product_name=product_name, + api_key_id=api_key_id, + ) + assert isinstance(result, ApiKeyGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def delete_api_key_by_key( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + product_name: str, + api_key: str, +) -> None: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_api_key_by_key"), + user_id=user_id, + product_name=product_name, + api_key=api_key, + ) + assert result is None # nosec diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py new file mode 100644 index 00000000000..e0c3fc2419a --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/errors.py @@ -0,0 +1,7 @@ +from ..._errors import RPCInterfaceError + + +class ProjectNotFoundRpcError(RPCInterfaceError): ... + + +class ProjectForbiddenRpcError(RPCInterfaceError): ... diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py new file mode 100644 index 00000000000..5d1a4b756c5 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/functions/functions_rpc_interface.py @@ -0,0 +1,316 @@ +import logging + +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.api_schemas_webserver.functions import ( + Function, + FunctionID, + FunctionInputs, + FunctionInputSchema, + FunctionJob, + FunctionJobCollection, + FunctionJobCollectionID, + FunctionJobCollectionsListFilters, + FunctionJobID, + FunctionOutputSchema, + RegisteredFunction, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, +) +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rest_pagination import PageMetaInfoLimitOffset +from pydantic import TypeAdapter + +from .....logging_utils import log_decorator +from .... 
import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def register_function( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function: Function, +) -> RegisteredFunction: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("register_function"), + function=function, + ) + return TypeAdapter(RegisteredFunction).validate_python( + result + ) # Validates the result as a RegisteredFunction + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_function( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, +) -> RegisteredFunction: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_function"), + function_id=function_id, + ) + return TypeAdapter(RegisteredFunction).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_function_input_schema( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, +) -> FunctionInputSchema: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_function_input_schema"), + function_id=function_id, + ) + return TypeAdapter(FunctionInputSchema).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_function_output_schema( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, +) -> FunctionOutputSchema: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_function_output_schema"), + function_id=function_id, + ) + return TypeAdapter(FunctionOutputSchema).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def delete_function( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, +) -> None: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_function"), + function_id=function_id, + ) + assert result is None # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_functions( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + pagination_offset: int, + pagination_limit: int, +) -> tuple[list[RegisteredFunction], PageMetaInfoLimitOffset]: + result: tuple[list[RegisteredFunction], PageMetaInfoLimitOffset] = ( + await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_functions"), + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) + ) + return TypeAdapter( + tuple[list[RegisteredFunction], PageMetaInfoLimitOffset] + ).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_function_jobs( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + pagination_limit: int, + pagination_offset: int, + filter_by_function_id: FunctionID | None = None, +) -> tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset]: + result: tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset] = ( + await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_function_jobs"), + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filter_by_function_id=filter_by_function_id, + ) + ) + return TypeAdapter( + tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset] + 
).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def list_function_job_collections( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + pagination_limit: int, + pagination_offset: int, + filters: FunctionJobCollectionsListFilters | None = None, +) -> tuple[list[RegisteredFunctionJobCollection], PageMetaInfoLimitOffset]: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_function_job_collections"), + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filters=filters, + ) + return TypeAdapter( + tuple[list[RegisteredFunctionJobCollection], PageMetaInfoLimitOffset] + ).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_function_title( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, + title: str, +) -> RegisteredFunction: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("update_function_title"), + function_id=function_id, + title=title, + ) + return TypeAdapter(RegisteredFunction).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def update_function_description( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, + description: str, +) -> RegisteredFunction: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("update_function_description"), + function_id=function_id, + description=description, + ) + return TypeAdapter(RegisteredFunction).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def run_function( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, + inputs: FunctionInputs, +) -> RegisteredFunctionJob: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("run_function"), + function_id=function_id, + inputs=inputs, + ) + return TypeAdapter(RegisteredFunctionJob).validate_python( + result + ) # Validates the result as a RegisteredFunctionJob + + +@log_decorator(_logger, level=logging.DEBUG) +async def register_function_job( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job: FunctionJob, +) -> RegisteredFunctionJob: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("register_function_job"), + function_job=function_job, + ) + return TypeAdapter(RegisteredFunctionJob).validate_python( + result + ) # Validates the result as a RegisteredFunctionJob + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_function_job( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job_id: FunctionJobID, +) -> RegisteredFunctionJob: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_function_job"), + function_job_id=function_job_id, + ) + + return TypeAdapter(RegisteredFunctionJob).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def delete_function_job( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job_id: FunctionJobID, +) -> None: + result: None = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_function_job"), + function_job_id=function_job_id, + ) + assert result is None # nosec + + +@log_decorator(_logger, level=logging.DEBUG) +async def 
find_cached_function_job( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_id: FunctionID, + inputs: FunctionInputs, +) -> RegisteredFunctionJob | None: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("find_cached_function_job"), + function_id=function_id, + inputs=inputs, + ) + if result is None: + return None + return TypeAdapter(RegisteredFunctionJob).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def register_function_job_collection( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job_collection: FunctionJobCollection, +) -> RegisteredFunctionJobCollection: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("register_function_job_collection"), + function_job_collection=function_job_collection, + ) + return TypeAdapter(RegisteredFunctionJobCollection).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_function_job_collection( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job_collection_id: FunctionJobCollectionID, +) -> RegisteredFunctionJobCollection: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_function_job_collection"), + function_job_collection_id=function_job_collection_id, + ) + return TypeAdapter(RegisteredFunctionJobCollection).validate_python(result) + + +@log_decorator(_logger, level=logging.DEBUG) +async def delete_function_job_collection( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + function_job_collection_id: FunctionJobCollectionID, +) -> None: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_function_job_collection"), + function_job_collection_id=function_job_collection_id, + ) + assert result is None # nosec diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/__init__.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py new file mode 100644 index 00000000000..acb367de27b --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/licenses/licensed_items.py @@ -0,0 +1,109 @@ +import logging + +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.api_schemas_webserver.licensed_items import LicensedItemRpcGetPage +from models_library.api_schemas_webserver.licensed_items_checkouts import ( + LicensedItemCheckoutRpcGet, +) +from models_library.licenses import LicensedItemID +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +async def 
get_licensed_items( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: str, + offset: int = 0, + limit: int = 20, +) -> LicensedItemRpcGetPage: + result: LicensedItemRpcGetPage = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_licensed_items"), + product_name=product_name, + offset=offset, + limit=limit, + ) + assert isinstance(result, LicensedItemRpcGetPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def get_available_licensed_items_for_wallet( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + wallet_id: WalletID, + user_id: UserID, + offset: int = 0, + limit: int = 20, +) -> LicensedItemRpcGetPage: + result: LicensedItemRpcGetPage = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python( + "get_available_licensed_items_for_wallet" + ), + product_name=product_name, + user_id=user_id, + wallet_id=wallet_id, + offset=offset, + limit=limit, + ) + assert isinstance(result, LicensedItemRpcGetPage) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def checkout_licensed_item_for_wallet( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + wallet_id: WalletID, + licensed_item_id: LicensedItemID, + num_of_seats: int, + service_run_id: ServiceRunID, +) -> LicensedItemCheckoutRpcGet: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("checkout_licensed_item_for_wallet"), + licensed_item_id=licensed_item_id, + product_name=product_name, + user_id=user_id, + wallet_id=wallet_id, + num_of_seats=num_of_seats, + service_run_id=service_run_id, + ) + assert isinstance(result, LicensedItemCheckoutRpcGet) # nosec + return result + + +@log_decorator(_logger, level=logging.DEBUG) +async def release_licensed_item_for_wallet( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + licensed_item_checkout_id: LicensedItemCheckoutID, +) -> LicensedItemCheckoutRpcGet: + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("release_licensed_item_for_wallet"), + product_name=product_name, + user_id=user_id, + licensed_item_checkout_id=licensed_item_checkout_id, + ) + assert isinstance(result, LicensedItemCheckoutRpcGet) # nosec + return result diff --git a/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py new file mode 100644 index 00000000000..15f40d66011 --- /dev/null +++ b/packages/service-library/src/servicelib/rabbitmq/rpc_interfaces/webserver/projects.py @@ -0,0 +1,69 @@ +import logging +from typing import cast + +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rest_pagination import PageOffsetInt +from models_library.rpc.webserver.projects import ( + ListProjectsMarkedAsJobRpcFilters, + PageRpcProjectJobRpcGet, +) +from models_library.rpc_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + PageLimitInt, +) +from models_library.users import UserID +from pydantic import TypeAdapter, validate_call +from servicelib.logging_utils import log_decorator +from 
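
A sketch of the webserver licensed-items round trip (checkout followed by release), assuming a connected RabbitMQRPCClient and that the returned LicensedItemCheckoutRpcGet exposes its licensed_item_checkout_id (an assumption, not confirmed by this diff):

    from models_library.licenses import LicensedItemID
    from models_library.products import ProductName
    from models_library.services_types import ServiceRunID
    from models_library.users import UserID
    from models_library.wallets import WalletID
    from servicelib.rabbitmq import RabbitMQRPCClient
    from servicelib.rabbitmq.rpc_interfaces.webserver.licenses import licensed_items


    async def checkout_and_release_one_seat(
        rpc_client: RabbitMQRPCClient,
        *,
        product_name: ProductName,
        user_id: UserID,
        wallet_id: WalletID,
        licensed_item_id: LicensedItemID,
        service_run_id: ServiceRunID,
    ) -> None:
        checkout = await licensed_items.checkout_licensed_item_for_wallet(
            rpc_client,
            product_name=product_name,
            user_id=user_id,
            wallet_id=wallet_id,
            licensed_item_id=licensed_item_id,
            num_of_seats=1,
            service_run_id=service_run_id,
        )
        # assumption: the returned model exposes the checkout identifier
        await licensed_items.release_licensed_item_for_wallet(
            rpc_client,
            product_name=product_name,
            user_id=user_id,
            licensed_item_checkout_id=checkout.licensed_item_checkout_id,
        )
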
servicelib.rabbitmq import RabbitMQRPCClient + +_logger = logging.getLogger(__name__) + + +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def mark_project_as_job( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + project_uuid: ProjectID, + job_parent_resource_name: str, +) -> None: + + result = await rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("mark_project_as_job"), + product_name=product_name, + user_id=user_id, + project_uuid=project_uuid, + job_parent_resource_name=job_parent_resource_name, + ) + assert result is None + + +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_projects_marked_as_jobs( + rpc_client: RabbitMQRPCClient, + *, + product_name: ProductName, + user_id: UserID, + # pagination + offset: PageOffsetInt = 0, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + filters: ListProjectsMarkedAsJobRpcFilters | None = None, +) -> PageRpcProjectJobRpcGet: + result = await rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_projects_marked_as_jobs"), + product_name=product_name, + user_id=user_id, + offset=offset, + limit=limit, + filters=filters, + ) + assert TypeAdapter(PageRpcProjectJobRpcGet).validate_python(result) # nosec + return cast(PageRpcProjectJobRpcGet, result) diff --git a/packages/service-library/src/servicelib/rabbitmq_errors.py b/packages/service-library/src/servicelib/rabbitmq_errors.py deleted file mode 100644 index 9bc0a2d1d0f..00000000000 --- a/packages/service-library/src/servicelib/rabbitmq_errors.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Final - -from pydantic.errors import PydanticErrorMixin - -_ERROR_PREFIX: Final[str] = "rabbitmq_error" - - -class BaseRPCError(PydanticErrorMixin, RuntimeError): - ... - - -class RPCNotInitializedError(BaseRPCError): - code = f"{_ERROR_PREFIX}.not_started" - msg_template = "Please check that the RabbitMQ RPC backend was initialized!" - - -class RemoteMethodNotRegisteredError(BaseRPCError): - code = f"{_ERROR_PREFIX}.remote_not_registered" - msg_template = ( - "Could not find a remote method named: '{method_name}'. " - "Message from remote server was returned: {incoming_message}. 
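
The deleted servicelib.rabbitmq_errors and servicelib.rabbitmq_utils modules below are superseded rather than lost: the constrained RPCMethodName/RPCNamespace types now come from models_library.rabbitmq_basic_types (every new rpc_interfaces module above imports them from there), and validation moved from ConstrainedStr/parse_obj_as to pydantic-v2 TypeAdapter. A short sketch of the replacement pattern, with made-up values:

    from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace
    from pydantic import TypeAdapter

    # pydantic-v2 validation of a method name, as used throughout the new modules
    method_name = TypeAdapter(RPCMethodName).validate_python("free_reserved_disk_space")

    # namespaces are still composed from key/value entries, now via models_library
    namespace = RPCNamespace.from_entries({"node_id": "some-node-id", "service": "dy-sidecar"})
    print(method_name, namespace)
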
" - ) diff --git a/packages/service-library/src/servicelib/rabbitmq_utils.py b/packages/service-library/src/servicelib/rabbitmq_utils.py deleted file mode 100644 index d46969ff46a..00000000000 --- a/packages/service-library/src/servicelib/rabbitmq_utils.py +++ /dev/null @@ -1,96 +0,0 @@ -# FIXME: move to settings-library or refactor - -import logging -import re -from typing import Awaitable, Final, Optional, Pattern - -import aio_pika -from pydantic import ConstrainedStr, parse_obj_as -from tenacity import retry -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -from .logging_utils import log_context - -log = logging.getLogger(__file__) - - -_MINUTE: Final[int] = 60 - -REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS: Final[str] = r"^[\w\-\.]*$" - - -class RPCMethodName(ConstrainedStr): - min_length: int = 1 - max_length: int = 252 - regex: Optional[Pattern[str]] = re.compile(REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS) - - -class RPCNamespace(ConstrainedStr): - min_length: int = 1 - max_length: int = 252 - regex: Optional[Pattern[str]] = re.compile(REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS) - - @classmethod - def from_entries(cls, entries: dict[str, str]) -> "RPCNamespace": - """ - Given a list of entries creates a namespace to be used in declaring the rabbitmq queue. - Keeping this to a predefined length - """ - composed_string = "-".join(f"{k}_{v}" for k, v in sorted(entries.items())) - return parse_obj_as(cls, composed_string) - - -class RPCNamespacedMethodName(ConstrainedStr): - min_length: int = 1 - max_length: int = 255 - regex: Optional[Pattern[str]] = re.compile(REGEX_RABBIT_QUEUE_ALLOWED_SYMBOLS) - - @classmethod - def from_namespace_and_method( - cls, namespace: RPCNamespace, method_name: RPCMethodName - ) -> "RPCNamespacedMethodName": - namespaced_method_name = f"{namespace}.{method_name}" - return parse_obj_as(cls, namespaced_method_name) - - -class RabbitMQRetryPolicyUponInitialization: - """Retry policy upon service initialization""" - - def __init__(self, logger: Optional[logging.Logger] = None): - logger = logger or log - - self.kwargs = { - "wait": wait_fixed(2), - "stop": stop_after_delay(3 * _MINUTE), - "before_sleep": before_sleep_log(logger, logging.WARNING), - "reraise": True, - } - - -@retry(**RabbitMQRetryPolicyUponInitialization().kwargs) -async def wait_till_rabbitmq_responsive(url: str) -> bool: - """Check if something responds to ``url``""" - with log_context(log, logging.INFO, msg=f"checking RabbitMQ connection at {url=}"): - connection = await aio_pika.connect(url) - await connection.close() - log.info("rabbitmq connection established") - return True - - -async def rpc_register_entries( - rabbit_client: "RabbitMQClient", entries: dict[str, str], handler: Awaitable -) -> None: - """ - Bind a local `handler` to a `namespace` derived from the provided `entries` - dictionary. - - NOTE: This is a helper enforce the pattern defined in `rpc_register`'s - docstring. 
- """ - await rabbit_client.rpc_register_handler( - RPCNamespace.from_entries(entries), - method_name=handler.__name__, - handler=handler, - ) diff --git a/packages/service-library/src/servicelib/redis.py b/packages/service-library/src/servicelib/redis.py deleted file mode 100644 index adfaac48b7a..00000000000 --- a/packages/service-library/src/servicelib/redis.py +++ /dev/null @@ -1,95 +0,0 @@ -import contextlib -import datetime -import logging -from dataclasses import dataclass, field -from typing import AsyncIterator, Final, Optional, Union - -import redis.asyncio as aioredis -import redis.exceptions -from pydantic.errors import PydanticErrorMixin -from redis.asyncio.lock import Lock -from redis.asyncio.retry import Retry -from redis.backoff import ExponentialBackoff - -from .background_task import periodic_task -from .logging_utils import log_catch - -_DEFAULT_LOCK_TTL: Final[datetime.timedelta] = datetime.timedelta(seconds=10) -_AUTO_EXTEND_LOCK_RATIO: Final[float] = 0.6 - -logger = logging.getLogger(__name__) - - -class AlreadyLockedError(PydanticErrorMixin, RuntimeError): - msg_template: str = "Lock {lock.name} is already locked!" - - -@dataclass -class RedisClientSDK: - redis_dsn: str - _client: aioredis.Redis = field(init=False) - - @property - def redis(self) -> aioredis.Redis: - return self._client - - def __post_init__(self): - # Run 3 retries with exponential backoff strategy source: https://redis.readthedocs.io/en/stable/backoff.html - retry = Retry(ExponentialBackoff(cap=0.512, base=0.008), retries=3) - self._client = aioredis.from_url( - self.redis_dsn, - retry=retry, - retry_on_error=[ - redis.exceptions.BusyLoadingError, - redis.exceptions.ConnectionError, - redis.exceptions.TimeoutError, - ], - encoding="utf-8", - decode_responses=True, - ) - - async def close(self) -> None: - await self._client.close(close_connection_pool=True) - - async def ping(self) -> bool: - try: - return await self._client.ping() - except redis.exceptions.ConnectionError: - return False - - @contextlib.asynccontextmanager - async def lock_context( - self, - lock_key: str, - lock_value: Optional[Union[bytes, str]] = None, - ) -> AsyncIterator[Lock]: - ttl_lock = None - try: - - async def _auto_extend_lock(lock: Lock) -> None: - await lock.reacquire() - - ttl_lock = self._client.lock( - lock_key, timeout=_DEFAULT_LOCK_TTL.total_seconds() - ) - if not await ttl_lock.acquire(blocking=False, token=lock_value): - raise AlreadyLockedError(lock=ttl_lock) - async with periodic_task( - _auto_extend_lock, - interval=_AUTO_EXTEND_LOCK_RATIO * _DEFAULT_LOCK_TTL, - task_name=f"{lock_key}_auto_extend", - lock=ttl_lock, - ): - yield ttl_lock - - finally: - if ttl_lock: - with log_catch(logger, reraise=False): - await ttl_lock.release() - - async def is_locked(self, lock_name: str) -> bool: - lock = self._client.lock(lock_name) - return await lock.locked() - - async def lock_value(self, lock_name: str) -> Optional[str]: - return await self._client.get(lock_name) diff --git a/packages/service-library/src/servicelib/redis/__init__.py b/packages/service-library/src/servicelib/redis/__init__.py new file mode 100644 index 00000000000..9e63a9f6525 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/__init__.py @@ -0,0 +1,33 @@ +from ._client import RedisClientSDK +from ._clients_manager import RedisClientsManager +from ._decorators import exclusive +from ._errors import ( + CouldNotAcquireLockError, + CouldNotConnectToRedisError, + LockLostError, + ProjectLockError, +) +from ._models import 
RedisManagerDBConfig +from ._project_lock import ( + get_project_locked_state, + is_project_locked, + with_project_locked, +) +from ._utils import handle_redis_returns_union_types + +__all__: tuple[str, ...] = ( + "CouldNotAcquireLockError", + "CouldNotConnectToRedisError", + "exclusive", + "get_project_locked_state", + "handle_redis_returns_union_types", + "is_project_locked", + "LockLostError", + "ProjectLockError", + "RedisClientSDK", + "RedisClientsManager", + "RedisManagerDBConfig", + "with_project_locked", +) + +# nopycln: file diff --git a/packages/service-library/src/servicelib/redis/_client.py b/packages/service-library/src/servicelib/redis/_client.py new file mode 100644 index 00000000000..c2a08154110 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_client.py @@ -0,0 +1,127 @@ +import asyncio +import datetime +import logging +from asyncio import Task +from dataclasses import dataclass, field +from typing import Final +from uuid import uuid4 + +import redis.asyncio as aioredis +import redis.exceptions +from redis.asyncio.lock import Lock +from redis.asyncio.retry import Retry +from redis.backoff import ExponentialBackoff + +from ..async_utils import cancel_wait_task +from ..background_task import periodic +from ..logging_utils import log_catch, log_context +from ._constants import ( + DEFAULT_DECODE_RESPONSES, + DEFAULT_HEALTH_CHECK_INTERVAL, + DEFAULT_LOCK_TTL, + DEFAULT_SOCKET_TIMEOUT, +) + +_logger = logging.getLogger(__name__) + +# SEE https://github.com/ITISFoundation/osparc-simcore/pull/7077 +_HEALTHCHECK_TASK_TIMEOUT_S: Final[float] = 3.0 + + +@dataclass +class RedisClientSDK: + redis_dsn: str + client_name: str + decode_responses: bool = DEFAULT_DECODE_RESPONSES + health_check_interval: datetime.timedelta = DEFAULT_HEALTH_CHECK_INTERVAL + + _client: aioredis.Redis = field(init=False) + _health_check_task: Task | None = None + _health_check_task_started_event: asyncio.Event | None = None + _is_healthy: bool = False + + @property + def redis(self) -> aioredis.Redis: + return self._client + + def __post_init__(self) -> None: + self._client = aioredis.from_url( + self.redis_dsn, + # Run 3 retries with exponential backoff strategy source: https://redis.readthedocs.io/en/stable/backoff.html + retry=Retry(ExponentialBackoff(cap=0.512, base=0.008), retries=3), + retry_on_error=[ + redis.exceptions.BusyLoadingError, + redis.exceptions.ConnectionError, + ], + retry_on_timeout=True, + socket_timeout=DEFAULT_SOCKET_TIMEOUT.total_seconds(), + encoding="utf-8", + decode_responses=self.decode_responses, + client_name=self.client_name, + ) + # NOTE: connection is done here already + self._is_healthy = False + self._health_check_task_started_event = asyncio.Event() + + @periodic(interval=self.health_check_interval) + async def _periodic_check_health() -> None: + assert self._health_check_task_started_event # nosec + self._health_check_task_started_event.set() + self._is_healthy = await self.ping() + + self._health_check_task = asyncio.create_task( + _periodic_check_health(), + name=f"redis_service_health_check_{self.redis_dsn}__{uuid4()}", + ) + + _logger.info( + "Connection to %s succeeded with %s", + f"redis at {self.redis_dsn=}", + f"{self._client=}", + ) + + async def shutdown(self) -> None: + with log_context( + _logger, level=logging.DEBUG, msg=f"Shutdown RedisClientSDK {self}" + ): + if self._health_check_task: + assert self._health_check_task_started_event # nosec + # NOTE: wait for the health check task to have started once before we can cancel it + await 
self._health_check_task_started_event.wait() + await cancel_wait_task( + self._health_check_task, max_delay=_HEALTHCHECK_TASK_TIMEOUT_S + ) + + await self._client.aclose(close_connection_pool=True) + + async def ping(self) -> bool: + with log_catch(_logger, reraise=False): + # NOTE: retry_* input parameters from aioredis.from_url do not apply for the ping call + await self._client.ping() + return True + return False + + @property + def is_healthy(self) -> bool: + """Returns the result of the last health check. + If redis becomes available, after being not available, + it will once more return ``True`` + + Returns: + ``False``: if the service is no longer reachable + ``True``: when service is reachable + """ + return self._is_healthy + + def create_lock( + self, lock_name: str, *, ttl: datetime.timedelta | None = DEFAULT_LOCK_TTL + ) -> Lock: + return self._client.lock( + name=lock_name, + timeout=ttl.total_seconds() if ttl is not None else None, + blocking=False, + ) + + async def lock_value(self, lock_name: str) -> str | None: + output: str | None = await self._client.get(lock_name) + return output diff --git a/packages/service-library/src/servicelib/redis/_clients_manager.py b/packages/service-library/src/servicelib/redis/_clients_manager.py new file mode 100644 index 00000000000..60b93360b88 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_clients_manager.py @@ -0,0 +1,45 @@ +import asyncio +from dataclasses import dataclass, field + +from settings_library.redis import RedisDatabase, RedisSettings + +from ._client import RedisClientSDK +from ._models import RedisManagerDBConfig + + +@dataclass +class RedisClientsManager: + """ + Manages the lifetime of redis client sdk connections + """ + + databases_configs: set[RedisManagerDBConfig] + settings: RedisSettings + client_name: str + + _client_sdks: dict[RedisDatabase, RedisClientSDK] = field(default_factory=dict) + + async def setup(self) -> None: + for config in self.databases_configs: + self._client_sdks[config.database] = RedisClientSDK( + redis_dsn=self.settings.build_redis_dsn(config.database), + decode_responses=config.decode_responses, + health_check_interval=config.health_check_interval, + client_name=f"{self.client_name}", + ) + + async def shutdown(self) -> None: + await asyncio.gather( + *[client.shutdown() for client in self._client_sdks.values()], + return_exceptions=True, + ) + + def client(self, database: RedisDatabase) -> RedisClientSDK: + return self._client_sdks[database] + + async def __aenter__(self) -> "RedisClientsManager": + await self.setup() + return self + + async def __aexit__(self, *args) -> None: + await self.shutdown() diff --git a/packages/service-library/src/servicelib/redis/_constants.py b/packages/service-library/src/servicelib/redis/_constants.py new file mode 100644 index 00000000000..6a10c6b75b0 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_constants.py @@ -0,0 +1,12 @@ +import datetime +from typing import Final + +from pydantic import NonNegativeInt + +DEFAULT_LOCK_TTL: Final[datetime.timedelta] = datetime.timedelta(seconds=10) +DEFAULT_SOCKET_TIMEOUT: Final[datetime.timedelta] = datetime.timedelta(seconds=30) + + +DEFAULT_DECODE_RESPONSES: Final[bool] = True +DEFAULT_HEALTH_CHECK_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5) +SHUTDOWN_TIMEOUT_S: Final[NonNegativeInt] = 5 diff --git a/packages/service-library/src/servicelib/redis/_decorators.py b/packages/service-library/src/servicelib/redis/_decorators.py new file mode 100644 index 
00000000000..6d686a33af5 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_decorators.py @@ -0,0 +1,144 @@ +import asyncio +import contextlib +import functools +import logging +import socket +from collections.abc import Callable, Coroutine +from datetime import timedelta +from typing import Any, Final, ParamSpec, TypeVar + +import arrow +import redis.exceptions +from redis.asyncio.lock import Lock + +from ..background_task import periodic +from ._client import RedisClientSDK +from ._constants import DEFAULT_LOCK_TTL +from ._errors import CouldNotAcquireLockError, LockLostError +from ._utils import auto_extend_lock + +_logger = logging.getLogger(__file__) + +P = ParamSpec("P") +R = TypeVar("R") + +_EXCLUSIVE_TASK_NAME: Final[str] = "exclusive/{module_name}.{func_name}" +_EXCLUSIVE_AUTO_EXTEND_TASK_NAME: Final[ + str +] = "exclusive/autoextend_lock_{redis_lock_key}" + + +@periodic(interval=DEFAULT_LOCK_TTL / 2, raise_on_error=True) +async def _periodic_auto_extender(lock: Lock, started_event: asyncio.Event) -> None: + await auto_extend_lock(lock) + started_event.set() + + +def exclusive( + redis_client: RedisClientSDK | Callable[..., RedisClientSDK], + *, + lock_key: str | Callable[..., str], + lock_value: bytes | str | None = None, + blocking: bool = False, + blocking_timeout: timedelta | None = None, +) -> Callable[ + [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]] +]: + """ + Define a method to run exclusively across + processes by leveraging a Redis Lock. + + Arguments: + redis -- the redis client + lock_key -- a string as the name of the lock (good practice: app_name:lock_name) + lock_value -- some additional data that can be retrieved by another client if None, + it will be automatically filled with the current time and the client name + + Raises: + - ValueError if used incorrectly + - CouldNotAcquireLockError if the lock could not be acquired + - LockLostError if the lock was lost (e.g. due to Redis restart, or TTL was not extended in time) + """ + + if not lock_key: + msg = "lock_key cannot be empty string!" 
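# Hedged usage sketch for `exclusive`: it wraps a coroutine so that only one
# process/replica runs it at a time, auto-extending the Redis lock while the
# work is in progress. The DSN, client name and lock key below are illustrative
# assumptions, not values taken from this repository.
import asyncio

from servicelib.redis import CouldNotAcquireLockError, RedisClientSDK, exclusive


async def run_cleanup_on_a_single_replica() -> None:
    client = RedisClientSDK("redis://localhost:6379/0", client_name="example-worker")
    try:

        @exclusive(client, lock_key="example-worker:nightly-cleanup")
        async def _nightly_cleanup() -> str:
            await asyncio.sleep(30)  # long job; the lock is auto-extended meanwhile
            return "done"

        try:
            print(await _nightly_cleanup())
        except CouldNotAcquireLockError:
            print("another replica is already running the cleanup")
    finally:
        await client.shutdown()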
+ raise ValueError(msg) + + def _decorator( + coro: Callable[P, Coroutine[Any, Any, R]], + ) -> Callable[P, Coroutine[Any, Any, R]]: + @functools.wraps(coro) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + redis_lock_key = ( + lock_key(*args, **kwargs) if callable(lock_key) else lock_key + ) + assert isinstance(redis_lock_key, str) # nosec + + client = ( + redis_client(*args, **kwargs) + if callable(redis_client) + else redis_client + ) + assert isinstance(client, RedisClientSDK) # nosec + nonlocal lock_value + if lock_value is None: + lock_value = f"locked since {arrow.utcnow().format()} by {client.client_name} on {socket.gethostname()}" + + lock = client.create_lock(redis_lock_key, ttl=DEFAULT_LOCK_TTL) + if not await lock.acquire( + token=lock_value, + blocking=blocking, + blocking_timeout=( + blocking_timeout.total_seconds() if blocking_timeout else None + ), + ): + raise CouldNotAcquireLockError(lock=lock) + + try: + async with asyncio.TaskGroup() as tg: + started_event = asyncio.Event() + # first create a task that will auto-extend the lock + auto_extend_lock_task = tg.create_task( + _periodic_auto_extender(lock, started_event), + name=_EXCLUSIVE_AUTO_EXTEND_TASK_NAME.format( + redis_lock_key=redis_lock_key + ), + ) + # NOTE: In case the work thread is raising right away, + # this ensures the extend task ran once and ensure cancellation works + await started_event.wait() + + # then the task that runs the user code + assert asyncio.iscoroutinefunction(coro) # nosec + work_task = tg.create_task( + coro(*args, **kwargs), + name=_EXCLUSIVE_TASK_NAME.format( + module_name=coro.__module__, func_name=coro.__name__ + ), + ) + + res = await work_task + auto_extend_lock_task.cancel() + return res + + except BaseExceptionGroup as eg: + # Separate exceptions into LockLostError and others + lock_lost_errors, other_errors = eg.split(LockLostError) + + # If there are any other errors, re-raise them + if other_errors: + assert len(other_errors.exceptions) == 1 # nosec + raise other_errors.exceptions[0] from eg + + assert lock_lost_errors is not None # nosec + assert len(lock_lost_errors.exceptions) == 1 # nosec + raise lock_lost_errors.exceptions[0] from eg + finally: + with contextlib.suppress(redis.exceptions.LockNotOwnedError): + # in the case where the lock would have been lost, + # this would raise again and is not necessary + await lock.release() + + return _wrapper + + return _decorator diff --git a/packages/service-library/src/servicelib/redis/_errors.py b/packages/service-library/src/servicelib/redis/_errors.py new file mode 100644 index 00000000000..7fc3c7823ae --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_errors.py @@ -0,0 +1,27 @@ +from typing import TypeAlias + +import redis.exceptions +from common_library.errors_classes import OsparcErrorMixin + + +class BaseRedisError(OsparcErrorMixin, RuntimeError): + ... + + +class CouldNotAcquireLockError(BaseRedisError): + msg_template: str = "Lock {lock.name} could not be acquired!" + + +class CouldNotConnectToRedisError(BaseRedisError): + msg_template: str = "Connection to '{dsn}' failed" + + +class LockLostError(BaseRedisError): + msg_template: str = ( + "Lock {lock.name} has been lost (e.g. it could not be auto-extended!)" + "TIP: check connection to Redis DBs or look for Synchronous " + "code that might block the auto-extender task. Somehow the distributed lock disappeared!" 
+ ) + + +ProjectLockError: TypeAlias = redis.exceptions.LockError # NOTE: backwards compatible diff --git a/packages/service-library/src/servicelib/redis/_models.py b/packages/service-library/src/servicelib/redis/_models.py new file mode 100644 index 00000000000..6e2db864c09 --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_models.py @@ -0,0 +1,13 @@ +import datetime +from dataclasses import dataclass + +from settings_library.redis import RedisDatabase + +from ._constants import DEFAULT_DECODE_RESPONSES, DEFAULT_HEALTH_CHECK_INTERVAL + + +@dataclass(frozen=True, kw_only=True) +class RedisManagerDBConfig: + database: RedisDatabase + decode_responses: bool = DEFAULT_DECODE_RESPONSES + health_check_interval: datetime.timedelta = DEFAULT_HEALTH_CHECK_INTERVAL diff --git a/packages/service-library/src/servicelib/redis/_project_lock.py b/packages/service-library/src/servicelib/redis/_project_lock.py new file mode 100644 index 00000000000..d618d88c58f --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_project_lock.py @@ -0,0 +1,105 @@ +import functools +import logging +from collections.abc import Awaitable, Callable, Coroutine +from typing import Any, Final, ParamSpec, TypeVar + +from models_library.projects import ProjectID +from models_library.projects_access import Owner +from models_library.projects_state import ProjectLocked, ProjectStatus +from servicelib.logging_utils import log_catch + +from ._client import RedisClientSDK +from ._decorators import exclusive +from ._errors import CouldNotAcquireLockError, ProjectLockError + +_PROJECT_REDIS_LOCK_KEY: Final[str] = "project_lock:{}" + +_logger = logging.getLogger(__name__) + +P = ParamSpec("P") +R = TypeVar("R") + + +def with_project_locked( + redis_client: RedisClientSDK | Callable[..., RedisClientSDK], + *, + project_uuid: str | ProjectID, + status: ProjectStatus, + owner: Owner | None, + notification_cb: Callable[[], Awaitable[None]] | None, +) -> Callable[ + [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]] +]: + """creates a distributed auto sustained Redis lock for project with project_uuid, keeping its status and owner in the lock data + + Arguments: + redis_client -- the client to use to access redis + project_uuid -- the project UUID + status -- the project status + owner -- the owner of the lock (default: {None}) + notification_cb -- an optional notification callback that will be called AFTER the project is locked and AFTER it was unlocked + + Returns: + the decorated function return value + + Raises: + raises anything from the decorated function and from the optional notification callback + """ + + def _decorator( + func: Callable[P, Coroutine[Any, Any, R]], + ) -> Callable[P, Coroutine[Any, Any, R]]: + @functools.wraps(func) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + @exclusive( + redis_client, + lock_key=_PROJECT_REDIS_LOCK_KEY.format(project_uuid), + lock_value=ProjectLocked( + value=True, + owner=owner, + status=status, + ).model_dump_json(), + ) + async def _exclusive_func(*args, **kwargs) -> R: + if notification_cb is not None: + with log_catch(_logger, reraise=False): + await notification_cb() + return await func(*args, **kwargs) + + try: + return await _exclusive_func(*args, **kwargs) + + except CouldNotAcquireLockError as e: + raise ProjectLockError from e + finally: + # we are now unlocked + if notification_cb is not None: + with log_catch(_logger, reraise=False): + await notification_cb() + + return _wrapper + + return _decorator + + +async 
def is_project_locked( + redis_client: RedisClientSDK, project_uuid: str | ProjectID +) -> bool: + redis_lock = redis_client.create_lock(_PROJECT_REDIS_LOCK_KEY.format(project_uuid)) + return await redis_lock.locked() + + +async def get_project_locked_state( + redis_client: RedisClientSDK, project_uuid: str | ProjectID +) -> ProjectLocked | None: + """ + Returns: + ProjectLocked object if the project project_uuid is locked or None otherwise + """ + if await is_project_locked(redis_client, project_uuid=project_uuid) and ( + lock_value := await redis_client.redis.get( + _PROJECT_REDIS_LOCK_KEY.format(project_uuid) + ) + ): + return ProjectLocked.model_validate_json(lock_value) + return None diff --git a/packages/service-library/src/servicelib/redis/_utils.py b/packages/service-library/src/servicelib/redis/_utils.py new file mode 100644 index 00000000000..52d112ca4fe --- /dev/null +++ b/packages/service-library/src/servicelib/redis/_utils.py @@ -0,0 +1,35 @@ +import logging +from collections.abc import Awaitable +from typing import Any + +import redis.exceptions +from redis.asyncio.lock import Lock + +from ..logging_utils import log_context +from ._errors import LockLostError + +_logger = logging.getLogger(__name__) + + +async def auto_extend_lock(lock: Lock) -> None: + """automatically extend a distributed lock TTL (time to live) by re-acquiring the lock + + Arguments: + lock -- the lock to auto-extend + + Raises: + LockLostError: in case the lock is not available anymore + LockError: in case of wrong usage (no timeout or lock was not previously acquired) + """ + try: + with log_context(_logger, logging.DEBUG, f"Autoextend lock {lock.name!r}"): + await lock.reacquire() + except redis.exceptions.LockNotOwnedError as exc: + raise LockLostError(lock=lock) from exc + + +async def handle_redis_returns_union_types(result: Any | Awaitable[Any]) -> Any: + """Used to handle mypy issues with redis 5.x return types""" + if isinstance(result, Awaitable): + return await result + return result diff --git a/packages/service-library/src/servicelib/redis_utils.py b/packages/service-library/src/servicelib/redis_utils.py deleted file mode 100644 index 5978d7561ad..00000000000 --- a/packages/service-library/src/servicelib/redis_utils.py +++ /dev/null @@ -1,34 +0,0 @@ -import functools -import logging -from typing import Optional, Union - -from .redis import RedisClientSDK - -log = logging.getLogger(__file__) - - -def exclusive( - redis: RedisClientSDK, - *, - lock_key: str, - lock_value: Optional[Union[bytes, str]] = None -): - """ - Define a method to run exclusively accross - processes by leveraging a Redis Lock. 
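# Hedged sketch of the project-lock helpers above: `with_project_locked` decorates
# the coroutine that mutates a project so that concurrent open/close/delete
# operations are serialized, and `get_project_locked_state` lets callers inspect
# the lock. The redis client, project uuid and status value are passed in by the
# caller here because their concrete values are assumptions of the example.
from models_library.projects import ProjectID
from models_library.projects_state import ProjectStatus
from servicelib.redis import (
    RedisClientSDK,
    get_project_locked_state,
    with_project_locked,
)


async def close_project_safely(
    redis_client: RedisClientSDK, project_uuid: ProjectID, status: ProjectStatus
) -> None:
    @with_project_locked(
        redis_client,
        project_uuid=project_uuid,
        status=status,
        owner=None,
        notification_cb=None,
    )
    async def _do_close() -> None:
        ...  # actual project teardown goes here

    await _do_close()
    # once the lock is released this is expected to be None again
    print(await get_project_locked_state(redis_client, project_uuid))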
- - parameters: - redis: the redis client SDK - lock_key: a string as the name of the lock (good practice: app_name:lock_name) - lock_value: some additional data that can be retrieved by another client - """ - - def decorator(func): - @functools.wraps(func) - async def wrapper(*args, **kwargs): - async with redis.lock_context(lock_key=lock_key, lock_value=lock_value): - return await func(*args, **kwargs) - - return wrapper - - return decorator diff --git a/packages/service-library/src/servicelib/request_keys.py b/packages/service-library/src/servicelib/request_keys.py index 89a3b6a2777..8322e812557 100644 --- a/packages/service-library/src/servicelib/request_keys.py +++ b/packages/service-library/src/servicelib/request_keys.py @@ -1,6 +1,7 @@ -""" Storage keys in requests +""" Storage keys in requests """ +from typing import Final # RQT=request -RQT_USERID_KEY = __name__ + ".userid" +RQT_USERID_KEY: Final[str] = f"{__name__}.userid" diff --git a/packages/service-library/src/servicelib/resources.py b/packages/service-library/src/servicelib/resources.py index 52416e55eb3..bcc471864c9 100644 --- a/packages/service-library/src/servicelib/resources.py +++ b/packages/service-library/src/servicelib/resources.py @@ -1,70 +1,40 @@ """ Safe access to all data resources distributed with this package -See https://setuptools.readthedocs.io/en/latest/pkg_resources.html +https://docs.python.org/3.11/library/importlib.resources.html#module-importlib.resources """ -import pathlib -from pathlib import Path -from typing import TextIO -import attr -import pkg_resources +import importlib.resources +from dataclasses import dataclass +from pathlib import Path -@attr.s(frozen=True, auto_attribs=True) -class ResourcesFacade: +@dataclass(frozen=True) +class DataResourcesFacade: """Facade to access data resources installed with a distribution - - Built on top of pkg_resources + - Built on top of importlib.resources Resources are read-only files/folders """ package_name: str distribution_name: str - config_folder: str def exists(self, resource_name: str) -> bool: - return pkg_resources.resource_exists(self.package_name, resource_name) - - def stream(self, resource_name: str) -> TextIO: - # TODO: check if read-only and if so, rename - return pkg_resources.resource_stream(self.package_name, resource_name) + path = self.get_path(resource_name) - def listdir(self, resource_name: str) -> str: - return pkg_resources.resource_listdir(self.package_name, resource_name) - - def isdir(self, resource_name: str) -> str: - return pkg_resources.resource_isdir(self.package_name, resource_name) + return path.exists() def get_path(self, resource_name: str) -> Path: - """Returns a path to a resource + """Returns a path to a resourced - WARNING: existence of file is not guaranteed. Use resources.exists + WARNING: existence of file is not guaranteed WARNING: resource files are supposed to be used as read-only! 
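# A small, hedged example of the importlib.resources-based facade above; the
# package and distribution names, and the resource path, are hypothetical
# placeholders for a service that ships data files with its distribution.
from servicelib.resources import DataResourcesFacade

example_resources = DataResourcesFacade(
    package_name="simcore_service_example",
    distribution_name="simcore-service-example",
)

if example_resources.exists("api/openapi.yaml"):
    print(example_resources.get_path("api/openapi.yaml").read_text())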
""" - resource_path = pathlib.Path( - pkg_resources.resource_filename(self.package_name, resource_name) + package_dir = importlib.resources.files( + self.distribution_name.replace("-", "_") ) - return resource_path - - def get_distribution(self): - """Returns distribution info object""" - return pkg_resources.get_distribution(self.distribution_name) - - -# TODO: create abc -@attr.s(auto_attribs=True) -class FileResource: - """ - TODO: lazy evaluation of attribs - """ - - name: str - - -class PackageResources: - def get_configfile(self, name: str) -> FileResource: - raise NotImplementedError("Should be implemented in subclass") + return Path(f"{package_dir}") / resource_name.lstrip("/") # resources env keys diff --git a/packages/service-library/src/servicelib/rest_constants.py b/packages/service-library/src/servicelib/rest_constants.py index e03667f15f2..d763657b6c9 100644 --- a/packages/service-library/src/servicelib/rest_constants.py +++ b/packages/service-library/src/servicelib/rest_constants.py @@ -1,8 +1,25 @@ # SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeldict -RESPONSE_MODEL_POLICY = { - "by_alias": True, - "exclude_unset": True, - "exclude_defaults": False, - "exclude_none": False, -} +from typing import Final + +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + + +class PydanticExportParametersDict(TypedDict): + by_alias: bool + exclude_unset: bool + exclude_defaults: bool + exclude_none: bool + + +RESPONSE_MODEL_POLICY = PydanticExportParametersDict( + by_alias=True, + exclude_unset=True, + exclude_defaults=False, + exclude_none=False, +) + +# Headers keys +X_PRODUCT_NAME_HEADER: Final[str] = "X-Simcore-Products-Name" diff --git a/packages/service-library/src/servicelib/rest_responses.py b/packages/service-library/src/servicelib/rest_responses.py new file mode 100644 index 00000000000..9dc32ed9e5a --- /dev/null +++ b/packages/service-library/src/servicelib/rest_responses.py @@ -0,0 +1,42 @@ +import json +from collections.abc import Mapping +from typing import Any + +from common_library.json_serialization import json_loads + +_ENVELOPE_KEYS = ("data", "error") + + +def is_enveloped_from_map(payload: Mapping) -> bool: + return all(k in _ENVELOPE_KEYS for k in payload if not f"{k}".startswith("_")) + + +def is_enveloped_from_text(text: str) -> bool: + try: + payload = json_loads(text) + except json.decoder.JSONDecodeError: + return False + return is_enveloped_from_map(payload) + + +def is_enveloped(payload: Mapping | str) -> bool: + # pylint: disable=isinstance-second-argument-not-valid-type + if isinstance(payload, Mapping): + return is_enveloped_from_map(payload) + if isinstance(payload, str): + return is_enveloped_from_text(text=payload) + return False + + +def unwrap_envelope(payload: Mapping[str, Any]) -> tuple: + """ + Safe returns (data, error) tuple from a response payload + """ + return tuple(payload.get(k) for k in _ENVELOPE_KEYS) if payload else (None, None) + + +def unwrap_envelope_if_required(data: Mapping) -> Mapping: + if is_enveloped(data): + data, error = unwrap_envelope(data) + assert not error # nosec + return data diff --git a/packages/service-library/src/servicelib/retry_policies.py b/packages/service-library/src/servicelib/retry_policies.py index cac7a833f3d..5ab98c6722e 100644 --- a/packages/service-library/src/servicelib/retry_policies.py +++ b/packages/service-library/src/servicelib/retry_policies.py @@ -4,7 +4,7 @@ """ import logging -from typing import Optional 
+from typing import Any from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_attempt @@ -19,12 +19,16 @@ class PostgresRetryPolicyUponInitialization: WAIT_SECS = 5 ATTEMPTS_COUNT = 20 - def __init__(self, logger: Optional[logging.Logger] = None): + def __init__(self, logger: logging.Logger | None = None): logger = logger or log - self.kwargs = dict( - wait=wait_fixed(self.WAIT_SECS), - stop=stop_after_attempt(self.ATTEMPTS_COUNT), - before_sleep=before_sleep_log(logger, logging.WARNING), - reraise=True, - ) + self.kwargs: dict[str, Any] = { + "wait": wait_fixed(self.WAIT_SECS), + "stop": stop_after_attempt(self.ATTEMPTS_COUNT), + "before_sleep": before_sleep_log(logger, logging.WARNING), + "reraise": True, + } + + +class RedisRetryPolicyUponInitialization(PostgresRetryPolicyUponInitialization): + ... diff --git a/packages/service-library/src/servicelib/s3_utils.py b/packages/service-library/src/servicelib/s3_utils.py new file mode 100644 index 00000000000..f9492af2e32 --- /dev/null +++ b/packages/service-library/src/servicelib/s3_utils.py @@ -0,0 +1,32 @@ +from typing import Protocol + +from models_library.bytes_iters import BytesIter + + +class FileLikeReader(Protocol): + """minimal interface for upload from file objects to S3""" + + async def read(self, size: int) -> bytes: + ... + + +class FileLikeBytesIterReader(FileLikeReader): + def __init__(self, bytes_iter: BytesIter): + self._bytes_iter = bytes_iter + self._buffer = bytearray() + self._async_iterator = self._get_iterator() + + async def _get_iterator(self): + async for chunk in self._bytes_iter: + yield chunk + + async def read(self, size: int) -> bytes: + while len(self._buffer) < size: + try: + chunk = await anext(self._async_iterator) + self._buffer.extend(chunk) + except StopAsyncIteration: + break # End of file + + result, self._buffer = self._buffer[:size], self._buffer[size:] + return bytes(result) diff --git a/packages/service-library/src/servicelib/sequences_utils.py b/packages/service-library/src/servicelib/sequences_utils.py new file mode 100644 index 00000000000..df2f9e1b6ee --- /dev/null +++ b/packages/service-library/src/servicelib/sequences_utils.py @@ -0,0 +1,37 @@ +import itertools +from collections.abc import Generator, Iterable +from typing import TypeVar + +import toolz # type: ignore[import-untyped] +from pydantic import NonNegativeInt + +T = TypeVar("T") + + +def partition_gen( + input_list: Iterable[T], *, slice_size: NonNegativeInt +) -> Generator[tuple[T, ...], None, None]: + """ + Given an iterable and the slice_size yields tuples containing + slice_size elements in them. + Inputs: + input_list= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + slice_size = 5 + Outputs: + [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13)] + """ + if not input_list: + yield () + + yield from toolz.partition_all(slice_size, input_list) + + +def pairwise(iterable: Iterable[T]) -> Iterable[tuple[T, T]]: + """ + s -> (s0,s1), (s1,s2), (s2, s3), ... 
+ NOTE: it requires at least 2 elements to produce a pair, + otherwise an empty sequence will be returned + """ + a, b = itertools.tee(iterable) + next(b, None) + return zip(a, b, strict=False) diff --git a/packages/service-library/src/servicelib/services_utils.py b/packages/service-library/src/servicelib/services_utils.py index 60a9caf92a5..889594cbf0c 100644 --- a/packages/service-library/src/servicelib/services_utils.py +++ b/packages/service-library/src/servicelib/services_utils.py @@ -1,5 +1,11 @@ import urllib.parse +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_webserver.projects_nodes import ( + NodeGet, + NodeGetIdle, + NodeGetUnknown, +) from models_library.services import ServiceType @@ -9,3 +15,14 @@ def get_service_from_key(service_key: str) -> ServiceType: if encoded_service_type == "comp": encoded_service_type = "computational" return ServiceType(encoded_service_type) + + +def get_status_as_dict( + status: NodeGetIdle | NodeGetUnknown | DynamicServiceGet | NodeGet, +) -> dict: + """shared between different backend services to guarantee same result to frontend""" + return ( + status.model_dump(by_alias=True) + if isinstance(status, DynamicServiceGet) + else status.model_dump() + ) diff --git a/packages/service-library/src/servicelib/socketio_utils.py b/packages/service-library/src/servicelib/socketio_utils.py new file mode 100644 index 00000000000..efc63436715 --- /dev/null +++ b/packages/service-library/src/servicelib/socketio_utils.py @@ -0,0 +1,43 @@ +""" Common utilities for python-socketio library + + +NOTE: we intentionally avoided importing socketio here to avoid adding an extra dependency at +this level which would include python-socketio in all libraries +""" + +import asyncio + + +async def cleanup_socketio_async_pubsub_manager(server_manager): + + # NOTE: this is ugly. It seems though that python-socketio does not + # cleanup its background tasks properly. 
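# Hedged illustration of the sequence helpers added above in sequences_utils.py;
# the outputs shown in the comments follow from the docstrings.
from servicelib.sequences_utils import pairwise, partition_gen

print(list(partition_gen(list(range(1, 14)), slice_size=5)))
# [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13)]

print(list(pairwise([1, 2, 3, 4])))
# [(1, 2), (2, 3), (3, 4)]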
+ # https://github.com/miguelgrinberg/python-socketio/discussions/1092 + cancelled_tasks = [] + + if hasattr(server_manager, "thread"): + server_thread = server_manager.thread + assert isinstance(server_thread, asyncio.Task) # nosec + server_thread.cancel() + cancelled_tasks.append(server_thread) + + if server_manager.publisher_channel: + await server_manager.publisher_channel.close() + + if server_manager.publisher_connection: + await server_manager.publisher_connection.close() + + current_tasks = asyncio.tasks.all_tasks() + for task in current_tasks: + coro = task.get_coro() + if any( + coro_name in coro.__qualname__ # type: ignore + for coro_name in [ + "AsyncServer._service_task", + "AsyncSocket.schedule_ping", + "AsyncPubSubManager._thread", + ] + ): + task.cancel() + cancelled_tasks.append(task) + await asyncio.gather(*cancelled_tasks, return_exceptions=True) diff --git a/packages/service-library/src/servicelib/status_codes_utils.py b/packages/service-library/src/servicelib/status_codes_utils.py new file mode 100644 index 00000000000..d50ae7ec681 --- /dev/null +++ b/packages/service-library/src/servicelib/status_codes_utils.py @@ -0,0 +1,120 @@ +""" Usage + + - on aiohttp services + from servicelib.aiohttp import status + from servicelib.status_codes_utils import is_success + + assert is_success(status.HTTP_200_OK) + + + - on fastapi services + + from fastapi import status + from servicelib.status_codes_utils import is_success + + assert is_success(status.HTTP_200_OK) + + NOTE: https://github.com/encode/httpx/blob/master/httpx/_status_codes.py +""" + +import types +from collections.abc import Callable +from http import HTTPStatus +from typing import Final + +_INVALID_STATUS_CODE_MSG = "INVALID_STATUS_CODE" + + +def get_code_display_name(status_code: int) -> str: + """ + Returns display name given a status code, e.g. + + get_display_name(200) == "HTTP_200_OK" + get_display_name(status.HTTP_200_OK) == "HTTP_200_OK" + """ + try: + code = HTTPStatus(status_code) + return f"HTTP_{status_code}_{code.name}" + except ValueError: + if status_code == 306: # noqa: PLR2004 + # NOTE: HttpStatus does not include 306 + return "HTTP_306_RESERVED" + return _INVALID_STATUS_CODE_MSG + + +_CODE_DESCRIPTION_TEMPLATE: Final[ + str +] = "{description}. See https://developer.mozilla.org/en-US/docs/Web/HTTP/Status{url_suffix}" + + +def get_code_description(status_code: int) -> str: + try: + description = HTTPStatus(status_code).description + except ValueError: + description = "Unused" + + match status_code: + case 305: + url_suffix = "#305_use_proxy" + case 306: + url_suffix = "#306_unused" + case _: + url_suffix = f"/{status_code}" + + return _CODE_DESCRIPTION_TEMPLATE.format( + description=description, url_suffix=url_suffix + ) + + +def is_1xx_informational(status_code: int) -> bool: + """ + Returns `True` for 1xx status codes, `False` otherwise. + """ + return 100 <= status_code <= 199 # noqa: PLR2004 + + +def is_2xx_success(status_code: int) -> bool: + """ + Returns `True` for 2xx status codes, `False` otherwise. + """ + return 200 <= status_code <= 299 # noqa: PLR2004 + + +def is_3xx_redirect(status_code: int) -> bool: + """ + Returns `True` for 3xx status codes, `False` otherwise. + """ + return 300 <= status_code <= 399 # noqa: PLR2004 + + +def is_4xx_client_error(status_code: int) -> bool: + """ + Returns `True` for 4xx status codes, `False` otherwise. 
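# Hedged examples for the status-code helpers above; they work with the integer
# constants from either `servicelib.aiohttp.status` or `fastapi.status`.
from servicelib.status_codes_utils import (
    get_code_display_name,
    is_2xx_success,
    is_4xx_client_error,
)

assert get_code_display_name(200) == "HTTP_200_OK"
assert is_2xx_success(204)
assert is_4xx_client_error(404)
assert not is_4xx_client_error(500)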
+ """ + return 400 <= status_code <= 499 # noqa: PLR2004 + + +def is_5xx_server_error(status_code: int) -> bool: + """ + Returns `True` for 5xx status codes, `False` otherwise. + """ + return 500 <= status_code <= 599 # noqa: PLR2004 + + +def is_error(status_code: int) -> bool: + """ + Returns `True` for 4xx or 5xx status codes, `False` otherwise. + """ + return 400 <= status_code <= 599 # noqa: PLR2004 + + +def get_http_status_codes( + status: types.ModuleType, predicate: Callable[[int], bool] | None = None +) -> list[int]: + # In the spirit of https://docs.python.org/3/library/inspect.html#inspect.getmembers + iter_all = ( + getattr(status, code) for code in status.__all__ if code.startswith("HTTP_") + ) + if predicate is None: + return list(iter_all) + return [code for code in iter_all if predicate(code)] diff --git a/packages/service-library/src/servicelib/tracing.py b/packages/service-library/src/servicelib/tracing.py new file mode 100644 index 00000000000..e1b3b348a72 --- /dev/null +++ b/packages/service-library/src/servicelib/tracing.py @@ -0,0 +1,36 @@ +from contextlib import contextmanager +from typing import TypeAlias + +from opentelemetry import context as otcontext +from opentelemetry import trace +from opentelemetry.instrumentation.logging import LoggingInstrumentor +from settings_library.tracing import TracingSettings + +TracingContext: TypeAlias = otcontext.Context | None + + +def _is_tracing() -> bool: + return trace.get_current_span().is_recording() + + +def get_context() -> TracingContext: + if not _is_tracing(): + return None + return otcontext.get_current() + + +@contextmanager +def use_tracing_context(context: TracingContext): + token: object | None = None + if context is not None: + token = otcontext.attach(context) + try: + yield + finally: + if token is not None: + otcontext.detach(token) + + +def setup_log_tracing(tracing_settings: TracingSettings): + _ = tracing_settings + LoggingInstrumentor().instrument(set_logging_format=False) diff --git a/packages/service-library/src/servicelib/utils.py b/packages/service-library/src/servicelib/utils.py index 66928d72866..e6de282068c 100644 --- a/packages/service-library/src/servicelib/utils.py +++ b/packages/service-library/src/servicelib/utils.py @@ -4,13 +4,30 @@ I order to avoid cyclic dependences, please DO NOT IMPORT ANYTHING from . 
""" + import asyncio import logging import os +import socket +from collections.abc import ( + AsyncGenerator, + AsyncIterable, + Awaitable, + Coroutine, + Generator, + Iterable, +) from pathlib import Path -from typing import Any, Awaitable, Coroutine, Optional, Union +from typing import Any, Final, Literal, TypeVar, cast, overload + +import toolz # type: ignore[import-untyped] +from pydantic import NonNegativeInt -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + +_DEFAULT_GATHER_TASKS_GROUP_PREFIX: Final[str] = "gathered" +_DEFAULT_LOGGER: Final[logging.Logger] = _logger +_DEFAULT_LIMITED_CONCURRENCY: Final[int] = 1 def is_production_environ() -> bool: @@ -23,29 +40,30 @@ def is_production_environ() -> bool: return os.environ.get("SC_BUILD_TARGET") == "production" -def get_http_client_request_total_timeout() -> Optional[int]: +def get_http_client_request_total_timeout() -> int | None: return int(os.environ.get("HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT", "20")) or None -def get_http_client_request_aiohttp_connect_timeout() -> Optional[int]: +def get_http_client_request_aiohttp_connect_timeout() -> int | None: return int(os.environ.get("HTTP_CLIENT_REQUEST_AIOHTTP_CONNECT_TIMEOUT", 0)) or None -def get_http_client_request_aiohttp_sock_connect_timeout() -> Optional[int]: +def get_http_client_request_aiohttp_sock_connect_timeout() -> int | None: return ( int(os.environ.get("HTTP_CLIENT_REQUEST_AIOHTTP_SOCK_CONNECT_TIMEOUT", "5")) or None ) +_EXPECTED: Final = {".github", "packages", "services"} + + def is_osparc_repo_dir(path: Path) -> bool: - # TODO: implement with git cli - expected = (".github", "packages", "services") - got = [p.name for p in path.iterdir() if p.is_dir()] - return all(d in got for d in expected) + dirnames = [p.name for p in path.iterdir() if p.is_dir()] + return all(name in dirnames for name in _EXPECTED) -def search_osparc_repo_dir(start: Union[str, Path], max_iterations=8) -> Optional[Path]: +def search_osparc_repo_dir(start: str | Path, max_iterations=8) -> Path | None: """Returns path to root repo dir or None if it does not exists NOTE: assumes starts is a path within repo @@ -67,18 +85,19 @@ def fire_and_forget_task( task_suffix_name: str, fire_and_forget_tasks_collection: set[asyncio.Task], ) -> asyncio.Task: + # NOTE: details on rationale in https://github.com/ITISFoundation/osparc-simcore/pull/3120 task = asyncio.create_task(obj, name=f"fire_and_forget_task_{task_suffix_name}") fire_and_forget_tasks_collection.add(task) - def log_exception_callback(fut: asyncio.Future): + def _log_exception_callback(fut: asyncio.Future): try: fut.result() except asyncio.CancelledError: - logger.warning("%s spawned as fire&forget was cancelled", fut) + _logger.warning("%s spawned as fire&forget was cancelled", fut) except Exception: # pylint: disable=broad-except - logger.exception("Error occurred while running task!") + _logger.exception("Error occurred while running task %s!", task.get_name()) - task.add_done_callback(log_exception_callback) + task.add_done_callback(_log_exception_callback) task.add_done_callback(fire_and_forget_tasks_collection.discard) return task @@ -87,7 +106,7 @@ def log_exception_callback(fut: asyncio.Future): async def logged_gather( *tasks: Awaitable[Any], reraise: bool = True, - log: logging.Logger = logger, + log: logging.Logger = _logger, max_concurrency: int = 0, ) -> list[Any]: """ @@ -103,8 +122,7 @@ async def logged_gather( :param log: passing the logger gives a chance to identify the origin of the gather call, defaults to 
current submodule's logger :return: list of tasks results and errors e.g. [1, 2, ValueError("task3 went wrong"), 33, "foo"] """ - - wrapped_tasks = tasks + wrapped_tasks: tuple | list if max_concurrency > 0: semaphore = asyncio.Semaphore(max_concurrency) @@ -113,8 +131,10 @@ async def sem_task(task: Awaitable[Any]) -> Any: return await task wrapped_tasks = [sem_task(t) for t in tasks] + else: + wrapped_tasks = tasks - results = await asyncio.gather(*wrapped_tasks, return_exceptions=True) + results: list[Any] = await asyncio.gather(*wrapped_tasks, return_exceptions=True) error = None for i, value in enumerate(results): @@ -134,3 +154,202 @@ async def sem_task(task: Awaitable[Any]) -> Any: raise error return results + + +def ensure_ends_with(input_string: str, char: str) -> str: + if not input_string.endswith(char): + input_string += char + return input_string + + +def partition_gen( + input_list: Iterable, *, slice_size: NonNegativeInt +) -> Generator[tuple[Any, ...], None, None]: + """ + Given an iterable and the slice_size yields tuples containing + slice_size elements in them. + + Inputs: + input_list= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + slice_size = 5 + Outputs: + [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13)] + + """ + if not input_list: + yield () + + yield from toolz.partition_all(slice_size, input_list) + + +def unused_port() -> int: + """Return a port that is unused on the current host.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("127.0.0.1", 0)) + return cast(int, s.getsockname()[1]) + + +T = TypeVar("T") + + +async def limited_as_completed( + awaitables: Iterable[Awaitable[T]] | AsyncIterable[Awaitable[T]], + *, + limit: int = _DEFAULT_LIMITED_CONCURRENCY, + tasks_group_prefix: str | None = None, +) -> AsyncGenerator[asyncio.Task[T], None]: + """Runs awaitables using limited concurrent tasks and returns + result futures unordered. + + Arguments: + awaitables -- The awaitables to limit the concurrency of. + + Keyword Arguments: + limit -- The maximum number of awaitables to run concurrently. + 0 or negative values disables the limit. (default: {1}) + tasks_group_prefix -- The prefix to use for the name of the asyncio tasks group. + If None, no name is used. (default: {None}) + + Returns: + nothing + + Yields: + task[T]: the future of the awaitables as they appear. 
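# Hedged usage sketch for `limited_as_completed`: run many coroutines with a
# bounded number of in-flight tasks and consume the results as they finish.
import asyncio

from servicelib.utils import limited_as_completed


async def _square(i: int) -> int:
    await asyncio.sleep(0.01)
    return i * i


async def main() -> None:
    async for done_task in limited_as_completed(
        (_square(i) for i in range(10)), limit=3
    ):
        print(done_task.result())  # unordered; at most 3 coroutines run at once


asyncio.run(main())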
+ + + """ + try: + awaitable_iterator = aiter(awaitables) # type: ignore[arg-type] + is_async = True + except TypeError: + assert isinstance(awaitables, Iterable) # nosec + awaitable_iterator = iter(awaitables) # type: ignore[assignment] + is_async = False + + completed_all_awaitables = False + pending_futures: set[asyncio.Task] = set() + + try: + while pending_futures or not completed_all_awaitables: + while ( + limit < 1 or len(pending_futures) < limit + ) and not completed_all_awaitables: + try: + aw = ( + await anext(awaitable_iterator) + if is_async + else next(awaitable_iterator) # type: ignore[call-overload] + ) + future: asyncio.Task = asyncio.ensure_future(aw) + if tasks_group_prefix: + future.set_name(f"{tasks_group_prefix}-{future.get_name()}") + pending_futures.add(future) + + except (StopIteration, StopAsyncIteration): # noqa: PERF203 + completed_all_awaitables = True + if not pending_futures: + return + done, pending_futures = await asyncio.wait( + pending_futures, return_when=asyncio.FIRST_COMPLETED + ) + + for future in done: + yield future + + except asyncio.CancelledError: + for future in pending_futures: + future.cancel() + await asyncio.gather(*pending_futures, return_exceptions=True) + raise + + +async def _wrapped( + awaitable: Awaitable[T], *, index: int, reraise: bool, logger: logging.Logger +) -> tuple[int, T | BaseException]: + try: + return index, await awaitable + except asyncio.CancelledError: + logger.debug( + "Cancelled %i-th concurrent task %s", + index + 1, + f"{awaitable=}", + ) + raise + except BaseException as exc: # pylint: disable=broad-exception-caught + logger.warning( + "Error in %i-th concurrent task %s: %s", + index + 1, + f"{awaitable=}", + f"{exc=}", + ) + if reraise: + raise + return index, exc + + +@overload +async def limited_gather( + *awaitables: Awaitable[T], + reraise: Literal[True] = True, + log: logging.Logger = _DEFAULT_LOGGER, + limit: int = _DEFAULT_LIMITED_CONCURRENCY, + tasks_group_prefix: str | None = None, +) -> list[T]: + ... + + +@overload +async def limited_gather( + *awaitables: Awaitable[T], + reraise: Literal[False] = False, + log: logging.Logger = _DEFAULT_LOGGER, + limit: int = _DEFAULT_LIMITED_CONCURRENCY, + tasks_group_prefix: str | None = None, +) -> list[T | BaseException]: + ... + + +async def limited_gather( + *awaitables: Awaitable[T], + reraise: bool = True, + log: logging.Logger = _DEFAULT_LOGGER, + limit: int = _DEFAULT_LIMITED_CONCURRENCY, + tasks_group_prefix: str | None = None, +) -> list[T] | list[T | BaseException]: + """runs all the awaitables using the limited concurrency and returns them in the same order + + Arguments: + awaitables -- The awaitables to limit the concurrency of. + + Keyword Arguments: + limit -- The maximum number of awaitables to run concurrently. + setting 0 or negative values disable (default: {1}) + reraise -- if True will raise at the first exception + The remaining tasks will continue as in standard asyncio gather. + If False, then the exceptions will be returned (default: {True}) + log -- the logger to use for logging the exceptions (default: {_logger}) + tasks_group_prefix -- The prefix to use for the name of the asyncio tasks group. + If None, 'gathered' prefix is used. 
(default: {None}) + + Returns: + the results of the awaitables keeping the order + + special thanks to: https://death.andgravity.com/limit-concurrency + """ + + indexed_awaitables = [ + _wrapped(awaitable, reraise=reraise, index=index, logger=log) + for index, awaitable in enumerate(awaitables) + ] + + interim_results: list[T | BaseException | None] = [None] * len(indexed_awaitables) + async for future in limited_as_completed( + indexed_awaitables, + limit=limit, + tasks_group_prefix=tasks_group_prefix or _DEFAULT_GATHER_TASKS_GROUP_PREFIX, + ): + index, result = await future + interim_results[index] = result + + # NOTE: None is already contained in T + return cast(list[T | BaseException], interim_results) diff --git a/packages/service-library/src/servicelib/utils_formatting.py b/packages/service-library/src/servicelib/utils_formatting.py new file mode 100644 index 00000000000..6de58372ced --- /dev/null +++ b/packages/service-library/src/servicelib/utils_formatting.py @@ -0,0 +1,10 @@ +import datetime + +_TIME_FORMAT = "{:02d}:{:02d}" # format for minutes:seconds + + +def timedelta_as_minute_second(delta: datetime.timedelta) -> str: + total_seconds = round(delta.total_seconds()) + minutes, seconds = divmod(abs(total_seconds), 60) + sign = "-" if total_seconds < 0 else "" + return f"{sign}{_TIME_FORMAT.format(minutes, seconds)}" diff --git a/packages/service-library/src/servicelib/utils_meta.py b/packages/service-library/src/servicelib/utils_meta.py index 241438d64b2..6ee48fd4d56 100644 --- a/packages/service-library/src/servicelib/utils_meta.py +++ b/packages/service-library/src/servicelib/utils_meta.py @@ -1,11 +1,12 @@ """ Utilities to implement _meta.py """ -from contextlib import suppress -import pkg_resources +from importlib.metadata import distribution + +from models_library.basic_types import VersionStr from packaging.version import Version -from pkg_resources import Distribution +from pydantic import TypeAdapter class PackageInfo: @@ -14,7 +15,7 @@ class PackageInfo: Usage example: info: Final = PackageMetaInfo(package_name="simcore-service-library") - __version__: Final[str] = info.__version__ + __version__: Final[VersionStr] = info.__version__ PROJECT_NAME: Final[str] = info.project_name VERSION: Final[Version] = info.version @@ -27,34 +28,27 @@ def __init__(self, package_name: str): """ package_name: as defined in 'setup.name' """ - self._distribution: Distribution = pkg_resources.get_distribution(package_name) + self._distribution = distribution(package_name) @property def project_name(self) -> str: - return self._distribution.project_name + return self._distribution.metadata["Name"] @property def version(self) -> Version: return Version(self._distribution.version) @property - def __version__(self) -> str: - return self._distribution.version + def __version__(self) -> VersionStr: + return TypeAdapter(VersionStr).validate_python(self._distribution.version) @property def api_prefix_path_tag(self) -> str: - """Used as prefix in the api path""" + """Used as prefix in the api path e.g. 
'v0'""" return f"v{self.version.major}" def get_summary(self) -> str: - with suppress(Exception): - try: - metadata = self._distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = self._distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" + return self._distribution.metadata.get_all("Summary", [""])[-1] def get_finished_banner(self) -> str: return "{:=^100}".format( diff --git a/packages/service-library/src/servicelib/utils_profiling_middleware.py b/packages/service-library/src/servicelib/utils_profiling_middleware.py new file mode 100644 index 00000000000..5b5038e335e --- /dev/null +++ b/packages/service-library/src/servicelib/utils_profiling_middleware.py @@ -0,0 +1,69 @@ +import contextvars +import json +from collections.abc import Iterator +from contextlib import contextmanager +from typing import Final + +from common_library.json_serialization import json_dumps, json_loads +from pyinstrument import Profiler + +from .mimetype_constants import MIMETYPE_APPLICATION_JSON, MIMETYPE_APPLICATION_ND_JSON + +_UNSET: Final = None + +_profiler = Profiler(async_mode="enabled") +_is_profiling = contextvars.ContextVar("_is_profiling", default=False) + + +def is_profiling() -> bool: + return _is_profiling.get() + + +@contextmanager +def profile_context(enable: bool | None = _UNSET) -> Iterator[None]: + """Context manager which temporarily removes request profiler from context""" + if enable is _UNSET: + enable = _is_profiling.get() + if enable: + try: + _profiler.start() + yield + finally: + _profiler.stop() + else: + yield None + + +@contextmanager +def dont_profile() -> Iterator[None]: + if _is_profiling.get(): + try: + _profiler.stop() + yield + finally: + _profiler.start() + else: + yield + + +def append_profile(body: str, profile_text: str) -> str: + try: + json_loads(body) + body += "\n" if not body.endswith("\n") else "" + except json.decoder.JSONDecodeError: + pass + body += json_dumps({"profile": profile_text}) + return body + + +def check_response_headers( + response_headers: dict[bytes, bytes] +) -> list[tuple[bytes, bytes]]: + original_content_type: str = response_headers[b"content-type"].decode() + assert original_content_type in { # nosec + MIMETYPE_APPLICATION_ND_JSON, + MIMETYPE_APPLICATION_JSON, + } + headers: dict = {} + headers[b"content-type"] = MIMETYPE_APPLICATION_ND_JSON.encode() + return list(headers.items()) diff --git a/packages/service-library/src/servicelib/utils_secrets.py b/packages/service-library/src/servicelib/utils_secrets.py index bda927fdde1..389aab96272 100644 --- a/packages/service-library/src/servicelib/utils_secrets.py +++ b/packages/service-library/src/servicelib/utils_secrets.py @@ -1,6 +1,8 @@ import secrets import string -from typing import Final +from typing import Any, Final + +from pydantic import StrictInt, validate_call MIN_PASSWORD_LENGTH = 30 _SAFE_SYMBOLS = "!$%*+,-.:=?@^_~" # avoid issues with parsing, espapes etc @@ -13,6 +15,16 @@ def generate_password(length: int = MIN_PASSWORD_LENGTH) -> str: return "".join(secrets.choice(_ALPHABET) for _ in range(length)) +_MIN_SECRET_NUM_BYTES = 32 + + +def generate_token_secret_key(nbytes: int = _MIN_SECRET_NUM_BYTES) -> str: + """Equivalent to generating a random password with openssl in hex format + openssl rand -hex 32 + """ + return secrets.token_hex(nbytes) + + MIN_PASSCODE_LENGTH = 6 @@ -29,3 +41,55 @@ def generate_passcode(number_of_digits: int = MIN_PASSCODE_LENGTH) -> str: 
number_of_digits = max(number_of_digits, MIN_PASSCODE_LENGTH) passcode = secrets.randbelow(10**number_of_digits) return f"{passcode}".zfill(number_of_digits) + + +def are_secrets_equal(got: str, expected: str) -> bool: + """Constant-time evaluation of 'got == expected'""" + return secrets.compare_digest(got.encode("utf8"), expected.encode("utf8")) + + +@validate_call +def secure_randint(start: StrictInt, end: StrictInt) -> int: + """Generate a random integer between start (inclusive) and end (exclusive).""" + if start >= end: + msg = f"{start=} must be less than {end=}" + raise ValueError(msg) + + diff = end - start + return secrets.randbelow(diff) + start + + +_PLACEHOLDER: Final[str] = "*" * 8 +_DEFAULT_SENSITIVE_KEYWORDS: Final[set[str]] = {"pass", "secret"} + + +def _is_possibly_sensitive(name: str, sensitive_keywords: set[str]) -> bool: + return any(k.lower() in name.lower() for k in sensitive_keywords) + + +def mask_sensitive_data( + data: dict[str, Any], *, extra_sensitive_keywords: set[str] | None = None +) -> dict: + """Replaces the sensitive values in the dict with a placeholder before logging + + Sensitive values are detected by testing the key name (i.e. str(key)) against the sensitive keywords `pass` or `secret`. + + NOTE: this function is used to avoid logging sensitive information like passwords or secrets + """ + sensitive_keywords = _DEFAULT_SENSITIVE_KEYWORDS | ( + extra_sensitive_keywords or set() + ) + masked_data: dict[str, Any] = {} + for key, value in data.items(): + if isinstance(value, dict): + masked_data[key] = mask_sensitive_data( + value, extra_sensitive_keywords=sensitive_keywords + ) + else: + masked_data[key] = ( + _PLACEHOLDER + if _is_possibly_sensitive(f"{key}", sensitive_keywords) + else value + ) + + return masked_data diff --git a/packages/service-library/tests/aiohttp/conftest.py b/packages/service-library/tests/aiohttp/conftest.py index 087d5e50e62..1891ee17d15 100644 --- a/packages/service-library/tests/aiohttp/conftest.py +++ b/packages/service-library/tests/aiohttp/conftest.py @@ -1,20 +1,2 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument - -from pathlib import Path - -import pytest -from servicelib.aiohttp.openapi import OpenApiSpec, create_openapi_specs - - -@pytest.fixture -def petstore_spec_file(here) -> Path: - filepath = here / "data/oas3/petstore.yaml" - assert filepath.exists() - return filepath - - -@pytest.fixture -async def petstore_specs(petstore_spec_file) -> OpenApiSpec: - specs = await create_openapi_specs(petstore_spec_file) - return specs diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py b/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py index bcab3bff0c8..8fe29473cfc 100644 --- a/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py +++ b/packages/service-library/tests/aiohttp/long_running_tasks/conftest.py @@ -3,19 +3,19 @@ # pylint: disable=unused-variable import asyncio -from typing import Awaitable, Callable +from collections.abc import Awaitable, Callable import pytest from aiohttp import web from aiohttp.test_utils import TestClient from faker import Faker -from pydantic import BaseModel, parse_obj_as -from pytest_simcore.helpers.utils_assert import assert_status -from servicelib.aiohttp import long_running_tasks +from pydantic import BaseModel, TypeAdapter +from pytest_simcore.helpers.assert_checks import assert_status +from servicelib.aiohttp import long_running_tasks, status from servicelib.aiohttp.long_running_tasks.server
import TaskId from servicelib.aiohttp.requests_validation import parse_request_query_parameters_as from servicelib.long_running_tasks._task import TaskContext -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed @@ -33,11 +33,12 @@ async def _string_list_task( await asyncio.sleep(sleep_time) task_progress.update(message="generated item", percent=index / num_strings) if fail: - raise RuntimeError("We were asked to fail!!") + msg = "We were asked to fail!!" + raise RuntimeError(msg) # NOTE: this code is used just for the sake of not returning the default 200 return web.json_response( - data={"data": generated_strings}, status=web.HTTPCreated.status_code + data={"data": generated_strings}, status=status.HTTP_201_CREATED ) @@ -89,10 +90,10 @@ async def _caller(client: TestClient, **query_kwargs) -> TaskId: .update_query(num_strings=10, sleep_time=f"{0.2}", **query_kwargs) ) resp = await client.post(f"{url}") - data, error = await assert_status(resp, web.HTTPAccepted) + data, error = await assert_status(resp, status.HTTP_202_ACCEPTED) assert data assert not error - task_get = parse_obj_as(long_running_tasks.server.TaskGet, data) + task_get = TypeAdapter(long_running_tasks.server.TaskGet).validate_python(data) return task_get.task_id return _caller @@ -119,10 +120,10 @@ async def _waiter( ): with attempt: result = await client.get(f"{status_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert data assert not error - task_status = long_running_tasks.server.TaskStatus.parse_obj(data) + task_status = long_running_tasks.server.TaskStatus.model_validate(data) assert task_status assert task_status.done diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py index 0e7e0dcd6ee..71d5501b2ed 100644 --- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py +++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks.py @@ -9,21 +9,21 @@ """ - import asyncio import json -from typing import Any, Awaitable, Callable +from collections.abc import Awaitable, Callable +from typing import Any import pytest from aiohttp import web from aiohttp.test_utils import TestClient -from pydantic import parse_obj_as -from pytest_simcore.helpers.utils_assert import assert_status -from servicelib.aiohttp import long_running_tasks +from pydantic import TypeAdapter +from pytest_simcore.helpers.assert_checks import assert_status +from servicelib.aiohttp import long_running_tasks, status from servicelib.aiohttp.long_running_tasks.server import TaskGet, TaskId from servicelib.aiohttp.rest_middlewares import append_rest_middlewares from servicelib.long_running_tasks._task import TaskContext -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed @@ -41,16 +41,12 @@ def app(server_routes: web.RouteTableDef) -> web.Application: @pytest.fixture -def client( - event_loop: asyncio.AbstractEventLoop, +async def client( aiohttp_client: Callable, unused_tcp_port_factory: Callable, app: web.Application, ) -> TestClient: - - return 
event_loop.run_until_complete( - aiohttp_client(app, server_kwargs={"port": unused_tcp_port_factory()}) - ) + return await aiohttp_client(app, server_kwargs={"port": unused_tcp_port_factory()}) async def test_workflow( @@ -71,15 +67,15 @@ async def test_workflow( ): with attempt: result = await client.get(f"{status_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert data assert not error - task_status = long_running_tasks.server.TaskStatus.parse_obj(data) + task_status = long_running_tasks.server.TaskStatus.model_validate(data) assert task_status progress_updates.append( (task_status.task_progress.message, task_status.task_progress.percent) ) - print(f"<-- received task status: {task_status.json(indent=2)}") + print(f"<-- received task status: {task_status.model_dump_json(indent=2)}") assert task_status.done, "task incomplete" print( f"-- waiting for task status completed successfully: {json.dumps(attempt.retry_state.retry_object.statistics, indent=2)}" @@ -101,13 +97,13 @@ async def test_workflow( # now get the result result_url = client.app.router["get_task_result"].url_for(task_id=task_id) result = await client.get(f"{result_url}") - task_result, error = await assert_status(result, web.HTTPCreated) + task_result, error = await assert_status(result, status.HTTP_201_CREATED) assert task_result assert not error assert task_result == [f"{x}" for x in range(10)] # getting the result again should raise a 404 result = await client.get(f"{result_url}") - await assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) @pytest.mark.parametrize( @@ -124,7 +120,7 @@ async def test_get_task_wrong_task_id_raises_not_found( assert client.app url = client.app.router[route_name].url_for(task_id="fake_task_id") result = await client.request(method, f"{url}") - await assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) async def test_failing_task_returns_error( @@ -139,7 +135,7 @@ async def test_failing_task_returns_error( # get the result result_url = client.app.router["get_task_result"].url_for(task_id=task_id) result = await client.get(f"{result_url}") - data, error = await assert_status(result, web.HTTPInternalServerError) + data, error = await assert_status(result, status.HTTP_500_INTERNAL_SERVER_ERROR) assert not data assert error assert "errors" in error @@ -157,7 +153,7 @@ async def test_get_results_before_tasks_finishes_returns_404( result_url = client.app.router["get_task_result"].url_for(task_id=task_id) result = await client.get(f"{result_url}") - await assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) async def test_cancel_task( @@ -170,22 +166,22 @@ async def test_cancel_task( # cancel the task delete_url = client.app.router["cancel_and_delete_task"].url_for(task_id=task_id) result = await client.delete(f"{delete_url}") - data, error = await assert_status(result, web.HTTPNoContent) + data, error = await assert_status(result, status.HTTP_204_NO_CONTENT) assert not data assert not error # it should be gone, so no status status_url = client.app.router["get_task_status"].url_for(task_id=task_id) result = await client.get(f"{status_url}") - await assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) # and also no results result_url = client.app.router["get_task_result"].url_for(task_id=task_id) result = await client.get(f"{result_url}") - await 
assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) # try cancelling again result = await client.delete(f"{delete_url}") - await assert_status(result, web.HTTPNotFound) + await assert_status(result, status.HTTP_404_NOT_FOUND) async def test_list_tasks_empty_list(client: TestClient): @@ -193,7 +189,7 @@ async def test_list_tasks_empty_list(client: TestClient): assert client.app list_url = client.app.router["list_tasks"].url_for() result = await client.get(f"{list_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error assert data == [] @@ -213,9 +209,9 @@ async def test_list_tasks( # check we have the full list list_url = client.app.router["list_tasks"].url_for() result = await client.get(f"{list_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error - list_of_tasks = parse_obj_as(list[TaskGet], data) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(data) assert len(list_of_tasks) == NUM_TASKS # the task name is properly formatted @@ -232,7 +228,7 @@ async def test_list_tasks( await client.get(f"{result_url}") # the list shall go down one by one result = await client.get(f"{list_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error - list_of_tasks = parse_obj_as(list[TaskGet], data) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(data) assert len(list_of_tasks) == NUM_TASKS - (task_index + 1) diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py index 8b1f19568af..b211cc3d1ca 100644 --- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py +++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_client.py @@ -2,14 +2,13 @@ # pylint: disable=unused-argument import asyncio -from typing import Callable, Optional +from collections.abc import Callable import pytest from aiohttp import ClientResponseError, web from aiohttp.test_utils import TestClient -from pytest import MonkeyPatch -from pytest_simcore.helpers.utils_assert import assert_status -from servicelib.aiohttp import long_running_tasks +from pytest_simcore.helpers.assert_checks import assert_status +from servicelib.aiohttp import long_running_tasks, status from servicelib.aiohttp.long_running_tasks import client as lr_client from servicelib.aiohttp.long_running_tasks.client import ( LRTask, @@ -31,16 +30,12 @@ def app(server_routes: web.RouteTableDef) -> web.Application: @pytest.fixture -def client( - event_loop: asyncio.AbstractEventLoop, +async def client( aiohttp_client: Callable, unused_tcp_port_factory: Callable, app: web.Application, ) -> TestClient: - - return event_loop.run_until_complete( - aiohttp_client(app, server_kwargs={"port": unused_tcp_port_factory()}) - ) + return await aiohttp_client(app, server_kwargs={"port": unused_tcp_port_factory()}) @pytest.fixture @@ -63,10 +58,10 @@ async def test_long_running_task_request_raises_400( @pytest.fixture -def short_poll_interval(monkeypatch: MonkeyPatch): +def short_poll_interval(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( lr_client, - "_DEFAULT_POLL_INTERVAL_S", + "DEFAULT_POLL_INTERVAL_S", 0.01, ) @@ -74,7 +69,7 @@ def 
short_poll_interval(monkeypatch: MonkeyPatch): async def test_long_running_task_request( short_poll_interval, client: TestClient, long_running_task_url: URL ): - task: Optional[LRTask] = None + task: LRTask | None = None async for task in long_running_task_request( client.session, long_running_task_url.with_query(num_strings=10, sleep_time=0.1), @@ -91,7 +86,7 @@ async def test_long_running_task_request_timeout( client: TestClient, long_running_task_url: URL ): assert client.app - task: Optional[LRTask] = None + task: LRTask | None = None with pytest.raises(asyncio.TimeoutError): async for task in long_running_task_request( client.session, @@ -104,7 +99,7 @@ async def test_long_running_task_request_timeout( # check the task was properly aborted by the client list_url = client.app.router["list_tasks"].url_for() result = await client.get(f"{list_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error assert data == [] @@ -113,7 +108,7 @@ async def test_long_running_task_request_error( client: TestClient, long_running_task_url: URL ): assert client.app - task: Optional[LRTask] = None + task: LRTask | None = None async for task in long_running_task_request( client.session, long_running_task_url.with_query( diff --git a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py index 7b691110b8f..0b37c941669 100644 --- a/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py +++ b/packages/service-library/tests/aiohttp/long_running_tasks/test_long_running_tasks_with_task_context.py @@ -9,17 +9,16 @@ """ - -import asyncio +from collections.abc import Awaitable, Callable from functools import wraps -from typing import Awaitable, Callable, Optional +from typing import Optional import pytest from aiohttp import web from aiohttp.test_utils import TestClient -from pydantic import create_model, parse_obj_as -from pytest_simcore.helpers.utils_assert import assert_status -from servicelib.aiohttp import long_running_tasks +from pydantic import TypeAdapter, create_model +from pytest_simcore.helpers.assert_checks import assert_status +from servicelib.aiohttp import long_running_tasks, status from servicelib.aiohttp.long_running_tasks._server import ( RQT_LONG_RUNNING_TASKS_CONTEXT_KEY, ) @@ -52,7 +51,7 @@ async def _test_task_context_decorator( ) -> web.StreamResponse: """this task context callback tries to get the user_id from the query if available""" query_param = parse_request_query_parameters_as(query_model, request) - request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY] = query_param.dict() + request[RQT_LONG_RUNNING_TASKS_CONTEXT_KEY] = query_param.model_dump() return await handler(request) return _test_task_context_decorator @@ -78,17 +77,13 @@ def app_with_task_context( @pytest.fixture -def client_with_task_context( - event_loop: asyncio.AbstractEventLoop, +async def client_with_task_context( aiohttp_client: Callable, unused_tcp_port_factory: Callable, app_with_task_context: web.Application, ) -> TestClient: - - return event_loop.run_until_complete( - aiohttp_client( - app_with_task_context, server_kwargs={"port": unused_tcp_port_factory()} - ) + return await aiohttp_client( + app_with_task_context, server_kwargs={"port": unused_tcp_port_factory()} ) @@ -105,18 +100,18 @@ async def test_list_tasks( # the list should be empty 
if we do not pass the expected context list_url = client_with_task_context.app.router["list_tasks"].url_for() result = await client_with_task_context.get(f"{list_url}") - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error - list_of_tasks = parse_obj_as(list[TaskGet], data) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(data) assert len(list_of_tasks) == 0 # the list should be full if we pass the expected context result = await client_with_task_context.get( f"{list_url.update_query(task_context)}" ) - data, error = await assert_status(result, web.HTTPOk) + data, error = await assert_status(result, status.HTTP_200_OK) assert not error - list_of_tasks = parse_obj_as(list[TaskGet], data) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(data) assert len(list_of_tasks) == 1 @@ -133,10 +128,10 @@ async def test_get_task_status( task_id=task_id ) resp = await client_with_task_context.get(f"{status_url}") - await assert_status(resp, web.HTTPNotFound) + await assert_status(resp, status.HTTP_404_NOT_FOUND) # calling with context should find the task resp = await client_with_task_context.get(f"{status_url.with_query(task_context)}") - await assert_status(resp, web.HTTPOk) + await assert_status(resp, status.HTTP_200_OK) async def test_get_task_result( @@ -153,10 +148,10 @@ async def test_get_task_result( task_id=task_id ) resp = await client_with_task_context.get(f"{result_url}") - await assert_status(resp, web.HTTPNotFound) + await assert_status(resp, status.HTTP_404_NOT_FOUND) # calling with context should find the task resp = await client_with_task_context.get(f"{result_url.with_query(task_context)}") - await assert_status(resp, web.HTTPCreated) + await assert_status(resp, status.HTTP_201_CREATED) async def test_cancel_task( @@ -171,14 +166,14 @@ async def test_cancel_task( ) # calling cancel without task context should find nothing resp = await client_with_task_context.delete(f"{cancel_url}") - await assert_status(resp, web.HTTPNotFound) + await assert_status(resp, status.HTTP_404_NOT_FOUND) # calling with context should find and delete the task resp = await client_with_task_context.delete( f"{cancel_url.update_query(task_context)}" ) - await assert_status(resp, web.HTTPNoContent) + await assert_status(resp, status.HTTP_204_NO_CONTENT) # calling with context a second time should find nothing resp = await client_with_task_context.delete( f"{cancel_url.update_query(task_context)}" ) - await assert_status(resp, web.HTTPNotFound) + await assert_status(resp, status.HTTP_404_NOT_FOUND) diff --git a/packages/service-library/tests/aiohttp/test_application.py b/packages/service-library/tests/aiohttp/test_application.py new file mode 100644 index 00000000000..e918c8e873a --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_application.py @@ -0,0 +1,176 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio + +import servicelib.aiohttp.application +from aiohttp import web +from aiohttp.test_utils import TestServer +from pytest_mock import MockerFixture +from servicelib.aiohttp.application_keys import APP_FIRE_AND_FORGET_TASKS_KEY +from servicelib.aiohttp.client_session import APP_CLIENT_SESSION_KEY, get_client_session + + +async def test_create_safe_application(mocker: MockerFixture): # noqa: PLR0915 + # setup spies before init + first_call_on_startup_spy = mocker.spy( + servicelib.aiohttp.application, 
"_first_call_on_startup" + ) + first_call_on_cleanup_spy = mocker.spy( + servicelib.aiohttp.application, "_first_call_on_cleanup" + ) + persistent_client_session_spy = mocker.spy( + servicelib.aiohttp.application, "persistent_client_session" + ) + + # some more events callbacks + async def _other_on_startup(_app: web.Application): + assert first_call_on_startup_spy.called + assert not first_call_on_cleanup_spy.called + assert persistent_client_session_spy.call_count == 1 + + # What if I add one more background task here?? OK + _app[APP_FIRE_AND_FORGET_TASKS_KEY].add( + asyncio.create_task(asyncio.sleep(100), name="startup") + ) + + async def _other_on_shutdown(_app: web.Application): + assert first_call_on_startup_spy.called + assert not first_call_on_cleanup_spy.called + + # What if I add one more background task here?? OK + _app[APP_FIRE_AND_FORGET_TASKS_KEY].add( + asyncio.create_task(asyncio.sleep(100), name="shutdown") + ) + + async def _other_on_cleanup(_app: web.Application): + assert first_call_on_startup_spy.called + assert first_call_on_cleanup_spy.called + + # What if I add one more background task here?? NOT OK!! + # WARNING: uncommenting this line suggests that we cannot add f&f tasks on-cleanup callbacks !!! + # _app[APP_FIRE_AND_FORGET_TASKS_KEY].add( asyncio.create_task(asyncio.sleep(100), name="cleanup") ) + + async def _other_cleanup_context(_app: web.Application): + # context seem to start first + assert not first_call_on_startup_spy.called + assert not first_call_on_cleanup_spy.called + assert persistent_client_session_spy.call_count == 1 + + # What if I add one more background task here?? OK + _app[APP_FIRE_AND_FORGET_TASKS_KEY].add( + asyncio.create_task(asyncio.sleep(100), name="setup") + ) + + yield + + assert first_call_on_startup_spy.called + assert not first_call_on_cleanup_spy.called + assert persistent_client_session_spy.call_count == 1 + + # What if I add one more background task here?? OK + _app[APP_FIRE_AND_FORGET_TASKS_KEY].add( + asyncio.create_task(asyncio.sleep(100), name="teardown") + ) + + # setup + the_app = servicelib.aiohttp.application.create_safe_application() + + assert len(the_app.on_startup) > 0 + assert len(the_app.on_cleanup) > 0 + assert len(the_app.cleanup_ctx) > 0 + + # NOTE there are 4 type of different events + the_app.on_startup.append(_other_on_startup) + the_app.on_shutdown.append(_other_on_shutdown) + the_app.on_cleanup.append(_other_on_cleanup) + the_app.cleanup_ctx.append(_other_cleanup_context) + + # pre-start checks ----------- + assert APP_CLIENT_SESSION_KEY not in the_app + + # starting ----------- + server = TestServer(the_app) + await server.start_server() + # started ----------- + + assert first_call_on_startup_spy.call_count == 1 + assert first_call_on_cleanup_spy.call_count == 0 + assert persistent_client_session_spy.call_count == 1 + + # persistent_client_session created client + assert APP_CLIENT_SESSION_KEY in the_app + + # stopping ----------- + await server.close() + # stopped ----------- + + assert first_call_on_startup_spy.call_count == 1 + assert first_call_on_cleanup_spy.call_count == 1 + assert persistent_client_session_spy.call_count == 1 + + # persistent_client_session closed session + assert get_client_session(the_app).closed + + # checks that _cancel_all_background_tasks worked? 
+ fire_and_forget_tasks = the_app[APP_FIRE_AND_FORGET_TASKS_KEY] + done = [t for t in fire_and_forget_tasks if t.done()] + cancelled = [t for t in fire_and_forget_tasks if t.cancelled()] + pending = [t for t in fire_and_forget_tasks if not t.cancelled() or not t.done()] + assert not pending + assert done or cancelled + + +async def test_aiohttp_events_order(): + the_app = web.Application() + the_app["events"] = [] + + async def _on_startup(_app: web.Application): + _app["events"].append("startup") + + async def _on_shutdown(_app: web.Application): + _app["events"].append("shutdown") + + async def _on_cleanup(_app: web.Application): + _app["events"].append("cleanup") + + async def _cleanup_context(_app: web.Application): + _app["events"].append("cleanup_ctx.setup") + yield + _app["events"].append("cleanup_ctx.teardown") + + the_app.on_startup.append(_on_startup) + the_app.on_shutdown.append(_on_shutdown) + the_app.on_cleanup.append(_on_cleanup) + the_app.cleanup_ctx.append(_cleanup_context) + + server = TestServer(the_app) + await server.start_server() + await server.close() + + # Events are triggered as follows + # SEE https://docs.aiohttp.org/en/stable/web_advanced.html#aiohttp-web-signals + # + # cleanup_ctx[0].setup ---> begin of cleanup_ctx + # cleanup_ctx[1].setup. + # ... + # on_startup[0]. + # on_startup[1]. + # ... + # on_shutdown[0]. + # on_shutdown[1]. + # ... + # cleanup_ctx[1].teardown. + # cleanup_ctx[0].teardown <--- end of cleanup_ctx + # on_cleanup[0]. + # on_cleanup[1]. + # ... + + assert the_app["events"] == [ + "cleanup_ctx.setup", + "startup", + "shutdown", + "cleanup_ctx.teardown", + "cleanup", + ] diff --git a/packages/service-library/tests/aiohttp/test_application_setup.py b/packages/service-library/tests/aiohttp/test_application_setup.py index 5e115eceff6..94af1c07a33 100644 --- a/packages/service-library/tests/aiohttp/test_application_setup.py +++ b/packages/service-library/tests/aiohttp/test_application_setup.py @@ -2,7 +2,6 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -from typing import Dict from unittest.mock import Mock import pytest @@ -11,7 +10,7 @@ from servicelib.aiohttp.application_setup import ( DependencyError, ModuleCategory, - SkipModuleSetup, + SkipModuleSetupError, app_module_setup, is_setup_completed, ) @@ -27,7 +26,7 @@ def setup_bar(app: web.Application, arg1, *, raise_skip: bool = False): @app_module_setup("package.foo", ModuleCategory.ADDON, logger=log) def setup_foo(app: web.Application, arg1, kargs=33, *, raise_skip: bool = False): if raise_skip: - raise SkipModuleSetup(reason="explicit skip") + raise SkipModuleSetupError(reason="explicit skip") return True @@ -51,7 +50,7 @@ def setup_needs_foo(app: web.Application, arg1, kargs=55): @pytest.fixture -def app_config() -> Dict: +def app_config() -> dict: return { "foo": {"enabled": True}, "bar": {"enabled": False}, diff --git a/packages/service-library/tests/aiohttp/test_client_session.py b/packages/service-library/tests/aiohttp/test_client_session.py index ca7123e8cc5..74b91655c31 100644 --- a/packages/service-library/tests/aiohttp/test_client_session.py +++ b/packages/service-library/tests/aiohttp/test_client_session.py @@ -3,18 +3,19 @@ # pylint: disable=unused-variable import json -from typing import Any, Callable, Dict, Iterator +from collections.abc import Callable, Iterator +from typing import Any import pytest from aiohttp import web from aiohttp.client import ClientSession from aiohttp.test_utils import TestServer +from common_library.json_serialization import
json_dumps from servicelib.aiohttp.application_keys import APP_CLIENT_SESSION_KEY from servicelib.aiohttp.client_session import ( get_client_session, persistent_client_session, ) -from servicelib.json_serialization import json_dumps @pytest.fixture @@ -35,22 +36,21 @@ async def echo(request): assert isinstance(app[APP_CLIENT_SESSION_KEY], ClientSession) assert not app[APP_CLIENT_SESSION_KEY].closed - yield test_server + return test_server -async def test_get_always_the_same_client_session(): - app = web.Application() - session = get_client_session(app) +async def test_get_always_the_same_client_session(server: TestServer): + session = get_client_session(server.app) - assert session in app.values() - assert app[APP_CLIENT_SESSION_KEY] == session + assert session in server.app.values() + assert server.app[APP_CLIENT_SESSION_KEY] == session for _ in range(3): - assert get_client_session(app) == session + assert get_client_session(server.app) == session async def test_app_client_session_json_serialize( - server: TestServer, fake_data_dict: Dict[str, Any] + server: TestServer, fake_data_dict: dict[str, Any] ): session = get_client_session(server.app) diff --git a/packages/service-library/tests/aiohttp/test_docker_utils.py b/packages/service-library/tests/aiohttp/test_docker_utils.py new file mode 100644 index 00000000000..edd63559b3e --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_docker_utils.py @@ -0,0 +1,174 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +from collections.abc import Awaitable, Callable +from typing import Any +from unittest import mock + +import pytest +from faker import Faker +from models_library.docker import DockerGenericTag +from models_library.progress_bar import ProgressReport +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from servicelib import progress_bar +from servicelib.aiohttp.docker_utils import retrieve_image_layer_information +from servicelib.docker_utils import pull_image +from settings_library.docker_registry import RegistrySettings + + +@pytest.mark.parametrize( + "service_repo, service_tag", + [ + ("itisfoundation/sleeper", "1.0.0"), + ("itisfoundation/sleeper", "2.2.0"), + ( + "itisfoundation/sleeper", + "sha256:a6d9886311721d8d341068361ecf9998a3c7ecb0efb23ebac553602c2eca1f8f", + ), + ], +) +async def test_retrieve_image_layer_information( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + registry_settings: RegistrySettings, + osparc_service: dict[str, Any], + service_repo: str, + service_tag: str, +): + # clean first + image_name = f"{service_repo}:{service_tag}" + if "sha256" in service_tag: + image_name = f"{service_repo}@{service_tag}" + await remove_images_from_host([image_name]) + docker_image = TypeAdapter(DockerGenericTag).validate_python( + f"{registry_settings.REGISTRY_URL}/{osparc_service['image']['name']}:{osparc_service['image']['tag']}", + ) + layer_information = await retrieve_image_layer_information( + docker_image, registry_settings + ) + + assert layer_information + + +@pytest.mark.parametrize( + "image", + [ + "itisfoundation/dask-sidecar:master-github-latest", + "library/nginx:latest", + "nginx:1.25.4", + "nginx:latest", + "ubuntu@sha256:81bba8d1dde7fc1883b6e95cd46d6c9f4874374f2b360c8db82620b33f6b5ca1", + ], +) +async def test_retrieve_image_layer_information_from_external_registry( + remove_images_from_host: Callable[[list[str]], 
Awaitable[None]], + image: DockerGenericTag, + registry_settings: RegistrySettings, +): + # clean first + await remove_images_from_host([image]) + layer_information = await retrieve_image_layer_information(image, registry_settings) + assert layer_information + + +@pytest.fixture +async def mocked_log_cb(mocker: MockerFixture) -> mock.AsyncMock: + async def _log_cb(*args, **kwargs) -> None: + print(f"received log: {args}, {kwargs}") + + return mocker.AsyncMock(side_effect=_log_cb) + + +@pytest.fixture +async def mocked_progress_cb(mocker: MockerFixture) -> mock.AsyncMock: + async def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + + return mocker.AsyncMock(side_effect=_progress_cb) + + +def _assert_progress_report_values( + mocked_progress_cb: mock.AsyncMock, *, total: float +) -> None: + # NOTE: we exclude the message part here as this is already tested in servicelib + # check first progress + assert mocked_progress_cb.call_args_list[0].args[0].dict( + exclude={"message", "attempt"} + ) == ProgressReport(actual_value=0, total=total, unit="Byte").model_dump( + exclude={"message", "attempt"} + ) + # check last progress + assert mocked_progress_cb.call_args_list[-1].args[0].dict( + exclude={"message", "attempt"} + ) == ProgressReport(actual_value=total, total=total, unit="Byte").model_dump( + exclude={"message", "attempt"} + ) + + +@pytest.mark.parametrize( + "image", + ["itisfoundation/sleeper:1.0.0", "nginx:latest"], +) +async def test_pull_image( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + image: DockerGenericTag, + registry_settings: RegistrySettings, + mocked_log_cb: mock.AsyncMock, + mocked_progress_cb: mock.AsyncMock, + caplog: pytest.LogCaptureFixture, + faker: Faker, +): + # clean first + await remove_images_from_host([image]) + layer_information = await retrieve_image_layer_information(image, registry_settings) + assert layer_information + + async with progress_bar.ProgressBarData( + num_steps=layer_information.layers_total_size, + progress_report_cb=mocked_progress_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as main_progress_bar: + await pull_image( + image, + registry_settings, + main_progress_bar, + mocked_log_cb, + layer_information, + ) + mocked_log_cb.assert_called() + + _assert_progress_report_values( + mocked_progress_cb, total=layer_information.layers_total_size + ) + + mocked_progress_cb.reset_mock() + mocked_log_cb.reset_mock() + + # check there were no warnings popping up from the docker pull + # NOTE: this would pop up in case docker changes its pulling statuses + assert not [r.message for r in caplog.records if r.levelname == "WARNING"] + + # pulling a second time should also work, since the image is already there + async with progress_bar.ProgressBarData( + num_steps=layer_information.layers_total_size, + progress_report_cb=mocked_progress_cb, + description=faker.pystr(), + progress_unit="Byte", + ) as main_progress_bar: + await pull_image( + image, + registry_settings, + main_progress_bar, + mocked_log_cb, + layer_information, + ) + mocked_log_cb.assert_called() + + _assert_progress_report_values( + mocked_progress_cb, total=layer_information.layers_total_size + ) + # check there were no warnings + assert not [r.message for r in caplog.records if r.levelname == "WARNING"] diff --git a/packages/service-library/tests/aiohttp/test_incidents_monitoring.py b/packages/service-library/tests/aiohttp/test_incidents_monitoring.py deleted file mode 100644 index b822ca1fce5..00000000000 ---
a/packages/service-library/tests/aiohttp/test_incidents_monitoring.py +++ /dev/null @@ -1,49 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import asyncio -import time - -import pytest -from servicelib.aiohttp import monitor_slow_callbacks -from servicelib.aiohttp.aiopg_utils import DatabaseError, retry -from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed - - -async def slow_task(delay): - time.sleep(delay) - - -@retry(wait=wait_fixed(1), stop=stop_after_attempt(2)) -async def fails_to_reach_pg_db(): - raise DatabaseError - - -@pytest.fixture -def incidents_manager(event_loop): - incidents = [] - monitor_slow_callbacks.enable(slow_duration_secs=0.2, incidents=incidents) - - f1a = asyncio.ensure_future(slow_task(0.3), loop=event_loop) - f1b = asyncio.ensure_future(slow_task(0.3), loop=event_loop) - f1c = asyncio.ensure_future(slow_task(0.4), loop=event_loop) - - incidents_pg = None # aiopg_utils.monitor_pg_responsiveness.enable() - f2 = asyncio.ensure_future(fails_to_reach_pg_db(), loop=event_loop) - - yield {"slow_callback": incidents, "posgres_responsive": incidents_pg} - - -async def test_slow_task_incident(incidents_manager): - await asyncio.sleep(2) - assert len(incidents_manager["slow_callback"]) == 3 - - delays = [record.delay_secs for record in incidents_manager["slow_callback"]] - assert max(delays) < 0.5 - - -@pytest.mark.skip(reason="TODO: Design under development") -def test_non_responsive_incident(incidents_manager): - pass diff --git a/packages/service-library/tests/aiohttp/test_incidents_utils.py b/packages/service-library/tests/aiohttp/test_incidents_utils.py index f1ad7381a9b..b0d475191f6 100644 --- a/packages/service-library/tests/aiohttp/test_incidents_utils.py +++ b/packages/service-library/tests/aiohttp/test_incidents_utils.py @@ -1,10 +1,13 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name # pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + import operator +from dataclasses import dataclass -import attr from servicelib.aiohttp.incidents import BaseIncident, LimitedOrderedStack @@ -31,7 +34,7 @@ class IntsRegistry(LimitedOrderedStack[int]): def test_incidents_stack(): - @attr.s(auto_attribs=True) + @dataclass class TestIncident(BaseIncident): gravity: int diff --git a/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py b/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py new file mode 100644 index 00000000000..40fd116ffad --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_monitor_slow_callbacks.py @@ -0,0 +1,54 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +# pylint:disable=unused-variable + +import asyncio +import time +from collections.abc import Iterable + +import pytest +from servicelib.aiohttp import monitor_slow_callbacks +from servicelib.aiohttp.aiopg_utils import DatabaseError +from tenacity import retry +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_fixed + + +async def slow_task(delay): + time.sleep(delay) # noqa: ASYNC101 + + +@retry(wait=wait_fixed(1), stop=stop_after_attempt(2)) +async def fails_to_reach_pg_db(): + raise DatabaseError + + +@pytest.fixture +def incidents_manager(event_loop) -> dict: + incidents = [] + 
monitor_slow_callbacks.enable(slow_duration_secs=0.2, incidents=incidents) + + asyncio.ensure_future(slow_task(0.3)) # noqa: RUF006 + asyncio.ensure_future(slow_task(0.3)) # noqa: RUF006 + asyncio.ensure_future(slow_task(0.4)) # noqa: RUF006 + + incidents_pg = None # aiopg_utils.monitor_pg_responsiveness.enable() + asyncio.ensure_future(fails_to_reach_pg_db()) # noqa: RUF006 + + return {"slow_callback": incidents, "posgres_responsive": incidents_pg} + + +@pytest.fixture +def disable_monitoring() -> Iterable[None]: + original_handler = asyncio.events.Handle._run # noqa: SLF001 + yield None + asyncio.events.Handle._run = original_handler # noqa: SLF001 + + +async def test_slow_task_incident(disable_monitoring: None, incidents_manager: dict): + await asyncio.sleep(2) + assert len(incidents_manager["slow_callback"]) == 3 + + delays = [record.delay_secs for record in incidents_manager["slow_callback"]] + assert max(delays) < 0.5 diff --git a/packages/service-library/tests/aiohttp/test_monitoring.py b/packages/service-library/tests/aiohttp/test_monitoring.py index 18b6043b658..a7ad0d003d8 100644 --- a/packages/service-library/tests/aiohttp/test_monitoring.py +++ b/packages/service-library/tests/aiohttp/test_monitoring.py @@ -3,21 +3,24 @@ # pylint: disable=unused-variable -from asyncio import AbstractEventLoop -from typing import Any, Callable +from collections.abc import Callable +from typing import Any import pytest from aiohttp import web from aiohttp.test_utils import TestClient from faker import Faker -from prometheus_client.parser import text_string_to_metric_families -from servicelib.aiohttp.monitoring import UNDEFINED_REGULAR_USER_AGENT, setup_monitoring -from servicelib.common_headers import X_SIMCORE_USER_AGENT +from prometheus_client.openmetrics.parser import text_string_to_metric_families +from servicelib.aiohttp import status +from servicelib.aiohttp.monitoring import setup_monitoring +from servicelib.common_headers import ( + UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE, + X_SIMCORE_USER_AGENT, +) @pytest.fixture -def client( - event_loop: AbstractEventLoop, +async def client( aiohttp_client: Callable, unused_tcp_port_factory: Callable, ) -> TestClient: @@ -33,11 +36,9 @@ async def monitored_request(request: web.Request) -> web.Response: for resource in app.router.resources(): print(resource) - setup_monitoring(app, app_name="pytest_app", version="0.0.1") + setup_monitoring(app, app_name="pytest_app") - return event_loop.run_until_complete( - aiohttp_client(app, server_kwargs={"port": ports[0]}) - ) + return await aiohttp_client(app, server_kwargs={"port": ports[0]}) def _assert_metrics_contain_entry( @@ -72,28 +73,27 @@ async def test_setup_monitoring(client: TestClient): NUM_CALLS = 12 for _ in range(NUM_CALLS): response = await client.get("/monitored_request") - assert response.status == web.HTTPOk.status_code + assert response.status == status.HTTP_200_OK data = await response.json() assert data assert "data" in data assert data["data"] == "OK" response = await client.get("/metrics") - assert response.status == web.HTTPOk.status_code + assert response.status == status.HTTP_200_OK # by calling it twice, the metrics endpoint should also be incremented response = await client.get("/metrics") - assert response.status == web.HTTPOk.status_code + assert response.status == status.HTTP_200_OK metrics_as_text = await response.text() _assert_metrics_contain_entry( metrics_as_text, metric_name="http_requests", sample_name="http_requests_total", labels={ - "app_name": "pytest_app", 
"endpoint": "/monitored_request", "http_status": "200", "method": "GET", - "simcore_user_agent": UNDEFINED_REGULAR_USER_AGENT, + "simcore_user_agent": UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE, }, value=NUM_CALLS, ) @@ -103,11 +103,10 @@ async def test_setup_monitoring(client: TestClient): metric_name="http_requests", sample_name="http_requests_total", labels={ - "app_name": "pytest_app", "endpoint": "/metrics", "http_status": "200", "method": "GET", - "simcore_user_agent": UNDEFINED_REGULAR_USER_AGENT, + "simcore_user_agent": UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE, }, value=1, ) @@ -119,17 +118,16 @@ async def test_request_with_simcore_user_agent(client: TestClient, faker: Faker) "/monitored_request", headers={X_SIMCORE_USER_AGENT: faker_simcore_user_agent}, ) - assert response.status == web.HTTPOk.status_code + assert response.status == status.HTTP_200_OK response = await client.get("/metrics") - assert response.status == web.HTTPOk.status_code + assert response.status == status.HTTP_200_OK metrics_as_text = await response.text() _assert_metrics_contain_entry( metrics_as_text, metric_name="http_requests", sample_name="http_requests_total", labels={ - "app_name": "pytest_app", "endpoint": "/monitored_request", "http_status": "200", "method": "GET", diff --git a/packages/service-library/tests/aiohttp/test_monitoring_middleware.py b/packages/service-library/tests/aiohttp/test_monitoring_middleware.py new file mode 100644 index 00000000000..7c081a508ab --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_monitoring_middleware.py @@ -0,0 +1,32 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import pytest +from aiohttp import web +from aiohttp.test_utils import TestClient, TestServer +from prometheus_client.openmetrics.exposition import ( + CONTENT_TYPE_LATEST, +) +from servicelib.aiohttp.monitoring import setup_monitoring + + +@pytest.fixture +def aiohttp_app_with_monitoring(): + app = web.Application() + setup_monitoring(app, app_name="test_app") + return app + + +@pytest.fixture +async def client(aiohttp_app_with_monitoring): + async with TestServer(aiohttp_app_with_monitoring) as server: + async with TestClient(server) as client: + yield client + + +async def test_metrics_endpoint(client): + response = await client.get("/metrics") + assert response.status == 200 + assert response.headers["Content-Type"] == CONTENT_TYPE_LATEST + body = await response.text() + assert "# HELP" in body # Check for Prometheus metrics format diff --git a/packages/service-library/tests/aiohttp/test_observer.py b/packages/service-library/tests/aiohttp/test_observer.py new file mode 100644 index 00000000000..114e0a6cbed --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_observer.py @@ -0,0 +1,36 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +import pytest +from aiohttp import web +from pytest_mock import MockerFixture +from servicelib.aiohttp.observer import ( + emit, + registed_observers_report, + register_observer, + setup_observer_registry, +) + + +@pytest.fixture +def app() -> web.Application: + _app = web.Application() + setup_observer_registry(_app) + return _app + + +async def test_observer(mocker: MockerFixture, app: web.Application): + # register a couroutine as callback function + cb_function = mocker.AsyncMock(return_value=None) + + register_observer(app, cb_function, event="my_test_event") + + registed_observers_report(app) + + await emit(app, "my_invalid_test_event") + 
cb_function.assert_not_called() + + await emit(app, "my_test_event") + cb_function.assert_called() diff --git a/packages/service-library/tests/aiohttp/test_openapi_validation.py b/packages/service-library/tests/aiohttp/test_openapi_validation.py deleted file mode 100644 index 062a5a9df50..00000000000 --- a/packages/service-library/tests/aiohttp/test_openapi_validation.py +++ /dev/null @@ -1,109 +0,0 @@ -""" Tests OpenAPI validation middlewares - -How to spec NULLABLE OBJECTS? -SEE https://stackoverflow.com/questions/40920441/how-to-specify-a-property-can-be-null-or-a-reference-with-swagger - - - SEE https://github.com/p1c2u/openapi-core -""" -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - - -import jsonschema -import openapi_spec_validator -import pytest -from aiohttp import web -from packaging.version import Version -from servicelib.aiohttp import openapi -from servicelib.aiohttp.application_keys import APP_OPENAPI_SPECS_KEY -from servicelib.aiohttp.rest_middlewares import ( - envelope_middleware_factory, - error_middleware_factory, - validate_middleware_factory, -) -from servicelib.aiohttp.rest_responses import is_enveloped, unwrap_envelope -from servicelib.aiohttp.rest_routing import create_routes_from_namespace - -from .tutils import Handlers - - -@pytest.fixture -async def specs(here): - openapi_path = here / "data" / "oas3" / "enveloped_responses.yaml" - assert openapi_path.exists() - specs = await openapi.create_openapi_specs(openapi_path) - return specs - - -@pytest.fixture -def client(event_loop, aiohttp_client, specs): - app = web.Application() - - # routes - handlers = Handlers() - routes = create_routes_from_namespace(specs, handlers, strict=False) - - app.router.add_routes(routes) - - # validators - app[APP_OPENAPI_SPECS_KEY] = specs - base = openapi.get_base_path(specs) - - # middlewares - app.middlewares.append(error_middleware_factory(base)) - app.middlewares.append(validate_middleware_factory(base)) - app.middlewares.append(envelope_middleware_factory(base)) - - return event_loop.run_until_complete(aiohttp_client(app)) - - -@pytest.mark.parametrize( - "path", - [ - "/health", - "/dict", - "/envelope", - "/list", - "/attobj", - "/string", - "/number", - ], -) -async def test_validate_handlers(path, client, specs): - - assert Version(openapi_spec_validator.__version__) < Version("0.5.0") and Version( - jsonschema.__version__ - ) < Version( - "4.0" - ), """ - we have a very old version of openapi-core that is causing further troubles - specifically when we want to have nullable objects. For that reason we have constraint - these libraries and we can do nothing until we do not deprecate or fully upgrade openapi! - SEE how to specify nullable object in https://stackoverflow.com/questions/40920441/how-to-specify-a-property-can-be-null-or-a-reference-with-swagger - - If these libraries are upgraded, the test_validate_handlers[/dict] will fail because he cannot validate that `error=None`, i.e. - that the property 'error' is a nullable object! 
- """ - - base = openapi.get_base_path(specs) - response = await client.get(base + path) - payload = await response.json() - - assert is_enveloped(payload) - - data, error = unwrap_envelope(payload) - assert not error - assert data - - -# "/mixed" FIXME: openapi core bug reported in https://github.com/p1c2u/openapi-core/issues/153 -# Raises AssertionError: assert not {'errors': [{'code': 'InvalidMediaTypeValue', 'field': None, 'message': 'Mimetype invalid: Value not valid for schema', 'resource': None}], 'logs': [], 'status': 503} -@pytest.mark.xfail( - reason="openapi core bug reported in https://github.com/p1c2u/openapi-core/issues/153", - strict=True, - raises=AssertionError, -) -async def test_validate_handlers_mixed(client, specs): - await test_validate_handlers("/mixed", client, specs) diff --git a/packages/service-library/tests/aiohttp/test_requests_validation.py b/packages/service-library/tests/aiohttp/test_requests_validation.py index 7d54d66a775..97c2b317b6a 100644 --- a/packages/service-library/tests/aiohttp/test_requests_validation.py +++ b/packages/service-library/tests/aiohttp/test_requests_validation.py @@ -3,20 +3,29 @@ # pylint: disable=unused-variable import json -from typing import Callable +from collections.abc import Callable from uuid import UUID import pytest from aiohttp import web -from aiohttp.test_utils import TestClient +from aiohttp.test_utils import TestClient, make_mocked_request +from common_library.json_serialization import json_dumps from faker import Faker -from pydantic import BaseModel, Extra, Field +from models_library.rest_base import RequestParameters, StrictRequestParameters +from models_library.rest_ordering import ( + OrderBy, + OrderDirection, + create_ordering_query_model_class, +) +from pydantic import BaseModel, ConfigDict, Field +from servicelib.aiohttp import status from servicelib.aiohttp.requests_validation import ( parse_request_body_as, + parse_request_headers_as, parse_request_path_parameters_as, parse_request_query_parameters_as, ) -from servicelib.json_serialization import json_dumps +from yarl import URL RQT_USERID_KEY = f"{__name__}.user_id" APP_SECRET_KEY = f"{__name__}.secret" @@ -28,7 +37,7 @@ def jsonable_encoder(data): return json.loads(json_dumps(data)) -class MyRequestContext(BaseModel): +class MyRequestContext(RequestParameters): user_id: int = Field(alias=RQT_USERID_KEY) secret: str = Field(alias=APP_SECRET_KEY) @@ -37,30 +46,40 @@ def create_fake(cls, faker: Faker): return cls(user_id=faker.pyint(), secret=faker.password()) -class MyRequestPathParams(BaseModel): +class MyRequestPathParams(StrictRequestParameters): project_uuid: UUID - class Config: - extra = Extra.forbid - @classmethod def create_fake(cls, faker: Faker): return cls(project_uuid=faker.uuid4()) -class MyRequestQueryParams(BaseModel): +class MyRequestQueryParams(RequestParameters): is_ok: bool = True label: str - def as_params(self, **kwargs) -> dict[str, str]: - data = self.dict(**kwargs) - return {k: f"{v}" for k, v in data.items()} - @classmethod def create_fake(cls, faker: Faker): return cls(is_ok=faker.pybool(), label=faker.word()) +class MyRequestHeadersParams(RequestParameters): + user_agent: str = Field(alias="X-Simcore-User-Agent") + optional_header: str | None = Field(default=None, alias="X-Simcore-Optional-Header") + model_config = ConfigDict( + populate_by_name=False, + ) + + @classmethod + def create_fake(cls, faker: Faker): + return cls( + **{ + "X-Simcore-User-Agent": faker.pystr(), + "X-Simcore-Optional-Header": faker.word(), + } + ) + + class 
Sub(BaseModel): a: float = 33 @@ -92,7 +111,9 @@ def client(event_loop, aiohttp_client: Callable, faker: Faker) -> TestClient: async def _handler(request: web.Request) -> web.Response: # --------- UNDER TEST ------- # NOTE: app context does NOT need to be validated everytime! - context = MyRequestContext.parse_obj({**dict(request.app), **dict(request)}) + context = MyRequestContext.model_validate( + {**dict(request.app), **dict(request)} + ) path_params = parse_request_path_parameters_as( MyRequestPathParams, request, use_enveloped_error_v1=False @@ -100,6 +121,9 @@ async def _handler(request: web.Request) -> web.Response: query_params = parse_request_query_parameters_as( MyRequestQueryParams, request, use_enveloped_error_v1=False ) + headers_params = parse_request_headers_as( + MyRequestHeadersParams, request, use_enveloped_error_v1=False + ) body = await parse_request_body_as( MyBody, request, use_enveloped_error_v1=False ) @@ -107,10 +131,11 @@ async def _handler(request: web.Request) -> web.Response: return web.json_response( { - "parameters": path_params.dict(), - "queries": query_params.dict(), - "body": body.dict(), - "context": context.dict(), + "parameters": path_params.model_dump(), + "queries": query_params.model_dump(), + "body": body.model_dump(), + "context": context.model_dump(), + "headers": headers_params.model_dump(), }, dumps=json_dumps, ) @@ -122,8 +147,7 @@ async def _middleware(request: web.Request, handler): # request context request[RQT_USERID_KEY] = 42 request["RQT_IGNORE_CONTEXT"] = "not interesting" - resp = await handler(request) - return resp + return await handler(request) app = web.Application( middlewares=[ @@ -142,9 +166,8 @@ async def _middleware(request: web.Request, handler): @pytest.fixture -def path_params(faker: Faker): - path_params = MyRequestPathParams.create_fake(faker) - return path_params +def path_params(faker: Faker) -> MyRequestPathParams: + return MyRequestPathParams.create_fake(faker) @pytest.fixture @@ -157,43 +180,53 @@ def body(faker: Faker) -> MyBody: return MyBody.create_fake(faker) +@pytest.fixture +def headers_params(faker: Faker) -> MyRequestHeadersParams: + return MyRequestHeadersParams.create_fake(faker) + + async def test_parse_request_as( client: TestClient, path_params: MyRequestPathParams, query_params: MyRequestQueryParams, body: MyBody, + headers_params: MyRequestHeadersParams, ): assert client.app r = await client.get( f"/projects/{path_params.project_uuid}", params=query_params.as_params(), - json=body.dict(), + json=body.model_dump(), + headers=headers_params.model_dump(by_alias=True), ) - assert r.status == web.HTTPOk.status_code, f"{await r.text()}" + assert r.status == status.HTTP_200_OK, f"{await r.text()}" got = await r.json() - assert got["parameters"] == jsonable_encoder(path_params.dict()) - assert got["queries"] == jsonable_encoder(query_params.dict()) - assert got["body"] == body.dict() + assert got["parameters"] == jsonable_encoder(path_params.model_dump()) + assert got["queries"] == jsonable_encoder(query_params.model_dump()) + assert got["body"] == body.model_dump() assert got["context"] == { "secret": client.app[APP_SECRET_KEY], "user_id": 42, } + assert got["headers"] == jsonable_encoder(headers_params.model_dump()) async def test_parse_request_with_invalid_path_params( client: TestClient, query_params: MyRequestQueryParams, body: MyBody, + headers_params: MyRequestHeadersParams, ): r = await client.get( "/projects/invalid-uuid", params=query_params.as_params(), - json=body.dict(), + json=body.model_dump(), 
+ headers=headers_params.model_dump(by_alias=True), ) - assert r.status == web.HTTPUnprocessableEntity.status_code, f"{await r.text()}" + assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}" response_body = await r.json() assert response_body["error"].pop("resource") @@ -203,8 +236,8 @@ async def test_parse_request_with_invalid_path_params( "details": [ { "loc": "project_uuid", - "msg": "value is not a valid uuid", - "type": "type_error.uuid", + "msg": "Input should be a valid UUID, invalid character: expected an optional prefix of `urn:uuid:` followed by [0-9a-fA-F-], found `i` at 1", + "type": "uuid_parsing", } ], } @@ -215,14 +248,16 @@ async def test_parse_request_with_invalid_query_params( client: TestClient, path_params: MyRequestPathParams, body: MyBody, + headers_params: MyRequestHeadersParams, ): r = await client.get( f"/projects/{path_params.project_uuid}", params={}, - json=body.dict(), + json=body.model_dump(), + headers=headers_params.model_dump(by_alias=True), ) - assert r.status == web.HTTPUnprocessableEntity.status_code, f"{await r.text()}" + assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}" response_body = await r.json() assert response_body["error"].pop("resource") @@ -232,8 +267,8 @@ async def test_parse_request_with_invalid_query_params( "details": [ { "loc": "label", - "msg": "field required", - "type": "value_error.missing", + "msg": "Field required", + "type": "missing", } ], } @@ -244,14 +279,16 @@ async def test_parse_request_with_invalid_body( client: TestClient, path_params: MyRequestPathParams, query_params: MyRequestQueryParams, + headers_params: MyRequestHeadersParams, ): r = await client.get( f"/projects/{path_params.project_uuid}", params=query_params.as_params(), json={"invalid": "body"}, + headers=headers_params.model_dump(by_alias=True), ) - assert r.status == web.HTTPUnprocessableEntity.status_code, f"{await r.text()}" + assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}" response_body = await r.json() @@ -263,13 +300,13 @@ async def test_parse_request_with_invalid_body( "details": [ { "loc": "x", - "msg": "field required", - "type": "value_error.missing", + "msg": "Field required", + "type": "missing", }, { "loc": "z", - "msg": "field required", - "type": "value_error.missing", + "msg": "Field required", + "type": "missing", }, ], } @@ -280,13 +317,64 @@ async def test_parse_request_with_invalid_json_body( client: TestClient, path_params: MyRequestPathParams, query_params: MyRequestQueryParams, + headers_params: MyRequestHeadersParams, ): r = await client.get( f"/projects/{path_params.project_uuid}", params=query_params.as_params(), data=b"[ 1 2, 3 'broken-json' ]", + headers=headers_params.model_dump(by_alias=True), ) body = await r.text() - assert r.status == web.HTTPBadRequest.status_code, body + assert r.status == status.HTTP_400_BAD_REQUEST, body + + +async def test_parse_request_with_invalid_headers_params( + client: TestClient, + path_params: MyRequestPathParams, + query_params: MyRequestQueryParams, + body: MyBody, + headers_params: MyRequestHeadersParams, +): + + r = await client.get( + f"/projects/{path_params.project_uuid}", + params=query_params.as_params(), + json=body.model_dump(), + headers=headers_params.model_dump(), # we pass the wrong names + ) + assert r.status == status.HTTP_422_UNPROCESSABLE_ENTITY, f"{await r.text()}" + + response_body = await r.json() + assert response_body["error"].pop("resource") + assert response_body == { + "error": { + "msg": 
"Invalid parameter/s 'X-Simcore-User-Agent' in request headers", + "details": [ + { + "loc": "X-Simcore-User-Agent", + "msg": "Field required", + "type": "missing", + } + ], + } + } + + +def test_parse_request_query_parameters_as_with_order_by_query_models(): + + OrderQueryModel = create_ordering_query_model_class( + ordering_fields={"modified", "name"}, default=OrderBy(field="name") + ) + + expected = OrderBy(field="name", direction=OrderDirection.ASC) + + url = URL("/test").with_query(order_by=expected.model_dump_json()) + + request = make_mocked_request("GET", path=f"{url}") + + query_params = parse_request_query_parameters_as(OrderQueryModel, request) + + assert OrderBy.model_construct(**query_params.order_by.model_dump()) == expected diff --git a/packages/service-library/tests/aiohttp/test_rest_middlewares.py b/packages/service-library/tests/aiohttp/test_rest_middlewares.py index 42ebf20de46..de5e80b85ae 100644 --- a/packages/service-library/tests/aiohttp/test_rest_middlewares.py +++ b/packages/service-library/tests/aiohttp/test_rest_middlewares.py @@ -1,65 +1,197 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import asyncio +import json +import logging +from collections.abc import Callable +from dataclasses import dataclass +from typing import Any import pytest from aiohttp import web -from servicelib.aiohttp import openapi -from servicelib.aiohttp.application_keys import APP_OPENAPI_SPECS_KEY +from aiohttp.test_utils import TestClient +from common_library.json_serialization import json_dumps +from servicelib.aiohttp import status from servicelib.aiohttp.rest_middlewares import ( envelope_middleware_factory, error_middleware_factory, ) -from servicelib.aiohttp.rest_responses import is_enveloped, unwrap_envelope -from servicelib.aiohttp.rest_routing import create_routes_from_namespace +from servicelib.rest_responses import is_enveloped, unwrap_envelope -from .tutils import Handlers +@dataclass +class Data: + x: int = 3 + y: str = "foo" -@pytest.fixture -async def specs(here): - openapi_path = here / "data" / "oas3" / "enveloped_responses.yaml" - assert openapi_path.exists() - specs = await openapi.create_openapi_specs(openapi_path) - return specs + +class SomeUnexpectedError(Exception): ... 
+ + +class Handlers: + @staticmethod + async def get_health_wrong(_request: web.Request): + return { + "name": __name__.split(".")[0], + "version": "1.0", + "status": "SERVICE_RUNNING", + "invalid_entry": 125, + } + + @staticmethod + async def get_health(_request: web.Request): + return { + "name": __name__.split(".")[0], + "version": "1.0", + "status": "SERVICE_RUNNING", + "api_version": "1.0", + } + + @staticmethod + async def get_dict(_request: web.Request): + return {"x": 3, "y": "3"} + + @staticmethod + async def get_envelope(_request: web.Request): + data = {"x": 3, "y": "3"} + return {"error": None, "data": data} + + @staticmethod + async def get_list(_request: web.Request): + return [{"x": 3, "y": "3"}] * 3 + + @staticmethod + async def get_obj(_request: web.Request): + return Data(3, "3") + + @staticmethod + async def get_string(_request: web.Request): + return "foo" + + @staticmethod + async def get_number(_request: web.Request): + return 3 + + @staticmethod + async def get_mixed(_request: web.Request): + return [{"x": 3, "y": "3", "z": [Data(3, "3")] * 2}] * 3 + + @classmethod + def returns_value(cls, suffix): + handlers = cls() + coro = getattr(handlers, "get_" + suffix) + loop = asyncio.get_event_loop() + returned_value = loop.run_until_complete(coro(None)) + return json.loads(json_dumps(returned_value)) + + EXPECTED_RAISE_UNEXPECTED_REASON = "Unexpected error" + + @classmethod + async def raise_exception(cls, request: web.Request): + exc_name = request.query.get("exc") + match exc_name: + case NotImplementedError.__name__: + raise NotImplementedError + case asyncio.TimeoutError.__name__: + raise TimeoutError + case web.HTTPOk.__name__: + raise web.HTTPOk # 2XX + case web.HTTPUnauthorized.__name__: + raise web.HTTPUnauthorized # 4XX + case web.HTTPServiceUnavailable.__name__: + raise web.HTTPServiceUnavailable # 5XX + case _: # unexpected + raise SomeUnexpectedError(cls.EXPECTED_RAISE_UNEXPECTED_REASON) + + @staticmethod + async def raise_error(_request: web.Request): + raise web.HTTPNotFound + + @staticmethod + async def raise_error_with_reason(_request: web.Request): + raise web.HTTPNotFound(reason="A short phrase") + + @staticmethod + async def raise_success(_request: web.Request): + raise web.HTTPOk + + @staticmethod + async def raise_success_with_reason(_request: web.Request): + raise web.HTTPOk(reason="I'm ok") + + @staticmethod + async def raise_success_with_text(_request: web.Request): + # NOTE: explicitly NOT enveloped! 
+ raise web.HTTPOk(reason="I'm ok", text=json.dumps({"ok": True})) @pytest.fixture -def client(event_loop, aiohttp_client, specs): +async def client( + aiohttp_client: Callable, + monkeypatch: pytest.MonkeyPatch, +): + monkeypatch.setenv("SC_BUILD_TARGET", "production") + app = web.Application() # routes - handlers = Handlers() - routes = create_routes_from_namespace(specs, handlers, strict=False) - app.router.add_routes(routes) - - # validators - app[APP_OPENAPI_SPECS_KEY] = specs + app.router.add_routes( + [ + web.get(path, handler, name=handler.__name__) + for path, handler in [ + ("/v1/health", Handlers.get_health), + ("/v1/dict", Handlers.get_dict), + ("/v1/envelope", Handlers.get_envelope), + ("/v1/list", Handlers.get_list), + ("/v1/obj", Handlers.get_obj), + ("/v1/string", Handlers.get_string), + ("/v1/number", Handlers.get_number), + ("/v1/mixed", Handlers.get_mixed), + # custom use cases + ("/v1/raise_exception", Handlers.raise_exception), + ("/v1/raise_error", Handlers.raise_error), + ("/v1/raise_error_with_reason", Handlers.raise_error_with_reason), + ("/v1/raise_success", Handlers.raise_success), + ("/v1/raise_success_with_reason", Handlers.raise_success_with_reason), + ("/v1/raise_success_with_text", Handlers.raise_success_with_text), + ] + ] + ) + app.router.add_routes( + [ + web.get( + "/free/raise_exception", + Handlers.raise_exception, + name="raise_exception_without_middleware", + ) + ] + ) # middlewares - base = openapi.get_base_path(specs) - app.middlewares.append(error_middleware_factory(base)) - app.middlewares.append(envelope_middleware_factory(base)) + app.middlewares.append(error_middleware_factory(api_version="/v1")) + app.middlewares.append(envelope_middleware_factory(api_version="/v1")) - return event_loop.run_until_complete(aiohttp_client(app)) + return await aiohttp_client(app) @pytest.mark.parametrize( "path,expected_data", [ - ("/health", Handlers.get("health")), - ("/dict", Handlers.get("dict")), - ("/envelope", Handlers.get("envelope")["data"]), - ("/list", Handlers.get("list")), - ("/attobj", Handlers.get("attobj")), - ("/string", Handlers.get("string")), - ("/number", Handlers.get("number")), - ("/mixed", Handlers.get("mixed")), + ("/health", Handlers.returns_value("health")), + ("/dict", Handlers.returns_value("dict")), + ("/envelope", Handlers.returns_value("envelope")["data"]), + ("/list", Handlers.returns_value("list")), + ("/obj", Handlers.returns_value("obj")), + ("/string", Handlers.returns_value("string")), + ("/number", Handlers.returns_value("number")), + ("/mixed", Handlers.returns_value("mixed")), ], ) -async def test_envelope_middleware(path, expected_data, client, specs): - base = openapi.get_base_path(specs) - response = await client.get(base + path) +async def test_envelope_middleware(path: str, expected_data: Any, client: TestClient): + response = await client.get("/v1" + path) payload = await response.json() assert is_enveloped(payload) @@ -69,20 +201,71 @@ async def test_envelope_middleware(path, expected_data, client, specs): assert data == expected_data -async def test_404_not_found(client, specs): - # see FIXME: in validate_middleware_factory - +async def test_404_not_found_when_entrypoint_not_exposed(client: TestClient): response = await client.get("/some-invalid-address-outside-api") payload = await response.text() - assert response.status == 404, payload + assert response.status == status.HTTP_404_NOT_FOUND, payload - api_base = openapi.get_base_path(specs) - response = await client.get(api_base + "/some-invalid-address-in-api") 
+ response = await client.get("/v1/some-invalid-address-in-api") payload = await response.json() - assert response.status == 404, payload + assert response.status == status.HTTP_404_NOT_FOUND, payload assert is_enveloped(payload) data, error = unwrap_envelope(payload) assert error assert not data + + +async def test_raised_unhandled_exception( + client: TestClient, caplog: pytest.LogCaptureFixture +): + with caplog.at_level(logging.ERROR): + response = await client.get("/v1/raise_exception") + + # respond the client with 500 + assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR + + # response model + data, error = unwrap_envelope(await response.json()) + assert not data + assert error + + # avoids details + assert not error.get("errors") + assert not error.get("logs") + + # - log sufficient information to diagnose the issue + # + # ERROR servicelib.aiohttp.rest_middlewares:rest_middlewares.py:75 We apologize ... [OEC:128594540599840]. + # { + # "exception_details": "Unexpected error", + # "error_code": "OEC:128594540599840", + # "context": { + # "request.remote": "127.0.0.1", + # "request.method": "GET", + # "request.path": "/v1/raise_exception" + # }, + # "tip": null + # } + # Traceback (most recent call last): + # File "/osparc-simcore/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py", line 94, in _middleware_handler + # return await handler(request) + # ^^^^^^^^^^^^^^^^^^^^^^ + # File "/osparc-simcore/packages/service-library/src/servicelib/aiohttp/rest_middlewares.py", line 186, in _middleware_handler + # resp = await handler(request) + # ^^^^^^^^^^^^^^^^^^^^^^ + # File "/osparc-simcore/packages/service-library/tests/aiohttp/test_rest_middlewares.py", line 109, in raise_exception + # raise SomeUnexpectedError(cls.EXPECTED_RAISE_UNEXPECTED_REASON) + # tests.aiohttp.test_rest_middlewares.SomeUnexpectedError: Unexpected error + + assert response.method in caplog.text + assert response.url.path in caplog.text + assert "exception_details" in caplog.text + assert "request.remote" in caplog.text + assert "context" in caplog.text + assert SomeUnexpectedError.__name__ in caplog.text + assert Handlers.EXPECTED_RAISE_UNEXPECTED_REASON in caplog.text + + # log OEC + assert "OEC:" in caplog.text diff --git a/packages/service-library/tests/aiohttp/test_rest_responses.py b/packages/service-library/tests/aiohttp/test_rest_responses.py index ce47777cd4f..d172d636f66 100644 --- a/packages/service-library/tests/aiohttp/test_rest_responses.py +++ b/packages/service-library/tests/aiohttp/test_rest_responses.py @@ -3,8 +3,10 @@ # pylint: disable=unused-variable import itertools +import json import pytest +from aiohttp import web from aiohttp.web_exceptions import ( HTTPBadRequest, HTTPError, @@ -14,12 +16,14 @@ HTTPNotModified, HTTPOk, ) -from servicelib.aiohttp.rest_responses import ( +from common_library.error_codes import ErrorCodeStr, create_error_code +from servicelib.aiohttp import status +from servicelib.aiohttp.rest_responses import create_http_error, exception_to_response +from servicelib.aiohttp.web_exceptions_extension import ( _STATUS_CODE_TO_HTTP_ERRORS, - get_http_error, + get_http_error_class_or_none, ) - -# +from servicelib.mimetype_constants import MIMETYPE_APPLICATION_JSON # SEE https://httpstatuses.com/ # - below 1xx -> invalid @@ -31,17 +35,17 @@ @pytest.mark.parametrize( - "http_exc", (HTTPBadRequest, HTTPGone, HTTPInternalServerError) + "http_exc", [HTTPBadRequest, HTTPGone, HTTPInternalServerError] ) def test_get_http_exception_class_from_code(http_exc: 
HTTPException): - assert get_http_error(http_exc.status_code) == http_exc + assert get_http_error_class_or_none(http_exc.status_code) == http_exc @pytest.mark.parametrize( "status_code", itertools.chain(BELOW_1XX, NONE_ERRORS, ABOVE_599) ) def test_get_none_for_invalid_or_not_errors_code(status_code): - assert get_http_error(status_code) is None + assert get_http_error_class_or_none(status_code) is None @pytest.mark.parametrize( @@ -53,3 +57,98 @@ def test_collected_http_errors_map(status_code: int, http_error_cls: type[HTTPEr assert http_error_cls != HTTPError assert issubclass(http_error_cls, HTTPError) + + +@pytest.mark.parametrize("skip_details", [True, False]) +@pytest.mark.parametrize("error_code", [None, create_error_code(Exception("fake"))]) +def tests_exception_to_response(skip_details: bool, error_code: ErrorCodeStr | None): + + expected_reason = "Something whent wrong !" + expected_exceptions: list[Exception] = [RuntimeError("foo")] + + http_error = create_http_error( + errors=expected_exceptions, + reason=expected_reason, + http_error_cls=web.HTTPInternalServerError, + skip_internal_error_details=skip_details, + error_code=error_code, + ) + + # For now until deprecated SEE https://github.com/aio-libs/aiohttp/issues/2415 + assert isinstance(http_error, Exception) + assert isinstance(http_error, web.Response) + assert hasattr(http_error, "__http_exception__") + + # until they have exception.make_response(), we user + response = exception_to_response(http_error) + assert isinstance(response, web.Response) + assert not isinstance(response, Exception) + assert not hasattr(response, "__http_exception__") + + # checks response components + assert response.content_type == MIMETYPE_APPLICATION_JSON + assert response.status == status.HTTP_500_INTERNAL_SERVER_ERROR + assert response.text + assert response.body + + # checks response model + response_json = json.loads(response.text) + assert response_json["data"] is None + assert response_json["error"]["message"] == expected_reason + assert response_json["error"]["supportId"] == error_code + assert response_json["error"]["status"] == response.status + + +@pytest.mark.parametrize( + "input_message, expected_output", + [ + (None, None), # None input returns None + ("", None), # Empty string returns None + ("Simple message", "Simple message"), # Simple message stays the same + ( + "Message\nwith\nnewlines", + "Message with newlines", + ), # Newlines are replaced with spaces + ("A" * 100, "A" * 47 + "..."), # Long message gets truncated with ellipsis + ( + "Line1\nLine2\nLine3" + "X" * 100, + "Line1 Line2 Line3" + "X" * 30 + "...", + ), # Combined case: newlines and truncation with ellipsis + ], + ids=[ + "none_input", + "empty_string", + "simple_message", + "newlines_replaced", + "long_message_truncated", + "newlines_and_truncation", + ], +) +def test_safe_status_message(input_message: str | None, expected_output: str | None): + from servicelib.aiohttp.rest_responses import safe_status_message + + result = safe_status_message(input_message) + assert result == expected_output + + # Test with custom max_length + custom_max = 10 + result_custom = safe_status_message(input_message, max_length=custom_max) + + # Check length constraint is respected + if result_custom is not None: + assert len(result_custom) <= custom_max + # Check that ellipsis is added when truncated + if input_message and len(input_message.replace("\n", " ")) > custom_max: + assert result_custom.endswith("...") + + # Verify it can be used in a web response without raising 
exceptions + try: + # This would fail with long or multiline reasons + if result is not None: + web.Response(reason=result) + + # Test with custom length result too + if result_custom is not None: + web.Response(reason=result_custom) + except ValueError: + pytest.fail("safe_status_message result caused an exception in web.Response") diff --git a/packages/service-library/tests/aiohttp/test_rest_routing.py b/packages/service-library/tests/aiohttp/test_rest_routing.py deleted file mode 100644 index 458a9d70e45..00000000000 --- a/packages/service-library/tests/aiohttp/test_rest_routing.py +++ /dev/null @@ -1,77 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import pytest -from servicelib.aiohttp import openapi -from servicelib.aiohttp.rest_routing import ( - create_routes_from_namespace, - get_handlers_from_namespace, - iter_path_operations, - map_handlers_with_operations, -) - -from .tutils import Handlers - - -@pytest.fixture -async def specs(here): - openapi_path = here / "data" / "oas3" / "enveloped_responses.yaml" - assert openapi_path.exists() - specs = await openapi.create_openapi_specs(openapi_path) - return specs - - -def test_filtered_routing(specs): - handlers = Handlers() - found = get_handlers_from_namespace(handlers) - - hdl_sel = {name: hdl for name, hdl in found.items() if "i" in name} - opr_iter = ( - (mth, url, opname, _tags) - for mth, url, opname, _tags in iter_path_operations(specs) - if "i" in opname - ) - - routes = map_handlers_with_operations(hdl_sel, opr_iter, strict=True) - - for rdef in routes: - assert rdef.method == "GET" - assert rdef.handler in hdl_sel.values() - - -def test_create_routes_from_namespace(specs): - handlers = Handlers() - - # not - strict - try: - routes = create_routes_from_namespace(specs, handlers, strict=False) - except Exception: # pylint: disable=W0703 - pytest.fail("Non-strict failed", pytrace=True) - - # strict - with pytest.raises((RuntimeError, ValueError)): - routes = create_routes_from_namespace(specs, handlers, strict=True) - - # Removing non-spec handler - handlers.get_health_wrong = None - routes = create_routes_from_namespace(specs, handlers, strict=True) - - assert len(routes) == len(specs.paths) - for rdef in routes: - assert rdef.method == "GET" - - -def test_prepends_basepath(specs): - - # not - strict - try: - handlers = Handlers() - routes = create_routes_from_namespace(specs, handlers, strict=False) - except Exception: # pylint: disable=W0703 - pytest.fail("Non-strict failed", pytrace=True) - - basepath = openapi.get_base_path(specs) - for route in routes: - assert route.path.startswith(basepath) - assert route.handler.__name__[len("get_") :] in route.path diff --git a/packages/service-library/tests/aiohttp/test_sandbox.py b/packages/service-library/tests/aiohttp/test_sandbox.py deleted file mode 100644 index a76ae5f1f14..00000000000 --- a/packages/service-library/tests/aiohttp/test_sandbox.py +++ /dev/null @@ -1,39 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import pytest -from servicelib.aiohttp import openapi - - -@pytest.fixture -def multi_doc_oas(here): - openapi_path = here / "data" / "oas3-parts" / "petstore.yaml" - assert openapi_path.exists() - return openapi_path - - -@pytest.fixture -def single_doc_oas(here): - openapi_path = here / "data" / "oas3" / "petstore.yaml" - assert openapi_path.exists() - return openapi_path - - -async def 
test_multi_doc_openapi_specs(multi_doc_oas, single_doc_oas): - try: - # specs created out of multiple documents - multi_doc_specs = await openapi.create_openapi_specs(multi_doc_oas) - - # a single-document spec - single_doc_specs = await openapi.create_openapi_specs(single_doc_oas) - - except Exception: # pylint: disable=W0703 - pytest.fail("Failed specs validation") - - assert single_doc_specs.paths.keys() == multi_doc_specs.paths.keys() - - assert ( - single_doc_specs.paths["/tags"].operations["get"].operation_id - == multi_doc_specs.paths["/tags"].operations["get"].operation_id - ) diff --git a/packages/service-library/tests/aiohttp/test_status_utils.py b/packages/service-library/tests/aiohttp/test_status_utils.py new file mode 100644 index 00000000000..c5a8785a3b1 --- /dev/null +++ b/packages/service-library/tests/aiohttp/test_status_utils.py @@ -0,0 +1,83 @@ +from http import HTTPStatus + +import pytest +from servicelib.aiohttp import status +from servicelib.aiohttp.web_exceptions_extension import ( + STATUS_CODES_WITHOUT_AIOHTTP_EXCEPTION_CLASS, + HTTPException, + get_all_aiohttp_http_exceptions, +) +from servicelib.status_codes_utils import ( + _INVALID_STATUS_CODE_MSG, + get_code_description, + get_code_display_name, + get_http_status_codes, + is_1xx_informational, + is_2xx_success, + is_3xx_redirect, + is_4xx_client_error, + is_5xx_server_error, + is_error, +) + + +def test_display(): + assert get_code_display_name(status.HTTP_200_OK) == "HTTP_200_OK" + assert get_code_display_name(status.HTTP_306_RESERVED) == "HTTP_306_RESERVED" + assert get_code_display_name(11) == _INVALID_STATUS_CODE_MSG + + +def test_description(): + # SEE https://github.com/python/cpython/blob/main/Lib/http/__init__.py#L54-L171 + assert ( + get_code_description(status.HTTP_200_OK) + == "Request fulfilled, document follows. See https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/200" + ) + + +def test_status_codes_checks(): + + assert is_1xx_informational(status.HTTP_102_PROCESSING) + assert is_2xx_success(status.HTTP_202_ACCEPTED) + assert is_3xx_redirect(status.HTTP_301_MOVED_PERMANENTLY) + + assert is_4xx_client_error(status.HTTP_401_UNAUTHORIZED) + assert is_5xx_server_error(status.HTTP_503_SERVICE_UNAVAILABLE) + + assert is_error(status.HTTP_401_UNAUTHORIZED) + assert is_error(status.HTTP_503_SERVICE_UNAVAILABLE) + + +def test_predicates_with_status(): + + # in formational + assert get_http_status_codes(status, is_1xx_informational) == [ + status.HTTP_100_CONTINUE, + status.HTTP_101_SWITCHING_PROTOCOLS, + status.HTTP_102_PROCESSING, + status.HTTP_103_EARLY_HINTS, + ] + + # errors + assert [is_error(c) for c in get_http_status_codes(status, is_error)] + + # all. Curiously 306 is not in HTTPSTatus! 
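test_predicates_with_status and the parametrization further below lean on get_http_status_codes(status, *predicates) to enumerate the HTTP_* constants exposed by the status module. A plausible sketch of such a helper, shown only to illustrate the idea and assumed rather than taken from servicelib:

    import inspect


    def get_http_status_codes_sketch(status_module, *predicates):
        # collect the integer HTTP_* constants and keep those matching every predicate
        codes = sorted(
            value
            for name, value in inspect.getmembers(status_module)
            if name.startswith("HTTP_") and isinstance(value, int)
        )
        return [code for code in codes if all(p(code) for p in predicates)]

Read this way, the next assertion, which builds HTTPStatus(c) for every collected code except 306 (a code the standard library does not define), simply checks that the enumerated constants stay aligned with http.HTTPStatus.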
+ assert [ + HTTPStatus(c) + for c in get_http_status_codes(status) + if c != status.HTTP_306_RESERVED + ] + + +AIOHTTP_EXCEPTION_CLASSES_MAP: dict[ + int, type[HTTPException] +] = get_all_aiohttp_http_exceptions(HTTPException) + + +@pytest.mark.parametrize("status_code", get_http_status_codes(status)) +def test_how_status_codes_map_to_aiohttp_exception_class(status_code): + aiohttp_exception_cls = AIOHTTP_EXCEPTION_CLASSES_MAP.get(status_code) + if status_code in STATUS_CODES_WITHOUT_AIOHTTP_EXCEPTION_CLASS: + assert aiohttp_exception_cls is None + else: + assert aiohttp_exception_cls is not None diff --git a/packages/service-library/tests/aiohttp/test_tracing.py b/packages/service-library/tests/aiohttp/test_tracing.py index c39bfeed646..2621751f344 100644 --- a/packages/service-library/tests/aiohttp/test_tracing.py +++ b/packages/service-library/tests/aiohttp/test_tracing.py @@ -2,96 +2,149 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from asyncio import AbstractEventLoop -from typing import Callable +import importlib +from collections.abc import Callable, Iterator +from typing import Any +import pip import pytest from aiohttp import web -from aiohttp.client_reqrep import ClientResponse from aiohttp.test_utils import TestClient -from servicelib.aiohttp.rest_responses import _collect_http_exceptions -from servicelib.aiohttp.tracing import setup_tracing +from pydantic import ValidationError +from servicelib.aiohttp.tracing import get_tracing_lifespan +from settings_library.tracing import TracingSettings -DEFAULT_JAEGER_BASE_URL = "http://jaeger:9411" + +@pytest.fixture +def tracing_settings_in(request): + return request.param @pytest.fixture() -def client( - event_loop: AbstractEventLoop, +def set_and_clean_settings_env_vars( + monkeypatch: pytest.MonkeyPatch, tracing_settings_in +): + endpoint_mocked = False + if tracing_settings_in[0]: + endpoint_mocked = True + monkeypatch.setenv( + "TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT", f"{tracing_settings_in[0]}" + ) + port_mocked = False + if tracing_settings_in[1]: + port_mocked = True + monkeypatch.setenv( + "TRACING_OPENTELEMETRY_COLLECTOR_PORT", f"{tracing_settings_in[1]}" + ) + yield + if endpoint_mocked: + monkeypatch.delenv("TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT") + if port_mocked: + monkeypatch.delenv("TRACING_OPENTELEMETRY_COLLECTOR_PORT") + + +@pytest.mark.parametrize( + "tracing_settings_in", + [ + ("http://opentelemetry-collector", 4318), + ], + indirect=True, +) +async def test_valid_tracing_settings( aiohttp_client: Callable, - unused_tcp_port_factory: Callable, + set_and_clean_settings_env_vars: Callable, + tracing_settings_in, + uninstrument_opentelemetry: Iterator[None], ) -> TestClient: - ports = [unused_tcp_port_factory() for _ in range(2)] - - async def redirect(request: web.Request) -> web.Response: - return web.HTTPFound(location="/return/200") - - async def return_response(request: web.Request) -> web.Response: - code = int(request.match_info["code"]) - return web.Response(status=code) - - async def raise_response(request: web.Request): - status_code = int(request.match_info["code"]) - status_to_http_exception = _collect_http_exceptions() - http_exception_cls = status_to_http_exception[status_code] - raise http_exception_cls( - reason=f"raised from raised_error with code {status_code}" - ) - - async def skip(request: web.Request): - return web.HTTPServiceUnavailable(reason="should not happen") - app = web.Application() - app.add_routes( - [ - web.get("/redirect", redirect), - 
web.get("/return/{code}", return_response), - web.get("/raise/{code}", raise_response), - web.get("/skip", skip, name="skip"), - ] - ) - - print("Resources:") - for resource in app.router.resources(): - print(resource) - - # UNDER TEST --- - # SEE RoutesView to understand how resources can be iterated to get routes - resource = app.router["skip"] - routes_in_a_resource = list(resource) - - setup_tracing( + service_name = "simcore_service_webserver" + tracing_settings = TracingSettings() + async for _ in get_tracing_lifespan( + app, service_name=service_name, tracing_settings=tracing_settings + )(app): + pass + + +@pytest.mark.parametrize( + "tracing_settings_in", + [ + ("http://opentelemetry-collector", 80), + ("opentelemetry-collector", 4318), + ("httsdasp://ot@##el-collector", 4318), + ], + indirect=True, +) +async def test_invalid_tracing_settings( + aiohttp_client: Callable, + set_and_clean_settings_env_vars: Callable, + tracing_settings_in, + uninstrument_opentelemetry: Iterator[None], +) -> TestClient: + with pytest.raises(ValidationError): + TracingSettings() + + +def install_package(package): + pip.main(["install", package]) + + +def uninstall_package(package): + pip.main(["uninstall", "-y", package]) + + +@pytest.fixture(scope="function") +def manage_package(request): + package, importname = request.param + install_package(package) + yield importname + uninstall_package(package) + + +@pytest.mark.skip( + reason="this test installs always the latest version of the package which creates conflicts." +) +@pytest.mark.parametrize( + "tracing_settings_in, manage_package", + [ + ( + ("http://opentelemetry-collector", 4318), + ( + "opentelemetry-instrumentation-botocore", + "opentelemetry.instrumentation.botocore", + ), + ), + ( + ("http://opentelemetry-collector", "4318"), + ( + "opentelemetry-instrumentation-aiopg", + "opentelemetry.instrumentation.aiopg", + ), + ), + ], + indirect=True, +) +async def test_tracing_setup_package_detection( + aiohttp_client: Callable, + set_and_clean_settings_env_vars: Callable[[], None], + tracing_settings_in: Callable[[], dict[str, Any]], + manage_package, + uninstrument_opentelemetry: Iterator[None], +): + package_name = manage_package + importlib.import_module(package_name) + # + app = web.Application() + service_name = "simcore_service_webserver" + tracing_settings = TracingSettings() + async for _ in get_tracing_lifespan( app, - service_name=f"{__name__}.client", - host="127.0.0.1", - port=ports[0], - jaeger_base_url=DEFAULT_JAEGER_BASE_URL, - skip_routes=routes_in_a_resource, - ) - - return event_loop.run_until_complete( - aiohttp_client(app, server_kwargs={"port": ports[0]}) - ) - - -async def test_setup_tracing(client: TestClient): - res: ClientResponse - - # on error - for code in (web.HTTPOk.status_code, web.HTTPBadRequest.status_code): - res = await client.get(f"/return/{code}") - - assert res.status == code, await res.text() - res = await client.get(f"/raise/{code}") - assert res.status == code, await res.text() - - res = await client.get("/redirect") - # TODO: check it was redirected - assert res.status == 200, await res.text() - - res = await client.get("/skip") - assert res.status == web.HTTPServiceUnavailable.status_code - - # using POST instead of GET -> HTTPMethodNotAllowed - res = await client.post("/skip") - assert res.status == web.HTTPMethodNotAllowed.status_code, "GET and not POST" + service_name=service_name, + tracing_settings=tracing_settings, + )(app): + # idempotency + async for _ in get_tracing_lifespan( + app, + 
service_name=service_name, + tracing_settings=tracing_settings, + )(app): + pass diff --git a/packages/service-library/tests/aiohttp/tutils.py b/packages/service-library/tests/aiohttp/tutils.py deleted file mode 100644 index 8821ae4ba42..00000000000 --- a/packages/service-library/tests/aiohttp/tutils.py +++ /dev/null @@ -1,69 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-import -# pylint: disable=no-self-use - -import asyncio -import json -from dataclasses import dataclass - -from aiohttp import web -from servicelib.json_serialization import json_dumps - - -@dataclass -class Data: - x: int = 3 - y: str = "foo" - - -class Handlers: - async def get_health_wrong(self, request: web.Request): - out = { - "name": __name__.split(".")[0], - "version": "1.0", - "status": "SERVICE_RUNNING", - "invalid_entry": 125, - } - return out - - async def get_health(self, request: web.Request): - out = { - "name": __name__.split(".")[0], - "version": "1.0", - "status": "SERVICE_RUNNING", - "api_version": "1.0", - } - return out - - async def get_dict(self, request: web.Request): - return {"x": 3, "y": "3"} - - async def get_envelope(self, request: web.Request): - data = {"x": 3, "y": "3"} - return {"error": None, "data": data} - - async def get_list(self, request: web.Request): - return [{"x": 3, "y": "3"}] * 3 - - async def get_attobj(self, request: web.Request): - return Data(3, "3") - - async def get_string(self, request: web.Request): - return "foo" - - async def get_number(self, request: web.Request): - return 3 - - async def get_mixed(self, request: web.Request): - data = [{"x": 3, "y": "3", "z": [Data(3, "3")] * 2}] * 3 - return data - - @classmethod - def get(cls, suffix, process=True): - handlers = cls() - coro = getattr(handlers, "get_" + suffix) - loop = asyncio.get_event_loop() - data = loop.run_until_complete(coro(None)) - - return json.loads(json_dumps(data)) if process else data diff --git a/packages/service-library/tests/aiohttp/with_postgres/docker-compose.yml b/packages/service-library/tests/aiohttp/with_postgres/docker-compose.yml index 928b35779a2..22ebab6fa0c 100644 --- a/packages/service-library/tests/aiohttp/with_postgres/docker-compose.yml +++ b/packages/service-library/tests/aiohttp/with_postgres/docker-compose.yml @@ -1,7 +1,6 @@ -version: "3.8" services: postgres: - image: "postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce" + image: "postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc" restart: always environment: POSTGRES_DB: db diff --git a/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py b/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py index 30b6cfa7110..1a2d453b4e6 100644 --- a/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py +++ b/packages/service-library/tests/aiohttp/with_postgres/test_aiopg_utils.py @@ -4,7 +4,6 @@ # pylint:disable=broad-except import asyncio -import logging import sys from copy import deepcopy from dataclasses import asdict @@ -14,14 +13,7 @@ import pytest import sqlalchemy as sa import sqlalchemy.exc as sa_exceptions -from aiohttp import web -from servicelib.aiohttp.aiopg_utils import ( - DatabaseError, - PostgresRetryPolicyUponOperation, - init_pg_tables, - is_pg_responsive, - retry_pg_api, -) +from servicelib.aiohttp.aiopg_utils import init_pg_tables, is_pg_responsive from servicelib.common_aiopg_utils import DataSourceName, 
create_pg_engine current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent @@ -62,7 +54,7 @@ async def _create_table(engine: aiopg.sa.Engine): return dsn -def test_dsn_uri_with_query(postgres_service_with_fake_data): +def test_dsn_uri_with_query(postgres_service_with_fake_data: DataSourceName): uri = postgres_service_with_fake_data.to_uri(with_query=True) sa_engine = None try: @@ -82,7 +74,7 @@ def test_dsn_uri_with_query(postgres_service_with_fake_data): sa_engine.dispose() -async def test_create_pg_engine(postgres_service_with_fake_data): +async def test_create_pg_engine(postgres_service_with_fake_data: DataSourceName): dsn = postgres_service_with_fake_data # using raw call and dsn.asdict to fill create_engine arguments! @@ -137,119 +129,6 @@ async def test_engine_when_idle_for_some_time(): await conn.execute(tbl.insert().values(val="third")) -def test_init_tables(postgres_service_with_fake_data): +def test_init_tables(postgres_service_with_fake_data: DataSourceName): dsn = postgres_service_with_fake_data init_pg_tables(dsn, metadata) - - -async def test_retry_pg_api_policy(postgres_service_with_fake_data, caplog): - caplog.set_level(logging.ERROR) - - # pylint: disable=no-value-for-parameter - dsn = postgres_service_with_fake_data.to_uri() - app_name = postgres_service_with_fake_data.application_name - - async with aiopg.sa.create_engine( - dsn, application_name=app_name, echo=True - ) as engine: - - # goes - await dec_go(engine, gid=0) - print(dec_go.retry.statistics) - assert dec_go.total_retry_count() == 1 - - # goes, fails and max retries - with pytest.raises(web.HTTPServiceUnavailable): - await dec_go(engine, gid=1, raise_cls=DatabaseError) - assert "Postgres service non-responsive, responding 503" in caplog.text - - print(dec_go.retry.statistics) - assert ( - dec_go.total_retry_count() - == PostgresRetryPolicyUponOperation.ATTEMPTS_COUNT + 1 - ) - - # goes and keeps count of all retrials - await dec_go(engine, gid=2) - assert ( - dec_go.total_retry_count() - == PostgresRetryPolicyUponOperation.ATTEMPTS_COUNT + 2 - ) - - -# TODO: review tests below -@pytest.mark.skip(reason="UNDER DEVELOPMENT") -async def test_engine_when_pg_refuses(postgres_service_with_fake_data): - dsn = postgres_service_with_fake_data - dsn.password = "Wrong pass" - - # async with create_pg_engine(dsn) as engine: - - engine = await create_pg_engine(dsn) - assert not engine.closed # does not mean anything!!! - - # acquiring connection must fail - with pytest.raises(RuntimeError) as execinfo: - async with engine.acquire() as conn: - await conn.execute("SELECT 1 as is_alive") - assert "Cannot acquire connection" in str(execinfo.value) - - # pg not responsive - assert not await is_pg_responsive(engine) - - -@pytest.mark.skip(reason="UNDER DEVELOPMENT") -async def test_connections(postgres_service_with_fake_data): - dsn = postgres_service_with_fake_data.to_uri() - app_name = postgres_service_with_fake_data.application_name - ## number of seconds after which connection is recycled, helps to deal with stale connections in pool, default value is -1, means recycling logic is disabled. 
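The pool_recycle comment above describes a general aiopg option: pooled connections older than the given number of seconds are closed and reopened instead of being reused, and -1 (the default) disables the behaviour. A minimal usage sketch, assuming only a reachable DSN string:

    import aiopg.sa


    async def make_engine(dsn: str):
        # recycle pooled connections every 2 seconds so stale ones are not reused
        # (pool_recycle=-1, the default, disables recycling)
        return await aiopg.sa.create_engine(dsn, minsize=1, maxsize=4, pool_recycle=2)

The removed test below passes the same option as pool_recycle=POOL_RECYCLE_SECS.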
- POOL_RECYCLE_SECS = 2 - - async def conn_callback(conn): - print(f"Opening {conn.raw}") - - async with aiopg.sa.create_engine( - dsn, - minsize=20, - maxsize=20, - # timeout=1, - pool_recycle=POOL_RECYCLE_SECS, - echo=True, - enable_json=True, - enable_hstore=True, - enable_uuid=True, - on_connect=conn_callback, - # extra kwargs in https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS - application_name=app_name, - ) as engine: - - # used and free connections - # size_before = engine.size - - for n in range(10): - await go(engine, gid=n) - - assert engine - assert engine.size == 0 - - -@retry_pg_api -async def dec_go(*args, **kargs): - return await go(*args, **kargs) - - -async def go(engine: aiopg.sa.Engine, gid="", raise_cls=None): - # pylint: disable=no-value-for-parameter - async with engine.acquire() as conn: - # writes - async with conn.begin(): - await conn.execute(tbl.insert().values(val=f"first-{gid}")) - await conn.execute(tbl.insert().values(val=f"second-{gid}")) - - if raise_cls is not None: - raise raise_cls - - # reads - async for row in conn.execute(tbl.select()): - print(row.id, row.val) - assert any(prefix in row.val for prefix in ("first", "second")) diff --git a/packages/service-library/tests/archiving_utils/conftest.py b/packages/service-library/tests/archiving_utils/conftest.py new file mode 100644 index 00000000000..c091ac9550f --- /dev/null +++ b/packages/service-library/tests/archiving_utils/conftest.py @@ -0,0 +1,56 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from collections.abc import AsyncIterable +from concurrent.futures import ProcessPoolExecutor +from pathlib import Path + +import numpy as np +import pytest +from faker import Faker +from helpers import print_tree +from PIL import Image +from servicelib.file_utils import remove_directory + + +def _process_and_save_image(image_path: Path) -> None: + random_image = np.random.rand(900, 900, 3) * 255 # noqa: NPY002 + im_out = Image.fromarray(random_image.astype("uint8")).convert("RGB") + im_out.save(image_path) + + +@pytest.fixture +async def mixed_file_types(tmp_path: Path, faker: Faker) -> AsyncIterable[Path]: + base_dir = tmp_path / "mixed_types_dir" + base_dir.mkdir() + + # mixed small text files and binary files + (base_dir / "empty").mkdir() + (base_dir / "d1").mkdir() + (base_dir / "d1" / "f1.txt").write_text(faker.text()) + (base_dir / "d1" / "b2.bin").write_bytes(faker.json_bytes()) + (base_dir / "d1" / "sd1").mkdir() + (base_dir / "d1" / "sd1" / "f1.txt").write_text(faker.text()) + (base_dir / "d1" / "sd1" / "b2.bin").write_bytes(faker.json_bytes()) + (base_dir / "images").mkdir() + + # images cause issues with zipping, below content produced different + # hashes for zip files + + image_paths: list[Path] = [] + for i in range(2): + image_dir = base_dir / f"images{i}" + image_dir.mkdir() + for n in range(50): + image_paths.append(image_dir / f"out{n}.jpg") # noqa: PERF401 + + with ProcessPoolExecutor() as executor: + executor.map(_process_and_save_image, image_paths) + + print("mixed_types_dir ---") + print_tree(base_dir) + + yield base_dir + + await remove_directory(base_dir) + assert not base_dir.exists() diff --git a/packages/service-library/tests/archiving_utils/helpers.py b/packages/service-library/tests/archiving_utils/helpers.py new file mode 100644 index 00000000000..7c4e146756a --- /dev/null +++ b/packages/service-library/tests/archiving_utils/helpers.py @@ -0,0 +1,8 @@ +from pathlib import Path + + +def print_tree(path: Path, 
level=0): + tab = " " * level + print(f"{tab}{'+' if path.is_dir() else '-'} {path if level==0 else path.name}") + for p in path.glob("*"): + print_tree(p, level + 1) diff --git a/packages/service-library/tests/archiving_utils/test_archiving__interface_7zip.py b/packages/service-library/tests/archiving_utils/test_archiving__interface_7zip.py new file mode 100644 index 00000000000..da6ba3260c2 --- /dev/null +++ b/packages/service-library/tests/archiving_utils/test_archiving__interface_7zip.py @@ -0,0 +1,137 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import json +from pathlib import Path + +import pytest +from helpers import print_tree +from pydantic import NonNegativeInt +from servicelib.archiving_utils._interface_7zip import ( + _7ZipProgressParser, + _extract_file_names_from_archive, + archive_dir, + unarchive_dir, +) + + +@pytest.fixture +async def archive_path(tmp_path: Path) -> Path: + return tmp_path / "mixed_types_dir.zip" + + +@pytest.fixture +def unpacked_archive(tmp_path: Path) -> Path: + path = tmp_path / "unpacked_dir" + path.mkdir() + return path + + +@pytest.fixture +def data_archive_utils(package_tests_dir: Path) -> Path: + path = package_tests_dir / "data" / "archive_utils" + assert path.exists() + assert path.is_dir() + return path + + +@pytest.mark.parametrize( + "progress_stdout, expected_size", + [ + ("compress_stdout.json", 434866026), + ("decompress_stdout.json", 434902745), + ], +) +async def test_compress_progress_parser( + data_archive_utils: Path, progress_stdout: str, expected_size: NonNegativeInt +): + stdout_path = data_archive_utils / progress_stdout + assert stdout_path.exists() + stdout_entries: list[str] = json.loads(stdout_path.read_text()) + + detected_entries: list[NonNegativeInt] = [] + + async def _progress_handler(byte_progress: NonNegativeInt) -> None: + detected_entries.append(byte_progress) + + parser = _7ZipProgressParser(_progress_handler) + for chunk in stdout_entries: + await parser.parse_chunk(chunk) + + print(detected_entries) + assert sum(detected_entries) == expected_size + + +def _assert_same_folder_content(f1: Path, f2: Path) -> None: + in_f1 = {x.relative_to(f1) for x in f1.rglob("*")} + in_f2 = {x.relative_to(f2) for x in f2.rglob("*")} + assert in_f1 == in_f2 + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_archive_unarchive( + mixed_file_types: Path, archive_path: Path, unpacked_archive: Path, compress: bool +): + await archive_dir(mixed_file_types, archive_path, compress=compress) + await unarchive_dir(archive_path, unpacked_archive) + _assert_same_folder_content(mixed_file_types, unpacked_archive) + + +@pytest.fixture +def empty_folder(tmp_path: Path) -> Path: + path = tmp_path / "empty_folder" + path.mkdir() + return path + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_archive_unarchive_empty_folder( + empty_folder: Path, archive_path: Path, unpacked_archive: Path, compress: bool +): + await archive_dir(empty_folder, archive_path, compress=compress) + await unarchive_dir(archive_path, unpacked_archive) + _assert_same_folder_content(empty_folder, unpacked_archive) + + +@pytest.mark.parametrize( + "file_name, expected_file_count", + [ + ("list_edge_case.txt", 3), + ("list_stdout.txt", 674), + ("list_broken_format.txt", 22), + ("list_empty_archive.txt", 0), + ], +) +def test__extract_file_names_from_archive( + data_archive_utils: Path, file_name: str, expected_file_count: NonNegativeInt +): + archive_list_stdout_path = data_archive_utils / file_name 
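The fixtures read here (data/archive_utils/list_*.txt) are captured 7z listings that the file-name parser is exercised against offline. Assuming a local 7z binary, a listing fixture of this kind could be regenerated with a small helper along these lines (an illustration, not part of the test suite):

    import subprocess
    from pathlib import Path


    def capture_7z_listing(archive: Path, out_file: Path) -> None:
        # store the raw `7z l` listing so the parser can be exercised offline
        result = subprocess.run(
            ["7z", "l", f"{archive}"], capture_output=True, text=True, check=True
        )
        out_file.write_text(result.stdout)

The list_broken_format.txt and list_empty_archive.txt fixtures presumably capture degraded and empty variants of the same listing output.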
+ assert archive_list_stdout_path.exists() + + archive_list_stdout_path.read_text() + files = _extract_file_names_from_archive(archive_list_stdout_path.read_text()) + assert len(files) == expected_file_count + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_archive_unarchive_with_names_with_spaces(tmp_path: Path, compress: bool): + to_archive_path = tmp_path / "'source of files!a ads now strange'" + to_archive_path.mkdir(parents=True, exist_ok=True) + assert to_archive_path.exists() + + # generate some content + for i in range(10): + (to_archive_path / f"f{i}.txt").write_text("*" * i) + print_tree(to_archive_path) + + archive_path = tmp_path / "archived version herre!)!(/Β£)!'" + assert not archive_path.exists() + + extracted_to_path = tmp_path / "this is where i want them to be extracted to''''" + extracted_to_path.mkdir(parents=True, exist_ok=True) + assert extracted_to_path.exists() + + # source and destination all with spaces + await archive_dir(to_archive_path, archive_path, compress=compress) + await unarchive_dir(archive_path, extracted_to_path) + _assert_same_folder_content(to_archive_path, extracted_to_path) diff --git a/packages/service-library/tests/archiving_utils/test_archiving_utils.py b/packages/service-library/tests/archiving_utils/test_archiving_utils.py new file mode 100644 index 00000000000..ba47ee00c0b --- /dev/null +++ b/packages/service-library/tests/archiving_utils/test_archiving_utils.py @@ -0,0 +1,419 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-name-in-module + +import asyncio +import os +import secrets +import string +import tempfile +from collections.abc import Callable, Iterable +from pathlib import Path + +import pytest +from faker import Faker +from pydantic import ByteSize, TypeAdapter +from pytest_benchmark.plugin import BenchmarkFixture +from pytest_simcore.helpers.comparing import compute_hash, compute_hashes +from servicelib.archiving_utils import archive_dir, unarchive_dir + + +@pytest.fixture +def dir_with_random_content(faker: Faker) -> Iterable[Path]: + def random_string(length: int) -> str: + return "".join(secrets.choice(string.ascii_letters) for i in range(length)) + + def make_files_in_dir(dir_path: Path, file_count: int) -> None: + for _ in range(file_count): + (dir_path / f"{random_string(8)}.bin").write_bytes( + os.urandom(faker.random_int(1, 10)) + ) + + def ensure_dir(path_to_ensure: Path) -> Path: + path_to_ensure.mkdir(parents=True, exist_ok=True) + return path_to_ensure + + def make_subdirectory_with_content(subdir_name: Path, max_file_count: int) -> None: + subdir_name = ensure_dir(subdir_name) + make_files_in_dir( + dir_path=subdir_name, + file_count=faker.random_int(1, max_file_count), + ) + + def make_subdirectories_with_content( + subdir_name: Path, max_subdirectories_count: int, max_file_count: int + ) -> None: + subdirectories_count = faker.random_int(1, max_subdirectories_count) + for _ in range(subdirectories_count): + make_subdirectory_with_content( + subdir_name=subdir_name / f"{random_string(4)}", + max_file_count=max_file_count, + ) + + def get_dirs_and_subdris_in_path(path_to_scan: Path) -> list[Path]: + return [path for path in path_to_scan.rglob("*") if path.is_dir()] + + with tempfile.TemporaryDirectory() as temp_dir: + temp_dir_path = Path(temp_dir) + data_container = ensure_dir(temp_dir_path / "study_data") + + make_subdirectories_with_content( + subdir_name=data_container, max_subdirectories_count=5, 
max_file_count=5 + ) + make_files_in_dir(dir_path=data_container, file_count=5) + + # creates a good amount of files + for _ in range(4): + for subdirectory_path in get_dirs_and_subdris_in_path(data_container): + make_subdirectories_with_content( + subdir_name=subdirectory_path, + max_subdirectories_count=3, + max_file_count=3, + ) + + yield temp_dir_path + + +# UTILS + + +def strip_directory_from_path(input_path: Path, to_strip: Path) -> Path: + # NOTE: could use os.path.relpath instead or Path.relative_to ? + return Path(str(input_path).replace(str(to_strip) + "/", "")) + + +def get_all_files_in_dir(dir_path: Path) -> set[Path]: + return { + strip_directory_from_path(x, dir_path) + for x in dir_path.rglob("*") + if x.is_file() + } + + +def full_file_path_from_dir_and_subdirs(dir_path: Path) -> list[Path]: + return [x for x in dir_path.rglob("*") if x.is_file()] + + +def _escape_undecodable_str(s: str) -> str: + return s.encode(errors="replace").decode("utf-8") + + +async def assert_same_directory_content( + dir_to_compress: Path, + output_dir: Path, + inject_relative_path: Path | None = None, +) -> None: + def _relative_path(input_path: Path) -> Path: + assert inject_relative_path is not None + return Path(str(inject_relative_path / str(input_path))[1:]) + + input_set = get_all_files_in_dir(dir_to_compress) + output_set = get_all_files_in_dir(output_dir) + + if inject_relative_path is not None: + input_set = {_relative_path(x) for x in input_set} + + assert ( + input_set == output_set + ), f"There following files are missing {input_set - output_set}" + + # computing the hashes for dir_to_compress and map in a dict + # with the name starting from the root of the directory and md5sum + dir_to_compress_hashes = { + strip_directory_from_path(k, dir_to_compress): v + for k, v in ( + await compute_hashes(full_file_path_from_dir_and_subdirs(dir_to_compress)) + ).items() + } + + # computing the hashes for output_dir and map in a dict + # with the name starting from the root of the directory and md5sum + output_dir_hashes = { + strip_directory_from_path(k, output_dir): v + for k, v in ( + await compute_hashes(full_file_path_from_dir_and_subdirs(output_dir)) + ).items() + } + + # finally check if hashes are mapped 1 to 1 in order to verify + # that the compress/decompress worked correctly + for key in dir_to_compress_hashes: + assert ( + dir_to_compress_hashes[key] + == output_dir_hashes[_relative_path(key) if inject_relative_path else key] + ) + + +def assert_unarchived_paths( + unarchived_paths: set[Path], + src_dir: Path, + dst_dir: Path, +): + def is_file_or_emptydir(path: Path) -> bool: + return path.is_file() or path.is_dir() and not any(path.glob("*")) + + # all unarchivedare under dst_dir + assert all(dst_dir in f.parents for f in unarchived_paths) + + # can be also checked with strings + assert all(str(f).startswith(str(dst_dir)) for f in unarchived_paths) + + # trim basedir and compare relative paths (alias 'tails') against src_dir + basedir = str(dst_dir) + + got_tails = {os.path.relpath(f, basedir) for f in unarchived_paths} + expected_tails = { + os.path.relpath(f, src_dir) + for f in src_dir.rglob("*") + if is_file_or_emptydir(f) + } + expected_tails = {_escape_undecodable_str(x) for x in expected_tails} + got_tails = {x.replace("οΏ½", "?") for x in got_tails} + assert got_tails == expected_tails + + +@pytest.mark.skip(reason="DEV:only for manual tessting") +async def test_archiving_utils_against_sample( + osparc_simcore_root_dir: Path, tmp_path: Path +): + """ + ONLY for manual 
testing + User MUST provide a sample of a zip file in ``sample_path`` + """ + sample_path = osparc_simcore_root_dir / "keep.ignore" / "workspace.zip" + destination = tmp_path / "unzipped" + + extracted_paths = await unarchive_dir(sample_path, destination) + assert extracted_paths + + for p in extracted_paths: + assert isinstance(p, Path), p + + await archive_dir( + dir_to_compress=destination, destination=tmp_path / "test_it.zip", compress=True + ) + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_archive_unarchive_same_structure_dir( + dir_with_random_content: Path, + tmp_path: Path, + compress: bool, +): + temp_dir_one = tmp_path / "one" + temp_dir_two = tmp_path / "two" + + temp_dir_one.mkdir() + temp_dir_two.mkdir() + + archive_file = temp_dir_one / "archive.zip" + + await archive_dir( + dir_to_compress=dir_with_random_content, + destination=archive_file, + compress=compress, + ) + + unarchived_paths: set[Path] = await unarchive_dir( + archive_to_extract=archive_file, destination_folder=temp_dir_two + ) + + assert_unarchived_paths( + unarchived_paths, + src_dir=dir_with_random_content, + dst_dir=temp_dir_two, + ) + + await assert_same_directory_content(dir_with_random_content, temp_dir_two, None) + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_unarchive_in_same_dir_as_archive( + dir_with_random_content: Path, + tmp_path: Path, + compress: bool, +): + archive_file = tmp_path / "archive.zip" + + existing_files: set[Path] = set() + for i in range(10): + # add some other files to the folder + existing = tmp_path / f"exiting-file-{i}" + existing.touch() + existing_files.add(existing) + + await archive_dir( + dir_to_compress=dir_with_random_content, + destination=archive_file, + compress=compress, + ) + + unarchived_paths = await unarchive_dir( + archive_to_extract=archive_file, destination_folder=tmp_path + ) + + archive_file.unlink() # delete before comparing contents + + # remove existing files now that the listing was complete + for file in existing_files: + file.unlink() + + assert_unarchived_paths( + unarchived_paths, + src_dir=dir_with_random_content, + dst_dir=tmp_path, + ) + + await assert_same_directory_content(dir_with_random_content, tmp_path, None) + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_regression_unsupported_characters( + tmp_path: Path, compress: bool +) -> None: + archive_path = tmp_path / "archive.zip" + dir_to_archive = tmp_path / "to_compress" + dir_to_archive.mkdir() + dst_dir = tmp_path / "decompressed" + dst_dir.mkdir() + + def _create_file(file_name: str, content: str) -> None: + file_path = dir_to_archive / file_name + file_path.write_text(content) + assert file_path.read_text() == content + + # unsupported file name + _create_file("something\udce6likethis.txt", "payload1") + # supported name + _create_file("this_file_name_works.txt", "payload2") + + await archive_dir( + dir_to_compress=dir_to_archive, + destination=archive_path, + compress=compress, + ) + + unarchived_paths = await unarchive_dir( + archive_to_extract=archive_path, destination_folder=dst_dir + ) + + assert_unarchived_paths( + unarchived_paths, + src_dir=dir_to_archive, + dst_dir=dst_dir, + ) + + await assert_same_directory_content( + dir_to_compress=dir_to_archive, + output_dir=dst_dir, + inject_relative_path=None, + ) + + +EMPTY_SET: set[Path] = set() +ALL_ITEMS_SET: set[Path] = { + Path("d1/f2.txt"), + Path("d1/f1"), + Path("d1/sd1/f1"), + Path("d1/sd1/f2.txt"), +} + + +file_suffix = 0 + + +async def 
_archive_dir_performance( + input_path: Path, destination_path: Path, compress: bool +): + global file_suffix # pylint: disable=global-statement # noqa: PLW0603 + + await archive_dir( + input_path, destination_path / f"archive_{file_suffix}.zip", compress=compress + ) + file_suffix += 1 + + +@pytest.mark.skip(reason="manual testing") +@pytest.mark.parametrize( + "compress, file_size, num_files", + [(False, TypeAdapter(ByteSize).validate_python("1Mib"), 10000)], +) +def test_archive_dir_performance( + benchmark: BenchmarkFixture, + create_file_of_size: Callable[[ByteSize, str], Path], + tmp_path: Path, + compress: bool, + file_size: ByteSize, + num_files: int, +): + # create a bunch of different files + files_to_compress = [ + create_file_of_size(file_size, f"inputs/test_file_{n}") + for n in range(num_files) + ] + assert len(files_to_compress) == num_files + parent_path = files_to_compress[0].parent + assert all(f.parent == parent_path for f in files_to_compress) + + destination_path = tmp_path / "archive_performance" + assert not destination_path.exists() + destination_path.mkdir(parents=True) + assert destination_path.exists() + + def run_async_test(*args, **kwargs): + asyncio.get_event_loop().run_until_complete( + _archive_dir_performance(parent_path, destination_path, compress) + ) + + benchmark(run_async_test) + + +def _touch_all_files_in_path(path_to_archive: Path) -> None: + for path in path_to_archive.rglob("*"): + print("touching", path) + path.touch() + + +@pytest.mark.parametrize("compress", [False]) +async def test_regression_archive_hash_does_not_change( + mixed_file_types: Path, tmp_path: Path, compress: bool +): + destination_path = tmp_path / "archives_to_compare" + destination_path.mkdir(parents=True, exist_ok=True) + + first_archive = destination_path / "first" + second_archive = destination_path / "second" + assert not first_archive.exists() + assert not second_archive.exists() + assert first_archive != second_archive + + await archive_dir(mixed_file_types, first_archive, compress=compress) + assert first_archive.exists() + + _touch_all_files_in_path(mixed_file_types) + + await archive_dir(mixed_file_types, second_archive, compress=compress) + assert second_archive.exists() + + _, first_hash = compute_hash(first_archive) + _, second_hash = compute_hash(second_archive) + assert first_hash == second_hash + + +@pytest.mark.parametrize("compress", [True, False]) +async def test_archive_empty_folder(tmp_path: Path, compress: bool): + archive_path = tmp_path / "zip_archive" + assert not archive_path.exists() + + empty_folder_path = tmp_path / "empty" + empty_folder_path.mkdir(parents=True, exist_ok=True) + extract_to_path = tmp_path / "extracted_to" + extract_to_path.mkdir(parents=True, exist_ok=True) + + await archive_dir(empty_folder_path, archive_path, compress=compress) + + detected_files = await unarchive_dir(archive_path, extract_to_path) + assert detected_files == set() + + await assert_same_directory_content(empty_folder_path, extract_to_path) diff --git a/packages/service-library/tests/test_archiving_utils_extra.py b/packages/service-library/tests/archiving_utils/test_archiving_utils_extra.py similarity index 92% rename from packages/service-library/tests/test_archiving_utils_extra.py rename to packages/service-library/tests/archiving_utils/test_archiving_utils_extra.py index a428b5db4aa..16cb33b5d45 100644 --- a/packages/service-library/tests/test_archiving_utils_extra.py +++ b/packages/service-library/tests/archiving_utils/test_archiving_utils_extra.py @@ -6,6 +6,7 
@@ from pathlib import Path import pytest +from helpers import print_tree from servicelib.archiving_utils import ( PrunableFolder, archive_dir, @@ -13,8 +14,6 @@ unarchive_dir, ) -from .test_utils import print_tree - @pytest.fixture def state_dir(tmp_path) -> Path: @@ -134,18 +133,10 @@ async def test_override_and_prune_from_archive( compress: bool, ): download_file = tmp_path / "download.zip" - expected_paths = { - p.relative_to(new_state_dir) - for p in new_state_dir.rglob("*") - if is_leaf_path(p) - } # archive new_state_dir -> download.zip await archive_dir( - dir_to_compress=new_state_dir, - destination=download_file, - compress=compress, - store_relative_path=True, # <=== relative! + dir_to_compress=new_state_dir, destination=download_file, compress=compress ) folder = PrunableFolder(state_dir) diff --git a/packages/service-library/tests/conftest.py b/packages/service-library/tests/conftest.py index e03a1450d03..979a3731071 100644 --- a/packages/service-library/tests/conftest.py +++ b/packages/service-library/tests/conftest.py @@ -3,6 +3,8 @@ # pylint: disable=unused-import import sys +from collections.abc import AsyncIterable, AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager, asynccontextmanager from copy import deepcopy from pathlib import Path from typing import Any @@ -10,23 +12,28 @@ import pytest import servicelib from faker import Faker +from pytest_mock import MockerFixture +from servicelib.redis import RedisClientSDK, RedisClientsManager, RedisManagerDBConfig +from settings_library.redis import RedisDatabase, RedisSettings pytest_plugins = [ "pytest_simcore.docker_compose", + "pytest_simcore.docker_registry", "pytest_simcore.docker_swarm", + "pytest_simcore.docker", + "pytest_simcore.environment_configs", "pytest_simcore.file_extra", - "pytest_simcore.monkeypatch_extra", "pytest_simcore.pytest_global_environs", "pytest_simcore.rabbit_service", "pytest_simcore.redis_service", "pytest_simcore.repository_paths", + "pytest_simcore.schemas", "pytest_simcore.simcore_service_library_fixtures", - "pytest_simcore.tmp_path_extra", ] @pytest.fixture(scope="session") -def here(): +def package_tests_dir(): return Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent @@ -38,12 +45,12 @@ def package_dir() -> Path: @pytest.fixture(scope="session") -def osparc_simcore_root_dir(here) -> Path: - root_dir = here.parent.parent.parent.resolve() +def osparc_simcore_root_dir(package_tests_dir: Path) -> Path: + root_dir = package_tests_dir.parent.parent.parent.resolve() assert root_dir.exists(), "Is this service within osparc-simcore repo?" 
- assert any(root_dir.glob("packages/service-library")), ( - "%s not look like rootdir" % root_dir - ) + assert any( + root_dir.glob("packages/service-library") + ), f"{root_dir} not look like rootdir" return root_dir @@ -58,3 +65,99 @@ def fake_data_dict(faker: Faker) -> dict[str, Any]: } data["object"] = deepcopy(data) return data + + +@pytest.fixture +async def get_redis_client_sdk( + mock_redis_socket_timeout: None, + mocker: MockerFixture, + redis_service: RedisSettings, +) -> AsyncIterable[ + Callable[[RedisDatabase], AbstractAsyncContextManager[RedisClientSDK]] +]: + @asynccontextmanager + async def _( + database: RedisDatabase, + decode_response: bool = True, # noqa: FBT002 + ) -> AsyncIterator[RedisClientSDK]: + redis_resources_dns = redis_service.build_redis_dsn(database) + client = RedisClientSDK( + redis_resources_dns, decode_responses=decode_response, client_name="pytest" + ) + assert client + assert client.redis_dsn == redis_resources_dns + assert client.client_name == "pytest" + + yield client + + await client.shutdown() + + async def _cleanup_redis_data(clients_manager: RedisClientsManager) -> None: + for db in RedisDatabase: + await clients_manager.client(db).redis.flushall() + + async with RedisClientsManager( + {RedisManagerDBConfig(database=db) for db in RedisDatabase}, + redis_service, + client_name="pytest", + ) as clients_manager: + await _cleanup_redis_data(clients_manager) + yield _ + await _cleanup_redis_data(clients_manager) + + +@pytest.fixture() +def uninstrument_opentelemetry(): + yield + try: + from opentelemetry.instrumentation.redis import RedisInstrumentor + + RedisInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.botocore import BotocoreInstrumentor + + BotocoreInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.requests import RequestsInstrumentor + + RequestsInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.aiopg import AiopgInstrumentor + + AiopgInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor + + AsyncPGInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor + + FastAPIInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.aiohttp_client import ( + AioHttpClientInstrumentor, + ) + + AioHttpClientInstrumentor().uninstrument() + except ImportError: + pass + try: + from opentelemetry.instrumentation.aiohttp_server import ( + AioHttpServerInstrumentor, + ) + + AioHttpServerInstrumentor().uninstrument() + except ImportError: + pass diff --git a/packages/service-library/tests/data/archive_utils/compress_stdout.json b/packages/service-library/tests/data/archive_utils/compress_stdout.json new file mode 100644 index 00000000000..6787eefe50f --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/compress_stdout.json @@ -0,0 +1,148 @@ +[ + "\n7-Zip 23.01 (x6", + "\n7-Zip 23.01 (x64) : Copyright (", + "4) : Copyright (c) 1999-2023 Igo", + "c) 1999-2023 Igor Pavlov : 2023-", + "r Pavlov : 2023-06-20\n 64-bit lo", + "06-20\n 64-bit locale=en_US.UTF-8", + "cale=en_US.UTF-8 Threads:12 OPEN", + " Threads:12 OPEN_MAX:1024\n\nScann", + "_MAX:1024\n\nScanning the drive:\n ", + "ing the drive:\n 0M Scan /tmp/p", + " 0M Scan /tmp/pytest-of-silenth", + "ytest-of-silenthk/pytest-470/tes", + 
"k/pytest-470/test_something0/", + "test_something0/\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " ", + " \b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b9 fold", + "\b\b\b\b\b\b\b\b\b\b9 folders, 204 files, ", + "ers, 204 files, 434866026 bytes ", + "434866026 bytes (415 MiB)\n\n", + "ytes (415 MiB)\n\nCreating archive", + "Creating archive: /tmp/pytest-of", + ": /tmp/pytest-of-silenthk/pytest", + "-silenthk/pytest-470/test_someth", + "-470/test_something0/mixed_types", + "ing0/mixed_types_dir.zip\n\nAdd ne", + "_dir.zip\n\nAdd new data to archiv", + "w data to archive: 9 folders, 20", + "e: 9 folders, 204 files, 4348660", + "4 files, 434866026 bytes (415 Mi", + "26 bytes (415 MiB)\n\n 0%", + " (415 MiB)\n\n 0%\b\b\b\b \b\b\b\b 8%", + "\b\b\b\b \b\b\b\b 8% 28 + mixed_type", + " 28 + mixed_types_dir/images0/ou", + "s_dir/images0/out3.jpg", + "images0/out3.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 1", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b 19% 49 + mixed_ty", + "9% 49 + mixed_types_dir/images0/", + "pes_dir/images0/out49.jpg", + "mages0/out49.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 30% 71 + mixed", + "\b 30% 71 + mixed_types_dir/image", + "_types_dir/images1/out23.jpg", + "mages1/out23.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 41% 92 + mixed", + "\b 41% 92 + mixed_types_dir/image", + "_types_dir/images1/out42.jpg", + "mages1/out42.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 52% 112 + mixe", + "\b 52% 112 + mixed_types_dir/imag", + "d_types_dir/images2/out15.jpg", + "mages2/out15.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " 
\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 63% 136 + m", + "\b\b\b\b 63% 136 + mixed_types_dir/i", + "ixed_types_dir/images2/out37.jpg", + "mages2/out37.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 74% 160 + m", + "\b\b\b\b 74% 160 + mixed_types_dir/i", + "ixed_types_dir/images3/out13.jpg", + "mages3/out13.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 85% 180 + m", + "\b\b\b\b 85% 180 + mixed_types_dir/i", + "ixed_types_dir/images3/out31.jpg", + "mages3/out31.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b 96% 204 + m", + "\b\b\b\b 96% 204 + mixed_types_dir/i", + "ixed_types_dir/images3/out9.jpg", + "images3/out9.jpg\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b\b\b\b ", + " ", + " \b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\nFiles read fro", + "\b\nFiles read from disk: 204\nArch", + "m disk: 204\nArchive size: 434902", + "ive size: 434902745 bytes (415 M", + "745 bytes (415 MiB)\nEverything i", + "iB)\nEverything is Ok\n", + "verything is Ok\n" +] diff --git a/packages/service-library/tests/data/archive_utils/decompress_stdout.json b/packages/service-library/tests/data/archive_utils/decompress_stdout.json new file mode 100644 index 00000000000..3d38437b7de --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/decompress_stdout.json @@ -0,0 +1,53 @@ +[ + "\n7-Zip 23.01 (x6", + "\n7-Zip 23.01 (x64) : Copyright (", + "4) : Copyright (c) 1999-2023 Igo", + "c) 1999-2023 Igor Pavlov : 2023-", + "r Pavlov : 2023-06-20\n 64-bit lo", + "06-20\n 64-bit locale=en_US.UTF-8", + "cale=en_US.UTF-8 Threads:12 OPEN", + " Threads:12 OPEN_MAX:1024\n\nScann", + "_MAX:1024\n\nScanning the drive fo", + "ing the drive for archives:\n 0M", + "r archives:\n 0M Scan /tmp/pytes", + " Scan /tmp/pytest-of-silenthk/py", + "t-of-silenthk/pytest-470/test_so", + "test-470/test_something0/\b\b\b\b\b\b\b", + "mething0/\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + 
"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b ", + " ", + " ", + " \b\b\b\b\b\b\b\b\b\b\b\b\b", + " \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b1 file, 43490274", + "1 file, 434902745 bytes (415 MiB", + "5 bytes (415 MiB)\n\nExtracting ar", + ")\n\nExtracting archive: /tmp/pyte", + "chive: /tmp/pytest-of-silenthk/p", + "st-of-silenthk/pytest-470/test_s", + "ytest-470/test_something0/mixed_", + "omething0/mixed_types_dir.zip\n--", + "types_dir.zip\n--\nPath = /tmp/pyt", + "\nPath = /tmp/pytest-of-silenthk/", + "est-of-silenthk/pytest-470/test_", + "pytest-470/test_something0/mixed", + "something0/mixed_types_dir.zip\nT", + "_types_dir.zip\nType = zip\nPhysic", + "ype = zip\nPhysical Size = 434902", + "al Size = 434902745\n\n 0%", + " 434902745\n\n 0%\b\b\b\b \b\b\b\b 62%", + "\b\b\b\b \b\b\b\b 62% 137", + " \b\b\b\b 62% 137\b\b\b\b\b\b\b\b ", + "\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\bEverythi", + "\b\b\b\b\b\b\b\bEverything is Ok\n\nFolder", + "ng is Ok\n\nFolders: 9\nFiles: 204\n", + "s: 9\nFiles: 204\nSize: 4348", + "Size: 434866026\nCompressed", + "66026\nCompressed: 434902745\n", + "ssed: 434902745\n" +] diff --git a/packages/service-library/tests/data/archive_utils/list_broken_format.txt b/packages/service-library/tests/data/archive_utils/list_broken_format.txt new file mode 100644 index 00000000000..41d31eadb05 --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/list_broken_format.txt @@ -0,0 +1,35 @@ +7-Zip (z) 24.09 (x64) : Copyright (c) 1999-2024 Igor Pavlov : 2024-11-28 + 64-bit locale=C.UTF-8 Threads:12 OPEN_MAX:1048576, ASM +Scanning the drive for archives: +1 file, 2089391770 bytes (1993 MiB) +Listing archive: /tmp/simcorefiles/3f8abbcc-923b-429b-953f-827df68b715f/input_1/PromlematicZipFile.zip +-- +Path = /tmp/simcorefiles/3f8abbcc-923b-429b-953f-827df68b715f/input_1/PromlematicZipFile.zip +Type = zip +Physical Size = 2089391770 + Date Time Attr Size Compressed Name +------------------- ----- ------------ ------------ ------------------------ +2024-12-11 15:31:40 ....A 45388221 45392151 b0_pair.nii.gz +2024-12-11 15:31:32 ....A 326 14 bvals +2024-12-11 15:31:32 ....A 2610 1228 bvecs +2024-12-11 15:31:52 ....A 192 111 diff2struct_fsl.mat +2024-12-11 15:31:52 ....A 191 110 diff2struct_fsl_inverse.mat +2024-12-11 15:31:52 ....A 848 367 diff2struct_mrtrix.txt +2024-12-11 15:31:46 ....A 1124081680 1009101417 dwi_preproc.mif +2024-12-11 15:31:52 ....A 1008525390 1008612664 dwi_preproc.nii.gz +2024-12-11 15:31:52 ....A 154 118 electrode_coords_coreg.txt +2024-12-11 15:31:52 ....A 57871 36566 mask_brain_coreg.nii.gz +2024-12-11 15:31:52 ....A 16811 703 mask_lh_hippo_body_coreg.nii.gz +2024-12-11 15:31:52 ....A 17304 1324 mask_lh_hippo_coreg.nii.gz +2024-12-11 15:31:52 ....A 16903 821 mask_lh_hippo_head_coreg.nii.gz +2024-12-11 15:31:52 ....A 16679 540 mask_lh_hippo_tail_coreg.nii.gz +2024-12-11 15:31:52 ....A 16779 670 mask_rh_hippo_body_coreg.nii.gz +2024-12-11 15:31:52 ....A 17232 1246 mask_rh_hippo_coreg.nii.gz +2024-12-11 15:31:52 ....A 16840 752 mask_rh_hippo_head_coreg.nii.gz +2024-12-11 15:31:52 ....A 16699 582 mask_rh_hippo_tail_coreg.nii.gz +2024-12-11 15:31:38 ....A 15068179 15069543 mean_b0.nii.gz +2024-12-11 11:49:22 ....A 54 54 simulation_parameters.txt +2024-11-22 12:21:58 ....A 94 80 stimulation_parameters.txt +2024-12-11 15:31:40 ....A 11166326 
11168031 T1_coreg.nii.gz +------------------- ----- ------------ ------------ ------------------------ +2024-12-11 15:31:52 2204427383 2089389092 22 files diff --git a/packages/service-library/tests/data/archive_utils/list_edge_case.txt b/packages/service-library/tests/data/archive_utils/list_edge_case.txt new file mode 100644 index 00000000000..81831dafbd4 --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/list_edge_case.txt @@ -0,0 +1,24 @@ +7-Zip (z) 24.09 (x64) : Copyright (c) 1999-2024 Igor Pavlov : 2024-11-28 + 64-bit locale=en_US.UTF-8 Threads:12 OPEN_MAX:1024, ASM + +Scanning the drive for archives: +1 file, 672 bytes (1 KiB) + +Listing archive: /tmp/pytest-of-silenthk/pytest-654/test_override_and_prune_from_a0/download.zip + +-- +Path = /tmp/pytest-of-silenthk/pytest-654/test_override_and_prune_from_a0/download.zip +Type = zip +Physical Size = 672 + + Date Time Attr Size Compressed Name +------------------- ----- ------------ ------------ ------------------------ +1980-01-01 00:00:00 D.... 0 0 d1 +1980-01-01 00:00:00 D.... 0 0 d1/d1_1 +1980-01-01 00:00:00 D.... 0 0 d1/d1_1/d1_2 +1980-01-01 00:00:00 ..... 0 0 d1/d1_1/d1_2/f5 +1980-01-01 00:00:00 D.... 0 0 d1/empty +1980-01-01 00:00:00 ..... 1 1 d1/f1 +1980-01-01 00:00:00 ..... 1 1 d1/f2 +------------------- ----- ------------ ------------ ------------------------ +1980-01-01 00:00:00 2 2 3 files, 4 folders diff --git a/packages/service-library/tests/data/archive_utils/list_empty_archive.txt b/packages/service-library/tests/data/archive_utils/list_empty_archive.txt new file mode 100644 index 00000000000..81bb24d808d --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/list_empty_archive.txt @@ -0,0 +1,17 @@ +7-Zip (z) 24.09 (x64) : Copyright (c) 1999-2024 Igor Pavlov : 2024-11-28 + 64-bit locale=en_US.UTF-8 Threads:12 OPEN_MAX:1024, ASM + +Scanning the drive for archives: +1 file, 22 bytes (1 KiB) + +Listing archive: /tmp/pytest-of-silenthk/pytest-692/test_archive_unarchive_empty_f1/mixed_types_dir.zip + +-- +Path = /tmp/pytest-of-silenthk/pytest-692/test_archive_unarchive_empty_f1/mixed_types_dir.zip +Type = zip +Physical Size = 22 + + Date Time Attr Size Compressed Name +------------------- ----- ------------ ------------ ------------------------ +------------------- ----- ------------ ------------ ------------------------ + 0 0 0 files diff --git a/packages/service-library/tests/data/archive_utils/list_stdout.txt b/packages/service-library/tests/data/archive_utils/list_stdout.txt new file mode 100644 index 00000000000..c2a0b0cba8f --- /dev/null +++ b/packages/service-library/tests/data/archive_utils/list_stdout.txt @@ -0,0 +1,1026 @@ +7-Zip (z) 24.09 (x64) : Copyright (c) 1999-2024 Igor Pavlov : 2024-11-28 + 64-bit locale=en_US.UTF-8 Threads:12 OPEN_MAX:1024, ASM + +Scanning the drive for archives: +1 file, 155072 bytes (152 KiB) + +Listing archive: /tmp/pytest-of-silenthk/pytest-636/test_unarchive_in_same_dir_as_0/archive.zip + +-- +Path = /tmp/pytest-of-silenthk/pytest-636/test_unarchive_in_same_dir_as_0/archive.zip +Type = zip +Physical Size = 155072 + + Date Time Attr Size Compressed Name +------------------- ----- ------------ ------------ ------------------------ +1980-01-01 00:00:00 D.... 0 0 study_data +1980-01-01 00:00:00 ..... 9 9 study_data/AHmPWWlG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BTdF +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/BTdF/sFXYkFAy.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/IUDS/BzQs +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/VOth +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/BzQs/VOth/KpMSFqyA.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/BzQs/VOth/UPSYGkEP.bin +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/BzQs/VOth/vJfXMQIC.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/BzQs/XuBDjZTP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/ZqbM +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/BzQs/ZqbM/mYzAqZjT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/ZqbM/mjFG +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/BzQs/ZqbM/mjFG/YbWExhbk.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/BzQs/ZqbM/mjFG/eVYoddih.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/BzQs/ZqbM/mjFG/rxCDPVFF.bin +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/BzQs/ZqbM/ogZtYPfy.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/BzQs/ZqbM/xabFpebj.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/oZeQ +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/oZeQ/MQnS +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/BzQs/oZeQ/MQnS/iAimSiwK.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/BzQs/oZeQ/MQnS/rOKJEsBI.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/oZeQ/ZGdi +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/BzQs/oZeQ/ZGdi/wTDcGUje.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/BzQs/oZeQ/ZxDtLxeH.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/BzQs/oZeQ/tMVI +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/BzQs/oZeQ/tMVI/dFqNULMW.bin +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/BzQs/oZeQ/tMVI/puxktrPH.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/BzQs/oZeQ/vuWuSluC.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/BzQs/oZeQ/wuzXQmhi.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/HohXRGhf.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/Mpid +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/QUar/Mpid/NdzntSjX.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/Xzlt +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/QUar/Xzlt/btqHuExZ.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/Xzlt/tcsoZMZI.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/QUar/Xzlt/vCcJUSMG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/QUar/bEEU/AhluzLDS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/BLtJ +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/QUar/bEEU/BLtJ/AxpLtAvu.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/QUar/bEEU/BLtJ/QYgKRbpp.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/bEEU/BLtJ/YuuHpxZk.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/BLtJ/awey +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/bEEU/BLtJ/awey/lOvujXaN.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/BLtJ/jITF +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/QUar/bEEU/BLtJ/jITF/IoCtRjhp.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/QUar/bEEU/BLtJ/jITF/QXNYHPqc.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/QUar/bEEU/BLtJ/jITF/pvMObkYX.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/BLtJ/zlvM +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/QUar/bEEU/BLtJ/zlvM/ZeEhVZwY.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/QUar/bEEU/BLtJ/zlvM/ihmHTbnN.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/bEEU/BLtJ/zlvM/nzoiJZki.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/lERU +1980-01-01 00:00:00 ..... 
4 4 study_data/IUDS/QUar/bEEU/lERU/dnwRnFwU.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/QUar/bEEU/lERU/hfWJOzkF.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/QUar/bEEU/lERU/wQyzbHYT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/bEEU/mxdh +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/bEEU/mxdh/rcTCsmge.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/QUar/pNXEIcLo.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/qAhw +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/qAhw/SITz +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/qAhw/SITz/TMwTQtnA.bin +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/QUar/qAhw/SITz/jPEINHjF.bin +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/QUar/qAhw/SITz/qzixwQro.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/QUar/qAhw/fQtJoGGb.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/qAhw/sZTvMnTJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/qAhw/uuGV +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/QUar/qAhw/uuGV/QIBfeuDc.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/QUar/qAhw/uuGV/XylGuojB.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/QUar/qAhw/uuGV/lRWSDcMi.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/QUar/tFumcsJe.bin +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/QUar/tJPiMsqg.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/xIOs +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/QUar/xIOs/IHImoeXf.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/QUar/xIOs/TjUtupqB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/QUar/xIOs/UjGi +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/QUar/xIOs/UjGi/GnPWYhmo.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/QUar/xIOs/UjGi/fYMJDnhQ.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/QUar/xIOs/eqoWettf.bin +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/SjHMTgiT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/VXBb +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/VXBb/DxYL +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/VXBb/DxYL/SgIxHBds.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/VXBb/DxYL/qHPbvyxB.bin +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/VXBb/XRdZqAWz.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/VXBb/xgFE +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/VXBb/xgFE/lfGWtuHA.bin +1980-01-01 00:00:00 ..... 7 7 study_data/IUDS/VXBb/xgFE/wpTZWDnU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/VXBb/ztKe +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/VXBb/ztKe/YPFIVOrf.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/VXBb/ztKe/ZJeFsUho.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt/BXSb +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt/BXSb/JquB +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/WQYt/BXSb/JquB/qLPVVqKb.bin +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/WQYt/BXSb/WKglhhgu.bin +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/WQYt/BXSb/cyIvOSrm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt/BXSb/fDgN +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/WQYt/BXSb/fDgN/xZJLFXXA.bin +1980-01-01 00:00:00 ..... 3 3 study_data/IUDS/WQYt/BXSb/fDgN/ztqgmNPi.bin +1980-01-01 00:00:00 ..... 7 7 study_data/IUDS/WQYt/BXSb/iShVNXLq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt/FTKI +1980-01-01 00:00:00 ..... 7 7 study_data/IUDS/WQYt/FTKI/MymnzjRJ.bin +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/WQYt/FTKI/hJQhANiY.bin +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/WQYt/PIChqQda.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/IUDS/WQYt/PYnJ +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/WQYt/PYnJ/OUUqJvzT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/WQYt/uDeE +1980-01-01 00:00:00 ..... 1 1 study_data/IUDS/WQYt/uDeE/eJKaqNsP.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/aeGNkaky.bin +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/gfmgGgKA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/NaUm +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/uomJ/NaUm/BEZkvGlg.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/PzmA +1980-01-01 00:00:00 ..... 6 6 study_data/IUDS/uomJ/PzmA/GYiivuev.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/uomJ/PzmA/tLaNwKAZ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/herB +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/herB/AJRh +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/uomJ/herB/AJRh/MptYwYYo.bin +1980-01-01 00:00:00 ..... 8 8 study_data/IUDS/uomJ/herB/AJRh/pFaGUjAA.bin +1980-01-01 00:00:00 ..... 7 7 study_data/IUDS/uomJ/herB/AJRh/tTrjnkTO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/herB/DSym +1980-01-01 00:00:00 ..... 4 4 study_data/IUDS/uomJ/herB/DSym/EXdWaQBd.bin +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/uomJ/herB/DSym/oIhKwNnt.bin +1980-01-01 00:00:00 D.... 0 0 study_data/IUDS/uomJ/herB/SQWH +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/uomJ/herB/SQWH/OEwPkHZt.bin +1980-01-01 00:00:00 ..... 10 10 study_data/IUDS/uomJ/herB/SQWH/aPnGhYff.bin +1980-01-01 00:00:00 ..... 9 9 study_data/IUDS/uomJ/herB/kbAwBsaM.bin +1980-01-01 00:00:00 ..... 5 5 study_data/IUDS/uomJ/herB/kfenzVnd.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/uomJ/pnFMfLCF.bin +1980-01-01 00:00:00 ..... 2 2 study_data/IUDS/uomJ/tmiBHwha.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/AHax/HHcLYoHX.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/cida +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/AHax/cida/iqrXWDol.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/AHax/cida/xHZygVBV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/mhSe +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/AHax/mhSe/obwrdfVP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/sDKW +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/sDKW/JeOB +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/AHax/sDKW/JeOB/XHmHUMdy.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/AHax/sDKW/JeOB/wBPFRLuC.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/AHax/sDKW/JeOB/wlDKOFXa.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/AHax/sDKW/QpGGvMkO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/sDKW/qxUp +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/AHax/sDKW/qxUp/eEHSZJTy.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/AHax/sDKW/qxUp/yDSPphdu.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/AHax/sDKW/soalSpuS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/AHax/woay +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/AHax/woay/OglKDkgA.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/AHax/woay/uIposZJU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/BEIS +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/BEIS/XvuCCPPA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/BEIS/hIUw +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/BEIS/hIUw/SelMNQFB.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/BEIS/hIUw/fQdPzOgJ.bin +1980-01-01 00:00:00 ..... 
5 5 study_data/PbDS/BEIS/hIUw/mOwNunmp.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/BEIS/vOAA +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/BEIS/vOAA/NBvNlKaH.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/BEIS/vOAA/UEfQylpF.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/CGwQ/HIdCWBFM.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/CGwQ/TMWUpAjs.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/ZlXG +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/ZlXG/OWdF +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/CGwQ/ZlXG/OWdF/OFmIkxCP.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/CGwQ/ZlXG/OWdF/PSdnKriR.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/CGwQ/ZlXG/OWdF/mLfXczgZ.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/CGwQ/ZlXG/skMTdATf.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/ARkM +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/ARkM/GGFrxYJI.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/HraQ +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/CGwQ/glLz/HraQ/AEpAUPee.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/HraQ/ALDS +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/CGwQ/glLz/HraQ/ALDS/brzjpmwy.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/HraQ/ALDS/pxBGKyUS.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/CGwQ/glLz/HraQ/ALDS/xJlwYbab.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/CGwQ/glLz/HraQ/MatzghLB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/HraQ/Pmff +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/CGwQ/glLz/HraQ/Pmff/gWotxMKq.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/CGwQ/glLz/HraQ/ZIxtYbBj.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/CGwQ/glLz/HxaOrjyy.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/CGwQ/glLz/OwSpSFzU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/acvd +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/CGwQ/glLz/acvd/CHiprfuO.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/CGwQ/glLz/acvd/NKEIGiVl.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/CGwQ/glLz/acvd/dHvcDzKL.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/CGwQ/glLz/phrjYiFq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/qrLX +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/qrLX/LtmA +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/CGwQ/glLz/qrLX/LtmA/DtJcuFIC.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/qrLX/LtmA/onlSzKUy.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/CGwQ/glLz/qrLX/LtmA/tCLsDgeU.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/qrLX/VEtnVRLJ.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/qrLX/ZXFETwRa.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/qrLX/xweq +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/CGwQ/glLz/qrLX/xweq/cmAVxrwC.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/qrLX/xweq/dYNGHiXH.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/CGwQ/glLz/qrLX/xweq/yfGkscwv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/glLz/qrLX/yYXb +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/CGwQ/glLz/qrLX/yYXb/vAGAkkwI.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/CGwQ/gspMOvRJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/CGwQ/kzvA +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/CGwQ/kzvA/UMDWfhuC.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/CGwQ/kzvA/gijIIvlb.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/PbDS/DLUZ +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/EpGS +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/EpGS/UuTE +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/DLUZ/EpGS/UuTE/OAdlkjjY.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/DLUZ/EpGS/fBUUjfUw.bin +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/DLUZ/EpGS/gNRfPaEP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/EpGS/pMlZ +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/DLUZ/EpGS/pMlZ/LqjcxAZR.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/DLUZ/EpGS/pMlZ/SCpvaIYE.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/DLUZ/EpGS/pMlZ/rdjkKAPe.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/DLUZ/LghbzxaY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/XOWB +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/DLUZ/XOWB/BhvokPZb.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/DLUZ/XOWB/lmPXYNjp.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/DLUZ/YspaXBKN.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/piPY +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/DLUZ/piPY/BLJhwTKs.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/DLUZ/piPY/GcXTairW.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/DLUZ/piPY/ltTOQpae.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/zUQl +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/DLUZ/zUQl/MKpwDeHY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/DLUZ/zUQl/OuOU +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/DLUZ/zUQl/OuOU/EuuLsrvp.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/DLUZ/zUQl/OuOU/fZhWFKVP.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/DLUZ/zUQl/OuOU/usrVkWUt.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/VSTY/JyCeSjfx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/LJil +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/VSTY/LJil/ZPsEjcSB.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/VSTY/LJil/dlYKGjXI.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/LJil/uPYc +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/VSTY/LJil/uPYc/RxYReBnu.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/AzmB +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/VSTY/RRGz/AzmB/lKzwMJqx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/AzmB/oCKc +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/VSTY/RRGz/AzmB/oCKc/REnXtFrp.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/MCux +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/VSTY/RRGz/MCux/BMfcVMQX.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/VSTY/RRGz/dEZbdrPP.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/VSTY/RRGz/dkqruact.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/mEKd +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/VSTY/RRGz/mEKd/DuKbgZkd.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/VSTY/RRGz/mEKd/JiPMEOqv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/mEKd/eGzJ +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/VSTY/RRGz/mEKd/eGzJ/XgIJzblX.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/RRGz/mEKd/ibgk +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/VSTY/RRGz/mEKd/ibgk/FGbingZo.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/VSTY/RRGz/xGGsXkeo.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/uWlw +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/VSTY/uWlw/ApYpBPJI.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/VSTY/uWlw/GcQwkFLK.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/PbDS/VSTY/uWlw/SHln +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/VSTY/uWlw/SHln/YUHBaifP.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/VSTY/uWlw/hxumkOwg.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/uWlw/iiaQ +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/VSTY/uWlw/iiaQ/FIqMoOJZ.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/VSTY/uWlw/iiaQ/iXMMZzKE.bin +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/VSTY/uWlw/iiaQ/jIXXEQXU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/VSTY/vAwh +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/VSTY/vAwh/xZynSKnG.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/VSTY/vkRNwRzu.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/WMtOCTHZ.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/XGQGvNKr.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/DpWM +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/DpWM/XlPYvxxH.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/ZaeP/DpWM/pBlUsWKW.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/DpWM/wJeYcIpu.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/FfHg +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/FfHg/IiFy +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/ZaeP/FfHg/IiFy/eqgDEchG.bin +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/ZaeP/FfHg/PpIaxZzG.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/ZaeP/FfHg/iNWEDRER.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/FfHg/upUk +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/ZaeP/FfHg/upUk/JPVjhyDQ.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/FfHg/upUk/KdZmxodV.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/ZaeP/FfHg/yCWUuAXv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/aTPz +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/aTPz/OHcJTBRe.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/ZaeP/aTPz/uKwlCrJT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/mywl +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/mywl/ulwBbWcd.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/ocjx +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/ocjx/FLIp +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/ocjx/FLIp/NDoRNeuq.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/ZaeP/ocjx/FLIp/UzzbUmmv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/ocjx/NPvz +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/ZaeP/ocjx/NPvz/UgrVBJXC.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/ZaeP/ocjx/NPvz/ZmunbBBl.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/ZaeP/ocjx/NyJP +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/ZaeP/ocjx/NyJP/ZyloaHjk.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/ZaeP/ocjx/qVhCWfKD.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/ZaeP/vEesrCcd.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/nDgc +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/nDgc/JZsoYDwQ.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/nDgc/LgcnuLoA.bin +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/nDgc/ZKTtJeXV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW/GILE +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW/GILE/ADNa +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/spKz/FZiW/GILE/ADNa/NiLIdxMt.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/FZiW/GILE/ADNa/baAhcEWY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW/GILE/HHAP +1980-01-01 00:00:00 ..... 
9 9 study_data/PbDS/spKz/FZiW/GILE/HHAP/BUYkaaFj.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/spKz/FZiW/GILE/HHAP/RzldGmhm.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/FZiW/GILE/HHAP/iTeZtHOU.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/FZiW/GILE/VyXRfRVQ.bin +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/spKz/FZiW/GILE/eewCvNAN.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/spKz/FZiW/GILE/ktJXFPXz.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW/GILE/yfUb +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/spKz/FZiW/GILE/yfUb/dDKAfAZE.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/FZiW/GILE/yfUb/umLeeGQL.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/FZiW/GILE/yfUb/xiyozONo.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/FZiW/HwEjOXIR.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/FZiW/LlQs +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/spKz/FZiW/LlQs/YUdnxKOj.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/FZiW/LlQs/lMrWIHnc.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/spKz/FZiW/LlQs/pBSEzNZW.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/spKz/ULYZNpys.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/eyDjAdhw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/sXqJ/AdzfACXQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/OeNj +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/spKz/sXqJ/OeNj/AnRPduis.bin +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/spKz/sXqJ/OeNj/yqxpchNa.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/spKz/sXqJ/SdVPoeDT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/Tvjn +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/sXqJ/Tvjn/GOgsNtHV.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/spKz/sXqJ/Tvjn/QIvObNkk.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/spKz/sXqJ/Tvjn/VGJjiFQh.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/UONi +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/UONi/Hpbv +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/sXqJ/UONi/Hpbv/FNLyKFpT.bin +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/spKz/sXqJ/UONi/Hpbv/bZCTbXJT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/UONi/NVpw +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/spKz/sXqJ/UONi/NVpw/OuKUROEq.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/spKz/sXqJ/UONi/NVpw/qoLOadUH.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/sXqJ/UONi/NVpw/uTUymCTi.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/UONi/Witp +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/spKz/sXqJ/UONi/Witp/gxqwKKJq.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/sXqJ/UONi/Witp/iCBirjew.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/spKz/sXqJ/UONi/ztkcHHOi.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/vmKv +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/vmKv/CkGq +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/spKz/sXqJ/vmKv/CkGq/xquIKRuS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/sXqJ/vmKv/HJUM +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/spKz/sXqJ/vmKv/HJUM/BKgDBvHB.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/sXqJ/vmKv/HJUM/ZWekbYEG.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/sXqJ/vmKv/HJUM/nqFqJcpE.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/spKz/sXqJ/vmKv/NDqtGaaA.bin +1980-01-01 00:00:00 ..... 6 6 study_data/PbDS/spKz/sXqJ/vmKv/pIiNhJaI.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/PbDS/spKz/sXqJ/vmKv/sKnu +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/sXqJ/vmKv/sKnu/OoPxEOZr.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/spKz/sXqJ/vmKv/sKnu/hEKmDTRd.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/wYGM +1980-01-01 00:00:00 ..... 3 3 study_data/PbDS/spKz/wYGM/KsgcCdPJ.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/wYGM/QfWEihJc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/ymut +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/ymut/RdJe +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/spKz/ymut/RdJe/eOdgYdJr.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/spKz/ymut/RdJe/fGGivJke.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/ymut/dWqXfuqB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/ymut/eZsl +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/spKz/ymut/eZsl/rDnzlYzm.bin +1980-01-01 00:00:00 ..... 9 9 study_data/PbDS/spKz/ymut/eZsl/tirLLJuG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/spKz/ymut/tHga +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/spKz/ymut/tHga/fJYTIXcQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/tsdB +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/tsdB/BEIOYZmw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/uNpG +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/uNpG/GKMS +1980-01-01 00:00:00 ..... 2 2 study_data/PbDS/uNpG/GKMS/eOlzfIGa.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/uNpG/GKMS/qrydULYI.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/uNpG/GKMS/zoQsaBgm.bin +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/uNpG/WBJhVela.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/uNpG/smez +1980-01-01 00:00:00 ..... 4 4 study_data/PbDS/uNpG/smez/SOePbQeI.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/uNpG/smez/auLnSzPS.bin +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/uNpG/smez/sCFLNyht.bin +1980-01-01 00:00:00 ..... 10 10 study_data/PbDS/vaxuZvrv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/yWpC +1980-01-01 00:00:00 ..... 5 5 study_data/PbDS/yWpC/IIADldhb.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/yWpC/Khcd +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/yWpC/Khcd/JineYSCb.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/yWpC/Khcd/bcLvtLeS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/PbDS/yWpC/mtZm +1980-01-01 00:00:00 ..... 7 7 study_data/PbDS/yWpC/mtZm/MGfhcHkp.bin +1980-01-01 00:00:00 ..... 8 8 study_data/PbDS/yWpC/mtZm/jhNrgOsG.bin +1980-01-01 00:00:00 ..... 1 1 study_data/PbDS/yWpC/mtZm/xeNdgdzs.bin +1980-01-01 00:00:00 ..... 10 10 study_data/XOKAMIFq.bin +1980-01-01 00:00:00 ..... 5 5 study_data/XepHHTJo.bin +1980-01-01 00:00:00 ..... 3 3 study_data/dWYwKvfQ.bin +1980-01-01 00:00:00 ..... 5 5 study_data/joLovHDU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/LXoatLgw.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/NnqawJFg.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/RGsGfOIB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/eaVw +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/eaVw/BvFicfle.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/eaVw/ORBrGIiT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/iEPO +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/iEPO/lxKE +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/iEPO/lxKE/AOENgbsn.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/iEPO/zRlZKSdY.bin +1980-01-01 00:00:00 D.... 
0 0 study_data/qpBn/EpFh/nihR +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/Fasc +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/Fasc/YKrH +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/EpFh/nihR/Fasc/YKrH/aaXePFwS.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/EpFh/nihR/Fasc/YKrH/gajMUskk.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/Fasc/YKrH/raQuZNGJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/Fasc/okiQ +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/EpFh/nihR/Fasc/okiQ/IrOtTJFx.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/EpFh/nihR/Fasc/okiQ/MJYXQsyO.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/Fasc/okiQ/sQqBpqzD.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/EpFh/nihR/Fasc/wIfZiuuw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/NDVH +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/NDVH/cZrDbzfN.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/nihR/UjcYcdmS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/bRaj +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/nihR/bRaj/NJXiXFYi.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/nihR/bRaj/ZlnPxYqP.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/bWylLbEK.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/lttR +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/nihR/lttR/CVhzbPeh.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/EpFh/nihR/lttR/QenoImvH.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/lttR/fSMUpaRn.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vOMj +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vOMj/TdeW +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/EpFh/nihR/vOMj/TdeW/BGOoUuHE.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/nihR/vOMj/TdeW/lUETxQqS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vOMj/TrFt +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/nihR/vOMj/TrFt/EAbnIiTW.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/EpFh/nihR/vOMj/TrFt/bxFZtNfR.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/vOMj/TrFt/eIAYdzIx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vOMj/cXzO +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/nihR/vOMj/cXzO/DXgTlcXe.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/nihR/vOMj/cXzO/IAurqNpE.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/nihR/vOMj/cXzO/egEGKVeK.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/nihR/vOMj/hwiZkldP.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/nihR/vOMj/nGvUhWAB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vfaz +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vfaz/PgeA +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/nihR/vfaz/PgeA/fHiljXhM.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/nihR/vfaz/PgeA/rRpgIYzk.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/nihR/vfaz/ZRzfDAuM.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/EpFh/nihR/vfaz/ZapbujcM.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/nihR/vfaz/gOXuIvuu.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/nihR/vfaz/iVFp +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/nihR/vfaz/iVFp/IOekCIGU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/JJGM +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/pRYz/JJGM/YqcCLfOW.bin +1980-01-01 00:00:00 ..... 
9 9 study_data/qpBn/EpFh/pRYz/JJGM/iTVflTax.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Nhgn +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/pRYz/Nhgn/OgQQakzD.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/Nhgn/VfVAbEPf.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Nhgn/Zgyp +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/Nhgn/Zgyp/FuMtJTPq.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/pRYz/Nhgn/Zgyp/GhpdjBRS.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/pRYz/Nhgn/Zgyp/fDdYvwtf.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/Nhgn/jIjvPasH.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Piyb +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/pRYz/Piyb/HepLTRsV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Piyb/SnJA +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/EpFh/pRYz/Piyb/SnJA/Xgbdlvcn.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/EpFh/pRYz/Piyb/SnJA/lFhaivBK.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/Piyb/SnJA/rpSWLjju.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Piyb/TNdn +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/EpFh/pRYz/Piyb/TNdn/ThNnMvHL.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/pRYz/Piyb/TNdn/vdueObZq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/Piyb/XFTc +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/pRYz/Piyb/XFTc/FaEWOeaf.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/Piyb/XFTc/LwYirLla.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/pRYz/Piyb/XFTc/zWMuXdnl.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/pRYz/Piyb/bQvsoZgn.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/EpFh/pRYz/SXRTSzwc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/aPCU +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/pRYz/aPCU/ZbYtlWbi.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/pRYz/aPCU/bKLpLQyY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/aPCU/iyBf +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/aPCU/iyBf/QXyLnltk.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/EpFh/pRYz/aPCU/iyBf/RRrvDFBm.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/EpFh/pRYz/aPCU/iyBf/tebfpNNw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/aPCU/rCnj +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/EpFh/pRYz/aPCU/rCnj/BYopBjsq.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/EpFh/pRYz/aPCU/rCnj/xTYMFVdC.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/pRYz/aPCU/rCnj/ztmfWauD.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/EpFh/pRYz/mNyxHHZU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/EpFh/pRYz/vGfd +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/EpFh/pRYz/vGfd/UZEDmfRf.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/TRuJ +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/TRuJ/TBTCWVIF.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/TRuJ/dgNWJGZZ.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/WrBimZYN.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/cvNb +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/cvNb/DDBlyTVQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/cvNb/ECqb +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/cvNb/ECqb/AflRnBHO.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/cvNb/ECqb/IHAoRRfi.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/cvNb/ECqb/UsOeOqKz.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/cvNb/UxyI +1980-01-01 00:00:00 ..... 
10 10 study_data/qpBn/cvNb/UxyI/OoxFFTKz.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/cvNb/UxyI/PNZGMdcO.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/cvNb/fvXnBWEH.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/cvNb/jhhlxDpv.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/ezvwqwjA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fCfx +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fCfx/XVtJ +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/fCfx/XVtJ/jHpzpKhm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fCfx/luiD +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fCfx/luiD/ANEnmJBQ.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/fCfx/luiD/crQEaRwE.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/fCfx/wfaBvFqN.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fCfx/zTyq +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fCfx/zTyq/PmbqMjih.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fCfx/zTyq/eQRdtxLq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fVOY +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fVOY/cqtsqGPc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fVOY/upaM +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fVOY/upaM/VAqByQwN.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fVOY/upaM/tjhJjheI.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BDCM +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BDCM/Hmru +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/BDCM/Hmru/moIBrKnl.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BDCM/UVjo +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fvWH/BDCM/UVjo/OaDqrgNV.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/BDCM/UVjo/OzqXynvt.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/BDCM/UVjo/waJTqlzv.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/fvWH/BDCM/VFKVCKSd.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BDCM/ZIVO +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/fvWH/BDCM/ZIVO/WLaoZCKE.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/BDCM/ZIVO/tebssNLZ.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fvWH/BDCM/ZIVO/vTErengc.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/BDCM/dZqOfgtH.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/BDCM/lDeJqMxj.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BUuY +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/BUuY/KunOFMPo.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BUuY/ULnx +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/BUuY/ULnx/vhxhmSEG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/BUuY/iKGS +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/fvWH/BUuY/iKGS/AsQkhisM.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/fvWH/BUuY/iKGS/rnPnXFfC.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/BUuY/iRImrZFN.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/BUuY/oVZwPFrY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/LgmT +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/LgmT/OGQK +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/fvWH/LgmT/OGQK/BIZlUBKq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/LgmT/VbfA +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/LgmT/VbfA/cDdTRWcK.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/LgmT/VbfA/kfrpbyjT.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/LgmT/VbfA/wtiJkSMT.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/LgmT/jJMb +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/LgmT/jJMb/CPINYFLA.bin +1980-01-01 00:00:00 ..... 
8 8 study_data/qpBn/fvWH/LgmT/jJMb/MzrpZTbW.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/fvWH/LgmT/oKTZaGbJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/czeO +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/czeO/CPduFCuG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/czeO/HXOz +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/czeO/HXOz/JSaRyCIc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/czeO/Wftj +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/fvWH/czeO/Wftj/ZbHrtDgU.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/czeO/Wftj/uUYI +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/fvWH/czeO/Wftj/uUYI/pqtgCVkL.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fvWH/czeO/Wftj/uUYI/sYHxXPZC.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fvWH/czeO/Wftj/uUYI/xMRMwhnr.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/czeO/zeSB +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/fvWH/czeO/zeSB/blWgWcDu.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/fvWH/czeO/zeSB/mIFegvuO.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/ddEffTHj.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/ifJn +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/fvWH/ifJn/XvjPxmxX.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/fvWH/ifJn/uBFNbsua.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/rdjg/ByRvZziY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/DGeD +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/fvWH/rdjg/DGeD/JrkFMQiv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/DGeD/QGJW +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/rdjg/DGeD/QGJW/YgNpPJiJ.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/fvWH/rdjg/DGeD/QGJW/ryLWzUvA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/DGeD/WjwU +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/fvWH/rdjg/DGeD/WjwU/qvgUfPjY.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/fvWH/rdjg/DGeD/yLWxNwlk.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/rdjg/ZncvEwJm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/ayGm +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/ayGm/Myjs +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/fvWH/rdjg/ayGm/Myjs/ElPKkYsz.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/rdjg/ayGm/Myjs/SylqDbHW.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/rdjg/ayGm/wvxnBOKO.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/rdjg/ixqpZqUJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/qTMZ +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/rdjg/qTMZ/BjNIiRos.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/tnqV +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/tnqV/GIQN +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/fvWH/rdjg/tnqV/GIQN/BNXmlOpW.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/fvWH/rdjg/tnqV/HeDz +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/fvWH/rdjg/tnqV/HeDz/HRwylvtC.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/rdjg/tnqV/HeDz/czUpsQdU.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/fvWH/rdjg/tnqV/HeDz/kyCzCRzi.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/fvWH/rdjg/tnqV/RGdcUwIl.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/fvWH/rdjg/tnqV/zoDphsOA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/iAyO +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/iAyO/PPmxfTfO.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/iAyO/ncBVNvYU.bin +1980-01-01 00:00:00 ..... 
2 2 study_data/qpBn/iAyO/zZvQefyZ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/Dsot +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/okzR/Dsot/LBtCFust.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/Dsot/PrMW +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/okzR/Dsot/PrMW/AgiwqkCG.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/okzR/Dsot/PrMW/GeuHheGI.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/okzR/Dsot/PrMW/UfwYrxWD.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/IRSz +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/okzR/IRSz/pTkbQxLN.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/okzR/IRSz/yYqHDOvE.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/IRSz/zFOu +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/okzR/IRSz/zFOu/XdozGxhV.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/okzR/IRSz/zFOu/ZtiKJGpR.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/okzR/IRSz/zFOu/bnVtDjhd.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/okzR/MHHiWQHG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/Wzwr +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/okzR/Wzwr/spDbzHGA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/itZT +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/itZT/EvDG +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/okzR/itZT/EvDG/dBFvHSHR.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/okzR/itZT/EvDG/rKpmiQhv.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/okzR/itZT/EvDG/vvPVJrID.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/okzR/itZT/dnEVgLwz.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/itZT/sYqw +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/okzR/itZT/sYqw/TBgDoilL.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/okzR/itZT/sYqw/TNvGZGAA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/okzR/neiT +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/okzR/neiT/SUSkHYyZ.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/okzR/vbEclTGm.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/okzR/zzNUNqQx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/HYsw +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/HYsw/BFOG +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/ygsr/HYsw/BFOG/RYrsjSMp.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/ygsr/HYsw/BFOG/umDeitsb.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/HYsw/EUbu +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/ygsr/HYsw/EUbu/EiaIpHaR.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/ygsr/HYsw/EUbu/hTMxDopY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/HYsw/mmQS +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/ygsr/HYsw/mmQS/IKOfQKHg.bin +1980-01-01 00:00:00 ..... 6 6 study_data/qpBn/ygsr/HYsw/mmQS/aZjtUQrA.bin +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/ygsr/HYsw/mmQS/jMqXeEHK.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/ygsr/HYsw/zxBwxQBV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/LGnv +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/ygsr/LGnv/GLlLwZTX.bin +1980-01-01 00:00:00 ..... 3 3 study_data/qpBn/ygsr/LGnv/UfCliLsk.bin +1980-01-01 00:00:00 ..... 10 10 study_data/qpBn/ygsr/LGnv/hHkrHkUL.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/MjyB +1980-01-01 00:00:00 ..... 4 4 study_data/qpBn/ygsr/MjyB/HrrNLNgt.bin +1980-01-01 00:00:00 ..... 1 1 study_data/qpBn/ygsr/MjyB/OUXOQcDk.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/SxnX +1980-01-01 00:00:00 ..... 
5 5 study_data/qpBn/ygsr/SxnX/IjbzJfYC.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/ygsr/SxnX/acHfxqVQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/aGQe +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/ygsr/aGQe/QlDFdseq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/qpBn/ygsr/aGQe/WRkk +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/ygsr/aGQe/WRkk/bOZqMgPB.bin +1980-01-01 00:00:00 ..... 9 9 study_data/qpBn/ygsr/aGQe/WRkk/bnixlRLd.bin +1980-01-01 00:00:00 ..... 7 7 study_data/qpBn/ygsr/aGQe/WRkk/gdsfUcZg.bin +1980-01-01 00:00:00 ..... 8 8 study_data/qpBn/ygsr/aokwRxPP.bin +1980-01-01 00:00:00 ..... 5 5 study_data/qpBn/ygsr/hhOTTdxQ.bin +1980-01-01 00:00:00 ..... 2 2 study_data/qpBn/ygsr/miJahKEK.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/Acpw +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/Acpw/EpnwbdTq.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/Acpw/wYWyFKaq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/HIzP +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/HIzP/ESRQdUty.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/HIzP/OviHEAXX.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/LhEi +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/LhEi/LUNRHjwM.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/LhEi/NIRc +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/LhEi/NIRc/kEJbKeND.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/LhEi/TodIQghn.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/LhEi/mwAYhzaW.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/AXRE +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/POfv/AXRE/CDkdtsgj.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/POfv/HDAOwLCZ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/KLzp +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/POfv/KLzp/VdcGHgWk.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/KLzp/XPdh +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/POfv/KLzp/XPdh/juyQHIhv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/KLzp/btcF +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/POfv/KLzp/btcF/BKAqFTqk.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/POfv/KLzp/btcF/PNOtiDiM.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/POfv/KLzp/btcF/itzrHoXN.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/POfv/KLzp/cvVCYhxN.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/POfv/KLzp/nqqdvauD.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/NQCY +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/NQCY/EtTR +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/POfv/NQCY/EtTR/WEwxmidz.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/POfv/NQCY/EtTR/qPYkbccP.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/POfv/NQCY/FjWFUbhU.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/POfv/NQCY/kynTWzzv.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/POfv/NQCY/tUDwIPMP.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/POfv/XtKSaiuJ.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/POfv/iaDFjGee.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/POfv/mdwT +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/POfv/mdwT/BBDbHkfl.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/POfv/mdwT/PLRSafGv.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/POfv/mdwT/QBguVHuw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/RdbQ/DFWrHCde.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/JGpd +1980-01-01 00:00:00 ..... 
1 1 study_data/ubAC/RdbQ/JGpd/FRwxKDxR.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/RdbQ/JGpd/KxWJEhMk.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/UhGv +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/RdbQ/UhGv/IRCOMMBy.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/RdbQ/ZLvZQYqx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/absq +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/absq/AvZV +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/RdbQ/absq/AvZV/bPvQjucH.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/RdbQ/absq/AvZV/hNEvuEaf.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/RdbQ/absq/SxKCmmAC.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/RdbQ/absq/YltmgmGJ.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/RdbQ/absq/jjTLOtNg.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/bLSC +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/RdbQ/bLSC/vlUMpRaC.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/bcAj +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/RdbQ/bcAj/YHeLwylR.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/RdbQ/bcAj/gjuz +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/RdbQ/bcAj/gjuz/OlAQxxZn.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/RdbQ/yShZVhRx.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/CYIJMHTQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/GwqG +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/GwqG/LwRhfSYy.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/CzQS +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/USmD/XUAi/CzQS/ECyfvMPP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/CzQS/gXuZ +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/XUAi/CzQS/gXuZ/SMztWfWu.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/USmD/XUAi/CzQS/gXuZ/XyqGnwfe.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/CzQS/uzau +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/XUAi/CzQS/uzau/tQdbfTUz.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/XUAi/CzQS/uzau/xJpRStbn.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/eKGQ +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/XUAi/eKGQ/bvxMMcMf.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/XUAi/eKGQ/yzgjWMmk.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/fNJe +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/USmD/XUAi/fNJe/ALUQxkdU.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/XUAi/fNJe/jxxVZAdj.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/llRd +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/llRd/YSPZ +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/XUAi/llRd/YSPZ/TUXRRGxX.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/XUAi/llRd/fPvMvIlh.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/XUAi/llRd/lebTeotH.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/XUAi/llRd/vxhOIIuv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/llRd/xAUr +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/XUAi/llRd/xAUr/knVguTBT.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/XUAi/llRd/xAUr/uwxqvNka.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/XUAi/loRxYivv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/ulEW +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/XUAi/ulEW/SuVIOWyh.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/XUAi/ulEW/XkxHykGg.bin +1980-01-01 00:00:00 ..... 
9 9 study_data/ubAC/USmD/XUAi/ulEW/nfVdyMUw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/ulEW/xAew +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/XUAi/ulEW/xAew/cwtCecYJ.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/XUAi/ulEW/xAew/rEFHmxkm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/XUAi/yNha +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/XUAi/yNha/bOsbDHyJ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop/CXCA +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/ozop/CXCA/BzzozBSL.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/USmD/ozop/CXCA/ClmSOpKS.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/USmD/ozop/CXCA/LcBHApWy.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop/SgtE +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/ozop/SgtE/kPNoxogl.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop/SgtE/xwPG +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/USmD/ozop/SgtE/xwPG/aXMUqvZN.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/ozop/SgtE/xwPG/umvZCqer.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/ozop/WFdSxlTh.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop/ZMpw +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/ozop/ZMpw/jcCtZKws.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/ozop/ZMpw/tlFYaZCt.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/ozop/cPxsEwJV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/ozop/dbII +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/ozop/dbII/NtBmtMvW.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/ozop/dbII/eSVCosqx.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/ozop/dbII/zRYDeXZm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/uCqd +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/uCqd/WrTcNrGB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/uCqd/iDmi +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/USmD/uCqd/iDmi/ctTntudZ.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/uCqd/iDmi/ndkzpoGj.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/uCqd/sYyNwIRR.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/uCqd/wraW +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/uCqd/wraW/BJhtXoSu.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/uCqd/wraW/vXnzUKZb.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/BGrN +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/BGrN/IoUGwuUD.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/USmD/vRYX/BGrN/YtRvveQM.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/USmD/vRYX/BGrN/buUGPGfb.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/GaNe +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/GaNe/DXYDeayc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/GaNe/GqWx +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/vRYX/GaNe/GqWx/Teyvodjh.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/GaNe/IOjS +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/vRYX/GaNe/IOjS/lGEFiHiE.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/vRYX/GaNe/IOjS/tTMLUMaS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/GaNe/oqPr +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/vRYX/GaNe/oqPr/OKRWYOYc.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/GaNe/oqPr/nmEjhwDT.bin +1980-01-01 00:00:00 ..... 
7 7 study_data/ubAC/USmD/vRYX/GaNe/wrZMvSnM.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/USmD/vRYX/RRschtqY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/boCn +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/boCn/EOpa +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/vRYX/boCn/EOpa/NwzrILUw.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/USmD/vRYX/boCn/EOpa/ZjKmZctR.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/USmD/vRYX/boCn/EOpa/dEOipIDj.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/USmD/vRYX/boCn/IeKkCoAV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/boCn/VudF +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/boCn/VudF/AruRmTRk.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/USmD/vRYX/boCn/VudF/VQvUHRya.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/boCn/VudF/plDmzuSc.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/vRYX/boCn/tnTlTGiF.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/vRYX/boCn/xBcN +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/USmD/vRYX/boCn/xBcN/bOsKUOoc.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/vRYX/boCn/yEpXrJVa.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/zIcb +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/USmD/zIcb/WpAY +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/zIcb/WpAY/lHlOwyFi.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/USmD/zIcb/cFuoWFdJ.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/USmD/zIcb/uBiHbUJK.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/USmD/zIcb/xgcyYiiL.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/ViRVnhhf.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/WCGgYjGh.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/ceIx +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/ceIx/LaQSJcHt.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/ceIx/Sglf +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/ceIx/Sglf/EkLaRHDo.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/ceIx/dFGRwIAH.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/ceIx/pLVNhVWe.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/ceIx/zews +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/ceIx/zews/dRjoROCk.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/ceIx/zews/wVSOdwuw.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/fLXx/FfFT/DdihJHdA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/Pset +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/Pset/ADAg +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/FfFT/Pset/ADAg/DRmLclJC.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/FfFT/Pset/ADAg/LXUOVICl.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/fLXx/FfFT/Pset/ADAg/XDAoDxZR.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/fLXx/FfFT/Pset/rrlgLQDL.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/UYUm +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/UYUm/BXHK +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/fLXx/FfFT/UYUm/BXHK/oIYsrINY.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/UYUm/DJch +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/FfFT/UYUm/DJch/FNGWtnKI.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/FfFT/UYUm/DJch/RWdAlyEc.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/UYUm/YMky +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/fLXx/FfFT/UYUm/YMky/XlXjGKoT.bin +1980-01-01 00:00:00 ..... 
7 7 study_data/ubAC/fLXx/FfFT/UYUm/ldbqxWdp.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/fLXx/FfFT/aXhwCehn.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/fLXx/FfFT/dlTzXRDP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/FfFT/iboZ +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/FfFT/iboZ/GDEZqkvO.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/fLXx/HsMLxCMl.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/IWnb +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/IWnb/JlupqKdX.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/IWnb/dMoDDgkQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/KseZ +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/KseZ/WPca +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/fLXx/KseZ/WPca/HOtTPTDC.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/fLXx/KseZ/odiFVaUP.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/CKam +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/CKam/BGjm +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/fLXx/dVHb/CKam/BGjm/HWqAptzI.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/dVHb/CKam/BGjm/JpfAaIcT.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/dVHb/CKam/JIVLfOiO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/CKam/YWmE +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/fLXx/dVHb/CKam/YWmE/FjPJNgfr.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/dVHb/CKam/YWmE/TqlCbTcz.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/CKam/ppeR +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/dVHb/CKam/ppeR/UmKObOvw.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/fLXx/dVHb/CKam/ppeR/eUyYgIRh.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/dVHb/CKam/rjdxIAok.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/dVHb/CKam/yrSbdSFl.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/GPpQ +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/GPpQ/MVul +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/fLXx/dVHb/GPpQ/MVul/KrhCsTmh.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/fLXx/dVHb/GPpQ/MVul/hfKKPzuD.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/GPpQ/aFyT +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/dVHb/GPpQ/aFyT/FvXdEuCl.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/dVHb/GPpQ/crlNFfms.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/GPpQ/pyIb +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/dVHb/GPpQ/pyIb/Iwtfunmc.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/fLXx/dVHb/GPpQ/pyIb/unfvnyEf.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/JXAl +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/fLXx/dVHb/JXAl/CNWlEmmk.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/fLXx/dVHb/JXAl/CiJaaDVr.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/NngI +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/dVHb/NngI/EWyiUwSJ.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/fLXx/dVHb/NngI/cGnduzmA.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/dVHb/NngI/iTvVIdiE.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/jxyr +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/dVHb/jxyr/NIwGVQpZ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/jxyr/QvvI +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/fLXx/dVHb/jxyr/QvvI/scAdGHep.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/dVHb/jxyr/TLsn +1980-01-01 00:00:00 ..... 
6 6 study_data/ubAC/fLXx/dVHb/jxyr/TLsn/LuZVIJco.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/fLXx/dVHb/jxyr/TLsn/VnTgENac.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/fLXx/dVHb/vmuEpVPC.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/fLXx/dVHb/vxFhUwiR.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/kSpx +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/kSpx/AdiK +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/kSpx/AdiK/GgxIEAoN.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/kSpx/AdiK/hZHtLEkK.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/kSpx/CDIkzrjN.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/fLXx/kSpx/ZPdzQyJH.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/kSpx/ipbz +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/fLXx/kSpx/ipbz/QrrWumws.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/kSpx/zzMD +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/fLXx/kSpx/zzMD/VRRvrRUJ.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/fLXx/ovnEwMrg.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/fLXx/tHyCNhag.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/fLXx/wwXc +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/wwXc/ErxxemRI.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/fLXx/wwXc/MBpSxmbC.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/kOBq/DjjNWBVS.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/YHOK +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/kOBq/YHOK/ClMGEJcO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/YHOK/JFPN +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/kOBq/YHOK/JFPN/CXHkniPg.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/kOBq/YHOK/JFPN/wyDztIYn.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/kOBq/YHOK/OOGnEThQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/cDyu +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/kOBq/cDyu/DcJnYVol.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/kOBq/cDyu/ZHnoEmiQ.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/kOBq/cDyu/qrzOfmei.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/eZfF +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/kOBq/eZfF/FlQyDdMp.bin +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/kOBq/eZfF/JZGqOprZ.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/eZfF/TzjRcpze.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/eZfF/ZkqU +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/eZfF/ZkqU/lhbmRZqV.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/oGUp +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/kOBq/oGUp/xDbYyYKq.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/kOBq/qoOh/DIiwKncu.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/JYOh +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/qoOh/JYOh/aGwzkhgO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/YmjG +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/qoOh/YmjG/ZDhBFhAC.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/qoOh/YmjG/abwzVKhE.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/YmjG/joWP +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/kOBq/qoOh/YmjG/joWP/HBzOXCgv.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/qoOh/YmjG/joWP/XOlAZqGM.bin +1980-01-01 00:00:00 ..... 4 4 study_data/ubAC/kOBq/qoOh/YmjG/joWP/oKOMIiZO.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/YmjG/kOXv +1980-01-01 00:00:00 ..... 
3 3 study_data/ubAC/kOBq/qoOh/YmjG/kOXv/rIUqcFjB.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/kOBq/qoOh/YmjG/xrfOEKHm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/rjRb +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/kOBq/qoOh/rjRb/ANoPqbto.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/qoOh/rjRb/CFVRAMHg.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/kOBq/qoOh/rjRb/qhaXWWfe.bin +1980-01-01 00:00:00 ..... 7 7 study_data/ubAC/kOBq/qoOh/uCNvXcNG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/vgxj +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/kOBq/qoOh/vgxj/CHeyRvYA.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/vgxj/DKsK +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/kOBq/qoOh/vgxj/DKsK/GyfDoGVz.bin +1980-01-01 00:00:00 ..... 3 3 study_data/ubAC/kOBq/qoOh/vgxj/DKsK/wYcLhbeH.bin +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/kOBq/qoOh/vgxj/WrKNtSDQ.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/qoOh/vgxj/qwcJ +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/kOBq/qoOh/vgxj/qwcJ/TpRTQvlG.bin +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/kOBq/qoOh/vjxEPbkr.bin +1980-01-01 00:00:00 ..... 6 6 study_data/ubAC/kOBq/tuDhdTPG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/vpzD +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/kOBq/vpzD/lOvizAnH.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/zQHX +1980-01-01 00:00:00 ..... 9 9 study_data/ubAC/kOBq/zQHX/BsiELuck.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/zQHX/RtNEETKG.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/kOBq/zQHX/beZa +1980-01-01 00:00:00 ..... 5 5 study_data/ubAC/kOBq/zQHX/beZa/FFNfhEyY.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/kOBq/zQHX/beZa/NXyvddMj.bin +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/kOBq/zQHX/beZa/jACFjCUV.bin +1980-01-01 00:00:00 ..... 10 10 study_data/ubAC/lehSaCiB.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/qHWo +1980-01-01 00:00:00 ..... 2 2 study_data/ubAC/qHWo/CbEoHmnm.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/qHWo/OenX +1980-01-01 00:00:00 ..... 8 8 study_data/ubAC/qHWo/OenX/YWXonYoC.bin +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/qHWo/ysQCnyrv.bin +1980-01-01 00:00:00 D.... 0 0 study_data/ubAC/qTJi +1980-01-01 00:00:00 ..... 1 1 study_data/ubAC/qTJi/cVJuZYms.bin +1980-01-01 00:00:00 ..... 
6 6 study_data/ubAC/tmyQpgmN.bin +------------------- ----- ------------ ------------ ------------------------ +1980-01-01 00:00:00 3622 3622 674 files, 335 folders diff --git a/packages/service-library/tests/deferred_tasks/conftest.py b/packages/service-library/tests/deferred_tasks/conftest.py new file mode 100644 index 00000000000..00881e61471 --- /dev/null +++ b/packages/service-library/tests/deferred_tasks/conftest.py @@ -0,0 +1,18 @@ +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager + +import pytest +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase + + +@pytest.fixture +async def redis_client_sdk_deferred_tasks( + get_redis_client_sdk: Callable[ + [RedisDatabase, bool], AbstractAsyncContextManager[RedisClientSDK] + ] +) -> AsyncIterator[RedisClientSDK]: + async with get_redis_client_sdk( + RedisDatabase.DEFERRED_TASKS, decode_response=False + ) as client: + yield client diff --git a/packages/service-library/tests/deferred_tasks/example_app.py b/packages/service-library/tests/deferred_tasks/example_app.py new file mode 100644 index 00000000000..9adb654e896 --- /dev/null +++ b/packages/service-library/tests/deferred_tasks/example_app.py @@ -0,0 +1,194 @@ +import asyncio +import json +import logging +import os +from dataclasses import dataclass +from datetime import timedelta +from typing import Any +from uuid import uuid4 + +from pydantic import NonNegativeInt +from redis.asyncio import Redis +from servicelib.deferred_tasks import ( + BaseDeferredHandler, + DeferredContext, + DeferredManager, + StartContext, + TaskUID, +) +from servicelib.redis import RedisClientSDK +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisDatabase, RedisSettings + +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) + +_logger = logging.getLogger(__name__) + + +class ExampleDeferredHandler(BaseDeferredHandler[str]): + @classmethod + async def get_timeout(cls, context: DeferredContext) -> timedelta: + return timedelta(seconds=60) + + @classmethod + async def start(cls, sleep_duration: float, sequence_id: int) -> StartContext: + return {"sleep_duration": sleep_duration, "sequence_id": sequence_id} + + @classmethod + async def on_created(cls, task_uid: TaskUID, context: DeferredContext) -> None: + in_memory_lists: InMemoryLists = context["in_memory_lists"] + await in_memory_lists.append_to("scheduled", task_uid) + + @classmethod + async def run(cls, context: DeferredContext) -> str: + sleep_duration: float = context["sleep_duration"] + await asyncio.sleep(sleep_duration) + return context["sequence_id"] + + @classmethod + async def on_result(cls, result: str, context: DeferredContext) -> None: + in_memory_lists: InMemoryLists = context["in_memory_lists"] + await in_memory_lists.append_to("results", result) + + +class InMemoryLists: + def __init__(self, redis_settings: RedisSettings, port: int) -> None: + # NOTE: RedisClientSDK is not required here but it's used to easily construct + # a redis connection + self.redis: Redis = RedisClientSDK( + redis_settings.build_redis_dsn(RedisDatabase.DEFERRED_TASKS), + decode_responses=True, + client_name="example_app", + ).redis + self.port = port + + def _get_queue_name(self, queue_name: str) -> str: + return f"in_memory_lists::{queue_name}.{self.port}" + + async def append_to(self, queue_name: str, value: Any) -> None: + await self.redis.rpush(self._get_queue_name(queue_name), value) # 
type: ignore + + async def get_all_from(self, queue_name: str) -> list: + return await self.redis.lrange(self._get_queue_name(queue_name), 0, -1) # type: ignore + + +class ExampleApp: + def __init__( + self, + rabbit_settings: RabbitSettings, + redis_settings: RedisSettings, + in_memory_lists: InMemoryLists, + max_workers: NonNegativeInt, + ) -> None: + self._redis_client = RedisClientSDK( + redis_settings.build_redis_dsn(RedisDatabase.DEFERRED_TASKS), + decode_responses=False, + client_name="example_app", + ) + self._manager = DeferredManager( + rabbit_settings, + self._redis_client, + globals_context={"in_memory_lists": in_memory_lists}, + max_workers=max_workers, + ) + + async def setup(self) -> None: + await self._manager.setup() + + +@dataclass +class Context: + redis_settings: RedisSettings | None = None + rabbit_settings: RabbitSettings | None = None + example_app: ExampleApp | None = None + in_memory_lists: InMemoryLists | None = None + + +async def _commands_handler( + context: Context, command: str, payload: dict[str, Any], port: int +) -> Any: + """Handles all commands sent by the remote party""" + if command == "init-context": + context.redis_settings = RedisSettings.model_validate_json(payload["redis"]) + context.rabbit_settings = RabbitSettings.model_validate_json(payload["rabbit"]) + # using the same db as the deferred tasks with different keys + context.in_memory_lists = InMemoryLists(context.redis_settings, port) + + context.example_app = ExampleApp( + context.rabbit_settings, + context.redis_settings, + context.in_memory_lists, + payload["max-workers"], + ) + await context.example_app.setup() + + _logger.info("Initialized context %s", context) + + return None + + if command == "start": + await ExampleDeferredHandler.start(**payload) + return None + + if command == "get-scheduled": + assert context.in_memory_lists + return await context.in_memory_lists.get_all_from("scheduled") + + if command == "get-results": + assert context.in_memory_lists + return await context.in_memory_lists.get_all_from("results") + + return None + + +class AsyncTCPServer: + def __init__( + self, port: int, host: str = "127.0.0.1", read_chunk_size: int = 10000 + ) -> None: + self.host = host + self.port = port + self.read_chunk_size = read_chunk_size + + self._scheduled: list = [] + self._results: list = [] + + self._context = Context() + + async def _handle_request(self, command: Any) -> Any: + unique_request_id = uuid4() + _logger.info("[%s] request: %s", unique_request_id, command) + response = await _commands_handler( + self._context, command["command"], command["payload"], self.port + ) + _logger.info("[%s] response: %s", unique_request_id, response) + return response + + async def _handle_client(self, reader, writer): + while True: + data = await reader.read(self.read_chunk_size) + if not data: + break + response = await self._handle_request(json.loads(data.decode())) + writer.write(json.dumps(response).encode()) + await writer.drain() + + _logger.info("Client disconnected.") + writer.close() + + async def run(self): + tcp_server = await asyncio.start_server( + self._handle_client, self.host, self.port + ) + addr = tcp_server.sockets[0].getsockname() + _logger.info("Serving on %s", addr) + + async with tcp_server: + await tcp_server.serve_forever() + + +if __name__ == "__main__": + listen_port: int = int(os.environ.get("LISTEN_PORT", -1)) + assert listen_port != -1 + asyncio.run(AsyncTCPServer(port=listen_port).run()) diff --git
a/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py b/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py new file mode 100644 index 00000000000..cc19133b6b2 --- /dev/null +++ b/packages/service-library/tests/deferred_tasks/test__base_deferred_handler.py @@ -0,0 +1,474 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +import logging +from collections.abc import AsyncIterable, Awaitable, Callable, Iterable +from datetime import timedelta +from enum import auto +from typing import Any +from unittest.mock import Mock + +import pytest +from models_library.utils.enums import StrAutoEnum +from pydantic import NonNegativeInt +from servicelib.deferred_tasks._base_deferred_handler import ( + BaseDeferredHandler, + DeferredContext, + StartContext, +) +from servicelib.deferred_tasks._deferred_manager import ( + DeferredManager, + _FastStreamRabbitQueue, + _get_queue_from_state, +) +from servicelib.deferred_tasks._models import TaskResultError, TaskUID +from servicelib.deferred_tasks._task_schedule import TaskState +from servicelib.redis import RedisClientSDK +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisDatabase, RedisSettings +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", + "redis", +] + + +class MockKeys(StrAutoEnum): + GET_RETRIES = auto() + GET_TIMEOUT = auto() + START_DEFERRED = auto() + ON_DEFERRED_CREATED = auto() + RUN_DEFERRED = auto() + ON_DEFERRED_RESULT = auto() + ON_FINISHED_WITH_ERROR = auto() + + +@pytest.fixture +async def redis_client_sdk( + redis_service: RedisSettings, +) -> AsyncIterable[RedisClientSDK]: + sdk = RedisClientSDK( + redis_service.build_redis_dsn(RedisDatabase.DEFERRED_TASKS), + decode_responses=False, + client_name="pytest", + ) + yield sdk + await sdk.shutdown() + + +@pytest.fixture +def mocked_deferred_globals() -> dict[str, Any]: + return {f"global_{i}": Mock for i in range(5)} + + +@pytest.fixture +async def deferred_manager( + rabbit_service: RabbitSettings, + redis_client_sdk: RedisClientSDK, + mocked_deferred_globals: dict[str, Any], +) -> AsyncIterable[DeferredManager]: + manager = DeferredManager( + rabbit_service, + redis_client_sdk, + globals_context=mocked_deferred_globals, + max_workers=10, + ) + + await manager.setup() + yield manager + await manager.shutdown() + + +@pytest.fixture +async def get_mocked_deferred_handler( + deferred_manager: DeferredManager, +) -> Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], +]: + def _( + retry_count: int, + timeout: timedelta, + run: Callable[[DeferredContext], Awaitable[Any]], + ) -> tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]]: + mocks: dict[MockKeys, Mock] = {k: Mock() for k in MockKeys} + + class ObservableDeferredHandler(BaseDeferredHandler[Any]): + @classmethod + async def get_retries(cls, context: DeferredContext) -> int: + mocks[MockKeys.GET_RETRIES](retry_count, context) + return retry_count + + @classmethod + async def get_timeout(cls, context: DeferredContext) -> timedelta: + mocks[MockKeys.GET_TIMEOUT](timeout, context) + return timeout + + @classmethod + async def start(cls, **kwargs) -> StartContext: + mocks[MockKeys.START_DEFERRED](kwargs) + return kwargs + + @classmethod + async 
def on_created( + cls, task_uid: TaskUID, context: DeferredContext + ) -> None: + mocks[MockKeys.ON_DEFERRED_CREATED](task_uid, context) + + @classmethod + async def run(cls, context: DeferredContext) -> Any: + result = await run(context) + mocks[MockKeys.RUN_DEFERRED](context) + return result + + @classmethod + async def on_result(cls, result: Any, context: DeferredContext) -> None: + mocks[MockKeys.ON_DEFERRED_RESULT](result, context) + + @classmethod + async def on_finished_with_error( + cls, error: TaskResultError, context: DeferredContext + ) -> None: + mocks[MockKeys.ON_FINISHED_WITH_ERROR](error, context) + + deferred_manager.patch_based_deferred_handlers() + + return mocks, ObservableDeferredHandler + + return _ + + +@pytest.fixture() +def caplog_debug_level( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: + with caplog.at_level(logging.DEBUG): + yield caplog + + +async def _assert_mock_call( + mocks: dict[MockKeys, Mock], + *, + key: MockKeys, + count: NonNegativeInt, + timeout: float = 5, +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(timeout), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert len(mocks[key].call_args_list) == count + + +async def _assert_log_message( + caplog: pytest.LogCaptureFixture, + *, + message: str, + count: NonNegativeInt, + timeout: float = 5, +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(timeout), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert caplog.text.count(message) == count + + +async def test_rabbit_resources_are_prefixed_with_instancing_module_name( + deferred_manager: DeferredManager, +): + # pylint:disable=protected-access + assert deferred_manager._global_resources_prefix == __name__ # noqa: SLF001 + + +@pytest.mark.parametrize( + "run_return", [{}, None, 1, 1.34, [], [12, 35, 7, "str", 455.66]] +) +async def test_deferred_manager_result_ok( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], + mocked_deferred_globals: dict[str, Any], + run_return: Any, +): + async def _run_ok(_: DeferredContext) -> Any: + return run_return + + retry_count = 1 + timeout = timedelta(seconds=1) + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + retry_count, timeout, _run_ok + ) + + start_kwargs = {f"start_with{i}": f"par-{i}" for i in range(6)} + await mocked_deferred_handler.start(**start_kwargs) + + context = {**mocked_deferred_globals, **start_kwargs} + + await _assert_mock_call(mocks, key=MockKeys.GET_RETRIES, count=1) + mocks[MockKeys.GET_RETRIES].assert_called_with(retry_count, context) + + await _assert_mock_call(mocks, key=MockKeys.GET_TIMEOUT, count=1) + mocks[MockKeys.GET_TIMEOUT].assert_called_with(timeout, context) + + await _assert_mock_call(mocks, key=MockKeys.START_DEFERRED, count=1) + mocks[MockKeys.START_DEFERRED].assert_called_with(start_kwargs) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1) + assert TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0]) + + await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=1) + mocks[MockKeys.RUN_DEFERRED].assert_called_once_with(context) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=1) + mocks[MockKeys.ON_DEFERRED_RESULT].assert_called_once_with(run_return, context) + 
+ await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=0) + + +@pytest.mark.parametrize("retry_count", [1, 5]) +async def test_deferred_manager_raised_error( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], + mocked_deferred_globals: dict[str, Any], + caplog_debug_level: pytest.LogCaptureFixture, + retry_count: int, +): + caplog_debug_level.clear() + + expected_error_message = ( + "This is an expected error that was raised and should be found in the logs" + ) + + async def _run_raises(_: DeferredContext) -> None: + raise RuntimeError(expected_error_message) + + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + retry_count, timedelta(seconds=1), _run_raises + ) + + await mocked_deferred_handler.start() + + await _assert_mock_call(mocks, key=MockKeys.START_DEFERRED, count=1) + mocks[MockKeys.START_DEFERRED].assert_called_once_with({}) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1) + task_uid = TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0]) + + await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=1) + result, received_globals = ( + mocks[MockKeys.ON_FINISHED_WITH_ERROR].call_args_list[0].args + ) + assert isinstance(result, TaskResultError) + assert mocked_deferred_globals == received_globals + if retry_count > 1: + await _assert_log_message( + caplog_debug_level, + message=f"Schedule retry attempt for task_uid '{task_uid}'", + count=retry_count, + ) + + await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0) + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0) + + await _assert_log_message( + caplog_debug_level, + message=f"Finished task_uid '{task_uid}' with error", + count=1, + ) + assert expected_error_message in caplog_debug_level.text + + +@pytest.mark.parametrize("retry_count", [1, 5]) +async def test_deferred_manager_cancelled( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], + caplog_debug_level: pytest.LogCaptureFixture, + retry_count: int, +): + caplog_debug_level.clear() + + async def _run_to_cancel(_: DeferredContext) -> None: + await asyncio.sleep(1e6) + + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + retry_count, timedelta(seconds=10), _run_to_cancel + ) + + await mocked_deferred_handler.start() + + await _assert_mock_call(mocks, key=MockKeys.START_DEFERRED, count=1) + mocks[MockKeys.START_DEFERRED].assert_called_once_with({}) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1) + task_uid = TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0]) + + await mocked_deferred_handler.cancel(task_uid) + + await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=0) + + assert ( + caplog_debug_level.text.count( + f"Schedule retry attempt for task_uid '{task_uid}'" + ) + == 0 + ) + + await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0) + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0) + + await _assert_log_message( + caplog_debug_level, + message=f"Found and cancelled run for '{task_uid}'", + count=1, + ) + + +@pytest.mark.parametrize("fail", [True, False]) +async def test_deferred_manager_task_is_present( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], 
Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], + fail: bool, +): + total_wait_time = 0.5 + + async def _run_for_short_period(context: DeferredContext) -> None: + await asyncio.sleep(total_wait_time) + if context["fail"]: + msg = "Failing at the end of sleeping as requested" + raise RuntimeError(msg) + + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + 0, timedelta(seconds=10), _run_for_short_period + ) + + await mocked_deferred_handler.start(fail=fail) + + await _assert_mock_call(mocks, key=MockKeys.START_DEFERRED, count=1) + mocks[MockKeys.START_DEFERRED].assert_called_once_with({"fail": fail}) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1) + task_uid = TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0]) + + assert await mocked_deferred_handler.is_present(task_uid) is True + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(total_wait_time * 2), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert await mocked_deferred_handler.is_present(task_uid) is False + + if fail: + await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=1) + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0) + else: + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=1) + await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=0) + + +@pytest.mark.parametrize("tasks_to_start", [100]) +async def test_deferred_manager_start_parallelized( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], + mocked_deferred_globals: dict[str, Any], + caplog_debug_level: pytest.LogCaptureFixture, + tasks_to_start: NonNegativeInt, +): + caplog_debug_level.clear() + + async def _run_ok(_: DeferredContext) -> None: + await asyncio.sleep(0.1) + + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + 3, timedelta(seconds=1), _run_ok + ) + + await asyncio.gather( + *[mocked_deferred_handler.start() for _ in range(tasks_to_start)] + ) + + await _assert_mock_call( + mocks, key=MockKeys.ON_DEFERRED_RESULT, count=tasks_to_start, timeout=10 + ) + for entry in mocks[MockKeys.ON_DEFERRED_RESULT].call_args_list: + assert entry.args == (None, mocked_deferred_globals) + + await _assert_mock_call(mocks, key=MockKeys.ON_FINISHED_WITH_ERROR, count=0) + await _assert_log_message( + caplog_debug_level, message="Schedule retry attempt for task_uid ", count=0 + ) + await _assert_log_message( + caplog_debug_level, message="Found and cancelled run for '", count=0 + ) + + +async def test_deferred_manager_code_times_out( + get_mocked_deferred_handler: Callable[ + [int, timedelta, Callable[[DeferredContext], Awaitable[Any]]], + tuple[dict[MockKeys, Mock], type[BaseDeferredHandler]], + ], +): + async def _run_that_times_out(_: DeferredContext) -> None: + await asyncio.sleep(1e6) + + mocks, mocked_deferred_handler = get_mocked_deferred_handler( + 1, timedelta(seconds=0.5), _run_that_times_out + ) + + await mocked_deferred_handler.start() + + await _assert_mock_call(mocks, key=MockKeys.START_DEFERRED, count=1) + mocks[MockKeys.START_DEFERRED].assert_called_once_with({}) + + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_CREATED, count=1) + assert TaskUID(mocks[MockKeys.ON_DEFERRED_CREATED].call_args_list[0].args[0]) + + await _assert_mock_call(mocks, 
key=MockKeys.ON_FINISHED_WITH_ERROR, count=1) + for entry in mocks[MockKeys.ON_FINISHED_WITH_ERROR].call_args_list: + assert "builtins.TimeoutError" in entry.args[0].error + + await _assert_mock_call(mocks, key=MockKeys.RUN_DEFERRED, count=0) + await _assert_mock_call(mocks, key=MockKeys.ON_DEFERRED_RESULT, count=0) + + +def test_enums_have_same_entries(): + assert len(TaskState) == len(_FastStreamRabbitQueue) + + +@pytest.mark.parametrize( + "state, queue", + [ + (TaskState.SCHEDULED, _FastStreamRabbitQueue.SCHEDULED), + (TaskState.SUBMIT_TASK, _FastStreamRabbitQueue.SUBMIT_TASK), + (TaskState.WORKER, _FastStreamRabbitQueue.WORKER), + (TaskState.ERROR_RESULT, _FastStreamRabbitQueue.ERROR_RESULT), + (TaskState.DEFERRED_RESULT, _FastStreamRabbitQueue.DEFERRED_RESULT), + (TaskState.FINISHED_WITH_ERROR, _FastStreamRabbitQueue.FINISHED_WITH_ERROR), + (TaskState.MANUALLY_CANCELLED, _FastStreamRabbitQueue.MANUALLY_CANCELLED), + ], +) +def test__get_queue_from_state(state: TaskState, queue: _FastStreamRabbitQueue): + assert _get_queue_from_state(state) == queue diff --git a/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py b/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py new file mode 100644 index 00000000000..366759e22d3 --- /dev/null +++ b/packages/service-library/tests/deferred_tasks/test__redis_task_tracker.py @@ -0,0 +1,67 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +from datetime import timedelta + +import pytest +from pydantic import TypeAdapter +from servicelib.deferred_tasks._models import TaskUID +from servicelib.deferred_tasks._redis_task_tracker import RedisTaskTracker +from servicelib.deferred_tasks._task_schedule import TaskScheduleModel, TaskState +from servicelib.redis import RedisClientSDK +from servicelib.utils import logged_gather + +pytest_simcore_core_services_selection = [ + "redis", +] + + +@pytest.fixture +def task_schedule() -> TaskScheduleModel: + return TypeAdapter(TaskScheduleModel).validate_python( + { + "timeout": timedelta(seconds=1), + "execution_attempts": 1, + "class_unique_reference": "mock", + "start_context": {}, + "state": TaskState.SCHEDULED, + "result": None, + }, + ) + + +async def test_task_tracker_workflow( + redis_client_sdk_deferred_tasks: RedisClientSDK, + task_schedule: TaskScheduleModel, +): + task_tracker = RedisTaskTracker(redis_client_sdk_deferred_tasks) + + task_uid: TaskUID = await task_tracker.get_new_unique_identifier() + + assert await task_tracker.get(task_uid) is None + + await task_tracker.save(task_uid, task_schedule) + assert await task_tracker.get(task_uid) == task_schedule + + await task_tracker.remove(task_uid) + assert await task_tracker.get(task_uid) is None + + +@pytest.mark.parametrize("count", [0, 1, 10, 100]) +async def test_task_tracker_list_all_entries( + redis_client_sdk_deferred_tasks: RedisClientSDK, + task_schedule: TaskScheduleModel, + count: int, +): + task_tracker = RedisTaskTracker(redis_client_sdk_deferred_tasks) + + async def _make_entry() -> None: + task_uid = await task_tracker.get_new_unique_identifier() + await task_tracker.save(task_uid, task_schedule) + + await logged_gather(*(_make_entry() for _ in range(count))) + + entries = await task_tracker.all() + assert len(entries) == count + assert entries == [task_schedule for _ in range(count)] diff --git a/packages/service-library/tests/deferred_tasks/test__utils.py b/packages/service-library/tests/deferred_tasks/test__utils.py new file mode 100644 index 00000000000..db3f32554b3 
--- /dev/null +++ b/packages/service-library/tests/deferred_tasks/test__utils.py @@ -0,0 +1,277 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +import operator +import time +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager, asynccontextmanager +from unittest.mock import Mock + +import pytest +from faststream.broker.wrapper.call import HandlerCallWrapper +from faststream.exceptions import NackMessage, RejectMessage +from faststream.rabbit import ( + ExchangeType, + RabbitBroker, + RabbitExchange, + RabbitRouter, + TestRabbitBroker, +) +from pydantic import NonNegativeInt +from servicelib.deferred_tasks._utils import stop_retry_for_unintended_errors +from settings_library.rabbit import RabbitSettings +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def rabbit_router() -> RabbitRouter: + return RabbitRouter() + + +@pytest.fixture +def rabbit_broker(rabbit_service: RabbitSettings) -> RabbitBroker: + return RabbitBroker(rabbit_service.dsn) + + +@pytest.fixture +async def get_test_broker( + rabbit_broker: RabbitBroker, rabbit_router: RabbitRouter +) -> Callable[[], AbstractAsyncContextManager[RabbitBroker]]: + @asynccontextmanager + async def _() -> AsyncIterator[RabbitBroker]: + rabbit_broker.include_router(rabbit_router) + + async with TestRabbitBroker(rabbit_broker, with_real=True) as test_broker: + yield test_broker + + return _ + + +@pytest.fixture +def rabbit_exchange() -> RabbitExchange: + return RabbitExchange("test_exchange") + + +async def _assert_call_count( + handler: HandlerCallWrapper, + *, + expected_count: NonNegativeInt, + operation: Callable = operator.eq +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(5), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert handler.mock + count = len(handler.mock.call_args_list) + assert operation(count, expected_count) + + +async def test_handler_called_as_expected( + rabbit_broker: RabbitBroker, + rabbit_exchange: RabbitExchange, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + @rabbit_broker.subscriber(queue="print_message_no_deco", exchange=rabbit_exchange) + async def print_message_no_deco(some_value: int) -> None: + print(some_value) + + @rabbit_broker.subscriber(queue="print_message_with_deco", exchange=rabbit_exchange) + @stop_retry_for_unintended_errors + async def print_message_with_deco(some_value: int) -> None: + print(some_value) + + async with get_test_broker() as test_broker: + await test_broker.publish( + 24, queue="print_message_no_deco", exchange=rabbit_exchange + ) + await _assert_call_count(print_message_no_deco, expected_count=1) + + await test_broker.publish( + 42, queue="print_message_with_deco", exchange=rabbit_exchange + ) + await _assert_call_count(print_message_with_deco, expected_count=1) + + +async def test_handler_nacks_message( + rabbit_broker: RabbitBroker, + rabbit_exchange: RabbitExchange, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + @rabbit_broker.subscriber( + queue="nacked_message_no_deco", exchange=rabbit_exchange, retry=True + ) + async def nacked_message_no_deco(msg: str) -> None: + raise NackMessage + + @rabbit_broker.subscriber( + 
queue="nacked_message_with_deco", exchange=rabbit_exchange, retry=True + ) + @stop_retry_for_unintended_errors + async def nacked_message_with_deco(msg: str) -> None: + raise NackMessage + + async with get_test_broker() as test_broker: + await test_broker.publish( + "", queue="nacked_message_no_deco", exchange=rabbit_exchange + ) + await _assert_call_count( + nacked_message_no_deco, expected_count=10, operation=operator.gt + ) + + await test_broker.publish( + "", queue="nacked_message_with_deco", exchange=rabbit_exchange + ) + await _assert_call_count( + nacked_message_with_deco, expected_count=10, operation=operator.gt + ) + + +async def test_handler_rejects_message( + rabbit_broker: RabbitBroker, + rabbit_exchange: RabbitExchange, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + @rabbit_broker.subscriber( + queue="rejected_message_no_deco", exchange=rabbit_exchange, retry=True + ) + @stop_retry_for_unintended_errors + async def rejected_message_no_deco(msg: str) -> None: + raise RejectMessage + + @rabbit_broker.subscriber( + queue="rejected_message_with_deco", exchange=rabbit_exchange, retry=True + ) + @stop_retry_for_unintended_errors + async def rejected_message_with_deco(msg: str) -> None: + raise RejectMessage + + async with get_test_broker() as test_broker: + await test_broker.publish( + "", queue="rejected_message_no_deco", exchange=rabbit_exchange + ) + await _assert_call_count(rejected_message_no_deco, expected_count=1) + + await test_broker.publish( + "", queue="rejected_message_with_deco", exchange=rabbit_exchange + ) + await _assert_call_count(rejected_message_with_deco, expected_count=1) + + +async def test_handler_unintended_error( + rabbit_broker: RabbitBroker, + rabbit_exchange: RabbitExchange, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + @rabbit_broker.subscriber( + queue="unintended_error_no_deco", exchange=rabbit_exchange, retry=True + ) + async def unintended_error_no_deco(msg: str) -> None: + msg = "this was an unexpected error" + raise RuntimeError(msg) + + @rabbit_broker.subscriber( + queue="unintended_error_with_deco", exchange=rabbit_exchange, retry=True + ) + @stop_retry_for_unintended_errors + async def unintended_error_with_deco(msg: str) -> None: + msg = "this was an unexpected error" + raise RuntimeError(msg) + + async with get_test_broker() as test_broker: + await test_broker.publish( + "", queue="unintended_error_no_deco", exchange=rabbit_exchange + ) + await _assert_call_count( + unintended_error_no_deco, expected_count=10, operation=operator.gt + ) + + await test_broker.publish( + "", queue="unintended_error_with_deco", exchange=rabbit_exchange + ) + await _assert_call_count(unintended_error_with_deco, expected_count=1) + + +async def test_handler_parallelism( + rabbit_broker: RabbitBroker, + rabbit_exchange: RabbitExchange, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + done_mock = Mock() + + @rabbit_broker.subscriber(queue="sleeper", exchange=rabbit_exchange, retry=True) + async def handler_sleeper(sleep_duration: float) -> None: + await asyncio.sleep(sleep_duration) + done_mock() + + async def _sleep_for(test_broker: RabbitBroker, *, duration: float) -> None: + await test_broker.publish(duration, queue="sleeper", exchange=rabbit_exchange) + + async def _wait_for_calls(mock: Mock, *, expected_calls: int) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(5), + reraise=True, + 
retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert len(mock.call_args_list) == expected_calls + + request_count = 100 + sleep_duration = 0.1 + async with get_test_broker() as test_broker: + start_time = time.time() + + await asyncio.gather( + *[ + _sleep_for(test_broker, duration=sleep_duration) + for _ in range(request_count) + ] + ) + + await _wait_for_calls(done_mock, expected_calls=request_count) + + elapsed = time.time() - start_time + + # ensure they ran in parallel by checking that they finish in a fraction of the total duration + assert elapsed <= (sleep_duration * request_count) * 0.15 + + +async def test_fan_out_exchange_message_delivery( + rabbit_broker: RabbitBroker, + get_test_broker: Callable[[], AbstractAsyncContextManager[RabbitBroker]], +): + + handler_1_call_count = Mock() + handler_2_call_count = Mock() + + fan_out_exchange = RabbitExchange("test_fan_out_exchange", type=ExchangeType.FANOUT) + + @rabbit_broker.subscriber(queue="handler_1", exchange=fan_out_exchange, retry=True) + async def handler_1(sleep_duration: float) -> None: + assert sleep_duration == 0.1 + handler_1_call_count(sleep_duration) + + @rabbit_broker.subscriber(queue="handler_2", exchange=fan_out_exchange, retry=True) + async def handler_2(sleep_duration: float) -> None: + assert sleep_duration == 0.1 + handler_2_call_count(sleep_duration) + + async with get_test_broker() as test_broker: + await test_broker.publish(0.1, exchange=fan_out_exchange) + + await asyncio.sleep(1) + + assert len(handler_1_call_count.call_args_list) == 1 + assert len(handler_2_call_count.call_args_list) == 1 diff --git a/packages/service-library/tests/deferred_tasks/test__worker_tracker.py b/packages/service-library/tests/deferred_tasks/test__worker_tracker.py new file mode 100644 index 00000000000..f5d54b36a0f --- /dev/null +++ b/packages/service-library/tests/deferred_tasks/test__worker_tracker.py @@ -0,0 +1,173 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + +import asyncio +from collections.abc import Awaitable, Callable +from datetime import timedelta +from typing import Any +from unittest.mock import AsyncMock + +import pytest +from faker import Faker +from servicelib.deferred_tasks._base_deferred_handler import DeferredContext +from servicelib.deferred_tasks._deferred_manager import ( + _DEFAULT_DEFERRED_MANAGER_WORKER_SLOTS, +) +from servicelib.deferred_tasks._models import ( + TaskResultCancelledError, + TaskResultError, + TaskResultSuccess, + TaskUID, +) +from servicelib.deferred_tasks._worker_tracker import WorkerTracker + + +async def _worker(tracker: WorkerTracker) -> None: + async with tracker: + await asyncio.sleep(1) + + +async def test_worker_tracker_full(): + tracker_size = 2 + tracker = WorkerTracker(tracker_size) + + tasks = [asyncio.create_task(_worker(tracker)) for _ in range(tracker_size)] + # context switch to allow tasks to be picked up + await asyncio.sleep(0.01) + + assert tracker.has_free_slots() is False + await asyncio.gather(*tasks) + assert tracker.has_free_slots() is True + + +async def test_worker_tracker_filling_up_gradually(): + tracker_size = 10 + tracker = WorkerTracker(tracker_size) + + tasks = [] + for _ in range(tracker_size): + assert tracker.has_free_slots() is True + + tasks.append(asyncio.create_task(_worker(tracker))) + # context switch to allow task to be picked up + await asyncio.sleep(0.01) + + assert tracker.has_free_slots() is False + await asyncio.gather(*tasks) + assert tracker.has_free_slots() is True + + +@pytest.fixture
+def worker_tracker() -> WorkerTracker: + return WorkerTracker(_DEFAULT_DEFERRED_MANAGER_WORKER_SLOTS) + + +@pytest.fixture +def task_uid(faker: Faker) -> TaskUID: + return faker.uuid4() + + +def _get_mock_deferred_handler(handler: Callable[..., Awaitable[Any]]) -> AsyncMock: + async_mock = AsyncMock() + async_mock.run = handler + return async_mock + + +async def __h_return_constant_integer(full_start_context) -> int: + _ = full_start_context + return 42 + + +async def __h_sum_numbers(deferred_context: DeferredContext) -> float: + return deferred_context["first"] + deferred_context["second"] + + +async def __h_stringify(deferred_context: DeferredContext) -> str: + + return f"{deferred_context['name']} is {deferred_context['age']} years old" + + +async def __h_do_nothing(deferred_context: DeferredContext) -> None: + _ = deferred_context + + +@pytest.mark.parametrize( + "handler, context, expected_result", + [ + (__h_return_constant_integer, {}, 42), + (__h_sum_numbers, {"first": 4, "second": 0.14}, 4.14), + (__h_stringify, {"name": "John", "age": 56}, "John is 56 years old"), + (__h_do_nothing, {}, None), + (__h_do_nothing, {"first": "arg", "second": "argument"}, None), + ], +) +async def test_returns_task_result_success( + worker_tracker: WorkerTracker, + task_uid: TaskUID, + handler: Callable[..., Awaitable[Any]], + context: DeferredContext, + expected_result: Any, +): + + deferred_handler = _get_mock_deferred_handler(handler) + result = await worker_tracker.handle_run( + deferred_handler, # type: ignore + task_uid=task_uid, + deferred_context=context, + timeout=timedelta(seconds=0.1), + ) + assert isinstance(result, TaskResultSuccess) + assert result.value == expected_result + assert len(worker_tracker._tasks) == 0 # noqa: SLF001 + + +async def test_returns_task_result_error( + worker_tracker: WorkerTracker, + task_uid: TaskUID, +): + async def _handler(deferred_context: DeferredContext) -> None: + msg = "raising an error as expected" + raise RuntimeError(msg) + + deferred_handler = _get_mock_deferred_handler(_handler) + result = await worker_tracker.handle_run( + deferred_handler, # type: ignore + task_uid=task_uid, + deferred_context={}, + timeout=timedelta(seconds=0.1), + ) + assert isinstance(result, TaskResultError) + assert "raising an error as expected" in result.format_error() + assert len(worker_tracker._tasks) == 0 # noqa: SLF001 + + +async def test_returns_task_result_cancelled_error( + worker_tracker: WorkerTracker, + task_uid: TaskUID, +): + async def _handler(deferred_context: DeferredContext) -> None: + await asyncio.sleep(1e6) + + deferred_handler = _get_mock_deferred_handler(_handler) + + def _start_in_task() -> asyncio.Task: + return asyncio.create_task( + worker_tracker.handle_run( + deferred_handler, # type: ignore + task_uid=task_uid, + deferred_context={}, + timeout=timedelta(seconds=100), + ) + ) + + task = _start_in_task() + # context switch for task to start + await asyncio.sleep(0) + assert worker_tracker.cancel_run(task_uid) is True + + assert len(worker_tracker._tasks) == 1 # noqa: SLF001 + result = await task + assert len(worker_tracker._tasks) == 0 # noqa: SLF001 + assert isinstance(result, TaskResultCancelledError) + + assert worker_tracker.cancel_run("missing_task_uid") is False diff --git a/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py b/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py new file mode 100644 index 00000000000..7d11d257153 --- /dev/null +++ 
b/packages/service-library/tests/deferred_tasks/test_deferred_tasks.py @@ -0,0 +1,452 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +import contextlib +import datetime +import itertools +import json +import random +import sys +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable +from contextlib import AbstractAsyncContextManager, AsyncExitStack, suppress +from pathlib import Path +from typing import Any, Protocol + +import psutil +import pytest +from aiohttp.test_utils import unused_port +from common_library.json_serialization import json_dumps +from common_library.serialization import model_dump_with_secrets +from pydantic import NonNegativeFloat, NonNegativeInt +from pytest_mock import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from servicelib.redis import RedisClientSDK +from servicelib.sequences_utils import partition_gen +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", + "redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +class _RemoteProcess: + def __init__(self, shell_command, port: int): + self.shell_command = shell_command + self.port = port + self.process = None + self.pid: int | None = None + + async def start(self): + assert self.process is None + assert self.pid is None + + self.process = await asyncio.create_subprocess_shell( + self.shell_command, env={"LISTEN_PORT": f"{self.port}"} + ) + self.pid = self.process.pid + + async def stop(self, *, graceful: bool = False): + if not graceful: + assert self.process is not None + assert self.pid is not None + + with suppress(psutil.NoSuchProcess): + parent = psutil.Process(self.pid) + children = parent.children(recursive=True) + for child_pid in [child.pid for child in children]: + with suppress(psutil.NoSuchProcess): + psutil.Process(child_pid).kill() + + self.process = None + self.pid = None + + +@pytest.fixture +async def get_remote_process( + redis_client_sdk_deferred_tasks: RedisClientSDK, +) -> AsyncIterable[Callable[[], Awaitable[_RemoteProcess]]]: + python_interpreter = sys.executable + current_module_path = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + ) + app_to_start = current_module_path / "example_app.py" + assert app_to_start.exists() + + started_processes: list[_RemoteProcess] = [] + + async def _() -> _RemoteProcess: + process = _RemoteProcess( + shell_command=f"{python_interpreter} {app_to_start}", port=unused_port() + ) + started_processes.append(process) + return process + + yield _ + + await asyncio.gather( + *[process.stop(graceful=True) for process in started_processes] + ) + + +async def _tcp_command( + command: str, + payload: dict[str, Any], + *, + host: str = "localhost", + port: int, + read_chunk_size: int = 10000, + timeout: NonNegativeFloat = 10, +) -> Any: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(timeout), + reraise=True, + ): + with attempt: + reader, writer = await asyncio.open_connection(host, port) + + writer.write(json.dumps({"command": command, "payload": payload}).encode()) + await writer.drain() + response = await reader.read(read_chunk_size) + decoded_response = response.decode() + writer.close() + return 
json.loads(decoded_response) + + +def _get_serialization_options() -> dict[str, Any]: + return { + "exclude_defaults": True, + "exclude_none": True, + "exclude_unset": True, + } + + +class _RemoteProcessLifecycleManager: + def __init__( + self, + remote_process: _RemoteProcess, + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + max_workers: int, + ) -> None: + self.remote_process = remote_process + self.rabbit_service = rabbit_service + self.redis_service = redis_service + self.max_workers = max_workers + + async def __aenter__(self) -> "_RemoteProcessLifecycleManager": + await self.start() + return self + + async def __aexit__(self, *args): + await self.stop() + # wait for all events to be consumed + await asyncio.sleep(0.1) + + async def start(self) -> None: + await self.remote_process.start() + + response = await _tcp_command( + "init-context", + { + "rabbit": json_dumps( + model_dump_with_secrets( + self.rabbit_service, + show_secrets=True, + **_get_serialization_options(), + ) + ), + "redis": json_dumps( + model_dump_with_secrets( + self.redis_service, + show_secrets=True, + **_get_serialization_options(), + ) + ), + "max-workers": self.max_workers, + }, + port=self.remote_process.port, + ) + assert response is None + + async def stop(self) -> None: + await self.remote_process.stop() + + async def start_task(self, sleep_duration: float, sequence_id: int) -> None: + response = await _tcp_command( + "start", + {"sleep_duration": sleep_duration, "sequence_id": sequence_id}, + port=self.remote_process.port, + ) + assert response is None + + async def get_results(self) -> list[str]: + response = await _tcp_command("get-results", {}, port=self.remote_process.port) + assert isinstance(response, list) + return response + + async def get_scheduled(self) -> list[str]: + response = await _tcp_command( + "get-scheduled", {}, port=self.remote_process.port + ) + assert isinstance(response, list) + return response + + +async def _assert_has_entries( + managers: list[_RemoteProcessLifecycleManager], + list_name: str, + *, + count: NonNegativeInt, + timeout: float = 10, + all_managers_have_some_entries: bool = False, +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.01), + stop=stop_after_delay(timeout), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + if list_name == "get-results": + gathered_results: list[list[str]] = await asyncio.gather( + *[manager.get_results() for manager in managers] + ) + if all_managers_have_some_entries: + for entry in gathered_results: + assert len(entry) > 0 + results: list[str] = list(itertools.chain(*gathered_results)) + # ensure sequence numbers appear at least once + # since result handlers can be retried when they are interrupted + assert len(results) >= count + assert set(results) == {f"{x}" for x in range(count)} + if list_name == "get-scheduled": + gathered_results: list[list[str]] = await asyncio.gather( + *[manager.get_scheduled() for manager in managers] + ) + if all_managers_have_some_entries: + for entry in gathered_results: + assert len(entry) > 0 + scheduled: list[str] = list(itertools.chain(*gathered_results)) + assert len(scheduled) == count + # ensure all entries are unique + assert len(scheduled) == len(set(scheduled)) + + +async def _sleep_in_interval(lower: NonNegativeFloat, upper: NonNegativeFloat) -> None: + assert upper >= lower + random_wait = random.uniform(lower, upper) # noqa: S311 + await asyncio.sleep(random_wait) + + +@pytest.mark.parametrize("remote_processes", [1,
10]) +@pytest.mark.parametrize("max_workers", [10]) +@pytest.mark.parametrize("deferred_tasks_to_start", [100]) +@pytest.mark.parametrize("start_stop_cycles", [0, 10]) +async def test_workflow_with_outages_in_process_running_deferred_manager( + get_remote_process: Callable[[], Awaitable[_RemoteProcess]], + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + remote_processes: int, + max_workers: int, + deferred_tasks_to_start: NonNegativeInt, + start_stop_cycles: NonNegativeInt, +): + async with AsyncExitStack() as exit_stack: + managers: list[_RemoteProcessLifecycleManager] = await asyncio.gather( + *[ + exit_stack.enter_async_context( + _RemoteProcessLifecycleManager( + await get_remote_process(), + rabbit_service, + redis_service, + max_workers, + ) + ) + for i in range(remote_processes) + ] + ) + + # pylint:disable=unnecessary-comprehension + sequence_ids_list: list[tuple[int, ...]] = [ # noqa: C416 + x + for x in partition_gen( + range(deferred_tasks_to_start), + slice_size=int(deferred_tasks_to_start / remote_processes) + 1, + ) + ] + assert sum(len(x) for x in sequence_ids_list) == deferred_tasks_to_start + + # start all in parallel divided among workers + await asyncio.gather( + *[ + manager.start_task(0.1, i) + for manager, sequence_ids in zip( + managers, sequence_ids_list, strict=True + ) + for i in sequence_ids + ] + ) + # makes sure tasks have been scheduled + await _assert_has_entries( + managers, "get-scheduled", count=deferred_tasks_to_start + ) + + # if this fails all scheduled tasks have already finished + gathered_results: list[list[str]] = await asyncio.gather( + *[manager.get_results() for manager in managers] + ) + results: list[str] = list(itertools.chain(*gathered_results)) + assert len(results) <= deferred_tasks_to_start + + # emulate issues with processing start & stop DeferredManager + for _ in range(start_stop_cycles): + # pick a random manager to stop and resume + manager = random.choice(managers) # noqa: S311 + await manager.stop() + await _sleep_in_interval(0.2, 0.4) + await manager.start() + + await _assert_has_entries( + managers, + "get-results", + count=deferred_tasks_to_start, + all_managers_have_some_entries=True, + ) + + +@pytest.fixture +async def rabbit_client( + create_rabbitmq_client: Callable[[str], RabbitMQClient], +) -> RabbitMQClient: + return create_rabbitmq_client("pinger") + + +class ClientWithPingProtocol(Protocol): + async def ping(self) -> bool: + ... 
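+# NOTE: ServiceManager below pauses/resumes the rabbit/redis docker containers and polls the matching client's ping() until the expected connectivity state is reached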
+ + +class ServiceManager: + def __init__( + self, + redis_client: RedisClientSDK, + rabbit_client: RabbitMQClient, + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + ) -> None: + self.redis_client = redis_client + self.rabbit_client = rabbit_client + self.paused_container = paused_container + + @contextlib.asynccontextmanager + async def _paused_container( + self, container_name: str, client: ClientWithPingProtocol + ) -> AsyncIterator[None]: + async with self.paused_container(container_name): + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(10), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert await client.ping() is False + yield + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(10), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert await client.ping() is True + + @contextlib.asynccontextmanager + async def pause_rabbit(self) -> AsyncIterator[None]: + async with self._paused_container("rabbit", self.rabbit_client): + yield + + @contextlib.asynccontextmanager + async def pause_redis(self) -> AsyncIterator[None]: + # save db for clean restore point + await self.redis_client.redis.save() + + async with self._paused_container("redis", self.redis_client): + yield + + +@pytest.fixture +def mock_default_socket_timeout(mocker: MockerFixture) -> None: + mocker.patch( + "servicelib.redis._client.DEFAULT_SOCKET_TIMEOUT", + datetime.timedelta(seconds=0.25), + ) + + +@pytest.mark.parametrize("max_workers", [10]) +@pytest.mark.parametrize("deferred_tasks_to_start", [100]) +@pytest.mark.parametrize("service", ["rabbit", "redis"]) +async def test_workflow_with_third_party_services_outages( + mock_default_socket_timeout: None, + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + redis_client_sdk_deferred_tasks: RedisClientSDK, + rabbit_client: RabbitMQClient, + get_remote_process: Callable[[], Awaitable[_RemoteProcess]], + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + max_workers: int, + deferred_tasks_to_start: int, + service: str, +): + service_manager = ServiceManager( + redis_client_sdk_deferred_tasks, rabbit_client, paused_container + ) + + async with _RemoteProcessLifecycleManager( + await get_remote_process(), + rabbit_service, + redis_service, + max_workers, + ) as manager: + # start all in parallel + await asyncio.gather( + *[manager.start_task(0.1, i) for i in range(deferred_tasks_to_start)] + ) + # makes sure tasks have been scheduled + await _assert_has_entries( + [manager], "get-scheduled", count=deferred_tasks_to_start + ) + + # if this fails all scheduled tasks have already finished + assert len(await manager.get_results()) < deferred_tasks_to_start + + # emulate issues with 3rd party services + match service: + case "rabbit": + print("[rabbit]: pausing") + async with service_manager.pause_rabbit(): + print("[rabbit]: paused") + await _sleep_in_interval(0.2, 0.4) + print("[rabbit]: resumed") + + case "redis": + print("[redis]: pausing") + async with service_manager.pause_redis(): + print("[redis]: paused") + await _sleep_in_interval(0.2, 0.4) + print("[redis]: resumed") + + await _assert_has_entries( + [manager], "get-results", count=deferred_tasks_to_start + ) diff --git a/packages/service-library/tests/fastapi/conftest.py b/packages/service-library/tests/fastapi/conftest.py index 9fb84bb62ff..f8811ca04f5 100644 --- 
a/packages/service-library/tests/fastapi/conftest.py +++ b/packages/service-library/tests/fastapi/conftest.py @@ -3,17 +3,15 @@ # pylint: disable=unused-variable import socket -from datetime import datetime -from typing import AsyncIterable, Callable, cast +from collections.abc import AsyncIterator, Callable +from typing import cast +import arrow import pytest -from fastapi import FastAPI +from fastapi import APIRouter, FastAPI from fastapi.params import Query -from fastapi.routing import APIRouter -from httpx import AsyncClient +from httpx import ASGITransport, AsyncClient from pydantic.types import PositiveFloat -from pytest import FixtureRequest -from servicelib.fastapi import long_running_tasks @pytest.fixture @@ -23,7 +21,7 @@ def app() -> FastAPI: @api_router.get("/") def _get_root(): - return {"name": __name__, "timestamp": datetime.utcnow().isoformat()} + return {"name": __name__, "timestamp": arrow.utcnow().datetime.isoformat()} @api_router.get("/data") def _get_data(x: PositiveFloat, y: int = Query(..., gt=3, lt=4)): @@ -35,29 +33,19 @@ def _get_data(x: PositiveFloat, y: int = Query(..., gt=3, lt=4)): return _app -@pytest.fixture(params=["", "/base-path", "/nested/path"]) -def router_prefix(request: FixtureRequest) -> str: - return request.param - - @pytest.fixture -async def bg_task_app(router_prefix: str) -> AsyncIterable[FastAPI]: - app = FastAPI() - - long_running_tasks.server.setup(app, router_prefix=router_prefix) - yield app - - -@pytest.fixture(scope="function") -async def async_client(bg_task_app: FastAPI) -> AsyncIterable[AsyncClient]: +async def client(app: FastAPI) -> AsyncIterator[AsyncClient]: async with AsyncClient( - app=bg_task_app, - base_url="http://backgroud.testserver.io", - headers={"Content-Type": "application/json"}, + transport=ASGITransport(app=app), base_url="http://test" ) as client: yield client +@pytest.fixture(params=["", "/base-path", "/nested/path"]) +def router_prefix(request: pytest.FixtureRequest) -> str: + return request.param + + @pytest.fixture def get_unused_port() -> Callable[[], int]: def go() -> int: diff --git a/packages/service-library/tests/fastapi/long_running_tasks/conftest.py b/packages/service-library/tests/fastapi/long_running_tasks/conftest.py new file mode 100644 index 00000000000..d43a7e445c1 --- /dev/null +++ b/packages/service-library/tests/fastapi/long_running_tasks/conftest.py @@ -0,0 +1,29 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import AsyncIterable + +import pytest +from fastapi import FastAPI +from httpx import ASGITransport, AsyncClient +from servicelib.fastapi import long_running_tasks + + +@pytest.fixture +async def bg_task_app(router_prefix: str) -> FastAPI: + app = FastAPI() + + long_running_tasks.server.setup(app, router_prefix=router_prefix) + return app + + +@pytest.fixture +async def async_client(bg_task_app: FastAPI) -> AsyncIterable[AsyncClient]: + async with AsyncClient( + transport=ASGITransport(app=bg_task_app), + base_url="http://backgroud.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + yield client diff --git a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py index b05a7da6bc8..52527f138d9 100644 --- a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py +++ 
b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks.py @@ -11,17 +11,18 @@ import asyncio import json -from typing import AsyncIterator, Awaitable, Callable, Final +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Final import pytest from asgi_lifespan import LifespanManager from fastapi import APIRouter, Depends, FastAPI, status from httpx import AsyncClient -from pydantic import parse_obj_as +from pydantic import TypeAdapter from servicelib.fastapi import long_running_tasks from servicelib.long_running_tasks._models import TaskGet, TaskId from servicelib.long_running_tasks._task import TaskContext -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed @@ -76,6 +77,7 @@ async def create_string_list_task( @pytest.fixture async def app(server_routes: APIRouter) -> AsyncIterator[FastAPI]: + # overrides fastapi/conftest.py:app app = FastAPI(title="test app") app.include_router(server_routes) long_running_tasks.server.setup(app) @@ -84,12 +86,6 @@ async def app(server_routes: APIRouter) -> AsyncIterator[FastAPI]: yield app -@pytest.fixture -async def client(app: FastAPI) -> AsyncIterator[AsyncClient]: - async with AsyncClient(app=app, base_url="http://test") as client: - yield client - - @pytest.fixture def start_long_running_task() -> Callable[[FastAPI, AsyncClient], Awaitable[TaskId]]: async def _caller(app: FastAPI, client: AsyncClient, **query_kwargs) -> TaskId: @@ -98,7 +94,9 @@ async def _caller(app: FastAPI, client: AsyncClient, **query_kwargs) -> TaskId: ) resp = await client.post(f"{url}") assert resp.status_code == status.HTTP_202_ACCEPTED - task_id = parse_obj_as(long_running_tasks.server.TaskId, resp.json()) + task_id = TypeAdapter(long_running_tasks.server.TaskId).validate_python( + resp.json() + ) return task_id return _caller @@ -126,7 +124,7 @@ async def _waiter( with attempt: result = await client.get(f"{status_url}") assert result.status_code == status.HTTP_200_OK - task_status = long_running_tasks.server.TaskStatus.parse_obj( + task_status = long_running_tasks.server.TaskStatus.model_validate( result.json() ) assert task_status @@ -134,6 +132,7 @@ async def _waiter( return _waiter + async def test_workflow( app: FastAPI, client: AsyncClient, @@ -152,12 +151,14 @@ async def test_workflow( with attempt: result = await client.get(f"{status_url}") assert result.status_code == status.HTTP_200_OK - task_status = long_running_tasks.server.TaskStatus.parse_obj(result.json()) + task_status = long_running_tasks.server.TaskStatus.model_validate( + result.json() + ) assert task_status progress_updates.append( (task_status.task_progress.message, task_status.task_progress.percent) ) - print(f"<-- received task status: {task_status.json(indent=2)}") + print(f"<-- received task status: {task_status.model_dump_json(indent=2)}") assert task_status.done, "task incomplete" print( f"-- waiting for task status completed successfully: {json.dumps(attempt.retry_state.retry_object.statistics, indent=2)}" @@ -182,7 +183,7 @@ async def test_workflow( result = await client.get(f"{result_url}") # NOTE: this is DIFFERENT than with aiohttp where we return the real result assert result.status_code == status.HTTP_200_OK - task_result = long_running_tasks.server.TaskResult.parse_obj(result.json()) + task_result = long_running_tasks.server.TaskResult.model_validate(result.json()) 
assert not task_result.error assert task_result.result == [f"{x}" for x in range(10)] # getting the result again should raise a 404 @@ -221,7 +222,7 @@ async def test_failing_task_returns_error( result_url = app.url_path_for("get_task_result", task_id=task_id) result = await client.get(f"{result_url}") assert result.status_code == status.HTTP_200_OK - task_result = long_running_tasks.server.TaskResult.parse_obj(result.json()) + task_result = long_running_tasks.server.TaskResult.model_validate(result.json()) assert not task_result.result assert task_result.error @@ -233,6 +234,7 @@ async def test_failing_task_returns_error( # assert task_result.error["errors"][0]["code"] == "RuntimeError" # assert task_result.error["errors"][0]["message"] == "We were asked to fail!!" + async def test_get_results_before_tasks_finishes_returns_404( app: FastAPI, client: AsyncClient, @@ -276,7 +278,7 @@ async def test_list_tasks_empty_list(app: FastAPI, client: AsyncClient): list_url = app.url_path_for("list_tasks") result = await client.get(f"{list_url}") assert result.status_code == status.HTTP_200_OK - list_of_tasks = parse_obj_as(list[TaskGet], result.json()) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(result.json()) assert list_of_tasks == [] @@ -298,7 +300,7 @@ async def test_list_tasks( list_url = app.url_path_for("list_tasks") result = await client.get(f"{list_url}") assert result.status_code == status.HTTP_200_OK - list_of_tasks = parse_obj_as(list[TaskGet], result.json()) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(result.json()) assert len(list_of_tasks) == NUM_TASKS # now wait for them to finish @@ -313,5 +315,5 @@ async def test_list_tasks( # the list shall go down one by one result = await client.get(f"{list_url}") assert result.status_code == status.HTTP_200_OK - list_of_tasks = parse_obj_as(list[TaskGet], result.json()) + list_of_tasks = TypeAdapter(list[TaskGet]).validate_python(result.json()) assert len(list_of_tasks) == NUM_TASKS - (task_index + 1) diff --git a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py index 30193b9b269..985cfca2de6 100644 --- a/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py +++ b/packages/service-library/tests/fastapi/long_running_tasks/test_long_running_tasks_context_manager.py @@ -8,7 +8,7 @@ from asgi_lifespan import LifespanManager from fastapi import APIRouter, Depends, FastAPI, status from httpx import AsyncClient -from pydantic import AnyHttpUrl, PositiveFloat, parse_obj_as +from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter from servicelib.fastapi.long_running_tasks._context_manager import _ProgressManager from servicelib.fastapi.long_running_tasks.client import ( Client, @@ -49,7 +49,8 @@ async def a_test_task(task_progress: TaskProgress) -> int: async def a_failing_test_task(task_progress: TaskProgress) -> None: await asyncio.sleep(TASK_SLEEP_INTERVAL) - raise RuntimeError("I am failing as requested") + msg = "I am failing as requested" + raise RuntimeError(msg) @pytest.fixture @@ -90,7 +91,7 @@ async def bg_task_app( @pytest.fixture def mock_task_id() -> TaskId: - return parse_obj_as(TaskId, "fake_task_id") + return TypeAdapter(TaskId).validate_python("fake_task_id") async def test_task_result( @@ -100,7 +101,7 @@ async def test_task_result( assert result.status_code == status.HTTP_200_OK, result.text task_id 
= result.json() - url = parse_obj_as(AnyHttpUrl, "http://backgroud.testserver.io") + url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/") client = Client(app=bg_task_app, async_client=async_client, base_url=url) async with periodic_task_result( client, @@ -120,7 +121,7 @@ async def test_task_result_times_out( assert result.status_code == status.HTTP_200_OK, result.text task_id = result.json() - url = parse_obj_as(AnyHttpUrl, "http://backgroud.testserver.io") + url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/") client = Client(app=bg_task_app, async_client=async_client, base_url=url) timeout = TASK_SLEEP_INTERVAL / 10 with pytest.raises(TaskClientTimeoutError) as exec_info: @@ -146,7 +147,7 @@ async def test_task_result_task_result_is_an_error( assert result.status_code == status.HTTP_200_OK, result.text task_id = result.json() - url = parse_obj_as(AnyHttpUrl, "http://backgroud.testserver.io") + url = TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io/") client = Client(app=bg_task_app, async_client=async_client, base_url=url) with pytest.raises(TaskClientResultError) as exec_info: async with periodic_task_result( @@ -157,7 +158,7 @@ async def test_task_result_task_result_is_an_error( ): pass assert f"{exec_info.value}".startswith(f"Task {task_id} finished with exception:") - assert 'raise RuntimeError("I am failing as requested")' in f"{exec_info.value}" + assert "I am failing as requested" in f"{exec_info.value}" await _assert_task_removed(async_client, task_id, router_prefix) @@ -167,7 +168,7 @@ async def test_progress_updater(repeat: int, mock_task_id: TaskId) -> None: received = () async def progress_update( - message: ProgressMessage, percent: ProgressPercent, task_id: TaskId + message: ProgressMessage, percent: ProgressPercent | None, task_id: TaskId ) -> None: nonlocal counter nonlocal received @@ -185,12 +186,18 @@ async def progress_update( assert received == ("", None) for _ in range(repeat): - await progress_updater.update(mock_task_id, percent=0.0) + await progress_updater.update( + mock_task_id, percent=TypeAdapter(ProgressPercent).validate_python(0.0) + ) assert counter == 2 assert received == ("", 0.0) for _ in range(repeat): - await progress_updater.update(mock_task_id, percent=1.0, message="done") + await progress_updater.update( + mock_task_id, + percent=TypeAdapter(ProgressPercent).validate_python(1.0), + message="done", + ) assert counter == 3 assert received == ("done", 1.0) diff --git a/packages/service-library/tests/fastapi/test_cancellation_middleware.py b/packages/service-library/tests/fastapi/test_cancellation_middleware.py new file mode 100644 index 00000000000..add93851f54 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_cancellation_middleware.py @@ -0,0 +1,165 @@ +# pylint: disable=redefined-outer-name + +import asyncio +import logging +from collections.abc import Iterator +from threading import Thread +from unittest.mock import AsyncMock + +import httpx +import pytest +import uvicorn +from fastapi import APIRouter, BackgroundTasks, FastAPI +from pytest_simcore.helpers.logging_tools import log_context +from servicelib.fastapi.cancellation_middleware import RequestCancellationMiddleware +from servicelib.utils import unused_port +from tenacity import retry, stop_after_delay, wait_fixed +from yarl import URL + + +@pytest.fixture +def server_done_event() -> asyncio.Event: + return asyncio.Event() + + +@pytest.fixture +def server_cancelled_mock() -> AsyncMock: + return 
AsyncMock() + + +@pytest.fixture +def fastapi_router( + server_done_event: asyncio.Event, server_cancelled_mock: AsyncMock +) -> APIRouter: + router = APIRouter() + + @router.get("/") + async def root() -> dict[str, str]: + with log_context(logging.INFO, msg="root endpoint") as ctx: + ctx.logger.info("root endpoint called") + return {"message": "Hello, World!"} + + @router.get("/sleep") + async def sleep(sleep_time: float) -> dict[str, str]: + with log_context(logging.INFO, msg="sleeper") as ctx: + try: + await asyncio.sleep(sleep_time) + return {"message": f"Slept for {sleep_time} seconds"} + except asyncio.CancelledError: + ctx.logger.info("sleeper cancelled!") + await server_cancelled_mock() + return {"message": "Cancelled"} + finally: + server_done_event.set() + + async def _sleep_in_the_back(sleep_time: float) -> None: + with log_context(logging.INFO, msg="sleeper in the back") as ctx: + try: + await asyncio.sleep(sleep_time) + except asyncio.CancelledError: + ctx.logger.info("sleeper in the back cancelled!") + await server_cancelled_mock() + finally: + server_done_event.set() + + @router.get("/sleep-with-background-task") + async def sleep_with_background_task( + sleep_time: float, background_tasks: BackgroundTasks + ) -> dict[str, str]: + with log_context(logging.INFO, msg="sleeper with background task"): + background_tasks.add_task(_sleep_in_the_back, sleep_time) + return {"message": "Sleeping in the back"} + + return router + + +@pytest.fixture +def fastapi_app(fastapi_router: APIRouter) -> FastAPI: + app = FastAPI() + app.include_router(fastapi_router) + app.add_middleware(RequestCancellationMiddleware) + return app + + +@pytest.fixture +def uvicorn_server(fastapi_app: FastAPI) -> Iterator[URL]: + random_port = unused_port() + with log_context( + logging.INFO, + msg=f"with uvicorn server on 127.0.0.1:{random_port}", + ) as ctx: + config = uvicorn.Config( + fastapi_app, + host="127.0.0.1", + port=random_port, + log_level="error", + ) + server = uvicorn.Server(config) + + thread = Thread(target=server.run) + thread.daemon = True + thread.start() + + @retry(wait=wait_fixed(0.1), stop=stop_after_delay(10), reraise=True) + def wait_for_server_ready() -> None: + with httpx.Client() as client: + response = client.get(f"http://127.0.0.1:{random_port}/") + assert ( + response.is_success + ), f"Server did not start successfully: {response.status_code} {response.text}" + + wait_for_server_ready() + + ctx.logger.info( + "server ready at: %s", + f"http://127.0.0.1:{random_port}", + ) + + yield URL(f"http://127.0.0.1:{random_port}") + + server.should_exit = True + thread.join(timeout=10) + + +async def test_server_cancels_when_client_disconnects( + uvicorn_server: URL, + server_done_event: asyncio.Event, + server_cancelled_mock: AsyncMock, +): + async with httpx.AsyncClient(base_url=f"{uvicorn_server}") as client: + # check a standard call still completes as expected + with log_context(logging.INFO, msg="client calling endpoint"): + response = await client.get("/sleep", params={"sleep_time": 0.1}) + assert response.status_code == 200 + assert response.json() == {"message": "Slept for 0.1 seconds"} + async with asyncio.timeout(10): + await server_done_event.wait() + server_done_event.clear() + + # check a slow call gets cancelled + with log_context( + logging.INFO, msg="client calling endpoint for cancellation" + ) as ctx: + with pytest.raises(httpx.ReadTimeout): + response = await client.get( + "/sleep", params={"sleep_time": 10}, timeout=0.1 + ) + ctx.logger.info("client disconnected from server") +
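+        # the middleware is expected to cancel the handler once the client disconnects, which makes it call server_cancelled_mock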
+ async with asyncio.timeout(5): + await server_done_event.wait() + server_cancelled_mock.assert_called_once() + server_cancelled_mock.reset_mock() + server_done_event.clear() + + # NOTE: shows that FastAPI BackgroundTasks get cancelled too! + # check background tasks get cancelled as well sadly + with log_context(logging.INFO, msg="client calling endpoint for cancellation"): + response = await client.get( + "/sleep-with-background-task", + params={"sleep_time": 2}, + ) + assert response.status_code == 200 + async with asyncio.timeout(5): + await server_done_event.wait() + server_cancelled_mock.assert_called_once() diff --git a/packages/service-library/tests/fastapi/test_docker_utils.py b/packages/service-library/tests/fastapi/test_docker_utils.py new file mode 100644 index 00000000000..55898891a14 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_docker_utils.py @@ -0,0 +1,282 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +import asyncio +from collections.abc import Awaitable, Callable +from typing import Any +from unittest import mock + +import pytest +from faker import Faker +from models_library.docker import DockerGenericTag +from models_library.progress_bar import ProgressReport +from pydantic import ByteSize, TypeAdapter +from pytest_mock import MockerFixture +from servicelib import progress_bar +from servicelib.docker_utils import pull_image +from servicelib.fastapi.docker_utils import ( + pull_images, + retrieve_image_layer_information, +) +from settings_library.docker_registry import RegistrySettings + + +@pytest.mark.parametrize( + "service_repo, service_tag", + [ + ("itisfoundation/sleeper", "1.0.0"), + ("itisfoundation/sleeper", "2.2.0"), + ( + "itisfoundation/sleeper", + "sha256:a6d9886311721d8d341068361ecf9998a3c7ecb0efb23ebac553602c2eca1f8f", + ), + ], +) +async def test_retrieve_image_layer_information( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + registry_settings: RegistrySettings, + osparc_service: dict[str, Any], + service_repo: str, + service_tag: str, +): + # clean first + image_name = f"{service_repo}:{service_tag}" + if "sha256" in service_tag: + image_name = f"{service_repo}@{service_tag}" + await remove_images_from_host([image_name]) + docker_image = TypeAdapter(DockerGenericTag).validate_python( + f"{registry_settings.REGISTRY_URL}/{osparc_service['image']['name']}:{osparc_service['image']['tag']}", + ) + layer_information = await retrieve_image_layer_information( + docker_image, registry_settings + ) + + assert layer_information + + +@pytest.mark.parametrize( + "image", + [ + "itisfoundation/dask-sidecar:master-github-latest", + "library/nginx:latest", + "nginx:1.25.4", + "nginx:latest", + "ubuntu@sha256:81bba8d1dde7fc1883b6e95cd46d6c9f4874374f2b360c8db82620b33f6b5ca1", + "busybox:latest", + ], +) +async def test_retrieve_image_layer_information_from_external_registry( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + image: DockerGenericTag, + registry_settings: RegistrySettings, +): + # clean first + await remove_images_from_host([image]) + layer_information = await retrieve_image_layer_information(image, registry_settings) + assert layer_information + + +@pytest.fixture +async def mocked_log_cb(mocker: MockerFixture) -> mock.AsyncMock: + async def _log_cb(*args, **kwargs) -> None: + print(f"received log: {args}, {kwargs}") + + return 
mocker.AsyncMock(side_effect=_log_cb) + + +@pytest.fixture +async def mocked_progress_cb(mocker: MockerFixture) -> mock.AsyncMock: + async def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + assert isinstance(args[0], ProgressReport) + + return mocker.AsyncMock(side_effect=_progress_cb) + + +def _assert_progress_report_values( + mocked_progress_cb: mock.AsyncMock, *, total: float +) -> None: + # NOTE: we exclude the message part here as this is already tested in servicelib + # check first progress + assert mocked_progress_cb.call_args_list[0].args[0].model_dump( + exclude={"message", "attempt"} + ) == ProgressReport(actual_value=0, total=total, unit="Byte").model_dump( + exclude={"message", "attempt"} + ) + # check last progress + assert mocked_progress_cb.call_args_list[-1].args[0].model_dump( + exclude={"message", "attempt"} + ) == ProgressReport(actual_value=total, total=total, unit="Byte").model_dump( + exclude={"message", "attempt"} + ) + + +@pytest.mark.parametrize( + "image", + ["itisfoundation/sleeper:1.0.0", "nginx:latest", "busybox:latest"], +) +async def test_pull_image( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + image: DockerGenericTag, + registry_settings: RegistrySettings, + mocked_log_cb: mock.AsyncMock, + mocked_progress_cb: mock.AsyncMock, + caplog: pytest.LogCaptureFixture, + faker: Faker, +): + await remove_images_from_host([image]) + layer_information = await retrieve_image_layer_information(image, registry_settings) + assert layer_information + + async with progress_bar.ProgressBarData( + num_steps=layer_information.layers_total_size, + progress_report_cb=mocked_progress_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as main_progress_bar: + + await pull_image( + image, + registry_settings, + main_progress_bar, + mocked_log_cb, + layer_information, + ) + mocked_log_cb.assert_called() + + _assert_progress_report_values( + mocked_progress_cb, total=layer_information.layers_total_size + ) + + mocked_progress_cb.reset_mock() + mocked_log_cb.reset_mock() + + # check there were no warnings + # NOTE: this would pop up in case docker changes its pulling statuses + assert not [r.message for r in caplog.records if r.levelname == "WARNING"] + + # pulling a second time should work since the image is already there + async with progress_bar.ProgressBarData( + num_steps=layer_information.layers_total_size, + progress_report_cb=mocked_progress_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as main_progress_bar: + await pull_image( + image, + registry_settings, + main_progress_bar, + mocked_log_cb, + layer_information, + ) + mocked_log_cb.assert_called() + assert ( + main_progress_bar._current_steps # noqa: SLF001 + == layer_information.layers_total_size + ) + _assert_progress_report_values( + mocked_progress_cb, total=layer_information.layers_total_size + ) + # check there were no warnings + assert not [r.message for r in caplog.records if r.levelname == "WARNING"] + + +@pytest.mark.parametrize( + "image", + ["nginx:latest", "busybox:latest"], +) +async def test_pull_image_without_layer_information( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + image: DockerGenericTag, + registry_settings: RegistrySettings, + mocked_log_cb: mock.AsyncMock, + mocked_progress_cb: mock.AsyncMock, + caplog: pytest.LogCaptureFixture, + faker: Faker, +): + await remove_images_from_host([image]) + layer_information = await retrieve_image_layer_information(image, registry_settings) + assert layer_information +
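+    # NOTE: use an arbitrary total larger than the real image size, since pull_image is invoked below without layer information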
print(f"{image=} has {layer_information.layers_total_size=}") + + fake_number_of_steps = TypeAdapter(ByteSize).validate_python("200MiB") + assert fake_number_of_steps > layer_information.layers_total_size + async with progress_bar.ProgressBarData( + num_steps=fake_number_of_steps, + progress_report_cb=mocked_progress_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as main_progress_bar: + await pull_image( + image, registry_settings, main_progress_bar, mocked_log_cb, None + ) + mocked_log_cb.assert_called() + # depending on the system speed, and if the progress report callback is slow, then + + _assert_progress_report_values(mocked_progress_cb, total=fake_number_of_steps) + mocked_progress_cb.reset_mock() + mocked_log_cb.reset_mock() + + # check there were no warnings + # NOTE: this would pop up in case docker changes its pulling statuses + expected_warning = "pulling image without layer information" + assert not [ + r.message + for r in caplog.records + if r.levelname == "WARNING" and not r.message.startswith(expected_warning) + ] + + # pull a second time should, the image is already there, but the progress is then 0 + async with progress_bar.ProgressBarData( + num_steps=1, + progress_report_cb=mocked_progress_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as main_progress_bar: + await pull_image( + image, registry_settings, main_progress_bar, mocked_log_cb, None + ) + mocked_log_cb.assert_called() + assert main_progress_bar._current_steps == 0 # noqa: SLF001 + _assert_progress_report_values(mocked_progress_cb, total=1) + # check there were no warnings + assert not [ + r.message + for r in caplog.records + if r.levelname == "WARNING" and not r.message.startswith(expected_warning) + ] + + +@pytest.mark.parametrize( + "images_set", + [ + {"itisfoundation/sleeper:1.0.0", "nginx:latest", "busybox:latest"}, + ], +) +async def test_pull_images_set( + remove_images_from_host: Callable[[list[str]], Awaitable[None]], + images_set: set[DockerGenericTag], + registry_settings: RegistrySettings, + mocked_log_cb: mock.AsyncMock, + mocked_progress_cb: mock.AsyncMock, + caplog: pytest.LogCaptureFixture, +): + await remove_images_from_host(list(images_set)) + layer_informations = await asyncio.gather( + *[ + retrieve_image_layer_information(image, registry_settings) + for image in images_set + ] + ) + assert layer_informations + images_total_size = sum(_.layers_total_size for _ in layer_informations if _) + + await pull_images(images_set, registry_settings, mocked_progress_cb, mocked_log_cb) + mocked_log_cb.assert_called() + _assert_progress_report_values(mocked_progress_cb, total=images_total_size) + + # check there were no warnings + # NOTE: this would pop up in case docker changes its pulling statuses + assert not [r.message for r in caplog.records if r.levelname == "WARNING"] diff --git a/packages/service-library/tests/fastapi/test_exceptions_utils.py b/packages/service-library/tests/fastapi/test_exceptions_utils.py new file mode 100644 index 00000000000..cfe7fbde0e8 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_exceptions_utils.py @@ -0,0 +1,82 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import http + +import pytest +from fastapi import FastAPI, HTTPException +from httpx import AsyncClient +from models_library.api_schemas__common.errors import DefaultApiError +from pydantic import TypeAdapter +from 
servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +_MIN_ERROR_STATUS_CODE = 400 + + +builtin_exceptions = { + f"{exc.__class__.__name__}": exc + for exc in [ + # https://docs.python.org/3/library/exceptions.html#base-classes + Exception(), + ArithmeticError(), + BufferError(), + LookupError(), + # https://docs.python.org/3/library/exceptions.html#concrete-exceptions + NotImplementedError(), + ValueError("wrong value"), + ] +} + +http_exceptions = { + status_code: HTTPException(status_code=status_code, detail=f"test {status_code}") + for status_code in [ + e.value for e in http.HTTPStatus if e.value >= _MIN_ERROR_STATUS_CODE + ] +} + + +@pytest.fixture +def app() -> FastAPI: + + app = FastAPI() + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) + + @app.post("/error/{code}") + async def raise_http_exception(code: int): + raise http_exceptions[code] + + @app.post("/raise/{code}") + async def raise_exception(code: str): + raise builtin_exceptions[code] + + return app + + +@pytest.mark.parametrize("code,exception", list(http_exceptions.items())) +async def test_http_errors_respond_with_error_model( + client: AsyncClient, code: int, exception: HTTPException +): + response = await client.post(f"/error/{code}") + assert response.status_code == code + + error = TypeAdapter(DefaultApiError).validate_json(response.text) + assert error.detail == f"test {code}" + assert error.name + + +@pytest.mark.xfail +@pytest.mark.parametrize("code,exception", list(builtin_exceptions.items())) +async def test_non_http_error_handling( + client: AsyncClient, code: int | str, exception: BaseException +): + response = await client.post(f"/raise/{code}") + print(response) + + error = TypeAdapter(DefaultApiError).validate_json(response.text) diff --git a/packages/service-library/tests/fastapi/test_http_client.py b/packages/service-library/tests/fastapi/test_http_client.py new file mode 100644 index 00000000000..38bcde6aab2 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_http_client.py @@ -0,0 +1,115 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Iterator + +import httpx +import pytest +import respx +from asgi_lifespan import LifespanManager +from fastapi import FastAPI, status +from models_library.healthchecks import IsResponsive +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import ( + AttachLifespanMixin, + BaseHTTPApi, + HealthMixinMixin, +) + + +def test_using_app_state_mixin(): + class SomeData(SingletonInAppStateMixin): + app_state_name: str = "my_data" + frozen: bool = True + + def __init__(self, value): + self.value = value + + # my app + app = FastAPI() + + # load -> fails + with pytest.raises(AttributeError): + SomeData.get_from_app_state(app) + + # save + obj = SomeData(42) + obj.set_to_app_state(app) + + # load + assert SomeData.get_from_app_state(app) == obj + assert app.state.my_data == obj + + # cannot re-save if frozen + assert SomeData.frozen + with pytest.raises(ValueError): + SomeData(32).set_to_app_state(app) + + # delete + assert SomeData.pop_from_app_state(app) == obj + with pytest.raises(AttributeError): + SomeData.get_from_app_state(app) + + # save = load + assert SomeData(32).set_to_app_state(app) == 
SomeData.get_from_app_state(app) + + +@pytest.fixture +def base_url() -> str: + return "https://test_base_http_api" + + +@pytest.fixture +def mock_server_api(base_url: str) -> Iterator[respx.MockRouter]: + with respx.mock( + base_url=base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.get("/").respond(status.HTTP_200_OK) + yield mock + + +async def test_base_http_api(mock_server_api: respx.MockRouter, base_url: str): + class MyClientApi( + BaseHTTPApi, AttachLifespanMixin, HealthMixinMixin, SingletonInAppStateMixin + ): + app_state_name: str = "my_client_api" + + new_app = FastAPI() + + # create + api = MyClientApi(client=httpx.AsyncClient(base_url=base_url)) + + # or create from client kwargs + assert MyClientApi.from_client_kwargs(base_url=base_url) + + # save to app.state + api.set_to_app_state(new_app) + assert MyClientApi.get_from_app_state(new_app) == api + + # defin lifespan + api.attach_lifespan_to(new_app) + + async with LifespanManager( + new_app, + startup_timeout=None, # for debugging + shutdown_timeout=10, + ): + # start event called + assert not api.client.is_closed + + assert await api.ping() + assert await api.is_healthy() + + alive = await api.check_liveness() + assert bool(alive) + assert isinstance(alive, IsResponsive) + assert alive.elapsed.total_seconds() < 1 + + # shutdown event + assert api.client.is_closed diff --git a/packages/service-library/tests/fastapi/test_http_client_thin.py b/packages/service-library/tests/fastapi/test_http_client_thin.py new file mode 100644 index 00000000000..0a1f800f510 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_http_client_thin.py @@ -0,0 +1,252 @@ +# pylint:disable=redefined-outer-name + +import logging +from collections.abc import AsyncIterable, Iterable +from typing import Final + +import arrow +import pytest +from httpx import ( + HTTPError, + PoolTimeout, + Request, + RequestError, + Response, + TransportError, + codes, +) +from pydantic import AnyHttpUrl, TypeAdapter +from respx import MockRouter +from servicelib.fastapi.http_client_thin import ( + BaseThinClient, + ClientHttpError, + UnexpectedStatusError, + expect_status, + retry_on_errors, +) + +_TIMEOUT_OVERWRITE: Final[int] = 1 + +# UTILS + + +class FakeThickClient(BaseThinClient): + @retry_on_errors() + async def get_provided_url(self, provided_url: str) -> Response: + return await self.client.get(provided_url) + + @retry_on_errors() + async def normal_timeout(self) -> Response: + return await self.client.get("http://missing-host:1111") + + @retry_on_errors(total_retry_timeout_overwrite=_TIMEOUT_OVERWRITE) + async def overwritten_timeout(self) -> Response: + return await self.client.get("http://missing-host:1111") + + +def _assert_messages(messages: list[str]) -> None: + # check if the right amount of messages was captured by the logs + unexpected_counter = 1 + for log_message in messages: + if log_message.startswith("Retrying"): + assert "as it raised" in log_message + continue + assert log_message.startswith(f"Request timed-out after {unexpected_counter}") + unexpected_counter += 1 + + +@pytest.fixture() +def caplog_info_level( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: + with caplog.at_level(logging.INFO): + yield caplog + + +@pytest.fixture +def request_timeout() -> int: + # below refer to exponential wait step duration + return 1 + 2 + + +@pytest.fixture +async def thick_client(request_timeout: int) -> AsyncIterable[FakeThickClient]: + async with 
FakeThickClient( + total_retry_interval=request_timeout, tracing_settings=None + ) as client: + yield client + + +@pytest.fixture +def test_url() -> str: + url = TypeAdapter(AnyHttpUrl).validate_python("http://missing-host:1111") + return f"{url}" + + +async def test_connection_error( + thick_client: FakeThickClient, + test_url: str, +) -> None: + with pytest.raises(ClientHttpError) as exe_info: + await thick_client.get_provided_url(test_url) + + assert isinstance(exe_info.value, ClientHttpError) + assert isinstance(exe_info.value.error, TransportError) + + +async def test_retry_on_errors( + request_timeout: int, + test_url: str, + caplog_info_level: pytest.LogCaptureFixture, +) -> None: + client = FakeThickClient( + total_retry_interval=request_timeout, tracing_settings=None + ) + + with pytest.raises(ClientHttpError): + await client.get_provided_url(test_url) + + _assert_messages(caplog_info_level.messages) + + +@pytest.mark.parametrize("error_class", [TransportError, PoolTimeout]) +async def test_retry_on_errors_by_error_type( + error_class: type[RequestError], + caplog_info_level: pytest.LogCaptureFixture, + request_timeout: int, + test_url: str, +) -> None: + class ATestClient(BaseThinClient): + # pylint: disable=no-self-use + @retry_on_errors() + async def raises_request_error(self) -> Response: + raise error_class( + "mock_connect_error", # noqa: EM101 + request=Request(method="GET", url=test_url), + ) + + client = ATestClient(total_retry_interval=request_timeout, tracing_settings=None) + + with pytest.raises(ClientHttpError): + await client.raises_request_error() + + if error_class == PoolTimeout: + _assert_messages(caplog_info_level.messages[:-1]) + connections_message = caplog_info_level.messages[-1] + assert ( + connections_message + == "Pool status @ 'POOL TIMEOUT': requests(0)=[], connections(0)=[]" + ) + else: + _assert_messages(caplog_info_level.messages) + + +async def test_retry_on_errors_raises_client_http_error( + request_timeout: int, +) -> None: + class ATestClient(BaseThinClient): + # pylint: disable=no-self-use + @retry_on_errors() + async def raises_http_error(self) -> Response: + msg = "mock_http_error" + raise HTTPError(msg) + + client = ATestClient(total_retry_interval=request_timeout, tracing_settings=None) + + with pytest.raises(ClientHttpError): + await client.raises_http_error() + + +async def test_methods_do_not_return_response( + request_timeout: int, +) -> None: + class OKTestClient(BaseThinClient): + async def public_method_ok(self) -> Response: # type: ignore + """this method will be ok even if no code is used""" + + # OK + OKTestClient(total_retry_interval=request_timeout, tracing_settings=None) + + class FailWrongAnnotationTestClient(BaseThinClient): + async def public_method_wrong_annotation(self) -> None: + """this method will raise an error""" + + with pytest.raises(AssertionError, match="should return an instance"): + FailWrongAnnotationTestClient( + total_retry_interval=request_timeout, tracing_settings=None + ) + + class FailNoAnnotationTestClient(BaseThinClient): + async def public_method_no_annotation(self): + """this method will raise an error""" + + with pytest.raises(AssertionError, match="should return an instance"): + FailNoAnnotationTestClient( + total_retry_interval=request_timeout, tracing_settings=None + ) + + +async def test_expect_state_decorator( + test_url: str, + respx_mock: MockRouter, + request_timeout: int, +) -> None: + url_get_200_ok = f"{test_url}ok" + get_wrong_state = f"{test_url}wrong-state" + error_status = 
codes.NOT_FOUND + + class ATestClient(BaseThinClient): + @expect_status(codes.OK) + async def get_200_ok(self) -> Response: + return await self.client.get(url_get_200_ok) + + @expect_status(error_status) + async def get_wrong_state(self) -> Response: + return await self.client.get(get_wrong_state) + + respx_mock.get(url_get_200_ok).mock(return_value=Response(codes.OK)) + respx_mock.get(get_wrong_state).mock(return_value=Response(codes.OK)) + + test_client = ATestClient( + total_retry_interval=request_timeout, tracing_settings=None + ) + + # OK + response = await test_client.get_200_ok() + assert response.status_code == codes.OK + + # RAISES EXPECTED ERROR + with pytest.raises(UnexpectedStatusError) as err_info: + await test_client.get_wrong_state() + + assert err_info.value.response.status_code == codes.OK + assert ( + f"{err_info.value}" + == f"Expected status: {error_status}, got {codes.OK} for: {get_wrong_state}: headers=Headers({{}}), body=''" + ) + + +async def test_retry_timeout_overwrite( + request_timeout: int, + caplog_info_level: pytest.LogCaptureFixture, +) -> None: + client = FakeThickClient( + total_retry_interval=request_timeout, tracing_settings=None + ) + + caplog_info_level.clear() + start = arrow.utcnow() + with pytest.raises(ClientHttpError): + await client.normal_timeout() + + normal_duration = (arrow.utcnow() - start).total_seconds() + assert normal_duration >= request_timeout + _assert_messages(caplog_info_level.messages) + + caplog_info_level.clear() + start = arrow.utcnow() + with pytest.raises(ClientHttpError): + await client.overwritten_timeout() + overwritten_duration = (arrow.utcnow() - start).total_seconds() + assert overwritten_duration >= _TIMEOUT_OVERWRITE + _assert_messages(caplog_info_level.messages) diff --git a/packages/service-library/tests/fastapi/test_httpx_utils.py b/packages/service-library/tests/fastapi/test_httpx_utils.py new file mode 100644 index 00000000000..48ce50389c8 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_httpx_utils.py @@ -0,0 +1,119 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import textwrap +from collections.abc import AsyncIterator, Iterator + +import httpx +import pytest +import respx +from fastapi import status +from httpx import AsyncClient +from servicelib.fastapi.httpx_utils import to_curl_command, to_httpx_command +from servicelib.utils_secrets import _PLACEHOLDER + + +@pytest.fixture +def base_url() -> str: + return "https://test_base_http_api" + + +@pytest.fixture +def mock_server_api(base_url: str) -> Iterator[respx.MockRouter]: + with respx.mock( + base_url=base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as mock: + mock.get("/").respond(status.HTTP_200_OK) + + mock.post(path__startswith="/foo").respond(status.HTTP_200_OK) + mock.get(path__startswith="/foo").respond(status.HTTP_200_OK) + mock.delete(path__startswith="/foo").respond(status.HTTP_200_OK) + + yield mock + + +@pytest.fixture +async def client( + mock_server_api: respx.MockRouter, base_url: str +) -> AsyncIterator[AsyncClient]: + async with httpx.AsyncClient(base_url=base_url) as client: + + yield client + + +async def test_to_curl_command(client: AsyncClient): + + # with POST + response = await client.post( + "/foo", + params={"x": "3"}, + json={"y": 12}, + headers={"x-secret": "this should not display"}, + ) + assert response.status_code == 200 + + cmd_short = to_curl_command(response.request) + + assert ( + cmd_short + == f'curl -X POST -H "host: test_base_http_api" -H "accept: */*" -H "accept-encoding: gzip, deflate" -H "connection: keep-alive" -H "user-agent: python-httpx/{httpx.__version__}" -H "x-secret: {_PLACEHOLDER}" -H "content-length: 8" -H "content-type: application/json" -d \'{{"y":12}}\' https://test_base_http_api/foo?x=3' + ) + + cmd_long = to_curl_command(response.request, use_short_options=False) + assert cmd_long == cmd_short.replace("-X", "--request",).replace( + "-H", + "--header", + ).replace( + "-d", + "--data", + ) + + # with GET + response = await client.get("/foo", params={"x": "3"}) + cmd_multiline = to_curl_command(response.request, multiline=True) + + assert ( + cmd_multiline + == textwrap.dedent( + f"""\ + curl \\ + -X GET \\ + -H "host: test_base_http_api" \\ + -H "accept: */*" \\ + -H "accept-encoding: gzip, deflate" \\ + -H "connection: keep-alive" \\ + -H "user-agent: python-httpx/{httpx.__version__}" \\ + https://test_base_http_api/foo?x=3 + """ + ).strip() + ) + + # with DELETE + response = await client.delete("/foo", params={"x": "3"}) + cmd = to_curl_command(response.request) + + assert "DELETE" in cmd + assert " -d " not in cmd + + +async def test_to_httpx_command(client: AsyncClient): + response = await client.post( + "/foo", + params={"x": "3"}, + json={"y": 12}, + headers={"x-secret": "this should not display"}, + ) + + cmd_short = to_httpx_command(response.request, multiline=False) + + print(cmd_short) + assert ( + cmd_short + == f'httpx -m POST -c \'{{"y":12}}\' -h "host" "test_base_http_api" -h "accept" "*/*" -h "accept-encoding" "gzip, deflate" -h "connection" "keep-alive" -h "user-agent" "python-httpx/{httpx.__version__}" -h "x-secret" "{_PLACEHOLDER}" -h "content-length" "8" -h "content-type" "application/json" https://test_base_http_api/foo?x=3' + ) diff --git a/packages/service-library/tests/fastapi/test_lifespan_utils.py b/packages/service-library/tests/fastapi/test_lifespan_utils.py new file mode 100644 index 00000000000..9f8baabf430 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_lifespan_utils.py @@ -0,0 +1,291 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import logging +from collections.abc import AsyncIterator +from typing import Any + +import pytest +from asgi_lifespan import LifespanManager as ASGILifespanManager +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from pytest_mock import MockerFixture +from pytest_simcore.helpers.logging_tools import log_context +from servicelib.fastapi.lifespan_utils import ( + LifespanAlreadyCalledError, + LifespanExpectedCalledError, 
+    LifespanOnShutdownError,
+    LifespanOnStartupError,
+    ensure_lifespan_called,
+    mark_lifespace_called,
+)
+
+
+async def test_multiple_lifespan_managers(capsys: pytest.CaptureFixture):
+    async def database_lifespan(app: FastAPI) -> AsyncIterator[State]:
+        _ = app
+        print("setup DB")
+        yield {}
+        print("shutdown DB")
+
+    async def cache_lifespan(app: FastAPI) -> AsyncIterator[State]:
+        _ = app
+        print("setup CACHE")
+        yield {}
+        print("shutdown CACHE")
+
+    lifespan_manager = LifespanManager()
+    lifespan_manager.add(database_lifespan)
+    lifespan_manager.add(cache_lifespan)
+
+    app = FastAPI(lifespan=lifespan_manager)
+
+    capsys.readouterr()
+
+    async with ASGILifespanManager(app):
+        messages = capsys.readouterr().out
+
+        assert "setup DB" in messages
+        assert "setup CACHE" in messages
+        assert "shutdown DB" not in messages
+        assert "shutdown CACHE" not in messages
+
+    messages = capsys.readouterr().out
+
+    assert "setup DB" not in messages
+    assert "setup CACHE" not in messages
+    assert "shutdown DB" in messages
+    assert "shutdown CACHE" in messages
+
+
+@pytest.fixture
+def postgres_lifespan() -> LifespanManager:
+    lifespan_manager = LifespanManager()
+
+    @lifespan_manager.add
+    async def _setup_postgres_sync_engine(_) -> AsyncIterator[State]:
+        with log_context(logging.INFO, "postgres_sync_engine"):
+            # pass state to children
+            yield {"postgres": {"engine": "Some Engine"}}
+
+    @lifespan_manager.add
+    async def _setup_postgres_async_engine(_, state: State) -> AsyncIterator[State]:
+        with log_context(logging.INFO, "postgres_async_engine"):
+            # pass state to children
+
+            current = state["postgres"]
+            yield {"postgres": {"aengine": "Some Async Engine", **current}}
+
+    return lifespan_manager
+
+
+@pytest.fixture
+def rabbitmq_lifespan() -> LifespanManager:
+    lifespan_manager = LifespanManager()
+
+    @lifespan_manager.add
+    async def _setup_rabbitmq(app: FastAPI) -> AsyncIterator[State]:
+        with log_context(logging.INFO, "rabbitmq"):
+
+            with pytest.raises(AttributeError, match="rabbitmq_rpc_server"):
+                _ = app.state.rabbitmq_rpc_server
+
+            # pass state to children
+            yield {"rabbitmq_rpc_server": "Some RabbitMQ RPC Server"}
+
+    return lifespan_manager
+
+
+async def test_app_lifespan_composition(
+    postgres_lifespan: LifespanManager, rabbitmq_lifespan: LifespanManager
+):
+    # The app has its own database and rpc-server to initialize
+    # this is how you connect the lifespans pre-defined in servicelib
+
+    @postgres_lifespan.add
+    async def database_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]:
+
+        with log_context(logging.INFO, "app database"):
+            assert state["postgres"] == {
+                "engine": "Some Engine",
+                "aengine": "Some Async Engine",
+            }
+
+            with pytest.raises(AttributeError, match="database_engine"):
+                _ = app.state.database_engine
+
+            app.state.database_engine = state["postgres"]["engine"]
+
+            yield {}  # no update
+
+            # tear-down stage
+            assert app.state.database_engine
+
+    @rabbitmq_lifespan.add
+    async def rpc_service_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]:
+        with log_context(logging.INFO, "app rpc-server"):
+            assert "rabbitmq_rpc_server" in state
+
+            app.state.rpc_server = state["rabbitmq_rpc_server"]
+
+            yield {}
+
+    # Compose lifespans
+    app_lifespan = LifespanManager()
+    app_lifespan.include(postgres_lifespan)
+    app_lifespan.include(rabbitmq_lifespan)
+
+    app = FastAPI(lifespan=app_lifespan)
+    async with ASGILifespanManager(app) as asgi_manager:
+
+        # asgi_manager state
+        assert asgi_manager._state == {  # noqa: SLF001
+            "postgres": {
+                "engine": "Some 
Engine", + "aengine": "Some Async Engine", + }, + "rabbitmq_rpc_server": "Some RabbitMQ RPC Server", + } + + # app state + assert app.state.database_engine + assert app.state.rpc_server + + # NOTE: these are different states! + assert app.state._state != asgi_manager._state # noqa: SLF001 + + # Logs shows lifespan execution: + # -> postgres_sync_engine starting ... + # -> postgres_async_engine starting ... + # -> app database starting ... + # -> rabbitmq starting ... + # -> app rpc-server starting ... + # <- app rpc-server done (<1ms) + # <- rabbitmq done (<1ms) + # <- app database done (1ms) + # <- postgres_async_engine done (1ms) + # <- postgres_sync_engine done (1ms) + + +@pytest.fixture +def failing_lifespan_manager(mocker: MockerFixture) -> dict[str, Any]: + startup_step = mocker.MagicMock() + shutdown_step = mocker.MagicMock() + handle_error = mocker.MagicMock() + + def raise_error(): + msg = "failing module" + raise RuntimeError(msg) + + async def lifespan_failing_on_startup(app: FastAPI) -> AsyncIterator[State]: + _name = lifespan_failing_on_startup.__name__ + + with log_context(logging.INFO, _name): + try: + raise_error() + startup_step(_name) + except RuntimeError as exc: + handle_error(_name, exc) + raise LifespanOnStartupError(lifespan_name=_name) from exc + yield {} + shutdown_step(_name) + + async def lifespan_failing_on_shutdown(app: FastAPI) -> AsyncIterator[State]: + _name = lifespan_failing_on_shutdown.__name__ + + with log_context(logging.INFO, _name): + startup_step(_name) + yield {} + try: + raise_error() + shutdown_step(_name) + except RuntimeError as exc: + handle_error(_name, exc) + raise LifespanOnShutdownError(lifespan_name=_name) from exc + + return { + "startup_step": startup_step, + "shutdown_step": shutdown_step, + "handle_error": handle_error, + "lifespan_failing_on_startup": lifespan_failing_on_startup, + "lifespan_failing_on_shutdown": lifespan_failing_on_shutdown, + } + + +async def test_app_lifespan_with_error_on_startup( + failing_lifespan_manager: dict[str, Any], +): + app_lifespan = LifespanManager() + app_lifespan.add(failing_lifespan_manager["lifespan_failing_on_startup"]) + app = FastAPI(lifespan=app_lifespan) + + with pytest.raises(LifespanOnStartupError) as err_info: + async with ASGILifespanManager(app): + ... + + exception = err_info.value + assert failing_lifespan_manager["handle_error"].called + assert not failing_lifespan_manager["startup_step"].called + assert not failing_lifespan_manager["shutdown_step"].called + assert exception.error_context() == { + "lifespan_name": "lifespan_failing_on_startup", + "message": "Failed during startup of lifespan_failing_on_startup", + "code": "RuntimeError.LifespanError.LifespanOnStartupError", + } + + +async def test_app_lifespan_with_error_on_shutdown( + failing_lifespan_manager: dict[str, Any], +): + app_lifespan = LifespanManager() + app_lifespan.add(failing_lifespan_manager["lifespan_failing_on_shutdown"]) + app = FastAPI(lifespan=app_lifespan) + + with pytest.raises(LifespanOnShutdownError) as err_info: + async with ASGILifespanManager(app): + ... 
+ + exception = err_info.value + assert failing_lifespan_manager["handle_error"].called + assert failing_lifespan_manager["startup_step"].called + assert not failing_lifespan_manager["shutdown_step"].called + assert exception.error_context() == { + "lifespan_name": "lifespan_failing_on_shutdown", + "message": "Failed during shutdown of lifespan_failing_on_shutdown", + "code": "RuntimeError.LifespanError.LifespanOnShutdownError", + } + + +async def test_lifespan_called_more_than_once(is_pdb_enabled: bool): + app_lifespan = LifespanManager() + + @app_lifespan.add + async def _one(_, state: State) -> AsyncIterator[State]: + called_state = mark_lifespace_called(state, "test_lifespan_one") + yield {"other": 0, **called_state} + + @app_lifespan.add + async def _two(_, state: State) -> AsyncIterator[State]: + ensure_lifespan_called(state, "test_lifespan_one") + + with pytest.raises(LifespanExpectedCalledError): + ensure_lifespan_called(state, "test_lifespan_three") + + called_state = mark_lifespace_called(state, "test_lifespan_two") + yield {"something": 0, **called_state} + + app_lifespan.add(_one) # added "by mistake" + + with pytest.raises(LifespanAlreadyCalledError) as err_info: + async with ASGILifespanManager( + FastAPI(lifespan=app_lifespan), + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ): + ... + + assert err_info.value.lifespan_name == "test_lifespan_one" diff --git a/packages/service-library/tests/fastapi/test_openapi.py b/packages/service-library/tests/fastapi/test_openapi.py index f8b5f5e1163..7df0ab63a9f 100644 --- a/packages/service-library/tests/fastapi/test_openapi.py +++ b/packages/service-library/tests/fastapi/test_openapi.py @@ -1,36 +1,54 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + import pytest import starlette.routing from fastapi.applications import FastAPI from fastapi.routing import APIRouter -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError +from openapi_spec_validator.exceptions import ( + OpenAPISpecValidatorError, # pylint: disable=no-name-in-module +) +from openapi_spec_validator.shortcuts import ( + get_validator_cls, # pylint: disable=no-name-in-module +) from servicelib.fastapi.openapi import ( override_fastapi_openapi_method, - redefine_operation_id_in_router, + set_operation_id_as_handler_function_name, ) def test_naming_operation_id(app: FastAPI): - redefine_operation_id_in_router(app.router, __name__) + set_operation_id_as_handler_function_name(app.router) for route in app.router.routes: if isinstance(route, APIRouter): - assert route.operation_id.startswith(__name__) + assert route.operation_id + assert "handler" not in route.operation_id else: # e.g. /docs etc assert isinstance(route, starlette.routing.Route) +@pytest.mark.xfail( + reason="fastapi unresolved issue. Waiting for review of new OAS update by PC" +) def test_exclusive_min_openapi_issue(app: FastAPI): # Tests patched issues is still unresolved https://github.com/tiangolo/fastapi/issues/240 # When this test fails, remove patch - with pytest.raises(OpenAPIValidationError): - validate_spec(app.openapi()) + # NOTE: With the latest update of openapi_spec_validator, now passes validation 3.1 but + # does not seem resolved. 
It was moved to https://github.com/tiangolo/fastapi/discussions/9140 + with pytest.raises(OpenAPISpecValidatorError): + specs = app.openapi() + openapi_validator_cls = get_validator_cls(specs) + openapi_validator_cls(specs) def test_overriding_openapi_method(app: FastAPI): assert not hasattr(app, "_original_openapi") - assert app.openapi.__doc__ is None + # assert app.openapi.__doc__ is None # PC why was this set to check that it is none? it's coming from the base fastapi applicaiton and now they provide some docs override_fastapi_openapi_method(app) @@ -39,10 +57,14 @@ def test_overriding_openapi_method(app: FastAPI): # override patches should now work openapi = app.openapi() - assert openapi and isinstance(openapi, dict) + assert openapi + assert isinstance(openapi, dict) - validate_spec(openapi) + openapi_validator_cls = get_validator_cls(openapi) + openapi_validator_cls(openapi) + # NOTE: https://github.com/tiangolo/fastapi/issues/240 now passes validation 3.1 but + # does not seem resolved. It was moved to https://github.com/tiangolo/fastapi/discussions/9140 params = openapi["paths"]["/data"]["get"]["parameters"] assert params == [ { diff --git a/packages/service-library/tests/fastapi/test_postgres_lifespan.py b/packages/service-library/tests/fastapi/test_postgres_lifespan.py new file mode 100644 index 00000000000..0c656c37187 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_postgres_lifespan.py @@ -0,0 +1,172 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterator +from typing import Annotated, Any + +import pytest +import servicelib.fastapi.postgres_lifespan +from asgi_lifespan import LifespanManager as ASGILifespanManager +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from pydantic import Field +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.fastapi.postgres_lifespan import ( + PostgresConfigurationError, + PostgresLifespanState, + postgres_database_lifespan, +) +from settings_library.application import BaseApplicationSettings +from settings_library.postgres import PostgresSettings + + +@pytest.fixture +def mock_create_async_engine_and_database_ready(mocker: MockerFixture) -> MockType: + return mocker.patch.object( + servicelib.fastapi.postgres_lifespan, + "create_async_engine_and_database_ready", + return_value=mocker.AsyncMock(), + ) + + +@pytest.fixture +def app_environment(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, PostgresSettings.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def app_lifespan( + app_environment: EnvVarsDict, + mock_create_async_engine_and_database_ready: MockType, +) -> LifespanManager: + assert app_environment + + class AppSettings(BaseApplicationSettings): + CATALOG_POSTGRES: Annotated[ + PostgresSettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + async def my_app_settings(app: FastAPI) -> AsyncIterator[State]: + app.state.settings = AppSettings.create_from_envs() + + yield { + PostgresLifespanState.POSTGRES_SETTINGS: app.state.settings.CATALOG_POSTGRES + } + + async def my_database_setup(app: FastAPI, state: State) -> AsyncIterator[State]: + app.state.my_db_engine = 
state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE]
+
+        yield {}
+
+    # compose lifespans
+    app_lifespan = LifespanManager()
+    app_lifespan.add(my_app_settings)
+
+    # postgres
+    app_lifespan.add(postgres_database_lifespan)
+    app_lifespan.add(my_database_setup)
+
+    return app_lifespan
+
+
+async def test_lifespan_postgres_database_in_an_app(
+    is_pdb_enabled: bool,
+    app_environment: EnvVarsDict,
+    mock_create_async_engine_and_database_ready: MockType,
+    app_lifespan: LifespanManager,
+):
+
+    app = FastAPI(lifespan=app_lifespan)
+
+    async with ASGILifespanManager(
+        app,
+        startup_timeout=None if is_pdb_enabled else 10,
+        shutdown_timeout=None if is_pdb_enabled else 10,
+    ) as asgi_manager:
+        # Verify that the async engine was created
+        mock_create_async_engine_and_database_ready.assert_called_once_with(
+            app.state.settings.CATALOG_POSTGRES
+        )
+
+        # Verify that the async engine is in the lifespan manager state
+        assert (
+            PostgresLifespanState.POSTGRES_ASYNC_ENGINE
+            in asgi_manager._state  # noqa: SLF001
+        )
+        assert app.state.my_db_engine
+        assert (
+            app.state.my_db_engine
+            == asgi_manager._state[  # noqa: SLF001
+                PostgresLifespanState.POSTGRES_ASYNC_ENGINE
+            ]
+        )
+
+        assert (
+            app.state.my_db_engine
+            == mock_create_async_engine_and_database_ready.return_value
+        )
+
+    # Verify that the engine was disposed
+    async_engine: Any = mock_create_async_engine_and_database_ready.return_value
+    async_engine.dispose.assert_called_once()
+
+
+async def test_lifespan_postgres_database_dispose_engine_on_failure(
+    is_pdb_enabled: bool,
+    app_environment: EnvVarsDict,
+    mock_create_async_engine_and_database_ready: MockType,
+    app_lifespan: LifespanManager,
+):
+    expected_msg = "my_faulty_lifespan error"
+
+    def raise_error():
+        raise RuntimeError(expected_msg)
+
+    @app_lifespan.add
+    async def my_faulty_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]:
+        assert PostgresLifespanState.POSTGRES_ASYNC_ENGINE in state
+        raise_error()
+        yield {}
+
+    app = FastAPI(lifespan=app_lifespan)
+
+    with pytest.raises(RuntimeError, match=expected_msg):
+        async with ASGILifespanManager(
+            app,
+            startup_timeout=None if is_pdb_enabled else 10,
+            shutdown_timeout=None if is_pdb_enabled else 10,
+        ):
+            ...
+
+    # Verify that the engine was disposed even if an error happened
+    async_engine: Any = mock_create_async_engine_and_database_ready.return_value
+    async_engine.dispose.assert_called_once()
+
+
+async def test_setup_postgres_database_with_empty_pg_settings(
+    is_pdb_enabled: bool,
+):
+    async def my_app_settings(app: FastAPI) -> AsyncIterator[State]:
+        yield {PostgresLifespanState.POSTGRES_SETTINGS: None}
+
+    app_lifespan = LifespanManager()
+    app_lifespan.add(my_app_settings)
+    app_lifespan.add(postgres_database_lifespan)
+
+    app = FastAPI(lifespan=app_lifespan)
+
+    with pytest.raises(PostgresConfigurationError, match="postgres cannot be disabled"):
+        async with ASGILifespanManager(
+            app,
+            startup_timeout=None if is_pdb_enabled else 10,
+            shutdown_timeout=None if is_pdb_enabled else 10,
+        ):
+            ...
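
A minimal wiring sketch of the postgres_database_lifespan usage exercised by the tests above, assuming a hypothetical AppSettings class whose POSTGRES field name is illustrative (not part of this changeset):

from collections.abc import AsyncIterator
from typing import Annotated

from fastapi import FastAPI
from fastapi_lifespan_manager import LifespanManager, State
from pydantic import Field
from servicelib.fastapi.postgres_lifespan import (
    PostgresLifespanState,
    postgres_database_lifespan,
)
from settings_library.application import BaseApplicationSettings
from settings_library.postgres import PostgresSettings


class AppSettings(BaseApplicationSettings):
    # hypothetical settings class; the POSTGRES field name is illustrative
    POSTGRES: Annotated[
        PostgresSettings, Field(json_schema_extra={"auto_default_from_env": True})
    ]


async def settings_lifespan(app: FastAPI) -> AsyncIterator[State]:
    # expose the postgres settings to downstream lifespans via the shared state
    app.state.settings = AppSettings.create_from_envs()
    yield {PostgresLifespanState.POSTGRES_SETTINGS: app.state.settings.POSTGRES}


async def database_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]:
    # pick up the async engine that postgres_database_lifespan placed in the state
    app.state.engine = state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE]
    yield {}


app_lifespan = LifespanManager()
app_lifespan.add(settings_lifespan)
app_lifespan.add(postgres_database_lifespan)  # creates and disposes the async engine
app_lifespan.add(database_lifespan)

app = FastAPI(lifespan=app_lifespan)
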
diff --git a/packages/service-library/tests/fastapi/test_prometheus_middleware.py b/packages/service-library/tests/fastapi/test_prometheus_middleware.py new file mode 100644 index 00000000000..9d67cc8ee0d --- /dev/null +++ b/packages/service-library/tests/fastapi/test_prometheus_middleware.py @@ -0,0 +1,31 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +from collections.abc import AsyncIterable + +import pytest +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from httpx import AsyncClient +from prometheus_client.openmetrics.exposition import CONTENT_TYPE_LATEST +from servicelib.fastapi.monitoring import setup_prometheus_instrumentation + + +@pytest.fixture +async def app(app: FastAPI) -> AsyncIterable[FastAPI]: + """ + Fixture that sets up the Prometheus middleware in the FastAPI app. + """ + setup_prometheus_instrumentation(app) + async with LifespanManager(app): + yield app + + +async def test_metrics_endpoint(client: AsyncClient, app: FastAPI): + """ + Test that the /metrics endpoint is available and returns Prometheus metrics. + """ + response = await client.get("/metrics") + assert response.status_code == 200 + assert response.headers["Content-Type"] == CONTENT_TYPE_LATEST + assert "# HELP" in response.text + assert "# TYPE" in response.text diff --git a/packages/service-library/tests/fastapi/test_rabbitmq.py b/packages/service-library/tests/fastapi/test_rabbitmq.py new file mode 100644 index 00000000000..b41a94097f2 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_rabbitmq.py @@ -0,0 +1,137 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from typing import AsyncIterable, Callable + +import pytest +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from models_library.rabbitmq_messages import LoggerRabbitMessage, RabbitMessageBase +from pydantic import ValidationError +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.fastapi.rabbitmq import get_rabbitmq_client, setup_rabbit +from servicelib.rabbitmq import BIND_TO_ALL_TOPICS, RabbitMQClient +from settings_library.rabbit import RabbitSettings +from tenacity import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "stop": stop_after_delay(30), + "wait": wait_fixed(0.1), +} + +# Selection of core and tool services started in this swarm fixture (integration) +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def rabbit_log_message(faker: Faker) -> LoggerRabbitMessage: + return LoggerRabbitMessage( + user_id=faker.pyint(min_value=1), + project_id=faker.uuid4(), + node_id=faker.uuid4(), + messages=faker.pylist(allowed_types=(str,)), + ) + + +@pytest.fixture(params=["rabbit_log_message"]) +def rabbit_message( + request: pytest.FixtureRequest, + rabbit_log_message: LoggerRabbitMessage, +) -> RabbitMessageBase: + return { + "rabbit_log_message": rabbit_log_message, + }[request.param] + + +@pytest.fixture +def disabled_rabbitmq( + rabbit_env_vars_dict: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +): + for key in rabbit_env_vars_dict: + 
rabbit_env_vars_dict[key] = "null" + setenvs_from_dict(monkeypatch, rabbit_env_vars_dict) + + +@pytest.fixture +def enabled_rabbitmq( + rabbit_env_vars_dict: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> RabbitSettings: + setenvs_from_dict(monkeypatch, rabbit_env_vars_dict) + return RabbitSettings.create_from_envs() + + +@pytest.fixture +async def initialized_app(app: FastAPI, is_pdb_enabled: bool) -> AsyncIterable[FastAPI]: + rabbit_settings: RabbitSettings | None = None + try: + rabbit_settings = RabbitSettings.create_from_envs() + setup_rabbit(app=app, settings=rabbit_settings, name="my_rabbitmq_client") + except ValidationError: + pass + async with LifespanManager( + app=app, + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ): + yield app + + +def test_rabbitmq_does_not_initialize_if_deactivated( + disabled_rabbitmq: None, + initialized_app: FastAPI, +): + with pytest.raises(AttributeError): + get_rabbitmq_client(initialized_app) + + +def test_rabbitmq_initializes( + enabled_rabbitmq: RabbitSettings, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "rabbitmq_client") + assert initialized_app.state.rabbitmq_client is not None + assert get_rabbitmq_client(initialized_app) == initialized_app.state.rabbitmq_client + + +async def test_post_message( + enabled_rabbitmq: RabbitSettings, + initialized_app: FastAPI, + rabbit_message: RabbitMessageBase, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocker: MockerFixture, +): + mocked_message_handler = mocker.AsyncMock(return_value=True) + consumer_rmq = create_rabbitmq_client("pytest_consumer") + await consumer_rmq.subscribe( + rabbit_message.channel_name, + mocked_message_handler, + topics=[BIND_TO_ALL_TOPICS] if rabbit_message.routing_key() else None, + ) + + producer_rmq = get_rabbitmq_client(initialized_app) + assert producer_rmq is not None + await producer_rmq.publish(rabbit_message.channel_name, rabbit_message) + + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + print( + f"--> checking for message in rabbit exchange {rabbit_message.channel_name}, {attempt.retry_state.retry_object.statistics}" + ) + mocked_message_handler.assert_called_once_with( + rabbit_message.model_dump_json().encode() + ) + print("... 
message received") diff --git a/packages/service-library/tests/fastapi/test_rabbitmq_lifespan.py b/packages/service-library/tests/fastapi/test_rabbitmq_lifespan.py new file mode 100644 index 00000000000..f143070da9e --- /dev/null +++ b/packages/service-library/tests/fastapi/test_rabbitmq_lifespan.py @@ -0,0 +1,161 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterator + +import pytest +import servicelib.fastapi.rabbitmq_lifespan +import servicelib.rabbitmq +from asgi_lifespan import LifespanManager as ASGILifespanManager +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from pydantic import Field +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.fastapi.rabbitmq_lifespan import ( + RabbitMQConfigurationError, + RabbitMQLifespanState, + rabbitmq_connectivity_lifespan, +) +from servicelib.rabbitmq import rabbitmq_rpc_client_context +from settings_library.application import BaseApplicationSettings +from settings_library.rabbit import RabbitSettings + + +@pytest.fixture +def mock_rabbitmq_connection(mocker: MockerFixture) -> MockType: + return mocker.patch.object( + servicelib.fastapi.rabbitmq_lifespan, + "wait_till_rabbitmq_responsive", + return_value=mocker.AsyncMock(), + ) + + +@pytest.fixture +def mock_rabbitmq_rpc_client_class(mocker: MockerFixture) -> MockType: + mock_rpc_client_instance = mocker.AsyncMock() + mocker.patch.object( + servicelib.rabbitmq._client_rpc.RabbitMQRPCClient, # noqa: SLF001 + "create", + return_value=mock_rpc_client_instance, + ) + mock_rpc_client_instance.close = mocker.AsyncMock() + return mock_rpc_client_instance + + +@pytest.fixture +def app_environment(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, RabbitSettings.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def app_lifespan( + app_environment: EnvVarsDict, + mock_rabbitmq_connection: MockType, + mock_rabbitmq_rpc_client_class: MockType, +) -> LifespanManager: + assert app_environment + + class AppSettings(BaseApplicationSettings): + RABBITMQ: RabbitSettings = Field( + ..., json_schema_extra={"auto_default_from_env": True} + ) + + # setup settings + async def my_app_settings(app: FastAPI) -> AsyncIterator[State]: + app.state.settings = AppSettings.create_from_envs() + + yield RabbitMQLifespanState( + RABBIT_SETTINGS=app.state.settings.RABBITMQ, + ).model_dump() + + # setup rpc-server using rabbitmq_rpc_client_context (yes, a "rpc_server" is built with an RabbitMQRpcClient) + async def my_app_rpc_server(app: FastAPI, state: State) -> AsyncIterator[State]: + assert "RABBIT_CONNECTIVITY_LIFESPAN_NAME" in state + + async with rabbitmq_rpc_client_context( + "rpc_server", app.state.settings.RABBITMQ + ) as rpc_server: + app.state.rpc_server = rpc_server + yield {} + + # setup rpc-client using rabbitmq_rpc_client_context + async def my_app_rpc_client(app: FastAPI, state: State) -> AsyncIterator[State]: + assert "RABBIT_CONNECTIVITY_LIFESPAN_NAME" in state + + async with rabbitmq_rpc_client_context( + "rpc_client", app.state.settings.RABBITMQ + ) as rpc_client: + app.state.rpc_client = rpc_client + yield {} + + app_lifespan = LifespanManager() + app_lifespan.add(my_app_settings) + 
app_lifespan.add(rabbitmq_connectivity_lifespan) + app_lifespan.add(my_app_rpc_server) + app_lifespan.add(my_app_rpc_client) + + assert not mock_rabbitmq_connection.called + assert not mock_rabbitmq_rpc_client_class.called + + return app_lifespan + + +async def test_lifespan_rabbitmq_in_an_app( + is_pdb_enabled: bool, + app_environment: EnvVarsDict, + mock_rabbitmq_connection: MockType, + mock_rabbitmq_rpc_client_class: MockType, + app_lifespan: LifespanManager, +): + app = FastAPI(lifespan=app_lifespan) + + async with ASGILifespanManager( + app, + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ): + # Verify that RabbitMQ responsiveness was checked + mock_rabbitmq_connection.assert_called_once_with( + app.state.settings.RABBITMQ.dsn + ) + + # Verify that RabbitMQ settings are in the lifespan manager state + assert app.state.settings.RABBITMQ + assert app.state.rpc_server + assert app.state.rpc_client + + # No explicit shutdown logic for RabbitMQ in this case + assert mock_rabbitmq_rpc_client_class.close.called + + +async def test_lifespan_rabbitmq_with_invalid_settings( + is_pdb_enabled: bool, +): + async def my_app_settings(app: FastAPI) -> AsyncIterator[State]: + yield {"RABBIT_SETTINGS": None} + + app_lifespan = LifespanManager() + app_lifespan.add(my_app_settings) + app_lifespan.add(rabbitmq_connectivity_lifespan) + + app = FastAPI(lifespan=app_lifespan) + + with pytest.raises(RabbitMQConfigurationError, match="Invalid RabbitMQ") as excinfo: + async with ASGILifespanManager( + app, + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ): + ... + + exception = excinfo.value + assert isinstance(exception, RabbitMQConfigurationError) + assert exception.validation_error + assert exception.state["RABBIT_SETTINGS"] is None diff --git a/packages/service-library/tests/fastapi/test_redis_lifespan.py b/packages/service-library/tests/fastapi/test_redis_lifespan.py new file mode 100644 index 00000000000..8a30055c393 --- /dev/null +++ b/packages/service-library/tests/fastapi/test_redis_lifespan.py @@ -0,0 +1,130 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterator +from typing import Annotated, Any + +import pytest +import servicelib.fastapi.redis_lifespan +from asgi_lifespan import LifespanManager as ASGILifespanManager +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from pydantic import Field +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.fastapi.redis_lifespan import ( + RedisConfigurationError, + RedisLifespanState, + redis_client_sdk_lifespan, +) +from settings_library.application import BaseApplicationSettings +from settings_library.redis import RedisDatabase, RedisSettings + + +@pytest.fixture +def mock_redis_client_sdk(mocker: MockerFixture) -> MockType: + return mocker.patch.object( + servicelib.fastapi.redis_lifespan, + "RedisClientSDK", + return_value=mocker.AsyncMock(), + ) + + +@pytest.fixture +def app_environment(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, RedisSettings.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def app_lifespan( + app_environment: 
EnvVarsDict, + mock_redis_client_sdk: MockType, +) -> LifespanManager: + assert app_environment + + class AppSettings(BaseApplicationSettings): + CATALOG_REDIS: Annotated[ + RedisSettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + async def my_app_settings(app: FastAPI) -> AsyncIterator[State]: + app.state.settings = AppSettings.create_from_envs() + + yield RedisLifespanState( + REDIS_SETTINGS=app.state.settings.CATALOG_REDIS, + REDIS_CLIENT_NAME="test_client", + REDIS_CLIENT_DB=RedisDatabase.LOCKS, + ).model_dump() + + app_lifespan = LifespanManager() + app_lifespan.add(my_app_settings) + app_lifespan.add(redis_client_sdk_lifespan) + + assert not mock_redis_client_sdk.called + + return app_lifespan + + +async def test_lifespan_redis_database_in_an_app( + is_pdb_enabled: bool, + app_environment: EnvVarsDict, + mock_redis_client_sdk: MockType, + app_lifespan: LifespanManager, +): + app = FastAPI(lifespan=app_lifespan) + + async with ASGILifespanManager( + app, + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ) as asgi_manager: + # Verify that the Redis client SDK was created + mock_redis_client_sdk.assert_called_once_with( + app.state.settings.CATALOG_REDIS.build_redis_dsn(RedisDatabase.LOCKS), + client_name="test_client", + ) + + # Verify that the Redis client SDK is in the lifespan manager state + assert "REDIS_CLIENT_SDK" in asgi_manager._state # noqa: SLF001 + assert app.state.settings.CATALOG_REDIS + assert ( + asgi_manager._state["REDIS_CLIENT_SDK"] # noqa: SLF001 + == mock_redis_client_sdk.return_value + ) + + # Verify that the Redis client SDK was shut down + redis_client: Any = mock_redis_client_sdk.return_value + redis_client.shutdown.assert_called_once() + + +async def test_lifespan_redis_database_with_invalid_settings( + is_pdb_enabled: bool, +): + async def my_app_settings(app: FastAPI) -> AsyncIterator[State]: + yield {"REDIS_SETTINGS": None} + + app_lifespan = LifespanManager() + app_lifespan.add(my_app_settings) + app_lifespan.add(redis_client_sdk_lifespan) + + app = FastAPI(lifespan=app_lifespan) + + with pytest.raises(RedisConfigurationError, match="Invalid redis") as excinfo: + async with ASGILifespanManager( + app, + startup_timeout=None if is_pdb_enabled else 10, + shutdown_timeout=None if is_pdb_enabled else 10, + ): + ... 
+ + exception = excinfo.value + assert isinstance(exception, RedisConfigurationError) + assert exception.validation_error + assert exception.state["REDIS_SETTINGS"] is None diff --git a/packages/service-library/tests/fastapi/test_request_decorators.py b/packages/service-library/tests/fastapi/test_request_decorators.py index 312684437e7..18f6267cf33 100644 --- a/packages/service-library/tests/fastapi/test_request_decorators.py +++ b/packages/service-library/tests/fastapi/test_request_decorators.py @@ -6,9 +6,10 @@ import subprocess import sys import time +from collections.abc import Callable, Iterator from contextlib import contextmanager from pathlib import Path -from typing import Callable, Iterator, NamedTuple +from typing import NamedTuple import pytest import requests diff --git a/packages/service-library/tests/fastapi/test_tracing.py b/packages/service-library/tests/fastapi/test_tracing.py new file mode 100644 index 00000000000..8e58dfd75dd --- /dev/null +++ b/packages/service-library/tests/fastapi/test_tracing.py @@ -0,0 +1,169 @@ +# pylint: disable=all + + +import importlib +import random +import string +from collections.abc import Callable, Iterator +from typing import Any + +import pip +import pytest +from fastapi import FastAPI +from pydantic import ValidationError +from servicelib.fastapi.tracing import ( + get_tracing_instrumentation_lifespan, +) +from settings_library.tracing import TracingSettings + + +@pytest.fixture +def mocked_app() -> FastAPI: + return FastAPI(title="opentelemetry example") + + +@pytest.fixture +def tracing_settings_in(request: pytest.FixtureRequest) -> dict[str, Any]: + return request.param + + +@pytest.fixture() +def set_and_clean_settings_env_vars( + monkeypatch: pytest.MonkeyPatch, tracing_settings_in: Callable[[], dict[str, Any]] +) -> None: + endpoint_mocked = False + if tracing_settings_in[0]: + endpoint_mocked = True + monkeypatch.setenv( + "TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT", f"{tracing_settings_in[0]}" + ) + port_mocked = False + if tracing_settings_in[1]: + port_mocked = True + monkeypatch.setenv( + "TRACING_OPENTELEMETRY_COLLECTOR_PORT", f"{tracing_settings_in[1]}" + ) + yield + if endpoint_mocked: + monkeypatch.delenv("TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT") + if port_mocked: + monkeypatch.delenv("TRACING_OPENTELEMETRY_COLLECTOR_PORT") + + +@pytest.mark.parametrize( + "tracing_settings_in", + [ + ("http://opentelemetry-collector", 4318), + ("http://opentelemetry-collector", "4318"), + ], + indirect=True, +) +async def test_valid_tracing_settings( + mocked_app: FastAPI, + set_and_clean_settings_env_vars: Callable[[], None], + tracing_settings_in: Callable[[], dict[str, Any]], + uninstrument_opentelemetry: Iterator[None], +): + tracing_settings = TracingSettings() + async for _ in get_tracing_instrumentation_lifespan( + tracing_settings=tracing_settings, + service_name="Mock-Openetlemetry-Pytest", + )(app=mocked_app): + async for _ in get_tracing_instrumentation_lifespan( + tracing_settings=tracing_settings, + service_name="Mock-Openetlemetry-Pytest", + )(app=mocked_app): + pass + + +@pytest.mark.parametrize( + "tracing_settings_in", + [ + ("http://opentelemetry-collector", 80), + ("http://opentelemetry-collector", 1238712936), + ("opentelemetry-collector", 4318), + ("httsdasp://ot@##el-collector", 4318), + (" !@#$%^&*()[]{};:,<>?\\|`~+=/'\"", 4318), + # The following exceeds max DNS name length + ( + "".join(random.choice(string.ascii_letters) for _ in range(300)), + "1238712936", + ), # noqa: S311 + ], + indirect=True, +) +async def 
test_invalid_tracing_settings( + mocked_app: FastAPI, + set_and_clean_settings_env_vars: Callable[[], None], + tracing_settings_in: Callable[[], dict[str, Any]], + uninstrument_opentelemetry: Iterator[None], +): + app = mocked_app + with pytest.raises((BaseException, ValidationError, TypeError)): # noqa: PT012 + tracing_settings = TracingSettings() + async for _ in get_tracing_instrumentation_lifespan( + tracing_settings=tracing_settings, + service_name="Mock-Openetlemetry-Pytest", + )(app=app): + pass + + +def install_package(package): + pip.main(["install", package]) + + +def uninstall_package(package): + pip.main(["uninstall", "-y", package]) + + +@pytest.fixture(scope="function") +def manage_package(request): + package, importname = request.param + install_package(package) + yield importname + uninstall_package(package) + + +@pytest.mark.skip( + reason="this test installs always the latest version of the package which creates conflicts." +) +@pytest.mark.parametrize( + "tracing_settings_in, manage_package", + [ + ( + ("http://opentelemetry-collector", 4318), + ( + "opentelemetry-instrumentation-botocore", + "opentelemetry.instrumentation.botocore", + ), + ), + ( + ("http://opentelemetry-collector", "4318"), + ( + "opentelemetry-instrumentation-aiopg", + "opentelemetry.instrumentation.aiopg", + ), + ), + ], + indirect=True, +) +async def test_tracing_setup_package_detection( + mocked_app: FastAPI, + set_and_clean_settings_env_vars: Callable[[], None], + tracing_settings_in: Callable[[], dict[str, Any]], + uninstrument_opentelemetry: Iterator[None], + manage_package, +): + package_name = manage_package + importlib.import_module(package_name) + tracing_settings = TracingSettings() + async for _ in get_tracing_instrumentation_lifespan( + tracing_settings=tracing_settings, + service_name="Mock-Openetlemetry-Pytest", + )(app=mocked_app): + # idempotency check + async for _ in get_tracing_instrumentation_lifespan( + tracing_settings=tracing_settings, + service_name="Mock-Openetlemetry-Pytest", + )(app=mocked_app): + pass diff --git a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py index 95fa95b7607..0cd0a1676cf 100644 --- a/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py +++ b/packages/service-library/tests/long_running_tasks/test_long_running_tasks_task.py @@ -6,8 +6,9 @@ import asyncio import urllib.parse +from collections.abc import AsyncIterator from datetime import datetime -from typing import AsyncIterator, Final +from typing import Any, Final import pytest from faker import Faker @@ -17,14 +18,25 @@ TaskNotCompletedError, TaskNotFoundError, ) -from servicelib.long_running_tasks._models import TaskProgress, TaskResult, TaskStatus +from servicelib.long_running_tasks._models import ( + ProgressPercent, + TaskProgress, + TaskResult, + TaskStatus, +) from servicelib.long_running_tasks._task import TasksManager, start_task -from tenacity._asyncio import AsyncRetrying +from tenacity import TryAgain +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -# UTILS +_RETRY_PARAMS: dict[str, Any] = { + "reraise": True, + "wait": wait_fixed(0.1), + "stop": stop_after_delay(60), + "retry": retry_if_exception_type((AssertionError, TryAgain)), +} async def a_background_task( @@ -35,9 +47,10 @@ async def a_background_task( """sleeps 
and raises an error or returns 42""" for i in range(total_sleep): await asyncio.sleep(1) - task_progress.update(percent=float((i + 1) / total_sleep)) + task_progress.update(percent=ProgressPercent((i + 1) / total_sleep)) if raise_when_finished: - raise RuntimeError("raised this error as instructed") + msg = "raised this error as instructed" + raise RuntimeError(msg) return 42 @@ -49,7 +62,8 @@ async def fast_background_task(task_progress: TaskProgress) -> int: async def failing_background_task(task_progress: TaskProgress): """this task does nothing and returns a constant""" - raise RuntimeError("failing asap") + msg = "failing asap" + raise RuntimeError(msg) TEST_CHECK_STALE_INTERVAL_S: Final[float] = 1 @@ -65,32 +79,31 @@ async def tasks_manager() -> AsyncIterator[TasksManager]: await tasks_manager.close() -async def test_unchecked_task_is_auto_removed(tasks_manager: TasksManager): +@pytest.mark.parametrize("check_task_presence_before", [True, False]) +async def test_task_is_auto_removed( + tasks_manager: TasksManager, check_task_presence_before: bool +): task_id = start_task( tasks_manager, a_background_task, raise_when_finished=False, total_sleep=10 * TEST_CHECK_STALE_INTERVAL_S, ) - await asyncio.sleep(2 * TEST_CHECK_STALE_INTERVAL_S + 1) - with pytest.raises(TaskNotFoundError): - tasks_manager.get_task_status(task_id, with_task_context=None) - with pytest.raises(TaskNotFoundError): - tasks_manager.get_task_result(task_id, with_task_context=None) - with pytest.raises(TaskNotFoundError): - tasks_manager.get_task_result_old(task_id) + if check_task_presence_before: + # immediately after starting the task is still there + task_status = tasks_manager.get_task_status(task_id, with_task_context=None) + assert task_status + + # wait for task to be automatically removed + # meaning no calls via the manager methods are received + async for attempt in AsyncRetrying(**_RETRY_PARAMS): + with attempt: + for tasks in tasks_manager._tasks_groups.values(): # noqa: SLF001 + if task_id in tasks: + msg = "wait till no element is found any longer" + raise TryAgain(msg) -async def test_checked_once_task_is_auto_removed(tasks_manager: TasksManager): - task_id = start_task( - tasks_manager, - a_background_task, - raise_when_finished=False, - total_sleep=10 * TEST_CHECK_STALE_INTERVAL_S, - ) - # check once (different branch in code) - tasks_manager.get_task_status(task_id, with_task_context=None) - await asyncio.sleep(2 * TEST_CHECK_STALE_INTERVAL_S + 1) with pytest.raises(TaskNotFoundError): tasks_manager.get_task_status(task_id, with_task_context=None) with pytest.raises(TaskNotFoundError): @@ -106,12 +119,7 @@ async def test_checked_task_is_not_auto_removed(tasks_manager: TasksManager): raise_when_finished=False, total_sleep=5 * TEST_CHECK_STALE_INTERVAL_S, ) - async for attempt in AsyncRetrying( - reraise=True, - wait=wait_fixed(TEST_CHECK_STALE_INTERVAL_S / 10.0), - stop=stop_after_delay(60), - retry=retry_if_exception_type(AssertionError), - ): + async for attempt in AsyncRetrying(**_RETRY_PARAMS): with attempt: status = tasks_manager.get_task_status(task_id, with_task_context=None) assert status.done, f"task {task_id} not complete" @@ -172,8 +180,10 @@ async def not_unique_task(task_progress: TaskProgress): start_task(tasks_manager=tasks_manager, task=not_unique_task) -def test_get_task_id(): - assert TasksManager._create_task_id("") != TasksManager._create_task_id("") +def test_get_task_id(faker): + obj1 = TasksManager.create_task_id(faker.word()) # noqa: SLF001 + obj2 = 
TasksManager.create_task_id(faker.word()) # noqa: SLF001 + assert obj1 != obj2 async def test_get_status(tasks_manager: TasksManager): @@ -220,12 +230,7 @@ async def test_get_result_missing(tasks_manager: TasksManager): async def test_get_result_finished_with_error(tasks_manager: TasksManager): task_id = start_task(tasks_manager=tasks_manager, task=failing_background_task) # wait for result - async for attempt in AsyncRetrying( - reraise=True, - wait=wait_fixed(0.1), - stop=stop_after_delay(60), - retry=retry_if_exception_type(AssertionError), - ): + async for attempt in AsyncRetrying(**_RETRY_PARAMS): with attempt: assert tasks_manager.get_task_status(task_id, with_task_context=None).done @@ -236,12 +241,7 @@ async def test_get_result_finished_with_error(tasks_manager: TasksManager): async def test_get_result_old_finished_with_error(tasks_manager: TasksManager): task_id = start_task(tasks_manager=tasks_manager, task=failing_background_task) # wait for result - async for attempt in AsyncRetrying( - reraise=True, - wait=wait_fixed(0.1), - stop=stop_after_delay(60), - retry=retry_if_exception_type(AssertionError), - ): + async for attempt in AsyncRetrying(**_RETRY_PARAMS): with attempt: assert tasks_manager.get_task_status(task_id, with_task_context=None).done @@ -249,7 +249,7 @@ async def test_get_result_old_finished_with_error(tasks_manager: TasksManager): assert task_result.result is None assert task_result.error is not None assert task_result.error.startswith(f"Task {task_id} finished with exception:") - assert 'raise RuntimeError("failing asap")' in task_result.error + assert "failing asap" in task_result.error async def test_get_result_task_was_cancelled_multiple_times( @@ -365,7 +365,7 @@ async def test_list_tasks(tasks_manager: TasksManager): NUM_TASKS = 10 task_ids = [] for _ in range(NUM_TASKS): - task_ids.append( + task_ids.append( # noqa: PERF401 start_task( tasks_manager=tasks_manager, task=a_background_task, diff --git a/packages/service-library/tests/rabbitmq/conftest.py b/packages/service-library/tests/rabbitmq/conftest.py new file mode 100644 index 00000000000..e107d848daa --- /dev/null +++ b/packages/service-library/tests/rabbitmq/conftest.py @@ -0,0 +1,80 @@ +from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine +from typing import cast + +import aiodocker +import arrow +import pytest +from faker import Faker +from models_library.rabbitmq_basic_types import RPCNamespace +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + + +@pytest.fixture +async def rpc_client( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("pytest_rpc_client") + + +@pytest.fixture +async def rpc_server( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("pytest_rpc_server") + + +@pytest.fixture +def namespace() -> RPCNamespace: + return RPCNamespace.from_entries({f"test{i}": f"test{i}" for i in range(8)}) + + +@pytest.fixture(autouse=True) +async def cleanup_check_rabbitmq_server_has_no_errors( + request: pytest.FixtureRequest, +) -> AsyncIterator[None]: + now = arrow.utcnow() + yield + if "no_cleanup_check_rabbitmq_server_has_no_errors" in request.keywords: + return + print("--> checking for errors/warnings in rabbitmq logs...") + async with aiodocker.Docker() as docker_client: + containers = await docker_client.containers.list(filters=({"name": ["rabbit"]})) + assert len(containers) == 1, "missing rabbit 
container!"
+        rabbit_container = containers[0]
+
+        all_logs = await cast(
+            Coroutine,
+            rabbit_container.log(
+                stdout=True,
+                stderr=True,
+                follow=False,
+                since=now.timestamp(),
+            ),
+        )
+
+        warning_logs = [log for log in all_logs if "warning" in log]
+        error_logs = [log for log in all_logs if "error" in log]
+        RABBIT_SKIPPED_WARNINGS = [
+            "rebuilding indices from scratch",
+        ]
+        filtered_warning_logs = [
+            log
+            for log in warning_logs
+            if all(w not in log for w in RABBIT_SKIPPED_WARNINGS)
+        ]
+        assert (
+            not filtered_warning_logs
+        ), f"warning(s) found in rabbitmq logs for {request.function}"
+        assert not error_logs, f"error(s) found in rabbitmq logs for {request.function}"
+    print("<-- no errors found in rabbitmq server logs, that's great. good job!")
+
+
+@pytest.fixture
+def random_exchange_name() -> Callable[[], str]:
+    def _creator() -> str:
+        faker = (
+            Faker()
+        )  # NOTE: this ensures the faker seed is new each time, since we do not clean the exchanges
+        return f"pytest_fake_exchange_{faker.pystr()}"
+
+    return _creator
diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq.py b/packages/service-library/tests/rabbitmq/test_rabbitmq.py
new file mode 100644
index 00000000000..d4c6c4b8ebb
--- /dev/null
+++ b/packages/service-library/tests/rabbitmq/test_rabbitmq.py
@@ -0,0 +1,757 @@
+# pylint:disable=unused-variable
+# pylint:disable=unused-argument
+# pylint:disable=redefined-outer-name
+# pylint:disable=protected-access
+# pylint:disable=too-many-statements
+
+
+import asyncio
+from collections.abc import AsyncIterator, Awaitable, Callable
+from dataclasses import dataclass
+from typing import Any, Final
+from unittest import mock
+
+import aio_pika
+import pytest
+from faker import Faker
+from pytest_mock.plugin import MockerFixture
+from servicelib.rabbitmq import (
+    BIND_TO_ALL_TOPICS,
+    ConsumerTag,
+    QueueName,
+    RabbitMQClient,
+    _client,
+)
+from servicelib.rabbitmq._client import _DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS
+from settings_library.rabbit import RabbitSettings
+from tenacity.asyncio import AsyncRetrying
+from tenacity.retry import retry_if_exception_type
+from tenacity.stop import stop_after_delay
+from tenacity.wait import wait_fixed
+
+pytest_simcore_core_services_selection = [
+    "rabbit",
+]
+
+_ON_ERROR_DELAY_S: Final[float] = 0.1
+
+
+@pytest.fixture
+def rabbit_client_name(faker: Faker) -> str:
+    return faker.pystr()
+
+
+async def test_rabbit_client(
+    rabbit_client_name: str,
+    rabbit_service: RabbitSettings,
+):
+    client = RabbitMQClient(rabbit_client_name, rabbit_service)
+    assert client
+    # check it is correctly initialized
+    assert client._connection_pool  # noqa: SLF001
+    assert not client._connection_pool.is_closed  # noqa: SLF001
+    assert client._channel_pool  # noqa: SLF001
+    assert not client._channel_pool.is_closed  # noqa: SLF001
+    assert client.client_name == rabbit_client_name
+    assert client.settings == rabbit_service
+    await client.close()
+    assert client._connection_pool  # noqa: SLF001
+    assert client._connection_pool.is_closed  # noqa: SLF001
+
+
+@pytest.fixture
+def mocked_message_parser(mocker: MockerFixture) -> mock.AsyncMock:
+    return mocker.AsyncMock(return_value=True)
+
+
+@dataclass(frozen=True)
+class PytestRabbitMessage:
+    message: str
+    topic: str
+
+    def routing_key(self) -> str:
+        return self.topic
+
+    def body(self) -> bytes:
+        return self.message.encode()
+
+
+@pytest.fixture
+def random_rabbit_message(
+    faker: Faker,
+) -> Callable[..., PytestRabbitMessage]:
+    def _creator(**kwargs: dict[str, Any]) -> 
PytestRabbitMessage: + msg_config = {"message": faker.text(), "topic": None, **kwargs} + + return PytestRabbitMessage(**msg_config) + + return _creator + + +@pytest.fixture +def on_message_spy(mocker: MockerFixture) -> mock.Mock: + return mocker.spy(_client, "_on_message") + + +def _get_spy_report(mock: mock.Mock) -> dict[str, set[int]]: + results: dict[str, set[int]] = {} + + for entry in mock.call_args_list: + message: aio_pika.abc.AbstractIncomingMessage = entry.args[2] + assert message.routing_key is not None + + if message.routing_key not in results: + results[message.routing_key] = set() + + count = _client._get_x_death_count(message) # noqa: SLF001 + results[message.routing_key].add(count) + + return results + + +async def _setup_publisher_and_subscriber( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + max_requeue_retry: int, + topics: list[str] | None, + message_handler: Callable[[Any], Awaitable[bool]], +) -> int: + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + + exchange_name = f"{random_exchange_name()}" + + await consumer.subscribe( + exchange_name, + message_handler, + topics=topics, + exclusive_queue=False, + unexpected_error_max_attempts=max_requeue_retry, + unexpected_error_retry_delay_s=_ON_ERROR_DELAY_S, + ) + + if topics is None: + message = random_rabbit_message() + await publisher.publish(exchange_name, message) + else: + for topic in topics: + message = random_rabbit_message(topic=topic) + await publisher.publish(exchange_name, message) + + topics_count: int = 1 if topics is None else len(topics) + return topics_count + + +async def _assert_wait_for_messages( + on_message_spy: mock.Mock, expected_results: int +) -> None: + total_seconds_to_wait = expected_results * _ON_ERROR_DELAY_S * 2 + print(f"Will wait for messages for {total_seconds_to_wait} seconds") + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(total_seconds_to_wait), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + assert len(on_message_spy.call_args_list) == expected_results + + # wait some more time to make sure retry mechanism did not trigger + await asyncio.sleep(_ON_ERROR_DELAY_S * 3) + assert len(on_message_spy.call_args_list) == expected_results + + +async def _assert_message_received( + mocked_message_parser: mock.AsyncMock, + expected_call_count: int, + expected_message: PytestRabbitMessage | None = None, +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + print( + f"--> waiting for rabbitmq message [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) + assert mocked_message_parser.call_count == expected_call_count + if expected_call_count == 1: + assert expected_message + mocked_message_parser.assert_called_once_with( + expected_message.message.encode() + ) + elif expected_call_count == 0: + mocked_message_parser.assert_not_called() + else: + assert expected_message + mocked_message_parser.assert_any_call(expected_message.message.encode()) + print( + f"<-- rabbitmq message received after [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) + + +_TOPICS: Final[list[list[str] | None]] = [ + None, + ["one"], + ["one", "two"], +] + + +@pytest.mark.parametrize("max_requeue_retry", 
[0, 1, 3, 10]) +@pytest.mark.parametrize("topics", _TOPICS) +async def test_subscribe_to_failing_message_handler( + on_message_spy: mock.Mock, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + max_requeue_retry: int, + topics: list[str] | None, +): + async def _faulty_message_handler(message: Any) -> bool: + msg = f"Always fail. Received message {message}" + raise RuntimeError(msg) + + topics_count = await _setup_publisher_and_subscriber( + create_rabbitmq_client, + random_exchange_name, + random_rabbit_message, + max_requeue_retry, + topics, + _faulty_message_handler, + ) + + expected_results = (max_requeue_retry + 1) * topics_count + await _assert_wait_for_messages(on_message_spy, expected_results) + + report = _get_spy_report(on_message_spy) + routing_keys: list[str] = [""] if topics is None else topics + assert report == {k: set(range(max_requeue_retry + 1)) for k in routing_keys} + + +@pytest.mark.parametrize("topics", _TOPICS) +async def test_subscribe_fail_then_success( + on_message_spy: mock.Mock, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + topics: list[str] | None, +): + message_status: dict[str, bool] = {} + + async def _fail_once_then_succeed(message: Any) -> bool: + if message not in message_status: + message_status[message] = False + if not message_status[message]: + message_status[message] = True + return False + return True + + topics_count = await _setup_publisher_and_subscriber( + create_rabbitmq_client, + random_exchange_name, + random_rabbit_message, + _DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS, + topics, + _fail_once_then_succeed, + ) + + expected_results = 2 * topics_count + await _assert_wait_for_messages(on_message_spy, expected_results) + + report = _get_spy_report(on_message_spy) + routing_keys: list[str] = [""] if topics is None else topics + assert report == {k: set(range(2)) for k in routing_keys} + + # check messages as expected + original_message_count = 0 + requeued_message_count = 0 + for entry in on_message_spy.call_args_list: + message = entry.args[2] + if message.headers == {}: + original_message_count += 1 + if ( + message.headers + and "x-death" in message.headers + and message.headers["x-death"][0]["count"] == 1 + ): + requeued_message_count += 1 + + assert original_message_count == topics_count + assert requeued_message_count == topics_count + + +@pytest.mark.parametrize("topics", _TOPICS) +async def test_subscribe_always_returns_fails_stops( + on_message_spy: mock.Mock, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + topics: list[str] | None, +): + async def _always_returning_fail(_: Any) -> bool: + return False + + topics_count = await _setup_publisher_and_subscriber( + create_rabbitmq_client, + random_exchange_name, + random_rabbit_message, + _DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS, + topics, + _always_returning_fail, + ) + + expected_results = (_DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS + 1) * topics_count + await _assert_wait_for_messages(on_message_spy, expected_results) + + report = _get_spy_report(on_message_spy) + routing_keys: list[str] = [""] if topics is None else topics + assert report == { + k: set(range(_DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS + 1)) for k in routing_keys + } + + 
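For orientation, here is a minimal consumer-side sketch of the retry behaviour these tests exercise, written against the same servicelib.rabbitmq API used above. The exchange name, handler body, and retry values are illustrative assumptions, not taken from the diff; only the subscribe()/handler conventions mirror the tested code (a handler returning True acks the message, while returning False or raising triggers delayed redeliveries up to unexpected_error_max_attempts).

# Illustrative sketch (not part of the diff): exchange name and retry values are assumptions.
from servicelib.rabbitmq import RabbitMQClient


async def _handle(body: bytes) -> bool:
    # Returning True acknowledges the message; returning False or raising an
    # exception causes a delayed redelivery, up to unexpected_error_max_attempts.
    print(f"received: {body!r}")
    return True


async def consume(client: RabbitMQClient) -> None:
    # Mirrors the keyword arguments used by the tests above.
    await client.subscribe(
        "pytest_fake_exchange_example",
        _handle,
        exclusive_queue=False,
        unexpected_error_max_attempts=3,
        unexpected_error_retry_delay_s=0.1,
    )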
+@pytest.mark.parametrize("topics", _TOPICS) +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() +async def test_publish_with_no_registered_subscriber( + on_message_spy: mock.Mock, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + mocked_message_parser: mock.AsyncMock, + topics: list[str] | None, +): + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + + exchange_name = f"{random_exchange_name()}" + + ttl_s: float = 0.1 + topics_count: int = 1 if topics is None else len(topics) + + async def _publish_random_message() -> None: + if topics is None: + message = random_rabbit_message() + await publisher.publish(exchange_name, message) + + else: + for topic in topics: + message = random_rabbit_message(topic=topic) + await publisher.publish(exchange_name, message) + + async def _subscribe_consumer_to_queue() -> tuple[QueueName, ConsumerTag]: + return await consumer.subscribe( + exchange_name, + mocked_message_parser, + topics=topics, + exclusive_queue=False, + message_ttl=int(ttl_s * 1000), + unexpected_error_max_attempts=_DEFAULT_UNEXPECTED_ERROR_MAX_ATTEMPTS, + unexpected_error_retry_delay_s=ttl_s, + ) + + async def _unsubscribe_consumer( + queue_name: QueueName, consumer_tag: ConsumerTag + ) -> None: + await consumer.unsubscribe_consumer(queue_name, consumer_tag) + + # CASE 1 (subscribe immediately after publishing message) + + consumer_1 = await _subscribe_consumer_to_queue() + await _unsubscribe_consumer(*consumer_1) + await _publish_random_message() + # reconnect immediately + consumer_2 = await _subscribe_consumer_to_queue() + # expected to receive a message (one per topic) + await _assert_wait_for_messages(on_message_spy, 1 * topics_count) + + # CASE 2 (no subscriber attached when publishing) + on_message_spy.reset_mock() + + await _unsubscribe_consumer(*consumer_2) + await _publish_random_message() + # wait for message to expire (will be dropped) + await asyncio.sleep(ttl_s * 2) + _consumer_3 = await _subscribe_consumer_to_queue() + + # wait for a message to be possibly delivered + await asyncio.sleep(ttl_s * 2) + # nothing changed from before + await _assert_wait_for_messages(on_message_spy, 0) + + +async def test_rabbit_client_pub_sub_message_is_lost_if_no_consumer_present( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + message = random_rabbit_message() + + exchange_name = random_exchange_name() + await publisher.publish(exchange_name, message) + await asyncio.sleep(0) # ensure context switch + await consumer.subscribe(exchange_name, mocked_message_parser) + await _assert_message_received(mocked_message_parser, 0) + + +async def test_rabbit_client_pub_sub( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + message = random_rabbit_message() + + exchange_name = random_exchange_name() + await consumer.subscribe(exchange_name, mocked_message_parser) + await publisher.publish(exchange_name, message) + await 
_assert_message_received(mocked_message_parser, 1, message) + + +@pytest.mark.parametrize("num_subs", [10]) +async def test_rabbit_client_pub_many_subs( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocker: MockerFixture, + random_rabbit_message: Callable[..., PytestRabbitMessage], + num_subs: int, +): + consumers = (create_rabbitmq_client(f"consumer_{n}") for n in range(num_subs)) + mocked_message_parsers = [ + mocker.AsyncMock(return_value=True) for _ in range(num_subs) + ] + + publisher = create_rabbitmq_client("publisher") + message = random_rabbit_message() + exchange_name = random_exchange_name() + await asyncio.gather( + *( + consumer.subscribe(exchange_name, parser) + for consumer, parser in zip(consumers, mocked_message_parsers, strict=True) + ) + ) + + await publisher.publish(exchange_name, message) + await asyncio.gather( + *( + _assert_message_received(parser, 1, message) + for parser in mocked_message_parsers + ) + ) + + +async def test_rabbit_client_pub_sub_republishes_if_exception_raised( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + + message = random_rabbit_message() + + def _raise_once_then_true(*args, **kwargs): + _raise_once_then_true.calls += 1 + + if _raise_once_then_true.calls == 1: + msg = "this is a test!" + raise KeyError(msg) + return _raise_once_then_true.calls != 2 + + exchange_name = random_exchange_name() + _raise_once_then_true.calls = 0 + mocked_message_parser.side_effect = _raise_once_then_true + await consumer.subscribe(exchange_name, mocked_message_parser) + await publisher.publish(exchange_name, message) + await _assert_message_received(mocked_message_parser, 3, message) + + +@pytest.fixture +async def ensure_queue_deletion( + create_rabbitmq_client: Callable[[str], RabbitMQClient] +) -> AsyncIterator[Callable[[QueueName], None]]: + created_queues = set() + + def _(queue_name: QueueName) -> None: + created_queues.add(queue_name) + + yield _ + + client = create_rabbitmq_client("ensure_queue_deletion") + await asyncio.gather(*(client.unsubscribe(q) for q in created_queues)) + + +@pytest.mark.parametrize("defined_queue_name", [None, "pytest-queue"]) +@pytest.mark.parametrize("num_subs", [10]) +async def test_pub_sub_with_non_exclusive_queue( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocker: MockerFixture, + random_rabbit_message: Callable[..., PytestRabbitMessage], + num_subs: int, + defined_queue_name: QueueName | None, + ensure_queue_deletion: Callable[[QueueName], None], +): + consumers = (create_rabbitmq_client(f"consumer_{n}") for n in range(num_subs)) + mocked_message_parsers = [ + mocker.AsyncMock(return_value=True) for _ in range(num_subs) + ] + + publisher = create_rabbitmq_client("publisher") + message = random_rabbit_message() + exchange_name = random_exchange_name() + list_queue_name_consumer_mappings = await asyncio.gather( + *( + consumer.subscribe( + exchange_name, + parser, + exclusive_queue=False, + non_exclusive_queue_name=defined_queue_name, + ) + for consumer, parser in zip(consumers, mocked_message_parsers, strict=True) + ) + ) + for queue_name, _ in list_queue_name_consumer_mappings: + assert ( + queue_name == exchange_name + if defined_queue_name is None + 
else defined_queue_name + ) + ensure_queue_deletion(queue_name) + ensure_queue_deletion(f"delayed_{queue_name}") + await publisher.publish(exchange_name, message) + # only one consumer should have gotten the message here and the others not + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + total_call_count = 0 + for parser in mocked_message_parsers: + total_call_count += parser.call_count + assert total_call_count == 1, "too many messages" + + +def test_rabbit_pub_sub_performance( + benchmark, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + message = random_rabbit_message() + + exchange_name = random_exchange_name() + asyncio.get_event_loop().run_until_complete( + consumer.subscribe(exchange_name, mocked_message_parser) + ) + + async def async_fct_to_test(): + await publisher.publish(exchange_name, message) + await _assert_message_received(mocked_message_parser, 1, message) + mocked_message_parser.reset_mock() + + def run_test_async(): + asyncio.get_event_loop().run_until_complete(async_fct_to_test()) + + benchmark.pedantic(run_test_async, iterations=1, rounds=10) + + +async def test_rabbit_pub_sub_with_topic( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocker: MockerFixture, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + exchange_name = f"{random_exchange_name()}_topic" + critical_message = random_rabbit_message(topic="pytest.red.critical") + debug_message = random_rabbit_message(topic="pytest.orange.debug") + publisher = create_rabbitmq_client("publisher") + + all_receiving_consumer = create_rabbitmq_client("all_receiving_consumer") + all_receiving_mocked_message_parser = mocker.AsyncMock(return_value=True) + await all_receiving_consumer.subscribe( + exchange_name, all_receiving_mocked_message_parser, topics=[BIND_TO_ALL_TOPICS] + ) + + only_critical_consumer = create_rabbitmq_client("only_critical_consumer") + only_critical_mocked_message_parser = mocker.AsyncMock(return_value=True) + await only_critical_consumer.subscribe( + exchange_name, only_critical_mocked_message_parser, topics=["*.*.critical"] + ) + + orange_and_critical_consumer = create_rabbitmq_client( + "orange_and_critical_consumer" + ) + orange_and_critical_mocked_message_parser = mocker.AsyncMock(return_value=True) + await orange_and_critical_consumer.subscribe( + exchange_name, + orange_and_critical_mocked_message_parser, + topics=["*.*.critical", "*.orange.*"], + ) + + # check now that topic is working + await publisher.publish(exchange_name, critical_message) + await publisher.publish(exchange_name, debug_message) + + await _assert_message_received( + all_receiving_mocked_message_parser, 2, critical_message + ) + await _assert_message_received( + all_receiving_mocked_message_parser, 2, debug_message + ) + await _assert_message_received( + only_critical_mocked_message_parser, 1, critical_message + ) + await _assert_message_received( + orange_and_critical_mocked_message_parser, 2, critical_message + ) + await _assert_message_received( + orange_and_critical_mocked_message_parser, 2, debug_message + ) + + +async def test_rabbit_pub_sub_bind_and_unbind_topics( + 
create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + exchange_name = f"{random_exchange_name()}_topic" + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + severities = ["debug", "info", "warning", "critical"] + messages = {sev: random_rabbit_message(topic=f"pytest.{sev}") for sev in severities} + + # send 1 message of each type + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + + # we should get no messages since no one was subscribed + queue_name, consumer_tag = await consumer.subscribe( + exchange_name, mocked_message_parser, topics=[] + ) + await _assert_message_received(mocked_message_parser, 0) + + # now we should also not get anything since we are not interested in any topic + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 0) + + # we are interested in warnings and critical + await consumer.add_topics(exchange_name, topics=["*.warning", "*.critical"]) + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 2, messages["critical"]) + await _assert_message_received(mocked_message_parser, 2, messages["warning"]) + mocked_message_parser.reset_mock() + # adding again the same topics makes no difference, we should still have 2 messages + await consumer.add_topics(exchange_name, topics=["*.warning"]) + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 2, messages["critical"]) + await _assert_message_received(mocked_message_parser, 2, messages["warning"]) + mocked_message_parser.reset_mock() + + # after unsubscribing, we do not receive warnings anymore + await consumer.remove_topics(exchange_name, topics=["*.warning"]) + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 1, messages["critical"]) + mocked_message_parser.reset_mock() + + # after unsubscribing something that does not exist, we still receive the same things + await consumer.remove_topics(exchange_name, topics=[]) + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 1, messages["critical"]) + mocked_message_parser.reset_mock() + + # after unsubscribing we receive nothing anymore + await consumer.unsubscribe(queue_name) + await asyncio.gather( + *(publisher.publish(exchange_name, m) for m in messages.values()) + ) + await _assert_message_received(mocked_message_parser, 0) + + +async def test_rabbit_adding_topics_to_a_fanout_exchange( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, + random_rabbit_message: Callable[..., PytestRabbitMessage], +): + exchange_name = f"{random_exchange_name()}_fanout" + message = random_rabbit_message() + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + queue_name, _ = await consumer.subscribe(exchange_name, mocked_message_parser) + await publisher.publish(exchange_name, message) + await 
_assert_message_received(mocked_message_parser, 1, message) + mocked_message_parser.reset_mock() + # this changes nothing on a FANOUT exchange + await consumer.add_topics(exchange_name, topics=["some_topics"]) + await publisher.publish(exchange_name, message) + await _assert_message_received(mocked_message_parser, 1, message) + mocked_message_parser.reset_mock() + # this changes nothing on a FANOUT exchange + await consumer.remove_topics(exchange_name, topics=["some_topics"]) + await publisher.publish(exchange_name, message) + await _assert_message_received(mocked_message_parser, 1, message) + mocked_message_parser.reset_mock() + # this will do something + await consumer.unsubscribe(queue_name) + await publisher.publish(exchange_name, message) + await _assert_message_received(mocked_message_parser, 0) + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() +async def test_rabbit_not_using_the_same_exchange_type_raises( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, +): + exchange_name = f"{random_exchange_name()}_fanout" + client = create_rabbitmq_client("consumer") + # this will create a FANOUT exchange + await client.subscribe(exchange_name, mocked_message_parser) + # now do a second subscription with topics, which will create a TOPICS exchange + with pytest.raises(aio_pika.exceptions.ChannelPreconditionFailed): + await client.subscribe(exchange_name, mocked_message_parser, topics=[]) + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() +async def test_unsubscribe_consumer( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_exchange_name: Callable[[], str], + mocked_message_parser: mock.AsyncMock, +): + exchange_name = f"{random_exchange_name()}" + client = create_rabbitmq_client("consumer") + queue_name, consumer_tag = await client.subscribe( + exchange_name, mocked_message_parser, exclusive_queue=False + ) + # Unsubscribe just the consumer, the queue will still be there + await client.unsubscribe_consumer(queue_name, consumer_tag) + # Unsubscribe the queue + await client.unsubscribe(queue_name) + with pytest.raises(aio_pika.exceptions.ChannelNotFoundEntity): + await client.unsubscribe(queue_name) diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_connection.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_connection.py new file mode 100644 index 00000000000..ba7576e3027 --- /dev/null +++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_connection.py @@ -0,0 +1,177 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + +import asyncio +from collections.abc import Callable +from contextlib import AbstractAsyncContextManager +from dataclasses import dataclass +from typing import Any + +import docker +import pytest +import requests +from faker import Faker +from pydantic import HttpUrl +from servicelib.rabbitmq import RabbitMQClient +from settings_library.rabbit import RabbitSettings +from tenacity import retry, retry_if_exception_type +from tenacity.asyncio import AsyncRetrying +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +async def test_rabbit_client_lose_connection( + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + create_rabbitmq_client: Callable[[str], RabbitMQClient], +): + rabbit_client =
create_rabbitmq_client("pinger") + assert await rabbit_client.ping() is True + async with paused_container("rabbit"): + # check that connection was lost + async for attempt in AsyncRetrying( + stop=stop_after_delay(15), wait=wait_fixed(0.5), reraise=True + ): + with attempt: + assert await rabbit_client.ping() is False + # now the connection is back + assert await rabbit_client.ping() is True + + +@dataclass(frozen=True) +class PytestRabbitMessage: + message: str + topic: str + + def routing_key(self) -> str: + return self.topic + + def body(self) -> bytes: + return self.message.encode() + + +@pytest.fixture +def random_rabbit_message( + faker: Faker, +) -> Callable[..., PytestRabbitMessage]: + def _creator(**kwargs: dict[str, Any]) -> PytestRabbitMessage: + msg_config = {"message": faker.text(), "topic": None, **kwargs} + + return PytestRabbitMessage(**msg_config) + + return _creator + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() +async def test_rabbit_client_with_paused_container( + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + random_exchange_name: Callable[[], str], + random_rabbit_message: Callable[..., PytestRabbitMessage], + create_rabbitmq_client: Callable[[str], RabbitMQClient], +): + rabbit_client = create_rabbitmq_client("pinger") + assert await rabbit_client.ping() is True + exchange_name = random_exchange_name() + message = random_rabbit_message() + await rabbit_client.publish(exchange_name, message) + async with paused_container("rabbit"): + # check that connection was lost + with pytest.raises(asyncio.TimeoutError): + await rabbit_client.publish(exchange_name, message) + await rabbit_client.publish(exchange_name, message) + + +def _get_rabbitmq_api_params(rabbit_service: RabbitSettings) -> dict[str, Any]: + return { + "scheme": "http", + "username": rabbit_service.RABBIT_USER, + "password": rabbit_service.RABBIT_PASSWORD.get_secret_value(), + "host": rabbit_service.RABBIT_HOST, + "port": 15672, + } + + +@retry( + reraise=True, + retry=retry_if_exception_type(AssertionError), + wait=wait_fixed(1), + stop=stop_after_delay(10), +) +def _assert_rabbitmq_has_connections( + rabbit_service: RabbitSettings, num_connections: int +) -> list[str]: + rabbit_list_connections_url = HttpUrl.build( + **_get_rabbitmq_api_params(rabbit_service), + path="/api/connections/", + ) + response = requests.get(rabbit_list_connections_url, timeout=5) + response.raise_for_status() + list_connections = response.json() + assert len(list_connections) == num_connections + return [conn["name"] for conn in list_connections] + + +@retry( + reraise=True, + retry=retry_if_exception_type(AssertionError), + wait=wait_fixed(1), + stop=stop_after_delay(10), +) +def _assert_connection_state( + rabbit_service: RabbitSettings, connection_name: str, *, state: str +) -> None: + rabbit_specific_connection_url = HttpUrl.build( + **_get_rabbitmq_api_params(rabbit_service), + path=f"/api/connections/{connection_name}", + ) + response = requests.get(rabbit_specific_connection_url, timeout=5) + response.raise_for_status() + connection = response.json() + assert connection["state"] == state + + +def _close_rabbitmq_connection( + rabbit_service: RabbitSettings, connection_name: str +) -> None: + rabbit_specific_connection_url = HttpUrl.build( + **_get_rabbitmq_api_params(rabbit_service), + path=f"/api/connections/{connection_name}", + ) + response = requests.delete(rabbit_specific_connection_url, timeout=5) + response.raise_for_status() + + +@retry( + reraise=True, + 
retry=retry_if_exception_type(AssertionError), + wait=wait_fixed(1), + stop=stop_after_delay(20), +) +async def _assert_rabbit_client_state( + rabbit_client: RabbitMQClient, *, healthy: bool +) -> None: + assert rabbit_client.healthy == healthy + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() +async def test_rabbit_server_closes_connection( + rabbit_service: RabbitSettings, + create_rabbitmq_client: Callable[[str, int], RabbitMQClient], + docker_client: docker.client.DockerClient, +): + _assert_rabbitmq_has_connections(rabbit_service, 0) + rabbit_client = create_rabbitmq_client("tester", heartbeat=2) + message = PytestRabbitMessage(message="blahblah", topic="topic") + await rabbit_client.publish("test", message) + await asyncio.sleep(5) + connection_names = _assert_rabbitmq_has_connections(rabbit_service, 1) + _close_rabbitmq_connection(rabbit_service, connection_names[0]) + # since the heartbeat during testing is low, the connection disappears fast + _assert_rabbitmq_has_connections(rabbit_service, 0) + + await _assert_rabbit_client_state(rabbit_client, healthy=False) diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc.py new file mode 100644 index 00000000000..40417c4d4c3 --- /dev/null +++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc.py @@ -0,0 +1,343 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +from collections.abc import Awaitable +from typing import Any, Final + +import pytest +from models_library.rabbitmq_basic_types import RPCMethodName +from pydantic import NonNegativeInt, ValidationError +from servicelib.rabbitmq import ( + RabbitMQRPCClient, + RemoteMethodNotRegisteredError, + RPCNamespace, + RPCNotInitializedError, +) +from settings_library.rabbit import RabbitSettings + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +MULTIPLE_REQUESTS_COUNT: Final[NonNegativeInt] = 100 + + +async def add_me(*, x: Any, y: Any) -> Any: + return x + y + # NOTE: types are not enforced + # result's type will on the caller side will be the one it has here + + +class CustomClass: + def __init__(self, x: Any, y: Any) -> None: + self.x = x + self.y = y + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} x={self.x}, y={self.y}>" + + def __eq__(self, other: "CustomClass") -> bool: + return self.x == other.x and self.y == other.y + + def __add__(self, other: "CustomClass") -> "CustomClass": + return CustomClass(x=self.x + other.x, y=self.y + other.y) + + +@pytest.mark.parametrize( + "x,y,expected_result,expected_type", + [ + pytest.param(12, 20, 32, int, id="expect_int"), + pytest.param(12, 20.0, 32.0, float, id="expect_float"), + pytest.param(b"123b", b"xyz0", b"123bxyz0", bytes, id="expect_bytes"), + pytest.param([1, 2], [2, 3], [1, 2, 2, 3], list, id="list_addition"), + pytest.param( + CustomClass(2, 1), + CustomClass(1, 2), + CustomClass(3, 3), + CustomClass, + id="custom_class", + ), + pytest.param( + CustomClass([{"p", "1"}], [{"h": 1}]), + CustomClass([{3, b"bytes"}], [{"b": 2}]), + CustomClass([{"p", "1"}, {3, b"bytes"}], [{"h": 1}, {"b": 2}]), + CustomClass, + id="custom_class_complex_objects", + ), + ], +) +async def test_base_rpc_pattern( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + x: Any, + y: Any, + expected_result: Any, + expected_type: type, + namespace: RPCNamespace, +): + await rpc_server.register_handler(namespace, RPCMethodName(add_me.__name__), add_me) + + request_result 
= await rpc_client.request( + namespace, RPCMethodName(add_me.__name__), x=x, y=y + ) + assert request_result == expected_result + assert type(request_result) == expected_type + + await rpc_server.unregister_handler(add_me) + + +async def test_multiple_requests_sequence_same_replier_and_requester( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + await rpc_server.register_handler(namespace, RPCMethodName(add_me.__name__), add_me) + + for i in range(MULTIPLE_REQUESTS_COUNT): + assert ( + await rpc_client.request( + namespace, RPCMethodName(add_me.__name__), x=1 + i, y=2 + i + ) + == 3 + i * 2 + ) + + +async def test_multiple_requests_parallel_same_replier_and_requester( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + await rpc_server.register_handler(namespace, RPCMethodName(add_me.__name__), add_me) + + expected_result: list[int] = [] + requests: list[Awaitable] = [] + for i in range(MULTIPLE_REQUESTS_COUNT): + requests.append( + rpc_client.request( + namespace, RPCMethodName(add_me.__name__), x=1 + i, y=2 + i + ) + ) + expected_result.append(3 + i * 2) + + assert await asyncio.gather(*requests) == expected_result + + +async def test_multiple_requests_parallel_same_replier_different_requesters( + rabbit_service: RabbitSettings, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + await rpc_server.register_handler(namespace, RPCMethodName(add_me.__name__), add_me) + + clients: list[RabbitMQRPCClient] = [] + for _ in range(MULTIPLE_REQUESTS_COUNT): + client = await RabbitMQRPCClient.create(client_name="", settings=rabbit_service) + clients.append(client) + + # worst case scenario + requests: list[Awaitable] = [] + expected_result: list[int] = [] + for i in range(MULTIPLE_REQUESTS_COUNT): + client = clients[i] + requests.append( + client.request(namespace, RPCMethodName(add_me.__name__), x=1 + i, y=2 + i) + ) + expected_result.append(3 + i * 2) + + assert await asyncio.gather(*requests) == expected_result + + # worst case scenario + await asyncio.gather(*[c.close() for c in clients]) + + +async def test_raise_error_if_not_started( + rabbit_service: RabbitSettings, namespace: RPCNamespace +): + requester = RabbitMQRPCClient("", settings=rabbit_service) + with pytest.raises(RPCNotInitializedError): + await requester.request(namespace, RPCMethodName(add_me.__name__), x=1, y=2) + + # expect not to raise error + await requester.close() + + replier = RabbitMQRPCClient("", settings=rabbit_service) + with pytest.raises(RPCNotInitializedError): + await replier.register_handler( + namespace, RPCMethodName(add_me.__name__), add_me + ) + + with pytest.raises(RPCNotInitializedError): + await replier.unregister_handler(add_me) + + # expect not to raise error + await replier.close() + + +async def _assert_event_not_registered( + rpc_client: RabbitMQRPCClient, namespace: RPCNamespace +): + with pytest.raises(RemoteMethodNotRegisteredError) as exec_info: + assert ( + await rpc_client.request( + namespace, RPCMethodName(add_me.__name__), x=1, y=3 + ) + == 3 + ) + assert ( + f"Could not find a remote method named: '{namespace}.{RPCMethodName(add_me.__name__)}'" + in f"{exec_info.value}" + ) + + +async def test_replier_not_started( + rpc_client: RabbitMQRPCClient, namespace: RPCNamespace +): + await _assert_event_not_registered(rpc_client, namespace) + + +async def test_replier_handler_not_registered( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + 
await _assert_event_not_registered(rpc_client, namespace) + + +async def test_request_is_missing_arguments( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + await rpc_server.register_handler(namespace, RPCMethodName(add_me.__name__), add_me) + + # missing 1 argument + with pytest.raises(TypeError) as exec_info: + await rpc_client.request(namespace, RPCMethodName(add_me.__name__), x=1) + assert ( + f"{RPCMethodName(add_me.__name__)}() missing 1 required keyword-only argument: 'y'" + in f"{exec_info.value}" + ) + + # missing all arguments + with pytest.raises(TypeError) as exec_info: + await rpc_client.request(namespace, RPCMethodName(add_me.__name__)) + assert ( + f"{RPCMethodName(add_me.__name__)}() missing 2 required keyword-only arguments: 'x' and 'y'" + in f"{exec_info.value}" + ) + + +async def test_requester_cancels_long_running_request_or_requester_takes_too_much_to_respond( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + async def _long_running(*, time_to_sleep: float) -> None: + await asyncio.sleep(time_to_sleep) + + await rpc_server.register_handler( + namespace, RPCMethodName(_long_running.__name__), _long_running + ) + + with pytest.raises(asyncio.TimeoutError): + await rpc_client.request( + namespace, + RPCMethodName(_long_running.__name__), + time_to_sleep=3, + timeout_s=1, + ) + + +async def test_replier_handler_raises_error( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + async def _raising_error() -> None: + msg = "failed as requested" + raise RuntimeError(msg) + + await rpc_server.register_handler( + namespace, RPCMethodName(_raising_error.__name__), _raising_error + ) + + with pytest.raises(RuntimeError) as exec_info: + await rpc_client.request(namespace, RPCMethodName(_raising_error.__name__)) + assert f"{exec_info.value}" == "failed as requested" + + +async def test_replier_responds_with_not_locally_defined_object_instance( + rpc_client: RabbitMQRPCClient, + rpc_server: RabbitMQRPCClient, + namespace: RPCNamespace, +): + async def _replier_scope() -> None: + class Custom: + def __init__(self, x: Any) -> None: + self.x = x + + async def _get_custom(x: Any) -> Custom: + return Custom(x) + + await rpc_server.register_handler( + namespace, RPCMethodName("a_name"), _get_custom + ) + + async def _requester_scope() -> None: + # NOTE: what is happening here? + # the replier will say that it cannot pickle a local object and send it over + # the server's request will just time out. I would prefer a cleaner interface. + # There is no chance of intercepting this message.
+ with pytest.raises( + AttributeError, match=r"Can't pickle local object .+..Custom" + ): + await rpc_client.request( + namespace, RPCMethodName("a_name"), x=10, timeout_s=1 + ) + + await _replier_scope() + await _requester_scope() + + +async def test_register_handler_under_same_name_raises_error( + rpc_server: RabbitMQRPCClient, namespace: RPCNamespace +): + async def _a_handler() -> None: + pass + + async def _another_handler() -> None: + pass + + await rpc_server.register_handler(namespace, RPCMethodName("same_name"), _a_handler) + with pytest.raises(RuntimeError) as exec_info: + await rpc_server.register_handler( + namespace, RPCMethodName("same_name"), _another_handler + ) + assert "Method name already used for" in f"{exec_info.value}" + + +@pytest.mark.parametrize( + "handler_name, expect_fail", + [ + ("a" * 254, True), + ("a" * 253, False), + ], +) +async def test_get_namespaced_method_name_max_length( + rpc_server: RabbitMQRPCClient, handler_name: str, expect_fail: bool +): + async def _a_handler() -> None: + pass + + if expect_fail: + with pytest.raises( + ValidationError, match="String should have at most 255 characters" + ): + await rpc_server.register_handler( + RPCNamespace("a"), RPCMethodName(handler_name), _a_handler + ) + else: + await rpc_server.register_handler( + RPCNamespace("a"), RPCMethodName(handler_name), _a_handler + ) diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py new file mode 100644 index 00000000000..72ecc9a8aa6 --- /dev/null +++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_interfaces_async_jobs.py @@ -0,0 +1,252 @@ +import asyncio +import datetime +from collections.abc import AsyncIterator +from dataclasses import dataclass, field + +import pytest +from faker import Faker +from models_library.api_schemas_rpc_async_jobs.async_jobs import ( + AsyncJobGet, + AsyncJobId, + AsyncJobNameData, + AsyncJobResult, + AsyncJobStatus, +) +from models_library.api_schemas_rpc_async_jobs.exceptions import JobMissingError +from models_library.progress_bar import ProgressReport +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from pydantic import TypeAdapter +from servicelib.async_utils import cancel_wait_task +from servicelib.rabbitmq import RabbitMQRPCClient, RemoteMethodNotRegisteredError +from servicelib.rabbitmq.rpc_interfaces.async_jobs.async_jobs import ( + list_jobs, + submit, + submit_and_wait, +) + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def method_name(faker: Faker) -> RPCMethodName: + return TypeAdapter(RPCMethodName).validate_python(faker.word()) + + +@pytest.fixture +def job_id_data(faker: Faker) -> AsyncJobNameData: + return AsyncJobNameData( + user_id=faker.pyint(min_value=1), + product_name=faker.word(), + ) + + +@pytest.fixture +def job_id(faker: Faker) -> AsyncJobId: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +async def async_job_rpc_server( # noqa: C901 + rpc_server: RabbitMQRPCClient, + faker: Faker, + namespace: RPCNamespace, + method_name: RPCMethodName, +) -> AsyncIterator[None]: + async def _slow_task() -> None: + await asyncio.sleep(2) + + @dataclass + class FakeServer: + tasks: list[asyncio.Task] = field(default_factory=list) + + def _get_task(self, job_id: AsyncJobId) -> asyncio.Task: + for task in self.tasks: + if task.get_name() == f"{job_id}": + return task + raise JobMissingError(job_id=f"{job_id}") + + async def 
status( + self, job_id: AsyncJobId, job_id_data: AsyncJobNameData + ) -> AsyncJobStatus: + assert job_id_data + task = self._get_task(job_id) + return AsyncJobStatus( + job_id=job_id, + progress=ProgressReport(actual_value=1 if task.done() else 0.3), + done=task.done(), + ) + + async def cancel( + self, job_id: AsyncJobId, job_id_data: AsyncJobNameData + ) -> None: + assert job_id + assert job_id_data + task = self._get_task(job_id) + task.cancel() + + async def result( + self, job_id: AsyncJobId, job_id_data: AsyncJobNameData + ) -> AsyncJobResult: + assert job_id_data + task = self._get_task(job_id) + assert task.done() + return AsyncJobResult( + result={ + "data": task.result(), + "job_id": job_id, + "job_id_data": job_id_data, + } + ) + + async def list_jobs( + self, filter_: str, job_id_data: AsyncJobNameData + ) -> list[AsyncJobGet]: + assert job_id_data + assert filter_ is not None + + return [ + AsyncJobGet( + job_id=TypeAdapter(AsyncJobId).validate_python(t.get_name()), + job_name="fake_job_name", + ) + for t in self.tasks + ] + + async def submit(self, job_id_data: AsyncJobNameData) -> AsyncJobGet: + assert job_id_data + job_id = faker.uuid4(cast_to=None) + self.tasks.append(asyncio.create_task(_slow_task(), name=f"{job_id}")) + return AsyncJobGet(job_id=job_id, job_name="fake_job_name") + + async def setup(self) -> None: + for m in (self.status, self.cancel, self.result): + await rpc_server.register_handler( + namespace, RPCMethodName(m.__name__), m + ) + await rpc_server.register_handler( + namespace, RPCMethodName(self.list_jobs.__name__), self.list_jobs + ) + + await rpc_server.register_handler(namespace, method_name, self.submit) + + fake_server = FakeServer() + await fake_server.setup() + + yield + + for task in fake_server.tasks: + await cancel_wait_task(task) + + +@pytest.mark.parametrize("method", ["result", "status", "cancel"]) +async def test_async_jobs_methods( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + job_id_data: AsyncJobNameData, + job_id: AsyncJobId, + method: str, +): + from servicelib.rabbitmq.rpc_interfaces.async_jobs import async_jobs + + async_jobs_method = getattr(async_jobs, method) + with pytest.raises(JobMissingError): + await async_jobs_method( + rpc_client, + rpc_namespace=namespace, + job_id=job_id, + job_id_data=job_id_data, + ) + + +async def test_list_jobs( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + method_name: RPCMethodName, + job_id_data: AsyncJobNameData, +): + await list_jobs( + rpc_client, + rpc_namespace=namespace, + filter_="", + job_id_data=job_id_data, + ) + + +async def test_submit( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + method_name: RPCMethodName, + job_id_data: AsyncJobNameData, +): + await submit( + rpc_client, + rpc_namespace=namespace, + method_name=method_name, + job_id_data=job_id_data, + ) + + +async def test_submit_with_invalid_method_name( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + job_id_data: AsyncJobNameData, +): + with pytest.raises(RemoteMethodNotRegisteredError): + await submit( + rpc_client, + rpc_namespace=namespace, + method_name=RPCMethodName("invalid_method_name"), + job_id_data=job_id_data, + ) + + +async def test_submit_and_wait_properly_timesout( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + method_name: 
RPCMethodName, + job_id_data: AsyncJobNameData, +): + with pytest.raises(TimeoutError): # noqa: PT012 + async for _job_composed_result in submit_and_wait( + rpc_client, + rpc_namespace=namespace, + method_name=method_name, + job_id_data=job_id_data, + client_timeout=datetime.timedelta(seconds=0.1), + ): + pass + + +async def test_submit_and_wait( + async_job_rpc_server: RabbitMQRPCClient, + rpc_client: RabbitMQRPCClient, + namespace: RPCNamespace, + method_name: RPCMethodName, + job_id_data: AsyncJobNameData, +): + async for job_composed_result in submit_and_wait( + rpc_client, + rpc_namespace=namespace, + method_name=method_name, + job_id_data=job_id_data, + client_timeout=datetime.timedelta(seconds=10), + ): + if not job_composed_result.done: + with pytest.raises(ValueError, match="No result ready!"): + await job_composed_result.result() + assert job_composed_result.done + assert job_composed_result.status.progress.actual_value == 1 + assert await job_composed_result.result() == AsyncJobResult( + result={ + "data": None, + "job_id": job_composed_result.status.job_id, + "job_id_data": job_id_data, + } + ) diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_router.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_router.py new file mode 100644 index 00000000000..913d5854cd9 --- /dev/null +++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_rpc_router.py @@ -0,0 +1,129 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Awaitable, Callable +from typing import cast + +import pytest +from common_library.errors_classes import OsparcErrorMixin +from faker import Faker +from models_library.rabbitmq_basic_types import RPCMethodName +from servicelib.rabbitmq import ( + RabbitMQRPCClient, + RPCInterfaceError, + RPCNamespace, + RPCRouter, + RPCServerError, +) + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +class MyServiceError(OsparcErrorMixin, Exception): ... + + +class MyDomainError(MyServiceError): + msg_template = "This could happen" + + +def raise_my_expected_error(): + raise MyDomainError(user_id=33, project_id=3) + + +router = RPCRouter() # Server-side + + +class MyExpectedRpcError(RPCInterfaceError): ... + + +@router.expose() +async def a_str_method( + a_global_arg: str, *, a_global_kwarg: str, a_specific_kwarg: str +) -> str: + return f"{a_global_arg}, that was a winner! {a_global_kwarg} {a_specific_kwarg}" + + +@router.expose() +async def an_int_method(a_global_arg: str, *, a_global_kwarg: str) -> int: + return 34 + + +@router.expose(reraise_if_error_type=(MyExpectedRpcError,)) +async def raising_expected_error(a_global_arg: str, *, a_global_kwarg: str): + try: + raise_my_expected_error() + except MyDomainError as exc: + # NOTE how it is adapted from a domain exception to an interface exception + raise MyExpectedRpcError.from_domain_error(exc) from exc + + +@router.expose() +async def raising_unexpected_error(a_global_arg: str, *, a_global_kwarg: str) -> int: + msg = "This is not good!" + raise ValueError(msg) + + +@pytest.fixture +def router_namespace(faker: Faker) -> RPCNamespace: + return cast(RPCNamespace, faker.pystr()) + + +async def test_exposed_methods( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], + router_namespace: RPCNamespace, +): + rpc_client = await rabbitmq_rpc_client("client") + rpc_server = await rabbitmq_rpc_client("server") + + a_arg = "The A-Team" + a_kwargs = "What about McGiver?" 
+ a_specific_kwarg = "Yeah, it was actually good, too!" + + await rpc_server.register_router( + router, router_namespace, a_arg, a_global_kwarg=a_kwargs + ) + + rpc_result = await rpc_client.request( + router_namespace, + RPCMethodName(a_str_method.__name__), + a_specific_kwarg=a_specific_kwarg, + ) + assert isinstance(rpc_result, str) + result = rpc_result + assert result == f"{a_arg}, that was a winner! {a_kwargs} {a_specific_kwarg}" + + rpc_result = await rpc_client.request( + router_namespace, + RPCMethodName(an_int_method.__name__), + ) + assert isinstance(rpc_result, int) + result = rpc_result + assert result == 34 + + # unexpected errors are turned into RPCServerError + with pytest.raises(RPCServerError) as exc_info: + await rpc_client.request( + router_namespace, + RPCMethodName(raising_unexpected_error.__name__), + ) + assert "This is not good!" in f"{exc_info.value}" + assert "builtins.ValueError" in f"{exc_info.value}" + + # This error was classified int he interface + with pytest.raises(RPCInterfaceError) as exc_info: + await rpc_client.request( + router_namespace, + RPCMethodName(raising_expected_error.__name__), + ) + + assert isinstance(exc_info.value, MyExpectedRpcError) + assert exc_info.value.error_context() == { + "message": "This could happen [MyServiceError.MyDomainError]", + "code": "RuntimeError.BaseRPCError.RPCServerError", + "domain_error_message": "This could happen", + "domain_error_code": "MyServiceError.MyDomainError", + "user_id": 33, + "project_id": 3, + } diff --git a/packages/service-library/tests/rabbitmq/test_rabbitmq_utils.py b/packages/service-library/tests/rabbitmq/test_rabbitmq_utils.py new file mode 100644 index 00000000000..2615a92ac56 --- /dev/null +++ b/packages/service-library/tests/rabbitmq/test_rabbitmq_utils.py @@ -0,0 +1,43 @@ +import pytest +from pydantic import ValidationError +from servicelib.rabbitmq import RPCNamespace + + +@pytest.mark.parametrize( + "entries, expected", + [ + ({"test": "b"}, "test_b"), + ({"hello": "1", "b": "2"}, "b_2-hello_1"), + ], +) +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() # no rabbitmq instance running +def test_rpc_namespace_from_entries(entries: dict[str, str], expected: str): + assert RPCNamespace.from_entries(entries) == expected + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() # no rabbitmq instance running +def test_rpc_namespace_sorts_elements(): + assert RPCNamespace.from_entries({"1": "a", "2": "b"}) == RPCNamespace.from_entries( + {"2": "b", "1": "a"} + ) + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() # no rabbitmq instance running +def test_rpc_namespace_too_long(): + with pytest.raises(ValidationError) as exec_info: + RPCNamespace.from_entries({f"test{i}": f"test{i}" for i in range(20)}) + assert "String should have at most 252 characters" in f"{exec_info.value}" + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() # no rabbitmq instance running +def test_rpc_namespace_too_short(): + with pytest.raises(ValidationError) as exec_info: + RPCNamespace.from_entries({}) + assert "String should have at least 1 character" in f"{exec_info.value}" + + +@pytest.mark.no_cleanup_check_rabbitmq_server_has_no_errors() # no rabbitmq instance running +def test_rpc_namespace_invalid_symbols(): + with pytest.raises(ValidationError) as exec_info: + RPCNamespace.from_entries({"test": "@"}) + assert "String should match pattern" in f"{exec_info.value}" diff --git a/packages/service-library/tests/redis/conftest.py 
b/packages/service-library/tests/redis/conftest.py new file mode 100644 index 00000000000..ae6d04c2085 --- /dev/null +++ b/packages/service-library/tests/redis/conftest.py @@ -0,0 +1,32 @@ +import datetime +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager + +import pytest +from faker import Faker +from pytest_mock import MockerFixture +from servicelib.redis import _constants as redis_constants +from servicelib.redis._client import RedisClientSDK +from settings_library.redis import RedisDatabase + + +@pytest.fixture +async def redis_client_sdk( + get_redis_client_sdk: Callable[ + [RedisDatabase], AbstractAsyncContextManager[RedisClientSDK] + ], +) -> AsyncIterator[RedisClientSDK]: + async with get_redis_client_sdk(RedisDatabase.RESOURCES) as client: + yield client + + +@pytest.fixture +def lock_name(faker: Faker) -> str: + return faker.pystr() + + +@pytest.fixture +def with_short_default_redis_lock_ttl(mocker: MockerFixture) -> datetime.timedelta: + short_ttl = datetime.timedelta(seconds=0.25) + mocker.patch.object(redis_constants, "DEFAULT_LOCK_TTL", short_ttl) + return short_ttl diff --git a/packages/service-library/tests/redis/test_client.py b/packages/service-library/tests/redis/test_client.py new file mode 100644 index 00000000000..210c857bb9b --- /dev/null +++ b/packages/service-library/tests/redis/test_client.py @@ -0,0 +1,143 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + + +import asyncio +import datetime +from collections.abc import Callable +from contextlib import AbstractAsyncContextManager + +import pytest +from redis.exceptions import LockError, LockNotOwnedError +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase, RedisSettings +from tenacity import ( + AsyncRetrying, + retry_if_exception_type, + stop_after_delay, + wait_fixed, +) + +pytest_simcore_core_services_selection = [ + "redis", +] + +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +@pytest.fixture +def redis_lock_ttl() -> datetime.timedelta: + return datetime.timedelta(seconds=1) + + +async def test_redis_lock_no_ttl(redis_client_sdk: RedisClientSDK, lock_name: str): + lock = redis_client_sdk.create_lock(lock_name, ttl=None) + assert await lock.locked() is False + + lock_acquired = await lock.acquire(blocking=False) + assert lock_acquired is True + assert await lock.locked() is True + assert await lock.owned() is True + with pytest.raises(LockError): + # a lock with no ttl cannot be reacquired + await lock.reacquire() + with pytest.raises(LockError): + # a lock with no ttl cannot be extended + await lock.extend(2) + + # try to acquire the lock a second time + same_lock = redis_client_sdk.create_lock(lock_name, ttl=None) + assert await same_lock.locked() is True + assert await same_lock.owned() is False + assert await same_lock.acquire(blocking=False) is False + + # now release the lock + await lock.release() + assert not await lock.locked() + assert not await lock.owned() + + +async def test_redis_lock_context_manager_no_ttl( + redis_client_sdk: RedisClientSDK, lock_name: str +): + lock = redis_client_sdk.create_lock(lock_name, ttl=None) + assert not await lock.locked() + + async with lock: + assert await lock.locked() + assert await lock.owned() + with pytest.raises(LockError): + # a lock with no timeout cannot be reacquired + await lock.reacquire() + + with pytest.raises(LockError): + # a lock with 
no timeout cannot be extended + await lock.extend(2) + + # try to acquire the lock a second time + same_lock = redis_client_sdk.create_lock(lock_name, ttl=None) + assert await same_lock.locked() + assert not await same_lock.owned() + assert await same_lock.acquire() is False + with pytest.raises(LockError): + async with same_lock: + ... + assert not await lock.locked() + + +async def test_redis_lock_with_ttl( + redis_client_sdk: RedisClientSDK, lock_name: str, redis_lock_ttl: datetime.timedelta +): + ttl_lock = redis_client_sdk.create_lock(lock_name, ttl=redis_lock_ttl) + assert not await ttl_lock.locked() + + with pytest.raises(LockNotOwnedError): # noqa: PT012 + # this raises as the lock is lost + async with ttl_lock: + assert await ttl_lock.locked() + assert await ttl_lock.owned() + await asyncio.sleep(2 * redis_lock_ttl.total_seconds()) + assert not await ttl_lock.locked() + + +async def test_redis_client_sdk_setup_shutdown( + mock_redis_socket_timeout: None, redis_service: RedisSettings +): + # setup + redis_resources_dns = redis_service.build_redis_dsn(RedisDatabase.RESOURCES) + client = RedisClientSDK(redis_resources_dns, client_name="pytest") + assert client + assert client.redis_dsn == redis_resources_dns + + # ensure health check task sets the health to True + client._is_healthy = False # noqa: SLF001 + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(10), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert client.is_healthy is True + + # cleanup + await client.redis.flushall() + await client.shutdown() + + +async def test_regression_fails_on_redis_service_outage( + mock_redis_socket_timeout: None, + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + redis_client_sdk: RedisClientSDK, +): + assert await redis_client_sdk.ping() is True + + async with paused_container("redis"): + # no connection available any longer should not hang but timeout + assert await redis_client_sdk.ping() is False + + assert await redis_client_sdk.ping() is True diff --git a/packages/service-library/tests/redis/test_clients_manager.py b/packages/service-library/tests/redis/test_clients_manager.py new file mode 100644 index 00000000000..eeb110557e3 --- /dev/null +++ b/packages/service-library/tests/redis/test_clients_manager.py @@ -0,0 +1,33 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + +from servicelib.redis._clients_manager import RedisClientsManager +from servicelib.redis._models import RedisManagerDBConfig +from settings_library.redis import RedisDatabase, RedisSettings + +pytest_simcore_core_services_selection = [ + "redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +async def test_redis_client_sdks_manager( + mock_redis_socket_timeout: None, + redis_service: RedisSettings, +): + all_redis_configs: set[RedisManagerDBConfig] = { + RedisManagerDBConfig(database=db) for db in RedisDatabase + } + manager = RedisClientsManager( + databases_configs=all_redis_configs, + settings=redis_service, + client_name="pytest", + ) + + async with manager: + for config in all_redis_configs: + assert manager.client(config.database) diff --git a/packages/service-library/tests/redis/test_decorators.py b/packages/service-library/tests/redis/test_decorators.py new file mode 100644 index 00000000000..e4ca9d51463 --- /dev/null +++ b/packages/service-library/tests/redis/test_decorators.py @@ -0,0 
+1,312 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + + +import asyncio +import datetime +from collections.abc import Awaitable, Callable +from typing import Final + +import pytest +from faker import Faker +from servicelib.redis import CouldNotAcquireLockError, RedisClientSDK, exclusive +from servicelib.redis._decorators import ( + _EXCLUSIVE_AUTO_EXTEND_TASK_NAME, + _EXCLUSIVE_TASK_NAME, +) +from servicelib.redis._errors import LockLostError +from servicelib.utils import limited_gather, logged_gather + +pytest_simcore_core_services_selection = [ + "redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +def _assert_exclusive_tasks_are_cancelled(lock_name: str, func: Callable) -> None: + assert _EXCLUSIVE_AUTO_EXTEND_TASK_NAME.format(redis_lock_key=lock_name) not in [ + t.get_name() for t in asyncio.tasks.all_tasks() + ], "the auto extend lock task was not properly stopped!" + assert _EXCLUSIVE_TASK_NAME.format( + module_name=func.__module__, func_name=func.__name__ + ) not in [ + t.get_name() for t in asyncio.tasks.all_tasks() + ], "the exclusive task was not properly stopped!" + + +async def _is_locked(redis_client_sdk: RedisClientSDK, lock_name: str) -> bool: + lock = redis_client_sdk.redis.lock(lock_name) + return await lock.locked() + + +def _exclusive_sleeping_task( + redis_client_sdk: RedisClientSDK | Callable[..., RedisClientSDK], + lock_name: str | Callable[..., str], + sleep_duration: float, +) -> Callable[..., Awaitable[float]]: + @exclusive(redis_client_sdk, lock_key=lock_name) + async def _() -> float: + resolved_client = ( + redis_client_sdk() if callable(redis_client_sdk) else redis_client_sdk + ) + resolved_lock_name = lock_name() if callable(lock_name) else lock_name + assert await _is_locked(resolved_client, resolved_lock_name) + await asyncio.sleep(sleep_duration) + assert await _is_locked(resolved_client, resolved_lock_name) + return sleep_duration + + return _ + + +@pytest.fixture +def sleep_duration(faker: Faker) -> float: + return faker.pyfloat(min_value=0.2, max_value=0.8) + + +async def test_exclusive_with_empty_lock_key_raises(redis_client_sdk: RedisClientSDK): + with pytest.raises(ValueError, match="lock_key cannot be empty"): + + @exclusive(redis_client_sdk, lock_key="") + async def _(): + pass + + +async def test_exclusive_decorator_runs_original_method( + redis_client_sdk: RedisClientSDK, + lock_name: str, + sleep_duration: float, +): + for _ in range(3): + assert ( + await _exclusive_sleeping_task( + redis_client_sdk, lock_name, sleep_duration + )() + == sleep_duration + ) + + +async def test_exclusive_decorator_with_key_builder( + redis_client_sdk: RedisClientSDK, + lock_name: str, + sleep_duration: float, +): + def _get_lock_name(*args, **kwargs) -> str: + assert args is not None + assert kwargs is not None + return lock_name + + for _ in range(3): + assert ( + await _exclusive_sleeping_task( + redis_client_sdk, _get_lock_name, sleep_duration + )() + == sleep_duration + ) + + +async def test_exclusive_decorator_with_client_builder( + redis_client_sdk: RedisClientSDK, + lock_name: str, + sleep_duration: float, +): + def _get_redis_client_builder(*args, **kwargs) -> RedisClientSDK: + assert args is not None + assert kwargs is not None + return redis_client_sdk + + for _ in range(3): + assert ( + await _exclusive_sleeping_task( + _get_redis_client_builder, lock_name, sleep_duration + )() + == sleep_duration + ) + + +async def 
_acquire_lock_and_exclusively_sleep( + redis_client_sdk: RedisClientSDK, + lock_name: str | Callable[..., str], + sleep_duration: float, +) -> None: + redis_lock_name = lock_name() if callable(lock_name) else lock_name + + @exclusive(redis_client_sdk, lock_key=lock_name) + async def _() -> float: + assert await _is_locked(redis_client_sdk, redis_lock_name) + await asyncio.sleep(sleep_duration) + assert await _is_locked(redis_client_sdk, redis_lock_name) + return sleep_duration + + assert await _() == sleep_duration + + assert not await _is_locked(redis_client_sdk, redis_lock_name) + + +async def test_exclusive_parallel_lock_is_released_and_reacquired( + redis_client_sdk: RedisClientSDK, + lock_name: str, +): + parallel_tasks = 10 + results = await logged_gather( + *[ + _acquire_lock_and_exclusively_sleep( + redis_client_sdk, lock_name, sleep_duration=1 + ) + for _ in range(parallel_tasks) + ], + reraise=False, + ) + assert results.count(None) == 1 + assert [isinstance(x, CouldNotAcquireLockError) for x in results].count( + True + ) == parallel_tasks - 1 + + # check lock is released + assert not await _is_locked(redis_client_sdk, lock_name) + + +async def test_exclusive_raises_if_lock_is_lost( + redis_client_sdk: RedisClientSDK, + lock_name: str, +): + started_event = asyncio.Event() + + @exclusive(redis_client_sdk, lock_key=lock_name) + async def _sleeper(time_to_sleep: datetime.timedelta) -> datetime.timedelta: + started_event.set() + await asyncio.sleep(time_to_sleep.total_seconds()) + return time_to_sleep + + exclusive_task = asyncio.create_task(_sleeper(datetime.timedelta(seconds=10))) + await asyncio.wait_for(started_event.wait(), timeout=2) + # let's simlulate lost lock by forcefully deleting it + await redis_client_sdk.redis.delete(lock_name) + + with pytest.raises(LockLostError): + await exclusive_task + + _assert_exclusive_tasks_are_cancelled(lock_name, _sleeper) + + +@pytest.fixture +def lock_data(faker: Faker) -> str: + return faker.text() + + +async def test_exclusive_with_lock_value( + redis_client_sdk: RedisClientSDK, lock_name: str, lock_data: str +): + started_event = asyncio.Event() + + @exclusive(redis_client_sdk, lock_key=lock_name, lock_value=lock_data) + async def _sleeper(time_to_sleep: datetime.timedelta) -> datetime.timedelta: + started_event.set() + await asyncio.sleep(time_to_sleep.total_seconds()) + return time_to_sleep + + # initial state + assert await _is_locked(redis_client_sdk, lock_name) is False + assert await redis_client_sdk.lock_value(lock_name) is None + + # run the exclusive task + exclusive_task = asyncio.create_task(_sleeper(datetime.timedelta(seconds=3))) + await asyncio.wait_for(started_event.wait(), timeout=2) + # expected + assert await _is_locked(redis_client_sdk, lock_name) is True + assert await redis_client_sdk.lock_value(lock_name) == lock_data + # now let the task finish + assert await exclusive_task == datetime.timedelta(seconds=3) + # expected + assert await _is_locked(redis_client_sdk, lock_name) is False + assert await redis_client_sdk.lock_value(lock_name) is None + + _assert_exclusive_tasks_are_cancelled(lock_name, _sleeper) + + +async def test_exclusive_task_erroring_releases_lock( + redis_client_sdk: RedisClientSDK, lock_name: str +): + @exclusive(redis_client_sdk, lock_key=lock_name) + async def _raising_func() -> None: + msg = "Expected error" + raise RuntimeError(msg) + + # initial state + assert await _is_locked(redis_client_sdk, lock_name) is False + assert await redis_client_sdk.lock_value(lock_name) is None + + with 
pytest.raises(RuntimeError): + await _raising_func() + + assert await redis_client_sdk.lock_value(lock_name) is None + + _assert_exclusive_tasks_are_cancelled(lock_name, _raising_func) + + +async def test_lock_acquired_in_parallel_to_update_same_resource( + with_short_default_redis_lock_ttl: datetime.timedelta, + redis_client_sdk: RedisClientSDK, + lock_name: str, +): + INCREASE_OPERATIONS: Final[int] = 250 + INCREASE_BY: Final[int] = 10 + + class RaceConditionCounter: + def __init__(self) -> None: + self.value: int = 0 + + async def race_condition_increase(self, by: int) -> None: + current_value = self.value + current_value += by + # most likely situation which creates issues + await asyncio.sleep(with_short_default_redis_lock_ttl.total_seconds() / 2) + self.value = current_value + + counter = RaceConditionCounter() + # ensures it does nto time out before acquiring the lock + time_for_all_inc_counter_calls_to_finish = ( + with_short_default_redis_lock_ttl * INCREASE_OPERATIONS * 10 + ) + + @exclusive( + redis_client_sdk, + lock_key=lock_name, + blocking=True, + blocking_timeout=time_for_all_inc_counter_calls_to_finish, + ) + async def _inc_counter() -> None: + await counter.race_condition_increase(INCREASE_BY) + + await limited_gather( + *(_inc_counter() for _ in range(INCREASE_OPERATIONS)), limit=15 + ) + assert counter.value == INCREASE_BY * INCREASE_OPERATIONS + + _assert_exclusive_tasks_are_cancelled(lock_name, _inc_counter) + + +async def test_cancelling_exclusive_task_cancels_properly( + redis_client_sdk: RedisClientSDK, lock_name: str +): + started_event = asyncio.Event() + + @exclusive(redis_client_sdk, lock_key=lock_name) + async def _sleep_task(time_to_sleep: datetime.timedelta) -> datetime.timedelta: + started_event.set() + await asyncio.sleep(time_to_sleep.total_seconds()) + return time_to_sleep + + exclusive_task = asyncio.create_task(_sleep_task(datetime.timedelta(seconds=10))) + await asyncio.wait_for(started_event.wait(), timeout=2) + exclusive_task.cancel() + + with pytest.raises(asyncio.CancelledError): + await exclusive_task + + assert not await _is_locked(redis_client_sdk, lock_name) + + _assert_exclusive_tasks_are_cancelled(lock_name, _sleep_task) diff --git a/packages/service-library/tests/redis/test_project_lock.py b/packages/service-library/tests/redis/test_project_lock.py new file mode 100644 index 00000000000..aa9d7fd1c74 --- /dev/null +++ b/packages/service-library/tests/redis/test_project_lock.py @@ -0,0 +1,144 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +from typing import cast +from unittest import mock +from uuid import UUID + +import pytest +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_access import Owner +from models_library.projects_state import ProjectLocked, ProjectStatus +from servicelib.async_utils import cancel_wait_task +from servicelib.redis import ( + ProjectLockError, + RedisClientSDK, + get_project_locked_state, + is_project_locked, + with_project_locked, +) +from servicelib.redis._project_lock import _PROJECT_REDIS_LOCK_KEY + +pytest_simcore_core_services_selection = [ + "redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +@pytest.fixture() +def project_uuid(faker: Faker) -> ProjectID: + return cast(UUID, faker.uuid4(cast_to=None)) + + +assert "json_schema_extra" in Owner.model_config +assert 
isinstance(Owner.model_config["json_schema_extra"], dict) +assert isinstance(Owner.model_config["json_schema_extra"]["examples"], list) + + +@pytest.fixture(params=Owner.model_config["json_schema_extra"]["examples"]) +def owner(request: pytest.FixtureRequest) -> Owner: + return Owner(**request.param) + + +@pytest.fixture +def mocked_notification_cb() -> mock.AsyncMock: + return mock.AsyncMock() + + +@pytest.mark.parametrize( + "project_status", + [ + ProjectStatus.CLOSING, + ProjectStatus.CLONING, + ProjectStatus.EXPORTING, + ProjectStatus.OPENING, + ProjectStatus.MAINTAINING, + ], +) +async def test_with_project_locked( + redis_client_sdk: RedisClientSDK, + project_uuid: ProjectID, + owner: Owner, + project_status: ProjectStatus, + mocked_notification_cb: mock.AsyncMock, +): + @with_project_locked( + redis_client_sdk, + project_uuid=project_uuid, + status=project_status, + owner=owner, + notification_cb=mocked_notification_cb, + ) + async def _locked_fct() -> None: + mocked_notification_cb.assert_called_once() + assert await is_project_locked(redis_client_sdk, project_uuid) is True + locked_state = await get_project_locked_state(redis_client_sdk, project_uuid) + assert locked_state is not None + assert locked_state == ProjectLocked( + value=True, + owner=owner, + status=project_status, + ) + # check lock name formatting is correct + redis_lock = await redis_client_sdk.redis.get( + _PROJECT_REDIS_LOCK_KEY.format(project_uuid) + ) + assert redis_lock + assert ProjectLocked.model_validate_json(redis_lock) == ProjectLocked( + value=True, + owner=owner, + status=project_status, + ) + + mocked_notification_cb.assert_not_called() + assert await get_project_locked_state(redis_client_sdk, project_uuid) is None + assert await is_project_locked(redis_client_sdk, project_uuid) is False + await _locked_fct() + assert await is_project_locked(redis_client_sdk, project_uuid) is False + assert await get_project_locked_state(redis_client_sdk, project_uuid) is None + mocked_notification_cb.assert_called() + assert mocked_notification_cb.call_count == 2 + + +@pytest.mark.parametrize( + "project_status", + [ + ProjectStatus.CLOSING, + ProjectStatus.CLONING, + ProjectStatus.EXPORTING, + ProjectStatus.OPENING, + ProjectStatus.MAINTAINING, + ], +) +async def test_lock_already_locked_project_raises( + redis_client_sdk: RedisClientSDK, + project_uuid: ProjectID, + owner: Owner, + project_status: ProjectStatus, +): + started_event = asyncio.Event() + + @with_project_locked( + redis_client_sdk, + project_uuid=project_uuid, + status=project_status, + owner=owner, + notification_cb=None, + ) + async def _locked_fct() -> None: + started_event.set() + await asyncio.sleep(10) + + task1 = asyncio.create_task(_locked_fct(), name="pytest_task_1") + await started_event.wait() + with pytest.raises(ProjectLockError): + await _locked_fct() + + await cancel_wait_task(task1) diff --git a/packages/service-library/tests/redis/test_utils.py b/packages/service-library/tests/redis/test_utils.py new file mode 100644 index 00000000000..25eb129c853 --- /dev/null +++ b/packages/service-library/tests/redis/test_utils.py @@ -0,0 +1,26 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access + + +from faker import Faker +from servicelib.redis import RedisClientSDK, handle_redis_returns_union_types + +pytest_simcore_core_services_selection = [ + "redis", +] + +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +async def 
test_handle_redis_returns_union_types( + redis_client_sdk: RedisClientSDK, faker: Faker +): + await handle_redis_returns_union_types( + redis_client_sdk.redis.hset( + faker.pystr(), mapping={faker.pystr(): faker.pystr()} + ) + ) diff --git a/packages/service-library/tests/test_archiving_utils.py b/packages/service-library/tests/test_archiving_utils.py deleted file mode 100644 index 84bcafb9572..00000000000 --- a/packages/service-library/tests/test_archiving_utils.py +++ /dev/null @@ -1,593 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=no-name-in-module - -import asyncio -import hashlib -import itertools -import os -import random -import secrets -import string -import tempfile -from concurrent.futures import ProcessPoolExecutor -from dataclasses import dataclass -from pathlib import Path -from typing import Callable, Iterable, Iterator, Optional - -import pytest -from faker import Faker -from pydantic import ByteSize, parse_obj_as -from pytest_benchmark.plugin import BenchmarkFixture -from servicelib import archiving_utils -from servicelib.archiving_utils import ArchiveError, archive_dir, unarchive_dir - -from .test_utils import print_tree - - -@pytest.fixture -def dir_with_random_content() -> Iterable[Path]: - def random_string(length: int) -> str: - return "".join(secrets.choice(string.ascii_letters) for i in range(length)) - - def make_files_in_dir(dir_path: Path, file_count: int) -> None: - for _ in range(file_count): - (dir_path / f"{random_string(8)}.bin").write_bytes( - os.urandom(random.randint(1, 10)) - ) - - def ensure_dir(path_to_ensure: Path) -> Path: - path_to_ensure.mkdir(parents=True, exist_ok=True) - return path_to_ensure - - def make_subdirectory_with_content(subdir_name: Path, max_file_count: int) -> None: - subdir_name = ensure_dir(subdir_name) - make_files_in_dir( - dir_path=subdir_name, - file_count=random.randint(1, max_file_count), - ) - - def make_subdirectories_with_content( - subdir_name: Path, max_subdirectories_count: int, max_file_count: int - ) -> None: - subdirectories_count = random.randint(1, max_subdirectories_count) - for _ in range(subdirectories_count): - make_subdirectory_with_content( - subdir_name=subdir_name / f"{random_string(4)}", - max_file_count=max_file_count, - ) - - def get_dirs_and_subdris_in_path(path_to_scan: Path) -> list[Path]: - return [path for path in path_to_scan.rglob("*") if path.is_dir()] - - with tempfile.TemporaryDirectory() as temp_dir: - temp_dir_path = Path(temp_dir) - data_container = ensure_dir(temp_dir_path / "study_data") - - make_subdirectories_with_content( - subdir_name=data_container, max_subdirectories_count=5, max_file_count=5 - ) - make_files_in_dir(dir_path=data_container, file_count=5) - - # creates a good amount of files - for _ in range(4): - for subdirectory_path in get_dirs_and_subdris_in_path(data_container): - make_subdirectories_with_content( - subdir_name=subdirectory_path, - max_subdirectories_count=3, - max_file_count=3, - ) - - yield temp_dir_path - - -@pytest.fixture -def exclude_patterns_validation_dir(tmp_path: Path, faker: Faker) -> Path: - """Directory with well known structure""" - base_dir = tmp_path / "exclude_patterns_validation_dir" - base_dir.mkdir() - (base_dir / "empty").mkdir() - (base_dir / "d1").mkdir() - (base_dir / "d1" / "f1").write_text(faker.text()) - (base_dir / "d1" / "f2.txt").write_text(faker.text()) - (base_dir / "d1" / "sd1").mkdir() - (base_dir / "d1" / "sd1" / 
"f1").write_text(faker.text()) - (base_dir / "d1" / "sd1" / "f2.txt").write_text(faker.text()) - - print("exclude_patterns_validation_dir ---") - print_tree(base_dir) - return base_dir - - -def __raise_error(*arts, **kwargs) -> None: - raise ArchiveError("raised as requested") - - -@pytest.fixture -def zipfile_single_file_extract_worker_raises_error() -> Iterator[None]: - # NOTE: cannot MagicMock cannot be serialized via pickle used by - # multiprocessing, also `__raise_error` cannot be defined in the - # context fo this function or it cannot be pickled - - # pylint: disable=protected-access - old_func = archiving_utils._zipfile_single_file_extract_worker - archiving_utils._zipfile_single_file_extract_worker = __raise_error - yield - archiving_utils._zipfile_single_file_extract_worker = old_func - - -# UTILS - - -def strip_directory_from_path(input_path: Path, to_strip: Path) -> Path: - # NOTE: could use os.path.relpath instead or Path.relative_to ? - return Path(str(input_path).replace(str(to_strip) + "/", "")) - - -def get_all_files_in_dir(dir_path: Path) -> set[Path]: - return { - strip_directory_from_path(x, dir_path) - for x in dir_path.rglob("*") - if x.is_file() - } - - -def _compute_hash(file_path: Path) -> tuple[Path, str]: - with open(file_path, "rb") as file_to_hash: - file_hash = hashlib.md5() - chunk = file_to_hash.read(8192) - while chunk: - file_hash.update(chunk) - chunk = file_to_hash.read(8192) - - return file_path, file_hash.hexdigest() - - -async def compute_hashes(file_paths: list[Path]) -> dict[Path, str]: - """given a list of files computes hashes for the files on a process pool""" - - loop = asyncio.get_event_loop() - - with ProcessPoolExecutor() as prcess_pool_executor: - tasks = [ - loop.run_in_executor(prcess_pool_executor, _compute_hash, file_path) - for file_path in file_paths - ] - # pylint: disable=unnecessary-comprehension - # see return value of _compute_hash it is a tuple, mapping list[Tuple[Path,str]] to Dict[Path, str] here - return {k: v for k, v in await asyncio.gather(*tasks)} - - -def full_file_path_from_dir_and_subdirs(dir_path: Path) -> list[Path]: - return [x for x in dir_path.rglob("*") if x.is_file()] - - -def _escape_undecodable_str(s: str) -> str: - return s.encode(errors="replace").decode("utf-8") - - -def _escape_undecodable_path(path: Path) -> Path: - return Path(_escape_undecodable_str(str(path))) - - -async def assert_same_directory_content( - dir_to_compress: Path, - output_dir: Path, - inject_relative_path: Optional[Path] = None, - unsupported_replace: bool = False, -) -> None: - def _relative_path(input_path: Path) -> Path: - assert inject_relative_path is not None - return Path(str(inject_relative_path / str(input_path))[1:]) - - input_set = get_all_files_in_dir(dir_to_compress) - output_set = get_all_files_in_dir(output_dir) - - if unsupported_replace: - input_set = {_escape_undecodable_path(x) for x in input_set} - - if inject_relative_path is not None: - input_set = {_relative_path(x) for x in input_set} - - assert ( - input_set == output_set - ), f"There following files are missing {input_set - output_set}" - - # computing the hashes for dir_to_compress and map in a dict - # with the name starting from the root of the directory and md5sum - dir_to_compress_hashes = { - strip_directory_from_path(k, dir_to_compress): v - for k, v in ( - await compute_hashes(full_file_path_from_dir_and_subdirs(dir_to_compress)) - ).items() - } - dir_to_compress_hashes = { - _escape_undecodable_path(k): v for k, v in dir_to_compress_hashes.items() - } 
- - # computing the hashes for output_dir and map in a dict - # with the name starting from the root of the directory and md5sum - output_dir_hashes = { - strip_directory_from_path(k, output_dir): v - for k, v in ( - await compute_hashes(full_file_path_from_dir_and_subdirs(output_dir)) - ).items() - } - - # finally check if hashes are mapped 1 to 1 in order to verify - # that the compress/decompress worked correctly - for key in dir_to_compress_hashes: - assert ( - dir_to_compress_hashes[key] - == output_dir_hashes[_relative_path(key) if inject_relative_path else key] - ) - - -def assert_unarchived_paths( - unarchived_paths: set[Path], - src_dir: Path, - dst_dir: Path, - is_saved_as_relpath: bool, - unsupported_replace: bool = False, -): - is_file_or_emptydir = lambda p: p.is_file() or (p.is_dir() and not any(p.glob("*"))) - - # all unarchivedare under dst_dir - assert all(dst_dir in f.parents for f in unarchived_paths) - - # can be also checked with strings - assert all(str(f).startswith(str(dst_dir)) for f in unarchived_paths) - - # trim basedir and compare relative paths (alias 'tails') against src_dir - basedir = str(dst_dir) - if not is_saved_as_relpath: - basedir += str(src_dir) - - got_tails = {os.path.relpath(f, basedir) for f in unarchived_paths} - expected_tails = { - os.path.relpath(f, src_dir) - for f in src_dir.rglob("*") - if is_file_or_emptydir(f) - } - if unsupported_replace: - expected_tails = {_escape_undecodable_str(x) for x in expected_tails} - assert got_tails == expected_tails - - -@pytest.mark.skip(reason="DEV:only for manual tessting") -async def test_archiving_utils_against_sample( - osparc_simcore_root_dir: Path, tmp_path: Path -): - """ - ONLY for manual testing - User MUST provide a sample of a zip file in ``sample_path`` - """ - sample_path = osparc_simcore_root_dir / "keep.ignore" / "workspace.zip" - destination = tmp_path / "unzipped" - - extracted_paths = await unarchive_dir(sample_path, destination) - assert extracted_paths - - for p in extracted_paths: - assert isinstance(p, Path), p - - await archive_dir( - dir_to_compress=destination, - destination=tmp_path / "test_it.zip", - compress=True, - store_relative_path=True, - ) - - -@pytest.mark.parametrize( - "compress,store_relative_path", - itertools.product([True, False], repeat=2), -) -async def test_archive_unarchive_same_structure_dir( - dir_with_random_content: Path, - tmp_path: Path, - compress: bool, - store_relative_path: bool, -): - temp_dir_one = tmp_path / "one" - temp_dir_two = tmp_path / "two" - - temp_dir_one.mkdir() - temp_dir_two.mkdir() - - archive_file = temp_dir_one / "archive.zip" - - await archive_dir( - dir_to_compress=dir_with_random_content, - destination=archive_file, - store_relative_path=store_relative_path, - compress=compress, - ) - - unarchived_paths: set[Path] = await unarchive_dir( - archive_to_extract=archive_file, destination_folder=temp_dir_two - ) - - assert_unarchived_paths( - unarchived_paths, - src_dir=dir_with_random_content, - dst_dir=temp_dir_two, - is_saved_as_relpath=store_relative_path, - ) - - await assert_same_directory_content( - dir_with_random_content, - temp_dir_two, - None if store_relative_path else dir_with_random_content, - ) - - -@pytest.mark.parametrize( - "compress,store_relative_path", - itertools.product([True, False], repeat=2), -) -async def test_unarchive_in_same_dir_as_archive( - dir_with_random_content: Path, - tmp_path: Path, - compress: bool, - store_relative_path: bool, -): - archive_file = tmp_path / "archive.zip" - - await archive_dir( - 
dir_to_compress=dir_with_random_content, - destination=archive_file, - store_relative_path=store_relative_path, - compress=compress, - ) - - unarchived_paths = await unarchive_dir( - archive_to_extract=archive_file, destination_folder=tmp_path - ) - - archive_file.unlink() # delete before comparing contents - - assert_unarchived_paths( - unarchived_paths, - src_dir=dir_with_random_content, - dst_dir=tmp_path, - is_saved_as_relpath=store_relative_path, - ) - - await assert_same_directory_content( - dir_with_random_content, - tmp_path, - None if store_relative_path else dir_with_random_content, - ) - - -@pytest.mark.parametrize( - "compress,store_relative_path", - itertools.product([True, False], repeat=2), -) -async def test_regression_unsupported_characters( - tmp_path: Path, - compress: bool, - store_relative_path: bool, -) -> None: - archive_path = tmp_path / "archive.zip" - dir_to_archive = tmp_path / "to_compress" - dir_to_archive.mkdir() - dst_dir = tmp_path / "decompressed" - dst_dir.mkdir() - - def _create_file(file_name: str, content: str) -> None: - file_path = dir_to_archive / file_name - file_path.write_text(content) - assert file_path.read_text() == content - - # unsupported file name - _create_file("something\udce6likethis.txt", "payload1") - # supported name - _create_file("this_file_name_works.txt", "payload2") - - await archive_dir( - dir_to_compress=dir_to_archive, - destination=archive_path, - store_relative_path=store_relative_path, - compress=compress, - ) - - unarchived_paths = await unarchive_dir( - archive_to_extract=archive_path, destination_folder=dst_dir - ) - - assert_unarchived_paths( - unarchived_paths, - src_dir=dir_to_archive, - dst_dir=dst_dir, - is_saved_as_relpath=store_relative_path, - unsupported_replace=True, - ) - - await assert_same_directory_content( - dir_to_compress=dir_to_archive, - output_dir=dst_dir, - inject_relative_path=None if store_relative_path else dir_to_archive, - unsupported_replace=True, - ) - - -EMPTY_SET: set[Path] = set() -ALL_ITEMS_SET: set[Path] = { - Path("d1/f2.txt"), - Path("d1/f1"), - Path("d1/sd1/f1"), - Path("d1/sd1/f2.txt"), -} - - -@dataclass(frozen=True) -class ExcludeParams: - exclude_patterns: set[str] - expected_result: set[Path] - - -# + /exclude_patterns_validation_dir -# + empty -# + d1 -# - f2.txt -# + sd1 -# - f2.txt -# - f1 -# - f1 -@pytest.mark.parametrize( - "params", - [ - ExcludeParams( - exclude_patterns={"/d1*"}, - expected_result=EMPTY_SET, - ), - ExcludeParams( - exclude_patterns={"/d1/sd1*"}, - expected_result={ - Path("d1/f2.txt"), - Path("d1/f1"), - }, - ), - ExcludeParams( - exclude_patterns={"d1*"}, - expected_result=EMPTY_SET, - ), - ExcludeParams( - exclude_patterns={"*d1*"}, - expected_result=EMPTY_SET, - ), - ExcludeParams( - exclude_patterns={"*.txt"}, - expected_result={ - Path("d1/f1"), - Path("d1/sd1/f1"), - }, - ), - ExcludeParams( - exclude_patterns={"/absolute/path/does/not/exist*"}, - expected_result=ALL_ITEMS_SET, - ), - ExcludeParams( - exclude_patterns={"/../../this/is/ignored*"}, - expected_result=ALL_ITEMS_SET, - ), - ExcludeParams( - exclude_patterns={"*relative/path/does/not/exist"}, - expected_result=ALL_ITEMS_SET, - ), - ], -) -async def test_archive_unarchive_check_exclude( - params: ExcludeParams, - exclude_patterns_validation_dir: Path, - tmp_path: Path, -): - temp_dir_one = tmp_path / "one" - temp_dir_two = tmp_path / "two" - - temp_dir_one.mkdir() - temp_dir_two.mkdir() - - archive_file = temp_dir_one / "archive.zip" - - # make exclude_patterns work relative to test 
directory - exclude_patterns = { - f"{exclude_patterns_validation_dir}/{x.strip('/') if x.startswith('/') else x}" - for x in params.exclude_patterns - } - - await archive_dir( - dir_to_compress=exclude_patterns_validation_dir, - destination=archive_file, - store_relative_path=True, - compress=False, - exclude_patterns=exclude_patterns, - ) - - unarchived_paths: set[Path] = await unarchive_dir( - archive_to_extract=archive_file, destination_folder=temp_dir_two - ) - - relative_unarchived_paths = {x.relative_to(temp_dir_two) for x in unarchived_paths} - - assert ( - relative_unarchived_paths == params.expected_result - ), f"Exclude rules: {exclude_patterns=}" - - -async def test_unarchive_dir_raises_error( - zipfile_single_file_extract_worker_raises_error: None, - dir_with_random_content: Path, - tmp_path: Path, -): - temp_dir_one = tmp_path / "one" - temp_dir_two = tmp_path / "two" - - temp_dir_one.mkdir() - temp_dir_two.mkdir() - - archive_file = temp_dir_one / "archive.zip" - - await archive_dir( - dir_to_compress=dir_with_random_content, - destination=archive_file, - store_relative_path=True, - compress=True, - ) - - with pytest.raises(ArchiveError, match=r"^.*raised as requested.*$"): - await archiving_utils.unarchive_dir(archive_file, temp_dir_two) - - -file_suffix = 0 - - -async def _archive_dir_performance( - input_path: Path, destination_path: Path, compress: bool -): - global file_suffix # pylint: disable=global-statement - - await archive_dir( - input_path, - destination_path / f"archive_{file_suffix}.zip", - compress=compress, - store_relative_path=True, - ) - file_suffix += 1 - - -@pytest.mark.skip(reason="manual testing") -@pytest.mark.parametrize( - "compress, file_size, num_files", [(False, parse_obj_as(ByteSize, "1Mib"), 10000)] -) -def test_archive_dir_performance( - benchmark: BenchmarkFixture, - create_file_of_size: Callable[[ByteSize, str], Path], - tmp_path: Path, - compress: bool, - file_size: ByteSize, - num_files: int, -): - # create a bunch of different files - files_to_compress = [ - create_file_of_size(file_size, f"inputs/test_file_{n}") - for n in range(num_files) - ] - assert len(files_to_compress) == num_files - parent_path = files_to_compress[0].parent - assert all(f.parent == parent_path for f in files_to_compress) - - destination_path = tmp_path / "archive_performance" - assert not destination_path.exists() - destination_path.mkdir(parents=True) - assert destination_path.exists() - - def run_async_test(*args, **kwargs): - asyncio.get_event_loop().run_until_complete( - _archive_dir_performance(parent_path, destination_path, compress) - ) - - benchmark(run_async_test) diff --git a/packages/service-library/tests/test_async_utils.py b/packages/service-library/tests/test_async_utils.py index cf2d051fe95..9bb1b4fff45 100644 --- a/packages/service-library/tests/test_async_utils.py +++ b/packages/service-library/tests/test_async_utils.py @@ -7,13 +7,15 @@ import random from collections import deque from dataclasses import dataclass +from datetime import timedelta from time import time -from typing import Any, Optional +from typing import Any import pytest from faker import Faker from servicelib.async_utils import ( _sequential_jobs_contexts, + delayed_start, run_sequentially_in_context, ) @@ -59,24 +61,25 @@ def _compensate_for_slow_systems(number: float) -> float: async def test_context_aware_dispatch( - sleep_duration: float, - ensure_run_in_sequence_context_is_empty: None, + sleep_duration: float, ensure_run_in_sequence_context_is_empty: None, faker: Faker ) -> 
None: @run_sequentially_in_context(target_args=["c1", "c2", "c3"]) async def orderly(c1: Any, c2: Any, c3: Any, control: Any) -> None: _ = (c1, c2, c3) await asyncio.sleep(sleep_duration) - context = dict(c1=c1, c2=c2, c3=c3) + context = {"c1": c1, "c2": c2, "c3": c3} await locked_stores[make_key_from_context(context)].push(control) def make_key_from_context(context: dict) -> str: return ".".join([f"{k}:{v}" for k, v in context.items()]) def make_context(): - return dict( - c1=random.randint(0, 10), c2=random.randint(0, 10), c3=random.randint(0, 10) - ) + return { + "c1": faker.random_int(0, 10), + "c2": faker.random_int(0, 10), + "c3": faker.random_int(0, 10), + } contexts = [make_context() for _ in range(10)] @@ -116,7 +119,8 @@ class DidFailException(Exception): @run_sequentially_in_context(target_args=["will_fail"]) async def sometimes_failing(will_fail: bool) -> bool: if will_fail: - raise DidFailException("I was instructed to fail") + msg = "I was instructed to fail" + raise DidFailException(msg) return True for x in range(100): @@ -133,7 +137,6 @@ async def test_context_aware_wrong_target_args_name( expected_param_name: str, ensure_run_in_sequence_context_is_empty: None, # pylint: disable=unused-argument ) -> None: - # pylint: disable=unused-argument @run_sequentially_in_context(target_args=[expected_param_name]) async def target_function(the_param: Any) -> None: @@ -201,7 +204,7 @@ class ObjectWithPropos: @run_sequentially_in_context(target_args=["object_with_props.attr1"]) async def test_attribute( - object_with_props: ObjectWithPropos, other_attr: Optional[int] = None + object_with_props: ObjectWithPropos, other_attr: int | None = None ) -> str: return object_with_props.attr1 @@ -222,3 +225,20 @@ async def test_multiple_context_calls(context_param: int) -> int: assert i == await test_multiple_context_calls(i) assert len(_sequential_jobs_contexts) == RETRIES + + +async def test_with_delay(): + @delayed_start(timedelta(seconds=0.2)) + async def decorated_awaitable() -> int: + return 42 + + assert await decorated_awaitable() == 42 + + async def another_awaitable() -> int: + return 42 + + decorated_another_awaitable = delayed_start(timedelta(seconds=0.2))( + another_awaitable + ) + + assert await decorated_another_awaitable() == 42 diff --git a/packages/service-library/tests/test_background_task.py b/packages/service-library/tests/test_background_task.py index b78f1107709..8c508bf8979 100644 --- a/packages/service-library/tests/test_background_task.py +++ b/packages/service-library/tests/test_background_task.py @@ -6,26 +6,33 @@ import asyncio import datetime -from typing import AsyncIterator, Awaitable, Callable, Optional +import logging +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Final from unittest import mock +from unittest.mock import AsyncMock import pytest from faker import Faker -from pytest import FixtureRequest from pytest_mock.plugin import MockerFixture -from servicelib.background_task import ( - periodic_task, - start_periodic_task, - stop_periodic_task, -) +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task, periodic, periodic_task + +pytest_simcore_core_services_selection = [ + "redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + -_FAST_POLL_INTERVAL = 1 +_FAST_POLL_INTERVAL: Final[int] = 1 +_VERY_SLOW_POLL_INTERVAL: Final[int] = 100 @pytest.fixture def mock_background_task(mocker: MockerFixture) -> mock.AsyncMock: - mocked_task = 
mocker.AsyncMock(return_value=None) - return mocked_task + return mocker.AsyncMock(return_value=None) @pytest.fixture @@ -33,24 +40,36 @@ def task_interval() -> datetime.timedelta: return datetime.timedelta(seconds=_FAST_POLL_INTERVAL) -@pytest.fixture(params=[None, 1]) -def stop_task_timeout(request: FixtureRequest) -> Optional[float]: +@pytest.fixture +def very_long_task_interval() -> datetime.timedelta: + return datetime.timedelta(seconds=_VERY_SLOW_POLL_INTERVAL) + + +@pytest.fixture(params=[None, 1], ids=lambda x: f"stop-timeout={x}") +def stop_task_timeout(request: pytest.FixtureRequest) -> float | None: return request.param @pytest.fixture async def create_background_task( - faker: Faker, stop_task_timeout: Optional[float] -) -> AsyncIterator[Callable[[datetime.timedelta, Callable], Awaitable[asyncio.Task]]]: + faker: Faker, stop_task_timeout: float | None +) -> AsyncIterator[ + Callable[ + [datetime.timedelta, Callable, asyncio.Event | None], Awaitable[asyncio.Task] + ] +]: created_tasks = [] async def _creator( - interval: datetime.timedelta, task: Callable[..., Awaitable] + interval: datetime.timedelta, + task: Callable[..., Awaitable], + early_wake_up_event: asyncio.Event | None, ) -> asyncio.Task: - background_task = await start_periodic_task( + background_task = create_periodic_task( task, interval=interval, task_name=faker.pystr(), + early_wake_up_event=early_wake_up_event, ) assert background_task created_tasks.append(background_task) @@ -59,54 +78,91 @@ async def _creator( yield _creator # cleanup await asyncio.gather( - *(stop_periodic_task(t, timeout=stop_task_timeout) for t in created_tasks) + *(cancel_wait_task(t, max_delay=stop_task_timeout) for t in created_tasks) ) +@pytest.mark.parametrize( + "wake_up_event", [None, asyncio.Event], ids=lambda x: f"wake-up-event: {x}" +) async def test_background_task_created_and_deleted( mock_background_task: mock.AsyncMock, task_interval: datetime.timedelta, create_background_task: Callable[ - [datetime.timedelta, Callable], Awaitable[asyncio.Task] + [datetime.timedelta, Callable, asyncio.Event | None], Awaitable[asyncio.Task] ], + wake_up_event: Callable | None, ): - task = await create_background_task( + event = wake_up_event() if wake_up_event else None + _task = await create_background_task( task_interval, mock_background_task, + event, ) await asyncio.sleep(5 * task_interval.total_seconds()) mock_background_task.assert_called() - assert mock_background_task.call_count > 1 + assert mock_background_task.call_count > 2 + + +async def test_background_task_wakes_up_early( + mock_background_task: mock.AsyncMock, + very_long_task_interval: datetime.timedelta, + create_background_task: Callable[ + [datetime.timedelta, Callable, asyncio.Event | None], Awaitable[asyncio.Task] + ], +): + wake_up_event = asyncio.Event() + _task = await create_background_task( + very_long_task_interval, + mock_background_task, + wake_up_event, + ) + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + # now the task should have run only once + mock_background_task.assert_called_once() + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_called_once() + # this should wake up the task + wake_up_event.set() + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_called() + assert mock_background_task.call_count == 2 + # no change this now waits again a very long time + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_called() + assert mock_background_task.call_count == 2 -async def 
test_dynamic_scaling_task_raises_restarts( +async def test_background_task_raises_restarts( mock_background_task: mock.AsyncMock, task_interval: datetime.timedelta, create_background_task: Callable[ - [datetime.timedelta, Callable], Awaitable[asyncio.Task] + [datetime.timedelta, Callable, asyncio.Event | None], Awaitable[asyncio.Task] ], ): mock_background_task.side_effect = RuntimeError("pytest faked runtime error") - task = await create_background_task( + _task = await create_background_task( task_interval, mock_background_task, + None, ) await asyncio.sleep(5 * task_interval.total_seconds()) mock_background_task.assert_called() assert mock_background_task.call_count > 1 -async def test_dynamic_scaling_task_correctly_cancels( +async def test_background_task_correctly_cancels( mock_background_task: mock.AsyncMock, task_interval: datetime.timedelta, create_background_task: Callable[ - [datetime.timedelta, Callable], Awaitable[asyncio.Task] + [datetime.timedelta, Callable, asyncio.Event | None], Awaitable[asyncio.Task] ], ): mock_background_task.side_effect = asyncio.CancelledError - task = await create_background_task( + _task = await create_background_task( task_interval, mock_background_task, + None, ) await asyncio.sleep(5 * task_interval.total_seconds()) # the task will be called once, and then stop @@ -128,3 +184,40 @@ async def test_periodic_task_context_manager( assert asyncio_task.cancelled() is False assert asyncio_task.done() is False assert asyncio_task.cancelled() is True + + +async def test_periodic_decorator(): + # This mock function will allow us to test if the function is called periodically + mock_func = AsyncMock() + + @periodic(interval=datetime.timedelta(seconds=0.1)) + async def _func() -> None: + await mock_func() + + task = asyncio.create_task(_func()) + + # Give some time for the periodic calls to happen + await asyncio.sleep(0.5) + + # Once enough time has passed, cancel the task + task.cancel() + with pytest.raises(asyncio.CancelledError): + await task + + assert mock_func.call_count > 1 + + +async def test_periodic_task_logs_error( + mock_background_task: mock.AsyncMock, + task_interval: datetime.timedelta, + caplog: pytest.LogCaptureFixture, +): + mock_background_task.side_effect = RuntimeError("Test error") + + with caplog.at_level(logging.ERROR): + async with periodic_task( + mock_background_task, interval=task_interval, task_name="test_task" + ): + await asyncio.sleep(2 * task_interval.total_seconds()) + + assert "Test error" in caplog.text diff --git a/packages/service-library/tests/test_background_task_utils.py b/packages/service-library/tests/test_background_task_utils.py new file mode 100644 index 00000000000..9a03a6c3541 --- /dev/null +++ b/packages/service-library/tests/test_background_task_utils.py @@ -0,0 +1,125 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import datetime +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager +from itertools import chain +from unittest import mock + +import arrow +import pytest +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task_utils import exclusive_periodic +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase +from tenacity import ( + AsyncRetrying, + retry_if_exception_type, + stop_after_delay, + wait_fixed, +) + +pytest_simcore_core_services_selection = [ + 
"redis", +] +pytest_simcore_ops_services_selection = [ + "redis-commander", +] + + +@pytest.fixture +async def redis_client_sdk( + get_redis_client_sdk: Callable[ + [RedisDatabase], AbstractAsyncContextManager[RedisClientSDK] + ], +) -> AsyncIterator[RedisClientSDK]: + async with get_redis_client_sdk(RedisDatabase.RESOURCES) as client: + yield client + + +async def _assert_on_sleep_done(on_sleep_events: mock.Mock, *, stop_after: float): + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(stop_after), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert on_sleep_events.call_count == 2 + print("sleep was done with", on_sleep_events.call_count, " counts") + + +async def _assert_task_completes_once( + redis_client_sdk: RedisClientSDK, + stop_after: float, +) -> tuple[float, ...]: + @exclusive_periodic(redis_client_sdk, task_interval=datetime.timedelta(seconds=1)) + async def _sleep_task(sleep_interval: float, on_sleep_events: mock.Mock) -> None: + on_sleep_events(arrow.utcnow()) + await asyncio.sleep(sleep_interval) + print("Slept for", sleep_interval) + on_sleep_events(arrow.utcnow()) + + sleep_events = mock.Mock() + + task = asyncio.create_task(_sleep_task(1, sleep_events), name="pytest_sleep_task") + + await _assert_on_sleep_done(sleep_events, stop_after=stop_after) + + await cancel_wait_task(task, max_delay=5) + + events_timestamps: tuple[float, ...] = tuple( + x.args[0].timestamp() for x in sleep_events.call_args_list + ) + return events_timestamps + + +def _check_elements_lower(lst: list) -> bool: + # False when lst[x] => lst[x+1] otherwise True + return all(lst[i] < lst[i + 1] for i in range(len(lst) - 1)) + + +def test__check_elements_lower(): + assert _check_elements_lower([1, 2, 3, 4, 5]) + assert not _check_elements_lower([1, 2, 3, 3, 4, 5]) + assert not _check_elements_lower([1, 2, 3, 5, 4]) + assert not _check_elements_lower([2, 1, 3, 4, 5]) + assert not _check_elements_lower([1, 2, 4, 3, 5]) + + +async def test_exclusive_periodic_decorator_single( + redis_client_sdk: RedisClientSDK, +): + await _assert_task_completes_once(redis_client_sdk, stop_after=2) + + +async def test_exclusive_periodic_decorator_parallel_all_finish( + redis_client_sdk: RedisClientSDK, +): + parallel_tasks = 10 + results = await asyncio.gather( + *[ + _assert_task_completes_once(redis_client_sdk, stop_after=60) + for _ in range(parallel_tasks) + ], + return_exceptions=True, + ) + + # check no error occurred + assert [isinstance(x, tuple) for x in results].count(True) == parallel_tasks + assert [isinstance(x, Exception) for x in results].count(True) == 0 + valid_results = [x for x in results if isinstance(x, tuple)] + assert [x[0] < x[1] for x in valid_results].count(True) == parallel_tasks + + # sort by start time (task start order is not equal to the task lock acquisition order) + sorted_results = sorted(valid_results, key=lambda x: x[0]) + flattened_results = list(chain(*sorted_results)) + + # NOTE all entries should be in increasing order; + # this means that the `_sleep_task` ran sequentially + assert _check_elements_lower(flattened_results) diff --git a/packages/service-library/tests/test_bytes_iters.py b/packages/service-library/tests/test_bytes_iters.py new file mode 100644 index 00000000000..32c3037a9f0 --- /dev/null +++ b/packages/service-library/tests/test_bytes_iters.py @@ -0,0 +1,137 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import secrets +from collections.abc import AsyncIterable 
+from pathlib import Path +from unittest.mock import Mock + +import pytest +from faker import Faker +from pytest_mock import MockerFixture +from pytest_simcore.helpers.comparing import ( + assert_same_contents, + get_files_info_from_path, + get_relative_to, +) +from servicelib.archiving_utils import unarchive_dir +from servicelib.bytes_iters import ( + ArchiveEntries, + DiskStreamReader, + DiskStreamWriter, + get_zip_bytes_iter, +) +from servicelib.file_utils import remove_directory +from servicelib.progress_bar import ProgressBarData +from servicelib.s3_utils import FileLikeBytesIterReader + + +def _ensure_dir(path: Path) -> Path: + path.mkdir(parents=True, exist_ok=True) + assert path.exists() + assert path.is_dir() + return path + + +@pytest.fixture +def local_files_dir(tmp_path: Path) -> Path: + # Cotent to add to the zip + return _ensure_dir(tmp_path / "local_files_dir") + + +@pytest.fixture +def local_archive_path(tmp_path: Path) -> Path: + # local destination of archive (either form S3 or archived locally) + return tmp_path / "archive.zip" + + +@pytest.fixture +def local_unpacked_archive(tmp_path: Path) -> Path: + # contents of unpacked archive + return _ensure_dir(tmp_path / "unpacked_archive") + + +def _rand_range(lower: int, upper: int) -> int: + return secrets.randbelow(upper) + (upper - lower) + 1 + + +def _generate_files_in_path(faker: Faker, base_dir: Path, *, prefix: str = "") -> None: + # mixed small text files and binary files + (base_dir / "empty").mkdir() + + (base_dir / "d1").mkdir() + for i in range(_rand_range(10, 40)): + (base_dir / "d1" / f"{prefix}f{i}.txt").write_text(faker.text()) + (base_dir / "d1" / f"{prefix}b{i}.bin").write_bytes(faker.json_bytes()) + + (base_dir / "d1" / "sd1").mkdir() + for i in range(_rand_range(10, 40)): + (base_dir / "d1" / "sd1" / f"{prefix}f{i}.txt").write_text(faker.text()) + (base_dir / "d1" / "sd1" / f"{prefix}b{i}.bin").write_bytes(faker.json_bytes()) + + (base_dir / "fancy-names").mkdir() + for fancy_name in ( + "i have some spaces in my name", + "(%$)&%$()", + " ", + ): + (base_dir / "fancy-names" / fancy_name).write_text(faker.text()) + + +@pytest.fixture +async def prepare_content(local_files_dir: Path, faker: Faker) -> AsyncIterable[None]: + _generate_files_in_path(faker, local_files_dir, prefix="local_") + yield + await remove_directory(local_files_dir, only_children=True) + + +@pytest.fixture +def mocked_progress_bar_cb(mocker: MockerFixture) -> Mock: + def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + + return mocker.Mock(side_effect=_progress_cb) + + +@pytest.mark.parametrize("use_file_like", [True, False]) +async def test_get_zip_bytes_iter( + mocked_progress_bar_cb: Mock, + prepare_content: None, + local_files_dir: Path, + local_archive_path: Path, + local_unpacked_archive: Path, + use_file_like: bool, +): + # 1. 
generate archive form soruces + archive_files: ArchiveEntries = [] + for file in (x for x in local_files_dir.rglob("*") if x.is_file()): + archive_name = get_relative_to(local_files_dir, file) + + archive_files.append( + (archive_name, DiskStreamReader(file).get_bytes_streamer()) + ) + + writer = DiskStreamWriter(local_archive_path) + + async with ProgressBarData( + num_steps=1, + progress_report_cb=mocked_progress_bar_cb, + description="root_bar", + ) as root: + bytes_iter = get_zip_bytes_iter( + archive_files, progress_bar=root, chunk_size=1024 + ) + + if use_file_like: + await writer.write_from_file_like(FileLikeBytesIterReader(bytes_iter)) + else: + await writer.write_from_bytes_iter(bytes_iter) + + # 2. extract archive using exiting tools + await unarchive_dir(local_archive_path, local_unpacked_archive) + + # 3. compare files in directories (same paths & sizes) + await assert_same_contents( + get_files_info_from_path(local_files_dir), + get_files_info_from_path(local_unpacked_archive), + ) diff --git a/packages/service-library/tests/test_decorators.py b/packages/service-library/tests/test_decorators.py index dcdf359af4a..c8e368a57a3 100644 --- a/packages/service-library/tests/test_decorators.py +++ b/packages/service-library/tests/test_decorators.py @@ -2,16 +2,17 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name + from servicelib.decorators import safe_return def test_safe_return_decorator(): - class MyException(Exception): + class AnError(Exception): pass - @safe_return(if_fails_return=False, catch=(MyException,), logger=None) + @safe_return(if_fails_return=False, catch=(AnError,), logger=None) def raise_my_exception(): - raise MyException() + raise AnError assert not raise_my_exception() @@ -19,9 +20,10 @@ def raise_my_exception(): def test_safe_return_mutables(): some_mutable_return = ["some", "defaults"] - @safe_return(if_fails_return=some_mutable_return) + @safe_return(if_fails_return=some_mutable_return) # type: ignore def return_mutable(): - raise RuntimeError("Runtime is default") + msg = "Runtime is default" + raise RuntimeError(msg) assert return_mutable() == some_mutable_return # contains the same - assert not (return_mutable() is some_mutable_return) # but is not the same + assert return_mutable() is not some_mutable_return # but is not the same diff --git a/packages/service-library/tests/test_docker_utils.py b/packages/service-library/tests/test_docker_utils.py index d799aea46d0..d8c4e3d2ea5 100644 --- a/packages/service-library/tests/test_docker_utils.py +++ b/packages/service-library/tests/test_docker_utils.py @@ -1,3 +1,4 @@ +# pylint: disable=protected-access from datetime import datetime, timezone import pytest diff --git a/packages/service-library/tests/test_error_codes.py b/packages/service-library/tests/test_error_codes.py deleted file mode 100644 index 0d88fa978ba..00000000000 --- a/packages/service-library/tests/test_error_codes.py +++ /dev/null @@ -1,54 +0,0 @@ -# pylint: disable=broad-except -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import logging - -from servicelib.error_codes import create_error_code, parse_error_code - -logger = logging.getLogger(__name__) - - -def test_error_code_use_case(caplog): - """use case for error-codes""" - try: - raise RuntimeError("Something unexpected went wrong") - except Exception as err: - # 1. Unexpected ERROR - - # 2. create error-code - error_code = create_error_code(err) - - # 3. 
log all details in service - caplog.clear() - - # Can add a formatter that prefix error-codes - syslog = logging.StreamHandler() - syslog.setFormatter( - logging.Formatter("%(asctime)s %(error_code)s : %(message)s") - ) - logger.addHandler(syslog) - - logger.error("Fake Unexpected error", extra={"error_code": error_code}) - - # logs something like E.g. 2022-07-06 14:31:13,432 OEC:140350117529856 : Fake Unexpected error - assert parse_error_code( - f"2022-07-06 14:31:13,432 {error_code} : Fake Unexpected error" - ) == { - error_code, - } - - assert caplog.records[0].error_code == error_code - assert caplog.records[0] - - logger.error("Fake without error_code") - - # 4. inform user (e.g. with new error or sending message) - user_message = ( - f"This is a user-friendly message to inform about an error. [{error_code}]" - ) - - assert parse_error_code(user_message) == { - error_code, - } diff --git a/packages/service-library/tests/test_exception_utils.py b/packages/service-library/tests/test_exception_utils.py index 299855e8241..a884d3dafb1 100644 --- a/packages/service-library/tests/test_exception_utils.py +++ b/packages/service-library/tests/test_exception_utils.py @@ -4,7 +4,7 @@ import pytest from pydantic import PositiveFloat, PositiveInt -from servicelib.exception_utils import DelayedExceptionHandler +from servicelib.exception_utils import DelayedExceptionHandler, silence_exceptions TOLERANCE: Final[PositiveFloat] = 0.1 SLEEP_FOR: Final[PositiveFloat] = TOLERANCE * 0.1 @@ -49,3 +49,60 @@ def test_workflow_passes() -> None: def test_workflow_raises() -> None: with pytest.raises(TargetException): workflow(stop_raising_after=ITERATIONS + 1) + + +# Define some custom exceptions for testing +class CustomError(Exception): + pass + + +class AnotherCustomError(Exception): + pass + + +@silence_exceptions((CustomError,)) +def sync_function(*, raise_error: bool, raise_another_error: bool) -> str: + if raise_error: + raise CustomError + if raise_another_error: + raise AnotherCustomError + return "Success" + + +@silence_exceptions((CustomError,)) +async def async_function(*, raise_error: bool, raise_another_error: bool) -> str: + if raise_error: + raise CustomError + if raise_another_error: + raise AnotherCustomError + return "Success" + + +def test_sync_function_no_exception(): + result = sync_function(raise_error=False, raise_another_error=False) + assert result == "Success" + + +def test_sync_function_with_exception_is_silenced(): + result = sync_function(raise_error=True, raise_another_error=False) + assert result is None + + +async def test_async_function_no_exception(): + result = await async_function(raise_error=False, raise_another_error=False) + assert result == "Success" + + +async def test_async_function_with_exception_is_silenced(): + result = await async_function(raise_error=True, raise_another_error=False) + assert result is None + + +def test_sync_function_with_different_exception(): + with pytest.raises(AnotherCustomError): + sync_function(raise_error=False, raise_another_error=True) + + +async def test_async_function_with_different_exception(): + with pytest.raises(AnotherCustomError): + await async_function(raise_error=False, raise_another_error=True) diff --git a/packages/service-library/tests/test_file_utils.py b/packages/service-library/tests/test_file_utils.py index 454106c22b4..b5feff78603 100644 --- a/packages/service-library/tests/test_file_utils.py +++ b/packages/service-library/tests/test_file_utils.py @@ -1,11 +1,14 @@ # pylint: disable=redefined-outer-name # pylint: 
disable=unused-argument +import logging from pathlib import Path import pytest from faker import Faker -from servicelib.file_utils import remove_directory +from servicelib.file_utils import log_directory_changes, remove_directory + +_logger = logging.getLogger(__name__) @pytest.fixture @@ -80,3 +83,60 @@ async def test_remove_not_existing_directory_rasing_error( await remove_directory( path=missing_path, only_children=only_children, ignore_errors=False ) + + +async def test_log_directory_changes(caplog: pytest.LogCaptureFixture, some_dir: Path): + # directory cretion triggers no changes + caplog.clear() + with log_directory_changes(some_dir, _logger, logging.ERROR): + (some_dir / "a-dir").mkdir(parents=True, exist_ok=True) + assert "File changes in path" not in caplog.text + assert "Files added:" not in caplog.text + assert "Files removed:" not in caplog.text + assert "File content changed" not in caplog.text + + # files were added + caplog.clear() + with log_directory_changes(some_dir, _logger, logging.ERROR): + (some_dir / "hoho").touch() + assert "File changes in path" in caplog.text + assert "Files added:" in caplog.text + assert "Files removed:" not in caplog.text + assert "File content changed" not in caplog.text + + # files were removed + caplog.clear() + with log_directory_changes(some_dir, _logger, logging.ERROR): + await remove_directory(path=some_dir) + assert "File changes in path" in caplog.text + assert "Files removed:" in caplog.text + assert "Files added:" not in caplog.text + assert "File content changed" not in caplog.text + + # nothing changed + caplog.clear() + with log_directory_changes(some_dir, _logger, logging.ERROR): + pass + assert caplog.text == "" + + # files added and removed + caplog.clear() + some_dir.mkdir(parents=True, exist_ok=True) + (some_dir / "som_other_file").touch() + with log_directory_changes(some_dir, _logger, logging.ERROR): + (some_dir / "som_other_file").unlink() + (some_dir / "som_other_file_2").touch() + assert "File changes in path" in caplog.text + assert "Files added:" in caplog.text + assert "Files removed:" in caplog.text + assert "File content changed" not in caplog.text + + # file content changed + caplog.clear() + (some_dir / "file_to_change").touch() + with log_directory_changes(some_dir, _logger, logging.ERROR): + (some_dir / "file_to_change").write_text("ab") + assert "File changes in path" in caplog.text + assert "Files added:" not in caplog.text + assert "Files removed:" not in caplog.text + assert "File content changed" in caplog.text diff --git a/packages/service-library/tests/test_functools_utils.py b/packages/service-library/tests/test_functools_utils.py index ccecd4a46b7..0ef3252a991 100644 --- a/packages/service-library/tests/test_functools_utils.py +++ b/packages/service-library/tests/test_functools_utils.py @@ -3,7 +3,6 @@ # pylint: disable=unused-variable import inspect -from typing import Tuple, Union from servicelib.functools_utils import copy_func @@ -11,8 +10,8 @@ def test_copy_functions(): # fixture def original_func( - x: int, y: bool, *, z: Union[str, float, None] = None - ) -> Tuple[int, Union[str, float, None]]: + x: int, y: bool, *, z: str | float | None = None + ) -> tuple[int, str | float | None]: """some doc""" return 2 * x, z if y else "Foo" diff --git a/packages/service-library/tests/test_json_serialization.py b/packages/service-library/tests/test_json_serialization.py deleted file mode 100644 index 1fa608a465e..00000000000 --- a/packages/service-library/tests/test_json_serialization.py +++ /dev/null @@ 
-1,42 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -import json -from typing import Any -from uuid import UUID, uuid4 - -from servicelib.json_serialization import json_dumps - -# - - -def export_uuids_to_str(n: Any): - if isinstance(n, dict): - for k, v in n.items(): - n.update({k: export_uuids_to_str(v)}) - elif isinstance(n, list): - n = [export_uuids_to_str(v) for v in n] - elif isinstance(n, UUID): - return str(n) - return n - - -def test_serialization_of_uuids(fake_data_dict: dict[str, Any]): - - uuid_obj = uuid4() - # NOTE the quotes around expected value - assert json_dumps(uuid_obj) == f'"{uuid_obj}"' - - obj = {"ids": [uuid4() for _ in range(3)]} - dump = json_dumps(obj) - assert json.loads(dump) == export_uuids_to_str(obj) - - -def test_serialization_of_nested_dicts(fake_data_dict: dict[str, Any]): - - obj = {"data": fake_data_dict, "ids": [uuid4() for _ in range(3)]} - - dump = json_dumps(obj) - assert json.loads(dump) == export_uuids_to_str(obj) diff --git a/packages/service-library/tests/test_logging_errors.py b/packages/service-library/tests/test_logging_errors.py new file mode 100644 index 00000000000..ac99c2fd657 --- /dev/null +++ b/packages/service-library/tests/test_logging_errors.py @@ -0,0 +1,72 @@ +# pylint:disable=redefined-outer-name + +import logging + +import pytest +from common_library.error_codes import create_error_code, parse_error_code_parts +from common_library.errors_classes import OsparcErrorMixin +from servicelib.logging_errors import ( + create_troubleshotting_log_kwargs, + create_troubleshotting_log_message, +) + + +def test_create_troubleshotting_log_message(caplog: pytest.LogCaptureFixture): + class MyError(OsparcErrorMixin, RuntimeError): + msg_template = "My error {user_id}" + + with pytest.raises(MyError) as exc_info: + raise MyError(user_id=123, product_name="foo") + + exc = exc_info.value + error_code = create_error_code(exc) + + eoc1_fingerprint, eoc1_snapshot = parse_error_code_parts(error_code) + eoc2_fingerprint, eoc2_snapshot = parse_error_code_parts(exc.error_code()) + + assert eoc1_fingerprint == eoc2_fingerprint + assert eoc1_snapshot <= eoc2_snapshot + + msg = f"Nice message to user [{error_code}]" + + log_msg = create_troubleshotting_log_message( + msg, + error=exc, + error_code=error_code, + error_context=exc.error_context(), + tip="This is a test error", + ) + + log_kwargs = create_troubleshotting_log_kwargs( + msg, + error=exc, + error_code=error_code, + tip="This is a test error", + ) + + assert log_kwargs["msg"] == log_msg + assert log_kwargs["extra"] is not None + assert ( + # pylint: disable=unsubscriptable-object + log_kwargs["extra"].get("log_uid") + == "123" + ), "user_id is injected as extra from context" + + with caplog.at_level(logging.WARNING): + root_logger = logging.getLogger() + root_logger.exception(**log_kwargs) + + # ERROR root:test_logging_utils.py:417 Nice message to user [OEC:126055703573984]. 
+ # { + # "exception_details": "My error 123", + # "error_code": "OEC:126055703573984", + # "context": { + # "user_id": 123, + # "product_name": "foo" + # }, + # "tip": "This is a test error" + # } + + assert error_code in caplog.text + assert "user_id" in caplog.text + assert "product_name" in caplog.text diff --git a/packages/service-library/tests/test_logging_utils.py b/packages/service-library/tests/test_logging_utils.py index c8899e4dc19..d56e07962f2 100644 --- a/packages/service-library/tests/test_logging_utils.py +++ b/packages/service-library/tests/test_logging_utils.py @@ -1,52 +1,412 @@ # pylint:disable=redefined-outer-name +# pylint:disable=unused-argument import logging -from threading import Thread -from typing import Optional +from collections.abc import Iterable +from contextlib import suppress +from pathlib import Path +from typing import Any import pytest -from pytest import LogCaptureFixture -from servicelib.logging_utils import log_decorator -from servicelib.utils import logged_gather +from faker import Faker +from servicelib.logging_utils import ( + LogExtra, + LogLevelInt, + LogMessageStr, + guess_message_log_level, + log_context, + log_decorator, + log_exceptions, + set_parent_module_log_level, +) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) +_ALL_LOGGING_LEVELS = [ + logging.CRITICAL, + logging.ERROR, + logging.WARNING, + logging.INFO, + logging.DEBUG, + logging.NOTSET, +] -@pytest.mark.parametrize("logger", [None, logger]) -@pytest.mark.parametrize("log_traceback", [True, False]) +def _to_level_name(lvl: int) -> str: + return logging.getLevelName(lvl) + + +@pytest.mark.parametrize("logger", [None, _logger]) async def test_error_regression_async_def( - caplog: LogCaptureFixture, logger: Optional[logging.Logger], log_traceback: bool + caplog: pytest.LogCaptureFixture, logger: logging.Logger | None, faker: Faker ): - @log_decorator(logger, log_traceback=log_traceback) - async def _raising_error() -> None: - raise RuntimeError("Raising as expected") + # NOTE: change the log level so that the log is visible + caplog.set_level(logging.INFO) + + @log_decorator(logger, logging.INFO) + async def _not_raising_fct( + argument1: int, argument2: str, *, keyword_arg1: bool, keyword_arg2: str + ) -> int: + assert argument1 is not None + assert argument2 is not None + assert keyword_arg1 is not None + assert keyword_arg2 is not None + return 0 + + @log_decorator(logger, logging.INFO) + async def _raising_error( + argument1: int, argument2: str, *, keyword_arg1: bool, keyword_arg2: str + ) -> None: + assert argument1 is not None + assert argument2 is not None + assert keyword_arg1 is not None + assert keyword_arg2 is not None + msg = "Raising as expected" + raise RuntimeError(msg) + + argument1 = faker.pyint() + argument2 = faker.pystr() + key_argument1 = faker.pybool() + key_argument2 = faker.pystr() + # run function under test: _not_raising_fct ----------------- caplog.clear() + result = await _not_raising_fct( + argument1, argument2, keyword_arg1=key_argument1, keyword_arg2=key_argument2 + ) + assert result == 0 + assert len(caplog.records) == 2 + info_record = caplog.records[0] + assert info_record.levelno == logging.INFO + assert ( + f"{_not_raising_fct.__module__.split('.')[-1]}:{_not_raising_fct.__name__}({argument1!r}, {argument2!r}, keyword_arg1={key_argument1!r}, keyword_arg2={key_argument2!r})" + in info_record.message + ) + return_record = caplog.records[1] + assert return_record.levelno == logging.INFO + assert not return_record.exc_text 
+ assert ( + f"{_not_raising_fct.__module__.split('.')[-1]}:{_not_raising_fct.__name__} returned {result!r}" + in return_record.message + ) - await logged_gather(_raising_error(), reraise=False) + # run function under test: _raising_error ----------------- + caplog.clear() + with pytest.raises(RuntimeError): + await _raising_error( + argument1, argument2, keyword_arg1=key_argument1, keyword_arg2=key_argument2 + ) - if log_traceback: - assert "Traceback" in caplog.text - else: - assert "Traceback" not in caplog.text + assert len(caplog.records) == 2 + info_record = caplog.records[0] + assert info_record.levelno == logging.INFO + assert ( + f"{_raising_error.__module__.split('.')[-1]}:{_raising_error.__name__}({argument1!r}, {argument2!r}, keyword_arg1={key_argument1!r}, keyword_arg2={key_argument2!r})" + in info_record.message + ) + error_record = caplog.records[1] + assert error_record.levelno == logging.INFO + assert error_record.exc_text + assert "Traceback" in error_record.exc_text + + +@pytest.mark.parametrize("logger", [None, _logger]) +def test_error_regression_sync_def( + caplog: pytest.LogCaptureFixture, logger: logging.Logger | None, faker: Faker +): + # NOTE: change the log level so that the log is visible + caplog.set_level(logging.INFO) + @log_decorator(logger, logging.INFO) + def _not_raising_fct( + argument1: int, argument2: str, *, keyword_arg1: bool, keyword_arg2: str + ) -> int: + assert argument1 is not None + assert argument2 is not None + assert keyword_arg1 is not None + assert keyword_arg2 is not None + return 0 -@pytest.mark.parametrize("logger", [None, logger]) -@pytest.mark.parametrize("log_traceback", [True, False]) -async def test_error_regression_def( - caplog: LogCaptureFixture, logger: Optional[logging.Logger], log_traceback: bool + @log_decorator(logger, logging.INFO) + def _raising_error( + argument1: int, argument2: str, *, keyword_arg1: bool, keyword_arg2: str + ) -> None: + assert argument1 is not None + assert argument2 is not None + assert keyword_arg1 is not None + assert keyword_arg2 is not None + msg = "Raising as expected" + raise RuntimeError(msg) + + caplog.clear() + argument1 = faker.pyint() + argument2 = faker.pystr() + key_argument1 = faker.pybool() + key_argument2 = faker.pystr() + + result = _not_raising_fct( + argument1, argument2, keyword_arg1=key_argument1, keyword_arg2=key_argument2 + ) + assert result == 0 + assert len(caplog.records) == 2 + info_record = caplog.records[0] + assert info_record.levelno == logging.INFO + assert ( + f"{_not_raising_fct.__module__.split('.')[-1]}:{_not_raising_fct.__name__}({argument1!r}, {argument2!r}, keyword_arg1={key_argument1!r}, keyword_arg2={key_argument2!r})" + in info_record.message + ) + return_record = caplog.records[1] + assert return_record.levelno == logging.INFO + assert not return_record.exc_text + assert ( + f"{_not_raising_fct.__module__.split('.')[-1]}:{_not_raising_fct.__name__} returned {result!r}" + in return_record.message + ) + + caplog.clear() + with pytest.raises(RuntimeError): + _raising_error( + argument1, argument2, keyword_arg1=key_argument1, keyword_arg2=key_argument2 + ) + + assert len(caplog.records) == 2 + info_record = caplog.records[0] + assert info_record.levelno == logging.INFO + assert ( + f"{_raising_error.__module__.split('.')[-1]}:{_raising_error.__name__}({argument1!r}, {argument2!r}, keyword_arg1={key_argument1!r}, keyword_arg2={key_argument2!r})" + in info_record.message + ) + error_record = caplog.records[1] + assert error_record.levelno == logging.INFO + assert 
error_record.exc_text
+    assert "Traceback" in error_record.exc_text
+
+
+@pytest.mark.parametrize(
+    "message, expected_log_level",
+    [
+        ("", logging.INFO),
+        ("Error: this is an error", logging.ERROR),
+        ("[Error] this is an error", logging.ERROR),
+        ("[Error]: this is an error", logging.ERROR),
+        ("[Err] this is an error", logging.ERROR),
+        ("[Err]: this is an error", logging.ERROR),
+        ("Err: this is an error", logging.ERROR),
+        ("Warning: this is an warning", logging.WARNING),
+        ("[Warning] this is an warning", logging.WARNING),
+        ("[Warning]: this is an warning", logging.WARNING),
+        ("[Warn] this is an warning", logging.WARNING),
+        ("[Warn]: this is an warning", logging.WARNING),
+        ("Warn: this is an warning", logging.WARNING),
+        ("Not a Warn: this is an warning", logging.INFO),
+    ],
+)
+def test_guess_message_log_level(
+    message: LogMessageStr, expected_log_level: LogLevelInt
 ):
-    @log_decorator(logger, log_traceback=log_traceback)
-    def _raising_error() -> None:
-        raise RuntimeError("Raising as expected")
+    assert guess_message_log_level(message) == expected_log_level
+
+@pytest.mark.parametrize("with_log_duration", [True, False])
+def test_log_context_with_log_duration(
+    caplog: pytest.LogCaptureFixture, with_log_duration: bool
+):
     caplog.clear()
-    thread = Thread(target=_raising_error)
-    thread.start()
-    thread.join()
+    with log_context(_logger, logging.ERROR, "test", log_duration=with_log_duration):
+        ...
-    if log_traceback:
-        assert "Traceback" in caplog.text
+    assert all(r.levelno == logging.ERROR for r in caplog.records)
+
+    assert "Starting test ..." in caplog.text
+    if with_log_duration:
+        assert "Finished test in " in caplog.text
     else:
-        assert "Traceback" not in caplog.text
+        assert "Finished test" in caplog.text
+
+
+@pytest.mark.parametrize(
+    "msg, args, extra",
+    [
+        ("nothing", (), None),
+        ("format %s", ("this_arg",), None),
+        ("only extra", (), {"only": "extra"}),
+        ("format %s", ("this_arg",), {"me": "he"}),
+    ],
+)
+def test_log_context(
+    caplog: pytest.LogCaptureFixture,
+    msg: str,
+    args: tuple[Any, ...],
+    extra: LogExtra | None,
+):
+    caplog.clear()
+
+    with log_context(_logger, logging.ERROR, msg, *args, extra=extra):
+        ...
+    assert len(caplog.messages) == 2
+
+
+@pytest.fixture
+def log_format_with_module_name() -> Iterable[None]:
+    for handler in logging.root.handlers:
+        original_formatter = handler.formatter
+        handler.setFormatter(
+            logging.Formatter(
+                "%(asctime)s %(levelname)s %(module)s:%(filename)s:%(lineno)d %(message)s",
+                datefmt="%Y-%m-%d %H:%M:%S",
+            )
+        )
+
+    yield
+
+    for handler in logging.root.handlers:
+        handler.formatter = original_formatter
+
+
+def test_log_context_caller_is_included_in_log(
+    caplog: pytest.LogCaptureFixture,
+    log_format_with_module_name: None,
+):
+    caplog.clear()
+
+    with log_context(_logger, logging.ERROR, "a test message"):
+        ...
+
+    # Verify file name is in the log
+    assert Path(__file__).name in caplog.text
+
+
+@pytest.mark.parametrize("level", _ALL_LOGGING_LEVELS, ids=_to_level_name)
+def test_logs_no_exceptions(caplog: pytest.LogCaptureFixture, level: int):
+    caplog.set_level(level)
+
+    with log_exceptions(_logger, level):
+        ...
+ + assert not caplog.records + + +@pytest.mark.parametrize("level", _ALL_LOGGING_LEVELS, ids=_to_level_name) +def test_log_exceptions_and_suppress(caplog: pytest.LogCaptureFixture, level: int): + caplog.set_level(level) + + exc_msg = "logs exceptions and suppresses" + with suppress(ValueError), log_exceptions(_logger, level, "CONTEXT", exc_info=True): + raise ValueError(exc_msg) + + assert len(caplog.records) == (1 if level != logging.NOTSET else 0) + + if caplog.records: + assert caplog.records[0].levelno == level + record = caplog.records[0] + # this is how it looks with exc_info=True + # + # CRITICAL tests.test_logging_utils:logging_utils.py:170 CONTEXT raised ValueError: logs exceptions and suppresses + # Traceback (most recent call last): + # File "path/to/file.py", line 163, in log_exceptions + # yield + # File "path/to/file2.py", line 262, in test_log_exceptions_and_suppress + # raise ValueError(msg) + # ValueError: logs exceptions and suppresses + # + + assert record.message == f"CONTEXT raised ValueError: {exc_msg}" + # since exc_info=True + assert record.exc_text + assert exc_msg in record.exc_text + assert "ValueError" in record.exc_text + assert "Traceback" in record.exc_text + + +@pytest.mark.parametrize("level", _ALL_LOGGING_LEVELS, ids=_to_level_name) +def test_log_exceptions_and_suppress_without_exc_info( + caplog: pytest.LogCaptureFixture, level: int +): + caplog.set_level(level) + + exc_msg = "logs exceptions and suppresses" + with suppress(ValueError), log_exceptions( + _logger, level, "CONTEXT", exc_info=False + ): + raise ValueError(exc_msg) + + assert len(caplog.records) == (1 if level != logging.NOTSET else 0) + + if caplog.records: + assert caplog.records[0].levelno == level + record = caplog.records[0] + # this is how it looks with exc_info=False + # + # CRITICAL tests.test_logging_utils:logging_utils.py:170 CONTEXT raised ValueError: logs exceptions and suppresses + # + + assert record.message == f"CONTEXT raised ValueError: {exc_msg}" + + # since exc_info=False + assert not record.exc_text + + +@pytest.mark.parametrize("level", _ALL_LOGGING_LEVELS, ids=_to_level_name) +def test_log_exceptions_and_reraise(caplog: pytest.LogCaptureFixture, level: int): + caplog.set_level(level) + + msg = "logs exceptions and reraises" + with pytest.raises(ValueError, match=msg), log_exceptions(_logger, level): + raise ValueError(msg) + + assert len(caplog.records) == (1 if level != logging.NOTSET else 0) + assert all(r.levelno == level for r in caplog.records) + + +def test_set_parent_module_log_level_(caplog: pytest.LogCaptureFixture): + caplog.clear() + # emulates service logger + logging.root.setLevel(logging.WARNING) + + parent = logging.getLogger("parent") + child = logging.getLogger("parent.child") + + assert parent.level == logging.NOTSET + assert child.level == logging.NOTSET + + parent.debug("parent debug") + child.debug("child debug") + + parent.info("parent info") + child.info("child info") + + parent.warning("parent warning") + child.warning("child warning") + + assert "parent debug" not in caplog.text + assert "child debug" not in caplog.text + + assert "parent info" not in caplog.text + assert "child info" not in caplog.text + + assert "parent warning" in caplog.text + assert "child warning" in caplog.text + + caplog.clear() + set_parent_module_log_level("parent.child", logging.INFO) + + assert parent.level == logging.INFO + assert child.level == logging.NOTSET + + parent.debug("parent debug") + child.debug("child debug") + + parent.info("parent info") + 
child.info("child info") + + parent.warning("parent warning") + child.warning("child warning") + + assert "parent debug" not in caplog.text + assert "child debug" not in caplog.text + + assert "parent info" in caplog.text + assert "child info" in caplog.text + + assert "parent warning" in caplog.text + assert "child warning" in caplog.text diff --git a/packages/service-library/tests/test_logging_utils_filtering.py b/packages/service-library/tests/test_logging_utils_filtering.py new file mode 100644 index 00000000000..64084c3204a --- /dev/null +++ b/packages/service-library/tests/test_logging_utils_filtering.py @@ -0,0 +1,99 @@ +# pylint: disable=redefined-outer-name + +import logging +from typing import Generator + +import pytest +from servicelib.logging_utils_filtering import GeneralLogFilter + + +@pytest.fixture +def logger_with_filter() -> Generator[tuple[logging.Logger, list[str]], None, None]: + # Set up a logger for testing + logger = logging.getLogger("uvicorn.access") + logger.setLevel(logging.DEBUG) + + # Create a list to capture log outputs + log_capture = [] + + # Create a handler that appends log messages to the log_capture list + class ListHandler(logging.Handler): + def emit(self, record): + log_capture.append(self.format(record)) + + handler = ListHandler() + logger.addHandler(handler) + + # Set up the filter based on the new logic + filtered_routes = [ + '"GET / HTTP/1.1" 200', + '"GET /metrics HTTP/1.1" 200', + ] + + # Add the GeneralLogFilter to the logger + log_filter = GeneralLogFilter(filtered_routes) + logger.addFilter(log_filter) + + # Return logger and the log_capture for testing + yield logger, log_capture + + # Cleanup: remove handlers and filters after test + logger.handlers.clear() + logger.filters.clear() + + +def test_log_filtered_out(logger_with_filter: tuple[logging.Logger, list[str]]): + logger, log_capture = logger_with_filter + + # Create a log record that should be filtered out (matches the filter criteria) + record = logger.makeRecord( + name="uvicorn.access", + level=logging.INFO, + fn="testfile", + lno=10, + msg='"GET / HTTP/1.1" 200 OK', + args=(), + exc_info=None, + ) + logger.handle(record) + + # Assert no log messages were captured (filtered out) + assert len(log_capture) == 0 + + +def test_log_allowed(logger_with_filter): + logger, log_capture = logger_with_filter + + # Create a log record that should NOT be filtered out (doesn't match any filter criteria) + record = logger.makeRecord( + name="uvicorn.access", + level=logging.INFO, + fn="testfile", + lno=10, + msg='"GET /another HTTP/1.1" 200 OK', + args=(), + exc_info=None, + ) + logger.handle(record) + + # Assert the log message was captured (not filtered out) + assert len(log_capture) == 1 + + +def test_log_with_different_status(logger_with_filter): + logger, log_capture = logger_with_filter + + # Create a log record that has the same route but a different status code (should pass through) + record = logger.makeRecord( + name="uvicorn.access", + level=logging.INFO, + fn="testfile", + lno=10, + msg='"GET / HTTP/1.1" 500 Internal Server Error', + args=(), + exc_info=None, + ) + logger.handle(record) + + # Assert the log message was captured (not filtered out due to different status code) + assert len(log_capture) == 1 diff --git a/packages/service-library/tests/test_observer.py b/packages/service-library/tests/test_observer.py deleted file mode 100644 index c455bdead9c..00000000000 --- a/packages/service-library/tests/test_observer.py +++ /dev/null @@ -1,18 +0,0 @@ -# 
pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - - -from servicelib.observer import emit, observe - - -async def test_observer(mocker): - # register a couroutine as callback function - cb_function = mocker.AsyncMock(return_value=None) - - decorated_fct = observe(event="my_test_event")(cb_function) - - await emit("my_invalid_test_event") - cb_function.assert_not_called() - await emit("my_test_event") - cb_function.assert_called() diff --git a/packages/service-library/tests/test_package.py b/packages/service-library/tests/test_package.py index a77469a9ea4..7826d592e43 100644 --- a/packages/service-library/tests/test_package.py +++ b/packages/service-library/tests/test_package.py @@ -3,15 +3,12 @@ # pylint: disable=unused-variable # pylint: disable=too-many-arguments +from pathlib import Path + from servicelib.utils import is_osparc_repo_dir, search_osparc_repo_dir -def test_utils(osparc_simcore_root_dir, package_dir): +def test_utils(osparc_simcore_root_dir: Path): assert is_osparc_repo_dir(osparc_simcore_root_dir) - assert search_osparc_repo_dir(osparc_simcore_root_dir) == osparc_simcore_root_dir - - # assert not search_osparc_repo_dir(package_dir), "package is installed, should not be in osparc-repo" - # assert search_osparc_repo_dir(package_dir) == osparc_simcore_root_dir, "in develop mode" - assert not search_osparc_repo_dir(osparc_simcore_root_dir.parent) diff --git a/packages/service-library/tests/test_pools.py b/packages/service-library/tests/test_pools.py index 13c62ad0a3a..1604ba10147 100644 --- a/packages/service-library/tests/test_pools.py +++ b/packages/service-library/tests/test_pools.py @@ -1,4 +1,4 @@ -from asyncio import BaseEventLoop +import asyncio from concurrent.futures import ProcessPoolExecutor from servicelib.pools import ( @@ -11,17 +11,25 @@ def return_int_one() -> int: return 1 -async def test_default_thread_pool_executor(event_loop: BaseEventLoop) -> None: - assert await event_loop.run_in_executor(None, return_int_one) == 1 +async def test_default_thread_pool_executor() -> None: + assert await asyncio.get_running_loop().run_in_executor(None, return_int_one) == 1 -async def test_blocking_process_pool_executor(event_loop: BaseEventLoop) -> None: - assert await event_loop.run_in_executor(ProcessPoolExecutor(), return_int_one) == 1 +async def test_blocking_process_pool_executor() -> None: + assert ( + await asyncio.get_running_loop().run_in_executor( + ProcessPoolExecutor(), return_int_one + ) + == 1 + ) -async def test_non_blocking_process_pool_executor(event_loop: BaseEventLoop) -> None: +async def test_non_blocking_process_pool_executor() -> None: with non_blocking_process_pool_executor() as executor: - assert await event_loop.run_in_executor(executor, return_int_one) == 1 + assert ( + await asyncio.get_running_loop().run_in_executor(executor, return_int_one) + == 1 + ) async def test_same_pool_instances() -> None: @@ -36,9 +44,12 @@ async def test_different_pool_instances() -> None: assert first != second -async def test_non_blocking_thread_pool_executor(event_loop: BaseEventLoop) -> None: +async def test_non_blocking_thread_pool_executor() -> None: with non_blocking_thread_pool_executor() as executor: - assert await event_loop.run_in_executor(executor, return_int_one) == 1 + assert ( + await asyncio.get_running_loop().run_in_executor(executor, return_int_one) + == 1 + ) async def test_same_thread_pool_instances() -> None: diff --git a/packages/service-library/tests/test_progress_bar.py 
b/packages/service-library/tests/test_progress_bar.py index 274ff5d6455..e99516cac98 100644 --- a/packages/service-library/tests/test_progress_bar.py +++ b/packages/service-library/tests/test_progress_bar.py @@ -5,65 +5,532 @@ # pylint: disable=protected-access import asyncio +from unittest import mock import pytest -from servicelib.progress_bar import ProgressBarData +from faker import Faker +from models_library.progress_bar import ProgressReport, ProgressStructuredMessage +from pydantic import ValidationError +from pytest_mock import MockerFixture +from servicelib.progress_bar import ( + _INITIAL_VALUE, + _MIN_PROGRESS_UPDATE_PERCENT, + _PROGRESS_ALREADY_REACGED_MAXIMUM, + ProgressBarData, +) -async def test_progress_bar(): - async with ProgressBarData(steps=2) as root: - assert root._continuous_progress_value == pytest.approx(0) - assert root.steps == 2 - async with root.sub_progress(steps=50) as sub: - for _ in range(50): +@pytest.fixture +def mocked_progress_bar_cb(mocker: MockerFixture) -> mock.Mock: + def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + + return mocker.Mock(side_effect=_progress_cb) + + +@pytest.fixture +def async_mocked_progress_bar_cb(mocker: MockerFixture) -> mock.AsyncMock: + async def _progress_cb(*args, **kwargs) -> None: + print(f"received progress: {args}, {kwargs}") + + return mocker.AsyncMock(side_effect=_progress_cb) + + +@pytest.mark.parametrize( + "progress_report_cb_type", + ["mocked_progress_bar_cb", "async_mocked_progress_bar_cb"], +) +async def test_progress_bar_progress_report_cb( + progress_report_cb_type: str, + mocked_progress_bar_cb: mock.Mock, + async_mocked_progress_bar_cb: mock.AsyncMock, + faker: Faker, +): + mocked_cb: mock.Mock | mock.AsyncMock = { + "mocked_progress_bar_cb": mocked_progress_bar_cb, + "async_mocked_progress_bar_cb": async_mocked_progress_bar_cb, + }[progress_report_cb_type] + outer_num_steps = 3 + async with ProgressBarData( + num_steps=outer_num_steps, + progress_report_cb=mocked_cb, + progress_unit="Byte", + description=faker.pystr(), + ) as root: + assert root.num_steps == outer_num_steps + assert root.step_weights is None # i.e. 
all steps have equal weight + assert root._current_steps == pytest.approx(0) # noqa: SLF001 + mocked_cb.assert_called_once_with( + ProgressReport( + actual_value=0, + total=outer_num_steps, + unit="Byte", + message=ProgressStructuredMessage( + description=root.description, + current=0.0, + total=outer_num_steps, + unit="Byte", + sub=None, + ), + ) + ) + mocked_cb.reset_mock() + # first step is done right away + await root.update() + assert root._current_steps == pytest.approx(1) # noqa: SLF001 + mocked_cb.assert_called_once_with( + ProgressReport( + actual_value=1, + total=outer_num_steps, + unit="Byte", + message=ProgressStructuredMessage( + description=root.description, + current=1.0, + total=outer_num_steps, + unit="Byte", + sub=None, + ), + ) + ) + mocked_cb.reset_mock() + + # 2nd step is a sub progress bar of 10 steps + inner_num_steps_step2 = 100 + async with root.sub_progress( + steps=inner_num_steps_step2, description=faker.pystr() + ) as sub: + assert sub._current_steps == pytest.approx(0) # noqa: SLF001 + assert root._current_steps == pytest.approx(1) # noqa: SLF001 + for i in range(inner_num_steps_step2): await sub.update() - assert root._continuous_progress_value == pytest.approx(1) - assert root.steps == 2 - async with root.sub_progress(steps=50) as sub: - for _ in range(50): + assert sub._current_steps == pytest.approx(float(i + 1)) # noqa: SLF001 + assert root._current_steps == pytest.approx( # noqa: SLF001 + 1 + float(i + 1) / float(inner_num_steps_step2) + ) + assert sub._current_steps == pytest.approx( # noqa: SLF001 + inner_num_steps_step2 + ) + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + mocked_cb.assert_called() + assert mocked_cb.call_args_list[-1].args[0].percent_value == pytest.approx( + 2 / 3 + ) + for call_index, call in enumerate(mocked_cb.call_args_list[1:-1]): + assert ( + call.args[0].percent_value + - mocked_cb.call_args_list[call_index].args[0].percent_value + ) > _MIN_PROGRESS_UPDATE_PERCENT + + mocked_cb.reset_mock() + + # 3rd step is another subprogress of 50 steps + inner_num_steps_step3 = 50 + async with root.sub_progress( + steps=inner_num_steps_step3, description=faker.pystr() + ) as sub: + assert sub._current_steps == pytest.approx(0) # noqa: SLF001 + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + for i in range(inner_num_steps_step3): await sub.update() - assert root._continuous_progress_value == pytest.approx(2) - assert root.steps == 2 + assert sub._current_steps == pytest.approx(float(i + 1)) # noqa: SLF001 + assert root._current_steps == pytest.approx( # noqa: SLF001 + 2 + float(i + 1) / float(inner_num_steps_step3) + ) + assert sub._current_steps == pytest.approx( # noqa: SLF001 + inner_num_steps_step3 + ) + assert root._current_steps == pytest.approx(3) # noqa: SLF001 + mocked_cb.assert_called() + assert mocked_cb.call_args_list[-1].args[0].percent_value == 1.0 + mocked_cb.reset_mock() + + +def test_creating_progress_bar_with_invalid_unit_fails(faker: Faker): + with pytest.raises(ValidationError): + ProgressBarData( + num_steps=321, progress_unit="invalid", description=faker.pystr() + ) + + +async def test_progress_bar_always_reports_0_on_creation_and_1_on_finish( + mocked_progress_bar_cb: mock.Mock, faker: Faker +): + num_steps = 156587 + progress_bar = ProgressBarData( + num_steps=num_steps, + progress_report_cb=mocked_progress_bar_cb, + description=faker.pystr(), + ) + assert progress_bar._current_steps == _INITIAL_VALUE # noqa: SLF001 + async with progress_bar as root: + assert root is progress_bar + 
assert root._current_steps == 0 # noqa: SLF001 + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=0, + total=num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=0.0, + total=num_steps, + unit=None, + sub=None, + ), + ) + ) + + # going out of scope always updates to final number of steps + assert progress_bar._current_steps == num_steps # noqa: SLF001 + assert mocked_progress_bar_cb.call_args_list[-1] == mock.call( + ProgressReport( + actual_value=num_steps, + total=num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=num_steps, + total=num_steps, + unit=None, + sub=None, + ), + ) + ) + + +async def test_progress_bar_always_reports_1_on_finish( + mocked_progress_bar_cb: mock.Mock, faker: Faker +): + num_steps = 156587 + chunks = 123.3 + + num_chunked_steps = int(num_steps / chunks) + last_step = num_steps % chunks + progress_bar = ProgressBarData( + num_steps=num_steps, + progress_report_cb=mocked_progress_bar_cb, + description=faker.pystr(), + ) + assert progress_bar._current_steps == _INITIAL_VALUE # noqa: SLF001 + async with progress_bar as root: + assert root is progress_bar + assert root._current_steps == 0 # noqa: SLF001 + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=0, + total=num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=0, + total=num_steps, + unit=None, + sub=None, + ), + ) + ) + for _ in range(num_chunked_steps): + await root.update(chunks) + await root.update(last_step) + assert progress_bar._current_steps == pytest.approx(num_steps) # noqa: SLF001 + + # going out of scope always updates to final number of steps + assert progress_bar._current_steps == pytest.approx(num_steps) # noqa: SLF001 + assert mocked_progress_bar_cb.call_args_list[-1] == mock.call( + ProgressReport( + actual_value=num_steps, + total=num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=num_steps, + total=num_steps, + unit=None, + sub=None, + ), + ) + ) + + +async def test_set_progress(caplog: pytest.LogCaptureFixture, faker: Faker): + async with ProgressBarData(num_steps=50, description=faker.pystr()) as root: + assert root._current_steps == pytest.approx(0) # noqa: SLF001 + assert root.num_steps == 50 + assert root.step_weights is None + await root.set_(13) + assert root._current_steps == pytest.approx(13) # noqa: SLF001 + await root.set_(34) + assert root._current_steps == pytest.approx(34) # noqa: SLF001 + await root.set_(58) + assert root._current_steps == pytest.approx(50) # noqa: SLF001 + assert "WARNING" in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM in caplog.messages[0] + assert "TIP:" in caplog.messages[0] + +async def test_reset_progress(caplog: pytest.LogCaptureFixture, faker: Faker): + async with ProgressBarData(num_steps=50, description=faker.pystr()) as root: + assert root._current_steps == pytest.approx(0) # noqa: SLF001 + assert root.num_steps == 50 + assert root.step_weights is None + await root.set_(50) + assert root._current_steps == pytest.approx(50) # noqa: SLF001 + assert "WARNING" not in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM not in caplog.text + await root.set_(51) + assert root._current_steps == pytest.approx(50) # noqa: SLF001 + assert "WARNING" in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM in caplog.text -async def test_concurrent_progress_bar(): + caplog.clear() + root.reset() + + assert root._current_steps == 
pytest.approx(-1) # noqa: SLF001 + assert "WARNING" not in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM not in caplog.text + + await root.set_(12) + assert root._current_steps == pytest.approx(12) # noqa: SLF001 + assert "WARNING" not in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM not in caplog.text + + await root.set_(51) + assert root._current_steps == pytest.approx(50) # noqa: SLF001 + assert "WARNING" in caplog.text + assert _PROGRESS_ALREADY_REACGED_MAXIMUM in caplog.text + + +async def test_concurrent_progress_bar(faker: Faker): async def do_something(root: ProgressBarData): - async with root.sub_progress(steps=50) as sub: - assert sub.steps == 50 - assert sub._continuous_progress_value == 0 + async with root.sub_progress(steps=50, description=faker.pystr()) as sub: + assert sub.num_steps == 50 + assert sub.step_weights is None + assert sub._current_steps == 0 # noqa: SLF001 for n in range(50): await sub.update() - assert sub._continuous_progress_value == (n + 1) + assert sub._current_steps == (n + 1) # noqa: SLF001 - async with ProgressBarData(steps=12) as root: - assert root._continuous_progress_value == pytest.approx(0) - assert root.steps == 12 + async with ProgressBarData(num_steps=12, description=faker.pystr()) as root: + assert root._current_steps == pytest.approx(0) # noqa: SLF001 + assert root.step_weights is None await asyncio.gather(*[do_something(root) for n in range(12)]) - assert root._continuous_progress_value == pytest.approx(12) + assert root._current_steps == pytest.approx(12) # noqa: SLF001 -async def test_too_many_sub_progress_bars_raises(): - async with ProgressBarData(steps=2) as root: - assert root.steps == 2 - async with root.sub_progress(steps=50) as sub: +async def test_too_many_sub_progress_bars_raises(faker: Faker): + async with ProgressBarData(num_steps=2, description=faker.pystr()) as root: + assert root.num_steps == 2 + assert root.step_weights is None + async with root.sub_progress(steps=50, description=faker.pystr()) as sub: for _ in range(50): await sub.update() - async with root.sub_progress(steps=50) as sub: + async with root.sub_progress(steps=50, description=faker.pystr()) as sub: for _ in range(50): await sub.update() + with pytest.raises(RuntimeError): - async with root.sub_progress(steps=50) as sub: - for _ in range(50): - await sub.update() + async with root.sub_progress(steps=50, description=faker.pystr()) as sub: + ... 
async def test_too_many_updates_does_not_raise_but_show_warning_with_stack( - caplog: pytest.LogCaptureFixture, + caplog: pytest.LogCaptureFixture, faker: Faker ): - async with ProgressBarData(steps=2) as root: - assert root.steps == 2 + async with ProgressBarData(num_steps=2, description=faker.pystr()) as root: + assert root.num_steps == 2 + assert root.step_weights is None await root.update() await root.update() await root.update() - assert "already reached maximum" in caplog.messages[0] + assert _PROGRESS_ALREADY_REACGED_MAXIMUM in caplog.messages[0] assert "TIP:" in caplog.messages[0] + + +async def test_weighted_progress_bar(mocked_progress_bar_cb: mock.Mock, faker: Faker): + outer_num_steps = 3 + async with ProgressBarData( + num_steps=outer_num_steps, + step_weights=[1, 3, 1], + progress_report_cb=mocked_progress_bar_cb, + description=faker.pystr(), + ) as root: + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=0, + total=outer_num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=0, + total=outer_num_steps, + unit=None, + sub=None, + ), + ) + ) + mocked_progress_bar_cb.reset_mock() + assert root.step_weights == [1 / 5, 3 / 5, 1 / 5, 0] + await root.update() + assert mocked_progress_bar_cb.call_args.args[0].percent_value == pytest.approx( + 1 / 5 + ) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(1) # noqa: SLF001 + await root.update() + assert mocked_progress_bar_cb.call_args.args[0].percent_value == pytest.approx( + 1 / 5 + 3 / 5 + ) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=outer_num_steps, + total=outer_num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=outer_num_steps, + total=outer_num_steps, + unit=None, + sub=None, + ), + ) + ) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(3) # noqa: SLF001 + + +async def test_weighted_progress_bar_with_weighted_sub_progress( + mocked_progress_bar_cb: mock.Mock, faker: Faker +): + outer_num_steps = 3 + async with ProgressBarData( + num_steps=outer_num_steps, + step_weights=[1, 3, 1], + progress_report_cb=mocked_progress_bar_cb, + description=faker.pystr(), + ) as root: + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=0, + total=outer_num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=0, + total=outer_num_steps, + unit=None, + sub=None, + ), + ) + ) + mocked_progress_bar_cb.reset_mock() + assert root.step_weights == [1 / 5, 3 / 5, 1 / 5, 0] + # first step + await root.update() + assert mocked_progress_bar_cb.call_args.args[0].percent_value == pytest.approx( + 1 / 5 + ) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(1) # noqa: SLF001 + + # 2nd step is a sub progress bar of 5 steps + async with root.sub_progress( + steps=5, step_weights=[2, 5, 1, 2, 3], description=faker.pystr() + ) as sub: + assert sub.step_weights == [2 / 13, 5 / 13, 1 / 13, 2 / 13, 3 / 13, 0] + assert sub._current_steps == pytest.approx(0) # noqa: SLF001 + assert root._current_steps == pytest.approx(1) # noqa: SLF001 + # sub steps + # 1 + await sub.update() + assert sub._current_steps == pytest.approx(1) # noqa: SLF001 + assert root._current_steps == pytest.approx(1 + 2 / 13) # noqa: SLF001 + # 2 + await sub.update() + assert sub._current_steps 
== pytest.approx(2) # noqa: SLF001 + assert root._current_steps == pytest.approx( # noqa: SLF001 + 1 + 2 / 13 + 5 / 13 + ) + # 3 + await sub.update() + assert sub._current_steps == pytest.approx(3) # noqa: SLF001 + assert root._current_steps == pytest.approx( # noqa: SLF001 + 1 + 2 / 13 + 5 / 13 + 1 / 13 + ) + # 4 + await sub.update() + assert sub._current_steps == pytest.approx(4) # noqa: SLF001 + assert root._current_steps == pytest.approx( # noqa: SLF001 + 1 + 2 / 13 + 5 / 13 + 1 / 13 + 2 / 13 + ) + # 5 + await sub.update() + assert sub._current_steps == pytest.approx(5) # noqa: SLF001 + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + mocked_progress_bar_cb.assert_called() + assert mocked_progress_bar_cb.call_count == 5 + assert mocked_progress_bar_cb.call_args_list[4].args[ + 0 + ].percent_value == pytest.approx(1 / 5 + 3 / 5) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(2) # noqa: SLF001 + mocked_progress_bar_cb.assert_called_once_with( + ProgressReport( + actual_value=outer_num_steps, + total=outer_num_steps, + message=ProgressStructuredMessage( + description=root.description, + current=outer_num_steps, + total=outer_num_steps, + unit=None, + sub=None, + ), + ) + ) + mocked_progress_bar_cb.reset_mock() + assert root._current_steps == pytest.approx(3) # noqa: SLF001 + + +async def test_weighted_progress_bar_wrong_num_weights_raises(faker: Faker): + with pytest.raises(RuntimeError): + async with ProgressBarData( + num_steps=3, step_weights=[3, 1], description=faker.pystr() + ): + ... + + +async def test_weighted_progress_bar_with_0_weights_is_equivalent_to_standard_progress_bar( + faker: Faker, +): + async with ProgressBarData( + num_steps=3, step_weights=[0, 0, 0], description=faker.pystr() + ) as root: + assert root.step_weights == [1, 1, 1, 0] + + +@pytest.mark.xfail(reason="show how to not use the progress bar") +async def test_concurrent_sub_progress_update_correct_sub_progress( + mocked_progress_bar_cb: mock.Mock, faker: Faker +): + async with ProgressBarData( + num_steps=3, + step_weights=[3, 1, 2], + progress_report_cb=mocked_progress_bar_cb, + description=faker.pystr(), + ) as root: + sub_progress1 = root.sub_progress(23, description=faker.pystr()) + assert sub_progress1._current_steps == _INITIAL_VALUE # noqa: SLF001 + sub_progress2 = root.sub_progress(45, description=faker.pystr()) + assert sub_progress2._current_steps == _INITIAL_VALUE # noqa: SLF001 + sub_progress3 = root.sub_progress(12, description=faker.pystr()) + assert sub_progress3._current_steps == _INITIAL_VALUE # noqa: SLF001 + + # NOTE: in a gather call there is no control on which step finishes first + + assert root._current_steps == 0 # noqa: SLF001 + # complete last progress + async with sub_progress3: + ... 
+ # so sub 3 is done here + assert sub_progress3._current_steps == 12 # noqa: SLF001 + assert mocked_progress_bar_cb.call_count == 2 + assert mocked_progress_bar_cb.call_args.args[0].percent_value == pytest.approx( + 2 / 6 + ) diff --git a/packages/service-library/tests/test_rabbitmq.py b/packages/service-library/tests/test_rabbitmq.py deleted file mode 100644 index 12a683e4021..00000000000 --- a/packages/service-library/tests/test_rabbitmq.py +++ /dev/null @@ -1,280 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=protected-access - - -import asyncio -from typing import AsyncIterator, Callable -from unittest import mock - -import docker -import pytest -from faker import Faker -from pytest_mock.plugin import MockerFixture -from servicelib.rabbitmq import RabbitMQClient -from settings_library.rabbit import RabbitSettings -from tenacity._asyncio import AsyncRetrying -from tenacity.retry import retry_if_exception_type -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_simcore_core_services_selection = [ - "rabbit", -] - - -@pytest.fixture -def rabbit_client_name(faker: Faker) -> str: - return faker.pystr() - - -async def test_rabbit_client(rabbit_client_name: str, rabbit_service: RabbitSettings): - client = RabbitMQClient(rabbit_client_name, rabbit_service) - assert client - # check it is correctly initialized - assert client._connection_pool - assert not client._connection_pool.is_closed - assert client._channel_pool - assert not client._channel_pool.is_closed - assert client.client_name == rabbit_client_name - assert client.settings == rabbit_service - await client.close() - assert client._connection_pool - assert client._connection_pool.is_closed - assert client._channel_pool - assert client._channel_pool.is_closed - - -@pytest.fixture -async def rabbitmq_client( - rabbit_service: RabbitSettings, -) -> AsyncIterator[Callable[[str], RabbitMQClient]]: - created_clients = [] - - def _creator(client_name: str) -> RabbitMQClient: - client = RabbitMQClient(f"pytest_{client_name}", rabbit_service) - assert client - assert client._connection_pool - assert not client._connection_pool.is_closed - assert client._channel_pool - assert not client._channel_pool.is_closed - assert client.client_name == f"pytest_{client_name}" - assert client.settings == rabbit_service - created_clients.append(client) - return client - - yield _creator - # cleanup, properly close the clients - await asyncio.gather(*(client.close() for client in created_clients)) - for client in created_clients: - assert client._channel_pool - assert client._channel_pool.is_closed - - -@pytest.fixture -def random_exchange_name(faker: Faker) -> Callable[[], str]: - def _creator() -> str: - return f"pytest_fake_exchange_{faker.pystr()}" - - return _creator - - -async def _assert_message_received( - mocked_message_parser: mock.AsyncMock, - expected_call_count: int, - expected_message: str, -) -> None: - async for attempt in AsyncRetrying( - wait=wait_fixed(0.1), - stop=stop_after_delay(5), - retry=retry_if_exception_type(AssertionError), - reraise=True, - ): - with attempt: - # NOTE: this sleep is here to ensure that there are not multiple messages coming in - await asyncio.sleep(1) - assert mocked_message_parser.call_count == expected_call_count - if expected_call_count == 1: - mocked_message_parser.assert_called_once_with(expected_message.encode()) - elif expected_call_count == 0: - 
mocked_message_parser.assert_not_called() - else: - mocked_message_parser.assert_called_with(expected_message.encode()) - - -async def test_rabbit_client_pub_sub_message_is_lost_if_no_consumer_present( - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, -): - consumer = rabbitmq_client("consumer") - publisher = rabbitmq_client("publisher") - - message = faker.text() - - mocked_message_parser = mocker.AsyncMock(return_value=True) - exchange_name = random_exchange_name() - await publisher.publish(exchange_name, message) - await asyncio.sleep(0) # ensure context switch - await consumer.subscribe(exchange_name, mocked_message_parser) - await _assert_message_received(mocked_message_parser, 0, "") - - -async def test_rabbit_client_pub_sub( - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, -): - consumer = rabbitmq_client("consumer") - publisher = rabbitmq_client("publisher") - - message = faker.text() - - mocked_message_parser = mocker.AsyncMock(return_value=True) - exchange_name = random_exchange_name() - await consumer.subscribe(exchange_name, mocked_message_parser) - await publisher.publish(exchange_name, message) - await _assert_message_received(mocked_message_parser, 1, message) - - -@pytest.mark.parametrize("num_subs", [10]) -async def test_rabbit_client_pub_many_subs( - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, - num_subs: int, -): - consumers = (rabbitmq_client(f"consumer_{n}") for n in range(num_subs)) - mocked_message_parsers = [ - mocker.AsyncMock(return_value=True) for _ in range(num_subs) - ] - - publisher = rabbitmq_client("publisher") - message = faker.text() - exchange_name = random_exchange_name() - await asyncio.gather( - *( - consumer.subscribe(exchange_name, parser) - for consumer, parser in zip(consumers, mocked_message_parsers) - ) - ) - - await publisher.publish(exchange_name, message) - await asyncio.gather( - *( - _assert_message_received(parser, 1, message) - for parser in mocked_message_parsers - ) - ) - - -async def test_rabbit_client_pub_sub_republishes_if_exception_raised( - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, -): - publisher = rabbitmq_client("publisher") - consumer = rabbitmq_client("consumer") - - message = faker.text() - - def _raise_once_then_true(*args, **kwargs): - _raise_once_then_true.calls += 1 - - if _raise_once_then_true.calls == 1: - raise KeyError("this is a test!") - if _raise_once_then_true.calls == 2: - return False - return True - - exchange_name = random_exchange_name() - _raise_once_then_true.calls = 0 - mocked_message_parser = mocker.AsyncMock(side_effect=_raise_once_then_true) - await consumer.subscribe(exchange_name, mocked_message_parser) - await publisher.publish(exchange_name, message) - await _assert_message_received(mocked_message_parser, 3, message) - - -@pytest.mark.parametrize("num_subs", [10]) -async def test_pub_sub_with_non_exclusive_queue( - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, - num_subs: int, -): - consumers = (rabbitmq_client(f"consumer_{n}") for n in range(num_subs)) - mocked_message_parsers = [ - mocker.AsyncMock(return_value=True) for _ in range(num_subs) - ] - - publisher = 
rabbitmq_client("publisher") - message = faker.text() - exchange_name = random_exchange_name() - await asyncio.gather( - *( - consumer.subscribe(exchange_name, parser, exclusive_queue=False) - for consumer, parser in zip(consumers, mocked_message_parsers) - ) - ) - - await publisher.publish(exchange_name, message) - # only one consumer should have gotten the message here and the others not - async for attempt in AsyncRetrying( - wait=wait_fixed(0.1), - stop=stop_after_delay(5), - retry=retry_if_exception_type(AssertionError), - reraise=True, - ): - with attempt: - total_call_count = 0 - for parser in mocked_message_parsers: - total_call_count += parser.call_count - assert total_call_count == 1, "too many messages" - - -def test_rabbit_pub_sub_performance( - benchmark, - rabbitmq_client: Callable[[str], RabbitMQClient], - random_exchange_name: Callable[[], str], - mocker: MockerFixture, - faker: Faker, -): - async def async_fct_to_test(): - consumer = rabbitmq_client("consumer") - publisher = rabbitmq_client("publisher") - - message = faker.text() - - mocked_message_parser = mocker.AsyncMock(return_value=True) - exchange_name = random_exchange_name() - await consumer.subscribe(exchange_name, mocked_message_parser) - await publisher.publish(exchange_name, message) - await _assert_message_received(mocked_message_parser, 1, message) - - def run_test_async(): - asyncio.get_event_loop().run_until_complete(async_fct_to_test()) - - benchmark.pedantic(run_test_async, iterations=1, rounds=10) - - -async def test_rabbit_client_lose_connection( - rabbitmq_client: Callable[[str], RabbitMQClient], - docker_client: docker.client.DockerClient, -): - rabbit_client = rabbitmq_client("pinger") - assert await rabbit_client.ping() is True - # now let's put down the rabbit service - for rabbit_docker_service in ( - docker_service - for docker_service in docker_client.services.list() - if "rabbit" in docker_service.name # type: ignore - ): - rabbit_docker_service.remove() # type: ignore - await asyncio.sleep(10) # wait for the client to disconnect - assert await rabbit_client.ping() is False diff --git a/packages/service-library/tests/test_rabbitmq_rpc.py b/packages/service-library/tests/test_rabbitmq_rpc.py deleted file mode 100644 index 102cd6873e1..00000000000 --- a/packages/service-library/tests/test_rabbitmq_rpc.py +++ /dev/null @@ -1,371 +0,0 @@ -# pylint:disable=redefined-outer-name -# pylint:disable=unused-argument - -import asyncio -from typing import Any, Awaitable, Final - -import pytest -from pydantic import NonNegativeInt, ValidationError -from pytest import LogCaptureFixture -from servicelib.rabbitmq import RabbitMQClient -from servicelib.rabbitmq_errors import ( - RemoteMethodNotRegisteredError, - RPCNotInitializedError, -) -from servicelib.rabbitmq_utils import RPCNamespace, rpc_register_entries -from settings_library.rabbit import RabbitSettings - -pytest_simcore_core_services_selection = [ - "rabbit", -] - -MULTIPLE_REQUESTS_COUNT: Final[NonNegativeInt] = 100 - - -@pytest.fixture -def namespace() -> RPCNamespace: - return RPCNamespace.from_entries({f"test{i}": f"test{i}" for i in range(8)}) - - -@pytest.fixture -async def rabbit_requester(rabbit_service: RabbitSettings) -> RabbitMQClient: - client = RabbitMQClient(client_name="requester", settings=rabbit_service) - await client.rpc_initialize() - yield client - await client.close() - - -@pytest.fixture -async def rabbit_replier(rabbit_service: RabbitSettings) -> RabbitMQClient: - client = RabbitMQClient(client_name="replier", 
settings=rabbit_service) - await client.rpc_initialize() - yield client - await client.close() - - -async def add_me(*, x: Any, y: Any) -> Any: - result = x + y - # NOTE: types are not enforced - # result's type will on the caller side will be the one it has here - return result - - -class CustomClass: - def __init__(self, x: Any, y: Any) -> None: - self.x = x - self.y = y - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} x={self.x}, y={self.y}>" - - def __eq__(self, other: "CustomClass") -> bool: - return self.x == other.x and self.y == other.y - - def __add__(self, other: "CustomClass") -> "CustomClass": - return CustomClass(x=self.x + other.x, y=self.y + other.y) - - -@pytest.mark.parametrize( - "x,y,expected_result,expected_type", - [ - pytest.param(12, 20, 32, int, id="expect_int"), - pytest.param(12, 20.0, 32.0, float, id="expect_float"), - pytest.param(b"123b", b"xyz0", b"123bxyz0", bytes, id="expect_bytes"), - pytest.param([1, 2], [2, 3], [1, 2, 2, 3], list, id="list_addition"), - pytest.param( - CustomClass(2, 1), - CustomClass(1, 2), - CustomClass(3, 3), - CustomClass, - id="custom_class", - ), - pytest.param( - CustomClass([{"p", "1"}], [{"h": 1}]), - CustomClass([{3, b"bytes"}], [{"b": 2}]), - CustomClass([{"p", "1"}, {3, b"bytes"}], [{"h": 1}, {"b": 2}]), - CustomClass, - id="custom_class_complex_objects", - ), - ], -) -async def test_base_rpc_pattern( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - x: Any, - y: Any, - expected_result: Any, - expected_type: type, - namespace: RPCNamespace, -): - await rabbit_replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - request_result = await rabbit_requester.rpc_request( - namespace, add_me.__name__, x=x, y=y - ) - assert request_result == expected_result - assert type(request_result) == expected_type - - await rabbit_replier.rpc_unregister_handler(add_me) - - -async def test_multiple_requests_sequence_same_replier_and_requester( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - await rabbit_replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - for i in range(MULTIPLE_REQUESTS_COUNT): - assert ( - await rabbit_requester.rpc_request( - namespace, add_me.__name__, x=1 + i, y=2 + i - ) - == 3 + i * 2 - ) - - -async def test_multiple_requests_parallel_same_replier_and_requester( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - await rabbit_replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - expected_result: list[int] = [] - requests: list[Awaitable] = [] - for i in range(MULTIPLE_REQUESTS_COUNT): - requests.append( - rabbit_requester.rpc_request(namespace, add_me.__name__, x=1 + i, y=2 + i) - ) - expected_result.append(3 + i * 2) - - assert await asyncio.gather(*requests) == expected_result - - -async def test_multiple_requests_parallel_same_replier_different_requesters( - rabbit_service: RabbitSettings, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - await rabbit_replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - clients: list[RabbitMQClient] = [] - for _ in range(MULTIPLE_REQUESTS_COUNT): - client = RabbitMQClient("", rabbit_service) - clients.append(client) - - # worst case scenario - await asyncio.gather(*[c.rpc_initialize() for c in clients]) - - requests: list[Awaitable] = [] - expected_result: list[int] = [] - for i in range(MULTIPLE_REQUESTS_COUNT): - client = clients[i] - requests.append( - 
client.rpc_request(namespace, add_me.__name__, x=1 + i, y=2 + i) - ) - expected_result.append(3 + i * 2) - - assert await asyncio.gather(*requests) == expected_result - - # worst case scenario - await asyncio.gather(*[c.close() for c in clients]) - - -async def test_raise_error_if_not_started( - rabbit_service: RabbitSettings, namespace: RPCNamespace -): - requester = RabbitMQClient("", settings=rabbit_service) - with pytest.raises(RPCNotInitializedError): - await requester.rpc_request(namespace, add_me.__name__, x=1, y=2) - - # expect not to raise error - await requester.close() - - replier = RabbitMQClient("", settings=rabbit_service) - with pytest.raises(RPCNotInitializedError): - await replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - with pytest.raises(RPCNotInitializedError): - await replier.rpc_unregister_handler(add_me) - - # expect not to raise error - await replier.close() - - -async def _assert_event_not_registered( - rabbit_requester: RabbitMQClient, namespace: RPCNamespace -): - with pytest.raises(RemoteMethodNotRegisteredError) as exec_info: - assert ( - await rabbit_requester.rpc_request(namespace, add_me.__name__, x=1, y=3) - == 3 - ) - assert ( - f"Could not find a remote method named: '{namespace}.{add_me.__name__}'" - in f"{exec_info.value}" - ) - - -async def test_replier_not_started( - rabbit_requester: RabbitMQClient, namespace: RPCNamespace -): - await _assert_event_not_registered(rabbit_requester, namespace) - - -async def test_replier_handler_not_registered( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - await _assert_event_not_registered(rabbit_requester, namespace) - - -async def test_request_is_missing_arguments( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - await rabbit_replier.rpc_register_handler(namespace, add_me.__name__, add_me) - - # missing 1 argument - with pytest.raises(TypeError) as exec_info: - await rabbit_requester.rpc_request(namespace, add_me.__name__, x=1) - assert ( - f"{add_me.__name__}() missing 1 required keyword-only argument: 'y'" - in f"{exec_info.value}" - ) - - # missing all arguments - with pytest.raises(TypeError) as exec_info: - await rabbit_requester.rpc_request(namespace, add_me.__name__) - assert ( - f"{add_me.__name__}() missing 2 required keyword-only arguments: 'x' and 'y'" - in f"{exec_info.value}" - ) - - -async def test_requester_cancels_long_running_request_or_requester_takes_too_much_to_respond( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - async def _long_running(*, time_to_sleep: float) -> None: - await asyncio.sleep(time_to_sleep) - - await rabbit_replier.rpc_register_handler( - namespace, _long_running.__name__, _long_running - ) - - with pytest.raises(asyncio.TimeoutError): - await rabbit_requester.rpc_request( - namespace, _long_running.__name__, time_to_sleep=3, timeout_s=1 - ) - - -async def test_replier_handler_raises_error( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, -): - async def _raising_error() -> None: - raise RuntimeError("failed as requested") - - await rabbit_replier.rpc_register_handler( - namespace, _raising_error.__name__, _raising_error - ) - - with pytest.raises(RuntimeError) as exec_info: - await rabbit_requester.rpc_request(namespace, _raising_error.__name__) - assert "failed as requested" == f"{exec_info.value}" - - -async def 
test_replier_responds_with_not_locally_defined_object_instance( - rabbit_requester: RabbitMQClient, - rabbit_replier: RabbitMQClient, - namespace: RPCNamespace, - caplog: LogCaptureFixture, -): - async def _replier_scope() -> None: - class Custom: - def __init__(self, x: Any) -> None: - self.x = x - - async def _get_custom(x: Any) -> Custom: - return Custom(x) - - await rabbit_replier.rpc_register_handler(namespace, "a_name", _get_custom) - - async def _requester_scope() -> None: - # NOTE: what is happening here? - # the replier will say that it cannot pickle a local object and send it over - # the server's request will just time out. I would prefer a cleaner interface. - # There is no change of intercepting this message. - with pytest.raises(asyncio.TimeoutError): - await rabbit_requester.rpc_request(namespace, "a_name", x=10, timeout_s=1) - - assert "Can't pickle local object" in caplog.text - assert "..Custom" in caplog.text - - await _replier_scope() - await _requester_scope() - - -async def test_register_handler_under_same_name_raises_error( - rabbit_replier: RabbitMQClient, namespace: RPCNamespace -): - async def _a_handler() -> None: - pass - - async def _another_handler() -> None: - pass - - await rabbit_replier.rpc_register_handler(namespace, "same_name", _a_handler) - with pytest.raises(RuntimeError) as exec_info: - await rabbit_replier.rpc_register_handler( - namespace, "same_name", _another_handler - ) - assert "Method name already used for" in f"{exec_info.value}" - - -async def test_rpc_register_for_is_equivalent_to_rpc_register( - rabbit_replier: RabbitMQClient, -): - namespace_entries = {"hello": "test", "1": "me"} - namespace = RPCNamespace.from_entries(namespace_entries) - - async def _a_handler() -> int: - return 42 - - async def _assert_call_ok(): - result = await rabbit_replier.rpc_request(namespace, "_a_handler") - assert result == 42 - - await rabbit_replier.rpc_register_handler(namespace, "_a_handler", _a_handler) - await _assert_call_ok() - - await rabbit_replier.rpc_unregister_handler(_a_handler) - - await rpc_register_entries(rabbit_replier, namespace_entries, _a_handler) - await _assert_call_ok() - - -@pytest.mark.parametrize( - "handler_name, expect_fail", - [ - ("a" * 254, True), - ("a" * 253, False), - ], -) -async def test_get_namespaced_method_name_max_length( - rabbit_replier: RabbitMQClient, handler_name: str, expect_fail: bool -): - async def _a_handler() -> None: - pass - - if expect_fail: - with pytest.raises(ValidationError) as exec_info: - await rabbit_replier.rpc_register_handler("a", handler_name, _a_handler) - assert "ensure this value has at most 255 characters" in f"{exec_info.value}" - else: - await rabbit_replier.rpc_register_handler("a", handler_name, _a_handler) diff --git a/packages/service-library/tests/test_rabbitmq_utils.py b/packages/service-library/tests/test_rabbitmq_utils.py deleted file mode 100644 index aaee225a02c..00000000000 --- a/packages/service-library/tests/test_rabbitmq_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest -from pydantic import ValidationError -from servicelib.rabbitmq_utils import RPCNamespace - - -@pytest.mark.parametrize( - "entries, expected", - [ - ({"test": "b"}, "test_b"), - ({"hello": "1", "b": "2"}, "b_2-hello_1"), - ], -) -def test_rpc_namespace_from_entries(entries: dict[str, str], expected: str): - assert RPCNamespace.from_entries(entries) == expected - - -def test_rpc_namespace_sorts_elements(): - assert RPCNamespace.from_entries({"1": "a", "2": "b"}) == RPCNamespace.from_entries( - {"2": "b", 
"1": "a"} - ) - - -def test_rpc_namespace_too_long(): - with pytest.raises(ValidationError) as exec_info: - RPCNamespace.from_entries({f"test{i}": f"test{i}" for i in range(20)}) - assert "ensure this value has at most 252 characters" in f"{exec_info.value}" - - -def test_rpc_namespace_too_short(): - with pytest.raises(ValidationError) as exec_info: - RPCNamespace.from_entries({}) - assert "ensure this value has at least 1 characters" in f"{exec_info.value}" - - -def test_rpc_namespace_invalid_symbols(): - with pytest.raises(ValidationError) as exec_info: - RPCNamespace.from_entries({"test": "@"}) - assert "string does not match regex" in f"{exec_info.value}" diff --git a/packages/service-library/tests/test_redis.py b/packages/service-library/tests/test_redis.py deleted file mode 100644 index 1141e4d8bdb..00000000000 --- a/packages/service-library/tests/test_redis.py +++ /dev/null @@ -1,220 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=protected-access - - -import asyncio -import datetime -from typing import AsyncIterator, Callable - -import docker -import pytest -from faker import Faker -from redis.exceptions import LockError, LockNotOwnedError -from servicelib.redis import AlreadyLockedError, RedisClientSDK -from settings_library.redis import RedisSettings - -pytest_simcore_core_services_selection = [ - "redis", -] - -pytest_simcore_ops_services_selection = [ - "redis-commander", -] - - -async def test_redis_client(redis_service: RedisSettings): - client = RedisClientSDK(redis_service.dsn_resources) - assert client - assert client.redis_dsn == redis_service.dsn_resources - # check it is correctly initialized - assert await client.ping() is True - await client.close() - - -@pytest.fixture -async def redis_client( - redis_service: RedisSettings, -) -> AsyncIterator[Callable[[], RedisClientSDK]]: - created_clients = [] - - def _creator() -> RedisClientSDK: - client = RedisClientSDK(redis_service.dsn_resources) - assert client - created_clients.append(client) - return client - - yield _creator - # cleanup, properly close the clients - await asyncio.gather( - *(client.redis.flushall() for client in created_clients), return_exceptions=True - ) - await asyncio.gather(*(client.close() for client in created_clients)) - - -@pytest.fixture -def lock_timeout() -> datetime.timedelta: - return datetime.timedelta(seconds=1) - - -async def test_redis_key_encode_decode( - redis_client: Callable[[], RedisClientSDK], - faker: Faker, -): - client = redis_client() - key = faker.pystr() - value = faker.pystr() - await client.redis.set(key, value) - val = await client.redis.get(key) - assert val == value - await client.redis.delete(key) - - -async def test_redis_lock_acquisition( - redis_client: Callable[[], RedisClientSDK], faker: Faker -): - client = redis_client() - - lock_name = faker.pystr() - lock = client.redis.lock(lock_name) - assert await lock.locked() is False - - # Try to acquire the lock: - lock_acquired = await lock.acquire(blocking=False) - assert lock_acquired is True - assert await lock.locked() is True - assert await lock.owned() is True - with pytest.raises(LockError): - # a lock with no timeout cannot be reacquired - await lock.reacquire() - with pytest.raises(LockError): - # a lock with no timeout cannot be extended - await lock.extend(2) - - # try to acquire the lock a second time - same_lock = client.redis.lock(lock_name) - assert await same_lock.locked() is True - assert await same_lock.owned() is 
False - assert await same_lock.acquire(blocking=False) is False - - # now release the lock - await lock.release() - assert not await lock.locked() - assert not await lock.owned() - - -async def test_redis_lock_context_manager( - redis_client: Callable[[], RedisClientSDK], faker: Faker -): - client = redis_client() - lock_name = faker.pystr() - lock = client.redis.lock(lock_name) - assert not await lock.locked() - - async with lock: - assert await lock.locked() - assert await lock.owned() - with pytest.raises(LockError): - # a lock with no timeout cannot be reacquired - await lock.reacquire() - - with pytest.raises(LockError): - # a lock with no timeout cannot be extended - await lock.extend(2) - - # try to acquire the lock a second time - same_lock = client.redis.lock(lock_name, blocking_timeout=1) - assert await same_lock.locked() - assert not await same_lock.owned() - assert await same_lock.acquire() == False - with pytest.raises(LockError): - async with same_lock: - ... - assert not await lock.locked() - - -async def test_redis_lock_with_ttl( - redis_client: Callable[[], RedisClientSDK], - faker: Faker, - lock_timeout: datetime.timedelta, -): - client = redis_client() - ttl_lock = client.redis.lock(faker.pystr(), timeout=lock_timeout.total_seconds()) - assert not await ttl_lock.locked() - - with pytest.raises(LockNotOwnedError): - # this raises as the lock is lost - async with ttl_lock: - assert await ttl_lock.locked() - assert await ttl_lock.owned() - await asyncio.sleep(2 * lock_timeout.total_seconds()) - assert not await ttl_lock.locked() - - -async def test_lock_context( - redis_client: Callable[[], RedisClientSDK], - faker: Faker, - lock_timeout: datetime.timedelta, -): - client = redis_client() - lock_name = faker.pystr() - assert await client.is_locked(lock_name) is False - async with client.lock_context(lock_name) as ttl_lock: - assert await client.is_locked(lock_name) is True - assert await ttl_lock.owned() is True - await asyncio.sleep(5 * lock_timeout.total_seconds()) - assert await client.is_locked(lock_name) is True - assert await ttl_lock.owned() is True - assert await client.is_locked(lock_name) is False - assert await ttl_lock.owned() is False - - -async def test_lock_context_with_already_locked_lock_raises( - redis_client: Callable[[], RedisClientSDK], - faker: Faker, -): - client = redis_client() - lock_name = faker.pystr() - assert await client.is_locked(lock_name) is False - async with client.lock_context(lock_name) as lock: - assert await client.is_locked(lock_name) is True - - with pytest.raises(AlreadyLockedError): - assert isinstance(lock.name, str) - async with client.lock_context(lock.name): - ... 
- assert await lock.locked() is True - assert await client.is_locked(lock_name) is False - - -async def test_lock_context_with_data( - redis_client: Callable[[], RedisClientSDK], faker: Faker -): - client = redis_client() - lock_data = faker.text() - lock_name = faker.pystr() - assert await client.is_locked(lock_name) is False - assert await client.lock_value(lock_name) is None - async with client.lock_context(lock_name, lock_value=lock_data) as lock: - assert await client.is_locked(lock_name) is True - assert await client.lock_value(lock_name) == lock_data - assert await client.is_locked(lock_name) is False - assert await client.lock_value(lock_name) is None - - -async def test_redis_client_lose_connection( - redis_client: Callable[[], RedisClientSDK], - docker_client: docker.client.DockerClient, -): - client = redis_client() - assert await client.ping() is True - # now let's put down the rabbit service - for rabbit_docker_service in ( - docker_service - for docker_service in docker_client.services.list() - if "redis" in docker_service.name # type: ignore - ): - rabbit_docker_service.remove() # type: ignore - await asyncio.sleep(10) # wait for the client to disconnect - assert await client.ping() is False diff --git a/packages/service-library/tests/test_sequences_utils.py b/packages/service-library/tests/test_sequences_utils.py new file mode 100644 index 00000000000..657465b81c9 --- /dev/null +++ b/packages/service-library/tests/test_sequences_utils.py @@ -0,0 +1,72 @@ +from collections.abc import Iterable +from typing import Any + +import pytest +from servicelib.sequences_utils import T, pairwise, partition_gen + + +@pytest.mark.parametrize( + "slice_size, input_list, expected, ", + [ + pytest.param( + 5, + list(range(13)), + [(0, 1, 2, 3, 4), (5, 6, 7, 8, 9), (10, 11, 12)], + id="group_5_last_group_is_smaller", + ), + pytest.param( + 2, + list(range(5)), + [(0, 1), (2, 3), (4,)], + id="group_2_last_group_is_smaller", + ), + pytest.param( + 2, + list(range(4)), + [(0, 1), (2, 3)], + id="group_2_last_group_is_the_same", + ), + pytest.param( + 10, + list(range(4)), + [(0, 1, 2, 3)], + id="only_one_group_if_list_is_not_bit_enough", + ), + pytest.param( + 3, + [], + [()], + id="input_is_empty_returns_an_empty_list", + ), + pytest.param( + 5, + list(range(13)), + [(0, 1, 2, 3, 4), (5, 6, 7, 8, 9), (10, 11, 12)], + id="group_5_using_generator", + ), + ], +) +def test_partition_gen( + input_list: list[Any], expected: list[tuple[Any, ...]], slice_size: int +): + # check returned result + result = list(partition_gen(input_list, slice_size=slice_size)) + assert result == expected + + # check returned type + for entry in result: + assert type(entry) == tuple + + +@pytest.mark.parametrize( + "input_iter, expected", + [ + pytest.param([], [], id="0_elements"), + pytest.param([1], [], id="1_element"), + pytest.param([1, 2], [(1, 2)], id="2_elements"), + pytest.param([1, 2, 3], [(1, 2), (2, 3)], id="3_elements"), + pytest.param([1, 2, 3, 4], [(1, 2), (2, 3), (3, 4)], id="4_elements"), + ], +) +def test_pairwise(input_iter: Iterable[T], expected: Iterable[tuple[T, T]]): + assert list(pairwise(input_iter)) == expected diff --git a/packages/service-library/tests/test_utils.py b/packages/service-library/tests/test_utils.py index 853cacffad4..ebcad03b031 100644 --- a/packages/service-library/tests/test_utils.py +++ b/packages/service-library/tests/test_utils.py @@ -3,26 +3,36 @@ # pylint:disable=redefined-outer-name import asyncio -from pathlib import Path -from random import randint -from typing import 
Awaitable, Coroutine, Union +from collections.abc import AsyncIterator, Awaitable, Coroutine, Iterator +from copy import copy, deepcopy +from typing import NoReturn +from unittest import mock import pytest from faker import Faker -from servicelib.utils import fire_and_forget_task, logged_gather +from pytest_mock import MockerFixture +from servicelib.utils import ( + ensure_ends_with, + fire_and_forget_task, + limited_as_completed, + limited_gather, + logged_gather, +) -async def _value_error(uid, *, delay=1): - await _succeed(delay) - raise ValueError(f"task#{uid}") +async def _value_error(uid: int, *, delay: int = 1) -> NoReturn: + await _succeed(uid, delay=delay) + msg = f"task#{uid}" + raise ValueError(msg) -async def _runtime_error(uid, *, delay=1): - await _succeed(delay) - raise RuntimeError(f"task#{uid}") +async def _runtime_error(uid: int, *, delay: int = 1) -> NoReturn: + await _succeed(uid, delay=delay) + msg = f"task#{uid}" + raise RuntimeError(msg) -async def _succeed(uid, *, delay=1): +async def _succeed(uid: int, *, delay: int = 1) -> int: print(f"task#{uid} begin") await asyncio.sleep(delay) print(f"task#{uid} end") @@ -30,20 +40,19 @@ async def _succeed(uid, *, delay=1): @pytest.fixture -def coros(): - coros = [ +def coros() -> list[Coroutine]: + return [ _succeed(0), - _value_error(1, delay=2), + _value_error(1, delay=4), _succeed(2), - _runtime_error(3), - _value_error(4, delay=0), + _runtime_error(3, delay=0), + _value_error(4, delay=2), _succeed(5), ] - return coros @pytest.fixture -def mock_logger(mocker): +def mock_logger(mocker: MockerFixture) -> Iterator[mock.Mock]: mock_logger = mocker.Mock() yield mock_logger @@ -55,9 +64,11 @@ def mock_logger(mocker): ), "Expected all 3 errors ALWAYS logged as warnings" -async def test_logged_gather(event_loop, coros, mock_logger): - - with pytest.raises(ValueError) as excinfo: +async def test_logged_gather( + coros: list[Coroutine], + mock_logger: mock.Mock, +): + with pytest.raises(ValueError) as excinfo: # noqa: PT011 await logged_gather(*coros, reraise=True, log=mock_logger) # NOTE: #4 fails first, the one raised in #1 @@ -66,7 +77,7 @@ async def test_logged_gather(event_loop, coros, mock_logger): # NOTE: only first error in the list is raised, since it is not RuntimeError, that task assert isinstance(excinfo.value, ValueError) - for task in asyncio.all_tasks(event_loop): + for task in asyncio.all_tasks(asyncio.get_running_loop()): if task is not asyncio.current_task(): # info task.print_stack() @@ -78,7 +89,7 @@ async def test_logged_gather(event_loop, coros, mock_logger): assert not task.cancelled() -async def test_logged_gather_wo_raising(coros, mock_logger): +async def test_logged_gather_wo_raising(coros: list[Coroutine], mock_logger: mock.Mock): results = await logged_gather(*coros, reraise=False, log=mock_logger) assert results[0] == 0 @@ -89,18 +100,12 @@ async def test_logged_gather_wo_raising(coros, mock_logger): assert results[5] == 5 -def print_tree(path: Path, level=0): - tab = " " * level - print(f"{tab}{'+' if path.is_dir() else '-'} {path if level==0 else path.name}") - for p in path.glob("*"): - print_tree(p, level + 1) - - @pytest.fixture() -async def coroutine_that_cancels() -> Union[asyncio.Future, Awaitable]: +async def coroutine_that_cancels() -> asyncio.Future | Awaitable: async def _self_cancelling() -> None: await asyncio.sleep(0) # NOTE: this forces a context switch - raise asyncio.CancelledError("manual cancellation") + msg = "manual cancellation" + raise asyncio.CancelledError(msg) return 
_self_cancelling() @@ -140,8 +145,8 @@ async def test_fire_and_forget_cancellation_no_errors_raised( async def test_fire_and_forget_1000s_tasks(faker: Faker): tasks_collection = set() - async def _some_task(n: int): - await asyncio.sleep(randint(1, 3)) + async def _some_task(n: int) -> str: + await asyncio.sleep(faker.random_int(1, 3)) return f"I'm great since I slept a bit, and by the way I'm task {n}" for n in range(1000): @@ -157,3 +162,140 @@ async def _some_task(n: int): assert len(done) == 1000 assert len(pending) == 0 assert len(tasks_collection) == 0 + + +@pytest.mark.parametrize( + "original, termination, expected", + [ + ("hello", "world", "helloworld"), + ("first_second", "second", "first_second"), + ("some/path", "/", "some/path/"), + ], +) +def test_ensure_ends_with(original: str, termination: str, expected: str): + original_copy = copy(original) + terminated_string = ensure_ends_with(original, termination) + assert original_copy == original + assert terminated_string.endswith(termination) + assert terminated_string == expected + + +@pytest.fixture +def uids(faker: Faker) -> list[int]: + return [faker.pyint() for _ in range(10)] + + +@pytest.fixture +def long_delay() -> int: + return 10 + + +@pytest.fixture +def slow_successful_coros_list(uids: list[int], long_delay: int) -> list[Coroutine]: + return [_succeed(uid, delay=long_delay) for uid in uids] + + +@pytest.fixture +def successful_coros_list(uids: list[int]) -> list[Coroutine]: + return [_succeed(uid) for uid in uids] + + +@pytest.fixture +async def successful_coros_gen(uids: list[int]) -> AsyncIterator[Coroutine]: + async def as_async_iter(it): + for x in it: + yield x + + return as_async_iter(_succeed(uid) for uid in uids) + + +@pytest.fixture(params=["list", "generator"]) +async def successful_coros( + successful_coros_list: list[Coroutine], + successful_coros_gen: AsyncIterator[Coroutine], + request: pytest.FixtureRequest, +) -> list[Coroutine] | AsyncIterator[Coroutine]: + return successful_coros_list if request.param == "list" else successful_coros_gen + + +@pytest.mark.parametrize("limit", [0, 2, 5, 10]) +async def test_limited_as_completed( + uids: list[int], + successful_coros: list[Coroutine] | AsyncIterator[Coroutine], + limit: int, +): + expected_uids = deepcopy(uids) + async for future in limited_as_completed(successful_coros, limit=limit): + result = await future + assert result is not None + assert result in expected_uids + expected_uids.remove(result) + assert len(expected_uids) == 0 + + +async def test_limited_as_completed_empty_coros(): + results = [await result async for result in limited_as_completed([])] + assert results == [] + + +@pytest.mark.parametrize("limit", [0, 2, 5, 10]) +async def test_limited_gather_limits( + uids: list[int], + successful_coros_list: list[Coroutine], + limit: int, +): + results = await limited_gather(*successful_coros_list, limit=limit) + assert results == uids + + +async def test_limited_gather( + coros: list[Coroutine], + mock_logger: mock.Mock, +): + with pytest.raises(RuntimeError) as excinfo: + await limited_gather(*coros, reraise=True, log=mock_logger, limit=0) + + # NOTE: #3 fails first + assert "task#3" in str(excinfo.value) + + # NOTE: only first error in the list is raised, since it is not RuntimeError, that task + assert isinstance(excinfo.value, RuntimeError) + + unfinished_tasks = [ + task + for task in asyncio.all_tasks(asyncio.get_running_loop()) + if task is not asyncio.current_task() + ] + final_results = await asyncio.gather(*unfinished_tasks, 
return_exceptions=True) + for result in final_results: + if isinstance(result, Exception): + assert isinstance(result, ValueError | RuntimeError) + + +async def test_limited_gather_wo_raising( + coros: list[Coroutine], mock_logger: mock.Mock +): + results = await limited_gather(*coros, reraise=False, log=mock_logger, limit=0) + + assert results[0] == 0 + assert isinstance(results[1], ValueError) + assert results[2] == 2 + assert isinstance(results[3], RuntimeError) + assert isinstance(results[4], ValueError) + assert results[5] == 5 + + +async def test_limited_gather_cancellation(slow_successful_coros_list: list[Coroutine]): + task = asyncio.create_task(limited_gather(*slow_successful_coros_list, limit=0)) + await asyncio.sleep(3) + task.cancel() + with pytest.raises(asyncio.CancelledError): + await task + + # check all coros are cancelled + unfinished_tasks = [ + task + for task in asyncio.all_tasks(asyncio.get_running_loop()) + if task is not asyncio.current_task() + ] + assert not unfinished_tasks diff --git a/packages/service-library/tests/test_utils_formatting.py b/packages/service-library/tests/test_utils_formatting.py new file mode 100644 index 00000000000..bd97e25f51c --- /dev/null +++ b/packages/service-library/tests/test_utils_formatting.py @@ -0,0 +1,20 @@ +from datetime import timedelta + +import pytest +from servicelib.utils_formatting import timedelta_as_minute_second + + +@pytest.mark.parametrize( + "input_timedelta, expected_formatting", + [ + (timedelta(), "00:00"), + (timedelta(seconds=23), "00:23"), + (timedelta(days=2, seconds=23), f"{2*24*60}:23"), + (timedelta(seconds=-23), "-00:23"), + (timedelta(seconds=-83), "-01:23"), + ], +) +def test_timedelta_as_minute_second( + input_timedelta: timedelta, expected_formatting: str +): + assert timedelta_as_minute_second(input_timedelta) == expected_formatting diff --git a/packages/service-library/tests/test_utils_meta.py b/packages/service-library/tests/test_utils_meta.py index d4b988b44dc..a6da532bc77 100644 --- a/packages/service-library/tests/test_utils_meta.py +++ b/packages/service-library/tests/test_utils_meta.py @@ -1,5 +1,6 @@ from typing import Final +from models_library.basic_types import VersionStr from packaging.version import Version from servicelib.utils_meta import PackageInfo @@ -8,7 +9,7 @@ def test_meta_module_implementation(): # This is what is used in _meta.py info: Final = PackageInfo(package_name="simcore-service-library") - __version__: Final[str] = info.__version__ + __version__: Final[VersionStr] = info.__version__ PROJECT_NAME: Final[str] = info.project_name VERSION: Final[Version] = info.version diff --git a/packages/service-library/tests/test_utils_secrets.py b/packages/service-library/tests/test_utils_secrets.py index e10b0d4848b..37e31f4c076 100644 --- a/packages/service-library/tests/test_utils_secrets.py +++ b/packages/service-library/tests/test_utils_secrets.py @@ -1,13 +1,25 @@ +# pylint: disable=protected-access # pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable -# pylint: disable=too-many-arguments + +from uuid import uuid4 + +import pytest +from pydantic import ValidationError from servicelib.utils_secrets import ( + _MIN_SECRET_NUM_BYTES, + _PLACEHOLDER, MIN_PASSCODE_LENGTH, MIN_PASSWORD_LENGTH, + are_secrets_equal, generate_passcode, generate_password, + generate_token_secret_key, + mask_sensitive_data, + secure_randint, ) @@ -50,3 +62,58 @@ def test_generate_passcode(): # passcode is a number 
assert int(generate_passcode()) >= 0 + + +def test_compare_secrets(): + passcode = generate_passcode(100) + assert not are_secrets_equal(got="foo", expected=passcode) + assert are_secrets_equal(got=passcode, expected=passcode) + + +def test_generate_token_secrets(): + secret_key = generate_token_secret_key() + assert len(secret_key) == 2 * _MIN_SECRET_NUM_BYTES + + +@pytest.mark.parametrize("start, end", [(1, 2), (1, 10), (99, 100)]) +async def test_secure_randint(start: int, end: int): + random_number = secure_randint(start, end) + assert start <= random_number <= end + + +async def test_secure_randint_called_with_wrong_tupes(): + with pytest.raises(ValidationError): + secure_randint(1.1, 2) + + +def test_mask_sensitive_data(): + + # NOTE: any hasahble object can be a dict key + uuid_obj = uuid4() + other_obj = object() + + sensitive_data = { + "username": "john_doe", + "password": "sensitive_password", + "details": { + "secret_key": "super_secret_key", + "nested": {"nested_password": "nested_sensitive_password"}, + }, + "credit-card": "12345", + uuid_obj: other_obj, + } + + masked_data = mask_sensitive_data( + sensitive_data, extra_sensitive_keywords={"credit-card"} + ) + + assert masked_data == { + "username": "john_doe", + "password": _PLACEHOLDER, + "details": { + "secret_key": _PLACEHOLDER, + "nested": {"nested_password": _PLACEHOLDER}, + }, + "credit-card": _PLACEHOLDER, + uuid_obj: other_obj, + } diff --git a/packages/settings-library/Makefile b/packages/settings-library/Makefile index 487c0fc825d..ce2fbb189cd 100644 --- a/packages/settings-library/Makefile +++ b/packages/settings-library/Makefile @@ -12,7 +12,7 @@ requirements: ## compiles pip requirements (.in -> .txt) .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests tests-ci @@ -38,6 +38,7 @@ tests-ci: ## runs unit tests [ci-mode] --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=settings_library \ --durations=10 \ --log-date-format="%Y-%m-%d %H:%M:%S" \ diff --git a/packages/settings-library/requirements/_base.in b/packages/settings-library/requirements/_base.in index 4c659aafea9..91f4dd23b04 100644 --- a/packages/settings-library/requirements/_base.in +++ b/packages/settings-library/requirements/_base.in @@ -1,10 +1,12 @@ # -# Specifies third-party dependencies for 'models-library' +# Specifies third-party dependencies for 'settings-library' # --constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in -pydantic>=1.9 - +pydantic +pydantic-settings # extra +rich typer diff --git a/packages/settings-library/requirements/_base.txt b/packages/settings-library/requirements/_base.txt index a0f4ba26507..bc7e8331334 100644 --- a/packages/settings-library/requirements/_base.txt +++ b/packages/settings-library/requirements/_base.txt @@ -1,14 +1,48 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -click==8.1.3 +annotated-types==0.7.0 + # via pydantic +click==8.1.8 # via typer -pydantic==1.10.2 - # via -r requirements/_base.in -typer==0.7.0 - # via -r requirements/_base.in 
-typing-extensions==4.5.0 +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/_base.in + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 # via pydantic +pydantic-extra-types==2.10.2 + # via -r requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +pygments==2.19.1 + # via rich +python-dotenv==1.0.1 + # via pydantic-settings +rich==13.9.4 + # via + # -r requirements/_base.in + # typer +shellingham==1.5.4 + # via typer +typer==0.15.2 + # via -r requirements/_base.in +typing-extensions==4.12.2 + # via + # pydantic + # pydantic-core + # pydantic-extra-types + # typer diff --git a/packages/settings-library/requirements/_test.in b/packages/settings-library/requirements/_test.in index fad0477d5f1..5f1d98cde8c 100644 --- a/packages/settings-library/requirements/_test.in +++ b/packages/settings-library/requirements/_test.in @@ -8,8 +8,9 @@ # --constraint _base.txt + coverage -coveralls +faker pytest pytest-cov pytest-instafail diff --git a/packages/settings-library/requirements/_test.txt b/packages/settings-library/requirements/_test.txt index 129c026e0e8..fb8381375d5 100644 --- a/packages/settings-library/requirements/_test.txt +++ b/packages/settings-library/requirements/_test.txt @@ -1,64 +1,39 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -attrs==22.2.0 - # via pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.0.1 - # via requests -coverage==6.5.0 +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 +faker==36.1.1 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -idna==3.4 - # via requests iniconfig==2.0.0 # via pytest -packaging==23.0 +packaging==24.2 # via # pytest # pytest-sugar -pluggy==1.0.0 +pluggy==1.5.0 # via pytest -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in # pytest-cov # pytest-instafail # pytest-mock # pytest-sugar -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -python-dotenv==1.0.0 - # via -r requirements/_test.in -requests==2.28.2 - # via coveralls -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 +python-dotenv==1.0.1 # via - # coverage - # pytest -urllib3==1.26.14 - # via - # -c requirements/../../../requirements/constraints.txt - # requests + # -c 
requirements/_base.txt + # -r requirements/_test.in +termcolor==2.5.0 + # via pytest-sugar +tzdata==2025.1 + # via faker diff --git a/packages/settings-library/requirements/_tools.txt b/packages/settings-library/requirements/_tools.txt index e5bd3c4a963..13e0ee77ce6 100644 --- a/packages/settings-library/requirements/_tools.txt +++ b/packages/settings-library/requirements/_tools.txt @@ -1,90 +1,79 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==6.0 +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 # via # -c requirements/../../../requirements/constraints.txt # pre-commit -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/settings-library/requirements/ci.txt b/packages/settings-library/requirements/ci.txt index 18b9cd124df..f535a4dc026 100644 --- a/packages/settings-library/requirements/ci.txt +++ b/packages/settings-library/requirements/ci.txt @@ -9,9 +9,11 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../pytest-simcore/ +simcore-common-library @ ../common-library/ +pytest-simcore @ ../pytest-simcore # current module -. +simcore-settings-library @ . 
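Note: the pinned requirements above move settings-library from pydantic v1 onto pydantic v2 plus pydantic-settings (python-dotenv, rich and typer are pulled in alongside), which is what the source changes further below build on. As orientation, a minimal pydantic-settings v2 class that reads its values from environment variables could look like the sketch below; the class and variable names are illustrative only and are not part of this package.

import os

from pydantic_settings import BaseSettings, SettingsConfigDict


class ExampleServiceSettings(BaseSettings):
    # field names double as environment variable names (case-sensitive here)
    EXAMPLE_HOST: str = "localhost"
    EXAMPLE_PORT: int = 8000

    model_config = SettingsConfigDict(case_sensitive=True, extra="forbid")


os.environ["EXAMPLE_PORT"] = "9000"
settings = ExampleServiceSettings()
assert settings.EXAMPLE_PORT == 9000  # value coerced from the environment string

This is plain pydantic-settings behavior; the BaseCustomSettings subclass changed later in this diff layers the repository-specific conventions (frozen, case-sensitive, extra forbidden, auto_default_from_env) on top of it.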
diff --git a/packages/settings-library/requirements/dev.txt b/packages/settings-library/requirements/dev.txt index 32d383e9ccc..de2adb4ecbb 100644 --- a/packages/settings-library/requirements/dev.txt +++ b/packages/settings-library/requirements/dev.txt @@ -1,4 +1,4 @@ -# Shortcut to install all packages needed to develop 'models-library' +# Shortcut to install all packages needed to develop 'settings-library' # # - As ci.txt but with current and repo packages in develop (edit) mode # @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../common-library/ --editable ../pytest-simcore/ # current module diff --git a/packages/settings-library/setup.cfg b/packages/settings-library/setup.cfg index 9f06ddde50e..43f7aec5015 100644 --- a/packages/settings-library/setup.cfg +++ b/packages/settings-library/setup.cfg @@ -14,6 +14,7 @@ universal = 1 # Define setup.py command aliases here test = pytest -# NOTE: uncomment when pytest-asyncio is added in requirements -# [tool:pytest] -# asyncio_mode = auto +[tool:pytest] +# SEE https://docs.pytest.org/en/stable/how-to/capture-warnings.html +filterwarnings = + error diff --git a/packages/settings-library/setup.py b/packages/settings-library/setup.py index 30aecb77c68..8e3e4367185 100644 --- a/packages/settings-library/setup.py +++ b/packages/settings-library/setup.py @@ -29,35 +29,37 @@ def read_reqs(reqs_path: Path) -> set[str]: ) # STRONG requirements -SETUP = dict( - name="simcore-settings-library", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "author": ", ".join( ( "Pedro Crespo-Valero (pcrespov)", "Sylvain Anderegg (sanderegg)", ) ), - description="Library with common pydantic settings", + "name": "simcore-settings-library", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "description": "Library with common pydantic settings", + "python_requires": ">=3.10", # SEE https://pypi.org/classifiers/ - classifiers=[ + "classifiers": [ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], - long_description=(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - install_requires=INSTALL_REQUIREMENTS, - packages=find_packages(where="src"), - package_dir={"": "src"}, - include_package_data=True, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - zip_safe=False, -) + "long_description": (CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "install_requires": INSTALL_REQUIREMENTS, + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "include_package_data": True, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "zip_safe": False, +} if __name__ == "__main__": diff --git a/packages/settings-library/src/settings_library/__init__.py b/packages/settings-library/src/settings_library/__init__.py index 31a9a9f7576..f36b602b994 100644 --- a/packages/settings-library/src/settings_library/__init__.py +++ b/packages/settings-library/src/settings_library/__init__.py @@ -1,3 +1,3 @@ -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution("simcore-settings-library").version +__version__: str = version("simcore-settings-library") diff --git 
a/packages/settings-library/src/settings_library/application.py b/packages/settings-library/src/settings_library/application.py new file mode 100644 index 00000000000..321e450e65b --- /dev/null +++ b/packages/settings-library/src/settings_library/application.py @@ -0,0 +1,32 @@ +from typing import Annotated + +from pydantic import Field, PositiveInt + +from .base import BaseCustomSettings +from .basic_types import BootModeEnum, BuildTargetEnum + + +class BaseApplicationSettings(BaseCustomSettings): + """ + Environments in image set at build-time + """ + + # @Makefile + SC_BUILD_DATE: str | None = None + SC_BUILD_TARGET: BuildTargetEnum | None = None + SC_VCS_REF: str | None = None + SC_VCS_URL: str | None = None + + # @Dockerfile + SC_BOOT_MODE: BootModeEnum | None = None + SC_BOOT_TARGET: BuildTargetEnum | None = None + SC_HEALTHCHECK_TIMEOUT: Annotated[ + PositiveInt | None, + Field( + description="If a single run of the check takes longer than timeout seconds " + "then the check is considered to have failed." + "It takes retries consecutive failures of the health check for the container to be considered unhealthy.", + ), + ] = None + SC_USER_ID: int | None = None + SC_USER_NAME: str | None = None diff --git a/packages/settings-library/src/settings_library/aws_s3_cli.py b/packages/settings-library/src/settings_library/aws_s3_cli.py new file mode 100644 index 00000000000..68ae01d91eb --- /dev/null +++ b/packages/settings-library/src/settings_library/aws_s3_cli.py @@ -0,0 +1,16 @@ +from typing import Annotated + +from pydantic import Field + +from .base import BaseCustomSettings +from .s3 import S3Settings + + +class AwsS3CliSettings(BaseCustomSettings): + AWS_S3_CLI_S3: Annotated[ + S3Settings, + Field( + description="These settings intentionally do not use auto_default_from_env=True " + "because we might want to turn them off if RClone is enabled.", + ), + ] diff --git a/packages/settings-library/src/settings_library/base.py b/packages/settings-library/src/settings_library/base.py index 80ebbb9ff90..9ab3119dfc7 100644 --- a/packages/settings-library/src/settings_library/base.py +++ b/packages/settings-library/src/settings_library/base.py @@ -1,55 +1,98 @@ import logging from functools import cached_property -from typing import Final, Sequence, get_args - -from pydantic import BaseConfig, BaseSettings, Extra, ValidationError, validator -from pydantic.error_wrappers import ErrorList, ErrorWrapper -from pydantic.fields import ModelField, Undefined - -logger = logging.getLogger(__name__) - -_DEFAULTS_TO_NONE_MSG: Final[ +from typing import Any, Final, get_origin + +from common_library.pydantic_fields_extension import get_type, is_literal, is_nullable +from pydantic import ValidationInfo, field_validator +from pydantic.fields import FieldInfo +from pydantic_core import ValidationError +from pydantic_settings import ( + BaseSettings, + EnvSettingsSource, + PydanticBaseSettingsSource, + SettingsConfigDict, +) + +_logger = logging.getLogger(__name__) + +_AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING: Final[ str -] = "%s auto_default_from_env unresolved, defaulting to None" +] = "{field_name} auto_default_from_env unresolved, defaulting to None" -class DefaultFromEnvFactoryError(ValidationError): - ... 
+class DefaultFromEnvFactoryError(ValueError): + def __init__(self, errors): + super().__init__("Default could not be constructed") + self.errors = errors -def create_settings_from_env(field: ModelField): +def _create_settings_from_env(field_name: str, info: FieldInfo): # NOTE: Cannot pass only field.type_ because @prepare_field (when this function is called) # this value is still not resolved (field.type_ at that moment has a weak_ref). # Therefore we keep the entire 'field' but MUST be treated here as read-only def _default_factory(): """Creates default from sub-settings or None (if nullable)""" - field_settings_cls = field.type_ + field_settings_cls = get_type(info) try: return field_settings_cls() except ValidationError as err: - if field.allow_none: + if is_nullable(info): # e.g. Optional[PostgresSettings] would warn if defaults to None - logger.warning( - _DEFAULTS_TO_NONE_MSG, - field.name, + msg = _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name=field_name ) + _logger.warning(msg) return None + _logger.warning("Validation errors=%s", err.errors()) + raise DefaultFromEnvFactoryError(errors=err.errors()) from err - def _prepend_field_name(ee: ErrorList): - if isinstance(ee, ErrorWrapper): - return ErrorWrapper(ee.exc, (field.name,) + ee.loc_tuple()) - assert isinstance(ee, Sequence) # nosec - return [_prepend_field_name(e) for e in ee] + return _default_factory - raise DefaultFromEnvFactoryError( - errors=_prepend_field_name(err.raw_errors), # type: ignore - model=err.model, - # FIXME: model = shall be the parent settings?? but I dont find how retrieve it from the field - ) from err - return _default_factory +def _is_auto_default_from_env_enabled(field: FieldInfo) -> bool: + return bool( + field.json_schema_extra is not None + and field.json_schema_extra.get("auto_default_from_env", False) # type: ignore[union-attr] + ) + + +_MARKED_AS_UNSET: Final[dict] = {} + + +class EnvSettingsWithAutoDefaultSource(EnvSettingsSource): + def __init__( + self, settings_cls: type[BaseSettings], env_settings: EnvSettingsSource + ): + super().__init__( + settings_cls, + env_settings.case_sensitive, + env_settings.env_prefix, + env_settings.env_nested_delimiter, + env_settings.env_ignore_empty, + env_settings.env_parse_none_str, + env_settings.env_parse_enums, + ) + + def prepare_field_value( + self, + field_name: str, + field: FieldInfo, + value: Any, + value_is_complex: bool, # noqa: FBT001 + ) -> Any: + prepared_value = super().prepare_field_value( + field_name, field, value, value_is_complex + ) + if ( + _is_auto_default_from_env_enabled(field) + and field.default_factory + and field.default is None + and prepared_value == _MARKED_AS_UNSET + ): + prepared_value = field.default_factory() # type: ignore[call-arg] + return prepared_value class BaseCustomSettings(BaseSettings): @@ -61,55 +104,65 @@ class BaseCustomSettings(BaseSettings): SEE tests for details. """ - @validator("*", pre=True) + @field_validator("*", mode="before") @classmethod - def parse_none(cls, v, field: ModelField): + def _parse_none(cls, v, info: ValidationInfo): # WARNING: In nullable fields, envs equal to null or none are parsed as None !! 
- if field.allow_none: - if isinstance(v, str) and v.lower() in ("null", "none"): - return None + if ( + info.field_name + and is_nullable(cls.model_fields[info.field_name]) + and isinstance(v, str) + and v.lower() in ("none",) + ): + return None return v - class Config(BaseConfig): - case_sensitive = True # All must be capitalized - extra = Extra.forbid - allow_mutation = False - frozen = True - validate_all = True - keep_untouched = (cached_property,) - - @classmethod - def prepare_field(cls, field: ModelField) -> None: - super().prepare_field(field) - - auto_default_from_env = field.field_info.extra.get( - "auto_default_from_env", False - ) + model_config = SettingsConfigDict( + case_sensitive=True, # All must be capitalized + extra="forbid", + frozen=True, + validate_default=True, + ignored_types=(cached_property,), + env_parse_none_str="null", + ) - field_type = field.type_ - if args := get_args(field_type): - field_type = next(a for a in args if a != type(None)) - - if issubclass(field_type, BaseCustomSettings): + @classmethod + def __pydantic_init_subclass__(cls, **kwargs: Any): + super().__pydantic_init_subclass__(**kwargs) + + for name, field in cls.model_fields.items(): + auto_default_from_env = _is_auto_default_from_env_enabled(field) + field_type = get_type(field) + + # Avoids issubclass raising TypeError. SEE test_issubclass_type_error_with_pydantic_models + is_not_composed = ( + get_origin(field_type) is None + ) # is not composed as dict[str, Any] or Generic[Base] + is_not_literal = not is_literal(field) + + if ( + is_not_literal + and is_not_composed + and issubclass(field_type, BaseCustomSettings) + ): if auto_default_from_env: - assert field.field_info.default is Undefined - assert field.field_info.default_factory is None - - # Transform it into something like `Field(default_factory=create_settings_from_env(field))` - field.default_factory = create_settings_from_env(field) + # Builds a default factory `Field(default_factory=create_settings_from_env(field))` + field.default_factory = _create_settings_from_env(name, field) field.default = None - field.required = False # has a default now - elif issubclass(field_type, BaseSettings): - raise ValueError( - f"{cls}.{field.name} of type {field_type} must inherit from BaseCustomSettings" - ) + elif ( + is_not_literal + and is_not_composed + and issubclass(field_type, BaseSettings) + ): + msg = f"{cls}.{name} of type {field_type} must inherit from BaseCustomSettings" + raise ValueError(msg) elif auto_default_from_env: - raise ValueError( - "auto_default_from_env=True can only be used in BaseCustomSettings subclasses" - f"but field {cls}.{field.name} is {field_type} " - ) + msg = f"auto_default_from_env=True can only be used in BaseCustomSettings subclasses but field {cls}.{name} is {field_type} " + raise ValueError(msg) + + cls.model_rebuild(force=True) @classmethod def create_from_envs(cls, **overrides): @@ -117,3 +170,22 @@ def create_from_envs(cls, **overrides): # Optional to use to make the code more readable # More explicit and pylance seems to get less confused return cls(**overrides) + + @classmethod + def settings_customise_sources( + cls, + settings_cls: type[BaseSettings], + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ) -> tuple[PydanticBaseSettingsSource, ...]: + assert env_settings # nosec + return ( + init_settings, + EnvSettingsWithAutoDefaultSource( + settings_cls, 
env_settings=env_settings # type:ignore[arg-type] + ), + dotenv_settings, + file_secret_settings, + ) diff --git a/packages/settings-library/src/settings_library/basic_types.py b/packages/settings-library/src/settings_library/basic_types.py index ce2ab163f2b..b96ce428817 100644 --- a/packages/settings-library/src/settings_library/basic_types.py +++ b/packages/settings-library/src/settings_library/basic_types.py @@ -1,47 +1,31 @@ -# -# NOTE: This files copies some of the types from models_library.basic_types -# This is a minor evil to avoid the maintenance burden that creates -# an extra dependency to a larger models_library (intra-repo library) - from enum import Enum +from typing import Annotated, TypeAlias -from pydantic.types import conint, constr - -# port number range -PortInt = conint(gt=0, lt=65535) +from common_library.basic_types import BootModeEnum, BuildTargetEnum, LogLevel +from pydantic import Field, StringConstraints -# e.g. 'v5' -VersionTag = constr(regex=r"^v\d$") +assert issubclass(LogLevel, Enum) # nosec +assert issubclass(BootModeEnum, Enum) # nosec +assert issubclass(BuildTargetEnum, Enum) # nosec +__all__: tuple[str, ...] = ( + "LogLevel", + "BootModeEnum", + "BuildTargetEnum", +) -class LogLevel(str, Enum): - DEBUG = "DEBUG" - INFO = "INFO" - WARNING = "WARNING" - ERROR = "ERROR" +# port number range +PortInt: TypeAlias = Annotated[int, Field(gt=0, lt=65535)] +RegisteredPortInt: TypeAlias = Annotated[int, Field(gt=1024, lt=65535)] -class BootMode(str, Enum): - """ - Values taken by SC_BOOT_MODE environment variable - set in Dockerfile and used during docker/boot.sh - """ - - DEFAULT = "default" - LOCAL = "local-development" - DEBUG = "debug-ptvsd" - PRODUCTION = "production" - DEVELOPMENT = "development" +# e.g. 'v5' +VersionTag: TypeAlias = Annotated[str, StringConstraints(pattern=r"^v\d$")] -class BuildTargetEnum(str, Enum): - """ - Values taken by SC_BUILD_TARGET environment variable - set in Dockerfile that defines the stage targeted in the - docker image build - """ - BUILD = "build" - CACHE = "cache" - PRODUCTION = "production" - DEVELOPMENT = "development" +# non-empty bounded string used as identifier +# e.g. "123" or "name_123" or "fa327c73-52d8-462a-9267-84eeaf0f90e3" but NOT "" +IDStr: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, min_length=1, max_length=50) +] diff --git a/packages/settings-library/src/settings_library/celery.py b/packages/settings-library/src/settings_library/celery.py new file mode 100644 index 00000000000..168b86c6745 --- /dev/null +++ b/packages/settings-library/src/settings_library/celery.py @@ -0,0 +1,58 @@ +from datetime import timedelta +from typing import Annotated + +from pydantic import Field +from pydantic_settings import SettingsConfigDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings + +from .base import BaseCustomSettings + + +class CelerySettings(BaseCustomSettings): + CELERY_RABBIT_BROKER: Annotated[ + RabbitSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + CELERY_REDIS_RESULT_BACKEND: Annotated[ + RedisSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + CELERY_RESULT_EXPIRES: Annotated[ + timedelta, + Field( + description="Time after which task results will be deleted (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)." 
+ ), + ] = timedelta(days=7) + CELERY_EPHEMERAL_RESULT_EXPIRES: Annotated[ + timedelta, + Field( + description="Time after which ephemeral task results will be deleted (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)." + ), + ] = timedelta(hours=1) + CELERY_RESULT_PERSISTENT: Annotated[ + bool, + Field( + description="If set to True, result messages will be persistent (after a broker restart)." + ), + ] = True + + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + { + "CELERY_RABBIT_BROKER": { + "RABBIT_USER": "guest", + "RABBIT_SECURE": False, + "RABBIT_PASSWORD": "guest", + "RABBIT_HOST": "localhost", + "RABBIT_PORT": 5672, + }, + "CELERY_REDIS_RESULT_BACKEND": { + "REDIS_HOST": "localhost", + "REDIS_PORT": 6379, + }, + "CELERY_RESULT_EXPIRES": timedelta(days=1), # type: ignore[dict-item] + "CELERY_RESULT_PERSISTENT": True, + } + ], + } + ) diff --git a/packages/settings-library/src/settings_library/comp_services.py b/packages/settings-library/src/settings_library/comp_services.py index 787882143f5..71901e61624 100644 --- a/packages/settings-library/src/settings_library/comp_services.py +++ b/packages/settings-library/src/settings_library/comp_services.py @@ -1,5 +1,4 @@ -from pydantic import ByteSize, NonNegativeInt, validator -from pydantic.tools import parse_raw_as +from pydantic import ByteSize, NonNegativeInt, TypeAdapter, field_validator from settings_library.base import BaseCustomSettings from ._constants import GB @@ -10,21 +9,21 @@ class ComputationalServices(BaseCustomSettings): DEFAULT_MAX_NANO_CPUS: NonNegativeInt = _DEFAULT_MAX_NANO_CPUS_VALUE - DEFAULT_MAX_MEMORY: ByteSize = parse_raw_as( - ByteSize, f"{_DEFAULT_MAX_MEMORY_VALUE}" + DEFAULT_MAX_MEMORY: ByteSize = TypeAdapter(ByteSize).validate_python( + f"{_DEFAULT_MAX_MEMORY_VALUE}" ) DEFAULT_RUNTIME_TIMEOUT: NonNegativeInt = 0 - @validator("DEFAULT_MAX_NANO_CPUS", pre=True) + @field_validator("DEFAULT_MAX_NANO_CPUS", mode="before") @classmethod - def set_default_cpus_if_negative(cls, v): + def _set_default_cpus_if_negative(cls, v): if v is None or v == "" or int(v) <= 0: v = _DEFAULT_MAX_NANO_CPUS_VALUE return v - @validator("DEFAULT_MAX_MEMORY", pre=True) + @field_validator("DEFAULT_MAX_MEMORY", mode="before") @classmethod - def set_default_memory_if_negative(cls, v): + def _set_default_memory_if_negative(cls, v): if v is None or v == "" or int(v) <= 0: v = _DEFAULT_MAX_MEMORY_VALUE return v diff --git a/packages/settings-library/src/settings_library/director_v0.py b/packages/settings-library/src/settings_library/director_v0.py new file mode 100644 index 00000000000..1d599d9b328 --- /dev/null +++ b/packages/settings-library/src/settings_library/director_v0.py @@ -0,0 +1,26 @@ +from functools import cached_property +from typing import Annotated + +from pydantic import AnyHttpUrl, Field, TypeAdapter +from settings_library.base import BaseCustomSettings +from settings_library.basic_types import PortInt, VersionTag + + +class DirectorV0Settings(BaseCustomSettings): + DIRECTOR_ENABLED: bool = True + + DIRECTOR_HOST: str = "director" + DIRECTOR_PORT: PortInt = TypeAdapter(PortInt).validate_python(8000) + DIRECTOR_VTAG: Annotated[ + VersionTag, Field(description="Director-v0 service API's version tag") + ] = "v0" + + @cached_property + def endpoint(self) -> str: + url = AnyHttpUrl.build( # pylint: disable=no-member + scheme="http", + host=self.DIRECTOR_HOST, + port=self.DIRECTOR_PORT, + path=f"{self.DIRECTOR_VTAG}", + ) + return f"{url}" 
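Note: the settings classes above rely on the auto_default_from_env mechanism reworked in base.py earlier in this diff: a nested field annotated with Field(json_schema_extra={"auto_default_from_env": True}) gets a default factory that builds the sub-settings from the environment when the field is not set explicitly (this is how CELERY_RABBIT_BROKER and CELERY_REDIS_RESULT_BACKEND resolve themselves). Below is a minimal, self-contained sketch of that idea using plain pydantic-settings and hypothetical names, not the actual BaseCustomSettings machinery.

import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class _SubSettings(BaseSettings):
    SUB_HOST: str = "localhost"
    SUB_PORT: int = 5672

    model_config = SettingsConfigDict(case_sensitive=True)


class _AppSettings(BaseSettings):
    # mimics auto_default_from_env: when APP_SUB is not passed explicitly,
    # the nested settings are constructed from the environment
    APP_SUB: _SubSettings = Field(default_factory=_SubSettings)

    model_config = SettingsConfigDict(case_sensitive=True)


os.environ["SUB_PORT"] = "15672"
settings = _AppSettings()
assert settings.APP_SUB.SUB_PORT == 15672

The real implementation additionally installs the factory in __pydantic_init_subclass__ and falls back to None for nullable fields that cannot be resolved, as shown in the base.py hunk above.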
diff --git a/packages/settings-library/src/settings_library/director_v2.py b/packages/settings-library/src/settings_library/director_v2.py new file mode 100644 index 00000000000..baf32956c8e --- /dev/null +++ b/packages/settings-library/src/settings_library/director_v2.py @@ -0,0 +1,33 @@ +from functools import cached_property + +from settings_library.base import BaseCustomSettings +from settings_library.basic_types import PortInt, VersionTag +from settings_library.utils_service import ( + DEFAULT_FASTAPI_PORT, + MixinServiceSettings, + URLPart, +) + + +class DirectorV2Settings(BaseCustomSettings, MixinServiceSettings): + DIRECTOR_V2_HOST: str = "director-v2" + DIRECTOR_V2_PORT: PortInt = DEFAULT_FASTAPI_PORT + DIRECTOR_V2_VTAG: VersionTag = "v2" + + @cached_property + def api_base_url(self) -> str: + # http://director-v2:8000/v2 + return self._compose_url( + prefix="DIRECTOR_V2", + port=URLPart.REQUIRED, + vtag=URLPart.REQUIRED, + ) + + @cached_property + def base_url(self) -> str: + # http://director-v2:8000 + return self._compose_url( + prefix="DIRECTOR_V2", + port=URLPart.REQUIRED, + vtag=URLPart.EXCLUDE, + ) diff --git a/packages/settings-library/src/settings_library/docker_api_proxy.py b/packages/settings-library/src/settings_library/docker_api_proxy.py new file mode 100644 index 00000000000..14f66f0934e --- /dev/null +++ b/packages/settings-library/src/settings_library/docker_api_proxy.py @@ -0,0 +1,24 @@ +from functools import cached_property + +from pydantic import Field, SecretStr + +from .base import BaseCustomSettings +from .basic_types import PortInt + + +class DockerApiProxysettings(BaseCustomSettings): + DOCKER_API_PROXY_HOST: str = Field( + description="hostname of the docker-api-proxy service" + ) + DOCKER_API_PROXY_PORT: PortInt = Field( + 8888, description="port of the docker-api-proxy service" + ) + DOCKER_API_PROXY_SECURE: bool = False + + DOCKER_API_PROXY_USER: str + DOCKER_API_PROXY_PASSWORD: SecretStr + + @cached_property + def base_url(self) -> str: + protocl = "https" if self.DOCKER_API_PROXY_SECURE else "http" + return f"{protocl}://{self.DOCKER_API_PROXY_HOST}:{self.DOCKER_API_PROXY_PORT}" diff --git a/packages/settings-library/src/settings_library/docker_registry.py b/packages/settings-library/src/settings_library/docker_registry.py index c79ecaac0d6..312bd0a53d5 100644 --- a/packages/settings-library/src/settings_library/docker_registry.py +++ b/packages/settings-library/src/settings_library/docker_registry.py @@ -1,37 +1,86 @@ from functools import cached_property -from typing import Optional +from typing import Annotated, Any, Self -from pydantic import Field, SecretStr, validator +from pydantic import ( + AnyHttpUrl, + Field, + SecretStr, + TypeAdapter, + field_validator, + model_validator, +) +from pydantic_settings import SettingsConfigDict from .base import BaseCustomSettings class RegistrySettings(BaseCustomSettings): + REGISTRY_AUTH: Annotated[bool, Field(description="do registry authentication")] + REGISTRY_PATH: Annotated[ + str | None, + Field( + # This is useful in case of a local registry, where the registry url (path) is relative to the host docker engine" + description="development mode only, in case a local registry is used - " + "this is the hostname to the docker registry as seen from the host running the containers (e.g. 
127.0.0.1:5000)", + ), + ] = None - REGISTRY_AUTH: bool = Field(..., description="do registry authentication") - REGISTRY_PATH: Optional[str] = Field( - None, description="development mode only, in case a local registry is used" - ) - # NOTE: name is missleading, http or https protocol are not included - REGISTRY_URL: str = Field("", description="address to the docker registry") + REGISTRY_URL: Annotated[ + str, + Field( + # NOTE: name is missleading, http or https protocol are not included + description="hostname of docker registry (without protocol but with port if available)", + min_length=1, + ), + ] - REGISTRY_USER: str = Field( - ..., description="username to access the docker registry" - ) - REGISTRY_PW: SecretStr = Field( - ..., description="password to access the docker registry" - ) - REGISTRY_SSL: bool = Field(..., description="access to registry through ssl") + REGISTRY_USER: Annotated[ + str, + Field(description="username to access the docker registry"), + ] + REGISTRY_PW: Annotated[ + SecretStr, + Field(description="password to access the docker registry"), + ] + REGISTRY_SSL: Annotated[ + bool, + Field(description="True if docker registry is using HTTPS protocol"), + ] - @validator("REGISTRY_PATH", pre=True) + @field_validator("REGISTRY_PATH", mode="before") @classmethod - def escape_none_string(cls, v) -> Optional[str]: + def _escape_none_string(cls, v) -> Any | None: return None if v == "None" else v + @model_validator(mode="after") + def _check_registry_authentication(self: Self) -> Self: + if self.REGISTRY_AUTH and any( + not v for v in (self.REGISTRY_USER, self.REGISTRY_PW) + ): + msg = "If REGISTRY_AUTH is True, both REGISTRY_USER and REGISTRY_PW must be provided" + raise ValueError(msg) + return self + @cached_property def resolved_registry_url(self) -> str: return self.REGISTRY_PATH or self.REGISTRY_URL @cached_property - def api_url(self) -> str: - return f"{self.REGISTRY_URL}/v2" + def api_url(self) -> AnyHttpUrl: + return TypeAdapter(AnyHttpUrl).validate_python( + f"http{'s' if self.REGISTRY_SSL else ''}://{self.REGISTRY_URL}/v2" + ) + + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + { + "REGISTRY_AUTH": "True", + "REGISTRY_USER": "theregistryuser", + "REGISTRY_PW": "some_secret_value", + "REGISTRY_SSL": "True", + "REGISTRY_URL": "registry.osparc-master.speag.com", + } + ], + } + ) diff --git a/packages/settings-library/src/settings_library/ec2.py b/packages/settings-library/src/settings_library/ec2.py new file mode 100644 index 00000000000..dd78a70a138 --- /dev/null +++ b/packages/settings-library/src/settings_library/ec2.py @@ -0,0 +1,31 @@ +from typing import Annotated + +from pydantic import BeforeValidator, Field +from pydantic_settings import SettingsConfigDict + +from .base import BaseCustomSettings +from .utils_validators import validate_nullable_url + + +class EC2Settings(BaseCustomSettings): + EC2_ACCESS_KEY_ID: str + EC2_ENDPOINT: Annotated[ + str | None, + BeforeValidator(validate_nullable_url), + Field(description="do not define if using standard AWS"), + ] = None + EC2_REGION_NAME: str = "us-east-1" + EC2_SECRET_ACCESS_KEY: str + + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + { + "EC2_ACCESS_KEY_ID": "my_access_key_id", + "EC2_ENDPOINT": "https://my_ec2_endpoint.com", + "EC2_REGION_NAME": "us-east-1", + "EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + } + ], + } + ) diff --git a/packages/settings-library/src/settings_library/efs.py b/packages/settings-library/src/settings_library/efs.py new 
file mode 100644 index 00000000000..c27d70b37c6 --- /dev/null +++ b/packages/settings-library/src/settings_library/efs.py @@ -0,0 +1,50 @@ +from pathlib import Path +from typing import Annotated + +from pydantic import Field + +from .base import BaseCustomSettings + + +class AwsEfsSettings(BaseCustomSettings): + EFS_DNS_NAME: Annotated[ + str, + Field( + description="AWS Elastic File System DNS name", + examples=["fs-xxx.efs.us-east-1.amazonaws.com"], + ), + ] + EFS_PROJECT_SPECIFIC_DATA_DIRECTORY: str + EFS_MOUNTED_PATH: Annotated[ + Path, + Field( + description="This is the path where EFS is mounted to the EC2 machine", + ), + ] + + +NFS_PROTOCOL = "4.1" +READ_SIZE = "1048576" +WRITE_SIZE = "1048576" +RECOVERY_MODE = "hard" +NFS_REQUEST_TIMEOUT = "600" +NUMBER_OF_RETRANSMISSIONS = "2" +PORT_MODE = "noresvport" + +""" +`sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport` + +Explanation: + +nfsvers=4.1: Specifies the NFS protocol version to use; here, it is version 4.1, which supports improved security features and performance optimizations over earlier versions. + +rsize=1048576 and wsize=1048576: Set the read and write buffer sizes in bytes, respectively. Here, both are set to 1,048,576 bytes (1 MB). Larger buffer sizes can improve performance over high-latency networks by allowing more data to be transferred with each read or write request. + +hard: Specifies the recovery behavior of the NFS client. If the NFS server becomes unreachable, the NFS client will retry the request until the server becomes available again. The alternative is soft, where the NFS client gives up after a certain number of retries, potentially leading to data corruption or loss. + +timeo=600: Sets the timeout value for NFS requests in deciseconds (tenths of a second). Here, 600 deciseconds means 60 seconds. This is how long the NFS client will wait for a response from the server before retrying or failing. + +retrans=2: Sets the number of retransmissions for each NFS request if a response is not received before the timeout. Here, it will retry each request twice. + +noresvport: Normally, NFS uses a reserved port (number below 1024) for communicating, which requires root privileges on the client side. noresvport allows using non-reserved ports, which can be helpful in environments where clients don't have root privileges. 
+""" diff --git a/packages/settings-library/src/settings_library/email.py b/packages/settings-library/src/settings_library/email.py index f7c10f7f7b4..65ca6d6b735 100644 --- a/packages/settings-library/src/settings_library/email.py +++ b/packages/settings-library/src/settings_library/email.py @@ -1,7 +1,7 @@ from enum import Enum -from typing import Optional +from typing import Annotated, Self -from pydantic import root_validator +from pydantic import model_validator from pydantic.fields import Field from pydantic.types import SecretStr @@ -25,39 +25,42 @@ class SMTPSettings(BaseCustomSettings): SMTP_HOST: str SMTP_PORT: PortInt - SMTP_PROTOCOL: EmailProtocol = Field( - EmailProtocol.UNENCRYPTED, - description="Select between TLS, STARTTLS Secure Mode or unencrypted communication", - ) - SMTP_USERNAME: Optional[str] = Field(None, min_length=1) - SMTP_PASSWORD: Optional[SecretStr] = Field(None, min_length=1) - - @root_validator - @classmethod - def both_credentials_must_be_set(cls, values): - username = values.get("SMTP_USERNAME") - password = values.get("SMTP_PASSWORD") + SMTP_PROTOCOL: Annotated[ + EmailProtocol, + Field( + description="Select between TLS, STARTTLS Secure Mode or unencrypted communication", + ), + ] = EmailProtocol.UNENCRYPTED + + SMTP_USERNAME: Annotated[str | None, Field(min_length=1)] = None + SMTP_PASSWORD: Annotated[SecretStr | None, Field(min_length=1)] = None + + @model_validator(mode="after") + def _both_credentials_must_be_set(self) -> Self: + username = self.SMTP_USERNAME + password = self.SMTP_PASSWORD if username is None and password or username and password is None: - raise ValueError( - f"Please provide both {username=} and {password=} not just one" - ) + msg = f"Please provide both {username=} and {password=} not just one" + raise ValueError(msg) - return values + return self - @root_validator - @classmethod - def enabled_tls_required_authentication(cls, values): - smtp_protocol = values.get("SMTP_PROTOCOL") + @model_validator(mode="after") + def _enabled_tls_required_authentication(self) -> Self: + smtp_protocol = self.SMTP_PROTOCOL - username = values.get("SMTP_USERNAME") - password = values.get("SMTP_PASSWORD") + username = self.SMTP_USERNAME + password = self.SMTP_PASSWORD tls_enabled = smtp_protocol == EmailProtocol.TLS starttls_enabled = smtp_protocol == EmailProtocol.STARTTLS if (tls_enabled or starttls_enabled) and not (username or password): - raise ValueError( - "when using SMTP_PROTOCOL other than UNENCRYPTED username and password are required" - ) - return values + msg = "when using SMTP_PROTOCOL other than UNENCRYPTED username and password are required" + raise ValueError(msg) + return self + + @property + def has_credentials(self) -> bool: + return self.SMTP_USERNAME is not None and self.SMTP_PASSWORD is not None diff --git a/packages/settings-library/src/settings_library/http_client_request.py b/packages/settings-library/src/settings_library/http_client_request.py index 213d8084e76..b73cbdea82e 100644 --- a/packages/settings-library/src/settings_library/http_client_request.py +++ b/packages/settings-library/src/settings_library/http_client_request.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Annotated from pydantic import Field @@ -8,26 +8,32 @@ class ClientRequestSettings(BaseCustomSettings): # NOTE: These entries are used in some old services as well. These need to be updated if these # variable names or defaults are changed. 
- HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT: Optional[int] = Field( - default=20, - description="timeout in seconds used for outgoing http requests", - ) + HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT: Annotated[ + int | None, + Field( + description="timeout in seconds used for outgoing http requests", + ), + ] = 20 - HTTP_CLIENT_REQUEST_AIOHTTP_CONNECT_TIMEOUT: Optional[int] = Field( - default=None, - description=( - "Maximal number of seconds for acquiring a connection" - " from pool. The time consists connection establishment" - " for a new connection or waiting for a free connection" - " from a pool if pool connection limits are exceeded. " - "For pure socket connection establishment time use sock_connect." + HTTP_CLIENT_REQUEST_AIOHTTP_CONNECT_TIMEOUT: Annotated[ + int | None, + Field( + description=( + "Maximal number of seconds for acquiring a connection" + " from pool. The time consists of connection establishment" + " for a new connection or waiting for a free connection" + " from a pool if pool connection limits are exceeded. " + "For pure socket connection establishment time use sock_connect." + ), ), - ) + ] = None - HTTP_CLIENT_REQUEST_AIOHTTP_SOCK_CONNECT_TIMEOUT: Optional[int] = Field( - default=5, - description=( - "aiohttp specific field used in ClientTimeout, timeout for connecting to a " - "peer for a new connection not given a pool" + HTTP_CLIENT_REQUEST_AIOHTTP_SOCK_CONNECT_TIMEOUT: Annotated[ + int | None, + Field( + description=( + "aiohttp specific field used in ClientTimeout, timeout for connecting to a " + "peer for a new connection not given a pool" + ), ), - ) + ] = 5 diff --git a/packages/settings-library/src/settings_library/node_ports.py b/packages/settings-library/src/settings_library/node_ports.py new file mode 100644 index 00000000000..562e71e038a --- /dev/null +++ b/packages/settings-library/src/settings_library/node_ports.py @@ -0,0 +1,49 @@ +from datetime import timedelta +from typing import Annotated, Final, Self + +from pydantic import Field, NonNegativeInt, PositiveInt, SecretStr, model_validator + +from .base import BaseCustomSettings +from .postgres import PostgresSettings +from .storage import StorageSettings + +NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS_DEFAULT_VALUE: Final[NonNegativeInt] = 3 + + +class StorageAuthSettings(StorageSettings): + STORAGE_USERNAME: str | None = None + STORAGE_PASSWORD: SecretStr | None = None + STORAGE_SECURE: bool = False + + @property + def auth_required(self) -> bool: + # NOTE: authentication is required due to an issue with docker networks + # for details see https://github.com/ITISFoundation/osparc-issues/issues/1264 + return self.STORAGE_USERNAME is not None and self.STORAGE_PASSWORD is not None + + @model_validator(mode="after") + def _validate_auth_fields(self) -> Self: + username = self.STORAGE_USERNAME + password = self.STORAGE_PASSWORD + if (username is None) != (password is None): + msg = f"Both {username=} and {password=} must be either set or unset!"
+ raise ValueError(msg) + return self + + +class NodePortsSettings(BaseCustomSettings): + NODE_PORTS_STORAGE_AUTH: Annotated[ + StorageAuthSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + POSTGRES_SETTINGS: Annotated[ + PostgresSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + NODE_PORTS_MULTIPART_UPLOAD_COMPLETION_TIMEOUT_S: NonNegativeInt = int( + timedelta(minutes=5).total_seconds() + ) + NODE_PORTS_IO_NUM_RETRY_ATTEMPTS: PositiveInt = 5 + NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS: NonNegativeInt = ( + NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS_DEFAULT_VALUE + ) diff --git a/packages/settings-library/src/settings_library/postgres.py b/packages/settings-library/src/settings_library/postgres.py index bcbcefa4345..83aa960c92c 100644 --- a/packages/settings-library/src/settings_library/postgres.py +++ b/packages/settings-library/src/settings_library/postgres.py @@ -1,14 +1,20 @@ -import urllib.parse from functools import cached_property -from typing import Optional +from typing import Annotated +from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse -from pydantic import Field, PostgresDsn, SecretStr, conint, validator +from pydantic import ( + AliasChoices, + Field, + PostgresDsn, + SecretStr, + ValidationInfo, + field_validator, +) +from pydantic_settings import SettingsConfigDict from .base import BaseCustomSettings from .basic_types import PortInt -IntGE1 = conint(ge=1) - class PostgresSettings(BaseCustomSettings): # entrypoint @@ -20,74 +26,87 @@ class PostgresSettings(BaseCustomSettings): POSTGRES_PASSWORD: SecretStr # database - POSTGRES_DB: str = Field(..., description="Database name") + POSTGRES_DB: Annotated[str, Field(description="Database name")] # pool connection limits - POSTGRES_MINSIZE: IntGE1 = Field( - 1, description="Minimum number of connections in the pool" - ) - POSTGRES_MAXSIZE: IntGE1 = Field( - 50, description="Maximum number of connections in the pool" - ) + POSTGRES_MINSIZE: Annotated[ + int, Field(description="Minimum number of connections in the pool", ge=1) + ] = 1 + POSTGRES_MAXSIZE: Annotated[ + int, Field(description="Maximum number of connections in the pool", ge=1) + ] = 50 - POSTGRES_CLIENT_NAME: Optional[str] = Field( - None, - description="Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)", - env=[ - "POSTGRES_CLIENT_NAME", - # This is useful when running inside a docker container, then the hostname is set each client gets a different name - "HOST", - "HOSTNAME", - ], - ) + POSTGRES_CLIENT_NAME: Annotated[ + str | None, + Field( + description="Name of the application connecting the postgres database, will default to use the host hostname (hostname on linux)", + validation_alias=AliasChoices( + "POSTGRES_CLIENT_NAME", + # This is useful when running inside a docker container, then the hostname is set each client gets a different name + "HOST", + "HOSTNAME", + ), + ), + ] = None - @validator("POSTGRES_MAXSIZE") + @field_validator("POSTGRES_MAXSIZE") @classmethod - def _check_size(cls, v, values): - if not (values["POSTGRES_MINSIZE"] <= v): - raise ValueError( - f"assert POSTGRES_MINSIZE={values['POSTGRES_MINSIZE']} <= POSTGRES_MAXSIZE={v}" - ) + def _check_size(cls, v, info: ValidationInfo): + if info.data["POSTGRES_MINSIZE"] > v: + msg = f"assert POSTGRES_MINSIZE={info.data['POSTGRES_MINSIZE']} <= POSTGRES_MAXSIZE={v}" + raise ValueError(msg) return v @cached_property def dsn(self) -> str: - dsn: str = PostgresDsn.build( + url = 
PostgresDsn.build( # pylint: disable=no-member scheme="postgresql", - user=self.POSTGRES_USER, + username=self.POSTGRES_USER, password=self.POSTGRES_PASSWORD.get_secret_value(), host=self.POSTGRES_HOST, - port=f"{self.POSTGRES_PORT}", - path=f"/{self.POSTGRES_DB}", + port=self.POSTGRES_PORT, + path=f"{self.POSTGRES_DB}", ) - return dsn + return f"{url}" @cached_property def dsn_with_async_sqlalchemy(self) -> str: - dsn: str = PostgresDsn.build( + url = PostgresDsn.build( # pylint: disable=no-member scheme="postgresql+asyncpg", - user=self.POSTGRES_USER, + username=self.POSTGRES_USER, password=self.POSTGRES_PASSWORD.get_secret_value(), host=self.POSTGRES_HOST, - port=f"{self.POSTGRES_PORT}", - path=f"/{self.POSTGRES_DB}", + port=self.POSTGRES_PORT, + path=f"{self.POSTGRES_DB}", ) - return dsn + return f"{url}" @cached_property def dsn_with_query(self) -> str: """Some clients do not support queries in the dsn""" dsn = self.dsn + return self._update_query(dsn) + + def _update_query(self, uri: str) -> str: + # SEE https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS + new_params: dict[str, str] = {} if self.POSTGRES_CLIENT_NAME: - dsn += "?" + urllib.parse.urlencode( - {"application_name": self.POSTGRES_CLIENT_NAME} - ) - return dsn + new_params = { + "application_name": self.POSTGRES_CLIENT_NAME, + } + + if new_params: + parsed_uri = urlparse(uri) + query = dict(parse_qsl(parsed_uri.query)) + query.update(new_params) + updated_query = urlencode(query) + return urlunparse(parsed_uri._replace(query=updated_query)) + return uri - class Config(BaseCustomSettings.Config): - schema_extra = { + model_config = SettingsConfigDict( + json_schema_extra={ "examples": [ - # minimal + # minimal required { "POSTGRES_HOST": "localhost", "POSTGRES_PORT": "5432", @@ -97,3 +116,4 @@ class Config(BaseCustomSettings.Config): } ], } + ) diff --git a/packages/settings-library/src/settings_library/prometheus.py b/packages/settings-library/src/settings_library/prometheus.py index 16fdf84c82c..9c40293d463 100644 --- a/packages/settings-library/src/settings_library/prometheus.py +++ b/packages/settings-library/src/settings_library/prometheus.py @@ -1,26 +1,39 @@ from functools import cached_property -from pydantic.networks import AnyHttpUrl +from pydantic import AnyUrl, SecretStr from settings_library.base import BaseCustomSettings from settings_library.utils_service import MixinServiceSettings -from .basic_types import PortInt, VersionTag +from .basic_types import VersionTag class PrometheusSettings(BaseCustomSettings, MixinServiceSettings): - PROMETHEUS_HOST: str = "prometheus" - PROMETHEUS_PORT: PortInt = 9090 + PROMETHEUS_URL: AnyUrl PROMETHEUS_VTAG: VersionTag = "v1" + PROMETHEUS_USERNAME: str | None = None + PROMETHEUS_PASSWORD: SecretStr | None = None @cached_property def base_url(self) -> str: - return AnyHttpUrl.build( - scheme="http", - host=self.PROMETHEUS_HOST, - port=f"{self.PROMETHEUS_PORT}", - path=f"/api/{self.PROMETHEUS_VTAG}/query", - ) + return f"{self.PROMETHEUS_URL}/api/{self.PROMETHEUS_VTAG}/query" @cached_property def origin(self) -> str: return self._build_origin_url(prefix="PROMETHEUS") + + @cached_property + def api_url(self) -> str: + assert self.PROMETHEUS_URL.host # nosec + prometheus_url: str = str( + AnyUrl.build( + scheme=self.PROMETHEUS_URL.scheme, + username=self.PROMETHEUS_USERNAME, + password=self.PROMETHEUS_PASSWORD.get_secret_value() + if self.PROMETHEUS_PASSWORD + else None, + host=self.PROMETHEUS_URL.host, + port=self.PROMETHEUS_URL.port, + 
path=self.PROMETHEUS_URL.path, + ) + ) + return prometheus_url diff --git a/packages/settings-library/src/settings_library/py.typed b/packages/settings-library/src/settings_library/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/settings-library/src/settings_library/r_clone.py b/packages/settings-library/src/settings_library/r_clone.py index c873c96d792..d1a6472e9c6 100644 --- a/packages/settings-library/src/settings_library/r_clone.py +++ b/packages/settings-library/src/settings_library/r_clone.py @@ -1,29 +1,38 @@ -from enum import Enum +from enum import StrEnum +from typing import Annotated -from pydantic import Field +from pydantic import Field, NonNegativeInt from .base import BaseCustomSettings from .s3 import S3Settings -class S3Provider(str, Enum): +class S3Provider(StrEnum): AWS = "AWS" CEPH = "CEPH" MINIO = "MINIO" class RCloneSettings(BaseCustomSettings): - # TODO: PC this flag is actually ONLY used by the dynamic sidecar. - # It determines how the dynamic sidecar sets up node-ports storage - # mechanism. I propose it is added as an extra independent variable in - # simcore_service_dynamic_idecar.core.settings.Settings instead of here. - R_CLONE_ENABLED: bool = Field( - True, - description=( - "simple way to enable/disable the usage of rclone " - "in parts of the system where it is optional " - "eg: dynamic-sidecar" - ), - ) - R_CLONE_S3: S3Settings = Field(auto_default_from_env=True) + R_CLONE_S3: Annotated[ + S3Settings, Field(json_schema_extra={"auto_default_from_env": True}) + ] R_CLONE_PROVIDER: S3Provider + + R_CLONE_OPTION_TRANSFERS: Annotated[ + # SEE https://rclone.org/docs/#transfers-n + NonNegativeInt, + Field(description="`--transfers X`: sets the amount of parallel transfers"), + ] = 5 + R_CLONE_OPTION_RETRIES: Annotated[ + # SEE https://rclone.org/docs/#retries-int + NonNegativeInt, + Field(description="`--retries X`: times to retry each individual transfer"), + ] = 3 + R_CLONE_OPTION_BUFFER_SIZE: Annotated[ + # SEE https://rclone.org/docs/#buffer-size-size + str, + Field( + description="`--buffer-size X`: sets the amount of RAM to use for each individual transfer", + ), + ] = "16M" diff --git a/packages/settings-library/src/settings_library/rabbit.py b/packages/settings-library/src/settings_library/rabbit.py index 18b109da68f..5e59010c3ee 100644 --- a/packages/settings-library/src/settings_library/rabbit.py +++ b/packages/settings-library/src/settings_library/rabbit.py @@ -1,20 +1,24 @@ from functools import cached_property +from typing import ClassVar +from pydantic.config import JsonDict from pydantic.networks import AnyUrl from pydantic.types import SecretStr +from pydantic_settings import SettingsConfigDict from .base import BaseCustomSettings from .basic_types import PortInt class RabbitDsn(AnyUrl): - allowed_schemes = {"amqp"} + allowed_schemes: ClassVar[set[str]] = {"amqp", "amqps"} class RabbitSettings(BaseCustomSettings): # host RABBIT_HOST: str RABBIT_PORT: PortInt = 5672 + RABBIT_SECURE: bool # auth RABBIT_USER: str @@ -22,10 +26,42 @@ class RabbitSettings(BaseCustomSettings): @cached_property def dsn(self) -> str: - return RabbitDsn.build( - scheme="amqp", - user=self.RABBIT_USER, - password=self.RABBIT_PASSWORD.get_secret_value(), - host=self.RABBIT_HOST, - port=f"{self.RABBIT_PORT}", + rabbit_dsn: str = str( + RabbitDsn.build( + scheme="amqps" if self.RABBIT_SECURE else "amqp", + username=self.RABBIT_USER, + password=self.RABBIT_PASSWORD.get_secret_value(), + host=self.RABBIT_HOST, + port=self.RABBIT_PORT, + ) ) + 
return rabbit_dsn + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "RABBIT_HOST": "rabbitmq.example.com", + "RABBIT_USER": "guest", + "RABBIT_PASSWORD": "guest-password", + "RABBIT_SECURE": False, + "RABBIT_PORT": 5672, + }, + { + "RABBIT_HOST": "secure.rabbitmq.example.com", + "RABBIT_USER": "guest", + "RABBIT_PASSWORD": "guest-password", + "RABBIT_SECURE": True, + "RABBIT_PORT": 15672, + }, + ] + } + ) + + model_config = SettingsConfigDict( + extra="ignore", + populate_by_name=True, + json_schema_extra=_update_json_schema_extra, + ) diff --git a/packages/settings-library/src/settings_library/redis.py b/packages/settings-library/src/settings_library/redis.py index bb313ede431..40dd88aabf9 100644 --- a/packages/settings-library/src/settings_library/redis.py +++ b/packages/settings-library/src/settings_library/redis.py @@ -1,69 +1,60 @@ -from functools import cached_property -from typing import Optional +from enum import IntEnum -from pydantic import Field from pydantic.networks import RedisDsn from pydantic.types import SecretStr +from pydantic_settings import SettingsConfigDict from .base import BaseCustomSettings from .basic_types import PortInt +class RedisDatabase(IntEnum): + RESOURCES = 0 + LOCKS = 1 + VALIDATION_CODES = 2 + SCHEDULED_MAINTENANCE = 3 + USER_NOTIFICATIONS = 4 + ANNOUNCEMENTS = 5 + DISTRIBUTED_IDENTIFIERS = 6 + DEFERRED_TASKS = 7 + DYNAMIC_SERVICES = 8 + CELERY_TASKS = 9 + + class RedisSettings(BaseCustomSettings): # host + REDIS_SECURE: bool = False REDIS_HOST: str = "redis" REDIS_PORT: PortInt = 6789 # auth - REDIS_USER: Optional[str] = None - REDIS_PASSWORD: Optional[SecretStr] = None - - # redis databases (db) - REDIS_RESOURCES_DB: int = Field( - default=0, - description="typical redis DB have 16 'tables', for convenience we use this table for user resources", - ) - REDIS_LOCKS_DB: int = Field( - default=1, description="This redis table is used to put locks" - ) - REDIS_VALIDATION_CODES_DB: int = Field( - default=2, description="This redis table is used to store SMS validation codes" - ) - REDIS_SCHEDULED_MAINTENANCE_DB: int = Field( - default=3, description="This redis table is used for handling scheduled maintenance" - ) - REDIS_USER_NOTIFICATIONS_DB: int = Field( - default=4, description="This redis table is used for handling the notifications that have to be sent to the user" - ) - - def _build_redis_dsn(self, db_index: int): - return RedisDsn.build( - scheme="redis", - user=self.REDIS_USER or None, - password=self.REDIS_PASSWORD.get_secret_value() - if self.REDIS_PASSWORD - else None, - host=self.REDIS_HOST, - port=f"{self.REDIS_PORT}", - path=f"/{db_index}", + REDIS_USER: str | None = None + REDIS_PASSWORD: SecretStr | None = None + + def build_redis_dsn(self, db_index: RedisDatabase) -> str: + return str( + RedisDsn.build( # pylint: disable=no-member + scheme="rediss" if self.REDIS_SECURE else "redis", + username=self.REDIS_USER or None, + password=( + self.REDIS_PASSWORD.get_secret_value() + if self.REDIS_PASSWORD + else None + ), + host=self.REDIS_HOST, + port=self.REDIS_PORT, + path=f"{db_index}", + ) ) - @cached_property - def dsn_resources(self) -> str: - return self._build_redis_dsn(self.REDIS_RESOURCES_DB) - - @cached_property - def dsn_locks(self) -> str: - return self._build_redis_dsn(self.REDIS_LOCKS_DB) - - @cached_property - def dsn_validation_codes(self) -> str: - return self._build_redis_dsn(self.REDIS_VALIDATION_CODES_DB) - - @cached_property - def 
dsn_scheduled_maintenance(self) -> str: - return self._build_redis_dsn(self.REDIS_SCHEDULED_MAINTENANCE_DB) - - @cached_property - def dsn_user_notifications(self) -> str: - return self._build_redis_dsn(self.REDIS_USER_NOTIFICATIONS_DB) \ No newline at end of file + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + # minimal required + { + "REDIS_USER": "user", + "REDIS_PASSWORD": "foobar", # NOSONAR + } + ], + } + ) diff --git a/packages/settings-library/src/settings_library/resource_usage_tracker.py b/packages/settings-library/src/settings_library/resource_usage_tracker.py new file mode 100644 index 00000000000..d0df8f093ad --- /dev/null +++ b/packages/settings-library/src/settings_library/resource_usage_tracker.py @@ -0,0 +1,36 @@ +from datetime import timedelta +from functools import cached_property + +from settings_library.base import BaseCustomSettings +from settings_library.basic_types import PortInt, VersionTag +from settings_library.utils_service import ( + DEFAULT_FASTAPI_PORT, + MixinServiceSettings, + URLPart, +) + +DEFAULT_RESOURCE_USAGE_HEARTBEAT_INTERVAL: timedelta = timedelta(seconds=60) + + +class ResourceUsageTrackerSettings(BaseCustomSettings, MixinServiceSettings): + RESOURCE_USAGE_TRACKER_HOST: str = "resource-usage-tracker" + RESOURCE_USAGE_TRACKER_PORT: PortInt = DEFAULT_FASTAPI_PORT + RESOURCE_USAGE_TRACKER_VTAG: VersionTag = "v1" + + @cached_property + def api_base_url(self) -> str: + # http://resource-usage-tracker:8000/v1 + return self._compose_url( + prefix="RESOURCE_USAGE_TRACKER", + port=URLPart.REQUIRED, + vtag=URLPart.REQUIRED, + ) + + @cached_property + def base_url(self) -> str: + # http://resource-usage-tracker:8000 + return self._compose_url( + prefix="RESOURCE_USAGE_TRACKER", + port=URLPart.REQUIRED, + vtag=URLPart.EXCLUDE, + ) diff --git a/packages/settings-library/src/settings_library/s3.py b/packages/settings-library/src/settings_library/s3.py index cb527bfd6ec..348e1dcb39d 100644 --- a/packages/settings-library/src/settings_library/s3.py +++ b/packages/settings-library/src/settings_library/s3.py @@ -1,23 +1,39 @@ -from typing import Optional +from typing import Annotated -from pydantic import validator +from pydantic import AnyHttpUrl, Field +from pydantic_settings import SettingsConfigDict from .base import BaseCustomSettings +from .basic_types import IDStr class S3Settings(BaseCustomSettings): - S3_SECURE: bool = False - S3_ENDPOINT: str - S3_ACCESS_KEY: str - S3_SECRET_KEY: str - S3_ACCESS_TOKEN: Optional[str] = None - S3_BUCKET_NAME: str - S3_REGION: str = "us-east-1" + S3_ACCESS_KEY: IDStr + S3_BUCKET_NAME: IDStr + S3_ENDPOINT: Annotated[ + AnyHttpUrl | None, Field(description="do not define if using standard AWS") + ] = None + S3_REGION: IDStr + S3_SECRET_KEY: IDStr - @validator("S3_ENDPOINT", pre=True) - @classmethod - def ensure_scheme(cls, v: str, values) -> str: - if not v.startswith("http"): - scheme = "https" if values.get("S3_SECURE") else "http" - return f"{scheme}://{v}" - return v + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + { + # non AWS use-case + "S3_ACCESS_KEY": "my_access_key_id", + "S3_BUCKET_NAME": "some-s3-bucket", + "S3_ENDPOINT": "https://non-aws-s3_endpoint.com", + "S3_REGION": "us-east-1", + "S3_SECRET_KEY": "my_secret_access_key", + }, + { + # AWS use-case + "S3_ACCESS_KEY": "my_access_key_id", + "S3_BUCKET_NAME": "some-s3-bucket", + "S3_REGION": "us-east-2", + "S3_SECRET_KEY": "my_secret_access_key", + }, + ], + } + ) diff --git 
a/packages/settings-library/src/settings_library/ssm.py b/packages/settings-library/src/settings_library/ssm.py new file mode 100644 index 00000000000..44546e4fdd7 --- /dev/null +++ b/packages/settings-library/src/settings_library/ssm.py @@ -0,0 +1,31 @@ +from typing import Annotated + +from pydantic import BeforeValidator, Field, SecretStr +from pydantic_settings import SettingsConfigDict + +from .base import BaseCustomSettings +from .utils_validators import validate_nullable_url + + +class SSMSettings(BaseCustomSettings): + SSM_ACCESS_KEY_ID: SecretStr + SSM_ENDPOINT: Annotated[ + str | None, + BeforeValidator(validate_nullable_url), + Field(description="do not define if using standard AWS"), + ] = None + SSM_REGION_NAME: str = "us-east-1" + SSM_SECRET_ACCESS_KEY: SecretStr + + model_config = SettingsConfigDict( + json_schema_extra={ + "examples": [ + { + "SSM_ACCESS_KEY_ID": "my_access_key_id", + "SSM_ENDPOINT": "https://my_ssm_endpoint.com", + "SSM_REGION_NAME": "us-east-1", + "SSM_SECRET_ACCESS_KEY": "my_secret_access_key", + } + ], + } + ) diff --git a/packages/settings-library/src/settings_library/storage.py b/packages/settings-library/src/settings_library/storage.py index d90f1f69685..00ef1987037 100644 --- a/packages/settings-library/src/settings_library/storage.py +++ b/packages/settings-library/src/settings_library/storage.py @@ -14,11 +14,20 @@ class StorageSettings(BaseCustomSettings, MixinServiceSettings): STORAGE_PORT: PortInt = DEFAULT_AIOHTTP_PORT STORAGE_VTAG: VersionTag = "v0" + @cached_property + def base_url(self) -> str: + # e.g. http://storage:8000 + return self._compose_url( + prefix="STORAGE", + port=URLPart.REQUIRED, + vtag=URLPart.EXCLUDE, + ) + @cached_property def api_base_url(self) -> str: - # http://storage:8080/v0 + # e.g. 
http://storage:8080/v0 return self._compose_url( - prefix="storage", + prefix="STORAGE", port=URLPart.REQUIRED, vtag=URLPart.REQUIRED, ) diff --git a/packages/settings-library/src/settings_library/tracing.py b/packages/settings-library/src/settings_library/tracing.py index e0c5e5f9201..c8263dd8be2 100644 --- a/packages/settings-library/src/settings_library/tracing.py +++ b/packages/settings-library/src/settings_library/tracing.py @@ -1,4 +1,7 @@ +from typing import Annotated + from pydantic import AnyUrl, Field +from settings_library.basic_types import RegisteredPortInt from .base import BaseCustomSettings @@ -6,15 +9,9 @@ class TracingSettings(BaseCustomSettings): - TRACING_ZIPKIN_ENDPOINT: AnyUrl = Field( - "http://jaeger:9411", description="Zipkin compatible endpoint" - ) - TRACING_THRIFT_COMPACT_ENDPOINT: AnyUrl = Field( - "http://jaeger:5775", - description="accept zipkin.thrift over compact thrift protocol (deprecated, used by legacy clients only)", - ) - TRACING_CLIENT_NAME: str = Field( - UNDEFINED_CLIENT_NAME, - description="Name of the application connecting the tracing service", - env=["HOST", "HOSTNAME", "TRACING_CLIENT_NAME"], - ) + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: Annotated[ + AnyUrl, Field(description="Opentelemetry compatible collector endpoint") + ] + TRACING_OPENTELEMETRY_COLLECTOR_PORT: Annotated[ + RegisteredPortInt, Field(description="Opentelemetry compatible collector port") + ] diff --git a/packages/settings-library/src/settings_library/twilio.py b/packages/settings-library/src/settings_library/twilio.py index a6f3f92fabd..93ebc753fec 100644 --- a/packages/settings-library/src/settings_library/twilio.py +++ b/packages/settings-library/src/settings_library/twilio.py @@ -5,24 +5,41 @@ SEE https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them """ +from typing import Annotated, TypeAlias -from pydantic import Field, constr +from pydantic import BeforeValidator, Field, StringConstraints, TypeAdapter from .base import BaseCustomSettings -# Based on https://countrycode.org/ -CountryCodeStr = constr(strip_whitespace=True, regex=r"^\d{1,4}") +CountryCodeStr: TypeAlias = Annotated[ + str, + BeforeValidator(str), + # Based on https://countrycode.org/ + StringConstraints(strip_whitespace=True, pattern=r"^\d{1,4}"), +] class TwilioSettings(BaseCustomSettings): - TWILIO_ACCOUNT_SID: str = Field(..., description="Twilio account String Identifier") - TWILIO_AUTH_TOKEN: str = Field(..., description="API tokens") - TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT: list[CountryCodeStr] = Field( + TWILIO_ACCOUNT_SID: Annotated[ + str, + Field(description="Twilio account String Identifier"), + ] + + TWILIO_AUTH_TOKEN: Annotated[ + str, + Field(description="API tokens"), + ] + + TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT: Annotated[ + list[CountryCodeStr], + Field( + description="list of country-codes supporting/registered for alphanumeric sender ID" + "See https://support.twilio.com/hc/en-us/articles/223133767-International-support-for-Alphanumeric-Sender-ID", + ), + ] = TypeAdapter(list[CountryCodeStr]).validate_python( [ "41", ], - description="list of country-codes supporting/registered for alphanumeric sender ID" - "See https://support.twilio.com/hc/en-us/articles/223133767-International-support-for-Alphanumeric-Sender-ID", ) def is_alphanumeric_supported(self, phone_number: str) -> bool: @@ -34,5 +51,5 @@ def is_alphanumeric_supported(self, phone_number: str) -> bool: ) return any( phone_number_wo_international_code.startswith(code) - 
for code in self.TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT + for code in self.TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT # pylint:disable=not-an-iterable ) diff --git a/packages/settings-library/src/settings_library/utils_cli.py b/packages/settings-library/src/settings_library/utils_cli.py index 7395cbee537..106b1d6fb74 100644 --- a/packages/settings-library/src/settings_library/utils_cli.py +++ b/packages/settings-library/src/settings_library/utils_cli.py @@ -1,13 +1,17 @@ import logging import os -from functools import partial +from collections.abc import Callable +from enum import Enum from pprint import pformat -from typing import Any, Callable, Optional +from typing import Any +import rich import typer -from pydantic import BaseModel, SecretStr, ValidationError -from pydantic.env_settings import BaseSettings -from pydantic.json import custom_pydantic_encoder +from common_library.json_serialization import json_dumps +from common_library.serialization import model_dump_with_secrets +from pydantic import ValidationError +from pydantic_core import to_jsonable_python +from pydantic_settings import BaseSettings from ._constants import HEADER_STR from .base import BaseCustomSettings @@ -23,14 +27,15 @@ def print_as_envfile( ): exclude_unset = pydantic_export_options.get("exclude_unset", False) - for field in settings_obj.__fields__.values(): - auto_default_from_env = field.field_info.extra.get( - "auto_default_from_env", False + for name, field in settings_obj.model_fields.items(): + auto_default_from_env = ( + field.json_schema_extra is not None + and field.json_schema_extra.get("auto_default_from_env", False) ) - value = getattr(settings_obj, field.name) + value = getattr(settings_obj, name) - if exclude_unset and field.name not in settings_obj.__fields_set__: + if exclude_unset and name not in settings_obj.model_fields_set: if not auto_default_from_env: continue if value is None: @@ -38,10 +43,14 @@ def print_as_envfile( if isinstance(value, BaseSettings): if compact: - value = f"'{value.json(**pydantic_export_options)}'" # flat + value = json_dumps( + model_dump_with_secrets( + value, show_secrets=show_secrets, **pydantic_export_options + ) + ) # flat else: if verbose: - typer.echo(f"\n# --- {field.name} --- ") + typer.echo(f"\n# --- {name} --- ") print_as_envfile( value, compact=False, @@ -53,34 +62,33 @@ def print_as_envfile( elif show_secrets and hasattr(value, "get_secret_value"): value = value.get_secret_value() - if verbose: - field_info = field.field_info - if field_info.description: - typer.echo(f"# {field_info.description}") + if verbose and field.description: + typer.echo(f"# {field.description}") + if isinstance(value, Enum): + value = value.value + typer.echo(f"{name}={value}") - typer.echo(f"{field.name}={value}") - -def print_as_json(settings_obj, *, compact=False, **pydantic_export_options): +def _print_as_json( + settings_obj, + *, + compact: bool = False, + show_secrets: bool, + **pydantic_export_options, +): typer.echo( - settings_obj.json(indent=None if compact else 2, **pydantic_export_options) - ) - - -def create_json_encoder_wo_secrets(model_cls: type[BaseModel]): - current_encoders = getattr(model_cls.Config, "json_encoders", {}) - encoder = partial( - custom_pydantic_encoder, - { - SecretStr: lambda v: v.get_secret_value(), - **current_encoders, - }, + json_dumps( + model_dump_with_secrets( + settings_obj, show_secrets=show_secrets, **pydantic_export_options + ), + indent=None if compact else 2, + ) ) - return encoder def create_settings_command( - 
settings_cls: type[BaseCustomSettings], logger: Optional[logging.Logger] = None + settings_cls: type[BaseCustomSettings], + logger: logging.Logger | None = None, ) -> Callable: """Creates typer command function for settings""" @@ -105,18 +113,27 @@ def settings( """Resolves settings and prints envfile""" if as_json_schema: - typer.echo(settings_cls.schema_json(indent=0 if compact else 2)) + typer.echo( + json_dumps( + settings_cls.model_json_schema(), + default=to_jsonable_python, + indent=0 if compact else 2, + ) + ) return try: - settings_obj = settings_cls.create_from_envs() except ValidationError as err: - settings_schema = settings_cls.schema_json(indent=2) + settings_schema = json_dumps( + settings_cls.model_json_schema(), + default=to_jsonable_python, + indent=2, + ) assert logger is not None # nosec - logger.error( + logger.error( # noqa: TRY400 "Invalid settings. " "Typically this is due to an environment variable missing or misspelled :\n%s", "\n".join( @@ -140,14 +157,14 @@ def settings( raise pydantic_export_options: dict[str, Any] = {"exclude_unset": exclude_unset} - if show_secrets: - # NOTE: this option is for json-only - pydantic_export_options["encoder"] = create_json_encoder_wo_secrets( - settings_cls - ) if as_json: - print_as_json(settings_obj, compact=compact, **pydantic_export_options) + _print_as_json( + settings_obj, + compact=compact, + show_secrets=show_secrets, + **pydantic_export_options, + ) else: print_as_envfile( settings_obj, @@ -158,3 +175,27 @@ def settings( ) return settings + + +def create_version_callback(application_version: str) -> Callable: + def _version_callback(value: bool): # noqa: FBT001 + if value: + rich.print(application_version) + raise typer.Exit + + def version( + ctx: typer.Context, + *, + version: bool = ( # noqa: ARG001 # pylint: disable=unused-argument + typer.Option( + None, + "--version", + callback=_version_callback, + is_eager=True, + ) + ), + ): + """current version""" + assert ctx # nosec + + return version diff --git a/packages/settings-library/src/settings_library/utils_logging.py b/packages/settings-library/src/settings_library/utils_logging.py index 1334f35cec3..7ca3ee05b01 100644 --- a/packages/settings-library/src/settings_library/utils_logging.py +++ b/packages/settings-library/src/settings_library/utils_logging.py @@ -1,6 +1,8 @@ import logging from functools import cached_property +from common_library.basic_types import LogLevel + class MixinLoggingSettings: """ @@ -8,13 +10,14 @@ class MixinLoggingSettings: """ @classmethod - def validate_log_level(cls, value: str) -> str: + def validate_log_level(cls, value: str) -> LogLevel: """Standard implementation for @validator("LOG_LEVEL")""" try: getattr(logging, value.upper()) except AttributeError as err: - raise ValueError(f"{value.upper()} is not a valid level") from err - return value.upper() + msg = f"{value.upper()} is not a valid level" + raise ValueError(msg) from err + return LogLevel(value.upper()) @cached_property def log_level(self) -> int: diff --git a/packages/settings-library/src/settings_library/utils_r_clone.py b/packages/settings-library/src/settings_library/utils_r_clone.py index e13aed60425..cda4f878ad5 100644 --- a/packages/settings-library/src/settings_library/utils_r_clone.py +++ b/packages/settings-library/src/settings_library/utils_r_clone.py @@ -4,7 +4,7 @@ from .r_clone import RCloneSettings, S3Provider -_COMMON_ENTRIES: dict[str, str] = { +_COMMON_SETTINGS_OPTIONS: dict[str, str] = { "type": "s3", "access_key_id": "{access_key}", 
"secret_access_key": "{secret_key}", @@ -12,7 +12,7 @@ "acl": "private", } -_PROVIDER_ENTRIES: dict[S3Provider, dict[str, str]] = { +_PROVIDER_SETTINGS_OPTIONS: dict[S3Provider, dict[str, str]] = { # NOTE: # AWS_SESSION_TOKEN should be required for STS S3Provider.AWS: {"provider": "AWS"}, S3Provider.CEPH: {"provider": "Ceph", "endpoint": "{endpoint}"}, @@ -20,31 +20,42 @@ } -def _format_config(entries: dict[str, str]) -> str: +def _format_config(settings_options: dict[str, str], s3_config_key: str) -> str: config = configparser.ConfigParser() - config["dst"] = entries + config[s3_config_key] = settings_options with StringIO() as string_io: config.write(string_io) string_io.seek(0) return string_io.read() -def get_r_clone_config(r_clone_settings: RCloneSettings) -> str: - provider = r_clone_settings.R_CLONE_PROVIDER - entries = deepcopy(_COMMON_ENTRIES) - entries.update(_PROVIDER_ENTRIES[provider]) +def get_r_clone_config(r_clone_settings: RCloneSettings, *, s3_config_key: str) -> str: + """ + Arguments: + r_clone_settings -- current rclone configuration + s3_config_key -- used by the cli to reference the rclone configuration + it is used to make the cli command more readable - r_clone_config_template = _format_config(entries=entries) + Returns: + stringified *.ini rclone configuration + """ + settings_options: dict[str, str] = deepcopy(_COMMON_SETTINGS_OPTIONS) + settings_options.update( + _PROVIDER_SETTINGS_OPTIONS[r_clone_settings.R_CLONE_PROVIDER] + ) + + r_clone_config_template = _format_config( + settings_options=settings_options, s3_config_key=s3_config_key + ) # replace entries in template - r_clone_config = r_clone_config_template.format( + return r_clone_config_template.format( endpoint=r_clone_settings.R_CLONE_S3.S3_ENDPOINT, access_key=r_clone_settings.R_CLONE_S3.S3_ACCESS_KEY, secret_key=r_clone_settings.R_CLONE_S3.S3_SECRET_KEY, aws_region=r_clone_settings.R_CLONE_S3.S3_REGION, ) - return r_clone_config def resolve_provider(s3_provider: S3Provider) -> str: - return _PROVIDER_ENTRIES[s3_provider]["provider"] + return _PROVIDER_SETTINGS_OPTIONS[s3_provider]["provider"] diff --git a/packages/settings-library/src/settings_library/utils_service.py b/packages/settings-library/src/settings_library/utils_service.py index 8766c2e58a0..8c23df45a55 100644 --- a/packages/settings-library/src/settings_library/utils_service.py +++ b/packages/settings-library/src/settings_library/utils_service.py @@ -3,7 +3,6 @@ """ from enum import Enum, auto -from typing import Optional from pydantic.networks import AnyUrl from pydantic.types import SecretStr @@ -52,21 +51,31 @@ class MyServiceSettings(BaseCustomSettings, MixinServiceSettings): # base_url -> http://user:pass@example.com:8042 # api_base -> http://user:pass@example.com:8042/v0 - def _safe_getattr(self, key, req: URLPart, default=None) -> Optional[str]: - # TODO: convert AttributeError in ValidationError field required + def _safe_getattr( + self, key: str, req: URLPart, default: str | None = None + ) -> str | None: + """ - if req == URLPart.EXCLUDE: - return None + Raises: + AttributeError - if req == URLPart.REQUIRED: - # raise AttributeError - return getattr(self, key) + """ + result: str | None = None + match req: + case URLPart.EXCLUDE: + result = None - if req == URLPart.OPTIONAL: - # return default if fails - return getattr(self, key, default) + case URLPart.REQUIRED: + # raises AttributeError upon failure + required_value: str = getattr(self, key) + result = required_value - return None + case URLPart.OPTIONAL: + # returns default 
upon failure + optional_value: str | None = getattr(self, key, default) + result = optional_value + + return result def _compose_url( self, @@ -77,34 +86,50 @@ def _compose_url( port: URLPart = URLPart.EXCLUDE, vtag: URLPart = URLPart.EXCLUDE, ) -> str: + """ + + Raises: + AttributeError + + """ assert prefix # nosec prefix = prefix.upper() - parts = dict( - scheme=self._safe_getattr(f"{prefix}_SCHEME", URLPart.OPTIONAL, "http"), - host=self._safe_getattr(f"{prefix}_HOST", URLPart.REQUIRED), - user=self._safe_getattr(f"{prefix}_USER", user), - password=self._safe_getattr(f"{prefix}_PASSWORD", password), - port=self._safe_getattr(f"{prefix}_PORT", port), - ) - - if vtag != URLPart.EXCLUDE: + port_value = self._safe_getattr(f"{prefix}_PORT", port) + + parts = { + "scheme": ( + "https" + if self._safe_getattr(f"{prefix}_SECURE", URLPart.OPTIONAL) + else "http" + ), + "host": self._safe_getattr(f"{prefix}_HOST", URLPart.REQUIRED), + "port": int(port_value) if port_value is not None else None, + "username": self._safe_getattr(f"{prefix}_USER", user), + "password": self._safe_getattr(f"{prefix}_PASSWORD", password), + } + + if vtag != URLPart.EXCLUDE: # noqa: SIM102 if v := self._safe_getattr(f"{prefix}_VTAG", vtag): - parts["path"] = f"/{v}" + parts["path"] = f"{v}" - # postprocess parts dict + # post process parts dict kwargs = {} - for k, v in parts.items(): + for k, v in parts.items(): # type: ignore[assignment] if isinstance(v, SecretStr): - v = v.get_secret_value() - elif v is not None: - v = f"{v}" + value = v.get_secret_value() + else: + value = v - kwargs[k] = v + if value is not None: + kwargs[k] = value - assert all(isinstance(v, str) or v is None for v in kwargs.values()) # nosec + assert all( + isinstance(v, (str, int)) or v is None for v in kwargs.values() + ) # nosec - return AnyUrl.build(**kwargs) + composed_url: str = str(AnyUrl.build(**kwargs)) # type: ignore[arg-type] # pylint: disable=missing-kwoa + return composed_url.rstrip("/") def _build_api_base_url(self, *, prefix: str) -> str: return self._compose_url( diff --git a/packages/settings-library/src/settings_library/utils_session.py b/packages/settings-library/src/settings_library/utils_session.py index 650dd2ef2fd..3c78f7efa84 100644 --- a/packages/settings-library/src/settings_library/utils_session.py +++ b/packages/settings-library/src/settings_library/utils_session.py @@ -1,5 +1,9 @@ import base64 import binascii +from typing import Final + +DEFAULT_SESSION_COOKIE_NAME: Final[str] = "osparc-sc2" +_32_BYTES_LENGTH: Final[int] = 32 class MixinSessionSettings: @@ -13,10 +17,12 @@ def do_check_valid_fernet_key(cls, v): # NOTE: len(v) cannot be 1 more than a multiple of 4 key_b64decode = base64.urlsafe_b64decode(value) except binascii.Error as exc: - raise ValueError(f"Invalid session key {value=}: {exc}") from exc - if len(key_b64decode) != 32: - raise ValueError( + msg = f"Invalid session key {value=}: {exc}" + raise ValueError(msg) from exc + if len(key_b64decode) != _32_BYTES_LENGTH: + msg = ( f"Invalid session secret {value=} must be 32 url-safe base64-encoded bytes, got {len(key_b64decode)=}." 
'TIP: create new key with python3 -c "from cryptography.fernet import *; print(Fernet.generate_key())"' ) + raise ValueError(msg) return v diff --git a/packages/settings-library/src/settings_library/utils_validators.py b/packages/settings-library/src/settings_library/utils_validators.py new file mode 100644 index 00000000000..c26c09fa7c8 --- /dev/null +++ b/packages/settings-library/src/settings_library/utils_validators.py @@ -0,0 +1,9 @@ +from pydantic import AnyHttpUrl, TypeAdapter + +ANY_HTTP_URL_ADAPTER: TypeAdapter = TypeAdapter(AnyHttpUrl) + + +def validate_nullable_url(value: str | None) -> str | None: + if value is not None: + return str(ANY_HTTP_URL_ADAPTER.validate_python(value)) + return value diff --git a/packages/settings-library/src/settings_library/webserver.py b/packages/settings-library/src/settings_library/webserver.py new file mode 100644 index 00000000000..c32bdbeb0c5 --- /dev/null +++ b/packages/settings-library/src/settings_library/webserver.py @@ -0,0 +1,30 @@ +from functools import cached_property + +from .base import BaseCustomSettings +from .basic_types import PortInt, VersionTag +from .utils_service import DEFAULT_AIOHTTP_PORT, MixinServiceSettings, URLPart + + +class WebServerSettings(BaseCustomSettings, MixinServiceSettings): + WEBSERVER_HOST: str = "webserver" + WEBSERVER_PORT: PortInt = DEFAULT_AIOHTTP_PORT + WEBSERVER_VTAG: VersionTag = "v0" + + @cached_property + def base_url(self) -> str: + # e.g. http://webserver:8080/ + url_without_vtag: str = self._compose_url( + prefix="WEBSERVER", + port=URLPart.REQUIRED, + ) + return url_without_vtag + + @cached_property + def api_base_url(self) -> str: + # e.g. http://webserver:8080/v0 + url_with_vtag: str = self._compose_url( + prefix="WEBSERVER", + port=URLPart.REQUIRED, + vtag=URLPart.REQUIRED, + ) + return url_with_vtag diff --git a/packages/settings-library/tests/conftest.py b/packages/settings-library/tests/conftest.py index d6c9e524f89..0431a6c6748 100644 --- a/packages/settings-library/tests/conftest.py +++ b/packages/settings-library/tests/conftest.py @@ -4,7 +4,6 @@ import sys from pathlib import Path -from typing import Optional import pytest import settings_library @@ -97,13 +96,13 @@ class _ApplicationSettings(BaseCustomSettings): # NOTE: by convention, an addon is disabled when APP_ADDON=None, so we make this # entry nullable as well - APP_OPTIONAL_ADDON: Optional[_ModuleSettings] = Field( - auto_default_from_env=True + APP_OPTIONAL_ADDON: _ModuleSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} ) # NOTE: example of a group that cannot be disabled (not nullable) - APP_REQUIRED_PLUGIN: Optional[PostgresSettings] = Field( - auto_default_from_env=True + APP_REQUIRED_PLUGIN: PostgresSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} ) return _ApplicationSettings diff --git a/packages/settings-library/tests/test__models_examples.py b/packages/settings-library/tests/test__models_examples.py new file mode 100644 index 00000000000..c93ed934cf1 --- /dev/null +++ b/packages/settings-library/tests/test__models_examples.py @@ -0,0 +1,22 @@ +from typing import Any + +import pytest +import settings_library +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(settings_library), +) +def test_all_settings_library_models_config_examples( + model_cls: type[BaseModel], 
example_name: str, example_data: Any +): + + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/packages/settings-library/tests/test__pydantic_settings.py b/packages/settings-library/tests/test__pydantic_settings.py index 5b46e05fd25..eb2989852cb 100644 --- a/packages/settings-library/tests/test__pydantic_settings.py +++ b/packages/settings-library/tests/test__pydantic_settings.py @@ -12,48 +12,63 @@ """ -from typing import Optional +from typing import Annotated -from pydantic import BaseSettings, validator -from pydantic.fields import ModelField, Undefined +import pytest +from common_library.basic_types import LogLevel +from common_library.pydantic_fields_extension import is_nullable +from pydantic import AliasChoices, Field, ValidationInfo, field_validator +from pydantic_core import PydanticUndefined +from pydantic_settings import BaseSettings +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from settings_library.application import BaseApplicationSettings def assert_field_specs( - model_cls, name, is_required, is_nullable, explicit_default, defaults + model_cls: type[BaseSettings], + name: str, + required: bool, + nullable: bool, + explicit_default, ): - field: ModelField = model_cls.__fields__[name] - print(field, field.field_info) + info = model_cls.model_fields[name] + print(info) - assert field.required == is_required - assert field.allow_none == is_nullable - assert field.field_info.default == explicit_default + assert info.is_required() == required + assert is_nullable(info) == nullable - assert field.default == defaults - if field.required: + if info.is_required(): # in this case, default is not really used - assert field.default is None + assert info.default is PydanticUndefined + else: + assert info.default == explicit_default class Settings(BaseSettings): VALUE: int VALUE_DEFAULT: int = 42 - VALUE_NULLABLE_REQUIRED: Optional[int] = ... # type: ignore - VALUE_NULLABLE_OPTIONAL: Optional[int] + VALUE_NULLABLE_REQUIRED: int | None = ... # type: ignore[assignment] + VALUE_NULLABLE_REQUIRED_AS_WELL: int | None - VALUE_NULLABLE_DEFAULT_VALUE: Optional[int] = 42 - VALUE_NULLABLE_DEFAULT_NULL: Optional[int] = None + VALUE_NULLABLE_DEFAULT_VALUE: int | None = 42 + VALUE_NULLABLE_DEFAULT_NULL: int | None = None # Other ways to write down "required" is using ... - VALUE_ALSO_REQUIRED: int = ... # type: ignore + VALUE_REQUIRED_AS_WELL: int = ... # type: ignore[assignment] - @validator("*", pre=True) + @field_validator("*", mode="before") @classmethod - def parse_none(cls, v, values, field: ModelField): + def parse_none(cls, v, info: ValidationInfo): # WARNING: In nullable fields, envs equal to null or none are parsed as None !! 
- if field.allow_none: - if isinstance(v, str) and v.lower() in ("null", "none"): - return None + + if ( + info.field_name + and is_nullable(dict(cls.model_fields)[info.field_name]) + and isinstance(v, str) + and v.lower() in ("null", "none") + ): + return None return v @@ -65,37 +80,33 @@ def test_fields_declarations(): assert_field_specs( Settings, "VALUE", - is_required=True, - is_nullable=False, - explicit_default=Undefined, - defaults=None, + required=True, + nullable=False, + explicit_default=PydanticUndefined, ) assert_field_specs( Settings, "VALUE_DEFAULT", - is_required=False, - is_nullable=False, + required=False, + nullable=False, explicit_default=42, - defaults=42, ) assert_field_specs( Settings, "VALUE_NULLABLE_REQUIRED", - is_required=True, - is_nullable=True, + required=True, + nullable=True, explicit_default=Ellipsis, - defaults=None, ) assert_field_specs( Settings, - "VALUE_NULLABLE_OPTIONAL", - is_required=False, - is_nullable=True, - explicit_default=Undefined, # <- difference wrt VALUE_NULLABLE_DEFAULT_NULL - defaults=None, + "VALUE_NULLABLE_REQUIRED_AS_WELL", + required=True, + nullable=True, + explicit_default=PydanticUndefined, # <- difference wrt VALUE_NULLABLE_DEFAULT_NULL ) # VALUE_NULLABLE_OPTIONAL interpretation has always been confusing @@ -105,54 +116,85 @@ def test_fields_declarations(): assert_field_specs( Settings, "VALUE_NULLABLE_DEFAULT_VALUE", - is_required=False, - is_nullable=True, + required=False, + nullable=True, explicit_default=42, - defaults=42, ) assert_field_specs( Settings, "VALUE_NULLABLE_DEFAULT_NULL", - is_required=False, - is_nullable=True, - explicit_default=None, # <- difference wrt VALUE_NULLABLE_OPTIONAL - defaults=None, + required=False, + nullable=True, + explicit_default=None, ) assert_field_specs( Settings, - "VALUE_ALSO_REQUIRED", - is_required=True, - is_nullable=False, + "VALUE_REQUIRED_AS_WELL", + required=True, + nullable=False, explicit_default=Ellipsis, - defaults=None, ) def test_construct(monkeypatch): # from __init__ settings_from_init = Settings( - VALUE=1, VALUE_ALSO_REQUIRED=10, VALUE_NULLABLE_REQUIRED=None + VALUE=1, + VALUE_NULLABLE_REQUIRED=None, + VALUE_NULLABLE_REQUIRED_AS_WELL=None, + VALUE_REQUIRED_AS_WELL=32, ) - print(settings_from_init.json(exclude_unset=True, indent=1)) + + print(settings_from_init.model_dump_json(exclude_unset=True, indent=1)) # from env vars - monkeypatch.setenv("VALUE", "1") - monkeypatch.setenv("VALUE_ALSO_REQUIRED", "10") - monkeypatch.setenv( - "VALUE_NULLABLE_REQUIRED", "null" + setenvs_from_dict( + monkeypatch, + { + "VALUE": "1", + "VALUE_ALSO_REQUIRED": "10", + "VALUE_NULLABLE_REQUIRED": "null", + "VALUE_NULLABLE_REQUIRED_AS_WELL": "null", + "VALUE_REQUIRED_AS_WELL": "32", + }, ) # WARNING: set this env to None would not work w/o ``parse_none`` validator! bug??? 
-    settings_from_env = Settings()
-    print(settings_from_env.json(exclude_unset=True, indent=1))
+    settings_from_env = Settings()  # type: ignore[call-arg]
+    print(settings_from_env.model_dump_json(exclude_unset=True, indent=1))
 
     assert settings_from_init == settings_from_env
 
     # mixed
-    settings_from_both = Settings(VALUE_NULLABLE_REQUIRED=3)
-    print(settings_from_both.json(exclude_unset=True, indent=1))
+    settings_from_both = Settings(VALUE_NULLABLE_REQUIRED=3)  # type: ignore[call-arg]
+    print(settings_from_both.model_dump_json(exclude_unset=True, indent=1))
 
-    assert settings_from_both == settings_from_init.copy(
+    assert settings_from_both == settings_from_init.model_copy(
         update={"VALUE_NULLABLE_REQUIRED": 3}
     )
+
+
+class _TestSettings(BaseApplicationSettings):
+    APP_LOGLEVEL: Annotated[
+        LogLevel,
+        Field(
+            validation_alias=AliasChoices("APP_LOGLEVEL", "LOG_LEVEL"),
+        ),
+    ] = LogLevel.WARNING
+
+
+@pytest.mark.filterwarnings("error")
+def test_pydantic_serialization_user_warning(monkeypatch: pytest.MonkeyPatch):
+    # This test explores the reason for the `UserWarning`
+    #
+    # /python3.11/site-packages/pydantic/main.py:477: UserWarning: Pydantic serializer warnings:
+    #   Expected `enum` but got `str` with value `'WARNING'` - serialized value may not be as expected
+    #   return self.__pydantic_serializer__.to_json(
+    #
+    # NOTE: it seems settings.model_dump_json(warnings='none') is not the cause of the `UserWarning` here
+    monkeypatch.setenv("LOG_LEVEL", "DEBUG")
+
+    settings = _TestSettings.create_from_envs()
+    assert settings.APP_LOGLEVEL == LogLevel.DEBUG
+    assert settings.model_dump_json(indent=2)
diff --git a/packages/settings-library/tests/test_application.py b/packages/settings-library/tests/test_application.py
new file mode 100644
index 00000000000..8f847e5dd24
--- /dev/null
+++ b/packages/settings-library/tests/test_application.py
@@ -0,0 +1,49 @@
+# pylint: disable=redefined-outer-name
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+# pylint: disable=too-many-arguments
+
+
+import pytest
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict
+from pytest_simcore.helpers.typing_env import EnvVarsDict
+from settings_library.application import BaseApplicationSettings
+
+
+@pytest.fixture
+def envs_from_docker_inspect() -> EnvVarsDict:
+    # docker image inspect local/storage:development | jq ".[0].Config.Env"
+    envs = [
+        "PATH=/home/scu/.venv/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+        "LANG=C.UTF-8",
+        "GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D",
+        "PYTHON_VERSION=3.10.14",
+        "PYTHON_PIP_VERSION=22.3.1",
+        "PYTHON_SETUPTOOLS_VERSION=65.5.1",
+        "PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/d5cb0afaf23b8520f1bbcfed521017b4a95f5c01/public/get-pip.py",
+        "PYTHON_GET_PIP_SHA256=394be00f13fa1b9aaa47e911bdb59a09c3b2986472130f30aa0bfaf7f3980637",
+        "SC_USER_ID=8004",
+        "SC_USER_NAME=scu",
+        "SC_BUILD_TARGET=development",
+        "SC_BOOT_MODE=default",
+        "PYTHONDONTWRITEBYTECODE=1",
+        "VIRTUAL_ENV=/home/scu/.venv",
+        "SC_DEVEL_MOUNT=/devel/services/storage/",
+    ]
+    return EnvVarsDict(env.split("=") for env in envs)
+
+
+@pytest.fixture
+def app_environment(
+    monkeypatch: pytest.MonkeyPatch, envs_from_docker_inspect: EnvVarsDict
+) -> EnvVarsDict:
+    return setenvs_from_dict(monkeypatch, envs_from_docker_inspect)
+
+
+def test_application_settings(app_environment: EnvVarsDict):
+
+    # should not raise
+    settings = BaseApplicationSettings.create_from_envs()
+
+    # some check
+    assert int(app_environment["SC_USER_ID"]) == settings.SC_USER_ID
diff --git a/packages/settings-library/tests/test_base.py b/packages/settings-library/tests/test_base.py
index 164cc0e2c9a..d4ebd987760 100644
--- a/packages/settings-library/tests/test_base.py
+++ b/packages/settings-library/tests/test_base.py
@@ -2,22 +2,29 @@
 # pylint: disable=unused-argument
 # pylint: disable=unused-variable
 # pylint: disable=too-many-arguments
+# pylint: disable=protected-access
 
 import json
-from typing import Any, Callable, Optional
+from collections.abc import Callable
+from typing import Any
 
+import pydantic
 import pytest
 import settings_library.base
 from pydantic import BaseModel, ValidationError
 from pydantic.fields import Field
-from pytest import MonkeyPatch
+from pydantic_settings import BaseSettings, SettingsConfigDict
 from pytest_mock import MockerFixture
-from pytest_simcore.helpers.utils_envs import setenvs_from_envfile
+from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_envfile
+from pytest_simcore.helpers.typing_env import EnvVarsDict
 from settings_library.base import (
-    _DEFAULTS_TO_NONE_MSG,
+    _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING,
     BaseCustomSettings,
     DefaultFromEnvFactoryError,
 )
+from settings_library.email import SMTPSettings
+
+pydantic_version = ".".join(pydantic.__version__.split(".")[:2])
 
 S2 = json.dumps({"S_VALUE": 2})
 S3 = json.dumps({"S_VALUE": 3})
@@ -37,17 +44,17 @@ def _get_attrs_tree(obj: Any) -> dict[str, Any]:
 
 
 def _print_defaults(model_cls: type[BaseModel]):
-    for field in model_cls.__fields__.values():
-        print(field.name, ":", end="")
+    for name, field in model_cls.model_fields.items():
+        print(name, ":", end="")
         try:
-            default = field.get_default()
+            default = field.get_default(call_default_factory=True)  # new in Pydantic v2
             print(default, type(default))
         except ValidationError as err:
             print(err)
 
 
 def _dumps_model_class(model_cls: type[BaseModel]):
-    d = {field.name: _get_attrs_tree(field) for field in model_cls.__fields__.values()}
+    d = {name: _get_attrs_tree(field) for name, field in model_cls.model_fields.items()}
     return json.dumps(d, indent=1)
 
 
@@ -60,16 +67,19 @@ class S(BaseCustomSettings):
 class M1(BaseCustomSettings):
     VALUE: S
     VALUE_DEFAULT: S = S(S_VALUE=42)
-    VALUE_CONFUSING: S = None  # type: ignore
+    # VALUE_CONFUSING: S = None # type: ignore
 
-    VALUE_NULLABLE_REQUIRED: Optional[S] = ...  # type: ignore
-    VALUE_NULLABLE_OPTIONAL: Optional[S]
+    VALUE_NULLABLE_REQUIRED: S | None = ...
# type: ignore - VALUE_NULLABLE_DEFAULT_VALUE: Optional[S] = S(S_VALUE=42) - VALUE_NULLABLE_DEFAULT_NULL: Optional[S] = None + VALUE_NULLABLE_DEFAULT_VALUE: S | None = S(S_VALUE=42) + VALUE_NULLABLE_DEFAULT_NULL: S | None = None - VALUE_NULLABLE_DEFAULT_ENV: Optional[S] = Field(auto_default_from_env=True) - VALUE_DEFAULT_ENV: S = Field(auto_default_from_env=True) + VALUE_NULLABLE_DEFAULT_ENV: S | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + VALUE_DEFAULT_ENV: S = Field( + json_schema_extra={"auto_default_from_env": True} + ) class M2(BaseCustomSettings): # @@ -78,13 +88,17 @@ class M2(BaseCustomSettings): # # defaults disabled but only explicit enabled - VALUE_NULLABLE_DEFAULT_NULL: Optional[S] = None + VALUE_NULLABLE_DEFAULT_NULL: S | None = None # defaults enabled but if not exists, it disables - VALUE_NULLABLE_DEFAULT_ENV: Optional[S] = Field(auto_default_from_env=True) + VALUE_NULLABLE_DEFAULT_ENV: S | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) # cannot be disabled - VALUE_DEFAULT_ENV: S = Field(auto_default_from_env=True) + VALUE_DEFAULT_ENV: S = Field( + json_schema_extra={"auto_default_from_env": True} + ) # Changed in version 3.7: Dictionary order is guaranteed to be insertion order _classes = {"M1": M1, "M2": M2, "S": S} @@ -100,18 +114,18 @@ def test_create_settings_class( # DEV: Path("M1.ignore.json").write_text(dumps_model_class(M)) - assert M.__fields__["VALUE_NULLABLE_DEFAULT_ENV"].default_factory + assert M.model_fields["VALUE_NULLABLE_DEFAULT_ENV"].default_factory - assert M.__fields__["VALUE_NULLABLE_DEFAULT_ENV"].get_default() == None + assert M.model_fields["VALUE_NULLABLE_DEFAULT_ENV"].get_default() is None - assert M.__fields__["VALUE_DEFAULT_ENV"].default_factory + assert M.model_fields["VALUE_DEFAULT_ENV"].default_factory with pytest.raises(DefaultFromEnvFactoryError): - M.__fields__["VALUE_DEFAULT_ENV"].get_default() + M.model_fields["VALUE_DEFAULT_ENV"].get_default(call_default_factory=True) def test_create_settings_class_with_environment( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, create_settings_class: Callable[[str], type[BaseCustomSettings]], ): # create class within one context @@ -135,20 +149,19 @@ def test_create_settings_class_with_environment( instance = SettingsClass() - print(instance.json(indent=2)) + print(instance.model_dump_json(indent=2)) # checks - assert instance.dict(exclude_unset=True) == { + assert instance.model_dump(exclude_unset=True) == { "VALUE": {"S_VALUE": 2}, "VALUE_NULLABLE_REQUIRED": {"S_VALUE": 3}, } - assert instance.dict() == { + assert instance.model_dump() == { "VALUE": {"S_VALUE": 2}, "VALUE_DEFAULT": {"S_VALUE": 42}, - "VALUE_CONFUSING": None, + # "VALUE_CONFUSING": None, "VALUE_NULLABLE_REQUIRED": {"S_VALUE": 3}, - "VALUE_NULLABLE_OPTIONAL": None, "VALUE_NULLABLE_DEFAULT_VALUE": {"S_VALUE": 42}, "VALUE_NULLABLE_DEFAULT_NULL": None, "VALUE_NULLABLE_DEFAULT_ENV": {"S_VALUE": 1}, @@ -162,18 +175,20 @@ def test_create_settings_class_without_environ_fails( # now defining S_VALUE M2_outside_context = create_settings_class("M2") - with pytest.raises(ValidationError) as err_info: - instance = M2_outside_context.create_from_envs() + with pytest.raises(DefaultFromEnvFactoryError) as err_info: + M2_outside_context.create_from_envs() - assert err_info.value.errors()[0] == { - "loc": ("VALUE_DEFAULT_ENV", "S_VALUE"), - "msg": "field required", - "type": "value_error.missing", + assert err_info.value.errors[0] == { + "input": {}, + "loc": ("S_VALUE",), + "msg": "Field 
required", + "type": "missing", + "url": f"https://errors.pydantic.dev/{pydantic_version}/v/missing", } def test_create_settings_class_with_environ_passes( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, create_settings_class: Callable[[str], type[BaseCustomSettings]], ): # now defining S_VALUE @@ -195,25 +210,32 @@ def test_auto_default_to_none_logs_a_warning( create_settings_class: Callable[[str], type[BaseCustomSettings]], mocker: MockerFixture, ): - logger_warn = mocker.spy(settings_library.base.logger, "warning") + logger_warn = mocker.spy(settings_library.base._logger, "warning") # noqa: SLF001 S = create_settings_class("S") class SettingsClass(BaseCustomSettings): - VALUE_NULLABLE_DEFAULT_NULL: Optional[S] = None - VALUE_NULLABLE_DEFAULT_ENV: Optional[S] = Field(auto_default_from_env=True) + VALUE_NULLABLE_DEFAULT_NULL: S | None = None + VALUE_NULLABLE_DEFAULT_ENV: S | None = Field( + json_schema_extra={"auto_default_from_env": True}, + ) instance = SettingsClass.create_from_envs() - assert instance.VALUE_NULLABLE_DEFAULT_NULL == None - assert instance.VALUE_NULLABLE_DEFAULT_ENV == None + assert instance.VALUE_NULLABLE_DEFAULT_NULL is None + assert instance.VALUE_NULLABLE_DEFAULT_ENV is None # Defaulting to None also logs a warning assert logger_warn.call_count == 1 - assert _DEFAULTS_TO_NONE_MSG in logger_warn.call_args[0][0] + assert ( + _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name="VALUE_NULLABLE_DEFAULT_ENV" + ) + in logger_warn.call_args[0][0] + ) def test_auto_default_to_not_none( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, create_settings_class: Callable[[str], type[BaseCustomSettings]], ): with monkeypatch.context() as patch: @@ -222,15 +244,17 @@ def test_auto_default_to_not_none( S = create_settings_class("S") class SettingsClass(BaseCustomSettings): - VALUE_NULLABLE_DEFAULT_NULL: Optional[S] = None - VALUE_NULLABLE_DEFAULT_ENV: Optional[S] = Field(auto_default_from_env=True) + VALUE_NULLABLE_DEFAULT_NULL: S | None = None + VALUE_NULLABLE_DEFAULT_ENV: S | None = Field( + json_schema_extra={"auto_default_from_env": True}, + ) instance = SettingsClass.create_from_envs() - assert instance.VALUE_NULLABLE_DEFAULT_NULL == None - assert instance.VALUE_NULLABLE_DEFAULT_ENV == S(S_VALUE=123) + assert instance.VALUE_NULLABLE_DEFAULT_NULL is None + assert S(S_VALUE=123) == instance.VALUE_NULLABLE_DEFAULT_ENV -def test_how_settings_parse_null_environs(monkeypatch: MonkeyPatch): +def test_how_settings_parse_null_environs(monkeypatch: pytest.MonkeyPatch): # # We were wondering how nullable fields (i.e. those marked as Optional[.]) can # be defined in the envfile. 
Here we test different options @@ -260,12 +284,12 @@ def test_how_settings_parse_null_environs(monkeypatch: MonkeyPatch): } class SettingsClass(BaseCustomSettings): - VALUE_TO_NOTHING: Optional[str] - VALUE_TO_WORD_NULL: Optional[str] - VALUE_TO_WORD_NONE: Optional[str] - VALUE_TO_ZERO: Optional[str] + VALUE_TO_NOTHING: str | None + VALUE_TO_WORD_NULL: str | None + VALUE_TO_WORD_NONE: str | None + VALUE_TO_ZERO: str | None - INT_VALUE_TO_ZERO: Optional[int] + INT_VALUE_TO_ZERO: int | None instance = SettingsClass.create_from_envs() @@ -278,14 +302,61 @@ class SettingsClass(BaseCustomSettings): ) class SettingsClassExt(SettingsClass): - INT_VALUE_TO_NOTHING: Optional[int] + INT_VALUE_TO_NOTHING: int | None with pytest.raises(ValidationError) as err_info: SettingsClassExt.create_from_envs() error = err_info.value.errors()[0] assert error == { + "input": "", "loc": ("INT_VALUE_TO_NOTHING",), - "msg": "value is not a valid integer", - "type": "type_error.integer", + "msg": "Input should be a valid integer, unable to parse string as an integer", + "type": "int_parsing", + "url": f"https://errors.pydantic.dev/{pydantic_version}/v/int_parsing", } + + +def test_issubclass_type_error_with_pydantic_models(): + # There is a problem + # + # TypeError: issubclass() arg 1 must be a class + # + # SEE https://github.com/pydantic/pydantic/issues/545 + # + # >> issubclass(dict, BaseSettings) + # False + # >> issubclass(dict[str, str], BaseSettings) + # Traceback (most recent call last): + # File "", line 1, in + # File "/home/crespo/.pyenv/versions/3.10.13/lib/python3.10/abc.py", line 123, in __subclasscheck__ + # return _abc_subclasscheck(cls, subclass) + # TypeError: issubclass() arg 1 must be a class + # + + assert not issubclass(dict, BaseSettings) + + # NOTE: this should be fixed by pydantic at some point. 
When this happens, this test will fail + with pytest.raises(TypeError): + issubclass(dict[str, str], BaseSettings) + + # here reproduces the problem with our settings that ANE and PC had + class SettingsClassThatFailed(BaseCustomSettings): + FOO: dict[str, str] | None = Field(default=None) + + SettingsClassThatFailed(FOO={}) + assert SettingsClassThatFailed(FOO=None) == SettingsClassThatFailed() + + +def test_upgrade_failure_to_pydantic_settings_2_6( + mock_env_devel_environment: EnvVarsDict, +): + class ProblematicSettings(BaseCustomSettings): + WEBSERVER_EMAIL: SMTPSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + model_config = SettingsConfigDict(nested_model_default_partial_update=True) + + settings = ProblematicSettings() + assert settings.WEBSERVER_EMAIL is not None diff --git a/packages/settings-library/tests/test_base_w_postgres.py b/packages/settings-library/tests/test_base_w_postgres.py index 1118a41ad34..37329a4e9bb 100644 --- a/packages/settings-library/tests/test_base_w_postgres.py +++ b/packages/settings-library/tests/test_base_w_postgres.py @@ -3,14 +3,18 @@ # pylint: disable=unused-variable -from typing import Callable, Optional +import os +from collections.abc import Callable import pytest -from pydantic import Field, ValidationError -from pytest_simcore.helpers.utils_envs import setenvs_from_envfile +from pydantic import AliasChoices, Field, ValidationError, __version__ +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_envfile from settings_library.base import BaseCustomSettings, DefaultFromEnvFactoryError from settings_library.basic_types import PortInt +pydantic_vtag = ".".join(__version__.split(".")[:2]) + + # # NOTE: Pydantic models are returned by function-scoped fixture such that every # test starts with a fresh Model class (notice that pydanctic classes involve meta-operations @@ -22,6 +26,13 @@ # +@pytest.fixture +def postgres_envvars_unset(monkeypatch: pytest.MonkeyPatch) -> None: + for name in os.environ: + if name.startswith("POSTGRES_"): + monkeypatch.delenv(name) + + @pytest.fixture def model_classes_factory() -> Callable: # @@ -47,9 +58,11 @@ class _FakePostgresSettings(BaseCustomSettings): POSTGRES_MINSIZE: int = Field(1, ge=1) POSTGRES_MAXSIZE: int = Field(50, ge=1) - POSTGRES_CLIENT_NAME: Optional[str] = Field( + POSTGRES_CLIENT_NAME: str | None = Field( None, - env=["HOST", "HOSTNAME", "POSTGRES_CLIENT_NAME"], + validation_alias=AliasChoices( + "HOST", "HOSTNAME", "POSTGRES_CLIENT_NAME" + ), ) # @@ -60,34 +73,33 @@ class S1(BaseCustomSettings): WEBSERVER_POSTGRES: _FakePostgresSettings class S2(BaseCustomSettings): - WEBSERVER_POSTGRES_NULLABLE_OPTIONAL: Optional[_FakePostgresSettings] + WEBSERVER_POSTGRES_NULLABLE_OPTIONAL: _FakePostgresSettings | None = None class S3(BaseCustomSettings): # cannot be disabled!! 
             WEBSERVER_POSTGRES_DEFAULT_ENV: _FakePostgresSettings = Field(
-                auto_default_from_env=True
+                json_schema_extra={"auto_default_from_env": True}
             )
 
         class S4(BaseCustomSettings):
             # defaults enabled but if cannot be resolved, it disables
-            WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV: Optional[
-                _FakePostgresSettings
-            ] = Field(auto_default_from_env=True)
+            WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV: _FakePostgresSettings | None = (
+                Field(json_schema_extra={"auto_default_from_env": True})
+            )
 
         class S5(BaseCustomSettings):
             # defaults disabled but only explicit enabled
-            WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL: Optional[
-                _FakePostgresSettings
-            ] = None
+            WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL: _FakePostgresSettings | None = (
+                None
+            )
 
-        _classes = (
+        return (
             S1,
             S2,
             S3,
             S4,
             S5,
         )
-        return _classes
 
     return _create_classes
 
@@ -98,36 +110,50 @@ class S5(BaseCustomSettings):
 # Pay attention how the defaults of SubSettings are automaticaly captured from env vars
 # at construction time.
 #
 # NOTE: monkeypatching envs using envfile text gets the tests closer
 # to the real use case where .env/.env-devel
 # files are used to setup envs. Quotes formatting in
 # those files can sometimes be challenging for parsers
 #
 
 
-def test_parse_from_empty_envs(model_classes_factory: Callable):
+def test_parse_from_empty_envs(
+    postgres_envvars_unset: None, model_classes_factory: Callable
+):
 
     S1, S2, S3, S4, S5 = model_classes_factory()
 
-    with pytest.raises(ValidationError):
-        s1 = S1()
+    with pytest.raises(ValidationError, match="WEBSERVER_POSTGRES") as exc_info:
+        S1()
+
+    validation_error = exc_info.value
+    assert validation_error.error_count() == 1
+    error = validation_error.errors()[0]
+    assert error["type"] == "missing"
+    assert error["input"] == {}
 
     s2 = S2()
-    assert s2.WEBSERVER_POSTGRES_NULLABLE_OPTIONAL == None
+    assert s2.WEBSERVER_POSTGRES_NULLABLE_OPTIONAL is None
 
-    with pytest.raises(DefaultFromEnvFactoryError):
-        # NOTE: cannot hae a default or assignment
-        s3 = S3()
+    with pytest.raises(DefaultFromEnvFactoryError) as exc_info:
+        # NOTE: cannot have a default or assignment
+        S3()
+
+    assert len(exc_info.value.errors) == 4, "Default could not be constructed"
 
     # auto default factory resolves to None (because is nullable)
     s4 = S4()
-    assert s4.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV == None
+    assert s4.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV is None
 
     s5 = S5()
-    assert s5.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL == None
+    assert s5.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL is None
 
 
-def test_parse_from_individual_envs(monkeypatch, model_classes_factory):
+def test_parse_from_individual_envs(
+    postgres_envvars_unset: None,
+    monkeypatch: pytest.MonkeyPatch,
+    model_classes_factory: Callable,
+):
 
     S1, S2, S3, S4, S5 = model_classes_factory()
 
@@ -147,18 +173,20 @@ def test_parse_from_individual_envs(monkeypatch, model_classes_factory):
         S1()
 
     assert exc_info.value.errors()[0] == {
+        "input": {},
         "loc": ("WEBSERVER_POSTGRES",),
-        "msg": "field required",
-        "type": "value_error.missing",
+        "msg": "Field required",
+        "type": "missing",
+        "url": f"https://errors.pydantic.dev/{pydantic_vtag}/v/missing",
     }
 
     s2 = S2()
-    assert s2.dict(exclude_unset=True) == {}
-    assert s2.dict() == {"WEBSERVER_POSTGRES_NULLABLE_OPTIONAL": None}
+    assert s2.model_dump(exclude_unset=True) == {}
+    assert s2.model_dump() == {"WEBSERVER_POSTGRES_NULLABLE_OPTIONAL": None}
 
     s3 = S3()
-    assert s3.dict(exclude_unset=True) == {}
-    assert s3.dict() == {
+    assert
s3.model_dump(exclude_unset=True) == {} + assert s3.model_dump() == { "WEBSERVER_POSTGRES_DEFAULT_ENV": { "POSTGRES_HOST": "pg", "POSTGRES_USER": "test", @@ -172,8 +200,8 @@ def test_parse_from_individual_envs(monkeypatch, model_classes_factory): } s4 = S4() - assert s4.dict(exclude_unset=True) == {} - assert s4.dict() == { + assert s4.model_dump(exclude_unset=True) == {} + assert s4.model_dump() == { "WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV": { "POSTGRES_HOST": "pg", "POSTGRES_USER": "test", @@ -187,11 +215,13 @@ def test_parse_from_individual_envs(monkeypatch, model_classes_factory): } s5 = S5() - assert s5.dict(exclude_unset=True) == {} - assert s5.dict() == {"WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL": None} + assert s5.model_dump(exclude_unset=True) == {} + assert s5.model_dump() == {"WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL": None} -def test_parse_compact_env(monkeypatch, model_classes_factory): +def test_parse_compact_env( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): S1, S2, S3, S4, S5 = model_classes_factory() @@ -210,7 +240,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): # test s1 = S1() - assert s1.dict(exclude_unset=True) == { + assert s1.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -218,7 +248,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): "POSTGRES_DB": "db2", } } - assert s1.dict() == { + assert s1.model_dump() == { "WEBSERVER_POSTGRES": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -239,7 +269,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): """, ) s2 = S2() - assert s2.dict(exclude_unset=True) == { + assert s2.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_OPTIONAL": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -259,7 +289,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): # default until it is really needed. Here before it would # fail because default cannot be computed even if the final value can! 
s3 = S3() - assert s3.dict(exclude_unset=True) == { + assert s3.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_DEFAULT_ENV": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -276,7 +306,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): """, ) s4 = S4() - assert s4.dict(exclude_unset=True) == { + assert s4.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -293,7 +323,7 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): """, ) s5 = S5() - assert s5.dict(exclude_unset=True) == { + assert s5.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -303,7 +333,9 @@ def test_parse_compact_env(monkeypatch, model_classes_factory): } -def test_parse_from_mixed_envs(monkeypatch, model_classes_factory): +def test_parse_from_mixed_envs( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): S1, S2, S3, S4, S5 = model_classes_factory() @@ -316,10 +348,9 @@ def test_parse_from_mixed_envs(monkeypatch, model_classes_factory): POSTGRES_USER=test POSTGRES_PASSWORD=ssh POSTGRES_DB=db - POSTGRES_CLIENT_NAME=client-name """ - with monkeypatch.context() as patch: + with monkeypatch.context(): setenvs_from_envfile( monkeypatch, ENV_FILE.format("WEBSERVER_POSTGRES"), @@ -327,7 +358,7 @@ def test_parse_from_mixed_envs(monkeypatch, model_classes_factory): s1 = S1() - assert s1.dict() == { + assert s1.model_dump() == { "WEBSERVER_POSTGRES": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", @@ -336,87 +367,82 @@ def test_parse_from_mixed_envs(monkeypatch, model_classes_factory): "POSTGRES_DB": "db2", "POSTGRES_MAXSIZE": 50, "POSTGRES_MINSIZE": 1, - "POSTGRES_CLIENT_NAME": "client-name", + "POSTGRES_CLIENT_NAME": None, } } # NOTE how unset marks also applies to embedded fields # NOTE: (1) priority of json-compact over granulated # NOTE: (2) json-compact did not define this but granulated did - assert s1.dict(exclude_unset=True) == { + assert s1.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES": { "POSTGRES_HOST": "pg2", # <- (1) "POSTGRES_USER": "test2", # <- (1) "POSTGRES_PASSWORD": "shh2", # <- (1) "POSTGRES_DB": "db2", # <- (1) - "POSTGRES_CLIENT_NAME": "client-name", # <- (2) } } - with monkeypatch.context() as patch: + with monkeypatch.context(): setenvs_from_envfile( monkeypatch, ENV_FILE.format("WEBSERVER_POSTGRES_NULLABLE_OPTIONAL"), ) s2 = S2() - assert s2.dict(exclude_unset=True) == { + assert s2.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_OPTIONAL": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", "POSTGRES_PASSWORD": "shh2", "POSTGRES_DB": "db2", - "POSTGRES_CLIENT_NAME": "client-name", } } - with monkeypatch.context() as patch: + with monkeypatch.context(): setenvs_from_envfile( monkeypatch, ENV_FILE.format("WEBSERVER_POSTGRES_DEFAULT_ENV"), ) s3 = S3() - assert s3.dict(exclude_unset=True) == { + assert s3.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_DEFAULT_ENV": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", "POSTGRES_PASSWORD": "shh2", "POSTGRES_DB": "db2", - "POSTGRES_CLIENT_NAME": "client-name", } } - with monkeypatch.context() as patch: + with monkeypatch.context(): setenvs_from_envfile( monkeypatch, ENV_FILE.format("WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV"), ) s4 = S4() - assert s4.dict(exclude_unset=True) == { + assert s4.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_DEFAULT_ENV": { "POSTGRES_HOST": "pg2", 
"POSTGRES_USER": "test2", "POSTGRES_PASSWORD": "shh2", "POSTGRES_DB": "db2", - "POSTGRES_CLIENT_NAME": "client-name", } } - with monkeypatch.context() as patch: + with monkeypatch.context(): setenvs_from_envfile( monkeypatch, ENV_FILE.format("WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL"), ) s5 = S5() - assert s5.dict(exclude_unset=True) == { + assert s5.model_dump(exclude_unset=True) == { "WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL": { "POSTGRES_HOST": "pg2", "POSTGRES_USER": "test2", "POSTGRES_PASSWORD": "shh2", "POSTGRES_DB": "db2", - "POSTGRES_CLIENT_NAME": "client-name", } } @@ -437,7 +463,9 @@ def test_parse_from_mixed_envs(monkeypatch, model_classes_factory): # -def test_toggle_plugin_1(monkeypatch, model_classes_factory): +def test_toggle_plugin_1( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): *_, S4, S5 = model_classes_factory() @@ -450,7 +478,9 @@ def test_toggle_plugin_1(monkeypatch, model_classes_factory): assert s5.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL is None -def test_toggle_plugin_2(monkeypatch, model_classes_factory): +def test_toggle_plugin_2( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): *_, S4, S5 = model_classes_factory() # minimal @@ -471,7 +501,9 @@ def test_toggle_plugin_2(monkeypatch, model_classes_factory): assert s5.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL is None -def test_toggle_plugin_3(monkeypatch, model_classes_factory): +def test_toggle_plugin_3( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): *_, S4, S5 = model_classes_factory() # explicitly disables @@ -494,7 +526,9 @@ def test_toggle_plugin_3(monkeypatch, model_classes_factory): assert s5.WEBSERVER_POSTGRES_NULLABLE_DEFAULT_NULL is None -def test_toggle_plugin_4(monkeypatch, model_classes_factory): +def test_toggle_plugin_4( + postgres_envvars_unset: None, monkeypatch, model_classes_factory +): *_, S4, S5 = model_classes_factory() JSON_VALUE = '{"POSTGRES_HOST":"pg2", "POSTGRES_USER":"test2", "POSTGRES_PASSWORD":"shh2", "POSTGRES_DB":"db2"}' diff --git a/packages/settings-library/tests/test_docker_registry.py b/packages/settings-library/tests/test_docker_registry.py index d168d93cf8a..0cc513399c8 100644 --- a/packages/settings-library/tests/test_docker_registry.py +++ b/packages/settings-library/tests/test_docker_registry.py @@ -4,7 +4,6 @@ from copy import deepcopy import pytest -from pytest import MonkeyPatch from settings_library.docker_registry import RegistrySettings MOCKED_BASE_REGISTRY_ENV_VARS: dict[str, str] = { @@ -12,6 +11,7 @@ "REGISTRY_USER": "usr", "REGISTRY_PW": "pwd", "REGISTRY_SSL": "False", + "REGISTRY_URL": "pytest.registry.com", } @@ -21,7 +21,7 @@ def _add_parameter_to_env(env: dict[str, str], key: str, value: str) -> dict[str return registry_env -def _mock_env_vars(monkeypatch: MonkeyPatch, env_vars: dict[str, str]) -> None: +def _mock_env_vars(monkeypatch: pytest.MonkeyPatch, env_vars: dict[str, str]) -> None: for key, value in env_vars.items(): monkeypatch.setenv(key, value) @@ -33,7 +33,7 @@ def _mock_env_vars(monkeypatch: MonkeyPatch, env_vars: dict[str, str]) -> None: ("REGISTRY_URL", "some_prod_url"), ], ) -def test_model_ok(env_key: str, env_var: str, monkeypatch: MonkeyPatch) -> None: +def test_model_ok(env_key: str, env_var: str, monkeypatch: pytest.MonkeyPatch) -> None: registry_env_vars = _add_parameter_to_env( MOCKED_BASE_REGISTRY_ENV_VARS, env_key, env_var ) @@ -44,7 +44,7 @@ def test_model_ok(env_key: str, env_var: str, monkeypatch: MonkeyPatch) -> None: assert registry_settings.resolved_registry_url 
== env_var -def test_registry_path_none_string(monkeypatch: MonkeyPatch) -> None: +def test_registry_path_none_string(monkeypatch: pytest.MonkeyPatch) -> None: registry_env_vars = _add_parameter_to_env( MOCKED_BASE_REGISTRY_ENV_VARS, "REGISTRY_PATH", "None" ) diff --git a/packages/settings-library/tests/test_ec2.py b/packages/settings-library/tests/test_ec2.py new file mode 100644 index 00000000000..6f78d72e446 --- /dev/null +++ b/packages/settings-library/tests/test_ec2.py @@ -0,0 +1,66 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +from pydantic import ValidationError +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from settings_library.ec2 import EC2Settings + + +def test_ec2_endpoint_defaults_to_null(monkeypatch: pytest.MonkeyPatch): + setenvs_from_dict( + monkeypatch, + { + "EC2_ACCESS_KEY_ID": "my_access_key_id", + "EC2_REGION_NAME": "us-east-1", + "EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + }, + ) + + settings = EC2Settings.create_from_envs() + assert settings.EC2_ENDPOINT is None + + +def test_ec2_endpoint_is_nullified(monkeypatch: pytest.MonkeyPatch): + setenvs_from_dict( + monkeypatch, + { + "EC2_ACCESS_KEY_ID": "my_access_key_id", + "EC2_ENDPOINT": "null", + "EC2_REGION_NAME": "us-east-1", + "EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + }, + ) + + settings = EC2Settings.create_from_envs() + assert settings.EC2_ENDPOINT is None + + +def test_ec2_endpoint_invalid(monkeypatch: pytest.MonkeyPatch): + setenvs_from_dict( + monkeypatch, + { + "EC2_ACCESS_KEY_ID": "my_access_key_id", + "EC2_ENDPOINT": "ftp://my_ec2_endpoint.com", + "EC2_REGION_NAME": "us-east-1", + "EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + }, + ) + + with pytest.raises(ValidationError) as err_info: + EC2Settings.create_from_envs() + + assert err_info.value.error_count() == 1 + error = err_info.value.errors()[0] + + assert error["loc"] == ("EC2_ENDPOINT",) + assert error["type"] == "url_scheme" + + +def test_ec2_endpoint_description(): + model_fields = dict(EC2Settings.model_fields) + assert model_fields["EC2_ACCESS_KEY_ID"].description is None + assert model_fields["EC2_ENDPOINT"].description is not None diff --git a/packages/settings-library/tests/test_email.py b/packages/settings-library/tests/test_email.py index c12d035e517..439c0c634e5 100644 --- a/packages/settings-library/tests/test_email.py +++ b/packages/settings-library/tests/test_email.py @@ -1,10 +1,31 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from enum import Enum from typing import Any import pytest from pydantic import ValidationError +from pytest_simcore.helpers.monkeypatch_envs import delenvs_from_dict, setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.email import EmailProtocol, SMTPSettings +@pytest.fixture +def all_env_devel_undefined( + monkeypatch: pytest.MonkeyPatch, env_devel_dict: EnvVarsDict +) -> None: + """Ensures that all env vars in .env-devel are undefined in the test environment + + NOTE: this is useful to have a clean starting point and avoid + the environment to influence your test. 
I found this situation + when some script was accidentaly injecting the entire .env-devel in the environment + """ + delenvs_from_dict(monkeypatch, env_devel_dict, raising=False) + + @pytest.mark.parametrize( "cfg", [ @@ -15,7 +36,7 @@ { "SMTP_HOST": "test", "SMTP_PORT": 113, - "SMTP_PROTOCOL": EmailProtocol.UNENCRYPTED, + "SMTP_PROTOCOL": EmailProtocol.UNENCRYPTED.value, }, { "SMTP_HOST": "test", @@ -28,61 +49,114 @@ "SMTP_PORT": 113, "SMTP_USERNAME": "test", "SMTP_PASSWORD": "test", - "SMTP_PROTOCOL": EmailProtocol.UNENCRYPTED, + "SMTP_PROTOCOL": EmailProtocol.UNENCRYPTED.value, }, { "SMTP_HOST": "test", "SMTP_PORT": 113, "SMTP_USERNAME": "test", "SMTP_PASSWORD": "test", - "SMTP_PROTOCOL": EmailProtocol.TLS, + "SMTP_PROTOCOL": EmailProtocol.TLS.value, }, { "SMTP_HOST": "test", "SMTP_PORT": 113, "SMTP_USERNAME": "test", "SMTP_PASSWORD": "test", - "SMTP_PROTOCOL": EmailProtocol.STARTTLS, + "SMTP_PROTOCOL": EmailProtocol.STARTTLS.value, }, ], ) -def test_smtp_configuration_ok(cfg: dict[str, Any]): - assert SMTPSettings.parse_obj(cfg) +def test_smtp_configuration_ok( + all_env_devel_undefined: None, + monkeypatch: pytest.MonkeyPatch, + cfg: dict[str, Any], +): + assert SMTPSettings.model_validate(cfg) + + setenvs_from_dict(monkeypatch, {k: f"{v}" for k, v in cfg.items()}) + assert SMTPSettings.create_from_envs() @pytest.mark.parametrize( - "cfg", + "cfg,error_type", [ - {"SMTP_HOST": "test", "SMTP_PORT": 113, "SMTP_USERNAME": "test"}, - {"SMTP_HOST": "test", "SMTP_PORT": 113, "SMTP_PASSWORD": "test"}, - { - "SMTP_HOST": "test", - "SMTP_PORT": 113, - "SMTP_PROTOCOL": EmailProtocol.STARTTLS, - "SMTP_PASSWORD": "test", - }, - { - "SMTP_HOST": "test", - "SMTP_PORT": 113, - "SMTP_PROTOCOL": EmailProtocol.STARTTLS, - "SMTP_USERNAME": "test", - }, - { - "SMTP_HOST": "test", - "SMTP_PORT": 113, - "SMTP_USERNAME": "", - "SMTP_PASSWORD": "test", - "SMTP_PROTOCOL": EmailProtocol.STARTTLS, - }, - { - "SMTP_HOST": "test", - "SMTP_PORT": 113, - "SMTP_USERNAME": "", - "SMTP_PASSWORD": "test", - "SMTP_PROTOCOL": EmailProtocol.TLS, - }, + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 111, + "SMTP_USERNAME": "test", + # password required if username provided + }, + "value_error", + ), + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 112, + "SMTP_PASSWORD": "test", + # username required if password provided + }, + "value_error", + ), + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 113, + "SMTP_PROTOCOL": EmailProtocol.STARTTLS, + "SMTP_PASSWORD": "test", + }, + "value_error", + ), + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 114, + "SMTP_PROTOCOL": EmailProtocol.STARTTLS, + "SMTP_USERNAME": "test", + }, + "value_error", + ), + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 115, + "SMTP_USERNAME": "", + "SMTP_PASSWORD": "test", + "SMTP_PROTOCOL": EmailProtocol.STARTTLS, + }, + "string_too_short", + ), + ( + { + "SMTP_HOST": "test", + "SMTP_PORT": 116, + "SMTP_USERNAME": "", + "SMTP_PASSWORD": "test", + "SMTP_PROTOCOL": EmailProtocol.TLS, + }, + "string_too_short", + ), ], ) -def test_smtp_configuration_fails(cfg: dict[str, Any]): - with pytest.raises(ValidationError): - assert SMTPSettings.parse_obj(cfg) +def test_smtp_configuration_fails( + all_env_devel_undefined: None, + monkeypatch: pytest.MonkeyPatch, + cfg: dict[str, Any], + error_type: str, +): + with pytest.raises(ValidationError) as err_info: + SMTPSettings(**cfg) + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == error_type + + setenvs_from_dict( + monkeypatch, + {k: str(v.value if isinstance(v, Enum) else v) for k, v 
in cfg.items()}, + ) + with pytest.raises(ValidationError) as err_info: + SMTPSettings.create_from_envs() + + assert err_info.value.error_count() == 1 + assert err_info.value.errors()[0]["type"] == error_type diff --git a/packages/settings-library/tests/test_node_ports_settings.py b/packages/settings-library/tests/test_node_ports_settings.py new file mode 100644 index 00000000000..0adbd4efb4d --- /dev/null +++ b/packages/settings-library/tests/test_node_ports_settings.py @@ -0,0 +1,15 @@ +import pytest +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from settings_library.node_ports import StorageAuthSettings + + +@pytest.mark.parametrize("secure", [True, False]) +def test_storage_auth_settings_secure(monkeypatch: pytest.MonkeyPatch, secure: bool): + setenvs_from_dict( + monkeypatch, + { + "STORAGE_SECURE": "1" if secure else "0", + }, + ) + settings = StorageAuthSettings.create_from_envs() + assert settings.base_url == f"{'https' if secure else 'http'}://storage:8080" diff --git a/packages/settings-library/tests/test_postgres.py b/packages/settings-library/tests/test_postgres.py index 2d7769736e2..6c9067c2d6b 100644 --- a/packages/settings-library/tests/test_postgres.py +++ b/packages/settings-library/tests/test_postgres.py @@ -3,7 +3,11 @@ # pylint: disable=unused-variable +from urllib.parse import urlparse + import pytest +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.postgres import PostgresSettings @@ -12,36 +16,50 @@ def env_file(): return ".env-sample" -def test_cached_property_dsn(mock_environment: dict): +@pytest.fixture +def mock_environment(mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): + return mock_environment | setenvs_from_dict( + monkeypatch, {"POSTGRES_CLIENT_NAME": "Some &43 funky name"} + ) - settings = PostgresSettings() - # all are upper-case - assert all(key == key.upper() for key in settings.dict().keys()) +def test_cached_property_dsn(mock_environment: EnvVarsDict): - # dsn is computed from the other fields - assert "dsn" not in settings.dict().keys() + settings = PostgresSettings.create_from_envs() - # causes cached property to be computed and stored on the instance - assert settings.dsn + # all are upper-case + assert all(key == key.upper() for key in settings.model_dump()) - assert "dsn" in settings.dict().keys() + assert settings.dsn + # dsn is computed from the other fields + assert "dsn" not in settings.model_dump() -def test_dsn_with_query(mock_environment: dict, monkeypatch): +def test_dsn_with_query(mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): settings = PostgresSettings() - assert not settings.POSTGRES_CLIENT_NAME + assert settings.POSTGRES_CLIENT_NAME assert settings.dsn == "postgresql://foo:secret@localhost:5432/foodb" - - # now with app - monkeypatch.setenv("POSTGRES_CLIENT_NAME", "Some &43 funky name") - - settings_with_app = PostgresSettings() - - assert settings_with_app.POSTGRES_CLIENT_NAME assert ( - settings_with_app.dsn_with_query + settings.dsn_with_query == "postgresql://foo:secret@localhost:5432/foodb?application_name=Some+%2643+funky+name" ) + + with monkeypatch.context() as patch: + patch.delenv("POSTGRES_CLIENT_NAME") + settings = PostgresSettings() + + assert not settings.POSTGRES_CLIENT_NAME + assert settings.dsn == settings.dsn_with_query + + +def test_dsn_with_async_sqlalchemy_has_query( + mock_environment: EnvVarsDict, monkeypatch +): + settings = PostgresSettings() + + 
parsed_url = urlparse(settings.dsn_with_async_sqlalchemy) + assert parsed_url.scheme.split("+") == ["postgresql", "asyncpg"] + + assert not parsed_url.query diff --git a/packages/settings-library/tests/test_s3.py b/packages/settings-library/tests/test_s3.py deleted file mode 100644 index e557d9f4e29..00000000000 --- a/packages/settings-library/tests/test_s3.py +++ /dev/null @@ -1,34 +0,0 @@ -# pylint:disable=redefined-outer-name -# pylint:disable=unused-argument - -import pytest -from pytest import MonkeyPatch -from settings_library.s3 import S3Settings - - -@pytest.fixture -def base_env(monkeypatch: MonkeyPatch) -> None: - monkeypatch.setenv("S3_ACCESS_KEY", "mocked") - monkeypatch.setenv("S3_SECRET_KEY", "mocked") - monkeypatch.setenv("S3_BUCKET_NAME", "mocked") - - -@pytest.mark.parametrize( - "endpoint, secure, expected", - [ - ("osparc.io", "true", "https://osparc.io"), - ("osparc.io", "false", "http://osparc.io"), - ("https://osparc.io", "true", "https://osparc.io"), - ("https://osparc.io", "false", "https://osparc.io"), - ("http://osparc.io", "true", "http://osparc.io"), - ("http://osparc.io", "false", "http://osparc.io"), - ], -) -def test_regression( - monkeypatch: MonkeyPatch, endpoint: str, secure: str, expected: str, base_env: None -) -> None: - monkeypatch.setenv("S3_ENDPOINT", endpoint) - monkeypatch.setenv("S3_SECURE", secure) - - s3_settings = S3Settings() - assert s3_settings.S3_ENDPOINT == expected diff --git a/packages/settings-library/tests/test_twilio.py b/packages/settings-library/tests/test_twilio.py index 08ab303d9e6..1989fbe6a9f 100644 --- a/packages/settings-library/tests/test_twilio.py +++ b/packages/settings-library/tests/test_twilio.py @@ -3,19 +3,13 @@ # pylint: disable=unused-variable import pytest -from pydantic import ValidationError -from pytest import MonkeyPatch -from pytest_simcore.helpers.utils_envs import setenvs_from_dict +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from settings_library.twilio import TwilioSettings def test_twilio_settings_within_envdevel( - mock_env_devel_environment: dict[str, str], monkeypatch: MonkeyPatch + mock_env_devel_environment: dict[str, str], monkeypatch: pytest.MonkeyPatch ): - # in .env-devel these are for the oment undefined - with pytest.raises(ValidationError): - TwilioSettings.create_from_envs() - # adds twilio credentials with monkeypatch.context() as patch: setenvs_from_dict( @@ -26,12 +20,12 @@ def test_twilio_settings_within_envdevel( }, ) settings = TwilioSettings.create_from_envs() - print(settings.json(indent=2)) + print(settings.model_dump_json(indent=2)) assert settings def test_twilio_settings_with_country_codes( - mock_env_devel_environment: dict[str, str], monkeypatch: MonkeyPatch + mock_env_devel_environment: dict[str, str], monkeypatch: pytest.MonkeyPatch ): # defaults diff --git a/packages/settings-library/tests/test_utils_cli.py b/packages/settings-library/tests/test_utils_cli.py index 3566bb5f3ca..49c684ea626 100644 --- a/packages/settings-library/tests/test_utils_cli.py +++ b/packages/settings-library/tests/test_utils_cli.py @@ -4,18 +4,23 @@ import json import logging +from collections.abc import Callable from io import StringIO -from typing import Any, Callable +from typing import Any import pytest import typer from dotenv import dotenv_values +from pydantic import AnyHttpUrl, Field, SecretStr +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_envfile from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import 
setenvs_from_envfile from settings_library.base import BaseCustomSettings from settings_library.utils_cli import ( - create_json_encoder_wo_secrets, + _print_as_json, create_settings_command, + create_version_callback, + model_dump_with_secrets, + print_as_envfile, ) from typer.testing import CliRunner @@ -34,7 +39,14 @@ def envs_to_kwargs(envs: EnvVarsDict) -> dict[str, Any]: @pytest.fixture -def cli(fake_settings_class: type[BaseCustomSettings]) -> typer.Typer: +def fake_version() -> str: + return "0.0.1-alpha" + + +@pytest.fixture +def cli( + fake_settings_class: type[BaseCustomSettings], fake_version: str +) -> typer.Typer: main = typer.Typer(name="app") @main.command() @@ -47,6 +59,7 @@ def run(): # adds settings command settings_cmd = create_settings_command(fake_settings_class, log) main.command()(settings_cmd) + main.callback()(create_version_callback(fake_version)) return main @@ -71,12 +84,7 @@ def fake_granular_env_file_content() -> str: @pytest.fixture def export_as_dict() -> Callable: def _export(model_obj, **export_options): - return json.loads( - model_obj.json( - encoder=create_json_encoder_wo_secrets(model_obj.__class__), - **export_options - ) - ) + return model_dump_with_secrets(model_obj, show_secrets=True, **export_options) return _export @@ -89,6 +97,10 @@ def test_compose_commands(cli: typer.Typer, cli_runner: CliRunner): print(result.stdout) assert result.exit_code == 0, result + result = cli_runner.invoke(cli, ["--version"], catch_exceptions=False) + print(result.stdout) + assert result.exit_code == 0, result + # first command result = cli_runner.invoke(cli, ["run", "--help"], catch_exceptions=False) print(result.stdout) @@ -97,37 +109,13 @@ def test_compose_commands(cli: typer.Typer, cli_runner: CliRunner): # settings command result = cli_runner.invoke(cli, ["settings", "--help"], catch_exceptions=False) print(result.stdout) - - assert "--compact" in result.stdout assert result.exit_code == 0, result - def extract_lines(text): - lines = [line.strip() for line in text.split("\n") if line.strip()] - return lines - - assert extract_lines(HELP) == extract_lines(result.stdout) - - -HELP = """ -Usage: app settings [OPTIONS] + received_help = result.stdout - Resolves settings and prints envfile - -Options: - --as-json / --no-as-json [default: no-as-json] - --as-json-schema / --no-as-json-schema - [default: no-as-json-schema] - --compact / --no-compact Print compact form [default: no-compact] - --verbose / --no-verbose [default: no-verbose] - --show-secrets / --no-show-secrets - [default: no-show-secrets] - --exclude-unset / --no-exclude-unset - displays settings that were explicitly setThis - represents current config (i.e. required+ - defaults overriden). [default: no-exclude- - unset] - --help Show this message and exit. 
-""" + assert "compact" in result.stdout, f"got instead {received_help=}" + assert "as-json" in received_help, f"got instead {received_help=}" + assert "help" in received_help, f"got instead {received_help=}" def test_settings_as_json( @@ -136,7 +124,6 @@ def test_settings_as_json( mock_environment, cli_runner: CliRunner, ): - result = cli_runner.invoke( cli, ["settings", "--as-json", "--show-secrets"], catch_exceptions=False ) @@ -144,7 +131,7 @@ def test_settings_as_json( # reuse resulting json to build settings settings: dict = json.loads(result.stdout) - assert fake_settings_class.parse_obj(settings) + assert fake_settings_class.model_validate(settings) def test_settings_as_json_schema( @@ -153,14 +140,13 @@ def test_settings_as_json_schema( mock_environment, cli_runner: CliRunner, ): - result = cli_runner.invoke( cli, ["settings", "--as-json-schema"], catch_exceptions=False ) print(result.stdout) # reuse resulting json to build settings - settings_schema: dict = json.loads(result.stdout) + json.loads(result.stdout) def test_cli_default_settings_envs( @@ -172,9 +158,7 @@ def test_cli_default_settings_envs( monkeypatch: pytest.MonkeyPatch, ): with monkeypatch.context() as patch: - mocked_envs_1: EnvVarsDict = setenvs_from_envfile( - patch, fake_granular_env_file_content - ) + setenvs_from_envfile(patch, fake_granular_env_file_content) cli_settings_output = cli_runner.invoke( cli, @@ -186,7 +170,7 @@ def test_cli_default_settings_envs( # now let's use these as env vars with monkeypatch.context() as patch: - mocked_envs_2: EnvVarsDict = setenvs_from_envfile( + setenvs_from_envfile( patch, cli_settings_output, ) @@ -219,11 +203,8 @@ def test_cli_compact_settings_envs( cli_runner: CliRunner, monkeypatch: pytest.MonkeyPatch, ): - with monkeypatch.context() as patch: - mocked_envs_1: EnvVarsDict = setenvs_from_envfile( - patch, fake_granular_env_file_content - ) + setenvs_from_envfile(patch, fake_granular_env_file_content) settings_1 = fake_settings_class() @@ -262,8 +243,8 @@ def test_cli_compact_settings_envs( assert mocked_envs_2 == { "APP_HOST": "localhost", "APP_PORT": "80", - "APP_OPTIONAL_ADDON": '{"MODULE_VALUE": 10, "MODULE_VALUE_DEFAULT": 42}', - "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST": "localhost", "POSTGRES_PORT": 5432, "POSTGRES_USER": "foo", "POSTGRES_PASSWORD": "secret", "POSTGRES_DB": "foodb", "POSTGRES_MINSIZE": 1, "POSTGRES_MAXSIZE": 50, "POSTGRES_CLIENT_NAME": null}', + "APP_OPTIONAL_ADDON": '{"MODULE_VALUE":10,"MODULE_VALUE_DEFAULT":42}', + "APP_REQUIRED_PLUGIN": '{"POSTGRES_HOST":"localhost","POSTGRES_PORT":5432,"POSTGRES_USER":"foo","POSTGRES_PASSWORD":"secret","POSTGRES_DB":"foodb","POSTGRES_MINSIZE":1,"POSTGRES_MAXSIZE":50,"POSTGRES_CLIENT_NAME":null}', } settings_2 = fake_settings_class() @@ -389,7 +370,7 @@ def test_cli_settings_exclude_unset_as_json( monkeypatch: pytest.MonkeyPatch, ): # minimal envfile - mocked_envs: EnvVarsDict = setenvs_from_envfile( + setenvs_from_envfile( monkeypatch, """ # these are required @@ -429,3 +410,33 @@ def test_cli_settings_exclude_unset_as_json( "POSTGRES_MAXSIZE": 20, }, } + + +def test_print_as(capsys: pytest.CaptureFixture): + class FakeSettings(BaseCustomSettings): + INTEGER: int = Field(..., description="Some info") + SECRET: SecretStr + URL: AnyHttpUrl + + settings_obj = FakeSettings(INTEGER=1, SECRET="secret", URL="http://google.com") # type: ignore + + print_as_envfile(settings_obj, compact=True, verbose=True, show_secrets=True) + captured = capsys.readouterr() + assert "secret" in captured.out + assert "Some info" in 
captured.out + + print_as_envfile(settings_obj, compact=True, verbose=False, show_secrets=True) + captured = capsys.readouterr() + assert "secret" in captured.out + assert "Some info" not in captured.out + + print_as_envfile(settings_obj, compact=True, verbose=False, show_secrets=False) + captured = capsys.readouterr() + assert "secret" not in captured.out + assert "Some info" not in captured.out + + _print_as_json(settings_obj, compact=True, show_secrets=False) + captured = capsys.readouterr() + assert "secret" not in captured.out + assert "**" in captured.out + assert "Some info" not in captured.out diff --git a/packages/settings-library/tests/test_utils_logging.py b/packages/settings-library/tests/test_utils_logging.py index c0659b80f82..d63a8ae8538 100644 --- a/packages/settings-library/tests/test_utils_logging.py +++ b/packages/settings-library/tests/test_utils_logging.py @@ -1,9 +1,8 @@ import logging -from typing import Optional -from pydantic import Field, validator +from pydantic import AliasChoices, Field, field_validator from settings_library.base import BaseCustomSettings -from settings_library.basic_types import BootMode +from settings_library.basic_types import BootModeEnum from settings_library.utils_logging import MixinLoggingSettings @@ -15,22 +14,24 @@ def test_mixin_logging(monkeypatch): class Settings(BaseCustomSettings, MixinLoggingSettings): # DOCKER - SC_BOOT_MODE: Optional[BootMode] + SC_BOOT_MODE: BootModeEnum | None = None # LOGGING LOG_LEVEL: str = Field( "WARNING", - env=[ + validation_alias=AliasChoices( "APPNAME_LOG_LEVEL", "LOG_LEVEL", - ], + ), ) - APPNAME_DEBUG: bool = Field(False, description="Starts app in debug mode") + APPNAME_DEBUG: bool = Field( + default=False, description="Starts app in debug mode" + ) - @validator("LOG_LEVEL") + @field_validator("LOG_LEVEL", mode="before") @classmethod - def _v(cls, value) -> str: + def _v(cls, value: str) -> str: return cls.validate_log_level(value) # ----------------------------------------------------------- @@ -41,14 +42,9 @@ def _v(cls, value) -> str: assert settings.LOG_LEVEL == "DEBUG" assert ( - settings.json() - == '{"SC_BOOT_MODE": null, "LOG_LEVEL": "DEBUG", "APPNAME_DEBUG": false}' + settings.model_dump_json() + == '{"SC_BOOT_MODE":null,"LOG_LEVEL":"DEBUG","APPNAME_DEBUG":false}' ) # test cached-property assert settings.log_level == logging.DEBUG - # log_level is cached-property (notice that is lower-case!), and gets added after first use - assert ( - settings.json() - == '{"SC_BOOT_MODE": null, "LOG_LEVEL": "DEBUG", "APPNAME_DEBUG": false, "log_level": 10}' - ) diff --git a/packages/settings-library/tests/test_utils_r_clone.py b/packages/settings-library/tests/test_utils_r_clone.py index 9344bc65086..82dabf47daf 100644 --- a/packages/settings-library/tests/test_utils_r_clone.py +++ b/packages/settings-library/tests/test_utils_r_clone.py @@ -1,34 +1,37 @@ # pylint: disable=redefined-outer-name import pytest +from faker import Faker from settings_library.r_clone import RCloneSettings, S3Provider from settings_library.utils_r_clone import ( - _COMMON_ENTRIES, + _COMMON_SETTINGS_OPTIONS, get_r_clone_config, resolve_provider, ) @pytest.fixture(params=list(S3Provider)) -def r_clone_settings(request, monkeypatch) -> RCloneSettings: +def r_clone_settings( + request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch, faker: Faker +) -> RCloneSettings: monkeypatch.setenv("R_CLONE_PROVIDER", request.param) - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - 
monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", False) - return RCloneSettings() + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + return RCloneSettings.create_from_envs() def test_r_clone_config_template_replacement(r_clone_settings: RCloneSettings) -> None: - r_clone_config = get_r_clone_config(r_clone_settings) + r_clone_config = get_r_clone_config(r_clone_settings, s3_config_key="target-s3") print(r_clone_config) assert "{endpoint}" not in r_clone_config assert "{access_key}" not in r_clone_config assert "{secret_key}" not in r_clone_config - for key in _COMMON_ENTRIES.keys(): + for key in _COMMON_SETTINGS_OPTIONS: assert key in r_clone_config diff --git a/packages/settings-library/tests/test_utils_service.py b/packages/settings-library/tests/test_utils_service.py index 3eadb1828c6..8ecd9835893 100644 --- a/packages/settings-library/tests/test_utils_service.py +++ b/packages/settings-library/tests/test_utils_service.py @@ -3,24 +3,30 @@ # pylint: disable=unused-variable from functools import cached_property -from typing import Optional +import pytest +from pydantic import AnyHttpUrl, TypeAdapter from pydantic.types import SecretStr from settings_library.base import BaseCustomSettings from settings_library.basic_types import PortInt, VersionTag +from settings_library.catalog import CatalogSettings +from settings_library.director_v2 import DirectorV2Settings +from settings_library.storage import StorageSettings from settings_library.utils_service import MixinServiceSettings, URLPart +from settings_library.webserver import WebServerSettings -def test_mixing_service_settings_usage(monkeypatch): +def test_mixing_service_settings_usage(monkeypatch: pytest.MonkeyPatch): # this test provides an example of usage class MySettings(BaseCustomSettings, MixinServiceSettings): MY_HOST: str = "example.com" MY_PORT: PortInt = 8000 - MY_VTAG: Optional[VersionTag] = None + MY_VTAG: VersionTag | None = None + MY_SECURE: bool = False - # optional - MY_USER: Optional[str] - MY_PASSWORD: Optional[SecretStr] + # optional (in Pydantic v2 requires a default) + MY_USER: str | None = None + MY_PASSWORD: SecretStr | None = None @cached_property def api_base_url(self) -> str: @@ -60,3 +66,34 @@ def base_url(self) -> str: assert settings.api_base_url == "http://me:secret@example.com:8000/v9" assert settings.base_url == "http://me:secret@example.com:8000" assert settings.origin_url == "http://example.com" + + # ----------- + + monkeypatch.setenv("MY_SECURE", "1") + settings = MySettings.create_from_envs() + + assert settings.api_base_url == "https://me:secret@example.com:8000/v9" + assert settings.base_url == "https://me:secret@example.com:8000" + assert settings.origin_url == "https://example.com" + + +@pytest.mark.parametrize( + "service_settings_cls", + [WebServerSettings, CatalogSettings, DirectorV2Settings, StorageSettings], +) +def test_service_settings_base_urls(service_settings_cls: type): + + assert issubclass(service_settings_cls, BaseCustomSettings) + assert issubclass(service_settings_cls, MixinServiceSettings) + + settings_with_defaults = service_settings_cls() + + base_url = TypeAdapter(AnyHttpUrl).validate_python(settings_with_defaults.base_url) + api_base_url = 
TypeAdapter(AnyHttpUrl).validate_python(settings_with_defaults.api_base_url) + + assert base_url.path != api_base_url.path + assert (base_url.scheme, base_url.host, base_url.port) == ( + api_base_url.scheme, + api_base_url.host, + api_base_url.port, + ) diff --git a/packages/simcore-sdk/Makefile b/packages/simcore-sdk/Makefile index f05e85fd985..fb631379f39 100644 --- a/packages/simcore-sdk/Makefile +++ b/packages/simcore-sdk/Makefile @@ -14,7 +14,7 @@ include ../../scripts/common-package.Makefile .PHONY: install-dev install-prod install-ci install-dev install-prod install-ci: _check_venv_active ## install app in development/production or CI mode # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt .PHONY: tests-unit tests-unit-ci tests-integration tests-integration-ci tests tests-unit: ## runs unit tests @@ -42,6 +42,7 @@ tests-unit-ci: ## runs unit tests with ci parameters --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=simcore_sdk \ --durations=10 \ --keep-docker-up \ @@ -61,6 +62,7 @@ tests-integration: ## runs integration tests against local+production images --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=simcore_sdk \ --durations=10 \ --exitfirst \ @@ -75,13 +77,17 @@ tests-integration-ci: ## runs integration tests against local+production images --asyncio-mode=auto \ --color=yes \ --cov-append \ + --disk-usage \ + --disk-usage-threshold=20 \ --cov-config=../../.coveragerc \ --cov-report=term-missing \ --cov-report=xml \ + --junitxml=junit.xml -o junit_family=legacy \ --cov=simcore_sdk \ --durations=10 \ --keep-docker-up \ --log-date-format="%Y-%m-%d %H:%M:%S" \ + --log-cli-level=WARNING \ --verbose \ -m "not heavy_load" \ --log-format="%(asctime)s %(levelname)s %(message)s" \ diff --git a/packages/simcore-sdk/requirements/_base.in b/packages/simcore-sdk/requirements/_base.in index a07a0b50b01..9be327aed36 100644 --- a/packages/simcore-sdk/requirements/_base.in +++ b/packages/simcore-sdk/requirements/_base.in @@ -7,13 +7,15 @@ --requirement ../../../packages/service-library/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/common-library/requirements/_base.in + aiocache aiofiles aiohttp -aiopg[sa] packaging pint +sqlalchemy[asyncio] pydantic[email] tenacity tqdm diff --git a/packages/simcore-sdk/requirements/_base.txt b/packages/simcore-sdk/requirements/_base.txt index 380718bbc11..47e473da7c3 100644 --- a/packages/simcore-sdk/requirements/_base.txt +++ b/packages/simcore-sdk/requirements/_base.txt @@ -1,127 +1,486 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==9.0.4 +aio-pika==9.5.5 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiocache==0.12.0 - # via -r requirements/_base.in +aiocache==0.12.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in aiodebug==2.3.0 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiofiles==23.1.0 +aiodocker==0.24.0 + # via -r 
requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiohttp==3.8.4 - # via -r requirements/_base.in -aiopg==1.4.0 - # via -r requirements/_base.in -aiormq==6.7.2 +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in + # aiodocker +aiormq==6.8.1 # via aio-pika -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp -alembic==1.9.4 +alembic==1.14.1 # via -r requirements/../../../packages/postgres-database/requirements/_base.in -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # aiohttp - # aiopg - # redis -attrs==21.4.0 + # fast-depends + # faststream +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.1.0 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt # aiohttp # jsonschema -charset-normalizer==3.0.1 - # via aiohttp -click==8.1.3 + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 # via typer -dnspython==2.3.0 +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -email-validator==1.3.1 +email-validator==2.2.0 # via pydantic -frozenlist==1.3.3 +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +frozenlist==1.5.0 # via # aiohttp # aiosignal -greenlet==2.0.2 +googleapis-common-protos==1.68.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 # via sqlalchemy -idna==3.4 +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +idna==3.10 # via + # anyio # email-validator + # requests # yarl -jsonschema==3.2.0 - # via -r requirements/../../../packages/models-library/requirements/_base.in -mako==1.2.4 +importlib-metadata==8.5.0 + # via opentelemetry-api +jsonschema==4.23.0 # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via mako -multidict==6.0.4 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 # via # aiohttp # yarl -packaging==23.0 - # via -r requirements/_base.in -pamqp==3.2.1 +opentelemetry-api==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asyncpg==0.51b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r 
requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pint==0.20.1 +pint==0.24.4 # via -r requirements/_base.in -psycopg2-binary==2.9.5 +platformdirs==4.3.6 + # via pint +propcache==0.3.0 # via - # aiopg - # sqlalchemy -pydantic==1.10.2 + # aiohttp + # yarl +protobuf==5.29.3 # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in -pyinstrument==4.4.0 + # fast-depends + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via -r requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.19.3 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -pyyaml==5.4.1 +python-dotenv==1.0.1 + # via pydantic-settings +pyyaml==6.0.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in -redis==4.5.1 - # via -r requirements/../../../packages/service-library/requirements/_base.in -six==1.16.0 +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.23.1 # via # jsonschema - # python-dateutil -sqlalchemy==1.4.46 + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sqlalchemy==1.4.54 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/postgres-database/requirements/_base.in - # aiopg + # -r requirements/_base.in # alembic -tenacity==8.2.2 +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -tqdm==4.64.1 +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -typer==0.7.0 - # via -r requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.5.0 +typer==0.15.2 + # via + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug + # alembic + # anyio + # faststream + # flexcache + # flexparser + # opentelemetry-sdk + # pint # pydantic -yarl==1.8.2 + # pydantic-core + # pydantic-extra-types + # typer +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/packages/simcore-sdk/requirements/_test.in b/packages/simcore-sdk/requirements/_test.in index 00a31fd11cd..5ee33c8a3cd 100644 --- a/packages/simcore-sdk/requirements/_test.in +++ b/packages/simcore-sdk/requirements/_test.in @@ -8,26 +8,28 @@ # --constraint _base.txt + aioboto3 aioresponses alembic click coverage -coveralls docker faker flaky -minio moto[server] pytest -pytest-aiohttp +pytest-asyncio pytest-cov pytest-icdiff pytest-instafail -pytest-lazy-fixture pytest-mock pytest-runner pytest-sugar pytest-xdist python-dotenv requests +sqlalchemy[mypy] +types-aiobotocore[s3] +types-aiofiles +types-tqdm diff --git 
a/packages/simcore-sdk/requirements/_test.txt b/packages/simcore-sdk/requirements/_test.txt index 83c30b6aeed..fda48d30bb2 100644 --- a/packages/simcore-sdk/requirements/_test.txt +++ b/packages/simcore-sdk/requirements/_test.txt @@ -1,337 +1,367 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aioboto3==9.6.0 +aioboto3==14.3.0 # via -r requirements/_test.in -aiobotocore==2.3.0 +aiobotocore==2.22.0 # via aioboto3 -aiohttp==3.8.4 +aiofiles==24.1.0 # via # -c requirements/_base.txt + # aioboto3 +aiohappyeyeballs==2.4.6 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # aiobotocore # aioresponses - # pytest-aiohttp -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore -aioresponses==0.7.4 +aioresponses==0.7.8 # via -r requirements/_test.in -aiosignal==1.3.1 +aiosignal==1.3.2 # via # -c requirements/_base.txt # aiohttp -alembic==1.9.4 - # via -r requirements/_test.in -async-timeout==4.0.2 +alembic==1.14.1 # via # -c requirements/_base.txt - # aiohttp -attrs==21.4.0 + # -r requirements/_test.in +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +attrs==25.1.0 # via # -c requirements/_base.txt # aiohttp - # jschema-to-python # jsonschema - # pytest - # sarif-om -aws-sam-translator==1.55.0 + # referencing +aws-sam-translator==1.95.0 # via cfn-lint -aws-xray-sdk==2.11.0 +aws-xray-sdk==2.14.0 # via moto -boto3==1.21.21 +blinker==1.9.0 + # via flask +boto3==1.37.3 # via - # -c requirements/./constraints.txt # aiobotocore # aws-sam-translator # moto -botocore==1.24.21 +botocore==1.37.3 # via # aiobotocore # aws-xray-sdk # boto3 # moto # s3transfer -certifi==2022.12.7 +botocore-stubs==1.37.4 + # via types-aiobotocore +certifi==2025.1.31 # via - # minio + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # requests -cffi==1.15.1 +cffi==1.17.1 # via cryptography -cfn-lint==0.72.6 +cfn-lint==1.27.0 # via moto -charset-normalizer==3.0.1 +charset-normalizer==3.4.1 # via # -c requirements/_base.txt - # aiohttp # requests -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # -r requirements/_test.in # flask -coverage==6.5.0 +coverage==7.6.12 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.1 +cryptography==44.0.2 # via # -c requirements/../../../requirements/constraints.txt + # joserfc # moto - # python-jose - # sshpubkeys -docker==6.0.1 +docker==7.1.0 # via # -r requirements/_test.in # moto -docopt==0.6.2 - # via coveralls -ecdsa==0.18.0 - # via - # moto - # python-jose - # sshpubkeys -exceptiongroup==1.1.0 - # via pytest -execnet==1.9.0 +execnet==2.1.1 # via pytest-xdist -faker==17.4.0 +faker==36.1.1 # via -r requirements/_test.in -flaky==3.7.0 +flaky==3.8.1 # via -r requirements/_test.in -flask==2.2.3 +flask==3.1.0 # via # flask-cors # moto -flask-cors==3.0.10 +flask-cors==5.0.1 # via moto -frozenlist==1.3.3 +frozenlist==1.5.0 # via # -c requirements/_base.txt # aiohttp # aiosignal -graphql-core==3.2.3 +graphql-core==3.2.6 # via moto -greenlet==2.0.2 +greenlet==3.1.1 # via # -c requirements/_base.txt # sqlalchemy -icdiff==2.0.6 +icdiff==2.0.7 # via pytest-icdiff -idna==3.4 +idna==3.10 # via # -c requirements/_base.txt # 
requests # yarl -importlib-metadata==6.0.0 - # via flask iniconfig==2.0.0 # via pytest -itsdangerous==2.1.2 +itsdangerous==2.2.0 # via flask -jinja2==3.1.2 +jinja2==3.1.5 # via # -c requirements/../../../requirements/constraints.txt # flask # moto jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -jschema-to-python==1.2.3 - # via cfn-lint -jsondiff==2.0.0 +joserfc==1.0.4 # via moto -jsonpatch==1.32 +jsonpatch==1.33 # via cfn-lint -jsonpickle==3.0.1 - # via jschema-to-python -jsonpointer==2.3 +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 # via jsonpatch -jsonschema==3.2.0 +jsonschema==4.23.0 # via # -c requirements/_base.txt # aws-sam-translator - # cfn-lint # openapi-schema-validator # openapi-spec-validator -junit-xml==1.9 - # via cfn-lint -mako==1.2.4 +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2024.10.1 # via # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +mako==1.3.9 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # alembic -markupsafe==2.1.2 +markupsafe==3.0.2 # via # -c requirements/_base.txt # jinja2 # mako # werkzeug -minio==7.0.4 +moto==5.1.4 # via -r requirements/_test.in -moto==4.1.3 - # via -r requirements/_test.in -multidict==6.0.4 +mpmath==1.3.0 + # via sympy +multidict==6.1.0 # via # -c requirements/_base.txt + # aiobotocore # aiohttp # yarl -networkx==2.8.8 +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +networkx==3.4.2 # via cfn-lint -openapi-schema-validator==0.2.3 +openapi-schema-validator==0.6.3 # via openapi-spec-validator -openapi-spec-validator==0.4.0 - # via - # -c requirements/./constraints.txt - # moto -packaging==23.0 +openapi-spec-validator==0.7.1 + # via moto +packaging==24.2 # via # -c requirements/_base.txt - # docker + # aioresponses # pytest # pytest-sugar -pbr==5.11.1 - # via - # jschema-to-python - # sarif-om -pluggy==1.0.0 +pathable==0.4.4 + # via jsonschema-path +pluggy==1.5.0 # via pytest +ply==3.11 + # via jsonpath-ng pprintpp==0.4.0 # via pytest-icdiff -pyasn1==0.4.8 +propcache==0.3.0 # via - # python-jose - # rsa -pycparser==2.21 - # via cffi -pyparsing==3.0.9 + # -c requirements/_base.txt + # aiohttp + # yarl +py-partiql-parser==0.6.1 # via moto -pyrsistent==0.19.3 +pycparser==2.22 + # via cffi +pydantic==2.10.6 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # jsonschema -pytest==7.2.1 + # aws-sam-translator +pydantic-core==2.27.2 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.2.1 + # via moto +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-icdiff # pytest-instafail - # pytest-lazy-fixture # pytest-mock # pytest-sugar # pytest-xdist -pytest-aiohttp==1.0.4 - # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-lazy-fixture==0.6.3 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -pytest-xdist==3.2.0 +pytest-xdist==3.6.1 # via -r requirements/_test.in 
-python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt + # aiobotocore # botocore - # faker # moto -python-dotenv==1.0.0 - # via -r requirements/_test.in -python-jose==3.3.0 - # via moto -pyyaml==5.4.1 +python-dotenv==1.0.1 # via # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # cfn-lint + # jsonschema-path # moto - # openapi-spec-validator -requests==2.28.2 + # responses +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 # via + # -c requirements/_base.txt # -r requirements/_test.in - # coveralls # docker + # jsonschema-path # moto # responses -responses==0.22.0 +responses==0.25.6 # via moto -rsa==4.9 +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.23.1 # via - # -c requirements/../../../requirements/constraints.txt - # python-jose -s3transfer==0.5.2 + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 # via boto3 -sarif-om==1.0.4 - # via cfn-lint -six==1.16.0 +setuptools==75.8.2 + # via moto +six==1.17.0 # via # -c requirements/_base.txt - # ecdsa - # flask-cors - # jsonschema - # junit-xml # python-dateutil -sqlalchemy==1.4.46 + # rfc3339-validator +sqlalchemy==1.4.54 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt + # -r requirements/_test.in # alembic -sshpubkeys==3.3.1 - # via moto -termcolor==2.2.0 +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +sympy==1.13.3 + # via cfn-lint +termcolor==2.5.0 # via pytest-sugar -toml==0.10.2 - # via responses -tomli==2.0.1 - # via - # coverage - # pytest -types-toml==0.10.8.5 - # via responses -typing-extensions==4.5.0 +types-aiobotocore==2.21.0 + # via -r requirements/_test.in +types-aiobotocore-s3==2.21.0 + # via types-aiobotocore +types-aiofiles==24.1.0.20241221 + # via -r requirements/_test.in +types-awscrt==0.23.10 + # via botocore-stubs +types-requests==2.32.0.20250301 + # via types-tqdm +types-tqdm==4.67.0.20250301 + # via -r requirements/_test.in +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # aioitertools -urllib3==1.26.14 + # alembic + # aws-sam-translator + # cfn-lint + # mypy + # pydantic + # pydantic-core + # sqlalchemy2-stubs + # types-aiobotocore + # types-aiobotocore-s3 +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # botocore # docker - # minio # requests # responses -websocket-client==1.5.1 - # via docker -werkzeug==2.2.3 + # types-requests +werkzeug==3.1.3 # via # flask + # flask-cors # moto -wrapt==1.15.0 +wrapt==1.17.2 # via + # -c requirements/_base.txt # aiobotocore # aws-xray-sdk -xmltodict==0.13.0 +xmltodict==0.14.2 # via moto -yarl==1.8.2 +yarl==1.18.3 # via # -c requirements/_base.txt # aiohttp -zipp==3.15.0 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/packages/simcore-sdk/requirements/_tools.txt b/packages/simcore-sdk/requirements/_tools.txt index 054c34e61e0..57c8ea03246 100644 --- a/packages/simcore-sdk/requirements/_tools.txt +++ b/packages/simcore-sdk/requirements/_tools.txt @@ -1,92 +1,90 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# 
pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via + # -c requirements/_base.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt # pre-commit -tomli==2.0.1 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 # via # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.12.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.2 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/packages/simcore-sdk/requirements/ci.txt b/packages/simcore-sdk/requirements/ci.txt index 5b661d543e3..18aaf5e93a2 100644 --- a/packages/simcore-sdk/requirements/ci.txt +++ b/packages/simcore-sdk/requirements/ci.txt @@ -9,16 +9,18 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt +--requirement _tools.txt +--requirement _tools.txt # installs this repo's packages -../postgres-database -../pytest-simcore/ -../models-library/ -../settings-library/ - -# FIXME: these dependencies should be removed -../service-library/ +simcore-postgres-database @ ../postgres-database +pytest-simcore @ ../pytest-simcore +simcore-common-library @ ../common-library +simcore-models-library @ ../models-library +simcore-settings-library @ ../settings-library/ +simcore-service-library @ ../service-library/ # installs current package -. +simcore-sdk @ . 
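The `name @ location` entries above are PEP 508-style direct references, so each in-repo dependency is pinned to an explicit distribution name instead of a bare relative path. As a minimal sketch (purely illustrative, not part of the diff, and assuming requirements/ci.txt was installed into the active environment), the registered names can be checked with importlib.metadata, the same stdlib API that simcore_sdk/__init__.py switches to below:

from importlib.metadata import PackageNotFoundError, version

# Distribution names copied from the direct references in ci.txt above.
for dist in (
    "simcore-postgres-database",
    "pytest-simcore",
    "simcore-common-library",
    "simcore-models-library",
    "simcore-settings-library",
    "simcore-service-library",
    "simcore-sdk",
):
    try:
        print(dist, version(dist))
    except PackageNotFoundError:  # not installed in this environment
        print(dist, "missing")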
diff --git a/packages/simcore-sdk/requirements/constraints.txt b/packages/simcore-sdk/requirements/constraints.txt index ef1bba96d1d..e69de29bb2d 100644 --- a/packages/simcore-sdk/requirements/constraints.txt +++ b/packages/simcore-sdk/requirements/constraints.txt @@ -1,19 +0,0 @@ -# There are incompatible versions in the resolved dependencies: -# boto3==1.21.21 (from -c requirements/./constraints.txt (line 3)) -# boto3<1.24.60,>=1.24.59 (from aiobotocore[boto3]==2.4.0->aioboto3==10.1.0->-r requirements/_test.in (line 13)) -# boto3>=1.9.201 (from moto[server]==4.0.1->-r requirements/_test.in (line 18)) -aioboto3<=9.6.0 -# There are incompatible versions in the resolved dependencies: -# botocore>=1.12.201 (from moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore>=1.11.3 (from aws-xray-sdk==2.10.0->moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore<1.28.0,>=1.27.95 (from boto3==1.24.95->moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore<1.24.22,>=1.24.21 (from aiobotocore[boto3]==2.3.0->aioboto3==9.6.0->-r requirements/_test.in (line 13)) -boto3<=1.21.21 - -# Not specified in openapi-core setup, but it breaks openapi-core==0.12.0 -# we have a very old version of openapi-core that is causing further troubles -# specifically when we want to have nullable objects -# It does not follow standard 3.0 correctly -# SEE how to specify nullable object in https://stackoverflow.com/questions/40920441/how-to-specify-a-property-can-be-null-or-a-reference-with-swagger - -openapi-spec-validator<0.5.0 diff --git a/packages/simcore-sdk/requirements/dev.txt b/packages/simcore-sdk/requirements/dev.txt index b67f43d8690..c7e7f45b7ed 100644 --- a/packages/simcore-sdk/requirements/dev.txt +++ b/packages/simcore-sdk/requirements/dev.txt @@ -15,6 +15,7 @@ --editable ../pytest-simcore/ --editable ../postgres-database +--editable ../common-library/ --editable ../models-library/ --editable ../settings-library/ diff --git a/packages/simcore-sdk/setup.cfg b/packages/simcore-sdk/setup.cfg index dab6f4c4e81..a547ef01d2a 100644 --- a/packages/simcore-sdk/setup.cfg +++ b/packages/simcore-sdk/setup.cfg @@ -15,3 +15,14 @@ test = pytest [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
+ testit: "marks test to run during development" + heavy_load: "mark tests that require large amount of data" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/packages/simcore-sdk/setup.py b/packages/simcore-sdk/setup.py index d8d36cb39cf..23097f997e5 100644 --- a/packages/simcore-sdk/setup.py +++ b/packages/simcore-sdk/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -31,17 +30,18 @@ def read_reqs(reqs_path: Path) -> Set[str]: } ) -SETUP = dict( - name="simcore-sdk", - version=Path(CURRENT_DIR / "VERSION").read_text().strip(), - packages=find_packages(where="src"), - package_dir={"": "src"}, - python_requires=">=3.6", - install_requires=INSTALL_REQUIREMENTS, - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - test_suite="tests", -) +SETUP = { + "name": "simcore-sdk", + "version": Path(CURRENT_DIR / "VERSION").read_text().strip(), + "packages": find_packages(where="src"), + "package_data": {"": ["py.typed"]}, + "package_dir": {"": "src"}, + "python_requires": ">=3.10", + "install_requires": INSTALL_REQUIREMENTS, + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "test_suite": "tests", +} if __name__ == "__main__": diff --git a/packages/simcore-sdk/src/simcore_sdk/__init__.py b/packages/simcore-sdk/src/simcore_sdk/__init__.py index 9faf154bb7e..cf884cf3d1d 100644 --- a/packages/simcore-sdk/src/simcore_sdk/__init__.py +++ b/packages/simcore-sdk/src/simcore_sdk/__init__.py @@ -2,6 +2,6 @@ """ -import pkg_resources +from importlib.metadata import version -__version__: str = pkg_resources.get_distribution("simcore-sdk").version +__version__: str = version("simcore-sdk") diff --git a/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py b/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py index ec0a5dd6f9b..db552f193b7 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_data/data_manager.py @@ -1,219 +1,334 @@ import logging -from contextlib import AsyncExitStack from pathlib import Path -from shutil import move from tempfile import TemporaryDirectory -from typing import Optional, Union -from models_library.projects_nodes_io import StorageFileID -from pydantic import parse_obj_as -from servicelib.archiving_utils import archive_dir, unarchive_dir -from servicelib.logging_utils import log_catch, log_context +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, StorageFileID +from models_library.service_settings_labels import LegacyState +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.archiving_utils import unarchive_dir +from servicelib.logging_utils import log_context from servicelib.progress_bar import ProgressBarData +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings -from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION from ..node_ports_common import filemanager -from ..node_ports_common.filemanager import LogRedirectCB +from ..node_ports_common.constants import SIMCORE_LOCATION +from ..node_ports_common.dbmanager import DBManager +from ..node_ports_common.file_io_utils import LogRedirectCB -log = logging.getLogger(__name__) 
+_logger = logging.getLogger(__name__) -def _create_s3_object( - project_id: str, node_uuid: str, file_path: Union[Path, str] +def __create_s3_object_key( + project_id: ProjectID, node_uuid: NodeID, file_path: Path | str ) -> StorageFileID: file_name = file_path.name if isinstance(file_path, Path) else file_path - return parse_obj_as(StorageFileID, f"{project_id}/{node_uuid}/{file_name}") + return TypeAdapter(StorageFileID).validate_python( + f"{project_id}/{node_uuid}/{file_name}" + ) -async def _push_file( - user_id: int, - project_id: str, - node_uuid: str, - file_path: Path, - *, - rename_to: Optional[str], - io_log_redirect_cb: Optional[LogRedirectCB], - r_clone_settings: Optional[RCloneSettings] = None, - progress_bar: ProgressBarData, -) -> None: - store_id = SIMCORE_LOCATION - s3_object = _create_s3_object( - project_id, node_uuid, rename_to if rename_to else file_path - ) - log.info("uploading %s to S3 to %s...", file_path.name, s3_object) - await filemanager.upload_file( - user_id=user_id, - store_id=store_id, - store_name=None, - s3_object=s3_object, - file_to_upload=file_path, - r_clone_settings=r_clone_settings, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar, - ) - log.info("%s successfuly uploaded", file_path) +def __get_s3_name(path: Path, *, is_archive: bool) -> str: + return f"{path.stem}.zip" if is_archive else path.stem -async def push( - user_id: int, - project_id: str, - node_uuid: str, - file_or_folder: Path, +async def _push_directory( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + source_path: Path, *, - io_log_redirect_cb: Optional[LogRedirectCB], - rename_to: Optional[str] = None, - r_clone_settings: Optional[RCloneSettings] = None, - archive_exclude_patterns: Optional[set[str]] = None, + io_log_redirect_cb: LogRedirectCB, + r_clone_settings: RCloneSettings, + exclude_patterns: set[str] | None = None, progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None, ) -> None: - if file_or_folder.is_file(): - return await _push_file( - user_id, - project_id, - node_uuid, - file_or_folder, - rename_to=rename_to, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar, - ) - # we have a folder, so we create a compressed file - async with AsyncExitStack() as stack: - stack.enter_context(log_catch(log)) - stack.enter_context( - log_context(log, logging.INFO, "pushing %s", file_or_folder) - ) - tmp_dir_name = stack.enter_context( - TemporaryDirectory() # pylint: disable=consider-using-with - ) - sub_progress = await stack.enter_async_context( - progress_bar.sub_progress(steps=2) - ) - - # compress the files - archive_file_path = ( - Path(tmp_dir_name) / f"{rename_to or file_or_folder.stem}.zip" - ) - if io_log_redirect_cb: - await io_log_redirect_cb( - f"archiving {file_or_folder} into {archive_file_path}, please wait..." - ) - await archive_dir( - dir_to_compress=file_or_folder, - destination=archive_file_path, - compress=False, # disabling compression for faster speeds - store_relative_path=True, - exclude_patterns=archive_exclude_patterns, - progress_bar=sub_progress, - ) - if io_log_redirect_cb: - await io_log_redirect_cb( - f"archiving {file_or_folder} into {archive_file_path} completed." 
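The helpers above now build S3 object keys with pydantic v2's `TypeAdapter` instead of the removed `parse_obj_as`. A minimal sketch of that call, with made-up project and node UUIDs:

```python
from uuid import uuid4

from models_library.projects_nodes_io import StorageFileID
from pydantic import TypeAdapter

project_id, node_id = uuid4(), uuid4()
s3_object = TypeAdapter(StorageFileID).validate_python(
    f"{project_id}/{node_id}/workspace.zip"
)
print(s3_object)  # e.g. "<project-uuid>/<node-uuid>/workspace.zip"
```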
- ) - await _push_file( - user_id, - project_id, - node_uuid, - archive_file_path, - rename_to=None, + s3_object = __create_s3_object_key(project_id, node_uuid, source_path) + with log_context( + _logger, logging.INFO, f"uploading {source_path.name} to S3 to {s3_object}" + ): + await filemanager.upload_path( + user_id=user_id, + store_id=SIMCORE_LOCATION, + store_name=None, + s3_object=s3_object, + path_to_upload=source_path, r_clone_settings=r_clone_settings, io_log_redirect_cb=io_log_redirect_cb, - progress_bar=sub_progress, + progress_bar=progress_bar, + exclude_patterns=exclude_patterns, + aws_s3_cli_settings=aws_s3_cli_settings, ) -async def _pull_file( - user_id: int, - project_id: str, - node_uuid: str, - file_path: Path, +async def _pull_directory( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + destination_path: Path, *, - io_log_redirect_cb: Optional[LogRedirectCB], - save_to: Optional[Path] = None, + io_log_redirect_cb: LogRedirectCB, + r_clone_settings: RCloneSettings, progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None, + save_to: Path | None = None, ) -> None: - destination_path = file_path if save_to is None else save_to - s3_object = _create_s3_object(project_id, node_uuid, file_path) - log.info("pulling data from %s to %s...", s3_object, file_path) - downloaded_file = await filemanager.download_file_from_s3( - user_id=user_id, - store_id=SIMCORE_LOCATION, - store_name=None, - s3_object=s3_object, - local_folder=destination_path.parent, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar, - ) - if downloaded_file != destination_path: - destination_path.unlink(missing_ok=True) - move(f"{downloaded_file}", destination_path) - log.info("completed pull of %s.", destination_path) - - -def _get_archive_name(path: Path) -> str: - return f"{path.stem}.zip" + save_to_path = destination_path if save_to is None else save_to + s3_object = __create_s3_object_key(project_id, node_uuid, destination_path) + with log_context( + _logger, logging.INFO, f"pulling data from {s3_object} to {save_to_path}" + ): + await filemanager.download_path_from_s3( + user_id=user_id, + store_id=SIMCORE_LOCATION, + store_name=None, + s3_object=s3_object, + local_path=save_to_path, + io_log_redirect_cb=io_log_redirect_cb, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=aws_s3_cli_settings, + ) -async def pull( - user_id: int, - project_id: str, - node_uuid: str, - file_or_folder: Path, +async def _pull_legacy_archive( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + destination_path: Path, *, - io_log_redirect_cb: Optional[LogRedirectCB], - save_to: Optional[Path] = None, + io_log_redirect_cb: LogRedirectCB, progress_bar: ProgressBarData, + legacy_destination_path: Path | None = None, ) -> None: - if file_or_folder.is_file(): - return await _pull_file( - user_id, - project_id, - node_uuid, - file_or_folder, - save_to=save_to, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar, - ) - # we have a folder, so we need somewhere to extract it to - async with progress_bar.sub_progress(steps=2) as sub_prog: + # NOTE: the legacy way of storing states was as zip archives + archive_path = legacy_destination_path or destination_path + async with progress_bar.sub_progress( + steps=2, description=f"pulling {archive_path.name}" + ) as sub_prog: with TemporaryDirectory() as tmp_dir_name: - archive_file = Path(tmp_dir_name) / _get_archive_name(file_or_folder) - await _pull_file( - user_id, - 
project_id, - node_uuid, - archive_file, + archive_file = Path(tmp_dir_name) / __get_s3_name( + archive_path, is_archive=True + ) + + s3_object = __create_s3_object_key(project_id, node_uuid, archive_file) + _logger.info("pulling data from %s to %s...", s3_object, archive_file) + downloaded_file = await filemanager.download_path_from_s3( + user_id=user_id, + store_id=SIMCORE_LOCATION, + store_name=None, + s3_object=s3_object, + local_path=archive_file.parent, io_log_redirect_cb=io_log_redirect_cb, + r_clone_settings=None, progress_bar=sub_prog, + aws_s3_cli_settings=None, ) + _logger.info("completed pull of %s.", archive_path) - destination_folder = file_or_folder if save_to is None else save_to if io_log_redirect_cb: await io_log_redirect_cb( - f"unarchiving {archive_file} into {destination_folder}, please wait..." + f"unarchiving {downloaded_file} into {destination_path}, please wait..." ) await unarchive_dir( - archive_to_extract=archive_file, - destination_folder=destination_folder, + archive_to_extract=downloaded_file, + destination_folder=destination_path, progress_bar=sub_prog, log_cb=io_log_redirect_cb, ) if io_log_redirect_cb: await io_log_redirect_cb( - f"unarchiving {archive_file} into {destination_folder} completed." + f"unarchiving {downloaded_file} into {destination_path} completed." ) -async def exists( - user_id: int, project_id: str, node_uuid: str, file_path: Path +async def _state_metadata_entry_exists( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + path: Path, + *, + is_archive: bool, ) -> bool: """ :returns True if an entry is present inside the files_metadata else False """ - s3_object = _create_s3_object(project_id, node_uuid, _get_archive_name(file_path)) - log.debug("Checking if s3_object='%s' is present", s3_object) + s3_object = __create_s3_object_key( + project_id, node_uuid, __get_s3_name(path, is_archive=is_archive) + ) + _logger.debug("Checking if s3_object='%s' is present", s3_object) return await filemanager.entry_exists( user_id=user_id, store_id=SIMCORE_LOCATION, s3_object=s3_object, + is_directory=not is_archive, + ) + + +async def _delete_legacy_archive( + project_id: ProjectID, node_uuid: NodeID, path: Path +) -> None: + """removes the .zip state archive from storage""" + s3_object = __create_s3_object_key( + project_id, node_uuid, __get_s3_name(path, is_archive=True) + ) + _logger.debug("Deleting s3_object='%s' is archive", s3_object) + + # NOTE: if service is opened by a person which the users shared it with, + # they will not have the permission to delete the node + # Removing it via it's owner allows to always have access to the delete operation. 
+ owner_id = await DBManager().get_project_owner_user_id(project_id) + await filemanager.delete_file( + user_id=owner_id, store_id=SIMCORE_LOCATION, s3_object=s3_object + ) + + +async def push( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + source_path: Path, + *, + io_log_redirect_cb: LogRedirectCB, + r_clone_settings: RCloneSettings, + exclude_patterns: set[str] | None = None, + progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None, + legacy_state: LegacyState | None, +) -> None: + """pushes and removes the legacy archive if present""" + + await _push_directory( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + source_path=source_path, + r_clone_settings=r_clone_settings, + exclude_patterns=exclude_patterns, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=progress_bar, + aws_s3_cli_settings=aws_s3_cli_settings, + ) + + archive_exists = await _state_metadata_entry_exists( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=source_path, + is_archive=True, + ) + if archive_exists: + with log_context(_logger, logging.INFO, "removing legacy archive"): + await _delete_legacy_archive( + project_id=project_id, + node_uuid=node_uuid, + path=source_path, + ) + + if legacy_state: + legacy_archive_exists = await _state_metadata_entry_exists( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=legacy_state.old_state_path, + is_archive=True, + ) + if legacy_archive_exists: + with log_context( + _logger, logging.INFO, f"removing legacy archive in {legacy_state}" + ): + await _delete_legacy_archive( + project_id=project_id, + node_uuid=node_uuid, + path=legacy_state.old_state_path, + ) + + +async def pull( + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + destination_path: Path, + *, + io_log_redirect_cb: LogRedirectCB, + r_clone_settings: RCloneSettings, + progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None, + legacy_state: LegacyState | None, +) -> None: + """restores the state folder""" + + if legacy_state and legacy_state.new_state_path == destination_path: + _logger.info( + "trying to restore from legacy_state=%s, destination_path=%s", + legacy_state, + destination_path, + ) + legacy_state_exists = await _state_metadata_entry_exists( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=legacy_state.old_state_path, + is_archive=True, + ) + _logger.info("legacy_state_exists=%s", legacy_state_exists) + if legacy_state_exists: + with log_context( + _logger, + logging.INFO, + f"restoring data from legacy archive in {legacy_state}", + ): + await _pull_legacy_archive( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + destination_path=legacy_state.new_state_path, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=progress_bar, + legacy_destination_path=legacy_state.old_state_path, + ) + return + + state_archive_exists = await _state_metadata_entry_exists( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=destination_path, + is_archive=True, + ) + if state_archive_exists: + with log_context(_logger, logging.INFO, "restoring data from legacy archive"): + await _pull_legacy_archive( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + destination_path=destination_path, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=progress_bar, + ) + return + + state_directory_exists = await _state_metadata_entry_exists( + user_id=user_id, + project_id=project_id, + 
node_uuid=node_uuid, + path=destination_path, + is_archive=False, ) + if state_directory_exists: + await _pull_directory( + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + destination_path=destination_path, + io_log_redirect_cb=io_log_redirect_cb, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=aws_s3_cli_settings, + ) + return + + _logger.debug("No content previously saved for '%s'", destination_path) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py new file mode 100644 index 00000000000..484380fded7 --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_filemanager_utils.py @@ -0,0 +1,153 @@ +import logging +from typing import cast + +from aiohttp import ClientError, ClientSession +from models_library.api_schemas_storage.storage_schemas import ( + ETag, + FileUploadCompleteFutureResponse, + FileUploadCompleteResponse, + FileUploadCompleteState, + FileUploadCompletionBody, + UploadedPart, +) +from models_library.generics import Envelope +from models_library.projects_nodes_io import LocationID, LocationName +from models_library.users import UserID +from models_library.utils.fastapi_encoders import jsonable_encoder +from pydantic import AnyUrl, TypeAdapter +from settings_library.node_ports import NodePortsSettings +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed +from yarl import URL + +from . import exceptions, storage_client +from .storage_endpoint import get_basic_auth, is_storage_secure + +_logger = logging.getLogger(__name__) + + +async def _get_location_id_from_location_name( + user_id: UserID, + store: LocationName, + session: ClientSession, +) -> LocationID: + resp = await storage_client.list_storage_locations(session=session, user_id=user_id) + for location in resp: + if location.name == store: + return cast(LocationID, location.id) # mypy wants it + # location id not found + raise exceptions.S3InvalidStore(store) + + +def _get_https_link_if_storage_secure(url: str) -> str: + # NOTE: links generated by storage are http only. 
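To summarize the rewritten `data_manager.pull()` above: it first checks for a legacy zip archive (at `legacy_state.old_state_path`, and only when `legacy_state.new_state_path` equals the destination), then for a legacy archive next to the destination itself, then for directory-style state, and otherwise starts empty. The sketch below is an illustration only; the boolean flags stand in for the `_state_metadata_entry_exists()` checks.

```python
def choose_restore_strategy(
    *,
    legacy_state_archive_exists: bool,  # zip found at legacy_state.old_state_path
    legacy_archive_exists: bool,        # zip found next to destination_path
    state_directory_exists: bool,       # directory-style state found
) -> str:
    if legacy_state_archive_exists:
        return "unzip legacy archive into legacy_state.new_state_path"
    if legacy_archive_exists:
        return "unzip legacy archive into destination_path"
    if state_directory_exists:
        return "sync the state directory (r_clone or aws s3 cli)"
    return "nothing previously saved; start empty"


print(
    choose_restore_strategy(
        legacy_state_archive_exists=False,
        legacy_archive_exists=True,
        state_directory_exists=False,
    )
)
```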
+ # WEBSERVER -> STORAGE (http requests) + # DY-SIDECAR (simcore-sdk) -> STORAGE (httpS requests) + # https://github.com/ITISFoundation/osparc-simcore/issues/5390 + parsed_url = URL(url) + if bool(is_storage_secure()): + return f"{parsed_url.with_scheme('https')}" + + return url + + +async def complete_upload( + session: ClientSession, + upload_completion_link: AnyUrl, + parts: list[UploadedPart], + *, + is_directory: bool, +) -> ETag | None: + """completes a potentially multipart upload in AWS + NOTE: it can take several minutes to finish, see [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) + it can take several minutes + :raises ValueError: _description_ + :raises exceptions.S3TransferError: _description_ + :rtype: ETag + """ + async with session.post( + _get_https_link_if_storage_secure(f"{upload_completion_link}"), + json=jsonable_encoder(FileUploadCompletionBody(parts=parts)), + auth=get_basic_auth(), + ) as resp: + resp.raise_for_status() + # now poll for state + file_upload_complete_response = TypeAdapter( + Envelope[FileUploadCompleteResponse] + ).validate_python(await resp.json()) + assert file_upload_complete_response.data # nosec + state_url = _get_https_link_if_storage_secure( + f"{file_upload_complete_response.data.links.state}" + ) + _logger.info("completed upload of %s", f"{len(parts)} parts, received {state_url}") + + async for attempt in AsyncRetrying( + reraise=True, + wait=wait_fixed(1), + stop=stop_after_delay( + NodePortsSettings.create_from_envs().NODE_PORTS_MULTIPART_UPLOAD_COMPLETION_TIMEOUT_S + ), + retry=retry_if_exception_type(ValueError), + before_sleep=before_sleep_log(_logger, logging.DEBUG), + ): + with attempt: + async with session.post(state_url, auth=get_basic_auth()) as resp: + resp.raise_for_status() + future_enveloped = TypeAdapter( + Envelope[FileUploadCompleteFutureResponse] + ).validate_python(await resp.json()) + assert future_enveloped.data # nosec + if future_enveloped.data.state == FileUploadCompleteState.NOK: + msg = "upload not ready yet" + raise ValueError(msg) + if is_directory: + assert future_enveloped.data.e_tag is None # nosec + return None + + assert future_enveloped.data.e_tag # nosec + _logger.debug( + "multipart upload completed in %s, received %s", + attempt.retry_state.retry_object.statistics, + f"{future_enveloped.data.e_tag=}", + ) + return future_enveloped.data.e_tag + msg = f"Could not complete the upload using the upload_completion_link={upload_completion_link!r}" + raise exceptions.S3TransferError(msg) + + +async def resolve_location_id( + client_session: ClientSession, + user_id: UserID, + store_name: LocationName | None, + store_id: LocationID | None, +) -> LocationID: + if store_name is None and store_id is None: + msg = f"both {store_name=} and {store_id=} are None" + raise exceptions.NodeportsException(msg) + + if store_name is not None: + store_id = await _get_location_id_from_location_name( + user_id, store_name, client_session + ) + assert store_id is not None # nosec + return store_id + + +async def abort_upload( + session: ClientSession, abort_upload_link: AnyUrl, *, reraise_exceptions: bool +) -> None: + # abort the upload correctly, so it can revert back to last version + try: + async with session.post( + _get_https_link_if_storage_secure(f"{abort_upload_link}"), + auth=get_basic_auth(), + ) as resp: + resp.raise_for_status() + except ClientError: + _logger.warning("Error while aborting upload", exc_info=True) + if reraise_exceptions: + raise + 
_logger.warning("Upload aborted") diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_utils.py new file mode 100644 index 00000000000..56920f978f4 --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/_utils.py @@ -0,0 +1,7 @@ +from abc import abstractmethod + + +class BaseLogParser: + @abstractmethod + async def __call__(self, logs: str) -> None: + ... diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli.py new file mode 100644 index 00000000000..d64f0d90355 --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli.py @@ -0,0 +1,346 @@ +import asyncio +import contextlib +import logging +import os +import shlex +from asyncio.streams import StreamReader +from pathlib import Path + +from aiocache import cached # type: ignore[import-untyped] +from common_library.errors_classes import OsparcErrorMixin +from pydantic import AnyUrl, ByteSize +from servicelib.progress_bar import ProgressBarData +from servicelib.utils import logged_gather +from settings_library.aws_s3_cli import AwsS3CliSettings + +from ._utils import BaseLogParser +from .aws_s3_cli_utils import SyncAwsCliS3ProgressLogParser +from .r_clone_utils import CommandResultCaptureParser, DebugLogParser + +_logger = logging.getLogger(__name__) + + +_OSPARC_SYMLINK_EXTENSION = ".rclonelink" # named `rclonelink` to maintain backwards + + +class BaseAwsS3CliError(OsparcErrorMixin, RuntimeError): + ... + + +class AwsS3CliFailedError(BaseAwsS3CliError): + msg_template: str = ( + "Command {command} finished with exit code={returncode}:\n{command_output}" + ) + + +class AwsS3CliPathIsAFileError(BaseAwsS3CliError): + msg_template: str = ( + "Provided path '{local_directory_path}' is a file. Expects a directory!" + ) + + +class CRLFStreamReaderWrapper: + """ + A wrapper for asyncio streams that converts carriage return characters to newlines. + + When the AWS S3 CLI provides progress updates, it uses carriage return ('\r') characters + to overwrite the output. This wrapper converts '\r' to '\n' to standardize line endings, + allowing the stream to be read line by line using newlines as delimiters. 
+ """ + + def __init__(self, reader): + self.reader = reader + self.buffer = bytearray() + + async def readline(self): + while True: + # Check if there's a newline character in the buffer + if b"\n" in self.buffer: + line, self.buffer = self.buffer.split(b"\n", 1) + return line + b"\n" + # Read a chunk of data from the stream + chunk = await self.reader.read(1024) + if not chunk: + # If no more data is available, return the buffer as the final line + line = self.buffer + self.buffer = bytearray() + return line + # Replace \r with \n in the chunk + chunk = chunk.replace(b"\r", b"\n") + self.buffer.extend(chunk) + + +async def _read_stream( + stream: StreamReader, aws_s3_cli_log_parsers: list[BaseLogParser] +): + reader_wrapper = CRLFStreamReaderWrapper(stream) + while True: + line: bytes = await reader_wrapper.readline() + if line: + decoded_line = line.decode() + await logged_gather( + *[parser(decoded_line) for parser in aws_s3_cli_log_parsers] + ) + else: + break + + +@cached() +async def is_aws_s3_cli_available(aws_s3_cli_settings: AwsS3CliSettings | None) -> bool: + """returns: True if the `aws` cli is installed and a configuration is provided""" + if aws_s3_cli_settings is None: + return False + try: + await _async_aws_cli_command( + "aws", "--version", aws_s3_cli_settings=aws_s3_cli_settings + ) + return True + except AwsS3CliFailedError: + return False + + +async def _async_aws_cli_command( + *cmd: str, + aws_s3_cli_settings: AwsS3CliSettings, + aws_cli_s3_log_parsers: list[BaseLogParser] | None = None, +) -> str: + str_cmd = " ".join(cmd) + proc = await asyncio.create_subprocess_shell( + str_cmd, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + env={ + "AWS_ACCESS_KEY_ID": aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ACCESS_KEY, + "AWS_SECRET_ACCESS_KEY": aws_s3_cli_settings.AWS_S3_CLI_S3.S3_SECRET_KEY, + "AWS_REGION": aws_s3_cli_settings.AWS_S3_CLI_S3.S3_REGION, + }, + ) + + command_result_parser = CommandResultCaptureParser() + aws_cli_s3_log_parsers = ( + [*aws_cli_s3_log_parsers, command_result_parser] + if aws_cli_s3_log_parsers + else [command_result_parser] + ) + + assert proc.stdout # nosec + await asyncio.wait( + [asyncio.create_task(_read_stream(proc.stdout, [*aws_cli_s3_log_parsers]))] + ) + + _stdout, _stderr = await proc.communicate() + + command_output = command_result_parser.get_output() + if proc.returncode != 0: + raise AwsS3CliFailedError( + command=str_cmd, + command_output=command_output, + returncode=proc.returncode, + ) + + _logger.debug("'%s' result:\n%s", str_cmd, command_output) + return command_output + + +def _get_exclude_filters(exclude_patterns: set[str] | None) -> list[str]: + if exclude_patterns is None: + return [] + + exclude_options: list[str] = [] + for entry in exclude_patterns: + exclude_options.append("--exclude") + exclude_options.append(entry.replace("*", "**")) + + return exclude_options + + +async def _get_s3_folder_size( + aws_s3_cli_settings: AwsS3CliSettings, + *, + s3_path: str, +) -> ByteSize: + cli_command = [ + "aws", + "s3", + "ls", + "--summarize", + "--recursive", + s3_path, + "| grep 'Total Size' | awk '{print $3}'", + ] + + if aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ENDPOINT: + cli_command.insert( + 1, f"--endpoint-url {aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ENDPOINT}" + ) + + result = await _async_aws_cli_command( + *cli_command, aws_s3_cli_settings=aws_s3_cli_settings + ) + return ByteSize(result.strip()) + + +def _get_file_size_and_manage_symlink(path: Path) -> ByteSize: + if 
path.is_symlink(): + # Convert symlink to a .rclonelink file that can be stored in the S3 + target_path = f"{path.readlink()}" + _name = path.name + _OSPARC_SYMLINK_EXTENSION + + textfile_path = path.parent / _name + textfile_path.write_text(target_path) + return ByteSize(textfile_path.stat().st_size) + return ByteSize(path.stat().st_size) + + +async def _get_local_folder_size_and_manage_symlink(local_path: Path) -> ByteSize: + total_size = 0 + for dirpath, _, filenames in os.walk(local_path): + for filename in filenames: + file_path = Path(dirpath) / filename + total_size += _get_file_size_and_manage_symlink(Path(file_path)) + return ByteSize(total_size) + + +async def _sync_sources( + aws_s3_cli_settings: AwsS3CliSettings, + progress_bar: ProgressBarData, + *, + source: str, + destination: str, + local_dir: Path, + exclude_patterns: set[str] | None, + debug_logs: bool, +) -> None: + + if source.startswith("s3://"): + folder_size: ByteSize = await _get_s3_folder_size( + aws_s3_cli_settings, s3_path=shlex.quote(source) + ) + else: + folder_size = await _get_local_folder_size_and_manage_symlink(Path(source)) + + cli_command = [ + "aws", + "s3", + "sync", + "--delete", + shlex.quote(source), + shlex.quote(destination), + # filter options + *_get_exclude_filters(exclude_patterns), + "--no-follow-symlinks", + ] + + if aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ENDPOINT: + cli_command.insert( + 1, f"--endpoint-url {aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ENDPOINT}" + ) + + async with progress_bar.sub_progress( + steps=folder_size, + progress_unit="Byte", + description=f"transferring {local_dir.name}", + ) as sub_progress: + aws_s3_cli_log_parsers: list[BaseLogParser] = ( + [DebugLogParser()] if debug_logs else [] + ) + aws_s3_cli_log_parsers.append(SyncAwsCliS3ProgressLogParser(sub_progress)) + + await _async_aws_cli_command( + *cli_command, + aws_s3_cli_settings=aws_s3_cli_settings, + aws_cli_s3_log_parsers=aws_s3_cli_log_parsers, + ) + + +def _raise_if_directory_is_file(local_directory_path: Path) -> None: + if local_directory_path.exists() and local_directory_path.is_file(): + raise AwsS3CliPathIsAFileError(local_directory_path=local_directory_path) + + +@contextlib.asynccontextmanager +async def remove_local_osparclinks(local_directory_path): + try: + yield + finally: + # Remove the temporary created .rclonelink files generated by `_get_local_folder_size_and_manage_symlink` + for textfile_path in local_directory_path.rglob( + f"*{_OSPARC_SYMLINK_EXTENSION}" + ): + textfile_path.unlink() + + +@contextlib.asynccontextmanager +async def convert_osparclinks_to_original_symlinks(local_directory_path): + try: + yield + finally: + # Convert .rclonelink files to real symlink files after they were downloaded from S3 + for textfile_path in local_directory_path.rglob( + f"*{_OSPARC_SYMLINK_EXTENSION}" + ): + symlink_path = textfile_path.with_suffix("") + target_path = textfile_path.read_text().strip() + os.symlink(target_path, symlink_path) + textfile_path.unlink() + + +async def sync_local_to_s3( + aws_s3_cli_settings: AwsS3CliSettings, + progress_bar: ProgressBarData, + *, + local_directory_path: Path, + upload_s3_link: AnyUrl, + exclude_patterns: set[str] | None = None, + debug_logs: bool = False, +) -> None: + """transfer the contents of a local directory to an s3 path + + :raises e: AwsS3CliFailedError + """ + _raise_if_directory_is_file(local_directory_path) + + upload_s3_path = upload_s3_link + _logger.debug(" %s; %s", f"{upload_s3_link=}", f"{upload_s3_path=}") + + async with 
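The `.rclonelink` helpers above materialise symlinks as small text files holding their target before upload and restore them after download. A standalone sketch of that round-trip on a Unix-like filesystem (throwaway temp paths, not the real helpers):

```python
import os
import tempfile
from pathlib import Path

_EXT = ".rclonelink"

with tempfile.TemporaryDirectory() as tmp:
    root = Path(tmp)
    (root / "data.txt").write_text("hello")
    link = root / "latest.txt"
    link.symlink_to(root / "data.txt")

    # upload side: replace the symlink with a "<name>.rclonelink" text file holding its target
    placeholder = link.parent / (link.name + _EXT)
    placeholder.write_text(f"{link.readlink()}")
    link.unlink()  # only regular files travel through S3

    # download side: recreate the symlink from the placeholder, then drop the placeholder
    restored = placeholder.with_suffix("")
    os.symlink(placeholder.read_text().strip(), restored)
    placeholder.unlink()

    assert restored.is_symlink() and restored.read_text() == "hello"
```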
remove_local_osparclinks(local_directory_path): + await _sync_sources( + aws_s3_cli_settings, + progress_bar, + source=f"{local_directory_path}", + destination=f"{upload_s3_path}", + local_dir=local_directory_path, + exclude_patterns=exclude_patterns, + debug_logs=debug_logs, + ) + + +async def sync_s3_to_local( + aws_s3_cli_settings: AwsS3CliSettings, + progress_bar: ProgressBarData, + *, + local_directory_path: Path, + download_s3_link: AnyUrl, + exclude_patterns: set[str] | None = None, + debug_logs: bool = False, +) -> None: + """transfer the contents of a path in s3 to a local directory + + :raises e: AwsS3CliFailedError + """ + _raise_if_directory_is_file(local_directory_path) + + download_s3_path = download_s3_link + _logger.debug(" %s; %s", f"{download_s3_link=}", f"{download_s3_path=}") + + async with convert_osparclinks_to_original_symlinks(local_directory_path): + await _sync_sources( + aws_s3_cli_settings, + progress_bar, + source=f"{download_s3_path}", + destination=f"{local_directory_path}", + local_dir=local_directory_path, + exclude_patterns=exclude_patterns, + debug_logs=debug_logs, + ) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli_utils.py new file mode 100644 index 00000000000..d38c1c59d2b --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/aws_s3_cli_utils.py @@ -0,0 +1,41 @@ +import logging +import re + +from pydantic import ByteSize, TypeAdapter +from servicelib.logging_utils import log_catch +from servicelib.progress_bar import ProgressBarData + +from ._utils import BaseLogParser + +_logger = logging.getLogger(__name__) + + +def _parse_size(log_string): + match = re.search(r"^\w+ (?P[^\/]+)", log_string) + if match: + # NOTE: ByteSize does not know what `Bytes` or `Byte` are. + # It only knows about `b` and omitting the word bytes if they are just bytes. + return match.group("size").replace("Bytes", "").replace("Byte", "") + return None + + +class SyncAwsCliS3ProgressLogParser(BaseLogParser): + """ + log processor that onlyyields progress updates detected in the logs. + + + This command: + aws --endpoint-url ENDPOINT_URL s3 sync s3://BUCKET/S3_KEY . 
--delete --no-follow-symlinks + generates this log lines: + Completed 2.9 GiB/4.9 GiB (102.8 MiB/s) with 1 file(s) remaining + """ + + def __init__(self, progress_bar: ProgressBarData) -> None: + self.progress_bar = progress_bar + + async def __call__(self, logs: str) -> None: + _logger.debug("received logs: %s", logs) + with log_catch(_logger, reraise=False): + if _size := _parse_size(logs): + _bytes = TypeAdapter(ByteSize).validate_python(_size) + await self.progress_bar.set_(_bytes) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/client_session_manager.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/client_session_manager.py index a8aacb4afa8..b252fce6028 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/client_session_manager.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/client_session_manager.py @@ -12,7 +12,7 @@ class ClientSessionContextManager: # This package has no app so session is passed as optional arguments # See https://github.com/ITISFoundation/osparc-simcore/issues/1098 # - def __init__(self, session=None): + def __init__(self, session=None) -> None: # We are interested in fast connections, if a connection is established # there is no timeout for file download operations @@ -24,14 +24,14 @@ def __init__(self, session=None): total=None, connect=client_request_settings.HTTP_CLIENT_REQUEST_AIOHTTP_CONNECT_TIMEOUT, sock_connect=client_request_settings.HTTP_CLIENT_REQUEST_AIOHTTP_SOCK_CONNECT_TIMEOUT, - ), # type: ignore + ), ) self.is_owned = self.active_session is not session - async def __aenter__(self): + async def __aenter__(self) -> ClientSession: return self.active_session - async def __aexit__(self, exc_type, exc, tb): + async def __aexit__(self, exc_type, exc, tb) -> None: if self.is_owned: warnings.warn( "Optional session is not recommended, pass instead controled session (e.g. 
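To make the parsing step of `SyncAwsCliS3ProgressLogParser` above concrete: the regular expression captures everything between the first word and the first `/` into a named group `size` (the group name is implied by the `match.group("size")` call), and pydantic's `ByteSize` turns that text into a byte count. The sample log line is taken from the docstring above.

```python
import re

from pydantic import ByteSize, TypeAdapter

_PROGRESS_RE = re.compile(r"^\w+ (?P<size>[^\/]+)")

line = "Completed 2.9 GiB/4.9 GiB (102.8 MiB/s) with 1 file(s) remaining"
match = _PROGRESS_RE.search(line)
assert match
raw_size = match.group("size").replace("Bytes", "").replace("Byte", "")
print(TypeAdapter(ByteSize).validate_python(raw_size))  # ~3.1e9 bytes for "2.9 GiB"
```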
from app[APP_CLIENT_SESSION_KEY])", diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/constants.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/constants.py index 566aa68074f..f6803ffbeb4 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/constants.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/constants.py @@ -1,6 +1,6 @@ from typing import Final -from models_library.api_schemas_storage import LocationID +from models_library.projects_nodes_io import LocationID CHUNK_SIZE: Final[int] = 16 * 1024 * 1024 MINUTE: Final[int] = 60 diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/data_items_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/data_items_utils.py index 8f08f38db0e..2bad7d6fdf7 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/data_items_utils.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/data_items_utils.py @@ -1,17 +1,19 @@ import tempfile -import threading from pathlib import Path -from typing import Optional +from typing import Final +from uuid import uuid4 from models_library.projects_nodes_io import SimcoreS3FileID +_TMP_SIMCOREFILES: Final[Path] = Path(tempfile.gettempdir()) / "simcorefiles" + def create_simcore_file_id( file_path: Path, project_id: str, node_id: str, *, - file_base_path: Optional[Path] = None, + file_base_path: Path | None = None, ) -> SimcoreS3FileID: s3_file_name = file_path.name if file_base_path: @@ -20,12 +22,9 @@ def create_simcore_file_id( return SimcoreS3FileID(f"{clean_path}") -_INTERNAL_DIR = Path(tempfile.gettempdir(), "simcorefiles") - - -def create_folder_path(key: str) -> Path: - return Path(_INTERNAL_DIR, f"{threading.get_ident()}", key) +def get_folder_path(key: str) -> Path: + return _TMP_SIMCOREFILES / f"{uuid4()}" / key -def create_file_path(key: str, name: str) -> Path: - return Path(_INTERNAL_DIR, f"{threading.get_ident()}", key, name) +def get_file_path(key: str, name: str) -> Path: + return _TMP_SIMCOREFILES / f"{uuid4()}" / key / name diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py index c1533636f5a..2fc41388f52 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/dbmanager.py @@ -1,103 +1,80 @@ -import json import logging -import os -import socket -from typing import Optional - -import aiopg.sa -import tenacity -from aiopg.sa.engine import Engine -from aiopg.sa.result import RowProxy -from servicelib.common_aiopg_utils import DataSourceName, create_pg_engine -from servicelib.retry_policies import PostgresRetryPolicyUponInitialization + +import sqlalchemy as sa +from common_library.json_serialization import json_dumps, json_loads + +from models_library.projects import ProjectID +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.db_asyncpg_utils import create_async_engine_and_database_ready +from settings_library.node_ports import NodePortsSettings from simcore_postgres_database.models.comp_tasks import comp_tasks -from simcore_postgres_database.utils_aiopg import ( - close_engine, - raise_if_migration_not_ready, -) -from sqlalchemy import and_ +from simcore_postgres_database.models.projects import projects +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine -from .exceptions import NodeNotFound -from .settings import NodePortsSettings +from .exceptions import 
NodeNotFound, ProjectNotFoundError -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) async def _get_node_from_db( - project_id: str, node_uuid: str, connection: aiopg.sa.SAConnection -) -> RowProxy: - log.debug( + project_id: str, node_uuid: str, connection: AsyncConnection +) -> sa.engine.Row: + _logger.debug( "Reading from comp_tasks table for node uuid %s, project %s", node_uuid, project_id, ) + rows_count = await connection.scalar( + sa.select(sa.func.count()) + .select_from(comp_tasks) + .where( + (comp_tasks.c.node_id == node_uuid) + & (comp_tasks.c.project_id == project_id), + ) + ) + if rows_count > 1: + _logger.error("the node id %s is not unique", node_uuid) result = await connection.execute( - comp_tasks.select( - and_( - comp_tasks.c.node_id == node_uuid, - comp_tasks.c.project_id == project_id, - ) + sa.select(comp_tasks).where( + (comp_tasks.c.node_id == node_uuid) + & (comp_tasks.c.project_id == project_id) ) ) - if result.rowcount > 1: - log.error("the node id %s is not unique", node_uuid) - node: Optional[RowProxy] = await result.first() + node = result.one_or_none() if not node: - log.error("the node id %s was not found", node_uuid) + _logger.error("the node id %s was not found", node_uuid) raise NodeNotFound(node_uuid) return node -@tenacity.retry(**PostgresRetryPolicyUponInitialization().kwargs) -async def _ensure_postgres_ready(dsn: DataSourceName) -> Engine: - engine = await create_pg_engine(dsn, minsize=1, maxsize=4) - try: - await raise_if_migration_not_ready(engine) - except Exception: - await close_engine(engine) - raise - return engine - - class DBContextManager: - def __init__(self, db_engine: Optional[aiopg.sa.Engine] = None): - self._db_engine: Optional[aiopg.sa.Engine] = db_engine + def __init__(self, db_engine: AsyncEngine | None = None) -> None: + self._db_engine: AsyncEngine | None = db_engine self._db_engine_created: bool = False @staticmethod - async def _create_db_engine() -> aiopg.sa.Engine: + async def _create_db_engine() -> AsyncEngine: settings = NodePortsSettings.create_from_envs() - dsn = DataSourceName( - application_name=f"{__name__}_{socket.gethostname()}_{os.getpid()}", - database=settings.POSTGRES_SETTINGS.POSTGRES_DB, - user=settings.POSTGRES_SETTINGS.POSTGRES_USER, - password=settings.POSTGRES_SETTINGS.POSTGRES_PASSWORD.get_secret_value(), - host=settings.POSTGRES_SETTINGS.POSTGRES_HOST, - port=settings.POSTGRES_SETTINGS.POSTGRES_PORT, - ) # type: ignore - - engine = await _ensure_postgres_ready(dsn) + engine = await create_async_engine_and_database_ready( + settings.POSTGRES_SETTINGS + ) + assert isinstance(engine, AsyncEngine) # nosec return engine - async def __aenter__(self): + async def __aenter__(self) -> AsyncEngine: if not self._db_engine: self._db_engine = await self._create_db_engine() self._db_engine_created = True return self._db_engine - async def __aexit__(self, exc_type, exc, tb): + async def __aexit__(self, exc_type, exc, tb) -> None: if self._db_engine and self._db_engine_created: - await close_engine(self._db_engine) - log.debug( - "engine '%s' after shutdown: closed=%s, size=%d", - self._db_engine.dsn, - self._db_engine.closed, - self._db_engine.size, - ) + await self._db_engine.dispose() class DBManager: - def __init__(self, db_engine: Optional[aiopg.sa.Engine] = None): + def __init__(self, db_engine: AsyncEngine | None = None): self._db_engine = db_engine async def write_ports_configuration( @@ -107,48 +84,60 @@ async def write_ports_configuration( f"Writing port configuration to database for " 
f"project={project_id} node={node_uuid}: {json_configuration}" ) - log.debug(message) - - node_configuration = json.loads(json_configuration) - async with DBContextManager(self._db_engine) as engine: - async with engine.acquire() as connection: - # update the necessary parts - await connection.execute( - # FIXME: E1120:No value for argument 'dml' in method call - # pylint: disable=E1120 - comp_tasks.update() - .where( - and_( - comp_tasks.c.node_id == node_uuid, - comp_tasks.c.project_id == project_id, - ) - ) - .values( - schema=node_configuration["schema"], - inputs=node_configuration["inputs"], - outputs=node_configuration["outputs"], - run_hash=node_configuration.get("run_hash"), - ) + _logger.debug(message) + + node_configuration = json_loads(json_configuration) + async with ( + DBContextManager(self._db_engine) as engine, + engine.begin() as connection, + ): + # update the necessary parts + await connection.execute( + comp_tasks.update() + .where( + (comp_tasks.c.node_id == node_uuid) + & (comp_tasks.c.project_id == project_id), ) + .values( + schema=node_configuration["schema"], + inputs=node_configuration["inputs"], + outputs=node_configuration["outputs"], + run_hash=node_configuration.get("run_hash"), + ) + ) async def get_ports_configuration_from_node_uuid( self, project_id: str, node_uuid: str ) -> str: - log.debug( + _logger.debug( "Getting ports configuration of node %s from comp_tasks table", node_uuid ) - async with DBContextManager(self._db_engine) as engine: - async with engine.acquire() as connection: - node: RowProxy = await _get_node_from_db( - project_id, node_uuid, connection - ) - node_json_config = json.dumps( - { - "schema": node.schema, - "inputs": node.inputs, - "outputs": node.outputs, - "run_hash": node.run_hash, - } - ) - log.debug("Found and converted to json") + async with ( + DBContextManager(self._db_engine) as engine, + engine.connect() as connection, + ): + node = await _get_node_from_db(project_id, node_uuid, connection) + node_json_config = json_dumps( + { + "schema": node.schema, + "inputs": node.inputs, + "outputs": node.outputs, + "run_hash": node.run_hash, + } + ) + _logger.debug("Found and converted to json") return node_json_config + + async def get_project_owner_user_id(self, project_id: ProjectID) -> UserID: + async with ( + DBContextManager(self._db_engine) as engine, + engine.connect() as connection, + ): + prj_owner = await connection.scalar( + sa.select(projects.c.prj_owner).where( + projects.c.uuid == f"{project_id}" + ) + ) + if prj_owner is None: + raise ProjectNotFoundError(project_id) + return TypeAdapter(UserID).validate_python(prj_owner) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/exceptions.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/exceptions.py index 763593486b9..b0381357aca 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/exceptions.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/exceptions.py @@ -5,13 +5,11 @@ # # -from typing import Optional - class NodeportsException(Exception): """Basic exception for errors raised in nodeports""" - def __init__(self, msg: Optional[str] = None): + def __init__(self, msg: str | None = None): super().__init__(msg or "An error occured in simcore") @@ -26,7 +24,7 @@ def __init__(self, obj): class UnboundPortError(NodeportsException, IndexError): """Accessed port is not configured""" - def __init__(self, port_index, msg: Optional[str] = None): + def __init__(self, port_index, msg: str | None = None): super().__init__(f"No 
port bound at index {port_index}") self.port_index = port_index @@ -34,7 +32,7 @@ def __init__(self, port_index, msg: Optional[str] = None): class InvalidKeyError(NodeportsException): """Accessed key does not exist""" - def __init__(self, item_key: str, msg: Optional[str] = None): + def __init__(self, item_key: str, msg: str | None = None): super().__init__(f"No port bound with key {item_key}") self.item_key = item_key @@ -42,7 +40,7 @@ def __init__(self, item_key: str, msg: Optional[str] = None): class InvalidItemTypeError(NodeportsException): """Item type incorrect""" - def __init__(self, item_type: str, item_value: str, msg: Optional[str] = None): + def __init__(self, item_type: str, item_value: str, msg: str | None = None): super().__init__( msg or f"Invalid item type, value [{item_value}] does not qualify as type [{item_type}]" @@ -54,7 +52,7 @@ def __init__(self, item_type: str, item_value: str, msg: Optional[str] = None): class InvalidProtocolError(NodeportsException): """Invalid protocol used""" - def __init__(self, dct, msg: Optional[str] = None): + def __init__(self, dct, msg: str | None = None): super().__init__(f"Invalid protocol used: {dct} [{msg}]") self.dct = dct @@ -70,10 +68,22 @@ class StorageServerIssue(NodeportsException): class S3TransferError(NodeportsException): """S3 transfer error""" - def __init__(self, msg: Optional[str] = None): + def __init__(self, msg: str | None = None): super().__init__(msg or "Error while transferring to/from S3 storage") +class AwsS3BadRequestRequestTimeoutError(NodeportsException): + """Sometimes the request to S3 can time out and a 400 with a `RequestTimeout` + reason in the body will be received. For details regarding the error + see https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + + In this case the entire multipart upload needs to be abandoned and retried. 
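As the docstring of the new `AwsS3BadRequestRequestTimeoutError` notes, a 400 `RequestTimeout` from S3 invalidates the whole multipart upload. A sketch of how a caller might react, assuming a hypothetical `attempt_upload()` coroutine that performs one complete upload:

```python
from simcore_sdk.node_ports_common.exceptions import AwsS3BadRequestRequestTimeoutError

_MAX_ATTEMPTS = 3


async def upload_with_restart(attempt_upload) -> None:
    for attempt in range(1, _MAX_ATTEMPTS + 1):
        try:
            await attempt_upload()
            return
        except AwsS3BadRequestRequestTimeoutError:
            if attempt == _MAX_ATTEMPTS:
                raise
            # the aborted multipart upload cannot be resumed: start a brand-new one
```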
+ """ + + def __init__(self, body: str): + super().__init__(f"S3 replied with 400 RequestTimeout: {body=}") + + class S3InvalidPathError(NodeportsException): """S3 transfer error""" @@ -126,6 +136,14 @@ def __init__(self, node_uuid): super().__init__(f"the node id {node_uuid} was not found") +class ProjectNotFoundError(NodeportsException): + """The given node_uuid was not found""" + + def __init__(self, project_id): + self.project_id = project_id + super().__init__(f"the {project_id=} was not found") + + class SymlinkToSymlinkIsNotUploadableException(NodeportsException): """Not possible to upload a symlink to a symlink""" diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py index 9edb5edba4b..51aa3bae3c1 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/file_io_utils.py @@ -1,18 +1,10 @@ import asyncio -import json import logging +from collections.abc import AsyncGenerator, Coroutine from contextlib import AsyncExitStack from dataclasses import dataclass from pathlib import Path -from typing import ( - IO, - AsyncGenerator, - Optional, - Protocol, - Union, - cast, - runtime_checkable, -) +from typing import IO, Any, Final, Protocol, runtime_checkable import aiofiles from aiohttp import ( @@ -23,16 +15,22 @@ ClientResponseError, ClientSession, RequestInfo, - web, ) -from aiohttp.typedefs import LooseHeaders -from models_library.api_schemas_storage import ETag, FileUploadSchema, UploadedPart -from pydantic import AnyUrl +from common_library.json_serialization import json_loads +from models_library.api_schemas_storage.storage_schemas import ( + ETag, + FileUploadSchema, + UploadedPart, +) +from models_library.basic_types import SHA256Str +from multidict import MultiMapping +from pydantic import AnyUrl, NonNegativeInt +from servicelib.aiohttp import status from servicelib.logging_utils import log_catch from servicelib.progress_bar import ProgressBarData -from servicelib.utils import logged_gather -from tenacity._asyncio import AsyncRetrying +from servicelib.utils import logged_gather, partition_gen from tenacity.after import after_log +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.retry import retry_if_exception, retry_if_exception_type from tenacity.stop import stop_after_attempt @@ -44,31 +42,37 @@ from . 
import exceptions from .constants import CHUNK_SIZE +_logger = logging.getLogger(__name__) + +_CONCURRENT_MULTIPART_UPLOADS_COUNT: Final[NonNegativeInt] = 10 +_VALID_HTTP_STATUS_CODES: Final[NonNegativeInt] = 299 + @dataclass(frozen=True) class UploadableFileObject: file_object: IO file_name: str file_size: int + sha256_checksum: SHA256Str | None = None -class ExtendedClientResponseError(ClientResponseError): +class _ExtendedClientResponseError(ClientResponseError): def __init__( self, request_info: RequestInfo, history: tuple[ClientResponse, ...], body: str, *, - code: Optional[int] = None, - status: Optional[int] = None, + code: int | None = None, + status_code: int | None = None, message: str = "", - headers: Optional[LooseHeaders] = None, + headers: MultiMapping[str] | None = None, ): super().__init__( request_info, history, code=code, - status=status, + status=status_code, message=message, headers=headers, ) @@ -89,13 +93,13 @@ def __str__(self) -> str: async def _raise_for_status(response: ClientResponse) -> None: - if response.status >= 400: + if response.status >= status.HTTP_400_BAD_REQUEST: body = await response.text() - raise ExtendedClientResponseError( + raise _ExtendedClientResponseError( response.request_info, response.history, body, - status=response.status, + status_code=response.status, message=response.reason or "", headers=response.headers, ) @@ -139,34 +143,32 @@ class ProgressData: @runtime_checkable class LogRedirectCB(Protocol): - async def __call__(self, logs: str) -> None: - ... + async def __call__(self, log: str) -> None: ... async def _file_chunk_writer( file: Path, response: ClientResponse, pbar: tqdm, - io_log_redirect_cb: Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, progress_bar: ProgressBarData, ): async with aiofiles.open(file, "wb") as file_pointer: while chunk := await response.content.read(CHUNK_SIZE): await file_pointer.write(chunk) if io_log_redirect_cb and pbar.update(len(chunk)): - with log_catch(log, reraise=False): + with log_catch(_logger, reraise=False): await io_log_redirect_cb(f"{pbar}") await progress_bar.update(len(chunk)) -log = logging.getLogger(__name__) -_TQDM_FILE_OPTIONS = dict( - unit="byte", - unit_scale=True, - unit_divisor=1024, - colour="yellow", - miniters=1, -) +_TQDM_FILE_OPTIONS: dict[str, Any] = { + "unit": "byte", + "unit_scale": True, + "unit_divisor": 1024, + "colour": "yellow", + "miniters": 1, +} async def download_link_to_file( @@ -175,24 +177,24 @@ async def download_link_to_file( file_path: Path, *, num_retries: int, - io_log_redirect_cb: Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, progress_bar: ProgressBarData, ): - log.debug("Downloading from %s to %s", url, file_path) + _logger.debug("Downloading from %s to %s", url, file_path) async for attempt in AsyncRetrying( reraise=True, wait=wait_exponential(min=1, max=10), stop=stop_after_attempt(num_retries), retry=retry_if_exception_type(ClientConnectionError), - before_sleep=before_sleep_log(log, logging.WARNING, exc_info=True), - after=after_log(log, log_level=logging.ERROR), + before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), + after=after_log(_logger, log_level=logging.ERROR), ): with attempt: async with AsyncExitStack() as stack: response = await stack.enter_async_context(session.get(url)) - if response.status == 404: + if response.status == status.HTTP_404_NOT_FOUND: raise exceptions.InvalidDownloadLinkError(url) - if response.status > 299: + if response.status > _VALID_HTTP_STATUS_CODES: raise 
exceptions.TransferError(url) file_path.parent.mkdir(parents=True, exist_ok=True) # SEE https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Length @@ -204,16 +206,21 @@ async def download_link_to_file( total=file_size, **( _TQDM_FILE_OPTIONS - | dict( - miniters=_compute_tqdm_miniters(file_size) - if file_size - else 1 - ) + | { + "miniters": ( + _compute_tqdm_miniters(file_size) + if file_size + else 1 + ) + } ), ) ) sub_progress = await stack.enter_async_context( - progress_bar.sub_progress(steps=file_size or 1) + progress_bar.sub_progress( + steps=file_size or 1, + description=f"downloading {file_path.name}", + ) ) await _file_chunk_writer( @@ -223,7 +230,7 @@ async def download_link_to_file( io_log_redirect_cb, sub_progress, ) - log.debug("Download complete") + _logger.debug("Download complete") except ClientPayloadError as exc: raise exceptions.TransferError(url) from exc @@ -231,36 +238,50 @@ async def download_link_to_file( def _check_for_aws_http_errors(exc: BaseException) -> bool: """returns: True if it should retry when http exception is detected""" - if not isinstance(exc, ExtendedClientResponseError): + if not isinstance(exc, _ExtendedClientResponseError): return False - client_error = cast(ExtendedClientResponseError, exc) - # Sometimes AWS responds with a 500 or 503 which shall be retried, # form more information see: # https://aws.amazon.com/premiumsupport/knowledge-center/http-5xx-errors-s3/ - if client_error.status in ( - web.HTTPInternalServerError.status_code, - web.HTTPServiceUnavailable.status_code, - ): - return True - - # Sometimes the request to S3 can time out and a 400 with a `RequestTimeout` - # reason in the body will be received. This also needs retrying, - # for more information see: - # see https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - if ( - client_error.status == web.HTTPBadRequest.status_code - and "RequestTimeout" in client_error.body + if exc.status in ( + status.HTTP_500_INTERNAL_SERVER_ERROR, + status.HTTP_503_SERVICE_UNAVAILABLE, ): return True return False +async def _session_put( + session: ClientSession, + file_part_size: int, + upload_url: str, + pbar: tqdm, + io_log_redirect_cb: LogRedirectCB | None, + progress_bar: ProgressBarData, + file_uploader: Any | None, +) -> str: + async with session.put( + upload_url, data=file_uploader, headers={"Content-Length": f"{file_part_size}"} + ) as response: + await _raise_for_status(response) + if io_log_redirect_cb and pbar.update(file_part_size): + with log_catch(_logger, reraise=False): + await io_log_redirect_cb(f"{pbar}") + await progress_bar.update(file_part_size) + + # NOTE: the response from minio does not contain a json body + assert response.status == status.HTTP_200_OK # nosec + assert response.headers # nosec + assert "Etag" in response.headers # nosec + etag: str = json_loads(response.headers["Etag"]) + return etag + + async def _upload_file_part( session: ClientSession, - file_to_upload: Union[Path, UploadableFileObject], + file_to_upload: Path | UploadableFileObject, part_index: int, file_offset: int, file_part_size: int, @@ -268,7 +289,7 @@ async def _upload_file_part( pbar: tqdm, num_retries: int, *, - io_log_redirect_cb: Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, progress_bar: ProgressBarData, ) -> tuple[int, ETag]: file_uploader = _file_chunk_reader( @@ -289,102 +310,135 @@ async def _upload_file_part( stop=stop_after_attempt(num_retries), retry=retry_if_exception_type(ClientConnectionError) | 
retry_if_exception(_check_for_aws_http_errors), - before_sleep=before_sleep_log(log, logging.WARNING, exc_info=True), - after=after_log(log, log_level=logging.ERROR), + before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), + after=after_log(_logger, log_level=logging.ERROR), ): with attempt: - async with session.put( - upload_url, - data=file_uploader, - headers={ - "Content-Length": f"{file_part_size}", - }, - ) as response: - await _raise_for_status(response) - if io_log_redirect_cb and pbar.update(file_part_size): - with log_catch(log, reraise=False): - await io_log_redirect_cb(f"{pbar}") - await progress_bar.update(file_part_size) - - # NOTE: the response from minio does not contain a json body - assert response.status == web.HTTPOk.status_code # nosec - assert response.headers # nosec - assert "Etag" in response.headers # nosec - received_e_tag = json.loads(response.headers["Etag"]) - return (part_index, received_e_tag) - raise exceptions.S3TransferError( - f"Unexpected error while transferring {file_to_upload} to {upload_url}" - ) + received_e_tag = await _session_put( + session=session, + file_part_size=file_part_size, + upload_url=str(upload_url), + pbar=pbar, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=progress_bar, + file_uploader=file_uploader, + ) + return (part_index, received_e_tag) + msg = f"Unexpected error while transferring {file_to_upload} to {upload_url}" + raise exceptions.S3TransferError(msg) + + +def _get_file_size_and_name( + file_to_upload: Path | UploadableFileObject, +) -> tuple[int, str]: + if isinstance(file_to_upload, Path): + file_size = file_to_upload.stat().st_size + file_name = file_to_upload.as_posix() + else: + file_size = file_to_upload.file_size + file_name = file_to_upload.file_name + + return file_size, file_name + + +async def _process_batch( + *, + upload_tasks: list[Coroutine], + max_concurrency: int, + file_name: str, + file_size: int, + file_chunk_size: int, + last_chunk_size: int, +) -> list[UploadedPart]: + results: list[UploadedPart] = [] + try: + upload_results = await logged_gather( + *upload_tasks, log=_logger, max_concurrency=max_concurrency + ) + + for i, e_tag in upload_results: + results.append(UploadedPart(number=i + 1, e_tag=e_tag)) + except _ExtendedClientResponseError as e: + if e.status == status.HTTP_400_BAD_REQUEST and "RequestTimeout" in e.body: + raise exceptions.AwsS3BadRequestRequestTimeoutError(e.body) from e + except ClientError as exc: + msg = ( + f"Could not upload file {file_name} ({file_size=}, " + f"{file_chunk_size=}, {last_chunk_size=}):{exc}" + ) + raise exceptions.S3TransferError(msg) from exc + + return results async def upload_file_to_presigned_links( session: ClientSession, file_upload_links: FileUploadSchema, - file_to_upload: Union[Path, UploadableFileObject], + file_to_upload: Path | UploadableFileObject, *, num_retries: int, - io_log_redirect_cb: Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, progress_bar: ProgressBarData, ) -> list[UploadedPart]: - file_size = 0 - file_name = "" - if isinstance(file_to_upload, Path): - file_size = file_to_upload.stat().st_size - file_name = file_to_upload.as_posix() - else: - file_size = file_to_upload.file_size - file_name = file_to_upload.file_name + file_size, file_name = _get_file_size_and_name(file_to_upload) + + # NOTE: when the file object is already created it cannot be duplicated so + # no concurrency is allowed in that case + max_concurrency: int = 4 if isinstance(file_to_upload, Path) else 1 file_chunk_size = 
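`_upload_file_part` above retries on connection errors and, via `_check_for_aws_http_errors`, on AWS 500/503 responses. A compact sketch of the same tenacity composition; `fetch` is a hypothetical coroutine that raises `aiohttp.ClientResponseError` on HTTP failures.

```python
from aiohttp import ClientConnectionError, ClientResponseError
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception, retry_if_exception_type
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_exponential

_RETRIABLE_AWS_STATUSES = {500, 503}


def _is_retriable_aws_error(exc: BaseException) -> bool:
    return isinstance(exc, ClientResponseError) and exc.status in _RETRIABLE_AWS_STATUSES


async def fetch_with_retries(fetch, *, num_retries: int = 3):
    async for attempt in AsyncRetrying(
        reraise=True,
        wait=wait_exponential(min=1, max=10),
        stop=stop_after_attempt(num_retries),
        retry=retry_if_exception_type(ClientConnectionError)
        | retry_if_exception(_is_retriable_aws_error),
    ):
        with attempt:
            return await fetch()
```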
int(file_upload_links.chunk_size) - num_urls = len(file_upload_links.urls) - last_chunk_size = file_size - file_chunk_size * (num_urls - 1) - upload_tasks = [] + num_urls: int = len(file_upload_links.urls) + last_chunk_size: int = file_size - file_chunk_size * (num_urls - 1) + + results: list[UploadedPart] = [] async with AsyncExitStack() as stack: tqdm_progress = stack.enter_context( tqdm_logging_redirect( desc=f"uploading {file_name}\n", total=file_size, **( - _TQDM_FILE_OPTIONS - | dict(miniters=_compute_tqdm_miniters(file_size)) + _TQDM_FILE_OPTIONS | {"miniters": _compute_tqdm_miniters(file_size)} ), ) ) sub_progress = await stack.enter_async_context( - progress_bar.sub_progress(steps=file_size) + progress_bar.sub_progress( + steps=file_size, description=f"uploading {file_name}" + ) ) - for index, upload_url in enumerate(file_upload_links.urls): - this_file_chunk_size = ( - file_chunk_size if (index + 1) < num_urls else last_chunk_size - ) - upload_tasks.append( - _upload_file_part( - session, - file_to_upload, - index, - index * file_chunk_size, - this_file_chunk_size, - upload_url, - tqdm_progress, - num_retries, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=sub_progress, + indexed_urls: list[tuple[int, AnyUrl]] = list(enumerate(file_upload_links.urls)) + for partition_of_indexed_urls in partition_gen( + indexed_urls, slice_size=_CONCURRENT_MULTIPART_UPLOADS_COUNT + ): + upload_tasks: list[Coroutine] = [] + for index, upload_url in partition_of_indexed_urls: + this_file_chunk_size = ( + file_chunk_size if (index + 1) < num_urls else last_chunk_size + ) + upload_tasks.append( + _upload_file_part( + session=session, + file_to_upload=file_to_upload, + part_index=index, + file_offset=index * file_chunk_size, + file_part_size=this_file_chunk_size, + upload_url=upload_url, + pbar=tqdm_progress, + num_retries=num_retries, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=sub_progress, + ) + ) + results.extend( + await _process_batch( + upload_tasks=upload_tasks, + max_concurrency=max_concurrency, + file_name=file_name, + file_size=file_chunk_size, + file_chunk_size=file_chunk_size, + last_chunk_size=last_chunk_size, ) ) - try: - results = await logged_gather( - *upload_tasks, - log=log, - # NOTE: when the file object is already created it cannot be duplicated so - # no concurrency is allowed in that case - max_concurrency=4 if isinstance(file_to_upload, Path) else 1, - ) - part_to_etag = [ - UploadedPart(number=index + 1, e_tag=e_tag) for index, e_tag in results - ] - return part_to_etag - except ClientError as exc: - raise exceptions.S3TransferError( - f"Could not upload file {file_name} ({file_size=}, {file_chunk_size=}, {last_chunk_size=}):{exc}" - ) from exc + + return results diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py index b024a86cfbd..5fdd631474d 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/filemanager.py @@ -1,128 +1,73 @@ import logging from asyncio import CancelledError +from dataclasses import dataclass from pathlib import Path -from typing import Optional, Union -from aiohttp import ClientError, ClientSession -from models_library.api_schemas_storage import ( +import aiofiles +from aiohttp import ClientSession +from models_library.api_schemas_storage.storage_schemas import ( ETag, FileMetaDataGet, - FileUploadCompleteFutureResponse, - 
FileUploadCompleteResponse, - FileUploadCompleteState, - FileUploadCompletionBody, FileUploadSchema, - LocationID, - LocationName, + LinkType, UploadedPart, ) -from models_library.generics import Envelope -from models_library.projects_nodes_io import StorageFileID +from models_library.basic_types import SHA256Str +from models_library.projects_nodes_io import LocationID, LocationName, StorageFileID from models_library.users import UserID -from models_library.utils.fastapi_encoders import jsonable_encoder -from pydantic import ByteSize, parse_obj_as +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.file_utils import create_sha256_checksum from servicelib.progress_bar import ProgressBarData +from settings_library.aws_s3_cli import AwsS3CliSettings +from settings_library.node_ports import NodePortsSettings from settings_library.r_clone import RCloneSettings -from tenacity._asyncio import AsyncRetrying +from tenacity import AsyncRetrying +from tenacity.after import after_log from tenacity.before_sleep import before_sleep_log from tenacity.retry import retry_if_exception_type -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_random_exponential from yarl import URL from ..node_ports_common.client_session_manager import ClientSessionContextManager -from . import exceptions, r_clone, storage_client -from .constants import SIMCORE_LOCATION +from . import _filemanager_utils, aws_s3_cli, exceptions, r_clone, storage_client from .file_io_utils import ( LogRedirectCB, UploadableFileObject, download_link_to_file, upload_file_to_presigned_links, ) -from .settings import NodePortsSettings -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -async def _get_location_id_from_location_name( - user_id: UserID, - store: LocationName, - session: ClientSession, -) -> LocationID: - resp = await storage_client.get_storage_locations(session=session, user_id=user_id) - for location in resp: - if location.name == store: - return location.id - # location id not found - raise exceptions.S3InvalidStore(store) - - -async def _complete_upload( - session: ClientSession, - upload_links: FileUploadSchema, - parts: list[UploadedPart], -) -> ETag: - """completes a potentially multipart upload in AWS - NOTE: it can take several minutes to finish, see [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) - it can take several minutes - :raises ValueError: _description_ - :raises exceptions.S3TransferError: _description_ - :rtype: ETag - """ - async with session.post( - upload_links.links.complete_upload, - json=jsonable_encoder(FileUploadCompletionBody(parts=parts)), - ) as resp: - resp.raise_for_status() - # now poll for state - file_upload_complete_response = parse_obj_as( - Envelope[FileUploadCompleteResponse], await resp.json() +async def complete_file_upload( + uploaded_parts: list[UploadedPart], + upload_completion_link: AnyUrl, + client_session: ClientSession | None = None, + is_directory: bool = False, +) -> ETag | None: + async with ClientSessionContextManager(client_session) as session: + e_tag: ETag | None = await _filemanager_utils.complete_upload( + session=session, + upload_completion_link=upload_completion_link, + parts=uploaded_parts, + is_directory=is_directory, ) - assert file_upload_complete_response.data # nosec - state_url = file_upload_complete_response.data.links.state - log.info( - "completed upload of %s", - 
f"{len(parts)} parts, received {file_upload_complete_response.json(indent=2)}", - ) - - async for attempt in AsyncRetrying( - reraise=True, - wait=wait_fixed(1), - stop=stop_after_delay( - NodePortsSettings.create_from_envs().NODE_PORTS_MULTIPART_UPLOAD_COMPLETION_TIMEOUT_S - ), - retry=retry_if_exception_type(ValueError), - before_sleep=before_sleep_log(log, logging.DEBUG), - ): - with attempt: - async with session.post(state_url) as resp: - resp.raise_for_status() - future_enveloped = parse_obj_as( - Envelope[FileUploadCompleteFutureResponse], await resp.json() - ) - assert future_enveloped.data # nosec - if future_enveloped.data.state == FileUploadCompleteState.NOK: - raise ValueError("upload not ready yet") - assert future_enveloped.data.e_tag # nosec - log.debug( - "multipart upload completed in %s, received %s", - attempt.retry_state.retry_object.statistics, - f"{future_enveloped.data.e_tag=}", - ) - return future_enveloped.data.e_tag - raise exceptions.S3TransferError( - f"Could not complete the upload of file {upload_links=}" - ) + # should not be None because a file is being uploaded + if not is_directory: + assert e_tag is not None # nosec + return e_tag async def get_download_link_from_s3( *, user_id: UserID, - store_name: Optional[LocationName], - store_id: Optional[LocationID], + store_name: LocationName | None, + store_id: LocationID | None, s3_object: StorageFileID, - link_type: storage_client.LinkType, - client_session: Optional[ClientSession] = None, + link_type: LinkType, + client_session: ClientSession | None = None, ) -> URL: """ :raises exceptions.NodeportsException @@ -130,86 +75,105 @@ async def get_download_link_from_s3( :raises exceptions.StorageInvalidCall :raises exceptions.StorageServerIssue """ - if store_name is None and store_id is None: - raise exceptions.NodeportsException(msg="both store name and store id are None") - async with ClientSessionContextManager(client_session) as session: - if store_name is not None: - store_id = await _get_location_id_from_location_name( - user_id, store_name, session - ) - assert store_id is not None # nosec - return URL( - await storage_client.get_download_file_link( - session=session, - file_id=s3_object, - location_id=store_id, - user_id=user_id, - link_type=link_type, - ) + store_id = await _filemanager_utils.resolve_location_id( + session, user_id, store_name, store_id + ) + file_link = await storage_client.get_download_file_link( + session=session, + file_id=s3_object, + location_id=store_id, + user_id=user_id, + link_type=link_type, ) + return URL(f"{file_link}") async def get_upload_links_from_s3( *, user_id: UserID, - store_name: Optional[LocationName], - store_id: Optional[LocationID], + store_name: LocationName | None, + store_id: LocationID | None, s3_object: StorageFileID, - link_type: storage_client.LinkType, - client_session: Optional[ClientSession] = None, + link_type: LinkType, + client_session: ClientSession | None = None, file_size: ByteSize, + is_directory: bool, + sha256_checksum: SHA256Str | None, ) -> tuple[LocationID, FileUploadSchema]: - if store_name is None and store_id is None: - raise exceptions.NodeportsException(msg="both store name and store id are None") - async with ClientSessionContextManager(client_session) as session: - if store_name is not None: - store_id = await _get_location_id_from_location_name( - user_id, store_name, session - ) - assert store_id is not None # nosec - return ( - store_id, - await storage_client.get_upload_file_links( - session=session, - file_id=s3_object, - 
location_id=store_id, - user_id=user_id, - link_type=link_type, - file_size=file_size, - ), + store_id = await _filemanager_utils.resolve_location_id( + session, user_id, store_name, store_id + ) + file_links = await storage_client.get_upload_file_links( + session=session, + file_id=s3_object, + location_id=store_id, + user_id=user_id, + link_type=link_type, + file_size=file_size, + is_directory=is_directory, + sha256_checksum=sha256_checksum, ) + return (store_id, file_links) -async def download_file_from_s3( +async def download_path_from_s3( *, user_id: UserID, - store_name: Optional[LocationName], - store_id: Optional[LocationID], + store_name: LocationName | None, + store_id: LocationID | None, s3_object: StorageFileID, - local_folder: Path, - io_log_redirect_cb: Optional[LogRedirectCB], - client_session: Optional[ClientSession] = None, + local_path: Path, + io_log_redirect_cb: LogRedirectCB | None, + client_session: ClientSession | None = None, + r_clone_settings: RCloneSettings | None, progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None, ) -> Path: """Downloads a file from S3 :param session: add app[APP_CLIENT_SESSION_KEY] session here otherwise default is opened/closed every call :type session: ClientSession, optional + :raises exceptions.NodeportsException :raises exceptions.S3InvalidPathError :raises exceptions.StorageInvalidCall :return: path to downloaded file """ - log.debug( + _logger.debug( "Downloading from store %s:id %s, s3 object %s, to %s", store_name, store_id, s3_object, - local_folder, + local_path, ) async with ClientSessionContextManager(client_session) as session: + store_id = await _filemanager_utils.resolve_location_id( + session, user_id, store_name, store_id + ) + file_meta_data: FileMetaDataGet = await _get_file_meta_data( + user_id=user_id, + s3_object=s3_object, + store_id=store_id, + client_session=session, + ) + + if ( + file_meta_data.is_directory + and not aws_s3_cli_settings + and not await r_clone.is_r_clone_available(r_clone_settings) + ): + msg = f"Requested to download directory {s3_object}, but no rclone support was detected" + raise exceptions.NodeportsException(msg) + if ( + file_meta_data.is_directory + and aws_s3_cli_settings + and not await aws_s3_cli.is_aws_s3_cli_available(aws_s3_cli_settings) + ): + msg = f"Requested to download directory {s3_object}, but no aws cli support was detected" + raise exceptions.NodeportsException(msg) + # get the s3 link download_link = await get_download_link_from_s3( user_id=user_id, @@ -217,16 +181,42 @@ async def download_file_from_s3( store_id=store_id, s3_object=s3_object, client_session=session, - link_type=storage_client.LinkType.PRESIGNED, + link_type=( + LinkType.S3 if file_meta_data.is_directory else LinkType.PRESIGNED + ), ) # the link contains the file name if not download_link: raise exceptions.S3InvalidPathError(s3_object) + if file_meta_data.is_directory: + if aws_s3_cli_settings: + await aws_s3_cli.sync_s3_to_local( + aws_s3_cli_settings, + progress_bar, + local_directory_path=local_path, + download_s3_link=TypeAdapter(AnyUrl).validate_python( + f"{download_link}" + ), + ) + elif r_clone_settings: + await r_clone.sync_s3_to_local( + r_clone_settings, + progress_bar, + local_directory_path=local_path, + download_s3_link=str( + TypeAdapter(AnyUrl).validate_python(f"{download_link}") + ), + ) + else: + msg = "Unexpected configuration" + raise RuntimeError(msg) + return local_path + return await download_file_from_link( download_link, - local_folder, + local_path, 
client_session=session, io_log_redirect_cb=io_log_redirect_cb, progress_bar=progress_bar, @@ -237,9 +227,9 @@ async def download_file_from_link( download_link: URL, destination_folder: Path, *, - io_log_redirect_cb: Optional[LogRedirectCB], - file_name: Optional[str] = None, - client_session: Optional[ClientSession] = None, + io_log_redirect_cb: LogRedirectCB | None, + file_name: str | None = None, + client_session: ClientSession | None = None, progress_bar: ProgressBarData, ) -> Path: # a download link looks something like: @@ -266,32 +256,61 @@ async def download_file_from_link( return local_file_path -async def _abort_upload( - session: ClientSession, upload_links: FileUploadSchema, *, reraise_exceptions: bool +async def abort_upload( + abort_upload_link: AnyUrl, client_session: ClientSession | None = None ) -> None: - # abort the upload correctly, so it can revert back to last version - try: - async with session.post(upload_links.links.abort_upload) as resp: - resp.raise_for_status() - except ClientError: - log.warning("Error while aborting upload", exc_info=True) - if reraise_exceptions: - raise - log.warning("Upload aborted") + """Abort a multipart upload + Arguments: + upload_links: FileUploadSchema -async def upload_file( + """ + async with ClientSessionContextManager(client_session) as session: + await _filemanager_utils.abort_upload( + session=session, + abort_upload_link=abort_upload_link, + reraise_exceptions=True, + ) + + +@dataclass +class UploadedFile: + store_id: LocationID + etag: ETag + + +@dataclass +class UploadedFolder: ... + + +async def _generate_checksum( + path_to_upload: Path | UploadableFileObject, is_directory: bool +) -> SHA256Str | None: + checksum: SHA256Str | None = None + if is_directory: + return checksum + if isinstance(path_to_upload, Path): + async with aiofiles.open(path_to_upload, mode="rb") as f: + checksum = await create_sha256_checksum(f) + elif isinstance(path_to_upload, UploadableFileObject): + checksum = path_to_upload.sha256_checksum + return checksum + + +async def upload_path( # pylint: disable=too-many-arguments *, user_id: UserID, - store_id: Optional[LocationID], - store_name: Optional[LocationName], + store_id: LocationID | None, + store_name: LocationName | None, s3_object: StorageFileID, - file_to_upload: Union[Path, UploadableFileObject], - io_log_redirect_cb: Optional[LogRedirectCB], - client_session: Optional[ClientSession] = None, - r_clone_settings: Optional[RCloneSettings] = None, - progress_bar: Optional[ProgressBarData] = None, -) -> tuple[LocationID, ETag]: + path_to_upload: Path | UploadableFileObject, + io_log_redirect_cb: LogRedirectCB | None, + client_session: ClientSession | None = None, + r_clone_settings: RCloneSettings | None = None, + progress_bar: ProgressBarData | None = None, + exclude_patterns: set[str] | None = None, + aws_s3_cli_settings: AwsS3CliSettings | None = None, +) -> UploadedFile | UploadedFolder: """Uploads a file (potentially in parallel) or a file object (sequential in any case) to S3 :param session: add app[APP_CLIENT_SESSION_KEY] session here otherwise default is opened/closed every call @@ -301,26 +320,83 @@ async def upload_file( :raises exceptions.NodeportsException :return: stored id, S3 entity_tag """ - log.debug( + async for attempt in AsyncRetrying( + reraise=True, + wait=wait_random_exponential(), + stop=stop_after_attempt( + NodePortsSettings.create_from_envs().NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS + ), + retry=retry_if_exception_type(exceptions.AwsS3BadRequestRequestTimeoutError), + 
before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), + after=after_log(_logger, log_level=logging.ERROR), + ): + with attempt: + result = await _upload_path( + user_id=user_id, + store_id=store_id, + store_name=store_name, + s3_object=s3_object, + path_to_upload=path_to_upload, + io_log_redirect_cb=io_log_redirect_cb, + client_session=client_session, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + exclude_patterns=exclude_patterns, + aws_s3_cli_settings=aws_s3_cli_settings, + ) + return result + + +async def _upload_path( # pylint: disable=too-many-arguments + *, + user_id: UserID, + store_id: LocationID | None, + store_name: LocationName | None, + s3_object: StorageFileID, + path_to_upload: Path | UploadableFileObject, + io_log_redirect_cb: LogRedirectCB | None, + client_session: ClientSession | None, + r_clone_settings: RCloneSettings | None, + progress_bar: ProgressBarData | None, + exclude_patterns: set[str] | None, + aws_s3_cli_settings: AwsS3CliSettings | None, +) -> UploadedFile | UploadedFolder: + _logger.debug( "Uploading %s to %s:%s@%s", - f"{file_to_upload=}", + f"{path_to_upload=}", f"{store_id=}", f"{store_name=}", f"{s3_object=}", ) if not progress_bar: - progress_bar = ProgressBarData(steps=1) + progress_bar = ProgressBarData(num_steps=1, description="uploading") - use_rclone = ( - await r_clone.is_r_clone_available(r_clone_settings) - and store_id == SIMCORE_LOCATION - and isinstance(file_to_upload, Path) - ) + is_directory: bool = isinstance(path_to_upload, Path) and path_to_upload.is_dir() + if ( + is_directory + and not aws_s3_cli_settings + and not await r_clone.is_r_clone_available(r_clone_settings) + ): + msg = f"Requested to upload directory {path_to_upload}, but no rclone support was detected" + raise exceptions.NodeportsException(msg) + if ( + is_directory + and aws_s3_cli_settings + and not await aws_s3_cli.is_aws_s3_cli_available(aws_s3_cli_settings) + ): + msg = f"Requested to upload directory {path_to_upload}, but no aws cli support was detected" + raise exceptions.NodeportsException(msg) + + checksum: SHA256Str | None = await _generate_checksum(path_to_upload, is_directory) if io_log_redirect_cb: - await io_log_redirect_cb(f"uploading {file_to_upload}, please wait...") + await io_log_redirect_cb(f"uploading {path_to_upload}, please wait...") + + # NOTE: when uploading a directory there is no e_tag as this is provided only for + # each single file and it makes no sense to have one for directories + e_tag: ETag | None = None async with ClientSessionContextManager(client_session) as session: - upload_links = None + upload_links: FileUploadSchema | None = None try: store_id, upload_links = await get_upload_links_from_s3( user_id=user_id, @@ -328,114 +404,190 @@ async def upload_file( store_id=store_id, s3_object=s3_object, client_session=session, - link_type=storage_client.LinkType.S3 - if use_rclone - else storage_client.LinkType.PRESIGNED, + link_type=LinkType.S3 if is_directory else LinkType.PRESIGNED, file_size=ByteSize( - file_to_upload.stat().st_size - if isinstance(file_to_upload, Path) - else file_to_upload.file_size + path_to_upload.stat().st_size + if isinstance(path_to_upload, Path) + else path_to_upload.file_size ), + is_directory=is_directory, + sha256_checksum=checksum, ) - # NOTE: in case of S3 upload, there are no multipart uploads, so this remains empty - uploaded_parts: list[UploadedPart] = [] - if use_rclone: - assert r_clone_settings # nosec - assert isinstance(file_to_upload, Path) # nosec - await 
r_clone.sync_local_to_s3( - file_to_upload, - r_clone_settings, - upload_links, - ) - await progress_bar.update() - else: - uploaded_parts = await upload_file_to_presigned_links( - session, - upload_links, - file_to_upload, - num_retries=NodePortsSettings.create_from_envs().NODE_PORTS_IO_NUM_RETRY_ATTEMPTS, - io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar, - ) - - # complete the upload - e_tag = await _complete_upload( - session, - upload_links, - uploaded_parts, + e_tag, upload_links = await _upload_to_s3( + upload_links=upload_links, + path_to_upload=path_to_upload, + io_log_redirect_cb=io_log_redirect_cb, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + is_directory=is_directory, + session=session, + exclude_patterns=exclude_patterns, + aws_s3_cli_settings=aws_s3_cli_settings, ) - except (r_clone.RCloneFailedError, exceptions.S3TransferError) as exc: - log.error("The upload failed with an unexpected error:", exc_info=True) + except ( + r_clone.RCloneFailedError, + aws_s3_cli.AwsS3CliFailedError, + exceptions.S3TransferError, + ) as exc: + _logger.exception("The upload failed with an unexpected error:") if upload_links: - await _abort_upload(session, upload_links, reraise_exceptions=False) + await _filemanager_utils.abort_upload( + session, upload_links.links.abort_upload, reraise_exceptions=False + ) raise exceptions.S3TransferError from exc except CancelledError: if upload_links: - await _abort_upload(session, upload_links, reraise_exceptions=False) + await _filemanager_utils.abort_upload( + session, upload_links.links.abort_upload, reraise_exceptions=False + ) raise if io_log_redirect_cb: - await io_log_redirect_cb(f"upload of {file_to_upload} complete.") - return store_id, e_tag + await io_log_redirect_cb(f"upload of {path_to_upload} complete.") + return UploadedFolder() if e_tag is None else UploadedFile(store_id, e_tag) + + +async def _upload_to_s3( + *, + upload_links, + path_to_upload: Path | UploadableFileObject, + io_log_redirect_cb: LogRedirectCB | None, + r_clone_settings: RCloneSettings | None, + progress_bar: ProgressBarData, + is_directory: bool, + session: ClientSession, + exclude_patterns: set[str] | None, + aws_s3_cli_settings: AwsS3CliSettings | None, +) -> tuple[ETag | None, FileUploadSchema]: + uploaded_parts: list[UploadedPart] = [] + if is_directory: + assert isinstance(path_to_upload, Path) # nosec + assert len(upload_links.urls) > 0 # nosec + if aws_s3_cli_settings: + await aws_s3_cli.sync_local_to_s3( + aws_s3_cli_settings, + progress_bar, + local_directory_path=path_to_upload, + upload_s3_link=upload_links.urls[0], + exclude_patterns=exclude_patterns, + ) + elif r_clone_settings: + await r_clone.sync_local_to_s3( + r_clone_settings, + progress_bar, + local_directory_path=path_to_upload, + upload_s3_link=upload_links.urls[0], + exclude_patterns=exclude_patterns, + ) + else: + msg = "Unexpected configuration" + raise RuntimeError(msg) + else: + uploaded_parts = await upload_file_to_presigned_links( + session, + upload_links, + path_to_upload, + num_retries=NodePortsSettings.create_from_envs().NODE_PORTS_IO_NUM_RETRY_ATTEMPTS, + io_log_redirect_cb=io_log_redirect_cb, + progress_bar=progress_bar, + ) + # complete the upload + e_tag = await _filemanager_utils.complete_upload( + session, + upload_links.links.complete_upload, + uploaded_parts, + is_directory=is_directory, + ) + return e_tag, upload_links + + +async def _get_file_meta_data( + user_id: UserID, + store_id: LocationID, + s3_object: StorageFileID, + client_session: 
ClientSession | None = None, +) -> FileMetaDataGet: + async with ClientSessionContextManager(client_session) as session: + _logger.debug("Will request metadata for s3_object=%s", s3_object) + + file_metadata: FileMetaDataGet = await storage_client.get_file_metadata( + session=session, + file_id=s3_object, + location_id=store_id, + user_id=user_id, + ) + _logger.debug( + "Result for metadata s3_object=%s, result=%s", + s3_object, + f"{file_metadata=}", + ) + return file_metadata async def entry_exists( user_id: UserID, store_id: LocationID, s3_object: StorageFileID, - client_session: Optional[ClientSession] = None, + client_session: ClientSession | None = None, + *, + is_directory: bool, ) -> bool: - """Returns True if metadata for s3_object is present""" + """ + Returns True if metadata for s3_object is present. + Before returning it also checks if the metadata entry is a directory or a file. + """ try: - async with ClientSessionContextManager(client_session) as session: - log.debug("Will request metadata for s3_object=%s", s3_object) - - file_metadata: FileMetaDataGet = await storage_client.get_file_metadata( - session=session, - file_id=s3_object, - location_id=store_id, - user_id=user_id, - ) - log.debug( - "Result for metadata s3_object=%s, result=%s", - s3_object, - f"{file_metadata=}", - ) - return bool(file_metadata.file_id == s3_object) - except exceptions.S3InvalidPathError: + file_metadata: FileMetaDataGet = await _get_file_meta_data( + user_id, store_id, s3_object, client_session + ) + result: bool = ( + file_metadata.file_id == s3_object + and file_metadata.is_directory == is_directory + ) + return result + except exceptions.S3InvalidPathError as err: + _logger.debug( + "Failed request metadata for s3_object=%s with %s", s3_object, err + ) return False +@dataclass(kw_only=True, frozen=True, slots=True) +class FileMetaData: + location: LocationID + etag: ETag + + async def get_file_metadata( user_id: UserID, store_id: LocationID, s3_object: StorageFileID, - client_session: Optional[ClientSession] = None, -) -> tuple[LocationID, ETag]: + client_session: ClientSession | None = None, +) -> FileMetaData: """ :raises S3InvalidPathError """ - async with ClientSessionContextManager(client_session) as session: - log.debug("Will request metadata for s3_object=%s", s3_object) - file_metadata = await storage_client.get_file_metadata( - session=session, file_id=s3_object, location_id=store_id, user_id=user_id - ) - - log.debug( - "Result for metadata s3_object=%s, result=%s", s3_object, f"{file_metadata=}" + file_metadata: FileMetaDataGet = await _get_file_meta_data( + user_id=user_id, + store_id=store_id, + s3_object=s3_object, + client_session=client_session, ) assert file_metadata.location_id is not None # nosec assert file_metadata.entity_tag is not None # nosec - return (file_metadata.location_id, file_metadata.entity_tag) + return FileMetaData( + location=file_metadata.location_id, + etag=file_metadata.entity_tag, + ) async def delete_file( user_id: UserID, store_id: LocationID, s3_object: StorageFileID, - client_session: Optional[ClientSession] = None, + client_session: ClientSession | None = None, ) -> None: async with ClientSessionContextManager(client_session) as session: - log.debug("Will delete file for s3_object=%s", s3_object) + _logger.debug("Will delete file for s3_object=%s", s3_object) await storage_client.delete_file( session=session, file_id=s3_object, location_id=store_id, user_id=user_id ) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone.py 
b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone.py index e58b25f245a..db5e107b753 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone.py @@ -2,34 +2,76 @@ import logging import re import shlex -import urllib.parse +from asyncio.streams import StreamReader +from collections.abc import AsyncIterator from contextlib import asynccontextmanager from pathlib import Path -from typing import AsyncGenerator, Optional +from typing import Final -from aiocache import cached +from aiocache import cached # type: ignore[import-untyped] from aiofiles import tempfile -from models_library.api_schemas_storage import FileUploadSchema -from pydantic.errors import PydanticErrorMixin +from common_library.errors_classes import OsparcErrorMixin +from pydantic import AnyUrl, BaseModel, ByteSize +from servicelib.progress_bar import ProgressBarData +from servicelib.utils import logged_gather from settings_library.r_clone import RCloneSettings from settings_library.utils_r_clone import get_r_clone_config -logger = logging.getLogger(__name__) +from ._utils import BaseLogParser +from .r_clone_utils import ( + CommandResultCaptureParser, + DebugLogParser, + SyncProgressLogParser, +) +_S3_CONFIG_KEY_DESTINATION: Final[str] = "s3-destination" +_S3_CONFIG_KEY_SOURCE: Final[str] = "s3-source" -class RCloneFailedError(PydanticErrorMixin, RuntimeError): - msg_template: str = "Command {command} finished with exception:\n{stdout}" +_logger = logging.getLogger(__name__) + + +class BaseRCloneError(OsparcErrorMixin, RuntimeError): + ... + + +class RCloneFailedError(BaseRCloneError): + msg_template: str = ( + "Command {command} finished with exit code={returncode}:\n{command_output}" + ) + + +class RCloneDirectoryNotFoundError(BaseRCloneError): + msg_template: str = ( + "Provided path '{local_directory_path}' is a file. Expects a directory!" + ) @asynccontextmanager -async def _config_file(config: str) -> AsyncGenerator[str, None]: +async def _config_file(config: str) -> AsyncIterator[str]: async with tempfile.NamedTemporaryFile("w") as f: await f.write(config) await f.flush() + assert isinstance(f.name, str) # nosec yield f.name -async def _async_command(*cmd: str, cwd: Optional[str] = None) -> str: +async def _read_stream(stream: StreamReader, r_clone_log_parsers: list[BaseLogParser]): + while True: + line: bytes = await stream.readline() + if line: + decoded_line = line.decode() + await logged_gather( + *[parser(decoded_line) for parser in r_clone_log_parsers] + ) + else: + break + + +async def _async_r_clone_command( + *cmd: str, + r_clone_log_parsers: list[BaseLogParser] | None = None, + cwd: str | None = None, +) -> str: str_cmd = " ".join(cmd) proc = await asyncio.create_subprocess_shell( str_cmd, @@ -39,76 +81,222 @@ async def _async_command(*cmd: str, cwd: Optional[str] = None) -> str: cwd=cwd, ) - stdout, _ = await proc.communicate() - decoded_stdout = stdout.decode() + command_result_parser = CommandResultCaptureParser() + r_clone_log_parsers = ( + [*r_clone_log_parsers, command_result_parser] + if r_clone_log_parsers + else [command_result_parser] + ) + + assert proc.stdout # nosec + await asyncio.wait( + [asyncio.create_task(_read_stream(proc.stdout, [*r_clone_log_parsers]))] + ) + + # NOTE: ANE not sure why you do this call here. The above one already reads out the stream. 
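# Clarifying comment (descriptive only, answers the NOTE above): once _read_stream() has
# drained proc.stdout, communicate() returns empty output; its remaining effect is to wait
# for the subprocess to exit so that proc.returncode (checked below) is populated. An
# explicit `await proc.wait()` would achieve the same and make the intent clearer.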
+ _stdout, _stderr = await proc.communicate() + + command_output = command_result_parser.get_output() if proc.returncode != 0: - raise RCloneFailedError(command=str_cmd, stdout=decoded_stdout) + raise RCloneFailedError( + command=str_cmd, + command_output=command_output, + returncode=proc.returncode, + ) - logger.debug("'%s' result:\n%s", str_cmd, decoded_stdout) - return decoded_stdout + _logger.debug("'%s' result:\n%s", str_cmd, command_output) + return command_output @cached() -async def is_r_clone_available(r_clone_settings: Optional[RCloneSettings]) -> bool: +async def is_r_clone_available(r_clone_settings: RCloneSettings | None) -> bool: """returns: True if the `rclone` cli is installed and a configuration is provided""" if r_clone_settings is None: return False try: - await _async_command("rclone", "--version") + await _async_r_clone_command("rclone", "--version") return True except RCloneFailedError: return False -async def sync_local_to_s3( - local_file_path: Path, +def _get_exclude_filters(exclude_patterns: set[str] | None) -> list[str]: + if exclude_patterns is None: + return [] + + exclude_options: list[str] = [] + for entry in exclude_patterns: + exclude_options.append("--exclude") + # NOTE: in rclone ** is the equivalent of * in unix + # for details about rclone filters https://rclone.org/filtering/ + exclude_options.append(entry.replace("*", "**")) + + return exclude_options + + +class _RCloneSize(BaseModel): + count: int + bytes: ByteSize + sizeless: int + + +async def _get_folder_size( r_clone_settings: RCloneSettings, - upload_file_links: FileUploadSchema, + *, + local_dir: Path, + folder: Path, + s3_config_key: str, +) -> ByteSize: + r_clone_config_file_content = get_r_clone_config( + r_clone_settings, s3_config_key=s3_config_key + ) + async with _config_file(r_clone_config_file_content) as config_file_name: + r_clone_command = ( + "rclone", + f"--config {config_file_name}", + "size", + f"{folder}", + "--json", + "--links", + ) + + result = await _async_r_clone_command( + *r_clone_command, + cwd=f"{local_dir.resolve()}", + ) + + rclone_folder_size_result = _RCloneSize.model_validate_json(result) + _logger.debug( + "RClone size call for %s: %s", f"{folder}", f"{rclone_folder_size_result}" + ) + return rclone_folder_size_result.bytes + + +async def _sync_sources( + r_clone_settings: RCloneSettings, + progress_bar: ProgressBarData, + *, + source: str, + destination: str, + local_dir: Path, + s3_config_key: str, + exclude_patterns: set[str] | None, + debug_logs: bool, ) -> None: - """_summary_ - :raises e: RCloneFailedError - """ - assert len(upload_file_links.urls) == 1 # nosec - s3_link = urllib.parse.unquote(upload_file_links.urls[0]) - s3_path = re.sub(r"^s3://", "", s3_link) - logger.debug(" %s; %s", f"{s3_link=}", f"{s3_path=}") + folder_size = await _get_folder_size( + r_clone_settings, + local_dir=local_dir, + folder=Path(source), + s3_config_key=s3_config_key, + ) - r_clone_config_file_content = get_r_clone_config(r_clone_settings) + r_clone_config_file_content = get_r_clone_config( + r_clone_settings, s3_config_key=s3_config_key + ) async with _config_file(r_clone_config_file_content) as config_file_name: - source_path = local_file_path - destination_path = Path(s3_path) - file_name = local_file_path.name - # FIXME: capture progress and connect progressbars or some event to inform the UI - - # rclone only acts upon directories, so to target a specific file - # we must run the command from the file's directory. 
See below - # example for further details: - # - # local_file_path=`/tmp/pytest-of-silenthk/pytest-80/test_sync_local_to_s30/filee3e70682-c209-4cac-a29f-6fbed82c07cd.txt` - # s3_path=`simcore/00000000-0000-0000-0000-000000000001/00000000-0000-0000-0000-000000000002/filee3e70682-c209-4cac-a29f-6fbed82c07cd.txt` - # - # rclone - # --config - # /tmp/tmpd_1rtmss - # sync - # '/tmp/pytest-of-silenthk/pytest-80/test_sync_local_to_s30' - # 'dst:simcore/00000000-0000-0000-0000-000000000001/00000000-0000-0000-0000-000000000002' - # --progress - # --copy-links - # --include - # 'filee3e70682-c209-4cac-a29f-6fbed82c07cd.txt' r_clone_command = ( "rclone", "--config", config_file_name, + "--retries", + f"{r_clone_settings.R_CLONE_OPTION_RETRIES}", + "--transfers", + f"{r_clone_settings.R_CLONE_OPTION_TRANSFERS}", + # below two options reduce to a minimum the memory footprint + # https://forum.rclone.org/t/how-to-set-a-memory-limit/10230/4 + "--buffer-size", # docs https://rclone.org/docs/#buffer-size-size + r_clone_settings.R_CLONE_OPTION_BUFFER_SIZE, + "--use-json-log", + # frequent polling for faster progress updates + "--stats", + "200ms", + "--verbose", "sync", - shlex.quote(f"{source_path.parent}"), - shlex.quote(f"dst:{destination_path.parent}"), - "--progress", - "--copy-links", - "--include", - shlex.quote(f"{file_name}"), + shlex.quote(source), + shlex.quote(destination), + # filter options + *_get_exclude_filters(exclude_patterns), + "--links", ) - await _async_command(*r_clone_command, cwd=f"{source_path.parent}") + async with progress_bar.sub_progress( + steps=folder_size, + progress_unit="Byte", + description=f"transferring {local_dir.name}", + ) as sub_progress: + r_clone_log_parsers: list[BaseLogParser] = ( + [DebugLogParser()] if debug_logs else [] + ) + r_clone_log_parsers.append(SyncProgressLogParser(sub_progress)) + + await _async_r_clone_command( + *r_clone_command, + r_clone_log_parsers=r_clone_log_parsers, + cwd=f"{local_dir}", + ) + + +def _raise_if_directory_is_file(local_directory_path: Path) -> None: + if local_directory_path.exists() and local_directory_path.is_file(): + raise RCloneDirectoryNotFoundError(local_directory_path=local_directory_path) + + +async def sync_local_to_s3( + r_clone_settings: RCloneSettings, + progress_bar: ProgressBarData, + *, + local_directory_path: Path, + upload_s3_link: AnyUrl, + exclude_patterns: set[str] | None = None, + debug_logs: bool = False, +) -> None: + """transfer the contents of a local directory to an s3 path + + :raises e: RCloneFailedError + """ + _raise_if_directory_is_file(local_directory_path) + + upload_s3_path = re.sub(r"^s3://", "", str(upload_s3_link)) + _logger.debug(" %s; %s", f"{upload_s3_link=}", f"{upload_s3_path=}") + + await _sync_sources( + r_clone_settings, + progress_bar, + source=f"{local_directory_path}", + destination=f"{_S3_CONFIG_KEY_DESTINATION}:{upload_s3_path}", + local_dir=local_directory_path, + s3_config_key=_S3_CONFIG_KEY_DESTINATION, + exclude_patterns=exclude_patterns, + debug_logs=debug_logs, + ) + + +async def sync_s3_to_local( + r_clone_settings: RCloneSettings, + progress_bar: ProgressBarData, + *, + local_directory_path: Path, + download_s3_link: str, + exclude_patterns: set[str] | None = None, + debug_logs: bool = False, +) -> None: + """transfer the contents of a path in s3 to a local directory + + :raises e: RCloneFailedError + """ + _raise_if_directory_is_file(local_directory_path) + + download_s3_path = re.sub(r"^s3://", "", download_s3_link) + _logger.debug(" %s; %s", f"{download_s3_link=}", 
f"{download_s3_path=}") + + await _sync_sources( + r_clone_settings, + progress_bar, + source=f"{_S3_CONFIG_KEY_SOURCE}:{download_s3_path}", + destination=f"{local_directory_path}", + local_dir=local_directory_path, + s3_config_key=_S3_CONFIG_KEY_SOURCE, + exclude_patterns=exclude_patterns, + debug_logs=debug_logs, + ) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone_utils.py new file mode 100644 index 00000000000..c4ef5e83e2a --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/r_clone_utils.py @@ -0,0 +1,101 @@ +import datetime +import logging +from typing import Union + +from models_library.utils.change_case import snake_to_camel +from pydantic import BaseModel, ByteSize, ConfigDict, Field, TypeAdapter +from servicelib.logging_utils import log_catch +from servicelib.progress_bar import ProgressBarData + +from ._utils import BaseLogParser + +_logger = logging.getLogger(__name__) + + +class _RCloneSyncMessageBase(BaseModel): + level: str = Field(..., description="log level") + msg: str + source: str = Field(..., description="source code information") + time: datetime.datetime + + +class _RCloneSyncUpdatedMessage(_RCloneSyncMessageBase): + object: str = Field(..., description="object file name") + + +class _RCloneSyncTransferCompletedMessage(_RCloneSyncMessageBase): + object: str = Field(..., description="object file name") + size: ByteSize + + +class _RCloneSyncTransferringStats(BaseModel): + bytes: ByteSize + total_bytes: ByteSize + model_config = ConfigDict(alias_generator=snake_to_camel) + + +class _RCloneSyncTransferringMessage(_RCloneSyncMessageBase): + stats: _RCloneSyncTransferringStats + + +_RCloneSyncMessages = Union[ # noqa: UP007 + _RCloneSyncTransferCompletedMessage, + _RCloneSyncUpdatedMessage, + _RCloneSyncTransferringMessage, + _RCloneSyncMessageBase, +] + + +class SyncProgressLogParser(BaseLogParser): + """ + log processor that only yields and progress updates detected in the logs. 
+ + + This command: + rclone --buffer-size 0M --transfers 5 sync mys3:simcore/5cfdef88-013b-11ef-910e-0242ac14003e/2d544003-9eb8-47e4-bcf7-95a8c31845f7/workspace ./tests3 --progress + generates this but the rclone modifies the terminal printed lines which python does not like so much + Transferred: 4.666 GiB / 4.666 GiB, 100%, 530.870 MiB/s, ETA 0s + Transferred: 4 / 4, 100% + Elapsed time: 9.6s + + This other command: + rclone --buffer-size 0M --transfers 5 --use-json-log --stats-log-level INFO -v --stats 500ms sync mys3:simcore/5cfdef88-013b-11ef-910e-0242ac14003e/2d544003-9eb8-47e4-bcf7-95a8c31845f7/workspace ./tests3 + prints stuff such as: + {"level":"info","msg":"Copied (new)","object":"README.ipynb","objectType":"*s3.Object","size":5123,"source":"operations/copy.go:360","time":"2024-04-23T14:05:10.408277+00:00"} + {"level":"info","msg":"Copied (new)","object":".hidden_do_not_remove","objectType":"*s3.Object","size":219,"source":"operations/copy.go:360","time":"2024-04-23T14:05:10.408246+00:00"} + {"level":"info","msg":"Copied (new)","object":"10MBfile","objectType":"*s3.Object","size":10000000,"source":"operations/copy.go:360","time":"2024-04-23T14:05:10.437499+00:00"} + {"level":"info","msg":"\nTransferred: \t 788.167 MiB / 4.666 GiB, 16%, 0 B/s, ETA -\nTransferred: 3 / 4, 75%\nElapsed time: 0.5s\nTransferring:\n * 5GBfile: 16% /4.657Gi, 0/s, -\n\n","source":"accounting/stats.go:526","stats":{"bytes":826452830,"checks":0,"deletedDirs":0,"deletes":0,"elapsedTime":0.512036999,"errors":0,"eta":null,"fatalError":false,"renames":0,"retryError":false,"serverSideCopies":0,"serverSideCopyBytes":0,"serverSideMoveBytes":0,"serverSideMoves":0,"speed":0,"totalBytes":5010005342,"totalChecks":0,"totalTransfers":4,"transferTime":0.497064856,"transferring":[{"bytes":816447488,"dstFs":"/devel/tests3","eta":null,"group":"global_stats","name":"5GBfile","percentage":16,"size":5000000000,"speed":1662518962.4875596,"speedAvg":0,"srcFs":"mys3:simcore/5cfdef88-013b-11ef-910e-0242ac14003e/2d544003-9eb8-47e4-bcf7-95a8c31845f7/workspace"}],"transfers":3},"time":"2024-04-23T14:05:10.901275+00:00"} + {"level":"info","msg":"\nTransferred: \t 1.498 GiB / 4.666 GiB, 32%, 0 B/s, ETA -\nTransferred: 3 / 4, 75%\nElapsed time: 1.0s\nTransferring:\n * 5GBfile: 31% /4.657Gi, 0/s, -\n\n","source":"accounting/stats.go:526","stats":{"bytes":1608690526,"checks":0,"deletedDirs":0,"deletes":0,"elapsedTime":1.012386594,"errors":0,"eta":null,"fatalError":false,"renames":0,"retryError":false,"serverSideCopies":0,"serverSideCopyBytes":0,"serverSideMoveBytes":0,"serverSideMoves":0,"speed":0,"totalBytes":5010005342,"totalChecks":0,"totalTransfers":4,"transferTime":0.997407347,"transferring":[{"bytes":1598816256,"dstFs":"/devel/tests3","eta":null,"group":"global_stats","name":"5GBfile","percentage":31,"size":5000000000,"speed":1612559346.2428129,"speedAvg":0,"srcFs":"mys3:simcore/5cfdef88-013b-11ef-910e-0242ac14003e/2d544003-9eb8-47e4-bcf7-95a8c31845f7/workspace"}],"transfers":3},"time":"2024-04-23T14:05:11.40166+00:00"} + But this prints each file, do we really want to keep bookkeeping of all this??? 
that can potentially be a lot of files + + """ + + def __init__(self, progress_bar: ProgressBarData) -> None: + self.progress_bar = progress_bar + + async def __call__(self, logs: str) -> None: + _logger.debug("received logs: %s", logs) + with log_catch(_logger, reraise=False): + rclone_message: _RCloneSyncMessages = TypeAdapter( + _RCloneSyncMessages + ).validate_json(logs) + + if isinstance(rclone_message, _RCloneSyncTransferringMessage): + await self.progress_bar.set_(rclone_message.stats.bytes) + + +class DebugLogParser(BaseLogParser): + async def __call__(self, logs: str) -> None: + _logger.debug("|>>>| %s |", logs) + + +class CommandResultCaptureParser(BaseLogParser): + def __init__(self) -> None: + super().__init__() + self._logs: list[str] = [] + + async def __call__(self, logs: str) -> None: + self._logs.append(logs) + + def get_output(self) -> str: + return "".join(self._logs) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/settings.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/settings.py deleted file mode 100644 index fdc6c4684f3..00000000000 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/settings.py +++ /dev/null @@ -1,14 +0,0 @@ -from pydantic import Field, NonNegativeInt, PositiveInt -from settings_library.base import BaseCustomSettings -from settings_library.postgres import PostgresSettings -from settings_library.storage import StorageSettings - -from .constants import MINUTE - - -class NodePortsSettings(BaseCustomSettings): - NODE_PORTS_STORAGE: StorageSettings = Field(auto_default_from_env=True) - POSTGRES_SETTINGS: PostgresSettings = Field(auto_default_from_env=True) - - NODE_PORTS_MULTIPART_UPLOAD_COMPLETION_TIMEOUT_S: NonNegativeInt = 5 * MINUTE - NODE_PORTS_IO_NUM_RETRY_ATTEMPTS: PositiveInt = 5 diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_client.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_client.py index b13a9f76c3c..71d80febbc0 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_client.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_client.py @@ -1,74 +1,156 @@ -from functools import lru_cache, wraps +import datetime +import logging +from collections.abc import AsyncIterator, Callable +from contextlib import asynccontextmanager +from functools import wraps from json import JSONDecodeError -from typing import Callable +from typing import Any, Coroutine, ParamSpec, TypeAlias, TypeVar from urllib.parse import quote -from aiohttp import ClientSession, web +from aiohttp import ClientResponse, ClientSession +from aiohttp import client as aiohttp_client_module from aiohttp.client_exceptions import ClientConnectionError, ClientResponseError -from models_library.api_schemas_storage import ( +from models_library.api_schemas_storage.storage_schemas import ( FileLocationArray, FileMetaDataGet, FileUploadSchema, LinkType, - LocationID, PresignedLink, - StorageFileID, ) +from models_library.basic_types import SHA256Str from models_library.generics import Envelope +from models_library.projects_nodes_io import LocationID, StorageFileID from models_library.users import UserID from pydantic import ByteSize from pydantic.networks import AnyUrl +from servicelib.aiohttp import status +from tenacity import RetryCallState +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import 
wait_exponential from . import exceptions -from .settings import NodePortsSettings +from .storage_endpoint import get_base_url, get_basic_auth +_logger = logging.getLogger(__name__) -def handle_client_exception(handler: Callable): + +RequestContextManager: TypeAlias = ( + aiohttp_client_module._RequestContextManager # pylint: disable=protected-access # noqa: SLF001 +) + +P = ParamSpec("P") +R = TypeVar("R") + + +def handle_client_exception( + handler: Callable[P, Coroutine[Any, Any, R]], +) -> Callable[P, Coroutine[Any, Any, R]]: @wraps(handler) - async def wrapped(*args, **kwargs): + async def wrapped(*args: P.args, **kwargs: P.kwargs) -> R: try: - ret = await handler(*args, **kwargs) - return ret + return await handler(*args, **kwargs) except ClientResponseError as err: - if err.status == web.HTTPNotFound.status_code: - raise exceptions.S3InvalidPathError( - kwargs.get("file_id", "unknown file id") - ) - if err.status == web.HTTPUnprocessableEntity.status_code: - raise exceptions.StorageInvalidCall( - f"Invalid call to storage: {err.message}" - ) - if 500 > err.status > 399: + if err.status == status.HTTP_404_NOT_FOUND: + msg = kwargs.get("file_id", "unknown file id") + raise exceptions.S3InvalidPathError(msg) from err + if err.status == status.HTTP_422_UNPROCESSABLE_ENTITY: + msg = f"Invalid call to storage: {err.message}" + raise exceptions.StorageInvalidCall(msg) from err + if ( + status.HTTP_500_INTERNAL_SERVER_ERROR + > err.status + >= status.HTTP_400_BAD_REQUEST + ): raise exceptions.StorageInvalidCall(err.message) from err - if err.status > 500: + if err.status > status.HTTP_500_INTERNAL_SERVER_ERROR: raise exceptions.StorageServerIssue(err.message) from err except ClientConnectionError as err: - raise exceptions.StorageServerIssue(f"{err}") from err + msg = f"{err}" + raise exceptions.StorageServerIssue(msg) from err except JSONDecodeError as err: - raise exceptions.StorageServerIssue(f"{err}") from err + msg = f"{err}" + raise exceptions.StorageServerIssue(msg) from err + # satisfy mypy + msg = "Unhandled control flow" + raise RuntimeError(msg) return wrapped -@lru_cache -def _base_url() -> str: - settings = NodePortsSettings.create_from_envs() - return settings.NODE_PORTS_STORAGE.api_base_url +def _after_log(log: logging.Logger) -> Callable[[RetryCallState], None]: + def log_it(retry_state: RetryCallState) -> None: + assert retry_state.outcome # nosec + e = retry_state.outcome.exception() + log.error( + "Request timed-out after %s attempts with an unexpected error: '%s'", + retry_state.attempt_number, + f"{e=}", + ) + + return log_it + + +def _session_method( + session: ClientSession, method: str, url: str, **kwargs +) -> RequestContextManager: + return session.request(method, url, auth=get_basic_auth(), **kwargs) + + +@asynccontextmanager +async def retry_request( + session: ClientSession, + method: str, + url: str, + *, + expected_status: int, + give_up_after: datetime.timedelta = datetime.timedelta(seconds=30), + **kwargs, +) -> AsyncIterator[ClientResponse]: + async for attempt in AsyncRetrying( + stop=stop_after_delay(give_up_after.total_seconds()), + wait=wait_exponential(min=1), + retry=retry_if_exception_type(ClientConnectionError), + before_sleep=before_sleep_log(_logger, logging.WARNING), + after=_after_log(_logger), + reraise=True, + ): + with attempt: + async with _session_method(session, method, url, **kwargs) as response: + if response.status != expected_status: + # this is a more precise raise_for_status() + error_msg = await response.json() + response.release() 
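# Descriptive comment: the connection was released above; the ClientResponseError raised next
# carries the decoded error payload so that handle_client_exception can map it to a domain
# exception (S3InvalidPathError, StorageInvalidCall, StorageServerIssue).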
+ raise ClientResponseError( + response.request_info, + response.history, + status=response.status, + message=f"Received {response.status} but was expecting {expected_status=}: '{error_msg=}'", + headers=response.headers, + ) + + yield response @handle_client_exception -async def get_storage_locations( +async def list_storage_locations( *, session: ClientSession, user_id: UserID ) -> FileLocationArray: - async with session.get( - f"{_base_url()}/locations", params={"user_id": f"{user_id}"} + async with retry_request( + session, + "GET", + f"{get_base_url()}/locations", + expected_status=status.HTTP_200_OK, + params={"user_id": f"{user_id}"}, ) as response: - response.raise_for_status() - locations_enveloped = Envelope[FileLocationArray].parse_obj( + locations_enveloped = Envelope[FileLocationArray].model_validate( await response.json() ) if locations_enveloped.data is None: - raise exceptions.StorageServerIssue("Storage server is not reponding") + msg = "Storage server is not responding" + raise exceptions.StorageServerIssue(msg) return locations_enveloped.data @@ -85,23 +167,21 @@ async def get_download_file_link( :raises exceptions.StorageInvalidCall :raises exceptions.StorageServerIssue """ - async with session.get( - f"{_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}", + async with retry_request( + session, + "GET", + f"{get_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}", + expected_status=status.HTTP_200_OK, params={"user_id": f"{user_id}", "link_type": link_type.value}, ) as response: - response.raise_for_status() - - presigned_link_enveloped = Envelope[PresignedLink].parse_obj( + presigned_link_enveloped = Envelope[PresignedLink].model_validate( await response.json() ) - if ( - presigned_link_enveloped.data is None - or not presigned_link_enveloped.data.link - ): - raise exceptions.S3InvalidPathError( - f"file {location_id}@{file_id} not found" - ) - return presigned_link_enveloped.data.link + if not presigned_link_enveloped.data or not presigned_link_enveloped.data.link: + msg = f"file {location_id}@{file_id} not found" + raise exceptions.S3InvalidPathError(msg) + url: AnyUrl = presigned_link_enveloped.data.link + return url @handle_client_exception @@ -113,6 +193,8 @@ async def get_upload_file_links( user_id: UserID, link_type: LinkType, file_size: ByteSize, + is_directory: bool, + sha256_checksum: SHA256Str | None, ) -> FileUploadSchema: """ :raises exceptions.StorageServerIssue: _description_ @@ -123,17 +205,23 @@ async def get_upload_file_links( "user_id": f"{user_id}", "link_type": link_type.value, "file_size": int(file_size), + "is_directory": f"{is_directory}".lower(), } - async with session.put( - f"{_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}", + if sha256_checksum: + query_params["sha256_checksum"] = f"{sha256_checksum}" + async with retry_request( + session, + "PUT", + f"{get_base_url()}/locations/{location_id}/files/{file_id}", + expected_status=status.HTTP_200_OK, params=query_params, ) as response: - response.raise_for_status() - file_upload_links_enveloped = Envelope[FileUploadSchema].parse_obj( + file_upload_links_enveloped = Envelope[FileUploadSchema].model_validate( await response.json() ) if file_upload_links_enveloped.data is None: - raise exceptions.StorageServerIssue("Storage server is not reponding") + msg = "Storage server is not responding" + raise exceptions.StorageServerIssue(msg) return file_upload_links_enveloped.data @@ -145,16 +233,20 @@ async def get_file_metadata( location_id: 
LocationID, user_id: UserID, ) -> FileMetaDataGet: - async with session.get( - f"{_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}/metadata", + async with retry_request( + session, + "GET", + f"{get_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}/metadata", + expected_status=status.HTTP_200_OK, params={"user_id": f"{user_id}"}, ) as response: - response.raise_for_status() - file_metadata_enveloped = Envelope[FileMetaDataGet].parse_obj( - await response.json() - ) - if file_metadata_enveloped.data is None: + payload = await response.json() + if not payload.get("data"): + # NOTE: keeps backwards compatibility raise exceptions.S3InvalidPathError(file_id) + + file_metadata_enveloped = Envelope[FileMetaDataGet].model_validate(payload) + assert file_metadata_enveloped.data # nosec return file_metadata_enveloped.data @@ -166,14 +258,17 @@ async def list_file_metadata( location_id: LocationID, uuid_filter: str, ) -> list[FileMetaDataGet]: - async with session.get( - f"{_base_url()}/locations/{location_id}/files/metadata", + async with retry_request( + session, + "GET", + f"{get_base_url()}/locations/{location_id}/files/metadata", + expected_status=status.HTTP_200_OK, params={"user_id": f"{user_id}", "uuid_filter": uuid_filter}, ) as resp: - resp.raise_for_status() - envelope = Envelope[list[FileMetaDataGet]].parse_obj(await resp.json()) + envelope = Envelope[list[FileMetaDataGet]].model_validate(await resp.json()) assert envelope.data is not None # nosec - return envelope.data + file_meta_data: list[FileMetaDataGet] = envelope.data + return file_meta_data @handle_client_exception @@ -184,8 +279,11 @@ async def delete_file( location_id: LocationID, user_id: UserID, ) -> None: - async with session.delete( - f"{_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}", + async with retry_request( + session, + "DELETE", + f"{get_base_url()}/locations/{location_id}/files/{quote(file_id, safe='')}", + expected_status=status.HTTP_204_NO_CONTENT, params={"user_id": f"{user_id}"}, - ) as response: - response.raise_for_status() + ): + ... 
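For orientation, a minimal usage sketch of the retry_request helper introduced in storage_client.py above: it retries the request on ClientConnectionError with exponential backoff (up to give_up_after) and raises ClientResponseError when the response status differs from expected_status. The wrapper coroutine, the storage URL and the query parameters below are illustrative assumptions and not part of this patch; the import path is inferred from the package layout, and the NodePortsSettings environment variables are assumed to be configured.

from aiohttp import ClientSession
from servicelib.aiohttp import status
from simcore_sdk.node_ports_common.storage_client import retry_request  # path assumed from package layout


async def _example_list_locations() -> list:
    """Illustrative caller only: fetch storage locations, insisting on a 200 OK."""
    async with ClientSession() as session:
        async with retry_request(
            session,
            "GET",
            "http://storage:8080/v0/locations",  # assumed endpoint, for illustration
            expected_status=status.HTTP_200_OK,
            params={"user_id": "1"},
        ) as response:
            payload = await response.json()
            # storage responses are enveloped, i.e. {"data": ..., "error": ...}
            return payload.get("data", [])

The give_up_after parameter can be tuned per call; the helpers above rely on its 30-second default.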
diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_endpoint.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_endpoint.py new file mode 100644 index 00000000000..7efbf45af37 --- /dev/null +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_common/storage_endpoint.py @@ -0,0 +1,35 @@ +from functools import lru_cache + +from aiohttp import BasicAuth +from settings_library.node_ports import NodePortsSettings + + +@lru_cache +def is_storage_secure() -> bool: + settings = NodePortsSettings.create_from_envs() + node_ports_storage_auth = settings.NODE_PORTS_STORAGE_AUTH + is_secure: bool = node_ports_storage_auth.STORAGE_SECURE + return is_secure + + +@lru_cache +def get_base_url() -> str: + settings = NodePortsSettings.create_from_envs() + # pylint:disable=no-member + base_url: str = settings.NODE_PORTS_STORAGE_AUTH.api_base_url + return base_url + + +@lru_cache +def get_basic_auth() -> BasicAuth | None: + settings = NodePortsSettings.create_from_envs() + node_ports_storage_auth = settings.NODE_PORTS_STORAGE_AUTH + + if node_ports_storage_auth.auth_required: + assert node_ports_storage_auth.STORAGE_USERNAME is not None # nosec + assert node_ports_storage_auth.STORAGE_PASSWORD is not None # nosec + return BasicAuth( + login=node_ports_storage_auth.STORAGE_USERNAME, + password=node_ports_storage_auth.STORAGE_PASSWORD.get_secret_value(), + ) + return None diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py index b4ddfe97d98..8874f98efe7 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/__init__.py @@ -1,15 +1,15 @@ import logging -from typing import Optional +from models_library.api_schemas_storage.storage_schemas import LinkType as FileLinkType from models_library.projects import ProjectIDStr from models_library.projects_nodes_io import NodeIDStr from models_library.users import UserID +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings from ..node_ports_common import exceptions from ..node_ports_common.dbmanager import DBManager from ..node_ports_common.file_io_utils import LogRedirectCB -from ..node_ports_common.storage_client import LinkType as FileLinkType from .nodeports_v2 import Nodeports from .port import Port from .serialization_v2 import load @@ -22,12 +22,13 @@ async def ports( project_id: ProjectIDStr, node_uuid: NodeIDStr, *, - db_manager: Optional[DBManager] = None, - r_clone_settings: Optional[RCloneSettings] = None, - io_log_redirect_cb: Optional[LogRedirectCB] = None + db_manager: DBManager | None = None, + r_clone_settings: RCloneSettings | None = None, + io_log_redirect_cb: LogRedirectCB | None = None, + aws_s3_cli_settings: AwsS3CliSettings | None = None ) -> Nodeports: log.debug("creating node_ports_v2 object using provided dbmanager: %s", db_manager) - # FIXME: warning every dbmanager create a new db engine! + # NOTE: warning every dbmanager create a new db engine! 
if db_manager is None: # NOTE: keeps backwards compatibility log.debug("no db manager provided, creating one...") db_manager = DBManager() @@ -40,7 +41,15 @@ async def ports( auto_update=True, r_clone_settings=r_clone_settings, io_log_redirect_cb=io_log_redirect_cb, + aws_s3_cli_settings=aws_s3_cli_settings, ) -__all__ = ("ports", "exceptions", "Port", "FileLinkType") +__all__ = ( + "DBManager", + "exceptions", + "FileLinkType", + "Nodeports", + "Port", + "ports", +) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/links.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/links.py index ef5cf272d41..ad94884c3b0 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/links.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/links.py @@ -1,21 +1,30 @@ from pathlib import Path -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Union from models_library.basic_regex import UUID_RE from models_library.projects_nodes_io import BaseFileLink, DownloadLink from models_library.projects_nodes_io import PortLink as BasePortLink -from pydantic import AnyUrl, Extra, Field, StrictBool, StrictFloat, StrictInt, StrictStr +from pydantic import ( + AnyUrl, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) class PortLink(BasePortLink): - node_uuid: str = Field(..., regex=UUID_RE, alias="nodeUuid") + node_uuid: str = Field(..., pattern=UUID_RE, alias="nodeUuid") # type: ignore[assignment] # This overrides the base class it is ugly but needs its own PR to fix it class FileLink(BaseFileLink): """allow all kind of file links""" - class Config: - extra = Extra.allow + model_config = ConfigDict( + extra="allow", + ) # TODO: needs to be in sync with project_nodes.InputTypes and project_nodes.OutputTypes @@ -27,8 +36,8 @@ class Config: DownloadLink, PortLink, FileLink, - List[Any], # arrays - Dict[str, Any], # object + list[Any], # arrays + dict[str, Any], # object ] # @@ -39,13 +48,22 @@ class Config: # - ItemConcreteValue are the types finally consumed by the actual service port # SchemaValidatedTypes = Union[ - StrictBool, StrictInt, StrictFloat, StrictStr, List[Any], Dict[str, Any] + StrictBool, StrictInt, StrictFloat, StrictStr, list[Any], dict[str, Any] ] ItemValue = Union[SchemaValidatedTypes, AnyUrl] ItemConcreteValue = Union[SchemaValidatedTypes, Path] +ItemConcreteValueTypes = ( + type[StrictBool] + | type[StrictInt] + | type[StrictFloat] + | type[StrictStr] + | type[list[Any]] + | type[dict[str, Any]] + | type[Path] +) -__all__: Tuple[str, ...] = ( +__all__: tuple[str, ...] 
= ( "DataItemValue", "DownloadLink", "FileLink", diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/nodeports_v2.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/nodeports_v2.py index d4baf2fc553..59c73716ca3 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/nodeports_v2.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/nodeports_v2.py @@ -1,21 +1,26 @@ import logging -from collections import deque +import traceback +from abc import ABC, abstractmethod +from asyncio import CancelledError, Task +from collections.abc import Callable, Coroutine from pathlib import Path -from typing import Any, Callable, Coroutine, Optional +from typing import Any +from models_library.api_schemas_storage.storage_schemas import LinkType from models_library.projects import ProjectIDStr from models_library.projects_nodes_io import NodeIDStr +from models_library.services_types import ServicePortKey from models_library.users import UserID -from pydantic import BaseModel, Field, ValidationError -from pydantic.error_wrappers import flatten_errors +from pydantic import BaseModel, ConfigDict, Field, ValidationError +from pydantic_core import InitErrorDetails from servicelib.progress_bar import ProgressBarData from servicelib.utils import logged_gather +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings from ..node_ports_common.dbmanager import DBManager from ..node_ports_common.exceptions import PortNotFound, UnboundPortError from ..node_ports_common.file_io_utils import LogRedirectCB -from ..node_ports_common.storage_client import LinkType from ..node_ports_v2.port import SetKWargs from .links import ItemConcreteValue, ItemValue from .port_utils import is_file_type @@ -24,6 +29,42 @@ log = logging.getLogger(__name__) +# -> @GitHK this looks very dangerous, using a lot of protected stuff, just checking the number of ignores shows it's a bad idea... 
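Side note on the error handling introduced below: with pydantic v2 the per-port failures can no longer be merged via `pydantic.error_wrappers.flatten_errors`; instead `InitErrorDetails` records (imported above from `pydantic_core`) are assembled and turned into a single `ValidationError`. A self-contained illustration of that v2 API, with made-up port keys and messages:

    from pydantic import ValidationError
    from pydantic_core import InitErrorDetails

    line_errors = [
        InitErrorDetails(
            type="value_error",
            loc=("out_1",),
            input="<formatted traceback of the failed task>",
            ctx={"error": ValueError("upload failed")},
        ),
        InitErrorDetails(
            type="value_error",
            loc=("out_2",),
            input="<formatted traceback of the failed task>",
            ctx={"error": ValueError("content-schema validation failed")},
        ),
    ]

    try:
        raise ValidationError.from_exception_data(
            title="Multiple port_key errors", line_errors=line_errors
        )
    except ValidationError as err:
        assert err.error_count() == 2
        assert {e["loc"][0] for e in err.errors()} == {"out_1", "out_2"}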
+def _format_error(task: Task) -> str: + # pylint:disable=protected-access + assert task._exception # nosec # noqa: SLF001 + error_list = traceback.format_exception( + type(task._exception), # noqa: SLF001 + task._exception, # noqa: SLF001 + task._exception.__traceback__, # noqa: SLF001 + ) + return "\n".join(error_list) + + +def _get_error_details(task: Task, port_key: str) -> InitErrorDetails: + # pylint:disable=protected-access + return InitErrorDetails( + type="value_error", + loc=(f"{port_key}",), + input=_format_error(task), + ctx={"error": task._exception}, # noqa: SLF001 + ) + + +class OutputsCallbacks(ABC): + @abstractmethod + async def aborted(self, key: ServicePortKey) -> None: + pass + + @abstractmethod + async def finished_succesfully(self, key: ServicePortKey) -> None: + pass + + @abstractmethod + async def finished_with_error(self, key: ServicePortKey) -> None: + pass + + class Nodeports(BaseModel): """ Represents a node in a project and all its input/output ports @@ -41,11 +82,12 @@ class Nodeports(BaseModel): Coroutine[Any, Any, type["Nodeports"]], ] auto_update: bool = False - r_clone_settings: Optional[RCloneSettings] = None - io_log_redirect_cb: Optional[LogRedirectCB] - - class Config: - arbitrary_types_allowed = True + r_clone_settings: RCloneSettings | None = None + io_log_redirect_cb: LogRedirectCB | None + aws_s3_cli_settings: AwsS3CliSettings | None = None + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) def __init__(self, **data: Any): super().__init__(**data) @@ -53,9 +95,9 @@ def __init__(self, **data: Any): # let's pass ourselves down for input_key in self.internal_inputs: - self.internal_inputs[input_key]._node_ports = self + self.internal_inputs[input_key]._node_ports = self # noqa: SLF001 for output_key in self.internal_outputs: - self.internal_outputs[output_key]._node_ports = self + self.internal_outputs[output_key]._node_ports = self # noqa: SLF001 @property async def inputs(self) -> InputsList: @@ -72,8 +114,8 @@ async def outputs(self) -> OutputsList: return self.internal_outputs async def get_value_link( - self, item_key: str, *, file_link_type: LinkType - ) -> Optional[ItemValue]: + self, item_key: ServicePortKey, *, file_link_type: LinkType + ) -> ItemValue | None: try: return await (await self.inputs)[item_key].get_value( file_link_type=file_link_type @@ -87,8 +129,8 @@ async def get_value_link( ) async def get( - self, item_key: str, progress_bar: Optional[ProgressBarData] = None - ) -> Optional[ItemConcreteValue]: + self, item_key: ServicePortKey, progress_bar: ProgressBarData | None = None + ) -> ItemConcreteValue | None: try: return await (await self.inputs)[item_key].get(progress_bar) except UnboundPortError: @@ -97,7 +139,9 @@ async def get( # if this fails it will raise an exception return await (await self.outputs)[item_key].get(progress_bar) - async def set(self, item_key: str, item_value: ItemConcreteValue) -> None: + async def set( + self, item_key: ServicePortKey, item_value: ItemConcreteValue + ) -> None: # first try to set the inputs. 
try: the_updated_inputs = await self.inputs @@ -111,10 +155,11 @@ async def set(self, item_key: str, item_value: ItemConcreteValue) -> None: async def set_file_by_keymap(self, item_value: Path) -> None: for output in (await self.outputs).values(): - if is_file_type(output.property_type) and output.file_to_key_map: - if item_value.name in output.file_to_key_map: - await output.set(item_value) - return + if (is_file_type(output.property_type) and output.file_to_key_map) and ( + item_value.name in output.file_to_key_map + ): + await output.set(item_value) + return raise PortNotFound(msg=f"output port for item {item_value} not found") async def _node_ports_creator_cb(self, node_uuid: NodeIDStr) -> type["Nodeports"]: @@ -131,15 +176,18 @@ async def _auto_update_from_db(self) -> None: # let's pass ourselves down # pylint: disable=protected-access for input_key in self.internal_inputs: - self.internal_inputs[input_key]._node_ports = self + self.internal_inputs[input_key]._node_ports = self # noqa: SLF001 for output_key in self.internal_outputs: - self.internal_outputs[output_key]._node_ports = self + self.internal_outputs[output_key]._node_ports = self # noqa: SLF001 async def set_multiple( self, - port_values: dict[str, tuple[Optional[ItemConcreteValue], Optional[SetKWargs]]], + port_values: dict[ + ServicePortKey, tuple[ItemConcreteValue | None, SetKWargs | None] + ], *, progress_bar: ProgressBarData, + outputs_callbacks: OutputsCallbacks | None, ) -> None: """ Sets the provided values to the respective input or output ports @@ -148,34 +196,54 @@ async def set_multiple( raises ValidationError """ - tasks = deque() + + async def _set_with_notifications( + port_key: ServicePortKey, + value: ItemConcreteValue | None, + set_kwargs: SetKWargs | None, + sub_progress: ProgressBarData, + ) -> None: + try: + # pylint: disable=protected-access + await self.internal_outputs[port_key]._set( # noqa: SLF001 + value, set_kwargs=set_kwargs, progress_bar=sub_progress + ) + if outputs_callbacks: + await outputs_callbacks.finished_succesfully(port_key) + except UnboundPortError: + # not available try inputs + # if this fails it will raise another exception + # pylint: disable=protected-access + await self.internal_inputs[port_key]._set( # noqa: SLF001 + value, set_kwargs=set_kwargs, progress_bar=sub_progress + ) + except CancelledError: + if outputs_callbacks: + await outputs_callbacks.aborted(port_key) + raise + except Exception: + if outputs_callbacks: + await outputs_callbacks.finished_with_error(port_key) + raise + + tasks = [] async with progress_bar.sub_progress( - steps=len(port_values.items()) + steps=len(port_values.items()), description="set multiple" ) as sub_progress: for port_key, (value, set_kwargs) in port_values.items(): - # pylint: disable=protected-access - try: - tasks.append( - self.internal_outputs[port_key]._set( - value, set_kwargs=set_kwargs, progress_bar=sub_progress - ) - ) - except UnboundPortError: - # not available try inputs - # if this fails it will raise another exception - tasks.append( - self.internal_inputs[port_key]._set( - value, set_kwargs=set_kwargs, progress_bar=sub_progress - ) - ) + tasks.append( + _set_with_notifications(port_key, value, set_kwargs, sub_progress) + ) results = await logged_gather(*tasks) await self.save_to_db_cb(self) # groups all ValidationErrors pre-pending 'port_key' to loc and raises ValidationError - if errors := [ - flatten_errors(r, self.__config__, loc=(f"{port_key}",)) - for port_key, r in zip(port_values.keys(), results) - if isinstance(r, 
ValidationError) + if error_details := [ + _get_error_details(r, port_key) + for port_key, r in zip(port_values.keys(), results, strict=False) + if r is not None ]: - raise ValidationError(errors, model=self) + raise ValidationError.from_exception_data( + title="Multiple port_key errors", line_errors=error_details + ) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py index 5a116157613..014aff56529 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port.py @@ -1,15 +1,25 @@ import logging import os +from collections.abc import Callable from dataclasses import dataclass from pathlib import Path from pprint import pformat -from typing import Any, Callable, Optional - -from models_library.services import PROPERTY_KEY_RE, BaseServiceIOModel -from pydantic import AnyUrl, Field, PrivateAttr, ValidationError, validator -from pydantic.tools import parse_obj_as +from typing import Any + +from models_library.api_schemas_storage.storage_schemas import LinkType +from models_library.services_io import BaseServiceIOModel +from models_library.services_types import ServicePortKey +from pydantic import ( + AnyUrl, + ConfigDict, + Field, + PrivateAttr, + TypeAdapter, + ValidationError, + ValidationInfo, + field_validator, +) from servicelib.progress_bar import ProgressBarData -from simcore_sdk.node_ports_common.storage_client import LinkType from ..node_ports_common.exceptions import ( AbsoluteSymlinkIsNotUploadableException, @@ -53,7 +63,7 @@ def _check_if_symlink_is_valid(symlink: Path) -> None: def can_parse_as(v, *types) -> bool: try: for type_ in types: - parse_obj_as(type_, v) + TypeAdapter(type_).validate_python(v) return True except ValidationError: return False @@ -61,26 +71,30 @@ def can_parse_as(v, *types) -> bool: @dataclass(frozen=True) class SetKWargs: - file_base_path: Optional[Path] = None + file_base_path: Path | None = None class Port(BaseServiceIOModel): - key: str = Field(..., regex=PROPERTY_KEY_RE) - widget: Optional[dict[str, Any]] = None - default_value: Optional[DataItemValue] = Field(None, alias="defaultValue") + key: ServicePortKey + widget: dict[str, Any] | None = None + default_value: DataItemValue | None = Field( + None, alias="defaultValue", union_mode="left_to_right" + ) - value: Optional[DataItemValue] = None + value: DataItemValue | None = Field( + None, validate_default=True, union_mode="left_to_right" + ) # Different states of "value" # - e.g. typically after resolving a port's link, a download link, ... # - lazy evaluation using get_* members # - used to run validation & conversion of resolved PortContentTypes values # - excluded from all model export - value_item: Optional[ItemValue] = Field(None, exclude=True) - value_concrete: Optional[ItemConcreteValue] = Field(None, exclude=True) + value_item: ItemValue | None = Field(None, exclude=True, union_mode="left_to_right") + value_concrete: ItemConcreteValue | None = Field( + None, exclude=True, union_mode="left_to_right" + ) - # Types expected in _value_concrete - _py_value_type: tuple[type[ItemConcreteValue], ...] 
= PrivateAttr() # Function to convert from ItemValue -> ItemConcreteValue _py_value_converter: Callable[[Any], ItemConcreteValue] = PrivateAttr() # Reference to the `NodePorts` instance that contains this port @@ -89,16 +103,14 @@ class Port(BaseServiceIOModel): # flags _used_default_value: bool = PrivateAttr(False) - class Config(BaseServiceIOModel.Config): - validate_assignment = True + model_config = ConfigDict(validate_assignment=True) - @validator("value", always=True) + @field_validator("value") @classmethod - def check_value(cls, v: DataItemValue, values: dict[str, Any]) -> DataItemValue: - + def check_value(cls, v: DataItemValue, info: ValidationInfo) -> DataItemValue: if ( v is not None - and (property_type := values.get("property_type")) + and (property_type := info.data.get("property_type")) and not isinstance(v, PortLink) ): if port_utils.is_file_type(property_type): @@ -108,10 +120,10 @@ def check_value(cls, v: DataItemValue, values: dict[str, Any]) -> DataItemValue: ) elif property_type == "ref_contentSchema": v, _ = validate_port_content( - port_key=values.get("key"), + port_key=info.data.get("key"), value=v, unit=None, - content_schema=values.get("content_schema", {}), + content_schema=info.data.get("content_schema", {}), ) elif isinstance(v, (list, dict)): raise TypeError( @@ -119,21 +131,21 @@ def check_value(cls, v: DataItemValue, values: dict[str, Any]) -> DataItemValue: ) return v - @validator("value_item", "value_concrete", pre=True) + @field_validator("value_item", "value_concrete", mode="before") @classmethod - def check_item_or_concrete_value(cls, v, values): + def check_item_or_concrete_value(cls, v, info: ValidationInfo): if ( v - and v != values["value"] - and (property_type := values.get("property_type")) + and v != info.data["value"] + and (property_type := info.data.get("property_type")) and property_type == "ref_contentSchema" and not can_parse_as(v, Path, AnyUrl) ): v, _ = validate_port_content( - port_key=values.get("key"), + port_key=info.data.get("key"), value=v, unit=None, - content_schema=values.get("content_schema", {}), + content_schema=info.data.get("content_schema", {}), ) return v @@ -142,29 +154,28 @@ def __init__(self, **data: Any): super().__init__(**data) if port_utils.is_file_type(self.property_type): - self._py_value_type = (Path, str) self._py_value_converter = Path elif self.property_type == "ref_contentSchema": - self._py_value_type = (int, float, bool, str, list, dict) - self._py_value_converter = lambda v: v + + def _converter(value: ItemConcreteValue) -> ItemConcreteValue: + return value + + self._py_value_converter = _converter else: assert self.property_type in TYPE_TO_PYTYPE # nosec - - self._py_value_type = TYPE_TO_PYTYPE[self.property_type] self._py_value_converter = TYPE_TO_PYTYPE[self.property_type] if self.value is None and self.default_value is not None: self.value = self.default_value self._used_default_value = True - assert self._py_value_type # nosec assert self._py_value_converter # nosec async def get_value( - self, *, file_link_type: Optional[LinkType] = None - ) -> Optional[ItemValue]: + self, *, file_link_type: LinkType | None = None + ) -> ItemValue | None: """Resolves data links and returns resulted value Transforms DataItemValue value -> ItemValue @@ -182,12 +193,12 @@ async def get_value( file_link_type, ) - async def _evaluate(): + async def _evaluate() -> ItemValue | None: if isinstance(self.value, PortLink): # this is a link to another node's port - other_port_itemvalue: Optional[ + other_port_itemvalue: None | 
( ItemValue - ] = await port_utils.get_value_link_from_port_link( + ) = await port_utils.get_value_link_from_port_link( self.value, # pylint: disable=protected-access self._node_ports._node_ports_creator_cb, @@ -198,9 +209,9 @@ async def _evaluate(): if isinstance(self.value, FileLink): # let's get the download/upload link from storage - url_itemvalue: Optional[ + url_itemvalue: None | ( AnyUrl - ] = await port_utils.get_download_link_from_storage( + ) = await port_utils.get_download_link_from_storage( # pylint: disable=protected-access user_id=self._node_ports.user_id, value=self.value, @@ -210,7 +221,10 @@ async def _evaluate(): if isinstance(self.value, DownloadLink): # generic download link for a file - return self.value.download_link + url: AnyUrl = TypeAdapter(AnyUrl).validate_python( + self.value.download_link + ) + return url # otherwise, this is a BasicValueTypes return self.value @@ -222,8 +236,8 @@ async def _evaluate(): return v async def get( - self, progress_bar: Optional[ProgressBarData] = None - ) -> Optional[ItemConcreteValue]: + self, progress_bar: ProgressBarData | None = None + ) -> ItemConcreteValue | None: """ Transforms DataItemValue value -> ItemConcreteValue @@ -236,48 +250,46 @@ async def get( pformat(self.value), ) - async def _evaluate(): + async def _evaluate() -> ItemConcreteValue | None: if self.value is None: return None if isinstance(self.value, PortLink): # this is a link to another node - other_port_concretevalue: Optional[ + other_port_concretevalue: None | ( ItemConcreteValue - ] = await port_utils.get_value_from_link( + ) = await port_utils.get_value_from_link( # pylint: disable=protected-access key=self.key, value=self.value, - fileToKeyMap=self.file_to_key_map, - node_port_creator=self._node_ports._node_ports_creator_cb, + file_to_key_map=self.file_to_key_map, + node_port_creator=self._node_ports._node_ports_creator_cb, # noqa: SLF001 progress_bar=progress_bar, ) value = other_port_concretevalue elif isinstance(self.value, FileLink): # this is a link from storage - path_concrete_value: Path = await port_utils.pull_file_from_store( + value = await port_utils.pull_file_from_store( user_id=self._node_ports.user_id, key=self.key, - fileToKeyMap=self.file_to_key_map, + file_to_key_map=self.file_to_key_map, value=self.value, io_log_redirect_cb=self._node_ports.io_log_redirect_cb, + r_clone_settings=self._node_ports.r_clone_settings, progress_bar=progress_bar, + aws_s3_cli_settings=self._node_ports.aws_s3_cli_settings, ) - value = path_concrete_value elif isinstance(self.value, DownloadLink): # this is a downloadable link - path_concrete_value: Path = ( - await port_utils.pull_file_from_download_link( - key=self.key, - fileToKeyMap=self.file_to_key_map, - value=self.value, - io_log_redirect_cb=self._node_ports.io_log_redirect_cb, - progress_bar=progress_bar, - ) + value = await port_utils.pull_file_from_download_link( + key=self.key, + file_to_key_map=self.file_to_key_map, + value=self.value, + io_log_redirect_cb=self._node_ports.io_log_redirect_cb, + progress_bar=progress_bar, ) - value = path_concrete_value else: # otherwise, this is a BasicValueTypes @@ -298,9 +310,9 @@ async def _evaluate(): async def _set( self, - new_concrete_value: Optional[ItemConcreteValue], + new_concrete_value: ItemConcreteValue | None, *, - set_kwargs: Optional[SetKWargs] = None, + set_kwargs: SetKWargs | None = None, progress_bar: ProgressBarData, ) -> None: """ @@ -313,7 +325,7 @@ async def _set( self.property_type, new_concrete_value, ) - new_value: Optional[DataItemValue] = 
None + new_value: DataItemValue | None = None if new_concrete_value is not None: converted_value = self._py_value_converter(new_concrete_value) if isinstance(converted_value, Path): @@ -342,6 +354,7 @@ async def _set( io_log_redirect_cb=self._node_ports.io_log_redirect_cb, file_base_path=base_path, progress_bar=progress_bar, + aws_s3_cli_settings=self._node_ports.aws_s3_cli_settings, ) else: new_value = converted_value @@ -358,7 +371,7 @@ async def set( self, new_value: ItemConcreteValue, *, - progress_bar: Optional[ProgressBarData] = None, + progress_bar: ProgressBarData | None = None, **set_kwargs, ) -> None: """sets a value to the port, by default it is also stored in the database @@ -369,11 +382,12 @@ async def set( await self._set( new_concrete_value=new_value, **set_kwargs, - progress_bar=progress_bar or ProgressBarData(steps=1), + progress_bar=progress_bar + or ProgressBarData(num_steps=1, description="set"), ) await self._node_ports.save_to_db_cb(self._node_ports) - async def set_value(self, new_item_value: Optional[ItemValue]) -> None: + async def set_value(self, new_item_value: ItemValue | None) -> None: """set the value on the port using an item-value :raises InvalidItemTypeError @@ -400,7 +414,7 @@ async def set_value(self, new_item_value: Optional[ItemValue]) -> None: new_item_value ) self.value_concrete = None - self.value = new_concrete_value # type:ignore + self.value = new_concrete_value # type: ignore[assignment] self.value_item = None self.value_concrete = None diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_utils.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_utils.py index a4d39bb83ce..41f317b6f44 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_utils.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_utils.py @@ -1,20 +1,27 @@ import logging import shutil +from collections.abc import Callable, Coroutine from pathlib import Path -from typing import Any, Callable, Coroutine, Optional - -from models_library.api_schemas_storage import FileUploadSchema +from typing import Any + +from models_library.api_schemas_storage.storage_schemas import ( + FileUploadSchema, + LinkType, +) +from models_library.basic_types import SHA256Str +from models_library.services_types import FileName, ServicePortKey from models_library.users import UserID -from pydantic import AnyUrl, ByteSize -from pydantic.tools import parse_obj_as +from pydantic import AnyUrl, ByteSize, TypeAdapter from servicelib.progress_bar import ProgressBarData +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings from yarl import URL from ..node_ports_common import data_items_utils, filemanager from ..node_ports_common.constants import SIMCORE_LOCATION -from ..node_ports_common.filemanager import LogRedirectCB -from ..node_ports_common.storage_client import LinkType +from ..node_ports_common.exceptions import NodeportsException +from ..node_ports_common.file_io_utils import LogRedirectCB +from ..node_ports_common.filemanager import UploadedFile, UploadedFolder from .links import DownloadLink, FileLink, ItemConcreteValue, ItemValue, PortLink log = logging.getLogger(__name__) @@ -25,14 +32,14 @@ async def get_value_link_from_port_link( node_port_creator: Callable[[str], Coroutine[Any, Any, Any]], *, file_link_type: LinkType, -) -> Optional[ItemValue]: +) -> ItemValue | None: log.debug("Getting value link %s", value) # create a node ports for the other node - other_nodeports = await 
node_port_creator(value.node_uuid) + other_nodeports = await node_port_creator(f"{value.node_uuid}") # get the port value through that guy log.debug("Received node from DB %s, now returning value link", other_nodeports) - other_value: Optional[ItemValue] = await other_nodeports.get_value_link( + other_value: ItemValue | None = await other_nodeports.get_value_link( value.output, file_link_type=file_link_type ) return other_value @@ -41,28 +48,29 @@ async def get_value_link_from_port_link( async def get_value_from_link( key: str, value: PortLink, - fileToKeyMap: Optional[dict[str, str]], + file_to_key_map: dict[FileName, ServicePortKey] | None, node_port_creator: Callable[[str], Coroutine[Any, Any, Any]], *, - progress_bar: Optional[ProgressBarData], -) -> Optional[ItemConcreteValue]: + progress_bar: ProgressBarData | None, +) -> ItemConcreteValue | None: log.debug("Getting value %s", value) # create a node ports for the other node - other_nodeports = await node_port_creator(value.node_uuid) + other_nodeports = await node_port_creator(f"{value.node_uuid}") # get the port value through that guy log.debug("Received node from DB %s, now returning value", other_nodeports) - other_value: Optional[ItemConcreteValue] = await other_nodeports.get( + other_value: ItemConcreteValue | None = await other_nodeports.get( value.output, progress_bar ) if isinstance(other_value, Path): file_name = other_value.name # move the file to the right final location # if a file alias is present use it - if fileToKeyMap: - file_name = next(iter(fileToKeyMap)) - file_path = data_items_utils.create_file_path(key, file_name) + if file_to_key_map: + file_name = next(iter(file_to_key_map)) + + file_path = data_items_utils.get_file_path(key, file_name) if other_value == file_path: # this is a corner case: in case the output key of the other node has the same name as the input key return other_value @@ -96,7 +104,8 @@ async def get_download_link_from_storage( # could raise ValidationError but will never do it since assert isinstance(link, URL) # nosec - return parse_obj_as(AnyUrl, f"{link}") + url: AnyUrl = TypeAdapter(AnyUrl).validate_python(f"{link}") + return url async def get_download_link_from_storage_overload( @@ -117,7 +126,8 @@ async def get_download_link_from_storage_overload( s3_object=s3_object, link_type=link_type, ) - return parse_obj_as(AnyUrl, f"{link}") + url: AnyUrl = TypeAdapter(AnyUrl).validate_python(f"{link}") + return url async def get_upload_links_from_storage( @@ -127,6 +137,7 @@ async def get_upload_links_from_storage( file_name: str, link_type: LinkType, file_size: ByteSize, + sha256_checksum: SHA256Str | None, ) -> FileUploadSchema: log.debug("getting link to file from storage for %s", file_name) s3_object = data_items_utils.create_simcore_file_id( @@ -139,6 +150,8 @@ async def get_upload_links_from_storage( s3_object=s3_object, link_type=link_type, file_size=file_size, + is_directory=False, + sha256_checksum=sha256_checksum, ) return links @@ -153,7 +166,10 @@ async def target_link_exists( Path(file_name), project_id, node_id ) return await filemanager.entry_exists( - user_id=user_id, store_id=SIMCORE_LOCATION, s3_object=s3_object + user_id=user_id, + store_id=SIMCORE_LOCATION, + s3_object=s3_object, + is_directory=False, ) @@ -172,26 +188,31 @@ async def delete_target_link( async def pull_file_from_store( user_id: UserID, key: str, - fileToKeyMap: Optional[dict[str, str]], + file_to_key_map: dict[FileName, ServicePortKey] | None, value: FileLink, - io_log_redirect_cb: Optional[LogRedirectCB], - 
progress_bar: Optional[ProgressBarData], + io_log_redirect_cb: LogRedirectCB | None, + r_clone_settings: RCloneSettings | None, + progress_bar: ProgressBarData | None, + aws_s3_cli_settings: AwsS3CliSettings | None, ) -> Path: log.debug("pulling file from storage %s", value) # do not make any assumption about s3_path, it is a str containing stuff that can be anything depending on the store - local_path = data_items_utils.create_folder_path(key) - downloaded_file = await filemanager.download_file_from_s3( + local_path = data_items_utils.get_folder_path(key) + downloaded_file = await filemanager.download_path_from_s3( user_id=user_id, store_id=value.store, store_name=None, s3_object=value.path, - local_folder=local_path, + local_path=local_path, io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar or ProgressBarData(steps=1), + r_clone_settings=r_clone_settings, + progress_bar=progress_bar + or ProgressBarData(num_steps=1, description="pulling file"), + aws_s3_cli_settings=aws_s3_cli_settings, ) # if a file alias is present use it to rename the file accordingly - if fileToKeyMap: - renamed_file = local_path / next(iter(fileToKeyMap)) + if file_to_key_map: + renamed_file = local_path / next(iter(file_to_key_map)) if downloaded_file != renamed_file: if renamed_file.exists(): renamed_file.unlink() @@ -207,53 +228,68 @@ async def push_file_to_store( user_id: UserID, project_id: str, node_id: str, - io_log_redirect_cb: Optional[LogRedirectCB], - r_clone_settings: Optional[RCloneSettings] = None, - file_base_path: Optional[Path] = None, + io_log_redirect_cb: LogRedirectCB | None, + r_clone_settings: RCloneSettings | None = None, + file_base_path: Path | None = None, progress_bar: ProgressBarData, + aws_s3_cli_settings: AwsS3CliSettings | None = None, ) -> FileLink: + """ + :raises exceptions.NodeportsException + """ + log.debug("file path %s will be uploaded to s3", file) s3_object = data_items_utils.create_simcore_file_id( file, project_id, node_id, file_base_path=file_base_path ) - store_id, e_tag = await filemanager.upload_file( + if not file.is_file(): + msg = f"Expected path={file} should be a file" + raise NodeportsException(msg) + + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( user_id=user_id, store_id=SIMCORE_LOCATION, store_name=None, s3_object=s3_object, - file_to_upload=file, + path_to_upload=file, r_clone_settings=r_clone_settings, io_log_redirect_cb=io_log_redirect_cb, progress_bar=progress_bar, + aws_s3_cli_settings=aws_s3_cli_settings, + ) + assert isinstance(upload_result, UploadedFile) # nosec + log.debug("file path %s uploaded, received ETag %s", file, upload_result.etag) + return FileLink( + store=upload_result.store_id, path=s3_object, eTag=upload_result.etag ) - log.debug("file path %s uploaded, received ETag %s", file, e_tag) - return FileLink(store=store_id, path=s3_object, e_tag=e_tag) async def pull_file_from_download_link( key: str, - fileToKeyMap: Optional[dict[str, str]], + file_to_key_map: dict[FileName, ServicePortKey] | None, value: DownloadLink, - io_log_redirect_cb: Optional[LogRedirectCB], - progress_bar: Optional[ProgressBarData], + io_log_redirect_cb: LogRedirectCB | None, + progress_bar: ProgressBarData | None, ) -> Path: + # download 1 file from a link log.debug( "Getting value from download link [%s] with label %s", value.download_link, value.label, ) - local_path = data_items_utils.create_folder_path(key) + local_path = data_items_utils.get_folder_path(key) downloaded_file = await 
filemanager.download_file_from_link( URL(f"{value.download_link}"), local_path, io_log_redirect_cb=io_log_redirect_cb, - progress_bar=progress_bar or ProgressBarData(steps=1), + progress_bar=progress_bar + or ProgressBarData(num_steps=1, description="pulling file"), ) # if a file alias is present use it to rename the file accordingly - if fileToKeyMap: - renamed_file = local_path / next(iter(fileToKeyMap)) + if file_to_key_map: + renamed_file = local_path / next(iter(file_to_key_map)) if downloaded_file != renamed_file: if renamed_file.exists(): renamed_file.unlink() @@ -278,10 +314,14 @@ async def get_file_link_from_url( s3_object = data_items_utils.create_simcore_file_id( Path(new_value.path), project_id, node_id ) - store_id, e_tag = await filemanager.get_file_metadata( + file_metadata = await filemanager.get_file_metadata( user_id=user_id, store_id=SIMCORE_LOCATION, s3_object=s3_object, ) - log.debug("file meta data for %s found, received ETag %s", new_value, e_tag) - return FileLink(store=store_id, path=s3_object, e_tag=e_tag) + log.debug( + "file meta data for %s found, received ETag %s", new_value, file_metadata.etag + ) + return FileLink( + store=file_metadata.location, path=s3_object, eTag=file_metadata.etag + ) diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_validation.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_validation.py index c2ebb56986d..c6596e669e9 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_validation.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/port_validation.py @@ -1,7 +1,8 @@ import logging import re -from typing import Any, Dict, Optional, Tuple +from typing import Any +from common_library.errors_classes import OsparcErrorMixin from models_library.projects_nodes import UnitStr from models_library.utils.json_schema import ( JsonSchemaValidationError, @@ -9,9 +10,8 @@ jsonschema_validate_schema, ) from pint import PintError, UnitRegistry -from pydantic.errors import PydanticValueError -JsonSchemaDict = Dict[str, Any] +JsonSchemaDict = dict[str, Any] log = logging.getLogger(__name__) @@ -22,8 +22,7 @@ # - Use 'code' to discriminate port_validation errors -class PortValueError(PydanticValueError): - code = "port_validation.schema_error" +class PortValueError(OsparcErrorMixin, ValueError): msg_template = "Invalid value in port {port_key!r}: {schema_error_message}" # pylint: disable=useless-super-delegation @@ -37,8 +36,7 @@ def __init__(self, *, port_key: str, schema_error: JsonSchemaValidationError): ) -class PortUnitError(PydanticValueError): - code = "port_validation.unit_error" +class PortUnitError(OsparcErrorMixin, ValueError): msg_template = "Invalid unit in port {port_key!r}: {pint_error_msg}" # pylint: disable=useless-super-delegation @@ -72,7 +70,7 @@ def _validate_port_value(value, content_schema: JsonSchemaDict): def _validate_port_unit( value, unit, content_schema: JsonSchemaDict, *, ureg: UnitRegistry -) -> Tuple[Any, Optional[UnitStr]]: +) -> tuple[Any, UnitStr | None]: """ - Checks valid 'value' against content_schema - Converts 'value' with 'unit' to unit expected in content_schema @@ -101,7 +99,7 @@ def _validate_port_unit( def validate_port_content( port_key, value: Any, - unit: Optional[UnitStr], + unit: UnitStr | None, content_schema: JsonSchemaDict, ): """A port content is all datasets injected to a given port. 
Currently only diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/ports_mapping.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/ports_mapping.py index ffcf7cd9665..9fb13510afb 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/ports_mapping.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/ports_mapping.py @@ -1,40 +1,35 @@ -from typing import Dict, ItemsView, Iterator, KeysView, Type, Union, ValuesView +from collections.abc import ItemsView, Iterator, KeysView, ValuesView -from models_library.services import PROPERTY_KEY_RE -from pydantic import BaseModel, constr +from models_library.services_types import ServicePortKey +from pydantic import RootModel from ..node_ports_common.exceptions import UnboundPortError from .port import Port -PortKey: Type[str] = constr(regex=PROPERTY_KEY_RE) -# TODO: could use new models_library.generic.DictModel[PortKey, Port] instead -class BasePortsMapping(BaseModel): - __root__: Dict[PortKey, Port] - - def __getitem__(self, key: Union[int, PortKey]) -> Port: - if isinstance(key, int): - if key < len(self.__root__): - key = list(self.__root__.keys())[key] - if not key in self.__root__: +class BasePortsMapping(RootModel[dict[ServicePortKey, Port]]): + def __getitem__(self, key: int | ServicePortKey) -> Port: + if isinstance(key, int) and key < len(self.root): + key = list(self.root.keys())[key] + if key not in self.root: raise UnboundPortError(key) assert isinstance(key, str) # nosec - return self.__root__[key] + return self.root[key] - def __iter__(self) -> Iterator[PortKey]: - return iter(self.__root__) + def __iter__(self) -> Iterator[ServicePortKey]: # type: ignore + return iter(self.root) - def keys(self) -> KeysView[PortKey]: - return self.__root__.keys() + def keys(self) -> KeysView[ServicePortKey]: + return self.root.keys() - def items(self) -> ItemsView[PortKey, Port]: - return self.__root__.items() + def items(self) -> ItemsView[ServicePortKey, Port]: + return self.root.items() def values(self) -> ValuesView[Port]: - return self.__root__.values() + return self.root.values() def __len__(self) -> int: - return self.__root__.__len__() + return self.root.__len__() class InputsList(BasePortsMapping): diff --git a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/serialization_v2.py b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/serialization_v2.py index f16a5ee9ff8..7c60de81caf 100644 --- a/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/serialization_v2.py +++ b/packages/simcore-sdk/src/simcore_sdk/node_ports_v2/serialization_v2.py @@ -1,18 +1,19 @@ import functools -import json import logging from pprint import pformat -from typing import Any, Optional +from typing import Any import pydantic -from models_library.projects_nodes import NodeID +from common_library.json_serialization import json_dumps, json_loads +from models_library.projects_nodes_io import NodeID from models_library.utils.nodes import compute_node_hash from packaging import version +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings -from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB from ..node_ports_common.dbmanager import DBManager from ..node_ports_common.exceptions import InvalidProtocolError +from ..node_ports_common.file_io_utils import LogRedirectCB from .nodeports_v2 import Nodeports # NOTE: Keeps backwards compatibility with pydantic @@ -34,9 +35,10 @@ async def load( user_id: int, project_id: str, node_uuid: str, - io_log_redirect_cb: 
Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, auto_update: bool = False, - r_clone_settings: Optional[RCloneSettings] = None, + r_clone_settings: RCloneSettings | None = None, + aws_s3_cli_settings: AwsS3CliSettings | None = None, ) -> Nodeports: """creates a nodeport object from a row from comp_tasks""" log.debug( @@ -47,7 +49,7 @@ async def load( port_config_str: str = await db_manager.get_ports_configuration_from_node_uuid( project_id, node_uuid ) - port_cfg = json.loads(port_config_str) + port_cfg = json_loads(port_config_str) log.debug(f"{port_cfg=}") # pylint: disable=logging-fstring-interpolation if any(k not in port_cfg for k in NODE_REQUIRED_KEYS): @@ -56,9 +58,10 @@ async def load( ) # convert to our internal node ports + node_ports_cfg: dict[str, dict[str, Any]] = {} if _PYDANTIC_NEEDS_ROOT_SPECIFIED: _PY_INT = "__root__" - node_ports_cfg: dict[str, dict[str, Any]] = { + node_ports_cfg = { "inputs": {_PY_INT: {}}, "outputs": {_PY_INT: {}}, } @@ -74,7 +77,7 @@ async def load( port_value["key"] = key port_value["value"] = port_cfg[port_type].get(key, None) else: - node_ports_cfg: dict[str, dict[str, Any]] = {} + node_ports_cfg = {} for port_type in ["inputs", "outputs"]: # schemas first node_ports_cfg[port_type] = port_cfg["schema"][port_type] @@ -97,6 +100,7 @@ async def load( auto_update=auto_update, r_clone_settings=r_clone_settings, io_log_redirect_cb=io_log_redirect_cb, + aws_s3_cli_settings=aws_s3_cli_settings, ) log.debug( "created node_ports_v2 object %s", @@ -110,7 +114,7 @@ async def dump(nodeports: Nodeports) -> None: "dumping node_ports_v2 object %s", pformat(nodeports, indent=2), ) - _nodeports_cfg = nodeports.dict( + _nodeports_cfg = nodeports.model_dump( include={"internal_inputs", "internal_outputs"}, by_alias=True, exclude_unset=True, @@ -144,7 +148,7 @@ async def get_node_io_payload_cb(node_id: NodeID) -> dict[str, Any]: ) # convert to DB - port_cfg = { + port_cfg: dict[str, Any] = { "schema": {"inputs": {}, "outputs": {}}, "inputs": {}, "outputs": {}, @@ -163,14 +167,14 @@ async def get_node_io_payload_cb(node_id: NodeID) -> dict[str, Any]: # pylint: disable=protected-access if ( port_values["value"] is not None - and not getattr(nodeports, f"internal_{port_type}")[ + and not getattr(nodeports, f"internal_{port_type}")[ # noqa: SLF001 port_key ]._used_default_value ): port_cfg[port_type][port_key] = port_values["value"] await nodeports.db_manager.write_ports_configuration( - json.dumps(port_cfg), + json_dumps(port_cfg), nodeports.project_id, nodeports.node_uuid, ) diff --git a/packages/simcore-sdk/src/simcore_sdk/py.typed b/packages/simcore-sdk/src/simcore_sdk/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packages/simcore-sdk/tests/conftest.py b/packages/simcore-sdk/tests/conftest.py index 7c68cc59265..39bd7070bae 100644 --- a/packages/simcore-sdk/tests/conftest.py +++ b/packages/simcore-sdk/tests/conftest.py @@ -10,26 +10,30 @@ import pytest import simcore_sdk -from pytest_simcore.postgres_service import PostgresTestConfig +from helpers.utils_port_v2 import CONSTANT_UUID +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.postgres_tools import PostgresTestConfig +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent sys.path.append(str(current_dir / "helpers")) pytest_plugins = [ - "pytest_simcore.aws_services", + 
"pytest_simcore.aws_s3_service", + "pytest_simcore.aws_server", + "pytest_simcore.disk_usage_monitoring", "pytest_simcore.docker_compose", "pytest_simcore.docker_swarm", "pytest_simcore.file_extra", "pytest_simcore.minio_service", - "pytest_simcore.monkeypatch_extra", "pytest_simcore.postgres_service", "pytest_simcore.pytest_global_environs", "pytest_simcore.repository_paths", "pytest_simcore.services_api_mocks_for_aiohttp_clients", "pytest_simcore.simcore_services", "pytest_simcore.simcore_storage_service", - "pytest_simcore.tmp_path_extra", ] @@ -40,30 +44,6 @@ def package_dir(): return pdir -@pytest.fixture(scope="session") -def osparc_simcore_root_dir() -> Path: - """osparc-simcore repo root dir""" - WILDCARD = "packages/simcore-sdk" - - root_dir = Path(current_dir) - while not any(root_dir.glob(WILDCARD)) and root_dir != Path("/"): - root_dir = root_dir.parent - - msg = f"'{root_dir}' does not look like the git root directory of osparc-simcore" - assert root_dir.exists(), msg - assert any(root_dir.glob(WILDCARD)), msg - assert any(root_dir.glob(".git")), msg - - return root_dir - - -@pytest.fixture(scope="session") -def env_devel_file(osparc_simcore_root_dir) -> Path: - env_devel_fpath = osparc_simcore_root_dir / ".env-devel" - assert env_devel_fpath.exists() - return env_devel_fpath - - @pytest.fixture(scope="session") def default_configuration_file() -> Path: path = current_dir / "mock" / "default_config.json" @@ -73,8 +53,7 @@ def default_configuration_file() -> Path: @pytest.fixture(scope="session") def default_configuration(default_configuration_file: Path) -> dict[str, Any]: - config = json.loads(default_configuration_file.read_text()) - return config + return json.loads(default_configuration_file.read_text()) @pytest.fixture(scope="session") @@ -86,6 +65,22 @@ def empty_configuration_file() -> Path: @pytest.fixture def node_ports_config( - postgres_host_config: PostgresTestConfig, minio_config: dict[str, str] + postgres_host_config: PostgresTestConfig, minio_s3_settings_envs: EnvVarsDict ) -> None: ... 
+ + +@pytest.fixture +def mock_io_log_redirect_cb() -> LogRedirectCB: + async def _mocked_function(*args, **kwargs) -> None: + pass + + return _mocked_function + + +@pytest.fixture +def constant_uuid4(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_sdk.node_ports_common.data_items_utils.uuid4", + return_value=CONSTANT_UUID, + ) diff --git a/packages/simcore-sdk/tests/helpers/utils_docker.py b/packages/simcore-sdk/tests/helpers/utils_docker.py index f4254150bca..bd1ace47755 100644 --- a/packages/simcore-sdk/tests/helpers/utils_docker.py +++ b/packages/simcore-sdk/tests/helpers/utils_docker.py @@ -3,7 +3,6 @@ import subprocess import tempfile from pathlib import Path -from typing import Dict, List, Optional, Union import docker import yaml @@ -16,7 +15,7 @@ wait=wait_fixed(2), stop=stop_after_attempt(10), after=after_log(log, logging.WARN) ) def get_service_published_port( - service_name: str, target_port: Optional[int] = None + service_name: str, target_port: int | None = None ) -> str: """ WARNING: ENSURE that service name exposes a port in Dockerfile file or docker-compose config file @@ -64,11 +63,11 @@ def get_service_published_port( def run_docker_compose_config( - docker_compose_paths: Union[List[Path], Path], + docker_compose_paths: list[Path] | Path, workdir: Path, - destination_path: Optional[Path] = None, -) -> Dict: - """Runs docker-compose config to validate and resolve a compose file configuration + destination_path: Path | None = None, +) -> dict: + """Runs docker compose config to validate and resolve a compose file configuration - Composes all configurations passed in 'docker_compose_paths' - Takes 'workdir' as current working directory (i.e. all '.env' files there will be captured) @@ -93,7 +92,7 @@ def run_docker_compose_config( configs_prefix = " ".join(config_paths) subprocess.run( - f"docker-compose {configs_prefix} config > {destination_path}", + f"docker compose {configs_prefix} config > {destination_path}", shell=True, check=True, cwd=workdir, diff --git a/packages/simcore-sdk/tests/helpers/utils_environs.py b/packages/simcore-sdk/tests/helpers/utils_environs.py index 134aac8ea1d..60fb4fe0898 100644 --- a/packages/simcore-sdk/tests/helpers/utils_environs.py +++ b/packages/simcore-sdk/tests/helpers/utils_environs.py @@ -2,16 +2,14 @@ """ import re -from copy import deepcopy from pathlib import Path -from typing import Dict import yaml VARIABLE_SUBSTITUTION = re.compile(r"\$\{(\w+)+") # -def load_env(file_handler) -> Dict: +def load_env(file_handler) -> dict: """Deserializes an environment file like .env-devel and returns a key-value map of the environment @@ -29,30 +27,10 @@ def load_env(file_handler) -> Dict: return environ -def eval_environs_in_docker_compose( - docker_compose: Dict, - docker_compose_dir: Path, - host_environ: Dict = None, - *, - use_env_devel=True, -): - """Resolves environments in docker compose and sets them under 'environment' section - - TODO: deprecated. 
Use instead docker-compose config in services/web/server/tests/integration/fixtures/docker_compose.py - SEE https://docs.docker.com/compose/environment-variables/ - """ - content = deepcopy(docker_compose) - for _name, service in content["services"].items(): - replace_environs_in_docker_compose_service( - service, docker_compose_dir, host_environ, use_env_devel=use_env_devel - ) - return content - - def replace_environs_in_docker_compose_service( - service_section: Dict, + service_section: dict, docker_compose_dir: Path, - host_environ: Dict = None, + host_environ: dict = None, *, use_env_devel=True, ): @@ -80,7 +58,7 @@ def replace_environs_in_docker_compose_service( # explicit environment [overrides env_file] environ_items = service_section.get("environment", []) if environ_items and isinstance(environ_items, list): - # TODO: use docker-compose config first + # TODO: use docker compose config first for item in environ_items: key, value = item.split("=") @@ -100,11 +78,11 @@ def replace_environs_in_docker_compose_service( def eval_service_environ( docker_compose_path: Path, service_name: str, - host_environ: Dict = None, - image_environ: Dict = None, + host_environ: dict = None, + image_environ: dict = None, *, use_env_devel=True, -) -> Dict: +) -> dict: """Deduces a service environment with it runs in a stack from confirmation :param docker_compose_path: path to stack configuration diff --git a/packages/simcore-sdk/tests/helpers/utils_port_v2.py b/packages/simcore-sdk/tests/helpers/utils_port_v2.py index 1b7670502f8..23298f6b175 100644 --- a/packages/simcore-sdk/tests/helpers/utils_port_v2.py +++ b/packages/simcore-sdk/tests/helpers/utils_port_v2.py @@ -1,9 +1,12 @@ -from typing import Any, Dict, Optional, Type, Union +from typing import Any, Final +from uuid import UUID from simcore_sdk.node_ports_v2.ports_mapping import InputsList, OutputsList +CONSTANT_UUID: Final[UUID] = UUID(int=0) -def create_valid_port_config(conf_type: str, **kwargs) -> Dict[str, Any]: + +def create_valid_port_config(conf_type: str, **kwargs) -> dict[str, Any]: valid_config = { "key": f"some_{conf_type}", "label": "some label", @@ -16,12 +19,11 @@ def create_valid_port_config(conf_type: str, **kwargs) -> Dict[str, Any]: def create_valid_port_mapping( - mapping_class: Union[Type[InputsList], Type[OutputsList]], + mapping_class: type[InputsList] | type[OutputsList], suffix: str, - file_to_key: Optional[str] = None, -) -> Union[InputsList, OutputsList]: - - port_cfgs: Dict[str, Any] = {} + file_to_key: str | None = None, +) -> InputsList | OutputsList: + port_cfgs: dict[str, Any] = {} for t, v in { "integer": 43, "number": 45.6, @@ -43,5 +45,5 @@ def create_valid_port_mapping( key=key_for_file_port, fileToKeyMap={file_to_key: key_for_file_port} if file_to_key else None, ) - port_mapping = mapping_class(**{"__root__": port_cfgs}) + port_mapping = mapping_class(**{"root": port_cfgs}) return port_mapping diff --git a/packages/simcore-sdk/tests/integration/conftest.py b/packages/simcore-sdk/tests/integration/conftest.py index 9b6f24e2ea8..b32fc4aa1df 100644 --- a/packages/simcore-sdk/tests/integration/conftest.py +++ b/packages/simcore-sdk/tests/integration/conftest.py @@ -5,25 +5,29 @@ import json import urllib.parse +from collections.abc import Awaitable, Callable, Iterable, Iterator from pathlib import Path -from typing import Any, Awaitable, Callable, Iterable, Iterator, Optional +from typing import Any from uuid import uuid4 import pytest import sqlalchemy as sa from aiohttp import ClientSession -from 
models_library.api_schemas_storage import FileUploadSchema +from models_library.api_schemas_storage.storage_schemas import FileUploadSchema from models_library.generics import Envelope from models_library.projects_nodes_io import LocationID, NodeIDStr, SimcoreS3FileID from models_library.users import UserID -from pydantic import parse_obj_as -from pytest_simcore.helpers.rawdata_fakers import random_project, random_user +from pydantic import TypeAdapter +from pytest_simcore.helpers.faker_factories import random_project, random_user +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.r_clone import RCloneSettings, S3Provider +from settings_library.s3 import S3Settings from simcore_postgres_database.models.comp_pipeline import comp_pipeline from simcore_postgres_database.models.comp_tasks import comp_tasks from simcore_postgres_database.models.file_meta_data import file_meta_data from simcore_postgres_database.models.projects import projects from simcore_postgres_database.models.users import users +from simcore_sdk.node_ports_common.aws_s3_cli import is_aws_s3_cli_available from simcore_sdk.node_ports_common.r_clone import is_r_clone_available from yarl import URL @@ -37,10 +41,10 @@ def user_id(postgres_db: sa.engine.Engine) -> Iterable[UserID]: # which would turn this test too complex. # pylint: disable=no-value-for-parameter - stmt = users.insert().values(**random_user(name="test")).returning(users.c.id) - print(f"{stmt}") with postgres_db.connect() as conn: - result = conn.execute(stmt) + result = conn.execute( + users.insert().values(**random_user(name="test")).returning(users.c.id) + ) row = result.first() assert row usr_id = row[users.c.id] @@ -76,7 +80,7 @@ def project_id(user_id: int, postgres_db: sa.engine.Engine) -> Iterable[str]: @pytest.fixture(scope="module") def node_uuid() -> NodeIDStr: - return NodeIDStr(f"{uuid4()}") + return TypeAdapter(NodeIDStr).validate_python(f"{uuid4()}") @pytest.fixture(scope="session") @@ -90,7 +94,7 @@ def create_valid_file_uuid( ) -> Callable[[str, Path], SimcoreS3FileID]: def _create(key: str, file_path: Path) -> SimcoreS3FileID: clean_path = Path(f"{project_id}/{node_uuid}/{key}/{file_path.name}") - return parse_obj_as(SimcoreS3FileID, f"{clean_path}") + return TypeAdapter(SimcoreS3FileID).validate_python(f"{clean_path}") return _create @@ -107,10 +111,7 @@ def default_configuration( # prepare database with default configuration json_configuration = default_configuration_file.read_text() create_pipeline(project_id) - config_dict = _set_configuration( - create_task, project_id, node_uuid, json_configuration - ) - return config_dict + return _set_configuration(create_task, project_id, node_uuid, json_configuration) @pytest.fixture() @@ -141,7 +142,7 @@ async def _create(file_path: Path) -> dict[str, Any]: async with ClientSession() as session: async with session.put(url) as resp: resp.raise_for_status() - presigned_links_enveloped = Envelope[FileUploadSchema].parse_obj( + presigned_links_enveloped = Envelope[FileUploadSchema].model_validate( await resp.json() ) assert presigned_links_enveloped.data @@ -155,17 +156,17 @@ async def _create(file_path: Path) -> dict[str, Any]: "Content-Type": "application/binary", } async with session.put( - link, data=file_path.read_bytes(), headers=extra_hdr + f"{link}", data=file_path.read_bytes(), headers=extra_hdr ) as resp: resp.raise_for_status() - # FIXME: that at this point, S3 and pg have some data that is NOT cleaned up + # NOTE: that at this point, S3 and pg have some data that 
is NOT cleaned up return {"store": s3_simcore_location, "path": file_id} return _create -@pytest.fixture(scope="function") +@pytest.fixture() def create_special_configuration( node_ports_config: None, create_pipeline: Callable[[str], str], @@ -175,8 +176,8 @@ def create_special_configuration( node_uuid: str, ) -> Callable: def _create( - inputs: Optional[list[tuple[str, str, Any]]] = None, - outputs: Optional[list[tuple[str, str, Any]]] = None, + inputs: list[tuple[str, str, Any]] | None = None, + outputs: list[tuple[str, str, Any]] | None = None, project_id: str = project_id, node_id: str = node_uuid, ) -> tuple[dict, str, str]: @@ -192,7 +193,7 @@ def _create( return _create -@pytest.fixture(scope="function") +@pytest.fixture() def create_2nodes_configuration( node_ports_config: None, create_pipeline: Callable[[str], str], @@ -329,52 +330,66 @@ def _assign_config( } } ) - if not entry[2] is None: + if entry[2] is not None: config_dict[port_type].update({entry[0]: entry[2]}) @pytest.fixture async def r_clone_settings_factory( - minio_config: dict[str, Any], storage_service: URL -) -> Awaitable[RCloneSettings]: + minio_s3_settings: S3Settings, storage_service: URL +) -> Callable[[], Awaitable[RCloneSettings]]: async def _factory() -> RCloneSettings: - client = minio_config["client"] - settings = RCloneSettings.parse_obj( - dict( - R_CLONE_S3=dict( - S3_ENDPOINT=client["endpoint"], - S3_ACCESS_KEY=client["access_key"], - S3_SECRET_KEY=client["secret_key"], - S3_BUCKET_NAME=minio_config["bucket_name"], - S3_SECURE=client["secure"], - ), - R_CLONE_PROVIDER=S3Provider.MINIO, - ) + settings = RCloneSettings( + R_CLONE_S3=minio_s3_settings, R_CLONE_PROVIDER=S3Provider.MINIO ) if not await is_r_clone_available(settings): pytest.skip("rclone not installed") return settings - return _factory() + return _factory + + +@pytest.fixture +async def aws_s3_cli_settings_factory( + minio_s3_settings: S3Settings, storage_service: URL +) -> Callable[[], Awaitable[AwsS3CliSettings]]: + async def _factory() -> AwsS3CliSettings: + settings = AwsS3CliSettings(AWS_S3_CLI_S3=minio_s3_settings) + if not await is_aws_s3_cli_available(settings): + pytest.skip("aws cli not installed") + + return settings + + return _factory @pytest.fixture async def r_clone_settings( - r_clone_settings_factory: Awaitable[RCloneSettings], + r_clone_settings_factory: Callable[[], Awaitable[RCloneSettings]], ) -> RCloneSettings: - return await r_clone_settings_factory + return await r_clone_settings_factory() + + +@pytest.fixture +async def aws_s3_cli_settings( + aws_s3_cli_settings_factory: Callable[[], Awaitable[AwsS3CliSettings]], +) -> AwsS3CliSettings: + return await aws_s3_cli_settings_factory() @pytest.fixture def cleanup_file_meta_data(postgres_db: sa.engine.Engine) -> Iterator[None]: - yield + yield None with postgres_db.connect() as conn: conn.execute(file_meta_data.delete()) @pytest.fixture def node_ports_config( - node_ports_config, simcore_services_ready, cleanup_file_meta_data: None, bucket: str -): - ... 
+ node_ports_config, + with_bucket_versioning_enabled: str, + simcore_services_ready, + cleanup_file_meta_data: None, +) -> None: + return None diff --git a/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py b/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py new file mode 100644 index 00000000000..a25e95aa715 --- /dev/null +++ b/packages/simcore-sdk/tests/integration/test_node_data_data_manager.py @@ -0,0 +1,306 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import hashlib +import shutil +import zipfile +from collections.abc import Callable +from pathlib import Path +from uuid import uuid4 + +import pytest +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, SimcoreS3FileID +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.progress_bar import ProgressBarData +from settings_library.aws_s3_cli import AwsS3CliSettings +from settings_library.r_clone import RCloneSettings +from simcore_sdk.node_data import data_manager +from simcore_sdk.node_ports_common import filemanager +from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION +from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB + +pytest_simcore_core_services_selection = [ + "migration", + "postgres", + "rabbit", + "redis", + "storage", + "sto-worker", +] + +pytest_simcore_ops_services_selection = [ + "minio", + "adminer", +] + + +# UTILS + + +def _empty_path(path: Path) -> None: + if path.is_file(): + path.unlink() + assert path.exists() is False + path.touch() + assert path.exists() is True + else: + shutil.rmtree(path) + assert path.exists() is False + path.mkdir(parents=True, exist_ok=True) + assert path.exists() is True + + +def _get_file_hashes_in_path(path_to_hash: Path) -> set[tuple[Path, str]]: + def _hash_path(path: Path): + sha256_hash = hashlib.sha256() + with Path.open(path, "rb") as f: + # Read and update hash string value in blocks of 4K + for byte_block in iter(lambda: f.read(4096), b""): + sha256_hash.update(byte_block) + return sha256_hash.hexdigest() + + def _relative_path(root_path: Path, full_path: Path) -> Path: + return full_path.relative_to(root_path) + + if path_to_hash.is_file(): + return {(_relative_path(path_to_hash, path_to_hash), _hash_path(path_to_hash))} + + return { + (_relative_path(path_to_hash, path), _hash_path(path)) + for path in path_to_hash.rglob("*") + } + + +def _make_file_with_content(file_path: Path) -> Path: + content = " ".join(f"{uuid4()}" for x in range(10)) + file_path.write_text(content) + assert file_path.exists() + return file_path + + +def _make_dir_with_files(temp_dir: Path, file_count: int) -> Path: + assert file_count > 0 + + content_dir_path = temp_dir / f"content_dir{uuid4()}" + content_dir_path.mkdir(parents=True, exist_ok=True) + + for _ in range(file_count): + _make_file_with_content(file_path=content_dir_path / f"{uuid4()}_test.txt") + + return content_dir_path + + +def _zip_directory(dir_to_compress: Path, destination: Path) -> None: + dir_to_compress = Path(dir_to_compress) + destination = Path(destination) + + with zipfile.ZipFile(destination, "w", zipfile.ZIP_DEFLATED) as zipf: + for file_path in dir_to_compress.glob("**/*"): + if file_path.is_file(): + zipf.write(file_path, file_path.relative_to(dir_to_compress)) + + +@pytest.fixture +def 
temp_dir(tmpdir: Path) -> Path: + return Path(tmpdir) + + +@pytest.fixture +def random_tmp_dir_generator(temp_dir: Path) -> Callable[[bool], Path]: + def generator(is_file: bool) -> Path: + random_dir_path = temp_dir / f"{uuid4()}" + random_dir_path.mkdir(parents=True, exist_ok=True) + if is_file: + file_path = random_dir_path / f"{uuid4()}_test.txt" + file_path.touch() + return file_path + + return random_dir_path + + return generator + + +@pytest.fixture +def project_id(project_id: str) -> ProjectID: + return ProjectID(project_id) + + +@pytest.fixture +def node_uuid(faker: Faker) -> NodeID: + return NodeID(faker.uuid4()) + + +@pytest.fixture(params=["dir_content_one_file_path", "dir_content_multiple_files_path"]) +def content_path(request: pytest.FixtureRequest, temp_dir: Path) -> Path: + match request.param: + case "dir_content_one_file_path": + return _make_dir_with_files(temp_dir, file_count=1) + case "dir_content_multiple_files_path": + return _make_dir_with_files(temp_dir, file_count=2) + case _: + pytest.fail("Undefined content_param") + + +async def test_valid_upload_download( + node_ports_config: None, + content_path: Path, + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + r_clone_settings: RCloneSettings, + aws_s3_cli_settings: AwsS3CliSettings, + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, +): + async with ProgressBarData(num_steps=2, description=faker.pystr()) as progress_bar: + await data_manager._push_directory( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + source_path=content_path, + io_log_redirect_cb=mock_io_log_redirect_cb, + progress_bar=progress_bar, + r_clone_settings=r_clone_settings, + aws_s3_cli_settings=None, + ) + assert progress_bar._current_steps == pytest.approx(1.0) # noqa: SLF001 + + uploaded_hashes = _get_file_hashes_in_path(content_path) + + _empty_path(content_path) + + await data_manager._pull_directory( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + destination_path=content_path, + io_log_redirect_cb=mock_io_log_redirect_cb, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=None, + ) + assert progress_bar._current_steps == pytest.approx(2.0) # noqa: SLF001 + + downloaded_hashes = _get_file_hashes_in_path(content_path) + + assert uploaded_hashes == downloaded_hashes + + +async def test_valid_upload_download_saved_to( + node_ports_config, + content_path: Path, + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + random_tmp_dir_generator: Callable, + r_clone_settings: RCloneSettings, + aws_s3_cli_settings: AwsS3CliSettings, + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, +): + async with ProgressBarData(num_steps=2, description=faker.pystr()) as progress_bar: + await data_manager._push_directory( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + source_path=content_path, + io_log_redirect_cb=mock_io_log_redirect_cb, + progress_bar=progress_bar, + r_clone_settings=r_clone_settings, + aws_s3_cli_settings=None, + ) + # pylint: disable=protected-access + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 + + uploaded_hashes = _get_file_hashes_in_path(content_path) + + _empty_path(content_path) + + new_destination = random_tmp_dir_generator(is_file=content_path.is_file()) + + await data_manager._pull_directory( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + destination_path=content_path, + 
save_to=new_destination, + io_log_redirect_cb=mock_io_log_redirect_cb, + r_clone_settings=r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=None, + ) + assert progress_bar._current_steps == pytest.approx(2) # noqa: SLF001 + + downloaded_hashes = _get_file_hashes_in_path(new_destination) + + assert uploaded_hashes == downloaded_hashes + + +async def test_delete_legacy_archive( + node_ports_config, + content_path: Path, + user_id: UserID, + project_id: ProjectID, + node_uuid: NodeID, + r_clone_settings: RCloneSettings, + temp_dir: Path, + faker: Faker, +): + async with ProgressBarData(num_steps=2, description=faker.pystr()) as progress_bar: + # NOTE: legacy archives can no longer be crated + # generating a "legacy style archive" + archive_into_dir = temp_dir / f"legacy-archive-dir-{uuid4()}" + archive_into_dir.mkdir(parents=True, exist_ok=True) + legacy_archive_name = archive_into_dir / f"{content_path.stem}.zip" + _zip_directory(dir_to_compress=content_path, destination=legacy_archive_name) + + await filemanager.upload_path( + user_id=user_id, + store_id=SIMCORE_LOCATION, + store_name=None, + s3_object=TypeAdapter(SimcoreS3FileID).validate_python( + f"{project_id}/{node_uuid}/{legacy_archive_name.name}" + ), + path_to_upload=legacy_archive_name, + io_log_redirect_cb=None, + progress_bar=progress_bar, + r_clone_settings=r_clone_settings, + ) + + # pylint: disable=protected-access + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 + + assert ( + await data_manager._state_metadata_entry_exists( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=content_path, + is_archive=True, + ) + is True + ) + + await data_manager._delete_legacy_archive( # noqa: SLF001 + project_id=project_id, + node_uuid=node_uuid, + path=content_path, + ) + + assert ( + await data_manager._state_metadata_entry_exists( # noqa: SLF001 + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + path=content_path, + is_archive=True, + ) + is False + ) diff --git a/packages/simcore-sdk/tests/integration/test_node_data_data_manager_.py b/packages/simcore-sdk/tests/integration/test_node_data_data_manager_.py deleted file mode 100644 index e17286c8ab9..00000000000 --- a/packages/simcore-sdk/tests/integration/test_node_data_data_manager_.py +++ /dev/null @@ -1,216 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=too-many-arguments - -import hashlib -import os -from pathlib import Path -from typing import Callable -from uuid import uuid4 - -import pytest -from servicelib.progress_bar import ProgressBarData -from simcore_sdk.node_data import data_manager - -pytest_simcore_core_services_selection = [ - "migration", - "postgres", - "storage", -] - -pytest_simcore_ops_services_selection = ["minio", "adminer"] - - -# UTILS - - -def _remove_file_or_folder(file_or_folder: Path) -> None: - if file_or_folder.is_file(): - file_or_folder.unlink() - assert file_or_folder.exists() is False - file_or_folder.touch() - assert file_or_folder.exists() is True - else: - os.system(f"rm -rf {file_or_folder}") - assert file_or_folder.exists() is False - file_or_folder.mkdir(parents=True, exist_ok=True) - assert file_or_folder.exists() is True - - -def _get_file_hashes_in_path(path_to_hash: Path) -> set[tuple[Path, str]]: - def _hash_path(path: Path): - sha256_hash = hashlib.sha256() - with open(path, "rb") as f: - # Read and update hash string value in blocks of 4K - for byte_block in 
iter(lambda: f.read(4096), b""): - sha256_hash.update(byte_block) - return sha256_hash.hexdigest() - - def _relative_path(root_path: Path, full_path: Path) -> Path: - return full_path.relative_to(root_path) - - if path_to_hash.is_file(): - return {(_relative_path(path_to_hash, path_to_hash), _hash_path(path_to_hash))} - - return { - (_relative_path(path_to_hash, path), _hash_path(path)) - for path in path_to_hash.rglob("*") - } - - -def _make_file_with_content(file_path: Path) -> Path: - content = " ".join(f"{uuid4()}" for x in range(10)) - file_path.write_text(content) - assert file_path.exists() - return file_path - - -def _make_dir_with_files(temp_dir: Path, file_count: int) -> Path: - assert file_count > 0 - - content_dir_path = temp_dir / f"content_dir{uuid4()}" - content_dir_path.mkdir(parents=True, exist_ok=True) - - for _ in range(file_count): - _make_file_with_content(file_path=content_dir_path / f"{uuid4()}_test.txt") - - return content_dir_path - - -@pytest.fixture -def node_uuid() -> str: - return f"{uuid4()}" - - -@pytest.fixture -def temp_dir(tmpdir: Path) -> Path: - return Path(tmpdir) - - -@pytest.fixture -def random_tmp_dir_generator(temp_dir: Path) -> Callable[[bool], Path]: - def generator(is_file: bool) -> Path: - random_dir_path = temp_dir / f"{uuid4()}" - random_dir_path.mkdir(parents=True, exist_ok=True) - if is_file: - file_path = random_dir_path / f"{uuid4()}_test.txt" - file_path.touch() - return file_path - - return random_dir_path - - return generator - - -@pytest.fixture -def file_content_path(temp_dir: Path) -> Path: - return _make_file_with_content(file_path=temp_dir / f"{uuid4()}_test.txt") - - -@pytest.fixture -def dir_content_one_file_path(temp_dir: Path) -> Path: - return _make_dir_with_files(temp_dir, file_count=1) - - -@pytest.fixture -def dir_content_multiple_files_path(temp_dir: Path) -> Path: - return _make_dir_with_files(temp_dir, file_count=2) - - -@pytest.mark.parametrize( - "content_path", - [ - # pylint: disable=no-member - pytest.lazy_fixture("file_content_path"), - pytest.lazy_fixture("dir_content_one_file_path"), - pytest.lazy_fixture("dir_content_multiple_files_path"), - ], -) -async def test_valid_upload_download( - node_ports_config, - content_path: Path, - user_id: int, - project_id: str, - node_uuid: str, -): - async with ProgressBarData(steps=2) as progress_bar: - await data_manager.push( - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - file_or_folder=content_path, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - # pylint: disable=protected-access - assert progress_bar._continuous_progress_value == pytest.approx(1.0) - - uploaded_hashes = _get_file_hashes_in_path(content_path) - - _remove_file_or_folder(content_path) - - await data_manager.pull( - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - file_or_folder=content_path, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - assert progress_bar._continuous_progress_value == pytest.approx(2.0) - - downloaded_hashes = _get_file_hashes_in_path(content_path) - - assert uploaded_hashes == downloaded_hashes - - -@pytest.mark.parametrize( - "content_path", - [ - # pylint: disable=no-member - pytest.lazy_fixture("file_content_path"), - pytest.lazy_fixture("dir_content_one_file_path"), - pytest.lazy_fixture("dir_content_multiple_files_path"), - ], -) -async def test_valid_upload_download_saved_to( - node_ports_config, - content_path: Path, - user_id: int, - project_id: str, - node_uuid: str, - random_tmp_dir_generator: Callable, -): - 
async with ProgressBarData(steps=2) as progress_bar: - await data_manager.push( - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - file_or_folder=content_path, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - # pylint: disable=protected-access - assert progress_bar._continuous_progress_value == pytest.approx(1) - - uploaded_hashes = _get_file_hashes_in_path(content_path) - - _remove_file_or_folder(content_path) - - new_destination = random_tmp_dir_generator(is_file=content_path.is_file()) - - await data_manager.pull( - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - file_or_folder=content_path, - save_to=new_destination, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - assert progress_bar._continuous_progress_value == pytest.approx(2) - - downloaded_hashes = _get_file_hashes_in_path(new_destination) - - assert uploaded_hashes == downloaded_hashes diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_common_aws_s3_cli.py b/packages/simcore-sdk/tests/integration/test_node_ports_common_aws_s3_cli.py new file mode 100644 index 00000000000..9de63cb4fed --- /dev/null +++ b/packages/simcore-sdk/tests/integration/test_node_ports_common_aws_s3_cli.py @@ -0,0 +1,448 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import filecmp +import os +import urllib.parse +from collections.abc import AsyncIterator, Callable +from pathlib import Path +from typing import Final +from unittest.mock import AsyncMock +from uuid import uuid4 + +import aioboto3 +import aiofiles +import pytest +from faker import Faker +from models_library.progress_bar import ProgressReport +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.file_utils import remove_directory +from servicelib.progress_bar import ProgressBarData +from servicelib.utils import logged_gather +from settings_library.aws_s3_cli import AwsS3CliSettings +from simcore_sdk.node_ports_common import aws_s3_cli + +pytest_simcore_core_services_selection = [ + "migration", + "postgres", + "rabbit", + "redis", + "storage", + "sto-worker", +] + +pytest_simcore_ops_services_selection = [ + "minio", + "adminer", +] + + +WAIT_FOR_S3_BACKEND_TO_UPDATE: Final[float] = 1.0 + + +@pytest.fixture +async def cleanup_bucket_after_test( + aws_s3_cli_settings: AwsS3CliSettings, +) -> AsyncIterator[None]: + session = aioboto3.Session( + aws_access_key_id=aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ACCESS_KEY, + aws_secret_access_key=aws_s3_cli_settings.AWS_S3_CLI_S3.S3_SECRET_KEY, + ) + + yield + + async with session.client( + "s3", endpoint_url=f"{aws_s3_cli_settings.AWS_S3_CLI_S3.S3_ENDPOINT}" + ) as s3_client: + # List all object versions + paginator = s3_client.get_paginator("list_object_versions") + async for page in paginator.paginate( + Bucket=aws_s3_cli_settings.AWS_S3_CLI_S3.S3_BUCKET_NAME + ): + # Prepare delete markers and versions for deletion + delete_markers = page.get("DeleteMarkers", []) + versions = page.get("Versions", []) + + objects_to_delete = [ + {"Key": obj["Key"], "VersionId": obj["VersionId"]} + for obj in delete_markers + versions + ] + + # Perform deletion + if objects_to_delete: + await s3_client.delete_objects( + Bucket=aws_s3_cli_settings.AWS_S3_CLI_S3.S3_BUCKET_NAME, + Delete={"Objects": objects_to_delete, "Quiet": True}, + ) + + +# put to shared config +def _fake_s3_link(aws_s3_cli_settings: AwsS3CliSettings, s3_object: str) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + 
f"s3://{aws_s3_cli_settings.AWS_S3_CLI_S3.S3_BUCKET_NAME}/{urllib.parse.quote(s3_object)}", + ) + + +# put to shared config +async def _create_random_binary_file( + file_path: Path, + file_size: ByteSize, + # NOTE: bigger files get created faster with bigger chunk_size + chunk_size: int = TypeAdapter(ByteSize).validate_python("1mib"), +): + async with aiofiles.open(file_path, mode="wb") as file: + bytes_written = 0 + while bytes_written < file_size: + remaining_bytes = file_size - bytes_written + current_chunk_size = min(chunk_size, remaining_bytes) + await file.write(os.urandom(current_chunk_size)) + bytes_written += current_chunk_size + assert bytes_written == file_size + + +# put to shared config +async def _create_file_of_size( + tmp_path: Path, *, name: str, file_size: ByteSize +) -> Path: + file: Path = tmp_path / name + if not file.parent.exists(): + file.parent.mkdir(parents=True, exist_ok=True) + + await _create_random_binary_file(file, file_size) + assert file.exists() + assert file.stat().st_size == file_size + return file + + +# put to shared config +async def _create_files_in_dir( + target_dir: Path, file_count: int, file_size: ByteSize +) -> set[str]: + results: list[Path] = await logged_gather( + *[ + _create_file_of_size(target_dir, name=f"{i}-file.bin", file_size=file_size) + for i in range(file_count) + ], + max_concurrency=10, + ) + return {x.name for x in results} + + +async def _upload_local_dir_to_s3( + aws_s3_cli_settings: AwsS3CliSettings, + s3_directory_link: AnyUrl, + source_dir: Path, + *, + check_progress: bool = False, + faker: Faker, +) -> None: + # NOTE: progress is enforced only when uploading and only when using + # total file sizes that are quite big, otherwise the test will fail + # we ant to avoid this from being flaky. + # Since using moto to mock the S3 api, downloading is way to fast. + # Progress behaves as expected with CEPH and AWS S3 backends. 
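# --- Generic illustration of the check_progress idea described in the NOTE above
# (plain Python, not the servicelib/aws_s3_cli API; all names are hypothetical):
# a callback collects every progress report emitted during a transfer, and
# intermediate reports are only asserted when the payload is large enough to
# produce more than the final one.
import asyncio
from collections.abc import Callable


async def _fake_transfer(
    total_chunks: int, on_progress: Callable[[float], None]
) -> None:
    for chunk in range(1, total_chunks + 1):
        await asyncio.sleep(0)  # yield control, as a real uploader would
        on_progress(chunk / total_chunks)  # intermediate reports, ending with 1.0


async def _demo(*, check_progress: bool) -> None:
    reports: list[float] = []
    await _fake_transfer(total_chunks=8, on_progress=reports.append)
    assert reports[-1] == 1.0  # the final report is always emitted
    if check_progress:
        # only large transfers are expected to surface intermediate reports
        assert len(reports) > 1


if __name__ == "__main__":
    asyncio.run(_demo(check_progress=True))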
+ + progress_entries: list[ProgressReport] = [] + + async def _report_progress_upload(report: ProgressReport) -> None: + print(">>>|", report, "| ⏫") + progress_entries.append(report) + + async with ProgressBarData( + num_steps=1, + progress_report_cb=_report_progress_upload, + description=faker.pystr(), + ) as progress_bar: + await aws_s3_cli.sync_local_to_s3( + aws_s3_cli_settings, + progress_bar, + local_directory_path=source_dir, + upload_s3_link=s3_directory_link, + debug_logs=True, + ) + if check_progress: + # NOTE: a progress of 1 is always sent by the progress bar + # we want to check that aws cli also reports some progress entries + assert len(progress_entries) > 1 + + +async def _download_from_s3_to_local_dir( + aws_s3_cli_settings: AwsS3CliSettings, + s3_directory_link: AnyUrl, + destination_dir: Path, + faker: Faker, +) -> None: + async def _report_progress_download(report: ProgressReport) -> None: + print(">>>|", report, "| ⏬") + + async with ProgressBarData( + num_steps=1, + progress_report_cb=_report_progress_download, + description=faker.pystr(), + ) as progress_bar: + await aws_s3_cli.sync_s3_to_local( + aws_s3_cli_settings, + progress_bar, + local_directory_path=destination_dir, + download_s3_link=s3_directory_link, + debug_logs=True, + ) + + +def _directories_have_the_same_content(dir_1: Path, dir_2: Path) -> bool: + names_in_dir_1 = {x.name for x in dir_1.glob("*")} + names_in_dir_2 = {x.name for x in dir_2.glob("*")} + if names_in_dir_1 != names_in_dir_2: + return False + + filecmp.clear_cache() + + compare_results: list[bool] = [] + + for file_name in names_in_dir_1: + f1 = dir_1 / file_name + f2 = dir_2 / file_name + + # when there is a broken symlink, which we want to sync, filecmp does not work + is_broken_symlink = ( + not f1.exists() and f1.is_symlink() and not f2.exists() and f2.is_symlink() + ) + + if is_broken_symlink: + compare_results.append(True) + else: + compare_results.append(filecmp.cmp(f1, f2, shallow=False)) + + return all(compare_results) + + +def _ensure_dir(tmp_path: Path, faker: Faker, *, dir_prefix: str) -> Path: + generated_files_dir: Path = tmp_path / f"{dir_prefix}-{faker.uuid4()}" + generated_files_dir.mkdir(parents=True, exist_ok=True) + assert generated_files_dir.exists() + return generated_files_dir + + +@pytest.fixture +async def dir_locally_created_files( + tmp_path: Path, faker: Faker +) -> AsyncIterator[Path]: + path = _ensure_dir(tmp_path, faker, dir_prefix="source") + yield path + await remove_directory(path) + + +@pytest.fixture +async def dir_downloaded_files_1(tmp_path: Path, faker: Faker) -> AsyncIterator[Path]: + path = _ensure_dir(tmp_path, faker, dir_prefix="downloaded-1") + yield path + await remove_directory(path) + + +@pytest.fixture +async def dir_downloaded_files_2(tmp_path: Path, faker: Faker) -> AsyncIterator[Path]: + path = _ensure_dir(tmp_path, faker, dir_prefix="downloaded-2") + yield path + await remove_directory(path) + + +@pytest.mark.parametrize( + "file_count, file_size, check_progress", + [ + (0, TypeAdapter(ByteSize).validate_python("0"), False), + (1, TypeAdapter(ByteSize).validate_python("1mib"), False), + (2, TypeAdapter(ByteSize).validate_python("1mib"), False), + (1, TypeAdapter(ByteSize).validate_python("1Gib"), True), + pytest.param( + 4, + TypeAdapter(ByteSize).validate_python("500Mib"), + True, + marks=pytest.mark.heavy_load, + ), + pytest.param( + 100, + TypeAdapter(ByteSize).validate_python("20mib"), + True, + marks=pytest.mark.heavy_load, + ), + ], +) +async def test_local_to_remote_to_local( + 
aws_s3_cli_settings: AwsS3CliSettings, + create_valid_file_uuid: Callable[[str, Path], str], + dir_locally_created_files: Path, + dir_downloaded_files_1: Path, + file_count: int, + file_size: ByteSize, + check_progress: bool, + cleanup_bucket_after_test: None, + faker: Faker, +) -> None: + await _create_files_in_dir(dir_locally_created_files, file_count, file_size) + + # get s3 reference link + directory_uuid = create_valid_file_uuid(f"{dir_locally_created_files}", Path()) + s3_directory_link = _fake_s3_link(aws_s3_cli_settings, directory_uuid) + + # run the test + await _upload_local_dir_to_s3( + aws_s3_cli_settings, + s3_directory_link, + dir_locally_created_files, + check_progress=check_progress, + faker=faker, + ) + await _download_from_s3_to_local_dir( + aws_s3_cli_settings, s3_directory_link, dir_downloaded_files_1, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 + ) + + +def _change_content_of_one_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).write_bytes(os.urandom(10)) + + +def _change_content_of_all_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + for file_name in generated_file_names: + (dir_locally_created_files / file_name).unlink() + (dir_locally_created_files / file_name).write_bytes(os.urandom(10)) + + +def _remove_one_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).unlink() + + +def _rename_one_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).rename( + dir_locally_created_files / f"renamed-{a_generated_file}" + ) + + +def _add_a_new_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + (dir_locally_created_files / "new_file.bin").write_bytes(os.urandom(10)) + + +def _remove_all_files( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + for file_name in generated_file_names: + (dir_locally_created_files / file_name).unlink() + + +def _regression_add_broken_symlink( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + # NOTE: if rclone tries to copy a link that does not exist an error is raised + path_does_not_exist_on_fs = Path(f"/tmp/missing-{uuid4()}") # noqa: S108 + assert not path_does_not_exist_on_fs.exists() + + broken_symlink = dir_locally_created_files / "missing.link" + assert not broken_symlink.exists() + os.symlink(f"{path_does_not_exist_on_fs}", f"{broken_symlink}") + + +@pytest.mark.parametrize( + "changes_callable", + [ + _change_content_of_one_file, + _change_content_of_all_file, + _remove_one_file, + _remove_all_files, + _rename_one_file, + _add_a_new_file, + _regression_add_broken_symlink, + ], +) +async def test_overwrite_an_existing_file_and_sync_again( + aws_s3_cli_settings: AwsS3CliSettings, + create_valid_file_uuid: Callable[[str, Path], str], + dir_locally_created_files: Path, + dir_downloaded_files_1: Path, + dir_downloaded_files_2: Path, + changes_callable: Callable[[Path, set[str]], None], + cleanup_bucket_after_test: None, + faker: Faker, +) -> None: + generated_file_names: set[str] = await _create_files_in_dir( + dir_locally_created_files, + 3, + 
TypeAdapter(ByteSize).validate_python("1kib"), + ) + assert len(generated_file_names) > 0 + + # get s3 reference link + directory_uuid = create_valid_file_uuid(f"{dir_locally_created_files}", Path()) + s3_directory_link = _fake_s3_link(aws_s3_cli_settings, directory_uuid) + + # sync local to remote and check + await _upload_local_dir_to_s3( + aws_s3_cli_settings, s3_directory_link, dir_locally_created_files, faker=faker + ) + await _download_from_s3_to_local_dir( + aws_s3_cli_settings, s3_directory_link, dir_downloaded_files_1, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 + ) + + # make some changes to local content + changes_callable(dir_locally_created_files, generated_file_names) + + # ensure local content changed form remote content + assert not _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 + ) + + # upload and check new local and new remote are in sync + await _upload_local_dir_to_s3( + aws_s3_cli_settings, s3_directory_link, dir_locally_created_files, faker=faker + ) + await _download_from_s3_to_local_dir( + aws_s3_cli_settings, s3_directory_link, dir_downloaded_files_2, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_2 + ) + # check that old remote and new remote are not the same + assert not _directories_have_the_same_content( + dir_downloaded_files_1, dir_downloaded_files_2 + ) + + +async def test_raises_error_if_local_directory_path_is_a_file( + tmp_path: Path, faker: Faker, cleanup_bucket_after_test: None +): + file_path = await _create_file_of_size( + tmp_path, name=f"test{faker.uuid4()}.bin", file_size=ByteSize(1) + ) + with pytest.raises(aws_s3_cli.AwsS3CliPathIsAFileError): + await aws_s3_cli.sync_local_to_s3( + aws_s3_cli_settings=AsyncMock(), + progress_bar=AsyncMock(), + local_directory_path=file_path, + upload_s3_link=AsyncMock(), + debug_logs=True, + ) + with pytest.raises(aws_s3_cli.AwsS3CliPathIsAFileError): + await aws_s3_cli.sync_s3_to_local( + aws_s3_cli_settings=AsyncMock(), + progress_bar=AsyncMock(), + local_directory_path=file_path, + download_s3_link=AsyncMock(), + debug_logs=True, + ) diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py b/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py index edfb06445db..db8e51d7dbd 100644 --- a/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py +++ b/packages/simcore-sdk/tests/integration/test_node_ports_common_dbmanager.py @@ -3,8 +3,8 @@ # pylint:disable=redefined-outer-name import json +from collections.abc import Callable from pathlib import Path -from typing import Callable, Dict from simcore_sdk.node_ports_common.dbmanager import DBManager @@ -20,7 +20,7 @@ async def test_db_manager_read_config( project_id: str, node_uuid: str, node_ports_config: None, - default_configuration: Dict, + default_configuration: dict, ): db_manager = DBManager() ports_configuration_str = await db_manager.get_ports_configuration_from_node_uuid( diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_common_filemanager.py b/packages/simcore-sdk/tests/integration/test_node_ports_common_filemanager.py index 643bdb5ee69..5f0656a8c59 100644 --- a/packages/simcore-sdk/tests/integration/test_node_ports_common_filemanager.py +++ b/packages/simcore-sdk/tests/integration/test_node_ports_common_filemanager.py @@ -1,517 +1,713 @@ -# pylint:disable=unused-variable -# 
pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=too-many-arguments -# pylint:disable=protected-access - -import filecmp -from pathlib import Path -from typing import Any, Awaitable, Callable, Optional -from uuid import uuid4 - -import pytest -from aiohttp import ClientError -from models_library.projects_nodes_io import LocationID, SimcoreS3FileID -from models_library.users import UserID -from pydantic import ByteSize, parse_obj_as -from pytest_mock import MockerFixture -from pytest_simcore.helpers.utils_parametrizations import byte_size_ids -from servicelib.progress_bar import ProgressBarData -from settings_library.r_clone import RCloneSettings -from simcore_sdk.node_ports_common import exceptions, filemanager -from simcore_sdk.node_ports_common.r_clone import RCloneFailedError -from yarl import URL - -pytest_simcore_core_services_selection = [ - "migration", - "postgres", - "storage", -] - -pytest_simcore_ops_services_selection = ["minio", "adminer"] - - -@pytest.fixture(params=[True, False], ids=["with RClone", "without RClone"]) -def optional_r_clone( - r_clone_settings: RCloneSettings, request: pytest.FixtureRequest -) -> Optional[RCloneSettings]: - return r_clone_settings if request.param else None # type: ignore - - -def _file_size(size_str: str, **pytest_params): - return pytest.param(parse_obj_as(ByteSize, size_str), id=size_str, **pytest_params) - - -@pytest.mark.parametrize( - "file_size", - [ - _file_size("10Mib"), - _file_size("103Mib"), - _file_size("1003Mib", marks=pytest.mark.heavy_load), - _file_size("7Gib", marks=pytest.mark.heavy_load), - ], - ids=byte_size_ids, -) -async def test_valid_upload_download( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, - file_size: ByteSize, - create_file_of_size: Callable[[ByteSize, str], Path], - optional_r_clone: Optional[RCloneSettings], - simcore_services_ready: None, - storage_service: URL, -): - file_path = create_file_of_size(file_size, "test.test") - - file_id = create_valid_file_uuid("", file_path) - async with ProgressBarData(steps=2) as progress_bar: - store_id, e_tag = await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - r_clone_settings=optional_r_clone, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - # pylint: disable=protected-access - assert progress_bar._continuous_progress_value == pytest.approx(1) - assert store_id == s3_simcore_location - assert e_tag - get_store_id, get_e_tag = await filemanager.get_file_metadata( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - assert get_store_id == store_id - assert get_e_tag == e_tag - - download_folder = Path(tmpdir) / "downloads" - download_file_path = await filemanager.download_file_from_s3( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - assert progress_bar._continuous_progress_value == pytest.approx(2) - assert download_file_path.exists() - assert download_file_path.name == "test.test" - assert filecmp.cmp(download_file_path, file_path) - - -@pytest.mark.parametrize( - "file_size", - [ - _file_size("10Mib"), - _file_size("103Mib"), - ], - ids=byte_size_ids, -) -async def test_valid_upload_download_using_file_object( - node_ports_config: None, - tmpdir: Path, - user_id: 
int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, - file_size: ByteSize, - create_file_of_size: Callable[[ByteSize, str], Path], - optional_r_clone: Optional[RCloneSettings], -): - file_path = create_file_of_size(file_size, "test.test") - - file_id = create_valid_file_uuid("", file_path) - with file_path.open("rb") as file_object: - store_id, e_tag = await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=filemanager.UploadableFileObject( - file_object, file_path.name, file_path.stat().st_size - ), - r_clone_settings=optional_r_clone, - io_log_redirect_cb=None, - ) - assert store_id == s3_simcore_location - assert e_tag - get_store_id, get_e_tag = await filemanager.get_file_metadata( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - assert get_store_id == store_id - assert get_e_tag == e_tag - - download_folder = Path(tmpdir) / "downloads" - async with ProgressBarData(steps=1) as progress_bar: - download_file_path = await filemanager.download_file_from_s3( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - assert progress_bar._continuous_progress_value == pytest.approx(1) - assert download_file_path.exists() - assert download_file_path.name == "test.test" - assert filecmp.cmp(download_file_path, file_path) - - -@pytest.fixture -def mocked_upload_file_raising_exceptions(mocker: MockerFixture): - mocker.patch( - "simcore_sdk.node_ports_common.filemanager.r_clone.sync_local_to_s3", - autospec=True, - side_effect=RCloneFailedError, - ) - mocker.patch( - "simcore_sdk.node_ports_common.file_io_utils._upload_file_part", - autospec=True, - side_effect=ClientError, - ) - - -@pytest.mark.parametrize( - "file_size", - [ - _file_size("10Mib"), - ], - ids=byte_size_ids, -) -async def test_failed_upload_is_properly_removed_from_storage( - node_ports_config: None, - create_file_of_size: Callable[[ByteSize], Path], - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, - optional_r_clone: Optional[RCloneSettings], - file_size: ByteSize, - user_id: UserID, - mocked_upload_file_raising_exceptions: None, -): - file_path = create_file_of_size(file_size) - file_id = create_valid_file_uuid("", file_path) - with pytest.raises(exceptions.S3TransferError): - await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - r_clone_settings=optional_r_clone, - io_log_redirect_cb=None, - ) - with pytest.raises(exceptions.S3InvalidPathError): - await filemanager.get_file_metadata( - user_id=user_id, store_id=s3_simcore_location, s3_object=file_id - ) - - -@pytest.mark.parametrize( - "file_size", - [ - _file_size("10Mib"), - ], - ids=byte_size_ids, -) -async def test_failed_upload_after_valid_upload_keeps_last_valid_state( - node_ports_config: None, - create_file_of_size: Callable[[ByteSize], Path], - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, - optional_r_clone: Optional[RCloneSettings], - file_size: ByteSize, - user_id: UserID, - mocker: MockerFixture, -): - # upload a valid file - file_path = create_file_of_size(file_size) - file_id = create_valid_file_uuid("", file_path) - store_id, e_tag = await filemanager.upload_file( - user_id=user_id, - 
store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - r_clone_settings=optional_r_clone, - io_log_redirect_cb=None, - ) - assert store_id == s3_simcore_location - assert e_tag - # check the file is correctly uploaded - get_store_id, get_e_tag = await filemanager.get_file_metadata( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - assert get_store_id == store_id - assert get_e_tag == e_tag - # now start an invalid update by generating an exception while uploading the same file - mocker.patch( - "simcore_sdk.node_ports_common.filemanager.r_clone.sync_local_to_s3", - autospec=True, - side_effect=RCloneFailedError, - ) - mocker.patch( - "simcore_sdk.node_ports_common.file_io_utils._upload_file_part", - autospec=True, - side_effect=ClientError, - ) - with pytest.raises(exceptions.S3TransferError): - await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - r_clone_settings=optional_r_clone, - io_log_redirect_cb=None, - ) - # the file shall be back to its original state - old_store_id, old_e_tag = await filemanager.get_file_metadata( - user_id=user_id, store_id=s3_simcore_location, s3_object=file_id - ) - assert get_store_id == old_store_id - assert get_e_tag == old_e_tag - - -async def test_invalid_file_path( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, -): - file_path = Path(tmpdir) / "test.test" - file_path.write_text("I am a test file") - assert file_path.exists() - - file_id = create_valid_file_uuid("", file_path) - store = s3_simcore_location - with pytest.raises(FileNotFoundError): - await filemanager.upload_file( - user_id=user_id, - store_id=store, - store_name=None, - s3_object=file_id, - file_to_upload=Path(tmpdir) / "some other file.txt", - io_log_redirect_cb=None, - ) - - download_folder = Path(tmpdir) / "downloads" - with pytest.raises(exceptions.S3InvalidPathError): - async with ProgressBarData(steps=1) as progress_bar: - await filemanager.download_file_from_s3( - user_id=user_id, - store_id=store, - store_name=None, - s3_object=file_id, - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - - -async def test_errors_upon_invalid_file_identifiers( - node_ports_config: None, - tmpdir: Path, - user_id: UserID, - project_id: str, - s3_simcore_location: LocationID, -): - file_path = Path(tmpdir) / "test.test" - file_path.write_text("I am a test file") - assert file_path.exists() - - store = s3_simcore_location - with pytest.raises(exceptions.S3InvalidPathError): - await filemanager.upload_file( - user_id=user_id, - store_id=store, - store_name=None, - s3_object="", # type: ignore - file_to_upload=file_path, - io_log_redirect_cb=None, - ) - - with pytest.raises(exceptions.StorageInvalidCall): - await filemanager.upload_file( - user_id=user_id, - store_id=store, - store_name=None, - s3_object="file_id", # type: ignore - file_to_upload=file_path, - io_log_redirect_cb=None, - ) - - download_folder = Path(tmpdir) / "downloads" - with pytest.raises(exceptions.S3InvalidPathError): - async with ProgressBarData(steps=1) as progress_bar: - await filemanager.download_file_from_s3( - user_id=user_id, - store_id=store, - store_name=None, - s3_object="", # type: ignore - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - - with 
pytest.raises(exceptions.S3InvalidPathError): - async with ProgressBarData(steps=1) as progress_bar: - await filemanager.download_file_from_s3( - user_id=user_id, - store_id=store, - store_name=None, - s3_object=SimcoreS3FileID(f"{project_id}/{uuid4()}/invisible.txt"), - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - - -async def test_invalid_store( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], -): - file_path = Path(tmpdir) / "test.test" - file_path.write_text("I am a test file") - assert file_path.exists() - - file_id = create_valid_file_uuid("", file_path) - store = "somefunkystore" - with pytest.raises(exceptions.S3InvalidStore): - await filemanager.upload_file( - user_id=user_id, - store_id=None, - store_name=store, # type: ignore - s3_object=file_id, - file_to_upload=file_path, - io_log_redirect_cb=None, - ) - - download_folder = Path(tmpdir) / "downloads" - with pytest.raises(exceptions.S3InvalidStore): - async with ProgressBarData(steps=1) as progress_bar: - await filemanager.download_file_from_s3( - user_id=user_id, - store_id=None, - store_name=store, # type: ignore - s3_object=file_id, - local_folder=download_folder, - io_log_redirect_cb=None, - progress_bar=progress_bar, - ) - - -async def test_valid_metadata( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, -): - # first we go with a non-existing file - file_path = Path(tmpdir) / "test.test" - file_id = create_valid_file_uuid("", file_path) - assert file_path.exists() is False - - is_metadata_present = await filemanager.entry_exists( - user_id=user_id, store_id=s3_simcore_location, s3_object=file_id # type: ignore - ) - assert is_metadata_present == False - - # now really create the file and upload it - file_path.write_text("I am a test file") - assert file_path.exists() - - file_id = create_valid_file_uuid("", file_path) - store_id, e_tag = await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - io_log_redirect_cb=None, - ) - assert store_id == s3_simcore_location - assert e_tag - - is_metadata_present = await filemanager.entry_exists( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - - assert is_metadata_present is True - - -@pytest.mark.parametrize( - "fct", - [filemanager.entry_exists, filemanager.delete_file, filemanager.get_file_metadata], -) -async def test_invalid_call_raises_exception( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, - fct: Callable[[int, str, str, Optional[Any]], Awaitable], -): - file_path = Path(tmpdir) / "test.test" - file_id = create_valid_file_uuid("", file_path) - assert file_path.exists() is False - - with pytest.raises(exceptions.StorageInvalidCall): - await fct( - user_id=None, store_id=s3_simcore_location, s3_object=file_id # type: ignore - ) - with pytest.raises(exceptions.StorageInvalidCall): - await fct(user_id=user_id, store_id=None, s3_object=file_id) # type: ignore - with pytest.raises(exceptions.StorageInvalidCall): - await fct( - user_id=user_id, store_id=s3_simcore_location, s3_object="bing" # type: ignore - ) - - -async def test_delete_File( - node_ports_config: None, - tmpdir: Path, - user_id: int, - create_valid_file_uuid: 
Callable[[str, Path], SimcoreS3FileID], - s3_simcore_location: LocationID, -): - file_path = Path(tmpdir) / "test.test" - file_path.write_text("I am a test file") - assert file_path.exists() - - file_id = create_valid_file_uuid("", file_path) - store_id, e_tag = await filemanager.upload_file( - user_id=user_id, - store_id=s3_simcore_location, - store_name=None, - s3_object=file_id, - file_to_upload=file_path, - io_log_redirect_cb=None, - ) - assert store_id == s3_simcore_location - assert e_tag - - is_metadata_present = await filemanager.entry_exists( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - assert is_metadata_present is True - - await filemanager.delete_file( - user_id=user_id, store_id=s3_simcore_location, s3_object=file_id - ) - - # check that it disappeared - assert ( - await filemanager.entry_exists( - user_id=user_id, store_id=store_id, s3_object=file_id - ) - == False - ) +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint:disable=protected-access + +import filecmp +from collections.abc import Awaitable, Callable +from pathlib import Path +from typing import Any +from uuid import uuid4 + +import pytest +from aiohttp import ClientError +from faker import Faker +from models_library.projects_nodes_io import ( + LocationID, + SimcoreS3DirectoryID, + SimcoreS3FileID, +) +from models_library.users import UserID +from pydantic import BaseModel, ByteSize, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.parametrizations import byte_size_ids +from servicelib.progress_bar import ProgressBarData +from settings_library.aws_s3_cli import AwsS3CliSettings +from settings_library.r_clone import RCloneSettings +from simcore_sdk.node_ports_common import exceptions, filemanager +from simcore_sdk.node_ports_common.aws_s3_cli import AwsS3CliFailedError +from simcore_sdk.node_ports_common.filemanager import UploadedFile, UploadedFolder +from simcore_sdk.node_ports_common.r_clone import RCloneFailedError +from yarl import URL + +pytest_simcore_core_services_selection = [ + "migration", + "postgres", + "rabbit", + "redis", + "storage", + "sto-worker", +] + +pytest_simcore_ops_services_selection = ["minio", "adminer"] + + +class _SyncSettings(BaseModel): + r_clone_settings: RCloneSettings | None + aws_s3_cli_settings: AwsS3CliSettings | None + + +@pytest.fixture( + params=[(True, False), (False, True), (False, False)], + ids=[ + "RClone enabled", + "AwsS3Cli enabled", + "Both RClone and AwsS3Cli disabled", + ], +) +async def optional_sync_settings( + r_clone_settings_factory: Callable[[], Awaitable[RCloneSettings]], + aws_s3_cli_settings_factory: Callable[[], Awaitable[AwsS3CliSettings]], + request: pytest.FixtureRequest, +) -> _SyncSettings: + _rclone_enabled, _aws_s3_cli_enabled = request.param + + _r_clone_settings = await r_clone_settings_factory() if _rclone_enabled else None + _aws_s3_cli_settings = ( + await aws_s3_cli_settings_factory() if _aws_s3_cli_enabled else None + ) + + return _SyncSettings( + r_clone_settings=_r_clone_settings, aws_s3_cli_settings=_aws_s3_cli_settings + ) + + +def _file_size(size_str: str, **pytest_params): + return pytest.param( + TypeAdapter(ByteSize).validate_python(size_str), id=size_str, **pytest_params + ) + + +@pytest.mark.parametrize( + "file_size", + [ + _file_size("10Mib"), + _file_size("103Mib"), + _file_size("1003Mib", marks=pytest.mark.heavy_load), + _file_size("7Gib", marks=pytest.mark.heavy_load), + ], + 
ids=byte_size_ids, +) +async def test_valid_upload_download( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + file_size: ByteSize, + create_file_of_size: Callable[[ByteSize, str], Path], + optional_sync_settings: _SyncSettings, + simcore_services_ready: None, + storage_service: URL, + faker: Faker, +): + file_path = create_file_of_size(file_size, "test.test") + + file_id = create_valid_file_uuid("", file_path) + async with ProgressBarData(num_steps=2, description=faker.pystr()) as progress_bar: + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=file_path, + r_clone_settings=optional_sync_settings.r_clone_settings, + io_log_redirect_cb=None, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + assert isinstance(upload_result, UploadedFile) + store_id, e_tag = upload_result.store_id, upload_result.etag + # pylint: disable=protected-access + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 + assert store_id == s3_simcore_location + assert e_tag + file_metadata = await filemanager.get_file_metadata( + user_id=user_id, store_id=store_id, s3_object=file_id + ) + assert file_metadata.location == store_id + assert file_metadata.etag == e_tag + + download_folder = Path(tmpdir) / "downloads" + download_file_path = await filemanager.download_path_from_s3( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + assert progress_bar._current_steps == pytest.approx(2) # noqa: SLF001 + assert download_file_path.exists() + assert download_file_path.name == "test.test" + assert filecmp.cmp(download_file_path, file_path) + + +@pytest.mark.parametrize( + "file_size", + [ + _file_size("10Mib"), + _file_size("103Mib"), + ], + ids=byte_size_ids, +) +async def test_valid_upload_download_using_file_object( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + file_size: ByteSize, + create_file_of_size: Callable[[ByteSize, str], Path], + optional_sync_settings: _SyncSettings, + faker: Faker, +): + file_path = create_file_of_size(file_size, "test.test") + + file_id = create_valid_file_uuid("", file_path) + with file_path.open("rb") as file_object: + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=filemanager.UploadableFileObject( + file_object, file_path.name, file_path.stat().st_size + ), + r_clone_settings=optional_sync_settings.r_clone_settings, + io_log_redirect_cb=None, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + assert isinstance(upload_result, UploadedFile) + store_id, e_tag = upload_result.store_id, upload_result.etag + assert store_id == s3_simcore_location + assert e_tag + file_metadata = await filemanager.get_file_metadata( + user_id=user_id, store_id=store_id, s3_object=file_id + ) + assert file_metadata.location == store_id + assert file_metadata.etag == e_tag + + 
download_folder = Path(tmpdir) / "downloads" + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + download_file_path = await filemanager.download_path_from_s3( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 + assert download_file_path.exists() + assert download_file_path.name == "test.test" + assert filecmp.cmp(download_file_path, file_path) + + +@pytest.fixture +def mocked_upload_file_raising_exceptions(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_sdk.node_ports_common.filemanager.r_clone.sync_local_to_s3", + autospec=True, + side_effect=RCloneFailedError, + ) + mocker.patch( + "simcore_sdk.node_ports_common.file_io_utils._upload_file_part", + autospec=True, + side_effect=ClientError, + ) + mocker.patch( + "simcore_sdk.node_ports_common.filemanager.aws_s3_cli.sync_local_to_s3", + autospec=True, + side_effect=AwsS3CliFailedError, + ) + + +@pytest.mark.parametrize( + "file_size", + [ + _file_size("10Mib"), + ], + ids=byte_size_ids, +) +async def test_failed_upload_is_properly_removed_from_storage( + node_ports_config: None, + create_file_of_size: Callable[[ByteSize], Path], + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + optional_sync_settings: _SyncSettings, + file_size: ByteSize, + user_id: UserID, + mocked_upload_file_raising_exceptions: None, +): + file_path = create_file_of_size(file_size) + file_id = create_valid_file_uuid("", file_path) + with pytest.raises(exceptions.S3TransferError): + await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=file_path, + r_clone_settings=optional_sync_settings.r_clone_settings, + io_log_redirect_cb=None, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + with pytest.raises(exceptions.S3InvalidPathError): + await filemanager.get_file_metadata( + user_id=user_id, store_id=s3_simcore_location, s3_object=file_id + ) + + +@pytest.mark.parametrize( + "file_size", + [ + _file_size("10Mib"), + ], + ids=byte_size_ids, +) +async def test_failed_upload_after_valid_upload_keeps_last_valid_state( + node_ports_config: None, + create_file_of_size: Callable[[ByteSize], Path], + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + optional_sync_settings: _SyncSettings, + file_size: ByteSize, + user_id: UserID, + mocker: MockerFixture, +): + # upload a valid file + file_path = create_file_of_size(file_size) + file_id = create_valid_file_uuid("", file_path) + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=file_path, + r_clone_settings=optional_sync_settings.r_clone_settings, + io_log_redirect_cb=None, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + assert isinstance(upload_result, UploadedFile) + store_id, e_tag = upload_result.store_id, upload_result.etag + assert store_id == s3_simcore_location + assert e_tag + # check the file is correctly uploaded + file_metadata = await filemanager.get_file_metadata( + user_id=user_id, 
store_id=store_id, s3_object=file_id + ) + assert file_metadata.location == store_id + assert file_metadata.etag == e_tag + # now start an invalid update by generating an exception while uploading the same file + mocker.patch( + "simcore_sdk.node_ports_common.filemanager.r_clone.sync_local_to_s3", + autospec=True, + side_effect=RCloneFailedError, + ) + mocker.patch( + "simcore_sdk.node_ports_common.file_io_utils._upload_file_part", + autospec=True, + side_effect=ClientError, + ) + with pytest.raises(exceptions.S3TransferError): + await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=file_path, + r_clone_settings=optional_sync_settings.r_clone_settings, + io_log_redirect_cb=None, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + # the file shall be back to its original state + file_metadata = await filemanager.get_file_metadata( + user_id=user_id, store_id=s3_simcore_location, s3_object=file_id + ) + assert file_metadata.location == store_id + assert file_metadata.etag == e_tag + + +async def test_invalid_file_path( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + optional_sync_settings: _SyncSettings, + faker: Faker, +): + file_path = Path(tmpdir) / "test.test" + file_path.write_text("I am a test file") + assert file_path.exists() + + file_id = create_valid_file_uuid("", file_path) + store = s3_simcore_location + with pytest.raises(FileNotFoundError): + await filemanager.upload_path( + user_id=user_id, + store_id=store, + store_name=None, + s3_object=file_id, + path_to_upload=Path(tmpdir) / "some other file.txt", + io_log_redirect_cb=None, + ) + + download_folder = Path(tmpdir) / "downloads" + with pytest.raises(exceptions.S3InvalidPathError): + async with ProgressBarData( + num_steps=1, description=faker.pystr() + ) as progress_bar: + await filemanager.download_path_from_s3( + user_id=user_id, + store_id=store, + store_name=None, + s3_object=file_id, + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + + +async def test_errors_upon_invalid_file_identifiers( + node_ports_config: None, + tmpdir: Path, + user_id: UserID, + project_id: str, + s3_simcore_location: LocationID, + optional_sync_settings: _SyncSettings, + faker: Faker, +): + file_path = Path(tmpdir) / "test.test" + file_path.write_text("I am a test file") + assert file_path.exists() + + store = s3_simcore_location + with pytest.raises(exceptions.StorageInvalidCall): # noqa: PT012 + invalid_s3_path = "" + await filemanager.upload_path( + user_id=user_id, + store_id=store, + store_name=None, + s3_object=invalid_s3_path, + path_to_upload=file_path, + io_log_redirect_cb=None, + ) + + with pytest.raises(exceptions.StorageInvalidCall): # noqa: PT012 + invalid_file_id = "file_id" + await filemanager.upload_path( + user_id=user_id, + store_id=store, + store_name=None, + s3_object=invalid_file_id, + path_to_upload=file_path, + io_log_redirect_cb=None, + ) + + download_folder = Path(tmpdir) / "downloads" + with pytest.raises(exceptions.StorageInvalidCall): # noqa: PT012 + async with ProgressBarData( + num_steps=1, description=faker.pystr() + ) as progress_bar: + invalid_s3_path = "" + await filemanager.download_path_from_s3( + user_id=user_id, + store_id=store, + 
store_name=None, + s3_object=invalid_s3_path, + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + + with pytest.raises(exceptions.S3InvalidPathError): + async with ProgressBarData( + num_steps=1, description=faker.pystr() + ) as progress_bar: + await filemanager.download_path_from_s3( + user_id=user_id, + store_id=store, + store_name=None, + s3_object=TypeAdapter(SimcoreS3FileID).validate_python( + f"{project_id}/{uuid4()}/invisible.txt" + ), + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + + +async def test_invalid_store( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + optional_sync_settings: _SyncSettings, + faker: Faker, +): + file_path = Path(tmpdir) / "test.test" + file_path.write_text("I am a test file") + assert file_path.exists() + + file_id = create_valid_file_uuid("", file_path) + store = "somefunkystore" + with pytest.raises(exceptions.S3InvalidStore): + await filemanager.upload_path( + user_id=user_id, + store_id=None, + store_name=store, # type: ignore + s3_object=file_id, + path_to_upload=file_path, + io_log_redirect_cb=None, + ) + + download_folder = Path(tmpdir) / "downloads" + with pytest.raises(exceptions.S3InvalidStore): + async with ProgressBarData( + num_steps=1, description=faker.pystr() + ) as progress_bar: + await filemanager.download_path_from_s3( + user_id=user_id, + store_id=None, + store_name=store, # type: ignore + s3_object=file_id, + local_path=download_folder, + io_log_redirect_cb=None, + r_clone_settings=optional_sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=optional_sync_settings.aws_s3_cli_settings, + ) + + +@pytest.fixture( + params=[True, False], + ids=["with RClone", "with AwsS3Cli"], +) +def sync_settings( + r_clone_settings: RCloneSettings, + aws_s3_cli_settings: AwsS3CliSettings, + request: pytest.FixtureRequest, +) -> _SyncSettings: + is_rclone_enabled = request.param + + return _SyncSettings( + r_clone_settings=r_clone_settings if is_rclone_enabled else None, + aws_s3_cli_settings=aws_s3_cli_settings if not is_rclone_enabled else None, + ) + + +@pytest.mark.parametrize("is_directory", [False, True]) +async def test_valid_metadata( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + sync_settings: _SyncSettings, + is_directory: bool, +): + # first we go with a non-existing file + file_path = Path(tmpdir) / "a-subdir" / "test.test" + file_path.parent.mkdir(parents=True, exist_ok=True) + + path_to_upload = file_path.parent if is_directory else file_path + + file_id = create_valid_file_uuid("", path_to_upload) + assert file_path.exists() is False + + is_metadata_present = await filemanager.entry_exists( + user_id=user_id, + store_id=s3_simcore_location, + s3_object=file_id, + is_directory=is_directory, + ) + assert is_metadata_present is False + + # now really create the file and upload it + file_path.write_text("I am a test file") + assert file_path.exists() + + file_id = create_valid_file_uuid("", path_to_upload) + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + 
user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=path_to_upload, + io_log_redirect_cb=None, + r_clone_settings=sync_settings.r_clone_settings, + aws_s3_cli_settings=sync_settings.aws_s3_cli_settings, + ) + if is_directory: + assert isinstance(upload_result, UploadedFolder) + else: + assert isinstance(upload_result, UploadedFile) + assert upload_result.store_id == s3_simcore_location + assert upload_result.etag + + is_metadata_present = await filemanager.entry_exists( + user_id=user_id, + store_id=s3_simcore_location, + s3_object=file_id, + is_directory=is_directory, + ) + + assert is_metadata_present is True + + +@pytest.mark.parametrize( + "fct, extra_kwargs", + [ + (filemanager.entry_exists, {"is_directory": False}), + (filemanager.delete_file, {}), + (filemanager.get_file_metadata, {}), + ], +) +async def test_invalid_call_raises_exception( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + fct: Callable[[int, str, str, Any | None], Awaitable], + extra_kwargs: dict[str, Any], +): + file_path = Path(tmpdir) / "test.test" + file_id = create_valid_file_uuid("", file_path) + assert file_path.exists() is False + + with pytest.raises(exceptions.StorageInvalidCall): + await fct( + user_id=None, + store_id=s3_simcore_location, + s3_object=file_id, + **extra_kwargs, # type: ignore + ) + with pytest.raises(exceptions.StorageInvalidCall): + await fct(user_id=user_id, store_id=None, s3_object=file_id, **extra_kwargs) # type: ignore + with pytest.raises(exceptions.StorageInvalidCall): + await fct( + user_id=user_id, + store_id=s3_simcore_location, + s3_object="bing", + **extra_kwargs, # type: ignore + ) + + +async def test_delete_file( + node_ports_config: None, + tmpdir: Path, + user_id: int, + create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + s3_simcore_location: LocationID, + storage_service: URL, +): + file_path = Path(tmpdir) / "test.test" + file_path.write_text("I am a test file") + assert file_path.exists() + + file_id = create_valid_file_uuid("", file_path) + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=file_id, + path_to_upload=file_path, + io_log_redirect_cb=None, + ) + assert isinstance(upload_result, UploadedFile) + store_id, e_tag = upload_result.store_id, upload_result.etag + assert store_id == s3_simcore_location + assert e_tag + + is_metadata_present = await filemanager.entry_exists( + user_id=user_id, store_id=store_id, s3_object=file_id, is_directory=False + ) + assert is_metadata_present is True + + await filemanager.delete_file( + user_id=user_id, store_id=s3_simcore_location, s3_object=file_id + ) + + # check that it disappeared + assert ( + await filemanager.entry_exists( + user_id=user_id, store_id=store_id, s3_object=file_id, is_directory=False + ) + is False + ) + + +@pytest.mark.parametrize("files_in_folder", [1, 10]) +async def test_upload_path_source_is_a_folder( + node_ports_config: None, + project_id: str, + tmp_path: Path, + faker: Faker, + user_id: int, + s3_simcore_location: LocationID, + files_in_folder: int, + sync_settings: _SyncSettings, +): + source_dir = tmp_path / f"source-{faker.uuid4()}" + source_dir.mkdir(parents=True, exist_ok=True) + + download_dir = tmp_path / f"download-{faker.uuid4()}" + download_dir.mkdir(parents=True, exist_ok=True) + + for i in 
range(files_in_folder): + (source_dir / f"file-{i}.txt").write_text("1") + + directory_id = SimcoreS3DirectoryID.from_simcore_s3_object( + f"{project_id}/{faker.uuid4()}/some-dir-in-node-root/" + ) + s3_object = TypeAdapter(SimcoreS3FileID).validate_python(directory_id) + + upload_result: UploadedFolder | UploadedFile = await filemanager.upload_path( + user_id=user_id, + store_id=s3_simcore_location, + store_name=None, + s3_object=s3_object, + path_to_upload=source_dir, + io_log_redirect_cb=None, + r_clone_settings=sync_settings.r_clone_settings, + aws_s3_cli_settings=sync_settings.aws_s3_cli_settings, + ) + assert isinstance(upload_result, UploadedFolder) + assert source_dir.exists() + + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await filemanager.download_path_from_s3( + user_id=user_id, + store_name=None, + store_id=s3_simcore_location, + s3_object=s3_object, + local_path=download_dir, + io_log_redirect_cb=None, + r_clone_settings=sync_settings.r_clone_settings, + progress_bar=progress_bar, + aws_s3_cli_settings=sync_settings.aws_s3_cli_settings, + ) + assert download_dir.exists() + + # ensure all files in download and source directory are the same + file_names: set = {f.name for f in source_dir.glob("*")} & { + f.name for f in download_dir.glob("*") + } + for file_name in file_names: + filecmp.cmp(source_dir / file_name, download_dir / file_name, shallow=False) diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_common_r_clone.py b/packages/simcore-sdk/tests/integration/test_node_ports_common_r_clone.py index 4c257756cad..598d7d653e7 100644 --- a/packages/simcore-sdk/tests/integration/test_node_ports_common_r_clone.py +++ b/packages/simcore-sdk/tests/integration/test_node_ports_common_r_clone.py @@ -1,28 +1,35 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument -import asyncio import filecmp +import os import re import urllib.parse -from contextlib import asynccontextmanager +from collections.abc import AsyncIterator, Callable from pathlib import Path -from typing import AsyncGenerator, Callable, Final +from typing import Final +from unittest.mock import AsyncMock from uuid import uuid4 import aioboto3 +import aiofiles import pytest from faker import Faker -from models_library.api_schemas_storage import FileUploadLinks, FileUploadSchema -from pydantic import AnyUrl, ByteSize, parse_obj_as -from pytest import FixtureRequest +from models_library.progress_bar import ProgressReport +from pydantic import AnyUrl, ByteSize, TypeAdapter +from servicelib.file_utils import remove_directory +from servicelib.progress_bar import ProgressBarData +from servicelib.utils import logged_gather from settings_library.r_clone import RCloneSettings from simcore_sdk.node_ports_common import r_clone pytest_simcore_core_services_selection = [ "migration", "postgres", + "rabbit", + "redis", "storage", + "sto-worker", ] pytest_simcore_ops_services_selection = [ @@ -34,75 +41,55 @@ WAIT_FOR_S3_BACKEND_TO_UPDATE: Final[float] = 1.0 -@pytest.fixture( - params=[ - f"{uuid4()}.bin", - "some funky name.txt", - "ΓΆΓ€$Àâ2-34 no extension", - ] -) -def file_name(request: FixtureRequest) -> str: - return request.param # type: ignore - - @pytest.fixture -def local_file_for_download(upload_file_dir: Path, file_name: str) -> Path: - local_file_path = upload_file_dir / f"__local__{file_name}" - return local_file_path - - -# UTILS -@asynccontextmanager -async def _get_s3_object( - r_clone_settings: RCloneSettings, s3_path: str -) -> 
AsyncGenerator["aioboto3.resources.factory.s3.Object", None]: +async def cleanup_bucket_after_test( + r_clone_settings: RCloneSettings, +) -> AsyncIterator[None]: session = aioboto3.Session( aws_access_key_id=r_clone_settings.R_CLONE_S3.S3_ACCESS_KEY, aws_secret_access_key=r_clone_settings.R_CLONE_S3.S3_SECRET_KEY, ) - async with session.resource( - "s3", endpoint_url=r_clone_settings.R_CLONE_S3.S3_ENDPOINT - ) as s3: - s3_object = await s3.Object( - bucket_name=r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME, - key=s3_path.removeprefix(r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME), - ) - yield s3_object + yield -async def _download_s3_object( - r_clone_settings: RCloneSettings, s3_path: str, local_path: Path -): - await asyncio.sleep(WAIT_FOR_S3_BACKEND_TO_UPDATE) - async with _get_s3_object(r_clone_settings, s3_path) as s3_object_in_s3: - await s3_object_in_s3.download_file(f"{local_path}") - - -def _fake_upload_file_link( - r_clone_settings: RCloneSettings, s3_object: str -) -> FileUploadSchema: - return FileUploadSchema( - chunk_size=ByteSize(0), - urls=[ - parse_obj_as( - AnyUrl, - f"s3://{r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME}/{urllib.parse.quote(s3_object)}", - ) - ], - links=FileUploadLinks( - abort_upload=parse_obj_as(AnyUrl, "https://www.fakeabort.com"), - complete_upload=parse_obj_as(AnyUrl, "https://www.fakecomplete.com"), - ), + async with session.client( + "s3", endpoint_url=f"{r_clone_settings.R_CLONE_S3.S3_ENDPOINT}" + ) as s3_client: + # List all object versions + paginator = s3_client.get_paginator("list_object_versions") + async for page in paginator.paginate( + Bucket=r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME + ): + # Prepare delete markers and versions for deletion + delete_markers = page.get("DeleteMarkers", []) + versions = page.get("Versions", []) + + objects_to_delete = [ + {"Key": obj["Key"], "VersionId": obj["VersionId"]} + for obj in delete_markers + versions + ] + + # Perform deletion + if objects_to_delete: + await s3_client.delete_objects( + Bucket=r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME, + Delete={"Objects": objects_to_delete, "Quiet": True}, + ) + + +def _fake_s3_link(r_clone_settings: RCloneSettings, s3_object: str) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + f"s3://{r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME}/{urllib.parse.quote(s3_object)}" ) def test_s3_url_quote_and_unquote(): - """This test was added to validate quotation operations in _fake_upload_file_link + """This test was added to validate quotation operations in _fake_s3_link against unquotation operation in """ src = "53a35372-d44d-4d2e-8319-b40db5f31ce0/2f67d5cb-ea9c-4f8c-96ef-eae8445a0fe7/6fa73b0f-4006-46c6-9847-967b45ff3ae7.bin" - # as in _fake_upload_file_link + # as in _fake_s3_link url = f"s3://simcore/{urllib.parse.quote(src)}" # as in sync_local_to_s3 @@ -111,26 +98,363 @@ def test_s3_url_quote_and_unquote(): assert truncated_url == f"simcore/{src}" -async def test_sync_local_to_s3( +async def _create_random_binary_file( + file_path: Path, + file_size: ByteSize, + # NOTE: bigger files get created faster with bigger chunk_size + chunk_size: int = TypeAdapter(ByteSize).validate_python("1mib"), +): + async with aiofiles.open(file_path, mode="wb") as file: + bytes_written = 0 + while bytes_written < file_size: + remaining_bytes = file_size - bytes_written + current_chunk_size = min(chunk_size, remaining_bytes) + await file.write(os.urandom(current_chunk_size)) + bytes_written += current_chunk_size + assert bytes_written == file_size + + +async def _create_file_of_size( + 
tmp_path: Path, *, name: str, file_size: ByteSize +) -> Path: + file: Path = tmp_path / name + if not file.parent.exists(): + file.parent.mkdir(parents=True, exist_ok=True) + + await _create_random_binary_file(file, file_size) + assert file.exists() + assert file.stat().st_size == file_size + return file + + +async def _create_files_in_dir( + target_dir: Path, file_count: int, file_size: ByteSize +) -> set[str]: + results: list[Path] = await logged_gather( + *[ + _create_file_of_size(target_dir, name=f"{i}-file.bin", file_size=file_size) + for i in range(file_count) + ], + max_concurrency=10, + ) + return {x.name for x in results} + + +async def _upload_local_dir_to_s3( + r_clone_settings: RCloneSettings, + s3_directory_link: AnyUrl, + source_dir: Path, + *, + check_progress: bool = False, + faker: Faker, +) -> None: + # NOTE: progress is enforced only when uploading and only when using + # total file sizes that are quite big, otherwise the test will fail; + # we want to avoid the test being flaky. + # Since moto is used to mock the S3 API, downloading is way too fast. + # Progress behaves as expected with CEPH and AWS S3 backends. + + progress_entries: list[ProgressReport] = [] + + async def _report_progress_upload(report: ProgressReport) -> None: + print(">>>|", report, "| ⏫") + progress_entries.append(report) + + async with ProgressBarData( + num_steps=1, + progress_report_cb=_report_progress_upload, + description=faker.pystr(), + ) as progress_bar: + await r_clone.sync_local_to_s3( + r_clone_settings, + progress_bar, + local_directory_path=source_dir, + upload_s3_link=s3_directory_link, + debug_logs=True, + ) + if check_progress: + # NOTE: a progress of 1 is always sent by the progress bar + # we want to check that rclone also reports some progress entries + assert len(progress_entries) > 1 + + +async def _download_from_s3_to_local_dir( + r_clone_settings: RCloneSettings, + s3_directory_link: AnyUrl, + destination_dir: Path, + faker: Faker, +) -> None: + async def _report_progress_download(report: ProgressReport) -> None: + print(">>>|", report, "| ⏬") + + async with ProgressBarData( + num_steps=1, + progress_report_cb=_report_progress_download, + description=faker.pystr(), + ) as progress_bar: + await r_clone.sync_s3_to_local( + r_clone_settings, + progress_bar, + local_directory_path=destination_dir, + download_s3_link=f"{s3_directory_link}", + debug_logs=True, + ) + + +def _directories_have_the_same_content(dir_1: Path, dir_2: Path) -> bool: + names_in_dir_1 = {x.name for x in dir_1.glob("*")} + names_in_dir_2 = {x.name for x in dir_2.glob("*")} + if names_in_dir_1 != names_in_dir_2: + return False + + filecmp.clear_cache() + + compare_results: list[bool] = [] + + for file_name in names_in_dir_1: + f1 = dir_1 / file_name + f2 = dir_2 / file_name + + # when there is a broken symlink, which we want to sync, filecmp does not work + is_broken_symlink = ( + not f1.exists() and f1.is_symlink() and not f2.exists() and f2.is_symlink() + ) + + if is_broken_symlink: + compare_results.append(True) + else: + compare_results.append(filecmp.cmp(f1, f2, shallow=False)) + + return all(compare_results) + + +def _ensure_dir(tmp_path: Path, faker: Faker, *, dir_prefix: str) -> Path: + generated_files_dir: Path = tmp_path / f"{dir_prefix}-{faker.uuid4()}" + generated_files_dir.mkdir(parents=True, exist_ok=True) + assert generated_files_dir.exists() + return generated_files_dir + + +@pytest.fixture +async def dir_locally_created_files( + tmp_path: Path, faker: Faker +) -> AsyncIterator[Path]: + path = 
_ensure_dir(tmp_path, faker, dir_prefix="source") + yield path + await remove_directory(path) + + +@pytest.fixture +async def dir_downloaded_files_1(tmp_path: Path, faker: Faker) -> AsyncIterator[Path]: + path = _ensure_dir(tmp_path, faker, dir_prefix="downloaded-1") + yield path + await remove_directory(path) + + +@pytest.fixture +async def dir_downloaded_files_2(tmp_path: Path, faker: Faker) -> AsyncIterator[Path]: + path = _ensure_dir(tmp_path, faker, dir_prefix="downloaded-2") + yield path + await remove_directory(path) + + +@pytest.mark.parametrize( + "file_count, file_size, check_progress", + [ + (0, TypeAdapter(ByteSize).validate_python("0"), False), + (1, TypeAdapter(ByteSize).validate_python("1mib"), False), + (2, TypeAdapter(ByteSize).validate_python("1mib"), False), + (1, TypeAdapter(ByteSize).validate_python("1Gib"), True), + pytest.param( + 4, + TypeAdapter(ByteSize).validate_python("500Mib"), + True, + marks=pytest.mark.heavy_load, + ), + pytest.param( + 100, + TypeAdapter(ByteSize).validate_python("20mib"), + True, + marks=pytest.mark.heavy_load, + ), + ], +) +async def test_local_to_remote_to_local( r_clone_settings: RCloneSettings, - file_name: str, - create_file_of_size: Callable[[ByteSize, str], Path], create_valid_file_uuid: Callable[[str, Path], str], - tmp_path: Path, + dir_locally_created_files: Path, + dir_downloaded_files_1: Path, + file_count: int, + file_size: ByteSize, + check_progress: bool, + cleanup_bucket_after_test: None, faker: Faker, ) -> None: - local_file = create_file_of_size(parse_obj_as(ByteSize, "10Mib"), file_name) - file_uuid = create_valid_file_uuid("", local_file) - upload_file_link = _fake_upload_file_link(r_clone_settings, file_uuid) - await r_clone.sync_local_to_s3(local_file, r_clone_settings, upload_file_link) + await _create_files_in_dir(dir_locally_created_files, file_count, file_size) + + # get s3 reference link + directory_uuid = create_valid_file_uuid(f"{dir_locally_created_files}", Path()) + s3_directory_link = _fake_s3_link(r_clone_settings, directory_uuid) - local_download_file = tmp_path / faker.file_name() - await _download_s3_object( - r_clone_settings=r_clone_settings, - s3_path=file_uuid, - local_path=local_download_file, + # run the test + await _upload_local_dir_to_s3( + r_clone_settings, + s3_directory_link, + dir_locally_created_files, + check_progress=check_progress, + faker=faker, + ) + await _download_from_s3_to_local_dir( + r_clone_settings, s3_directory_link, dir_downloaded_files_1, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 ) - assert local_download_file.exists() - # check same file contents after upload and download - assert filecmp.cmp(local_file, local_download_file) + +def _change_content_of_one_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).write_bytes(os.urandom(10)) + + +def _change_content_of_all_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + for file_name in generated_file_names: + (dir_locally_created_files / file_name).unlink() + (dir_locally_created_files / file_name).write_bytes(os.urandom(10)) + + +def _remove_one_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).unlink() + + +def _rename_one_file( + 
dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + a_generated_file = next(iter(generated_file_names)) + (dir_locally_created_files / a_generated_file).rename( + dir_locally_created_files / f"renamed-{a_generated_file}" + ) + + +def _add_a_new_file( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + (dir_locally_created_files / "new_file.bin").write_bytes(os.urandom(10)) + + +def _remove_all_files( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + for file_name in generated_file_names: + (dir_locally_created_files / file_name).unlink() + + +def _regression_add_broken_symlink( + dir_locally_created_files: Path, generated_file_names: set[str] +) -> None: + # NOTE: if rclone tries to copy a link that does not exist an error is raised + path_does_not_exist_on_fs = Path(f"/tmp/missing-{uuid4()}") # noqa: S108 + assert not path_does_not_exist_on_fs.exists() + + broken_symlink = dir_locally_created_files / "missing.link" + assert not broken_symlink.exists() + os.symlink(f"{path_does_not_exist_on_fs}", f"{broken_symlink}") + + +@pytest.mark.parametrize( + "changes_callable", + [ + _change_content_of_one_file, + _change_content_of_all_file, + _remove_one_file, + _remove_all_files, + _rename_one_file, + _add_a_new_file, + _regression_add_broken_symlink, + ], +) +async def test_overwrite_an_existing_file_and_sync_again( + r_clone_settings: RCloneSettings, + create_valid_file_uuid: Callable[[str, Path], str], + dir_locally_created_files: Path, + dir_downloaded_files_1: Path, + dir_downloaded_files_2: Path, + changes_callable: Callable[[Path, set[str]], None], + cleanup_bucket_after_test: None, + faker: Faker, +) -> None: + generated_file_names: set[str] = await _create_files_in_dir( + dir_locally_created_files, + r_clone_settings.R_CLONE_OPTION_TRANSFERS * 3, + TypeAdapter(ByteSize).validate_python("1kib"), + ) + assert len(generated_file_names) > 0 + + # get s3 reference link + directory_uuid = create_valid_file_uuid(f"{dir_locally_created_files}", Path()) + s3_directory_link = _fake_s3_link(r_clone_settings, directory_uuid) + + # sync local to remote and check + await _upload_local_dir_to_s3( + r_clone_settings, s3_directory_link, dir_locally_created_files, faker=faker + ) + await _download_from_s3_to_local_dir( + r_clone_settings, s3_directory_link, dir_downloaded_files_1, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 + ) + + # make some changes to local content + changes_callable(dir_locally_created_files, generated_file_names) + + # ensure local content now differs from remote content + assert not _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_1 + ) + + # upload and check new local and new remote are in sync + await _upload_local_dir_to_s3( + r_clone_settings, s3_directory_link, dir_locally_created_files, faker=faker + ) + await _download_from_s3_to_local_dir( + r_clone_settings, s3_directory_link, dir_downloaded_files_2, faker=faker + ) + assert _directories_have_the_same_content( + dir_locally_created_files, dir_downloaded_files_2 + ) + # check that old remote and new remote are not the same + assert not _directories_have_the_same_content( + dir_downloaded_files_1, dir_downloaded_files_2 + ) + + +async def test_raises_error_if_local_directory_path_is_a_file( + tmp_path: Path, faker: Faker, cleanup_bucket_after_test: None +): + file_path = await _create_file_of_size( + tmp_path, name=f"test{faker.uuid4()}.bin", 
file_size=ByteSize(1) + ) + with pytest.raises(r_clone.RCloneDirectoryNotFoundError): + await r_clone.sync_local_to_s3( + r_clone_settings=AsyncMock(), + progress_bar=AsyncMock(), + local_directory_path=file_path, + upload_s3_link=AsyncMock(), + debug_logs=True, + ) + with pytest.raises(r_clone.RCloneDirectoryNotFoundError): + await r_clone.sync_s3_to_local( + r_clone_settings=AsyncMock(), + progress_bar=AsyncMock(), + local_directory_path=file_path, + download_s3_link=AsyncMock(), + debug_logs=True, + ) diff --git a/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py b/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py index 8f08e966c16..88d16e383d2 100644 --- a/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py +++ b/packages/simcore-sdk/tests/integration/test_node_ports_v2_nodeports2.py @@ -1,37 +1,53 @@ # pylint: disable=pointless-statement +# pylint: disable=protected-access # pylint: disable=redefined-outer-name # pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable -# pylint: disable=protected-access import filecmp import os import tempfile -import threading from asyncio import gather +from collections.abc import Awaitable, Callable, Iterable from pathlib import Path -from typing import Any, Awaitable, Callable, Iterable, Optional, Union +from typing import Any +from unittest.mock import AsyncMock from uuid import uuid4 import np_helpers import pytest import sqlalchemy as sa -from models_library.projects_nodes_io import LocationID, NodeIDStr, SimcoreS3FileID +from faker import Faker +from models_library.projects_nodes_io import ( + BaseFileLink, + DownloadLink, + LocationID, + NodeIDStr, + SimcoreS3FileID, +) +from models_library.services_types import ServicePortKey +from pydantic import TypeAdapter +from pytest_mock import MockerFixture from servicelib.progress_bar import ProgressBarData from settings_library.r_clone import RCloneSettings from simcore_sdk import node_ports_v2 from simcore_sdk.node_ports_common.exceptions import UnboundPortError from simcore_sdk.node_ports_v2 import exceptions -from simcore_sdk.node_ports_v2.links import ItemConcreteValue -from simcore_sdk.node_ports_v2.nodeports_v2 import Nodeports +from simcore_sdk.node_ports_v2.links import ItemConcreteValue, PortLink +from simcore_sdk.node_ports_v2.nodeports_v2 import Nodeports, OutputsCallbacks from simcore_sdk.node_ports_v2.port import Port +from utils_port_v2 import CONSTANT_UUID pytest_simcore_core_services_selection = [ "migration", "postgres", + "rabbit", + "redis", "storage", + "sto-worker", ] pytest_simcore_ops_services_selection = [ @@ -45,7 +61,7 @@ async def _check_port_valid( config_dict: dict, port_type: str, key_name: str, - key: Union[str, int], + key: str | int, ): port: Port = (await getattr(ports, port_type))[key] assert isinstance(port, Port) @@ -76,8 +92,10 @@ async def _check_port_valid( port_values = config_dict[port_type] if key_name in port_values: if isinstance(port_values[key_name], dict): + assert port.value + assert isinstance(port.value, DownloadLink | PortLink | BaseFileLink) assert ( - port.value.dict(by_alias=True, exclude_unset=True) + port.value.model_dump(by_alias=True, exclude_unset=True) == port_values[key_name] ) else: @@ -90,7 +108,7 @@ async def _check_port_valid( async def _check_ports_valid(ports: Nodeports, config_dict: dict, port_type: str): port_schemas = config_dict["schema"][port_type] - for key in 
port_schemas.keys(): + for key in port_schemas: # test using "key" name await _check_port_valid(ports, config_dict, port_type, key, key) # test using index @@ -137,10 +155,10 @@ def config_value_symlink_path(symlink_path: Path) -> dict[str, Any]: @pytest.fixture(params=[True, False]) async def option_r_clone_settings( - request, r_clone_settings_factory: Awaitable[RCloneSettings] -) -> Optional[RCloneSettings]: + request, r_clone_settings_factory: Callable[[], Awaitable[RCloneSettings]] +) -> RCloneSettings | None: if request.param: - return await r_clone_settings_factory + return await r_clone_settings_factory() return None @@ -149,7 +167,7 @@ async def test_default_configuration( project_id: str, node_uuid: NodeIDStr, default_configuration: dict[str, Any], - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): config_dict = default_configuration await check_config_valid( @@ -168,7 +186,7 @@ async def test_invalid_ports( project_id: str, node_uuid: NodeIDStr, create_special_configuration: Callable, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): config_dict, _, _ = create_special_configuration() PORTS = await node_ports_v2.ports( @@ -209,9 +227,9 @@ async def test_port_value_accessors( item_type: str, item_value: ItemConcreteValue, item_pytype: type, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): # pylint: disable=W0613, W0621 - item_key = "some_key" + item_key = TypeAdapter(ServicePortKey).validate_python("some_key") config_dict, _, _ = create_special_configuration( inputs=[(item_key, item_type, item_value)], outputs=[(item_key, item_type, None)], @@ -244,12 +262,7 @@ async def test_port_value_accessors( ("data:*/*", __file__, Path, {"store": 0, "path": __file__}), ("data:text/*", __file__, Path, {"store": 0, "path": __file__}), ("data:text/py", __file__, Path, {"store": 0, "path": __file__}), - ( - "data:text/py", - pytest.lazy_fixture("symlink_path"), - Path, - pytest.lazy_fixture("config_value_symlink_path"), - ), + ("data:text/py", "symlink_path", Path, "config_value_symlink_path"), ], ) async def test_port_file_accessors( @@ -263,8 +276,15 @@ async def test_port_file_accessors( project_id: str, node_uuid: NodeIDStr, e_tag: str, - option_r_clone_settings: Optional[RCloneSettings], -): # pylint: disable=W0613, W0621 + option_r_clone_settings: RCloneSettings | None, + request: pytest.FixtureRequest, + constant_uuid4: None, +): + if item_value == "symlink_path": + item_value = request.getfixturevalue("symlink_path") + if config_value == "config_value_symlink_path": + config_value = request.getfixturevalue("config_value_symlink_path") + config_value["path"] = f"{project_id}/{node_uuid}/{Path(config_value['path']).name}" config_dict, _project_id, _node_uuid = create_special_configuration( @@ -282,16 +302,27 @@ async def test_port_file_accessors( r_clone_settings=option_r_clone_settings, ) await check_config_valid(PORTS, config_dict) - assert await (await PORTS.outputs)["out_34"].get() is None # check emptyness + assert ( + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].get() + is None + ) # check emptyness with pytest.raises(exceptions.S3InvalidPathError): - await (await PORTS.inputs)["in_1"].get() + await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_1") + ].get() # this triggers an upload to S3 + configuration change - await (await 
PORTS.outputs)["out_34"].set(item_value) + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].set(item_value) # this is the link to S3 storage - received_file_link = (await PORTS.outputs)["out_34"].value.dict( - by_alias=True, exclude_unset=True - ) + value = (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].value + assert isinstance(value, DownloadLink | PortLink | BaseFileLink) + received_file_link = value.model_dump(by_alias=True, exclude_unset=True) assert received_file_link["store"] == s3_simcore_location assert ( received_file_link["path"] @@ -303,16 +334,27 @@ async def test_port_file_accessors( assert received_file_link["eTag"] # this triggers a download from S3 to a location in /tempdir/simcorefiles/item_key - assert isinstance(await (await PORTS.outputs)["out_34"].get(), item_pytype) - downloaded_file = await (await PORTS.outputs)["out_34"].get() + assert isinstance( + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].get(), + item_pytype, + ) + downloaded_file = await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].get() assert isinstance(downloaded_file, Path) assert downloaded_file.exists() - assert str(await (await PORTS.outputs)["out_34"].get()).startswith( + assert str( + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_34") + ].get() + ).startswith( str( Path( tempfile.gettempdir(), "simcorefiles", - f"{threading.get_ident()}", + f"{CONSTANT_UUID}", "out_34", ) ) @@ -327,7 +369,7 @@ async def test_adding_new_ports( node_uuid: NodeIDStr, create_special_configuration: Callable, postgres_db: sa.engine.Engine, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): config_dict, project_id, node_uuid = create_special_configuration() PORTS = await node_ports_v2.ports( @@ -378,7 +420,7 @@ async def test_removing_ports( node_uuid: NodeIDStr, create_special_configuration: Callable, postgres_db: sa.engine.Engine, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): config_dict, project_id, node_uuid = create_special_configuration( inputs=[("in_14", "integer", 15), ("in_17", "boolean", False)], @@ -432,7 +474,7 @@ async def test_get_value_from_previous_node( item_type: str, item_value: ItemConcreteValue, item_pytype: type, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ): config_dict, _, _ = create_2nodes_configuration( prev_node_inputs=None, @@ -452,9 +494,16 @@ async def test_get_value_from_previous_node( ) await check_config_valid(PORTS, config_dict) - input_value = await (await PORTS.inputs)["in_15"].get() + input_value = await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_15") + ].get() assert isinstance(input_value, item_pytype) - assert await (await PORTS.inputs)["in_15"].get() == item_value + assert ( + await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_15") + ].get() + == item_value + ) @pytest.mark.parametrize( @@ -475,7 +524,8 @@ async def test_get_file_from_previous_node( item_type: str, item_value: str, item_pytype: type, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, + constant_uuid4: None, ): config_dict, _, _ = create_2nodes_configuration( prev_node_inputs=None, @@ -495,12 +545,14 @@ async def test_get_file_from_previous_node( 
r_clone_settings=option_r_clone_settings, ) await check_config_valid(PORTS, config_dict) - file_path = await (await PORTS.inputs)["in_15"].get() + file_path = await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_15") + ].get() assert isinstance(file_path, item_pytype) assert file_path == Path( tempfile.gettempdir(), "simcorefiles", - f"{threading.get_ident()}", + f"{CONSTANT_UUID}", "in_15", Path(item_value).name, ) @@ -531,7 +583,8 @@ async def test_get_file_from_previous_node_with_mapping_of_same_key_name( item_value: str, item_alias: str, item_pytype: type, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, + constant_uuid4: None, ): config_dict, _, this_node_uuid = create_2nodes_configuration( prev_node_inputs=None, @@ -555,12 +608,14 @@ async def test_get_file_from_previous_node_with_mapping_of_same_key_name( postgres_db, project_id, this_node_uuid, config_dict ) # pylint: disable=E1101 await check_config_valid(PORTS, config_dict) - file_path = await (await PORTS.inputs)["in_15"].get() + file_path = await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_15") + ].get() assert isinstance(file_path, item_pytype) assert file_path == Path( tempfile.gettempdir(), "simcorefiles", - f"{threading.get_ident()}", + f"{CONSTANT_UUID}", "in_15", item_alias, ) @@ -591,8 +646,9 @@ async def test_file_mapping( item_value: str, item_alias: str, item_pytype: type, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, create_valid_file_uuid: Callable[[str, Path], SimcoreS3FileID], + constant_uuid4: None, ): config_dict, project_id, node_uuid = create_special_configuration( inputs=[("in_1", item_type, await create_store_link(item_value))], @@ -614,23 +670,27 @@ async def test_file_mapping( postgres_db, project_id, node_uuid, config_dict ) # pylint: disable=E1101 await check_config_valid(PORTS, config_dict) - file_path = await (await PORTS.inputs)["in_1"].get() + file_path = await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_1") + ].get() assert isinstance(file_path, item_pytype) assert file_path == Path( tempfile.gettempdir(), "simcorefiles", - f"{threading.get_ident()}", + f"{CONSTANT_UUID}", "in_1", item_alias, ) # let's get it a second time to see if replacing works - file_path = await (await PORTS.inputs)["in_1"].get() + file_path = await (await PORTS.inputs)[ + TypeAdapter(ServicePortKey).validate_python("in_1") + ].get() assert isinstance(file_path, item_pytype) assert file_path == Path( tempfile.gettempdir(), "simcorefiles", - f"{threading.get_ident()}", + f"{CONSTANT_UUID}", "in_1", item_alias, ) @@ -642,9 +702,11 @@ async def test_file_mapping( assert isinstance(file_path, Path) await PORTS.set_file_by_keymap(file_path) file_id = create_valid_file_uuid("out_1", file_path) - received_file_link = (await PORTS.outputs)["out_1"].value.dict( - by_alias=True, exclude_unset=True - ) + value = (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python("out_1") + ].value + assert isinstance(value, DownloadLink | PortLink | BaseFileLink) + received_file_link = value.model_dump(by_alias=True, exclude_unset=True) assert received_file_link["store"] == s3_simcore_location assert received_file_link["path"] == file_id # received a new eTag @@ -677,7 +739,7 @@ async def test_regression_concurrent_port_update_fails( int_item_value: int, parallel_int_item_value: int, port_count: int, - option_r_clone_settings: 
Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, ) -> None: """ when using `await PORTS.outputs` test will fail @@ -697,13 +759,19 @@ async def test_regression_concurrent_port_update_fails( # when writing in serial these are expected to work for item_key, _, _ in outputs: - await (await PORTS.outputs)[item_key].set(int_item_value) - assert (await PORTS.outputs)[item_key].value == int_item_value + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].set(int_item_value) + assert (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].value == int_item_value # when writing in parallel and reading back, # they fail, with enough concurrency async def _upload_create_task(item_key: str) -> None: - await (await PORTS.outputs)[item_key].set(parallel_int_item_value) + await (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].set(parallel_int_item_value) # updating in parallel creates a race condition results = await gather( @@ -713,23 +781,59 @@ async def _upload_create_task(item_key: str) -> None: # since a race condition was created when uploading values in parallel # it is expected to find at least one mismatching value here - with pytest.raises(AssertionError) as exc_info: + with pytest.raises(AssertionError) as exc_info: # noqa: PT012 for item_key, _, _ in outputs: - assert (await PORTS.outputs)[item_key].value == parallel_int_item_value + assert (await PORTS.outputs)[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].value == parallel_int_item_value assert exc_info.value.args[0].startswith( f"assert {int_item_value} == {parallel_int_item_value}\n + where {int_item_value} = Port(" ) +class _Callbacks(OutputsCallbacks): + async def aborted(self, key: ServicePortKey) -> None: + pass + + async def finished_succesfully(self, key: ServicePortKey) -> None: + pass + + async def finished_with_error(self, key: ServicePortKey) -> None: + pass + + +@pytest.fixture +async def output_callbacks() -> _Callbacks: + return _Callbacks() + + +@pytest.fixture +async def spy_outputs_callbaks( + mocker: MockerFixture, output_callbacks: _Callbacks +) -> dict[str, AsyncMock]: + return { + "aborted": mocker.spy(output_callbacks, "aborted"), + "finished_succesfully": mocker.spy(output_callbacks, "finished_succesfully"), + "finished_with_error": mocker.spy(output_callbacks, "finished_with_error"), + } + + +@pytest.mark.parametrize("use_output_callbacks", [True, False]) async def test_batch_update_inputs_outputs( user_id: int, project_id: str, node_uuid: NodeIDStr, create_special_configuration: Callable, port_count: int, - option_r_clone_settings: Optional[RCloneSettings], + option_r_clone_settings: RCloneSettings | None, + faker: Faker, + output_callbacks: _Callbacks, + spy_outputs_callbaks: dict[str, AsyncMock], + use_output_callbacks: bool, ) -> None: + callbacks = output_callbacks if use_output_callbacks else None + outputs = [(f"value_out_{i}", "integer", None) for i in range(port_count)] inputs = [(f"value_in_{i}", "integer", None) for i in range(port_count)] config_dict, _, _ = create_special_configuration(inputs=inputs, outputs=outputs) @@ -742,41 +846,78 @@ async def test_batch_update_inputs_outputs( ) await check_config_valid(PORTS, config_dict) - async with ProgressBarData(steps=2) as progress_bar: + async with ProgressBarData(num_steps=2, description=faker.pystr()) as progress_bar: + port_values = (await PORTS.outputs).values() await PORTS.set_multiple( { - port.key: (k, None) - 
for k, port in enumerate((await PORTS.outputs).values()) + TypeAdapter(ServicePortKey).validate_python(port.key): (k, None) + for k, port in enumerate(port_values) }, progress_bar=progress_bar, + outputs_callbacks=callbacks, + ) + assert len(spy_outputs_callbaks["finished_succesfully"].call_args_list) == ( + len(port_values) if use_output_callbacks else 0 ) # pylint: disable=protected-access - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 await PORTS.set_multiple( { - port.key: (k, None) + TypeAdapter(ServicePortKey).validate_python(port.key): (k, None) for k, port in enumerate((await PORTS.inputs).values(), start=1000) }, progress_bar=progress_bar, + outputs_callbacks=callbacks, + ) + # inputs do not trigger callbacks + assert len(spy_outputs_callbaks["finished_succesfully"].call_args_list) == ( + len(port_values) if use_output_callbacks else 0 ) - assert progress_bar._continuous_progress_value == pytest.approx(2) + assert progress_bar._current_steps == pytest.approx(2) # noqa: SLF001 ports_outputs = await PORTS.outputs ports_inputs = await PORTS.inputs for k, asd in enumerate(outputs): item_key, _, _ = asd - assert ports_outputs[item_key].value == k - assert await ports_outputs[item_key].get() == k + assert ( + ports_outputs[TypeAdapter(ServicePortKey).validate_python(item_key)].value + == k + ) + assert ( + await ports_outputs[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].get() + == k + ) for k, asd in enumerate(inputs, start=1000): item_key, _, _ = asd - assert ports_inputs[item_key].value == k - assert await ports_inputs[item_key].get() == k + assert ( + ports_inputs[TypeAdapter(ServicePortKey).validate_python(item_key)].value + == k + ) + assert ( + await ports_inputs[ + TypeAdapter(ServicePortKey).validate_python(item_key) + ].get() + == k + ) # test missing key raises error - with pytest.raises(UnboundPortError): - async with ProgressBarData(steps=1) as progress_bar: + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + with pytest.raises(UnboundPortError): await PORTS.set_multiple( - {"missing_key_in_both": (123132, None)}, progress_bar=progress_bar + { + TypeAdapter(ServicePortKey).validate_python( + "missing_key_in_both" + ): (123132, None) + }, + progress_bar=progress_bar, + outputs_callbacks=callbacks, ) - assert progress_bar._continuous_progress_value == pytest.approx(0) + + assert len(spy_outputs_callbaks["finished_succesfully"].call_args_list) == ( + len(port_values) if use_output_callbacks else 0 + ) + assert len(spy_outputs_callbaks["aborted"].call_args_list) == 0 + assert len(spy_outputs_callbaks["finished_with_error"].call_args_list) == 0 diff --git a/packages/simcore-sdk/tests/unit/conftest.py b/packages/simcore-sdk/tests/unit/conftest.py index 896c98f3564..34cd932081c 100644 --- a/packages/simcore-sdk/tests/unit/conftest.py +++ b/packages/simcore-sdk/tests/unit/conftest.py @@ -3,8 +3,9 @@ # pylint:disable=redefined-outer-name import json +from collections.abc import AsyncIterator, Callable from random import randint -from typing import Any, AsyncIterator, Callable, Dict +from typing import Any from uuid import uuid4 import pytest @@ -26,13 +27,13 @@ def node_uuid() -> str: return str(uuid4()) -@pytest.fixture(scope="function") +@pytest.fixture async def mock_db_manager( monkeypatch, project_id: str, node_uuid: str, ) -> AsyncIterator[Callable]: - def _mock_db_manager(port_cfg: Dict[str, Any]) -> DBManager: + def 
_mock_db_manager(port_cfg: dict[str, Any]) -> DBManager: async def mock_get_ports_configuration_from_node_uuid(*args, **kwargs) -> str: return json.dumps(port_cfg) @@ -57,4 +58,4 @@ async def mock_write_ports_configuration( db_manager = DBManager() return db_manager - yield _mock_db_manager + return _mock_db_manager diff --git a/packages/simcore-sdk/tests/unit/test_node_data_data_manager.py b/packages/simcore-sdk/tests/unit/test_node_data_data_manager.py index 5690a9508a5..bc36b2c2a5e 100644 --- a/packages/simcore-sdk/tests/unit/test_node_data_data_manager.py +++ b/packages/simcore-sdk/tests/unit/test_node_data_data_manager.py @@ -1,17 +1,23 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name # pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +# pylint:disable=unused-variable +from collections.abc import Callable, Iterator from filecmp import cmpfiles from pathlib import Path -from shutil import copy, make_archive, unpack_archive -from typing import Callable, Iterator +from shutil import copy, make_archive import pytest +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from pytest_mock import MockerFixture from servicelib.progress_bar import ProgressBarData +from settings_library.r_clone import RCloneSettings, S3Provider from simcore_sdk.node_data import data_manager from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION +from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB @pytest.fixture @@ -19,7 +25,6 @@ def create_files() -> Iterator[Callable[..., list[Path]]]: created_files = [] def _create_files(number: int, folder: Path) -> list[Path]: - for i in range(number): file_path = folder / f"{i}.test" file_path.write_text(f"I am test file number {i}") @@ -33,95 +38,104 @@ def _create_files(number: int, folder: Path) -> list[Path]: file_path.unlink() +@pytest.fixture +def r_clone_settings(faker: Faker) -> RCloneSettings: + return RCloneSettings.model_validate( + { + "R_CLONE_S3": { + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + }, + "R_CLONE_PROVIDER": S3Provider.MINIO, + } + ) + + +@pytest.fixture +def project_id(project_id: str) -> ProjectID: + return ProjectID(project_id) + + +@pytest.fixture +def node_uuid(node_uuid: str) -> NodeID: + return NodeID(node_uuid) + + async def test_push_folder( user_id: int, - project_id: str, - node_uuid: str, - mocker, + project_id: ProjectID, + node_uuid: NodeID, + mocker: MockerFixture, tmpdir: Path, create_files: Callable[..., list[Path]], + r_clone_settings: RCloneSettings, + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, ): # create some files assert tmpdir.exists() - # create a folder to compress from + # create a folder to upload test_folder = Path(tmpdir) / "test_folder" test_folder.mkdir() assert test_folder.exists() - # create a folder to compress in - test_compression_folder = Path(tmpdir) / "test_compression_folder" - test_compression_folder.mkdir() - assert test_compression_folder.exists() - # mocks mock_filemanager = mocker.patch( "simcore_sdk.node_data.data_manager.filemanager", spec=True ) - mock_filemanager.upload_file.return_value = "" - mock_temporary_directory = mocker.patch( - "simcore_sdk.node_data.data_manager.TemporaryDirectory" - ) - 
mock_temporary_directory.return_value.__enter__.return_value = ( - test_compression_folder - ) + mock_filemanager.upload_path.return_value = "" files_number = 10 create_files(files_number, test_folder) assert len(list(test_folder.glob("**/*"))) == files_number for file_path in test_folder.glob("**/*"): assert file_path.exists() - async with ProgressBarData(steps=1) as progress_bar: - await data_manager.push( + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await data_manager._push_directory( # noqa: SLF001 user_id, project_id, node_uuid, test_folder, - io_log_redirect_cb=None, + io_log_redirect_cb=mock_io_log_redirect_cb, progress_bar=progress_bar, + r_clone_settings=r_clone_settings, + aws_s3_cli_settings=None, ) - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 - mock_temporary_directory.assert_called_once() - mock_filemanager.upload_file.assert_called_once_with( - file_to_upload=(test_compression_folder / f"{test_folder.stem}.zip"), - r_clone_settings=None, - io_log_redirect_cb=None, - s3_object=f"{project_id}/{node_uuid}/{test_folder.stem}.zip", + mock_filemanager.upload_path.assert_called_once_with( + r_clone_settings=r_clone_settings, + io_log_redirect_cb=mock_io_log_redirect_cb, + path_to_upload=test_folder, + s3_object=f"{project_id}/{node_uuid}/{test_folder.stem}", store_id=SIMCORE_LOCATION, store_name=None, user_id=user_id, - progress_bar=progress_bar._children[0], + progress_bar=progress_bar, + exclude_patterns=None, + aws_s3_cli_settings=None, ) - archive_file = test_compression_folder / f"{test_folder.stem}.zip" - assert archive_file.exists() - - # create control folder - control_folder = Path(tmpdir) / "control_folder" - control_folder.mkdir() - assert control_folder.exists() - unpack_archive(f"{archive_file}", extract_dir=control_folder) - matchs, mismatchs, errors = cmpfiles( - test_folder, control_folder, [x.name for x in test_folder.glob("**/*")] - ) - assert len(matchs) == files_number - assert not mismatchs - assert not errors - async def test_push_file( user_id: int, - project_id: str, - node_uuid: str, + project_id: ProjectID, + node_uuid: NodeID, mocker, tmpdir: Path, create_files: Callable[..., list[Path]], + r_clone_settings: RCloneSettings, + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, ): mock_filemanager = mocker.patch( "simcore_sdk.node_data.data_manager.filemanager", spec=True ) - mock_filemanager.upload_file.return_value = "" + mock_filemanager.upload_path.return_value = "" mock_temporary_directory = mocker.patch( "simcore_sdk.node_data.data_manager.TemporaryDirectory" ) @@ -130,37 +144,45 @@ async def test_push_file( assert file_path.exists() # test push file by file - async with ProgressBarData(steps=1) as progress_bar: - await data_manager.push( + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await data_manager._push_directory( # noqa: SLF001 user_id, project_id, node_uuid, file_path, - io_log_redirect_cb=None, + io_log_redirect_cb=mock_io_log_redirect_cb, progress_bar=progress_bar, + r_clone_settings=r_clone_settings, + aws_s3_cli_settings=None, ) - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 mock_temporary_directory.assert_not_called() - mock_filemanager.upload_file.assert_called_once_with( - r_clone_settings=None, - io_log_redirect_cb=None, - file_to_upload=file_path, + 
mock_filemanager.upload_path.assert_called_once_with( + r_clone_settings=r_clone_settings, + io_log_redirect_cb=mock_io_log_redirect_cb, + path_to_upload=file_path, s3_object=f"{project_id}/{node_uuid}/{file_path.name}", store_id=SIMCORE_LOCATION, store_name=None, user_id=user_id, progress_bar=progress_bar, + exclude_patterns=None, + aws_s3_cli_settings=None, ) mock_filemanager.reset_mock() -async def test_pull_folder( +@pytest.mark.parametrize("create_legacy_archive", [False, True]) +async def test_pull_legacy_archive( user_id: int, - project_id: str, - node_uuid: str, + project_id: ProjectID, + node_uuid: NodeID, mocker, tmpdir: Path, create_files: Callable[..., list[Path]], + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, + create_legacy_archive: bool, ): assert tmpdir.exists() # create a folder to compress from @@ -180,7 +202,13 @@ async def test_pull_folder( create_files(files_number, test_control_folder) compressed_file_name = test_compression_folder / test_folder.stem archive_file = make_archive( - f"{compressed_file_name}", "zip", root_dir=test_control_folder + ( + f"{compressed_file_name}_legacy" + if create_legacy_archive + else f"{compressed_file_name}" + ), + "zip", + root_dir=test_control_folder, ) assert Path(archive_file).exists() # create mock downloaded folder @@ -193,7 +221,7 @@ async def test_pull_folder( mock_filemanager = mocker.patch( "simcore_sdk.node_data.data_manager.filemanager", spec=True ) - mock_filemanager.download_file_from_s3.return_value = fake_zipped_folder + mock_filemanager.download_path_from_s3.return_value = fake_zipped_folder mock_temporary_directory = mocker.patch( "simcore_sdk.node_data.data_manager.TemporaryDirectory" ) @@ -201,25 +229,30 @@ async def test_pull_folder( test_compression_folder ) - async with ProgressBarData(steps=1) as progress_bar: - await data_manager.pull( + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await data_manager._pull_legacy_archive( # noqa: SLF001 user_id, project_id, node_uuid, test_folder, - io_log_redirect_cb=None, + io_log_redirect_cb=mock_io_log_redirect_cb, progress_bar=progress_bar, + legacy_destination_path=( + Path(f"{test_folder}_legacy") if create_legacy_archive else None + ), ) - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 mock_temporary_directory.assert_called_once() - mock_filemanager.download_file_from_s3.assert_called_once_with( - local_folder=test_compression_folder, - s3_object=f"{project_id}/{node_uuid}/{test_folder.stem}.zip", + mock_filemanager.download_path_from_s3.assert_called_once_with( + user_id=user_id, + local_path=test_compression_folder, + s3_object=f"{project_id}/{node_uuid}/{f'{test_folder.stem}_legacy' if create_legacy_archive else test_folder.stem}.zip", store_id=SIMCORE_LOCATION, store_name=None, - user_id=user_id, - io_log_redirect_cb=None, - progress_bar=progress_bar._children[0], + io_log_redirect_cb=mock_io_log_redirect_cb, + r_clone_settings=None, + progress_bar=progress_bar._children[0], # noqa: SLF001 + aws_s3_cli_settings=None, ) matchs, mismatchs, errors = cmpfiles( @@ -232,13 +265,16 @@ async def test_pull_folder( assert not errors -async def test_pull_file( +async def test_pull_directory( user_id: int, - project_id: str, - node_uuid: str, + project_id: ProjectID, + node_uuid: NodeID, mocker, tmpdir: Path, create_files: Callable[..., list[Path]], + r_clone_settings: RCloneSettings, + mock_io_log_redirect_cb: LogRedirectCB, + faker: 
Faker, ): file_path = create_files(1, Path(tmpdir))[0] assert file_path.exists() @@ -246,34 +282,32 @@ async def test_pull_file( fake_download_folder = Path(tmpdir) / "download_folder" fake_download_folder.mkdir() - fake_downloaded_file = fake_download_folder / file_path.name - copy(file_path, fake_downloaded_file) mock_filemanager = mocker.patch( "simcore_sdk.node_data.data_manager.filemanager", spec=True ) - mock_filemanager.download_file_from_s3.return_value = fake_downloaded_file - mock_temporary_directory = mocker.patch( - "simcore_sdk.node_data.data_manager.TemporaryDirectory" - ) + mock_filemanager.download_path_from_s3.return_value = fake_download_folder - async with ProgressBarData(steps=1) as progress_bar: - await data_manager.pull( + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await data_manager._pull_directory( # noqa: SLF001 user_id, project_id, node_uuid, - file_path, - io_log_redirect_cb=None, + fake_download_folder, + io_log_redirect_cb=mock_io_log_redirect_cb, + r_clone_settings=r_clone_settings, progress_bar=progress_bar, + aws_s3_cli_settings=None, ) - assert progress_bar._continuous_progress_value == pytest.approx(1) - mock_temporary_directory.assert_not_called() - mock_filemanager.download_file_from_s3.assert_called_once_with( - local_folder=file_path.parent, - s3_object=f"{project_id}/{node_uuid}/{file_path.name}", + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 + mock_filemanager.download_path_from_s3.assert_called_once_with( + local_path=fake_download_folder, + s3_object=f"{project_id}/{node_uuid}/{fake_download_folder.name}", store_id=SIMCORE_LOCATION, store_name=None, user_id=user_id, - io_log_redirect_cb=None, + io_log_redirect_cb=mock_io_log_redirect_cb, + r_clone_settings=r_clone_settings, progress_bar=progress_bar, + aws_s3_cli_settings=None, ) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_common_file_io_utils.py b/packages/simcore-sdk/tests/unit/test_node_ports_common_file_io_utils.py index 8aa9487eb00..70ad8adbbc7 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_common_file_io_utils.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_common_file_io_utils.py @@ -3,26 +3,31 @@ # pylint: disable=protected-access import asyncio +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable from dataclasses import dataclass from pathlib import Path -from typing import AsyncIterable, AsyncIterator, Awaitable, Callable +from unittest.mock import AsyncMock import pytest from aiobotocore.session import AioBaseClient, get_session from aiohttp import ClientResponse, ClientSession, TCPConnector from aioresponses import aioresponses from faker import Faker -from models_library.api_schemas_storage import ( +from models_library.api_schemas_storage.storage_schemas import ( FileUploadLinks, FileUploadSchema, UploadedPart, ) from moto.server import ThreadedMotoServer -from pydantic import AnyUrl, ByteSize, parse_obj_as +from pydantic import AnyUrl, ByteSize, TypeAdapter +from pytest_mock import MockerFixture +from servicelib.aiohttp import status from servicelib.progress_bar import ProgressBarData +from simcore_sdk.node_ports_common.exceptions import AwsS3BadRequestRequestTimeoutError from simcore_sdk.node_ports_common.file_io_utils import ( - ExtendedClientResponseError, _check_for_aws_http_errors, + _ExtendedClientResponseError, + _process_batch, _raise_for_status, upload_file_to_presigned_links, ) @@ -46,7 +51,7 @@ async def test_raise_for_status( async with 
client_session.get(A_TEST_ROUTE) as resp: assert isinstance(resp, ClientResponse) - with pytest.raises(ExtendedClientResponseError) as exe_info: + with pytest.raises(_ExtendedClientResponseError) as exe_info: await _raise_for_status(resp) assert "OPSIE there was an error here" in f"{exe_info.value}" @@ -61,21 +66,10 @@ class _TestParams: @pytest.mark.parametrize( "test_params", [ - _TestParams( - will_retry=True, - status_code=400, - body='RequestTimeout' - "Your socket connection to the server was not read from or written to within " - "the timeout period. Idle connections will be closed." - "7EE901348D6C6812" - "FfQE7jdbUt39E6mcQq/" - "ZeNR52ghjv60fccNT4gCE4IranXjsGLG+L6FUyiIxx1tAuXL9xtz2NAY7ZlbzMTm94fhY3TBiCBmf" - "", - ), - _TestParams(will_retry=True, status_code=500), - _TestParams(will_retry=True, status_code=503), - _TestParams(will_retry=False, status_code=400), - _TestParams(will_retry=False, status_code=200), + _TestParams(will_retry=True, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR), + _TestParams(will_retry=True, status_code=status.HTTP_503_SERVICE_UNAVAILABLE), + _TestParams(will_retry=False, status_code=status.HTTP_400_BAD_REQUEST), + _TestParams(will_retry=False, status_code=status.HTTP_200_OK), _TestParams(will_retry=False, status_code=399), ], ) @@ -91,18 +85,76 @@ async def test_check_for_aws_http_errors( async with client_session.get(A_TEST_ROUTE) as resp: try: await _raise_for_status(resp) - except ExtendedClientResponseError as exception: - assert _check_for_aws_http_errors(exception) is test_params.will_retry + except _ExtendedClientResponseError as exception: + assert ( # noqa: PT017 + _check_for_aws_http_errors(exception) is test_params.will_retry + ) + + +async def test_process_batch_captures_400_request_timeout_and_wraps_in_error( + aioresponses_mocker: aioresponses, client_session: ClientSession +): + async def _mock_upload_task() -> None: + body = ( + 'RequestTimeout' + "Your socket connection to the server was not read from or written to within " + "the timeout period. Idle connections will be closed." 
+ "7EE901348D6C6812" + "FfQE7jdbUt39E6mcQq/" + "ZeNR52ghjv60fccNT4gCE4IranXjsGLG+L6FUyiIxx1tAuXL9xtz2NAY7ZlbzMTm94fhY3TBiCBmf" + "" + ) + aioresponses_mocker.get(A_TEST_ROUTE, body=body, status=400) + + async with client_session.get(A_TEST_ROUTE) as resp: + # raises like _session_put does + await _raise_for_status(resp) + + with pytest.raises(AwsS3BadRequestRequestTimeoutError): + await _process_batch( + upload_tasks=[_mock_upload_task()], + max_concurrency=1, + file_name="mock_file", + file_size=1, + file_chunk_size=1, + last_chunk_size=1, + ) + + +async def test_upload_file_to_presigned_links_raises_aws_s3_400_request_time_out_error( + mocker: MockerFixture, + create_upload_links: Callable[[int, ByteSize], Awaitable[FileUploadSchema]], + create_file_of_size: Callable[[ByteSize], Path], + faker: Faker, +): + file_size = ByteSize(1) + upload_links = await create_upload_links(1, file_size) + + mocker.patch( + "simcore_sdk.node_ports_common.file_io_utils._upload_file_part", + side_effect=AwsS3BadRequestRequestTimeoutError(body="nothing"), + ) + + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + with pytest.raises(AwsS3BadRequestRequestTimeoutError): + await upload_file_to_presigned_links( + session=AsyncMock(), + file_upload_links=upload_links, + file_to_upload=create_file_of_size(file_size), + num_retries=0, + io_log_redirect_cb=None, + progress_bar=progress_bar, + ) @pytest.fixture async def aiobotocore_s3_client( - mocked_s3_server: ThreadedMotoServer, + mocked_aws_server: ThreadedMotoServer, ) -> AsyncIterator[AioBaseClient]: session = get_session() async with session.create_client( "s3", - endpoint_url=f"http://{mocked_s3_server._ip_address}:{mocked_s3_server._port}", # pylint: disable=protected-access + endpoint_url=f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # pylint: disable=protected-access aws_secret_access_key="xxx", aws_access_key_id="xxx", ) as client: @@ -128,7 +180,7 @@ async def bucket(aiobotocore_s3_client: AioBaseClient, faker: Faker) -> str: response = await aiobotocore_s3_client.create_bucket(Bucket=faker.pystr()) assert "ResponseMetadata" in response assert "HTTPStatusCode" in response["ResponseMetadata"] - assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 + assert response["ResponseMetadata"]["HTTPStatusCode"] == status.HTTP_200_OK response = await aiobotocore_s3_client.list_buckets() assert response["Buckets"] @@ -145,7 +197,7 @@ def file_id(faker: Faker) -> str: @pytest.fixture async def create_upload_links( - mocked_s3_server: ThreadedMotoServer, + mocked_aws_server: ThreadedMotoServer, aiobotocore_s3_client: AioBaseClient, faker: Faker, bucket: str, @@ -160,8 +212,7 @@ async def _creator(num_upload_links: int, chunk_size: ByteSize) -> FileUploadSch assert "UploadId" in response upload_id = response["UploadId"] - upload_links = parse_obj_as( - list[AnyUrl], + upload_links = TypeAdapter(list[AnyUrl]).validate_python( await asyncio.gather( *[ aiobotocore_s3_client.generate_presigned_url( @@ -182,18 +233,23 @@ async def _creator(num_upload_links: int, chunk_size: ByteSize) -> FileUploadSch chunk_size=chunk_size, urls=upload_links, links=FileUploadLinks( - abort_upload=parse_obj_as(AnyUrl, faker.uri()), - complete_upload=parse_obj_as(AnyUrl, faker.uri()), + abort_upload=TypeAdapter(AnyUrl).validate_python(faker.uri()), + complete_upload=TypeAdapter(AnyUrl).validate_python(faker.uri()), ), ) - yield _creator + return _creator @pytest.mark.skip(reason="this will allow to reproduce an issue") 
@pytest.mark.parametrize( "file_size,used_chunk_size", - [(parse_obj_as(ByteSize, 21800510238), parse_obj_as(ByteSize, 10485760))], + [ + ( + TypeAdapter(ByteSize).validate_python(21800510238), + TypeAdapter(ByteSize).validate_python(10485760), + ) + ], ) async def test_upload_file_to_presigned_links( client_session: ClientSession, @@ -201,6 +257,7 @@ async def test_upload_file_to_presigned_links( create_file_of_size: Callable[[ByteSize], Path], file_size: ByteSize, used_chunk_size: ByteSize, + faker: Faker, ): """This test is here to reproduce the issue https://github.com/ITISFoundation/osparc-simcore/issues/3531 One theory is that something might be wrong in how the chunking is done and that AWS times out @@ -215,11 +272,13 @@ async def test_upload_file_to_presigned_links( """ local_file = create_file_of_size(file_size) num_links = 2080 - effective_chunk_size = parse_obj_as(ByteSize, local_file.stat().st_size / num_links) + effective_chunk_size = TypeAdapter(ByteSize).validate_python( + local_file.stat().st_size / num_links + ) assert effective_chunk_size <= used_chunk_size upload_links = await create_upload_links(num_links, used_chunk_size) assert len(upload_links.urls) == num_links - async with ProgressBarData(steps=1) as progress_bar: + async with ProgressBarData(num_steps=1, description="") as progress_bar: uploaded_parts: list[UploadedPart] = await upload_file_to_presigned_links( session=client_session, file_upload_links=upload_links, @@ -228,5 +287,5 @@ async def test_upload_file_to_presigned_links( io_log_redirect_cb=None, progress_bar=progress_bar, ) - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 assert uploaded_parts diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_common_r_clone_utils.py b/packages/simcore-sdk/tests/unit/test_node_ports_common_r_clone_utils.py new file mode 100644 index 00000000000..e2d9b890ba5 --- /dev/null +++ b/packages/simcore-sdk/tests/unit/test_node_ports_common_r_clone_utils.py @@ -0,0 +1,75 @@ +import json +from unittest.mock import AsyncMock + +import pytest +from pydantic import TypeAdapter +from simcore_sdk.node_ports_common.r_clone_utils import ( + SyncProgressLogParser, + _RCloneSyncMessageBase, + _RCloneSyncMessages, + _RCloneSyncTransferCompletedMessage, + _RCloneSyncTransferringMessage, + _RCloneSyncUpdatedMessage, +) + + +@pytest.mark.parametrize( + "log_message,expected", + [ + ( + '{"level":"info","msg":"There was nothing to transfer","source":"sync/sync.go:954","time":"2024-09-25T10:18:04.904537+00:00"}', + _RCloneSyncMessageBase, + ), + ( + '{"level":"info","msg":"","object":".hidden_do_not_remove","objectType":"*s3.Object","source":"operations/operations.go:277","time":"2024-09-24T07:11:22.147117+00:00"}', + _RCloneSyncUpdatedMessage, + ), + ( + '{"level":"info","msg":"Copied (new)","object":"README.ipynb","objectType":"*s3.Object","size":5123,"source":"operations/copy.go:360","time":"2024-04-23T14:05:10.408277+00:00"}', + _RCloneSyncTransferCompletedMessage, + ), + ( + json.dumps( + { + "level": "", + "msg": "", + "source": "", + "time": "2024-09-24T07:11:22.147117+00:00", + "object": "str", + } + ), + _RCloneSyncUpdatedMessage, + ), + ( + json.dumps( + { + "level": "", + "msg": "", + "source": "", + "time": "2024-09-24T07:11:22.147117+00:00", + "object": "str", + "size": 1, + } + ), + _RCloneSyncTransferCompletedMessage, + ), + ( + json.dumps( + { + "level": "", + "msg": "", + "source": "", + "time": 
"2024-09-24T07:11:22.147117+00:00", + "stats": {"bytes": 1, "totalBytes": 1}, + } + ), + _RCloneSyncTransferringMessage, + ), + ], +) +async def test_rclone_stbc_message_parsing_regression(log_message: str, expected: type): + parsed_log = TypeAdapter(_RCloneSyncMessages).validate_json(log_message) + assert isinstance(parsed_log, expected) + + progress_log_parser = SyncProgressLogParser(AsyncMock()) + await progress_log_parser(log_message) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_common_storage_client.py b/packages/simcore-sdk/tests/unit/test_node_ports_common_storage_client.py new file mode 100644 index 00000000000..3f3d722fa07 --- /dev/null +++ b/packages/simcore-sdk/tests/unit/test_node_ports_common_storage_client.py @@ -0,0 +1,78 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import datetime +from collections.abc import AsyncIterable +from typing import Any, Final + +import pytest +from aiohttp import ClientResponseError, ClientSession +from aiohttp.client_exceptions import ClientConnectionError +from aioresponses import aioresponses +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from simcore_sdk.node_ports_common.storage_client import retry_request + +_ROUTE_ALWAYS_200_OK: Final[str] = "http://always-200-ok" + +_MOCK_RESPONSE_BODY: Final[dict[str, Any]] = {"data": "mock_body"} + + +@pytest.fixture +def mock_responses(aioresponses_mocker: aioresponses) -> None: + aioresponses_mocker.get( + _ROUTE_ALWAYS_200_OK, status=200, payload=_MOCK_RESPONSE_BODY + ) + + +@pytest.fixture +def mock_postgres(monkeypatch: pytest.MonkeyPatch) -> None: + setenvs_from_dict( + monkeypatch, + { + "POSTGRES_HOST": "test", + "POSTGRES_USER": "test", + "POSTGRES_PASSWORD": "test", + "POSTGRES_DB": "test", + }, + ) + + +@pytest.fixture +async def session(mock_postgres: None) -> AsyncIterable[ClientSession]: + async with ClientSession() as client_session: + yield client_session + + +async def test_retry_request_ok(mock_responses: None, session: ClientSession): + async with retry_request( + session, "GET", _ROUTE_ALWAYS_200_OK, expected_status=200 + ) as response: + assert response.status == 200 + assert await response.json() == _MOCK_RESPONSE_BODY + + +async def test_retry_request_unexpected_code( + mock_responses: None, session: ClientSession +): + with pytest.raises(ClientResponseError, match="but was expecting"): + async with retry_request( + session, "GET", _ROUTE_ALWAYS_200_OK, expected_status=999 + ): + ... + + +async def test_retry_retries_before_giving_up( + session: ClientSession, caplog: pytest.LogCaptureFixture +): + caplog.clear() + with pytest.raises(ClientConnectionError, match="Cannot connect to host"): + async with retry_request( + session, + "GET", + "http://this-route-does-not-exist.local", + expected_status=200, + give_up_after=datetime.timedelta(seconds=1), + ): + ... 
+ + assert "unexpected error:" in caplog.text diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_links.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_links.py index 5116311ae01..72ba5e76570 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_links.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_links.py @@ -1,4 +1,3 @@ -from typing import Dict from uuid import uuid4 import pytest @@ -23,7 +22,7 @@ def test_valid_port_link(): {"nodeUuid": f"{uuid4()}", "output": "some:key"}, ], ) -def test_invalid_port_link(port_link: Dict[str, str]): +def test_invalid_port_link(port_link: dict[str, str]): with pytest.raises(ValidationError): PortLink(**port_link) @@ -36,7 +35,7 @@ def test_invalid_port_link(port_link: Dict[str, str]): {"label": "some stuff"}, ], ) -def test_invalid_download_link(download_link: Dict[str, str]): +def test_invalid_download_link(download_link: dict[str, str]): with pytest.raises(ValidationError): DownloadLink(**download_link) @@ -49,6 +48,6 @@ def test_invalid_download_link(download_link: Dict[str, str]): {"path": "/somefile/blahblah:"}, ], ) -def test_invalid_file_link(file_link: Dict[str, str]): +def test_invalid_file_link(file_link: dict[str, str]): with pytest.raises(ValidationError): FileLink(**file_link) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py index 6044fdd9739..250f9d2599d 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_nodeports_v2.py @@ -5,9 +5,14 @@ from pathlib import Path from typing import Any, Callable +from unittest.mock import AsyncMock import pytest +from faker import Faker +from pydantic import ValidationError +from pytest_mock import MockFixture from servicelib.progress_bar import ProgressBarData +from simcore_sdk.node_ports_common.filemanager import UploadedFile from simcore_sdk.node_ports_v2 import Nodeports, exceptions, ports from simcore_sdk.node_ports_v2.ports_mapping import InputsList, OutputsList from utils_port_v2 import create_valid_port_mapping @@ -40,18 +45,18 @@ async def mock_save_db_cb(*args, **kwargs): pass async def mock_node_port_creator_cb(*args, **kwargs): - updated_node_ports = Nodeports( + return Nodeports( inputs=updated_inputs, outputs=updated_outputs, db_manager=db_manager, user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=False, ) - return updated_node_ports node_ports = Nodeports( inputs=original_inputs, @@ -60,6 +65,7 @@ async def mock_node_port_creator_cb(*args, **kwargs): user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=auto_update, @@ -81,6 +87,7 @@ async def test_node_ports_accessors( user_id: int, project_id: str, node_uuid: str, + faker: Faker, ): db_manager = mock_db_manager(default_configuration) @@ -91,18 +98,18 @@ async def mock_save_db_cb(*args, **kwargs): pass async def mock_node_port_creator_cb(*args, **kwargs): - updated_node_ports = Nodeports( + return Nodeports( inputs=original_inputs, outputs=original_outputs, db_manager=db_manager, user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=False, ) - 
return updated_node_ports node_ports = Nodeports( inputs=original_inputs, @@ -111,6 +118,7 @@ async def mock_node_port_creator_cb(*args, **kwargs): user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=False, @@ -121,14 +129,14 @@ async def mock_node_port_creator_cb(*args, **kwargs): await node_ports.set(port.key, port.value) with pytest.raises(exceptions.UnboundPortError): - await node_ports.get("some_invalid_key") + await node_ports.get("some_invalid_key") # type: ignore for port in original_outputs.values(): assert await node_ports.get(port.key) == port.value await node_ports.set(port.key, port.value) # test batch add - async with ProgressBarData(steps=1) as progress_bar: + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: await node_ports.set_multiple( { port.key: (port.value, None) @@ -136,8 +144,9 @@ async def mock_node_port_creator_cb(*args, **kwargs): + list(original_outputs.values()) }, progress_bar=progress_bar, + outputs_callbacks=AsyncMock(), ) - assert progress_bar._continuous_progress_value == pytest.approx(1) + assert progress_bar._current_steps == pytest.approx(1) # noqa: SLF001 @pytest.fixture(scope="session") @@ -146,13 +155,12 @@ def e_tag() -> str: @pytest.fixture -async def mock_upload_file(mocker, e_tag): - mock = mocker.patch( - "simcore_sdk.node_ports_common.filemanager.upload_file", - return_value=(0, e_tag), +async def mock_upload_path(mocker: MockFixture, e_tag: str) -> MockFixture: + return mocker.patch( + "simcore_sdk.node_ports_common.filemanager.upload_path", + return_value=UploadedFile(0, e_tag), autospec=True, ) - yield mock async def test_node_ports_set_file_by_keymap( @@ -161,7 +169,7 @@ async def test_node_ports_set_file_by_keymap( user_id: int, project_id: str, node_uuid: str, - mock_upload_file, + mock_upload_path: MockFixture, ): db_manager = mock_db_manager(default_configuration) @@ -174,18 +182,18 @@ async def mock_save_db_cb(*args, **kwargs): pass async def mock_node_port_creator_cb(*args, **kwargs): - updated_node_ports = Nodeports( + return Nodeports( inputs=original_inputs, outputs=original_outputs, db_manager=db_manager, user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=False, ) - return updated_node_ports node_ports = Nodeports( inputs=original_inputs, @@ -194,6 +202,7 @@ async def mock_node_port_creator_cb(*args, **kwargs): user_id=user_id, project_id=project_id, node_uuid=node_uuid, + io_log_redirect_cb=None, save_to_db_cb=mock_save_db_cb, node_port_creator_cb=mock_node_port_creator_cb, auto_update=False, @@ -215,3 +224,55 @@ async def test_node_ports_v2_packages( db_manager = mock_db_manager(default_configuration) node_ports = await ports(user_id, project_id, node_uuid) node_ports = await ports(user_id, project_id, node_uuid, db_manager=db_manager) + + +@pytest.fixture +def mock_port_set(mocker: MockFixture) -> None: + async def _always_raise_error(*args, **kwargs): + raise ValidationError.from_exception_data(title="Just a test", line_errors=[]) + + mocker.patch( + "simcore_sdk.node_ports_v2.port.Port._set", side_effect=_always_raise_error + ) + + +async def test_node_ports_v2_set_multiple_catch_multiple_failing_set_ports( + mock_port_set: None, + mock_db_manager: Callable, + default_configuration: dict[str, Any], + user_id: int, + project_id: str, + 
node_uuid: str, + faker: Faker, +): + db_manager = mock_db_manager(default_configuration) + + original_inputs = create_valid_port_mapping(InputsList, suffix="original") + original_outputs = create_valid_port_mapping(OutputsList, suffix="original") + + async def _mock_callback(*args, **kwargs): + pass + + node_ports = Nodeports( + inputs=original_inputs, + outputs=original_outputs, + db_manager=db_manager, + user_id=user_id, + project_id=project_id, + node_uuid=node_uuid, + io_log_redirect_cb=None, + save_to_db_cb=_mock_callback, + node_port_creator_cb=_mock_callback, + auto_update=False, + ) + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + with pytest.raises(ValidationError): + await node_ports.set_multiple( + { + port.key: (port.value, None) + for port in list(original_inputs.values()) + + list(original_outputs.values()) + }, + progress_bar=progress_bar, + outputs_callbacks=None, + ) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py index 4de298391ae..6817d788faa 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py @@ -1,28 +1,33 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name # pylint:disable=no-member # pylint:disable=protected-access +# pylint:disable=redefined-outer-name # pylint:disable=too-many-arguments +# pylint:disable=too-many-positional-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable import os import re import shutil import tempfile -import threading +from collections.abc import Callable, Iterator +from dataclasses import dataclass from pathlib import Path -from typing import Any, Callable, Iterator, NamedTuple, Optional, Union +from typing import Any, NamedTuple from unittest.mock import AsyncMock import pytest from aiohttp.client import ClientSession -from attr import dataclass +from aioresponses import aioresponses as AioResponsesMock +from faker import Faker +from models_library.api_schemas_storage.storage_schemas import FileMetaDataGet from models_library.projects_nodes_io import LocationID -from pydantic.error_wrappers import ValidationError +from pydantic import TypeAdapter, ValidationError from pytest_mock.plugin import MockerFixture from servicelib.progress_bar import ProgressBarData from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB +from simcore_sdk.node_ports_common.filemanager import UploadedFile from simcore_sdk.node_ports_v2 import exceptions from simcore_sdk.node_ports_v2.links import ( DataItemValue, @@ -33,7 +38,7 @@ ) from simcore_sdk.node_ports_v2.port import Port from simcore_sdk.node_ports_v2.ports_mapping import InputsList, OutputsList -from utils_port_v2 import create_valid_port_config +from utils_port_v2 import CONSTANT_UUID, create_valid_port_config from yarl import URL @@ -51,7 +56,7 @@ def another_node_file_name() -> Path: def download_file_folder_name() -> Path: - return Path(tempfile.gettempdir(), "simcorefiles", f"{threading.get_ident()}") + return Path(tempfile.gettempdir()) / "simcorefiles" / f"{CONSTANT_UUID}" def project_id() -> str: @@ -116,12 +121,12 @@ def file_with_data() -> Iterator[Path]: @pytest.fixture( params=[ - pytest.lazy_fixture("symlink_to_file_with_data"), - pytest.lazy_fixture("file_with_data"), + "symlink_to_file_with_data", + "file_with_data", ] ) -def this_node_file(request) -> Iterator[Path]: - yield 
request.param +def this_node_file(request: pytest.FixtureRequest) -> Path: + return request.getfixturevalue(request.param) @pytest.fixture @@ -135,7 +140,7 @@ def another_node_file() -> Iterator[Path]: @pytest.fixture -def download_file_folder() -> Iterator[Path]: +def download_file_folder(constant_uuid4: None) -> Iterator[Path]: destination_path = download_file_folder_name() destination_path.mkdir(parents=True, exist_ok=True) yield destination_path @@ -146,21 +151,24 @@ def download_file_folder() -> Iterator[Path]: @pytest.fixture(scope="module", name="project_id") def project_id_fixture() -> str: """NOTE: since pytest does not allow to use fixtures inside parametrizations, - this trick allows to re-use the same function in a fixture with a same "fixture" name""" + this trick allows to re-use the same function in a fixture with a same "fixture" name + """ return project_id() @pytest.fixture(scope="module", name="node_uuid") def node_uuid_fixture() -> str: """NOTE: since pytest does not allow to use fixtures inside parametrizations, - this trick allows to re-use the same function in a fixture with a same "fixture" name""" + this trick allows to re-use the same function in a fixture with a same "fixture" name + """ return node_uuid() @pytest.fixture(scope="module", name="user_id") def user_id_fixture() -> int: """NOTE: since pytest does not allow to use fixtures inside parametrizations, - this trick allows to re-use the same function in a fixture with a same "fixture" name""" + this trick allows to re-use the same function in a fixture with a same "fixture" name + """ return user_id() @@ -171,14 +179,14 @@ async def mock_download_file( project_id: str, node_uuid: str, download_file_folder: Path, -): +) -> None: async def mock_download_file_from_link( download_link: URL, local_folder: Path, *, - io_log_redirect_cb: Optional[LogRedirectCB], - file_name: Optional[str] = None, - client_session: Optional[ClientSession] = None, + io_log_redirect_cb: LogRedirectCB | None, + file_name: str | None = None, + client_session: ClientSession | None = None, progress_bar: ProgressBarData, ) -> Path: assert io_log_redirect_cb @@ -207,35 +215,40 @@ def e_tag_fixture() -> str: @pytest.fixture -async def mock_upload_file(mocker, e_tag): - mock = mocker.patch( - "simcore_sdk.node_ports_common.filemanager.upload_file", - return_value=(simcore_store_id(), e_tag), +async def mock_filemanager(mocker: MockerFixture, e_tag: str, faker: Faker) -> None: + mocker.patch( + "simcore_sdk.node_ports_common.filemanager._get_file_meta_data", + return_value=TypeAdapter(FileMetaDataGet).validate_python( + FileMetaDataGet.model_json_schema()["examples"][0], + ), + ) + mocker.patch( + "simcore_sdk.node_ports_common.filemanager.upload_path", + return_value=UploadedFile(simcore_store_id(), e_tag), ) - yield mock @pytest.fixture def common_fixtures( - storage_v0_service_mock, - mock_download_file, - mock_upload_file, + storage_v0_service_mock: AioResponsesMock, + mock_download_file: None, + mock_filemanager: None, this_node_file: Path, another_node_file: Path, download_file_folder: Path, -): +) -> None: """this module main fixture""" class PortParams(NamedTuple): - port_cfg: Union[InputsList, OutputsList] - exp_value_type: Union[Callable, tuple[Callable, ...]] + port_cfg: InputsList | OutputsList + exp_value_type: Callable | tuple[Callable, ...] 
exp_value_converter: type[ItemConcreteValue] - exp_value: Union[DataItemValue, None] - exp_get_value: Union[int, float, bool, str, Path, None] - new_value: Union[int, float, bool, str, Path, None] - exp_new_value: Union[int, float, bool, str, Path, FileLink, None] - exp_new_get_value: Union[int, float, bool, str, Path, None] + exp_value: DataItemValue | None + exp_get_value: int | float | bool | str | Path | None + new_value: int | float | bool | str | Path | None + exp_new_value: int | float | bool | str | Path | FileLink | None + exp_new_get_value: int | float | bool | str | Path | None @pytest.mark.parametrize( @@ -582,13 +595,13 @@ async def test_valid_port( project_id: str, node_uuid: str, port_cfg: dict[str, Any], - exp_value_type: type[Union[int, float, bool, str, Path]], - exp_value_converter: type[Union[int, float, bool, str, Path]], - exp_value: Union[int, float, bool, str, Path, FileLink, DownloadLink, PortLink], - exp_get_value: Union[int, float, bool, str, Path], - new_value: Union[int, float, bool, str, Path], - exp_new_value: Union[int, float, bool, str, Path, FileLink], - exp_new_get_value: Union[int, float, bool, str, Path], + exp_value_type: type[int | float | bool | str | Path], + exp_value_converter: type[int | float | bool | str | Path], + exp_value: int | float | bool | str | Path | FileLink | DownloadLink | PortLink, + exp_get_value: int | float | bool | str | Path, + new_value: int | float | bool | str | Path, + exp_new_value: int | float | bool | str | Path | FileLink, + exp_new_get_value: int | float | bool | str | Path, another_node_file: Path, ): async def _io_log_redirect_cb(logs: str) -> None: @@ -599,11 +612,12 @@ class FakeNodePorts: user_id: int project_id: str node_uuid: str - r_clone_settings: Optional[Any] = None - io_log_redirect_cb: Optional[LogRedirectCB] = _io_log_redirect_cb + r_clone_settings: Any | None = None + io_log_redirect_cb: LogRedirectCB | None = _io_log_redirect_cb + aws_s3_cli_settings: Any | None = None @staticmethod - async def get(key: str, progress_bar: Optional[ProgressBarData] = None): + async def get(key: str, progress_bar: ProgressBarData | None = None): # this gets called when a node links to another node we return the get value but for files it needs to be a real one return ( another_node_file @@ -640,7 +654,6 @@ async def save_to_db_cb(node_ports): assert v == getattr(port, camel_key) # check payload - assert port._py_value_type == exp_value_type assert port._py_value_converter == exp_value_converter assert port.value == exp_value diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_mapping.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_mapping.py index 10c074591fc..4926a9f4123 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_mapping.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_mapping.py @@ -4,20 +4,21 @@ from collections import deque from pprint import pprint -from typing import Any, Dict, List, Type, Union +from typing import Any import pytest from models_library.services import ServiceInput -from pydantic import ValidationError, confloat, schema_of +from pydantic import Field, TypeAdapter, ValidationError from simcore_sdk.node_ports_v2 import exceptions from simcore_sdk.node_ports_v2.port import Port from simcore_sdk.node_ports_v2.ports_mapping import InputsList, OutputsList +from typing_extensions import Annotated from utils_port_v2 import create_valid_port_config @pytest.mark.parametrize("port_class", [InputsList, OutputsList]) -def 
test_empty_ports_mapping(port_class: Type[Union[InputsList, OutputsList]]): - port_mapping = port_class(__root__={}) +def test_empty_ports_mapping(port_class: type[InputsList | OutputsList]): + port_mapping = port_class(root={}) assert not port_mapping.items() assert not port_mapping.values() assert not port_mapping.keys() @@ -28,17 +29,17 @@ def test_empty_ports_mapping(port_class: Type[Union[InputsList, OutputsList]]): @pytest.mark.parametrize("port_class", [InputsList, OutputsList]) -def test_filled_ports_mapping(port_class: Type[Union[InputsList, OutputsList]]): - port_cfgs: Dict[str, Any] = {} +def test_filled_ports_mapping(port_class: type[InputsList | OutputsList]): + port_cfgs: dict[str, Any] = {} for t in ["integer", "number", "boolean", "string"]: port = create_valid_port_config(t) port_cfgs[port["key"]] = port port_cfgs["some_file"] = create_valid_port_config("data:*/*", key="some_file") - port_mapping = port_class(__root__=port_cfgs) + port_mapping = port_class(root=port_cfgs) # two ways to construct instances of __root__ - assert port_class.parse_obj(port_cfgs) == port_mapping + assert port_class.model_validate(port_cfgs) == port_mapping assert len(port_mapping) == len(port_cfgs) for port_key, port_value in port_mapping.items(): @@ -60,8 +61,8 @@ def test_filled_ports_mapping(port_class: Type[Union[InputsList, OutputsList]]): def test_io_ports_are_not_aliases(): # prevents creating alises as InputsList = PortsMappings - inputs = InputsList(__root__={}) - outputs = OutputsList(__root__={}) + inputs = InputsList(root={}) + outputs = OutputsList(root={}) assert isinstance(inputs, InputsList) assert not isinstance(inputs, OutputsList) @@ -71,22 +72,23 @@ def test_io_ports_are_not_aliases(): @pytest.fixture -def fake_port_meta() -> Dict[str, Any]: +def fake_port_meta() -> dict[str, Any]: """Service port metadata: defines a list of non-negative numbers""" - schema = schema_of( - List[confloat(ge=0)], - title="list[non-negative number]", - ) + schema = { + **TypeAdapter(list[Annotated[float, Field(ge=0)]]).json_schema(), + "title": "list[non-negative number]", + } + schema.update( description="Port with an array of numbers", x_unit="millimeter", ) port_model = ServiceInput.from_json_schema(port_schema=schema) - return port_model.dict(exclude_unset=True, by_alias=True) + return port_model.model_dump(exclude_unset=True, by_alias=True) -def test_validate_port_value_against_schema(fake_port_meta: Dict[str, Any]): +def test_validate_port_value_against_schema(fake_port_meta: dict[str, Any]): # A simcore-sdk Port instance is a combination of both # - the port's metadata # - the port's value @@ -109,19 +111,19 @@ def test_validate_port_value_against_schema(fake_port_meta: Dict[str, Any]): assert error["loc"] == ("value",) assert "-2 is less than the minimum of 0" in error["msg"] - assert error["type"] == "value_error.port_validation.schema_error" + assert error["type"] == "value_error" assert "ctx" in error - assert error["ctx"]["port_key"] == "port_1" + assert error["ctx"]["error"].port_key == "port_1" - schema_error_message = error["ctx"]["schema_error_message"] - schema_error_path = error["ctx"]["schema_error_path"] + schema_error_message = error["ctx"]["error"].schema_error_message + schema_error_path = error["ctx"]["error"].schema_error_path assert schema_error_message in error["msg"] assert schema_error_path == deque([1]) -def test_validate_iolist_against_schema(fake_port_meta: Dict[str, Any]): +def test_validate_iolist_against_schema(fake_port_meta: dict[str, Any]): # Check how errors 
propagate from a single Port to InputsList # reference port @@ -151,7 +153,7 @@ def test_validate_iolist_against_schema(fake_port_meta: Dict[str, Any]): # ---- with pytest.raises(ValidationError) as err_info: - InputsList.parse_obj({p["key"]: p for p in ports}) + InputsList.model_validate({p["key"]: p for p in ports}) # --- assert isinstance(err_info.value, ValidationError) @@ -161,14 +163,13 @@ def test_validate_iolist_against_schema(fake_port_meta: Dict[str, Any]): for error in err_info.value.errors(): error_loc = error["loc"] assert "ctx" in error - port_key = error["ctx"].get("port_key") + port_key = error["ctx"]["error"].port_key # path hierachy - assert error_loc[0] == "__root__", f"{error_loc=}" - assert error_loc[1] == port_key, f"{error_loc=}" - assert error_loc[-1] == "value", f"{error_loc=}" + assert error_loc[0] == port_key, f"{error_loc=}" + assert error_loc[1] == "value", f"{error_loc=}" - assert error["type"] == "value_error.port_validation.schema_error" + assert error["type"] == "value_error" port_with_errors.append(port_key) pprint(error) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_validation.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_validation.py index c4c7ad1c9f9..e6bcf71f323 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_validation.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_port_validation.py @@ -13,13 +13,13 @@ from unittest.mock import AsyncMock import pytest -from pydantic import BaseModel, conint, schema_of -from pydantic.error_wrappers import ValidationError +from pydantic import BaseModel, Field, TypeAdapter, ValidationError from simcore_sdk.node_ports_v2.port import Port from simcore_sdk.node_ports_v2.port_validation import ( PortUnitError, validate_port_content, ) +from typing_extensions import Annotated def _replace_value_in_dict(item: Any, original_schema: dict[str, Any]): @@ -79,7 +79,6 @@ def test_validate_port_content(): def test_validate_port_content_fails(): - with pytest.raises(PortUnitError) as err_info: value, unit = validate_port_content( "port_1", @@ -129,12 +128,17 @@ async def test_port_with_array_of_object(mocker): mocker.patch.object(Port, "_node_ports", new=AsyncMock()) class A(BaseModel): - i: conint(gt=3) + i: Annotated[int, Field(gt=3)] b: bool = False s: str - l: list[int] + t: list[int] - content_schema = _resolve_refs(schema_of(list[A], title="array[A]")) + content_schema = _resolve_refs( + { + **TypeAdapter(list[A]).json_schema(), + "title": "array[A]", + } + ) port_meta = { "label": "array_", @@ -142,8 +146,8 @@ class A(BaseModel): "type": "ref_contentSchema", "contentSchema": content_schema, } - sample = [{"i": 5, "s": "x", "l": [1, 2]}, {"i": 6, "s": "y", "l": [2]}] - expected_value = [A(**i).dict() for i in sample] + sample = [{"i": 5, "s": "x", "t": [1, 2]}, {"i": 6, "s": "y", "t": [2]}] + expected_value = [A(**i).model_dump() for i in sample] print(json.dumps(port_meta, indent=1)) print(json.dumps(expected_value, indent=1)) @@ -245,7 +249,7 @@ async def test_port_with_units_and_constraints(mocker): print(validation_error) assert validation_error["loc"] == ("value",) # starts with value,! 
- assert validation_error["type"] == "value_error.port_validation.schema_error" + assert validation_error["type"] == "value_error" assert "-3.14 is less than the minimum of 0" in validation_error["msg"] # inits with None + set_value @@ -257,8 +261,6 @@ async def test_port_with_units_and_constraints(mocker): with pytest.raises(ValidationError) as exc_info: await port.set_value(-3.14) - assert exc_info.value.errors()[0] == validation_error - def test_incident__port_validator_check_value(): # SEE incident https://git.speag.com/oSparc/e2e-testing/-/issues/1) diff --git a/packages/simcore-sdk/tests/unit/test_node_ports_v2_r_clone.py b/packages/simcore-sdk/tests/unit/test_node_ports_v2_r_clone.py index e91edb09326..181813559fb 100644 --- a/packages/simcore-sdk/tests/unit/test_node_ports_v2_r_clone.py +++ b/packages/simcore-sdk/tests/unit/test_node_ports_v2_r_clone.py @@ -4,12 +4,10 @@ import subprocess from pathlib import Path -from typing import Iterable, Optional from unittest.mock import Mock import pytest from faker import Faker -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture from settings_library.r_clone import S3Provider from simcore_sdk.node_ports_common import r_clone @@ -23,78 +21,193 @@ def s3_provider(request) -> S3Provider: @pytest.fixture def r_clone_settings( - monkeypatch: MonkeyPatch, s3_provider: S3Provider + monkeypatch: pytest.MonkeyPatch, s3_provider: S3Provider, faker: Faker ) -> RCloneSettings: monkeypatch.setenv("R_CLONE_PROVIDER", s3_provider.value) - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) return RCloneSettings.create_from_envs() @pytest.fixture -def skip_if_r_clone_is_missing() -> None: +def skip_if_r_clone_is_missing() -> None: # noqa: PT004 try: - subprocess.check_output(["rclone", "--version"]) + subprocess.check_output(["rclone", "--version"]) # noqa: S603, S607 except Exception: # pylint: disable=broad-except pytest.skip("rclone is not installed") @pytest.fixture -def mock_async_command(mocker: MockerFixture) -> Iterable[Mock]: +def mock_async_r_clone_command(mocker: MockerFixture) -> Mock: mock = Mock() - original_async_command = r_clone._async_command + original_async_command = r_clone._async_r_clone_command # noqa: SLF001 - async def _mock_async_command(*cmd: str, cwd: Optional[str] = None) -> str: + async def _mock_async_command(*cmd: str, cwd: str | None = None) -> str: mock() return await original_async_command(*cmd, cwd=cwd) mocker.patch( - "simcore_sdk.node_ports_common.r_clone._async_command", + "simcore_sdk.node_ports_common.r_clone._async_r_clone_command", side_effect=_mock_async_command, ) - yield mock + return mock async def test_is_r_clone_available_cached( r_clone_settings: RCloneSettings, - mock_async_command: Mock, + mock_async_r_clone_command: Mock, skip_if_r_clone_is_missing: None, ) -> None: for _ in range(3): result = await r_clone.is_r_clone_available(r_clone_settings) assert type(result) is bool - assert mock_async_command.call_count == 1 + assert mock_async_r_clone_command.call_count == 1 assert await r_clone.is_r_clone_available(None) is False async def 
test__config_file(faker: Faker) -> None: text_to_write = faker.text() - async with r_clone._config_file(text_to_write) as file_name: + async with r_clone._config_file(text_to_write) as file_name: # noqa: SLF001 assert text_to_write == Path(file_name).read_text() assert Path(file_name).exists() is False async def test__async_command_ok() -> None: - await r_clone._async_command("ls", "-la") + result = await r_clone._async_r_clone_command("ls", "-la") # noqa: SLF001 + assert len(result) > 0 @pytest.mark.parametrize( - "cmd", + "cmd, exit_code, output", [ - ("__i_do_not_exist__",), - ("ls_", "-lah"), + ( + ["__i_do_not_exist__"], + 127, + "/bin/sh: 1: __i_do_not_exist__: not found", + ), + ( + ["ls_", "-lah"], + 127, + "/bin/sh: 1: ls_: not found", + ), + ( + ["echo", "this command will fail", "&&", "false"], + 1, + "this command will fail", + ), ], ) -async def test__async_command_error(cmd: list[str]) -> None: +async def test__async_command_error( + cmd: list[str], exit_code: int, output: str +) -> None: with pytest.raises(r_clone.RCloneFailedError) as exe_info: - await r_clone._async_command(*cmd) + await r_clone._async_r_clone_command(*cmd) # noqa: SLF001 assert ( f"{exe_info.value}" - == f"Command {' '.join(cmd)} finished with exception:\n/bin/sh: 1: {cmd[0]}: not found\n" + == f"Command {' '.join(cmd)} finished with exit code={exit_code}:\n{output}\n" ) + + +@pytest.fixture +def exclude_patterns_validation_dir(tmp_path: Path, faker: Faker) -> Path: + """Directory with well known structure""" + base_dir = tmp_path / "exclude_patterns_validation_dir" + base_dir.mkdir() + (base_dir / "empty").mkdir() + (base_dir / "d1").mkdir() + (base_dir / "d1" / "f1").write_text(faker.text()) + (base_dir / "d1" / "f2.txt").write_text(faker.text()) + (base_dir / "d1" / "sd1").mkdir() + (base_dir / "d1" / "sd1" / "f1").write_text(faker.text()) + (base_dir / "d1" / "sd1" / "f2.txt").write_text(faker.text()) + + return base_dir + + +EMPTY_SET: set[Path] = set() +ALL_ITEMS_SET: set[Path] = { + Path("d1/f2.txt"), + Path("d1/f1"), + Path("d1/sd1/f1"), + Path("d1/sd1/f2.txt"), +} + + +# + /exclude_patterns_validation_dir +# + empty +# + d1 +# - f2.txt +# + sd1 +# - f2.txt +# - f1 +# - f1 +@pytest.mark.parametrize( + "exclude_patterns, expected_result", + [ + pytest.param({"/d1*"}, EMPTY_SET), + pytest.param( + {"/d1/sd1*"}, + { + Path("d1/f2.txt"), + Path("d1/f1"), + }, + ), + pytest.param( + {"d1*"}, + EMPTY_SET, + ), + pytest.param( + {"*d1*"}, + EMPTY_SET, + ), + pytest.param( + {"*.txt"}, + {Path("d1/f1"), Path("d1/sd1/f1")}, + ), + pytest.param( + {"/absolute/path/does/not/exist*"}, + ALL_ITEMS_SET, + ), + pytest.param( + {"/../../this/is/ignored*"}, + ALL_ITEMS_SET, + ), + pytest.param( + {"*relative/path/does/not/exist"}, + ALL_ITEMS_SET, + ), + pytest.param( + None, + ALL_ITEMS_SET, + ), + ], +) +async def test__get_exclude_filter( + skip_if_r_clone_is_missing: None, + exclude_patterns_validation_dir: Path, + exclude_patterns: set[str] | None, + expected_result: set[Path], +): + command: list[str] = [ + "rclone", + "--quiet", + "--dry-run", + "--links", + *r_clone._get_exclude_filters(exclude_patterns), # noqa: SLF001 + "lsf", + "--absolute", + "--files-only", + "--recursive", + f"{exclude_patterns_validation_dir}", + ] + ls_result = await r_clone._async_r_clone_command(*command) # noqa: SLF001 + relative_files_paths: set[Path] = { + Path(x.lstrip("/")) for x in ls_result.split("\n") if x + } + assert relative_files_paths == expected_result diff --git 
a/packages/simcore-sdk/tests/unit/test_storage_client.py b/packages/simcore-sdk/tests/unit/test_storage_client.py index 1fab53e7f6b..feb61ed2042 100644 --- a/packages/simcore-sdk/tests/unit/test_storage_client.py +++ b/packages/simcore-sdk/tests/unit/test_storage_client.py @@ -4,15 +4,15 @@ # pylint:disable=too-many-arguments import re -from typing import AsyncIterator +from collections.abc import AsyncIterator, Iterable +from typing import Final from uuid import uuid4 import aiohttp import pytest -from aiohttp import web from aioresponses import aioresponses as AioResponsesMock from faker import Faker -from models_library.api_schemas_storage import ( +from models_library.api_schemas_storage.storage_schemas import ( FileLocationArray, FileMetaDataGet, FileUploadSchema, @@ -20,28 +20,62 @@ ) from models_library.projects_nodes_io import SimcoreS3FileID from models_library.users import UserID -from pydantic import ByteSize -from pydantic.networks import AnyUrl +from pydantic import AnyUrl, ByteSize, TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.aiohttp import status from simcore_sdk.node_ports_common import exceptions +from simcore_sdk.node_ports_common._filemanager_utils import ( + _get_https_link_if_storage_secure, +) from simcore_sdk.node_ports_common.storage_client import ( LinkType, delete_file, get_download_file_link, get_file_metadata, - get_storage_locations, get_upload_file_links, list_file_metadata, + list_storage_locations, +) +from simcore_sdk.node_ports_common.storage_endpoint import ( + get_base_url, + get_basic_auth, + is_storage_secure, ) +def _clear_caches(): + get_base_url.cache_clear() + get_basic_auth.cache_clear() + + +@pytest.fixture +def clear_caches() -> Iterable[None]: + _clear_caches() + yield + _clear_caches() + + +@pytest.fixture() +def mock_postgres(monkeypatch: pytest.MonkeyPatch, faker: Faker) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "POSTGRES_HOST": faker.pystr(), + "POSTGRES_USER": faker.user_name(), + "POSTGRES_PASSWORD": faker.password(), + "POSTGRES_DB": faker.pystr(), + }, + ) + + @pytest.fixture() -def mock_environment(monkeypatch: pytest.MonkeyPatch, faker: Faker): - monkeypatch.setenv("STORAGE_HOST", "fake_storage") - monkeypatch.setenv("STORAGE_PORT", "1535") - monkeypatch.setenv("POSTGRES_HOST", faker.pystr()) - monkeypatch.setenv("POSTGRES_USER", faker.user_name()) - monkeypatch.setenv("POSTGRES_PASSWORD", faker.password()) - monkeypatch.setenv("POSTGRES_DB", faker.pystr()) +def mock_environment( + mock_postgres: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + {"STORAGE_HOST": "fake_storage", "STORAGE_PORT": "1535", **mock_postgres}, + ) @pytest.fixture() @@ -60,13 +94,14 @@ async def session() -> AsyncIterator[aiohttp.ClientSession]: yield session -async def test_get_storage_locations( - session: aiohttp.ClientSession, - mock_environment: None, +async def test_list_storage_locations( + clear_caches: None, storage_v0_service_mock: AioResponsesMock, + mock_postgres: EnvVarsDict, + session: aiohttp.ClientSession, user_id: UserID, ): - result = await get_storage_locations(session=session, user_id=user_id) + result = await list_storage_locations(session=session, user_id=user_id) assert isinstance(result, FileLocationArray) # type: ignore assert len(result) == 1 @@ -79,7 +114,8 @@ async def test_get_storage_locations( [(LinkType.PRESIGNED, ("http", "https")), (LinkType.S3, ("s3", "s3a"))], ) async def 
test_get_download_file_link( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -104,7 +140,8 @@ async def test_get_download_file_link( [(LinkType.PRESIGNED, ("http", "https")), (LinkType.S3, ("s3", "s3a"))], ) async def test_get_upload_file_links( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -112,6 +149,7 @@ async def test_get_upload_file_links( location_id: LocationID, link_type: LinkType, expected_scheme: tuple[str], + faker: Faker, ): file_upload_links = await get_upload_file_links( session=session, @@ -120,13 +158,16 @@ async def test_get_upload_file_links( user_id=user_id, link_type=link_type, file_size=ByteSize(0), + is_directory=False, + sha256_checksum=faker.sha256(), ) assert isinstance(file_upload_links, FileUploadSchema) assert file_upload_links.urls[0].scheme in expected_scheme async def test_get_file_metada( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -137,8 +178,8 @@ async def test_get_file_metada( session=session, file_id=file_id, location_id=location_id, user_id=user_id ) assert file_metadata - assert file_metadata == FileMetaDataGet.parse_obj( - FileMetaDataGet.Config.schema_extra["examples"][0] + assert file_metadata == FileMetaDataGet.model_validate( + FileMetaDataGet.model_json_schema()["examples"][0] ) @@ -151,10 +192,12 @@ def storage_v0_service_mock_get_file_meta_data_not_found( r"^http://[a-z\-_]*storage:[0-9]+/v0/locations/[0-9]+/files/.+/metadata.+$" ) if request.param == "version1": - # NOTE: the old storage service did not consider using a 404 for when file is not found + # + # WARNING: this is a LEGACY test. Do not modify this response. 
+ # - The old storage service did not consider using a 404 for when file is not found aioresponses_mocker.get( get_file_metadata_pattern, - status=web.HTTPOk.status_code, + status=status.HTTP_200_OK, payload={"error": "No result found", "data": {}}, repeat=True, ) @@ -162,14 +205,15 @@ def storage_v0_service_mock_get_file_meta_data_not_found( # NOTE: the new storage service shall do it right one day and we shall be prepared aioresponses_mocker.get( get_file_metadata_pattern, - status=web.HTTPNotFound.status_code, + status=status.HTTP_404_NOT_FOUND, repeat=True, ) return aioresponses_mocker async def test_get_file_metada_invalid_s3_path( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock_get_file_meta_data_not_found: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -186,7 +230,8 @@ async def test_get_file_metada_invalid_s3_path( async def test_list_file_metadata( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -200,7 +245,8 @@ async def test_list_file_metadata( async def test_delete_file( - mock_environment: None, + clear_caches: None, + mock_environment: EnvVarsDict, storage_v0_service_mock: AioResponsesMock, session: aiohttp.ClientSession, user_id: UserID, @@ -210,3 +256,148 @@ async def test_delete_file( await delete_file( session=session, file_id=file_id, location_id=location_id, user_id=user_id ) + + +@pytest.mark.parametrize( + "envs, expected_base_url", + [ + pytest.param( + { + "NODE_PORTS_STORAGE_AUTH": ( + '{"STORAGE_USERNAME": "user", ' + '"STORAGE_PASSWORD": "passwd", ' + '"STORAGE_HOST": "host", ' + '"STORAGE_PORT": "42"}' + ) + }, + "http://host:42/v0", + id="json-no-auth", + ), + pytest.param( + { + "STORAGE_USERNAME": "user", + "STORAGE_PASSWORD": "passwd", + "STORAGE_HOST": "host", + "STORAGE_PORT": "42", + }, + "http://host:42/v0", + id="single-vars+auth", + ), + pytest.param( + { + "NODE_PORTS_STORAGE_AUTH": ( + '{"STORAGE_USERNAME": "user", ' + '"STORAGE_PASSWORD": "passwd", ' + '"STORAGE_HOST": "host", ' + '"STORAGE_SECURE": "1",' + '"STORAGE_PORT": "42"}' + ) + }, + "https://host:42/v0", + id="json-no-auth", + ), + pytest.param( + { + "STORAGE_USERNAME": "user", + "STORAGE_PASSWORD": "passwd", + "STORAGE_HOST": "host", + "STORAGE_SECURE": "1", + "STORAGE_PORT": "42", + }, + "https://host:42/v0", + id="single-vars+auth", + ), + ], +) +def test_mode_ports_storage_with_auth( + clear_caches: None, + mock_postgres: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + envs: dict[str, str], + expected_base_url: str, +): + setenvs_from_dict(monkeypatch, envs) + + assert get_base_url() == expected_base_url + assert get_basic_auth() == aiohttp.BasicAuth( + login="user", password="passwd", encoding="latin1" + ) + + +@pytest.mark.parametrize( + "envs, expected_base_url", + [ + pytest.param( + {}, + "http://storage:8080/v0", + id="no-overwrites", + ), + pytest.param( + { + "STORAGE_HOST": "a-host", + "STORAGE_PORT": "54", + }, + "http://a-host:54/v0", + id="custom-host-port", + ), + ], +) +def test_mode_ports_storage_without_auth( + clear_caches: None, + mock_postgres: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + envs: dict[str, str], + expected_base_url: str, +): + setenvs_from_dict(monkeypatch, envs) + + assert get_base_url() == expected_base_url + assert get_basic_auth() is None + + +_HTTP_URL: Final[str] = "http://a" +_HTTPS_URL: Final[str] = "https://a" + + 
+@pytest.mark.parametrize( + "storage_secure, provided, expected", + [ + (True, _HTTP_URL, _HTTPS_URL), + (False, _HTTP_URL, _HTTP_URL), + ( + True, + str(TypeAdapter(AnyUrl).validate_python(_HTTP_URL)).rstrip("/"), + _HTTPS_URL, + ), + ( + False, + str(TypeAdapter(AnyUrl).validate_python(_HTTP_URL)).rstrip("/"), + _HTTP_URL, + ), + (True, _HTTPS_URL, _HTTPS_URL), + (False, _HTTPS_URL, _HTTPS_URL), + ( + True, + str(TypeAdapter(AnyUrl).validate_python(_HTTPS_URL)).rstrip("/"), + _HTTPS_URL, + ), + ( + False, + str(TypeAdapter(AnyUrl).validate_python(_HTTPS_URL)).rstrip("/"), + _HTTPS_URL, + ), + (True, "http://http", "https://http"), + (True, "https://http", "https://http"), + ], +) +def test__get_secure_link( + mock_postgres: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + storage_secure: bool, + provided: str, + expected: str, +): + is_storage_secure.cache_clear() + + setenvs_from_dict(monkeypatch, {"STORAGE_SECURE": "1" if storage_secure else "0"}) + assert _get_https_link_if_storage_secure(str(provided)) == expected diff --git a/requirements/PYTHON_VERSION b/requirements/PYTHON_VERSION index bd28b9c5c27..c8cfe395918 100644 --- a/requirements/PYTHON_VERSION +++ b/requirements/PYTHON_VERSION @@ -1 +1 @@ -3.9 +3.10 diff --git a/requirements/base.Makefile b/requirements/base.Makefile index 5ae5885ccea..35823f26d16 100644 --- a/requirements/base.Makefile +++ b/requirements/base.Makefile @@ -9,7 +9,7 @@ REPO_BASE_DIR := $(shell git rev-parse --show-toplevel) .DEFAULT_GOAL := help DO_CLEAN_OR_UPGRADE:=$(if $(clean),,--upgrade) -UPGRADE_OPTION := $(if $(upgrade),--upgrade-package $(upgrade),$(DO_CLEAN_OR_UPGRADE)) +UPGRADE_OPTION := $(if $(upgrade),--upgrade-package "$(upgrade)",$(DO_CLEAN_OR_UPGRADE)) objects = $(sort $(wildcard *.in)) @@ -21,8 +21,8 @@ touch: @$(foreach p,${objects},touch ${p};) -check: ## Checks whether pip-compile is installed - @which pip-compile > /dev/null +check: ## Checks whether uv is installed + @which uv > /dev/null clean: check ## Cleans all requirements/*.txt @@ -45,10 +45,8 @@ help: ## this colorful help # %.txt: %.in cd ..; \ - pip-compile $(UPGRADE_OPTION) \ - --build-isolation \ - --strip-extras \ - --resolver=backtracking \ + uv pip compile $(UPGRADE_OPTION) \ + --no-header \ --output-file requirements/$@ requirements/$< _test.txt: _base.txt diff --git a/requirements/common-test.in b/requirements/common-test.in new file mode 100644 index 00000000000..b4a17edaadd --- /dev/null +++ b/requirements/common-test.in @@ -0,0 +1,11 @@ +# +# Basic testing libraries +# +# Code coverage: https://app.codecov.io/gh/ITISFoundation/osparc-simcore + +coverage # Measures code coverage during test execution +pytest # Framework makes it easy to write small tests, yet scales to support complex functional testing for applications and libraries +pytest-cov # Produces coverage reports +pytest-icdiff # Better diffs in pytest assertion error messages using ICDiff +pytest-instafail # Shows failures and errors instantly instead of waiting until the end of test session +pytest-sugar # Shows failures and errors instantly, adding a progress bar, improving the test results, and making the output look better diff --git a/requirements/constraints.txt b/requirements/constraints.txt index a3ef7c52497..44a81178a5c 100644 --- a/requirements/constraints.txt +++ b/requirements/constraints.txt @@ -4,71 +4,74 @@ # - breaking changes # - known bugs/malfunction # - coordination (e.g. 
enforce same library in the entire repository) +# - blocked libraries (specify why) # -# Vulnerabilities +# Vulnerabilities ----------------------------------------------------------------------------------------- # -aiohttp>=3.7.4 # https://github.com/advisories/GHSA-v6wp-4m6f-gcjg -cryptography>=39.0.1 # https://github.com/advisories/GHSA-x4qr-2fvf-3mr5 Mar.2023 +aiohttp>=3.7.4, !=3.11.13 # https://github.com/advisories/GHSA-v6wp-4m6f-gcjg, 3.11.13 was yanked https://github.com/aio-libs/aiohttp/issues/10617 +certifi>=2023.7.22 # https://github.com/advisories/GHSA-xqr8-7jwr-rhp7 +cryptography>=41.0.6 # https://github.com/advisories/GHSA-v8gr-m533-ghj9 httpx>=0.23.0 # https://github.com/advisories/GHSA-h8pj-cxx2-jfg2 / CVE-2021-41945 jinja2>=2.11.3 # https://github.com/advisories/GHSA-g3rq-g295-4j3m mako>=1.2.2 # https://github.com/advisories/GHSA-v973-fxgf-6xhp +orjson>=3.9.15 # https://github.com/advisories/GHSA-pwr2-4v36-6qpr paramiko>=2.10.1 # https://github.com/advisories/GHSA-f8q4-jwww-x3wv py>=1.11.0 # https://github.com/advisories/GHSA-w596-4wvx-j9j6 / CVE-2022-42969 -pydantic>=1.8.2 # https://github.com/advisories/GHSA-5jqp-qgf6-3pvh pyyaml>=5.4 # https://github.com/advisories/GHSA-8q59-q68h-6hv4 +redis>=4.5.4 # https://github.com/advisories/GHSA-24wv-mv5m-xv4h rsa>=4.1 # https://github.com/advisories/GHSA-537h-rv9q-vvph sqlalchemy>=1.3.3 # https://nvd.nist.gov/vuln/detail/CVE-2019-7164 +starlette>=0.27.0 # https://github.com/advisories/GHSA-qj8w-rv5x-2v9h ujson>=5.4.0 # https://github.com/advisories/GHSA-fh56-85cw-5pq6, https://github.com/advisories/GHSA-wpqr-jcpx-745r urllib3>=1.26.5 # https://github.com/advisories/GHSA-q2q7-5pp4-w6pg -# Blocked https://github.com/Pennsieve/pennsieve-python/issues/17 -# protobuf # https://github.com/advisories/GHSA-8gq9-2x98-w8hf - - - # -# Breaking changes +# Breaking changes ----------------------------------------------------------------------------------------- # - # with new released version 1.0.0 (https://github.com/aio-libs/aiozipkin/releases). # TODO: includes async features https://docs.sqlalchemy.org/en/14/changelog/migration_20.html sqlalchemy<2.0 -# with the new release >=1.10.3 dict schemas changed output structure (https://docs.pydantic.dev/changelog/) -# NOTE: https://github.com/ITISFoundation/osparc-simcore/issues/3905 -pydantic<1.10.3 # # Bugs # +httpx!=0.28.0 # Waiting for fix in respx: https://github.com/lundberg/respx/pull/278 + -# FIXME: minio 7.1.0 does not delete objects. SEE -minio==7.0.4 # -# Compatibility/coordination +# Compatibility/coordination ----------------------------------------------------------------------------------------- # +pydantic>=2.10.0 # Avoids inter-version compatibility serialization errors as: _pickle.UnpicklingError: NEWOBJ class argument must be a type, not _AnnotatedAlias - -# Keeps all docker compose to the same version. 
TODO: remove when all synced -docker-compose==1.29.1 +# Python 3.10 compatibility - replace distutils functionality +setuptools>=45 +packaging>=20.9 -# constraint since https://github.com/MagicStack/uvloop/releases/tag/v0.15.0: drops support for 3.5/3.6 Feb.2021 -uvloop<0.15.0 ; python_version < '3.7' -# All backports libraries add environ markers -# NOTE: If >second dependency, this will annotate a marker in the compiled requirements file # -async-exit-stack ; python_version < '3.7' -async-generator ; python_version < '3.7' -contextvars ; python_version < '3.7' -dataclasses ; python_version < '3.7' -importlib-metadata ; python_version < '3.8' -importlib-resources ; python_version < '3.9' -typing-extensions ; python_version < '3.7' -zipp ; python_version < '3.7' +# Blocked ----------------------------------------------------------------------------------------- +# + +# We use aiofiles (with s) and NOT this one. +aiofile>=999999999 + +# Dependencies were blocking updates. Instead of using the python client we +# directly use http calls. +# SEE https://github.com/Pennsieve/pennsieve-python/issues/17 +pennsieve>=999999999 + +# Use an alternative e.g. parametrized fixture or request.getfixturevalue(.) +pytest-lazy-fixture>=999999999 + +# avoid downgrades of openapi-spec-validator related libraries +referencing<=0.35.1 + +# See issue https://github.com/ITISFoundation/osparc-simcore/issues/7300 +pydantic-settings<2.7.1 diff --git a/requirements/devenv.txt b/requirements/devenv.txt index 5d1cbf17bee..a44efd68242 100644 --- a/requirements/devenv.txt +++ b/requirements/devenv.txt @@ -25,5 +25,11 @@ pip-tools # version manager bump2version +# static type checker for Python that aims to combine the benefits of dynamic (or "duck") typing and static typing +mypy + # SEE `make pylint` pylint + +# ultra-fast linter +ruff diff --git a/requirements/how-to-unify-versions.md b/requirements/how-to-unify-versions.md index 5dc99cd921b..37842e171dd 100644 --- a/requirements/how-to-unify-versions.md +++ b/requirements/how-to-unify-versions.md @@ -47,7 +47,7 @@ do done ``` -It would also be possible to upgrade them simultaneously by using ``--upgrade`` multiple times as ``pip-compile --upgrade X --upgrade Y ...`` +It would also be possible to upgrade them simultaneously by using ``--upgrade`` multiple times as ``uv pip compile --upgrade X --upgrade Y ...`` but we prefer to do it one by one and commit changes so that any issue can be tracked to the library upgrade diff --git a/requirements/how-to-upgrade-python.md b/requirements/how-to-upgrade-python.md index 27278c27397..56c3e32a2cf 100644 --- a/requirements/how-to-upgrade-python.md +++ b/requirements/how-to-upgrade-python.md @@ -14,8 +14,8 @@ Both python and pip version are specified: - repository's *preferred* python version file in ``requirements/PYTHON_VERSION`` (could not version standard `.python-version` because then we cannot work with different versions on the same repo) - in the services/scripts ``Dockerfile``: ```Dockerfile - ARG PYTHON_VERSION="3.9.12" - FROM python:${PYTHON_VERSION}-slim-buster as base + ARG PYTHON_VERSION="3.10.14" + FROM python:${PYTHON_VERSION}-slim-bookworm as base ``` - in the CI ``.github/workflows/ci-testing-deploy.yml`` ```yaml @@ -24,7 +24,7 @@ Both python and pip version are specified: runs-on: ${{ matrix.os }} strategy: matrix: - python: ["3.9"] + python: ["3.10"] ``` and in ``ci/helpers/ensure_python_pip.bash`` @@ -47,7 +47,7 @@ Both python and pip version are specified: runs-on: ${{ matrix.os }} strategy: matrix: - python:
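The constraints file above blocks `pytest-lazy-fixture` and points to `request.getfixturevalue(.)` as the replacement. A small, hypothetical pytest sketch of that pattern (the fixture names and values are made up for illustration):

```python
import pytest


@pytest.fixture
def postgres_dsn() -> str:  # hypothetical fixture
    return "postgresql://scu:adminadmin@127.0.0.1:5432/simcoredb"


@pytest.fixture
def sqlite_dsn() -> str:  # hypothetical fixture
    return "sqlite:///:memory:"


@pytest.mark.parametrize("dsn_fixture_name", ["postgres_dsn", "sqlite_dsn"])
def test_can_connect(dsn_fixture_name: str, request: pytest.FixtureRequest):
    # resolve the fixture lazily by name instead of relying on pytest-lazy-fixture
    dsn = request.getfixturevalue(dsn_fixture_name)
    assert "://" in dsn
```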
["3.9", "3.10"] + python: ["3.10", "3.12"] ``` - [pyupgrade](https://github.com/asottile/pyupgrade) tool which has been containarized (``scripts/pyupgrade.bash``) and added as a Makefile recipe (``make pyupgrade``) diff --git a/requirements/packages-notes.md b/requirements/packages-notes.md index 93994e23b7b..c342aa85e2b 100644 --- a/requirements/packages-notes.md +++ b/requirements/packages-notes.md @@ -17,9 +17,6 @@ Keeps a list notes with relevant information about releases of python package. S - is a backport of Python 3.9’s standard library [importlib.resources](https://docs.python.org/3.7/library/importlib.html#module-importlib.resources) - [packaging](https://packaging.pypa.io/en/latest/) - used for version handling, specifiers, markers, requirements, tags, utilities. It follows several PEPs and will probably end up in the python standard library - - might prefer over ``pkg_resources`` -- [pkg_resources](https://setuptools.readthedocs.io/en/latest/pkg_resources.html) - - most of the functionlity seems to be moved into the standard library. Some backports of those are ``importlib-metadata``, ``importlib-resources`` and ``packaging``. - [dataclasses](https://pypi.org/project/dataclasses/) - a backport of the [``dataclasses`` module](https://docs.python.org/3/library/dataclasses.html) for Python 3.6. Included as dataclasses in standard library from python >=3.7. - here is included as a dependency to [pydantic](https://pydantic-docs.helpmanual.io/) diff --git a/requirements/python-dependencies.md b/requirements/python-dependencies.md index ac6da794e22..a24fea0a250 100644 --- a/requirements/python-dependencies.md +++ b/requirements/python-dependencies.md @@ -211,7 +211,7 @@ crespo@8ac9edf78469:~/services/api-server/requirements$ make reqs 1. [Using pip-tools to manage my python dependencies](https://alexwlchan.net/2017/10/pip-tools/) by alexwlchan 1. [A successful pip-tools workflow for managing Python package requirements](https://jamescooke.info/a-successful-pip-tools-workflow-for-managing-python-package-requirements.html) by J. Cooke 1. [Python Application Dependency Management in 2018](https://hynek.me/articles/python-app-deps-2018/#pip-tools-everything-old-is-new-again) by Hynek Schlawack - +1. 
[Dealing with dependency conflicts](https://pip.pypa.io/en/latest/topics/dependency-resolution/#dealing-with-dependency-conflicts) in [pip] doc [pip-tools]:https://github.com/jazzband/pip-tools [pip]:https://pip.pypa.io/en/stable/reference/ diff --git a/requirements/tools/Dockerfile b/requirements/tools/Dockerfile index 49e3ccf7159..bfee8c9f6ec 100644 --- a/requirements/tools/Dockerfile +++ b/requirements/tools/Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # NOTE: This is a first step towards a devcontainer # to perform operations like pip-compile or auto-formatting # that preserves identical environment across developer machines @@ -7,29 +8,39 @@ # - Can be installed with pyenv (SEE pyenv install --list ) # # -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# we docker image is built based on debian +FROM python:${PYTHON_VERSION}-slim-bookworm AS base +ENV VIRTUAL_ENV=/home/scu/.venv -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update \ && apt-get -y install --no-install-recommends\ make \ git \ gawk \ - && rm -rf /var/lib/apt/lists/* \ - && apt-get clean + && apt-get clean -y -# SEE bug with pip==22.1 https://github.com/jazzband/pip-tools/issues/1617 -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +RUN uv venv "${VIRTUAL_ENV}" + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools + # devenv -RUN pip install \ - pip-tools \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install \ pipreqs \ pipdeptree && \ - pip list -vv + uv pip list -vv diff --git a/requirements/tools/Makefile b/requirements/tools/Makefile index b8f8b7cc39e..44e6c8f12a4 100644 --- a/requirements/tools/Makefile +++ b/requirements/tools/Makefile @@ -14,7 +14,7 @@ # .DEFAULT_GOAL := help -PYTHON_VERSION=3.9.12 +PYTHON_VERSION=3.10.14 # locations REPODIR := $(shell git rev-parse --show-toplevel) @@ -32,8 +32,11 @@ MAKE_C := $(MAKE) --directory .PHONY: touch reqs info # requirements in packages and services -_compiled-all = $(shell find $(REPODIR) -path "*/requirements*.txt") -_inputs-all = $(shell find $(REPODIR) -path "*/requirements*.in") +_all-txt = $(shell find $(REPODIR) -path "*/requirements*.txt") +_all-in := $(shell find ${REPODIR} -path "*/requirements*.in" | sort) +_tools-in := $(shell find ${REPODIR} -path "*/requirements/*tools.in" | sort) +_services-in := $(shell find ${SERVICES_DIR} -path "*/requirements*.in" | sort) + # packages/ _target-inputs = $(shell find ${PACKAGES_DIR} -type f -name _base.in) @@ -60,31 +63,41 @@ info: ## displays some info touch: ## touches all package requirement inputs # First aligns time-stamps: ensures compiled timestamp is older than target inputs - @$(foreach p,${_inputs-all},touch $(p);) - @$(foreach p,${_compiled-all},touch $(p);) + @$(foreach p,${_all-txt},touch $(p);) + @$(foreach p,${_all-in},touch $(p);) # Touchs all target input requirements @$(foreach p,${_target-inputs},touch $(p);) +only-tools: ## upgrades tools repo wide + # Upgrading ONLY _tools.in + @$(foreach p,${_tools-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);) + reqs: ## updates test & tooling requirements # Upgrading $(upgrade) requirements @$(foreach p,${_target-inputs},echo Touching 
$(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);) -reqs-all: guard-UPGRADE_OPTION ## updates a give package repository-wise (e.g. make reqs-all upgrade=foo==1.2.3 ) +reqs-all: guard-UPGRADE_OPTION ## updates a given package repository-wise IN ALL `requirements/` folders (e.g. make reqs-all upgrade=foo==1.2.3 ) # Upgrading $(upgrade) ALL requirements - @$(foreach p,${_inputs-all},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);) + @$(foreach p,${_all-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);) + +reqs-services: guard-UPGRADE_OPTION ## updates a given package on all services [and not packages] (e.g. make reqs-services upgrade=foo==1.2.3 ) + # Upgrading $(upgrade) in services + @$(foreach p,${_services-in},echo Touching $(p);touch $(p);$(MAKE_C) $(dir $(p)) reqs $(UPGRADE_OPTION);) + reqs-ci: ## upgrades requirements for pylint recipe in CI cd $(REPODIR)/ci/helpers \ && rm requirements.txt \ - && pip-compile --output-file=requirements.txt requirements.in + && uv pip compile --resolver=backtracking --strip-extras requirements.in IMAGE_NAME:=local/python-devkit:${PYTHON_VERSION} # SEE https://medium.com/faun/set-current-host-user-for-docker-container-4e521cef9ffc .PHONY: build build build-nc: ## builds tooling image ${IMAGE_NAME} - docker build $(if $(findstring -nc,$@),--no-cache,) \ + docker buildx build $(if $(findstring -nc,$@),--no-cache,) \ + --load \ --build-arg PYTHON_VERSION="${PYTHON_VERSION}" \ --tag ${IMAGE_NAME} . diff --git a/requirements/tools/check_changes.py b/requirements/tools/check_changes.py index 2c7954d8f0d..e14425a740b 100644 --- a/requirements/tools/check_changes.py +++ b/requirements/tools/check_changes.py @@ -6,7 +6,7 @@ from collections import Counter, defaultdict from contextlib import contextmanager from pathlib import Path -from typing import Literal, NamedTuple, Optional +from typing import Literal, NamedTuple from packaging.version import Version @@ -125,7 +125,7 @@ def format_reqs_paths(req_paths): def main_changes_stats() -> None: - filepath = Path("changes.ignore.keep.log") + filepath = Path("changes.ignore.log") if not filepath.exists(): dump_changes(filepath) @@ -134,20 +134,22 @@ def main_changes_stats() -> None: # format print("### Highlights on updated libraries (only updated libraries are included)") print() - print("- #packages before:", len(before)) - print("- #packages after :", len(after)) + print("- #packages before ~", len(before)) + print("- #packages after ~", len(after)) print("") COLUMNS = ["#", "name", "before", "after", "upgrade", "count", "packages"] with printing_table(COLUMNS): - - for i, name in enumerate(sorted(before.keys()), start=1): + i = 1 + for name in sorted(before.keys()): # TODO: where are these libraries? # TODO: are they first dependencies? # TODO: if major, get link to release notes from_versions = {str(v) for v in before[name]} to_versions = {str(v) for v in after[name]} + if from_versions == to_versions: + continue used_packages = [] if req_paths := lib2reqs.get(name): @@ -173,6 +175,7 @@ def main_changes_stats() -> None: "
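The `check_changes.py` hunks around this point compare the library versions found before and after an upgrade and, with this change, skip libraries whose version set did not move. A condensed sketch of that comparison logic, using `packaging.Version` as the script does, with made-up example data:

```python
from packaging.version import Version

# made-up example data: library name -> set of versions found in the compiled requirements
before = {"pydantic": {Version("1.10.2")}, "httpx": {Version("0.27.0")}}
after = {"pydantic": {Version("2.10.3")}, "httpx": {Version("0.27.0")}}

row = 1
for name in sorted(before):
    if before[name] == after[name]:
        continue  # only updated libraries end up in the report table
    print(row, name, sorted(map(str, before[name])), "->", sorted(map(str, after[name])))
    row += 1
```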
".join(sorted(used_packages)), "|", ) + i += 1 print() print("*Legend*: ") @@ -201,9 +204,7 @@ class ReqFile(NamedTuple): dependencies: dict[str, Version] -def parse_dependencies( - repodir: Path, *, exclude: Optional[set] = None -) -> list[ReqFile]: +def parse_dependencies(repodir: Path, *, exclude: set | None = None) -> list[ReqFile]: reqs = [] exclude = exclude or set() for reqfile in repodir.rglob("**/requirements/_*.txt"): @@ -229,7 +230,7 @@ def parse_dependencies( return reqs -def repo_wide_changes(exclude: Optional[set] = None) -> None: +def repo_wide_changes(exclude: set | None = None) -> None: reqs = parse_dependencies(REPODIR, exclude=exclude) # format diff --git a/scripts/act.bash b/scripts/act.bash index 8a9bdadeb21..73d4d77e0d3 100755 --- a/scripts/act.bash +++ b/scripts/act.bash @@ -14,7 +14,7 @@ DOCKER_IMAGE_NAME=dind-act-runner ACT_RUNNER=ubuntu-20.04=catthehacker/ubuntu:act-20.04 ACT_VERSION_TAG=v0.2.20 # from https://github.com/nektos/act/releases -docker build -t $DOCKER_IMAGE_NAME - < /dev/null + +COPY ./list_versions.bash ./list_versions.bash + +CMD ["./list_versions.bash"] diff --git a/scripts/apt-packages-versions/Makefile b/scripts/apt-packages-versions/Makefile new file mode 100644 index 00000000000..9cb7fa5d234 --- /dev/null +++ b/scripts/apt-packages-versions/Makefile @@ -0,0 +1,7 @@ +.DEFAULT_GOAL := list-versions + +SHELL := /bin/bash + +list-versions: + docker build -t docker-services-apt-versions:latest . + docker run -it --rm docker-services-apt-versions:latest diff --git a/scripts/apt-packages-versions/list_versions.bash b/scripts/apt-packages-versions/list_versions.bash new file mode 100755 index 00000000000..041e68f1ced --- /dev/null +++ b/scripts/apt-packages-versions/list_versions.bash @@ -0,0 +1,16 @@ +#!/bin/bash + +# apt package names +PACKAGES_NAMES_LIST=( + "docker-ce-cli" + "docker-compose-plugin" +) + +# update apt cahce +apt-get update + +# dispaly verson for each package +for pckage_name in "${PACKAGES_NAMES_LIST[@]}"; do + echo -e "\nListing versions for pckage: '${pckage_name}'\n" + apt-cache madison "${pckage_name}" | awk '{ print $3 }' +done diff --git a/scripts/code-climate.bash b/scripts/code-climate.bash deleted file mode 100755 index 570f575a43a..00000000000 --- a/scripts/code-climate.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# -# Runs code climate locally on CWD -# -# SEE https://github.com/codeclimate/codeclimate#manual-docker-invocation -# https://github.com/codeclimate/codeclimate#commands -# -echo Running codeclimate on "$PWD" - -TMPDIR=/tmp/cc -mkdir --parent ${TMPDIR} - -docker run \ - --interactive --tty --rm \ - --env CODECLIMATE_CODE="$PWD" \ - --volume "$PWD":/code \ - --volume /var/run/docker.sock:/var/run/docker.sock \ - --volume ${TMPDIR}:/tmp/cc \ - codeclimate/codeclimate "$@" - - -if test -z "$@" -then - echo "----" - echo "Listing other engines (in dockers)" - docker images codeclimate/* -fi diff --git a/scripts/common-package.Makefile b/scripts/common-package.Makefile index 7dccecef79e..0fff1785d1c 100644 --- a/scripts/common-package.Makefile +++ b/scripts/common-package.Makefile @@ -38,7 +38,6 @@ info: ## displays package info @echo ' PACKAGE_VERSION : ${PACKAGE_VERSION}' - # # SUBTASKS # diff --git a/scripts/common-service.Makefile b/scripts/common-service.Makefile index 44c0cb0e9a8..57fb6e3b5b4 100644 --- a/scripts/common-service.Makefile +++ b/scripts/common-service.Makefile @@ -37,21 +37,23 @@ export APP_VERSION install-dev install-prod install-ci: _check_venv_active ## install app in 
development/production or CI mode # Installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt + @uv pip sync requirements/$(subst install-,,$@).txt + .PHONY: test-dev-unit test-ci-unit test-dev-integration test-ci-integration test-dev -TEST_SUBFOLDER := $(if $(test-subfolder),/$(test-subfolder),) -test-dev-unit test-ci-unit: _check_venv_active +TEST_PATH := $(if $(test-path),/$(patsubst tests/integration/%,%, $(patsubst tests/unit/%,%, $(patsubst %/,%,$(test-path)))),) + +test-dev-unit test-ci-unit: _check_venv_active ## run app unit tests (specifying test-path can restrict to a folder) # Targets tests/unit folder - @make --no-print-directory _run-$(subst -unit,,$@) target=$(CURDIR)/tests/unit$(TEST_SUBFOLDER) + @make --no-print-directory _run-$(subst -unit,,$@) target=$(CURDIR)/tests/unit$(TEST_PATH) -test-dev-integration test-ci-integration: +test-dev-integration test-ci-integration: ## run app integration tests (specifying test-path can restrict to a folder) # Targets tests/integration folder using local/$(image-name):production images @export DOCKER_REGISTRY=local; \ export DOCKER_IMAGE_TAG=production; \ - make --no-print-directory _run-$(subst -integration,,$@) target=$(CURDIR)/tests/integration$(TEST_SUBFOLDER) + make --no-print-directory _run-$(subst -integration,,$@) target=$(CURDIR)/tests/integration$(TEST_PATH) test-dev: test-dev-unit test-dev-integration ## runs unit and integration tests for development (e.g. w/ pdb) @@ -103,6 +105,7 @@ settings-schema.json: ## [container] dumps json-shcema of this service settings ${APP_CLI_NAME} settings --as-json-schema \ | sed --expression='1,/{/ {/{/!d}' \ > $@ + # Dumped '$(CURDIR)/$@' # NOTE: settings CLI prints some logs in the header from the boot and entrypoint scripts. We # use strema editor expression (sed --expression) to trim them: @@ -148,6 +151,7 @@ _run-test-dev: _check_venv_active --durations=10 \ --exitfirst \ --failed-first \ + --junitxml=junit.xml -o junit_family=legacy \ --keep-docker-up \ --pdb \ -vv \ @@ -166,9 +170,10 @@ _run-test-ci: _check_venv_active --cov-report=xml \ --cov=$(APP_PACKAGE_NAME) \ --durations=10 \ + --junitxml=junit.xml -o junit_family=legacy \ --keep-docker-up \ --log-date-format="%Y-%m-%d %H:%M:%S" \ - --log-format="%(asctime)s %(levelname)s %(message)s" \ + --log-format="%(asctime)s %(levelname)s %(message)s" \ --verbose \ -m "not heavy_load" \ $(PYTEST_ADDITIONAL_PARAMETERS) \ @@ -178,3 +183,23 @@ _run-test-ci: _check_venv_active .PHONY: _assert_target_defined _assert_target_defined: $(if $(target),,$(error unset argument 'target' is required)) + + + + +# +# OPENAPI SPECIFICATIONS ROUTINES +# + + +# specification of the used openapi-generator-cli (see also https://github.com/ITISFoundation/openapi-generator) +OPENAPI_GENERATOR_NAME := openapitools/openapi-generator-cli +OPENAPI_GENERATOR_TAG := latest +OPENAPI_GENERATOR_IMAGE := $(OPENAPI_GENERATOR_NAME):$(OPENAPI_GENERATOR_TAG) + +define validate_openapi_specs + # Validating OAS '$(1)' ... 
+ docker run --rm \ + --volume "$(CURDIR):/local" \ + $(OPENAPI_GENERATOR_IMAGE) validate --input-spec /local/$(strip $(1)) +endef diff --git a/scripts/common.Makefile b/scripts/common.Makefile index 56055c75a8d..0dc78b889dd 100644 --- a/scripts/common.Makefile +++ b/scripts/common.Makefile @@ -15,6 +15,13 @@ # defaults .DEFAULT_GOAL := help +# Colors +BLUE=\033[0;34m +GREEN=\033[0;32m +YELLOW=\033[0;33m +RED=\033[0;31m +NC=\033[0m # No Color + # Use bash not sh SHELL := /bin/bash @@ -48,7 +55,12 @@ VENV_DIR := $(abspath $(REPO_BASE_DIR)/.venv) DOT_ENV_FILE = $(abspath $(REPO_BASE_DIR)/.env) # utils -get_my_ip := $(shell hostname --all-ip-addresses | cut --delimiter=" " --fields=1) +get_my_ip := $(shell (hostname --all-ip-addresses || hostname -i) 2>/dev/null | cut --delimiter=" " --fields=1) + +IGNORE_DIR=.ignore + +$(IGNORE_DIR): # Used to produce .ignore folders which are auto excluded from version control (see .gitignore) + mkdir -p $(IGNORE_DIR) # # SHORTCUTS @@ -72,6 +84,12 @@ hel%: @echo "" +.env: .env-devel ## creates .env file from defaults in .env-devel + $(if $(wildcard $@), \ + @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\ + @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@) + + .PHONY: devenv devenv: ## build development environment @$(MAKE_C) $(REPO_BASE_DIR) $@ @@ -87,6 +105,8 @@ clean: ## cleans all unversioned files in project and temp files create by this @git clean $(_GIT_CLEAN_ARGS) + + .PHONY: info inf%: ## displays basic info # system @@ -95,14 +115,8 @@ inf%: ## displays basic info @echo ' NOW_TIMESTAMP : ${NOW_TIMESTAMP}' @echo ' VCS_URL : ${VCS_URL}' @echo ' VCS_REF : ${VCS_REF}' - # dev tools version - @echo ' make : $(shell make --version 2>&1 | head -n 1)' - @echo ' jq : $(shell jq --version)' - @echo ' awk : $(shell awk -W version 2>&1 | head -n 1)' - @echo ' node : $(shell node --version 2> /dev/null || echo ERROR nodejs missing)' - @echo ' python : $(shell python3 --version)' # installed in .venv - @pip list + @uv pip list # package setup -@echo ' name : ' $(shell python ${CURDIR}/setup.py --name) -@echo ' version : ' $(shell python ${CURDIR}/setup.py --version) @@ -130,21 +144,33 @@ pylint: $(REPO_BASE_DIR)/.pylintrc ## runs pylint (python linter) on src and tes @pylint --rcfile="$(REPO_BASE_DIR)/.pylintrc" -v $(CURDIR)/src $(CURDIR)/tests -.PHONY: mypy -mypy: $(REPO_BASE_DIR)/scripts/mypy.bash $(REPO_BASE_DIR)/mypy.ini ## runs mypy python static type-checker on this services's code. Use AFTER make install-* - @$(REPO_BASE_DIR)/scripts/mypy.bash src +.PHONY: doc-uml +doc-uml: $(IGNORE_DIR) ## Create UML diagrams for classes and modules in current package. e.g. 
(export DOC_UML_PATH_SUFFIX="services*"; export DOC_UML_CLASS=models_library.api_schemas_catalog.services.ServiceGet; make doc-uml) + @pyreverse \ + --verbose \ + --output=svg \ + --output-directory=$(IGNORE_DIR) \ + --project=$(if ${PACKAGE_NAME},${PACKAGE_NAME},${SERVICE_NAME})${DOC_UML_PATH_SUFFIX} \ + $(if ${DOC_UML_CLASS},--class=${DOC_UML_CLASS},) \ + ${SRC_DIR}$(if ${DOC_UML_PATH_SUFFIX},/${DOC_UML_PATH_SUFFIX},) + @echo Outputs in $(realpath $(IGNORE_DIR)) -.PHONY: codeclimate -codeclimate: $(REPO_BASE_DIR)/.codeclimate.yml ## runs code-climate analysis - # Copying config - cp $(REPO_BASE_DIR)/.codeclimate.yml $(CURDIR)/.codeclimate.yml - # Validates $< at ${PWD} - $(REPO_BASE_DIR)/scripts/code-climate.bash validate-config - # Running analysis - $(REPO_BASE_DIR)/scripts/code-climate.bash analyze - # Removing tmp config - @-rm $(CURDIR)/.codeclimate.yml +.PHONY: ruff +ruff: $(REPO_BASE_DIR)/.ruff.toml ## runs ruff (python fast linter) on src and tests folders + @ruff check \ + --config=$(REPO_BASE_DIR)/.ruff.toml \ + --respect-gitignore \ + $(CURDIR)/src \ + $(CURDIR)/tests + +.PHONY: mypy +mypy: $(REPO_BASE_DIR)/mypy.ini ## runs mypy python static type-checker on this services's code. Use AFTER make install-* + @mypy \ + --config-file=$(REPO_BASE_DIR)/mypy.ini \ + --show-error-context \ + --show-traceback \ + $(CURDIR)/src .PHONY: codestyle @@ -157,6 +183,7 @@ github-workflow-job: ## runs a github workflow job using act locally, run using $(SCRIPTS_DIR)/act.bash ../.. ${job} + .PHONY: version-patch version-minor version-major version-patch: ## commits version with bug fixes not affecting the cookiecuter config $(_bumpversion) diff --git a/scripts/demo-meta/meta_modeling_results.py b/scripts/demo-meta/meta_modeling_results.py index 4ff2a3a869b..04a19de3463 100644 --- a/scripts/demo-meta/meta_modeling_results.py +++ b/scripts/demo-meta/meta_modeling_results.py @@ -3,7 +3,6 @@ """ from collections import defaultdict -from typing import List from uuid import UUID import httpx @@ -24,24 +23,24 @@ def print_checkpoints(client: httpx.Client): - repos: List[ProjectRepo] = list(iter_repos(client)) + repos: list[ProjectRepo] = list(iter_repos(client)) project_id = repos[0].project_uuid for checkpoint in iter_checkpoints(client, project_id): - print(checkpoint.json(exclude_unset=True, indent=1)) + print(checkpoint.model_dump_json(exclude_unset=True, indent=1)) def print_iterations(client: httpx.Client, project_id: UUID, checkpoint: CheckPoint): # print-iterations print("Metaproject at", f"{project_id=}", f"{checkpoint=}") for project_iteration in iter_project_iteration(client, project_id, checkpoint.id): - print(project_iteration.json(exclude_unset=True, indent=1)) + print(project_iteration.model_dump_json(exclude_unset=True, indent=1)) def select_project_head(client: httpx.Client, project_id: UUID): # get head r = client.get(f"/repos/projects/{project_id}/checkpoints/HEAD") - head = Envelope[CheckPoint].parse_obj(r.json()).data + head = Envelope[CheckPoint].model_validate(r.json()).data assert head # nosec return project_id, head diff --git a/scripts/demo-meta/osparc_webapi.py b/scripts/demo-meta/osparc_webapi.py index 143be23daa1..8d76e8be369 100644 --- a/scripts/demo-meta/osparc_webapi.py +++ b/scripts/demo-meta/osparc_webapi.py @@ -7,7 +7,7 @@ from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import Any, Generic, Iterator, Optional, Type, TypeVar +from typing import Annotated, Any, Generic, Iterator, TypeVar from uuid import UUID 
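The two demo scripts touched around this point are ported from pydantic v1 to v2: `GenericModel` subclasses become plain `BaseModel` generics, and `parse_obj`/`parse_raw`/`.json()` become `model_validate`/`model_validate_json`/`model_dump_json`. A minimal, self-contained sketch of the v2 pattern (the `Item` model and payload are invented for illustration):

```python
from typing import Generic, TypeVar

from pydantic import BaseModel

DataT = TypeVar("DataT")


class Envelope(BaseModel, Generic[DataT]):
    data: DataT | None = None
    error: str | None = None


class Item(BaseModel):
    name: str


# parse_obj -> model_validate, .json() -> model_dump_json, parse_raw -> model_validate_json
envelope = Envelope[Item].model_validate({"data": {"name": "example"}})
print(envelope.model_dump_json(exclude_unset=True, indent=1))

same = Envelope[Item].model_validate_json(envelope.model_dump_json())
assert same.data and same.data.name == "example"
```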
import httpx @@ -16,15 +16,13 @@ AnyHttpUrl, AnyUrl, BaseModel, - BaseSettings, EmailStr, Field, NonNegativeInt, SecretStr, ValidationError, - conint, ) -from pydantic.generics import GenericModel +from pydantic_settings import BaseSettings, SettingsConfigDict log = logging.getLogger(__name__) logging.basicConfig(level=getattr(logging, os.environ.get("LOG_LEVEL", "INFO"))) @@ -46,32 +44,32 @@ class Meta(BaseModel): class PageLinks(BaseModel): self: AnyHttpUrl first: AnyHttpUrl - prev: Optional[AnyHttpUrl] - next: Optional[AnyHttpUrl] + prev: AnyHttpUrl | None + next: AnyHttpUrl | None last: AnyHttpUrl -class Page(GenericModel, Generic[ItemT]): +class Page(BaseModel, Generic[ItemT]): meta: Meta = Field(..., alias="_meta") data: list[ItemT] links: PageLinks = Field(..., alias="_links") -class Envelope(GenericModel, Generic[DataT]): - data: Optional[DataT] - error: Optional[Any] +class Envelope(BaseModel, Generic[DataT]): + data: DataT | None + error: Any | None @classmethod def parse_data(cls, obj): - return cls.parse_obj({"data": obj}) + return cls.model_validate({"data": obj}) class CheckPoint(BaseModel): id: NonNegativeInt checksum: str - tag: Optional[str] = None - message: Optional[str] = None - parent: Optional[NonNegativeInt] = None + tag: str | None = None + message: str | None = None + parent: NonNegativeInt | None = None created_at: datetime @@ -98,7 +96,7 @@ class ProjectIteration(BaseModel): class ExtractedResults(BaseModel): - progress: dict[NodeIDStr, conint(ge=0, le=100)] = Field( + progress: dict[NodeIDStr, Annotated[int, Field(ge=0, le=100)]] = Field( ..., description="Progress in each computational node" ) labels: dict[NodeIDStr, str] = Field( @@ -140,19 +138,19 @@ def login(client: httpx.Client, user: str, password: str): def get_profile(client: httpx.Client): r = client.get("/me") - assert r.status_code == 200 + assert r.status_code == httpx.codes.OK return r.json()["data"] def iter_items( - client: httpx.Client, url_path: str, item_cls: Type[ItemT] + client: httpx.Client, url_path: str, item_cls: type[ItemT] ) -> Iterator[ItemT]: """iterates items returned by a List std-method SEE https://google.aip.dev/132 """ - def _relative_url_path(page_link: Optional[AnyHttpUrl]) -> Optional[str]: + def _relative_url_path(page_link: AnyHttpUrl | None) -> str | None: if page_link: return f"{page_link.path}".replace(client.base_url.path, "") return None @@ -165,9 +163,8 @@ def _relative_url_path(page_link: Optional[AnyHttpUrl]) -> Optional[str]: r = client.get(next_url) r.raise_for_status() - page = Page[item_cls].parse_raw(r.text) - for item in page.data: - yield item + page = Page[item_cls].model_validate_json(r.text) + yield from page.data next_url = _relative_url_path(page.links.next) last_url = _relative_url_path(page.links.last) @@ -198,16 +195,17 @@ def iter_project_iteration( # SETUP ------------------------------------------ class ClientSettings(BaseSettings): - OSPARC_API_URL: AnyUrl = Field(default="http://127.0.0.1.nip.io:9081/v0") + OSPARC_API_URL: AnyUrl = Field( + default="http://127.0.0.1.nip.io:9081/v0" + ) # NOSONAR OSPARC_USER_EMAIL: EmailStr OSPARC_USER_PASSWORD: SecretStr - class Config: - env_file = ".env-osparc-web.ignore" + model_config = SettingsConfigDict(env_file=".env-osparc-web.ignore") def init(): - env_file = Path(ClientSettings.Config.env_file) + env_file = Path(ClientSettings.model_config.env_file) log.info("Creating %s", f"{env_file}") kwargs = {} kwargs["OSPARC_API_URL"] = input("OSPARC_API_URL: ").strip() or None @@ -215,7 +213,7 @@ def init(): 
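`ClientSettings` in this hunk moves from pydantic v1's `BaseSettings` with an inner `Config` class to `pydantic_settings.BaseSettings` with `SettingsConfigDict`. A minimal sketch of that pattern, mirroring the fields above (EmailStr is simplified to str here); note that in v2 `model_config` is a plain dict, so individual entries are read by key:

```python
from pydantic import AnyUrl, Field, SecretStr
from pydantic_settings import BaseSettings, SettingsConfigDict


class ClientSettings(BaseSettings):
    model_config = SettingsConfigDict(env_file=".env-osparc-web.ignore")

    OSPARC_API_URL: AnyUrl = Field(default="http://127.0.0.1.nip.io:9081/v0")
    OSPARC_USER_EMAIL: str  # EmailStr in the script above (requires email-validator)
    OSPARC_USER_PASSWORD: SecretStr


# model_config is a SettingsConfigDict (a TypedDict), so access entries by key
print(ClientSettings.model_config["env_file"])  # -> .env-osparc-web.ignore
```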
input("OSPARC_USER_EMAIL: ") or getpass.getuser() + "@itis.swiss" ) kwargs["OSPARC_USER_PASSWORD"] = getpass.getpass() - with open(env_file, "wt") as fh: + with env_file.open("w") as fh: for key, value in kwargs.items(): print(key, value) if value is not None: @@ -234,7 +232,7 @@ def query_if_invalid_config(): def setup_client() -> Iterator[httpx.Client]: settings = ClientSettings() - client = httpx.Client(base_url=settings.OSPARC_API_URL) + client = httpx.Client(base_url=f"{settings.OSPARC_API_URL}") try: # check if online and login print(ping(client)) diff --git a/scripts/demo-meta/requirements.txt b/scripts/demo-meta/requirements.txt index 4aa7c6af281..9b9b3316253 100644 --- a/scripts/demo-meta/requirements.txt +++ b/scripts/demo-meta/requirements.txt @@ -1,4 +1,7 @@ +setuptools>=45 +packaging>=20.9 httpx pandas pydantic[dotenv,email] +pydantic-settings tabulate diff --git a/scripts/demo/create_portal_markdown.py b/scripts/demo/create_portal_markdown.py deleted file mode 100644 index 217e046d28a..00000000000 --- a/scripts/demo/create_portal_markdown.py +++ /dev/null @@ -1,156 +0,0 @@ -""" This script produces a markdown document with links to template studies - - Aims to emulate links - -""" - -# TODO: extend cli to generate invitations use jinja templates (see folder) - -import argparse -import json -import logging -import sys -from contextlib import contextmanager -from datetime import datetime -from pathlib import Path -from string import ascii_uppercase -from typing import Optional - -from simcore_service_webserver.login._registration import get_invitation_url -from simcore_service_webserver.login.utils import get_random_string -from yarl import URL - -current_path = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve() -current_dir = current_path.parent - -logging.basicConfig(level=logging.INFO) -log = logging.getLogger(__name__) - -CONFIRMATIONS_FILENAME = "ignore-confirmations.csv" - -ISSUE = r"https://github.com/ITISFoundation/osparc-simcore/issues/" - -HOST_URLS_MAPS = [ - ("localhost", r"http://127.0.0.1:9081"), - ("master", r"https://master.osparc.io"), - ("staging", r"https://staging.osparc.io"), - ("production", r"https://osparc.io"), -] - -N = len(ascii_uppercase) -NUM_CODES = 15 -CODE_LEN = 30 -default_mock_codes = [ascii_uppercase[i % N] * CODE_LEN for i in range(NUM_CODES)] - -params = {} -params["194bb264-a717-11e9-9dff-02420aff2767"] = { - "stimulation_mode": "1", - "stimulation_level": "0.5", -} - - -@contextmanager -def _open(filepath): - filepath = Path(filepath) - log.info("Writing %s ... 
", filepath) - with open(filepath, "wt") as fh: - yield fh - log.info("%s ready", filepath.name) - - -def write_list(hostname, url, data, fh): - origin = URL(url) - - print(f"## studies available @{hostname}", file=fh) - print("", file=fh) - for prj in data: - prj["msg"] = "" - study_url = origin.with_path("study/{uuid}".format(**prj)) - if prj["uuid"] in params: - prj_params = params[prj["uuid"]] - study_url = study_url.with_query(**prj_params) - prj["msg"] = "with " + "and ".join( - [f"{k}={v} " for k, v in prj_params.items()] - ) - print( - "- [{name}]({study_url}) {msg}".format(study_url=str(study_url), **prj), - file=fh, - ) - print("", file=fh) - - -def main(mock_codes, *, trial_account_days: Optional[int] = None, uid: int = 1): - data = {} - - with open(current_dir / "template-projects/templates_in_master.json") as fp: - data["master"] = json.load(fp) - - file_path = str(current_path.with_suffix(".md")).replace("create_", "ignore-") - with _open(file_path) as fh: - print( - "".format( - current_path.name, datetime.utcnow() - ), - file=fh, - ) - print("# THE PORTAL Emulator\n", file=fh) - print( - "This pages is for testing purposes for issue [#{1}]({0}{1})\n".format( - ISSUE, 715 - ), - file=fh, - ) - for hostname, url in HOST_URLS_MAPS: - write_list(hostname, url, data.get(hostname, []), fh) - - print("---", file=fh) - - print("# INVITATIONS Samples:", file=fh) - for hostname, url in HOST_URLS_MAPS: - print(f"## urls for @{hostname}", file=fh) - for code in mock_codes: - print( - "- {}".format( - get_invitation_url( - {"code": code, "action": "INVITATION"}, URL(url) - ), - code=code, - ), - file=fh, - ) - - print("", file=fh) - - today: datetime = datetime.today() - file_path = current_path.parent / CONFIRMATIONS_FILENAME - with _open(file_path) as fh: - print("code,user_id,action,data,created_at", file=fh) - for n, code in enumerate(mock_codes, start=1): - print(f'{code},{uid},INVITATION,"{{', file=fh) - print( - f'""tag"": ""invitation-{today.year:04d}{today.month:02d}{today.day:02d}-{n}"" ,', - file=fh, - ) - print('""issuer"" : ""support@osparc.io"" ,', file=fh) - print(f'""trial_account_days"" : ""{trial_account_days}""', file=fh) - print('}",%s' % datetime.now().isoformat(sep=" "), file=fh) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Generates some material for demos") - parser.add_argument( - "--renew-invitation-codes", - "-c", - action="store_true", - help="Regenerates codes for invitations", - ) - parser.add_argument("--user-id", "-u", default=1) - parser.add_argument("--trial-days", "-t", default=7) - - args = parser.parse_args() - - codes = default_mock_codes - if args.renew_invitation_codes: - codes = [get_random_string(len(c)) for c in default_mock_codes] - - main(codes, uid=args.user_id, trial_account_days=args.trial_days) diff --git a/scripts/demo/template-projects/converter.py b/scripts/demo/template-projects/converter.py deleted file mode 100644 index 43ca3ecafe5..00000000000 --- a/scripts/demo/template-projects/converter.py +++ /dev/null @@ -1,73 +0,0 @@ -import csv -import json -import sys -from pathlib import Path - -from simcore_service_webserver.projects.projects_db_utils import convert_to_schema_names - -SEPARATOR = "," - -current_file = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve() -current_dir = current_file.parent - - -def load_csv(csv_filepath: Path) -> list[dict]: - headers, items = [], [] - with open(csv_filepath, encoding="utf-8-sig") as fhandler: - reader = csv.reader(fhandler, delimiter=",", 
quotechar='"') - for row in reader: - if row: - if not headers: - headers = row - else: - item = {key: row[i] for i, key in enumerate(headers)} - items.append(item) - return items - - -def load_projects(csv_path: Path): - """Returns schema-compatible projects""" - db_projects = load_csv(csv_path) - _projects = [] - - fake_email = "my.email@osparc.io" - # process - for db_prj in db_projects: - if int(db_prj.get("published", 0) or 0) == 1: - prj = convert_to_schema_names(db_prj, fake_email) - - # jsonifies - dump = prj["workbench"] - # TODO: use Encoder instead? - dump = ( - dump.replace("False", "false") - .replace("True", "true") - .replace("None", "null") - ) - try: - prj["workbench"] = json.loads(dump) - except json.decoder.JSONDecodeError as err: - print(err) - - # TODO: validate against project schema!! - - _projects.append(prj) - else: - print("skipping {}".format(db_prj["name"])) - - return _projects - - -def main(): - """ - Converts csv exported from db into project schema-compatible json files - """ - for db_csv_export in current_dir.glob("template*.csv"): - data_projects = load_projects(db_csv_export) - json_path = db_csv_export.with_suffix(".json") - with open(json_path, "w") as fh: - json.dump(data_projects, fh, indent=2) - - -if __name__ == "__main__": - main() diff --git a/scripts/demo/template-projects/templates_in_master.csv b/scripts/demo/template-projects/templates_in_master.csv deleted file mode 100644 index da6b8673408..00000000000 --- a/scripts/demo/template-projects/templates_in_master.csv +++ /dev/null @@ -1,10 +0,0 @@ -ο»Ώtype,uuid,name,description,thumbnail,prj_owner,creation_date,last_change_date,workbench,published -TEMPLATE,5a6d7f24-ee9a-4112-bca2-85a8ca49234a,ISAN2019: 3D Paraview,3D Paraview viewer with two inputs,https://user-images.githubusercontent.com/33152403/60168939-073a5580-9806-11e9-8dad-8a7caa3eb5ab.png,maiz@itis.swiss,2019-06-06 14:33:43.065,2019-06-06 14:33:44.747,"{""de2578c5-431e-5753-af37-e6aec8120bf2"": {""key"": ""simcore/services/frontend/file-picker"", ""version"": ""1.0.0"", ""label"": ""File Picker 1"", ""inputs"": {}, ""inputNodes"": [], ""outputs"": {""outFile"": {""store"": 1, ""path"": ""Shared Data/HField_Big.vtk""}}, ""progress"": 100, ""thumbnail"": """", ""position"": {""x"": 100, ""y"": 100}}, ""de2578c5-431e-522c-a377-dd8d7cd1265b"": {""key"": ""simcore/services/frontend/file-picker"", ""version"": ""1.0.0"", ""label"": ""File Picker 2"", ""inputs"": {}, ""inputNodes"": [], ""outputs"": {""outFile"": {""store"": 1, ""path"": ""Shared Data/bunny.vtk""}}, ""progress"": 100, ""thumbnail"": """", ""position"": {""x"": 100, ""y"": 250}}, ""de2578c5-431e-9b0f-67677a20996c"": {""key"": ""simcore/services/dynamic/3d-viewer"", ""version"": ""2.10.0"", ""label"": ""3D ParaViewer"", ""inputs"": {""A"": {""nodeUuid"": ""de2578c5-431e-5753-af37-e6aec8120bf2"", ""output"": ""outFile""}, ""B"": {""nodeUuid"": ""de2578c5-431e-522c-a377-dd8d7cd1265b"", ""output"": ""outFile""}}, ""inputNodes"": [""de2578c5-431e-5753-af37-e6aec8120bf2"", ""de2578c5-431e-522c-a377-dd8d7cd1265b""], ""outputs"": {}, ""progress"": 85, ""thumbnail"": """", ""position"": {""x"": 400, ""y"": 175}}}",1 -TEMPLATE,ed6c2f58-dc16-445d-bb97-e989e2611603,Sleepers,5 sleepers interconnected,"",maiz@itis.swiss,2019-06-06 14:34:19.631,2019-06-06 14:34:28.647,"{""027e3ff9-3119-45dd-b8a2-2e31661a7385"": {""key"": ""simcore/services/comp/itis/sleeper"", ""version"": ""1.0.0"", ""label"": ""sleeper 0"", ""inputs"": {""in_2"": 2}, ""inputAccess"": {""in_1"": ""Invisible"", ""in_2"": 
""ReadOnly""}, ""inputNodes"": [], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 50, ""y"": 300}}, ""562aaea9-95ff-46f3-8e84-db8f3c9e3a39"": {""key"": ""simcore/services/comp/itis/sleeper"", ""version"": ""1.0.0"", ""label"": ""sleeper 1"", ""inputs"": {""in_1"": {""nodeUuid"": ""027e3ff9-3119-45dd-b8a2-2e31661a7385"", ""output"": ""out_1""}, ""in_2"": 2}, ""inputNodes"": [""027e3ff9-3119-45dd-b8a2-2e31661a7385""], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 300, ""y"": 200}}, ""bf405067-d168-44ba-b6dc-bb3e08542f92"": {""key"": ""simcore/services/comp/itis/sleeper"", ""version"": ""1.0.0"", ""label"": ""sleeper 2"", ""inputs"": {""in_1"": {""nodeUuid"": ""562aaea9-95ff-46f3-8e84-db8f3c9e3a39"", ""output"": ""out_1""}, ""in_2"": {""nodeUuid"": ""562aaea9-95ff-46f3-8e84-db8f3c9e3a39"", ""output"": ""out_2""}}, ""inputNodes"": [""562aaea9-95ff-46f3-8e84-db8f3c9e3a39""], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 550, ""y"": 200}}, ""de2578c5-431e-5065-a079-a5a0476e3c10"": {""key"": ""simcore/services/comp/itis/sleeper"", ""version"": ""1.0.0"", ""label"": ""sleeper 3"", ""inputs"": {""in_2"": {""nodeUuid"": ""027e3ff9-3119-45dd-b8a2-2e31661a7385"", ""output"": ""out_2""}}, ""inputNodes"": [""027e3ff9-3119-45dd-b8a2-2e31661a7385""], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 420, ""y"": 400}}, ""de2578c5-431e-559d-aa19-dc9293e10e4c"": {""key"": ""simcore/services/comp/itis/sleeper"", ""version"": ""1.0.0"", ""label"": ""sleeper 4"", ""inputs"": {""in_1"": {""nodeUuid"": ""bf405067-d168-44ba-b6dc-bb3e08542f92"", ""output"": ""out_1""}, ""in_2"": {""nodeUuid"": ""de2578c5-431e-5065-a079-a5a0476e3c10"", ""output"": ""out_2""}}, ""inputNodes"": [""bf405067-d168-44ba-b6dc-bb3e08542f92"", ""de2578c5-431e-5065-a079-a5a0476e3c10""], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 800, ""y"": 300}}}", -TEMPLATE,03a1eb8d-5f86-4cb5-8397-8bc50e9a5ccc,Kember use case,Kember Cordiac Model with PostPro Viewer,"",maiz@itis.swiss,2019-06-06 14:34:41.832,2019-06-06 14:34:44.981,"{""de2578c5-431e-5f7f-af9c-e91b64c4989e"": {""key"": ""simcore/services/comp/kember-cardiac-model"", ""version"": ""1.0.0"", ""label"": ""Kember cardiac model"", ""inputs"": {""dt"": 0.01, ""T"": 1000, ""forcing_factor"": 0}, ""inputNodes"": [], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 50, ""y"": 100}}, ""de2578c5-431e-50b8-8ae7-b2a10131cc8f"": {""key"": ""simcore/services/dynamic/kember-viewer"", ""version"": ""2.9.0"", ""label"": ""kember-viewer"", ""inputs"": {""outputController"": {""nodeUuid"": ""de2578c5-431e-5f7f-af9c-e91b64c4989e"", ""output"": ""out_1""}}, ""inputNodes"": [""de2578c5-431e-5f7f-af9c-e91b64c4989e""], ""outputs"": {}, ""progress"": 10, ""thumbnail"": """", ""position"": {""x"": 300, ""y"": 100}}}", -TEMPLATE,194bb264-a717-11e9-9dff-02420aff2767,ISAN: osparc-opencor,"We are using the Fabbri et al (2017) sinoatrial cell model: https://models.physiomeproject.org/e/568 - -The model includes autonomic modulation via inclusion of the effects of ACh on If, ICaL, SR calcium uptake, and IK,ACh; and the effect of isoprenaline on If, ICaL, INaK, maximal Ca uptake, and IKs. We are varying the concentration of ACh according to the stimulation level, while isoprenaline is encoded to be ""on"" or ""off"" only (we use the ""on"" version in this exemplar). 
The range of ACh we're allowing is beyond what has been presented in the paper.",https://user-images.githubusercontent.com/33152403/61133437-be4cf700-a4bd-11e9-8b2a-c6425e15abea.png,crespo@itis.swiss,2019-07-15 15:42:06.208,2019-07-15 15:42:06.208,"{""f631a142-d3b6-435d-abfb-8ad4acb91a70"": {""key"": ""simcore/services/comp/osparc-opencor"", ""version"": ""0.3.0"", ""label"": ""osparc-opencor"", ""inputs"": {""stimulation_mode"": ""{{stimulation_mode}}"", ""stimulation_level"": ""{{stimulation_level}}""}, ""inputNodes"": [], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 587, ""y"": 279}}, ""744c9209-0450-4272-8357-4a08cf7f8458"": {""key"": ""simcore/services/dynamic/raw-graphs"", ""version"": ""2.10.2"", ""label"": ""2D plot"", ""inputs"": {""input_1"": {""nodeUuid"": ""f631a142-d3b6-435d-abfb-8ad4acb91a70"", ""output"": ""membrane_potential_csv""}}, ""inputNodes"": [""f631a142-d3b6-435d-abfb-8ad4acb91a70""], ""outputs"": {}, ""progress"": 100, ""thumbnail"": """", ""position"": {""x"": 933, ""y"": 228}}}",1 -TEMPLATE,de2578c5-431e-5716-bedd-b409bb021760,ISAN: 2D Plot,2D RawGraphs viewer with one input,https://user-images.githubusercontent.com/33152403/60168938-06a1bf00-9806-11e9-99ff-20c52d851add.png,maiz@itis.swiss,2019-06-06 14:33:34.008,2019-06-06 14:33:35.825,"{""de2578c5-431e-58e5-884e-2690b3d54f11"": {""key"": ""simcore/services/frontend/file-picker"", ""version"": ""1.0.0"", ""label"": ""File Picker"", ""inputs"": {}, ""inputNodes"": [], ""outputs"": {""outFile"": {""store"": 1, ""path"": ""Shared Data/Height-Weight""}}, ""progress"": 100, ""thumbnail"": """", ""position"": {""x"": 100, ""y"": 100}}, ""de2578c5-431e-58a5-83d7-57d0feffbfea"": {""key"": ""simcore/services/dynamic/raw-graphs"", ""version"": ""2.8.0"", ""label"": ""2D plot"", ""inputs"": {""input_1"": {""nodeUuid"": ""de2578c5-431e-58e5-884e-2690b3d54f11"", ""output"": ""outFile""}}, ""inputNodes"": [""de2578c5-431e-58e5-884e-2690b3d54f11""], ""outputs"": {}, ""progress"": 90, ""thumbnail"": """", ""position"": {""x"": 400, ""y"": 100}}}",1 -TEMPLATE,de2578c5-431e-5d82-b08d-d39c436ca738,ISAN: UCDavis use case: 0D,Colleen Clancy Single Cell solver with a file picker and PostPro viewer,https://user-images.githubusercontent.com/33152403/60168940-073a5580-9806-11e9-9a44-ae5266eeb020.png,maiz@itis.swiss,2019-06-06 14:33:51.94,2019-06-06 14:33:54.329,"{""de2578c5-431e-59d6-b1a5-6e7b2773636b"": {""key"": ""simcore/services/frontend/file-picker"", ""version"": ""1.0.0"", ""label"": ""File Picker 0D"", ""inputs"": {}, ""inputNodes"": [], ""outputs"": {""outFile"": {""store"": 1, ""path"": ""Shared Data/initial_WStates""}}, ""progress"": 100, ""thumbnail"": """", ""position"": {""x"": 50, ""y"": 150}}, ""de2578c5-431e-562f-afd1-cca5105c8844"": {""key"": ""simcore/services/comp/ucdavis-singlecell-cardiac-model"", ""version"": ""1.0.0"", ""label"": ""DBP-Clancy-Rabbit-Single-Cell solver"", ""inputs"": {""Na"": 0, ""Kr"": 0, ""BCL"": 200, ""NBeats"": 5, ""Ligand"": 0, ""cAMKII"": ""WT"", ""initfile"": {""nodeUuid"": ""de2578c5-431e-59d6-b1a5-6e7b2773636b"", ""output"": ""outFile""}}, ""inputAccess"": {""Na"": ""ReadAndWrite"", ""Kr"": ""ReadOnly"", ""BCL"": ""ReadAndWrite"", ""NBeats"": ""ReadOnly"", ""Ligand"": ""Invisible"", ""cAMKII"": ""Invisible""}, ""inputNodes"": [""de2578c5-431e-59d6-b1a5-6e7b2773636b""], ""outputs"": {}, ""progress"": 0, ""thumbnail"": """", ""position"": {""x"": 300, ""y"": 150}}, ""de2578c5-431e-5fdd-9daa-cb03c51d8138"": {""key"": ""simcore/services/dynamic/cc-0d-viewer"", 
""version"": ""2.8.0"", ""label"": ""cc-0d-viewer"", ""inputs"": {""vm_1Hz"": {""nodeUuid"": ""de2578c5-431e-562f-afd1-cca5105c8844"", ""output"": ""out_4""}, ""all_results_1Hz"": {""nodeUuid"": ""de2578c5-431e-562f-afd1-cca5105c8844"", ""output"": ""out_1""}}, ""inputNodes"": [""de2578c5-431e-562f-afd1-cca5105c8844""], ""outputs"": {}, ""progress"": 20, ""thumbnail"": """", ""position"": {""x"": 550, ""y"": 150}}}",1 -TEMPLATE,de2578c5-431e-5a9e-9580-c53d92d18803,ISAN: MattWard use case,MattWard Solver/PostPro viewer,https://user-images.githubusercontent.com/33152403/60168942-073a5580-9806-11e9-9162-3683dcff0711.png,maiz@itis.swiss,2019-06-06 14:33:58.681,2019-06-06 14:34:01.617,"{""de2578c5-431e-523c-8caa-4ca36c927ca2"": {""key"": ""simcore/services/dynamic/mattward-viewer"", ""version"": ""2.9.0"", ""label"": ""MattWard"", ""inputs"": {}, ""inputNodes"": [], ""outputs"": {}, ""progress"": 55, ""thumbnail"": """", ""position"": {""x"": 100, ""y"": 100}}}",1 diff --git a/scripts/demo/template-projects/templates_in_master.json b/scripts/demo/template-projects/templates_in_master.json deleted file mode 100644 index dd899d50169..00000000000 --- a/scripts/demo/template-projects/templates_in_master.json +++ /dev/null @@ -1,281 +0,0 @@ -[ - { - "uuid": "5a6d7f24-ee9a-4112-bca2-85a8ca49234a", - "name": "ISAN2019: 3D Paraview", - "description": "3D Paraview viewer with two inputs", - "thumbnail": "https://user-images.githubusercontent.com/33152403/60168939-073a5580-9806-11e9-8dad-8a7caa3eb5ab.png", - "prjOwner": "maiz@itis.swiss", - "creationDate": "2019-06-06 14:33:43.065", - "lastChangeDate": "2019-06-06 14:33:44.747", - "workbench": { - "de2578c5-431e-5753-af37-e6aec8120bf2": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "File Picker 1", - "inputs": {}, - "inputNodes": [], - "outputs": { - "outFile": { - "store": 1, - "path": "Shared Data/HField_Big.vtk" - } - }, - "progress": 100, - "position": { - "x": 100, - "y": 100 - } - }, - "de2578c5-431e-522c-a377-dd8d7cd1265b": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "File Picker 2", - "inputs": {}, - "inputNodes": [], - "outputs": { - "outFile": { - "store": 1, - "path": "Shared Data/bunny.vtk" - } - }, - "progress": 100, - "position": { - "x": 100, - "y": 250 - } - }, - "de2578c5-431e-9b0f-67677a20996c": { - "key": "simcore/services/dynamic/3d-viewer", - "version": "2.10.0", - "label": "3D ParaViewer", - "inputs": { - "A": { - "nodeUuid": "de2578c5-431e-5753-af37-e6aec8120bf2", - "output": "outFile" - }, - "B": { - "nodeUuid": "de2578c5-431e-522c-a377-dd8d7cd1265b", - "output": "outFile" - } - }, - "inputNodes": [ - "de2578c5-431e-5753-af37-e6aec8120bf2", - "de2578c5-431e-522c-a377-dd8d7cd1265b" - ], - "outputs": {}, - "progress": 85, - "position": { - "x": 400, - "y": 175 - } - } - } - }, - { - "uuid": "194bb264-a717-11e9-9dff-02420aff2767", - "name": "ISAN: osparc-opencor", - "description": "We are using the Fabbri et al (2017) sinoatrial cell model: https://models.physiomeproject.org/e/568\n\nThe model includes autonomic modulation via inclusion of the effects of ACh on If, ICaL, SR calcium uptake, and IK,ACh; and the effect of isoprenaline on If, ICaL, INaK, maximal Ca uptake, and IKs. We are varying the concentration of ACh according to the stimulation level, while isoprenaline is encoded to be \"on\" or \"off\" only (we use the \"on\" version in this exemplar). 
The range of ACh we're allowing is beyond what has been presented in the paper.", - "thumbnail": "https://user-images.githubusercontent.com/33152403/61133437-be4cf700-a4bd-11e9-8b2a-c6425e15abea.png", - "prjOwner": "crespo@itis.swiss", - "creationDate": "2019-07-15 15:42:06.208", - "lastChangeDate": "2019-07-15 15:42:06.208", - "workbench": { - "f631a142-d3b6-435d-abfb-8ad4acb91a70": { - "key": "simcore/services/comp/osparc-opencor", - "version": "0.3.0", - "label": "osparc-opencor", - "inputs": { - "stimulation_mode": "{{stimulation_mode}}", - "stimulation_level": "{{stimulation_level}}" - }, - "inputNodes": [], - "outputs": {}, - "progress": 0, - "position": { - "x": 587, - "y": 279 - } - }, - "744c9209-0450-4272-8357-4a08cf7f8458": { - "key": "simcore/services/dynamic/raw-graphs", - "version": "2.10.2", - "label": "2D plot", - "inputs": { - "input_1": { - "nodeUuid": "f631a142-d3b6-435d-abfb-8ad4acb91a70", - "output": "membrane_potential_csv" - } - }, - "inputNodes": [ - "f631a142-d3b6-435d-abfb-8ad4acb91a70" - ], - "outputs": {}, - "progress": 100, - "position": { - "x": 933, - "y": 228 - } - } - } - }, - { - "uuid": "de2578c5-431e-5716-bedd-b409bb021760", - "name": "ISAN: 2D Plot", - "description": "2D RawGraphs viewer with one input", - "thumbnail": "https://user-images.githubusercontent.com/33152403/60168938-06a1bf00-9806-11e9-99ff-20c52d851add.png", - "prjOwner": "maiz@itis.swiss", - "creationDate": "2019-06-06 14:33:34.008", - "lastChangeDate": "2019-06-06 14:33:35.825", - "workbench": { - "de2578c5-431e-58e5-884e-2690b3d54f11": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "File Picker", - "inputs": {}, - "inputNodes": [], - "outputs": { - "outFile": { - "store": 1, - "path": "Shared Data/Height-Weight" - } - }, - "progress": 100, - "position": { - "x": 100, - "y": 100 - } - }, - "de2578c5-431e-58a5-83d7-57d0feffbfea": { - "key": "simcore/services/dynamic/raw-graphs", - "version": "2.8.0", - "label": "2D plot", - "inputs": { - "input_1": { - "nodeUuid": "de2578c5-431e-58e5-884e-2690b3d54f11", - "output": "outFile" - } - }, - "inputNodes": [ - "de2578c5-431e-58e5-884e-2690b3d54f11" - ], - "outputs": {}, - "progress": 90, - "position": { - "x": 400, - "y": 100 - } - } - } - }, - { - "uuid": "de2578c5-431e-5d82-b08d-d39c436ca738", - "name": "ISAN: UCDavis use case: 0D", - "description": "Colleen Clancy Single Cell solver with a file picker and PostPro viewer", - "thumbnail": "https://user-images.githubusercontent.com/33152403/60168940-073a5580-9806-11e9-9a44-ae5266eeb020.png", - "prjOwner": "maiz@itis.swiss", - "creationDate": "2019-06-06 14:33:51.94", - "lastChangeDate": "2019-06-06 14:33:54.329", - "workbench": { - "de2578c5-431e-59d6-b1a5-6e7b2773636b": { - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "label": "File Picker 0D", - "inputs": {}, - "inputNodes": [], - "outputs": { - "outFile": { - "store": 1, - "path": "Shared Data/initial_WStates" - } - }, - "progress": 100, - "position": { - "x": 50, - "y": 150 - } - }, - "de2578c5-431e-562f-afd1-cca5105c8844": { - "key": "simcore/services/comp/ucdavis-singlecell-cardiac-model", - "version": "1.0.0", - "label": "DBP-Clancy-Rabbit-Single-Cell solver", - "inputs": { - "Na": 0, - "Kr": 0, - "BCL": 200, - "NBeats": 5, - "Ligand": 0, - "cAMKII": "WT", - "initfile": { - "nodeUuid": "de2578c5-431e-59d6-b1a5-6e7b2773636b", - "output": "outFile" - } - }, - "inputAccess": { - "Na": "ReadAndWrite", - "Kr": "ReadOnly", - "BCL": "ReadAndWrite", - "NBeats": "ReadOnly", - 
"Ligand": "Invisible", - "cAMKII": "Invisible" - }, - "inputNodes": [ - "de2578c5-431e-59d6-b1a5-6e7b2773636b" - ], - "outputs": {}, - "progress": 0, - "position": { - "x": 300, - "y": 150 - } - }, - "de2578c5-431e-5fdd-9daa-cb03c51d8138": { - "key": "simcore/services/dynamic/cc-0d-viewer", - "version": "2.8.0", - "label": "cc-0d-viewer", - "inputs": { - "vm_1Hz": { - "nodeUuid": "de2578c5-431e-562f-afd1-cca5105c8844", - "output": "out_4" - }, - "all_results_1Hz": { - "nodeUuid": "de2578c5-431e-562f-afd1-cca5105c8844", - "output": "out_1" - } - }, - "inputNodes": [ - "de2578c5-431e-562f-afd1-cca5105c8844" - ], - "outputs": {}, - "progress": 20, - "position": { - "x": 550, - "y": 150 - } - } - } - }, - { - "uuid": "de2578c5-431e-5a9e-9580-c53d92d18803", - "name": "ISAN: MattWard use case", - "description": "MattWard Solver/PostPro viewer", - "thumbnail": "https://user-images.githubusercontent.com/33152403/60168942-073a5580-9806-11e9-9162-3683dcff0711.png", - "prjOwner": "maiz@itis.swiss", - "creationDate": "2019-06-06 14:33:58.681", - "lastChangeDate": "2019-06-06 14:34:01.617", - "workbench": { - "de2578c5-431e-523c-8caa-4ca36c927ca2": { - "key": "simcore/services/dynamic/mattward-viewer", - "version": "2.9.0", - "label": "MattWard", - "inputs": {}, - "inputNodes": [], - "outputs": {}, - "progress": 55, - "position": { - "x": 100, - "y": 100 - } - } - } - } -] diff --git a/scripts/demo/templates/invitation-codes-{{ deploy }}-{{ datestamp }}.md.jinja2 b/scripts/demo/templates/invitation-codes-{{ deploy }}-{{ datestamp }}.md.jinja2 deleted file mode 100644 index f7e9306f125..00000000000 --- a/scripts/demo/templates/invitation-codes-{{ deploy }}-{{ datestamp }}.md.jinja2 +++ /dev/null @@ -1,22 +0,0 @@ -# Invitations for {{ deploy }} - - -{% for url in invitation_urls %} - 1. {url} -{% endfor %} - - -Every invitation can be identified in the data column as - -```json -{ - "guest": "invitation-{{ datestamp }}-${NUMBER}" , - "issuer" : "{{ issuer_email }}" -} -``` - -These invitations **will expire on {{ datetime.now() + valid_lifetime }}** if they are not renovated - ---- - -Generated with {{ current_program }} by {{ issuer_email }} on {{ datetime.now() }} diff --git a/scripts/docker/docker-compose-config.bash b/scripts/docker/docker-compose-config.bash deleted file mode 100755 index a9327b18745..00000000000 --- a/scripts/docker/docker-compose-config.bash +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash -# generated using chatgpt -show_info() { - local message="$1" - echo -e "\e[37mInfo:\e[0m $message" >&2 -} - -show_warning() { - local message="$1" - echo -e "\e[31mWarning:\e[0m $message" >&2 -} - -show_error() { - local message="$1" - echo -e "\e[31mError:\e[0m $message" >&2 -} - - -env_file=".env" -project_directory="" -# Parse command line arguments -while getopts ":e:p:" opt; do - case $opt in - e) - env_file="$OPTARG" - ;; - p) - project_directory="$OPTARG" - ;; - \?) - show_error "Invalid option: -$OPTARG" - exit 1 - ;; - :) - show_error "Option -$OPTARG requires an argument." - exit 1 - ;; - esac -done -shift $((OPTIND-1)) - -if [[ "$#" -eq 0 ]]; then - show_error "No compose files specified!" - exit 1 -fi - -# REFERENCE: https://github.com/docker/compose/issues/9306 -# composeV2 defines specifications for docker compose to run -# they are not 100% compatible with what docker stack deploy command expects -# some parts have to be modified - - - - -# check if docker-compose V2 is available -if docker compose version --short | grep --quiet "^2\." 
; then - show_info "Running compose V2" - # V2 does not write the version anymore, so we take it from the first compose file - first_compose_file="${1}" - version=$(grep --max-count=1 "^version:" "${first_compose_file}" | cut --delimiter=' ' --fields=2 | tr --delete \"\') - if [[ -z "$version" ]]; then - version="3.9" # Default to 3.9 if version is not found in file - fi - - docker_command="\ -docker \ ---log-level=ERROR \ -compose \ ---env-file ${env_file}" - - if [ "$project_directory" ]; then - docker_command+=" --project-directory ${project_directory}" - fi - - for compose_file_path in "$@" - do - docker_command+=" --file=${compose_file_path}" - done - docker_command+="\ - config \ -| sed '/published:/s/\"//g' \ -| sed '/size:/s/\"//g' \ -| sed '1 { /name:.*/d ; }' \ -| sed '1 i version: \"${version}\"' \ -| sed --regexp-extended 's/cpus: ([0-9\\.]+)/cpus: \"\\1\"/'" - - # Execute the command - show_info "Executing Docker command: ${docker_command}" - eval ${docker_command} -else - show_warning "docker compose V2 is not available, trying V1 instead... please update your docker engine." - if docker-compose version --short | grep --quiet "^1\." ; then - show_info "Running compose V1" - docker_command="\ -docker-compose \ ---log-level=ERROR \ ---env-file ${env_file}" - for compose_file_path in "$@" - do - docker_command+=" --file=${compose_file_path} " - done - if [ "$project_directory" ]; then - docker_command+=" --project-directory ${project_directory}" - fi - docker_command+=" \ -config \ -| sed --regexp-extended 's/cpus: ([0-9\\.]+)/cpus: \"\\1\"/'" - # Execute the command - show_info "Executing Docker command: ${docker_command}" - eval ${docker_command} - else - show_error "docker-compose V1 is not available. It is impossible to run this script!" - exit 1 - fi -fi diff --git a/scripts/docker/docker-stack-config.bash b/scripts/docker/docker-stack-config.bash new file mode 100755 index 00000000000..b41f6361d4b --- /dev/null +++ b/scripts/docker/docker-stack-config.bash @@ -0,0 +1,70 @@ +#!/bin/bash +# generated using chatgpt +show_info() { + local message="$1" + echo -e "\e[37mInfo:\e[0m $message" >&2 +} + +show_warning() { + local message="$1" + echo -e "\e[31mWarning:\e[0m $message" >&2 +} + +show_error() { + local message="$1" + echo -e "\e[31mError:\e[0m $message" >&2 +} + +env_file=".env" +# Parse command line arguments +while getopts ":e:" opt; do + case $opt in + e) + env_file="$OPTARG" + ;; + \?) + show_error "Invalid option: -$OPTARG" + exit 1 + ;; + :) + show_error "Option -$OPTARG requires an argument." + exit 1 + ;; + esac +done +shift $((OPTIND - 1)) + +if [[ "$#" -eq 0 ]]; then + show_error "No compose files specified!" + exit 1 +fi + +# Check if Docker version is greater than or equal to 24.0.9 +version_check=$(docker --version | grep --extended-regexp --only-matching '[0-9]+\.[0-9]+\.[0-9]+') +IFS='.' read -r -a version_parts <<<"$version_check" + +if [[ "${version_parts[0]}" -gt 24 ]] || + { [[ "${version_parts[0]}" -eq 24 ]] && [[ "${version_parts[1]}" -gt 0 ]]; } || + { [[ "${version_parts[0]}" -eq 24 ]] && [[ "${version_parts[1]}" -eq 0 ]] && [[ "${version_parts[2]}" -ge 9 ]]; }; then + show_info "Running Docker version $version_check" +else + show_error "Docker version 25.0.3 or higher is required." + exit 1 +fi + +# shellcheck disable=SC2002 +docker_command="\ +set -o allexport && \ +. 
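`docker-stack-config.bash` above gates on a minimum Docker engine version by splitting the `docker --version` output into numeric parts. For illustration only, the same gate expressed in Python with `packaging.Version` (the 24.0.9 threshold is taken from the script's check):

```python
import re
import subprocess

from packaging.version import Version

MINIMUM = Version("24.0.9")  # threshold used by the bash check above

out = subprocess.run(
    ["docker", "--version"], capture_output=True, text=True, check=True
).stdout  # e.g. "Docker version 24.0.9, build 2936816"
found = re.search(r"\d+\.\d+\.\d+", out)
if not found or Version(found.group()) < MINIMUM:
    raise SystemExit(f"Docker {MINIMUM} or higher is required, found: {out.strip()}")
print(f"Running Docker version {found.group()}")
```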
${env_file} && set +o allexport && \ +docker stack config" + +for compose_file_path in "$@"; do + docker_command+=" --compose-file ${compose_file_path}" +done +# WE CANNOT DO THIS: +# docker_command+=" --skip-interpolation" +# because docker stack compose will *validate* that e.g. `replicas: ${SIMCORE_SERVICES_POSTGRES_REPLICAS}` is a valid number, which it is not if it is read as a literal string. + +# Execute the command +show_info "Executing Docker command: ${docker_command}" +eval "${docker_command}" diff --git a/scripts/docker/healthcheck_curl_host.py b/scripts/docker/healthcheck_curl_host.py old mode 100644 new mode 100755 index 5442cc8d206..9ed5cda90dd --- a/scripts/docker/healthcheck_curl_host.py +++ b/scripts/docker/healthcheck_curl_host.py @@ -6,7 +6,8 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` diff --git a/scripts/download-deployed-webserver-settings.bash b/scripts/download-deployed-webserver-settings.bash new file mode 100755 index 00000000000..554f15f4556 --- /dev/null +++ b/scripts/download-deployed-webserver-settings.bash @@ -0,0 +1,14 @@ +#!/bin/bash + +# all containers running on the image `local/webserver:production` +containers=$(docker ps -a --filter "status=running" --filter "ancestor=local/webserver:production" --format "{{.ID}}") + +for container_id in $containers +do + # Get the name of the container + container_name=$(docker inspect -f '{{.Name}}' "$container_id" | cut -c 2-) + + # Execute the command in the container to create settings.json + docker exec "$container_id" simcore-service-webserver settings --as-json > "${container_name}-settings.ignore.json" + +done diff --git a/scripts/echo_services_markdown.py b/scripts/echo_services_markdown.py new file mode 100755 index 00000000000..0a0ebb2dadf --- /dev/null +++ b/scripts/echo_services_markdown.py @@ -0,0 +1,141 @@ +#!/bin/env python +"""Usage + +cd osparc-simcore +./scripts/echo_services_markdown.py >services.md +""" + +import itertools +import sys +from collections.abc import Iterable +from datetime import datetime +from operator import attrgetter +from pathlib import Path +from typing import Final, NamedTuple + +CURRENT_FILE = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve() +CURRENT_DIR = CURRENT_FILE.parent + +_URL_PREFIX: Final[str] = ( + "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master" +) + +_REDOC_URL_PREFIX: Final[str] = f"https://redocly.github.io/redoc/?url={_URL_PREFIX}" +_SWAGGER_URL_PREFIX: Final[str] = f"https://petstore.swagger.io/?url={_URL_PREFIX}" + + +class CaptureTuple(NamedTuple): + service_name: str + file_path: Path + + +_service_names_aliases: dict[str, str] = { + "web": "webserver", +} + + +def generate_markdown_table( + *captured_files: Iterable[CaptureTuple], +) -> str: + title = ("Name", "Files", " ") + num_cols = len(title) + lines = ["-" * 10] * num_cols + + def _to_row_data(values: Iterable) -> list[str]: + row = list(map(str, values)) + assert len(row) == num_cols, f"len({row=}) != {num_cols=}" + return row + + rows = [ + _to_row_data(title), + _to_row_data(lines), + ] + + found = itertools.groupby( + sorted(itertools.chain(*captured_files), key=attrgetter("service_name")), + key=attrgetter("service_name"), + ) + + for name, service_files in found: + rows.append( + _to_row_data( + ( + f"**{name.upper()}**", + "", 
+ "", + ) + ) + ) + for _, file_path in service_files: + linked_path = f"[{file_path}](./{file_path})" + + # SEE https://shields.io/badges + badges = [] + + if file_path.stem.lower() == "dockerfile": + repo = _service_names_aliases.get(f"{name}") or name + badges = [ + f"[![Docker Image Size](https://img.shields.io/docker/image-size/itisfoundation/{repo})](https://hub.docker.com/r/itisfoundation/{repo}/tags)" + ] + + elif file_path.stem.lower() == "openapi": + badges = [ + f"[![ReDoc](https://img.shields.io/badge/OpenAPI-ReDoc-85ea2d?logo=openapiinitiative)]({_REDOC_URL_PREFIX}/{file_path}) " + f"[![Swagger UI](https://img.shields.io/badge/OpenAPI-Swagger_UI-85ea2d?logo=swagger)]({_SWAGGER_URL_PREFIX}/{file_path})", + ] + + rows.append( + _to_row_data( + ( + "", + linked_path, + " ".join(badges), + ) + ) + ) + rows.append(_to_row_data(["" * 10] * num_cols)) + + # converts to markdown table + return "\n".join(f"| {'|'.join(r)} |" for r in rows) + + +if __name__ == "__main__": + + repo_base_path = CURRENT_DIR.parent.resolve() + services_path = repo_base_path / "services" + + def _to_tuple(file: Path): + return CaptureTuple( + f"{file.relative_to(services_path).parents[-2]}", + file.relative_to(repo_base_path), + ) + + def _is_hidden(file: Path) -> bool: + return any(p.name.startswith(".") for p in file.parents) + + dockerfiles_found = ( + _to_tuple(file) + for file in services_path.rglob("Dockerfile") + if not _is_hidden(file) + ) + + openapi_files_found = ( + _to_tuple(file) + for file in services_path.rglob("openapi.*") + if file.suffix in {".json", ".yaml", ".yml"} and not _is_hidden(file) + ) + + markdown_table = generate_markdown_table( + openapi_files_found, + dockerfiles_found, + ) + now = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + print("# services") + print(">") + print(f"> Auto generated on `{now}` using ") + print("```cmd") + print("cd osparc-simcore") + print(f"python ./{CURRENT_FILE.relative_to(repo_base_path)}") + print("```") + print(markdown_table) diff --git a/scripts/erd.bash b/scripts/erd.bash index c362b2a325c..c1f8568aa4a 100755 --- a/scripts/erd.bash +++ b/scripts/erd.bash @@ -22,7 +22,8 @@ echo ${IMAGE_DIR} build() { echo Building image "${IMAGE_NAME}" at "${IMAGE_DIR}" - docker build \ + docker buildx build \ + --load \ --quiet \ --tag "$IMAGE_NAME" \ "$IMAGE_DIR" diff --git a/scripts/erd/Dockerfile b/scripts/erd/Dockerfile index b66f1b705c1..7bce4e1ff9c 100644 --- a/scripts/erd/Dockerfile +++ b/scripts/erd/Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # # ERD: Entity Relationship Diagrams # @@ -6,8 +7,11 @@ # - https://erdantic.drivendata.org/stable/ # -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# we docker image is built based on debian +FROM python:${PYTHON_VERSION}-slim-bookworm AS base RUN apt-get update \ && apt-get -y install --no-install-recommends\ @@ -21,11 +25,14 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && apt-get clean +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools COPY requirements.txt . 
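+# NOTE: the `--mount=type=cache,target=/root/.cache/uv` flags used in this Dockerfile
+# reuse uv's download cache across builds; they require BuildKit (erd.bash in this
+# change now builds the image with `docker buildx build`).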
-RUN pip install --no-cache-dir -r requirements.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install -r requirements.txt diff --git a/scripts/erd/requirements.txt b/scripts/erd/requirements.txt index 9318b18043d..c83302035c5 100644 --- a/scripts/erd/requirements.txt +++ b/scripts/erd/requirements.txt @@ -1,3 +1,5 @@ +setuptools>=45 +packaging>=20.9 eralchemy erdantic pydot diff --git a/scripts/filestash/create_config.py b/scripts/filestash/create_config.py deleted file mode 100644 index cd9476f0722..00000000000 --- a/scripts/filestash/create_config.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Parses the configuration template and injects it where the platform expects it -Notes: -- Admin credentials for filestash are admin:adminadmin -- $ must be escaped with $$ in the template file -""" - -import os -import random -import string -import tempfile - -from distutils.util import strtobool -from pathlib import Path -from string import Template - -SCRIPT_DIR = Path(__file__).resolve().parent -TEMPLATE_PATH = SCRIPT_DIR / "filestash_config.json.template" -CONFIG_JSON = Path(tempfile.mkdtemp()) / "filestash_config.json" - - -def random_secret_key(length: int = 16) -> str: - return "".join(random.choice(string.ascii_letters) for _ in range(length)) - - -def patch_env_vars() -> None: - endpoint = os.environ["S3_ENDPOINT"] - if not endpoint.startswith("http"): - protocol = "https" if strtobool(os.environ["S3_SECURE"].lower()) else "http" - endpoint = f"{protocol}://{endpoint}" - - os.environ["S3_ENDPOINT"] = endpoint - - os.environ["REPLACE_SECRET_KEY"] = random_secret_key() - - -def main() -> None: - patch_env_vars() - - assert TEMPLATE_PATH.exists() - - template_content = TEMPLATE_PATH.read_text() - - config_json = Template(template_content).substitute(os.environ) - - assert CONFIG_JSON.parent.exists() - CONFIG_JSON.write_text(config_json) - - # path of configuration file is exported as env var - print(f"{CONFIG_JSON}") - - -if __name__ == "__main__": - main() diff --git a/scripts/filestash/filestash_config.json.template b/scripts/filestash/filestash_config.json.template deleted file mode 100644 index eaaaa2e2695..00000000000 --- a/scripts/filestash/filestash_config.json.template +++ /dev/null @@ -1,90 +0,0 @@ -{ - "general": { - "name": null, - "port": null, - "host": null, - "secret_key": "$REPLACE_SECRET_KEY", - "force_ssl": null, - "editor": null, - "fork_button": null, - "logout": null, - "display_hidden": null, - "refresh_after_upload": null, - "auto_connect": null, - "upload_button": null, - "upload_pool_size": null, - "filepage_default_view": "list", - "filepage_default_sort": "date", - "cookie_timeout": null, - "custom_css": null - }, - "features": { - "share": { - "enable": null, - "default_access": null, - "redirect": null - }, - "protection": { - "zip_timeout": null, - "enable": null, - "disable_svg": null - }, - "office": { - "enable": null, - "onlyoffice_server": null - }, - "server": { - "console_enable": null, - "tor_enable": null, - "tor_url": null - }, - "syncthing": { - "enable": null, - "server_url": null - }, - "image": { - "enable_image": null, - "thumbnail_size": null, - "thumbnail_quality": null, - "thumbnail_caching": null, - "image_quality": null, - "image_caching": null - }, - "search": { - "explore_timeout": null - }, - "video": { - "blacklist_format": null, - "enable_transcoder": null - } - }, - "log": { - "enable": null, - "level": null, - "telemetry": null - }, - "email": { - "server": null, - "port": null, - "username": null, - "password": null, - "from": 
null - }, - "auth": { - "admin": "$$2a$$10$$viR16hXd35bAaEJFEgpd9OqIzNBb/VoIsgQ8P3SjKxolpEQEHltrW" - }, - "constant": { - "user": "filestash", - "emacs": true, - "pdftotext": true - }, - "connections": [ - { - "label": "S3", - "type": "s3", - "advanced": true, - "path": "$S3_BUCKET_NAME", - "endpoint": "$S3_ENDPOINT" - } - ] -} diff --git a/scripts/gh.bash b/scripts/gh.bash new file mode 100755 index 00000000000..5f23c6b9f0a --- /dev/null +++ b/scripts/gh.bash @@ -0,0 +1,30 @@ +#!/bin/bash + +# Github CLI (https://cli.github.com/) +# The Dockerfile for generating the image used here is located here: https://github.com/ITISFoundation/osparc-simcore-clients/blob/master/scripts/gh/Dockerfile +# By default the pwd is mounted into the docker container and used as the current working directory +# N.B. For Github actions: Remember to expose GITHUB_TOKEN in your Github workflow .yml file." + +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +IMAGE_NAME=itisfoundation/gh +IMAGE_VERSION=v0 + +USERID=$(id -u) +USER_DIR=$(realpath ~) +GH_TOKEN_FILE=${USER_DIR}/.gh-token + +if [ -v GITHUB_ACTIONS ]; then + gh "$@" +else + if [ ! -f "${GH_TOKEN_FILE}" ]; then + echo "The file '${GH_TOKEN_FILE}' does not exist. To use Gihtub CLI, create '${GH_TOKEN_FILE}' and expose your github token in it as follows:" + echo "GH_TOKEN=" + exit 1 + fi + curdir=/tmp/curdir + docker run --rm --env-file=${GH_TOKEN_FILE} --volume=$(pwd):${curdir} --workdir=${curdir} --user=${USERID}:${USERID}\ + ${IMAGE_NAME}:${IMAGE_VERSION} "$@" +fi diff --git a/scripts/install_7zip.bash b/scripts/install_7zip.bash new file mode 100755 index 00000000000..9f162f9d70c --- /dev/null +++ b/scripts/install_7zip.bash @@ -0,0 +1,35 @@ +#!/bin/bash +# +# Installs 7zip +# + +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes +IFS=$'\n\t' + +TARGETARCH="${TARGETARCH:-amd64}" + +case "${TARGETARCH}" in \ + "amd64") ARCH="x64" ;; \ + "arm64") ARCH="arm64" ;; \ + *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \ +esac + +SEVEN_ZIP_VERSION="2409" + +echo "create install dir" +rm -rf /tmp/7zip +mkdir -p /tmp/7zip +cd /tmp/7zip + +curl -LO https://www.7-zip.org/a/7z${SEVEN_ZIP_VERSION}-linux-${ARCH}.tar.xz +tar -xvf 7z${SEVEN_ZIP_VERSION}-linux-${ARCH}.tar.xz +cp 7zz /usr/bin/7z + +echo "remove install dir" +rm -rf /tmp/7zip + +echo "test installation" +7z --help diff --git a/scripts/install_rclone.bash b/scripts/install_rclone.bash index 028753aef2b..e6378cdd9b3 100755 --- a/scripts/install_rclone.bash +++ b/scripts/install_rclone.bash @@ -4,14 +4,17 @@ # # http://redsymbol.net/articles/unofficial-bash-strict-mode/ -set -o errexit # abort on nonzero exitstatus -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes IFS=$'\n\t' +R_CLONE_VERSION="1.63.1" +TARGETARCH="${TARGETARCH:-amd64}" -R_CLONE_VERSION="1.62.1" -curl --silent --location --remote-name "https://downloads.rclone.org/v${R_CLONE_VERSION}/rclone-v${R_CLONE_VERSION}-linux-amd64.deb" -dpkg --install "rclone-v${R_CLONE_VERSION}-linux-amd64.deb" -rm "rclone-v${R_CLONE_VERSION}-linux-amd64.deb" +echo "platform ${TARGETARCH}" + +curl --silent --location --remote-name 
"https://downloads.rclone.org/v${R_CLONE_VERSION}/rclone-v${R_CLONE_VERSION}-linux-${TARGETARCH}.deb" +dpkg --install "rclone-v${R_CLONE_VERSION}-linux-${TARGETARCH}.deb" +rm "rclone-v${R_CLONE_VERSION}-linux-${TARGETARCH}.deb" rclone --version diff --git a/scripts/json-schema-dereference.bash b/scripts/json-schema-dereference.bash index 01859888f1e..9c76cc833e3 100755 --- a/scripts/json-schema-dereference.bash +++ b/scripts/json-schema-dereference.bash @@ -9,7 +9,7 @@ image_name="$(basename $0):latest" # derefernce json-schemas for easy comparison # SEE https://github.com/davidkelley/json-dereference-cli -docker build --tag "$image_name" - < dict[str, Any]: - async with pool.acquire() as conn: - async with conn.cursor() as cursor: - await cursor.execute( - "SELECT uuid, workbench, prj_owner, users.name, users.email" - ' FROM "projects"' - " INNER JOIN users" - " ON projects.prj_owner = users.id" - " WHERE users.role != 'GUEST'" - ) - typer.secho( - f"found {cursor.rowcount} project rows, now getting project with valid node ids..." - ) - project_db_rows = await cursor.fetchall() - project_nodes = { - project_uuid: { - "nodes": list(workbench.keys()), - "owner": prj_owner, - "name": user_name, - "email": user_email, - } - for project_uuid, workbench, prj_owner, user_name, user_email in project_db_rows - if len(workbench) > 0 - } - typer.echo( - f"processed {cursor.rowcount} project rows, found {len(project_nodes)} valid projects." - ) - return project_nodes - - -async def _get_files_from_project_nodes( - pool, project_uuid: str, node_ids: list[str] -) -> set[tuple[str, int, datetime]]: - async with pool.acquire() as conn: - async with conn.cursor() as cursor: - array = str([f"{project_uuid}/{n}%" for n in node_ids]) - await cursor.execute( - "SELECT file_id, file_size, last_modified" - ' FROM "file_meta_data"' - f" WHERE file_meta_data.file_id LIKE any (array{array}) AND location_id = '0'" - ) - - # here we got all the files for that project uuid/node_ids combination - file_rows = await cursor.fetchall() - return { - (file_id, file_size, parser.parse(last_modified or "2000-01-01")) - for file_id, file_size, last_modified in file_rows - } - - -async def _get_all_invalid_files_from_file_meta_data( - pool, -) -> set[tuple[str, int, datetime]]: - async with pool.acquire() as conn: - async with conn.cursor() as cursor: - await cursor.execute( - 'SELECT file_id, file_size, last_modified FROM "file_meta_data" ' - "WHERE file_meta_data.file_size < 1 OR file_meta_data.entity_tag IS NULL" - ) - # here we got all the files for that project uuid/node_ids combination - file_rows = await cursor.fetchall() - return { - (file_id, file_size, parser.parse(last_modified or "2000-01-01")) - for file_id, file_size, last_modified in file_rows - } - - -POWER_LABELS = {0: "B", 1: "KiB", 2: "MiB", 3: "GiB"} -LABELS_POWER = {v: k for k, v in POWER_LABELS.items()} - - -def convert_s3_label_to_bytes(s3_size: str) -> int: - """convert 12MiB to 12 * 1024**2""" - match = re.match(r"([0-9.]+)(\w+)", s3_size) - if match: - return int(float(match.groups()[0]) * 1024 ** LABELS_POWER[match.groups()[1]]) - return -1 - - -async def limited_gather(*tasks, max_concurrency: int): - wrapped_tasks = tasks - if max_concurrency > 0: - semaphore = asyncio.Semaphore(max_concurrency) - - async def sem_task(task): - async with semaphore: - return await task - - wrapped_tasks = [sem_task(t) for t in tasks] - - return await asyncio.gather(*wrapped_tasks, return_exceptions=True) - - -async def _get_files_from_s3_backend( - s3_endpoint: str, 
- s3_access: str, - s3_secret: str, - s3_bucket: str, - project_uuid: str, - progress, -) -> set[tuple[str, int, datetime]]: - s3_file_entries = set() - try: - # TODO: this could probably run faster if we maintain the client, and run successive commands in there - command = ( - f"docker run --rm " - f"--env MC_HOST_mys3='https://{s3_access}:{s3_secret}@{s3_endpoint}' " - "minio/mc " - f"ls --recursive mys3/{s3_bucket}/{project_uuid}/" - ) - process = await asyncio.create_subprocess_shell( - command, - stdin=asyncio.subprocess.PIPE, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - stdout, _ = await process.communicate() - decoded_stdout = stdout.decode() - if decoded_stdout != b"": - # formatted as: - # [2021-09-07 04:35:49 UTC] 1.5GiB 05e821d1-2b4b-455f-86b6-9e197545c1ad/work.tgz - # DATE_creation? size node_id/file_path.ext - list_of_files = decoded_stdout.split("\n") - for file in list_of_files: - match = re.findall(r"\[(.+)\]\s+(\S+)\s+(.+)", file) - if match: - last_modified, size, node_id_file = match[0] - s3_file_entries.add( - ( - f"{project_uuid}/{node_id_file}", - convert_s3_label_to_bytes(size), - parser.parse(last_modified), - ) - ) - - except subprocess.CalledProcessError: - pass - - progress.update(1) - return s3_file_entries - - -def write_file(file_path: Path, data, fieldnames): - with file_path.open("w", newline="") as csvfile: - csv_writer = csv.writer(csvfile) - csv_writer.writerow(fieldnames) - csv_writer.writerows(data) - - -async def main_async( - postgres_volume_name: str, - postgres_username: str, - postgres_password: str, - s3_endpoint: str, - s3_access: str, - s3_secret: str, - s3_bucket: str, -): - - # ---------------------- GET FILE ENTRIES FROM DB PROKECT TABLE ------------------------------------------------------------- - async with managed_docker_compose( - postgres_volume_name, postgres_username, postgres_password - ): - # now the database is up, let's get all the projects owned by a non-GUEST account - async with aiopg.create_pool( - f"dbname=simcoredb user={postgres_username} password={postgres_password} host=127.0.0.1" - ) as pool: - project_nodes = await _get_projects_nodes(pool) - # Rationale: the project database does not contain all the files in a node (logs, state files are missing) - # Therefore, we will list here all the files that are registered in the file_meta_data table using the same projectid/nodeid - all_sets_of_file_entries = await asyncio.gather( - *[ - _get_files_from_project_nodes(pool, project_uuid, prj_data["nodes"]) - for project_uuid, prj_data in project_nodes.items() - ] - ) - all_invalid_files_in_file_meta_data = ( - await _get_all_invalid_files_from_file_meta_data(pool) - ) - db_file_entries: set[tuple[str, int, datetime]] = set().union( - *all_sets_of_file_entries - ) - db_file_entries_path = Path.cwd() / f"{s3_endpoint}_db_file_entries.csv" - write_file( - db_file_entries_path, db_file_entries, ["file_id", "size", "last modified"] - ) - typer.secho( - f"processed {len(project_nodes)} projects, found {len(db_file_entries)} file entries, saved in {db_file_entries_path}", - fg=typer.colors.YELLOW, - ) - - if all_invalid_files_in_file_meta_data: - db_file_meta_data_invalid_entries_path = ( - Path.cwd() / f"{s3_endpoint}_db_file_meta_data_invalid_entries.csv" - ) - write_file( - db_file_meta_data_invalid_entries_path, - all_invalid_files_in_file_meta_data, - ["file_id", "size", "last modified"], - ) - typer.secho( - f"processed {len(all_invalid_files_in_file_meta_data)} INVALID file entries, saved in 
{db_file_meta_data_invalid_entries_path}", - fg=typer.colors.YELLOW, - ) - - # ---------------------- GET FILE ENTRIES FROM S3 --------------------------------------------------------------------- - # let's proceed with S3 backend: files are saved in BUCKET_NAME/projectID/nodeID/fileA.ext - # Rationale: Similarly we list here all the files in each of the projects. And it goes faster to list them recursively. - typer.echo( - f"now connecting with S3 backend and getting files for {len(project_nodes)} projects..." - ) - # pull first: prevents _get_files_from_s3_backend from pulling it and poluting outputs - subprocess.run("docker pull minio/mc", shell=True, check=True) - with typer.progressbar(length=len(project_nodes)) as progress: - all_sets_in_s3 = await limited_gather( - *[ - _get_files_from_s3_backend( - s3_endpoint, s3_access, s3_secret, s3_bucket, project_uuid, progress - ) - for project_uuid in project_nodes - ], - max_concurrency=20, - ) - s3_file_entries = set().union(*all_sets_in_s3) - s3_file_entries_path = Path.cwd() / f"{s3_endpoint}_s3_file_entries.csv" - write_file( - s3_file_entries_path, - s3_file_entries, - fieldnames=["file_id", "size", "last_modified"], - ) - typer.echo( - f"processed {len(project_nodes)} projects, found {len(s3_file_entries)} file entries, saved in {s3_file_entries_path}" - ) - - # ---------------------- COMPARISON --------------------------------------------------------------------- - db_file_ids = {db_file_id for db_file_id, _, _ in db_file_entries} - s3_file_ids = {s3_file_id for s3_file_id, _, _ in s3_file_entries} - common_files_uuids = db_file_ids.intersection(s3_file_ids) - s3_missing_files_uuids = db_file_ids.difference(s3_file_ids) - db_missing_files_uuids = s3_file_ids.difference(db_file_ids) - typer.secho( - f"{len(common_files_uuids)} files are the same in both system", - fg=typer.colors.BLUE, - ) - typer.secho( - f"{len(s3_missing_files_uuids)} files are missing in S3", fg=typer.colors.RED - ) - typer.secho( - f"{len(db_missing_files_uuids)} files are missing in DB", fg=typer.colors.RED - ) - - # ------------------ WRITING REPORT -------------------------------------------- - consistent_files_path = Path.cwd() / f"{s3_endpoint}_consistent_files.csv" - s3_missing_files_path = Path.cwd() / f"{s3_endpoint}_s3_missing_files.csv" - db_missing_files_path = Path.cwd() / f"{s3_endpoint}_db_missing_files.csv" - db_file_map: dict[str, tuple[int, datetime]] = { - e[0]: e[1:] for e in db_file_entries - } - - def order_by_owner( - list_of_files_uuids: set[str], - ) -> dict[tuple[str, str, str], list[tuple[str, int, datetime]]]: - files_by_owner = defaultdict(list) - for file_id in list_of_files_uuids: - # project_id/node_id/file - prj_uuid = file_id.split("/")[0] - prj_data = project_nodes[prj_uuid] - files_by_owner[ - ( - prj_data["owner"], - prj_data["name"], - prj_data["email"], - ) - ].append(file_id) - return files_by_owner - - def write_to_file(path: Path, files_by_owner): - with path.open("wt") as fp: - fp.write("owner,name,email,file,size,last_modified\n") - for (owner, name, email), files in files_by_owner.items(): - for file in files: - size, modified = db_file_map.get(file, ("?", "?")) - fp.write(f"{owner},{name},{email},{file}, {size}, {modified}\n") - - write_to_file(consistent_files_path, order_by_owner(common_files_uuids)) - write_to_file(s3_missing_files_path, order_by_owner(s3_missing_files_uuids)) - write_to_file(db_missing_files_path, order_by_owner(db_missing_files_uuids)) - - -def main( - postgres_volume_name: str, - 
postgres_username: str, - postgres_password: str, - s3_endpoint: str, - s3_access: str, - s3_secret: str, - s3_bucket: str, -): - """Script to check consistency of the file storage backend in oSparc. - - requirements: - - local docker volume containing a database from a deployment (see make import-db-from-docker-volume in /packages/postgres-database) - - 1. From an osparc database, go over all projects, get the project IDs and Node IDs - - 2. From the same database, now get all the files listed like projectID/nodeID from 1. - - 3. We get a list of files that are needed for the current projects - - 4. connect to the S3 backend, check that these files exist - - 5. generate a report with: project uuid, owner, files missing in S3""" - - asyncio.run( - main_async( - postgres_volume_name, - postgres_username, - postgres_password, - s3_endpoint, - s3_access, - s3_secret, - s3_bucket, - ) - ) - - -if __name__ == "__main__": - typer.run(main) diff --git a/scripts/maintenance/computational-clusters/Makefile b/scripts/maintenance/computational-clusters/Makefile new file mode 100644 index 00000000000..3912e29fbb2 --- /dev/null +++ b/scripts/maintenance/computational-clusters/Makefile @@ -0,0 +1,34 @@ +.DEFAULT_GOAL := help + +SHELL := /bin/bash + +PYTHON_VERSION := $(or $(PYTHON), 3.10) + + +.PHONY: hel% +# thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +hel%: + @echo "usage: make [target] ..." + @echo "" + @echo "Targets for '$(notdir $(CURDIR))':" + @echo "" + @awk --posix 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + @echo "" + +.venv: + # creating python virtual environment + @uv venv --python=$(PYTHON_VERSION) + +install: .venv ## installs using $PYTHON_VERSION or uses defaults + # installing package + @uv pip install --python=$(PYTHON_VERSION) . + # now you can call the maintenance scripts + # source .venv/bin/activate + # autoscaled-monitor --deploy-config PATH/TO/REPO.CONFIG summary + +install-dev: .venv ## installs in devel mode using PYTHON_VERSION or uses defaults + # installing package + @uv pip install -e . 
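+	# NOTE: --editable (-e) installs the package from the source tree, so local code
+	# changes are picked up without re-running the install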
+ # now you can call the maintenance scripts + # source .venv/bin/activate + # autoscaled-monitor --deploy-config PATH/TO/REPO.CONFIG summary diff --git a/scripts/maintenance/computational-clusters/README.md b/scripts/maintenance/computational-clusters/README.md new file mode 100644 index 00000000000..af7e5d95bf0 --- /dev/null +++ b/scripts/maintenance/computational-clusters/README.md @@ -0,0 +1,20 @@ +# summary of autoscaled nodes on a deployment + +```bash +./osparc_clusters.py --help # to print the help +``` + + +# example usage + +```bash +./osparc_clusters.py --repo-config=PATH/TO/DEPLOYX/REPO.CONFIG summary # this will show the current auto-scaled machines in DEPLOYX +``` + +```bash +./osparc_clusters.py --repo-config=PATH/TO/DEPLOYX/REPO.CONFIG --ssh-key-path=PATH/TO/DEPLOYX/SSH_KEY summary # this will show the current auto-scaled machines in DEPLOYX AND also what is running on them +``` + +```bash +./osparc_clusters.py --repo-config=PATH/TO/DEPLOYX/REPO.CONFIG --ssh-key-path=PATH/TO/DEPLOYX/SSH_KEY summary --user-id=XX # this will show the current auto-scaled machines in DEPLOYX AND also what is running on them for user-id XX +``` diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/__init__.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py new file mode 100644 index 00000000000..3a09e12e3dc --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py @@ -0,0 +1,226 @@ +import asyncio +import os +from pathlib import Path +from typing import Annotated, Optional + +import parse +import rich +import typer +from ansible.inventory.manager import InventoryManager +from ansible.parsing.dataloader import DataLoader +from dotenv import dotenv_values + +from . import core as api +from .constants import ( + DEFAULT_COMPUTATIONAL_EC2_FORMAT, + DEFAULT_COMPUTATIONAL_EC2_FORMAT_WORKERS, + DEFAULT_DYNAMIC_EC2_FORMAT, + DEPLOY_SSH_KEY_PARSER, + UNIFIED_SSH_KEY_PARSE, + wallet_id_spec, +) +from .ec2 import autoscaling_ec2_client, cluster_keeper_ec2_client +from .models import AppState, BastionHost + +state: AppState = AppState( + dynamic_parser=parse.compile(DEFAULT_DYNAMIC_EC2_FORMAT), + computational_parser_primary=parse.compile( + DEFAULT_COMPUTATIONAL_EC2_FORMAT, {"wallet_id_spec": wallet_id_spec} + ), + computational_parser_workers=parse.compile( + DEFAULT_COMPUTATIONAL_EC2_FORMAT_WORKERS, {"wallet_id_spec": wallet_id_spec} + ), +) + +app = typer.Typer() + + +def _parse_repo_config(deploy_config: Path) -> dict[str, str | None]: + repo_config = deploy_config / "repo.config" + if not repo_config.exists(): + rich.print( + f"[red]{repo_config} does not exist! Please run `make repo.config` in {deploy_config} to generate it[/red]" + ) + raise typer.Exit(os.EX_DATAERR) + + environment = dotenv_values(repo_config) + + assert environment + return environment + + +def _parse_inventory(deploy_config: Path) -> BastionHost: + inventory_path = deploy_config / "ansible" / "inventory.ini" + if not inventory_path.exists(): + rich.print( + f"[red]{inventory_path} does not exist! 
Please run `make inventory` in {deploy_config} to generate it[/red]" + ) + raise typer.Exit(os.EX_DATAERR) + + loader = DataLoader() + inventory = InventoryManager(loader=loader, sources=[f"{inventory_path}"]) + + try: + return BastionHost( + ip=inventory.groups["CAULDRON_UNIX"].get_vars()["bastion_ip"], + user_name=inventory.groups["CAULDRON_UNIX"].get_vars()["bastion_user"], + ) + except KeyError as err: + rich.print( + f"[red]{inventory_path} invalid! Unable to find bastion_ip in the inventory file. TIP: Please run `make inventory` in {deploy_config} to generate it[/red]" + ) + raise typer.Exit(os.EX_DATAERR) from err + + +@app.callback() +def main( + deploy_config: Annotated[ + Path, typer.Option(help="path to the deploy configuration") + ], +): + """Manages external clusters""" + + state.deploy_config = deploy_config.expanduser() + assert ( + deploy_config.is_dir() + ), "deploy-config argument is not pointing to a directory!" + state.environment = _parse_repo_config(deploy_config) + state.main_bastion_host = _parse_inventory(deploy_config) + + # connect to ec2s + state.ec2_resource_autoscaling = autoscaling_ec2_client(state) + state.ec2_resource_clusters_keeper = cluster_keeper_ec2_client(state) + + assert state.environment["EC2_INSTANCES_KEY_NAME"] + dynamic_pattern = f"{state.environment['EC2_INSTANCES_NAME_PREFIX']}-{{key_name}}" + state.dynamic_parser = parse.compile(dynamic_pattern) + if state.environment["CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX"]: + state.computational_parser_primary = parse.compile( + rf"{state.environment['CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX'].strip('-')}-{DEFAULT_COMPUTATIONAL_EC2_FORMAT}", + {"wallet_id_spec": wallet_id_spec}, + ) + state.computational_parser_workers = parse.compile( + rf"{state.environment['CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX'].strip('-')}-{DEFAULT_COMPUTATIONAL_EC2_FORMAT_WORKERS}", + {"wallet_id_spec": wallet_id_spec}, + ) + + # locate ssh key path + for file_path in deploy_config.glob("**/*.pem"): + if any(_ in file_path.name for _ in ["license", "pkcs8", "dask"]): + continue + + if DEPLOY_SSH_KEY_PARSER.parse( + f"{file_path.name}" + ) is not None or UNIFIED_SSH_KEY_PARSE.parse(f"{file_path.name}"): + rich.print( + f"will be using following ssh_key_path: {file_path}. " + "TIP: if wrong adapt the code or manually remove some of them." + ) + state.ssh_key_path = file_path + break + if not state.ssh_key_path: + rich.print( + f"[red]could not find ssh key in {deploy_config}! Please run OPS code to generate it[/red]" + ) + raise typer.Exit(1) + + +@app.command() +def summary( + *, + user_id: Annotated[int, typer.Option(help="filters by the user ID")] = 0, + wallet_id: Annotated[int, typer.Option(help="filters by the wallet ID")] = 0, + as_json: Annotated[bool, typer.Option(help="outputs as json")] = False, + output: Annotated[Path | None, typer.Option(help="outputs to a file")] = None, +) -> None: + """Show a summary of the current situation of autoscaled EC2 instances. + + Gives a list of all the instances used for dynamic services, and optionally shows what runs in them. + Gives alist of all the instances used for computational services (e.g. 
primary + worker(s) instances) + + Arguments: + repo_config -- path that shall point to a repo.config type of file (see osparc-ops-deployment-configuration repository) + + """ + + if not asyncio.run( + api.summary( + state, + user_id or None, + wallet_id or None, + output_json=as_json, + output=output, + ) + ): + raise typer.Exit(1) + + +@app.command() +def cancel_jobs( + user_id: Annotated[int, typer.Option(help="the user ID")], + wallet_id: Annotated[ + Optional[int | None], # noqa: UP007 # typer does not understand | syntax + typer.Option(help="the wallet ID"), + ] = None, + *, + abort_in_db: Annotated[ + bool, + typer.Option( + help="will also force the job to abort in the database (use only if job is in WAITING FOR CLUSTER/WAITING FOR RESOURCE)" + ), + ] = False, +) -> None: + """Cancel jobs from the cluster, this will rely on osparc platform to work properly + The director-v2 should receive the cancellation and abort the concerned pipelines in the next 15 seconds. + NOTE: This should be called prior to clearing jobs on the cluster. + + Keyword Arguments: + user_id -- the user ID + wallet_id -- the wallet ID + abort_in_db -- will also force the job to abort in the database (use only if job is in WAITING FOR CLUSTER/WAITING FOR RESOURCE) + """ + asyncio.run(api.cancel_jobs(state, user_id, wallet_id, abort_in_db=abort_in_db)) + + +@app.command() +def trigger_cluster_termination( + user_id: Annotated[int, typer.Option(help="the user ID")], + wallet_id: Annotated[ + Optional[int | None], # noqa: UP007 # typer does not understand | syntax + typer.Option(help="the wallet ID"), + ] = None, + *, + force: Annotated[bool, typer.Option(help="will not ask for confirmation")] = False, +) -> None: + """this will set the Heartbeat tag on the primary machine to 1 hour, thus ensuring the + clusters-keeper will properly terminate that cluster. + + Keyword Arguments: + user_id -- the user ID + wallet_id -- the wallet ID + force -- will not ask for confirmation (VERY RISKY! USE WITH CAUTION!) 
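+
+    Example (assuming the `autoscaled-monitor` entry point referenced in the Makefile,
+    pointing --deploy-config at the deploy-configuration directory):
+
+        autoscaled-monitor --deploy-config PATH/TO/DEPLOY_CONFIG trigger-cluster-termination --user-id 42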
+ """ + asyncio.run(api.trigger_cluster_termination(state, user_id, wallet_id, force=force)) + + +@app.command() +def check_database_connection() -> None: + """this will check the connection to simcore database is ready""" + asyncio.run(api.check_database_connection(state)) + + +@app.command() +def terminate_dynamic_instances( + user_id: Annotated[int | None, typer.Option(help="the user ID")] = None, + instance_id: Annotated[str | None, typer.Option(help="the instance ID")] = None, + *, + force: Annotated[bool, typer.Option(help="will not ask for confirmation")] = False, +) -> None: + """this will terminate the instance(s) used for the given user or instance ID.""" + asyncio.run( + api.terminate_dynamic_instances(state, user_id, instance_id, force=force) + ) + + +if __name__ == "__main__": + app() diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/constants.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/constants.py new file mode 100644 index 00000000000..6b8b1038205 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/constants.py @@ -0,0 +1,40 @@ +import re +from typing import Final + +import parse +from pydantic import ByteSize + + +@parse.with_pattern(r"None|\d+") +def wallet_id_spec(text) -> None | int: + if text == "None": + return None + return int(text) + + +DEFAULT_COMPUTATIONAL_EC2_FORMAT: Final[ + str +] = r"osparc-computational-cluster-{role}-{swarm_stack_name}-user_id:{user_id:d}-wallet_id:{wallet_id:wallet_id_spec}" +DEFAULT_COMPUTATIONAL_EC2_FORMAT_WORKERS: Final[ + str +] = r"osparc-computational-cluster-{role}-{swarm_stack_name}-user_id:{user_id:d}-wallet_id:{wallet_id:wallet_id_spec}-{key_name}" +DEFAULT_DYNAMIC_EC2_FORMAT: Final[str] = r"osparc-dynamic-autoscaled-worker-{key_name}" +DEPLOY_SSH_KEY_PARSER: Final[parse.Parser] = parse.compile( + r"{prefix}-{random_name}.pem" +) +UNIFIED_SSH_KEY_PARSE: Final[parse.Parser] = parse.compile("sshkey.pem") + +MINUTE: Final[int] = 60 +HOUR: Final[int] = 60 * MINUTE + + +SSH_USER_NAME: Final[str] = "ubuntu" +UNDEFINED_BYTESIZE: Final[ByteSize] = ByteSize(-1) +TASK_CANCEL_EVENT_NAME_TEMPLATE: Final[str] = "cancel_event_{}" + +# NOTE: service_name and service_version are not available on dynamic-sidecar/dynamic-proxies! +DYN_SERVICES_NAMING_CONVENTION: Final[re.Pattern] = re.compile( + r"^dy-(proxy|sidecar)(-|_)(?P.{8}-.{4}-.{4}-.{4}-.{12}).*\t(?P[^\t]+)\t(?P\d+)\t(?P.{8}-.{4}-.{4}-.{4}-.{12})\t(?P[^\t]*)\t(?P.*)$" +) + +DANGER = "[red]{}[/red]" diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py new file mode 100755 index 00000000000..4eff89026dd --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py @@ -0,0 +1,831 @@ +#! /usr/bin/env python3 + +import asyncio +import datetime +import json +from dataclasses import replace +from pathlib import Path + +import arrow +import parse +import rich +import typer +from mypy_boto3_ec2.service_resource import Instance, ServiceResourceInstancesCollection +from mypy_boto3_ec2.type_defs import TagTypeDef +from pydantic import ByteSize, TypeAdapter, ValidationError +from rich.progress import track +from rich.style import Style +from rich.table import Column, Table + +from . 
import dask, db, ec2, ssh, utils +from .constants import SSH_USER_NAME, UNDEFINED_BYTESIZE +from .models import ( + AppState, + ComputationalCluster, + ComputationalInstance, + ComputationalTask, + DaskTask, + DynamicInstance, + DynamicService, + InstanceRole, + TaskId, + TaskState, +) + + +@utils.to_async +def _parse_computational( + state: AppState, instance: Instance +) -> ComputationalInstance | None: + name = utils.get_instance_name(instance) + if result := ( + state.computational_parser_workers.parse(name) + or state.computational_parser_primary.parse(name) + ): + assert isinstance(result, parse.Result) + + last_heartbeat = utils.get_last_heartbeat(instance) + return ComputationalInstance( + role=InstanceRole(result["role"]), + user_id=result["user_id"], + wallet_id=result["wallet_id"], + name=name, + last_heartbeat=last_heartbeat, + ec2_instance=instance, + disk_space=UNDEFINED_BYTESIZE, + dask_ip="unknown", + ) + + return None + + +def _create_graylog_permalinks( + environment: dict[str, str | None], instance: Instance +) -> str: + # https://monitoring.sim4life.io/graylog/search/6552235211aee4262e7f9f21?q=source%3A%22ip-10-0-1-67%22&rangetype=relative&from=28800 + source_name = instance.private_ip_address.replace(".", "-") + time_span = int( + ( + arrow.utcnow().datetime - instance.launch_time + datetime.timedelta(hours=1) + ).total_seconds() + ) + return f"https://monitoring.{environment['MACHINE_FQDN']}/graylog/search?q=source%3A%22ip-{source_name}%22&rangetype=relative&from={time_span}" + + +def _parse_dynamic(state: AppState, instance: Instance) -> DynamicInstance | None: + name = utils.get_instance_name(instance) + if result := state.dynamic_parser.search(name): + assert isinstance(result, parse.Result) + + return DynamicInstance( + name=name, + ec2_instance=instance, + running_services=[], + disk_space=UNDEFINED_BYTESIZE, + ) + return None + + +def _print_dynamic_instances( + instances: list[DynamicInstance], + environment: dict[str, str | None], + aws_region: str, + output: Path | None, +) -> None: + time_now = arrow.utcnow() + table = Table( + Column("Instance"), + Column( + "Running services", + footer="[red]Intervention detection might show false positive if in transient state, be careful and always double-check!![/red]", + ), + title=f"dynamic autoscaled instances: {aws_region}", + show_footer=True, + padding=(0, 0), + title_style=Style(color="red", encircle=True), + ) + for instance in track( + instances, description="Preparing dynamic autoscaled instances details..." 
+ ): + service_table = "[i]n/a[/i]" + if instance.running_services: + service_table = Table( + "UserID", + "ProjectID", + "NodeID", + "ServiceName", + "ServiceVersion", + "Created Since", + "Need intervention", + expand=True, + padding=(0, 0), + ) + for service in instance.running_services: + service_table.add_row( + f"{service.user_id}", + service.project_id, + service.node_id, + service.service_name, + service.service_version, + utils.timedelta_formatting( + time_now - service.created_at, color_code=True + ), + f"{'[red]' if service.needs_manual_intervention else ''}{service.needs_manual_intervention}{'[/red]' if service.needs_manual_intervention else ''}", + ) + + table.add_row( + "\n".join( + [ + f"{utils.color_encode_with_state(instance.name, instance.ec2_instance)}", + f"ID: {instance.ec2_instance.instance_id}", + f"AMI: {instance.ec2_instance.image_id}", + f"Type: {instance.ec2_instance.instance_type}", + f"Up: {utils.timedelta_formatting(time_now - instance.ec2_instance.launch_time, color_code=True)}", + f"ExtIP: {instance.ec2_instance.public_ip_address}", + f"IntIP: {instance.ec2_instance.private_ip_address}", + f"/mnt/docker(free): {utils.color_encode_with_threshold(instance.disk_space.human_readable(), instance.disk_space, TypeAdapter(ByteSize).validate_python('15Gib'))}", + ] + ), + service_table, + ) + table.add_row( + "Graylog: ", + f"{_create_graylog_permalinks(environment, instance.ec2_instance)}", + end_section=True, + ) + if output: + with output.open("w") as fp: + rich.print(table, flush=True, file=fp) + else: + rich.print(table, flush=True) + + +def _print_computational_clusters( + clusters: list[ComputationalCluster], + environment: dict[str, str | None], + aws_region: str, + output: Path | None, +) -> None: + time_now = arrow.utcnow() + table = Table( + Column("Instance", justify="left", overflow="ellipsis", ratio=1), + Column("Computational details", overflow="fold", ratio=2), + title=f"computational clusters: {aws_region}", + padding=(0, 0), + title_style=Style(color="red", encircle=True), + expand=True, + ) + + for cluster in track( + clusters, "Collecting information about computational clusters..." 
+ ): + cluster_worker_metrics = dask.get_worker_metrics(cluster.scheduler_info) + # first print primary machine info + table.add_row( + "\n".join( + [ + f"[bold]{utils.color_encode_with_state('Primary', cluster.primary.ec2_instance)}", + f"Name: {cluster.primary.name}", + f"ID: {cluster.primary.ec2_instance.id}", + f"AMI: {cluster.primary.ec2_instance.image_id}", + f"Type: {cluster.primary.ec2_instance.instance_type}", + f"Up: {utils.timedelta_formatting(time_now - cluster.primary.ec2_instance.launch_time, color_code=True)}", + f"ExtIP: {cluster.primary.ec2_instance.public_ip_address}", + f"IntIP: {cluster.primary.ec2_instance.private_ip_address}", + f"DaskSchedulerIP: {cluster.primary.dask_ip}", + f"UserID: {cluster.primary.user_id}", + f"WalletID: {cluster.primary.wallet_id}", + f"Heartbeat: {utils.timedelta_formatting(time_now - cluster.primary.last_heartbeat) if cluster.primary.last_heartbeat else 'n/a'}", + f"/mnt/docker(free): {utils.color_encode_with_threshold(cluster.primary.disk_space.human_readable(), cluster.primary.disk_space, TypeAdapter(ByteSize).validate_python('15Gib'))}", + ] + ), + "\n".join( + [ + f"Dask Scheduler UI: http://{cluster.primary.ec2_instance.public_ip_address}:8787", + f"Dask Scheduler TLS: tls://{cluster.primary.ec2_instance.public_ip_address}:8786", + f"Graylog UI: {_create_graylog_permalinks(environment, cluster.primary.ec2_instance)}", + f"Prometheus UI: http://{cluster.primary.ec2_instance.public_ip_address}:9090", + f"tasks: {json.dumps(cluster.task_states_to_tasks, indent=2)}", + ] + ), + ) + + # now add the workers + for index, worker in enumerate(cluster.workers): + worker_dask_metrics = next( + ( + worker_metrics + for worker_name, worker_metrics in cluster_worker_metrics.items() + if worker.dask_ip in worker_name + ), + "no metrics???", + ) + worker_processing_jobs = [ + job_id + for worker_name, job_id in cluster.processing_jobs.items() + if worker.dask_ip in worker_name + ] + table.add_row() + table.add_row( + "\n".join( + [ + f"[italic]{utils.color_encode_with_state(f'Worker {index + 1}', worker.ec2_instance)}[/italic]", + f"Name: {worker.name}", + f"ID: {worker.ec2_instance.id}", + f"AMI: {worker.ec2_instance.image_id}", + f"Type: {worker.ec2_instance.instance_type}", + f"Up: {utils.timedelta_formatting(time_now - worker.ec2_instance.launch_time, color_code=True)}", + f"ExtIP: {worker.ec2_instance.public_ip_address}", + f"IntIP: {worker.ec2_instance.private_ip_address}", + f"DaskWorkerIP: {worker.dask_ip}", + f"/mnt/docker(free): {utils.color_encode_with_threshold(worker.disk_space.human_readable(), worker.disk_space, TypeAdapter(ByteSize).validate_python('15Gib'))}", + "", + ] + ), + "\n".join( + [ + f"Graylog: {_create_graylog_permalinks(environment, worker.ec2_instance)}", + f"Dask metrics: {json.dumps(worker_dask_metrics, indent=2)}", + f"Running tasks: {worker_processing_jobs}", + ] + ), + ) + table.add_row(end_section=True) + if output: + with output.open("a") as fp: + rich.print(table, file=fp) + else: + rich.print(table) + + +async def _fetch_instance_details( + state: AppState, instance: DynamicInstance, ssh_key_path: Path +) -> tuple[list[DynamicService] | BaseException, ByteSize | BaseException]: + # Run both SSH operations concurrently for this instance + running_services, disk_space = await asyncio.gather( + ssh.list_running_dyn_services( + state, + instance.ec2_instance, + SSH_USER_NAME, + ssh_key_path, + ), + ssh.get_available_disk_space( + state, instance.ec2_instance, SSH_USER_NAME, ssh_key_path + ), + return_exceptions=True, 
+ ) + return running_services, disk_space + + +async def _analyze_dynamic_instances_running_services_concurrently( + state: AppState, + dynamic_instances: list[DynamicInstance], + ssh_key_path: Path, + user_id: int | None, +) -> list[DynamicInstance]: + details = await asyncio.gather( + *( + _fetch_instance_details(state, instance, ssh_key_path) + for instance in dynamic_instances + ), + return_exceptions=True, + ) + + # Filter and update instances based on results and given criteria + return [ + replace( + instance, + running_services=instance_details[0], + disk_space=instance_details[1], + ) + for instance, instance_details in zip(dynamic_instances, details, strict=True) + if isinstance(instance_details, tuple) + and isinstance(instance_details[0], list) + and isinstance(instance_details[1], ByteSize) + and (user_id is None or any(s.user_id == user_id for s in instance_details[0])) + ] + + +async def _analyze_computational_instances( + state: AppState, + computational_instances: list[ComputationalInstance], + ssh_key_path: Path | None, +) -> list[ComputationalCluster]: + all_disk_spaces = [UNDEFINED_BYTESIZE] * len(computational_instances) + if ssh_key_path is not None: + all_disk_spaces = await asyncio.gather( + *( + ssh.get_available_disk_space( + state, instance.ec2_instance, SSH_USER_NAME, ssh_key_path + ) + for instance in computational_instances + ), + return_exceptions=True, + ) + + all_dask_ips = await asyncio.gather( + *( + ssh.get_dask_ip( + state, instance.ec2_instance, SSH_USER_NAME, ssh_key_path + ) + for instance in computational_instances + ), + return_exceptions=True, + ) + + computational_clusters = [] + for instance, disk_space, dask_ip in track( + zip(computational_instances, all_disk_spaces, all_dask_ips, strict=True), + description="Collecting computational clusters data...", + ): + if isinstance(disk_space, ByteSize): + instance.disk_space = disk_space + if isinstance(dask_ip, str): + instance.dask_ip = dask_ip + if instance.role is InstanceRole.manager: + ( + scheduler_info, + datasets_on_cluster, + processing_jobs, + all_tasks, + ) = await dask.get_scheduler_details( + state, + instance.ec2_instance, + ) + + assert isinstance(datasets_on_cluster, tuple) + assert isinstance(processing_jobs, dict) + + computational_clusters.append( + ComputationalCluster( + primary=instance, + workers=[], + scheduler_info=scheduler_info, + datasets=datasets_on_cluster, + processing_jobs=processing_jobs, + task_states_to_tasks=all_tasks, + ) + ) + + for instance in computational_instances: + if instance.role is InstanceRole.worker: + # assign the worker to correct cluster + for cluster in computational_clusters: + if ( + cluster.primary.user_id == instance.user_id + and cluster.primary.wallet_id == instance.wallet_id + ): + cluster.workers.append(instance) + + return computational_clusters + + +async def _parse_computational_clusters( + state: AppState, + instances: ServiceResourceInstancesCollection, + ssh_key_path: Path | None, + user_id: int | None, + wallet_id: int | None, +) -> list[ComputationalCluster]: + computational_instances = [ + comp_instance + for instance in track( + instances, description="Parsing computational instances..." 
+ ) + if (comp_instance := await _parse_computational(state, instance)) + and (user_id is None or comp_instance.user_id == user_id) + and (wallet_id is None or comp_instance.wallet_id == wallet_id) + ] + return await _analyze_computational_instances( + state, computational_instances, ssh_key_path + ) + + +async def _parse_dynamic_instances( + state: AppState, + instances: ServiceResourceInstancesCollection, + ssh_key_path: Path | None, + user_id: int | None, + wallet_id: int | None, # noqa: ARG001 +) -> list[DynamicInstance]: + dynamic_instances = [ + dyn_instance + for instance in track(instances, description="Parsing dynamic instances...") + if (dyn_instance := _parse_dynamic(state, instance)) + ] + + if dynamic_instances and ssh_key_path: + dynamic_instances = ( + await _analyze_dynamic_instances_running_services_concurrently( + state, dynamic_instances, ssh_key_path, user_id + ) + ) + return dynamic_instances + + +def _print_summary_as_json( + dynamic_instances: list[DynamicInstance], + computational_clusters: list[ComputationalCluster], + output: Path | None, +) -> None: + result = { + "dynamic_instances": [ + { + "name": instance.name, + "ec2_instance_id": instance.ec2_instance.instance_id, + "running_services": [ + { + "user_id": service.user_id, + "project_id": service.project_id, + "node_id": service.node_id, + "service_name": service.service_name, + "service_version": service.service_version, + "created_at": service.created_at.isoformat(), + "needs_manual_intervention": service.needs_manual_intervention, + } + for service in instance.running_services + ], + "disk_space": instance.disk_space.human_readable(), + } + for instance in dynamic_instances + ], + "computational_clusters": [ + { + "primary": { + "name": cluster.primary.name, + "ec2_instance_id": cluster.primary.ec2_instance.instance_id, + "user_id": cluster.primary.user_id, + "wallet_id": cluster.primary.wallet_id, + "disk_space": cluster.primary.disk_space.human_readable(), + "last_heartbeat": ( + cluster.primary.last_heartbeat.isoformat() + if cluster.primary.last_heartbeat + else "n/a" + ), + }, + "workers": [ + { + "name": worker.name, + "ec2_instance_id": worker.ec2_instance.instance_id, + "disk_space": worker.disk_space.human_readable(), + } + for worker in cluster.workers + ], + "datasets": cluster.datasets, + "tasks": cluster.task_states_to_tasks, + } + for cluster in computational_clusters + ], + } + + if output: + output.write_text(json.dumps(result)) + else: + rich.print_json(json.dumps(result)) + + +async def summary( + state: AppState, + user_id: int | None, + wallet_id: int | None, + *, + output_json: bool, + output: Path | None, +) -> bool: + # get all the running instances + assert state.ec2_resource_autoscaling + dynamic_instances = await ec2.list_dynamic_instances_from_ec2( + state, + filter_by_user_id=user_id, + filter_by_wallet_id=wallet_id, + filter_by_instance_id=None, + ) + dynamic_autoscaled_instances = await _parse_dynamic_instances( + state, dynamic_instances, state.ssh_key_path, user_id, wallet_id + ) + + assert state.ec2_resource_clusters_keeper + computational_instances = await ec2.list_computational_instances_from_ec2( + state, user_id, wallet_id + ) + computational_clusters = await _parse_computational_clusters( + state, computational_instances, state.ssh_key_path, user_id, wallet_id + ) + + if output_json: + _print_summary_as_json( + dynamic_autoscaled_instances, computational_clusters, output=output + ) + + if not output_json: + _print_dynamic_instances( + dynamic_autoscaled_instances, + 
state.environment, + state.ec2_resource_autoscaling.meta.client.meta.region_name, + output=output, + ) + _print_computational_clusters( + computational_clusters, + state.environment, + state.ec2_resource_clusters_keeper.meta.client.meta.region_name, + output=output, + ) + + time_threshold = arrow.utcnow().shift(minutes=-30).datetime + dynamic_services_in_error = any( + service.needs_manual_intervention and service.created_at < time_threshold + for instance in dynamic_autoscaled_instances + for service in instance.running_services + ) + + return not dynamic_services_in_error + + +def _print_computational_tasks( + user_id: int, + wallet_id: int | None, + tasks: list[tuple[ComputationalTask | None, DaskTask | None]], +) -> None: + table = Table( + "index", + "ProjectID", + "NodeID", + "ServiceName", + "ServiceVersion", + "State in DB", + "State in Dask cluster", + title=f"{len(tasks)} Tasks running for {user_id=}/{wallet_id=}", + padding=(0, 0), + title_style=Style(color="red", encircle=True), + ) + + for index, (db_task, dask_task) in enumerate(tasks): + table.add_row( + f"{index}", + ( + f"{db_task.project_id}" + if db_task + else "[red][bold]intervention needed[/bold][/red]" + ), + f"{db_task.node_id}" if db_task else "", + f"{db_task.service_name}" if db_task else "", + f"{db_task.service_version}" if db_task else "", + f"{db_task.state}" if db_task else "", + ( + dask_task.state + if dask_task + else "[orange]task not yet in cluster[/orange]" + ), + ) + + rich.print(table) + + +async def _list_computational_clusters( + state: AppState, user_id: int, wallet_id: int | None +) -> list[ComputationalCluster]: + assert state.ec2_resource_clusters_keeper + computational_instances = await ec2.list_computational_instances_from_ec2( + state, user_id, wallet_id + ) + return await _parse_computational_clusters( + state, computational_instances, state.ssh_key_path, user_id, wallet_id + ) + + +async def _cancel_all_jobs( + state: AppState, + the_cluster: ComputationalCluster, + *, + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]], + abort_in_db: bool, +) -> None: + rich.print("cancelling all tasks") + for comp_task, dask_task in task_to_dask_job: + if dask_task is not None and dask_task.state != "unknown": + await dask.trigger_job_cancellation_in_scheduler( + state, + the_cluster, + dask_task.job_id, + ) + if comp_task is None: + # we need to clear it of the cluster + await dask.remove_job_from_scheduler( + state, + the_cluster, + dask_task.job_id, + ) + if comp_task is not None and abort_in_db: + await db.abort_job_in_db(state, comp_task.project_id, comp_task.node_id) + + rich.print("cancelled all tasks") + + +async def _get_job_id_to_dask_state_from_cluster( + cluster: ComputationalCluster, +) -> dict[TaskId, TaskState]: + job_id_to_dask_state: dict[TaskId, TaskState] = {} + for job_state, job_ids in cluster.task_states_to_tasks.items(): + for job_id in job_ids: + job_id_to_dask_state[job_id] = job_state + return job_id_to_dask_state + + +async def _get_db_task_to_dask_job( + computational_tasks: list[ComputationalTask], + job_id_to_dask_state: dict[TaskId, TaskState], +) -> list[tuple[ComputationalTask | None, DaskTask | None]]: + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = [] + for task in computational_tasks: + dask_task = None + if task.job_id: + dask_task = DaskTask( + job_id=task.job_id, + state=job_id_to_dask_state.pop(task.job_id, None) or "unknown", + ) + task_to_dask_job.append((task, dask_task)) + # keep the jobs still in the cluster 
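+    # NOTE: any job_id left in job_id_to_dask_state at this point has no matching entry
+    # among the DB tasks fetched above; _print_computational_tasks will flag such rows
+    # as "intervention needed"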
+    for job_id, dask_state in job_id_to_dask_state.items():
+        task_to_dask_job.append((None, DaskTask(job_id=job_id, state=dask_state)))
+    return task_to_dask_job
+
+
+async def cancel_jobs(  # noqa: C901, PLR0912
+    state: AppState, user_id: int, wallet_id: int | None, *, abort_in_db: bool
+) -> None:
+    # get the theory
+    computational_tasks = await db.list_computational_tasks_from_db(state, user_id)
+
+    # get the reality
+    computational_clusters = await _list_computational_clusters(
+        state, user_id, wallet_id
+    )
+
+    if computational_clusters:
+        assert (
+            len(computational_clusters) == 1
+        ), "too many clusters found! TIP: fix this code or something weird is playing out"
+
+        the_cluster = computational_clusters[0]
+        rich.print(f"{the_cluster.task_states_to_tasks=}")
+
+        job_id_to_dask_state = await _get_job_id_to_dask_state_from_cluster(the_cluster)
+        task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = (
+            await _get_db_task_to_dask_job(computational_tasks, job_id_to_dask_state)
+        )
+
+        if not task_to_dask_job:
+            rich.print("[red]nothing found![/red]")
+            raise typer.Exit
+
+        _print_computational_tasks(user_id, wallet_id, task_to_dask_job)
+        rich.print(the_cluster.datasets)
+        try:
+            if response := typer.prompt(
+                "Which dataset to cancel? (all: will cancel everything, 1-5: will cancel jobs 1-5, or 4: will cancel job #4)",
+                default="none",
+            ):
+                if response == "none":
+                    rich.print("[yellow]not cancelling anything[/yellow]")
+                elif response == "all":
+                    await _cancel_all_jobs(
+                        state,
+                        the_cluster,
+                        task_to_dask_job=task_to_dask_job,
+                        abort_in_db=abort_in_db,
+                    )
+                else:
+                    try:
+                        # Split the response and handle ranges
+                        indices = response.split("-")
+                        if len(indices) == 2:
+                            start_index, end_index = map(int, indices)
+                            selected_indices = range(start_index, end_index + 1)
+                        else:
+                            selected_indices = [int(indices[0])]
+
+                        for selected_index in selected_indices:
+                            comp_task, dask_task = task_to_dask_job[selected_index]
+                            if dask_task is not None and dask_task.state != "unknown":
+                                await dask.trigger_job_cancellation_in_scheduler(
+                                    state, the_cluster, dask_task.job_id
+                                )
+                                if comp_task is None:
+                                    # we need to clear it of the cluster
+                                    await dask.remove_job_from_scheduler(
+                                        state, the_cluster, dask_task.job_id
+                                    )
+
+                            if comp_task is not None and abort_in_db:
+                                await db.abort_job_in_db(
+                                    state, comp_task.project_id, comp_task.node_id
+                                )
+                        rich.print(f"Cancelled selected tasks: {response}")
+
+                    except ValueError:
+                        rich.print(
+                            "[yellow]wrong index format, not cancelling anything[/yellow]"
+                        )
+                    except IndexError:
+                        rich.print(
+                            "[yellow]index out of range, not cancelling anything[/yellow]"
+                        )
+        except ValidationError:
+            rich.print("[yellow]wrong input, not cancelling anything[/yellow]")
+
+
+async def trigger_cluster_termination(
+    state: AppState, user_id: int, wallet_id: int | None, *, force: bool
+) -> None:
+    assert state.ec2_resource_clusters_keeper
+    computational_instances = await ec2.list_computational_instances_from_ec2(
+        state, user_id, wallet_id
+    )
+    computational_clusters = await _parse_computational_clusters(
+        state, computational_instances, state.ssh_key_path, user_id, wallet_id
+    )
+    assert computational_clusters
+    assert (
+        len(computational_clusters) == 1
+    ), "too many clusters found! 
TIP: fix this code" + + _print_computational_clusters( + computational_clusters, + state.environment, + state.ec2_resource_clusters_keeper.meta.client.meta.region_name, + output=None, + ) + if (force is True) or typer.confirm( + "Are you sure you want to trigger termination of that cluster?" + ): + the_cluster = computational_clusters[0] + + computational_tasks = await db.list_computational_tasks_from_db(state, user_id) + job_id_to_dask_state = await _get_job_id_to_dask_state_from_cluster(the_cluster) + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = ( + await _get_db_task_to_dask_job(computational_tasks, job_id_to_dask_state) + ) + await _cancel_all_jobs( + state, the_cluster, task_to_dask_job=task_to_dask_job, abort_in_db=force + ) + + new_heartbeat_tag: TagTypeDef = { + "Key": "last_heartbeat", + "Value": f"{arrow.utcnow().datetime - datetime.timedelta(hours=1)}", + } + the_cluster.primary.ec2_instance.create_tags(Tags=[new_heartbeat_tag]) + rich.print( + f"heartbeat tag on cluster of {user_id=}/{wallet_id=} changed, clusters-keeper will terminate that cluster soon." + ) + else: + rich.print("not deleting anything") + + +async def check_database_connection(state: AppState) -> None: + await db.check_db_connection(state) + + +async def terminate_dynamic_instances( + state: AppState, + user_id: int | None, + instance_id: str | None, + *, + force: bool, +) -> None: + if not user_id and not instance_id: + rich.print("either define user_id or instance_id!") + raise typer.Exit(2) + dynamic_instances = await ec2.list_dynamic_instances_from_ec2( + state, + filter_by_user_id=None, + filter_by_wallet_id=None, + filter_by_instance_id=instance_id, + ) + + dynamic_autoscaled_instances = await _parse_dynamic_instances( + state, dynamic_instances, state.ssh_key_path, user_id, None + ) + + if not dynamic_autoscaled_instances: + rich.print("no instances found") + raise typer.Exit(1) + + assert state.ec2_resource_autoscaling # nosec + _print_dynamic_instances( + dynamic_autoscaled_instances, + state.environment, + state.ec2_resource_autoscaling.meta.client.meta.region_name, + output=None, + ) + + for instance in dynamic_autoscaled_instances: + rich.print( + f"terminating instance {instance.ec2_instance.instance_id} with name {utils.get_instance_name(instance.ec2_instance)}" + ) + if force is True or typer.confirm( + f"Are you sure you want to terminate instance {instance.ec2_instance.instance_id}?" 
+ ): + instance.ec2_instance.terminate() + rich.print(f"terminated instance {instance.ec2_instance.instance_id}") + else: + rich.print("not terminating anything") diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/dask.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/dask.py new file mode 100644 index 00000000000..a0ef1e9dea0 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/dask.py @@ -0,0 +1,150 @@ +import contextlib +from collections.abc import AsyncGenerator, Awaitable, Coroutine +from typing import Any, Final + +import distributed +import rich +from mypy_boto3_ec2.service_resource import Instance +from pydantic import AnyUrl + +from .constants import SSH_USER_NAME, TASK_CANCEL_EVENT_NAME_TEMPLATE +from .ec2 import get_bastion_instance_from_remote_instance +from .models import AppState, ComputationalCluster, TaskId, TaskState +from .ssh import ssh_tunnel + +_SCHEDULER_PORT: Final[int] = 8786 + + +def _wrap_dask_async_call(called_fct) -> Awaitable[Any]: + assert isinstance(called_fct, Coroutine) + return called_fct + + +@contextlib.asynccontextmanager +async def dask_client( + state: AppState, instance: Instance +) -> AsyncGenerator[distributed.Client, None]: + security = distributed.Security() + assert state.deploy_config + dask_certificates = state.deploy_config / "assets" / "dask-certificates" + if dask_certificates.exists(): + security = distributed.Security( + tls_ca_file=f"{dask_certificates / 'dask-cert.pem'}", + tls_client_cert=f"{dask_certificates / 'dask-cert.pem'}", + tls_client_key=f"{dask_certificates / 'dask-key.pem'}", + require_encryption=True, + ) + + try: + async with contextlib.AsyncExitStack() as stack: + if instance.public_ip_address is not None: + url = AnyUrl(f"tls://{instance.public_ip_address}:{_SCHEDULER_PORT}") + else: + bastion_instance = await get_bastion_instance_from_remote_instance( + state, instance + ) + assert state.ssh_key_path # nosec + assert state.environment # nosec + tunnel = stack.enter_context( + ssh_tunnel( + ssh_host=bastion_instance.public_dns_name, + username=SSH_USER_NAME, + private_key_path=state.ssh_key_path, + remote_bind_host=instance.private_ip_address, + remote_bind_port=_SCHEDULER_PORT, + ) + ) + assert tunnel # nosec + host, port = tunnel.local_bind_address + url = AnyUrl(f"tls://{host}:{port}") + client = await stack.enter_async_context( + distributed.Client( + f"{url}", security=security, timeout="5", asynchronous=True + ) + ) + yield client + + finally: + pass + + +async def remove_job_from_scheduler( + state: AppState, + cluster: ComputationalCluster, + task_id: TaskId, +) -> None: + async with dask_client(state, cluster.primary.ec2_instance) as client: + await _wrap_dask_async_call(client.unpublish_dataset(task_id)) + rich.print(f"unpublished {task_id} from scheduler") + + +async def trigger_job_cancellation_in_scheduler( + state: AppState, + cluster: ComputationalCluster, + task_id: TaskId, +) -> None: + async with dask_client(state, cluster.primary.ec2_instance) as client: + task_future = distributed.Future(task_id, client=client) + cancel_event = distributed.Event( + name=TASK_CANCEL_EVENT_NAME_TEMPLATE.format(task_future.key), + client=client, + ) + await _wrap_dask_async_call(cancel_event.set()) + await _wrap_dask_async_call(task_future.cancel()) + rich.print(f"cancelled {task_id} in scheduler/workers") + + +async def _list_all_tasks( + client: distributed.Client, +) -> dict[TaskState, list[TaskId]]: + def _list_tasks( + dask_scheduler: 
distributed.Scheduler,
+    ) -> dict[TaskState, list[TaskId]]:
+        # NOTE: this callback is serialized and executed on the dask scheduler process
+
+        task_state_to_tasks = {}
+        for task in dask_scheduler.tasks.values():
+            if task.state in task_state_to_tasks:
+                task_state_to_tasks[task.state].append(task.key)
+            else:
+                task_state_to_tasks[task.state] = [task.key]
+
+        return dict(task_state_to_tasks)
+
+    list_of_tasks: dict[TaskState, list[TaskId]] = {}
+    try:
+        list_of_tasks = await client.run_on_scheduler(_list_tasks)  # type: ignore
+    except TypeError:
+        rich.print(
+            "ERROR while recovering unrunnable tasks. Defaulting to empty list of tasks!!"
+        )
+    return list_of_tasks
+
+
+async def get_scheduler_details(state: AppState, instance: Instance):
+    scheduler_info = {}
+    datasets_on_cluster = ()
+    processing_jobs = {}
+    all_tasks = {}
+    try:
+        async with dask_client(state, instance) as client:
+            scheduler_info = client.scheduler_info()
+            datasets_on_cluster = await _wrap_dask_async_call(client.list_datasets())
+            processing_jobs = await _wrap_dask_async_call(client.processing())
+            all_tasks = await _list_all_tasks(client)
+    except (TimeoutError, OSError, TypeError):
+        rich.print(
+            "ERROR while recovering scheduler details!! no scheduler info found!!"
+        )
+
+    return scheduler_info, datasets_on_cluster, processing_jobs, all_tasks
+
+
+def get_worker_metrics(scheduler_info: dict[str, Any]) -> dict[str, Any]:
+    worker_metrics = {}
+    for worker_name, worker_data in scheduler_info.get("workers", {}).items():
+        worker_metrics[worker_name] = {
+            "resources": worker_data["resources"],
+            "tasks": worker_data["metrics"].get("task_counts", {}),
+        }
+    return worker_metrics
diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py
new file mode 100644
index 00000000000..14190934aa1
--- /dev/null
+++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/db.py
@@ -0,0 +1,149 @@
+import asyncio
+import contextlib
+import uuid
+from collections.abc import AsyncGenerator
+from typing import Any
+
+import rich
+import sqlalchemy as sa
+from pydantic import PostgresDsn, TypeAdapter
+from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
+
+from .models import AppState, ComputationalTask, PostgresDB
+from .ssh import ssh_tunnel
+
+
+@contextlib.asynccontextmanager
+async def db_engine(
+    state: AppState,
+) -> AsyncGenerator[AsyncEngine, Any]:
+    async with contextlib.AsyncExitStack() as stack:
+        assert state.environment["POSTGRES_ENDPOINT"]  # nosec
+        db_endpoint = state.environment["POSTGRES_ENDPOINT"]
+        if state.main_bastion_host:
+            assert state.ssh_key_path  # nosec
+            db_host, db_port = db_endpoint.split(":")
+            tunnel = stack.enter_context(
+                ssh_tunnel(
+                    ssh_host=state.main_bastion_host.ip,
+                    username=state.main_bastion_host.user_name,
+                    private_key_path=state.ssh_key_path,
+                    remote_bind_host=db_host,
+                    remote_bind_port=int(db_port),
+                )
+            )
+            assert tunnel
+            db_endpoint = (
+                f"{tunnel.local_bind_address[0]}:{tunnel.local_bind_address[1]}"
+            )
+
+        engine = None
+        try:
+            for env in [
+                "POSTGRES_USER",
+                "POSTGRES_PASSWORD",
+                "POSTGRES_DB",
+            ]:
+                assert state.environment[env]
+            postgres_db = PostgresDB(
+                dsn=TypeAdapter(PostgresDsn).validate_python(
+                    f"postgresql+asyncpg://{state.environment['POSTGRES_USER']}:{state.environment['POSTGRES_PASSWORD']}@{db_endpoint}/{state.environment['POSTGRES_DB']}"
+                )
+            )
+
+            engine = create_async_engine(
+                f"{postgres_db.dsn}",
+                connect_args={
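+                    # NOTE: "application_name" makes this script's connections easy to spot in pg_stat_activity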
"server_settings": { + "application_name": "osparc-clusters-monitoring-script" + } + }, + ) + yield engine + finally: + if engine: + await engine.dispose() + + +async def abort_job_in_db( + state: AppState, project_id: uuid.UUID, node_id: uuid.UUID +) -> None: + async with contextlib.AsyncExitStack() as stack: + engine = await stack.enter_async_context(db_engine(state)) + db_connection = await stack.enter_async_context(engine.begin()) + + await db_connection.execute( + sa.text( + f"UPDATE comp_tasks SET state = 'ABORTED' WHERE project_id='{project_id}' AND node_id='{node_id}'" + ) + ) + rich.print(f"set comp_tasks for {project_id=}/{node_id=} set to ABORTED") + + +async def check_db_connection(state: AppState) -> bool: + try: + async with contextlib.AsyncExitStack() as stack: + engine = await stack.enter_async_context(db_engine(state)) + async with asyncio.timeout(5): + db_connection = await stack.enter_async_context(engine.connect()) + result = await db_connection.execute(sa.text("SELECT 1")) + result.one() + rich.print( + "[green]Database connection test completed successfully![/green]" + ) + return True + except Exception as e: # pylint: disable=broad-exception-caught + rich.print(f"[red]Database connection test failed: {e}[/red]") + return False + + +async def list_computational_tasks_from_db( + state: AppState, user_id: int +) -> list[ComputationalTask]: + async with contextlib.AsyncExitStack() as stack: + engine = await stack.enter_async_context(db_engine(state)) + db_connection = await stack.enter_async_context(engine.begin()) + + # Get the list of running project UUIDs with a subquery + subquery = ( + sa.select(sa.column("project_uuid")) + .select_from(sa.table("comp_runs")) + .where( + sa.and_( + sa.column("user_id") == user_id, + sa.cast(sa.column("result"), sa.VARCHAR) != "SUCCESS", + sa.cast(sa.column("result"), sa.VARCHAR) != "FAILED", + sa.cast(sa.column("result"), sa.VARCHAR) != "ABORTED", + ) + ) + ) + + # Now select comp_tasks rows where project_id is one of the project_uuids + query = ( + sa.select("*") + .select_from(sa.table("comp_tasks")) + .where( + sa.column("project_id").in_(subquery) + & (sa.cast(sa.column("state"), sa.VARCHAR) != "SUCCESS") + & (sa.cast(sa.column("state"), sa.VARCHAR) != "FAILED") + & (sa.cast(sa.column("state"), sa.VARCHAR) != "ABORTED") + ) + ) + + result = await db_connection.execute(query) + comp_tasks_list = result.fetchall() + return [ + TypeAdapter(ComputationalTask).validate_python( + { + "project_id": row.project_id, + "node_id": row.node_id, + "job_id": row.job_id, + "service_name": row.image["name"].split("/")[-1], + "service_version": row.image["tag"], + "state": row.state, + } + ) + for row in comp_tasks_list + ] + msg = "unable to access database!" 
+ raise RuntimeError(msg) diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/ec2.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/ec2.py new file mode 100644 index 00000000000..7dc7c73cf59 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/ec2.py @@ -0,0 +1,179 @@ +import json +from typing import Final + +import boto3 +from aiocache import cached +from mypy_boto3_ec2 import EC2ServiceResource +from mypy_boto3_ec2.service_resource import Instance, ServiceResourceInstancesCollection +from mypy_boto3_ec2.type_defs import FilterTypeDef + +from .models import AppState +from .utils import get_instance_name, to_async + + +@to_async +def _list_running_ec2_instances( + ec2_resource: EC2ServiceResource, + key_name: str, + custom_tags: dict[str, str], + user_id: int | None, + wallet_id: int | None, + instance_id: str | None, +) -> ServiceResourceInstancesCollection: + # get all the running instances + + ec2_filters: list[FilterTypeDef] = [ + {"Name": "instance-state-name", "Values": ["running", "pending"]}, + {"Name": "key-name", "Values": [key_name]}, + ] + if custom_tags: + ec2_filters.extend( + [ + {"Name": f"tag:{key}", "Values": [f"{value}"]} + for key, value in custom_tags.items() + ] + ) + + if user_id: + ec2_filters.append({"Name": "tag:user_id", "Values": [f"{user_id}"]}) + if wallet_id: + ec2_filters.append({"Name": "tag:wallet_id", "Values": [f"{wallet_id}"]}) + if instance_id: + ec2_filters.append({"Name": "instance-id", "Values": [f"{instance_id}"]}) + return ec2_resource.instances.filter(Filters=ec2_filters) + + +async def list_computational_instances_from_ec2( + state: AppState, + user_id: int | None, + wallet_id: int | None, +) -> ServiceResourceInstancesCollection: + assert state.environment["PRIMARY_EC2_INSTANCES_KEY_NAME"] + assert state.environment["WORKERS_EC2_INSTANCES_KEY_NAME"] + assert ( + state.environment["PRIMARY_EC2_INSTANCES_KEY_NAME"] + == state.environment["WORKERS_EC2_INSTANCES_KEY_NAME"] + ), "key name is different on primary and workers. TIP: adjust this code now" + custom_tags = {} + if state.environment["PRIMARY_EC2_INSTANCES_CUSTOM_TAGS"]: + assert ( + state.environment["PRIMARY_EC2_INSTANCES_CUSTOM_TAGS"] + == state.environment["WORKERS_EC2_INSTANCES_CUSTOM_TAGS"] + ), "custom tags are different on primary and workers. 
TIP: adjust this code now" + custom_tags = json.loads(state.environment["PRIMARY_EC2_INSTANCES_CUSTOM_TAGS"]) + assert state.ec2_resource_clusters_keeper + return await _list_running_ec2_instances( + state.ec2_resource_clusters_keeper, + state.environment["PRIMARY_EC2_INSTANCES_KEY_NAME"], + custom_tags, + user_id, + wallet_id, + None, + ) + + +async def list_dynamic_instances_from_ec2( + state: AppState, + *, + filter_by_user_id: int | None, + filter_by_wallet_id: int | None, + filter_by_instance_id: str | None, +) -> ServiceResourceInstancesCollection: + assert state.environment["EC2_INSTANCES_KEY_NAME"] + custom_tags = {} + if state.environment["EC2_INSTANCES_CUSTOM_TAGS"]: + custom_tags = json.loads(state.environment["EC2_INSTANCES_CUSTOM_TAGS"]) + assert state.ec2_resource_autoscaling + return await _list_running_ec2_instances( + state.ec2_resource_autoscaling, + state.environment["EC2_INSTANCES_KEY_NAME"], + custom_tags, + filter_by_user_id, + filter_by_wallet_id, + filter_by_instance_id, + ) + + +_DEFAULT_BASTION_NAME: Final[str] = "bastion-host" + + +@cached() +async def get_computational_bastion_instance(state: AppState) -> Instance: + assert state.ec2_resource_clusters_keeper # nosec + assert state.environment["PRIMARY_EC2_INSTANCES_KEY_NAME"] # nosec + instances = await _list_running_ec2_instances( + state.ec2_resource_clusters_keeper, + state.environment["PRIMARY_EC2_INSTANCES_KEY_NAME"], + {}, + None, + None, + None, + ) + + possible_bastions = list( + filter(lambda i: _DEFAULT_BASTION_NAME in get_instance_name(i), instances) + ) + assert len(possible_bastions) == 1 + return possible_bastions[0] + + +@cached() +async def get_dynamic_bastion_instance(state: AppState) -> Instance: + assert state.ec2_resource_autoscaling # nosec + assert state.environment["EC2_INSTANCES_KEY_NAME"] # nosec + instances = await _list_running_ec2_instances( + state.ec2_resource_autoscaling, + state.environment["EC2_INSTANCES_KEY_NAME"], + {}, + None, + None, + None, + ) + + possible_bastions = list( + filter(lambda i: _DEFAULT_BASTION_NAME in get_instance_name(i), instances) + ) + assert len(possible_bastions) == 1 + return possible_bastions[0] + + +def cluster_keeper_region(state: AppState) -> str: + assert state.environment["CLUSTERS_KEEPER_EC2_REGION_NAME"] # nosec + return state.environment["CLUSTERS_KEEPER_EC2_REGION_NAME"] + + +def autoscaling_region(state: AppState) -> str: + assert state.environment["AUTOSCALING_EC2_REGION_NAME"] # nosec + return state.environment["AUTOSCALING_EC2_REGION_NAME"] + + +async def get_bastion_instance_from_remote_instance( + state: AppState, remote_instance: Instance +) -> Instance: + availability_zone = remote_instance.placement["AvailabilityZone"] + if cluster_keeper_region(state) in availability_zone: + return await get_computational_bastion_instance(state) + if autoscaling_region(state) in availability_zone: + return await get_dynamic_bastion_instance(state) + msg = "no corresponding bastion instance!" 
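+    # neither the clusters-keeper region nor the autoscaling region matched this instance's availability zone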
+ raise RuntimeError(msg) + + +def cluster_keeper_ec2_client(state: AppState) -> EC2ServiceResource: + return boto3.resource( + "ec2", + region_name=cluster_keeper_region(state), + aws_access_key_id=state.environment["CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID"], + aws_secret_access_key=state.environment[ + "CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY" + ], + ) + + +def autoscaling_ec2_client(state: AppState) -> EC2ServiceResource: + return boto3.resource( + "ec2", + region_name=autoscaling_region(state), + aws_access_key_id=state.environment["AUTOSCALING_EC2_ACCESS_KEY_ID"], + aws_secret_access_key=state.environment["AUTOSCALING_EC2_SECRET_ACCESS_KEY"], + ) diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/models.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/models.py new file mode 100644 index 00000000000..84e992294d5 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/models.py @@ -0,0 +1,121 @@ +import datetime +import uuid +from collections import namedtuple +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path +from typing import Any, TypeAlias + +import parse +from mypy_boto3_ec2 import EC2ServiceResource +from mypy_boto3_ec2.service_resource import Instance +from pydantic import BaseModel, ByteSize, PostgresDsn + + +@dataclass(kw_only=True, frozen=True, slots=True) +class BastionHost: + ip: str + user_name: str + + +@dataclass(kw_only=True) +class AppState: + environment: dict[str, str | None] = field(default_factory=dict) + ec2_resource_autoscaling: EC2ServiceResource | None = None + ec2_resource_clusters_keeper: EC2ServiceResource | None = None + dynamic_parser: parse.Parser + computational_parser_primary: parse.Parser + computational_parser_workers: parse.Parser + deploy_config: Path | None = None + ssh_key_path: Path | None = None + main_bastion_host: BastionHost | None = None + + computational_bastion: Instance | None = None + dynamic_bastion: Instance | None = None + + +@dataclass(slots=True, kw_only=True) +class AutoscaledInstance: + name: str + ec2_instance: Instance + disk_space: ByteSize + + +class InstanceRole(str, Enum): + manager = "manager" + worker = "worker" + + +@dataclass(slots=True, kw_only=True) +class ComputationalInstance(AutoscaledInstance): + role: InstanceRole + user_id: int + wallet_id: int + last_heartbeat: datetime.datetime | None + dask_ip: str + + +@dataclass +class DynamicService: + node_id: str + user_id: int + project_id: str + service_name: str + service_version: str + created_at: datetime.datetime + needs_manual_intervention: bool + containers: list[str] + + +@dataclass(slots=True, kw_only=True) +class DynamicInstance(AutoscaledInstance): + running_services: list[DynamicService] + + +TaskId: TypeAlias = str +TaskState: TypeAlias = str + + +@dataclass(slots=True, kw_only=True, frozen=True) +class ComputationalTask: + project_id: uuid.UUID + node_id: uuid.UUID + job_id: TaskId | None + service_name: str + service_version: str + state: str + + +@dataclass(slots=True, kw_only=True, frozen=True) +class DaskTask: + job_id: TaskId + state: TaskState + + +@dataclass(frozen=True, slots=True, kw_only=True) +class ComputationalCluster: + primary: ComputationalInstance + workers: list[ComputationalInstance] + + scheduler_info: dict[str, Any] + datasets: tuple[str, ...] 
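+    # names of datasets published on the dask scheduler (see dask.get_scheduler_details)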
+ processing_jobs: dict[str, str] + task_states_to_tasks: dict[str, list[TaskState]] + + +DockerContainer = namedtuple( # noqa: PYI024 + "docker_container", + [ + "node_id", + "user_id", + "project_id", + "created_at", + "name", + "service_name", + "service_version", + ], +) + + +class PostgresDB(BaseModel): + dsn: PostgresDsn diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py new file mode 100644 index 00000000000..88486c37428 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py @@ -0,0 +1,284 @@ +import contextlib +import datetime +import json +import logging +import re +from collections import defaultdict +from collections.abc import AsyncGenerator, Generator +from pathlib import Path +from typing import Any, Final + +import arrow +import paramiko +import rich +import typer +from mypy_boto3_ec2.service_resource import Instance +from paramiko import Ed25519Key +from pydantic import ByteSize +from sshtunnel import SSHTunnelForwarder + +from .constants import DYN_SERVICES_NAMING_CONVENTION +from .ec2 import get_bastion_instance_from_remote_instance +from .models import AppState, DockerContainer, DynamicService + +_DEFAULT_SSH_PORT: Final[int] = 22 +_LOCAL_BIND_ADDRESS: Final[str] = "127.0.0.1" + +_logger = logging.getLogger(__name__) + + +@contextlib.contextmanager +def ssh_tunnel( + *, + ssh_host: str, + username: str, + private_key_path: Path, + remote_bind_host: str, + remote_bind_port: int, +) -> Generator[SSHTunnelForwarder | None, Any, None]: + try: + with SSHTunnelForwarder( + (ssh_host, _DEFAULT_SSH_PORT), + ssh_username=username, + ssh_pkey=Ed25519Key(filename=private_key_path), + remote_bind_address=(remote_bind_host, remote_bind_port), + local_bind_address=(_LOCAL_BIND_ADDRESS, 0), + set_keepalive=10, + ) as tunnel: + yield tunnel + except Exception: + _logger.exception("Unexpected issue with ssh tunnel") + raise + finally: + pass + + +@contextlib.contextmanager +def _ssh_client( + hostname: str, port: int, *, username: str, private_key_path: Path +) -> Generator[paramiko.SSHClient, Any, None]: + try: + with paramiko.SSHClient() as client: + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect( + hostname, + port, + username=username, + key_filename=f"{private_key_path}", + timeout=5, + ) + yield client + except Exception: + _logger.exception("Unexpected issue with ssh client") + raise + finally: + pass + + +@contextlib.asynccontextmanager +async def ssh_instance( + instance: Instance, *, state: AppState, username: str, private_key_path: Path +) -> AsyncGenerator[paramiko.SSHClient, Any]: + """ssh in instance with/without tunnel as needed""" + assert state.ssh_key_path # nosec + try: + async with contextlib.AsyncExitStack() as stack: + if instance.public_ip_address: + hostname = instance.public_ip_address + port = _DEFAULT_SSH_PORT + else: + assert state.environment + bastion_instance = await get_bastion_instance_from_remote_instance( + state, instance + ) + tunnel = stack.enter_context( + ssh_tunnel( + ssh_host=bastion_instance.public_dns_name, + username=username, + private_key_path=state.ssh_key_path, + remote_bind_host=instance.private_ip_address, + remote_bind_port=_DEFAULT_SSH_PORT, + ) + ) + assert tunnel # nosec + hostname, port = tunnel.local_bind_address + ssh_client = stack.enter_context( + _ssh_client( + hostname, + port, + username=username, + private_key_path=private_key_path, + ) + ) + yield ssh_client + 
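+        # NOTE: the ssh client (and tunnel, if any) are closed by the AsyncExitStack when the caller exits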
+ finally: + pass + + +async def get_available_disk_space( + state: AppState, instance: Instance, username: str, private_key_path: Path +) -> ByteSize: + assert state.ssh_key_path + + try: + async with ssh_instance( + instance, state=state, username=username, private_key_path=private_key_path + ) as ssh_client: + # Command to get disk space for /docker partition + disk_space_command = "df --block-size=1 /mnt/docker | awk 'NR==2{print $4}'" + + # Run the command on the remote machine + _, stdout, stderr = ssh_client.exec_command(disk_space_command) + exit_status = stdout.channel.recv_exit_status() + error = stderr.read().decode() + + if exit_status != 0: + rich.print(error) + raise typer.Abort(error) + + # Available disk space will be captured here + available_space = stdout.read().decode("utf-8").strip() + return ByteSize(available_space if available_space else 0) + except ( + paramiko.AuthenticationException, + paramiko.SSHException, + TimeoutError, + ): + return ByteSize(0) + + +async def get_dask_ip( + state: AppState, instance: Instance, username: str, private_key_path: Path +) -> str: + try: + async with ssh_instance( + instance, state=state, username=username, private_key_path=private_key_path + ) as ssh_client: + # First, get the container IDs for dask-sidecar or dask-scheduler + list_containers_command = "docker ps --filter 'name=dask-sidecar|dask-scheduler' --format '{{.ID}}'" + _, stdout, stderr = ssh_client.exec_command(list_containers_command) + container_ids = stdout.read().decode("utf-8").strip() + exit_status = stdout.channel.recv_exit_status() + + if exit_status != 0 or not container_ids: + error_message = stderr.read().decode().strip() + _logger.warning( + "No matching containers found or command failed with exit status %s: %s", + exit_status, + error_message, + ) + return "No Containers Found / Not Ready" + + # If containers are found, inspect their IP addresses + dask_ip_command = ( + "docker inspect -f '{{.NetworkSettings.Networks.dask_stack_cluster.IPAddress}}' " + f"{container_ids}" + ) + _, stdout, stderr = ssh_client.exec_command(dask_ip_command) + exit_status = stdout.channel.recv_exit_status() + + if exit_status != 0: + error_message = stderr.read().decode().strip() + _logger.error( + "Inspecting Dask IP command failed with exit status %s: %s", + exit_status, + error_message, + ) + return "Not docker network Found / Drained / Not Ready" + + ip_address = stdout.read().decode("utf-8").strip() + if not ip_address: + _logger.error("Dask IP address not found in the output") + return "Not IP Found / Drained / Not Ready" + + return ip_address + except ( + paramiko.AuthenticationException, + paramiko.SSHException, + TimeoutError, + ): + return "Not Ready" + + +async def list_running_dyn_services( + state: AppState, instance: Instance, username: str, private_key_path: Path +) -> list[DynamicService]: + try: + async with ssh_instance( + instance, state=state, username=username, private_key_path=private_key_path + ) as ssh_client: + # Run the Docker command to list containers + _stdin, stdout, stderr = ssh_client.exec_command( + 'docker ps --format=\'{{.Names}}\t{{.CreatedAt}}\t{{.Label "io.simcore.runtime.user-id"}}\t{{.Label "io.simcore.runtime.project-id"}}\t{{.Label "io.simcore.name"}}\t{{.Label "io.simcore.version"}}\' --filter=name=dy-', + ) + exit_status = stdout.channel.recv_exit_status() + error = stderr.read().decode() + if exit_status != 0: + rich.print(error) + raise typer.Abort(error) + + output = stdout.read().decode("utf-8") + # Extract containers that follow 
the naming convention + running_service: dict[str, list[DockerContainer]] = defaultdict(list) + for container in output.splitlines(): + if match := re.match(DYN_SERVICES_NAMING_CONVENTION, container): + named_container = DockerContainer( + match["node_id"], + int(match["user_id"]), + match["project_id"], + arrow.get( + match["created_at"], + "YYYY-MM-DD HH:mm:ss", + tzinfo=datetime.UTC, + ).datetime, + container, + ( + json.loads(match["service_name"])["name"] + if match["service_name"] + else "" + ), + ( + json.loads(match["service_version"])["version"] + if match["service_version"] + else "" + ), + ) + running_service[match["node_id"]].append(named_container) + + def _needs_manual_intervention( + running_containers: list[DockerContainer], + ) -> bool: + valid_prefixes = ["dy-sidecar_", "dy-proxy_", "dy-sidecar-"] + for prefix in valid_prefixes: + found = any( + container.name.startswith(prefix) + for container in running_containers + ) + if not found: + return True + return False + + return [ + DynamicService( + node_id=node_id, + user_id=containers[0].user_id, + project_id=containers[0].project_id, + created_at=containers[0].created_at, + needs_manual_intervention=_needs_manual_intervention(containers) + and ( + (arrow.utcnow().datetime - containers[0].created_at) + > datetime.timedelta(minutes=2) + ), + containers=[c.name for c in containers], + service_name=containers[0].service_name, + service_version=containers[0].service_version, + ) + for node_id, containers in running_service.items() + ] + except ( + paramiko.AuthenticationException, + paramiko.SSHException, + TimeoutError, + ): + return [] diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/utils.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/utils.py new file mode 100644 index 00000000000..29c2225db80 --- /dev/null +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/utils.py @@ -0,0 +1,64 @@ +import asyncio +import datetime +import functools +from typing import Awaitable, Callable, ParamSpec, TypeVar + +import arrow +from mypy_boto3_ec2.service_resource import Instance + +from .constants import DANGER, HOUR + + +def timedelta_formatting( + time_diff: datetime.timedelta, *, color_code: bool = False +) -> str: + formatted_time_diff = f"{time_diff.days} day(s), " if time_diff.days > 0 else "" + formatted_time_diff += f"{time_diff.seconds // 3600:02}:{(time_diff.seconds // 60) % 60:02}:{time_diff.seconds % 60:02}" + if time_diff.days and color_code: + formatted_time_diff = f"[red]{formatted_time_diff}[/red]" + elif (time_diff.seconds > 5 * HOUR) and color_code: + formatted_time_diff = f"[orange]{formatted_time_diff}[/orange]" + return formatted_time_diff + + +def get_instance_name(instance: Instance) -> str: + for tag in instance.tags: + assert "Key" in tag # nosec + if tag["Key"] == "Name": + return tag.get("Value", "unknown") + return "unknown" + + +def get_last_heartbeat(instance: Instance) -> datetime.datetime | None: + for tag in instance.tags: + assert "Key" in tag # nosec + if tag["Key"] == "last_heartbeat": + assert "Value" in tag # nosec + return arrow.get(tag["Value"]).datetime + return None + + +def color_encode_with_state(string: str, ec2_instance: Instance) -> str: + return ( + f"[green]{string}[/green]" + if ec2_instance.state["Name"] == "running" + else f"[yellow]{string}[/yellow]" + ) + + +def color_encode_with_threshold(string: str, value, threshold) -> str: + return string if value > threshold else DANGER.format(string) + + +P = ParamSpec("P") +R = 
TypeVar("R") + + +def to_async(func: Callable[P, R]) -> Callable[P, Awaitable[R]]: + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]: + loop = asyncio.get_running_loop() + partial_func = functools.partial(func, *args, **kwargs) + return loop.run_in_executor(None, partial_func) + + return wrapper diff --git a/scripts/maintenance/computational-clusters/pyproject.toml b/scripts/maintenance/computational-clusters/pyproject.toml new file mode 100644 index 00000000000..cdef0453285 --- /dev/null +++ b/scripts/maintenance/computational-clusters/pyproject.toml @@ -0,0 +1,39 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +dependencies = [ + "arrow", + "aiocache", + "asyncpg", + "black", + "boto3", + # NOTE: these must be in sync with ospar + "cloudpickle", + "dask[distributed]", + "mypy_boto3_ec2", + "types-boto3", + "parse", + "paramiko", + "pydantic[email]", + "pylint", + "python-dotenv", + "typer", + "rich", + "sqlalchemy[asyncio]", + "sshtunnel", + "ansible>=10.7.0", + "lz4", +] +name = "autoscaled-monitor" +version = "1.0.0" +authors = [ + { name = "Sylvain Anderegg", email = "35365065+sanderegg@users.noreply.github.com" }, +] +description = "Helper script for monitoring clusters" +readme = "README.md" +requires-python = ">=3.10" + +[project.scripts] +autoscaled-monitor = "autoscaled_monitor.cli:app" diff --git a/scripts/maintenance/computational-clusters/test.txt b/scripts/maintenance/computational-clusters/test.txt new file mode 100644 index 00000000000..48fdfe03bd1 --- /dev/null +++ b/scripts/maintenance/computational-clusters/test.txt @@ -0,0 +1,136 @@ + dynamic autoscaled instances: us-east-1 +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃Instance ┃Running services ┃ +┑━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +β”‚autoscaling-osparc-production-05-09-2023β”‚n/a β”‚ +β”‚ID: i-0fc848eca40438f07 β”‚ β”‚ +β”‚AMI: ami-079d2dddff7ec5bf2 β”‚ β”‚ +β”‚Type: g4dn.8xlarge β”‚ β”‚ +β”‚Up: 00:21:54 β”‚ β”‚ +β”‚ExtIP: 174.129.68.135 β”‚ β”‚ +β”‚IntIP: 10.0.2.234 β”‚ β”‚ +β”‚/mnt/docker(free): 374.6GiB β”‚ β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-234%22&rangetype=relative&from=4914 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0913988e3c27955f0 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 
│┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚264089β”‚e3348fe4-29a3-11f0-aed8-0242ac174a19β”‚ad4cf922-b8c2-405a-8768-2551b9154b0bβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:27:00 β”‚False β”‚β”‚ +β”‚Up: 00:44:26 β”‚β”‚263109β”‚61f6d2a8-29a2-11f0-a827-0242ac174a28β”‚2115738c-8f43-4465-9e62-9d3ad3ea5eddβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:39:14 β”‚False β”‚β”‚ +β”‚ExtIP: 54.147.185.53 β”‚β”‚263979β”‚0e813c6c-29a2-11f0-aed8-0242ac174a19β”‚9cadd29d-ffa0-4d64-8529-6e62da8b9cfaβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:40:35 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.3.210 β”‚β”‚263315β”‚31818cd0-29a2-11f0-aed8-0242ac174a19β”‚234c8b03-ae6f-44b7-83fd-2e8d50e99c18β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:40:39 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 374.3GiB β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-3-210%22&rangetype=relative&from=6266 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0acafcf6510669994 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263935β”‚e1255fee-29a3-11f0-a827-0242ac174a28β”‚9dc8bc8d-71e5-46ec-be96-1a7ddc279ab9β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:20:05 β”‚False β”‚β”‚ +β”‚Up: 00:29:42 β”‚β”‚264097β”‚06d5524e-29a4-11f0-aed8-0242ac174a19β”‚fb765bc1-210d-4264-ab4a-13d3904b1528β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:20:11 β”‚False β”‚β”‚ +β”‚ExtIP: 54.227.61.178 β”‚β”‚264093β”‚8095a3c0-2983-11f0-aed8-0242ac174a19β”‚b7c87b97-ddf8-40fd-95f3-040bcf865fa0β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:20:11 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.3.38 β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚/mnt/docker(free): 374.3GiB β”‚ β”‚ +β”‚Graylog: 
β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-3-38%22&rangetype=relative&from=5382 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0b5932e8a1bc9031f │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚264074β”‚60b20a22-297d-11f0-aed8-0242ac174a19β”‚6f7013f7-7c28-4f81-808c-bd6b4ed8362fβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:37:34 β”‚False β”‚β”‚ +β”‚Up: 11:47:08 β”‚β”‚263112β”‚13f0d568-29a2-11f0-aed8-0242ac174a19β”‚e90dc5a8-1d14-4064-913b-e4331f21716fβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:41:21 β”‚False β”‚β”‚ +β”‚ExtIP: 18.208.194.214 β”‚β”‚263127β”‚b337ba8e-29a1-11f0-a827-0242ac174a28β”‚a064c804-88c9-4e2a-8b03-1ca09ed9f47dβ”‚s4l-core-liteβ”‚3.2.179 β”‚00:43:37 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.124 β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚/mnt/docker(free): 374.3GiB β”‚ β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-124%22&rangetype=relative&from=46028 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-04e1f9236667090b3 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263122β”‚a2a09aa2-24e6-11f0-aed8-0242ac174a19β”‚c3a03f74-a815-46cf-9752-67e6c7312dabβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:21:53 β”‚False β”‚β”‚ +β”‚Up: 08:28:31 
β”‚β”‚263070β”‚08d1b382-2984-11f0-a827-0242ac174a28β”‚09a3716c-2c83-4af0-a306-e075681bacb8β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:45:52 β”‚False β”‚β”‚ +β”‚ExtIP: 3.90.239.149 β”‚β”‚150025β”‚574acd4c-29a1-11f0-a827-0242ac174a28β”‚527b3c75-a636-40e2-8626-107eeb97bd61β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:46:37 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.112 β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚/mnt/docker(free): 372.1GiB β”‚ β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-112%22&rangetype=relative&from=34111 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0e48c0c98a14ef0d5 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚262998β”‚16ccb350-29a3-11f0-aed8-0242ac174a19β”‚4fdf649d-5e17-4a60-9682-522a818e7325β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:33:58 β”‚False β”‚β”‚ +β”‚Up: 00:40:35 β”‚β”‚263110β”‚57691cfa-24e4-11f0-aed8-0242ac174a19β”‚0a195c3f-2936-47a3-9d11-52a684a64d03β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:34:03 β”‚False β”‚β”‚ +β”‚ExtIP: 54.196.216.2 β”‚β”‚264024β”‚a0d3215c-29a2-11f0-a827-0242ac174a28β”‚9e72dab1-307e-4b7d-8442-1dd06429f5dcβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:36:15 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.186 β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚/mnt/docker(free): 372.1GiB β”‚ β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-186%22&rangetype=relative&from=6035 β”‚ 
+β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-052ac050917d3a7f7 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚264000β”‚6638c0d6-29a4-11f0-a827-0242ac174a28β”‚b3362fc4-7e55-4018-af5c-66d06fae5dbfβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:21:46 β”‚False β”‚β”‚ +β”‚Up: 00:37:43 β”‚β”‚264091β”‚70839206-29a3-11f0-aed8-0242ac174a19β”‚41d6e45b-1f65-4fee-9a4c-f8ce0f0e7cdeβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:31:37 β”‚False β”‚β”‚ +β”‚ExtIP: 34.224.79.128 β”‚β”‚263008β”‚63f0b56e-29a3-11f0-aed8-0242ac174a19β”‚073d4170-2fdf-45a0-bb2b-f237d4419dfbβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:31:50 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.222 β”‚β”‚262997β”‚4088f7ee-2917-11f0-a827-0242ac174a28β”‚d15d2585-2ca7-4102-9983-a1ed7a3d4822β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:32:23 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 371.9GiB β”‚β”‚264125β”‚376386e8-29a3-11f0-aed8-0242ac174a19β”‚1ac04ca3-1970-4dc5-b36a-8d07420b46b7β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:32:43 β”‚False β”‚β”‚ +β”‚ β”‚β”‚263090β”‚4d35053c-29a3-11f0-a827-0242ac174a28β”‚8a7d6ec6-2bf7-4039-9693-f6bbb3e652b9β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:32:43 β”‚False β”‚β”‚ +β”‚ β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-222%22&rangetype=relative&from=5863 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-009c64beb6bcc8fb4 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 
│┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263009β”‚871ada56-29a3-11f0-aed8-0242ac174a19β”‚1041437f-8a06-4a65-a57c-968288225937β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:30:28 β”‚False β”‚β”‚ +β”‚Up: 00:33:34 β”‚β”‚263650β”‚708654c8-29a3-11f0-a827-0242ac174a28β”‚7f4ab81a-2cc1-417c-819b-19a19d0522ceβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:30:31 β”‚False β”‚β”‚ +β”‚ExtIP: 54.160.131.70 β”‚β”‚263134β”‚5451c62a-29a3-11f0-a827-0242ac174a28β”‚05988ce7-a665-4213-adb6-d12bc26c6fddβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:30:31 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.122 β”‚β”‚263120β”‚79d7cdea-29a3-11f0-a827-0242ac174a28β”‚ed376707-a4e5-49df-b3a8-11ba7630bf9eβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:30:32 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 374.2GiB β”‚β”‚263000β”‚586e46d4-24e4-11f0-a827-0242ac174a28β”‚10eb12d2-af96-4b28-8242-08a34dddfedfβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:30:34 β”‚False β”‚β”‚ +β”‚ β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-122%22&rangetype=relative&from=5614 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-09d54cc2fc4e7753e │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263689β”‚8477f4be-29a3-11f0-a827-0242ac174a28β”‚928d1783-0884-420e-8331-fb595fe9c441β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:31 β”‚False β”‚β”‚ +β”‚Up: 00:31:51 β”‚β”‚264002β”‚879bbeb4-29a3-11f0-a827-0242ac174a28β”‚4ceadc56-6513-4368-baa1-372bb3db814dβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:35 β”‚False β”‚β”‚ +β”‚ExtIP: 54.197.194.60 β”‚β”‚263293β”‚9d036946-29a3-11f0-aed8-0242ac174a19β”‚66d1bfd5-9aff-437d-9a37-5c7316194bb9β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:36 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.150 β”‚β”‚263146β”‚90e7a168-29a3-11f0-a827-0242ac174a28β”‚77f80839-955e-4b0f-8157-6cfa63147cd8β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:38 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 374.2GiB β”‚β”‚264123β”‚8c01b44a-29a3-11f0-aed8-0242ac174a19β”‚1949baad-4e99-4975-bb99-e5b9c4aba266β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:38 β”‚False β”‚β”‚ +β”‚ 
β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-150%22&rangetype=relative&from=5511 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0bf06b6fc85aed572 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263132β”‚cd5ceb56-24e6-11f0-a827-0242ac174a28β”‚94d97140-26d4-420d-96d6-ad14bae8984cβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:28:59 β”‚False β”‚β”‚ +β”‚Up: 00:30:55 β”‚β”‚263124β”‚b7c61756-29a3-11f0-a827-0242ac174a28β”‚4946217a-d381-4c78-a5eb-576254af4efeβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:00 β”‚False β”‚β”‚ +β”‚ExtIP: 54.172.39.192 β”‚β”‚263129β”‚a8453bfe-29a3-11f0-aed8-0242ac174a19β”‚d3cbda78-2739-4529-9ea5-5b21fb81ec89β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:07 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.3.144 β”‚β”‚263114β”‚c6d7a926-24e4-11f0-aed8-0242ac174a19β”‚d4987ece-6eac-48b9-a340-63874482a03fβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:08 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 374.2GiB β”‚β”‚264121β”‚af8f3568-29a3-11f0-a827-0242ac174a28β”‚cc730efb-b6f4-4734-9d6e-41a83c01964fβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:29:12 β”‚False β”‚β”‚ +β”‚ β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-3-144%22&rangetype=relative&from=5455 β”‚ 
+β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0b870a6100e20c2d3 │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263126β”‚d4bf67d6-29a3-11f0-aed8-0242ac174a19β”‚c8802f4c-8cae-4857-9901-1c98864b6a62β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:27:23 β”‚False β”‚β”‚ +β”‚Up: 00:30:00 β”‚β”‚264120β”‚c64ab82c-29a3-11f0-a827-0242ac174a28β”‚d92e0aa0-2556-496a-9f89-8d44d36ff8faβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:28:03 β”‚False β”‚β”‚ +β”‚ExtIP: 3.80.29.143 β”‚β”‚263626β”‚c0272138-29a3-11f0-aed8-0242ac174a19β”‚67255148-236b-4627-a4ae-ceccaa8d149dβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:28:10 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.10 β”‚β”‚264124β”‚c1f52622-29a3-11f0-aed8-0242ac174a19β”‚cf621cc5-9738-4bd0-a3ae-183854e94d72β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:28:11 β”‚False β”‚β”‚ +β”‚/mnt/docker(free): 374.3GiB β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-10%22&rangetype=relative&from=5400 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚autoscaling-osparc-production-05-09-2023│┏━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓│ +β”‚ID: i-0b147bb88d6e4533c │┃UserID┃ProjectID ┃NodeID ┃ServiceName ┃ServiceVersion┃Created Since┃Need intervention┃│ +β”‚AMI: ami-079d2dddff7ec5bf2 │┑━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩│ +β”‚Type: g4dn.8xlarge β”‚β”‚263006β”‚a974cf08-29a3-11f0-a827-0242ac174a28β”‚81768666-a3d3-4d94-9105-27206e05ade5β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:18:57 β”‚False β”‚β”‚ +β”‚Up: 00:26:54 
β”‚β”‚263087β”‚a6d59188-29a3-11f0-aed8-0242ac174a19β”‚028747fc-dcb1-41ba-bde7-44bb4d637496β”‚Sim4Life Liteβ”‚3.2.179 β”‚00:20:27 β”‚False β”‚β”‚ +β”‚ExtIP: 54.91.192.121 β”‚β”‚263960β”‚69c58dec-29a4-11f0-a827-0242ac174a28β”‚976259c4-f376-414b-9151-d81e3890f49cβ”‚Sim4Life Liteβ”‚3.2.179 β”‚00:20:35 β”‚False β”‚β”‚ +β”‚IntIP: 10.0.2.254 β”‚β””β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜β”‚ +β”‚/mnt/docker(free): 374.3GiB β”‚ β”‚ +β”‚Graylog: β”‚https://monitoring.osparc.io/graylog/search?q=source%3A%22ip-10-0-2-254%22&rangetype=relative&from=5214 β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚Intervention detection might show false positive if in transient state, be careful and always double-check!! β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + computational clusters: us-east-1 +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃Instance ┃Computational details ┃ +┑━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ diff --git a/scripts/maintenance/computational-clusters/uv.lock b/scripts/maintenance/computational-clusters/uv.lock new file mode 100644 index 00000000000..45edc627207 --- /dev/null +++ b/scripts/maintenance/computational-clusters/uv.lock @@ -0,0 +1,1514 @@ +version = 1 +revision = 2 +requires-python = ">=3.10" +resolution-markers = [ + 
"python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "aiocache" +version = "0.12.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7a/64/b945b8025a9d1e6e2138845f4022165d3b337f55f50984fbc6a4c0a1e355/aiocache-0.12.3.tar.gz", hash = "sha256:f528b27bf4d436b497a1d0d1a8f59a542c153ab1e37c3621713cb376d44c4713", size = 132196, upload-time = "2024-09-25T13:20:23.823Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/d7/15d67e05b235d1ed8c3ce61688fe4d84130e72af1657acadfaac3479f4cf/aiocache-0.12.3-py2.py3-none-any.whl", hash = "sha256:889086fc24710f431937b87ad3720a289f7fc31c4fd8b68e9f918b9bacd8270d", size = 28199, upload-time = "2024-09-25T13:20:22.688Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "ansible" +version = "10.7.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11'", +] +dependencies = [ + { name = "ansible-core", version = "2.17.12", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d4/64/29fdff6fe7682342adb54802c1cd90b2272d382e1743089af88f90a1d986/ansible-10.7.0.tar.gz", hash = "sha256:59d29e3de1080e740dfa974517d455217601b16d16880314d9be26145c68dc22", size = 41256974, upload-time = "2024-12-03T18:04:25.794Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/95/cb8944902a2cdd94b1e19ff73695548679a388b9c473dc63c8dc64ffea3a/ansible-10.7.0-py3-none-any.whl", hash = "sha256:0089f08e047ceb70edd011be009f5c6273add613fbe491e9697c0556c989d8ea", size = 51576038, upload-time = "2024-12-03T18:04:20.065Z" }, +] + +[[package]] +name = "ansible" +version = "11.6.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", +] +dependencies = [ + { name = "ansible-core", version = "2.18.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/6f/b491cd89e0393810b67598098ccb6a204d6a9202c9733a541568f69f6dea/ansible-11.6.0.tar.gz", hash = "sha256:934a948caa3ec1a3eb277e7ab1638b808b074a6e0c46045794cde7b637e275d8", size = 44015165, upload-time = "2025-05-20T20:28:24.184Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/92/8aebdbdd4574d337e47ebb7171fdc83095b82c255f8362f96681b113b79d/ansible-11.6.0-py3-none-any.whl", hash = "sha256:5b9c19d6a1080011c14c821bc7e6f8fd5b2a392219cbf2ced9be05e6d447d8cd", size = 55488595, upload-time = "2025-05-20T20:28:17.672Z" }, +] + +[[package]] +name = "ansible-core" +version = "2.17.12" +source = { registry = "https://pypi.org/simple" } 
+resolution-markers = [ + "python_full_version < '3.11'", +] +dependencies = [ + { name = "cryptography", marker = "python_full_version < '3.11'" }, + { name = "jinja2", marker = "python_full_version < '3.11'" }, + { name = "packaging", marker = "python_full_version < '3.11'" }, + { name = "pyyaml", marker = "python_full_version < '3.11'" }, + { name = "resolvelib", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/b2/f662d40226acaa504b185969255846ac5187c02a4bb2631954db5db60159/ansible_core-2.17.12.tar.gz", hash = "sha256:24fb30783fcd3e800b839b15a396a1f9d622c007bc358e98f2992156ace52671", size = 3118286, upload-time = "2025-05-19T17:36:21.935Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/89/2887a65bdc2305a191df9f84260a7fdee65e960b5652dbf8ae4c1d7efc5d/ansible_core-2.17.12-py3-none-any.whl", hash = "sha256:cb74f3a148b77fa0c89a284e48e7515d13fda10ad8c789eb92274c72f017a9a0", size = 2188903, upload-time = "2025-05-19T17:36:19.826Z" }, +] + +[[package]] +name = "ansible-core" +version = "2.18.6" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", +] +dependencies = [ + { name = "cryptography", marker = "python_full_version >= '3.11'" }, + { name = "jinja2", marker = "python_full_version >= '3.11'" }, + { name = "packaging", marker = "python_full_version >= '3.11'" }, + { name = "pyyaml", marker = "python_full_version >= '3.11'" }, + { name = "resolvelib", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/1e/c5d52171ae2b86689e3ef9e4f578c605a7f53a862d1e9fe8c254deb75fe1/ansible_core-2.18.6.tar.gz", hash = "sha256:25bb20ce1516a1b7307831b263cef684043b3720711466bd9d4164e5fd576557", size = 3088072, upload-time = "2025-05-19T16:59:59.234Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/b7/2ca5a126486a5323dde87cc43b207e926f3f3bce0b5758395308de3f146d/ansible_core-2.18.6-py3-none-any.whl", hash = "sha256:12a34749a7b20f0f1536bd3e3b2e137341867e4642e351273e96647161f595c0", size = 2208798, upload-time = "2025-05-19T16:59:57.372Z" }, +] + +[[package]] +name = "arrow" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "types-python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/00/0f6e8fcdb23ea632c866620cc872729ff43ed91d284c866b515c6342b173/arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85", size = 131960, upload-time = "2023-09-30T22:11:18.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/ed/e97229a566617f2ae958a6b13e7cc0f585470eac730a73e9e82c32a3cdd2/arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80", size = 66419, upload-time = "2023-09-30T22:11:16.072Z" }, +] + +[[package]] +name = "astroid" +version = "3.3.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/c2/9b2de9ed027f9fe5734a6c0c0a601289d796b3caaf1e372e23fa88a73047/astroid-3.3.10.tar.gz", hash = "sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce", size = 398941, upload-time = "2025-05-10T13:33:10.405Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/15/58/5260205b9968c20b6457ed82f48f9e3d6edf2f1f95103161798b73aeccf0/astroid-3.3.10-py3-none-any.whl", hash = "sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb", size = 275388, upload-time = "2025-05-10T13:33:08.391Z" }, +] + +[[package]] +name = "async-timeout" +version = "5.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/ae/136395dfbfe00dfc94da3f3e136d0b13f394cba8f4841120e34226265780/async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3", size = 9274, upload-time = "2024-11-06T16:41:39.6Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" }, +] + +[[package]] +name = "asyncpg" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "async-timeout", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/4c/7c991e080e106d854809030d8584e15b2e996e26f16aee6d757e387bc17d/asyncpg-0.30.0.tar.gz", hash = "sha256:c551e9928ab6707602f44811817f82ba3c446e018bfe1d3abecc8ba5f3eac851", size = 957746, upload-time = "2024-10-20T00:30:41.127Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/07/1650a8c30e3a5c625478fa8aafd89a8dd7d85999bf7169b16f54973ebf2c/asyncpg-0.30.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bfb4dd5ae0699bad2b233672c8fc5ccbd9ad24b89afded02341786887e37927e", size = 673143, upload-time = "2024-10-20T00:29:08.846Z" }, + { url = "https://files.pythonhosted.org/packages/a0/9a/568ff9b590d0954553c56806766914c149609b828c426c5118d4869111d3/asyncpg-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc1f62c792752a49f88b7e6f774c26077091b44caceb1983509edc18a2222ec0", size = 645035, upload-time = "2024-10-20T00:29:12.02Z" }, + { url = "https://files.pythonhosted.org/packages/de/11/6f2fa6c902f341ca10403743701ea952bca896fc5b07cc1f4705d2bb0593/asyncpg-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3152fef2e265c9c24eec4ee3d22b4f4d2703d30614b0b6753e9ed4115c8a146f", size = 2912384, upload-time = "2024-10-20T00:29:13.644Z" }, + { url = "https://files.pythonhosted.org/packages/83/83/44bd393919c504ffe4a82d0aed8ea0e55eb1571a1dea6a4922b723f0a03b/asyncpg-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7255812ac85099a0e1ffb81b10dc477b9973345793776b128a23e60148dd1af", size = 2947526, upload-time = "2024-10-20T00:29:15.871Z" }, + { url = "https://files.pythonhosted.org/packages/08/85/e23dd3a2b55536eb0ded80c457b0693352262dc70426ef4d4a6fc994fa51/asyncpg-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:578445f09f45d1ad7abddbff2a3c7f7c291738fdae0abffbeb737d3fc3ab8b75", size = 2895390, upload-time = "2024-10-20T00:29:19.346Z" }, + { url = "https://files.pythonhosted.org/packages/9b/26/fa96c8f4877d47dc6c1864fef5500b446522365da3d3d0ee89a5cce71a3f/asyncpg-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c42f6bb65a277ce4d93f3fba46b91a265631c8df7250592dd4f11f8b0152150f", size = 3015630, upload-time = "2024-10-20T00:29:21.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/34/00/814514eb9287614188a5179a8b6e588a3611ca47d41937af0f3a844b1b4b/asyncpg-0.30.0-cp310-cp310-win32.whl", hash = "sha256:aa403147d3e07a267ada2ae34dfc9324e67ccc4cdca35261c8c22792ba2b10cf", size = 568760, upload-time = "2024-10-20T00:29:22.769Z" }, + { url = "https://files.pythonhosted.org/packages/f0/28/869a7a279400f8b06dd237266fdd7220bc5f7c975348fea5d1e6909588e9/asyncpg-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb622c94db4e13137c4c7f98834185049cc50ee01d8f657ef898b6407c7b9c50", size = 625764, upload-time = "2024-10-20T00:29:25.882Z" }, + { url = "https://files.pythonhosted.org/packages/4c/0e/f5d708add0d0b97446c402db7e8dd4c4183c13edaabe8a8500b411e7b495/asyncpg-0.30.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5e0511ad3dec5f6b4f7a9e063591d407eee66b88c14e2ea636f187da1dcfff6a", size = 674506, upload-time = "2024-10-20T00:29:27.988Z" }, + { url = "https://files.pythonhosted.org/packages/6a/a0/67ec9a75cb24a1d99f97b8437c8d56da40e6f6bd23b04e2f4ea5d5ad82ac/asyncpg-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:915aeb9f79316b43c3207363af12d0e6fd10776641a7de8a01212afd95bdf0ed", size = 645922, upload-time = "2024-10-20T00:29:29.391Z" }, + { url = "https://files.pythonhosted.org/packages/5c/d9/a7584f24174bd86ff1053b14bb841f9e714380c672f61c906eb01d8ec433/asyncpg-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c198a00cce9506fcd0bf219a799f38ac7a237745e1d27f0e1f66d3707c84a5a", size = 3079565, upload-time = "2024-10-20T00:29:30.832Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d7/a4c0f9660e333114bdb04d1a9ac70db690dd4ae003f34f691139a5cbdae3/asyncpg-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3326e6d7381799e9735ca2ec9fd7be4d5fef5dcbc3cb555d8a463d8460607956", size = 3109962, upload-time = "2024-10-20T00:29:33.114Z" }, + { url = "https://files.pythonhosted.org/packages/3c/21/199fd16b5a981b1575923cbb5d9cf916fdc936b377e0423099f209e7e73d/asyncpg-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:51da377487e249e35bd0859661f6ee2b81db11ad1f4fc036194bc9cb2ead5056", size = 3064791, upload-time = "2024-10-20T00:29:34.677Z" }, + { url = "https://files.pythonhosted.org/packages/77/52/0004809b3427534a0c9139c08c87b515f1c77a8376a50ae29f001e53962f/asyncpg-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc6d84136f9c4d24d358f3b02be4b6ba358abd09f80737d1ac7c444f36108454", size = 3188696, upload-time = "2024-10-20T00:29:36.389Z" }, + { url = "https://files.pythonhosted.org/packages/52/cb/fbad941cd466117be58b774a3f1cc9ecc659af625f028b163b1e646a55fe/asyncpg-0.30.0-cp311-cp311-win32.whl", hash = "sha256:574156480df14f64c2d76450a3f3aaaf26105869cad3865041156b38459e935d", size = 567358, upload-time = "2024-10-20T00:29:37.915Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0a/0a32307cf166d50e1ad120d9b81a33a948a1a5463ebfa5a96cc5606c0863/asyncpg-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:3356637f0bd830407b5597317b3cb3571387ae52ddc3bca6233682be88bbbc1f", size = 629375, upload-time = "2024-10-20T00:29:39.987Z" }, + { url = "https://files.pythonhosted.org/packages/4b/64/9d3e887bb7b01535fdbc45fbd5f0a8447539833b97ee69ecdbb7a79d0cb4/asyncpg-0.30.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c902a60b52e506d38d7e80e0dd5399f657220f24635fee368117b8b5fce1142e", size = 673162, upload-time = "2024-10-20T00:29:41.88Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/eb/8b236663f06984f212a087b3e849731f917ab80f84450e943900e8ca4052/asyncpg-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aca1548e43bbb9f0f627a04666fedaca23db0a31a84136ad1f868cb15deb6e3a", size = 637025, upload-time = "2024-10-20T00:29:43.352Z" }, + { url = "https://files.pythonhosted.org/packages/cc/57/2dc240bb263d58786cfaa60920779af6e8d32da63ab9ffc09f8312bd7a14/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c2a2ef565400234a633da0eafdce27e843836256d40705d83ab7ec42074efb3", size = 3496243, upload-time = "2024-10-20T00:29:44.922Z" }, + { url = "https://files.pythonhosted.org/packages/f4/40/0ae9d061d278b10713ea9021ef6b703ec44698fe32178715a501ac696c6b/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1292b84ee06ac8a2ad8e51c7475aa309245874b61333d97411aab835c4a2f737", size = 3575059, upload-time = "2024-10-20T00:29:46.891Z" }, + { url = "https://files.pythonhosted.org/packages/c3/75/d6b895a35a2c6506952247640178e5f768eeb28b2e20299b6a6f1d743ba0/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0f5712350388d0cd0615caec629ad53c81e506b1abaaf8d14c93f54b35e3595a", size = 3473596, upload-time = "2024-10-20T00:29:49.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/e7/3693392d3e168ab0aebb2d361431375bd22ffc7b4a586a0fc060d519fae7/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:db9891e2d76e6f425746c5d2da01921e9a16b5a71a1c905b13f30e12a257c4af", size = 3641632, upload-time = "2024-10-20T00:29:50.768Z" }, + { url = "https://files.pythonhosted.org/packages/32/ea/15670cea95745bba3f0352341db55f506a820b21c619ee66b7d12ea7867d/asyncpg-0.30.0-cp312-cp312-win32.whl", hash = "sha256:68d71a1be3d83d0570049cd1654a9bdfe506e794ecc98ad0873304a9f35e411e", size = 560186, upload-time = "2024-10-20T00:29:52.394Z" }, + { url = "https://files.pythonhosted.org/packages/7e/6b/fe1fad5cee79ca5f5c27aed7bd95baee529c1bf8a387435c8ba4fe53d5c1/asyncpg-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a0292c6af5c500523949155ec17b7fe01a00ace33b68a476d6b5059f9630305", size = 621064, upload-time = "2024-10-20T00:29:53.757Z" }, + { url = "https://files.pythonhosted.org/packages/3a/22/e20602e1218dc07692acf70d5b902be820168d6282e69ef0d3cb920dc36f/asyncpg-0.30.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05b185ebb8083c8568ea8a40e896d5f7af4b8554b64d7719c0eaa1eb5a5c3a70", size = 670373, upload-time = "2024-10-20T00:29:55.165Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b3/0cf269a9d647852a95c06eb00b815d0b95a4eb4b55aa2d6ba680971733b9/asyncpg-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c47806b1a8cbb0a0db896f4cd34d89942effe353a5035c62734ab13b9f938da3", size = 634745, upload-time = "2024-10-20T00:29:57.14Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6d/a4f31bf358ce8491d2a31bfe0d7bcf25269e80481e49de4d8616c4295a34/asyncpg-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b6fde867a74e8c76c71e2f64f80c64c0f3163e687f1763cfaf21633ec24ec33", size = 3512103, upload-time = "2024-10-20T00:29:58.499Z" }, + { url = "https://files.pythonhosted.org/packages/96/19/139227a6e67f407b9c386cb594d9628c6c78c9024f26df87c912fabd4368/asyncpg-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46973045b567972128a27d40001124fbc821c87a6cade040cfcd4fa8a30bcdc4", size = 3592471, upload-time = "2024-10-20T00:30:00.354Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/e4/ab3ca38f628f53f0fd28d3ff20edff1c975dd1cb22482e0061916b4b9a74/asyncpg-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9110df111cabc2ed81aad2f35394a00cadf4f2e0635603db6ebbd0fc896f46a4", size = 3496253, upload-time = "2024-10-20T00:30:02.794Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5f/0bf65511d4eeac3a1f41c54034a492515a707c6edbc642174ae79034d3ba/asyncpg-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04ff0785ae7eed6cc138e73fc67b8e51d54ee7a3ce9b63666ce55a0bf095f7ba", size = 3662720, upload-time = "2024-10-20T00:30:04.501Z" }, + { url = "https://files.pythonhosted.org/packages/e7/31/1513d5a6412b98052c3ed9158d783b1e09d0910f51fbe0e05f56cc370bc4/asyncpg-0.30.0-cp313-cp313-win32.whl", hash = "sha256:ae374585f51c2b444510cdf3595b97ece4f233fde739aa14b50e0d64e8a7a590", size = 560404, upload-time = "2024-10-20T00:30:06.537Z" }, + { url = "https://files.pythonhosted.org/packages/c8/a4/cec76b3389c4c5ff66301cd100fe88c318563ec8a520e0b2e792b5b84972/asyncpg-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:f59b430b8e27557c3fb9869222559f7417ced18688375825f8f12302c34e915e", size = 621623, upload-time = "2024-10-20T00:30:09.024Z" }, +] + +[[package]] +name = "autoscaled-monitor" +version = "1.0.0" +source = { virtual = "." } +dependencies = [ + { name = "aiocache" }, + { name = "ansible", version = "10.7.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "ansible", version = "11.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "arrow" }, + { name = "asyncpg" }, + { name = "black" }, + { name = "boto3" }, + { name = "cloudpickle" }, + { name = "dask", extra = ["distributed"] }, + { name = "lz4" }, + { name = "mypy-boto3-ec2" }, + { name = "paramiko" }, + { name = "parse" }, + { name = "pydantic", extra = ["email"] }, + { name = "pylint" }, + { name = "python-dotenv" }, + { name = "rich" }, + { name = "sqlalchemy", extra = ["asyncio"] }, + { name = "sshtunnel" }, + { name = "typer" }, + { name = "types-boto3" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiocache" }, + { name = "ansible", specifier = ">=10.7.0" }, + { name = "arrow" }, + { name = "asyncpg" }, + { name = "black" }, + { name = "boto3" }, + { name = "cloudpickle" }, + { name = "dask", extras = ["distributed"] }, + { name = "lz4" }, + { name = "mypy-boto3-ec2" }, + { name = "paramiko" }, + { name = "parse" }, + { name = "pydantic", extras = ["email"] }, + { name = "pylint" }, + { name = "python-dotenv" }, + { name = "rich" }, + { name = "sqlalchemy", extras = ["asyncio"] }, + { name = "sshtunnel" }, + { name = "typer" }, + { name = "types-boto3" }, +] + +[[package]] +name = "bcrypt" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/5d/6d7433e0f3cd46ce0b43cd65e1db465ea024dbb8216fb2404e919c2ad77b/bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18", size = 25697, upload-time = "2025-02-28T01:24:09.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/2c/3d44e853d1fe969d229bd58d39ae6902b3d924af0e2b5a60d17d4b809ded/bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281", size = 483719, upload-time = "2025-02-28T01:22:34.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/e2/58ff6e2a22eca2e2cff5370ae56dba29d70b1ea6fc08ee9115c3ae367795/bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb", size = 272001, upload-time = "2025-02-28T01:22:38.078Z" }, + { url = "https://files.pythonhosted.org/packages/37/1f/c55ed8dbe994b1d088309e366749633c9eb90d139af3c0a50c102ba68a1a/bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180", size = 277451, upload-time = "2025-02-28T01:22:40.787Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1c/794feb2ecf22fe73dcfb697ea7057f632061faceb7dcf0f155f3443b4d79/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f", size = 272792, upload-time = "2025-02-28T01:22:43.144Z" }, + { url = "https://files.pythonhosted.org/packages/13/b7/0b289506a3f3598c2ae2bdfa0ea66969812ed200264e3f61df77753eee6d/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09", size = 289752, upload-time = "2025-02-28T01:22:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/dc/24/d0fb023788afe9e83cc118895a9f6c57e1044e7e1672f045e46733421fe6/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d", size = 277762, upload-time = "2025-02-28T01:22:47.023Z" }, + { url = "https://files.pythonhosted.org/packages/e4/38/cde58089492e55ac4ef6c49fea7027600c84fd23f7520c62118c03b4625e/bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd", size = 272384, upload-time = "2025-02-28T01:22:49.221Z" }, + { url = "https://files.pythonhosted.org/packages/de/6a/d5026520843490cfc8135d03012a413e4532a400e471e6188b01b2de853f/bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af", size = 277329, upload-time = "2025-02-28T01:22:51.603Z" }, + { url = "https://files.pythonhosted.org/packages/b3/a3/4fc5255e60486466c389e28c12579d2829b28a527360e9430b4041df4cf9/bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231", size = 305241, upload-time = "2025-02-28T01:22:53.283Z" }, + { url = "https://files.pythonhosted.org/packages/c7/15/2b37bc07d6ce27cc94e5b10fd5058900eb8fb11642300e932c8c82e25c4a/bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c", size = 309617, upload-time = "2025-02-28T01:22:55.461Z" }, + { url = "https://files.pythonhosted.org/packages/5f/1f/99f65edb09e6c935232ba0430c8c13bb98cb3194b6d636e61d93fe60ac59/bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f", size = 335751, upload-time = "2025-02-28T01:22:57.81Z" }, + { url = "https://files.pythonhosted.org/packages/00/1b/b324030c706711c99769988fcb694b3cb23f247ad39a7823a78e361bdbb8/bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d", size = 355965, upload-time = "2025-02-28T01:22:59.181Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/dd/20372a0579dd915dfc3b1cd4943b3bca431866fcb1dfdfd7518c3caddea6/bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4", size = 155316, upload-time = "2025-02-28T01:23:00.763Z" }, + { url = "https://files.pythonhosted.org/packages/6d/52/45d969fcff6b5577c2bf17098dc36269b4c02197d551371c023130c0f890/bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669", size = 147752, upload-time = "2025-02-28T01:23:02.908Z" }, + { url = "https://files.pythonhosted.org/packages/11/22/5ada0b9af72b60cbc4c9a399fdde4af0feaa609d27eb0adc61607997a3fa/bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d", size = 498019, upload-time = "2025-02-28T01:23:05.838Z" }, + { url = "https://files.pythonhosted.org/packages/b8/8c/252a1edc598dc1ce57905be173328eda073083826955ee3c97c7ff5ba584/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b", size = 279174, upload-time = "2025-02-28T01:23:07.274Z" }, + { url = "https://files.pythonhosted.org/packages/29/5b/4547d5c49b85f0337c13929f2ccbe08b7283069eea3550a457914fc078aa/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e", size = 283870, upload-time = "2025-02-28T01:23:09.151Z" }, + { url = "https://files.pythonhosted.org/packages/be/21/7dbaf3fa1745cb63f776bb046e481fbababd7d344c5324eab47f5ca92dd2/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59", size = 279601, upload-time = "2025-02-28T01:23:11.461Z" }, + { url = "https://files.pythonhosted.org/packages/6d/64/e042fc8262e971347d9230d9abbe70d68b0a549acd8611c83cebd3eaec67/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753", size = 297660, upload-time = "2025-02-28T01:23:12.989Z" }, + { url = "https://files.pythonhosted.org/packages/50/b8/6294eb84a3fef3b67c69b4470fcdd5326676806bf2519cda79331ab3c3a9/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761", size = 284083, upload-time = "2025-02-28T01:23:14.5Z" }, + { url = "https://files.pythonhosted.org/packages/62/e6/baff635a4f2c42e8788fe1b1633911c38551ecca9a749d1052d296329da6/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb", size = 279237, upload-time = "2025-02-28T01:23:16.686Z" }, + { url = "https://files.pythonhosted.org/packages/39/48/46f623f1b0c7dc2e5de0b8af5e6f5ac4cc26408ac33f3d424e5ad8da4a90/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d", size = 283737, upload-time = "2025-02-28T01:23:18.897Z" }, + { url = "https://files.pythonhosted.org/packages/49/8b/70671c3ce9c0fca4a6cc3cc6ccbaa7e948875a2e62cbd146e04a4011899c/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f", size = 312741, upload-time = "2025-02-28T01:23:21.041Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/fb/910d3a1caa2d249b6040a5caf9f9866c52114d51523ac2fb47578a27faee/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732", size = 316472, upload-time = "2025-02-28T01:23:23.183Z" }, + { url = "https://files.pythonhosted.org/packages/dc/cf/7cf3a05b66ce466cfb575dbbda39718d45a609daa78500f57fa9f36fa3c0/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef", size = 343606, upload-time = "2025-02-28T01:23:25.361Z" }, + { url = "https://files.pythonhosted.org/packages/e3/b8/e970ecc6d7e355c0d892b7f733480f4aa8509f99b33e71550242cf0b7e63/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304", size = 362867, upload-time = "2025-02-28T01:23:26.875Z" }, + { url = "https://files.pythonhosted.org/packages/a9/97/8d3118efd8354c555a3422d544163f40d9f236be5b96c714086463f11699/bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51", size = 160589, upload-time = "2025-02-28T01:23:28.381Z" }, + { url = "https://files.pythonhosted.org/packages/29/07/416f0b99f7f3997c69815365babbc2e8754181a4b1899d921b3c7d5b6f12/bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62", size = 152794, upload-time = "2025-02-28T01:23:30.187Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c1/3fa0e9e4e0bfd3fd77eb8b52ec198fd6e1fd7e9402052e43f23483f956dd/bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3", size = 498969, upload-time = "2025-02-28T01:23:31.945Z" }, + { url = "https://files.pythonhosted.org/packages/ce/d4/755ce19b6743394787fbd7dff6bf271b27ee9b5912a97242e3caf125885b/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24", size = 279158, upload-time = "2025-02-28T01:23:34.161Z" }, + { url = "https://files.pythonhosted.org/packages/9b/5d/805ef1a749c965c46b28285dfb5cd272a7ed9fa971f970435a5133250182/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef", size = 284285, upload-time = "2025-02-28T01:23:35.765Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/698580547a4a4988e415721b71eb45e80c879f0fb04a62da131f45987b96/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b", size = 279583, upload-time = "2025-02-28T01:23:38.021Z" }, + { url = "https://files.pythonhosted.org/packages/f2/87/62e1e426418204db520f955ffd06f1efd389feca893dad7095bf35612eec/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676", size = 297896, upload-time = "2025-02-28T01:23:39.575Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c6/8fedca4c2ada1b6e889c52d2943b2f968d3427e5d65f595620ec4c06fa2f/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1", size = 284492, upload-time = "2025-02-28T01:23:40.901Z" }, + { url = 
"https://files.pythonhosted.org/packages/4d/4d/c43332dcaaddb7710a8ff5269fcccba97ed3c85987ddaa808db084267b9a/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe", size = 279213, upload-time = "2025-02-28T01:23:42.653Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/1e36379e169a7df3a14a1c160a49b7b918600a6008de43ff20d479e6f4b5/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0", size = 284162, upload-time = "2025-02-28T01:23:43.964Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0a/644b2731194b0d7646f3210dc4d80c7fee3ecb3a1f791a6e0ae6bb8684e3/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f", size = 312856, upload-time = "2025-02-28T01:23:46.011Z" }, + { url = "https://files.pythonhosted.org/packages/dc/62/2a871837c0bb6ab0c9a88bf54de0fc021a6a08832d4ea313ed92a669d437/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23", size = 316726, upload-time = "2025-02-28T01:23:47.575Z" }, + { url = "https://files.pythonhosted.org/packages/0c/a1/9898ea3faac0b156d457fd73a3cb9c2855c6fd063e44b8522925cdd8ce46/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe", size = 343664, upload-time = "2025-02-28T01:23:49.059Z" }, + { url = "https://files.pythonhosted.org/packages/40/f2/71b4ed65ce38982ecdda0ff20c3ad1b15e71949c78b2c053df53629ce940/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505", size = 363128, upload-time = "2025-02-28T01:23:50.399Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/12f6a58eca6dea4be992d6c681b7ec9410a1d9f5cf368c61437e31daa879/bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a", size = 160598, upload-time = "2025-02-28T01:23:51.775Z" }, + { url = "https://files.pythonhosted.org/packages/a9/cf/45fb5261ece3e6b9817d3d82b2f343a505fd58674a92577923bc500bd1aa/bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b", size = 152799, upload-time = "2025-02-28T01:23:53.139Z" }, + { url = "https://files.pythonhosted.org/packages/55/2d/0c7e5ab0524bf1a443e34cdd3926ec6f5879889b2f3c32b2f5074e99ed53/bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1", size = 275367, upload-time = "2025-02-28T01:23:54.578Z" }, + { url = "https://files.pythonhosted.org/packages/10/4f/f77509f08bdff8806ecc4dc472b6e187c946c730565a7470db772d25df70/bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d", size = 280644, upload-time = "2025-02-28T01:23:56.547Z" }, + { url = "https://files.pythonhosted.org/packages/35/18/7d9dc16a3a4d530d0a9b845160e9e5d8eb4f00483e05d44bb4116a1861da/bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492", size = 274881, upload-time = "2025-02-28T01:23:57.935Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/c4/ae6921088adf1e37f2a3a6a688e72e7d9e45fdd3ae5e0bc931870c1ebbda/bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90", size = 280203, upload-time = "2025-02-28T01:23:59.331Z" }, + { url = "https://files.pythonhosted.org/packages/4c/b1/1289e21d710496b88340369137cc4c5f6ee036401190ea116a7b4ae6d32a/bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a", size = 275103, upload-time = "2025-02-28T01:24:00.764Z" }, + { url = "https://files.pythonhosted.org/packages/94/41/19be9fe17e4ffc5d10b7b67f10e459fc4eee6ffe9056a88de511920cfd8d/bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce", size = 280513, upload-time = "2025-02-28T01:24:02.243Z" }, + { url = "https://files.pythonhosted.org/packages/aa/73/05687a9ef89edebdd8ad7474c16d8af685eb4591c3c38300bb6aad4f0076/bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8", size = 274685, upload-time = "2025-02-28T01:24:04.512Z" }, + { url = "https://files.pythonhosted.org/packages/63/13/47bba97924ebe86a62ef83dc75b7c8a881d53c535f83e2c54c4bd701e05c/bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938", size = 280110, upload-time = "2025-02-28T01:24:05.896Z" }, +] + +[[package]] +name = "black" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/49/26a7b0f3f35da4b5a65f081943b7bcd22d7002f5f0fb8098ec1ff21cb6ef/black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666", size = 649449, upload-time = "2025-01-29T04:15:40.373Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/3b/4ba3f93ac8d90410423fdd31d7541ada9bcee1df32fb90d26de41ed40e1d/black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32", size = 1629419, upload-time = "2025-01-29T05:37:06.642Z" }, + { url = "https://files.pythonhosted.org/packages/b4/02/0bde0485146a8a5e694daed47561785e8b77a0466ccc1f3e485d5ef2925e/black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da", size = 1461080, upload-time = "2025-01-29T05:37:09.321Z" }, + { url = "https://files.pythonhosted.org/packages/52/0e/abdf75183c830eaca7589144ff96d49bce73d7ec6ad12ef62185cc0f79a2/black-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7", size = 1766886, upload-time = "2025-01-29T04:18:24.432Z" }, + { url = "https://files.pythonhosted.org/packages/dc/a6/97d8bb65b1d8a41f8a6736222ba0a334db7b7b77b8023ab4568288f23973/black-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9", size = 1419404, upload-time = 
"2025-01-29T04:19:04.296Z" }, + { url = "https://files.pythonhosted.org/packages/7e/4f/87f596aca05c3ce5b94b8663dbfe242a12843caaa82dd3f85f1ffdc3f177/black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0", size = 1614372, upload-time = "2025-01-29T05:37:11.71Z" }, + { url = "https://files.pythonhosted.org/packages/e7/d0/2c34c36190b741c59c901e56ab7f6e54dad8df05a6272a9747ecef7c6036/black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299", size = 1442865, upload-time = "2025-01-29T05:37:14.309Z" }, + { url = "https://files.pythonhosted.org/packages/21/d4/7518c72262468430ead45cf22bd86c883a6448b9eb43672765d69a8f1248/black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096", size = 1749699, upload-time = "2025-01-29T04:18:17.688Z" }, + { url = "https://files.pythonhosted.org/packages/58/db/4f5beb989b547f79096e035c4981ceb36ac2b552d0ac5f2620e941501c99/black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2", size = 1428028, upload-time = "2025-01-29T04:18:51.711Z" }, + { url = "https://files.pythonhosted.org/packages/83/71/3fe4741df7adf015ad8dfa082dd36c94ca86bb21f25608eb247b4afb15b2/black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b", size = 1650988, upload-time = "2025-01-29T05:37:16.707Z" }, + { url = "https://files.pythonhosted.org/packages/13/f3/89aac8a83d73937ccd39bbe8fc6ac8860c11cfa0af5b1c96d081facac844/black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc", size = 1453985, upload-time = "2025-01-29T05:37:18.273Z" }, + { url = "https://files.pythonhosted.org/packages/6f/22/b99efca33f1f3a1d2552c714b1e1b5ae92efac6c43e790ad539a163d1754/black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f", size = 1783816, upload-time = "2025-01-29T04:18:33.823Z" }, + { url = "https://files.pythonhosted.org/packages/18/7e/a27c3ad3822b6f2e0e00d63d58ff6299a99a5b3aee69fa77cd4b0076b261/black-25.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba", size = 1440860, upload-time = "2025-01-29T04:19:12.944Z" }, + { url = "https://files.pythonhosted.org/packages/98/87/0edf98916640efa5d0696e1abb0a8357b52e69e82322628f25bf14d263d1/black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f", size = 1650673, upload-time = "2025-01-29T05:37:20.574Z" }, + { url = "https://files.pythonhosted.org/packages/52/e5/f7bf17207cf87fa6e9b676576749c6b6ed0d70f179a3d812c997870291c3/black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3", size = 1453190, upload-time = "2025-01-29T05:37:22.106Z" }, + { url = "https://files.pythonhosted.org/packages/e3/ee/adda3d46d4a9120772fae6de454c8495603c37c4c3b9c60f25b1ab6401fe/black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171", size = 1782926, upload-time = "2025-01-29T04:18:58.564Z" 
}, + { url = "https://files.pythonhosted.org/packages/cc/64/94eb5f45dcb997d2082f097a3944cfc7fe87e071907f677e80788a2d7b7a/black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18", size = 1442613, upload-time = "2025-01-29T04:19:27.63Z" }, + { url = "https://files.pythonhosted.org/packages/09/71/54e999902aed72baf26bca0d50781b01838251a462612966e9fc4891eadd/black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717", size = 207646, upload-time = "2025-01-29T04:15:38.082Z" }, +] + +[[package]] +name = "boto3" +version = "1.38.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/02/b8e52defe7322998a79cb8e09d8f8a00b9d7452a0a516072f67df89f57f1/boto3-1.38.21.tar.gz", hash = "sha256:417d0328fd3394ffb1c1f400d4277d45b0b86f48d2f088a02306474969344a47", size = 111817, upload-time = "2025-05-21T19:28:13.896Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/6f/9ecfc29719791bd363490741aab6eb271aab36701359f7ab69d5aef0ff0f/boto3-1.38.21-py3-none-any.whl", hash = "sha256:37e4b6b7f77f4cc476ea82eb76a502a289bb750eee96f7d07ec9bcec6592191a", size = 139933, upload-time = "2025-05-21T19:28:10.724Z" }, +] + +[[package]] +name = "botocore" +version = "1.38.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/4a/89f2beab6757c900b15aa301227c9447feff7d327ff0595a2b74406a388c/botocore-1.38.21.tar.gz", hash = "sha256:08d5e9c00e5cc9e0ae0e60570846011789dc7f1d4ea094b3f3e3f3ae1ff2063a", size = 13904318, upload-time = "2025-05-21T19:27:59.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/bf/8e943894e0c9f898db63c6af4c590c153dff680bd02536777b0a543e94e5/botocore-1.38.21-py3-none-any.whl", hash = "sha256:567b4d338114174d0b41857002a4b1e8efb68f1654ed9f3ec6c34ebdef5e9eaf", size = 13564842, upload-time = "2025-05-21T19:27:53.955Z" }, +] + +[[package]] +name = "botocore-stubs" +version = "1.38.19" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "types-awscrt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/70/6204c97f8d8362364f11c16085566abdcaa114c264d3a4d709ff697b203b/botocore_stubs-1.38.19.tar.gz", hash = "sha256:84f67a42bb240a8ea0c5fe4f05d497cc411177db600bc7012182e499ac24bf19", size = 42269, upload-time = "2025-05-19T20:18:13.556Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/ce/28b143452c22b678678d832bf8b41218e3d319bf94062b48c28fe5d81163/botocore_stubs-1.38.19-py3-none-any.whl", hash = "sha256:66fd7d231c21134a12acbe313ef7a6b152cbf9bfd7bfa12a62f8c33e94737e26", size = 65603, upload-time = "2025-05-19T20:18:10.445Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191, upload-time = "2024-09-04T20:43:30.027Z" }, + { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592, upload-time = "2024-09-04T20:43:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024, upload-time = "2024-09-04T20:43:34.186Z" }, + { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188, upload-time = "2024-09-04T20:43:36.286Z" }, + { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571, upload-time = "2024-09-04T20:43:38.586Z" }, + { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687, upload-time = "2024-09-04T20:43:40.084Z" }, + { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211, upload-time = "2024-09-04T20:43:41.526Z" }, + { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325, upload-time = "2024-09-04T20:43:43.117Z" }, + { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784, upload-time = "2024-09-04T20:43:45.256Z" }, + { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564, upload-time = "2024-09-04T20:43:46.779Z" }, + { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804, upload-time = "2024-09-04T20:43:48.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299, upload-time = "2024-09-04T20:43:49.812Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, + { url = 
"https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "cloudpickle" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113, upload-time = "2025-01-14T17:02:05.085Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992, upload-time = "2025-01-14T17:02:02.417Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cryptography" +version = "45.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/47/92a8914716f2405f33f1814b97353e3cfa223cd94a77104075d42de3099e/cryptography-45.0.2.tar.gz", hash = "sha256:d784d57b958ffd07e9e226d17272f9af0c41572557604ca7554214def32c26bf", size = 743865, upload-time = "2025-05-18T02:46:34.986Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/2f/46b9e715157643ad16f039ec3c3c47d174da6f825bf5034b1c5f692ab9e2/cryptography-45.0.2-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:61a8b1bbddd9332917485b2453d1de49f142e6334ce1d97b7916d5a85d179c84", size = 
7043448, upload-time = "2025-05-18T02:45:12.495Z" }, + { url = "https://files.pythonhosted.org/packages/90/52/49e6c86278e1b5ec226e96b62322538ccc466306517bf9aad8854116a088/cryptography-45.0.2-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cc31c66411e14dd70e2f384a9204a859dc25b05e1f303df0f5326691061b839", size = 4201098, upload-time = "2025-05-18T02:45:15.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/3a/201272539ac5b66b4cb1af89021e423fc0bfacb73498950280c51695fb78/cryptography-45.0.2-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:463096533acd5097f8751115bc600b0b64620c4aafcac10c6d0041e6e68f88fe", size = 4429839, upload-time = "2025-05-18T02:45:17.614Z" }, + { url = "https://files.pythonhosted.org/packages/99/89/fa1a84832b8f8f3917875cb15324bba98def5a70175a889df7d21a45dc75/cryptography-45.0.2-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:cdafb86eb673c3211accffbffdb3cdffa3aaafacd14819e0898d23696d18e4d3", size = 4205154, upload-time = "2025-05-18T02:45:19.874Z" }, + { url = "https://files.pythonhosted.org/packages/1c/c5/5225d5230d538ab461725711cf5220560a813d1eb68bafcfb00131b8f631/cryptography-45.0.2-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:05c2385b1f5c89a17df19900cfb1345115a77168f5ed44bdf6fd3de1ce5cc65b", size = 3897145, upload-time = "2025-05-18T02:45:22.209Z" }, + { url = "https://files.pythonhosted.org/packages/fe/24/f19aae32526cc55ae17d473bc4588b1234af2979483d99cbfc57e55ffea6/cryptography-45.0.2-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e9e4bdcd70216b08801e267c0b563316b787f957a46e215249921f99288456f9", size = 4462192, upload-time = "2025-05-18T02:45:24.773Z" }, + { url = "https://files.pythonhosted.org/packages/19/18/4a69ac95b0b3f03355970baa6c3f9502bbfc54e7df81fdb179654a00f48e/cryptography-45.0.2-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b2de529027579e43b6dc1f805f467b102fb7d13c1e54c334f1403ee2b37d0059", size = 4208093, upload-time = "2025-05-18T02:45:27.028Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/2dea55ccc9558b8fa14f67156250b6ee231e31765601524e4757d0b5db6b/cryptography-45.0.2-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10d68763892a7b19c22508ab57799c4423c7c8cd61d7eee4c5a6a55a46511949", size = 4461819, upload-time = "2025-05-18T02:45:29.39Z" }, + { url = "https://files.pythonhosted.org/packages/37/f1/1b220fcd5ef4b1f0ff3e59e733b61597505e47f945606cc877adab2c1a17/cryptography-45.0.2-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2a90ce2f0f5b695e4785ac07c19a58244092f3c85d57db6d8eb1a2b26d2aad6", size = 4329202, upload-time = "2025-05-18T02:45:31.925Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e0/51d1dc4f96f819a56db70f0b4039b4185055bbb8616135884c3c3acc4c6d/cryptography-45.0.2-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:59c0c8f043dd376bbd9d4f636223836aed50431af4c5a467ed9bf61520294627", size = 4570412, upload-time = "2025-05-18T02:45:34.348Z" }, + { url = "https://files.pythonhosted.org/packages/dc/44/88efb40a3600d15277a77cdc69eeeab45a98532078d2a36cffd9325d3b3f/cryptography-45.0.2-cp311-abi3-win32.whl", hash = "sha256:80303ee6a02ef38c4253160446cbeb5c400c07e01d4ddbd4ff722a89b736d95a", size = 2933584, upload-time = "2025-05-18T02:45:36.198Z" }, + { url = "https://files.pythonhosted.org/packages/d9/a1/bc9f82ba08760442cc8346d1b4e7b769b86d197193c45b42b3595d231e84/cryptography-45.0.2-cp311-abi3-win_amd64.whl", hash = "sha256:7429936146063bd1b2cfc54f0e04016b90ee9b1c908a7bed0800049cbace70eb", size = 
3408537, upload-time = "2025-05-18T02:45:38.184Z" }, + { url = "https://files.pythonhosted.org/packages/59/bc/1b6acb1dca366f9c0b3880888ecd7fcfb68023930d57df854847c6da1d10/cryptography-45.0.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:e86c8d54cd19a13e9081898b3c24351683fd39d726ecf8e774aaa9d8d96f5f3a", size = 7025581, upload-time = "2025-05-18T02:45:40.632Z" }, + { url = "https://files.pythonhosted.org/packages/31/a3/a3e4a298d3db4a04085728f5ae6c8cda157e49c5bb784886d463b9fbff70/cryptography-45.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e328357b6bbf79928363dbf13f4635b7aac0306afb7e5ad24d21d0c5761c3253", size = 4189148, upload-time = "2025-05-18T02:45:42.538Z" }, + { url = "https://files.pythonhosted.org/packages/53/90/100dfadd4663b389cb56972541ec1103490a19ebad0132af284114ba0868/cryptography-45.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49af56491473231159c98c2c26f1a8f3799a60e5cf0e872d00745b858ddac9d2", size = 4424113, upload-time = "2025-05-18T02:45:44.316Z" }, + { url = "https://files.pythonhosted.org/packages/0d/40/e2b9177dbed6f3fcbbf1942e1acea2fd15b17007204b79d675540dd053af/cryptography-45.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f169469d04a23282de9d0be349499cb6683b6ff1b68901210faacac9b0c24b7d", size = 4189696, upload-time = "2025-05-18T02:45:46.622Z" }, + { url = "https://files.pythonhosted.org/packages/70/ae/ec29c79f481e1767c2ff916424ba36f3cf7774de93bbd60428a3c52d1357/cryptography-45.0.2-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9cfd1399064b13043082c660ddd97a0358e41c8b0dc7b77c1243e013d305c344", size = 3881498, upload-time = "2025-05-18T02:45:48.884Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4a/72937090e5637a232b2f73801c9361cd08404a2d4e620ca4ec58c7ea4b70/cryptography-45.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f8084b7ca3ce1b8d38bdfe33c48116edf9a08b4d056ef4a96dceaa36d8d965", size = 4451678, upload-time = "2025-05-18T02:45:50.706Z" }, + { url = "https://files.pythonhosted.org/packages/d3/fa/1377fced81fd67a4a27514248261bb0d45c3c1e02169411fe231583088c8/cryptography-45.0.2-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:2cb03a944a1a412724d15a7c051d50e63a868031f26b6a312f2016965b661942", size = 4192296, upload-time = "2025-05-18T02:45:52.422Z" }, + { url = "https://files.pythonhosted.org/packages/d1/cf/b6fe837c83a08b9df81e63299d75fc5b3c6d82cf24b3e1e0e331050e9e5c/cryptography-45.0.2-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a9727a21957d3327cf6b7eb5ffc9e4b663909a25fea158e3fcbc49d4cdd7881b", size = 4451749, upload-time = "2025-05-18T02:45:55.025Z" }, + { url = "https://files.pythonhosted.org/packages/af/d8/5a655675cc635c7190bfc8cffb84bcdc44fc62ce945ad1d844adaa884252/cryptography-45.0.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ddb8d01aa900b741d6b7cc585a97aff787175f160ab975e21f880e89d810781a", size = 4317601, upload-time = "2025-05-18T02:45:56.911Z" }, + { url = "https://files.pythonhosted.org/packages/b9/d4/75d2375a20d80aa262a8adee77bf56950e9292929e394b9fae2481803f11/cryptography-45.0.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:c0c000c1a09f069632d8a9eb3b610ac029fcc682f1d69b758e625d6ee713f4ed", size = 4560535, upload-time = "2025-05-18T02:45:59.33Z" }, + { url = "https://files.pythonhosted.org/packages/aa/18/c3a94474987ebcfb88692036b2ec44880d243fefa73794bdcbf748679a6e/cryptography-45.0.2-cp37-abi3-win32.whl", hash = "sha256:08281de408e7eb71ba3cd5098709a356bfdf65eebd7ee7633c3610f0aa80d79b", size = 
2922045, upload-time = "2025-05-18T02:46:01.012Z" }, + { url = "https://files.pythonhosted.org/packages/63/63/fb28b30c144182fd44ce93d13ab859791adbf923e43bdfb610024bfecda1/cryptography-45.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:48caa55c528617fa6db1a9c3bf2e37ccb31b73e098ac2b71408d1f2db551dde4", size = 3393321, upload-time = "2025-05-18T02:46:03.441Z" }, + { url = "https://files.pythonhosted.org/packages/6e/f5/d1d4dead3b269671cda7be6d6b2970b25398e84219681cb397139bdce88b/cryptography-45.0.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8ec324711596fbf21837d3a5db543937dd84597d364769b46e0102250023f77", size = 3578517, upload-time = "2025-05-18T02:46:05.263Z" }, + { url = "https://files.pythonhosted.org/packages/ac/7b/00e18d24f08bc642e4018e0066a6f872d85c744e3265910c3beabb1f4d73/cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:965611880c3fa8e504b7458484c0697e00ae6e937279cd6734fdaa2bc954dc49", size = 4135515, upload-time = "2025-05-18T02:46:07.241Z" }, + { url = "https://files.pythonhosted.org/packages/29/9f/ea7ad5239c33c36f0e2cbdf631a0e3b7633466e87e55923f5b5ea1b0b92d/cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d891942592789fa0ab71b502550bbadb12f540d7413d7d7c4cef4b02af0f5bc6", size = 4378133, upload-time = "2025-05-18T02:46:09.035Z" }, + { url = "https://files.pythonhosted.org/packages/47/f8/b4e29d87fbc4d2cf46b36e01fcb98305bf76699f34de6b877cddd8bc3a64/cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b19f4b28dd2ef2e6d600307fee656c00825a2980c4356a7080bd758d633c3a6f", size = 4136787, upload-time = "2025-05-18T02:46:10.772Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7c/ac19bbf24d261667a67aac712d8aa3bb740f94bc2391f06ccc90e783f3ff/cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:7c73968fbb7698a4c5d6160859db560d3aac160edde89c751edd5a8bc6560c88", size = 4377741, upload-time = "2025-05-18T02:46:13.215Z" }, + { url = "https://files.pythonhosted.org/packages/e2/69/51f1c3d03ef4e3bcac4d3f00738f6ac0a205199809e2b92368b6f15a9ec4/cryptography-45.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:501de1296b2041dccf2115e3c7d4947430585601b251b140970ce255c5cfb985", size = 3326934, upload-time = "2025-05-18T02:46:15.081Z" }, + { url = "https://files.pythonhosted.org/packages/d7/74/2a0fb642c4c34d8c46c12b6eac89b10769b378c7b6a901ff94a8d4ba1b52/cryptography-45.0.2-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1655d3a76e3dedb683c982a6c3a2cbfae2d08f47a48ec5a3d58db52b3d29ea6f", size = 3587805, upload-time = "2025-05-18T02:46:17.531Z" }, + { url = "https://files.pythonhosted.org/packages/8a/18/57bc98fa5d93e74c2c2b16a3c5383f7ec218f957aa44559c0008a46c3629/cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc7693573f16535428183de8fd27f0ca1ca37a51baa0b41dc5ed7b3d68fe80e2", size = 4143347, upload-time = "2025-05-18T02:46:19.934Z" }, + { url = "https://files.pythonhosted.org/packages/84/6f/d015e7e7bd7f3a6c538973005de5a780d93b68138c2d88c804422cf46b1c/cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:614bca7c6ed0d8ad1dce683a6289afae1f880675b4090878a0136c3da16bc693", size = 4387414, upload-time = "2025-05-18T02:46:21.944Z" }, + { url = "https://files.pythonhosted.org/packages/de/9e/fa5ec89cce7e4b86e430438da4d66b79113bdf321d0a00167d34b61daf19/cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:4142e20c29224cec63e9e32eb1e6014fb285fe39b7be66b3564ca978a3a8afe9", 
size = 4145849, upload-time = "2025-05-18T02:46:24.327Z" }, + { url = "https://files.pythonhosted.org/packages/7c/09/5887d4fcc6f9c6fb19920789d094c4e25c2f604cc1b10b7e69d6f56187fe/cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:9a900036b42f7324df7c7ad9569eb92ba0b613cf699160dd9c2154b24fd02f8e", size = 4387449, upload-time = "2025-05-18T02:46:26.144Z" }, + { url = "https://files.pythonhosted.org/packages/a5/4a/e27ab71dc3e517becc3f2ae358454bb4b78c0cb5af52f8e11b8943525ea6/cryptography-45.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:057723b79752a142efbc609e90b0dff27b0361ccbee3bd48312d70f5cdf53b78", size = 3335090, upload-time = "2025-05-18T02:46:27.913Z" }, +] + +[[package]] +name = "dask" +version = "2025.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "cloudpickle" }, + { name = "fsspec" }, + { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, + { name = "packaging" }, + { name = "partd" }, + { name = "pyyaml" }, + { name = "toolz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/44/ac66964138b026bc31d82c38c97ff260b1be7e13d8446c5c21e81b4c0991/dask-2025.5.0.tar.gz", hash = "sha256:3ec9175e53effe1c2b0086668352e0d5261c5ef6f71a410264eda83659d686ef", size = 10971343, upload-time = "2025-05-13T22:18:40.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/cb/a68c59dd229bae7c439631dba66b0286d551efae67e09e0bbf813635b0d3/dask-2025.5.0-py3-none-any.whl", hash = "sha256:77e9a64bb09098515bc579477b7051b0909474cd7b3e0005e3d0968a70c84015", size = 1473896, upload-time = "2025-05-13T22:18:30.514Z" }, +] + +[package.optional-dependencies] +distributed = [ + { name = "distributed" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "distributed" +version = "2025.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "cloudpickle" }, + { name = "dask" }, + { name = "jinja2" }, + { name = "locket" }, + { name = "msgpack" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyyaml" }, + { name = "sortedcontainers" }, + { name = "tblib" }, + { name = "toolz" }, + { name = "tornado" }, + { name = "urllib3" }, + { name = "zict" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/17/4d/a4eca3d263a4574c9721f4e5121083aaf113951e697f1c87eabbd7fc8a19/distributed-2025.5.0.tar.gz", hash = "sha256:49dc3395eb3b7169800160731064bbc7ee6f5235bbea49d9b85baa6358a2e37a", size = 1108873, upload-time = "2025-05-13T22:18:35.877Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/e4/a462ea00743788a5b006a3ddc3ea18656410ed97a7a5a8571ae44f0faef0/distributed-2025.5.0-py3-none-any.whl", hash = "sha256:374e3236b4945745b48cd821025f802095abb18fd6477123dd237905253bc60f", size = 1014792, upload-time = "2025-05-13T22:18:32.179Z" }, +] + 
+[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197, upload-time = "2024-10-05T20:14:59.362Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632, upload-time = "2024-10-05T20:14:57.687Z" }, +] + +[[package]] +name = "email-validator" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/ce/13508a1ec3f8bb981ae4ca79ea40384becc868bfae97fd1c942bb3a001b1/email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7", size = 48967, upload-time = "2024-06-20T11:30:30.034Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/ee/bf0adb559ad3c786f12bcbc9296b3f5675f529199bef03e2df281fa1fadb/email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631", size = 33521, upload-time = "2024-06-20T11:30:28.248Z" }, +] + +[[package]] +name = "fsspec" +version = "2025.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/77/deb99b97981e2e191913454da82d406702405178631c31cd623caebaf1b1/fsspec-2025.5.0.tar.gz", hash = "sha256:e4f4623bb6221f7407fd695cc535d1f857a077eb247580f4ada34f5dc25fd5c8", size = 300989, upload-time = "2025-05-20T15:46:22.484Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/a9/a7022f58e081149ec0184c31ea81dcee605e1d46380b48122e1ef94ac24e/fsspec-2025.5.0-py3-none-any.whl", hash = "sha256:0ca253eca6b5333d8a2b8bd98c7326fe821f1f0fdbd34e1b445bddde8e804c95", size = 196164, upload-time = "2025-05-20T15:46:20.89Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/c1/a82edae11d46c0d83481aacaa1e578fea21d94a1ef400afd734d47ad95ad/greenlet-3.2.2.tar.gz", hash = "sha256:ad053d34421a2debba45aa3cc39acf454acbcd025b3fc1a9f8a0dee237abd485", size = 185797, upload-time = "2025-05-09T19:47:35.066Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/66/910217271189cc3f32f670040235f4bf026ded8ca07270667d69c06e7324/greenlet-3.2.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:c49e9f7c6f625507ed83a7485366b46cbe325717c60837f7244fc99ba16ba9d6", size = 267395, upload-time = "2025-05-09T14:50:45.357Z" }, + { url = "https://files.pythonhosted.org/packages/a8/36/8d812402ca21017c82880f399309afadb78a0aa300a9b45d741e4df5d954/greenlet-3.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3cc1a3ed00ecfea8932477f729a9f616ad7347a5e55d50929efa50a86cb7be7", size = 625742, upload-time = "2025-05-09T15:23:58.293Z" }, + { url = "https://files.pythonhosted.org/packages/7b/77/66d7b59dfb7cc1102b2f880bc61cb165ee8998c9ec13c96606ba37e54c77/greenlet-3.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c9896249fbef2c615853b890ee854f22c671560226c9221cfd27c995db97e5c", size = 637014, upload-time 
= "2025-05-09T15:24:47.025Z" }, + { url = "https://files.pythonhosted.org/packages/36/a7/ff0d408f8086a0d9a5aac47fa1b33a040a9fca89bd5a3f7b54d1cd6e2793/greenlet-3.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7409796591d879425997a518138889d8d17e63ada7c99edc0d7a1c22007d4907", size = 632874, upload-time = "2025-05-09T15:29:20.014Z" }, + { url = "https://files.pythonhosted.org/packages/a1/75/1dc2603bf8184da9ebe69200849c53c3c1dca5b3a3d44d9f5ca06a930550/greenlet-3.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7791dcb496ec53d60c7f1c78eaa156c21f402dda38542a00afc3e20cae0f480f", size = 631652, upload-time = "2025-05-09T14:53:30.961Z" }, + { url = "https://files.pythonhosted.org/packages/7b/74/ddc8c3bd4c2c20548e5bf2b1d2e312a717d44e2eca3eadcfc207b5f5ad80/greenlet-3.2.2-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d8009ae46259e31bc73dc183e402f548e980c96f33a6ef58cc2e7865db012e13", size = 580619, upload-time = "2025-05-09T14:53:42.049Z" }, + { url = "https://files.pythonhosted.org/packages/7e/f2/40f26d7b3077b1c7ae7318a4de1f8ffc1d8ccbad8f1d8979bf5080250fd6/greenlet-3.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fd9fb7c941280e2c837b603850efc93c999ae58aae2b40765ed682a6907ebbc5", size = 1109809, upload-time = "2025-05-09T15:26:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/c5/21/9329e8c276746b0d2318b696606753f5e7b72d478adcf4ad9a975521ea5f/greenlet-3.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:00cd814b8959b95a546e47e8d589610534cfb71f19802ea8a2ad99d95d702057", size = 1133455, upload-time = "2025-05-09T14:53:55.823Z" }, + { url = "https://files.pythonhosted.org/packages/bb/1e/0dca9619dbd736d6981f12f946a497ec21a0ea27262f563bca5729662d4d/greenlet-3.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:d0cb7d47199001de7658c213419358aa8937df767936506db0db7ce1a71f4a2f", size = 294991, upload-time = "2025-05-09T15:05:56.847Z" }, + { url = "https://files.pythonhosted.org/packages/a3/9f/a47e19261747b562ce88219e5ed8c859d42c6e01e73da6fbfa3f08a7be13/greenlet-3.2.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:dcb9cebbf3f62cb1e5afacae90761ccce0effb3adaa32339a0670fe7805d8068", size = 268635, upload-time = "2025-05-09T14:50:39.007Z" }, + { url = "https://files.pythonhosted.org/packages/11/80/a0042b91b66975f82a914d515e81c1944a3023f2ce1ed7a9b22e10b46919/greenlet-3.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf3fc9145141250907730886b031681dfcc0de1c158f3cc51c092223c0f381ce", size = 628786, upload-time = "2025-05-09T15:24:00.692Z" }, + { url = "https://files.pythonhosted.org/packages/38/a2/8336bf1e691013f72a6ebab55da04db81a11f68e82bb691f434909fa1327/greenlet-3.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:efcdfb9df109e8a3b475c016f60438fcd4be68cd13a365d42b35914cdab4bb2b", size = 640866, upload-time = "2025-05-09T15:24:48.153Z" }, + { url = "https://files.pythonhosted.org/packages/f8/7e/f2a3a13e424670a5d08826dab7468fa5e403e0fbe0b5f951ff1bc4425b45/greenlet-3.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd139e4943547ce3a56ef4b8b1b9479f9e40bb47e72cc906f0f66b9d0d5cab3", size = 636752, upload-time = "2025-05-09T15:29:23.182Z" }, + { url = "https://files.pythonhosted.org/packages/fd/5d/ce4a03a36d956dcc29b761283f084eb4a3863401c7cb505f113f73af8774/greenlet-3.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:71566302219b17ca354eb274dfd29b8da3c268e41b646f330e324e3967546a74", size = 636028, upload-time = "2025-05-09T14:53:32.854Z" }, + { url = "https://files.pythonhosted.org/packages/4b/29/b130946b57e3ceb039238413790dd3793c5e7b8e14a54968de1fe449a7cf/greenlet-3.2.2-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3091bc45e6b0c73f225374fefa1536cd91b1e987377b12ef5b19129b07d93ebe", size = 583869, upload-time = "2025-05-09T14:53:43.614Z" }, + { url = "https://files.pythonhosted.org/packages/ac/30/9f538dfe7f87b90ecc75e589d20cbd71635531a617a336c386d775725a8b/greenlet-3.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:44671c29da26539a5f142257eaba5110f71887c24d40df3ac87f1117df589e0e", size = 1112886, upload-time = "2025-05-09T15:27:01.304Z" }, + { url = "https://files.pythonhosted.org/packages/be/92/4b7deeb1a1e9c32c1b59fdca1cac3175731c23311ddca2ea28a8b6ada91c/greenlet-3.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c23ea227847c9dbe0b3910f5c0dd95658b607137614eb821e6cbaecd60d81cc6", size = 1138355, upload-time = "2025-05-09T14:53:58.011Z" }, + { url = "https://files.pythonhosted.org/packages/c5/eb/7551c751a2ea6498907b2fcbe31d7a54b602ba5e8eb9550a9695ca25d25c/greenlet-3.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:0a16fb934fcabfdfacf21d79e6fed81809d8cd97bc1be9d9c89f0e4567143d7b", size = 295437, upload-time = "2025-05-09T15:00:57.733Z" }, + { url = "https://files.pythonhosted.org/packages/2c/a1/88fdc6ce0df6ad361a30ed78d24c86ea32acb2b563f33e39e927b1da9ea0/greenlet-3.2.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:df4d1509efd4977e6a844ac96d8be0b9e5aa5d5c77aa27ca9f4d3f92d3fcf330", size = 270413, upload-time = "2025-05-09T14:51:32.455Z" }, + { url = "https://files.pythonhosted.org/packages/a6/2e/6c1caffd65490c68cd9bcec8cb7feb8ac7b27d38ba1fea121fdc1f2331dc/greenlet-3.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da956d534a6d1b9841f95ad0f18ace637668f680b1339ca4dcfb2c1837880a0b", size = 637242, upload-time = "2025-05-09T15:24:02.63Z" }, + { url = "https://files.pythonhosted.org/packages/98/28/088af2cedf8823b6b7ab029a5626302af4ca1037cf8b998bed3a8d3cb9e2/greenlet-3.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c7b15fb9b88d9ee07e076f5a683027bc3befd5bb5d25954bb633c385d8b737e", size = 651444, upload-time = "2025-05-09T15:24:49.856Z" }, + { url = "https://files.pythonhosted.org/packages/4a/9f/0116ab876bb0bc7a81eadc21c3f02cd6100dcd25a1cf2a085a130a63a26a/greenlet-3.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:752f0e79785e11180ebd2e726c8a88109ded3e2301d40abced2543aa5d164275", size = 646067, upload-time = "2025-05-09T15:29:24.989Z" }, + { url = "https://files.pythonhosted.org/packages/35/17/bb8f9c9580e28a94a9575da847c257953d5eb6e39ca888239183320c1c28/greenlet-3.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ae572c996ae4b5e122331e12bbb971ea49c08cc7c232d1bd43150800a2d6c65", size = 648153, upload-time = "2025-05-09T14:53:34.716Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ee/7f31b6f7021b8df6f7203b53b9cc741b939a2591dcc6d899d8042fcf66f2/greenlet-3.2.2-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02f5972ff02c9cf615357c17ab713737cccfd0eaf69b951084a9fd43f39833d3", size = 603865, upload-time = "2025-05-09T14:53:45.738Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/2d/759fa59323b521c6f223276a4fc3d3719475dc9ae4c44c2fe7fc750f8de0/greenlet-3.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4fefc7aa68b34b9224490dfda2e70ccf2131368493add64b4ef2d372955c207e", size = 1119575, upload-time = "2025-05-09T15:27:04.248Z" }, + { url = "https://files.pythonhosted.org/packages/30/05/356813470060bce0e81c3df63ab8cd1967c1ff6f5189760c1a4734d405ba/greenlet-3.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a31ead8411a027c2c4759113cf2bd473690517494f3d6e4bf67064589afcd3c5", size = 1147460, upload-time = "2025-05-09T14:54:00.315Z" }, + { url = "https://files.pythonhosted.org/packages/07/f4/b2a26a309a04fb844c7406a4501331b9400e1dd7dd64d3450472fd47d2e1/greenlet-3.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:b24c7844c0a0afc3ccbeb0b807adeefb7eff2b5599229ecedddcfeb0ef333bec", size = 296239, upload-time = "2025-05-09T14:57:17.633Z" }, + { url = "https://files.pythonhosted.org/packages/89/30/97b49779fff8601af20972a62cc4af0c497c1504dfbb3e93be218e093f21/greenlet-3.2.2-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:3ab7194ee290302ca15449f601036007873028712e92ca15fc76597a0aeb4c59", size = 269150, upload-time = "2025-05-09T14:50:30.784Z" }, + { url = "https://files.pythonhosted.org/packages/21/30/877245def4220f684bc2e01df1c2e782c164e84b32e07373992f14a2d107/greenlet-3.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc5c43bb65ec3669452af0ab10729e8fdc17f87a1f2ad7ec65d4aaaefabf6bf", size = 637381, upload-time = "2025-05-09T15:24:12.893Z" }, + { url = "https://files.pythonhosted.org/packages/8e/16/adf937908e1f913856b5371c1d8bdaef5f58f251d714085abeea73ecc471/greenlet-3.2.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:decb0658ec19e5c1f519faa9a160c0fc85a41a7e6654b3ce1b44b939f8bf1325", size = 651427, upload-time = "2025-05-09T15:24:51.074Z" }, + { url = "https://files.pythonhosted.org/packages/ad/49/6d79f58fa695b618654adac64e56aff2eeb13344dc28259af8f505662bb1/greenlet-3.2.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6fadd183186db360b61cb34e81117a096bff91c072929cd1b529eb20dd46e6c5", size = 645795, upload-time = "2025-05-09T15:29:26.673Z" }, + { url = "https://files.pythonhosted.org/packages/5a/e6/28ed5cb929c6b2f001e96b1d0698c622976cd8f1e41fe7ebc047fa7c6dd4/greenlet-3.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1919cbdc1c53ef739c94cf2985056bcc0838c1f217b57647cbf4578576c63825", size = 648398, upload-time = "2025-05-09T14:53:36.61Z" }, + { url = "https://files.pythonhosted.org/packages/9d/70/b200194e25ae86bc57077f695b6cc47ee3118becf54130c5514456cf8dac/greenlet-3.2.2-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3885f85b61798f4192d544aac7b25a04ece5fe2704670b4ab73c2d2c14ab740d", size = 606795, upload-time = "2025-05-09T14:53:47.039Z" }, + { url = "https://files.pythonhosted.org/packages/f8/c8/ba1def67513a941154ed8f9477ae6e5a03f645be6b507d3930f72ed508d3/greenlet-3.2.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:85f3e248507125bf4af607a26fd6cb8578776197bd4b66e35229cdf5acf1dfbf", size = 1117976, upload-time = "2025-05-09T15:27:06.542Z" }, + { url = "https://files.pythonhosted.org/packages/c3/30/d0e88c1cfcc1b3331d63c2b54a0a3a4a950ef202fb8b92e772ca714a9221/greenlet-3.2.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1e76106b6fc55fa3d6fe1c527f95ee65e324a13b62e243f77b48317346559708", size = 1145509, upload-time = "2025-05-09T14:54:02.223Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/2e/59d6491834b6e289051b252cf4776d16da51c7c6ca6a87ff97e3a50aa0cd/greenlet-3.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:fe46d4f8e94e637634d54477b0cfabcf93c53f29eedcbdeecaf2af32029b4421", size = 296023, upload-time = "2025-05-09T14:53:24.157Z" }, + { url = "https://files.pythonhosted.org/packages/65/66/8a73aace5a5335a1cba56d0da71b7bd93e450f17d372c5b7c5fa547557e9/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba30e88607fb6990544d84caf3c706c4b48f629e18853fc6a646f82db9629418", size = 629911, upload-time = "2025-05-09T15:24:22.376Z" }, + { url = "https://files.pythonhosted.org/packages/48/08/c8b8ebac4e0c95dcc68ec99198842e7db53eda4ab3fb0a4e785690883991/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:055916fafad3e3388d27dd68517478933a97edc2fc54ae79d3bec827de2c64c4", size = 635251, upload-time = "2025-05-09T15:24:52.205Z" }, + { url = "https://files.pythonhosted.org/packages/37/26/7db30868f73e86b9125264d2959acabea132b444b88185ba5c462cb8e571/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2593283bf81ca37d27d110956b79e8723f9aa50c4bcdc29d3c0543d4743d2763", size = 632620, upload-time = "2025-05-09T15:29:28.051Z" }, + { url = "https://files.pythonhosted.org/packages/10/ec/718a3bd56249e729016b0b69bee4adea0dfccf6ca43d147ef3b21edbca16/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c69e9a10670eb7a66b8cef6354c24671ba241f46152dd3eed447f79c29fb5b", size = 628851, upload-time = "2025-05-09T14:53:38.472Z" }, + { url = "https://files.pythonhosted.org/packages/9b/9d/d1c79286a76bc62ccdc1387291464af16a4204ea717f24e77b0acd623b99/greenlet-3.2.2-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a98600899ca1ca5d3a2590974c9e3ec259503b2d6ba6527605fcd74e08e207", size = 593718, upload-time = "2025-05-09T14:53:48.313Z" }, + { url = "https://files.pythonhosted.org/packages/cd/41/96ba2bf948f67b245784cd294b84e3d17933597dffd3acdb367a210d1949/greenlet-3.2.2-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:b50a8c5c162469c3209e5ec92ee4f95c8231b11db6a04db09bbe338176723bb8", size = 1105752, upload-time = "2025-05-09T15:27:08.217Z" }, + { url = "https://files.pythonhosted.org/packages/68/3b/3b97f9d33c1f2eb081759da62bd6162159db260f602f048bc2f36b4c453e/greenlet-3.2.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:45f9f4853fb4cc46783085261c9ec4706628f3b57de3e68bae03e8f8b3c0de51", size = 1125170, upload-time = "2025-05-09T14:54:04.082Z" }, + { url = "https://files.pythonhosted.org/packages/31/df/b7d17d66c8d0f578d2885a3d8f565e9e4725eacc9d3fdc946d0031c055c4/greenlet-3.2.2-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:9ea5231428af34226c05f927e16fc7f6fa5e39e3ad3cd24ffa48ba53a47f4240", size = 269899, upload-time = "2025-05-09T14:54:01.581Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 
70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "isort" +version = "6.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b8/21/1e2a441f74a653a144224d7d21afe8f4169e6c7c20bb13aec3a2dc3815e0/isort-6.0.1.tar.gz", hash = "sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450", size = 821955, upload-time = "2025-02-26T21:13:16.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/11/114d0a5f4dabbdcedc1125dee0888514c3c3b16d3e9facad87ed96fad97c/isort-6.0.1-py3-none-any.whl", hash = "sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615", size = 94186, upload-time = "2025-02-26T21:13:14.911Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "locket" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/83/97b29fe05cb6ae28d2dbd30b81e2e402a3eed5f460c26e9eaa5895ceacf5/locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632", size = 4350, upload-time = "2022-04-20T22:04:44.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/bc/83e112abc66cd466c6b83f99118035867cecd41802f8d044638aa78a106e/locket-1.0.0-py2.py3-none-any.whl", 
hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3", size = 4398, upload-time = "2022-04-20T22:04:42.23Z" }, +] + +[[package]] +name = "lz4" +version = "4.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/5a/945f5086326d569f14c84ac6f7fcc3229f0b9b1e8cc536b951fd53dfb9e1/lz4-4.4.4.tar.gz", hash = "sha256:070fd0627ec4393011251a094e08ed9fdcc78cb4e7ab28f507638eee4e39abda", size = 171884, upload-time = "2025-04-01T22:55:58.62Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/80/4054e99cda2e003097f59aeb3ad470128f3298db5065174a84564d2d6983/lz4-4.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f170abb8416c4efca48e76cac2c86c3185efdf841aecbe5c190121c42828ced0", size = 220896, upload-time = "2025-04-01T22:55:13.577Z" }, + { url = "https://files.pythonhosted.org/packages/dd/4e/f92424d5734e772b05ddbeec739e2566e2a2336995b36a180e1dd9411e9a/lz4-4.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d33a5105cd96ebd32c3e78d7ece6123a9d2fb7c18b84dec61f27837d9e0c496c", size = 189679, upload-time = "2025-04-01T22:55:15.471Z" }, + { url = "https://files.pythonhosted.org/packages/a2/70/71ffd496067cba6ba352e10b89c0e9cee3e4bc4717ba866b6aa350f4c7ac/lz4-4.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ebbc5b76b4f0018988825a7e9ce153be4f0d4eba34e6c1f2fcded120573e88", size = 1237940, upload-time = "2025-04-01T22:55:16.498Z" }, + { url = "https://files.pythonhosted.org/packages/6e/59/cf34d1e232b11e1ae7122300be00529f369a7cd80f74ac351d58c4c4eedf/lz4-4.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc64d6dfa7a89397529b22638939e70d85eaedc1bd68e30a29c78bfb65d4f715", size = 1264105, upload-time = "2025-04-01T22:55:17.606Z" }, + { url = "https://files.pythonhosted.org/packages/f9/f6/3a00a98ff5b872d572cc6e9c88e0f6275bea0f3ed1dc1b8f8b736c85784c/lz4-4.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a355223a284f42a723c120ce68827de66d5cb872a38732b3d5abbf544fa2fe26", size = 1184179, upload-time = "2025-04-01T22:55:19.206Z" }, + { url = "https://files.pythonhosted.org/packages/bc/de/6aeb602786174bad290609c0c988afb1077b74a80eaea23ebc3b5de6e2fa/lz4-4.4.4-cp310-cp310-win32.whl", hash = "sha256:b28228197775b7b5096898851d59ef43ccaf151136f81d9c436bc9ba560bc2ba", size = 88265, upload-time = "2025-04-01T22:55:20.215Z" }, + { url = "https://files.pythonhosted.org/packages/e4/b5/1f52c8b17d02ae637f85911c0135ca08be1c9bbdfb3e7de1c4ae7af0bac6/lz4-4.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:45e7c954546de4f85d895aa735989d77f87dd649f503ce1c8a71a151b092ed36", size = 99916, upload-time = "2025-04-01T22:55:21.332Z" }, + { url = "https://files.pythonhosted.org/packages/01/e7/123587e7dae6cdba48393e4fdad2b9412f43f51346afe9ca6f697029de11/lz4-4.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:e3fc90f766401684740978cd781d73b9685bd81b5dbf7257542ef9de4612e4d2", size = 89746, upload-time = "2025-04-01T22:55:22.205Z" }, + { url = "https://files.pythonhosted.org/packages/28/e8/63843dc5ecb1529eb38e1761ceed04a0ad52a9ad8929ab8b7930ea2e4976/lz4-4.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ddfc7194cd206496c445e9e5b0c47f970ce982c725c87bd22de028884125b68f", size = 220898, upload-time = "2025-04-01T22:55:23.085Z" }, + { url = "https://files.pythonhosted.org/packages/e4/94/c53de5f07c7dc11cf459aab2a1d754f5df5f693bfacbbe1e4914bfd02f1e/lz4-4.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:714f9298c86f8e7278f1c6af23e509044782fa8220eb0260f8f8f1632f820550", size = 189685, upload-time = "2025-04-01T22:55:24.413Z" }, + { url = "https://files.pythonhosted.org/packages/fe/59/c22d516dd0352f2a3415d1f665ccef2f3e74ecec3ca6a8f061a38f97d50d/lz4-4.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8474c91de47733856c6686df3c4aca33753741da7e757979369c2c0d32918ba", size = 1239225, upload-time = "2025-04-01T22:55:25.737Z" }, + { url = "https://files.pythonhosted.org/packages/81/af/665685072e71f3f0e626221b7922867ec249cd8376aca761078c8f11f5da/lz4-4.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80dd27d7d680ea02c261c226acf1d41de2fd77af4fb2da62b278a9376e380de0", size = 1265881, upload-time = "2025-04-01T22:55:26.817Z" }, + { url = "https://files.pythonhosted.org/packages/90/04/b4557ae381d3aa451388a29755cc410066f5e2f78c847f66f154f4520a68/lz4-4.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b7d6dddfd01b49aedb940fdcaf32f41dc58c926ba35f4e31866aeec2f32f4f4", size = 1185593, upload-time = "2025-04-01T22:55:27.896Z" }, + { url = "https://files.pythonhosted.org/packages/7b/e4/03636979f4e8bf92c557f998ca98ee4e6ef92e92eaf0ed6d3c7f2524e790/lz4-4.4.4-cp311-cp311-win32.whl", hash = "sha256:4134b9fd70ac41954c080b772816bb1afe0c8354ee993015a83430031d686a4c", size = 88259, upload-time = "2025-04-01T22:55:29.03Z" }, + { url = "https://files.pythonhosted.org/packages/07/f0/9efe53b4945441a5d2790d455134843ad86739855b7e6199977bf6dc8898/lz4-4.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:f5024d3ca2383470f7c4ef4d0ed8eabad0b22b23eeefde1c192cf1a38d5e9f78", size = 99916, upload-time = "2025-04-01T22:55:29.933Z" }, + { url = "https://files.pythonhosted.org/packages/87/c8/1675527549ee174b9e1db089f7ddfbb962a97314657269b1e0344a5eaf56/lz4-4.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:6ea715bb3357ea1665f77874cf8f55385ff112553db06f3742d3cdcec08633f7", size = 89741, upload-time = "2025-04-01T22:55:31.184Z" }, + { url = "https://files.pythonhosted.org/packages/f7/2d/5523b4fabe11cd98f040f715728d1932eb7e696bfe94391872a823332b94/lz4-4.4.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:23ae267494fdd80f0d2a131beff890cf857f1b812ee72dbb96c3204aab725553", size = 220669, upload-time = "2025-04-01T22:55:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/91/06/1a5bbcacbfb48d8ee5b6eb3fca6aa84143a81d92946bdb5cd6b005f1863e/lz4-4.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fff9f3a1ed63d45cb6514bfb8293005dc4141341ce3500abdfeb76124c0b9b2e", size = 189661, upload-time = "2025-04-01T22:55:33.413Z" }, + { url = "https://files.pythonhosted.org/packages/fa/08/39eb7ac907f73e11a69a11576a75a9e36406b3241c0ba41453a7eb842abb/lz4-4.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ea7f07329f85a8eda4d8cf937b87f27f0ac392c6400f18bea2c667c8b7f8ecc", size = 1238775, upload-time = "2025-04-01T22:55:34.835Z" }, + { url = "https://files.pythonhosted.org/packages/e9/26/05840fbd4233e8d23e88411a066ab19f1e9de332edddb8df2b6a95c7fddc/lz4-4.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ccab8f7f7b82f9fa9fc3b0ba584d353bd5aa818d5821d77d5b9447faad2aaad", size = 1265143, upload-time = "2025-04-01T22:55:35.933Z" }, + { url = "https://files.pythonhosted.org/packages/b7/5d/5f2db18c298a419932f3ab2023deb689863cf8fd7ed875b1c43492479af2/lz4-4.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e43e9d48b2daf80e486213128b0763deed35bbb7a59b66d1681e205e1702d735", size = 1185032, upload-time = "2025-04-01T22:55:37.454Z" }, + { url = "https://files.pythonhosted.org/packages/c4/e6/736ab5f128694b0f6aac58343bcf37163437ac95997276cd0be3ea4c3342/lz4-4.4.4-cp312-cp312-win32.whl", hash = "sha256:33e01e18e4561b0381b2c33d58e77ceee850a5067f0ece945064cbaac2176962", size = 88284, upload-time = "2025-04-01T22:55:38.536Z" }, + { url = "https://files.pythonhosted.org/packages/40/b8/243430cb62319175070e06e3a94c4c7bd186a812e474e22148ae1290d47d/lz4-4.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d21d1a2892a2dcc193163dd13eaadabb2c1b803807a5117d8f8588b22eaf9f12", size = 99918, upload-time = "2025-04-01T22:55:39.628Z" }, + { url = "https://files.pythonhosted.org/packages/6c/e1/0686c91738f3e6c2e1a243e0fdd4371667c4d2e5009b0a3605806c2aa020/lz4-4.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:2f4f2965c98ab254feddf6b5072854a6935adab7bc81412ec4fe238f07b85f62", size = 89736, upload-time = "2025-04-01T22:55:40.5Z" }, + { url = "https://files.pythonhosted.org/packages/3b/3c/d1d1b926d3688263893461e7c47ed7382a969a0976fc121fc678ec325fc6/lz4-4.4.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ed6eb9f8deaf25ee4f6fad9625d0955183fdc90c52b6f79a76b7f209af1b6e54", size = 220678, upload-time = "2025-04-01T22:55:41.78Z" }, + { url = "https://files.pythonhosted.org/packages/26/89/8783d98deb058800dabe07e6cdc90f5a2a8502a9bad8c5343c641120ace2/lz4-4.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:18ae4fe3bafb344dbd09f976d45cbf49c05c34416f2462828f9572c1fa6d5af7", size = 189670, upload-time = "2025-04-01T22:55:42.775Z" }, + { url = "https://files.pythonhosted.org/packages/22/ab/a491ace69a83a8914a49f7391e92ca0698f11b28d5ce7b2ececa2be28e9a/lz4-4.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57fd20c5fc1a49d1bbd170836fccf9a338847e73664f8e313dce6ac91b8c1e02", size = 1238746, upload-time = "2025-04-01T22:55:43.797Z" }, + { url = "https://files.pythonhosted.org/packages/97/12/a1f2f4fdc6b7159c0d12249456f9fe454665b6126e98dbee9f2bd3cf735c/lz4-4.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9cb387c33f014dae4db8cb4ba789c8d2a0a6d045ddff6be13f6c8d9def1d2a6", size = 1265119, upload-time = "2025-04-01T22:55:44.943Z" }, + { url = "https://files.pythonhosted.org/packages/50/6e/e22e50f5207649db6ea83cd31b79049118305be67e96bec60becf317afc6/lz4-4.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0be9f68240231e1e44118a4ebfecd8a5d4184f0bdf5c591c98dd6ade9720afd", size = 1184954, upload-time = "2025-04-01T22:55:46.161Z" }, + { url = "https://files.pythonhosted.org/packages/4c/c4/2a458039645fcc6324ece731d4d1361c5daf960b553d1fcb4261ba07d51c/lz4-4.4.4-cp313-cp313-win32.whl", hash = "sha256:e9ec5d45ea43684f87c316542af061ef5febc6a6b322928f059ce1fb289c298a", size = 88289, upload-time = "2025-04-01T22:55:47.601Z" }, + { url = "https://files.pythonhosted.org/packages/00/96/b8e24ea7537ab418074c226279acfcaa470e1ea8271003e24909b6db942b/lz4-4.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:a760a175b46325b2bb33b1f2bbfb8aa21b48e1b9653e29c10b6834f9bb44ead4", size = 99925, upload-time = "2025-04-01T22:55:48.463Z" }, + { url = "https://files.pythonhosted.org/packages/a5/a5/f9838fe6aa132cfd22733ed2729d0592259fff074cefb80f19aa0607367b/lz4-4.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:f4c21648d81e0dda38b4720dccc9006ae33b0e9e7ffe88af6bf7d4ec124e2fba", size = 89743, upload-time = "2025-04-01T22:55:49.716Z" }, +] + 
+[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = 
"https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 
9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "msgpack" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/d0/7555686ae7ff5731205df1012ede15dd9d927f6227ea151e901c7406af4f/msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e", size = 167260, upload-time = "2024-09-10T04:25:52.197Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/f9/a892a6038c861fa849b11a2bb0502c07bc698ab6ea53359e5771397d883b/msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd", size = 150428, upload-time = "2024-09-10T04:25:43.089Z" }, + { url = "https://files.pythonhosted.org/packages/df/7a/d174cc6a3b6bb85556e6a046d3193294a92f9a8e583cdbd46dc8a1d7e7f4/msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d", size = 84131, upload-time = "2024-09-10T04:25:30.22Z" }, + { url = "https://files.pythonhosted.org/packages/08/52/bf4fbf72f897a23a56b822997a72c16de07d8d56d7bf273242f884055682/msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5", size = 81215, upload-time = "2024-09-10T04:24:54.329Z" }, + { url = "https://files.pythonhosted.org/packages/02/95/dc0044b439b518236aaf012da4677c1b8183ce388411ad1b1e63c32d8979/msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5", size = 371229, upload-time = "2024-09-10T04:25:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/ff/75/09081792db60470bef19d9c2be89f024d366b1e1973c197bb59e6aabc647/msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e", size = 378034, upload-time = "2024-09-10T04:25:22.097Z" }, + { url = "https://files.pythonhosted.org/packages/32/d3/c152e0c55fead87dd948d4b29879b0f14feeeec92ef1fd2ec21b107c3f49/msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b", size = 363070, upload-time = "2024-09-10T04:24:43.957Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/2c/82e73506dd55f9e43ac8aa007c9dd088c6f0de2aa19e8f7330e6a65879fc/msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f", size = 359863, upload-time = "2024-09-10T04:24:51.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/a0/3d093b248837094220e1edc9ec4337de3443b1cfeeb6e0896af8ccc4cc7a/msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68", size = 368166, upload-time = "2024-09-10T04:24:19.907Z" }, + { url = "https://files.pythonhosted.org/packages/e4/13/7646f14f06838b406cf5a6ddbb7e8dc78b4996d891ab3b93c33d1ccc8678/msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b", size = 370105, upload-time = "2024-09-10T04:25:35.141Z" }, + { url = "https://files.pythonhosted.org/packages/67/fa/dbbd2443e4578e165192dabbc6a22c0812cda2649261b1264ff515f19f15/msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044", size = 68513, upload-time = "2024-09-10T04:24:36.099Z" }, + { url = "https://files.pythonhosted.org/packages/24/ce/c2c8fbf0ded750cb63cbcbb61bc1f2dfd69e16dca30a8af8ba80ec182dcd/msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f", size = 74687, upload-time = "2024-09-10T04:24:23.394Z" }, + { url = "https://files.pythonhosted.org/packages/b7/5e/a4c7154ba65d93be91f2f1e55f90e76c5f91ccadc7efc4341e6f04c8647f/msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7", size = 150803, upload-time = "2024-09-10T04:24:40.911Z" }, + { url = "https://files.pythonhosted.org/packages/60/c2/687684164698f1d51c41778c838d854965dd284a4b9d3a44beba9265c931/msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa", size = 84343, upload-time = "2024-09-10T04:24:50.283Z" }, + { url = "https://files.pythonhosted.org/packages/42/ae/d3adea9bb4a1342763556078b5765e666f8fdf242e00f3f6657380920972/msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701", size = 81408, upload-time = "2024-09-10T04:25:12.774Z" }, + { url = "https://files.pythonhosted.org/packages/dc/17/6313325a6ff40ce9c3207293aee3ba50104aed6c2c1559d20d09e5c1ff54/msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6", size = 396096, upload-time = "2024-09-10T04:24:37.245Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a1/ad7b84b91ab5a324e707f4c9761633e357820b011a01e34ce658c1dda7cc/msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59", size = 403671, upload-time = "2024-09-10T04:25:10.201Z" }, + { url = "https://files.pythonhosted.org/packages/bb/0b/fd5b7c0b308bbf1831df0ca04ec76fe2f5bf6319833646b0a4bd5e9dc76d/msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0", size = 387414, upload-time = "2024-09-10T04:25:27.552Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/03/ff8233b7c6e9929a1f5da3c7860eccd847e2523ca2de0d8ef4878d354cfa/msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e", size = 383759, upload-time = "2024-09-10T04:25:03.366Z" }, + { url = "https://files.pythonhosted.org/packages/1f/1b/eb82e1fed5a16dddd9bc75f0854b6e2fe86c0259c4353666d7fab37d39f4/msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6", size = 394405, upload-time = "2024-09-10T04:25:07.348Z" }, + { url = "https://files.pythonhosted.org/packages/90/2e/962c6004e373d54ecf33d695fb1402f99b51832631e37c49273cc564ffc5/msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5", size = 396041, upload-time = "2024-09-10T04:25:48.311Z" }, + { url = "https://files.pythonhosted.org/packages/f8/20/6e03342f629474414860c48aeffcc2f7f50ddaf351d95f20c3f1c67399a8/msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88", size = 68538, upload-time = "2024-09-10T04:24:29.953Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c4/5a582fc9a87991a3e6f6800e9bb2f3c82972912235eb9539954f3e9997c7/msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788", size = 74871, upload-time = "2024-09-10T04:25:44.823Z" }, + { url = "https://files.pythonhosted.org/packages/e1/d6/716b7ca1dbde63290d2973d22bbef1b5032ca634c3ff4384a958ec3f093a/msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d", size = 152421, upload-time = "2024-09-10T04:25:49.63Z" }, + { url = "https://files.pythonhosted.org/packages/70/da/5312b067f6773429cec2f8f08b021c06af416bba340c912c2ec778539ed6/msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2", size = 85277, upload-time = "2024-09-10T04:24:48.562Z" }, + { url = "https://files.pythonhosted.org/packages/28/51/da7f3ae4462e8bb98af0d5bdf2707f1b8c65a0d4f496e46b6afb06cbc286/msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420", size = 82222, upload-time = "2024-09-10T04:25:36.49Z" }, + { url = "https://files.pythonhosted.org/packages/33/af/dc95c4b2a49cff17ce47611ca9ba218198806cad7796c0b01d1e332c86bb/msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2", size = 392971, upload-time = "2024-09-10T04:24:58.129Z" }, + { url = "https://files.pythonhosted.org/packages/f1/54/65af8de681fa8255402c80eda2a501ba467921d5a7a028c9c22a2c2eedb5/msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39", size = 401403, upload-time = "2024-09-10T04:25:40.428Z" }, + { url = "https://files.pythonhosted.org/packages/97/8c/e333690777bd33919ab7024269dc3c41c76ef5137b211d776fbb404bfead/msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f", size = 385356, upload-time = "2024-09-10T04:25:31.406Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/52/406795ba478dc1c890559dd4e89280fa86506608a28ccf3a72fbf45df9f5/msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247", size = 383028, upload-time = "2024-09-10T04:25:17.08Z" }, + { url = "https://files.pythonhosted.org/packages/e7/69/053b6549bf90a3acadcd8232eae03e2fefc87f066a5b9fbb37e2e608859f/msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c", size = 391100, upload-time = "2024-09-10T04:25:08.993Z" }, + { url = "https://files.pythonhosted.org/packages/23/f0/d4101d4da054f04274995ddc4086c2715d9b93111eb9ed49686c0f7ccc8a/msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b", size = 394254, upload-time = "2024-09-10T04:25:06.048Z" }, + { url = "https://files.pythonhosted.org/packages/1c/12/cf07458f35d0d775ff3a2dc5559fa2e1fcd06c46f1ef510e594ebefdca01/msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b", size = 69085, upload-time = "2024-09-10T04:25:01.494Z" }, + { url = "https://files.pythonhosted.org/packages/73/80/2708a4641f7d553a63bc934a3eb7214806b5b39d200133ca7f7afb0a53e8/msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f", size = 75347, upload-time = "2024-09-10T04:25:33.106Z" }, + { url = "https://files.pythonhosted.org/packages/c8/b0/380f5f639543a4ac413e969109978feb1f3c66e931068f91ab6ab0f8be00/msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf", size = 151142, upload-time = "2024-09-10T04:24:59.656Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ee/be57e9702400a6cb2606883d55b05784fada898dfc7fd12608ab1fdb054e/msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330", size = 84523, upload-time = "2024-09-10T04:25:37.924Z" }, + { url = "https://files.pythonhosted.org/packages/7e/3a/2919f63acca3c119565449681ad08a2f84b2171ddfcff1dba6959db2cceb/msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734", size = 81556, upload-time = "2024-09-10T04:24:28.296Z" }, + { url = "https://files.pythonhosted.org/packages/7c/43/a11113d9e5c1498c145a8925768ea2d5fce7cbab15c99cda655aa09947ed/msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e", size = 392105, upload-time = "2024-09-10T04:25:20.153Z" }, + { url = "https://files.pythonhosted.org/packages/2d/7b/2c1d74ca6c94f70a1add74a8393a0138172207dc5de6fc6269483519d048/msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca", size = 399979, upload-time = "2024-09-10T04:25:41.75Z" }, + { url = "https://files.pythonhosted.org/packages/82/8c/cf64ae518c7b8efc763ca1f1348a96f0e37150061e777a8ea5430b413a74/msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915", size = 383816, upload-time = "2024-09-10T04:24:45.826Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/86/a847ef7a0f5ef3fa94ae20f52a4cacf596a4e4a010197fbcc27744eb9a83/msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d", size = 380973, upload-time = "2024-09-10T04:25:04.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/90/c74cf6e1126faa93185d3b830ee97246ecc4fe12cf9d2d31318ee4246994/msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434", size = 387435, upload-time = "2024-09-10T04:24:17.879Z" }, + { url = "https://files.pythonhosted.org/packages/7a/40/631c238f1f338eb09f4acb0f34ab5862c4e9d7eda11c1b685471a4c5ea37/msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c", size = 399082, upload-time = "2024-09-10T04:25:18.398Z" }, + { url = "https://files.pythonhosted.org/packages/e9/1b/fa8a952be252a1555ed39f97c06778e3aeb9123aa4cccc0fd2acd0b4e315/msgpack-1.1.0-cp313-cp313-win32.whl", hash = "sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc", size = 69037, upload-time = "2024-09-10T04:24:52.798Z" }, + { url = "https://files.pythonhosted.org/packages/b6/bc/8bd826dd03e022153bfa1766dcdec4976d6c818865ed54223d71f07862b3/msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f", size = 75140, upload-time = "2024-09-10T04:24:31.288Z" }, +] + +[[package]] +name = "mypy-boto3-ec2" +version = "1.38.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/2e/f7ca76a374a4e89425239761c69aeec7ea3fbe37c5f8112debfb66b6d330/mypy_boto3_ec2-1.38.21.tar.gz", hash = "sha256:93cae7ec14e91f44cf7f34b372a57e39a22a9d973abcb29e1a923f39ac738646", size = 399238, upload-time = "2025-05-21T19:42:24.774Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/19/9565935ad8eecfb56803640bd0d2c766c92b6ad40a9f0dd96cab5bb05eb0/mypy_boto3_ec2-1.38.21-py3-none-any.whl", hash = "sha256:cfed1e5c990fb8182819f065d5a350dd0d885735a782218b1f54198265cf694f", size = 388653, upload-time = "2025-05-21T19:42:15.154Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "paramiko" +version = "3.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "cryptography" }, + { name = "pynacl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/15/ad6ce226e8138315f2451c2aeea985bf35ee910afb477bae7477dc3a8f3b/paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822", size = 1566110, upload-time = "2025-02-04T02:37:59.783Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/f8/c7bd0ef12954a81a1d3cea60a13946bd9a49a0036a5927770c461eade7ae/paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61", size = 227298, upload-time = "2025-02-04T02:37:57.672Z" }, +] + +[[package]] +name = "parse" +version = "1.20.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4f/78/d9b09ba24bb36ef8b83b71be547e118d46214735b6dfb39e4bfde0e9b9dd/parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce", size = 29391, upload-time = "2024-06-11T04:41:57.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/31/ba45bf0b2aa7898d81cbbfac0e88c267befb59ad91a19e36e1bc5578ddb1/parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558", size = 20126, upload-time = "2024-06-11T04:41:55.057Z" }, +] + +[[package]] +name = "partd" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "locket" }, + { name = "toolz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/3a/3f06f34820a31257ddcabdfafc2672c5816be79c7e353b02c1f318daa7d4/partd-1.4.2.tar.gz", hash = "sha256:d022c33afbdc8405c226621b015e8067888173d85f7f5ecebb3cafed9a20f02c", size = 21029, upload-time = "2024-05-06T19:51:41.945Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/e7/40fb618334dcdf7c5a316c0e7343c5cd82d3d866edc100d98e29bc945ecd/partd-1.4.2-py3-none-any.whl", hash = "sha256:978e4ac767ec4ba5b86c6eaa52e5a2a3bc748a2ca839e8cc798f1cc6ce6efb0f", size = 18905, upload-time = "2024-05-06T19:51:39.271Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = 
"sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/77/ab/5250d56ad03884ab5efd07f734203943c8a8ab40d551e208af81d0257bf2/pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d", size = 786540, upload-time = "2025-04-29T20:38:55.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/12/46b65f3534d099349e38ef6ec98b1a5a81f42536d17e0ba382c28c67ba67/pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb", size = 443900, upload-time = "2025-04-29T20:38:52.724Z" }, +] + +[package.optional-dependencies] +email = [ + { name = "email-validator" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" }, + { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" }, + { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" }, + { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" }, + { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" }, + { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" }, + { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" }, + { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" }, + { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = 
"2025-04-23T18:31:06.377Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" }, + { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", 
size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = 
"2025-04-23T18:32:04.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, + { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, + { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" }, + { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" }, + { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" }, + { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" }, + { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" }, + { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, +] + +[[package]] +name = "pylint" +version = "3.3.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/e4/83e487d3ddd64ab27749b66137b26dc0c5b5c161be680e6beffdc99070b3/pylint-3.3.7.tar.gz", hash = "sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559", size = 1520709, upload-time = "2025-05-04T17:07:51.089Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e8/83/bff755d09e31b5d25cc7fdc4bf3915d1a404e181f1abf0359af376845c24/pylint-3.3.7-py3-none-any.whl", hash = "sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d", size = 522565, upload-time = "2025-05-04T17:07:48.714Z" }, +] + +[[package]] +name = "pynacl" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/22/27582568be639dfe22ddb3902225f91f2f17ceff88ce80e4db396c8986da/PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba", size = 3392854, upload-time = "2022-01-07T22:05:41.134Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/75/0b8ede18506041c0bf23ac4d8e2971b4161cd6ce630b177d0a08eb0d8857/PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1", size = 349920, upload-time = "2022-01-07T22:05:49.156Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/fddf10acd09637327a97ef89d2a9d621328850a72f1fdc8c08bdf72e385f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92", size = 601722, upload-time = "2022-01-07T22:05:50.989Z" }, + { url = "https://files.pythonhosted.org/packages/5d/70/87a065c37cca41a75f2ce113a5a2c2aa7533be648b184ade58971b5f7ccc/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394", size = 680087, upload-time = "2022-01-07T22:05:52.539Z" }, + { url = "https://files.pythonhosted.org/packages/ee/87/f1bb6a595f14a327e8285b9eb54d41fef76c585a0edef0a45f6fc95de125/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d", size = 856678, upload-time = "2022-01-07T22:05:54.251Z" }, + { url = "https://files.pythonhosted.org/packages/66/28/ca86676b69bf9f90e710571b67450508484388bfce09acf8a46f0b8c785f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858", size = 1133660, upload-time = "2022-01-07T22:05:56.056Z" }, + { url = "https://files.pythonhosted.org/packages/3d/85/c262db650e86812585e2bc59e497a8f59948a005325a11bbbc9ecd3fe26b/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b", size = 663824, upload-time = "2022-01-07T22:05:57.434Z" }, + { url = "https://files.pythonhosted.org/packages/fd/1a/cc308a884bd299b651f1633acb978e8596c71c33ca85e9dc9fa33a5399b9/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff", size = 1117912, upload-time = "2022-01-07T22:05:58.665Z" }, + { url = "https://files.pythonhosted.org/packages/25/2d/b7df6ddb0c2a33afdb358f8af6ea3b8c4d1196ca45497dd37a56f0c122be/PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543", size = 204624, upload-time = "2022-01-07T22:06:00.085Z" }, + { url = "https://files.pythonhosted.org/packages/5e/22/d3db169895faaf3e2eda892f005f433a62db2decbcfbc2f61e6517adfa87/PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = 
"sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", size = 212141, upload-time = "2022-01-07T22:06:01.861Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920, upload-time = "2025-03-25T10:14:56.835Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256, upload-time = "2025-03-25T10:14:55.034Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" }, + { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, 
upload-time = "2024-08-06T20:32:18.555Z" }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, +] + +[[package]] +name = "resolvelib" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/10/f699366ce577423cbc3df3280063099054c23df70856465080798c6ebad6/resolvelib-1.0.1.tar.gz", hash = "sha256:04ce76cbd63fded2078ce224785da6ecd42b9564b1390793f64ddecbe997b309", size = 21065, upload-time = "2023-03-09T05:10:38.292Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fc/e9ccf0521607bcd244aa0b3fbd574f71b65e9ce6a112c83af988bbbe2e23/resolvelib-1.0.1-py2.py3-none-any.whl", hash = "sha256:d2da45d1a8dfee81bdd591647783e340ef3bcb104b54c383f70d422ef5cc7dbf", size = 17194, upload-time = "2023-03-09T05:10:36.214Z" }, +] + +[[package]] 
+name = "rich" +version = "14.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/53/830aa4c3066a8ab0ae9a9955976fb770fe9c6102117c8ec4ab3ea62d89e8/rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725", size = 224078, upload-time = "2025-03-30T14:15:14.23Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/9e/73b14aed38ee1f62cd30ab93cd0072dec7fb01f3033d116875ae3e7b8b44/s3transfer-0.12.0.tar.gz", hash = "sha256:8ac58bc1989a3fdb7c7f3ee0918a66b160d038a147c7b5db1500930a607e9a1c", size = 149178, upload-time = "2025-04-22T21:08:09.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/64/d2b49620039b82688aeebd510bd62ff4cdcdb86cbf650cc72ae42c5254a3/s3transfer-0.12.0-py3-none-any.whl", hash = "sha256:35b314d7d82865756edab59f7baebc6b477189e6ab4c53050e28c1de4d9cce18", size = 84773, upload-time = "2025-04-22T21:08:08.265Z" }, +] + +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.41" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "(python_full_version < '3.14' and platform_machine == 'AMD64') or (python_full_version < '3.14' and platform_machine == 'WIN32') or (python_full_version < '3.14' and platform_machine == 'aarch64') or (python_full_version < '3.14' and platform_machine == 'amd64') or (python_full_version < '3.14' and platform_machine == 'ppc64le') or (python_full_version < '3.14' and platform_machine == 'win32') or (python_full_version < '3.14' and platform_machine == 'x86_64')" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/66/45b165c595ec89aa7dcc2c1cd222ab269bc753f1fc7a1e68f8481bd957bf/sqlalchemy-2.0.41.tar.gz", hash = "sha256:edba70118c4be3c2b1f90754d308d0b79c6fe2c0fdc52d8ddf603916f83f4db9", size = 9689424, upload-time = "2025-05-14T17:10:32.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/12/d7c445b1940276a828efce7331cb0cb09d6e5f049651db22f4ebb0922b77/sqlalchemy-2.0.41-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b1f09b6821406ea1f94053f346f28f8215e293344209129a9c0fcc3578598d7b", size = 2117967, upload-time = "2025-05-14T17:48:15.841Z" }, + { url = "https://files.pythonhosted.org/packages/6f/b8/cb90f23157e28946b27eb01ef401af80a1fab7553762e87df51507eaed61/sqlalchemy-2.0.41-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1936af879e3db023601196a1684d28e12f19ccf93af01bf3280a3262c4b6b4e5", size = 2107583, upload-time = "2025-05-14T17:48:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/9e/c2/eef84283a1c8164a207d898e063edf193d36a24fb6a5bb3ce0634b92a1e8/sqlalchemy-2.0.41-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2ac41acfc8d965fb0c464eb8f44995770239668956dc4cdf502d1b1ffe0d747", size = 3186025, upload-time = "2025-05-14T17:51:51.226Z" }, + { url = "https://files.pythonhosted.org/packages/bd/72/49d52bd3c5e63a1d458fd6d289a1523a8015adedbddf2c07408ff556e772/sqlalchemy-2.0.41-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81c24e0c0fde47a9723c81d5806569cddef103aebbf79dbc9fcbb617153dea30", size = 3186259, upload-time = "2025-05-14T17:55:22.526Z" }, + { url = "https://files.pythonhosted.org/packages/4f/9e/e3ffc37d29a3679a50b6bbbba94b115f90e565a2b4545abb17924b94c52d/sqlalchemy-2.0.41-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23a8825495d8b195c4aa9ff1c430c28f2c821e8c5e2d98089228af887e5d7e29", size = 3126803, upload-time = "2025-05-14T17:51:53.277Z" }, + { url = "https://files.pythonhosted.org/packages/8a/76/56b21e363f6039978ae0b72690237b38383e4657281285a09456f313dd77/sqlalchemy-2.0.41-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:60c578c45c949f909a4026b7807044e7e564adf793537fc762b2489d522f3d11", size = 3148566, upload-time = "2025-05-14T17:55:24.398Z" }, + { url = "https://files.pythonhosted.org/packages/3b/92/11b8e1b69bf191bc69e300a99badbbb5f2f1102f2b08b39d9eee2e21f565/sqlalchemy-2.0.41-cp310-cp310-win32.whl", hash = "sha256:118c16cd3f1b00c76d69343e38602006c9cfb9998fa4f798606d28d63f23beda", size = 2086696, upload-time = "2025-05-14T17:55:59.136Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/88/2d706c9cc4502654860f4576cd54f7db70487b66c3b619ba98e0be1a4642/sqlalchemy-2.0.41-cp310-cp310-win_amd64.whl", hash = "sha256:7492967c3386df69f80cf67efd665c0f667cee67032090fe01d7d74b0e19bb08", size = 2110200, upload-time = "2025-05-14T17:56:00.757Z" }, + { url = "https://files.pythonhosted.org/packages/37/4e/b00e3ffae32b74b5180e15d2ab4040531ee1bef4c19755fe7926622dc958/sqlalchemy-2.0.41-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6375cd674fe82d7aa9816d1cb96ec592bac1726c11e0cafbf40eeee9a4516b5f", size = 2121232, upload-time = "2025-05-14T17:48:20.444Z" }, + { url = "https://files.pythonhosted.org/packages/ef/30/6547ebb10875302074a37e1970a5dce7985240665778cfdee2323709f749/sqlalchemy-2.0.41-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9f8c9fdd15a55d9465e590a402f42082705d66b05afc3ffd2d2eb3c6ba919560", size = 2110897, upload-time = "2025-05-14T17:48:21.634Z" }, + { url = "https://files.pythonhosted.org/packages/9e/21/59df2b41b0f6c62da55cd64798232d7349a9378befa7f1bb18cf1dfd510a/sqlalchemy-2.0.41-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f9dc8c44acdee06c8fc6440db9eae8b4af8b01e4b1aee7bdd7241c22edff4f", size = 3273313, upload-time = "2025-05-14T17:51:56.205Z" }, + { url = "https://files.pythonhosted.org/packages/62/e4/b9a7a0e5c6f79d49bcd6efb6e90d7536dc604dab64582a9dec220dab54b6/sqlalchemy-2.0.41-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c11ceb9a1f482c752a71f203a81858625d8df5746d787a4786bca4ffdf71c6", size = 3273807, upload-time = "2025-05-14T17:55:26.928Z" }, + { url = "https://files.pythonhosted.org/packages/39/d8/79f2427251b44ddee18676c04eab038d043cff0e764d2d8bb08261d6135d/sqlalchemy-2.0.41-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:911cc493ebd60de5f285bcae0491a60b4f2a9f0f5c270edd1c4dbaef7a38fc04", size = 3209632, upload-time = "2025-05-14T17:51:59.384Z" }, + { url = "https://files.pythonhosted.org/packages/d4/16/730a82dda30765f63e0454918c982fb7193f6b398b31d63c7c3bd3652ae5/sqlalchemy-2.0.41-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03968a349db483936c249f4d9cd14ff2c296adfa1290b660ba6516f973139582", size = 3233642, upload-time = "2025-05-14T17:55:29.901Z" }, + { url = "https://files.pythonhosted.org/packages/04/61/c0d4607f7799efa8b8ea3c49b4621e861c8f5c41fd4b5b636c534fcb7d73/sqlalchemy-2.0.41-cp311-cp311-win32.whl", hash = "sha256:293cd444d82b18da48c9f71cd7005844dbbd06ca19be1ccf6779154439eec0b8", size = 2086475, upload-time = "2025-05-14T17:56:02.095Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8e/8344f8ae1cb6a479d0741c02cd4f666925b2bf02e2468ddaf5ce44111f30/sqlalchemy-2.0.41-cp311-cp311-win_amd64.whl", hash = "sha256:3d3549fc3e40667ec7199033a4e40a2f669898a00a7b18a931d3efb4c7900504", size = 2110903, upload-time = "2025-05-14T17:56:03.499Z" }, + { url = "https://files.pythonhosted.org/packages/3e/2a/f1f4e068b371154740dd10fb81afb5240d5af4aa0087b88d8b308b5429c2/sqlalchemy-2.0.41-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:81f413674d85cfd0dfcd6512e10e0f33c19c21860342a4890c3a2b59479929f9", size = 2119645, upload-time = "2025-05-14T17:55:24.854Z" }, + { url = "https://files.pythonhosted.org/packages/9b/e8/c664a7e73d36fbfc4730f8cf2bf930444ea87270f2825efbe17bf808b998/sqlalchemy-2.0.41-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:598d9ebc1e796431bbd068e41e4de4dc34312b7aa3292571bb3674a0cb415dd1", size = 2107399, upload-time = "2025-05-14T17:55:28.097Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/78/8a9cf6c5e7135540cb682128d091d6afa1b9e48bd049b0d691bf54114f70/sqlalchemy-2.0.41-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a104c5694dfd2d864a6f91b0956eb5d5883234119cb40010115fd45a16da5e70", size = 3293269, upload-time = "2025-05-14T17:50:38.227Z" }, + { url = "https://files.pythonhosted.org/packages/3c/35/f74add3978c20de6323fb11cb5162702670cc7a9420033befb43d8d5b7a4/sqlalchemy-2.0.41-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6145afea51ff0af7f2564a05fa95eb46f542919e6523729663a5d285ecb3cf5e", size = 3303364, upload-time = "2025-05-14T17:51:49.829Z" }, + { url = "https://files.pythonhosted.org/packages/6a/d4/c990f37f52c3f7748ebe98883e2a0f7d038108c2c5a82468d1ff3eec50b7/sqlalchemy-2.0.41-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b46fa6eae1cd1c20e6e6f44e19984d438b6b2d8616d21d783d150df714f44078", size = 3229072, upload-time = "2025-05-14T17:50:39.774Z" }, + { url = "https://files.pythonhosted.org/packages/15/69/cab11fecc7eb64bc561011be2bd03d065b762d87add52a4ca0aca2e12904/sqlalchemy-2.0.41-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41836fe661cc98abfae476e14ba1906220f92c4e528771a8a3ae6a151242d2ae", size = 3268074, upload-time = "2025-05-14T17:51:51.736Z" }, + { url = "https://files.pythonhosted.org/packages/5c/ca/0c19ec16858585d37767b167fc9602593f98998a68a798450558239fb04a/sqlalchemy-2.0.41-cp312-cp312-win32.whl", hash = "sha256:a8808d5cf866c781150d36a3c8eb3adccfa41a8105d031bf27e92c251e3969d6", size = 2084514, upload-time = "2025-05-14T17:55:49.915Z" }, + { url = "https://files.pythonhosted.org/packages/7f/23/4c2833d78ff3010a4e17f984c734f52b531a8c9060a50429c9d4b0211be6/sqlalchemy-2.0.41-cp312-cp312-win_amd64.whl", hash = "sha256:5b14e97886199c1f52c14629c11d90c11fbb09e9334fa7bb5f6d068d9ced0ce0", size = 2111557, upload-time = "2025-05-14T17:55:51.349Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ad/2e1c6d4f235a97eeef52d0200d8ddda16f6c4dd70ae5ad88c46963440480/sqlalchemy-2.0.41-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eeb195cdedaf17aab6b247894ff2734dcead6c08f748e617bfe05bd5a218443", size = 2115491, upload-time = "2025-05-14T17:55:31.177Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8d/be490e5db8400dacc89056f78a52d44b04fbf75e8439569d5b879623a53b/sqlalchemy-2.0.41-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d4ae769b9c1c7757e4ccce94b0641bc203bbdf43ba7a2413ab2523d8d047d8dc", size = 2102827, upload-time = "2025-05-14T17:55:34.921Z" }, + { url = "https://files.pythonhosted.org/packages/a0/72/c97ad430f0b0e78efaf2791342e13ffeafcbb3c06242f01a3bb8fe44f65d/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a62448526dd9ed3e3beedc93df9bb6b55a436ed1474db31a2af13b313a70a7e1", size = 3225224, upload-time = "2025-05-14T17:50:41.418Z" }, + { url = "https://files.pythonhosted.org/packages/5e/51/5ba9ea3246ea068630acf35a6ba0d181e99f1af1afd17e159eac7e8bc2b8/sqlalchemy-2.0.41-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc56c9788617b8964ad02e8fcfeed4001c1f8ba91a9e1f31483c0dffb207002a", size = 3230045, upload-time = "2025-05-14T17:51:54.722Z" }, + { url = "https://files.pythonhosted.org/packages/78/2f/8c14443b2acea700c62f9b4a8bad9e49fc1b65cfb260edead71fd38e9f19/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c153265408d18de4cc5ded1941dcd8315894572cddd3c58df5d5b5705b3fa28d", size = 3159357, upload-time = "2025-05-14T17:50:43.483Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/b2/43eacbf6ccc5276d76cea18cb7c3d73e294d6fb21f9ff8b4eef9b42bbfd5/sqlalchemy-2.0.41-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f67766965996e63bb46cfbf2ce5355fc32d9dd3b8ad7e536a920ff9ee422e23", size = 3197511, upload-time = "2025-05-14T17:51:57.308Z" }, + { url = "https://files.pythonhosted.org/packages/fa/2e/677c17c5d6a004c3c45334ab1dbe7b7deb834430b282b8a0f75ae220c8eb/sqlalchemy-2.0.41-cp313-cp313-win32.whl", hash = "sha256:bfc9064f6658a3d1cadeaa0ba07570b83ce6801a1314985bf98ec9b95d74e15f", size = 2082420, upload-time = "2025-05-14T17:55:52.69Z" }, + { url = "https://files.pythonhosted.org/packages/e9/61/e8c1b9b6307c57157d328dd8b8348ddc4c47ffdf1279365a13b2b98b8049/sqlalchemy-2.0.41-cp313-cp313-win_amd64.whl", hash = "sha256:82ca366a844eb551daff9d2e6e7a9e5e76d2612c8564f58db6c19a726869c1df", size = 2108329, upload-time = "2025-05-14T17:55:54.495Z" }, + { url = "https://files.pythonhosted.org/packages/1c/fc/9ba22f01b5cdacc8f5ed0d22304718d2c758fce3fd49a5372b886a86f37c/sqlalchemy-2.0.41-py3-none-any.whl", hash = "sha256:57df5dc6fdb5ed1a88a1ed2195fd31927e705cad62dedd86b46972752a80f576", size = 1911224, upload-time = "2025-05-14T17:39:42.154Z" }, +] + +[package.optional-dependencies] +asyncio = [ + { name = "greenlet" }, +] + +[[package]] +name = "sshtunnel" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "paramiko" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/ad/4c587adf79865be268ee0b6bd52cfaa7a75d827a23ced072dc5ab554b4af/sshtunnel-0.4.0.tar.gz", hash = "sha256:e7cb0ea774db81bf91844db22de72a40aae8f7b0f9bb9ba0f666d474ef6bf9fc", size = 62716, upload-time = "2021-01-11T13:26:32.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/13/8476c4328dcadfe26f8bd7f3a1a03bf9ddb890a7e7b692f54a179bc525bf/sshtunnel-0.4.0-py2.py3-none-any.whl", hash = "sha256:98e54c26f726ab8bd42b47a3a21fca5c3e60f58956f0f70de2fb8ab0046d0606", size = 24729, upload-time = "2021-01-11T13:26:29.969Z" }, +] + +[[package]] +name = "tblib" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/95/4b3044ec4bf248186769629bbfb495a458deb6e4c1f9eff7f298ae1e336e/tblib-3.1.0.tar.gz", hash = "sha256:06404c2c9f07f66fee2d7d6ad43accc46f9c3361714d9b8426e7f47e595cd652", size = 30766, upload-time = "2025-03-31T12:58:27.473Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/44/aa5c8b10b2cce7a053018e0d132bd58e27527a0243c4985383d5b6fd93e9/tblib-3.1.0-py3-none-any.whl", hash = "sha256:670bb4582578134b3d81a84afa1b016128b429f3d48e6cbbaecc9d15675e984e", size = 12552, upload-time = "2025-03-31T12:58:26.142Z" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b1/09/a439bec5888f00a54b8b9f05fa94d7f901d6735ef4e55dcec9bc37b5d8fa/tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79", size = 192885, upload-time = "2024-08-14T08:19:41.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/b6/a447b5e4ec71e13871be01ba81f5dfc9d0af7e473da256ff46bc0e24026f/tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde", size = 37955, upload-time = "2024-08-14T08:19:40.05Z" }, +] + +[[package]] +name = "toolz" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790, upload-time = 
"2024-10-04T16:17:04.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383, upload-time = "2024-10-04T16:17:01.533Z" }, +] + +[[package]] +name = "tornado" +version = "6.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/c4/bb3bd68b1b3cd30abc6411469875e6d32004397ccc4a3230479f86f86a73/tornado-6.5.tar.gz", hash = "sha256:c70c0a26d5b2d85440e4debd14a8d0b463a0cf35d92d3af05f5f1ffa8675c826", size = 508968, upload-time = "2025-05-15T20:37:43.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/7c/6526062801e4becb5a7511079c0b0f170a80d929d312042d5b5c4afad464/tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6", size = 441204, upload-time = "2025-05-15T20:37:22.107Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ff/53d49f869a390ce68d4f98306b6f9ad5765c114ab27ef47d7c9bd05d1191/tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41", size = 439373, upload-time = "2025-05-15T20:37:24.476Z" }, + { url = "https://files.pythonhosted.org/packages/4a/62/fdd9b12b95e4e2b7b8c21dfc306b0960b20b741e588318c13918cf52b868/tornado-6.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c625b9d03f1fb4d64149c47d0135227f0434ebb803e2008040eb92906b0105a", size = 442935, upload-time = "2025-05-15T20:37:26.638Z" }, + { url = "https://files.pythonhosted.org/packages/46/00/0094bd1538cb8579f7a97330cb77f40c9b8042c71fb040e5daae439be1ae/tornado-6.5-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a0d8d2309faf015903080fb5bdd969ecf9aa5ff893290845cf3fd5b2dd101bc", size = 442282, upload-time = "2025-05-15T20:37:28.436Z" }, + { url = "https://files.pythonhosted.org/packages/d8/fa/23bb108afb8197a55edd333fe26a3dad9341ce441337aad95cd06b025594/tornado-6.5-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03576ab51e9b1677e4cdaae620d6700d9823568b7939277e4690fe4085886c55", size = 442515, upload-time = "2025-05-15T20:37:30.051Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f2/c4d43d830578111b1826cf831fdbb8b2a10e3c4fccc4b774b69d818eb231/tornado-6.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab75fe43d0e1b3a5e3ceddb2a611cb40090dd116a84fc216a07a298d9e000471", size = 443192, upload-time = "2025-05-15T20:37:31.832Z" }, + { url = "https://files.pythonhosted.org/packages/92/c5/932cc6941f88336d70744b3fda420b9cb18684c034293a1c430a766b2ad9/tornado-6.5-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:119c03f440a832128820e87add8a175d211b7f36e7ee161c631780877c28f4fb", size = 442615, upload-time = "2025-05-15T20:37:33.883Z" }, + { url = "https://files.pythonhosted.org/packages/70/90/e831b7800ec9632d5eb6a0931b016b823efa963356cb1c215f035b6d5d2e/tornado-6.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:231f2193bb4c28db2bdee9e57bc6ca0cd491f345cd307c57d79613b058e807e0", size = 442592, upload-time = "2025-05-15T20:37:35.507Z" }, + { url = "https://files.pythonhosted.org/packages/71/ed/fe27371e79930559e9a90324727267ad5cf9479a2c897ff75ace1d3bec3d/tornado-6.5-cp39-abi3-win32.whl", hash = 
"sha256:fd20c816e31be1bbff1f7681f970bbbd0bb241c364220140228ba24242bcdc59", size = 443674, upload-time = "2025-05-15T20:37:37.617Z" }, + { url = "https://files.pythonhosted.org/packages/78/77/85fb3a93ef109f6de9a60acc6302f9761a3e7150a6c1b40e8a4a215db5fc/tornado-6.5-cp39-abi3-win_amd64.whl", hash = "sha256:007f036f7b661e899bd9ef3fa5f87eb2cb4d1b2e7d67368e778e140a2f101a7a", size = 444118, upload-time = "2025-05-15T20:37:39.174Z" }, + { url = "https://files.pythonhosted.org/packages/54/9a/3cc3969c733ddd4f5992b3d4ec15c9a2564192c7b1a239ba21c8f73f8af4/tornado-6.5-cp39-abi3-win_arm64.whl", hash = "sha256:542e380658dcec911215c4820654662810c06ad872eefe10def6a5e9b20e9633", size = 442874, upload-time = "2025-05-15T20:37:41.267Z" }, +] + +[[package]] +name = "typer" +version = "0.15.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6c/89/c527e6c848739be8ceb5c44eb8208c52ea3515c6cf6406aa61932887bf58/typer-0.15.4.tar.gz", hash = "sha256:89507b104f9b6a0730354f27c39fae5b63ccd0c95b1ce1f1a6ba0cfd329997c3", size = 101559, upload-time = "2025-05-14T16:34:57.704Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/62/d4ba7afe2096d5659ec3db8b15d8665bdcb92a3c6ff0b95e99895b335a9c/typer-0.15.4-py3-none-any.whl", hash = "sha256:eb0651654dcdea706780c466cf06d8f174405a659ffff8f163cfbfee98c0e173", size = 45258, upload-time = "2025-05-14T16:34:55.583Z" }, +] + +[[package]] +name = "types-awscrt" +version = "0.27.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/36/6c/583522cfb3c330e92e726af517a91c13247e555e021791a60f1b03c6ff16/types_awscrt-0.27.2.tar.gz", hash = "sha256:acd04f57119eb15626ab0ba9157fc24672421de56e7bd7b9f61681fedee44e91", size = 16304, upload-time = "2025-05-16T03:10:08.712Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/82/1ee2e5c9d28deac086ab3a6ff07c8bc393ef013a083f546c623699881715/types_awscrt-0.27.2-py3-none-any.whl", hash = "sha256:49a045f25bbd5ad2865f314512afced933aed35ddbafc252e2268efa8a787e4e", size = 37761, upload-time = "2025-05-16T03:10:07.466Z" }, +] + +[[package]] +name = "types-boto3" +version = "1.38.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore-stubs" }, + { name = "types-s3transfer" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/0c/60e3c3146f4efb1cea8e9a008833aeb61f7200aff5aa56d363259a1f54a0/types_boto3-1.38.21.tar.gz", hash = "sha256:1e2dd9e8ce0f66475ea404535966c3613f026316950a5ba00b25efbc05bbc3e8", size = 99492, upload-time = "2025-05-21T19:42:42.596Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/fd/fcf04188bd12c83fede912cdeefd32793664ab5edfe8ae6799457a89927b/types_boto3-1.38.21-py3-none-any.whl", hash = "sha256:0f04a22b1d074da1843b83db34f2352f794fbc27d8856cf6b1135ac6cc6a9249", size = 68584, upload-time = "2025-05-21T19:42:34.804Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20250516" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/88/d65ed807393285204ab6e2801e5d11fbbea811adcaa979a2ed3b67a5ef41/types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5", size = 13943, upload-time = 
"2025-05-16T03:06:58.385Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/3f/b0e8db149896005adc938a1e7f371d6d7e9eca4053a29b108978ed15e0c2/types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93", size = 14356, upload-time = "2025-05-16T03:06:57.249Z" }, +] + +[[package]] +name = "types-s3transfer" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/d5/830e9efe91a26601a2bebde6f299239d2d26e542f5d4b3bc7e8c23c81a3f/types_s3transfer-0.12.0.tar.gz", hash = "sha256:f8f59201481e904362873bf0be3267f259d60ad946ebdfcb847d092a1fa26f98", size = 14096, upload-time = "2025-04-23T00:38:19.131Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/43/6097275152463ac9bacf1e00aab30bc6682bf45f6a031be8bf029c030ba2/types_s3transfer-0.12.0-py3-none-any.whl", hash = "sha256:101bbc5b7f00b71512374df881f480fc6bf63c948b5098ab024bf3370fbfb0e8", size = 19553, upload-time = "2025-04-23T00:38:17.865Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967, upload-time = "2025-04-10T14:19:05.416Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806, upload-time = "2025-04-10T14:19:03.967Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "zict" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/ac/3c494dd7ec5122cff8252c1a209b282c0867af029f805ae9befd73ae37eb/zict-3.0.0.tar.gz", hash = 
"sha256:e321e263b6a97aafc0790c3cfb3c04656b7066e6738c37fffcca95d803c9fba5", size = 33238, upload-time = "2023-04-17T21:41:16.041Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/ab/11a76c1e2126084fde2639514f24e6111b789b0bfa4fc6264a8975c7e1f1/zict-3.0.0-py2.py3-none-any.whl", hash = "sha256:5796e36bd0e0cc8cf0fbc1ace6a68912611c1dbd74750a3f3026b9b9d6a327ae", size = 43332, upload-time = "2023-04-17T21:41:13.444Z" }, +] + +[[package]] +name = "zipp" +version = "3.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545, upload-time = "2024-11-10T15:05:20.202Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630, upload-time = "2024-11-10T15:05:19.275Z" }, +] diff --git a/scripts/maintenance/consistency/README.md b/scripts/maintenance/consistency/README.md deleted file mode 100644 index 0168e8e8c0f..00000000000 --- a/scripts/maintenance/consistency/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# how to use consistency check with a deployment - -```bash -cd osparc-simcore - -cd packages/postgres-database -# this imports the database of master in a local volume -make import-db-from-docker-volume host=DOCKER_HOST_NAME host_volume=DB_VOLUME_NAME local_volume=LOCAL_VOLUME_NAME - -cd - -cd scripts/maintenance -make -./check_consistency_data.py LOCAL_VOLUME_NAME DB_USERNAME DB_PSSWORD S3_ENDPOINT S3_ACCESS S3_SECRET S3_BUCKET -``` diff --git a/scripts/maintenance/consistency/docker-compose.yml b/scripts/maintenance/consistency/docker-compose.yml deleted file mode 100644 index f7e282c1ece..00000000000 --- a/scripts/maintenance/consistency/docker-compose.yml +++ /dev/null @@ -1,49 +0,0 @@ -version: "3.8" -services: - postgres: - image: "postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce" - volumes: - - postgres_data:/var/lib/postgresql/data - init: true - environment: - POSTGRES_USER: test - POSTGRES_PASSWORD: test - POSTGRES_DB: test - POSTGRES_HOST: 127.0.0.1 - POSTGRES_PORT: 5432 - ports: - - "5432:5432" - # https://www.postgresql.org/docs/10/runtime-config-logging.html#GUC-LOG-STATEMENT - command: - [ - "postgres", - "-c", - "log_connections=true", - "-c", - "log_disconnections=true", - "-c", - "log_duration=true", - "-c", - "log_line_prefix=[%p] [%a] [%c] [%x] ", - "-c", - "tcp_keepalives_idle=600", - "-c", - "tcp_keepalives_interval=600", - "-c", - "tcp_keepalives_count=5" - ] - adminer: - image: adminer:4.8.0 - init: true - environment: - - ADMINER_DEFAULT_SERVER=postgres - - ADMINER_DESIGN=nette - - ADMINER_PLUGINS=json-column - ports: - - 18080:8080 - depends_on: - - postgres - -volumes: - postgres_data: - name: ${POSTGRES_DATA_VOLUME} diff --git a/scripts/maintenance/consistency/dump-restore.md b/scripts/maintenance/consistency/dump-restore.md deleted file mode 100644 index 31595b2297b..00000000000 --- a/scripts/maintenance/consistency/dump-restore.md +++ /dev/null @@ -1,31 +0,0 @@ -# dump the database - -This command will dump a database to the file ```dump.sql``` - -```bash -pg_dump --host POSTGRESHOST --username POSTGRES_USER --format c --blobs --verbose --file dump.sql POSTGRESDB -``` - -# 
restore the database locally - -First a postgres instance must be setup, and the following will allow saving its contents into a docker-volume name POSTGRES_DATA_VOLUME - -```bash -export POSTGRES_DATA_VOLUME=aws-production-simcore_postgres_data; docker-compose up -``` - -This allows connecting to the local postgres instance -```bash -psql -h 127.0.0.1 -p 5432 -U test -``` - -This creates the database and a user (must be the same as on original DB) -```sql -CREATE DATABASE POSTGRESDB; -CREATE USER %ORIGINAL_DB_USER% WITH PASSWORD 'test'; -GRANT ALL PRIVILEGES ON DATABASE POSTGRESDB to %ORIGINAL_DB_USER%; -``` - -```bash -pg_restore --host 127.0.0.1 --port 5432 --username %ORIGINAL_DB_USER% -d POSTGRESDB dump.sql -``` diff --git a/scripts/maintenance/migrate_project/Dockerfile b/scripts/maintenance/migrate_project/Dockerfile index 969d84a7fc4..6b3715e62c5 100644 --- a/scripts/maintenance/migrate_project/Dockerfile +++ b/scripts/maintenance/migrate_project/Dockerfile @@ -1,17 +1,27 @@ -FROM python:3.9.12-buster +# syntax=docker/dockerfile:1 +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# the docker image is built based on debian +FROM python:3.10.14-buster RUN curl https://rclone.org/install.sh | bash && \ rclone --version +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + WORKDIR /scripts COPY packages/postgres-database postgres-database -RUN cd postgres-database && pip install . +RUN --mount=type=cache,target=/root/.cache/uv \ + cd postgres-database && uv pip install . COPY packages/settings-library settings-library -RUN cd settings-library && pip install . +RUN --mount=type=cache,target=/root/.cache/uv \ + cd settings-library && uv pip install . COPY scripts/maintenance/migrate_project/requirements.txt /scripts/requirements.txt -RUN pip install -r /scripts/requirements.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install -r /scripts/requirements.txt COPY scripts/maintenance/migrate_project/src/*.py /scripts/ diff --git a/scripts/maintenance/migrate_project/Makefile b/scripts/maintenance/migrate_project/Makefile index dc7975bbd17..252d4eda777 100644 --- a/scripts/maintenance/migrate_project/Makefile +++ b/scripts/maintenance/migrate_project/Makefile @@ -14,15 +14,11 @@ help: ## help on rule's targets # πŸ’£ No postgres database version migration is performed at the moment. This migration **only works for **identical databases**: source and target. # 🚨 If a file's or project's UUID already exist in the destination database (collision), this script will fail with an error. # βœ… Supported S3 providers are `CEPH`, `AWS`, `MINIO` -ifeq ($(IS_WIN),) - @awk 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) -else @awk 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "%-20s %s\n", $$1, $$2}' $(MAKEFILE_LIST) -endif .PHONY: build build: ## Builds docker container for the migration. Run this first. - docker build --tag ${IMAGE_NAME} --file $(PWD)/Dockerfile $(REPO_DIR) + docker buildx build --load --tag ${IMAGE_NAME} --file $(PWD)/Dockerfile $(REPO_DIR) .PHONY: debug-shell debug-shell: ## Runs a bash inside the container that performs the migrate. For manual interventions and debuging.
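The Dockerfile and Makefile hunks above replace plain `pip install` with `uv pip install` behind a BuildKit cache mount. As a standalone illustration of that pattern only (not part of this patch; the uv tag, base image and requirements path are placeholders), a minimal Dockerfile might look like:

```dockerfile
# syntax=docker/dockerfile:1
FROM python:3.10.14-buster

# copy the uv/uvx binaries from the official uv image (tag is illustrative)
COPY --from=ghcr.io/astral-sh/uv:0.6 /uv /uvx /bin/

COPY requirements.txt /tmp/requirements.txt

# reuse uv's download/wheel cache across builds instead of discarding it per layer;
# --system installs into the base image's interpreter (no virtualenv in this sketch)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r /tmp/requirements.txt
```

The cache mount only pays off when BuildKit is enabled, which is what the switch to `docker buildx build --load` in the Makefile target above provides.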
diff --git a/scripts/maintenance/migrate_project/requirements.txt b/scripts/maintenance/migrate_project/requirements.txt index 800e200809d..ba4e3d7c90a 100644 --- a/scripts/maintenance/migrate_project/requirements.txt +++ b/scripts/maintenance/migrate_project/requirements.txt @@ -1,3 +1,5 @@ +setuptools>=45 +packaging>=20.9 psycopg2 pydantic sqlalchemy diff --git a/scripts/maintenance/migrate_project/src/cli.py b/scripts/maintenance/migrate_project/src/cli.py index 6012e97bbd6..5d7525efcac 100644 --- a/scripts/maintenance/migrate_project/src/cli.py +++ b/scripts/maintenance/migrate_project/src/cli.py @@ -1,5 +1,4 @@ from pathlib import Path -from typing import Optional import typer from db import ( @@ -15,7 +14,7 @@ def main(config: Path = typer.Option(..., exists=True)): assert config.exists() # nosec settings = Settings.load_from_file(config) - typer.echo(f"Detected settings:\n{settings.json(indent=2)}\n") + typer.echo(f"Detected settings:\n{settings.model_dump_json(indent=2)}\n") r_clone_config_path = assemble_config_file( # source diff --git a/scripts/maintenance/migrate_project/src/db.py b/scripts/maintenance/migrate_project/src/db.py index 3a5a72842a8..2d4fddfcd37 100644 --- a/scripts/maintenance/migrate_project/src/db.py +++ b/scripts/maintenance/migrate_project/src/db.py @@ -1,6 +1,6 @@ from collections import deque from contextlib import contextmanager -from typing import Any, Deque, Iterator, Optional +from typing import Any, Deque, Iterator from uuid import UUID import typer @@ -26,13 +26,13 @@ def db_connection(db_config: DBConfig) -> Iterator[Connection]: def _project_uuid_exists_in_destination( connection: Connection, project_id: str ) -> bool: - query = select([projects.c.id]).where(projects.c.uuid == f"{project_id}") + query = select(projects.c.id).where(projects.c.uuid == f"{project_id}") exists = len(list(connection.execute(query))) > 0 return exists def _meta_data_exists_in_destination(connection: Connection, file_id: str) -> bool: - query = select([file_meta_data.c.file_id]).where( + query = select(file_meta_data.c.file_id).where( file_meta_data.c.file_id == f"{file_id}" ) exists = len(list(connection.execute(query))) > 0 @@ -41,14 +41,14 @@ def _meta_data_exists_in_destination(connection: Connection, file_id: str) -> bo def _get_project(connection: Connection, project_uuid: UUID) -> ResultProxy: return connection.execute( - select([projects]).where(projects.c.uuid == f"{project_uuid}") + select(projects).where(projects.c.uuid == f"{project_uuid}") ) def _get_hidden_project(connection: Connection, prj_owner: int) -> ResultProxy: return connection.execute( - select([projects]).where( - and_(projects.c.prj_owner == prj_owner, projects.c.hidden == True) + select(projects).where( + and_(projects.c.prj_owner == prj_owner, projects.c.hidden.is_(True)) ) ) @@ -57,11 +57,11 @@ def _get_file_meta_data_without_soft_links( connection: Connection, node_uuid: UUID, project_id: UUID ) -> ResultProxy: return connection.execute( - select([file_meta_data]).where( + select(file_meta_data).where( and_( file_meta_data.c.node_id == f"{node_uuid}", file_meta_data.c.project_id == f"{project_id}", - file_meta_data.c.is_soft_link != True, + file_meta_data.c.is_soft_link.is_not(True), ) ) ) @@ -90,7 +90,7 @@ def _file_summary(file_meta_data: dict) -> str: def get_project_and_files_to_migrate( project_uuid: UUID, - hidden_projects_for_user: Optional[int], + hidden_projects_for_user: int | None, src_conn: Connection, dst_conn: Connection, ) -> tuple[Deque, Deque]: diff --git 
a/scripts/maintenance/migrate_project/src/models.py b/scripts/maintenance/migrate_project/src/models.py index a26cf63e734..964d95de550 100644 --- a/scripts/maintenance/migrate_project/src/models.py +++ b/scripts/maintenance/migrate_project/src/models.py @@ -1,6 +1,5 @@ import json from pathlib import Path -from typing import Optional from uuid import UUID from pydantic import BaseModel, Field @@ -32,7 +31,7 @@ class SourceConfig(BaseModel): db: DBConfig s3: S3Config project_uuid: UUID = Field(..., description="project to be moved from the source") - hidden_projects_for_user: Optional[int] = Field( + hidden_projects_for_user: int | None = Field( None, description="by default nothing is moved, must provide an user ID for which to move the hidden projects", ) @@ -57,7 +56,7 @@ class Settings(BaseModel): @classmethod def load_from_file(cls, path: Path) -> "Settings": - return Settings.parse_obj(json.loads(path.read_text())) + return Settings.model_validate(json.loads(path.read_text())) class Config: schema_extra = { @@ -92,4 +91,8 @@ class Config: if __name__ == "__main__": # produces an empty configuration to be saved as starting point - print(Settings.parse_obj(Settings.Config.schema_extra["example"]).json(indent=2)) + print( + Settings.model_validate( + Settings.Config.schema_extra["example"] + ).model_dump_json(indent=2) + ) diff --git a/scripts/maintenance/pre_registration.py b/scripts/maintenance/pre_registration.py new file mode 100644 index 00000000000..56ad86fa09b --- /dev/null +++ b/scripts/maintenance/pre_registration.py @@ -0,0 +1,611 @@ +# /// script +# requires-python = ">=3.11" +# dependencies = [ +# "httpx", +# "pydantic[email]", +# "typer", +# ] +# /// +""" +Examples of usage: + $ uv run pre_registration.py --help + + $ uv run pre_registration.py pre-register pre_register_users.json --base-url http://localhost:8001 --email admin@email.com + + $ uv run pre_registration.py invite user@example.com --base-url http://localhost:8001 --email admin@email.com + + $ uv run pre_registration.py invite-all users.json --base-url http://localhost:8001 --email admin@email.com +""" + +import asyncio +import datetime +import json +import os +import sys +from pathlib import Path +from typing import Annotated, Any + +import typer +from httpx import AsyncClient, HTTPStatusError +from pydantic import ( + BaseModel, + BeforeValidator, + EmailStr, + Field, + PositiveInt, + SecretStr, + TypeAdapter, + ValidationError, +) + + +def _print_info(message: str) -> None: + typer.secho(message, fg=typer.colors.BLUE) + + +def _print_success(message: str) -> None: + typer.secho(message, fg=typer.colors.GREEN) + + +def _print_error(message: str) -> None: + typer.secho(f"Error: {message}", fg=typer.colors.RED, err=True) + + +class LoginCredentialsRequest(BaseModel): + """Request body model for login endpoint""" + + email: EmailStr + password: SecretStr + + +class PreRegisterUserRequest(BaseModel): + """Request body model for pre-registering a user""" + + firstName: str + lastName: str + email: EmailStr + institution: str | None = None + phone: str | None = None + address: str | None = None + city: str | None = None + state: Annotated[str | None, Field(description="State, province, canton, ...")] + postalCode: str | None = None + country: str | None = None + extras: dict[str, Any] = {} + + +class InvitationGenerateRequest(BaseModel): + """Request body model for generating an invitation""" + + guest: EmailStr + trialAccountDays: PositiveInt | None = None + extraCreditsInUsd: Annotated[int, Field(ge=0, lt=500)] | 
None = None + + +async def _login( + client: AsyncClient, email: EmailStr, password: SecretStr +) -> dict[str, Any]: + """Login user with the provided credentials""" + path = "/v0/auth/login" + + credentials = LoginCredentialsRequest(email=email, password=password) + + response = await client.post( + path, + json={ + "email": credentials.email, + "password": credentials.password.get_secret_value(), + }, + ) + response.raise_for_status() + + return response.json()["data"] + + +async def _logout_current_user(client: AsyncClient): + path = "/v0/auth/logout" + r = await client.post(path) + r.raise_for_status() + + +async def _pre_register_user( + client: AsyncClient, + first_name: str, + last_name: str, + email: EmailStr, + institution: str | None = None, + phone: str | None = None, + address: str | None = None, + city: str | None = None, + state: str | None = None, + postal_code: str | None = None, + country: str | None = None, + extras: dict[str, Any] = {}, +) -> dict[str, Any]: + """Pre-register a user in the system""" + path = "/v0/admin/user-accounts:pre-register" + + user_data = PreRegisterUserRequest( + firstName=first_name, + lastName=last_name, + email=email, + institution=institution, + phone=phone, + address=address or "", + city=city or "", + state=state, + postalCode=postal_code or "", + country=country, + extras=extras, + ) + + response = await client.post(path, json=user_data.model_dump(mode="json")) + response.raise_for_status() + + return response.json()["data"] + + +async def _create_invitation( + client: AsyncClient, + guest_email: EmailStr, + trial_days: PositiveInt | None = None, + extra_credits: int | None = None, +) -> dict[str, Any]: + """Generate an invitation link for a guest email""" + path = "/v0/invitation:generate" + + invitation_data = InvitationGenerateRequest( + guest=guest_email, + trialAccountDays=trial_days, + extraCreditsInUsd=extra_credits, + ) + + response = await client.post( + path, + json=invitation_data.model_dump( + exclude_none=True, exclude_unset=True, mode="json" + ), + ) + response.raise_for_status() + + return response.json()["data"] + + +async def _pre_register_users_from_list( + client: AsyncClient, + users_data: list[PreRegisterUserRequest], +) -> list[dict[str, Any]]: + """Pre-registers multiple users from a list of user data""" + results = [] + for user_data in users_data: + try: + result = await _pre_register_user( + client=client, + first_name=user_data.firstName, + last_name=user_data.lastName, + email=user_data.email, + institution=user_data.institution, + phone=user_data.phone, + address=user_data.address, + city=user_data.city, + state=user_data.state, + postal_code=user_data.postalCode, + country=user_data.country, + extras=user_data.extras, + ) + results.append(result) + _print_success(f"Successfully pre-registered user: {user_data.email}") + + except HTTPStatusError as e: + _print_error( + f"Failed to pre-register user {user_data.email} with {e.response.status_code}: {e.response.text}" + ) + + except Exception as e: + _print_error(f"Failed to pre-register user {user_data.email}: {str(e)}") + + return results + + +async def _create_invitations_from_list( + client: AsyncClient, + emails: list[EmailStr], + trial_days: PositiveInt | None = None, + extra_credits: int | None = None, +) -> list[dict[str, Any]]: + """Generate invitations for multiple users from a list of emails""" + results = [] + for email in emails: + try: + result = await _create_invitation( + client=client, + guest_email=email, + trial_days=trial_days, + 
extra_credits=extra_credits, + ) + results.append({"email": email, "invitation": result}) + _print_success(f"Successfully generated invitation for: {email}") + + except HTTPStatusError as e: + _print_error( + f"Failed to generate invitation for {email} with {e.response.status_code}: {e.response.text}" + ) + results.append({"email": email, "error": str(e)}) + + except Exception as e: + _print_error(f"Failed to generate invitation for {email}: {str(e)}") + results.append({"email": email, "error": str(e)}) + + return results + + +async def run_pre_registration( + base_url: str, + users_file_path: Path, + admin_email: str, + admin_password: str, +) -> None: + """Run the pre-registration process""" + # Read and parse the users file + try: + users_data_raw = json.loads(users_file_path.read_text()) + users_data = TypeAdapter(list[PreRegisterUserRequest]).validate_python( + users_data_raw + ) + except json.JSONDecodeError: + _print_error(f"{users_file_path} is not a valid JSON file") + sys.exit(os.EX_DATAERR) + except ValidationError as e: + _print_error(f"Invalid user data format: {e}") + sys.exit(os.EX_DATAERR) + except Exception as e: + _print_error(f"Reading or parsing {users_file_path}: {str(e)}") + sys.exit(os.EX_IOERR) + + # Create an HTTP client and process + async with AsyncClient(base_url=base_url, timeout=30) as client: + try: + # Login as admin + _print_info(f"Logging in as {admin_email}...") + await _login( + client=client, + email=admin_email, + password=admin_password, + ) + + # Pre-register users + _print_info(f"Pre-registering {len(users_data)} users...") + results = await _pre_register_users_from_list(client, users_data) + _print_success(f"Successfully pre-registered {len(results)} users") + + # Dump results to a file + timestamp = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d_%H%M%S") + input_filename = users_file_path.stem + output_filename = f"{input_filename}_results_{timestamp}.json" + output_path = users_file_path.parent / output_filename + + output_path.write_text(json.dumps(results, indent=1)) + _print_success(f"Results written to {output_path}") + + # Logout + _print_info("Logging out...") + await _logout_current_user(client) + + except Exception as e: + _print_error(f"{str(e)}") + sys.exit(os.EX_SOFTWARE) + + +async def run_create_invitation( + base_url: str, + guest_email: EmailStr, + admin_email: str, + admin_password: str, + trial_days: PositiveInt | None = None, + extra_credits: int | None = None, +) -> None: + """Run the invitation generation process""" + async with AsyncClient(base_url=base_url, timeout=30) as client: + try: + # Login as admin + _print_info(f"Logging in as {admin_email}...") + await _login( + client=client, + email=admin_email, + password=admin_password, + ) + + # Generate invitation + _print_info(f"Generating invitation for {guest_email}...") + result = await _create_invitation( + client, guest_email, trial_days=trial_days, extra_credits=extra_credits + ) + + # Display invitation link + _print_success(f"Successfully generated invitation for {guest_email}") + _print_success(f"Invitation link: {result.get('link', 'No link returned')}") + + # Save result to a file + timestamp = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d_%H%M%S") + output_filename = f"invitation_{guest_email.split('@')[0]}_{timestamp}.json" + output_path = Path(output_filename) + output_path.write_text(json.dumps(result, indent=1)) + _print_success(f"Result written to {output_path}") + + # Logout + _print_info("Logging out...") + await 
_logout_current_user(client) + + except HTTPStatusError as e: + _print_error( + f"Failed to generate invitation with {e.response.status_code}: {e.response.text}" + ) + sys.exit(os.EX_SOFTWARE) + except Exception as e: + _print_error(f"{str(e)}") + sys.exit(os.EX_SOFTWARE) + + +async def run_bulk_create_invitation( + base_url: str, + emails_file_path: Path, + admin_email: str, + admin_password: str, + trial_days: PositiveInt | None = None, + extra_credits: int | None = None, +) -> None: + """Run the bulk invitation process""" + # Read and parse the emails file + try: + file_content = emails_file_path.read_text() + data = json.loads(file_content) + + # Check if the file contains a list of emails or objects with email property + if isinstance(data, list): + if all(isinstance(item, str) for item in data): + # Simple list of email strings + data = data + elif all(isinstance(item, dict) and "email" in item for item in data): + # List of objects with email property (like pre-registered users) + data = [item["email"].lower() for item in data] + else: + _print_error( + "File must contain either a list of email strings or objects with 'email' property" + ) + sys.exit(os.EX_DATAERR) + + emails = TypeAdapter( + list[Annotated[EmailStr, BeforeValidator(lambda s: s.lower())]] + ).validate_python(data) + else: + _print_error("File must contain a JSON array") + sys.exit(os.EX_DATAERR) + + except json.JSONDecodeError: + _print_error(f"{emails_file_path} is not a valid JSON file") + sys.exit(os.EX_DATAERR) + except ValidationError as e: + _print_error(f"Invalid email format: {e}") + sys.exit(os.EX_DATAERR) + except Exception as e: + _print_error(f"Reading or parsing {emails_file_path}: {str(e)}") + sys.exit(os.EX_IOERR) + + # Create an HTTP client and process + async with AsyncClient(base_url=base_url, timeout=30) as client: + try: + # Login as admin + _print_info(f"Logging in as {admin_email}...") + await _login( + client=client, + email=admin_email, + password=admin_password, + ) + + # Generate invitations + _print_info(f"Generating invitations for {len(emails)} users...") + results = await _create_invitations_from_list( + client, emails, trial_days=trial_days, extra_credits=extra_credits + ) + + successful = sum(1 for r in results if "invitation" in r) + _print_success( + f"Successfully generated {successful} invitations out of {len(emails)} users" + ) + + # Dump results to a file + timestamp = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d_%H%M%S") + input_filename = emails_file_path.stem + output_filename = f"{input_filename}_invitations_{timestamp}.json" + output_path = emails_file_path.parent / output_filename + + output_path.write_text(json.dumps(results, indent=1)) + _print_success(f"Results written to {output_path}") + + # Logout + _print_info("Logging out...") + await _logout_current_user(client) + + except Exception as e: + _print_error(f"{str(e)}") + sys.exit(os.EX_SOFTWARE) + + +# Create Typer app with common options +app = typer.Typer(help="User management utilities for osparc-simcore") + +# Common options +BaseUrlOption = Annotated[ + str, + typer.Option( + "--base-url", + "-u", + help="Base URL of the API", + ), +] + +AdminEmailOption = Annotated[ + str, + typer.Option( + "--email", + "-e", + help="Admin email for login", + ), +] + +AdminPasswordOption = Annotated[ + str, + typer.Option( + "--password", + "-p", + help="Admin password for login", + prompt=True, + hide_input=True, + ), +] + + +@app.command() +def pre_register( + users_file: Annotated[ + Path, + 
typer.Argument(help="Path to JSON file containing user data to pre-register"), + ], + base_url: BaseUrlOption = "http://localhost:8001", + admin_email: AdminEmailOption = None, + admin_password: AdminPasswordOption = None, +): + """Pre-register users from a JSON file. + + The JSON file should contain a list of user objects with the following fields: + firstName, lastName, email, and optionally institution, phone, address, city, state, postalCode, country. + """ + if not users_file.exists(): + _print_error(f"File {users_file} does not exist") + sys.exit(os.EX_NOINPUT) + + if not admin_email: + admin_email = typer.prompt("Admin email") + + _print_info(f"Pre-registering users from {users_file} using {base_url}") + asyncio.run(run_pre_registration(base_url, users_file, admin_email, admin_password)) + _print_success("Pre-registration completed") + + +@app.command() +def invite( + guest_email: Annotated[ + str, + typer.Argument(help="Email address of the guest to invite"), + ], + trial_days: Annotated[ + int, + typer.Option( + "--trial-days", + "-t", + help="Number of days for trial account", + ), + ] = None, + extra_credits: Annotated[ + int, + typer.Option( + "--extra-credits", + "-c", + help="Extra credits in USD (0-499)", + ), + ] = None, + base_url: BaseUrlOption = "http://localhost:8001", + admin_email: AdminEmailOption = None, + admin_password: AdminPasswordOption = None, +): + """Generate an invitation link for a guest email.""" + if not admin_email: + admin_email = typer.prompt("Admin email") + + # Validate trial_days and extra_credits + if trial_days is not None and trial_days <= 0: + _print_error("Trial days must be a positive integer") + sys.exit(os.EX_USAGE) + + if extra_credits is not None and (extra_credits < 0 or extra_credits >= 500): + _print_error("Extra credits must be between 0 and 499") + sys.exit(os.EX_USAGE) + + _print_info(f"Generating invitation for {guest_email} using {base_url}") + asyncio.run( + run_create_invitation( + base_url, + guest_email, + admin_email, + admin_password, + trial_days, + extra_credits, + ) + ) + _print_success("Invitation generation completed") + + +@app.command() +def invite_all( + emails_file: Annotated[ + Path, + typer.Argument(help="Path to JSON file containing emails to invite"), + ], + trial_days: Annotated[ + int, + typer.Option( + "--trial-days", + "-t", + help="Number of days for trial account", + ), + ] = None, + extra_credits: Annotated[ + int, + typer.Option( + "--extra-credits", + "-c", + help="Extra credits in USD (0-499)", + ), + ] = None, + base_url: BaseUrlOption = "http://localhost:8001", + admin_email: AdminEmailOption = None, + admin_password: AdminPasswordOption = None, +): + """Generate invitation links for multiple users from a JSON file. + + The JSON file should contain either: + 1. A list of email strings: ["user1@example.com", "user2@example.com"] + 2. A list of objects with an email property: [{"email": "user1@example.com", ...}, ...] 
+ """ + if not emails_file.exists(): + _print_error(f"File {emails_file} does not exist") + sys.exit(os.EX_NOINPUT) + + if not admin_email: + admin_email = typer.prompt("Admin email") + + # Validate trial_days and extra_credits + if trial_days is not None and trial_days <= 0: + _print_error("Trial days must be a positive integer") + sys.exit(os.EX_USAGE) + + if extra_credits is not None and (extra_credits < 0 or extra_credits >= 500): + _print_error("Extra credits must be between 0 and 499") + sys.exit(os.EX_USAGE) + + _print_info(f"Generating invitations for users in {emails_file} using {base_url}") + asyncio.run( + run_bulk_create_invitation( + base_url, + emails_file, + admin_email, + admin_password, + trial_days, + extra_credits, + ) + ) + _print_success("Bulk invitation completed") + + +if __name__ == "__main__": + app() diff --git a/scripts/maintenance/requirements.txt b/scripts/maintenance/requirements.txt index 315585c0189..51d9b5ae629 100644 --- a/scripts/maintenance/requirements.txt +++ b/scripts/maintenance/requirements.txt @@ -1,9 +1,16 @@ +setuptools>=45 +packaging>=20.9 aiopg black +boto3 +types-boto3 httpx +parse +paramiko psycopg2-binary -pydantic[email,dotenv] +pydantic[email]<2 pylint python-dateutil +python-dotenv tenacity -typer[all] +typer diff --git a/scripts/metrics/compute_list_of_images_in_registry.py b/scripts/metrics/compute_list_of_images_in_registry.py index c06c03a042b..518c7932b5f 100755 --- a/scripts/metrics/compute_list_of_images_in_registry.py +++ b/scripts/metrics/compute_list_of_images_in_registry.py @@ -2,12 +2,10 @@ import asyncio import json -import pdb from collections import defaultdict, deque from datetime import date, datetime from pathlib import Path from pprint import pformat -from typing import Dict, List, Optional, Tuple import typer from httpx import URL, AsyncClient @@ -38,9 +36,9 @@ async def list_images_in_registry( endpoint: URL, username: str, password: str, - from_date: Optional[datetime], + from_date: datetime | None, to_date: datetime, -) -> Dict[str, List[Tuple[str, str, str, str]]]: +) -> dict[str, list[tuple[str, str, str, str]]]: if not from_date: from_date = datetime(year=2000, month=1, day=1) typer.secho( @@ -115,12 +113,12 @@ def main( endpoint: str, username: str, password: str = typer.Option(..., prompt=True, hide_input=True), - from_date: Optional[datetime] = typer.Option(None, formats=["%Y-%m-%d"]), + from_date: datetime | None = typer.Option(None, formats=["%Y-%m-%d"]), to_date: datetime = typer.Option(f"{date.today()}", formats=["%Y-%m-%d"]), markdown: bool = typer.Option(False), ): endpoint_url = URL(endpoint) - list_of_images: Dict[str, List[Tuple[str, str, str, str]]] = asyncio.run( + list_of_images: dict[str, list[tuple[str, str, str, str]]] = asyncio.run( list_images_in_registry(endpoint_url, username, password, from_date, to_date) ) diff --git a/scripts/metrics/requirements.txt b/scripts/metrics/requirements.txt index c843a6494bf..dd9f3618547 100644 --- a/scripts/metrics/requirements.txt +++ b/scripts/metrics/requirements.txt @@ -1,3 +1,5 @@ +setuptools>=45 +packaging>=20.9 black httpx[http2] pydantic[email,dotenv] diff --git a/scripts/mypy.bash b/scripts/mypy.bash deleted file mode 100755 index 54f47e1c03b..00000000000 --- a/scripts/mypy.bash +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# http://redsymbol.net/articles/unofficial-bash-strict-mode/ - -set -o errexit -set -o nounset -set -o pipefail -IFS=$'\n\t' - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) 
-IMAGE_NAME="local/scripts-$(basename "$0"):latest" -WORKDIR="$(pwd)" - -DEFAULT_MYPY_CONFIG="$(git rev-parse --show-toplevel)/mypy.ini" -MYPY_CONFIG=$(realpath "${2:-${DEFAULT_MYPY_CONFIG}}") - -build() { - echo Building image "$IMAGE_NAME" - # - docker build \ - --quiet \ - --tag "$IMAGE_NAME" \ - "$SCRIPT_DIR/mypy" -} - -run() { - echo Using "$(docker run --rm "$IMAGE_NAME" --version)" - echo Mypy config "${MYPY_CONFIG}" - echo Mypying "$(realpath "$@")": - # - docker run \ - --rm \ - --volume="/etc/group:/etc/group:ro" \ - --volume="/etc/passwd:/etc/passwd:ro" \ - --user="$(id --user "$USER")":"$(id --group "$USER")" \ - --volume "$MYPY_CONFIG":/config/mypy.ini \ - --volume "$WORKDIR":/src \ - --workdir=/src \ - "$IMAGE_NAME" \ - "$@" -} - -# ---------------------------------------------------------------------- -# MAIN -# -# USAGE -# ./scripts/mypy.bash --help -build -run "$@" -echo "DONE" -# ---------------------------------------------------------------------- diff --git a/scripts/mypy/Dockerfile b/scripts/mypy/Dockerfile deleted file mode 100644 index f06626b05aa..00000000000 --- a/scripts/mypy/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base - - -COPY requirements.txt /requirements.txt - -RUN pip install --upgrade pip \ - && pip install -r requirements.txt - -ENTRYPOINT ["mypy", "--config-file", "/config/mypy.ini", "--warn-unused-configs"] diff --git a/scripts/mypy/requirements.txt b/scripts/mypy/requirements.txt deleted file mode 100644 index 144956c4dbc..00000000000 --- a/scripts/mypy/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -mypy==0.971 -pydantic[email]==1.9.2 -types-aiofiles==0.8.10 -types-attrs -types-PyYAML==6.0.11 -types-redis -types-setuptools -types-ujson diff --git a/scripts/openapi-diff.bash b/scripts/openapi-diff.bash new file mode 100755 index 00000000000..519bcb6fe51 --- /dev/null +++ b/scripts/openapi-diff.bash @@ -0,0 +1,20 @@ +#!/bin/bash +# +# +# - https://github.com/OpenAPITools/openapi-diff +# + +# NOTE: do not forget that the target /specs +# +# +# + +exec docker run \ + --interactive \ + --rm \ + --volume="/etc/group:/etc/group:ro" \ + --volume="/etc/passwd:/etc/passwd:ro" \ + --user="$(id --user "$USER")":"$(id --group "$USER")" \ + --volume "$(pwd):/specs" \ + tufin/oasdiff:latest \ + "$@" diff --git a/scripts/openapi-generator-cli.bash b/scripts/openapi-generator-cli.bash deleted file mode 100755 index b1072c66353..00000000000 --- a/scripts/openapi-generator-cli.bash +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# OpenAPI Generator: generate clients, servers, and documentation from OpenAPI 2.0/3.x documents -# -# -# usage: openapi-generator-cli [] -# -# The most commonly used openapi-generator-cli commands are: -# config-help Config help for chosen lang -# generate Generate code with the specified generator. -# help Display help information -# list Lists the available generators -# meta MetaGenerator. Generator for creating a new template set and configuration for Codegen. The output will be based on the language you specify, and includes default templates to include. 
-# validate Validate specification -# version Show version information -# -# IMPORTANT: use absolute paths so they can be automaticaly mapped inside of the container -# -# REFERENCES: -# https://openapi-generator.tech/ -# https://hub.docker.com/r/openapitools/openapi-generator-cli -# - -USERID=$(stat --format=%u "$PWD") -GROUPID=$(stat --format=%g "$PWD") - -# FIXME: replaces automatically $PWD by /local so it maps correctly in the container -#PATTERN=s+$PWD+/local+ -#CMD=$(echo "$@" | sed $PATTERN) - -# TODO: check SAME digest. Perhaps push into itisfoundation repo? -# openapitools/openapi-generator-cli v4.2.3 sha256:c90e7f2d63340574bba015ad88a5abb55d5b25ab3d5460c02e14a566574e8d55 - -exec docker run --rm \ - --user "$USERID:$GROUPID" \ - --volume "$PWD:/local" \ - openapitools/openapi-generator-cli:v4.2.3 "$@" - -# Example -# openapi-generator-cli generate -i /local/api/specs/webserver/openapi.yaml -g python -o /local/out/sdk/webserver -# diff --git a/scripts/openapi-pydantic-models-generator.bash b/scripts/openapi-pydantic-models-generator.bash index 7b48fa4c167..6f7ebf3e813 100755 --- a/scripts/openapi-pydantic-models-generator.bash +++ b/scripts/openapi-pydantic-models-generator.bash @@ -1,18 +1,15 @@ #!/bin/bash -#!/bin/bash # http://redsymbol.net/articles/unofficial-bash-strict-mode/ set -o errexit set -o nounset set -o pipefail IFS=$'\n\t' -PYTHON_VERSION=3.9 +PYTHON_VERSION=3.10.14 IMAGE_NAME="local/datamodel-code-generator:${PYTHON_VERSION}" WORKDIR="$(pwd)" - -Build() -{ +Build() { docker buildx build \ --build-arg PYTHON_VERSION="${PYTHON_VERSION}" \ --build-arg HOME_DIR="/home/$USER" \ @@ -20,24 +17,31 @@ Build() --load \ - <&2 + # Check the file for lines with more than one dollar sign + if grep -n -P '\$\{[^}]*\$\{[^}]*\}[^}]*\}' "$file"; then + echo "Error: $file contains a line with more than one dollar sign." + exit 1 + elif grep -n -P '\$[a-zA-Z_][a-zA-Z0-9_]*' "$file"; then + echo "Error: $file contains a line with an environment variable not wrapped in curly braces." 
+ exit 1 + fi + fi +done + + +# If no errors were found, allow the commit +exit 0 diff --git a/scripts/pydeps-docker/Dockerfile b/scripts/pydeps-docker/Dockerfile index 67b2ec430ac..82fbf0b6e36 100644 --- a/scripts/pydeps-docker/Dockerfile +++ b/scripts/pydeps-docker/Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # NOTE: This is a first step towards a devcontainer # to perform operations like pip-compile or auto-formatting # that preserves identical environment across developer machines @@ -7,8 +8,11 @@ # - Can be installed with pyenv (SEE pyenv install --list ) # # -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# the docker image is built based on debian +FROM python:${PYTHON_VERSION}-slim-bookworm AS base RUN apt-get update \ @@ -22,16 +26,20 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && apt-get clean +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + ARG HOME_DIR RUN mkdir -p ${HOME_DIR} COPY .pydeps ${HOME_DIR}/.pydeps -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools # devenv -RUN pip install \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install \ pydeps diff --git a/scripts/pydeps.bash b/scripts/pydeps.bash index f73863fce71..affd21597d7 100755 --- a/scripts/pydeps.bash +++ b/scripts/pydeps.bash @@ -1,28 +1,27 @@ #!/bin/bash # http://redsymbol.net/articles/unofficial-bash-strict-mode/ +# NOTE: used for circular dependency detection + set -o errexit set -o nounset set -o pipefail IFS=$'\n\t' -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -PYTHON_VERSION=3.9.12 +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +PYTHON_VERSION=3.10.14 IMAGE_NAME="local/pydeps-devkit:${PYTHON_VERSION}" WORKDIR="$(pwd)" - -Build() -{ - docker build \ +Build() { + docker buildx build \ + --load \ --build-arg PYTHON_VERSION="${PYTHON_VERSION}" \ --build-arg HOME_DIR="/home/$USER" \ --tag "$IMAGE_NAME" \ "$SCRIPT_DIR/pydeps-docker" } - -Run() -{ +Run() { docker run \ -it \ --workdir="/home/$USER/workdir" \ diff --git a/scripts/pyupgrade.bash b/scripts/pyupgrade.bash index b015f0194ac..13436cca412 100755 --- a/scripts/pyupgrade.bash +++ b/scripts/pyupgrade.bash @@ -8,16 +8,17 @@ IFS=$'\n\t' # # # NOTE: check --py* flag in CLI when PYTHON_VERSION is modified -PYTHON_VERSION=3.9.12 +PYTHON_VERSION=3.10.14 IMAGE_NAME="local/pyupgrade-devkit:${PYTHON_VERSION}" WORKDIR="$(pwd)" Build() { docker buildx build \ + --load \ --build-arg HOME_DIR="/home/$USER" \ --tag "$IMAGE_NAME" \ - < task["timestamp"] + ): + container_status = f"[green]{task['status']}[/green]" + container_timestamp = f"{task['timestamp']}" + container_git_sha = task["git_sha"] + + oldest_running_task_timestamp = task["timestamp"] + if task["status"] == "starting": + container_status = f"[blue]{task['status']}[/blue]" + container_timestamp = f"{task['timestamp']}" + container_git_sha = task["git_sha"] + break + + table.add_row( + service_name, container_status, container_timestamp, container_git_sha + ) + + console.print(table) + + +def check_running_sidecars(settings, deployment): + token = get_bearer_token(settings) + services = get_services(settings, token) + + sidecars: list[RunningSidecar] = check_simcore_running_sidecars(settings, services) 
+ table = Table( + "Sidecar name", + "Created at", + "User ID", + "Project ID", + "Service Key", + "Service Version", + title=f"[bold yellow]{deployment.upper()}[/bold yellow]", + ) + for sidecar in sidecars: + table.add_row( + sidecar.name, + f"{sidecar.created_at}", + sidecar.user_id, + sidecar.project_id, + sidecar.service_key, + sidecar.service_version, + ) + + console.print(table) diff --git a/scripts/release/monitor_release/portainer_utils.py b/scripts/release/monitor_release/portainer_utils.py new file mode 100644 index 00000000000..5a63a62f998 --- /dev/null +++ b/scripts/release/monitor_release/portainer_utils.py @@ -0,0 +1,132 @@ +import json + +import arrow +import requests +from monitor_release.models import RunningSidecar +from monitor_release.settings import LegacySettings + + +def get_bearer_token(settings: LegacySettings): + headers = {"accept": "application/json", "Content-Type": "application/json"} + payload = json.dumps( + { + "Username": settings.portainer_username, + "Password": settings.portainer_password, + } + ) + response = requests.post( + f"{settings.portainer_url}/portainer/api/auth", + headers=headers, + data=payload, + ) + return response.json()["jwt"] + + +def get_services(settings: LegacySettings, bearer_token): + services_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/services" + response = requests.get( + services_url, + headers={ + "Authorization": "Bearer " + bearer_token, + "Content-Type": "application/json", + }, + ) + return response.json() + + +def get_tasks(settings: LegacySettings, bearer_token): + tasks_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/tasks" + response = requests.get( + tasks_url, + headers={ + "Authorization": "Bearer " + bearer_token, + "Content-Type": "application/json", + }, + ) + return response.json() + + +def get_containers(settings: LegacySettings, bearer_token): + bearer_token = get_bearer_token(settings) + + containers_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/containers/json?all=true" + response = requests.get( + containers_url, + headers={ + "Authorization": "Bearer " + bearer_token, + "Content-Type": "application/json", + }, + ) + return response.json() + + +def check_simcore_running_sidecars(settings: LegacySettings, services): + running_sidecars: list[RunningSidecar] = [] + for service in services: + if ( + service["Spec"]["Name"].startswith("dy-sidecar") + and service["Spec"]["Labels"]["io.simcore.runtime.swarm-stack-name"] + == settings.swarm_stack_name + ): + running_sidecars.append( + RunningSidecar( + name=service["Spec"]["Name"], + created_at=arrow.get(service["CreatedAt"]).datetime, + user_id=service["Spec"]["Labels"]["io.simcore.runtime.user-id"], + project_id=service["Spec"]["Labels"][ + "io.simcore.runtime.project-id" + ], + service_key=service["Spec"]["Labels"][ + "io.simcore.runtime.service-key" + ], + service_version=service["Spec"]["Labels"][ + "io.simcore.runtime.service-version" + ], + ) + ) + return running_sidecars + + +def _generate_containers_map(containers): + container_map = {} + for container in containers: + git_sha = ( + container.get("Labels").get("org.opencontainers.image.revision") + if container.get("Labels").get( + "org.opencontainers.image.revision" + ) # container.get("Labels").get("org.label-schema.vcs-ref") + else container.get("Labels").get("org.label-schema.vcs-ref") + ) + + container_map[container["Id"]] = 
{"git_sha": git_sha} + return container_map + + +def check_simcore_deployed_services( + settings: LegacySettings, services, tasks, containers +): + container_map = _generate_containers_map(containers) + service_task_map = {} + for service in services: + if service["Spec"]["Name"].startswith(settings.starts_with): + service_task_map[service["ID"]] = { + "service_name": service["Spec"]["Name"], + "tasks": [], + } + + for task in tasks: + if task["ServiceID"] in service_task_map: + if task["Status"].get("ContainerStatus") is None: + continue + container_id = task["Status"]["ContainerStatus"]["ContainerID"] + + service_task_map[task["ServiceID"]]["tasks"].append( + { + "created_at": arrow.get(task["CreatedAt"]).datetime, + "status": task["Status"]["State"], + "timestamp": arrow.get(task["Status"]["Timestamp"]).datetime, + "git_sha": container_map.get(container_id, {}).get("git_sha"), + } + ) + + return service_task_map diff --git a/scripts/release/monitor_release/postgres.py b/scripts/release/monitor_release/postgres.py new file mode 100644 index 00000000000..fdffa2a0fd7 --- /dev/null +++ b/scripts/release/monitor_release/postgres.py @@ -0,0 +1 @@ +# placeholder diff --git a/scripts/release/monitor_release/settings.py b/scripts/release/monitor_release/settings.py new file mode 100644 index 00000000000..947732f2ceb --- /dev/null +++ b/scripts/release/monitor_release/settings.py @@ -0,0 +1,231 @@ +import os +from pathlib import Path +from typing import Final, Self + +from dotenv import load_dotenv +from pydantic import BaseModel, Field, HttpUrl, TypeAdapter, model_validator +from pydantic_settings import BaseSettings, SettingsConfigDict + +from .models import Deployment + +# +_DEPLOYMENTS_MAP = { + Deployment.master: "osparc-master.speag.com", + Deployment.aws_staging: "osparc-staging.io", + Deployment.dalco_staging: "osparc-staging.speag.com", + Deployment.aws_nih_production: "osparc.io", + Deployment.dalco_production: "osparc.speag.com", + Deployment.tip_production: "tip.itis.swiss", + Deployment.aws_zmt_production: "sim4life.io", +} +_DEPLOYMENTS_IMAP = {v: k for k, v in _DEPLOYMENTS_MAP.items()} + +SECRETS_CONFIG_FILE_NAME: Final[str] = "repo.config" + + +def get_repo_configs_paths(top_folder: Path) -> list[Path]: + return list(top_folder.rglob(SECRETS_CONFIG_FILE_NAME)) + + +def get_deployment_name_or_none(repo_config: Path) -> str | None: + if repo_config.name == "repo.config": + return repo_config.resolve().parent.name + return None + + +class ReleaseSettings(BaseSettings): + OSPARC_DEPLOYMENT_TARGET: str + PORTAINER_DOMAIN: str + + portainer_username: str = Field(..., validation_alias="PORTAINER_USER") + portainer_password: str = Field(..., validation_alias="PORTAINER_PASSWORD") + swarm_stack_name: str = Field(..., validation_alias="SWARM_STACK_NAME") + portainer_endpoint_version: int + starts_with: str + portainer_url: HttpUrl | None = None + + model_config = SettingsConfigDict(extra="ignore") + + @model_validator(mode="after") + def deduce_portainer_url(self) -> Self: + self.portainer_url = TypeAdapter(HttpUrl).validate_python( + f"https://{self.PORTAINER_DOMAIN}" + ) + return self + + +def get_release_settings(env_file_path: Path): + + # NOTE: these conversions and checks are done to keep + deployment_name = get_deployment_name_or_none(env_file_path) + if deployment_name is None: + msg = f"{env_file_path=} cannot be matched to any deployment" + raise ValueError(msg) + + deployment = _DEPLOYMENTS_IMAP.get(deployment_name) + if deployment is None: + msg = f"{deployment_name=} cannot be 
matched to any known deployment {set(_DEPLOYMENTS_IMAP.keys())}" + raise ValueError(msg) + + match deployment_name: + # NOTE: `portainer_endpoint_version` and `starts_with` cannot be deduced from the + # information in the `repo.config`. For that reason we have to set + # those values in the code. + # + + case "osparc-master.speag.com": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=1, + starts_with="master-simcore_master", + ) + case "osparc-staging.speag.com": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=1, + starts_with="staging-simcore_staging", + ) + case "osparc.speag.com": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=1, + starts_with="production-simcore_production", + ) + case "tip.itis.swiss": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=2, + starts_with="production-simcore_production", + ) + case "osparc-staging.io": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=2, + starts_with="staging-simcore_staging", + ) + case "osparc.io": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=2, + starts_with="production-simcore_production", + ) + case "sim4life.io": + settings = ReleaseSettings( + _env_file=env_file_path, # type: ignore + portainer_endpoint_version=1, + starts_with="staging-simcore_staging", + ) + case _: + msg = f"Unknown {deployment=}. Please set up a new ReleaseSettings for this configuration" + raise ValueError(msg) + + return settings + + +class LegacySettings(BaseModel): + portainer_url: str + portainer_username: str + portainer_password: str + starts_with: str + swarm_stack_name: str + portainer_endpoint_version: int + + +def get_legacy_settings(env_file, deployment: str) -> LegacySettings: + # pylint: disable=too-many-return-statements + load_dotenv(env_file) + + if deployment == "master": + portainer_url = os.getenv("MASTER_PORTAINER_URL") + portainer_username = os.getenv("MASTER_PORTAINER_USERNAME") + portainer_password = os.getenv("MASTER_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="master-simcore_master", + swarm_stack_name="master-simcore", + portainer_endpoint_version=1, + ) + if deployment == "dalco-staging": + portainer_url = os.getenv("DALCO_STAGING_PORTAINER_URL") + portainer_username = os.getenv("DALCO_STAGING_PORTAINER_USERNAME") + portainer_password = os.getenv("DALCO_STAGING_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="staging-simcore_staging", + swarm_stack_name="staging-simcore", + portainer_endpoint_version=1, + ) + if deployment == "dalco-production": + portainer_url = os.getenv("DALCO_PRODUCTION_PORTAINER_URL") + portainer_username = os.getenv("DALCO_PRODUCTION_PORTAINER_USERNAME") + portainer_password = os.getenv("DALCO_PRODUCTION_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="production-simcore_production", + swarm_stack_name="production-simcore", + portainer_endpoint_version=1, + ) + if deployment == "tip-production": + portainer_url = 
os.getenv("TIP_PRODUCTION_PORTAINER_URL") + portainer_username = os.getenv("TIP_PRODUCTION_PORTAINER_USERNAME") + portainer_password = os.getenv("TIP_PRODUCTION_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="production-simcore_production", + swarm_stack_name="production-simcore", + portainer_endpoint_version=2, + ) + if deployment == "aws-staging": + portainer_url = os.getenv("AWS_STAGING_PORTAINER_URL") + portainer_username = os.getenv("AWS_STAGING_PORTAINER_USERNAME") + portainer_password = os.getenv("AWS_STAGING_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="staging-simcore_staging", + swarm_stack_name="staging-simcore", + portainer_endpoint_version=2, + ) + if deployment == "aws-nih-production": + portainer_url = os.getenv("AWS_NIH_PRODUCTION_PORTAINER_URL") + portainer_username = os.getenv("AWS_NIH_PRODUCTION_PORTAINER_USERNAME") + portainer_password = os.getenv("AWS_NIH_PRODUCTION_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="production-simcore_production", + swarm_stack_name="production-simcore", + portainer_endpoint_version=2, + ) + if deployment == "aws-zmt-production": + portainer_url = os.getenv("AWS_ZMT_PRODUCTION_PORTAINER_URL") + portainer_username = os.getenv("AWS_ZMT_PRODUCTION_PORTAINER_USERNAME") + portainer_password = os.getenv("AWS_ZMT_PRODUCTION_PORTAINER_PASSWORD") + + return LegacySettings( + portainer_url=portainer_url, + portainer_username=portainer_username, + portainer_password=portainer_password, + starts_with="staging-simcore_staging", + swarm_stack_name="staging-simcore", + portainer_endpoint_version=1, + ) + else: + msg = "Invalid environment type provided." 
+ raise ValueError(msg) diff --git a/scripts/release/pyproject.toml b/scripts/release/pyproject.toml new file mode 100644 index 00000000000..e6ab2020205 --- /dev/null +++ b/scripts/release/pyproject.toml @@ -0,0 +1,15 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "monitor_release" +version = "1.2.3" +authors = [{name="Matus Drobuliak", email="drobuliak@itis.swiss" }] +description = "Helper script for monitoring releases" +readme = "README.md" +dependencies = ["arrow", "python-dotenv","pydantic", "pydantic-settings", "typer[all]>=0.9", "rich", "requests"] +requires-python = ">=3.10" + +[project.scripts] +monitor-release = "monitor_release.cli:app" diff --git a/scripts/shellcheck.bash b/scripts/shellcheck.bash index 2fea124a928..2279a9a86fb 100755 --- a/scripts/shellcheck.bash +++ b/scripts/shellcheck.bash @@ -5,4 +5,4 @@ # - VS extension: https://github.com/timonwong/vscode-shellcheck # -exec docker run --rm --interactive --volume "$PWD:/mnt:ro" koalaman/shellcheck:v0.7.0 "$@" +exec docker run --rm --interactive --volume "$PWD:/mnt:ro" koalaman/shellcheck:v0.10.0 "$@" diff --git a/scripts/test_python_version.py b/scripts/test_python_version.py new file mode 100755 index 00000000000..30f6a63931e --- /dev/null +++ b/scripts/test_python_version.py @@ -0,0 +1,39 @@ +#!/bin/env python + +import pathlib as pl +import platform +import sys + + +def main(): + major_v, minor_v, patch_v = platform.python_version_tuple() + + print(f"Found python version: {major_v}.{minor_v}.{patch_v}") + + min_major_v, min_minor_v = to_version( + (pl.Path(__file__).parent.parent / "requirements" / "PYTHON_VERSION") + .read_text() + .strip() + ) + + exit_code = ( + 1 + if int(major_v) < min_major_v + or (int(major_v) == min_major_v and int(minor_v) < min_minor_v) + else 0 + ) + + if exit_code > 0: + print( + f"Wrong python version, osparc compilation needs at least Python {min_major_v}.{min_minor_v}" + ) + + sys.exit(exit_code) + + +def to_version(version): + return tuple(int(v) for v in version.split(".")) + + +if __name__ == "__main__": + main() diff --git a/services/agent/Dockerfile b/services/agent/Dockerfile index 5079a5e66db..3f519ef6f30 100644 --- a/services/agent/Dockerfile +++ b/services/agent/Dockerfile @@ -1,5 +1,20 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" + +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + # # USAGE: @@ -11,14 +26,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=GitHK -RUN set -eux; \ - apt-get update; \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ apt-get install -y --no-install-recommends \ gosu \ - curl; \ - rm -rf /var/lib/apt/lists/*; \ + curl \ + && apt-get clean -y 
\ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -47,40 +66,45 @@ ENV PYTHONDONTWRITEBYTECODE=1 \ ENV PATH="${VIRTUAL_ENV}/bin:$PATH" # rclone installation -COPY --chown=scu:scu scripts/install_rclone.bash /tmp/install_rclone.bash -RUN ./tmp/install_rclone.bash && rm /tmp/install_rclone.bash +ARG TARGETARCH +ENV TARGETARCH=${TARGETARCH} +RUN \ + --mount=type=bind,source=scripts/install_rclone.bash,target=/tmp/install_rclone.bash \ + ./tmp/install_rclone.bash # -------------------------- Build stage ------------------- # Installs build/package management tools and third party dependencies # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/agent/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt + # --------------------------Prod-depends-only stage ------------------- @@ -89,17 +113,19 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/agent [scu:scu] WORKDIR # -FROM build as prod-only-deps +FROM build AS prod-only-deps -ENV SC_BUILD_TARGET prod-only-deps - -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/agent /build/services/agent +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/agent -RUN pip3 --no-cache-dir install -r requirements/prod.txt \ - && pip3 --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/agent,target=/build/services/agent,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -109,15 +135,20 @@ RUN pip3 --no-cache-dir install -r requirements/prod.txt \ # + /home/scu $HOME = WORKDIR # + services/agent [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu + # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -126,10 +157,13 @@ COPY --chown=scu:scu services/agent/docker services/agent/docker RUN chmod +x services/agent/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ +# https://docs.docker.com/reference/dockerfile/#healthcheck 
+HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/agent/docker/healthcheck.py", "http://localhost:8000/health"] EXPOSE 8000 @@ -146,7 +180,7 @@ CMD ["/bin/sh", "services/agent/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development \ SC_DEVEL_MOUNT=/devel/services/agent diff --git a/services/agent/README.md b/services/agent/README.md index 4010642d460..515254b183c 100644 --- a/services/agent/README.md +++ b/services/agent/README.md @@ -1,19 +1,5 @@ # agent -[![image-size]](https://microbadger.com/images/itisfoundation/agent. "More on itisfoundation/agent.:staging-latest image") - -[![image-badge]](https://microbadger.com/images/itisfoundation/agent "More on agent image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/agent "More on agent image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/agent "More on agent image in registry") - -Service for executing commands on docker nodes - - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/agent./staging-latest.svg?label=agent.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/agent.svg -[image-version]https://images.microbadger.com/badges/version/itisfoundation/agent.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/agent.svg - To develop this project, just diff --git a/services/agent/VERSION b/services/agent/VERSION index 8acdd82b765..3eefcb9dd5b 100644 --- a/services/agent/VERSION +++ b/services/agent/VERSION @@ -1 +1 @@ -0.0.1 +1.0.0 diff --git a/services/agent/docker/boot.sh b/services/agent/docker/boot.sh index d3a7b8b1515..5cc8f9f5aad 100755 --- a/services/agent/docker/boot.sh +++ b/services/agent/docker/boot.sh @@ -23,27 +23,36 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/agent || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/agent + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # # RUNNING application # APP_LOG_LEVEL=${LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +AGENT_SERVER_REMOTE_DEBUG_PORT=3000 SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" - -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! 
-path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/agent/src/simcore_service_agent && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${AGENT_SERVER_REMOTE_DEBUG_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --port 8000 \ --reload \ diff --git a/services/agent/docker/entrypoint.sh b/services/agent/docker/entrypoint.sh index 12d4deb30e5..e89ad5408a3 100755 --- a/services/agent/docker/entrypoint.sh +++ b/services/agent/docker/entrypoint.sh @@ -63,27 +63,20 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock -if stat $DOCKER_MOUNT > /dev/null 2>&1 -then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker - if ! addgroup --gid "$GROUPID" $GROUPNAME > /dev/null 2>&1 - then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." + # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" fi echo "$INFO Starting $* ..." diff --git a/services/agent/docker/healthcheck.py b/services/agent/docker/healthcheck.py old mode 100644 new mode 100755 index 8df0bcbd649..8c8e007898a --- a/services/agent/docker/healthcheck.py +++ b/services/agent/docker/healthcheck.py @@ -6,7 +6,8 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ CMD python3 docker/healthcheck.py http://localhost:8000/ ``` @@ -15,13 +16,14 @@ 1. why not to use curl instead of a python script? - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ """ + import os import sys from urllib.request import urlopen SUCCESS, UNHEALTHY = 0, 1 -# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, etc) +# Disabled if boots with debugger (e.g. 
debug, pdb-debug, debug-ptvsd, debugpy, etc) ok = "debug" in os.environ.get("SC_BOOT_MODE", "").lower() # Queries host diff --git a/services/agent/requirements/_base.in b/services/agent/requirements/_base.in index c195fb17f5c..4b1b5b29b82 100644 --- a/services/agent/requirements/_base.in +++ b/services/agent/requirements/_base.in @@ -6,12 +6,13 @@ --constraint ./constraints.txt # intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in +# service-library[fastapi] --requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in aiodocker -fastapi packaging pydantic -uvicorn diff --git a/services/agent/requirements/_base.txt b/services/agent/requirements/_base.txt index 40edfb2049f..b3fe3dfecfb 100644 --- a/services/agent/requirements/_base.txt +++ b/services/agent/requirements/_base.txt @@ -1,117 +1,520 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.5 +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 # via -r requirements/../../../packages/service-library/requirements/_base.in aiodebug==2.3.0 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiodocker==0.21.0 - # via -r requirements/_base.in -aiofiles==22.1.0 +aiodocker==0.24.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +aiofiles==24.1.0 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiohttp==3.8.3 +aiohappyeyeballs==2.5.0 + # via aiohttp +aiohttp==3.11.18 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt # aiodocker -aiormq==6.4.2 +aiormq==6.8.1 # via aio-pika -aiosignal==1.2.0 +aiosignal==1.3.2 # via aiohttp -anyio==3.6.2 - # via starlette -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # aiohttp - # redis -attrs==21.4.0 + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==25.1.0 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt # aiohttp # jsonschema -charset-normalizer==2.1.1 - # via aiohttp -click==8.1.3 + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 # via + # rich-toolkit # typer # uvicorn -dnspython==2.2.1 +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -email-validator==1.3.0 - # via pydantic -fastapi==0.85.2 - # via -r requirements/_base.in -frozenlist==1.3.1 +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r 
requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 # via # aiohttp # aiosignal +googleapis-common-protos==1.69.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc h11==0.14.0 + # via + # httpcore + # uvicorn +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 # via uvicorn -idna==3.4 +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator + # httpx + # requests # yarl -jsonschema==3.2.0 +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jsonschema==4.23.0 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt # -r requirements/../../../packages/models-library/requirements/_base.in -multidict==6.0.2 + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 # via # aiohttp # yarl -packaging==21.3 - # via -r requirements/_base.in -pamqp==3.2.1 +opentelemetry-api==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.51b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-fastapi==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r 
requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pydantic==1.10.2 +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.0 # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in + 
# fast-depends # fastapi -pyinstrument==4.3.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via -r requirements/../../../packages/service-library/requirements/_base.in -pyparsing==3.0.9 - # via packaging -pyrsistent==0.19.2 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -pyyaml==5.4.1 - # via -r 
requirements/../../../packages/service-library/requirements/_base.in -redis==4.4.0 - # via -r requirements/../../../packages/service-library/requirements/_base.in -six==1.16.0 +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil -sniffio==1.3.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.23.1 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 # via anyio -starlette==0.20.4 - # via fastapi -tenacity==8.1.0 +starlette==0.46.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 # via -r requirements/../../../packages/service-library/requirements/_base.in -tqdm==4.64.1 +tqdm==4.67.1 # via -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.6.1 - # via -r requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.4.0 +typer==0.15.2 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug - # aiodocker + # anyio + # fastapi + # faststream + # opentelemetry-sdk # pydantic - # starlette -uvicorn==0.19.0 - # via -r requirements/_base.in -yarl==1.8.1 + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +uvicorn==0.34.2 + # via 
+ # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/agent/requirements/_test.in b/services/agent/requirements/_test.in index 56d67fdfbcc..ff76fcd01f5 100644 --- a/services/agent/requirements/_test.in +++ b/services/agent/requirements/_test.in @@ -11,13 +11,14 @@ --constraint _base.txt aioboto3 -codecov +asgi-lifespan coverage -coveralls faker +httpx moto[server] # mock out tests based on AWS-S3 pytest pytest-asyncio pytest-cov pytest-mock pytest-runner +python-dotenv diff --git a/services/agent/requirements/_test.txt b/services/agent/requirements/_test.txt index d13f99699c8..a0c0ea3b114 100644 --- a/services/agent/requirements/_test.txt +++ b/services/agent/requirements/_test.txt @@ -1,286 +1,320 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aioboto3==9.6.0 +aioboto3==14.3.0 # via -r requirements/_test.in -aiobotocore==2.3.0 +aiobotocore==2.22.0 # via aioboto3 -aiohttp==3.8.3 +aiofiles==24.1.0 # via # -c requirements/_base.txt + # aioboto3 +aiohappyeyeballs==2.5.0 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # aiobotocore -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore -aiosignal==1.2.0 +aiosignal==1.3.2 # via # -c requirements/_base.txt # aiohttp -async-timeout==4.0.2 +annotated-types==0.7.0 # via # -c requirements/_base.txt - # aiohttp -attrs==21.4.0 + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.8.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.1.0 # via # -c requirements/_base.txt # aiohttp - # jschema-to-python # jsonschema - # pytest - # sarif-om -aws-sam-translator==1.60.1 + # referencing +aws-sam-translator==1.95.0 # via cfn-lint -aws-xray-sdk==2.11.0 +aws-xray-sdk==2.14.0 # via moto -boto3==1.21.21 +blinker==1.9.0 + # via flask +boto3==1.37.3 # via - # -c requirements/./constraints.txt # aiobotocore # aws-sam-translator # moto -botocore==1.24.21 +botocore==1.37.3 # via # aiobotocore # aws-xray-sdk # boto3 # moto # s3transfer -certifi==2022.12.7 - # via requests -cffi==1.15.1 +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +cffi==1.17.1 # via cryptography -cfn-lint==0.74.0 +cfn-lint==1.28.0 # via moto -charset-normalizer==2.1.1 +charset-normalizer==3.4.1 # via # -c requirements/_base.txt - # aiohttp # requests -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # flask -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 +coverage==7.6.12 # via # -r requirements/_test.in - # codecov - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.1 +cryptography==44.0.2 # via # -c 
requirements/../../../requirements/constraints.txt + # joserfc # moto - # python-jose - # sshpubkeys -docker==6.0.1 +docker==7.1.0 # via moto -docopt==0.6.2 - # via coveralls -ecdsa==0.18.0 - # via - # moto - # python-jose - # sshpubkeys -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 +faker==36.2.2 # via -r requirements/_test.in -flask==2.2.3 +flask==3.1.0 # via # flask-cors # moto -flask-cors==3.0.10 +flask-cors==5.0.1 # via moto -frozenlist==1.3.1 +frozenlist==1.5.0 # via # -c requirements/_base.txt # aiohttp # aiosignal -graphql-core==3.2.3 +graphql-core==3.2.6 # via moto -idna==3.4 +h11==0.14.0 # via # -c requirements/_base.txt + # httpcore +httpcore==1.0.7 + # via + # -c requirements/_base.txt + # httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx # requests # yarl -importlib-metadata==6.0.0 - # via flask iniconfig==2.0.0 # via pytest -itsdangerous==2.1.2 +itsdangerous==2.2.0 # via flask -jinja2==3.1.2 +jinja2==3.1.6 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # flask # moto jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -jschema-to-python==1.2.3 - # via cfn-lint -jsondiff==2.0.0 +joserfc==1.0.4 # via moto -jsonpatch==1.32 +jsonpatch==1.33 # via cfn-lint -jsonpickle==3.0.1 - # via jschema-to-python -jsonpointer==2.3 +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 # via jsonpatch -jsonschema==3.2.0 +jsonschema==4.23.0 # via # -c requirements/_base.txt # aws-sam-translator - # cfn-lint # openapi-schema-validator # openapi-spec-validator -junit-xml==1.9 - # via cfn-lint -markupsafe==2.1.2 +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2024.10.1 # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +markupsafe==3.0.2 + # via + # -c requirements/_base.txt # jinja2 # werkzeug -moto==4.1.3 +moto==5.1.4 # via -r requirements/_test.in -multidict==6.0.2 +mpmath==1.3.0 + # via sympy +multidict==6.1.0 # via # -c requirements/_base.txt + # aiobotocore # aiohttp # yarl -networkx==3.0 +networkx==3.4.2 # via cfn-lint -openapi-schema-validator==0.2.3 +openapi-schema-validator==0.6.3 # via openapi-spec-validator -openapi-spec-validator==0.4.0 - # via - # -c requirements/./constraints.txt - # moto -packaging==21.3 +openapi-spec-validator==0.7.1 + # via moto +packaging==24.2 # via # -c requirements/_base.txt - # docker # pytest -pbr==5.11.1 - # via - # jschema-to-python - # sarif-om -pluggy==1.0.0 +pathable==0.4.4 + # via jsonschema-path +pluggy==1.5.0 # via pytest -pyasn1==0.4.8 +ply==3.11 + # via jsonpath-ng +propcache==0.3.0 # via - # python-jose - # rsa -pycparser==2.21 + # -c requirements/_base.txt + # aiohttp + # yarl +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 # via cffi -pyparsing==3.0.9 +pydantic==2.10.6 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # moto - # packaging -pyrsistent==0.19.2 + # aws-sam-translator +pydantic-core==2.27.2 # via # -c requirements/_base.txt - # jsonschema -pytest==7.2.1 + # pydantic +pyparsing==3.2.1 + # via moto +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio # pytest-cov # pytest-mock -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in 
-pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt + # aiobotocore # botocore - # faker # moto -python-jose==3.3.0 - # via moto -pyyaml==5.4.1 +python-dotenv==1.0.1 # via # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # cfn-lint + # jsonschema-path # moto - # openapi-spec-validator -requests==2.28.2 + # responses +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 # via - # codecov - # coveralls + # -c requirements/_base.txt # docker + # jsonschema-path # moto # responses -responses==0.22.0 +responses==0.25.6 # via moto -rsa==4.9 +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.23.1 # via - # -c requirements/../../../requirements/constraints.txt - # python-jose -s3transfer==0.5.2 + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 # via boto3 -sarif-om==1.0.4 - # via cfn-lint -six==1.16.0 +setuptools==75.8.2 + # via moto +six==1.17.0 # via # -c requirements/_base.txt - # ecdsa - # flask-cors - # jsonschema - # junit-xml # python-dateutil -sshpubkeys==3.3.1 - # via moto -toml==0.10.2 - # via responses -tomli==2.0.1 + # rfc3339-validator +sniffio==1.3.1 # via - # coverage - # pytest -types-toml==0.10.8.5 - # via responses -typing-extensions==4.4.0 + # -c requirements/_base.txt + # anyio + # asgi-lifespan +sympy==1.13.3 + # via cfn-lint +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # aioitertools + # anyio # aws-sam-translator -urllib3==1.26.14 + # cfn-lint + # pydantic + # pydantic-core +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # botocore # docker # requests # responses -websocket-client==1.5.1 - # via docker -werkzeug==2.2.3 +werkzeug==3.1.3 # via # flask + # flask-cors # moto -wrapt==1.15.0 +wrapt==1.17.2 # via + # -c requirements/_base.txt # aiobotocore # aws-xray-sdk -xmltodict==0.13.0 +xmltodict==0.14.2 # via moto -yarl==1.8.1 +yarl==1.18.3 # via # -c requirements/_base.txt # aiohttp -zipp==3.15.0 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/services/agent/requirements/_tools.in b/services/agent/requirements/_tools.in index 8e7d4eb265e..1def82c12a3 100644 --- a/services/agent/requirements/_tools.in +++ b/services/agent/requirements/_tools.in @@ -3,7 +3,3 @@ --constraint _test.txt --requirement ../../../requirements/devenv.txt - -black -isort -watchdog[watchmedo] diff --git a/services/agent/requirements/_tools.txt b/services/agent/requirements/_tools.txt index 63714134c29..70694d84d7b 100644 --- a/services/agent/requirements/_tools.txt +++ b/services/agent/requirements/_tools.txt @@ -1,101 +1,86 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==22.12.0 - # via - # -r requirements/../../../requirements/devenv.txt - # -r requirements/_tools.in 
-build==0.10.0 +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt - # -r requirements/_tools.in # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==21.3 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt + # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyparsing==3.0.9 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # packaging -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt # pre-commit - # watchdog -tomli==2.0.1 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 # via # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.4.0 +typing-extensions==4.12.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.3 # via pre-commit -watchdog==2.3.1 - # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/agent/requirements/ci.txt b/services/agent/requirements/ci.txt index 04bec253a17..5660d901f3e 100644 --- a/services/agent/requirements/ci.txt +++ b/services/agent/requirements/ci.txt @@ -12,10 +12,11 @@ --requirement _tools.txt # installs this repo's packages -../../packages/models-library -../../packages/pytest-simcore -../../packages/service-library -../../packages/settings-library +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library # installs current package -. +simcore-service-agent @ . 
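
The refreshed test requirements above (`_test.in`/`_test.txt`) drop codecov/coveralls and pull in `httpx` and `asgi-lifespan`. As a rough illustration of why those two typically travel together in FastAPI test suites, here is a minimal sketch; it is not taken from the agent's own tests, and the app and route names are made up:

```python
# Minimal sketch (illustrative only): asgi-lifespan runs the app's startup/shutdown
# hooks around the test, while httpx talks to the ASGI app in-process.
import pytest
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from httpx import ASGITransport, AsyncClient

app = FastAPI()


@app.get("/health")
async def healthcheck() -> dict[str, str]:
    return {"status": "ok"}


@pytest.mark.asyncio
async def test_healthcheck_route() -> None:
    async with LifespanManager(app):  # triggers the lifespan events explicitly
        transport = ASGITransport(app=app)  # httpx>=0.28 removed AsyncClient(app=...)
        async with AsyncClient(transport=transport, base_url="http://test") as client:
            response = await client.get("/health")
            assert response.status_code == 200
```

With `asyncio_mode = auto` (configured in `setup.cfg` further down) the explicit `pytest.mark.asyncio` marker becomes optional.
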
diff --git a/services/agent/requirements/constraints.txt b/services/agent/requirements/constraints.txt index 5c14f8c59bc..e69de29bb2d 100644 --- a/services/agent/requirements/constraints.txt +++ b/services/agent/requirements/constraints.txt @@ -1,26 +0,0 @@ - -# -# CONSTRAINTS DUE TO TEST LIBRARIES -# - -# -# BELOW COSNTRAINTS are required to install moto[server] -# - -# There are incompatible versions in the resolved dependencies: -# boto3==1.21.21 (from -c requirements/./constraints.txt (line 3)) -# boto3<1.24.60,>=1.24.59 (from aiobotocore[boto3]==2.4.0->aioboto3==10.1.0->-r requirements/_test.in (line 13)) -# boto3>=1.9.201 (from moto[server]==4.0.1->-r requirements/_test.in (line 18)) -aioboto3<=9.6.0 -# There are incompatible versions in the resolved dependencies: -# botocore>=1.12.201 (from moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore>=1.11.3 (from aws-xray-sdk==2.10.0->moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore<1.28.0,>=1.27.95 (from boto3==1.24.95->moto[server]==4.0.1->-r requirements/_test.in (line 18)) -# botocore<1.24.22,>=1.24.21 (from aiobotocore[boto3]==2.3.0->aioboto3==9.6.0->-r requirements/_test.in (line 13)) -boto3<=1.21.21 -# There are incompatible versions in the resolved dependencies: -# jsonschema==3.2.0 (from -c requirements/_base.txt (line 159)) -# jsonschema~=3.2 (from -c requirements/./constraints.txt (line 12)) -# jsonschema<5,>=3.0 (from cfn-lint==0.64.1->moto[server]==4.0.1->-r requirements/_test.in (line 21)) -# jsonschema<5.0.0,>=4.0.0 (from openapi-spec-validator==0.5.1->moto[server]==4.0.1->-r requirements/_test.in (line 21)) -openapi-spec-validator<0.5.0 diff --git a/services/agent/requirements/dev.txt b/services/agent/requirements/dev.txt index a20e1ab941d..692b48d0946 100644 --- a/services/agent/requirements/dev.txt +++ b/services/agent/requirements/dev.txt @@ -12,9 +12,10 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/pytest-simcore ---editable ../../packages/service-library +--editable ../../packages/service-library[fastapi] --editable ../../packages/settings-library # installs current package diff --git a/services/agent/requirements/prod.txt b/services/agent/requirements/prod.txt index acf5da3c628..fb4e4e1e8fa 100644 --- a/services/agent/requirements/prod.txt +++ b/services/agent/requirements/prod.txt @@ -10,8 +10,10 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library -../../packages/service-library -../../packages/settings-library +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library + # installs current package -. +simcore-service-agent @ . 
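Similarly for prod.txt above: the "[fastapi]" extra is now requested directly on the local "simcore-service-library" reference. A hedged sketch of how one might verify, in an environment assumed to have been provisioned from prod.txt, that the installed distribution actually declares such an extra (illustrative only, not repository code): 
# Illustrative only: list the extras declared by the locally-installed
# simcore-service-library distribution (assumes prod.txt was installed).
from importlib.metadata import metadata

dist_metadata = metadata("simcore-service-library")
print(dist_metadata.get_all("Provides-Extra"))  # expected to include "fastapi"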
diff --git a/services/agent/setup.cfg b/services/agent/setup.cfg index c5c4317e800..c4965654e3e 100644 --- a/services/agent/setup.cfg +++ b/services/agent/setup.cfg @@ -9,3 +9,8 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +[mypy] +plugins = + pydantic.mypy diff --git a/services/agent/setup.py b/services/agent/setup.py old mode 100644 new mode 100755 index 509cb8619b6..c6ae30502d4 --- a/services/agent/setup.py +++ b/services/agent/setup.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import re import sys @@ -39,29 +38,30 @@ def read_reqs(reqs_path: Path) -> set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name=NAME, - version=VERSION, - author=AUTHORS, - description=DESCRIPTION, - long_description=README, - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { "console_scripts": [ "simcore-service-agent = simcore_service_agent.cli:main", + "simcore-service = simcore_service_agent.cli:main", ], }, -) +} if __name__ == "__main__": setup(**SETUP) diff --git a/services/agent/src/simcore_service_agent/_meta.py b/services/agent/src/simcore_service_agent/_meta.py index a4494809785..f7c328ab2aa 100644 --- a/services/agent/src/simcore_service_agent/_meta.py +++ b/services/agent/src/simcore_service_agent/_meta.py @@ -1,30 +1,23 @@ """ Application's metadata """ -from contextlib import suppress + +from importlib.metadata import distribution, version from typing import Final -import pkg_resources from packaging.version import Version -_current_distribution = pkg_resources.get_distribution("simcore-service-agent") -__version__: str = _current_distribution.version +_current_distribution = distribution("simcore-service-agent") +__version__: str = version("simcore-service-agent") -APP_NAME: Final[str] = _current_distribution.project_name +APP_NAME: Final[str] = _current_distribution.metadata["Name"] VERSION: Final[Version] = Version(__version__) API_VTAG: str = f"v{VERSION.major}" def get_summary() -> str: - with suppress(Exception): - try: - metadata = _current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = _current_distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" + return _current_distribution.metadata.get_all("Summary", [""])[-1] SUMMARY: Final[str] = get_summary() diff --git a/services/agent/src/simcore_service_agent/api/__init__.py b/services/agent/src/simcore_service_agent/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/agent/src/simcore_service_agent/api/rest/__init__.py b/services/agent/src/simcore_service_agent/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/services/agent/src/simcore_service_agent/api/rest/_dependencies.py b/services/agent/src/simcore_service_agent/api/rest/_dependencies.py new file mode 100644 index 00000000000..a02971d996a --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rest/_dependencies.py @@ -0,0 +1,27 @@ +""" Free functions to inject dependencies in routes handlers +""" + +from typing import Annotated, cast + +from fastapi import Depends, FastAPI, Request +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + +from ...core.settings import ApplicationSettings + + +def get_application(request: Request) -> FastAPI: + return cast(FastAPI, request.app) + + +def get_settings( + app: Annotated[FastAPI, Depends(get_application)] +) -> ApplicationSettings: + assert isinstance(app.state.settings, ApplicationSettings) # nosec + return app.state.settings + + +def get_rabbitmq_client( + app: Annotated[FastAPI, Depends(get_application)] +) -> RabbitMQRPCClient: + assert isinstance(app.state.rabbitmq_rpc_server, RabbitMQRPCClient) # nosec + return app.state.rabbitmq_rpc_server diff --git a/services/agent/src/simcore_service_agent/api/rest/_health.py b/services/agent/src/simcore_service_agent/api/rest/_health.py new file mode 100644 index 00000000000..600de246722 --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rest/_health.py @@ -0,0 +1,25 @@ +from typing import Annotated + +import arrow +from fastapi import APIRouter, Depends +from models_library.api_schemas__common.health import HealthCheckGet +from models_library.errors import RABBITMQ_CLIENT_UNHEALTHY_MSG +from servicelib.rabbitmq import RabbitMQClient + +from ._dependencies import get_rabbitmq_client + +router = APIRouter() + + +class HealthCheckError(RuntimeError): + """Failed a health check""" + + +@router.get("/health", response_model=HealthCheckGet) +async def check_service_health( + rabbitmq_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client)] +): + if not rabbitmq_client.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + return HealthCheckGet(timestamp=f"{__name__}@{arrow.utcnow().datetime.isoformat()}") diff --git a/services/agent/src/simcore_service_agent/api/rest/routes.py b/services/agent/src/simcore_service_agent/api/rest/routes.py new file mode 100644 index 00000000000..18688cf2f4d --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rest/routes.py @@ -0,0 +1,14 @@ +from fastapi import FastAPI, HTTPException +from servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +from . 
import _health + + +def setup_rest_api(app: FastAPI): + app.include_router(_health.router) + + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) diff --git a/services/agent/src/simcore_service_agent/api/rpc/__init__.py b/services/agent/src/simcore_service_agent/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/agent/src/simcore_service_agent/api/rpc/_containers.py b/services/agent/src/simcore_service_agent/api/rpc/_containers.py new file mode 100644 index 00000000000..e7d651d6ede --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rpc/_containers.py @@ -0,0 +1,20 @@ +import logging + +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RPCRouter + +from ...services.containers_manager import ContainersManager + +_logger = logging.getLogger(__name__) + +router = RPCRouter() + + +@router.expose() +async def force_container_cleanup(app: FastAPI, *, node_id: NodeID) -> None: + with log_context( + _logger, logging.INFO, f"removing all orphan containers for {node_id=}" + ): + await ContainersManager.get_from_app_state(app).force_container_cleanup(node_id) diff --git a/services/agent/src/simcore_service_agent/api/rpc/_volumes.py b/services/agent/src/simcore_service_agent/api/rpc/_volumes.py new file mode 100644 index 00000000000..9d2433a19af --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rpc/_volumes.py @@ -0,0 +1,29 @@ +import logging + +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.agent.errors import ( + NoServiceVolumesFoundRPCError, +) + +from ...services.volumes_manager import VolumesManager + +_logger = logging.getLogger(__name__) + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=(NoServiceVolumesFoundRPCError,)) +async def remove_volumes_without_backup_for_service( + app: FastAPI, *, node_id: NodeID +) -> None: + with log_context(_logger, logging.INFO, f"removing volumes for service: {node_id}"): + await VolumesManager.get_from_app_state(app).remove_service_volumes(node_id) + + +@router.expose() +async def backup_and_remove_volumes_for_all_services(app: FastAPI) -> None: + with log_context(_logger, logging.INFO, "removing all service volumes from node"): + await VolumesManager.get_from_app_state(app).remove_all_volumes() diff --git a/services/agent/src/simcore_service_agent/api/rpc/routes.py b/services/agent/src/simcore_service_agent/api/rpc/routes.py new file mode 100644 index 00000000000..e8b0cea8f4c --- /dev/null +++ b/services/agent/src/simcore_service_agent/api/rpc/routes.py @@ -0,0 +1,29 @@ +from fastapi import FastAPI +from models_library.rabbitmq_basic_types import RPCNamespace +from servicelib.rabbitmq import RPCRouter +from simcore_service_agent.core.settings import ApplicationSettings + +from ...services.rabbitmq import get_rabbitmq_rpc_server +from . 
import _containers, _volumes + +ROUTERS: list[RPCRouter] = [ + _containers.router, + _volumes.router, +] + + +def setup_rpc_api_routes(app: FastAPI) -> None: + async def startup() -> None: + rpc_server = get_rabbitmq_rpc_server(app) + settings: ApplicationSettings = app.state.settings + rpc_namespace = RPCNamespace.from_entries( + { + "service": "agent", + "docker_node_id": settings.AGENT_DOCKER_NODE_ID, + "swarm_stack_name": settings.AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME, + } + ) + for router in ROUTERS: + await rpc_server.register_router(router, rpc_namespace, app) + + app.add_event_handler("startup", startup) diff --git a/services/agent/src/simcore_service_agent/cli.py b/services/agent/src/simcore_service_agent/cli.py index c8b473748c0..2bddceb1717 100644 --- a/services/agent/src/simcore_service_agent/cli.py +++ b/services/agent/src/simcore_service_agent/cli.py @@ -1,9 +1,9 @@ import logging import typer -from settings_library.utils_cli import create_settings_command +from settings_library.utils_cli import create_settings_command, create_version_callback -from ._meta import APP_NAME +from ._meta import APP_NAME, __version__ from .core.settings import ApplicationSettings log = logging.getLogger(__name__) @@ -11,3 +11,4 @@ main = typer.Typer(name=APP_NAME) main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) +main.callback()(create_version_callback(__version__)) diff --git a/services/agent/src/simcore_service_agent/core/_dependencies.py b/services/agent/src/simcore_service_agent/core/_dependencies.py deleted file mode 100644 index 920f520dcdf..00000000000 --- a/services/agent/src/simcore_service_agent/core/_dependencies.py +++ /dev/null @@ -1,22 +0,0 @@ -""" Free functions to inject dependencies in routes handlers -""" - - -from fastapi import Depends, FastAPI, Request - -from ..modules.task_monitor import TaskMonitor -from .settings import ApplicationSettings - - -def get_application(request: Request) -> FastAPI: - return request.app - - -def get_settings(app: FastAPI = Depends(get_application)) -> ApplicationSettings: - assert isinstance(app.state.settings, ApplicationSettings) # nosec - return app.state.settings - - -def get_task_monitor(app: FastAPI = Depends(get_application)) -> TaskMonitor: - assert isinstance(app.state.task_monitor, TaskMonitor) # nosec - return app.state.task_monitor diff --git a/services/agent/src/simcore_service_agent/core/_routes.py b/services/agent/src/simcore_service_agent/core/_routes.py deleted file mode 100644 index 6f3486d0662..00000000000 --- a/services/agent/src/simcore_service_agent/core/_routes.py +++ /dev/null @@ -1,12 +0,0 @@ -from fastapi import APIRouter, Depends, HTTPException, status - -from ..modules.task_monitor import TaskMonitor -from ._dependencies import get_task_monitor - -router = APIRouter() - - -@router.get("/health") -def health(task_monitor: TaskMonitor = Depends(get_task_monitor)) -> None: - if not task_monitor.was_started or task_monitor.are_tasks_hanging: - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE, detail="unhealthy") diff --git a/services/agent/src/simcore_service_agent/core/application.py b/services/agent/src/simcore_service_agent/core/application.py index 0fa26c9418c..442c4649c62 100644 --- a/services/agent/src/simcore_service_agent/core/application.py +++ b/services/agent/src/simcore_service_agent/core/application.py @@ -5,6 +5,10 @@ get_common_oas_options, override_fastapi_openapi_method, ) +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + 
setup_tracing, +) from servicelib.logging_utils import config_all_loggers from .._meta import ( @@ -15,8 +19,12 @@ SUMMARY, VERSION, ) -from ..modules import task_monitor -from ._routes import router +from ..api.rest.routes import setup_rest_api +from ..api.rpc.routes import setup_rpc_api_routes +from ..services.containers_manager import setup_containers_manager +from ..services.instrumentation import setup_instrumentation +from ..services.rabbitmq import setup_rabbitmq +from ..services.volumes_manager import setup_volume_manager from .settings import ApplicationSettings logger = logging.getLogger(__name__) @@ -24,16 +32,19 @@ def _setup_logger(settings: ApplicationSettings): # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 - logging.basicConfig(level=settings.LOGLEVEL.value) # NOSONAR - logging.root.setLevel(settings.LOGLEVEL.value) - config_all_loggers() + logging.basicConfig(level=settings.LOG_LEVEL.value) # NOSONAR + logging.root.setLevel(settings.LOG_LEVEL.value) + config_all_loggers( + log_format_local_dev_enabled=settings.AGENT_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=settings.AGENT_VOLUMES_LOG_FILTER_MAPPING, + tracing_settings=settings.AGENT_TRACING, + ) def create_app() -> FastAPI: - # SETTINGS settings = ApplicationSettings.create_from_envs() _setup_logger(settings) - logger.debug(settings.json(indent=2)) + logger.debug(settings.model_dump_json(indent=2)) assert settings.SC_BOOT_MODE # nosec app = FastAPI( @@ -42,22 +53,30 @@ def create_app() -> FastAPI: description=SUMMARY, version=f"{VERSION}", openapi_url=f"/api/{API_VTAG}/openapi.json", - **get_common_oas_options(settings.SC_BOOT_MODE.is_devel_mode()), + **get_common_oas_options(is_devel_mode=settings.SC_BOOT_MODE.is_devel_mode()), ) override_fastapi_openapi_method(app) app.state.settings = settings - # ROUTERS - app.include_router(router) + if settings.AGENT_TRACING: + setup_tracing(app, settings.AGENT_TRACING, APP_NAME) + + setup_instrumentation(app) + + setup_rabbitmq(app) + setup_volume_manager(app) + setup_containers_manager(app) + setup_rest_api(app) + setup_rpc_api_routes(app) - # EVENTS - task_monitor.setup(app) + if settings.AGENT_TRACING: + initialize_fastapi_app_tracing(app) async def _on_startup() -> None: - print(APP_STARTED_BANNER_MSG, flush=True) + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 async def _on_shutdown() -> None: - print(APP_FINISHED_BANNER_MSG, flush=True) + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 app.add_event_handler("startup", _on_startup) app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/agent/src/simcore_service_agent/core/settings.py b/services/agent/src/simcore_service_agent/core/settings.py index a17811bd276..d11b286f065 100644 --- a/services/agent/src/simcore_service_agent/core/settings.py +++ b/services/agent/src/simcore_service_agent/core/settings.py @@ -1,55 +1,122 @@ -from typing import Final, Optional +from datetime import timedelta +from typing import Annotated +from common_library.basic_types import DEFAULT_FACTORY from models_library.basic_types import BootModeEnum, LogLevel -from pydantic import Field, NonNegativeInt, validator +from models_library.docker import DockerNodeID +from pydantic import AliasChoices, AnyHttpUrl, Field, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring from settings_library.base import BaseCustomSettings from settings_library.r_clone import S3Provider +from settings_library.rabbit import RabbitSettings +from 
settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings -_MINUTE: Final[NonNegativeInt] = 60 - class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): - LOGLEVEL: LogLevel = Field( - LogLevel.WARNING.value, env=["WEBSERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"] - ) - SC_BOOT_MODE: Optional[BootModeEnum] - - AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME: str = Field( - ..., description="Exactly the same as director-v2's `SWARM_STACK_NAME` env var" - ) - AGENT_VOLUMES_CLEANUP_S3_SECURE: bool = False - AGENT_VOLUMES_CLEANUP_S3_ENDPOINT: str + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "AGENT_LOGLEVEL", + "LOG_LEVEL", + "LOGLEVEL", + ), + ), + ] = LogLevel.WARNING + + SC_BOOT_MODE: BootModeEnum | None + + AGENT_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "AGENT_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description=( + "Enables local development log format. WARNING: make sure it is " + "disabled if you want to have structured logs!" + ), + ), + ] = False + + AGENT_VOLUMES_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "AGENT_VOLUMES_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME: str + AGENT_VOLUMES_CLEANUP_S3_ENDPOINT: AnyHttpUrl AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY: str AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY: str AGENT_VOLUMES_CLEANUP_S3_BUCKET: str AGENT_VOLUMES_CLEANUP_S3_PROVIDER: S3Provider AGENT_VOLUMES_CLEANUP_S3_REGION: str = "us-east-1" - AGENT_VOLUMES_CLEANUP_RETRIES: int = Field( - 3, description="upload retries in case of error" - ) - AGENT_VOLUMES_CLEANUP_PARALLELISM: int = Field( - 5, description="parallel transfers to s3" - ) - AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES: list[str] = Field( - [".hidden_do_not_remove", "key_values.json"], - description="Files to ignore when syncing to s3", - ) - AGENT_VOLUMES_CLEANUP_INTERVAL_S: NonNegativeInt = Field( - 60 * _MINUTE, description="interval at which to repeat volumes cleanup" - ) - - @validator("AGENT_VOLUMES_CLEANUP_S3_ENDPOINT", pre=True) - @classmethod - def ensure_scheme(cls, v: str, values) -> str: - if not v.startswith("http"): - scheme = ( - "https" if values.get("AGENT_VOLUMES_CLEANUP_S3_SECURE") else "http" - ) - return f"{scheme}://{v}" - return v - - @validator("LOGLEVEL") + AGENT_VOLUMES_CLEANUP_RETRIES: Annotated[ + int, Field(description="upload retries in case of error") + ] = 3 + AGENT_VOLUMES_CLEANUP_PARALLELISM: Annotated[ + int, Field(description="parallel transfers to s3") + ] = 5 + AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES: Annotated[ + list[str], + Field( + [".hidden_do_not_remove", "key_values.json"], + description="Files to ignore when syncing to s3", + ), + ] + AGENT_VOLUMES_CLEANUP_INTERVAL: Annotated[ + timedelta, Field(description="interval for running volumes removal") + ] = timedelta(minutes=1) + AGENT_VOLUMES_CLEANUP_BOOK_KEEPING_INTERVAL: Annotated[ + timedelta, + Field( + description=( + "interval at which to scan for unused volumes and keep track since " + "they were detected as being unused" + ), + ), + ] = timedelta(minutes=1) + AGENT_VOLUMES_CLEANUP_REMOVE_VOLUMES_INACTIVE_FOR: 
Annotated[ + timedelta, + Field( + description=( + "if a volume is unused for more than this interval it can be removed. " + "The default is set to a healthy 60+ minutes since it might take up to " + "60 minutes for the dy-sidecar to properly save data from the volumes" + ), + ), + ] = timedelta(minutes=65) + + AGENT_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + AGENT_DOCKER_NODE_ID: Annotated[ + DockerNodeID, Field(description="used by the rabbitmq module") + ] + + AGENT_RABBITMQ: Annotated[ + RabbitSettings, + Field( + description="settings for service/rabbitmq", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + AGENT_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + @field_validator("LOG_LEVEL") @classmethod def valid_log_level(cls, value) -> LogLevel: return LogLevel(cls.validate_log_level(value)) diff --git a/services/agent/src/simcore_service_agent/main.py b/services/agent/src/simcore_service_agent/main.py index 6701b5188ea..a16db0c3d52 100644 --- a/services/agent/src/simcore_service_agent/main.py +++ b/services/agent/src/simcore_service_agent/main.py @@ -1,3 +1,3 @@ -from .core.application import create_app +from simcore_service_agent.core.application import create_app the_app = create_app() diff --git a/services/agent/src/simcore_service_agent/models/__init__.py b/services/agent/src/simcore_service_agent/models/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/agent/src/simcore_service_agent/models/volumes.py b/services/agent/src/simcore_service_agent/models/volumes.py new file mode 100644 index 00000000000..68f20cae559 --- /dev/null +++ b/services/agent/src/simcore_service_agent/models/volumes.py @@ -0,0 +1,36 @@ +from pathlib import Path +from typing import Final + +from models_library.api_schemas_directorv2.services import ( + CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME, +) +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter + + +class DynamicServiceVolumeLabels(BaseModel): + node_uuid: NodeID + run_id: ServiceRunID + source: str + study_id: ProjectID + swarm_stack_name: str + user_id: UserID + + @property + def directory_name(self) -> str: + return self.source[CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME:][::-1].strip("_") + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + +class VolumeDetails(BaseModel): + mountpoint: Path = Field(alias="Mountpoint") + labels: DynamicServiceVolumeLabels = Field(alias="Labels") + + +VolumeDetailsAdapter: Final[TypeAdapter[VolumeDetails]] = TypeAdapter(VolumeDetails) diff --git a/services/agent/src/simcore_service_agent/modules/task_monitor.py b/services/agent/src/simcore_service_agent/modules/task_monitor.py deleted file mode 100644 index 8f4e3cd2ab3..00000000000 --- a/services/agent/src/simcore_service_agent/modules/task_monitor.py +++ /dev/null @@ -1,169 +0,0 @@ -import asyncio -import logging -from collections import deque -from contextlib import suppress -from dataclasses import dataclass, field -from time import time -from typing import Any, Awaitable, Callable, Final, Optional - -from fastapi import FastAPI -from pydantic import PositiveFloat, PositiveInt -from servicelib.logging_utils import log_context - -from ..core.settings import ApplicationSettings 
-from .volumes_cleanup import backup_and_remove_volumes - -logger = logging.getLogger(__name__) - -DEFAULT_TASK_WAIT_ON_ERROR: Final[PositiveInt] = 10 - - -@dataclass -class _TaskData: - target: Callable - args: Any - repeat_interval_s: Optional[PositiveFloat] - _start_time: Optional[PositiveFloat] = None - - @property - def name(self) -> str: - return self.target.__name__ - - async def run(self) -> None: - coroutine = self.target(*self.args) - - self._start_time = time() - - try: - await coroutine - finally: - self._start_time = None - - def is_hanging(self) -> bool: - # NOTE: tasks with no repeat_interval_s are design to run forever - if self.repeat_interval_s is None: - return False - - if self._start_time is None: - return False - - return (time() - self._start_time) > self.repeat_interval_s - - -async def _task_runner(task_data: _TaskData) -> None: - with log_context(logger, logging.INFO, msg=f"'{task_data.name}'"): - while True: - try: - await task_data.run() - except Exception: # pylint: disable=broad-except - logger.exception("Had an error while running '%s'", task_data.name) - - if task_data.repeat_interval_s is None: - logger.warning( - "Unexpected termination of '%s'; it will be restarted", - task_data.name, - ) - - logger.info( - "Will run '%s' again in %s seconds", - task_data.name, - task_data.repeat_interval_s, - ) - await asyncio.sleep( - DEFAULT_TASK_WAIT_ON_ERROR - if task_data.repeat_interval_s is None - else task_data.repeat_interval_s - ) - - -@dataclass -class TaskMonitor: - _was_started: bool = False - _tasks: set[asyncio.Task] = field(default_factory=set) - _to_start: dict[str, _TaskData] = field(default_factory=dict) - - @property - def was_started(self) -> bool: - return self._was_started - - @property - def are_tasks_hanging(self) -> bool: - hanging_tasks_detected = False - for name, task_data in self._to_start.items(): - if task_data.is_hanging(): - logger.warning("Task '%s' is hanging", name) - hanging_tasks_detected = True - return hanging_tasks_detected - - def register_job( - self, - target: Callable, - *args: Any, - repeat_interval_s: Optional[PositiveFloat] = None, - ) -> None: - if self._was_started: - raise RuntimeError( - "Cannot add more tasks, monitor already running with: " - f"{[x.get_name() for x in self._tasks]}" - ) - - task_data = _TaskData(target, args, repeat_interval_s) - if task_data.name in self._to_start: - raise RuntimeError(f"{target.__name__} is already registered") - - self._to_start[target.__name__] = task_data - - async def start(self) -> None: - self._was_started = True - for name, task_data in self._to_start.items(): - logger.info("Starting task '%s'", name) - self._tasks.add( - asyncio.create_task(_task_runner(task_data), name=f"task_{name}") - ) - - async def shutdown(self): - async def _wait_for_task(task: asyncio.Task) -> None: - with suppress(asyncio.CancelledError): - await task - - tasks_to_wait: deque[Awaitable] = deque() - for task in set(self._tasks): - logger.info("Cancel and stop task '%s'", task.get_name()) - - task.cancel() - tasks_to_wait.append(_wait_for_task(task)) - self._tasks.remove(task) - - await asyncio.gather(*tasks_to_wait, return_exceptions=True) - self._was_started = False - self._to_start = {} - - -def setup(app: FastAPI) -> None: - async def _on_startup() -> None: - task_monitor = app.state.task_monitor = TaskMonitor() - settings: ApplicationSettings = app.state.settings - - # setup all relative jobs - task_monitor.register_job( - backup_and_remove_volumes, - settings, - 
repeat_interval_s=settings.AGENT_VOLUMES_CLEANUP_INTERVAL_S, - ) - - await task_monitor.start() - logger.info("Started πŸ” task_monitor") - - async def _on_shutdown() -> None: - task_monitor: TaskMonitor = app.state.task_monitor - await task_monitor.shutdown() - logger.info("Stopped πŸ” task_monitor") - - app.add_event_handler("startup", _on_startup) - app.add_event_handler("shutdown", _on_shutdown) - - -__all__: tuple[str, ...] = ( - "setup", - "TaskMonitor", -) diff --git a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/__init__.py b/services/agent/src/simcore_service_agent/modules/volumes_cleanup/__init__.py deleted file mode 100644 index 00002f118b7..00000000000 --- a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from ._core import backup_and_remove_volumes - -__all__: tuple[str, ...] = ("backup_and_remove_volumes",) diff --git a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_core.py b/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_core.py deleted file mode 100644 index 6a4e63e3ce9..00000000000 --- a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_core.py +++ /dev/null @@ -1,61 +0,0 @@ -import logging - -from ...core.settings import ApplicationSettings -from ._docker import delete_volume, docker_client, get_dyv_volumes, is_volume_used -from ._s3 import store_to_s3 - -logger = logging.getLogger(__name__) - - -async def backup_and_remove_volumes(settings: ApplicationSettings) -> None: - async with docker_client() as client: - dyv_volumes: list[dict] = await get_dyv_volumes( - client, settings.AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME - ) - - if len(dyv_volumes) == 0: - return - - cleaned_up_volumes_count = 0 - logger.info("Beginning cleanup.") - for dyv_volume in dyv_volumes: - volume_name = dyv_volume["Name"] - - if await is_volume_used(client, volume_name): - logger.debug("Skipped in use docker volume: '%s'", volume_name) - continue - - try: - await store_to_s3( - volume_name=volume_name, - dyv_volume=dyv_volume, - s3_endpoint=settings.AGENT_VOLUMES_CLEANUP_S3_ENDPOINT, - s3_access_key=settings.AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY, - s3_secret_key=settings.AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY, - s3_bucket=settings.AGENT_VOLUMES_CLEANUP_S3_BUCKET, - s3_region=settings.AGENT_VOLUMES_CLEANUP_S3_REGION, - s3_provider=settings.AGENT_VOLUMES_CLEANUP_S3_PROVIDER, - s3_retries=settings.AGENT_VOLUMES_CLEANUP_RETRIES, - s3_parallelism=settings.AGENT_VOLUMES_CLEANUP_PARALLELISM, - exclude_files=settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES, - ) - except Exception as e: # pylint:disable=broad-except - logger.error("%s", e) - continue - - logger.info("Successfully cleaned up docker volume: '%s'", volume_name) - - await delete_volume(client, volume_name) - logger.info("Removed docker volume: '%s'", volume_name) - cleaned_up_volumes_count += 1 - - if cleaned_up_volumes_count > 0: - logger.info( - ( - "The dy-sidecar volume cleanup detected %s " - "zombie volumes on the current machine." 
- ), - cleaned_up_volumes_count, - ) - else: - logger.info("Found no zombie dy-sidecar volumes to cleanup.") diff --git a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_docker.py b/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_docker.py deleted file mode 100644 index 26d1475fdc1..00000000000 --- a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_docker.py +++ /dev/null @@ -1,37 +0,0 @@ -from collections import deque -from contextlib import asynccontextmanager -from typing import Any, AsyncIterator - -from aiodocker import Docker -from aiodocker.utils import clean_filters -from aiodocker.volumes import DockerVolume -from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES - - -@asynccontextmanager -async def docker_client() -> AsyncIterator[Docker]: - async with Docker() as docker: - yield docker - - -async def get_dyv_volumes(docker: Docker, target_swarm_stack_name: str) -> list[dict]: - dyv_volumes: deque[dict] = deque() - volumes = await docker.volumes.list() - for volume in volumes["Volumes"]: - volume_labels: dict[str, Any] = volume.get("Labels") or {} - if ( - volume["Name"].startswith(f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_") - and volume_labels.get("swarm_stack_name") == target_swarm_stack_name - ): - dyv_volumes.append(volume) - return list(dyv_volumes) - - -async def delete_volume(docker: Docker, volume_name: str) -> None: - await DockerVolume(docker, volume_name).delete() - - -async def is_volume_used(docker: Docker, volume_name: str) -> bool: - filters = clean_filters({"volume": volume_name}) - containers = await docker.containers.list(all=True, filters=filters) - return len(containers) > 0 diff --git a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_s3.py b/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_s3.py deleted file mode 100644 index 14c2a664f68..00000000000 --- a/services/agent/src/simcore_service_agent/modules/volumes_cleanup/_s3.py +++ /dev/null @@ -1,208 +0,0 @@ -import asyncio -import logging -from asyncio.streams import StreamReader -from pathlib import Path -from textwrap import dedent -from typing import Final - -from settings_library.r_clone import S3Provider -from settings_library.utils_r_clone import resolve_provider - -logger = logging.getLogger(__name__) - -R_CLONE_CONFIG = """ -[dst] -type = s3 -provider = {destination_provider} -access_key_id = {destination_access_key} -secret_access_key = {destination_secret_key} -endpoint = {destination_endpoint} -region = {destination_region} -acl = private -""" -VOLUME_NAME_FIXED_PORTION: Final[int] = 78 - - -def get_config_file_path( - s3_endpoint: str, - s3_access_key: str, - s3_secret_key: str, - s3_region: str, - s3_provider: S3Provider, -) -> Path: - config_content = R_CLONE_CONFIG.format( - destination_provider=resolve_provider(s3_provider), - destination_access_key=s3_access_key, - destination_secret_key=s3_secret_key, - destination_endpoint=s3_endpoint, - destination_region=s3_region, - ) - conf_path = Path("/tmp/rclone_config.ini") # NOSONAR - conf_path.write_text(config_content) # pylint:disable=unspecified-encoding - return conf_path - - -def _get_dir_name(volume_name: str) -> str: - # from: "dyv_a0430d06-40d2-4c92-9490-6aca30e00fc7_898fff63-d402-5566-a99b-091522dd2ae9_stuptuo_krow_nayvoj_emoh_" - # gets: "home_jovyan_work_outputs" - return volume_name[VOLUME_NAME_FIXED_PORTION:][::-1].strip("_") - - -def _get_s3_path(s3_bucket: str, labels: dict[str, str], volume_name: str) -> Path: - joint_key = "/".join( 
- ( - s3_bucket, - labels["swarm_stack_name"], - labels["study_id"], - labels["node_uuid"], - labels["run_id"], - _get_dir_name(volume_name), - ) - ) - return Path(f"/{joint_key}") - - -async def _read_stream(stream: StreamReader) -> str: - output = "" - while line := await stream.readline(): - message = line.decode() - output += message - logger.debug(message.strip("\n")) - return output - - -def _get_r_clone_str_command(command: list[str], exclude_files: list[str]) -> str: - # add files to be ignored - for to_exclude in exclude_files: - command.append("--exclude") - command.append(to_exclude) - - str_command = " ".join(command) - logger.info(str_command) - return str_command - - -def _log_expected_operation( - dyv_volume_labels: dict[str, str], - s3_path: Path, - r_clone_ls_output: str, - volume_name: str, -) -> None: - """ - This message will be logged as warning if any files will be synced - """ - log_level = logging.INFO if r_clone_ls_output.strip() == "" else logging.WARNING - - formatted_message = dedent( - f""" - --- - Volume data - --- - volume_name {volume_name} - destination_path {s3_path} - study_id: {dyv_volume_labels['study_id']} - node_id: {dyv_volume_labels['node_uuid']} - user_id: {dyv_volume_labels['user_id']} - run_id: {dyv_volume_labels['run_id']} - --- - Files to sync by rclone - ---\n{r_clone_ls_output.rstrip()} - --- - """ - ) - logger.log(log_level, formatted_message) - - -async def store_to_s3( # pylint:disable=too-many-locals,too-many-arguments - volume_name: str, - dyv_volume: dict, - s3_endpoint: str, - s3_access_key: str, - s3_secret_key: str, - s3_bucket: str, - s3_region: str, - s3_provider: S3Provider, - s3_retries: int, - s3_parallelism: int, - exclude_files: list[str], -) -> None: - config_file_path = get_config_file_path( - s3_endpoint=s3_endpoint, - s3_access_key=s3_access_key, - s3_secret_key=s3_secret_key, - s3_region=s3_region, - s3_provider=s3_provider, - ) - - source_dir = dyv_volume["Mountpoint"] - if not Path(source_dir).exists(): - logger.info( - "Volume mountpoint %s does not exist. 
Skipping backup, volume %s will be removed.", - source_dir, - volume_name, - ) - return - - s3_path = _get_s3_path(s3_bucket, dyv_volume["Labels"], volume_name) - - # listing files rclone will sync - r_clone_ls = [ - "rclone", - "--config", - f"{config_file_path}", - "ls", - f"{source_dir}", - ] - process = await asyncio.create_subprocess_shell( - _get_r_clone_str_command(r_clone_ls, exclude_files), - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - - r_clone_ls_output = await _read_stream(process.stdout) - await process.wait() - _log_expected_operation( - dyv_volume["Labels"], s3_path, r_clone_ls_output, volume_name - ) - - # sync files via rclone - r_clone_sync = [ - "rclone", - "--config", - f"{config_file_path}", - "--low-level-retries", - "3", - "--retries", - f"{s3_retries}", - "--transfers", - f"{s3_parallelism}", - # below two options reduce to a minimum the memory footprint - # https://forum.rclone.org/t/how-to-set-a-memory-limit/10230/4 - "--use-mmap", # docs https://rclone.org/docs/#use-mmap - "--buffer-size", # docs https://rclone.org/docs/#buffer-size-size - "0M", - "--stats", - "5s", - "--stats-one-line", - "sync", - f"{source_dir}", - f"dst:{s3_path}", - "--verbose", - ] - - str_r_clone_sync = _get_r_clone_str_command(r_clone_sync, exclude_files) - process = await asyncio.create_subprocess_shell( - str_r_clone_sync, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - - r_clone_sync_output = await _read_stream(process.stdout) - await process.wait() - logger.info("Sync result:\n%s", r_clone_sync_output) - - if process.returncode != 0: - raise RuntimeError( - f"Shell subprocesses yielded nonzero error code {process.returncode} " - f"for command {str_r_clone_sync}\n{r_clone_sync_output}" - ) diff --git a/services/agent/src/simcore_service_agent/services/__init__.py b/services/agent/src/simcore_service_agent/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/agent/src/simcore_service_agent/services/backup.py b/services/agent/src/simcore_service_agent/services/backup.py new file mode 100644 index 00000000000..a7e125af0c4 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/backup.py @@ -0,0 +1,201 @@ +import asyncio +import logging +import tempfile +from asyncio.streams import StreamReader +from pathlib import Path +from textwrap import dedent +from typing import Final +from uuid import uuid4 + +from fastapi import FastAPI +from settings_library.utils_r_clone import resolve_provider + +from ..core.settings import ApplicationSettings +from ..models.volumes import DynamicServiceVolumeLabels, VolumeDetails + +_logger = logging.getLogger(__name__) + + +_R_CLONE_CONFIG: Final[ + str +] = """ +[dst] +type = s3 +provider = {destination_provider} +access_key_id = {destination_access_key} +secret_access_key = {destination_secret_key} +endpoint = {destination_endpoint} +region = {destination_region} +acl = private +""" + + +def _get_config_file_path(settings: ApplicationSettings) -> Path: + config_content = _R_CLONE_CONFIG.format( + destination_provider=resolve_provider( + settings.AGENT_VOLUMES_CLEANUP_S3_PROVIDER + ), + destination_access_key=settings.AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY, + destination_secret_key=settings.AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY, + destination_endpoint=settings.AGENT_VOLUMES_CLEANUP_S3_ENDPOINT, + destination_region=settings.AGENT_VOLUMES_CLEANUP_S3_REGION, + ) + conf_path = Path(tempfile.gettempdir()) / f"rclone_config_{uuid4()}.ini" + 
conf_path.write_text(config_content) + return conf_path + + +def _get_s3_path(s3_bucket: str, labels: DynamicServiceVolumeLabels) -> Path: + return ( + Path(s3_bucket) + / labels.swarm_stack_name + / f"{labels.study_id}" + / f"{labels.node_uuid}" + / labels.run_id + / labels.directory_name + ) + + +async def _read_stream(stream: StreamReader) -> str: + output = "" + while line := await stream.readline(): + message = line.decode() + output += message + _logger.debug(message.strip("\n")) + return output + + +def _get_r_clone_str_command(command: list[str], exclude_files: list[str]) -> str: + # add files to be ignored + for to_exclude in exclude_files: + command.append("--exclude") + command.append(to_exclude) + + str_command = " ".join(command) + _logger.info(str_command) + return str_command + + +def _log_expected_operation( + labels: DynamicServiceVolumeLabels, + s3_path: Path, + r_clone_ls_output: str, + volume_name: str, +) -> None: + """ + This message will be logged as warning if any files will be synced + """ + log_level = logging.INFO if r_clone_ls_output.strip() == "" else logging.WARNING + + formatted_message = dedent( + f""" + --- + Volume data + --- + volume_name {volume_name} + destination_path {s3_path} + study_id: {labels.study_id} + node_id: {labels.node_uuid} + user_id: {labels.user_id} + run_id: {labels.run_id} + --- + Files to sync by rclone + ---\n{r_clone_ls_output.rstrip()} + --- + """ + ) + _logger.log(log_level, formatted_message) + + +async def _store_in_s3( + settings: ApplicationSettings, volume_name: str, volume_details: VolumeDetails +) -> None: + exclude_files = settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES + + config_file_path = _get_config_file_path(settings) + + source_dir = volume_details.mountpoint + if not Path(source_dir).exists(): + _logger.info( + "Volume mountpoint %s does not exist. 
Skipping backup, volume %s will be removed.", + source_dir, + volume_name, + ) + return + + s3_path = _get_s3_path( + settings.AGENT_VOLUMES_CLEANUP_S3_BUCKET, volume_details.labels + ) + + # listing files rclone will sync + r_clone_ls = [ + "rclone", + "--config", + f"{config_file_path}", + "ls", + f"{source_dir}", + ] + process = await asyncio.create_subprocess_shell( + _get_r_clone_str_command(r_clone_ls, exclude_files), + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + + assert process.stdout # nosec + r_clone_ls_output = await _read_stream(process.stdout) + await process.wait() + _log_expected_operation( + volume_details.labels, s3_path, r_clone_ls_output, volume_name + ) + + # sync files via rclone + r_clone_sync = [ + "rclone", + "--config", + f"{config_file_path}", + "--low-level-retries", + "3", + "--retries", + f"{settings.AGENT_VOLUMES_CLEANUP_RETRIES}", + "--transfers", + f"{settings.AGENT_VOLUMES_CLEANUP_PARALLELISM}", + # below two options reduce to a minimum the memory footprint + # https://forum.rclone.org/t/how-to-set-a-memory-limit/10230/4 + "--buffer-size", # docs https://rclone.org/docs/#buffer-size-size + "0M", + "--stats", + "5s", + "--stats-one-line", + "sync", + f"{source_dir}", + f"dst:{s3_path}", + "--verbose", + ] + + str_r_clone_sync = _get_r_clone_str_command(r_clone_sync, exclude_files) + process = await asyncio.create_subprocess_shell( + str_r_clone_sync, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT, + ) + + assert process.stdout # nosec + r_clone_sync_output = await _read_stream(process.stdout) + await process.wait() + _logger.info("Sync result:\n%s", r_clone_sync_output) + + if process.returncode != 0: + msg = ( + f"Shell subprocesses yielded nonzero error code {process.returncode} " + f"for command {str_r_clone_sync}\n{r_clone_sync_output}" + ) + raise RuntimeError(msg) + + +async def backup_volume( + app: FastAPI, volume_details: VolumeDetails, volume_name: str +) -> None: + settings: ApplicationSettings = app.state.settings + await _store_in_s3( + settings=settings, volume_name=volume_name, volume_details=volume_details + ) diff --git a/services/agent/src/simcore_service_agent/services/containers_manager.py b/services/agent/src/simcore_service_agent/services/containers_manager.py new file mode 100644 index 00000000000..ca2317e156e --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/containers_manager.py @@ -0,0 +1,71 @@ +import logging +from dataclasses import dataclass, field + +from aiodocker import Docker +from fastapi import FastAPI +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_PROXY_SERVICE_PREFIX, + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) +from models_library.projects_nodes_io import NodeID +from servicelib.fastapi.app_state import SingletonInAppStateMixin + +from .docker_utils import get_containers_with_prefixes, remove_container_forcefully + +_logger = logging.getLogger(__name__) + + +@dataclass +class ContainersManager(SingletonInAppStateMixin): + app_state_name: str = "containers_manager" + + docker: Docker = field(default_factory=Docker) + + async def force_container_cleanup(self, node_id: NodeID) -> None: + # compose all possible used container prefixes + proxy_prefix = f"{DYNAMIC_PROXY_SERVICE_PREFIX}_{node_id}" + dy_sidecar_prefix = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_id}" + user_service_prefix = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}-{node_id}" + + orphan_containers = await get_containers_with_prefixes( + self.docker, {proxy_prefix, dy_sidecar_prefix, 
user_service_prefix} + ) + _logger.debug( + "Detected orphan containers for node_id='%s': %s", + node_id, + orphan_containers, + ) + + unexpected_orphans = { + orphan + for orphan in orphan_containers + if orphan.startswith(user_service_prefix) + } + if unexpected_orphans: + _logger.warning( + "Unexpected orphans detected for node_id='%s': %s", + node_id, + unexpected_orphans, + ) + + # avoids parallel requests to docker engine + for container in orphan_containers: + await remove_container_forcefully(self.docker, container) + + async def shutdown(self) -> None: + await self.docker.close() + + +def get_containers_manager(app: FastAPI) -> ContainersManager: + return ContainersManager.get_from_app_state(app) + + +def setup_containers_manager(app: FastAPI) -> None: + async def _on_startup() -> None: + ContainersManager().set_to_app_state(app) + + async def _on_shutdown() -> None: + await ContainersManager.get_from_app_state(app).shutdown() + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/agent/src/simcore_service_agent/services/docker_utils.py b/services/agent/src/simcore_service_agent/services/docker_utils.py new file mode 100644 index 00000000000..1390a5b12df --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/docker_utils.py @@ -0,0 +1,132 @@ +import logging +from collections.abc import Iterator +from contextlib import contextmanager +from typing import Final + +from aiodocker import DockerError +from aiodocker.docker import Docker +from aiodocker.volumes import DockerVolume +from fastapi import FastAPI +from models_library.api_schemas_directorv2.services import ( + CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME, +) +from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES +from servicelib.logging_utils import log_catch, log_context +from simcore_service_agent.core.settings import ApplicationSettings +from starlette import status + +from ..models.volumes import VolumeDetails, VolumeDetailsAdapter +from .backup import backup_volume +from .instrumentation import get_instrumentation + +_logger = logging.getLogger(__name__) + + +def _reverse_string(to_reverse: str) -> str: + return to_reverse[::-1] + + +_VOLUMES_NOT_TO_BACKUP: Final[tuple[str, ...]] = ( + _reverse_string("inputs"), + _reverse_string("shared-store"), +) + + +def _does_volume_require_backup(volume_name: str) -> bool: + # from `dyv_1726228407_891aa1a7-eb31-459f-8aed-8c902f5f5fb0_dd84f39e-7154-4a13-ba1d-50068d723104_stupni_www_` + # returns `stupni_www_` + inverse_name_part = volume_name[CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME:] + return not inverse_name_part.startswith(_VOLUMES_NOT_TO_BACKUP) + + +async def get_unused_dynamc_sidecar_volumes(docker: Docker) -> set[str]: + """Returns all volumes unused by sidecars""" + volumes = await docker.volumes.list() + all_volumes: set[str] = {volume["Name"] for volume in volumes["Volumes"]} + + containers = await docker.containers.list(all=True) + + used_volumes: set[str] = set() + for container in containers: + container_info = await container.show() + mounts = container_info.get("Mounts", []) + for mount in mounts: + if mount["Type"] == "volume": + used_volumes.add(mount["Name"]) + + unused_volumes = all_volumes - used_volumes + return {v for v in unused_volumes if v.startswith(PREFIX_DYNAMIC_SIDECAR_VOLUMES)} + + +async def get_volume_details(docker: Docker, *, volume_name: str) -> VolumeDetails: + volume_details = await DockerVolume(docker, volume_name).show() + return 
VolumeDetailsAdapter.validate_python(volume_details) + + +@contextmanager +def _log_volume_not_found(volume_name: str) -> Iterator[None]: + try: + yield + except DockerError as e: + if e.status == status.HTTP_404_NOT_FOUND: + _logger.info("Volume not found '%s'", volume_name) + else: + raise + + +async def _backup_volume(app: FastAPI, docker: Docker, *, volume_name: str) -> None: + """Backs up only volumes which require a backup""" + if _does_volume_require_backup(volume_name): + with log_context( + _logger, logging.INFO, f"backup '{volume_name}'", log_duration=True + ): + volume_details = await get_volume_details(docker, volume_name=volume_name) + settings: ApplicationSettings = app.state.settings + get_instrumentation(app).agent_metrics.backedup_volumes( + settings.AGENT_DOCKER_NODE_ID + ) + await backup_volume(app, volume_details, volume_name) + else: + _logger.debug("No backup is required for '%s'", volume_name) + + +async def remove_volume( + app: FastAPI, docker: Docker, *, volume_name: str, requires_backup: bool + ) -> None: + """Removes a volume and backs data up if required""" + with log_context( + _logger, logging.DEBUG, f"removing '{volume_name}'", log_duration=True + ), log_catch(_logger, reraise=False), _log_volume_not_found(volume_name): + if requires_backup: + await _backup_volume(app, docker, volume_name=volume_name) + + await DockerVolume(docker, volume_name).delete() + + settings: ApplicationSettings = app.state.settings + get_instrumentation(app).agent_metrics.remove_volumes( + settings.AGENT_DOCKER_NODE_ID + ) + + +async def get_containers_with_prefixes(docker: Docker, prefixes: set[str]) -> set[str]: + """Returns a set of container names matching any of the given prefixes""" + all_containers = await docker.containers.list(all=True) + + result: set[str] = set() + for container in all_containers: + container_info = await container.show() + container_name = container_info.get("Name", "").lstrip("/") + if any(container_name.startswith(prefix) for prefix in prefixes): + result.add(container_name) + + return result + + +async def remove_container_forcefully(docker: Docker, container_id: str) -> None: + """Removes a container regardless of its state""" + try: + container = await docker.containers.get(container_id) + await container.delete(force=True) + except DockerError as e: + if e.status != status.HTTP_404_NOT_FOUND: + raise diff --git a/services/agent/src/simcore_service_agent/services/instrumentation/__init__.py b/services/agent/src/simcore_service_agent/services/instrumentation/__init__.py new file mode 100644 index 00000000000..49d7b66b079 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/instrumentation/__init__.py @@ -0,0 +1,6 @@ +from ._setup import get_instrumentation, setup_instrumentation + +__all__: tuple[str, ...] 
= ( + "get_instrumentation", + "setup_instrumentation", +) diff --git a/services/agent/src/simcore_service_agent/services/instrumentation/_models.py b/services/agent/src/simcore_service_agent/services/instrumentation/_models.py new file mode 100644 index 00000000000..2c49859e897 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/instrumentation/_models.py @@ -0,0 +1,53 @@ +from dataclasses import dataclass, field +from typing import Final + +from models_library.docker import DockerNodeID +from prometheus_client import CollectorRegistry, Counter +from servicelib.instrumentation import MetricsBase, get_metrics_namespace + +from ..._meta import APP_NAME + +_METRICS_NAMESPACE: Final[str] = get_metrics_namespace(APP_NAME) +_LABELS_COUNTERS: Final[tuple[str, ...]] = ("docker_node_id",) + + +@dataclass(slots=True, kw_only=True) +class AgentMetrics(MetricsBase): + volumes_removed: Counter = field(init=False) + volumes_backedup: Counter = field(init=False) + + def __post_init__(self) -> None: + self.volumes_removed = Counter( + "volumes_removed_total", + "Number of removed volumes by the agent", + labelnames=_LABELS_COUNTERS, + namespace=_METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + + self.volumes_backedup = Counter( + "volumes_backedup_total", + "Number of removed volumes whose content was uploaded by the agent", + labelnames=_LABELS_COUNTERS, + namespace=_METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + + def remove_volumes(self, docker_node_id: DockerNodeID) -> None: + self.volumes_removed.labels(docker_node_id=docker_node_id).inc() + + def backedup_volumes(self, docker_node_id: DockerNodeID) -> None: + self.volumes_backedup.labels(docker_node_id=docker_node_id).inc() + + +@dataclass(slots=True, kw_only=True) +class AgentInstrumentation: + registry: CollectorRegistry + agent_metrics: AgentMetrics = field(init=False) + + def __post_init__(self) -> None: + self.agent_metrics = AgentMetrics( # pylint: disable=unexpected-keyword-arg + subsystem="agent", registry=self.registry + ) diff --git a/services/agent/src/simcore_service_agent/services/instrumentation/_setup.py b/services/agent/src/simcore_service_agent/services/instrumentation/_setup.py new file mode 100644 index 00000000000..0f57e5be288 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/instrumentation/_setup.py @@ -0,0 +1,28 @@ +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) +from simcore_service_agent.core.settings import ApplicationSettings + +from ._models import AgentInstrumentation + + +def setup_instrumentation(app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + if not settings.AGENT_PROMETHEUS_INSTRUMENTATION_ENABLED: + return + + registry = setup_prometheus_instrumentation(app) + + async def on_startup() -> None: + app.state.instrumentation = AgentInstrumentation(registry=registry) + + app.add_event_handler("startup", on_startup) + + +def get_instrumentation(app: FastAPI) -> AgentInstrumentation: + assert ( + app.state.instrumentation + ), "Instrumentation not setup. 
Please check the configuration" # nosec + instrumentation: AgentInstrumentation = app.state.instrumentation + return instrumentation diff --git a/services/agent/src/simcore_service_agent/services/rabbitmq.py b/services/agent/src/simcore_service_agent/services/rabbitmq.py new file mode 100644 index 00000000000..3c548fb0b24 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/rabbitmq.py @@ -0,0 +1,29 @@ +from typing import cast + +from fastapi import FastAPI +from servicelib.rabbitmq import RabbitMQRPCClient, wait_till_rabbitmq_responsive +from settings_library.rabbit import RabbitSettings + + +def setup_rabbitmq(app: FastAPI) -> None: + settings: RabbitSettings = app.state.settings.AGENT_RABBITMQ + app.state.rabbitmq_rpc_server = None + + async def _on_startup() -> None: + await wait_till_rabbitmq_responsive(settings.dsn) + + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="dynamic_scheduler_rpc_server", settings=settings + ) + + async def _on_shutdown() -> None: + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) diff --git a/services/agent/src/simcore_service_agent/services/volumes_manager.py b/services/agent/src/simcore_service_agent/services/volumes_manager.py new file mode 100644 index 00000000000..860ab86d0e2 --- /dev/null +++ b/services/agent/src/simcore_service_agent/services/volumes_manager.py @@ -0,0 +1,189 @@ +import logging +from asyncio import Lock, Task +from dataclasses import dataclass, field +from datetime import datetime, timedelta +from typing import Final + +import arrow +from aiodocker.docker import Docker +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from pydantic import NonNegativeFloat +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.logging_utils import log_context +from servicelib.rabbitmq.rpc_interfaces.agent.errors import ( + NoServiceVolumesFoundRPCError, +) +from tenacity import AsyncRetrying, before_sleep_log, stop_after_delay, wait_fixed + +from ..core.settings import ApplicationSettings +from .docker_utils import get_unused_dynamc_sidecar_volumes, remove_volume + +_logger = logging.getLogger(__name__) + +_WAIT_FOR_UNUSED_SERVICE_VOLUMES: Final[timedelta] = timedelta(minutes=1) + + +@dataclass +class VolumesManager( # pylint:disable=too-many-instance-attributes + SingletonInAppStateMixin +): + app: FastAPI + book_keeping_interval: timedelta + volume_cleanup_interval: timedelta + remove_volumes_inactive_for: NonNegativeFloat + + docker: Docker = field(default_factory=Docker) + removal_lock: Lock = field(default_factory=Lock) + + _task_bookkeeping: Task | None = None + _unused_volumes: dict[str, datetime] = field(default_factory=dict) + + _task_periodic_volume_cleanup: Task | None = None + + app_state_name: str = "volumes_manager" + + async def setup(self) -> None: + self._task_bookkeeping = create_periodic_task( + self._bookkeeping_task, + interval=self.book_keeping_interval, + task_name="volumes bookkeeping", + ) + self._task_periodic_volume_cleanup = create_periodic_task( + self._periodic_volume_cleanup_task, + 
interval=self.volume_cleanup_interval,
+            task_name="volume cleanup",
+        )
+
+    async def shutdown(self) -> None:
+        await self.docker.close()
+
+        if self._task_bookkeeping:
+            await cancel_wait_task(self._task_bookkeeping)
+
+        if self._task_periodic_volume_cleanup:
+            await cancel_wait_task(self._task_periodic_volume_cleanup)
+
+    async def _bookkeeping_task(self) -> None:
+        with log_context(_logger, logging.DEBUG, "volume bookkeeping"):
+            current_unused_volumes = await get_unused_dynamc_sidecar_volumes(
+                self.docker
+            )
+            old_unused_volumes = set(self._unused_volumes.keys())
+
+            # volumes no longer unused (either removed or in use again)
+            to_remove = old_unused_volumes - current_unused_volumes
+            for volume in to_remove:
+                self._unused_volumes.pop(volume, None)
+
+            # volumes which have just been detected as inactive
+            to_add = current_unused_volumes - old_unused_volumes
+            for volume in to_add:
+                self._unused_volumes[volume] = arrow.utcnow().datetime
+
+    async def _remove_volume_safe(
+        self, *, volume_name: str, requires_backup: bool
+    ) -> None:
+        # NOTE: to avoid race conditions only one volume can be removed at a time;
+        # this also avoids issues with accessing the docker API in parallel
+        async with self.removal_lock:
+            await remove_volume(
+                self.app,
+                self.docker,
+                volume_name=volume_name,
+                requires_backup=requires_backup,
+            )
+
+    async def _periodic_volume_cleanup_task(self) -> None:
+        with log_context(_logger, logging.DEBUG, "volume cleanup"):
+            volumes_to_remove: set[str] = set()
+            for volume_name, inactive_since in self._unused_volumes.items():
+                volume_inactive_since = (
+                    arrow.utcnow().datetime - inactive_since
+                ).total_seconds()
+                if volume_inactive_since > self.remove_volumes_inactive_for:
+                    volumes_to_remove.add(volume_name)
+
+            for volume in volumes_to_remove:
+                await self._remove_volume_safe(volume_name=volume, requires_backup=True)
+
+    async def _wait_for_service_volumes_to_become_unused(
+        self, node_id: NodeID
+    ) -> set[str]:
+        # NOTE: it usually takes a few seconds for volumes to become unused;
+        # if the agent does not wait for this operation to finish,
+        # volumes will be removed and backed up by the background task,
+        # causing unnecessary data transfer to S3
+        async for attempt in AsyncRetrying(
+            reraise=True,
+            stop=stop_after_delay(_WAIT_FOR_UNUSED_SERVICE_VOLUMES.total_seconds()),
+            wait=wait_fixed(1),
+            before_sleep=before_sleep_log(_logger, logging.DEBUG),
+        ):
+            with attempt:
+                current_unused_volumes = await get_unused_dynamc_sidecar_volumes(
+                    self.docker
+                )
+
+                service_volumes = {
+                    v for v in current_unused_volumes if f"{node_id}" in v
+                }
+                _logger.debug(
+                    "service %s found volumes to remove: %s", node_id, service_volumes
+                )
+                if len(service_volumes) == 0:
+                    raise NoServiceVolumesFoundRPCError(
+                        period=_WAIT_FOR_UNUSED_SERVICE_VOLUMES.total_seconds(),
+                        node_id=node_id,
+                    )
+
+                return service_volumes
+
+    async def remove_service_volumes(self, node_id: NodeID) -> None:
+        # bookkept volumes might not be up to date
+        service_volumes = await self._wait_for_service_volumes_to_become_unused(node_id)
+        _logger.debug(
+            "will remove volumes for %s from service_volumes=%s",
+            node_id,
+            service_volumes,
+        )
+
+        for volume_name in service_volumes:
+            # volumes already saved to S3 by the sidecar and no longer require backup
+            await self._remove_volume_safe(
+                volume_name=volume_name, requires_backup=False
+            )
+
+    async def remove_all_volumes(self) -> None:
+        # bookkept volumes might not be up to date
+        current_unused_volumes = await get_unused_dynamc_sidecar_volumes(self.docker)
+
+        with log_context(_logger, logging.INFO, "remove
all volumes"): + for volume in current_unused_volumes: + await self._remove_volume_safe(volume_name=volume, requires_backup=True) + + +def get_volumes_manager(app: FastAPI) -> VolumesManager: + return VolumesManager.get_from_app_state(app) + + +def setup_volume_manager(app: FastAPI) -> None: + async def _on_startup() -> None: + settings: ApplicationSettings = app.state.settings + + volumes_manager = VolumesManager( + app=app, + book_keeping_interval=settings.AGENT_VOLUMES_CLEANUP_BOOK_KEEPING_INTERVAL, + volume_cleanup_interval=settings.AGENT_VOLUMES_CLEANUP_INTERVAL, + remove_volumes_inactive_for=settings.AGENT_VOLUMES_CLEANUP_REMOVE_VOLUMES_INACTIVE_FOR.total_seconds(), + ) + volumes_manager.set_to_app_state(app) + await volumes_manager.setup() + + async def _on_shutdown() -> None: + await VolumesManager.get_from_app_state(app).shutdown() + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/agent/tests/conftest.py b/services/agent/tests/conftest.py index 14d6a5d146e..97df58d4e5a 100644 --- a/services/agent/tests/conftest.py +++ b/services/agent/tests/conftest.py @@ -1,196 +1,72 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument -import logging -from pathlib import Path -from typing import AsyncIterator, Iterable, Iterator -from uuid import uuid4 -import aiodocker import pytest -import simcore_service_agent -from aiodocker.volumes import DockerVolume +from faker import Faker from models_library.basic_types import BootModeEnum +from models_library.docker import DockerNodeID from moto.server import ThreadedMotoServer -from pydantic import HttpUrl, parse_obj_as -from pytest import LogCaptureFixture, MonkeyPatch +from pydantic import HttpUrl, TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict from settings_library.r_clone import S3Provider -from simcore_service_agent.core.settings import ApplicationSettings - -pytestmark = pytest.mark.asyncio pytest_plugins = [ - "pytest_simcore.aws_services", + "pytest_simcore.aws_server", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.rabbit_service", "pytest_simcore.repository_paths", ] -@pytest.fixture(scope="session") -def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: - # fixtures in pytest_simcore.environs - service_folder = osparc_simcore_root_dir / "services" / "agent" - assert service_folder.exists() - assert any(service_folder.glob("src/simcore_service_agent")) - return service_folder - - -@pytest.fixture(scope="session") -def installed_package_dir() -> Path: - dirpath = Path(simcore_service_agent.__file__).resolve().parent - assert dirpath.exists() - return dirpath - - @pytest.fixture def swarm_stack_name() -> str: return "test-simcore" @pytest.fixture -def study_id() -> str: - return f"{uuid4()}" - - -@pytest.fixture -def node_uuid() -> str: - return f"{uuid4()}" - - -@pytest.fixture -def run_id() -> str: - return f"{uuid4()}" - - -@pytest.fixture -def bucket() -> str: - return f"test-bucket-{uuid4()}" - - -@pytest.fixture -def used_volume_path(tmp_path: Path) -> Path: - return tmp_path / "used_volume" +def docker_node_id() -> DockerNodeID: + return TypeAdapter(DockerNodeID).validate_python("testnodeid") @pytest.fixture -def unused_volume_path(tmp_path: Path) -> Path: - return tmp_path / "unused_volume" - - -def _get_source(run_id: str, node_uuid: str, volume_path: Path) -> str: - reversed_path = f"{volume_path}"[::-1].replace("/", "_") - return 
f"dyv_{run_id}_{node_uuid}_{reversed_path}" +def bucket(faker: Faker) -> str: + return f"test-bucket-{faker.uuid4()}" @pytest.fixture -async def unused_volume( - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, - unused_volume_path: Path, -) -> AsyncIterator[DockerVolume]: - async with aiodocker.Docker() as docker_client: - source = _get_source(run_id, node_uuid, unused_volume_path) - volume = await docker_client.volumes.create( - { - "Name": source, - "Labels": { - "node_uuid": node_uuid, - "run_id": run_id, - "source": source, - "study_id": study_id, - "swarm_stack_name": swarm_stack_name, - "user_id": "1", - }, - } - ) - - # attach to volume and create some files!!! - - yield volume - - try: - await volume.delete() - except aiodocker.DockerError: - pass - - -@pytest.fixture -async def used_volume( - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, - used_volume_path: Path, -) -> AsyncIterator[DockerVolume]: - async with aiodocker.Docker() as docker_client: - source = _get_source(run_id, node_uuid, used_volume_path) - volume = await docker_client.volumes.create( - { - "Name": source, - "Labels": { - "node_uuid": node_uuid, - "run_id": run_id, - "source": source, - "study_id": study_id, - "swarm_stack_name": swarm_stack_name, - "user_id": "1", - }, - } - ) - - container = await docker_client.containers.run( - config={ - "Cmd": ["/bin/ash", "-c", "sleep 10000"], - "Image": "alpine:latest", - "HostConfig": {"Binds": [f"{volume.name}:{used_volume_path}"]}, - }, - name=f"using_volume_{volume.name}", - ) - await container.start() - - yield volume - - await container.delete(force=True) - await volume.delete() - - -@pytest.fixture -def env( - monkeypatch: MonkeyPatch, +def mock_environment( + monkeypatch: pytest.MonkeyPatch, mocked_s3_server_url: HttpUrl, bucket: str, swarm_stack_name: str, -) -> None: - mock_dict = { - "LOGLEVEL": "DEBUG", - "SC_BOOT_MODE": BootModeEnum.DEBUG, - "AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME": swarm_stack_name, - "AGENT_VOLUMES_CLEANUP_S3_ENDPOINT": mocked_s3_server_url, - "AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY": "xxx", - "AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY": "xxx", - "AGENT_VOLUMES_CLEANUP_S3_BUCKET": bucket, - "AGENT_VOLUMES_CLEANUP_S3_PROVIDER": S3Provider.MINIO, - } - for key, value in mock_dict.items(): - monkeypatch.setenv(key, value) - - -@pytest.fixture -def settings(env: None) -> ApplicationSettings: - return ApplicationSettings.create_from_envs() - - -@pytest.fixture() -def caplog_info_debug(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]: - with caplog.at_level(logging.DEBUG): - yield caplog + docker_node_id: DockerNodeID, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "LOGLEVEL": "DEBUG", + "SC_BOOT_MODE": BootModeEnum.DEBUG, + "AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME": swarm_stack_name, + "AGENT_VOLUMES_CLEANUP_S3_ENDPOINT": f"{mocked_s3_server_url}", + "AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY": "xxx", + "AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY": "xxx", + "AGENT_VOLUMES_CLEANUP_S3_BUCKET": bucket, + "AGENT_VOLUMES_CLEANUP_S3_PROVIDER": S3Provider.MINIO, + "RABBIT_HOST": "test", + "RABBIT_PASSWORD": "test", + "RABBIT_SECURE": "false", + "RABBIT_USER": "test", + "AGENT_DOCKER_NODE_ID": docker_node_id, + "AGENT_TRACING": "null", + }, + ) @pytest.fixture(scope="module") -def mocked_s3_server_url(mocked_s3_server: ThreadedMotoServer) -> Iterator[HttpUrl]: +def mocked_s3_server_url(mocked_aws_server: ThreadedMotoServer) -> HttpUrl: # pylint: disable=protected-access - 
endpoint_url = parse_obj_as( - HttpUrl, f"http://{mocked_s3_server._ip_address}:{mocked_s3_server._port}" + return TypeAdapter(HttpUrl).validate_python( + f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # noqa: SLF001 ) - yield endpoint_url diff --git a/services/agent/tests/unit/conftest.py b/services/agent/tests/unit/conftest.py new file mode 100644 index 00000000000..4b23619f5a0 --- /dev/null +++ b/services/agent/tests/unit/conftest.py @@ -0,0 +1,144 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable +from contextlib import suppress +from pathlib import Path +from uuid import uuid4 + +import aiodocker +import pytest +from aiodocker.containers import DockerContainer +from aiodocker.volumes import DockerVolume +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from fastapi.testclient import TestClient +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from settings_library.rabbit import RabbitSettings +from simcore_service_agent.core.application import create_app +from utils import VOLUMES_TO_CREATE, get_source + + +@pytest.fixture +def service_env( + monkeypatch: pytest.MonkeyPatch, + mock_environment: EnvVarsDict, + rabbit_service: RabbitSettings, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **mock_environment, + "RABBIT_HOST": rabbit_service.RABBIT_HOST, + "RABBIT_PASSWORD": rabbit_service.RABBIT_PASSWORD.get_secret_value(), + "RABBIT_PORT": f"{rabbit_service.RABBIT_PORT}", + "RABBIT_SECURE": f"{rabbit_service.RABBIT_SECURE}", + "RABBIT_USER": rabbit_service.RABBIT_USER, + }, + ) + + +@pytest.fixture +async def initialized_app(service_env: EnvVarsDict) -> AsyncIterator[FastAPI]: + app: FastAPI = create_app() + + async with LifespanManager(app): + yield app + + +@pytest.fixture +def test_client(initialized_app: FastAPI) -> TestClient: + return TestClient(initialized_app) + + +@pytest.fixture +def service_run_id() -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_dynamic() + + +@pytest.fixture +def project_id() -> ProjectID: + return uuid4() + + +@pytest.fixture +def user_id() -> UserID: + return 1 + + +@pytest.fixture +def volumes_path(tmp_path: Path) -> Path: + return tmp_path / "volumes" + + +@pytest.fixture +async def create_dynamic_sidecar_volume( + service_run_id: ServiceRunID, + project_id: ProjectID, + swarm_stack_name: str, + user_id: UserID, + volumes_path: Path, +) -> AsyncIterable[Callable[[NodeID, bool, str], Awaitable[str]]]: + volumes_to_cleanup: list[DockerVolume] = [] + containers_to_cleanup: list[DockerContainer] = [] + + async with aiodocker.Docker() as docker_client: + + async def _(node_id: NodeID, in_use: bool, volume_name: str) -> str: + source = get_source(service_run_id, node_id, volumes_path / volume_name) + volume = await docker_client.volumes.create( + { + "Name": source, + "Labels": { + "node_uuid": f"{node_id}", + "run_id": service_run_id, + "source": source, + "study_id": f"{project_id}", + "swarm_stack_name": swarm_stack_name, + "user_id": f"{user_id}", + }, + } + ) + volumes_to_cleanup.append(volume) + + if in_use: + container = await docker_client.containers.run( + config={ + "Cmd": ["/bin/ash", "-c", "sleep 10000"], + "Image": 
"alpine:latest", + "HostConfig": {"Binds": [f"{volume.name}:{volumes_path}"]}, + }, + name=f"using_volume_{volume.name}", + ) + await container.start() + containers_to_cleanup.append(container) + + return source + + yield _ + + for container in containers_to_cleanup: + with suppress(aiodocker.DockerError): + await container.delete(force=True) + for volume in volumes_to_cleanup: + with suppress(aiodocker.DockerError): + await volume.delete() + + +@pytest.fixture +def create_dynamic_sidecar_volumes( + create_dynamic_sidecar_volume: Callable[[NodeID, bool, str], Awaitable[str]] +) -> Callable[[NodeID, bool], Awaitable[set[str]]]: + async def _(node_id: NodeID, in_use: bool) -> set[str]: + volume_names: set[str] = set() + for volume_name in VOLUMES_TO_CREATE: + name = await create_dynamic_sidecar_volume(node_id, in_use, volume_name) + volume_names.add(name) + + return volume_names + + return _ diff --git a/services/agent/tests/unit/test_api_rest__health.py b/services/agent/tests/unit/test_api_rest__health.py new file mode 100644 index 00000000000..9f0904c182e --- /dev/null +++ b/services/agent/tests/unit/test_api_rest__health.py @@ -0,0 +1,17 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name + + +from fastapi import status +from fastapi.testclient import TestClient +from models_library.api_schemas__common.health import HealthCheckGet + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +def test_health_ok(test_client: TestClient): + response = test_client.get("/health") + assert response.status_code == status.HTTP_200_OK + assert HealthCheckGet.model_validate(response.json()) diff --git a/services/agent/tests/unit/test_api_rpc__containers.py b/services/agent/tests/unit/test_api_rpc__containers.py new file mode 100644 index 00000000000..201acf5d218 --- /dev/null +++ b/services/agent/tests/unit/test_api_rpc__containers.py @@ -0,0 +1,55 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Awaitable, Callable +from unittest.mock import AsyncMock + +import pytest +import pytest_mock +from faker import Faker +from fastapi import FastAPI +from models_library.docker import DockerNodeID +from models_library.projects_nodes_io import NodeID +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.agent import containers + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +async def rpc_client( + initialized_app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") + + +@pytest.fixture +def mocked_force_container_cleanup(mocker: pytest_mock.MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_agent.services.containers_manager.ContainersManager.force_container_cleanup" + ) + + +async def test_force_container_cleanup( + rpc_client: RabbitMQRPCClient, + swarm_stack_name: str, + docker_node_id: DockerNodeID, + node_id: NodeID, + mocked_force_container_cleanup: AsyncMock, +): + assert mocked_force_container_cleanup.call_count == 0 + await containers.force_container_cleanup( + rpc_client, + docker_node_id=docker_node_id, + swarm_stack_name=swarm_stack_name, + node_id=node_id, + ) + assert mocked_force_container_cleanup.call_count == 1 diff --git a/services/agent/tests/unit/test_api_rpc__volumes.py b/services/agent/tests/unit/test_api_rpc__volumes.py 
new file mode 100644 index 00000000000..6e7eeb76485 --- /dev/null +++ b/services/agent/tests/unit/test_api_rpc__volumes.py @@ -0,0 +1,68 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Awaitable, Callable +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +import pytest_mock +from fastapi import FastAPI +from models_library.docker import DockerNodeID +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.agent import volumes + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +async def rpc_client( + initialized_app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") + + +@pytest.fixture +def mocked_remove_service_volumes(mocker: pytest_mock.MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_agent.services.volumes_manager.VolumesManager.remove_service_volumes" + ) + + +@pytest.fixture +def mocked_remove_all_volumes(mocker: pytest_mock.MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_agent.services.volumes_manager.VolumesManager.remove_all_volumes" + ) + + +async def test_backup_and_remove_volumes_for_all_services( + rpc_client: RabbitMQRPCClient, + swarm_stack_name: str, + docker_node_id: DockerNodeID, + mocked_remove_all_volumes: AsyncMock, +): + assert mocked_remove_all_volumes.call_count == 0 + await volumes.backup_and_remove_volumes_for_all_services( + rpc_client, docker_node_id=docker_node_id, swarm_stack_name=swarm_stack_name + ) + assert mocked_remove_all_volumes.call_count == 1 + + +async def test_remove_volumes_without_backup_for_service( + rpc_client: RabbitMQRPCClient, + swarm_stack_name: str, + docker_node_id: str, + mocked_remove_service_volumes: AsyncMock, +): + assert mocked_remove_service_volumes.call_count == 0 + await volumes.remove_volumes_without_backup_for_service( + rpc_client, + docker_node_id=docker_node_id, + swarm_stack_name=swarm_stack_name, + node_id=uuid4(), + ) + assert mocked_remove_service_volumes.call_count == 1 diff --git a/services/agent/tests/unit/test_cli.py b/services/agent/tests/unit/test_cli.py index 0eff80574b5..a205dadb47b 100644 --- a/services/agent/tests/unit/test_cli.py +++ b/services/agent/tests/unit/test_cli.py @@ -5,12 +5,13 @@ import pytest from click.testing import Result +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict from simcore_service_agent.cli import main from typer.testing import CliRunner @pytest.fixture -def cli_runner() -> CliRunner: +def cli_runner(mock_environment: EnvVarsDict) -> CliRunner: return CliRunner() @@ -20,7 +21,15 @@ def _format_cli_error(result: Result) -> str: return f"Below exception was raised by the cli:\n{tb_message}" -def test_process_settings(env: None, cli_runner: CliRunner): - result = cli_runner.invoke(main, []) +def test_process_cli_options(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["--help"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + result = cli_runner.invoke(main, ["settings"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + result = cli_runner.invoke(main, ["--version"]) print(result.stdout) assert result.exit_code == 0, _format_cli_error(result) diff --git a/services/agent/tests/unit/test_core_routes.py b/services/agent/tests/unit/test_core_routes.py deleted file mode 100644 index 
1fd0252d1aa..00000000000 --- a/services/agent/tests/unit/test_core_routes.py +++ /dev/null @@ -1,58 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=protected-access - -from time import time -from typing import AsyncIterator - -import pytest -from fastapi import FastAPI, status -from fastapi.testclient import TestClient -from simcore_service_agent.core.application import create_app -from simcore_service_agent.modules.task_monitor import TaskMonitor - - -@pytest.fixture -async def initialized_app() -> AsyncIterator[FastAPI]: - app: FastAPI = create_app() - - await app.router.startup() - yield app - await app.router.shutdown() - - -@pytest.fixture -def test_client(initialized_app: FastAPI) -> TestClient: - return TestClient(initialized_app) - - -def test_health_ok(env: None, test_client: TestClient): - response = test_client.get("/health") - assert response.status_code == status.HTTP_200_OK - assert response.json() == None - - -def test_health_fails_not_started( - env: None, initialized_app: FastAPI, test_client: TestClient -): - task_monitor: TaskMonitor = initialized_app.state.task_monitor - # emulate monitor not being started - task_monitor._was_started = False - - response = test_client.get("/health") - assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - assert response.json() == {"detail": "unhealthy"} - - -def test_health_fails_hanging_tasks( - env: None, initialized_app: FastAPI, test_client: TestClient -): - task_monitor: TaskMonitor = initialized_app.state.task_monitor - - # emulate tasks hanging - for task_data in task_monitor._to_start.values(): - task_data._start_time = time() - 1e6 - - response = test_client.get("/health") - assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE - assert response.json() == {"detail": "unhealthy"} diff --git a/services/agent/tests/unit/test_modules_task_monitor.py b/services/agent/tests/unit/test_modules_task_monitor.py deleted file mode 100644 index 788d09b7496..00000000000 --- a/services/agent/tests/unit/test_modules_task_monitor.py +++ /dev/null @@ -1,92 +0,0 @@ -# pylint:disable=protected-access - -import asyncio -from typing import Final, Optional - -import pytest -from pydantic import PositiveFloat -from pytest import LogCaptureFixture -from simcore_service_agent.modules.task_monitor import TaskMonitor - -REPEAT_TASK_INTERVAL_S: Final[PositiveFloat] = 0.05 - - -async def _job_which_raises_error() -> None: - raise RuntimeError("raised expected error") - - -async def _job_which_hangs() -> None: - print("I will be hanging....") - await asyncio.sleep(REPEAT_TASK_INTERVAL_S * 10000) - - -@pytest.mark.parametrize("repeat_interval_s", [REPEAT_TASK_INTERVAL_S, None]) -async def test_task_monitor_recovers_from_error( - caplog_info_debug: LogCaptureFixture, - repeat_interval_s: Optional[PositiveFloat], -): - - task_monitor = TaskMonitor() - task_monitor.register_job( - _job_which_raises_error, repeat_interval_s=repeat_interval_s - ) - - await task_monitor.start() - - await asyncio.sleep(REPEAT_TASK_INTERVAL_S * 2) - - await task_monitor.shutdown() - assert len(task_monitor._tasks) == 0 - assert len(task_monitor._to_start) == 0 - - log_messages = caplog_info_debug.text - print(log_messages) - - assert f"Starting '{_job_which_raises_error.__name__}' ..." 
in log_messages - assert 'RuntimeError("raised expected error")' in log_messages - assert ( - f"Will run '{_job_which_raises_error.__name__}' again in {repeat_interval_s} seconds" - in log_messages - ) - if repeat_interval_s is None: - assert ( - f"Unexpected termination of '{_job_which_raises_error.__name__}'; it will be restarted" - in log_messages - ) - - -async def test_add_same_task_fails(): - task_monitor = TaskMonitor() - task_monitor.register_job(_job_which_raises_error, repeat_interval_s=1) - with pytest.raises(RuntimeError) as exe_info: - task_monitor.register_job(_job_which_raises_error, repeat_interval_s=1) - assert ( - f"{exe_info.value}" - == f"{_job_which_raises_error.__name__} is already registered" - ) - - -async def test_add_task_after_start_fails(): - task_monitor = TaskMonitor() - await task_monitor.start() - - with pytest.raises(RuntimeError) as exe_info: - task_monitor.register_job(_job_which_raises_error, repeat_interval_s=1) - assert ( - f"{exe_info.value}" == "Cannot add more tasks, monitor already running with: []" - ) - await task_monitor.shutdown() - - -async def test_hanging_jobs_are_detected(): - task_monitor = TaskMonitor() - task_monitor.register_job( - _job_which_hangs, repeat_interval_s=REPEAT_TASK_INTERVAL_S - ) - await task_monitor.start() - - assert task_monitor.are_tasks_hanging is False - - await asyncio.sleep(REPEAT_TASK_INTERVAL_S * 2) - - assert task_monitor.are_tasks_hanging is True diff --git a/services/agent/tests/unit/test_modules_volumes_cleanup.py b/services/agent/tests/unit/test_modules_volumes_cleanup.py deleted file mode 100644 index 13d037d81ec..00000000000 --- a/services/agent/tests/unit/test_modules_volumes_cleanup.py +++ /dev/null @@ -1,92 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument - - -from pathlib import Path - -import pytest -from aiodocker.volumes import DockerVolume -from pytest import LogCaptureFixture -from pytest_mock.plugin import MockerFixture -from simcore_service_agent.core.settings import ApplicationSettings -from simcore_service_agent.modules.volumes_cleanup import backup_and_remove_volumes - - -@pytest.fixture -async def mock_volumes_folders( - mocker: MockerFixture, - unused_volume: DockerVolume, - used_volume: DockerVolume, - unused_volume_path: Path, - used_volume_path: Path, -) -> None: - - unused_volume_path.mkdir(parents=True, exist_ok=True) - used_volume_path.mkdir(parents=True, exist_ok=True) - - # root permissions are required to access the /var/docker data - # overwriting with a mocked path for this test - unused_volume_data = await unused_volume.show() - unused_volume_data["Mountpoint"] = f"{unused_volume_path}" - used_volume_data = await used_volume.show() - used_volume_data["Mountpoint"] = f"{used_volume_path}" - - volumes_inspect = [unused_volume_data, used_volume_data] - - # patch the function here - mocker.patch( - "aiodocker.volumes.DockerVolumes.list", - return_value={"Volumes": volumes_inspect}, - ) - - -@pytest.fixture -async def used_volume_name(used_volume: DockerVolume) -> str: - return (await used_volume.show())["Name"] - - -@pytest.fixture -async def unused_volume_name(unused_volume: DockerVolume) -> str: - return (await unused_volume.show())["Name"] - - -async def test_workflow( - mock_volumes_folders: None, - caplog_info_debug: LogCaptureFixture, - settings: ApplicationSettings, - used_volume_name: str, - unused_volume_name: str, -): - await backup_and_remove_volumes(settings) - - log_messages = caplog_info_debug.messages - assert f"Removed docker volume: 
'{unused_volume_name}'" in log_messages - assert f"Skipped in use docker volume: '{used_volume_name}'" in log_messages - - -@pytest.mark.parametrize( - "error_class, error_message", - [ - (RuntimeError, "this was already handled"), - (Exception, "also capture all other generic errors"), - ], -) -async def test_regression_error_handling( - mock_volumes_folders: None, - caplog_info_debug: LogCaptureFixture, - settings: ApplicationSettings, - used_volume_name: str, - unused_volume_name: str, - mocker: MockerFixture, - error_class: type[BaseException], - error_message: str, -): - mocker.patch( - "simcore_service_agent.modules.volumes_cleanup._core.store_to_s3", - side_effect=error_class(error_message), - ) - - await backup_and_remove_volumes(settings) - - log_messages = caplog_info_debug.messages - assert error_message in log_messages diff --git a/services/agent/tests/unit/test_modules_volumes_cleanup_docker.py b/services/agent/tests/unit/test_modules_volumes_cleanup_docker.py deleted file mode 100644 index e2e74088bd6..00000000000 --- a/services/agent/tests/unit/test_modules_volumes_cleanup_docker.py +++ /dev/null @@ -1,133 +0,0 @@ -# pylint: disable=redefined-outer-name) - -from typing import Any, AsyncIterator - -import aiodocker -import pytest -from aiodocker.volumes import DockerVolume -from pytest_mock import MockerFixture -from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES -from simcore_service_agent.modules.volumes_cleanup._docker import ( - docker_client, - get_dyv_volumes, - is_volume_used, -) - -# UTILS - - -async def _create_volume( - docker_client: aiodocker.Docker, - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, -) -> DockerVolume: - mocked_source = f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_a_test_ok" - volume = await docker_client.volumes.create( - { - "Name": mocked_source, - "Labels": { - "node_uuid": node_uuid, - "run_id": run_id, - "source": mocked_source, - "study_id": study_id, - "swarm_stack_name": swarm_stack_name, - "user_id": "1", - }, - } - ) - return volume - - -# FIXTURES - - -@pytest.fixture -async def volume_with_correct_target( - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, -) -> AsyncIterator[dict[str, Any]]: - async with aiodocker.Docker() as docker_client: - volume = await _create_volume( - docker_client, swarm_stack_name, study_id, node_uuid, run_id - ) - - yield await volume.show() - - try: - await volume.delete() - except aiodocker.DockerError: - pass - - -@pytest.fixture -def wrong_swarm_stack_name() -> str: - return "a_different_swarm_stack_name" - - -@pytest.fixture -async def volume_with_wrong_target( - study_id: str, node_uuid: str, run_id: str, wrong_swarm_stack_name: str -) -> None: - async with aiodocker.Docker() as docker_client: - volume = await _create_volume( - docker_client, wrong_swarm_stack_name, study_id, node_uuid, run_id - ) - - yield await volume.show() - - try: - await volume.delete() - except aiodocker.DockerError: - pass - - -# TESTS - - -async def test_get_dyv_volumes_expect_a_volume( - volume_with_correct_target: dict[str, Any], swarm_stack_name: str -): - async with aiodocker.Docker() as docker_client: - volumes = await get_dyv_volumes(docker_client, swarm_stack_name) - assert len(volumes) == 1 - assert volumes[0] == volume_with_correct_target - - -async def test_get_dyv_volumes_expect_no_volume( - volume_with_wrong_target: dict[str, Any], - swarm_stack_name: str, - wrong_swarm_stack_name: str, -): - async with aiodocker.Docker() as docker_client: - volumes = await 
get_dyv_volumes(docker_client, swarm_stack_name) - assert len(volumes) == 0 - - async with aiodocker.Docker() as docker_client: - volumes = await get_dyv_volumes(docker_client, wrong_swarm_stack_name) - assert len(volumes) == 1 - assert volumes[0] == volume_with_wrong_target - - -async def test_is_volume_mounted_true_(used_volume: DockerVolume): - async with docker_client() as client: - assert await is_volume_used(client, used_volume.name) is True - - -async def test_is_volume_mounted_false(unused_volume: DockerVolume): - async with docker_client() as client: - assert await is_volume_used(client, unused_volume.name) is False - - -async def test_regression_volume_labels_are_none(mocker: MockerFixture): - mocked_volumes = { - "Volumes": [{"Name": f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_test", "Labels": None}] - } - - async with docker_client() as client: - mocker.patch.object(client.volumes, "list", return_value=mocked_volumes) - - await get_dyv_volumes(client, "test") diff --git a/services/agent/tests/unit/test_modules_volumes_cleanup_s3.py b/services/agent/tests/unit/test_modules_volumes_cleanup_s3.py deleted file mode 100644 index 29639d4aadd..00000000000 --- a/services/agent/tests/unit/test_modules_volumes_cleanup_s3.py +++ /dev/null @@ -1,237 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=too-many-locals - -import hashlib -from pathlib import Path -from typing import Optional - -import aioboto3 -import pytest -from aiodocker.volumes import DockerVolume -from pydantic import HttpUrl -from pytest import LogCaptureFixture -from simcore_service_agent.core.settings import ApplicationSettings -from simcore_service_agent.modules.volumes_cleanup._s3 import ( - S3Provider, - _get_dir_name, - _get_s3_path, - store_to_s3, -) - -# UTILS - - -def _get_file_hashes_in_path( - path_to_hash: Path, exclude_files: Optional[set[Path]] = None -) -> set[tuple[Path, str]]: - def _hash_path(path: Path): - sha256_hash = hashlib.sha256() - with path.open("rb") as file: - # Read and update hash string value in blocks of 4K - for byte_block in iter(lambda: file.read(4096), b""): - sha256_hash.update(byte_block) - return sha256_hash.hexdigest() - - if path_to_hash.is_file(): - return {(path_to_hash.relative_to(path_to_hash), _hash_path(path_to_hash))} - - if exclude_files is None: - exclude_files = set() - - return { - (path.relative_to(path_to_hash), _hash_path(path)) - for path in path_to_hash.rglob("*") - if path.is_file() and path.relative_to(path_to_hash) not in exclude_files - } - - -async def _download_files_from_bucket( - endpoint: str, - access_key: str, - secret_key: str, - bucket_name: str, - save_to: Path, - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, -) -> None: - session = aioboto3.Session( - aws_access_key_id=access_key, aws_secret_access_key=secret_key - ) - async with session.resource("s3", endpoint_url=endpoint, use_ssl=False) as s_3: - bucket = await s_3.Bucket(bucket_name) - async for s3_object in bucket.objects.all(): - key_path = f"{swarm_stack_name}/{study_id}/{node_uuid}/{run_id}/" - if s3_object.key.startswith(key_path): - file_object = await s3_object.get() - file_path: Path = save_to / s3_object.key.replace(key_path, "") - file_path.parent.mkdir(parents=True, exist_ok=True) - print(f"Saving file to {file_path}") - file_content = await file_object["Body"].read() - file_path.write_bytes(file_content) - - -def _create_data(folder: Path) -> None: - for file in { # pylint:disable=use-sequence-for-iteration - 
".hidden_do_not_remove", - "key_values.json", - "f1.txt", - "f2.txt", - "f3.txt", - "d1/f1.txt", - "d1/f2.txt", - "d1/f3.txt", - "d1/sd1/f1.txt", - "d1/sd1/f2.txt", - "d1/sd1/f3.txt", - }: - file_path = folder / file - file_path.parent.mkdir(parents=True, exist_ok=True) - file_path.write_text("test") - - -# FIXTURES - - -@pytest.fixture -def save_to(tmp_path: Path) -> Path: - return tmp_path / "save_to" - - -# TESTS - - -async def test_get_s3_path( - unused_volume: DockerVolume, - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, - bucket: str, -): - volume_data = await unused_volume.show() - assert _get_s3_path(bucket, volume_data["Labels"], unused_volume.name) == Path( - f"/{bucket}/{swarm_stack_name}/{study_id}/{node_uuid}/{run_id}/{_get_dir_name(unused_volume.name)}" - ) - - -async def test_store_to_s3( - unused_volume: DockerVolume, - mocked_s3_server_url: HttpUrl, - unused_volume_path: Path, - save_to: Path, - study_id: str, - node_uuid: str, - run_id: str, - bucket: str, - settings: ApplicationSettings, -): - _create_data(unused_volume_path) - dyv_volume = await unused_volume.show() - - # overwrite to test locally not against volume - # root permissions are required to access this - dyv_volume["Mountpoint"] = unused_volume_path - - await store_to_s3( - volume_name=unused_volume.name, - dyv_volume=dyv_volume, - s3_access_key="xxx", - s3_secret_key="xxx", - s3_bucket=bucket, - s3_endpoint=mocked_s3_server_url, - s3_region="us-east-1", - s3_provider=S3Provider.MINIO, - s3_parallelism=3, - s3_retries=1, - exclude_files=settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES, - ) - - await _download_files_from_bucket( - endpoint=mocked_s3_server_url, - access_key="xxx", - secret_key="xxx", - bucket_name=bucket, - save_to=save_to, - swarm_stack_name=dyv_volume["Labels"]["swarm_stack_name"], - study_id=study_id, - node_uuid=node_uuid, - run_id=run_id, - ) - - hashes_on_disk = _get_file_hashes_in_path( - unused_volume_path, set(map(Path, settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES)) - ) - volume_path_without_source_dir = save_to / _get_dir_name(unused_volume.name) - hashes_in_s3 = _get_file_hashes_in_path(volume_path_without_source_dir) - assert len(hashes_on_disk) > 0 - assert len(hashes_in_s3) > 0 - assert hashes_on_disk == hashes_in_s3 - - -@pytest.mark.parametrize("provider", [S3Provider.CEPH, S3Provider.MINIO]) -async def test_regression_non_aws_providers( - unused_volume: DockerVolume, - mocked_s3_server_url: HttpUrl, - unused_volume_path: Path, - bucket: str, - settings: ApplicationSettings, - caplog_info_debug: LogCaptureFixture, - provider: S3Provider, -): - _create_data(unused_volume_path) - dyv_volume = await unused_volume.show() - - # overwrite to test locally not against volume - # root permissions are required to access this - dyv_volume["Mountpoint"] = unused_volume_path - - await store_to_s3( - volume_name=unused_volume.name, - dyv_volume=dyv_volume, - s3_access_key="xxx", - s3_secret_key="xxx", - s3_bucket=bucket, - s3_endpoint=mocked_s3_server_url, - s3_region="us-east-1", - s3_provider=provider, - s3_parallelism=3, - s3_retries=1, - exclude_files=settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES, - ) - - assert f'provider "{provider}" not known' not in caplog_info_debug.text - - -async def test_regression_store_to_s3_volume_mountpoint_not_found( - unused_volume: DockerVolume, - mocked_s3_server_url: HttpUrl, - unused_volume_path: Path, - bucket: str, - settings: ApplicationSettings, - caplog_info_debug: LogCaptureFixture, -): - dyv_volume = await 
unused_volume.show() - assert unused_volume_path.exists() is False - - # overwrite to test locally not against volume - # root permissions are required to access this - dyv_volume["Mountpoint"] = unused_volume_path - - await store_to_s3( - volume_name=unused_volume.name, - dyv_volume=dyv_volume, - s3_access_key="xxx", - s3_secret_key="xxx", - s3_bucket=bucket, - s3_endpoint=mocked_s3_server_url, - s3_region="us-east-1", - s3_provider=S3Provider.MINIO, - s3_parallelism=3, - s3_retries=1, - exclude_files=settings.AGENT_VOLUMES_CLEANUP_EXCLUDE_FILES, - ) - assert f"mountpoint {unused_volume_path} does not exist" in caplog_info_debug.text - assert f"{unused_volume.name}" in caplog_info_debug.text diff --git a/services/agent/tests/unit/test_services_backup.py b/services/agent/tests/unit/test_services_backup.py new file mode 100644 index 00000000000..d544a25dfa5 --- /dev/null +++ b/services/agent/tests/unit/test_services_backup.py @@ -0,0 +1,105 @@ +# pylint: disable=redefined-outer-name + +import asyncio +from collections.abc import Awaitable, Callable +from pathlib import Path +from typing import Final +from uuid import uuid4 + +import aioboto3 +import pytest +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from pydantic import NonNegativeInt +from simcore_service_agent.core.settings import ApplicationSettings +from simcore_service_agent.services.backup import backup_volume +from simcore_service_agent.services.docker_utils import get_volume_details +from simcore_service_agent.services.volumes_manager import VolumesManager +from utils import VOLUMES_TO_CREATE + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +_FILES_TO_CREATE_IN_VOLUME: Final[NonNegativeInt] = 10 + + +@pytest.fixture +def volume_content(tmpdir: Path) -> Path: + path = Path(tmpdir) / "to_copy" + path.mkdir(parents=True, exist_ok=True) + + for i in range(_FILES_TO_CREATE_IN_VOLUME): + (path / f"f{i}").write_text(f"some text for file {i}\n" * (i + 1)) + + return path + + +@pytest.fixture +def downlaoded_from_s3(tmpdir: Path) -> Path: + path = Path(tmpdir) / "downloaded_from_s3" + path.mkdir(parents=True, exist_ok=True) + return path + + +async def test_backup_volume( + volume_content: Path, + project_id: ProjectID, + swarm_stack_name: str, + service_run_id: ServiceRunID, + downlaoded_from_s3: Path, + create_dynamic_sidecar_volumes: Callable[[NodeID, bool], Awaitable[set[str]]], + initialized_app: FastAPI, +): + node_id = uuid4() + volumes: set[str] = await create_dynamic_sidecar_volumes( + node_id, True # noqa: FBT003 + ) + + for volume in volumes: + volume_details = await get_volume_details( + VolumesManager.get_from_app_state(initialized_app).docker, + volume_name=volume, + ) + # root permissions are required to access the /var/docker data + # overwriting with a mocked path for this test + volume_details.mountpoint = volume_content + await backup_volume(initialized_app, volume_details, volume) + + settings: ApplicationSettings = initialized_app.state.settings + + session = aioboto3.Session( + aws_access_key_id=settings.AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY, + aws_secret_access_key=settings.AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY, + ) + + expected_files = _FILES_TO_CREATE_IN_VOLUME * len(VOLUMES_TO_CREATE) + + async with session.client("s3", endpoint_url=f"{settings.AGENT_VOLUMES_CLEANUP_S3_ENDPOINT}") as s3_client: # type: ignore + list_response = await s3_client.list_objects_v2( + 
Bucket=settings.AGENT_VOLUMES_CLEANUP_S3_BUCKET, + Prefix=f"{swarm_stack_name}/{project_id}/{node_id}/{service_run_id}", + ) + synced_keys: list[str] = [o["Key"] for o in list_response["Contents"]] + + assert len(synced_keys) == expected_files + + async def _download_file(key: str) -> None: + key_path = Path(key) + (downlaoded_from_s3 / key_path.parent.name).mkdir( + parents=True, exist_ok=True + ) + await s3_client.download_file( + settings.AGENT_VOLUMES_CLEANUP_S3_BUCKET, + key, + downlaoded_from_s3 / key_path.parent.name / key_path.name, + ) + + await asyncio.gather(*[_download_file(key) for key in synced_keys]) + + assert ( + len([x for x in downlaoded_from_s3.rglob("*") if x.is_file()]) + == expected_files + ) diff --git a/services/agent/tests/unit/test_services_containers_manager.py b/services/agent/tests/unit/test_services_containers_manager.py new file mode 100644 index 00000000000..4489d975ab3 --- /dev/null +++ b/services/agent/tests/unit/test_services_containers_manager.py @@ -0,0 +1,107 @@ +# pylint: disable=redefined-outer-name + + +import logging +from collections.abc import AsyncIterable, Awaitable, Callable +from enum import Enum + +import pytest +from aiodocker import Docker, DockerError +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI, status +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_PROXY_SERVICE_PREFIX, + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) +from models_library.projects_nodes_io import NodeID +from simcore_service_agent.services.containers_manager import ( + get_containers_manager, + setup_containers_manager, +) + + +@pytest.fixture +async def app() -> AsyncIterable[FastAPI]: + app = FastAPI() + setup_containers_manager(app) + + async with LifespanManager(app): + yield app + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +async def docker() -> AsyncIterable[Docker]: + async with Docker() as docker: + yield docker + + +class _ContainerMode(Enum): + CREATED = "CREATED" + RUNNING = "RUNNING" + STOPPED = "STOPPED" + + +@pytest.fixture +async def create_container( + docker: Docker, +) -> AsyncIterable[Callable[[str, _ContainerMode], Awaitable[str]]]: + created_containers: set[str] = set() + + async def _(name: str, container_mode: _ContainerMode) -> str: + container = await docker.containers.create( + config={ + "Image": "alpine", + "Cmd": ["sh", "-c", "while true; do sleep 1; done"], + }, + name=name, + ) + + if container_mode in (_ContainerMode.RUNNING, _ContainerMode.STOPPED): + await container.start() + if container_mode == _ContainerMode.STOPPED: + await container.stop() + + created_containers.add(container.id) + return container.id + + yield _ + + # cleanup containers + for container_id in created_containers: + try: + container = await docker.containers.get(container_id) + await container.delete(force=True) + except DockerError as e: + if e.status != status.HTTP_404_NOT_FOUND: + raise + + +async def test_force_container_cleanup( + app: FastAPI, + node_id: NodeID, + create_container: Callable[[str, _ContainerMode], Awaitable[str]], + faker: Faker, + caplog: pytest.LogCaptureFixture, +): + caplog.set_level(logging.DEBUG) + caplog.clear() + + proxy_name = f"{DYNAMIC_PROXY_SERVICE_PREFIX}_{node_id}{faker.pystr()}" + dynamic_sidecar_name = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}-{node_id}{faker.pystr()}" + user_service_name = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_id}{faker.pystr()}" + + await create_container(proxy_name, 
_ContainerMode.CREATED) + await create_container(dynamic_sidecar_name, _ContainerMode.RUNNING) + await create_container(user_service_name, _ContainerMode.STOPPED) + + await get_containers_manager(app).force_container_cleanup(node_id) + + assert proxy_name in caplog.text + assert dynamic_sidecar_name in caplog.text + assert user_service_name in caplog.text diff --git a/services/agent/tests/unit/test_services_docker_utils.py b/services/agent/tests/unit/test_services_docker_utils.py new file mode 100644 index 00000000000..f4a19c9b9aa --- /dev/null +++ b/services/agent/tests/unit/test_services_docker_utils.py @@ -0,0 +1,148 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name + +from collections.abc import Awaitable, Callable +from pathlib import Path +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +from aiodocker.docker import Docker +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from pytest_mock import MockerFixture +from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES +from simcore_service_agent.services.docker_utils import ( + _VOLUMES_NOT_TO_BACKUP, + _does_volume_require_backup, + _reverse_string, + get_unused_dynamc_sidecar_volumes, + get_volume_details, + remove_volume, +) +from simcore_service_agent.services.volumes_manager import VolumesManager +from utils import VOLUMES_TO_CREATE, get_source + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +def test__reverse_string(): + assert _reverse_string("abcd") == "dcba" + + +@pytest.mark.parametrize( + "volume_path_part, expected", + [ + ("inputs", False), + ("shared-store", False), + ("outputs", True), + ("workdir", True), + ], +) +def test__does_volume_require_backup( + service_run_id: ServiceRunID, volume_path_part: str, expected: bool +) -> None: + volume_name = get_source(service_run_id, uuid4(), Path("/apath") / volume_path_part) + print(volume_name) + assert _does_volume_require_backup(volume_name) is expected + + +@pytest.fixture +def volumes_manager_docker_client(initialized_app: FastAPI) -> Docker: + volumes_manager = VolumesManager.get_from_app_state(initialized_app) + return volumes_manager.docker + + +@pytest.fixture +def mock_backup_volume(mocker: MockerFixture) -> AsyncMock: + return mocker.patch("simcore_service_agent.services.docker_utils.backup_volume") + + +@pytest.mark.parametrize("volume_count", [2]) +@pytest.mark.parametrize("requires_backup", [True, False]) +async def test_doclker_utils_workflow( + volume_count: int, + requires_backup: bool, + initialized_app: FastAPI, + volumes_manager_docker_client: Docker, + create_dynamic_sidecar_volumes: Callable[[NodeID, bool], Awaitable[set[str]]], + mock_backup_volume: AsyncMock, +): + created_volumes: set[str] = set() + for _ in range(volume_count): + created_volume = await create_dynamic_sidecar_volumes( + uuid4(), False # noqa: FBT003 + ) + created_volumes.update(created_volume) + + volumes = await get_unused_dynamc_sidecar_volumes(volumes_manager_docker_client) + assert volumes == created_volumes, ( + "Most likely you have a dirty working state, please check " + "that there are no previous docker volumes named `dyv_...` " + "currently present on the machine" + ) + + assert len(volumes) == len(VOLUMES_TO_CREATE) * volume_count + + count_vloumes_to_backup = 0 + count_volumes_to_skip = 0 + + for volume in volumes: + if _does_volume_require_backup(volume): + count_vloumes_to_backup += 1 + else: + 
count_volumes_to_skip += 1 + + assert volume.startswith(PREFIX_DYNAMIC_SIDECAR_VOLUMES) + await remove_volume( + initialized_app, + volumes_manager_docker_client, + volume_name=volume, + requires_backup=requires_backup, + ) + + assert ( + count_vloumes_to_backup + == (len(VOLUMES_TO_CREATE) - len(_VOLUMES_NOT_TO_BACKUP)) * volume_count + ) + assert count_volumes_to_skip == len(_VOLUMES_NOT_TO_BACKUP) * volume_count + + assert mock_backup_volume.call_count == ( + count_vloumes_to_backup if requires_backup else 0 + ) + + volumes = await get_unused_dynamc_sidecar_volumes(volumes_manager_docker_client) + assert len(volumes) == 0 + + +@pytest.mark.parametrize("requires_backup", [True, False]) +async def test_remove_misisng_volume_does_not_raise_error( + requires_backup: bool, + initialized_app: FastAPI, + volumes_manager_docker_client: Docker, +): + await remove_volume( + initialized_app, + volumes_manager_docker_client, + volume_name="this-volume-does-not-exist", + requires_backup=requires_backup, + ) + + +async def test_get_volume_details( + volumes_path: Path, + volumes_manager_docker_client: Docker, + create_dynamic_sidecar_volumes: Callable[[NodeID, bool], Awaitable[set[str]]], +): + + volume_names = await create_dynamic_sidecar_volumes(uuid4(), False) # noqa: FBT003 + for volume_name in volume_names: + volume_details = await get_volume_details( + volumes_manager_docker_client, volume_name=volume_name + ) + print(volume_details) + volume_prefix = f"{volumes_path}".replace("/", "_").strip("_") + assert volume_details.labels.directory_name.startswith(volume_prefix) diff --git a/services/agent/tests/unit/test_services_volumes_manager.py b/services/agent/tests/unit/test_services_volumes_manager.py new file mode 100644 index 00000000000..5fae32710df --- /dev/null +++ b/services/agent/tests/unit/test_services_volumes_manager.py @@ -0,0 +1,187 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from copy import deepcopy +from dataclasses import dataclass, field +from datetime import timedelta +from pathlib import Path +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +import pytest_mock +from aiodocker.docker import Docker +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from servicelib.rabbitmq.rpc_interfaces.agent.errors import ( + NoServiceVolumesFoundRPCError, +) +from simcore_service_agent.services.volumes_manager import VolumesManager +from tenacity import ( + AsyncRetrying, + retry_if_exception_type, + stop_after_delay, + wait_fixed, +) +from utils import VOLUMES_TO_CREATE, get_source + + +@dataclass +class MockedVolumesProxy: + service_run_id: ServiceRunID + volumes: set[str] = field(default_factory=set) + + def add_unused_volumes_for_service(self, node_id: NodeID) -> None: + for folder_name in VOLUMES_TO_CREATE: + volume_name = get_source( + self.service_run_id, node_id, Path("/apath") / folder_name + ) + self.volumes.add(volume_name) + + def remove_volume(self, volume_name: str) -> None: + self.volumes.remove(volume_name) + + def get_unused_dynamc_sidecar_volumes(self) -> set[str]: + return deepcopy(self.volumes) + + +@pytest.fixture +async def mock_docker_utils( + mocker: pytest_mock.MockerFixture, service_run_id: ServiceRunID +) -> MockedVolumesProxy: + proxy = MockedVolumesProxy(service_run_id) + + async def _remove_volume( + app: FastAPI, docker: Docker, *, volume_name: str, requires_backup: bool + ) -> 
None: + proxy.remove_volume(volume_name) + + async def _get_unused_dynamc_sidecar_volumes(app: FastAPI) -> set[str]: + return proxy.get_unused_dynamc_sidecar_volumes() + + mocker.patch( + "simcore_service_agent.services.volumes_manager.remove_volume", + side_effect=_remove_volume, + ) + + mocker.patch( + "simcore_service_agent.services.volumes_manager.get_unused_dynamc_sidecar_volumes", + side_effect=_get_unused_dynamc_sidecar_volumes, + ) + + return proxy + + +@pytest.fixture +def spy_remove_volume( + mocker: pytest_mock.MockerFixture, mock_docker_utils: MockedVolumesProxy +) -> AsyncMock: + return mocker.spy(mock_docker_utils, "remove_volume") + + +@pytest.fixture +async def volumes_manager() -> VolumesManager: + # NOTE: background tasks are disabled on purpose + return VolumesManager( + app=FastAPI(), + book_keeping_interval=timedelta(seconds=1), + volume_cleanup_interval=timedelta(seconds=1), + remove_volumes_inactive_for=timedelta(seconds=0.1).total_seconds(), + ) + + +@pytest.mark.parametrize("service_count", [1, 3]) +async def test_volumes_manager_remove_all_volumes( + service_count: int, + mock_docker_utils: MockedVolumesProxy, + spy_remove_volume: AsyncMock, + volumes_manager: VolumesManager, +): + assert spy_remove_volume.call_count == 0 + + for _ in range(service_count): + mock_docker_utils.add_unused_volumes_for_service(uuid4()) + assert spy_remove_volume.call_count == 0 + assert ( + len(mock_docker_utils.get_unused_dynamc_sidecar_volumes()) + == len(VOLUMES_TO_CREATE) * service_count + ) + + await volumes_manager.remove_all_volumes() + assert spy_remove_volume.call_count == len(VOLUMES_TO_CREATE) * service_count + assert len(mock_docker_utils.get_unused_dynamc_sidecar_volumes()) == 0 + + +async def test_volumes_manager_remove_service_volumes( + mock_docker_utils: MockedVolumesProxy, + spy_remove_volume: AsyncMock, + volumes_manager: VolumesManager, +): + assert spy_remove_volume.call_count == 0 + mock_docker_utils.add_unused_volumes_for_service(uuid4()) + node_id_to_remvoe = uuid4() + mock_docker_utils.add_unused_volumes_for_service(node_id_to_remvoe) + + assert spy_remove_volume.call_count == 0 + assert ( + len(mock_docker_utils.get_unused_dynamc_sidecar_volumes()) + == len(VOLUMES_TO_CREATE) * 2 + ) + + await volumes_manager.remove_service_volumes(node_id_to_remvoe) + + assert spy_remove_volume.call_count == len(VOLUMES_TO_CREATE) + unused_volumes = mock_docker_utils.get_unused_dynamc_sidecar_volumes() + assert len(unused_volumes) == len(VOLUMES_TO_CREATE) + for volume_name in unused_volumes: + assert f"{node_id_to_remvoe}" not in volume_name + + +@pytest.fixture +async def mock_wait_for_unused_service_volumes( + mocker: pytest_mock.MockerFixture, +) -> None: + mocker.patch( + "simcore_service_agent.services.volumes_manager._WAIT_FOR_UNUSED_SERVICE_VOLUMES", + timedelta(seconds=2), + ) + + +async def test_volumes_manager_remove_service_volumes_when_volume_does_not_exist( + mock_wait_for_unused_service_volumes: None, + volumes_manager: VolumesManager, +): + not_existing_service = uuid4() + with pytest.raises(NoServiceVolumesFoundRPCError): + await volumes_manager.remove_service_volumes(not_existing_service) + + +async def test_volumes_manager_periodic_task_cleanup( + mock_docker_utils: MockedVolumesProxy, + spy_remove_volume: AsyncMock, + volumes_manager: VolumesManager, +): + async def _run_volumes_clennup() -> None: + await volumes_manager._bookkeeping_task() # noqa: SLF001 + await volumes_manager._periodic_volume_cleanup_task() # noqa: SLF001 + + await 
_run_volumes_clennup() + assert spy_remove_volume.call_count == 0 + + mock_docker_utils.add_unused_volumes_for_service(uuid4()) + await _run_volumes_clennup() + assert spy_remove_volume.call_count == 0 + + # wait for the amount of time to pass + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(1), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + await _run_volumes_clennup() + assert spy_remove_volume.call_count == len(VOLUMES_TO_CREATE) + assert len(mock_docker_utils.get_unused_dynamc_sidecar_volumes()) == 0 diff --git a/services/agent/tests/unit/utils.py b/services/agent/tests/unit/utils.py new file mode 100644 index 00000000000..8eeb23138d4 --- /dev/null +++ b/services/agent/tests/unit/utils.py @@ -0,0 +1,19 @@ +from pathlib import Path +from typing import Final + +from models_library.projects_nodes_io import NodeID + + +def get_source(run_id: str, node_id: NodeID, full_volume_path: Path) -> str: + # NOTE: volume name is not trimmed here, but it's ok for the tests + reversed_path = f"{full_volume_path}"[::-1].replace("/", "_") + return f"dyv_{run_id}_{node_id}_{reversed_path}" + + +VOLUMES_TO_CREATE: Final[list[str]] = [ + "inputs", + "outputs", + "workspace", + "work", + "shared-store", +] diff --git a/services/api-server/.env-devel b/services/api-server/.env-devel index 29f25cd6e70..29d4830d47f 100644 --- a/services/api-server/.env-devel +++ b/services/api-server/.env-devel @@ -13,6 +13,8 @@ # API_SERVER_DEV_FEATURES_ENABLED=1 +API_SERVER_REMOTE_DEBUG_PORT=3000 +LOG_FORMAT_LOCAL_DEV_ENABLED=1 DEBUG=0 # SEE services/api-server/src/simcore_service_api_server/auth_security.py @@ -27,7 +29,7 @@ POSTGRES_DB=test POSTGRES_HOST=127.0.0.1 # Enables debug -SC_BOOT_MODE=debug-ptvsd +SC_BOOT_MODE=debug # webserver @@ -44,6 +46,3 @@ STORAGE_HOST=storage # director DIRECTOR_V2_HOST=director-v2 - - -API_SERVER_TRACING=null diff --git a/services/api-server/.gitignore b/services/api-server/.gitignore index 6c9c6bbe82d..c63eb09127c 100644 --- a/services/api-server/.gitignore +++ b/services/api-server/.gitignore @@ -1,2 +1,5 @@ # outputs from makefile client + +# openapi for development mode +openapi-*.json diff --git a/services/api-server/Dockerfile b/services/api-server/Dockerfile index 100e0a00320..aa8917b71ea 100644 --- a/services/api-server/Dockerfile +++ b/services/api-server/Dockerfile @@ -1,5 +1,18 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: # cd sercices/api-server @@ -10,12 +23,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=pcrespov -RUN set -eux && \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ apt-get update && \ - apt-get install 
-y gosu && \ - rm -rf /var/lib/apt/lists/* && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -51,32 +70,34 @@ EXPOSE 3000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" + -RUN pip install --no-cache-dir --upgrade \ - pip~=23.0 \ + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/api-server/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt + # --------------------------Prod-depends-only stage ------------------- @@ -85,17 +106,20 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/api-server [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/api-server /build/services/api-server +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/api-server -RUN pip --no-cache-dir install -r requirements/prod.txt &&\ - pip --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/api-server,target=/build/services/api-server,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + # --------------------------Production stage ------------------- @@ -105,15 +129,19 @@ RUN pip --no-cache-dir install -r requirements/prod.txt &&\ # + /home/scu $HOME = WORKDIR # + services/api-server [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -121,10 +149,12 @@ COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} COPY --chown=scu:scu services/api-server/docker services/api-server/docker RUN chmod +x services/api-server/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ + +HEALTHCHECK --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/api-server/docker/healthcheck.py", "http://localhost:8000/"] ENTRYPOINT [ "/bin/sh", "services/api-server/docker/entrypoint.sh" ] @@ -139,7 +169,7 @@ CMD 
["/bin/sh", "services/api-server/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development diff --git a/services/api-server/Makefile b/services/api-server/Makefile index 20ccba061a5..e923de11db8 100644 --- a/services/api-server/Makefile +++ b/services/api-server/Makefile @@ -4,158 +4,100 @@ include ../../scripts/common.Makefile include ../../scripts/common-service.Makefile +# Check that given variables are set and all have non-empty values, +# die with an error otherwise. +# +# Params: +# 1. Variable name(s) to test. +# 2. (optional) Error message to print. +guard-%: + @ if [ "${${*}}" = "" ]; then \ + echo "Environment variable $* not set"; \ + exit 1; \ + fi + .PHONY: reqs reqs: ## compiles pip requirements (.in -> .txt) @$(MAKE_C) requirements reqs -# DEVELOPMENT TOOLS ########################################################################### .env: cp .env-devel $@ -DOCKER_COMPOSE_EXTRA_FILE:=.docker-compose-extra-ignore.yml -$(DOCKER_COMPOSE_EXTRA_FILE): - cp $(CURDIR)/tests/utils/docker-compose.yml $@ +define _create_and_validate_openapi + # generating openapi specs file under $< (NOTE: Skips DEV FEATURES since this OAS is the 'offically released'!) + @source .env; \ + export API_SERVER_DEV_FEATURES_ENABLED=$1; \ + python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ -.PHONY: db-tables -db-tables: .env ## upgrades and create tables [for development] - # Upgrading and creating tables - export $(shell grep -v '^#' $< | xargs -d '\n'); \ - python3 tests/utils/init-pg.py + # validates OAS file: $@ + docker run --rm \ + --volume "$(CURDIR):/local" \ + $(OPENAPI_GENERATOR_IMAGE) validate --input-spec /local/$@ +endef -.PHONY: db-migration -db-migration: .env ## runs discover and upgrade on running pg-db [for development] - # Creating tables - export $(shell grep -v '^#' $< | xargs -d '\n'); \ - sc-pg discover && sc-pg upgrade +.PHONY: openapi-specs openapi.json openapi-dev.json +openapi-specs: openapi.json -.PHONY: down -down: $(DOCKER_COMPOSE_EXTRA_FILE)## stops pg fixture - # stopping extra services - -@docker-compose -f $< down - # killing any process using port 8000 (app) and 3000 (debug) - -@fuser --kill --verbose --namespace tcp 8000 - -@fuser --kill --verbose --namespace tcp 3000 +openapi.json: .env ## Creates OAS (API_SERVER_DEV_FEATURES_ENABLED=0) + @$(call _create_and_validate_openapi,0) +openapi-dev.json: .env ## Creates OAS under development (API_SERVER_DEV_FEATURES_ENABLED=1) + @$(call _create_and_validate_openapi,1) -# TODO: These are all different ways to run the server for dev-purposes . 
-.PHONY: run-devel -run-devel: .env $(DOCKER_COMPOSE_EXTRA_FILE) down - # Starting db (under $<) - docker-compose --file $(DOCKER_COMPOSE_EXTRA_FILE) up --detach - sleep 2 - # Creating db-tables: user=key, password=secret - @$(MAKE) db-tables - # start app (under $<) - uvicorn simcore_service_api_server.__main__:the_app \ - --reload --reload-dir $(SRC_DIR) \ - --port=8000 --host=0.0.0.0 \ - --log-level debug +CLIENT_RELEASE=0.5.0 +CLIENT_OAS_PATH=osparc-simcore-clients-$(CLIENT_RELEASE)/api/openapi.json +openapi-client.json: + # download artifacts for $(CLIENT_RELEASE) + @temp_dir=$$(mktemp --directory); \ + cd "$$temp_dir"; \ + wget --output-document=artifacts.zip "https://github.com/ITISFoundation/osparc-simcore-clients/archive/refs/tags/v$(CLIENT_RELEASE).zip"; \ + unzip artifacts.zip "$(CLIENT_OAS_PATH)";\ + mv "$(CLIENT_OAS_PATH)" "$(CURDIR)/$@";\ + rm -rf "$$temp_dir" -.PHONY: run-fake run-fake-devel -run-fake: # starts a fake server in a container - docker run -it \ - --env-file .env-fake-standalone \ - --publish 8000:8000 \ - local/${APP_NAME}:production - # Open http://172.0.0.1:8000/dev/doc +openapi-client-master.json: + wget -O $@ https://raw.githubusercontent.com/ITISFoundation/osparc-simcore-clients/master/api/openapi.json -run-fake-devel: # starts a fake server in a dev-container - docker run -it \ - --env-file .env-fake-standalone \ - --env SC_BOOT_MODE=debug-ptvsd \ - --env LOG_LEVEL=debug \ - --env DEBUG=true \ - --publish 8000:8000 \ - --publish 3006:3000 \ - --volume $(REPO_BASE_DIR)/services/api-server:/devel/services/api-server \ - --volume $(REPO_BASE_DIR)/packages:/devel/packages \ - local/${APP_NAME}:development - # Open http://172.0.0.1:8000/dev/doc +define _openapi_diff_inputs + $(SCRIPTS_DIR)/openapi-diff.bash diff $(foreach f,$^,/specs/$f) --format json > $@ + $(SCRIPTS_DIR)/openapi-diff.bash breaking $(foreach f,$^,/specs/$f) --fail-on ERR > /dev/null +endef -# BUILD ########################################################################### +# Examples: +# make openapi-dev-diff.json +# make openapi-client-master-diff.json +# make openapi-client-diff.json +openapi-%-diff.json: openapi.json openapi-%.json ## Diffs against newer or older openapi-%.json and checks backwards compatibility + $(call _openapi_diff_inputs) -.PHONY: openapi-specs openapi.json -openapi-specs: openapi.json -openapi.json: .env - # generating openapi specs file under $< (NOTE: Skips DEV FEATURES since this OAS is the 'offically released'!) - @set -o allexport; \ - source .env; \ - set +o allexport; \ - export API_SERVER_DEV_FEATURES_ENABLED=0; \ - python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ +openapi-diff.md: guard-OPENAPI_JSON_BASE_URL openapi.json ## Diffs against a remote openapi.json. E.g. 
OPENAPI_JSON_BASE_URL=https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/refs/heads/master/services/api-server/openapi.json + $(SCRIPTS_DIR)/openapi-diff.bash diff $(OPENAPI_JSON_BASE_URL) /specs/openapi.json --format markup --flatten-allof --exclude-elements title,description > $@ + $(SCRIPTS_DIR)/openapi-diff.bash breaking $(OPENAPI_JSON_BASE_URL) /specs/openapi.json --flatten-allof --fail-on ERR - # validates OAS file: $@ - @cd $(CURDIR); \ - $(SCRIPTS_DIR)/openapi-generator-cli.bash validate --input-spec /local/$@ +# SEE https://schemathesis.readthedocs.io/en/stable/index.html +APP_URL:=http://$(get_my_ip).nip.io:8006 +test-api: ## Runs schemathesis against development server (NOTE: make up-devel first) + @docker run schemathesis/schemathesis:stable run \ + "$(APP_URL)/api/v0/openapi.json" -# GENERATION python client ########################################################################### -.PHONY: python-client generator-help -# SEE https://openapi-generator.tech/docs/usage#generate -# SEE https://openapi-generator.tech/docs/generators/python -# -# TODO: put instead to additional-props.yaml and --config=openapi-generator/python-config.yaml -# TODO: copy this code to https://github.com/ITISFoundation/osparc-simcore-python-client/blob/master/Makefile -# -# NOTE: assumes this repo exists -GIT_USER_ID := ITISFoundation -GIT_REPO_ID := osparc-simcore-python-client - -GENERATOR_NAME := python - -ADDITIONAL_PROPS := \ - generateSourceCodeOnly=false\ - hideGenerationTimestamp=true\ - library=urllib3\ - packageName=osparc\ - packageUrl=https://github.com/$(GIT_USER_ID)/${GIT_REPO_ID}.git\ - packageVersion=$(APP_VERSION)\ - projectName=osparc-simcore-python-api -ADDITIONAL_PROPS := $(foreach prop,$(ADDITIONAL_PROPS),$(strip $(prop))) - -null := -space := $(null) # -comma := , - -client: - # cloning $(GIT_USER_ID)/$(GIT_REPO_ID) -> $@ - git clone git@github.com:$(GIT_USER_ID)/$(GIT_REPO_ID).git $@ - # TODO: if fails, add -b - cd client; git checkout "upgrade-${APP_VERSION}" - - -python-client: client openapi.json ## runs python client generator - # copies latest version of the openapi - @cp openapi.json client/api/ - # generates - cd $(CURDIR); \ - $(SCRIPTS_DIR)/openapi-generator-cli.bash generate \ - --generator-name=$(GENERATOR_NAME) \ - --git-user-id=$(GIT_USER_ID)\ - --git-repo-id=$(GIT_REPO_ID)\ - --http-user-agent="osparc-api/$(APP_VERSION)/python"\ - --input-spec=/local/openapi.json \ - --output=/local/client \ - --additional-properties=$(subst $(space),$(comma),$(strip $(ADDITIONAL_PROPS)))\ - --package-name=osparc\ - --release-note="Updated to $(APP_VERSION)" - - - -generator-help: ## help on client-api generator - # generate help - @$(SCRIPTS_DIR)/openapi-generator-cli.bash help generate - # generator config help - @$(SCRIPTS_DIR)/openapi-generator-cli.bash config-help -g $(GENERATOR_NAME) +test-pacts: guard-PACT_BROKER_USERNAME guard-PACT_BROKER_PASSWORD guard-PACT_BROKER_URL _check_venv_active ## Test pacts + pytest tests/unit/pact_broker/test* + +# Usage: +# PACT_BROKER_USERNAME=your_username \ +# PACT_BROKER_PASSWORD=your_password \ +# PACT_BROKER_URL=your_broker_url \ +# make test-pacts diff --git a/services/api-server/README.md b/services/api-server/README.md index 3ad0eed62fa..79548d831a8 100644 --- a/services/api-server/README.md +++ b/services/api-server/README.md @@ -1,49 +1,72 @@ # api-server -[![image-size]](https://microbadger.com/images/itisfoundation/api-server. 
"More on itisfoundation/api-server.:staging-latest image") -[![image-badge]](https://microbadger.com/images/itisfoundation/api-server "More on Public API Server image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/api-server "More on Public API Server image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/api-server "More on Public API Server image in registry") - Platform's public API server - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/api-server./staging-latest.svg?label=api-server.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/api-server.svg -[image-version]https://images.microbadger.com/badges/version/itisfoundation/api-server.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/api-server.svg - - ## Development -Setup environment - -```cmd -make devenv -source .venv/bin/activate -cd services/api-server -make install-dev +Since the API server communicates with (almost) all other services, running it requires running the entire osparc (see the section [development build](../../README.md#development-build) of the main README) + +Open the following sites and use the test credentials user=key, password=secret to manually test the API: + + http://127.0.0.1.nip.io:8006/dev/doc: swagger type of documentation + +### Profiling requests to the api server +When in development mode (the environment variable `API_SERVER_DEV_FEATURES_ENABLED` is =1 in the running container) one can profile calls to the API server directly from the client side. This is done by setting the custom header `x-profile-api-server` equal to `true` in the request. In that case the the reponse will be of media type `application/x-ndjson` and the final line of the response will be a json object whose `profile` key holds the profile. Here's an example of how the "/v0/me" endpoint of the api server can be profiled +: +```python +from httpx import AsyncClient, BasicAuth +headers: dict[str, str] = {"x-profile-api-server": "true"} +async with AsyncClient(base_url="", auth=BasicAuth(username="", password="")) as client: + async with client.stream("GET", f"/v0/me", timeout=20, headers=headers) as response: + async for ll in response.aiter_lines(): + line = json.loads(ll) + if profile := line.get("profile"): + print(profile) + else: + pprint(line) ``` -Then +## Clients + +- Python client for osparc-simcore API can be found in https://github.com/ITISFoundation/osparc-simcore-client) -```cmd -make run-devel -``` -will start the api-server in development-mode together with a postgres db initialized with test data. Open the following sites and use the test credentials ``user=key, password=secret`` to manually test the API: +## Backwards compatibility of the server API +The public API is required to be backwards compatible in the sense that a [client](https://github.com/ITISFoundation/osparc-simcore-clients) which is compatible with API version `N` should also be compatible with version `N+1`. Because of this, upgrading the server should never force a user to upgrade their client: The client which they have is already compatible with the new server. 
-- http://127.0.0.1:8000/docs: redoc documentation -- http://127.0.0.1:8000/dev/docs: swagger type of documentation +```mermaid + +flowchart LR +    subgraph Client +        direction LR +        A2(dev branch) .->|"🔙"| B2(master) .->|"🔙"| C2(staging) .->|"🔙"| D2(production) +        A2(dev branch) ==>|"🔨"| B2(master) ==>|"🔨"| C2(staging) ==>|"🔨"| D2(production) +    end +    subgraph Server +        direction LR +        A1(dev branch) ~~~ B1(master) ~~~ C1(staging) ~~~ D1(production) +        A1(dev branch) ==>|"🔨"| B1(master) ==>|"🔨"| C1(staging) ==>|"🔨"| D1(production) +    end + +    A1 .->|"🔙"| A2 +    B1 .->|"🔙"| B2 +    C1 .->|"🔙"| C2 +    D1 .->|"🔙"| D2 + +    A2 ~~~ A1 +    B2 ~~~ B1 +    C2 ~~~ C1 +    D2 ~~~ D1 +``` + +In this diagram the development workflow/progress is indicated with 🔨-arrows, both for the server and for the client. To see which client version a given server version is compatible with, one can follow the backwards 🔙-arrows from that server version. E.g. one sees that the server in `staging` is compatible with the client in `staging` and in `production`. Conversely, to see which versions of the server a given client is compatible with, one can follow the dotted lines backwards from the client version. E.g. the client in `master` is seen to be compatible with the server versions in `master` and in `dev branch`. ## References - [Design patterns for modern web APIs](https://blog.feathersjs.com/design-patterns-for-modern-web-apis-1f046635215) by D. Luecke - [API Design Guide](https://cloud.google.com/apis/design/) by Google Cloud -## Clients -- [Python client for osparc-simcore API](https://github.com/ITISFoundation/osparc-simcore-python-client) ## Acknowledgments diff --git a/services/api-server/VERSION b/services/api-server/VERSION index 17b2ccd9bf9..a3df0a6959e 100644 --- a/services/api-server/VERSION +++ b/services/api-server/VERSION @@ -1 +1 @@ -0.4.3 +0.8.0 diff --git a/services/api-server/docker/boot.sh b/services/api-server/docker/boot.sh index 666b245d22a..ea12e3446c9 100755 --- a/services/api-server/docker/boot.sh +++ b/services/api-server/docker/boot.sh @@ -18,11 +18,20 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/api-server || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/api-server + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-install debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # RUNNING application ---------------------------------------- @@ -30,12 +39,12 @@ APP_LOG_LEVEL=${API_SERVER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" !
-path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/api-server/src/simcore_service_api_server && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${API_SERVER_REMOTE_DEBUG_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/api-server/docker/entrypoint.sh b/services/api-server/docker/entrypoint.sh index c1bfc513856..b579236b562 100755 --- a/services/api-server/docker/entrypoint.sh +++ b/services/api-server/docker/entrypoint.sh @@ -64,11 +64,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - echo "$INFO Starting $* ..." echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" diff --git a/services/api-server/docker/healthcheck.py b/services/api-server/docker/healthcheck.py old mode 100644 new mode 100755 index 551868d3cc1..808782f3261 --- a/services/api-server/docker/healthcheck.py +++ b/services/api-server/docker/healthcheck.py @@ -6,9 +6,10 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: diff --git a/services/api-server/docs/api-server.drawio.svg b/services/api-server/docs/api-server.drawio.svg new file mode 100644 index 00000000000..98f7dcfdfc6 --- /dev/null +++ b/services/api-server/docs/api-server.drawio.svg @@ -0,0 +1,994 @@ + + + + + + + + + + + + + + + +
+ [api-server.drawio.svg, 994 lines: architecture diagram of the api-server. Layers CONTROLLER, SERVICE, REPOSITORY and CLIENTS, annotated "Dependencies go inwards"; rest and rpc controllers for /solvers, /programs and /studies; SolverService, ProgramsService, StudyService, JobService and CatalogService in the service layer; ApiKeysRepository and UsersRepository over sa[asyncpg]; and clients such as the httpx AuthSession, WbApiRpcClient and the RabbitMQ RPCClient towards simcore_service_webserver and simcore_service_catalog.]
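The layering in the diagram amounts to a simple dependency rule: controllers depend on services, services depend on repositories and clients, and nothing points outwards. The sketch below illustrates that wiring; the class names follow the diagram labels, but every constructor, method and return value here is hypothetical and not the actual api-server code.

```python
# Illustrative sketch of the "dependencies go inwards" layering from the diagram.
# Names mirror the diagram labels; all signatures here are hypothetical.
from dataclasses import dataclass
from typing import Any


@dataclass
class UsersRepository:
    # REPOSITORY layer: owns database access (sa[asyncpg] in the diagram)
    engine: Any

    async def get_active_user(self, user_id: int) -> dict[str, Any]:
        # a real implementation would run a SELECT through the async engine
        return {"id": user_id, "active": True}


@dataclass
class SolverService:
    # SERVICE layer: depends on repositories/clients, never on controllers
    users_repository: UsersRepository

    async def list_latest_solvers(self, user_id: int) -> list[dict[str, Any]]:
        user = await self.users_repository.get_active_user(user_id)
        assert user["active"]  # business rules live here, not in the controller
        return []


async def list_solvers_handler(service: SolverService, user_id: int) -> list[dict[str, Any]]:
    # CONTROLLER layer (rest/rpc): parses the request and delegates to the service
    return await service.list_latest_solvers(user_id)
```

Because each layer receives its inner collaborator through its constructor, any layer can be replaced by a stub in unit tests without touching the HTTP handlers.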
diff --git a/services/api-server/openapi.json b/services/api-server/openapi.json index d1119e051ab..839877be699 100644 --- a/services/api-server/openapi.json +++ b/services/api-server/openapi.json @@ -1,13 +1,9 @@ { - "openapi": "3.0.2", + "openapi": "3.1.0", "info": { - "title": "osparc.io web API", - "description": "osparc-simcore public web API specifications", - "version": "0.4.2", - "x-logo": { - "url": "https://raw.githubusercontent.com/ITISFoundation/osparc-manual/b809d93619512eb60c827b7e769c6145758378d0/_media/osparc-logo.svg", - "altText": "osparc-simcore logo" - } + "title": "osparc.io public API", + "description": "osparc-simcore public API specifications", + "version": "0.8.0" }, "paths": { "/v0/meta": { @@ -48,6 +44,66 @@ } } } + }, + "404": { + "description": "User not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } } }, "security": [ @@ -83,6 +139,66 @@ } } }, + "404": { + "description": "User not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, "422": { "description": "Validation Error", "content": { @@ -107,7 +223,7 @@ "files" ], "summary": "List Files", - "description": "Lists all files stored in the system", + "description": "\ud83d\udea8 **Deprecated**: This endpoint is deprecated and will be removed in a future release.\nPlease use `GET /v0/files/page` instead.\n\n\n\nLists all files stored in the system\n\nAdded in *version 0.5*: \n\nRemoved in *version 0.7*: This endpoint is deprecated and will be removed in a future version", "operationId": "list_files", "responses": { "200": { @@ -115,11 +231,71 @@ "content": { "application/json": { "schema": { - "title": "Response List Files V0 Files Get", - "type": "array", "items": { 
"$ref": "#/components/schemas/File" - } + }, + "type": "array", + "title": "Response List Files V0 Files Get" + } + } + } + }, + "404": { + "description": "File not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" } } } @@ -140,26 +316,38 @@ "summary": "Upload File", "description": "Uploads a single file to the system", "operationId": "upload_file", + "security": [ + { + "HTTPBasic": [] + } + ], "parameters": [ { + "name": "content-length", + "in": "header", "required": false, "schema": { - "title": "Content-Length", - "type": "string" - }, - "name": "content-length", - "in": "header" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content-Length" + } } ], "requestBody": { + "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/Body_upload_file_v0_files_content_put" } } - }, - "required": true + } }, "responses": { "200": { @@ -172,114 +360,62 @@ } } }, - "422": { - "description": "Validation Error", + "404": { + "description": "File not found", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/files/{file_id}": { - "get": { - "tags": [ - "files" - ], - "summary": "Get File", - "description": "Gets metadata for a given file resource", - "operationId": "get_file", - "parameters": [ - { - "required": true, - "schema": { - "title": "File Id", - "type": "string", - "format": "uuid" - }, - "name": "file_id", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + }, + "429": { + "description": "Too many requests", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/File" + "$ref": "#/components/schemas/ErrorGet" } } } }, - "404": { - "description": "File not found" - }, - "422": { - "description": "Validation Error", + "500": { + "description": "Internal server error", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/files/{file_id}/content": { - "get": { - "tags": [ - "files" - ], - "summary": "Download File", - "operationId": "download_file", - "parameters": [ - { - "required": true, - "schema": { - "title": "File Id", - "type": "string", - "format": "uuid" - }, - "name": "file_id", - "in": "path" - } - ], - "responses": { - "307": { - "description": "Successful Response" }, - "404": { - "description": "File 
not found" + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } }, - "200": { - "description": "Returns a arbitrary binary data", + "503": { + "description": "Service unavailable", "content": { - "application/octet-stream": { + "application/json": { "schema": { - "type": "string", - "format": "binary" + "$ref": "#/components/schemas/ErrorGet" } - }, - "text/plain": { + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { "schema": { - "type": "string" + "$ref": "#/components/schemas/ErrorGet" } } } @@ -294,215 +430,105 @@ } } } - }, + } + }, + "post": { + "tags": [ + "files" + ], + "summary": "Get Upload Links", + "description": "Get upload links for uploading a file to storage", + "operationId": "get_upload_links", "security": [ { "HTTPBasic": [] } - ] - } - }, - "/v0/solvers": { - "get": { - "tags": [ - "solvers" ], - "summary": "List Solvers", - "description": "Lists all available solvers (latest version)", - "operationId": "list_solvers", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserFileToProgramJob" + }, + { + "$ref": "#/components/schemas/UserFile" + } + ], + "title": "Client File" + } + } + } + }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "title": "Response List Solvers V0 Solvers Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/Solver" - } + "$ref": "#/components/schemas/ClientFileUploadData" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/releases": { - "get": { - "tags": [ - "solvers" - ], - "summary": "Lists All Releases", - "description": "Lists all released solvers (all released versions)", - "operationId": "list_solvers_releases", - "responses": { - "200": { - "description": "Successful Response", + }, + "404": { + "description": "File not found", "content": { "application/json": { "schema": { - "title": "Response List Solvers Releases V0 Solvers Releases Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/Solver" - } + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/latest": { - "get": { - "tags": [ - "solvers" - ], - "summary": "Get Latest Release of a Solver", - "description": "Gets latest release of a solver", - "operationId": "get_solver", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + }, + "429": { + "description": "Too many requests", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Solver" + "$ref": "#/components/schemas/ErrorGet" } } } }, - "422": { - "description": "Validation Error", + "500": { + "description": "Internal server error", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/releases": { - "get": { - "tags": [ - "solvers" - ], - "summary": "List Solver Releases", - 
"description": "Lists all releases of a given solver", - "operationId": "list_solver_releases", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + }, + "502": { + "description": "Unexpected error when communicating with backend service", "content": { "application/json": { "schema": { - "title": "Response List Solver Releases V0 Solvers Solver Key Releases Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/Solver" - } + "$ref": "#/components/schemas/ErrorGet" } } } }, - "422": { - "description": "Validation Error", + "503": { + "description": "Service unavailable", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}": { - "get": { - "tags": [ - "solvers" - ], - "summary": "Get Solver Release", - "description": "Gets a specific release of a solver", - "operationId": "get_solver_release", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" }, - { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + "504": { + "description": "Request to a backend service timed out.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Solver" + "$ref": "#/components/schemas/ErrorGet" } } } @@ -517,41 +543,32 @@ } } } - }, - "security": [ - { - "HTTPBasic": [] - } - ] + } } }, - "/v0/solvers/{solver_key}/releases/{version}/jobs": { + "/v0/files/{file_id}": { "get": { "tags": [ - "solvers" + "files" ], - "summary": "List Jobs", - "description": "List of all jobs in a specific released solver", - "operationId": "list_jobs", - "parameters": [ + "summary": "Get File", + "description": "Gets metadata for a given file resource", + "operationId": "get_file", + "security": [ { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" - }, + "HTTPBasic": [] + } + ], + "parameters": [ { + "name": "file_id", + "in": "path", "required": true, "schema": { - "title": "Version", - "type": "string" - }, - "name": "version", - "in": "path" + "type": "string", + "format": "uuid", + "title": "File Id" + } } ], "responses": { @@ -560,77 +577,67 @@ "content": { "application/json": { "schema": { - "title": "Response List Jobs V0 Solvers Solver Key Releases Version Jobs Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/Job" - } + "$ref": "#/components/schemas/File" } } } }, - "422": { - "description": "Validation Error", + "404": { + "description": "File not found", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } 
- ] - }, - "post": { - "tags": [ - "solvers" - ], - "summary": "Create Job", - "description": "Creates a job in a specific release with given inputs.\n\nNOTE: This operation does **not** start the job", - "operationId": "create_job", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" }, - { - "required": true, - "schema": { - "title": "Version", - "type": "string" - }, - "name": "version", - "in": "path" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/JobInputs" + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } } } }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", + "500": { + "description": "Internal server error", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Job" + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" } } } @@ -645,61 +652,96 @@ } } } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}": { - "get": { + } + }, + "delete": { "tags": [ - "solvers" + "files" ], - "summary": "Get Job", - "description": "Gets job of a given solver", - "operationId": "get_job", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" - }, + "summary": "Delete File", + "operationId": "delete_file", + "security": [ { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" - }, + "HTTPBasic": [] + } + ], + "parameters": [ { + "name": "file_id", + "in": "path", "required": true, "schema": { - "title": "Job Id", "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" + "format": "uuid", + "title": "File Id" + } } ], "responses": { "200": { "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "404": { + "description": "File not found", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Job" + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend 
service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" } } } @@ -714,62 +756,79 @@ } } } - }, + } + } + }, + "/v0/files:search": { + "get": { + "tags": [ + "files" + ], + "summary": "Search Files Page", + "description": "Search files", + "operationId": "search_files_page", "security": [ { "HTTPBasic": [] } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:start": { - "post": { - "tags": [ - "solvers" ], - "summary": "Start Job", - "description": "Starts job job_id created with the solver solver_key:version\n\nNew in *version 0.4.3*: cluster_id", - "operationId": "start_job", "parameters": [ { - "required": true, + "name": "sha256_checksum", + "in": "query", + "required": false, "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" + "anyOf": [ + { + "type": "string", + "pattern": "^[a-fA-F0-9]{64}$" + }, + { + "type": "null" + } + ], + "title": "Sha256 Checksum" + } }, { - "required": true, + "name": "file_id", + "in": "query", + "required": false, "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "File Id" + } }, { - "required": true, + "name": "limit", + "in": "query", + "required": false, "schema": { - "title": "Job Id", - "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } }, { + "name": "offset", + "in": "query", "required": false, "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "query" + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } } ], "responses": { @@ -778,143 +837,67 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/JobStatus" + "$ref": "#/components/schemas/Page_File_" } } } }, - "422": { - "description": "Validation Error", + "404": { + "description": "File not found", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:stop": { - "post": { - "tags": [ - "solvers" - ], - "summary": "Stop Job", - "operationId": "stop_job", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" }, - { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - 
"name": "version", - "in": "path" + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } }, - { - "required": true, - "schema": { - "title": "Job Id", - "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + "500": { + "description": "Internal server error", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Job" + "$ref": "#/components/schemas/ErrorGet" } } } }, - "422": { - "description": "Validation Error", + "502": { + "description": "Unexpected error when communicating with backend service", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/ErrorGet" } } } - } - }, - "security": [ - { - "HTTPBasic": [] - } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:inspect": { - "post": { - "tags": [ - "solvers" - ], - "summary": "Inspect Job", - "operationId": "inspect_job", - "parameters": [ - { - "required": true, - "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" }, - { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } }, - { - "required": true, - "schema": { - "title": "Job Id", - "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", + "504": { + "description": "Request to a backend service timed out.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/JobStatus" + "$ref": "#/components/schemas/ErrorGet" } } } @@ -929,60 +912,98 @@ } } } - }, + } + } + }, + "/v0/files/{file_id}:abort": { + "post": { + "tags": [ + "files" + ], + "summary": "Abort Multipart Upload", + "operationId": "abort_multipart_upload", "security": [ { "HTTPBasic": [] } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs": { - "get": { - "tags": [ - "solvers" ], - "summary": "Get Job Outputs", - "operationId": "get_job_outputs", "parameters": [ { + "name": "file_id", + "in": "path", "required": true, "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "Job Id", "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" + "format": "uuid", + "title": "File Id" + } } ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_abort_multipart_upload_v0_files__file_id__abort_post" + } + } + 
} + }, "responses": { "200": { "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "429": { + "description": "Too many requests", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/JobOutputs" + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" } } } @@ -997,68 +1018,218 @@ } } } - }, + } + } + }, + "/v0/files/{file_id}:complete": { + "post": { + "tags": [ + "files" + ], + "summary": "Complete Multipart Upload", + "operationId": "complete_multipart_upload", "security": [ { "HTTPBasic": [] } - ] - } - }, - "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs/logfile": { - "get": { - "tags": [ - "solvers" ], - "summary": "Get Job Output Logfile", - "description": "Special extra output with persistent logs file for the solver run.\n\nNOTE: this is not a log stream but a predefined output that is only\navailable after the job is done.\n\nNew in *version 0.4.0*", - "operationId": "get_job_output_logfile", "parameters": [ { + "name": "file_id", + "in": "path", "required": true, "schema": { - "title": "Solver Key", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", - "type": "string" - }, - "name": "solver_key", - "in": "path" + "type": "string", + "format": "uuid", + "title": "File Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_complete_multipart_upload_v0_files__file_id__complete_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/File" + } + } + } }, - { - "required": true, - "schema": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "version", - "in": "path" + "404": { + "description": "File not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { 
+ "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/files/{file_id}/content": { + "get": { + "tags": [ + "files" + ], + "summary": "Download File", + "operationId": "download_file", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ { + "name": "file_id", + "in": "path", "required": true, "schema": { - "title": "Job Id", "type": "string", - "format": "uuid" - }, - "name": "job_id", - "in": "path" + "format": "uuid", + "title": "File Id" + } } ], "responses": { "307": { "description": "Successful Response" }, - "200": { - "description": "Returns a log file", + "404": { + "description": "File not found", "content": { - "application/octet-stream": { + "application/json": { "schema": { - "type": "string", - "format": "binary" + "$ref": "#/components/schemas/ErrorGet" } - }, - "application/zip": { + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "200": { + "content": { + "application/octet-stream": { "schema": { "type": "string", "format": "binary" @@ -1069,10 +1240,8 @@ "type": "string" } } - } - }, - "404": { - "description": "Log not found" + }, + "description": "Returns a arbitrary binary data" }, "422": { "description": "Validation Error", @@ -1084,569 +1253,9805 @@ } } } - }, + } + } + }, + "/v0/programs/{program_key}/releases/{version}": { + "get": { + "tags": [ + "programs" + ], + "summary": "Get Program Release", + "description": "Gets a specific release of a solver", + "operationId": "get_program_release", "security": [ { "HTTPBasic": [] } - ] - } - } - }, - "components": { - "schemas": { - "Body_upload_file_v0_files_content_put": { - "title": "Body_upload_file_v0_files_content_put", - "required": [ - "file" ], - "type": "object", + "parameters": [ + { + "name": "program_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Program Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Program" + } + } + } + }, 
+ "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/programs/{program_key}/releases/{version}/jobs": { + "post": { + "tags": [ + "programs" + ], + "summary": "Create Program Job", + "description": "Creates a program job", + "operationId": "create_program_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "program_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Program Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "x-simcore-parent-project-uuid", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Project-Uuid" + } + }, + { + "name": "x-simcore-parent-node-id", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Node-Id" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_create_program_job_v0_programs__program_key__releases__version__jobs_post" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Solvers", + "description": "\ud83d\udea8 **Deprecated**: This endpoint is deprecated and will be removed in a future release.\nPlease use `GET /v0/solvers/page` instead.\n\n\n\nLists all available solvers (latest version)\n\nNew in *version 0.5*", + "operationId": "list_solvers", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/Solver" + }, + "type": "array", + "title": "Response List Solvers V0 Solvers Get" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend 
service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/releases": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Lists All Releases", + "description": "\ud83d\udea8 **Deprecated**: This endpoint is deprecated and will be removed in a future release.\nPlease use `GET /v0/solvers/{solver_key}/releases/page` instead.\n\n\n\nLists **all** released solvers (not just latest version)\n\nNew in *version 0.5*", + "operationId": "list_solvers_releases", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/Solver" + }, + "type": "array", + "title": "Response List Solvers Releases V0 Solvers Releases Get" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/latest": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Solver", + "description": "Gets latest release of a solver\n\nAdded in *version 0.7.1*: `version_display` field in the response", + "operationId": "get_solver", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Solver" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + 
"description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Solver Releases", + "description": "Lists all releases of a given (one) solver\n\nAdded in *version 0.7.1*: `version_display` field in the response", + "operationId": "list_solver_releases", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Solver" + }, + "title": "Response List Solver Releases V0 Solvers Solver Key Releases Get" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Solver Release", + "description": "Gets a specific release of a solver\n\nAdded in *version 0.7.1*: `version_display` field in the response", + "operationId": "get_solver_release", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Solver" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/ports": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Solver Ports", + "description": "Lists inputs and outputs of a given solver\n\nNew in *version 0.5*\n\nAdded in *version 0.7.1*: `version_display` field in the response", + "operationId": "list_solver_ports", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OnePage_SolverPort_" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/pricing_plan": { + "get": { + "tags": [ 
+ "solvers" + ], + "summary": "Get Solver Pricing Plan", + "description": "Gets solver pricing plan\n\nNew in *version 0.7*\n\nAdded in *version 0.7.1*: `version_display` field in the response", + "operationId": "get_solver_pricing_plan", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ServicePricingPlanGetLegacy" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Create Solver Job", + "description": "Creates a job in a specific release with given inputs. 
This operation does not start the job.\n\nNew in *version 0.5*", + "operationId": "create_solver_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "hidden", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "default": true, + "title": "Hidden" + } + }, + { + "name": "x-simcore-parent-project-uuid", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Project-Uuid" + } + }, + { + "name": "x-simcore-parent-node-id", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Node-Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobInputs" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "solvers" + ], + "summary": "List Jobs", + "description": "\ud83d\udea8 **Deprecated**: This endpoint is deprecated and will be removed in a future release.\nPlease use `GET /{solver_key}/releases/{version}/jobs/page` instead.\n\n\n\nList of jobs in a specific released solver (limited to 20 jobs)\n\nNew in *version 0.5*\n\nRemoved in *version 0.7*: This endpoint is deprecated and will be removed in a future version", + "operationId": "list_jobs", + "security": [ + { + "HTTPBasic": [] + } + 
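The `create_solver_job` operation above only registers a job with its inputs; it does not start it. Below is a hedged sketch of the corresponding request, reusing the same placeholder base URL and credentials; the payload shape is whatever `#/components/schemas/JobInputs` defines (the schema itself is not shown in this hunk).

```python
# Hedged sketch: create a solver job via
# POST /v0/solvers/{solver_key}/releases/{version}/jobs (operation `create_solver_job`).
# BASE_URL/AUTH are placeholders; `job_inputs` must conform to the JobInputs schema.
import httpx

BASE_URL = "https://api.example.com"    # assumption
AUTH = ("my_api_key", "my_api_secret")  # HTTPBasic

def create_solver_job(solver_key: str, version: str, job_inputs: dict) -> dict:
    with httpx.Client(base_url=BASE_URL, auth=AUTH) as client:
        response = client.post(
            f"/v0/solvers/{solver_key}/releases/{version}/jobs",
            params={"hidden": True},  # query parameter documented above (defaults to true)
            json=job_inputs,          # #/components/schemas/JobInputs
        )
        response.raise_for_status()   # 402/404/422/5xx map to ErrorGet / HTTPValidationError
        return response.json()        # 201 -> Job
```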
], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Job" + }, + "title": "Response List Jobs V0 Solvers Solver Key Releases Version Jobs Get" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}": { + "delete": { + "tags": [ + "solvers" + ], + "summary": "Delete Job", + "description": "Deletes an existing solver job\n\nNew in *version 0.7*", + "operationId": "delete_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job", + "description": "Gets job of a given solver", + "operationId": "get_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } 
+ } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:start": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Start Job", + "description": "Starts job job_id created with the solver solver_key:version\n\nAdded in *version 0.4.3*: query parameter `cluster_id`\n\nAdded in *version 0.6*: responds with a 202 when successfully starting a computation\n\nChanged in *version 0.7*: query parameter `cluster_id` deprecated", + "operationId": "start_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + }, + { + "name": "cluster_id", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "deprecated": true, + "title": "Cluster Id" + }, + "deprecated": true + } + ], + "responses": { + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "200": { + "description": "Job already started", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "406": { + "description": "Cluster not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Configuration error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:stop": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Stop Job", + "description": "Stops a running job\n\nNew in *version 0.5*", + 
"operationId": "stop_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:inspect": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Inspect Job", + "description": "Inspects the current status of a job\n\nNew in *version 0.5*", + "operationId": "inspect_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "402": { + "description": "Payment 
required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/metadata": { + "patch": { + "tags": [ + "solvers" + ], + "summary": "Replace Job Custom Metadata", + "description": "Updates custom metadata from a job\n\nNew in *version 0.7*", + "operationId": "replace_job_custom_metadata", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadataUpdate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadata" + } + } + } + }, + "404": { + "description": "Metadata not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Custom Metadata", + "description": "Gets custom metadata from a job\n\nNew in *version 0.7*", + "operationId": "get_job_custom_metadata", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadata" + } + } + } + }, + "404": { + "description": "Metadata not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/page": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Jobs Paginated", + "description": "List of jobs on a specific released solver (includes pagination)\n\nNew in *version 0.7*", + "operationId": "list_jobs_paginated", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": 
"^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_Job_" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Outputs", + "operationId": "get_job_outputs", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobOutputs" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": 
"Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs/logfile": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Output Logfile", + "description": "Special extra output with persistent logs file for the solver run.\n\n**NOTE**: this is not a log stream but a predefined output that is only\navailable after the job is done\n\nNew in *version 0.4*", + "operationId": "get_job_output_logfile", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "307": { + "description": "Successful Response" + }, + "200": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + }, + "application/zip": { + "schema": { + "type": "string", + "format": "binary" + } + }, + "text/plain": { + "schema": { + "type": "string" + } + } + }, + "description": "Returns a log file" + }, + "404": { + "description": "Log not found" + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": 
{ + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/wallet": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Wallet", + "description": "Get job wallet\n\nNew in *version 0.7*", + "operationId": "get_job_wallet", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WalletGetWithAvailableCreditsLegacy" + } + } + } + }, + "404": { + "description": "Wallet not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "403": { + "description": "Access to wallet is not allowed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/pricing_unit": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Pricing Unit", + "description": "Get job pricing unit\n\nNew in *version 0.7*", + "operationId": "get_job_pricing_unit", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + 
"name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PricingUnitGetLegacy" + } + } + } + }, + "404": { + "description": "Pricing unit not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/logstream": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Log Stream", + "operationId": "get_log_stream", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "solver_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + } + }, + { + "name": "version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Returns a JobLog or an ErrorGet", + "content": { + "application/x-ndjson": { + "schema": { + "type": "string", + "anyOf": [ + { + "$ref": "#/components/schemas/JobLog" + }, + { + "$ref": "#/components/schemas/ErrorGet" + } + ], + "title": "Response 200 Get Log Stream V0 Solvers Solver Key Releases Version Jobs Job Id Logstream Get" + } + } + } + }, + "409": { + "description": "Conflict: Logs are already being streamed", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": 
"#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/x-ndjson": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies": { + "get": { + "tags": [ + "studies" + ], + "summary": "List Studies", + "description": "List all studies\n\nNew in *version 0.5*", + "operationId": "list_studies", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_Study_" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}": { + "get": { + "tags": [ + "studies" + ], + "summary": "Get Study", + "description": "Get study by ID\n\nNew in *version 0.5*", + "operationId": "get_study", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Study" + } + } + } + }, + "404": { + "description": "Study not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}:clone": { + "post": { + "tags": [ + "studies" + ], + "summary": "Clone Study", + "operationId": "clone_study", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "x-simcore-parent-project-uuid", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Project-Uuid" + } + }, + { + "name": "x-simcore-parent-node-id", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Node-Id" + } + } + ], + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Study" + } + } + } + }, + "404": { + "description": "Study not found", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/ports": { + "get": { + "tags": [ + "studies" + ], + "summary": "List Study Ports", + "description": "Lists metadata on ports of a given study\n\nNew in *version 0.5*", + "operationId": "list_study_ports", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OnePage_StudyPort_" + } + } + } + }, + "404": { + "description": "Study not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs": { + "post": { + "tags": [ + "studies" + ], + "summary": "Create Study Job", + "description": "hidden -- if True (default) hides project from UI", + "operationId": "create_study_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "hidden", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "default": true, + "title": "Hidden" + } + }, + { + "name": "x-simcore-parent-project-uuid", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Project-Uuid" + } + }, + { + "name": "x-simcore-parent-node-id", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "X-Simcore-Parent-Node-Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobInputs" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}": { + "delete": { + "tags": [ + "studies" + ], + "summary": "Delete Study Job", + "description": "Deletes an existing study job", + "operationId": "delete_study_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "404": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + }, + 
"description": "Not Found" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}:start": { + "post": { + "tags": [ + "studies" + ], + "summary": "Start Study Job", + "description": "Changed in *version 0.6*: Now responds with a 202 when successfully starting a computation", + "operationId": "start_study_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + }, + { + "name": "cluster_id", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "description": "Changed in *version 0.7*: query parameter `cluster_id` deprecated\n", + "deprecated": true, + "title": "Cluster Id" + }, + "description": "Changed in *version 0.7*: query parameter `cluster_id` deprecated\n", + "deprecated": true + } + ], + "responses": { + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "402": { + "description": "Payment required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "404": { + "description": "Job/wallet/pricing details not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "200": { + "description": "Job already started", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "406": { + "description": "Cluster not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Configuration error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}:stop": { + "post": { + "tags": [ + "studies" + ], + "summary": "Stop Study Job", + "operationId": "stop_study_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + 
"in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}:inspect": { + "post": { + "tags": [ + "studies" + ], + "summary": "Inspect Study Job", + "operationId": "inspect_study_job", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}/outputs": { + "post": { + "tags": [ + "studies" + ], + "summary": "Get Study Job Outputs", + "operationId": "get_study_job_outputs", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobOutputs" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}/outputs/log-links": { + "get": { + "tags": [ + "studies" + ], + "summary": "Get download links for study job log files", + "operationId": "get_study_job_output_logfile", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobLogsMap" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/studies/{study_id}/jobs/{job_id}/metadata": { + "get": { + "tags": [ + "studies" + ], + "summary": "Get Study Job Custom Metadata", + "description": "Get custom metadata from a study's job\n\nNew in *version 0.7*", + "operationId": "get_study_job_custom_metadata", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": 
"study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadata" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "tags": [ + "studies" + ], + "summary": "Replace Study Job Custom Metadata", + "description": "Changes custom metadata of a study's job\n\nNew in *version 0.7*", + "operationId": "replace_study_job_custom_metadata", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "study_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Study Id" + } + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Job Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadataUpdate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobMetadata" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_jobs": { + "get": { + "tags": [ + "function_jobs" + ], + "summary": "List Function Jobs", + "description": "List function jobs\n\nNew in *version 0.8.0*", + "operationId": "list_function_jobs", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_Annotated_Union_RegisteredProjectFunctionJob__RegisteredPythonCodeFunctionJob__RegisteredSolverFunctionJob___FieldInfo_annotation_NoneType__required_True__discriminator__function_class____" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "post": { + "tags": [ + "function_jobs" + ], + "summary": "Register Function Job", + "description": "Create function job\n\nNew in *version 0.8.0*", + "operationId": "register_function_job", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/ProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/PythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/SolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/ProjectFunctionJob", + "PYTHON_CODE": "#/components/schemas/PythonCodeFunctionJob", + "SOLVER": 
"#/components/schemas/SolverFunctionJob" + } + }, + "title": "Function Job" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunctionJob", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunctionJob", + "SOLVER": "#/components/schemas/RegisteredSolverFunctionJob" + } + }, + "title": "Response Register Function Job V0 Function Jobs Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_jobs/{function_job_id}": { + "get": { + "tags": [ + "function_jobs" + ], + "summary": "Get Function Job", + "description": "Get function job\n\nNew in *version 0.8.0*", + "operationId": "get_function_job", + "parameters": [ + { + "name": "function_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunctionJob", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunctionJob", + "SOLVER": "#/components/schemas/RegisteredSolverFunctionJob" + } + }, + "title": "Response Get Function Job V0 Function Jobs Function Job Id Get" + } + } + } + }, + "404": { + "description": "Function job not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "function_jobs" + ], + "summary": "Delete Function Job", + "description": "Delete function job\n\nNew in *version 0.8.0*", + "operationId": "delete_function_job", + "parameters": [ + { + "name": "function_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "404": { + "description": "Function job not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_jobs/{function_job_id}/status": { + "get": { + "tags": [ + "function_jobs" + ], + "summary": "Function Job Status", + "description": "Get function job status\n\nNew in *version 0.8.0*", + "operationId": 
"function_job_status", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "function_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FunctionJobStatus" + } + } + } + }, + "404": { + "description": "Function job not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_jobs/{function_job_id}/outputs": { + "get": { + "tags": [ + "function_jobs" + ], + "summary": "Function Job Outputs", + "description": "Get function job outputs\n\nNew in *version 0.8.0*", + "operationId": "function_job_outputs", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "function_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Response Function Job Outputs V0 Function Jobs Function Job Id Outputs Get" + } + } + } + }, + "404": { + "description": "Function job not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_job_collections": { + "get": { + "tags": [ + "function_job_collections" + ], + "summary": "List Function Job Collections", + "description": "List function job collections\n\nNew in *version 0.8.0*", + "operationId": "list_function_job_collections", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + }, + { + "name": "has_function_id", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string", + "pattern": "[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}" + }, + { + "type": "null" + } + ], + "title": "Has Function Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_RegisteredFunctionJobCollection_" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "post": { + "tags": [ + "function_job_collections" + ], + "summary": "Register Function Job Collection", + "description": "Register function job collection\n\nNew in *version 0.8.0*", + "operationId": "register_function_job_collection", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/FunctionJobCollection" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisteredFunctionJobCollection" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_job_collections/{function_job_collection_id}": { + "get": { + "tags": [ + "function_job_collections" + ], + "summary": "Get Function Job Collection", + "description": "Get function job collection\n\nNew in *version 0.8.0*", + "operationId": "get_function_job_collection", + "parameters": [ + { + "name": "function_job_collection_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Collection Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisteredFunctionJobCollection" + } + } + } + }, + "404": { + "description": "Function job collection not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "function_job_collections" + ], + "summary": "Delete Function Job Collection", + "description": "Delete function job collection\n\nNew in *version 0.8.0*", + "operationId": "delete_function_job_collection", + "parameters": [ + { + "name": "function_job_collection_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Collection Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "404": { + "description": "Function job collection not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_job_collections/{function_job_collection_id}/function_jobs": { + "get": { + "tags": [ + "function_job_collections" + ], + "summary": "Function Job Collection List Function Jobs", + "description": "Get the function jobs in function job collection\n\nNew in *version 0.8.0*", + "operationId": "function_job_collection_list_function_jobs", + "parameters": [ + { + "name": "function_job_collection_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Collection Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunctionJob", + "PYTHON_CODE": 
"#/components/schemas/RegisteredPythonCodeFunctionJob", + "SOLVER": "#/components/schemas/RegisteredSolverFunctionJob" + } + } + }, + "title": "Response Function Job Collection List Function Jobs V0 Function Job Collections Function Job Collection Id Function Jobs Get" + } + } + } + }, + "404": { + "description": "Function job collection not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/function_job_collections/{function_job_collection_id}/status": { + "get": { + "tags": [ + "function_job_collections" + ], + "summary": "Function Job Collection Status", + "description": "Get function job collection status\n\nNew in *version 0.8.0*", + "operationId": "function_job_collection_status", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "function_job_collection_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Job Collection Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FunctionJobCollectionStatus" + } + } + } + }, + "404": { + "description": "Function job collection not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/wallets/default": { + "get": { + "tags": [ + "wallets" + ], + "summary": "Get Default Wallet", + "description": "Get default wallet\n\nNew in *version 0.7*", + "operationId": "get_default_wallet", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WalletGetWithAvailableCreditsLegacy" + } + } + } + }, + "404": { + "description": "Wallet not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "403": { + "description": "Access to wallet is not allowed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/wallets/{wallet_id}": { + "get": { + "tags": [ + "wallets" + ], + "summary": "Get Wallet", + 
"description": "Get wallet\n\nNew in *version 0.7*", + "operationId": "get_wallet", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "wallet_id", + "in": "path", + "required": true, + "schema": { + "type": "integer", + "title": "Wallet Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WalletGetWithAvailableCreditsLegacy" + } + } + } + }, + "404": { + "description": "Wallet not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "403": { + "description": "Access to wallet is not allowed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/wallets/{wallet_id}/licensed-items": { + "get": { + "tags": [ + "wallets" + ], + "summary": "Get Available Licensed Items For Wallet", + "description": "Get all available licensed items for a given wallet\n\nNew in *version 0.6*", + "operationId": "get_available_licensed_items_for_wallet", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "wallet_id", + "in": "path", + "required": true, + "schema": { + "type": "integer", + "title": "Wallet Id" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_LicensedItemGet_" + } + } + } + }, + "404": { + "description": "Wallet not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "403": { + "description": "Access to wallet is not allowed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } 
+ } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/wallets/{wallet_id}/licensed-items/{licensed_item_id}/checkout": { + "post": { + "tags": [ + "wallets" + ], + "summary": "Checkout Licensed Item", + "description": "Checkout licensed item", + "operationId": "checkout_licensed_item", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "wallet_id", + "in": "path", + "required": true, + "schema": { + "type": "integer", + "title": "Wallet Id" + } + }, + { + "name": "licensed_item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicensedItemCheckoutData" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicensedItemCheckoutGet" + } + } + } + }, + "404": { + "description": "Wallet not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "403": { + "description": "Access to wallet is not allowed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/credits/price": { + "get": { + "tags": [ + "credits" + ], + "summary": "Get Credits Price", + "description": "New in *version 0.6*", + "operationId": "get_credits_price", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetCreditPriceLegacy" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/licensed-items": { + 
"get": { + "tags": [ + "licensed-items" + ], + "summary": "Get Licensed Items", + "description": "Get all licensed items", + "operationId": "get_licensed_items", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_LicensedItemGet_" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/licensed-items/{licensed_item_id}/checked-out-items/{licensed_item_checkout_id}/release": { + "post": { + "tags": [ + "licensed-items" + ], + "summary": "Release Licensed Item", + "description": "Release previously checked out licensed item", + "operationId": "release_licensed_item", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "licensed_item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Id" + } + }, + { + "name": "licensed_item_checkout_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Checkout Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LicensedItemCheckoutGet" + } + } + } + }, + "429": { + "description": "Too many requests", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "500": { + "description": "Internal server error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "502": { + "description": "Unexpected error when communicating with backend service", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "503": { + "description": "Service unavailable", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "504": { + "description": "Request to a backend service timed out.", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions": { + "post": { + "tags": [ + "functions" + ], + "summary": "Register Function", + "description": "Create function\n\nNew in *version 0.8.0*", + "operationId": "register_function", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/ProjectFunction" + }, + { + "$ref": "#/components/schemas/PythonCodeFunction" + }, + { + "$ref": "#/components/schemas/SolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/ProjectFunction", + "PYTHON_CODE": "#/components/schemas/PythonCodeFunction", + "SOLVER": "#/components/schemas/SolverFunction" + } + }, + "title": "Function" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunction" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunction" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunction", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunction", + "SOLVER": "#/components/schemas/RegisteredSolverFunction" + } + }, + "title": "Response Register Function V0 Functions Post" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "functions" + ], + "summary": "List Functions", + "description": "List functions\n\nNew in *version 0.8.0*", + "operationId": "list_functions", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_Annotated_Union_RegisteredProjectFunction__RegisteredPythonCodeFunction__RegisteredSolverFunction___FieldInfo_annotation_NoneType__required_True__discriminator__function_class____" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}": { + "get": { + "tags": [ + "functions" + ], + "summary": "Get Function", + "description": "Get function\n\nNew in *version 0.8.0*", + "operationId": "get_function", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + 
"application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunction" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunction" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunction", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunction", + "SOLVER": "#/components/schemas/RegisteredSolverFunction" + } + }, + "title": "Response Get Function V0 Functions Function Id Get" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "functions" + ], + "summary": "Delete Function", + "description": "Delete function\n\nNew in *version 0.8.0*", + "operationId": "delete_function", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}/jobs": { + "get": { + "tags": [ + "functions" + ], + "summary": "List Function Jobs For Functionid", + "description": "List function jobs for a function\n\nNew in *version 0.8.0*", + "operationId": "list_function_jobs_for_functionid", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "maximum": 50, + "minimum": 1, + "default": 20, + "title": "Limit" + } + }, + { + "name": "offset", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "minimum": 0, + "default": 0, + "title": "Offset" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Page_Annotated_Union_RegisteredProjectFunctionJob__RegisteredPythonCodeFunctionJob__RegisteredSolverFunctionJob___FieldInfo_annotation_NoneType__required_True__discriminator__function_class____" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}/title": { + "patch": { + "tags": [ + "functions" + ], + "summary": "Update Function Title", + "description": "Update function\n\nNew in *version 0.8.0*", + "operationId": "update_function_title", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + }, + { + "name": "title", + "in": "query", + 
"required": true, + "schema": { + "type": "string", + "title": "Title" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunction" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunction" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunction", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunction", + "SOLVER": "#/components/schemas/RegisteredSolverFunction" + } + }, + "title": "Response Update Function Title V0 Functions Function Id Title Patch" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}/description": { + "patch": { + "tags": [ + "functions" + ], + "summary": "Update Function Description", + "description": "Update function\n\nNew in *version 0.8.0*", + "operationId": "update_function_description", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + }, + { + "name": "description", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Description" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunction" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunction" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunction", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunction", + "SOLVER": "#/components/schemas/RegisteredSolverFunction" + } + }, + "title": "Response Update Function Description V0 Functions Function Id Description Patch" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}/input_schema": { + "get": { + "tags": [ + "functions" + ], + "summary": "Get Function Inputschema", + "description": "Get function input schema\n\nNew in *version 0.8.0*", + "operationId": "get_function_inputschema", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + }, 
+ "title": "Response Get Function Inputschema V0 Functions Function Id Input Schema Get" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}/output_schema": { + "get": { + "tags": [ + "functions" + ], + "summary": "Get Function Outputschema", + "description": "Get function output schema\n\nNew in *version 0.8.0*", + "operationId": "get_function_outputschema", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + }, + "title": "Response Get Function Outputschema V0 Functions Function Id Output Schema Get" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}:validate_inputs": { + "post": { + "tags": [ + "functions" + ], + "summary": "Validate Function Inputs", + "description": "Validate inputs against the function's input schema\n\nNew in *version 0.8.0*", + "operationId": "validate_function_inputs", + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "prefixItems": [ + { + "type": "boolean" + }, + { + "type": "string" + } + ], + "minItems": 2, + "maxItems": 2, + "title": "Response Validate Function Inputs V0 Functions Function Id Validate Inputs Post" + } + } + } + }, + "400": { + "description": "Invalid inputs" + }, + "404": { + "description": "Function not found" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}:run": { + "post": { + "tags": [ + "functions" + ], + "summary": "Run Function", + "description": "Run function\n\nNew in *version 0.8.0*", + "operationId": "run_function", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "type": "object" + }, + 
{ + "type": "null" + } + ], + "title": "Function Inputs" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunctionJob", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunctionJob", + "SOLVER": "#/components/schemas/RegisteredSolverFunctionJob" + } + }, + "title": "Response Run Function V0 Functions Function Id Run Post" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v0/functions/{function_id}:map": { + "post": { + "tags": [ + "functions" + ], + "summary": "Map Function", + "description": "Map function over input parameters\n\nNew in *version 0.8.0*", + "operationId": "map_function", + "security": [ + { + "HTTPBasic": [] + } + ], + "parameters": [ + { + "name": "function_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Function Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ] + }, + "title": "Function Inputs List" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisteredFunctionJobCollection" + } + } + } + }, + "404": { + "description": "Function not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "Body_abort_multipart_upload_v0_files__file_id__abort_post": { + "properties": { + "client_file": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserFileToProgramJob" + }, + { + "$ref": "#/components/schemas/UserFile" + } + ], + "title": "Client File" + } + }, + "type": "object", + "required": [ + "client_file" + ], + "title": "Body_abort_multipart_upload_v0_files__file_id__abort_post" + }, + "Body_complete_multipart_upload_v0_files__file_id__complete_post": { + "properties": { + "client_file": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserFileToProgramJob" + }, + { + "$ref": "#/components/schemas/UserFile" + } + ], + "title": "Client File" + }, + "uploaded_parts": { + "$ref": "#/components/schemas/FileUploadCompletionBody" + } + }, + "type": "object", + "required": [ + "client_file", + "uploaded_parts" + ], + "title": "Body_complete_multipart_upload_v0_files__file_id__complete_post" + }, + "Body_create_program_job_v0_programs__program_key__releases__version__jobs_post": { + "properties": { + "name": { + "anyOf": [ + { + "type": "string", + "maxLength": 500 + }, + { + "type": "null" + } + ], + 
"title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string", + "maxLength": 500 + }, + { + "type": "null" + } + ], + "title": "Description" + } + }, + "type": "object", + "title": "Body_create_program_job_v0_programs__program_key__releases__version__jobs_post" + }, + "Body_upload_file_v0_files_content_put": { + "properties": { + "file": { + "type": "string", + "format": "binary", + "title": "File" + } + }, + "type": "object", + "required": [ + "file" + ], + "title": "Body_upload_file_v0_files_content_put" + }, + "ClientFileUploadData": { + "properties": { + "file_id": { + "type": "string", + "format": "uuid", + "title": "File Id", + "description": "The file resource id" + }, + "upload_schema": { + "$ref": "#/components/schemas/FileUploadData", + "description": "Schema for uploading file" + } + }, + "type": "object", + "required": [ + "file_id", + "upload_schema" + ], + "title": "ClientFileUploadData" + }, + "ErrorGet": { + "properties": { + "errors": { + "items": {}, + "type": "array", + "title": "Errors" + } + }, + "type": "object", + "required": [ + "errors" + ], + "title": "ErrorGet", + "example": { + "errors": [ + "some error message", + "another error message" + ] + } + }, + "File": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id", + "description": "Resource identifier" + }, + "filename": { + "type": "string", + "title": "Filename", + "description": "Name of the file with extension" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "Guess of type content [EXPERIMENTAL]" + }, + "checksum": { + "anyOf": [ + { + "type": "string", + "pattern": "^[a-fA-F0-9]{64}$" + }, + { + "type": "null" + } + ], + "title": "Checksum", + "description": "SHA256 hash of the file's content" + }, + "e_tag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "E Tag", + "description": "S3 entity tag" + } + }, + "type": "object", + "required": [ + "id", + "filename" + ], + "title": "File", + "description": "Represents a file stored on the server side i.e. a unique reference to a file in the cloud." 
+ }, + "FileUploadCompletionBody": { + "properties": { + "parts": { + "items": { + "$ref": "#/components/schemas/UploadedPart" + }, + "type": "array", + "title": "Parts" + } + }, + "type": "object", + "required": [ + "parts" + ], + "title": "FileUploadCompletionBody" + }, + "FileUploadData": { + "properties": { + "chunk_size": { + "type": "integer", + "minimum": 0, + "title": "Chunk Size" + }, + "urls": { + "items": { + "type": "string", + "minLength": 1, + "format": "uri" + }, + "type": "array", + "title": "Urls" + }, + "links": { + "$ref": "#/components/schemas/UploadLinks" + } + }, + "type": "object", + "required": [ + "chunk_size", + "urls", + "links" + ], + "title": "FileUploadData" + }, + "FunctionJobCollection": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "job_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Job Ids", + "default": [] + } + }, + "type": "object", + "title": "FunctionJobCollection", + "description": "Model for a collection of function jobs" + }, + "FunctionJobCollectionStatus": { + "properties": { + "status": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Status" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "FunctionJobCollectionStatus" + }, + "FunctionJobStatus": { + "properties": { + "status": { + "type": "string", + "title": "Status" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "FunctionJobStatus" + }, + "GetCreditPriceLegacy": { + "properties": { + "productName": { + "type": "string", + "title": "Productname" + }, + "usdPerCredit": { + "anyOf": [ + { + "type": "number", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Usdpercredit", + "description": "Price of a credit in USD. 
If None, then this product's price is UNDEFINED" + }, + "minPaymentAmountUsd": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Minpaymentamountusd", + "description": "Minimum amount (included) in USD that can be paid for this productCan be None if this product's price is UNDEFINED" + } + }, + "type": "object", + "required": [ + "productName", + "usdPerCredit", + "minPaymentAmountUsd" + ], + "title": "GetCreditPriceLegacy" + }, + "Groups": { + "properties": { + "me": { + "$ref": "#/components/schemas/UsersGroup" + }, + "organizations": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/UsersGroup" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Organizations", + "default": [] + }, + "all": { + "$ref": "#/components/schemas/UsersGroup" + } + }, + "type": "object", + "required": [ + "me", + "all" + ], + "title": "Groups" + }, + "HTTPValidationError": { + "properties": { + "errors": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Validation errors" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "JSONFunctionInputSchema": { + "properties": { + "schema_content": { + "type": "object", + "title": "JSON Schema", + "description": "JSON Schema", + "default": {} + }, + "schema_class": { + "type": "string", + "const": "application/schema+json", + "title": "Schema Class", + "default": "application/schema+json" + } + }, + "type": "object", + "title": "JSONFunctionInputSchema" + }, + "JSONFunctionOutputSchema": { + "properties": { + "schema_content": { + "type": "object", + "title": "JSON Schema", + "description": "JSON Schema", + "default": {} + }, + "schema_class": { + "type": "string", + "const": "application/schema+json", + "title": "Schema Class", + "default": "application/schema+json" + } + }, + "type": "object", + "title": "JSONFunctionOutputSchema" + }, + "Job": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "name": { + "type": "string", + "pattern": "^([^\\s/]+/?){1,10}$", + "title": "Name" + }, + "inputs_checksum": { + "type": "string", + "title": "Inputs Checksum", + "description": "Input's checksum" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At", + "description": "Job creation timestamp" + }, + "runner_name": { + "type": "string", + "pattern": "^([^\\s/]+/?){1,10}$", + "title": "Runner Name", + "description": "Runner that executes job" + }, + "url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Url", + "description": "Link to get this resource (self)" + }, + "runner_url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Runner Url", + "description": "Link to the solver's job (parent collection)" + }, + "outputs_url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Outputs Url", + "description": "Link to the job outputs (sub-collection)" + } + }, + "type": "object", + "required": [ + "id", + "name", + "inputs_checksum", + "created_at", + "runner_name", + "url", + "runner_url", + "outputs_url" + ], + "title": "Job", + "example": { + "created_at": "2021-01-22T23:59:52.322176", + "id": "f622946d-fd29-35b9-a193-abdd1095167c", + "inputs_checksum": "12345", + "name": 
"solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", + "outputs_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs", + "runner_name": "solvers/isolve/releases/1.3.4", + "runner_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4", + "url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c" + } + }, + "JobInputs": { + "properties": { + "values": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": {}, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "type": "object", + "title": "Values" + } + }, + "type": "object", + "required": [ + "values" + ], + "title": "JobInputs", + "example": { + "values": { + "enabled": true, + "input_file": { + "filename": "input.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f" + }, + "n": 55, + "title": "Temperature", + "x": 4.33 + } + } + }, + "JobLog": { + "properties": { + "job_id": { + "type": "string", + "format": "uuid", + "title": "Job Id" + }, + "node_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Node Id" + }, + "log_level": { + "type": "integer", + "title": "Log Level" + }, + "messages": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Messages" + } + }, + "type": "object", + "required": [ + "job_id", + "log_level", + "messages" + ], + "title": "JobLog", + "example": { + "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", + "log_level": 10, + "messages": [ + "PROGRESS: 5/10" + ], + "node_id": "3742215e-6756-48d2-8b73-4d043065309f" + } + }, + "JobLogsMap": { + "properties": { + "log_links": { + "items": { + "$ref": "#/components/schemas/LogLink" + }, + "type": "array", + "title": "Log Links", + "description": "Array of download links" + } + }, + "type": "object", + "required": [ + "log_links" + ], + "title": "JobLogsMap" + }, + "JobMetadata": { + "properties": { + "job_id": { + "type": "string", + "format": "uuid", + "title": "Job Id", + "description": "Parent Job" + }, + "metadata": { + "additionalProperties": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ] + }, + "type": "object", + "title": "Metadata", + "description": "Custom key-value map" + }, + "url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Url", + "description": "Link to get this resource (self)" + } + }, + "type": "object", + "required": [ + "job_id", + "metadata", + "url" + ], + "title": "JobMetadata", + "example": { + "job_id": "3497e4de-0e69-41fb-b08f-7f3875a1ac4b", + "metadata": { + "bool": "true", + "float": "3.14", + "int": "42", + "str": "hej med dig" + }, + "url": "https://f02b2452-1dd8-4882-b673-af06373b41b3.fake" + } + }, + "JobMetadataUpdate": { + "properties": { + "metadata": { + "additionalProperties": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ] + }, + "type": "object", + "title": "Metadata", + "description": "Custom key-value map" + } + }, + "type": "object", + "title": "JobMetadataUpdate", + "example": { + "metadata": { + "bool": "true", + "float": "3.14", + "int": "42", + "str": "hej med dig" + } + } + }, + 
"JobOutputs": { + "properties": { + "job_id": { + "type": "string", + "format": "uuid", + "title": "Job Id", + "description": "Job that produced this output" + }, + "results": { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "items": {}, + "type": "array" + }, + { + "type": "null" + } + ] + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "job_id", + "results" + ], + "title": "JobOutputs", + "example": { + "job_id": "99d9ac65-9f10-4e2f-a433-b5e412bb037b", + "results": { + "enabled": false, + "maxSAR": 4.33, + "n": 55, + "output_file": { + "filename": "sar_matrix.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f" + }, + "title": "Specific Absorption Rate" + } + } + }, + "JobStatus": { + "properties": { + "job_id": { + "type": "string", + "format": "uuid", + "title": "Job Id" + }, + "state": { + "$ref": "#/components/schemas/RunningState" + }, + "progress": { + "type": "integer", + "maximum": 100, + "minimum": 0, + "title": "Progress", + "default": 0 + }, + "submitted_at": { + "type": "string", + "format": "date-time", + "title": "Submitted At", + "description": "Last modification timestamp of the solver job" + }, + "started_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Started At", + "description": "Timestamp that indicate the moment the solver starts execution or None if the event did not occur" + }, + "stopped_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Stopped At", + "description": "Timestamp at which the solver finished or killed execution or None if the event did not occur" + } + }, + "type": "object", + "required": [ + "job_id", + "state", + "submitted_at" + ], + "title": "JobStatus", + "example": { + "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", + "progress": 3, + "started_at": "2021-04-01 07:16:43.670610", + "state": "STARTED", + "submitted_at": "2021-04-01 07:15:54.631007" + } + }, + "LicensedItemCheckoutData": { + "properties": { + "number_of_seats": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Number Of Seats", + "minimum": 0 + }, + "service_run_id": { + "type": "string", + "title": "Service Run Id" + } + }, + "type": "object", + "required": [ + "number_of_seats", + "service_run_id" + ], + "title": "LicensedItemCheckoutData" + }, + "LicensedItemCheckoutGet": { + "properties": { + "licensed_item_checkout_id": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Checkout Id" + }, + "licensed_item_id": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Id" + }, + "key": { + "type": "string", + "title": "Key" + }, + "version": { + "type": "string", + "pattern": "^\\d+\\.\\d+\\.\\d+$", + "title": "Version" + }, + "wallet_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 + }, + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + }, + "product_name": { + "type": "string", + "title": "Product Name" + }, + "started_at": { + "type": "string", + "format": "date-time", + "title": "Started At" + }, + "stopped_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Stopped At" + }, + "num_of_seats": { + "type": "integer", + "title": "Num Of Seats" + } 
+ }, + "type": "object", + "required": [ + "licensed_item_checkout_id", + "licensed_item_id", + "key", + "version", + "wallet_id", + "user_id", + "product_name", + "started_at", + "stopped_at", + "num_of_seats" + ], + "title": "LicensedItemCheckoutGet" + }, + "LicensedItemGet": { + "properties": { + "licensed_item_id": { + "type": "string", + "format": "uuid", + "title": "Licensed Item Id" + }, + "key": { + "type": "string", + "title": "Key" + }, + "version": { + "type": "string", + "pattern": "^\\d+\\.\\d+\\.\\d+$", + "title": "Version" + }, + "display_name": { + "type": "string", + "title": "Display Name" + }, + "licensed_resource_type": { + "$ref": "#/components/schemas/LicensedResourceType" + }, + "licensed_resources": { + "items": { + "$ref": "#/components/schemas/LicensedResource" + }, + "type": "array", + "title": "Licensed Resources" + }, + "pricing_plan_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Plan Id", + "minimum": 0 + }, + "is_hidden_on_market": { + "type": "boolean", + "title": "Is Hidden On Market" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "modified_at": { + "type": "string", + "format": "date-time", + "title": "Modified At" + } + }, + "type": "object", + "required": [ + "licensed_item_id", + "key", + "version", + "display_name", + "licensed_resource_type", + "licensed_resources", + "pricing_plan_id", + "is_hidden_on_market", + "created_at", + "modified_at" + ], + "title": "LicensedItemGet" + }, + "LicensedResource": { + "properties": { + "source": { + "$ref": "#/components/schemas/LicensedResourceSource" + }, + "category_id": { + "type": "string", + "maxLength": 100, + "minLength": 1, + "title": "Category Id" + }, + "category_display": { + "type": "string", + "title": "Category Display" + }, + "terms_of_use_url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Terms Of Use Url" + } + }, + "type": "object", + "required": [ + "source", + "category_id", + "category_display", + "terms_of_use_url" + ], + "title": "LicensedResource" + }, + "LicensedResourceSource": { + "properties": { + "id": { + "type": "integer", + "title": "Id" + }, + "description": { + "type": "string", + "title": "Description" + }, + "thumbnail": { + "type": "string", + "title": "Thumbnail" + }, + "features": { + "$ref": "#/components/schemas/LicensedResourceSourceFeaturesDict" + }, + "doi": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Doi" + }, + "license_key": { + "type": "string", + "title": "License Key" + }, + "license_version": { + "type": "string", + "title": "License Version" + }, + "protection": { + "type": "string", + "enum": [ + "Code", + "PayPal" + ], + "title": "Protection" + }, + "available_from_url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Available From Url" + } + }, + "type": "object", + "required": [ + "id", + "description", + "thumbnail", + "features", + "doi", + "license_key", + "license_version", + "protection", + "available_from_url" + ], + "title": "LicensedResourceSource" + }, + "LicensedResourceSourceFeaturesDict": { + "properties": { + "age": { + "type": "string", + "title": "Age" + }, + "date": { + "type": "string", + "format": "date", + "title": "Date" + }, + "ethnicity": { + "type": "string", + "title": "Ethnicity" + }, + "functionality": { + "type": 
"string", + "title": "Functionality" + }, + "height": { + "type": "string", + "title": "Height" + }, + "name": { + "type": "string", + "title": "Name" + }, + "sex": { + "type": "string", + "title": "Sex" + }, + "species": { + "type": "string", + "title": "Species" + }, + "version": { + "type": "string", + "title": "Version" + }, + "weight": { + "type": "string", + "title": "Weight" + } + }, + "type": "object", + "required": [ + "date" + ], + "title": "LicensedResourceSourceFeaturesDict" + }, + "LicensedResourceType": { + "type": "string", + "enum": [ + "VIP_MODEL" + ], + "title": "LicensedResourceType" + }, + "Links": { + "properties": { + "first": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First" + }, + "last": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last" + }, + "self": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Self" + }, + "next": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Next" + }, + "prev": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prev" + } + }, + "type": "object", + "required": [ + "first", + "last", + "self", + "next", + "prev" + ], + "title": "Links" + }, + "LogLink": { + "properties": { + "node_name": { + "type": "string", + "title": "Node Name" + }, + "download_link": { + "type": "string", + "minLength": 1, + "format": "uri", + "title": "Download Link" + } + }, + "type": "object", + "required": [ + "node_name", + "download_link" + ], + "title": "LogLink" + }, + "Meta": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + }, + "released": { + "anyOf": [ + { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Released", + "description": "Maps every route's path tag with a released version" + }, + "docs_url": { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri", + "title": "Docs Url" + }, + "docs_dev_url": { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri", + "title": "Docs Dev Url" + } + }, + "type": "object", + "required": [ + "name", + "version", + "docs_url", + "docs_dev_url" + ], + "title": "Meta", + "example": { + "docs_dev_url": "https://api.osparc.io/dev/doc", + "docs_url": "https://api.osparc.io/dev/doc", + "name": "simcore_service_foo", + "released": { + "v1": "1.3.4", + "v2": "2.4.45" + }, + "version": "2.4.45" + } + }, + "OnePage_SolverPort_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/SolverPort" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + } + }, + "type": "object", + "required": [ + "items" + ], + "title": "OnePage[SolverPort]" + }, + "OnePage_StudyPort_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/StudyPort" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + 
"minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + } + }, + "type": "object", + "required": [ + "items" + ], + "title": "OnePage[StudyPort]" + }, + "Page_Annotated_Union_RegisteredProjectFunctionJob__RegisteredPythonCodeFunctionJob__RegisteredSolverFunctionJob___FieldInfo_annotation_NoneType__required_True__discriminator__function_class____": { + "properties": { + "items": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunctionJob" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunctionJob" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunctionJob", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunctionJob", + "SOLVER": "#/components/schemas/RegisteredSolverFunctionJob" + } + } + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[Annotated[Union[RegisteredProjectFunctionJob, RegisteredPythonCodeFunctionJob, RegisteredSolverFunctionJob], FieldInfo(annotation=NoneType, required=True, discriminator='function_class')]]" + }, + "Page_Annotated_Union_RegisteredProjectFunction__RegisteredPythonCodeFunction__RegisteredSolverFunction___FieldInfo_annotation_NoneType__required_True__discriminator__function_class____": { + "properties": { + "items": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/RegisteredProjectFunction" + }, + { + "$ref": "#/components/schemas/RegisteredPythonCodeFunction" + }, + { + "$ref": "#/components/schemas/RegisteredSolverFunction" + } + ], + "discriminator": { + "propertyName": "function_class", + "mapping": { + "PROJECT": "#/components/schemas/RegisteredProjectFunction", + "PYTHON_CODE": "#/components/schemas/RegisteredPythonCodeFunction", + "SOLVER": "#/components/schemas/RegisteredSolverFunction" + } + } + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[Annotated[Union[RegisteredProjectFunction, RegisteredPythonCodeFunction, RegisteredSolverFunction], FieldInfo(annotation=NoneType, required=True, discriminator='function_class')]]" + }, + "Page_File_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/File" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": 
"integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[File]" + }, + "Page_Job_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/Job" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[Job]" + }, + "Page_LicensedItemGet_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/LicensedItemGet" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[LicensedItemGet]" + }, + "Page_RegisteredFunctionJobCollection_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/RegisteredFunctionJobCollection" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[RegisteredFunctionJobCollection]" + }, + "Page_Study_": { + "properties": { + "items": { + "items": { + "$ref": "#/components/schemas/Study" + }, + "type": "array", + "title": "Items" + }, + "total": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Total" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 1 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "offset": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Offset" + }, + "links": { + "$ref": "#/components/schemas/Links" + } + }, + "type": "object", + "required": [ + "items", + "total", + "limit", + "offset", + "links" + ], + "title": "Page[Study]" + }, + "PricingPlanClassification": { + "type": "string", + "enum": [ + "TIER", + "LICENSE" + ], + "title": "PricingPlanClassification" + }, + "PricingUnitGetLegacy": { + "properties": { + 
"pricingUnitId": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricingunitid", + "minimum": 0 + }, + "unitName": { + "type": "string", + "title": "Unitname" + }, + "unitExtraInfo": { + "$ref": "#/components/schemas/UnitExtraInfoTier" + }, + "currentCostPerUnit": { + "type": "number", + "minimum": 0.0, + "title": "Currentcostperunit" + }, + "default": { + "type": "boolean", + "title": "Default" + } + }, + "type": "object", + "required": [ + "pricingUnitId", + "unitName", + "unitExtraInfo", + "currentCostPerUnit", + "default" + ], + "title": "PricingUnitGetLegacy" + }, + "Profile": { + "properties": { + "first_name": { + "anyOf": [ + { + "type": "string", + "maxLength": 255 + }, + { + "type": "null" + } + ], + "title": "First Name" + }, + "last_name": { + "anyOf": [ + { + "type": "string", + "maxLength": 255 + }, + { + "type": "null" + } + ], + "title": "Last Name" + }, + "id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Id", + "minimum": 0 + }, + "login": { + "type": "string", + "format": "email", + "title": "Login" + }, + "role": { + "$ref": "#/components/schemas/UserRoleEnum" + }, + "groups": { + "anyOf": [ + { + "$ref": "#/components/schemas/Groups" + }, + { + "type": "null" + } + ] + }, + "gravatar_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 40 + }, + { + "type": "null" + } + ], + "title": "Gravatar Id", + "description": "md5 hash value of email to retrieve an avatar image from https://www.gravatar.com" + } + }, + "type": "object", + "required": [ + "id", + "login", + "role" + ], + "title": "Profile", + "example": { + "first_name": "James", + "gravatar_id": "9a8930a5b20d7048e37740bac5c1ca4f", + "groups": { + "all": { + "description": "all users", + "gid": "1", + "label": "Everyone" + }, + "me": { + "description": "primary group", + "gid": "123", + "label": "maxy" + }, + "organizations": [] + }, + "id": "20", + "last_name": "Maxwell", + "login": "james-maxwell@itis.swiss", + "role": "USER" + } + }, + "ProfileUpdate": { + "properties": { + "first_name": { + "anyOf": [ + { + "type": "string", + "maxLength": 255 + }, + { + "type": "null" + } + ], + "title": "First Name" + }, + "last_name": { + "anyOf": [ + { + "type": "string", + "maxLength": 255 + }, + { + "type": "null" + } + ], + "title": "Last Name" + } + }, + "type": "object", + "title": "ProfileUpdate" + }, + "Program": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "Resource identifier" + }, + "version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version", + "description": "Semantic version number of the resource" + }, + "title": { + "type": "string", + "maxLength": 100, + "title": "Title", + "description": "Human readable name" + }, + "description": { + "anyOf": [ + { + "type": "string", + "maxLength": 1000 + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Description of the resource" + }, + "url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Url", + "description": "Link to get this resource" + }, + "version_display": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version Display" + } + }, + "type": "object", + "required": [ + "id", + "version", + "title", + "url", + "version_display" + ], + "title": "Program", + 
"description": "A released program with a specific version", + "example": { + "description": "Simulation framework", + "id": "simcore/services/dynamic/sim4life", + "maintainer": "info@itis.swiss", + "title": "Sim4life", + "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fdynamic%2Fsim4life/releases/8.0.0", + "version": "8.0.0", + "version_display": "8.0.0" + } + }, + "ProjectFunction": { + "properties": { + "function_class": { + "type": "string", + "const": "PROJECT", + "title": "Function Class", + "default": "PROJECT" + }, + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + } + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionOutputSchema" + } + } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default Inputs" + }, + "project_id": { + "type": "string", + "format": "uuid", + "title": "Project Id" + } + }, + "type": "object", + "required": [ + "input_schema", + "output_schema", + "default_inputs", + "project_id" + ], + "title": "ProjectFunction" + }, + "ProjectFunctionJob": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" + }, + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + "type": "string", + "const": "PROJECT", + "title": "Function Class", + "default": "PROJECT" + }, + "project_job_id": { + "type": "string", + "format": "uuid", + "title": "Project Job Id" + } + }, + "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs", + "project_job_id" + ], + "title": "ProjectFunctionJob" + }, + "PythonCodeFunction": { + "properties": { + "function_class": { + "type": "string", + "const": "PYTHON_CODE", + "title": "Function Class", + "default": "PYTHON_CODE" + }, + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + } + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionOutputSchema" + } + } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default 
Inputs" + }, + "code_url": { + "type": "string", + "title": "Code Url" + } + }, + "type": "object", + "required": [ + "input_schema", + "output_schema", + "default_inputs", + "code_url" + ], + "title": "PythonCodeFunction" + }, + "PythonCodeFunctionJob": { "properties": { - "file": { - "title": "File", + "title": { "type": "string", - "format": "binary" + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" + }, + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + "type": "string", + "const": "PYTHON_CODE", + "title": "Function Class", + "default": "PYTHON_CODE" } - } + }, + "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs" + ], + "title": "PythonCodeFunctionJob" }, - "File": { - "title": "File", + "RegisteredFunctionJobCollection": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "job_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Job Ids", + "default": [] + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + } + }, + "type": "object", "required": [ - "id", - "filename" + "uid" + ], + "title": "RegisteredFunctionJobCollection" + }, + "RegisteredProjectFunction": { + "properties": { + "function_class": { + "type": "string", + "const": "PROJECT", + "title": "Function Class", + "default": "PROJECT" + }, + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + } + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionOutputSchema" + } + } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default Inputs" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + }, + "project_id": { + "type": "string", + "format": "uuid", + "title": "Project Id" + } + }, + "type": "object", + "required": [ + "input_schema", + "output_schema", + "default_inputs", + "uid", + "project_id" ], + "title": "RegisteredProjectFunction" + }, + "RegisteredProjectFunctionJob": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" + }, + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + 
"type": "string", + "const": "PROJECT", + "title": "Function Class", + "default": "PROJECT" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + }, + "project_job_id": { + "type": "string", + "format": "uuid", + "title": "Project Job Id" + } + }, "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs", + "uid", + "project_job_id" + ], + "title": "RegisteredProjectFunctionJob" + }, + "RegisteredPythonCodeFunction": { "properties": { - "id": { - "title": "Id", + "function_class": { + "type": "string", + "const": "PYTHON_CODE", + "title": "Function Class", + "default": "PYTHON_CODE" + }, + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + } + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionOutputSchema" + } + } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default Inputs" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + }, + "code_url": { + "type": "string", + "title": "Code Url" + } + }, + "type": "object", + "required": [ + "input_schema", + "output_schema", + "default_inputs", + "uid", + "code_url" + ], + "title": "RegisteredPythonCodeFunction" + }, + "RegisteredPythonCodeFunctionJob": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" + }, + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + "type": "string", + "const": "PYTHON_CODE", + "title": "Function Class", + "default": "PYTHON_CODE" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + } + }, + "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs", + "uid" + ], + "title": "RegisteredPythonCodeFunctionJob" + }, + "RegisteredSolverFunction": { + "properties": { + "function_class": { + "type": "string", + "const": "SOLVER", + "title": "Function Class", + "default": "SOLVER" + }, + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } + } + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": 
"#/components/schemas/JSONFunctionOutputSchema" + } + } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default Inputs" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + }, + "solver_key": { + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver Key" + }, + "solver_version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Solver Version" + } + }, + "type": "object", + "required": [ + "input_schema", + "output_schema", + "default_inputs", + "uid", + "solver_key", + "solver_version" + ], + "title": "RegisteredSolverFunction" + }, + "RegisteredSolverFunctionJob": { + "properties": { + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" + }, + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + "type": "string", + "const": "SOLVER", + "title": "Function Class", + "default": "SOLVER" + }, + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" + }, + "solver_job_id": { + "type": "string", + "format": "uuid", + "title": "Solver Job Id" + } + }, + "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs", + "uid", + "solver_job_id" + ], + "title": "RegisteredSolverFunctionJob" + }, + "RunningState": { + "type": "string", + "enum": [ + "UNKNOWN", + "PUBLISHED", + "NOT_STARTED", + "PENDING", + "WAITING_FOR_RESOURCES", + "STARTED", + "SUCCESS", + "FAILED", + "ABORTED", + "WAITING_FOR_CLUSTER" + ], + "title": "RunningState", + "description": "State of execution of a project's computational workflow\n\nSEE StateType for task state" + }, + "ServicePricingPlanGetLegacy": { + "properties": { + "pricingPlanId": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricingplanid", + "minimum": 0 + }, + "displayName": { + "type": "string", + "title": "Displayname" + }, + "description": { "type": "string", - "description": "Resource identifier", - "format": "uuid" + "title": "Description" }, - "filename": { - "title": "Filename", - "type": "string", - "description": "Name of the file with extension" + "classification": { + "$ref": "#/components/schemas/PricingPlanClassification" }, - "content_type": { - "title": "Content Type", + "createdAt": { "type": "string", - "description": "Guess of type content [EXPERIMENTAL]" + "format": "date-time", + "title": "Createdat" }, - "checksum": { - "title": "Checksum", + "pricingPlanKey": { "type": "string", - "description": "MD5 hash of the file's content [EXPERIMENTAL]" - } - }, - "description": "Represents a file stored on the server side i.e. a unique reference to a file in the cloud." 
- }, - "Groups": { - "title": "Groups", - "required": [ - "me", - "all" - ], - "type": "object", - "properties": { - "me": { - "$ref": "#/components/schemas/UsersGroup" + "title": "Pricingplankey" }, - "organizations": { - "title": "Organizations", - "type": "array", + "pricingUnits": { "items": { - "$ref": "#/components/schemas/UsersGroup" + "$ref": "#/components/schemas/PricingUnitGetLegacy" }, - "default": [] - }, - "all": { - "$ref": "#/components/schemas/UsersGroup" - } - } - }, - "HTTPValidationError": { - "title": "HTTPValidationError", - "type": "object", - "properties": { - "errors": { - "title": "Validation errors", "type": "array", - "items": { - "$ref": "#/components/schemas/ValidationError" - } + "title": "Pricingunits" } - } - }, - "Job": { - "title": "Job", + }, + "type": "object", "required": [ - "id", - "name", - "inputs_checksum", - "created_at", - "runner_name", - "url", - "runner_url", - "outputs_url" + "pricingPlanId", + "displayName", + "description", + "classification", + "createdAt", + "pricingPlanKey", + "pricingUnits" ], - "type": "object", + "title": "ServicePricingPlanGetLegacy" + }, + "Solver": { "properties": { "id": { - "title": "Id", "type": "string", - "format": "uuid" - }, - "name": { - "title": "Name", - "pattern": "^([^\\s/]+/?)+$", - "type": "string" + "title": "Id", + "description": "Resource identifier" }, - "inputs_checksum": { - "title": "Inputs Checksum", + "version": { "type": "string", - "description": "Input's checksum" + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version", + "description": "Semantic version number of the resource" }, - "created_at": { - "title": "Created At", + "title": { "type": "string", - "description": "Job creation timestamp", - "format": "date-time" + "maxLength": 100, + "title": "Title", + "description": "Human readable name" }, - "runner_name": { - "title": "Runner Name", - "pattern": "^([^\\s/]+/?)+$", - "type": "string", - "description": "Runner that executes job" + "description": { + "anyOf": [ + { + "type": "string", + "maxLength": 1000 + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Description of the resource" }, "url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], "title": "Url", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "Link to get this resource (self)", - "format": "uri" + "description": "Link to get this resource" }, - "runner_url": { - "title": "Runner Url", - "maxLength": 2083, - "minLength": 1, + "maintainer": { "type": "string", - "description": "Link to the solver's job (parent collection)", - "format": "uri" + "title": "Maintainer", + "description": "Maintainer of the solver" }, - "outputs_url": { - "title": "Outputs Url", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "Link to the job outputs (sub-collection", - "format": "uri" + "version_display": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version Display", + "description": "A user-friendly or marketing name for the release." 
} }, - "example": { - "id": "f622946d-fd29-35b9-a193-abdd1095167c", - "name": "solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", - "runner_name": "solvers/isolve/releases/1.3.4", - "inputs_checksum": "12345", - "created_at": "2021-01-22T23:59:52.322176", - "url": "https://api.osparc.io/v0/jobs/f622946d-fd29-35b9-a193-abdd1095167c", - "runner_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4", - "outputs_url": "https://api.osparc.io/v0/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs" - } - }, - "JobInputs": { - "title": "JobInputs", + "type": "object", "required": [ - "values" + "id", + "version", + "title", + "url", + "maintainer" ], - "type": "object", - "properties": { - "values": { - "title": "Values", - "type": "object", - "additionalProperties": { - "anyOf": [ - { - "$ref": "#/components/schemas/File" - }, - { - "type": "number" - }, - { - "type": "integer" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } - }, + "title": "Solver", + "description": "A released solver with a specific version", "example": { - "values": { - "x": 4.33, - "n": 55, - "title": "Temperature", - "enabled": true, - "input_file": { - "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f", - "filename": "input.txt", - "content_type": "text/plain" - } - } + "description": "EM solver", + "id": "simcore/services/comp/isolve", + "maintainer": "info@itis.swiss", + "title": "iSolve", + "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.1", + "version": "2.1.1", + "version_display": "2.1.1-2023-10-01" } }, - "JobOutputs": { - "title": "JobOutputs", - "required": [ - "job_id", - "results" - ], - "type": "object", + "SolverFunction": { "properties": { - "job_id": { - "title": "Job Id", + "function_class": { "type": "string", - "description": "Job that produced this output", - "format": "uuid" + "const": "SOLVER", + "title": "Function Class", + "default": "SOLVER" }, - "results": { - "title": "Results", - "type": "object", - "additionalProperties": { - "anyOf": [ - { - "$ref": "#/components/schemas/File" - }, - { - "type": "number" - }, - { - "type": "integer" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] + "title": { + "type": "string", + "title": "Title", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "default": "" + }, + "input_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionInputSchema" + } + ], + "title": "Input Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionInputSchema" + } } - } - }, - "example": { - "job_id": "99d9ac65-9f10-4e2f-a433-b5e412bb037b", - "results": { - "maxSAR": 4.33, - "n": 55, - "title": "Specific Absorption Rate", - "enabled": false, - "output_file": { - "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f", - "filename": "sar_matrix.txt", - "content_type": "text/plain" + }, + "output_schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/JSONFunctionOutputSchema" + } + ], + "title": "Output Schema", + "discriminator": { + "propertyName": "schema_class", + "mapping": { + "application/schema+json": "#/components/schemas/JSONFunctionOutputSchema" + } } + }, + "default_inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Default Inputs" + }, + "solver_key": { + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Solver 
Key" + }, + "solver_version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Solver Version" } - } - }, - "JobStatus": { - "title": "JobStatus", + }, + "type": "object", "required": [ - "job_id", - "state", - "submitted_at" + "input_schema", + "output_schema", + "default_inputs", + "solver_key", + "solver_version" ], - "type": "object", + "title": "SolverFunction" + }, + "SolverFunctionJob": { "properties": { - "job_id": { - "title": "Job Id", + "title": { "type": "string", - "format": "uuid" + "title": "Title", + "default": "" }, - "state": { - "$ref": "#/components/schemas/TaskStates" + "description": { + "type": "string", + "title": "Description", + "default": "" }, - "progress": { - "title": "Progress", - "maximum": 100.0, - "minimum": 0.0, - "type": "integer", - "default": 0 + "function_uid": { + "type": "string", + "format": "uuid", + "title": "Function Uid" }, - "submitted_at": { - "title": "Submitted At", + "inputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Inputs" + }, + "outputs": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Outputs" + }, + "function_class": { + "type": "string", + "const": "SOLVER", + "title": "Function Class", + "default": "SOLVER" + }, + "solver_job_id": { + "type": "string", + "format": "uuid", + "title": "Solver Job Id" + } + }, + "type": "object", + "required": [ + "function_uid", + "inputs", + "outputs", + "solver_job_id" + ], + "title": "SolverFunctionJob" + }, + "SolverPort": { + "properties": { + "key": { "type": "string", - "format": "date-time" + "pattern": "^[^_\\W0-9]\\w*$", + "title": "Key name", + "description": "port identifier name" }, - "started_at": { - "title": "Started At", + "kind": { "type": "string", - "description": "Timestamp that indicate the moment the solver starts execution or None if the event did not occur", - "format": "date-time" + "enum": [ + "input", + "output" + ], + "title": "Kind" }, - "stopped_at": { - "title": "Stopped At", - "type": "string", - "description": "Timestamp at which the solver finished or killed execution or None if the event did not occur", - "format": "date-time" + "content_schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Content Schema", + "description": "jsonschema for the port's value. 
SEE https://json-schema.org" } }, + "type": "object", + "required": [ + "key", + "kind" + ], + "title": "SolverPort", "example": { - "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", - "state": "STARTED", - "progress": 3, - "submitted_at": "2021-04-01 07:15:54.631007", - "started_at": "2021-04-01 07:16:43.670610" + "content_schema": { + "maximum": 5, + "minimum": 0, + "title": "Sleep interval", + "type": "integer", + "x_unit": "second" + }, + "key": "input_2", + "kind": "input" } }, - "Meta": { - "title": "Meta", - "required": [ - "name", - "version" - ], - "type": "object", + "Study": { "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" + "uid": { + "type": "string", + "format": "uuid", + "title": "Uid" }, - "released": { - "title": "Released", - "type": "object", - "additionalProperties": { - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "description": "Maps every route's path tag with a released version" + "title": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Title" }, - "docs_url": { - "title": "Docs Url", - "maxLength": 65536, - "minLength": 1, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + } + }, + "type": "object", + "required": [ + "uid" + ], + "title": "Study" + }, + "StudyPort": { + "properties": { + "key": { "type": "string", - "format": "uri", - "default": "https://docs.osparc.io" + "format": "uuid", + "title": "Key name", + "description": "port identifier name.Correponds to the UUID of the parameter/probe node in the study" }, - "docs_dev_url": { - "title": "Docs Dev Url", - "maxLength": 65536, - "minLength": 1, + "kind": { "type": "string", - "format": "uri", - "default": "https://api.osparc.io/dev/docs" + "enum": [ + "input", + "output" + ], + "title": "Kind" + }, + "content_schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Content Schema", + "description": "jsonschema for the port's value. SEE https://json-schema.org" } }, + "type": "object", + "required": [ + "key", + "kind" + ], + "title": "StudyPort", "example": { - "name": "simcore_service_foo", - "version": "2.4.45", - "released": { - "v1": "1.3.4", - "v2": "2.4.45" + "content_schema": { + "maximum": 5, + "minimum": 0, + "title": "Sleep interval", + "type": "integer", + "x_unit": "second" }, - "doc_url": "https://api.osparc.io/doc", - "doc_dev_url": "https://api.osparc.io/dev/doc" + "key": "f763658f-a89a-4a90-ace4-c44631290f12", + "kind": "input" } }, - "Profile": { - "title": "Profile", + "UnitExtraInfoTier": { + "properties": { + "CPU": { + "type": "integer", + "minimum": 0, + "title": "Cpu" + }, + "RAM": { + "type": "integer", + "minimum": 0, + "title": "Ram" + }, + "VRAM": { + "type": "integer", + "minimum": 0, + "title": "Vram" + } + }, + "additionalProperties": true, + "type": "object", "required": [ - "login", - "role" + "CPU", + "RAM", + "VRAM" ], - "type": "object", + "title": "UnitExtraInfoTier", + "description": "Custom information that is propagated to the frontend. Defined fields are mandatory." 
+ }, + "UploadLinks": { "properties": { - "first_name": { - "title": "First Name", - "type": "string", - "example": "James" - }, - "last_name": { - "title": "Last Name", + "abort_upload": { "type": "string", - "example": "Maxwell" + "title": "Abort Upload" }, - "login": { - "title": "Login", + "complete_upload": { "type": "string", - "format": "email" - }, - "role": { - "$ref": "#/components/schemas/UserRoleEnum" - }, - "groups": { - "$ref": "#/components/schemas/Groups" + "title": "Complete Upload" + } + }, + "type": "object", + "required": [ + "abort_upload", + "complete_upload" + ], + "title": "UploadLinks" + }, + "UploadedPart": { + "properties": { + "number": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Number", + "minimum": 0 }, - "gravatar_id": { - "title": "Gravatar Id", - "maxLength": 40, + "e_tag": { "type": "string", - "description": "md5 hash value of email to retrieve an avatar image from https://www.gravatar.com" + "title": "E Tag" } }, - "example": { - "first_name": "James", - "last_name": "Maxwell", - "login": "james-maxwell@itis.swiss", - "role": "USER", - "groups": { - "me": { - "gid": "123", - "label": "maxy", - "description": "primary group" - }, - "organizations": [], - "all": { - "gid": "1", - "label": "Everyone", - "description": "all users" - } - }, - "gravatar_id": "9a8930a5b20d7048e37740bac5c1ca4f" - } - }, - "ProfileUpdate": { - "title": "ProfileUpdate", "type": "object", + "required": [ + "number", + "e_tag" + ], + "title": "UploadedPart" + }, + "UserFile": { "properties": { - "first_name": { - "title": "First Name", + "filename": { "type": "string", - "example": "James" + "title": "Filename", + "description": "File name" }, - "last_name": { - "title": "Last Name", + "filesize": { + "type": "integer", + "minimum": 0, + "title": "Filesize", + "description": "File size in bytes" + }, + "sha256_checksum": { "type": "string", - "example": "Maxwell" + "pattern": "^[a-fA-F0-9]{64}$", + "title": "Sha256 Checksum", + "description": "SHA256 checksum" } - } - }, - "Solver": { - "title": "Solver", + }, + "type": "object", "required": [ - "id", - "version", - "title", - "maintainer", - "url" + "filename", + "filesize", + "sha256_checksum" ], - "type": "object", + "title": "UserFile", + "description": "Represents a file stored on the client side" + }, + "UserFileToProgramJob": { "properties": { - "id": { - "title": "Id", - "pattern": "^(simcore)/(services)/comp(/[\\w/-]+)+$", + "filename": { "type": "string", - "description": "Solver identifier" + "pattern": ".+", + "title": "Filename", + "description": "File name" }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "filesize": { + "type": "integer", + "minimum": 0, + "title": "Filesize", + "description": "File size in bytes" + }, + "sha256_checksum": { "type": "string", - "description": "semantic version number of the node" + "pattern": "^[a-fA-F0-9]{64}$", + "title": "Sha256 Checksum", + "description": "SHA256 checksum" }, - "title": { - "title": "Title", + "program_key": { "type": "string", - "description": "Human readable name" + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Program Key", + "description": "Program identifier" }, - "description": { - "title": "Description", - "type": "string" + "program_version": { + "type": "string", + "pattern": 
"^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Program Version", + "description": "Program version" }, - "maintainer": { - "title": "Maintainer", - "type": "string" + "job_id": { + "type": "string", + "format": "uuid", + "title": "Job Id", + "description": "Job identifier" }, - "url": { - "title": "Url", - "maxLength": 2083, - "minLength": 1, + "workspace_path": { "type": "string", - "description": "Link to get this resource", - "format": "uri" + "pattern": "^workspace/.*", + "format": "path", + "title": "Workspace Path", + "description": "The file's relative path within the job's workspace directory. E.g. 'workspace/myfile.txt'" } }, - "description": "A released solver with a specific version", - "example": { - "id": "simcore/services/comp/isolve", - "version": "2.1.1", - "title": "iSolve", - "description": "EM solver", - "maintainer": "info@itis.swiss", - "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.1" - } - }, - "TaskStates": { - "title": "TaskStates", - "enum": [ - "UNKNOWN", - "PUBLISHED", - "NOT_STARTED", - "PENDING", - "STARTED", - "RETRY", - "SUCCESS", - "FAILED", - "ABORTED" + "type": "object", + "required": [ + "filename", + "filesize", + "sha256_checksum", + "program_key", + "program_version", + "job_id", + "workspace_path" ], - "type": "string", - "description": "An enumeration." + "title": "UserFileToProgramJob" }, "UserRoleEnum": { - "title": "UserRoleEnum", + "type": "string", "enum": [ "ANONYMOUS", "GUEST", "USER", "TESTER", + "PRODUCT_OWNER", "ADMIN" ], - "type": "string", - "description": "An enumeration." + "title": "UserRoleEnum" }, "UsersGroup": { - "title": "UsersGroup", - "required": [ - "gid", - "label" - ], - "type": "object", "properties": { "gid": { - "title": "Gid", - "type": "string" + "type": "string", + "title": "Gid" }, "label": { - "title": "Label", - "type": "string" + "type": "string", + "title": "Label" }, "description": { - "title": "Description", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" } - } - }, - "ValidationError": { - "title": "ValidationError", + }, + "type": "object", "required": [ - "loc", - "msg", - "type" + "gid", + "label" ], - "type": "object", + "title": "UsersGroup" + }, + "ValidationError": { "properties": { "loc": { - "title": "Location", - "type": "array", "items": { "anyOf": [ { @@ -1656,17 +11061,107 @@ "type": "integer" } ] - } + }, + "type": "array", + "title": "Location" }, "msg": { - "title": "Message", - "type": "string" + "type": "string", + "title": "Message" }, "type": { - "title": "Error Type", - "type": "string" + "type": "string", + "title": "Error Type" } - } + }, + "type": "object", + "required": [ + "loc", + "msg", + "type" + ], + "title": "ValidationError" + }, + "WalletGetWithAvailableCreditsLegacy": { + "properties": { + "walletId": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Walletid", + "minimum": 0 + }, + "name": { + "type": "string", + "maxLength": 100, + "minLength": 1, + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "owner": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Owner", + "minimum": 0 + }, + "thumbnail": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Thumbnail" + }, + "status": { + "$ref": 
"#/components/schemas/WalletStatus" + }, + "created": { + "type": "string", + "format": "date-time", + "title": "Created" + }, + "modified": { + "type": "string", + "format": "date-time", + "title": "Modified" + }, + "availableCredits": { + "type": "number", + "minimum": 0.0, + "title": "Availablecredits" + } + }, + "type": "object", + "required": [ + "walletId", + "name", + "owner", + "status", + "created", + "modified", + "availableCredits" + ], + "title": "WalletGetWithAvailableCreditsLegacy" + }, + "WalletStatus": { + "type": "string", + "enum": [ + "ACTIVE", + "INACTIVE" + ], + "title": "WalletStatus" } }, "securitySchemes": { diff --git a/services/api-server/requirements/_base.in b/services/api-server/requirements/_base.in index 647b3f7198c..031fabf9e4e 100644 --- a/services/api-server/requirements/_base.in +++ b/services/api-server/requirements/_base.in @@ -7,6 +7,7 @@ --constraint ./constraints.txt # intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/postgres-database/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in @@ -16,12 +17,11 @@ --requirement ../../../packages/service-library/requirements/_fastapi.in aiofiles -aiopg[sa] cryptography -fastapi[all] -httpx +fastapi-pagination orjson packaging +parse pydantic[dotenv] pyyaml tenacity diff --git a/services/api-server/requirements/_base.txt b/services/api-server/requirements/_base.txt index e576adb9425..0d53295e4fe 100644 --- a/services/api-server/requirements/_base.txt +++ b/services/api-server/requirements/_base.txt @@ -1,263 +1,918 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.5 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in +aio-pika==9.5.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -aiocache==0.11.1 - # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in -aiodebug==2.3.0 +aiocache==0.12.3 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -aiofiles==0.8.0 + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in +aiodebug==2.3.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in - # -r requirements/_base.in -aiohttp==3.8.3 +aiodocker==0.24.0 # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt - # -r requirements/../../../packages/simcore-sdk/requirements/_base.in -aiopg==1.3.3 + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in -aiormq==6.4.2 +aiohappyeyeballs==2.4.4 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in + # aiodocker +aiormq==6.8.1 # via aio-pika -aiosignal==1.2.0 +aiosignal==1.3.1 # via aiohttp -alembic==1.8.1 +alembic==1.14.0 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in -anyio==3.6.1 +annotated-types==0.7.0 + # via pydantic +anyio==4.7.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette - # watchgod -arrow==1.2.3 - # via -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -asgiref==3.5.2 - # via uvicorn -async-timeout==4.0.2 + # watchfiles +arrow==1.3.0 # via - # aiohttp - # aiopg - # redis -attrs==21.4.0 + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==24.2.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt # aiohttp # jsonschema -certifi==2022.12.7 - # via +certifi==2024.8.30 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx # requests -cffi==1.15.0 +cffi==1.17.1 # via cryptography -charset-normalizer==2.0.12 - # via - # aiohttp - # requests -click==8.1.3 +charset-normalizer==3.4.0 + # via requests +click==8.1.7 # via + # rich-toolkit # typer # uvicorn -cryptography==39.0.1 - # via -r requirements/_base.in -dnspython==2.2.1 +cryptography==44.0.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -email-validator==1.2.1 +email-validator==2.2.0 # via # fastapi # pydantic -fastapi==0.85.0 +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.6 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -frozenlist==1.3.0 +fastapi-pagination==0.12.32 + # via -r requirements/_base.in +faststream==0.5.33 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +frozenlist==1.5.0 # via # aiohttp # aiosignal -greenlet==1.1.2 +googleapis-common-protos==1.66.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 # via sqlalchemy -h11==0.12.0 +grpcio==1.68.1 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 # via # httpcore # uvicorn -httpcore==0.15.0 +h2==4.2.0 # via httpx -httptools==0.2.0 +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 # via uvicorn -httpx==0.23.0 - # via +httpx==0.27.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -idna==3.3 + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator + # httpx # requests - # rfc3986 # yarl -itsdangerous==2.1.2 - # via fastapi -jaeger-client==4.8.0 - # via fastapi-contrib -jinja2==3.1.2 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # fastapi jsonschema==3.2.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt + # -c requirements/./constraints.txt # -r requirements/../../../packages/models-library/requirements/_base.in -mako==1.2.2 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +mako==1.3.7 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.1 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via # jinja2 # mako -multidict==6.0.2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 # via # aiohttp # yarl -opentracing==2.4.0 +opentelemetry-api==1.28.2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.28.2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.28.2 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.28.2 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.28.2 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.49b2 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.49b2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.49b2 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.49b2 + # via + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.49b2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.49b2 + # via + # -r 
requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.49b2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.28.2 # via - # fastapi-contrib - # jaeger-client -orjson==3.7.2 + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.28.2 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.49b2 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.49b2 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.12 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/_base.in - # fastapi -packaging==21.3 +packaging==24.2 # via # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in -pamqp==3.2.1 + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pint==0.19.2 +parse==1.20.2 + # via -r requirements/_base.in +pint==0.24.4 # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in -psycopg2-binary==2.9.3 +platformdirs==4.3.6 + # via pint +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.2.1 # via - # aiopg - # sqlalchemy -pycparser==2.21 - # via cffi -pydantic==1.9.0 + # aiohttp + # yarl +protobuf==5.29.1 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==6.1.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycparser==2.22 + # via cffi +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in + # fast-depends # fastapi -pyinstrument==4.1.1 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # fastapi-pagination + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.10.0 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.18.0 + # via rich +pyinstrument==5.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -pyparsing==3.0.9 - # via packaging -pyrsistent==0.18.1 +pyrsistent==0.20.0 # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -python-dotenv==0.20.0 +python-dotenv==1.0.1 # via - # pydantic + # pydantic-settings # uvicorn -python-multipart==0.0.5 +python-multipart==0.0.19 # via fastapi -pyyaml==5.4.1 - # via +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + 
# -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in - # fastapi # uvicorn -redis==4.4.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -requests==2.27.1 - # via fastapi -rfc3986==1.5.0 - # via httpx -six==1.16.0 +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.12.0 + # via fastapi-cli +setuptools==75.6.0 + # via jsonschema +shellingham==1.5.4 + # via typer +six==1.17.0 # via # jsonschema # python-dateutil - # python-multipart - # thrift -sniffio==1.2.0 +sniffio==1.3.1 # via # anyio - # httpcore # httpx -sqlalchemy==1.4.37 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/postgres-database/requirements/_base.in - # aiopg + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # alembic -starlette==0.20.4 - # via fastapi -tenacity==8.0.1 +starlette==0.41.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.1 +toolz==1.0.0 # via - # jaeger-client - # threadloop -tqdm==4.64.0 + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -typer==0.4.1 + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in +typer==0.15.1 # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in -typing-extensions==4.3.0 + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug - # pydantic - # starlette -ujson==5.5.0 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # alembic + # anyio # fastapi -urllib3==1.26.9 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # fastapi-pagination + # faststream + # flexcache + # flexparser + # opentelemetry-sdk + # pint + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +urllib3==2.2.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # requests -uvicorn==0.15.0 +uvicorn==0.34.2 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in # fastapi -uvloop==0.16.0 + # fastapi-cli +uvloop==0.21.0 # via uvicorn -watchgod==0.8.2 +watchfiles==1.0.0 # via uvicorn -websockets==10.2 +websockets==14.1 # via uvicorn -yarl==1.7.2 +wrapt==1.17.0 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/api-server/requirements/_test.in b/services/api-server/requirements/_test.in index 54343d0f6c4..805e1f7a7af 100644 --- a/services/api-server/requirements/_test.in +++ b/services/api-server/requirements/_test.in @@ -10,16 +10,17 @@ # --constraint _base.txt + +aioresponses alembic asgi_lifespan click -codecov -coveralls docker faker -types-boto3 +jsonref moto[server] # mock out tests based on AWS-S3 -passlib[bcrypt] +pact-python +pyinstrument pytest pytest-asyncio pytest-cov @@ -27,3 +28,7 @@ pytest-docker pytest-mock pytest-runner respx +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types-aiofiles +types-boto3 +types-jsonschema diff --git a/services/api-server/requirements/_test.txt b/services/api-server/requirements/_test.txt index a7a2ce60321..e1e0dfe6a61 100644 --- a/services/api-server/requirements/_test.txt +++ b/services/api-server/requirements/_test.txt @@ -1,140 +1,157 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -alembic==1.8.1 +aiohappyeyeballs==2.4.4 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aioresponses +aioresponses==0.7.8 # via -r requirements/_test.in -anyio==3.6.1 +aiosignal==1.3.1 # via # -c requirements/_base.txt - # httpcore -asgi-lifespan==2.0.0 + # aiohttp +alembic==1.14.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +anyio==4.7.0 + # via + # -c requirements/_base.txt + # httpx + # starlette +asgi-lifespan==2.1.0 # via -r requirements/_test.in -attrs==21.4.0 +attrs==24.2.0 # via # -c requirements/_base.txt + # aiohttp # 
jschema-to-python # jsonschema - # pytest # pytest-docker + # referencing # sarif-om aws-sam-translator==1.55.0 # via # -c requirements/./constraints.txt # cfn-lint -aws-xray-sdk==2.11.0 +aws-xray-sdk==2.14.0 # via moto -bcrypt==4.0.1 - # via passlib -boto3==1.26.82 +boto3==1.38.1 # via # aws-sam-translator # moto -boto3-stubs==1.26.82 - # via types-boto3 -botocore==1.29.82 +botocore==1.38.1 # via # aws-xray-sdk # boto3 # moto # s3transfer -botocore-stubs==1.29.82 - # via boto3-stubs -certifi==2022.12.7 +botocore-stubs==1.37.4 + # via types-boto3 +certifi==2024.8.30 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # httpcore # httpx # requests -cffi==1.15.0 +cffi==1.17.1 # via # -c requirements/_base.txt # cryptography + # pact-python cfn-lint==0.72.0 # via # -c requirements/./constraints.txt # moto -charset-normalizer==2.0.12 +charset-normalizer==3.4.0 # via # -c requirements/_base.txt # requests -click==8.1.3 +click==8.1.7 # via + # -c requirements/_base.txt # -r requirements/_test.in # flask -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 - # via - # codecov - # coveralls - # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.1 + # pact-python + # uvicorn +coverage==7.6.12 + # via pytest-cov +cryptography==44.0.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # moto # python-jose # sshpubkeys -docker==6.0.1 +docker==7.1.0 # via # -r requirements/_test.in # moto -docopt==0.6.2 - # via coveralls -ecdsa==0.18.0 +ecdsa==0.19.0 # via # moto # python-jose # sshpubkeys -exceptiongroup==1.1.0 - # via pytest -faker==17.5.0 +faker==36.1.1 # via -r requirements/_test.in +fastapi==0.115.12 + # via + # -c requirements/_base.txt + # pact-python flask==2.1.3 # via # flask-cors # moto -flask-cors==3.0.10 +flask-cors==5.0.1 # via moto -graphql-core==3.2.3 +frozenlist==1.5.0 + # via + # -c requirements/_base.txt + # aiohttp + # aiosignal +graphql-core==3.2.6 # via moto -greenlet==1.1.2 +greenlet==3.1.1 # via # -c requirements/_base.txt # sqlalchemy -h11==0.12.0 +h11==0.14.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.15.0 + # uvicorn +httpcore==1.0.7 # via # -c requirements/_base.txt # httpx -httpx==0.23.0 +httpx==0.27.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # respx -idna==3.3 +idna==3.10 # via # -c requirements/_base.txt # anyio + # httpx # moto # requests - # rfc3986 -importlib-metadata==6.0.0 - # via flask + # yarl iniconfig==2.0.0 # via pytest -itsdangerous==2.1.2 - # via - # -c requirements/_base.txt - # flask -jinja2==3.1.2 +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # flask # moto @@ -144,190 +161,249 @@ jmespath==1.0.1 # botocore jschema-to-python==1.2.3 # via cfn-lint -jsondiff==2.0.0 +jsondiff==2.2.1 # via moto -jsonpatch==1.32 +jsonpatch==1.33 # via cfn-lint -jsonpickle==3.0.1 +jsonpickle==4.0.2 # via jschema-to-python -jsonpointer==2.3 +jsonpointer==3.0.0 # via jsonpatch +jsonref==1.1.0 + # via -r requirements/_test.in jsonschema==3.2.0 # via # -c requirements/_base.txt + # -c requirements/./constraints.txt # aws-sam-translator # cfn-lint # openapi-schema-validator # openapi-spec-validator junit-xml==1.9 # via cfn-lint -mako==1.2.2 +mako==1.3.7 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # alembic -markupsafe==2.1.1 +markupsafe==3.0.2 # via # -c 
requirements/_base.txt # jinja2 # mako # moto moto==4.0.1 - # via -r requirements/_test.in + # via + # -c requirements/./constraints.txt + # -r requirements/_test.in +multidict==6.1.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy networkx==2.8.8 # via cfn-lint openapi-schema-validator==0.2.3 # via openapi-spec-validator openapi-spec-validator==0.4.0 - # via - # -c requirements/./constraints.txt - # moto -packaging==21.3 + # via moto +packaging==24.2 # via # -c requirements/_base.txt - # docker + # aioresponses # pytest -passlib==1.7.4 +pact-python==2.3.1 # via -r requirements/_test.in -pbr==5.11.1 +pbr==6.1.1 # via # jschema-to-python # sarif-om -pluggy==1.0.0 +pluggy==1.5.0 # via pytest +propcache==0.2.1 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +psutil==6.1.0 + # via + # -c requirements/_base.txt + # pact-python pyasn1==0.4.8 # via # python-jose # rsa -pycparser==2.21 +pycparser==2.22 # via # -c requirements/_base.txt # cffi -pyparsing==3.0.9 +pydantic==2.10.3 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # moto - # packaging -pyrsistent==0.18.1 + # fastapi +pydantic-core==2.27.1 + # via + # -c requirements/_base.txt + # pydantic +pyinstrument==5.0.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyparsing==3.2.1 + # via moto +pyrsistent==0.20.0 # via # -c requirements/_base.txt # jsonschema -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio # pytest-cov # pytest-docker # pytest-mock -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-docker==1.0.1 +pytest-docker==3.2.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt # botocore - # faker # moto -python-jose==3.3.0 +python-jose==3.4.0 # via moto -pytz==2022.7.1 +pytz==2025.1 # via moto -pyyaml==5.4.1 +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # cfn-lint + # jsondiff # moto # openapi-spec-validator -requests==2.27.1 + # responses +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # types-jsonschema +requests==2.32.3 # via # -c requirements/_base.txt - # codecov - # coveralls # docker # moto + # pact-python # responses -responses==0.22.0 +responses==0.25.6 # via moto -respx==0.20.1 +respx==0.22.0 # via -r requirements/_test.in -rfc3986==1.5.0 - # via - # -c requirements/_base.txt - # httpx +rpds-py==0.25.0 + # via referencing rsa==4.9 # via # -c requirements/../../../requirements/constraints.txt # python-jose -s3transfer==0.6.0 +s3transfer==0.12.0 # via boto3 sarif-om==1.0.4 # via cfn-lint -six==1.16.0 +setuptools==75.6.0 + # via + # -c requirements/_base.txt + # jsonschema + # moto + # openapi-spec-validator + # pbr +six==1.17.0 # via # -c requirements/_base.txt # ecdsa - # flask-cors # jsonschema # junit-xml + # pact-python # python-dateutil -sniffio==1.2.0 +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio # asgi-lifespan - # httpcore # httpx -sqlalchemy==1.4.37 +sqlalchemy==1.4.54 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt + # -r requirements/_test.in # alembic 
+sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy sshpubkeys==3.3.1 # via moto -toml==0.10.2 - # via responses -tomli==2.0.1 +starlette==0.41.3 # via - # coverage - # pytest -types-awscrt==0.16.10 - # via - # botocore-stubs - # types-s3transfer -types-boto3==1.0.2 + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # fastapi +types-aiofiles==24.1.0.20241221 + # via -r requirements/_test.in +types-awscrt==0.23.10 + # via botocore-stubs +types-boto3==1.37.4 + # via -r requirements/_test.in +types-jsonschema==4.23.0.20241208 # via -r requirements/_test.in -types-s3transfer==0.6.0.post5 - # via boto3-stubs -types-toml==0.10.8.5 - # via responses -typing-extensions==4.3.0 +types-s3transfer==0.11.3 + # via types-boto3 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # boto3-stubs -urllib3==1.26.9 + # alembic + # anyio + # fastapi + # mypy + # pydantic + # pydantic-core + # sqlalchemy2-stubs + # types-boto3 +tzdata==2025.1 + # via faker +urllib3==2.2.3 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # botocore # docker # requests # responses -websocket-client==1.5.1 - # via docker +uvicorn==0.34.2 + # via + # -c requirements/_base.txt + # pact-python werkzeug==2.1.2 # via # flask + # flask-cors # moto -wrapt==1.15.0 - # via aws-xray-sdk -xmltodict==0.13.0 +wrapt==1.17.0 + # via + # -c requirements/_base.txt + # aws-xray-sdk +xmltodict==0.14.2 # via moto -zipp==3.15.0 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +yarl==1.18.3 + # via + # -c requirements/_base.txt + # aiohttp + # pact-python diff --git a/services/api-server/requirements/_tools.txt b/services/api-server/requirements/_tools.txt index 8c626151d69..e86a0164c22 100644 --- a/services/api-server/requirements/_tools.txt +++ b/services/api-server/requirements/_tools.txt @@ -1,106 +1,107 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==22.12.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit change-case==0.5.2 # via -r requirements/_tools.in -click==8.1.3 +click==8.1.7 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -jinja2==3.1.2 - # via -r requirements/_tools.in -lazy-object-proxy==1.9.0 - # via astroid -markupsafe==2.1.1 +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_test.txt + # -r requirements/_tools.in +markupsafe==3.0.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # jinja2 mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 # via pre-commit 
-packaging==21.3 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt + # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via + # -c requirements/_base.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyparsing==3.0.9 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # packaging -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt # pre-commit # watchdog -tomli==2.0.1 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.6.0 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.3.0 +typing-extensions==4.12.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.2 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/api-server/requirements/ci.txt b/services/api-server/requirements/ci.txt index 0f027a404fb..cc1799cee07 100644 --- a/services/api-server/requirements/ci.txt +++ b/services/api-server/requirements/ci.txt @@ -9,14 +9,16 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/models-library -../../packages/postgres-database/ -../../packages/pytest-simcore/ -../../packages/simcore-sdk -../../packages/service-library[fastapi] -../../packages/settings-library/ +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database/ +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-sdk @ ../../packages/simcore-sdk +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-api-server @ . 
diff --git a/services/api-server/requirements/constraints.txt b/services/api-server/requirements/constraints.txt index 6247b000156..22d919bdae3 100644 --- a/services/api-server/requirements/constraints.txt +++ b/services/api-server/requirements/constraints.txt @@ -22,12 +22,6 @@ jsonschema~=3.2 # jsonschema<5,>=3.0 (from cfn-lint==0.64.1->moto[server]==4.0.2->-r requirements/_test.in (line 21)) moto<4.0.2 -# There are incompatible versions in the resolved dependencies: -# jsonschema==3.2.0 (from -c requirements/_base.txt (line 159)) -# jsonschema~=3.2 (from -c requirements/./constraints.txt (line 12)) -# jsonschema<5,>=3.0 (from cfn-lint==0.64.1->moto[server]==4.0.1->-r requirements/_test.in (line 21)) -# jsonschema<5.0.0,>=4.0.0 (from openapi-spec-validator==0.5.1->moto[server]==4.0.1->-r requirements/_test.in (line 21)) -openapi-spec-validator<0.5.0 # There are incompatible versions in the resolved dependencies: # pydantic>=1.8.2 (from -c ../../../requirements/constraints.txt (line 18)) diff --git a/services/api-server/requirements/dev.txt b/services/api-server/requirements/dev.txt index 2de1f4cc316..5afc552d753 100644 --- a/services/api-server/requirements/dev.txt +++ b/services/api-server/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/postgres-database --editable ../../packages/pytest-simcore/ diff --git a/services/api-server/requirements/prod.txt b/services/api-server/requirements/prod.txt index 2ca6529a5ce..9d4d747507e 100644 --- a/services/api-server/requirements/prod.txt +++ b/services/api-server/requirements/prod.txt @@ -10,10 +10,11 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library -../../packages/postgres-database/ -../../packages/service-library[fastapi] -../../packages/settings-library/ -../../packages/simcore-sdk +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ +simcore-postgres-database @ ../../packages/postgres-database/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ +simcore-sdk @ ../../packages/simcore-sdk # installs current package -. +simcore-service-api-server @ . diff --git a/services/api-server/setup.cfg b/services/api-server/setup.cfg index fa929dbc3f2..8bb3e9b8f8e 100644 --- a/services/api-server/setup.cfg +++ b/services/api-server/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.4.3 +current_version = 0.8.0 commit = True message = services/api-server version: {current_version} β†’ {new_version} tag = False @@ -9,8 +9,14 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function addopts = --strict-markers markers = slow: marks tests as slow (deselect with '-m "not slow"') acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/services/api-server/setup.py b/services/api-server/setup.py old mode 100644 new mode 100755 index 53afcf1293e..9da2789a9c4 --- a/services/api-server/setup.py +++ b/services/api-server/setup.py @@ -1,14 +1,12 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -24,7 +22,7 @@ def read_reqs(reqs_path: Path) -> Set[str]: NAME = "simcore-service-api-server" VERSION = (CURRENT_DIR / "VERSION").read_text().strip() -AUTHORS = "Pedro Crespo-Valero (pcrespov)" +AUTHORS = ("Pedro Crespo-Valero (pcrespov)", "Mads Bisgaard (bisgaard-itis)") DESCRIPTION = "Platform's API Server for external clients" README = (CURRENT_DIR / "README.md").read_text() @@ -41,29 +39,30 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name=NAME, - version=VERSION, - author=AUTHORS, - description=DESCRIPTION, - long_description=README, - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { "console_scripts": [ "simcore-service-api-server = simcore_service_api_server.cli:main", + "simcore-service = simcore_service_api_server.cli:main", ], }, -) +} if __name__ == "__main__": setup(**SETUP) diff --git a/services/api-server/src/simcore_service_api_server/__init__.py b/services/api-server/src/simcore_service_api_server/__init__.py index 95ca74c6aad..f513c971cca 100644 --- a/services/api-server/src/simcore_service_api_server/__init__.py +++ b/services/api-server/src/simcore_service_api_server/__init__.py @@ -1,4 +1,3 @@ -""" Python package for the simcore_service_api_server. - -""" from ._meta import __version__ + +assert __version__ # nosec diff --git a/services/api-server/src/simcore_service_api_server/_constants.py b/services/api-server/src/simcore_service_api_server/_constants.py new file mode 100644 index 00000000000..7bfbfd43907 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/_constants.py @@ -0,0 +1,9 @@ +from typing import Final + +MSG_BACKEND_SERVICE_UNAVAILABLE: Final[ + str +] = "backend service is disabled or unreachable" + +MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE: Final[ + str +] = "Oops! Something went wrong, but we've noted it down and we'll sort it out ASAP. Thanks for your patience!" 
diff --git a/services/api-server/src/simcore_service_api_server/_meta.py b/services/api-server/src/simcore_service_api_server/_meta.py index 770c84cca8a..33a0480a25c 100644 --- a/services/api-server/src/simcore_service_api_server/_meta.py +++ b/services/api-server/src/simcore_service_api_server/_meta.py @@ -1,30 +1,36 @@ """ Application's metadata """ -from contextlib import suppress -import pkg_resources -_current_distribution = pkg_resources.get_distribution("simcore_service_api_server") +from typing import Final -PROJECT_NAME: str = _current_distribution.project_name +from models_library.basic_types import VersionStr +from packaging.version import Version +from servicelib.utils_meta import PackageInfo -API_VERSION: str = _current_distribution.version -MAJOR, MINOR, PATCH = _current_distribution.version.split(".") -API_VTAG: str = f"v{MAJOR}" +info: Final = PackageInfo(package_name="simcore-service-api-server") +__version__: Final[VersionStr] = info.__version__ -__version__ = _current_distribution.version +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +API_VTAG: Final[str] = info.api_prefix_path_tag +APP_NAME: Final[str] = PROJECT_NAME +SUMMARY: Final[str] = info.get_summary() -def get_summary() -> str: - with suppress(Exception): - try: - metadata = _current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = _current_distribution.get_metadata_lines("PKG-INFO") - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" +# +# https://patorjk.com/software/taag/#p=display&f=JS%20Stick%20Letters&t=API-server%0A +# +APP_STARTED_BANNER_MSG = r""" + __ __ ___ __ ___ __ + /\ |__) | __ /__` |__ |__) \ / |__ |__) +/~~\ | | .__/ |___ | \ \/ |___ | \ {} +""".format( + f"v{__version__}" +) -SUMMARY: str = get_summary() +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/api-server/src/simcore_service_api_server/_service_jobs.py b/services/api-server/src/simcore_service_api_server/_service_jobs.py new file mode 100644 index 00000000000..faac214897f --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/_service_jobs.py @@ -0,0 +1,149 @@ +import logging +from collections.abc import Callable +from dataclasses import dataclass + +from common_library.exclude import as_dict_exclude_none +from models_library.api_schemas_webserver.projects import ProjectCreateNew, ProjectGet +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rest_pagination import ( + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.rpc_pagination import PageLimitInt +from models_library.users import UserID +from pydantic import HttpUrl +from servicelib.logging_utils import log_context +from simcore_service_api_server.models.basic_types import NameValueTuple + +from .models.schemas.jobs import Job, JobInputs +from .models.schemas.programs import Program +from .models.schemas.solvers import Solver +from .services_http.solver_job_models_converters import ( + create_job_from_project, + create_job_inputs_from_node_inputs, + create_new_project_for_job, +) +from .services_http.webserver import AuthSession +from .services_rpc.wb_api_server import WbApiRpcClient + +_logger = logging.getLogger(__name__) + + +@dataclass(frozen=True, kw_only=True) +class JobService: + _web_rest_client: AuthSession + _web_rpc_client: 
WbApiRpcClient + user_id: UserID + product_name: ProductName + + async def list_jobs( + self, + job_parent_resource_name: str, + *, + filter_any_custom_metadata: list[NameValueTuple] | None = None, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[Job], PageMetaInfoLimitOffset]: + """Lists all jobs for a user with pagination based on resource name prefix""" + + pagination_kwargs = as_dict_exclude_none( + pagination_offset=pagination_offset, pagination_limit=pagination_limit + ) + + # 1. List projects marked as jobs + projects_page = await self._web_rpc_client.list_projects_marked_as_jobs( + product_name=self.product_name, + user_id=self.user_id, + filter_by_job_parent_resource_name_prefix=job_parent_resource_name, + filter_any_custom_metadata=filter_any_custom_metadata, + **pagination_kwargs, + ) + + # 2. Convert projects to jobs + jobs: list[Job] = [] + for project_job in projects_page.data: + assert ( # nosec + len(project_job.workbench) == 1 + ), "Expected only one solver node in workbench" + + solver_node = next(iter(project_job.workbench.values())) + job_inputs: JobInputs = create_job_inputs_from_node_inputs( + inputs=solver_node.inputs or {} + ) + assert project_job.job_parent_resource_name # nosec + + jobs.append( + Job( + id=project_job.uuid, + name=Job.compose_resource_name( + project_job.job_parent_resource_name, project_job.uuid + ), + inputs_checksum=job_inputs.compute_checksum(), + created_at=project_job.created_at, + runner_name=project_job.job_parent_resource_name, + url=None, + runner_url=None, + outputs_url=None, + ) + ) + + return jobs, projects_page.meta + + async def create_job( + self, + *, + solver_or_program: Solver | Program, + inputs: JobInputs, + parent_project_uuid: ProjectID | None, + parent_node_id: NodeID | None, + url_for: Callable[..., HttpUrl], + hidden: bool, + project_name: str | None, + description: str | None, + ) -> tuple[Job, ProjectGet]: + """If no project_name is provided, the job name is used as project name""" + + # creates NEW job as prototype + + pre_job = Job.create_job_from_solver_or_program( + solver_or_program_name=solver_or_program.name, inputs=inputs + ) + with log_context( + logger=_logger, level=logging.DEBUG, msg=f"Creating job {pre_job.name}" + ): + project_in: ProjectCreateNew = create_new_project_for_job( + solver_or_program=solver_or_program, + job=pre_job, + inputs=inputs, + description=description, + project_name=project_name, + ) + new_project: ProjectGet = await self._web_rest_client.create_project( + project_in, + is_hidden=hidden, + parent_project_uuid=parent_project_uuid, + parent_node_id=parent_node_id, + ) + await self._web_rpc_client.mark_project_as_job( + product_name=self.product_name, + user_id=self.user_id, + project_uuid=new_project.uuid, + job_parent_resource_name=pre_job.runner_name, + ) + + assert new_project # nosec + assert new_project.uuid == pre_job.id # nosec + + # for consistency, it rebuild job + job = create_job_from_project( + solver_or_program=solver_or_program, project=new_project, url_for=url_for + ) + assert job.id == pre_job.id # nosec + assert job.name == pre_job.name # nosec + assert job.name == Job.compose_resource_name( + parent_name=solver_or_program.resource_name, + job_id=job.id, + ) + return job, new_project diff --git a/services/api-server/src/simcore_service_api_server/_service_programs.py b/services/api-server/src/simcore_service_api_server/_service_programs.py new file mode 100644 index 00000000000..ebb9ae652d7 --- /dev/null 
+++ b/services/api-server/src/simcore_service_api_server/_service_programs.py @@ -0,0 +1,78 @@ +from dataclasses import dataclass + +from models_library.api_schemas_catalog.services import ServiceListFilters +from models_library.basic_types import VersionStr +from models_library.rest_pagination import ( + PageLimitInt, + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.services_enums import ServiceType + +from .models.schemas.programs import Program, ProgramKeyId +from .services_rpc.catalog import CatalogService + + +@dataclass(frozen=True, kw_only=True) +class ProgramService: + catalog_service: CatalogService + + async def get_program( + self, + *, + name: ProgramKeyId, + version: VersionStr, + ) -> Program: + service = await self.catalog_service.get( + name=name, + version=version, + ) + assert service.service_type == ServiceType.DYNAMIC # nosec + + return Program.create_from_service(service) + + async def list_latest_programs( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[Program], PageMetaInfoLimitOffset]: + page, page_meta = await self.catalog_service.list_latest_releases( + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filters=ServiceListFilters(service_type=ServiceType.DYNAMIC), + ) + + items = [Program.create_from_service(service) for service in page] + return items, page_meta + + async def list_program_history( + self, + *, + program_key: ProgramKeyId, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[Program], PageMetaInfoLimitOffset]: + page, page_meta = await self.catalog_service.list_release_history_latest_first( + filter_by_service_key=program_key, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) + if len(page) == 0: + return [], page_meta + + program_instance = await self.catalog_service.get( + name=program_key, + version=page[-1].version, + ) + + items = [ + Program.create_from_service_release( + service=service, + service_key=program_instance.key, + description=program_instance.description, + name=program_instance.name, + ) + for service in page + ] + return items, page_meta diff --git a/services/api-server/src/simcore_service_api_server/_service_solvers.py b/services/api-server/src/simcore_service_api_server/_service_solvers.py new file mode 100644 index 00000000000..3c0aac79721 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/_service_solvers.py @@ -0,0 +1,229 @@ +from dataclasses import dataclass + +from models_library.api_schemas_catalog.services import ServiceListFilters +from models_library.basic_types import VersionStr +from models_library.products import ProductName +from models_library.rest_pagination import ( + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.rpc_pagination import PageLimitInt +from models_library.services_enums import ServiceType +from models_library.users import UserID +from simcore_service_api_server.models.basic_types import NameValueTuple + +from ._service_jobs import JobService +from ._service_utils import check_user_product_consistency +from .exceptions.backend_errors import ( + ProgramOrSolverOrStudyNotFoundError, +) +from .exceptions.custom_errors import ( + SolverServiceListJobsFiltersError, +) +from .models.api_resources import compose_resource_name +from .models.schemas.jobs import Job +from .models.schemas.solvers import Solver, SolverKeyId +from .services_rpc.catalog 
import CatalogService + + +@dataclass(frozen=True, kw_only=True) +class SolverService: + catalog_service: CatalogService + job_service: JobService + user_id: UserID + product_name: ProductName + + def __post_init__(self): + check_user_product_consistency( + service_cls_name=self.__class__.__name__, + service_provider=self.catalog_service, + user_id=self.user_id, + product_name=self.product_name, + ) + + check_user_product_consistency( + service_cls_name=self.__class__.__name__, + service_provider=self.job_service, + user_id=self.user_id, + product_name=self.product_name, + ) + + async def get_solver( + self, + *, + solver_key: SolverKeyId, + solver_version: VersionStr, + ) -> Solver: + service = await self.catalog_service.get( + name=solver_key, + version=solver_version, + ) + assert ( # nosec + service.service_type == ServiceType.COMPUTATIONAL + ), "Expected by SolverName regex" + + return Solver.create_from_service(service) + + async def get_latest_release( + self, + *, + solver_key: SolverKeyId, + ) -> Solver: + releases, _ = await self.catalog_service.list_release_history_latest_first( + filter_by_service_key=solver_key, + pagination_offset=0, + pagination_limit=1, + ) + + if len(releases) == 0: + raise ProgramOrSolverOrStudyNotFoundError(name=solver_key, version="latest") + service = await self.catalog_service.get( + name=solver_key, + version=releases[0].version, + ) + + return Solver.create_from_service(service) + + async def list_jobs( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + filter_by_solver_key: SolverKeyId | None = None, + filter_by_solver_version: VersionStr | None = None, + filter_any_custom_metadata: list[NameValueTuple] | None = None, + ) -> tuple[list[Job], PageMetaInfoLimitOffset]: + """Lists all solver jobs for a user with pagination""" + + # 1. Compose job parent resource name prefix + collection_or_resource_ids = [ + "solvers", # solver_id, "releases", solver_version, "jobs", + ] + if filter_by_solver_key: + collection_or_resource_ids.append(filter_by_solver_key) + if filter_by_solver_version: + collection_or_resource_ids.append("releases") + collection_or_resource_ids.append(filter_by_solver_version) + elif filter_by_solver_version: + raise SolverServiceListJobsFiltersError + + job_parent_resource_name = compose_resource_name(*collection_or_resource_ids) + + # 2. 
list jobs under job_parent_resource_name + return await self.job_service.list_jobs( + job_parent_resource_name=job_parent_resource_name, + filter_any_custom_metadata=filter_any_custom_metadata, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) + + async def solver_release_history( + self, + *, + solver_key: SolverKeyId, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[Solver], PageMetaInfoLimitOffset]: + + releases, page_meta = ( + await self.catalog_service.list_release_history_latest_first( + filter_by_service_key=solver_key, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) + ) + + service_instance = await self.catalog_service.get( + name=solver_key, + version=releases[-1].version, + ) + + return [ + Solver.create_from_service_release( + service_key=service_instance.key, + description=service_instance.description, + contact=service_instance.contact, + name=service_instance.name, + service=service, + ) + for service in releases + ], page_meta + + async def list_all_solvers( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + filter_by_solver_key_pattern: str | None = None, + filter_by_version_display_pattern: str | None = None, + ) -> tuple[list[Solver], PageMetaInfoLimitOffset]: + """Lists all solvers with pagination and filtering, including all versions. + + Unlike `latest_solvers` which only shows the latest version of each solver, + this method returns all versions of solvers that match the filters. + + Args: + pagination_offset: Pagination offset + pagination_limit: Pagination limit + filter_by_solver_key_pattern: Optional pattern to filter solvers by key e.g. "simcore/service/my_solver*" + filter_by_version_display_pattern: Optional pattern to filter by version display e.g. "1.0.*-beta" + + Returns: + A tuple with the list of filtered solvers and pagination metadata + """ + filters = ServiceListFilters(service_type=ServiceType.COMPUTATIONAL) + + # Add key_pattern filter for solver ID if provided + if filter_by_solver_key_pattern: + filters.service_key_pattern = filter_by_solver_key_pattern + + # Add version_display_pattern filter if provided + if filter_by_version_display_pattern: + filters.version_display_pattern = filter_by_version_display_pattern + + services, page_meta = await self.catalog_service.list_all_services_summaries( + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filters=filters, + ) + + solvers = [Solver.create_from_service(service) for service in services] + return solvers, page_meta + + async def latest_solvers( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + filter_by_solver_key_pattern: str | None = None, + filter_by_version_display_pattern: str | None = None, + ) -> tuple[list[Solver], PageMetaInfoLimitOffset]: + """Lists the latest solvers with pagination and filtering. + + Args: + pagination_offset: Pagination offset + pagination_limit: Pagination limit + filter_by_solver_key_pattern: Optional pattern to filter solvers by key e.g. "simcore/service/my_solver*" + filter_by_version_display_pattern: Optional pattern to filter by version display e.g. 
"1.0.*-beta" + + Returns: + A tuple with the list of filtered solvers and pagination metadata + """ + filters = ServiceListFilters(service_type=ServiceType.COMPUTATIONAL) + + # Add key_pattern filter for solver ID if provided + if filter_by_solver_key_pattern: + filters.service_key_pattern = filter_by_solver_key_pattern + + # Add version_display_pattern filter if provided + if filter_by_version_display_pattern: + filters.version_display_pattern = filter_by_version_display_pattern + + services, page_meta = await self.catalog_service.list_latest_releases( + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filters=filters, + ) + + solvers = [Solver.create_from_service(service) for service in services] + return solvers, page_meta diff --git a/services/api-server/src/simcore_service_api_server/_service_studies.py b/services/api-server/src/simcore_service_api_server/_service_studies.py new file mode 100644 index 00000000000..733315f3330 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/_service_studies.py @@ -0,0 +1,58 @@ +from dataclasses import dataclass + +from models_library.products import ProductName +from models_library.rest_pagination import ( + MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.rpc_pagination import PageLimitInt +from models_library.users import UserID + +from ._service_jobs import JobService +from ._service_utils import check_user_product_consistency +from .models.api_resources import compose_resource_name +from .models.schemas.jobs import Job +from .models.schemas.studies import StudyID + +DEFAULT_PAGINATION_LIMIT = MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE - 1 + + +@dataclass(frozen=True, kw_only=True) +class StudyService: + job_service: JobService + user_id: UserID + product_name: ProductName + + def __post_init__(self): + check_user_product_consistency( + service_cls_name=self.__class__.__name__, + service_provider=self.job_service, + user_id=self.user_id, + product_name=self.product_name, + ) + + async def list_jobs( + self, + *, + filter_by_study_id: StudyID | None = None, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[Job], PageMetaInfoLimitOffset]: + """Lists all solver jobs for a user with pagination""" + + # 1. Compose job parent resource name prefix + collection_or_resource_ids: list[str] = [ + "study", # study_id, "jobs", + ] + if filter_by_study_id: + collection_or_resource_ids.append(f"{filter_by_study_id}") + + job_parent_resource_name = compose_resource_name(*collection_or_resource_ids) + + # 2. list jobs under job_parent_resource_name + return await self.job_service.list_jobs( + job_parent_resource_name=job_parent_resource_name, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) diff --git a/services/api-server/src/simcore_service_api_server/_service_utils.py b/services/api-server/src/simcore_service_api_server/_service_utils.py new file mode 100644 index 00000000000..7d5a29fe116 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/_service_utils.py @@ -0,0 +1,35 @@ +from typing import Protocol + +from models_library.products import ProductName +from models_library.users import UserID + +from .exceptions.custom_errors import ServiceConfigurationError + + +class UserProductProvider(Protocol): + """Protocol for classes that provide user_id and product_name properties.""" + + @property + def user_id(self) -> UserID: ... 
+ + @property + def product_name(self) -> ProductName: ... + + +def check_user_product_consistency( + service_cls_name: str, + service_provider: UserProductProvider, + user_id: UserID, + product_name: ProductName, +) -> None: + + if user_id != service_provider.user_id: + msg = f"User ID {user_id} does not match {service_provider.__class__.__name__} user ID {service_provider.user_id}" + raise ServiceConfigurationError( + service_cls_name=service_cls_name, detail_msg=msg + ) + if product_name != service_provider.product_name: + msg = f"Product name {product_name} does not match {service_provider.__class__.__name__}product name {service_provider.product_name}" + raise ServiceConfigurationError( + service_cls_name=service_cls_name, detail_msg=msg + ) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/_utils.py b/services/api-server/src/simcore_service_api_server/api/dependencies/_utils.py new file mode 100644 index 00000000000..112d7c861d5 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/_utils.py @@ -0,0 +1,11 @@ +from typing import Any + +from common_library.exclude import as_dict_exclude_none +from pydantic.fields import FieldInfo + + +def get_query_params(field: FieldInfo) -> dict[str, Any]: + return as_dict_exclude_none( + description=field.description, + examples=field.examples or None, + ) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/application.py b/services/api-server/src/simcore_service_api_server/api/dependencies/application.py index ff7094f6477..e7ccf5691f0 100644 --- a/services/api-server/src/simcore_service_api_server/api/dependencies/application.py +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/application.py @@ -1,30 +1,20 @@ -from typing import Any, Callable - -from fastapi import Depends, FastAPI, Request -from simcore_postgres_database.utils_products import get_default_product_name +# mypy: disable-error-code=truthy-function +from fastapi import Request +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper from ...core.settings import ApplicationSettings -def get_reverse_url_mapper(request: Request) -> Callable: - def reverse_url_mapper(name: str, **path_params: Any) -> str: - return request.url_for(name, **path_params) - - return reverse_url_mapper - - def get_settings(request: Request) -> ApplicationSettings: - return request.app.state.settings - - -def get_app(request: Request) -> FastAPI: - return request.app + settings = request.app.state.settings + assert isinstance(settings, ApplicationSettings) # nosec + return settings -async def get_product_name(app: FastAPI = Depends(get_app)) -> str: - if not hasattr(app.state, "default_product_name"): - # lazy evaluation - async with app.state.engine.acquire() as conn: - app.state.default_product_name = await get_default_product_name(conn) +assert get_reverse_url_mapper # nosec +assert get_app # nosec - return app.state.default_product_name +__all__: tuple[str, ...] 
= ( + "get_app", + "get_reverse_url_mapper", +) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/authentication.py b/services/api-server/src/simcore_service_api_server/api/dependencies/authentication.py index c39637f32da..c727897154d 100644 --- a/services/api-server/src/simcore_service_api_server/api/dependencies/authentication.py +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/authentication.py @@ -1,20 +1,30 @@ +from typing import Annotated + from fastapi import Depends, HTTPException, Security, status from fastapi.security import HTTPBasic, HTTPBasicCredentials -from pydantic.types import PositiveInt +from models_library.emails import LowerCaseEmailStr +from models_library.products import ProductName +from pydantic import BaseModel, PositiveInt -from ...db.repositories.api_keys import ApiKeysRepository -from ...db.repositories.users import UsersRepository +from ...repository.api_keys import ApiKeysRepository, UserAndProductTuple +from ...repository.users import UsersRepository from .database import get_repository # SEE https://swagger.io/docs/specification/authentication/basic-authentication/ basic_scheme = HTTPBasic() -def _create_exception(): +class Identity(BaseModel): + user_id: PositiveInt + product_name: ProductName + email: LowerCaseEmailStr + + +def _create_exception() -> HTTPException: _unauthorized_headers = { - "WWW-Authenticate": f'Basic realm="{basic_scheme.realm}"' - if basic_scheme.realm - else "Basic" + "WWW-Authenticate": ( + f'Basic realm="{basic_scheme.realm}"' if basic_scheme.realm else "Basic" + ) } return HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, @@ -23,27 +33,43 @@ def _create_exception(): ) -async def get_current_user_id( +async def get_current_identity( + apikeys_repo: Annotated[ + ApiKeysRepository, Depends(get_repository(ApiKeysRepository)) + ], + users_repo: Annotated[UsersRepository, Depends(get_repository(UsersRepository))], credentials: HTTPBasicCredentials = Security(basic_scheme), - apikeys_repo: ApiKeysRepository = Depends(get_repository(ApiKeysRepository)), -) -> PositiveInt: - user_id = await apikeys_repo.get_user_id( +) -> Identity: + user_and_product: UserAndProductTuple | None = await apikeys_repo.get_user( api_key=credentials.username, api_secret=credentials.password ) - if not user_id: - raise _create_exception() - return user_id + if user_and_product is None: + exc = _create_exception() + raise exc + email = await users_repo.get_active_user_email(user_id=user_and_product.user_id) + if not email: + exc = _create_exception() + raise exc + return Identity( + user_id=user_and_product.user_id, + product_name=user_and_product.product_name, + email=email, + ) -async def get_active_user_email( - user_id: PositiveInt = Depends(get_current_user_id), - users_repo: UsersRepository = Depends(get_repository(UsersRepository)), -) -> str: - email = await users_repo.get_email_from_user_id(user_id) - if not email: - raise _create_exception() - return email +async def get_current_user_id( + identity: Annotated[Identity, Depends(get_current_identity)], +) -> PositiveInt: + return identity.user_id -# alias -get_active_user_id = get_current_user_id +async def get_product_name( + identity: Annotated[Identity, Depends(get_current_identity)], +) -> ProductName: + return identity.product_name + + +async def get_active_user_email( + identity: Annotated[Identity, Depends(get_current_identity)], +) -> LowerCaseEmailStr: + return identity.email diff --git 
a/services/api-server/src/simcore_service_api_server/api/dependencies/database.py b/services/api-server/src/simcore_service_api_server/api/dependencies/database.py index 6a71da5aa8d..c307f0de52a 100644 --- a/services/api-server/src/simcore_service_api_server/api/dependencies/database.py +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/database.py @@ -1,39 +1,38 @@ import logging -from typing import AsyncGenerator, Callable, Type +from collections.abc import AsyncGenerator, Callable +from typing import Annotated -from aiopg.sa import Engine from fastapi import Depends from fastapi.requests import Request +from simcore_postgres_database.utils_aiosqlalchemy import ( + get_pg_engine_stateinfo, +) +from sqlalchemy.ext.asyncio import AsyncEngine -from ...db.repositories import BaseRepository +from ...clients.postgres import get_engine +from ...repository import BaseRepository -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -def get_db_engine(request: Request) -> Engine: - return request.app.state.engine +def get_db_asyncpg_engine(request: Request) -> AsyncEngine: + return get_engine(request.app) -def get_repository(repo_type: Type[BaseRepository]) -> Callable: +def get_repository(repo_type: type[BaseRepository]) -> Callable: async def _get_repo( - engine: Engine = Depends(get_db_engine), + engine: Annotated[AsyncEngine, Depends(get_db_asyncpg_engine)], ) -> AsyncGenerator[BaseRepository, None]: # NOTE: 2 different ideas were tried here with not so good # 1st one was acquiring a connection per repository which lead to the following issue https://github.com/ITISFoundation/osparc-simcore/pull/1966 # 2nd one was acquiring a connection per request which works but blocks the director-v2 responsiveness once # the max amount of connections is reached # now the current solution is to acquire connection when needed. + _logger.debug( + "Setting up a repository. 
Current state of connections: %s", + await get_pg_engine_stateinfo(engine), + ) - available_engines = engine.maxsize - (engine.size - engine.freesize) - if available_engines <= 1: - logger.warning( - "Low pg connections available in pool: pool size=%d, acquired=%d, free=%d, reserved=[%d, %d]", - engine.size, - engine.size - engine.freesize, - engine.freesize, - engine.minsize, - engine.maxsize, - ) yield repo_type(db_engine=engine) return _get_repo diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_function_filters.py b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_function_filters.py new file mode 100644 index 00000000000..bd0e4e49cbe --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_function_filters.py @@ -0,0 +1,22 @@ +from typing import Annotated + +from fastapi import Query +from models_library.functions import FunctionIDString, FunctionJobCollectionsListFilters + +from ._utils import get_query_params + + +def get_function_job_collections_filters( + # pylint: disable=unsubscriptable-object + has_function_id: Annotated[ + FunctionIDString | None, + Query( + **get_query_params( + FunctionJobCollectionsListFilters.model_fields["has_function_id"] + ) + ), + ] = None, +) -> FunctionJobCollectionsListFilters: + return FunctionJobCollectionsListFilters( + has_function_id=has_function_id, + ) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_jobs_filters.py b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_jobs_filters.py new file mode 100644 index 00000000000..c4683f653d1 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_jobs_filters.py @@ -0,0 +1,51 @@ +import textwrap +from typing import Annotated + +from fastapi import Query + +from ...models.schemas.jobs_filters import JobMetadataFilter, MetadataFilterItem + + +def get_job_metadata_filter( + any_: Annotated[ + list[str] | None, + Query( + alias="metadata.any", + description=textwrap.dedent( + """ + Filters jobs based on **any** of the matches on custom metadata fields. + + *Format*: `key:pattern` where pattern can contain glob wildcards + """ + ), + example=["key1:val*", "key2:exactval"], + ), + ] = None, +) -> JobMetadataFilter | None: + """ + Example input: + + /solvers/-/releases/-/jobs?metadata.any=key1:val*&metadata.any=key2:exactval + + This will be converted to: + JobMetadataFilter( + any=[ + MetadataFilterItem(name="key1", pattern="val*"), + MetadataFilterItem(name="key2", pattern="exactval"), + ] + ) + + This is used to filter jobs based on custom metadata fields. 
+ + """ + if not any_: + return None + + items = [] + for item in any_: + try: + name, pattern = item.split(":", 1) + except ValueError: + continue # or raise HTTPException + items.append(MetadataFilterItem(name=name, pattern=pattern)) + return JobMetadataFilter(any=items) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_solvers_filters.py b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_solvers_filters.py new file mode 100644 index 00000000000..814b6003264 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/models_schemas_solvers_filters.py @@ -0,0 +1,24 @@ +from typing import Annotated + +from fastapi import Query +from models_library.basic_types import SafeQueryStr + +from ...models.schemas.solvers_filters import SolversListFilters +from ._utils import get_query_params + + +def get_solvers_filters( + # pylint: disable=unsubscriptable-object + solver_id: Annotated[ + SafeQueryStr | None, + Query(**get_query_params(SolversListFilters.model_fields["solver_id"])), + ] = None, + version_display: Annotated[ + SafeQueryStr | None, + Query(**get_query_params(SolversListFilters.model_fields["version_display"])), + ] = None, +) -> SolversListFilters: + return SolversListFilters( + solver_id=solver_id, + version_display=version_display, + ) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/rabbitmq.py b/services/api-server/src/simcore_service_api_server/api/dependencies/rabbitmq.py new file mode 100644 index 00000000000..e90c60861eb --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/rabbitmq.py @@ -0,0 +1,50 @@ +import logging +from typing import Annotated, Final, cast + +from fastapi import Depends, FastAPI +from pydantic import NonNegativeInt +from servicelib.aiohttp.application_setup import ApplicationSetupError +from servicelib.fastapi.dependencies import get_app +from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from tenacity import before_sleep_log, retry, stop_after_delay, wait_fixed + +from ...services_http.log_streaming import LogDistributor + +_MAX_WAIT_FOR_LOG_DISTRIBUTOR_SECONDS: Final[int] = 10 + +_logger = logging.getLogger(__name__) + + +def get_rabbitmq_rpc_client( + app: Annotated[FastAPI, Depends(get_app)] +) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_client # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + +def get_rabbitmq_client(app: Annotated[FastAPI, Depends(get_app)]) -> RabbitMQClient: + assert app.state.rabbitmq_client # nosec + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_log_distributor(app: Annotated[FastAPI, Depends(get_app)]) -> LogDistributor: + assert app.state.log_distributor # nosec + return cast(LogDistributor, app.state.log_distributor) + + +@retry( + wait=wait_fixed(2), + stop=stop_after_delay(_MAX_WAIT_FOR_LOG_DISTRIBUTOR_SECONDS), + before_sleep=before_sleep_log(_logger, logging.WARNING), + reraise=True, +) +async def wait_till_log_distributor_ready(app) -> None: + if not hasattr(app.state, "log_distributor"): + msg = f"Api server's log_distributor was not ready within {_MAX_WAIT_FOR_LOG_DISTRIBUTOR_SECONDS=} seconds" + raise ApplicationSetupError(msg) + + +def get_log_check_timeout(app: Annotated[FastAPI, Depends(get_app)]) -> NonNegativeInt: + assert app.state.settings # nosec + return cast(NonNegativeInt, 
app.state.settings.API_SERVER_LOG_CHECK_TIMEOUT_SECONDS) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/resource_usage_tracker_rpc.py b/services/api-server/src/simcore_service_api_server/api/dependencies/resource_usage_tracker_rpc.py new file mode 100644 index 00000000000..e55fa59ca6d --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/resource_usage_tracker_rpc.py @@ -0,0 +1,12 @@ +from typing import Annotated + +from fastapi import Depends, FastAPI +from servicelib.fastapi.dependencies import get_app + +from ...services_rpc.resource_usage_tracker import ResourceUsageTrackerClient + + +async def get_resource_usage_tracker_client( + app: Annotated[FastAPI, Depends(get_app)] +) -> ResourceUsageTrackerClient: + return ResourceUsageTrackerClient.get_from_app_state(app=app) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/services.py b/services/api-server/src/simcore_service_api_server/api/dependencies/services.py index 909661299f0..9c5a29a4f61 100644 --- a/services/api-server/src/simcore_service_api_server/api/dependencies/services.py +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/services.py @@ -1,26 +1,38 @@ -""" Dependences with any other services (except webserver) +"""Dependences with any other services (except webserver)""" -""" -from typing import Callable, Type +from collections.abc import Callable +from typing import Annotated -from fastapi import HTTPException, Request, status +from fastapi import Depends, HTTPException, Request, status +from models_library.products import ProductName +from models_library.users import UserID +from servicelib.rabbitmq import RabbitMQRPCClient +from ..._service_jobs import JobService +from ..._service_programs import ProgramService +from ..._service_solvers import SolverService +from ..._service_studies import StudyService +from ...services_rpc.catalog import CatalogService +from ...services_rpc.wb_api_server import WbApiRpcClient from ...utils.client_base import BaseServiceClientApi +from .authentication import get_current_user_id, get_product_name +from .rabbitmq import get_rabbitmq_rpc_client +from .webserver_http import AuthSession, get_webserver_session +from .webserver_rpc import get_wb_api_rpc_client -def get_api_client(client_type: Type[BaseServiceClientApi]) -> Callable: +def get_api_client(client_type: type[BaseServiceClientApi]) -> Callable: """ Retrieves API client from backend services EXCEPT web-server (see dependencies/webserver) Usage: director_client: DirectorApi = Depends(get_api_client(DirectorApi)), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), storage_client: StorageApi = Depends(get_api_client(StorageApi)), """ assert issubclass(client_type, BaseServiceClientApi) # nosec - def _get_client_from_app(request: Request) -> client_type: + def _get_client_from_app(request: Request) -> BaseServiceClientApi: client_obj = client_type.get_instance(request.app) if client_obj is None: raise HTTPException( @@ -32,3 +44,73 @@ def _get_client_from_app(request: Request) -> client_type: return client_obj return _get_client_from_app + + +def get_catalog_service( + rpc_client: Annotated[RabbitMQRPCClient, Depends(get_rabbitmq_rpc_client)], + user_id: Annotated[UserID, Depends(get_current_user_id)], + product_name: Annotated[ProductName, Depends(get_product_name)], +): + """ + "Assembles" the CatalogService layer to the RabbitMQ client + in the context of the rest controller (i.e. 
api/dependencies) + """ + return CatalogService( + _rpc_client=rpc_client, user_id=user_id, product_name=product_name + ) + + +def get_job_service( + web_rest_api: Annotated[AuthSession, Depends(get_webserver_session)], + web_rpc_api: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + user_id: Annotated[UserID, Depends(get_current_user_id)], + product_name: Annotated[ProductName, Depends(get_product_name)], +) -> JobService: + """ + "Assembles" the JobsService layer to the underlying service and client interfaces + in the context of the rest controller (i.e. api/dependencies) + """ + return JobService( + _web_rest_client=web_rest_api, + _web_rpc_client=web_rpc_api, + user_id=user_id, + product_name=product_name, + ) + + +def get_solver_service( + catalog_service: Annotated[CatalogService, Depends(get_catalog_service)], + job_service: Annotated[JobService, Depends(get_job_service)], + user_id: Annotated[UserID, Depends(get_current_user_id)], + product_name: Annotated[ProductName, Depends(get_product_name)], +) -> SolverService: + """ + "Assembles" the SolverService layer to the underlying service and client interfaces + in the context of the rest controller (i.e. api/dependencies) + """ + return SolverService( + catalog_service=catalog_service, + job_service=job_service, + user_id=user_id, + product_name=product_name, + ) + + +def get_study_service( + job_service: Annotated[JobService, Depends(get_job_service)], + user_id: Annotated[UserID, Depends(get_current_user_id)], + product_name: Annotated[ProductName, Depends(get_product_name)], +) -> StudyService: + return StudyService( + job_service=job_service, + user_id=user_id, + product_name=product_name, + ) + + +def get_program_service( + catalog_service: Annotated[CatalogService, Depends(get_catalog_service)], +) -> ProgramService: + return ProgramService( + catalog_service=catalog_service, + ) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py b/services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py deleted file mode 100644 index c95393c4b6d..00000000000 --- a/services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py +++ /dev/null @@ -1,67 +0,0 @@ -import json -import time -from typing import Optional - -from cryptography.fernet import Fernet -from fastapi import Depends, FastAPI, HTTPException, status -from fastapi.requests import Request - -from ...core.settings import WebServerSettings -from ...modules.webserver import AuthSession -from .authentication import get_active_user_email - -UNAVAILBLE_MSG = "backend service is disabled or unreachable" - - -def _get_app(request: Request) -> FastAPI: - return request.app - - -def _get_settings(request: Request) -> WebServerSettings: - s = request.app.state.settings.API_SERVER_WEBSERVER - if not s: - raise HTTPException( - status.HTTP_503_SERVICE_UNAVAILABLE, detail="webserver disabled" - ) - return s - - -def _get_encrypt(request: Request) -> Optional[Fernet]: - return getattr(request.app.state, "webserver_fernet", None) - - -def get_session_cookie( - identity: str = Depends(get_active_user_email), - settings: WebServerSettings = Depends(_get_settings), - fernet: Optional[Fernet] = Depends(_get_encrypt), -) -> dict: - # Based on aiohttp_session and aiohttp_security - # SEE services/web/server/tests/unit/with_dbs/test_login.py - - if fernet is None: - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE, detail=UNAVAILBLE_MSG) - - # builds session cookie - cookie_name = 
settings.WEBSERVER_SESSION_NAME - cookie_data = json.dumps( - { - "created": int(time.time()), # now - "session": {"AIOHTTP_SECURITY": identity}, - "path": "/", - # extras? e.g. expiration - } - ).encode("utf-8") - encrypted_cookie_data = fernet.encrypt(cookie_data).decode("utf-8") - - return {cookie_name: encrypted_cookie_data} - - -def get_webserver_session( - app: FastAPI = Depends(_get_app), - session_cookies: dict = Depends(get_session_cookie), -) -> AuthSession: - """ - Lifetime of AuthSession wrapper is one request because it needs different session cookies - Lifetime of embedded client is attached to the app lifetime - """ - return AuthSession.create(app, session_cookies) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_http.py b/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_http.py new file mode 100644 index 00000000000..d70a64575e2 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_http.py @@ -0,0 +1,81 @@ +import time +from typing import Annotated + +from common_library.json_serialization import json_dumps +from cryptography.fernet import Fernet +from fastapi import Depends, FastAPI, HTTPException, status +from fastapi.requests import Request +from servicelib.rest_constants import X_PRODUCT_NAME_HEADER + +from ..._constants import MSG_BACKEND_SERVICE_UNAVAILABLE +from ...core.settings import ApplicationSettings, WebServerSettings +from ...services_http.webserver import AuthSession +from .application import get_app, get_settings +from .authentication import Identity, get_active_user_email, get_current_identity + + +def _get_settings( + app_settings: Annotated[ApplicationSettings, Depends(get_settings)], +) -> WebServerSettings: + settings = app_settings.API_SERVER_WEBSERVER + if not settings: + raise HTTPException( + status.HTTP_503_SERVICE_UNAVAILABLE, detail=MSG_BACKEND_SERVICE_UNAVAILABLE + ) + assert isinstance(settings, WebServerSettings) # nosec + return settings + + +def _get_encrypt(request: Request) -> Fernet | None: + e: Fernet | None = getattr(request.app.state, "webserver_fernet", None) + return e + + +def get_session_cookie( + identity: Annotated[str, Depends(get_active_user_email)], + settings: Annotated[WebServerSettings, Depends(_get_settings)], + fernet: Annotated[Fernet | None, Depends(_get_encrypt)], +) -> dict: + # Based on aiohttp_session and aiohttp_security + # SEE services/web/server/tests/unit/with_dbs/test_login.py + + if fernet is None: + raise HTTPException( + status.HTTP_503_SERVICE_UNAVAILABLE, detail=MSG_BACKEND_SERVICE_UNAVAILABLE + ) + + # builds session cookie + cookie_name = settings.WEBSERVER_SESSION_NAME + cookie_data = json_dumps( + { + "created": int(time.time()), # now + "session": {"AIOHTTP_SECURITY": identity}, + "path": "/", + # extras? e.g. 
expiration + } + ).encode("utf-8") + encrypted_cookie_data = fernet.encrypt(cookie_data).decode("utf-8") + + return {cookie_name: encrypted_cookie_data} + + +def get_webserver_session( + app: Annotated[FastAPI, Depends(get_app)], + session_cookies: Annotated[dict, Depends(get_session_cookie)], + identity: Annotated[Identity, Depends(get_current_identity)], +) -> AuthSession: + """ + Lifetime of AuthSession wrapper is one request because it needs different session cookies + Lifetime of embedded client is attached to the app lifetime + """ + product_header: dict[str, str] = {X_PRODUCT_NAME_HEADER: f"{identity.product_name}"} + session = AuthSession.create(app, session_cookies, product_header) + assert isinstance(session, AuthSession) # nosec + return session + + +__all__: tuple[str, ...] = ( + "AuthSession", + "get_session_cookie", + "get_webserver_session", +) diff --git a/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_rpc.py b/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_rpc.py new file mode 100644 index 00000000000..19887e1e1c8 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/dependencies/webserver_rpc.py @@ -0,0 +1,12 @@ +from typing import Annotated + +from fastapi import Depends, FastAPI +from servicelib.fastapi.dependencies import get_app + +from ...services_rpc.wb_api_server import WbApiRpcClient + + +async def get_wb_api_rpc_client( + app: Annotated[FastAPI, Depends(get_app)] +) -> WbApiRpcClient: + return WbApiRpcClient.get_from_app_state(app=app) diff --git a/services/api-server/src/simcore_service_api_server/api/errors/http_error.py b/services/api-server/src/simcore_service_api_server/api/errors/http_error.py deleted file mode 100644 index 55438fe8525..00000000000 --- a/services/api-server/src/simcore_service_api_server/api/errors/http_error.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Callable, Optional, Type - -from fastapi import HTTPException -from fastapi.encoders import jsonable_encoder -from starlette.requests import Request -from starlette.responses import JSONResponse - - -async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": [exc.detail]}), status_code=exc.status_code - ) - - -def make_http_error_handler_for_exception( - status_code: int, - exception_cls: Type[BaseException], - *, - override_detail_message: Optional[str] = None, -) -> Callable: - """ - Produces a handler for BaseException-type exceptions which converts them - into an error JSON response with a given status code - - SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions - """ - - async def _http_error_handler(_: Request, exc: exception_cls) -> JSONResponse: - assert isinstance(exc, exception_cls) # nosec - details = override_detail_message or f"{exc}" - return JSONResponse( - content=jsonable_encoder({"errors": [details]}), status_code=status_code - ) - - return _http_error_handler diff --git a/services/api-server/src/simcore_service_api_server/api/errors/httpx_client_error.py b/services/api-server/src/simcore_service_api_server/api/errors/httpx_client_error.py deleted file mode 100644 index 133ee0f5328..00000000000 --- a/services/api-server/src/simcore_service_api_server/api/errors/httpx_client_error.py +++ /dev/null @@ -1,50 +0,0 @@ -""" General handling of httpx-based exceptions - - - httpx-based clients are used to communicate with other backend services - - When those respond with 4XX, 5XX status 
codes, those are generally handled here -""" -import logging - -from fastapi import status -from fastapi.encoders import jsonable_encoder -from httpx import HTTPStatusError -from starlette.requests import Request -from starlette.responses import JSONResponse - -log = logging.getLogger(__file__) - - -async def httpx_client_error_handler(_: Request, exc: HTTPStatusError) -> JSONResponse: - """ - This is called when HTTPStatusError was raised and reached the outermost handler - - This handler is used as a "last resource" since it is recommended to handle these exceptions - closer to the raising point. - - The response had an error HTTP status of 4xx or 5xx, and this is how is - transformed in the api-server API - """ - if 400 <= exc.response.status_code < 500: - # Forward backend client errors - status_code = exc.response.status_code - errors = exc.response.json()["errors"] - - else: - # Hide api-server client from backend server errors - assert exc.response.status_code >= 500 # nosec - status_code = status.HTTP_503_SERVICE_UNAVAILABLE - message = f"{exc.request.url.host.capitalize()} service unexpectedly failed" - log.exception( - "%s. host=%s status-code=%s msg=%s", - message, - exc.request.url.host, - exc.response.status_code, - exc.response.text, - ) - errors = [ - message, - ] - - return JSONResponse( - content=jsonable_encoder({"errors": errors}), status_code=status_code - ) diff --git a/services/api-server/src/simcore_service_api_server/api/errors/validation_error.py b/services/api-server/src/simcore_service_api_server/api/errors/validation_error.py deleted file mode 100644 index fb70f6791ac..00000000000 --- a/services/api-server/src/simcore_service_api_server/api/errors/validation_error.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Union - -from fastapi.encoders import jsonable_encoder -from fastapi.exceptions import RequestValidationError -from fastapi.openapi.constants import REF_PREFIX -from fastapi.openapi.utils import validation_error_response_definition -from pydantic import ValidationError -from starlette.requests import Request -from starlette.responses import JSONResponse -from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY - - -async def http422_error_handler( - _: Request, - exc: Union[RequestValidationError, ValidationError], -) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": exc.errors()}), - status_code=HTTP_422_UNPROCESSABLE_ENTITY, - ) - - -validation_error_response_definition["properties"] = { - "errors": { - "title": "Validation errors", - "type": "array", - "items": {"$ref": f"{REF_PREFIX}ValidationError"}, - }, -} diff --git a/services/api-server/src/simcore_service_api_server/api/root.py b/services/api-server/src/simcore_service_api_server/api/root.py index bb2cb12f40d..8c6dbc6de1a 100644 --- a/services/api-server/src/simcore_service_api_server/api/root.py +++ b/services/api-server/src/simcore_service_api_server/api/root.py @@ -1,7 +1,31 @@ +from typing import Final + from fastapi import APIRouter from ..core.settings import ApplicationSettings -from .routes import files, health, meta, solvers, solvers_jobs, studies, users +from .routes import credits as _credits +from .routes import ( + files, + function_job_collections_routes, + function_jobs_routes, + functions_routes, + health, + licensed_items, + meta, + programs, + solvers, + solvers_jobs, + solvers_jobs_read, + studies, + studies_jobs, + users, + wallets, +) + +_SOLVERS_PREFIX: Final[str] = "/solvers" +_FUNCTIONS_PREFIX: Final[str] = "/functions" 
+_FUNCTION_JOBS_PREFIX: Final[str] = "/function_jobs" +_FUNCTION_JOB_COLLECTIONS_PREFIX: Final[str] = "/function_job_collections" def create_router(settings: ApplicationSettings): @@ -15,14 +39,37 @@ def create_router(settings: ApplicationSettings): router.include_router(meta.router, tags=["meta"], prefix="/meta") router.include_router(users.router, tags=["users"], prefix="/me") router.include_router(files.router, tags=["files"], prefix="/files") - router.include_router(solvers.router, tags=["solvers"], prefix="/solvers") - router.include_router(solvers_jobs.router, tags=["solvers"], prefix="/solvers") + router.include_router(programs.router, tags=["programs"], prefix="/programs") + router.include_router(solvers.router, tags=["solvers"], prefix=_SOLVERS_PREFIX) + router.include_router(solvers_jobs.router, tags=["solvers"], prefix=_SOLVERS_PREFIX) + router.include_router( + solvers_jobs_read.router, tags=["solvers"], prefix=_SOLVERS_PREFIX + ) router.include_router(studies.router, tags=["studies"], prefix="/studies") + router.include_router(studies_jobs.router, tags=["studies"], prefix="/studies") + router.include_router( + function_jobs_routes.function_job_router, + tags=["function_jobs"], + prefix=_FUNCTION_JOBS_PREFIX, + ) + router.include_router( + function_job_collections_routes.function_job_collections_router, + tags=["function_job_collections"], + prefix=_FUNCTION_JOB_COLLECTIONS_PREFIX, + ) + router.include_router(wallets.router, tags=["wallets"], prefix="/wallets") + router.include_router(_credits.router, tags=["credits"], prefix="/credits") + router.include_router( + licensed_items.router, tags=["licensed-items"], prefix="/licensed-items" + ) + router.include_router( + functions_routes.function_router, tags=["functions"], prefix=_FUNCTIONS_PREFIX + ) # NOTE: multiple-files upload is currently disabled # Web form to upload files at http://localhost:8000/v0/upload-form-view # Overcomes limitation of Swagger UI view # NOTE: As of 2020-10-07, Swagger UI doesn't support multiple file uploads in the same form field - # router.get("/upload-multiple-view")(files.files_upload_multiple_view) + # as router.get("/upload-multiple-view")(files.files_upload_multiple_view) return router diff --git a/services/api-server/src/simcore_service_api_server/api/routes/_common.py b/services/api-server/src/simcore_service_api_server/api/routes/_common.py new file mode 100644 index 00000000000..a6f292dd433 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/_common.py @@ -0,0 +1,7 @@ +from typing import Final + +from ...core.settings import BasicSettings + +API_SERVER_DEV_FEATURES_ENABLED: Final[ + bool +] = BasicSettings.create_from_envs().API_SERVER_DEV_FEATURES_ENABLED diff --git a/services/api-server/src/simcore_service_api_server/api/routes/_constants.py b/services/api-server/src/simcore_service_api_server/api/routes/_constants.py new file mode 100644 index 00000000000..d7aeb3c1b73 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/_constants.py @@ -0,0 +1,62 @@ +from typing import Final + +# +# CHANGELOG formatted-messages for API routes +# +# - Append at the bottom of the route's description +# - These are displayed in the swagger doc +# - These are displayed in client's doc as well (auto-generator) +# - Inspired on this idea https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#describing-changes-between-versions +# + +# new routes +FMSG_CHANGELOG_NEW_IN_VERSION: Final[str] = "New in *version {}*\n" + +# new 
inputs/outputs in routes +FMSG_CHANGELOG_ADDED_IN_VERSION: Final[str] = "Added in *version {}*: {}\n" + +# changes on inputs/outputs in routes +FMSG_CHANGELOG_CHANGED_IN_VERSION: Final[str] = "Changed in *version {}*: {}\n" + +# removed on inputs/outputs in routes +FMSG_CHANGELOG_REMOVED_IN_VERSION_FORMAT: Final[str] = "Removed in *version {}*: {}\n" + +FMSG_DEPRECATED_ROUTE_NOTICE: Final[str] = ( + "🚨 **Deprecated**: This endpoint is deprecated and will be removed in a future release.\n" + "Please use `{}` instead.\n\n" +) + +DEFAULT_MAX_STRING_LENGTH: Final[int] = 500 + + +def create_route_description( + *, + base: str = "", + deprecated: bool = False, + alternative: str | None = None, # alternative + changelog: list[str] | None = None +) -> str: + """ + Builds a consistent route/query description with optional deprecation and changelog information. + + Args: + base (str): Main route/query description. + deprecated (tuple): alternative_route if deprecated. + changelog (List[str]): List of formatted changelog strings. + + Returns: + str: Final description string. + """ + parts = [] + + if deprecated: + assert alternative, "If deprecated, alternative must be provided" # nosec + parts.append(FMSG_DEPRECATED_ROUTE_NOTICE.format(alternative)) + + if base: + parts.append(base) + + if changelog: + parts.append("\n".join(changelog)) + + return "\n\n".join(parts) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/credits.py b/services/api-server/src/simcore_service_api_server/api/routes/credits.py new file mode 100644 index 00000000000..410f803b6ee --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/credits.py @@ -0,0 +1,25 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, status + +from ...models.schemas.model_adapter import GetCreditPriceLegacy +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description + +router = APIRouter() + + +@router.get( + "/price", + status_code=status.HTTP_200_OK, + response_model=GetCreditPriceLegacy, + description=create_route_description( + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.6"), + ] + ), +) +async def get_credits_price( + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + return await webserver_api.get_product_price() diff --git a/services/api-server/src/simcore_service_api_server/api/routes/files.py b/services/api-server/src/simcore_service_api_server/api/routes/files.py index 9c7e0e25596..28028cf8b25 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/files.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/files.py @@ -1,67 +1,182 @@ import asyncio +import datetime import io import logging -from collections import deque -from datetime import datetime -from textwrap import dedent -from typing import IO, Optional +from typing import IO, Annotated, Any from uuid import UUID -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Body, Depends from fastapi import File as FileParam from fastapi import Header, Request, UploadFile, status from fastapi.exceptions import HTTPException -from fastapi.responses import HTMLResponse -from models_library.projects_nodes_io import StorageFileID -from pydantic import ValidationError, parse_obj_as +from fastapi_pagination.api import create_page +from models_library.api_schemas_storage.storage_schemas import ( + ETag, + FileUploadCompletionBody, + 
LinkType, +) +from models_library.basic_types import SHA256Str +from models_library.projects_nodes_io import NodeID +from pydantic import AnyUrl, ByteSize, PositiveInt, TypeAdapter, ValidationError from servicelib.fastapi.requests_decorators import cancel_on_disconnect from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION -from simcore_sdk.node_ports_common.filemanager import UploadableFileObject -from simcore_sdk.node_ports_common.filemanager import upload_file as storage_upload_file +from simcore_sdk.node_ports_common.file_io_utils import UploadableFileObject +from simcore_sdk.node_ports_common.filemanager import ( + UploadedFile, + UploadedFolder, + abort_upload, + complete_file_upload, + get_upload_links_from_s3, +) +from simcore_sdk.node_ports_common.filemanager import upload_path as storage_upload_path +from starlette.datastructures import URL from starlette.responses import RedirectResponse -from ..._meta import API_VTAG -from ...models.schemas.files import File -from ...modules.storage import StorageApi, StorageFileMetaData, to_file_api_model +from ...api.dependencies.webserver_http import ( + get_webserver_session, +) +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.domain.files import File as DomainFile +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.files import ( + ClientFileUploadData, +) +from ...models.schemas.files import File as OutputFile +from ...models.schemas.files import ( + FileUploadData, + UploadLinks, + UserFile, +) +from ...models.schemas.jobs import UserFileToProgramJob +from ...services_http.storage import StorageApi, StorageFileMetaData, to_file_api_model +from ...services_http.webserver import AuthSession from ..dependencies.authentication import get_current_user_id from ..dependencies.services import get_api_client +from ._common import API_SERVER_DEV_FEATURES_ENABLED +from ._constants import ( + FMSG_CHANGELOG_ADDED_IN_VERSION, + FMSG_CHANGELOG_REMOVED_IN_VERSION_FORMAT, + create_route_description, +) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) router = APIRouter() - ## FILES --------------------------------------------------------------------------------- # # - WARNING: the order of the router-decorated functions MATTER -# - TODO: pagination ? # - TODO: extend :search as https://cloud.google.com/apis/design/custom_methods ? 
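The warning above about the order of the router-decorated functions is worth spelling out: Starlette matches routes in declaration order, and a path parameter such as /{file_id} matches any single path segment. A minimal sketch with hypothetical handlers of why a fixed route like /page must be declared before the parameterized one:

from uuid import UUID

from fastapi import APIRouter

router = APIRouter()


@router.get("/page")  # fixed segment declared first ...
async def get_files_page() -> dict:
    return {"items": []}


@router.get("/{file_id}")  # ... otherwise GET /files/page would be captured here
async def get_file(file_id: UUID) -> dict:  # and rejected as an invalid UUID
    return {"file_id": str(file_id)}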
# # -common_error_responses = { - status.HTTP_404_NOT_FOUND: {"description": "File not found"}, +_FILE_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "File not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, } -@router.get("", response_model=list[File]) +async def _get_file( + *, + file_id: UUID, + storage_client: StorageApi, + user_id: int, +) -> DomainFile: + """Gets metadata for a given file resource""" + + try: + stored_files: list[StorageFileMetaData] = ( + await storage_client.search_owned_files( + user_id=user_id, file_id=file_id, limit=1 + ) + ) + if not stored_files: + msg = "Not found in storage" + raise ValueError(msg) # noqa: TRY301 + + assert len(stored_files) == 1 + stored_file_meta = stored_files[0] + assert stored_file_meta.file_id # nosec + + # Adapts storage API model to API model + return to_file_api_model(stored_file_meta) + + except (ValueError, ValidationError) as err: + _logger.debug("File %d not found: %s", file_id, err) + raise HTTPException( + status.HTTP_404_NOT_FOUND, + detail=f"File with identifier {file_id} not found", + ) from err + + +async def _create_domain_file( + *, + webserver_api: AuthSession, + file_id: UUID | None, + client_file: UserFile | UserFileToProgramJob, +) -> DomainFile: + if isinstance(client_file, UserFile): + file = client_file.to_domain_model(file_id=file_id) + elif isinstance(client_file, UserFileToProgramJob): + project = await webserver_api.get_project(project_id=client_file.job_id) + if len(project.workbench) > 1: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Job_id {project.uuid} is not a valid program job.", + ) + node_id = next(iter(project.workbench.keys())) + file = client_file.to_domain_model( + project_id=project.uuid, node_id=NodeID(node_id) + ) + else: + err_msg = f"Invalid client_file type passed: {type(client_file)=}" + raise TypeError(err_msg) + return file + + +@router.get( + "", + response_model=list[OutputFile], + responses=_FILE_STATUS_CODES, + description=create_route_description( + base="Lists all files stored in the system", + deprecated=True, + alternative="GET /v0/files/page", + changelog=[ + FMSG_CHANGELOG_ADDED_IN_VERSION.format("0.5", ""), + FMSG_CHANGELOG_REMOVED_IN_VERSION_FORMAT.format( + "0.7", + "This endpoint is deprecated and will be removed in a future version", + ), + ], + ), +) async def list_files( - storage_client: StorageApi = Depends(get_api_client(StorageApi)), - user_id: int = Depends(get_current_user_id), + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[int, Depends(get_current_user_id)], ): - """Lists all files stored in the system""" + """Lists all files stored in the system - stored_files: list[StorageFileMetaData] = await storage_client.list_files(user_id) + SEE `get_files_page` for a paginated version of this function + """ + + stored_files: list[StorageFileMetaData] = await storage_client.list_files( + user_id=user_id + ) # Adapts storage API model to API model - files_meta = deque() + all_files: list[OutputFile] = [] for stored_file_meta in stored_files: try: assert stored_file_meta.file_id # nosec - file_meta: File = to_file_api_model(stored_file_meta) + file_meta = to_file_api_model(stored_file_meta) except (ValidationError, ValueError, AttributeError) as err: - logger.warning( + _logger.warning( "Skipping corrupted entry in storage '%s' (%s)" "TIP: check this entry in file_meta_data table.", 
stored_file_meta.file_uuid, @@ -69,9 +184,27 @@ async def list_files( ) else: - files_meta.append(file_meta) + all_files.append(OutputFile.from_domain_model(file_meta)) - return list(files_meta) + return all_files + + +@router.get( + "/page", + response_model=Page[OutputFile], + include_in_schema=API_SERVER_DEV_FEATURES_ENABLED, + status_code=status.HTTP_501_NOT_IMPLEMENTED, +) +async def get_files_page( + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[int, Depends(get_current_user_id)], + page_params: Annotated[PaginationParams, Depends()], +): + assert storage_client # nosec + assert user_id # nosec + + msg = f"get_files_page of user_id={user_id!r} with page_params={page_params!r}. SEE https://github.com/ITISFoundation/osparc-issues/issues/952" + raise NotImplementedError(msg) def _get_spooled_file_size(file_io: IO) -> int: @@ -81,13 +214,17 @@ def _get_spooled_file_size(file_io: IO) -> int: return file_size -@router.put("/content", response_model=File) +@router.put( + "/content", + response_model=OutputFile, + responses=_FILE_STATUS_CODES, +) @cancel_on_disconnect async def upload_file( request: Request, - file: UploadFile = FileParam(...), - content_length: Optional[str] = Header(None), - user_id: int = Depends(get_current_user_id), + file: Annotated[UploadFile, FileParam(...)], + user_id: Annotated[int, Depends(get_current_user_id)], + content_length: str | None = Header(None), ): """Uploads a single file to the system""" # TODO: For the moment we upload file here and re-upload to S3 @@ -98,14 +235,19 @@ async def upload_file( assert request # nosec + if file.filename is None: + file.filename = "Undefined" + file_size = await asyncio.get_event_loop().run_in_executor( None, _get_spooled_file_size, file.file ) # assign file_id. - file_meta: File = await File.create_from_uploaded( - file, file_size=file_size, created_at=datetime.utcnow().isoformat() + file_meta = await DomainFile.create_from_uploaded( + file, + file_size=file_size, + created_at=datetime.datetime.now(datetime.UTC).isoformat(), ) - logger.debug( + _logger.debug( "Assigned id: %s of %s bytes (content-length), real size %s bytes", file_meta, content_length, @@ -113,23 +255,26 @@ async def upload_file( ) # upload to S3 using pre-signed link - _, entity_tag = await storage_upload_file( + upload_result: UploadedFolder | UploadedFile = await storage_upload_path( user_id=user_id, store_id=SIMCORE_LOCATION, store_name=None, - s3_object=parse_obj_as( - StorageFileID, f"api/{file_meta.id}/{file_meta.filename}" + s3_object=file_meta.storage_file_id, + path_to_upload=UploadableFileObject( + file_object=file.file, + file_name=file.filename, + file_size=file_size, + sha256_checksum=file_meta.sha256_checksum, ), - file_to_upload=UploadableFileObject(file.file, file.filename, file_size), io_log_redirect_cb=None, ) + assert isinstance(upload_result, UploadedFile) # nosec - file_meta.checksum = entity_tag - return file_meta + file_meta.e_tag = upload_result.etag + return OutputFile.from_domain_model(file_meta) -# DISABLED @router.post(":upload-multiple", response_model=list[FileMetadata]) -# MaG suggested a single function that can upload one or multiple files instead of having +# NOTE: MaG suggested a single function that can upload one or multiple files instead of having # two of them. 
Tried something like upload_file( files: Union[list[UploadFile], File] ) but it # produces an error in the generated openapi.json # @@ -138,44 +283,191 @@ async def upload_file( # async def upload_files(files: list[UploadFile] = FileParam(...)): """Uploads multiple files to the system""" - raise NotImplementedError() + raise NotImplementedError + + +@router.post( + "/content", + response_model=ClientFileUploadData, + responses=_FILE_STATUS_CODES, +) +@cancel_on_disconnect +async def get_upload_links( + request: Request, + client_file: UserFileToProgramJob | UserFile, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + """Get upload links for uploading a file to storage""" + assert request # nosec + file_meta = await _create_domain_file( + webserver_api=webserver_api, file_id=None, client_file=client_file + ) + _, upload_links = await get_upload_links_from_s3( + user_id=user_id, + store_name=None, + store_id=SIMCORE_LOCATION, + s3_object=file_meta.storage_file_id, + client_session=None, + link_type=LinkType.PRESIGNED, + file_size=ByteSize(client_file.filesize), + is_directory=False, + sha256_checksum=file_meta.sha256_checksum, + ) + completion_url: URL = request.url_for( + "complete_multipart_upload", file_id=file_meta.id + ) + abort_url: URL = request.url_for("abort_multipart_upload", file_id=file_meta.id) + upload_data: FileUploadData = FileUploadData( + chunk_size=upload_links.chunk_size, + urls=upload_links.urls, # type: ignore[arg-type] + links=UploadLinks( + complete_upload=completion_url.path, abort_upload=abort_url.path + ), + ) + return ClientFileUploadData(file_id=file_meta.id, upload_schema=upload_data) -@router.get("/{file_id}", response_model=File, responses={**common_error_responses}) +@router.get( + "/{file_id}", + response_model=OutputFile, + responses=_FILE_STATUS_CODES, +) async def get_file( file_id: UUID, - storage_client: StorageApi = Depends(get_api_client(StorageApi)), - user_id: int = Depends(get_current_user_id), + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[int, Depends(get_current_user_id)], ): """Gets metadata for a given file resource""" - try: - stored_files: list[StorageFileMetaData] = await storage_client.search_files( - user_id, file_id + return await _get_file( + file_id=file_id, + storage_client=storage_client, + user_id=user_id, + ) + + +@router.get( + ":search", + response_model=Page[OutputFile], + responses=_FILE_STATUS_CODES, +) +async def search_files_page( + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[int, Depends(get_current_user_id)], + page_params: Annotated[PaginationParams, Depends()], + sha256_checksum: SHA256Str | None = None, + file_id: UUID | None = None, +): + """Search files""" + stored_files: list[StorageFileMetaData] = await storage_client.search_owned_files( + user_id=user_id, + file_id=file_id, + sha256_checksum=sha256_checksum, + limit=page_params.limit, + offset=page_params.offset, + ) + if page_params.offset > len(stored_files): + _logger.debug("File with sha256_checksum=%d not found.", sha256_checksum) + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Not found in storage" ) - if not stored_files: - raise ValueError("Not found in storage") + file_list = [ + OutputFile.from_domain_model(to_file_api_model(fmd)) for fmd in stored_files + ] + return create_page( + file_list, + total=len(stored_files), + 
params=page_params, + ) - stored_file_meta = stored_files[0] - assert stored_file_meta.file_id # nosec - # Adapts storage API model to API model - file_meta = to_file_api_model(stored_file_meta) - return file_meta +@router.delete( + "/{file_id}", + responses=_FILE_STATUS_CODES, +) +async def delete_file( + file_id: UUID, + user_id: Annotated[int, Depends(get_current_user_id)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], +): + file = await _get_file( + file_id=file_id, + storage_client=storage_client, + user_id=user_id, + ) + await storage_client.delete_file( + user_id=user_id, quoted_storage_file_id=file.quoted_storage_file_id + ) - except (ValueError, ValidationError) as err: - logger.debug("File %d not found: %s", file_id, err) - raise HTTPException( - status.HTTP_404_NOT_FOUND, - detail=f"File with identifier {file_id} not found", - ) from err + +@router.post( + "/{file_id}:abort", + responses=DEFAULT_BACKEND_SERVICE_STATUS_CODES, +) +async def abort_multipart_upload( + request: Request, + file_id: UUID, + client_file: Annotated[UserFileToProgramJob | UserFile, Body(..., embed=True)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + assert file_id # nosec + assert request # nosec + assert user_id # nosec + + file = await _create_domain_file( + webserver_api=webserver_api, file_id=file_id, client_file=client_file + ) + abort_link: URL = await storage_client.create_abort_upload_link( + file=file, query={"user_id": str(user_id)} + ) + await abort_upload( + abort_upload_link=TypeAdapter(AnyUrl).validate_python(str(abort_link)) + ) + + +@router.post( + "/{file_id}:complete", + response_model=OutputFile, + responses=_FILE_STATUS_CODES, +) +@cancel_on_disconnect +async def complete_multipart_upload( + request: Request, + file_id: UUID, + client_file: Annotated[UserFileToProgramJob | UserFile, Body(...)], + uploaded_parts: Annotated[FileUploadCompletionBody, Body(...)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + assert file_id # nosec + assert request # nosec + assert user_id # nosec + file = await _create_domain_file( + webserver_api=webserver_api, file_id=file_id, client_file=client_file + ) + complete_link: URL = await storage_client.create_complete_upload_link( + file=file, query={"user_id": str(user_id)} + ) + + e_tag: ETag | None = await complete_file_upload( + uploaded_parts=uploaded_parts.parts, + upload_completion_link=TypeAdapter(AnyUrl).validate_python(f"{complete_link}"), + ) + assert e_tag is not None # nosec + + file.e_tag = e_tag + return file @router.get( "/{file_id}/content", response_class=RedirectResponse, - responses={ - **common_error_responses, + responses=_FILE_STATUS_CODES + | { 200: { "content": { "application/octet-stream": { @@ -189,38 +481,19 @@ async def get_file( ) async def download_file( file_id: UUID, - storage_client: StorageApi = Depends(get_api_client(StorageApi)), - user_id: int = Depends(get_current_user_id), + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + user_id: Annotated[int, Depends(get_current_user_id)], ): # NOTE: application/octet-stream is defined as "arbitrary binary data" in RFC 2046, # gets meta - file_meta: File = await 
get_file(file_id, storage_client, user_id) + file_meta = await get_file(file_id, storage_client, user_id) # download from S3 using pre-signed link presigned_download_link = await storage_client.get_download_link( - user_id, file_meta.id, file_meta.filename + user_id=user_id, + file_id=file_meta.id, + file_name=file_meta.filename, ) - logger.info("Downloading %s to %s ...", file_meta, presigned_download_link) - return RedirectResponse(presigned_download_link) - - -async def files_upload_multiple_view(): - """Extra **Web form** to upload multiple files at http://localhost:8000/v0/files/upload-form-view - and overcomes the limitations of Swagger-UI view - - NOTE: Only enabled if DEBUG=1 - NOTE: As of 2020-10-07, Swagger UI doesn't support multiple file uploads in the same form field - """ - return HTMLResponse( - content=dedent( - f""" - -
- - """ - ) - ) + _logger.info("Downloading %s to %s ...", file_meta, presigned_download_link) + return RedirectResponse(f"{presigned_download_link}") diff --git a/services/api-server/src/simcore_service_api_server/api/routes/function_job_collections_routes.py b/services/api-server/src/simcore_service_api_server/api/routes/function_job_collections_routes.py new file mode 100644 index 00000000000..3ece899ec08 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/function_job_collections_routes.py @@ -0,0 +1,183 @@ +import asyncio +from typing import Annotated, Final + +from fastapi import APIRouter, Depends, status +from fastapi_pagination.api import create_page +from models_library.api_schemas_webserver.functions import ( + FunctionJobCollection, + FunctionJobCollectionID, + FunctionJobCollectionsListFilters, + FunctionJobCollectionStatus, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, +) +from pydantic import PositiveInt + +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...services_http.director_v2 import DirectorV2Api +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.authentication import get_current_user_id +from ..dependencies.models_schemas_function_filters import ( + get_function_job_collections_filters, +) +from ..dependencies.services import get_api_client +from ..dependencies.webserver_rpc import get_wb_api_rpc_client +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description +from .function_jobs_routes import function_job_status, get_function_job + +# pylint: disable=too-many-arguments + +function_job_collections_router = APIRouter() + +FIRST_RELEASE_VERSION = "0.8.0" + + +_COMMON_FUNCTION_JOB_COLLECTION_ERROR_RESPONSES: Final[dict] = { + status.HTTP_404_NOT_FOUND: { + "description": "Function job collection not found", + "model": ErrorGet, + }, +} + + +@function_job_collections_router.get( + "", + response_model=Page[RegisteredFunctionJobCollection], + description=create_route_description( + base="List function job collections", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def list_function_job_collections( + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + page_params: Annotated[PaginationParams, Depends()], + filters: Annotated[ + FunctionJobCollectionsListFilters, Depends(get_function_job_collections_filters) + ], +): + function_job_collection_list, meta = await wb_api_rpc.list_function_job_collections( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + filters=filters, + ) + return create_page( + function_job_collection_list, + total=meta.total, + params=page_params, + ) + + +@function_job_collections_router.get( + "/{function_job_collection_id:uuid}", + response_model=RegisteredFunctionJobCollection, + responses={**_COMMON_FUNCTION_JOB_COLLECTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get function job collection", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def get_function_job_collection( + function_job_collection_id: FunctionJobCollectionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> RegisteredFunctionJobCollection: + return await wb_api_rpc.get_function_job_collection( + function_job_collection_id=function_job_collection_id + ) + + +@function_job_collections_router.post( + "", + 
response_model=RegisteredFunctionJobCollection, + description=create_route_description( + base="Register function job collection", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def register_function_job_collection( + function_job_collection: FunctionJobCollection, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> RegisteredFunctionJobCollection: + return await wb_api_rpc.register_function_job_collection( + function_job_collection=function_job_collection + ) + + +@function_job_collections_router.delete( + "/{function_job_collection_id:uuid}", + response_model=None, + responses={**_COMMON_FUNCTION_JOB_COLLECTION_ERROR_RESPONSES}, + description=create_route_description( + base="Delete function job collection", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def delete_function_job_collection( + function_job_collection_id: FunctionJobCollectionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> None: + return await wb_api_rpc.delete_function_job_collection( + function_job_collection_id=function_job_collection_id + ) + + +@function_job_collections_router.get( + "/{function_job_collection_id:uuid}/function_jobs", + response_model=list[RegisteredFunctionJob], + responses={**_COMMON_FUNCTION_JOB_COLLECTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get the function jobs in function job collection", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def function_job_collection_list_function_jobs( + function_job_collection_id: FunctionJobCollectionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> list[RegisteredFunctionJob]: + function_job_collection = await get_function_job_collection( + function_job_collection_id=function_job_collection_id, + wb_api_rpc=wb_api_rpc, + ) + return [ + await get_function_job( + job_id, + wb_api_rpc=wb_api_rpc, + ) + for job_id in function_job_collection.job_ids + ] + + +@function_job_collections_router.get( + "/{function_job_collection_id:uuid}/status", + response_model=FunctionJobCollectionStatus, + responses={**_COMMON_FUNCTION_JOB_COLLECTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get function job collection status", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def function_job_collection_status( + function_job_collection_id: FunctionJobCollectionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], +) -> FunctionJobCollectionStatus: + function_job_collection = await get_function_job_collection( + function_job_collection_id=function_job_collection_id, + wb_api_rpc=wb_api_rpc, + ) + + job_statuses = await asyncio.gather( + *[ + function_job_status( + job_id, + wb_api_rpc=wb_api_rpc, + director2_api=director2_api, + user_id=user_id, + ) + for job_id in function_job_collection.job_ids + ] + ) + return FunctionJobCollectionStatus( + status=[job_status.status for job_status in job_statuses] + ) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/function_jobs_routes.py b/services/api-server/src/simcore_service_api_server/api/routes/function_jobs_routes.py new file mode 100644 index 00000000000..3289f59e597 --- /dev/null +++ 
b/services/api-server/src/simcore_service_api_server/api/routes/function_jobs_routes.py @@ -0,0 +1,242 @@ +from typing import Annotated, Final + +from fastapi import APIRouter, Depends, status +from fastapi_pagination.api import create_page +from models_library.api_schemas_webserver.functions import ( + Function, + FunctionClass, + FunctionJob, + FunctionJobID, + FunctionJobStatus, + FunctionOutputs, + RegisteredFunctionJob, + UnsupportedFunctionClassError, + UnsupportedFunctionFunctionJobClassCombinationError, +) +from pydantic import PositiveInt +from sqlalchemy.ext.asyncio import AsyncEngine + +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.storage import StorageApi +from ...services_http.webserver import AuthSession +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.authentication import get_current_user_id +from ..dependencies.database import get_db_asyncpg_engine +from ..dependencies.services import get_api_client +from ..dependencies.webserver_http import get_webserver_session +from ..dependencies.webserver_rpc import get_wb_api_rpc_client +from . import solvers_jobs, solvers_jobs_read, studies_jobs +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description + +# pylint: disable=too-many-arguments +# pylint: disable=cyclic-import + + +function_job_router = APIRouter() + +_COMMON_FUNCTION_JOB_ERROR_RESPONSES: Final[dict] = { + status.HTTP_404_NOT_FOUND: { + "description": "Function job not found", + "model": ErrorGet, + }, +} + +FIRST_RELEASE_VERSION = "0.8.0" + + +@function_job_router.get( + "", + response_model=Page[RegisteredFunctionJob], + description=create_route_description( + base="List function jobs", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def list_function_jobs( + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + page_params: Annotated[PaginationParams, Depends()], +): + function_jobs_list, meta = await wb_api_rpc.list_function_jobs( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + + return create_page( + function_jobs_list, + total=meta.total, + params=page_params, + ) + + +@function_job_router.post( + "", + response_model=RegisteredFunctionJob, + description=create_route_description( + base="Create function job", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def register_function_job( + function_job: FunctionJob, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> RegisteredFunctionJob: + return await wb_api_rpc.register_function_job(function_job=function_job) + + +@function_job_router.get( + "/{function_job_id:uuid}", + response_model=RegisteredFunctionJob, + responses={**_COMMON_FUNCTION_JOB_ERROR_RESPONSES}, + description=create_route_description( + base="Get function job", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def get_function_job( + function_job_id: FunctionJobID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> RegisteredFunctionJob: + return await wb_api_rpc.get_function_job(function_job_id=function_job_id) + + +@function_job_router.delete( + "/{function_job_id:uuid}", + response_model=None, + responses={**_COMMON_FUNCTION_JOB_ERROR_RESPONSES}, + description=create_route_description( + base="Delete function job", + 
changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def delete_function_job( + function_job_id: FunctionJobID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> None: + return await wb_api_rpc.delete_function_job(function_job_id=function_job_id) + + +@function_job_router.get( + "/{function_job_id:uuid}/status", + response_model=FunctionJobStatus, + responses={**_COMMON_FUNCTION_JOB_ERROR_RESPONSES}, + description=create_route_description( + base="Get function job status", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def function_job_status( + function_job_id: FunctionJobID, + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> FunctionJobStatus: + + function, function_job = await get_function_from_functionjobid( + wb_api_rpc=wb_api_rpc, function_job_id=function_job_id + ) + + if ( + function.function_class == FunctionClass.PROJECT + and function_job.function_class == FunctionClass.PROJECT + ): + job_status = await studies_jobs.inspect_study_job( + study_id=function.project_id, + job_id=function_job.project_job_id, + user_id=user_id, + director2_api=director2_api, + ) + return FunctionJobStatus(status=job_status.state) + + if (function.function_class == FunctionClass.SOLVER) and ( + function_job.function_class == FunctionClass.SOLVER + ): + job_status = await solvers_jobs.inspect_job( + solver_key=function.solver_key, + version=function.solver_version, + job_id=function_job.solver_job_id, + user_id=user_id, + director2_api=director2_api, + ) + return FunctionJobStatus(status=job_status.state) + + raise UnsupportedFunctionFunctionJobClassCombinationError( + function_class=function.function_class, + function_job_class=function_job.function_class, + ) + + +async def get_function_from_functionjobid( + wb_api_rpc: WbApiRpcClient, + function_job_id: FunctionJobID, +) -> tuple[Function, FunctionJob]: + function_job = await get_function_job( + wb_api_rpc=wb_api_rpc, function_job_id=function_job_id + ) + + from .functions_routes import get_function + + return ( + await get_function( + wb_api_rpc=wb_api_rpc, function_id=function_job.function_uid + ), + function_job, + ) + + +@function_job_router.get( + "/{function_job_id:uuid}/outputs", + response_model=FunctionOutputs, + responses={**_COMMON_FUNCTION_JOB_ERROR_RESPONSES}, + description=create_route_description( + base="Get function job outputs", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION)], + ), +) +async def function_job_outputs( + function_job_id: FunctionJobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + async_pg_engine: Annotated[AsyncEngine, Depends(get_db_asyncpg_engine)], +) -> FunctionOutputs: + function, function_job = await get_function_from_functionjobid( + wb_api_rpc=wb_api_rpc, function_job_id=function_job_id + ) + + if ( + function.function_class == FunctionClass.PROJECT + and function_job.function_class == FunctionClass.PROJECT + ): + return dict( + ( + await studies_jobs.get_study_job_outputs( + study_id=function.project_id, + job_id=function_job.project_job_id, + user_id=user_id, + 
webserver_api=webserver_api, + storage_client=storage_client, + ) + ).results + ) + + if ( + function.function_class == FunctionClass.SOLVER + and function_job.function_class == FunctionClass.SOLVER + ): + return dict( + ( + await solvers_jobs_read.get_job_outputs( + solver_key=function.solver_key, + version=function.solver_version, + job_id=function_job.solver_job_id, + user_id=user_id, + webserver_api=webserver_api, + storage_client=storage_client, + async_pg_engine=async_pg_engine, + ) + ).results + ) + raise UnsupportedFunctionClassError(function_class=function.function_class) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/functions_routes.py b/services/api-server/src/simcore_service_api_server/api/routes/functions_routes.py new file mode 100644 index 00000000000..f0d619b107d --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/functions_routes.py @@ -0,0 +1,486 @@ +from collections.abc import Callable +from typing import Annotated, Final + +import jsonschema +from fastapi import APIRouter, Depends, Request, status +from fastapi_pagination.api import create_page +from jsonschema import ValidationError +from models_library.api_schemas_api_server.functions import ( + Function, + FunctionClass, + FunctionID, + FunctionInputs, + FunctionInputSchema, + FunctionInputsList, + FunctionInputsValidationError, + FunctionJobCollection, + FunctionOutputSchema, + FunctionSchemaClass, + ProjectFunctionJob, + RegisteredFunction, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, + SolverFunctionJob, + UnsupportedFunctionClassError, +) +from pydantic import PositiveInt +from servicelib.fastapi.dependencies import get_reverse_url_mapper +from simcore_service_api_server._service_jobs import JobService + +from ..._service_solvers import SolverService +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.jobs import JobInputs +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.webserver import AuthSession +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.authentication import get_current_user_id, get_product_name +from ..dependencies.services import get_api_client, get_job_service, get_solver_service +from ..dependencies.webserver_http import get_webserver_session +from ..dependencies.webserver_rpc import get_wb_api_rpc_client +from . 
import solvers_jobs, studies_jobs +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description +from .function_jobs_routes import register_function_job + +# pylint: disable=too-many-arguments +# pylint: disable=cyclic-import + +function_router = APIRouter() + +_COMMON_FUNCTION_ERROR_RESPONSES: Final[dict] = { + status.HTTP_404_NOT_FOUND: { + "description": "Function not found", + "model": ErrorGet, + }, +} + +FIRST_RELEASE_VERSION = "0.8.0" + + +@function_router.post( + "", + response_model=RegisteredFunction, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Create function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def register_function( + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + function: Function, +) -> RegisteredFunction: + return await wb_api_rpc.register_function(function=function) + + +@function_router.get( + "/{function_id:uuid}", + response_model=RegisteredFunction, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def get_function( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> RegisteredFunction: + return await wb_api_rpc.get_function(function_id=function_id) + + +@function_router.get( + "", + response_model=Page[RegisteredFunction], + description=create_route_description( + base="List functions", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.8.0"), + ], + ), +) +async def list_functions( + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + page_params: Annotated[PaginationParams, Depends()], +): + functions_list, meta = await wb_api_rpc.list_functions( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + + return create_page( + functions_list, + total=meta.total, + params=page_params, + ) + + +@function_router.get( + "/{function_id:uuid}/jobs", + response_model=Page[RegisteredFunctionJob], + description=create_route_description( + base="List function jobs for a function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def list_function_jobs_for_functionid( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + page_params: Annotated[PaginationParams, Depends()], +): + function_jobs_list, meta = await wb_api_rpc.list_function_jobs( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + filter_by_function_id=function_id, + ) + + return create_page( + function_jobs_list, + total=meta.total, + params=page_params, + ) + + +@function_router.patch( + "/{function_id:uuid}/title", + response_model=RegisteredFunction, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Update function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def update_function_title( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + title: str, +) -> RegisteredFunction: + returned_function = await wb_api_rpc.update_function_title( + function_id=function_id, title=title + ) + assert ( + returned_function.title == title + ), f"Function title was not updated. 
Expected {title} but got {returned_function.title}" # nosec + return returned_function + + +@function_router.patch( + "/{function_id:uuid}/description", + response_model=RegisteredFunction, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Update function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def update_function_description( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + description: str, +) -> RegisteredFunction: + returned_function = await wb_api_rpc.update_function_description( + function_id=function_id, description=description + ) + assert ( + returned_function.description == description + ), f"Function description was not updated. Expected {description} but got {returned_function.description}" # nosec + return returned_function + + +def _join_inputs( + default_inputs: FunctionInputs | None, + function_inputs: FunctionInputs | None, +) -> FunctionInputs: + if default_inputs is None: + return function_inputs + + if function_inputs is None: + return default_inputs + + # last dict will override defaults + return {**default_inputs, **function_inputs} + + +@function_router.get( + "/{function_id:uuid}/input_schema", + response_model=FunctionInputSchema, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get function input schema", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def get_function_inputschema( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> FunctionInputSchema: + function = await wb_api_rpc.get_function(function_id=function_id) + return function.input_schema + + +@function_router.get( + "/{function_id:uuid}/output_schema", + response_model=FunctionInputSchema, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Get function output schema", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def get_function_outputschema( + function_id: FunctionID, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> FunctionOutputSchema: + function = await wb_api_rpc.get_function(function_id=function_id) + return function.output_schema + + +@function_router.post( + "/{function_id:uuid}:validate_inputs", + response_model=tuple[bool, str], + responses={ + status.HTTP_400_BAD_REQUEST: {"description": "Invalid inputs"}, + status.HTTP_404_NOT_FOUND: {"description": "Function not found"}, + }, + description=create_route_description( + base="Validate inputs against the function's input schema", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def validate_function_inputs( + function_id: FunctionID, + inputs: FunctionInputs, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], +) -> tuple[bool, str]: + function = await wb_api_rpc.get_function(function_id=function_id) + + if function.input_schema is None or function.input_schema.schema_content is None: + return True, "No input schema defined for this function" + + if function.input_schema.schema_class == FunctionSchemaClass.json_schema: + try: + jsonschema.validate( + instance=inputs, schema=function.input_schema.schema_content + ) + except ValidationError as err: + return False, str(err) + return True, "Inputs are valid" + + return ( + False, + f"Unsupported 
function schema class {function.input_schema.schema_class}", + ) + + +@function_router.post( + "/{function_id:uuid}:run", + response_model=RegisteredFunctionJob, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Run function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def run_function( # noqa: PLR0913 + request: Request, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + function_id: FunctionID, + function_inputs: FunctionInputs, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + product_name: Annotated[str, Depends(get_product_name)], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + job_service: Annotated[JobService, Depends(get_job_service)], +) -> RegisteredFunctionJob: + + to_run_function = await wb_api_rpc.get_function(function_id=function_id) + + joined_inputs = _join_inputs( + to_run_function.default_inputs, + function_inputs, + ) + + if to_run_function.input_schema is not None: + is_valid, validation_str = await validate_function_inputs( + function_id=to_run_function.uid, + inputs=joined_inputs, + wb_api_rpc=wb_api_rpc, + ) + if not is_valid: + raise FunctionInputsValidationError(error=validation_str) + + if cached_function_job := await wb_api_rpc.find_cached_function_job( + function_id=to_run_function.uid, + inputs=joined_inputs, + ): + return cached_function_job + + if to_run_function.function_class == FunctionClass.PROJECT: + study_job = await studies_jobs.create_study_job( + study_id=to_run_function.project_id, + job_inputs=JobInputs(values=joined_inputs or {}), + webserver_api=webserver_api, + wb_api_rpc=wb_api_rpc, + url_for=url_for, + x_simcore_parent_project_uuid=None, + x_simcore_parent_node_id=None, + user_id=user_id, + product_name=product_name, + ) + await studies_jobs.start_study_job( + request=request, + study_id=to_run_function.project_id, + job_id=study_job.id, + user_id=user_id, + webserver_api=webserver_api, + director2_api=director2_api, + ) + return await register_function_job( + wb_api_rpc=wb_api_rpc, + function_job=ProjectFunctionJob( + function_uid=to_run_function.uid, + title=f"Function job of function {to_run_function.uid}", + description=to_run_function.description, + inputs=joined_inputs, + outputs=None, + project_job_id=study_job.id, + ), + ) + + if to_run_function.function_class == FunctionClass.SOLVER: + solver_job = await solvers_jobs.create_solver_job( + solver_key=to_run_function.solver_key, + version=to_run_function.solver_version, + inputs=JobInputs(values=joined_inputs or {}), + solver_service=solver_service, + job_service=job_service, + url_for=url_for, + x_simcore_parent_project_uuid=None, + x_simcore_parent_node_id=None, + ) + await solvers_jobs.start_job( + request=request, + solver_key=to_run_function.solver_key, + version=to_run_function.solver_version, + job_id=solver_job.id, + user_id=user_id, + webserver_api=webserver_api, + director2_api=director2_api, + ) + return await register_function_job( + wb_api_rpc=wb_api_rpc, + function_job=SolverFunctionJob( + function_uid=to_run_function.uid, + title=f"Function job of function {to_run_function.uid}", + description=to_run_function.description, + inputs=joined_inputs, + outputs=None, + 
solver_job_id=solver_job.id, + ), + ) + + raise UnsupportedFunctionClassError( + function_class=to_run_function.function_class, + ) + + +@function_router.delete( + "/{function_id:uuid}", + response_model=None, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Delete function", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def delete_function( + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + function_id: FunctionID, +) -> None: + return await wb_api_rpc.delete_function(function_id=function_id) + + +_COMMON_FUNCTION_JOB_ERROR_RESPONSES: Final[dict] = { + status.HTTP_404_NOT_FOUND: { + "description": "Function job not found", + "model": ErrorGet, + }, +} + + +@function_router.post( + "/{function_id:uuid}:map", + response_model=RegisteredFunctionJobCollection, + responses={**_COMMON_FUNCTION_ERROR_RESPONSES}, + description=create_route_description( + base="Map function over input parameters", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format(FIRST_RELEASE_VERSION), + ], + ), +) +async def map_function( # noqa: PLR0913 + function_id: FunctionID, + function_inputs_list: FunctionInputsList, + request: Request, + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + product_name: Annotated[str, Depends(get_product_name)], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + job_service: Annotated[JobService, Depends(get_job_service)], +) -> RegisteredFunctionJobCollection: + function_jobs = [] + function_jobs = [ + await run_function( + wb_api_rpc=wb_api_rpc, + function_id=function_id, + function_inputs=function_inputs, + product_name=product_name, + user_id=user_id, + webserver_api=webserver_api, + url_for=url_for, + director2_api=director2_api, + request=request, + solver_service=solver_service, + job_service=job_service, + ) + for function_inputs in function_inputs_list + ] + + function_job_collection_description = f"Function job collection of map of function {function_id} with {len(function_inputs_list)} inputs" + # Import here to avoid circular import + from .function_job_collections_routes import register_function_job_collection + + return await register_function_job_collection( + wb_api_rpc=wb_api_rpc, + function_job_collection=FunctionJobCollection( + title="Function job collection of function map", + description=function_job_collection_description, + job_ids=[function_job.uid for function_job in function_jobs], + ), + ) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/health.py b/services/api-server/src/simcore_service_api_server/api/routes/health.py index e4cd5c41ef6..4c398128ec1 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/health.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/health.py @@ -1,16 +1,18 @@ import asyncio -from datetime import datetime -from typing import Callable, Tuple +import datetime +from collections.abc import Callable +from typing import Annotated -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Depends, HTTPException from fastapi.responses import PlainTextResponse from models_library.app_diagnostics import 
AppStatusCheck +from servicelib.aiohttp import status from ..._meta import API_VERSION, PROJECT_NAME -from ...modules.catalog import CatalogApi -from ...modules.director_v2 import DirectorV2Api -from ...modules.storage import StorageApi -from ...modules.webserver import WebserverApi +from ...core.health_checker import ApiServerHealthChecker, get_health_checker +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.storage import StorageApi +from ...services_http.webserver import WebserverApi from ..dependencies.application import get_reverse_url_mapper from ..dependencies.services import get_api_client @@ -18,33 +20,47 @@ @router.get("/", include_in_schema=False, response_class=PlainTextResponse) -async def check_service_health(): - return f"{__name__}@{datetime.utcnow().isoformat()}" +async def check_service_health( + health_checker: Annotated[ApiServerHealthChecker, Depends(get_health_checker)], +): + if not health_checker.healthy: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail="unhealthy" + ) + + return f"{__name__}@{datetime.datetime.now(tz=datetime.UTC).isoformat()}" -@router.get("/state", include_in_schema=False) +@router.get( + "/state", + include_in_schema=False, + response_model=AppStatusCheck, + response_model_exclude_unset=True, +) async def get_service_state( - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), - storage_client: StorageApi = Depends(get_api_client(StorageApi)), - webserver_client: WebserverApi = Depends(get_api_client(WebserverApi)), - url_for: Callable = Depends(get_reverse_url_mapper), + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], + webserver_client: Annotated[WebserverApi, Depends(get_api_client(WebserverApi))], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): - apis = (catalog_client, director2_api, storage_client, webserver_client) - heaths: Tuple[bool] = await asyncio.gather(*[api.is_responsive() for api in apis]) + apis = ( + director2_api, + storage_client, + webserver_client, + ) + healths = await asyncio.gather( + *[api.is_responsive() for api in apis], + return_exceptions=False, + ) - current_status = AppStatusCheck( + return AppStatusCheck( app_name=PROJECT_NAME, version=API_VERSION, services={ api.service_name: { - "healthy": is_healty, - "url": str(api.client.base_url) + api.health_check_path.lstrip("/"), + "healthy": bool(is_healty), } - for api, is_healty in zip(apis, heaths) + for api, is_healty in zip(apis, healths, strict=True) }, url=url_for("get_service_state"), ) - resp = current_status.dict(exclude_unset=True) - resp.update(docs_dev_url=url_for("swagger_ui_html")) - return resp diff --git a/services/api-server/src/simcore_service_api_server/api/routes/licensed_items.py b/services/api-server/src/simcore_service_api_server/api/routes/licensed_items.py new file mode 100644 index 00000000000..161051bfa9f --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/licensed_items.py @@ -0,0 +1,74 @@ +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, HTTPException, status +from models_library.licenses import LicensedItemID +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from pydantic import PositiveInt + +from ...api.dependencies.authentication import 
get_current_user_id, get_product_name +from ...api.dependencies.webserver_rpc import get_wb_api_rpc_client +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.pagination import Page, PaginationParams +from ...models.schemas.model_adapter import LicensedItemCheckoutGet, LicensedItemGet +from ...services_rpc.resource_usage_tracker import ResourceUsageTrackerClient +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.resource_usage_tracker_rpc import ( + get_resource_usage_tracker_client, +) + +router = APIRouter() + +_LICENSE_ITEMS_STATUS_CODES: dict[int | str, dict[str, Any]] = { + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} + + +@router.get( + "", + response_model=Page[LicensedItemGet], + status_code=status.HTTP_200_OK, + responses=_LICENSE_ITEMS_STATUS_CODES, + description="Get all licensed items", +) +async def get_licensed_items( + page_params: Annotated[PaginationParams, Depends()], + web_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + product_name: Annotated[str, Depends(get_product_name)], +): + return await web_api_rpc.get_licensed_items( + product_name=product_name, page_params=page_params + ) + + +@router.post( + "/{licensed_item_id}/checked-out-items/{licensed_item_checkout_id}/release", + response_model=LicensedItemCheckoutGet, + status_code=status.HTTP_200_OK, + responses=_LICENSE_ITEMS_STATUS_CODES, + description="Release previously checked out licensed item", +) +async def release_licensed_item( + web_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + rut_rpc: Annotated[ + ResourceUsageTrackerClient, Depends(get_resource_usage_tracker_client) + ], + product_name: Annotated[str, Depends(get_product_name)], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + licensed_item_id: LicensedItemID, + licensed_item_checkout_id: LicensedItemCheckoutID, +): + _licensed_item_checkout = await rut_rpc.get_licensed_item_checkout( + product_name=product_name, licensed_item_checkout_id=licensed_item_checkout_id + ) + if _licensed_item_checkout.licensed_item_id != licensed_item_id: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"{licensed_item_id} is not the license_item_id associated with the checked out item {licensed_item_checkout_id}", + ) + return await web_api_rpc.release_licensed_item_for_wallet( + product_name=product_name, + user_id=user_id, + licensed_item_checkout_id=licensed_item_checkout_id, + ) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/meta.py b/services/api-server/src/simcore_service_api_server/api/routes/meta.py index 9b8a622e129..1a40d9dfa20 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/meta.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/meta.py @@ -1,8 +1,9 @@ -from typing import Callable +from collections.abc import Callable +from typing import Annotated from fastapi import APIRouter, Depends -from ..._meta import API_VERSION, API_VTAG, __version__ +from ..._meta import API_VERSION, API_VTAG from ...models.schemas.meta import Meta from ..dependencies.application import get_reverse_url_mapper @@ -11,12 +12,12 @@ @router.get("", response_model=Meta) async def get_service_metadata( - url_for: Callable = Depends(get_reverse_url_mapper), + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): return Meta( name=__name__.split(".")[0], version=API_VERSION, released={API_VTAG: API_VERSION}, - 
docs_url=url_for("redoc_html"), + docs_url=url_for("swagger_ui_html"), docs_dev_url=url_for("swagger_ui_html"), ) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/programs.py b/services/api-server/src/simcore_service_api_server/api/routes/programs.py new file mode 100644 index 00000000000..0f3de634193 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/programs.py @@ -0,0 +1,206 @@ +# pylint: disable=too-many-arguments +import logging +from collections.abc import Callable +from typing import Annotated + +from fastapi import APIRouter, Body, Depends, Header, HTTPException, status +from fastapi_pagination import create_page +from httpx import HTTPStatusError +from models_library.api_schemas_storage.storage_schemas import LinkType +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from pydantic import ByteSize, PositiveInt, StringConstraints, ValidationError +from servicelib.fastapi.dependencies import get_reverse_url_mapper +from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION +from simcore_sdk.node_ports_common.filemanager import ( + complete_file_upload, + get_upload_links_from_s3, +) + +from ..._service_jobs import JobService +from ..._service_programs import ProgramService +from ...api.routes._constants import ( + DEFAULT_MAX_STRING_LENGTH, + FMSG_CHANGELOG_NEW_IN_VERSION, + create_route_description, +) +from ...models.basic_types import VersionStr +from ...models.pagination import Page, PaginationParams +from ...models.schemas.jobs import Job, JobInputs +from ...models.schemas.programs import Program, ProgramKeyId +from ..dependencies.authentication import get_current_user_id +from ..dependencies.services import get_job_service, get_program_service + +_logger = logging.getLogger(__name__) + +router = APIRouter() + + +@router.get( + "", + response_model=Page[Program], + description=create_route_description( + base="Lists the latest of all available programs", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_programs( + program_service: Annotated[ProgramService, Depends(get_program_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + page_params: Annotated[PaginationParams, Depends()], +): + programs, page_meta = await program_service.list_latest_programs( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + page_params.limit = page_meta.limit + page_params.offset = page_meta.offset + + for program in programs: + program.url = url_for( + "get_program_release", program_key=program.id, version=program.version + ) + + return create_page( + programs, + total=page_meta.total, + params=page_params, + ) + + +@router.get( + "/{program_key:path}/releases", + response_model=Page[Program], + description=create_route_description( + base="Lists the latest of all available programs", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_program_history( + program_key: ProgramKeyId, + program_service: Annotated[ProgramService, Depends(get_program_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + page_params: Annotated[PaginationParams, Depends()], +): + programs, page_meta = await program_service.list_program_history( + program_key=program_key, + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) 
+ page_params.limit = page_meta.limit + page_params.offset = page_meta.offset + + for program in programs: + program.url = url_for( + "get_program_release", program_key=program.id, version=program.version + ) + + return create_page( + programs, + total=page_meta.total, + params=page_params, + ) + + +@router.get( + "/{program_key:path}/releases/{version}", + response_model=Program, +) +async def get_program_release( + program_key: ProgramKeyId, + version: VersionStr, + program_service: Annotated[ProgramService, Depends(get_program_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +) -> Program: + """Gets a specific release of a program""" + try: + program = await program_service.get_program( + name=program_key, + version=version, + ) + + program.url = url_for( + "get_program_release", program_key=program.id, version=program.version + ) + return program + + except ( + ValueError, + IndexError, + ValidationError, + HTTPStatusError, + ) as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Program {program_key}:{version} not found", + ) from err + + +@router.post( + "/{program_key:path}/releases/{version}/jobs", + response_model=Job, + status_code=status.HTTP_201_CREATED, +) +async def create_program_job( + program_key: ProgramKeyId, + version: VersionStr, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + program_service: Annotated[ProgramService, Depends(get_program_service)], + job_service: Annotated[JobService, Depends(get_job_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + x_simcore_parent_project_uuid: Annotated[ProjectID | None, Header()] = None, + x_simcore_parent_node_id: Annotated[NodeID | None, Header()] = None, + name: Annotated[ + str | None, StringConstraints(max_length=DEFAULT_MAX_STRING_LENGTH), Body() + ] = None, + description: Annotated[ + str | None, StringConstraints(max_length=DEFAULT_MAX_STRING_LENGTH), Body() + ] = None, +): + """Creates a program job""" + + # ensures user has access to program + inputs = JobInputs(values={}) + program = await program_service.get_program( + name=program_key, + version=version, + ) + + job, project = await job_service.create_job( + project_name=name, + description=description, + solver_or_program=program, + inputs=inputs, + parent_project_uuid=x_simcore_parent_project_uuid, + parent_node_id=x_simcore_parent_node_id, + url_for=url_for, + hidden=False, + ) + + # create workspace directory so files can be uploaded to it + assert len(project.workbench) > 0 # nosec + node_id = next(iter(project.workbench)) + + _, file_upload_schema = await get_upload_links_from_s3( + user_id=user_id, + store_name=None, + store_id=SIMCORE_LOCATION, + s3_object=f"{project.uuid}/{node_id}/workspace", + link_type=LinkType.PRESIGNED, + client_session=None, + file_size=ByteSize(0), + is_directory=True, + sha256_checksum=None, + ) + await complete_file_upload( + uploaded_parts=[], + upload_completion_link=file_upload_schema.links.complete_upload, + is_directory=True, + ) + return job diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers.py index d5ee1d99164..c162e359422 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/solvers.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers.py @@ -1,46 +1,73 @@ import logging +from collections.abc import Callable from operator import attrgetter -from typing import Callable +from 
typing import Annotated, Any +from common_library.pagination_tools import iter_pagination_params from fastapi import APIRouter, Depends, HTTPException, status +from fastapi_pagination import create_page from httpx import HTTPStatusError +from models_library.api_schemas_catalog.services import ServiceListFilters +from models_library.rest_pagination import MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE +from models_library.services_enums import ServiceType from pydantic import ValidationError -from pydantic.errors import PydanticValueError -from servicelib.error_codes import create_error_code -from ...core.settings import BasicSettings -from ...models.schemas.solvers import Solver, SolverKeyId, SolverPort, VersionStr -from ...modules.catalog import CatalogApi -from ..dependencies.application import get_product_name, get_reverse_url_mapper -from ..dependencies.authentication import get_current_user_id -from ..dependencies.services import get_api_client +from ..._service_solvers import SolverService +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.basic_types import VersionStr +from ...models.pagination import OnePage, Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.model_adapter import ServicePricingPlanGetLegacy +from ...models.schemas.solvers import Solver, SolverKeyId, SolverPort +from ...models.schemas.solvers_filters import SolversListFilters +from ...services_rpc.catalog import CatalogService +from ..dependencies.application import get_reverse_url_mapper +from ..dependencies.authentication import get_current_user_id, get_product_name +from ..dependencies.models_schemas_solvers_filters import get_solvers_filters +from ..dependencies.services import get_catalog_service, get_solver_service +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ._constants import ( + FMSG_CHANGELOG_ADDED_IN_VERSION, + FMSG_CHANGELOG_NEW_IN_VERSION, + create_route_description, +) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -router = APIRouter() -settings = BasicSettings.create_from_envs() +_SOLVER_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "Not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} -## SOLVERS ----------------------------------------------------------------------------------------- -# -# - TODO: pagination, result ordering, filter field and results fields?? SEE https://cloud.google.com/apis/design/standard_methods#list -# - TODO: :search? SEE https://cloud.google.com/apis/design/custom_methods#common_custom_methods -# - TODO: move more of this logic to catalog service -# - TODO: error handling!!! -# - TODO: allow release_tags instead of versions in the next iteration. 
-# Would be nice to have /solvers/foo/releases/latest or solvers/foo/releases/3 , similar to docker tagging +router = APIRouter( + # /v0/solvers/ +) -@router.get("", response_model=list[Solver]) +@router.get( + "", + response_model=list[Solver], + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Lists all available solvers (latest version)", + deprecated=True, + alternative="GET /v0/solvers/page", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), +) async def list_solvers( - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), + catalog_service: Annotated[CatalogService, Depends(get_catalog_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): - """Lists all available solvers (latest version)""" - solvers: list[Solver] = await catalog_client.list_latest_releases( - user_id=user_id, product_name=product_name + services, _ = await catalog_service.list_latest_releases( + filters=ServiceListFilters(service_type=ServiceType.COMPUTATIONAL), ) + solvers = [Solver.create_from_service(service=service) for service in services] for solver in solvers: solver.url = url_for( @@ -50,18 +77,28 @@ async def list_solvers( return sorted(solvers, key=attrgetter("id")) -@router.get("/releases", response_model=list[Solver], summary="Lists All Releases") -async def list_solvers_releases( - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), +@router.get( + "/page", + response_model=Page[Solver], + description=create_route_description( + base="Lists all available solvers (paginated)", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9-rc1"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_all_solvers_paginated( + page_params: Annotated[PaginationParams, Depends()], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + filters: Annotated[SolversListFilters, Depends(get_solvers_filters)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): - """Lists all released solvers (all released versions)""" - assert await catalog_client.is_responsive() # nosec - - solvers: list[Solver] = await catalog_client.list_solvers( - user_id=user_id, product_name=product_name + solvers, page_meta = await solver_service.list_all_solvers( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + filter_by_solver_key_pattern=filters.solver_id, + filter_by_version_display_pattern=filters.version_display, ) for solver in solvers: @@ -69,34 +106,93 @@ async def list_solvers_releases( "get_solver_release", solver_key=solver.id, version=solver.version ) - return sorted(solvers, key=attrgetter("id", "pep404_version")) + assert page_params.limit == page_meta.limit # nosec + assert page_params.offset == page_meta.offset # nosec + + return create_page( + solvers, + total=page_meta.total, + params=page_params, + ) + + +@router.get( + "/releases", + response_model=list[Solver], + summary="Lists All Releases", + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Lists **all** released solvers (not just latest version)", + deprecated=True, + alternative="GET /v0/solvers/{solver_key}/releases/page", + changelog=[ + 
FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), +) +async def list_solvers_releases( + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + + latest_solvers: list[Solver] = [] + for page_params in iter_pagination_params( + offset=0, limit=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE + ): + solvers, page_meta = await solver_service.latest_solvers( + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + page_params.total_number_of_items = page_meta.total + latest_solvers.extend(solvers) + + all_solvers = [] + for solver in latest_solvers: + for page_params in iter_pagination_params( + offset=0, limit=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE + ): + solvers, page_meta = await solver_service.solver_release_history( + solver_key=solver.id, + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + page_params.total_number_of_items = page_meta.total + all_solvers.extend(solvers) + + for solver in all_solvers: + solver.url = url_for( + "get_solver_release", solver_key=solver.id, version=solver.version + ) + + return sorted(all_solvers, key=attrgetter("id", "pep404_version")) @router.get( "/{solver_key:path}/latest", response_model=Solver, - summary="Get Latest Release of a Solver", + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Gets latest release of a solver", + changelog=[ + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.7.1", "`version_display` field in the response" + ), + ], + ), ) async def get_solver( solver_key: SolverKeyId, - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), -) -> Solver: - """Gets latest release of a solver""" + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): # IMPORTANT: by adding /latest, we avoid changing the order of this entry in the router list # otherwise, {solver_key:path} will override and consume any of the paths that follow. 
try: - - solver = await catalog_client.get_latest_release( - user_id, solver_key, product_name=product_name - ) + solver = await solver_service.get_latest_release(solver_key=solver_key) solver.url = url_for( "get_solver_release", solver_key=solver.id, version=solver.version ) assert solver.id == solver_key # nosec - return solver except (KeyError, HTTPStatusError, IndexError) as err: @@ -106,49 +202,108 @@ async def get_solver( ) from err -@router.get("/{solver_key:path}/releases", response_model=list[Solver]) +@router.get( + "/{solver_key:path}/releases", + response_model=list[Solver], + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Lists all releases of a given (one) solver", + changelog=[ + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.7.1", "`version_display` field in the response" + ), + ], + ), +) async def list_solver_releases( solver_key: SolverKeyId, - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): - """Lists all releases of a given solver""" - releases: list[Solver] = await catalog_client.list_solver_releases( - user_id, solver_key, product_name=product_name - ) + all_releases: list[Solver] = [] + for page_params in iter_pagination_params( + offset=0, limit=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE + ): + solvers, page_meta = await solver_service.solver_release_history( + solver_key=solver_key, + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + page_params.total_number_of_items = page_meta.total + all_releases.extend(solvers) - for solver in releases: + for solver in all_releases: solver.url = url_for( "get_solver_release", solver_key=solver.id, version=solver.version ) - return sorted(releases, key=attrgetter("pep404_version")) + return sorted(all_releases, key=attrgetter("pep404_version")) -@router.get("/{solver_key:path}/releases/{version}", response_model=Solver) +@router.get( + "/{solver_key:path}/releases/page", + response_model=Page[Solver], + description=create_route_description( + base="Lists all releases of a given solver (paginated)", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9-rc1"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_solver_releases_paginated( + solver_key: SolverKeyId, + page_params: Annotated[PaginationParams, Depends()], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + solver_service: Annotated[SolverService, Depends(get_solver_service)], +): + solvers, page_meta = await solver_service.solver_release_history( + solver_key=solver_key, + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + + for solver in solvers: + solver.url = url_for( + "get_solver_release", solver_key=solver.id, version=solver.version + ) + page_params.limit = page_meta.limit + page_params.offset = page_meta.offset + return create_page( + solvers, + total=page_meta.total, + params=page_params, + ) + + +@router.get( + "/{solver_key:path}/releases/{version}", + response_model=Solver, + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Gets a specific release of a solver", + changelog=[ + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.7.1", "`version_display` field in the response" + ), + ], + ), +) async 
def get_solver_release( solver_key: SolverKeyId, version: VersionStr, - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), -) -> Solver: - """Gets a specific release of a solver""" + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): try: - solver = await catalog_client.get_solver( - user_id=user_id, - name=solver_key, - version=version, - product_name=product_name, + solver: Solver = await solver_service.get_solver( + solver_key=solver_key, + solver_version=version, ) solver.url = url_for( "get_solver_release", solver_key=solver.id, version=solver.version ) - return solver except ( @@ -156,7 +311,6 @@ async def get_solver_release( IndexError, ValidationError, HTTPStatusError, - PydanticValueError, ) as err: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, @@ -166,45 +320,58 @@ async def get_solver_release( @router.get( "/{solver_key:path}/releases/{version}/ports", - response_model=list[SolverPort], - include_in_schema=settings.API_SERVER_DEV_FEATURES_ENABLED, + response_model=OnePage[SolverPort], + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Lists inputs and outputs of a given solver", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.7.1", "`version_display` field in the response" + ), + ], + ), ) async def list_solver_ports( solver_key: SolverKeyId, version: VersionStr, - user_id: int = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - product_name: str = Depends(get_product_name), + catalog_service: Annotated[CatalogService, Depends(get_catalog_service)], ): - """Lists inputs and outputs of a given solver + ports = await catalog_service.get_service_ports( + name=solver_key, + version=version, + ) - New in *version 0.5.0* (only with API_SERVER_DEV_FEATURES_ENABLED=1) - """ - try: + solver_ports = [SolverPort.model_validate(port.model_dump()) for port in ports] + return OnePage[SolverPort].model_validate(dict(items=solver_ports)) - ports = await catalog_client.get_solver_ports( - user_id=user_id, - name=solver_key, - version=version, - product_name=product_name, - ) - return ports - - except ValidationError as err: - error_code = create_error_code(err) - logger.exception( - "Corrupted port data for service %s [%s]", - f"{solver_key}:{version}", - f"{error_code}", - extra={"error_code": error_code}, - ) - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail=f"Port definition of {solver_key}:{version} seems corrupted [{error_code}]", - ) from err - except HTTPStatusError as err: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Ports for solver {solver_key}:{version} not found", - ) from err +@router.get( + "/{solver_key:path}/releases/{version}/pricing_plan", + response_model=ServicePricingPlanGetLegacy, + responses=_SOLVER_STATUS_CODES, + description=create_route_description( + base="Gets solver pricing plan", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.7.1", "`version_display` field in the response" + ), + ], + ), +) +async def get_solver_pricing_plan( + solver_key: SolverKeyId, + version: VersionStr, + user_id: Annotated[int, Depends(get_current_user_id)], + 
webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + product_name: Annotated[str, Depends(get_product_name)], +): + assert user_id + assert product_name + pricing_plan_or_none = await webserver_api.get_service_pricing_plan( + solver_key=solver_key, version=version + ) + # NOTE: pricing_plan_or_none https://github.com/ITISFoundation/osparc-simcore/issues/6901 + assert pricing_plan_or_none # nosec + return pricing_plan_or_none diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py index 8930d3a6789..83b5d83f5f0 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py @@ -1,48 +1,54 @@ # pylint: disable=too-many-arguments -# TODO: user_id should be injected every request in api instances, i.e. a new api-instance per request import logging -from collections import deque -from typing import Callable, Optional, Union -from uuid import UUID +from collections.abc import Callable +from typing import Annotated, Any -from fastapi import APIRouter, Depends, status -from fastapi.exceptions import HTTPException -from fastapi.responses import RedirectResponse +from fastapi import APIRouter, Depends, Header, Query, Request, status +from fastapi.encoders import jsonable_encoder +from fastapi.responses import JSONResponse from models_library.clusters import ClusterID -from models_library.projects_nodes_io import BaseFileLink +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID from pydantic.types import PositiveInt -from ...models.domain.projects import NewProjectIn, Project -from ...models.schemas.files import File -from ...models.schemas.jobs import ArgumentType, Job, JobInputs, JobOutputs, JobStatus -from ...models.schemas.solvers import Solver, SolverKeyId, VersionStr -from ...modules.catalog import CatalogApi -from ...modules.director_v2 import ( - ComputationTaskGet, - DirectorV2Api, - DownloadLink, - NodeName, +from ..._service_jobs import JobService +from ..._service_solvers import SolverService +from ...exceptions.backend_errors import ProjectAlreadyStartedError +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.basic_types import VersionStr +from ...models.schemas.errors import ErrorGet +from ...models.schemas.jobs import ( + Job, + JobID, + JobInputs, + JobMetadata, + JobMetadataUpdate, + JobStatus, ) -from ...modules.storage import StorageApi, to_file_api_model -from ...utils.solver_job_models_converters import ( - create_job_from_project, +from ...models.schemas.solvers import Solver, SolverKeyId +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.jobs import replace_custom_metadata, start_project, stop_project +from ...services_http.solver_job_models_converters import ( create_jobstatus_from_task, - create_new_project_for_job, ) -from ...utils.solver_job_outputs import get_solver_output_results -from ..dependencies.application import get_product_name, get_reverse_url_mapper +from ..dependencies.application import get_reverse_url_mapper from ..dependencies.authentication import get_current_user_id -from ..dependencies.database import Engine, get_db_engine -from ..dependencies.services import get_api_client -from ..dependencies.webserver import AuthSession, get_webserver_session +from ..dependencies.services import 
get_api_client, get_job_service, get_solver_service +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ._constants import ( + FMSG_CHANGELOG_ADDED_IN_VERSION, + FMSG_CHANGELOG_CHANGED_IN_VERSION, + FMSG_CHANGELOG_NEW_IN_VERSION, + create_route_description, +) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) router = APIRouter() -def _compose_job_resource_name(solver_key, solver_version, job_id) -> str: +def compose_job_resource_name(solver_key, solver_version, job_id) -> str: """Creates a unique resource name for solver's jobs""" return Job.compose_resource_name( parent_name=Solver.compose_resource_name(solver_key, solver_version), @@ -50,61 +56,53 @@ def _compose_job_resource_name(solver_key, solver_version, job_id) -> str: ) -## JOBS --------------- +# JOBS --------------- # # - Similar to docker container's API design (container = job and image = solver) # +METADATA_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "Metadata not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} - -@router.get( - "/{solver_key:path}/releases/{version}/jobs", - response_model=list[Job], -) -async def list_jobs( - solver_key: SolverKeyId, - version: str, - user_id: PositiveInt = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - webserver_api: AuthSession = Depends(get_webserver_session), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), -): - """List of all jobs in a specific released solver""" - - solver = await catalog_client.get_solver( - user_id=user_id, - name=solver_key, - version=version, - product_name=product_name, - ) - logger.debug("Listing Jobs in Solver '%s'", solver.name) - - projects: list[Project] = await webserver_api.list_projects(solver.name) - jobs: deque[Job] = deque() - for prj in projects: - job = create_job_from_project(solver_key, version, prj, url_for) - assert job.id == prj.uuid # nosec - assert job.name == prj.name # nosec - - jobs.append(job) - - return list(jobs) +JOBS_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_402_PAYMENT_REQUIRED: { + "description": "Payment required", + "model": ErrorGet, + }, + status.HTTP_404_NOT_FOUND: { + "description": "Job/wallet/pricing details not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} @router.post( "/{solver_key:path}/releases/{version}/jobs", response_model=Job, + status_code=status.HTTP_201_CREATED, + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="Creates a job in a specific release with given inputs. 
This operation does not start the job.", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), ) -async def create_job( +async def create_solver_job( # noqa: PLR0913 solver_key: SolverKeyId, - version: str, + version: VersionStr, inputs: JobInputs, - user_id: PositiveInt = Depends(get_current_user_id), - catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), - webserver_api: AuthSession = Depends(get_webserver_session), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), - url_for: Callable = Depends(get_reverse_url_mapper), - product_name: str = Depends(get_product_name), + solver_service: Annotated[SolverService, Depends(get_solver_service)], + job_service: Annotated[JobService, Depends(get_job_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + hidden: Annotated[bool, Query()] = True, + x_simcore_parent_project_uuid: Annotated[ProjectID | None, Header()] = None, + x_simcore_parent_node_id: Annotated[NodeID | None, Header()] = None, ): """Creates a job in a specific release with given inputs. @@ -112,243 +110,206 @@ async def create_job( """ # ensures user has access to solver - solver = await catalog_client.get_solver( - user_id=user_id, - name=solver_key, - version=version, - product_name=product_name, + solver = await solver_service.get_solver( + solver_key=solver_key, + solver_version=version, ) - - # creates NEW job as prototype - pre_job = Job.create_solver_job(solver=solver, inputs=inputs) - logger.debug("Creating Job '%s'", pre_job.name) - - # -> catalog - # TODO: validate inputs against solver input schema - - # -> webserver: NewProjectIn = Job - project_in: NewProjectIn = create_new_project_for_job(solver, pre_job, inputs) - new_project: Project = await webserver_api.create_project(project_in) - assert new_project # nosec - assert new_project.uuid == pre_job.id # nosec - - # for consistency, it rebuild job - job = create_job_from_project( - solver_key=solver.id, - solver_version=solver.version, - project=new_project, + job, _ = await job_service.create_job( + project_name=None, + description=None, + solver_or_program=solver, + inputs=inputs, url_for=url_for, + hidden=hidden, + parent_project_uuid=x_simcore_parent_project_uuid, + parent_node_id=x_simcore_parent_node_id, ) - assert job.id == pre_job.id # nosec - assert job.name == pre_job.name # nosec - assert job.name == _compose_job_resource_name(solver_key, version, job.id) # nosec - - # -> director2: ComputationTaskOut = JobStatus - # consistency check - task: ComputationTaskGet = await director2_api.create_computation( - job.id, user_id, product_name - ) - assert task.id == job.id # nosec - - job_status: JobStatus = create_jobstatus_from_task(task) - assert job.id == job_status.job_id # nosec return job -@router.get( - "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}", response_model=Job +@router.delete( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}", + status_code=status.HTTP_204_NO_CONTENT, + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="Deletes an existing solver job", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + ], + ), ) -async def get_job( +async def delete_job( solver_key: SolverKeyId, version: VersionStr, - job_id: UUID, - webserver_api: AuthSession = Depends(get_webserver_session), - url_for: Callable = Depends(get_reverse_url_mapper), + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], ): - """Gets job of a given solver""" - - 
job_name = _compose_job_resource_name(solver_key, version, job_id) - logger.debug("Getting Job '%s'", job_name) - - project: Project = await webserver_api.get_project(project_id=job_id) + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Deleting Job '%s'", job_name) - job = create_job_from_project(solver_key, version, project, url_for) - assert job.id == job_id # nosec - return job # nosec + await webserver_api.delete_project(project_id=job_id) @router.post( "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}:start", + status_code=status.HTTP_202_ACCEPTED, response_model=JobStatus, + responses=JOBS_STATUS_CODES + | { + status.HTTP_200_OK: { + "description": "Job already started", + "model": JobStatus, + }, + status.HTTP_406_NOT_ACCEPTABLE: { + "description": "Cluster not found", + "model": ErrorGet, + }, + status.HTTP_422_UNPROCESSABLE_ENTITY: { + "description": "Configuration error", + "model": ErrorGet, + }, + }, + description=create_route_description( + base="Starts job job_id created with the solver solver_key:version", + changelog=[ + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.4.3", "query parameter `cluster_id`" + ), + FMSG_CHANGELOG_ADDED_IN_VERSION.format( + "0.6", "responds with a 202 when successfully starting a computation" + ), + FMSG_CHANGELOG_CHANGED_IN_VERSION.format( + "0.7", "query parameter `cluster_id` deprecated" + ), + ], + ), ) async def start_job( + request: Request, solver_key: SolverKeyId, version: VersionStr, - job_id: UUID, - cluster_id: Optional[ClusterID] = None, - user_id: PositiveInt = Depends(get_current_user_id), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), - product_name: str = Depends(get_product_name), + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001 + ClusterID | None, Query(deprecated=True) + ] = None, ): - """Starts job job_id created with the solver solver_key:version - - New in *version 0.4.3*: cluster_id - """ - - job_name = _compose_job_resource_name(solver_key, version, job_id) - logger.debug("Start Job '%s'", job_name) - - task = await director2_api.start_computation( - project_id=job_id, + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Start Job '%s'", job_name) + + try: + await start_project( + request=request, + job_id=job_id, + expected_job_name=job_name, + webserver_api=webserver_api, + ) + except ProjectAlreadyStartedError: + job_status = await inspect_job( + solver_key=solver_key, + version=version, + job_id=job_id, + user_id=user_id, + director2_api=director2_api, + ) + return JSONResponse( + status_code=status.HTTP_200_OK, content=jsonable_encoder(job_status) + ) + return await inspect_job( + solver_key=solver_key, + version=version, + job_id=job_id, user_id=user_id, - product_name=product_name, - cluster_id=cluster_id, + director2_api=director2_api, ) - job_status: JobStatus = create_jobstatus_from_task(task) - return job_status @router.post( - "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}:stop", response_model=Job + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}:stop", + response_model=JobStatus, + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="Stops a running job", + changelog=[ + 
FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), ) async def stop_job( solver_key: SolverKeyId, version: VersionStr, - job_id: UUID, - user_id: PositiveInt = Depends(get_current_user_id), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], ): - job_name = _compose_job_resource_name(solver_key, version, job_id) - logger.debug("Stopping Job '%s'", job_name) + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Stopping Job '%s'", job_name) - await director2_api.stop_computation(job_id, user_id) - - task = await director2_api.get_computation(job_id, user_id) - job_status: JobStatus = create_jobstatus_from_task(task) - return job_status + return await stop_project( + job_id=job_id, user_id=user_id, director2_api=director2_api + ) @router.post( "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}:inspect", response_model=JobStatus, + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="Inspects the current status of a job", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), ) async def inspect_job( solver_key: SolverKeyId, version: VersionStr, - job_id: UUID, - user_id: PositiveInt = Depends(get_current_user_id), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), -): - job_name = _compose_job_resource_name(solver_key, version, job_id) - logger.debug("Inspecting Job '%s'", job_name) - - task = await director2_api.get_computation(job_id, user_id) + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +) -> JobStatus: + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Inspecting Job '%s'", job_name) + + task = await director2_api.get_computation(project_id=job_id, user_id=user_id) job_status: JobStatus = create_jobstatus_from_task(task) return job_status -@router.get( - "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/outputs", - response_model=JobOutputs, -) -async def get_job_outputs( - solver_key: SolverKeyId, - version: VersionStr, - job_id: UUID, - user_id: PositiveInt = Depends(get_current_user_id), - db_engine: Engine = Depends(get_db_engine), - webserver_api: AuthSession = Depends(get_webserver_session), - storage_client: StorageApi = Depends(get_api_client(StorageApi)), -): - job_name = _compose_job_resource_name(solver_key, version, job_id) - logger.debug("Get Job '%s' outputs", job_name) - - project: Project = await webserver_api.get_project(project_id=job_id) - node_ids = list(project.workbench.keys()) - assert len(node_ids) == 1 # nosec - - outputs: dict[ - str, Union[float, int, bool, BaseFileLink, str, None] - ] = await get_solver_output_results( - user_id=user_id, - project_uuid=job_id, - node_uuid=UUID(node_ids[0]), - db_engine=db_engine, - ) - - results: dict[str, ArgumentType] = {} - for name, value in outputs.items(): - if isinstance(value, BaseFileLink): - # TODO: value.path exists?? 
- file_id: UUID = File.create_id(*value.path.split("/")) - - # TODO: acquire_soft_link will halve calls - found = await storage_client.search_files(user_id, file_id) - if found: - assert len(found) == 1 # nosec - results[name] = to_file_api_model(found[0]) - else: - api_file: File = await storage_client.create_soft_link( - user_id, value.path, file_id - ) - results[name] = api_file - else: - # TODO: cast against catalog's output port specs - results[name] = value - - job_outputs = JobOutputs(job_id=job_id, results=results) - return job_outputs - - -@router.get( - "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/outputs/logfile", - response_class=RedirectResponse, - responses={ - status.HTTP_200_OK: { - "content": { - "application/octet-stream": { - "schema": {"type": "string", "format": "binary"} - }, - "application/zip": {"schema": {"type": "string", "format": "binary"}}, - "text/plain": {"schema": {"type": "string"}}, - }, - "description": "Returns a log file", - }, - status.HTTP_404_NOT_FOUND: {"description": "Log not found"}, - }, +@router.patch( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/metadata", + response_model=JobMetadata, + responses=METADATA_STATUS_CODES, + description=create_route_description( + base="Updates custom metadata from a job", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + ], + ), ) -async def get_job_output_logfile( +async def replace_job_custom_metadata( solver_key: SolverKeyId, version: VersionStr, - job_id: UUID, - user_id: PositiveInt = Depends(get_current_user_id), - director2_api: DirectorV2Api = Depends(get_api_client(DirectorV2Api)), + job_id: JobID, + update: JobMetadataUpdate, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], ): - """Special extra output with persistent logs file for the solver run. - - NOTE: this is not a log stream but a predefined output that is only - available after the job is done. + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Custom metadata for '%s'", job_name) - New in *version 0.4.0* - """ - - logs_urls: dict[NodeName, DownloadLink] = await director2_api.get_computation_logs( - user_id=user_id, project_id=job_id - ) - - # if more than one node? should rezip all of them?? - assert len(logs_urls) <= 1, "Current version only supports one node per solver" - for presigned_download_link in logs_urls.values(): - logger.info( - "Redirecting '%s' to %s ...", - f"{solver_key}/releases/{version}/jobs/{job_id}/outputs/logfile", - presigned_download_link, - ) - return RedirectResponse(presigned_download_link) - - raise HTTPException( - status.HTTP_404_NOT_FOUND, - detail=f"Log for {solver_key}/releases/{version}/jobs/{job_id} not found." 
- "Note that these logs are only available after the job is completed.", + return await replace_custom_metadata( + job_name=job_name, + job_id=job_id, + update=update, + webserver_api=webserver_api, + self_url=url_for( + "replace_job_custom_metadata", + solver_key=solver_key, + version=version, + job_id=job_id, + ), ) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs_read.py b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs_read.py new file mode 100644 index 00000000000..7210b9b2c97 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs_read.py @@ -0,0 +1,565 @@ +# pylint: disable=too-many-arguments + +import logging +from collections import deque +from collections.abc import Callable +from functools import partial +from typing import Annotated, Any, Union +from uuid import UUID + +from fastapi import APIRouter, Depends, Request, status +from fastapi.exceptions import HTTPException +from fastapi.responses import RedirectResponse +from fastapi_pagination.api import create_page +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.projects_nodes_io import BaseFileLink +from models_library.users import UserID +from models_library.wallets import ZERO_CREDITS +from pydantic import HttpUrl, NonNegativeInt +from pydantic.types import PositiveInt +from servicelib.logging_utils import log_context +from sqlalchemy.ext.asyncio import AsyncEngine +from starlette.background import BackgroundTask + +from ..._service_solvers import SolverService +from ...exceptions.custom_errors import InsufficientCreditsError, MissingWalletError +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.api_resources import parse_resources_ids +from ...models.basic_types import LogStreamingResponse, NameValueTuple, VersionStr +from ...models.domain.files import File as DomainFile +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.files import File as SchemaFile +from ...models.schemas.jobs import ( + ArgumentTypes, + Job, + JobID, + JobLog, + JobMetadata, + JobOutputs, +) +from ...models.schemas.jobs_filters import JobMetadataFilter +from ...models.schemas.model_adapter import ( + PricingUnitGetLegacy, + WalletGetWithAvailableCreditsLegacy, +) +from ...models.schemas.solvers import SolverKeyId +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.jobs import ( + get_custom_metadata, + raise_if_job_not_associated_with_solver, +) +from ...services_http.log_streaming import LogDistributor, LogStreamer +from ...services_http.solver_job_models_converters import create_job_from_project +from ...services_http.solver_job_outputs import ResultsTypes, get_solver_output_results +from ...services_http.storage import StorageApi, to_file_api_model +from ..dependencies.application import get_reverse_url_mapper +from ..dependencies.authentication import get_current_user_id +from ..dependencies.database import get_db_asyncpg_engine +from ..dependencies.models_schemas_jobs_filters import get_job_metadata_filter +from ..dependencies.rabbitmq import get_log_check_timeout, get_log_distributor +from ..dependencies.services import get_api_client, get_solver_service +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ._constants import ( + FMSG_CHANGELOG_NEW_IN_VERSION, + FMSG_CHANGELOG_REMOVED_IN_VERSION_FORMAT, + 
create_route_description, +) +from .solvers_jobs import ( + JOBS_STATUS_CODES, + METADATA_STATUS_CODES, + compose_job_resource_name, +) +from .wallets import WALLET_STATUS_CODES + +_logger = logging.getLogger(__name__) + +_OUTPUTS_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_402_PAYMENT_REQUIRED: { + "description": "Payment required", + "model": ErrorGet, + }, + status.HTTP_404_NOT_FOUND: { + "description": "Job not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} + +_LOGFILE_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_200_OK: { + "content": { + "application/octet-stream": { + "schema": {"type": "string", "format": "binary"} + }, + "application/zip": {"schema": {"type": "string", "format": "binary"}}, + "text/plain": {"schema": {"type": "string"}}, + }, + "description": "Returns a log file", + }, + status.HTTP_404_NOT_FOUND: {"description": "Log not found"}, +} | DEFAULT_BACKEND_SERVICE_STATUS_CODES # type: ignore + +_PRICING_UNITS_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "Pricing unit not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} + +_LOGSTREAM_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_200_OK: { + "description": "Returns a JobLog or an ErrorGet", + "model": Union[JobLog, ErrorGet], + }, + status.HTTP_409_CONFLICT: { + "description": "Conflict: Logs are already being streamed", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} + + +router = APIRouter() + + +@router.get( + "/-/releases/-/jobs", + response_model=Page[Job], + description=create_route_description( + base="List of all jobs created for any released solver (paginated)", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9-rc1"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_all_solvers_jobs( + page_params: Annotated[PaginationParams, Depends()], + filter_job_metadata_params: Annotated[ + JobMetadataFilter | None, Depends(get_job_metadata_filter) + ], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + + jobs, meta = await solver_service.list_jobs( + filter_any_custom_metadata=( + [ + NameValueTuple(filter_metadata.name, filter_metadata.pattern) + for filter_metadata in filter_job_metadata_params.any + ] + if filter_job_metadata_params + else None + ), + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + + for job in jobs: + solver_key, version, job_id = parse_resources_ids(job.resource_name) + _update_solver_job_urls(job, solver_key, version, job_id, url_for) + + return create_page( + jobs, + total=meta.total, + params=page_params, + ) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs", + response_model=list[Job], + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="List of jobs in a specific released solver (limited to 20 jobs)", + deprecated=True, + alternative="GET /{solver_key}/releases/{version}/jobs/page", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + FMSG_CHANGELOG_REMOVED_IN_VERSION_FORMAT.format( + "0.7", + "This endpoint is deprecated and will be removed in a future version", + ), + ], + ), +) +async def list_jobs( + solver_key: SolverKeyId, + version: VersionStr, + solver_service: Annotated[SolverService, Depends(get_solver_service)], + webserver_api: Annotated[AuthSession, 
Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + solver = await solver_service.get_solver( + solver_key=solver_key, + solver_version=version, + ) + _logger.debug("Listing Jobs in Solver '%s'", solver.name) + + projects_page = await webserver_api.get_projects_w_solver_page( + solver_name=solver.name, limit=20, offset=0 + ) + + jobs: deque[Job] = deque() + for prj in projects_page.data: + job = create_job_from_project( + solver_or_program=solver, project=prj, url_for=url_for + ) + assert job.id == prj.uuid # nosec + assert job.name == prj.name # nosec + + jobs.append(job) + + return list(jobs) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/page", + response_model=Page[Job], + responses=JOBS_STATUS_CODES, + description=create_route_description( + base="List of jobs on a specific released solver (includes pagination)", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + ], + ), + operation_id="get_jobs_page", +) +async def list_jobs_paginated( + solver_key: SolverKeyId, + version: VersionStr, + page_params: Annotated[PaginationParams, Depends()], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + # NOTE: Different entry to keep backwards compatibility with list_jobs. + # Eventually use a header with agent version to switch to new interface + + solver = await solver_service.get_solver( + solver_key=solver_key, + solver_version=version, + ) + _logger.debug("Listing Jobs in Solver '%s'", solver.name) + + projects_page = await webserver_api.get_projects_w_solver_page( + solver_name=solver.name, limit=page_params.limit, offset=page_params.offset + ) + + jobs: list[Job] = [ + create_job_from_project(solver_or_program=solver, project=prj, url_for=url_for) + for prj in projects_page.data + ] + + return create_page( + jobs, + total=projects_page.meta.total, + params=page_params, + ) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}", + response_model=Job, + responses=JOBS_STATUS_CODES, +) +async def get_job( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + solver_service: Annotated[SolverService, Depends(get_solver_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + """Gets job of a given solver""" + _logger.debug( + "Getting Job '%s'", compose_job_resource_name(solver_key, version, job_id) + ) + + solver = await solver_service.get_solver( + solver_key=solver_key, + solver_version=version, + ) + project: ProjectGet = await webserver_api.get_project(project_id=job_id) + + job = create_job_from_project( + solver_or_program=solver, project=project, url_for=url_for + ) + assert job.id == job_id # nosec + return job # nosec + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/outputs", + response_model=JobOutputs, + responses=_OUTPUTS_STATUS_CODES, +) +async def get_job_outputs( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + async_pg_engine: Annotated[AsyncEngine, Depends(get_db_asyncpg_engine)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], +): + job_name = compose_job_resource_name(solver_key, 
version, job_id) + _logger.debug("Get Job '%s' outputs", job_name) + + project: ProjectGet = await webserver_api.get_project(project_id=job_id) + node_ids = list(project.workbench.keys()) + assert len(node_ids) == 1 # nosec + + product_price = await webserver_api.get_product_price() + if product_price.usd_per_credit is not None: + wallet = await webserver_api.get_project_wallet(project_id=project.uuid) + if wallet is None: + raise MissingWalletError(job_id=project.uuid) + wallet_with_credits = await webserver_api.get_wallet(wallet_id=wallet.wallet_id) + if wallet_with_credits.available_credits <= ZERO_CREDITS: + raise InsufficientCreditsError( + wallet_name=wallet_with_credits.name, + wallet_credit_amount=wallet_with_credits.available_credits, + ) + + outputs: dict[str, ResultsTypes] = await get_solver_output_results( + user_id=user_id, + project_uuid=job_id, + node_uuid=UUID(node_ids[0]), + db_engine=async_pg_engine, + ) + + results: dict[str, ArgumentTypes] = {} + for name, value in outputs.items(): + if isinstance(value, BaseFileLink): + file_id: UUID = DomainFile.create_id(*value.path.split("/")) + + found = await storage_client.search_owned_files( + user_id=user_id, file_id=file_id, limit=1 + ) + if found: + assert len(found) == 1 # nosec + results[name] = SchemaFile.from_domain_model( + to_file_api_model(found[0]) + ) + else: + api_file = await storage_client.create_soft_link( + user_id=user_id, target_s3_path=value.path, as_file_id=file_id + ) + results[name] = SchemaFile.from_domain_model(api_file) + else: + results[name] = value + + return JobOutputs(job_id=job_id, results=results) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/outputs/logfile", + response_class=RedirectResponse, + responses=_LOGFILE_STATUS_CODES, + description=create_route_description( + base="Special extra output with persistent logs file for the solver run.\n\n" + "**NOTE**: this is not a log stream but a predefined output that is only\n" + "available after the job is done", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.4"), + ], + ), +) +async def get_job_output_logfile( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +): + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Get Job '%s' outputs logfile", job_name) + + project_id = job_id + + log_link_map = await director2_api.get_computation_logs( + user_id=user_id, project_id=project_id + ) + logs_urls = log_link_map.log_links + + _logger.debug( + "Found %d logfiles for %s %s: %s", + len(logs_urls), + f"{project_id=}", + f"{user_id=}", + [e.download_link for e in logs_urls], + ) + + # if more than one node? should rezip all of them?? + assert ( # nosec + len(logs_urls) <= 1 + ), "Current version only supports one node per solver" + + for log_link in logs_urls: + presigned_download_link = log_link.download_link + _logger.info( + "Redirecting '%s' to %s ...", + f"{solver_key}/releases/{version}/jobs/{job_id}/outputs/logfile", + presigned_download_link, + ) + return RedirectResponse(f"{presigned_download_link}") + + # No log found ! + raise HTTPException( + status.HTTP_404_NOT_FOUND, + detail=f"Log for {solver_key}/releases/{version}/jobs/{job_id} not found." 
+ "Note that these logs are only available after the job is completed.", + ) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/metadata", + response_model=JobMetadata, + responses=METADATA_STATUS_CODES, + description=create_route_description( + base="Gets custom metadata from a job", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7")], + ), +) +async def get_job_custom_metadata( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Custom metadata for '%s'", job_name) + + return await get_custom_metadata( + job_name=job_name, + job_id=job_id, + webserver_api=webserver_api, + self_url=url_for( + "get_job_custom_metadata", + solver_key=solver_key, + version=version, + job_id=job_id, + ), + ) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/wallet", + response_model=WalletGetWithAvailableCreditsLegacy, + responses=WALLET_STATUS_CODES, + description=create_route_description( + base="Get job wallet", changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7")] + ), +) +async def get_job_wallet( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +) -> WalletGetWithAvailableCreditsLegacy: + job_name = compose_job_resource_name(solver_key, version, job_id) + _logger.debug("Getting wallet for job '%s'", job_name) + + if project_wallet := await webserver_api.get_project_wallet(project_id=job_id): + return await webserver_api.get_wallet(wallet_id=project_wallet.wallet_id) + raise MissingWalletError(job_id=job_id) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/pricing_unit", + response_model=PricingUnitGetLegacy, + responses=_PRICING_UNITS_STATUS_CODES, + description=create_route_description( + base="Get job pricing unit", + changelog=[FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7")], + ), +) +async def get_job_pricing_unit( + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + job_name = compose_job_resource_name(solver_key, version, job_id) + with log_context(_logger, logging.DEBUG, "Get pricing unit"): + _logger.debug("job: %s", job_name) + project: ProjectGet = await webserver_api.get_project(project_id=job_id) + raise_if_job_not_associated_with_solver(job_name, project) + node_ids = list(project.workbench.keys()) + assert len(node_ids) == 1 # nosec + node_id: UUID = UUID(node_ids[0]) + return await webserver_api.get_project_node_pricing_unit( + project_id=job_id, node_id=node_id + ) + + +@router.get( + "/{solver_key:path}/releases/{version}/jobs/{job_id:uuid}/logstream", + response_class=LogStreamingResponse, + responses=_LOGSTREAM_STATUS_CODES, +) +async def get_log_stream( + request: Request, + solver_key: SolverKeyId, + version: VersionStr, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + log_distributor: Annotated[LogDistributor, Depends(get_log_distributor)], + user_id: Annotated[UserID, Depends(get_current_user_id)], + log_check_timeout: Annotated[NonNegativeInt, Depends(get_log_check_timeout)], +): + assert request # nosec + + job_name = 
compose_job_resource_name(solver_key, version, job_id) + with log_context( + _logger, logging.DEBUG, f"Streaming logs for {job_name=} and {user_id=}" + ): + project: ProjectGet = await webserver_api.get_project(project_id=job_id) + raise_if_job_not_associated_with_solver(job_name, project) + log_streamer = LogStreamer( + user_id=user_id, + director2_api=director2_api, + job_id=job_id, + log_distributor=log_distributor, + log_check_timeout=log_check_timeout, + ) + await log_distributor.register(job_id, log_streamer.queue) + return LogStreamingResponse( + log_streamer.log_generator(), + background=BackgroundTask(partial(log_distributor.deregister, job_id)), + ) + + +def _update_solver_job_urls( + job: Job, + solver_key: SolverKeyId, + solver_version: VersionStr, + job_id: JobID | str, + url_for: Callable[..., HttpUrl], +) -> Job: + job.url = url_for( + get_job.__name__, + solver_key=solver_key, + version=solver_version, + job_id=job_id, + ) + + job.runner_url = url_for( + "get_solver_release", + solver_key=solver_key, + version=solver_version, + ) + + job.outputs_url = url_for( + "get_job_outputs", + solver_key=solver_key, + version=solver_version, + job_id=job_id, + ) + + return job diff --git a/services/api-server/src/simcore_service_api_server/api/routes/studies.py b/services/api-server/src/simcore_service_api_server/api/routes/studies.py index 92450321ccb..11a17b295c7 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/studies.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/studies.py @@ -1,31 +1,125 @@ import logging -from typing import Any +from typing import Annotated, Final -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Depends, Header, status +from fastapi_pagination.api import create_page +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID -from ...core.settings import BasicSettings -from ...models.schemas.studies import StudyID, StudyPort -from ..dependencies.webserver import AuthSession, get_webserver_session +from ...models.pagination import OnePage, Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.studies import Study, StudyID, StudyPort +from ...services_http.webserver import AuthSession +from ..dependencies.webserver_http import get_webserver_session +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) router = APIRouter() -settings = BasicSettings.create_from_envs() + +_COMMON_ERROR_RESPONSES: Final[dict] = { + status.HTTP_404_NOT_FOUND: { + "description": "Study not found", + "model": ErrorGet, + }, +} + + +def _create_study_from_project(project: ProjectGet) -> Study: + assert isinstance(project, ProjectGet) # nosec + return Study.model_construct( + uid=project.uuid, + title=project.name, + description=project.description, + _fields_set={"uid", "title", "description"}, + ) + + +@router.get( + "", + response_model=Page[Study], + description=create_route_description( + base="List all studies", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), +) +async def list_studies( + page_params: Annotated[PaginationParams, Depends()], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + projects_page = await webserver_api.get_projects_page( + limit=page_params.limit, offset=page_params.offset + ) + + 
studies: list[Study] = [ + _create_study_from_project(prj) for prj in projects_page.data + ] + + return create_page( + studies, + total=projects_page.meta.total, + params=page_params, + ) + + +@router.get( + "/{study_id:uuid}", + response_model=Study, + responses={**_COMMON_ERROR_RESPONSES}, + description=create_route_description( + base="Get study by ID", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), +) +async def get_study( + study_id: StudyID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + project: ProjectGet = await webserver_api.get_project(project_id=study_id) + return _create_study_from_project(project) + + +@router.post( + "/{study_id:uuid}:clone", + response_model=Study, + status_code=status.HTTP_201_CREATED, + responses={**_COMMON_ERROR_RESPONSES}, +) +async def clone_study( + study_id: StudyID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + x_simcore_parent_project_uuid: Annotated[ProjectID | None, Header()] = None, + x_simcore_parent_node_id: Annotated[NodeID | None, Header()] = None, +): + project: ProjectGet = await webserver_api.clone_project( + project_id=study_id, + hidden=False, + parent_project_uuid=x_simcore_parent_project_uuid, + parent_node_id=x_simcore_parent_node_id, + ) + return _create_study_from_project(project) @router.get( - "/{study_id}/ports", - response_model=list[StudyPort], - include_in_schema=settings.API_SERVER_DEV_FEATURES_ENABLED, + "/{study_id:uuid}/ports", + response_model=OnePage[StudyPort], + responses={**_COMMON_ERROR_RESPONSES}, + description=create_route_description( + base="Lists metadata on ports of a given study", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.5"), + ], + ), ) async def list_study_ports( study_id: StudyID, - webserver_api: AuthSession = Depends(get_webserver_session), + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], ): - """Lists metadata on ports of a given study - - New in *version 0.5.0* (only with API_SERVER_DEV_FEATURES_ENABLED=1) - """ - project_ports: list[ - dict[str, Any] - ] = await webserver_api.get_project_metadata_ports(project_id=study_id) - return project_ports + project_ports: list[StudyPort] = await webserver_api.get_project_metadata_ports( + project_id=study_id + ) + return OnePage[StudyPort](items=project_ports) diff --git a/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py new file mode 100644 index 00000000000..c19845df2aa --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py @@ -0,0 +1,493 @@ +import logging +from collections.abc import Callable +from typing import Annotated +from uuid import UUID + +from fastapi import APIRouter, Depends, Header, Query, Request, status +from fastapi.encoders import jsonable_encoder +from fastapi.responses import JSONResponse +from fastapi_pagination.api import create_page +from models_library.api_schemas_webserver.projects import ProjectPatch +from models_library.api_schemas_webserver.projects_nodes import NodeOutputs +from models_library.clusters import ClusterID +from models_library.function_services_catalog.services import file_picker +from models_library.projects import ProjectID +from models_library.projects_nodes import InputID, InputTypes +from models_library.projects_nodes_io import NodeID +from pydantic import HttpUrl, PositiveInt +from servicelib.logging_utils import log_context + +from 
..._service_studies import StudyService +from ...exceptions.backend_errors import ProjectAlreadyStartedError +from ...models.api_resources import parse_resources_ids +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.jobs import ( + Job, + JobID, + JobInputs, + JobMetadata, + JobMetadataUpdate, + JobOutputs, + JobStatus, +) +from ...models.schemas.studies import JobLogsMap, Study, StudyID +from ...services_http.director_v2 import DirectorV2Api +from ...services_http.jobs import ( + get_custom_metadata, + replace_custom_metadata, + start_project, + stop_project, +) +from ...services_http.solver_job_models_converters import create_jobstatus_from_task +from ...services_http.storage import StorageApi +from ...services_http.study_job_models_converters import ( + create_job_from_study, + create_job_outputs_from_project_outputs, + get_project_and_file_inputs_from_job_inputs, +) +from ...services_http.webserver import AuthSession +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.application import get_reverse_url_mapper +from ..dependencies.authentication import get_current_user_id, get_product_name +from ..dependencies.services import get_api_client, get_study_service +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ..dependencies.webserver_rpc import ( + get_wb_api_rpc_client, +) +from ._constants import ( + FMSG_CHANGELOG_CHANGED_IN_VERSION, + FMSG_CHANGELOG_NEW_IN_VERSION, + create_route_description, +) +from .solvers_jobs import ( + JOBS_STATUS_CODES, +) + +# pylint: disable=too-many-arguments + + +_logger = logging.getLogger(__name__) + + +router = APIRouter() + + +def _compose_job_resource_name(study_key, job_id) -> str: + """Creates a unique resource name for solver's jobs""" + return Job.compose_resource_name( + parent_name=Study.compose_resource_name(study_key), + job_id=job_id, + ) + + +@router.get( + "/{study_id:uuid}/jobs", + response_model=Page[Job], + description=create_route_description( + base="List of all jobs created for a given study (paginated)", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9-rc1"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def list_study_jobs( + study_id: StudyID, + page_params: Annotated[PaginationParams, Depends()], + study_service: Annotated[StudyService, Depends(get_study_service)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + msg = f"list study jobs study_id={study_id!r} with pagination={page_params!r}. 
SEE https://github.com/ITISFoundation/osparc-simcore/issues/4177" + _logger.debug(msg) + + jobs, meta = await study_service.list_jobs( + filter_by_study_id=study_id, + pagination_offset=page_params.offset, + pagination_limit=page_params.limit, + ) + + for job in jobs: + study_id_str, job_id = parse_resources_ids(job.resource_name) + assert study_id_str == f"{study_id}" + _update_study_job_urls( + job=job, study_id=study_id, job_id=job_id, url_for=url_for + ) + + return create_page( + jobs, + total=meta.total, + params=page_params, + ) + + +@router.post( + "/{study_id:uuid}/jobs", + response_model=Job, +) +async def create_study_job( + study_id: StudyID, + job_inputs: JobInputs, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + wb_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + product_name: Annotated[str, Depends(get_product_name)], + hidden: Annotated[bool, Query()] = True, # noqa: FBT002 + x_simcore_parent_project_uuid: ProjectID | None = Header(default=None), + x_simcore_parent_node_id: NodeID | None = Header(default=None), +) -> Job: + """ + hidden -- if True (default) hides project from UI + """ + project = await webserver_api.clone_project( + project_id=study_id, + hidden=hidden, + parent_project_uuid=x_simcore_parent_project_uuid, + parent_node_id=x_simcore_parent_node_id, + ) + job = create_job_from_study( + study_key=study_id, project=project, job_inputs=job_inputs + ) + job.url = url_for( + "get_study_job", + study_id=study_id, + job_id=job.id, + ) + job.runner_url = url_for("get_study", study_id=study_id) + job.outputs_url = url_for( + "get_study_job_outputs", + study_id=study_id, + job_id=job.id, + ) + + await webserver_api.patch_project( + project_id=job.id, + patch_params=ProjectPatch(name=job.name), + ) + + await wb_api_rpc.mark_project_as_job( + product_name=product_name, + user_id=user_id, + project_uuid=job.id, + job_parent_resource_name=job.runner_name, + ) + + project_inputs = await webserver_api.get_project_inputs(project_id=project.uuid) + + file_param_nodes = {} + for node_id, node in project.workbench.items(): + if ( + node.key == file_picker.META.key + and node.outputs is not None + and len(node.outputs) == 0 + ): + file_param_nodes[node.label] = node_id + + file_inputs: dict[InputID, InputTypes] = {} + + ( + new_project_inputs, + new_project_file_inputs, + ) = get_project_and_file_inputs_from_job_inputs( + project_inputs, file_inputs, job_inputs + ) + + for node_label, file_link in new_project_file_inputs.items(): + await webserver_api.update_node_outputs( + project_id=project.uuid, + node_id=UUID(file_param_nodes[node_label]), + new_node_outputs=NodeOutputs(outputs={"outFile": file_link}), + ) + + if len(new_project_inputs) > 0: + await webserver_api.update_project_inputs( + project_id=project.uuid, new_inputs=new_project_inputs + ) + + assert job.name == _compose_job_resource_name(study_id, job.id) + + return job + + +@router.get( + "/{study_id:uuid}/jobs/{job_id:uuid}", + response_model=Job, + status_code=status.HTTP_501_NOT_IMPLEMENTED, + description=create_route_description( + base="Gets a jobs for a given study", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.9-rc1"), + ], + ), + include_in_schema=False, # TO BE RELEASED in 0.9 +) +async def get_study_job( + study_id: StudyID, + job_id: JobID, + study_service: Annotated[StudyService, Depends(get_study_service)], +): + assert 
study_service # nosec + msg = f"get study job study_id={study_id!r} job_id={job_id!r}. SEE https://github.com/ITISFoundation/osparc-simcore/issues/4177" + raise NotImplementedError(msg) + + +@router.delete( + "/{study_id:uuid}/jobs/{job_id:uuid}", + status_code=status.HTTP_204_NO_CONTENT, + responses={status.HTTP_404_NOT_FOUND: {"model": ErrorGet}}, +) +async def delete_study_job( + study_id: StudyID, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + """Deletes an existing study job""" + job_name = _compose_job_resource_name(study_id, job_id) + with log_context(_logger, logging.DEBUG, f"Deleting Job '{job_name}'"): + await webserver_api.delete_project(project_id=job_id) + + +@router.post( + "/{study_id:uuid}/jobs/{job_id:uuid}:start", + status_code=status.HTTP_202_ACCEPTED, + response_model=JobStatus, + responses=JOBS_STATUS_CODES + | { + status.HTTP_200_OK: { + "description": "Job already started", + "model": JobStatus, + }, + status.HTTP_406_NOT_ACCEPTABLE: { + "description": "Cluster not found", + "model": ErrorGet, + }, + status.HTTP_422_UNPROCESSABLE_ENTITY: { + "description": "Configuration error", + "model": ErrorGet, + }, + }, + description=create_route_description( + changelog=[ + FMSG_CHANGELOG_CHANGED_IN_VERSION.format( + "0.6", + "Now responds with a 202 when successfully starting a computation", + ), + ] + ), +) +async def start_study_job( + request: Request, + study_id: StudyID, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], + cluster_id: Annotated[ # pylint: disable=unused-argument # noqa: ARG001 + ClusterID | None, + Query( + description=create_route_description( + changelog=[ + FMSG_CHANGELOG_CHANGED_IN_VERSION.format( + "0.7", "query parameter `cluster_id` deprecated" + ), + ] + ), + deprecated=True, + ), + ] = None, +): + job_name = _compose_job_resource_name(study_id, job_id) + with log_context(_logger, logging.DEBUG, f"Starting Job '{job_name}'"): + try: + await start_project( + request=request, + job_id=job_id, + expected_job_name=job_name, + webserver_api=webserver_api, + ) + except ProjectAlreadyStartedError: + job_status: JobStatus = await inspect_study_job( + study_id=study_id, + job_id=job_id, + user_id=user_id, + director2_api=director2_api, + ) + return JSONResponse( + content=jsonable_encoder(job_status), status_code=status.HTTP_200_OK + ) + return await inspect_study_job( + study_id=study_id, + job_id=job_id, + user_id=user_id, + director2_api=director2_api, + ) + + +@router.post( + "/{study_id:uuid}/jobs/{job_id:uuid}:stop", + response_model=JobStatus, +) +async def stop_study_job( + study_id: StudyID, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +): + job_name = _compose_job_resource_name(study_id, job_id) + with log_context(_logger, logging.DEBUG, f"Stopping Job '{job_name}'"): + return await stop_project( + job_id=job_id, user_id=user_id, director2_api=director2_api + ) + + +@router.post( + "/{study_id}/jobs/{job_id}:inspect", + response_model=JobStatus, +) +async def inspect_study_job( + study_id: StudyID, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +) -> JobStatus: + job_name = 
_compose_job_resource_name(study_id, job_id) + _logger.debug("Inspecting Job '%s'", job_name) + + task = await director2_api.get_computation(project_id=job_id, user_id=user_id) + job_status: JobStatus = create_jobstatus_from_task(task) + return job_status + + +@router.post( + "/{study_id}/jobs/{job_id}/outputs", + response_model=JobOutputs, +) +async def get_study_job_outputs( + study_id: StudyID, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + storage_client: Annotated[StorageApi, Depends(get_api_client(StorageApi))], +): + job_name = _compose_job_resource_name(study_id, job_id) + _logger.debug("Getting Job Outputs for '%s'", job_name) + + project_outputs = await webserver_api.get_project_outputs(project_id=job_id) + job_outputs: JobOutputs = await create_job_outputs_from_project_outputs( + job_id, project_outputs, user_id, storage_client + ) + + return job_outputs + + +@router.get( + "/{study_id}/jobs/{job_id}/outputs/log-links", + response_model=JobLogsMap, + status_code=status.HTTP_200_OK, + summary="Get download links for study job log files", +) +async def get_study_job_output_logfile( + study_id: StudyID, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +): + with log_context( + logger=_logger, + level=logging.DEBUG, + msg=f"get study job output logfile study_id={study_id!r} job_id={job_id!r}.", + ): + return await director2_api.get_computation_logs( + user_id=user_id, project_id=job_id + ) + + +@router.get( + "/{study_id}/jobs/{job_id}/metadata", + response_model=JobMetadata, + description=( + "Get custom metadata from a study's job\n\n" + + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7") + ), +) +async def get_study_job_custom_metadata( + study_id: StudyID, + job_id: JobID, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + job_name = _compose_job_resource_name(study_id, job_id) + msg = f"Gets metadata attached to study_id={study_id!r} job_id={job_id!r}.\njob_name={job_name!r}.\nSEE https://github.com/ITISFoundation/osparc-simcore/issues/4313" + _logger.debug(msg) + + return await get_custom_metadata( + job_name=job_name, + job_id=job_id, + webserver_api=webserver_api, + self_url=url_for( + "get_study_job_custom_metadata", + study_id=study_id, + job_id=job_id, + ), + ) + + +@router.put( + "/{study_id}/jobs/{job_id}/metadata", + response_model=JobMetadata, + description=( + "Changes custom metadata of a study's job\n\n" + + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7") + ), +) +async def replace_study_job_custom_metadata( + study_id: StudyID, + job_id: JobID, + replace: JobMetadataUpdate, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + job_name = _compose_job_resource_name(study_id, job_id) + + msg = f"Attaches metadata={replace.metadata!r} to study_id={study_id!r} job_id={job_id!r}.\njob_name={job_name!r}.\nSEE https://github.com/ITISFoundation/osparc-simcore/issues/4313" + _logger.debug(msg) + + return await replace_custom_metadata( + job_name=job_name, + job_id=job_id, + update=replace, + webserver_api=webserver_api, + self_url=url_for( + "replace_study_job_custom_metadata", + study_id=study_id, + job_id=job_id, + ), + ) + + +def _update_study_job_urls( + *, + job: Job, + 
study_id: StudyID, + job_id: JobID | str, + url_for: Callable[..., HttpUrl], +) -> Job: + job.url = url_for( + get_study_job.__name__, + study_id=study_id, + job_id=job_id, + ) + + job.runner_url = url_for( + "get_study", + study_id=study_id, + ) + + job.outputs_url = url_for( + get_study_job_outputs.__name__, + study_id=study_id, + job_id=job_id, + ) + + return job diff --git a/services/api-server/src/simcore_service_api_server/api/routes/users.py b/services/api-server/src/simcore_service_api_server/api/routes/users.py index 94a9fb0e87c..1aee57c4648 100644 --- a/services/api-server/src/simcore_service_api_server/api/routes/users.py +++ b/services/api-server/src/simcore_service_api_server/api/routes/users.py @@ -1,48 +1,42 @@ import logging +from typing import Annotated, Any -from fastapi import APIRouter, Depends, HTTPException, Security -from pydantic import ValidationError -from starlette import status +from fastapi import APIRouter, Depends, Security, status +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.schemas.errors import ErrorGet from ...models.schemas.profiles import Profile, ProfileUpdate -from ..dependencies.webserver import AuthSession, get_webserver_session +from ...services_http.webserver import AuthSession +from ..dependencies.webserver_http import get_webserver_session -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) router = APIRouter() -# SEE: https://www.python-httpx.org/async/ -# TODO: path mapping and operation +_USER_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "User not found", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} -@router.get("", response_model=Profile) + +@router.get("", response_model=Profile, responses=_USER_STATUS_CODES) async def get_my_profile( - client: AuthSession = Depends(get_webserver_session), + webserver_session: Annotated[AuthSession, Depends(get_webserver_session)], ) -> Profile: - data = await client.get("/me") - - # FIXME: temporary patch until web-API is reviewed - data["role"] = data["role"].upper() - try: - profile = Profile.parse_obj(data) - except ValidationError as err: - logger.exception("webserver invalid response") - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err - + profile: Profile = await webserver_session.get_me() return profile -@router.put("", response_model=Profile) +@router.put("", response_model=Profile, responses=_USER_STATUS_CODES) async def update_my_profile( profile_update: ProfileUpdate, - client: AuthSession = Security(get_webserver_session, scopes=["write"]), + webserver_session: Annotated[ + AuthSession, Security(get_webserver_session, scopes=["write"]) + ], ) -> Profile: - # FIXME: replace by patch - # TODO: improve. 
from patch -> put, we need to ensure it has a default in place - profile_update.first_name = profile_update.first_name or "" - profile_update.last_name = profile_update.last_name or "" - - await client.put("/me", body=profile_update.dict()) - - profile = await get_my_profile(client) + profile: Profile = await webserver_session.update_me(profile_update=profile_update) return profile diff --git a/services/api-server/src/simcore_service_api_server/api/routes/wallets.py b/services/api-server/src/simcore_service_api_server/api/routes/wallets.py new file mode 100644 index 00000000000..4454d3e0ae3 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/api/routes/wallets.py @@ -0,0 +1,124 @@ +import logging +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, status +from models_library.licenses import LicensedItemID +from pydantic import PositiveInt + +from ...api.dependencies.authentication import get_current_user_id, get_product_name +from ...api.dependencies.webserver_rpc import get_wb_api_rpc_client +from ...exceptions.service_errors_utils import DEFAULT_BACKEND_SERVICE_STATUS_CODES +from ...models.pagination import Page, PaginationParams +from ...models.schemas.errors import ErrorGet +from ...models.schemas.licensed_items import LicensedItemCheckoutData +from ...models.schemas.model_adapter import ( + LicensedItemCheckoutGet, + LicensedItemGet, + WalletGetWithAvailableCreditsLegacy, +) +from ...services_rpc.wb_api_server import WbApiRpcClient +from ..dependencies.webserver_http import AuthSession, get_webserver_session +from ._constants import FMSG_CHANGELOG_NEW_IN_VERSION, create_route_description + +_logger = logging.getLogger(__name__) + +router = APIRouter() + +WALLET_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_404_NOT_FOUND: { + "description": "Wallet not found", + "model": ErrorGet, + }, + status.HTTP_403_FORBIDDEN: { + "description": "Access to wallet is not allowed", + "model": ErrorGet, + }, + **DEFAULT_BACKEND_SERVICE_STATUS_CODES, +} + + +@router.get( + "/default", + description=create_route_description( + base="Get default wallet", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + ], + ), + response_model=WalletGetWithAvailableCreditsLegacy, + responses=WALLET_STATUS_CODES, +) +async def get_default_wallet( + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + return await webserver_api.get_default_wallet() + + +@router.get( + "/{wallet_id}", + response_model=WalletGetWithAvailableCreditsLegacy, + responses=WALLET_STATUS_CODES, + description=create_route_description( + base="Get wallet", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.7"), + ], + ), +) +async def get_wallet( + wallet_id: int, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +): + return await webserver_api.get_wallet(wallet_id=wallet_id) + + +@router.get( + "/{wallet_id}/licensed-items", + response_model=Page[LicensedItemGet], + status_code=status.HTTP_200_OK, + responses=WALLET_STATUS_CODES, + description=create_route_description( + base="Get all available licensed items for a given wallet", + changelog=[ + FMSG_CHANGELOG_NEW_IN_VERSION.format("0.6"), + ], + ), +) +async def get_available_licensed_items_for_wallet( + wallet_id: int, + page_params: Annotated[PaginationParams, Depends()], + web_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + product_name: Annotated[str, Depends(get_product_name)], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], 
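+ # NOTE: all arguments above are injected via FastAPI dependencies; the listing itself is delegated to the webserver through the RPC client in the body below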
+): + return await web_api_rpc.get_available_licensed_items_for_wallet( + product_name=product_name, + wallet_id=wallet_id, + user_id=user_id, + page_params=page_params, + ) + + +@router.post( + "/{wallet_id}/licensed-items/{licensed_item_id}/checkout", + response_model=LicensedItemCheckoutGet, + status_code=status.HTTP_200_OK, + responses=WALLET_STATUS_CODES, + description="Checkout licensed item", +) +async def checkout_licensed_item( + wallet_id: int, + licensed_item_id: LicensedItemID, + web_api_rpc: Annotated[WbApiRpcClient, Depends(get_wb_api_rpc_client)], + product_name: Annotated[str, Depends(get_product_name)], + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + checkout_data: LicensedItemCheckoutData, +): + return await web_api_rpc.checkout_licensed_item_for_wallet( + product_name=product_name, + user_id=user_id, + wallet_id=wallet_id, + licensed_item_id=licensed_item_id, + num_of_seats=checkout_data.number_of_seats, + service_run_id=checkout_data.service_run_id, + ) diff --git a/services/api-server/src/simcore_service_api_server/clients/__init__.py b/services/api-server/src/simcore_service_api_server/clients/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/api-server/src/simcore_service_api_server/clients/postgres.py b/services/api-server/src/simcore_service_api_server/clients/postgres.py new file mode 100644 index 00000000000..4f337bd82d6 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/clients/postgres.py @@ -0,0 +1,42 @@ +from fastapi import FastAPI +from servicelib.fastapi.db_asyncpg_engine import close_db_connection, connect_to_db +from servicelib.fastapi.lifespan_utils import LifespanOnStartupError +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..core.settings import ApplicationSettings + + +class PostgresNotConfiguredError(LifespanOnStartupError): + msg_template = LifespanOnStartupError.msg_template + ( + "Postgres settings are not configured. " + "Please check your application settings. 
" + ) + + +def get_engine(app: FastAPI) -> AsyncEngine: + assert app.state.engine # nosec + engine: AsyncEngine = app.state.engine + return engine + + +def setup_postgres(app: FastAPI): + app.state.engine = None + + async def _on_startup() -> None: + settings: ApplicationSettings = app.state.settings + if settings.API_SERVER_POSTGRES is None: + raise PostgresNotConfiguredError( + lifespan_name="Postgres", + settings=settings, + ) + + await connect_to_db(app, settings.API_SERVER_POSTGRES) + assert app.state.engine # nosec + assert isinstance(app.state.engine, AsyncEngine) # nosec + + async def _on_shutdown() -> None: + assert app.state.engine # nosec + await close_db_connection(app) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/api-server/src/simcore_service_api_server/core/_prometheus_instrumentation.py b/services/api-server/src/simcore_service_api_server/core/_prometheus_instrumentation.py new file mode 100644 index 00000000000..f19bac34a76 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/core/_prometheus_instrumentation.py @@ -0,0 +1,98 @@ +import logging +from collections.abc import Iterator +from dataclasses import dataclass, field +from datetime import timedelta +from typing import Final, cast + +from fastapi import FastAPI +from prometheus_client import CollectorRegistry, Gauge +from pydantic import PositiveInt +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation as setup_rest_instrumentation, +) +from servicelib.logging_utils import log_catch + +from .._meta import PROJECT_NAME +from ..api.dependencies.rabbitmq import ( + get_log_distributor, + wait_till_log_distributor_ready, +) +from ..core.health_checker import get_health_checker +from ..models.schemas.jobs import JobID + +_logger = logging.getLogger(__name__) +METRICS_NAMESPACE: Final[str] = PROJECT_NAME.replace("-", "_") + + +@dataclass(slots=True, kw_only=True) +class ApiServerPrometheusInstrumentation: + registry: CollectorRegistry + _logstreaming_queues: Gauge = field(init=False) + _health_check_qauge: Gauge = field(init=False) + + def __post_init__(self) -> None: + self._logstreaming_queues = Gauge( + "log_stream_queue_length", + "#Logs in log streaming queue", + ["job_id"], + namespace=METRICS_NAMESPACE, + registry=self.registry, + ) + self._health_check_qauge = Gauge( + "log_stream_health_check", + "#Failures of log stream health check", + namespace=METRICS_NAMESPACE, + registry=self.registry, + ) + + def update_metrics( + self, + iter_log_queue_sizes: Iterator[tuple[JobID, int]], + health_check_failure_count: PositiveInt, + ): + self._health_check_qauge.set(health_check_failure_count) + self._logstreaming_queues.clear() + for job_id, length in iter_log_queue_sizes: + self._logstreaming_queues.labels(job_id=job_id).set(length) + + +async def _collect_prometheus_metrics_task(app: FastAPI): + get_instrumentation(app).update_metrics( + iter_log_queue_sizes=get_log_distributor(app).iter_log_queue_sizes, + health_check_failure_count=get_health_checker(app).health_check_failure_count, + ) + + +def setup_prometheus_instrumentation(app: FastAPI): + registry = setup_rest_instrumentation(app) + + async def on_startup() -> None: + app.state.instrumentation = ApiServerPrometheusInstrumentation( + registry=registry + ) + await wait_till_log_distributor_ready(app) + app.state.instrumentation_task = 
create_periodic_task( + task=_collect_prometheus_metrics_task, + interval=timedelta( + seconds=app.state.settings.API_SERVER_PROMETHEUS_INSTRUMENTATION_COLLECT_SECONDS + ), + task_name="prometheus_metrics_collection_task", + app=app, + ) + + async def on_shutdown() -> None: + assert app.state.instrumentation_task # nosec + with log_catch(_logger, reraise=False): + await cancel_wait_task(app.state.instrumentation_task) + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_instrumentation(app: FastAPI) -> ApiServerPrometheusInstrumentation: + assert ( + app.state.instrumentation + ), "Instrumentation not setup. Please check the configuration" # nosec + return cast(ApiServerPrometheusInstrumentation, app.state.instrumentation) diff --git a/services/api-server/src/simcore_service_api_server/core/application.py b/services/api-server/src/simcore_service_api_server/core/application.py index 0de7a91e03f..44c5b5fc129 100644 --- a/services/api-server/src/simcore_service_api_server/core/application.py +++ b/services/api-server/src/simcore_service_api_server/core/application.py @@ -1,98 +1,134 @@ import logging -from typing import Optional from fastapi import FastAPI -from fastapi.exceptions import RequestValidationError -from httpx import HTTPStatusError -from servicelib.fastapi.tracing import setup_tracing +from fastapi_pagination import add_pagination +from models_library.basic_types import BootModeEnum +from packaging.version import Version +from servicelib.fastapi.profiler import initialize_profiler +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) from servicelib.logging_utils import config_all_loggers -from starlette import status -from starlette.exceptions import HTTPException -from .._meta import API_VERSION, API_VTAG -from ..api.errors.http_error import ( - http_error_handler, - make_http_error_handler_for_exception, -) -from ..api.errors.httpx_client_error import httpx_client_error_handler -from ..api.errors.validation_error import http422_error_handler +from .. 
import exceptions +from .._meta import API_VERSION, API_VTAG, APP_NAME from ..api.root import create_router from ..api.routes.health import router as health_router -from ..modules import catalog, director_v2, remote_debug, storage, webserver -from .events import create_start_app_handler, create_stop_app_handler +from ..clients.postgres import setup_postgres +from ..services_http import director_v2, storage, webserver +from ..services_http.rabbitmq import setup_rabbitmq +from ._prometheus_instrumentation import setup_prometheus_instrumentation +from .events import on_shutdown, on_startup from .openapi import override_openapi_method, use_route_names_as_operation_ids -from .redoc import create_redoc_handler -from .settings import ApplicationSettings, BootModeEnum +from .settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def _label_title_and_version(settings: ApplicationSettings, title: str, version: str): + labels = [] + if settings.API_SERVER_DEV_FEATURES_ENABLED: + # builds public version identifier with pre: `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]` + # SEE https://packaging.python.org/en/latest/specifications/version-specifiers/#public-version-identifiers + v = Version(version) + version = f"{v.base_version}.post0.dev0" + assert Version(version).is_devrelease, version # nosec + _logger.info("Setting up a developmental version: %s -> %s", v, version) + + if settings.debug: + labels.append("debug") -logger = logging.getLogger(__name__) + if local_version_label := "-".join(labels): + # Appends local version identifier `[+]` + # SEE https://packaging.python.org/en/latest/specifications/version-specifiers/#local-version-identifiers + title += f" ({local_version_label})" + version += f"+{local_version_label}" + return title, version -def init_app(settings: Optional[ApplicationSettings] = None) -> FastAPI: + +def init_app(settings: ApplicationSettings | None = None) -> FastAPI: if settings is None: settings = ApplicationSettings.create_from_envs() assert settings # nosec - logging.basicConfig(level=settings.LOG_LEVEL.value) - logging.root.setLevel(settings.LOG_LEVEL.value) - logger.debug("App settings:\n%s", settings.json(indent=2)) + logging.basicConfig(level=settings.log_level) + logging.root.setLevel(settings.log_level) + config_all_loggers( + log_format_local_dev_enabled=settings.API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=settings.API_SERVER_LOG_FILTER_MAPPING, + tracing_settings=settings.API_SERVER_TRACING, + ) + _logger.debug("App settings:\n%s", settings.model_dump_json(indent=2)) + + # Labeling + title = "osparc.io public API" + version = API_VERSION # public version identifier + description = "osparc-simcore public API specifications" + + # Appends local version identifier if setup: version=[+] + title, version = _label_title_and_version(settings, title, version) # creates app instance app = FastAPI( debug=settings.debug, - title="osparc.io web API", - description="osparc-simcore public web API specifications", - version=API_VERSION, + title=title, + description=description, + version=version, openapi_url=f"/api/{API_VTAG}/openapi.json", docs_url="/dev/doc", - redoc_url=None, # default disabled, see below + redoc_url="/doc", ) override_openapi_method(app) + add_pagination(app) app.state.settings = settings - # setup modules - if settings.SC_BOOT_MODE == BootModeEnum.DEBUG: - remote_debug.setup(app) + if settings.API_SERVER_TRACING: + setup_tracing(app, settings.API_SERVER_TRACING, APP_NAME) - if settings.API_SERVER_WEBSERVER: - 
webserver.setup(app, settings.API_SERVER_WEBSERVER) + if settings.API_SERVER_POSTGRES: + setup_postgres(app) - if settings.API_SERVER_CATALOG: - catalog.setup(app, settings.API_SERVER_CATALOG) + setup_rabbitmq(app) + + if app.state.settings.API_SERVER_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) + + if settings.API_SERVER_TRACING: + initialize_fastapi_app_tracing(app) + + if settings.API_SERVER_WEBSERVER: + webserver.setup( + app, + settings.API_SERVER_WEBSERVER, + tracing_settings=settings.API_SERVER_TRACING, + ) if settings.API_SERVER_STORAGE: - storage.setup(app, settings.API_SERVER_STORAGE) + storage.setup( + app, + settings.API_SERVER_STORAGE, + tracing_settings=settings.API_SERVER_TRACING, + ) if settings.API_SERVER_DIRECTOR_V2: - director_v2.setup(app, settings.API_SERVER_DIRECTOR_V2) - - if settings.API_SERVER_TRACING: - setup_tracing(app, settings.API_SERVER_TRACING) + director_v2.setup( + app, + settings.API_SERVER_DIRECTOR_V2, + tracing_settings=settings.API_SERVER_TRACING, + ) # setup app - app.add_event_handler("startup", create_start_app_handler(app)) - app.add_event_handler("shutdown", create_stop_app_handler(app)) - - app.add_exception_handler(HTTPException, http_error_handler) - app.add_exception_handler(RequestValidationError, http422_error_handler) - app.add_exception_handler(HTTPStatusError, httpx_client_error_handler) - - # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy - app.add_exception_handler( - NotImplementedError, - make_http_error_handler_for_exception( - status.HTTP_501_NOT_IMPLEMENTED, NotImplementedError - ), - ) - app.add_exception_handler( - Exception, - make_http_error_handler_for_exception( - status.HTTP_500_INTERNAL_SERVER_ERROR, - Exception, - override_detail_message="Internal error" - if settings.SC_BOOT_MODE == BootModeEnum.DEBUG - else None, - ), + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + if settings.API_SERVER_PROFILING: + initialize_profiler(app) + + exceptions.setup_exception_handlers( + app, is_debug=settings.SC_BOOT_MODE == BootModeEnum.DEBUG ) # routing @@ -100,15 +136,10 @@ def init_app(settings: Optional[ApplicationSettings] = None) -> FastAPI: # healthcheck at / and at /VTAG/ app.include_router(health_router) - # docs - redoc_html = create_redoc_handler(app) - app.add_route("/doc", redoc_html, name="redoc_html", include_in_schema=False) - # api under /v* api_router = create_router(settings) app.include_router(api_router, prefix=f"/{API_VTAG}") # NOTE: cleanup all OpenAPIs https://github.com/ITISFoundation/osparc-simcore/issues/3487 use_route_names_as_operation_ids(app) - config_all_loggers() return app diff --git a/services/api-server/src/simcore_service_api_server/core/events.py b/services/api-server/src/simcore_service_api_server/core/events.py index 44305123e3a..dd3cb21cb45 100644 --- a/services/api-server/src/simcore_service_api_server/core/events.py +++ b/services/api-server/src/simcore_service_api_server/core/events.py @@ -1,56 +1,15 @@ import logging -from typing import Callable -from fastapi import FastAPI +from .._meta import APP_FINISHED_BANNER_MSG, APP_STARTED_BANNER_MSG -from .._meta import PROJECT_NAME, __version__ -from ..db.events import close_db_connection, connect_to_db +_logger = logging.getLogger(__name__) -logger = logging.getLogger(__name__) -# -# https://patorjk.com/software/taag/#p=display&f=JS%20Stick%20Letters&t=API-server%0A -# -WELCOME_MSG = r""" - __ __ ___ __ ___ __ - /\ |__) | __ /__` |__ |__) \ / 
|__ |__) -/~~\ | | .__/ |___ | \ \/ |___ | \ {} +async def on_startup() -> None: + _logger.info("Application starting ...") + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 -""".format( - f"v{__version__}" -) - -def create_start_app_handler(app: FastAPI) -> Callable: - async def on_startup() -> None: - logger.info("Application starting") - if app.state.settings.API_SERVER_POSTGRES: - # database - await connect_to_db(app) - assert app.state.engine # nosec - - print(WELCOME_MSG, flush=True) - - return on_startup - - -def create_stop_app_handler(app: FastAPI) -> Callable: - async def on_shutdown() -> None: - logger.info("Application stopping") - - if app.state.settings.API_SERVER_POSTGRES: - try: - await close_db_connection(app) - - except Exception as err: # pylint: disable=broad-except - logger.warning( - "Failed to close app: %s", - err, - exc_info=app.state.settings.debug, - stack_info=app.state.settings.debug, - ) - - msg = PROJECT_NAME + f" v{__version__} SHUT DOWN" - print(f"{msg:=^100}") - - return on_shutdown +async def on_shutdown() -> None: + _logger.info("Application stopping, ...") + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 diff --git a/services/api-server/src/simcore_service_api_server/core/health_checker.py b/services/api-server/src/simcore_service_api_server/core/health_checker.py new file mode 100644 index 00000000000..b5a5180b12b --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/core/health_checker.py @@ -0,0 +1,108 @@ +# pylint: disable=R0902 +import asyncio +import logging +from datetime import timedelta +from typing import Annotated, Final, cast +from uuid import uuid4 + +from fastapi import Depends, FastAPI +from models_library.rabbitmq_messages import LoggerRabbitMessage +from models_library.users import UserID +from pydantic import NonNegativeInt, PositiveFloat, PositiveInt +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.fastapi.dependencies import get_app +from servicelib.logging_utils import log_catch +from servicelib.rabbitmq import RabbitMQClient + +from .._meta import PROJECT_NAME +from ..models.schemas.jobs import JobID, JobLog +from ..services_http.log_streaming import LogDistributor + +METRICS_NAMESPACE: Final[str] = PROJECT_NAME.replace("-", "_") + +_logger = logging.getLogger(__name__) + + +class ApiServerHealthChecker: + def __init__( + self, + *, + log_distributor: LogDistributor, + rabbit_client: RabbitMQClient, + timeout_seconds: PositiveFloat, + allowed_health_check_failures: PositiveInt, + ) -> None: + self._log_distributor: LogDistributor = log_distributor + self._rabbit_client: RabbitMQClient = rabbit_client + self._timeout_seconds = timeout_seconds + self._allowed_health_check_failures = allowed_health_check_failures + + self._health_check_failure_count: NonNegativeInt = 0 + self._dummy_job_id: JobID = uuid4() + self._dummy_queue: asyncio.Queue[JobLog] = asyncio.Queue() + self._dummy_message = LoggerRabbitMessage( + user_id=UserID(123456789), + project_id=self._dummy_job_id, + node_id=uuid4(), + messages=["Api-server health check message"], + ) + self._background_task: asyncio.Task | None = None + _logger.info("Api server health check dummy job_id=%s", f"{self._dummy_job_id}") + + async def setup(self, health_check_task_period_seconds: PositiveFloat): + await self._log_distributor.register( + job_id=self._dummy_job_id, queue=self._dummy_queue + ) + self._background_task = create_periodic_task( + 
task=self._background_task_method, + interval=timedelta(seconds=health_check_task_period_seconds), + task_name="api_server_health_check_task", + ) + + async def teardown(self): + if self._background_task: + with log_catch(_logger, reraise=False): + await cancel_wait_task( + self._background_task, max_delay=self._timeout_seconds + ) + await self._log_distributor.deregister(job_id=self._dummy_job_id) + + @property + def healthy(self) -> bool: + return self._rabbit_client.healthy and ( + self._health_check_failure_count <= self._allowed_health_check_failures + ) # https://github.com/ITISFoundation/osparc-simcore/pull/6662 + + @property + def health_check_failure_count(self) -> NonNegativeInt: + return self._health_check_failure_count + + def _increment_health_check_failure_count(self): + self._health_check_failure_count += 1 + + async def _background_task_method(self): + while self._dummy_queue.qsize() > 0: + _ = self._dummy_queue.get_nowait() + try: + await asyncio.wait_for( + self._rabbit_client.publish( + self._dummy_message.channel_name, self._dummy_message + ), + timeout=self._timeout_seconds, + ) + _ = await asyncio.wait_for( + self._dummy_queue.get(), timeout=self._timeout_seconds + ) + self._health_check_failure_count = 0 + except asyncio.TimeoutError: + self._increment_health_check_failure_count() + + +def get_health_checker( + app: Annotated[FastAPI, Depends(get_app)], +) -> ApiServerHealthChecker: + assert ( + app.state.health_checker + ), "Api-server healthchecker is not setup. Please check the configuration" # nosec + return cast(ApiServerHealthChecker, app.state.health_checker) diff --git a/services/api-server/src/simcore_service_api_server/core/openapi.py b/services/api-server/src/simcore_service_api_server/core/openapi.py index c58ca9540c0..dd22d2a2380 100644 --- a/services/api-server/src/simcore_service_api_server/core/openapi.py +++ b/services/api-server/src/simcore_service_api_server/core/openapi.py @@ -1,49 +1,8 @@ -import json -import logging -import types -from pathlib import Path - -import yaml from fastapi import FastAPI -from fastapi.openapi.utils import get_openapi from fastapi.routing import APIRoute -from servicelib.fastapi.openapi import patch_openapi_specs - -from .redoc import add_vendor_extensions, compose_long_description - -logger = logging.getLogger(__name__) - - -def override_openapi_method(app: FastAPI): - # TODO: test openapi(*) member does not change interface - - def _custom_openapi_method(zelf: FastAPI) -> dict: - """Overrides FastAPI.openapi member function - returns OAS schema with vendor extensions - """ - if not zelf.openapi_schema: +from servicelib.fastapi.openapi import override_fastapi_openapi_method - if zelf.redoc_url: - desc = compose_long_description(zelf.description) - else: - desc = zelf.description - openapi_schema = get_openapi( - title=zelf.title, - version=zelf.version, - openapi_version=zelf.openapi_version, - description=desc, - routes=zelf.routes, - tags=zelf.openapi_tags, - servers=zelf.servers, - ) - - add_vendor_extensions(openapi_schema) - patch_openapi_specs(openapi_schema) - zelf.openapi_schema = openapi_schema - - return zelf.openapi_schema - - app.openapi = types.MethodType(_custom_openapi_method, app) +override_openapi_method = override_fastapi_openapi_method def use_route_names_as_operation_ids(app: FastAPI) -> None: @@ -58,14 +17,3 @@ def use_route_names_as_operation_ids(app: FastAPI) -> None: for route in app.routes: if isinstance(route, APIRoute): route.operation_id = route.name - - -def dump_openapi(app: FastAPI, 
filepath: Path): - logger.info("Dumping openapi specs as %s", filepath) - with open(filepath, "wt") as fh: - if filepath.suffix == ".json": - json.dump(app.openapi(), fh, indent=2) - elif filepath.suffix in (".yaml", ".yml"): - yaml.safe_dump(app.openapi(), fh) - else: - raise ValueError("invalid") diff --git a/services/api-server/src/simcore_service_api_server/core/redoc.py b/services/api-server/src/simcore_service_api_server/core/redoc.py deleted file mode 100644 index 4f70ac0035a..00000000000 --- a/services/api-server/src/simcore_service_api_server/core/redoc.py +++ /dev/null @@ -1,50 +0,0 @@ -from typing import Callable - -from fastapi import FastAPI -from fastapi.applications import HTMLResponse, Request -from fastapi.openapi.docs import get_redoc_html - -# TODO: move all these static resources away from the server! - -FAVICON = "https://osparc.io/resource/osparc/favicon.png" -LOGO = "https://raw.githubusercontent.com/ITISFoundation/osparc-manual/b809d93619512eb60c827b7e769c6145758378d0/_media/osparc-logo.svg" -PYTHON_CODE_SAMPLES_BASE_URL = "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore-python-client/master/code_samples" - - -def compose_long_description(description: str) -> str: - desc = f"**{description}**\n" - desc += "## Python Library\n" - desc += "- Check the [documentation](https://itisfoundation.github.io/osparc-simcore-python-client)\n" - desc += "- Quick install: ``pip install git+https://github.com/ITISFoundation/osparc-simcore-python-client.git``\n" - return desc - - -def add_vendor_extensions(openapi_schema: dict): - # ReDoc vendor extensions - # SEE https://github.com/Redocly/redoc/blob/master/docs/redoc-vendor-extensions.md - openapi_schema["info"]["x-logo"] = { - "url": LOGO, - "altText": "osparc-simcore logo", - } - - # - # TODO: load code samples add if function is contained in sample - # TODO: See if openapi-cli does this already - # TODO: check that all url are available before exposing - # openapi_schema["paths"][f"/{api_vtag}/meta"]["get"]["x-code-samples"] = [ - # { - # "lang": "python", - # "source": {"$ref": f"{PYTHON_CODE_SAMPLES_BASE_URL}/meta/get.py"}, - # }, - # ] - - -def create_redoc_handler(app: FastAPI) -> Callable: - async def _redoc_html(_req: Request) -> HTMLResponse: - return get_redoc_html( - openapi_url=app.openapi_url, - title=app.title + " - redoc", - redoc_favicon_url=FAVICON, - ) - - return _redoc_html diff --git a/services/api-server/src/simcore_service_api_server/core/settings.py b/services/api-server/src/simcore_service_api_server/core/settings.py index 3b2a61f00b8..cf734ed1040 100644 --- a/services/api-server/src/simcore_service_api_server/core/settings.py +++ b/services/api-server/src/simcore_service_api_server/core/settings.py @@ -1,125 +1,160 @@ from functools import cached_property -from typing import Optional +from typing import Annotated +from common_library.basic_types import DEFAULT_FACTORY from models_library.basic_types import BootModeEnum, LogLevel -from pydantic import AnyHttpUrl, Field, SecretStr -from pydantic.class_validators import validator +from pydantic import ( + AliasChoices, + Field, + NonNegativeInt, + PositiveInt, + SecretStr, + field_validator, +) +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring from settings_library.base import BaseCustomSettings -from settings_library.catalog import CatalogSettings +from settings_library.director_v2 import DirectorV2Settings from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings 
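The `core/settings.py` hunk continuing below rewrites the settings classes in pydantic v2 style: `Annotated` field types carrying `Field(validation_alias=AliasChoices(...))`, so one field can be filled from several environment variable names (e.g. the service-scoped `API_SERVER_LOGLEVEL` or the generic `LOG_LEVEL`). A minimal, self-contained sketch of just that pattern, assuming `pydantic>=2` and `pydantic-settings` are installed; the `_LoggingSettings` class and the script entry point are illustrative only and not part of this diff:

```python
# Sketch of the AliasChoices env-alias pattern used in the settings hunk below.
# Plain pydantic-settings BaseSettings, not the repository's BaseCustomSettings.
from typing import Annotated

from pydantic import AliasChoices, Field
from pydantic_settings import BaseSettings


class _LoggingSettings(BaseSettings):
    # Either API_SERVER_LOGLEVEL or LOG_LEVEL in the environment fills this field
    # (aliases are tried in order, so the service-scoped name wins); if neither
    # is set, the default "INFO" applies.
    LOG_LEVEL: Annotated[
        str,
        Field(validation_alias=AliasChoices("API_SERVER_LOGLEVEL", "LOG_LEVEL")),
    ] = "INFO"


if __name__ == "__main__":
    # e.g. `API_SERVER_LOGLEVEL=DEBUG python sketch.py` prints DEBUG
    print(_LoggingSettings().LOG_LEVEL)
```

Listing the legacy names after the service-scoped one effectively keeps existing deployments working with their old `LOG_LEVEL`/`LOGLEVEL` variables while `API_SERVER_LOGLEVEL` takes precedence when both are set.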
+from settings_library.storage import StorageSettings from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings -from settings_library.utils_session import MixinSessionSettings - -# SERVICES CLIENTS -------------------------------------------- - - -class _UrlMixin: - def _build_url(self, prefix: str) -> str: - prefix = prefix.upper() - return AnyHttpUrl.build( - scheme="http", - host=getattr(self, f"{prefix}_HOST"), - port=f"{getattr(self, f'{prefix}_PORT')}", - path=f"/{getattr(self, f'{prefix}_VTAG')}", # NOTE: it ends with /{VTAG} - ) - - -class WebServerSettings(BaseCustomSettings, _UrlMixin, MixinSessionSettings): - WEBSERVER_HOST: str = "webserver" - WEBSERVER_PORT: int = 8080 - WEBSERVER_VTAG: str = "v0" - - WEBSERVER_SESSION_SECRET_KEY: SecretStr = Field( - ..., - description="Secret key to encrypt cookies. " - 'TIP: python3 -c "from cryptography.fernet import *; print(Fernet.generate_key())"', - min_length=44, - env=["SESSION_SECRET_KEY", "WEBSERVER_SESSION_SECRET_KEY"], - ) - WEBSERVER_SESSION_NAME: str = "osparc.WEBAPI_SESSION" - - @cached_property - def base_url(self) -> str: - return self._build_url("WEBSERVER") - - @validator("WEBSERVER_SESSION_SECRET_KEY") +from settings_library.utils_session import ( + DEFAULT_SESSION_COOKIE_NAME, + MixinSessionSettings, +) +from settings_library.webserver import WebServerSettings as WebServerBaseSettings + + +class WebServerSettings(WebServerBaseSettings, MixinSessionSettings): + + WEBSERVER_SESSION_SECRET_KEY: Annotated[ + SecretStr, + Field( + description="Secret key to encrypt cookies. " + 'TIP: python3 -c "from cryptography.fernet import *; print(Fernet.generate_key())"', + min_length=44, + validation_alias=AliasChoices( + "SESSION_SECRET_KEY", "WEBSERVER_SESSION_SECRET_KEY" + ), + ), + ] + WEBSERVER_SESSION_NAME: str = DEFAULT_SESSION_COOKIE_NAME + + @field_validator("WEBSERVER_SESSION_SECRET_KEY") @classmethod - def check_valid_fernet_key(cls, v): + def _check_valid_fernet_key(cls, v): return cls.do_check_valid_fernet_key(v) -class StorageSettings(BaseCustomSettings, _UrlMixin): - STORAGE_HOST: str = "storage" - STORAGE_PORT: int = 8080 - STORAGE_VTAG: str = "v0" - - @cached_property - def base_url(self) -> str: - return self._build_url("STORAGE") - - -class DirectorV2Settings(BaseCustomSettings, _UrlMixin): - DIRECTOR_V2_HOST: str = "director-v2" - DIRECTOR_V2_PORT: int = 8000 - DIRECTOR_V2_VTAG: str = "v2" - - @cached_property - def base_url(self) -> str: - return self._build_url("DIRECTOR_V2") - - # MAIN SETTINGS -------------------------------------------- class BasicSettings(BaseCustomSettings, MixinLoggingSettings): - # DEVELOPMENT - API_SERVER_DEV_FEATURES_ENABLED: bool = Field( - False, env=["API_SERVER_DEV_FEATURES_ENABLED", "FAKE_API_SERVER_ENABLED"] - ) + API_SERVER_DEV_FEATURES_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "API_SERVER_DEV_FEATURES_ENABLED", "FAKE_API_SERVER_ENABLED" + ), + ), + ] = False # LOGGING - LOG_LEVEL: LogLevel = Field( - LogLevel.INFO.value, - env=["API_SERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"], - ) - - # DEBUGGING - API_SERVER_REMOTE_DEBUG_PORT: int = 3000 - - @validator("LOG_LEVEL", pre=True) + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "API_SERVER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + + API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED", + 
"LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + + API_SERVER_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "API_SERVER_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + @field_validator("LOG_LEVEL", mode="before") @classmethod def _validate_loglevel(cls, value) -> str: - return cls.validate_log_level(value) + log_level: str = cls.validate_log_level(value) + return log_level class ApplicationSettings(BasicSettings): - # DOCKER BOOT - SC_BOOT_MODE: Optional[BootModeEnum] + SC_BOOT_MODE: BootModeEnum | None = None - # POSTGRES - API_SERVER_POSTGRES: Optional[PostgresSettings] = Field(auto_default_from_env=True) + API_SERVER_POSTGRES: Annotated[ + PostgresSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + API_SERVER_RABBITMQ: Annotated[ + RabbitSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for service/rabbitmq", + ), + ] # SERVICES with http API - API_SERVER_WEBSERVER: Optional[WebServerSettings] = Field( - auto_default_from_env=True - ) - API_SERVER_CATALOG: Optional[CatalogSettings] = Field(auto_default_from_env=True) - API_SERVER_STORAGE: Optional[StorageSettings] = Field(auto_default_from_env=True) - API_SERVER_DIRECTOR_V2: Optional[DirectorV2Settings] = Field( - auto_default_from_env=True - ) - - # DIAGNOSTICS - API_SERVER_TRACING: Optional[TracingSettings] = Field(auto_default_from_env=True) + API_SERVER_WEBSERVER: Annotated[ + WebServerSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + API_SERVER_STORAGE: Annotated[ + StorageSettings | None, Field(json_schema_extra={"auto_default_from_env": True}) + ] + API_SERVER_DIRECTOR_V2: Annotated[ + DirectorV2Settings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + API_SERVER_LOG_CHECK_TIMEOUT_SECONDS: NonNegativeInt = 3 * 60 + API_SERVER_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + API_SERVER_HEALTH_CHECK_TASK_PERIOD_SECONDS: PositiveInt = 30 + API_SERVER_HEALTH_CHECK_TASK_TIMEOUT_SECONDS: PositiveInt = 10 + API_SERVER_ALLOWED_HEALTH_CHECK_FAILURES: PositiveInt = 5 + API_SERVER_PROMETHEUS_INSTRUMENTATION_COLLECT_SECONDS: PositiveInt = 5 + API_SERVER_PROFILING: bool = False + API_SERVER_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] @cached_property def debug(self) -> bool: """If True, debug tracebacks should be returned on errors.""" - return self.SC_BOOT_MODE in [ - BootModeEnum.DEBUG, - BootModeEnum.DEVELOPMENT, - BootModeEnum.LOCAL, - ] + return self.SC_BOOT_MODE is not None and self.SC_BOOT_MODE.is_devel_mode() + + +__all__: tuple[str, ...] 
= ( + "ApplicationSettings", + "BasicSettings", + "DirectorV2Settings", + "StorageSettings", + "WebServerSettings", + "WebServerSettings", +) diff --git a/services/api-server/src/simcore_service_api_server/db/errors.py b/services/api-server/src/simcore_service_api_server/db/errors.py deleted file mode 100644 index bb3ef669024..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/errors.py +++ /dev/null @@ -1,2 +0,0 @@ -class EntityDoesNotExist(Exception): - """Raised when entity was not found in database.""" diff --git a/services/api-server/src/simcore_service_api_server/db/events.py b/services/api-server/src/simcore_service_api_server/db/events.py deleted file mode 100644 index a39bd1c187f..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/events.py +++ /dev/null @@ -1,55 +0,0 @@ -import logging - -from aiopg.sa import Engine, create_engine -from fastapi import FastAPI -from servicelib.retry_policies import PostgresRetryPolicyUponInitialization -from simcore_postgres_database.utils_aiopg import ( - close_engine, - get_pg_engine_info, - raise_if_migration_not_ready, -) -from tenacity import retry - -from .._meta import PROJECT_NAME -from ..core.settings import PostgresSettings - -logger = logging.getLogger(__name__) - - -@retry(**PostgresRetryPolicyUponInitialization(logger).kwargs) -async def connect_to_db(app: FastAPI) -> None: - logger.debug("Connecting db ...") - - cfg: PostgresSettings = app.state.settings.API_SERVER_POSTGRES - engine: Engine = await create_engine( - str(cfg.dsn), - application_name=cfg.POSTGRES_CLIENT_NAME - or f"{PROJECT_NAME}_{id(app)}", # unique identifier per app - minsize=cfg.POSTGRES_MINSIZE, - maxsize=cfg.POSTGRES_MAXSIZE, - ) - logger.debug("Connected to %s", engine.dsn) - - logger.debug("Checking db migrationn ...") - try: - await raise_if_migration_not_ready(engine) - except Exception: - # NOTE: engine must be closed because retry will create a new engine - await close_engine(engine) - raise - logger.debug("Migration up-to-date") - - app.state.engine = engine - logger.debug( - "Setup engine: %s", - get_pg_engine_info(engine), - ) - - -async def close_db_connection(app: FastAPI) -> None: - logger.debug("Disconnecting db ...") - - if engine := app.state.engine: - await close_engine(engine) - - logger.debug("Disconnected from %s", engine.dsn) diff --git a/services/api-server/src/simcore_service_api_server/db/repositories/_base.py b/services/api-server/src/simcore_service_api_server/db/repositories/_base.py deleted file mode 100644 index 1f7cc57a069..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/repositories/_base.py +++ /dev/null @@ -1,12 +0,0 @@ -from dataclasses import dataclass - -from aiopg.sa import Engine - - -@dataclass -class BaseRepository: - """ - Repositories are pulled at every request - """ - - db_engine: Engine = None diff --git a/services/api-server/src/simcore_service_api_server/db/repositories/api_keys.py b/services/api-server/src/simcore_service_api_server/db/repositories/api_keys.py deleted file mode 100644 index df24b5af246..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/repositories/api_keys.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging -from typing import Optional - -import sqlalchemy as sa -from pydantic.types import PositiveInt -from simcore_postgres_database.errors import DatabaseError - -from .. 
import tables as tbl -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - - -# TODO: see if can use services/api-server/src/simcore_service_api_server/models/domain/api_keys.py -# NOTE: For psycopg2 errors SEE https://www.psycopg.org/docs/errors.html#sqlstate-exception-classes - - -class ApiKeysRepository(BaseRepository): - async def get_user_id(self, api_key: str, api_secret: str) -> Optional[PositiveInt]: - stmt = sa.select([tbl.api_keys.c.user_id,]).where( - sa.and_( - tbl.api_keys.c.api_key == api_key, - tbl.api_keys.c.api_secret == api_secret, - ) - ) - - try: - async with self.db_engine.acquire() as conn: - user_id: Optional[PositiveInt] = await conn.scalar(stmt) - - except DatabaseError as err: - logger.debug("Failed to get user id: %s", err) - user_id = None - - return user_id - - async def any_user_with_id(self, user_id: int) -> bool: - # FIXME: shall identify api_key or api_secret instead - stmt = sa.select( - [ - tbl.api_keys.c.user_id, - ] - ).where(tbl.api_keys.c.user_id == user_id) - async with self.db_engine.acquire() as conn: - return (await conn.scalar(stmt)) is not None diff --git a/services/api-server/src/simcore_service_api_server/db/repositories/users.py b/services/api-server/src/simcore_service_api_server/db/repositories/users.py deleted file mode 100644 index 439ee23d60c..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/repositories/users.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import Optional - -import sqlalchemy as sa - -from ..tables import api_keys, users -from ._base import BaseRepository - - -class UsersRepository(BaseRepository): - async def get_user_id(self, api_key: str, api_secret: str) -> Optional[int]: - stmt = sa.select([api_keys.c.user_id,]).where( - sa.and_( - api_keys.c.api_key == api_key, - api_keys.c.api_secret == api_secret, - ) - ) - async with self.db_engine.acquire() as conn: - user_id: Optional[int] = await conn.scalar(stmt) - return user_id - - async def any_user_with_id(self, user_id: int) -> bool: - stmt = sa.select( - [ - api_keys.c.user_id, - ] - ).where(api_keys.c.user_id == user_id) - async with self.db_engine.acquire() as conn: - return (await conn.scalar(stmt)) is not None - - async def get_email_from_user_id(self, user_id: int) -> Optional[str]: - stmt = sa.select( - [ - users.c.email, - ] - ).where(users.c.id == user_id) - async with self.db_engine.acquire() as conn: - email: Optional[str] = await conn.scalar(stmt) - return email diff --git a/services/api-server/src/simcore_service_api_server/db/tables.py b/services/api-server/src/simcore_service_api_server/db/tables.py deleted file mode 100644 index d1d60558155..00000000000 --- a/services/api-server/src/simcore_service_api_server/db/tables.py +++ /dev/null @@ -1,16 +0,0 @@ -from simcore_postgres_database.models.api_keys import api_keys -from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups -from simcore_postgres_database.models.users import UserRole, UserStatus, users - -metadata = api_keys.metadata - -__all__ = [ - "api_keys", - "users", - "groups", - "user_to_groups", - "metadata", - "UserStatus", - "UserRole", - "GroupType", -] diff --git a/services/api-server/src/simcore_service_api_server/exceptions/__init__.py b/services/api-server/src/simcore_service_api_server/exceptions/__init__.py new file mode 100644 index 00000000000..b6036dda040 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/__init__.py @@ -0,0 +1,5 @@ +from . 
import handlers + +setup_exception_handlers = handlers.setup + +__all__: tuple[str, ...] = ("setup_exception_handlers",) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/_base.py b/services/api-server/src/simcore_service_api_server/exceptions/_base.py new file mode 100644 index 00000000000..9101ae4164c --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/_base.py @@ -0,0 +1,5 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class ApiServerBaseError(OsparcErrorMixin, Exception): + ... diff --git a/services/api-server/src/simcore_service_api_server/exceptions/backend_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/backend_errors.py new file mode 100644 index 00000000000..bde18b2dbb5 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/backend_errors.py @@ -0,0 +1,132 @@ +import parse # type: ignore[import-untyped] +from fastapi import status + +from ._base import ApiServerBaseError + + +class BaseBackEndError(ApiServerBaseError): + """status_code: the default return status which will be returned to the client calling the + api-server (in case this exception is raised)""" + + status_code = status.HTTP_502_BAD_GATEWAY + + @classmethod + def named_fields(cls) -> set[str]: + return set( + parse.compile(cls.msg_template).named_fields # pylint: disable=no-member + ) + + +class InvalidInputError(BaseBackEndError): + msg_template = "Invalid input" + status_code = status.HTTP_422_UNPROCESSABLE_ENTITY + + +class ListSolversOrStudiesError(BaseBackEndError): + msg_template = "Cannot list solvers/studies" + status_code = status.HTTP_404_NOT_FOUND + + +class ListJobsError(BaseBackEndError): + msg_template = "Cannot list jobs" + status_code = status.HTTP_404_NOT_FOUND + + +class PaymentRequiredError(BaseBackEndError): + msg_template = "Payment required" + status_code = status.HTTP_402_PAYMENT_REQUIRED + + +class ProfileNotFoundError(BaseBackEndError): + msg_template = "Profile not found" + status_code = status.HTTP_404_NOT_FOUND + + +class ProgramOrSolverOrStudyNotFoundError(BaseBackEndError): + msg_template = "Could not get program/solver/study {name}:{version}" + status_code = status.HTTP_404_NOT_FOUND + + +class ServiceForbiddenAccessError(BaseBackEndError): + msg_template = "Forbidden access to program/solver/study {name}:{version}" + status_code = status.HTTP_403_FORBIDDEN + + +class JobNotFoundError(BaseBackEndError): + msg_template = "Could not get solver/study job {project_id}" + status_code = status.HTTP_404_NOT_FOUND + + +class LogFileNotFoundError(BaseBackEndError): + msg_template = "Could not get logfile for solver/study job {project_id}" + status_code = status.HTTP_404_NOT_FOUND + + +class SolverOutputNotFoundError(BaseBackEndError): + msg_template = "Solver output of project {project_id} not found" + status_code = status.HTTP_404_NOT_FOUND + + +class ClusterNotFoundError(BaseBackEndError): + msg_template = "Cluster not found" + status_code = status.HTTP_406_NOT_ACCEPTABLE + + +class ConfigurationError(BaseBackEndError): + msg_template = "Configuration error" + status_code = status.HTTP_422_UNPROCESSABLE_ENTITY + + +class ProductPriceNotFoundError(BaseBackEndError): + msg_template = "Product price not found" + status_code = status.HTTP_404_NOT_FOUND + + +class WalletNotFoundError(BaseBackEndError): + msg_template = "Wallet not found" + status_code = status.HTTP_404_NOT_FOUND + + +class ForbiddenWalletError(BaseBackEndError): + msg_template = "User does not have 
access to wallet" + status_code = status.HTTP_403_FORBIDDEN + + +class ProjectPortsNotFoundError(BaseBackEndError): + msg_template = "The ports for the job/study {project_id} could not be found" + status_code = status.HTTP_404_NOT_FOUND + + +class ProjectMetadataNotFoundError(BaseBackEndError): + msg_template = "The metadata for the job/study {project_id} could not be found" + status_code = status.HTTP_404_NOT_FOUND + + +class PricingUnitNotFoundError(BaseBackEndError): + msg_template = "The pricing unit could not be found" + status_code = status.HTTP_404_NOT_FOUND + + +class PricingPlanNotFoundError(BaseBackEndError): + msg_template = "The pricing plan could not be found" + status_code = status.HTTP_404_NOT_FOUND + + +class ProjectAlreadyStartedError(BaseBackEndError): + msg_template = "Project already started" + status_code = status.HTTP_200_OK + + +class InsufficientNumberOfSeatsError(BaseBackEndError): + msg_template = "Not enough available seats for license item {licensed_item_id}" + status_code = status.HTTP_409_CONFLICT + + +class CanNotCheckoutServiceIsNotRunningError(BaseBackEndError): + msg_template = "Can not checkout license item {licensed_item_id} as dynamic service is not running. Current service id: {service_run_id}" + status_code = status.HTTP_422_UNPROCESSABLE_ENTITY + + +class LicensedItemCheckoutNotFoundError(BaseBackEndError): + msg_template = "Licensed item checkout {licensed_item_checkout_id} not found." + status_code = status.HTTP_404_NOT_FOUND diff --git a/services/api-server/src/simcore_service_api_server/exceptions/custom_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/custom_errors.py new file mode 100644 index 00000000000..28f73f2a736 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/custom_errors.py @@ -0,0 +1,29 @@ +from ._base import ApiServerBaseError + + +class CustomBaseError(ApiServerBaseError): + pass + + +class InsufficientCreditsError(CustomBaseError): + # NOTE: Same message as WalletNotEnoughCreditsError + msg_template = "Wallet '{wallet_name}' has {wallet_credit_amount} credits. Please add some before requesting solver ouputs" + + +class MissingWalletError(CustomBaseError): + msg_template = "Job {job_id} does not have an associated wallet." + + +class ApplicationSetupError(CustomBaseError): + pass + + +class ServiceConfigurationError(CustomBaseError, ValueError): + msg_template = "{service_cls_name} invalid configuration: {detail_msg}." + + +class SolverServiceListJobsFiltersError( + ServiceConfigurationError +): # pylint: disable=too-many-ancestors + service_cls_name = "SolverService" + detail_msg = "solver_version is set but solver_id is not. 
Please provide both or none of them" diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/__init__.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/__init__.py new file mode 100644 index 00000000000..91c4e0d9ccf --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/__init__.py @@ -0,0 +1,46 @@ +from fastapi import FastAPI +from fastapi.exceptions import RequestValidationError +from httpx import HTTPError as HttpxException +from starlette import status +from starlette.exceptions import HTTPException + +from ..._constants import MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE +from ...exceptions.backend_errors import BaseBackEndError +from ..custom_errors import CustomBaseError +from ..log_streaming_errors import LogStreamingBaseError +from ._custom_errors import custom_error_handler +from ._handlers_backend_errors import backend_error_handler +from ._handlers_factory import make_handler_for_exception +from ._http_exceptions import http_exception_handler +from ._httpx_client_exceptions import handle_httpx_client_exceptions +from ._log_streaming_errors import log_handling_error_handler +from ._validation_errors import http422_error_handler + + +def setup(app: FastAPI, *, is_debug: bool = False): + app.add_exception_handler(HTTPException, http_exception_handler) + app.add_exception_handler(HttpxException, handle_httpx_client_exceptions) + app.add_exception_handler(RequestValidationError, http422_error_handler) + app.add_exception_handler(LogStreamingBaseError, log_handling_error_handler) + app.add_exception_handler(CustomBaseError, custom_error_handler) + app.add_exception_handler(BaseBackEndError, backend_error_handler) + + # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy + app.add_exception_handler( + NotImplementedError, + make_handler_for_exception( + NotImplementedError, + status.HTTP_501_NOT_IMPLEMENTED, + error_message="This endpoint is still not implemented (under development)", + ), + ) + app.add_exception_handler( + Exception, + make_handler_for_exception( + Exception, + status.HTTP_500_INTERNAL_SERVER_ERROR, + error_message=MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE, + add_exception_to_message=is_debug, + add_oec_to_message=True, + ), + ) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_custom_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_custom_errors.py new file mode 100644 index 00000000000..558b5191f59 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_custom_errors.py @@ -0,0 +1,26 @@ +from fastapi import Request, status + +from ..custom_errors import ( + CustomBaseError, + InsufficientCreditsError, + MissingWalletError, +) +from ._utils import create_error_json_response + + +async def custom_error_handler(request: Request, exc: Exception): + assert request # nosec + assert isinstance(exc, CustomBaseError) + + error_msg = f"{exc}" + if isinstance(exc, InsufficientCreditsError): + return create_error_json_response( + error_msg, status_code=status.HTTP_402_PAYMENT_REQUIRED + ) + if isinstance(exc, MissingWalletError): + return create_error_json_response( + error_msg, status_code=status.HTTP_424_FAILED_DEPENDENCY + ) + + msg = f"Exception handler is not implement for {exc=} [{type(exc)}]" + raise NotImplementedError(msg) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_backend_errors.py 
b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_backend_errors.py new file mode 100644 index 00000000000..ca335deceac --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_backend_errors.py @@ -0,0 +1,12 @@ +from starlette.requests import Request +from starlette.responses import JSONResponse + +from ...exceptions.backend_errors import BaseBackEndError +from ._utils import create_error_json_response + + +async def backend_error_handler(request: Request, exc: Exception) -> JSONResponse: + assert request # nosec + assert isinstance(exc, BaseBackEndError) + + return create_error_json_response(f"{exc}", status_code=exc.status_code) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_factory.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_factory.py new file mode 100644 index 00000000000..fe2befdce63 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_handlers_factory.py @@ -0,0 +1,52 @@ +import logging + +from common_library.error_codes import create_error_code +from fastapi.requests import Request +from fastapi.responses import JSONResponse +from servicelib.logging_errors import create_troubleshotting_log_kwargs + +from ._utils import ExceptionHandler, create_error_json_response + +_logger = logging.getLogger(__file__) + + +def make_handler_for_exception( + exception_cls: type[BaseException], + status_code: int, + *, + error_message: str, + add_exception_to_message: bool = False, + add_oec_to_message: bool = False, +) -> ExceptionHandler: + """ + Produces a handler for BaseException-type exceptions which converts them + into an error JSON response with a given status code + + SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions + """ + + async def _http_error_handler( + request: Request, exception: BaseException + ) -> JSONResponse: + assert request # nosec + assert isinstance(exception, exception_cls) # nosec + + user_error_msg = error_message + if add_exception_to_message: + user_error_msg += f" {exception}" + + error_code = create_error_code(exception) + if add_oec_to_message: + user_error_msg += f" [{error_code}]" + + _logger.exception( + **create_troubleshotting_log_kwargs( + user_error_msg, + error=exception, + error_code=error_code, + tip="Unexpected error", + ) + ) + return create_error_json_response(user_error_msg, status_code=status_code) + + return _http_error_handler diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_http_exceptions.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_http_exceptions.py new file mode 100644 index 00000000000..bdff166096b --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_http_exceptions.py @@ -0,0 +1,12 @@ +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse + +from ._utils import create_error_json_response + + +async def http_exception_handler(request: Request, exc: Exception) -> JSONResponse: + assert request # nosec + assert isinstance(exc, HTTPException) # nosec + + return create_error_json_response(exc.detail, status_code=exc.status_code) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_httpx_client_exceptions.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_httpx_client_exceptions.py 
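# Illustrative sketch (hedged): how a BaseBackEndError subclass surfaces to the
# public API once backend_error_handler is registered. It assumes OsparcErrorMixin
# renders msg_template from the keyword arguments given at construction time.

from simcore_service_api_server.exceptions.backend_errors import JobNotFoundError

exc = JobNotFoundError(project_id="f622946d-fd29-35b9-a193-abdd1095167c")
assert exc.status_code == 404  # declared on the class; reused as the response status

# backend_error_handler(request, exc) then returns approximately
#   HTTP 404 with body {"errors": ["Could not get solver/study job f622946d-fd29-35b9-a193-abdd1095167c"]}
# i.e. the formatted msg_template wrapped in the ErrorGet envelope by create_error_json_response.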
new file mode 100644 index 00000000000..99989de85e3 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_httpx_client_exceptions.py @@ -0,0 +1,41 @@ +""" General handling of httpx-based exceptions + + - httpx-based clients are used to communicate with other backend services + - any exception raised by a httpx client will be handled here. +""" +import logging +from typing import Any + +from fastapi import Request, status +from httpx import HTTPError, TimeoutException + +from ._utils import create_error_json_response + +_logger = logging.getLogger(__file__) + + +async def handle_httpx_client_exceptions(request: Request, exc: Exception): + """ + Default httpx exception handler. + See https://www.python-httpx.org/exceptions/ + With this in place only HTTPStatusErrors need to be customized closer to the httpx client itself. + """ + assert request # nosec + assert isinstance(exc, HTTPError) + + status_code: Any + detail: str + headers: dict[str, str] = {} + if isinstance(exc, TimeoutException): + status_code = status.HTTP_504_GATEWAY_TIMEOUT + detail = f"Request to {exc.request.url.host.capitalize()} timed out" + else: + status_code = status.HTTP_502_BAD_GATEWAY + detail = f"{exc.request.url.host.capitalize()} service unexpectedly failed" + + if status_code >= status.HTTP_500_INTERNAL_SERVER_ERROR: + _logger.exception("%s. host=%s. %s", detail, exc.request.url.host, f"{exc}") + + return create_error_json_response( + f"{detail}", status_code=status_code, headers=headers + ) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_log_streaming_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_log_streaming_errors.py new file mode 100644 index 00000000000..31039186060 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_log_streaming_errors.py @@ -0,0 +1,24 @@ +from fastapi import status +from starlette.requests import Request +from starlette.responses import JSONResponse + +from ..log_streaming_errors import ( + LogStreamerNotRegisteredError, + LogStreamerRegistrationConflictError, + LogStreamingBaseError, +) +from ._utils import create_error_json_response + + +async def log_handling_error_handler(request: Request, exc: Exception) -> JSONResponse: + assert request # nosec + assert isinstance(exc, LogStreamingBaseError) + + msg = f"{exc}" + status_code: int = status.HTTP_500_INTERNAL_SERVER_ERROR + if isinstance(exc, LogStreamerNotRegisteredError): + status_code = status.HTTP_500_INTERNAL_SERVER_ERROR + elif isinstance(exc, LogStreamerRegistrationConflictError): + status_code = status.HTTP_409_CONFLICT + + return create_error_json_response(msg, status_code=status_code) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_utils.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_utils.py new file mode 100644 index 00000000000..da741fdb8b4 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_utils.py @@ -0,0 +1,27 @@ +from collections.abc import Awaitable, Callable +from typing import Any, TypeAlias + +from fastapi.encoders import jsonable_encoder +from fastapi.requests import Request +from fastapi.responses import JSONResponse + +from ...models.schemas.errors import ErrorGet + +ExceptionHandler: TypeAlias = Callable[ + [Request, BaseException], Awaitable[JSONResponse] +] + + +def create_error_json_response( + *errors: Any, status_code: int, **kwargs +) -> 
JSONResponse: + """ + Converts errors to Error response model defined in the OAS + """ + + error_model = ErrorGet(errors=list(errors)) + return JSONResponse( + content=jsonable_encoder(error_model), + status_code=status_code, + **kwargs, + ) diff --git a/services/api-server/src/simcore_service_api_server/exceptions/handlers/_validation_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_validation_errors.py new file mode 100644 index 00000000000..f7d5f0d7c93 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/handlers/_validation_errors.py @@ -0,0 +1,29 @@ +from fastapi import Request, status +from fastapi.exceptions import RequestValidationError +from fastapi.openapi.constants import REF_PREFIX +from fastapi.openapi.utils import validation_error_response_definition +from pydantic import ValidationError +from starlette.responses import JSONResponse + +from ._utils import create_error_json_response + + +async def http422_error_handler( + request: Request, + exc: Exception, +) -> JSONResponse: + assert request # nosec + assert isinstance(exc, RequestValidationError | ValidationError) + + return create_error_json_response( + *exc.errors(), status_code=status.HTTP_422_UNPROCESSABLE_ENTITY + ) + + +validation_error_response_definition["properties"] = { + "errors": { + "title": "Validation errors", + "type": "array", + "items": {"$ref": f"{REF_PREFIX}ValidationError"}, + }, +} diff --git a/services/api-server/src/simcore_service_api_server/exceptions/log_streaming_errors.py b/services/api-server/src/simcore_service_api_server/exceptions/log_streaming_errors.py new file mode 100644 index 00000000000..b971b63003e --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/log_streaming_errors.py @@ -0,0 +1,13 @@ +from ._base import ApiServerBaseError + + +class LogStreamingBaseError(ApiServerBaseError): + pass + + +class LogStreamerNotRegisteredError(LogStreamingBaseError): + msg_template = "{msg}" + + +class LogStreamerRegistrationConflictError(LogStreamingBaseError): + msg_template = "A stream was already connected to {job_id}. Only a single stream can be connected at the time" diff --git a/services/api-server/src/simcore_service_api_server/exceptions/service_errors_utils.py b/services/api-server/src/simcore_service_api_server/exceptions/service_errors_utils.py new file mode 100644 index 00000000000..90bdc27bad4 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/exceptions/service_errors_utils.py @@ -0,0 +1,199 @@ +# pylint: disable=dangerous-default-value +import asyncio +import logging +from collections.abc import Callable, Coroutine, Mapping +from contextlib import contextmanager +from functools import wraps +from inspect import signature +from typing import Any, Concatenate, NamedTuple, ParamSpec, TypeAlias, TypeVar + +import httpx +from fastapi import HTTPException, status +from pydantic import ValidationError +from servicelib.rabbitmq._errors import RemoteMethodNotRegisteredError + +from ..exceptions.backend_errors import BaseBackEndError +from ..models.schemas.errors import ErrorGet + +_logger = logging.getLogger(__name__) + +MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE = "Oops! Something went wrong, but we've noted it down and we'll sort it out ASAP. Thanks for your patience! 
[{}]" + +DEFAULT_BACKEND_SERVICE_STATUS_CODES: dict[int | str, dict[str, Any]] = { + status.HTTP_429_TOO_MANY_REQUESTS: { + "description": "Too many requests", + "model": ErrorGet, + }, + status.HTTP_500_INTERNAL_SERVER_ERROR: { + "description": "Internal server error", + "model": ErrorGet, + }, + status.HTTP_502_BAD_GATEWAY: { + "description": "Unexpected error when communicating with backend service", + "model": ErrorGet, + }, + status.HTTP_503_SERVICE_UNAVAILABLE: { + "description": "Service unavailable", + "model": ErrorGet, + }, + status.HTTP_504_GATEWAY_TIMEOUT: { + "description": "Request to a backend service timed out.", + "model": ErrorGet, + }, +} + + +ServiceHTTPStatus: TypeAlias = int +ApiHTTPStatus: TypeAlias = int + + +class ToApiTuple(NamedTuple): + status_code: ApiHTTPStatus + detail: Callable[[Any], str] | str | None = None + + +# service to public-api status maps +BackEndErrorType = TypeVar("BackEndErrorType", bound=BaseBackEndError) +RpcExceptionType = TypeVar( + "RpcExceptionType", bound=Exception +) # need more specific rpc exception base class +HttpStatusMap: TypeAlias = Mapping[ServiceHTTPStatus, BackEndErrorType] +RabbitMqRpcExceptionMap: TypeAlias = Mapping[RpcExceptionType, BackEndErrorType] + + +def _get_http_exception_kwargs( + service_name: str, + service_error: httpx.HTTPStatusError, + http_status_map: HttpStatusMap, + **exception_ctx: Any, +): + detail: str = "" + headers: dict[str, str] = {} + + if exception_type := http_status_map.get(service_error.response.status_code): + raise exception_type(**exception_ctx) + + if service_error.response.status_code in { + status.HTTP_429_TOO_MANY_REQUESTS, + status.HTTP_503_SERVICE_UNAVAILABLE, + status.HTTP_504_GATEWAY_TIMEOUT, + }: + status_code = service_error.response.status_code + detail = f"The {service_name} service was unavailable." + if retry_after := service_error.response.headers.get("Retry-After"): + headers["Retry-After"] = retry_after + else: + status_code = status.HTTP_502_BAD_GATEWAY + detail = f"Received unexpected response from {service_name}" + + if status_code >= status.HTTP_500_INTERNAL_SERVER_ERROR: + _logger.exception( + "Converted status code %s from %s service to status code %s", + f"{service_error.response.status_code}", + service_name, + f"{status_code}", + ) + return status_code, detail, headers + + +Self = TypeVar("Self") +P = ParamSpec("P") +R = TypeVar("R") + + +@contextmanager +def service_exception_handler( + service_name: str, + http_status_map: HttpStatusMap, + rpc_exception_map: RabbitMqRpcExceptionMap, + **context, +): + status_code: int + detail: str + headers: dict[str, str] = {} + + try: + + yield + + except ValidationError as exc: + detail = f"{service_name} service returned invalid response" + _logger.exception( + "Invalid data exchanged with %s service. 
%s", service_name, detail + ) + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, detail=detail, headers=headers + ) from exc + + except httpx.HTTPStatusError as exc: + + status_code, detail, headers = _get_http_exception_kwargs( + service_name, exc, http_status_map=http_status_map, **context + ) + raise HTTPException( + status_code=status_code, detail=detail, headers=headers + ) from exc + + except BaseException as exc: # currently no baseclass for rpc errors + if ( + type(exc) == asyncio.TimeoutError + ): # https://github.com/ITISFoundation/osparc-simcore/blob/master/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py#L76 + raise HTTPException( + status_code=status.HTTP_504_GATEWAY_TIMEOUT, + detail="Request to backend timed out", + ) from exc + if type(exc) in { + asyncio.exceptions.CancelledError, + RuntimeError, + RemoteMethodNotRegisteredError, + }: # https://github.com/ITISFoundation/osparc-simcore/blob/master/packages/service-library/src/servicelib/rabbitmq/_client_rpc.py#L76 + raise HTTPException( + status_code=status.HTTP_502_BAD_GATEWAY, + detail="Request to backend failed", + ) from exc + if backend_error_type := rpc_exception_map.get(type(exc)): + raise backend_error_type(**context) from exc + raise + + +def service_exception_mapper( + *, + service_name: str, + http_status_map: HttpStatusMap = {}, + rpc_exception_map: RabbitMqRpcExceptionMap = {}, +) -> Callable[ + [Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]], + Callable[Concatenate[Self, P], Coroutine[Any, Any, R]], +]: + def _decorator(member_func: Callable[Concatenate[Self, P], Coroutine[Any, Any, R]]): + _assert_correct_kwargs( + func=member_func, + exception_types=set(http_status_map.values()).union( + set(rpc_exception_map.values()) + ), + ) + + @wraps(member_func) + async def _wrapper(self: Self, *args: P.args, **kwargs: P.kwargs) -> R: + with service_exception_handler( + service_name, http_status_map, rpc_exception_map, **kwargs + ): + return await member_func(self, *args, **kwargs) + + return _wrapper + + return _decorator + + +def _assert_correct_kwargs(func: Callable, exception_types: set[BackEndErrorType]): + _required_kwargs = { + name + for name, param in signature(func).parameters.items() + if param.kind == param.KEYWORD_ONLY + } + for exc_type in exception_types: + assert isinstance(exc_type, type) # nosec + _exception_inputs = exc_type.named_fields() + assert _exception_inputs.issubset( + _required_kwargs + ), f"{_exception_inputs - _required_kwargs} are inputs to `{exc_type.__name__}.msg_template` but not a kwarg in the decorated coroutine `{func.__module__}.{func.__name__}`" # nosec diff --git a/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py b/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py new file mode 100644 index 00000000000..5e0e3c57120 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/_utils_pydantic.py @@ -0,0 +1,42 @@ +from copy import deepcopy + +from pydantic import GetJsonSchemaHandler +from pydantic.json_schema import JsonSchemaValue +from pydantic_core.core_schema import CoreSchema + + +class UriSchema: + """Metadata class to modify openapi schemas of Url fields + + Usage: + class TestModel(BaseModel): + url: Annotated[HttpUrl, UriSchema()] + + + will produce a schema for url field property as a string with a format + { + "format": "uri", + "type": "string", + } + + SEE https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#section-7.3.5 + """ + + 
@classmethod + def __get_pydantic_json_schema__( + cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + # SEE https://docs.pydantic.dev/2.10/concepts/json_schema/#implementing-__get_pydantic_json_schema__ + json_schema = deepcopy(handler(core_schema)) + + if (schema := core_schema.get("schema", {})) and schema.get("type") == "url": + json_schema.update( + type="string", + format="uri", + ) + if max_length := schema.get("max_length"): + # SEE https://docs.pydantic.dev/2.10/api/networks/#pydantic.networks.UrlConstraints + # adds limits if schema UrlConstraints includes it (e.g HttUrl includes ) + json_schema.update(maxLength=max_length, minLength=1) + + return json_schema diff --git a/services/api-server/src/simcore_service_api_server/models/api_resources.py b/services/api-server/src/simcore_service_api_server/models/api_resources.py index 44491e2c51f..939012bbf57 100644 --- a/services/api-server/src/simcore_service_api_server/models/api_resources.py +++ b/services/api-server/src/simcore_service_api_server/models/api_resources.py @@ -1,9 +1,11 @@ +import re import urllib.parse -from typing import Any +from typing import Annotated, TypeAlias -from pydantic import BaseModel, Field, constr +from pydantic import Field, TypeAdapter +from pydantic.types import StringConstraints -# RESOURCE NAMES https://cloud.google.com/apis/design/resource_names +# RESOURCE NAMES https://google.aip.dev/122 # # # API Service Name Collection ID Resource ID Collection ID Resource ID @@ -20,46 +22,72 @@ # "//calendar.googleapis.com/users/john smith/events/123" # # This is the corresponding HTTP URL. -# "https://calendar.googleapis.com/v3/users/john%20smith/events/123" # # SEE https://tools.ietf.org/html/rfc3986#appendix-B # -RELATIVE_RESOURCE_NAME_RE = r"^([^\s/]+/?)+$" -RelativeResourceName = constr(regex=RELATIVE_RESOURCE_NAME_RE) +_RELATIVE_RESOURCE_NAME_RE = r"^([^\s/]+/?){1,10}$" + + +RelativeResourceName: TypeAlias = Annotated[ + str, StringConstraints(pattern=_RELATIVE_RESOURCE_NAME_RE), Field(frozen=True) +] + # NOTE: we quote parts in a single resource_name and unquote when split def parse_last_resource_id(resource_name: RelativeResourceName) -> str: - match = RelativeResourceName.regex.match(resource_name) - last_quoted_part = match.group(1) - return urllib.parse.unquote_plus(last_quoted_part) + if match := re.match(_RELATIVE_RESOURCE_NAME_RE, resource_name): + last_quoted_part = match.group(1) + return urllib.parse.unquote_plus(last_quoted_part) + msg = f"Invalid '{resource_name=}' does not match RelativeResourceName" + raise ValueError(msg) def compose_resource_name(*collection_or_resource_ids) -> RelativeResourceName: quoted_parts = [ - urllib.parse.quote_plus(str(_id).lstrip("/")) + urllib.parse.quote_plus(f"{_id}".lstrip("/")) for _id in collection_or_resource_ids ] - return RelativeResourceName("/".join(quoted_parts)) + return TypeAdapter(RelativeResourceName).validate_python("/".join(quoted_parts)) -def split_resource_name(resource_name: RelativeResourceName) -> list[str]: +def split_resource_name(resource_name: RelativeResourceName) -> tuple[str, ...]: + """ + Example: + resource_name = "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs/output+22" + returns ("solvers", "simcore/services/comp/isolve", "releases", "1.3.4", "jobs", "f622946d-fd29-35b9-a193-abdd1095167c", "outputs", "output 22") + """ quoted_parts = resource_name.split("/") - return [f"{urllib.parse.unquote_plus(p)}" for p in quoted_parts] + 
return tuple(f"{urllib.parse.unquote_plus(p)}" for p in quoted_parts) -# -# For resource definitions, the first field should be a string field for the resource name, -# and it should be called *name* -# Resource IDs must be clearly documented whether they are assigned by the client, the server, or either -# -class BaseResource(BaseModel): - name: RelativeResourceName = Field(None, example="solvers/isolve/releases/1.2.3") - id: Any = Field(None, description="Resource ID", example="1.2.3") +def parse_collections_ids(resource_name: RelativeResourceName) -> tuple[str, ...]: + """ + Example: + resource_name = "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs/output+22" + returns ("solvers", "releases", "jobs", "outputs") + """ + parts = split_resource_name(resource_name) + return parts[::2] + + +def parse_resources_ids(resource_name: RelativeResourceName) -> tuple[str, ...]: + """ + Example: + resource_name = "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs/output+22" + returns ("simcore/services/comp/isolve", "1.3.4", "f622946d-fd29-35b9-a193-abdd1095167c", "output 22") + """ + parts = split_resource_name(resource_name) + return parts[1::2] -class BaseCollection(BaseModel): - name: RelativeResourceName = Field(None, example="solvers/isolve/releases") - id: Any = Field(None, description="Collection ID", example="releases") +def split_resource_name_as_dict( + resource_name: RelativeResourceName, +) -> dict[str, str | None]: + """ + Returns a map such as resource_ids[Collection-ID] == Resource-ID + """ + parts = split_resource_name(resource_name) + return dict(zip(parts[::2], parts[1::2], strict=False)) diff --git a/services/api-server/src/simcore_service_api_server/models/basic_types.py b/services/api-server/src/simcore_service_api_server/models/basic_types.py index ab6baffe1d1..df9661bf039 100644 --- a/services/api-server/src/simcore_service_api_server/models/basic_types.py +++ b/services/api-server/src/simcore_service_api_server/models/basic_types.py @@ -1,4 +1,20 @@ -from models_library.basic_regex import VERSION_RE -from pydantic import constr +from typing import Annotated, NamedTuple, TypeAlias -VersionStr = constr(strip_whitespace=True, regex=VERSION_RE) # as M.m.p +from fastapi.responses import StreamingResponse +from models_library.basic_regex import SIMPLE_VERSION_RE +from pydantic import StringConstraints + +VersionStr: TypeAlias = Annotated[ + str, StringConstraints(strip_whitespace=True, pattern=SIMPLE_VERSION_RE) +] + +FileNameStr: TypeAlias = Annotated[str, StringConstraints(strip_whitespace=True)] + + +class LogStreamingResponse(StreamingResponse): + media_type = "application/x-ndjson" + + +class NameValueTuple(NamedTuple): + name: str + value: str diff --git a/services/api-server/src/simcore_service_api_server/models/config.py b/services/api-server/src/simcore_service_api_server/models/config.py deleted file mode 100644 index 16220e43862..00000000000 --- a/services/api-server/src/simcore_service_api_server/models/config.py +++ /dev/null @@ -1,6 +0,0 @@ -from ..utils.serialization import json_dumps, json_loads - - -class BaseConfig: - json_loads = json_loads - json_dumps = json_dumps diff --git a/services/api-server/src/simcore_service_api_server/models/domain/api_keys.py b/services/api-server/src/simcore_service_api_server/models/domain/api_keys.py deleted file mode 100644 index d0452bb54ad..00000000000 --- 
a/services/api-server/src/simcore_service_api_server/models/domain/api_keys.py +++ /dev/null @@ -1,15 +0,0 @@ -from pydantic import BaseModel, Field, SecretStr - - -class ApiKey(BaseModel): - api_key: str - api_secret: SecretStr - - -class ApiKeyInDB(ApiKey): - id_: int = Field(0, alias="id") - display_name: str - user_id: int - - class Config: - orm_mode = True diff --git a/services/api-server/src/simcore_service_api_server/models/domain/files.py b/services/api-server/src/simcore_service_api_server/models/domain/files.py new file mode 100644 index 00000000000..82b73cb4456 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/domain/files.py @@ -0,0 +1,147 @@ +from pathlib import Path +from typing import Annotated +from urllib.parse import quote as _quote +from urllib.parse import unquote as _unquote +from uuid import UUID, uuid3 + +import aiofiles +from fastapi import UploadFile +from models_library.api_schemas_storage.storage_schemas import ETag +from models_library.basic_types import SHA256Str +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, StorageFileID +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StringConstraints, + TypeAdapter, +) +from servicelib.file_utils import create_sha256_checksum + +FileName = Annotated[str, StringConstraints(strip_whitespace=True)] +NAMESPACE_FILEID_KEY = UUID("aa154444-d22d-4290-bb15-df37dba87865") + + +class FileInProgramJobData(BaseModel): + """Represents a file stored on the client side""" + + project_id: Annotated[ProjectID, Field(..., description="Project identifier")] + node_id: Annotated[NodeID, Field(..., description="Node identifier")] + workspace_path: Annotated[ + Path, + StringConstraints(pattern=r"^workspace/.*"), + Field(..., description="File path within the workspace"), + ] + + +class File(BaseModel): + """Represents a file stored on the server side i.e. 
a unique reference to a file in the cloud.""" + + # WARNING: from pydantic import File as FileParam + # NOTE: see https://ant.apache.org/manual/Tasks/checksum.html + + id: UUID = Field(..., description="Resource identifier") + + filename: str = Field(..., description="Name of the file with extension") + content_type: str | None = Field( + default=None, + description="Guess of type content [EXPERIMENTAL]", + validate_default=True, + ) + sha256_checksum: SHA256Str | None = Field( + default=None, + description="SHA256 hash of the file's content", + alias="checksum", # alias for backwards compatibility + ) + e_tag: ETag | None = Field(default=None, description="S3 entity tag") + program_job_file_path: Annotated[ + FileInProgramJobData | None, + Field( + ..., + description="Destination information in case the file is uploaded directly to a program job", + ), + ] = None + + model_config = ConfigDict( + populate_by_name=True, + json_schema_extra={ + "examples": [ + # complete + { + "id": "f0e1fb11-208d-3ed2-b5ef-cab7a7398f78", + "filename": "Architecture-of-Scalable-Distributed-ETL-System-whitepaper.pdf", + "content_type": "application/pdf", + "checksum": "1a512547e3ce3427482da14e8c914ecf61da76ad5f749ff532efe906e6bba128", + }, + # minimum + { + "id": "f0e1fb11-208d-3ed2-b5ef-cab7a7398f78", + "filename": "whitepaper.pdf", + }, + ] + }, + ) + + @classmethod + async def create_from_path(cls, path: Path) -> "File": + async with aiofiles.open(path, mode="rb") as file: + sha256check = await create_sha256_checksum(file) + + return cls( + id=cls.create_id(sha256check, path.name), + filename=path.name, + checksum=SHA256Str(sha256check), + ) + + @classmethod + async def create_from_file_link(cls, s3_object_path: str, e_tag: str) -> "File": + filename = Path(s3_object_path).name + return cls( + id=cls.create_id(e_tag, filename), + filename=filename, + e_tag=e_tag, + ) + + @classmethod + async def create_from_uploaded( + cls, file: UploadFile, *, file_size=None, created_at=None + ) -> "File": + sha256check = await create_sha256_checksum(file) + # WARNING: UploadFile wraps a stream and wil checkt its cursor position: file.file.tell() != 0 + # WARNING: await file.seek(0) might introduce race condition if not done carefuly + + return cls( + id=cls.create_id(sha256check or file_size, file.filename, created_at), + filename=file.filename or "Undefined", + content_type=file.content_type, + checksum=SHA256Str(sha256check), + ) + + @classmethod + async def create_from_quoted_storage_id(cls, quoted_storage_id: str) -> "File": + storage_file_id: StorageFileID = TypeAdapter(StorageFileID).validate_python( + _unquote(quoted_storage_id) + ) + _, fid, fname = Path(storage_file_id).parts + return cls(id=UUID(fid), filename=fname, checksum=None) + + @classmethod + def create_id(cls, *keys) -> UUID: + return uuid3(NAMESPACE_FILEID_KEY, ":".join(map(str, keys))) + + @property + def storage_file_id(self) -> StorageFileID: + """Get the StorageFileId associated with this file""" + if program_path := self.program_job_file_path: + return TypeAdapter(StorageFileID).validate_python( + f"{program_path.project_id}/{program_path.node_id}/{program_path.workspace_path}" + ) + return TypeAdapter(StorageFileID).validate_python( + f"api/{self.id}/{self.filename}" + ) + + @property + def quoted_storage_file_id(self) -> str: + """Quoted version of the StorageFileId""" + return _quote(self.storage_file_id, safe="") diff --git a/services/api-server/src/simcore_service_api_server/models/domain/groups.py 
b/services/api-server/src/simcore_service_api_server/models/domain/groups.py index 2784d356365..59e253e6cf1 100644 --- a/services/api-server/src/simcore_service_api_server/models/domain/groups.py +++ b/services/api-server/src/simcore_service_api_server/models/domain/groups.py @@ -1,15 +1,13 @@ -from typing import Optional - from pydantic import BaseModel, Field class UsersGroup(BaseModel): - gid: str + gid: str = Field(..., coerce_numbers_to_str=True) label: str - description: Optional[str] = None + description: str | None = None class Groups(BaseModel): me: UsersGroup - organizations: Optional[list[UsersGroup]] = [] + organizations: list[UsersGroup] | None = [] all_: UsersGroup = Field(..., alias="all") diff --git a/services/api-server/src/simcore_service_api_server/models/domain/projects.py b/services/api-server/src/simcore_service_api_server/models/domain/projects.py index d5c9afd786b..ae74533546b 100644 --- a/services/api-server/src/simcore_service_api_server/models/domain/projects.py +++ b/services/api-server/src/simcore_service_api_server/models/domain/projects.py @@ -1,35 +1,19 @@ -# from datetime import datetime -# from typing import Optional +# mypy: disable-error-code=truthy-function -from models_library.projects import AccessRights, Node, Project, StudyUI -from models_library.projects_nodes import InputTypes, OutputTypes +from models_library.projects_access import AccessRights +from models_library.projects_nodes import InputTypes, Node, OutputTypes from models_library.projects_nodes_io import SimCoreFileLink -from ...utils.serialization import json_dumps, json_loads +assert AccessRights # nosec +assert InputTypes # nosec +assert Node # nosec +assert OutputTypes # nosec +assert SimCoreFileLink # nosec - -class NewProjectIn(Project): - """Web-server API model in body for create_project""" - - # - uuid - # - name - # - description - # - prjOwner - # - accessRights - # - creationDate - # - lastChangeDate - # - thumbnail - # - workbench - class Config: - json_loads = json_loads - json_dumps = json_dumps - - -__all__ = [ +__all__: tuple[str, ...] = ( "AccessRights", - "Node", - "StudyUI", "InputTypes", + "Node", "OutputTypes", "SimCoreFileLink", -] +) diff --git a/services/api-server/src/simcore_service_api_server/models/pagination.py b/services/api-server/src/simcore_service_api_server/models/pagination.py new file mode 100644 index 00000000000..1c00ecccf42 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/pagination.py @@ -0,0 +1,96 @@ +"""Overrides models in fastapi_pagination + +Usage: + from fastapi_pagination.api import create_page + from ...models.pagination import LimitOffsetPage, LimitOffsetParams + +""" + +from collections.abc import Sequence +from typing import Generic, TypeAlias, TypeVar + +from fastapi import Query +from fastapi_pagination.customization import CustomizedPage, UseName, UseParamsFields +from fastapi_pagination.links import LimitOffsetPage as _LimitOffsetPage +from models_library.rest_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, + MINIMUM_NUMBER_OF_ITEMS_PER_PAGE, +) +from pydantic import ( + BaseModel, + ConfigDict, + Field, + NonNegativeInt, + ValidationInfo, + field_validator, +) + +T = TypeVar("T") + +Page = CustomizedPage[ + _LimitOffsetPage[T], + # Customizes the default and maximum to fit those of the web-server. 
It simplifies interconnection + UseParamsFields( + limit=Query( + # NOTE: in sync with PageLimitInt + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + ge=MINIMUM_NUMBER_OF_ITEMS_PER_PAGE, + le=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, + description="Page size limit", + ) + ), + # Renames class for the openapi.json to make the python-client's name models shorter + UseName(name="Page"), +] + +PaginationParams: TypeAlias = Page.__params_type__ # type: ignore + + +class OnePage(BaseModel, Generic[T]): + """ + A single page is used to envelope a small sequence that does not require + pagination + + If total > MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, we should consider extending this + entrypoint to proper pagination + """ + + items: Sequence[T] + total: NonNegativeInt | None = Field(default=None, validate_default=True) + + @field_validator("total", mode="before") + @classmethod + def _check_total(cls, v, info: ValidationInfo): + items = info.data.get("items", []) + if v is None: + return len(items) + + if v != len(items): + msg = f"In one page total:{v} == len(items):{len(items)}" + raise ValueError(msg) + + return v + + model_config = ConfigDict( + frozen=True, + json_schema_extra={ + "examples": [ + { + "total": 1, + "items": ["one"], + }, + { + "items": ["one"], + }, + ], + }, + ) + + +__all__: tuple[str, ...] = ( + "MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE", + "OnePage", + "Page", + "PaginationParams", +) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/_base.py b/services/api-server/src/simcore_service_api_server/models/schemas/_base.py new file mode 100644 index 00000000000..07144ba5b76 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/_base.py @@ -0,0 +1,85 @@ +import urllib.parse +from typing import Annotated + +import packaging.version +from models_library.utils.change_case import camel_to_snake +from models_library.utils.common_validators import trim_string_before +from pydantic import BaseModel, ConfigDict, Field, HttpUrl, StringConstraints + +from ...models._utils_pydantic import UriSchema +from ..basic_types import VersionStr + + +class ApiServerOutputSchema(BaseModel): + model_config = ConfigDict( + alias_generator=camel_to_snake, + populate_by_name=True, + extra="ignore", # Used to prune extra fields from internal data + frozen=False, + ) + + +class ApiServerInputSchema(BaseModel): + model_config = ConfigDict( + alias_generator=camel_to_snake, + populate_by_name=True, + extra="ignore", # Used to prune extra fields from internal data + frozen=True, + ) + + +class BaseService(BaseModel): + id: Annotated[ + str, + Field(description="Resource identifier"), + ] + version: Annotated[ + VersionStr, + Field(description="Semantic version number of the resource"), + ] + title: Annotated[ + str, + trim_string_before(max_length=100), + StringConstraints(max_length=100), + Field(description="Human readable name"), + ] + description: Annotated[ + str | None, + StringConstraints( + # NOTE: Place `StringConstraints` before `trim_string_before` for valid OpenAPI schema due to a Pydantic limitation. + # SEE `test_trim_string_before_with_string_constraints` + max_length=1000 + ), + trim_string_before(max_length=1000), + Field(default=None, description="Description of the resource"), + ] + + url: Annotated[ + HttpUrl | None, + UriSchema(), + Field(description="Link to get this resource"), + ] + + @property + def pep404_version(self) -> packaging.version.Version: + """Rich version type that can be used e.g. 
to compare""" + return packaging.version.parse(self.version) + + @property + def url_friendly_id(self) -> str: + """Use to pass id as parameter in URLs""" + return urllib.parse.quote_plus(self.id) + + @property + def resource_name(self) -> str: + """Relative resource name""" + return self.compose_resource_name(self.id, self.version) + + @property + def name(self) -> str: + """API standards notation (see api_resources.py)""" + return self.resource_name + + @classmethod + def compose_resource_name(cls, key: str, version: str) -> str: + raise NotImplementedError("Subclasses must implement this method") diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/errors.py b/services/api-server/src/simcore_service_api_server/models/schemas/errors.py new file mode 100644 index 00000000000..3243f5e44b9 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/errors.py @@ -0,0 +1,23 @@ +from typing import Any + +from pydantic import BaseModel, ConfigDict + + +class ErrorGet(BaseModel): + # We intentionally keep it open until more restrictive policy is implemented + # Check use cases: + # - https://github.com/ITISFoundation/osparc-issues/issues/958 + # - https://github.com/ITISFoundation/osparc-simcore/issues/2520 + # - https://github.com/ITISFoundation/osparc-simcore/issues/2446 + errors: list[Any] + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "errors": [ + "some error message", + "another error message", + ] + } + } + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/files.py b/services/api-server/src/simcore_service_api_server/models/schemas/files.py index 3456eaf97ec..ebfee726adb 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/files.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/files.py @@ -1,42 +1,88 @@ +import datetime from mimetypes import guess_type -from pathlib import Path -from typing import Optional -from uuid import UUID, uuid3 - -import aiofiles -from fastapi import UploadFile -from pydantic import BaseModel, Field, validator - -from ...utils.hash import create_md5_checksum - -NAMESPACE_FILEID_KEY = UUID("aa154444-d22d-4290-bb15-df37dba87865") +from typing import Annotated +from uuid import UUID + +from models_library.api_schemas_storage.storage_schemas import ETag +from models_library.basic_types import SHA256Str +from pydantic import ( + AnyHttpUrl, + BaseModel, + ConfigDict, + Field, + NonNegativeInt, + ValidationInfo, + field_validator, +) + +from .._utils_pydantic import UriSchema +from ..domain.files import File as DomainFile +from ..domain.files import FileName +from ._base import ApiServerInputSchema, ApiServerOutputSchema + + +class UserFile(ApiServerInputSchema): + """Represents a file stored on the client side""" + + filename: Annotated[FileName, Field(..., description="File name")] + filesize: Annotated[NonNegativeInt, Field(..., description="File size in bytes")] + sha256_checksum: Annotated[SHA256Str, Field(..., description="SHA256 checksum")] + + def to_domain_model( + self, + file_id: UUID | None = None, + ) -> DomainFile: + return DomainFile( + id=( + file_id + if file_id + else DomainFile.create_id( + self.filesize, + self.filename, + datetime.datetime.now(datetime.UTC).isoformat(), + ) + ), + filename=self.filename, + checksum=self.sha256_checksum, + program_job_file_path=None, + ) -class File(BaseModel): +class File(ApiServerOutputSchema): """Represents a file stored on the server side i.e. 
a unique reference to a file in the cloud.""" - # WARNING: from pydantic import File as FileParam - # NOTE: see https://ant.apache.org/manual/Tasks/checksum.html - - id: UUID = Field(..., description="Resource identifier") - - filename: str = Field(..., description="Name of the file with extension") - content_type: Optional[str] = Field( - default=None, description="Guess of type content [EXPERIMENTAL]" - ) - checksum: Optional[str] = Field( - None, description="MD5 hash of the file's content [EXPERIMENTAL]" + id: Annotated[UUID, Field(..., description="Resource identifier")] + filename: Annotated[str, Field(..., description="Name of the file with extension")] + content_type: Annotated[ + str | None, + Field( + default=None, + description="Guess of type content [EXPERIMENTAL]", + validate_default=True, + ), + ] = None + sha256_checksum: Annotated[ + SHA256Str | None, + Field( + default=None, + description="SHA256 hash of the file's content", + alias="checksum", # alias for backwards compatibility + ), + ] = None + e_tag: Annotated[ETag | None, Field(default=None, description="S3 entity tag")] = ( + None ) - class Config: - schema_extra = { + model_config = ConfigDict( + populate_by_name=True, + json_schema_extra={ "examples": [ # complete { "id": "f0e1fb11-208d-3ed2-b5ef-cab7a7398f78", "filename": "Architecture-of-Scalable-Distributed-ETL-System-whitepaper.pdf", "content_type": "application/pdf", - "checksum": "de47d0e1229aa2dfb80097389094eadd-1", + "checksum": "1a512547e3ce3427482da14e8c914ecf61da76ad5f749ff532efe906e6bba128", }, # minimum { @@ -44,58 +90,41 @@ class Config: "filename": "whitepaper.pdf", }, ] - } + }, + ) - @validator("content_type", always=True, pre=True) + @field_validator("content_type", mode="before") @classmethod - def guess_content_type(cls, v, values): + def guess_content_type(cls, v, info: ValidationInfo): if v is None: - filename = values.get("filename") + filename = info.data.get("filename") if filename: mime_content_type, _ = guess_type(filename, strict=False) return mime_content_type return v @classmethod - async def create_from_path(cls, path: Path) -> "File": - async with aiofiles.open(path, mode="rb") as file: - md5check = await create_md5_checksum(file) - + def from_domain_model(cls, file: DomainFile) -> "File": return cls( - id=cls.create_id(md5check, path.name), - filename=path.name, - checksum=md5check, + id=file.id, + filename=file.filename, + content_type=file.content_type, + sha256_checksum=file.sha256_checksum, + e_tag=file.e_tag, ) - @classmethod - async def create_from_file_link(cls, s3_object_path: str, e_tag: str) -> "File": - filename = Path(s3_object_path).name - return cls( - id=cls.create_id(e_tag, filename), - filename=filename, - checksum=e_tag, - ) - @classmethod - async def create_from_uploaded( - cls, file: UploadFile, *, file_size=None, created_at=None - ) -> "File": - """ - If use_md5=True, then checksum if fi - """ - md5check = None - if not file_size: - md5check = await create_md5_checksum(file) - # WARNING: UploadFile wraps a stream and wil checkt its cursor position: file.file.tell() != 0 - # WARNING: await file.seek(0) might introduce race condition if not done carefuly +class UploadLinks(BaseModel): + abort_upload: str + complete_upload: str - return cls( - id=cls.create_id(md5check or file_size, file.filename, created_at), - filename=file.filename, - content_type=file.content_type, - checksum=md5check, - ) - @classmethod - def create_id(cls, *keys) -> UUID: - return uuid3(NAMESPACE_FILEID_KEY, ":".join(map(str, keys))) +class 
FileUploadData(BaseModel): + chunk_size: NonNegativeInt + urls: list[Annotated[AnyHttpUrl, UriSchema()]] + links: UploadLinks + + +class ClientFileUploadData(BaseModel): + file_id: UUID = Field(..., description="The file resource id") + upload_schema: FileUploadData = Field(..., description="Schema for uploading file") diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py b/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py index 49af4373530..86abb0a8741 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/jobs.py @@ -1,44 +1,113 @@ +import datetime import hashlib -from datetime import datetime -from enum import Enum -from typing import Optional, Type, Union +import logging +from collections.abc import Callable +from pathlib import Path +from typing import Annotated, TypeAlias from uuid import UUID, uuid4 -from pydantic import BaseModel, Field, HttpUrl, conint, validator +from models_library.basic_types import SHA256Str +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.services_types import FileName +from pydantic import ( + BaseModel, + ConfigDict, + Field, + HttpUrl, + NonNegativeInt, + PositiveInt, + StrictBool, + StrictFloat, + StrictInt, + StringConstraints, + TypeAdapter, + ValidationError, +) +from servicelib.logging_utils import LogLevelInt, LogMessageStr +from starlette.datastructures import Headers -from ...models.config import BaseConfig -from ...models.schemas.files import File -from ...models.schemas.solvers import Solver +from ...models.schemas.files import File, UserFile +from .._utils_pydantic import UriSchema from ..api_resources import ( RelativeResourceName, compose_resource_name, split_resource_name, ) +from ..basic_types import VersionStr +from ..domain.files import File as DomainFile +from ..domain.files import FileInProgramJobData +from ..schemas.files import UserFile +from ._base import ApiServerInputSchema + +# JOB SUB-RESOURCES ---------- +# +# - Wrappers for input/output values +# - Input/outputs are defined in service metadata +# - custom metadata +# +from .programs import Program, ProgramKeyId +from .solvers import Solver -# FIXME: all ints and bools will be floats -# TODO: evaluate how coupled is this to InputTypes/OUtputTypes -ArgumentType = Union[File, float, int, bool, str, None] -KeywordArguments = dict[str, ArgumentType] -PositionalArguments = list[ArgumentType] +JobID: TypeAlias = UUID + +# ArgumentTypes are types used in the job inputs (see ResultsTypes) +ArgumentTypes: TypeAlias = ( + File | StrictFloat | StrictInt | StrictBool | str | list | None +) +KeywordArguments: TypeAlias = dict[str, ArgumentTypes] +PositionalArguments: TypeAlias = list[ArgumentTypes] -def compute_checksum(kwargs: KeywordArguments): +def _compute_keyword_arguments_checksum(kwargs: KeywordArguments): _dump_str = "" for key in sorted(kwargs.keys()): value = kwargs[key] if isinstance(value, File): - value = compute_checksum(value.dict()) + value = _compute_keyword_arguments_checksum(value.model_dump()) else: value = str(value) _dump_str += f"{key}:{value}" return hashlib.sha256(_dump_str.encode("utf-8")).hexdigest() -# JOB INPUTS/OUTPUTS ---------- -# -# - Wrappers for input/output values -# - Input/outputs are defined in service metadata -# +class UserFileToProgramJob(ApiServerInputSchema): + 
filename: Annotated[FileName, Field(..., description="File name")] + filesize: Annotated[NonNegativeInt, Field(..., description="File size in bytes")] + sha256_checksum: Annotated[SHA256Str, Field(..., description="SHA256 checksum")] + program_key: Annotated[ProgramKeyId, Field(..., description="Program identifier")] + program_version: Annotated[VersionStr, Field(..., description="Program version")] + job_id: Annotated[JobID, Field(..., description="Job identifier")] + workspace_path: Annotated[ + Path, + StringConstraints(pattern=r"^workspace/.*"), + Field( + ..., + description="The file's relative path within the job's workspace directory. E.g. 'workspace/myfile.txt'", + ), + ] + + def to_domain_model(self, *, project_id: ProjectID, node_id: NodeID) -> DomainFile: + return DomainFile( + id=DomainFile.create_id( + self.filesize, + self.filename, + datetime.datetime.now(datetime.UTC).isoformat(), + ), + filename=self.filename, + checksum=self.sha256_checksum, + program_job_file_path=FileInProgramJobData( + project_id=project_id, + node_id=node_id, + workspace_path=self.workspace_path, + ), + ) + + +assert set(UserFile.model_fields.keys()).issubset( + set(UserFileToProgramJob.model_fields.keys()) +) # nosec class JobInputs(BaseModel): @@ -47,43 +116,42 @@ class JobInputs(BaseModel): # TODO: gibt es platz fuer metadata? - class Config(BaseConfig): - frozen = True - allow_mutation = False - schema_extra = { + model_config = ConfigDict( + frozen=True, + json_schema_extra={ "example": { "values": { "x": 4.33, "n": 55, "title": "Temperature", "enabled": True, - "input_file": File( - filename="input.txt", id="0a3b2c56-dbcd-4871-b93b-d454b7883f9f" - ), + "input_file": { + "filename": "input.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f", + }, } } - } + }, + ) def compute_checksum(self): - return compute_checksum(self.values) + return _compute_keyword_arguments_checksum(self.values) class JobOutputs(BaseModel): # TODO: JobOutputs is a resources! - job_id: UUID = Field(..., description="Job that produced this output") + job_id: JobID = Field(..., description="Job that produced this output") # TODO: an output could be computed before than the others? has a state? not-ready/ready? results: KeywordArguments # TODO: an error might have occurred at the level of the job, i.e. affects all outputs, or only # on one specific output. 
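A minimal usage sketch for the inputs checksum above, assuming only the JobInputs model as defined in this hunk: keys are sorted before hashing, so equal values map to the same checksum (and hence the same Job.inputs_checksum) regardless of dict insertion order.

    from simcore_service_api_server.models.schemas.jobs import JobInputs

    # illustrative values only; JobInputs is assumed as defined in this hunk
    inputs_a = JobInputs(values={"x": 4.33, "n": 55, "title": "Temperature"})
    inputs_b = JobInputs(values={"title": "Temperature", "n": 55, "x": 4.33})
    assert inputs_a.compute_checksum() == inputs_b.compute_checksum()
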
- # errors: list[JobErrors] = [] - class Config(BaseConfig): - frozen = True - allow_mutation = False - schema_extra = { + model_config = ConfigDict( + frozen=True, + json_schema_extra={ "example": { "job_id": "99d9ac65-9f10-4e2f-a433-b5e412bb037b", "results": { @@ -91,16 +159,65 @@ class Config(BaseConfig): "n": 55, "title": "Specific Absorption Rate", "enabled": False, - "output_file": File( - filename="sar_matrix.txt", - id="0a3b2c56-dbcd-4871-b93b-d454b7883f9f", - ), + "output_file": { + "filename": "sar_matrix.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f", + }, }, } - } + }, + ) def compute_results_checksum(self): - return compute_checksum(self.results) + return _compute_keyword_arguments_checksum(self.results) + + +# Limits metadata values +MetaValueType: TypeAlias = StrictBool | StrictInt | StrictFloat | str + + +class JobMetadataUpdate(BaseModel): + metadata: dict[str, MetaValueType] = Field( + default_factory=dict, description="Custom key-value map" + ) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "metadata": { + "bool": "true", + "int": "42", + "float": "3.14", + "str": "hej med dig", + } + } + } + ) + + +class JobMetadata(BaseModel): + job_id: JobID = Field(..., description="Parent Job") + metadata: dict[str, MetaValueType] = Field(..., description="Custom key-value map") + + # Links + url: Annotated[HttpUrl, UriSchema()] | None = Field( + ..., description="Link to get this resource (self)" + ) + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "job_id": "3497e4de-0e69-41fb-b08f-7f3875a1ac4b", + "metadata": { + "bool": "true", + "int": "42", + "float": "3.14", + "str": "hej med dig", + }, + "url": "https://f02b2452-1dd8-4882-b673-af06373b41b3.fake", + } + } + ) # JOBS ---------- @@ -119,11 +236,11 @@ def compute_results_checksum(self): class Job(BaseModel): - id: UUID + id: JobID name: RelativeResourceName inputs_checksum: str = Field(..., description="Input's checksum") - created_at: datetime = Field(..., description="Job creation timestamp") + created_at: datetime.datetime = Field(..., description="Job creation timestamp") # parent runner_name: RelativeResourceName = Field( @@ -131,41 +248,38 @@ class Job(BaseModel): ) # Get links to other resources - url: Optional[HttpUrl] = Field(..., description="Link to get this resource (self)") - runner_url: Optional[HttpUrl] = Field( + url: Annotated[HttpUrl, UriSchema()] | None = Field( + ..., description="Link to get this resource (self)" + ) + runner_url: Annotated[HttpUrl, UriSchema()] | None = Field( ..., description="Link to the solver's job (parent collection)" ) - outputs_url: Optional[HttpUrl] = Field( - ..., description="Link to the job outputs (sub-collection" + outputs_url: Annotated[HttpUrl, UriSchema()] | None = Field( + ..., description="Link to the job outputs (sub-collection)" ) - class Config(BaseConfig): - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "example": { "id": "f622946d-fd29-35b9-a193-abdd1095167c", "name": "solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", "runner_name": "solvers/isolve/releases/1.3.4", "inputs_checksum": "12345", "created_at": "2021-01-22T23:59:52.322176", - "url": "https://api.osparc.io/v0/jobs/f622946d-fd29-35b9-a193-abdd1095167c", + "url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", "runner_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4", - "outputs_url": 
"https://api.osparc.io/v0/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs", + "outputs_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs", } } - - @validator("name", pre=True) - @classmethod - def check_name(cls, v, values): - _id = str(values["id"]) - if not v.endswith(f"/{_id}"): - raise ValueError(f"Resource name [{v}] and id [{_id}] do not match") - return v + ) # constructors ------ @classmethod def create_now( - cls, parent_name: RelativeResourceName, inputs_checksum: str + cls, + parent_name: RelativeResourceName, + inputs_checksum: str, ) -> "Job": global_uuid = uuid4() @@ -174,26 +288,34 @@ def create_now( id=global_uuid, runner_name=parent_name, inputs_checksum=inputs_checksum, - created_at=datetime.utcnow(), + created_at=datetime.datetime.now(tz=datetime.UTC), url=None, runner_url=None, outputs_url=None, ) @classmethod - def create_solver_job(cls, *, solver: Solver, inputs: JobInputs): - job = Job.create_now( - parent_name=solver.name, inputs_checksum=inputs.compute_checksum() + def create_job_from_solver_or_program( + cls, *, solver_or_program_name: str, inputs: JobInputs + ): + return Job.create_now( + parent_name=solver_or_program_name, + inputs_checksum=inputs.compute_checksum(), ) - return job @classmethod def compose_resource_name( cls, parent_name: RelativeResourceName, job_id: UUID - ) -> str: + ) -> RelativeResourceName: + assert "jobs" not in parent_name # nosec + # CAREFUL, this is not guarantee a UNIQUE identifier since the resource # could have some alias entrypoints and the wrong parent_name might be introduced here - collection_or_resource_ids = split_resource_name(parent_name) + ["jobs", job_id] + collection_or_resource_ids = [ + *split_resource_name(parent_name), + "jobs", + f"{job_id}", + ] return compose_resource_name(*collection_or_resource_ids) @property @@ -202,20 +324,45 @@ def resource_name(self) -> str: return self.name -# TODO: these need to be in sync with computational task states -class TaskStates(str, Enum): - UNKNOWN = "UNKNOWN" - PUBLISHED = "PUBLISHED" - NOT_STARTED = "NOT_STARTED" - PENDING = "PENDING" - STARTED = "STARTED" - RETRY = "RETRY" - SUCCESS = "SUCCESS" - FAILED = "FAILED" - ABORTED = "ABORTED" +def get_url( + solver_or_program: Solver | Program, url_for: Callable[..., HttpUrl], job_id: JobID +) -> HttpUrl | None: + if isinstance(solver_or_program, Solver): + return url_for( + "get_job", + solver_key=solver_or_program.id, + version=solver_or_program.version, + job_id=job_id, + ) + return None + + +def get_runner_url( + solver_or_program: Solver | Program, url_for: Callable[..., HttpUrl] +) -> HttpUrl | None: + if isinstance(solver_or_program, Solver): + return url_for( + "get_solver_release", + solver_key=solver_or_program.id, + version=solver_or_program.version, + ) + return None + + +def get_outputs_url( + solver_or_program: Solver | Program, url_for: Callable[..., HttpUrl], job_id: JobID +) -> HttpUrl | None: + if isinstance(solver_or_program, Solver): + return url_for( + "get_job_outputs", + solver_key=solver_or_program.id, + version=solver_or_program.version, + job_id=job_id, + ) + return None -PercentageInt: Type[int] = conint(ge=0, le=100) +PercentageInt: TypeAlias = Annotated[int, Field(ge=0, le=100)] class JobStatus(BaseModel): @@ -223,36 +370,64 @@ class JobStatus(BaseModel): # What is the status of X? What sort of state is X in? 
# SEE https://english.stackexchange.com/questions/12958/status-vs-state - job_id: UUID - state: TaskStates - progress: PercentageInt = 0 + job_id: JobID + state: RunningState + progress: PercentageInt = Field(default=0) # Timestamps on states - # TODO: sync state events and timestamps - submitted_at: datetime - started_at: Optional[datetime] = Field( + submitted_at: datetime.datetime = Field( + ..., description="Last modification timestamp of the solver job" + ) + started_at: datetime.datetime | None = Field( None, description="Timestamp that indicate the moment the solver starts execution or None if the event did not occur", ) - stopped_at: Optional[datetime] = Field( + stopped_at: datetime.datetime | None = Field( None, description="Timestamp at which the solver finished or killed execution or None if the event did not occur", ) - class Config(BaseConfig): - # frozen = True - # allow_mutation = False - schema_extra = { + model_config = ConfigDict( + json_schema_extra={ "example": { "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", - "state": TaskStates.STARTED, + "state": RunningState.STARTED, "progress": 3, "submitted_at": "2021-04-01 07:15:54.631007", "started_at": "2021-04-01 07:16:43.670610", "stopped_at": None, } } + ) + + +class JobPricingSpecification(BaseModel): + pricing_plan: PositiveInt = Field(..., alias="x-pricing-plan") + pricing_unit: PositiveInt = Field(..., alias="x-pricing-unit") + + model_config = ConfigDict(extra="ignore") + + @classmethod + def create_from_headers(cls, headers: Headers) -> "JobPricingSpecification | None": + try: + return TypeAdapter(cls).validate_python(headers) + except ValidationError: + return None + - def take_snapshot(self, event: str = "submitted"): - setattr(self, f"{event}_at", datetime.utcnow()) - return getattr(self, f"{event}_at") +class JobLog(BaseModel): + job_id: ProjectID + node_id: NodeID | None = None + log_level: LogLevelInt + messages: list[LogMessageStr] + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", + "node_id": "3742215e-6756-48d2-8b73-4d043065309f", + "log_level": logging.DEBUG, + "messages": ["PROGRESS: 5/10"], + } + } + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/jobs_filters.py b/services/api-server/src/simcore_service_api_server/models/schemas/jobs_filters.py new file mode 100644 index 00000000000..961cae8d22f --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/jobs_filters.py @@ -0,0 +1,58 @@ +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field, StringConstraints +from pydantic.config import JsonDict + + +class MetadataFilterItem(BaseModel): + name: Annotated[ + str, + StringConstraints(min_length=1, max_length=255), + Field(description="Name fo the metadata field"), + ] + pattern: Annotated[ + str, + StringConstraints(min_length=1, max_length=255), + Field(description="Exact value or glob pattern"), + ] + + +class JobMetadataFilter(BaseModel): + any: Annotated[ + list[MetadataFilterItem], + Field(description="Matches any custom metadata field (OR logic)"), + ] + # NOTE: AND logic shall be implemented as `all: list[MetadataFilterItem] | None = None` + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + "any": [ + { + "name": "solver_type", + "pattern": "FEM", + }, + { + "name": "mesh_cells", + "pattern": "1*", + }, + ] + }, + { + "any": [ + { + "name": "solver_type", + "pattern": 
"*CFD*", + } + ] + }, + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/licensed_items.py b/services/api-server/src/simcore_service_api_server/models/schemas/licensed_items.py new file mode 100644 index 00000000000..4080d4089ed --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/licensed_items.py @@ -0,0 +1,7 @@ +from models_library.services_types import ServiceRunID +from pydantic import BaseModel, PositiveInt + + +class LicensedItemCheckoutData(BaseModel): + number_of_seats: PositiveInt + service_run_id: ServiceRunID diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/meta.py b/services/api-server/src/simcore_service_api_server/models/schemas/meta.py index c590e6d7b4c..4251d0f2a77 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/meta.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/meta.py @@ -1,26 +1,22 @@ -from typing import Optional +from typing import Annotated -from pydantic import AnyHttpUrl, BaseModel, Field +from models_library.api_schemas__common.meta import BaseMeta +from pydantic import ConfigDict, HttpUrl -from ..basic_types import VersionStr +from ...models._utils_pydantic import UriSchema -class Meta(BaseModel): - name: str - version: VersionStr - released: Optional[dict[str, VersionStr]] = Field( - None, description="Maps every route's path tag with a released version" - ) - docs_url: AnyHttpUrl = "https://docs.osparc.io" - docs_dev_url: AnyHttpUrl = "https://api.osparc.io/dev/docs" - - class Config: - schema_extra = { +class Meta(BaseMeta): + docs_url: Annotated[HttpUrl, UriSchema()] + docs_dev_url: Annotated[HttpUrl, UriSchema()] + model_config = ConfigDict( + json_schema_extra={ "example": { "name": "simcore_service_foo", "version": "2.4.45", "released": {"v1": "1.3.4", "v2": "2.4.45"}, - "doc_url": "https://api.osparc.io/doc", - "doc_dev_url": "https://api.osparc.io/dev/doc", + "docs_url": "https://api.osparc.io/dev/doc", + "docs_dev_url": "https://api.osparc.io/dev/doc", } } + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/model_adapter.py b/services/api-server/src/simcore_service_api_server/models/schemas/model_adapter.py new file mode 100644 index 00000000000..8e0bcb3cba1 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/model_adapter.py @@ -0,0 +1,253 @@ +# Models added here "cover" models from within the deployment in order to restore backwards compatibility + +from datetime import date, datetime +from decimal import Decimal +from typing import Annotated, Literal, NotRequired + +from models_library.api_schemas_api_server.pricing_plans import ( + ServicePricingPlanGet as _ServicePricingPlanGet, +) +from models_library.api_schemas_webserver.licensed_items import ( + LicensedItemRpcGet as _LicensedItemGet, +) +from models_library.api_schemas_webserver.licensed_items import ( + LicensedResource as _LicensedResource, +) +from models_library.api_schemas_webserver.licensed_items import ( + LicensedResourceSource as _LicensedResourceSource, +) +from models_library.api_schemas_webserver.licensed_items import ( + LicensedResourceSourceFeaturesDict as _LicensedResourceSourceFeaturesDict, +) +from models_library.api_schemas_webserver.licensed_items_checkouts import ( + LicensedItemCheckoutRpcGet as _LicensedItemCheckoutRpcGet, +) +from 
models_library.api_schemas_webserver.products import ( + CreditPriceGet as _GetCreditPrice, +) +from models_library.api_schemas_webserver.resource_usage import ( + PricingUnitGet as _PricingUnitGet, +) +from models_library.api_schemas_webserver.wallets import ( + WalletGetWithAvailableCredits as _WalletGetWithAvailableCredits, +) +from models_library.basic_types import IDStr, NonNegativeDecimal +from models_library.groups import GroupID +from models_library.licenses import ( + LicensedItemID, + LicensedItemKey, + LicensedItemVersion, + LicensedResourceType, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanClassification, + PricingPlanId, + PricingUnitId, + UnitExtraInfoTier, +) +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.users import UserID +from models_library.wallets import WalletID, WalletStatus +from pydantic import ( + BaseModel, + ConfigDict, + Field, + HttpUrl, + NonNegativeFloat, + NonNegativeInt, + PlainSerializer, +) +from typing_extensions import TypedDict + + +class GetCreditPriceLegacy(BaseModel): + product_name: str = Field(alias="productName") + usd_per_credit: ( + Annotated[ + NonNegativeDecimal, + PlainSerializer(float, return_type=NonNegativeFloat, when_used="json"), + ] + | None + ) = Field( + ..., + description="Price of a credit in USD. " + "If None, then this product's price is UNDEFINED", + alias="usdPerCredit", + ) + min_payment_amount_usd: NonNegativeInt | None = Field( + ..., + description="Minimum amount (included) in USD that can be paid for this product" + "Can be None if this product's price is UNDEFINED", + alias="minPaymentAmountUsd", + ) + model_config = ConfigDict( + populate_by_name=True, + ) + + +assert set(GetCreditPriceLegacy.model_fields.keys()) == set( # nosec + _GetCreditPrice.model_fields.keys() +) + + +class PricingUnitGetLegacy(BaseModel): + pricing_unit_id: PricingUnitId = Field(alias="pricingUnitId") + unit_name: str = Field(alias="unitName") + unit_extra_info: UnitExtraInfoTier = Field( + alias="unitExtraInfo" + ) # <-- NOTE: API Server is interested only in the TIER type + current_cost_per_unit: Annotated[ + Decimal, PlainSerializer(float, return_type=NonNegativeFloat, when_used="json") + ] = Field(alias="currentCostPerUnit") + default: bool + model_config = ConfigDict( + populate_by_name=True, + ) + + +assert set(PricingUnitGetLegacy.model_fields.keys()) == set( # nosec + _PricingUnitGet.model_fields.keys() +) + + +class WalletGetWithAvailableCreditsLegacy(BaseModel): + wallet_id: WalletID = Field(alias="walletId") + name: IDStr + description: str | None = None + owner: GroupID + thumbnail: str | None = None + status: WalletStatus + created: datetime + modified: datetime + available_credits: Annotated[ + Decimal, PlainSerializer(float, return_type=NonNegativeFloat, when_used="json") + ] = Field(alias="availableCredits") + model_config = ConfigDict( + populate_by_name=True, + ) + + +assert set(WalletGetWithAvailableCreditsLegacy.model_fields.keys()) == set( # nosec + _WalletGetWithAvailableCredits.model_fields.keys() +) + + +class ServicePricingPlanGetLegacy(BaseModel): + pricing_plan_id: PricingPlanId = Field(alias="pricingPlanId") + display_name: str = Field(alias="displayName") + description: str + classification: PricingPlanClassification + created_at: datetime = Field(alias="createdAt") + pricing_plan_key: str = Field(alias="pricingPlanKey") + pricing_units: list[PricingUnitGetLegacy] = 
Field(alias="pricingUnits") + model_config = ConfigDict( + populate_by_name=True, + ) + + +assert set(ServicePricingPlanGetLegacy.model_fields.keys()) == set( # nosec + _ServicePricingPlanGet.model_fields.keys() +) + + +class LicensedResourceSourceFeaturesDict(TypedDict): + age: NotRequired[str] + date: date + ethnicity: NotRequired[str] + functionality: NotRequired[str] + height: NotRequired[str] + name: NotRequired[str] + sex: NotRequired[str] + species: NotRequired[str] + version: NotRequired[str] + weight: NotRequired[str] + + +assert set(LicensedResourceSourceFeaturesDict.__annotations__.keys()) == set( # nosec + _LicensedResourceSourceFeaturesDict.__annotations__.keys() +), "LicensedResourceSourceFeaturesDict keys do not match" + +for key in LicensedResourceSourceFeaturesDict.__annotations__: + assert ( # nosec + LicensedResourceSourceFeaturesDict.__annotations__[key] + == _LicensedResourceSourceFeaturesDict.__annotations__[key] + ), f"Type of {key} in LicensedResourceSourceFeaturesDict does not match" + + +class LicensedResourceSource(BaseModel): + id: int + description: str + thumbnail: str + features: LicensedResourceSourceFeaturesDict + doi: str | None + license_key: str + license_version: str + protection: Literal["Code", "PayPal"] + available_from_url: HttpUrl | None + + +assert set(LicensedResourceSource.model_fields.keys()) == set( # nosec + _LicensedResourceSource.model_fields.keys() +), "LicensedResourceSource keys do not match" + +for key in LicensedResourceSource.model_fields.keys(): + if key == "features": + continue + assert ( # nosec + LicensedResourceSource.__annotations__[key] + == _LicensedResourceSource.__annotations__[key] + ), f"Type of {key} in LicensedResourceSource does not match" + + +class LicensedResource(BaseModel): + source: LicensedResourceSource + category_id: IDStr + category_display: str + terms_of_use_url: HttpUrl | None + + +assert set(LicensedResource.__annotations__.keys()) == set( # nosec + _LicensedResource.__annotations__.keys() +), "LicensedResource keys do not match" + + +class LicensedItemGet(BaseModel): + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + display_name: str + licensed_resource_type: LicensedResourceType + licensed_resources: list[LicensedResource] + pricing_plan_id: PricingPlanId + is_hidden_on_market: bool + created_at: datetime + modified_at: datetime + model_config = ConfigDict( + populate_by_name=True, + ) + + +assert set(LicensedItemGet.model_fields.keys()) == set( # nosec + _LicensedItemGet.model_fields.keys() +) + + +class LicensedItemCheckoutGet(BaseModel): + licensed_item_checkout_id: LicensedItemCheckoutID + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + user_id: UserID + product_name: ProductName + started_at: datetime + stopped_at: datetime | None + num_of_seats: int + + +assert set(LicensedItemCheckoutGet.model_fields.keys()) == set( # nosec + _LicensedItemCheckoutRpcGet.model_fields.keys() +) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/profiles.py b/services/api-server/src/simcore_service_api_server/models/schemas/profiles.py index 3b15f2144e0..76b283aa4a9 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/profiles.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/profiles.py @@ -1,44 +1,54 @@ -from enum import Enum -from typing import Optional +from enum import auto from models_library.emails import LowerCaseEmailStr -from 
pydantic import BaseModel, Field +from models_library.users import FirstNameStr, LastNameStr, UserID +from models_library.utils.enums import StrAutoEnum +from pydantic import BaseModel, ConfigDict, Field, field_validator from ..domain.groups import Groups class ProfileCommon(BaseModel): - first_name: Optional[str] = Field(None, example="James") - last_name: Optional[str] = Field(None, example="Maxwell") + first_name: FirstNameStr | None = Field(None, examples=["James"]) + last_name: LastNameStr | None = Field(None, examples=["Maxwell"]) class ProfileUpdate(ProfileCommon): - pass + ... -# from simcore_postgres_database.models.users import UserRole -class UserRoleEnum(str, Enum): - # TODO: build from UserRole! or assert Role == UserRole - ANONYMOUS = "ANONYMOUS" - GUEST = "GUEST" - USER = "USER" - TESTER = "TESTER" - ADMIN = "ADMIN" +class UserRoleEnum(StrAutoEnum): + # NOTE: this is in sync with simcore_postgres_database.models.users.UserRole via testing + ANONYMOUS = auto() + GUEST = auto() + USER = auto() + TESTER = auto() + PRODUCT_OWNER = auto() + ADMIN = auto() class Profile(ProfileCommon): + id_: UserID = Field(alias="id") login: LowerCaseEmailStr role: UserRoleEnum - groups: Optional[Groups] = None - gravatar_id: Optional[str] = Field( + groups: Groups | None = None + gravatar_id: str | None = Field( None, description="md5 hash value of email to retrieve an avatar image from https://www.gravatar.com", max_length=40, ) - class Config: - schema_extra = { + @field_validator("role", mode="before") + @classmethod + def enforce_role_upper(cls, v): + if v: + return v.upper() + return v + + model_config = ConfigDict( + json_schema_extra={ "example": { + "id": "20", "first_name": "James", "last_name": "Maxwell", "login": "james-maxwell@itis.swiss", @@ -59,3 +69,4 @@ class Config: "gravatar_id": "9a8930a5b20d7048e37740bac5c1ca4f", } } + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/programs.py b/services/api-server/src/simcore_service_api_server/models/schemas/programs.py new file mode 100644 index 00000000000..305f9eb28cf --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/programs.py @@ -0,0 +1,114 @@ +from typing import Annotated + +from models_library.api_schemas_catalog.services import LatestServiceGet, ServiceGetV2 +from models_library.services import ServiceMetaDataPublished +from models_library.services_history import ServiceRelease +from models_library.services_regex import DYNAMIC_SERVICE_KEY_RE +from models_library.services_types import ServiceKey +from pydantic import ConfigDict, StringConstraints + +from ..api_resources import compose_resource_name +from ..basic_types import VersionStr +from ._base import ( + ApiServerOutputSchema, + BaseService, +) + +# - API will add flexibility to identify solver resources using aliases. Analogously to docker images e.g. 
a/b == a/b:latest == a/b:2.3 +# +LATEST_VERSION = "latest" + + +# SOLVER ---------- +# +PROGRAM_RESOURCE_NAME_RE = r"^programs/([^\s/]+)/releases/([\d\.]+)$" + + +ProgramKeyId = Annotated[ + str, StringConstraints(strip_whitespace=True, pattern=DYNAMIC_SERVICE_KEY_RE) +] + + +class Program(BaseService, ApiServerOutputSchema): + """A released program with a specific version""" + + version_display: str | None + + model_config = ConfigDict( + extra="ignore", + json_schema_extra={ + "example": { + "id": "simcore/services/dynamic/sim4life", + "version": "8.0.0", + "title": "Sim4life", + "description": "Simulation framework", + "maintainer": "info@itis.swiss", + "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fdynamic%2Fsim4life/releases/8.0.0", + "version_display": "8.0.0", + } + }, + ) + + @classmethod + def create_from_image(cls, image_meta: ServiceMetaDataPublished) -> "Program": + data = image_meta.model_dump( + include={ + "name", + "key", + "version", + "description", + "contact", + "version_display", + }, + ) + return cls( + id=data.pop("key"), + version=data.pop("version"), + title=data.pop("name"), + url=None, + version_display=data.pop("version_display"), + **data, + ) + + @classmethod + def create_from_service(cls, service: ServiceGetV2 | LatestServiceGet) -> "Program": + data = service.model_dump( + include={ + "name", + "key", + "version", + "description", + "contact", + "version_display", + }, + ) + return cls( + id=data.pop("key"), + version=data.pop("version"), + title=data.pop("name"), + url=None, + version_display=data.pop("version_display"), + **data, + ) + + @classmethod + def create_from_service_release( + cls, + *, + service_key: ServiceKey, + description: str, + name: str, + service: ServiceRelease + ) -> "Program": + return cls( + id=service_key, + version=service.version, + title=name, + url=None, + description=description, + version_display=service.version, + ) + + @classmethod + def compose_resource_name(cls, key: ProgramKeyId, version: VersionStr) -> str: + return compose_resource_name("programs", key, "releases", version) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py b/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py index fd0bb9a628a..63a383c4f2b 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/solvers.py @@ -1,14 +1,20 @@ -import urllib.parse -from typing import Any, Literal, Optional, Union +from typing import Annotated, Any, Literal, Self, TypeAlias -import packaging.version +from models_library.api_schemas_catalog.services import ( + LatestServiceGet, + ServiceGetV2, + ServiceSummary, +) from models_library.basic_regex import PUBLIC_VARIABLE_NAME_RE -from models_library.services import COMPUTATIONAL_SERVICE_KEY_RE, ServiceDockerData -from packaging.version import LegacyVersion, Version -from pydantic import BaseModel, Extra, Field, HttpUrl, constr - +from models_library.emails import LowerCaseEmailStr +from models_library.services import ServiceMetaDataPublished +from models_library.services_history import ServiceRelease +from models_library.services_regex import COMPUTATIONAL_SERVICE_KEY_RE +from models_library.services_types import ServiceKey +from pydantic import BaseModel, ConfigDict, Field, StringConstraints + +from ...models.schemas._base import BaseService from ..api_resources import compose_resource_name -from ..basic_types import VersionStr # NOTE: # - API 
does NOT impose prefix (simcore)/(services)/comp because does not know anything about registry deployed. This constraint @@ -24,108 +30,114 @@ LATEST_VERSION = "latest" -# SOLVER ---------- -# SOLVER_RESOURCE_NAME_RE = r"^solvers/([^\s/]+)/releases/([\d\.]+)$" -SolverKeyId = constr( - strip_whitespace=True, - regex=COMPUTATIONAL_SERVICE_KEY_RE, - # TODO: should we use here a less restrictive regex that does not impose simcore/comp/?? this should be catalog responsibility -) +SolverKeyId = Annotated[ + str, StringConstraints(strip_whitespace=True, pattern=COMPUTATIONAL_SERVICE_KEY_RE) +] -class Solver(BaseModel): - """A released solver with a specific version""" - id: SolverKeyId = Field( - ..., - description="Solver identifier", - ) - version: VersionStr = Field( - ..., - description="semantic version number of the node", - ) +class Solver(BaseService): + """A released solver with a specific version""" - # Human readables Identifiers - title: str = Field(..., description="Human readable name") - description: Optional[str] - maintainer: str - # TODO: consider released: Optional[datetime] required? - # TODO: consider version_aliases: list[str] = [] # remaining tags + maintainer: Annotated[str, Field(description="Maintainer of the solver")] - # Get links to other resources - url: Optional[HttpUrl] = Field(..., description="Link to get this resource") + version_display: Annotated[ + str | None, + Field(description="A user-friendly or marketing name for the release."), + ] = None - class Config: - extra = Extra.ignore - schema_extra = { + model_config = ConfigDict( + extra="ignore", + json_schema_extra={ "example": { "id": "simcore/services/comp/isolve", "version": "2.1.1", + "version_display": "2.1.1-2023-10-01", "title": "iSolve", "description": "EM solver", "maintainer": "info@itis.swiss", "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.1", } - } + }, + ) @classmethod - def create_from_image(cls, image_meta: ServiceDockerData) -> "Solver": - data = image_meta.dict( - include={"name", "key", "version", "description", "contact"}, - ) - + def create_from_image(cls, image_meta: ServiceMetaDataPublished) -> Self: return cls( - id=data.pop("key"), - version=data.pop("version"), - title=data.pop("name"), - maintainer=data.pop("contact"), + id=image_meta.key, + version=image_meta.version, + title=image_meta.name, + description=image_meta.description, + maintainer=image_meta.contact, + version_display=image_meta.version_display, url=None, - **data, ) - @property - def pep404_version(self) -> Union[Version, LegacyVersion]: - """Rich version type that can be used e.g. 
to compare""" - return packaging.version.parse(self.version) - - @property - def url_friendly_id(self) -> str: - """Use to pass id as parameter in urls""" - return urllib.parse.quote_plus(self.id) + @classmethod + def create_from_service( + cls, service: ServiceGetV2 | LatestServiceGet | ServiceSummary + ) -> Self: + # Common fields in all service types + maintainer = "" + if hasattr(service, "contact") and service.contact: + maintainer = service.contact - @property - def resource_name(self) -> str: - """Relative resource name""" - return self.compose_resource_name(self.id, self.version) + return cls( + id=service.key, + version=service.version, + title=service.name, + description=service.description, + maintainer=maintainer or "UNDEFINED", + version_display=( + service.version_display if hasattr(service, "version_display") else None + ), + url=None, + ) - name = resource_name # API standards notation (see api_resources.py) + @classmethod + def create_from_service_release( + cls, + *, + service_key: ServiceKey, + description: str, + contact: LowerCaseEmailStr | None, + name: str, + service: ServiceRelease + ) -> "Solver": + return cls( + id=service_key, + version=service.version, + title=name, + url=None, + description=description, + maintainer=contact or "", + ) @classmethod - def compose_resource_name(cls, solver_key, solver_version) -> str: - return compose_resource_name("solvers", solver_key, "releases", solver_version) + def compose_resource_name(cls, key: str, version: str) -> str: + return compose_resource_name("solvers", key, "releases", version) -PortKindStr = Literal["input", "output"] +PortKindStr: TypeAlias = Literal["input", "output"] class SolverPort(BaseModel): key: str = Field( ..., description="port identifier name", - regex=PUBLIC_VARIABLE_NAME_RE, + pattern=PUBLIC_VARIABLE_NAME_RE, title="Key name", ) kind: PortKindStr - content_schema: Optional[dict[str, Any]] = Field( + content_schema: dict[str, Any] | None = Field( None, description="jsonschema for the port's value. 
SEE https://json-schema.org", ) - - class Config: - extra = Extra.ignore - schema_extra = { + model_config = ConfigDict( + extra="ignore", + json_schema_extra={ "example": { "key": "input_2", "kind": "input", @@ -137,4 +149,5 @@ class Config: "maximum": 5, }, } - } + }, + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/solvers_filters.py b/services/api-server/src/simcore_service_api_server/models/schemas/solvers_filters.py new file mode 100644 index 00000000000..ce442b24d33 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/models/schemas/solvers_filters.py @@ -0,0 +1,27 @@ +from typing import Annotated + +from pydantic import BaseModel, ConfigDict, Field + + +class SolversListFilters(BaseModel): + """Filters for listing solvers""" + + solver_id: Annotated[ + str | None, + Field( + description="Filter by solver ID pattern", + examples=["simcore/services/comp/itis/sleeper", "simcore/services/comp/*"], + ), + ] = None + + version_display: Annotated[ + str | None, + Field( + description="Filter by version display pattern", + examples=["2.1.1-2023-10-01", "*2023*"], + ), + ] = None + + model_config = ConfigDict( + extra="ignore", + ) diff --git a/services/api-server/src/simcore_service_api_server/models/schemas/studies.py b/services/api-server/src/simcore_service_api_server/models/schemas/studies.py index 7bef4e65098..e0686713508 100644 --- a/services/api-server/src/simcore_service_api_server/models/schemas/studies.py +++ b/services/api-server/src/simcore_service_api_server/models/schemas/studies.py @@ -1,16 +1,56 @@ -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from pydantic import Field +from typing import Annotated, TypeAlias -from .solvers import SolverPort +from models_library import projects, projects_nodes_io +from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field -StudyID = ProjectID +from ...models._utils_pydantic import UriSchema +from .. import api_resources +from . import solvers +StudyID: TypeAlias = projects.ProjectID +NodeName: TypeAlias = str +DownloadLink: TypeAlias = Annotated[AnyHttpUrl, UriSchema()] -class StudyPort(SolverPort): - key: NodeID = Field( + +class Study(BaseModel): + uid: StudyID + title: str | None = None + description: str | None = None + + @classmethod + def compose_resource_name(cls, study_key) -> api_resources.RelativeResourceName: + return api_resources.compose_resource_name("studies", study_key) + + +class StudyPort(solvers.SolverPort): + key: projects_nodes_io.NodeID = Field( # type: ignore[assignment] ..., description="port identifier name." 
"Correponds to the UUID of the parameter/probe node in the study", title="Key name", ) + model_config = ConfigDict( + extra="ignore", + json_schema_extra={ + "example": { + "key": "f763658f-a89a-4a90-ace4-c44631290f12", + "kind": "input", + "content_schema": { + "title": "Sleep interval", + "type": "integer", + "x_unit": "second", + "minimum": 0, + "maximum": 5, + }, + } + }, + ) + + +class LogLink(BaseModel): + node_name: NodeName + download_link: DownloadLink + + +class JobLogsMap(BaseModel): + log_links: list[LogLink] = Field(..., description="Array of download links") diff --git a/services/api-server/src/simcore_service_api_server/models/types.py b/services/api-server/src/simcore_service_api_server/models/types.py index 4f88b0c4a03..0e987c1980c 100644 --- a/services/api-server/src/simcore_service_api_server/models/types.py +++ b/services/api-server/src/simcore_service_api_server/models/types.py @@ -1,7 +1,7 @@ -from typing import Any, Union +from typing import Any, TypeAlias -AnyDict = dict[str, Any] -ListAnyDict = list[AnyDict] +AnyDict: TypeAlias = dict[str, Any] +ListAnyDict: TypeAlias = list[AnyDict] # Represent the type returned by e.g. json.load -JSON = Union[AnyDict, ListAnyDict] +AnyJson: TypeAlias = AnyDict | ListAnyDict diff --git a/services/api-server/src/simcore_service_api_server/modules/catalog.py b/services/api-server/src/simcore_service_api_server/modules/catalog.py deleted file mode 100644 index 3863cdb6e2f..00000000000 --- a/services/api-server/src/simcore_service_api_server/modules/catalog.py +++ /dev/null @@ -1,209 +0,0 @@ -import logging -import urllib.parse -from dataclasses import dataclass -from operator import attrgetter -from typing import Callable, Optional - -from fastapi import FastAPI -from models_library.emails import LowerCaseEmailStr -from models_library.services import ServiceDockerData, ServiceType -from pydantic import Extra, ValidationError, parse_obj_as -from settings_library.catalog import CatalogSettings - -from ..models.schemas.solvers import ( - LATEST_VERSION, - Solver, - SolverKeyId, - SolverPort, - VersionStr, -) -from ..utils.client_base import BaseServiceClientApi, setup_client_instance - -## from ..utils.client_decorators import JSON, handle_errors, handle_retry - -logger = logging.getLogger(__name__) - - -SolverNameVersionPair = tuple[SolverKeyId, str] - - -class TruncatedCatalogServiceOut(ServiceDockerData): - """ - This model is used to truncate the response of the catalog, whose schema is - in services/catalog/src/simcore_service_catalog/models/schemas/services.py::ServiceOut - and is a superset of ServiceDockerData. - - We do not use directly ServiceDockerData because it will only consume the exact fields - (it is configured as Extra.forbid). Instead we inherit from it, override this configuration - and add an extra field that we want to capture from ServiceOut. - - Ideally the rest of the response is dropped so here it would - perhaps make more sense to use something like graphql - that asks only what is needed. 
- """ - - owner: Optional[LowerCaseEmailStr] - - class Config: - extra = Extra.ignore - - # Converters - def to_solver(self) -> Solver: - data = self.dict( - include={"name", "key", "version", "description", "contact", "owner"}, - ) - - return Solver( - id=data.pop("key"), - version=data.pop("version"), - title=data.pop("name"), - maintainer=data.pop("owner") or data.pop("contact"), - url=None, - **data, - ) - - -# API CLASS --------------------------------------------- -# -# - Error handling: What do we reraise, suppress, transform??? -# -# -# TODO: handlers should not capture outputs -# @handle_errors("catalog", logger, return_json=True) -# @handle_retry(logger) -# async def get(self, path: str, *args, **kwargs) -> JSON: -# return await self.client.get(path, *args, **kwargs) - - -@dataclass -class CatalogApi(BaseServiceClientApi): - """ - This class acts a proxy of the catalog service - It abstracts request to the catalog API service - """ - - async def list_solvers( - self, - user_id: int, - *, - product_name: str, - predicate: Optional[Callable[[Solver], bool]] = None, - ) -> list[Solver]: - resp = await self.client.get( - "/services", - params={"user_id": user_id, "details": True}, - headers={"x-simcore-products-name": product_name}, - ) - resp.raise_for_status() - - # TODO: move this sorting down to catalog service? - solvers = [] - for data in resp.json(): - try: - service = TruncatedCatalogServiceOut.parse_obj(data) - if service.service_type == ServiceType.COMPUTATIONAL: - solver = service.to_solver() - if predicate is None or predicate(solver): - solvers.append(solver) - - except ValidationError as err: - # NOTE: For the moment, this is necessary because there are no guarantees - # at the image registry. Therefore we exclude and warn - # invalid items instead of returning error - logger.warning( - "Skipping invalid service returned by catalog '%s': %s", - data, - err, - ) - return solvers - - async def get_solver( - self, user_id: int, name: SolverKeyId, version: VersionStr, *, product_name: str - ) -> Solver: - - assert version != LATEST_VERSION # nosec - - service_key = urllib.parse.quote_plus(name) - service_version = version - - resp = await self.client.get( - f"/services/{service_key}/{service_version}", - params={"user_id": user_id}, - headers={"x-simcore-products-name": product_name}, - ) - resp.raise_for_status() - - service = TruncatedCatalogServiceOut.parse_obj(resp.json()) - assert ( # nosec - service.service_type == ServiceType.COMPUTATIONAL - ), "Expected by SolverName regex" - - return service.to_solver() - - async def get_solver_ports( - self, user_id: int, name: SolverKeyId, version: VersionStr, *, product_name: str - ): - - assert version != LATEST_VERSION # nosec - - service_key = urllib.parse.quote_plus(name) - service_version = version - - resp = await self.client.get( - f"/services/{service_key}/{service_version}/ports", - params={"user_id": user_id}, - headers={"x-simcore-products-name": product_name}, - ) - resp.raise_for_status() - - solver_ports = parse_obj_as(list[SolverPort], resp.json()) - return solver_ports - - async def list_latest_releases( - self, user_id: int, *, product_name: str - ) -> list[Solver]: - solvers: list[Solver] = await self.list_solvers( - user_id, product_name=product_name - ) - - latest_releases = {} - for solver in solvers: - latest = latest_releases.setdefault(solver.id, solver) - if latest.pep404_version < solver.pep404_version: - latest_releases[solver.id] = solver - - return list(latest_releases.values()) - - async def 
list_solver_releases( - self, user_id: int, solver_key: SolverKeyId, *, product_name: str - ) -> list[Solver]: - def _this_solver(solver: Solver) -> bool: - return solver.id == solver_key - - releases: list[Solver] = await self.list_solvers( - user_id, predicate=_this_solver, product_name=product_name - ) - return releases - - async def get_latest_release( - self, user_id: int, solver_key: SolverKeyId, *, product_name: str - ) -> Solver: - releases = await self.list_solver_releases( - user_id, solver_key, product_name=product_name - ) - - # raises IndexError if None - latest = sorted(releases, key=attrgetter("pep404_version"))[-1] - return latest - - -# MODULES APP SETUP ------------------------------------------------------------- - - -def setup(app: FastAPI, settings: CatalogSettings) -> None: - if not settings: - settings = CatalogSettings() - - setup_client_instance( - app, CatalogApi, api_baseurl=settings.api_base_url, service_name="catalog" - ) diff --git a/services/api-server/src/simcore_service_api_server/modules/director_v2.py b/services/api-server/src/simcore_service_api_server/modules/director_v2.py deleted file mode 100644 index 21426b4be53..00000000000 --- a/services/api-server/src/simcore_service_api_server/modules/director_v2.py +++ /dev/null @@ -1,225 +0,0 @@ -import logging -from contextlib import contextmanager -from typing import Optional -from uuid import UUID - -from fastapi import FastAPI -from fastapi.exceptions import HTTPException -from httpx import HTTPStatusError, codes -from models_library.clusters import ClusterID -from models_library.projects_nodes import NodeID -from models_library.projects_pipeline import ComputationTask -from models_library.projects_state import RunningState -from pydantic import AnyHttpUrl, AnyUrl, BaseModel, Field, PositiveInt, parse_raw_as -from starlette import status - -from ..core.settings import DirectorV2Settings -from ..models.schemas.jobs import PercentageInt -from ..utils.client_base import BaseServiceClientApi, setup_client_instance - -logger = logging.getLogger(__name__) - - -# API MODELS --------------------------------------------- -# NOTE: as services/director-v2/src/simcore_service_director_v2/models/schemas/comp_tasks.py -# TODO: shall schemas of internal APIs be in models_library as well?? or is against - - -class ComputationTaskGet(ComputationTask): - url: AnyHttpUrl = Field( - ..., description="the link where to get the status of the task" - ) - stop_url: Optional[AnyHttpUrl] = Field( - None, description="the link where to stop the task" - ) - - def guess_progress(self) -> PercentageInt: - # guess progress based on self.state - # FIXME: incomplete! 
- if self.state in [RunningState.SUCCESS, RunningState.FAILED]: - return 100 - return 0 - - -class TaskLogFileGet(BaseModel): - task_id: NodeID - download_link: Optional[AnyUrl] = Field( - None, description="Presigned link for log file or None if still not available" - ) - - -NodeName = str -DownloadLink = AnyUrl - -# API CLASS --------------------------------------------- - - -@contextmanager -def handle_errors_context(project_id: UUID): - try: - yield - - # except ValidationError - except HTTPStatusError as err: - msg = ( - f"Failed {err.request.url} with status={err.response.status_code}: {err.response.json()}", - ) - if codes.is_client_error(err.response.status_code): - # client errors are mapped - logger.debug(msg) - if err.response.status_code == status.HTTP_404_NOT_FOUND: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Job {project_id} not found", - ) from err - - raise err - - # server errors are logged and re-raised as 503 - assert codes.is_server_error(err.response.status_code) - logger.exception( - "director-v2 service failed: %s. Re-rasing as service unavailable (503)", - msg, - ) - raise HTTPException( - status_code=status.HTTP_503_SERVICE_UNAVAILABLE, - detail="Director service failed", - ) from err - - -class DirectorV2Api(BaseServiceClientApi): - # NOTE: keep here tmp as reference - # @handle_errors("director", logger, return_json=True) - # @handle_retry(logger) - # async def get(self, path: str, *args, **kwargs) -> JSON: - # return await self.client.get(path, *args, **kwargs) - - # director2 API --------------------------- - # TODO: error handling - # - # HTTPStatusError: 404 Not Found - # ValidationError - # ServiceUnabalabe: 503 - - async def create_computation( - self, - project_id: UUID, - user_id: PositiveInt, - product_name: str, - ) -> ComputationTaskGet: - resp = await self.client.post( - "/v2/computations", - json={ - "user_id": user_id, - "project_id": str(project_id), - "start_pipeline": False, - "product_name": product_name, - }, - ) - - resp.raise_for_status() - computation_task = ComputationTaskGet(**resp.json()) - return computation_task - - async def start_computation( - self, - project_id: UUID, - user_id: PositiveInt, - product_name: str, - cluster_id: Optional[ClusterID] = None, - ) -> ComputationTaskGet: - with handle_errors_context(project_id): - extras = {} - if cluster_id is not None: - extras["cluster_id"] = cluster_id - - resp = await self.client.post( - "/v2/computations", - json={ - "user_id": user_id, - "project_id": str(project_id), - "start_pipeline": True, - "product_name": product_name, - **extras, - }, - ) - resp.raise_for_status() - computation_task = ComputationTaskGet(**resp.json()) - return computation_task - - async def get_computation( - self, project_id: UUID, user_id: PositiveInt - ) -> ComputationTaskGet: - resp = await self.client.get( - f"/v2/computations/{project_id}", - params={ - "user_id": user_id, - }, - ) - resp.raise_for_status() - computation_task = ComputationTaskGet(**resp.json()) - return computation_task - - async def stop_computation( - self, project_id: UUID, user_id: PositiveInt - ) -> ComputationTaskGet: - data = await self.client.post( - f"/v2/computations/{project_id}:stop", - json={ - "user_id": user_id, - }, - ) - - computation_task = ComputationTaskGet(**data.json()) - return computation_task - - async def delete_computation(self, project_id: UUID, user_id: PositiveInt): - await self.client.request( - "DELETE", - f"/v2/computations/{project_id}", - json={ - "user_id": user_id, - 
"force": True, - }, - ) - - async def get_computation_logs( - self, user_id: PositiveInt, project_id: UUID - ) -> dict[NodeName, DownloadLink]: - resp = await self.client.get( - f"/v2/computations/{project_id}/tasks/-/logfile", - params={ - "user_id": user_id, - }, - ) - # probably not found - resp.raise_for_status() - payload = parse_raw_as(list[TaskLogFileGet], resp.text or "[]") - return {r.task_id: r.download_link for r in payload} - - # TODO: HIGHER lever interface with job* resources - # or better in another place? - async def create_job(self): - pass - - async def list_jobs(self): - pass - - async def get_job(self): - pass - - -# MODULES APP SETUP ------------------------------------------------------------- - - -def setup(app: FastAPI, settings: DirectorV2Settings) -> None: - if not settings: - settings = DirectorV2Settings() - - setup_client_instance( - app, - DirectorV2Api, - # WARNING: it has /v0 and /v2 prefixes - api_baseurl=f"http://{settings.DIRECTOR_V2_HOST}:{settings.DIRECTOR_V2_PORT}", - service_name="director_v2", - ) diff --git a/services/api-server/src/simcore_service_api_server/modules/remote_debug.py b/services/api-server/src/simcore_service_api_server/modules/remote_debug.py deleted file mode 100644 index 0cb05e84805..00000000000 --- a/services/api-server/src/simcore_service_api_server/modules/remote_debug.py +++ /dev/null @@ -1,34 +0,0 @@ -""" Setup remote debugger with Python Tools for Visual Studio (PTVSD) - -""" -import logging - -from fastapi import FastAPI - -logger = logging.getLogger(__name__) - - -def setup(app: FastAPI): - API_SERVER_REMOTE_DEBUG_PORT = app.state.settings.API_SERVER_REMOTE_DEBUG_PORT - - def on_startup() -> None: - try: - logger.debug("Enabling attach ptvsd ...") - # - # SEE https://github.com/microsoft/ptvsd#enabling-debugging - # - import ptvsd - - ptvsd.enable_attach( - address=("0.0.0.0", API_SERVER_REMOTE_DEBUG_PORT), # nosec - ) # nosec - except ImportError as err: - raise RuntimeError( - "Cannot enable remote debugging. 
Please install ptvsd first" - ) from err - - logger.info( - "Remote debugging enabled: listening port %s", API_SERVER_REMOTE_DEBUG_PORT - ) - - app.add_event_handler("startup", on_startup) diff --git a/services/api-server/src/simcore_service_api_server/modules/storage.py b/services/api-server/src/simcore_service_api_server/modules/storage.py deleted file mode 100644 index c3dc8b04a7a..00000000000 --- a/services/api-server/src/simcore_service_api_server/modules/storage.py +++ /dev/null @@ -1,151 +0,0 @@ -import logging -import re -import urllib.parse -from mimetypes import guess_type -from uuid import UUID - -from fastapi import FastAPI -from models_library.api_schemas_storage import FileMetaDataArray -from models_library.api_schemas_storage import FileMetaDataGet as StorageFileMetaData -from models_library.api_schemas_storage import FileUploadSchema, PresignedLink -from models_library.generics import Envelope - -from ..core.settings import StorageSettings -from ..models.schemas.files import File -from ..utils.client_base import BaseServiceClientApi, setup_client_instance - -## from ..utils.client_decorators import JSON, handle_errors, handle_retry - -logger = logging.getLogger(__name__) - - -FILE_ID_PATTERN = re.compile(r"^api\/(?P[\w-]+)\/(?P.+)$") - - -def to_file_api_model(stored_file_meta: StorageFileMetaData) -> File: - # extracts fields from api/{file_id}/{filename} - match = FILE_ID_PATTERN.match(stored_file_meta.file_id or "") - if not match: - raise ValueError(f"Invalid file_id {stored_file_meta.file_id} in file metadata") - - file_id, filename = match.groups() - - meta = File( - id=file_id, - filename=filename, - # FIXME: UploadFile gets content from the request header while here is - # mimetypes.guess_type used. Sometimes it does not match. - # Add column in meta_data table of storage and stop guessing :-) - content_type=guess_type(filename)[0] or "application/octet-stream", - checksum=stored_file_meta.entity_tag, - ) - return meta - - -class StorageApi(BaseServiceClientApi): - # - # All files created via the API are stored in simcore-s3 as objects with name pattern "api/{file_id}/{filename.ext}" - # - SIMCORE_S3_ID = 0 - - # FIXME: error handling and retrying policies? 
- # @handle_errors("storage", logger, return_json=True) - # @handle_retry(logger) - # async def get(self, path: str, *args, **kwargs) -> JSON: - # return await self.client.get(path, *args, **kwargs) - - async def list_files(self, user_id: int) -> list[StorageFileMetaData]: - """Lists metadata of all s3 objects name as api/* from a given user""" - resp = await self.client.post( - "/simcore-s3/files/metadata:search", - params={ - "user_id": str(user_id), - "startswith": "api/", - }, - ) - # FIXME: handle HTTPStatusError - resp.raise_for_status() - - files_metadata = FileMetaDataArray(__root__=resp.json()["data"] or []) - return files_metadata.__root__ - - async def search_files( - self, user_id: int, file_id: UUID - ) -> list[StorageFileMetaData]: - # NOTE: can NOT use /locations/0/files/metadata with uuid_filter=api/ because - # logic in storage 'wrongly' assumes that all data is associated to a project and - # here there is no project, so it would always returns an empty - resp = await self.client.post( - "/simcore-s3/files/metadata:search", - params={ - "user_id": str(user_id), - "startswith": f"api/{file_id}", - }, - ) - files_metadata = FileMetaDataArray(__root__=resp.json()["data"] or []) - return files_metadata.__root__ - - async def get_download_link( - self, user_id: int, file_id: UUID, file_name: str - ) -> str: - object_path = urllib.parse.quote_plus(f"api/{file_id}/{file_name}") - - resp = await self.client.get( - f"/locations/{self.SIMCORE_S3_ID}/files/{object_path}", - params={"user_id": str(user_id)}, - ) - - presigned_link = PresignedLink.parse_obj(resp.json()["data"]) - return presigned_link.link - - async def get_upload_links( - self, user_id: int, file_id: UUID, file_name: str - ) -> FileUploadSchema: - object_path = urllib.parse.quote_plus(f"api/{file_id}/{file_name}") - - resp = await self.client.put( - f"/locations/{self.SIMCORE_S3_ID}/files/{object_path}", - params={"user_id": user_id, "file_size": 0}, - ) - enveloped_data = Envelope[FileUploadSchema].parse_obj(resp.json()) - assert enveloped_data.data # nosec - return enveloped_data.data - - async def create_soft_link( - self, user_id: int, target_s3_path: str, as_file_id: UUID - ) -> File: - assert len(target_s3_path.split("/")) == 3 # nosec - - # define api-prefixed object-path for link - file_id = as_file_id - file_name = target_s3_path.split("/")[-1] - link_path = f"api/{file_id}/{file_name}" - - file_id = urllib.parse.quote_plus(target_s3_path) - - # ln makes links between files - # ln TARGET LINK_NAME - resp = await self.client.post( - f"/files/{file_id}:soft-copy", - params={"user_id": user_id}, - json={"link_id": link_path}, - ) - # FIXME: handle errors properly - resp.raise_for_status() - - # FIXME: was hanging when resp.join()["data"] -> None - stored_file_meta = StorageFileMetaData.parse_obj(resp.json()["data"]) - file_meta: File = to_file_api_model(stored_file_meta) - return file_meta - - -# MODULES APP SETUP ------------------------------------------------------------- - - -def setup(app: FastAPI, settings: StorageSettings) -> None: - if not settings: - settings = StorageSettings() - - setup_client_instance( - app, StorageApi, api_baseurl=settings.base_url, service_name="storage" - ) diff --git a/services/api-server/src/simcore_service_api_server/modules/webserver.py b/services/api-server/src/simcore_service_api_server/modules/webserver.py deleted file mode 100644 index c3cbe09742b..00000000000 --- a/services/api-server/src/simcore_service_api_server/modules/webserver.py +++ /dev/null @@ -1,262 +0,0 @@ 
-import base64 -import json -import logging -from collections import deque -from contextlib import suppress -from dataclasses import dataclass -from typing import Any, Optional -from uuid import UUID - -from cryptography import fernet -from fastapi import FastAPI, HTTPException -from httpx import AsyncClient, Response -from models_library.projects import ProjectID -from pydantic import ValidationError -from servicelib.aiohttp.long_running_tasks.server import TaskStatus -from starlette import status -from tenacity import TryAgain -from tenacity._asyncio import AsyncRetrying -from tenacity.after import after_log -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -from ..core.settings import WebServerSettings -from ..models.domain.projects import NewProjectIn, Project -from ..models.types import JSON, ListAnyDict -from ..utils.client_base import BaseServiceClientApi, setup_client_instance - -logger = logging.getLogger(__name__) - - -@dataclass -class AuthSession: - """ - - wrapper around thin-client to simplify webserver's API - - sets endspoint upon construction - - MIME type: application/json - - processes responses, returning data or raising formatted HTTP exception - - The lifetime of an AuthSession is ONE request. - - SEE services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py - """ - - client: AsyncClient # Its lifetime is attached to app - vtag: str - session_cookies: Optional[dict] = None - - @classmethod - def create(cls, app: FastAPI, session_cookies: dict): - return cls( - client=app.state.webserver_client, - vtag=app.state.settings.API_SERVER_WEBSERVER.WEBSERVER_VTAG, - session_cookies=session_cookies, - ) - - @classmethod - def _postprocess(cls, resp: Response) -> Optional[JSON]: - # enveloped answer - data, error = None, None - - if resp.status_code != status.HTTP_204_NO_CONTENT: - try: - body = resp.json() - data, error = body.get("data"), body.get("error") - except json.JSONDecodeError: - logger.warning( - "Failed to unenvelop webserver response %s", - f"{resp.text=}", - exc_info=True, - ) - - if resp.is_server_error: - logger.error( - "webserver error %s [%s]: %s", - f"{resp.status_code=}", - f"{resp.reason_phrase=}", - error, - ) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) - - if resp.is_client_error: - msg = error or resp.reason_phrase - raise HTTPException(resp.status_code, detail=msg) - - return data - - # OPERATIONS - # TODO: refactor and code below - # TODO: policy to retry if NetworkError/timeout? - # TODO: add ping to healthcheck - - async def get(self, path: str) -> Optional[JSON]: - url = path.lstrip("/") - try: - resp = await self.client.get(url, cookies=self.session_cookies) - except Exception as err: - # FIXME: error handling - logger.exception("Failed to get %s", url) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err - - return self._postprocess(resp) - - async def put(self, path: str, body: dict) -> Optional[JSON]: - url = path.lstrip("/") - try: - resp = await self.client.put(url, json=body, cookies=self.session_cookies) - except Exception as err: - logger.exception("Failed to put %s", url) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err - - return self._postprocess(resp) - - # PROJECTS resource --- - # TODO: error handling! 
- - async def create_project(self, project: NewProjectIn): - # POST /projects --> 202 - resp = await self.client.post( - "/projects", - params={"hidden": True}, - data=project.json( - by_alias=True, exclude={"state"} - ), ## FIXME: REEAAAALY HACKY! - cookies=self.session_cookies, - ) - data: Optional[JSON] = self._postprocess(resp) - assert data # nosec - assert isinstance(data, dict) # nosec - - # NOTE: /v0 is already included in the http client base_url - status_url = data["status_href"].lstrip(f"/{self.vtag}") - result_url = data["result_href"].lstrip(f"/{self.vtag}") - # GET task status now until done - async for attempt in AsyncRetrying( - wait=wait_fixed(0.5), - stop=stop_after_delay(60), - reraise=True, - before_sleep=before_sleep_log(logger, logging.INFO), - after=after_log(logger, log_level=logging.ERROR), - ): - with attempt: - data: Optional[JSON] = await self.get( - status_url, - ) - task_status = TaskStatus.parse_obj(data) - if not task_status.done: - raise TryAgain( - "Timed out creating project. TIP: Try again, or contact oSparc support if this is happening repeatedly" - ) - data: Optional[JSON] = await self.get( - f"{result_url}", - ) - return Project.parse_obj(data) - - async def get_project(self, project_id: UUID) -> Project: - resp = await self.client.get( - f"/projects/{project_id}", cookies=self.session_cookies - ) - - data: Optional[JSON] = self._postprocess(resp) - return Project.parse_obj(data) - - async def list_projects(self, solver_name: str) -> list[Project]: - # TODO: pagination? - resp = await self.client.get( - "/projects", - params={"type": "user", "show_hidden": True}, - cookies=self.session_cookies, - ) - - data: ListAnyDict = self._postprocess(resp) or [] - - # FIXME: move filter to webserver API (next PR) - projects: deque[Project] = deque() - for prj in data: - possible_job_name = prj.get("name", "") - if possible_job_name.startswith(solver_name): - try: - projects.append(Project.parse_obj(prj)) - except ValidationError as err: - logger.warning( - "Invalid prj %s [%s]: %s", prj.get("uuid"), solver_name, err - ) - - return list(projects) - - async def get_project_metadata_ports( - self, project_id: ProjectID - ) -> list[dict[str, Any]]: - """ - maps GET "/projects/{study_id}/metadata/ports", unenvelopes - and returns data - """ - resp = await self.client.get( - f"/projects/{project_id}/metadata/ports", - cookies=self.session_cookies, - ) - data = self._postprocess(resp) - assert data - assert isinstance(data, list) - return data - - -def _get_secret_key(settings: WebServerSettings): - secret_key_bytes = settings.WEBSERVER_SESSION_SECRET_KEY.get_secret_value().encode( - "utf-8" - ) - while len(secret_key_bytes) < 32: - secret_key_bytes += secret_key_bytes - secret_key = secret_key_bytes[:32] - - if isinstance(secret_key, str): - pass - elif isinstance(secret_key, (bytes, bytearray)): - secret_key = base64.urlsafe_b64encode(secret_key) - return secret_key - - -class WebserverApi(BaseServiceClientApi): - """Access to web-server API""" - - # def create_auth_session(self, session_cookies) -> AuthSession: - # """ Needed per request, so it can perform """ - # return AuthSession(client=self.client, vtag="v0", session_cookies=session_cookies) - - -# MODULES APP SETUP ------------------------------------------------------------- - - -def setup(app: FastAPI, settings: Optional[WebServerSettings] = None) -> None: - if not settings: - settings = WebServerSettings.create_from_envs() - - assert settings is not None # nosec - - setup_client_instance( - app, 
WebserverApi, api_baseurl=settings.base_url, service_name="webserver" - ) - - # TODO: old startup. need to integrat - # TODO: init client and then build sessions from client using depenencies - - def on_startup() -> None: - # normalize & encrypt - secret_key = settings.WEBSERVER_SESSION_SECRET_KEY.get_secret_value() - app.state.webserver_fernet = fernet.Fernet(secret_key) - - # init client - logger.debug("Setup webserver at %s...", settings.base_url) - - client = AsyncClient(base_url=settings.base_url) - app.state.webserver_client = client - - async def on_shutdown() -> None: - with suppress(AttributeError): - client: AsyncClient = app.state.webserver_client - await client.aclose() - del app.state.webserver_client - logger.debug("Webserver closed successfully") - - app.add_event_handler("startup", on_startup) - app.add_event_handler("shutdown", on_shutdown) diff --git a/services/api-server/src/simcore_service_api_server/repository/__init__.py b/services/api-server/src/simcore_service_api_server/repository/__init__.py new file mode 100644 index 00000000000..574fb4864a7 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/repository/__init__.py @@ -0,0 +1,5 @@ +# mypy: disable-error-code=truthy-function +from ._base import BaseRepository + +assert BaseRepository # nosec +__all__: tuple[str, ...] = ("BaseRepository",) diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/_base.py b/services/api-server/src/simcore_service_api_server/repository/_base.py similarity index 100% rename from services/catalog/src/simcore_service_catalog/db/repositories/_base.py rename to services/api-server/src/simcore_service_api_server/repository/_base.py diff --git a/services/api-server/src/simcore_service_api_server/repository/api_keys.py b/services/api-server/src/simcore_service_api_server/repository/api_keys.py new file mode 100644 index 00000000000..2ab34849c7c --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/repository/api_keys.py @@ -0,0 +1,51 @@ +import logging +from typing import NamedTuple + +import sqlalchemy as sa +from models_library.products import ProductName +from pydantic.types import PositiveInt +from simcore_postgres_database.models.api_keys import api_keys as auth_api_keys_table +from simcore_postgres_database.utils_repos import pass_or_acquire_connection +from sqlalchemy.ext.asyncio import AsyncConnection + +from ._base import BaseRepository + +_logger = logging.getLogger(__name__) + + +class UserAndProductTuple(NamedTuple): + user_id: PositiveInt + product_name: ProductName + + +class ApiKeysRepository(BaseRepository): + """Auth access""" + + async def get_user( + self, + connection: AsyncConnection | None = None, + *, + api_key: str, + api_secret: str + ) -> UserAndProductTuple | None: + + stmt = sa.select( + auth_api_keys_table.c.user_id, + auth_api_keys_table.c.product_name, + ).where( + ( + auth_api_keys_table.c.api_key == api_key + ) # NOTE: keep order, api_key is indexed + & ( + auth_api_keys_table.c.api_secret + == sa.func.crypt(api_secret, auth_api_keys_table.c.api_secret) + ) + ) + async with pass_or_acquire_connection(self.db_engine, connection) as conn: + result = await conn.execute(stmt) + row = result.one_or_none() + return ( + UserAndProductTuple(user_id=row.user_id, product_name=row.product_name) + if row + else None + ) diff --git a/services/api-server/src/simcore_service_api_server/repository/users.py b/services/api-server/src/simcore_service_api_server/repository/users.py new file mode 100644 index 
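# ---------------------------------------------------------------------------
# A minimal usage sketch for ApiKeysRepository.get_user above, assuming
# BaseRepository (repository/_base.py) is constructed from an SQLAlchemy
# AsyncEngine; `engine`, `key` and `secret` are hypothetical names used only
# for illustration.
#
#   from sqlalchemy.ext.asyncio import AsyncEngine
#
#   async def authenticate(engine: AsyncEngine, key: str, secret: str):
#       repo = ApiKeysRepository(db_engine=engine)
#       found = await repo.get_user(api_key=key, api_secret=secret)
#       # None means the key is unknown or the crypt()-hashed secret did not match
#       return None if found is None else (found.user_id, found.product_name)
# ---------------------------------------------------------------------------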
00000000000..f09335ecdef --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/repository/users.py @@ -0,0 +1,30 @@ +import sqlalchemy as sa +from common_library.users_enums import UserStatus +from models_library.emails import LowerCaseEmailStr +from models_library.users import UserID +from pydantic import TypeAdapter +from simcore_postgres_database.models.users import users +from simcore_postgres_database.utils_repos import pass_or_acquire_connection +from sqlalchemy.ext.asyncio import AsyncConnection + +from ._base import BaseRepository + + +class UsersRepository(BaseRepository): + async def get_active_user_email( + self, + connection: AsyncConnection | None = None, + *, + user_id: UserID, + ) -> LowerCaseEmailStr | None: + async with pass_or_acquire_connection(self.db_engine, connection) as conn: + email = await conn.scalar( + sa.select(users.c.email).where( + (users.c.id == user_id) & (users.c.status == UserStatus.ACTIVE) + ) + ) + return ( + TypeAdapter(LowerCaseEmailStr).validate_python(email) + if email is not None + else None + ) diff --git a/services/api-server/src/simcore_service_api_server/services_http/__init__.py b/services/api-server/src/simcore_service_api_server/services_http/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/api-server/src/simcore_service_api_server/services_http/director_v2.py b/services/api-server/src/simcore_service_api_server/services_http/director_v2.py new file mode 100644 index 00000000000..614a77b5d9d --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/director_v2.py @@ -0,0 +1,161 @@ +import logging +from functools import partial +from uuid import UUID + +from fastapi import FastAPI +from models_library.api_schemas_directorv2.computations import ( + ComputationGet as DirectorV2ComputationGet, +) +from models_library.projects_nodes_io import NodeID +from models_library.projects_pipeline import ComputationTask +from models_library.projects_state import RunningState +from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, PositiveInt, TypeAdapter +from pydantic.config import JsonDict +from settings_library.tracing import TracingSettings +from starlette import status + +from ..core.settings import DirectorV2Settings +from ..exceptions.backend_errors import JobNotFoundError, LogFileNotFoundError +from ..exceptions.service_errors_utils import service_exception_mapper +from ..models.schemas.jobs import PercentageInt +from ..models.schemas.studies import JobLogsMap, LogLink +from ..utils.client_base import BaseServiceClientApi, setup_client_instance + +logger = logging.getLogger(__name__) + + +class ComputationTaskGet(ComputationTask): + url: AnyHttpUrl = Field( + ..., description="the link where to get the status of the task" + ) + stop_url: AnyHttpUrl | None = Field( + None, description="the link where to stop the task" + ) + + def guess_progress(self) -> PercentageInt: + # guess progress based on self.state + if self.state in [RunningState.SUCCESS, RunningState.FAILED]: + return 100 + return 0 + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + { + **ComputationTask.model_json_schema()["examples"][0], + "url": "https://link-to-stop-computation", + } + ] + } + ) + + model_config = ConfigDict( + json_schema_extra=_update_json_schema_extra, + ) + + +class TaskLogFileGet(BaseModel): + task_id: NodeID + download_link: AnyHttpUrl | None = Field( + None, description="Presigned link for log file or None if 
still not available" + ) + + +# API CLASS --------------------------------------------- + +_client_status_code_to_exception = partial( + service_exception_mapper, service_name="Director V2" +) + + +class DirectorV2Api(BaseServiceClientApi): + + @_client_status_code_to_exception( + http_status_map={status.HTTP_404_NOT_FOUND: JobNotFoundError} + ) + async def get_computation( + self, *, project_id: UUID, user_id: PositiveInt + ) -> ComputationTaskGet: + response = await self.client.get( + f"/v2/computations/{project_id}", + params={ + "user_id": user_id, + }, + ) + response.raise_for_status() + return ComputationTaskGet.model_validate( + DirectorV2ComputationGet.model_validate_json(response.text), + from_attributes=True, + ) + + @_client_status_code_to_exception( + http_status_map={status.HTTP_404_NOT_FOUND: JobNotFoundError} + ) + async def stop_computation(self, *, project_id: UUID, user_id: PositiveInt) -> None: + response = await self.client.post( + f"/v2/computations/{project_id}:stop", + json={ + "user_id": user_id, + }, + ) + response.raise_for_status() + + @_client_status_code_to_exception( + http_status_map={status.HTTP_404_NOT_FOUND: JobNotFoundError} + ) + async def delete_computation( + self, *, project_id: UUID, user_id: PositiveInt + ) -> None: + response = await self.client.request( + "DELETE", + f"/v2/computations/{project_id}", + json={ + "user_id": user_id, + "force": True, + }, + ) + response.raise_for_status() + + @_client_status_code_to_exception( + http_status_map={status.HTTP_404_NOT_FOUND: LogFileNotFoundError} + ) + async def get_computation_logs( + self, *, user_id: PositiveInt, project_id: UUID + ) -> JobLogsMap: + response = await self.client.get( + f"/v2/computations/{project_id}/tasks/-/logfile", + params={ + "user_id": user_id, + }, + ) + + # probably not found + response.raise_for_status() + + log_links: list[LogLink] = [ + LogLink(node_name=f"{r.task_id}", download_link=r.download_link) + for r in TypeAdapter(list[TaskLogFileGet]).validate_json( + response.text or "[]" + ) + if r.download_link + ] + + return JobLogsMap(log_links=log_links) + + +# MODULES APP SETUP ------------------------------------------------------------- + + +def setup( + app: FastAPI, settings: DirectorV2Settings, tracing_settings: TracingSettings | None +) -> None: + setup_client_instance( + app, + DirectorV2Api, + # WARNING: it has /v0 and /v2 prefixes + api_baseurl=settings.base_url, + service_name="director_v2", + tracing_settings=tracing_settings, + ) diff --git a/services/api-server/src/simcore_service_api_server/services_http/jobs.py b/services/api-server/src/simcore_service_api_server/services_http/jobs.py new file mode 100644 index 00000000000..ed2ef50d588 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/jobs.py @@ -0,0 +1,105 @@ +import logging +from typing import Annotated +from uuid import UUID + +from fastapi import Depends, HTTPException, Request, status +from models_library.api_schemas_webserver.projects import ProjectGet +from pydantic import HttpUrl, PositiveInt +from servicelib.logging_utils import log_context + +from ..api.dependencies.authentication import get_current_user_id +from ..api.dependencies.services import get_api_client +from ..api.dependencies.webserver_http import get_webserver_session +from ..models.schemas.jobs import ( + JobID, + JobMetadata, + JobMetadataUpdate, + JobPricingSpecification, + JobStatus, +) +from .director_v2 import DirectorV2Api +from .solver_job_models_converters import create_jobstatus_from_task 
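# ---------------------------------------------------------------------------
# A minimal sketch of how the DirectorV2Api client defined above is driven
# from this module, e.g. to stop a computation and report its status (the
# pattern mirrors stop_project() below). `app` is assumed to be the FastAPI
# instance on which setup_client_instance() registered the client.
#
#   async def stop_and_report(app, job_id, user_id):
#       director2 = DirectorV2Api.get_instance(app)
#       await director2.stop_computation(project_id=job_id, user_id=user_id)
#       task = await director2.get_computation(project_id=job_id, user_id=user_id)
#       return create_jobstatus_from_task(task)  # 404 surfaces as JobNotFoundError
# ---------------------------------------------------------------------------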
+from .webserver import AuthSession + +_logger = logging.getLogger(__name__) + + +def raise_if_job_not_associated_with_solver( + expected_project_name: str, project: ProjectGet +) -> None: + if expected_project_name != project.name: + raise HTTPException( + status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Invalid input data for job {project.uuid}", + ) + + +async def start_project( + *, + request: Request, + job_id: JobID, + expected_job_name: str, + webserver_api: Annotated[AuthSession, Depends(get_webserver_session)], +) -> None: + if pricing_spec := JobPricingSpecification.create_from_headers(request.headers): + with log_context(_logger, logging.DEBUG, "Set pricing plan and unit"): + project: ProjectGet = await webserver_api.get_project(project_id=job_id) + raise_if_job_not_associated_with_solver(expected_job_name, project) + node_ids = list(project.workbench.keys()) + assert len(node_ids) == 1 # nosec + await webserver_api.connect_pricing_unit_to_project_node( + project_id=job_id, + node_id=UUID(node_ids[0]), + pricing_plan=pricing_spec.pricing_plan, + pricing_unit=pricing_spec.pricing_unit, + ) + with log_context(_logger, logging.DEBUG, "Starting job"): + await webserver_api.start_project(project_id=job_id) + + +async def stop_project( + *, + job_id: JobID, + user_id: Annotated[PositiveInt, Depends(get_current_user_id)], + director2_api: Annotated[DirectorV2Api, Depends(get_api_client(DirectorV2Api))], +) -> JobStatus: + await director2_api.stop_computation(project_id=job_id, user_id=user_id) + + task = await director2_api.get_computation(project_id=job_id, user_id=user_id) + job_status: JobStatus = create_jobstatus_from_task(task) + return job_status + + +async def get_custom_metadata( + *, + job_name: str, + job_id: JobID, + webserver_api: AuthSession, + self_url: HttpUrl, +): + assert job_name # nosec + project_metadata = await webserver_api.get_project_metadata(project_id=job_id) + return JobMetadata( + job_id=job_id, + metadata=project_metadata.custom, + url=self_url, + ) + + +async def replace_custom_metadata( + *, + job_name: str, + job_id: JobID, + update: JobMetadataUpdate, + webserver_api: AuthSession, + self_url: HttpUrl, +): + assert job_name # nosec + project_metadata = await webserver_api.update_project_metadata( + project_id=job_id, metadata=update.metadata + ) + return JobMetadata( + job_id=job_id, + metadata=project_metadata.custom, + url=self_url, + ) diff --git a/services/api-server/src/simcore_service_api_server/services_http/log_streaming.py b/services/api-server/src/simcore_service_api_server/services_http/log_streaming.py new file mode 100644 index 00000000000..1fcf65bdf42 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/log_streaming.py @@ -0,0 +1,148 @@ +import asyncio +import logging +from asyncio import Queue +from collections.abc import AsyncIterable, Iterator +from typing import Final + +from common_library.error_codes import create_error_code +from models_library.rabbitmq_messages import LoggerRabbitMessage +from models_library.users import UserID +from pydantic import NonNegativeInt +from servicelib.logging_errors import create_troubleshotting_log_kwargs +from servicelib.logging_utils import log_catch +from servicelib.rabbitmq import QueueName, RabbitMQClient + +from .._constants import MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE +from ..exceptions.backend_errors import BaseBackEndError +from ..exceptions.log_streaming_errors import ( + LogStreamerNotRegisteredError, + LogStreamerRegistrationConflictError, +) +from 
..models.schemas.errors import ErrorGet +from ..models.schemas.jobs import JobID, JobLog +from .director_v2 import DirectorV2Api + +_logger = logging.getLogger(__name__) + +_NEW_LINE: Final[str] = "\n" + + +class LogDistributor: + def __init__(self, rabbitmq_client: RabbitMQClient): + self._rabbit_client = rabbitmq_client + self._log_streamers: dict[JobID, Queue[JobLog]] = {} + self._queue_name: QueueName + + async def setup(self): + self._queue_name, _ = await self._rabbit_client.subscribe( + LoggerRabbitMessage.get_channel_name(), + self._distribute_logs, + exclusive_queue=True, + topics=[], + ) + + async def teardown(self): + await self._rabbit_client.unsubscribe(self._queue_name) + + async def __aenter__(self): + await self.setup() + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.teardown() + + async def _distribute_logs(self, data: bytes): + with log_catch(_logger, reraise=False): + got = LoggerRabbitMessage.model_validate_json(data) + item = JobLog( + job_id=got.project_id, + node_id=got.node_id, + log_level=got.log_level, + messages=got.messages, + ) + queue = self._log_streamers.get(item.job_id) + if queue is None: + msg = f"Could not forward log because a logstreamer associated with job_id={item.job_id} was not registered" + raise LogStreamerNotRegisteredError(job_id=item.job_id, details=msg) + await queue.put(item) + return True + return False + + async def register(self, job_id: JobID, queue: Queue[JobLog]): + _logger.debug("Registering log streamer for job_id=%s", job_id) + if job_id in self._log_streamers: + raise LogStreamerRegistrationConflictError(job_id=job_id) + self._log_streamers[job_id] = queue + await self._rabbit_client.add_topics( + LoggerRabbitMessage.get_channel_name(), topics=[f"{job_id}.*"] + ) + + async def deregister(self, job_id: JobID): + _logger.debug("Deregistering log streamer for job_id=%s", job_id) + if job_id not in self._log_streamers: + msg = f"No stream was connected to {job_id}." 
+ raise LogStreamerNotRegisteredError(details=msg, job_id=job_id) + await self._rabbit_client.remove_topics( + LoggerRabbitMessage.get_channel_name(), topics=[f"{job_id}.*"] + ) + self._log_streamers.pop(job_id) + + @property + def iter_log_queue_sizes(self) -> Iterator[tuple[JobID, int]]: + for k, v in self._log_streamers.items(): + yield k, v.qsize() + + +class LogStreamer: + def __init__( + self, + *, + user_id: UserID, + director2_api: DirectorV2Api, + job_id: JobID, + log_distributor: LogDistributor, + log_check_timeout: NonNegativeInt, + ): + self._user_id = user_id + self._director2_api = director2_api + self.queue: Queue[JobLog] = Queue() + self._job_id: JobID = job_id + self._log_distributor: LogDistributor = log_distributor + self._log_check_timeout: NonNegativeInt = log_check_timeout + + async def _project_done(self) -> bool: + task = await self._director2_api.get_computation( + project_id=self._job_id, user_id=self._user_id + ) + return task.stopped is not None + + async def log_generator(self) -> AsyncIterable[str]: + try: + done: bool = False + while not done: + try: + log: JobLog = await asyncio.wait_for( + self.queue.get(), timeout=self._log_check_timeout + ) + yield log.model_dump_json() + _NEW_LINE + except TimeoutError: + done = await self._project_done() + + except (BaseBackEndError, LogStreamerRegistrationConflictError) as exc: + error_msg = f"{exc}" + + _logger.info("%s: %s", exc.code, error_msg) + yield ErrorGet(errors=[error_msg]).model_dump_json() + _NEW_LINE + + except Exception as exc: # pylint: disable=W0718 + error_code = create_error_code(exc) + error_msg = MSG_INTERNAL_ERROR_USER_FRIENDLY_TEMPLATE + f" [{error_code}]" + + _logger.exception( + **create_troubleshotting_log_kwargs( + error_msg, + error=exc, + error_code=error_code, + ) + ) + yield ErrorGet(errors=[error_msg]).model_dump_json() + _NEW_LINE diff --git a/services/api-server/src/simcore_service_api_server/services_http/rabbitmq.py b/services/api-server/src/simcore_service_api_server/services_http/rabbitmq.py new file mode 100644 index 00000000000..3466403d590 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/rabbitmq.py @@ -0,0 +1,56 @@ +import logging + +from fastapi import FastAPI +from servicelib.rabbitmq import RabbitMQClient, wait_till_rabbitmq_responsive +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from settings_library.rabbit import RabbitSettings + +from ..api.dependencies.rabbitmq import get_rabbitmq_rpc_client +from ..core.health_checker import ApiServerHealthChecker +from ..services_http.log_streaming import LogDistributor +from ..services_rpc import resource_usage_tracker, wb_api_server + +_logger = logging.getLogger(__name__) + + +def setup_rabbitmq(app: FastAPI) -> None: + settings: RabbitSettings = app.state.settings.API_SERVER_RABBITMQ + app.state.rabbitmq_client = None + app.state.log_distributor = None + + async def _on_startup() -> None: + await wait_till_rabbitmq_responsive(settings.dsn) + + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="api_server", settings=settings + ) + app.state.rabbitmq_client = RabbitMQClient( + client_name="api_server", settings=settings + ) + app.state.log_distributor = LogDistributor(app.state.rabbitmq_client) + await app.state.log_distributor.setup() + app.state.health_checker = ApiServerHealthChecker( + log_distributor=app.state.log_distributor, + rabbit_client=app.state.rabbitmq_client, + 
timeout_seconds=app.state.settings.API_SERVER_HEALTH_CHECK_TASK_TIMEOUT_SECONDS, + allowed_health_check_failures=app.state.settings.API_SERVER_ALLOWED_HEALTH_CHECK_FAILURES, + ) + await app.state.health_checker.setup( + app.state.settings.API_SERVER_HEALTH_CHECK_TASK_PERIOD_SECONDS + ) + # setup rpc clients + resource_usage_tracker.setup(app, get_rabbitmq_rpc_client(app)) + wb_api_server.setup(app, get_rabbitmq_rpc_client(app)) + + async def _on_shutdown() -> None: + if app.state.health_checker: + await app.state.health_checker.teardown() + if app.state.log_distributor: + await app.state.log_distributor.teardown() + if app.state.rabbitmq_client: + await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_client: + await app.state.rabbitmq_rpc_client.close() + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/api-server/src/simcore_service_api_server/services_http/solver_job_models_converters.py b/services/api-server/src/simcore_service_api_server/services_http/solver_job_models_converters.py new file mode 100644 index 00000000000..3a6728f478a --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/solver_job_models_converters.py @@ -0,0 +1,239 @@ +""" +Helper functions to convert models used in +services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py +""" + +import uuid +from collections.abc import Callable +from datetime import UTC, datetime +from functools import lru_cache + +import arrow +from models_library.api_schemas_webserver.projects import ProjectCreateNew, ProjectGet +from models_library.api_schemas_webserver.projects_ui import StudyUI +from models_library.basic_types import KeyIDStr +from models_library.projects import Project +from models_library.projects_nodes import InputID +from pydantic import HttpUrl, TypeAdapter + +from ..models.domain.projects import InputTypes, Node, SimCoreFileLink +from ..models.schemas.files import File +from ..models.schemas.jobs import ( + ArgumentTypes, + Job, + JobInputs, + JobStatus, + PercentageInt, + get_outputs_url, + get_runner_url, + get_url, +) +from ..models.schemas.programs import Program +from ..models.schemas.solvers import Solver +from .director_v2 import ComputationTaskGet + +# UTILS ------ +_BASE_UUID = uuid.UUID("231e13db-6bc6-4f64-ba56-2ee2c73b9f09") + + +@lru_cache +def compose_uuid_from(*values) -> str: + composition = "/".join(map(str, values)) + new_uuid = uuid.uuid5(_BASE_UUID, composition) + return str(new_uuid) + + +def format_datetime(snapshot: datetime) -> str: + return "{}Z".format(snapshot.isoformat(timespec="milliseconds")) + + +def now_str() -> str: + # NOTE: backend MUST use UTC + return format_datetime(datetime.now(UTC)) + + +# CONVERTERS -------------- +# +# - creates a model in one API composing models in others +# + + +def create_node_inputs_from_job_inputs( + inputs: JobInputs, +) -> dict[InputID, InputTypes]: + # map Job inputs with solver inputs + # TODO: ArgumentType -> InputTypes dispatcher + + node_inputs: dict[InputID, InputTypes] = {} + for name, value in inputs.values.items(): + assert TypeAdapter(ArgumentTypes).validate_python(value) == value # type: ignore # nosec + assert TypeAdapter(KeyIDStr).validate_python(name) is not None # nosec + + if isinstance(value, File): + # FIXME: ensure this aligns with storage policy + node_inputs[KeyIDStr(name)] = SimCoreFileLink( + store=0, + path=f"api/{value.id}/{value.filename}", + label=value.filename, + eTag=value.e_tag, + ) + else: + 
node_inputs[KeyIDStr(name)] = value + + # TODO: validate Inputs?? + + return node_inputs + + +def create_job_inputs_from_node_inputs(inputs: dict[InputID, InputTypes]) -> JobInputs: + """Reverse from create_node_inputs_from_job_inputs + + raises ValidationError + """ + input_values: dict[str, ArgumentTypes] = {} + for name, value in inputs.items(): + assert TypeAdapter(InputID).validate_python(name) == name # nosec + assert TypeAdapter(InputTypes).validate_python(value) == value # nosec + + if isinstance(value, SimCoreFileLink): + # FIXME: ensure this aligns with storage policy + _api, file_id, filename = value.path.split("/") + assert _api == "api" # nosec + input_values[name] = File( + id=file_id, # type: ignore[arg-type] + filename=filename, + e_tag=value.e_tag, + ) + else: + # NOTE: JobInputs pydantic model will parse&validate these values + input_values[name] = value # type: ignore [assignment] + + return JobInputs(values=input_values) # raises ValidationError + + +def get_node_id(project_id, solver_id) -> str: + # By clumsy design, the webserver needs a global uuid, + # so we decieded to compose as this + return compose_uuid_from(project_id, solver_id) + + +def create_new_project_for_job( + *, + solver_or_program: Solver | Program, + job: Job, + inputs: JobInputs, + description: str | None = None, + project_name: str | None = None, +) -> ProjectCreateNew: + """ + Creates a project for a solver's job + + Returns model used in the body of create_project at the web-server API + + In reality, we also need solvers and inputs to produce + the project, but the name of the function is intended + to stress the one-to-one equivalence between a project + (model at web-server API) and a job (model at api-server API) + + + raises ValidationError + """ + project_id = job.id + solver_id = get_node_id(project_id, solver_or_program.id) + + # map Job inputs with solveri nputs + # TODO: ArgumentType -> InputTypes dispatcher and reversed + solver_inputs: dict[InputID, InputTypes] = create_node_inputs_from_job_inputs( + inputs + ) + + solver_service = Node( + key=solver_or_program.id, + version=solver_or_program.version, + label=solver_or_program.title, + inputs=solver_inputs, + inputs_units={}, + ) + + # Ensembles project model so it can be used as input for create_project + job_info = job.model_dump_json( + include={"id", "name", "inputs_checksum", "created_at"}, indent=2 + ) + + return ProjectCreateNew( + uuid=project_id, + name=project_name or job.name, + description=description + or f"Study associated to solver/study/program job:\n{job_info}", + thumbnail="https://via.placeholder.com/170x120.png", # type: ignore[arg-type] + workbench={solver_id: solver_service}, + ui=StudyUI( + workbench={ + f"{solver_id}": { # type: ignore[dict-item] + "position": { + "x": 633, + "y": 229, + }, + }, + }, + slideshow={}, + current_node_id=solver_id, # type: ignore[arg-type] + annotations={}, + ), + accessRights={}, # type: ignore[call-arg] # This MUST be called with alias + ) + + +def create_job_from_project( + *, + solver_or_program: Solver | Program, + project: ProjectGet | Project, + url_for: Callable[..., HttpUrl], +) -> Job: + """ + Given a project, creates a job + + - Complementary from create_project_from_job + - Assumes project created via solver's job + + raise ValidationError + """ + assert len(project.workbench) == 1 # nosec + + solver_node: Node = next(iter(project.workbench.values())) + job_inputs: JobInputs = create_job_inputs_from_node_inputs( + inputs=solver_node.inputs or {} + ) + + # create 
solver's job + solver_or_program_name = solver_or_program.resource_name + + job_id = project.uuid + + return Job( + id=job_id, + name=Job.compose_resource_name( + parent_name=solver_or_program_name, job_id=job_id + ), + inputs_checksum=job_inputs.compute_checksum(), + created_at=project.creation_date, # type: ignore[arg-type] + runner_name=solver_or_program_name, + url=get_url( + solver_or_program=solver_or_program, url_for=url_for, job_id=job_id + ), + runner_url=get_runner_url(solver_or_program=solver_or_program, url_for=url_for), + outputs_url=get_outputs_url( + solver_or_program=solver_or_program, url_for=url_for, job_id=job_id + ), + ) + + +def create_jobstatus_from_task(task: ComputationTaskGet) -> JobStatus: + return JobStatus( + job_id=task.id, + state=task.state, + progress=PercentageInt((task.pipeline_details.progress or 0) * 100.0), + submitted_at=task.submitted or arrow.utcnow().datetime, + started_at=task.started, + stopped_at=task.stopped, + ) diff --git a/services/api-server/src/simcore_service_api_server/services_http/solver_job_outputs.py b/services/api-server/src/simcore_service_api_server/services_http/solver_job_outputs.py new file mode 100644 index 00000000000..4554bc0ccc3 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/solver_job_outputs.py @@ -0,0 +1,53 @@ +import logging +from typing import Any, TypeAlias + +from models_library.projects import ProjectID, ProjectIDStr +from models_library.projects_nodes_io import BaseFileLink, NodeID, NodeIDStr +from pydantic import StrictBool, StrictFloat, StrictInt, TypeAdapter +from simcore_sdk import node_ports_v2 +from simcore_sdk.node_ports_v2 import DBManager, Nodeports +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..exceptions.backend_errors import SolverOutputNotFoundError + +log = logging.getLogger(__name__) + +# ResultsTypes are types used in the job outputs (see ArgumentType) +ResultsTypes: TypeAlias = ( + StrictFloat | StrictInt | StrictBool | BaseFileLink | str | list | None +) + + +async def get_solver_output_results( + user_id: int, project_uuid: ProjectID, node_uuid: NodeID, db_engine: AsyncEngine +) -> dict[str, ResultsTypes]: + """ + Wraps calls via node_ports to retrieve project's output + """ + + # get the DB engine + db_manager = DBManager(db_engine=db_engine) + + try: + solver: Nodeports = await node_ports_v2.ports( + user_id=user_id, + project_id=ProjectIDStr(f"{project_uuid}"), + node_uuid=NodeIDStr(f"{node_uuid}"), + db_manager=db_manager, + ) + solver_output_results: dict[str, Any] = {} + for port in (await solver.outputs).values(): + log.debug( + "Output %s [%s]: %s", + port.key, + port.property_type, + port.value, + ) + assert TypeAdapter(ResultsTypes).validate_python(port.value) == port.value # type: ignore # nosec + + solver_output_results[port.key] = port.value + + return solver_output_results + + except node_ports_v2.exceptions.NodeNotFound as err: + raise SolverOutputNotFoundError(project_uuid=project_uuid) from err diff --git a/services/api-server/src/simcore_service_api_server/services_http/storage.py b/services/api-server/src/simcore_service_api_server/services_http/storage.py new file mode 100644 index 00000000000..ae82609a58a --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/storage.py @@ -0,0 +1,249 @@ +import logging +import re +import urllib.parse +from functools import partial +from mimetypes import guess_type +from typing import Literal +from uuid import UUID + +from fastapi import FastAPI +from 
fastapi.encoders import jsonable_encoder +from models_library.api_schemas_storage.storage_schemas import ( + FileMetaDataArray, +) +from models_library.api_schemas_storage.storage_schemas import ( + FileMetaDataGet as StorageFileMetaData, +) +from models_library.api_schemas_storage.storage_schemas import ( + FileUploadSchema, + PresignedLink, +) +from models_library.basic_types import SHA256Str +from models_library.generics import Envelope +from models_library.rest_pagination import PageLimitInt, PageOffsetInt +from pydantic import AnyUrl +from settings_library.tracing import TracingSettings +from starlette.datastructures import URL + +from ..core.settings import StorageSettings +from ..exceptions.service_errors_utils import service_exception_mapper +from ..models.domain.files import File +from ..utils.client_base import BaseServiceClientApi, setup_client_instance + +_logger = logging.getLogger(__name__) + +_exception_mapper = partial(service_exception_mapper, service_name="Storage") + +AccessRight = Literal["read", "write"] + +_FILE_ID_PATTERN = re.compile(r"^api\/(?P[\w-]+)\/(?P.+)$") + + +def to_file_api_model(stored_file_meta: StorageFileMetaData) -> File: + # extracts fields from api/{file_id}/{filename} + match = _FILE_ID_PATTERN.match(stored_file_meta.file_id or "") + if not match: + msg = f"Invalid file_id {stored_file_meta.file_id} in file metadata" + raise ValueError(msg) + + file_id, filename = match.groups() + + return File( + id=file_id, # type: ignore + filename=filename, + content_type=guess_type(stored_file_meta.file_name)[0] + or "application/octet-stream", + e_tag=stored_file_meta.entity_tag, + checksum=stored_file_meta.sha256_checksum, + ) + + +class StorageApi(BaseServiceClientApi): + # + # All files created via the API are stored in simcore-s3 as objects with name pattern "api/{file_id}/{filename.ext}" + # + SIMCORE_S3_ID = 0 + + @_exception_mapper(http_status_map={}) + async def list_files( + self, + *, + user_id: int, + ) -> list[StorageFileMetaData]: + """Lists metadata of all s3 objects name as api/* from a given user""" + + # search_files_starting_with + response = await self.client.post( + "/simcore-s3/files/metadata:search", + params={ + "kind": "owned", + "user_id": str(user_id), + "startswith": "api/", + }, + ) + response.raise_for_status() + + files_metadata = ( + Envelope[FileMetaDataArray].model_validate_json(response.text).data + ) + files: list[StorageFileMetaData] = ( + [] if files_metadata is None else files_metadata.root + ) + return files + + @_exception_mapper(http_status_map={}) + async def search_owned_files( + self, + *, + user_id: int, + file_id: UUID | None, + sha256_checksum: SHA256Str | None = None, + limit: PageLimitInt | None = None, + offset: PageOffsetInt | None = None, + ) -> list[StorageFileMetaData]: + # NOTE: can NOT use /locations/0/files/metadata with uuid_filter=api/ because + # logic in storage 'wrongly' assumes that all data is associated to a project and + # here there is no project, so it would always returns an empty + response = await self.client.post( + "/simcore-s3/files/metadata:search", + params=jsonable_encoder( + { + "kind": "owned", + "user_id": f"{user_id}", + "startswith": "api/" if file_id is None else f"api/{file_id}", + "sha256_checksum": sha256_checksum, + "limit": limit, + "offset": offset, + }, + exclude_none=True, + ), + ) + response.raise_for_status() + + files_metadata = ( + Envelope[FileMetaDataArray].model_validate_json(response.text).data + ) + files: list[StorageFileMetaData] = ( + [] if files_metadata is 
None else files_metadata.root + ) + assert len(files) <= limit if limit else True # nosec + return files + + @_exception_mapper(http_status_map={}) + async def get_download_link( + self, *, user_id: int, file_id: UUID, file_name: str + ) -> AnyUrl: + object_path = urllib.parse.quote_plus(f"api/{file_id}/{file_name}") + + response = await self.client.get( + f"/locations/{self.SIMCORE_S3_ID}/files/{object_path}", + params={"user_id": str(user_id)}, + ) + response.raise_for_status() + + presigned_link: PresignedLink | None = ( + Envelope[PresignedLink].model_validate_json(response.text).data + ) + assert presigned_link is not None + link: AnyUrl = presigned_link.link + return link + + @_exception_mapper(http_status_map={}) + async def delete_file(self, *, user_id: int, quoted_storage_file_id: str) -> None: + response = await self.client.delete( + f"/locations/{self.SIMCORE_S3_ID}/files/{quoted_storage_file_id}", + params={"user_id": user_id}, + ) + response.raise_for_status() + + @_exception_mapper(http_status_map={}) + async def get_upload_links( + self, *, user_id: int, file_id: UUID, file_name: str + ) -> FileUploadSchema: + object_path = urllib.parse.quote_plus(f"api/{file_id}/{file_name}") + + # complete_upload_file + response = await self.client.put( + f"/locations/{self.SIMCORE_S3_ID}/files/{object_path}", + params={"user_id": user_id, "file_size": 0}, + ) + response.raise_for_status() + + enveloped_data = Envelope[FileUploadSchema].model_validate_json(response.text) + assert enveloped_data.data # nosec + return enveloped_data.data + + async def create_complete_upload_link( + self, *, file: File, query: dict[str, str] | None = None + ) -> URL: + url = URL( + f"{self.client.base_url}locations/{self.SIMCORE_S3_ID}/files/{file.quoted_storage_file_id}:complete" + ) + if query is not None: + url = url.include_query_params(**query) + return url + + async def create_abort_upload_link( + self, *, file: File, query: dict[str, str] | None = None + ) -> URL: + url = URL( + f"{self.client.base_url}locations/{self.SIMCORE_S3_ID}/files/{file.quoted_storage_file_id}:abort" + ) + if query is not None: + url = url.include_query_params(**query) + return url + + @_exception_mapper(http_status_map={}) + async def create_soft_link( + self, *, user_id: int, target_s3_path: str, as_file_id: UUID + ) -> File: + assert len(target_s3_path.split("/")) == 3 # nosec + + # define api-prefixed object-path for link + file_id: str = f"{as_file_id}" + file_name = target_s3_path.split("/")[-1] + link_path = f"api/{file_id}/{file_name}" + + file_id = urllib.parse.quote_plus(target_s3_path) + + # ln makes links between files + # ln TARGET LINK_NAME + response = await self.client.post( + f"/files/{file_id}:soft-copy", + params={"user_id": user_id}, + json={"link_id": link_path}, + ) + response.raise_for_status() + + stored_file_meta = ( + Envelope[StorageFileMetaData].model_validate_json(response.text).data + ) + assert stored_file_meta is not None + file_meta: File = to_file_api_model(stored_file_meta) + return file_meta + + +# MODULES APP SETUP ------------------------------------------------------------- + + +def setup( + app: FastAPI, settings: StorageSettings, tracing_settings: TracingSettings | None +) -> None: + if not settings: + settings = StorageSettings() + + setup_client_instance( + app, + StorageApi, + api_baseurl=settings.api_base_url, + service_name="storage", + tracing_settings=tracing_settings, + ) + + +__all__: tuple[str, ...] 
= ( + "StorageApi", + "StorageFileMetaData", + "setup", + "to_file_api_model", +) diff --git a/services/api-server/src/simcore_service_api_server/services_http/study_job_models_converters.py b/services/api-server/src/simcore_service_api_server/services_http/study_job_models_converters.py new file mode 100644 index 00000000000..99bb5a59ae9 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/study_job_models_converters.py @@ -0,0 +1,132 @@ +""" +Helper functions to convert models used in +services/api-server/src/simcore_service_api_server/api/routes/studies_jobs.py +""" + +from typing import Any, NamedTuple +from uuid import UUID + +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.api_schemas_webserver.projects_ports import ( + ProjectInputGet, + ProjectInputUpdate, +) +from models_library.projects import DateTimeStr +from models_library.projects_nodes import InputID +from models_library.projects_nodes_io import LinkToFileTypes, NodeID, SimcoreS3FileID +from pydantic import TypeAdapter + +from ..models.domain.files import File +from ..models.domain.projects import InputTypes, SimCoreFileLink +from ..models.schemas.jobs import Job, JobInputs, JobOutputs +from ..models.schemas.studies import Study, StudyID +from .storage import StorageApi, to_file_api_model + + +class ProjectInputs(NamedTuple): + inputs: list[ProjectInputUpdate] + file_inputs: dict[InputID, InputTypes] + + +def get_project_and_file_inputs_from_job_inputs( + project_inputs: dict[NodeID, ProjectInputGet], + file_inputs: dict[InputID, InputTypes], + job_inputs: JobInputs, +) -> ProjectInputs: + job_inputs_dict = job_inputs.values + + # TODO make sure all values are set at some point + + for name, value in job_inputs.values.items(): + if isinstance(value, File): + # FIXME: ensure this aligns with storage policy + file_inputs[InputID(name)] = SimCoreFileLink( + store=0, + path=SimcoreS3FileID(f"api/{value.id}/{value.filename}"), + label=value.filename, + eTag=value.e_tag, + ) + + new_inputs: list[ProjectInputUpdate] = [] + for node_id, node_dict in project_inputs.items(): + if node_dict.label in job_inputs_dict: + new_inputs.append( + ProjectInputUpdate(key=node_id, value=job_inputs_dict[node_dict.label]) + ) + + return ProjectInputs(new_inputs, file_inputs) + + +def create_job_from_study( + study_key: StudyID, + project: ProjectGet, + job_inputs: JobInputs, +) -> Job: + """ + Given a study, creates a job + + raise ValidationError + """ + + study_name = Study.compose_resource_name(f"{study_key}") + + job_name = Job.compose_resource_name(parent_name=study_name, job_id=project.uuid) + + return Job( + id=project.uuid, + name=job_name, + inputs_checksum=job_inputs.compute_checksum(), + created_at=DateTimeStr.to_datetime(project.creation_date), + runner_name=study_name, + url=None, + runner_url=None, + outputs_url=None, + ) + + +async def create_job_outputs_from_project_outputs( + job_id: StudyID, + project_outputs: dict[NodeID, dict[str, Any]], + user_id, + storage_client: StorageApi, +) -> JobOutputs: + """ + + Raises: + ValidationError: when on invalid project_outputs + + """ + results: dict[str, Any] = {} + + for node_dict in project_outputs.values(): + name = node_dict["label"] + value = node_dict["value"] + + if ( + value + and isinstance(value, dict) + and {"store", "path"}.issubset(value.keys()) + ): + assert ( # nosec + TypeAdapter(LinkToFileTypes).validate_python(value) is not None + ) + + path = value["path"] + file_id: UUID = 
File.create_id(*path.split("/")) + + if found := await storage_client.search_owned_files( + user_id=user_id, + file_id=file_id, + sha256_checksum=None, + ): + assert len(found) == 1 # nosec + results[name] = to_file_api_model(found[0]) + else: + api_file: File = await storage_client.create_soft_link( + user_id=user_id, target_s3_path=path, as_file_id=file_id + ) + results[name] = api_file + else: + results[name] = value + + return JobOutputs(job_id=job_id, results=results) diff --git a/services/api-server/src/simcore_service_api_server/services_http/webserver.py b/services/api-server/src/simcore_service_api_server/services_http/webserver.py new file mode 100644 index 00000000000..7751ffb382f --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_http/webserver.py @@ -0,0 +1,694 @@ +# pylint: disable=too-many-public-methods + +import logging +import urllib.parse +from collections.abc import Mapping +from dataclasses import dataclass +from functools import partial +from typing import Any +from uuid import UUID + +import httpx +from common_library.json_serialization import json_dumps +from cryptography import fernet +from fastapi import FastAPI, status +from models_library.api_schemas_api_server.pricing_plans import ServicePricingPlanGet +from models_library.api_schemas_long_running_tasks.tasks import TaskGet +from models_library.api_schemas_webserver.computations import ComputationStart +from models_library.api_schemas_webserver.projects import ( + ProjectCreateNew, + ProjectGet, + ProjectPatch, +) +from models_library.api_schemas_webserver.projects_metadata import ( + ProjectMetadataGet, + ProjectMetadataUpdate, +) +from models_library.api_schemas_webserver.projects_nodes import NodeOutputs +from models_library.api_schemas_webserver.projects_ports import ( + ProjectInputGet, + ProjectInputUpdate, +) +from models_library.api_schemas_webserver.resource_usage import PricingPlanGet +from models_library.api_schemas_webserver.users import MyProfileGet as WebProfileGet +from models_library.api_schemas_webserver.users import ( + MyProfilePatch as WebProfileUpdate, +) +from models_library.api_schemas_webserver.wallets import WalletGet +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rest_pagination import Page, PageLimitInt, PageOffsetInt +from models_library.utils.fastapi_encoders import jsonable_encoder +from pydantic import PositiveInt +from servicelib.aiohttp.long_running_tasks.server import TaskStatus +from servicelib.common_headers import ( + X_SIMCORE_PARENT_NODE_ID, + X_SIMCORE_PARENT_PROJECT_UUID, +) +from settings_library.tracing import TracingSettings +from tenacity import TryAgain +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from ..core.settings import WebServerSettings +from ..exceptions.backend_errors import ( + ClusterNotFoundError, + ConfigurationError, + ForbiddenWalletError, + JobNotFoundError, + ListJobsError, + PaymentRequiredError, + PricingPlanNotFoundError, + PricingUnitNotFoundError, + ProductPriceNotFoundError, + ProfileNotFoundError, + ProjectAlreadyStartedError, + ProjectMetadataNotFoundError, + ProjectPortsNotFoundError, + SolverOutputNotFoundError, + WalletNotFoundError, +) +from ..exceptions.service_errors_utils import ( + service_exception_handler, + service_exception_mapper, +) +from 
..models.basic_types import VersionStr +from ..models.pagination import MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE +from ..models.schemas.jobs import MetaValueType +from ..models.schemas.model_adapter import ( + GetCreditPriceLegacy, + PricingUnitGetLegacy, + WalletGetWithAvailableCreditsLegacy, +) +from ..models.schemas.profiles import Profile, ProfileUpdate, UserRoleEnum +from ..models.schemas.solvers import SolverKeyId +from ..models.schemas.studies import StudyPort +from ..utils.client_base import BaseServiceClientApi, setup_client_instance + +_logger = logging.getLogger(__name__) + +_exception_mapper = partial(service_exception_mapper, service_name="Webserver") + +_JOB_STATUS_MAP = { + status.HTTP_402_PAYMENT_REQUIRED: PaymentRequiredError, + status.HTTP_404_NOT_FOUND: JobNotFoundError, +} + +_PROFILE_STATUS_MAP = {status.HTTP_404_NOT_FOUND: ProfileNotFoundError} + +_WALLET_STATUS_MAP = { + status.HTTP_404_NOT_FOUND: WalletNotFoundError, + status.HTTP_403_FORBIDDEN: ForbiddenWalletError, +} + + +def _get_lrt_urls(lrt_response: httpx.Response): + # WARNING: this function is patched in patch_lrt_response_urls fixture + data = Envelope[TaskGet].model_validate_json(lrt_response.text).data + assert data is not None # nosec + + return data.status_href, data.result_href + + +class WebserverApi(BaseServiceClientApi): + """Access to web-server API + + - BaseServiceClientApi: + - wraps a httpx client + - lifetime attached to app + - responsive tests (i.e. ping) to API in-place + + """ + + +class LongRunningTasksClient(BaseServiceClientApi): + "Client for requesting status and results of long running tasks" + + +@dataclass +class AuthSession: + """ + - wrapper around thin-client to simplify webserver's API + - sets endspoint upon construction + - MIME type: application/json + - processes responses, returning data or raising formatted HTTP exception + - The lifetime of an AuthSession is ONE request. 
+ + SEE services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py + """ + + _api: WebserverApi + _long_running_task_client: LongRunningTasksClient + vtag: str + session_cookies: dict | None = None + + @classmethod + def create( + cls, + app: FastAPI, + session_cookies: dict, + product_extra_headers: Mapping[str, str], + ) -> "AuthSession": + api = WebserverApi.get_instance(app) + assert api # nosec + assert isinstance(api, WebserverApi) # nosec + + api.client.headers = product_extra_headers # type: ignore[assignment] + long_running_tasks_client = LongRunningTasksClient.get_instance(app=app) + + assert long_running_tasks_client # nosec + assert isinstance(long_running_tasks_client, LongRunningTasksClient) # nosec + + long_running_tasks_client.client.headers = product_extra_headers # type: ignore[assignment] + return cls( + _api=api, + _long_running_task_client=long_running_tasks_client, + vtag=app.state.settings.API_SERVER_WEBSERVER.WEBSERVER_VTAG, + session_cookies=session_cookies, + ) + + # OPERATIONS + + @property + def client(self): + return self._api.client + + @property + def long_running_task_client(self): + return self._long_running_task_client.client + + async def _page_projects( + self, + *, + limit: int, + offset: int, + show_hidden: bool, + search_by_project_name: str | None = None, + ) -> Page[ProjectGet]: + assert 1 <= limit <= MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE # nosec + assert offset >= 0 # nosec + + optional: dict[str, Any] = {} + if search_by_project_name is not None: + optional["filters"] = json_dumps( + {"search_by_project_name": search_by_project_name} + ) + + with service_exception_handler( + service_name="Webserver", + http_status_map={status.HTTP_404_NOT_FOUND: ListJobsError}, + rpc_exception_map={}, + ): + resp = await self.client.get( + "/projects", + params={ + "type": "user", + "show_hidden": show_hidden, + "limit": limit, + "offset": offset, + **optional, + }, + cookies=self.session_cookies, + ) + resp.raise_for_status() + + return Page[ProjectGet].model_validate_json(resp.text) + + async def _wait_for_long_running_task_results(self, lrt_response: httpx.Response): + status_url, result_url = _get_lrt_urls(lrt_response) + + # GET task status now until done + async for attempt in AsyncRetrying( + wait=wait_fixed(0.5), + stop=stop_after_delay(60), + reraise=True, + before_sleep=before_sleep_log(_logger, logging.INFO), + ): + with attempt: + get_response = await self.long_running_task_client.get( + url=status_url, cookies=self.session_cookies + ) + get_response.raise_for_status() + task_status = ( + Envelope[TaskStatus].model_validate_json(get_response.text).data + ) + assert task_status is not None # nosec + if not task_status.done: + msg = "Timed out creating project. 
TIP: Try again, or contact oSparc support if this is happening repeatedly" + raise TryAgain(msg) + + result_response = await self.long_running_task_client.get( + f"{result_url}", cookies=self.session_cookies + ) + result_response.raise_for_status() + return Envelope.model_validate_json(result_response.text).data + + # PROFILE -------------------------------------------------- + + @_exception_mapper(http_status_map=_PROFILE_STATUS_MAP) + async def get_me(self) -> Profile: + response = await self.client.get("/me", cookies=self.session_cookies) + response.raise_for_status() + + got: WebProfileGet | None = ( + Envelope[WebProfileGet].model_validate_json(response.text).data + ) + assert got is not None # nosec + + return Profile( + first_name=got.first_name, + last_name=got.last_name, + id=got.id, + login=got.login, + role=UserRoleEnum(got.role), + groups=got.groups.model_dump() if got.groups else None, # type: ignore + gravatar_id=got.gravatar_id, + ) + + @_exception_mapper(http_status_map=_PROFILE_STATUS_MAP) + async def update_me(self, *, profile_update: ProfileUpdate) -> Profile: + + update = WebProfileUpdate.model_construct( + _fields_set=profile_update.model_fields_set, + first_name=profile_update.first_name, + last_name=profile_update.last_name, + ) + + response = await self.client.patch( + "/me", + json=update.model_dump(exclude_unset=True), + cookies=self.session_cookies, + ) + response.raise_for_status() + profile: Profile = await self.get_me() + return profile + + # PROJECTS ------------------------------------------------- + + @_exception_mapper(http_status_map={}) + async def create_project( + self, + project: ProjectCreateNew, + *, + is_hidden: bool, + parent_project_uuid: ProjectID | None, + parent_node_id: NodeID | None, + ) -> ProjectGet: + # POST /projects --> 202 Accepted + query_params = {"hidden": is_hidden} + headers = { + X_SIMCORE_PARENT_PROJECT_UUID: parent_project_uuid, + X_SIMCORE_PARENT_NODE_ID: parent_node_id, + } + + response = await self.client.post( + "/projects", + params=query_params, + headers={k: f"{v}" for k, v in headers.items() if v is not None}, + json=jsonable_encoder(project, by_alias=True, exclude={"state"}), + cookies=self.session_cookies, + ) + response.raise_for_status() + result = await self._wait_for_long_running_task_results(response) + return ProjectGet.model_validate(result) + + @_exception_mapper(http_status_map=_JOB_STATUS_MAP) + async def clone_project( + self, + *, + project_id: UUID, + hidden: bool, + parent_project_uuid: ProjectID | None, + parent_node_id: NodeID | None, + ) -> ProjectGet: + # POST /projects --> 202 Accepted + query_params = {"from_study": project_id, "hidden": hidden} + _headers = { + X_SIMCORE_PARENT_PROJECT_UUID: parent_project_uuid, + X_SIMCORE_PARENT_NODE_ID: parent_node_id, + } + + response = await self.client.post( + "/projects", + cookies=self.session_cookies, + params=query_params, + headers={k: f"{v}" for k, v in _headers.items() if v is not None}, + ) + response.raise_for_status() + result = await self._wait_for_long_running_task_results(response) + return ProjectGet.model_validate(result) + + @_exception_mapper(http_status_map=_JOB_STATUS_MAP) + async def get_project(self, *, project_id: UUID) -> ProjectGet: + response = await self.client.get( + f"/projects/{project_id}", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = Envelope[ProjectGet].model_validate_json(response.text).data + assert data is not None # nosec + return data + + async def get_projects_w_solver_page( + self, *, 
solver_name: str, limit: PageLimitInt, offset: PageOffsetInt + ) -> Page[ProjectGet]: + assert not solver_name.endswith("/") # nosec + + return await self._page_projects( + limit=limit, + offset=offset, + show_hidden=True, + search_by_project_name=solver_name, + ) + + async def get_projects_page(self, *, limit: PageLimitInt, offset: PageOffsetInt): + return await self._page_projects( + limit=limit, + offset=offset, + show_hidden=False, + ) + + @_exception_mapper(http_status_map=_JOB_STATUS_MAP) + async def delete_project(self, *, project_id: ProjectID) -> None: + response = await self.client.delete( + f"/projects/{project_id}", + cookies=self.session_cookies, + ) + response.raise_for_status() + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: ProjectPortsNotFoundError} + ) + async def get_project_metadata_ports( + self, *, project_id: ProjectID + ) -> list[StudyPort]: + """ + maps GET "/projects/{study_id}/metadata/ports", unenvelopes + and returns data + """ + response = await self.client.get( + f"/projects/{project_id}/metadata/ports", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = Envelope[list[StudyPort]].model_validate_json(response.text).data + assert data is not None # nosec + assert isinstance(data, list) # nosec + return data + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: ProjectMetadataNotFoundError} + ) + async def get_project_metadata( + self, *, project_id: ProjectID + ) -> ProjectMetadataGet: + response = await self.client.get( + f"/projects/{project_id}/metadata", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = Envelope[ProjectMetadataGet].model_validate_json(response.text).data + assert data is not None # nosec + return data + + @_exception_mapper(http_status_map=_JOB_STATUS_MAP) + async def patch_project(self, *, project_id: UUID, patch_params: ProjectPatch): + response = await self.client.patch( + f"/projects/{project_id}", + cookies=self.session_cookies, + json=jsonable_encoder(patch_params, exclude_unset=True), + ) + response.raise_for_status() + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: ProjectMetadataNotFoundError} + ) + async def update_project_metadata( + self, *, project_id: ProjectID, metadata: dict[str, MetaValueType] + ) -> ProjectMetadataGet: + response = await self.client.patch( + f"/projects/{project_id}/metadata", + cookies=self.session_cookies, + json=jsonable_encoder(ProjectMetadataUpdate(custom=metadata)), + ) + response.raise_for_status() + data = Envelope[ProjectMetadataGet].model_validate_json(response.text).data + assert data is not None # nosec + return data + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: PricingUnitNotFoundError} + ) + async def get_project_node_pricing_unit( + self, *, project_id: UUID, node_id: UUID + ) -> PricingUnitGetLegacy: + response = await self.client.get( + f"/projects/{project_id}/nodes/{node_id}/pricing-unit", + cookies=self.session_cookies, + ) + + response.raise_for_status() + data = Envelope[PricingUnitGetLegacy].model_validate_json(response.text).data + assert data is not None # nosec + return data + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: PricingUnitNotFoundError} + ) + async def connect_pricing_unit_to_project_node( + self, + *, + project_id: UUID, + node_id: UUID, + pricing_plan: PositiveInt, + pricing_unit: PositiveInt, + ) -> None: + response = await self.client.put( + 
f"/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan}/pricing-unit/{pricing_unit}", + cookies=self.session_cookies, + ) + response.raise_for_status() + + @_exception_mapper( + http_status_map=_JOB_STATUS_MAP + | { + status.HTTP_409_CONFLICT: ProjectAlreadyStartedError, + status.HTTP_406_NOT_ACCEPTABLE: ClusterNotFoundError, + status.HTTP_422_UNPROCESSABLE_ENTITY: ConfigurationError, + } + ) + async def start_project( + self, + *, + project_id: UUID, + ) -> None: + body_input: dict[str, Any] = {} + + body: ComputationStart = ComputationStart(**body_input) + response = await self.client.post( + f"/computations/{project_id}:start", + cookies=self.session_cookies, + json=jsonable_encoder(body, exclude_unset=True, exclude_defaults=True), + ) + response.raise_for_status() + + @_exception_mapper(http_status_map={}) + async def update_project_inputs( + self, + *, + project_id: ProjectID, + new_inputs: list[ProjectInputUpdate], + ) -> dict[NodeID, ProjectInputGet]: + response = await self.client.patch( + f"/projects/{project_id}/inputs", + cookies=self.session_cookies, + json=jsonable_encoder(new_inputs), + ) + response.raise_for_status() + data: dict[NodeID, ProjectInputGet] | None = ( + Envelope[dict[NodeID, ProjectInputGet]] + .model_validate_json(response.text) + .data + ) + assert data is not None # nosec + return data + + @_exception_mapper(http_status_map={}) + async def get_project_inputs( + self, *, project_id: ProjectID + ) -> dict[NodeID, ProjectInputGet]: + response = await self.client.get( + f"/projects/{project_id}/inputs", + cookies=self.session_cookies, + ) + + response.raise_for_status() + + data: dict[NodeID, ProjectInputGet] | None = ( + Envelope[dict[NodeID, ProjectInputGet]] + .model_validate_json(response.text) + .data + ) + assert data is not None # nosec + return data + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: SolverOutputNotFoundError} + ) + async def get_project_outputs( + self, *, project_id: ProjectID + ) -> dict[NodeID, dict[str, Any]]: + response = await self.client.get( + f"/projects/{project_id}/outputs", + cookies=self.session_cookies, + ) + + response.raise_for_status() + + data: dict[NodeID, dict[str, Any]] | None = ( + Envelope[dict[NodeID, dict[str, Any]]] + .model_validate_json(response.text) + .data + ) + assert data is not None # nosec + return data + + @_exception_mapper(http_status_map={}) + async def update_node_outputs( + self, *, project_id: UUID, node_id: UUID, new_node_outputs: NodeOutputs + ) -> None: + response = await self.client.patch( + f"/projects/{project_id}/nodes/{node_id}/outputs", + cookies=self.session_cookies, + json=jsonable_encoder(new_node_outputs), + ) + response.raise_for_status() + + # WALLETS ------------------------------------------------- + + @_exception_mapper(http_status_map=_WALLET_STATUS_MAP) + async def get_default_wallet(self) -> WalletGetWithAvailableCreditsLegacy: + response = await self.client.get( + "/wallets/default", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = ( + Envelope[WalletGetWithAvailableCreditsLegacy] + .model_validate_json(response.text) + .data + ) + assert data is not None # nosec + return data + + @_exception_mapper(http_status_map=_WALLET_STATUS_MAP) + async def get_wallet( + self, *, wallet_id: int + ) -> WalletGetWithAvailableCreditsLegacy: + response = await self.client.get( + f"/wallets/{wallet_id}", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = ( + Envelope[WalletGetWithAvailableCreditsLegacy] + 
.model_validate_json(response.text) + .data + ) + assert data is not None # nosec + return data + + @_exception_mapper(http_status_map=_WALLET_STATUS_MAP) + async def get_project_wallet(self, *, project_id: ProjectID) -> WalletGet: + response = await self.client.get( + f"/projects/{project_id}/wallet", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = Envelope[WalletGet].model_validate_json(response.text).data + assert data is not None # nosec + return data + + # PRODUCTS ------------------------------------------------- + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: ProductPriceNotFoundError} + ) + async def get_product_price(self) -> GetCreditPriceLegacy: + response = await self.client.get( + "/credits-price", + cookies=self.session_cookies, + ) + response.raise_for_status() + data = Envelope[GetCreditPriceLegacy].model_validate_json(response.text).data + assert data is not None # nosec + return data + + # SERVICES ------------------------------------------------- + + @_exception_mapper( + http_status_map={status.HTTP_404_NOT_FOUND: PricingPlanNotFoundError} + ) + async def get_service_pricing_plan( + self, *, solver_key: SolverKeyId, version: VersionStr + ) -> ServicePricingPlanGet | None: + service_key = urllib.parse.quote_plus(solver_key) + + response = await self.client.get( + f"/catalog/services/{service_key}/{version}/pricing-plan", + cookies=self.session_cookies, + ) + response.raise_for_status() + pricing_plan_get = ( + Envelope[PricingPlanGet].model_validate_json(response.text).data + ) + if pricing_plan_get: + return ServicePricingPlanGet.model_construct( + pricing_plan_id=pricing_plan_get.pricing_plan_id, + display_name=pricing_plan_get.display_name, + description=pricing_plan_get.description, + classification=pricing_plan_get.classification, + created_at=pricing_plan_get.created_at, + pricing_plan_key=pricing_plan_get.pricing_plan_key, + pricing_units=pricing_plan_get.pricing_units, + ) + return None + + +# MODULES APP SETUP ------------------------------------------------------------- + + +def setup( + app: FastAPI, + webserver_settings: WebServerSettings, + tracing_settings: TracingSettings | None, +) -> None: + + setup_client_instance( + app, + WebserverApi, + api_baseurl=webserver_settings.api_base_url, + service_name="webserver", + tracing_settings=tracing_settings, + ) + setup_client_instance( + app, + LongRunningTasksClient, + api_baseurl="", + service_name="long_running_tasks_client", + tracing_settings=tracing_settings, + ) + + def _on_startup() -> None: + # normalize & encrypt + secret_key = webserver_settings.WEBSERVER_SESSION_SECRET_KEY.get_secret_value() + app.state.webserver_fernet = fernet.Fernet(secret_key) + + async def _on_shutdown() -> None: + _logger.debug("Webserver closed successfully") + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/api-server/src/simcore_service_api_server/services_rpc/__init__.py b/services/api-server/src/simcore_service_api_server/services_rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/api-server/src/simcore_service_api_server/services_rpc/catalog.py b/services/api-server/src/simcore_service_api_server/services_rpc/catalog.py new file mode 100644 index 00000000000..f8dbe035e6b --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_rpc/catalog.py @@ -0,0 +1,183 @@ +from dataclasses import dataclass +from functools import partial +from typing import 
Any + +from common_library.exclude import as_dict_exclude_none +from models_library.api_schemas_catalog.services import ( + LatestServiceGet, + ServiceGetV2, + ServiceListFilters, + ServiceSummary, +) +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.products import ProductName +from models_library.rest_pagination import ( + PageLimitInt, + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.services_history import ServiceRelease +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import ValidationError +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.catalog import services as catalog_rpc +from servicelib.rabbitmq.rpc_interfaces.catalog.errors import ( + CatalogForbiddenError, + CatalogItemNotFoundError, +) + +from ..exceptions.backend_errors import ( + InvalidInputError, + ProgramOrSolverOrStudyNotFoundError, + ServiceForbiddenAccessError, +) +from ..exceptions.service_errors_utils import service_exception_mapper + +_exception_mapper = partial(service_exception_mapper, service_name="CatalogService") + + +def _create_page_meta_info(page: Any) -> PageMetaInfoLimitOffset: + """Creates a PageMetaInfoLimitOffset from an RPC response page.""" + return PageMetaInfoLimitOffset( + limit=page.meta.limit, + offset=page.meta.offset, + total=page.meta.total, + count=page.meta.count, + ) + + +@dataclass(frozen=True, kw_only=True) +class CatalogService: + _rpc_client: RabbitMQRPCClient + user_id: UserID + product_name: ProductName + + async def list_latest_releases( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + filters: ServiceListFilters | None = None, + ) -> tuple[list[LatestServiceGet], PageMetaInfoLimitOffset]: + + pagination_kwargs = as_dict_exclude_none( + offset=pagination_offset, limit=pagination_limit + ) + + page = await catalog_rpc.list_services_paginated( + self._rpc_client, + product_name=self.product_name, + user_id=self.user_id, + filters=filters, + **pagination_kwargs, + ) + meta = _create_page_meta_info(page) + return page.data, meta + + async def list_release_history_latest_first( + self, + *, + filter_by_service_key: ServiceKey, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + ) -> tuple[list[ServiceRelease], PageMetaInfoLimitOffset]: + + pagination_kwargs = as_dict_exclude_none( + offset=pagination_offset, limit=pagination_limit + ) + + page = await catalog_rpc.list_my_service_history_latest_first( + self._rpc_client, + product_name=self.product_name, + user_id=self.user_id, + service_key=filter_by_service_key, + **pagination_kwargs, + ) + + meta = _create_page_meta_info(page) + return page.data, meta + + async def list_all_services_summaries( + self, + *, + pagination_offset: PageOffsetInt | None = None, + pagination_limit: PageLimitInt | None = None, + filters: ServiceListFilters | None = None, + ) -> tuple[list[ServiceSummary], PageMetaInfoLimitOffset]: + """Lists all services with pagination, including all versions of each service. + + Returns a lightweight summary view of services for better performance. 
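+
+        Example (illustrative sketch; assumes an already-constructed
+        `CatalogService` instance, here called `catalog_service`):
+
+            summaries, page_meta = await catalog_service.list_all_services_summaries(
+                pagination_offset=0,
+                pagination_limit=20,
+            )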
+ + Args: + pagination_offset: Number of items to skip + pagination_limit: Maximum number of items to return + filters: Optional filters to apply + + Returns: + Tuple containing list of service summaries and pagination metadata + """ + + pagination_kwargs = as_dict_exclude_none( + offset=pagination_offset, limit=pagination_limit + ) + + page = await catalog_rpc.list_all_services_summaries_paginated( + self._rpc_client, + product_name=self.product_name, + user_id=self.user_id, + filters=filters, + **pagination_kwargs, + ) + meta = _create_page_meta_info(page) + return page.data, meta + + @_exception_mapper( + rpc_exception_map={ + CatalogItemNotFoundError: ProgramOrSolverOrStudyNotFoundError, + CatalogForbiddenError: ServiceForbiddenAccessError, + ValidationError: InvalidInputError, + } + ) + async def get( + self, + *, + name: ServiceKey, + version: ServiceVersion, + ) -> ServiceGetV2: + + return await catalog_rpc.get_service( + self._rpc_client, + product_name=self.product_name, + user_id=self.user_id, + service_key=name, + service_version=version, + ) + + @_exception_mapper( + rpc_exception_map={ + CatalogItemNotFoundError: ProgramOrSolverOrStudyNotFoundError, + CatalogForbiddenError: ServiceForbiddenAccessError, + ValidationError: InvalidInputError, + } + ) + async def get_service_ports( + self, + *, + name: ServiceKey, + version: ServiceVersion, + ) -> list[ServicePortGet]: + """Gets service ports (inputs and outputs) for a specific service version + + Raises: + ProgramOrSolverOrStudyNotFoundError: service not found in catalog + ServiceForbiddenAccessError: no access rights to read this service + InvalidInputError: invalid input parameters + """ + return await catalog_rpc.get_service_ports( + self._rpc_client, + product_name=self.product_name, + user_id=self.user_id, + service_key=name, + service_version=version, + ) diff --git a/services/api-server/src/simcore_service_api_server/services_rpc/resource_usage_tracker.py b/services/api-server/src/simcore_service_api_server/services_rpc/resource_usage_tracker.py new file mode 100644 index 00000000000..82e3ea1d369 --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_rpc/resource_usage_tracker.py @@ -0,0 +1,62 @@ +from dataclasses import dataclass +from functools import partial + +from fastapi import FastAPI +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + LicensedItemCheckoutNotFoundError as _LicensedItemCheckoutNotFoundError, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.licensed_items_checkouts import ( + get_licensed_item_checkout as _get_licensed_item_checkout, +) + +from ..exceptions.backend_errors import LicensedItemCheckoutNotFoundError +from ..exceptions.service_errors_utils import service_exception_mapper +from ..models.schemas.model_adapter import LicensedItemCheckoutGet + +_exception_mapper = partial( + service_exception_mapper, service_name="ResourceUsageTracker" +) + + +@dataclass +class ResourceUsageTrackerClient(SingletonInAppStateMixin): + app_state_name = "resource_usage_tracker_rpc_client" + _client: RabbitMQRPCClient + + @_exception_mapper( + rpc_exception_map={ + _LicensedItemCheckoutNotFoundError: LicensedItemCheckoutNotFoundError + } + ) + async def get_licensed_item_checkout( + self, *, product_name: str, 
licensed_item_checkout_id: LicensedItemCheckoutID + ) -> LicensedItemCheckoutGet: + _licensed_item_checkout = await _get_licensed_item_checkout( + rabbitmq_rpc_client=self._client, + product_name=product_name, + licensed_item_checkout_id=licensed_item_checkout_id, + ) + return LicensedItemCheckoutGet( + licensed_item_checkout_id=_licensed_item_checkout.licensed_item_checkout_id, + licensed_item_id=_licensed_item_checkout.licensed_item_id, + key=_licensed_item_checkout.key, + version=_licensed_item_checkout.version, + wallet_id=_licensed_item_checkout.wallet_id, + user_id=_licensed_item_checkout.user_id, + product_name=_licensed_item_checkout.product_name, + started_at=_licensed_item_checkout.started_at, + stopped_at=_licensed_item_checkout.stopped_at, + num_of_seats=_licensed_item_checkout.num_of_seats, + ) + + +def setup(app: FastAPI, rabbitmq_rpc_client: RabbitMQRPCClient): + resource_usage_tracker_rpc_client = ResourceUsageTrackerClient( + _client=rabbitmq_rpc_client + ) + resource_usage_tracker_rpc_client.set_to_app_state(app=app) diff --git a/services/api-server/src/simcore_service_api_server/services_rpc/wb_api_server.py b/services/api-server/src/simcore_service_api_server/services_rpc/wb_api_server.py new file mode 100644 index 00000000000..f5f4534e71e --- /dev/null +++ b/services/api-server/src/simcore_service_api_server/services_rpc/wb_api_server.py @@ -0,0 +1,437 @@ +# pylint: disable=too-many-public-methods + +from dataclasses import dataclass +from functools import partial +from typing import cast + +from common_library.exclude import as_dict_exclude_none +from fastapi import FastAPI +from fastapi_pagination import create_page +from models_library.api_schemas_api_server.functions import ( + Function, + FunctionID, + FunctionInputs, + FunctionInputSchema, + FunctionJob, + FunctionJobCollection, + FunctionJobCollectionID, + FunctionJobCollectionsListFilters, + FunctionJobID, + FunctionOutputSchema, + RegisteredFunction, + RegisteredFunctionJob, + RegisteredFunctionJobCollection, +) +from models_library.api_schemas_webserver.licensed_items import LicensedItemRpcGetPage +from models_library.licenses import LicensedItemID +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.rest_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + PageLimitInt, + PageMetaInfoLimitOffset, + PageOffsetInt, +) +from models_library.rpc.webserver.projects import ( + ListProjectsMarkedAsJobRpcFilters, + MetadataFilterItem, +) +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CanNotCheckoutNotEnoughAvailableSeatsError, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CanNotCheckoutServiceIsNotRunningError as _CanNotCheckoutServiceIsNotRunningError, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + LicensedItemCheckoutNotFoundError as _LicensedItemCheckoutNotFoundError, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + NotEnoughAvailableSeatsError, +) +from servicelib.rabbitmq.rpc_interfaces.webserver import projects as projects_rpc +from 
servicelib.rabbitmq.rpc_interfaces.webserver.functions import ( + functions_rpc_interface, +) +from servicelib.rabbitmq.rpc_interfaces.webserver.licenses.licensed_items import ( + checkout_licensed_item_for_wallet as _checkout_licensed_item_for_wallet, +) +from servicelib.rabbitmq.rpc_interfaces.webserver.licenses.licensed_items import ( + get_available_licensed_items_for_wallet as _get_available_licensed_items_for_wallet, +) +from servicelib.rabbitmq.rpc_interfaces.webserver.licenses.licensed_items import ( + get_licensed_items as _get_licensed_items, +) +from servicelib.rabbitmq.rpc_interfaces.webserver.licenses.licensed_items import ( + release_licensed_item_for_wallet as _release_licensed_item_for_wallet, +) +from simcore_service_api_server.models.basic_types import NameValueTuple + +from ..exceptions.backend_errors import ( + CanNotCheckoutServiceIsNotRunningError, + InsufficientNumberOfSeatsError, + LicensedItemCheckoutNotFoundError, +) +from ..exceptions.service_errors_utils import service_exception_mapper +from ..models.api_resources import RelativeResourceName +from ..models.pagination import Page, PaginationParams +from ..models.schemas.model_adapter import ( + LicensedItemCheckoutGet, + LicensedItemGet, + LicensedResource, +) + +_exception_mapper = partial(service_exception_mapper, service_name="WebApiServer") + + +def _create_licensed_items_get_page( + *, licensed_items_page: LicensedItemRpcGetPage, page_params: PaginationParams +) -> Page[LicensedItemGet]: + page = create_page( + [ + LicensedItemGet( + licensed_item_id=elm.licensed_item_id, + key=elm.key, + version=elm.version, + display_name=elm.display_name, + licensed_resource_type=elm.licensed_resource_type, + licensed_resources=[ + LicensedResource.model_validate(res.model_dump()) + for res in elm.licensed_resources + ], + pricing_plan_id=elm.pricing_plan_id, + is_hidden_on_market=elm.is_hidden_on_market, + created_at=elm.created_at, + modified_at=elm.modified_at, + ) + for elm in licensed_items_page.items + ], + total=licensed_items_page.total, + params=page_params, + ) + return cast(Page[LicensedItemGet], page) + + +@dataclass +class WbApiRpcClient(SingletonInAppStateMixin): + app_state_name = "wb_api_rpc_client" + _client: RabbitMQRPCClient + + @_exception_mapper(rpc_exception_map={}) + async def get_licensed_items( + self, *, product_name: ProductName, page_params: PaginationParams + ) -> Page[LicensedItemGet]: + licensed_items_page = await _get_licensed_items( + rabbitmq_rpc_client=self._client, + product_name=product_name, + offset=page_params.offset, + limit=page_params.limit, + ) + return _create_licensed_items_get_page( + licensed_items_page=licensed_items_page, page_params=page_params + ) + + @_exception_mapper(rpc_exception_map={}) + async def get_available_licensed_items_for_wallet( + self, + *, + product_name: ProductName, + wallet_id: WalletID, + user_id: UserID, + page_params: PaginationParams, + ) -> Page[LicensedItemGet]: + licensed_items_page = await _get_available_licensed_items_for_wallet( + rabbitmq_rpc_client=self._client, + product_name=product_name, + wallet_id=wallet_id, + user_id=user_id, + offset=page_params.offset, + limit=page_params.limit, + ) + return _create_licensed_items_get_page( + licensed_items_page=licensed_items_page, page_params=page_params + ) + + @_exception_mapper( + rpc_exception_map={ + NotEnoughAvailableSeatsError: InsufficientNumberOfSeatsError, + CanNotCheckoutNotEnoughAvailableSeatsError: InsufficientNumberOfSeatsError, + _CanNotCheckoutServiceIsNotRunningError: 
CanNotCheckoutServiceIsNotRunningError, + # NOTE: missing WalletAccessForbiddenError + } + ) + async def checkout_licensed_item_for_wallet( + self, + *, + product_name: ProductName, + user_id: UserID, + wallet_id: WalletID, + licensed_item_id: LicensedItemID, + num_of_seats: int, + service_run_id: ServiceRunID, + ) -> LicensedItemCheckoutGet: + licensed_item_checkout_get = await _checkout_licensed_item_for_wallet( + self._client, + product_name=product_name, + user_id=user_id, + wallet_id=wallet_id, + licensed_item_id=licensed_item_id, + num_of_seats=num_of_seats, + service_run_id=service_run_id, + ) + return LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_get.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_get.licensed_item_id, + key=licensed_item_checkout_get.key, + version=licensed_item_checkout_get.version, + wallet_id=licensed_item_checkout_get.wallet_id, + user_id=licensed_item_checkout_get.user_id, + product_name=licensed_item_checkout_get.product_name, + started_at=licensed_item_checkout_get.started_at, + stopped_at=licensed_item_checkout_get.stopped_at, + num_of_seats=licensed_item_checkout_get.num_of_seats, + ) + + @_exception_mapper( + rpc_exception_map={ + _LicensedItemCheckoutNotFoundError: LicensedItemCheckoutNotFoundError + } + ) + async def release_licensed_item_for_wallet( + self, + *, + product_name: ProductName, + user_id: UserID, + licensed_item_checkout_id: LicensedItemCheckoutID, + ) -> LicensedItemCheckoutGet: + licensed_item_checkout_get = await _release_licensed_item_for_wallet( + self._client, + product_name=product_name, + user_id=user_id, + licensed_item_checkout_id=licensed_item_checkout_id, + ) + return LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_get.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_get.licensed_item_id, + key=licensed_item_checkout_get.key, + version=licensed_item_checkout_get.version, + wallet_id=licensed_item_checkout_get.wallet_id, + user_id=licensed_item_checkout_get.user_id, + product_name=licensed_item_checkout_get.product_name, + started_at=licensed_item_checkout_get.started_at, + stopped_at=licensed_item_checkout_get.stopped_at, + num_of_seats=licensed_item_checkout_get.num_of_seats, + ) + + async def mark_project_as_job( + self, + product_name: ProductName, + user_id: UserID, + project_uuid: ProjectID, + job_parent_resource_name: RelativeResourceName, + ): + await projects_rpc.mark_project_as_job( + rpc_client=self._client, + product_name=product_name, + user_id=user_id, + project_uuid=project_uuid, + job_parent_resource_name=job_parent_resource_name, + ) + + async def list_projects_marked_as_jobs( + self, + *, + product_name: ProductName, + user_id: UserID, + pagination_offset: int = 0, + pagination_limit: int = 50, + filter_by_job_parent_resource_name_prefix: str | None, + filter_any_custom_metadata: list[NameValueTuple] | None, + ): + pagination_kwargs = as_dict_exclude_none( + offset=pagination_offset, limit=pagination_limit + ) + + filters = ListProjectsMarkedAsJobRpcFilters( + job_parent_resource_name_prefix=filter_by_job_parent_resource_name_prefix, + any_custom_metadata=( + [ + MetadataFilterItem(name=name, pattern=pattern) + for name, pattern in filter_any_custom_metadata + ] + if filter_any_custom_metadata + else None + ), + ) + + return await projects_rpc.list_projects_marked_as_jobs( + rpc_client=self._client, + product_name=product_name, + user_id=user_id, + filters=filters, + **pagination_kwargs, + ) + + async def 
register_function(self, *, function: Function) -> RegisteredFunction: + return await functions_rpc_interface.register_function( + self._client, + function=function, + ) + + async def get_function(self, *, function_id: FunctionID) -> RegisteredFunction: + return await functions_rpc_interface.get_function( + self._client, + function_id=function_id, + ) + + async def delete_function(self, *, function_id: FunctionID) -> None: + return await functions_rpc_interface.delete_function( + self._client, + function_id=function_id, + ) + + async def list_functions( + self, + *, + pagination_offset: PageOffsetInt = 0, + pagination_limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + ) -> tuple[list[RegisteredFunction], PageMetaInfoLimitOffset]: + + return await functions_rpc_interface.list_functions( + self._client, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + ) + + async def list_function_jobs( + self, + *, + pagination_offset: PageOffsetInt = 0, + pagination_limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + filter_by_function_id: FunctionID | None = None, + ) -> tuple[list[RegisteredFunctionJob], PageMetaInfoLimitOffset]: + return await functions_rpc_interface.list_function_jobs( + self._client, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filter_by_function_id=filter_by_function_id, + ) + + async def list_function_job_collections( + self, + *, + pagination_offset: PageOffsetInt = 0, + pagination_limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + filters: FunctionJobCollectionsListFilters | None = None, + ) -> tuple[list[RegisteredFunctionJobCollection], PageMetaInfoLimitOffset]: + return await functions_rpc_interface.list_function_job_collections( + self._client, + pagination_offset=pagination_offset, + pagination_limit=pagination_limit, + filters=filters, + ) + + async def run_function( + self, *, function_id: FunctionID, inputs: FunctionInputs + ) -> RegisteredFunctionJob: + return await functions_rpc_interface.run_function( + self._client, + function_id=function_id, + inputs=inputs, + ) + + async def get_function_job( + self, *, function_job_id: FunctionJobID + ) -> RegisteredFunctionJob: + return await functions_rpc_interface.get_function_job( + self._client, + function_job_id=function_job_id, + ) + + async def update_function_title( + self, *, function_id: FunctionID, title: str + ) -> RegisteredFunction: + return await functions_rpc_interface.update_function_title( + self._client, + function_id=function_id, + title=title, + ) + + async def update_function_description( + self, *, function_id: FunctionID, description: str + ) -> RegisteredFunction: + return await functions_rpc_interface.update_function_description( + self._client, + function_id=function_id, + description=description, + ) + + async def delete_function_job(self, *, function_job_id: FunctionJobID) -> None: + return await functions_rpc_interface.delete_function_job( + self._client, + function_job_id=function_job_id, + ) + + async def register_function_job( + self, *, function_job: FunctionJob + ) -> RegisteredFunctionJob: + return await functions_rpc_interface.register_function_job( + self._client, + function_job=function_job, + ) + + async def get_function_input_schema( + self, *, function_id: FunctionID + ) -> FunctionInputSchema: + return await functions_rpc_interface.get_function_input_schema( + self._client, + function_id=function_id, + ) + + async def get_function_output_schema( + self, *, function_id: FunctionID + ) -> FunctionOutputSchema: + 
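+        # NOTE: thin pass-through to the webserver functions RPC interface
+        # (no extra mapping or validation is applied here)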
return await functions_rpc_interface.get_function_output_schema( + self._client, + function_id=function_id, + ) + + async def find_cached_function_job( + self, *, function_id: FunctionID, inputs: FunctionInputs + ) -> RegisteredFunctionJob | None: + return await functions_rpc_interface.find_cached_function_job( + self._client, function_id=function_id, inputs=inputs + ) + + async def get_function_job_collection( + self, *, function_job_collection_id: FunctionJobCollectionID + ) -> RegisteredFunctionJobCollection: + return await functions_rpc_interface.get_function_job_collection( + self._client, function_job_collection_id=function_job_collection_id + ) + + async def register_function_job_collection( + self, *, function_job_collection: FunctionJobCollection + ) -> RegisteredFunctionJobCollection: + return await functions_rpc_interface.register_function_job_collection( + self._client, function_job_collection=function_job_collection + ) + + async def delete_function_job_collection( + self, *, function_job_collection_id: FunctionJobCollectionID + ) -> None: + return await functions_rpc_interface.delete_function_job_collection( + self._client, function_job_collection_id=function_job_collection_id + ) + + +def setup(app: FastAPI, rabbitmq_rmp_client: RabbitMQRPCClient): + wb_api_rpc_client = WbApiRpcClient(_client=rabbitmq_rmp_client) + wb_api_rpc_client.set_to_app_state(app=app) diff --git a/services/api-server/src/simcore_service_api_server/utils/app_data.py b/services/api-server/src/simcore_service_api_server/utils/app_data.py index 8a0e3a0a1da..6f1a5588e5c 100644 --- a/services/api-server/src/simcore_service_api_server/utils/app_data.py +++ b/services/api-server/src/simcore_service_api_server/utils/app_data.py @@ -6,19 +6,21 @@ class AppDataMixin: """ - appdata preserves a single instance of the data within an app context + appdata is a unique entry of app's state This mixin adds a mechanism to reliably create, get and delete instances of the derived class """ + state_attr_name: str | None = None + @classmethod def create_once(cls, app: FastAPI, **data): - """Creates a single instance in app""" + """Creates a single instance in app context""" obj = cls.get_instance(app) - if not obj: - assert issubclass(cls, AppDataMixin), "AppDataMixin must be inherited!" + if obj is None: + assert issubclass(cls, AppDataMixin) # nosec cls.state_attr_name = f"unique_{cls.__name__.lower()}" # creates dataclass instance @@ -32,20 +34,19 @@ def create_once(cls, app: FastAPI, **data): @classmethod def get_instance(cls, app: FastAPI): """Gets single instance in app if any, otherwise returns None""" - assert issubclass(cls, AppDataMixin), "AppDataMixin must be inherited!" + assert issubclass(cls, AppDataMixin) # nosec - try: - obj = getattr(app.state, cls.state_attr_name) - except AttributeError: - # not in app.state or state_attr_name undefined + if cls.state_attr_name is None: return None - return obj + assert isinstance(cls.state_attr_name, str) # nosec + + return getattr(app.state, cls.state_attr_name, None) @classmethod def pop_instance(cls, app: FastAPI): - assert issubclass(cls, AppDataMixin), "AppDataMixin must be inherited!" 
+ assert issubclass(cls, AppDataMixin) # nosec obj = cls.get_instance(app) - if obj: + if obj and cls.state_attr_name: delattr(app.state, cls.state_attr_name) return obj diff --git a/services/api-server/src/simcore_service_api_server/utils/client_base.py b/services/api-server/src/simcore_service_api_server/utils/client_base.py index 8e591c8ed39..3cc35a74bb6 100644 --- a/services/api-server/src/simcore_service_api_server/utils/client_base.py +++ b/services/api-server/src/simcore_service_api_server/utils/client_base.py @@ -1,13 +1,15 @@ import logging from dataclasses import dataclass -from typing import Optional import httpx from fastapi import FastAPI +from httpx import AsyncClient +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from settings_library.tracing import TracingSettings from .app_data import AppDataMixin -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) @dataclass @@ -29,8 +31,7 @@ async def is_responsive(self) -> bool: resp = await self.client.get(self.health_check_path, timeout=1) resp.raise_for_status() return True - except (httpx.HTTPStatusError, httpx.RequestError) as err: - log.error("%s not responsive: %s", self.service_name, err) + except (httpx.HTTPStatusError, httpx.RequestError): return False ping = is_responsive # alias @@ -44,22 +45,29 @@ def setup_client_instance( api_cls: type[BaseServiceClientApi], api_baseurl, service_name: str, - **extra_fields + tracing_settings: TracingSettings | None, + **extra_fields, ) -> None: """Helper to add init/cleanup of ServiceClientApi instances in the app lifespam""" assert issubclass(api_cls, BaseServiceClientApi) # nosec + # NOTE: this term is mocked in tests. If you need to modify pay attention to the mock + client = AsyncClient(base_url=api_baseurl) + if tracing_settings: + setup_httpx_client_tracing(client) + # events def _create_instance() -> None: + _logger.debug("Creating %s for %s", f"{type(client)=}", f"{api_baseurl=}") api_cls.create_once( app, - client=httpx.AsyncClient(base_url=api_baseurl), + client=client, service_name=service_name, - **extra_fields + **extra_fields, ) async def _cleanup_instance() -> None: - api_obj: Optional[BaseServiceClientApi] = api_cls.pop_instance(app) + api_obj: BaseServiceClientApi | None = api_cls.pop_instance(app) if api_obj: await api_obj.client.aclose() diff --git a/services/api-server/src/simcore_service_api_server/utils/client_decorators.py b/services/api-server/src/simcore_service_api_server/utils/client_decorators.py deleted file mode 100644 index ad6c16b1b38..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/client_decorators.py +++ /dev/null @@ -1,91 +0,0 @@ -""" Collection of decorators for httpx request functions - - Each decorator implements a specific feature on the request workflow: - - retrial - - error handling - - transform/compose/merge responses from different internal APIs - - TODO: circuit breaker? - - TODO: diagnostic tracker? - - TODO: cache? 
-""" - -import functools -import logging -from typing import Callable, Union - -import httpx -from fastapi import HTTPException -from starlette import status -from tenacity import ( - before_sleep_log, - retry, - retry_if_exception_type, - stop_after_attempt, - wait_fixed, -) - -from ..models.types import JSON - - -def handle_retry(logger: logging.Logger): - """ - Retry policy after connection timeout or a network error - - SEE https://www.python-httpx.org/exceptions/ - """ - return retry( - retry=retry_if_exception_type((httpx.TimeoutException, httpx.NetworkError)), - wait=wait_fixed(2), - stop=stop_after_attempt(3), - reraise=True, - before_sleep=before_sleep_log(logger, logging.DEBUG), - ) - - -def handle_errors( - service_name: str, logger: logging.Logger, *, return_json: bool = True -): - """ - Handles different types of errors and transform them into error reponses - - - httpx errors -> logged + respond raising HTTP_503_SERVICE_UNAVAILABLE - - response client error -> forward response raising status error - - response server error -> logged + responds raising HTTP_503_SERVICE_UNAVAILABLE - - ok -> json() - """ - - def decorator_func(request_func: Callable): - @functools.wraps(request_func) - async def wrapper_func(*args, **kwargs) -> Union[JSON, httpx.Response]: - try: - # TODO: assert signature!? - resp: httpx.Response = await request_func(*args, **kwargs) - - except httpx.RequestError as err: - logger.error( - "Failed request %s(%s, %s)", request_func.__name__, args, kwargs - ) - raise HTTPException( - status.HTTP_503_SERVICE_UNAVAILABLE, - detail=f"{service_name} is not responsive", - ) from err - - # status response errors - if httpx.codes.is_client_error(resp.status_code): - raise HTTPException(resp.status_code, detail=resp.reason_phrase) - - if httpx.codes.is_server_error(resp.status_code): # i.e. 5XX error - logger.error( - "%s service error %d [%s]: %s", - service_name, - resp.reason_phrase, - resp.status_code, - resp.text, - ) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) - - return resp.json() if return_json else resp - - return wrapper_func - - return decorator_func diff --git a/services/api-server/src/simcore_service_api_server/utils/hash.py b/services/api-server/src/simcore_service_api_server/utils/hash.py deleted file mode 100644 index 8037b3e866f..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/hash.py +++ /dev/null @@ -1,33 +0,0 @@ -""" - Helpers functions around https://docs.python.org/3/library/hashlib.html?highlight=hashlib - -""" -import hashlib - -CHUNK_4KB = 4 * 1024 # 4K blocks - - -async def create_md5_checksum(async_stream, *, chunk_size=CHUNK_4KB) -> str: - """ - Usage: - import aiofiles - - async with aiofiles.open(path, mode="rb") as file: - md5check = await create_md5_checksum(file) - - SEE https://ant.apache.org/manual/Tasks/checksum.html - WARNING: bandit reports the use of insecure MD2, MD4, MD5, or SHA1 hash function. 
- """ - md5_hash = hashlib.md5() # nosec - return await _eval_hash_async(async_stream, md5_hash, chunk_size) - - -async def _eval_hash_async(async_stream, hasher, chunk_size) -> str: - more_chunk = True - while more_chunk: - chunk = await async_stream.read(chunk_size) - more_chunk = len(chunk) == chunk_size - - hasher.update(chunk) - digest = hasher.hexdigest() - return digest diff --git a/services/api-server/src/simcore_service_api_server/utils/serialization.py b/services/api-server/src/simcore_service_api_server/utils/serialization.py deleted file mode 100644 index a08fe8bfac3..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/serialization.py +++ /dev/null @@ -1,22 +0,0 @@ -import json -from typing import Any, Callable, Optional - -import orjson - - -def json_dumps( - v, *, default: Optional[Callable[[Any], Any]] = None, indent: Optional[int] = None -) -> str: - # SEE https://github.com/ijl/orjson - # - orjson.dumps returns *bytes*, to match standard json.dumps we need to decode - - # Cannot use anymore human readable prints like ``print(model.json(indent=2))`` - # because it does not include indent option. This is very convenient for debugging - # so if added, it switches to json - if indent: - return json.dumps(v, default=default, indent=indent) - - return orjson.dumps(v, default=default).decode() - - -json_loads = orjson.loads diff --git a/services/api-server/src/simcore_service_api_server/utils/solver_job_models_converters.py b/services/api-server/src/simcore_service_api_server/utils/solver_job_models_converters.py deleted file mode 100644 index 5e079bb6804..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/solver_job_models_converters.py +++ /dev/null @@ -1,266 +0,0 @@ -""" - Helper functions to convert models used in - services/api-server/src/simcore_service_api_server/api/routes/solvers_jobs.py -""" -import urllib.parse -import uuid -from datetime import datetime -from functools import lru_cache -from typing import Callable, Optional - -from models_library.projects_nodes import InputID, InputTypes - -from ..models.domain.projects import ( - InputTypes, - NewProjectIn, - Node, - Project, - SimCoreFileLink, - StudyUI, -) -from ..models.schemas.files import File -from ..models.schemas.jobs import ArgumentType, Job, JobInputs, JobStatus, TaskStates -from ..models.schemas.solvers import Solver, SolverKeyId, VersionStr -from ..modules.director_v2 import ComputationTaskGet -from .typing_extra import get_types - -# UTILS ------ -_BASE_UUID = uuid.UUID("231e13db-6bc6-4f64-ba56-2ee2c73b9f09") - - -@lru_cache() -def compose_uuid_from(*values) -> str: - composition = "/".join(map(str, values)) - new_uuid = uuid.uuid5(_BASE_UUID, composition) - return str(new_uuid) - - -def format_datetime(snapshot: datetime) -> str: - return "{}Z".format(snapshot.isoformat(timespec="milliseconds")) - - -def now_str() -> str: - # NOTE: backend MUST use UTC - return format_datetime(datetime.utcnow()) - - -# CONVERTERS -------------- -# -# - creates a model in one API composing models in others -# - - -def create_node_inputs_from_job_inputs(inputs: JobInputs) -> dict[InputID, InputTypes]: - - # map Job inputs with solver inputs - # TODO: ArgumentType -> InputTypes dispatcher - - node_inputs: dict[InputID, InputTypes] = {} - for name, value in inputs.values.items(): - - assert isinstance(value, get_types(ArgumentType)) # nosec - - if isinstance(value, File): - # FIXME: ensure this aligns with storage policy - node_inputs[name] = SimCoreFileLink( - store=0, - 
path=f"api/{value.id}/{value.filename}", - label=value.filename, - eTag=value.checksum, - ) - else: - node_inputs[name] = value - - # TODO: validate Inputs?? - - return node_inputs - - -def create_job_inputs_from_node_inputs(inputs: dict[InputID, InputTypes]) -> JobInputs: - """Reverse from create_node_inputs_from_job_inputs - - raises ValidationError - """ - input_values: dict[str, ArgumentType] = {} - for name, value in inputs.items(): - - assert isinstance(name, get_types(InputID)) # nosec - assert isinstance(value, get_types(InputTypes)) # nosec - - if isinstance(value, SimCoreFileLink): - # FIXME: ensure this aligns with storage policy - _api, file_id, filename = value.path.split("/") - assert _api == "api" # nosec - input_values[name] = File( - id=file_id, - filename=filename, - checksum=value.e_tag, - ) - else: - input_values[name] = value - - job_inputs = JobInputs(values=input_values) # raises ValidationError - return job_inputs - - -def get_node_id(project_id, solver_id) -> str: - # By clumsy design, the webserver needs a global uuid, - # so we decieded to compose as this - return compose_uuid_from(project_id, solver_id) - - -def create_new_project_for_job( - solver: Solver, job: Job, inputs: JobInputs -) -> NewProjectIn: - """ - Creates a project for a solver's job - - Returns model used in the body of create_project at the web-server API - - In reality, we also need solvers and inputs to produce - the project, but the name of the function is intended - to stress the one-to-one equivalence between a project - (model at web-server API) and a job (model at api-server API) - - - raises ValidationError - """ - project_id = job.id - solver_id = get_node_id(project_id, solver.id) - - # map Job inputs with solveri nputs - # TODO: ArgumentType -> InputTypes dispatcher and reversed - solver_inputs: dict[InputID, InputTypes] = create_node_inputs_from_job_inputs( - inputs - ) - - solver_service = Node( - key=solver.id, - version=solver.version, - label=solver.title, - inputs=solver_inputs, - inputsUnits={}, - ) - - # Ensembles project model so it can be used as input for create_project - job_info = job.json( - include={"id", "name", "inputs_checksum", "created_at"}, indent=2 - ) - - create_project_body = NewProjectIn( - uuid=project_id, - name=job.name, # NOTE: this IS an identifier as well. MUST NOT be changed in the case of project APIs! 
- description=f"Study associated to solver job:\n{job_info}", - thumbnail="https://2xx2gy2ovf3r21jclkjio3x8-wpengine.netdna-ssl.com/wp-content/uploads/2018/12/API-Examples.jpg", # https://placeimg.com/171/96/tech/grayscale/?0.jpg", - workbench={solver_id: solver_service}, - ui=StudyUI( - workbench={ - solver_id: {"position": {"x": 633, "y": 229}}, - }, - slideshow={}, - currentNodeId=solver_id, - annotations={}, - ), - # FIXME: these should be unnecessary - prjOwner="api-placeholder@osparc.io", - creationDate=now_str(), - lastChangeDate=now_str(), - accessRights={}, - dev={}, - ) - - return create_project_body - - -def _copy_n_update_urls( - job: Job, url_for: Callable, solver_key: SolverKeyId, version: VersionStr -): - return job.copy( - update={ - "url": url_for( - "get_job", solver_key=solver_key, version=version, job_id=job.id - ), - "runner_url": url_for( - "get_solver_release", - solver_key=solver_key, - version=version, - ), - "outputs_url": url_for( - "get_job_outputs", - solver_key=solver_key, - version=version, - job_id=job.id, - ), - } - ) - - -def create_job_from_project( - solver_key: SolverKeyId, - solver_version: VersionStr, - project: Project, - url_for: Optional[Callable] = None, -) -> Job: - """ - Given a project, creates a job - - - Complementary from create_project_from_job - - Assumes project created via solver's job - - raise ValidationError - """ - assert len(project.workbench) == 1 # nosec - assert solver_version in project.name # nosec - assert urllib.parse.quote_plus(solver_key) in project.name # nosec - - # get solver node - node_id = list(project.workbench.keys())[0] - solver_node: Node = project.workbench[node_id] - job_inputs: JobInputs = create_job_inputs_from_node_inputs( - inputs=solver_node.inputs or {} - ) - - # create solver's job - solver_name = Solver.compose_resource_name(solver_key, solver_version) - - job = Job( - id=project.uuid, - name=project.name, - inputs_checksum=job_inputs.compute_checksum(), - created_at=project.creation_date, - runner_name=solver_name, - url=None, - runner_url=None, - outputs_url=None, - ) - if url_for: - job = _copy_n_update_urls(job, url_for, solver_key, solver_version) - assert all( # nosec - getattr(job, f) for f in job.__fields__ if f.startswith("url") - ) # nosec - - return job - - -def create_jobstatus_from_task(task: ComputationTaskGet) -> JobStatus: - - job_status = JobStatus( - job_id=task.id, - state=task.state, - progress=task.guess_progress(), - submitted_at=datetime.utcnow(), - ) - - # FIXME: timestamp is wrong but at least it will stop run - if job_status.state in [ - TaskStates.SUCCESS, - TaskStates.FAILED, - TaskStates.ABORTED, - ]: - job_status.take_snapshot("stopped") - elif job_status.state in [ - TaskStates.STARTED, - ]: - job_status.take_snapshot("started") - - return job_status diff --git a/services/api-server/src/simcore_service_api_server/utils/solver_job_outputs.py b/services/api-server/src/simcore_service_api_server/utils/solver_job_outputs.py deleted file mode 100644 index b7e6c18ef41..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/solver_job_outputs.py +++ /dev/null @@ -1,50 +0,0 @@ -import logging -from typing import Union - -import aiopg -from fastapi import status -from fastapi.exceptions import HTTPException -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.projects_nodes_io import BaseFileLink -from simcore_sdk import node_ports_v2 -from simcore_sdk.node_ports_v2 import DBManager, Nodeports - 
-from .typing_extra import get_types - -log = logging.getLogger(__name__) - - -ResultsTypes = Union[float, int, bool, BaseFileLink, str, None] - - -async def get_solver_output_results( - user_id: int, project_uuid: ProjectID, node_uuid: NodeID, db_engine: aiopg.sa.Engine -) -> dict[str, ResultsTypes]: - """ - Wraps calls via node_ports to retrieve project's output - """ - - # get the DB engine - db_manager = DBManager(db_engine=db_engine) - - try: - solver: Nodeports = await node_ports_v2.ports( - user_id=user_id, - project_id=str(project_uuid), - node_uuid=str(node_uuid), - db_manager=db_manager, - ) - solver_output_results = {} - for port in (await solver.outputs).values(): - log.debug("Getting %s [%s]: %s", port.key, port.property_type, port.value) - assert isinstance(port.value, get_types(ResultsTypes)) # nosec - solver_output_results[port.key] = port.value - - return solver_output_results - - except node_ports_v2.exceptions.NodeNotFound as err: - raise HTTPException( - status.HTTP_404_NOT_FOUND, - detail=f"Solver {node_uuid} output of project {project_uuid} not found", - ) from err diff --git a/services/api-server/src/simcore_service_api_server/utils/typing_extra.py b/services/api-server/src/simcore_service_api_server/utils/typing_extra.py deleted file mode 100644 index 49e1cf29255..00000000000 --- a/services/api-server/src/simcore_service_api_server/utils/typing_extra.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Tuple, Union, get_args, get_origin - - -def get_types(annotation) -> Tuple: - # WARNING: use for testing ONLY - - assert get_origin(dict[str, int]) is dict # nosec - assert get_args(dict[int, str]) == (int, str) # nosec - assert get_origin(Union[int, str]) is Union # nosec - assert get_args(Union[int, str]) == (int, str) # nosec - - if get_origin(annotation) is Union: - annotated_types = get_args(annotation) - else: - annotated_types = (annotation,) - - def _transform(annotated_type): - for primitive_type in (float, bool, int, str): - try: - if issubclass(annotated_type, primitive_type): - return primitive_type - except TypeError: - # list[Any] or dict[str, Any] - pass - - return annotated_type - - return tuple(map(_transform, annotated_types)) diff --git a/services/api-server/tests/conftest.py b/services/api-server/tests/conftest.py index 27f03a23e17..16d33d3afc0 100644 --- a/services/api-server/tests/conftest.py +++ b/services/api-server/tests/conftest.py @@ -8,14 +8,28 @@ import pytest import simcore_service_api_server from dotenv import dotenv_values -from pytest_simcore.helpers.utils_envs import EnvVarsDict, setenvs_from_dict +from models_library.projects import ProjectID +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_api_server.models.schemas.jobs import JobID CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent pytest_plugins = [ + "pytest_simcore.cli_runner", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.httpbin_service", + "pytest_simcore.httpx_calls_capture", "pytest_simcore.pydantic_models", "pytest_simcore.pytest_global_environs", + "pytest_simcore.rabbit_service", "pytest_simcore.repository_paths", + "pytest_simcore.schemas", + "pytest_simcore.services_api_mocks_for_aiohttp_clients", ] @@ -23,8 +37,9 @@ def project_env_devel_vars(project_slug_dir: Path) -> EnvVarsDict: env_devel_file = 
project_slug_dir / ".env-devel" assert env_devel_file.exists() - environ = dotenv_values(env_devel_file, verbose=True, interpolate=True) - return environ + environs = dotenv_values(env_devel_file, verbose=True, interpolate=True) + assert not any(value is None for key, value in environs.items()) + return environs @pytest.fixture(scope="session") @@ -52,20 +67,12 @@ def default_app_env_vars( env_vars.update(project_env_devel_vars) env_vars.update(dockerfile_env_vars) env_vars["API_SERVER_DEV_FEATURES_ENABLED"] = "1" + env_vars["API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED"] = "1" + env_vars["API_SERVER_PROMETHEUS_INSTRUMENTATION_ENABLED"] = "0" return env_vars -@pytest.fixture -def patched_default_app_environ( - monkeypatch: pytest.MonkeyPatch, default_app_env_vars: EnvVarsDict -) -> EnvVarsDict: - """default environment for testing""" - - setenvs_from_dict(monkeypatch, default_app_env_vars) - return default_app_env_vars - - ## FOLDER LAYOUT ---- @@ -92,3 +99,11 @@ def tests_utils_dir(project_tests_dir: Path) -> Path: utils_dir = (project_tests_dir / "utils").resolve() assert utils_dir.exists() return utils_dir + + +## BASIC IDENTIFIERS --- + + +@pytest.fixture +def job_id(project_id: ProjectID) -> JobID: + return TypeAdapter(JobID).validate_python(project_id) diff --git a/services/api-server/tests/integration/.gitkeep b/services/api-server/tests/integration/.gitkeep deleted file mode 100644 index 97834dcf47d..00000000000 --- a/services/api-server/tests/integration/.gitkeep +++ /dev/null @@ -1 +0,0 @@ -# Use tests/public-api diff --git a/services/api-server/tests/mocks/cleanup.py b/services/api-server/tests/mocks/cleanup.py new file mode 100644 index 00000000000..fb563a3ebe8 --- /dev/null +++ b/services/api-server/tests/mocks/cleanup.py @@ -0,0 +1,56 @@ +import argparse +import json +import re +from pathlib import Path + +from faker import Faker + +_fake = Faker() + + +def anonymize_values(json_key, json_data): + if isinstance(json_data, dict): + for key, value in json_data.items(): + json_data[key] = anonymize_values(key, value) + elif isinstance(json_data, list): + for i in range(len(json_data)): + json_data[i] = anonymize_values(i, json_data[i]) + elif isinstance(json_data, str): + if "@" in json_data: + print("\tAnonymizing email ...") + json_data = _fake.email() + elif json_key == "affiliation": + print(f"\tAnonymizing {json_key} ...") + json_data = _fake.company() + elif json_key == "name" and re.match(r"^[A-Z][a-z]+ +[A-Z][a-z]+$", json_data): + print("\tAnonymizing user names ...") + json_data = f"{_fake.first_name()} {_fake.last_name()}" + + return json_data + + +def main(): + parser = argparse.ArgumentParser(description="Anonymizes mocks/*.json files") + + parser.add_argument( + "file", nargs="?", type=str, help="The file that will be sanitized" + ) + args = parser.parse_args() + + if args.file: + target = Path(args.file) + assert target.exists() + iter_paths = [ + target, + ] + else: + iter_paths = Path.cwd().glob("*.json") + + for path in iter_paths: + print("Anonymizing", path, "...") + json_data = anonymize_values(None, json.loads(path.read_text())) + path.write_text(json.dumps(json_data, indent=1)) + + +if __name__ == "__main__": + main() diff --git a/services/api-server/tests/mocks/create_program_job_success.json b/services/api-server/tests/mocks/create_program_job_success.json new file mode 100644 index 00000000000..32006ffdc5a --- /dev/null +++ b/services/api-server/tests/mocks/create_program_job_success.json @@ -0,0 +1,200 @@ +[ + { + "name": "POST /projects", + "description": 
"", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "hidden=false", + "request_payload": { + "uuid": "a6677890-356b-4113-b26e-b77a748427f4", + "name": "programs/simcore%2Fservices%2Fdynamic%2Felectrode-selector/releases/2.1.3/jobs/a6677890-356b-4113-b26e-b77a748427f4", + "description": "Study associated to solver job:\n{\n \"id\": \"a6677890-356b-4113-b26e-b77a748427f4\",\n \"name\": \"programs/simcore%2Fservices%2Fdynamic%2Felectrode-selector/releases/2.1.3/jobs/a6677890-356b-4113-b26e-b77a748427f4\",\n \"inputs_checksum\": \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n \"created_at\": \"2025-04-15T13:14:44.594564Z\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "workbench": { + "f298a7d7-900d-533d-9385-89b39ea023fe": { + "key": "simcore/services/dynamic/electrode-selector", + "version": "2.1.3", + "label": "sleeper", + "progress": null, + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsRequired": [], + "inputsUnits": {}, + "inputAccess": null, + "inputNodes": [], + "outputs": {}, + "outputNode": null, + "outputNodes": null, + "parent": null, + "position": null, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + }, + "bootOptions": null + } + }, + "accessRights": {}, + "tags": [], + "classifiers": [], + "ui": { + "icon": null, + "workbench": { + "f298a7d7-900d-533d-9385-89b39ea023fe": { + "position": { + "x": 633, + "y": 229 + }, + "marker": null + } + }, + "slideshow": {}, + "currentNodeId": "f298a7d7-900d-533d-9385-89b39ea023fe", + "annotations": {} + }, + "workspaceId": null, + "type": "STANDARD", + "templateType": null, + "folderId": null + }, + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Fhidden%3Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8", + "task_name": "POST /v0/projects?hidden=false", + "status_href": "http://webserver:8080/v0/tasks-legacy/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8", + "result_href": "http://webserver:8080/v0/tasks-legacy/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8/result", + "abort_href": "http://webserver:8080/v0/tasks-legacy/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8" + } + }, + "status_code": 202 + }, + { + "name": "GET http://webserver:8080/v0/tasks-legacy/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks-legacy/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks-legacy" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Fhidden%3Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2025-04-15T13:14:44.617066" + } + } + }, + { + "name": "GET http://webserver:8080/v0/tasks-legacy/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dfalse.24ca757c-9edb-4017-b0e2-e6f7dcc447c8/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks-legacy/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + 
"response_value": "tasks-legacy" + } + ] + }, + "response_body": { + "data": { + "uuid": "a6677890-356b-4113-b26e-b77a748427f4", + "name": "programs/simcore%2Fservices%2Fdynamic%2Felectrode-selector/releases/2.1.3/jobs/a6677890-356b-4113-b26e-b77a748427f4", + "description": "Study associated to solver job:\n{\n \"id\": \"a6677890-356b-4113-b26e-b77a748427f4\",\n \"name\": \"programs/simcore%2Fservices%2Fdynamic%2Felectrode-selector/releases/2.1.3/jobs/a6677890-356b-4113-b26e-b77a748427f4\",\n \"inputs_checksum\": \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n \"created_at\": \"2025-04-15T13:14:44.594564Z\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "workbench": { + "f298a7d7-900d-533d-9385-89b39ea023fe": { + "key": "simcore/services/dynamic/electrode-selector", + "version": "2.1.3", + "label": "sleeper", + "inputs": {}, + "inputsRequired": [], + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + } + } + }, + "prjOwner": "bisgaard@itis.swiss", + "accessRights": { + "4": { + "read": true, + "write": true, + "delete": true + } + }, + "creationDate": "2025-04-15T13:14:44.636Z", + "lastChangeDate": "2025-04-15T13:14:46.704Z", + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "UNKNOWN" + } + }, + "trashedAt": null, + "trashedBy": null, + "tags": [], + "classifiers": [], + "quality": {}, + "ui": { + "workbench": { + "f298a7d7-900d-533d-9385-89b39ea023fe": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "f298a7d7-900d-533d-9385-89b39ea023fe", + "annotations": {} + }, + "dev": {}, + "workspaceId": null, + "type": "STANDARD", + "templateType": null, + "folderId": null + } + }, + "status_code": 201 + } +] diff --git a/services/api-server/tests/mocks/create_study_job.json b/services/api-server/tests/mocks/create_study_job.json new file mode 100644 index 00000000000..db7f717efd2 --- /dev/null +++ b/services/api-server/tests/mocks/create_study_job.json @@ -0,0 +1,306 @@ +[ + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "from_study=f44c67dc-11cd-11ef-967b-0242ac140010&hidden=true", + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Df44c67dc-11cd-11ef-967b-0242ac140010%26hidden%3Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a", + "task_name": "POST /v0/projects?from_study=f44c67dc-11cd-11ef-967b-0242ac140010&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Df44c67dc-11cd-11ef-967b-0242ac140010%2526hidden%253Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Df44c67dc-11cd-11ef-967b-0242ac140010%2526hidden%253Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Df44c67dc-11cd-11ef-967b-0242ac140010%2526hidden%253Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a" + } + }, + "status_code": 202 + }, + { + "name": "GET http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Df44c67dc-11cd-11ef-967b-0242ac140010%2526hidden%253Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": 
"/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Df44c67dc-11cd-11ef-967b-0242ac140010%26hidden%3Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-05-14T09:43:20.075800" + } + } + }, + { + "name": "GET http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Df44c67dc-11cd-11ef-967b-0242ac140010%2526hidden%253Dtrue.e08f96fe-bff3-44d0-9278-607536f7189a/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "uuid": "65d611f2-11d6-11ef-9991-0242ac14000a", + "name": "New Study (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-05-14T09:43:20.099Z", + "lastChangeDate": "2024-05-14T09:43:20.099Z", + "workbench": {}, + "prjOwner": "frubio@example.net", + "workspaceId": 23, + "type": "STANDARD", + "templateType": null, + "folderId": 4, + "trashedAt": "2024-05-14T09:55:20.099Z", + "trashedBy": 2, + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "UNKNOWN" + } + }, + "ui": { + "workbench": {}, + "slideshow": {}, + "currentNodeId": "f44c67dc-11cd-11ef-967b-0242ac140010", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "PATCH 
/projects/65d611f2-11d6-11ef-9991-0242ac14000a", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "name": "posix" + }, + "status_code": 204 + }, + { + "name": "GET /projects/65d611f2-11d6-11ef-9991-0242ac14000a/inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": {} + } + } +] diff --git a/services/api-server/tests/mocks/delete_file.json b/services/api-server/tests/mocks/delete_file.json new file mode 100644 index 00000000000..32d19eb22b6 --- /dev/null +++ b/services/api-server/tests/mocks/delete_file.json @@ -0,0 +1,105 @@ +[ + { + "name": "POST /simcore-s3/files/metadata:search", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search", + "path_parameters": [] + }, + "query": "kind=owned&user_id=1&startswith=api/cc3dd190-8c87-3686-b581-d9f7809d312e", + "request_payload": null, + "response_body": { + "data": [ + { + "file_uuid": "api/cc3dd190-8c87-3686-b581-d9f7809d312e/tmp.py", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "tmp.py", + "file_id": "api/cc3dd190-8c87-3686-b581-d9f7809d312e/tmp.py", + "created_at": "2023-09-04T08:03:26.191887", + "last_modified": "2023-09-04T08:03:26+00:00", + "file_size": 250, + "entity_tag": "c374e4caf73fc4cd39c0000d11d36de0", + "is_soft_link": false, + "is_directory": false + } + ] + }, + "status_code": 200 + }, + { + "name": "DELETE http://storage:8080/v0/locations/0/files/api%2Fcc3dd190-8c87-3686-b581-d9f7809d312e%2Ftmp.py", + "description": "", + "method": "DELETE", + "host": "storage", + "path": { + "path": "/v0/locations/{location_id}/files/{file_id}", + "path_parameters": [ + { + "in_": "path", + "name": "file_id", + "required": true, + "schema_": { + "title": "File Id", + "type_": null, + "pattern": null, + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": [ + { + "title": null, + "type_": "str", + "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + { + "title": null, + "type_": "str", + "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + } + ], + "allOf": null, + "oneOf": null + }, + "response_value": "api/cc3dd190-8c87-3686-b581-d9f7809d312e/tmp.py" + }, + { + "in_": "path", + "name": "location_id", + "required": true, + "schema_": { + "title": "Location Id", + "type_": "int", + "pattern": null, + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "0" + } + ] + }, + "query": "user_id=1", + 
"request_payload": null, + "response_body": null, + "status_code": 204 + } +] diff --git a/services/api-server/tests/mocks/delete_project_not_found.json b/services/api-server/tests/mocks/delete_project_not_found.json new file mode 100644 index 00000000000..a4dac698380 --- /dev/null +++ b/services/api-server/tests/mocks/delete_project_not_found.json @@ -0,0 +1,32 @@ +{ + "name": "DELETE /projects/{{ project_id }}", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": "/v0/projects/{{ project_id }}", + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project {{ project_id }} not found", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project {{ project_id }} not found", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project {{ project_id }} not found" + } + }, + "status_code": 404 +} diff --git a/services/api-server/tests/mocks/for_test_api_routes_studies.json b/services/api-server/tests/mocks/for_test_api_routes_studies.json new file mode 100644 index 00000000000..65524c3c742 --- /dev/null +++ b/services/api-server/tests/mocks/for_test_api_routes_studies.json @@ -0,0 +1,631 @@ +[ + { + "name": "get_me", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/me", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "first_name": "collins", + "last_name": "drew", + "id": 1, + "login": "sarahlopez@example.net", + "role": "User", + "groups": { + "me": { + "gid": 3, + "label": "collinsdrew", + "description": "primary group", + "thumbnail": null, + "accessRights": { + "read": true, + "write": false, + "delete": false + }, + "inclusionRules": {} + }, + "organizations": [ + { + "gid": 2, + "label": "osparc", + "description": "osparc product group", + "thumbnail": null, + "accessRights": { + "read": false, + "write": false, + "delete": false + }, + "inclusionRules": {} + } + ], + "all": { + "gid": 1, + "label": "Everyone", + "description": "all users", + "thumbnail": null, + "accessRights": { + "read": true, + "write": false, + "delete": false + }, + "inclusionRules": {} + } + }, + "gravatar_id": "aa33f6ec77ea434c2ea4fb92d0fd379e" + } + }, + "status_code": 200 + }, + { + "name": "list_projects", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects", + "query": "type=user&show_hidden=false&limit=20&offset=0", + "request_payload": null, + "response_body": { + "_meta": { + "limit": 20, + "total": 1, + "offset": 0, + "count": 1 + }, + "_links": { + "self": "http://webserver:8080/v0/projects?type=user&show_hidden=false&limit=20&offset=0", + "first": "http://webserver:8080/v0/projects?type=user&show_hidden=false&limit=20&offset=0", + "prev": null, + "next": null, + "last": "http://webserver:8080/v0/projects?type=user&show_hidden=false&limit=20&offset=0" + }, + "data": [ + { + "uuid": "25531b1a-2565-11ee-ab43-02420a000031", + "name": "Alexandra Brown", + "description": "", + "thumbnail": "", + "creationDate": "2023-07-18T12:18:04.314Z", + "lastChangeDate": "2023-07-20T20:02:42.535Z", + "workspaceId": 278, + "type": "STANDARD", + "templateType": null, + "folderId": 123, + "trashedAt": "2023-07-20T20:02:55.535Z", + "trashedBy": 2, + "workbench": { + "deea006c-a223-4103-b46e-7b677428de9f": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "File Picker", + "progress": 0.0, + "thumbnail": null, + "inputs": {}, + 
"inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "parent": null + }, + "3e700053-3d10-4089-90f7-f0865167c5b2": { + "key": "simcore/services/frontend/parameter/integer", + "version": "1.0.0", + "label": "Integer Parameter", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + }, + "parent": null + }, + "60d76eed-2e50-42e7-8c2d-580566b2f4c6": { + "key": "simcore/services/frontend/parameter/boolean", + "version": "1.0.0", + "label": "Boolean Parameter", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": true + }, + "parent": null + }, + "09e17caa-a538-40d4-9b8e-9a221b8367d3": { + "key": "simcore/services/frontend/iterator-consumer/probe/integer", + "version": "1.0.0", + "label": "Integer probe", + "thumbnail": null, + "inputs": { + "in_1": 0 + }, + "inputsUnits": {}, + "inputNodes": [], + "parent": null + } + }, + "prjOwner": "melinda11@example.org", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "UNKNOWN" + } + }, + "ui": { + "workbench": { + "09e17caa-a538-40d4-9b8e-9a221b8367d3": { + "position": { + "x": 540, + "y": 240 + } + }, + "3e700053-3d10-4089-90f7-f0865167c5b2": { + "position": { + "x": 180, + "y": 280 + } + }, + "60d76eed-2e50-42e7-8c2d-580566b2f4c6": { + "position": { + "x": 180, + "y": 360 + } + }, + "deea006c-a223-4103-b46e-7b677428de9f": { + "position": { + "x": 160, + "y": 120 + } + } + }, + "slideshow": {}, + "currentNodeId": "25531b1a-2565-11ee-ab43-02420a000031", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + } + }, + "annotations": { + "vandv": "", + "limitations": "", + "certificationLink": "", + "certificationStatus": "Uncertified" + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + } + } + }, + "dev": {} + } + ] + }, + "status_code": 200 + }, + { + "name": "get_project", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/25531b1a-2565-11ee-ab43-02420a000031", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "25531b1a-2565-11ee-ab43-02420a000031", + "name": "Richard Perez", + "description": "This is an interesting study", + "thumbnail": "", + "creationDate": "2023-07-18T12:18:04.314Z", + "lastChangeDate": "2023-07-20T20:04:05.607Z", + "workspaceId": 278, + "type": "STANDARD", + "templateType": null, + "folderId": 123, 
+ "trashedAt": "2023-07-20T20:04:10.607Z", + "trashedBy": 2, + "workbench": { + "deea006c-a223-4103-b46e-7b677428de9f": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "File Picker", + "progress": 0.0, + "thumbnail": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "parent": null + }, + "3e700053-3d10-4089-90f7-f0865167c5b2": { + "key": "simcore/services/frontend/parameter/integer", + "version": "1.0.0", + "label": "Integer Parameter", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + }, + "parent": null + }, + "60d76eed-2e50-42e7-8c2d-580566b2f4c6": { + "key": "simcore/services/frontend/parameter/boolean", + "version": "1.0.0", + "label": "Boolean Parameter", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": true + }, + "parent": null + }, + "09e17caa-a538-40d4-9b8e-9a221b8367d3": { + "key": "simcore/services/frontend/iterator-consumer/probe/integer", + "version": "1.0.0", + "label": "Integer probe", + "thumbnail": null, + "inputs": { + "in_1": 0 + }, + "inputsUnits": {}, + "inputNodes": [], + "parent": null + } + }, + "prjOwner": "mitchellamber@example.org", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "UNKNOWN" + } + }, + "ui": { + "workbench": { + "09e17caa-a538-40d4-9b8e-9a221b8367d3": { + "position": { + "x": 540, + "y": 240 + } + }, + "3e700053-3d10-4089-90f7-f0865167c5b2": { + "position": { + "x": 180, + "y": 280 + } + }, + "60d76eed-2e50-42e7-8c2d-580566b2f4c6": { + "position": { + "x": 180, + "y": 360 + } + }, + "deea006c-a223-4103-b46e-7b677428de9f": { + "position": { + "x": 160, + "y": 120 + } + } + }, + "slideshow": {}, + "currentNodeId": "25531b1a-2565-11ee-ab43-02420a000031", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + } + }, + "annotations": { + "vandv": "", + "limitations": "", + "certificationLink": "", + "certificationStatus": "Uncertified" + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + } + } + }, + "dev": {} + } + }, + "status_code": 200 + }, + { + "name": "get_invalid_project", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/15531b1a-2565-11ee-ab43-02420a000031", + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + 
"message": "Project 15531b1a-2565-11ee-ab43-02420a000031 not found", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project 15531b1a-2565-11ee-ab43-02420a000031 not found", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project 15531b1a-2565-11ee-ab43-02420a000031 not found" + } + }, + "status_code": 404 + }, + { + "name": "get_project_ports", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/25531b1a-2565-11ee-ab43-02420a000031/metadata/ports", + "query": null, + "request_payload": null, + "response_body": { + "data": [ + { + "key": "3e700053-3d10-4089-90f7-f0865167c5b2", + "kind": "input", + "content_schema": { + "title": "Integer Parameter", + "type": "integer", + "description": "Parameter of type integer" + } + }, + { + "key": "60d76eed-2e50-42e7-8c2d-580566b2f4c6", + "kind": "input", + "content_schema": { + "title": "Boolean Parameter", + "type": "boolean", + "description": "Parameter of type boolean" + } + }, + { + "key": "09e17caa-a538-40d4-9b8e-9a221b8367d3", + "kind": "output", + "content_schema": { + "title": "Integer probe", + "type": "integer", + "description": "Captures integer values attached to it", + "default": 0 + } + } + ] + }, + "status_code": 200 + }, + { + "name": "get_invalid_project_ports", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/15531b1a-2565-11ee-ab43-02420a000031/metadata/ports", + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project '15531b1a-2565-11ee-ab43-02420a000031' not found", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project '15531b1a-2565-11ee-ab43-02420a000031' not found", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project '15531b1a-2565-11ee-ab43-02420a000031' not found" + } + }, + "status_code": 404 + } +] diff --git a/services/api-server/tests/mocks/for_test_get_and_update_job_metadata.json b/services/api-server/tests/mocks/for_test_get_and_update_job_metadata.json new file mode 100644 index 00000000000..f14044c398c --- /dev/null +++ b/services/api-server/tests/mocks/for_test_get_and_update_job_metadata.json @@ -0,0 +1,461 @@ +[ + { + "name": "get_profile", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/me", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "first_name": "crespo", + "last_name": "", + "id": 1, + "login": "rturner@example.net", + "role": "User", + "groups": { + "me": { + "gid": 3, + "label": "crespo", + "description": "primary group", + "thumbnail": null, + "accessRights": { + "read": true, + "write": false, + "delete": false + }, + "inclusionRules": {} + }, + "organizations": [ + { + "gid": 2, + "label": "osparc", + "description": "osparc product group", + "thumbnail": null, + "accessRights": { + "read": false, + "write": false, + "delete": false + }, + "inclusionRules": {} + } + ], + "all": { + "gid": 1, + "label": "Everyone", + "description": "all users", + "thumbnail": null, + "accessRights": { + "read": true, + "write": false, + "delete": false + }, + "inclusionRules": {} + } + }, + "gravatar_id": "aa33f6ec77ea434c2ea4fb92d0fd379e" + } + }, + "status_code": 200 + }, + { + "name": "get_service", + "description": "", + "method": "GET", + "host": "catalog", + "path": "/v0/services/simcore/services/comp/itis/sleeper/2.0.0", + "query": 
"user_id=1", + "request_payload": null, + "response_body": { + "name": "sleeper", + "thumbnail": null, + "description": "A service which awaits for time to pass.", + "deprecated": null, + "classifiers": [], + "quality": {}, + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "integration-version": "1.0.0", + "type": "computational", + "authors": [ + { + "name": "Joshua Boone", + "email": "sharon60@example.net", + "affiliation": "Johnson Inc" + }, + { + "name": "Kenneth Alvarez", + "email": "ncollins@example.com", + "affiliation": "Singh LLC" + }, + { + "name": "Jennifer Howard", + "email": "amyhood@example.org", + "affiliation": "Campos-Weaver" + } + ], + "contact": "sharon91@example.com", + "inputs": { + "input_1": { + "displayOrder": 1.0, + "label": "File with int number", + "description": "Pick a file containing only one integer", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "input_1" + } + }, + "input_2": { + "displayOrder": 2.0, + "label": "Sleep interval", + "description": "Choose an amount of time to sleep", + "type": "integer", + "defaultValue": 2 + }, + "input_3": { + "displayOrder": 3.0, + "label": "Fail after sleep", + "description": "If set to true will cause service to fail after it sleeps", + "type": "boolean", + "defaultValue": false + } + }, + "outputs": { + "output_1": { + "displayOrder": 1.0, + "label": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "output_1" + } + }, + "output_2": { + "displayOrder": 2.0, + "label": "Random sleep interval", + "description": "Interval is generated in range [1-9]", + "type": "integer" + } + } + }, + "status_code": 200 + }, + { + "name": "create_project", + "description": "", + "method": "POST", + "host": "webserver", + "path": "/v0/projects", + "query": "hidden=true", + "request_payload": { + "uuid": "767a7355-3062-4ceb-b339-ed67d07d2ed2", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/767a7355-3062-4ceb-b339-ed67d07d2ed2", + "description": "Study associated to solver job:\n{\n \"id\": \"767a7355-3062-4ceb-b339-ed67d07d2ed2\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/767a7355-3062-4ceb-b339-ed67d07d2ed2\",\n \"inputs_checksum\": \"f6fce006d0fe7b6168fc20a10ec1fe74d1723ebc935232d3c0707c277db2ef0c\",\n \"created_at\": \"2023-07-12T12:45:30.757605+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "workbench": { + "d49543e7-6e36-57ee-86ff-46b71f63757f": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": null, + "thumbnail": null, + "runHash": null, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true + }, + "inputsUnits": {}, + "inputAccess": null, + "inputNodes": [], + "outputs": {}, + "outputNode": null, + "outputNodes": null, + "parent": null, + "position": null, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0 + }, + "bootOptions": null + } + }, + "accessRights": {}, + "tags": [], + "classifiers": [], + "ui": { + "workbench": { + "d49543e7-6e36-57ee-86ff-46b71f63757f": { + "position": { + "x": 633, + "y": 229 + }, + "marker": null + } + }, + "slideshow": {}, + "currentNodeId": "d49543e7-6e36-57ee-86ff-46b71f63757f", + "annotations": {} + } + }, + "response_body": { + "data": { + "task_id": 
"POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b", + "task_name": "POST /v0/projects?hidden=true", + "status_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b", + "result_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b/result", + "abort_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b" + } + }, + "status_code": 202 + }, + { + "name": "get_task_status_1", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "task_progress": { + "message": "creating new study...", + "percent": 0.0 + }, + "done": false, + "started": "2023-07-12T12:45:30.825756" + } + }, + "status_code": 200 + }, + { + "name": "get_task_status_2", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "task_progress": { + "message": "creating new study...", + "percent": 0.0 + }, + "done": false, + "started": "2023-07-12T12:45:30.825756" + } + }, + "status_code": 200 + }, + { + "name": "get_task_status_3", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "task_progress": { + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2023-07-12T12:45:30.825756" + } + }, + "status_code": 200 + }, + { + "name": "get_task_result", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.ceb7b7ab-ccef-4ea6-b82f-199d265d4c3b/result", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "767a7355-3062-4ceb-b339-ed67d07d2ed2", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/767a7355-3062-4ceb-b339-ed67d07d2ed2", + "description": "Study associated to solver job:\n{\n \"id\": \"767a7355-3062-4ceb-b339-ed67d07d2ed2\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/767a7355-3062-4ceb-b339-ed67d07d2ed2\",\n \"inputs_checksum\": \"f6fce006d0fe7b6168fc20a10ec1fe74d1723ebc935232d3c0707c277db2ef0c\",\n \"created_at\": \"2023-07-12T12:45:30.757605+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-07-12T12:45:30.832Z", + "lastChangeDate": "2023-07-12T12:45:30.832Z", + "workbench": { + "d49543e7-6e36-57ee-86ff-46b71f63757f": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + } + } + }, + "prjOwner": "brownlisa@example.com", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + 
}, + "ui": { + "workbench": { + "d49543e7-6e36-57ee-86ff-46b71f63757f": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "d49543e7-6e36-57ee-86ff-46b71f63757f", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "get_project_metadata", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/767a7355-3062-4ceb-b339-ed67d07d2ed2/metadata", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "projectUuid": "767a7355-3062-4ceb-b339-ed67d07d2ed2", + "custom": {} + } + }, + "status_code": 200 + }, + { + "name": "update_project_metadata", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": "/v0/projects/767a7355-3062-4ceb-b339-ed67d07d2ed2/metadata", + "query": null, + "request_payload": { + "custom": { + "number": 3.14, + "integer": 42, + "string": "foo", + "boolean": true + } + }, + "response_body": { + "data": { + "projectUuid": "767a7355-3062-4ceb-b339-ed67d07d2ed2", + "custom": { + "number": 3.14, + "string": "foo", + "boolean": true, + "integer": 42 + } + } + }, + "status_code": 200 + }, + { + "name": "get_project_metadata_1", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/767a7355-3062-4ceb-b339-ed67d07d2ed2/metadata", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "projectUuid": "767a7355-3062-4ceb-b339-ed67d07d2ed2", + "custom": { + "number": 3.14, + "string": "foo", + "boolean": true, + "integer": 42 + } + } + }, + "status_code": 200 + }, + { + "name": "delete_project", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": "/v0/projects/767a7355-3062-4ceb-b339-ed67d07d2ed2", + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + }, + { + "name": "get_project_metadata_2", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects/767a7355-3062-4ceb-b339-ed67d07d2ed2/metadata", + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project with uuid 767a7355-3062-4ceb-b339-ed67d07d2ed2 not found.", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project with uuid 767a7355-3062-4ceb-b339-ed67d07d2ed2 not found.", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project with uuid 767a7355-3062-4ceb-b339-ed67d07d2ed2 not found." 
+ } + }, + "status_code": 404 + } +] diff --git a/services/api-server/tests/mocks/get_credits_price.json b/services/api-server/tests/mocks/get_credits_price.json new file mode 100644 index 00000000000..220952437e8 --- /dev/null +++ b/services/api-server/tests/mocks/get_credits_price.json @@ -0,0 +1,19 @@ +[ + { + "name": "GET /credits-price", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/credits-price", + "path_parameters": [] + }, + "response_body": { + "data": { + "productName": "osparc", + "usdPerCredit": null, + "minPaymentAmountUsd": null + } + } + } +] diff --git a/services/api-server/tests/mocks/get_default_wallet.json b/services/api-server/tests/mocks/get_default_wallet.json new file mode 100644 index 00000000000..8fcddffb1be --- /dev/null +++ b/services/api-server/tests/mocks/get_default_wallet.json @@ -0,0 +1,28 @@ +[ + { + "name": "GET /wallets/default", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/default", + "path_parameters": [] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "Mike Medina", + "description": "Credits purchased by Bisgaard end up in here", + "owner": 3, + "thumbnail": null, + "status": "ACTIVE", + "created": "2023-11-06T11:23:38.559362+00:00", + "modified": "2023-11-06T11:23:38.559362+00:00", + "availableCredits": 0.0 + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_job_outputs.json b/services/api-server/tests/mocks/get_job_outputs.json new file mode 100644 index 00000000000..579d79b7de9 --- /dev/null +++ b/services/api-server/tests/mocks/get_job_outputs.json @@ -0,0 +1,588 @@ +[ + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "task_name": "POST /v0/projects?from_study=e9f34992-436c-11ef-a15d-0242ac14000c&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3" + } + }, + "status_code": 202 + }, + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + 
"message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-07-16T12:56:51.900041" + } + } + }, + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-07-16T12:56:51.900041" + } + } + }, + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3De9f34992-436c-11ef-a15d-0242ac14000c%26hidden%3Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-07-16T12:56:51.900041" + } + } + }, + { + "name": "GET http://webserver:30004/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253De9f34992-436c-11ef-a15d-0242ac14000c%2526hidden%253Dtrue.419d55ef-8ab6-4d2e-8654-1dd29fac18c3/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "uuid": "df0b67b6-4372-11ef-a15d-0242ac14000c", + "name": "teststudy (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-07-16T12:56:51.922Z", + "lastChangeDate": "2024-07-16T12:56:51.922Z", + "workspaceId": 5, + "type": "STANDARD", + "templateType": null, + "folderId": 2, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "input_1": { + "nodeUuid": "cda9d480-d3ad-55c8-b9ce-c50eb1bab818", + "output": "outFile" + }, + "input_2": 2, + "input_3": false + }, + "inputsRequired": [], + "inputNodes": [ + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818" + ], + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + }, + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "inputfile", + "inputs": {}, + "inputsRequired": [], + "inputNodes": [] + }, + "c784a033-36c7-558b-9cc5-448321de01f8": { + "key": "simcore/services/frontend/iterator-consumer/probe/file", + "version": "1.0.0", + "label": "outputfile", + "inputs": { + "in_1": 
{ + "nodeUuid": "dd875b4f-7663-529f-bd7f-3716b19e28af", + "output": "output_1" + } + }, + "inputsRequired": [], + "inputNodes": [ + "dd875b4f-7663-529f-bd7f-3716b19e28af" + ] + } + }, + "prjOwner": "bisgaard@itis.swiss", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "c784a033-36c7-558b-9cc5-448321de01f8": { + "position": { + "x": 1175, + "y": 467 + } + }, + "cda9d480-d3ad-55c8-b9ce-c50eb1bab818": { + "position": { + "x": 586, + "y": 471 + } + }, + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "position": { + "x": 860, + "y": 440 + } + } + }, + "slideshow": {}, + "currentNodeId": "b448cfb0-436c-11ef-a15d-0242ac14000c", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "name": "studies/e9f34992-436c-11ef-a15d-0242ac14000c/jobs/df0b67b6-4372-11ef-a15d-0242ac14000c" + }, + "status_code": 204 + }, + { + "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": {} + 
} + }, + { + "name": "PATCH /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/nodes/cda9d480-d3ad-55c8-b9ce-c50eb1bab818/outputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str" + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str" + }, + "response_value": "nodes" + } + ] + }, + "request_payload": { + "outputs": { + "outFile": { + "store": 0, + "path": "api/c1dcde67-6434-31c3-95ee-bf5fe1e9422d/inputfile", + "label": "inputfile", + "eTag": null, + "dataset": null + } + } + }, + "status_code": 204 + }, + { + "name": "POST /computations/df0b67b6-4372-11ef-a15d-0242ac14000c:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "df0b67b6-4372-11ef-a15d-0242ac14000c" + } + }, + "status_code": 201 + }, + { + "name": "GET /v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "df0b67b6-4372-11ef-a15d-0242ac14000c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": [] + }, + "progress": 0.0, + "node_states": { + "dd875b4f-7663-529f-bd7f-3716b19e28af": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": "2024-07-16T12:56:57.553331+00:00", + "stopped": null, + "submitted": "2024-07-16T12:56:57.454372+00:00", + "url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c?user_id=1", + "stop_url": "http://10.43.103.193:30009/v2/computations/df0b67b6-4372-11ef-a15d-0242ac14000c:stop?user_id=1" + } + }, + { + "name": "GET /projects/df0b67b6-4372-11ef-a15d-0242ac14000c/outputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "c784a033-36c7-558b-9cc5-448321de01f8": { + "key": "c784a033-36c7-558b-9cc5-448321de01f8", + "value": null, + "label": "outputfile" + } + } + } + } +] diff --git a/services/api-server/tests/mocks/get_job_pricing_unit_invalid_job.json b/services/api-server/tests/mocks/get_job_pricing_unit_invalid_job.json new file mode 100644 index 00000000000..3e966fbed13 --- /dev/null +++ b/services/api-server/tests/mocks/get_job_pricing_unit_invalid_job.json @@ -0,0 +1,55 @@ +[ + { + "name": "GET 
/projects/87643648-3a38-44e2-9cfe-d86ab3d50620", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project 87643648-3a38-44e2-9cfe-d86ab3d50620 not found", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project 87643648-3a38-44e2-9cfe-d86ab3d50620 not found", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project 87643648-3a38-44e2-9cfe-d86ab3d50620 not found" + } + }, + "status_code": 404 + } +] diff --git a/services/api-server/tests/mocks/get_job_pricing_unit_invalid_solver.json b/services/api-server/tests/mocks/get_job_pricing_unit_invalid_solver.json new file mode 100644 index 00000000000..270af965166 --- /dev/null +++ b/services/api-server/tests/mocks/get_job_pricing_unit_invalid_solver.json @@ -0,0 +1,111 @@ +[ + { + "name": "GET /projects/87643648-3a38-44e2-9cfe-d86ab3d50629", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "87643648-3a38-44e2-9cfe-d86ab3d50629", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.24/jobs/87643648-3a38-44e2-9cfe-d86ab3d50629", + "description": "Study associated to solver job:\n{\n \"id\": \"87643648-3a38-44e2-9cfe-d86ab3d50629\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.24/jobs/87643648-3a38-44e2-9cfe-d86ab3d50629\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-10T20:15:22.071797+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-10T20:15:22.096Z", + "lastChangeDate": "2023-10-10T20:15:22.096Z", + "workspaceId": 3, + "type": "STANDARD", + "templateType": null, + "folderId": 31, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "4b03863d-107a-5c77-a3ca-c5ba1d7048c0": { + "key": "simcore/services/comp/isolve", + "version": "2.1.24", + "label": "isolve edge", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "ptrevino@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + 
"value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "4b03863d-107a-5c77-a3ca-c5ba1d7048c0": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "4b03863d-107a-5c77-a3ca-c5ba1d7048c0", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_job_pricing_unit_success.json b/services/api-server/tests/mocks/get_job_pricing_unit_success.json new file mode 100644 index 00000000000..8ab29ed9112 --- /dev/null +++ b/services/api-server/tests/mocks/get_job_pricing_unit_success.json @@ -0,0 +1,172 @@ +[ + { + "name": "GET /projects/87643648-3a38-44e2-9cfe-d86ab3d50629", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "87643648-3a38-44e2-9cfe-d86ab3d50629", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.24/jobs/87643648-3a38-44e2-9cfe-d86ab3d50629", + "description": "Study associated to solver job:\n{\n \"id\": \"87643648-3a38-44e2-9cfe-d86ab3d50629\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.24/jobs/87643648-3a38-44e2-9cfe-d86ab3d50629\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-10T20:15:22.071797+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-10T20:15:22.096Z", + "lastChangeDate": "2023-10-10T20:15:22.096Z", + "workspaceId": 3, + "type": "STANDARD", + "templateType": null, + "folderId": 1, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "4b03863d-107a-5c77-a3ca-c5ba1d7048c0": { + "key": "simcore/services/comp/isolve", + "version": "2.1.24", + "label": "isolve edge", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "imitchell@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "4b03863d-107a-5c77-a3ca-c5ba1d7048c0": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "4b03863d-107a-5c77-a3ca-c5ba1d7048c0", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 200 + }, + { + "name": "GET /projects/87643648-3a38-44e2-9cfe-d86ab3d50629/nodes/4b03863d-107a-5c77-a3ca-c5ba1d7048c0/pricing-unit", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-unit", + "path_parameters": [ + { + "in_": "path", + "name": "node_id", + "required": true, + "schema_": { + "title": "Node 
Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "nodes" + }, + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "pricingUnitId": 1, + "unitName": "small", + "unitExtraInfo": { + "CPU": 500, + "RAM": 26598, + "VRAM": 456789123456 + }, + "currentCostPerUnit": 50, + "default": true + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_job_wallet_found.json b/services/api-server/tests/mocks/get_job_wallet_found.json new file mode 100644 index 00000000000..1f3fed0bd19 --- /dev/null +++ b/services/api-server/tests/mocks/get_job_wallet_found.json @@ -0,0 +1,89 @@ +[ + { + "name": "GET /projects/87643648-3a38-44e2-9cfe-d86ab3d50629/wallet", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/wallet", + "path_parameters": [ + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "my_wallet", + "description": "my awesome wallet", + "owner": 3, + "thumbnail": "string", + "status": "ACTIVE", + "created": "2023-10-10T13:58:20.381826+00:00", + "modified": "2023-10-10T13:58:20.381826+00:00" + } + }, + "status_code": 200 + }, + { + "name": "GET /wallets/{wallet_id}", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/{wallet_id}", + "path_parameters": [ + { + "in_": "path", + "name": "wallet_id", + "required": true, + "schema_": { + "title": "Wallet Id", + "type_": "int", + "pattern": null, + "format_": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "wallets" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "my_wallet", + "description": "my awesome wallet", + "owner": 3, + "thumbnail": "string", + "status": "ACTIVE", + "created": "2023-10-10T13:58:20.381826+00:00", + "modified": "2023-10-10T13:58:20.381826+00:00", + "availableCredits": 0.0 + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_job_wallet_not_found.json b/services/api-server/tests/mocks/get_job_wallet_not_found.json new file mode 100644 index 00000000000..31f6deb029a --- /dev/null +++ b/services/api-server/tests/mocks/get_job_wallet_not_found.json @@ -0,0 +1,108 @@ +[ + { + "name": "GET /projects/87643648-3a38-44e2-9cfe-d86ac3d50629/wallet", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/wallet", + "path_parameters": [ + { + "in_": "path", + "name": "project_id", + "required": true, + "schema_": { + "title": "Project Id", + "type_": "str", + "pattern": null, + "format_": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + 
"allOf": null, + "oneOf": null + }, + "response_value": "87643648-3a38-44e2-9cfe-d86ac3d50629" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project with uuid 87643648-3a38-44e2-9cfe-d86ac3d50629 not found.", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project with uuid 87643648-3a38-44e2-9cfe-d86ac3d50629 not found.", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project with uuid 87643648-3a38-44e2-9cfe-d86ac3d50629 not found." + } + }, + "status_code": 404 + }, + { + "name": "GET /wallets/8", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/{wallet_id}", + "path_parameters": [ + { + "in_": "path", + "name": "wallet_id", + "required": true, + "schema_": { + "title": "Wallet Id", + "type_": "int", + "pattern": null, + "format_": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "wallets" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist.", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPForbidden", + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist.", + "resource": null, + "field": null + } + ], + "status": 403, + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist." + } + }, + "status_code": 403 + } +] diff --git a/services/api-server/tests/mocks/get_solver_outputs.json b/services/api-server/tests/mocks/get_solver_outputs.json new file mode 100644 index 00000000000..53e041cc45d --- /dev/null +++ b/services/api-server/tests/mocks/get_solver_outputs.json @@ -0,0 +1,258 @@ +[ + { + "name": "GET /projects/0f5f114f-c2bf-4807-914f-2f4df2604223", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "0f5f114f-c2bf-4807-914f-2f4df2604223", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/0f5f114f-c2bf-4807-914f-2f4df2604223", + "description": "Study associated to solver job:\n{\n \"id\": \"0f5f114f-c2bf-4807-914f-2f4df2604223\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/0f5f114f-c2bf-4807-914f-2f4df2604223\",\n \"inputs_checksum\": \"88e3aa0cf82491572d5978fa359bad9d100ef247492020efb4dbcc9c5ee09b45\",\n \"created_at\": \"2024-01-18T12:33:56.883048+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2024-01-18T12:33:56.952Z", + "lastChangeDate": "2024-01-18T12:34:13.002Z", + "workspaceId": 2, + "type": "STANDARD", + "templateType": null, + "folderId": 2, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "df42d273-b6f0-509c-bfb5-4abbc5bb0581": { + "key": "simcore/services/comp/itis/sleeper", + 
"version": "2.0.2", + "label": "sleeper", + "progress": 100.0, + "runHash": "10042fe8aa6ba3140532ba27dbbb1ba6c25d3e60a75c1d142f55a53dbecb5ead", + "inputs": { + "input_1": { + "store": 0, + "path": "api/45c97ed8-191d-300c-89be-c5f83148a391/input.txt", + "label": "input.txt", + "eTag": "eccbc87e4b5ce2fe28308fd9f2a7baf3" + }, + "input_2": 3, + "input_3": false, + "input_4": 4 + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "output_1": { + "store": 0, + "path": "0f5f114f-c2bf-4807-914f-2f4df2604223/df42d273-b6f0-509c-bfb5-4abbc5bb0581/single_number.txt", + "eTag": "c81e728d9d4c2f636f067f89cc14862c" + }, + "output_2": 6 + }, + "state": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "prjOwner": "greenrichard@example.org", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "SUCCESS" + } + }, + "ui": { + "workbench": { + "df42d273-b6f0-509c-bfb5-4abbc5bb0581": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "df42d273-b6f0-509c-bfb5-4abbc5bb0581", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 200 + }, + { + "name": "GET /credits-price", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/credits-price", + "path_parameters": [] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "productName": "osparc", + "usdPerCredit": 10.0, + "minPaymentAmountUsd": 5 + } + }, + "status_code": 200 + }, + { + "name": "GET /projects/0f5f114f-c2bf-4807-914f-2f4df2604223/wallet", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/wallet", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "Derek Nguyen", + "description": "Credits purchased by Bisgaard end up in here", + "owner": 3, + "thumbnail": null, + "status": "ACTIVE", + "created": "2024-01-18T09:32:58.042380+00:00", + "modified": "2024-01-18T09:32:58.042380+00:00" + } + }, + "status_code": 200 + }, + { + "name": "GET /wallets/1", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/{wallet_id}", + "path_parameters": [ + { + "in": "path", + "name": "wallet_id", + "required": true, + "schema": { + "title": "Wallet Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "wallets" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "Eric Hunter", + "description": "Credits purchased by Bisgaard end up in here", + "owner": 3, + "thumbnail": null, + "status": "ACTIVE", + "created": "2024-01-18T09:32:58.042380+00:00", + "modified": "2024-01-18T09:32:58.042380+00:00", + "availableCredits": 0.0 + } + }, + "status_code": 200 + }, + { + "name": "POST /simcore-s3/files/metadata:search", + "description": "", + "method": "POST", + 
"host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search", + "path_parameters": [] + }, + "query": "kind=owned&user_id=1&startswith=api/4ea24645-fd8c-339b-9621-ae045d45d94d", + "request_payload": null, + "response_body": { + "data": [ + { + "file_uuid": "api/4ea24645-fd8c-339b-9621-ae045d45d94d/single_number.txt", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "single_number.txt", + "file_id": "api/4ea24645-fd8c-339b-9621-ae045d45d94d/single_number.txt", + "created_at": "2024-01-18T12:33:58.399872", + "last_modified": "2024-01-18T12:34:12+00:00", + "file_size": 1, + "entity_tag": "c81e728d9d4c2f636f067f89cc14862c", + "is_soft_link": true, + "is_directory": false, + "sha256_checksum": null + } + ] + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_solver_pricing_plan_invalid_solver.json b/services/api-server/tests/mocks/get_solver_pricing_plan_invalid_solver.json new file mode 100644 index 00000000000..000b91b992b --- /dev/null +++ b/services/api-server/tests/mocks/get_solver_pricing_plan_invalid_solver.json @@ -0,0 +1,133 @@ +[ + { + "name": "GET /catalog/services/simcore%2Fservices%2Fcomp%2Fisolve/2.1.24/pricing-plan", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/catalog/services/{service_key}/{service_version}/pricing-plan", + "path_parameters": [ + { + "in_": "path", + "name": "service_key", + "required": true, + "schema_": { + "title": "Service Key", + "type_": "str", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "services" + }, + { + "in_": "path", + "name": "service_version", + "required": true, + "schema_": { + "title": "Service Version", + "type_": "str", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "simcore/services/comp/isolve" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "pricingPlanId": 1, + "displayName": "osparc_pricing_plan", + "description": "", + "classification": "TIER", + "createdAt": "2023-10-12T07:16:31.155807+00:00", + "pricingPlanKey": "osparc_pricing_plan", + "pricingUnits": [ + { + "pricingUnitId": 1, + "unitName": "small", + "unitExtraInfo": {}, + "currentCostPerUnit": 50, + "default": true + } + ] + } + }, + "status_code": 200 + }, + { + "name": "GET /catalog/services/simcore%2Fservices%2Fcomp%2Fisolv/2.1.24/pricing-plan", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/catalog/services/{service_key}/{service_version}/pricing-plan", + "path_parameters": [ + { + "in_": "path", + "name": "service_key", + "required": true, + "schema_": { + "title": "Service Key", + "type_": "str", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "services" + }, + { + "in_": "path", + "name": "service_version", + "required": true, + "schema_": { + "title": "Service Version", + "type_": "str", + 
"pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "simcore/services/comp/isolv" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [], + "errors": [ + { + "code": "ValidationError", + "message": "7 validation errors for ParsingModel[ServicePricingPlanGet]\n__root__ -> pricing_plan_id\n field required (type=value_error.missing)\n__root__ -> display_name\n field required (type=value_error.missing)\n__root__ -> description\n field required (type=value_error.missing)\n__root__ -> classification\n field required (type=value_error.missing)\n__root__ -> created_at\n field required (type=value_error.missing)\n__root__ -> pricing_plan_key\n field required (type=value_error.missing)\n__root__ -> pricing_units\n field required (type=value_error.missing)", + "resource": null, + "field": null + } + ], + "status": 500, + "message": "Unexpected client error" + } + }, + "status_code": 500 + } +] diff --git a/services/api-server/tests/mocks/get_solver_pricing_plan_success.json b/services/api-server/tests/mocks/get_solver_pricing_plan_success.json new file mode 100644 index 00000000000..1cb46e85b8f --- /dev/null +++ b/services/api-server/tests/mocks/get_solver_pricing_plan_success.json @@ -0,0 +1,74 @@ +[ + { + "name": "GET /catalog/services/simcore%2Fservices%2Fcomp%2Fisolve/2.1.24/pricing-plan", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/catalog/services/{service_key}/{service_version}/pricing-plan", + "path_parameters": [ + { + "in_": "path", + "name": "service_key", + "required": true, + "schema_": { + "title": "Service Key", + "type_": "str", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "services" + }, + { + "in_": "path", + "name": "service_version", + "required": true, + "schema_": { + "title": "Service Version", + "type_": "str", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "format_": null, + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "simcore/services/comp/isolve" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "pricingPlanId": 1, + "displayName": "osparc_pricing_plan", + "description": "", + "classification": "TIER", + "createdAt": "2023-10-12T07:16:31.155807+00:00", + "pricingPlanKey": "osparc_pricing_plan", + "pricingUnits": [ + { + "pricingUnitId": 1, + "unitName": "small", + "unitExtraInfo": { + "CPU": 2, + "RAM": 1239876234, + "VRAM": 23454676789 + }, + "currentCostPerUnit": 50, + "default": true + } + ], + "isActive": true + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/get_study_job_logs.json b/services/api-server/tests/mocks/get_study_job_logs.json new file mode 100644 index 00000000000..85f07b476c9 --- /dev/null +++ b/services/api-server/tests/mocks/get_study_job_logs.json @@ -0,0 +1,31 @@ +[ + { + "name": "GET 
/v2/computations/1a4145e2-2fca-11ef-a199-0242ac14002a/tasks/-/logfile", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}/tasks/-/logfile", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": [ + { + "task_id": "2da40b78-d529-5657-95c5-ab663cbd890d", + "download_link": "http://www.jensen-boyle.com/" + } + ] + } +] diff --git a/services/api-server/tests/mocks/get_wallet_failure.json b/services/api-server/tests/mocks/get_wallet_failure.json new file mode 100644 index 00000000000..7934a47e2f9 --- /dev/null +++ b/services/api-server/tests/mocks/get_wallet_failure.json @@ -0,0 +1,55 @@ +[ + { + "name": "GET /wallets/8", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/{wallet_id}", + "path_parameters": [ + { + "in_": "path", + "name": "wallet_id", + "required": true, + "schema_": { + "title": "Wallet Id", + "type_": "int", + "pattern": null, + "format_": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "wallets" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist.", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPForbidden", + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist.", + "resource": null, + "field": null + } + ], + "status": 403, + "message": "Wallet access forbidden. User does not have access to the wallet 8. Or wallet does not exist." 
+ } + }, + "status_code": 403 + } +] diff --git a/services/api-server/tests/mocks/get_wallet_success.json b/services/api-server/tests/mocks/get_wallet_success.json new file mode 100644 index 00000000000..7cfeed511e1 --- /dev/null +++ b/services/api-server/tests/mocks/get_wallet_success.json @@ -0,0 +1,46 @@ +[ + { + "name": "GET /wallets/1", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/wallets/{wallet_id}", + "path_parameters": [ + { + "in_": "path", + "name": "wallet_id", + "required": true, + "schema_": { + "title": "Wallet Id", + "type_": "int", + "pattern": null, + "format_": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "wallets" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "walletId": 1, + "name": "my_wallet", + "description": "my awesome wallet", + "owner": 3, + "thumbnail": "string", + "status": "ACTIVE", + "created": "2023-10-10T13:58:20.381826+00:00", + "modified": "2023-10-10T13:58:20.381826+00:00", + "availableCredits": 0.0 + } + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/on_create_job.json b/services/api-server/tests/mocks/on_create_job.json new file mode 100644 index 00000000000..9820285afad --- /dev/null +++ b/services/api-server/tests/mocks/on_create_job.json @@ -0,0 +1,300 @@ +[ + { + "name": "GET /services/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.0", + "description": "", + "method": "GET", + "host": "catalog", + "path": "/v0/services/simcore/services/comp/itis/sleeper/2.0.0", + "query": "user_id=1", + "request_payload": null, + "response_body": { + "name": "sleeper", + "thumbnail": null, + "description": "A service which awaits for time to pass.", + "deprecated": null, + "classifiers": [], + "quality": {}, + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "integration-version": "1.0.0", + "type": "computational", + "authors": [ + { + "name": "Kimberly Wilson", + "email": "christopher72@example.org", + "affiliation": "Ford, Collins and Villarreal" + }, + { + "name": "Daniel Jones", + "email": "fschmitt@example.net", + "affiliation": "Ayala-Anderson" + } + ], + "contact": "jlozano@example.net", + "inputs": { + "input_1": { + "displayOrder": 1.0, + "label": "File with int number", + "description": "Pick a file containing only one integer", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "input_1" + } + }, + "input_2": { + "displayOrder": 2.0, + "label": "Sleep interval", + "description": "Choose an amount of time to sleep", + "type": "integer", + "defaultValue": 2 + }, + "input_3": { + "displayOrder": 3.0, + "label": "Fail after sleep", + "description": "If set to true will cause service to fail after it sleeps", + "type": "boolean", + "defaultValue": false + } + }, + "outputs": { + "output_1": { + "displayOrder": 1.0, + "label": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "output_1" + } + }, + "output_2": { + "displayOrder": 2.0, + "label": "Random sleep interval", + "description": "Interval is generated in range [1-9]", + "type": "integer" + } + } + }, + "status_code": 200 + }, + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": "/v0/projects", + "query": "hidden=true", + "request_payload": { + "uuid": "06325dd9-64af-4243-8011-efdf7fb588a4", + 
"name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/06325dd9-64af-4243-8011-efdf7fb588a4", + "description": "Study associated to solver job:\n{\n \"id\": \"06325dd9-64af-4243-8011-efdf7fb588a4\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/06325dd9-64af-4243-8011-efdf7fb588a4\",\n \"inputs_checksum\": \"0def0cfbe784a61b4779a5a8cf35a376c6335558f5208958fb13cc24e6851bc6\",\n \"created_at\": \"2023-06-08T16:15:03.573115\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-06-08T16:15:03.587Z", + "lastChangeDate": "2023-06-08T16:15:03.587Z", + "workbench": { + "b5b971ac-86a2-5f31-93ab-d2ac572a201a": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": null, + "thumbnail": null, + "runHash": null, + "inputs": { + "x": 4.33, + "n": 55 + }, + "inputsUnits": {}, + "inputAccess": null, + "inputNodes": [], + "outputs": {}, + "outputNode": null, + "outputNodes": null, + "parent": null, + "position": null, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0 + }, + "bootOptions": null + } + }, + "prjOwner": "robertsmith@example.org", + "accessRights": {}, + "tags": [], + "classifiers": [], + "ui": { + "workbench": { + "b5b971ac-86a2-5f31-93ab-d2ac572a201a": { + "position": { + "x": 633, + "y": 229 + }, + "marker": null + } + }, + "slideshow": {}, + "currentNodeId": "b5b971ac-86a2-5f31-93ab-d2ac572a201a", + "annotations": {} + }, + "quality": {}, + "dev": {} + }, + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "task_name": "POST /v0/projects?hidden=true", + "status_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "result_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59/result", + "abort_href": "/v0/tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59" + } + }, + "status_code": 202 + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "task_progress": { + "message": "creating new study...", + "percent": 0.0 + }, + "done": false, + "started": "2023-06-08T16:15:03.621835" + } + }, + "status_code": 200 + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "task_progress": { + "message": "creating new study...", + "percent": 0.0 + }, + "done": false, + "started": "2023-06-08T16:15:03.621835" + } + }, + "status_code": 200 + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59", + "query": null, + "request_payload": null, + "response_body": { + 
"data": { + "task_progress": { + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2023-06-08T16:15:03.621835" + } + }, + "status_code": 200 + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Fhidden%253Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/tasks/POST%20%2Fv0%2Fprojects%3Fhidden%3Dtrue.42a04ed5-c581-4a8d-b037-48deda49ef59/result", + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "06325dd9-64af-4243-8011-efdf7fb588a4", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/06325dd9-64af-4243-8011-efdf7fb588a4", + "description": "Study associated to solver job:\n{\n \"id\": \"06325dd9-64af-4243-8011-efdf7fb588a4\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/06325dd9-64af-4243-8011-efdf7fb588a4\",\n \"inputs_checksum\": \"0def0cfbe784a61b4779a5a8cf35a376c6335558f5208958fb13cc24e6851bc6\",\n \"created_at\": \"2023-06-08T16:15:03.573115\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-06-08T16:15:03.627Z", + "lastChangeDate": "2023-06-08T16:15:03.627Z", + "workbench": { + "b5b971ac-86a2-5f31-93ab-d2ac572a201a": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55 + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + } + } + }, + "prjOwner": "rhondakelly@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "b5b971ac-86a2-5f31-93ab-d2ac572a201a": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "b5b971ac-86a2-5f31-93ab-d2ac572a201a", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "DELETE /projects/06325dd9-64af-4243-8011-efdf7fb588a4", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": "/v0/projects/06325dd9-64af-4243-8011-efdf7fb588a4", + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + } +] diff --git a/services/api-server/tests/mocks/on_list_jobs.json b/services/api-server/tests/mocks/on_list_jobs.json new file mode 100644 index 00000000000..6f920f42ad5 --- /dev/null +++ b/services/api-server/tests/mocks/on_list_jobs.json @@ -0,0 +1,253 @@ +[ + { + "name": "get_service", + "description": "", + "method": "GET", + "host": "catalog", + "path": "/v0/services/simcore/services/comp/itis/sleeper/2.0.0", + "query": "user_id=1", + "request_payload": null, + "response_body": { + "name": "sleeper", + "thumbnail": null, + "description": "A service which awaits for time to pass.", + "deprecated": null, + "classifiers": [], + "quality": {}, + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "integration-version": "1.0.0", + "type": "computational", + "authors": [ + { + "name": "Julia Lewis", + "email": "ecantrell@example.org", + "affiliation": "Eaton LLC" + } + ], + "contact": "moorezachary@example.com", + "inputs": { + "input_1": { + "displayOrder": 1.0, + "label": "File with int number", + "description": "Pick a file 
containing only one integer", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "input_1" + } + }, + "input_2": { + "displayOrder": 2.0, + "label": "Sleep interval", + "description": "Choose an amount of time to sleep", + "type": "integer", + "defaultValue": 2 + }, + "input_3": { + "displayOrder": 3.0, + "label": "Fail after sleep", + "description": "If set to true will cause service to fail after it sleeps", + "type": "boolean", + "defaultValue": false + } + }, + "outputs": { + "output_1": { + "displayOrder": 1.0, + "label": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + "type": "data:text/plain", + "fileToKeyMap": { + "single_number.txt": "output_1" + } + }, + "output_2": { + "displayOrder": 2.0, + "label": "Random sleep interval", + "description": "Interval is generated in range [1-9]", + "type": "integer" + } + } + }, + "status_code": 200 + }, + { + "name": "list_projects", + "description": "", + "method": "GET", + "host": "webserver", + "path": "/v0/projects", + "query": "type=user&show_hidden=true&limit=20&offset=0&search=solvers%2Fsimcore%252Fservices%252Fcomp%252Fitis%252Fsleeper%2Freleases%2F2.0.0", + "request_payload": null, + "response_body": { + "_meta": { + "limit": 20, + "total": 3, + "offset": 0, + "count": 2 + }, + "_links": { + "self": "http://webserver:8080/v0/projects?type=user&show_hidden=true&limit=2&offset=0&search=solvers/simcore%252Fservices%252Fcomp%252Fitis%252Fsleeper/releases/2.0.0", + "first": "http://webserver:8080/v0/projects?type=user&show_hidden=true&limit=2&offset=0&search=solvers/simcore%252Fservices%252Fcomp%252Fitis%252Fsleeper/releases/2.0.0", + "prev": null, + "next": "http://webserver:8080/v0/projects?type=user&show_hidden=true&limit=2&offset=2&search=solvers/simcore%252Fservices%252Fcomp%252Fitis%252Fsleeper/releases/2.0.0", + "last": "http://webserver:8080/v0/projects?type=user&show_hidden=true&limit=2&offset=2&search=solvers/simcore%252Fservices%252Fcomp%252Fitis%252Fsleeper/releases/2.0.0" + }, + "data": [ + { + "uuid": "1455d63c-4e8f-4ffe-bdd4-e885f991cd87", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/1455d63c-4e8f-4ffe-bdd4-e885f991cd87", + "description": "Study associated to solver job:\n{\n \"id\": \"1455d63c-4e8f-4ffe-bdd4-e885f991cd87\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/1455d63c-4e8f-4ffe-bdd4-e885f991cd87\",\n \"inputs_checksum\": \"4e16c861276db7f69f7fac76dfd9d65308121d767b7cba56c1003ef6ed38ffec\",\n \"created_at\": \"2023-06-22T18:42:35.489609\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-06-22T18:42:35.506Z", + "lastChangeDate": "2023-06-22T18:42:35.506Z", + "workspaceId": 7, + "type": "STANDARD", + "templateType": null, + "folderId": 1, + "trashedAt": "2023-06-22T18:42:36.506Z", + "trashedBy": 2, + "workbench": { + "05c7ed3b-0be1-5077-8065-fb55f5e59ff3": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + } + } + }, + "prjOwner": "madison17@example.org", + "accessRights": { + "3": { + "read": true, + 
"write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "05c7ed3b-0be1-5077-8065-fb55f5e59ff3": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "05c7ed3b-0be1-5077-8065-fb55f5e59ff3", + "annotations": {} + }, + "quality": {}, + "dev": {} + }, + { + "uuid": "61d8acda-a560-4d76-ac47-59c56a399d98", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/61d8acda-a560-4d76-ac47-59c56a399d98", + "description": "Study associated to solver job:\n{\n \"id\": \"61d8acda-a560-4d76-ac47-59c56a399d98\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.0/jobs/61d8acda-a560-4d76-ac47-59c56a399d98\",\n \"inputs_checksum\": \"4e16c861276db7f69f7fac76dfd9d65308121d767b7cba56c1003ef6ed38ffec\",\n \"created_at\": \"2023-06-22T18:42:32.126304\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-06-22T18:42:32.201Z", + "lastChangeDate": "2023-06-22T18:42:32.201Z", + "workspaceId": 4, + "type": "STANDARD", + "templateType": null, + "folderId": 8, + "trashedAt": "2023-06-22T18:42:33.201Z", + "trashedBy": 2, + "workbench": { + "34805d7e-c2d0-561f-831f-c74a28fc9bd1": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.0", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": 0.0 + } + } + }, + "prjOwner": "jacksonnicole@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "34805d7e-c2d0-561f-831f-c74a28fc9bd1": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "34805d7e-c2d0-561f-831f-c74a28fc9bd1", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + ] + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/run_study_workflow.json b/services/api-server/tests/mocks/run_study_workflow.json new file mode 100644 index 00000000000..90216ee45ba --- /dev/null +++ b/services/api-server/tests/mocks/run_study_workflow.json @@ -0,0 +1,1669 @@ +[ + { + "name": "GET /projects/aeab71fe-f71b-11ee-8fca-0242ac140008/metadata/ports", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata/ports", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": [ + { + "key": "0b8042c4-501a-4f9b-b2fa-17f860548b33", + "kind": "output", + "content_schema": null + }, + { + "key": "c0f304e0-228b-413c-937a-2b1b060c9e02", + "kind": "input", + "content_schema": { + "title": "InputInt", + "type": "integer", + "description": "Produced integer value" + } + }, + { + "key": "d9069bdb-35ae-4ec3-a05a-a42d7a7b0579", + "kind": "output", + 
"content_schema": { + "title": "OutputInt", + "type": "integer", + "description": "Captured integer value" + } + }, + { + "key": "50fd6b01-bb5d-4136-a932-73676a461680", + "kind": "output", + "content_schema": { + "title": "OutputString", + "type": "string", + "description": "Captured string value" + } + }, + { + "key": "38985050-7476-4534-8c79-839a928ea2a8", + "kind": "input", + "content_schema": { + "title": "InputString", + "type": "string", + "description": "Produced string value" + } + }, + { + "key": "8815eab9-9bd5-4dda-a65c-3c14a423bfb3", + "kind": "input", + "content_schema": { + "title": "InputArray", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "key": "22e7a091-2e4e-4e5a-93aa-c500457f5684", + "kind": "output", + "content_schema": { + "title": "OutputArray", + "type": "array", + "items": { + "type": "number" + } + } + }, + { + "key": "04de3b6f-668d-4826-822f-c58370c037ed", + "kind": "input", + "content_schema": { + "title": "InputNumber", + "type": "number", + "description": "Produced number value" + } + }, + { + "key": "b227b053-1207-4b48-b6ee-71a0ff24b014", + "kind": "output", + "content_schema": { + "title": "OutputNumber", + "type": "number", + "description": "Captured number value" + } + }, + { + "key": "72d5daac-f728-4603-b49e-9a407e4aa079", + "kind": "input", + "content_schema": { + "title": "InputBool", + "type": "boolean", + "description": "Produced boolean value" + } + }, + { + "key": "f85418d5-45d8-41eb-a1ac-4f14a63ec890", + "kind": "output", + "content_schema": { + "title": "OutputBool", + "type": "boolean", + "description": "Captured boolean value" + } + } + ] + } + }, + { + "name": "POST /projects", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", + "response_body": { + "data": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "task_name": "POST /v0/projects?from_study=aeab71fe-f71b-11ee-8fca-0242ac140008&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24" + } + }, + "status_code": 202 + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "inserted project new_project['uuid']='e19f9144-fb3f-11ee-b7b0-0242ac14001c' into the db", + "percent": 0.0 
+ }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } + } + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } + } + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "updated network information in directorv2", + "percent": 1.0 + }, + "done": false, + "started": "2024-04-15T15:50:28.173722" + } + } + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3Daeab71fe-f71b-11ee-8fca-0242ac140008%26hidden%3Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-04-15T15:50:28.173722" + } + } + }, + { + "name": "GET tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253Daeab71fe-f71b-11ee-8fca-0242ac140008%2526hidden%253Dtrue.194a6216-eb67-4769-8e45-ec19f7076b24/result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "uuid": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "name": "test (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-04-15T15:50:28.196Z", + "lastChangeDate": "2024-04-15T15:50:28.196Z", + "workspaceId": 3, + "type": "STANDARD", + "templateType": null, + "folderId": 3, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "ab014072-a95f-5775-bb34-5582a13245a6": { + "key": "simcore/services/frontend/iterator-consumer/probe/file", + "version": "1.0.0", + "label": "OutputFile", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": 
"9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca", + "output": "outFile" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca" + ], + "parent": null + }, + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + "label": "InputFile", + "thumbnail": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {} + }, + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "simcore/services/frontend/parameter/integer", + "version": "1.0.0", + "label": "InputInt", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + } + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "key": "simcore/services/frontend/iterator-consumer/probe/integer", + "version": "1.0.0", + "label": "OutputInt", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "096acfb2-8c38-560a-91d3-8911f4334289", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "096acfb2-8c38-560a-91d3-8911f4334289" + ] + }, + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "thumbnail": null, + "inputs": { + "input_2": 2, + "input_3": false + }, + "inputsUnits": {}, + "inputNodes": [], + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "key": "simcore/services/frontend/iterator-consumer/probe/string", + "version": "1.0.0", + "label": "OutputString", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "bcc36381-7377-533f-bb04-f785c0f8e2be", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "bcc36381-7377-533f-bb04-f785c0f8e2be" + ] + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": "simcore/services/frontend/parameter/string", + "version": "1.0.0", + "label": "InputString", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": "Foo" + } + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "simcore/services/frontend/parameter/array", + "version": "1.0.0", + "label": "InputArray", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": [ + 1 + ] + } + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "key": "simcore/services/frontend/iterator-consumer/probe/array", + "version": "1.0.0", + "label": "OutputArray", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3" + ] + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "simcore/services/frontend/parameter/number", + "version": "1.0.0", + "label": "InputNumber", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": 1 + } + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "key": "simcore/services/frontend/iterator-consumer/probe/number", + "version": "1.0.0", + "label": "OutputNumber", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "d43949c5-5143-5738-bae9-7d231dcabe7f" + ] + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": 
"simcore/services/frontend/parameter/boolean", + "version": "1.0.0", + "label": "InputBool", + "thumbnail": null, + "runHash": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [], + "outputs": { + "out_1": true + } + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "key": "simcore/services/frontend/iterator-consumer/probe/boolean", + "version": "1.0.0", + "label": "OutputBool", + "thumbnail": null, + "inputs": { + "in_1": { + "nodeUuid": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "output": "out_1" + } + }, + "inputsUnits": {}, + "inputNodes": [ + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3" + ] + } + }, + "prjOwner": "harpercynthia@example.com", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "position": { + "x": 220, + "y": 40 + } + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "position": { + "x": 240, + "y": 400 + } + }, + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "position": { + "x": 820, + "y": 360 + } + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "position": { + "x": 580, + "y": 200 + } + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "position": { + "x": 580, + "y": 40 + } + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "position": { + "x": 278, + "y": 733 + } + }, + "9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca": { + "position": { + "x": 200, + "y": 840 + } + }, + "ab014072-a95f-5775-bb34-5582a13245a6": { + "position": { + "x": 700, + "y": 840 + } + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "position": { + "x": 220, + "y": 200 + } + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "position": { + "x": 580, + "y": 420 + } + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "position": { + "x": 562, + "y": 586 + } + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "position": { + "x": 271, + "y": 621 + } + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "position": { + "x": 656, + "y": 720 + } + } + }, + "slideshow": {}, + "currentNodeId": "aeab71fe-f71b-11ee-8fca-0242ac140008", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + 
"references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "name": "posix" + }, + "status_code": 204 + }, + { + "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "096acfb2-8c38-560a-91d3-8911f4334289", + "value": 1, + "label": "InputInt" + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": "bcc36381-7377-533f-bb04-f785c0f8e2be", + "value": "Foo", + "label": "InputString" + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "value": [ + 1 + ], + "label": "InputArray" + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "value": 1, + "label": "InputNumber" + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "value": true, + "label": "InputBool" + } + } + } + }, + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/nodes/9fdd8dcc-7b2a-5b48-9918-63edc5eb1aca/outputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str" + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str" + }, + "response_value": "nodes" + } + ] + }, + "request_payload": { + "outputs": { + "outFile": { + "store": 0, + "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", + "label": "input.json", + "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "dataset": null + } + } + }, + "status_code": 204 + }, + { + "name": "PATCH /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/inputs", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "key": "value" + }, + "response_body": { + "data": { + "096acfb2-8c38-560a-91d3-8911f4334289": { + "key": "096acfb2-8c38-560a-91d3-8911f4334289", + "value": 42, + "label": "InputInt" + }, + "bcc36381-7377-533f-bb04-f785c0f8e2be": { + "key": 
"bcc36381-7377-533f-bb04-f785c0f8e2be", + "value": "Z43", + "label": "InputString" + }, + "197ba9f7-d09c-5cf8-9290-284cd6c40fb3": { + "key": "197ba9f7-d09c-5cf8-9290-284cd6c40fb3", + "value": [ + 1, + 2, + 3 + ], + "label": "InputArray" + }, + "d43949c5-5143-5738-bae9-7d231dcabe7f": { + "key": "d43949c5-5143-5738-bae9-7d231dcabe7f", + "value": 3.14, + "label": "InputNumber" + }, + "584e44d4-9a78-571f-a2a4-7d9c7b2396e3": { + "key": "584e44d4-9a78-571f-a2a4-7d9c7b2396e3", + "value": false, + "label": "InputBool" + } + } + } + }, + { + "name": "POST /computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c" + } + }, + "status_code": 201 + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", 
+ "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": 
"GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 0.05, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": null, + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c:stop?user_id=1" + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": 
"director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 1.0, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": "2024-04-15T15:50:37.747356+00:00", + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": null + } + }, + { + "name": "GET /v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": [] + }, + "progress": 1.0, + "node_states": { + "2519eb2c-fd8f-5f07-a75a-800cc4b284f2": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-15T15:50:31.284124+00:00", + "stopped": "2024-04-15T15:50:37.747356+00:00", + "submitted": "2024-04-15T15:50:31.162440+00:00", + "url": "http://director-v2:8000/v2/computations/e19f9144-fb3f-11ee-b7b0-0242ac14001c?user_id=1", + "stop_url": null + } + }, + { + "name": "GET /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c/outputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "ab014072-a95f-5775-bb34-5582a13245a6": { + "key": "ab014072-a95f-5775-bb34-5582a13245a6", + "value": { + "store": 0, + "path": "api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json", + "label": "input.json", + "eTag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "dataset": null + }, + "label": "OutputFile" + }, + "3d4963ee-179f-5948-9086-dd9bef543f65": { + "key": "3d4963ee-179f-5948-9086-dd9bef543f65", + "value": 42, + "label": "OutputInt" + }, + "2a9452ac-d210-5e11-a631-1d73454bfd91": { + "key": "2a9452ac-d210-5e11-a631-1d73454bfd91", + "value": "Z43", + "label": "OutputString" + }, + "cb5bc33d-6635-5680-98e3-a6ac57f908f4": { + "key": "cb5bc33d-6635-5680-98e3-a6ac57f908f4", + "value": [ + 1, + 2, + 3 + ], + "label": "OutputArray" + }, + "cd7eacb5-6806-5956-86c8-9b30ec588402": { + "key": "cd7eacb5-6806-5956-86c8-9b30ec588402", + "value": 3.14, + "label": "OutputNumber" + }, + "efaaeabf-e4bc-5667-a757-d9b17ad606d9": { + "key": "efaaeabf-e4bc-5667-a757-d9b17ad606d9", + "value": false, + 
"label": "OutputBool" + } + } + } + }, + { + "name": "POST /simcore-s3/files/metadata:search_owned", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search_owned", + "path_parameters": [] + }, + "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4", + "response_body": { + "data": [] + } + }, + { + "name": "POST /files/api%2Fd8bc0c02-c3ee-3cec-a562-e6fd3e00be4b%2Finput.json:soft-copy", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/files/{file_id}:soft-copy", + "path_parameters": [ + { + "in": "path", + "name": "file_id", + "required": true, + "schema": { + "title": "File Id", + "anyOf": [ + { + "type": "str", + "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" + }, + { + "type": "str", + "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" + } + ] + }, + "response_value": "files" + } + ] + }, + "query": "user_id=1", + "request_payload": { + "link_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json" + }, + "response_body": { + "data": { + "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "input.json", + "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "created_at": "2024-04-15T15:50:27.134729", + "last_modified": "2024-04-15T15:50:27+00:00", + "file_size": 9, + "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "is_soft_link": true, + "is_directory": false, + "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" + }, + "error": null + } + }, + { + "name": "POST /simcore-s3/files/metadata:search", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search", + "path_parameters": [] + }, + "query": "user_id=1&startswith=api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4&access_right=read", + "response_body": { + "data": [ + { + "file_uuid": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "input.json", + "file_id": "api/5b0cd3cd-5ceb-3d74-9961-246840c1e1d4/input.json", + "created_at": "2024-04-15T15:50:27.134729", + "last_modified": "2024-04-15T15:50:27+00:00", + "file_size": 9, + "entity_tag": "3f14fb3a8ba8d750f26bdaa402b2f6cc", + "is_soft_link": true, + "is_directory": false, + "sha256_checksum": "fd3bb7e4cc5098e8040cd35fe3346628693097fbf7d05477d0b2845b20b4a4fd" + } + ] + } + }, + { + "name": "GET /locations/0/files/api%2F5b0cd3cd-5ceb-3d74-9961-246840c1e1d4%2Finput.json", + "description": "", + "method": "GET", + "host": "storage", + "path": { + "path": "/v0/locations/{location_id}/files/{file_id}", + "path_parameters": [ + { + "in": "path", + "name": "file_id", + "required": true, + "schema": { + "title": "File Id", + "anyOf": [ + { + "type": "str", + "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$" + }, + { + "type": "str", + "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" + } + ] + }, + "response_value": "files" + }, + { + "in": "path", + "name": "location_id", + "required": 
true, + "schema": { + "title": "Location Id", + "type": "int" + }, + "response_value": "locations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "data": { + "link": "http://127.0.0.1:9001/simcore/api/d8bc0c02-c3ee-3cec-a562-e6fd3e00be4b/input.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=12345678%2F20240415%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240415T155039Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=79a5cbc9b23ebb4084f4156acd6f7e6f891197dbd5a088327c9131768bd1c610" + } + } + }, + { + "name": "DELETE /projects/e19f9144-fb3f-11ee-b7b0-0242ac14001c", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "status_code": 204 + } +] diff --git a/services/api-server/tests/mocks/search_file_checksum.json b/services/api-server/tests/mocks/search_file_checksum.json new file mode 100644 index 00000000000..2b61f6cdf1a --- /dev/null +++ b/services/api-server/tests/mocks/search_file_checksum.json @@ -0,0 +1,34 @@ +[ + { + "name": "POST /simcore-s3/files/metadata:search", + "description": "", + "method": "POST", + "host": "storage", + "path": { + "path": "/v0/simcore-s3/files/metadata:search", + "path_parameters": [] + }, + "query": "kind=owned&user_id=2&sha256_checksum=92c7a39ce451ee57edd6da0b9c734ca9e6423a20410f73ce55e0d07cfd603b9d", + "request_payload": null, + "response_body": { + "data": [ + { + "file_uuid": "api/b9d0fa49-3e3c-333e-b282-0df27654dafe/test.bash", + "location_id": 0, + "project_name": null, + "node_name": null, + "file_name": "test.bash", + "file_id": "api/b9d0fa49-3e3c-333e-b282-0df27654dafe/test.bash", + "created_at": "2023-09-11T07:19:21.785922", + "last_modified": "2023-09-11T07:19:21+00:00", + "file_size": 26, + "entity_tag": "dda643336c3dcb6ec9f93fbe084effc1", + "is_soft_link": false, + "is_directory": false, + "sha256_checksum": "92c7a39ce451ee57edd6da0b9c734ca9e6423a20410f73ce55e0d07cfd603b9d" + } + ] + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/start_job_no_payment.json b/services/api-server/tests/mocks/start_job_no_payment.json new file mode 100644 index 00000000000..15b1e3b92b4 --- /dev/null +++ b/services/api-server/tests/mocks/start_job_no_payment.json @@ -0,0 +1,97 @@ +[ + { + "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] + }, + "response_body": { + "data": { + "pipeline_id": "48323c7f-e379-4e16-8b58-dc69643f653d" + } + }, + "status_code": 201 + }, + { + "name": "GET /v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + 
"type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "48323c7f-e379-4e16-8b58-dc69643f653d", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": [] + }, + "progress": 0.0, + "node_states": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": "2023-10-26T14:19:05.389765+00:00", + "stopped": null, + "submitted": "2023-10-26T14:19:05.241935+00:00", + "url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/48323c7f-e379-4e16-8b58-dc69643f653d:stop?user_id=1" + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/start_job_not_enough_credit.json b/services/api-server/tests/mocks/start_job_not_enough_credit.json new file mode 100644 index 00000000000..c942d8341ad --- /dev/null +++ b/services/api-server/tests/mocks/start_job_not_enough_credit.json @@ -0,0 +1,244 @@ +[ + { + "name": "GET /projects/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "48323c7f-e379-4e16-8b58-dc69643f653d", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d", + "description": "Study associated to solver job:\n{\n \"id\": \"48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/48323c7f-e379-4e16-8b58-dc69643f653d\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:13:07.998632+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-26T14:13:08.013Z", + "lastChangeDate": "2023-10-26T14:13:08.013Z", + "workspaceId": 3, + "type": "STANDARD", + "templateType": null, + "folderId": 2, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "austin66@example.org", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + 
"value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 200 + }, + { + "name": "PUT /projects/48323c7f-e379-4e16-8b58-dc69643f653d/nodes/3b0b20e0-c860-51d9-9f82-d6b4bc5c2f24/pricing-plan/1/pricing-unit/1", + "description": "", + "method": "PUT", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", + "path_parameters": [ + { + "in": "path", + "name": "pricing_plan_id", + "required": true, + "schema": { + "title": "Pricing Plan Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-plan" + }, + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "pricing_unit_id", + "required": true, + "schema": { + "title": "Pricing Unit Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-unit" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "nodes" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + }, + { + "name": "POST /computations/48323c7f-e379-4e16-8b58-dc69643f653d:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] + }, + "response_body": { + "data": null, + "error": { + "logs": [], + "errors": [ + { + "code": "WalletNotEnoughCreditsError", + "message": "Wallet does not have enough credits. 
Wallet 1 credit balance -200.11", + "resource": null, + "field": null + } + ], + "status": 402, + "message": "Unexpected client error" + } + }, + "status_code": 402 + } +] diff --git a/services/api-server/tests/mocks/start_job_with_payment.json b/services/api-server/tests/mocks/start_job_with_payment.json new file mode 100644 index 00000000000..847dab9d054 --- /dev/null +++ b/services/api-server/tests/mocks/start_job_with_payment.json @@ -0,0 +1,289 @@ +[ + { + "name": "GET /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": { + "data": { + "uuid": "e551e994-a68d-4c26-b6fc-59019b35ee6e", + "name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "Study associated to solver job:\n{\n \"id\": \"e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"name\": \"solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2/jobs/e551e994-a68d-4c26-b6fc-59019b35ee6e\",\n \"inputs_checksum\": \"015ba4cd5cf00c511a8217deb65c242e3b15dc6ae4b1ecf94982d693887d9e8a\",\n \"created_at\": \"2023-10-26T14:10:11.103041+00:00\"\n}", + "thumbnail": "https://via.placeholder.com/170x120.png", + "creationDate": "2023-10-26T14:10:11.118Z", + "lastChangeDate": "2023-10-26T14:10:11.118Z", + "workspaceId": 12, + "type": "STANDARD", + "templateType": null, + "folderId": 2, + "trashedAt": null, + "trashedBy": null, + "workbench": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 0.0, + "inputs": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "store": 0, + "path": "api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + "label": "input.txt" + } + }, + "inputsUnits": {}, + "inputNodes": [], + "outputs": {}, + "state": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "prjOwner": "freemanryan@example.net", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "NOT_STARTED" + } + }, + "ui": { + "workbench": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "position": { + "x": 633, + "y": 229 + } + } + }, + "slideshow": {}, + "currentNodeId": "657b124c-0697-5166-b820-a2ea2704ae84", + "annotations": {} + }, + "quality": {}, + "dev": {} + } + }, + "status_code": 200 + }, + { + "name": "PUT /projects/e551e994-a68d-4c26-b6fc-59019b35ee6e/nodes/657b124c-0697-5166-b820-a2ea2704ae84/pricing-plan/1/pricing-unit/1", + "description": "", + "method": "PUT", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/nodes/{node_id}/pricing-plan/{pricing_plan_id}/pricing-unit/{pricing_unit_id}", + "path_parameters": [ + { + "in": "path", + "name": "pricing_plan_id", + "required": true, + "schema": { + "title": "Pricing Plan Id", + "type": "int", + "pattern": null, + "format": null, + 
"exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-plan" + }, + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + }, + { + "in": "path", + "name": "pricing_unit_id", + "required": true, + "schema": { + "title": "Pricing Unit Id", + "type": "int", + "pattern": null, + "format": null, + "exclusiveMinimum": true, + "minimum": 0, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "pricing-unit" + }, + { + "in": "path", + "name": "node_id", + "required": true, + "schema": { + "title": "Node Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "nodes" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + }, + { + "name": "POST /computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "force_restart": false, + "subgraph": [] + }, + "response_body": { + "data": { + "pipeline_id": "e551e994-a68d-4c26-b6fc-59019b35ee6e" + } + }, + "status_code": 201 + }, + { + "name": "GET /v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "e551e994-a68d-4c26-b6fc-59019b35ee6e", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "657b124c-0697-5166-b820-a2ea2704ae84": [] + }, + "progress": 0.0, + "node_states": { + "657b124c-0697-5166-b820-a2ea2704ae84": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.0 + } + } + }, + "iteration": 1, + "started": "2023-10-26T14:11:20.606448+00:00", + "stopped": null, + "submitted": "2023-10-26T14:11:20.460760+00:00", + "url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/e551e994-a68d-4c26-b6fc-59019b35ee6e:stop?user_id=1" + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/start_solver_job.json b/services/api-server/tests/mocks/start_solver_job.json new file mode 100644 index 00000000000..f779cd45b9d --- /dev/null +++ b/services/api-server/tests/mocks/start_solver_job.json @@ -0,0 +1,79 @@ +[ + { + "name": "POST 
/computations/b9faf8d8-4928-4e50-af40-3690712c5481:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "b9faf8d8-4928-4e50-af40-3690712c5481" + } + }, + "status_code": 409 + }, + { + "name": "GET /v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "response_body": { + "id": "b9faf8d8-4928-4e50-af40-3690712c5481", + "state": "STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": [] + }, + "progress": 0.05, + "node_states": { + "d3a3c1e6-3d89-5e7a-af22-0f3ffcedef3d": { + "modified": true, + "dependencies": [], + "currentStatus": "STARTED", + "progress": 0.05 + } + } + }, + "iteration": 2, + "started": "2024-06-18T20:33:46.482456+00:00", + "stopped": "2024-06-18T20:31:25.399647+00:00", + "submitted": "2024-06-18T20:33:46.384524+00:00", + "url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481?user_id=1", + "stop_url": "http://director-v2/v2/computations/b9faf8d8-4928-4e50-af40-3690712c5481:stop?user_id=1" + } + } +] diff --git a/services/api-server/tests/mocks/stop_job.json b/services/api-server/tests/mocks/stop_job.json new file mode 100644 index 00000000000..f6574562dbf --- /dev/null +++ b/services/api-server/tests/mocks/stop_job.json @@ -0,0 +1,116 @@ +[ + { + "name": "POST /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", + "description": "", + "method": "POST", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}:stop", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "user_id": 1 + }, + "response_body": { + "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", + "state": "NOT_STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + }, + "progress": 0.0, + "node_states": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "iteration": null, + "started": null, + "stopped": null, + "submitted": "2023-11-17T13:04:59.327557+00:00", + "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc:stop", + "stop_url": null + }, + "status_code": 202 + }, + { + "name": "GET /v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": 
"Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "4989fa99-b567-43bd-978a-68c2b95fdabc", + "state": "NOT_STARTED", + "result": null, + "pipeline_details": { + "adjacency_list": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": [] + }, + "progress": 0.0, + "node_states": { + "0c8b627e-2d3e-5560-a4de-f6cbc8ebca2f": { + "modified": true, + "dependencies": [], + "currentStatus": "NOT_STARTED", + "progress": null + } + } + }, + "iteration": null, + "started": null, + "stopped": null, + "submitted": "2023-11-17T13:04:59.327557+00:00", + "url": "http://director-v2:8000/v2/computations/4989fa99-b567-43bd-978a-68c2b95fdabc?user_id=1", + "stop_url": null + }, + "status_code": 200 + } +] diff --git a/services/api-server/tests/mocks/study_job_start_stop_delete.json b/services/api-server/tests/mocks/study_job_start_stop_delete.json new file mode 100644 index 00000000000..d279e1dc240 --- /dev/null +++ b/services/api-server/tests/mocks/study_job_start_stop_delete.json @@ -0,0 +1,240 @@ +[ + { + "name": "POST /computations/10da03f0-f1bc-11ee-9e42-0242ac140012:start", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/computations/{project_id}:start", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": {}, + "response_body": { + "data": { + "pipeline_id": "10da03f0-f1bc-11ee-9e42-0242ac140012" + } + }, + "status_code": 201 + }, + { + "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "PENDING", + "result": null, + "pipeline_details": { + "adjacency_list": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 0.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": true, + "dependencies": [], + "currentStatus": "PENDING", + "progress": null + } + } + }, + "iteration": 1, + "started": null, + "stopped": null, + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", + "stop_url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop?user_id=1" + }, + "status_code": 200 + }, + { + "name": "POST /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", + "description": "", + "method": "POST", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}:stop", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + 
"schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": null, + "request_payload": { + "user_id": 1 + }, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 1.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-03T13:15:00.425270+00:00", + "stopped": "2024-04-03T13:15:08.997076+00:00", + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012:stop", + "stop_url": null + }, + "status_code": 202 + }, + { + "name": "GET /v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "GET", + "host": "director-v2", + "path": { + "path": "/v2/computations/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "computations" + } + ] + }, + "query": "user_id=1", + "request_payload": null, + "response_body": { + "id": "10da03f0-f1bc-11ee-9e42-0242ac140012", + "state": "SUCCESS", + "result": null, + "pipeline_details": { + "adjacency_list": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": [] + }, + "progress": 1.0, + "node_states": { + "4df8e8a9-eb3d-5d5d-b058-ee1f6179c09f": { + "modified": false, + "dependencies": [], + "currentStatus": "SUCCESS", + "progress": 1.0 + } + } + }, + "iteration": 1, + "started": "2024-04-03T13:15:00.425270+00:00", + "stopped": "2024-04-03T13:15:08.997076+00:00", + "submitted": "2024-04-03T13:15:00.045631+00:00", + "url": "http://director-v2:8000/v2/computations/10da03f0-f1bc-11ee-9e42-0242ac140012?user_id=1", + "stop_url": null + }, + "status_code": 200 + }, + { + "name": "DELETE /projects/10da03f0-f1bc-11ee-9e42-0242ac140012", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": null, + "format": "uuid", + "exclusiveMinimum": null, + "minimum": null, + "anyOf": null, + "allOf": null, + "oneOf": null + }, + "response_value": "projects" + } + ] + }, + "query": null, + "request_payload": null, + "response_body": null, + "status_code": 204 + } +] diff --git a/services/api-server/tests/mocks/test_get_and_update_study_job_metadata.json b/services/api-server/tests/mocks/test_get_and_update_study_job_metadata.json new file mode 100644 index 00000000000..b624e232ff9 --- /dev/null +++ b/services/api-server/tests/mocks/test_get_and_update_study_job_metadata.json @@ -0,0 +1,621 @@ +[ + { + "name": "clone_project", + "description": "", + "method": "POST", + "host": "webserver", + "path": { + "path": "/v0/projects", + "path_parameters": [] + }, + "query": "from_study=784f63f4-1d9f-11ef-892d-0242ac140012&hidden=true", + "response_body": { + "data": { + "task_id": 
"POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "task_name": "POST /v0/projects?from_study=784f63f4-1d9f-11ef-892d-0242ac140012&hidden=true", + "status_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253D784f63f4-1d9f-11ef-892d-0242ac140012%2526hidden%253Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "result_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253D784f63f4-1d9f-11ef-892d-0242ac140012%2526hidden%253Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72/result", + "abort_href": "http://webserver:8080/v0/tasks/POST%2520%252Fv0%252Fprojects%253Ffrom_study%253D784f63f4-1d9f-11ef-892d-0242ac140012%2526hidden%253Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72" + } + }, + "status_code": 202 + }, + { + "name": "get_clone_project_task_status", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "message": "creating new study...", + "percent": 0.0 + }, + "done": false, + "started": "2024-05-30T10:29:54.137359" + } + } + }, + { + "name": "get_clone_project_task_status_1", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "message": "Checking study access rights...", + "percent": 0.0 + }, + "done": false, + "started": "2024-05-30T10:29:54.137359" + } + } + }, + { + "name": "get_clone_project_task_status_2", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "message": "finished", + "percent": 1.0 + }, + "done": false, + "started": "2024-05-30T10:29:54.137359" + } + } + }, + { + "name": "get_clone_project_task_status_3", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "message": "finished", + "percent": 1.0 + }, + "done": false, + "started": "2024-05-30T10:29:54.137359" + } + } + }, + { + "name": 
"get_clone_project_task_status_4", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "task_progress": { + "task_id": "POST%20%2Fv0%2Fprojects%3Ffrom_study%3D784f63f4-1d9f-11ef-892d-0242ac140012%26hidden%3Dtrue.3b945ded-136e-405a-8ae3-e2b2f3ea9e72", + "message": "finished", + "percent": 1.0 + }, + "done": true, + "started": "2024-05-30T10:29:54.137359" + } + } + }, + { + "name": "get_clone_project_task_result", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/tasks/{task_id}/result", + "path_parameters": [ + { + "in": "path", + "name": "task_id", + "required": true, + "schema": { + "title": "Task Id", + "type": "str" + }, + "response_value": "tasks" + } + ] + }, + "response_body": { + "data": { + "uuid": "8dd46a50-1e6f-11ef-90e3-0242ac14000c", + "name": "New Study (Copy)", + "description": "", + "thumbnail": "", + "creationDate": "2024-05-30T10:29:54.150Z", + "lastChangeDate": "2024-05-30T10:29:54.150Z", + "workspaceId": 3, + "type": "STANDARD", + "templateType": null, + "folderId": 12, + "trashedAt": "2024-05-30T10:30:54.137359", + "trashedBy": 2, + "workbench": { + "45043872-d6d3-530b-bf40-67bfde79191c": { + "key": "simcore/services/dynamic/jupyter-math", + "version": "3.0.2", + "label": "JupyterLab Math (Python+Octave)", + "thumbnail": null, + "inputs": {}, + "inputsUnits": {}, + "inputNodes": [] + } + }, + "prjOwner": "joshualam@example.com", + "accessRights": { + "3": { + "read": true, + "write": true, + "delete": true + } + }, + "tags": [], + "classifiers": [], + "state": { + "locked": { + "value": false, + "status": "CLOSED" + }, + "state": { + "value": "UNKNOWN" + } + }, + "ui": { + "workbench": { + "45043872-d6d3-530b-bf40-67bfde79191c": { + "position": { + "x": 195, + "y": 180 + } + } + }, + "slideshow": {}, + "currentNodeId": "784f63f4-1d9f-11ef-892d-0242ac140012", + "mode": "workbench" + }, + "quality": { + "enabled": true, + "tsr_target": { + "r01": { + "level": 4, + "references": "" + }, + "r02": { + "level": 4, + "references": "" + }, + "r03": { + "level": 4, + "references": "" + }, + "r04": { + "level": 4, + "references": "" + }, + "r05": { + "level": 4, + "references": "" + }, + "r06": { + "level": 4, + "references": "" + }, + "r07": { + "level": 4, + "references": "" + }, + "r08": { + "level": 4, + "references": "" + }, + "r09": { + "level": 4, + "references": "" + }, + "r10": { + "level": 4, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + "references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + }, + "tsr_current": { + "r01": { + "level": 0, + "references": "" + }, + "r02": { + "level": 0, + "references": "" + }, + "r03": { + "level": 0, + "references": "" + }, + "r04": { + "level": 0, + "references": "" + }, + "r05": { + "level": 0, + "references": "" + }, + "r06": { + "level": 0, + "references": "" + }, + "r07": { + "level": 0, + "references": "" + }, + "r08": { + "level": 0, + "references": "" + }, + "r09": { + "level": 0, + "references": "" + }, + "r10": { + "level": 0, + "references": "" + }, + "r03b": { + "references": "" + }, + "r03c": { + 
"references": "" + }, + "r07b": { + "references": "" + }, + "r07c": { + "references": "" + }, + "r07d": { + "references": "" + }, + "r07e": { + "references": "" + }, + "r08b": { + "references": "" + }, + "r10b": { + "references": "" + } + } + }, + "dev": {} + } + }, + "status_code": 201 + }, + { + "name": "patch_project", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "name": "studies/784f63f4-1d9f-11ef-892d-0242ac140012/jobs/8dd46a50-1e6f-11ef-90e3-0242ac14000c" + }, + "status_code": 204 + }, + { + "name": "get_project_inputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/inputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": {} + } + }, + { + "name": "get_project_metadata", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "projectUuid": "8dd46a50-1e6f-11ef-90e3-0242ac14000c", + "custom": {} + } + } + }, + { + "name": "patch_project_metadata", + "description": "", + "method": "PATCH", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "request_payload": { + "custom": { + "number": 3.14, + "integer": 42, + "string": "foo", + "boolean": true + } + }, + "response_body": { + "data": { + "projectUuid": "8dd46a50-1e6f-11ef-90e3-0242ac14000c", + "custom": { + "number": 3.14, + "string": "foo", + "boolean": true, + "integer": 42 + } + } + } + }, + { + "name": "get_project_metadata_1", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": { + "projectUuid": "8dd46a50-1e6f-11ef-90e3-0242ac14000c", + "custom": { + "number": 3.14, + "string": "foo", + "boolean": true, + "integer": 42 + } + } + } + }, + { + "name": "delete_project", + "description": "", + "method": "DELETE", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "status_code": 204 + }, + { + "name": "get_project_metadata_2", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/metadata", + "path_parameters": [ + { + "in": "path", + 
"name": "project_id", + "required": true, + "schema": { + "title": "Project Id", + "type": "str", + "format": "uuid" + }, + "response_value": "projects" + } + ] + }, + "response_body": { + "data": null, + "error": { + "logs": [ + { + "message": "Project with uuid '8dd46a50-1e6f-11ef-90e3-0242ac14000c' not found.", + "level": "ERROR", + "logger": "user" + } + ], + "errors": [ + { + "code": "HTTPNotFound", + "message": "Project with uuid '8dd46a50-1e6f-11ef-90e3-0242ac14000c' not found.", + "resource": null, + "field": null + } + ], + "status": 404, + "message": "Project with uuid '8dd46a50-1e6f-11ef-90e3-0242ac14000c' not found." + } + }, + "status_code": 404 + } +] diff --git a/services/api-server/tests/test_utils_pydantic.py b/services/api-server/tests/test_utils_pydantic.py new file mode 100644 index 00000000000..77136f73982 --- /dev/null +++ b/services/api-server/tests/test_utils_pydantic.py @@ -0,0 +1,214 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from typing import Annotated, Any + +import fastapi +import pydantic +import pytest +from fastapi import FastAPI +from pydantic import ( + AnyHttpUrl, + AnyUrl, + BaseModel, + HttpUrl, + TypeAdapter, + ValidationError, +) +from simcore_service_api_server.models._utils_pydantic import UriSchema + + +class _FakeModel(BaseModel): + urls0: list[HttpUrl] + urls1: list[Annotated[HttpUrl, UriSchema()]] + + # with and w/o + url0: HttpUrl + url1: Annotated[HttpUrl, UriSchema()] + + # # including None inside/outside annotated + url2: Annotated[HttpUrl, UriSchema()] | None + url3: Annotated[HttpUrl | None, UriSchema()] + + # # mistake + int0: Annotated[int, UriSchema()] + + +@pytest.fixture +def pydantic_schema() -> dict[str, Any]: + return _FakeModel.model_json_schema() + + +def test_pydantic_json_schema(pydantic_schema: dict[str, Any]): + assert pydantic_schema["properties"] == { + "int0": {"title": "Int0", "type": "integer"}, + "url0": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "title": "Url0", + "type": "string", + }, + "url1": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "title": "Url1", + "type": "string", + }, + "url2": { + "anyOf": [ + {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"}, + {"type": "null"}, + ], + "title": "Url2", + }, + "url3": { + "anyOf": [ + {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"}, + {"type": "null"}, + ], + "title": "Url3", + }, + "urls0": { + "items": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "type": "string", + }, + "title": "Urls0", + "type": "array", + }, + "urls1": { + "items": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "type": "string", + }, + "title": "Urls1", + "type": "array", + }, + } + + +@pytest.fixture +def fastapi_schema() -> dict[str, Any]: + app = FastAPI() + + @app.get("/", response_model=_FakeModel) + def _h(): + ... 
+ + openapi = app.openapi() + return openapi["components"]["schemas"][_FakeModel.__name__] + + +def test_fastapi_openapi_component_schemas(fastapi_schema: dict[str, Any]): + + assert fastapi_schema["properties"] == { + "int0": {"title": "Int0", "type": "integer"}, + "url0": {"title": "Url0", "type": "string"}, + "url1": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "title": "Url1", + "type": "string", + }, + "url2": { + "anyOf": [ + {"format": "uri", "maxLength": 2083, "minLength": 1, "type": "string"}, + {"type": "null"}, + ], + "title": "Url2", + }, + "url3": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Url3"}, + "urls0": {"items": {"type": "string"}, "title": "Urls0", "type": "array"}, + "urls1": { + "items": { + "format": "uri", + "maxLength": 2083, + "minLength": 1, + "type": "string", + }, + "title": "Urls1", + "type": "array", + }, + } + + +@pytest.mark.xfail( + reason=f"{pydantic.__version__=} and {fastapi.__version__=} produce different json-schemas for the same model" +) +def test_compare_pydantic_vs_fastapi_schemas( + fastapi_schema: dict[str, Any], pydantic_schema: dict[str, Any] +): + + # NOTE @all: I cannot understand this?! + assert fastapi_schema["properties"] == pydantic_schema["properties"] + + +def test_differences_between_new_pydantic_url_types(): + # SEE https://docs.pydantic.dev/2.10/api/networks/ + + # | **URL** | **AnyUrl** | **AnyHttpUrl** | **HttpUrl** | + # |-------------------------------|-------------|-----------------|-----------------| + # | `http://example.com` | βœ… | βœ… | βœ… | + # | `https://example.com/resource`| βœ… | βœ… | βœ… | + # | `ftp://example.com` | βœ… | ❌ | ❌ | + # | `http://localhost` | βœ… | βœ… | βœ… | + # | `http://127.0.0.1` | βœ… | βœ… | βœ… | + # | `http://127.0.0.1:8080` | βœ… | βœ… | βœ… | + # | `customscheme://example.com` | βœ… | ❌ | ❌ | + + url = "http://example.com" + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "https://example.com/resource" + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "ftp://example.com" + TypeAdapter(AnyUrl).validate_python(url) + with pytest.raises(ValidationError): + TypeAdapter(HttpUrl).validate_python(url) + with pytest.raises(ValidationError): + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "http://localhost" + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "http://127.0.0.1" + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "http://127.0.0.1:8080" + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) + + url = "customscheme://example.com" + TypeAdapter(AnyUrl).validate_python(url) + with pytest.raises(ValidationError): + TypeAdapter(HttpUrl).validate_python(url) + with pytest.raises(ValidationError): + TypeAdapter(AnyHttpUrl).validate_python(url) + + # examples taken from docker API + for url in ( + "https://hub-mirror.corp.example.com:5000/", + "https://[2001:db8:a0b:12f0::1]/", + ): + TypeAdapter(AnyUrl).validate_python(url) + TypeAdapter(HttpUrl).validate_python(url) + TypeAdapter(AnyHttpUrl).validate_python(url) diff --git a/services/api-server/tests/unit/_with_db/conftest.py 
b/services/api-server/tests/unit/_with_db/conftest.py index 307fccde7c3..fd2441c879e 100644 --- a/services/api-server/tests/unit/_with_db/conftest.py +++ b/services/api-server/tests/unit/_with_db/conftest.py @@ -7,44 +7,56 @@ import os import shutil import subprocess +import sys +from collections.abc import AsyncGenerator, Callable, Iterable from pathlib import Path -from pprint import pformat -from typing import Callable, Dict, Union +from typing import TypedDict -import aiopg.sa -import aiopg.sa.engine as aiopg_sa_engine import httpx import pytest import simcore_postgres_database.cli as pg_cli -import simcore_service_api_server.db.tables as orm import sqlalchemy as sa -import sqlalchemy.engine as sa_engine +import sqlalchemy.engine import yaml -from aiopg.sa.result import RowProxy -from faker import Faker from fastapi import FastAPI -from pytest_simcore.helpers.rawdata_fakers import random_user +from models_library.api_schemas_api_server.api_keys import ApiKeyInDB +from pydantic import PositiveInt +from pytest_mock import MockerFixture +from pytest_simcore.helpers import postgres_tools +from pytest_simcore.helpers.faker_factories import ( + random_api_auth, + random_product, + random_user, +) +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from simcore_postgres_database.models.base import metadata +from simcore_postgres_database.models.api_keys import api_keys +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.users import users +from simcore_service_api_server.clients.postgres import get_engine from simcore_service_api_server.core.application import init_app -from simcore_service_api_server.db.repositories import BaseRepository -from simcore_service_api_server.db.repositories.users import UsersRepository -from simcore_service_api_server.models.domain.api_keys import ApiKeyInDB +from simcore_service_api_server.core.settings import PostgresSettings +from sqlalchemy.ext.asyncio import AsyncEngine ## POSTGRES ----- +_CURRENT_DIR = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +) + + @pytest.fixture(scope="session") def docker_compose_file( - default_app_env_vars: dict[str, str], tests_utils_dir: Path, tmpdir_factory + default_app_env_vars: dict[str, str], tmpdir_factory: Callable ) -> Path: # Overrides fixture in https://github.com/avast/pytest-docker - # NOTE: do not forget to add the current environ here, otherwise docker-compose fails + # NOTE: do not forget to add the current environ here, otherwise docker compose fails environ = dict(os.environ) environ.update(default_app_env_vars) - src_path = tests_utils_dir / "docker-compose.yml" + src_path = _CURRENT_DIR / "data" / "docker-compose.yml" assert src_path.exists dst_path = Path(str(tmpdir_factory.mktemp("config").join("docker-compose.yml"))) @@ -54,7 +66,7 @@ def docker_compose_file( # configs subprocess.run( - f'docker-compose --file "{src_path}" config > "{dst_path}"', + f'docker compose --file "{src_path}" config > "{dst_path}"', shell=True, check=True, env=environ, @@ -63,21 +75,31 @@ def docker_compose_file( return dst_path -@pytest.fixture(scope="session") -def postgres_service(docker_services, docker_ip, docker_compose_file: Path) -> Dict: +class PostgreServiceInfoDict(TypedDict): + dsn: str + user: str + password: str + host: str + port: int + datbase: str + +@pytest.fixture(scope="session") +def postgres_service( + docker_services, docker_ip, docker_compose_file: 
Path +) -> PostgreServiceInfoDict: # check docker-compose's environ is resolved properly config = yaml.safe_load(docker_compose_file.read_text()) environ = config["services"]["postgres"]["environment"] # builds DSN - config = dict( - user=environ["POSTGRES_USER"], - password=environ["POSTGRES_PASSWORD"], - host=docker_ip, - port=docker_services.port_for("postgres", 5432), - database=environ["POSTGRES_DB"], - ) + config = { + "user": environ["POSTGRES_USER"], + "password": environ["POSTGRES_PASSWORD"], + "host": docker_ip, + "port": docker_services.port_for("postgres", 5432), + "database": environ["POSTGRES_DB"], + } dsn = "postgresql://{user}:{password}@{host}:{port}/{database}".format(**config) @@ -101,115 +123,200 @@ def is_postgres_responsive() -> bool: ) config["dsn"] = dsn - return config + return PostgreServiceInfoDict(**config) @pytest.fixture(scope="session") -def make_engine(postgres_service: dict) -> Callable: - dsn = postgres_service["dsn"] # session scope freezes dsn - - def maker(*, is_async=True) -> Union[aiopg_sa_engine.Engine, sa_engine.Engine]: - if is_async: - return aiopg.sa.create_engine(dsn) - return sa.create_engine(dsn) - - return maker +def sync_engine( + postgres_service: PostgreServiceInfoDict, +) -> Iterable[sqlalchemy.engine.Engine]: + _engine: sqlalchemy.engine.Engine = sa.create_engine(url=postgres_service["dsn"]) + yield _engine + _engine.dispose() @pytest.fixture -def migrated_db(postgres_service: dict, make_engine: Callable): +def migrated_db(postgres_service: dict, sync_engine: sqlalchemy.engine.Engine): # NOTE: this is equivalent to packages/pytest-simcore/src/pytest_simcore/postgres_service.py::postgres_db # but we do override postgres_dsn -> postgres_engine -> postgres_db because we want the latter # fixture to have local scope # kwargs = postgres_service.copy() kwargs.pop("dsn") + assert pg_cli.discover.callback is not None pg_cli.discover.callback(**kwargs) + + assert pg_cli.upgrade.callback is not None pg_cli.upgrade.callback("head") yield + assert pg_cli.downgrade.callback is not None pg_cli.downgrade.callback("base") + + assert pg_cli.clean.callback is not None pg_cli.clean.callback() - # FIXME: deletes all because downgrade is not reliable! 
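# The teardown below replaces the old metadata.drop_all() fallback with
# pytest_simcore's postgres_tools.force_drop_all_tables(). As a rough sketch of the
# same idea in plain SQLAlchemy (illustration only, not the helper's actual
# implementation):

import sqlalchemy as sa


def _drop_all_reflected_tables(sync_engine: sa.engine.Engine) -> None:
    # reflect whatever tables the unreliable downgrade left behind...
    leftover = sa.MetaData()
    leftover.reflect(bind=sync_engine)
    # ...and drop them in dependency order so the next test starts clean
    leftover.drop_all(bind=sync_engine)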
- engine = make_engine(is_async=False) - metadata.drop_all(engine) + + postgres_tools.force_drop_all_tables(sync_engine) + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + default_app_env_vars: EnvVarsDict, + mocker: MockerFixture, +) -> EnvVarsDict: + """app environments WITH database settings""" + mocker.patch("simcore_service_api_server.core.application.setup_rabbitmq") + mocker.patch( + "simcore_service_api_server.core._prometheus_instrumentation.setup_prometheus_instrumentation" + ) + + envs = setenvs_from_dict(monkeypatch, {**default_app_env_vars}) + assert "API_SERVER_POSTGRES" not in envs + + # Should be sufficient to create settings + print(PostgresSettings.create_from_envs().model_dump_json(indent=1)) + + return envs @pytest.fixture -def app(patched_default_app_environ: EnvVarsDict, migrated_db: None) -> FastAPI: +def app(app_environment: EnvVarsDict, migrated_db: None) -> FastAPI: """Overrides app to ensure that: - it uses default environ as pg - db is started and initialized """ - the_app = init_app() - return the_app + return init_app() -## FAKE DATA injected at repositories interface ---------------------- +@pytest.fixture +async def async_engine(app: FastAPI) -> AsyncEngine: + return get_engine(app) -class _ExtendedUsersRepository(UsersRepository): - # pylint: disable=no-value-for-parameter +@pytest.fixture +async def create_user_ids( + async_engine: AsyncEngine, +) -> AsyncGenerator[Callable[[PositiveInt], AsyncGenerator[PositiveInt, None]], None]: + async def _generate_user_ids(n: PositiveInt) -> AsyncGenerator[PositiveInt, None]: + for _ in range(n): + while True: + user = random_user() + async with async_engine.connect() as conn: + result = await conn.execute( + users.select().where(users.c.name == user["name"]) + ) + entry = result.one_or_none() + if entry is None: + break + + async with async_engine.begin() as conn: + uid = await conn.scalar( + users.insert().values(user).returning(users.c.id) + ) + assert uid - async def create(self, **user) -> int: - values = random_user(**user) - async with self.db_engine.acquire() as conn: - user_id = await conn.scalar(orm.users.insert().values(**values)) + _generate_user_ids.generated_ids.append(uid) - print("Created user ", pformat(values), f"with user_id={user_id}") - return user_id + yield uid + _generate_user_ids.generated_ids = [] -class _ExtendedApiKeysRepository(BaseRepository): - # pylint: disable=no-value-for-parameter + yield _generate_user_ids + + for uid in _generate_user_ids.generated_ids: + async with async_engine.begin() as conn: + await conn.execute(users.delete().where(users.c.id == uid)) - async def create(self, name: str, *, api_key: str, api_secret: str, user_id: int): - values = dict( - display_name=name, - user_id=user_id, - api_key=api_key, - api_secret=api_secret, - ) - async with self.db_engine.acquire() as conn: - _id = await conn.scalar(orm.api_keys.insert().values(**values)) - # check inserted - row: RowProxy = await ( - await conn.execute( - orm.api_keys.select().where(orm.api_keys.c.id == _id) +@pytest.fixture +async def create_product_names( + async_engine: AsyncEngine, +) -> AsyncGenerator[Callable[[PositiveInt], AsyncGenerator[str, None]], None]: + async def _generate_product_names( + n: PositiveInt, + ) -> AsyncGenerator[str, None]: + for _ in range(n): + while True: + product = random_product(group_id=None) + async with async_engine.connect() as conn: + result = await conn.execute( + products.select().where(products.c.name == product["name"]), + ) + entry = 
result.one_or_none() + if entry is None: + break + + async with async_engine.begin() as conn: + name = await conn.scalar( + products.insert().values(product).returning(products.c.name) ) - ).first() - return ApiKeyInDB.from_orm(row) + assert name + _generate_product_names.generated_names.append(name) + yield name -@pytest.fixture -async def fake_user_id(app: FastAPI, faker: Faker) -> int: - # WARNING: created but not deleted upon tear-down, i.e. this is for one use! - user_id = await _ExtendedUsersRepository(app.state.engine).create( - email=faker.email(), - password=faker.password(), - name=faker.user_name(), - ) - return user_id + _generate_product_names.generated_names = [] + yield _generate_product_names + + for name in _generate_product_names.generated_names: + async with async_engine.begin() as conn: + await conn.execute(products.delete().where(products.c.name == name)) @pytest.fixture -async def fake_api_key(app: FastAPI, fake_user_id: int, faker: Faker) -> ApiKeyInDB: - # WARNING: created but not deleted upon tear-down, i.e. this is for one use! - apikey = await _ExtendedApiKeysRepository(app.state.engine).create( - "test-api-key", - api_key=faker.word(), - api_secret=faker.password(), - user_id=fake_user_id, - ) - return apikey +async def create_fake_api_keys( + async_engine: AsyncEngine, + create_user_ids: Callable[[PositiveInt], AsyncGenerator[PositiveInt, None]], + create_product_names: Callable[[PositiveInt], AsyncGenerator[str, None]], +) -> AsyncGenerator[Callable[[PositiveInt], AsyncGenerator[ApiKeyInDB, None]], None]: + + async def _generate_fake_api_key(n: PositiveInt): + users, products = create_user_ids(n), create_product_names(n) + excluded_column = "api_secret" + returning_cols = [col for col in api_keys.c if col.name != excluded_column] + + for _ in range(n): + product = await anext(products) + user = await anext(users) + + api_auth = random_api_auth(product, user) + plain_api_secret = api_auth.pop("api_secret") + + async with async_engine.begin() as conn: + result = await conn.execute( + api_keys.insert() + .values( + api_secret=sa.func.crypt( + plain_api_secret, sa.func.gen_salt("bf", 10) + ), + **api_auth, + ) + .returning(*returning_cols) + ) + row = result.one() + assert row + + _generate_fake_api_key.row_ids.append(row.id) + + yield ApiKeyInDB.model_validate({"api_secret": plain_api_secret, **row}) + + _generate_fake_api_key.row_ids = [] + yield _generate_fake_api_key + + async with async_engine.begin() as conn: + await conn.execute( + api_keys.delete().where(api_keys.c.id.in_(_generate_fake_api_key.row_ids)) + ) @pytest.fixture -def auth(fake_api_key: ApiKeyInDB) -> httpx.BasicAuth: +async def auth( + create_fake_api_keys: Callable[[PositiveInt], AsyncGenerator[ApiKeyInDB, None]], +) -> httpx.BasicAuth: """overrides auth and uses access to real repositories instead of mocks""" - return httpx.BasicAuth( - fake_api_key.api_key, fake_api_key.api_secret.get_secret_value() - ) + async for key in create_fake_api_keys(1): + return httpx.BasicAuth(key.api_key, key.api_secret) + pytest.fail("Did not generate authentication") diff --git a/services/api-server/tests/unit/_with_db/data/docker-compose.yml b/services/api-server/tests/unit/_with_db/data/docker-compose.yml new file mode 100644 index 00000000000..ae76474af7c --- /dev/null +++ b/services/api-server/tests/unit/_with_db/data/docker-compose.yml @@ -0,0 +1,30 @@ +services: + postgres: + image: postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce + environment: + - 
POSTGRES_USER=${POSTGRES_USER:-test} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-test} + - POSTGRES_DB=${POSTGRES_DB:-test} + - POSTGRES_HOST=${POSTGRES_HOST:-127.0.0.1} + - POSTGRES_PORT=${POSTGRES_PORT:-5432} + ports: + - "5432:5432" + # https://www.postgresql.org/docs/10/runtime-config-logging.html#GUC-LOG-STATEMENT + command: + [ + "postgres", + "-c", + "log_connections=true", + "-c", + "log_disconnections=true", + "-c", + "log_duration=true", + "-c", + "log_line_prefix=[%p] [%a] [%c] [%x] " + ] + adminer: + image: adminer + ports: + - 18080:8080 + depends_on: + - postgres diff --git a/services/api-server/tests/unit/_with_db/test_api_user.py b/services/api-server/tests/unit/_with_db/test_api_user.py index 74ff9aaffdf..5b29f72ef15 100644 --- a/services/api-server/tests/unit/_with_db/test_api_user.py +++ b/services/api-server/tests/unit/_with_db/test_api_user.py @@ -4,12 +4,13 @@ import json -from copy import deepcopy import httpx import pytest import respx from fastapi import FastAPI +from models_library.api_schemas_webserver.users import MyProfileGet as WebProfileGet +from pytest_mock import MockType from respx import MockRouter from simcore_service_api_server._meta import API_VTAG from simcore_service_api_server.core.settings import ApplicationSettings @@ -18,7 +19,7 @@ @pytest.fixture -def mocked_webserver_service_api(app: FastAPI): +def mocked_webserver_rest_api(app: FastAPI): """Mocks some responses of web-server service""" settings: ApplicationSettings = app.state.settings @@ -26,13 +27,15 @@ def mocked_webserver_service_api(app: FastAPI): # pylint: disable=not-context-manager with respx.mock( - base_url=settings.API_SERVER_WEBSERVER.base_url, + base_url=settings.API_SERVER_WEBSERVER.api_base_url, assert_all_called=False, assert_all_mocked=True, ) as respx_mock: # NOTE: webserver-api uses the same schema as api-server! 
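# Only the two profile endpoints are faked here: GET /me and PATCH /me, both
# resolved against settings.API_SERVER_WEBSERVER.api_base_url and answered with the
# in-memory "me" dict seeded below from the first WebProfileGet schema example
# (first/last name pinned to James Maxwell so test_get_profile can assert on them).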
# in-memory fake data - me = deepcopy(Profile.Config.schema_extra["example"]) + me: dict = WebProfileGet.model_json_schema()["examples"][0] + me["first_name"] = "James" + me["last_name"] = "Maxwell" def _get_me(request): return httpx.Response(status.HTTP_200_OK, json={"data": me}) @@ -43,17 +46,16 @@ def _update_me(request: httpx.Request): return httpx.Response(status.HTTP_200_OK, json={"data": me}) respx_mock.get("/me", name="get_me").mock(side_effect=_get_me) - respx_mock.put("/me", name="update_me").mock(side_effect=_update_me) + respx_mock.patch("/me", name="update_me").mock(side_effect=_update_me) yield respx_mock - del me - async def test_get_profile( client: httpx.AsyncClient, auth: httpx.BasicAuth, - mocked_webserver_service_api: MockRouter, + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], ): # needs no auth resp = await client.get(f"/{API_VTAG}/meta") @@ -62,11 +64,11 @@ async def test_get_profile( # needs auth resp = await client.get(f"/{API_VTAG}/me") assert resp.status_code == status.HTTP_401_UNAUTHORIZED - assert not mocked_webserver_service_api["get_me"].called + assert not mocked_webserver_rest_api["get_me"].called resp = await client.get(f"/{API_VTAG}/me", auth=auth) assert resp.status_code == status.HTTP_200_OK - assert mocked_webserver_service_api["get_me"].called + assert mocked_webserver_rest_api["get_me"].called profile = Profile(**resp.json()) assert profile.first_name == "James" @@ -76,7 +78,8 @@ async def test_get_profile( async def test_update_profile( client: httpx.AsyncClient, auth: httpx.BasicAuth, - mocked_webserver_service_api: MockRouter, + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], ): # needs auth resp = await client.put( @@ -86,6 +89,6 @@ async def test_update_profile( ) assert resp.status_code == status.HTTP_200_OK, resp.text - profile = Profile.parse_obj(resp.json()) + profile = Profile.model_validate(resp.json()) assert profile.first_name == "Oliver" assert profile.last_name == "Heaviside" diff --git a/services/api-server/tests/unit/_with_db/test_core_settings__with_db.py b/services/api-server/tests/unit/_with_db/test_core_settings__with_db.py new file mode 100644 index 00000000000..78b9ae20b7b --- /dev/null +++ b/services/api-server/tests/unit/_with_db/test_core_settings__with_db.py @@ -0,0 +1,22 @@ +# pylint: disable=unused-variable +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name + + +import logging + +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_api_server.core.settings import ApplicationSettings, BootModeEnum +from yarl import URL + + +def test_unit_with_db_app_environment(app_environment: EnvVarsDict): + settings = ApplicationSettings.create_from_envs() + print("captured settings: \n", settings.model_dump_json(indent=2)) + + assert settings.SC_BOOT_MODE == BootModeEnum.PRODUCTION + assert settings.log_level == logging.DEBUG + + assert URL(settings.API_SERVER_POSTGRES.dsn) == URL( + "postgresql://test:test@127.0.0.1:5432/test" + ) diff --git a/services/api-server/tests/unit/_with_db/test_product.py b/services/api-server/tests/unit/_with_db/test_product.py new file mode 100644 index 00000000000..bdac23b26ab --- /dev/null +++ b/services/api-server/tests/unit/_with_db/test_product.py @@ -0,0 +1,96 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import datetime 
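# test_product_webserver below leans on a respx route whose path regex carries a
# named capture group: respx passes captured groups to the side_effect callable as
# keyword arguments, which is how the fake wallet endpoint learns which wallet_id
# was requested. A minimal sketch of that pattern (the route path and helper name
# here are illustrative only, not part of this module):

import httpx
import respx
from fastapi import status


def _install_fake_wallet_route(respx_mock: respx.MockRouter) -> None:
    def _get_wallet(request: httpx.Request, **kwargs) -> httpx.Response:
        wallet_id = int(kwargs["wallet_id"])  # captured from the named regex group
        return httpx.Response(
            status.HTTP_200_OK, json={"data": {"walletId": wallet_id}}
        )

    respx_mock.get(path__regex=r"/wallets/(?P<wallet_id>\d+)").mock(
        side_effect=_get_wallet
    )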
+from collections.abc import AsyncGenerator, Callable +from decimal import Decimal + +import httpx +import respx +from faker import Faker +from fastapi import status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_api_server.api_keys import ApiKeyInDB +from models_library.generics import Envelope +from models_library.wallets import WalletStatus +from pydantic import PositiveInt +from pytest_mock import MockType +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.model_adapter import ( + WalletGetWithAvailableCreditsLegacy, +) + + +async def test_product_webserver( + client: httpx.AsyncClient, + mocked_webserver_rest_api_base: respx.MockRouter, + create_fake_api_keys: Callable[[PositiveInt], AsyncGenerator[ApiKeyInDB, None]], + faker: Faker, +): + assert client + + wallet_to_api_keys_map: dict[int, ApiKeyInDB] = {} + _wallet_id = faker.pyint(min_value=1) + async for api_key in create_fake_api_keys(2): + _wallet_id += faker.pyint(min_value=1) + wallet_to_api_keys_map[_wallet_id] = api_key + + def _check_key_product_compatibility(request: httpx.Request, **kwargs): + assert ( + received_product_name := request.headers.get("x-simcore-products-name") + ) is not None + assert (wallet_id := kwargs.get("wallet_id")) is not None + assert (api_key := wallet_to_api_keys_map[int(wallet_id)]) is not None + assert api_key.product_name == received_product_name + return httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder( + Envelope[WalletGetWithAvailableCreditsLegacy]( + data=WalletGetWithAvailableCreditsLegacy( + wallet_id=wallet_id, + name="my_wallet", + description="this is my wallet", + owner=api_key.id_, + thumbnail="something", + status=WalletStatus.ACTIVE, + created=datetime.datetime.now(), + modified=datetime.datetime.now(), + available_credits=Decimal("20.0"), + ) + ) + ), + ) + + wallet_get_mock = mocked_webserver_rest_api_base.get( + path__regex=r"/wallets/(?P[-+]?\d+)" + ).mock(side_effect=_check_key_product_compatibility) + + for wallet_id, api_key in wallet_to_api_keys_map.items(): + response = await client.get( + f"{API_VTAG}/wallets/{wallet_id}", + auth=httpx.BasicAuth(api_key.api_key, api_key.api_secret), + ) + assert response.status_code == status.HTTP_200_OK + assert wallet_get_mock.call_count == len(wallet_to_api_keys_map) + + +async def test_product_catalog( + client: httpx.AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], + create_fake_api_keys: Callable[[PositiveInt], AsyncGenerator[ApiKeyInDB, None]], +): + assert client + + valid_api_auths: list[ApiKeyInDB] = [key async for key in create_fake_api_keys(2)] + assert len({key.product_name for key in valid_api_auths}) == 2 + + for api_auth in valid_api_auths: + await client.get( + f"{API_VTAG}/solvers/simcore/services/comp/isolve/releases/2.0.24", + auth=httpx.BasicAuth(api_auth.api_key, api_auth.api_secret), + ) + + assert mocked_catalog_rpc_api["get_service"].called diff --git a/services/api-server/tests/unit/api_functions/conftest.py b/services/api-server/tests/unit/api_functions/conftest.py new file mode 100644 index 00000000000..f133d6485d7 --- /dev/null +++ b/services/api-server/tests/unit/api_functions/conftest.py @@ -0,0 +1,192 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=no-value-for-parameter +# pylint: disable=super-init-not-called +# pylint: disable=unused-argument +# pylint: disable=no-self-use +# pylint: disable=cyclic-import + +from collections.abc import Callable +from typing import Any +from uuid 
import uuid4 + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_webserver.functions import ( + Function, + FunctionClass, + FunctionIDNotFoundError, + FunctionJob, + FunctionJobCollection, + JSONFunctionInputSchema, + JSONFunctionOutputSchema, + ProjectFunction, + ProjectFunctionJob, + RegisteredFunction, + RegisteredFunctionJob, + RegisteredProjectFunction, + RegisteredProjectFunctionJob, +) +from models_library.functions import RegisteredFunctionJobCollection +from models_library.projects import ProjectID +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from simcore_service_api_server.api.routes.functions_routes import get_wb_api_rpc_client +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +): + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + "WEBSERVER_DEV_FEATURES_ENABLED": "1", + "WEBSERVER_FUNCTIONS": "1", + }, + ) + + +class DummyRpcClient(RabbitMQRPCClient): + + def __init__(self): + self.client_name = "dummy_client" + self.settings = {} # type: ignore # Add a settings attribute to avoid AttributeError + + async def request(self, namespace: str, method_name: str, **kwargs): + # Mock implementation of the request method + assert isinstance(namespace, str) + assert isinstance(method_name, str) + assert isinstance(kwargs, dict) + return {"mocked_response": True} + + +@pytest.fixture +async def mock_wb_api_server_rpc(app: FastAPI, mocker: MockerFixture) -> MockerFixture: + + app.dependency_overrides[get_wb_api_rpc_client] = lambda: WbApiRpcClient( + _client=DummyRpcClient() + ) + return mocker + + +@pytest.fixture +def sample_input_schema() -> JSONFunctionInputSchema: + return JSONFunctionInputSchema( + schema_content={ + "type": "object", + "properties": {"input1": {"type": "integer"}}, + } + ) + + +@pytest.fixture +def sample_output_schema() -> JSONFunctionOutputSchema: + return JSONFunctionOutputSchema( + schema_content={ + "type": "object", + "properties": {"output1": {"type": "string"}}, + } + ) + + +@pytest.fixture +def raise_function_id_not_found() -> FunctionIDNotFoundError: + return FunctionIDNotFoundError(function_id="function_id") + + +@pytest.fixture +def mock_function( + project_id: ProjectID, + sample_input_schema: JSONFunctionInputSchema, + sample_output_schema: JSONFunctionOutputSchema, +) -> Function: + sample_fields = { + "title": "test_function", + "function_class": FunctionClass.PROJECT, + "project_id": str(project_id), + "description": "A test function", + "input_schema": sample_input_schema, + "output_schema": sample_output_schema, + "default_inputs": None, + } + return ProjectFunction(**sample_fields) + + +@pytest.fixture +def mock_registered_function(mock_function: Function) -> RegisteredFunction: + return RegisteredProjectFunction(**{**mock_function.dict(), "uid": str(uuid4())}) + + +@pytest.fixture +def mock_function_job(mock_registered_function: RegisteredFunction) -> FunctionJob: + mock_function_job = { + "function_uid": mock_registered_function.uid, + "title": "Test Function Job", + "description": "A test function job", + "inputs": {"key": "value"}, + "outputs": None, + "project_job_id": str(uuid4()), + "function_class": FunctionClass.PROJECT, + } + return 
ProjectFunctionJob(**mock_function_job) + + +@pytest.fixture +def mock_registered_function_job( + mock_function_job: FunctionJob, +) -> RegisteredFunctionJob: + return RegisteredProjectFunctionJob( + **{**mock_function_job.dict(), "uid": str(uuid4())} + ) + + +@pytest.fixture +def mock_function_job_collection( + mock_registered_function_job: RegisteredFunctionJob, +) -> FunctionJobCollection: + mock_function_job_collection = { + "title": "Test Function Job Collection", + "description": "A test function job collection", + "function_uid": mock_registered_function_job.function_uid, + "function_class": FunctionClass.PROJECT, + "project_id": str(uuid4()), + "function_job_ids": [mock_registered_function_job.uid for _ in range(5)], + } + return FunctionJobCollection(**mock_function_job_collection) + + +@pytest.fixture +def mock_registered_function_job_collection( + mock_function_job_collection: FunctionJobCollection, +) -> RegisteredFunctionJobCollection: + return RegisteredFunctionJobCollection( + **{**mock_function_job_collection.model_dump(), "uid": str(uuid4())} + ) + + +@pytest.fixture() +def mock_handler_in_functions_rpc_interface( + mock_wb_api_server_rpc: MockerFixture, +) -> Callable[[str, Any, Exception | None], None]: + def _mock( + handler_name: str = "", + return_value: Any = None, + exception: Exception | None = None, + ) -> None: + from servicelib.rabbitmq.rpc_interfaces.webserver.functions import ( + functions_rpc_interface, + ) + + mock_wb_api_server_rpc.patch.object( + functions_rpc_interface, + handler_name, + return_value=return_value, + side_effect=exception, + ) + + return _mock diff --git a/services/api-server/tests/unit/api_functions/test_api_routers_functions.py b/services/api-server/tests/unit/api_functions/test_api_routers_functions.py new file mode 100644 index 00000000000..06cf653c1ce --- /dev/null +++ b/services/api-server/tests/unit/api_functions/test_api_routers_functions.py @@ -0,0 +1,516 @@ +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name + +from collections.abc import Callable +from typing import Any +from uuid import uuid4 + +import pytest +from httpx import AsyncClient +from models_library.api_schemas_webserver.functions import ( + FunctionIDNotFoundError, + FunctionJobCollection, + ProjectFunction, + ProjectFunctionJob, + RegisteredFunctionJobCollection, + RegisteredProjectFunction, + RegisteredProjectFunctionJob, +) +from models_library.rest_pagination import PageMetaInfoLimitOffset +from servicelib.aiohttp import status +from simcore_service_api_server._meta import API_VTAG + + +async def test_register_function( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_function: ProjectFunction, +) -> None: + registered_function = RegisteredProjectFunction( + **{**mock_function.model_dump(), "uid": str(uuid4())} + ) + + mock_handler_in_functions_rpc_interface("register_function", registered_function) + response = await client.post( + f"{API_VTAG}/functions", + json=mock_function.model_dump(mode="json"), + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + returned_function = RegisteredProjectFunction.model_validate(data) + assert returned_function.uid is not None + assert returned_function == registered_function + + +async def test_register_function_invalid( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], +) -> None: + invalid_function = { + "title": "test_function", + "function_class": "invalid_class", # Invalid 
class + "project_id": str(uuid4()), + } + response = await client.post(f"{API_VTAG}/functions", json=invalid_function) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + assert ( + "Input tag 'invalid_class' found using 'function_class' does not" + in response.json()["errors"][0]["msg"] + ) + + +async def test_get_function( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + function_id = str(uuid4()) + + mock_handler_in_functions_rpc_interface("get_function", mock_registered_function) + response = await client.get(f"{API_VTAG}/functions/{function_id}") + assert response.status_code == status.HTTP_200_OK + returned_function = RegisteredProjectFunction.model_validate(response.json()) + assert returned_function == mock_registered_function + + +async def test_get_function_not_found( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[ + [str, Any, Exception | None], None + ], +) -> None: + non_existent_function_id = str(uuid4()) + + mock_handler_in_functions_rpc_interface( + "get_function", + None, + FunctionIDNotFoundError(function_id=non_existent_function_id), + ) + with pytest.raises(FunctionIDNotFoundError): + await client.get(f"{API_VTAG}/functions/{non_existent_function_id}") + + +async def test_list_functions( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface( + "list_functions", + ( + [mock_registered_function for _ in range(5)], + PageMetaInfoLimitOffset(total=5, count=5, limit=10, offset=0), + ), + ) + + response = await client.get( + f"{API_VTAG}/functions", params={"limit": 10, "offset": 0} + ) + assert response.status_code == status.HTTP_200_OK + data = response.json()["items"] + assert len(data) == 5 + assert data[0]["title"] == mock_registered_function.title + + +async def test_update_function_title( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface( + "update_function_title", + RegisteredProjectFunction( + **{ + **mock_registered_function.model_dump(), + "title": "updated_example_function", + } + ), + ) + + # Update the function title + updated_title = {"title": "updated_example_function"} + response = await client.patch( + f"{API_VTAG}/functions/{mock_registered_function.uid}/title", + params=updated_title, + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data["title"] == updated_title["title"] + + +async def test_update_function_description( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + mock_handler_in_functions_rpc_interface( + "update_function_description", + RegisteredProjectFunction( + **{ + **mock_registered_function.model_dump(), + "description": "updated_example_function", + } + ), + ) + + # Update the function description + updated_description = {"description": "updated_example_function"} + response = await client.patch( + f"{API_VTAG}/functions/{mock_registered_function.uid}/description", + params=updated_description, + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data["description"] == 
updated_description["description"] + + +async def test_get_function_input_schema( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface("get_function", mock_registered_function) + + response = await client.get( + f"{API_VTAG}/functions/{mock_registered_function.uid}/input_schema" + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + + assert ( + data["schema_content"] == mock_registered_function.input_schema.schema_content + ) + + +async def test_get_function_output_schema( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface("get_function", mock_registered_function) + + response = await client.get( + f"{API_VTAG}/functions/{mock_registered_function.uid}/output_schema" + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + + assert ( + data["schema_content"] == mock_registered_function.output_schema.schema_content + ) + + +async def test_validate_function_inputs( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface("get_function", mock_registered_function) + + # Validate inputs + validate_payload = {"input1": 10} + response = await client.post( + f"{API_VTAG}/functions/{mock_registered_function.uid}:validate_inputs", + json=validate_payload, + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data == [True, "Inputs are valid"] + + +async def test_delete_function( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function: RegisteredProjectFunction, +) -> None: + mock_handler_in_functions_rpc_interface("delete_function", None) + + # Delete the function + response = await client.delete( + f"{API_VTAG}/functions/{mock_registered_function.uid}" + ) + assert response.status_code == status.HTTP_200_OK + + +async def test_register_function_job( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_function_job: ProjectFunctionJob, + mock_registered_function_job: RegisteredProjectFunctionJob, +) -> None: + """Test the register_function_job endpoint.""" + + mock_handler_in_functions_rpc_interface( + "register_function_job", mock_registered_function_job + ) + + response = await client.post( + f"{API_VTAG}/function_jobs", json=mock_function_job.model_dump(mode="json") + ) + + assert response.status_code == status.HTTP_200_OK + assert ( + RegisteredProjectFunctionJob.model_validate(response.json()) + == mock_registered_function_job + ) + + +async def test_get_function_job( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job: RegisteredProjectFunctionJob, +) -> None: + + mock_handler_in_functions_rpc_interface( + "get_function_job", mock_registered_function_job + ) + + # Now, get the function job + response = await client.get( + f"{API_VTAG}/function_jobs/{mock_registered_function_job.uid}" + ) + assert response.status_code == status.HTTP_200_OK + assert ( + RegisteredProjectFunctionJob.model_validate(response.json()) + == mock_registered_function_job + ) + + +async def 
test_list_function_jobs( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job: RegisteredProjectFunctionJob, +) -> None: + + mock_handler_in_functions_rpc_interface( + "list_function_jobs", + ( + [mock_registered_function_job for _ in range(5)], + PageMetaInfoLimitOffset(total=5, count=5, limit=10, offset=0), + ), + ) + + # Now, list function jobs + response = await client.get(f"{API_VTAG}/function_jobs") + assert response.status_code == status.HTTP_200_OK + data = response.json()["items"] + assert len(data) == 5 + assert ( + RegisteredProjectFunctionJob.model_validate(data[0]) + == mock_registered_function_job + ) + + +async def test_list_function_jobs_with_function_filter( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job: RegisteredProjectFunctionJob, + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface( + "list_function_jobs", + ( + [mock_registered_function_job for _ in range(5)], + PageMetaInfoLimitOffset(total=5, count=5, limit=10, offset=0), + ), + ) + + # Now, list function jobs with a filter + response = await client.get( + f"{API_VTAG}/functions/{mock_registered_function.uid}/jobs" + ) + + assert response.status_code == status.HTTP_200_OK + data = response.json()["items"] + assert len(data) == 5 + assert ( + RegisteredProjectFunctionJob.model_validate(data[0]) + == mock_registered_function_job + ) + + +async def test_delete_function_job( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job: RegisteredProjectFunctionJob, +) -> None: + + mock_handler_in_functions_rpc_interface("delete_function_job", None) + + # Now, delete the function job + response = await client.delete( + f"{API_VTAG}/function_jobs/{mock_registered_function_job.uid}" + ) + assert response.status_code == status.HTTP_200_OK + + +async def test_register_function_job_collection( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], +) -> None: + mock_function_job_collection = FunctionJobCollection.model_validate( + { + "title": "Test Collection", + "description": "A test function job collection", + "job_ids": [str(uuid4()), str(uuid4())], + } + ) + + mock_registered_function_job_collection = ( + RegisteredFunctionJobCollection.model_validate( + { + **mock_function_job_collection.model_dump(), + "uid": str(uuid4()), + } + ) + ) + + mock_handler_in_functions_rpc_interface( + "register_function_job_collection", mock_registered_function_job_collection + ) + + response = await client.post( + f"{API_VTAG}/function_job_collections", + json=mock_function_job_collection.model_dump(mode="json"), + ) + + # Assert + assert response.status_code == status.HTTP_200_OK + assert ( + RegisteredFunctionJobCollection.model_validate(response.json()) + == mock_registered_function_job_collection + ) + + +async def test_get_function_job_collection( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], +) -> None: + mock_registered_function_job_collection = ( + RegisteredFunctionJobCollection.model_validate( + { + "uid": str(uuid4()), + "title": "Test Collection", + "description": "A test function job collection", + "job_ids": [str(uuid4()), str(uuid4())], + } + ) + ) + + mock_handler_in_functions_rpc_interface( + "get_function_job_collection", mock_registered_function_job_collection + ) + 
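# mock_handler_in_functions_rpc_interface (from this package's conftest.py) swaps
# the named handler on functions_rpc_interface via mocker.patch.object with the
# given return_value/side_effect, and get_wb_api_rpc_client is overridden with a
# WbApiRpcClient wrapping DummyRpcClient, so the request below resolves the canned
# collection without any RabbitMQ round-trip.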
+ response = await client.get( + f"{API_VTAG}/function_job_collections/{mock_registered_function_job_collection.uid}" + ) + assert response.status_code == status.HTTP_200_OK + assert ( + RegisteredFunctionJobCollection.model_validate(response.json()) + == mock_registered_function_job_collection + ) + + +async def test_list_function_job_collections( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], +) -> None: + mock_registered_function_job_collection = ( + RegisteredFunctionJobCollection.model_validate( + { + "uid": str(uuid4()), + "title": "Test Collection", + "description": "A test function job collection", + "job_ids": [str(uuid4()), str(uuid4())], + } + ) + ) + + mock_handler_in_functions_rpc_interface( + "list_function_job_collections", + ( + [mock_registered_function_job_collection for _ in range(5)], + PageMetaInfoLimitOffset(total=5, count=5, limit=10, offset=0), + ), + ) + + response = await client.get(f"{API_VTAG}/function_job_collections") + assert response.status_code == status.HTTP_200_OK + data = response.json()["items"] + assert len(data) == 5 + assert ( + RegisteredFunctionJobCollection.model_validate(data[0]) + == mock_registered_function_job_collection + ) + + +async def test_delete_function_job_collection( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job_collection: RegisteredFunctionJobCollection, +) -> None: + + mock_handler_in_functions_rpc_interface("delete_function_job_collection", None) + + # Now, delete the function job collection + response = await client.delete( + f"{API_VTAG}/function_job_collections/{mock_registered_function_job_collection.uid}" + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data is None + + +async def test_get_function_job_collection_jobs( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job_collection: RegisteredFunctionJobCollection, +) -> None: + + mock_handler_in_functions_rpc_interface( + "get_function_job_collection", mock_registered_function_job_collection + ) + + response = await client.get( + f"{API_VTAG}/function_job_collections/{mock_registered_function_job_collection.uid}/function_jobs" + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert len(data) == len(mock_registered_function_job_collection.job_ids) + + +async def test_list_function_job_collections_with_function_filter( + client: AsyncClient, + mock_handler_in_functions_rpc_interface: Callable[[str, Any], None], + mock_registered_function_job_collection: RegisteredFunctionJobCollection, + mock_registered_function: RegisteredProjectFunction, +) -> None: + + mock_handler_in_functions_rpc_interface( + "list_function_job_collections", + ( + [mock_registered_function_job_collection for _ in range(2)], + PageMetaInfoLimitOffset(total=5, count=2, limit=2, offset=1), + ), + ) + + response = await client.get( + f"{API_VTAG}/function_job_collections?function_id={mock_registered_function.uid}&limit=2&offset=1" + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + + assert data["total"] == 5 + assert data["limit"] == 2 + assert data["offset"] == 1 + assert len(data["items"]) == 2 + assert ( + RegisteredFunctionJobCollection.model_validate(data["items"][0]) + == mock_registered_function_job_collection + ) diff --git a/services/api-server/tests/unit/api_solvers/conftest.py 
b/services/api-server/tests/unit/api_solvers/conftest.py index 13e690e6cf3..7052c56a0d1 100644 --- a/services/api-server/tests/unit/api_solvers/conftest.py +++ b/services/api-server/tests/unit/api_solvers/conftest.py @@ -3,174 +3,62 @@ # pylint: disable=unused-variable -import json -from copy import deepcopy -from pathlib import Path -from typing import Any, Iterator +from collections.abc import AsyncIterable, Callable +from datetime import datetime, timedelta +from typing import Final +import httpx import pytest -import respx -import yaml from fastapi import FastAPI, status -from pytest_simcore.helpers import faker_catalog -from pytest_simcore.simcore_webserver_projects_rest_api import GET_PROJECT +from fastapi.encoders import jsonable_encoder +from models_library.projects_state import RunningState from respx import MockRouter from simcore_service_api_server.core.settings import ApplicationSettings +from simcore_service_api_server.services_http.director_v2 import ComputationTaskGet @pytest.fixture -def catalog_service_openapi_specs(osparc_simcore_services_dir: Path) -> dict[str, Any]: - openapi_path = osparc_simcore_services_dir / "catalog" / "openapi.json" - openapi_specs = json.loads(openapi_path.read_text()) - return openapi_specs +def solver_key() -> str: + return "simcore/services/comp/itis/sleeper" @pytest.fixture -def directorv2_service_openapi_specs( - osparc_simcore_services_dir: Path, -) -> dict[str, Any]: - return json.loads( - (osparc_simcore_services_dir / "director-v2" / "openapi.json").read_text() - ) +def solver_version() -> str: + return "2.0.0" @pytest.fixture -def webserver_service_openapi_specs( - osparc_simcore_services_dir: Path, -) -> dict[str, Any]: - return yaml.safe_load( - ( - osparc_simcore_services_dir - / "web/server/src/simcore_service_webserver/api/v0/openapi.yaml" - ).read_text() - ) - - -@pytest.fixture -def mocked_webserver_service_api( - app: FastAPI, webserver_service_openapi_specs: dict[str, Any] -) -> Iterator[MockRouter]: +def mocked_webserver_rest_api( + app: FastAPI, + mocked_webserver_rest_api_base: MockRouter, + patch_webserver_long_running_project_tasks: Callable[[MockRouter], MockRouter], +) -> MockRouter: settings: ApplicationSettings = app.state.settings assert settings.API_SERVER_WEBSERVER - openapi = deepcopy(webserver_service_openapi_specs) + patch_webserver_long_running_project_tasks(mocked_webserver_rest_api_base) - # pylint: disable=not-context-manager - with respx.mock( - base_url=settings.API_SERVER_WEBSERVER.base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - - # include /v0 - assert settings.API_SERVER_WEBSERVER.base_url.endswith("/v0") - - # healthcheck_readiness_probe, healthcheck_liveness_probe - response_body = ( - { - "data": openapi["paths"]["/"]["get"]["responses"]["200"]["content"][ - "application/json" - ]["schema"]["properties"]["data"]["example"] - }, - ) - respx_mock.get(path="/v0/", name="healthcheck_readiness_probe").respond( - status.HTTP_200_OK, json=response_body - ) - respx_mock.get(path="/v0/health", name="healthcheck_liveness_probe").respond( - status.HTTP_200_OK, json=response_body - ) - - # get_task_status - respx_mock.get( - path__regex=r"/tasks/(?P[\w/%]+)", - name="get_task_status", - ).respond( - status.HTTP_200_OK, - json={ - "data": { - "task_progress": 1, - "done": True, - "started": "2018-07-01T11:13:43Z", - } - }, - ) - - # get_task_result - respx_mock.get( - path__regex=r"/tasks/(?P[\w/%]+)/result", - name="get_task_result", - ).respond( - status.HTTP_200_OK, - 
json=GET_PROJECT.response_body, - ) - - # create_projects - task_id = "abc" - # http://webserver:8080/v0/projects?hidden=true - respx_mock.post(path__regex="/projects$", name="create_projects").respond( - status.HTTP_202_ACCEPTED, - json={ - "data": { - "task_id": "123", - "status_hef": f"{settings.API_SERVER_WEBSERVER.base_url}/task/{task_id}", - "result_href": f"{settings.API_SERVER_WEBSERVER.base_url}/task/{task_id}/result", - } - }, - ) - yield respx_mock + return mocked_webserver_rest_api_base @pytest.fixture -def mocked_catalog_service_api( - app: FastAPI, catalog_service_openapi_specs: dict[str, Any] -) -> Iterator[MockRouter]: - settings: ApplicationSettings = app.state.settings - assert settings.API_SERVER_CATALOG - - openapi = deepcopy(catalog_service_openapi_specs) - schemas = openapi["components"]["schemas"] - - # pylint: disable=not-context-manager - with respx.mock( - base_url=settings.API_SERVER_CATALOG.base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - - respx_mock.get("/v0/meta").respond(200, json=schemas["Meta"]["example"]) - - # ---- - respx_mock.get( - "/v0/services?user_id=1&details=false", name="list_services" - ).respond( - 200, - json=[ - # one solver - faker_catalog.create_service_out( - key="simcore/services/comp/Foo", name="Foo" - ), - # two version of the same solver - faker_catalog.create_service_out(version="0.0.1"), - faker_catalog.create_service_out(version="1.0.1"), - # not a solver - faker_catalog.create_service_out(type="dynamic"), - ], +async def mocked_directorv2_rest_api( + mocked_directorv2_rest_api_base, +) -> AsyncIterable[MockRouter]: + stop_time: Final[datetime] = datetime.now() + timedelta(seconds=5) + + def _get_computation(request: httpx.Request, **kwargs) -> httpx.Response: + task = ComputationTaskGet.model_validate( + ComputationTaskGet.model_json_schema()["examples"][0] ) - - # ----- - # NOTE: we could use https://python-jsonschema.readthedocs.io/en/stable/ - # - - respx_mock.get( - # NOTE: regex does not work even if tested https://regex101.com/r/drVAGr/1 - # path__regex=r"/v0/services/(?P[\w/%]+)/(?P[\d\.]+)/ports\?user_id=(?P\d+)", - path__startswith="/v0/services/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.1.4/ports", - name="list_service_ports", - ).respond( - 200, - json=[ - schemas["ServicePortGet"]["example"], - ], + if datetime.now() > stop_time: + task.state = RunningState.SUCCESS + task.stopped = datetime.now() + return httpx.Response( + status_code=status.HTTP_200_OK, json=jsonable_encoder(task) ) - yield respx_mock + mocked_directorv2_rest_api_base.get( + path__regex=r"/v2/computations/(?P[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" + ).mock(side_effect=_get_computation) + return mocked_directorv2_rest_api_base diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers.py index d35c2c30d34..300bfaab963 100644 --- a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers.py +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers.py @@ -1,89 +1,236 @@ +# pylint: disable=protected-access # pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable import httpx -import pytest -import simcore_service_api_server.api.routes.solvers -from pytest_mock import MockFixture -from respx import MockRouter -from simcore_service_api_server.models.schemas.solvers import 
Solver -from starlette import status +from fastapi import status +from pydantic import TypeAdapter +from pytest_mock import MockType +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.pagination import OnePage +from simcore_service_api_server.models.schemas.solvers import Solver, SolverPort -@pytest.mark.skip(reason="Still under development. Currently using fake implementation") -async def test_list_solvers( +async def test_list_all_solvers( + mocked_catalog_rpc_api: dict[str, MockType], client: httpx.AsyncClient, - mocked_catalog_service_api: MockRouter, - mocker: MockFixture, + auth: httpx.BasicAuth, +): + response = await client.get(f"/{API_VTAG}/solvers", auth=auth) + assert response.status_code == status.HTTP_200_OK + + +async def test_list_all_solvers_paginated( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + response = await client.get(f"/{API_VTAG}/solvers/page", auth=auth) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["items"]) == response.json()["total"] + + +async def test_list_all_solvers_paginated_with_filters( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, ): - warn = mocker.patch.object( - simcore_service_api_server.api.routes.solvers.logger, "warning" + # Test filter by solver_id + response = await client.get( + f"/{API_VTAG}/solvers/page?solver_id=simcore/services/comp/itis/*", + auth=auth, ) + assert response.status_code == status.HTTP_200_OK + solvers = response.json()["items"] + assert all("simcore/services/comp/itis/" in solver["id"] for solver in solvers) - # list solvers latest releases - resp = await client.get("/v0/solvers") - assert resp.status_code == status.HTTP_200_OK + # Test filter by version_display + response = await client.get( + f"/{API_VTAG}/solvers/page?version_display=*Xtreme*", + auth=auth, + ) + assert response.status_code == status.HTTP_200_OK + solvers = response.json()["items"] + assert all( + solver["version_display"] and "Xtreme" in solver["version_display"] + for solver in solvers + ) - # No warnings for ValidationError with the fixture - assert ( - not warn.called - ), f"No warnings expected in this fixture, got {str(warn.call_args)}" + # Test combination of both filters + response = await client.get( + f"/{API_VTAG}/solvers/page?solver_id=simcore/services/comp/itis/*&version_display=*Xtreme*", + auth=auth, + ) + assert response.status_code == status.HTTP_200_OK + solvers = response.json()["items"] + assert all( + "simcore/services/comp/itis/" in solver["id"] + and solver["version_display"] + and "Xtreme" in solver["version_display"] + for solver in solvers + ) - data = resp.json() - assert len(data) == 2 - for item in data: - solver = Solver(**item) - print(solver.json(indent=1, exclude_unset=True)) +async def test_list_all_solvers_releases( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + response = await client.get(f"/{API_VTAG}/solvers/releases", auth=auth) + assert response.status_code == status.HTTP_200_OK + - # use link to get the same solver - assert solver.url.host == "api.testserver.io" # cli.base_url +async def test_list_all_solvers_releases_paginated( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + solver_key = "simcore/services/comp/itis/sleeper" + response = await client.get( + 
f"/{API_VTAG}/solvers/{solver_key}/releases/page", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["items"]) == response.json()["total"] - # get_solver_latest_version_by_name - resp0 = await client.get(solver.url.path) - assert resp0.status_code == status.HTTP_501_NOT_IMPLEMENTED - # assert f"GET {solver.name}:{solver.version}" in resp0.json()["errors"][0] - assert f"GET solver {solver.id}" in resp0.json()["errors"][0] - # assert Solver(**resp0.json()) == solver - # get_solver - resp1 = await client.get(f"/v0/solvers/{solver.id}") - assert resp1.status_code == status.HTTP_501_NOT_IMPLEMENTED - assert f"GET solver {solver.id}" in resp1.json()["errors"][0] - # assert Solver(**resp1.json()) == solver +async def test_list_solver_releases( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + solver_key = "simcore/services/comp/itis/sleeper" + response = await client.get(f"/{API_VTAG}/solvers/{solver_key}/releases", auth=auth) + assert response.status_code == status.HTTP_200_OK - # get_solver_latest_version_by_name - resp2 = await client.get(f"/v0/solvers/{solver.id}/latest") - assert resp2.status_code == status.HTTP_501_NOT_IMPLEMENTED - assert f"GET latest {solver.id}" in resp2.json()["errors"][0] +async def test_get_solver_release( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + solver_key = "simcore/services/comp/itis/sleeper" + solver_version = "2.2.1" + response = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}", auth=auth + ) + assert response.status_code == status.HTTP_200_OK - # assert Solver(**resp2.json()) == Solver(**resp3.json()) + solver = Solver.model_validate(response.json()) + assert solver.version_display == "2 Xtreme" async def test_list_solver_ports( - mocked_catalog_service_api: MockRouter, + mocked_catalog_rpc_api: dict[str, MockType], client: httpx.AsyncClient, auth: httpx.BasicAuth, ): resp = await client.get( - "/v0/solvers/simcore/services/comp/itis/sleeper/releases/2.1.4/ports", + f"/{API_VTAG}/solvers/simcore/services/comp/itis/sleeper/releases/2.1.4/ports", auth=auth, ) assert resp.status_code == status.HTTP_200_OK - assert resp.json() == [ - { - "key": "input_1", - "kind": "input", - "content_schema": { - "title": "Sleep interval", - "type": "integer", - "x_unit": "second", - "minimum": 0, - "maximum": 5, + assert resp.json() == { + "total": 2, + "items": [ + { + "key": "input_1", + "kind": "input", + "content_schema": { + "title": "Sleep interval", + "type": "integer", + "x_unit": "second", + "minimum": 0, + "maximum": 5, + }, + }, + { + "key": "output_1", + "kind": "output", + "content_schema": { + "description": "Integer is generated in range [1-9]", + "title": "File containing one random integer", + "type": "string", + }, }, - } - ] + ], + } + + +async def test_list_solver_ports_again( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + solver_key = "simcore/services/comp/itis/sleeper" + solver_version = "3.2.1" + response = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/ports", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + assert TypeAdapter(OnePage[SolverPort]).validate_python(response.json()) + + +async def test_solvers_page_pagination_links( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + # Use a small 
limit to ensure pagination is needed + limit = 2 + response = await client.get(f"/{API_VTAG}/solvers/page?limit={limit}", auth=auth) + + assert response.status_code == status.HTTP_200_OK + + response_data = response.json() + assert "links" in response_data, "Response should contain links section" + + links = response_data["links"] + assert "next" in links, "Pagination should include 'next' link" + assert "prev" in links, "Pagination should include 'prev' link" + assert "first" in links, "Pagination should include 'first' link" + assert "last" in links, "Pagination should include 'last' link" + assert "self" in links, "Pagination should include 'self' link" + + # Verify the self link contains the correct limit parameter + assert ( + f"limit={limit}" in links["self"] + ), "Self link should reflect the requested limit" + + +async def test_solvers_page_pagination_last_page( + mocked_catalog_rpc_api: dict[str, MockType], + client: httpx.AsyncClient, + auth: httpx.BasicAuth, +): + # Get total count first + response = await client.get(f"/{API_VTAG}/solvers/page", auth=auth) + assert response.status_code == status.HTTP_200_OK + total_items = response.json()["total"] + + assert ( + total_items > 1 + ), "Total items in MOCK examples should be greater than 1 for pagination test since we need 'prev', 'self' and 'prev' links" + last_item = total_items - 1 + page_size = 1 + + # Request the last page by using the total count as offset + response = await client.get( + f"/{API_VTAG}/solvers/page?limit={page_size}&offset={last_item}", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + + response_data = response.json() + assert "links" in response_data, "Response should contain links section" + + links = response_data["links"] + assert links["next"] is None, "Next link should be None for the last page (size=1)" + assert ( + links["prev"] is not None + ), "Prev link should be present for the last page (size=1)" + assert ( + links["last"] == links["self"] + ), "Last link should be the same as self link for the last page" diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py index 1e9e7cedee2..b28e40de702 100644 --- a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs.py @@ -1,41 +1,36 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable +# pylint: disable=too-many-arguments from pathlib import Path from pprint import pprint -from typing import Any, Iterator +from typing import Any from zipfile import ZipFile +import arrow import boto3 import httpx import pytest -import respx from faker import Faker from fastapi import FastAPI -from models_library.services import ServiceDockerData -from pydantic import AnyUrl, HttpUrl, parse_obj_as +from models_library.services import ServiceMetaDataPublished +from models_library.utils.fastapi_encoders import jsonable_encoder +from pydantic import AnyUrl, HttpUrl, TypeAdapter +from pytest_mock import MockType from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG from simcore_service_api_server.core.settings import ApplicationSettings from simcore_service_api_server.models.schemas.jobs import Job, JobInputs, JobStatus +from simcore_service_api_server.services_http.director_v2 import ComputationTaskGet from starlette import status @pytest.fixture -def 
bucket_name(): +def bucket_name() -> str: return "test-bucket" -@pytest.fixture -def project_id(faker: Faker) -> str: - return faker.uuid4() - - -@pytest.fixture -def node_id(faker: Faker) -> str: - return faker.uuid4() - - @pytest.fixture def log_zip_path(faker: Faker, tmp_path: Path, project_id: str, node_id: str) -> Path: # a log file @@ -56,14 +51,14 @@ def presigned_download_link( node_id: str, bucket_name: str, mocked_s3_server_url: HttpUrl, -) -> Iterator[AnyUrl]: - +) -> AnyUrl: s3_client = boto3.client( "s3", - endpoint_url=mocked_s3_server_url, + endpoint_url=f"{mocked_s3_server_url}", # Some fake auth, otherwise botocore.exceptions.NoCredentialsError: Unable to locate credentials - aws_secret_access_key="xxx", + aws_secret_access_key="xxx", # noqa: S106 aws_access_key_id="xxx", + region_name="us-east-1", # don't remove this ) s3_client.create_bucket(Bucket=bucket_name) @@ -83,64 +78,60 @@ def presigned_download_link( print("generated link", presigned_url) # SEE also https://gist.github.com/amarjandu/77a7d8e33623bae1e4e5ba40dc043cb9 - yield parse_obj_as(AnyUrl, presigned_url) + return TypeAdapter(AnyUrl).validate_python(presigned_url) @pytest.fixture def mocked_directorv2_service_api( app: FastAPI, presigned_download_link: AnyUrl, + mocked_directorv2_rest_api_base: MockRouter, directorv2_service_openapi_specs: dict[str, Any], -): +) -> MockRouter: settings: ApplicationSettings = app.state.settings assert settings.API_SERVER_DIRECTOR_V2 oas = directorv2_service_openapi_specs # pylint: disable=not-context-manager - with respx.mock( - base_url=settings.API_SERVER_DIRECTOR_V2.base_url, - assert_all_called=False, - assert_all_mocked=False, - ) as respx_mock: - - # check that what we emulate, actually still exists - path = "/v2/computations/{project_id}/tasks/-/logfile" - assert path in oas["paths"] - assert "get" in oas["paths"][path] - - response = oas["paths"][path]["get"]["responses"]["200"] - - assert response["content"]["application/json"]["schema"]["type"] == "array" - assert ( - response["content"]["application/json"]["schema"]["items"]["$ref"] - == "#/components/schemas/TaskLogFileGet" - ) - assert {"task_id", "download_link"} == set( - oas["components"]["schemas"]["TaskLogFileGet"]["properties"].keys() - ) - - respx_mock.get( - path__regex=r"/computations/(?P[\w-]+)/tasks/-/logfile", - name="get_computation_logs", - ).respond( - status.HTTP_200_OK, - json=[ - { - "task_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", - "download_link": presigned_download_link, - } - ], - ) + respx_mock = mocked_directorv2_rest_api_base + # check that what we emulate, actually still exists + path = "/v2/computations/{project_id}/tasks/-/logfile" + assert path in oas["paths"] + assert "get" in oas["paths"][path] + + response = oas["paths"][path]["get"]["responses"]["200"] + + assert response["content"]["application/json"]["schema"]["type"] == "array" + assert ( + response["content"]["application/json"]["schema"]["items"]["$ref"] + == "#/components/schemas/TaskLogFileGet" + ) + assert {"task_id", "download_link"} == set( + oas["components"]["schemas"]["TaskLogFileGet"]["properties"].keys() + ) + + respx_mock.get( + path__regex=r"/computations/(?P[\w-]+)/tasks/-/logfile", + name="get_computation_logs", # = operation_id + ).respond( + status.HTTP_200_OK, + json=[ + { + "task_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "download_link": f"{presigned_download_link}", + } + ], + ) - yield respx_mock + return respx_mock def test_download_presigned_link( presigned_download_link: AnyUrl, tmp_path: Path, 
project_id: str, node_id: str ): """Cheks that the generation of presigned_download_link works as expected""" - r = httpx.get(presigned_download_link) - pprint(dict(r.headers)) + r = httpx.get(f"{presigned_download_link}") + ## pprint(dict(r.headers)) # r.headers looks like: # { # 'access-control-allow-origin': '*', @@ -182,13 +173,13 @@ async def test_solver_logs( solver_key: str, solver_version: str, ): - resp = await client.get("/v0/meta") + resp = await client.get(f"/{API_VTAG}/meta") assert resp.status_code == 200 job_id = project_id resp = await client.get( - f"/v0/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/outputs/logfile", + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/outputs/logfile", auth=auth, follow_redirects=True, ) @@ -199,33 +190,23 @@ async def test_solver_logs( # was a re-direction resp0 = resp.history[0] assert resp0.status_code == status.HTTP_307_TEMPORARY_REDIRECT - assert resp0.headers["location"] == presigned_download_link - - assert resp.url == presigned_download_link - pprint(dict(resp.headers)) - + assert resp0.headers["location"] == f"{presigned_download_link}" -@pytest.fixture -def solver_key() -> str: - return "simcore/services/comp/itis/isolve" - - -@pytest.fixture -def solver_version() -> str: - return "1.2.3" + assert f"{resp.url}" == f"{presigned_download_link}" + pprint(dict(resp.headers)) # noqa: T203 @pytest.mark.acceptance_test( "New feature https://github.com/ITISFoundation/osparc-simcore/issues/3940" ) -@pytest.mark.xfail # TODO: will fix in next PR async def test_run_solver_job( client: httpx.AsyncClient, directorv2_service_openapi_specs: dict[str, Any], catalog_service_openapi_specs: dict[str, Any], - mocked_catalog_service_api: MockRouter, + mocked_catalog_rpc_api: dict[str, MockType], mocked_directorv2_service_api: MockRouter, - mocked_webserver_service_api: MockRouter, + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], auth: httpx.BasicAuth, project_id: str, solver_key: str, @@ -250,38 +231,59 @@ async def test_run_solver_job( "result", "pipeline_details", "iteration", - "cluster_id", "url", "stop_url", + "submitted", + "started", + "stopped", } == set(oas["components"]["schemas"]["ComputationGet"]["properties"].keys()) # CREATE and optionally start - mocked_directorv2_service_api.post( - path__regex=r"/computations", - name="create_computation_v2_computations_post", + mocked_directorv2_service_api.get( + path__regex=r"^/v2/computations/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-(3|4|5)[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", + name="inspect_computation", ).respond( status.HTTP_201_CREATED, - json={ - "id": project_id, - "state": "UNKNOWN", - "result": "string", - "pipeline_details": { - "adjacency_list": { - "additionalProp1": ["3fa85f64-5717-4562-b3fc-2c963f66afa6"], - }, - "node_states": { - "additionalProp1": { - "modified": True, - "dependencies": ["3fa85f64-5717-4562-b3fc-2c963f66afa6"], - "currentStatus": "NOT_STARTED", + json=jsonable_encoder( + ComputationTaskGet.model_validate( + { + "id": project_id, + "state": "UNKNOWN", + "result": "string", + "pipeline_details": { + "adjacency_list": { + "3fa85f64-5717-4562-b3fc-2c963f66afa6": [ + "3fa85f64-5717-4562-b3fc-2c963f66afa6" + ], + }, + "node_states": { + "3fa85f64-5717-4562-b3fc-2c963f66afa6": { + "modified": True, + "dependencies": [ + "3fa85f64-5717-4562-b3fc-2c963f66afa6" + ], + "currentStatus": "NOT_STARTED", + }, + }, + "progress": 0.0, }, - }, - }, - "iteration": 1, - "cluster_id": 0, - "url": 
"string", - "stop_url": "string", - }, + "iteration": 1, + "url": "http://test.com", + "stop_url": "http://test.com", + "started": None, + "stopped": None, + "submitted": arrow.utcnow().datetime.isoformat(), + } + ) + ), + ) + + mocked_webserver_rest_api.post( + path__regex=r"^/v0/computations/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-(3|4|5)[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}:start$", + name="webserver_start_job", + ).respond( + status_code=status.HTTP_201_CREATED, + json={"data": {"pipeline_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6"}}, ) # catalog_client.get_solver @@ -305,61 +307,49 @@ async def test_run_solver_job( "contact", "inputs", "outputs", + "classifiers", + "owner", } == set(oas["components"]["schemas"]["ServiceGet"]["required"]) example = next( e - for e in ServiceDockerData.Config.schema_extra["examples"][::-1] - if "boot" in e["description"] - ) - - mocked_catalog_service_api.get( - # path__regex=r"/services/(?P[\w-]+)/(?P[0-9\.]+)", - path="/v0/services/simcore%2Fservices%2Fcomp%2Fitis%2Fisolve/1.2.3", - name="get_service_v0_services__service_key___service_version__get", - ).respond( - status.HTTP_200_OK, - json=example - | { - "name": solver_key.split("/")[-1].capitalize(), - "description": solver_key.replace("/", " "), - "key": solver_key, - "version": solver_version, - "type": "computational", - }, + for e in ServiceMetaDataPublished.model_json_schema()["examples"] + if "boot-options" in e ) - # --------- + # --------------------------------------------------------------------------------------------------------- - resp = await client.get("/v0/meta") + resp = await client.get(f"/{API_VTAG}/meta") assert resp.status_code == 200 # Create Job resp = await client.post( - f"/v0/solvers/{solver_key}/releases/{solver_version}/jobs", + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs", auth=auth, - json=JobInputs(values={"x": 3.14, "n": 42}).dict(), + json=JobInputs( + values={ + "x": 3.14, + "n": 42, + # Tests https://github.com/ITISFoundation/osparc-issues/issues/948 + "a_list": [1, 2, 3], + } + ).model_dump(), ) - assert resp.status_code == status.HTTP_200_OK - assert mocked_directorv2_service_api[ - "create_computation_v2_computations_post" - ].called + assert resp.status_code == status.HTTP_201_CREATED - assert mocked_webserver_service_api["create_projects"].called - assert mocked_webserver_service_api["get_task_status"].called - assert mocked_webserver_service_api["get_task_result"].called + assert mocked_webserver_rest_api["create_projects"].called + assert mocked_webserver_rest_api["get_task_status"].called + assert mocked_webserver_rest_api["get_task_result"].called - job = Job.parse_obj(resp.json()) + job = Job.model_validate(resp.json()) # Start Job resp = await client.post( - f"/v0/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}", + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}:start", auth=auth, - params={"cluster_id", 1}, ) - assert resp.status_code == status.HTTP_200_OK - assert mocked_directorv2_service_api[ - "create_computation_v2_computations_post" - ].called + assert resp.status_code == status.HTTP_202_ACCEPTED + assert mocked_directorv2_service_api["inspect_computation"].called - job_status = JobStatus.parse_obj(resp.json()) + job_status = JobStatus.model_validate(resp.json()) + assert job_status.progress == 0.0 diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_delete.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_delete.py new file 
mode 100644 index 00000000000..899a18553dc --- /dev/null +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_delete.py @@ -0,0 +1,229 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pathlib import Path +from typing import TypedDict +from uuid import UUID + +import httpx +import jinja2 +import pytest +from faker import Faker +from models_library.basic_regex import UUID_RE_BASE +from pydantic import TypeAdapter +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from respx import MockRouter +from servicelib.common_headers import ( + X_SIMCORE_PARENT_NODE_ID, + X_SIMCORE_PARENT_PROJECT_UUID, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import Job, JobInputs +from starlette import status + +_faker = Faker() + + +class MockedBackendApiDict(TypedDict): + webserver: MockRouter | None + + +@pytest.fixture +def mocked_backend_services_apis_for_delete_non_existing_project( + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + project_tests_dir: Path, +) -> MockedBackendApiDict: + mock_name = "delete_project_not_found.json" + environment = jinja2.Environment( + loader=jinja2.FileSystemLoader(project_tests_dir / "mocks"), autoescape=True + ) + template = environment.get_template(mock_name) + + def _response(request: httpx.Request, project_id: str): + capture = HttpApiCallCaptureModel.model_validate_json( + template.render(project_id=project_id) + ) + return httpx.Response( + status_code=capture.status_code, json=capture.response_body + ) + + mocked_webserver_rest_api.delete( + path__regex=rf"/projects/(?P{UUID_RE_BASE})$", + name="delete_project", + ).mock(side_effect=_response) + + return MockedBackendApiDict(webserver=mocked_webserver_rest_api) + + +@pytest.mark.acceptance_test( + "For https://github.com/ITISFoundation/osparc-simcore/issues/4111" +) +async def test_delete_non_existing_solver_job( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + faker: Faker, + mocked_backend_services_apis_for_delete_non_existing_project: MockedBackendApiDict, +): + # Cannot delete if it does not exists + resp = await client.delete( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{faker.uuid4()}", + auth=auth, + ) + assert resp.status_code == status.HTTP_404_NOT_FOUND + + mock_webserver_router = ( + mocked_backend_services_apis_for_delete_non_existing_project["webserver"] + ) + assert mock_webserver_router + assert mock_webserver_router["delete_project"].called + + +@pytest.fixture +def mocked_backend_services_apis_for_create_and_delete_solver_job( + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + project_tests_dir: Path, +) -> MockedBackendApiDict: + mock_name = "on_create_job.json" + + # fixture + captures = TypeAdapter(list[HttpApiCallCaptureModel]).validate_json( + Path(project_tests_dir / "mocks" / mock_name).read_text() + ) + + # capture = captures[0] + # assert capture.host == "catalog" + # assert capture.method == "GET" + # mocked_catalog_rest_api.request( + # method=capture.method, path=capture.path, name="get_service" # GET service + # ).respond(status_code=capture.status_code, json=capture.response_body) + + capture = captures[-1] + assert capture.host == "webserver" + assert capture.method == "DELETE" + + 
mocked_webserver_rest_api.delete( + path__regex=rf"/projects/(?P{UUID_RE_BASE})$", + name="delete_project", + ).respond(status_code=capture.status_code, json=capture.response_body) + + return MockedBackendApiDict( + webserver=mocked_webserver_rest_api, + ) + + +@pytest.mark.acceptance_test( + "For https://github.com/ITISFoundation/osparc-simcore/issues/4111" +) +async def test_create_and_delete_solver_job( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + mocked_catalog_rpc_api: dict[str, MockType], + mocked_backend_services_apis_for_create_and_delete_solver_job: MockedBackendApiDict, +): + # create Job + resp = await client.post( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs", + auth=auth, + json=JobInputs( + values={ + "x": 3.14, + "n": 42, + } + ).model_dump(), + ) + assert resp.status_code == status.HTTP_201_CREATED + job = Job.model_validate(resp.json()) + + # Delete Job after creation + resp = await client.delete( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}", + auth=auth, + ) + assert resp.status_code == status.HTTP_204_NO_CONTENT + + mock_webserver_router = ( + mocked_backend_services_apis_for_create_and_delete_solver_job["webserver"] + ) + assert mock_webserver_router + assert mock_webserver_router["delete_project"].called + + get_service = mocked_catalog_rpc_api["get_service"] + assert get_service + assert get_service.called + + # NOTE: ideas for further tests + # Run job and try to delete while running + # Run a job and delete when finished + + +@pytest.mark.parametrize( + "parent_node_id, parent_project_id", + [(_faker.uuid4(), _faker.uuid4()), (None, None)], +) +@pytest.mark.parametrize("hidden", [True, False]) +async def test_create_job( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + mocked_backend_services_apis_for_create_and_delete_solver_job: MockedBackendApiDict, + mocked_catalog_rpc_api: dict[str, MockType], + hidden: bool, + parent_project_id: UUID | None, + parent_node_id: UUID | None, +): + + mock_webserver_router = ( + mocked_backend_services_apis_for_create_and_delete_solver_job["webserver"] + ) + assert mock_webserver_router is not None + callback = mock_webserver_router["create_projects"].side_effect + assert callback is not None + + def create_project_side_effect(request: httpx.Request): + # check `hidden` bool + query = dict(elm.split("=") for elm in request.url.query.decode().split("&")) + _hidden = query.get("hidden") + assert _hidden == ("true" if hidden else "false") + + # check parent project and node id + if parent_project_id is not None: + assert f"{parent_project_id}" == dict(request.headers).get( + X_SIMCORE_PARENT_PROJECT_UUID.lower() + ) + if parent_node_id is not None: + assert f"{parent_node_id}" == dict(request.headers).get( + X_SIMCORE_PARENT_NODE_ID.lower() + ) + return callback(request) + + mock_webserver_router["create_projects"].side_effect = create_project_side_effect + + # create Job + header_dict = {} + if parent_project_id is not None: + header_dict[X_SIMCORE_PARENT_PROJECT_UUID] = f"{parent_project_id}" + if parent_node_id is not None: + header_dict[X_SIMCORE_PARENT_NODE_ID] = f"{parent_node_id}" + resp = await client.post( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs", + auth=auth, + params={"hidden": f"{hidden}"}, + headers=header_dict, + json=JobInputs( + values={ + "x": 3.14, + "n": 42, + } + ).model_dump(), + ) + assert resp.status_code == 
status.HTTP_201_CREATED + job = Job.model_validate(resp.json()) diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_logs.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_logs.py new file mode 100644 index 00000000000..44ad7a35a42 --- /dev/null +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_logs.py @@ -0,0 +1,171 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import logging +from collections.abc import Iterable +from pprint import pprint +from typing import Final + +import httpx +import pytest +from attr import dataclass +from faker import Faker +from fastapi import FastAPI, status +from models_library.api_schemas_webserver.projects import ProjectGet +from pydantic import ValidationError +from pytest_mock import MockFixture +from pytest_simcore.simcore_webserver_projects_rest_api import GET_PROJECT +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.api.dependencies.rabbitmq import get_log_distributor +from simcore_service_api_server.models.schemas.errors import ErrorGet +from simcore_service_api_server.models.schemas.jobs import JobID, JobLog + +_logger = logging.getLogger(__name__) +_faker = Faker() + + +@pytest.fixture +async def fake_log_distributor(app: FastAPI, mocker: MockFixture): + @dataclass + class FakeLogDistributor: + _job_id: JobID | None = None + _queue_name: Final[str] = "my_queue" + _n_logs: int = 0 + _produced_logs: list[str] = [] + deregister_is_called: bool = False + + async def register(self, job_id: JobID, callback: asyncio.Queue[JobLog]): + self._job_id = job_id + + async def produce_log(): + for _ in range(5): + txt = _faker.text() + self._produced_logs.append(txt) + msg = JobLog( + job_id=job_id, + node_id=_faker.uuid4(), + log_level=logging.INFO, + messages=[txt], + ) + await callback.put(msg) + await asyncio.sleep(0.1) + + asyncio.create_task(produce_log()) + return self._queue_name + + async def deregister(self, job_id): + assert self._job_id == job_id + self.deregister_is_called = True + + fake_log_distributor = FakeLogDistributor() + app.dependency_overrides[get_log_distributor] = lambda: fake_log_distributor + yield fake_log_distributor + assert fake_log_distributor.deregister_is_called + + +@pytest.fixture +def fake_project_for_streaming( + app: FastAPI, mocker: MockFixture, faker: Faker +) -> Iterable[ProjectGet]: + + assert isinstance(response_body := GET_PROJECT.response_body, dict) + assert (data := response_body.get("data")) is not None + fake_project = ProjectGet.model_validate(data) + fake_project.workbench = {faker.uuid4(): faker.uuid4()} + mocker.patch( + "simcore_service_api_server.api.dependencies.webserver_http.AuthSession.get_project", + return_value=fake_project, + ) + + mocker.patch( + "simcore_service_api_server.api.routes.solvers_jobs_read.raise_if_job_not_associated_with_solver" + ) + return fake_project + + +@pytest.mark.parametrize("disconnect", [True, False]) +async def test_log_streaming( + app: FastAPI, + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + fake_log_distributor, + fake_project_for_streaming: ProjectGet, + mocked_directorv2_rest_api: MockRouter, + disconnect: bool, +): + + job_id: JobID = fake_project_for_streaming.uuid + + collected_messages: list[str] = [] + 
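+    # Consume the log stream line by line: every line is a JSON-encoded JobLog emitted by
+    # the fake log distributor; with disconnect=True the client closes without reading.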
async with client.stream( + "GET", + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/logstream", + auth=auth, + ) as response: + response.raise_for_status() + if not disconnect: + async for line in response.aiter_lines(): + job_log = JobLog.model_validate_json(line) + pprint(job_log.model_dump()) + collected_messages += job_log.messages + + assert fake_log_distributor.deregister_is_called + + assert ( + collected_messages + == fake_log_distributor._produced_logs[: len(collected_messages)] + ) + + +@pytest.fixture +async def mock_job_not_found( + mocked_directorv2_rest_api_base: MockRouter, +) -> MockRouter: + def _get_computation(request: httpx.Request, **kwargs) -> httpx.Response: + return httpx.Response(status_code=status.HTTP_404_NOT_FOUND) + + mocked_directorv2_rest_api_base.get( + path__regex=r"/v2/computations/(?P[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" + ).mock(side_effect=_get_computation) + return mocked_directorv2_rest_api_base + + +async def test_logstreaming_job_not_found_exception( + app: FastAPI, + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + fake_log_distributor, + fake_project_for_streaming: ProjectGet, + mock_job_not_found: MockRouter, +): + + job_id: JobID = fake_project_for_streaming.uuid + _received_error = False + + async with client.stream( + "GET", + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/logstream", + auth=auth, + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + try: + job_log = JobLog.model_validate_json(line) + pprint(job_log.model_dump()) + except ValidationError: + error = ErrorGet.model_validate_json(line) + _received_error = True + print(error.model_dump()) + + assert fake_log_distributor.deregister_is_called + assert _received_error diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_metadata.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_metadata.py new file mode 100644 index 00000000000..5d9147e7120 --- /dev/null +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_metadata.py @@ -0,0 +1,161 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import re +from pathlib import Path +from typing import TypedDict + +import httpx +import pytest +from faker import Faker +from models_library.basic_regex import UUID_RE_BASE +from pydantic import TypeAdapter +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import ( + Job, + JobInputs, + JobMetadata, + JobMetadataUpdate, +) +from starlette import status + + +class MockedBackendApiDict(TypedDict): + webserver: MockRouter | None + + +def _as_path_regex(initial_path: str): + return ( + re.sub(rf"({UUID_RE_BASE})", f"(?P{UUID_RE_BASE})", initial_path) + + "$" + ) + + +@pytest.fixture +def mocked_backend( + mocked_webserver_rest_api: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_catalog_rpc_api: dict[str, MockType], + project_tests_dir: Path, +) -> MockedBackendApiDict: + mock_name = "for_test_get_and_update_job_metadata.json" + + captures = { + c.name: c + for c in TypeAdapter(list[HttpApiCallCaptureModel]).validate_json( + Path(project_tests_dir / 
"mocks" / mock_name).read_text() + ) + } + + capture = captures["get_service"] + assert capture.host == "catalog" + + for name in ("get_project_metadata", "update_project_metadata", "delete_project"): + capture = captures[name] + assert capture.host == "webserver" + capture_path_regex = _as_path_regex(capture.path.removeprefix("/v0")) + + route = mocked_webserver_rest_api.request( + method=capture.method, + path__regex=capture_path_regex, + name=capture.name, + ) + + if name == "get_project_metadata": + # SEE https://lundberg.github.io/respx/guide/#iterable + route.side_effect = [ + captures["get_project_metadata"].as_response(), + captures["get_project_metadata_1"].as_response(), + captures["get_project_metadata_2"].as_response(), + ] + else: + route.respond( + status_code=capture.status_code, + json=capture.response_body, + ) + + return MockedBackendApiDict(webserver=mocked_webserver_rest_api) + + +@pytest.mark.acceptance_test( + "For https://github.com/ITISFoundation/osparc-simcore/issues/4110" +) +async def test_get_and_update_job_metadata( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + faker: Faker, + mocked_backend: MockedBackendApiDict, +): + # create Job + resp = await client.post( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs", + auth=auth, + json=JobInputs( + values={ + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": True, + } + ).model_dump(), + ) + assert resp.status_code == status.HTTP_201_CREATED + job = Job.model_validate(resp.json()) + + # Get metadata + resp = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_200_OK + job_meta = JobMetadata.model_validate(resp.json()) + + assert job_meta.metadata == {} + + # Update metadata + my_metadata = {"number": 3.14, "integer": 42, "string": "foo", "boolean": True} + resp = await client.patch( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}/metadata", + auth=auth, + json=JobMetadataUpdate(metadata=my_metadata).model_dump(), + ) + assert resp.status_code == status.HTTP_200_OK + + job_meta = JobMetadata.model_validate(resp.json()) + assert job_meta.metadata == my_metadata + + # Get metadata after update + resp = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_200_OK + job_meta = JobMetadata.model_validate(resp.json()) + + assert job_meta.metadata == my_metadata + + # Delete job + resp = await client.delete( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}", + auth=auth, + ) + assert resp.status_code == status.HTTP_204_NO_CONTENT + + # Get metadata -> job not found! 
+ resp = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_404_NOT_FOUND + + mock_webserver_router = mocked_backend["webserver"] + assert mock_webserver_router + assert mock_webserver_router["get_project_metadata"].called + assert mock_webserver_router["update_project_metadata"].called + assert mock_webserver_router["delete_project"].called diff --git a/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_read.py b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_read.py new file mode 100644 index 00000000000..57b69e001d3 --- /dev/null +++ b/services/api-server/tests/unit/api_solvers/test_api_routers_solvers_jobs_read.py @@ -0,0 +1,186 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pathlib import Path +from typing import NamedTuple + +import httpx +import pytest +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.pagination import Page +from simcore_service_api_server.models.schemas.jobs import Job +from starlette import status + + +class MockBackendRouters(NamedTuple): + webserver_rest: MockRouter + webserver_rpc: dict[str, MockType] + catalog_rpc: dict[str, MockType] + + +@pytest.fixture +def mocked_backend( + mocked_webserver_rest_api_base: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_catalog_rpc_api: dict[str, MockType], + project_tests_dir: Path, +) -> MockBackendRouters: + mock_name = "on_list_jobs.json" + captures = TypeAdapter(list[HttpApiCallCaptureModel]).validate_json( + Path(project_tests_dir / "mocks" / mock_name).read_text() + ) + + capture = captures[1] + assert capture.host == "webserver" + assert capture.name == "list_projects" + mocked_webserver_rest_api_base.request( + method=capture.method, + name=capture.name, + path=capture.path, + ).respond( + status_code=capture.status_code, + json=capture.response_body, + ) + + return MockBackendRouters( + webserver_rest=mocked_webserver_rest_api_base, + webserver_rpc=mocked_webserver_rpc_api, + catalog_rpc=mocked_catalog_rpc_api, + ) + + +@pytest.mark.acceptance_test( + "https://github.com/ITISFoundation/osparc-simcore/issues/4110" +) +async def test_list_solver_jobs( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + solver_key: str, + solver_version: str, + mocked_backend: MockBackendRouters, +): + # list jobs (w/o pagination) + resp = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs", auth=auth + ) + assert resp.status_code == status.HTTP_200_OK + jobs = TypeAdapter(list[Job]).validate_python(resp.json()) + + # list jobs (w/ pagination) + resp = await client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/page", + auth=auth, + params={"limits": 20}, + ) + assert resp.status_code == status.HTTP_200_OK + + jobs_page = TypeAdapter(Page[Job]).validate_python(resp.json()) + + assert jobs_page.items == jobs + + # check calls to the deep-backend services + assert mocked_backend.webserver_rest["list_projects"].called + assert mocked_backend.catalog_rpc["get_service"].called + + +async def test_list_all_solvers_jobs( + auth: httpx.BasicAuth, + client: 
httpx.AsyncClient, + mocked_backend: MockBackendRouters, +): + """Tests the endpoint that lists all jobs across all solvers.""" + + # Call the endpoint with pagination parameters + resp = await client.get( + f"/{API_VTAG}/solvers/-/releases/-/jobs", + auth=auth, + params={"limit": 10, "offset": 0}, + ) + + # Verify the response + assert resp.status_code == status.HTTP_200_OK + + # Parse and validate the response + jobs_page = TypeAdapter(Page[Job]).validate_python(resp.json()) + + # Basic assertions on the response structure + assert isinstance(jobs_page.items, list) + assert jobs_page.total > 0 + assert jobs_page.limit == 10 + assert jobs_page.offset == 0 + assert jobs_page.total <= len(jobs_page.items) + + # Each job should have the expected structure + for job in jobs_page.items: + assert job.id + assert job.name + assert job.url is not None + assert job.runner_url is not None + assert job.outputs_url is not None + + assert mocked_backend.webserver_rpc["list_projects_marked_as_jobs"].called + + +async def test_list_all_solvers_jobs_with_metadata_filter( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + mocked_backend: MockBackendRouters, + user_id: UserID, +): + """Tests the endpoint that lists all jobs across all solvers with metadata filtering.""" + + # Test with metadata filters + metadata_filters = ["key1:val*", "key2:exactval"] + + # Construct query parameters with metadata.any filters + params = { + "limit": 10, + "offset": 0, + "metadata.any": metadata_filters, + } + + # Call the endpoint with metadata filters + resp = await client.get( + f"/{API_VTAG}/solvers/-/releases/-/jobs", + auth=auth, + params=params, + ) + + # Verify the response + assert resp.status_code == status.HTTP_200_OK + + # Parse and validate the response + jobs_page = TypeAdapter(Page[Job]).validate_python(resp.json()) + + # Basic assertions on the response structure + assert isinstance(jobs_page.items, list) + assert jobs_page.limit == 10 + assert jobs_page.offset == 0 + + # Check that the backend was called with the correct filter parameters + assert mocked_backend.webserver_rpc["list_projects_marked_as_jobs"].called + + # Get the call args to verify filter parameters were passed correctly + call_args = mocked_backend.webserver_rpc["list_projects_marked_as_jobs"].call_args + + # The filter_any_custom_metadata parameter should contain our filters + # The exact structure will depend on how your mocked function is called + assert call_args is not None + + assert call_args.kwargs["product_name"] == "osparc" + assert call_args.kwargs["user_id"] == user_id + assert call_args.kwargs["offset"] == 0 + assert call_args.kwargs["limit"] == 10 + assert call_args.kwargs["filters"] + + # Verify the metadata filters were correctly transformed and passed + assert call_args.kwargs["filters"].any_custom_metadata[0].name == "key1" + assert call_args.kwargs["filters"].any_custom_metadata[0].pattern == "val*" + assert call_args.kwargs["filters"].any_custom_metadata[1].name == "key2" + assert call_args.kwargs["filters"].any_custom_metadata[1].pattern == "exactval" diff --git a/services/api-server/tests/unit/api_studies/conftest.py b/services/api-server/tests/unit/api_studies/conftest.py new file mode 100644 index 00000000000..e25d0aedd79 --- /dev/null +++ b/services/api-server/tests/unit/api_studies/conftest.py @@ -0,0 +1,32 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from copy import deepcopy +from typing import Any +from uuid import UUID + +import 
pytest +from faker import Faker +from pytest_simcore.helpers.webserver_fake_ports_data import ( + PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA, +) +from simcore_service_api_server.models.schemas.studies import StudyID + + +@pytest.fixture +def study_id(faker: Faker) -> StudyID: + return faker.uuid4() + + +@pytest.fixture +def fake_study_ports() -> list[dict[str, Any]]: + # NOTE: Reuses fakes used to test web-server API responses of /projects/{project_id}/metadata/ports + # as reponses in this mock. SEE services/web/server/tests/unit/with_dbs/02/test_projects_ports_handlers.py + return deepcopy(PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA) + + +@pytest.fixture +def fake_study_id(faker: Faker) -> UUID: + return faker.uuid4() diff --git a/services/api-server/tests/unit/api_studies/test_api_routers_studies_jobs_metadata.py b/services/api-server/tests/unit/api_studies/test_api_routers_studies_jobs_metadata.py new file mode 100644 index 00000000000..b1cc4c80073 --- /dev/null +++ b/services/api-server/tests/unit/api_studies/test_api_routers_studies_jobs_metadata.py @@ -0,0 +1,201 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import json +import re +from pathlib import Path +from typing import TypedDict + +import httpx +import pytest +from fastapi.encoders import jsonable_encoder +from pydantic import TypeAdapter +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from pytest_simcore.helpers.httpx_calls_capture_parameters import PathDescription +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import ( + Job, + JobMetadata, + JobMetadataUpdate, +) +from simcore_service_api_server.models.schemas.studies import StudyID +from starlette import status + + +class MockedBackendApiDict(TypedDict): + webserver: MockRouter | None + + +@pytest.fixture +def mocked_backend( + project_tests_dir: Path, + mocked_webserver_rest_api_base: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], +) -> MockedBackendApiDict | None: + # load + captures = { + c.name: c + for c in TypeAdapter(list[HttpApiCallCaptureModel]).validate_json( + Path( + project_tests_dir + / "mocks" + / "test_get_and_update_study_job_metadata.json" + ).read_text(), + ) + } + + # group captures based on manually adjusted capture names (see assert below) + names = list(captures) + groups = {} + used = set() + for n, name in enumerate(names): + group = ( + [other for other in names[n:] if re.match(rf"{name}_\d+$", other)] + if name not in used + else [] + ) + if name not in used: + groups[name] = group + used.update(group) + + print("Captures groups:", json.dumps(groups, indent=1)) + assert groups == { + "clone_project": [], + "get_clone_project_task_status": [ + "get_clone_project_task_status_1", + "get_clone_project_task_status_2", + "get_clone_project_task_status_3", + "get_clone_project_task_status_4", + ], + "get_clone_project_task_result": [], + "patch_project": [], + "get_project_inputs": [], + "get_project_metadata": ["get_project_metadata_1", "get_project_metadata_2"], + "patch_project_metadata": [], + "delete_project": [], + } + + # setup mocks as single or iterable responses + for name, group in groups.items(): + c = captures[name] + assert isinstance(c.path, PathDescription) + if group: + # mock this entrypoint using https://lundberg.github.io/respx/guide/#iterable + cc = 
[c] + [captures[_] for _ in group] + mocked_webserver_rest_api_base.request( + method=c.method.upper(), + url=None, + path__regex=f"^{c.path.to_path_regex()}$", + name=name, + ).mock( + side_effect=[_.as_response() for _ in cc], + ) + else: + mocked_webserver_rest_api_base.request( + method=c.method.upper(), + url=None, + path__regex=f"^{c.path.to_path_regex()}$", + name=name, + ).mock(return_value=c.as_response()) + + return MockedBackendApiDict( + webserver=mocked_webserver_rest_api_base, + ) + + +@pytest.fixture +def study_id() -> StudyID: + # NOTE: this id is used in mocks/test_get_and_update_study_job_metadata.json + return StudyID("784f63f4-1d9f-11ef-892d-0242ac140012") + + +async def test_get_and_update_study_job_metadata( + auth: httpx.BasicAuth, + client: httpx.AsyncClient, + study_id: StudyID, + mocked_backend: MockedBackendApiDict, +): + """ + To generate mock capture you can run + + pytest \ + --ff \ + --log-cli-level=INFO \ + --pdb \ + --setup-show \ + -sx \ + -vv \ + --spy-httpx-calls-enabled=true \ + --spy-httpx-calls-capture-path=test-httpx-spy-capture.ignore.keep.json \ + --faker-user-id=1 \ + --faker-user-email=foo@email.com \ + --faker-user-api-key=test \ + --faker-user-api-secret=test \ + --faker-project-id=784f63f4-1d9f-11ef-892d-0242ac140012 \ + -k test_get_and_update_study_job_metadata + """ + + # Creates a job (w/o running it) + resp = await client.post( + f"/{API_VTAG}/studies/{study_id}/jobs", + auth=auth, + json={"values": {}}, + ) + assert resp.status_code == status.HTTP_200_OK + job = Job(**resp.json()) + + # Get metadata + resp = await client.get( + f"/{API_VTAG}/studies/{study_id}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_200_OK + job_meta = JobMetadata(**resp.json()) + + assert job_meta.metadata == {} + + # Update metadata + my_metadata = { + "number": 3.14, + "integer": 42, + "string": "foo", + "boolean": True, + } + resp = await client.put( + f"/{API_VTAG}/studies/{study_id}/jobs/{job.id}/metadata", + auth=auth, + json=jsonable_encoder(JobMetadataUpdate(metadata=my_metadata)), + ) + assert resp.status_code == status.HTTP_200_OK + + job_meta = JobMetadata(**resp.json()) + assert job_meta.metadata == my_metadata + + # Get metadata after update + resp = await client.get( + f"/{API_VTAG}/studies/{study_id}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_200_OK + job_meta = JobMetadata(**resp.json()) + + assert job_meta.metadata == my_metadata + + # Delete job + resp = await client.delete( + f"/{API_VTAG}/studies/{study_id}/jobs/{job.id}", + auth=auth, + ) + assert resp.status_code == status.HTTP_204_NO_CONTENT + + # Get metadata -> job not found! 
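+    # After the delete above, the API is expected to translate the missing project
+    # into an HTTP 404 for the metadata endpoint.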
+ resp = await client.get( + f"/{API_VTAG}/studies/{study_id}/jobs/{job.id}/metadata", + auth=auth, + ) + assert resp.status_code == status.HTTP_404_NOT_FOUND diff --git a/services/api-server/tests/unit/api_studies/test_api_routes_studies.py b/services/api-server/tests/unit/api_studies/test_api_routes_studies.py new file mode 100644 index 00000000000..6c289763d7b --- /dev/null +++ b/services/api-server/tests/unit/api_studies/test_api_routes_studies.py @@ -0,0 +1,223 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Callable +from pathlib import Path +from typing import Any, TypedDict +from uuid import UUID + +import httpx +import pytest +from faker import Faker +from fastapi import status +from pydantic import TypeAdapter +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from respx import MockRouter +from servicelib.common_headers import ( + X_SIMCORE_PARENT_NODE_ID, + X_SIMCORE_PARENT_PROJECT_UUID, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.errors import ErrorGet +from simcore_service_api_server.models.schemas.studies import Study, StudyID, StudyPort + +_faker = Faker() + + +class MockedBackendApiDict(TypedDict): + catalog: MockRouter | None + webserver: MockRouter | None + + +@pytest.fixture +def mocked_backend( + mocked_webserver_rest_api_base: MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + project_tests_dir: Path, +) -> MockedBackendApiDict: + mock_name = "for_test_api_routes_studies.json" + + captures = { + c.name: c + for c in TypeAdapter(list[HttpApiCallCaptureModel]).validate_json( + Path(project_tests_dir / "mocks" / mock_name).read_text() + ) + } + + for name in ( + "get_me", + "list_projects", + "get_project", + "get_invalid_project", + "get_project_ports", + "get_invalid_project_ports", + ): + capture = captures[name] + assert capture.host == "webserver" + + route = mocked_webserver_rest_api_base.request( + method=capture.method, + path__regex=capture.path.removeprefix("/v0") + "$", + name=capture.name, + ).respond( + status_code=capture.status_code, + json=capture.response_body, + ) + print(route) + return MockedBackendApiDict(webserver=mocked_webserver_rest_api_base, catalog=None) + + +@pytest.mark.acceptance_test( + "Implements https://github.com/ITISFoundation/osparc-simcore/issues/4177" +) +async def test_studies_read_workflow( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + mocked_backend: MockedBackendApiDict, +): + study_id = StudyID("25531b1a-2565-11ee-ab43-02420a000031") + + # list_studies + resp = await client.get(f"/{API_VTAG}/studies", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + studies = TypeAdapter(list[Study]).validate_python(resp.json()["items"]) + assert len(studies) == 1 + assert studies[0].uid == study_id + + # create_study doest NOT exist -> needs to be done via GUI + resp = await client.post(f"/{API_VTAG}/studies", auth=auth) + assert resp.status_code == status.HTTP_405_METHOD_NOT_ALLOWED + + # get_study + resp = await client.get(f"/{API_VTAG}/studies/{study_id}", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + study = TypeAdapter(Study).validate_python(resp.json()) + assert study.uid == study_id + + # get ports + resp = await client.get(f"/{API_VTAG}/studies/{study_id}/ports", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + ports = 
TypeAdapter(list[StudyPort]).validate_python(resp.json()["items"]) + assert len(ports) == (resp.json()["total"]) + + # get_study with non-existing uuid + inexistent_study_id = StudyID("15531b1a-2565-11ee-ab43-02420a000031") + resp = await client.get(f"/{API_VTAG}/studies/{inexistent_study_id}", auth=auth) + assert resp.status_code == status.HTTP_404_NOT_FOUND + error = TypeAdapter(ErrorGet).validate_python(resp.json()) + assert f"{inexistent_study_id}" in error.errors[0] + + resp = await client.get( + f"/{API_VTAG}/studies/{inexistent_study_id}/ports", auth=auth + ) + assert resp.status_code == status.HTTP_404_NOT_FOUND + error = TypeAdapter(ErrorGet).validate_python(resp.json()) + assert f"{inexistent_study_id}" in error.errors[0] + + +async def test_list_study_ports( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + mocked_webserver_rest_api_base: MockRouter, + fake_study_ports: list[dict[str, Any]], + study_id: StudyID, +): + # Mocks /projects/{*}/metadata/ports + + mocked_webserver_rest_api_base.get( + path__regex=r"/projects/(?P[\w-]+)/metadata/ports$", + name="list_project_metadata_ports", + ).respond( + 200, + json={"data": fake_study_ports}, + ) + + # list_study_ports + resp = await client.get(f"/{API_VTAG}/studies/{study_id}/ports", auth=auth) + assert resp.status_code == status.HTTP_200_OK + assert resp.json() == {"items": fake_study_ports, "total": len(fake_study_ports)} + + +@pytest.mark.acceptance_test( + "Implements https://github.com/ITISFoundation/osparc-simcore/issues/4651" +) +@pytest.mark.parametrize( + "parent_node_id, parent_project_id", + [(_faker.uuid4(), _faker.uuid4()), (None, None)], +) +async def test_clone_study( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + study_id: StudyID, + mocked_webserver_rest_api_base: MockRouter, + patch_webserver_long_running_project_tasks: Callable[[MockRouter], MockRouter], + parent_project_id: UUID | None, + parent_node_id: UUID | None, +): + # Mocks /projects + patch_webserver_long_running_project_tasks(mocked_webserver_rest_api_base) + + callback = mocked_webserver_rest_api_base["create_projects"].side_effect + assert callback is not None + + def clone_project_side_effect(request: httpx.Request): + if parent_project_id is not None: + _parent_project_id = dict(request.headers).get( + X_SIMCORE_PARENT_PROJECT_UUID.lower() + ) + assert _parent_project_id == f"{parent_project_id}" + if parent_node_id is not None: + _parent_node_id = dict(request.headers).get( + X_SIMCORE_PARENT_NODE_ID.lower() + ) + assert _parent_node_id == f"{parent_node_id}" + return callback(request) + + mocked_webserver_rest_api_base["create_projects"].side_effect = ( + clone_project_side_effect + ) + + _headers = {} + if parent_project_id is not None: + _headers[X_SIMCORE_PARENT_PROJECT_UUID] = f"{parent_project_id}" + if parent_node_id is not None: + _headers[X_SIMCORE_PARENT_NODE_ID] = f"{parent_node_id}" + resp = await client.post( + f"/{API_VTAG}/studies/{study_id}:clone", headers=_headers, auth=auth + ) + + assert mocked_webserver_rest_api_base["create_projects"].called + + assert resp.status_code == status.HTTP_201_CREATED + + +async def test_clone_study_not_found( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + faker: Faker, + mocked_webserver_rest_api_base: MockRouter, + patch_webserver_long_running_project_tasks: Callable[[MockRouter], MockRouter], +): + # Mocks /projects + mocked_webserver_rest_api_base.post( + path__regex=r"/projects", + name="project_clone", + ).respond( + status.HTTP_404_NOT_FOUND, + json={"message": "you 
should not read this message from the WEBSERVER_MARK"}, + ) + + # tests unknown study + unknown_study_id = faker.uuid4() + resp = await client.post(f"/{API_VTAG}/studies/{unknown_study_id}:clone", auth=auth) + + assert resp.status_code == status.HTTP_404_NOT_FOUND + + errors: list[str] = resp.json()["errors"] + assert any("WEBSERVER_MARK" not in error_msg for error_msg in errors) diff --git a/services/api-server/tests/unit/api_studies/test_api_routes_studies_jobs.py b/services/api-server/tests/unit/api_studies/test_api_routes_studies_jobs.py new file mode 100644 index 00000000000..84b300cf07a --- /dev/null +++ b/services/api-server/tests/unit/api_studies/test_api_routes_studies_jobs.py @@ -0,0 +1,415 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import json +from pathlib import Path +from typing import Any, Final +from uuid import UUID + +import httpx +import pytest +import respx +from faker import Faker +from fastapi import status +from pytest_mock import MockType +from pytest_simcore.helpers.httpx_calls_capture_models import ( + CreateRespxMockCallback, + HttpApiCallCaptureModel, +) +from servicelib.common_headers import ( + X_SIMCORE_PARENT_NODE_ID, + X_SIMCORE_PARENT_PROJECT_UUID, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import Job, JobOutputs, JobStatus +from simcore_service_api_server.models.schemas.studies import JobLogsMap, Study, StudyID + +_faker = Faker() + + +@pytest.mark.xfail(reason="Still not implemented") +@pytest.mark.acceptance_test( + "Implements https://github.com/ITISFoundation/osparc-simcore/issues/4177" +) +async def test_studies_jobs_workflow( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + study_id: StudyID, +): + # get_study + resp = await client.get("/v0/studies/{study_id}", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + study = Study.model_validate(resp.json()) + assert study.uid == study_id + + # Lists study jobs + resp = await client.get("/v0/studies/{study_id}/jobs", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + # Create Study Job + resp = await client.post("/v0/studies/{study_id}/jobs", auth=auth) + assert resp.status_code == status.HTTP_201_CREATED + + job = Job.model_validate(resp.json()) + job_id = job.id + + # Get Study Job + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + # Start Study Job + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}:start", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + # Inspect Study Job + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}:inspect", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + # Get Study Job Outputs + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}/outputs", auth=auth) + assert resp.status_code == status.HTTP_200_OK + + # Get Study Job Outputs Logfile + resp = await client.get( + f"/v0/studies/{study_id}/jobs/{job_id}/outputs/logfile", auth=auth + ) + assert resp.status_code == status.HTTP_200_OK + + # Verify that the Study Job already finished and therefore is stopped + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}:stop", auth=auth) + assert resp.status_code == status.HTTP_404_NOT_FOUND + + 
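+    # The remaining steps exercise job deletion and the metadata round-trip; the whole
+    # workflow is still marked xfail above because it is not implemented end-to-end.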
# Delete Study Job + resp = await client.delete(f"/v0/studies/{study_id}/jobs/{job_id}", auth=auth) + assert resp.status_code == status.HTTP_204_NO_CONTENT + + # Verify that Study Job is deleted + resp = await client.delete(f"/v0/studies/{study_id}/jobs/{job_id}", auth=auth) + assert resp.status_code == status.HTTP_404_NOT_FOUND + + # job metadata + resp = await client.get(f"/v0/studies/{study_id}/jobs/{job_id}/metadata", auth=auth) + assert resp.status_code == status.HTTP_200_OK + assert resp.json()["metadata"] == {} + + # update study job metadata + custom_metadata = {"number": 3.14, "string": "str", "boolean": False} + resp = await client.put( + f"/v0/studies/{study_id}/jobs/{job_id}/metadata", + auth=auth, + json=custom_metadata, + ) + assert resp.status_code == status.HTTP_200_OK + assert resp.json()["metadata"] == custom_metadata + + # non-scalar value types are stored as their string representation + new_metadata = custom_metadata.copy() + new_metadata["other"] = custom_metadata.copy() # or use json.dumps + resp = await client.put( + f"/v0/studies/{study_id}/jobs/{job_id}/metadata", + auth=auth, + json=new_metadata, + ) + assert resp.status_code == status.HTTP_200_OK + assert resp.json()["metadata"]["other"] == str(new_metadata["other"]) + + +async def test_start_stop_delete_study_job( + client: httpx.AsyncClient, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_directorv2_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + fake_study_id: UUID, + faker: Faker, +): + capture_file = project_tests_dir / "mocks" / "study_job_start_stop_delete.json" + job_id = faker.uuid4() + + def _side_effect_no_project_id( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + return capture.response_body + + def _side_effect_with_project_id( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + path_param_job_id = path_params.get("project_id") + assert path_param_job_id == job_id + body = capture.response_body + assert isinstance(body, dict) + assert body.get("id") + body["id"] = path_param_job_id + return body + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_webserver_rest_api_base, + mocked_directorv2_rest_api_base, + ], + capture_path=capture_file, + side_effects_callbacks=[_side_effect_no_project_id] + + [_side_effect_with_project_id] * 3 + + [_side_effect_no_project_id], + ) + + def _check_response(response: httpx.Response, status_code: int): + response.raise_for_status() + assert response.status_code == status_code + if response.status_code != status.HTTP_204_NO_CONTENT: + _response_job_id = response.json().get("job_id") + assert _response_job_id + assert _response_job_id == job_id + + # start study job + response = await client.post( + f"{API_VTAG}/studies/{fake_study_id}/jobs/{job_id}:start", + auth=auth, + ) + _check_response(response, status.HTTP_202_ACCEPTED) + + # stop study job + response = await client.post( + f"{API_VTAG}/studies/{fake_study_id}/jobs/{job_id}:stop", + auth=auth, + ) + _check_response(response, status.HTTP_200_OK) + + # delete study job + response = await client.delete( + f"{API_VTAG}/studies/{fake_study_id}/jobs/{job_id}", + auth=auth, + ) + _check_response(response, status.HTTP_204_NO_CONTENT) + + +@pytest.mark.parametrize( + "parent_node_id, parent_project_id", + [(_faker.uuid4(), _faker.uuid4()), (None, None)], +) +@pytest.mark.parametrize("hidden", 
[True, False]) +async def test_create_study_job( + client: httpx.AsyncClient, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_directorv2_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + fake_study_id: UUID, + hidden: bool, + parent_project_id: UUID | None, + parent_node_id: UUID | None, +): + _capture_file: Final[Path] = project_tests_dir / "mocks" / "create_study_job.json" + + def _default_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + if capture.method == "PATCH": + _default_side_effect.patch_called = True + request_content = json.loads(request.content.decode()) + assert isinstance(request_content, dict) + name = request_content.get("name") + assert name is not None + project_id = path_params.get("project_id") + assert project_id is not None + assert project_id in name + if capture.method == "POST": + # test hidden boolean + _default_side_effect.post_called = True + query_dict = dict( + elm.split("=") for elm in request.url.query.decode().split("&") + ) + _hidden = query_dict.get("hidden") + assert _hidden == ("true" if hidden else "false") + + # test parent project and node ids + if parent_project_id is not None: + assert f"{parent_project_id}" == dict(request.headers).get( + X_SIMCORE_PARENT_PROJECT_UUID.lower() + ) + if parent_node_id is not None: + assert f"{parent_node_id}" == dict(request.headers).get( + X_SIMCORE_PARENT_NODE_ID.lower() + ) + return capture.response_body + + _default_side_effect.patch_called = False + _default_side_effect.post_called = False + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_webserver_rest_api_base, + mocked_directorv2_rest_api_base, + ], + capture_path=_capture_file, + side_effects_callbacks=[_default_side_effect] * 5, + ) + + header_dict = {} + if parent_project_id is not None: + header_dict[X_SIMCORE_PARENT_PROJECT_UUID] = f"{parent_project_id}" + if parent_node_id is not None: + header_dict[X_SIMCORE_PARENT_NODE_ID] = f"{parent_node_id}" + response = await client.post( + f"{API_VTAG}/studies/{fake_study_id}/jobs", + auth=auth, + headers=header_dict, + params={"hidden": f"{hidden}"}, + json={"values": {}}, + ) + assert response.status_code == 200 + assert _default_side_effect.patch_called + assert _default_side_effect.post_called + + +async def test_get_study_job_outputs( + client: httpx.AsyncClient, + fake_study_id: UUID, + auth: httpx.BasicAuth, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], +): + job_id = "cfe9a77a-f71e-11ee-8fca-0242ac140008" + + capture = { + "name": "GET /projects/cfe9a77a-f71e-11ee-8fca-0242ac140008/outputs", + "description": "", + "method": "GET", + "host": "webserver", + "path": { + "path": "/v0/projects/{project_id}/outputs", + "path_parameters": [ + { + "in": "path", + "name": "project_id", + "required": True, + "schema": { + "title": "Project Id", + "type": "str", + "pattern": None, + "format": "uuid", + "exclusiveMinimum": None, + "minimum": None, + "anyOf": None, + "allOf": None, + "oneOf": None, + }, + "response_value": "projects", + } + ], + }, + "query": None, + "request_payload": None, + "response_body": {"data": {}}, + "status_code": 200, + } + + mocked_webserver_rest_api_base.get( + path=capture["path"]["path"].format(project_id=job_id) + ).respond( + status_code=capture["status_code"], + json=capture["response_body"], 
+ ) + + response = await client.post( + f"{API_VTAG}/studies/{fake_study_id}/jobs/{job_id}/outputs", + auth=auth, + ) + assert response.status_code == status.HTTP_200_OK + job_outputs = JobOutputs(**response.json()) + + assert str(job_outputs.job_id) == job_id + assert job_outputs.results == {} + + +async def test_get_job_logs( + client: httpx.AsyncClient, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_directorv2_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + _study_id = "7171cbf8-2fc9-11ef-95d3-0242ac140018" + _job_id = "1a4145e2-2fca-11ef-a199-0242ac14002a" + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_directorv2_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "get_study_job_logs.json", + side_effects_callbacks=[], + ) + + response = await client.get( + f"{API_VTAG}/studies/{_study_id}/jobs/{_job_id}/outputs/log-links", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + _ = JobLogsMap.model_validate(response.json()) + + +async def test_get_study_outputs( + client: httpx.AsyncClient, + create_respx_mock_from_capture: CreateRespxMockCallback, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_directorv2_rest_api_base: respx.MockRouter, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + + _study_id = "e9f34992-436c-11ef-a15d-0242ac14000c" + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_directorv2_rest_api_base, + mocked_webserver_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "get_job_outputs.json", + side_effects_callbacks=[], + ) + + response = await client.post( + f"/{API_VTAG}/studies/{_study_id}/jobs", + auth=auth, + json={ + "values": { + "inputfile": { + "filename": "inputfile", + "id": "c1dcde67-6434-31c3-95ee-bf5fe1e9422d", + } + } + }, + ) + assert response.status_code == status.HTTP_200_OK + _job = Job.model_validate(response.json()) + _job_id = _job.id + + response = await client.post( + f"/{API_VTAG}/studies/{_study_id}/jobs/{_job_id}:start", auth=auth + ) + assert response.status_code == status.HTTP_202_ACCEPTED + _ = JobStatus.model_validate(response.json()) + + response = await client.post( + f"/{API_VTAG}/studies/{_study_id}/jobs/{_job_id}/outputs", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + _ = JobOutputs.model_validate(response.json()) diff --git a/services/api-server/tests/unit/api_studies/test_api_studies_mocks.py b/services/api-server/tests/unit/api_studies/test_api_studies_mocks.py new file mode 100644 index 00000000000..4727b0f87f1 --- /dev/null +++ b/services/api-server/tests/unit/api_studies/test_api_studies_mocks.py @@ -0,0 +1,37 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import httpx +import pytest +from fastapi import FastAPI, status +from respx import MockRouter +from simcore_service_api_server.core.settings import ApplicationSettings + + +def test_mocked_webserver_service_api( + app: FastAPI, + mocked_webserver_rest_api_base: MockRouter, + services_mocks_enabled: bool, +): + if not services_mocks_enabled: + pytest.skip(f"{services_mocks_enabled=}") + + # + # This test intends to help building the urls in mocked_webserver_service_api + # At some point, it can be skipped and reenabled only for development + # + settings: ApplicationSettings = 
app.state.settings + assert settings.API_SERVER_WEBSERVER + webserver_api_baseurl = settings.API_SERVER_WEBSERVER.api_base_url + + resp = httpx.get(f"{webserver_api_baseurl}/") + assert resp.status_code == status.HTTP_200_OK + assert resp.json() + + resp = httpx.get(f"{webserver_api_baseurl}/health") + assert resp.status_code == status.HTTP_200_OK + assert resp.json() + + mocked_webserver_rest_api_base.assert_all_called() diff --git a/services/api-server/tests/unit/captures/dummy_api_server_openapi.json b/services/api-server/tests/unit/captures/dummy_api_server_openapi.json new file mode 100644 index 00000000000..9106a05d1b2 --- /dev/null +++ b/services/api-server/tests/unit/captures/dummy_api_server_openapi.json @@ -0,0 +1,1712 @@ +{ + "openapi": "3.0.2", + "info": { + "title": "osparc.io web API", + "description": "osparc-simcore public API specifications", + "version": "0.4.5" + }, + "paths": { + "/v0/meta": { + "get": { + "tags": [ + "meta" + ], + "summary": "Get Service Metadata", + "operationId": "get_service_metadata", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Meta" + } + } + } + } + } + } + }, + "/v0/me": { + "get": { + "tags": [ + "users" + ], + "summary": "Get My Profile", + "operationId": "get_my_profile", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Profile" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + }, + "put": { + "tags": [ + "users" + ], + "summary": "Update My Profile", + "operationId": "update_my_profile", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProfileUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Profile" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/files": { + "get": { + "tags": [ + "files" + ], + "summary": "List Files", + "description": "Lists all files stored in the system\n\nSEE get_files_page for a paginated version of this function", + "operationId": "list_files", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "title": "Response List Files V0 Files Get", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/files/content": { + "put": { + "tags": [ + "files" + ], + "summary": "Upload File", + "description": "Uploads a single file to the system", + "operationId": "upload_file", + "parameters": [ + { + "required": false, + "schema": { + "title": "Content-Length", + "type": "string" + }, + "name": "content-length", + "in": "header" + } + ], + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_upload_file_v0_files_content_put" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/File" + } + } + } + }, + "422": { + "description": "Validation 
Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/files/{file_id}": { + "get": { + "tags": [ + "files" + ], + "summary": "Get File", + "description": "Gets metadata for a given file resource", + "operationId": "get_file", + "parameters": [ + { + "required": true, + "schema": { + "title": "File Id", + "type": "string", + "format": "uuid" + }, + "name": "file_id", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/File" + } + } + } + }, + "404": { + "description": "File not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/files/{file_id}/content": { + "get": { + "tags": [ + "files" + ], + "summary": "Download File", + "operationId": "download_file", + "parameters": [ + { + "required": true, + "schema": { + "title": "File Id", + "type": "string", + "format": "uuid" + }, + "name": "file_id", + "in": "path" + } + ], + "responses": { + "307": { + "description": "Successful Response" + }, + "404": { + "description": "File not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ErrorGet" + } + } + } + }, + "200": { + "description": "Returns a arbitrary binary data", + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + }, + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Solvers", + "description": "Lists all available solvers (latest version)\n\nSEE get_solvers_page for paginated version of this function", + "operationId": "list_solvers", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "title": "Response List Solvers V0 Solvers Get", + "type": "array", + "items": { + "$ref": "#/components/schemas/Solver" + } + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/releases": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Lists All Releases", + "description": "Lists all released solvers i.e. 
all released versions\n\nSEE get_solvers_releases_page for a paginated version of this function", + "operationId": "list_solvers_releases", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "title": "Response List Solvers Releases V0 Solvers Releases Get", + "type": "array", + "items": { + "$ref": "#/components/schemas/Solver" + } + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/latest": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Latest Release of a Solver", + "description": "Gets latest release of a solver", + "operationId": "get_solver", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Solver" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Solver Releases", + "description": "Lists all releases of a given (one) solver\n\nSEE get_solver_releases_page for a paginated version of this function", + "operationId": "list_solver_releases", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "title": "Response List Solver Releases V0 Solvers Solver Key Releases Get", + "type": "array", + "items": { + "$ref": "#/components/schemas/Solver" + } + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Solver Release", + "description": "Gets a specific release of a solver", + "operationId": "get_solver_release", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Solver" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + 
} + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs": { + "get": { + "tags": [ + "solvers" + ], + "summary": "List Jobs", + "description": "List of jobs in a specific released solver (limited to 20 jobs)\n\nSEE get_jobs_page for paginated version of this function", + "operationId": "list_jobs", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "title": "Response List Jobs V0 Solvers Solver Key Releases Version Jobs Get", + "type": "array", + "items": { + "$ref": "#/components/schemas/Job" + } + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + }, + "post": { + "tags": [ + "solvers" + ], + "summary": "Create Job", + "description": "Creates a job in a specific release with given inputs.\n\nNOTE: This operation does **not** start the job", + "operationId": "create_job", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobInputs" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job", + "description": "Gets job of a given solver", + "operationId": "get_job", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + 
"name": "job_id", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:start": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Start Job", + "description": "Starts job job_id created with the solver solver_key:version\n\nNew in *version 0.4.3*: cluster_id", + "operationId": "start_job", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + }, + { + "required": false, + "schema": { + "title": "Cluster Id", + "minimum": 0, + "type": "integer" + }, + "name": "cluster_id", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:stop": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Stop Job", + "operationId": "stop_job", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:inspect": { + "post": { + "tags": [ + "solvers" + ], + "summary": "Inspect Job", + "operationId": "inspect_job", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": 
"^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobStatus" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Outputs", + "operationId": "get_job_outputs", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JobOutputs" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + }, + "/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}/outputs/logfile": { + "get": { + "tags": [ + "solvers" + ], + "summary": "Get Job Output Logfile", + "description": "Special extra output with persistent logs file for the solver run.\n\nNOTE: this is not a log stream but a predefined output that is only\navailable after the job is done.\n\nNew in *version 0.4.0*", + "operationId": "get_job_output_logfile", + "parameters": [ + { + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }, + { + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + } + ], + "responses": { + "307": { + "description": "Successful Response" + }, + "200": { + "description": "Returns a log file", + "content": { + 
"application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + }, + "application/zip": { + "schema": { + "type": "string", + "format": "binary" + } + }, + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "404": { + "description": "Log not found" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + } + ] + } + } + }, + "components": { + "schemas": { + "Body_upload_file_v0_files_content_put": { + "title": "Body_upload_file_v0_files_content_put", + "required": [ + "file" + ], + "type": "object", + "properties": { + "file": { + "title": "File", + "type": "string", + "format": "binary" + } + } + }, + "ErrorGet": { + "title": "ErrorGet", + "required": [ + "errors" + ], + "type": "object", + "properties": { + "errors": { + "title": "Errors", + "type": "array", + "items": {} + } + } + }, + "File": { + "title": "File", + "required": [ + "id", + "filename" + ], + "type": "object", + "properties": { + "id": { + "title": "Id", + "type": "string", + "description": "Resource identifier", + "format": "uuid" + }, + "filename": { + "title": "Filename", + "type": "string", + "description": "Name of the file with extension" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "description": "Guess of type content [EXPERIMENTAL]" + }, + "checksum": { + "title": "Checksum", + "type": "string", + "description": "MD5 hash of the file's content [EXPERIMENTAL]" + } + }, + "description": "Represents a file stored on the server side i.e. a unique reference to a file in the cloud." + }, + "Groups": { + "title": "Groups", + "required": [ + "me", + "all" + ], + "type": "object", + "properties": { + "me": { + "$ref": "#/components/schemas/UsersGroup" + }, + "organizations": { + "title": "Organizations", + "type": "array", + "items": { + "$ref": "#/components/schemas/UsersGroup" + }, + "default": [] + }, + "all": { + "$ref": "#/components/schemas/UsersGroup" + } + } + }, + "HTTPValidationError": { + "title": "HTTPValidationError", + "type": "object", + "properties": { + "errors": { + "title": "Validation errors", + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidationError" + } + } + } + }, + "Job": { + "title": "Job", + "required": [ + "id", + "name", + "inputs_checksum", + "created_at", + "runner_name", + "url", + "runner_url", + "outputs_url" + ], + "type": "object", + "properties": { + "id": { + "title": "Id", + "type": "string", + "format": "uuid" + }, + "name": { + "title": "Name", + "pattern": "^([^\\s/]+/?){1,10}$", + "type": "string" + }, + "inputs_checksum": { + "title": "Inputs Checksum", + "type": "string", + "description": "Input's checksum" + }, + "created_at": { + "title": "Created At", + "type": "string", + "description": "Job creation timestamp", + "format": "date-time" + }, + "runner_name": { + "title": "Runner Name", + "pattern": "^([^\\s/]+/?){1,10}$", + "type": "string", + "description": "Runner that executes job" + }, + "url": { + "title": "Url", + "maxLength": 2083, + "minLength": 1, + "type": "string", + "description": "Link to get this resource (self)", + "format": "uri" + }, + "runner_url": { + "title": "Runner Url", + "maxLength": 2083, + "minLength": 1, + "type": "string", + "description": "Link to the solver's job (parent collection)", + "format": "uri" + }, + "outputs_url": { + "title": "Outputs Url", + "maxLength": 2083, + "minLength": 1, + 
"type": "string", + "description": "Link to the job outputs (sub-collection)", + "format": "uri" + } + }, + "example": { + "id": "f622946d-fd29-35b9-a193-abdd1095167c", + "name": "solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", + "runner_name": "solvers/isolve/releases/1.3.4", + "inputs_checksum": "12345", + "created_at": "2021-01-22T23:59:52.322176", + "url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c", + "runner_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4", + "outputs_url": "https://api.osparc.io/v0/solvers/isolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs" + } + }, + "JobInputs": { + "title": "JobInputs", + "required": [ + "values" + ], + "type": "object", + "properties": { + "values": { + "title": "Values", + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "type": "array", + "items": {} + } + ] + } + } + }, + "example": { + "values": { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": true, + "input_file": { + "filename": "input.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f" + } + } + } + }, + "JobOutputs": { + "title": "JobOutputs", + "required": [ + "job_id", + "results" + ], + "type": "object", + "properties": { + "job_id": { + "title": "Job Id", + "type": "string", + "description": "Job that produced this output", + "format": "uuid" + }, + "results": { + "title": "Results", + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "type": "array", + "items": {} + } + ] + } + } + }, + "example": { + "job_id": "99d9ac65-9f10-4e2f-a433-b5e412bb037b", + "results": { + "maxSAR": 4.33, + "n": 55, + "title": "Specific Absorption Rate", + "enabled": false, + "output_file": { + "filename": "sar_matrix.txt", + "id": "0a3b2c56-dbcd-4871-b93b-d454b7883f9f" + } + } + } + }, + "JobStatus": { + "title": "JobStatus", + "required": [ + "job_id", + "state", + "submitted_at" + ], + "type": "object", + "properties": { + "job_id": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "state": { + "$ref": "#/components/schemas/RunningState" + }, + "progress": { + "title": "Progress", + "maximum": 100, + "minimum": 0, + "type": "integer", + "default": 0 + }, + "submitted_at": { + "title": "Submitted At", + "type": "string", + "description": "Last modification timestamp of the solver job", + "format": "date-time" + }, + "started_at": { + "title": "Started At", + "type": "string", + "description": "Timestamp that indicate the moment the solver starts execution or None if the event did not occur", + "format": "date-time" + }, + "stopped_at": { + "title": "Stopped At", + "type": "string", + "description": "Timestamp at which the solver finished or killed execution or None if the event did not occur", + "format": "date-time" + } + }, + "example": { + "job_id": "145beae4-a3a8-4fde-adbb-4e8257c2c083", + "state": "STARTED", + "progress": 3, + "submitted_at": "2021-04-01 07:15:54.631007", + "started_at": "2021-04-01 07:16:43.670610" + } + }, + "Meta": { + "title": "Meta", + "required": [ + "name", + "version", + "docs_url", + "docs_dev_url" + ], + "type": "object", + "properties": { + "name": { + "title": "Name", + 
"type": "string" + }, + "version": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "released": { + "title": "Released", + "type": "object", + "additionalProperties": { + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "description": "Maps every route's path tag with a released version" + }, + "docs_url": { + "title": "Docs Url", + "maxLength": 65536, + "minLength": 1, + "type": "string", + "format": "uri" + }, + "docs_dev_url": { + "title": "Docs Dev Url", + "maxLength": 65536, + "minLength": 1, + "type": "string", + "format": "uri" + } + }, + "example": { + "name": "simcore_service_foo", + "version": "2.4.45", + "released": { + "v1": "1.3.4", + "v2": "2.4.45" + }, + "docs_url": "https://api.osparc.io/dev/doc", + "docs_dev_url": "https://api.osparc.io/dev/doc" + } + }, + "Profile": { + "title": "Profile", + "required": [ + "login", + "role" + ], + "type": "object", + "properties": { + "first_name": { + "title": "First Name", + "type": "string", + "example": "James" + }, + "last_name": { + "title": "Last Name", + "type": "string", + "example": "Maxwell" + }, + "login": { + "title": "Login", + "type": "string", + "format": "email" + }, + "role": { + "$ref": "#/components/schemas/UserRoleEnum" + }, + "groups": { + "$ref": "#/components/schemas/Groups" + }, + "gravatar_id": { + "title": "Gravatar Id", + "maxLength": 40, + "type": "string", + "description": "md5 hash value of email to retrieve an avatar image from https://www.gravatar.com" + } + }, + "example": { + "first_name": "James", + "last_name": "Maxwell", + "login": "james-maxwell@itis.swiss", + "role": "USER", + "groups": { + "me": { + "gid": "123", + "label": "maxy", + "description": "primary group" + }, + "organizations": [], + "all": { + "gid": "1", + "label": "Everyone", + "description": "all users" + } + }, + "gravatar_id": "9a8930a5b20d7048e37740bac5c1ca4f" + } + }, + "ProfileUpdate": { + "title": "ProfileUpdate", + "type": "object", + "properties": { + "first_name": { + "title": "First Name", + "type": "string", + "example": "James" + }, + "last_name": { + "title": "Last Name", + "type": "string", + "example": "Maxwell" + } + } + }, + "RunningState": { + "title": "RunningState", + "enum": [ + "UNKNOWN", + "PUBLISHED", + "NOT_STARTED", + "PENDING", + "WAITING_FOR_RESOURCES", + "STARTED", + "SUCCESS", + "FAILED", + "ABORTED" + ], + "type": "string", + "description": "State of execution of a project's computational workflow\n\nSEE StateType for task state" + }, + "Solver": { + "title": "Solver", + "required": [ + "id", + "version", + "title", + "maintainer", + "url" + ], + "type": "object", + "properties": { + "id": { + "title": "Id", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string", + "description": "Solver identifier" + }, + "version": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string", + "description": "semantic version number of the node" + }, + "title": { + "title": "Title", + "type": "string", + "description": "Human readable name" + }, + "description": { + 
"title": "Description", + "type": "string" + }, + "maintainer": { + "title": "Maintainer", + "type": "string" + }, + "url": { + "title": "Url", + "maxLength": 2083, + "minLength": 1, + "type": "string", + "description": "Link to get this resource", + "format": "uri" + } + }, + "description": "A released solver with a specific version", + "example": { + "id": "simcore/services/comp/isolve", + "version": "2.1.1", + "title": "iSolve", + "description": "EM solver", + "maintainer": "info@itis.swiss", + "url": "https://api.osparc.io/v0/solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/2.1.1" + } + }, + "UserRoleEnum": { + "title": "UserRoleEnum", + "enum": [ + "ANONYMOUS", + "GUEST", + "USER", + "TESTER", + "ADMIN" + ], + "type": "string", + "description": "An enumeration." + }, + "UsersGroup": { + "title": "UsersGroup", + "required": [ + "gid", + "label" + ], + "type": "object", + "properties": { + "gid": { + "title": "Gid", + "type": "string" + }, + "label": { + "title": "Label", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + } + } + }, + "ValidationError": { + "title": "ValidationError", + "required": [ + "loc", + "msg", + "type" + ], + "type": "object", + "properties": { + "loc": { + "title": "Location", + "type": "array", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "msg": { + "title": "Message", + "type": "string" + }, + "type": { + "title": "Error Type", + "type": "string" + } + } + } + }, + "securitySchemes": { + "HTTPBasic": { + "type": "http", + "scheme": "basic" + } + } + } +} diff --git a/services/api-server/tests/unit/captures/test__mocks_captures.py b/services/api-server/tests/unit/captures/test__mocks_captures.py new file mode 100644 index 00000000000..81297e1bbe5 --- /dev/null +++ b/services/api-server/tests/unit/captures/test__mocks_captures.py @@ -0,0 +1,274 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import json +import re +import sys +from pathlib import Path +from typing import Any, TypeAlias + +import httpx +import jsonref +import pytest +import respx +from pydantic import TypeAdapter +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel +from pytest_simcore.helpers.httpx_calls_capture_openapi import _determine_path +from pytest_simcore.helpers.httpx_calls_capture_parameters import ( + CapturedParameter, + PathDescription, +) + +try: + from openapi_core import Spec, create_spec, validate_request, validate_response + from openapi_core.contrib.starlette import ( + StarletteOpenAPIRequest, + StarletteOpenAPIResponse, + ) + + OPENAPI_CORE_INSTALLED = True + +except ImportError: + Spec: TypeAlias = Any + StarletteOpenAPIRequest = pytest.fail + StarletteOpenAPIResponse = pytest.fail + create_spec = pytest.fail + validate_request = pytest.fail + validate_response = pytest.fail + + OPENAPI_CORE_INSTALLED = False + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +_DUMMY_API_SERVER_OPENAPI = CURRENT_DIR / "dummy_api_server_openapi.json" + + +def _check_regex_pattern(pattern: str, match: str, non_match: str): + assert re.match(pattern=pattern, string=match), f"{match=} did not match {pattern=}" + assert not re.match( + pattern=pattern, string=non_match + ), f"{non_match=} matched {pattern=}" + + +@pytest.fixture +def openapi_specs( + catalog_service_openapi_specs: dict[str, Any], + 
webserver_service_openapi_specs: dict[str, Any], + storage_service_openapi_specs: dict[str, Any], + directorv2_service_openapi_specs: dict[str, Any], +) -> dict[str, Spec]: + return { + "catalog": create_spec(catalog_service_openapi_specs), + "webserver": create_spec(webserver_service_openapi_specs), + "storage": create_spec(storage_service_openapi_specs), + "directorv2": create_spec(directorv2_service_openapi_specs), + } + + +mock_folder_path = CURRENT_DIR.parent.parent / "mocks" +assert mock_folder_path.exists() + + +@pytest.mark.skipif( + not OPENAPI_CORE_INSTALLED, + reason="openapi-core is very restrictive with jsonschema version and limits requirements/_base.txt", +) +@pytest.mark.parametrize( + "mock_capture_path", mock_folder_path.glob("*.json"), ids=lambda p: p.name +) +def test_openapi_capture_mock( + mock_capture_path: Path, + openapi_specs: dict[str, Spec], +): + assert mock_capture_path.exists() + assert mock_capture_path.name.endswith(".json") + + captures = TypeAdapter( + list[HttpApiCallCaptureModel] | HttpApiCallCaptureModel + ).validate_json(mock_capture_path.read_text()) + + if not isinstance(captures, list): + captures = [ + captures, + ] + + for capture in captures: + # SEE https://openapi-core.readthedocs.io/en/latest/ + + request = httpx.Request( + method=capture.method, + url=f"http://{capture.host}/{capture.path}", + params=capture.query, + json=capture.request_payload, + ) + openapi_request = StarletteOpenAPIRequest(request) + + response = httpx.Response( + status_code=capture.status_code, + json=capture.response_body, + ) + openapi_response = StarletteOpenAPIResponse(response) + + validate_request(openapi_specs[capture.host], openapi_request) + validate_response( + openapi_specs[capture.host], + openapi_request, + openapi_response, + ) + + +_CAPTURE_REGEX_TEST_CASES: list[tuple[str, str, str | None, str | None]] = [ + ( + "solver_key", + """{ + "required": true, + "schema": { + "title": "Solver Key", + "pattern": "^simcore/services/comp/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "type": "string" + }, + "name": "solver_key", + "in": "path" + }""", + "simcore/services/comp/itis/sleeper", + "simcore/something", + ), + ( + "solver_version", + r"""{ + "required": true, + "schema": { + "title": "Version", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "type": "string" + }, + "name": "version", + "in": "path" + }""", + "2.0.2", + "2.s.6", + ), + ( + "job_id", + """{ + "required": true, + "schema": { + "title": "Job Id", + "type": "string", + "format": "uuid" + }, + "name": "job_id", + "in": "path" + }""", + "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "3fa85f64-5717-4562-b3fc-2c963f66", + ), + ( + "cluster_id", + """{ + "required": false, + "schema": { + "title": "Cluster Id", + "minimum": 0, + "type": "integer" + }, + "name": "cluster_id", + "in": "query" + }""", + "15", + "2i0", + ), + ( + "test_float", + """{ + "required": false, + "schema": { + "title": "My float", + "minimum": 0.3, + "type": "float" + }, + "name": "my_float", + "in": "path" + }""", + "1.5", + "20z", + ), + ( + "data_set_id", + """{ + "required": true, + "schema": { + "title": "Dataset Id", + "type": "string" + }, + "name": "dataset_id", + "in": "path" + }""", + "my_string123.;-", + None, + ), +] + + +@pytest.mark.parametrize("params", _CAPTURE_REGEX_TEST_CASES, ids=lambda x: x[0]) +def test_param_regex_pattern(params: tuple[str, str, str | None, str | None]): + _, openapi_param, 
match, non_match = params + param: CapturedParameter = CapturedParameter(**json.loads(openapi_param)) + pattern = param.schema_.regex_pattern + pattern = "^" + pattern + "$" + if match is not None: + assert re.match( + pattern=pattern, string=match + ), f"{match=} did not match {pattern=}" + if non_match is not None: + assert not re.match( + pattern=pattern, string=non_match + ), f"{non_match=} matched {pattern=}" + + +_API_SERVER_PATHS: list[tuple[str, Path, str]] = [ + ( + "get_solver", + Path("/v0/solvers/{solver_key}/latest"), + "/v0/solvers/simcore/services/comp/itis/sleeper/latest", + ), + ( + "get_job", + Path("/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}"), + "/v0/solvers/simcore/services/comp/itis/sleeper/releases/2.0.2/jobs/3fa85f64-5717-4562-b3fc-2c963f66afa6", + ), + ( + "start_job", + Path("/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:start"), + "/v0/solvers/simcore/services/comp/itis/sleeper/releases/2.0.2/jobs/3fa85f64-5717-4562-b3fc-2c963f66afa6:start", + ), + ("get_service_metadata", Path("/v0/meta"), "/v0/meta"), +] + + +@pytest.mark.parametrize("params", _API_SERVER_PATHS, ids=lambda x: x[0]) +@respx.mock +def test_capture_respx_api_server(params: tuple[str, Path, str]): + _, openapi_path, example = params + assert _DUMMY_API_SERVER_OPENAPI.is_file() + openapi_spec: dict[str, Any] = jsonref.loads(_DUMMY_API_SERVER_OPENAPI.read_text()) + url_path: PathDescription = _determine_path( + openapi_spec=openapi_spec, response_path=openapi_path + ) + path_pattern = str(openapi_path) + for p in url_path.path_parameters: + path_pattern = path_pattern.replace("{" + p.name + "}", p.respx_lookup) + + def side_effect(request, **kwargs): + return httpx.Response(status_code=200, json=kwargs) + + my_route = respx.get(url__regex="https://example.org" + path_pattern).mock( + side_effect=side_effect + ) + response = httpx.get("https://example.org" + example) + assert my_route.called + assert response.status_code == 200 + assert all(param.name in response.json() for param in url_path.path_parameters) diff --git a/services/api-server/tests/unit/conftest.py b/services/api-server/tests/unit/conftest.py index 4d87e023333..174bf1bd601 100644 --- a/services/api-server/tests/unit/conftest.py +++ b/services/api-server/tests/unit/conftest.py @@ -2,140 +2,199 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable +# pylint: disable=broad-exception-caught -from pprint import pprint -from typing import AsyncIterator, Iterator +import json +import subprocess +from collections.abc import AsyncIterator, Callable, Iterator +from copy import deepcopy +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock import aiohttp.test_utils import httpx import pytest +import respx +import yaml from asgi_lifespan import LifespanManager -from cryptography.fernet import Fernet from faker import Faker -from fastapi import FastAPI -from httpx._transports.asgi import ASGITransport +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from httpx import ASGITransport +from models_library.api_schemas_long_running_tasks.tasks import ( + TaskGet, + TaskProgress, + TaskStatus, +) +from models_library.api_schemas_storage.storage_schemas import HealthCheck +from models_library.api_schemas_webserver.projects import ProjectGet +from models_library.app_diagnostics import AppStatusCheck +from models_library.generics import Envelope +from models_library.products import ProductName +from 
models_library.projects import ProjectID +from models_library.projects_nodes_io import BaseFileLink, SimcoreS3FileID +from models_library.users import UserID from moto.server import ThreadedMotoServer -from pydantic import HttpUrl, parse_obj_as -from pytest_simcore.helpers.utils_docker import get_localhost_ip -from pytest_simcore.helpers.utils_envs import EnvVarsDict, setenvs_from_dict +from packaging.version import Version +from pydantic import EmailStr, HttpUrl, TypeAdapter +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.catalog_rpc_server import CatalogRpcSideEffects +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from pytest_simcore.helpers.webserver_rpc_server import WebserverRpcSideEffects +from pytest_simcore.simcore_webserver_projects_rest_api import GET_PROJECT from requests.auth import HTTPBasicAuth +from respx import MockRouter from simcore_service_api_server.core.application import init_app from simcore_service_api_server.core.settings import ApplicationSettings +from simcore_service_api_server.repository.api_keys import UserAndProductTuple +from simcore_service_api_server.services_http.solver_job_outputs import ResultsTypes +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient -## APP + SYNC/ASYNC CLIENTS -------------------------------------------------- + +@pytest.fixture +def product_name() -> ProductName: + return "osparc" @pytest.fixture -def patched_light_app_environ( - patched_default_app_environ: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +def app_environment( + monkeypatch: pytest.MonkeyPatch, + default_app_env_vars: EnvVarsDict, + backend_env_vars_overrides: EnvVarsDict, ) -> EnvVarsDict: - """Config that disables many plugins e.g. 
database or tracing""" - - env_vars = {} - env_vars.update(patched_default_app_environ) - - pprint(list(ApplicationSettings.schema()["properties"].keys())) - # [ - # 'SC_BOOT_MODE', - # 'LOG_LEVEL', - # 'API_SERVER_POSTGRES', - # 'API_SERVER_WEBSERVER', - # 'API_SERVER_CATALOG', - # 'API_SERVER_STORAGE', - # 'API_SERVER_DIRECTOR_V2', - # 'API_SERVER_TRACING', - # 'API_SERVER_DEV_FEATURES_ENABLED', - # 'API_SERVER_REMOTE_DEBUG_PORT' - # ] - # - env_vars.update( + env_vars = setenvs_from_dict( + monkeypatch, { + **default_app_env_vars, "WEBSERVER_HOST": "webserver", - "WEBSERVER_SESSION_SECRET_KEY": Fernet.generate_key().decode("utf-8"), "API_SERVER_POSTGRES": "null", + "API_SERVER_RABBITMQ": "null", "API_SERVER_TRACING": "null", "LOG_LEVEL": "debug", "SC_BOOT_MODE": "production", - } + "API_SERVER_HEALTH_CHECK_TASK_PERIOD_SECONDS": "3", + "API_SERVER_HEALTH_CHECK_TASK_TIMEOUT_SECONDS": "1", + "API_SERVER_LOG_CHECK_TIMEOUT_SECONDS": "1", + **backend_env_vars_overrides, + }, ) - setenvs_from_dict(monkeypatch, env_vars) + + # should be sufficient to create settings + print(ApplicationSettings.create_from_envs().model_dump_json(indent=1)) + return env_vars @pytest.fixture -def app(patched_light_app_environ: EnvVarsDict) -> FastAPI: +def mock_missing_plugins(app_environment: EnvVarsDict, mocker: MockerFixture): + settings = ApplicationSettings.create_from_envs() + if settings.API_SERVER_RABBITMQ is None: + import simcore_service_api_server.core.application + + mocker.patch.object( + simcore_service_api_server.core.application, + "setup_rabbitmq", + autospec=True, + ) + mocker.patch.object( + simcore_service_api_server.core.application, + "setup_prometheus_instrumentation", + autospec=True, + ) + + return app_environment + + +@pytest.fixture +def app( + mock_missing_plugins: EnvVarsDict, + create_httpx_async_client_spy_if_enabled: Callable, + patch_lrt_response_urls: Callable, + spy_httpx_calls_enabled: bool, +) -> FastAPI: """Inits app on a light environment""" - the_app = init_app() - return the_app + + if spy_httpx_calls_enabled: + create_httpx_async_client_spy_if_enabled( + "simcore_service_api_server.utils.client_base.AsyncClient" + ) + + patch_lrt_response_urls() + + return init_app() + + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 @pytest.fixture -async def client(app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: +async def client( + app: FastAPI, is_pdb_enabled: bool +) -> AsyncIterator[httpx.AsyncClient]: # # Prefer this client instead of fastapi.testclient.TestClient # - async with LifespanManager(app): - # needed for app to trigger start/stop event handlers - async with httpx.AsyncClient( - app=app, + + # LifespanManager will trigger app's startup&shutown event handlers + async with ( + LifespanManager( + app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ), + httpx.AsyncClient( base_url="http://api.testserver.io", headers={"Content-Type": "application/json"}, - ) as client: - - assert isinstance(client._transport, ASGITransport) - # rewires location test's app to client.app - setattr(client, "app", client._transport.app) - - yield client + transport=ASGITransport(app=app), + ) as httpx_async_client, + ): + assert isinstance(httpx_async_client, httpx.AsyncClient) + yield httpx_async_client ## MOCKED Repositories -------------------------------------------------- @pytest.fixture -def auth(mocker, app: FastAPI, faker: Faker) -> HTTPBasicAuth: +def auth( + 
mocker: MockerFixture, + app: FastAPI, + user_id: UserID, + user_email: EmailStr, + user_api_key: str, + user_api_secret: str, +) -> HTTPBasicAuth: """ Auth mocking repositories and db engine (i.e. does not require db up) """ # mock engine if db was not init if app.state.settings.API_SERVER_POSTGRES is None: - engine = mocker.MagicMock() engine.minsize = 1 engine.size = 10 engine.freesize = 3 engine.maxsize = 10 app.state.engine = engine - - # patch authentication entry in repo - faker_user_id = faker.pyint() + async_engine = mocker.MagicMock() + app.state.asyncpg_engine = async_engine # NOTE: here, instead of using the database, we patch repositories interface mocker.patch( - "simcore_service_api_server.db.repositories.api_keys.ApiKeysRepository.get_user_id", + "simcore_service_api_server.repository.api_keys.ApiKeysRepository.get_user", autospec=True, - return_value=faker_user_id, + return_value=UserAndProductTuple(user_id=user_id, product_name="osparc"), ) mocker.patch( - "simcore_service_api_server.db.repositories.users.UsersRepository.get_user_id", + "simcore_service_api_server.repository.users.UsersRepository.get_active_user_email", autospec=True, - return_value=faker_user_id, - ) - mocker.patch( - "simcore_service_api_server.db.repositories.users.UsersRepository.get_email_from_user_id", - autospec=True, - return_value=faker.email(), - ) - - # patches simcore_postgres_database.utils_products.get_default_product_name - mocker.patch( - "simcore_service_api_server.api.dependencies.application.get_default_product_name", - autospec=True, - return_value="osparc", + return_value=user_email, ) - return HTTPBasicAuth(faker.word(), faker.password()) + return HTTPBasicAuth(user_api_key, user_api_secret) ## MOCKED S3 service -------------------------------------------------- @@ -153,7 +212,9 @@ def mocked_s3_server_url() -> Iterator[HttpUrl]: ) # pylint: disable=protected-access - endpoint_url = parse_obj_as(HttpUrl, f"http://{server._ip_address}:{server._port}") + endpoint_url = TypeAdapter(HttpUrl).validate_python( + f"http://{server._ip_address}:{server._port}" + ) print(f"--> started mock S3 server on {endpoint_url}") server.start() @@ -162,3 +223,505 @@ def mocked_s3_server_url() -> Iterator[HttpUrl]: server.stop() print(f"<-- stopped mock S3 server on {endpoint_url}") + + +## MOCKED res/web APIs from simcore services ------------------------------------------ + + +@pytest.fixture +def mocked_app_dependencies(app: FastAPI, mocker: MockerFixture) -> Iterator[None]: + """ + Mocks some dependency overrides for the FastAPI app. 
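+
+    The overrides are installed through ``app.dependency_overrides`` and removed
+    again on teardown, so no real RabbitMQ or webserver RPC backend is required.
+
+    Rough usage sketch (the test name below is illustrative only):
+
+        async def test_something(mocked_app_dependencies: None, app: FastAPI):
+            # while the fixture is active, both RPC dependencies are overridden
+            assert len(app.dependency_overrides) >= 2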
+ """ + assert app.state.settings.API_SERVER_RABBITMQ is None + + from simcore_service_api_server.api.dependencies.rabbitmq import ( + get_rabbitmq_rpc_client, + ) + from simcore_service_api_server.api.dependencies.webserver_rpc import ( + get_wb_api_rpc_client, + ) + + def _get_rabbitmq_rpc_client_override(): + return mocker.MagicMock() + + async def _get_wb_api_rpc_client_override(): + return WbApiRpcClient(_client=mocker.MagicMock()) + + app.dependency_overrides[get_rabbitmq_rpc_client] = ( + _get_rabbitmq_rpc_client_override + ) + app.dependency_overrides[get_wb_api_rpc_client] = _get_wb_api_rpc_client_override + + yield + + app.dependency_overrides.pop(get_wb_api_rpc_client, None) + app.dependency_overrides.pop(get_rabbitmq_rpc_client, None) + + +@pytest.fixture +def directorv2_service_openapi_specs( + osparc_simcore_services_dir: Path, +) -> dict[str, Any]: + openapi_path = osparc_simcore_services_dir / "director-v2" / "openapi.json" + return json.loads(openapi_path.read_text()) + + +@pytest.fixture +def webserver_service_openapi_specs( + osparc_simcore_services_dir: Path, +) -> dict[str, Any]: + openapi_path = ( + osparc_simcore_services_dir + / "web/server/src/simcore_service_webserver/api/v0/openapi.yaml" + ) + return yaml.safe_load(openapi_path.read_text()) + + +@pytest.fixture +def storage_service_openapi_specs( + osparc_simcore_services_dir: Path, +) -> dict[str, Any]: + openapi_path = osparc_simcore_services_dir / "storage" / "openapi.json" + return json.loads(openapi_path.read_text()) + + +@pytest.fixture +def catalog_service_openapi_specs(osparc_simcore_services_dir: Path) -> dict[str, Any]: + openapi_path = osparc_simcore_services_dir / "catalog" / "openapi.json" + return json.loads(openapi_path.read_text()) + + +@pytest.fixture +def mocked_directorv2_rest_api_base( + app: FastAPI, + directorv2_service_openapi_specs: dict[str, Any], + services_mocks_enabled: bool, +) -> Iterator[MockRouter]: + settings: ApplicationSettings = app.state.settings + assert settings.API_SERVER_DIRECTOR_V2 + + openapi = deepcopy(directorv2_service_openapi_specs) + assert Version(openapi["info"]["version"]).major == 2 + + # pylint: disable=not-context-manager + with respx.mock( + base_url=settings.API_SERVER_DIRECTOR_V2.base_url, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + assert openapi + assert ( + openapi["paths"]["/"]["get"]["operationId"] == "check_service_health__get" + ) + + respx_mock.get(path="/", name="check_service_health__get").respond( + status.HTTP_200_OK, + json=openapi["components"]["schemas"]["HealthCheckGet"]["example"], + ) + + # SEE https://github.com/pcrespov/sandbox-python/blob/f650aad57aced304aac9d0ad56c00723d2274ad0/respx-lib/test_disable_mock.py + if not services_mocks_enabled: + respx_mock.stop() + + yield respx_mock + + +@pytest.fixture +def mocked_webserver_rest_api_base( + app: FastAPI, + webserver_service_openapi_specs: dict[str, Any], + services_mocks_enabled: bool, +) -> Iterator[MockRouter]: + """ + Creates a respx.mock to capture calls to webserver API + Includes only basic routes to check that the configuration is correct + IMPORTANT: This fixture shall be extended on a test bases + """ + settings: ApplicationSettings = app.state.settings + assert settings.API_SERVER_WEBSERVER + + openapi = deepcopy(webserver_service_openapi_specs) + assert Version(openapi["info"]["version"]).major == 0 + + # pylint: disable=not-context-manager + with respx.mock( + base_url=settings.API_SERVER_WEBSERVER.base_url, + assert_all_called=False, + ) as 
respx_mock: + # healthcheck_readiness_probe, healthcheck_liveness_probe + response_body = { + "name": "webserver", + "version": "1.0.0", + "api_version": "1.0.0", + } + respx_mock.get(path="/v0/", name="healthcheck_readiness_probe").respond( + status.HTTP_200_OK, json=response_body + ) + respx_mock.get(path="/v0/health", name="healthcheck_liveness_probe").respond( + status.HTTP_200_OK, json=response_body + ) + + # SEE https://github.com/pcrespov/sandbox-python/blob/f650aad57aced304aac9d0ad56c00723d2274ad0/respx-lib/test_disable_mock.py + if not services_mocks_enabled: + respx_mock.stop() + + yield respx_mock + + +@pytest.fixture +def mocked_storage_rest_api_base( + app: FastAPI, + storage_service_openapi_specs: dict[str, Any], + faker: Faker, + services_mocks_enabled: bool, +) -> Iterator[MockRouter]: + """ + Creates a respx.mock to capture calls to strage API + Includes only basic routes to check that the configuration is correct + IMPORTANT: This fixture shall be extended on a test bases + """ + settings: ApplicationSettings = app.state.settings + assert settings.API_SERVER_STORAGE + + openapi = deepcopy(storage_service_openapi_specs) + assert Version(openapi["info"]["version"]).major == 0 + + # pylint: disable=not-context-manager + with respx.mock( + base_url=settings.API_SERVER_STORAGE.base_url, + assert_all_called=False, + ) as respx_mock: + assert openapi["paths"]["/v0/"]["get"]["operationId"] == "get_health_v0__get" + + respx_mock.get(path="/v0/", name="get_health_v0__get").respond( + status.HTTP_200_OK, + json=Envelope[HealthCheck]( + data={ + "name": "storage", + "status": "ok", + "api_version": "1.0.0", + "version": "1.0.0", + }, + ).model_dump(), + ) + + assert ( + openapi["paths"]["/v0/status"]["get"]["operationId"] + == "get_status_v0_status_get" + ) + respx_mock.get(path="/v0/status", name="get_status_v0_status_get").respond( + status.HTTP_200_OK, + json=Envelope[AppStatusCheck]( + data={ + "app_name": "storage", + "version": "1.0.0", + "url": faker.url(), + "diagnostics_url": faker.url(), + } + ).model_dump(mode="json"), + ) + + # SEE https://github.com/pcrespov/sandbox-python/blob/f650aad57aced304aac9d0ad56c00723d2274ad0/respx-lib/test_disable_mock.py + if not services_mocks_enabled: + respx_mock.stop() + + yield respx_mock + + +@pytest.fixture +def mocked_catalog_rest_api_base( + app: FastAPI, + catalog_service_openapi_specs: dict[str, Any], + services_mocks_enabled: bool, +) -> Iterator[MockRouter]: + settings: ApplicationSettings = app.state.settings + assert settings.API_SERVER_CATALOG + + openapi = deepcopy(catalog_service_openapi_specs) + assert Version(openapi["info"]["version"]).major == 0 + schemas = openapi["components"]["schemas"] + + # pylint: disable=not-context-manager + with respx.mock( + base_url=settings.API_SERVER_CATALOG.base_url, + assert_all_called=False, + ) as respx_mock: + respx_mock.get("/v0/").respond( + status.HTTP_200_OK, + text="simcore_service_catalog.api.routes.health@2023-07-03T12:59:12.024551+00:00", + ) + respx_mock.get("/v0/meta").respond( + status.HTTP_200_OK, json=schemas["BaseMeta"]["example"] + ) + + # SEE https://github.com/pcrespov/sandbox-python/blob/f650aad57aced304aac9d0ad56c00723d2274ad0/respx-lib/test_disable_mock.py + if not services_mocks_enabled: + respx_mock.stop() + + yield respx_mock + + +@pytest.fixture +def mocked_webserver_rpc_api( + mocked_app_dependencies: None, mocker: MockerFixture +) -> dict[str, MockType]: + """ + Mocks the webserver's simcore service RPC API for testing purposes. 
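+
+    Returns a mapping of RPC method name to its mock so that tests can assert on
+    the calls they expect, e.g. (illustrative sketch, not an actual test here):
+
+        mocks = mocked_webserver_rpc_api
+        ...  # exercise the api-server endpoint under test
+        mocks["mark_project_as_job"].assert_called_once()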
+ """ + from servicelib.rabbitmq.rpc_interfaces.webserver import ( + projects as projects_rpc, # keep import here + ) + + side_effects = WebserverRpcSideEffects() + + return { + "mark_project_as_job": mocker.patch.object( + projects_rpc, + "mark_project_as_job", + autospec=True, + side_effect=side_effects.mark_project_as_job, + ), + "list_projects_marked_as_jobs": mocker.patch.object( + projects_rpc, + "list_projects_marked_as_jobs", + autospec=True, + side_effect=side_effects.list_projects_marked_as_jobs, + ), + } + + +@pytest.fixture +def catalog_rpc_side_effects(request) -> Any: + if "param" in dir(request) and request.param is not None: + return request.param + return CatalogRpcSideEffects() + + +@pytest.fixture +def mocked_catalog_rpc_api( + mocked_app_dependencies: None, mocker: MockerFixture, catalog_rpc_side_effects: Any +) -> dict[str, MockType]: + """ + Mocks the catalog's simcore service RPC API for testing purposes. + """ + from servicelib.rabbitmq.rpc_interfaces.catalog import ( + services as catalog_rpc, # keep import here + ) + + mocks = {} + + # Get all callable methods from the side effects class that are not built-ins + side_effect_methods = [ + method_name + for method_name in dir(catalog_rpc_side_effects) + if not method_name.startswith("_") + and callable(getattr(catalog_rpc_side_effects, method_name)) + ] + + # Create mocks for each method in catalog_rpc that has a corresponding side effect + for method_name in side_effect_methods: + if hasattr(catalog_rpc, method_name): + mocks[method_name] = mocker.patch.object( + catalog_rpc, + method_name, + autospec=True, + side_effect=getattr(catalog_rpc_side_effects, method_name), + ) + + return mocks + + +# +# Other Mocks +# + + +@pytest.fixture +def mocked_solver_job_outputs(mocker) -> None: + result: dict[str, ResultsTypes] = {} + result["output_1"] = 0.6 + result["output_2"] = BaseFileLink( + store=0, + path=SimcoreS3FileID( + "api/7cf771db-3ee9-319e-849f-53db0076fc93/single_number.txt" + ), + label=None, + eTag=None, + ) + mocker.patch( + "simcore_service_api_server.api.routes.solvers_jobs_read.get_solver_output_results", + autospec=True, + return_value=result, + ) + + +@pytest.fixture +def patch_lrt_response_urls(mocker: MockerFixture): + """ + Callable that patches webserver._get_lrt_urls helper + when running in spy mode + """ + + def _() -> MagicMock: + def _get_lrt_urls(lrt_response: httpx.Response): + # NOTE: this function is needed to mock + data = Envelope[TaskGet].model_validate_json(lrt_response.text).data + assert data is not None # nosec + + def _patch(href): + return lrt_response.request.url.copy_with( + raw_path=httpx.URL(href).raw_path + ) + + data.status_href = _patch(data.status_href) + data.result_href = _patch(data.result_href) + + return data.status_href, data.result_href + + return mocker.patch( + "simcore_service_api_server.services_http.webserver._get_lrt_urls", + side_effect=_get_lrt_urls, + ) + + return _ + + +@pytest.fixture +def patch_webserver_long_running_project_tasks( + app: FastAPI, faker: Faker, services_mocks_enabled: bool +) -> Callable[[MockRouter], MockRouter]: + settings: ApplicationSettings = app.state.settings + assert settings.API_SERVER_WEBSERVER is not None + + class _LongRunningProjectTasks: + """ + Preserves results per task_id + """ + + def __init__(self): + self._results: dict[str, Any] = {} + + def _set_result_and_get_reponse(self, result: Any): + task_id = faker.uuid4() + self._results[task_id] = jsonable_encoder(result, by_alias=True) + + return httpx.Response( + 
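+                # NOTE: mimics the webserver's long-running task (LRT) protocol:
+                # the POST is acknowledged with 202 Accepted plus a TaskGet
+                # envelope, and the payload stored above under task_id is served
+                # later by get_result() when the client polls the result_href.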
status.HTTP_202_ACCEPTED, + json={ + "data": TaskGet( + task_id=task_id, + task_name="fake-task-name", + status_href=f"{settings.API_SERVER_WEBSERVER.api_base_url}/tasks/{task_id}", + result_href=f"{settings.API_SERVER_WEBSERVER.api_base_url}/tasks/{task_id}/result", + abort_href=f"{settings.API_SERVER_WEBSERVER.api_base_url}/tasks/{task_id}", + ).model_dump() + }, + ) + + # SIDE EFFECT functions --- + + def create_project_task(self, request: httpx.Request): + # create result: use the request-body + query = dict( + elm.split("=") for elm in request.url.query.decode().split("&") + ) + if from_study := query.get("from_study"): + return self.clone_project_task(request=request, project_id=from_study) + project_create = json.loads(request.content) + project_get = ProjectGet.model_validate( + { + "creationDate": "2018-07-01T11:13:43Z", + "lastChangeDate": "2018-07-01T11:13:43Z", + "prjOwner": "owner@email.com", + "type": "STANDARD", + "templateType": None, + "dev": None, + "trashed_at": None, + "trashed_by": None, + **project_create, + } + ) + + return self._set_result_and_get_reponse(project_get) + + def clone_project_task(self, request: httpx.Request, *, project_id: str): + assert GET_PROJECT.response_body + + project_get = ProjectGet.model_validate( + { + "creationDate": "2018-07-01T11:13:43Z", + "lastChangeDate": "2018-07-01T11:13:43Z", + "prjOwner": "owner@email.com", + **GET_PROJECT.response_body["data"], + } + ) + project_get.uuid = ProjectID(project_id) + + return self._set_result_and_get_reponse(project_get) + + def get_result(self, request: httpx.Request, *, task_id: str): + assert request + return httpx.Response( + status.HTTP_200_OK, json={"data": self._results[task_id]} + ) + + # NOTE: Due to lack of time, i will leave it here but I believe + # it is possible to have a generic long-running task workflow + # that preserves the resultswith state + + def _mock(webserver_mock_router: MockRouter) -> MockRouter: + if services_mocks_enabled: + long_running_task_workflow = _LongRunningProjectTasks() + + webserver_mock_router.post( + path__regex="/projects", + name="create_projects", + ).mock(side_effect=long_running_task_workflow.create_project_task) + + webserver_mock_router.post( + path__regex=r"/projects/(?P[\w-]+):clone$", + name="project_clone", + ).mock(side_effect=long_running_task_workflow.clone_project_task) + + # Tasks routes ---------------- + + webserver_mock_router.get( + path__regex=r"/tasks/(?P[\w-]+)$", + name="get_task_status", + ).respond( + status.HTTP_200_OK, + json={ + "data": jsonable_encoder( + TaskStatus( + task_progress=TaskProgress( + message="fake job done", percent=1 + ), + done=True, + started="2018-07-01T11:13:43Z", + ), + by_alias=True, + ) + }, + ) + + webserver_mock_router.get( + path__regex=r"/tasks/(?P[\w-]+)/result$", + name="get_task_result", + ).mock(side_effect=long_running_task_workflow.get_result) + + return webserver_mock_router + + return _mock + + +@pytest.fixture +def openapi_dev_specs(project_slug_dir: Path) -> dict[str, Any]: + openapi_file = (project_slug_dir / "openapi-dev.json").resolve() + if openapi_file.is_file(): + openapi_file.unlink() + subprocess.run( + "make openapi-dev.json", cwd=project_slug_dir, shell=True, check=True + ) + assert openapi_file.is_file() + return json.loads(openapi_file.read_text()) diff --git a/services/api-server/tests/unit/pact_broker/README.md b/services/api-server/tests/unit/pact_broker/README.md new file mode 100644 index 00000000000..19620b24009 --- /dev/null +++ 
b/services/api-server/tests/unit/pact_broker/README.md @@ -0,0 +1,19 @@ +# Contract testing (PACT) + +Maintainer @matusdrobuliak66 + +```bash +PACT_BROKER_URL= PACT_BROKER_USERNAME= PACT_BROKER_PASSWORD= make test-pacts +``` + +## Install and Publish new contract to Broker +Contracts are generated by Consumer (ex. Sim4Life) +TODO: add reference to Sim4life repo where they can be generated +#### Install +```bash +npm install @pact-foundation/pact-cli +``` +#### Publish +```bash +pact-broker publish ./pacts/05_licensed_items.json --tag licensed_items --consumer-app-version 8.2.1 --broker-base-url= --broker-username= --broker-password= +``` diff --git a/services/api-server/tests/unit/pact_broker/conftest.py b/services/api-server/tests/unit/pact_broker/conftest.py new file mode 100644 index 00000000000..e63b68a2012 --- /dev/null +++ b/services/api-server/tests/unit/pact_broker/conftest.py @@ -0,0 +1,110 @@ +import os +from threading import Thread +from time import sleep + +import pytest +import uvicorn +from fastapi import FastAPI +from servicelib.utils import unused_port +from simcore_service_api_server.api.dependencies.authentication import ( + Identity, + get_current_identity, +) + + +def pytest_addoption(parser: pytest.Parser) -> None: + group = parser.getgroup( + "Pact broker contract test", + description="Pact broker contract test specific parameters", + ) + group.addoption( + "--pact-broker-url", + action="store", + default=None, + help="URL pointing to the deployment to be tested", + ) + group.addoption( + "--pact-broker-username", + action="store", + default=None, + help="User name for logging into the deployment", + ) + group.addoption( + "--pact-broker-password", + action="store", + default=None, + help="User name for logging into the deployment", + ) + + +@pytest.fixture() +def pact_broker_credentials( + request: pytest.FixtureRequest, +): + # Get credentials from either CLI arguments or environment variables + broker_url = request.config.getoption("--broker-url", None) or os.getenv( + "PACT_BROKER_URL" + ) + broker_username = request.config.getoption("--broker-username", None) or os.getenv( + "PACT_BROKER_USERNAME" + ) + broker_password = request.config.getoption("--broker-password", None) or os.getenv( + "PACT_BROKER_PASSWORD" + ) + + # Identify missing credentials + missing = [ + name + for name, value in { + "PACT_BROKER_URL": broker_url, + "PACT_BROKER_USERNAME": broker_username, + "PACT_BROKER_PASSWORD": broker_password, + }.items() + if not value + ] + + if missing: + pytest.fail( + f"Missing Pact Broker credentials: {', '.join(missing)}. Set them as environment variables or pass them as CLI arguments." + ) + + return broker_url, broker_username, broker_password + + +def mock_get_current_identity() -> Identity: + return Identity(user_id=1, product_name="osparc", email="test@itis.swiss") + + +@pytest.fixture() +def running_test_server_url( + app: FastAPI, +): + """ + Spins up a FastAPI server in a background thread and yields a base URL. + The 'mocked_catalog_service' fixture ensures the function is already + patched by the time we start the server. 
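+
+    Pact verification exercises the provider over real HTTP, so an in-process
+    ASGI transport is not enough; a uvicorn server is started in a daemon thread
+    instead. Sketch of a consumer of this fixture (provider name is illustrative):
+
+        def test_verify(running_test_server_url: str):
+            Verifier("SomeProvider").add_transport(url=running_test_server_url)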
+ """ + # Override + app.dependency_overrides[get_current_identity] = mock_get_current_identity + + port = unused_port() + base_url = f"http://localhost:{port}" + + config = uvicorn.Config( + app, + host="localhost", + port=port, + log_level="info", + ) + server = uvicorn.Server(config) + + thread = Thread(target=server.run, daemon=True) + thread.start() + + # Wait a bit for the server to be ready + sleep(1) + + yield base_url # , before_server_start + + server.should_exit = True + thread.join() diff --git a/services/api-server/tests/unit/pact_broker/pacts/01_checkout_release.json b/services/api-server/tests/unit/pact_broker/pacts/01_checkout_release.json new file mode 100644 index 00000000000..9fd2d5b7d12 --- /dev/null +++ b/services/api-server/tests/unit/pact_broker/pacts/01_checkout_release.json @@ -0,0 +1,81 @@ +{ + "consumer": { + "name": "Sim4Life" + }, + "provider": { + "name": "OsparcApiServerCheckoutRelease" + }, + "interactions": [ + { + "description": "Checkout one license", + "request": { + "method": "POST", + "path": "/v0/wallets/35/licensed-items/99580844-77fa-41bb-ad70-02dfaf1e3965/checkout", + "headers": { + "Accept": "application/json", + "Content-Type": "application/json" + }, + "body": { + "number_of_seats": 1, + "service_run_id": "1740149365_21a9352a-1d46-41f9-9a9b-42ac888f5afb" + } + }, + "response": { + "status": 200, + "headers": { + "Content-Length": "294", + "Content-Type": "application/json", + "Server": "uvicorn" + }, + "body": { + "key": "MODEL_IX_HEAD", + "licensed_item_checkout_id": "25262183-392c-4268-9311-3c4256c46012", + "licensed_item_id": "99580844-77fa-41bb-ad70-02dfaf1e3965", + "num_of_seats": 1, + "product_name": "s4l", + "started_at": "2025-02-21T15:04:47.673828Z", + "stopped_at": null, + "user_id": 425, + "version": "1.0.0", + "wallet_id": 35 + } + } + }, + { + "description": "Release item", + "request": { + "method": "POST", + "path": "/v0/licensed-items/99580844-77fa-41bb-ad70-02dfaf1e3965/checked-out-items/25262183-392c-4268-9311-3c4256c46012/release", + "headers": { + "Accept": "application/json", + "Content-Type": "application/json" + } + }, + "response": { + "status": 200, + "headers": { + "Content-Length": "319", + "Content-Type": "application/json", + "Server": "uvicorn" + }, + "body": { + "key": "MODEL_IX_HEAD", + "licensed_item_checkout_id": "25262183-392c-4268-9311-3c4256c46012", + "licensed_item_id": "99580844-77fa-41bb-ad70-02dfaf1e3965", + "num_of_seats": 1, + "product_name": "s4l", + "started_at": "2025-02-21T15:04:47.673828Z", + "stopped_at": "2025-02-21T15:04:47.901169Z", + "user_id": 425, + "version": "1.0.0", + "wallet_id": 35 + } + } + } + ], + "metadata": { + "pactSpecification": { + "version": "3.0.0" + } + } +} diff --git a/services/api-server/tests/unit/pact_broker/pacts/05_licensed_items.json b/services/api-server/tests/unit/pact_broker/pacts/05_licensed_items.json new file mode 100644 index 00000000000..4d67be3ff33 --- /dev/null +++ b/services/api-server/tests/unit/pact_broker/pacts/05_licensed_items.json @@ -0,0 +1,156 @@ +{ + "consumer": { + "name": "Sim4Life" + }, + "provider": { + "name": "OsparcApiServerLicensedItems" + }, + "interactions": [ + { + "description": "List all available licensed items", + "request": { + "method": "GET", + "path": "/v0/licensed-items", + "headers": { + "Accept": "application/json", + "Content-Type": "application/json" + } + }, + "response": { + "status": 200, + "headers": { + "Content-Type": "application/json", + "Server": "uvicorn" + }, + "body": { + "items": [ + { + "created_at": 
"2025-02-19T13:46:30.258102Z", + "display_name": "3 Week Male Mouse V1.0", + "is_hidden_on_market": false, + "key": "MODEL_MOUSE_3W_M_POSABLE", + "licensed_item_id": "f26587de-abad-49cb-9b4f-e6e1fad7f5c1", + "licensed_resource_type": "VIP_MODEL", + "licensed_resources": [ + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": null, + "description": "Animal Models - 3 Week Male Mouse (B6C3F1) V1.0", + "doi": "10.13099/VIP91206-01-0", + "features": { + "age": "3 weeks", + "date": "2021-03-16", + "functionality": "Posable", + "height": "70 mm", + "name": "B6C3F1N Male 3W", + "sex": "male", + "species": "Mouse", + "version": "1.0", + "weight": "12.3 g" + }, + "id": 138, + "license_key": "MODEL_MOUSE_3W_M_POSABLE", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/3WeekMouse.png" + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md" + } + ], + "modified_at": "2025-02-19T13:46:30.258102Z", + "pricing_plan_id": 21, + "version": "1.0.0" + }, + { + "created_at": "2025-02-19T13:46:30.302673Z", + "display_name": "Big Male Rat V1.0", + "is_hidden_on_market": false, + "key": "MODEL_RAT567_M", + "licensed_item_id": "0713928d-9e36-444e-b720-26e97ad7d861", + "licensed_resource_type": "VIP_MODEL", + "licensed_resources": [ + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": null, + "description": "Animal Models - Big Male Rat V1-x", + "doi": "10.13099/VIP91101-01-0", + "features": { + "date": "2012-01-01", + "functionality": "Static", + "height": "260 mm", + "name": "Big Male Rat", + "sex": "male", + "species": "Rat", + "version": "1.0", + "weight": "567 g" + }, + "id": 21, + "license_key": "MODEL_RAT567_M", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/BigMaleRat567g.png" + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md" + }, + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": null, + "description": "Animal Models - Posable Big Male Rat V1-x", + "doi": "10.13099/VIP91101-01-1", + "features": { + "date": "2018-01-22", + "functionality": "Posable", + "height": "260 mm", + "name": "Big Male Rat", + "sex": "male", + "species": "Rat", + "version": "1.0", + "weight": "567 g" + }, + "id": 111, + "license_key": "MODEL_RAT567_M", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/BigMaleRat567g.png" + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md" + } + ], + "modified_at": "2025-02-19T13:46:30.302673Z", + "pricing_plan_id": 21, + "version": "1.0.0" + } + ], + "limit": 20, + "links": { + "first": "/v0/licensed-items?offset=0", + "last": "/v0/licensed-items?offset=0", + "next": null, + "prev": null, + "self": "/v0/licensed-items" + }, + "offset": 0, + "total": 2 + }, + "matchingRules": { + "headers": { + "$.Date": { + "match": "type" + } + } + } + } + } + ], + "metadata": { + "pactSpecification": { + "version": "3.0.0" + } + } +} diff --git 
a/services/api-server/tests/unit/pact_broker/test_pact_checkout_release.py b/services/api-server/tests/unit/pact_broker/test_pact_checkout_release.py new file mode 100644 index 00000000000..62ff031ad78 --- /dev/null +++ b/services/api-server/tests/unit/pact_broker/test_pact_checkout_release.py @@ -0,0 +1,132 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import os + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_webserver.licensed_items_checkouts import ( + LicensedItemCheckoutRpcGet, +) +from pact.v3 import Verifier +from pytest_mock import MockerFixture +from simcore_service_api_server._meta import API_VERSION +from simcore_service_api_server.api.dependencies.resource_usage_tracker_rpc import ( + get_resource_usage_tracker_client, +) +from simcore_service_api_server.api.dependencies.webserver_rpc import ( + get_wb_api_rpc_client, +) +from simcore_service_api_server.services_rpc.resource_usage_tracker import ( + ResourceUsageTrackerClient, +) +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient + +# Fake response based on values from 01_checkout_release.json +EXPECTED_CHECKOUT = LicensedItemCheckoutRpcGet.model_validate( + { + "key": "MODEL_IX_HEAD", + "licensed_item_checkout_id": "25262183-392c-4268-9311-3c4256c46012", + "licensed_item_id": "99580844-77fa-41bb-ad70-02dfaf1e3965", + "num_of_seats": 1, + "product_name": "s4l", + "started_at": "2025-02-21T15:04:47.673828Z", + "stopped_at": None, + "user_id": 425, + "version": "1.0.0", + "wallet_id": 35, + } +) +assert EXPECTED_CHECKOUT.stopped_at is None + + +EXPECTED_RELEASE = LicensedItemCheckoutRpcGet.model_validate( + { + "key": "MODEL_IX_HEAD", + "licensed_item_checkout_id": "25262183-392c-4268-9311-3c4256c46012", + "licensed_item_id": "99580844-77fa-41bb-ad70-02dfaf1e3965", + "num_of_seats": 1, + "product_name": "s4l", + "started_at": "2025-02-21T15:04:47.673828Z", + "stopped_at": "2025-02-21T15:04:47.901169Z", + "user_id": 425, + "version": "1.0.0", + "wallet_id": 35, + } +) +assert EXPECTED_RELEASE.stopped_at is not None + + +class DummyRpcClient: + pass + + +@pytest.fixture +async def mock_wb_api_server_rpc(app: FastAPI, mocker: MockerFixture) -> None: + + app.dependency_overrides[get_wb_api_rpc_client] = lambda: WbApiRpcClient( + _client=DummyRpcClient() + ) + + mocker.patch( + "simcore_service_api_server.services_rpc.wb_api_server._checkout_licensed_item_for_wallet", + return_value=EXPECTED_CHECKOUT, + ) + + mocker.patch( + "simcore_service_api_server.services_rpc.wb_api_server._release_licensed_item_for_wallet", + return_value=EXPECTED_RELEASE, + ) + + +@pytest.fixture +async def mock_rut_server_rpc(app: FastAPI, mocker: MockerFixture) -> None: + + app.dependency_overrides[get_resource_usage_tracker_client] = ( + lambda: ResourceUsageTrackerClient(_client=DummyRpcClient()) + ) + + mocker.patch( + "simcore_service_api_server.services_rpc.resource_usage_tracker._get_licensed_item_checkout", + return_value=EXPECTED_CHECKOUT, + ) + + +@pytest.mark.skipif( + not os.getenv("PACT_BROKER_URL"), + reason="This test runs only if PACT_BROKER_URL is provided", +) +def test_provider_against_pact( + pact_broker_credentials: tuple[str, str, str], + mock_wb_api_server_rpc: None, + mock_rut_server_rpc: None, + running_test_server_url: str, +) -> None: + """ + Use the Pact Verifier to check the real provider + against the generated contract. 
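+
+    Runs only when PACT_BROKER_URL is set (see the skipif marker above), e.g.:
+
+        PACT_BROKER_URL=... PACT_BROKER_USERNAME=... PACT_BROKER_PASSWORD=... make test-pacts
+
+    (see this folder's README for how consumer contracts are published)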
+ """ + broker_url, broker_username, broker_password = pact_broker_credentials + + broker_builder = ( + Verifier("OsparcApiServerCheckoutRelease") + .add_transport(url=running_test_server_url) + .broker_source( + broker_url, + username=broker_username, + password=broker_password, + selector=True, + ) + ) + + # NOTE: If you want to filter/test against specific contract use tags + verifier = broker_builder.consumer_tags( + "checkout_release" # <-- Here you define which pact to verify + ).build() + + # Set API version and run verification + verifier.set_publish_options(version=API_VERSION, tags=None, branch=None) + verifier.verify() diff --git a/services/api-server/tests/unit/pact_broker/test_pact_licensed_items.py b/services/api-server/tests/unit/pact_broker/test_pact_licensed_items.py new file mode 100644 index 00000000000..f04e4bd4737 --- /dev/null +++ b/services/api-server/tests/unit/pact_broker/test_pact_licensed_items.py @@ -0,0 +1,185 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import os + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_webserver.licensed_items import ( + LicensedItemRpcGet, + LicensedItemRpcGetPage, +) +from pact.v3 import Verifier +from pytest_mock import MockerFixture +from simcore_service_api_server._meta import API_VERSION +from simcore_service_api_server.api.dependencies.webserver_rpc import ( + get_wb_api_rpc_client, +) +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient + +# Fake response based on values from 05_licensed_items.json +EXPECTED_LICENSED_ITEMS = [ + { + "created_at": "2025-02-19T13:46:30.258102Z", + "display_name": "3 Week Male Mouse V1.0", + "is_hidden_on_market": False, + "key": "MODEL_MOUSE_3W_M_POSABLE", + "licensed_item_id": "f26587de-abad-49cb-9b4f-e6e1fad7f5c1", + "licensed_resource_type": "VIP_MODEL", + "licensed_resources": [ + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": None, + "description": "Animal Models - 3 Week Male Mouse (B6C3F1) V1.0", + "doi": "10.13099/VIP91206-01-0", + "features": { + "age": "3 weeks", + "date": "2021-03-16", + "functionality": "Posable", + "height": "70 mm", + "name": "B6C3F1N Male 3W", + "sex": "male", + "species": "Mouse", + "version": "1.0", + "weight": "12.3 g", + }, + "id": 138, + "license_key": "MODEL_MOUSE_3W_M_POSABLE", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/3WeekMouse.png", + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md", + } + ], + "modified_at": "2025-02-19T13:46:30.258102Z", + "pricing_plan_id": 21, + "version": "1.0.0", + }, + { + "created_at": "2025-02-19T13:46:30.302673Z", + "display_name": "Big Male Rat V1.0", + "is_hidden_on_market": False, + "key": "MODEL_RAT567_M", + "licensed_item_id": "0713928d-9e36-444e-b720-26e97ad7d861", + "licensed_resource_type": "VIP_MODEL", + "licensed_resources": [ + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": None, + "description": "Animal Models - Big Male Rat V1-x", + "doi": "10.13099/VIP91101-01-0", + "features": { + "date": "2012-01-01", + "functionality": "Static", + "height": "260 mm", + "name": "Big Male Rat", + "sex": "male", + "species": "Rat", + "version": "1.0", + 
"weight": "567 g", + }, + "id": 21, + "license_key": "MODEL_RAT567_M", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/BigMaleRat567g.png", + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md", + }, + { + "category_display": "Animal", + "category_id": "AnimalWholeBody", + "source": { + "available_from_url": None, + "description": "Animal Models - Posable Big Male Rat V1-x", + "doi": "10.13099/VIP91101-01-1", + "features": { + "date": "2018-01-22", + "functionality": "Posable", + "height": "260 mm", + "name": "Big Male Rat", + "sex": "male", + "species": "Rat", + "version": "1.0", + "weight": "567 g", + }, + "id": 111, + "license_key": "MODEL_RAT567_M", + "license_version": "V1.0", + "protection": "Code", + "thumbnail": "https://itis.swiss/assets/images/Virtual-Population/Animals-Cropped/BigMaleRat567g.png", + }, + "terms_of_use_url": "https://raw.githubusercontent.com/ITISFoundation/licenses/refs/heads/main/models/User%20License%20Animal%20Models%20v1.x.md", + }, + ], + "modified_at": "2025-02-19T13:46:30.302673Z", + "pricing_plan_id": 21, + "version": "1.0.0", + }, +] + + +EXPECTED_LICENSED_ITEMS_PAGE = LicensedItemRpcGetPage( + items=[LicensedItemRpcGet.model_validate(item) for item in EXPECTED_LICENSED_ITEMS], + total=len(EXPECTED_LICENSED_ITEMS), +) + + +class DummyRpcClient: + pass + + +@pytest.fixture +async def mock_wb_api_server_rpc(app: FastAPI, mocker: MockerFixture) -> None: + + app.dependency_overrides[get_wb_api_rpc_client] = lambda: WbApiRpcClient( + _client=DummyRpcClient() + ) + + mocker.patch( + "simcore_service_api_server.services_rpc.wb_api_server._get_licensed_items", + return_value=EXPECTED_LICENSED_ITEMS_PAGE, + ) + + +@pytest.mark.skipif( + not os.getenv("PACT_BROKER_URL"), + reason="This test runs only if PACT_BROKER_URL is provided", +) +def test_provider_against_pact( + pact_broker_credentials: tuple[str, str, str], + mock_wb_api_server_rpc: None, + running_test_server_url: str, +) -> None: + """ + Use the Pact Verifier to check the real provider + against the generated contract. 
+ """ + broker_url, broker_username, broker_password = pact_broker_credentials + + broker_builder = ( + Verifier("OsparcApiServerLicensedItems") + .add_transport(url=running_test_server_url) + .broker_source( + broker_url, + username=broker_username, + password=broker_password, + selector=True, + ) + ) + + # NOTE: If you want to filter/test against specific contract use tags + verifier = broker_builder.consumer_tags( + "licensed_items" # <-- Here you define which pact to verify + ).build() + + # Set API version and run verification + verifier.set_publish_options(version=API_VERSION, tags=None, branch=None) + verifier.verify() diff --git a/services/api-server/tests/unit/service/conftest.py b/services/api-server/tests/unit/service/conftest.py new file mode 100644 index 00000000000..542f234d69e --- /dev/null +++ b/services/api-server/tests/unit/service/conftest.py @@ -0,0 +1,150 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from typing import Any + +import pytest +from models_library.api_schemas_catalog import CATALOG_RPC_NAMESPACE +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.api_schemas_webserver.projects import ProjectCreateNew, ProjectGet +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName, RPCNamespace +from models_library.users import UserID +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.catalog_rpc_server import CatalogRpcSideEffects +from pytest_simcore.helpers.webserver_rpc_server import WebserverRpcSideEffects +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from simcore_service_api_server._service_jobs import JobService +from simcore_service_api_server._service_programs import ProgramService +from simcore_service_api_server._service_solvers import SolverService +from simcore_service_api_server._service_studies import StudyService +from simcore_service_api_server.services_http.webserver import AuthSession +from simcore_service_api_server.services_rpc.catalog import CatalogService +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient + + +async def catalog_rpc_side_effect(): + return CatalogRpcSideEffects() + + +@pytest.fixture +def mocked_rpc_client(mocker: MockerFixture) -> MockType: + """This fixture mocks the RabbitMQRPCClient.request method which is used + in all RPC clients in the api-server, regardeless of the namespace. 
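+
+    Requests are dispatched by namespace to the matching side-effect helper
+    (WebserverRpcSideEffects or CatalogRpcSideEffects), so the service layer can
+    be tested without a broker while still asserting which RPC was issued, e.g.:
+
+        assert mocked_rpc_client.request.call_args.args == ("catalog", "get_service")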
+ """ + + async def _request( + namespace: RPCNamespace, + method_name: RPCMethodName, + **kwargs, + ) -> Any: + + kwargs.pop("timeout_s", None) # remove timeout from kwargs + + # NOTE: we could switch to different namespaces + if namespace == WEBSERVER_RPC_NAMESPACE: + webserver_side_effect = WebserverRpcSideEffects() + return await getattr(webserver_side_effect, method_name)( + mocker.MagicMock(), **kwargs + ) + + if namespace == CATALOG_RPC_NAMESPACE: + catalog_side_effect = CatalogRpcSideEffects() + return await getattr(catalog_side_effect, method_name)( + mocker.MagicMock(), **kwargs + ) + + pytest.fail(f"Unexpected namespace {namespace} and method {method_name}") + + mock = mocker.MagicMock(spec=RabbitMQRPCClient) + mock.request.side_effect = _request + + return mock + + +@pytest.fixture +def wb_api_rpc_client( + mocked_rpc_client: MockType, +) -> WbApiRpcClient: + return WbApiRpcClient(_client=mocked_rpc_client) + + +@pytest.fixture +def auth_session( + mocker: MockerFixture, + # mocked_webserver_rest_api_base: MockRouter, app: FastAPI +) -> AuthSession: + # return AuthSession.create(app, session_cookies={}, product_extra_headers={}) + mock = mocker.AsyncMock(spec=AuthSession) + + async def _create_project(project: ProjectCreateNew, **kwargs): + example = ProjectGet.model_json_schema()["examples"][0] + example.update(project.model_dump(exclude_unset=True)) + return ProjectGet.model_validate(example) + + mock.create_project.side_effect = _create_project + return mock + + +@pytest.fixture +def job_service( + auth_session: AuthSession, + wb_api_rpc_client: WbApiRpcClient, + product_name: ProductName, + user_id: UserID, +) -> JobService: + return JobService( + _web_rest_client=auth_session, + _web_rpc_client=wb_api_rpc_client, + user_id=user_id, + product_name=product_name, + ) + + +@pytest.fixture +def catalog_service( + mocked_rpc_client: MockType, + product_name: ProductName, + user_id: UserID, +) -> CatalogService: + return CatalogService( + _rpc_client=mocked_rpc_client, user_id=user_id, product_name=product_name + ) + + +@pytest.fixture +def solver_service( + catalog_service: CatalogService, + job_service: JobService, + product_name: ProductName, + user_id: UserID, +) -> SolverService: + return SolverService( + catalog_service=catalog_service, + job_service=job_service, + user_id=user_id, + product_name=product_name, + ) + + +@pytest.fixture +def study_service( + job_service: JobService, + product_name: ProductName, + user_id: UserID, +) -> StudyService: + + return StudyService( + job_service=job_service, + user_id=user_id, + product_name=product_name, + ) + + +@pytest.fixture +def program_service( + catalog_service: CatalogService, +) -> ProgramService: + return ProgramService(catalog_service=catalog_service) diff --git a/services/api-server/tests/unit/service/test_service_catalog.py b/services/api-server/tests/unit/service/test_service_catalog.py new file mode 100644 index 00000000000..71814f753c9 --- /dev/null +++ b/services/api-server/tests/unit/service/test_service_catalog.py @@ -0,0 +1,91 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from models_library.api_schemas_catalog.services import LatestServiceGet, ServiceGetV2 +from models_library.products import ProductName +from models_library.services_history import ServiceRelease +from models_library.users import UserID +from pydantic import HttpUrl +from pytest_mock import MockType 
+from simcore_service_api_server.models.schemas.solvers import Solver +from simcore_service_api_server.services_rpc.catalog import CatalogService + + +def _to_solver_schema( + service: LatestServiceGet | ServiceGetV2, href_self: HttpUrl | None = None +) -> Solver: + # NOTE: this is an adapter around models on CatalogService interface + return Solver( + id=service.key, + version=service.version, + title=service.name, + maintainer=service.owner or service.contact or "UNKNOWN", + url=href_self, + description=service.description, + ) + + +async def test_catalog_service_read_solvers( + product_name: ProductName, + user_id: UserID, + mocked_rpc_client: MockType, + catalog_service: CatalogService, +): + # Step 1: List latest releases in a page + latest_releases, meta = await catalog_service.list_latest_releases() + solver_releases_page = [_to_solver_schema(srv) for srv in latest_releases] + + assert solver_releases_page, "Releases page should not be empty" + assert meta.offset == 0 + + # Step 2: Select one release and list solver releases + selected_solver = solver_releases_page[0] + releases, meta = await catalog_service.list_release_history_latest_first( + filter_by_service_key=selected_solver.id, + ) + assert releases, "Solver releases should not be empty" + assert meta.offset == 0 + + # Step 3: Take the latest solver release and get solver details + oldest_release: ServiceRelease = releases[-1] + + service: ServiceGetV2 = await catalog_service.get( + name=selected_solver.id, + version=oldest_release.version, + ) + solver = _to_solver_schema(service) + assert solver.id == selected_solver.id + assert solver.version == oldest_release.version + + # Step 4: Get service ports for the solver + ports = await catalog_service.get_service_ports( + name=selected_solver.id, + version=oldest_release.version, + ) + + # Verify ports are returned and contain both inputs and outputs + assert ports, "Service ports should not be empty" + assert any(port.kind == "input" for port in ports), "Should contain input ports" + assert any(port.kind == "output" for port in ports), "Should contain output ports" + + # checks calls to rpc + assert mocked_rpc_client.request.call_count == 4 + assert mocked_rpc_client.request.call_args_list[0].args == ( + "catalog", + "list_services_paginated", + ) + assert mocked_rpc_client.request.call_args_list[1].args == ( + "catalog", + "list_my_service_history_latest_first", + ) + assert mocked_rpc_client.request.call_args_list[2].args == ( + "catalog", + "get_service", + ) + assert mocked_rpc_client.request.call_args_list[3].args == ( + "catalog", + "get_service_ports", + ) diff --git a/services/api-server/tests/unit/service/test_service_jobs.py b/services/api-server/tests/unit/service/test_service_jobs.py new file mode 100644 index 00000000000..d6829339507 --- /dev/null +++ b/services/api-server/tests/unit/service/test_service_jobs.py @@ -0,0 +1,77 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from pytest_mock import MockType +from simcore_service_api_server._service_jobs import JobService +from simcore_service_api_server.models.schemas.jobs import Job, JobInputs +from simcore_service_api_server.models.schemas.solvers import Solver + + +async def test_list_jobs_by_resource_prefix( + mocked_rpc_client: MockType, + job_service: JobService, +): + # Test with default pagination parameters + jobs, page_meta = await job_service.list_jobs( + job_parent_resource_name="solvers/some-solver" 
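+        # NOTE: "solvers/some-solver" is an illustrative parent-resource name;
+        # it filters the listing to jobs created under that solver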
+ ) + + assert isinstance(jobs, list) + + assert len(jobs) == page_meta.count + assert mocked_rpc_client.request.call_args.args == ( + "webserver", + "list_projects_marked_as_jobs", + ) + + # Check pagination info + assert page_meta.total >= 0 + assert page_meta.limit > 0 + assert page_meta.offset == 0 + + +async def test_create_job( + mocked_rpc_client: MockType, + job_service: JobService, +): + # Create mock solver and inputs + solver = Solver( + id="simcore/services/comp/test-solver", + version="1.0.0", + title="Test Solver", + maintainer="test@example.com", + description="Test solver for testing", + url=None, + ) + + inputs = JobInputs(values={}) + + # Mock URL generator + def mock_url_for(*args, **kwargs): + return "https://example.com/api/v1/jobs/test-job" + + # Test job creation + job, project = await job_service.create_job( + solver_or_program=solver, + inputs=inputs, + parent_project_uuid=None, + parent_node_id=None, + url_for=mock_url_for, + hidden=False, + project_name="Test Job Project", + description="Test description", + ) + + # Verify job and project creation + assert isinstance(job, Job) + assert job.id == project.uuid + assert "test-solver" in job.name + + # Verify API calls + assert mocked_rpc_client.request.call_args.args == ( + "webserver", + "mark_project_as_job", + ) diff --git a/services/api-server/tests/unit/service/test_service_solvers.py b/services/api-server/tests/unit/service/test_service_solvers.py new file mode 100644 index 00000000000..a32d3b82f6f --- /dev/null +++ b/services/api-server/tests/unit/service/test_service_solvers.py @@ -0,0 +1,75 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from models_library.products import ProductName +from models_library.users import UserID +from pytest_mock import MockType +from simcore_service_api_server._service_jobs import JobService +from simcore_service_api_server._service_solvers import SolverService +from simcore_service_api_server.exceptions.custom_errors import ( + ServiceConfigurationError, +) +from simcore_service_api_server.models.schemas.solvers import Solver +from simcore_service_api_server.services_rpc.catalog import CatalogService + + +async def test_get_solver( + mocked_rpc_client: MockType, + solver_service: SolverService, + product_name: ProductName, + user_id: UserID, +): + solver = await solver_service.get_solver( + solver_key="simcore/services/comp/solver-1", + solver_version="1.0.0", + ) + + assert isinstance(solver, Solver) + assert solver.id == "simcore/services/comp/solver-1" + + assert mocked_rpc_client.request.called + assert mocked_rpc_client.request.call_args.args == ("catalog", "get_service") + + +async def test_list_jobs( + mocked_rpc_client: MockType, + solver_service: SolverService, +): + # Test default parameters + jobs, page_meta = await solver_service.list_jobs() + + assert jobs + assert len(jobs) == page_meta.count + + assert mocked_rpc_client.request.call_args.args == ( + "webserver", + "list_projects_marked_as_jobs", + ) + + assert page_meta.total >= 0 + assert page_meta.limit >= page_meta.count + assert page_meta.offset == 0 + assert page_meta.count > 0 + + +async def test_solver_service_init_raises_configuration_error( + mocked_rpc_client: MockType, + job_service: JobService, + catalog_service: CatalogService, + user_id: UserID, +): + # Create a product name that is inconsistent with the user_id + invalid_product_name = ProductName("invalid_product") + + with 
pytest.raises(ServiceConfigurationError, match="SolverService"): + SolverService( + catalog_service=catalog_service, + job_service=job_service, + user_id=user_id, + product_name=invalid_product_name, + ) + # Verify the RPC call was made to check consistency + assert not mocked_rpc_client.request.called diff --git a/services/api-server/tests/unit/service/test_service_studies.py b/services/api-server/tests/unit/service/test_service_studies.py new file mode 100644 index 00000000000..fa9b9921866 --- /dev/null +++ b/services/api-server/tests/unit/service/test_service_studies.py @@ -0,0 +1,62 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pytest_mock import MockType +from simcore_service_api_server._service_studies import StudyService +from simcore_service_api_server.models.schemas.studies import StudyID + + +async def test_list_jobs_no_study_id( + mocked_rpc_client: MockType, + study_service: StudyService, +): + # Test with default parameters + jobs, page_meta = await study_service.list_jobs() + + assert isinstance(jobs, list) + assert mocked_rpc_client.request.call_args.args == ( + "webserver", + "list_projects_marked_as_jobs", + ) + + # Check pagination info + assert page_meta.total >= 0 + assert page_meta.limit > 0 + assert page_meta.offset == 0 + + # Verify proper prefix was used + assert ( + mocked_rpc_client.request.call_args.kwargs[ + "filters" + ].job_parent_resource_name_prefix + == "study" + ) + + # Check pagination parameters were passed correctly + assert mocked_rpc_client.request.call_args.kwargs["offset"] == page_meta.offset + assert mocked_rpc_client.request.call_args.kwargs["limit"] == page_meta.limit + + +async def test_list_jobs_with_study_id( + mocked_rpc_client: MockType, + study_service: StudyService, +): + # Test with a specific study ID + study_id = StudyID("914c7c33-8fb6-4164-9787-7b88b5c148bf") + jobs, page_meta = await study_service.list_jobs(filter_by_study_id=study_id) + + assert isinstance(jobs, list) + + # Verify proper prefix was used with study ID + assert ( + mocked_rpc_client.request.call_args.kwargs[ + "filters" + ].job_parent_resource_name_prefix + == f"study/{study_id}" + ) + + # Check pagination parameters were passed correctly + assert mocked_rpc_client.request.call_args.kwargs["offset"] == page_meta.offset + assert mocked_rpc_client.request.call_args.kwargs["limit"] == page_meta.limit diff --git a/services/api-server/tests/unit/test__fastapi.py b/services/api-server/tests/unit/test__fastapi.py index 6823a0f9bf0..5c1e917f25c 100644 --- a/services/api-server/tests/unit/test__fastapi.py +++ b/services/api-server/tests/unit/test__fastapi.py @@ -18,16 +18,18 @@ """ import urllib +import urllib.parse from uuid import UUID import pytest from faker import Faker from fastapi import APIRouter, FastAPI, status from fastapi.testclient import TestClient +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.programs import VersionStr from simcore_service_api_server.models.schemas.solvers import ( Solver, SolverKeyId, - VersionStr, ) @@ -78,7 +80,7 @@ async def stop_job( } the_app = FastAPI() - the_app.include_router(router, prefix="/v0") + the_app.include_router(router, prefix=f"/{API_VTAG}") return TestClient(the_app) @@ -90,7 +92,9 @@ def test_fastapi_route_paths_in_paths(client: TestClient, faker: Faker): # can be raw raw_solver_key = solver_key - resp = 
client.get(f"/v0/solvers/{raw_solver_key}/releases/{version}/jobs/{job_id}") + resp = client.get( + f"/{API_VTAG}/solvers/{raw_solver_key}/releases/{version}/jobs/{job_id}" + ) assert resp.status_code == status.HTTP_200_OK assert resp.json() == { "action": "get_job", @@ -102,7 +106,7 @@ def test_fastapi_route_paths_in_paths(client: TestClient, faker: Faker): # can be quoted quoted_solver_key = urllib.parse.quote_plus("simcore/services/comp/itis/isolve") resp = client.get( - f"/v0/solvers/{quoted_solver_key}/releases/{version}/jobs/{job_id}" + f"/{API_VTAG}/solvers/{quoted_solver_key}/releases/{version}/jobs/{job_id}" ) assert resp.status_code == status.HTTP_200_OK assert resp.json() == { @@ -113,27 +117,29 @@ def test_fastapi_route_paths_in_paths(client: TestClient, faker: Faker): } -def test_fastapi_route_name_parsing(client: TestClient, faker: Faker): +def test_fastapi_route_name_parsing(client: TestClient, app: FastAPI, faker: Faker): # # Ensures ':' is allowed in routes # SEE https://github.com/encode/starlette/pull/1657 - solver_key = Solver.Config.schema_extra["example"]["id"] - version = Solver.Config.schema_extra["example"]["version"] + solver_key = Solver.model_json_schema()["example"]["id"] + version = Solver.model_json_schema()["example"]["version"] job_id = faker.uuid4() # Checks whether parse correctly ":action" suffix for action in ("start", "stop"): - expected_path = client.app.router.url_path_for( + expected_path = app.router.url_path_for( f"{action}_job", solver_key=solver_key, version=version, job_id=job_id ) resp = client.post( - f"/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}:{action}" + f"/{API_VTAG}/solvers/{solver_key}/releases/{version}/jobs/{job_id}:{action}" ) - assert resp.url.endswith(expected_path) + assert resp.url.path == expected_path assert resp.status_code == status.HTTP_200_OK assert resp.json()["action"] == f"{action}_job" - resp = client.get(f"/v0/solvers/{solver_key}/releases/{version}/jobs/{job_id}") + resp = client.get( + f"/{API_VTAG}/solvers/{solver_key}/releases/{version}/jobs/{job_id}" + ) assert resp.status_code == status.HTTP_200_OK assert resp.json()["action"] == "get_job" diff --git a/services/api-server/tests/unit/test__models_examples.py b/services/api-server/tests/unit/test__models_examples.py new file mode 100644 index 00000000000..78931fd264f --- /dev/null +++ b/services/api-server/tests/unit/test__models_examples.py @@ -0,0 +1,22 @@ +from itertools import chain +from typing import Any + +import pytest +import simcore_service_api_server.models.schemas +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + chain(walk_model_examples_in_package(simcore_service_api_server.models)), +) +def test_all_models_library_models_config_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/services/api-server/tests/unit/test_api__study_workflows.py b/services/api-server/tests/unit/test_api__study_workflows.py new file mode 100644 index 00000000000..0656ad49732 --- /dev/null +++ b/services/api-server/tests/unit/test_api__study_workflows.py @@ -0,0 +1,313 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import asyncio +import functools +import 
io +import json +import textwrap +from contextlib import suppress +from pathlib import Path +from typing import TypedDict + +import httpx +import pytest +import respx +from fastapi.encoders import jsonable_encoder +from pytest_mock import MockerFixture +from pytest_simcore.helpers.httpx_calls_capture_models import CreateRespxMockCallback +from simcore_sdk.node_ports_common.filemanager import UploadedFile +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.pagination import OnePage +from simcore_service_api_server.models.schemas.errors import ErrorGet +from simcore_service_api_server.models.schemas.files import File +from simcore_service_api_server.models.schemas.jobs import Job, JobOutputs, JobStatus +from simcore_service_api_server.models.schemas.studies import StudyPort + + +def _handle_http_status_error(func): + @functools.wraps(func) + async def _handler(self, *args, **kwargs): + try: + return await func(self, *args, **kwargs) + + except httpx.HTTPStatusError as exc: + msg = exc.response.text + # rewrite exception's message + with suppress(Exception), io.StringIO() as sio: + print(exc, file=sio) + for e in ErrorGet(**exc.response.json()).errors: + print("\t", e, file=sio) + msg = sio.getvalue() + + raise httpx.HTTPStatusError( + message=msg, request=exc.request, response=exc.response + ) from exc + + return _handler + + +class _BaseTestApi: + def __init__( + self, client: httpx.AsyncClient, tmp_path: Path | None = None, **request_kwargs + ): + self._client = client + self._request_kwargs = request_kwargs + self._tmp_path = tmp_path + + +class FilesTestApi(_BaseTestApi): + @_handle_http_status_error + async def upload_file(self, path: Path) -> File: + with path.open("rb") as fh: + resp = await self._client.put( + f"{API_VTAG}/files/content", + files={"file": fh}, + **self._request_kwargs, + ) + resp.raise_for_status() + return File(**resp.json()) + + @_handle_http_status_error + async def download_file(self, file_id, suffix=".downloaded") -> Path: + assert self._tmp_path + path_to_save = self._tmp_path / f"{file_id}{suffix}" + resp = await self._client.get( + f"{API_VTAG}/files/{file_id}/content", + follow_redirects=True, + **self._request_kwargs, + ) + resp.raise_for_status() + path_to_save.write_bytes(resp.content) + return path_to_save + + +class StudiesTestApi(_BaseTestApi): + @_handle_http_status_error + async def list_study_ports(self, study_id): + resp = await self._client.get( + f"/{API_VTAG}/studies/{study_id}/ports", + **self._request_kwargs, + ) + resp.raise_for_status() + return OnePage[StudyPort](**resp.json()) + + @_handle_http_status_error + async def create_study_job(self, study_id, job_inputs: dict) -> Job: + resp = await self._client.post( + f"{API_VTAG}/studies/{study_id}/jobs", + json=jsonable_encoder(job_inputs), + **self._request_kwargs, + ) + resp.raise_for_status() + return Job(**resp.json()) + + @_handle_http_status_error + async def start_study_job(self, study_id, job_id) -> JobStatus: + resp = await self._client.post( + f"{API_VTAG}/studies/{study_id}/jobs/{job_id}:start", + **self._request_kwargs, + ) + resp.raise_for_status() + return JobStatus(**resp.json()) + + @_handle_http_status_error + async def inspect_study_job(self, study_id, job_id) -> JobStatus: + resp = await self._client.post( + f"/{API_VTAG}/studies/{study_id}/jobs/{job_id}:inspect", + **self._request_kwargs, + ) + resp.raise_for_status() + return JobStatus(**resp.json()) + + @_handle_http_status_error + async def get_study_job_outputs(self, study_id, 
job_id) -> JobOutputs: + resp = await self._client.post( + f"{API_VTAG}/studies/{study_id}/jobs/{job_id}/outputs", + **self._request_kwargs, + ) + resp.raise_for_status() + return JobOutputs(**resp.json()) + + @_handle_http_status_error + async def delete_study_job(self, study_id, job_id) -> None: + resp = await self._client.delete( + f"{API_VTAG}/studies/{study_id}/jobs/{job_id}", + **self._request_kwargs, + ) + resp.raise_for_status() + + +@pytest.fixture +def input_json_path(tmp_path: Path) -> Path: + # https://github.com/wvangeit/osparc-pyapi-tests/blob/master/noninter1/input.json + p = tmp_path / "input.json" + data = {"f1": 3} + p.write_text(json.dumps(data)) + return p + + +@pytest.fixture +def input_data_path(tmp_path: Path) -> Path: + p = tmp_path / "input.data" + data = {"x1": 5.0, "y2": 7.5} + p.write_text(json.dumps(data)) + return p + + +@pytest.fixture +def test_py_path(tmp_path: Path) -> Path: + p = tmp_path / "test.py" + code = textwrap.dedent( + """\ + import os + import json + + from pathlib import Path + + print("Konichiwa. O genki desu ka?") + + input_path = Path(os.environ["INPUT_FOLDER"]) + output_path = Path(os.environ["OUTPUT_FOLDER"]) + + test_data_path = input_path / "input.data" + + test_data = json.loads(test_data_path.read_text()) + + output_paths = {} + for output_i in range(1, 6): + output_paths[output_i] = output_path / f"output_{output_i}" + + output_paths[output_i].mkdir(parents=True, exist_ok=True) + + output_data_path = output_paths[1] / "output.data" + + output_data_path.write_text(json.dumps(test_data)) + + print(f"Wrote output files to: {output_path.resolve()}") + + print("Genki desu") + """ + ) + p.write_text(code) + return p + + +class MockedBackendApiDict(TypedDict): + webserver: respx.MockRouter | None + storage: respx.MockRouter | None + director_v2: respx.MockRouter | None + + +@pytest.fixture +def mocked_backend( + project_tests_dir: Path, + mocked_webserver_rest_api_base: respx.MockRouter, + mocked_webserver_rpc_api: respx.MockRouter, + mocked_storage_rest_api_base: respx.MockRouter, + mocked_directorv2_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + mocker: MockerFixture, +) -> MockedBackendApiDict: + # S3 and storage are accessed via simcore-sdk + mock = mocker.patch( + "simcore_service_api_server.api.routes.files.storage_upload_path", autospec=True + ) + mock.return_value = UploadedFile(store_id=0, etag="123") + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_webserver_rest_api_base, + mocked_storage_rest_api_base, + mocked_directorv2_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "run_study_workflow.json", + side_effects_callbacks=[], + ) + return MockedBackendApiDict( + webserver=mocked_webserver_rest_api_base, + storage=mocked_storage_rest_api_base, + director_v2=mocked_directorv2_rest_api_base, + ) + + +@pytest.mark.acceptance_test( + "Reproduces https://github.com/wvangeit/osparc-pyapi-tests/blob/master/noninter1/run_study.py" +) +async def test_run_study_workflow( + client: httpx.AsyncClient, + auth: httpx.BasicAuth, + mocked_backend: MockedBackendApiDict, + tmp_path: Path, + input_json_path: Path, + input_data_path: Path, + test_py_path: Path, +): + template_id = "aeab71fe-f71b-11ee-8fca-0242ac140008" + assert client.headers["Content-Type"] == "application/json" + client.headers.pop("Content-Type") + + files_api = FilesTestApi(client, tmp_path, auth=auth) + studies_api = StudiesTestApi(client, auth=auth) + + # lists + study_ports = await 
studies_api.list_study_ports(study_id=template_id) + assert study_ports.total == 11 + + # uploads input files + test_py_file: File = await files_api.upload_file(path=test_py_path) + assert test_py_file.filename == test_py_path.name + + test_data_file: File = await files_api.upload_file(path=input_data_path) + assert test_data_file.filename == input_data_path.name + + test_json_file: File = await files_api.upload_file(path=input_json_path) + assert test_json_file.filename == input_json_path.name + + # creates job + input_values = { + "InputInt": 42, + "InputString": "Z43", + "InputArray": [1, 2, 3], + "InputNumber": 3.14, + "InputBool": False, + "InputFile": test_json_file, + } + + new_job: Job = await studies_api.create_study_job( + study_id=template_id, + job_inputs={"values": input_values}, + ) + + # start & inspect job until done + await studies_api.start_study_job(study_id=template_id, job_id=new_job.id) + + job_status: JobStatus = await studies_api.inspect_study_job( + study_id=template_id, job_id=new_job.id + ) + + while job_status.state not in {"SUCCESS", "FAILED"}: + job_status = await studies_api.inspect_study_job( + study_id=template_id, job_id=new_job.id + ) + print(f"Status: [{job_status.state}]") + + await asyncio.sleep(1) + + print(await studies_api.inspect_study_job(study_id=template_id, job_id=new_job.id)) + + # get outputs + job_outputs = await studies_api.get_study_job_outputs( + study_id=template_id, job_id=new_job.id + ) + + assert job_outputs.results["OutputInt"] == input_values["InputInt"] + assert job_outputs.results["OutputString"] == input_values["InputString"] + assert job_outputs.results["OutputArray"] == input_values["InputArray"] + assert job_outputs.results["OutputNumber"] == input_values["InputNumber"] + assert job_outputs.results["OutputBool"] == input_values["InputBool"] + + # deletes + await studies_api.delete_study_job(study_id=template_id, job_id=new_job.id) diff --git a/services/api-server/tests/unit/test_api_dependencies.py b/services/api-server/tests/unit/test_api_dependencies.py new file mode 100644 index 00000000000..f9486382cd8 --- /dev/null +++ b/services/api-server/tests/unit/test_api_dependencies.py @@ -0,0 +1,146 @@ +from typing import Annotated + +import pytest +from fastapi import Depends, FastAPI, status +from fastapi.testclient import TestClient +from pydantic import ValidationError +from simcore_service_api_server.api.dependencies.models_schemas_jobs_filters import ( + get_job_metadata_filter, +) +from simcore_service_api_server.models.schemas.jobs_filters import ( + JobMetadataFilter, + MetadataFilterItem, +) + + +def test_get_metadata_filter(): + # Test with None input + assert get_job_metadata_filter(None) is None + + # Test with empty list + assert get_job_metadata_filter([]) is None + + # Test with valid input (matching the example in the docstring) + input_data = ["key1:val*", "key2:exactval"] + result = get_job_metadata_filter(input_data) + + expected = JobMetadataFilter( + any=[ + MetadataFilterItem(name="key1", pattern="val*"), + MetadataFilterItem(name="key2", pattern="exactval"), + ] + ) + + assert result is not None + assert len(result.any) == 2 + assert result.any[0].name == "key1" + assert result.any[0].pattern == "val*" + assert result.any[1].name == "key2" + assert result.any[1].pattern == "exactval" + assert result == expected + + # Test with invalid input (missing colon) + input_data = ["key1val", "key2:exactval"] + result = get_job_metadata_filter(input_data) + + assert result is not None + assert len(result.any) == 1 + 
assert result.any[0].name == "key2" + assert result.any[0].pattern == "exactval" + + # Test with empty pattern not allowed + input_data = ["key1:", "key2:exactval"] + with pytest.raises(ValidationError) as exc_info: + get_job_metadata_filter(input_data) + + assert exc_info.value.errors()[0]["type"] == "string_too_short" + + +def test_metadata_filter_in_api_route(): + # Create a test FastAPI app + app = FastAPI() + + # Define a route that uses the get_metadata_filter dependency + @app.get("/test-filter") + def filter_endpoint( + metadata_filter: Annotated[ + JobMetadataFilter | None, Depends(get_job_metadata_filter) + ] = None, + ): + if not metadata_filter: + return {"filters": None} + + # Convert to dict for easier comparison in test + return { + "filters": { + "any": [ + {"name": item.name, "pattern": item.pattern} + for item in metadata_filter.any + ] + } + } + + # Create a test client + client = TestClient(app) + + # Test with no filter + response = client.get("/test-filter") + assert response.status_code == status.HTTP_200_OK + assert response.json() == {"filters": None} + + # Test with single filter + response = client.get("/test-filter?metadata.any=key1:val*") + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "filters": {"any": [{"name": "key1", "pattern": "val*"}]} + } + + # Test with multiple filters + response = client.get( + "/test-filter?metadata.any=key1:val*&metadata.any=key2:exactval" + ) + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "filters": { + "any": [ + {"name": "key1", "pattern": "val*"}, + {"name": "key2", "pattern": "exactval"}, + ] + } + } + + # Test with invalid filter (should skip the invalid one) + response = client.get( + "/test-filter?metadata.any=invalid&metadata.any=key2:exactval" + ) + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "filters": {"any": [{"name": "key2", "pattern": "exactval"}]} + } + + # Test with URL-encoded characters + # Use special characters that need encoding: space, &, =, +, /, ? 
+    encoded_query = "/test-filter?metadata.any=special%20key:value%20with%20spaces&metadata.any=symbols:a%2Bb%3Dc%26d%3F%2F"
+    response = client.get(encoded_query)
+    assert response.status_code == status.HTTP_200_OK
+    assert response.json() == {
+        "filters": {
+            "any": [
+                {"name": "special key", "pattern": "value with spaces"},
+                {"name": "symbols", "pattern": "a+b=c&d?/"},
+            ]
+        }
+    }
+
+    # Test with Unicode characters
+    unicode_query = "/test-filter?metadata.any=emoji:%F0%9F%98%8A&metadata.any=international:caf%C3%A9"
+    response = client.get(unicode_query)
+    assert response.status_code == status.HTTP_200_OK
+    assert response.json() == {
+        "filters": {
+            "any": [
+                {"name": "emoji", "pattern": "😊"},
+                {"name": "international", "pattern": "café"},
+            ]
+        }
+    }
diff --git a/services/api-server/tests/unit/test_api_files.py b/services/api-server/tests/unit/test_api_files.py
new file mode 100644
index 00000000000..78a150c6c61
--- /dev/null
+++ b/services/api-server/tests/unit/test_api_files.py
@@ -0,0 +1,349 @@
+# pylint: disable=protected-access
+# pylint: disable=redefined-outer-name
+# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
+# pylint: disable=unused-variable
+
+import datetime
+from pathlib import Path
+from typing import Any
+from uuid import UUID
+
+import httpx
+import pytest
+import respx
+import yarl
+from aioresponses import aioresponses as AioResponsesMock
+from faker import Faker
+from fastapi import status
+from fastapi.encoders import jsonable_encoder
+from httpx import AsyncClient
+from models_library.api_schemas_storage.storage_schemas import (
+    ETag,
+    FileUploadCompletionBody,
+    UploadedPart,
+)
+from models_library.basic_types import SHA256Str
+from pydantic import TypeAdapter
+from pytest_simcore.helpers.httpx_calls_capture_models import (
+    CreateRespxMockCallback,
+    HttpApiCallCaptureModel,
+)
+from respx import MockRouter
+from simcore_service_api_server._meta import API_VTAG
+from simcore_service_api_server.models.domain.files import File
+from simcore_service_api_server.models.pagination import Page
+from simcore_service_api_server.models.schemas.files import (
+    ClientFileUploadData,
+    UserFile,
+)
+
+_FAKER = Faker()
+
+
+class DummyFileData:
+    """Static class for providing consistent dummy file data for testing"""
+
+    _file_id: UUID = UUID("3fa85f64-5717-4562-b3fc-2c963f66afa6")
+    _file_name: str = "myfile.txt"
+    _final_e_tag: ETag = "07d1c1a4-b073-4be7-b022-f405d90e99aa"
+    _file_size: int = 100000
+    _file_sha256_checksum: SHA256Str = SHA256Str(
+        "E7a5B06A880dDee55A16fbc27Dc29705AE1aceadcaf0aDFd15fAF839ff5E2C2e"
+    )
+
+    @classmethod
+    def file(cls) -> File:
+        return File(
+            id=File.create_id(
+                cls._file_size,
+                cls._file_name,
+                datetime.datetime.now(datetime.UTC).isoformat(),
+            ),
+            filename=cls._file_name,
+            e_tag="",
+            sha256_checksum=cls._file_sha256_checksum,
+        )
+
+    @classmethod
+    def client_file(cls) -> UserFile:
+        return TypeAdapter(UserFile).validate_python(
+            {
+                "filename": cls._file_name,
+                "filesize": cls._file_size,
+                "sha256_checksum": cls._file_sha256_checksum,
+            },
+        )
+
+    @classmethod
+    def file_size(cls) -> int:
+        return cls._file_size
+
+    @classmethod
+    def uploaded_parts(cls) -> FileUploadCompletionBody:
+        return FileUploadCompletionBody(
+            parts=[UploadedPart(number=ii + 1, e_tag=_FAKER.uuid4()) for ii in range(5)]
+        )
+
+    @classmethod
+    def final_e_tag(cls) -> ETag:
+        return cls._final_e_tag
+
+    @classmethod
+    def checksum(cls) -> SHA256Str:
+        return cls._file_sha256_checksum
+
+
+@pytest.mark.xfail(reason="Under dev") +async def test_list_files_legacy( + client: AsyncClient, mocked_storage_rest_api_base: MockRouter +): + response = await client.get(f"{API_VTAG}/files") + + assert response.status_code == status.HTTP_200_OK + + TypeAdapter(File).validate_python(response.json()) + + assert response.json() == [ + { + "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "filename": "string", + "content_type": "string", + "checksum": "string", + } + ] + + +@pytest.mark.xfail(reason="Under dev") +async def test_list_files_with_pagination( + client: AsyncClient, + mocked_storage_rest_api_base: MockRouter, +): + response = await client.get(f"{API_VTAG}/files/page") + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "items": [ + { + "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "filename": "string", + "content_type": "string", + "checksum": "string", + } + ], + "total": 0, + "limit": 1, + "offset": 0, + "links": { + "first": "/api/v1/users?limit=1&offset1", + "last": "/api/v1/users?limit=1&offset1", + "self": "/api/v1/users?limit=1&offset1", + "next": "/api/v1/users?limit=1&offset1", + "prev": "/api/v1/users?limit=1&offset1", + }, + } + + +@pytest.mark.xfail(reason="Under dev") +async def test_upload_content( + client: AsyncClient, mocked_storage_rest_api_base: MockRouter, tmp_path: Path +): + upload_path = tmp_path / "test_upload_content.txt" + upload_path.write_text("test_upload_content") + + response = await client.put( + f"{API_VTAG}/files/content", files={"upload-file": upload_path.open("rb")} + ) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "filename": upload_path.name, + "content_type": "string", + "checksum": "string", + } + + +@pytest.mark.xfail(reason="Under dev") +async def test_get_file( + client: AsyncClient, mocked_storage_rest_api_base: MockRouter, tmp_path: Path +): + response = await client.get( + f"{API_VTAG}/files/3fa85f64-5717-4562-b3fc-2c963f66afa6" + ) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6", + "filename": "string", + "content_type": "string", + "checksum": "string", + } + + +async def test_delete_file( + client: AsyncClient, + mocked_storage_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + def search_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> dict[str, Any]: + assert isinstance(capture.response_body, dict) + response: dict[str, Any] = capture.response_body + return response + + def delete_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + return capture.response_body + + create_respx_mock_from_capture( + respx_mocks=[mocked_storage_rest_api_base], + capture_path=project_tests_dir / "mocks" / "delete_file.json", + side_effects_callbacks=[search_side_effect, delete_side_effect], + ) + + response = await client.delete( + f"{API_VTAG}/files/3fa85f64-5717-4562-b3fc-2c963f66afa6", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + + +@pytest.mark.xfail(reason="Under dev") +async def test_download_content( + client: AsyncClient, mocked_storage_rest_api_base: MockRouter, tmp_path: Path +): + response = await client.get( + f"{API_VTAG}/files/3fa85f64-5717-4562-b3fc-2c963f66afa6/content" + ) + + 
assert response.status_code == status.HTTP_200_OK + assert response.headers["content-type"] == "application/octet-stream" + + +@pytest.mark.parametrize("follow_up_request", ["complete", "abort"]) +async def test_get_upload_links( + follow_up_request: str, + client: AsyncClient, + auth: httpx.BasicAuth, + storage_v0_service_mock: AioResponsesMock, +): + """Test that we can get data needed for performing multipart upload directly to S3""" + + assert storage_v0_service_mock # nosec + + msg = { + "filename": DummyFileData.file().filename, + "filesize": DummyFileData.file_size(), + "sha256_checksum": DummyFileData.checksum(), + } + + response = await client.post(f"{API_VTAG}/files/content", json=msg, auth=auth) + + payload: dict[str, str] = response.json() + + assert response.status_code == status.HTTP_200_OK + client_upload_schema: ClientFileUploadData = ClientFileUploadData.model_validate( + payload + ) + + if follow_up_request == "complete": + body = { + "client_file": jsonable_encoder(DummyFileData.client_file()), + "uploaded_parts": jsonable_encoder(DummyFileData.uploaded_parts()), + } + response = await client.post( + client_upload_schema.upload_schema.links.complete_upload, + json=body, + auth=auth, + ) + + payload: dict[str, str] = response.json() + + assert response.status_code == status.HTTP_200_OK + file: File = File.model_validate(payload) + assert file.sha256_checksum == DummyFileData.checksum() + elif follow_up_request == "abort": + body = { + "client_file": jsonable_encoder(DummyFileData.client_file()), + } + response = await client.post( + client_upload_schema.upload_schema.links.abort_upload, json=body, auth=auth + ) + assert response.status_code == status.HTTP_200_OK + else: + raise AssertionError + + +@pytest.mark.parametrize( + "query", + [ + {"sha256_checksum": str(DummyFileData.checksum())}, + {"file_id": str(DummyFileData.file().id)}, + { + "file_id": str(DummyFileData.file().id), + "sha256_checksum": str(DummyFileData.checksum()), + }, + ], + ids=lambda x: "&".join([f"{k}={v}" for k, v in x.items()]), +) +async def test_search_file( + query: dict[str, str], + client: AsyncClient, + mocked_storage_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + def side_effect_callback( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> dict[str, Any]: + url: yarl.URL = yarl.URL(f"{request.url}") + request_query: dict[str, str] = dict(url.query) + assert isinstance(capture.response_body, dict) + response: dict[str, Any] = capture.response_body + for key in query: + if key == "sha256_checksum": + response["data"][0][key] = request_query[key] + elif key == "file_id": + file_uuid_parts: list[str] = response["data"][0]["file_uuid"].split("/") + file_uuid_parts[1] = request_query["startswith"].split("/")[1] + response["data"][0]["file_uuid"] = "/".join(file_uuid_parts) + response["data"][0]["file_id"] = "/".join(file_uuid_parts) + else: + msg = f"Encountered unexpected {key=}" + raise ValueError(msg) + return response + + create_respx_mock_from_capture( + respx_mocks=[mocked_storage_rest_api_base], + capture_path=project_tests_dir / "mocks" / "search_file_checksum.json", + side_effects_callbacks=[side_effect_callback], + ) + + response = await client.get(f"{API_VTAG}/files:search", auth=auth, params=query) + assert response.status_code == status.HTTP_200_OK + page: Page[File] = TypeAdapter(Page[File]).validate_python(response.json()) + assert 
len(page.items) == page.total + file = page.items[0] + if "sha256_checksum" in query: + assert file.sha256_checksum == SHA256Str(query["sha256_checksum"]) + if "file_id" in query: + assert file.id == UUID(query["file_id"]) + + +async def test_download_file_openapi_specs(openapi_dev_specs: dict[str, Any]): + """Test that openapi-specs for download file entrypoint specifies a binary file is returned in case of return status 200""" + file_download_responses: dict[str, Any] = openapi_dev_specs["paths"][ + f"/{API_VTAG}/files/{{file_id}}/content" + ]["get"]["responses"] + assert "application/octet-stream" in file_download_responses["200"]["content"] diff --git a/services/api-server/tests/unit/test_api_health.py b/services/api-server/tests/unit/test_api_health.py new file mode 100644 index 00000000000..01a8f5ffa12 --- /dev/null +++ b/services/api-server/tests/unit/test_api_health.py @@ -0,0 +1,54 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from pathlib import Path + +from fastapi import FastAPI, status +from httpx import AsyncClient +from models_library.app_diagnostics import AppStatusCheck +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG + + +async def test_check_service_health( + mocker: MockRouter, client: AsyncClient, app: FastAPI +): + class MockHealthChecker: + @property + def healthy(self) -> bool: + return True + + app.state.health_checker = MockHealthChecker() + response = await client.get(f"{API_VTAG}/") + assert response.status_code == status.HTTP_200_OK + assert "health" in response.text + + +async def test_get_service_state( + client: AsyncClient, + mocked_directorv2_rest_api_base: MockRouter, + mocked_storage_rest_api_base: MockRouter, + mocked_webserver_rest_api_base: MockRouter, +): + response = await client.get(f"{API_VTAG}/state") + assert response.status_code == status.HTTP_200_OK + + version_file: Path = Path(__file__).parent.parent.parent / "VERSION" + assert version_file.is_file() + + assert response.json() == { + "app_name": "simcore-service-api-server", + "version": version_file.read_text().strip(), + "services": { + "director_v2": {"healthy": True}, + "storage": {"healthy": True}, + "webserver": {"healthy": True}, + }, + "url": "http://api.testserver.io/state", + } + + assert AppStatusCheck.model_validate(response.json()) diff --git a/services/api-server/tests/unit/test_api_meta.py b/services/api-server/tests/unit/test_api_meta.py index e42d8d3865e..74f5791499c 100644 --- a/services/api-server/tests/unit/test_api_meta.py +++ b/services/api-server/tests/unit/test_api_meta.py @@ -2,12 +2,9 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -import pytest from httpx import AsyncClient +from models_library.api_schemas__common.meta import BaseMeta from simcore_service_api_server._meta import API_VERSION, API_VTAG -from simcore_service_api_server.models.schemas.meta import Meta - -pytestmark = pytest.mark.asyncio async def test_read_service_meta(client: AsyncClient): @@ -16,5 +13,5 @@ async def test_read_service_meta(client: AsyncClient): assert response.status_code == 200 assert response.json()["version"] == API_VERSION - meta = Meta(**response.json()) + meta = BaseMeta(**response.json()) assert meta.version == API_VERSION diff --git a/services/api-server/tests/unit/test_api_programs.py b/services/api-server/tests/unit/test_api_programs.py new file mode 100644 index 
00000000000..0becf86170a --- /dev/null +++ b/services/api-server/tests/unit/test_api_programs.py @@ -0,0 +1,187 @@ +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from functools import partial +from pathlib import Path +from typing import Any + +import httpx +import pytest +from common_library.json_serialization import json_loads +from fastapi import status +from httpx import AsyncClient +from models_library.api_schemas_storage.storage_schemas import FileUploadSchema +from models_library.users import UserID +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.catalog_rpc_server import ZeroListingCatalogRpcSideEffects +from pytest_simcore.helpers.faker_factories import DEFAULT_FAKER +from pytest_simcore.helpers.httpx_calls_capture_models import ( + CreateRespxMockCallback, + HttpApiCallCaptureModel, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import Job +from simcore_service_api_server.models.schemas.programs import Program + + +async def test_get_program_release( + auth: httpx.BasicAuth, + client: AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], + mocker: MockerFixture, + user_id: UserID, +): + # Arrange + program_key = "simcore/services/dynamic/my_program" + version = "1.0.0" + + response = await client.get( + f"/{API_VTAG}/programs/{program_key}/releases/{version}", auth=auth + ) + + # Assert + assert response.status_code == status.HTTP_200_OK + program = Program.model_validate(response.json()) + assert program.id == program_key + assert program.version == version + assert program.version_display is not None + + +@pytest.mark.parametrize( + "job_name,job_description", + [ + (None, None), + (DEFAULT_FAKER.name(), None), + (None, DEFAULT_FAKER.sentence()), + (DEFAULT_FAKER.name(), DEFAULT_FAKER.sentence()), + ], +) +@pytest.mark.parametrize("capture_name", ["create_program_job_success.json"]) +async def test_create_program_job( + auth: httpx.BasicAuth, + client: AsyncClient, + mocked_webserver_rest_api_base, + mocked_webserver_rpc_api: dict[str, MockType], + mocked_catalog_rpc_api: dict[str, MockType], + create_respx_mock_from_capture: CreateRespxMockCallback, + mocker: MockerFixture, + user_id: UserID, + capture_name: str, + project_tests_dir: Path, + job_name: str | None, + job_description: str | None, +): + + mocker.patch( + "simcore_service_api_server.api.routes.programs.get_upload_links_from_s3", + return_value=( + None, + FileUploadSchema.model_validate( + next(iter(FileUploadSchema.model_json_schema()["examples"])) + ), + ), + ) + mocker.patch("simcore_service_api_server.api.routes.programs.complete_file_upload") + + def _side_effect( + server_state: dict, + request: httpx.Request, + kwargs: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> dict[str, Any]: + + response_body = capture.response_body + + # first call creates project + if server_state.get("project_uuid") is None: + get_body_field = lambda field: json_loads( + request.content.decode("utf-8") + ).get(field) + + _project_uuid = get_body_field("uuid") + assert _project_uuid + server_state["project_uuid"] = _project_uuid + + _name = get_body_field("name") + assert _name + server_state["name"] = _name + + _description = get_body_field("description") + assert _description + server_state["description"] = _description + + if job_name: + assert job_name == get_body_field("name") + if job_description: + assert job_description == get_body_field("description") 
+ + if request.url.path.endswith("/result"): + response_body["data"]["uuid"] = server_state["project_uuid"] + response_body["data"]["name"] = server_state["name"] + response_body["data"]["description"] = server_state["description"] + assert isinstance(response_body, dict) + return response_body + + # simulate server state + _server_state = dict() + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / capture_name, + side_effects_callbacks=3 * [partial(_side_effect, _server_state)], + ) + + program_key = "simcore/services/dynamic/electrode-selector" + version = "2.1.3" + + body = {"name": job_name, "description": job_description} + + response = await client.post( + f"/{API_VTAG}/programs/{program_key}/releases/{version}/jobs", + auth=auth, + json={k: v for k, v in body.items() if v is not None}, + ) + + # Assert + assert response.status_code == status.HTTP_201_CREATED + job = Job.model_validate(response.json()) + + +async def test_list_latest_programs( + auth: httpx.BasicAuth, + client: AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], +): + # Arrange + response = await client.get(f"/{API_VTAG}/programs", auth=auth) + assert response.status_code == status.HTTP_200_OK + + +async def test_list_program_history( + auth: httpx.BasicAuth, + client: AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], +): + program_key = "simcore/services/dynamic/my_program" + # Arrange + response = await client.get( + f"/{API_VTAG}/programs/{program_key}/releases", auth=auth + ) + assert response.status_code == status.HTTP_200_OK + + +@pytest.mark.parametrize( + "catalog_rpc_side_effects", [ZeroListingCatalogRpcSideEffects()], indirect=True +) +async def test_list_program_history_no_program( + auth: httpx.BasicAuth, + client: AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], +): + program_key = "simcore/services/dynamic/my_program" + # Arrange + response = await client.get( + f"/{API_VTAG}/programs/{program_key}/releases", auth=auth + ) + assert response.status_code == status.HTTP_200_OK diff --git a/services/api-server/tests/unit/test_api_routes_studies.py b/services/api-server/tests/unit/test_api_routes_studies.py deleted file mode 100644 index 54495fa5ae0..00000000000 --- a/services/api-server/tests/unit/test_api_routes_studies.py +++ /dev/null @@ -1,143 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -from copy import deepcopy -from pathlib import Path -from typing import Any, Iterator - -import httpx -import pytest -import respx -import yaml -from faker import Faker -from fastapi import FastAPI, status -from pytest_simcore.helpers.faker_webserver import ( - PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA, -) -from respx import MockRouter -from simcore_service_api_server.core.settings import ApplicationSettings -from simcore_service_api_server.models.schemas.studies import StudyID - - -@pytest.fixture(scope="session") -def webserver_service_openapi_specs( - osparc_simcore_services_dir: Path, -) -> dict[str, Any]: - - openapi_path = ( - osparc_simcore_services_dir - / "web/server/src/simcore_service_webserver/api/v0/openapi.yaml" - ) - openapi_specs = yaml.safe_load(openapi_path.read_text()) - return openapi_specs - - -@pytest.fixture -def study_id(faker: Faker) -> StudyID: - return faker.uuid4() - - -@pytest.fixture -def fake_study_ports() -> list[dict[str, Any]]: - # NOTE: Reuses fakes used to test web-server API responses of 
/projects/{project_id}/metadata/ports - # as reponses in this mock. SEE services/web/server/tests/unit/with_dbs/02/test_projects_ports_handlers.py - return deepcopy(PROJECTS_METADATA_PORTS_RESPONSE_BODY_DATA) - - -@pytest.fixture -def mocked_webserver_service_api( - app: FastAPI, - webserver_service_openapi_specs: dict[str, Any], - fake_study_ports: list[dict[str, Any]], - faker: Faker, -) -> Iterator[MockRouter]: - """ - Mocks web/server http API - """ - settings: ApplicationSettings = app.state.settings - assert settings.API_SERVER_WEBSERVER - - openapi = deepcopy(webserver_service_openapi_specs) - oas_paths = openapi["paths"] - - # ENTRYPOINTS --------- - # pylint: disable=not-context-manager - with respx.mock( - base_url=settings.API_SERVER_WEBSERVER.base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - - # Mocks /health - assert oas_paths["/health"] - assert ( - oas_paths["/health"]["get"]["operationId"] == "healthcheck_liveness_probe" - ) - # 'http://webserver:8080/v0/health' - respx_mock.get("/health", name="healthcheck_liveness_probe").respond( - 200, - json={ - "data": { - "name": "simcore-director-service", - "status": "SERVICE_RUNNING", - "api_version": "0.1.0-dev+NJuzzD9S", - "version": "0.1.0-dev+N127Mfv9H", - } - }, - ) - - # Mocks /projects/{*}/metadata/ports - assert oas_paths["/projects/{project_id}/metadata/ports"] - assert "get" in oas_paths["/projects/{project_id}/metadata/ports"].keys() - respx_mock.get( - path__regex=r"/projects/(?P[\w-]+)/metadata/ports$", - name="list_project_metadata_ports", - ).respond( - 200, - json={"data": fake_study_ports}, - ) - - yield respx_mock - - -def test_mocked_webserver_service_api( - app: FastAPI, - mocked_webserver_service_api: MockRouter, - study_id: StudyID, - fake_study_ports: list[dict[str, Any]], -): - # - # This test intends to help building the urls in mocked_webserver_service_api - # At some point, it can be skipped and reenabled only for development - # - settings: ApplicationSettings = app.state.settings - assert settings.API_SERVER_WEBSERVER - webserver_api_baseurl = settings.API_SERVER_WEBSERVER.base_url - - resp = httpx.get(f"{webserver_api_baseurl}/health") - assert resp.status_code == status.HTTP_200_OK - - # Sometimes is difficult to adjust respx.Mock - resp = httpx.get(f"{webserver_api_baseurl}/projects/{study_id}/metadata/ports") - assert resp.status_code == status.HTTP_200_OK - - payload = resp.json() - assert payload.get("error") is None - assert payload.get("data") == fake_study_ports - - mocked_webserver_service_api.assert_all_called() - - -async def test_list_study_ports( - client: httpx.AsyncClient, - auth: httpx.BasicAuth, - mocked_webserver_service_api: MockRouter, - fake_study_ports: list[dict[str, Any]], - study_id: StudyID, -): - # list_study_ports - resp = await client.get(f"/v0/studies/{study_id}/ports", auth=auth) - assert resp.status_code == status.HTTP_200_OK - assert resp.json() == fake_study_ports diff --git a/services/api-server/tests/unit/test_api_solver_jobs.py b/services/api-server/tests/unit/test_api_solver_jobs.py new file mode 100644 index 00000000000..721ea4c8763 --- /dev/null +++ b/services/api-server/tests/unit/test_api_solver_jobs.py @@ -0,0 +1,444 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from decimal import Decimal +from pathlib import Path +from typing import Any, Final +from uuid import UUID + +import httpx +import pytest +from faker import 
Faker +from fastapi import status +from fastapi.encoders import jsonable_encoder +from httpx import AsyncClient +from models_library.generics import Envelope +from pydantic import TypeAdapter +from pytest_simcore.helpers.httpx_calls_capture_models import ( + CreateRespxMockCallback, + HttpApiCallCaptureModel, + SideEffectCallback, +) +from respx import MockRouter +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.jobs import Job, JobStatus +from simcore_service_api_server.models.schemas.model_adapter import ( + PricingUnitGetLegacy, + WalletGetWithAvailableCreditsLegacy, +) +from simcore_service_api_server.models.schemas.solvers import Solver +from simcore_service_api_server.services_http.director_v2 import ComputationTaskGet + + +def _start_job_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, +) -> Any: + return capture.response_body + + +def _get_inspect_job_side_effect(job_id: str) -> SideEffectCallback: + def _inspect_job_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response = capture.response_body + assert isinstance(response, dict) + assert response.get("id") is not None + response["id"] = job_id + return response + + return _inspect_job_side_effect + + +@pytest.mark.parametrize( + "capture", ["get_job_wallet_found.json", "get_job_wallet_not_found.json"] +) +async def test_get_solver_job_wallet( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + capture: str, +): + _wallet_id: int = 1826 + + def _get_job_wallet_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response = capture.response_body + assert isinstance(response, dict) + if data := response.get("data"): + assert isinstance(data, dict) + assert data.get("walletId") + response["data"]["walletId"] = _wallet_id + return response + + def _get_wallet_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response = capture.response_body + assert isinstance(response, dict) + if data := response.get("data"): + assert isinstance(data, dict) + assert data.get("walletId") + response["data"]["walletId"] = _wallet_id + return response + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / capture, + side_effects_callbacks=[_get_job_wallet_side_effect, _get_wallet_side_effect], + ) + + solver_key: str = "simcore/services/comp/my_super_hpc_solver" + solver_version: str = "3.14.0" + job_id: UUID = UUID("87643648-3a38-44e2-9cfe-d86ab3d50629") + response = await client.get( + f"{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/wallet", + auth=auth, + ) + if capture == "get_job_wallet_found.json": + assert response.status_code == status.HTTP_200_OK + body = response.json() + assert isinstance(body, dict) + assert _wallet_id == body.get("walletId") + elif capture == "get_job_wallet_not_found.json": + assert response.status_code == status.HTTP_404_NOT_FOUND + body = response.json() + assert isinstance(body, dict) + assert body.get("data") is None + assert body.get("errors") is not None + else: + pytest.fail(reason=f"Uknown {capture=}") + + +@pytest.mark.parametrize( + "capture_file", + [ + 
"get_job_pricing_unit_invalid_job.json", + "get_job_pricing_unit_invalid_solver.json", + "get_job_pricing_unit_success.json", + ], +) +async def test_get_solver_job_pricing_unit( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + capture_file: str, +): + solver_key: str = "simcore/services/comp/my_super_hpc_solver" + solver_version: str = "3.14.0" + job_id: UUID = UUID("87643648-3a38-44e2-9cfe-d86ab3d50629") + + def _get_job_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response = capture.response_body + assert isinstance(response, dict) + if data := response.get("data"): + assert isinstance(data, dict) + assert data.get("uuid") + data["uuid"] = path_params["project_id"] + assert data.get("name") + if capture_file != "get_job_pricing_unit_invalid_solver.json": + data["name"] = Job.compose_resource_name( + parent_name=Solver.compose_resource_name(solver_key, solver_version), # type: ignore + job_id=job_id, + ) + response["data"] = data + return response + + def _get_pricing_unit_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + return capture.response_body + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / capture_file, + side_effects_callbacks=( + [_get_job_side_effect, _get_pricing_unit_side_effect] + if capture_file == "get_job_pricing_unit_success.json" + else [_get_job_side_effect] + ), + ) + + response = await client.get( + f"{API_VTAG}/solvers/{solver_key}/releases/{solver_version}/jobs/{job_id}/pricing_unit", + auth=auth, + ) + if capture_file == "get_job_pricing_unit_success.json": + assert response.status_code == status.HTTP_200_OK + _ = TypeAdapter(PricingUnitGetLegacy).validate_python(response.json()) + elif capture_file == "get_job_pricing_unit_invalid_job.json": + assert response.status_code == status.HTTP_404_NOT_FOUND + elif capture_file == "get_job_pricing_unit_invalid_solver.json": + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + else: + pytest.fail(reason=f"Unknown {capture_file=}") + + +@pytest.mark.parametrize( + "capture_name,expected_status_code", + [ + ("start_job_with_payment.json", 202), + ("start_job_not_enough_credit.json", 402), + ], +) +async def test_start_solver_job_pricing_unit_with_payment( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + mocked_directorv2_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + faker: Faker, + capture_name: str, + expected_status_code: int, +): + _solver_key: str = "simcore/services/comp/isolve" + _version: str = "2.1.24" + _job_id: str = "6e52228c-6edd-4505-9131-e901fdad5b17" + _pricing_plan_id: int = faker.pyint(min_value=1) + _pricing_unit_id: int = faker.pyint(min_value=1) + + def _get_job_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response: dict[str, str] = capture.response_body # type: ignore + data = response.get("data") + assert isinstance(data, dict) + data["name"] = Job.compose_resource_name( + parent_name=Solver.compose_resource_name(_solver_key, _version), # type: ignore + job_id=UUID(_job_id), + ) + data["uuid"] = _job_id + response["data"] = data + return 
response + + def _put_pricing_plan_and_unit_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + _put_pricing_plan_and_unit_side_effect.was_called = True + assert int(path_params["pricing_plan_id"]) == _pricing_plan_id + assert int(path_params["pricing_unit_id"]) == _pricing_unit_id + return capture.response_body + + callbacks = [ + _get_job_side_effect, + _put_pricing_plan_and_unit_side_effect, + _start_job_side_effect, + ] + if expected_status_code == status.HTTP_202_ACCEPTED: + callbacks.append(_get_inspect_job_side_effect(job_id=_job_id)) + + _put_pricing_plan_and_unit_side_effect.was_called = False + create_respx_mock_from_capture( + respx_mocks=[ + mocked_webserver_rest_api_base, + mocked_directorv2_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / capture_name, + side_effects_callbacks=callbacks, + ) + + response = await client.post( + f"{API_VTAG}/solvers/{_solver_key}/releases/{_version}/jobs/{_job_id}:start", + auth=auth, + headers={ + "x-pricing-plan": f"{_pricing_plan_id}", + "x-pricing-unit": f"{_pricing_unit_id}", + }, + ) + assert response.status_code == expected_status_code + if expected_status_code == status.HTTP_202_ACCEPTED: + assert _put_pricing_plan_and_unit_side_effect.was_called + assert response.json()["job_id"] == _job_id + + +async def test_get_solver_job_pricing_unit_no_payment( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + mocked_directorv2_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + _solver_key: str = "simcore/services/comp/isolve" + _version: str = "2.1.24" + _job_id: str = "1eefc09b-5d08-4022-bc18-33dedbbd7d0f" + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_directorv2_rest_api_base, + mocked_webserver_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "start_job_no_payment.json", + side_effects_callbacks=[ + _start_job_side_effect, + _get_inspect_job_side_effect(job_id=_job_id), + ], + ) + + response = await client.post( + f"{API_VTAG}/solvers/{_solver_key}/releases/{_version}/jobs/{_job_id}:start", + auth=auth, + ) + + assert response.status_code == status.HTTP_202_ACCEPTED + assert response.json()["job_id"] == _job_id + + +async def test_start_solver_job_conflict( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + mocked_directorv2_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + _solver_key: str = "simcore/services/comp/itis/sleeper" + _version: str = "2.0.2" + _job_id: str = "b9faf8d8-4928-4e50-af40-3690712c5481" + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_directorv2_rest_api_base, + mocked_webserver_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "start_solver_job.json", + side_effects_callbacks=[ + _start_job_side_effect, + _get_inspect_job_side_effect(job_id=_job_id), + ], + ) + + response = await client.post( + f"{API_VTAG}/solvers/{_solver_key}/releases/{_version}/jobs/{_job_id}:start", + auth=auth, + ) + + assert response.status_code == status.HTTP_200_OK + job_status = JobStatus.model_validate(response.json()) + assert f"{job_status.job_id}" == _job_id + + +async def test_stop_job( + client: AsyncClient, + mocked_directorv2_rest_api_base: MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + + 
_solver_key: Final[str] = "simcore/services/comp/isolve" + _version: Final[str] = "2.1.24" + _job_id: Final[str] = "1eefc09b-5d08-4022-bc18-33dedbbd7d0f" + + def _stop_job_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + task = ComputationTaskGet.model_validate(capture.response_body) + task.id = UUID(_job_id) + + return jsonable_encoder(task) + + create_respx_mock_from_capture( + respx_mocks=[mocked_directorv2_rest_api_base], + capture_path=project_tests_dir / "mocks" / "stop_job.json", + side_effects_callbacks=[ + _stop_job_side_effect, + _get_inspect_job_side_effect(job_id=_job_id), + ], + ) + + response = await client.post( + f"{API_VTAG}/solvers/{_solver_key}/releases/{_version}/jobs/{_job_id}:stop", + auth=auth, + ) + + assert response.status_code == status.HTTP_200_OK + status_ = JobStatus.model_validate(response.json()) + assert status_.job_id == UUID(_job_id) + + +@pytest.mark.parametrize( + "sufficient_credits,expected_status_code", + [(True, status.HTTP_200_OK), (False, status.HTTP_402_PAYMENT_REQUIRED)], +) +async def test_get_solver_job_outputs( + client: AsyncClient, + mocked_webserver_rest_api_base: MockRouter, + mocked_storage_rest_api_base: MockRouter, + mocked_solver_job_outputs: None, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + sufficient_credits: bool, + expected_status_code: int, +): + def _sf( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + return capture.response_body + + def _wallet_side_effect( + request: httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ): + wallet = ( + TypeAdapter(Envelope[WalletGetWithAvailableCreditsLegacy]) + .validate_python(capture.response_body) + .data + ) + assert wallet is not None + wallet.available_credits = ( + Decimal("10.0") if sufficient_credits else Decimal("-10.0") + ) + envelope = Envelope[WalletGetWithAvailableCreditsLegacy]() + envelope.data = wallet + return jsonable_encoder(envelope) + + create_respx_mock_from_capture( + respx_mocks=[ + mocked_webserver_rest_api_base, + mocked_storage_rest_api_base, + ], + capture_path=project_tests_dir / "mocks" / "get_solver_outputs.json", + side_effects_callbacks=[_sf, _sf, _sf, _wallet_side_effect, _sf], + ) + + _solver_key: Final[str] = "simcore/services/comp/isolve" + _version: Final[str] = "2.1.24" + _job_id: Final[str] = "1eefc09b-5d08-4022-bc18-33dedbbd7d0f" + response = await client.get( + f"{API_VTAG}/solvers/{_solver_key}/releases/{_version}/jobs/{_job_id}/outputs", + auth=auth, + ) + + assert response.status_code == expected_status_code diff --git a/services/api-server/tests/unit/test_api_solvers.py b/services/api-server/tests/unit/test_api_solvers.py new file mode 100644 index 00000000000..19bcbbc4e3b --- /dev/null +++ b/services/api-server/tests/unit/test_api_solvers.py @@ -0,0 +1,103 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from pathlib import Path + +import httpx +import pytest +import respx +from fastapi import status +from httpx import AsyncClient +from models_library.api_schemas_api_server.pricing_plans import ServicePricingPlanGet +from pytest_mock import MockType +from pytest_simcore.helpers.catalog_rpc_server import ZeroListingCatalogRpcSideEffects +from pytest_simcore.helpers.httpx_calls_capture_models import 
CreateRespxMockCallback +from simcore_service_api_server._meta import API_VTAG + + +@pytest.mark.parametrize( + "capture,expected_status_code", + [ + ( + "get_solver_pricing_plan_invalid_solver.json", + status.HTTP_502_BAD_GATEWAY, + ), + ("get_solver_pricing_plan_success.json", status.HTTP_200_OK), + ], +) +async def test_get_solver_pricing_plan( + client: AsyncClient, + mocked_webserver_rest_api_base: respx.MockRouter, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + capture: str, + expected_status_code: int, +): + + respx_mock = create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / capture, + side_effects_callbacks=[], + ) + assert respx_mock + + _my_solver: str = "simcore/services/comp/my_solver" + _version: str = "2.4.3" + response = await client.get( + f"{API_VTAG}/solvers/{_my_solver}/releases/{_version}/pricing_plan", + auth=auth, + ) + assert expected_status_code == response.status_code + if response.status_code == status.HTTP_200_OK: + _ = ServicePricingPlanGet.model_validate(response.json()) + + +@pytest.mark.parametrize( + "solver_key,expected_status_code", + [ + ("simcore/services/comp/valid_solver", status.HTTP_200_OK), + ], +) +async def test_get_latest_solver_release( + client: AsyncClient, + mocked_catalog_rpc_api: dict[str, MockType], + auth: httpx.BasicAuth, + solver_key: str, + expected_status_code: int, +): + response = await client.get( + f"{API_VTAG}/solvers/{solver_key}/latest", + auth=auth, + ) + assert response.status_code == expected_status_code + if response.status_code == status.HTTP_200_OK: + assert "id" in response.json() + assert response.json()["id"] == solver_key + + +@pytest.mark.parametrize( + "catalog_rpc_side_effects", + [ZeroListingCatalogRpcSideEffects()], + indirect=True, +) +@pytest.mark.parametrize( + "solver_key,expected_status_code", + [ + ("simcore/services/comp/valid_solver", status.HTTP_404_NOT_FOUND), + ], +) +async def test_get_latest_solver_release_zero_releases( + client: AsyncClient, + mocked_catalog_rpc_api, + auth: httpx.BasicAuth, + solver_key: str, + expected_status_code: int, +): + response = await client.get( + f"{API_VTAG}/solvers/{solver_key}/latest", + auth=auth, + ) + assert response.status_code == expected_status_code diff --git a/services/api-server/tests/unit/test_api_wallets.py b/services/api-server/tests/unit/test_api_wallets.py new file mode 100644 index 00000000000..5249d94565a --- /dev/null +++ b/services/api-server/tests/unit/test_api_wallets.py @@ -0,0 +1,82 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from pathlib import Path +from typing import Any + +import httpx +import pytest +from fastapi import status +from httpx import AsyncClient +from pytest_simcore.helpers.httpx_calls_capture_models import ( + CreateRespxMockCallback, + HttpApiCallCaptureModel, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.models.schemas.model_adapter import ( + WalletGetWithAvailableCreditsLegacy, +) + + +@pytest.mark.parametrize( + "capture", ["get_wallet_success.json", "get_wallet_failure.json"] +) +async def test_get_wallet( + client: AsyncClient, + mocked_webserver_rest_api_base, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, + capture: str, +): + def _get_wallet_side_effect( + request: 
httpx.Request, + path_params: dict[str, Any], + capture: HttpApiCallCaptureModel, + ) -> Any: + response = capture.response_body + assert isinstance(response, dict) + if data := response.get("data"): + assert isinstance(data, dict) + assert data.get("walletId") + response["data"]["walletId"] = path_params["wallet_id"] + return response + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / capture, + side_effects_callbacks=[_get_wallet_side_effect], + ) + + wallet_id: int = 159873 + response = await client.get(f"{API_VTAG}/wallets/{wallet_id}", auth=auth) + if "success" in capture: + assert response.status_code == 200 + wallet: WalletGetWithAvailableCreditsLegacy = ( + WalletGetWithAvailableCreditsLegacy.model_validate(response.json()) + ) + assert wallet.wallet_id == wallet_id + elif "failure" in capture: + assert response.status_code == 403 + assert response.json().get("errors") is not None + + +async def test_get_default_wallet( + client: AsyncClient, + mocked_webserver_rest_api_base, + create_respx_mock_from_capture: CreateRespxMockCallback, + auth: httpx.BasicAuth, + project_tests_dir: Path, +): + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / "get_default_wallet.json", + side_effects_callbacks=[], + ) + + response = await client.get(f"{API_VTAG}/wallets/default", auth=auth) + assert response.status_code == status.HTTP_200_OK + _ = WalletGetWithAvailableCreditsLegacy.model_validate(response.json()) diff --git a/services/api-server/tests/unit/test_cli.py b/services/api-server/tests/unit/test_cli.py new file mode 100644 index 00000000000..febeca14b1f --- /dev/null +++ b/services/api-server/tests/unit/test_cli.py @@ -0,0 +1,40 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import os + +from fastapi import FastAPI +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_api_server.cli import main +from simcore_service_api_server.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def test_cli_help(cli_runner: CliRunner): + result = cli_runner.invoke(main, "--help") + assert result.exit_code == os.EX_OK, result.output + + +def test_cli_run(cli_runner: CliRunner): + result = cli_runner.invoke(main, "run") + assert "disabled" in result.output + assert result.exit_code == os.EX_OK, result.output + + +def test_cli_list_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK, result.output + + print(result.output) + settings = ApplicationSettings.model_validate_json(result.output) + assert settings == ApplicationSettings.create_from_envs() + + +def test_main(app_environment: EnvVarsDict): + from simcore_service_api_server.main import the_app + + assert the_app + assert isinstance(the_app, FastAPI) diff --git a/services/api-server/tests/unit/test_core_settings.py b/services/api-server/tests/unit/test_core_settings.py index 94a2c89cf16..feb5052ab0f 100644 --- a/services/api-server/tests/unit/test_core_settings.py +++ b/services/api-server/tests/unit/test_core_settings.py @@ -3,28 +3,38 @@ # pylint: disable=redefined-outer-name -import logging - -from pytest_simcore.helpers.utils_envs import EnvVarsDict -from simcore_service_api_server.core.settings import 
ApplicationSettings, BootModeEnum -from yarl import URL - - -def test_default_app_environ(patched_default_app_environ: EnvVarsDict): - # loads from environ +import pytest +from pytest_simcore.helpers.monkeypatch_envs import ( + EnvVarsDict, + delenvs_from_dict, + setenvs_from_dict, +) +from simcore_service_api_server.core.settings import ApplicationSettings + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + external_envfile_dict: EnvVarsDict, +) -> EnvVarsDict: + """ + NOTE: To run against repo.config in osparc-config repo + + ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + pytest --external-envfile=.secrets tests/unit/test_core_settings.py + + """ + if external_envfile_dict: + delenvs_from_dict(monkeypatch, app_environment, raising=False) + return setenvs_from_dict( + monkeypatch, + {**external_envfile_dict}, + ) + return app_environment + + +def test_unit_app_environment(app_environment: EnvVarsDict): + assert app_environment settings = ApplicationSettings.create_from_envs() - print("captured settings: \n", settings.json(indent=2)) - - assert settings.SC_BOOT_MODE == BootModeEnum.PRODUCTION - assert settings.log_level == logging.DEBUG - - assert URL(settings.API_SERVER_POSTGRES.dsn) == URL( - "postgresql://test:test@127.0.0.1:5432/test" - ) - - -def test_light_app_environ(patched_light_app_environ: EnvVarsDict): - settings = ApplicationSettings.create_from_envs() - print("captured settings: \n", settings.json(indent=2)) - - assert settings.API_SERVER_POSTGRES is None + print("captured settings: \n", settings.model_dump_json(indent=2)) diff --git a/services/api-server/tests/unit/test_credits.py b/services/api-server/tests/unit/test_credits.py new file mode 100644 index 00000000000..c78165a78f0 --- /dev/null +++ b/services/api-server/tests/unit/test_credits.py @@ -0,0 +1,26 @@ +from pathlib import Path + +from fastapi import status +from httpx import AsyncClient, BasicAuth +from models_library.api_schemas_webserver.products import CreditPriceGet +from pytest_simcore.helpers.httpx_calls_capture_models import CreateRespxMockCallback +from simcore_service_api_server._meta import API_VTAG + + +async def test_get_credits_price( + client: AsyncClient, + auth: BasicAuth, + mocked_webserver_rest_api_base, + create_respx_mock_from_capture: CreateRespxMockCallback, + project_tests_dir: Path, +): + + create_respx_mock_from_capture( + respx_mocks=[mocked_webserver_rest_api_base], + capture_path=project_tests_dir / "mocks" / "get_credits_price.json", + side_effects_callbacks=[], + ) + + response = await client.get(f"{API_VTAG}/credits/price", auth=auth) + assert response.status_code == status.HTTP_200_OK + _ = CreditPriceGet.model_validate(response.json()) diff --git a/services/api-server/tests/unit/test_exceptions.py b/services/api-server/tests/unit/test_exceptions.py new file mode 100644 index 00000000000..be81922bf4f --- /dev/null +++ b/services/api-server/tests/unit/test_exceptions.py @@ -0,0 +1,128 @@ +# pylint: disable=unused-variable +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments + +from http import HTTPStatus +from uuid import UUID + +import httpx +import pytest +from fastapi import FastAPI, HTTPException, status +from httpx import HTTPStatusError, Request, Response +from simcore_service_api_server.exceptions import setup_exception_handlers +from simcore_service_api_server.exceptions.backend_errors import ( + BaseBackEndError, + 
ProfileNotFoundError, +) +from simcore_service_api_server.exceptions.custom_errors import MissingWalletError +from simcore_service_api_server.exceptions.service_errors_utils import ( + _assert_correct_kwargs, + service_exception_mapper, +) +from simcore_service_api_server.models.schemas.errors import ErrorGet + + +async def test_backend_service_exception_mapper(): + @service_exception_mapper( + service_name="DummyService", + http_status_map={status.HTTP_400_BAD_REQUEST: ProfileNotFoundError}, + ) + async def my_endpoint(status_code: int): + raise HTTPStatusError( + message="hello", + request=Request("PUT", "https://asoubkjbasd.asjdbnsakjb"), + response=Response(status_code), + ) + + with pytest.raises(ProfileNotFoundError): + await my_endpoint(status.HTTP_400_BAD_REQUEST) + + with pytest.raises(HTTPException) as exc_info: + await my_endpoint(status.HTTP_500_INTERNAL_SERVER_ERROR) + assert exc_info.value.status_code == status.HTTP_502_BAD_GATEWAY + + +@pytest.fixture +def app() -> FastAPI: + """Overrides app to avoid real app and builds instead a simple app to tests exception handlers""" + app = FastAPI() + setup_exception_handlers(app) + + @app.post("/raise-http-exception") + def _raise_http_exception(): + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail="fail message" + ) + + @app.post("/raise-custom-error") + def _raise_custom_exception(): + raise MissingWalletError(job_id=123) + + return app + + +async def test_raised_http_exception(client: httpx.AsyncClient): + response = await client.post("/raise-http-exception") + + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE + + got = ErrorGet.model_validate_json(response.text) + assert got.errors == ["fail message"] + + +async def test_fastapi_http_exception_respond_with_error_model( + client: httpx.AsyncClient, +): + response = await client.get("/invalid") + + assert response.status_code == status.HTTP_404_NOT_FOUND + + got = ErrorGet.model_validate_json(response.text) + assert got.errors == [HTTPStatus(response.status_code).phrase] + + +async def test_custom_error_handlers(client: httpx.AsyncClient): + response = await client.post("/raise-custom-error") + + assert response.status_code == status.HTTP_424_FAILED_DEPENDENCY + + got = ErrorGet.model_validate_json(response.text) + assert got.errors == [f"{MissingWalletError(job_id=123)}"] + + +async def test_service_exception_mapper(): + class _ProjectMissingError(BaseBackEndError): + msg_template = "The project {project_id} is missing" + + assert _ProjectMissingError.named_fields() == {"project_id"} + + status_map = {404: _ProjectMissingError} + + async def coro1(project_id): + pass + + with pytest.raises(AssertionError): + _assert_correct_kwargs(func=coro1, exception_types=set(status_map.values())) + + async def coro2(project_id=UUID("9c201eb7-ba04-4d9b-abe6-f16b406ca86d")): + pass + + with pytest.raises(AssertionError) as exc: + _assert_correct_kwargs(func=coro2, exception_types=set(status_map.values())) + + async def coro3(*, project_id): + pass + + _assert_correct_kwargs(func=coro3, exception_types=set(status_map.values())) + + async def coro4(*, project_id=UUID("ce56af2e-e9e5-46a4-8067-662077de5528")): + pass + + _assert_correct_kwargs(func=coro4, exception_types=set(status_map.values())) + + async def coro5(*, project_uuid): + pass + + with pytest.raises(AssertionError): + _assert_correct_kwargs(func=coro5, exception_types=set(status_map.values())) diff --git a/services/api-server/tests/unit/test_licensed_items.py 
b/services/api-server/tests/unit/test_licensed_items.py new file mode 100644 index 00000000000..f396f1f397b --- /dev/null +++ b/services/api-server/tests/unit/test_licensed_items.py @@ -0,0 +1,317 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +import asyncio +from functools import partial +from typing import cast +from uuid import UUID + +import pytest +from faker import Faker +from fastapi import FastAPI, status +from httpx import AsyncClient, BasicAuth +from models_library.api_schemas_resource_usage_tracker.licensed_items_checkouts import ( + LicensedItemCheckoutGet, +) +from models_library.api_schemas_webserver.licensed_items import ( + LicensedItemRpcGet, + LicensedItemRpcGetPage, +) +from models_library.api_schemas_webserver.licensed_items_checkouts import ( + LicensedItemCheckoutRpcGet, +) +from models_library.licenses import LicensedItemID +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from servicelib.rabbitmq._errors import RemoteMethodNotRegisteredError +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CanNotCheckoutNotEnoughAvailableSeatsError, + CanNotCheckoutServiceIsNotRunningError, + LicensedItemCheckoutNotFoundError, + NotEnoughAvailableSeatsError, +) +from simcore_service_api_server._meta import API_VTAG +from simcore_service_api_server.api.dependencies.resource_usage_tracker_rpc import ( + get_resource_usage_tracker_client, +) +from simcore_service_api_server.api.dependencies.webserver_rpc import ( + get_wb_api_rpc_client, +) +from simcore_service_api_server.models.pagination import Page +from simcore_service_api_server.models.schemas.licensed_items import ( + LicensedItemCheckoutData, +) +from simcore_service_api_server.models.schemas.model_adapter import LicensedItemGet +from simcore_service_api_server.services_rpc.resource_usage_tracker import ( + ResourceUsageTrackerClient, +) +from simcore_service_api_server.services_rpc.wb_api_server import WbApiRpcClient + + +async def _get_backend_licensed_items( + exception_to_raise: Exception | None, + rabbitmq_rpc_client: RabbitMQRPCClient, + product_name: str, + offset: int, + limit: int, +) -> LicensedItemRpcGetPage: + if exception_to_raise is not None: + raise exception_to_raise + extra = LicensedItemRpcGet.model_config.get("json_schema_extra") + assert isinstance(extra, dict) + examples = extra.get("examples") + assert isinstance(examples, list) + return LicensedItemRpcGetPage( + items=[LicensedItemRpcGet.model_validate(ex) for ex in examples], + total=len(examples), + ) + + +class DummyRpcClient: + pass + + +@pytest.fixture +async def mock_wb_api_server_rcp(app: FastAPI, mocker: MockerFixture) -> MockerFixture: + + app.dependency_overrides[get_wb_api_rpc_client] = lambda: WbApiRpcClient( + _client=DummyRpcClient() + ) + return mocker + + +@pytest.fixture +async def mock_rut_rpc(app: FastAPI, mocker: MockerFixture) -> MockerFixture: + app.dependency_overrides[ + get_resource_usage_tracker_client + ] = lambda: ResourceUsageTrackerClient(_client=DummyRpcClient()) + return mocker + + +async def test_get_licensed_items( + mock_wb_api_server_rcp: MockerFixture, client: 
AsyncClient, auth: BasicAuth +): + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._get_licensed_items", + partial(_get_backend_licensed_items, None), + ) + resp = await client.get(f"{API_VTAG}/licensed-items", auth=auth) + assert resp.status_code == status.HTTP_200_OK + TypeAdapter(Page[LicensedItemGet]).validate_json(resp.text) + + +async def test_get_licensed_items_timeout( + mock_wb_api_server_rcp: MockerFixture, client: AsyncClient, auth: BasicAuth +): + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._get_licensed_items", + partial(_get_backend_licensed_items, exception_to_raise=TimeoutError()), + ) + resp = await client.get(f"{API_VTAG}/licensed-items", auth=auth) + assert resp.status_code == status.HTTP_504_GATEWAY_TIMEOUT + + +@pytest.mark.parametrize( + "exception_to_raise", + [asyncio.CancelledError(), RuntimeError(), RemoteMethodNotRegisteredError()], +) +async def test_get_licensed_items_502( + mock_wb_api_server_rcp: MockerFixture, + client: AsyncClient, + auth: BasicAuth, + exception_to_raise: Exception, +): + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._get_licensed_items", + partial(_get_backend_licensed_items, exception_to_raise), + ) + resp = await client.get(f"{API_VTAG}/licensed-items", auth=auth) + assert resp.status_code == status.HTTP_502_BAD_GATEWAY + + +@pytest.mark.parametrize( + "exception_to_raise,expected_api_server_status_code", + [ + (NotImplementedError(), status.HTTP_501_NOT_IMPLEMENTED), + ], +) +async def test_get_licensed_items_for_wallet( + mock_wb_api_server_rcp: MockerFixture, + client: AsyncClient, + auth: BasicAuth, + exception_to_raise: Exception | None, + expected_api_server_status_code: int, + faker: Faker, +): + _wallet_id = faker.pyint(min_value=1) + + async def side_effect( + rabbitmq_rpc_client: RabbitMQRPCClient, + product_name: str, + wallet_id: WalletID, + user_id: UserID, + offset: int, + limit: int, + ) -> LicensedItemRpcGetPage: + assert _wallet_id == wallet_id + if exception_to_raise is not None: + raise exception_to_raise + extra = LicensedItemRpcGet.model_config.get("json_schema_extra") + assert isinstance(extra, dict) + examples = extra.get("examples") + assert isinstance(examples, list) + return LicensedItemRpcGetPage( + items=[LicensedItemRpcGet.model_validate(ex) for ex in examples], + total=len(examples), + ) + + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._get_available_licensed_items_for_wallet", + side_effect, + ) + resp = await client.get( + f"{API_VTAG}/wallets/{_wallet_id}/licensed-items", auth=auth + ) + assert resp.status_code == expected_api_server_status_code + + +@pytest.mark.parametrize( + "exception_to_raise,expected_api_server_status_code", + [ + (None, status.HTTP_200_OK), + (NotEnoughAvailableSeatsError(), status.HTTP_409_CONFLICT), + (CanNotCheckoutNotEnoughAvailableSeatsError(), status.HTTP_409_CONFLICT), + ( + CanNotCheckoutServiceIsNotRunningError(), + status.HTTP_422_UNPROCESSABLE_ENTITY, + ), + ], +) +async def test_checkout_licensed_item( + mock_wb_api_server_rcp: MockerFixture, + client: AsyncClient, + auth: BasicAuth, + exception_to_raise: Exception | None, + expected_api_server_status_code: int, + faker: Faker, +): + _wallet_id = faker.pyint(min_value=1) + _licensed_item_id = faker.uuid4() + + async def side_effect( + rabbitmq_rpc_client: RabbitMQRPCClient, + product_name: str, + user_id: UserID, + wallet_id: WalletID, + licensed_item_id: 
LicensedItemID, + num_of_seats: int, + service_run_id: ServiceRunID, + ) -> LicensedItemCheckoutRpcGet: + if exception_to_raise is not None: + raise exception_to_raise + extra = LicensedItemCheckoutRpcGet.model_config.get("json_schema_extra") + assert isinstance(extra, dict) + examples = extra.get("examples") + assert isinstance(examples, list) + assert len(examples) > 0 + example = examples[0] + assert isinstance(example, dict) + return LicensedItemCheckoutRpcGet.model_validate(example) + + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._checkout_licensed_item_for_wallet", + side_effect, + ) + body = LicensedItemCheckoutData( + number_of_seats=faker.pyint(min_value=1), + service_run_id=cast(ServiceRunID, "myservice"), + ) + resp = await client.post( + f"{API_VTAG}/wallets/{_wallet_id}/licensed-items/{_licensed_item_id}/checkout", + auth=auth, + content=body.model_dump_json(), + ) + assert resp.status_code == expected_api_server_status_code + + +@pytest.mark.parametrize( + "wb_api_exception_to_raise,rut_exception_to_raise,expected_api_server_status_code,valid_license_checkout_id", + [ + (LicensedItemCheckoutNotFoundError, None, status.HTTP_404_NOT_FOUND, True), + (None, LicensedItemCheckoutNotFoundError, status.HTTP_404_NOT_FOUND, True), + (None, None, status.HTTP_200_OK, True), + (None, None, status.HTTP_422_UNPROCESSABLE_ENTITY, False), + ], +) +async def test_release_checked_out_licensed_item( + mock_wb_api_server_rcp: MockerFixture, + mock_rut_rpc: MockerFixture, + client: AsyncClient, + auth: BasicAuth, + wb_api_exception_to_raise: Exception | None, + rut_exception_to_raise: Exception | None, + expected_api_server_status_code: int, + valid_license_checkout_id: bool, + faker: Faker, +): + _licensed_item_id = cast(UUID, faker.uuid4()) + _licensed_item_checkout_id = cast(UUID, faker.uuid4()) + + async def get_licensed_item_checkout( + rabbitmq_rpc_client: RabbitMQRPCClient, + product_name: str, + licensed_item_checkout_id: LicensedItemCheckoutID, + ) -> LicensedItemCheckoutGet: + if rut_exception_to_raise is not None: + raise rut_exception_to_raise + extra = LicensedItemCheckoutGet.model_config.get("json_schema_extra") + assert isinstance(extra, dict) + examples = extra.get("examples") + assert isinstance(examples, list) + assert len(examples) > 0 + example = examples[0] + assert isinstance(example, dict) + licensed_item_checkout_get = LicensedItemCheckoutGet.model_validate(example) + if valid_license_checkout_id: + licensed_item_checkout_get.licensed_item_id = _licensed_item_id + return licensed_item_checkout_get + + async def release_licensed_item_for_wallet( + rabbitmq_rpc_client: RabbitMQRPCClient, + product_name: str, + user_id: int, + licensed_item_checkout_id: LicensedItemCheckoutID, + ) -> LicensedItemCheckoutRpcGet: + if wb_api_exception_to_raise is not None: + raise wb_api_exception_to_raise + extra = LicensedItemCheckoutRpcGet.model_config.get("json_schema_extra") + assert isinstance(extra, dict) + examples = extra.get("examples") + assert isinstance(examples, list) + assert len(examples) > 0 + example = examples[0] + assert isinstance(example, dict) + return LicensedItemCheckoutRpcGet.model_validate(example) + + mock_rut_rpc.patch( + "simcore_service_api_server.services_rpc.resource_usage_tracker._get_licensed_item_checkout", + get_licensed_item_checkout, + ) + mock_wb_api_server_rcp.patch( + "simcore_service_api_server.services_rpc.wb_api_server._release_licensed_item_for_wallet", + release_licensed_item_for_wallet, + ) + + resp = 
await client.post( + f"{API_VTAG}/licensed-items/{_licensed_item_id}/checked-out-items/{_licensed_item_checkout_id}/release", + auth=auth, + ) + assert resp.status_code == expected_api_server_status_code diff --git a/services/api-server/tests/unit/test_models.py b/services/api-server/tests/unit/test_models.py new file mode 100644 index 00000000000..0df9f63fc70 --- /dev/null +++ b/services/api-server/tests/unit/test_models.py @@ -0,0 +1,33 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +from typing import Any + +import pytest +import simcore_service_api_server.models +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) +from simcore_postgres_database.models.users import UserRole +from simcore_service_api_server.models.schemas.profiles import UserRoleEnum + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(simcore_service_api_server.models), +) +def test_api_server_model_examples( + model_cls: type[BaseModel], example_name: int, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) + + +def test_enums_in_sync(): + # if this test fails, API needs to be updated + assert {e.value for e in UserRole} == {e.value for e in UserRoleEnum} diff --git a/services/api-server/tests/unit/test_models_api_resources.py b/services/api-server/tests/unit/test_models_api_resources.py index 46f31b3ecd7..d204b32215d 100644 --- a/services/api-server/tests/unit/test_models_api_resources.py +++ b/services/api-server/tests/unit/test_models_api_resources.py @@ -7,14 +7,17 @@ from simcore_service_api_server.models.api_resources import ( compose_resource_name, + parse_collections_ids, parse_last_resource_id, + parse_resources_ids, split_resource_name, + split_resource_name_as_dict, ) def test_parse_resource_id(): resource_name = "solvers/simcore%2Fservices%2Fcomp%2Fisolve/releases/1.3.4/jobs/f622946d-fd29-35b9-a193-abdd1095167c/outputs/output+22" - parts = [ + parts = ( "solvers", "simcore/services/comp/isolve", "releases", @@ -23,7 +26,7 @@ def test_parse_resource_id(): "f622946d-fd29-35b9-a193-abdd1095167c", "outputs", "output 22", - ] + ) # cannot use this because cannot convert into URL? except {:path} in starlette ??? 
assert str(Path(*parts)) == urllib.parse.unquote_plus(resource_name) @@ -38,3 +41,19 @@ def test_parse_resource_id(): assert ( parse_last_resource_id(resource_name) == split_resource_name(resource_name)[-1] ) + + collection_to_resource_id_map = split_resource_name_as_dict(resource_name) + # Collection-ID -> Resource-ID + assert tuple(collection_to_resource_id_map.keys()) == parse_collections_ids( + resource_name + ) + assert tuple(collection_to_resource_id_map.values()) == parse_resources_ids( + resource_name + ) + + assert collection_to_resource_id_map["solvers"] == "simcore/services/comp/isolve" + assert collection_to_resource_id_map["releases"] == "1.3.4" + assert ( + collection_to_resource_id_map["jobs"] == "f622946d-fd29-35b9-a193-abdd1095167c" + ) + assert collection_to_resource_id_map["outputs"] == "output 22" diff --git a/services/api-server/tests/unit/test_models_schemas_files.py b/services/api-server/tests/unit/test_models_schemas_files.py index e8cbdf37201..30d4ead053c 100644 --- a/services/api-server/tests/unit/test_models_schemas_files.py +++ b/services/api-server/tests/unit/test_models_schemas_files.py @@ -1,105 +1,108 @@ +# pylint: disable=protected-access # pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable import hashlib import tempfile from pathlib import Path -from pprint import pformat from uuid import uuid4 import pytest from fastapi import UploadFile -from models_library.api_schemas_storage import FileMetaDataGet as StorageFileMetaData -from pydantic import ValidationError -from simcore_service_api_server.models.schemas.files import File -from simcore_service_api_server.modules.storage import to_file_api_model +from models_library.api_schemas_storage.storage_schemas import ( + FileMetaDataGet as StorageFileMetaData, +) +from models_library.basic_types import SHA256Str +from models_library.projects_nodes_io import StorageFileID +from pydantic import TypeAdapter, ValidationError +from simcore_service_api_server.models.domain.files import File +from simcore_service_api_server.services_http.storage import to_file_api_model FILE_CONTENT = "This is a test" @pytest.fixture -def mock_filepath(tmpdir) -> Path: - path = Path(tmpdir) / "mock_filepath.txt" +def mock_filepath(tmp_path: Path) -> Path: + path = tmp_path / "mock_filepath.txt" path.write_text(FILE_CONTENT) return path @pytest.fixture -def expected_md5sum(): +def expected_sha256sum() -> SHA256Str: # # $ echo -n "This is a test" | md5sum - # ce114e4501d2f4e2dcea3e17b546f339 - # - expected_md5sum = "ce114e4501d2f4e2dcea3e17b546f339" - assert hashlib.md5(FILE_CONTENT.encode()).hexdigest() == expected_md5sum - return expected_md5sum + _sha256sum: SHA256Str = TypeAdapter(SHA256Str).validate_python( + "c7be1ed902fb8dd4d48997c6452f5d7e509fbcdbe2808b16bcf4edce4c07d14e" + ) + assert hashlib.sha256(FILE_CONTENT.encode()).hexdigest() == _sha256sum + return _sha256sum -async def test_create_filemetadata_from_path(mock_filepath, expected_md5sum): +async def test_create_filemetadata_from_path( + mock_filepath: Path, expected_sha256sum: SHA256Str +): file_meta = await File.create_from_path(mock_filepath) - assert file_meta.checksum == expected_md5sum + assert file_meta.sha256_checksum == expected_sha256sum async def test_create_filemetadata_from_starlette_uploadfile( - mock_filepath, expected_md5sum + mock_filepath: Path, expected_sha256sum: SHA256Str ): # WARNING: upload is a wrapper around a file handler that can actually be in memory as 
well # in file - with open(mock_filepath, "rb") as file: - upload = UploadFile(mock_filepath.name, file) + with Path.open(mock_filepath, "rb") as fh: + upload = UploadFile(file=fh, filename=mock_filepath.name) assert upload.file.tell() == 0 file_meta = await File.create_from_uploaded(upload) assert upload.file.tell() > 0, "modifies current position is at the end" - assert file_meta.checksum == expected_md5sum + assert file_meta.sha256_checksum == expected_sha256sum # in memory - # UploadFile constructor: by not passing file, it enforces a tempfile.SpooledTemporaryFile - upload_in_memory = UploadFile(mock_filepath.name) + with tempfile.SpooledTemporaryFile() as spooled_tmpfile: + upload_in_memory = UploadFile(file=spooled_tmpfile, filename=mock_filepath.name) - assert isinstance(upload_in_memory.file, tempfile.SpooledTemporaryFile) - await upload_in_memory.write(FILE_CONTENT.encode()) + assert isinstance(upload_in_memory.file, tempfile.SpooledTemporaryFile) + await upload_in_memory.write(FILE_CONTENT.encode()) - await upload_in_memory.seek(0) - assert upload_in_memory.file.tell() == 0 + await upload_in_memory.seek(0) + assert upload_in_memory.file.tell() == 0 - file_meta = await File.create_from_uploaded(upload_in_memory) - assert upload_in_memory.file.tell() > 0, "modifies current position is at the end" + file_meta = await File.create_from_uploaded(upload_in_memory) + assert ( + upload_in_memory.file.tell() > 0 + ), "modifies current position is at the end" def test_convert_between_file_models(): - storage_file_meta = StorageFileMetaData( - **StorageFileMetaData.Config.schema_extra["examples"][1] + **StorageFileMetaData.model_json_schema()["examples"][1] + ) + storage_file_meta.file_id = TypeAdapter(StorageFileID).validate_python( + f"api/{uuid4()}/extensionless" ) - storage_file_meta.file_id = f"api/{uuid4()}/extensionless" apiserver_file_meta = to_file_api_model(storage_file_meta) assert apiserver_file_meta.id assert apiserver_file_meta.filename == "extensionless" assert apiserver_file_meta.content_type == "application/octet-stream" # default - assert apiserver_file_meta.checksum == storage_file_meta.entity_tag + assert apiserver_file_meta.e_tag == storage_file_meta.entity_tag with pytest.raises(ValueError): - storage_file_meta.file_id = f"{uuid4()}/{uuid4()}/foo.txt" + storage_file_meta.file_id = TypeAdapter(StorageFileID).validate_python( + f"{uuid4()}/{uuid4()}/foo.txt" + ) to_file_api_model(storage_file_meta) with pytest.raises(ValidationError): - storage_file_meta.file_id = "api/NOTUUID/foo.txt" + storage_file_meta.file_id = TypeAdapter(StorageFileID).validate_python( + "api/NOTUUID/foo.txt" + ) to_file_api_model(storage_file_meta) - - -@pytest.mark.parametrize("model_cls", (File,)) -def test_file_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - - model_instance = model_cls(**example) - - assert model_instance, f"Failed with {name}" - print(name, ":", model_instance) - - assert model_instance.content_type is not None diff --git a/services/api-server/tests/unit/test_models_schemas_jobs.py b/services/api-server/tests/unit/test_models_schemas_jobs.py index 2bc8d88909f..f11f8a494db 100644 --- a/services/api-server/tests/unit/test_models_schemas_jobs.py +++ b/services/api-server/tests/unit/test_models_schemas_jobs.py @@ -3,45 +3,28 @@ # pylint: disable=unused-variable import random +import textwrap import urllib.parse from copy import deepcopy -from pprint import pformat from uuid import uuid4 import 
pytest +from faker import Faker from fastapi import FastAPI +from models_library.api_schemas_webserver.projects_metadata import ProjectMetadataGet +from models_library.generics import Envelope from simcore_service_api_server._meta import API_VTAG from simcore_service_api_server.models.schemas.jobs import ( Job, + JobID, JobInputs, - JobOutputs, - JobStatus, + JobMetadata, ) from simcore_service_api_server.models.schemas.solvers import Solver -@pytest.mark.parametrize("model_cls", (Job, JobInputs, JobOutputs, JobStatus)) -def test_jobs_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -def test_create_job_model(): - job = Job.create_now("solvers/isolve/releases/1.3.4", "12345") - - print(job.json()) - assert job.id is not None - - # TODO: https://stackoverflow.com/questions/5802108/how-to-check-if-a-datetime-object-is-localized-with-pytz/27596917 - # TODO: @validator("created_at", always=True) - # def ensure_utc(cls, v): - # v.utc - - @pytest.mark.parametrize("repeat", range(100)) -def test_job_io_checksums(repeat): +def test_job_io_checksums(repeat: int): raw = { "values": { "x": 4.33, @@ -63,8 +46,8 @@ def _deepcopy_and_shuffle(src): return deepcopy(src) shuffled_raw = _deepcopy_and_shuffle(raw) - inputs1 = JobInputs.parse_obj(raw) - inputs2 = JobInputs.parse_obj(shuffled_raw) + inputs1 = JobInputs.model_validate(raw) + inputs2 = JobInputs.model_validate(shuffled_raw) print(inputs1) print(inputs2) @@ -75,8 +58,7 @@ def _deepcopy_and_shuffle(src): ), f"{inputs1}!={inputs2}" -def test_job_resouce_names_has_associated_url(app: FastAPI): - +def test_job_resource_names_has_associated_url(app: FastAPI): solver_key = "z43/name with spaces/isolve" solver_version = "1.0.3" job_id = uuid4() @@ -97,3 +79,48 @@ def test_job_resouce_names_has_associated_url(app: FastAPI): ) assert url_path == f"/{API_VTAG}/{urllib.parse.unquote_plus(job_name)}" + + +@pytest.mark.acceptance_test( + "Fixing https://github.com/ITISFoundation/osparc-simcore/issues/6556" +) +def test_parsing_job_custom_metadata(job_id: JobID, faker: Faker): + job_name = faker.name() + + got = Envelope[ProjectMetadataGet].model_validate_json( + textwrap.dedent( + f""" + {{ + "data": {{ + "projectUuid": "{job_id}", + "custom": {{ + "number": 3.14, + "string": "foo", + "boolean": true, + "integer": 42, + "job_id": "{job_id}", + "job_name": "{job_name}" + }} + }} + }} + """ + ) + ) + + assert got.data + assert got.data.custom == { + "number": 3.14, + "string": "foo", + "boolean": True, + "integer": 42, + "job_id": f"{job_id}", + "job_name": job_name, + } + + j = JobMetadata( + job_id=job_id, + metadata=got.data.custom or {}, + url=faker.url(), + ) + + assert j.metadata == got.data.custom diff --git a/services/api-server/tests/unit/test_models_schemas_meta.py b/services/api-server/tests/unit/test_models_schemas_meta.py deleted file mode 100644 index e955474be10..00000000000 --- a/services/api-server/tests/unit/test_models_schemas_meta.py +++ /dev/null @@ -1,16 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from pprint import pformat - -import pytest -from simcore_service_api_server.models.schemas.meta import Meta - - -@pytest.mark.parametrize("model_cls", (Meta,)) -def test_meta_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", 
pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" diff --git a/services/api-server/tests/unit/test_models_schemas_profiles.py b/services/api-server/tests/unit/test_models_schemas_profiles.py deleted file mode 100644 index f5d24b278d2..00000000000 --- a/services/api-server/tests/unit/test_models_schemas_profiles.py +++ /dev/null @@ -1,22 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from pprint import pformat - -import pytest -from simcore_postgres_database.models.users import UserRole -from simcore_service_api_server.models.schemas.profiles import Profile, UserRoleEnum - - -@pytest.mark.parametrize("model_cls", (Profile,)) -def test_profiles_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -def test_enums_in_sync(): - # if this test fails, API needs to be updated - assert set(e.value for e in UserRole) == set(e.value for e in UserRoleEnum) diff --git a/services/api-server/tests/unit/test_models_schemas_solvers.py b/services/api-server/tests/unit/test_models_schemas_solvers.py index 551d2a1d6d1..491efdcfffd 100644 --- a/services/api-server/tests/unit/test_models_schemas_solvers.py +++ b/services/api-server/tests/unit/test_models_schemas_solvers.py @@ -3,29 +3,17 @@ # pylint: disable=unused-variable from operator import attrgetter -from pprint import pformat -import pytest -from simcore_service_api_server.models.schemas.solvers import ( - Solver, - SolverPort, - Version, -) +from faker import Faker +from packaging.version import Version +from simcore_service_api_server.models.schemas.solvers import Solver -@pytest.mark.parametrize("model_cls", (Solver, SolverPort)) -def test_solvers_model_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -def test_solvers_sorting_by_name_and_version(faker): +def test_solvers_sorting_by_name_and_version(faker: Faker): # SEE https://packaging.pypa.io/en/latest/version.html # have a solver - one_solver = Solver(**Solver.Config.schema_extra["example"]) + one_solver = Solver(**Solver.model_json_schema()["example"]) assert isinstance(one_solver.pep404_version, Version) major, minor, micro = one_solver.pep404_version.release @@ -33,14 +21,16 @@ def test_solvers_sorting_by_name_and_version(faker): # and a different version of the same # NOTE: that id=None so that it can be re-coputed - earlier_release = one_solver.copy( + earlier_release = one_solver.model_copy( update={"version": f"{one_solver.version}beta"}, deep=True ) assert earlier_release.pep404_version.is_prerelease assert earlier_release.pep404_version < one_solver.pep404_version # and yet a completely different solver - another_solver = one_solver.copy(update={"id": "simcore/services/comp/zSolve"}) + another_solver = one_solver.model_copy( + update={"id": "simcore/services/comp/zSolve"} + ) assert one_solver.id != another_solver.id assert one_solver.pep404_version == another_solver.pep404_version diff --git a/services/api-server/tests/unit/test_services_directorv2.py b/services/api-server/tests/unit/test_services_directorv2.py new file mode 100644 index 00000000000..e30187c1707 --- /dev/null +++ 
b/services/api-server/tests/unit/test_services_directorv2.py @@ -0,0 +1,71 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +from fastapi import FastAPI, status +from httpx import AsyncClient +from models_library.projects import ProjectID +from models_library.users import UserID +from respx import MockRouter +from settings_library.director_v2 import DirectorV2Settings +from simcore_service_api_server.exceptions.backend_errors import JobNotFoundError +from simcore_service_api_server.services_http.director_v2 import DirectorV2Api + + +@pytest.fixture +def api() -> DirectorV2Api: + settings = DirectorV2Settings() + app = FastAPI() + + return DirectorV2Api.create_once( + app=app, + client=AsyncClient(base_url=settings.base_url), + service_name="director_v2", + ) + + +async def test_oec_139646582688800_missing_ctx_values_for_msg_template( + mocked_directorv2_rest_api_base: MockRouter, + project_id: ProjectID, + user_id: UserID, + api: DirectorV2Api, +): + # + # tests to reproduce reported OEC:139646582688800 + # + + # File "/home/scu/.venv/lib/python3.10/site-packages/simcore_service_api_server/services/director_v2.py", line 135, in get_computation + # response.raise_for_status() + # File "/home/scu/.venv/lib/python3.10/site-packages/httpx/_models.py", line 761, in raise_for_status + # raise HTTPStatusError(message, request=request, response=self) + # httpx.HTTPStatusError: Client error '404 Not Found' for url '/v2/computations/c7ad07d3-513f-4368-bcf0-354143b6a048?user_id=94' + + for method in ("GET", "POST", "DELETE"): + mocked_directorv2_rest_api_base.request( + method, + path__regex=r"/v2/computations/", + ).respond(status_code=status.HTTP_404_NOT_FOUND) + + # File "/home/scu/.venv/lib/python3.10/site-packages/simcore_service_api_server/exceptions/service_errors_utils.py", line 116, in service_exception_handler + # status_code, detail, headers = _get_http_exception_kwargs( + # File "/home/scu/.venv/lib/python3.10/site-packages/simcore_service_api_server/exceptions/service_errors_utils.py", line 66, in _get_http_exception_kwargs + # raise exception_type(**detail_kwargs) + # simcore_service_api_server.exceptions.backend_errors.JobNotFoundError: <-- !!!!!!!!! 
+ # + # File "/home/scu/.venv/lib/python3.10/site-packages/simcore_service_api_server/exceptions/handlers/_handlers_backend_errors.py", line 12, in backend_error_handler + # return create_error_json_response(f"{exc}", status_code=exc.status_code) + # File "pydantic/errors.py", line 127, in pydantic.errors.PydanticErrorMixin.__str__ + # KeyError: 'project_id' + # + + with pytest.raises(JobNotFoundError, match=f"{project_id}"): + await api.get_computation(user_id=user_id, project_id=project_id) + + with pytest.raises(JobNotFoundError, match=f"{project_id}"): + await api.stop_computation(user_id=user_id, project_id=project_id) + + with pytest.raises(JobNotFoundError, match=f"{project_id}"): + await api.delete_computation(user_id=user_id, project_id=project_id) diff --git a/services/api-server/tests/unit/test_services_rabbitmq.py b/services/api-server/tests/unit/test_services_rabbitmq.py new file mode 100644 index 00000000000..c2e31b0a3d6 --- /dev/null +++ b/services/api-server/tests/unit/test_services_rabbitmq.py @@ -0,0 +1,505 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=R6301 + +import asyncio +import logging +import random +from collections.abc import AsyncIterable, Callable, Iterable +from contextlib import asynccontextmanager +from datetime import datetime, timedelta +from typing import Final, Literal, cast +from unittest.mock import AsyncMock +from uuid import UUID + +import httpx +import pytest +import respx +from attr import dataclass +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.rabbitmq_messages import LoggerRabbitMessage, RabbitMessageBase +from models_library.users import UserID +from pydantic import ValidationError +from pytest_mock import MockerFixture, MockFixture +from pytest_simcore.helpers.monkeypatch_envs import ( + EnvVarsDict, + delenvs_from_dict, + setenvs_from_dict, +) +from servicelib.rabbitmq import RabbitMQClient +from simcore_service_api_server.api.dependencies.rabbitmq import get_log_distributor +from simcore_service_api_server.core.health_checker import get_health_checker +from simcore_service_api_server.models.schemas.jobs import JobID, JobLog +from simcore_service_api_server.services_http.director_v2 import ( + ComputationTaskGet, + DirectorV2Api, +) +from simcore_service_api_server.services_http.log_streaming import ( + LogDistributor, + LogStreamer, + LogStreamerRegistrationConflictError, +) +from tenacity import AsyncRetrying, retry_if_not_exception_type, stop_after_delay + +pytest_simcore_core_services_selection = [ + "rabbit", +] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, +) -> EnvVarsDict: + # do not init other services + delenvs_from_dict(monkeypatch, ["API_SERVER_RABBITMQ"]) + return setenvs_from_dict( + monkeypatch, + { + **rabbit_env_vars_dict, + "API_SERVER_POSTGRES": "null", + "API_SERVER_HEALTH_CHECK_TASK_PERIOD_SECONDS": "3", + "API_SERVER_HEALTH_CHECK_TASK_TIMEOUT_SECONDS": "1", + }, + ) + + +@pytest.fixture +def mock_missing_plugins(app_environment: EnvVarsDict, mocker: MockerFixture): + 
mocker.patch("simcore_service_api_server.core.application.webserver.setup") + mocker.patch("simcore_service_api_server.core.application.storage.setup") + + +@pytest.fixture +async def log_distributor( + create_rabbitmq_client: Callable[[str], RabbitMQClient], +) -> AsyncIterable[LogDistributor]: + log_distributor = LogDistributor(create_rabbitmq_client("log_distributor_client")) + await log_distributor.setup() + yield log_distributor + await log_distributor.teardown() + + +async def test_subscribe_publish_receive_logs( + faker: Faker, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + log_distributor: LogDistributor, + mocker: MockerFixture, +): + @dataclass + class MockQueue: + called: bool = False + job_log: JobLog | None = None + + async def put(self, job_log: JobLog): + self.called = True + self.job_log = job_log + assert isinstance(job_log, JobLog) + + mock_queue = MockQueue() + await log_distributor.register(project_id, mock_queue) # type: ignore + + # log producer + rabbitmq_producer = create_rabbitmq_client("pytest_producer") + log_message = LoggerRabbitMessage( + user_id=user_id, + project_id=project_id, + node_id=node_id, + messages=[faker.text() for _ in range(10)], + ) + await rabbitmq_producer.publish(log_message.channel_name, log_message) + + # check it received + await asyncio.sleep(1) + await log_distributor.deregister(project_id) + + assert mock_queue.called + job_log = mock_queue.job_log + assert isinstance(job_log, JobLog) + assert job_log.job_id == log_message.project_id + + +@asynccontextmanager +async def _rabbit_consuming_context( + app: FastAPI, + project_id: ProjectID, +) -> AsyncIterable[AsyncMock]: + + queue = asyncio.Queue() + queue.put = AsyncMock() + log_distributor: LogDistributor = get_log_distributor(app) + await log_distributor.register(project_id, queue) + + yield queue.put + + await log_distributor.deregister(project_id) + + +@pytest.fixture +def produce_logs( + faker: Faker, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + user_id: UserID, +): + async def _go( + name, + project_id_=None, + node_id_=None, + messages_=None, + level_=None, + log_message: RabbitMessageBase | None = None, + ): + rabbitmq_producer = create_rabbitmq_client(f"pytest_producer_{name}") + if log_message is None: + log_message = LoggerRabbitMessage( + user_id=user_id, + project_id=project_id_ or cast(UUID, faker.uuid4(cast_to=None)), + node_id=node_id_, + messages=messages_ or [faker.text() for _ in range(10)], + log_level=level_ or logging.INFO, + ) + await rabbitmq_producer.publish(log_message.channel_name, log_message) + + return _go + + +async def test_multiple_producers_and_single_consumer( + client: httpx.AsyncClient, + app: FastAPI, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + produce_logs: Callable, +): + await produce_logs("lost", project_id) + + async with _rabbit_consuming_context(app, project_id) as consumer_message_handler: + # multiple producers + asyncio.gather( + *[ + produce_logs("expected", project_id, node_id, ["expected message"] * 3), + *(produce_logs(f"{n}") for n in range(5)), + ] + ) + await asyncio.sleep(1) + + # check it received + assert consumer_message_handler.await_count == 1 + (job_log,) = consumer_message_handler.call_args[0] + assert isinstance(job_log, JobLog) + + assert job_log.job_id == project_id + assert job_log.node_id == node_id + assert job_log.messages == ["expected message"] * 3 + + +# +# -------------------- +# + + +async def 
test_one_job_multiple_registrations( + log_distributor: LogDistributor, project_id: ProjectID +): + async def _(job_log: JobLog): + pass + + await log_distributor.register(project_id, _) + with pytest.raises(LogStreamerRegistrationConflictError): + await log_distributor.register(project_id, _) + await log_distributor.deregister(project_id) + + +async def test_log_distributor_register_deregister( + project_id: ProjectID, + node_id: NodeID, + log_distributor: LogDistributor, + produce_logs: Callable, + faker: Faker, +): + collected_logs: list[str] = [] + + class MockQueue: + async def put(self, job_log: JobLog): + for msg in job_log.messages: + collected_logs.append(msg) + + queue = MockQueue() + published_logs: list[str] = [] + + async def _log_publisher(): + for _ in range(5): + msg: str = faker.text() + await asyncio.sleep(0.1) + await produce_logs("expected", project_id, node_id, [msg], logging.DEBUG) + published_logs.append(msg) + + await log_distributor.register(project_id, queue) # type: ignore + publisher_task = asyncio.create_task(_log_publisher()) + await asyncio.sleep(0.1) + await log_distributor.deregister(project_id) + await asyncio.sleep(0.1) + await log_distributor.register(project_id, queue) # type: ignore + await asyncio.gather(publisher_task) + await asyncio.sleep(0.5) + await log_distributor.deregister(project_id) + + assert len(log_distributor._log_streamers.keys()) == 0 + assert len(collected_logs) > 0 + assert set(collected_logs).issubset( + set(published_logs) + ) # some logs might get lost while being deregistered + + +async def test_log_distributor_multiple_streams( + project_id: ProjectID, + node_id: NodeID, + log_distributor: LogDistributor, + produce_logs: Callable, + faker: Faker, +): + job_ids: Final[list[JobID]] = [JobID(faker.uuid4()) for _ in range(2)] + + collected_logs: dict[JobID, list[str]] = {id_: [] for id_ in job_ids} + + class MockQueue: + async def put(self, job_log: JobLog): + job_id = job_log.job_id + assert (msgs := collected_logs.get(job_id)) is not None + for msg in job_log.messages: + msgs.append(msg) + + queue = MockQueue() + published_logs: dict[JobID, list[str]] = {id_: [] for id_ in job_ids} + + async def _log_publisher(): + for _ in range(5): + msg: str = faker.text() + await asyncio.sleep(0.1) + job_id: JobID = random.choice(job_ids) + await produce_logs("expected", job_id, node_id, [msg], logging.DEBUG) + published_logs[job_id].append(msg) + + for job_id in job_ids: + await log_distributor.register(job_id, queue) # type: ignore + publisher_task = asyncio.create_task(_log_publisher()) + await asyncio.gather(publisher_task) + await asyncio.sleep(0.5) + for job_id in job_ids: + await log_distributor.deregister(job_id) + for key in collected_logs: + assert key in published_logs + assert collected_logs[key] == published_logs[key] + + +# +# -------------------- +# + + +@pytest.fixture +def computation_done() -> Iterable[Callable[[], bool]]: + stop_time: Final[datetime] = datetime.now() + timedelta(seconds=2) + + def _job_done() -> bool: + return datetime.now() >= stop_time + + return _job_done + + +@pytest.fixture +async def log_streamer_with_distributor( + client: httpx.AsyncClient, + app: FastAPI, + project_id: ProjectID, + user_id: UserID, + mocked_directorv2_rest_api_base: respx.MockRouter, + computation_done: Callable[[], bool], + log_distributor: LogDistributor, +) -> AsyncIterable[LogStreamer]: + def _get_computation(request: httpx.Request, **kwargs) -> httpx.Response: + task = ComputationTaskGet.model_validate( + 
ComputationTaskGet.model_json_schema()["examples"][0] + ) + if computation_done(): + task.state = RunningState.SUCCESS + task.stopped = datetime.now() + return httpx.Response( + status_code=status.HTTP_200_OK, json=jsonable_encoder(task) + ) + + mocked_directorv2_rest_api_base.get(f"/v2/computations/{project_id}").mock( + side_effect=_get_computation + ) + + assert isinstance(d2_client := DirectorV2Api.get_instance(app), DirectorV2Api) + yield LogStreamer( + user_id=user_id, + director2_api=d2_client, + job_id=project_id, + log_distributor=log_distributor, + log_check_timeout=1, + ) + + assert len(log_distributor._log_streamers.keys()) == 0 + + +async def test_log_streamer_with_distributor( + project_id: ProjectID, + node_id: NodeID, + produce_logs: Callable, + log_distributor: LogDistributor, + log_streamer_with_distributor: LogStreamer, + faker: Faker, + computation_done: Callable[[], bool], +): + published_logs: list[str] = [] + + async def _log_publisher(): + while not computation_done(): + msg: str = faker.text() + await produce_logs("expected", project_id, node_id, [msg], logging.DEBUG) + published_logs.append(msg) + + publish_task = asyncio.create_task(_log_publisher()) + + @asynccontextmanager + async def registered_log_streamer(): + await log_distributor.register(project_id, log_streamer_with_distributor.queue) + try: + yield + finally: + await log_distributor.deregister(project_id) + + collected_messages: list[str] = [] + async with registered_log_streamer(): + async for log in log_streamer_with_distributor.log_generator(): + job_log: JobLog = JobLog.model_validate_json(log) + assert len(job_log.messages) == 1 + assert job_log.job_id == project_id + collected_messages.append(job_log.messages[0]) + + if not publish_task.done(): + publish_task.cancel() + try: + await publish_task + except asyncio.CancelledError: + pass + + assert len(published_logs) > 0 + assert published_logs == collected_messages + + +async def test_log_streamer_not_raise_with_distributor( + user_id, + project_id: ProjectID, + node_id: NodeID, + produce_logs: Callable, + log_streamer_with_distributor: LogStreamer, + faker: Faker, + computation_done: Callable[[], bool], +): + class InvalidLoggerRabbitMessage(LoggerRabbitMessage): + channel_name: Literal["simcore.services.logs.v2"] = "simcore.services.logs.v2" + node_id: NodeID | None + messages: int + log_level: int = logging.INFO + + def routing_key(self) -> str: + return f"{self.project_id}.{self.log_level}" + + log_rabbit_message = InvalidLoggerRabbitMessage( + user_id=user_id, + project_id=project_id, + node_id=node_id, + messages=100, + log_level=logging.INFO, + ) + with pytest.raises(ValidationError): + LoggerRabbitMessage.model_validate(log_rabbit_message.model_dump()) + + await produce_logs("expected", log_message=log_rabbit_message) + + ii: int = 0 + async for log in log_streamer_with_distributor.log_generator(): + _ = JobLog.model_validate_json(log) + ii += 1 + assert ii == 0 + + +class _MockLogDistributor: + async def register(self, job_id: UUID, queue: asyncio.Queue): + return None + + async def deregister(self, job_id: None): + return None + + +async def test_log_generator(mocker: MockFixture, faker: Faker): + mocker.patch( + "simcore_service_api_server.services_http.log_streaming.LogStreamer._project_done", + return_value=True, + ) + log_streamer = LogStreamer(user_id=3, director2_api=None, job_id=None, log_distributor=_MockLogDistributor(), log_check_timeout=1) # type: ignore + + published_logs: list[str] = [] + for _ in range(10): + job_log = 
JobLog.model_validate(JobLog.model_json_schema()["example"]) + msg = faker.text() + published_logs.append(msg) + job_log.messages = [msg] + await log_streamer.queue.put(job_log) + + collected_logs: list[str] = [] + async for log in log_streamer.log_generator(): + job_log = JobLog.model_validate_json(log) + assert len(job_log.messages) == 1 + collected_logs.append(job_log.messages[0]) + + assert published_logs == collected_logs + + +@pytest.mark.parametrize("is_healthy", [True, False]) +async def test_logstreaming_health_checker( + mocker: MockFixture, client: httpx.AsyncClient, app: FastAPI, is_healthy: bool +): + health_checker = get_health_checker(app) + health_checker._timeout_seconds = 0.5 + health_checker._allowed_health_check_failures = 0 + put_method = health_checker._dummy_queue.put + + async def put_mock(log: JobLog): + put_mock.called = True + if is_healthy: + await put_method(log) + + put_mock.called = False + mocker.patch.object(health_checker._dummy_queue, "put", put_mock) + health_setter = mocker.spy(health_checker, "_increment_health_check_failure_count") + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(5), + retry=retry_if_not_exception_type(AssertionError), + ): + with attempt: + await asyncio.sleep(1) + assert put_mock.called + if is_healthy: + health_setter.assert_not_called() + else: + health_setter.assert_called() + + assert health_checker.healthy == is_healthy, "Health check failed" diff --git a/services/api-server/tests/unit/test_services_solver_job_models_converters.py b/services/api-server/tests/unit/test_services_solver_job_models_converters.py new file mode 100644 index 00000000000..5be97bdca2a --- /dev/null +++ b/services/api-server/tests/unit/test_services_solver_job_models_converters.py @@ -0,0 +1,253 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from faker import Faker +from models_library.projects import Project +from models_library.projects_nodes import InputsDict, InputTypes, SimCoreFileLink +from pydantic import HttpUrl, RootModel, TypeAdapter, create_model +from simcore_service_api_server.models.schemas.files import File +from simcore_service_api_server.models.schemas.jobs import ArgumentTypes, Job, JobInputs +from simcore_service_api_server.models.schemas.solvers import Solver +from simcore_service_api_server.services_http.solver_job_models_converters import ( + create_job_from_project, + create_job_inputs_from_node_inputs, + create_jobstatus_from_task, + create_new_project_for_job, + create_node_inputs_from_job_inputs, +) + + +def test_create_project_model_for_job(faker: Faker): + solver = Solver.model_validate( + { + "id": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "title": "sleeper", + "description": "A service which awaits for time to pass.", + "maintainer": "info@itis.swiss", + "url": "http://127.0.0.1:8006/v0/solvers/simcore/services/comp/itis/sleeper/releases/2.0.2", + } + ) + + inputs = JobInputs.model_validate( + { + "values": { + "input_3": False, # Fail after sleep ? + "input_2": 3, # sleep interval (secs) + "input_1": { + "id": "e2335f87-6cf9-3148-87d4-262901403621", + "filename": "file_with_number.txt", + "content_type": "text/plain", + "checksum": faker.sha256(), + }, + } + } + ) + + print(inputs.model_dump_json(indent=2)) + + job = Job.create_job_from_solver_or_program( + solver_or_program_name=solver.name, inputs=inputs + ) + + # body of create project! 
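+        # create_new_project_for_job() maps the Job onto the web-server's create-project
+        # request body; the one-to-one relation (same uuid and name) is asserted just below.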
+ createproject_body = create_new_project_for_job( + solver_or_program=solver, + job=job, + inputs=inputs, + description=None, + project_name=None, + ) + + # ensures one-to-one relation + assert createproject_body.uuid == job.id + assert createproject_body.name == job.name + + +def test_job_to_node_inputs_conversion(): + # TODO: add here service input schemas and cast correctly? + + # Two equivalent inputs + job_inputs = JobInputs( + values={ + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": True, + "some_list": [1, 2, "foo"], + "input_file": File( + filename="input.txt", + id="0a3b2c56-dbcd-4871-b93b-d454b7883f9f", + e_tag="859fda0cb82fc4acb4686510a172d9a9-1", + ), + } + ) + for value in job_inputs.values.values(): + assert TypeAdapter(ArgumentTypes).validate_python(value) == value + + node_inputs: InputsDict = { + "x": 4.33, + "n": 55, + "title": "Temperature", + "enabled": True, + "some_list": [1, 2, "foo"], + "input_file": SimCoreFileLink( + store=0, + path="api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", + eTag="859fda0cb82fc4acb4686510a172d9a9-1", + label="input.txt", + ), + } + + for value in node_inputs.values(): + assert TypeAdapter(InputTypes).validate_python(value) == value + + # test transformations in both directions + got_node_inputs = create_node_inputs_from_job_inputs(inputs=job_inputs) + got_job_inputs = create_job_inputs_from_node_inputs(inputs=node_inputs) + + NodeInputs = create_model("NodeInputs", __base__=RootModel[dict[str, InputTypes]]) + print(NodeInputs.model_validate(got_node_inputs).model_dump_json(indent=2)) + print(got_job_inputs.model_dump_json(indent=2)) + + assert got_job_inputs == job_inputs + assert got_node_inputs == node_inputs + + +def test_create_job_from_project(faker: Faker): + project = Project.model_validate( + { + "uuid": "f925e30f-19de-42dc-acab-3ce93ea0a0a7", + "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7", + "description": 'Study associated to solver job:\n{\n "id": "f925e30f-19de-42dc-acab-3ce93ea0a0a7",\n "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7",\n "inputs_checksum": "aac0bb28285d6e5918121630fa8c368130c6b05f80fd9622760078608fc44e96",\n "created_at": "2021-03-26T10:43:27.828975"\n}', + "thumbnail": "https://2xx2gy2ovf3r21jclkjio3x8-wpengine.netdna-ssl.com/wp-content/uploads/2018/12/API-Examples.jpg", + "prjOwner": "foo@itis.swiss", + "type": "STANDARD", + "templateType": None, + "creationDate": "2021-03-26T10:43:27.867Z", + "lastChangeDate": "2021-03-26T10:43:33.595Z", + "workbench": { + "e694de0b-2e91-5be7-9319-d89404170991": { + "key": "simcore/services/comp/itis/sleeper", + "version": "2.0.2", + "label": "sleeper", + "progress": 100, + "thumbnail": None, + "runHash": "1c4e09777dbf6fb1ab4bfb02f8e62c7b6fc07393d8c880d5762a86afeddb30b5", + "inputs": { + "input_3": 0, + "input_2": 3, + "input_1": { + "store": 0, + "path": "api/bfb821c0-a4ef-305e-a23b-4d79065f0078/file_with_number.txt", + "eTag": None, + "label": "file_with_number.txt", + }, + }, + "inputAccess": None, + "inputNodes": [], + "outputs": { + "output_1": { + "store": 0, + "path": "f925e30f-19de-42dc-acab-3ce93ea0a0a7/e694de0b-2e91-5be7-9319-d89404170991/single_number.txt", + "eTag": "6c22e9b968b205c0dd3614edd1b28d35-1", + }, + "output_2": 1, + }, + "outputNode": None, + "outputNodes": None, + "parent": None, + "position": None, + "state": { + "currentStatus": "SUCCESS", + "modified": False, + "dependencies": [], + }, + } + }, + "accessRights": {"2": 
{"read": True, "write": True, "delete": True}}, + "dev": {}, + "classifiers": [], + "ui": { + "slideshow": {}, + "workbench": { + "e694de0b-2e91-5be7-9319-d89404170991": { + "position": {"x": 633, "y": 229} + } + }, + "currentNodeId": "e694de0b-2e91-5be7-9319-d89404170991", + }, + "quality": {}, + "tags": [], + "state": { + "locked": { + "value": False, + "status": "CLOSED", + }, + "state": {"value": "SUCCESS"}, + }, + }, + ) + + expected_job = Job.model_validate( + { + "id": "f925e30f-19de-42dc-acab-3ce93ea0a0a7", + "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7", + "created_at": "2021-03-26T10:43:27.867Z", + "runner_name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2", + "inputs_checksum": "a887dfcac17f6e6045139ec00fbb3038a450111fdc8e10e94d66590f33b3f10e", + "url": None, + "runner_url": None, + "outputs_url": None, + } + ) + + solver_key = "simcore/services/comp/itis/sleeper" + solver_version = "2.0.2" + + def fake_url_for(*args, **kwargs) -> HttpUrl: + return HttpUrl(faker.url()) + + solver = Solver( + id=solver_key, + version=solver_version, + title=faker.text(max_nb_chars=20), + maintainer=faker.name(), + description=faker.text(max_nb_chars=100), + url=None, + ) + + job = create_job_from_project( + solver_or_program=solver, project=project, url_for=fake_url_for + ) + + assert job.id == project.uuid + + non_propagated_fields = { + name for name in job.model_fields if name.endswith("url") + }.union({"name"}) + assert all(getattr(job, _) for _ in non_propagated_fields) + + # this tends to be a problem + assert job.inputs_checksum == expected_job.inputs_checksum + assert job.model_dump(exclude=non_propagated_fields) == expected_job.model_dump( + exclude=non_propagated_fields + ) + + +@pytest.mark.skip(reason="TODO: next PR") +def test_create_jobstatus_from_task(): + from simcore_service_api_server.models.schemas.jobs import JobStatus + from simcore_service_api_server.services_http.director_v2 import ComputationTaskGet + + task = ComputationTaskGet.model_validate({}) # TODO: + job_status: JobStatus = create_jobstatus_from_task(task) + + assert job_status.job_id == task.id + + # TODO: activate + # #frozen = True + # #allow_mutation = False + # and remove take_snapshot by generating A NEW JobStatus! 
diff --git a/services/api-server/tests/unit/test_services_solver_job_outputs.py b/services/api-server/tests/unit/test_services_solver_job_outputs.py new file mode 100644 index 00000000000..01612e197e5 --- /dev/null +++ b/services/api-server/tests/unit/test_services_solver_job_outputs.py @@ -0,0 +1,33 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import types +from typing import Union, get_args, get_origin + +from simcore_service_api_server.models.schemas.jobs import ArgumentTypes, File +from simcore_service_api_server.services_http.solver_job_outputs import ( + BaseFileLink, + ResultsTypes, +) + + +def test_resultstypes_and_argument_type_sync(): + # I/O types returned by node-ports must be one-to-one mapped + # with those returned as output results + + # Python 3.10 and later treats unions with | as types.UnionType + assert get_origin(ArgumentTypes) in (types.UnionType, Union) + argument_types_args = set(get_args(ArgumentTypes)) + + assert get_origin(ResultsTypes) in (types.UnionType, Union) + results_types_args = set(get_args(ResultsTypes)) + + # files are in the inputs as File (or Raises KeyError if not) + argument_types_args.remove(File) + + # files are in the outputs as Links (or Raises KeyError if not) + results_types_args.remove(BaseFileLink) + + # identical except for File/BaseFileLink + assert argument_types_args == results_types_args diff --git a/services/api-server/tests/unit/test_utils_client_base.py b/services/api-server/tests/unit/test_utils_client_base.py index 75f527c6c62..9fe2da1a28c 100644 --- a/services/api-server/tests/unit/test_utils_client_base.py +++ b/services/api-server/tests/unit/test_utils_client_base.py @@ -14,8 +14,6 @@ setup_client_instance, ) -pytestmark = pytest.mark.asyncio - @pytest.fixture def the_service(): @@ -45,12 +43,12 @@ class TheClientApi(BaseServiceClientApi): service_name="the_service", health_check_path="/health", x=42, + tracing_settings=None, ) assert not TheClientApi.get_instance(app) # test startup/shutdown async with LifespanManager(app): - # check startup assert TheClientApi.get_instance(app) api_obj = TheClientApi.get_instance(app) diff --git a/services/api-server/tests/unit/test_utils_http_calls_capture.py b/services/api-server/tests/unit/test_utils_http_calls_capture.py new file mode 100644 index 00000000000..62e3ddac64e --- /dev/null +++ b/services/api-server/tests/unit/test_utils_http_calls_capture.py @@ -0,0 +1,140 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import re +from pathlib import Path + +import httpx +import jinja2 +import respx +from faker import Faker +from models_library.basic_regex import UUID_RE_BASE +from pydantic import HttpUrl +from pytest_simcore.helpers.httpx_calls_capture_models import HttpApiCallCaptureModel + + +async def test_capture_http_call(httpbin_base_url: HttpUrl): + # CAPTURE + async with httpx.AsyncClient() as client: + response: httpx.Response = await client.get(f"{httpbin_base_url}json") + print(response) + + _request: httpx.Request = response.request + assert response.request + + captured = HttpApiCallCaptureModel.create_from_response( + response, name="get_json", enhance_from_openapi_specs=False + ) + + print(captured.model_dump_json(indent=1)) + + # MOCK + with respx.mock( + base_url="http://test.it", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+    ) as respx_mock:
+        respx_mock.request(
+            method=captured.method,
+            path=captured.path,
+            name=captured.name,
+        ).respond(
+            status_code=captured.status_code,
+            json=captured.response_body,
+        )
+
+        response: httpx.Response = await client.get("http://test.it/json")
+
+        assert respx_mock[captured.name].called
+        assert response.json() == captured.response_body
+        assert response.status_code == captured.status_code
+
+
+async def test_capture_http_dynamic_call(faker: Faker, httpbin_base_url: str):
+    # CAPTURE
+    async with httpx.AsyncClient() as client:
+        sample_uid = faker.uuid4()  # used during test sampling
+
+        response: httpx.Response = await client.post(
+            f"{httpbin_base_url}anything/{sample_uid}",
+            params={"n": 42},
+            json={
+                "resource_id": sample_uid,
+                "static": "constant",
+            },
+        )
+        print(response)
+
+        _request: httpx.Request = response.request
+        assert response.request
+
+        captured = HttpApiCallCaptureModel.create_from_response(
+            response, name="get_anything", enhance_from_openapi_specs=False
+        )
+
+        assert captured.query == "n=42"
+
+        # pattern with named-group
+        pattern = rf"(?P<resouce_uid>{UUID_RE_BASE})"
+        found = re.search(pattern, captured.path)
+        assert found
+        assert found.groupdict() == {"resouce_uid": sample_uid}
+
+        # subs_json = re.sub(f"{resource_uid}", pattern, captured.json())
+        # new_capture = HttpApiCallCaptureModel.model_validate_json(subs_json)
+
+        # MOCK
+        with respx.mock(
+            base_url="http://test.it",
+            assert_all_called=True,
+            assert_all_mocked=True,  # IMPORTANT: KEEP always True!
+        ) as respx_mock:
+            respx_mock.request(
+                method=captured.method,
+                path__regex=re.sub(
+                    f"{sample_uid}", pattern, captured.path
+                ),  # using REGEX
+                name=captured.name,
+            ).respond(
+                status_code=captured.status_code,
+                json=captured.response_body,
+            )
+
+            other_uid = faker.uuid4()
+
+            response = await client.post(
+                f"http://test.it/anything/{other_uid}",
+                params={"n": 42},
+                json={
+                    "resource_id": other_uid,
+                    "static": "constant",
+                },
+            )
+
+            assert respx_mock[captured.name].called
+            assert response.json() == captured.response_body
+            assert response.status_code == captured.status_code
+
+
+def test_template_capture(project_tests_dir: Path, faker: Faker):
+    # parse request and search parameters
+    url_path = f"/v0/projects/{faker.uuid4()}"
+    pattern = re.compile(rf"/projects/(?P<project_id>{UUID_RE_BASE})$")
+    found = pattern.search(url_path)
+    assert found
+    context = found.groupdict()
+
+    # get parameters from capture
+    environment = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(project_tests_dir / "mocks"), autoescape=True
+    )
+    template = environment.get_template("delete_project_not_found.json")
+
+    # loads parametrized capture
+    # replace in response and solve
+    capture = HttpApiCallCaptureModel.model_validate_json(template.render(context))
+    print(capture.model_dump_json(indent=1))
+    assert capture.path == url_path
diff --git a/services/api-server/tests/unit/test_utils_solver_job_models_converters.py b/services/api-server/tests/unit/test_utils_solver_job_models_converters.py
deleted file mode 100644
index 3f2c4194413..00000000000
--- a/services/api-server/tests/unit/test_utils_solver_job_models_converters.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# pylint: disable=redefined-outer-name
-# pylint: disable=unused-argument
-# pylint: disable=unused-variable
-
-import pytest
-from models_library.projects import Project
-from models_library.projects_nodes import InputsDict, InputTypes, SimCoreFileLink
-from pydantic import create_model
-from simcore_service_api_server.models.schemas.files import
File -from simcore_service_api_server.models.schemas.jobs import ArgumentType, Job, JobInputs -from simcore_service_api_server.models.schemas.solvers import Solver -from simcore_service_api_server.utils.solver_job_models_converters import ( - create_job_from_project, - create_job_inputs_from_node_inputs, - create_jobstatus_from_task, - create_new_project_for_job, - create_node_inputs_from_job_inputs, - get_types, -) - - -def test_create_project_model_for_job(): - solver = Solver.parse_obj( - { - "id": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "title": "sleeper", - "description": "A service which awaits for time to pass.", - "maintainer": "info@itis.swiss", - "url": "http://127.0.0.1:8006/v0/solvers/simcore/services/comp/itis/sleeper/releases/2.0.2", - } - ) - - inputs = JobInputs.parse_obj( - { - "values": { - "input_3": False, # Fail after sleep ? - "input_2": 3, # sleep interval (secs) - "input_1": { - "id": "e2335f87-6cf9-3148-87d4-262901403621", - "filename": "file_with_number.txt", - "content_type": "text/plain", - "checksum": "9fdfbdb9686b3391bbea7c9e74aba49e-1", - }, - } - } - ) - - print(inputs.json(indent=2)) - - job = Job.create_solver_job(solver=solver, inputs=inputs) - - # body of create project! - createproject_body = create_new_project_for_job(solver, job, inputs) - - # ensures one-to-one relation - assert createproject_body.uuid == job.id - assert createproject_body.name == job.name - - -def test_job_to_node_inputs_conversion(): - # TODO: add here service input schemas and cast correctly? - - # Two equivalent inputs - job_inputs = JobInputs( - values={ - "x": 4.33, - "n": 55, - "title": "Temperature", - "enabled": True, - "input_file": File( - filename="input.txt", - id="0a3b2c56-dbcd-4871-b93b-d454b7883f9f", - checksum="859fda0cb82fc4acb4686510a172d9a9-1", - ), - } - ) - for name, value in job_inputs.values.items(): - assert isinstance(value, get_types(ArgumentType)), f"Invalid type in {name}" - - node_inputs: InputsDict = { - "x": 4.33, - "n": 55, - "title": "Temperature", - "enabled": True, - "input_file": SimCoreFileLink( - store=0, - path="api/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", - eTag="859fda0cb82fc4acb4686510a172d9a9-1", - label="input.txt", - ), - } - - for name, value in node_inputs.items(): - # TODO: py3.8 use typings.get_args - assert isinstance(value, get_types(InputTypes)), f"Invalid type in {name}" - - # test transformations in both directions - got_node_inputs = create_node_inputs_from_job_inputs(inputs=job_inputs) - got_job_inputs = create_job_inputs_from_node_inputs(inputs=node_inputs) - - NodeInputs = create_model("NodeInputs", __root__=(dict[str, InputTypes], ...)) - print(NodeInputs.parse_obj(got_node_inputs).json(indent=2)) - print(got_job_inputs.json(indent=2)) - - assert got_job_inputs == job_inputs - assert got_node_inputs == node_inputs - - -def test_create_job_from_project(): - - project = Project.parse_obj( - { - "uuid": "f925e30f-19de-42dc-acab-3ce93ea0a0a7", - "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7", - "description": 'Study associated to solver job:\n{\n "id": "f925e30f-19de-42dc-acab-3ce93ea0a0a7",\n "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7",\n "inputs_checksum": "aac0bb28285d6e5918121630fa8c368130c6b05f80fd9622760078608fc44e96",\n "created_at": "2021-03-26T10:43:27.828975"\n}', - "thumbnail": "https://2xx2gy2ovf3r21jclkjio3x8-wpengine.netdna-ssl.com/wp-content/uploads/2018/12/API-Examples.jpg", 
- "prjOwner": "foo@itis.swiss", - "creationDate": "2021-03-26T10:43:27.867Z", - "lastChangeDate": "2021-03-26T10:43:33.595Z", - "workbench": { - "e694de0b-2e91-5be7-9319-d89404170991": { - "key": "simcore/services/comp/itis/sleeper", - "version": "2.0.2", - "label": "sleeper", - "progress": 100, - "thumbnail": None, - "runHash": "1c4e09777dbf6fb1ab4bfb02f8e62c7b6fc07393d8c880d5762a86afeddb30b5", - "inputs": { - "input_3": 0, - "input_2": 3, - "input_1": { - "store": 0, - "path": "api/bfb821c0-a4ef-305e-a23b-4d79065f0078/file_with_number.txt", - "eTag": None, - "label": "file_with_number.txt", - }, - }, - "inputAccess": None, - "inputNodes": [], - "outputs": { - "output_1": { - "store": 0, - "path": "f925e30f-19de-42dc-acab-3ce93ea0a0a7/e694de0b-2e91-5be7-9319-d89404170991/single_number.txt", - "eTag": "6c22e9b968b205c0dd3614edd1b28d35-1", - }, - "output_2": 1, - }, - "outputNode": None, - "outputNodes": None, - "parent": None, - "position": None, - "state": { - "currentStatus": "SUCCESS", - "modified": False, - "dependencies": [], - }, - } - }, - "accessRights": {"2": {"read": True, "write": True, "delete": True}}, - "dev": {}, - "classifiers": [], - "ui": { - "slideshow": {}, - "workbench": { - "e694de0b-2e91-5be7-9319-d89404170991": { - "position": {"x": 633, "y": 229} - } - }, - "currentNodeId": "e694de0b-2e91-5be7-9319-d89404170991", - }, - "quality": {}, - "tags": [], - "state": { - "locked": { - "value": False, - "status": "CLOSED", - }, - "state": {"value": "SUCCESS"}, - }, - }, - ) - - expected_job = Job.parse_obj( - { - "id": "f925e30f-19de-42dc-acab-3ce93ea0a0a7", - "name": "simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/2.0.2/jobs/f925e30f-19de-42dc-acab-3ce93ea0a0a7", - "created_at": "2021-03-26T10:43:27.867Z", - "runner_name": "solvers/simcore%2Fservices%2Fcomp%2Fitis%2Fsleeper/releases/2.0.2", - "inputs_checksum": "8f57551eb8c0798a7986b63face0eef8fed8da79dd66f871a73c27e64cd01c5f", - "url": None, - "runner_url": None, - "outputs_url": None, - } - ) - - solver_key = "simcore/services/comp/itis/sleeper" - solver_version = "2.0.2" - - job = create_job_from_project(solver_key, solver_version, project) - - assert job.id == project.uuid - assert job.name == project.name - assert not any(getattr(job, f) for f in job.__fields__ if f.startswith("url")) - - assert ( - job.inputs_checksum == expected_job.inputs_checksum - ) # this tends to be a problem - assert job == expected_job - - -@pytest.mark.skip(reason="TODO: next PR") -def test_create_jobstatus_from_task(): - from simcore_service_api_server.models.schemas.jobs import JobStatus - from simcore_service_api_server.modules.director_v2 import ComputationTaskGet - - task = ComputationTaskGet.parse_obj({}) # TODO: - job_status: JobStatus = create_jobstatus_from_task(task) - - assert job_status.job_id == task.id - - # TODO: activate - # #frozen = True - # #allow_mutation = False - # and remove take_snapshot by generating A NEW JobStatus! 
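The deleted converter tests above (and `test_utils_solver_job_outputs.py` removed below) relied on the custom `typing_extra.get_types()` helper; the replacement test `test_resultstypes_and_argument_type_sync` added earlier in this diff introspects the union aliases with the standard library instead. A minimal sketch of that standard-library behavior on Python 3.10+, using toy unions rather than the real `ArgumentTypes`/`ResultsTypes` aliases:

```python
# Sketch only: typing.get_origin/get_args work on both typing.Union aliases and
# the PEP 604 "X | Y" form, which is what the new test relies on.
import types
from typing import Union, get_args, get_origin

OldStyle = Union[int, str]  # typing.Union alias
NewStyle = int | str        # Python 3.10+ types.UnionType

assert get_origin(OldStyle) is Union
assert get_origin(NewStyle) is types.UnionType

# the member types are the same either way
assert set(get_args(OldStyle)) == set(get_args(NewStyle)) == {int, str}
```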
diff --git a/services/api-server/tests/unit/test_utils_solver_job_outputs.py b/services/api-server/tests/unit/test_utils_solver_job_outputs.py deleted file mode 100644 index be065fdf2cc..00000000000 --- a/services/api-server/tests/unit/test_utils_solver_job_outputs.py +++ /dev/null @@ -1,26 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from simcore_service_api_server.models.schemas.jobs import ArgumentType, File -from simcore_service_api_server.utils.solver_job_outputs import ( - BaseFileLink, - ResultsTypes, -) -from simcore_service_api_server.utils.typing_extra import get_types - - -def test_result_type_mapped(): - # I/O types returned by node-ports must be one-to-one mapped - # with those returned as output results - - api_arg_types = list(get_types(ArgumentType)) - output_arg_types = list(get_types(ResultsTypes)) - - assert File in api_arg_types - assert BaseFileLink in output_arg_types - - api_arg_types.remove(File) - output_arg_types.remove(BaseFileLink) - - assert set(api_arg_types) == set(output_arg_types) diff --git a/services/api-server/tests/utils/docker-compose.yml b/services/api-server/tests/utils/docker-compose.yml deleted file mode 100644 index e9472daa50c..00000000000 --- a/services/api-server/tests/utils/docker-compose.yml +++ /dev/null @@ -1,31 +0,0 @@ -version: "3.8" -services: - postgres: - image: postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce - environment: - - POSTGRES_USER=${POSTGRES_USER:-test} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-test} - - POSTGRES_DB=${POSTGRES_DB:-test} - - POSTGRES_HOST=${POSTGRES_HOST:-127.0.0.1} - - POSTGRES_PORT=${POSTGRES_PORT:-5432} - ports: - - "5432:5432" - # https://www.postgresql.org/docs/10/runtime-config-logging.html#GUC-LOG-STATEMENT - command: - [ - "postgres", - "-c", - "log_connections=true", - "-c", - "log_disconnections=true", - "-c", - "log_duration=true", - "-c", - "log_line_prefix=[%p] [%a] [%c] [%x] " - ] - adminer: - image: adminer - ports: - - 18080:8080 - depends_on: - - postgres diff --git a/services/api-server/tests/utils/init-pg.py b/services/api-server/tests/utils/init-pg.py deleted file mode 100644 index adfcd242add..00000000000 --- a/services/api-server/tests/utils/init-pg.py +++ /dev/null @@ -1,108 +0,0 @@ -# pylint: disable=no-value-for-parameter - -import asyncio -import os -from uuid import uuid4 - -import aiopg.sa -import faker -import simcore_postgres_database.cli as pg_cli -import simcore_service_api_server.db.tables as pg -import sqlalchemy as sa -import yaml - -DSN_FORMAT = "postgresql://{user}:{password}@{host}:{port}/{database}" - -default_db_settings = dict( - user=os.environ.get("POSTGRES_USER", "test"), - password=os.environ.get("POSTGRES_PASSWORD", "test"), - host=os.environ.get("POSTGRES_HOST", "127.0.0.1"), - port=os.environ.get("POSTGRES_PORT", 5432), - database=os.environ.get("POSTGRES_DB", 5432), -) -default_dsn = DSN_FORMAT.format(**default_db_settings) - -fake = faker.Faker() - - -def load_db_config() -> dict: - # TODO: - with open("docker-compose-resolved.yaml") as fh: - config = yaml.safe_load(fh) - environ = config["services"]["postgres"]["environment"] - - return dict( - user=environ["POSTGRES_USER"], - password=environ["POSTGRES_PASSWORD"], - host="127.0.0.1", - port=5432, - database=environ["POSTGRES_DB"], - ) - - -def init_tables(dsn: str = default_dsn): - engine = sa.create_engine(dsn) - meta = pg.metadata - meta.drop_all(engine) - # meta.create_all(engine, 
tables=[pg.api_keys, pg.users]) - - -def random_user(**overrides): - data = dict( - name=fake.name(), - email=fake.email(), - password_hash=fake.numerify(text="#" * 5), - status=pg.UserStatus.ACTIVE, - created_ip=fake.ipv4(), - ) - data.update(overrides) - return data - - -def random_api_key(**overrides): - data = dict( - user_id=1, - display_name=fake.word(), - api_key=uuid4(), - api_secret=uuid4(), - ) - data.update(overrides) - return data - - -async def fill_tables(dsn: str = default_dsn): - async with aiopg.sa.create_engine(dsn) as engine: - async with engine.acquire() as conn: - uid: int = await conn.scalar( - pg.users.insert().values(**random_user(name="me", email="me@bar.foo")) - ) - - await conn.scalar( - pg.api_keys.insert().values( - **random_api_key( - display_name="test key", - user_id=uid, - api_key="key", - api_secret="secret", - ) - ) - ) - - -async def main(): - - # discover - settings = pg_cli.discover.callback(**default_db_settings) - dsn: str = DSN_FORMAT.format(**settings) - - # upgrade - pg_cli.upgrade.callback("head") - - # FIXME: if already there, it will fail - await fill_tables(dsn) - - -if __name__ == "__main__": - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) - loop.stop() diff --git a/services/api-server/tools/gen_api.py b/services/api-server/tools/gen_api.py deleted file mode 100644 index 00dbac70c92..00000000000 --- a/services/api-server/tools/gen_api.py +++ /dev/null @@ -1,93 +0,0 @@ -import sys -from pathlib import Path -from typing import Set - -import attr -import black -from change_case import ChangeCase -from jinja2 import Environment, FileSystemLoader - -# directories -current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent - -package_dir = (current_dir / ".." 
/ "src" / "simcore_service_api_server").resolve() - - -# formatter -black_mode = black.FileMode() - - -def render_to_file(filepath, template, content): - with open(filepath, "wt") as fh: - code = template.render(**content) - formatted_code = black.format_str(code, mode=black_mode) - fh.write(formatted_code) - return filepath - - -def as_class_name(name): - cc = ChangeCase.snake_to_camel(name) - cc = cc[0].upper() + cc[1:] - return cc - - -# templates -template_env = Environment( - autoescape=True, loader=FileSystemLoader(current_dir / "templates") -) -template_env.globals.update({"len": len, "cls_name": as_class_name}) - -template_std_endpoints = template_env.get_template( - "resource_standard_methods.py.jinja2" -) -template_custom_endpoints = template_env.get_template( - "resource_custom_methods.py.jinja2" -) -template_cruds = template_env.get_template("cruds.py.jinja2") - -template_orm = template_env.get_template("orm.py.jinja2") - - -@attr.s(auto_attribs=True) -class Generator: - generated: Set[Path] = set() - - def _render_to_file(self, filepath, template, content): - filepath = render_to_file(filepath, template, content) - self.generated.add(filepath) - - def create_resource(self, resource_name): - rn = resource_name.lower() - - content = {"rn": rn, "rnp": rn + "s", "rncc": as_class_name(resource_name)} - - self._render_to_file( - package_dir / "endpoints" / f"{rn}_std.py", - template_std_endpoints, - content, - ) - - self._render_to_file( - package_dir / "store" / f"crud_{rn}.py", - template_cruds, - content, - ) - - self._render_to_file( - package_dir / "orm" / f"orm_{rn}.py", - template_orm, - content, - ) - - def dump(self): - pass - - def clean(self): - while self.generated: - fp: Path = self.generated.pop() - fp.unlink() - - -if __name__ == "__main__": - g = Generator() - g.create_resource("nice_resource") diff --git a/services/api-server/tools/templates/cruds.py.jinja2 b/services/api-server/tools/templates/cruds.py.jinja2 deleted file mode 100644 index 7ac99a61437..00000000000 --- a/services/api-server/tools/templates/cruds.py.jinja2 +++ /dev/null @@ -1,61 +0,0 @@ -import json -from typing import List, Optional - -import sqlalchemy as sa - -from .. 
import db -from ..orm import orm_{{ rnp }} as orm -from ..schemas import schemas_{{ rnp }} as schemas - - -async def list_{{ rnp }}(conn: db.SAConnection) -> List[schemas.DAGAtDB]: - {{ rnp }} = [] - async for row in conn.execute(orm.{{ rnp }}.select()): - if row: - {{ rnp }}.append(schemas.DAGAtDB(**row)) - return {{ rnp }} - - -async def get_{{ rn }}(conn: db.SAConnection, {{ rn }}_id: int) -> Optional[schemas.DAGAtDB]: - stmt = orm.{{ rnp }}.select().where(orm.{{ rnp }}.c.id == {{ rn }}_id) - row: db.RowProxy = await (await conn.execute(stmt)).first() - if row: - return schemas.DAGAtDB(**row) - return None - - -async def create_{{ rn }}(conn: db.SAConnection, {{ rn }}: schemas.DAGIn): - stmt = orm.{{ rnp }}.insert().values( - workbench=json.dumps({{ rn }}.dict()["workbench"]), **{{ rn }}.dict(exclude={"workbench"}) - ) - new_id: int = await (await conn.execute(stmt)).scalar() - return new_id - - -async def replace_{{ rn }}(conn: db.SAConnection, {{ rn }}_id: int, {{ rn }}: schemas.DAGIn): - stmt = ( - orm.{{ rnp }}.update() - .values( - workbench=json.dumps({{ rn }}.dict()["workbench"]), - **{{ rn }}.dict(exclude={"workbench"}) - ) - .where(orm.{{ rnp }}.c.id == {{ rn }}_id) - ) - await conn.execute(stmt) - - -async def update_{{ rn }}(conn: db.SAConnection, {{ rn }}_id: int, {{ rn }}: schemas.DAGIn): - patch = {{ rn }}.dict(exclude_unset=True, exclude={"workbench"}) - if "workbench" in {{ rn }}.__fields_set__: - patch["workbench"] = json.dumps(patch["workbench"]) - - stmt = sa.update(orm.{{ rnp }}).values(**patch).where(orm.{{ rnp }}.c.id == {{ rn }}_id) - res = await conn.execute(stmt) - - # TODO: dev asserts - assert res.returns_rows == False # nosec - - -async def delete_{{ rn }}(conn: db.SAConnection, {{ rn }}_id: int): - stmt = sa.delete(orm.{{ rnp }}).where(orm.{{ rnp }}.c.id == {{ rn }}_id) - await conn.execute(stmt) diff --git a/services/api-server/tools/templates/orm.py.jinja2 b/services/api-server/tools/templates/orm.py.jinja2 deleted file mode 100644 index 02bbd499e4c..00000000000 --- a/services/api-server/tools/templates/orm.py.jinja2 +++ /dev/null @@ -1,26 +0,0 @@ -# TODO: sa.ORMs so that we can convert with pydantic and use __table__ with aiopg - -import sqlalchemy as sa -from sqlalchemy import Column, Integer, String - -from simcore_postgres_database.base import Base - - -class {{ cls_name(rn) }}(Base): - """ Table with - - Managed by the catalog's service - """ - - __tablename__ = "{{ rnp }}" - - id = Column(Integer, primary_key=True, index=True) - key = Column(String, index=True) - version = Column(String) - name = Column(String, nullable=False) - description = Column(String, nullable=True) - contact = Column(String, index=True) - data = Column(sa.JSON, nullable=False) - - -{{ rnp }} = {{ rncc }}.__table__ diff --git a/services/api-server/tools/templates/resource_custom_methods.py.jinja2 b/services/api-server/tools/templates/resource_custom_methods.py.jinja2 deleted file mode 100644 index 4542ce6d6f9..00000000000 --- a/services/api-server/tools/templates/resource_custom_methods.py.jinja2 +++ /dev/null @@ -1,26 +0,0 @@ -{# STANDARD METHODS: https://cloud.google.com/apis/design/custom_methods #} - -from loguru import logger - -from fastapi import APIRouter, HTTPException -from starlette import status - - -router = APIRouter() - - -@router.get("/dags:batchGet") -async def batch_get_dags(): - raise HTTPException( - status_code=status.HTTP_501_NOT_IMPLEMENTED, detail="Still not implemented" - ) - - -@router.get("/dags:search") -async def search_dags(): - # A method that 
takes multiple resource IDs and returns an object for each of those IDs - # Alternative to List for fetching data that does not adhere to List semantics, such as services.search. - # https://cloud.google.com/apis/design/standard_methods#list - raise HTTPException( - status_code=status.HTTP_501_NOT_IMPLEMENTED, detail="Still not implemented" - ) diff --git a/services/api-server/tools/templates/resource_standard_methods.py.jinja2 b/services/api-server/tools/templates/resource_standard_methods.py.jinja2 deleted file mode 100644 index 0553fda89f4..00000000000 --- a/services/api-server/tools/templates/resource_standard_methods.py.jinja2 +++ /dev/null @@ -1,104 +0,0 @@ -from loguru import logger -from typing import List, Optional - -from fastapi import APIRouter, Body, Depends, HTTPException, Query -from starlette import status - -from .. import db -from ..store import crud_{{ rnp }} as crud -from ..schemas import schemas_{{ rnp }} as schemas - -router = APIRouter() - -{# STANDARD METHODS: https://cloud.google.com/apis/design/standard_methods #} - - -@router.get("/{{ rnp }}", response_model=List[schemas.DAGOut]) -async def list_{{ rnp }}( - page_token: Optional[str] = Query( - None, description="Requests a specific page of the list results" - ), - page_size: int = Query( - 0, ge=0, description="Maximum number of results to be returned" - ), - order_by: Optional[str] = Query( - None, description="Sorts in ascending order comma-separated fields" - ), - conn: db.SAConnection = Depends(db.get_cnx), -): - # List is suited to data from a single collection that is bounded in size and not cached - - # Applicable common patterns - # SEE pagination: https://cloud.google.com/apis/design/design_patterns#list_pagination - # SEE sorting https://cloud.google.com/apis/design/design_patterns#sorting_order - - # Applicable naming conventions - # TODO: filter: https://cloud.google.com/apis/design/naming_convention#list_filter_field - # SEE response: https://cloud.google.com/apis/design/naming_convention#list_response - logger.debug("%s %s %s", page_token, page_size, order_by) - {{ rnp }} = await crud.list_{{ rnp }}(conn) - return {{ rnp }} - - -@router.get("/{{ rnp }}/{{'{'}}{{ rn }}_id{{'}'}}", response_model=schemas.DAGOut) -async def get_{{ rn }}({{ rn }}_id: int, conn: db.SAConnection = Depends(db.get_cnx)): - {{ rn }} = await crud.get_{{ rn }}(conn, {{ rn }}_id) - return {{ rn }} - - -@router.post( - "/{{ rnp }}", - response_model=int, - status_code=status.HTTP_201_CREATED, - response_description="Successfully created {{ rn }}", -) -async def create_{{ rn }}( - {{ rn }}: schemas.DAGIn = Body(...), conn: db.SAConnection = Depends(db.get_cnx) -): - assert {{ rn }} # nosec - - if {{ rn }}.version == "0.0.0" and {{ rn }}.key == "foo": - # client-assigned resouce name - raise HTTPException( - status_code=status.HTTP_409_CONFLICT, - detail=f"{{ rn }} version already exists", - ) - - # FIXME: conversion DAG (issue with workbench being json in orm and dict in schema) - {{ rn }}_id = await crud.create_{{ rn }}(conn, {{ rn }}) - # TODO: no need to return since there is not extra info?, perhaps return - return {{ rn }}_id - - -@router.patch("/{{ rnp }}/{{'{'}}{{ rn }}_id{{'}'}}", response_model=schemas.DAGOut) -async def udpate_{{ rn }}( - {{ rn }}_id: int, - {{ rn }}: schemas.DAGIn = Body(None), - conn: db.SAConnection = Depends(db.get_cnx), -): - async with conn.begin(): - await crud.update_{{ rn }}(conn, {{ rn }}_id, {{ rn }}) - updated_{{ rn }} = await crud.get_{{ rn }}(conn, {{ rn }}_id) - - return updated_{{ rn }} 
- - -@router.put("/{{ rnp }}/{{'{'}}{{ rn }}_id{{'}'}}", response_model=Optional[schemas.DAGOut]) -async def replace_{{ rn }}( - {{ rn }}_id: int, - {{ rn }}: schemas.DAGIn = Body(...), - conn: db.SAConnection = Depends(db.get_cnx), -): - await crud.replace_{{ rn }}(conn, {{ rn }}_id, {{ rn }}) - - -@router.delete( - "/{{ rnp }}/{{'{'}}{{ rn }}_id{{'}'}}", - status_code=status.HTTP_204_NO_CONTENT, - response_description="Successfully deleted", -) -async def delete_{{ rn }}({{ rn }}_id: int, conn: db.SAConnection = Depends(db.get_cnx)): - # If the Delete method immediately removes the resource, it should return an empty response. - # If the Delete method initiates a long-running operation, it should return the long-running operation. - # If the Delete method only marks the resource as being deleted, it should return the updated resource. - await crud.delete_{{ rn }}(conn, {{ rn }}_id) diff --git a/services/api-server/tools/templates/test_endpoints.py.jinja2 b/services/api-server/tools/templates/test_endpoints.py.jinja2 deleted file mode 100644 index 3e5027f2582..00000000000 --- a/services/api-server/tools/templates/test_endpoints.py.jinja2 +++ /dev/null @@ -1,61 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -from typing import List - -import pytest -from starlette.testclient import TestClient -from starlette import status - - -# TODO: app is init globally ... which is bad! -from simcore_service_api_server.core.application import api_version, app, api_vtag - - -@pytest.fixture -def client(environ_context, postgres_service): - # TODO: create new web-app everyt - with TestClient(app) as cli: - yield cli - - -def test_list_{{ rnp }}(client): - response = client.get(f"/{api_vtag}/{{ rnp }}") - assert response.status_code == status.HTTP_200_OK - assert response.json() == [] - - # inject three dagin - response = client.get(f"/{api_vtag}/{{ rnp }}") - assert response.status_code == status.HTTP_200_OK - # TODO: assert i can list them as dagouts - - # TODO: assert dagout have identifiers now - - -def test_standard_operations_on_resource(client, fake_data_dag_in): - - response = client.post(f"/{api_vtag}/{{ rnp }}", json=fake_data_dag_in) - assert response.status_code == status.HTTP_201_CREATED - assert response.json() == 1 - - # list - response = client.get(f"/{api_vtag}/{{ rnp }}") - assert response.status_code == status.HTTP_200_OK - got = response.json() - - assert isinstance(got, list) - assert len(got) == 1 - - # TODO: data_in is not the same as data_out?? - data_out = got[0] - assert data_out["id"] == 1 # extra key, once in db - - # get - response = client.get(f"/{api_vtag}/{{ rnp }}/1") - assert response.status_code == status.HTTP_200_OK - assert response.json() == data_out - - # delete - response = client.delete(f"/{api_vtag}/{{ rnp }}/1") - assert response.status_code == status.HTTP_204_NO_CONTENT diff --git a/services/autoscaling/.cookiecutterrc b/services/autoscaling/.cookiecutterrc deleted file mode 100644 index 5ce0be7364d..00000000000 --- a/services/autoscaling/.cookiecutterrc +++ /dev/null @@ -1,20 +0,0 @@ -# This file exists so you can easily regenerate your project. 
-# -# cookiecutter --overwrite-if-exists --config-file=.cookiecutterrc /home/crespo/devp/cookiecutter-simcore-py-fastapi -# - -default_context: - - _extensions: ['jinja2_time.TimeExtension'] - _output_dir: '/home/crespo/devp/cookiecutter-simcore-py-fastapi/.output/osparc-simcore/services' - _template: '/home/crespo/devp/cookiecutter-simcore-py-fastapi' - detailed_doc: 'n' - distribution_name: 'simcore-service-autoscaling' - full_name: 'Pedro Crespo-Valero' - github_username: 'pcrespov' - package_name: 'simcore_service_autoscaling' - project_name: 'Auto scaling service' - project_short_description: 'Service to auto-scale swarm' - project_slug: 'autoscaling' - version: '0.1.0-alpha' - year: '2022' diff --git a/services/autoscaling/Dockerfile b/services/autoscaling/Dockerfile index 8955508dcd3..6a2ed483e43 100644 --- a/services/autoscaling/Dockerfile +++ b/services/autoscaling/Dockerfile @@ -1,10 +1,22 @@ # syntax=docker/dockerfile:1 -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: -# cd sercices/autoscaling +# cd services/autoscaling # docker build -f Dockerfile -t autoscaling:prod --target production ../../ # docker run autoscaling:prod # @@ -12,8 +24,13 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=sanderegg -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +# NOTE: to list the latest version run `make` inside `scripts/apt-packages-versions` +ENV DOCKER_APT_VERSION="5:26.1.4-1~debian.12~bookworm" + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ set -eux; \ apt-get update; \ apt-get install -y --no-install-recommends \ @@ -29,15 +46,12 @@ RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=lock && apt-get update \ && apt-get install -y --no-install-recommends \ # only the cli is needed and we remove the unnecessary stuff again - docker-ce-cli \ + docker-ce-cli=${DOCKER_APT_VERSION} \ && apt-get remove -y\ - ca-certificates \ - curl \ gnupg \ + curl \ lsb-release \ - && apt-get autoclean -y\ - && apt-get autoremove -y\ - && rm -rf /var/lib/apt/lists/* \ + && apt-get clean -y\ # verify that the binary works && gosu nobody true @@ -75,59 +89,52 @@ EXPOSE 3000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ set -eux \ && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + 
build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" + + -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip --no-cache-dir install --upgrade \ - pip~=23.0 \ +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build -# install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/autoscaling/requirements/_base.txt . -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip --no-cache-dir install \ - --requirement _base.txt - - # --------------------------Prod-depends-only stage ------------------- # This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) # # + /build # + services/autoscaling [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/autoscaling /build/services/autoscaling +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/autoscaling -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip --no-cache-dir install \ - --requirement requirements/prod.txt \ - && pip --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/autoscaling,target=/build/services/autoscaling,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -137,14 +144,18 @@ RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ # + /home/scu $HOME = WORKDIR # + services/autoscaling [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -154,9 +165,12 @@ COPY --chown=scu:scu services/autoscaling/docker services/autoscaling/docker RUN chmod +x services/autoscaling/docker/*.sh -HEALTHCHECK --interval=10s \ +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ --timeout=5s \ - --start-period=5s \ + --start-period=20s \ + --start-interval=1s \ --retries=5 \ CMD ["python3", "services/autoscaling/docker/healthcheck.py", "http://localhost:8000/"] @@ -172,7 +186,7 @@ CMD ["/bin/sh", "services/autoscaling/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development \ SC_DEVEL_MOUNT=/devel/services/autoscaling diff --git a/services/autoscaling/README.md b/services/autoscaling/README.md index 70785f9659f..cd2abf6bcb8 100644 --- a/services/autoscaling/README.md +++ b/services/autoscaling/README.md @@ -1,16 +1,13 @@ # autoscaling -[![image-size]](https://microbadger.com/images/itisfoundation/autoscaling. 
"More on itisfoundation/autoscaling.:staging-latest image") +Service to auto-scale swarm for both dynamic and computational services -[![image-badge]](https://microbadger.com/images/itisfoundation/autoscaling "More on Auto scaling service image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/autoscaling "More on Auto scaling service image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/autoscaling "More on Auto scaling service image in registry") -Service to auto-scale swarm +## development - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/autoscaling./staging-latest.svg?label=autoscaling.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/autoscaling.svg -[image-version]https://images.microbadger.com/badges/version/itisfoundation/autoscaling.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/autoscaling.svg - +``` +make install-dev +make test-dev-unit + +# NOTE: there are manual tests that need access to AWS EC2 instances! +``` diff --git a/services/autoscaling/docker/boot.sh b/services/autoscaling/docker/boot.sh index 8956b964bb0..c78cf322c2d 100755 --- a/services/autoscaling/docker/boot.sh +++ b/services/autoscaling/docker/boot.sh @@ -23,27 +23,35 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/autoscaling || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + cd services/autoscaling + uv pip --quiet sync requirements/dev.txt + cd - + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # # RUNNING application # -APP_LOG_LEVEL=${API_SERVER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +APP_LOG_LEVEL=${AUTOSCALING_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/autoscaling/src/simcore_service_autoscaling && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${AUTOSCALING_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/autoscaling/docker/entrypoint.sh b/services/autoscaling/docker/entrypoint.sh index 28ec2326d4e..ad982fd8d5c 100755 --- a/services/autoscaling/docker/entrypoint.sh +++ b/services/autoscaling/docker/entrypoint.sh @@ -13,6 +13,13 @@ INFO="INFO: [$(basename "$0")] " WARNING="WARNING: [$(basename "$0")] " ERROR="ERROR: [$(basename "$0")] " +# Read self-signed SSH certificates (if applicable) +# +# In case clusters-keeper must access a docker registry in a secure way using +# non-standard certificates (e.g. such as self-signed certificates), this call is needed. +# It needs to be executed as root. Also required to any access for example to secure rabbitmq. +update-ca-certificates + echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." 
echo "$INFO" "User :$(id "$(whoami)")" echo "$INFO" "Workdir : $(pwd)" @@ -63,11 +70,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock if stat $DOCKER_MOUNT >/dev/null 2>&1; then diff --git a/services/autoscaling/docker/healthcheck.py b/services/autoscaling/docker/healthcheck.py old mode 100644 new mode 100755 index 10e58d00e21..cb51ed2399e --- a/services/autoscaling/docker/healthcheck.py +++ b/services/autoscaling/docker/healthcheck.py @@ -6,9 +6,10 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: diff --git a/services/autoscaling/requirements/_base.in b/services/autoscaling/requirements/_base.in index 2b727fc6470..e450393adb9 100644 --- a/services/autoscaling/requirements/_base.in +++ b/services/autoscaling/requirements/_base.in @@ -4,17 +4,18 @@ # NOTE: ALL version constraints MUST be commented --constraint ../../../requirements/constraints.txt --constraint ./constraints.txt +--constraint ../../../services/dask-sidecar/requirements/_dask-distributed.txt # intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/aws-library/requirements/_base.in # service-library[fastapi] --requirement ../../../packages/service-library/requirements/_base.in --requirement ../../../packages/service-library/requirements/_fastapi.in - +aiocache aiodocker -aioboto3 -fastapi +dask[distributed] packaging -types-aiobotocore[ec2] diff --git a/services/autoscaling/requirements/_base.txt b/services/autoscaling/requirements/_base.txt index 385afd1dd77..adbb78fa0f4 100644 --- a/services/autoscaling/requirements/_base.txt +++ b/services/autoscaling/requirements/_base.txt @@ -1,208 +1,876 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==9.0.4 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aioboto3==10.4.0 - # via -r requirements/_base.in -aiobotocore==2.4.2 +aio-pika==9.5.5 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aioboto3==14.3.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +aiobotocore==2.22.0 # via aioboto3 +aiocache==0.12.3 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in aiodebug==2.3.0 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -aiodocker==0.21.0 - # via -r requirements/_base.in -aiofiles==23.1.0 + # -r requirements/_base.in +aiofiles==24.1.0 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -aiohttp==3.8.4 + # aioboto3 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # aiobotocore # aiodocker -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore -aiormq==6.7.2 +aiormq==6.8.1 # via aio-pika -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp -anyio==3.6.2 +annotated-types==0.7.0 + # via pydantic +anyio==4.9.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 + # watchfiles +arrow==1.3.0 # via - # aiohttp - # redis -attrs==21.4.0 + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==25.3.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt # aiohttp # jsonschema -boto3==1.24.59 + # referencing +boto3==1.37.3 # via aiobotocore -botocore==1.27.59 +botocore==1.37.3 # via # aiobotocore # boto3 # s3transfer -botocore-stubs==1.29.78 +botocore-stubs==1.38.19 # via types-aiobotocore -certifi==2022.12.7 +certifi==2025.4.26 # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx -charset-normalizer==3.0.1 - # via aiohttp -click==8.1.3 + # requests +charset-normalizer==3.4.2 + # via requests +click==8.1.8 # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # distributed + # 
rich-toolkit # typer # uvicorn -dnspython==2.3.0 +cloudpickle==3.1.1 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # distributed +dask==2025.5.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/_base.in + # distributed +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +distributed==2025.5.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +dnspython==2.7.0 # via email-validator -email-validator==1.3.1 - # via pydantic -fastapi==0.90.1 +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.3.0 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -frozenlist==1.3.3 +faststream==0.5.41 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.6.0 # via # aiohttp # aiosignal -h11==0.14.0 +fsspec==2025.3.2 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +googleapis-common-protos==1.70.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.16.0 # via # httpcore # uvicorn -httpcore==0.16.3 +h2==4.2.0 # via httpx -httpx==0.23.3 - # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -idna==3.4 +hpack==4.1.0 + # via h2 +httpcore==1.0.9 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator - # rfc3986 + # httpx + # requests # yarl -jaeger-client==4.8.0 - # via fastapi-contrib +importlib-metadata==8.6.1 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed + # fastapi jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -jsonschema==3.2.0 +jsonschema==4.23.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in -multidict==6.0.4 + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2025.4.1 + # via jsonschema +locket==1.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed + # partd +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # jinja2 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +multidict==6.4.4 + # via + # aiobotocore # aiohttp # yarl -opentracing==2.4.0 +opentelemetry-api==1.33.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-propagator-aws-xray + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.33.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.54b1 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.54b1 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-botocore==0.54b1 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.54b1 + # via + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-propagator-aws-xray==1.0.2 + # via opentelemetry-instrumentation-botocore +opentelemetry-proto==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.33.1 # via - # fastapi-contrib - # jaeger-client -packaging==23.0 - # via -r requirements/_base.in -pamqp==3.2.1 + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.54b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.54b1 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.18 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==25.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/_base.in + # dask + # distributed + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pydantic==1.10.2 +partd==1.4.2 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +prometheus-client==0.22.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==5.29.4 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # distributed +pycryptodome==3.23.0 + # via stream-zip +pydantic==2.11.4 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends # fastapi -pyinstrument==4.4.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-extra-types==2.10.4 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.19.3 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via + # aiobotocore # arrow # botocore -pyyaml==5.4.1 +python-dotenv==1.1.0 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -redis==4.5.1 + # dask + # distributed + # uvicorn +redis==6.1.0 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -rfc3986==1.5.0 - # via httpx -s3transfer==0.6.0 - # via boto3 -six==1.16.0 +referencing==0.35.1 # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil - # thrift -sniffio==1.3.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==14.0.0 # via - # anyio - # httpcore - # httpx -starlette==0.23.1 - # via fastapi -tenacity==8.2.1 + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.25.0 + # via + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +sh==2.2.2 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sortedcontainers==2.4.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +starlette==0.46.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.2 +tblib==3.1.0 # via - # jaeger-client - # threadloop -tqdm==4.64.1 + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +tenacity==9.1.2 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.7.0 - # via -r 
requirements/../../../packages/settings-library/requirements/_base.in -types-aiobotocore==2.4.2.post1 - # via -r requirements/_base.in -types-aiobotocore-ec2==2.4.2 +toolz==1.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # dask + # distributed + # partd +tornado==6.5 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +tqdm==4.67.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.4 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-aiobotocore==2.22.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +types-aiobotocore-ec2==2.22.0 + # via types-aiobotocore +types-aiobotocore-s3==2.22.0 + # via types-aiobotocore +types-aiobotocore-ssm==2.22.0 # via types-aiobotocore -types-awscrt==0.16.10 +types-awscrt==0.27.2 # via botocore-stubs -typing-extensions==4.5.0 +types-python-dateutil==2.9.0.20250516 + # via arrow +typing-extensions==4.13.2 # via # aiodebug - # aiodocker - # aioitertools + # anyio + # exceptiongroup + # fastapi + # faststream + # opentelemetry-sdk # pydantic - # starlette + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer # types-aiobotocore # types-aiobotocore-ec2 -urllib3==1.26.14 + # types-aiobotocore-s3 + # types-aiobotocore-ssm + # typing-inspection +typing-inspection==0.4.0 + # via pydantic +urllib3==2.4.0 # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # botocore -uvicorn==0.20.0 - # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -wrapt==1.14.1 - # via aiobotocore -yarl==1.8.2 + # distributed + # requests +uvicorn==0.34.2 # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.20.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq - -# The 
following packages are considered to be unsafe in a requirements file: -# setuptools +zict==3.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +zipp==3.21.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # importlib-metadata diff --git a/services/autoscaling/requirements/_test.in b/services/autoscaling/requirements/_test.in index 25c2f8ed776..7aecc03cd3e 100644 --- a/services/autoscaling/requirements/_test.in +++ b/services/autoscaling/requirements/_test.in @@ -10,15 +10,15 @@ # --constraint _base.txt -asgi-lifespan -codecov +types-aiobotocore[ec2,s3,ssm,iam] +asgi-lifespan coverage -coveralls deepdiff docker faker fakeredis[lua] +flaky httpx moto[server] psutil @@ -28,4 +28,7 @@ pytest-cov pytest-mock pytest-runner python-dotenv +pytest-icdiff +pytest-sugar respx +types-PyYAML diff --git a/services/autoscaling/requirements/_test.txt b/services/autoscaling/requirements/_test.txt index b21ed516da2..669083ffa99 100644 --- a/services/autoscaling/requirements/_test.txt +++ b/services/autoscaling/requirements/_test.txt @@ -1,130 +1,120 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -anyio==3.6.2 +annotated-types==0.7.0 # via # -c requirements/_base.txt - # httpcore -asgi-lifespan==2.0.0 - # via -r requirements/_test.in -async-timeout==4.0.2 + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.9.0 # via # -c requirements/_base.txt - # redis -attrs==21.4.0 + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.3.0 # via # -c requirements/_base.txt - # jschema-to-python # jsonschema - # pytest - # sarif-om -aws-sam-translator==1.55.0 + # referencing +aws-sam-translator==1.97.0 # via cfn-lint -aws-xray-sdk==2.11.0 +aws-xray-sdk==2.14.0 # via moto -boto3==1.24.59 +blinker==1.9.0 + # via flask +boto3==1.37.3 # via # -c requirements/_base.txt # aws-sam-translator # moto -botocore==1.27.59 +botocore==1.37.3 # via # -c requirements/_base.txt # aws-xray-sdk # boto3 # moto # s3transfer -certifi==2022.12.7 +botocore-stubs==1.38.19 # via # -c requirements/_base.txt + # types-aiobotocore +certifi==2025.4.26 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # httpcore # httpx # requests -cffi==1.15.1 +cffi==1.17.1 # via cryptography -cfn-lint==0.72.6 +cfn-lint==1.35.1 # via moto -charset-normalizer==3.0.1 +charset-normalizer==3.4.2 # via # -c requirements/_base.txt # requests -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # flask -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 +coverage==7.8.0 # via # -r requirements/_test.in - # codecov - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.1 +cryptography==45.0.2 # via # -c requirements/../../../requirements/constraints.txt + # joserfc # moto - # python-jose - # sshpubkeys -deepdiff==6.2.3 +deepdiff==8.5.0 # via -r requirements/_test.in -docker==6.0.1 +docker==7.1.0 # via # -r requirements/_test.in # moto -docopt==0.6.2 - # via coveralls -ecdsa==0.18.0 - # via - # moto - # python-jose - # sshpubkeys -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 +faker==37.3.0 # via -r requirements/_test.in -fakeredis==2.9.2 +fakeredis==2.29.0 # via -r requirements/_test.in -flask==2.2.3 +flaky==3.8.1 + # via -r requirements/_test.in 
+flask==3.1.1 # via # flask-cors # moto -flask-cors==3.0.10 +flask-cors==6.0.0 # via moto -graphql-core==3.2.3 +graphql-core==3.2.6 # via moto -h11==0.14.0 +h11==0.16.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.16.3 +httpcore==1.0.9 # via # -c requirements/_base.txt # httpx -httpx==0.23.3 +httpx==0.28.1 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -r requirements/_test.in # respx -idna==3.4 +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 # via # -c requirements/_base.txt # anyio + # httpx # requests - # rfc3986 -importlib-metadata==6.0.0 - # via flask -iniconfig==2.0.0 +iniconfig==2.1.0 # via pytest -itsdangerous==2.1.2 +itsdangerous==2.2.0 # via flask -jinja2==3.1.2 +jinja2==3.1.6 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # flask # moto jmespath==1.0.1 @@ -132,175 +122,234 @@ jmespath==1.0.1 # -c requirements/_base.txt # boto3 # botocore -jschema-to-python==1.2.3 - # via cfn-lint -jsondiff==2.0.0 +joserfc==1.0.4 # via moto -jsonpatch==1.32 +jsonpatch==1.33 # via cfn-lint -jsonpickle==3.0.1 - # via jschema-to-python -jsonpointer==2.3 +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 # via jsonpatch -jsonschema==3.2.0 +jsonschema==4.23.0 # via # -c requirements/_base.txt # aws-sam-translator - # cfn-lint # openapi-schema-validator # openapi-spec-validator -junit-xml==1.9 - # via cfn-lint -lupa==1.14.1 +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2025.4.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.11.0 + # via openapi-spec-validator +lupa==2.4 # via fakeredis -markupsafe==2.1.2 +markupsafe==3.0.2 # via + # -c requirements/_base.txt + # flask # jinja2 # werkzeug -moto==4.1.3 +moto==5.1.4 # via -r requirements/_test.in -networkx==2.8.8 +mpmath==1.3.0 + # via sympy +networkx==3.4.2 # via cfn-lint -openapi-schema-validator==0.2.3 +openapi-schema-validator==0.6.3 # via openapi-spec-validator -openapi-spec-validator==0.4.0 +openapi-spec-validator==0.7.1 # via moto -ordered-set==4.1.0 +orderly-set==5.4.1 # via deepdiff -orjson==3.8.7 - # via deepdiff -packaging==23.0 +packaging==25.0 # via # -c requirements/_base.txt - # docker # pytest -pbr==5.11.1 - # via - # jschema-to-python - # sarif-om -pluggy==1.0.0 + # pytest-sugar +pathable==0.4.4 + # via jsonschema-path +pluggy==1.6.0 # via pytest -psutil==5.9.4 - # via -r requirements/_test.in -pyasn1==0.4.8 +ply==3.11 + # via jsonpath-ng +pprintpp==0.4.0 + # via pytest-icdiff +psutil==7.0.0 # via - # python-jose - # rsa -pycparser==2.21 - # via cffi -pyparsing==3.0.9 + # -c requirements/_base.txt + # -r requirements/_test.in +py-partiql-parser==0.6.1 # via moto -pyrsistent==0.19.3 +pycparser==2.22 + # via cffi +pydantic==2.11.4 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # jsonschema -pytest==7.2.1 + # aws-sam-translator +pydantic-core==2.33.2 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.2.3 + # via moto +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio # pytest-cov + # pytest-icdiff # pytest-mock -pytest-asyncio==0.20.3 + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.1.1 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r 
requirements/_test.in -python-dateutil==2.8.2 +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt # botocore - # faker # moto -python-dotenv==1.0.0 - # via -r requirements/_test.in -python-jose==3.3.0 - # via moto -pyyaml==5.4.1 +python-dotenv==1.1.0 # via # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # cfn-lint + # jsonschema-path # moto - # openapi-spec-validator -redis==4.5.1 + # responses +redis==6.1.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # fakeredis -requests==2.28.2 +referencing==0.35.1 # via - # codecov - # coveralls + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 + # via + # -c requirements/_base.txt # docker + # jsonschema-path # moto # responses -responses==0.22.0 +responses==0.25.7 # via moto -respx==0.20.1 +respx==0.22.0 # via -r requirements/_test.in -rfc3986==1.5.0 +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.25.0 # via # -c requirements/_base.txt - # httpx -rsa==4.9 - # via - # -c requirements/../../../requirements/constraints.txt - # python-jose -s3transfer==0.6.0 + # jsonschema + # referencing +s3transfer==0.11.3 # via # -c requirements/_base.txt # boto3 -sarif-om==1.0.4 - # via cfn-lint -six==1.16.0 +setuptools==80.7.1 + # via moto +six==1.17.0 # via # -c requirements/_base.txt - # ecdsa - # flask-cors - # jsonschema - # junit-xml # python-dateutil -sniffio==1.3.0 + # rfc3339-validator +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio # asgi-lifespan - # httpcore - # httpx sortedcontainers==2.4.0 - # via fakeredis -sshpubkeys==3.3.1 - # via moto -toml==0.10.2 - # via responses -tomli==2.0.1 # via - # coverage - # pytest -types-toml==0.10.8.5 - # via responses -urllib3==1.26.14 + # -c requirements/_base.txt + # fakeredis +sympy==1.14.0 + # via cfn-lint +termcolor==3.1.0 + # via pytest-sugar +types-aiobotocore==2.22.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +types-aiobotocore-ec2==2.22.0 + # via + # -c requirements/_base.txt + # types-aiobotocore +types-aiobotocore-iam==2.22.0 + # via types-aiobotocore +types-aiobotocore-s3==2.22.0 + # via + # -c requirements/_base.txt + # types-aiobotocore +types-aiobotocore-ssm==2.22.0 + # via + # -c requirements/_base.txt + # types-aiobotocore +types-awscrt==0.27.2 # via # -c requirements/_base.txt + # botocore-stubs +types-pyyaml==6.0.12.20250516 + # via -r requirements/_test.in +typing-extensions==4.13.2 + # via + # -c requirements/_base.txt + # anyio + # aws-sam-translator + # cfn-lint + # pydantic + # pydantic-core + # types-aiobotocore + # types-aiobotocore-ec2 + # types-aiobotocore-iam + # types-aiobotocore-s3 + # types-aiobotocore-ssm + # typing-inspection +typing-inspection==0.4.0 + # via + # -c requirements/_base.txt + # pydantic +tzdata==2025.2 + # via faker +urllib3==2.4.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # botocore # docker # requests # responses -websocket-client==1.5.1 - # via docker -werkzeug==2.2.3 +werkzeug==3.1.3 # via # flask + # flask-cors # moto -wrapt==1.14.1 +wrapt==1.17.2 # via # -c requirements/_base.txt # aws-xray-sdk -xmltodict==0.13.0 +xmltodict==0.14.2 # via moto -zipp==3.15.0 - # via 
importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/services/autoscaling/requirements/_tools.txt b/services/autoscaling/requirements/_tools.txt index b33b2b19802..c76d3992bbe 100644 --- a/services/autoscaling/requirements/_tools.txt +++ b/services/autoscaling/requirements/_tools.txt @@ -1,95 +1,89 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.10 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.4.0 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.18.0 # via virtualenv -identify==2.5.18 +identify==2.6.10 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==25.0 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.1.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.8 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.2.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.7 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt # pre-commit # watchdog -tomli==2.0.1 +ruff==0.11.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==80.7.1 # via # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.13.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.31.2 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.14.1 - # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/autoscaling/requirements/ci.txt b/services/autoscaling/requirements/ci.txt index 7d9d3f2fe6e..74758ddb53e 100644 --- a/services/autoscaling/requirements/ci.txt +++ b/services/autoscaling/requirements/ci.txt @@ -9,12 +9,16 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/models-library 
-../../packages/pytest-simcore -../../packages/service-library[fastapi] -../../packages/settings-library +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library +simcore-dask-task-models-library @ ../../packages/dask-task-models-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library # installs current package -. +simcore-service-autoscaling @ . diff --git a/services/autoscaling/requirements/dev.txt b/services/autoscaling/requirements/dev.txt index cf5e4d8a9fa..ab92769203f 100644 --- a/services/autoscaling/requirements/dev.txt +++ b/services/autoscaling/requirements/dev.txt @@ -12,10 +12,13 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/aws-library +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/pytest-simcore --editable ../../packages/service-library[fastapi] --editable ../../packages/settings-library +--editable ../../packages/dask-task-models-library # installs current package --editable . diff --git a/services/autoscaling/requirements/prod.txt b/services/autoscaling/requirements/prod.txt index 31bf2dd55ec..b404473767f 100644 --- a/services/autoscaling/requirements/prod.txt +++ b/services/autoscaling/requirements/prod.txt @@ -10,8 +10,11 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library -../../packages/service-library[fastapi] -../../packages/settings-library +simcore-aws-library @ ../../packages/aws-library +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-dask-task-models-library @ ../../packages/dask-task-models-library # installs current package -. +simcore-service-autoscaling @ . 
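
# --- Editor's side note (not part of the diff itself) ---
# The ci.txt / prod.txt changes above pin the repo's local packages as named
# "pkg @ path" direct references instead of bare relative paths, so pip records
# each one under an explicit project name. A minimal sketch, assuming plain pip
# requirement-file syntax; the helper below is hypothetical and only extracts
# that name -> path mapping, e.g. for a sanity check in CI.

from pathlib import Path


def named_local_requirements(req_file: Path) -> dict[str, str]:
    """Return {project_name: local_path} for every 'name @ path' line in a requirements file."""
    mapping: dict[str, str] = {}
    for raw_line in req_file.read_text().splitlines():
        line = raw_line.strip()
        if not line or line.startswith(("#", "--")):
            # skip comments and options such as --requirement / --editable
            continue
        name, sep, target = line.partition(" @ ")
        if sep:
            mapping[name.strip()] = target.strip()
    return mapping


# Example (hypothetical paths):
#   named_local_requirements(Path("services/autoscaling/requirements/prod.txt"))
#   -> {"simcore-aws-library": "../../packages/aws-library", ..., "simcore-service-autoscaling": "."}
# --- end of side note ---
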
diff --git a/services/autoscaling/sandbox/script.py b/services/autoscaling/sandbox/script.py deleted file mode 100644 index cbd4c720111..00000000000 --- a/services/autoscaling/sandbox/script.py +++ /dev/null @@ -1,534 +0,0 @@ -# pylint: skip-file - -# -# Moved by pcrespov from allexandre/osparc-dask-auto-scaling/-/blob/master/script.py -# - -import math -import re -import time -from datetime import datetime, timedelta - -import boto3 -import dask_gateway -import docker -from dask.distributed import Client, Scheduler -from environs import Env - -# Init :Check that schduler is working, if not use the notebook to start it - remove old node in the swarm and in aws -# TODO add python -m pip install dask distributed --upgrade to requirements -# TODO Case when ressources asked are not meet by any sidecar resource resctrictions all tasks mis dictionnaire cluster scheduler worker_Info -# Sylvain 3.8.10 -env = Env() -env.read_env() - -local_file_directory = "data" -Scheduler.pickle = True -docker_client = docker.from_env() - -list_created_clusters = [] - - -aws_EC2 = [ - # { "name": "t2.nano", "CPUs" : 1, "RAM" : 0.5}, - # { "name": "t2.micro", "CPUs" : 1, "RAM" : 1}, - # { "name": "t2.small", "CPUs" : 1, "RAM" : 2}, - # { "name": "t2.medium", "CPUs" : 2, "RAM" : 4}, - # { "name": "t2.large", "CPUs" : 2, "RAM" : 8}, - {"name": "t2.xlarge", "CPUs": 4, "RAM": 16}, - {"name": "t2.2xlarge", "CPUs": 8, "RAM": 32}, - {"name": "r5n.4xlarge", "CPUs": 16, "RAM": 128}, - {"name": "r5n.8xlarge", "CPUs": 32, "RAM": 256} - # { "name": "r5n.12xlarge", "CPUs" : 48, "RAM" : 384}, - # { "name": "r5n.16xlarge", "CPUs" : 64, "RAM" : 512}, - # { "name": "r5n.24xlarge", "CPUs" : 96, "RAM" : 768} -] - -# THanks to https://gist.github.com/shawnbutts/3906915 -def bytesto(bytes_size, to, bsize=1024): - """convert bytes to megabytes, etc. 
- sample code: - print('mb= ' + str(bytesto(314575262000000, 'm'))) - sample output: - mb= 300002347.946 - """ - a = {"k": 1, "m": 2, "g": 3, "t": 4, "p": 5, "e": 6} - r = float(bytes_size) - for _ in range(a[to]): - r = r / bsize - return r - - -# Inteveral between checks in s -check_time = int(env.str("INTERVAL_CHECK")) - - -def get_number_of_tasks(dask_scheduler=None): - return f"{dask_scheduler.tasks}" - - -def get_workers_info(dask_scheduler=None): - return f"{dask_scheduler.workers}" - - -def check_node_resources(): - nodes = docker_client.nodes.list() - # We compile RAM and CPU capabilities of each node who have the label sidecar - # TODO take in account personalized workers - # Total resources of the cluster - nodes_sidecar_data = [] - for node in nodes: - for label in node.attrs["Spec"]["Labels"]: - if label == "sidecar": - nodes_sidecar_data.append( - { - "ID": node.attrs["ID"], - "RAM": bytesto( - node.attrs["Description"]["Resources"]["MemoryBytes"], - "g", - bsize=1024, - ), - "CPU": int(node.attrs["Description"]["Resources"]["NanoCPUs"]) - / 1000000000, - } - ) - - total_nodes_cpus = 0 - total_nodes_ram = 0 - nodes_ids = [] - for node in nodes_sidecar_data: - total_nodes_cpus = total_nodes_cpus + node["CPU"] - total_nodes_ram = total_nodes_ram + node["RAM"] - nodes_ids.append(node["ID"]) - - return { - "total_cpus": total_nodes_cpus, - "total_ram": total_nodes_ram, - "nodes_ids": nodes_ids, - } - - -# TODO discuss with the team consideration between limits and reservations on dy services -def check_tasks_resources(nodes_ids): - total_tasks_cpus = 0 - total_tasks_ram = 0 - tasks_ressources = [] - total_pending_tasks_cpus = 0 - total_pending_tasks_ram = 0 - tasks_pending_ressources = [] - serv = docker_client.services.list() - count_tasks_pending = 0 - for service in serv: - tasks = service.tasks() - for task in tasks: - if task["Status"]["State"] == "running" and task["NodeID"] in nodes_ids: - if "Resources" in task["Spec"] and task["Spec"]["Resources"] != {}: - ram = 0 - cpu = 0 - if "Reservations" in task["Spec"]["Resources"]: - if "MemoryBytes" in task["Spec"]["Resources"]["Reservations"]: - ram = bytesto( - task["Spec"]["Resources"]["Reservations"][ - "MemoryBytes" - ], - "g", - bsize=1024, - ) - if "NanoCPUs" in task["Spec"]["Resources"]["Reservations"]: - cpu = ( - int( - task["Spec"]["Resources"]["Reservations"][ - "NanoCPUs" - ] - ) - / 1000000000 - ) - tasks_ressources.append({"ID": task["ID"], "RAM": ram, "CPU": cpu}) - - elif ( - task["Status"]["State"] == "pending" - and task["Status"]["Message"] == "pending task scheduling" - and "insufficient resources on" in task["Status"]["Err"] - ): - count_tasks_pending = count_tasks_pending + 1 - if "Resources" in task["Spec"] and task["Spec"]["Resources"] != {}: - ram = 0 - cpu = 0 - if "Reservations" in task["Spec"]["Resources"]: - if "MemoryBytes" in task["Spec"]["Resources"]["Reservations"]: - ram = bytesto( - task["Spec"]["Resources"]["Reservations"][ - "MemoryBytes" - ], - "g", - bsize=1024, - ) - if "NanoCPUs" in task["Spec"]["Resources"]["Reservations"]: - cpu = ( - int( - task["Spec"]["Resources"]["Reservations"][ - "NanoCPUs" - ] - ) - / 1000000000 - ) - tasks_pending_ressources.append( - {"ID": task["ID"], "RAM": ram, "CPU": cpu} - ) - - total_tasks_cpus = 0 - total_tasks_ram = 0 - for task in tasks_ressources: - total_tasks_cpus = total_tasks_cpus + task["CPU"] - total_tasks_ram = total_tasks_ram + task["RAM"] - - for task in tasks_pending_ressources: - total_pending_tasks_cpus = total_pending_tasks_cpus + 
task["CPU"] - total_pending_tasks_ram = total_pending_tasks_ram + task["RAM"] - return { - "total_cpus_running_tasks": total_tasks_cpus, - "total_ram_running_tasks": total_tasks_ram, - "total_cpus_pending_tasks": total_pending_tasks_cpus, - "total_ram_pending_tasks": total_pending_tasks_ram, - "count_tasks_pending": count_tasks_pending, - } - - -# Check if the swarm need to scale up -# TODO currently the script has to be executed directly on the manager. Implenting a version that connect with ssh and handle the case when one manager is down to be able to have redundancy -def check_dynamic(): - user_data = ( - """#!/bin/bash - cd /home/ubuntu - hostname=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "hostname" 2>&1) - token=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker swarm join-token -q worker") - host=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker swarm join-token worker" 2>&1) - docker swarm join --token ${token} ${host##* } - label=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker node ls | grep $(hostname)") - label="$(cut -d' ' -f1 <<<"$label")" - ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker node update --label-add sidecar=true $label" - ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker node update --label-add standardworker=true $label" - """ - ) - # docker_client.containers.run("ubuntu:latest", "echo hello world") - serv = docker_client.services.list() - # We need the data of each task and the data of each node to know if we need to scale up or not - # Test if some tasks are in a pending mode because of a lack of resources - need_resources = False - for service in serv: - tasks = service.tasks() - for task in tasks: - if ( - task["Status"]["State"] == "pending" - and task["Status"]["Message"] == "pending task scheduling" - and "insufficient resources on" in task["Status"]["Err"] - ): - need_resources = True - break - - # We compile RAM and CPU capabilities of each node who have the label sidecar - # TODO take in account personalized workers - # Total resources of the cluster - if need_resources: - total_nodes = check_node_resources() - total_tasks = check_tasks_resources(total_nodes["nodes_ids"]) - available_cpus = ( - total_nodes["total_cpus"] - total_tasks["total_cpus_running_tasks"] - ) - available_ram = ( - total_nodes["total_ram"] - total_tasks["total_ram_running_tasks"] - ) - # print("avail cpuz" + str(available_cpus) + " avail ram" + str(available_ram)) - needed_cpus = ( - available_cpus - total_tasks["total_cpus_pending_tasks"] - ) * -1 + 2 # Cpus used for other tasks - needed_ram = ( - available_ram - total_tasks["total_ram_pending_tasks"] - ) * -1 + 4 # Ram used for other stasks - # print("taskcpus_needed : " + str(total_tasks["total_cpus_pending_tasks"]) + " staskRAMneeded : " + str(total_tasks["total_ram_pending_tasks"])) - print( - "The Swarm currently has " - + str(total_tasks["count_tasks_pending"]) - + " task(s) in pending mode" - ) - # print("Theses task require a total of " + str(needed_cpus) + " cpus and " + str(needed_ram) + " GB of RAM in order to be executed.") - print( - "Theses 
task(s) require a total of " - + str(math.ceil(total_tasks["total_cpus_pending_tasks"])) - + " cpus and " - + str(math.ceil(total_tasks["total_ram_pending_tasks"])) - + " GB of RAM in order to be executed." - ) - for instance in aws_EC2: - # if instance["CPUs"] >= needed_cpus and instance["RAM"] >= needed_ram: - if instance["CPUs"] >= math.ceil( - total_tasks["total_cpus_pending_tasks"] - ) and instance["RAM"] >= math.ceil(total_tasks["total_ram_pending_tasks"]): - now = datetime.now() + timedelta(hours=2) - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - print( - "A new EC2 instance has been selected to add more resources to the cluster. Name : " - + instance["name"] - + " Cpus : " - + str(instance["CPUs"]) - + " RAM : " - + str(instance["RAM"]) - + "GB" - ) - start_instance_aws( - "ami-097895f2d7d86f07e", - instance["name"], - "Autoscaling node " + dt_string, - "dynamic", - user_data, - ) - break - else: - print("No pending task(s) on the swarm detected.") - - # TODO Better algorythm - - -# TODO VPn handling is bad -# If no cluster I create one -# Test how it works without cluster -# To start the script for the first time, create the cluster with a jupyter notebook -def check_computationnal(): - # When we launch a new task, we check if the desired capacity doesn't exceed the total cluster capacity or the most powerful worker capacity - g = dask_gateway.Gateway( - address=env.str("DASK_GATEWAY_ADDRESS"), - auth=dask_gateway.BasicAuth( - env.str("DASK_GATEWAY_LOGIN"), env.str("DASK_GATEWAY_PWD") - ), - ) - - # At first, we need to create a cluster if there is none - if g.list_clusters() == []: - print("Currently 0 cluster in the gateway. We create a new one") - list_created_clusters.append(g.new_cluster()) - - cluster = g.connect(g.list_clusters()[0].name) - # cluster.adapt(minimum=1, maximum=100) - scheduler_infos = cluster.scheduler_info - client = cluster.get_client() - - max_worker_CPUs = 0 - max_worker_RAM = 0 - total_worker_CPUs = 0 - total_worker_RAM = 0 - print(scheduler_infos) - # cluster.adapt(minimum=1, maximum=15) - # TODO: case where a task want something which has enough RAM on one sidecar and enough CPU in another one but no sidecar has both ressources - for worker in scheduler_infos["workers"].values(): - total_worker_CPUs = total_worker_CPUs + int(worker["resources"]["CPU"]) - total_worker_RAM = total_worker_RAM + int(worker["resources"]["RAM"]) - if int(worker["resources"]["CPU"]) > max_worker_CPUs: - max_worker_CPUs = int(worker["resources"]["CPU"]) - if int(worker["resources"]["RAM"]) > max_worker_RAM: - max_worker_RAM = int(worker["resources"]["RAM"]) - - max_worker_RAM = bytesto(max_worker_RAM, "g", bsize=1024) - total_worker_RAM = bytesto(total_worker_RAM, "g", bsize=1024) - # cl= Client("gateway://test.test.osparc.io:8000/993bb0c4a51f4d44bd41393679a56c8d") - - # print("Total workers CPUs : " + str(total_worker_CPUs)) - # print("Total workers RAM : " + str(round(total_worker_RAM, 1)) + "G") - # print("Max worker CPUs : " + str(max_worker_CPUs)) - # print("Total workers RAM : " + str(round(max_worker_RAM, 1)) + "G") - # s = Scheduler() - # print(cluster.scheduler_comm) - cl = Client(cluster, security=cluster.security) - # print(g.proxy_address) - # print(cl.dashboard_link) - - # s = Scheduler(host="test.test.osparc.io/993bb0c4a51f4d44bd41393679a56c8d", port=8000, protocol="gateway", interface=) - # s.workers_list - # print(s.status) - tasks_infos = cl.run_on_scheduler(get_number_of_tasks) - # print(tasks_infos) - workers_infos = cl.run_on_scheduler(get_workers_info) - 
# workers_infos_dic_formatted = workers_infos.replace('SortedDict(', '')[:-1] - # print(workers_infos_dic_formatted) - # res = json.loads(workers_infos_dic_formatted) - result = re.search("processing: (.*)>", workers_infos) - if result is None: - total_tasks = 0 - else: - total_tasks = int(result.group(1)) - print("Current number of tasks managed by the scheduler : " + str(total_tasks)) - - # print(workers_infos.get("processing")) - # print(workers_infos) - # res = json.loads(workers_infos_dic_formatted) - print("Current number of workers : " + str(len(client.scheduler_info()["workers"]))) - task_handled = 0 - # IN this scenario, we look at the first worker only. In the future we need to look at all the workers - if len(client.scheduler_info()["workers"]) > 0: - workers_keys = list(client.scheduler_info()["workers"].keys())[0] - print( - "Number of tasks currently executed by the workers : " - + str( - client.scheduler_info()["workers"][workers_keys]["metrics"]["executing"] - ) - ) - task_handled = client.scheduler_info()["workers"][workers_keys]["metrics"][ - "executing" - ] - if task_handled < total_tasks: - print( - "The clusted can't handle the current load... Auto-scaling to add a new host" - ) - scale_up(2, 4) - else: - print("Computational services :Current cluster state OK, pausing for 30s.....") - - # print(client.status) - # Worker.ge - - # if task[CPU] > max_worker_CPUs or task[RAM] > max_worker_RAM: - - # Sample task - # future = client.submit(add, 132,423, resources={"CPU":10}, pure=False) - # future.result() - - -def add(x, y): - time.sleep(120) - return x + y - - -def scale_up(CPUs, RAM): - print("Processing the new instance on AWS..") - - # Has to be disccused - for host in aws_EC2: - if host["CPUs"] >= CPUs and host["RAM"] >= RAM: - new_host = host - - # Do we pass our scaling limits ? - # if total_worker_CPUs + host["CPUs"] >= int(env.str("MAX_CPUs_CLUSTER")) or total_worker_RAM + host["RAM"] >= int(env.str("MAX_RAM_CLUSTER")): - # print("Error : We would pass the defined cluster limits in term of RAM/CPUs. 
We can't scale up") - # else: - now = datetime.now() - dt_string = now.strftime("%d/%m/%Y %H:%M:%S") - user_data = ( - """#!/bin/bash - cd /home/ubuntu - hostname=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "hostname" 2>&1) - token=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker swarm join-token -q worker") - host=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker swarm join-token worker" 2>&1) - docker swarm join --token ${token} ${host##* } - label=$(ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker node ls | grep $(hostname)") - label="$(cut -d' ' -f1 <<<"$label")" - ssh -i """ - + env.str("AWS_KEY_NAME") - + """.pem -oStrictHostKeyChecking=no ubuntu@""" - + env.str("AWS_DNS") - + """ "docker node update --label-add sidecar=true $label" - reboot_hour=$(last reboot | head -1 | awk '{print $8}') - reboot_mn="${reboot_hour: -2}" - if [ $reboot_mn -gt 4 ] - then - cron_mn=$((${reboot_mn} - 5)) - else - cron_mn=55 - fi - echo ${cron_mn} - cron_mn+=" * * * * /home/ubuntu/cron_terminate.bash" - cron_mn="*/10 * * * * /home/ubuntu/cron_terminate.bash" - echo "${cron_mn}" - (crontab -u ubuntu -l; echo "$cron_mn" ) | crontab -u ubuntu - - """ - ) - start_instance_aws( - "ami-0699f9dc425967eba", - "t2.2xlarge", - "Autoscaling node " + dt_string, - "computational", - user_data, - ) - - -def start_instance_aws(ami_id, instance_type, tag, service_type, user_data): - ec2Client = boto3.client( - "ec2", - aws_access_key_id=env.str("AWS_ACCESS_KEY_ID"), - aws_secret_access_key=env.str("AWS_SECRET_ACCESS_KEY"), - region_name="us-east-1", - ) - ec2Resource = boto3.resource("ec2", region_name="us-east-1") - ec2 = boto3.resource("ec2", region_name="us-east-1") - # TODO check bug on the auto-terminate ? - # Create the instance - instanceDict = ec2.create_instances( - ImageId=ami_id, - KeyName=env.str("AWS_KEY_NAME"), - InstanceType=instance_type, - SecurityGroupIds=[env.str("SECURITY_GROUP_IDS")], # Have to be parametrized - MinCount=1, - MaxCount=1, - InstanceInitiatedShutdownBehavior="terminate", - SubnetId=env.str("SUBNET_ID"), # Have to be parametrized - TagSpecifications=[ - {"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": tag}]} - ], - UserData=user_data, - ) - instanceDict = instanceDict[0] - print( - "New instance launched for " - + service_type - + " services. Estimated time to launch and join the cluster : 2mns" - ) - print("Pausing for 10mns before next check") - time.sleep(600) - # print("Instance state: %s" % instanceDict.state) - # print("Public dns: %s" % instanceDict.public_dns_name) - # print("Instance id: %s" % instanceDict.id) - - -if __name__ == "__main__": - while True: - # check_computationnal() - check_dynamic() - time.sleep(check_time) diff --git a/services/autoscaling/setup.cfg b/services/autoscaling/setup.cfg index 48d4cf22b1c..fbce28e13d5 100644 --- a/services/autoscaling/setup.cfg +++ b/services/autoscaling/setup.cfg @@ -9,3 +9,12 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
+ testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/services/autoscaling/setup.py b/services/autoscaling/setup.py old mode 100644 new mode 100755 index e3e711abf83..94a33d4376a --- a/services/autoscaling/setup.py +++ b/services/autoscaling/setup.py @@ -42,29 +42,30 @@ def read_reqs(reqs_path: Path) -> set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name=NAME, - version=VERSION, - author=AUTHORS, - description=DESCRIPTION, - long_description=README, - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { "console_scripts": [ "simcore-service-autoscaling = simcore_service_autoscaling.cli:main", + "simcore-service = simcore_service_autoscaling.cli:main", ], }, -) +} if __name__ == "__main__": setup(**SETUP) diff --git a/services/autoscaling/src/simcore_service_autoscaling/__init__.py b/services/autoscaling/src/simcore_service_autoscaling/__init__.py index 94fc632e7af..e69de29bb2d 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/__init__.py +++ b/services/autoscaling/src/simcore_service_autoscaling/__init__.py @@ -1 +0,0 @@ -from ._meta import __version__ diff --git a/services/autoscaling/src/simcore_service_autoscaling/_meta.py b/services/autoscaling/src/simcore_service_autoscaling/_meta.py index a5bc03efe11..c421cfae966 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/_meta.py +++ b/services/autoscaling/src/simcore_service_autoscaling/_meta.py @@ -1,35 +1,20 @@ -""" Application's metadata - -""" -from contextlib import suppress from typing import Final -import pkg_resources +from models_library.basic_types import VersionStr, VersionTag from packaging.version import Version +from pydantic import TypeAdapter +from servicelib.utils_meta import PackageInfo -_current_distribution = pkg_resources.get_distribution("simcore-service-autoscaling") - -__version__: str = _current_distribution.version - - -APP_NAME: Final[str] = _current_distribution.project_name -API_VERSION: Final[str] = __version__ -VERSION: Final[Version] = Version(__version__) -API_VTAG: Final[str] = f"v{VERSION.major}" - - -def get_summary() -> str: - with suppress(Exception): - try: - metadata = _current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = _current_distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" +info: Final = PackageInfo(package_name="simcore-service-autoscaling") +__version__: Final[VersionStr] = info.__version__ - -SUMMARY: Final[str] = get_summary() +APP_NAME: Final[str] = info.project_name +API_VERSION: Final[VersionStr] = info.__version__ +VERSION: Final[Version] = info.version +API_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + 
info.api_prefix_path_tag +) +SUMMARY: Final[str] = info.get_summary() # https://patorjk.com/software/taag/#p=testall&f=Avatar&t=Autoscaling @@ -46,7 +31,35 @@ def get_summary() -> str: f"v{__version__}" ) +APP_STARTED_COMPUTATIONAL_BANNER_MSG = r""" + _ _ _ _ + | | | | (_) | | + ___ ___ _ __ ___ _ __ _ _ | |_ __ _ | |_ _ ___ _ __ __ _ | | + / __|/ _ \ | '_ ` _ \ | '_ \ | | | || __|/ _` || __|| | / _ \ | '_ \ / _` || | + | (__| (_) || | | | | || |_) || |_| || |_| (_| || |_ | || (_) || | | || (_| || | + \___|\___/ |_| |_| |_|| .__/ \__,_| \__|\__,_| \__||_| \___/ |_| |_| \__,_||_| + | | + |_| +""" -APP_FINISHED_BANNER_MSG = "{:=^100}".format( - f"πŸŽ‰ App {APP_NAME}=={__version__} shutdown completed πŸŽ‰" -) +APP_STARTED_DYNAMIC_BANNER_MSG = r""" + _ _ + | | (_) + __| | _ _ _ __ __ _ _ __ ___ _ ___ + / _` || | | || '_ \ / _` || '_ ` _ \ | | / __| + | (_| || |_| || | | || (_| || | | | | || || (__ + \__,_| \__, ||_| |_| \__,_||_| |_| |_||_| \___| + __/ | + |___/ +""" + +APP_STARTED_DISABLED_BANNER_MSG = r""" + _ _ _ _ _ + | |(_) | | | | | | + __| | _ ___ __ _ | |__ | | ___ __| | + / _` || |/ __| / _` || '_ \ | | / _ \ / _` | + | (_| || |\__ \| (_| || |_) || || __/| (_| | + \__,_||_||___/ \__,_||_.__/ |_| \___| \__,_| +""" + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/autoscaling/src/simcore_service_autoscaling/api/dependencies/application.py b/services/autoscaling/src/simcore_service_autoscaling/api/dependencies/application.py index d5abd4b019f..02823f24864 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/api/dependencies/application.py +++ b/services/autoscaling/src/simcore_service_autoscaling/api/dependencies/application.py @@ -1,5 +1,7 @@ +from typing import cast + from fastapi import FastAPI, Request def get_app(request: Request) -> FastAPI: - return request.app + return cast(FastAPI, request.app) diff --git a/services/autoscaling/src/simcore_service_autoscaling/api/health.py b/services/autoscaling/src/simcore_service_autoscaling/api/health.py index 808ad87638b..d6b0bbb5e5f 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/api/health.py +++ b/services/autoscaling/src/simcore_service_autoscaling/api/health.py @@ -5,14 +5,17 @@ """ import datetime +from typing import Annotated from fastapi import APIRouter, Depends, FastAPI from fastapi.responses import PlainTextResponse from pydantic import BaseModel from ..modules.docker import get_docker_client +from ..modules.ec2 import get_ec2_client from ..modules.rabbitmq import get_rabbitmq_client from ..modules.redis import get_redis_client +from ..modules.ssm import get_ssm_client from .dependencies.application import get_app router = APIRouter() @@ -32,32 +35,40 @@ class _ComponentStatus(BaseModel): class _StatusGet(BaseModel): rabbitmq: _ComponentStatus ec2: _ComponentStatus + ssm: _ComponentStatus docker: _ComponentStatus - redis: _ComponentStatus + redis_client_sdk: _ComponentStatus @router.get("/status", include_in_schema=True, response_model=_StatusGet) -async def get_status(app: FastAPI = Depends(get_app)) -> _StatusGet: - +async def get_status(app: Annotated[FastAPI, Depends(get_app)]) -> _StatusGet: return _StatusGet( rabbitmq=_ComponentStatus( is_enabled=bool(app.state.rabbitmq_client), - is_responsive=await get_rabbitmq_client(app).ping() - if app.state.rabbitmq_client - else False, + is_responsive=( + await get_rabbitmq_client(app).ping() + if app.state.rabbitmq_client + else False + ), ), ec2=_ComponentStatus( is_enabled=bool(app.state.ec2_client), - is_responsive=await 
app.state.ec2_client.ping() - if app.state.ec2_client - else False, + is_responsive=( + await get_ec2_client(app).ping() if app.state.ec2_client else False + ), + ), + ssm=_ComponentStatus( + is_enabled=bool(app.state.ssm_client), + is_responsive=( + await get_ssm_client(app).ping() if app.state.ssm_client else False + ), ), docker=_ComponentStatus( is_enabled=bool(app.state.docker_client), is_responsive=await get_docker_client(app).ping(), ), - redis=_ComponentStatus( - is_enabled=bool(app.state.redis), + redis_client_sdk=_ComponentStatus( + is_enabled=bool(app.state.redis_client_sdk), is_responsive=await get_redis_client(app).ping(), ), ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/constants.py b/services/autoscaling/src/simcore_service_autoscaling/constants.py new file mode 100644 index 00000000000..55fe8468bf1 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/constants.py @@ -0,0 +1,40 @@ +import re +from typing import Final + +from aws_library.ec2._models import AWSTagKey, AWSTagValue, EC2Tags +from pydantic import TypeAdapter + +BUFFER_MACHINE_PULLING_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter( + AWSTagKey +).validate_python("pulling") +BUFFER_MACHINE_PULLING_COMMAND_ID_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter( + AWSTagKey +).validate_python("ssm-command-id") +PREPULL_COMMAND_NAME: Final[str] = "docker images pulling" + +DOCKER_JOIN_COMMAND_NAME: Final[str] = "docker swarm join" +DOCKER_JOIN_COMMAND_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter( + AWSTagKey +).validate_python("io.simcore.autoscaling.joined_command_sent") + + +DOCKER_PULL_COMMAND: Final[ + str +] = "docker compose -f /docker-pull.compose.yml -p buffering pull" + +PRE_PULLED_IMAGES_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter( + AWSTagKey +).validate_python("io.simcore.autoscaling.pre_pulled_images") + +BUFFER_MACHINE_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python( + "io.simcore.autoscaling.buffer_machine" +) +DEACTIVATED_BUFFER_MACHINE_EC2_TAGS: Final[EC2Tags] = { + BUFFER_MACHINE_TAG_KEY: TypeAdapter(AWSTagValue).validate_python("true") +} +ACTIVATED_BUFFER_MACHINE_EC2_TAGS: Final[EC2Tags] = { + BUFFER_MACHINE_TAG_KEY: TypeAdapter(AWSTagValue).validate_python("false") +} +PRE_PULLED_IMAGES_RE: Final[re.Pattern] = re.compile( + rf"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_(\((\d+)\)|\d+)" +) diff --git a/services/autoscaling/src/simcore_service_autoscaling/core/application.py b/services/autoscaling/src/simcore_service_autoscaling/core/application.py index 8d45da457c1..95fcff3b4b7 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/core/application.py +++ b/services/autoscaling/src/simcore_service_autoscaling/core/application.py @@ -1,6 +1,10 @@ import logging from fastapi import FastAPI +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) from .._meta import ( API_VERSION, @@ -8,21 +12,42 @@ APP_FINISHED_BANNER_MSG, APP_NAME, APP_STARTED_BANNER_MSG, + APP_STARTED_COMPUTATIONAL_BANNER_MSG, + APP_STARTED_DISABLED_BANNER_MSG, + APP_STARTED_DYNAMIC_BANNER_MSG, ) from ..api.routes import setup_api_routes -from ..dynamic_scaling import setup as setup_background_task +from ..modules.auto_scaling_task import setup as setup_auto_scaler_background_task +from ..modules.buffer_machines_pool_task import setup as setup_buffer_machines_pool_task from ..modules.docker import setup as setup_docker from ..modules.ec2 import setup as setup_ec2 +from ..modules.instrumentation import setup as setup_instrumentation from 
..modules.rabbitmq import setup as setup_rabbitmq from ..modules.redis import setup as setup_redis +from ..modules.ssm import setup as setup_ssm from .settings import ApplicationSettings +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS = ( + "aiobotocore", + "aio_pika", + "aiormq", + "botocore", + "werkzeug", +) + logger = logging.getLogger(__name__) def create_app(settings: ApplicationSettings) -> FastAPI: + # keep mostly quiet noisy loggers + quiet_level: int = max( + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + ) + for name in _NOISY_LOGGERS: + logging.getLogger(name).setLevel(quiet_level) - logger.info("app settings: %s", settings.json(indent=1)) + logger.info("app settings: %s", settings.model_dump_json(indent=1)) app = FastAPI( debug=settings.AUTOSCALING_DEBUG, @@ -38,22 +63,37 @@ def create_app(settings: ApplicationSettings) -> FastAPI: assert app.state.settings.API_VERSION == API_VERSION # nosec # PLUGINS SETUP + if app.state.settings.AUTOSCALING_TRACING: + setup_tracing(app, app.state.settings.AUTOSCALING_TRACING, APP_NAME) + + setup_instrumentation(app) setup_api_routes(app) setup_docker(app) setup_rabbitmq(app) setup_ec2(app) + setup_ssm(app) setup_redis(app) - # autoscaler background task - setup_background_task(app) + + if app.state.settings.AUTOSCALING_TRACING: + initialize_fastapi_app_tracing(app) + + setup_auto_scaler_background_task(app) + setup_buffer_machines_pool_task(app) # ERROR HANDLERS # EVENTS async def _on_startup() -> None: - print(APP_STARTED_BANNER_MSG, flush=True) + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + if settings.AUTOSCALING_NODES_MONITORING: + print(APP_STARTED_DYNAMIC_BANNER_MSG, flush=True) # noqa: T201 + elif settings.AUTOSCALING_DASK: + print(APP_STARTED_COMPUTATIONAL_BANNER_MSG, flush=True) # noqa: T201 + else: + print(APP_STARTED_DISABLED_BANNER_MSG, flush=True) # noqa: T201 async def _on_shutdown() -> None: - print(APP_FINISHED_BANNER_MSG, flush=True) + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 app.add_event_handler("startup", _on_startup) app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/autoscaling/src/simcore_service_autoscaling/core/errors.py b/services/autoscaling/src/simcore_service_autoscaling/core/errors.py index 1f2e5ad239d..e4294631224 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/core/errors.py +++ b/services/autoscaling/src/simcore_service_autoscaling/core/errors.py @@ -1,7 +1,7 @@ -from pydantic.errors import PydanticErrorMixin +from common_library.errors_classes import OsparcErrorMixin -class AutoscalingRuntimeError(PydanticErrorMixin, RuntimeError): +class AutoscalingRuntimeError(OsparcErrorMixin, RuntimeError): msg_template: str = "Autoscaling unexpected error" @@ -9,17 +9,24 @@ class ConfigurationError(AutoscalingRuntimeError): msg_template: str = "Application misconfiguration: {msg}" -class Ec2NotConnectedError(AutoscalingRuntimeError): - msg_template: str = "Cannot connect with ec2 server" +class TaskRequiresUnauthorizedEC2InstanceTypeError(AutoscalingRuntimeError): + msg_template: str = ( + "Task {task} requires unauthorized {instance_type}. 
" + "TIP: check task required instance type or allow the instance type in autoscaling service settings" + ) -class Ec2InstanceNotFoundError(AutoscalingRuntimeError): - msg_template: str = "EC2 instance was not found" +class TaskRequirementsAboveRequiredEC2InstanceTypeError(AutoscalingRuntimeError): + msg_template: str = ( + "Task {task} requires {instance_type} but requires {resources}. " + "TIP: Ensure task resources requirements fit required instance type available resources." + ) -class Ec2TooManyInstancesError(AutoscalingRuntimeError): +class TaskBestFittingInstanceNotFoundError(AutoscalingRuntimeError): msg_template: str = ( - "The maximum amount of instances {num_instances} is already reached!" + "Task requires {resources} but no instance type fits the requirements. " + "TIP: Ensure task resources requirements fit available instance types." ) @@ -27,7 +34,13 @@ class Ec2InvalidDnsNameError(AutoscalingRuntimeError): msg_template: str = "Invalid EC2 private DNS name {aws_private_dns_name}" -class RedisNotConnectedError(AutoscalingRuntimeError): - msg_template: str = ( - "Cannot connect with redis server on {dsn}, please check configuration" - ) +class DaskSchedulerNotFoundError(AutoscalingRuntimeError): + msg_template: str = "Scheduler in {url} was not found!" + + +class DaskNoWorkersError(AutoscalingRuntimeError): + msg_template: str = "There are no dask workers connected to scheduler in {url}" + + +class DaskWorkerNotFoundError(AutoscalingRuntimeError): + msg_template: str = "Dask worker running on {worker_host} is not registered to scheduler in {url}, it is not found!" diff --git a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py index e5634b8912e..ff67aeeaab1 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/core/settings.py +++ b/services/autoscaling/src/simcore_service_autoscaling/core/settings.py @@ -1,203 +1,383 @@ import datetime from functools import cached_property -from typing import Optional, cast +from typing import Annotated, Final, Self, cast +from aws_library.ec2 import EC2InstanceBootSpecific, EC2Tags from fastapi import FastAPI -from models_library.basic_types import ( - BootModeEnum, - BuildTargetEnum, - LogLevel, - VersionTag, +from models_library.basic_types import LogLevel, PortInt, VersionTag +from models_library.clusters import ClusterAuthentication +from models_library.docker import DockerLabelKey +from pydantic import ( + AliasChoices, + AnyUrl, + Field, + NonNegativeInt, + TypeAdapter, + field_validator, + model_validator, ) -from models_library.docker import DockerGenericTag, DockerLabelKey -from pydantic import Field, NonNegativeInt, PositiveInt, parse_obj_as, validator +from pydantic_settings import SettingsConfigDict +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings from settings_library.base import BaseCustomSettings from settings_library.docker_registry import RegistrySettings +from settings_library.ec2 import EC2Settings from settings_library.rabbit import RabbitSettings from settings_library.redis import RedisSettings +from settings_library.ssm import SSMSettings +from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings from types_aiobotocore_ec2.literals import InstanceTypeType from .._meta import API_VERSION, API_VTAG, APP_NAME +AUTOSCALING_ENV_PREFIX: Final[str] = "AUTOSCALING_" -class 
EC2Settings(BaseCustomSettings): - EC2_ACCESS_KEY_ID: str - EC2_ENDPOINT: Optional[str] = Field( - default=None, description="do not define if using standard AWS" - ) - EC2_REGION_NAME: str = "us-east-1" - EC2_SECRET_ACCESS_KEY: str - - -class EC2InstancesSettings(BaseCustomSettings): - EC2_INSTANCES_ALLOWED_TYPES: list[str] = Field( - ..., - min_items=1, - unique_items=True, - description="Defines which EC2 instances are considered as candidates for new EC2 instance", - ) - EC2_INSTANCES_AMI_ID: str = Field( - ..., - min_length=1, - description="Defines the AMI (Amazon Machine Image) ID used to start a new EC2 instance", - ) - EC2_INSTANCES_MAX_INSTANCES: int = Field( - default=10, - description="Defines the maximum number of instances the autoscaling app may create", - ) - EC2_INSTANCES_SECURITY_GROUP_IDS: list[str] = Field( - ..., - min_items=1, - description="A security group acts as a virtual firewall for your EC2 instances to control incoming and outgoing traffic" - " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html), " - " this is required to start a new EC2 instance", - ) - EC2_INSTANCES_SUBNET_ID: str = Field( - ..., - min_length=1, - description="A subnet is a range of IP addresses in your VPC " - " (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), " - "this is required to start a new EC2 instance", - ) - EC2_INSTANCES_KEY_NAME: str = Field( - ..., - min_length=1, - description="SSH key filename (without ext) to access the instance through SSH" - " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)," - "this is required to start a new EC2 instance", - ) - - EC2_INSTANCES_TIME_BEFORE_TERMINATION: datetime.timedelta = Field( - default=datetime.timedelta(minutes=1), - description="Time after which an EC2 instance may be terminated (repeat every hour, min 0, max 59 minutes)", - ) - EC2_INSTANCES_MACHINES_BUFFER: NonNegativeInt = Field( - default=0, - description="Constant reserve of drained ready machines for fast(er) usage," - "disabled when set to 0. Uses 1st machine defined in EC2_INSTANCES_ALLOWED_TYPES", - ) +class AutoscalingSSMSettings(SSMSettings): + ... 
- EC2_INSTANCES_MAX_START_TIME: datetime.timedelta = Field( - default=datetime.timedelta(minutes=3), - description="Usual time taken an EC2 instance with the given AMI takes to be in 'running' mode", - ) - EC2_INSTANCES_PRE_PULL_IMAGES: list[DockerGenericTag] = Field( - default_factory=list, - description="a list of docker image/tags to pull on instance cold start", +class AutoscalingEC2Settings(EC2Settings): + model_config = SettingsConfigDict( + env_prefix=AUTOSCALING_ENV_PREFIX, + json_schema_extra={ + "examples": [ + { + f"{AUTOSCALING_ENV_PREFIX}EC2_ACCESS_KEY_ID": "my_access_key_id", + f"{AUTOSCALING_ENV_PREFIX}EC2_ENDPOINT": "https://my_ec2_endpoint.com", + f"{AUTOSCALING_ENV_PREFIX}EC2_REGION_NAME": "us-east-1", + f"{AUTOSCALING_ENV_PREFIX}EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + } + ], + }, ) - EC2_INSTANCES_PRE_PULL_IMAGES_CRON_INTERVAL: datetime.timedelta = Field( - default=datetime.timedelta(minutes=30), - description="time interval between pulls of images (minimum is 1 minute)", - ) - EC2_INSTANCES_CUSTOM_BOOT_SCRIPTS: list[str] = Field( - default_factory=list, - description="script(s) to run on EC2 instance startup (be careful!), each entry is run one after the other using '&&' operator", - ) +class EC2InstancesSettings(BaseCustomSettings): + EC2_INSTANCES_ALLOWED_TYPES: Annotated[ + dict[str, EC2InstanceBootSpecific], + Field( + description="Defines which EC2 instances are considered as candidates for new EC2 instance and their respective boot specific parameters" + "NOTE: minimum length >0", + ), + ] + + EC2_INSTANCES_KEY_NAME: Annotated[ + str, + Field( + min_length=1, + description="SSH key filename (without ext) to access the instance through SSH" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)," + "this is required to start a new EC2 instance", + ), + ] + EC2_INSTANCES_MACHINES_BUFFER: Annotated[ + NonNegativeInt, + Field( + description="Constant reserve of drained ready machines for fast(er) usage," + "disabled when set to 0. Uses 1st machine defined in EC2_INSTANCES_ALLOWED_TYPES", + ), + ] = 0 + EC2_INSTANCES_MAX_INSTANCES: Annotated[ + int, + Field( + description="Defines the maximum number of instances the autoscaling app may create", + ), + ] = 10 + EC2_INSTANCES_MAX_START_TIME: Annotated[ + datetime.timedelta, + Field( + description="Usual time taken an EC2 instance with the given AMI takes to join the cluster " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)." 
+ "NOTE: be careful that this time should always be a factor larger than the real time, as EC2 instances" + "that take longer than this time will be terminated as sometimes it happens that EC2 machine fail on start.", + ), + ] = datetime.timedelta(minutes=1) + + EC2_INSTANCES_NAME_PREFIX: Annotated[ + str, + Field( + min_length=1, + description="prefix used to name the EC2 instances created by this instance of autoscaling", + ), + ] = "autoscaling" + + EC2_INSTANCES_SECURITY_GROUP_IDS: Annotated[ + list[str], + Field( + min_length=1, + description="A security group acts as a virtual firewall for your EC2 instances to control incoming and outgoing traffic" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html), " + " this is required to start a new EC2 instance", + ), + ] + EC2_INSTANCES_SUBNET_ID: Annotated[ + str, + Field( + min_length=1, + description="A subnet is a range of IP addresses in your VPC " + " (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), " + "this is required to start a new EC2 instance", + ), + ] + EC2_INSTANCES_TIME_BEFORE_DRAINING: Annotated[ + datetime.timedelta, + Field( + description="Time after which an EC2 instance may be drained (10s<=T<=1 minutes, is automatically capped)" + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(seconds=20) + + EC2_INSTANCES_TIME_BEFORE_TERMINATION: Annotated[ + datetime.timedelta, + Field( + description="Time after which an EC2 instance may begin the termination process (0<=T<=59 minutes, is automatically capped)" + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(minutes=1) + + EC2_INSTANCES_TIME_BEFORE_FINAL_TERMINATION: Annotated[ + datetime.timedelta, + Field( + description="Time after which an EC2 instance is terminated after draining" + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(seconds=30) + + EC2_INSTANCES_CUSTOM_TAGS: Annotated[ + EC2Tags, + Field( + description="Allows to define tags that should be added to the created EC2 instance default tags. " + "a tag must have a key and an optional value. 
see [https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html]", + ), + ] + EC2_INSTANCES_ATTACHED_IAM_PROFILE: Annotated[ + str, + Field( + description="ARN the EC2 instance should be attached to (example: arn:aws:iam::XXXXX:role/NAME), to disable pass an empty string", + ), + ] + + @field_validator("EC2_INSTANCES_TIME_BEFORE_DRAINING") + @classmethod + def _ensure_draining_delay_time_is_in_range( + cls, value: datetime.timedelta + ) -> datetime.timedelta: + if value < datetime.timedelta(seconds=10): + value = datetime.timedelta(seconds=10) + elif value > datetime.timedelta(minutes=1): + value = datetime.timedelta(minutes=1) + return value - @validator("EC2_INSTANCES_TIME_BEFORE_TERMINATION") + @field_validator("EC2_INSTANCES_TIME_BEFORE_TERMINATION") @classmethod - def ensure_time_is_in_range(cls, value): + def _ensure_termination_delay_time_is_in_range( + cls, value: datetime.timedelta + ) -> datetime.timedelta: if value < datetime.timedelta(minutes=0): value = datetime.timedelta(minutes=0) elif value > datetime.timedelta(minutes=59): value = datetime.timedelta(minutes=59) return value - @validator("EC2_INSTANCES_ALLOWED_TYPES") + @field_validator("EC2_INSTANCES_ALLOWED_TYPES") @classmethod - def check_valid_intance_names(cls, value): + def _check_valid_instance_names_and_not_empty( + cls, value: dict[str, EC2InstanceBootSpecific] + ) -> dict[str, EC2InstanceBootSpecific]: # NOTE: needed because of a flaw in BaseCustomSettings # issubclass raises TypeError if used on Aliases - parse_obj_as(tuple[InstanceTypeType, ...], value) - return value + TypeAdapter(list[InstanceTypeType]).validate_python(list(value)) + if not value: + # NOTE: Field( ... , min_items=...) cannot be used to contraint number of iterms in a dict + msg = "At least one item expecte EC2_INSTANCES_ALLOWED_TYPES, got none" + raise ValueError(msg) -class NodesMonitoringSettings(BaseCustomSettings): - NODES_MONITORING_NODE_LABELS: list[DockerLabelKey] = Field( - default_factory=list, - description="autoscaling will only monitor nodes with the given labels (if empty all nodes will be monitored), these labels will be added to the new created nodes by default", - ) - - NODES_MONITORING_SERVICE_LABELS: list[DockerLabelKey] = Field( - default_factory=list, - description="autoscaling will only monitor services with the given labels (if empty all services will be monitored)", - ) - - NODES_MONITORING_NEW_NODES_LABELS: list[DockerLabelKey] = Field( - default=["io.simcore.autoscaled-node"], - description="autoscaling will add these labels to any new node it creates (additional to the ones in NODES_MONITORING_NODE_LABELS", - ) + return value -class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): +class NodesMonitoringSettings(BaseCustomSettings): + NODES_MONITORING_NODE_LABELS: Annotated[ + list[DockerLabelKey], + Field( + description="autoscaling will only monitor nodes with the given labels (if empty all nodes will be monitored), these labels will be added to the new created nodes by default", + ), + ] + + NODES_MONITORING_SERVICE_LABELS: Annotated[ + list[DockerLabelKey], + Field( + description="autoscaling will only monitor services with the given labels (if empty all services will be monitored)", + ), + ] + + NODES_MONITORING_NEW_NODES_LABELS: Annotated[ + list[DockerLabelKey], + Field( + description="autoscaling will add these labels to any new node it creates (additional to the ones in NODES_MONITORING_NODE_LABELS", + ), + ] + + +class DaskMonitoringSettings(BaseCustomSettings): + DASK_MONITORING_URL: 
Annotated[ + AnyUrl, Field(description="the url to the dask-scheduler") + ] + DASK_SCHEDULER_AUTH: Annotated[ + ClusterAuthentication, + Field( + description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)", + ), + ] + + +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): # CODE STATICS --------------------------------------------------------- API_VERSION: str = API_VERSION APP_NAME: str = APP_NAME API_VTAG: VersionTag = API_VTAG - # IMAGE BUILDTIME ------------------------------------------------------ - # @Makefile - SC_BUILD_DATE: Optional[str] = None - SC_BUILD_TARGET: Optional[BuildTargetEnum] = None - SC_VCS_REF: Optional[str] = None - SC_VCS_URL: Optional[str] = None - - # @Dockerfile - SC_BOOT_MODE: Optional[BootModeEnum] = None - SC_BOOT_TARGET: Optional[BuildTargetEnum] = None - SC_HEALTHCHECK_TIMEOUT: Optional[PositiveInt] = Field( - None, - description="If a single run of the check takes longer than timeout seconds " - "then the check is considered to have failed." - "It takes retries consecutive failures of the health check for the container to be considered unhealthy.", - ) - SC_USER_ID: Optional[int] = None - SC_USER_NAME: Optional[str] = None - # RUNTIME ----------------------------------------------------------- - AUTOSCALING_DEBUG: bool = Field( - False, description="Debug mode", env=["AUTOSCALING_DEBUG", "DEBUG"] - ) - - AUTOSCALING_LOGLEVEL: LogLevel = Field( - LogLevel.INFO, env=["AUTOSCALING_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"] - ) - - AUTOSCALING_EC2_ACCESS: Optional[EC2Settings] = Field(auto_default_from_env=True) - - AUTOSCALING_EC2_INSTANCES: Optional[EC2InstancesSettings] = Field( - auto_default_from_env=True - ) - - AUTOSCALING_NODES_MONITORING: Optional[NodesMonitoringSettings] = Field( - auto_default_from_env=True - ) - - AUTOSCALING_POLL_INTERVAL: datetime.timedelta = Field( - default=datetime.timedelta(seconds=10), - description="interval between each resource check (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", - ) - - AUTOSCALING_RABBITMQ: Optional[RabbitSettings] = Field(auto_default_from_env=True) - - AUTOSCALING_REDIS: RedisSettings = Field(auto_default_from_env=True) - - AUTOSCALING_REGISTRY: Optional[RegistrySettings] = Field(auto_default_from_env=True) + AUTOSCALING_DEBUG: Annotated[ + bool, + Field( + description="Debug mode", + validation_alias=AliasChoices("AUTOSCALING_DEBUG", "DEBUG"), + ), + ] = False + + AUTOSCALING_REMOTE_DEBUG_PORT: PortInt = 3000 + + AUTOSCALING_LOGLEVEL: Annotated[ + LogLevel, + Field( + LogLevel.INFO, + validation_alias=AliasChoices( + "AUTOSCALING_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] + AUTOSCALING_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "AUTOSCALING_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. 
WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + + AUTOSCALING_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "AUTOSCALING_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] + + AUTOSCALING_EC2_ACCESS: Annotated[ + AutoscalingEC2Settings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_SSM_ACCESS: Annotated[ + AutoscalingSSMSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_EC2_INSTANCES: Annotated[ + EC2InstancesSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_NODES_MONITORING: Annotated[ + NodesMonitoringSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_POLL_INTERVAL: Annotated[ + datetime.timedelta, + Field( + description="interval between each resource check " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(seconds=10) + + AUTOSCALING_RABBITMQ: Annotated[ + RabbitSettings | None, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + AUTOSCALING_REDIS: Annotated[ + RedisSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + AUTOSCALING_REGISTRY: Annotated[ + RegistrySettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_DASK: Annotated[ + DaskMonitoringSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + AUTOSCALING_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + AUTOSCALING_DRAIN_NODES_WITH_LABELS: Annotated[ + bool, + Field( + description="If true, drained nodes" + " are maintained as active (in the docker terminology) " + "but a docker node label named osparc-services-ready is attached", + ), + ] = False + AUTOSCALING_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + AUTOSCALING_DOCKER_JOIN_DRAINED: Annotated[ + bool, + Field( + description="If true, new nodes join the swarm as drained. If false as active.", + ), + ] = True + + AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION: Annotated[ + bool, + Field( + description="If True, then explicitely wait for cloud-init process to be completed before issuing commands. 
" + "TIP: might be useful when cheap machines are used", + ), + ] = False @cached_property - def LOG_LEVEL(self): + def LOG_LEVEL(self): # noqa: N802 return self.AUTOSCALING_LOGLEVEL - @validator("AUTOSCALING_LOGLEVEL") + @field_validator("AUTOSCALING_LOGLEVEL", mode="before") @classmethod - def valid_log_level(cls, value: str) -> str: - # NOTE: mypy is not happy without the cast - return cast(str, cls.validate_log_level(value)) + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + @model_validator(mode="after") + def _exclude_both_dynamic_computational_mode(self) -> Self: + if ( + self.AUTOSCALING_DASK is not None + and self.AUTOSCALING_NODES_MONITORING is not None + ): + msg = "Autoscaling cannot be set to monitor both computational and dynamic services (both AUTOSCALING_DASK and AUTOSCALING_NODES_MONITORING are currently set!)" + raise ValueError(msg) + return self def get_application_settings(app: FastAPI) -> ApplicationSettings: diff --git a/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling.py b/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling.py deleted file mode 100644 index 62d3bdada33..00000000000 --- a/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling.py +++ /dev/null @@ -1,62 +0,0 @@ -import json -import logging -from typing import Awaitable, Callable - -from fastapi import FastAPI -from servicelib.background_task import start_periodic_task, stop_periodic_task -from servicelib.redis_utils import exclusive - -from .core.settings import ApplicationSettings -from .dynamic_scaling_core import cluster_scaling_from_labelled_services -from .modules.redis import get_redis_client - -_TASK_NAME = "Autoscaling EC2 instances based on docker services" - -logger = logging.getLogger(__name__) - - -def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: - async def _startup() -> None: - app_settings: ApplicationSettings = app.state.settings - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - lock_key = f"{app.title}:cluster_scaling_from_labelled_services_lock" - lock_value = json.dumps( - { - "node_labels": app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS - } - ) - app.state.autoscaler_task = await start_periodic_task( - exclusive(get_redis_client(app), lock_key=lock_key, lock_value=lock_value)( - cluster_scaling_from_labelled_services - ), - interval=app_settings.AUTOSCALING_POLL_INTERVAL, - task_name=_TASK_NAME, - app=app, - ) - - return _startup - - -def on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: - async def _stop() -> None: - await stop_periodic_task(app.state.autoscaler_task) - - return _stop - - -def setup(app: FastAPI): - app_settings: ApplicationSettings = app.state.settings - if any( - s is None - for s in [ - app_settings.AUTOSCALING_NODES_MONITORING, - app_settings.AUTOSCALING_EC2_ACCESS, - app_settings.AUTOSCALING_EC2_INSTANCES, - ] - ): - logger.warning( - "the autoscaling background task is disabled by settings, nothing will happen!" 
- ) - return - app.add_event_handler("startup", on_app_startup(app)) - app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling_core.py b/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling_core.py deleted file mode 100644 index 059c10462f4..00000000000 --- a/services/autoscaling/src/simcore_service_autoscaling/dynamic_scaling_core.py +++ /dev/null @@ -1,555 +0,0 @@ -import asyncio -import collections -import dataclasses -import itertools -import logging -from datetime import datetime, timedelta, timezone -from typing import cast - -from fastapi import FastAPI -from models_library.generated_models.docker_rest_api import ( - Availability, - Node, - NodeState, - Task, -) -from pydantic import parse_obj_as -from types_aiobotocore_ec2.literals import InstanceTypeType - -from .core.errors import ( - Ec2InstanceNotFoundError, - Ec2InvalidDnsNameError, - Ec2TooManyInstancesError, -) -from .core.settings import ApplicationSettings, get_application_settings -from .models import ( - AssociatedInstance, - Cluster, - EC2InstanceData, - EC2InstanceType, - Resources, -) -from .modules.docker import get_docker_client -from .modules.ec2 import get_ec2_client -from .utils import ec2, utils_docker -from .utils.dynamic_scaling import ( - associate_ec2_instances_with_nodes, - ec2_startup_script, - node_host_name_from_ec2_private_dns, - try_assigning_task_to_instances, - try_assigning_task_to_node, - try_assigning_task_to_pending_instances, -) -from .utils.rabbitmq import ( - log_tasks_message, - post_autoscaling_status_message, - progress_tasks_message, -) - -logger = logging.getLogger(__name__) - - -async def _deactivate_empty_nodes(app: FastAPI, cluster: Cluster) -> Cluster: - app_settings: ApplicationSettings = app.state.settings - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - docker_client = get_docker_client(app) - active_empty_nodes: list[AssociatedInstance] = [] - active_non_empty_nodes: list[AssociatedInstance] = [] - for instance in cluster.active_nodes: - if ( - await utils_docker.compute_node_used_resources( - docker_client, - instance.node, - service_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS, - ) - == Resources.create_as_empty() - ): - active_empty_nodes.append(instance) - else: - active_non_empty_nodes.append(instance) - - # drain this empty nodes - await asyncio.gather( - *( - utils_docker.set_node_availability( - docker_client, - node.node, - available=False, - ) - for node in active_empty_nodes - ) - ) - if active_empty_nodes: - logger.info( - "The following nodes set to drain: '%s'", - f"{[node.node.Description.Hostname for node in active_empty_nodes if node.node.Description]}", - ) - return dataclasses.replace( - cluster, - active_nodes=active_non_empty_nodes, - drained_nodes=cluster.drained_nodes + active_empty_nodes, - ) - - -async def _find_terminateable_instances( - app: FastAPI, cluster: Cluster -) -> list[AssociatedInstance]: - app_settings: ApplicationSettings = app.state.settings - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - - if not cluster.drained_nodes: - # there is nothing to terminate here - return [] - - # get the corresponding ec2 instance data - terminateable_nodes: list[AssociatedInstance] = [] - - for instance in cluster.drained_nodes: - # NOTE: AWS price is hourly based (e.g. 
same price for a machine used 2 minutes or 1 hour, so we wait until 55 minutes) - elapsed_time_since_launched = ( - datetime.now(timezone.utc) - instance.ec2_instance.launch_time - ) - elapsed_time_since_full_hour = elapsed_time_since_launched % timedelta(hours=1) - if ( - elapsed_time_since_full_hour - >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - ): - # let's terminate that one - terminateable_nodes.append(instance) - - if terminateable_nodes: - logger.info( - "the following nodes were found to be terminateable: '%s'", - f"{[instance.node.Description.Hostname for instance in terminateable_nodes if instance.node.Description]}", - ) - return terminateable_nodes - - -async def _try_scale_down_cluster(app: FastAPI, cluster: Cluster) -> Cluster: - # 2. once it is in draining mode and we are nearing a modulo of an hour we can start the termination procedure - # NOTE: the nodes that were just changed to drain above will be eventually terminated on the next iteration - terminated_instance_ids = [] - if terminateable_instances := await _find_terminateable_instances(app, cluster): - await get_ec2_client(app).terminate_instances( - [i.ec2_instance for i in terminateable_instances] - ) - logger.info( - "EC2 terminated: '%s'", - f"{[i.node.Description.Hostname for i in terminateable_instances if i.node.Description]}", - ) - # since these nodes are being terminated, remove them from the swarm - await utils_docker.remove_nodes( - get_docker_client(app), - [i.node for i in terminateable_instances], - force=True, - ) - terminated_instance_ids = [i.ec2_instance.id for i in terminateable_instances] - - still_drained_nodes = [ - i - for i in cluster.drained_nodes - if i.ec2_instance.id not in terminated_instance_ids - ] - return dataclasses.replace( - cluster, - drained_nodes=still_drained_nodes, - terminated_instances=cluster.terminated_instances - + [i.ec2_instance for i in terminateable_instances], - ) - # 3. we could ask on rabbit whether someone would like to keep that machine for something (like the agent for example), if that is the case, we wait another hour and ask again? - # 4. 
- - -async def _activate_drained_nodes( - app: FastAPI, - cluster: Cluster, - pending_tasks: list[Task], -) -> tuple[list[Task], Cluster]: - """returns the tasks that were assigned to the drained nodes""" - if not pending_tasks: - # nothing to do - return [], cluster - - activatable_nodes: list[tuple[AssociatedInstance, list[Task]]] = [ - ( - node, - [], - ) - for node in itertools.chain( - cluster.drained_nodes, cluster.reserve_drained_nodes - ) - ] - - still_pending_tasks = [] - for task in pending_tasks: - if not try_assigning_task_to_node(task, activatable_nodes): - still_pending_tasks.append(task) - - nodes_to_activate = [ - (node, assigned_tasks) - for node, assigned_tasks in activatable_nodes - if assigned_tasks - ] - - async def _activate_and_notify( - drained_node: AssociatedInstance, tasks: list[Task] - ) -> list[Task]: - await asyncio.gather( - *( - utils_docker.set_node_availability( - get_docker_client(app), drained_node.node, available=True - ), - log_tasks_message( - app, - tasks, - "cluster adjusted, service should start shortly...", - ), - progress_tasks_message(app, tasks, progress=1.0), - ) - ) - return tasks - - # activate these nodes now - await asyncio.gather( - *(_activate_and_notify(node, tasks) for node, tasks in nodes_to_activate) - ) - new_active_nodes = [node for node, _ in nodes_to_activate] - new_active_node_ids = {node.ec2_instance.id for node in new_active_nodes} - remaining_drained_nodes = [ - node - for node in cluster.drained_nodes - if node.ec2_instance.id not in new_active_node_ids - ] - remaining_reserved_drained_nodes = [ - node - for node in cluster.reserve_drained_nodes - if node.ec2_instance.id not in new_active_node_ids - ] - return still_pending_tasks, dataclasses.replace( - cluster, - active_nodes=cluster.active_nodes + new_active_nodes, - drained_nodes=remaining_drained_nodes, - reserve_drained_nodes=remaining_reserved_drained_nodes, - ) - - -async def _find_needed_instances( - app: FastAPI, - pending_tasks: list[Task], - available_ec2_types: list[EC2InstanceType], - cluster: Cluster, -) -> dict[EC2InstanceType, int]: - type_to_instance_map = {t.name: t for t in available_ec2_types} - - # 1. check first the pending task needs - pending_instance_to_tasks: list[tuple[EC2InstanceData, list[Task]]] = [ - (i, []) for i in cluster.pending_ec2s - ] - needed_new_instance_to_tasks: list[tuple[EC2InstanceType, list[Task]]] = [] - for task in pending_tasks: - if await try_assigning_task_to_pending_instances( - app, task, pending_instance_to_tasks, type_to_instance_map - ): - continue - - if try_assigning_task_to_instances(task, needed_new_instance_to_tasks): - continue - - try: - # we need a new instance, let's find one - best_ec2_instance = ec2.find_best_fitting_ec2_instance( - available_ec2_types, - utils_docker.get_max_resources_from_docker_task(task), - score_type=ec2.closest_instance_policy, - ) - needed_new_instance_to_tasks.append((best_ec2_instance, [task])) - except Ec2InstanceNotFoundError: - logger.error( - "Task %s needs more resources than any EC2 instance " - "can provide with the current configuration. Please check.", - f"{task.Name or 'unknown task name'}:{task.ServiceID or 'unknown service ID'}", - ) - - num_instances_per_type = collections.defaultdict( - int, collections.Counter(t for t, _ in needed_new_instance_to_tasks) - ) - - # 2. 
check the buffer needs - app_settings = get_application_settings(app) - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - if ( - num_missing_nodes := ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - - len(cluster.reserve_drained_nodes) - ) - ) > 0: - # check if some are already pending - remaining_pending_instances = [ - instance - for instance, assigned_tasks in pending_instance_to_tasks - if not assigned_tasks - ] - if len(remaining_pending_instances) < ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - - len(cluster.reserve_drained_nodes) - ): - default_instance_type = available_ec2_types[0] - num_instances_per_type[default_instance_type] += num_missing_nodes - - return num_instances_per_type - - -async def _start_instances( - app: FastAPI, needed_instances: dict[EC2InstanceType, int], tasks: list[Task] -) -> list[EC2InstanceData]: - ec2_client = get_ec2_client(app) - app_settings: ApplicationSettings = app.state.settings - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - - instance_tags = ec2.get_ec2_tags(app_settings) - instance_startup_script = await ec2_startup_script(app_settings) - results = await asyncio.gather( - *[ - ec2_client.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - instance_type=parse_obj_as(InstanceTypeType, instance.name), - tags=instance_tags, - startup_script=instance_startup_script, - number_of_instances=instance_num, - ) - for instance, instance_num in needed_instances.items() - ], - return_exceptions=True, - ) - # parse results - last_issue = "" - new_pending_instances: list[EC2InstanceData] = [] - for r in results: - if isinstance(r, Ec2TooManyInstancesError): - await log_tasks_message( - app, - tasks, - "Exceptionally high load on computational cluster, please try again later.", - level=logging.ERROR, - ) - elif isinstance(r, Exception): - logger.error("Unexpected error happened when starting EC2 instance: %s", r) - last_issue = f"{r}" - elif isinstance(r, list): - new_pending_instances.extend(r) - else: - new_pending_instances.append(r) - - log_message = f"{sum(n for n in needed_instances.values())} new machines launched, it might take up to 3 minutes to start, Please wait..." 
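# --- Editor's note: illustrative sketch, not part of the diff above/below -----------
# The removed helper above fans out ec2_client.start_aws_instance() calls with
# asyncio.gather(..., return_exceptions=True), so the results list mixes successful
# return values with exception instances that must be dispatched by type afterwards.
# A minimal, self-contained example of that pattern (all names below are made up):
import asyncio


async def _flaky(i: int) -> int:
    if i == 2:
        msg = "boom"
        raise RuntimeError(msg)
    return i * 10


async def _demo() -> None:
    results = await asyncio.gather(
        *(_flaky(i) for i in range(4)), return_exceptions=True
    )
    successes = [r for r in results if not isinstance(r, BaseException)]
    failures = [r for r in results if isinstance(r, BaseException)]
    assert successes == [0, 10, 30]
    assert len(failures) == 1  # the RuntimeError raised for i == 2


asyncio.run(_demo())
# -------------------------------------------------------------------------------------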
- if last_issue: - log_message += "\nUnexpected issues detected, probably due to high load, please contact support" - await log_tasks_message(app, tasks, log_message) - return new_pending_instances - - -async def _scale_up_cluster( - app: FastAPI, - cluster: Cluster, - pending_tasks: list[Task], -) -> Cluster: - app_settings: ApplicationSettings = app.state.settings - assert app_settings.AUTOSCALING_EC2_ACCESS # nosec - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - ec2_client = get_ec2_client(app) - - # some instances might be able to run several tasks - allowed_instance_types = await ec2_client.get_ec2_instance_capabilities( - cast( # type: ignore - set[InstanceTypeType], - set( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES, - ), - ) - ) - - def _sort_according_to_allowed_types(instance_type: EC2InstanceType) -> int: - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - return app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.index( - instance_type.name - ) - - allowed_instance_types.sort(key=_sort_according_to_allowed_types) - - # let's start these - if needed_ec2_instances := await _find_needed_instances( - app, - pending_tasks, - allowed_instance_types, - cluster, - ): - await log_tasks_message( - app, - pending_tasks, - "service is pending due to missing resources, scaling up cluster now\n" - f"{sum(n for n in needed_ec2_instances.values())} new machines will be added, please wait...", - ) - # NOTE: notify the up-scaling progress started... - await progress_tasks_message(app, pending_tasks, 0.001) - new_pending_instances = await _start_instances( - app, needed_ec2_instances, pending_tasks - ) - cluster.pending_ec2s.extend(new_pending_instances) - # NOTE: to check the logs of UserData in EC2 instance - # run: tail -f -n 1000 /var/log/cloud-init-output.log in the instance - - return cluster - - -async def _try_attach_pending_ec2s(app: FastAPI, cluster: Cluster) -> Cluster: - """label the drained instances that connected to the swarm which are missing the monitoring labels""" - new_found_instances: list[AssociatedInstance] = [] - still_pending_ec2s: list[EC2InstanceData] = [] - app_settings = get_application_settings(app) - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - for instance_data in cluster.pending_ec2s: - try: - node_host_name = node_host_name_from_ec2_private_dns(instance_data) - if new_node := await utils_docker.find_node_with_name( - get_docker_client(app), node_host_name - ): - # it is attached, let's label it, but keep it as drained - new_node = await utils_docker.tag_node( - get_docker_client(app), - new_node, - tags=utils_docker.get_docker_tags(app_settings), - available=False, - ) - new_found_instances.append(AssociatedInstance(new_node, instance_data)) - else: - still_pending_ec2s.append(instance_data) - except Ec2InvalidDnsNameError: - logger.exception("Unexpected EC2 private dns") - # NOTE: first provision the reserve drained nodes if possible - all_drained_nodes = ( - cluster.drained_nodes + cluster.reserve_drained_nodes + new_found_instances - ) - return dataclasses.replace( - cluster, - drained_nodes=all_drained_nodes[ - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER : - ], - reserve_drained_nodes=all_drained_nodes[ - : app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - ], - pending_ec2s=still_pending_ec2s, - ) - - -async def _analyze_current_cluster(app: FastAPI) -> Cluster: - app_settings = 
get_application_settings(app) - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - - # get current docker nodes (these are associated (active or drained) or disconnected) - docker_nodes: list[Node] = await utils_docker.get_monitored_nodes( - get_docker_client(app), - node_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS, - ) - - # get the EC2 instances we have - existing_ec2_instances = await get_ec2_client(app).get_instances( - app_settings.AUTOSCALING_EC2_INSTANCES, - ec2.get_ec2_tags(app_settings), - ) - - terminated_ec2_instances = await get_ec2_client(app).get_instances( - app_settings.AUTOSCALING_EC2_INSTANCES, - ec2.get_ec2_tags(app_settings), - state_names=["terminated"], - ) - - attached_ec2s, pending_ec2s = await associate_ec2_instances_with_nodes( - docker_nodes, existing_ec2_instances - ) - - def _is_node_up_and_available(node: Node, availability: Availability) -> bool: - assert node.Status # nosec - assert node.Spec # nosec - return bool( - node.Status.State == NodeState.ready - and node.Spec.Availability == availability - ) - - def _node_not_ready(node: Node) -> bool: - assert node.Status # nosec - return bool(node.Status.State != NodeState.ready) - - all_drained_nodes = [ - i - for i in attached_ec2s - if _is_node_up_and_available(i.node, Availability.drain) - ] - - cluster = Cluster( - active_nodes=[ - i - for i in attached_ec2s - if _is_node_up_and_available(i.node, Availability.active) - ], - drained_nodes=all_drained_nodes[ - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER : - ], - reserve_drained_nodes=all_drained_nodes[ - : app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - ], - pending_ec2s=pending_ec2s, - terminated_instances=terminated_ec2_instances, - disconnected_nodes=[n for n in docker_nodes if _node_not_ready(n)], - ) - logger.info("current state: %s", f"{cluster=}") - return cluster - - -async def _scale_cluster(app: FastAPI, cluster: Cluster) -> Cluster: - app_settings = get_application_settings(app) - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - # 1. check if we have pending tasks and resolve them by activating some drained nodes - pending_tasks = await utils_docker.pending_service_tasks_with_insufficient_resources( - get_docker_client(app), - service_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS, - ) - # we have a number of pending tasks, try to resolve them with drained nodes if possible - still_pending_tasks, cluster = await _activate_drained_nodes( - app, cluster, pending_tasks - ) - # let's check if there are still pending tasks or if the reserve was used - if still_pending_tasks or ( - len(cluster.reserve_drained_nodes) - < app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - ): - # yes? then scale up - cluster = await _scale_up_cluster(app, cluster, still_pending_tasks) - elif still_pending_tasks == pending_tasks: - # NOTE: we only scale down in case we did not just scale up. 
The swarm needs some time to adjust - cluster = await _deactivate_empty_nodes(app, cluster) - cluster = await _try_scale_down_cluster(app, cluster) - - return cluster - - -async def _cleanup_disconnected_nodes(app: FastAPI, cluster: Cluster) -> Cluster: - await utils_docker.remove_nodes(get_docker_client(app), cluster.disconnected_nodes) - return dataclasses.replace(cluster, disconnected_nodes=[]) - - -async def cluster_scaling_from_labelled_services(app: FastAPI) -> None: - """Check that there are no pending tasks requiring additional resources in the cluster (docker swarm) - If there are such tasks, this method will allocate new machines in AWS to cope with - the additional load. - """ - - cluster = await _analyze_current_cluster(app) - cluster = await _cleanup_disconnected_nodes(app, cluster) - cluster = await _try_attach_pending_ec2s(app, cluster) - cluster = await _scale_cluster(app, cluster) - - # inform on rabbit about status - await post_autoscaling_status_message(app, cluster) diff --git a/services/autoscaling/src/simcore_service_autoscaling/main.py b/services/autoscaling/src/simcore_service_autoscaling/main.py index 120c4095b20..102258cac70 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/main.py +++ b/services/autoscaling/src/simcore_service_autoscaling/main.py @@ -1,15 +1,22 @@ """Main application to be deployed by uvicorn (or equivalent) server """ + import logging from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers from simcore_service_autoscaling.core.application import create_app from simcore_service_autoscaling.core.settings import ApplicationSettings the_settings = ApplicationSettings.create_from_envs() logging.basicConfig(level=the_settings.log_level) logging.root.setLevel(the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=the_settings.AUTOSCALING_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=the_settings.AUTOSCALING_LOG_FILTER_MAPPING, + tracing_settings=the_settings.AUTOSCALING_TRACING, +) # SINGLETON FastAPI app the_app: FastAPI = create_app(the_settings) diff --git a/services/autoscaling/src/simcore_service_autoscaling/models.py b/services/autoscaling/src/simcore_service_autoscaling/models.py index c0a4725847d..c77f9fe349c 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/models.py +++ b/services/autoscaling/src/simcore_service_autoscaling/models.py @@ -1,89 +1,232 @@ -import datetime -from dataclasses import dataclass, field +from collections import defaultdict +from collections.abc import Generator +from dataclasses import dataclass, field, fields +from typing import Any, TypeAlias +from aws_library.ec2 import EC2InstanceData, EC2InstanceType, Resources +from dask_task_models_library.resource_constraints import DaskTaskResources from models_library.generated_models.docker_rest_api import Node -from pydantic import BaseModel, ByteSize, NonNegativeFloat, PositiveInt -from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.literals import InstanceTypeType -class Resources(BaseModel): - cpus: NonNegativeFloat - ram: ByteSize +@dataclass(frozen=True, slots=True, kw_only=True) +class _TaskAssignmentMixin: + assigned_tasks: list = field(default_factory=list) + available_resources: Resources = field(default_factory=Resources.create_as_empty) - @classmethod - def create_as_empty(cls) -> "Resources": - return cls(cpus=0, ram=ByteSize(0)) - - def __ge__(self, other: "Resources") -> bool: - return self.cpus >= other.cpus and 
self.ram >= other.ram - - def __add__(self, other: "Resources") -> "Resources": - return Resources.construct( - **{ - key: a + b - for (key, a), b in zip(self.dict().items(), other.dict().values()) - } + def assign_task(self, task, task_resources: Resources) -> None: + self.assigned_tasks.append(task) + object.__setattr__( + self, "available_resources", self.available_resources - task_resources ) - def __sub__(self, other: "Resources") -> "Resources": - return Resources.construct( - **{ - key: a - b - for (key, a), b in zip(self.dict().items(), other.dict().values()) - } - ) + def has_resources_for_task(self, task_resources: Resources) -> bool: + return bool(self.available_resources >= task_resources) -@dataclass(frozen=True) -class EC2InstanceType: - name: str - cpus: PositiveInt - ram: ByteSize +@dataclass(frozen=True, kw_only=True, slots=True) +class AssignedTasksToInstanceType(_TaskAssignmentMixin): + instance_type: EC2InstanceType -InstancePrivateDNSName = str +@dataclass(frozen=True, kw_only=True, slots=True) +class _BaseInstance(_TaskAssignmentMixin): + ec2_instance: EC2InstanceData + def __post_init__(self) -> None: + if self.available_resources == Resources.create_as_empty(): + object.__setattr__(self, "available_resources", self.ec2_instance.resources) -@dataclass(frozen=True) -class EC2InstanceData: - launch_time: datetime.datetime - id: str - aws_private_dns: InstancePrivateDNSName - type: InstanceTypeType - state: InstanceStateNameType + def has_assigned_tasks(self) -> bool: + return bool(self.available_resources < self.ec2_instance.resources) -@dataclass(frozen=True) -class AssociatedInstance: +@dataclass(frozen=True, kw_only=True, slots=True) +class AssociatedInstance(_BaseInstance): node: Node - ec2_instance: EC2InstanceData -@dataclass(frozen=True) -class Cluster: +@dataclass(frozen=True, kw_only=True, slots=True) +class NonAssociatedInstance(_BaseInstance): + ... 
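# --- Editor's note: illustrative sketch, not part of the diff above/below -----------
# The frozen, slotted dataclasses above do their capacity bookkeeping through
# object.__setattr__, which bypasses the frozen-dataclass guard: assign_task()
# appends the task and subtracts its resources from available_resources.
# A self-contained toy showing the same pattern (ToyInstance is hypothetical and
# exists only for illustration; it does not use the aws_library types):
from dataclasses import dataclass, field


@dataclass(frozen=True, kw_only=True, slots=True)
class ToyInstance:
    total_cpus: float
    assigned_tasks: list = field(default_factory=list)
    free_cpus: float = 0.0

    def __post_init__(self) -> None:
        # start with the full capacity available, mirroring _BaseInstance.__post_init__
        object.__setattr__(self, "free_cpus", self.total_cpus)

    def has_resources_for_task(self, needed_cpus: float) -> bool:
        return self.free_cpus >= needed_cpus

    def assign_task(self, task: str, needed_cpus: float) -> None:
        # frozen dataclass: mutate via object.__setattr__, exactly as the mixin does
        self.assigned_tasks.append(task)
        object.__setattr__(self, "free_cpus", self.free_cpus - needed_cpus)


toy = ToyInstance(total_cpus=4)
if toy.has_resources_for_task(2):
    toy.assign_task("task-a", 2)
assert toy.assigned_tasks == ["task-a"]
assert toy.free_cpus == 2
# -------------------------------------------------------------------------------------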
+ + +@dataclass(frozen=True, kw_only=True, slots=True) +class Cluster: # pylint: disable=too-many-instance-attributes active_nodes: list[AssociatedInstance] = field( metadata={ - "description": "This is a EC2 backed docker node which is active (with running tasks)" + "description": "This is a EC2-backed docker node which is active and ready to receive tasks (or with running tasks)" + } + ) + pending_nodes: list[AssociatedInstance] = field( + metadata={ + "description": "This is a EC2-backed docker node which is active and NOT yet ready to receive tasks" } ) drained_nodes: list[AssociatedInstance] = field( metadata={ - "description": "This is a EC2 backed docker node which is drained (with no tasks)" + "description": "This is a EC2-backed docker node which is drained (cannot accept tasks)" } ) - reserve_drained_nodes: list[AssociatedInstance] = field( + buffer_drained_nodes: list[AssociatedInstance] = field( metadata={ - "description": "This is a EC2 backed docker node which is drained in the reserve if this is enabled (with no tasks)" + "description": "This is a EC2-backed docker node which is drained in the reserve if this is enabled (with no tasks)" } ) - pending_ec2s: list[EC2InstanceData] = field( + pending_ec2s: list[NonAssociatedInstance] = field( metadata={ "description": "This is an EC2 instance that is not yet associated to a docker node" } ) + broken_ec2s: list[NonAssociatedInstance] = field( + metadata={ + "description": "This is an existing EC2 instance that never properly joined the cluster and is deemed as broken and will be terminated" + } + ) + buffer_ec2s: list[NonAssociatedInstance] = field( + metadata={ + "description": "This is a prepared stopped EC2 instance, not yet associated to a docker node, ready to be used" + } + ) disconnected_nodes: list[Node] = field( metadata={ "description": "This is a docker node which is not backed by a running EC2 instance" } ) - terminated_instances: list[EC2InstanceData] + terminating_nodes: list[AssociatedInstance] = field( + metadata={ + "description": "This is a EC2-backed docker node which is docker drained and waiting for termination" + } + ) + retired_nodes: list[AssociatedInstance] = field( + metadata={ + "description": "This is a EC2-backed docker node which was retired and waiting to be drained and eventually terminated or re-used" + } + ) + terminated_instances: list[NonAssociatedInstance] + + def can_scale_down(self) -> bool: + return bool( + self.active_nodes + or self.pending_nodes + or self.drained_nodes + or self.pending_ec2s + or self.terminating_nodes + or self.retired_nodes + ) + + def total_number_of_machines(self) -> int: + """return the number of machines that are swtiched on""" + return ( + len(self.active_nodes) + + len(self.pending_nodes) + + len(self.drained_nodes) + + len(self.buffer_drained_nodes) + + len(self.pending_ec2s) + + len(self.broken_ec2s) + + len(self.terminating_nodes) + + len(self.retired_nodes) + ) + + def __repr__(self) -> str: + def _get_instance_ids( + instances: list[AssociatedInstance] | list[NonAssociatedInstance], + ) -> str: + return f"[{','.join(n.ec2_instance.id for n in instances)}]" + + return ( + f"Cluster(active-nodes: count={len(self.active_nodes)} {_get_instance_ids(self.active_nodes)}, " + f"pending-nodes: count={len(self.pending_nodes)} {_get_instance_ids(self.pending_nodes)}, " + f"drained-nodes: count={len(self.drained_nodes)} {_get_instance_ids(self.drained_nodes)}, " + f"reserve-drained-nodes: count={len(self.buffer_drained_nodes)} 
{_get_instance_ids(self.buffer_drained_nodes)}, " + f"pending-ec2s: count={len(self.pending_ec2s)} {_get_instance_ids(self.pending_ec2s)}, " + f"broken-ec2s: count={len(self.broken_ec2s)} {_get_instance_ids(self.broken_ec2s)}, " + f"buffer-ec2s: count={len(self.buffer_ec2s)} {_get_instance_ids(self.buffer_ec2s)}, " + f"disconnected-nodes: count={len(self.disconnected_nodes)}, " + f"terminating-nodes: count={len(self.terminating_nodes)} {_get_instance_ids(self.terminating_nodes)}, " + f"retired-nodes: count={len(self.retired_nodes)} {_get_instance_ids(self.retired_nodes)}, " + f"terminated-ec2s: count={len(self.terminated_instances)} {_get_instance_ids(self.terminated_instances)})" + ) + + +DaskTaskId: TypeAlias = str + + +@dataclass(frozen=True, kw_only=True) +class DaskTask: + task_id: DaskTaskId + required_resources: DaskTaskResources + + +@dataclass(kw_only=True, slots=True) +class BufferPool: + ready_instances: set[EC2InstanceData] = field(default_factory=set) + pending_instances: set[EC2InstanceData] = field(default_factory=set) + waiting_to_pull_instances: set[EC2InstanceData] = field(default_factory=set) + waiting_to_stop_instances: set[EC2InstanceData] = field(default_factory=set) + pulling_instances: set[EC2InstanceData] = field(default_factory=set) + stopping_instances: set[EC2InstanceData] = field(default_factory=set) + broken_instances: set[EC2InstanceData] = field(default_factory=set) + + def __repr__(self) -> str: + return ( + f"BufferPool(ready-count={len(self.ready_instances)}, " + f"pending-count={len(self.pending_instances)}, " + f"waiting-to-pull-count={len(self.waiting_to_pull_instances)}, " + f"waiting-to-stop-count={len(self.waiting_to_stop_instances)}, " + f"pulling-count={len(self.pulling_instances)}, " + f"stopping-count={len(self.stopping_instances)}, " + f"broken-count={len(self.broken_instances)})" + ) + + def _sort_by_readyness( + self, *, invert: bool = False + ) -> Generator[set[EC2InstanceData], Any, None]: + order = ( + self.ready_instances, + self.stopping_instances, + self.waiting_to_stop_instances, + self.pulling_instances, + self.waiting_to_pull_instances, + self.pending_instances, + self.broken_instances, + ) + if invert: + yield from reversed(order) + else: + yield from order + + def pre_pulled_instances(self) -> set[EC2InstanceData]: + """returns all the instances that completed image pre pulling""" + return self.ready_instances.union(self.stopping_instances) + + def all_instances(self) -> set[EC2InstanceData]: + """sorted by importance: READY (stopped) > STOPPING >""" + gen = self._sort_by_readyness() + return next(gen).union(*(_ for _ in gen)) + + def remove_instance(self, instance: EC2InstanceData) -> None: + for instances in self._sort_by_readyness(invert=True): + if instance in instances: + instances.remove(instance) + break + + +@dataclass +class BufferPoolManager: + buffer_pools: dict[InstanceTypeType, BufferPool] = field( + default_factory=lambda: defaultdict(BufferPool) + ) + + def __repr__(self) -> str: + return f"BufferPoolManager({dict(self.buffer_pools)})" + + def flatten_buffer_pool(self) -> BufferPool: + """returns a flattened buffer pool with all the EC2InstanceData""" + flat_pool = BufferPool() + + for buffer_pool in self.buffer_pools.values(): + for f in fields(BufferPool): + getattr(flat_pool, f.name).update(getattr(buffer_pool, f.name)) + + return flat_pool diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_core.py 
b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_core.py new file mode 100644 index 00000000000..a3a34e7b5d0 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_core.py @@ -0,0 +1,1305 @@ +import asyncio +import collections +import dataclasses +import datetime +import functools +import itertools +import logging +from typing import Final, cast + +import arrow +from aws_library.ec2 import ( + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + EC2Tags, + Resources, +) +from aws_library.ec2._errors import EC2TooManyInstancesError +from fastapi import FastAPI +from models_library.generated_models.docker_rest_api import Node, NodeState +from models_library.rabbitmq_messages import ProgressType +from servicelib.logging_utils import log_catch, log_context +from servicelib.utils import limited_gather +from servicelib.utils_formatting import timedelta_as_minute_second +from types_aiobotocore_ec2.literals import InstanceTypeType + +from ..constants import DOCKER_JOIN_COMMAND_EC2_TAG_KEY, DOCKER_JOIN_COMMAND_NAME +from ..core.errors import ( + Ec2InvalidDnsNameError, + TaskBestFittingInstanceNotFoundError, + TaskRequirementsAboveRequiredEC2InstanceTypeError, + TaskRequiresUnauthorizedEC2InstanceTypeError, +) +from ..core.settings import ApplicationSettings, get_application_settings +from ..models import ( + AssignedTasksToInstanceType, + AssociatedInstance, + Cluster, + NonAssociatedInstance, +) +from ..utils import utils_docker, utils_ec2 +from ..utils.auto_scaling_core import ( + associate_ec2_instances_with_nodes, + ec2_startup_script, + find_selected_instance_type_for_task, + get_machine_buffer_type, + node_host_name_from_ec2_private_dns, + sort_drained_nodes, +) +from ..utils.buffer_machines_pool_core import ( + get_activated_buffer_ec2_tags, + get_deactivated_buffer_ec2_tags, + is_buffer_machine, +) +from ..utils.rabbitmq import ( + post_autoscaling_status_message, + post_tasks_log_message, + post_tasks_progress_message, +) +from .auto_scaling_mode_base import BaseAutoscaling +from .docker import get_docker_client +from .ec2 import get_ec2_client +from .instrumentation import get_instrumentation, has_instrumentation +from .ssm import get_ssm_client + +_logger = logging.getLogger(__name__) + + +def _node_not_ready(node: Node) -> bool: + assert node.status # nosec + return bool(node.status.state != NodeState.ready) + + +async def _analyze_current_cluster( + app: FastAPI, + auto_scaling_mode: BaseAutoscaling, + allowed_instance_types: list[EC2InstanceType], +) -> Cluster: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + # get current docker nodes (these are associated (active or drained) or disconnected) + docker_nodes: list[Node] = await auto_scaling_mode.get_monitored_nodes(app) + + # get the EC2 instances we have + existing_ec2_instances = await get_ec2_client(app).get_instances( + key_names=[app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME], + tags=auto_scaling_mode.get_ec2_tags(app), + state_names=["pending", "running"], + ) + + terminated_ec2_instances = await get_ec2_client(app).get_instances( + key_names=[app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME], + tags=auto_scaling_mode.get_ec2_tags(app), + state_names=["terminated"], + ) + + buffer_ec2_instances = await get_ec2_client(app).get_instances( + key_names=[app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME], + tags=get_deactivated_buffer_ec2_tags(app, 
auto_scaling_mode), + state_names=["stopped"], + ) + + attached_ec2s, pending_ec2s = await associate_ec2_instances_with_nodes( + docker_nodes, existing_ec2_instances + ) + + # analyse pending ec2s, check if they are pending since too long + now = arrow.utcnow().datetime + broken_ec2s = [ + instance + for instance in pending_ec2s + if (now - instance.launch_time) + > app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ] + if broken_ec2s: + _logger.error( + "Detected broken EC2 instances that never joined the cluster after %s: %s\n" + "TIP: if this happens very often the time to start an EC2 might have increased or " + "something might be wrong with the used AMI and/or boot script in which case this" + " would happen all the time. Please check", + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME, + f"{[_.id for _ in broken_ec2s]}", + ) + # remove the broken ec2s from the pending ones + pending_ec2s = [ + instance for instance in pending_ec2s if instance not in broken_ec2s + ] + + # analyse attached ec2s + active_nodes, pending_nodes, all_drained_nodes, retired_nodes = [], [], [], [] + for instance in attached_ec2s: + if await auto_scaling_mode.is_instance_active(app, instance): + node_used_resources = await auto_scaling_mode.compute_node_used_resources( + app, instance + ) + active_nodes.append( + dataclasses.replace( + instance, + available_resources=instance.ec2_instance.resources + - node_used_resources, + ) + ) + elif auto_scaling_mode.is_instance_drained(instance): + all_drained_nodes.append(instance) + elif await auto_scaling_mode.is_instance_retired(app, instance): + # it should be drained, but it is not, so we force it to be drained such that it might be re-used if needed + retired_nodes.append(instance) + else: + pending_nodes.append(instance) + + drained_nodes, buffer_drained_nodes, terminating_nodes = sort_drained_nodes( + app_settings, all_drained_nodes, allowed_instance_types + ) + cluster = Cluster( + active_nodes=active_nodes, + pending_nodes=pending_nodes, + drained_nodes=drained_nodes, + buffer_drained_nodes=buffer_drained_nodes, + pending_ec2s=[NonAssociatedInstance(ec2_instance=i) for i in pending_ec2s], + broken_ec2s=[NonAssociatedInstance(ec2_instance=i) for i in broken_ec2s], + buffer_ec2s=[ + NonAssociatedInstance(ec2_instance=i) for i in buffer_ec2_instances + ], + terminating_nodes=terminating_nodes, + terminated_instances=[ + NonAssociatedInstance(ec2_instance=i) for i in terminated_ec2_instances + ], + disconnected_nodes=[n for n in docker_nodes if _node_not_ready(n)], + retired_nodes=retired_nodes, + ) + _logger.info("current state: %s", f"{cluster!r}") + return cluster + + +_DELAY_FOR_REMOVING_DISCONNECTED_NODES_S: Final[int] = 30 + + +async def _cleanup_disconnected_nodes(app: FastAPI, cluster: Cluster) -> Cluster: + utc_now = arrow.utcnow().datetime + removeable_nodes = [ + node + for node in cluster.disconnected_nodes + if node.updated_at + and ( + (utc_now - arrow.get(node.updated_at).datetime).total_seconds() + > _DELAY_FOR_REMOVING_DISCONNECTED_NODES_S + ) + ] + if removeable_nodes: + await utils_docker.remove_nodes(get_docker_client(app), nodes=removeable_nodes) + return dataclasses.replace(cluster, disconnected_nodes=[]) + + +async def _terminate_broken_ec2s(app: FastAPI, cluster: Cluster) -> Cluster: + broken_instances = [i.ec2_instance for i in cluster.broken_ec2s] + if broken_instances: + with log_context( + _logger, logging.WARNING, msg="terminate broken EC2 instances" + ): + await 
get_ec2_client(app).terminate_instances(broken_instances) + + return dataclasses.replace( + cluster, + broken_ec2s=[], + terminated_instances=cluster.terminated_instances + cluster.broken_ec2s, + ) + + +async def _make_pending_buffer_ec2s_join_cluster( + app: FastAPI, + cluster: Cluster, +) -> Cluster: + ec2_client = get_ec2_client(app) + if buffer_ec2s_pending := [ + i.ec2_instance + for i in cluster.pending_ec2s + if is_buffer_machine(i.ec2_instance.tags) + and (DOCKER_JOIN_COMMAND_EC2_TAG_KEY not in i.ec2_instance.tags) + ]: + # started buffer instance shall be asked to join the cluster once they are running + app_settings = get_application_settings(app) + ssm_client = get_ssm_client(app) + + buffer_ec2_connection_state = await limited_gather( + *[ + ssm_client.is_instance_connected_to_ssm_server(i.id) + for i in buffer_ec2s_pending + ], + reraise=False, + log=_logger, + limit=20, + ) + buffer_ec2_connected_to_ssm_server = [ + i + for i, c in zip( + buffer_ec2s_pending, buffer_ec2_connection_state, strict=True + ) + if c is True + ] + buffer_ec2_ready_for_command = buffer_ec2_connected_to_ssm_server + if app_settings.AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION: + buffer_ec2_initialized = await limited_gather( + *[ + ssm_client.wait_for_has_instance_completed_cloud_init(i.id) + for i in buffer_ec2_connected_to_ssm_server + ], + reraise=False, + log=_logger, + limit=20, + ) + buffer_ec2_ready_for_command = [ + i + for i, r in zip( + buffer_ec2_connected_to_ssm_server, + buffer_ec2_initialized, + strict=True, + ) + if r is True + ] + if buffer_ec2_ready_for_command: + ssm_command = await ssm_client.send_command( + [i.id for i in buffer_ec2_ready_for_command], + command=await utils_docker.get_docker_swarm_join_bash_command( + join_as_drained=app_settings.AUTOSCALING_DOCKER_JOIN_DRAINED + ), + command_name=DOCKER_JOIN_COMMAND_NAME, + ) + await ec2_client.set_instances_tags( + buffer_ec2_ready_for_command, + tags={ + DOCKER_JOIN_COMMAND_EC2_TAG_KEY: ssm_command.command_id, + }, + ) + return cluster + + +async def _try_attach_pending_ec2s( + app: FastAPI, + cluster: Cluster, + auto_scaling_mode: BaseAutoscaling, + allowed_instance_types: list[EC2InstanceType], +) -> Cluster: + """label the drained instances that connected to the swarm which are missing the monitoring labels""" + new_found_instances: list[AssociatedInstance] = [] + still_pending_ec2s: list[NonAssociatedInstance] = [] + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + for instance_data in cluster.pending_ec2s: + try: + node_host_name = node_host_name_from_ec2_private_dns( + instance_data.ec2_instance + ) + if new_node := await utils_docker.find_node_with_name( + get_docker_client(app), node_host_name + ): + # it is attached, let's label it + new_node = await utils_docker.attach_node( + app_settings, + get_docker_client(app), + new_node, + tags=auto_scaling_mode.get_new_node_docker_tags( + app, instance_data.ec2_instance + ), + ) + new_found_instances.append( + AssociatedInstance( + node=new_node, ec2_instance=instance_data.ec2_instance + ) + ) + _logger.info( + "Attached new EC2 instance %s", instance_data.ec2_instance.id + ) + else: + still_pending_ec2s.append(instance_data) + except Ec2InvalidDnsNameError: + _logger.exception("Unexpected EC2 private dns") + # NOTE: first provision the reserve drained nodes if possible + all_drained_nodes = ( + cluster.drained_nodes + cluster.buffer_drained_nodes + new_found_instances + ) + drained_nodes, 
buffer_drained_nodes, _ = sort_drained_nodes( + app_settings, all_drained_nodes, allowed_instance_types + ) + return dataclasses.replace( + cluster, + drained_nodes=drained_nodes, + buffer_drained_nodes=buffer_drained_nodes, + pending_ec2s=still_pending_ec2s, + ) + + +async def _sorted_allowed_instance_types(app: FastAPI) -> list[EC2InstanceType]: + app_settings: ApplicationSettings = app.state.settings + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + ec2_client = get_ec2_client(app) + + allowed_instance_type_names = list( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES + ) + + assert ( # nosec + allowed_instance_type_names + ), "EC2_INSTANCES_ALLOWED_TYPES cannot be empty!" + + allowed_instance_types: list[EC2InstanceType] = ( + await ec2_client.get_ec2_instance_capabilities( + cast(set[InstanceTypeType], set(allowed_instance_type_names)) + ) + ) + + def _as_selection(instance_type: EC2InstanceType) -> int: + # NOTE: will raise ValueError if allowed_instance_types not in allowed_instance_type_names + return allowed_instance_type_names.index(f"{instance_type.name}") + + allowed_instance_types.sort(key=_as_selection) + return allowed_instance_types + + +async def _activate_and_notify( + app: FastAPI, + drained_node: AssociatedInstance, +) -> AssociatedInstance: + app_settings = get_application_settings(app) + docker_client = get_docker_client(app) + updated_node, *_ = await asyncio.gather( + utils_docker.set_node_osparc_ready( + app_settings, docker_client, drained_node.node, ready=True + ), + post_tasks_log_message( + app, + tasks=drained_node.assigned_tasks, + message="cluster adjusted, service should start shortly...", + level=logging.INFO, + ), + post_tasks_progress_message( + app, + tasks=drained_node.assigned_tasks, + progress=1.0, + progress_type=ProgressType.CLUSTER_UP_SCALING, + ), + ) + return dataclasses.replace(drained_node, node=updated_node) + + +async def _activate_drained_nodes( + app: FastAPI, + cluster: Cluster, +) -> Cluster: + nodes_to_activate = [ + node + for node in itertools.chain(cluster.drained_nodes, cluster.buffer_drained_nodes) + if node.assigned_tasks + ] + + if not nodes_to_activate: + return cluster + + with log_context( + _logger, + logging.INFO, + f"activate {len(nodes_to_activate)} drained nodes {[n.ec2_instance.id for n in nodes_to_activate]}", + ): + activated_nodes = await asyncio.gather( + *(_activate_and_notify(app, node) for node in nodes_to_activate) + ) + new_active_node_ids = {node.ec2_instance.id for node in activated_nodes} + remaining_drained_nodes = [ + node + for node in cluster.drained_nodes + if node.ec2_instance.id not in new_active_node_ids + ] + remaining_reserved_drained_nodes = [ + node + for node in cluster.buffer_drained_nodes + if node.ec2_instance.id not in new_active_node_ids + ] + return dataclasses.replace( + cluster, + active_nodes=cluster.active_nodes + activated_nodes, + drained_nodes=remaining_drained_nodes, + buffer_drained_nodes=remaining_reserved_drained_nodes, + ) + + +async def _start_warm_buffer_instances( + app: FastAPI, cluster: Cluster, auto_scaling_mode: BaseAutoscaling +) -> Cluster: + """starts warm buffer if there are assigned tasks, or if a hot buffer of the same type is needed""" + + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + instances_to_start = [ + i.ec2_instance for i in cluster.buffer_ec2s if i.assigned_tasks + ] + + if ( + len(cluster.buffer_drained_nodes) + < 
app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ): + # check if we can migrate warm buffers to hot buffers + hot_buffer_instance_type = cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ) + free_startable_warm_buffers_to_replace_hot_buffers = [ + warm_buffer.ec2_instance + for warm_buffer in cluster.buffer_ec2s + if (warm_buffer.ec2_instance.type == hot_buffer_instance_type) + and not warm_buffer.assigned_tasks + ] + # check there are no empty pending ec2s/nodes that are not assigned to any task + unnassigned_pending_ec2s = [ + i.ec2_instance for i in cluster.pending_ec2s if not i.assigned_tasks + ] + unnassigned_pending_nodes = [ + i.ec2_instance for i in cluster.pending_nodes if not i.assigned_tasks + ] + + instances_to_start += free_startable_warm_buffers_to_replace_hot_buffers[ + : app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + - len(cluster.buffer_drained_nodes) + - len(unnassigned_pending_ec2s) + - len(unnassigned_pending_nodes) + ] + + if not instances_to_start: + return cluster + + with log_context( + _logger, logging.INFO, f"start {len(instances_to_start)} buffer machines" + ): + started_instances = await get_ec2_client(app).start_instances( + instances_to_start + ) + # NOTE: first start the instance and then set the tags in case the instance cannot start (e.g. InsufficientInstanceCapacity) + await get_ec2_client(app).set_instances_tags( + started_instances, + tags=get_activated_buffer_ec2_tags(app, auto_scaling_mode), + ) + started_instance_ids = [i.id for i in started_instances] + + return dataclasses.replace( + cluster, + buffer_ec2s=[ + i + for i in cluster.buffer_ec2s + if i.ec2_instance.id not in started_instance_ids + ], + pending_ec2s=cluster.pending_ec2s + + [NonAssociatedInstance(ec2_instance=i) for i in started_instances], + ) + + +def _try_assign_task_to_ec2_instance( + task, + *, + instances: list[AssociatedInstance] | list[NonAssociatedInstance], + task_required_ec2_instance: InstanceTypeType | None, + task_required_resources: Resources, +) -> bool: + for instance in instances: + if task_required_ec2_instance and ( + task_required_ec2_instance != instance.ec2_instance.type + ): + continue + if instance.has_resources_for_task(task_required_resources): + instance.assign_task(task, task_required_resources) + _logger.debug( + "%s", + f"assigned task with {task_required_resources=}, {task_required_ec2_instance=} to " + f"{instance.ec2_instance.id=}:{instance.ec2_instance.type}, " + f"remaining resources:{instance.available_resources}/{instance.ec2_instance.resources}", + ) + return True + return False + + +def _try_assign_task_to_ec2_instance_type( + task, + *, + instances: list[AssignedTasksToInstanceType], + task_required_ec2_instance: InstanceTypeType | None, + task_required_resources: Resources, +) -> bool: + for instance in instances: + if task_required_ec2_instance and ( + task_required_ec2_instance != instance.instance_type.name + ): + continue + if instance.has_resources_for_task(task_required_resources): + instance.assign_task(task, task_required_resources) + _logger.debug( + "%s", + f"assigned task with {task_required_resources=}, {task_required_ec2_instance=} to " + f"{instance.instance_type}, " + f"remaining resources:{instance.available_resources}/{instance.instance_type.resources}", + ) + return True + return False + + +async def _assign_tasks_to_current_cluster( + app: FastAPI, + tasks: list, + cluster: Cluster, + auto_scaling_mode: 
BaseAutoscaling, +) -> tuple[list, Cluster]: + """ + Evaluates whether a task can be executed on any instance within the cluster. If the task's resource requirements are met, the task is *denoted* as assigned to the cluster. + Note: This is an estimation only since actual scheduling is handled by Dask/Docker (depending on the mode). + + Returns: + A tuple containing: + - A list of unassigned tasks (tasks whose resource requirements cannot be fulfilled by the available machines in the cluster). + - The same cluster instance passed as input. + """ + unassigned_tasks = [] + assignment_predicates = [ + functools.partial(_try_assign_task_to_ec2_instance, instances=instances) + for instances in ( + cluster.active_nodes, + cluster.drained_nodes + cluster.buffer_drained_nodes, + cluster.pending_nodes, + cluster.pending_ec2s, + cluster.buffer_ec2s, + ) + ] + + for task in tasks: + task_required_resources = auto_scaling_mode.get_task_required_resources(task) + task_required_ec2_instance = await auto_scaling_mode.get_task_defined_instance( + app, task + ) + + if any( + is_assigned( + task, + task_required_ec2_instance=task_required_ec2_instance, + task_required_resources=task_required_resources, + ) + for is_assigned in assignment_predicates + ): + _logger.debug( + "task %s is assigned to one instance available in cluster", task + ) + else: + unassigned_tasks.append(task) + + if unassigned_tasks: + _logger.info( + "the current cluster should cope with %s tasks, %s are unnassigned/queued " + "tasks and need to wait or get new EC2s", + len(tasks) - len(unassigned_tasks), + len(unassigned_tasks), + ) + return unassigned_tasks, cluster + + +async def _find_needed_instances( + app: FastAPI, + unassigned_tasks: list, + available_ec2_types: list[EC2InstanceType], + cluster: Cluster, + auto_scaling_mode: BaseAutoscaling, +) -> dict[EC2InstanceType, int]: + # 1. 
check first the pending task needs + needed_new_instance_types_for_tasks: list[AssignedTasksToInstanceType] = [] + with log_context(_logger, logging.DEBUG, msg="finding needed instances"): + for task in unassigned_tasks: + task_required_resources = auto_scaling_mode.get_task_required_resources( + task + ) + task_required_ec2_instance = ( + await auto_scaling_mode.get_task_defined_instance(app, task) + ) + + # first check if we can assign the task to one of the newly tobe created instances + if _try_assign_task_to_ec2_instance_type( + task, + instances=needed_new_instance_types_for_tasks, + task_required_ec2_instance=task_required_ec2_instance, + task_required_resources=task_required_resources, + ): + continue + + # so we need to find what we can create now + try: + # check if exact instance type is needed first + if task_required_ec2_instance: + defined_ec2 = find_selected_instance_type_for_task( + task_required_ec2_instance, + available_ec2_types, + auto_scaling_mode, + task, + ) + needed_new_instance_types_for_tasks.append( + AssignedTasksToInstanceType( + instance_type=defined_ec2, + assigned_tasks=[task], + available_resources=defined_ec2.resources + - task_required_resources, + ) + ) + else: + # we go for best fitting type + best_ec2_instance = utils_ec2.find_best_fitting_ec2_instance( + available_ec2_types, + auto_scaling_mode.get_task_required_resources(task), + score_type=utils_ec2.closest_instance_policy, + ) + needed_new_instance_types_for_tasks.append( + AssignedTasksToInstanceType( + instance_type=best_ec2_instance, + assigned_tasks=[task], + available_resources=best_ec2_instance.resources + - task_required_resources, + ) + ) + except TaskBestFittingInstanceNotFoundError: + _logger.exception("Task %s needs more resources: ", f"{task}") + except ( + TaskRequirementsAboveRequiredEC2InstanceTypeError, + TaskRequiresUnauthorizedEC2InstanceTypeError, + ): + _logger.exception("Unexpected error:") + + _logger.info( + "found following %s needed instances: %s", + len(needed_new_instance_types_for_tasks), + [ + f"{i.instance_type.name}:{i.instance_type.resources} takes {len(i.assigned_tasks)} task{'s' if len(i.assigned_tasks) > 1 else ''}" + for i in needed_new_instance_types_for_tasks + ], + ) + + num_instances_per_type = collections.defaultdict( + int, + collections.Counter( + t.instance_type for t in needed_new_instance_types_for_tasks + ), + ) + + # 2. check the buffer needs + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + if ( + num_missing_nodes := ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + - len(cluster.buffer_drained_nodes) + ) + ) > 0: + # check if some are already pending + remaining_pending_instances = [ + i.ec2_instance for i in cluster.pending_ec2s if not i.assigned_tasks + ] + [i.ec2_instance for i in cluster.pending_nodes if not i.assigned_tasks] + if len(remaining_pending_instances) < ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + - len(cluster.buffer_drained_nodes) + ): + default_instance_type = get_machine_buffer_type(available_ec2_types) + num_instances_per_type[default_instance_type] += num_missing_nodes + + return num_instances_per_type + + +async def _cap_needed_instances( + app: FastAPI, needed_instances: dict[EC2InstanceType, int], ec2_tags: EC2Tags +) -> dict[EC2InstanceType, int]: + """caps the needed instances dict[EC2InstanceType, int] to the maximal allowed number of instances by + 1. limiting to 1 per asked type + 2. 
increasing each by 1 until the maximum allowed number of instances is reached + NOTE: the maximum allowed number of instances contains the current number of running/pending machines + + Raises: + Ec2TooManyInstancesError: raised when the maximum of machines is already running/pending + """ + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + current_instances = await ec2_client.get_instances( + key_names=[app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME], + tags=ec2_tags, + ) + current_number_of_instances = len(current_instances) + if ( + current_number_of_instances + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ): + # ok that is already too much + raise EC2TooManyInstancesError( + num_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + + total_number_of_needed_instances = sum(needed_instances.values()) + if ( + current_number_of_instances + total_number_of_needed_instances + <= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ): + # ok that fits no need to do anything here + return needed_instances + + # this is asking for too many, so let's cap them + max_number_of_creatable_instances = ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + - current_number_of_instances + ) + + # we start with 1 machine of each type until the max + capped_needed_instances = { + k: 1 + for count, k in enumerate(needed_instances) + if (count + 1) <= max_number_of_creatable_instances + } + + if len(capped_needed_instances) < len(needed_instances): + # there were too many types for the number of possible instances + return capped_needed_instances + + # all instance types were added, now create more of them if possible + while sum(capped_needed_instances.values()) < max_number_of_creatable_instances: + for instance_type, num_to_create in needed_instances.items(): + if ( + sum(capped_needed_instances.values()) + == max_number_of_creatable_instances + ): + break + if num_to_create > capped_needed_instances[instance_type]: + capped_needed_instances[instance_type] += 1 + + return capped_needed_instances + + +async def _launch_instances( + app: FastAPI, + needed_instances: dict[EC2InstanceType, int], + tasks: list, + auto_scaling_mode: BaseAutoscaling, +) -> list[EC2InstanceData]: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + new_instance_tags = auto_scaling_mode.get_ec2_tags(app) + capped_needed_machines = {} + try: + capped_needed_machines = await _cap_needed_instances( + app, needed_instances, new_instance_tags + ) + except EC2TooManyInstancesError: + await post_tasks_log_message( + app, + tasks=tasks, + message="The maximum number of machines in the cluster was reached. 
Please wait for your running jobs " + "to complete and try again later or contact osparc support if this issue does not resolve.", + level=logging.ERROR, + ) + return [] + + results = await asyncio.gather( + *[ + ec2_client.launch_instances( + EC2InstanceConfig( + type=instance_type, + tags=new_instance_tags, + startup_script=await ec2_startup_script( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ + instance_type.name + ], + app_settings, + ), + ami_id=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ + instance_type.name + ].ami_id, + key_name=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME, + security_group_ids=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SECURITY_GROUP_IDS, + subnet_id=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SUBNET_ID, + iam_instance_profile=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ATTACHED_IAM_PROFILE, + ), + min_number_of_instances=1, # NOTE: we want at least 1 if possible + number_of_instances=instance_num, + max_total_number_of_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + for instance_type, instance_num in capped_needed_machines.items() + ], + return_exceptions=True, + ) + # parse results + last_issue = "" + new_pending_instances: list[EC2InstanceData] = [] + for r in results: + if isinstance(r, EC2TooManyInstancesError): + await post_tasks_log_message( + app, + tasks=tasks, + message="Exceptionally high load on computational cluster, please try again later.", + level=logging.ERROR, + ) + elif isinstance(r, BaseException): + _logger.error("Unexpected error happened when starting EC2 instance: %s", r) + last_issue = f"{r}" + elif isinstance(r, list): + new_pending_instances.extend(r) + else: + new_pending_instances.append(r) + + log_message = ( + f"{sum(n for n in capped_needed_machines.values())} new machines launched" + ", it might take up to 3 minutes to start, Please wait..." 
+ ) + await post_tasks_log_message( + app, tasks=tasks, message=log_message, level=logging.INFO + ) + if last_issue: + await post_tasks_log_message( + app, + tasks=tasks, + message="Unexpected issues detected, probably due to high load, please contact support", + level=logging.ERROR, + ) + + return new_pending_instances + + +async def _find_drainable_nodes( + app: FastAPI, cluster: Cluster +) -> list[AssociatedInstance]: + app_settings: ApplicationSettings = app.state.settings + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + if not cluster.active_nodes: + # there is nothing to drain here + return [] + + # get the corresponding ec2 instance data + drainable_nodes: list[AssociatedInstance] = [] + + for instance in cluster.active_nodes: + if instance.has_assigned_tasks(): + await utils_docker.set_node_found_empty( + get_docker_client(app), instance.node, empty=False + ) + continue + node_last_empty = await utils_docker.get_node_empty_since(instance.node) + if not node_last_empty: + await utils_docker.set_node_found_empty( + get_docker_client(app), instance.node, empty=True + ) + continue + elapsed_time_since_empty = arrow.utcnow().datetime - node_last_empty + _logger.debug("%s", f"{node_last_empty=}, {elapsed_time_since_empty=}") + if ( + elapsed_time_since_empty + > app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING + ): + drainable_nodes.append(instance) + else: + _logger.info( + "%s has still %ss before being drainable", + f"{instance.ec2_instance.id=}", + f"{(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING - elapsed_time_since_empty).total_seconds():.0f}", + ) + + if drainable_nodes: + _logger.info( + "the following nodes were found to be drainable: '%s'", + f"{[instance.node.description.hostname for instance in drainable_nodes if instance.node.description]}", + ) + return drainable_nodes + + +async def _deactivate_empty_nodes(app: FastAPI, cluster: Cluster) -> Cluster: + app_settings = get_application_settings(app) + docker_client = get_docker_client(app) + active_empty_instances = await _find_drainable_nodes(app, cluster) + + if not active_empty_instances: + return cluster + + with log_context( + _logger, logging.INFO, f"drain {len(active_empty_instances)} empty nodes" + ): + updated_nodes = await asyncio.gather( + *( + utils_docker.set_node_osparc_ready( + app_settings, + docker_client, + node.node, + ready=False, + ) + for node in active_empty_instances + ) + ) + if updated_nodes: + _logger.info( + "following nodes were set to drain: '%s'", + f"{[node.description.hostname for node in updated_nodes if node.description]}", + ) + newly_drained_instances = [ + AssociatedInstance(node=node, ec2_instance=instance.ec2_instance) + for instance, node in zip(active_empty_instances, updated_nodes, strict=True) + ] + return dataclasses.replace( + cluster, + active_nodes=[ + n for n in cluster.active_nodes if n not in active_empty_instances + ], + drained_nodes=cluster.drained_nodes + newly_drained_instances, + ) + + +async def _find_terminateable_instances( + app: FastAPI, cluster: Cluster +) -> list[AssociatedInstance]: + app_settings: ApplicationSettings = app.state.settings + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + if not cluster.drained_nodes: + # there is nothing to terminate here + return [] + + # get the corresponding ec2 instance data + terminateable_nodes: list[AssociatedInstance] = [] + + for instance in cluster.drained_nodes: + node_last_updated = utils_docker.get_node_last_readyness_update(instance.node) + 
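+ # NOTE: the last readiness update doubles as the "drained since" timestamp: nodes in
+ # cluster.drained_nodes were last updated when they were set to drain, so the comparison
+ # below against EC2_INSTANCES_TIME_BEFORE_TERMINATION measures how long the node has been drained.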
elapsed_time_since_drained = ( + datetime.datetime.now(datetime.UTC) - node_last_updated + ) + _logger.debug("%s", f"{node_last_updated=}, {elapsed_time_since_drained=}") + if ( + elapsed_time_since_drained + > app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ): + # let's terminate that one + terminateable_nodes.append(instance) + else: + _logger.info( + "%s has still %ss before being terminateable", + f"{instance.ec2_instance.id=}", + f"{(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - elapsed_time_since_drained).total_seconds():.0f}", + ) + + if terminateable_nodes: + _logger.info( + "the following nodes were found to be terminateable: '%s'", + f"{[instance.node.description.hostname for instance in terminateable_nodes if instance.node.description]}", + ) + return terminateable_nodes + + +async def _try_scale_down_cluster(app: FastAPI, cluster: Cluster) -> Cluster: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + # instances found to be terminateable will now start the termination process. + new_terminating_instances = [] + for instance in await _find_terminateable_instances(app, cluster): + assert instance.node.description is not None # nosec + with ( + log_context( + _logger, + logging.INFO, + msg=f"termination process for {instance.node.description.hostname}:{instance.ec2_instance.id}", + ), + log_catch(_logger, reraise=False), + ): + await utils_docker.set_node_begin_termination_process( + get_docker_client(app), instance.node + ) + new_terminating_instances.append(instance) + new_terminating_instance_ids = [ + i.ec2_instance.id for i in new_terminating_instances + ] + + # instances that are in the termination process and already waited long enough are terminated. 
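+ # NOTE: scale-down is a two-step process: a drained node first enters the termination process
+ # (tagged above) and only after EC2_INSTANCES_TIME_BEFORE_FINAL_TERMINATION has elapsed since
+ # that tag is the EC2 instance actually terminated and the node removed from the swarm (below).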
+ now = arrow.utcnow().datetime + instances_to_terminate = [ + i + for i in cluster.terminating_nodes + if (now - (utils_docker.get_node_termination_started_since(i.node) or now)) + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_FINAL_TERMINATION + ] + terminated_instance_ids = [] + if instances_to_terminate: + with log_context( + _logger, + logging.INFO, + msg=f"definitely terminate '{[i.node.description.hostname for i in instances_to_terminate if i.node.description]}'", + ): + await get_ec2_client(app).terminate_instances( + [i.ec2_instance for i in instances_to_terminate] + ) + + # since these nodes are being terminated, remove them from the swarm + await utils_docker.remove_nodes( + get_docker_client(app), + nodes=[i.node for i in instances_to_terminate], + force=True, + ) + terminated_instance_ids = [i.ec2_instance.id for i in instances_to_terminate] + + still_drained_nodes = [ + i + for i in cluster.drained_nodes + if i.ec2_instance.id + not in (new_terminating_instance_ids + terminated_instance_ids) + ] + still_terminating_nodes = [ + i + for i in cluster.terminating_nodes + if i.ec2_instance.id not in terminated_instance_ids + ] + return dataclasses.replace( + cluster, + drained_nodes=still_drained_nodes, + terminating_nodes=still_terminating_nodes + new_terminating_instances, + terminated_instances=cluster.terminated_instances + + [ + NonAssociatedInstance(ec2_instance=i.ec2_instance) + for i in instances_to_terminate + ], + ) + + +async def _notify_based_on_machine_type( + app: FastAPI, + instances: list[AssociatedInstance] | list[NonAssociatedInstance], + *, + message: str, +) -> None: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + instance_max_time_to_start = ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + launch_time_to_tasks: dict[datetime.datetime, list] = collections.defaultdict(list) + now = datetime.datetime.now(datetime.UTC) + for instance in instances: + launch_time_to_tasks[ + instance.ec2_instance.launch_time + ] += instance.assigned_tasks + + for launch_time, tasks in launch_time_to_tasks.items(): + time_since_launch = now - launch_time + estimated_time_to_completion = launch_time + instance_max_time_to_start - now + msg = ( + f"{message} (time waiting: {timedelta_as_minute_second(time_since_launch)}," + f" est. remaining time: {timedelta_as_minute_second(estimated_time_to_completion)})...please wait..." 
+ ) + if tasks: + await post_tasks_log_message( + app, tasks=tasks, message=msg, level=logging.INFO + ) + await post_tasks_progress_message( + app, + tasks=tasks, + progress=time_since_launch.total_seconds() + / instance_max_time_to_start.total_seconds(), + progress_type=ProgressType.CLUSTER_UP_SCALING, + ) + + +async def _notify_machine_creation_progress(app: FastAPI, cluster: Cluster) -> None: + await _notify_based_on_machine_type( + app, + cluster.pending_ec2s, + message="waiting for machine to join cluster", + ) + + +async def _drain_retired_nodes( + app: FastAPI, + cluster: Cluster, +) -> Cluster: + if not cluster.retired_nodes: + return cluster + + app_settings = get_application_settings(app) + docker_client = get_docker_client(app) + # drain this empty nodes + updated_nodes = await asyncio.gather( + *( + utils_docker.set_node_osparc_ready( + app_settings, + docker_client, + node.node, + ready=False, + ) + for node in cluster.retired_nodes + ) + ) + if updated_nodes: + _logger.info( + "following nodes were set to drain: '%s'", + f"{[node.description.hostname for node in updated_nodes if node.description]}", + ) + newly_drained_instances = [ + AssociatedInstance(node=node, ec2_instance=instance.ec2_instance) + for instance, node in zip(cluster.retired_nodes, updated_nodes, strict=True) + ] + return dataclasses.replace( + cluster, + retired_nodes=[], + drained_nodes=cluster.drained_nodes + newly_drained_instances, + ) + + +async def _scale_down_unused_cluster_instances( + app: FastAPI, + cluster: Cluster, + auto_scaling_mode: BaseAutoscaling, +) -> Cluster: + await auto_scaling_mode.try_retire_nodes(app) + cluster = await _deactivate_empty_nodes(app, cluster) + return await _try_scale_down_cluster(app, cluster) + + +async def _scale_up_cluster( + app: FastAPI, + cluster: Cluster, + auto_scaling_mode: BaseAutoscaling, + allowed_instance_types: list[EC2InstanceType], + unassigned_tasks: list, +) -> Cluster: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + if not unassigned_tasks and ( + len(cluster.buffer_drained_nodes) + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ): + return cluster + + if ( + cluster.total_number_of_machines() + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ): + _logger.info( + "cluster already hit the maximum allowed amount of instances (%s), not scaling up. 
" + "%s tasks will wait until instances are free.", + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + len(unassigned_tasks), + ) + return cluster + + # now we scale up + assert app_settings.AUTOSCALING_EC2_ACCESS # nosec + + # let's start these + if needed_ec2_instances := await _find_needed_instances( + app, unassigned_tasks, allowed_instance_types, cluster, auto_scaling_mode + ): + await post_tasks_log_message( + app, + tasks=unassigned_tasks, + message="service is pending due to missing resources, scaling up cluster now...", + level=logging.INFO, + ) + new_pending_instances = await _launch_instances( + app, needed_ec2_instances, unassigned_tasks, auto_scaling_mode + ) + cluster.pending_ec2s.extend( + [NonAssociatedInstance(ec2_instance=i) for i in new_pending_instances] + ) + # NOTE: to check the logs of UserData in EC2 instance + # run: tail -f -n 1000 /var/log/cloud-init-output.log in the instance + + return cluster + + +async def _autoscale_cluster( + app: FastAPI, + cluster: Cluster, + auto_scaling_mode: BaseAutoscaling, + allowed_instance_types: list[EC2InstanceType], +) -> Cluster: + # 1. check if we have pending tasks + unnasigned_pending_tasks = await auto_scaling_mode.list_unrunnable_tasks(app) + _logger.info( + "found %s pending task%s", + len(unnasigned_pending_tasks), + "s" if len(unnasigned_pending_tasks) > 1 else "", + ) + # NOTE: this function predicts how the backend will assign tasks + still_pending_tasks, cluster = await _assign_tasks_to_current_cluster( + app, unnasigned_pending_tasks, cluster, auto_scaling_mode + ) + + # 2. activate available drained nodes to cover some of the tasks + cluster = await _activate_drained_nodes(app, cluster) + + # 3. start warm buffer instances to cover the remaining tasks + cluster = await _start_warm_buffer_instances(app, cluster, auto_scaling_mode) + + # 4. scale down unused instances + cluster = await _scale_down_unused_cluster_instances( + app, cluster, auto_scaling_mode + ) + + # 5. scale up if necessary + return await _scale_up_cluster( + app, cluster, auto_scaling_mode, allowed_instance_types, still_pending_tasks + ) + + +async def _notify_autoscaling_status( + app: FastAPI, cluster: Cluster, auto_scaling_mode: BaseAutoscaling +) -> None: + monitored_instances = list( + itertools.chain( + cluster.active_nodes, cluster.drained_nodes, cluster.buffer_drained_nodes + ) + ) + + with log_catch(_logger, reraise=False): + (total_resources, used_resources) = await asyncio.gather( + *( + auto_scaling_mode.compute_cluster_total_resources( + app, monitored_instances + ), + auto_scaling_mode.compute_cluster_used_resources( + app, monitored_instances + ), + ) + ) + # inform on rabbitMQ about status + await post_autoscaling_status_message( + app, cluster, total_resources, used_resources + ) + # prometheus instrumentation + if has_instrumentation(app): + get_instrumentation(app).cluster_metrics.update_from_cluster(cluster) + + +async def auto_scale_cluster( + *, app: FastAPI, auto_scaling_mode: BaseAutoscaling +) -> None: + """Check that there are no pending tasks requiring additional resources in the cluster (docker swarm) + If there are such tasks, this method will allocate new machines in AWS to cope with + the additional load. 
+ """ + # current state + allowed_instance_types = await _sorted_allowed_instance_types(app) + cluster = await _analyze_current_cluster( + app, auto_scaling_mode, allowed_instance_types + ) + + # cleanup + cluster = await _cleanup_disconnected_nodes(app, cluster) + cluster = await _terminate_broken_ec2s(app, cluster) + cluster = await _make_pending_buffer_ec2s_join_cluster(app, cluster) + cluster = await _try_attach_pending_ec2s( + app, cluster, auto_scaling_mode, allowed_instance_types + ) + cluster = await _drain_retired_nodes(app, cluster) + + # desired state + cluster = await _autoscale_cluster( + app, cluster, auto_scaling_mode, allowed_instance_types + ) + + # notify + await _notify_machine_creation_progress(app, cluster) + await _notify_autoscaling_status(app, cluster, auto_scaling_mode) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_base.py b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_base.py new file mode 100644 index 00000000000..b9df042c622 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_base.py @@ -0,0 +1,80 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass + +from aws_library.ec2 import EC2InstanceData, EC2Tags, Resources +from fastapi import FastAPI +from models_library.docker import DockerLabelKey +from models_library.generated_models.docker_rest_api import Node as DockerNode +from types_aiobotocore_ec2.literals import InstanceTypeType + +from ..models import AssociatedInstance +from ..utils import utils_docker + + +@dataclass +class BaseAutoscaling(ABC): # pragma: no cover + @staticmethod + @abstractmethod + async def get_monitored_nodes(app: FastAPI) -> list[DockerNode]: ... + + @staticmethod + @abstractmethod + def get_ec2_tags(app: FastAPI) -> EC2Tags: ... + + @staticmethod + @abstractmethod + def get_new_node_docker_tags( + app: FastAPI, ec2_instance_data: EC2InstanceData + ) -> dict[DockerLabelKey, str]: ... + + @staticmethod + @abstractmethod + async def list_unrunnable_tasks(app: FastAPI) -> list: ... + + @staticmethod + @abstractmethod + def get_task_required_resources(task) -> Resources: ... + + @staticmethod + @abstractmethod + async def get_task_defined_instance( + app: FastAPI, task + ) -> InstanceTypeType | None: ... + + @staticmethod + @abstractmethod + async def compute_node_used_resources( + app: FastAPI, instance: AssociatedInstance + ) -> Resources: ... + + @staticmethod + @abstractmethod + async def compute_cluster_used_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: ... + + @staticmethod + @abstractmethod + async def compute_cluster_total_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: ... + + @staticmethod + @abstractmethod + async def is_instance_active( + app: FastAPI, instance: AssociatedInstance + ) -> bool: ... + + @staticmethod + @abstractmethod + async def is_instance_retired( + app: FastAPI, instance: AssociatedInstance + ) -> bool: ... + + @staticmethod + def is_instance_drained(instance: AssociatedInstance) -> bool: + return not utils_docker.is_node_osparc_ready(instance.node) + + @staticmethod + @abstractmethod + async def try_retire_nodes(app: FastAPI) -> None: ... 
diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py new file mode 100644 index 00000000000..2fb2344f22f --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py @@ -0,0 +1,176 @@ +import collections +import logging +from typing import cast + +from aws_library.ec2 import EC2InstanceData, EC2Tags, Resources +from fastapi import FastAPI +from models_library.clusters import ClusterAuthentication +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + DockerLabelKey, +) +from models_library.generated_models.docker_rest_api import Node +from pydantic import AnyUrl, ByteSize +from servicelib.utils import logged_gather +from types_aiobotocore_ec2.literals import InstanceTypeType + +from ..core.errors import ( + DaskNoWorkersError, + DaskSchedulerNotFoundError, + DaskWorkerNotFoundError, +) +from ..core.settings import get_application_settings +from ..models import AssociatedInstance, DaskTask +from ..utils import computational_scaling as utils +from ..utils import utils_docker, utils_ec2 +from . import dask +from .auto_scaling_mode_base import BaseAutoscaling +from .docker import get_docker_client + +_logger = logging.getLogger(__name__) + + +def _scheduler_url(app: FastAPI) -> AnyUrl: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_DASK # nosec + return app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL + + +def _scheduler_auth(app: FastAPI) -> ClusterAuthentication: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_DASK # nosec + return app_settings.AUTOSCALING_DASK.DASK_SCHEDULER_AUTH + + +class ComputationalAutoscaling(BaseAutoscaling): + @staticmethod + async def get_monitored_nodes(app: FastAPI) -> list[Node]: + return await utils_docker.get_worker_nodes(get_docker_client(app)) + + @staticmethod + def get_ec2_tags(app: FastAPI) -> EC2Tags: + app_settings = get_application_settings(app) + return utils_ec2.get_ec2_tags_computational(app_settings) + + @staticmethod + def get_new_node_docker_tags( + app: FastAPI, ec2_instance_data: EC2InstanceData + ) -> dict[DockerLabelKey, str]: + assert app # nosec + return { + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY: ec2_instance_data.type + } + + @staticmethod + async def list_unrunnable_tasks(app: FastAPI) -> list[DaskTask]: + try: + unrunnable_tasks = await dask.list_unrunnable_tasks( + _scheduler_url(app), _scheduler_auth(app) + ) + # NOTE: any worker "processing" more than 1 task means that the other tasks are queued! + # NOTE: that is not necessarily true, in cases where 1 worker takes multiple tasks?? (osparc.io) + processing_tasks_by_worker = await dask.list_processing_tasks_per_worker( + _scheduler_url(app), _scheduler_auth(app) + ) + queued_tasks = [] + for tasks in processing_tasks_by_worker.values(): + queued_tasks += tasks[1:] + _logger.debug( + "found %s pending tasks and %s potentially queued tasks", + len(unrunnable_tasks), + len(queued_tasks), + ) + return unrunnable_tasks + queued_tasks + except DaskSchedulerNotFoundError: + _logger.warning( + "No dask scheduler found. TIP: Normal during machine startup." 
+ ) + return [] + + @staticmethod + def get_task_required_resources(task) -> Resources: + return utils.resources_from_dask_task(task) + + @staticmethod + async def get_task_defined_instance(app: FastAPI, task) -> InstanceTypeType | None: + assert app # nosec + return cast(InstanceTypeType | None, utils.get_task_instance_restriction(task)) + + @staticmethod + async def compute_node_used_resources( + app: FastAPI, instance: AssociatedInstance + ) -> Resources: + try: + resource = await dask.get_worker_used_resources( + _scheduler_url(app), _scheduler_auth(app), instance.ec2_instance + ) + if resource == Resources.create_as_empty(): + num_results_in_memory = ( + await dask.get_worker_still_has_results_in_memory( + _scheduler_url(app), _scheduler_auth(app), instance.ec2_instance + ) + ) + if num_results_in_memory > 0: + _logger.debug( + "found %s for %s", + f"{num_results_in_memory=}", + f"{instance.ec2_instance.id}", + ) + # NOTE: this is a trick to consider the node still useful + return Resources(cpus=0, ram=ByteSize(1024 * 1024 * 1024)) + + _logger.debug( + "found %s for %s", f"{resource=}", f"{instance.ec2_instance.id}" + ) + return resource + except (DaskWorkerNotFoundError, DaskNoWorkersError): + _logger.debug("no resource found for %s", f"{instance.ec2_instance.id}") + return Resources.create_as_empty() + + @staticmethod + async def compute_cluster_used_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: + list_of_used_resources: list[Resources] = await logged_gather( + *( + ComputationalAutoscaling.compute_node_used_resources(app, i) + for i in instances + ) + ) + counter = collections.Counter({k: 0 for k in Resources.model_fields}) + for result in list_of_used_resources: + counter.update(result.model_dump()) + return Resources.model_validate(dict(counter)) + + @staticmethod + async def compute_cluster_total_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: + try: + return await dask.compute_cluster_total_resources( + _scheduler_url(app), _scheduler_auth(app), instances + ) + except DaskNoWorkersError: + return Resources.create_as_empty() + + @staticmethod + async def is_instance_active(app: FastAPI, instance: AssociatedInstance) -> bool: + if not utils_docker.is_node_osparc_ready(instance.node): + return False + + # now check if dask-scheduler/dask-worker is available and running + return await dask.is_worker_connected( + _scheduler_url(app), _scheduler_auth(app), instance.ec2_instance + ) + + @staticmethod + async def is_instance_retired(app: FastAPI, instance: AssociatedInstance) -> bool: + if not utils_docker.is_node_osparc_ready(instance.node): + return False + return await dask.is_worker_retired( + _scheduler_url(app), _scheduler_auth(app), instance.ec2_instance + ) + + @staticmethod + async def try_retire_nodes(app: FastAPI) -> None: + await dask.try_retire_nodes(_scheduler_url(app), _scheduler_auth(app)) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_dynamic.py b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_dynamic.py new file mode 100644 index 00000000000..a8dcd7552ac --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_dynamic.py @@ -0,0 +1,101 @@ +from aws_library.ec2 import EC2InstanceData, EC2Tags, Resources +from fastapi import FastAPI +from models_library.docker import DockerLabelKey +from models_library.generated_models.docker_rest_api import Node, Task +from 
types_aiobotocore_ec2.literals import InstanceTypeType + +from ..core.settings import get_application_settings +from ..models import AssociatedInstance +from ..utils import utils_docker, utils_ec2 +from .auto_scaling_mode_base import BaseAutoscaling +from .docker import get_docker_client + + +class DynamicAutoscaling(BaseAutoscaling): + @staticmethod + async def get_monitored_nodes(app: FastAPI) -> list[Node]: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_NODES_MONITORING # nosec + return await utils_docker.get_monitored_nodes( + get_docker_client(app), + node_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS, + ) + + @staticmethod + def get_ec2_tags(app: FastAPI) -> EC2Tags: + app_settings = get_application_settings(app) + return utils_ec2.get_ec2_tags_dynamic(app_settings) + + @staticmethod + def get_new_node_docker_tags( + app: FastAPI, ec2_instance_data: EC2InstanceData + ) -> dict[DockerLabelKey, str]: + app_settings = get_application_settings(app) + return utils_docker.get_new_node_docker_tags(app_settings, ec2_instance_data) + + @staticmethod + async def list_unrunnable_tasks(app: FastAPI) -> list[Task]: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_NODES_MONITORING # nosec + return await utils_docker.pending_service_tasks_with_insufficient_resources( + get_docker_client(app), + service_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS, + ) + + @staticmethod + def get_task_required_resources(task) -> Resources: + return utils_docker.get_max_resources_from_docker_task(task) + + @staticmethod + async def get_task_defined_instance(app: FastAPI, task) -> InstanceTypeType | None: + return await utils_docker.get_task_instance_restriction( + get_docker_client(app), task + ) + + @staticmethod + async def compute_node_used_resources( + app: FastAPI, instance: AssociatedInstance + ) -> Resources: + docker_client = get_docker_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_NODES_MONITORING # nosec + return await utils_docker.compute_node_used_resources( + docker_client, + instance.node, + service_labels=app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS, + ) + + @staticmethod + async def compute_cluster_used_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: + docker_client = get_docker_client(app) + return await utils_docker.compute_cluster_used_resources( + docker_client, [i.node for i in instances] + ) + + @staticmethod + async def compute_cluster_total_resources( + app: FastAPI, instances: list[AssociatedInstance] + ) -> Resources: + assert app # nosec + return await utils_docker.compute_cluster_total_resources( + [i.node for i in instances] + ) + + @staticmethod + async def is_instance_active(app: FastAPI, instance: AssociatedInstance) -> bool: + assert app # nosec + return utils_docker.is_node_osparc_ready(instance.node) + + @staticmethod + async def is_instance_retired(app: FastAPI, instance: AssociatedInstance) -> bool: + assert app # nosec + assert instance # nosec + # nothing to do here + return False + + @staticmethod + async def try_retire_nodes(app: FastAPI) -> None: + assert app # nosec + # nothing to do here diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_task.py b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_task.py new file mode 100644 index 00000000000..5ebc6a190f8 --- /dev/null +++ 
b/services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_task.py @@ -0,0 +1,72 @@ +import logging +from collections.abc import Awaitable, Callable +from typing import Final + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.redis import exclusive + +from ..core.settings import ApplicationSettings +from ..utils.redis import create_lock_key_and_value +from .auto_scaling_core import auto_scale_cluster +from .auto_scaling_mode_computational import ComputationalAutoscaling +from .auto_scaling_mode_dynamic import DynamicAutoscaling +from .redis import get_redis_client + +_TASK_NAME: Final[str] = "Autoscaling EC2 instances" + +_logger = logging.getLogger(__name__) + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + app_settings: ApplicationSettings = app.state.settings + lock_key, lock_value = create_lock_key_and_value(app) + assert lock_key # nosec + assert lock_value # nosec + app.state.autoscaler_task = create_periodic_task( + exclusive(get_redis_client(app), lock_key=lock_key, lock_value=lock_value)( + auto_scale_cluster + ), + interval=app_settings.AUTOSCALING_POLL_INTERVAL, + task_name=_TASK_NAME, + app=app, + auto_scaling_mode=( + DynamicAutoscaling() + if app_settings.AUTOSCALING_NODES_MONITORING is not None + else ComputationalAutoscaling() + ), + ) + + return _startup + + +def on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + await cancel_wait_task(app.state.autoscaler_task) + + return _stop + + +def setup(app: FastAPI) -> None: + app_settings: ApplicationSettings = app.state.settings + if any( + s is None + for s in [ + app_settings.AUTOSCALING_EC2_ACCESS, + app_settings.AUTOSCALING_EC2_INSTANCES, + ] + ) or all( + s is None + for s in [ + app_settings.AUTOSCALING_NODES_MONITORING, + app_settings.AUTOSCALING_DASK, + ] + ): + _logger.warning( + "the autoscaling background task is disabled by settings, nothing will happen!" + ) + return + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_core.py b/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_core.py new file mode 100644 index 00000000000..d9f1c550568 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_core.py @@ -0,0 +1,439 @@ +"""Main entrypoint to manage buffer machines. + +A buffer machine is a stopped pre-initialized EC2 instance with pre-pulled Docker images in its +EBS-based storage volume. + +To create a ready buffer machine, one needs to first start the EC2 instance via EC2 API, +then via SSM api pull the Docker images to the EBS volume and finally stop the EC2 instance. 
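+When such a warm buffer is needed later, it only has to be started again and re-tagged as an
+activated buffer (see _start_warm_buffer_instances in auto_scaling_core.py), after which the
+regular autoscaling loop treats it like any other pending EC2.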
+ +Open features: + - handle changes in pre-pulled images (when the pre-pull images for a specific type changes), + currently one needs to terminate all the buffer machines to get an upgrade, + - use a cheap EC2 to prepare the buffer instead of the final instance type, + - possibly copy already initialized EBS volumes, instead of pulling again, + - possibly recycle de-activated EC2s instead of terminating them, +""" + +import logging +from collections import defaultdict +from typing import TypeAlias, cast + +import arrow +from aws_library.ec2 import ( + AWSTagValue, + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + Resources, +) +from aws_library.ssm import ( + SSMCommandExecutionResultError, + SSMCommandExecutionTimeoutError, +) +from fastapi import FastAPI +from pydantic import NonNegativeInt +from servicelib.logging_utils import log_context +from types_aiobotocore_ec2.literals import InstanceTypeType + +from ..constants import ( + BUFFER_MACHINE_PULLING_COMMAND_ID_EC2_TAG_KEY, + BUFFER_MACHINE_PULLING_EC2_TAG_KEY, + DOCKER_PULL_COMMAND, + PREPULL_COMMAND_NAME, +) +from ..core.settings import get_application_settings +from ..models import BufferPool, BufferPoolManager +from ..utils.auto_scaling_core import ec2_buffer_startup_script +from ..utils.buffer_machines_pool_core import ( + dump_pre_pulled_images_as_tags, + get_deactivated_buffer_ec2_tags, + load_pre_pulled_images_from_tags, +) +from .auto_scaling_mode_base import BaseAutoscaling +from .ec2 import get_ec2_client +from .instrumentation import get_instrumentation, has_instrumentation +from .ssm import get_ssm_client + +_logger = logging.getLogger(__name__) + + +async def _analyze_running_instance_state( + app: FastAPI, *, buffer_pool: BufferPool, instance: EC2InstanceData +): + ssm_client = get_ssm_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + if BUFFER_MACHINE_PULLING_EC2_TAG_KEY in instance.tags: + buffer_pool.pulling_instances.add(instance) + elif await ssm_client.is_instance_connected_to_ssm_server(instance.id): + try: + if await ssm_client.wait_for_has_instance_completed_cloud_init(instance.id): + if has_instrumentation(app): + get_instrumentation( + app + ).buffer_machines_pools_metrics.instances_ready_to_pull_seconds.labels( + instance_type=instance.type + ).observe( + (arrow.utcnow().datetime - instance.launch_time).total_seconds() + ) + if app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ + instance.type + ].pre_pull_images: + buffer_pool.waiting_to_pull_instances.add(instance) + else: + buffer_pool.waiting_to_stop_instances.add(instance) + else: + buffer_pool.pending_instances.add(instance) + except ( + SSMCommandExecutionResultError, + SSMCommandExecutionTimeoutError, + ): + _logger.exception( + "Unnexpected error when checking EC2 cloud initialization completion!. " + "The machine will be terminated. TIP: check the initialization phase for errors." + ) + buffer_pool.broken_instances.add(instance) + else: + is_broken = bool( + (arrow.utcnow().datetime - instance.launch_time) + > app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + + if is_broken: + _logger.error( + "The machine does not connect to the SSM server after %s. It will be terminated. 
TIP: check the initialization phase for errors.", + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME, + ) + buffer_pool.broken_instances.add(instance) + else: + buffer_pool.pending_instances.add(instance) + + +async def _analyse_current_state( + app: FastAPI, *, auto_scaling_mode: BaseAutoscaling +) -> BufferPoolManager: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + all_buffer_instances = await ec2_client.get_instances( + key_names=[app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME], + tags=get_deactivated_buffer_ec2_tags(app, auto_scaling_mode), + state_names=["stopped", "pending", "running", "stopping"], + ) + buffers_manager = BufferPoolManager() + for instance in all_buffer_instances: + match instance.state: + case "stopped": + buffers_manager.buffer_pools[instance.type].ready_instances.add( + instance + ) + case "pending": + buffers_manager.buffer_pools[instance.type].pending_instances.add( + instance + ) + case "stopping": + buffers_manager.buffer_pools[instance.type].stopping_instances.add( + instance + ) + case "running": + await _analyze_running_instance_state( + app, + buffer_pool=buffers_manager.buffer_pools[instance.type], + instance=instance, + ) + + _logger.info("Current buffer pools: %s", f"{buffers_manager}") + return buffers_manager + + +async def _terminate_unneeded_pools( + app: FastAPI, + buffers_manager: BufferPoolManager, +) -> BufferPoolManager: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + allowed_instance_types = set( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES + ) + if terminateable_warm_pool_types := set(buffers_manager.buffer_pools).difference( + allowed_instance_types + ): + with log_context( + _logger, + logging.INFO, + msg=f"removing unneeded buffer pools for '{terminateable_warm_pool_types}'", + ): + instances_to_terminate: set[EC2InstanceData] = set() + for ec2_type in terminateable_warm_pool_types: + instances_to_terminate = instances_to_terminate.union( + buffers_manager.buffer_pools[ec2_type].all_instances() + ) + await ec2_client.terminate_instances(instances_to_terminate) + for ec2_type in terminateable_warm_pool_types: + buffers_manager.buffer_pools.pop(ec2_type) + return buffers_manager + + +async def _terminate_instances_with_invalid_pre_pulled_images( + app: FastAPI, buffers_manager: BufferPoolManager +) -> BufferPoolManager: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + terminateable_instances = set() + for ( + ec2_type, + ec2_boot_config, + ) in app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.items(): + instance_type = cast(InstanceTypeType, ec2_type) + all_pre_pulled_instances = buffers_manager.buffer_pools[ + instance_type + ].pre_pulled_instances() + + for instance in all_pre_pulled_instances: + pre_pulled_images = load_pre_pulled_images_from_tags(instance.tags) + if ( + pre_pulled_images is not None + ) and pre_pulled_images != ec2_boot_config.pre_pull_images: + _logger.info( + "%s", + f"{instance.id=} has invalid {pre_pulled_images=}, expected is {ec2_boot_config.pre_pull_images=}", + ) + terminateable_instances.add(instance) + + if terminateable_instances: + await ec2_client.terminate_instances(terminateable_instances) + for instance in terminateable_instances: + 
buffers_manager.buffer_pools[instance.type].remove_instance(instance) + return buffers_manager + + +async def _terminate_broken_instances( + app: FastAPI, buffers_manager: BufferPoolManager +) -> BufferPoolManager: + ec2_client = get_ec2_client(app) + termineatable_instances = set() + for pool in buffers_manager.buffer_pools.values(): + termineatable_instances.update(pool.broken_instances) + if termineatable_instances: + await ec2_client.terminate_instances(termineatable_instances) + for instance in termineatable_instances: + buffers_manager.buffer_pools[instance.type].remove_instance(instance) + return buffers_manager + + +async def _add_remove_buffer_instances( + app: FastAPI, + buffers_manager: BufferPoolManager, + *, + auto_scaling_mode: BaseAutoscaling, +) -> BufferPoolManager: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + # let's find what is missing and what is not needed + missing_instances: dict[InstanceTypeType, NonNegativeInt] = defaultdict(int) + unneeded_instances: set[EC2InstanceData] = set() + + for ( + ec2_type, + ec2_boot_config, + ) in app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.items(): + instance_type = cast(InstanceTypeType, ec2_type) + all_pool_instances = buffers_manager.buffer_pools[instance_type].all_instances() + if len(all_pool_instances) < ec2_boot_config.buffer_count: + missing_instances[instance_type] += ec2_boot_config.buffer_count - len( + all_pool_instances + ) + else: + terminateable_instances = set( + list(all_pool_instances)[ec2_boot_config.buffer_count :] + ) + unneeded_instances = unneeded_instances.union(terminateable_instances) + + for ec2_type, num_to_start in missing_instances.items(): + ec2_boot_specific = ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ec2_type] + ) + await ec2_client.launch_instances( + EC2InstanceConfig( + type=EC2InstanceType( + name=ec2_type, + resources=Resources.create_as_empty(), # fake resources + ), + tags=get_deactivated_buffer_ec2_tags(app, auto_scaling_mode), + startup_script=ec2_buffer_startup_script( + ec2_boot_specific, app_settings + ), + ami_id=ec2_boot_specific.ami_id, + key_name=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME, + security_group_ids=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SECURITY_GROUP_IDS, + subnet_id=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SUBNET_ID, + iam_instance_profile=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ATTACHED_IAM_PROFILE, + ), + min_number_of_instances=num_to_start, + number_of_instances=num_to_start, + max_total_number_of_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + if unneeded_instances: + await ec2_client.terminate_instances(unneeded_instances) + for instance in unneeded_instances: + buffers_manager.buffer_pools[instance.type].remove_instance(instance) + return buffers_manager + + +InstancesToStop: TypeAlias = set[EC2InstanceData] +InstancesToTerminate: TypeAlias = set[EC2InstanceData] + + +async def _handle_pool_image_pulling( + app: FastAPI, instance_type: InstanceTypeType, pool: BufferPool +) -> tuple[InstancesToStop, InstancesToTerminate]: + ec2_client = get_ec2_client(app) + ssm_client = get_ssm_client(app) + if pool.waiting_to_pull_instances: + # trigger the image pulling + ssm_command = await ssm_client.send_command( + [instance.id for instance in pool.waiting_to_pull_instances], + command=DOCKER_PULL_COMMAND, + 
command_name=PREPULL_COMMAND_NAME, + ) + await ec2_client.set_instances_tags( + tuple(pool.waiting_to_pull_instances), + tags={ + BUFFER_MACHINE_PULLING_EC2_TAG_KEY: AWSTagValue("true"), + BUFFER_MACHINE_PULLING_COMMAND_ID_EC2_TAG_KEY: AWSTagValue( + ssm_command.command_id + ), + }, + ) + + instances_to_stop: set[EC2InstanceData] = pool.waiting_to_stop_instances + broken_instances_to_terminate: set[EC2InstanceData] = set() + # wait for the image pulling to complete + for instance in pool.pulling_instances: + if ssm_command_id := instance.tags.get( + BUFFER_MACHINE_PULLING_COMMAND_ID_EC2_TAG_KEY + ): + ssm_command = await ssm_client.get_command( + instance.id, command_id=ssm_command_id + ) + match ssm_command.status: + case "Success": + if has_instrumentation(app): + assert ssm_command.start_time is not None # nosec + assert ssm_command.finish_time is not None # nosec + get_instrumentation( + app + ).buffer_machines_pools_metrics.instances_completed_pulling_seconds.labels( + instance_type=instance.type + ).observe( + ( + ssm_command.finish_time - ssm_command.start_time + ).total_seconds() + ) + instances_to_stop.add(instance) + case "InProgress" | "Pending": + # do nothing we pass + pass + case _: + _logger.error( + "image pulling on buffer failed: %s", + f"{ssm_command.status}: {ssm_command.message}", + ) + broken_instances_to_terminate.add(instance) + if instances_to_stop: + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + await ec2_client.set_instances_tags( + tuple(instances_to_stop), + tags=dump_pre_pulled_images_as_tags( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ + instance_type + ].pre_pull_images + ), + ) + return instances_to_stop, broken_instances_to_terminate + + +async def _handle_image_pre_pulling( + app: FastAPI, buffers_manager: BufferPoolManager +) -> None: + ec2_client = get_ec2_client(app) + instances_to_stop: set[EC2InstanceData] = set() + broken_instances_to_terminate: set[EC2InstanceData] = set() + for instance_type, pool in buffers_manager.buffer_pools.items(): + ( + pool_instances_to_stop, + pool_instances_to_terminate, + ) = await _handle_pool_image_pulling(app, instance_type, pool) + instances_to_stop.update(pool_instances_to_stop) + broken_instances_to_terminate.update(pool_instances_to_terminate) + # 5. now stop and terminate if necessary + if instances_to_stop: + with log_context( + _logger, + logging.INFO, + "pending buffer instances completed pulling of images, stopping them", + ): + tag_keys_to_remove = ( + BUFFER_MACHINE_PULLING_EC2_TAG_KEY, + BUFFER_MACHINE_PULLING_COMMAND_ID_EC2_TAG_KEY, + ) + await ec2_client.remove_instances_tags( + tuple(instances_to_stop), + tag_keys=tag_keys_to_remove, + ) + await ec2_client.stop_instances(instances_to_stop) + if broken_instances_to_terminate: + with log_context( + _logger, logging.WARNING, "broken buffer instances, terminating them" + ): + await ec2_client.terminate_instances(broken_instances_to_terminate) + + +async def monitor_buffer_machines( + app: FastAPI, *, auto_scaling_mode: BaseAutoscaling +) -> None: + """Buffer machine creation works like so: + 1. a EC2 is created with an EBS attached volume wO auto prepulling and wO auto connect to swarm + 2. once running, a AWS SSM task is started to pull the necessary images in a controlled way + 3. once the task is completed, the EC2 is stopped and is made available as a buffer EC2 + 4. 
once needed the buffer machine is started, and as it is up a SSM task is sent to connect to the swarm, + 5. the usual then happens + """ + + app_settings = get_application_settings(app) + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + + # 1. Analyze the current state by type + buffers_manager = await _analyse_current_state( + app, auto_scaling_mode=auto_scaling_mode + ) + # 2. Terminate unneeded warm pools (e.g. if the user changed the allowed instance types) + buffers_manager = await _terminate_unneeded_pools(app, buffers_manager) + + buffers_manager = await _terminate_instances_with_invalid_pre_pulled_images( + app, buffers_manager + ) + # 3. terminate broken instances + buffers_manager = await _terminate_broken_instances(app, buffers_manager) + + # 3. add/remove buffer instances base on ec2 boot specific data + buffers_manager = await _add_remove_buffer_instances( + app, buffers_manager, auto_scaling_mode=auto_scaling_mode + ) + + # 4. pull docker images if needed + await _handle_image_pre_pulling(app, buffers_manager) + + # 5. instrumentation + if has_instrumentation(app): + get_instrumentation( + app + ).buffer_machines_pools_metrics.update_from_buffer_pool_manager(buffers_manager) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_task.py b/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_task.py new file mode 100644 index 00000000000..2985e2ffcc4 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/buffer_machines_pool_task.py @@ -0,0 +1,80 @@ +import logging +from collections.abc import Awaitable, Callable +from typing import Final + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.redis import exclusive + +from ..core.settings import ApplicationSettings +from ..utils.redis import create_lock_key_and_value +from .auto_scaling_mode_dynamic import DynamicAutoscaling +from .buffer_machines_pool_core import monitor_buffer_machines +from .redis import get_redis_client + +_TASK_NAME_BUFFER: Final[str] = "Autoscaling Buffer Machines Pool" + +_logger = logging.getLogger(__name__) + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + app_settings: ApplicationSettings = app.state.settings + lock_key, lock_value = create_lock_key_and_value(app) + assert lock_key # nosec + assert lock_value # nosec + + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + app.state.buffers_pool_task = create_periodic_task( + exclusive( + get_redis_client(app), + lock_key=f"{lock_key}_buffers_pool", + lock_value=lock_value, + )(monitor_buffer_machines), + interval=app_settings.AUTOSCALING_POLL_INTERVAL, + task_name=_TASK_NAME_BUFFER, + app=app, + auto_scaling_mode=(DynamicAutoscaling()), + ) + + return _startup + + +def on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + if hasattr(app.state, "buffers_pool_task"): + await cancel_wait_task(app.state.buffers_pool_task) + + return _stop + + +def setup(app: FastAPI): + app_settings: ApplicationSettings = app.state.settings + if ( + any( + s is None + for s in [ + app_settings.AUTOSCALING_EC2_ACCESS, + app_settings.AUTOSCALING_EC2_INSTANCES, + app_settings.AUTOSCALING_SSM_ACCESS, + ] + ) + or all( + s is None + for s in [ + app_settings.AUTOSCALING_NODES_MONITORING, + 
app_settings.AUTOSCALING_DASK, + ] + ) + or not app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ATTACHED_IAM_PROFILE # type: ignore[union-attr] # checked above + ): + _logger.warning( + "%s task is disabled by settings, there will be no buffer v2!", + _TASK_NAME_BUFFER, + ) + return + if app_settings.AUTOSCALING_NODES_MONITORING: + # NOTE: currently only available for dynamic autoscaling + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py new file mode 100644 index 00000000000..d57508babf8 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/dask.py @@ -0,0 +1,328 @@ +import collections +import contextlib +import logging +import re +from collections import defaultdict +from collections.abc import AsyncIterator, Coroutine +from typing import Any, Final, TypeAlias + +import dask.typing +import distributed +import distributed.scheduler +from aws_library.ec2 import EC2InstanceData, Resources +from dask_task_models_library.resource_constraints import DaskTaskResources +from distributed.core import Status +from models_library.clusters import ClusterAuthentication, TLSAuthentication +from pydantic import AnyUrl, ByteSize, TypeAdapter + +from ..core.errors import ( + DaskNoWorkersError, + DaskSchedulerNotFoundError, + DaskWorkerNotFoundError, +) +from ..models import AssociatedInstance, DaskTask, DaskTaskId +from ..utils.auto_scaling_core import ( + node_host_name_from_ec2_private_dns, + node_ip_from_ec2_private_dns, +) + +_logger = logging.getLogger(__name__) + + +async def _wrap_client_async_routine( + client_coroutine: Coroutine[Any, Any, Any] | Any | None +) -> Any: + """Dask async behavior does not go well with Pylance as it returns + a union of types. 
this wrapper makes both mypy and pylance happy""" + assert client_coroutine # nosec + return await client_coroutine + + +_DASK_SCHEDULER_CONNECT_TIMEOUT_S: Final[int] = 5 + + +@contextlib.asynccontextmanager +async def _scheduler_client( + url: AnyUrl, authentication: ClusterAuthentication +) -> AsyncIterator[distributed.Client]: + """ + Raises: + DaskSchedulerNotFoundError: if the scheduler was not found/cannot be reached + """ + try: + security = distributed.Security() + if isinstance(authentication, TLSAuthentication): + security = distributed.Security( + tls_ca_file=f"{authentication.tls_ca_file}", + tls_client_cert=f"{authentication.tls_client_cert}", + tls_client_key=f"{authentication.tls_client_key}", + require_encryption=True, + ) + async with distributed.Client( + f"{url}", + asynchronous=True, + timeout=f"{_DASK_SCHEDULER_CONNECT_TIMEOUT_S}", + security=security, + ) as client: + yield client + except OSError as exc: + raise DaskSchedulerNotFoundError(url=url) from exc + + +DaskWorkerUrl: TypeAlias = str +DaskWorkerDetails: TypeAlias = dict[str, Any] +DASK_NAME_PATTERN: Final[re.Pattern] = re.compile( + r"^(?P.+)_(?Pip-\d{1,3}-\d{1,3}-\d{1,3}-\d{1,3})[-_].*$" +) + + +def _dask_worker_from_ec2_instance( + client: distributed.Client, ec2_instance: EC2InstanceData +) -> tuple[DaskWorkerUrl, DaskWorkerDetails]: + """ + Raises: + Ec2InvalidDnsNameError + DaskNoWorkersError + DaskWorkerNotFoundError + """ + node_hostname = node_host_name_from_ec2_private_dns(ec2_instance) + scheduler_info = client.scheduler_info() + assert client.scheduler # nosec + if "workers" not in scheduler_info or not scheduler_info["workers"]: + raise DaskNoWorkersError(url=client.scheduler.address) + workers: dict[DaskWorkerUrl, DaskWorkerDetails] = scheduler_info["workers"] + + _logger.debug("looking for %s in %s", f"{ec2_instance=}", f"{workers=}") + + # dict is of type dask_worker_address: worker_details + def _find_by_worker_host( + dask_worker: tuple[DaskWorkerUrl, DaskWorkerDetails] + ) -> bool: + _, details = dask_worker + if match := re.match(DASK_NAME_PATTERN, details["name"]): + return bool(match.group("private_ip") == node_hostname) + return False + + filtered_workers = dict(filter(_find_by_worker_host, workers.items())) + if not filtered_workers: + raise DaskWorkerNotFoundError( + worker_host=ec2_instance.aws_private_dns, url=client.scheduler.address + ) + assert ( + len(filtered_workers) == 1 + ), f"returned workers {filtered_workers}, {node_hostname=}" # nosec + return next(iter(filtered_workers.items())) + + +async def is_worker_connected( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, + worker_ec2_instance: EC2InstanceData, +) -> bool: + with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError): + async with _scheduler_client(scheduler_url, authentication) as client: + _, worker_details = _dask_worker_from_ec2_instance( + client, worker_ec2_instance + ) + return Status(worker_details["status"]) == Status.running + return False + + +async def is_worker_retired( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, + worker_ec2_instance: EC2InstanceData, +) -> bool: + with contextlib.suppress(DaskNoWorkersError, DaskWorkerNotFoundError): + async with _scheduler_client(scheduler_url, authentication) as client: + _, worker_details = _dask_worker_from_ec2_instance( + client, worker_ec2_instance + ) + return Status(worker_details["status"]) in { + Status.closed, + Status.closing, + Status.closing_gracefully, + } + return False + + +def 
_dask_key_to_dask_task_id(key: dask.typing.Key) -> DaskTaskId: + if isinstance(key, bytes): + return key.decode("utf-8") + if isinstance(key, tuple): + return "(" + ", ".join(_dask_key_to_dask_task_id(k) for k in key) + ")" + return f"{key}" + + +async def list_unrunnable_tasks( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, +) -> list[DaskTask]: + """ + Raises: + DaskSchedulerNotFoundError + """ + + def _list_tasks( + dask_scheduler: distributed.Scheduler, + ) -> dict[dask.typing.Key, dict[str, float]]: + # NOTE: task.key can be a byte, str, or a tuple + return { + task.key: task.resource_restrictions or {} + for task in dask_scheduler.unrunnable + } + + async with _scheduler_client(scheduler_url, authentication) as client: + list_of_tasks: dict[ + dask.typing.Key, DaskTaskResources + ] = await _wrap_client_async_routine(client.run_on_scheduler(_list_tasks)) + _logger.debug("found unrunnable tasks: %s", list_of_tasks) + return [ + DaskTask( + task_id=_dask_key_to_dask_task_id(task_id), + required_resources=task_resources, + ) + for task_id, task_resources in list_of_tasks.items() + ] + + +async def list_processing_tasks_per_worker( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, +) -> dict[DaskWorkerUrl, list[DaskTask]]: + """ + Raises: + DaskSchedulerNotFoundError + """ + + def _list_processing_tasks( + dask_scheduler: distributed.Scheduler, + ) -> dict[str, list[tuple[dask.typing.Key, DaskTaskResources]]]: + worker_to_processing_tasks = defaultdict(list) + for task_key, task_state in dask_scheduler.tasks.items(): + if task_state.processing_on: + worker_to_processing_tasks[task_state.processing_on.address].append( + (task_key, task_state.resource_restrictions or {}) + ) + return worker_to_processing_tasks + + async with _scheduler_client(scheduler_url, authentication) as client: + worker_to_tasks: dict[ + str, list[tuple[dask.typing.Key, DaskTaskResources]] + ] = await _wrap_client_async_routine( + client.run_on_scheduler(_list_processing_tasks) + ) + _logger.debug("found processing tasks: %s", worker_to_tasks) + tasks_per_worker = defaultdict(list) + for worker, tasks in worker_to_tasks.items(): + for task_id, required_resources in tasks: + tasks_per_worker[worker].append( + DaskTask( + task_id=_dask_key_to_dask_task_id(task_id), + required_resources=required_resources, + ) + ) + return tasks_per_worker + + +async def get_worker_still_has_results_in_memory( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, + ec2_instance: EC2InstanceData, +) -> int: + """ + Raises: + DaskSchedulerNotFoundError + Ec2InvalidDnsNameError + DaskWorkerNotFoundError + DaskNoWorkersError + """ + async with _scheduler_client(scheduler_url, authentication) as client: + _, worker_details = _dask_worker_from_ec2_instance(client, ec2_instance) + + worker_metrics: dict[str, Any] = worker_details["metrics"] + return 1 if worker_metrics.get("task_counts") else 0 + + +async def get_worker_used_resources( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, + ec2_instance: EC2InstanceData, +) -> Resources: + """ + Raises: + DaskSchedulerNotFoundError + Ec2InvalidDnsNameError + DaskWorkerNotFoundError + DaskNoWorkersError + """ + + def _list_processing_tasks_on_worker( + dask_scheduler: distributed.Scheduler, *, worker_url: str + ) -> list[tuple[dask.typing.Key, DaskTaskResources]]: + processing_tasks = [] + for task_key, task_state in dask_scheduler.tasks.items(): + if task_state.processing_on and ( + task_state.processing_on.address == worker_url + ): + 
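The resource accounting in get_worker_used_resources folds each processing task's resource_restrictions into a single total with collections.Counter. The aggregation step on its own, with made-up task resource dicts:

import collections

# hypothetical resource_restrictions as returned by the scheduler for tasks on one worker
worker_processing_tasks = [
    ("task-1", {"CPU": 2, "RAM": 2 * 1024**3}),
    ("task-2", {"CPU": 1, "RAM": 1 * 1024**3}),
    ("task-3", {}),  # a task without explicit restrictions adds nothing
]

total_resources_used: collections.Counter[str] = collections.Counter()
for _, task_resources in worker_processing_tasks:
    total_resources_used.update(task_resources)

print(total_resources_used.get("CPU", 0))  # 3
print(total_resources_used.get("RAM", 0))  # 3221225472 bytes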
processing_tasks.append( + (task_key, task_state.resource_restrictions or {}) + ) + return processing_tasks + + async with _scheduler_client(scheduler_url, authentication) as client: + worker_url, _ = _dask_worker_from_ec2_instance(client, ec2_instance) + + _logger.debug("looking for processing tasks for %s", f"{worker_url=}") + + # now get the used resources + worker_processing_tasks: list[ + tuple[dask.typing.Key, DaskTaskResources] + ] = await _wrap_client_async_routine( + client.run_on_scheduler( + _list_processing_tasks_on_worker, worker_url=worker_url + ), + ) + + total_resources_used: collections.Counter[str] = collections.Counter() + for _, task_resources in worker_processing_tasks: + total_resources_used.update(task_resources) + + _logger.debug("found %s for %s", f"{total_resources_used=}", f"{worker_url=}") + return Resources( + cpus=total_resources_used.get("CPU", 0), + ram=TypeAdapter(ByteSize).validate_python( + total_resources_used.get("RAM", 0) + ), + ) + + +async def compute_cluster_total_resources( + scheduler_url: AnyUrl, + authentication: ClusterAuthentication, + instances: list[AssociatedInstance], +) -> Resources: + if not instances: + return Resources.create_as_empty() + async with _scheduler_client(scheduler_url, authentication) as client: + instance_hosts = ( + node_ip_from_ec2_private_dns(i.ec2_instance) for i in instances + ) + scheduler_info = client.scheduler_info() + if "workers" not in scheduler_info or not scheduler_info["workers"]: + raise DaskNoWorkersError(url=scheduler_url) + workers: dict[str, Any] = scheduler_info["workers"] + for worker_details in workers.values(): + if worker_details["host"] not in instance_hosts: + continue + + return Resources.create_as_empty() + + +async def try_retire_nodes( + scheduler_url: AnyUrl, authentication: ClusterAuthentication +) -> None: + async with _scheduler_client(scheduler_url, authentication) as client: + await _wrap_client_async_routine( + client.retire_workers(close_workers=False, remove=False) + ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/docker.py b/services/autoscaling/src/simcore_service_autoscaling/modules/docker.py index a3047b9918c..f30592bcf92 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/docker.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/docker.py @@ -3,7 +3,7 @@ import aiodocker from fastapi import FastAPI -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_delay from tenacity.wait import wait_random_exponential diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/ec2.py b/services/autoscaling/src/simcore_service_autoscaling/modules/ec2.py index 330bbe9f814..4b71d8a9d3f 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/ec2.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/ec2.py @@ -1,256 +1,60 @@ -import contextlib import logging -from dataclasses import dataclass -from typing import Optional, cast +from typing import cast -import aioboto3 -import botocore.exceptions -from aiobotocore.session import ClientCreatorContext +from aws_library.ec2 import SimcoreEC2API +from aws_library.ec2._errors import EC2NotConnectedError from fastapi import FastAPI -from pydantic import ByteSize, parse_obj_as -from servicelib.logging_utils import log_context -from tenacity._asyncio import AsyncRetrying +from settings_library.ec2 import EC2Settings 
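The module setups below (EC2, SSM) all gate startup on a ping() that is retried with tenacity's AsyncRetrying and random exponential backoff. The retry scaffolding in isolation, assuming tenacity is available; _FakeClient is a stand-in that becomes "reachable" on the third attempt, where the real code pings the AWS client:

import asyncio
import logging

from tenacity.asyncio import AsyncRetrying
from tenacity.before_sleep import before_sleep_log
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_random_exponential

logging.basicConfig(level=logging.WARNING)
_log = logging.getLogger(__name__)


class _FakeClient:
    def __init__(self) -> None:
        self._calls = 0

    async def ping(self) -> bool:
        self._calls += 1
        return self._calls >= 3


async def wait_until_responsive(client: _FakeClient) -> None:
    async for attempt in AsyncRetrying(
        reraise=True,
        stop=stop_after_delay(10),
        wait=wait_random_exponential(max=2),
        before_sleep=before_sleep_log(_log, logging.WARNING),
    ):
        with attempt:
            # raising inside the attempt block triggers another retry until the stop condition
            if not await client.ping():
                raise ConnectionError("not connected yet")


asyncio.run(wait_until_responsive(_FakeClient()))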
+from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_delay from tenacity.wait import wait_random_exponential -from types_aiobotocore_ec2 import EC2Client -from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType -from types_aiobotocore_ec2.type_defs import FilterTypeDef -from ..core.errors import ( - ConfigurationError, - Ec2InstanceNotFoundError, - Ec2NotConnectedError, - Ec2TooManyInstancesError, -) -from ..core.settings import EC2InstancesSettings, EC2Settings -from ..models import EC2InstanceData, EC2InstanceType -from ..utils.ec2 import compose_user_data +from ..core.errors import ConfigurationError +from .instrumentation import has_instrumentation, instrument_ec2_client_methods -logger = logging.getLogger(__name__) - - -@dataclass(frozen=True) -class AutoscalingEC2: - client: EC2Client - session: aioboto3.Session - exit_stack: contextlib.AsyncExitStack - - @classmethod - async def create(cls, settings: EC2Settings) -> "AutoscalingEC2": - session = aioboto3.Session() - session_client = session.client( - "ec2", - endpoint_url=settings.EC2_ENDPOINT, - aws_access_key_id=settings.EC2_ACCESS_KEY_ID, - aws_secret_access_key=settings.EC2_SECRET_ACCESS_KEY, - region_name=settings.EC2_REGION_NAME, - ) - assert isinstance(session_client, ClientCreatorContext) # nosec - exit_stack = contextlib.AsyncExitStack() - ec2_client = cast( - EC2Client, await exit_stack.enter_async_context(session_client) - ) - return cls(ec2_client, session, exit_stack) - - async def close(self) -> None: - await self.exit_stack.aclose() - - async def ping(self) -> bool: - try: - await self.client.describe_account_attributes() - return True - except Exception: # pylint: disable=broad-except - return False - - async def get_ec2_instance_capabilities( - self, - instance_type_names: set[InstanceTypeType], - ) -> list[EC2InstanceType]: - """instance_type_names must be a set of unique values""" - instance_types = await self.client.describe_instance_types( - InstanceTypes=list(instance_type_names) - ) - list_instances: list[EC2InstanceType] = [] - for instance in instance_types.get("InstanceTypes", []): - with contextlib.suppress(KeyError): - list_instances.append( - EC2InstanceType( - name=instance["InstanceType"], - cpus=instance["VCpuInfo"]["DefaultVCpus"], - ram=parse_obj_as( - ByteSize, f"{instance['MemoryInfo']['SizeInMiB']}MiB" - ), - ) - ) - return list_instances - - async def start_aws_instance( - self, - instance_settings: EC2InstancesSettings, - instance_type: InstanceTypeType, - tags: dict[str, str], - startup_script: str, - number_of_instances: int, - ) -> list[EC2InstanceData]: - with log_context( - logger, - logging.INFO, - msg=f"launching {number_of_instances} AWS instance(s) {instance_type} with {tags=}", - ): - # first check the max amount is not already reached - current_instances = await self.get_instances(instance_settings, tags) - if ( - len(current_instances) + number_of_instances - > instance_settings.EC2_INSTANCES_MAX_INSTANCES - ): - raise Ec2TooManyInstancesError( - num_instances=instance_settings.EC2_INSTANCES_MAX_INSTANCES - ) - - instances = await self.client.run_instances( - ImageId=instance_settings.EC2_INSTANCES_AMI_ID, - MinCount=number_of_instances, - MaxCount=number_of_instances, - InstanceType=instance_type, - InstanceInitiatedShutdownBehavior="terminate", - KeyName=instance_settings.EC2_INSTANCES_KEY_NAME, - SubnetId=instance_settings.EC2_INSTANCES_SUBNET_ID, - TagSpecifications=[ - { - 
"ResourceType": "instance", - "Tags": [ - {"Key": tag_key, "Value": tag_value} - for tag_key, tag_value in tags.items() - ], - } - ], - UserData=compose_user_data(startup_script), - SecurityGroupIds=instance_settings.EC2_INSTANCES_SECURITY_GROUP_IDS, - ) - instance_ids = [i["InstanceId"] for i in instances["Instances"]] - logger.info( - "New instances launched: %s, waiting for them to start now...", - instance_ids, - ) - - # wait for the instance to be in a pending state - # NOTE: reference to EC2 states https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html - waiter = self.client.get_waiter("instance_exists") - await waiter.wait(InstanceIds=instance_ids) - logger.info("instances %s exists now.", instance_ids) - - # get the private IPs - instances = await self.client.describe_instances(InstanceIds=instance_ids) - instance_datas = [ - EC2InstanceData( - launch_time=instance["LaunchTime"], - id=instance["InstanceId"], - aws_private_dns=instance["PrivateDnsName"], - type=instance["InstanceType"], - state=instance["State"]["Name"], - ) - for instance in instances["Reservations"][0]["Instances"] - ] - logger.info( - "%s is available, happy computing!!", - f"{instance_datas=}", - ) - return instance_datas - - async def get_instances( - self, - instance_settings: EC2InstancesSettings, - tags: dict[str, str], - *, - state_names: Optional[list[InstanceStateNameType]] = None, - ) -> list[EC2InstanceData]: - # NOTE: be careful: Name=instance-state-name,Values=["pending", "running"] means pending OR running - # NOTE2: AND is done by repeating Name=instance-state-name,Values=pending Name=instance-state-name,Values=running - if state_names is None: - state_names = ["pending", "running"] - - filters: list[FilterTypeDef] = [ - { - "Name": "key-name", - "Values": [instance_settings.EC2_INSTANCES_KEY_NAME], - }, - {"Name": "instance-state-name", "Values": state_names}, - ] - filters.extend( - [{"Name": f"tag:{key}", "Values": [value]} for key, value in tags.items()] - ) - - instances = await self.client.describe_instances(Filters=filters) - all_instances = [] - for reservation in instances["Reservations"]: - assert "Instances" in reservation # nosec - for instance in reservation["Instances"]: - assert "LaunchTime" in instance # nosec - assert "InstanceId" in instance # nosec - assert "PrivateDnsName" in instance # nosec - assert "InstanceType" in instance # nosec - assert "State" in instance # nosec - assert "Name" in instance["State"] # nosec - all_instances.append( - EC2InstanceData( - launch_time=instance["LaunchTime"], - id=instance["InstanceId"], - aws_private_dns=instance["PrivateDnsName"], - type=instance["InstanceType"], - state=instance["State"]["Name"], - ) - ) - logger.debug("received: %s", f"{all_instances=}") - return all_instances - - async def terminate_instances(self, instance_datas: list[EC2InstanceData]) -> None: - try: - await self.client.terminate_instances( - InstanceIds=[i.id for i in instance_datas] - ) - except botocore.exceptions.ClientError as exc: - if ( - exc.response.get("Error", {}).get("Code", "") - == "InvalidInstanceID.NotFound" - ): - raise Ec2InstanceNotFoundError from exc - raise +_logger = logging.getLogger(__name__) def setup(app: FastAPI) -> None: async def on_startup() -> None: app.state.ec2_client = None - settings: Optional[EC2Settings] = app.state.settings.AUTOSCALING_EC2_ACCESS + settings: EC2Settings | None = app.state.settings.AUTOSCALING_EC2_ACCESS if not settings: - logger.warning("EC2 client is de-activated in the settings") + 
_logger.warning("EC2 client is de-activated in the settings") return - app.state.ec2_client = client = await AutoscalingEC2.create(settings) + if has_instrumentation(app): + client = instrument_ec2_client_methods( + app, await SimcoreEC2API.create(settings) + ) + else: + client = await SimcoreEC2API.create(settings) + app.state.ec2_client = client async for attempt in AsyncRetrying( reraise=True, stop=stop_after_delay(120), wait=wait_random_exponential(max=30), - before_sleep=before_sleep_log(logger, logging.WARNING), + before_sleep=before_sleep_log(_logger, logging.WARNING), ): with attempt: connected = await client.ping() if not connected: - raise Ec2NotConnectedError() + raise EC2NotConnectedError # pragma: no cover async def on_shutdown() -> None: if app.state.ec2_client: - await cast(AutoscalingEC2, app.state.ec2_client).close() + await cast(SimcoreEC2API, app.state.ec2_client).close() app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) -def get_ec2_client(app: FastAPI) -> AutoscalingEC2: +def get_ec2_client(app: FastAPI) -> SimcoreEC2API: if not app.state.ec2_client: raise ConfigurationError( msg="EC2 client is not available. Please check the configuration." ) - return cast(AutoscalingEC2, app.state.ec2_client) + return cast(SimcoreEC2API, app.state.ec2_client) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/__init__.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/__init__.py new file mode 100644 index 00000000000..1367d41ba1a --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/__init__.py @@ -0,0 +1,11 @@ +from ._core import get_instrumentation, has_instrumentation, setup +from ._ec2_client import instrument_ec2_client_methods + +__all__: tuple[str, ...] 
= ( + "has_instrumentation", + "instrument_ec2_client_methods", + "setup", + "get_instrumentation", +) + +# nopycln: file diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_constants.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_constants.py new file mode 100644 index 00000000000..1224ea71907 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_constants.py @@ -0,0 +1,86 @@ +from typing import Final + +from servicelib.instrumentation import get_metrics_namespace + +from ..._meta import APP_NAME + +METRICS_NAMESPACE: Final[str] = get_metrics_namespace(APP_NAME) +EC2_INSTANCE_LABELS: Final[tuple[str, ...]] = ("instance_type",) + +CLUSTER_METRICS_DEFINITIONS: Final[dict[str, tuple[str, tuple[str, ...]]]] = { + "active_nodes": ( + "Number of EC2-backed docker nodes which are active and ready to run tasks", + EC2_INSTANCE_LABELS, + ), + "pending_nodes": ( + "Number of EC2-backed docker nodes which are active and NOT ready to run tasks", + EC2_INSTANCE_LABELS, + ), + "drained_nodes": ( + "Number of EC2-backed docker nodes which are drained", + EC2_INSTANCE_LABELS, + ), + "buffer_drained_nodes": ( + "Number of EC2-backed docker nodes which are drained and in buffer/reserve", + EC2_INSTANCE_LABELS, + ), + "pending_ec2s": ( + "Number of EC2 instances not yet part of the cluster", + EC2_INSTANCE_LABELS, + ), + "broken_ec2s": ( + "Number of EC2 instances that failed joining the cluster", + EC2_INSTANCE_LABELS, + ), + "buffer_ec2s": ( + "Number of buffer EC2 instances prepared, stopped, and ready to be activated", + EC2_INSTANCE_LABELS, + ), + "disconnected_nodes": ( + "Number of docker nodes not backed by a running EC2 instance", + (), + ), + "terminating_nodes": ( + "Number of EC2-backed docker nodes that started the termination process", + EC2_INSTANCE_LABELS, + ), + "retired_nodes": ( + "Number of EC2-backed docker nodes that were actively retired and waiting for draining and termination or re-use", + EC2_INSTANCE_LABELS, + ), + "terminated_instances": ( + "Number of EC2 instances that were terminated (they are typically visible 1 hour)", + EC2_INSTANCE_LABELS, + ), +} + +BUFFER_POOLS_METRICS_DEFINITIONS: Final[dict[str, tuple[str, tuple[str, ...]]]] = { + "ready_instances": ( + "Number of EC2 buffer instances that are ready for use", + EC2_INSTANCE_LABELS, + ), + "pending_instances": ( + "Number of EC2 buffer instances that are pending/starting", + EC2_INSTANCE_LABELS, + ), + "waiting_to_pull_instances": ( + "Number of EC2 buffer instances that are waiting to pull docker images", + EC2_INSTANCE_LABELS, + ), + "waiting_to_stop_instances": ( + "Number of EC2 buffer instances that are waiting to be stopped", + EC2_INSTANCE_LABELS, + ), + "pulling_instances": ( + "Number of EC2 buffer instances that are actively pulling docker images", + EC2_INSTANCE_LABELS, + ), + "stopping_instances": ( + "Number of EC2 buffer instances that are stopping", + EC2_INSTANCE_LABELS, + ), + "broken_instances": ( + "Number of EC2 buffer instances that are deemed as broken", + EC2_INSTANCE_LABELS, + ), +} diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_core.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_core.py new file mode 100644 index 00000000000..9de65aac078 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_core.py @@ -0,0 +1,46 @@ +from typing import cast + +from fastapi 
import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation as setup_rest_instrumentation, +) + +from ...core.errors import ConfigurationError +from ...core.settings import get_application_settings +from ._models import AutoscalingInstrumentation + + +def setup(app: FastAPI) -> None: + app_settings = get_application_settings(app) + if not app_settings.AUTOSCALING_PROMETHEUS_INSTRUMENTATION_ENABLED: + return + + # NOTE: this must be setup before application startup + registry = setup_rest_instrumentation(app) + + async def on_startup() -> None: + metrics_subsystem = ( + "dynamic" if app_settings.AUTOSCALING_NODES_MONITORING else "computational" + ) + app.state.instrumentation = ( + AutoscalingInstrumentation( # pylint: disable=unexpected-keyword-arg + registry=registry, subsystem=metrics_subsystem + ) + ) + + async def on_shutdown() -> None: ... + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_instrumentation(app: FastAPI) -> AutoscalingInstrumentation: + if not app.state.instrumentation: + raise ConfigurationError( + msg="Instrumentation not setup. Please check the configuration." + ) + return cast(AutoscalingInstrumentation, app.state.instrumentation) + + +def has_instrumentation(app: FastAPI) -> bool: + return hasattr(app.state, "instrumentation") diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_ec2_client.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_ec2_client.py new file mode 100644 index 00000000000..9d2ff2b82e5 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_ec2_client.py @@ -0,0 +1,95 @@ +import functools +from collections.abc import Callable, Coroutine, Iterable +from typing import Any, ParamSpec, TypeVar + +from aws_library.ec2._client import SimcoreEC2API +from aws_library.ec2._models import EC2InstanceData +from fastapi import FastAPI + +from ._core import get_instrumentation + +P = ParamSpec("P") +R = TypeVar("R") + + +def _instrumented_ec2_client_method( + metrics_handler: Callable[[str], None], + *, + instance_type_from_method_arguments: Callable[..., list[str]] | None, + instance_type_from_method_return: Callable[..., list[str]] | None, +) -> Callable[ + [Callable[P, Coroutine[Any, Any, R]]], + Callable[P, Coroutine[Any, Any, R]], +]: + def decorator( + func: Callable[P, Coroutine[Any, Any, R]] + ) -> Callable[P, Coroutine[Any, Any, R]]: + @functools.wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + result = await func(*args, **kwargs) + if instance_type_from_method_arguments: + for instance_type in instance_type_from_method_arguments( + *args, **kwargs + ): + metrics_handler(instance_type) + elif instance_type_from_method_return: + for instance_type in instance_type_from_method_return(result): + metrics_handler(instance_type) + return result + + return wrapper + + return decorator + + +def _instance_type_from_instance_datas( + instance_datas: Iterable[EC2InstanceData], +) -> list[str]: + return [i.type for i in instance_datas] + + +def instrument_ec2_client_methods( + app: FastAPI, ec2_client: SimcoreEC2API +) -> SimcoreEC2API: + autoscaling_instrumentation = get_instrumentation(app) + methods_to_instrument = [ + ( + "launch_instances", + autoscaling_instrumentation.ec2_client_metrics.instance_launched, + None, + _instance_type_from_instance_datas, + ), + ( + "start_instances", + 
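The _instrumented_ec2_client_method decorator above is essentially "await the client method, then report one event per instance type to a metrics callback". Stripped of the Prometheus plumbing, the idea can be sketched like this (all names below are illustrative, not the service's API):

import asyncio
import functools
from collections.abc import Awaitable, Callable


def count_instance_types(metrics_handler: Callable[[str], None]):
    # wrap an async client method and report one event per returned instance type
    def _decorator(func: Callable[..., Awaitable[list[dict]]]):
        @functools.wraps(func)
        async def _wrapper(*args, **kwargs) -> list[dict]:
            result = await func(*args, **kwargs)
            for instance in result:
                metrics_handler(instance["type"])
            return result
        return _wrapper
    return _decorator


launched: dict[str, int] = {}


def record_launch(instance_type: str) -> None:
    launched[instance_type] = launched.get(instance_type, 0) + 1


@count_instance_types(record_launch)
async def launch_instances(count: int) -> list[dict]:
    # fake EC2 call returning minimal instance descriptions
    return [{"type": "t3.medium"} for _ in range(count)]


asyncio.run(launch_instances(3))
print(launched)  # {'t3.medium': 3}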
autoscaling_instrumentation.ec2_client_metrics.instance_started, + _instance_type_from_instance_datas, + None, + ), + ( + "stop_instances", + autoscaling_instrumentation.ec2_client_metrics.instance_stopped, + _instance_type_from_instance_datas, + None, + ), + ( + "terminate_instances", + autoscaling_instrumentation.ec2_client_metrics.instance_terminated, + _instance_type_from_instance_datas, + None, + ), + ] + for ( + method_name, + metrics_handler, + instance_types_from_args, + instance_types_from_return, + ) in methods_to_instrument: + method = getattr(ec2_client, method_name, None) + assert method is not None # nosec + decorated_method = _instrumented_ec2_client_method( + metrics_handler, + instance_type_from_method_arguments=instance_types_from_args, + instance_type_from_method_return=instance_types_from_return, + )(method) + setattr(ec2_client, method_name, decorated_method) + return ec2_client diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_models.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_models.py new file mode 100644 index 00000000000..3831b33b826 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_models.py @@ -0,0 +1,201 @@ +from dataclasses import dataclass, field +from typing import Final + +from prometheus_client import CollectorRegistry, Counter, Histogram +from servicelib.instrumentation import MetricsBase + +from ...models import BufferPoolManager, Cluster +from ._constants import ( + BUFFER_POOLS_METRICS_DEFINITIONS, + CLUSTER_METRICS_DEFINITIONS, + EC2_INSTANCE_LABELS, + METRICS_NAMESPACE, +) +from ._utils import TrackedGauge, create_gauge + + +@dataclass(slots=True, kw_only=True) +class ClusterMetrics(MetricsBase): # pylint: disable=too-many-instance-attributes + active_nodes: TrackedGauge = field(init=False) + pending_nodes: TrackedGauge = field(init=False) + drained_nodes: TrackedGauge = field(init=False) + buffer_drained_nodes: TrackedGauge = field(init=False) + pending_ec2s: TrackedGauge = field(init=False) + broken_ec2s: TrackedGauge = field(init=False) + buffer_ec2s: TrackedGauge = field(init=False) + disconnected_nodes: TrackedGauge = field(init=False) + terminating_nodes: TrackedGauge = field(init=False) + retired_nodes: TrackedGauge = field(init=False) + terminated_instances: TrackedGauge = field(init=False) + + def __post_init__(self) -> None: + cluster_subsystem = f"{self.subsystem}_cluster" + # Creating and assigning gauges using the field names and the metric definitions + for field_name, definition in CLUSTER_METRICS_DEFINITIONS.items(): + gauge = create_gauge( + field_name=field_name, + definition=definition, + subsystem=cluster_subsystem, + registry=self.registry, + ) + setattr(self, field_name, gauge) + + def update_from_cluster(self, cluster: Cluster) -> None: + for field_name in CLUSTER_METRICS_DEFINITIONS: + if field_name != "disconnected_nodes": + tracked_gauge = getattr(self, field_name) + assert isinstance(tracked_gauge, TrackedGauge) # nosec + instances = getattr(cluster, field_name) + assert isinstance(instances, list) # nosec + tracked_gauge.update_from_instances(i.ec2_instance for i in instances) + else: + self.disconnected_nodes.gauge.set(len(cluster.disconnected_nodes)) + + +@dataclass(slots=True, kw_only=True) +class EC2ClientMetrics(MetricsBase): + launched_instances: Counter = field(init=False) + started_instances: Counter = field(init=False) + stopped_instances: Counter = field(init=False) + 
terminated_instances: Counter = field(init=False) + + def __post_init__(self) -> None: + self.launched_instances = Counter( + "launched_instances_total", + "Number of EC2 instances that were launched", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + self.started_instances = Counter( + "started_instances_total", + "Number of EC2 instances that were started", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + self.stopped_instances = Counter( + "stopped_instances_total", + "Number of EC2 instances that were stopped", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + self.terminated_instances = Counter( + "terminated_instances_total", + "Number of EC2 instances that were terminated", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=self.subsystem, + registry=self.registry, + ) + + def instance_started(self, instance_type: str) -> None: + self.started_instances.labels(instance_type=instance_type).inc() + + def instance_launched(self, instance_type: str) -> None: + self.launched_instances.labels(instance_type=instance_type).inc() + + def instance_stopped(self, instance_type: str) -> None: + self.stopped_instances.labels(instance_type=instance_type).inc() + + def instance_terminated(self, instance_type: str) -> None: + self.terminated_instances.labels(instance_type=instance_type).inc() + + +_MINUTE: Final[int] = 60 + + +@dataclass(slots=True, kw_only=True) +class BufferPoolsMetrics(MetricsBase): + ready_instances: TrackedGauge = field(init=False) + pending_instances: TrackedGauge = field(init=False) + waiting_to_pull_instances: TrackedGauge = field(init=False) + waiting_to_stop_instances: TrackedGauge = field(init=False) + pulling_instances: TrackedGauge = field(init=False) + stopping_instances: TrackedGauge = field(init=False) + broken_instances: TrackedGauge = field(init=False) + + instances_ready_to_pull_seconds: Histogram = field(init=False) + instances_completed_pulling_seconds: Histogram = field(init=False) + + def __post_init__(self) -> None: + buffer_pools_subsystem = f"{self.subsystem}_buffer_machines_pools" + for field_name, definition in BUFFER_POOLS_METRICS_DEFINITIONS.items(): + setattr( + self, + field_name, + create_gauge( + field_name=field_name, + definition=definition, + subsystem=buffer_pools_subsystem, + registry=self.registry, + ), + ) + self.instances_ready_to_pull_seconds = Histogram( + "instances_ready_to_pull_duration_seconds", + "Time taken for instances to be ready to pull", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=buffer_pools_subsystem, + buckets=(10, 20, 30, 40, 50, 60, 120), + registry=self.registry, + ) + self.instances_completed_pulling_seconds = Histogram( + "instances_completed_pulling_duration_seconds", + "Time taken for instances to complete docker images pre-pulling", + labelnames=EC2_INSTANCE_LABELS, + namespace=METRICS_NAMESPACE, + subsystem=buffer_pools_subsystem, + buckets=( + 30, + 1 * _MINUTE, + 2 * _MINUTE, + 3 * _MINUTE, + 5 * _MINUTE, + 10 * _MINUTE, + 20 * _MINUTE, + 30 * _MINUTE, + 40 * _MINUTE, + ), + registry=self.registry, + ) + + def update_from_buffer_pool_manager( + self, buffer_pool_manager: BufferPoolManager + ) -> None: + flat_pool = buffer_pool_manager.flatten_buffer_pool() + + for field_name in BUFFER_POOLS_METRICS_DEFINITIONS: + tracked_gauge = 
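The metrics classes above build on prometheus_client Counters and Histograms registered against a dedicated CollectorRegistry. A tiny standalone example of that API, assuming prometheus_client is installed; the namespace/subsystem values are placeholders, not the service's real metric namespace:

from prometheus_client import CollectorRegistry, Counter, Histogram, generate_latest

registry = CollectorRegistry()

launched = Counter(
    "launched_instances_total",
    "Number of EC2 instances that were launched",
    labelnames=("instance_type",),
    namespace="example",
    subsystem="autoscaling",
    registry=registry,
)
ready_after = Histogram(
    "instances_ready_to_pull_duration_seconds",
    "Time taken for instances to be ready to pull",
    labelnames=("instance_type",),
    namespace="example",
    subsystem="autoscaling",
    buckets=(10, 20, 30, 60, 120),
    registry=registry,
)

launched.labels(instance_type="t3.medium").inc()
ready_after.labels(instance_type="t3.medium").observe(42.0)

# dump the exposition format, as a Prometheus scrape would see it
print(generate_latest(registry).decode())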
getattr(self, field_name) + assert isinstance(tracked_gauge, TrackedGauge) # nosec + instances = getattr(flat_pool, field_name) + assert isinstance(instances, set) # nosec + tracked_gauge.update_from_instances(instances) + + +@dataclass(slots=True, kw_only=True) +class AutoscalingInstrumentation(MetricsBase): + registry: CollectorRegistry + + cluster_metrics: ClusterMetrics = field(init=False) + ec2_client_metrics: EC2ClientMetrics = field(init=False) + buffer_machines_pools_metrics: BufferPoolsMetrics = field(init=False) + + def __post_init__(self) -> None: + self.cluster_metrics = ClusterMetrics( # pylint: disable=unexpected-keyword-arg + subsystem=self.subsystem, registry=self.registry + ) + self.ec2_client_metrics = ( + EC2ClientMetrics( # pylint: disable=unexpected-keyword-arg + subsystem=self.subsystem, registry=self.registry + ) + ) + self.buffer_machines_pools_metrics = ( + BufferPoolsMetrics( # pylint: disable=unexpected-keyword-arg + subsystem=self.subsystem, registry=self.registry + ) + ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_utils.py b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_utils.py new file mode 100644 index 00000000000..8f80b1f05e8 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/instrumentation/_utils.py @@ -0,0 +1,46 @@ +import collections +from collections.abc import Iterable +from dataclasses import dataclass, field + +from aws_library.ec2._models import EC2InstanceData +from prometheus_client import CollectorRegistry, Gauge + +from ._constants import METRICS_NAMESPACE + + +@dataclass +class TrackedGauge: + gauge: Gauge + _tracked_labels: set[str] = field(default_factory=set) + + def update_from_instances(self, instances: Iterable[EC2InstanceData]) -> None: + # Create a Counter to count nodes by instance type + instance_type_counts = collections.Counter(f"{i.type}" for i in instances) + current_instance_types = set(instance_type_counts.keys()) + self._tracked_labels.update(current_instance_types) + # update the gauge + for instance_type, count in instance_type_counts.items(): + self.gauge.labels(instance_type=instance_type).set(count) + # set the unused ones to 0 + for instance_type in self._tracked_labels - current_instance_types: + self.gauge.labels(instance_type=instance_type).set(0) + + +def create_gauge( + *, + field_name: str, + definition: tuple[str, tuple[str, ...]], + subsystem: str, + registry: CollectorRegistry, +) -> TrackedGauge: + description, labelnames = definition + return TrackedGauge( + Gauge( + name=field_name, + documentation=description, + labelnames=labelnames, + namespace=METRICS_NAMESPACE, + subsystem=subsystem, + registry=registry, + ) + ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/rabbitmq.py b/services/autoscaling/src/simcore_service_autoscaling/modules/rabbitmq.py index 8652aa8e234..2775720fc3a 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/rabbitmq.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/rabbitmq.py @@ -1,12 +1,11 @@ import contextlib import logging -from typing import Optional, cast +from typing import cast from fastapi import FastAPI from models_library.rabbitmq_messages import RabbitMessageBase from servicelib.logging_utils import log_catch -from servicelib.rabbitmq import RabbitMQClient -from servicelib.rabbitmq_utils import wait_till_rabbitmq_responsive +from servicelib.rabbitmq import RabbitMQClient, 
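TrackedGauge above remembers every instance_type label it has ever set so that gauges for instance types that disappear are reset to 0 instead of keeping their last value. A simplified, module-level version of the same bookkeeping (the metric name mirrors one of the cluster gauges; the rest is illustrative):

import collections
from collections.abc import Iterable

from prometheus_client import CollectorRegistry, Gauge

registry = CollectorRegistry()
gauge = Gauge(
    "active_nodes",
    "Number of EC2-backed docker nodes which are active",
    labelnames=("instance_type",),
    registry=registry,
)
_tracked_labels: set[str] = set()


def update_from_instance_types(instance_types: Iterable[str]) -> None:
    counts = collections.Counter(instance_types)
    current = set(counts)
    _tracked_labels.update(current)
    for instance_type, count in counts.items():
        gauge.labels(instance_type=instance_type).set(count)
    # zero out types seen before but absent now, so dashboards drop to 0 instead of flat-lining
    for instance_type in _tracked_labels - current:
        gauge.labels(instance_type=instance_type).set(0)


update_from_instance_types(["t3.medium", "t3.medium", "r5.large"])
update_from_instance_types(["t3.medium"])  # r5.large is now reported as 0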
wait_till_rabbitmq_responsive from settings_library.rabbit import RabbitSettings from ..core.errors import ConfigurationError @@ -17,7 +16,7 @@ def setup(app: FastAPI) -> None: async def on_startup() -> None: app.state.rabbitmq_client = None - settings: Optional[RabbitSettings] = app.state.settings.AUTOSCALING_RABBITMQ + settings: RabbitSettings | None = app.state.settings.AUTOSCALING_RABBITMQ if not settings: logger.warning("Rabbit MQ client is de-activated in the settings") return @@ -45,4 +44,4 @@ def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: async def post_message(app: FastAPI, message: RabbitMessageBase) -> None: with log_catch(logger, reraise=False), contextlib.suppress(ConfigurationError): # NOTE: if rabbitmq was not initialized the error does not need to flood the logs - await get_rabbitmq_client(app).publish(message.channel_name, message.json()) + await get_rabbitmq_client(app).publish(message.channel_name, message) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/redis.py b/services/autoscaling/src/simcore_service_autoscaling/modules/redis.py index 78a4b33746b..c0cf7a15e07 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/modules/redis.py +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/redis.py @@ -3,40 +3,30 @@ from fastapi import FastAPI from servicelib.redis import RedisClientSDK -from settings_library.redis import RedisSettings -from tenacity._asyncio import AsyncRetrying -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_random_exponential +from settings_library.redis import RedisDatabase, RedisSettings -from ..core.errors import RedisNotConnectedError +from .._meta import APP_NAME logger = logging.getLogger(__name__) def setup(app: FastAPI) -> None: async def on_startup() -> None: - app.state.redis = None + app.state.redis_client_sdk = None settings: RedisSettings = app.state.settings.AUTOSCALING_REDIS - app.state.redis = client = RedisClientSDK(settings.dsn_locks) - async for attempt in AsyncRetrying( - reraise=True, - stop=stop_after_delay(120), - wait=wait_random_exponential(max=30), - before_sleep=before_sleep_log(logger, logging.WARNING), - ): - with attempt: - connected = await client.ping() - if not connected: - raise RedisNotConnectedError(dsn=settings.dsn_locks) + redis_locks_dsn = settings.build_redis_dsn(RedisDatabase.LOCKS) + app.state.redis_client_sdk = RedisClientSDK( + redis_locks_dsn, client_name=APP_NAME + ) async def on_shutdown() -> None: - if app.state.redis: - await app.state.redis.close() + redis_client_sdk: None | RedisClientSDK = app.state.redis_client_sdk + if redis_client_sdk: + await redis_client_sdk.shutdown() app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) def get_redis_client(app: FastAPI) -> RedisClientSDK: - return cast(RedisClientSDK, app.state.redis) + return cast(RedisClientSDK, app.state.redis_client_sdk) diff --git a/services/autoscaling/src/simcore_service_autoscaling/modules/ssm.py b/services/autoscaling/src/simcore_service_autoscaling/modules/ssm.py new file mode 100644 index 00000000000..fb1c0e88b55 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/modules/ssm.py @@ -0,0 +1,56 @@ +import logging +from typing import cast + +from aws_library.ssm import SimcoreSSMAPI +from aws_library.ssm._errors import SSMNotConnectedError +from fastapi import FastAPI +from settings_library.ssm import SSMSettings +from tenacity.asyncio 
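The rabbitmq/redis modules above and the ssm module below all follow the same FastAPI lifecycle pattern: store None on app.state, create the client on startup, close it on shutdown, and expose a get_*_client accessor that fails loudly when the client was never configured. A stripped-down sketch with a dummy client, assuming fastapi and httpx are installed (DummyClient and get_dummy_client are made up for illustration; TestClient is used only to fire the startup/shutdown events):

from typing import cast

from fastapi import FastAPI
from fastapi.testclient import TestClient


class DummyClient:
    async def shutdown(self) -> None:
        print("client closed")


def setup(app: FastAPI) -> None:
    async def on_startup() -> None:
        app.state.dummy_client = DummyClient()

    async def on_shutdown() -> None:
        if app.state.dummy_client:
            await cast(DummyClient, app.state.dummy_client).shutdown()

    app.state.dummy_client = None
    app.add_event_handler("startup", on_startup)
    app.add_event_handler("shutdown", on_shutdown)


def get_dummy_client(app: FastAPI) -> DummyClient:
    if not app.state.dummy_client:
        raise RuntimeError("client is not available, check the configuration")
    return cast(DummyClient, app.state.dummy_client)


app = FastAPI()
setup(app)
with TestClient(app):  # entering/leaving the context runs the startup/shutdown handlers
    print(type(get_dummy_client(app)).__name__)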
import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_random_exponential + +from ..core.errors import ConfigurationError +from ..core.settings import get_application_settings + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.ssm_client = None + settings: SSMSettings | None = get_application_settings( + app + ).AUTOSCALING_SSM_ACCESS + + if not settings: + _logger.warning("SSM client is de-activated in the settings") + return + + app.state.ssm_client = client = await SimcoreSSMAPI.create(settings) + + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(120), + wait=wait_random_exponential(max=30), + before_sleep=before_sleep_log(_logger, logging.WARNING), + ): + with attempt: + connected = await client.ping() + if not connected: + raise SSMNotConnectedError # pragma: no cover + + async def on_shutdown() -> None: + if app.state.ssm_client: + await cast(SimcoreSSMAPI, app.state.ssm_client).close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_ssm_client(app: FastAPI) -> SimcoreSSMAPI: + if not app.state.ssm_client: + raise ConfigurationError( + msg="SSM client is not available. Please check the configuration." + ) + return cast(SimcoreSSMAPI, app.state.ssm_client) diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/auto_scaling_core.py b/services/autoscaling/src/simcore_service_autoscaling/utils/auto_scaling_core.py new file mode 100644 index 00000000000..d7f69d50b54 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/auto_scaling_core.py @@ -0,0 +1,213 @@ +import functools +import logging +import re +from typing import Final + +from aws_library.ec2 import EC2InstanceBootSpecific, EC2InstanceData, EC2InstanceType +from models_library.generated_models.docker_rest_api import Node +from types_aiobotocore_ec2.literals import InstanceTypeType + +from ..core.errors import ( + Ec2InvalidDnsNameError, + TaskRequirementsAboveRequiredEC2InstanceTypeError, + TaskRequiresUnauthorizedEC2InstanceTypeError, +) +from ..core.settings import ApplicationSettings +from ..models import AssociatedInstance +from ..modules.auto_scaling_mode_base import BaseAutoscaling +from . 
import utils_docker + +_EC2_INTERNAL_DNS_RE: Final[re.Pattern] = re.compile(r"^(?Pip-[^.]+).*$") +_logger = logging.getLogger(__name__) + + +def node_host_name_from_ec2_private_dns( + ec2_instance_data: EC2InstanceData, +) -> str: + """returns the node host name 'ip-10-2-3-22' from the ec2 private dns + Raises: + Ec2InvalidDnsNameError: if the dns name does not follow the expected pattern + """ + if match := re.match(_EC2_INTERNAL_DNS_RE, ec2_instance_data.aws_private_dns): + host_name: str = match.group("host_name") + return host_name + raise Ec2InvalidDnsNameError(aws_private_dns_name=ec2_instance_data.aws_private_dns) + + +def node_ip_from_ec2_private_dns( + ec2_instance_data: EC2InstanceData, +) -> str: + """returns the node ipv4 from the ec2 private dns string + Raises: + Ec2InvalidDnsNameError: if the dns name does not follow the expected pattern + """ + return ( + node_host_name_from_ec2_private_dns(ec2_instance_data) + .removeprefix("ip-") + .replace("-", ".") + ) + + +async def associate_ec2_instances_with_nodes( + nodes: list[Node], ec2_instances: list[EC2InstanceData] +) -> tuple[list[AssociatedInstance], list[EC2InstanceData]]: + """returns the associated and non-associated instances""" + associated_instances: list[AssociatedInstance] = [] + non_associated_instances: list[EC2InstanceData] = [] + + def _find_node_with_name(node: Node) -> bool: + assert node.description # nosec + return bool(node.description.hostname == docker_node_name) + + for instance_data in ec2_instances: + try: + docker_node_name = node_host_name_from_ec2_private_dns(instance_data) + except Ec2InvalidDnsNameError: + _logger.exception("Unexpected EC2 private dns name") + non_associated_instances.append(instance_data) + continue + + if node := next(iter(filter(_find_node_with_name, nodes)), None): + associated_instances.append( + AssociatedInstance(node=node, ec2_instance=instance_data) + ) + else: + non_associated_instances.append(instance_data) + return associated_instances, non_associated_instances + + +async def ec2_startup_script( + ec2_boot_specific: EC2InstanceBootSpecific, app_settings: ApplicationSettings +) -> str: + startup_commands = ec2_boot_specific.custom_boot_scripts.copy() + startup_commands.append( + await utils_docker.get_docker_swarm_join_bash_command( + join_as_drained=app_settings.AUTOSCALING_DOCKER_JOIN_DRAINED + ) + ) + if app_settings.AUTOSCALING_REGISTRY: # noqa: SIM102 + if pull_image_cmd := utils_docker.get_docker_pull_images_on_start_bash_command( + ec2_boot_specific.pre_pull_images + ): + startup_commands.append( + " && ".join( + [ + utils_docker.get_docker_login_on_start_bash_command( + app_settings.AUTOSCALING_REGISTRY + ), + pull_image_cmd, + ] + ) + ) + startup_commands.append( + utils_docker.get_docker_pull_images_crontab( + ec2_boot_specific.pre_pull_images_cron_interval + ), + ) + + return " && ".join(startup_commands) + + +def ec2_buffer_startup_script( + ec2_boot_specific: EC2InstanceBootSpecific, app_settings: ApplicationSettings +) -> str: + startup_commands = ec2_boot_specific.custom_boot_scripts.copy() + if ec2_boot_specific.pre_pull_images: + assert app_settings.AUTOSCALING_REGISTRY # nosec + startup_commands.extend( + ( + utils_docker.get_docker_login_on_start_bash_command( + app_settings.AUTOSCALING_REGISTRY + ), + utils_docker.write_compose_file_command( + ec2_boot_specific.pre_pull_images + ), + ) + ) + return " && ".join(startup_commands) + + +def _instance_type_by_type_name( + ec2_type: EC2InstanceType, *, type_name: InstanceTypeType | None +) -> bool: + if 
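The private-DNS helpers above turn an EC2 private DNS name such as 'ip-10-2-3-22.ec2.internal' into the docker node host name and its IPv4 address. A simplified version of that conversion on its own (the named group <host_name> is spelled out here, matching the match.group("host_name") call in the module):

import re
from typing import Final

_EC2_INTERNAL_DNS_RE: Final[re.Pattern] = re.compile(r"^(?P<host_name>ip-[^.]+).*$")


def host_name_from_private_dns(private_dns: str) -> str:
    if match := _EC2_INTERNAL_DNS_RE.match(private_dns):
        return match.group("host_name")
    msg = f"unexpected EC2 private dns name: {private_dns}"
    raise ValueError(msg)


def ip_from_private_dns(private_dns: str) -> str:
    # 'ip-10-2-3-22' -> '10.2.3.22'
    return host_name_from_private_dns(private_dns).removeprefix("ip-").replace("-", ".")


print(host_name_from_private_dns("ip-10-2-3-22.ec2.internal"))  # ip-10-2-3-22
print(ip_from_private_dns("ip-10-2-3-22.ec2.internal"))         # 10.2.3.22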
type_name is None: + return True + return bool(ec2_type.name == type_name) + + +def find_selected_instance_type_for_task( + instance_type_name: InstanceTypeType, + available_ec2_types: list[EC2InstanceType], + auto_scaling_mode: BaseAutoscaling, + task, +) -> EC2InstanceType: + filtered_instances = list( + filter( + functools.partial( + _instance_type_by_type_name, type_name=instance_type_name + ), + available_ec2_types, + ) + ) + if not filtered_instances: + raise TaskRequiresUnauthorizedEC2InstanceTypeError( + task=task, instance_type=instance_type_name + ) + + assert len(filtered_instances) == 1 # nosec + selected_instance = filtered_instances[0] + + # check that the assigned resources and the machine resource fit + if ( + auto_scaling_mode.get_task_required_resources(task) + > selected_instance.resources + ): + raise TaskRequirementsAboveRequiredEC2InstanceTypeError( + task=task, + instance_type=selected_instance, + resources=auto_scaling_mode.get_task_required_resources(task), + ) + + return selected_instance + + +def get_machine_buffer_type( + available_ec2_types: list[EC2InstanceType], +) -> EC2InstanceType: + assert len(available_ec2_types) > 0 # nosec + return available_ec2_types[0] + + +DrainedNodes = list[AssociatedInstance] +BufferDrainedNodes = list[AssociatedInstance] +TerminatingNodes = list[AssociatedInstance] + + +def sort_drained_nodes( + app_settings: ApplicationSettings, + all_drained_nodes: list[AssociatedInstance], + available_ec2_types: list[EC2InstanceType], +) -> tuple[DrainedNodes, BufferDrainedNodes, TerminatingNodes]: + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + # first sort out the drained nodes that started termination + terminating_nodes = [ + n + for n in all_drained_nodes + if utils_docker.get_node_termination_started_since(n.node) is not None + ] + remaining_drained_nodes = [ + n for n in all_drained_nodes if n not in terminating_nodes + ] + # we need to keep in reserve only the drained nodes of the right type + machine_buffer_type = get_machine_buffer_type(available_ec2_types) + # NOTE: we keep only in buffer the drained nodes with the right EC2 type, AND the right amount + buffer_drained_nodes = [ + node + for node in remaining_drained_nodes + if node.ec2_instance.type == machine_buffer_type.name + ][: app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER] + # all the others are "normal" drained nodes and may be terminated at some point + other_drained_nodes = [ + node for node in remaining_drained_nodes if node not in buffer_drained_nodes + ] + return (other_drained_nodes, buffer_drained_nodes, terminating_nodes) diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/buffer_machines_pool_core.py b/services/autoscaling/src/simcore_service_autoscaling/utils/buffer_machines_pool_core.py new file mode 100644 index 00000000000..66ff7972306 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/buffer_machines_pool_core.py @@ -0,0 +1,95 @@ +from collections.abc import Iterable +from operator import itemgetter +from typing import Final + +from aws_library.ec2 import AWS_TAG_VALUE_MAX_LENGTH, AWSTagKey, AWSTagValue, EC2Tags +from common_library.json_serialization import json_dumps +from fastapi import FastAPI +from models_library.docker import DockerGenericTag +from pydantic import TypeAdapter + +from ..constants import ( + ACTIVATED_BUFFER_MACHINE_EC2_TAGS, + BUFFER_MACHINE_TAG_KEY, + DEACTIVATED_BUFFER_MACHINE_EC2_TAGS, + PRE_PULLED_IMAGES_EC2_TAG_KEY, + PRE_PULLED_IMAGES_RE, +) +from 
..modules.auto_scaling_mode_base import BaseAutoscaling + +_NAME_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python("Name") + + +def get_activated_buffer_ec2_tags( + app: FastAPI, auto_scaling_mode: BaseAutoscaling +) -> EC2Tags: + return auto_scaling_mode.get_ec2_tags(app) | ACTIVATED_BUFFER_MACHINE_EC2_TAGS + + +def get_deactivated_buffer_ec2_tags( + app: FastAPI, auto_scaling_mode: BaseAutoscaling +) -> EC2Tags: + base_ec2_tags = ( + auto_scaling_mode.get_ec2_tags(app) | DEACTIVATED_BUFFER_MACHINE_EC2_TAGS + ) + base_ec2_tags[_NAME_EC2_TAG_KEY] = AWSTagValue( + f"{base_ec2_tags[_NAME_EC2_TAG_KEY]}-buffer" + ) + return base_ec2_tags + + +def is_buffer_machine(tags: EC2Tags) -> bool: + return bool(BUFFER_MACHINE_TAG_KEY in tags) + + +def dump_pre_pulled_images_as_tags(images: Iterable[DockerGenericTag]) -> EC2Tags: + # AWS Tag Values are limited to 256 characaters so we chunk the images + # into smaller chunks + jsonized_images = json_dumps(images) + assert AWS_TAG_VALUE_MAX_LENGTH # nosec + if len(jsonized_images) > AWS_TAG_VALUE_MAX_LENGTH: + # let's chunk the string + chunk_size = AWS_TAG_VALUE_MAX_LENGTH + chunks = [ + jsonized_images[i : i + chunk_size] + for i in range(0, len(jsonized_images), chunk_size) + ] + return { + TypeAdapter(AWSTagKey) + .validate_python(f"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_{i}"): TypeAdapter( + AWSTagValue + ) + .validate_python(c) + for i, c in enumerate(chunks) + } + return { + PRE_PULLED_IMAGES_EC2_TAG_KEY: TypeAdapter(AWSTagValue).validate_python( + json_dumps(images) + ) + } + + +def load_pre_pulled_images_from_tags(tags: EC2Tags) -> list[DockerGenericTag]: + # AWS Tag values are limited to 256 characters so we chunk the images + if PRE_PULLED_IMAGES_EC2_TAG_KEY in tags: + # read directly + return TypeAdapter(list[DockerGenericTag]).validate_json( + tags[PRE_PULLED_IMAGES_EC2_TAG_KEY] + ) + + assembled_json = "".join( + map( + itemgetter(1), + sorted( + ( + (int(m.group(1)), value) + for key, value in tags.items() + if (m := PRE_PULLED_IMAGES_RE.match(key)) + ), + key=itemgetter(0), + ), + ) + ) + if assembled_json: + return TypeAdapter(list[DockerGenericTag]).validate_json(assembled_json) + return [] diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/computational_scaling.py b/services/autoscaling/src/simcore_service_autoscaling/utils/computational_scaling.py new file mode 100644 index 00000000000..07c55bf746a --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/computational_scaling.py @@ -0,0 +1,28 @@ +import logging +from typing import Final + +from aws_library.ec2 import Resources +from dask_task_models_library.resource_constraints import ( + get_ec2_instance_type_from_resources, +) + +from ..models import DaskTask + +_logger = logging.getLogger(__name__) + +_DEFAULT_MAX_CPU: Final[float] = 1 +_DEFAULT_MAX_RAM: Final[int] = 1024 + + +def resources_from_dask_task(task: DaskTask) -> Resources: + return Resources( + cpus=task.required_resources.get("CPU", _DEFAULT_MAX_CPU), + ram=task.required_resources.get("RAM", _DEFAULT_MAX_RAM), + ) + + +def get_task_instance_restriction(task: DaskTask) -> str | None: + instance_ec2_type: str | None = get_ec2_instance_type_from_resources( + task.required_resources + ) + return instance_ec2_type diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/dynamic_scaling.py b/services/autoscaling/src/simcore_service_autoscaling/utils/dynamic_scaling.py deleted file mode 100644 index be84bd7b861..00000000000 --- 
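dump_pre_pulled_images_as_tags / load_pre_pulled_images_from_tags above work around the 256-character limit on AWS tag values by splitting the JSON-encoded image list across numbered tag keys and reassembling it on read. A self-contained round-trip sketch of that chunking; json is used directly instead of the project's json_dumps helper, and the tag key below is illustrative rather than the constant defined in the project:

import json
import re
from operator import itemgetter

TAG_VALUE_MAX_LEN = 256
PRE_PULLED_KEY = "io.simcore.autoscaling.pre_pulled_images"
PRE_PULLED_KEY_RE = re.compile(rf"^{re.escape(PRE_PULLED_KEY)}_(\d+)$")


def dump_images_as_tags(images: list[str]) -> dict[str, str]:
    payload = json.dumps(images)
    if len(payload) <= TAG_VALUE_MAX_LEN:
        return {PRE_PULLED_KEY: payload}
    # split the JSON string into numbered chunks that each fit in a tag value
    return {
        f"{PRE_PULLED_KEY}_{i}": payload[start : start + TAG_VALUE_MAX_LEN]
        for i, start in enumerate(range(0, len(payload), TAG_VALUE_MAX_LEN))
    }


def load_images_from_tags(tags: dict[str, str]) -> list[str]:
    if PRE_PULLED_KEY in tags:
        return json.loads(tags[PRE_PULLED_KEY])
    # reassemble the numbered chunks in order before decoding
    chunks = sorted(
        ((int(m.group(1)), value) for key, value in tags.items() if (m := PRE_PULLED_KEY_RE.match(key))),
        key=itemgetter(0),
    )
    assembled = "".join(map(itemgetter(1), chunks))
    return json.loads(assembled) if assembled else []


images = [f"registry.example.com/simcore/service-{i}:1.0.{i}" for i in range(20)]
tags = dump_images_as_tags(images)
assert load_images_from_tags(tags) == images
print(f"{len(tags)} tag(s) used")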
a/services/autoscaling/src/simcore_service_autoscaling/utils/dynamic_scaling.py +++ /dev/null @@ -1,165 +0,0 @@ -import datetime -import logging -import re -from typing import Final - -from fastapi import FastAPI -from models_library.generated_models.docker_rest_api import Node, Task - -from ..core.errors import Ec2InvalidDnsNameError -from ..core.settings import ApplicationSettings, get_application_settings -from ..models import AssociatedInstance, EC2InstanceData, EC2InstanceType, Resources -from . import utils_docker -from .rabbitmq import log_tasks_message, progress_tasks_message - -logger = logging.getLogger(__name__) - - -_EC2_INTERNAL_DNS_RE: Final[re.Pattern] = re.compile(r"^(?Pip-[0-9-]+).+$") - - -def node_host_name_from_ec2_private_dns( - ec2_instance_data: EC2InstanceData, -) -> str: - if match := re.match(_EC2_INTERNAL_DNS_RE, ec2_instance_data.aws_private_dns): - return match.group(1) - raise Ec2InvalidDnsNameError(aws_private_dns_name=ec2_instance_data.aws_private_dns) - - -async def associate_ec2_instances_with_nodes( - nodes: list[Node], ec2_instances: list[EC2InstanceData] -) -> tuple[list[AssociatedInstance], list[EC2InstanceData]]: - """returns the associated and non-associated instances""" - associated_instances: list[AssociatedInstance] = [] - non_associated_instances: list[EC2InstanceData] = [] - - def _find_node_with_name(node: Node) -> bool: - assert node.Description # nosec - return bool(node.Description.Hostname == docker_node_name) - - for instance_data in ec2_instances: - try: - docker_node_name = node_host_name_from_ec2_private_dns(instance_data) - except Ec2InvalidDnsNameError: - logger.exception("Unexcepted EC2 private dns name") - non_associated_instances.append(instance_data) - continue - - if node := next(iter(filter(_find_node_with_name, nodes)), None): - associated_instances.append(AssociatedInstance(node, instance_data)) - else: - non_associated_instances.append(instance_data) - return associated_instances, non_associated_instances - - -def try_assigning_task_to_node( - pending_task: Task, instance_to_tasks: list[tuple[AssociatedInstance, list[Task]]] -) -> bool: - for instance, node_assigned_tasks in instance_to_tasks: - instance_total_resource = utils_docker.get_node_total_resources(instance.node) - tasks_needed_resources = utils_docker.compute_tasks_needed_resources( - node_assigned_tasks - ) - if ( - instance_total_resource - tasks_needed_resources - ) >= utils_docker.get_max_resources_from_docker_task(pending_task): - node_assigned_tasks.append(pending_task) - return True - return False - - -def try_assigning_task_to_instances( - pending_task: Task, - list_of_instance_to_tasks: list[tuple[EC2InstanceType, list[Task]]], -) -> bool: - for instance, instance_assigned_tasks in list_of_instance_to_tasks: - instance_total_resource = Resources(cpus=instance.cpus, ram=instance.ram) - tasks_needed_resources = utils_docker.compute_tasks_needed_resources( - instance_assigned_tasks - ) - if ( - instance_total_resource - tasks_needed_resources - ) >= utils_docker.get_max_resources_from_docker_task(pending_task): - instance_assigned_tasks.append(pending_task) - return True - return False - - -_TIME_FORMAT = "{:02d}:{:02d}" # format for minutes:seconds - - -def _format_delta(delta: datetime.timedelta) -> str: - return _TIME_FORMAT.format(delta.seconds // 60, delta.seconds % 60) - - -async def try_assigning_task_to_pending_instances( - app: FastAPI, - pending_task: Task, - list_of_pending_instance_to_tasks: list[tuple[EC2InstanceData, list[Task]]], - 
type_to_instance_map: dict[str, EC2InstanceType], -) -> bool: - app_settings = get_application_settings(app) - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - instance_max_time_to_start = ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME - ) - for instance, instance_assigned_tasks in list_of_pending_instance_to_tasks: - instance_type = type_to_instance_map[instance.type] - instance_total_resources = Resources( - cpus=instance_type.cpus, ram=instance_type.ram - ) - tasks_needed_resources = utils_docker.compute_tasks_needed_resources( - instance_assigned_tasks - ) - if ( - instance_total_resources - tasks_needed_resources - ) >= utils_docker.get_max_resources_from_docker_task(pending_task): - instance_assigned_tasks.append(pending_task) - now = datetime.datetime.now(datetime.timezone.utc) - time_since_launch = now - instance.launch_time - estimated_time_to_completion = ( - instance.launch_time + instance_max_time_to_start - now - ) - - await log_tasks_message( - app, - [pending_task], - f"adding machines to the cluster (time waiting: {_format_delta(time_since_launch)}, est. remaining time: {_format_delta(estimated_time_to_completion)})...please wait...", - ) - await progress_tasks_message( - app, - [pending_task], - time_since_launch.total_seconds() - / instance_max_time_to_start.total_seconds(), - ) - return True - return False - - -async def ec2_startup_script(app_settings: ApplicationSettings) -> str: - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - startup_commands = ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_CUSTOM_BOOT_SCRIPTS.copy() - ) - startup_commands.append(await utils_docker.get_docker_swarm_join_bash_command()) - if app_settings.AUTOSCALING_REGISTRY: - if pull_image_cmd := utils_docker.get_docker_pull_images_on_start_bash_command( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_PRE_PULL_IMAGES - ): - startup_commands.append( - " && ".join( - [ - utils_docker.get_docker_login_on_start_bash_command( - app_settings.AUTOSCALING_REGISTRY - ), - pull_image_cmd, - ] - ) - ) - startup_commands.append( - utils_docker.get_docker_pull_images_crontab( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_PRE_PULL_IMAGES_CRON_INTERVAL - ), - ) - - return " && ".join(startup_commands) diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/ec2.py b/services/autoscaling/src/simcore_service_autoscaling/utils/ec2.py deleted file mode 100644 index cead301de3b..00000000000 --- a/services/autoscaling/src/simcore_service_autoscaling/utils/ec2.py +++ /dev/null @@ -1,79 +0,0 @@ -""" Free helper functions for AWS API - -""" - -import json -import logging -from collections import OrderedDict -from textwrap import dedent -from typing import Callable - -from .._meta import VERSION -from ..core.errors import ConfigurationError, Ec2InstanceNotFoundError -from ..core.settings import ApplicationSettings -from ..models import EC2InstanceType, Resources - -logger = logging.getLogger(__name__) - - -def get_ec2_tags(app_settings: ApplicationSettings) -> dict[str, str]: - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - return { - "io.simcore.autoscaling.version": f"{VERSION}", - "io.simcore.autoscaling.monitored_nodes_labels": json.dumps( - app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS - ), - "io.simcore.autoscaling.monitored_services_labels": json.dumps( - app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS - ), - # NOTE: 
this one gets special treatment in AWS GUI and is applied to the name of the instance - "Name": f"autoscaling-{app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME}", - } - - -def compose_user_data(docker_join_bash_command: str) -> str: - return dedent( - f"""\ -#!/bin/bash -{docker_join_bash_command} -""" - ) - - -def closest_instance_policy( - ec2_instance: EC2InstanceType, - resources: Resources, -) -> float: - if ec2_instance.cpus < resources.cpus or ec2_instance.ram < resources.ram: - return 0 - # compute a score for all the instances that are above expectations - # best is the exact ec2 instance - cpu_ratio = float(ec2_instance.cpus - resources.cpus) / float(ec2_instance.cpus) - ram_ratio = float(ec2_instance.ram - resources.ram) / float(ec2_instance.ram) - return 100 * (1.0 - cpu_ratio) * (1.0 - ram_ratio) - - -def find_best_fitting_ec2_instance( - allowed_ec2_instances: list[EC2InstanceType], - resources: Resources, - score_type: Callable[[EC2InstanceType, Resources], float] = closest_instance_policy, -) -> EC2InstanceType: - if not allowed_ec2_instances: - raise ConfigurationError(msg="allowed ec2 instances is missing!") - score_to_ec2_candidate: dict[float, EC2InstanceType] = OrderedDict( - sorted( - { - score_type(instance, resources): instance - for instance in allowed_ec2_instances - }.items(), - reverse=True, - ) - ) - - score, instance = next(iter(score_to_ec2_candidate.items())) - if score == 0: - raise Ec2InstanceNotFoundError( - needed_resources=resources, msg="no adequate EC2 instance found!" - ) - return instance diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/rabbitmq.py b/services/autoscaling/src/simcore_service_autoscaling/utils/rabbitmq.py index 5e5a11fef4a..b01c0853bb2 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/utils/rabbitmq.py +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/rabbitmq.py @@ -1,111 +1,184 @@ import asyncio -import itertools import logging +from aws_library.ec2 import Resources +from dask_task_models_library.container_tasks.utils import parse_dask_job_id from fastapi import FastAPI -from models_library.docker import SimcoreServiceDockerLabelKeys -from models_library.generated_models.docker_rest_api import Task +from models_library.docker import StandardSimcoreDockerLabels +from models_library.generated_models.docker_rest_api import Task as DockerTask +from models_library.progress_bar import ProgressReport +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID from models_library.rabbitmq_messages import ( LoggerRabbitMessage, ProgressRabbitMessageNode, ProgressType, RabbitAutoscalingStatusMessage, ) +from models_library.users import UserID from servicelib.logging_utils import log_catch -from ..core.settings import ApplicationSettings -from ..models import Cluster -from ..modules.docker import AutoscalingDocker, get_docker_client +from ..core.settings import ApplicationSettings, get_application_settings +from ..models import Cluster, DaskTask from ..modules.rabbitmq import post_message -from . 
import utils_docker -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -async def log_tasks_message( - app: FastAPI, tasks: list[Task], message: str, *, level: int = logging.INFO -) -> None: - await asyncio.gather( - *(post_task_log_message(app, task, message, level) for task in tasks), - return_exceptions=True, +def _get_task_ids(task: DockerTask | DaskTask) -> tuple[UserID, ProjectID, NodeID]: + if isinstance(task, DockerTask): + labels = StandardSimcoreDockerLabels.from_docker_task(task) + return labels.user_id, labels.project_id, labels.node_id + _service_key, _service_version, user_id, project_id, node_id = parse_dask_job_id( + task.task_id ) + return user_id, project_id, node_id -async def progress_tasks_message( - app: FastAPI, tasks: list[Task], progress: float +async def post_tasks_log_message( + app: FastAPI, + *, + tasks: list[DockerTask] | list[DaskTask], + message: str, + level: int = logging.INFO, ) -> None: - await asyncio.gather( - *(post_task_progress_message(app, task, progress) for task in tasks), - return_exceptions=True, - ) + if not tasks: + return - -async def post_task_progress_message(app: FastAPI, task: Task, progress: float) -> None: - with log_catch(logger, reraise=False): - simcore_label_keys = SimcoreServiceDockerLabelKeys.from_docker_task(task) - message = ProgressRabbitMessageNode.construct( - node_id=simcore_label_keys.node_id, - user_id=simcore_label_keys.user_id, - project_id=simcore_label_keys.project_id, - progress_type=ProgressType.CLUSTER_UP_SCALING, - progress=progress, + with log_catch(_logger, reraise=False): + await asyncio.gather( + *( + _post_task_log_message( + app, + user_id=user_id, + project_id=project_id, + node_id=node_id, + log=message, + level=level, + ) + for user_id, project_id, node_id in ( + _get_task_ids(task) for task in tasks + ) + ), + return_exceptions=True, ) - await post_message(app, message) - - -async def post_task_log_message(app: FastAPI, task: Task, log: str, level: int) -> None: - with log_catch(logger, reraise=False): - simcore_label_keys = SimcoreServiceDockerLabelKeys.from_docker_task(task) - message = LoggerRabbitMessage.construct( - node_id=simcore_label_keys.node_id, - user_id=simcore_label_keys.user_id, - project_id=simcore_label_keys.project_id, - messages=[f"[cluster] {log}"], - log_level=level, + + +async def post_tasks_progress_message( + app: FastAPI, + *, + tasks: list[DockerTask] | list[DaskTask], + progress: float, + progress_type: ProgressType, +) -> None: + if not tasks: + return + + with log_catch(_logger, reraise=False): + await asyncio.gather( + *( + _post_task_progress_message( + app, + user_id=user_id, + project_id=project_id, + node_id=node_id, + progress=progress, + progress_type=progress_type, + ) + for user_id, project_id, node_id in ( + _get_task_ids(task) for task in tasks + ) + ), + return_exceptions=True, ) - logger.log(level, message) - await post_message(app, message) -async def create_autoscaling_status_message( - docker_client: AutoscalingDocker, +async def _post_task_progress_message( + app: FastAPI, + *, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + progress: float, + progress_type: ProgressType, +) -> None: + message = ProgressRabbitMessageNode.model_construct( + user_id=user_id, + project_id=project_id, + node_id=node_id, + progress_type=progress_type, + report=ProgressReport(actual_value=progress, total=1), + ) + await post_message(app, message) + + +async def _post_task_log_message( + app: FastAPI, + *, + user_id: UserID, + project_id: 
ProjectID, + node_id: NodeID, + log: str, + level: int, +) -> None: + cluster_log = f"[cluster] {log}" + _logger.log(level, cluster_log) + + message = LoggerRabbitMessage.model_construct( + user_id=user_id, + project_id=project_id, + node_id=node_id, + messages=[cluster_log], + log_level=level, + ) + await post_message(app, message) + + +async def _create_autoscaling_status_message( app_settings: ApplicationSettings, cluster: Cluster, + cluster_total_resources: Resources, + cluster_used_resources: Resources, ) -> RabbitAutoscalingStatusMessage: assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec - assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - monitored_nodes = [ - i.node - for i in itertools.chain( - cluster.active_nodes, cluster.drained_nodes, cluster.reserve_drained_nodes - ) - ] - (total_resources, used_resources) = await asyncio.gather( - *( - utils_docker.compute_cluster_total_resources(monitored_nodes), - utils_docker.compute_cluster_used_resources(docker_client, monitored_nodes), - ) - ) - return RabbitAutoscalingStatusMessage.construct( - origin=f"{app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS}", - nodes_total=len(cluster.active_nodes) + + origin = "unknown" + if app_settings.AUTOSCALING_NODES_MONITORING: + origin = f"dynamic:node_labels={app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS!s}" + elif app_settings.AUTOSCALING_DASK: + origin = f"computational:scheduler_url={app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL!s}" + + total_nodes = ( + len(cluster.active_nodes) + len(cluster.drained_nodes) - + len(cluster.reserve_drained_nodes), + + len(cluster.buffer_drained_nodes) + ) + drained_nodes = len(cluster.drained_nodes) + len(cluster.buffer_drained_nodes) + + return RabbitAutoscalingStatusMessage.model_construct( + origin=origin, + nodes_total=total_nodes, nodes_active=len(cluster.active_nodes), - nodes_drained=len(cluster.drained_nodes) + len(cluster.reserve_drained_nodes), - cluster_total_resources=total_resources.dict(), - cluster_used_resources=used_resources.dict(), + nodes_drained=drained_nodes, + cluster_total_resources=cluster_total_resources.model_dump(), + cluster_used_resources=cluster_used_resources.model_dump(), instances_pending=len(cluster.pending_ec2s), - instances_running=len(cluster.active_nodes) - + len(cluster.drained_nodes) - + len(cluster.reserve_drained_nodes), + instances_running=total_nodes, ) -async def post_autoscaling_status_message(app: FastAPI, cluster: Cluster) -> None: +async def post_autoscaling_status_message( + app: FastAPI, + cluster: Cluster, + cluster_total_resources: Resources, + cluster_used_resources: Resources, +) -> None: await post_message( app, - await create_autoscaling_status_message( - get_docker_client(app), app.state.settings, cluster + await _create_autoscaling_status_message( + get_application_settings(app), + cluster, + cluster_total_resources, + cluster_used_resources, ), ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/redis.py b/services/autoscaling/src/simcore_service_autoscaling/utils/redis.py new file mode 100644 index 00000000000..3d4998db87d --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/redis.py @@ -0,0 +1,30 @@ +from common_library.json_serialization import json_dumps +from fastapi import FastAPI + +from ..core.settings import ApplicationSettings + + +def create_lock_key_and_value(app: FastAPI) -> tuple[str, str]: + app_settings: ApplicationSettings = app.state.settings + lock_key_parts = [app.title, 
app.version] + lock_value = "" + if app_settings.AUTOSCALING_NODES_MONITORING: + lock_key_parts += [ + "dynamic", + *app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS, + ] + lock_value = json_dumps( + { + "node_labels": app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS + } + ) + elif app_settings.AUTOSCALING_DASK: + lock_key_parts += [ + "computational", + f"{app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL}", + ] + lock_value = json_dumps( + {"scheduler_url": f"{app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL}"} + ) + lock_key = ":".join(f"{k}" for k in lock_key_parts) + return lock_key, lock_value diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/utils_docker.py b/services/autoscaling/src/simcore_service_autoscaling/utils/utils_docker.py index 4a0b2ee3fb8..9c3f187a78f 100644 --- a/services/autoscaling/src/simcore_service_autoscaling/utils/utils_docker.py +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/utils_docker.py @@ -1,33 +1,40 @@ -""" Free helper functions for docker API - -""" +"""Free helper functions for docker API""" import asyncio import collections +import contextlib import datetime import logging import re from contextlib import suppress +from copy import deepcopy from pathlib import Path -from typing import Final, Optional, cast +from typing import Final, cast +import arrow import yaml -from models_library.docker import DockerGenericTag, DockerLabelKey +from aws_library.ec2 import EC2InstanceData, Resources +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + DockerGenericTag, + DockerLabelKey, +) from models_library.generated_models.docker_rest_api import ( + Availability, Node, NodeState, Service, Task, TaskState, ) -from pydantic import ByteSize, parse_obj_as +from pydantic import ByteSize, TypeAdapter, ValidationError from servicelib.docker_utils import to_datetime from servicelib.logging_utils import log_context from servicelib.utils import logged_gather from settings_library.docker_registry import RegistrySettings +from types_aiobotocore_ec2.literals import InstanceTypeType from ..core.settings import ApplicationSettings -from ..models import Resources from ..modules.docker import AutoscalingDocker logger = logging.getLogger(__name__) @@ -49,28 +56,71 @@ _PENDING_DOCKER_TASK_MESSAGE: Final[str] = "pending task scheduling" _INSUFFICIENT_RESOURCES_DOCKER_TASK_ERR: Final[str] = "insufficient resources on" +_NOT_SATISFIED_SCHEDULING_CONSTRAINTS_TASK_ERR: Final[str] = "no suitable node" +_OSPARC_SERVICE_READY_LABEL_KEY: Final[DockerLabelKey] = TypeAdapter( + DockerLabelKey +).validate_python( + "io.simcore.osparc-services-ready", +) +_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: Final[DockerLabelKey] = TypeAdapter( + DockerLabelKey +).validate_python(f"{_OSPARC_SERVICE_READY_LABEL_KEY}-last-changed") +_OSPARC_SERVICE_READY_LABEL_KEYS: Final[list[DockerLabelKey]] = [ + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, +] + + +_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY: Final[DockerLabelKey] = TypeAdapter( + DockerLabelKey +).validate_python("io.simcore.osparc-node-found-empty") + +_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY: Final[DockerLabelKey] = TypeAdapter( + DockerLabelKey +).validate_python("io.simcore.osparc-node-termination-started") + + +def _get_node_creation_date(node: Node) -> datetime.datetime: + assert node.created_at # nosec + return arrow.get(node.created_at).datetime async def get_monitored_nodes( docker_client: 
AutoscalingDocker, node_labels: list[DockerLabelKey] ) -> list[Node]: - nodes = parse_obj_as( - list[Node], + node_label_filters = [f"{label}=true" for label in node_labels] + [ + f"{label}" for label in _OSPARC_SERVICE_READY_LABEL_KEYS + ] + list_of_nodes = TypeAdapter(list[Node]).validate_python( + await docker_client.nodes.list(filters={"node.label": node_label_filters}) + ) + list_of_nodes.sort(key=_get_node_creation_date) + return list_of_nodes + + +async def get_worker_nodes(docker_client: AutoscalingDocker) -> list[Node]: + list_of_nodes = TypeAdapter(list[Node]).validate_python( await docker_client.nodes.list( - filters={"node.label": [f"{label}=true" for label in node_labels]} - ), + filters={ + "role": ["worker"], + "node.label": [ + f"{label}" for label in _OSPARC_SERVICE_READY_LABEL_KEYS + ], + } + ) ) - return nodes + list_of_nodes.sort(key=_get_node_creation_date) + return list_of_nodes async def remove_nodes( - docker_client: AutoscalingDocker, nodes: list[Node], force: bool = False + docker_client: AutoscalingDocker, *, nodes: list[Node], force: bool = False ) -> list[Node]: """removes docker nodes that are in the down state (unless force is used and they will be forcibly removed)""" def _check_if_node_is_removable(node: Node) -> bool: - if node.Status and node.Status.State: - return node.Status.State in [ + if node.status and node.status.state: + return node.status.state in [ NodeState.down, NodeState.disconnected, NodeState.unknown, @@ -86,46 +136,52 @@ def _check_if_node_is_removable(node: Node) -> bool: n for n in nodes if (force is True) or _check_if_node_is_removable(n) ] for node in nodes_that_need_removal: - assert node.ID # nosec - with log_context(logger, logging.INFO, msg=f"remove {node.ID=}"): - await docker_client.nodes.remove(node_id=node.ID, force=force) + assert node.id # nosec + with log_context(logger, logging.INFO, msg=f"remove {node.id=}"): + await docker_client.nodes.remove(node_id=node.id, force=force) return nodes_that_need_removal def _is_task_waiting_for_resources(task: Task) -> bool: # NOTE: https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/ - if ( - not task.Status - or not task.Status.State - or not task.Status.Message - or not task.Status.Err + with log_context( + logger, level=logging.DEBUG, msg=f"_is_task_waiting_for_resources: {task.id}" ): - return False - return ( - task.Status.State == TaskState.pending - and task.Status.Message == _PENDING_DOCKER_TASK_MESSAGE - and _INSUFFICIENT_RESOURCES_DOCKER_TASK_ERR in task.Status.Err - ) + if ( + not task.status + or not task.status.state + or not task.status.message + or not task.status.err + ): + return False + return ( + task.status.state == TaskState.pending + and task.status.message == _PENDING_DOCKER_TASK_MESSAGE + and ( + _INSUFFICIENT_RESOURCES_DOCKER_TASK_ERR in task.status.err + or _NOT_SATISFIED_SCHEDULING_CONSTRAINTS_TASK_ERR in task.status.err + ) + ) async def _associated_service_has_no_node_placement_contraints( docker_client: AutoscalingDocker, task: Task ) -> bool: - assert task.ServiceID # nosec - service_inspect = parse_obj_as( - Service, await docker_client.services.inspect(task.ServiceID) + assert task.service_id # nosec + service_inspect = TypeAdapter(Service).validate_python( + await docker_client.services.inspect(task.service_id) ) - assert service_inspect.Spec # nosec - assert service_inspect.Spec.TaskTemplate # nosec + assert service_inspect.spec # nosec + assert service_inspect.spec.task_template # nosec if ( - not 
service_inspect.Spec.TaskTemplate.Placement - or not service_inspect.Spec.TaskTemplate.Placement.Constraints + not service_inspect.spec.task_template.placement + or not service_inspect.spec.task_template.placement.constraints ): return True # parse the placement contraints service_placement_constraints = ( - service_inspect.Spec.TaskTemplate.Placement.Constraints + service_inspect.spec.task_template.placement.constraints ) for constraint in service_placement_constraints: # is of type node.id==alskjladskjs or node.hostname==thiscomputerhostname or node.role==manager, sometimes with spaces... @@ -137,15 +193,13 @@ async def _associated_service_has_no_node_placement_contraints( def _by_created_dt(task: Task) -> datetime.datetime: - # NOTE: SAFE implementation to extract task.CreatedAt as datetime for comparison - if task.CreatedAt: + # NOTE: SAFE implementation to extract task.created_at as datetime for comparison + if task.created_at: with suppress(ValueError): - created_at = to_datetime(task.CreatedAt) - created_at_utc: datetime.datetime = created_at.replace( - tzinfo=datetime.timezone.utc - ) + created_at = to_datetime(task.created_at) + created_at_utc: datetime.datetime = created_at.replace(tzinfo=datetime.UTC) return created_at_utc - return datetime.datetime.now(datetime.timezone.utc) + return datetime.datetime.now(datetime.UTC) async def pending_service_tasks_with_insufficient_resources( @@ -160,19 +214,22 @@ async def pending_service_tasks_with_insufficient_resources( - have an error message with "insufficient resources" - are not scheduled on any node """ - tasks = parse_obj_as( - list[Task], + tasks = TypeAdapter(list[Task]).validate_python( await docker_client.tasks.list( filters={ "desired-state": "running", "label": service_labels, } - ), + ) ) sorted_tasks = sorted(tasks, key=_by_created_dt) + logger.debug( + "found following tasks that might trigger autoscaling: %s", + [task.id for task in tasks], + ) - pending_tasks = [ + return [ task for task in sorted_tasks if ( @@ -183,17 +240,15 @@ async def pending_service_tasks_with_insufficient_resources( ) ] - return pending_tasks - def get_node_total_resources(node: Node) -> Resources: - assert node.Description # nosec - assert node.Description.Resources # nosec - assert node.Description.Resources.NanoCPUs # nosec - assert node.Description.Resources.MemoryBytes # nosec + assert node.description # nosec + assert node.description.resources # nosec + assert node.description.resources.nano_cp_us # nosec + assert node.description.resources.memory_bytes # nosec return Resources( - cpus=node.Description.Resources.NanoCPUs / _NANO_CPU, - ram=ByteSize(node.Description.Resources.MemoryBytes), + cpus=node.description.resources.nano_cp_us / _NANO_CPU, + ram=ByteSize(node.description.resources.memory_bytes), ) @@ -203,52 +258,85 @@ async def compute_cluster_total_resources(nodes: list[Node]) -> Resources: """ cluster_resources_counter = collections.Counter({"ram": 0, "cpus": 0}) for node in nodes: - assert node.Description # nosec - assert node.Description.Resources # nosec - assert node.Description.Resources.NanoCPUs # nosec + assert node.description # nosec + assert node.description.resources # nosec + assert node.description.resources.nano_cp_us # nosec cluster_resources_counter.update( { - "ram": node.Description.Resources.MemoryBytes, - "cpus": node.Description.Resources.NanoCPUs / _NANO_CPU, + "ram": node.description.resources.memory_bytes, + "cpus": node.description.resources.nano_cp_us / _NANO_CPU, } ) - return 
Resources.parse_obj(dict(cluster_resources_counter)) + return Resources.model_validate(dict(cluster_resources_counter)) def get_max_resources_from_docker_task(task: Task) -> Resources: """returns the highest values for resources based on both docker reservations and limits""" - assert task.Spec # nosec - if task.Spec.Resources: + assert task.spec # nosec + if task.spec.resources: return Resources( cpus=max( ( - task.Spec.Resources.Reservations - and task.Spec.Resources.Reservations.NanoCPUs + task.spec.resources.reservations + and task.spec.resources.reservations.nano_cp_us or 0 ), ( - task.Spec.Resources.Limits - and task.Spec.Resources.Limits.NanoCPUs + task.spec.resources.limits + and task.spec.resources.limits.nano_cp_us or 0 ), ) / _NANO_CPU, - ram=parse_obj_as( - ByteSize, + ram=TypeAdapter(ByteSize).validate_python( max( - task.Spec.Resources.Reservations - and task.Spec.Resources.Reservations.MemoryBytes + task.spec.resources.reservations + and task.spec.resources.reservations.memory_bytes or 0, - task.Spec.Resources.Limits - and task.Spec.Resources.Limits.MemoryBytes + task.spec.resources.limits + and task.spec.resources.limits.memory_bytes or 0, - ), + ) ), ) return Resources(cpus=0, ram=ByteSize(0)) +async def get_task_instance_restriction( + docker_client: AutoscalingDocker, task: Task +) -> InstanceTypeType | None: + with contextlib.suppress(ValidationError): + assert task.service_id # nosec + service_inspect = TypeAdapter(Service).validate_python( + await docker_client.services.inspect(task.service_id) + ) + assert service_inspect.spec # nosec + assert service_inspect.spec.task_template # nosec + + if ( + not service_inspect.spec.task_template.placement + or not service_inspect.spec.task_template.placement.constraints + ): + return None + # parse the placement contraints + service_placement_constraints = ( + service_inspect.spec.task_template.placement.constraints + ) + # should be node.labels.{} + node_label_to_find = ( + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}==" + ) + for constraint in service_placement_constraints: + if constraint.startswith(node_label_to_find): + return TypeAdapter(InstanceTypeType).validate_python( + constraint.removeprefix(node_label_to_find) + ) + + return None + return None + + def compute_tasks_needed_resources(tasks: list[Task]) -> Resources: total = Resources.create_as_empty() for t in tasks: @@ -259,32 +347,32 @@ def compute_tasks_needed_resources(tasks: list[Task]) -> Resources: async def compute_node_used_resources( docker_client: AutoscalingDocker, node: Node, - service_labels: Optional[list[DockerLabelKey]] = None, + service_labels: list[DockerLabelKey] | None = None, ) -> Resources: cluster_resources_counter = collections.Counter({"ram": 0, "cpus": 0}) - task_filters = {"node": node.ID} - if service_labels: + assert node.id # nosec + task_filters: dict[str, str | list[DockerLabelKey]] = {"node": node.id} + if service_labels is not None: task_filters |= {"label": service_labels} - all_tasks_on_node = parse_obj_as( - list[Task], - await docker_client.tasks.list(filters=task_filters), + all_tasks_on_node = TypeAdapter(list[Task]).validate_python( + await docker_client.tasks.list(filters=task_filters) ) for task in all_tasks_on_node: - assert task.Status # nosec + assert task.status # nosec if ( - task.Status.State in _TASK_STATUS_WITH_ASSIGNED_RESOURCES - and task.Spec - and task.Spec.Resources - and task.Spec.Resources.Reservations + task.status.state in _TASK_STATUS_WITH_ASSIGNED_RESOURCES + and task.spec + and 
task.spec.resources + and task.spec.resources.reservations ): - task_reservations = task.Spec.Resources.Reservations.dict(exclude_none=True) cluster_resources_counter.update( { - "ram": task_reservations.get("MemoryBytes", 0), - "cpus": task_reservations.get("NanoCPUs", 0) / _NANO_CPU, + "ram": task.spec.resources.reservations.memory_bytes or 0, + "cpus": (task.spec.resources.reservations.nano_cp_us or 0) + / _NANO_CPU, } ) - return Resources.parse_obj(dict(cluster_resources_counter)) + return Resources.model_validate(dict(cluster_resources_counter)) async def compute_cluster_used_resources( @@ -294,11 +382,11 @@ async def compute_cluster_used_resources( list_of_used_resources = await logged_gather( *(compute_node_used_resources(docker_client, node) for node in nodes) ) - counter = collections.Counter({k: 0 for k in Resources.__fields__.keys()}) + counter = collections.Counter({k: 0 for k in list(Resources.model_fields)}) for result in list_of_used_resources: - counter.update(result.dict()) + counter.update(result.model_dump()) - return Resources.parse_obj(dict(counter)) + return Resources.model_validate(dict(counter)) _COMMAND_TIMEOUT_S = 10 @@ -306,7 +394,7 @@ async def compute_cluster_used_resources( _DOCKER_SWARM_JOIN_PATTERN = re.compile(_DOCKER_SWARM_JOIN_RE) -async def get_docker_swarm_join_bash_command() -> str: +async def get_docker_swarm_join_bash_command(*, join_as_drained: bool) -> str: """this assumes we are on a manager node""" command = ["docker", "swarm", "join-token", "worker"] process = await asyncio.create_subprocess_exec( @@ -318,16 +406,14 @@ async def get_docker_swarm_join_bash_command() -> str: await asyncio.wait_for(process.wait(), timeout=_COMMAND_TIMEOUT_S) assert process.returncode is not None # nosec if process.returncode > 0: - raise RuntimeError( - f"unexpected error running '{' '.join(command)}': {stderr.decode()}" - ) + msg = f"unexpected error running '{' '.join(command)}': {stderr.decode()}" + raise RuntimeError(msg) decoded_stdout = stdout.decode() if match := re.search(_DOCKER_SWARM_JOIN_PATTERN, decoded_stdout): capture = match.groupdict() - return f"{capture['command']} --availability=drain {capture['token']} {capture['address']}" - raise RuntimeError( - f"expected docker '{_DOCKER_SWARM_JOIN_RE}' command not found: received {decoded_stdout}!" - ) + return f"{capture['command']} --availability={'drain' if join_as_drained else 'active'} {capture['token']} {capture['address']}" + msg = f"expected docker '{_DOCKER_SWARM_JOIN_RE}' command not found: received {decoded_stdout}!" 
+ raise RuntimeError(msg) def get_docker_login_on_start_bash_command(registry_settings: RegistrySettings) -> str: @@ -352,27 +438,29 @@ def get_docker_login_on_start_bash_command(registry_settings: RegistrySettings) _CRONJOB_LOGS_PATH: Final[Path] = Path("/var/log/docker-pull-cronjob.log") -def get_docker_pull_images_on_start_bash_command( +def write_compose_file_command( docker_tags: list[DockerGenericTag], ) -> str: - if not docker_tags: - return "" - compose = { - "version": '"3.8"', "services": { - f"pre-pull-image-{n}": {"image": image_tag} + f"{image_tag.split('/')[-1].replace(':', '-')}": {"image": image_tag} for n, image_tag in enumerate(docker_tags) }, } compose_yaml = yaml.safe_dump(compose) - write_compose_file_cmd = " ".join( - ["echo", f'"{compose_yaml}"', ">", f"{_PRE_PULL_COMPOSE_PATH}"] - ) + return " ".join(["echo", f'"{compose_yaml}"', ">", f"{_PRE_PULL_COMPOSE_PATH}"]) + + +def get_docker_pull_images_on_start_bash_command( + docker_tags: list[DockerGenericTag], +) -> str: + if not docker_tags: + return "" + write_docker_compose_pull_script_cmd = " ".join( [ "echo", - f'"#!/bin/sh\necho Pulling started at \\$(date)\n{_DOCKER_COMPOSE_CMD} --file={_PRE_PULL_COMPOSE_PATH} pull"', + f'"#!/bin/sh\necho Pulling started at \\$(date)\n{_DOCKER_COMPOSE_CMD} --project-name=autoscaleprepull --file={_PRE_PULL_COMPOSE_PATH} pull --ignore-pull-failures"', ">", f"{_DOCKER_COMPOSE_PULL_SCRIPT_PATH}", ] @@ -383,7 +471,7 @@ def get_docker_pull_images_on_start_bash_command( docker_compose_pull_cmd = " ".join([f".{_DOCKER_COMPOSE_PULL_SCRIPT_PATH}"]) return " && ".join( [ - write_compose_file_cmd, + write_compose_file_command(docker_tags), write_docker_compose_pull_script_cmd, make_docker_compose_script_executable, docker_compose_pull_cmd, @@ -410,11 +498,18 @@ def get_docker_pull_images_crontab(interval: datetime.timedelta) -> str: async def find_node_with_name( docker_client: AutoscalingDocker, name: str -) -> Optional[Node]: +) -> Node | None: list_of_nodes = await docker_client.nodes.list(filters={"name": name}) if not list_of_nodes: return None - return parse_obj_as(Node, list_of_nodes[0]) + # note that there might be several nodes with a common_prefixed name. 
so now we want exact matching + parsed_list_of_nodes = TypeAdapter(list[Node]).validate_python(list_of_nodes) + for node in parsed_list_of_nodes: + assert node.description # nosec + if node.description.hostname == name: + return node + + return None async def tag_node( @@ -424,44 +519,186 @@ async def tag_node( tags: dict[DockerLabelKey, str], available: bool, ) -> Node: + assert node.spec # nosec + if (node.spec.labels == tags) and ( + (node.spec.availability is Availability.active) == available + ): + # nothing to do + return node with log_context( - logger, logging.DEBUG, msg=f"tagging {node.ID=} with {tags=} and {available=}" + logger, logging.DEBUG, msg=f"tag {node.id=} with {tags=} and {available=}" ): - assert node.ID # nosec - assert node.Version # nosec - assert node.Version.Index # nosec - assert node.Spec # nosec - assert node.Spec.Role # nosec + assert node.id # nosec + + latest_version_node = TypeAdapter(Node).validate_python( + await docker_client.nodes.inspect(node_id=node.id) + ) + assert latest_version_node.version # nosec + assert latest_version_node.version.index # nosec + assert latest_version_node.spec # nosec + assert latest_version_node.spec.role # nosec + + # updating now should work nicely await docker_client.nodes.update( - node_id=node.ID, - version=node.Version.Index, + node_id=node.id, + version=latest_version_node.version.index, spec={ "Availability": "active" if available else "drain", "Labels": tags, - "Role": node.Spec.Role.value, + "Role": latest_version_node.spec.role.value, }, ) - return parse_obj_as(Node, await docker_client.nodes.inspect(node_id=node.ID)) + return TypeAdapter(Node).validate_python( + await docker_client.nodes.inspect(node_id=node.id) + ) async def set_node_availability( docker_client: AutoscalingDocker, node: Node, *, available: bool ) -> Node: - assert node.Spec # nosec + assert node.spec # nosec return await tag_node( docker_client, node, - tags=cast(dict[DockerLabelKey, str], node.Spec.Labels), + tags=cast(dict[DockerLabelKey, str], node.spec.labels), available=available, ) -def get_docker_tags(app_settings: ApplicationSettings) -> dict[DockerLabelKey, str]: +def get_new_node_docker_tags( + app_settings: ApplicationSettings, ec2_instance: EC2InstanceData +) -> dict[DockerLabelKey, str]: assert app_settings.AUTOSCALING_NODES_MONITORING # nosec - return { - tag_key: "true" - for tag_key in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS - } | { - tag_key: "true" - for tag_key in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS - } + return ( + { + tag_key: "true" + for tag_key in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS + } + | { + tag_key: "true" + for tag_key in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS + } + | {DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY: ec2_instance.type} + ) + + +def is_node_ready_and_available(node: Node, *, availability: Availability) -> bool: + assert node.status # nosec + assert node.spec # nosec + return bool( + node.status.state == NodeState.ready and node.spec.availability == availability + ) + + +def is_node_osparc_ready(node: Node) -> bool: + if not is_node_ready_and_available(node, availability=Availability.active): + return False + assert node.spec # nosec + return bool( + node.spec.labels + and _OSPARC_SERVICE_READY_LABEL_KEY in node.spec.labels + and node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] == "true" + ) + + +async def set_node_osparc_ready( + app_settings: 
ApplicationSettings, + docker_client: AutoscalingDocker, + node: Node, + *, + ready: bool, +) -> Node: + assert node.spec # nosec + new_tags = deepcopy(cast(dict[DockerLabelKey, str], node.spec.labels)) + new_tags[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" if ready else "false" + new_tags[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = arrow.utcnow().isoformat() + # NOTE: docker drain sometimes impeed on performance when undraining see https://github.com/ITISFoundation/osparc-simcore/issues/5339 + available = app_settings.AUTOSCALING_DRAIN_NODES_WITH_LABELS or ready + return await tag_node( + docker_client, + node, + tags=new_tags, + available=available, + ) + + +def get_node_last_readyness_update(node: Node) -> datetime.datetime: + assert node.spec # nosec + assert node.spec.labels # nosec + return arrow.get( + node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] + ).datetime + + +async def set_node_found_empty( + docker_client: AutoscalingDocker, + node: Node, + *, + empty: bool, +) -> Node: + assert node.spec # nosec + new_tags = deepcopy(cast(dict[DockerLabelKey, str], node.spec.labels)) + if empty: + new_tags[_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY] = arrow.utcnow().isoformat() + else: + new_tags.pop(_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY, None) + return await tag_node( + docker_client, + node, + tags=new_tags, + available=bool(node.spec.availability is Availability.active), + ) + + +async def get_node_empty_since(node: Node) -> datetime.datetime | None: + """returns the last time when the node was found empty or None if it was not empty""" + assert node.spec # nosec + assert node.spec.labels # nosec + if _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY not in node.spec.labels: + return None + return arrow.get(node.spec.labels[_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY]).datetime + + +async def set_node_begin_termination_process( + docker_client: AutoscalingDocker, node: Node +) -> Node: + """sets the node to drain and adds a docker label with the time""" + assert node.spec # nosec + new_tags = deepcopy(cast(dict[DockerLabelKey, str], node.spec.labels)) + new_tags[_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY] = arrow.utcnow().isoformat() + + return await tag_node( + docker_client, + node, + tags=new_tags, + available=False, + ) + + +def get_node_termination_started_since(node: Node) -> datetime.datetime | None: + assert node.spec # nosec + assert node.spec.labels # nosec + if _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY not in node.spec.labels: + return None + return arrow.get( + node.spec.labels[_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY] + ).datetime + + +async def attach_node( + app_settings: ApplicationSettings, + docker_client: AutoscalingDocker, + node: Node, + *, + tags: dict[DockerLabelKey, str], +) -> Node: + assert node.spec # nosec + current_tags = cast(dict[DockerLabelKey, str], node.spec.labels or {}) + new_tags = current_tags | tags | {_OSPARC_SERVICE_READY_LABEL_KEY: "false"} + new_tags[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = arrow.utcnow().isoformat() + return await tag_node( + docker_client, + node, + tags=new_tags, + available=app_settings.AUTOSCALING_DRAIN_NODES_WITH_LABELS, # NOTE: full drain sometimes impede on performance + ) diff --git a/services/autoscaling/src/simcore_service_autoscaling/utils/utils_ec2.py b/services/autoscaling/src/simcore_service_autoscaling/utils/utils_ec2.py new file mode 100644 index 00000000000..b3b76a48717 --- /dev/null +++ b/services/autoscaling/src/simcore_service_autoscaling/utils/utils_ec2.py @@ -0,0 +1,107 @@ +"""Free helper functions for AWS 
API""" + +import logging +from collections import OrderedDict +from collections.abc import Callable +from textwrap import dedent + +from aws_library.ec2 import AWSTagKey, AWSTagValue, EC2InstanceType, EC2Tags, Resources +from common_library.json_serialization import json_dumps + +from .._meta import VERSION +from ..core.errors import ConfigurationError, TaskBestFittingInstanceNotFoundError +from ..core.settings import ApplicationSettings + +logger = logging.getLogger(__name__) + + +def get_ec2_tags_dynamic(app_settings: ApplicationSettings) -> EC2Tags: + assert app_settings.AUTOSCALING_NODES_MONITORING # nosec + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + return { + AWSTagKey("io.simcore.autoscaling.version"): AWSTagValue(f"{VERSION}"), + AWSTagKey("io.simcore.autoscaling.monitored_nodes_labels"): AWSTagValue( + json_dumps( + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS + ) + ), + AWSTagKey("io.simcore.autoscaling.monitored_services_labels"): AWSTagValue( + json_dumps( + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_SERVICE_LABELS + ) + ), + # NOTE: this one gets special treatment in AWS GUI and is applied to the name of the instance + AWSTagKey("Name"): AWSTagValue( + f"{app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_NAME_PREFIX}-{app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME}" + ), + } | app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_CUSTOM_TAGS + + +def get_ec2_tags_computational(app_settings: ApplicationSettings) -> EC2Tags: + assert app_settings.AUTOSCALING_DASK # nosec + assert app_settings.AUTOSCALING_EC2_INSTANCES # nosec + return { + AWSTagKey("io.simcore.autoscaling.version"): AWSTagValue(f"{VERSION}"), + AWSTagKey("io.simcore.autoscaling.dask-scheduler_url"): AWSTagValue( + f"{app_settings.AUTOSCALING_DASK.DASK_MONITORING_URL}" + ), + # NOTE: this one gets special treatment in AWS GUI and is applied to the name of the instance + AWSTagKey("Name"): AWSTagValue( + f"{app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_NAME_PREFIX}-{app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME}" + ), + } | app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_CUSTOM_TAGS + + +def compose_user_data(docker_join_bash_command: str) -> str: + return dedent( + f"""\ +#!/bin/bash +{docker_join_bash_command} +""" + ) + + +def closest_instance_policy( + ec2_instance: EC2InstanceType, + resources: Resources, +) -> float: + if ( + ec2_instance.resources.cpus < resources.cpus + or ec2_instance.resources.ram < resources.ram + ): + return 0 + # compute a score for all the instances that are above expectations + # best is the exact ec2 instance + assert ec2_instance.resources.cpus > 0 # nosec + assert ec2_instance.resources.ram > 0 # nosec + cpu_ratio = float(ec2_instance.resources.cpus - resources.cpus) / float( + ec2_instance.resources.cpus + ) + ram_ratio = float(ec2_instance.resources.ram - resources.ram) / float( + ec2_instance.resources.ram + ) + return 100 * (1.0 - cpu_ratio) * (1.0 - ram_ratio) + + +def find_best_fitting_ec2_instance( + allowed_ec2_instances: list[EC2InstanceType], + resources: Resources, + score_type: Callable[[EC2InstanceType, Resources], float] = closest_instance_policy, +) -> EC2InstanceType: + if not allowed_ec2_instances: + raise ConfigurationError(msg="allowed ec2 instances is missing!") + score_to_ec2_candidate: dict[float, EC2InstanceType] = OrderedDict( + sorted( + { + score_type(instance, resources): instance + for instance in allowed_ec2_instances + }.items(), + reverse=True, + ) + 
) + + score, instance = next(iter(score_to_ec2_candidate.items())) + if score == 0: + raise TaskBestFittingInstanceNotFoundError(needed_resources=resources) + + return instance diff --git a/services/autoscaling/tests/manual/.env-devel b/services/autoscaling/tests/manual/.env-devel new file mode 100644 index 00000000000..e654a4df523 --- /dev/null +++ b/services/autoscaling/tests/manual/.env-devel @@ -0,0 +1,55 @@ +AUTOSCALING_DEBUG=true +AUTOSCALING_DRAIN_NODES_WITH_LABELS=False +AUTOSCALING_DOCKER_JOIN_DRAINED=True +AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION=False +AUTOSCALING_LOGLEVEL=INFO +AUTOSCALING_POLL_INTERVAL="00:00:10" +AUTOSCALING_EC2_ACCESS_KEY_ID=XXXXXXXXXX +AUTOSCALING_EC2_SECRET_ACCESS_KEY=XXXXXXXXXX +AUTOSCALING_EC2_ENDPOINT=null +AUTOSCALING_EC2_REGION_NAME=us-east-1 +AUTOSCALING_REMOTE_DEBUGGING_PORT=3000 +AUTOSCALING_SSM_ACCESS={} +AUTOSCALING_EC2_INSTANCES={} + +EC2_INSTANCES_MACHINES_BUFFER=0 +EC2_INSTANCES_MAX_INSTANCES=20 +EC2_INSTANCES_TIME_BEFORE_DRAINING="00:00:10" +EC2_INSTANCES_TIME_BEFORE_TERMINATION="00:03:00" +EC2_INSTANCES_ALLOWED_TYPES={"t2.micro": {"ami_id": "XXXXXXXX", "custom_boot_scripts": ["whoami"], "pre_pull_images": ["ubuntu:latest"]}} +EC2_INSTANCES_ATTACHED_IAM_PROFILE=XXXXXXXXX +EC2_INSTANCES_KEY_NAME=XXXXXXXXXX +EC2_INSTANCES_NAME_PREFIX=testing-osparc-computational-cluster +EC2_INSTANCES_SECURITY_GROUP_IDS=["XXXXXXXXXX"] +EC2_INSTANCES_SUBNET_ID=XXXXXXXXXX +EC2_INSTANCES_CUSTOM_TAGS={"special": "testing"} +EC2_INSTANCES_TIME_BEFORE_DRAINING=00:00:20 +EC2_INSTANCES_TIME_BEFORE_TERMINATION=00:01:00 +LOG_FORMAT_LOCAL_DEV_ENABLED=True +# define the following to activate dynamic autoscaling +# AUTOSCALING_NODES_MONITORING={} +# NODES_MONITORING_NEW_NODES_LABELS=["testing.autoscaled-node"] +# NODES_MONITORING_NODE_LABELS=["testing.monitored-node"] +# NODES_MONITORING_SERVICE_LABELS=["testing.monitored-service"] + +# may be activated or not +# RABBIT_HOST=rabbit +# RABBIT_PASSWORD=test +# RABBIT_PORT=5672 +# RABBIT_SECURE=false +# RABBIT_USER=test +REDIS_HOST=redis +REDIS_PORT=6379 +REGISTRY_AUTH=True +REGISTRY_AWS_REGION=XXXXXXXX +REGISTRY_PATH=XXXXXXXX +REGISTRY_SSL=True +REGISTRY_URL=XXXXXXXX +REGISTRY_USER=XXXXXXXX +REGISTRY_PW=XXXXXXXX +SC_BOOT_MODE=debug +SC_BUILD_TARGET=development +SSM_ACCESS_KEY_ID=XXXXXXXXX +SSM_ENDPOINT=XXXXXXXXX +SSM_REGION_NAME=XXXXXXXXX +SSM_SECRET_ACCESS_KEY=XXXXXXXXX diff --git a/services/autoscaling/tests/manual/Makefile b/services/autoscaling/tests/manual/Makefile new file mode 100644 index 00000000000..f6d8b97e4bb --- /dev/null +++ b/services/autoscaling/tests/manual/Makefile @@ -0,0 +1,43 @@ +include ../../../../scripts/common.Makefile +include ../../../../scripts/common-service.Makefile + + +.PHONY: up-devel up-computational-devel down + + +.stack-devel.yml: .env + ../../../../scripts/docker/docker-stack-config.bash -e .env \ + docker-compose.yml \ + > $@ + +.stack-computational-devel.yml: .env + ../../../../scripts/docker/docker-stack-config.bash -e .env \ + docker-compose.yml \ + docker-compose-computational.yml \ + > $@ + +up-devel: .init-swarm .stack-devel.yml ## starts local test application + @docker stack deploy --with-registry-auth --compose-file=.stack-devel.yml autoscaling + # to follow logs of autoscaling, run + # docker service logs --follow autoscaling_autoscaling + +up-computational-devel: .init-swarm .stack-computational-devel.yml ## starts local test application in computational mode + # DASK_MONITORING_URL set to $(DASK_MONITORING_URL) + @docker stack deploy --with-registry-auth 
--compose-file=.stack-computational-devel.yml comp-autoscaling + # to follow logs of autoscaling, run + # docker service logs --follow comp-autoscaling_autoscaling + +down: .env ## stops local test app dependencies (running bare metal against AWS) + # remove stacks + -@docker stack rm comp-autoscaling + -@docker stack rm autoscaling + # remove stack files + -rm -rf .stack-devel.yml + -rm -rf .stack-computational-devel.yml + + +SWARM_HOSTS = $(shell docker node ls --format="{{.Hostname}}" 2>$(if $(IS_WIN),NUL,/dev/null)) +.PHONY: .init-swarm +.init-swarm: + # Ensures swarm is initialized (careful we use a default pool of 172.20.0.0/14. Ensure you do not use private IPs in that range!) + $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip) --default-addr-pool 172.20.0.0/14) diff --git a/services/autoscaling/tests/manual/README.md b/services/autoscaling/tests/manual/README.md new file mode 100644 index 00000000000..d1819c72eef --- /dev/null +++ b/services/autoscaling/tests/manual/README.md @@ -0,0 +1,125 @@ +# autoscaling service manual testing + +The autoscaling service may be started either in computational mode or in dynamic mode. + +The computational mode is used in conjunction with a dask-scheduler/dask-worker subsystem. +The dynamic mode is used directly with docker swarm facilities. + +## requirements + +1. AWS EC2 access +2. a machine running in EC2 with docker installed and access to the osparc-simcore repository (for example t2.xlarge to have some computational power) +3. Note that VS Code Remote can be used to code directly on the EC2 instance. + + +## computational mode + +When ```DASK_MONITORING_URL``` and ```DASK_SCHEDULER_AUTH``` are set, the computational mode is enabled. + + +### instructions + +1. prepare autoscaling + +```bash +# run on EC2 instance +git clone https://github.com/ITISFoundation/osparc-simcore.git +cd osparc-simcore/services/autoscaling +make build-devel # this will build the autoscaling devel image +``` + +2. setup environment variables +```bash +# run on EC2 instance +cd osparc-simcore/services/autoscaling/tests/manual +make .env # generate an initial .env file +nano .env # edit .env and set the variables as needed +``` + +3. start the autoscaling/dask-scheduler stack +```bash +# run on EC2 instance +cd osparc-simcore/services/autoscaling/tests/manual +make up-computational-devel # this will deploy the autoscaling/dask-scheduler/worker stack +``` + +4. 
start some dask tasks to trigger autoscaling +```bash +# run on any host +cd osparc-simcore/services/autoscaling +make install-dev +pip install ipython +ipython +``` +```python +import distributed +# connect to the dask-scheduler running on the EC2 machine +client = distributed.Client("tcp://{EC2_INSTANCE_PUBLIC_IP}:8786") + +# some dummy test function to run remotely +def test_fct(x, y): + return x + y + +# send the task over to the dask-scheduler +future = client.submit(test_fct, 3, 54, resources={"CPU": 1}, pure=False) + +# this will trigger the autoscaling service to create a new machine (ensure the EC2_INSTANCES_ALLOWED_TYPES variable allows for machines capable of running the job with the requested resources) +# after about 3 minutes the job will run +future.done() # returns True once done + +# remove the future from the dask-scheduler memory; this will trigger the autoscaling service to remove the created machine +del future +``` + + +## dynamic mode + +When ```NODES_MONITORING_NEW_NODES_LABELS```, ```NODES_MONITORING_NODE_LABELS``` and ```NODES_MONITORING_SERVICE_LABELS``` are set, the dynamic mode is enabled. + +### instructions + +1. prepare autoscaling + +```bash +# run on EC2 instance +git clone https://github.com/ITISFoundation/osparc-simcore.git +cd osparc-simcore/services/autoscaling +make build-devel # this will build the autoscaling devel image +``` + +2. setup environment variables +```bash +# run on EC2 instance +cd osparc-simcore/services/autoscaling/tests/manual +make .env # generate an initial .env file +nano .env # edit .env and set the variables as needed +# in particular NODES_MONITORING_NEW_NODES_LABELS, NODES_MONITORING_NODE_LABELS, NODES_MONITORING_SERVICE_LABELS must be activated +``` + +3. start the autoscaling stack +```bash +# run on EC2 instance +cd osparc-simcore/services/autoscaling/tests/manual +make up-devel # this will deploy the autoscaling stack +``` + +4. 
start some docker services to trigger autoscaling +```bash +# run on EC2 instance +docker service create \ +--name=test-service \ +--reserve-cpu=1 \ +--reserve-memory=512MiB \ +--constraint=node.labels.testing.monitored-node==true \ +--constraint=node.labels.io.simcore.osparc-services-ready==true \ +--label=testing.monitored-service=true \ +--container-label=io.simcore.runtime.user-id=99 \ +--container-label=io.simcore.runtime.project-id='5054a589-3ba4-46c3-829d-2e3d1a6a043f' \ +--container-label=io.simcore.runtime.node-id='a054a589-3ba4-46c3-829d-2e3d1a6a043a' \ +--container-label=io.simcore.runtime.product-name=theproduct \ +--container-label=io.simcore.runtime.simcore-user-agent=theagent \ +--container-label=io.simcore.runtime.swarm-stack-name=thestack \ +--container-label=io.simcore.runtime.memory-limit=1GB \ +--container-label=io.simcore.runtime.cpu-limit=1 \ +redis # will create a redis service reserving 1 CPUs and 512MiB of RAM +``` diff --git a/services/autoscaling/tests/manual/dask-manual-tester.ipynb b/services/autoscaling/tests/manual/dask-manual-tester.ipynb new file mode 100644 index 00000000000..362b6326b89 --- /dev/null +++ b/services/autoscaling/tests/manual/dask-manual-tester.ipynb @@ -0,0 +1,85 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import distributed" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "client = distributed.Client(\"tcp://XXXXXXXXXXX:8786\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def square(x):\n", + " import time\n", + " time.sleep(15)\n", + " return x ** 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "A = client.map(square, range(500), resources={\"CPU\": 1}, pure=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "del A\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def fct(dask_scheduler: distributed.Scheduler):\n", + " return f\"{dask_scheduler.workers}\"\n", + " return f\"{dask_scheduler.workers_to_close()}\"\n", + "print(client.run_on_scheduler(fct))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/services/autoscaling/tests/manual/docker-compose-computational.yml b/services/autoscaling/tests/manual/docker-compose-computational.yml new file mode 100644 index 00000000000..d97387ca95b --- /dev/null +++ b/services/autoscaling/tests/manual/docker-compose-computational.yml @@ -0,0 +1,49 @@ +services: + autoscaling: + environment: + - AUTOSCALING_DASK={} + - DASK_MONITORING_URL=tcp://dask-scheduler:8786 + - DASK_SCHEDULER_AUTH={} + dask-sidecar: + image: itisfoundation/dask-sidecar:master-github-latest + init: true + hostname: "{{.Node.Hostname}}-{{.Service.Name}}" + volumes: + - computational_shared_data:${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} + - /var/run/docker.sock:/var/run/docker.sock:ro + - 
${ETC_HOSTNAME:-/etc/hostname}:/home/scu/hostname:ro + + environment: + DASK_LOG_FORMAT_LOCAL_DEV_ENABLED: 1 + DASK_NPROCS: 1 + DASK_SCHEDULER_URL: ${DASK_SCHEDULER_URL:-tcp://dask-scheduler:8786} + DASK_SIDECAR_NON_USABLE_RAM: 0 + DASK_SIDECAR_NUM_NON_USABLE_CPUS: 0 + LOG_LEVEL: ${LOG_LEVEL:-INFO} + SIDECAR_COMP_SERVICES_SHARED_FOLDER: ${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} + SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME: computational_shared_data + + deploy: + mode: global + placement: + constraints: + - "node.role==worker" + + dask-scheduler: + image: itisfoundation/dask-sidecar:master-github-latest + init: true + hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + environment: + DASK_START_AS_SCHEDULER: 1 + LOG_LEVEL: ${LOG_LEVEL:-INFO} + ports: + - 8786:8786 + - 8787:8787 + deploy: + placement: + constraints: + - "node.role==manager" + +volumes: + computational_shared_data: + name: computational_shared_data diff --git a/services/autoscaling/tests/manual/docker-compose.yml b/services/autoscaling/tests/manual/docker-compose.yml new file mode 100644 index 00000000000..a28712fb0af --- /dev/null +++ b/services/autoscaling/tests/manual/docker-compose.yml @@ -0,0 +1,53 @@ +services: + rabbit: + image: itisfoundation/rabbitmq:3.13.7-management + init: true + hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + ports: + # - "5672:5672" + - "15672:15672" + - "15692" + environment: + - RABBITMQ_DEFAULT_USER=${RABBIT_USER} + - RABBITMQ_DEFAULT_PASS=${RABBIT_PASSWORD} + healthcheck: + # see https://www.rabbitmq.com/monitoring.html#individual-checks for info about health-checks available in rabbitmq + test: rabbitmq-diagnostics -q status + interval: 5s + timeout: 30s + retries: 5 + start_period: 5s + + redis: + image: "redis:6.2.6@sha256:4bed291aa5efb9f0d77b76ff7d4ab71eee410962965d052552db1fb80576431d" + init: true + hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + # ports: + # - "6379:6379" + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 30s + retries: 50 + volumes: + - redis-data:/data + + autoscaling: + image: local/autoscaling:development + init: true + hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + ports: + - "8006:8000" + - "3012:3000" + env_file: + - .env + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + - ../../:/devel/services/autoscaling + - ../../../../packages:/devel/packages + deploy: + placement: + constraints: + - "node.role==manager" +volumes: + redis-data: diff --git a/services/autoscaling/tests/unit/conftest.py b/services/autoscaling/tests/unit/conftest.py index 217ad60afd9..a49ec4e46b2 100644 --- a/services/autoscaling/tests/unit/conftest.py +++ b/services/autoscaling/tests/unit/conftest.py @@ -3,71 +3,114 @@ # pylint:disable=redefined-outer-name import asyncio +import dataclasses +import datetime import json +import logging import random -from datetime import timezone +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator +from copy import deepcopy from pathlib import Path -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Final, - Iterator, - Optional, - Union, - cast, -) +from typing import Any, Final, TypeAlias, cast, get_args +from unittest import mock import aiodocker +import arrow +import distributed import httpx import psutil import pytest -import requests import simcore_service_autoscaling -from aiohttp.test_utils import unused_port from asgi_lifespan import LifespanManager +from aws_library.ec2 
import ( + EC2InstanceBootSpecific, + EC2InstanceData, + EC2InstanceType, + Resources, +) +from common_library.json_serialization import json_dumps from deepdiff import DeepDiff from faker import Faker from fakeredis.aioredis import FakeRedis from fastapi import FastAPI -from models_library.docker import DockerLabelKey, SimcoreServiceDockerLabelKeys +from models_library.docker import ( + DockerGenericTag, + DockerLabelKey, + StandardSimcoreDockerLabels, +) from models_library.generated_models.docker_rest_api import ( Availability, - Node, +) +from models_library.generated_models.docker_rest_api import Node as DockerNode +from models_library.generated_models.docker_rest_api import ( NodeDescription, NodeSpec, + NodeState, + NodeStatus, ObjectVersion, ResourceObject, Service, + TaskSpec, ) -from moto.server import ThreadedMotoServer -from pydantic import ByteSize, PositiveInt, parse_obj_as -from pytest import MonkeyPatch +from pydantic import ByteSize, NonNegativeInt, PositiveInt, TypeAdapter +from pytest_mock import MockType from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_docker import get_localhost_ip -from pytest_simcore.helpers.utils_envs import EnvVarsDict, setenvs_from_dict +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.monkeypatch_envs import ( + EnvVarsDict, + delenvs_from_dict, + setenvs_from_dict, +) from settings_library.rabbit import RabbitSettings +from settings_library.ssm import SSMSettings +from simcore_service_autoscaling.constants import PRE_PULLED_IMAGES_EC2_TAG_KEY from simcore_service_autoscaling.core.application import create_app -from simcore_service_autoscaling.core.settings import ApplicationSettings, EC2Settings +from simcore_service_autoscaling.core.settings import ( + AUTOSCALING_ENV_PREFIX, + ApplicationSettings, + AutoscalingEC2Settings, + EC2InstancesSettings, + EC2Settings, +) +from simcore_service_autoscaling.models import ( + AssociatedInstance, + Cluster, + DaskTaskResources, +) +from simcore_service_autoscaling.modules import auto_scaling_core +from simcore_service_autoscaling.modules.auto_scaling_mode_dynamic import ( + DynamicAutoscaling, +) from simcore_service_autoscaling.modules.docker import AutoscalingDocker -from simcore_service_autoscaling.modules.ec2 import AutoscalingEC2, EC2InstanceData -from tenacity import retry -from tenacity._asyncio import AsyncRetrying +from simcore_service_autoscaling.modules.ec2 import SimcoreEC2API +from simcore_service_autoscaling.utils.buffer_machines_pool_core import ( + get_deactivated_buffer_ec2_tags, +) +from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, +) +from tenacity import after_log, before_sleep_log, retry from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -from types_aiobotocore_ec2.client import EC2Client -from types_aiobotocore_ec2.literals import InstanceTypeType +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.type_defs import TagTypeDef pytest_plugins = [ + "pytest_simcore.aws_server", + "pytest_simcore.aws_ec2_service", + "pytest_simcore.aws_iam_service", + "pytest_simcore.aws_ssm_service", + "pytest_simcore.dask_scheduler", + "pytest_simcore.docker", "pytest_simcore.docker_compose", 
"pytest_simcore.docker_swarm", "pytest_simcore.environment_configs", - "pytest_simcore.monkeypatch_extra", "pytest_simcore.rabbit_service", "pytest_simcore.repository_paths", - "pytest_simcore.tmp_path_extra", ] @@ -87,69 +130,287 @@ def installed_package_dir() -> Path: return dirpath +@pytest.fixture +def mocked_ec2_server_envs( + mocked_ec2_server_settings: EC2Settings, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # NOTE: overrides the EC2Settings with what autoscaling expects + changed_envs: EnvVarsDict = { + f"{AUTOSCALING_ENV_PREFIX}{k}": v + for k, v in mocked_ec2_server_settings.model_dump().items() + } + return setenvs_from_dict(monkeypatch, changed_envs) # type: ignore + + +@pytest.fixture( + params=[ + "with_AUTOSCALING_DRAIN_NODES_WITH_LABELS", + "without_AUTOSCALING_DRAIN_NODES_WITH_LABELS", + ] +) +def with_drain_nodes_labelled(request: pytest.FixtureRequest) -> bool: + return bool(request.param == "with_AUTOSCALING_DRAIN_NODES_WITH_LABELS") + + +@pytest.fixture +def with_labelize_drain_nodes( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + with_drain_nodes_labelled: bool, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "AUTOSCALING_DRAIN_NODES_WITH_LABELS": f"{with_drain_nodes_labelled}", + }, + ) + + +@pytest.fixture( + params=[ + "with_AUTOSCALING_DOCKER_JOIN_DRAINED", + "without_AUTOSCALING_DOCKER_JOIN_DRAINED", + ] +) +def with_docker_join_drained(request: pytest.FixtureRequest) -> bool: + return bool(request.param == "with_AUTOSCALING_DOCKER_JOIN_DRAINED") + + +@pytest.fixture +def app_with_docker_join_drained( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + with_docker_join_drained: bool, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "AUTOSCALING_DOCKER_JOIN_DRAINED": f"{with_docker_join_drained}", + }, + ) + + @pytest.fixture(scope="session") -def ec2_instances() -> list[InstanceTypeType]: - # these are some examples - return ["t2.nano", "m5.12xlarge"] +def fake_ssm_settings() -> SSMSettings: + assert "json_schema_extra" in SSMSettings.model_config + assert isinstance(SSMSettings.model_config["json_schema_extra"], dict) + assert isinstance(SSMSettings.model_config["json_schema_extra"]["examples"], list) + return SSMSettings.model_validate( + SSMSettings.model_config["json_schema_extra"]["examples"][0] + ) + + +@pytest.fixture +def ec2_settings() -> EC2Settings: + return AutoscalingEC2Settings.create_from_envs() + + +@pytest.fixture +def ec2_instance_custom_tags( + faker: Faker, + external_envfile_dict: EnvVarsDict, +) -> dict[str, str]: + if external_envfile_dict: + return json.loads(external_envfile_dict["EC2_INSTANCES_CUSTOM_TAGS"]) + return {"osparc-tag": faker.text(max_nb_chars=80), "pytest": faker.pystr()} + + +@pytest.fixture +def external_ec2_instances_allowed_types( + external_envfile_dict: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None | dict[str, EC2InstanceBootSpecific]: + if not external_envfile_dict: + return None + with monkeypatch.context() as patch: + setenvs_from_dict(patch, {**external_envfile_dict}) + settings = EC2InstancesSettings.create_from_envs() + return settings.EC2_INSTANCES_ALLOWED_TYPES @pytest.fixture def app_environment( mock_env_devel_environment: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, faker: Faker, - ec2_instances: list[InstanceTypeType], + aws_allowed_ec2_instance_type_names: list[InstanceTypeType], + ec2_instance_custom_tags: dict[str, str], + 
external_envfile_dict: EnvVarsDict, ) -> EnvVarsDict: # SEE https://faker.readthedocs.io/en/master/providers/faker.providers.internet.html?highlight=internet#faker-providers-internet + if external_envfile_dict: + delenvs_from_dict(monkeypatch, mock_env_devel_environment, raising=False) + return setenvs_from_dict(monkeypatch, {**external_envfile_dict}) + + assert "json_schema_extra" in EC2InstanceBootSpecific.model_config + assert isinstance(EC2InstanceBootSpecific.model_config["json_schema_extra"], dict) + assert isinstance( + EC2InstanceBootSpecific.model_config["json_schema_extra"]["examples"], list + ) envs = setenvs_from_dict( monkeypatch, { - "EC2_ACCESS_KEY_ID": faker.pystr(), - "EC2_SECRET_ACCESS_KEY": faker.pystr(), + "AUTOSCALING_EC2_ACCESS": "{}", + "AUTOSCALING_EC2_ACCESS_KEY_ID": faker.pystr(), + "AUTOSCALING_EC2_SECRET_ACCESS_KEY": faker.pystr(), + "AUTOSCALING_EC2_INSTANCES": "{}", + "AUTOSCALING_SSM_ACCESS": "{}", + "AUTOSCALING_TRACING": "null", + "SSM_ACCESS_KEY_ID": faker.pystr(), + "SSM_SECRET_ACCESS_KEY": faker.pystr(), "EC2_INSTANCES_KEY_NAME": faker.pystr(), "EC2_INSTANCES_SECURITY_GROUP_IDS": json.dumps( faker.pylist(allowed_types=(str,)) ), "EC2_INSTANCES_SUBNET_ID": faker.pystr(), - "EC2_INSTANCES_AMI_ID": faker.pystr(), - "EC2_INSTANCES_ALLOWED_TYPES": json.dumps(ec2_instances), - "NODES_MONITORING_NODE_LABELS": json.dumps(["pytest.fake-node-label"]), - "NODES_MONITORING_SERVICE_LABELS": json.dumps( - ["pytest.fake-service-label"] - ), - "NODES_MONITORING_NEW_NODES_LABELS": json.dumps( - ["pytest.fake-new-node-label"] + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + ec2_type_name: random.choice( # noqa: S311 + EC2InstanceBootSpecific.model_config["json_schema_extra"][ + "examples" + ] + ) + for ec2_type_name in aws_allowed_ec2_instance_type_names + } ), + "EC2_INSTANCES_CUSTOM_TAGS": json.dumps(ec2_instance_custom_tags), + "EC2_INSTANCES_ATTACHED_IAM_PROFILE": faker.pystr(), }, ) return mock_env_devel_environment | envs @pytest.fixture -def disable_dynamic_service_background_task(mocker: MockerFixture) -> Iterator[None]: +def mocked_ec2_instances_envs( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + aws_security_group_id: str, + aws_subnet_id: str, + aws_ami_id: str, + aws_allowed_ec2_instance_type_names: list[InstanceTypeType], + aws_instance_profile: str, +) -> EnvVarsDict: + assert "json_schema_extra" in EC2InstanceBootSpecific.model_config + assert isinstance(EC2InstanceBootSpecific.model_config["json_schema_extra"], dict) + assert isinstance( + EC2InstanceBootSpecific.model_config["json_schema_extra"]["examples"], list + ) + envs = setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_KEY_NAME": "osparc-pytest", + "EC2_INSTANCES_SECURITY_GROUP_IDS": json.dumps([aws_security_group_id]), + "EC2_INSTANCES_SUBNET_ID": aws_subnet_id, + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + ec2_type_name: cast( + dict, + random.choice( # noqa: S311 + EC2InstanceBootSpecific.model_config["json_schema_extra"][ + "examples" + ] + ), + ) + | {"ami_id": aws_ami_id} + for ec2_type_name in aws_allowed_ec2_instance_type_names + } + ), + "EC2_INSTANCES_ATTACHED_IAM_PROFILE": aws_instance_profile, + }, + ) + return app_environment | envs + + +@pytest.fixture +def disable_autoscaling_background_task(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_task.create_periodic_task", + autospec=True, + ) + mocker.patch( - "simcore_service_autoscaling.dynamic_scaling.start_periodic_task", + 
"simcore_service_autoscaling.modules.auto_scaling_task.cancel_wait_task", autospec=True, ) + +@pytest.fixture +def disable_buffers_pool_background_task(mocker: MockerFixture) -> None: mocker.patch( - "simcore_service_autoscaling.dynamic_scaling.stop_periodic_task", + "simcore_service_autoscaling.modules.buffer_machines_pool_task.create_periodic_task", autospec=True, ) - yield + mocker.patch( + "simcore_service_autoscaling.modules.buffer_machines_pool_task.cancel_wait_task", + autospec=True, + ) @pytest.fixture -def disabled_rabbitmq(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): - monkeypatch.delenv("RABBIT_HOST") - monkeypatch.delenv("RABBIT_USER") - monkeypatch.delenv("RABBIT_PASSWORD") +def with_enabled_buffer_pools( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "AUTOSCALING_SSM_ACCESS": "{}", + }, + ) @pytest.fixture -def disabled_ec2(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): - monkeypatch.delenv("EC2_ACCESS_KEY_ID") +def enabled_dynamic_mode( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "AUTOSCALING_NODES_MONITORING": "{}", + "NODES_MONITORING_NODE_LABELS": json.dumps(["pytest.fake-node-label"]), + "NODES_MONITORING_SERVICE_LABELS": json.dumps( + ["pytest.fake-service-label"] + ), + "NODES_MONITORING_NEW_NODES_LABELS": json.dumps( + ["pytest.fake-new-node-label"] + ), + }, + ) + + +@pytest.fixture +def enabled_computational_mode( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "AUTOSCALING_DASK": "{}", + "DASK_MONITORING_URL": faker.url(), + "DASK_SCHEDULER_AUTH": "{}", + "DASK_MONITORING_USER_NAME": faker.user_name(), + "DASK_MONITORING_PASSWORD": faker.password(), + }, + ) + + +@pytest.fixture +def disabled_rabbitmq( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None: + monkeypatch.setenv("AUTOSCALING_RABBITMQ", "null") + + +@pytest.fixture +def disabled_ec2(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("AUTOSCALING_EC2_ACCESS", "null") + + +@pytest.fixture +def disabled_ssm(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("AUTOSCALING_SSM_ACCESS", "null") @pytest.fixture @@ -159,11 +420,17 @@ def enabled_rabbitmq( return rabbit_service +_LIFESPAN_TIMEOUT: Final[int] = 10 + + @pytest.fixture async def initialized_app(app_environment: EnvVarsDict) -> AsyncIterator[FastAPI]: settings = ApplicationSettings.create_from_envs() app = create_app(settings) - async with LifespanManager(app): + # NOTE: the timeout is sometime too small for CI machines, and even larger machines + async with LifespanManager( + app, startup_timeout=_LIFESPAN_TIMEOUT, shutdown_timeout=_LIFESPAN_TIMEOUT + ): yield app @@ -186,9 +453,8 @@ def service_monitored_labels( @pytest.fixture async def async_client(initialized_app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: - async with httpx.AsyncClient( - app=initialized_app, + transport=httpx.ASGITransport(app=initialized_app), base_url=f"http://{initialized_app.title}.testserver.io", headers={"Content-Type": "application/json"}, ) as client: @@ -201,42 +467,92 @@ async def autoscaling_docker() -> AsyncIterator[AutoscalingDocker]: yield cast(AutoscalingDocker, docker_client) -@pytest.fixture 
-async def async_docker_client() -> AsyncIterator[aiodocker.Docker]: - async with aiodocker.Docker() as docker_client: - yield docker_client - - @pytest.fixture async def host_node( docker_swarm: None, async_docker_client: aiodocker.Docker, -) -> Node: - nodes = parse_obj_as(list[Node], await async_docker_client.nodes.list()) +) -> AsyncIterator[DockerNode]: + nodes = TypeAdapter(list[DockerNode]).validate_python( + await async_docker_client.nodes.list() + ) assert len(nodes) == 1 - return nodes[0] + # keep state of node for later revert + old_node = deepcopy(nodes[0]) + assert old_node.id + assert old_node.spec + assert old_node.spec.role + assert old_node.spec.availability + assert old_node.version + assert old_node.version.index + labels = old_node.spec.labels or {} + # ensure we have the necessary labels + await async_docker_client.nodes.update( + node_id=old_node.id, + version=old_node.version.index, + spec={ + "Availability": old_node.spec.availability.value, + "Labels": labels + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "true", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: arrow.utcnow().isoformat(), + }, + "Role": old_node.spec.role.value, + }, + ) + modified_host_node = TypeAdapter(DockerNode).validate_python( + await async_docker_client.nodes.inspect(node_id=old_node.id) + ) + yield modified_host_node + # revert state + current_node = TypeAdapter(DockerNode).validate_python( + await async_docker_client.nodes.inspect(node_id=old_node.id) + ) + assert current_node.id + assert current_node.version + assert current_node.version.index + await async_docker_client.nodes.update( + node_id=current_node.id, + version=current_node.version.index, + spec={ + "Availability": old_node.spec.availability.value, + "Labels": old_node.spec.labels, + "Role": old_node.spec.role.value, + }, + ) @pytest.fixture -def fake_node(faker: Faker) -> Node: - return Node( - ID=faker.uuid4(), - Version=ObjectVersion(Index=faker.pyint()), - CreatedAt=faker.date_time(tzinfo=timezone.utc).isoformat(), - UpdatedAt=faker.date_time(tzinfo=timezone.utc).isoformat(), - Description=NodeDescription( - Hostname=faker.pystr(), - Resources=ResourceObject( - NanoCPUs=int(9 * 1e9), MemoryBytes=256 * 1024 * 1024 * 1024 +def create_fake_node(faker: Faker) -> Callable[..., DockerNode]: + def _creator(**node_overrides) -> DockerNode: + default_config = { + "ID": faker.uuid4(), + "Version": ObjectVersion(index=faker.pyint()), + "CreatedAt": datetime.datetime.now(tz=datetime.UTC).isoformat(), + "UpdatedAt": datetime.datetime.now(tz=datetime.UTC).isoformat(), + "Description": NodeDescription( + hostname=faker.pystr(), + resources=ResourceObject( + nano_cp_us=int(9 * 1e9), + memory_bytes=TypeAdapter(ByteSize).validate_python("256GiB"), + ), ), - ), - Spec=NodeSpec( - Name=None, - Labels=None, - Role=None, - Availability=Availability.drain, - ), - ) + "Spec": NodeSpec( + name=None, + labels=faker.pydict(allowed_types=(str,)), + role=None, + availability=Availability.drain, + ), + "Status": NodeStatus(state=NodeState.unknown, message=None, addr=None), + } + default_config.update(**node_overrides) + return DockerNode(**default_config) + + return _creator + + +@pytest.fixture +def fake_node(create_fake_node: Callable[..., DockerNode]) -> DockerNode: + return create_fake_node() @pytest.fixture @@ -249,12 +565,12 @@ def task_template() -> dict[str, Any]: _GIGA_NANO_CPU = 10**9 -NUM_CPUS = PositiveInt +NUM_CPUS: TypeAlias = PositiveInt @pytest.fixture def create_task_reservations() -> Callable[[NUM_CPUS, int], dict[str, Any]]: - def 
_creator(num_cpus: NUM_CPUS, memory: Union[ByteSize, int]) -> dict[str, Any]: + def _creator(num_cpus: NUM_CPUS, memory: ByteSize | int) -> dict[str, Any]: return { "Resources": { "Reservations": { @@ -269,7 +585,7 @@ def _creator(num_cpus: NUM_CPUS, memory: Union[ByteSize, int]) -> dict[str, Any] @pytest.fixture def create_task_limits() -> Callable[[NUM_CPUS, int], dict[str, Any]]: - def _creator(num_cpus: NUM_CPUS, memory: Union[ByteSize, int]) -> dict[str, Any]: + def _creator(num_cpus: NUM_CPUS, memory: ByteSize | int) -> dict[str, Any]: return { "Resources": { "Limits": { @@ -288,62 +604,86 @@ async def create_service( docker_swarm: None, faker: Faker, ) -> AsyncIterator[ - Callable[[dict[str, Any], Optional[dict[str, str]]], Awaitable[Service]] + Callable[[dict[str, Any], dict[DockerLabelKey, str] | None], Awaitable[Service]] ]: created_services = [] async def _creator( task_template: dict[str, Any], - labels: Optional[dict[str, str]] = None, + labels: dict[DockerLabelKey, str] | None = None, wait_for_service_state="running", + placement_constraints: list[str] | None = None, ) -> Service: service_name = f"pytest_{faker.pystr()}" - if labels: - task_labels = task_template.setdefault("ContainerSpec", {}).setdefault( - "Labels", {} + base_labels: dict[DockerLabelKey, Any] = {} + task_labels = task_template.setdefault("ContainerSpec", {}).setdefault( + "Labels", base_labels + ) + if placement_constraints: + task_template.setdefault("Placement", {}).setdefault( + "Constraints", placement_constraints ) + if labels: task_labels |= labels - service = await async_docker_client.services.create( - task_template=task_template, - name=service_name, - labels=labels or {}, # type: ignore - ) - assert service - service = parse_obj_as( - Service, await async_docker_client.services.inspect(service["ID"]) - ) - assert service.Spec - print(f"--> created docker service {service.ID} with {service.Spec.Name}") - assert service.Spec.Labels == (labels or {}) + base_labels |= labels + with log_context( + logging.INFO, msg=f"create docker service {service_name}" + ) as ctx: + service = await async_docker_client.services.create( + task_template=task_template, + name=service_name, + labels=base_labels, # type: ignore + ) + assert service + service = TypeAdapter(Service).validate_python( + await async_docker_client.services.inspect(service["ID"]) + ) + assert service.spec + ctx.logger.info( + "%s", + f"service {service.id} with {service.spec.name} created", + ) + assert service.spec.labels == base_labels created_services.append(service) # get more info on that service - assert service.Spec.Name == service_name + assert service.spec.name == service_name + + original_task_template_model = TypeAdapter(TaskSpec).validate_python( + task_template + ) + excluded_paths = { - "ForceUpdate", - "Runtime", - "root['ContainerSpec']['Isolation']", + "force_update", + "runtime", + "root['container_spec']['isolation']", } - for reservation in ["MemoryBytes", "NanoCPUs"]: + if not base_labels: + excluded_paths.add("root['container_spec']['labels']") + for reservation in ["memory_bytes", "nano_cp_us"]: if ( - task_template.get("Resources", {}) - .get("Reservations", {}) - .get(reservation, 0) + original_task_template_model.resources + and original_task_template_model.resources.reservations + and getattr( + original_task_template_model.resources.reservations, reservation + ) == 0 ): # NOTE: if a 0 memory reservation is done, docker removes it from the task inspection excluded_paths.add( - 
f"root['Resources']['Reservations']['{reservation}']" + f"root['resources']['reservations']['{reservation}']" ) + + assert service.spec.task_template diff = DeepDiff( - task_template, - service.Spec.TaskTemplate.dict(exclude_unset=True), - exclude_paths=excluded_paths, + original_task_template_model.model_dump(exclude_unset=True), + service.spec.task_template.model_dump(exclude_unset=True), + exclude_paths=list(excluded_paths), ) assert not diff, f"{diff}" - assert service.Spec.Labels == (labels or {}) - await assert_for_service_state( + assert service.spec.labels == base_labels + await _assert_wait_for_service_state( async_docker_client, service, [wait_for_service_state] ) return service @@ -351,8 +691,10 @@ async def _creator( yield _creator await asyncio.gather( - *(async_docker_client.services.delete(s.ID) for s in created_services) + *(async_docker_client.services.delete(s.id) for s in created_services), + return_exceptions=True, ) + # wait until all tasks are gone @retry( retry=retry_if_exception_type(AssertionError), @@ -361,42 +703,48 @@ async def _creator( stop=stop_after_delay(30), ) async def _check_service_task_gone(service: Service) -> None: - assert service.Spec - print( - f"--> checking if service {service.ID}:{service.Spec.Name} is really gone..." - ) - assert not await async_docker_client.containers.list( - all=True, - filters={ - "label": [f"com.docker.swarm.service.id={service.ID}"], - }, - ) - print(f"<-- service {service.ID}:{service.Spec.Name} is gone.") + assert service.spec + with log_context( + logging.INFO, + msg=f"check service {service.id}:{service.spec.name} is really gone", + ): + assert not await async_docker_client.containers.list( + all=True, + filters={ + "label": [f"com.docker.swarm.service.id={service.id}"], + }, + ) await asyncio.gather(*(_check_service_task_gone(s) for s in created_services)) await asyncio.sleep(0) -async def assert_for_service_state( +SUCCESS_STABLE_TIME_S: Final[float] = 3 +WAIT_TIME: Final[float] = 0.5 + + +async def _assert_wait_for_service_state( async_docker_client: aiodocker.Docker, service: Service, expected_states: list[str] ) -> None: - SUCCESS_STABLE_TIME_S: Final[float] = 3 - WAIT_TIME: Final[float] = 0.5 - number_of_success = 0 - async for attempt in AsyncRetrying( - retry=retry_if_exception_type(AssertionError), - reraise=True, - wait=wait_fixed(WAIT_TIME), - stop=stop_after_delay(10 * SUCCESS_STABLE_TIME_S), - ): - with attempt: - print( - f"--> waiting for service {service.ID} to become {expected_states}..." - ) + with log_context( + logging.INFO, msg=f"wait for service {service.id} to become {expected_states}" + ) as ctx: + number_of_success = {"count": 0} + + @retry( + retry=retry_if_exception_type(AssertionError), + reraise=True, + wait=wait_fixed(WAIT_TIME), + stop=stop_after_delay(10 * SUCCESS_STABLE_TIME_S), + before_sleep=before_sleep_log(ctx.logger, logging.DEBUG), + after=after_log(ctx.logger, logging.DEBUG), + ) + async def _() -> None: + assert service.id services = await async_docker_client.services.list( - filters={"id": service.ID} + filters={"id": service.id} ) - assert services, f"no service with {service.ID}!" + assert services, f"no service with {service.id}!" 
assert len(services) == 1 found_service = services[0] @@ -409,241 +757,493 @@ async def assert_for_service_state( assert ( service_task["Status"]["State"] in expected_states ), f"service {found_service['Spec']['Name']}'s task is {service_task['Status']['State']}" - print( - f"<-- service {found_service['Spec']['Name']} is now {service_task['Status']['State']} {'.'*number_of_success}" + ctx.logger.info( + "%s", + f"service {found_service['Spec']['Name']} is now {service_task['Status']['State']} {'.' * number_of_success['count']}", ) - number_of_success += 1 - assert (number_of_success * WAIT_TIME) >= SUCCESS_STABLE_TIME_S - print( - f"<-- service {found_service['Spec']['Name']} is now {service_task['Status']['State']} after {SUCCESS_STABLE_TIME_S} seconds" + number_of_success["count"] += 1 + assert (number_of_success["count"] * WAIT_TIME) >= SUCCESS_STABLE_TIME_S + ctx.logger.info( + "%s", + f"service {found_service['Spec']['Name']} is now {service_task['Status']['State']} after {SUCCESS_STABLE_TIME_S} seconds", ) - -@pytest.fixture(scope="module") -def mocked_aws_server() -> Iterator[ThreadedMotoServer]: - """creates a moto-server that emulates AWS services in place - NOTE: Never use a bucket with underscores it fails!! - """ - server = ThreadedMotoServer(ip_address=get_localhost_ip(), port=unused_port()) - # pylint: disable=protected-access - print(f"--> started mock AWS server on {server._ip_address}:{server._port}") - print( - f"--> Dashboard available on [http://{server._ip_address}:{server._port}/moto-api/]" - ) - server.start() - yield server - server.stop() - print(f"<-- stopped mock AWS server on {server._ip_address}:{server._port}") + await _() -@pytest.fixture -def reset_aws_server_state(mocked_aws_server: ThreadedMotoServer) -> Iterator[None]: - # NOTE: reset_aws_server_state [http://docs.getmoto.org/en/latest/docs/server_mode.html#reset-api] - yield - # pylint: disable=protected-access - requests.post( - f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}/moto-api/reset", - timeout=10, - ) +@pytest.fixture(scope="session") +def aws_allowed_ec2_instance_type_names() -> list[InstanceTypeType]: + return [ + "t2.xlarge", + "t2.2xlarge", + "g4dn.2xlarge", + "g4dn.8xlarge", + "r5n.4xlarge", + "r5n.8xlarge", + ] @pytest.fixture -def mocked_aws_server_envs( +def aws_allowed_ec2_instance_type_names_env( app_environment: EnvVarsDict, - mocked_aws_server: ThreadedMotoServer, - reset_aws_server_state: None, monkeypatch: pytest.MonkeyPatch, -) -> Iterator[EnvVarsDict]: - changed_envs = { - "EC2_ENDPOINT": f"http://{mocked_aws_server._ip_address}:{mocked_aws_server._port}", # pylint: disable=protected-access - "EC2_ACCESS_KEY_ID": "xxx", - "EC2_SECRET_ACCESS_KEY": "xxx", + aws_allowed_ec2_instance_type_names: list[InstanceTypeType], +) -> EnvVarsDict: + changed_envs: dict[str, str | bool] = { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps(aws_allowed_ec2_instance_type_names), } - yield app_environment | setenvs_from_dict(monkeypatch, changed_envs) + return app_environment | setenvs_from_dict(monkeypatch, changed_envs) @pytest.fixture -def aws_allowed_ec2_instance_type_names( - app_environment: EnvVarsDict, - monkeypatch: pytest.MonkeyPatch, -) -> Iterator[EnvVarsDict]: - changed_envs = { - "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( - [ - "t2.xlarge", - "t2.2xlarge", - "g3.4xlarge", - "r5n.4xlarge", - "r5n.8xlarge", - ] - ), - } - yield app_environment | setenvs_from_dict(monkeypatch, changed_envs) +def host_cpu_count() -> int: + cpus = psutil.cpu_count() + assert cpus is not None 
+ return cpus -@pytest.fixture(scope="session") -def vpc_cidr_block() -> str: - return "10.0.0.0/16" +@pytest.fixture +def host_memory_total() -> ByteSize: + return ByteSize(psutil.virtual_memory().total) @pytest.fixture -async def aws_vpc_id( - mocked_aws_server_envs: None, - app_environment: EnvVarsDict, - monkeypatch: pytest.MonkeyPatch, - ec2_client: EC2Client, - vpc_cidr_block: str, -) -> AsyncIterator[str]: - vpc = await ec2_client.create_vpc( - CidrBlock=vpc_cidr_block, +def osparc_docker_label_keys( + faker: Faker, +) -> StandardSimcoreDockerLabels: + return StandardSimcoreDockerLabels.model_validate( + { + "user_id": faker.pyint(), + "project_id": faker.uuid4(), + "node_id": faker.uuid4(), + } ) - vpc_id = vpc["Vpc"]["VpcId"] # type: ignore - print(f"--> Created Vpc in AWS with {vpc_id=}") - yield vpc_id - await ec2_client.delete_vpc(VpcId=vpc_id) - print(f"<-- Deleted Vpc in AWS with {vpc_id=}") + +@pytest.fixture +def aws_instance_private_dns() -> str: + return "ip-10-23-40-12.ec2.internal" -@pytest.fixture(scope="session") -def subnet_cidr_block() -> str: - return "10.0.1.0/24" +@pytest.fixture +def fake_localhost_ec2_instance_data( + fake_ec2_instance_data: Callable[..., EC2InstanceData], +) -> EC2InstanceData: + local_ip = get_localhost_ip() + fake_local_ec2_private_dns = f"ip-{local_ip.replace('.', '-')}.ec2.internal" + return fake_ec2_instance_data(aws_private_dns=fake_local_ec2_private_dns) @pytest.fixture -async def aws_subnet_id( - monkeypatch: pytest.MonkeyPatch, - aws_vpc_id: str, - ec2_client: EC2Client, - subnet_cidr_block: str, -) -> AsyncIterator[str]: - subnet = await ec2_client.create_subnet( - CidrBlock=subnet_cidr_block, VpcId=aws_vpc_id - ) - assert "Subnet" in subnet - assert "SubnetId" in subnet["Subnet"] - subnet_id = subnet["Subnet"]["SubnetId"] - print(f"--> Created Subnet in AWS with {subnet_id=}") - - monkeypatch.setenv("EC2_INSTANCES_SUBNET_ID", subnet_id) - yield subnet_id - - # all the instances in the subnet must be terminated before that works - instances_in_subnet = await ec2_client.describe_instances( - Filters=[{"Name": "subnet-id", "Values": [subnet_id]}] - ) - if instances_in_subnet["Reservations"]: - print(f"--> terminating {len(instances_in_subnet)} instances in subnet") - await ec2_client.terminate_instances( - InstanceIds=[ - instance["Instances"][0]["InstanceId"] # type: ignore - for instance in instances_in_subnet["Reservations"] - ] +async def mocked_redis_server(mocker: MockerFixture) -> None: + mock_redis = FakeRedis() + mocker.patch("redis.asyncio.from_url", return_value=mock_redis) + + +@pytest.fixture +def cluster() -> Callable[..., Cluster]: + def _creator(**cluter_overrides) -> Cluster: + return dataclasses.replace( + Cluster( + active_nodes=[], + pending_nodes=[], + drained_nodes=[], + buffer_drained_nodes=[], + pending_ec2s=[], + broken_ec2s=[], + buffer_ec2s=[], + disconnected_nodes=[], + terminating_nodes=[], + retired_nodes=[], + terminated_instances=[], + ), + **cluter_overrides, ) - print(f"<-- terminated {len(instances_in_subnet)} instances in subnet") - await ec2_client.delete_subnet(SubnetId=subnet_id) - subnets = await ec2_client.describe_subnets() - print(f"<-- Deleted Subnet in AWS with {subnet_id=}") - print(f"current {subnets=}") + return _creator @pytest.fixture -async def aws_security_group_id( - monkeypatch: pytest.MonkeyPatch, - faker: Faker, - aws_vpc_id: str, - ec2_client: EC2Client, -) -> AsyncIterator[str]: - security_group = await ec2_client.create_security_group( - Description=faker.text(), 
GroupName=faker.pystr(), VpcId=aws_vpc_id +async def create_dask_task( + dask_spec_cluster_client: distributed.Client, +) -> Callable[..., distributed.Future]: + def _remote_pytest_fct(x: int, y: int) -> int: + return x + y + + def _creator( + required_resources: DaskTaskResources, **overrides + ) -> distributed.Future: + # NOTE: pure will ensure dask does not re-use the task results if we run it several times + future = dask_spec_cluster_client.submit( + _remote_pytest_fct, + 23, + 43, + resources=required_resources, + pure=False, + **overrides, + ) + assert future + return future + + return _creator + + +@pytest.fixture +def mock_docker_set_node_availability(mocker: MockerFixture) -> mock.Mock: + async def _fake_set_node_availability( + docker_client: AutoscalingDocker, node: DockerNode, *, available: bool + ) -> DockerNode: + returned_node = deepcopy(node) + assert returned_node.spec + returned_node.spec.availability = ( + Availability.active if available else Availability.drain + ) + returned_node.updated_at = datetime.datetime.now(tz=datetime.UTC).isoformat() + return returned_node + + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.set_node_availability", + autospec=True, + side_effect=_fake_set_node_availability, ) - security_group_id = security_group["GroupId"] - print(f"--> Created Security Group in AWS with {security_group_id=}") - monkeypatch.setenv( - "EC2_INSTANCES_SECURITY_GROUP_IDS", json.dumps([security_group_id]) + + +@pytest.fixture +def mock_docker_tag_node(mocker: MockerFixture) -> mock.Mock: + async def fake_tag_node( + docker_client: AutoscalingDocker, + node: DockerNode, + *, + tags: dict[DockerLabelKey, str], + available: bool, + ) -> DockerNode: + updated_node = deepcopy(node) + assert updated_node.spec + updated_node.spec.labels = deepcopy(cast(dict[str, str], tags)) + updated_node.spec.availability = ( + Availability.active if available else Availability.drain + ) + return updated_node + + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.tag_node", + autospec=True, + side_effect=fake_tag_node, ) - yield security_group_id - await ec2_client.delete_security_group(GroupId=security_group_id) - print(f"<-- Deleted Security Group in AWS with {security_group_id=}") @pytest.fixture -async def aws_ami_id( +def patch_ec2_client_launch_instances_min_number_of_instances( + mocker: MockerFixture, +) -> mock.Mock: + """the moto library always returns min number of instances instead of max number of instances which makes + it difficult to test scaling to multiple of machines. 
this should help""" + original_fct = SimcoreEC2API.launch_instances + + async def _change_parameters(*args, **kwargs) -> list[EC2InstanceData]: + new_kwargs = kwargs | {"min_number_of_instances": kwargs["number_of_instances"]} + print(f"patching launch_instances with: {new_kwargs}") + return await original_fct(*args, **new_kwargs) + + return mocker.patch.object( + SimcoreEC2API, + "launch_instances", + autospec=True, + side_effect=_change_parameters, + ) + + +@pytest.fixture +def random_fake_available_instances(faker: Faker) -> list[EC2InstanceType]: + list_of_instances = [ + EC2InstanceType( + name=random.choice(get_args(InstanceTypeType)), # noqa: S311 + resources=Resources(cpus=n, ram=ByteSize(n)), + ) + for n in range(1, 30) + ] + random.shuffle(list_of_instances) + return list_of_instances + + +@pytest.fixture +def create_associated_instance( + fake_ec2_instance_data: Callable[..., EC2InstanceData], + app_settings: ApplicationSettings, + faker: Faker, + host_cpu_count: int, + host_memory_total: ByteSize, +) -> Callable[[DockerNode, bool, dict[str, Any]], AssociatedInstance]: + def _creator( + node: DockerNode, + terminateable_time: bool, + fake_ec2_instance_data_override: dict[str, Any] | None = None, + ) -> AssociatedInstance: + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert ( + datetime.timedelta(seconds=10) + < app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ), "this tests relies on the fact that the time before termination is above 10 seconds" + assert app_settings.AUTOSCALING_EC2_INSTANCES + seconds_delta = ( + -datetime.timedelta(seconds=10) + if terminateable_time + else datetime.timedelta(seconds=10) + ) + + if fake_ec2_instance_data_override is None: + fake_ec2_instance_data_override = {} + + return AssociatedInstance( + node=node, + ec2_instance=fake_ec2_instance_data( + launch_time=datetime.datetime.now(datetime.UTC) + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + - datetime.timedelta( + days=faker.pyint(min_value=0, max_value=100), + hours=faker.pyint(min_value=0, max_value=100), + ) + + seconds_delta, + resources=Resources(cpus=host_cpu_count, ram=host_memory_total), + **fake_ec2_instance_data_override, + ), + ) + + return _creator + + +@pytest.fixture +def num_hot_buffer() -> NonNegativeInt: + return 5 + + +@pytest.fixture +def with_instances_machines_hot_buffer( + num_hot_buffer: int, app_environment: EnvVarsDict, - mocked_aws_server_envs: None, monkeypatch: pytest.MonkeyPatch, - ec2_client: EC2Client, -) -> str: - images = await ec2_client.describe_images() - image = random.choice(images["Images"]) - ami_id = image["ImageId"] # type: ignore - monkeypatch.setenv("EC2_INSTANCES_AMI_ID", ami_id) - return ami_id +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_MACHINES_BUFFER": f"{num_hot_buffer}", + }, + ) @pytest.fixture -async def autoscaling_ec2( - app_environment: EnvVarsDict, -) -> AsyncIterator[AutoscalingEC2]: - settings = EC2Settings.create_from_envs() - ec2 = await AutoscalingEC2.create(settings) - assert ec2 - yield ec2 - await ec2.close() +def hot_buffer_instance_type(app_settings: ApplicationSettings) -> InstanceTypeType: + assert app_settings.AUTOSCALING_EC2_INSTANCES + return cast( + InstanceTypeType, + next(iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES)), + ) @pytest.fixture -async def ec2_client( - autoscaling_ec2: AutoscalingEC2, -) -> AsyncIterator[EC2Client]: - yield autoscaling_ec2.client +def 
mock_find_node_with_name_returns_none(mocker: MockerFixture) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.find_node_with_name", + autospec=True, + return_value=None, + ) + + +@pytest.fixture(scope="session") +def short_ec2_instance_max_start_time() -> datetime.timedelta: + return datetime.timedelta(seconds=10) @pytest.fixture -def host_cpu_count() -> int: - return psutil.cpu_count() +def with_short_ec2_instances_max_start_time( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + short_ec2_instance_max_start_time: datetime.timedelta, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_MAX_START_TIME": f"{short_ec2_instance_max_start_time}", + }, + ) @pytest.fixture -def host_memory_total() -> ByteSize: - return ByteSize(psutil.virtual_memory().total) +async def spied_cluster_analysis(mocker: MockerFixture) -> MockType: + return mocker.spy(auto_scaling_core, "_analyze_current_cluster") @pytest.fixture -def osparc_docker_label_keys( - faker: Faker, -) -> SimcoreServiceDockerLabelKeys: - return SimcoreServiceDockerLabelKeys.parse_obj( - dict(user_id=faker.pyint(), project_id=faker.uuid4(), node_id=faker.uuid4()) +async def mocked_associate_ec2_instances_with_nodes(mocker: MockerFixture) -> mock.Mock: + async def _( + nodes: list[DockerNode], ec2_instances: list[EC2InstanceData] + ) -> tuple[list[AssociatedInstance], list[EC2InstanceData]]: + return [], ec2_instances + + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.associate_ec2_instances_with_nodes", + autospec=True, + side_effect=_, ) @pytest.fixture -def aws_instance_private_dns() -> str: - return "ip-10-23-40-12.ec2.internal" +def fake_pre_pull_images() -> list[DockerGenericTag]: + return TypeAdapter(list[DockerGenericTag]).validate_python( + [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ] + ) @pytest.fixture -def fake_ec2_instance_data(faker: Faker) -> Callable[..., EC2InstanceData]: - def _creator(**overrides) -> EC2InstanceData: - return EC2InstanceData( - **( - { - "launch_time": faker.date_time(tzinfo=timezone.utc), - "id": faker.uuid4(), - "aws_private_dns": faker.name(), - "type": faker.pystr(), - "state": faker.pystr(), - } - | overrides +def ec2_instances_allowed_types_with_only_1_buffered( + faker: Faker, + fake_pre_pull_images: list[DockerGenericTag], + external_ec2_instances_allowed_types: None | dict[str, EC2InstanceBootSpecific], +) -> dict[InstanceTypeType, EC2InstanceBootSpecific]: + if not external_ec2_instances_allowed_types: + return { + "t2.micro": EC2InstanceBootSpecific( + ami_id=faker.pystr(), + pre_pull_images=fake_pre_pull_images, + buffer_count=faker.pyint(min_value=2, max_value=10), ) + } + + allowed_ec2_types = external_ec2_instances_allowed_types + allowed_ec2_types_with_buffer_defined = dict( + filter( + lambda instance_type_and_settings: instance_type_and_settings[ + 1 + ].buffer_count + > 0, + allowed_ec2_types.items(), ) + ) + assert ( + allowed_ec2_types_with_buffer_defined + ), "one type with buffer is needed for the tests!" + assert ( + len(allowed_ec2_types_with_buffer_defined) == 1 + ), "more than one type with buffer is disallowed in this test!" 
+ return { + TypeAdapter(InstanceTypeType).validate_python(k): v + for k, v in allowed_ec2_types_with_buffer_defined.items() + } - return _creator + +@pytest.fixture +def buffer_count( + ec2_instances_allowed_types_with_only_1_buffered: dict[ + InstanceTypeType, EC2InstanceBootSpecific + ], +) -> int: + def _by_buffer_count( + instance_type_and_settings: tuple[InstanceTypeType, EC2InstanceBootSpecific], + ) -> bool: + _, boot_specific = instance_type_and_settings + return boot_specific.buffer_count > 0 + + allowed_ec2_types = ec2_instances_allowed_types_with_only_1_buffered + allowed_ec2_types_with_buffer_defined = dict( + filter(_by_buffer_count, allowed_ec2_types.items()) + ) + assert allowed_ec2_types_with_buffer_defined, "you need one type with buffer" + assert ( + len(allowed_ec2_types_with_buffer_defined) == 1 + ), "more than one type with buffer is disallowed in this test!" + return next(iter(allowed_ec2_types_with_buffer_defined.values())).buffer_count @pytest.fixture -async def mocked_redis_server(mocker: MockerFixture) -> None: - mock_redis = FakeRedis() - mocker.patch("redis.asyncio.from_url", return_value=mock_redis) +async def create_buffer_machines( + ec2_client: EC2Client, + aws_ami_id: str, + app_settings: ApplicationSettings, + initialized_app: FastAPI, +) -> Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag] | None], + Awaitable[list[str]], +]: + async def _do( + num: int, + instance_type: InstanceTypeType, + instance_state_name: InstanceStateNameType, + pre_pull_images: list[DockerGenericTag] | None, + ) -> list[str]: + assert app_settings.AUTOSCALING_EC2_INSTANCES + + assert instance_state_name in [ + "running", + "stopped", + ], "only 'running' and 'stopped' are supported for testing" + + resource_tags: list[TagTypeDef] = [ + {"Key": tag_key, "Value": tag_value} + for tag_key, tag_value in get_deactivated_buffer_ec2_tags( + initialized_app, DynamicAutoscaling() + ).items() + ] + if pre_pull_images is not None and instance_state_name == "stopped": + resource_tags.append( + { + "Key": PRE_PULLED_IMAGES_EC2_TAG_KEY, + "Value": f"{json_dumps(pre_pull_images)}", + } + ) + with log_context( + logging.INFO, f"creating {num} buffer machines of {instance_type}" + ): + instances = await ec2_client.run_instances( + ImageId=aws_ami_id, + MaxCount=num, + MinCount=num, + InstanceType=instance_type, + KeyName=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_KEY_NAME, + SecurityGroupIds=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SECURITY_GROUP_IDS, + SubnetId=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_SUBNET_ID, + IamInstanceProfile={ + "Arn": app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ATTACHED_IAM_PROFILE + }, + TagSpecifications=[ + {"ResourceType": "instance", "Tags": resource_tags}, + {"ResourceType": "volume", "Tags": resource_tags}, + {"ResourceType": "network-interface", "Tags": resource_tags}, + ], + UserData="echo 'I am pytest'", + ) + instance_ids = [ + i["InstanceId"] for i in instances["Instances"] if "InstanceId" in i + ] + + waiter = ec2_client.get_waiter("instance_exists") + await waiter.wait(InstanceIds=instance_ids) + instances = await ec2_client.describe_instances(InstanceIds=instance_ids) + assert "Reservations" in instances + assert instances["Reservations"] + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == num + for instance in instances["Reservations"][0]["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + 
assert instance["State"]["Name"] == "running" + + if instance_state_name == "stopped": + await ec2_client.stop_instances(InstanceIds=instance_ids) + instances = await ec2_client.describe_instances(InstanceIds=instance_ids) + assert "Reservations" in instances + assert instances["Reservations"] + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == num + for instance in instances["Reservations"][0]["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == "stopped" + + return instance_ids + + return _do diff --git a/services/autoscaling/tests/unit/test_api_health.py b/services/autoscaling/tests/unit/test_api_health.py index 1ad9bbda13c..e3c22afddac 100644 --- a/services/autoscaling/tests/unit/test_api_health.py +++ b/services/autoscaling/tests/unit/test_api_health.py @@ -5,7 +5,7 @@ import httpx import pytest from moto.server import ThreadedMotoServer -from pytest_simcore.helpers.utils_envs import EnvVarsDict +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict from simcore_service_autoscaling.api.health import _StatusGet from starlette import status @@ -20,7 +20,8 @@ def app_environment( app_environment: EnvVarsDict, enabled_rabbitmq: None, - mocked_aws_server_envs: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, mocked_redis_server: None, ) -> EnvVarsDict: return app_environment @@ -35,12 +36,13 @@ async def test_healthcheck(async_client: httpx.AsyncClient): async def test_status_no_rabbit( disabled_rabbitmq: None, + with_enabled_buffer_pools: EnvVarsDict, async_client: httpx.AsyncClient, ): response = await async_client.get("/status") response.raise_for_status() assert response.status_code == status.HTTP_200_OK - status_response = _StatusGet.parse_obj(response.json()) + status_response = _StatusGet.model_validate(response.json()) assert status_response assert status_response.rabbitmq.is_enabled is False @@ -49,12 +51,41 @@ async def test_status_no_rabbit( assert status_response.ec2.is_enabled is True assert status_response.ec2.is_responsive is True + assert status_response.ssm.is_enabled is True + assert status_response.ssm.is_responsive is True + + assert status_response.docker.is_enabled is True + assert status_response.docker.is_responsive is True + + +async def test_status_no_ssm( + disabled_rabbitmq: None, + disabled_ssm: None, + async_client: httpx.AsyncClient, +): + response = await async_client.get("/status") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK + status_response = _StatusGet.model_validate(response.json()) + assert status_response + + assert status_response.rabbitmq.is_enabled is False + assert status_response.rabbitmq.is_responsive is False + + assert status_response.ec2.is_enabled is True + assert status_response.ec2.is_responsive is True + + assert status_response.ssm.is_enabled is False + assert status_response.ssm.is_responsive is False + assert status_response.docker.is_enabled is True assert status_response.docker.is_responsive is True async def test_status( mocked_aws_server: ThreadedMotoServer, + with_enabled_buffer_pools: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, async_client: httpx.AsyncClient, ): # stop the aws server... 
@@ -63,7 +94,7 @@ async def test_status( response = await async_client.get("/status") response.raise_for_status() assert response.status_code == status.HTTP_200_OK - status_response = _StatusGet.parse_obj(response.json()) + status_response = _StatusGet.model_validate(response.json()) assert status_response assert status_response.rabbitmq.is_enabled is True @@ -72,6 +103,9 @@ async def test_status( assert status_response.ec2.is_enabled is True assert status_response.ec2.is_responsive is False + assert status_response.ssm.is_enabled is True + assert status_response.ssm.is_responsive is False + assert status_response.docker.is_enabled is True assert status_response.docker.is_responsive is True # restart the server @@ -80,7 +114,7 @@ async def test_status( response = await async_client.get("/status") response.raise_for_status() assert response.status_code == status.HTTP_200_OK - status_response = _StatusGet.parse_obj(response.json()) + status_response = _StatusGet.model_validate(response.json()) assert status_response assert status_response.rabbitmq.is_enabled is True @@ -89,5 +123,8 @@ async def test_status( assert status_response.ec2.is_enabled is True assert status_response.ec2.is_responsive is True + assert status_response.ssm.is_enabled is True + assert status_response.ssm.is_responsive is True + assert status_response.docker.is_enabled is True assert status_response.docker.is_responsive is True diff --git a/services/autoscaling/tests/unit/test_core_settings.py b/services/autoscaling/tests/unit/test_core_settings.py index e8a117c19b6..bc63be64cff 100644 --- a/services/autoscaling/tests/unit/test_core_settings.py +++ b/services/autoscaling/tests/unit/test_core_settings.py @@ -1,80 +1,336 @@ +# pylint: disable=no-member # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable import datetime import json +import logging +import os +from typing import Final import pytest -from pytest_simcore.helpers.utils_envs import EnvVarsDict -from simcore_service_autoscaling.core.settings import ApplicationSettings +from faker import Faker +from pydantic import ValidationError +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from settings_library.base import _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING +from simcore_service_autoscaling.core.settings import ( + ApplicationSettings, + EC2InstancesSettings, +) +from types_aiobotocore_ec2.literals import InstanceTypeType + + +def test_ec2_instances_settings(app_environment: EnvVarsDict): + settings = EC2InstancesSettings.create_from_envs() + assert isinstance(settings.EC2_INSTANCES_ALLOWED_TYPES, dict) + + +@pytest.fixture +def instance_type_with_invalid_boot_script( + mock_env_devel_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, + aws_allowed_ec2_instance_type_names: list[InstanceTypeType], +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + ec2_type_name: { + "ami_id": faker.pystr(), + "custom_boot_scripts": ['ls"'], + } + for ec2_type_name in aws_allowed_ec2_instance_type_names + } + ), + }, + ) + + +def test_ec2_instances_settings_with_invalid_custom_script_raises( + app_environment: EnvVarsDict, instance_type_with_invalid_boot_script: EnvVarsDict +): + with pytest.raises(ValidationError): + EC2InstancesSettings.create_from_envs() def test_settings(app_environment: EnvVarsDict): settings = ApplicationSettings.create_from_envs() assert settings.AUTOSCALING_EC2_ACCESS assert 
settings.AUTOSCALING_EC2_INSTANCES + assert settings.AUTOSCALING_NODES_MONITORING is None + assert settings.AUTOSCALING_DASK is None + assert settings.AUTOSCALING_RABBITMQ + assert settings.AUTOSCALING_REDIS + + +def test_settings_dynamic_mode(enabled_dynamic_mode: EnvVarsDict): + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_ACCESS + assert settings.AUTOSCALING_SSM_ACCESS + assert settings.AUTOSCALING_EC2_INSTANCES assert settings.AUTOSCALING_NODES_MONITORING + assert settings.AUTOSCALING_DASK is None + assert settings.AUTOSCALING_RABBITMQ + assert settings.AUTOSCALING_REDIS + + +def test_settings_computational_mode(enabled_computational_mode: EnvVarsDict): + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_ACCESS + assert settings.AUTOSCALING_SSM_ACCESS + assert settings.AUTOSCALING_EC2_INSTANCES + assert settings.AUTOSCALING_NODES_MONITORING is None + assert settings.AUTOSCALING_DASK assert settings.AUTOSCALING_RABBITMQ assert settings.AUTOSCALING_REDIS -def test_invalid_EC2_INSTANCES_TIME_BEFORE_TERMINATION( +def test_defining_both_computational_and_dynamic_modes_is_invalid_and_raises( + enabled_dynamic_mode: EnvVarsDict, enabled_computational_mode: EnvVarsDict +): + with pytest.raises(ValidationError): + ApplicationSettings.create_from_envs() + + +def test_invalid_EC2_INSTANCES_TIME_BEFORE_DRAINING( # noqa: N802 app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch ): - monkeypatch.setenv("EC2_INSTANCES_TIME_BEFORE_TERMINATION", "1:05:00") + setenvs_from_dict(monkeypatch, {"EC2_INSTANCES_TIME_BEFORE_DRAINING": "1:05:00"}) settings = ApplicationSettings.create_from_envs() assert settings.AUTOSCALING_EC2_INSTANCES - assert settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + assert settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING assert ( - settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - == datetime.timedelta(minutes=59) + datetime.timedelta(minutes=1) + == settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING ) - - monkeypatch.setenv("EC2_INSTANCES_TIME_BEFORE_TERMINATION", "-1:05:00") + setenvs_from_dict(monkeypatch, {"EC2_INSTANCES_TIME_BEFORE_DRAINING": "-1:05:00"}) settings = ApplicationSettings.create_from_envs() assert settings.AUTOSCALING_EC2_INSTANCES assert ( - settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - == datetime.timedelta(minutes=0) + datetime.timedelta(seconds=10) + == settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING ) -def test_EC2_INSTANCES_PRE_PULL_IMAGES( +def test_invalid_EC2_INSTANCES_TIME_BEFORE_TERMINATION( # noqa: N802 app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +): + setenvs_from_dict(monkeypatch, {"EC2_INSTANCES_TIME_BEFORE_TERMINATION": "1:05:00"}) + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES + assert settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + assert ( + datetime.timedelta(minutes=59) + == settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ) + setenvs_from_dict( + monkeypatch, {"EC2_INSTANCES_TIME_BEFORE_TERMINATION": "-1:05:00"} + ) + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES + assert ( + datetime.timedelta(minutes=0) + == settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ) + + +def test_EC2_INSTANCES_ALLOWED_TYPES_valid( # noqa: N802 + 
app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker ): settings = ApplicationSettings.create_from_envs() assert settings.AUTOSCALING_EC2_INSTANCES - assert settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_PRE_PULL_IMAGES == [] + +def test_EC2_INSTANCES_ALLOWED_TYPES_passing_invalid_image_tags( # noqa: N802 + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, + caplog: pytest.LogCaptureFixture, +): # passing an invalid image tag name will fail - monkeypatch.setenv( - "EC2_INSTANCES_PRE_PULL_IMAGES", json.dumps(["io.simcore.some234.cool-"]) + setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + "t2.micro": { + "ami_id": faker.pystr(), + "pre_pull_images": ["io.simcore.some234.cool-"], + } + } + ) + }, ) - settings = ApplicationSettings.create_from_envs() - assert not settings.AUTOSCALING_EC2_INSTANCES + with caplog.at_level(logging.WARNING): + + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES is None + + assert ( + _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name="AUTOSCALING_EC2_INSTANCES" + ) + in caplog.text + ) + + +def test_EC2_INSTANCES_ALLOWED_TYPES_passing_valid_image_tags( # noqa: N802 + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker +): # passing a valid will pass - monkeypatch.setenv( - "EC2_INSTANCES_PRE_PULL_IMAGES", - json.dumps( - [ - "io.simcore.some234.cool.label", - "com.example.some-label", - "nginx:latest", - "itisfoundation/my-very-nice-service:latest", - "simcore/services/dynamic/another-nice-one:2.4.5", - "asd", - ] - ), + setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + "t2.micro": { + "ami_id": faker.pystr(), + "pre_pull_images": [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ], + } + } + ), + }, ) settings = ApplicationSettings.create_from_envs() assert settings.AUTOSCALING_EC2_INSTANCES - assert settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_PRE_PULL_IMAGES == [ - "io.simcore.some234.cool.label", - "com.example.some-label", + assert next( + iter(settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.values()) + ).pre_pull_images == [ "nginx:latest", "itisfoundation/my-very-nice-service:latest", "simcore/services/dynamic/another-nice-one:2.4.5", "asd", ] + + +ENABLED_VALUE: Final = "{}" + + +def test_EC2_INSTANCES_ALLOWED_TYPES_empty_not_allowed( # noqa: N802 + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +): + assert ( + os.environ["AUTOSCALING_EC2_INSTANCES"] == ENABLED_VALUE + ) # parent field in ApplicationSettings + monkeypatch.setenv( + "EC2_INSTANCES_ALLOWED_TYPES", "{}" + ) # child field in EC2InstancesSettings + + with pytest.raises(ValidationError) as err_info: + # test **child** EC2InstancesSettings + EC2InstancesSettings.create_from_envs() + + assert err_info.value.errors()[0]["loc"] == ("EC2_INSTANCES_ALLOWED_TYPES",) + + +def test_EC2_INSTANCES_ALLOWED_TYPES_empty_not_allowed_with_main_field_env_var( # noqa: N802 + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + caplog: pytest.LogCaptureFixture, +): + assert ( + os.environ["AUTOSCALING_EC2_INSTANCES"] == ENABLED_VALUE + ) # parent field in ApplicationSettings + monkeypatch.setenv( + "EC2_INSTANCES_ALLOWED_TYPES", "{}" + ) # child field in EC2InstancesSettings + + # explicit init of parent -> fails + with pytest.raises(ValidationError) as exc_info: + # 
NOTE: input captured via InitSettingsSource + ApplicationSettings.create_from_envs(AUTOSCALING_EC2_INSTANCES={}) + + assert exc_info.value.error_count() == 1 + error = exc_info.value.errors()[0] + + assert error["type"] == "value_error" + assert error["input"] == {} + assert error["loc"] == ("AUTOSCALING_EC2_INSTANCES", "EC2_INSTANCES_ALLOWED_TYPES") + + # NOTE: input captured via EnvSettingsWithAutoDefaultSource + # default env factory -> None + with caplog.at_level(logging.WARNING): + + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES is None + + assert ( + _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name="AUTOSCALING_EC2_INSTANCES" + ) + in caplog.text + ) + + +def test_EC2_INSTANCES_ALLOWED_TYPES_empty_not_allowed_without_main_field_env_var( # noqa: N802 + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + caplog: pytest.LogCaptureFixture, +): + assert os.environ["AUTOSCALING_EC2_INSTANCES"] == ENABLED_VALUE + monkeypatch.delenv( + "AUTOSCALING_EC2_INSTANCES" + ) # parent field in ApplicationSettings + monkeypatch.setenv( + "EC2_INSTANCES_ALLOWED_TYPES", "{}" + ) # child field in EC2InstancesSettings + + # removing any value for AUTOSCALING_EC2_INSTANCES + caplog.clear() + with caplog.at_level(logging.WARNING): + + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES is None + + assert ( + _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name="AUTOSCALING_EC2_INSTANCES" + ) + in caplog.text + ) + + +def test_EC2_INSTANCES_ALLOWED_TYPES_invalid_instance_names( # noqa: N802 + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, + caplog: pytest.LogCaptureFixture, +): + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES + + # passing an invalid image tag name will fail + setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + faker.pystr(): { + "ami_id": faker.pystr(), + "pre_pull_images": [], + } + } + ) + }, + ) + caplog.clear() + with caplog.at_level(logging.WARNING): + + settings = ApplicationSettings.create_from_envs() + assert settings.AUTOSCALING_EC2_INSTANCES is None + + assert ( + _AUTO_DEFAULT_FACTORY_RESOLVES_TO_NONE_FSTRING.format( + field_name="AUTOSCALING_EC2_INSTANCES" + ) + in caplog.text + ) diff --git a/services/autoscaling/tests/unit/test_dynamic_scaling_core.py b/services/autoscaling/tests/unit/test_dynamic_scaling_core.py deleted file mode 100644 index 04daedd48c7..00000000000 --- a/services/autoscaling/tests/unit/test_dynamic_scaling_core.py +++ /dev/null @@ -1,1089 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -import asyncio -import base64 -import dataclasses -import datetime -import pickle -import warnings -from dataclasses import dataclass -from typing import Any, AsyncIterator, Awaitable, Callable, Iterator -from unittest import mock - -import aiodocker -import pytest -from faker import Faker -from fastapi import FastAPI -from models_library.docker import DockerLabelKey -from models_library.generated_models.docker_rest_api import Node, Service, Task -from models_library.rabbitmq_messages import RabbitAutoscalingStatusMessage -from pydantic import ByteSize, parse_obj_as -from pytest_mock.plugin import MockerFixture -from simcore_service_autoscaling.core.settings import 
ApplicationSettings -from simcore_service_autoscaling.dynamic_scaling_core import ( - _activate_drained_nodes, - _deactivate_empty_nodes, - _find_terminateable_instances, - _try_scale_down_cluster, - cluster_scaling_from_labelled_services, -) -from simcore_service_autoscaling.models import AssociatedInstance, Cluster, Resources -from simcore_service_autoscaling.modules.docker import ( - AutoscalingDocker, - get_docker_client, -) -from simcore_service_autoscaling.modules.ec2 import EC2InstanceData -from types_aiobotocore_ec2.client import EC2Client - - -@pytest.fixture -def cluster() -> Callable[..., Cluster]: - def _creator(**cluter_overrides) -> Cluster: - return dataclasses.replace( - Cluster( - active_nodes=[], - drained_nodes=[], - reserve_drained_nodes=[], - pending_ec2s=[], - disconnected_nodes=[], - terminated_instances=[], - ), - **cluter_overrides, - ) - - return _creator - - -@pytest.fixture -def mock_terminate_instances(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_terminate_instance = mocker.patch( - "simcore_service_autoscaling.modules.ec2.AutoscalingEC2.terminate_instances", - autospec=True, - ) - yield mocked_terminate_instance - - -@pytest.fixture -def mock_start_aws_instance( - mocker: MockerFixture, - aws_instance_private_dns: str, - fake_ec2_instance_data: Callable[..., EC2InstanceData], -) -> Iterator[mock.Mock]: - mocked_start_aws_instance = mocker.patch( - "simcore_service_autoscaling.modules.ec2.AutoscalingEC2.start_aws_instance", - autospec=True, - return_value=fake_ec2_instance_data(aws_private_dns=aws_instance_private_dns), - ) - yield mocked_start_aws_instance - - -@pytest.fixture -def mock_rabbitmq_post_message(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_post_message = mocker.patch( - "simcore_service_autoscaling.utils.rabbitmq.post_message", autospec=True - ) - yield mocked_post_message - - -@pytest.fixture -def mock_find_node_with_name( - mocker: MockerFixture, fake_node: Node -) -> Iterator[mock.Mock]: - mocked_wait_for_node = mocker.patch( - "simcore_service_autoscaling.dynamic_scaling_core.utils_docker.find_node_with_name", - autospec=True, - return_value=fake_node, - ) - yield mocked_wait_for_node - - -@pytest.fixture -def mock_tag_node(mocker: MockerFixture) -> Iterator[mock.Mock]: - async def fake_tag_node(*args, **kwargs) -> Node: - return args[1] - - mocked_tag_node = mocker.patch( - "simcore_service_autoscaling.utils.utils_docker.tag_node", - autospec=True, - side_effect=fake_tag_node, - ) - yield mocked_tag_node - - -@pytest.fixture -def mock_set_node_availability(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_tag_node = mocker.patch( - "simcore_service_autoscaling.utils.utils_docker.set_node_availability", - autospec=True, - ) - yield mocked_tag_node - - -@pytest.fixture -def mock_remove_nodes(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_tag_node = mocker.patch( - "simcore_service_autoscaling.utils.utils_docker.remove_nodes", - autospec=True, - ) - yield mocked_tag_node - - -@pytest.fixture -def mock_cluster_used_resources(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_cluster_used_resources = mocker.patch( - "simcore_service_autoscaling.utils.utils_docker.compute_cluster_used_resources", - autospec=True, - return_value=Resources.create_as_empty(), - ) - yield mocked_cluster_used_resources - - -@pytest.fixture -def mock_compute_node_used_resources(mocker: MockerFixture) -> Iterator[mock.Mock]: - mocked_cluster_used_resources = mocker.patch( - 
"simcore_service_autoscaling.utils.utils_docker.compute_node_used_resources", - autospec=True, - return_value=Resources.create_as_empty(), - ) - yield mocked_cluster_used_resources - - -@pytest.fixture -def mock_machines_buffer(monkeypatch: pytest.MonkeyPatch) -> Iterator[int]: - num_machines_in_buffer = 5 - monkeypatch.setenv("EC2_INSTANCES_MACHINES_BUFFER", f"{num_machines_in_buffer}") - yield num_machines_in_buffer - - -@pytest.fixture -def with_valid_time_before_termination( - monkeypatch: pytest.MonkeyPatch, -) -> datetime.timedelta: - time = "00:11:00" - monkeypatch.setenv("EC2_INSTANCES_TIME_BEFORE_TERMINATION", time) - return parse_obj_as(datetime.timedelta, time) - - -@pytest.fixture -async def drained_host_node( - host_node: Node, async_docker_client: aiodocker.Docker -) -> AsyncIterator[Node]: - assert host_node.ID - assert host_node.Version - assert host_node.Version.Index - assert host_node.Spec - assert host_node.Spec.Availability - assert host_node.Spec.Role - - old_availability = host_node.Spec.Availability - await async_docker_client.nodes.update( - node_id=host_node.ID, - version=host_node.Version.Index, - spec={ - "Availability": "drain", - "Labels": host_node.Spec.Labels, - "Role": host_node.Spec.Role.value, - }, - ) - drained_node = parse_obj_as( - Node, await async_docker_client.nodes.inspect(node_id=host_node.ID) - ) - yield drained_node - # revert - # NOTE: getting the node again as the version might have changed - drained_node = parse_obj_as( - Node, await async_docker_client.nodes.inspect(node_id=host_node.ID) - ) - assert drained_node.ID - assert drained_node.Version - assert drained_node.Version.Index - assert drained_node.Spec - assert drained_node.Spec.Role - reverted_node = ( - await async_docker_client.nodes.update( - node_id=drained_node.ID, - version=drained_node.Version.Index, - spec={ - "Availability": old_availability.value, - "Labels": drained_node.Spec.Labels, - "Role": drained_node.Spec.Role.value, - }, - ), - ) - - -@pytest.fixture -def minimal_configuration( - docker_swarm: None, - disabled_rabbitmq: None, - disable_dynamic_service_background_task: None, - aws_subnet_id: str, - aws_security_group_id: str, - aws_ami_id: str, - aws_allowed_ec2_instance_type_names: list[str], - mocked_redis_server: None, -) -> Iterator[None]: - yield - - -def _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message: mock.Mock, - app_settings: ApplicationSettings, - app: FastAPI, - **message_update_kwargs, -): - assert app_settings.AUTOSCALING_NODES_MONITORING - default_message = RabbitAutoscalingStatusMessage( - origin=f"{app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS}", - nodes_total=0, - nodes_active=0, - nodes_drained=0, - cluster_total_resources=Resources.create_as_empty().dict(), - cluster_used_resources=Resources.create_as_empty().dict(), - instances_pending=0, - instances_running=0, - ) - expected_message = default_message.copy(update=message_update_kwargs) - mock_rabbitmq_post_message.assert_called_once_with( - app, - expected_message, - ) - - -async def test_cluster_scaling_from_labelled_services_with_no_services_does_nothing( - minimal_configuration: None, - app_settings: ApplicationSettings, - initialized_app: FastAPI, - mock_start_aws_instance: mock.Mock, - mock_terminate_instances: mock.Mock, - mock_rabbitmq_post_message: mock.Mock, -): - await cluster_scaling_from_labelled_services(initialized_app) - mock_start_aws_instance.assert_not_called() - mock_terminate_instances.assert_not_called() - 
_assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, app_settings, initialized_app - ) - - -@pytest.fixture -def patch_get_ec2_tags(mocker: MockerFixture, faker: Faker) -> Iterator[mock.Mock]: - # NOTE: this is needed because of a bug in Moto - # https://github.com/getmoto/moto/issues/5966 - warnings.warn( - "patching get_ec2_tags due to issue https://github.com/getmoto/moto/issues/5966 in moto library...", - UserWarning, - ) - - def _json_without_square_brackets(obj) -> str: - return str(pickle.dumps(obj)) - - mocked_terminate_instance = mocker.patch( - "simcore_service_autoscaling.utils.ec2.get_ec2_tags", - autospec=True, - return_value={ - "io.simcore.autoscaling.version": faker.pystr(), - "io.simcore.autoscaling.monitored_nodes_labels": faker.pystr(), - "io.simcore.autoscaling.monitored_services_labels": faker.pystr(), - # NOTE: this one gets special treatment in AWS GUI and is applied to the name of the instance - "Name": faker.pystr(), - }, - ) - yield mocked_terminate_instance - - -async def test_cluster_scaling_from_labelled_services_with_no_services_and_machine_buffer_starts_expected_machines( - minimal_configuration: None, - patch_get_ec2_tags: mock.MagicMock, - mock_machines_buffer: int, - app_settings: ApplicationSettings, - initialized_app: FastAPI, - aws_allowed_ec2_instance_type_names: list[str], - mock_rabbitmq_post_message: mock.Mock, - mock_compute_node_used_resources: mock.Mock, - mock_find_node_with_name: mock.Mock, - mock_tag_node: mock.Mock, - fake_node: Node, - ec2_client: EC2Client, -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - assert ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER - == mock_machines_buffer - ) - await cluster_scaling_from_labelled_services(initialized_app) - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=mock_machines_buffer, - instance_type=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ - 0 - ], - instance_state="running", - ) - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, - app_settings, - initialized_app, - instances_pending=mock_machines_buffer, - ) - mock_rabbitmq_post_message.reset_mock() - # calling again should attach the new nodes to the reserve, but nothing should start - await cluster_scaling_from_labelled_services(initialized_app) - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=mock_machines_buffer, - instance_type=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ - 0 - ], - instance_state="running", - ) - assert fake_node.Description - assert fake_node.Description.Resources - assert fake_node.Description.Resources.NanoCPUs - assert fake_node.Description.Resources.MemoryBytes - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, - app_settings, - initialized_app, - nodes_total=mock_machines_buffer, - nodes_drained=mock_machines_buffer, - instances_running=mock_machines_buffer, - cluster_total_resources={ - "cpus": mock_machines_buffer - * fake_node.Description.Resources.NanoCPUs - / 1e9, - "ram": mock_machines_buffer * fake_node.Description.Resources.MemoryBytes, - }, - ) - - # calling it again should not create anything new - for _ in range(10): - await cluster_scaling_from_labelled_services(initialized_app) - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=mock_machines_buffer, - instance_type=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES[ - 0 - ], - instance_state="running", - ) - 
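
The deleted test above sets EC2_INSTANCES_MACHINES_BUFFER=5 and expects the autoscaler to keep that many warm EC2 instances around even when no services are pending. The snippet below is only a minimal, editorial sketch of such a "top up the warm pool" rule; the names (decide_buffer_top_up, BufferDecision) are hypothetical and are not taken from the repository's implementation.

import dataclasses


@dataclasses.dataclass(frozen=True)
class BufferDecision:
    # hypothetical result type, used only for this illustration
    instances_to_start: int


def decide_buffer_top_up(*, running_or_pending: int, machines_buffer: int) -> BufferDecision:
    # keep the warm pool at `machines_buffer` instances; never return a negative count
    missing = max(machines_buffer - running_or_pending, 0)
    return BufferDecision(instances_to_start=missing)


# with an empty cluster and a buffer of 5, five instances should be requested;
# once five are running or pending, nothing more is started
assert decide_buffer_top_up(running_or_pending=0, machines_buffer=5).instances_to_start == 5
assert decide_buffer_top_up(running_or_pending=5, machines_buffer=5).instances_to_start == 0
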
- -async def test_cluster_scaling_from_labelled_services_with_service_with_too_much_resources_starts_nothing( - minimal_configuration: None, - service_monitored_labels: dict[DockerLabelKey, str], - app_settings: ApplicationSettings, - initialized_app: FastAPI, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - mock_start_aws_instance: mock.Mock, - mock_terminate_instances: mock.Mock, - mock_rabbitmq_post_message: mock.Mock, -): - task_template_with_too_many_resource = task_template | create_task_reservations( - 1000, 0 - ) - await create_service( - task_template_with_too_many_resource, - service_monitored_labels, - "pending", - ) - - await cluster_scaling_from_labelled_services(initialized_app) - mock_start_aws_instance.assert_not_called() - mock_terminate_instances.assert_not_called() - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, app_settings, initialized_app - ) - - -async def _assert_ec2_instances( - ec2_client: EC2Client, - *, - num_reservations: int, - num_instances: int, - instance_type: str, - instance_state: str, -): - all_instances = await ec2_client.describe_instances() - - assert len(all_instances["Reservations"]) == num_reservations - for reservation in all_instances["Reservations"]: - assert "Instances" in reservation - assert len(reservation["Instances"]) == num_instances - for instance in reservation["Instances"]: - assert "InstanceType" in instance - assert instance["InstanceType"] == instance_type - assert "Tags" in instance - assert instance["Tags"] - expected_tag_keys = [ - "io.simcore.autoscaling.version", - "io.simcore.autoscaling.monitored_nodes_labels", - "io.simcore.autoscaling.monitored_services_labels", - "Name", - ] - for tag_dict in instance["Tags"]: - assert "Key" in tag_dict - assert "Value" in tag_dict - - assert tag_dict["Key"] in expected_tag_keys - assert "PrivateDnsName" in instance - instance_private_dns_name = instance["PrivateDnsName"] - assert instance_private_dns_name.endswith(".ec2.internal") - assert "State" in instance - state = instance["State"] - assert "Name" in state - assert state["Name"] == instance_state - - assert "InstanceId" in instance - user_data = await ec2_client.describe_instance_attribute( - Attribute="userData", InstanceId=instance["InstanceId"] - ) - assert "UserData" in user_data - assert "Value" in user_data["UserData"] - user_data = base64.b64decode(user_data["UserData"]["Value"]).decode() - assert user_data.count("docker swarm join") == 1 - - -async def test_cluster_scaling_up( - minimal_configuration: None, - patch_get_ec2_tags: mock.MagicMock, - service_monitored_labels: dict[DockerLabelKey, str], - app_settings: ApplicationSettings, - initialized_app: FastAPI, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - ec2_client: EC2Client, - mock_tag_node: mock.Mock, - fake_node: Node, - mock_rabbitmq_post_message: mock.Mock, - mock_find_node_with_name: mock.Mock, - mock_set_node_availability: mock.Mock, - # mock_cluster_used_resources: mock.Mock, - mock_compute_node_used_resources: mock.Mock, -): - # we have nothing running now - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - - # create a task that needs more power - await create_service( - 
task_template | create_task_reservations(4, parse_obj_as(ByteSize, "128GiB")), - service_monitored_labels, - "pending", - ) - - # this should trigger a scaling up as we have no nodes - await cluster_scaling_from_labelled_services(initialized_app) - - # check the instance was started and we have exactly 1 - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=1, - instance_type="r5n.4xlarge", - instance_state="running", - ) - - # as the new node is already running, but is not yet connected, hence not tagged and drained - mock_find_node_with_name.assert_not_called() - mock_tag_node.assert_not_called() - mock_set_node_availability.assert_not_called() - mock_compute_node_used_resources.assert_not_called() - # check rabbit messages were sent - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, - app_settings, - initialized_app, - instances_running=0, - instances_pending=1, - ) - mock_rabbitmq_post_message.reset_mock() - - # 2. running this again should not scale again, but tag the node and make it available - await cluster_scaling_from_labelled_services(initialized_app) - mock_compute_node_used_resources.assert_called_once_with( - get_docker_client(initialized_app), - fake_node, - ) - # check the number of instances did not change and is still running - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=1, - instance_type="r5n.4xlarge", - instance_state="running", - ) - # the node is tagged and made active right away since we still have the pending task - mock_find_node_with_name.assert_called_once() - assert app_settings.AUTOSCALING_NODES_MONITORING - expected_docker_node_tags = { - tag_key: "true" - for tag_key in ( - app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS - + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS - ) - } - mock_tag_node.assert_called_once_with( - get_docker_client(initialized_app), - fake_node, - tags=expected_docker_node_tags, - available=False, - ) - mock_set_node_availability.assert_called_once_with( - get_docker_client(initialized_app), fake_node, available=True - ) - - # check rabbit messages were sent - assert fake_node.Description - assert fake_node.Description.Resources - assert fake_node.Description.Resources.NanoCPUs - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, - app_settings, - initialized_app, - nodes_total=1, - nodes_active=1, - cluster_total_resources={ - "cpus": fake_node.Description.Resources.NanoCPUs / 1e9, - "ram": fake_node.Description.Resources.MemoryBytes, - }, - cluster_used_resources={ - "cpus": float(0), - "ram": 0, - }, - instances_running=1, - ) - mock_rabbitmq_post_message.reset_mock() - - -@dataclass(frozen=True) -class _ScaleUpParams: - service_resources: Resources - num_services: int - expected_instance_type: str - expected_num_instances: int - - -@pytest.mark.parametrize( - "scale_up_params", - [ - pytest.param( - _ScaleUpParams( - service_resources=Resources( - cpus=5, ram=parse_obj_as(ByteSize, "36Gib") - ), - num_services=10, - expected_instance_type="g3.4xlarge", - expected_num_instances=4, - ), - id="sim4life-light", - ) - ], -) -async def test_cluster_scaling_up_starts_multiple_instances( - minimal_configuration: None, - service_monitored_labels: dict[DockerLabelKey, str], - app_settings: ApplicationSettings, - initialized_app: FastAPI, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - 
create_task_reservations: Callable[[int, int], dict[str, Any]], - ec2_client: EC2Client, - mock_tag_node: mock.Mock, - fake_node: Node, - scale_up_params: _ScaleUpParams, - mock_rabbitmq_post_message: mock.Mock, - mock_find_node_with_name: mock.Mock, - mock_set_node_availability: mock.Mock, -): - # we have nothing running now - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - - # create several tasks that needs more power - await asyncio.gather( - *( - create_service( - task_template - | create_task_reservations( - int(scale_up_params.service_resources.cpus), - scale_up_params.service_resources.ram, - ), - service_monitored_labels, - "pending", - ) - for _ in range(scale_up_params.num_services) - ) - ) - - # run the code - await cluster_scaling_from_labelled_services(initialized_app) - - # check the instances were started - await _assert_ec2_instances( - ec2_client, - num_reservations=1, - num_instances=scale_up_params.expected_num_instances, - instance_type="g3.4xlarge", - instance_state="running", - ) - - # as the new node is already running, but is not yet connected, hence not tagged and drained - mock_find_node_with_name.assert_not_called() - mock_tag_node.assert_not_called() - mock_set_node_availability.assert_not_called() - # check rabbit messages were sent - _assert_rabbit_autoscaling_message_sent( - mock_rabbitmq_post_message, - app_settings, - initialized_app, - instances_pending=scale_up_params.expected_num_instances, - ) - mock_rabbitmq_post_message.reset_mock() - - -async def test__deactivate_empty_nodes( - minimal_configuration: None, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - host_node: Node, - fake_ec2_instance_data: Callable[..., EC2InstanceData], - mock_set_node_availability: mock.Mock, -): - # since we have no service running, we expect the passed node to be set to drain - active_cluster = cluster( - active_nodes=[AssociatedInstance(host_node, fake_ec2_instance_data())] - ) - updated_cluster = await _deactivate_empty_nodes(initialized_app, active_cluster) - assert not updated_cluster.active_nodes - assert updated_cluster.drained_nodes == active_cluster.active_nodes - mock_set_node_availability.assert_called_once_with( - mock.ANY, host_node, available=False - ) - - -async def test__deactivate_empty_nodes_to_drain_when_services_running_are_missing_labels( - minimal_configuration: None, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - host_node: Node, - fake_ec2_instance_data: Callable[..., EC2InstanceData], - mock_set_node_availability: mock.Mock, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - host_cpu_count: int, -): - # create a service that runs without task labels - task_template_that_runs = task_template | create_task_reservations( - int(host_cpu_count / 2 + 1), 0 - ) - await create_service( - task_template_that_runs, - {}, - "running", - ) - active_cluster = cluster( - active_nodes=[AssociatedInstance(host_node, fake_ec2_instance_data())] - ) - updated_cluster = await _deactivate_empty_nodes(initialized_app, active_cluster) - assert not updated_cluster.active_nodes - assert updated_cluster.drained_nodes == active_cluster.active_nodes - mock_set_node_availability.assert_called_once_with( - mock.ANY, host_node, available=False - ) - - -async def 
test__deactivate_empty_nodes_does_not_drain_if_service_is_running_with_correct_labels( - minimal_configuration: None, - app_settings: ApplicationSettings, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - host_node: Node, - fake_ec2_instance_data: Callable[..., EC2InstanceData], - mock_set_node_availability: mock.Mock, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - service_monitored_labels: dict[DockerLabelKey, str], - host_cpu_count: int, -): - # create a service that runs without task labels - task_template_that_runs = task_template | create_task_reservations( - int(host_cpu_count / 2 + 1), 0 - ) - assert app_settings.AUTOSCALING_NODES_MONITORING - await create_service( - task_template_that_runs, - service_monitored_labels, - "running", - ) - - # since we have no service running, we expect the passed node to be set to drain - active_cluster = cluster( - active_nodes=[AssociatedInstance(host_node, fake_ec2_instance_data())] - ) - updated_cluster = await _deactivate_empty_nodes(initialized_app, active_cluster) - assert updated_cluster == active_cluster - mock_set_node_availability.assert_not_called() - - -async def test__find_terminateable_nodes_with_no_hosts( - minimal_configuration: None, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - host_node: Node, - fake_ec2_instance_data: Callable[..., EC2InstanceData], -): - # there is no node to terminate here since nothing is drained - active_cluster = cluster( - active_nodes=[AssociatedInstance(host_node, fake_ec2_instance_data())], - drained_nodes=[], - reserve_drained_nodes=[AssociatedInstance(host_node, fake_ec2_instance_data())], - ) - assert await _find_terminateable_instances(initialized_app, active_cluster) == [] - - -async def test__find_terminateable_nodes_with_drained_host( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - drained_host_node: Node, - app_settings: ApplicationSettings, - fake_ec2_instance_data: Callable[..., EC2InstanceData], -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - assert ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - > datetime.timedelta(seconds=10) - ), "this tests relies on the fact that the time before termination is above 10 seconds" - - # if the instance started just about now, then it should not be terminateable - active_cluster_with_drained_nodes_started_now = cluster( - drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - ), - ) - ], - reserve_drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - ), - ) - ], - ) - assert ( - await _find_terminateable_instances( - initialized_app, active_cluster_with_drained_nodes_started_now - ) - == [] - ) - - # if the instance started just after the termination time, even on several days, it is not terminateable - active_cluster_with_drained_nodes_not_inthe_window = cluster( - drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - - datetime.timedelta(days=21) - + 
datetime.timedelta(seconds=10) - ), - ) - ], - reserve_drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - - datetime.timedelta(days=21) - + datetime.timedelta(seconds=10) - ), - ) - ], - ) - assert ( - await _find_terminateable_instances( - initialized_app, active_cluster_with_drained_nodes_not_inthe_window - ) - == [] - ) - - # if the instance started just before the termination time, even on several days, it is terminateable - active_cluster_with_drained_nodes_long_time_ago_terminateable = cluster( - drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - - datetime.timedelta(days=21) - - datetime.timedelta(seconds=10), - ), - ) - ], - reserve_drained_nodes=[ - AssociatedInstance( - drained_host_node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - - datetime.timedelta(days=21) - - datetime.timedelta(seconds=10), - ), - ) - ], - ) - - assert ( - await _find_terminateable_instances( - initialized_app, - active_cluster_with_drained_nodes_long_time_ago_terminateable, - ) - == active_cluster_with_drained_nodes_long_time_ago_terminateable.drained_nodes - ) - - -@pytest.fixture -def create_associated_instance( - fake_ec2_instance_data: Callable[..., EC2InstanceData], - app_settings: ApplicationSettings, - faker: Faker, -) -> Callable[[Node, bool], AssociatedInstance]: - def _creator(node: Node, terminateable_time: bool) -> AssociatedInstance: - assert app_settings.AUTOSCALING_EC2_INSTANCES - assert ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - > datetime.timedelta(seconds=10) - ), "this tests relies on the fact that the time before termination is above 10 seconds" - assert app_settings.AUTOSCALING_EC2_INSTANCES - seconds_delta = ( - -datetime.timedelta(seconds=10) - if terminateable_time - else datetime.timedelta(seconds=10) - ) - return AssociatedInstance( - node, - fake_ec2_instance_data( - launch_time=datetime.datetime.now(datetime.timezone.utc) - - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - - datetime.timedelta( - days=faker.pyint(min_value=0, max_value=100), - hours=faker.pyint(min_value=0, max_value=100), - ) - + seconds_delta - ), - ) - - return _creator - - -async def test__try_scale_down_cluster_with_no_nodes( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - initialized_app: FastAPI, - cluster: Callable[..., Cluster], - mock_remove_nodes: mock.Mock, - host_node: Node, - drained_host_node: Node, - create_associated_instance: Callable[[Node, bool], AssociatedInstance], -): - active_cluster = cluster( - active_nodes=[create_associated_instance(host_node, True)], - drained_nodes=[create_associated_instance(drained_host_node, False)], - reserve_drained_nodes=[create_associated_instance(drained_host_node, True)], - ) - updated_cluster = await _try_scale_down_cluster(initialized_app, active_cluster) - assert updated_cluster == active_cluster - mock_remove_nodes.assert_not_called() - - -async def test__try_scale_down_cluster( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - 
initialized_app: FastAPI, - cluster: Callable[..., Cluster], - host_node: Node, - drained_host_node: Node, - mock_terminate_instances: mock.Mock, - mock_remove_nodes: mock.Mock, - app_settings: ApplicationSettings, - create_associated_instance: Callable[[Node, bool], AssociatedInstance], -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - assert ( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION - > datetime.timedelta(seconds=10) - ), "this tests relies on the fact that the time before termination is above 10 seconds" - - active_cluster = cluster( - active_nodes=[create_associated_instance(host_node, True)], - drained_nodes=[create_associated_instance(drained_host_node, True)], - reserve_drained_nodes=[create_associated_instance(drained_host_node, True)], - ) - - updated_cluster = await _try_scale_down_cluster(initialized_app, active_cluster) - assert not updated_cluster.drained_nodes - assert updated_cluster.reserve_drained_nodes - assert updated_cluster.reserve_drained_nodes == active_cluster.reserve_drained_nodes - assert updated_cluster.active_nodes - assert updated_cluster.active_nodes == active_cluster.active_nodes - mock_terminate_instances.assert_called_once() - mock_remove_nodes.assert_called_once() - - -async def test__activate_drained_nodes_with_no_tasks( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - initialized_app: FastAPI, - host_node: Node, - drained_host_node: Node, - mock_tag_node: mock.Mock, - cluster: Callable[..., Cluster], - create_associated_instance: Callable[[Node, bool], AssociatedInstance], -): - # no tasks, does nothing and returns True - empty_cluster = cluster() - still_pending_tasks, updated_cluster = await _activate_drained_nodes( - initialized_app, empty_cluster, [] - ) - assert not still_pending_tasks - assert updated_cluster == empty_cluster - - active_cluster = cluster( - active_nodes=[create_associated_instance(host_node, True)], - drained_nodes=[create_associated_instance(drained_host_node, True)], - reserve_drained_nodes=[create_associated_instance(drained_host_node, True)], - ) - still_pending_tasks, updated_cluster = await _activate_drained_nodes( - initialized_app, - active_cluster, - [], - ) - assert not still_pending_tasks - assert updated_cluster == active_cluster - mock_tag_node.assert_not_called() - - -async def test__activate_drained_nodes_with_no_drained_nodes( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - autoscaling_docker: AutoscalingDocker, - initialized_app: FastAPI, - host_node: Node, - mock_tag_node: mock.Mock, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - host_cpu_count: int, - cluster: Callable[..., Cluster], - create_associated_instance: Callable[[Node, bool], AssociatedInstance], -): - task_template_that_runs = task_template | create_task_reservations( - int(host_cpu_count / 2 + 1), 0 - ) - service_with_no_reservations = await create_service( - task_template_that_runs, {}, "running" - ) - assert service_with_no_reservations.Spec - service_tasks = parse_obj_as( - list[Task], - await autoscaling_docker.tasks.list( - filters={"service": service_with_no_reservations.Spec.Name} - ), - ) - assert service_tasks - assert len(service_tasks) == 1 - - cluster_without_drained_nodes = cluster( - active_nodes=[create_associated_instance(host_node, True)] - ) - 
still_pending_tasks, updated_cluster = await _activate_drained_nodes( - initialized_app, - cluster_without_drained_nodes, - service_tasks, - ) - assert still_pending_tasks == service_tasks - assert updated_cluster == cluster_without_drained_nodes - mock_tag_node.assert_not_called() - - -async def test__activate_drained_nodes_with_drained_node( - minimal_configuration: None, - with_valid_time_before_termination: datetime.timedelta, - autoscaling_docker: AutoscalingDocker, - initialized_app: FastAPI, - drained_host_node: Node, - mock_tag_node: mock.Mock, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], - create_task_reservations: Callable[[int, int], dict[str, Any]], - host_cpu_count: int, - fake_ec2_instance_data: Callable[..., EC2InstanceData], - cluster: Callable[..., Cluster], - create_associated_instance: Callable[[Node, bool], AssociatedInstance], -): - # task with no drain nodes returns False - task_template_that_runs = task_template | create_task_reservations( - int(host_cpu_count / 2 + 1), 0 - ) - service_with_no_reservations = await create_service( - task_template_that_runs, {}, "pending" - ) - assert service_with_no_reservations.Spec - service_tasks = parse_obj_as( - list[Task], - await autoscaling_docker.tasks.list( - filters={"service": service_with_no_reservations.Spec.Name} - ), - ) - assert service_tasks - assert len(service_tasks) == 1 - - cluster_with_drained_nodes = cluster( - drained_nodes=[create_associated_instance(drained_host_node, True)] - ) - - still_pending_tasks, updated_cluster = await _activate_drained_nodes( - initialized_app, - cluster_with_drained_nodes, - service_tasks, - ) - assert not still_pending_tasks - assert updated_cluster.active_nodes == cluster_with_drained_nodes.drained_nodes - mock_tag_node.assert_called_once_with( - mock.ANY, drained_host_node, tags={}, available=True - ) diff --git a/services/autoscaling/tests/unit/test_dynamic_scaling_task.py b/services/autoscaling/tests/unit/test_dynamic_scaling_task.py deleted file mode 100644 index 2a771e49bee..00000000000 --- a/services/autoscaling/tests/unit/test_dynamic_scaling_task.py +++ /dev/null @@ -1,51 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -import asyncio -from unittest import mock - -import pytest -from fastapi import FastAPI -from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_envs import EnvVarsDict -from simcore_service_autoscaling.core.settings import ApplicationSettings - -_FAST_POLL_INTERVAL = 1 - - -@pytest.fixture -def app_environment( - app_environment: EnvVarsDict, - disabled_rabbitmq: None, - mocked_aws_server_envs: None, - mocked_redis_server: None, - monkeypatch: pytest.MonkeyPatch, -) -> EnvVarsDict: - # fast interval - monkeypatch.setenv("AUTOSCALING_POLL_INTERVAL", f"{_FAST_POLL_INTERVAL}") - app_environment["AUTOSCALING_POLL_INTERVAL"] = f"{_FAST_POLL_INTERVAL}" - return app_environment - - -@pytest.fixture -def mock_background_task(mocker: MockerFixture) -> mock.Mock: - mocked_task = mocker.patch( - "simcore_service_autoscaling.dynamic_scaling.cluster_scaling_from_labelled_services", - autospec=True, - ) - return mocked_task - - -async def test_dynamic_scaling_task_created_and_deleted( - app_environment: EnvVarsDict, - mock_background_task: mock.Mock, - initialized_app: FastAPI, - app_settings: ApplicationSettings, -): - assert 
app_settings.AUTOSCALING_POLL_INTERVAL.total_seconds() == _FAST_POLL_INTERVAL - assert hasattr(initialized_app.state, "autoscaler_task") - await asyncio.sleep(5 * _FAST_POLL_INTERVAL) - mock_background_task.assert_called() diff --git a/services/autoscaling/tests/unit/test_main.py b/services/autoscaling/tests/unit/test_main.py index 4f899fdb1fb..525748023ec 100644 --- a/services/autoscaling/tests/unit/test_main.py +++ b/services/autoscaling/tests/unit/test_main.py @@ -3,7 +3,7 @@ # pylint:disable=redefined-outer-name -from pytest_simcore.helpers.utils_envs import EnvVarsDict +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict def test_main_app(app_environment: EnvVarsDict): diff --git a/services/autoscaling/tests/unit/test_models.py b/services/autoscaling/tests/unit/test_models.py index 77b31779ccc..f2271889ddb 100644 --- a/services/autoscaling/tests/unit/test_models.py +++ b/services/autoscaling/tests/unit/test_models.py @@ -3,92 +3,14 @@ # pylint: disable=unused-variable -from typing import Any, Awaitable, Callable +from collections.abc import Awaitable, Callable +from typing import Any import aiodocker import pytest -from models_library.docker import SimcoreServiceDockerLabelKeys +from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels from models_library.generated_models.docker_rest_api import Service, Task -from pydantic import ByteSize, ValidationError, parse_obj_as -from simcore_service_autoscaling.models import Resources - - -@pytest.mark.parametrize( - "a,b,a_greater_or_equal_than_b", - [ - ( - Resources(cpus=0.2, ram=ByteSize(0)), - Resources(cpus=0.1, ram=ByteSize(0)), - True, - ), - ( - Resources(cpus=0.1, ram=ByteSize(0)), - Resources(cpus=0.1, ram=ByteSize(0)), - True, - ), - ( - Resources(cpus=0.1, ram=ByteSize(1)), - Resources(cpus=0.1, ram=ByteSize(0)), - True, - ), - ( - Resources(cpus=0.05, ram=ByteSize(1)), - Resources(cpus=0.1, ram=ByteSize(0)), - False, - ), - ( - Resources(cpus=0.1, ram=ByteSize(0)), - Resources(cpus=0.1, ram=ByteSize(1)), - False, - ), - ], -) -def test_resources_ge_operator( - a: Resources, b: Resources, a_greater_or_equal_than_b: bool -): - assert (a >= b) is a_greater_or_equal_than_b - - -@pytest.mark.parametrize( - "a,b,result", - [ - ( - Resources(cpus=0, ram=ByteSize(0)), - Resources(cpus=1, ram=ByteSize(34)), - Resources(cpus=1, ram=ByteSize(34)), - ), - ( - Resources(cpus=0.1, ram=ByteSize(-1)), - Resources(cpus=1, ram=ByteSize(34)), - Resources(cpus=1.1, ram=ByteSize(33)), - ), - ], -) -def test_resources_add(a: Resources, b: Resources, result: Resources): - assert a + b == result - a += b - assert a == result - - -@pytest.mark.parametrize( - "a,b,result", - [ - ( - Resources(cpus=0, ram=ByteSize(0)), - Resources(cpus=1, ram=ByteSize(34)), - Resources.construct(cpus=-1, ram=ByteSize(-34)), - ), - ( - Resources(cpus=0.1, ram=ByteSize(-1)), - Resources(cpus=1, ram=ByteSize(34)), - Resources.construct(cpus=-0.9, ram=ByteSize(-35)), - ), - ], -) -def test_resources_sub(a: Resources, b: Resources, result: Resources): - assert a - b == result - a -= b - assert a == result +from pydantic import TypeAdapter, ValidationError async def test_get_simcore_service_docker_labels_from_task_with_missing_labels_raises( @@ -97,38 +19,40 @@ async def test_get_simcore_service_docker_labels_from_task_with_missing_labels_r task_template: dict[str, Any], ): service_missing_osparc_labels = await create_service(task_template, {}, "running") - assert service_missing_osparc_labels.Spec - service_tasks = parse_obj_as( - list[Task], + 
assert service_missing_osparc_labels.spec + service_tasks = TypeAdapter(list[Task]).validate_python( await async_docker_client.tasks.list( - filters={"service": service_missing_osparc_labels.Spec.Name} - ), + filters={"service": service_missing_osparc_labels.spec.name} + ) ) assert service_tasks assert len(service_tasks) == 1 with pytest.raises(ValidationError): - SimcoreServiceDockerLabelKeys.from_docker_task(service_tasks[0]) + StandardSimcoreDockerLabels.from_docker_task(service_tasks[0]) async def test_get_simcore_service_docker_labels( async_docker_client: aiodocker.Docker, - create_service: Callable[[dict[str, Any], dict[str, str], str], Awaitable[Service]], + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] + ], task_template: dict[str, Any], - osparc_docker_label_keys: SimcoreServiceDockerLabelKeys, + osparc_docker_label_keys: StandardSimcoreDockerLabels, ): service_with_labels = await create_service( - task_template, osparc_docker_label_keys.to_docker_labels(), "running" + task_template, + osparc_docker_label_keys.to_simcore_runtime_docker_labels(), + "running", ) - assert service_with_labels.Spec - service_tasks = parse_obj_as( - list[Task], + assert service_with_labels.spec + service_tasks = TypeAdapter(list[Task]).validate_python( await async_docker_client.tasks.list( - filters={"service": service_with_labels.Spec.Name} - ), + filters={"service": service_with_labels.spec.name} + ) ) assert service_tasks assert len(service_tasks) == 1 - task_ownership = SimcoreServiceDockerLabelKeys.from_docker_task(service_tasks[0]) + task_ownership = StandardSimcoreDockerLabels.from_docker_task(service_tasks[0]) assert task_ownership assert task_ownership.user_id == osparc_docker_label_keys.user_id assert task_ownership.project_id == osparc_docker_label_keys.project_id diff --git a/services/autoscaling/tests/unit/test_modules_auto_scaling_computational.py b/services/autoscaling/tests/unit/test_modules_auto_scaling_computational.py new file mode 100644 index 00000000000..8a9f82ec847 --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_auto_scaling_computational.py @@ -0,0 +1,1707 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=too-many-statements +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import datetime +import logging +from collections import defaultdict +from collections.abc import Awaitable, Callable, Iterator +from copy import deepcopy +from dataclasses import dataclass +from typing import Any, Final, cast +from unittest import mock + +import arrow +import distributed +import pytest +from aws_library.ec2 import Resources +from dask_task_models_library.resource_constraints import ( + create_ec2_resource_constraint_key, +) +from faker import Faker +from fastapi import FastAPI +from models_library.docker import DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY +from models_library.generated_models.docker_rest_api import Availability +from models_library.generated_models.docker_rest_api import Node as DockerNode +from models_library.generated_models.docker_rest_api import NodeState, NodeStatus +from models_library.rabbitmq_messages import RabbitAutoscalingStatusMessage +from pydantic import ByteSize, TypeAdapter +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.autoscaling import ( + assert_cluster_state, + 
create_fake_association, +) +from pytest_simcore.helpers.aws_ec2 import assert_autoscaled_computational_ec2_instances +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from simcore_service_autoscaling.core.settings import ApplicationSettings +from simcore_service_autoscaling.models import EC2InstanceData +from simcore_service_autoscaling.modules.auto_scaling_core import auto_scale_cluster +from simcore_service_autoscaling.modules.auto_scaling_mode_computational import ( + ComputationalAutoscaling, +) +from simcore_service_autoscaling.modules.dask import DaskTaskResources +from simcore_service_autoscaling.modules.docker import get_docker_client +from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY, + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY, + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, +) +from types_aiobotocore_ec2.client import EC2Client +from types_aiobotocore_ec2.literals import InstanceTypeType + + +@pytest.fixture +def local_dask_scheduler_server_envs( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + dask_spec_local_cluster: distributed.SpecCluster, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "DASK_MONITORING_URL": dask_spec_local_cluster.scheduler_address, + }, + ) + + +@pytest.fixture +def minimal_configuration( + with_labelize_drain_nodes: EnvVarsDict, + app_with_docker_join_drained: EnvVarsDict, + docker_swarm: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + enabled_computational_mode: EnvVarsDict, + local_dask_scheduler_server_envs: EnvVarsDict, + mocked_ec2_instances_envs: EnvVarsDict, + disabled_rabbitmq: None, + disable_autoscaling_background_task: None, + disable_buffers_pool_background_task: None, + mocked_redis_server: None, +) -> None: ... 
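
The new computational-autoscaling test module composes its environment from many small fixtures: each one sets a group of env vars via setenvs_from_dict and returns the resulting EnvVarsDict, while minimal_configuration simply declares them all as parameters so that requesting one fixture applies every side effect. Below is a minimal, self-contained sketch of that pattern only; the local _setenvs_from_dict helper, the scheduler URL, and the fixture names are assumptions standing in for the real pytest_simcore helpers.

import pytest

EnvVarsDict = dict[str, str]


def _setenvs_from_dict(monkeypatch: pytest.MonkeyPatch, envs: EnvVarsDict) -> EnvVarsDict:
    # stand-in for pytest_simcore.helpers.monkeypatch_envs.setenvs_from_dict:
    # apply each env var through monkeypatch and return the mapping for later composition
    for name, value in envs.items():
        monkeypatch.setenv(name, value)
    return envs


@pytest.fixture
def scheduler_envs(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict:
    # hypothetical scheduler address, only for illustration
    return _setenvs_from_dict(monkeypatch, {"DASK_MONITORING_URL": "tcp://127.0.0.1:8786"})


@pytest.fixture
def minimal_test_configuration(scheduler_envs: EnvVarsDict) -> None:
    # nothing to do here: depending on the smaller fixtures is enough to apply their env vars
    ...
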
+ + +@pytest.fixture +def dask_workers_config() -> dict[str, Any]: + # NOTE: we override here the config to get a "weak" cluster + return { + "weak-worker": { + "cls": distributed.Worker, + "options": {"nthreads": 2, "resources": {"CPU": 2, "RAM": 2e9}}, + } + } + + +def _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message: mock.Mock, + app_settings: ApplicationSettings, + app: FastAPI, + scheduler_address: str, + **message_update_kwargs, +): + default_message = RabbitAutoscalingStatusMessage( + origin=f"computational:scheduler_url={scheduler_address}", + nodes_total=0, + nodes_active=0, + nodes_drained=0, + cluster_total_resources=Resources.create_as_empty().model_dump(), + cluster_used_resources=Resources.create_as_empty().model_dump(), + instances_pending=0, + instances_running=0, + ) + expected_message = default_message.model_copy(update=message_update_kwargs) + mock_rabbitmq_post_message.assert_called_once_with( + app, + expected_message, + ) + + +@pytest.fixture +def mock_docker_find_node_with_name_returns_fake_node( + mocker: MockerFixture, fake_node: DockerNode +) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.find_node_with_name", + autospec=True, + return_value=fake_node, + ) + + +@pytest.fixture +def mock_docker_compute_node_used_resources(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.compute_node_used_resources", + autospec=True, + return_value=Resources.create_as_empty(), + ) + + +@pytest.fixture +def mock_rabbitmq_post_message(mocker: MockerFixture) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.utils.rabbitmq.post_message", autospec=True + ) + + +@pytest.fixture +def mock_terminate_instances(mocker: MockerFixture) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.ec2.SimcoreEC2API.terminate_instances", + autospec=True, + ) + + +@pytest.fixture +def mock_launch_instances( + mocker: MockerFixture, + aws_instance_private_dns: str, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.ec2.SimcoreEC2API.launch_instances", + autospec=True, + return_value=fake_ec2_instance_data(aws_private_dns=aws_instance_private_dns), + ) + + +@pytest.fixture +def ec2_instance_custom_tags( + ec2_instance_custom_tags: dict[str, str], + faker: Faker, +) -> dict[str, str]: + # NOTE: we override here the config as the autoscaling in computational case is started with more custom tags + return { + **ec2_instance_custom_tags, + "user_id": faker.word(), + "wallet_id": faker.word(), + } + + +@pytest.fixture +def create_dask_task_resources() -> ( + Callable[[InstanceTypeType | None, Resources], DaskTaskResources] +): + def _do( + ec2_instance_type: InstanceTypeType | None, task_resource: Resources + ) -> DaskTaskResources: + resources = _dask_task_resources_from_resources(task_resource) + if ec2_instance_type is not None: + resources[create_ec2_resource_constraint_key(ec2_instance_type)] = 1 + return resources + + return _do + + +@pytest.fixture +def mock_dask_get_worker_has_results_in_memory(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.dask.get_worker_still_has_results_in_memory", + return_value=0, + autospec=True, + ) + + +@pytest.fixture +def mock_dask_get_worker_used_resources(mocker: MockerFixture) -> mock.Mock: + return 
mocker.patch( + "simcore_service_autoscaling.modules.dask.get_worker_used_resources", + return_value=Resources.create_as_empty(), + autospec=True, + ) + + +@pytest.fixture +def mock_dask_is_worker_connected(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.dask.is_worker_connected", + return_value=True, + autospec=True, + ) + + +async def _create_task_with_resources( + ec2_client: EC2Client, + dask_task_imposed_ec2_type: InstanceTypeType | None, + task_resources: Resources | None, + create_dask_task_resources: Callable[ + [InstanceTypeType | None, Resources], DaskTaskResources + ], + create_dask_task: Callable[[DaskTaskResources], distributed.Future], +) -> distributed.Future: + if dask_task_imposed_ec2_type and not task_resources: + instance_types = await ec2_client.describe_instance_types( + InstanceTypes=[dask_task_imposed_ec2_type] + ) + assert instance_types + assert "InstanceTypes" in instance_types + assert instance_types["InstanceTypes"] + assert "MemoryInfo" in instance_types["InstanceTypes"][0] + assert "SizeInMiB" in instance_types["InstanceTypes"][0]["MemoryInfo"] + task_resources = Resources( + cpus=1, + ram=TypeAdapter(ByteSize).validate_python( + f"{instance_types['InstanceTypes'][0]['MemoryInfo']['SizeInMiB']}MiB", + ), + ) + + assert task_resources + dask_task_resources = create_dask_task_resources( + dask_task_imposed_ec2_type, task_resources + ) + dask_future = create_dask_task(dask_task_resources) + assert dask_future + return dask_future + + +@dataclass(kw_only=True) +class _ScaleUpParams: + imposed_instance_type: InstanceTypeType | None + task_resources: Resources | None + num_tasks: int + expected_instance_type: InstanceTypeType + expected_num_instances: int + + +_RESOURCE_TO_DASK_RESOURCE_MAP: Final[dict[str, str]] = {"CPUS": "CPU", "RAM": "RAM"} + + +def _dask_task_resources_from_resources(resources: Resources) -> DaskTaskResources: + return { + _RESOURCE_TO_DASK_RESOURCE_MAP[res_key.upper()]: res_value + for res_key, res_value in resources.model_dump().items() + } + + +@pytest.fixture +async def create_tasks_batch( + ec2_client: EC2Client, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], + create_dask_task_resources: Callable[ + [InstanceTypeType | None, Resources], DaskTaskResources + ], +) -> Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]]: + async def _(scale_up_params: _ScaleUpParams) -> list[distributed.Future]: + return await asyncio.gather( + *( + _create_task_with_resources( + ec2_client, + scale_up_params.imposed_instance_type, + scale_up_params.task_resources, + create_dask_task_resources, + create_dask_task, + ) + for _ in range(scale_up_params.num_tasks) + ) + ) + + return _ + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_with_no_tasks_does_nothing( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + mock_launch_instances: mock.Mock, + mock_terminate_instances: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + dask_spec_local_cluster: distributed.SpecCluster, +): + await auto_scale_cluster( + app=initialized_app, 
auto_scaling_mode=ComputationalAutoscaling() + ) + mock_launch_instances.assert_not_called() + mock_terminate_instances.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + ) + + +@pytest.mark.acceptance_test( + "Ensure this does not happen https://github.com/ITISFoundation/osparc-simcore/issues/6227" +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_with_disabled_ssm_does_not_block_autoscaling( + minimal_configuration: None, + disabled_ssm: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + mock_launch_instances: mock.Mock, + mock_terminate_instances: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + dask_spec_local_cluster: distributed.SpecCluster, +): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + mock_launch_instances.assert_not_called() + mock_terminate_instances.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_with_task_with_too_much_resources_starts_nothing( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], + mock_launch_instances: mock.Mock, + mock_terminate_instances: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + dask_spec_local_cluster: distributed.SpecCluster, +): + # create a task that needs too much power + dask_future = create_dask_task( + {"RAM": int(TypeAdapter(ByteSize).validate_python("12800GiB"))} + ) + assert dask_future + + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + mock_launch_instances.assert_not_called() + mock_terminate_instances.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + ) + + +@pytest.mark.acceptance_test +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type=None, + task_resources=Resources( + cpus=1, ram=TypeAdapter(ByteSize).validate_python("128Gib") + ), + num_tasks=1, + expected_instance_type="r5n.4xlarge", + expected_num_instances=1, + ), + id="No explicit instance defined", + ), + pytest.param( + _ScaleUpParams( + imposed_instance_type="g4dn.2xlarge", + task_resources=None, + num_tasks=1, + expected_instance_type="g4dn.2xlarge", + expected_num_instances=1, + ), + id="Explicitely ask for g4dn.2xlarge and use all the 
resources", + ), + pytest.param( + _ScaleUpParams( + imposed_instance_type="r5n.8xlarge", + task_resources=Resources( + cpus=1, ram=TypeAdapter(ByteSize).validate_python("116Gib") + ), + num_tasks=1, + expected_instance_type="r5n.8xlarge", + expected_num_instances=1, + ), + id="Explicitely ask for r5n.8xlarge and set the resources", + ), + pytest.param( + _ScaleUpParams( + imposed_instance_type="r5n.8xlarge", + task_resources=None, + num_tasks=1, + expected_instance_type="r5n.8xlarge", + expected_num_instances=1, + ), + id="Explicitely ask for r5n.8xlarge and use all the resources", + ), + ], +) +async def test_cluster_scaling_up_and_down( # noqa: PLR0915 + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_tasks_batch: Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]], + ec2_client: EC2Client, + mock_docker_tag_node: mock.Mock, + fake_node: DockerNode, + mock_rabbitmq_post_message: mock.Mock, + mock_docker_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + mock_docker_compute_node_used_resources: mock.Mock, + mock_dask_get_worker_has_results_in_memory: mock.Mock, + mock_dask_get_worker_used_resources: mock.Mock, + mock_dask_is_worker_connected: mock.Mock, + mocker: MockerFixture, + dask_spec_local_cluster: distributed.SpecCluster, + with_drain_nodes_labelled: bool, + ec2_instance_custom_tags: dict[str, str], + scale_up_params: _ScaleUpParams, +): + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # create a task that needs more power + dask_futures = await create_tasks_batch(scale_up_params) + assert dask_futures + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # check the instance was started and we have exactly 1 + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # as the new node is already running, but is not yet connected, hence not tagged and drained + mock_docker_find_node_with_name_returns_fake_node.assert_not_called() + mock_docker_tag_node.assert_not_called() + mock_docker_set_node_availability.assert_not_called() + mock_docker_compute_node_used_resources.assert_not_called() + mock_dask_get_worker_has_results_in_memory.assert_not_called() + mock_dask_get_worker_used_resources.assert_not_called() + mock_dask_is_worker_connected.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_running=0, + instances_pending=1, + ) + mock_rabbitmq_post_message.reset_mock() + + # 2. 
running this again should not scale again, but tag the node and make it available + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + mock_dask_get_worker_has_results_in_memory.assert_called_once() + mock_dask_get_worker_has_results_in_memory.reset_mock() + mock_dask_get_worker_used_resources.assert_called_once() + mock_dask_get_worker_used_resources.reset_mock() + mock_dask_is_worker_connected.assert_not_called() + instances = await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + assert len(instances) == 1 + assert "PrivateDnsName" in instances[0] + internal_dns_name = instances[0]["PrivateDnsName"].removesuffix(".ec2.internal") + + # the node is attached first and then tagged and made active right away since we still have the pending task + mock_docker_find_node_with_name_returns_fake_node.assert_called_once() + mock_docker_find_node_with_name_returns_fake_node.reset_mock() + expected_docker_node_tags = { + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY: scale_up_params.expected_instance_type + } + assert mock_docker_tag_node.call_count == 3 + assert fake_node.spec + assert fake_node.spec.labels + fake_attached_node = deepcopy(fake_node) + assert fake_attached_node.spec + fake_attached_node.spec.availability = ( + Availability.active if with_drain_nodes_labelled else Availability.drain + ) + assert fake_attached_node.spec.labels + fake_attached_node.spec.labels |= expected_docker_node_tags | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + } + # check attach call + assert mock_docker_tag_node.call_args_list[0] == mock.call( + get_docker_client(initialized_app), + fake_node, + tags=fake_node.spec.labels + | expected_docker_node_tags + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=with_drain_nodes_labelled, + ) + # update our fake node + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + mock_docker_tag_node.call_args_list[0][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + # check the activate time is later than attach time + assert arrow.get( + mock_docker_tag_node.call_args_list[1][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) > arrow.get( + mock_docker_tag_node.call_args_list[0][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + + # check activate call + assert mock_docker_tag_node.call_args_list[1] == mock.call( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_node.spec.labels + | expected_docker_node_tags + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "true", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=True, + ) + # update our fake node + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + mock_docker_tag_node.call_args_list[1][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + mock_docker_tag_node.reset_mock() + mock_docker_set_node_availability.assert_not_called() + mock_rabbitmq_post_message.assert_called_once() + mock_rabbitmq_post_message.reset_mock() + + # now we have 1 monitored node that needs to be mocked + fake_attached_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" + fake_attached_node.status = 
NodeStatus( + state=NodeState.ready, message=None, addr=None + ) + fake_attached_node.spec.availability = Availability.active + assert fake_attached_node.description + fake_attached_node.description.hostname = internal_dns_name + + auto_scaling_mode = ComputationalAutoscaling() + mocker.patch.object( + auto_scaling_mode, + "get_monitored_nodes", + autospec=True, + return_value=[fake_attached_node], + ) + + # 3. calling this multiple times should do nothing + num_useless_calls = 10 + for _ in range(num_useless_calls): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=auto_scaling_mode + ) + mock_dask_is_worker_connected.assert_called() + assert mock_dask_is_worker_connected.call_count == num_useless_calls + mock_dask_is_worker_connected.reset_mock() + mock_dask_get_worker_has_results_in_memory.assert_called() + assert ( + mock_dask_get_worker_has_results_in_memory.call_count == 2 * num_useless_calls + ) + mock_dask_get_worker_has_results_in_memory.reset_mock() + mock_dask_get_worker_used_resources.assert_called() + assert mock_dask_get_worker_used_resources.call_count == 2 * num_useless_calls + mock_dask_get_worker_used_resources.reset_mock() + mock_docker_find_node_with_name_returns_fake_node.assert_not_called() + assert mock_docker_tag_node.call_count == num_useless_calls + mock_docker_tag_node.reset_mock() + mock_docker_set_node_availability.assert_not_called() + # check the number of instances did not change and is still running + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # check rabbit messages were sent + mock_rabbitmq_post_message.assert_called() + assert mock_rabbitmq_post_message.call_count == num_useless_calls + mock_rabbitmq_post_message.reset_mock() + + # + # 4. 
now scaling down, as we deleted all the tasks + # + del dask_futures + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mock_dask_is_worker_connected.assert_called_once() + mock_dask_is_worker_connected.reset_mock() + mock_dask_get_worker_has_results_in_memory.assert_called() + assert mock_dask_get_worker_has_results_in_memory.call_count == 2 + mock_dask_get_worker_has_results_in_memory.reset_mock() + mock_dask_get_worker_used_resources.assert_called() + assert mock_dask_get_worker_used_resources.call_count == 2 + mock_dask_get_worker_used_resources.reset_mock() + # the node shall be waiting before draining + mock_docker_set_node_availability.assert_not_called() + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=True, + ) + mock_docker_tag_node.reset_mock() + + # now update the fake node to have the required label as expected + assert app_settings.AUTOSCALING_EC2_INSTANCES + fake_attached_node.spec.labels[_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY] = ( + arrow.utcnow() + .shift( + seconds=-app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING.total_seconds() + - 1 + ) + .datetime.isoformat() + ) + + # now it will drain + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mock_dask_is_worker_connected.assert_called_once() + mock_dask_is_worker_connected.reset_mock() + mock_dask_get_worker_has_results_in_memory.assert_called() + assert mock_dask_get_worker_has_results_in_memory.call_count == 2 + mock_dask_get_worker_has_results_in_memory.reset_mock() + mock_dask_get_worker_used_resources.assert_called() + assert mock_dask_get_worker_used_resources.call_count == 2 + mock_dask_get_worker_used_resources.reset_mock() + # the node shall be set to drain, but not yet terminated + mock_docker_set_node_availability.assert_not_called() + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY: mock.ANY, + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=with_drain_nodes_labelled, + ) + # check the datetime was updated + assert arrow.get( + mock_docker_tag_node.call_args_list[0][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) > arrow.get( + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] + ) + mock_docker_tag_node.reset_mock() + + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # we artificially set the node to drain + fake_attached_node.spec.availability = Availability.drain + fake_attached_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "false" + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + datetime.datetime.now(tz=datetime.UTC).isoformat() + ) + + # the node will not be terminated before the timeout triggers + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert ( + datetime.timedelta(seconds=5) + < app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ) +
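+ # NOTE: utils_docker.remove_nodes is patched below so that the removal of the fake node can be asserted without going through the real docker swarm API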
mocked_docker_remove_node = mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.remove_nodes", + return_value=None, + autospec=True, + ) + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_not_called() + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # now changing the last update timepoint will trigger the node removal and shutdown the ec2 instance + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + datetime.datetime.now(tz=datetime.UTC) + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + - datetime.timedelta(seconds=1) + ).isoformat() + # first making sure the node is drained, then terminate it after a delay to let it drain + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_not_called() + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY: mock.ANY, + }, + available=False, + ) + mock_docker_tag_node.reset_mock() + # set the fake node to drain + fake_attached_node.spec.availability = Availability.drain + fake_attached_node.spec.labels[_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY] = ( + arrow.utcnow() + .shift( + seconds=-app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_FINAL_TERMINATION.total_seconds() + - 1 + ) + .datetime.isoformat() + ) + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_called_once_with( + mock.ANY, nodes=[fake_attached_node], force=True + ) + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="terminated", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # this call should never be used in computational mode + mock_docker_compute_node_used_resources.assert_not_called() + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_does_not_scale_up_if_defined_instance_is_not_allowed( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], + create_dask_task_resources: Callable[ + [InstanceTypeType | 
None, Resources], DaskTaskResources + ], + ec2_client: EC2Client, + faker: Faker, + caplog: pytest.LogCaptureFixture, +): + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # create a task that needs more power + dask_task_resources = create_dask_task_resources( + cast(InstanceTypeType, faker.pystr()), + Resources(cpus=1, ram=TypeAdapter(ByteSize).validate_python("128GiB")), + ) + dask_future = create_dask_task(dask_task_resources) + assert dask_future + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # nothing runs + assert not all_instances["Reservations"] + # check there is an error in the logs + error_messages = [ + x.message for x in caplog.get_records("call") if x.levelno == logging.ERROR + ] + assert len(error_messages) == 1 + assert "Unexpected error:" in error_messages[0] + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_does_not_scale_up_if_defined_instance_is_not_fitting_resources( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], + create_dask_task_resources: Callable[ + [InstanceTypeType | None, Resources], DaskTaskResources + ], + ec2_client: EC2Client, + faker: Faker, + caplog: pytest.LogCaptureFixture, +): + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # create a task that needs more power + dask_task_resources = create_dask_task_resources( + "t2.xlarge", + Resources(cpus=1, ram=TypeAdapter(ByteSize).validate_python("128GiB")), + ) + dask_future = create_dask_task(dask_task_resources) + assert dask_future + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # nothing runs + assert not all_instances["Reservations"] + # check there is an error in the logs + error_messages = [ + x.message for x in caplog.get_records("call") if x.levelno == logging.ERROR + ] + assert len(error_messages) == 1 + assert "Unexpected error:" in error_messages[0] + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type=None, + task_resources=Resources( + cpus=5, ram=TypeAdapter(ByteSize).validate_python("36Gib") + ), + num_tasks=10, + expected_instance_type="r5n.4xlarge", # 32 cpus, 128Gib + expected_num_instances=4, + ), + id="isolve", + ) + ], +) +async def test_cluster_scaling_up_starts_multiple_instances( + 
patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_tasks_batch: Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]], + ec2_client: EC2Client, + mock_docker_tag_node: mock.Mock, + scale_up_params: _ScaleUpParams, + mock_rabbitmq_post_message: mock.Mock, + mock_docker_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + dask_spec_local_cluster: distributed.SpecCluster, + ec2_instance_custom_tags: dict[str, str], +): + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # create several tasks that needs more power + dask_futures = await create_tasks_batch(scale_up_params) + assert dask_futures + + # run the code + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # check the instances were started + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # as the new node is already running, but is not yet connected, hence not tagged and drained + mock_docker_find_node_with_name_returns_fake_node.assert_not_called() + mock_docker_tag_node.assert_not_called() + mock_docker_set_node_availability.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type="r5n.8xlarge", + task_resources=None, + num_tasks=1, + expected_instance_type="r5n.8xlarge", + expected_num_instances=1, + ), + id="Impose r5n.8xlarge without resources", + ), + ], +) +async def test_cluster_scaling_up_more_than_allowed_max_starts_max_instances_and_not_more( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_tasks_batch: Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]], + ec2_client: EC2Client, + dask_spec_local_cluster: distributed.SpecCluster, + mock_docker_tag_node: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + mock_docker_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + mock_docker_compute_node_used_resources: mock.Mock, + mock_dask_get_worker_has_results_in_memory: mock.Mock, + mock_dask_get_worker_used_resources: mock.Mock, + ec2_instance_custom_tags: dict[str, str], + scale_up_params: _ScaleUpParams, +): + # we have nothing running now + all_instances = await 
ec2_client.describe_instances() + assert not all_instances["Reservations"] + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES > 0 + # override the number of tasks + scale_up_params.num_tasks = ( + 3 * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + scale_up_params.expected_num_instances = ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + + # create the tasks + task_futures = await create_tasks_batch(scale_up_params) + assert all(task_futures) + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + # as the new node is already running, but is not yet connected, hence not tagged and drained + mock_docker_find_node_with_name_returns_fake_node.assert_not_called() + mock_docker_tag_node.assert_not_called() + mock_docker_set_node_availability.assert_not_called() + mock_docker_compute_node_used_resources.assert_not_called() + mock_dask_get_worker_has_results_in_memory.assert_not_called() + mock_dask_get_worker_used_resources.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_running=0, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + + # 2. 
calling this multiple times should do nothing + num_useless_calls = 10 + for _ in range(num_useless_calls): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_up_more_than_allowed_with_multiple_types_max_starts_max_instances_and_not_more( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], + ec2_client: EC2Client, + dask_spec_local_cluster: distributed.SpecCluster, + create_dask_task_resources: Callable[ + [InstanceTypeType | None, Resources], DaskTaskResources + ], + mock_docker_tag_node: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + mock_docker_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + mock_docker_compute_node_used_resources: mock.Mock, + mock_dask_get_worker_has_results_in_memory: mock.Mock, + mock_dask_get_worker_used_resources: mock.Mock, + aws_allowed_ec2_instance_type_names: list[InstanceTypeType], +): + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES > 0 + num_tasks = 3 * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + + # create the tasks + task_futures = await asyncio.gather( + *( + _create_task_with_resources( + ec2_client, + ec2_instance_type, + None, + create_dask_task_resources, + create_dask_task, + ) + for ec2_instance_type in aws_allowed_ec2_instance_type_names + for _ in range(num_tasks) + ) + ) + assert all(task_futures) + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # one of each type is created with some that will have 2 instances + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == len( + aws_allowed_ec2_instance_type_names + ) + instances_found = defaultdict(int) + for reservation in all_instances["Reservations"]: + assert "Instances" in reservation + for instance in reservation["Instances"]: + assert "InstanceType" in instance + instance_type = instance["InstanceType"] + instances_found[instance_type] += 1 + + assert sorted(instances_found.keys()) == sorted(aws_allowed_ec2_instance_type_names) + assert ( + sum(instances_found.values()) + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + + # as the new node is already running, but is not yet connected, hence not tagged and drained + 
mock_docker_find_node_with_name_returns_fake_node.assert_not_called() + mock_docker_tag_node.assert_not_called() + mock_docker_set_node_availability.assert_not_called() + mock_docker_compute_node_used_resources.assert_not_called() + mock_dask_get_worker_has_results_in_memory.assert_not_called() + mock_dask_get_worker_used_resources.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_running=0, + instances_pending=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + mock_rabbitmq_post_message.reset_mock() + + # 2. calling this multiple times should do nothing + num_useless_calls = 10 + for _ in range(num_useless_calls): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == len( + aws_allowed_ec2_instance_type_names + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type=None, + task_resources=Resources( + cpus=1, ram=TypeAdapter(ByteSize).validate_python("128Gib") + ), + num_tasks=1, + expected_instance_type="r5n.4xlarge", + expected_num_instances=1, + ), + id="No explicit instance defined", + ), + ], +) +async def test_long_pending_ec2_is_detected_as_broken_terminated_and_restarted( + with_short_ec2_instances_max_start_time: EnvVarsDict, + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_tasks_batch: Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]], + ec2_client: EC2Client, + dask_spec_local_cluster: distributed.SpecCluster, + mock_find_node_with_name_returns_none: mock.Mock, + mock_docker_tag_node: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + short_ec2_instance_max_start_time: datetime.timedelta, + ec2_instance_custom_tags: dict[str, str], + scale_up_params: _ScaleUpParams, +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert ( + short_ec2_instance_max_start_time + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + # create a task that needs more power + dask_futures = await create_tasks_batch(scale_up_params) + assert dask_futures + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + + # check the instance was started and we have exactly 1 + instances = await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + # as the new node is already running, but is not yet 
connected, hence not tagged and drained + mock_find_node_with_name_returns_none.assert_not_called() + mock_docker_tag_node.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_running=0, + instances_pending=1, + ) + mock_rabbitmq_post_message.reset_mock() + + assert instances + assert "LaunchTime" in instances[0] + original_instance_launch_time: datetime.datetime = deepcopy( + instances[0]["LaunchTime"] + ) + await asyncio.sleep(1) # NOTE: we wait here since AWS does not keep microseconds + now = arrow.utcnow().datetime + + assert now > original_instance_launch_time + assert now < ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + + # 2. running again several times the autoscaler, the node does not join + for i in range(7): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + # there should be no scaling up, since there is already a pending instance + instances = await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + assert mock_find_node_with_name_returns_none.call_count == i + 1 + mock_docker_tag_node.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + dask_spec_local_cluster.scheduler_address, + instances_running=0, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + assert instances + assert "LaunchTime" in instances[0] + assert instances[0]["LaunchTime"] == original_instance_launch_time + + # 3. 
wait for the instance max start time and try again, shall terminate the instance + now = arrow.utcnow().datetime + sleep_time = ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + - now + ).total_seconds() + 1 + print( + f"--> waiting now for {sleep_time}s for the pending EC2 to be deemed as unworthy" + ) + await asyncio.sleep(sleep_time) + now = arrow.utcnow().datetime + assert now > ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + # scaling now will terminate the broken ec2 that did not connect, and directly create a replacement + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + # we have therefore 2 reservations, first instance is terminated and a second one started + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == 2 + assert "Instances" in all_instances["Reservations"][0] + assert ( + len(all_instances["Reservations"][0]["Instances"]) + == scale_up_params.expected_num_instances + ) + assert "State" in all_instances["Reservations"][0]["Instances"][0] + assert "Name" in all_instances["Reservations"][0]["Instances"][0]["State"] + assert ( + all_instances["Reservations"][0]["Instances"][0]["State"]["Name"] + == "terminated" + ) + + assert "Instances" in all_instances["Reservations"][1] + assert ( + len(all_instances["Reservations"][1]["Instances"]) + == scale_up_params.expected_num_instances + ) + assert "State" in all_instances["Reservations"][1]["Instances"][0] + assert "Name" in all_instances["Reservations"][1]["Instances"][0]["State"] + assert ( + all_instances["Reservations"][1]["Instances"][0]["State"]["Name"] == "running" + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["with_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["without_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params1, scale_up_params2", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type="g4dn.2xlarge", # 1 GPU, 8 CPUs, 32GiB + task_resources=Resources( + cpus=8, ram=TypeAdapter(ByteSize).validate_python("15Gib") + ), + num_tasks=12, + expected_instance_type="g4dn.2xlarge", # 1 GPU, 8 CPUs, 32GiB + expected_num_instances=10, + ), + _ScaleUpParams( + imposed_instance_type="g4dn.8xlarge", # 32CPUs, 128GiB + task_resources=Resources( + cpus=32, ram=TypeAdapter(ByteSize).validate_python("20480MB") + ), + num_tasks=7, + expected_instance_type="g4dn.8xlarge", # 32CPUs, 128GiB + expected_num_instances=7, + ), + id="A batch of services requiring g4dn.2xlarge and a batch requiring g4dn.8xlarge", + ), + ], +) +async def test_cluster_adapts_machines_on_the_fly( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + ec2_client: EC2Client, + initialized_app: FastAPI, + app_settings: ApplicationSettings, + create_tasks_batch: Callable[[_ScaleUpParams], Awaitable[list[distributed.Future]]], + ec2_instance_custom_tags: dict[str, str], + scale_up_params1: _ScaleUpParams, + scale_up_params2: _ScaleUpParams, + mocked_associate_ec2_instances_with_nodes: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + mock_dask_is_worker_connected: 
mock.Mock, + create_fake_node: Callable[..., DockerNode], + mock_docker_tag_node: mock.Mock, + spied_cluster_analysis: MockType, + mocker: MockerFixture, +): + # pre-requisites + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES > 0 + assert ( + scale_up_params1.num_tasks + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ), "this test requires to run a first batch of more services than the maximum number of instances allowed" + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # + # 1. create the first batch of services requiring the initial machines + first_batch_tasks = await create_tasks_batch(scale_up_params1) + assert first_batch_tasks + + # it will only scale once and do nothing else + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params1.expected_num_instances, + expected_instance_type=scale_up_params1.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + + assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=0, + ) + mocked_associate_ec2_instances_with_nodes.assert_called_once_with([], []) + mocked_associate_ec2_instances_with_nodes.reset_mock() + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, None, None + ) + mock_docker_tag_node.assert_not_called() + mock_dask_is_worker_connected.assert_not_called() + + # + # 2. now the machines are associated + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + mocked_associate_ec2_instances_with_nodes.assert_called_once() + mock_docker_tag_node.assert_called() + assert ( + mock_docker_tag_node.call_count + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + assert analyzed_cluster.active_nodes + + # + # 3. 
now we start the second batch of services requiring a different type of machines + second_batch_tasks = await create_tasks_batch(scale_up_params2) + assert second_batch_tasks + + # scaling will do nothing since we have hit the maximum number of machines + for _ in range(3): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + await assert_autoscaled_computational_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params1.expected_num_instances, + expected_instance_type=scale_up_params1.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=3, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + + # + # 4.now we simulate that some of the services in the 1st batch have completed and that we are 1 below the max + # a machine should switch off and another type should be started (just pop the future out of scope) + for _ in range( + scale_up_params1.num_tasks + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + + 1 + ): + first_batch_tasks.pop() + + # first call to auto_scale_cluster will mark 1 node as empty + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.set_node_found_empty", + autospec=True, + ) as mock_docker_set_node_found_empty: + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + # the last machine is found empty + mock_docker_set_node_found_empty.assert_called_with( + mock.ANY, + analyzed_cluster.active_nodes[-1].node, + empty=True, + ) + + # now we mock the get_node_found_empty so the next call will actually drain the machine + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_empty_since", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING, + ) as mocked_get_node_empty_since: + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + mocked_get_node_empty_since.assert_called_once() + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + # now scaling again should find the drained machine + drained_machine_instance_id = analyzed_cluster.active_nodes[-1].ec2_instance.id + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, drained_machine_instance_id, None + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert 
analyzed_cluster.drained_nodes + + # this will initiate termination now + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_last_readyness_update", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION, + ): + mock_docker_tag_node.reset_mock() + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + mock_docker_tag_node.assert_called_with( + mock.ANY, + analyzed_cluster.drained_nodes[-1].node, + tags=mock.ANY, + available=False, + ) + + # scaling again should find the terminating machine + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, drained_machine_instance_id, drained_machine_instance_id + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + assert analyzed_cluster.terminating_nodes + + # now this will terminate it and straight away start a new machine type + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_termination_started_since", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION, + ): + mocked_docker_remove_node = mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.remove_nodes", + return_value=None, + autospec=True, + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=ComputationalAutoscaling() + ) + mocked_docker_remove_node.assert_called_once() + + # now let's check what we have + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == 2, "there should be 2 Reservations" + reservation1 = all_instances["Reservations"][0] + assert "Instances" in reservation1 + assert len(reservation1["Instances"]) == ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ), f"expected {app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES} EC2 instances, found {len(reservation1['Instances'])}" + for instance in reservation1["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == scale_up_params1.expected_instance_type + assert "InstanceId" in instance + assert "State" in instance + assert "Name" in instance["State"] + if instance["InstanceId"] == drained_machine_instance_id: + assert instance["State"]["Name"] == "terminated" + else: + assert instance["State"]["Name"] == "running" + + reservation2 = all_instances["Reservations"][1] + assert "Instances" in reservation2 + assert ( + len(reservation2["Instances"]) == 1 + ), f"expected 1 EC2 instances, found {len(reservation2['Instances'])}" + for instance in reservation2["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == scale_up_params2.expected_instance_type diff --git a/services/autoscaling/tests/unit/test_modules_auto_scaling_dynamic.py 
b/services/autoscaling/tests/unit/test_modules_auto_scaling_dynamic.py new file mode 100644 index 00000000000..a46a75c8006 --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_auto_scaling_dynamic.py @@ -0,0 +1,2335 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=too-many-statements +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +import datetime +import logging +import random +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Sequence +from copy import deepcopy +from dataclasses import dataclass +from typing import Any, cast +from unittest import mock + +import aiodocker +import arrow +import pytest +import tenacity +from aws_library.ec2 import EC2InstanceBootSpecific, EC2InstanceData, Resources +from fastapi import FastAPI +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + DockerGenericTag, + DockerLabelKey, + StandardSimcoreDockerLabels, +) +from models_library.generated_models.docker_rest_api import ( + Availability, + Node, + NodeState, + NodeStatus, + Service, + Task, +) +from models_library.rabbitmq_messages import RabbitAutoscalingStatusMessage +from pydantic import ByteSize, TypeAdapter +from pytest_mock import MockType +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.autoscaling import ( + assert_cluster_state, + create_fake_association, +) +from pytest_simcore.helpers.aws_ec2 import ( + assert_autoscaled_dynamic_ec2_instances, + assert_autoscaled_dynamic_warm_pools_ec2_instances, +) +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_autoscaling.constants import BUFFER_MACHINE_TAG_KEY +from simcore_service_autoscaling.core.settings import ApplicationSettings +from simcore_service_autoscaling.models import AssociatedInstance, Cluster +from simcore_service_autoscaling.modules.auto_scaling_core import ( + _activate_drained_nodes, + _find_terminateable_instances, + _try_scale_down_cluster, + auto_scale_cluster, +) +from simcore_service_autoscaling.modules.auto_scaling_mode_dynamic import ( + DynamicAutoscaling, +) +from simcore_service_autoscaling.modules.docker import ( + AutoscalingDocker, + get_docker_client, +) +from simcore_service_autoscaling.utils.auto_scaling_core import ( + node_host_name_from_ec2_private_dns, +) +from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY, + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY, + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, +) +from types_aiobotocore_ec2.client import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.type_defs import FilterTypeDef, InstanceTypeDef + + +@pytest.fixture +def mock_terminate_instances(mocker: MockerFixture) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.ec2.SimcoreEC2API.terminate_instances", + autospec=True, + ) + + +@pytest.fixture +def mock_launch_instances( + mocker: MockerFixture, + aws_instance_private_dns: str, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.ec2.SimcoreEC2API.launch_instances", + autospec=True, + 
return_value=fake_ec2_instance_data(aws_private_dns=aws_instance_private_dns), + ) + + +@pytest.fixture +def mock_rabbitmq_post_message(mocker: MockerFixture) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.utils.rabbitmq.post_message", autospec=True + ) + + +@pytest.fixture +def mock_find_node_with_name_returns_fake_node( + mocker: MockerFixture, fake_node: Node +) -> Iterator[mock.Mock]: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.find_node_with_name", + autospec=True, + return_value=fake_node, + ) + + +@pytest.fixture +def mock_remove_nodes(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.remove_nodes", + autospec=True, + ) + + +@pytest.fixture +def mock_compute_node_used_resources(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.compute_node_used_resources", + autospec=True, + return_value=Resources.create_as_empty(), + ) + + +@pytest.fixture +def with_valid_time_before_termination( + monkeypatch: pytest.MonkeyPatch, +) -> datetime.timedelta: + time = "00:11:00" + monkeypatch.setenv("EC2_INSTANCES_TIME_BEFORE_TERMINATION", time) + return TypeAdapter(datetime.timedelta).validate_python(time) + + +@pytest.fixture +async def drained_host_node( + host_node: Node, async_docker_client: aiodocker.Docker +) -> AsyncIterator[Node]: + assert host_node.id + assert host_node.version + assert host_node.version.index + assert host_node.spec + assert host_node.spec.availability + assert host_node.spec.role + + old_availability = host_node.spec.availability + await async_docker_client.nodes.update( + node_id=host_node.id, + version=host_node.version.index, + spec={ + "Availability": "drain", + "Labels": host_node.spec.labels, + "Role": host_node.spec.role.value, + }, + ) + drained_node = TypeAdapter(Node).validate_python( + await async_docker_client.nodes.inspect(node_id=host_node.id) + ) + yield drained_node + # revert + # NOTE: getting the node again as the version might have changed + drained_node = TypeAdapter(Node).validate_python( + await async_docker_client.nodes.inspect(node_id=host_node.id) + ) + assert drained_node.id + assert drained_node.version + assert drained_node.version.index + assert drained_node.spec + assert drained_node.spec.role + await async_docker_client.nodes.update( + node_id=drained_node.id, + version=drained_node.version.index, + spec={ + "Availability": old_availability.value, + "Labels": drained_node.spec.labels, + "Role": drained_node.spec.role.value, + }, + ) + + +@pytest.fixture +def minimal_configuration( + with_labelize_drain_nodes: EnvVarsDict, + app_with_docker_join_drained: EnvVarsDict, + docker_swarm: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + enabled_dynamic_mode: EnvVarsDict, + mocked_ec2_instances_envs: EnvVarsDict, + disabled_rabbitmq: None, + disable_autoscaling_background_task: None, + disable_buffers_pool_background_task: None, + mocked_redis_server: None, +) -> None: ... 
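+ # NOTE: the `minimal_configuration` fixture above is intentionally empty: requesting it only pulls in the fixtures listed in its signature (docker swarm, mocked EC2/SSM servers, dynamic mode env vars, disabled rabbitmq/background tasks, mocked redis) to set up the dynamic autoscaling test environment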
+ + +def _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message: mock.Mock, + app_settings: ApplicationSettings, + app: FastAPI, + **message_update_kwargs, +): + assert app_settings.AUTOSCALING_NODES_MONITORING + default_message = RabbitAutoscalingStatusMessage( + origin=f"dynamic:node_labels={app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS}", + nodes_total=0, + nodes_active=0, + nodes_drained=0, + cluster_total_resources=Resources.create_as_empty().model_dump(), + cluster_used_resources=Resources.create_as_empty().model_dump(), + instances_pending=0, + instances_running=0, + ) + expected_message = default_message.model_copy(update=message_update_kwargs) + assert mock_rabbitmq_post_message.call_args == mock.call(app, expected_message) + + +@pytest.fixture +def instance_type_filters( + ec2_instance_custom_tags: dict[str, str], +) -> Sequence[FilterTypeDef]: + return [ + *[ + FilterTypeDef( + Name="tag-key", + Values=[tag_key], + ) + for tag_key in ec2_instance_custom_tags + ], + FilterTypeDef( + Name="instance-state-name", + Values=["pending", "running"], + ), + ] + + +@pytest.fixture +def stopped_instance_type_filters( + instance_type_filters: Sequence[FilterTypeDef], +) -> Sequence[FilterTypeDef]: + copied_filters = deepcopy(instance_type_filters) + copied_filters[-1]["Values"] = ["stopped"] + return copied_filters + + +@dataclass(frozen=True) +class _ScaleUpParams: + imposed_instance_type: InstanceTypeType | None + service_resources: Resources + num_services: int + expected_instance_type: InstanceTypeType + expected_num_instances: int + + +@pytest.fixture +async def create_services_batch( + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str], str, list[str]], Awaitable[Service] + ], + task_template: dict[str, Any], + create_task_reservations: Callable[[int, int], dict[str, Any]], + service_monitored_labels: dict[DockerLabelKey, str], + osparc_docker_label_keys: StandardSimcoreDockerLabels, +) -> Callable[[_ScaleUpParams], Awaitable[list[Service]]]: + async def _(scale_up_params: _ScaleUpParams) -> list[Service]: + return await asyncio.gather( + *( + create_service( + task_template + | create_task_reservations( + int(scale_up_params.service_resources.cpus), + scale_up_params.service_resources.ram, + ), + service_monitored_labels + | osparc_docker_label_keys.to_simcore_runtime_docker_labels(), + "pending", + ( + [ + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}=={scale_up_params.imposed_instance_type}" + ] + if scale_up_params.imposed_instance_type + else [] + ), + ) + for _ in range(scale_up_params.num_services) + ) + ) + + return _ + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_with_no_services_does_nothing( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + mock_launch_instances: mock.Mock, + mock_terminate_instances: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, +): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mock_launch_instances.assert_not_called() + mock_terminate_instances.assert_not_called() + 
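+ # even with no services, a status message with the default (all-zero) values is still expected on rabbitmq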
_assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, app_settings, initialized_app + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_cluster_scaling_with_no_services_and_machine_buffer_starts_expected_machines( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + with_instances_machines_hot_buffer: EnvVarsDict, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + aws_allowed_ec2_instance_type_names_env: list[str], + mock_rabbitmq_post_message: mock.Mock, + mock_compute_node_used_resources: mock.Mock, + mock_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_tag_node: mock.Mock, + fake_node: Node, + ec2_client: EC2Client, + ec2_instance_custom_tags: dict[str, str], + instance_type_filters: Sequence[FilterTypeDef], +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + expected_instance_type=cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + instances_pending=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + ) + mock_rabbitmq_post_message.reset_mock() + # calling again should attach the new nodes to the reserve, but nothing should start + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + expected_instance_type=cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert fake_node.description + assert fake_node.description.resources + assert fake_node.description.resources.nano_cp_us + assert fake_node.description.resources.memory_bytes + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + nodes_total=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + nodes_drained=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + instances_running=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + cluster_total_resources={ + "cpus": app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + * fake_node.description.resources.nano_cp_us + / 1e9, + "ram": app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + * 
fake_node.description.resources.memory_bytes, + }, + ) + + # calling it again should not create anything new + for _ in range(10): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + expected_instance_type=cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type=None, + service_resources=Resources( + cpus=4, ram=TypeAdapter(ByteSize).validate_python("128000Gib") + ), + num_services=1, + expected_instance_type="r5n.4xlarge", + expected_num_instances=1, + ), + id="No explicit instance defined", + ), + ], +) +async def test_cluster_scaling_with_service_asking_for_too_much_resources_starts_nothing( + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]], + mock_launch_instances: mock.Mock, + mock_terminate_instances: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + scale_up_params: _ScaleUpParams, +): + await create_services_batch(scale_up_params) + + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mock_launch_instances.assert_not_called() + mock_terminate_instances.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, app_settings, initialized_app + ) + + +async def _test_cluster_scaling_up_and_down( # noqa: PLR0915 + *, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]], + ec2_client: EC2Client, + mock_docker_tag_node: mock.Mock, + fake_node: Node, + mock_rabbitmq_post_message: mock.Mock, + mock_find_node_with_name_returns_fake_node: mock.Mock, + mock_docker_set_node_availability: mock.Mock, + mock_compute_node_used_resources: mock.Mock, + mocker: MockerFixture, + async_docker_client: aiodocker.Docker, + with_drain_nodes_labelled: bool, + ec2_instance_custom_tags: dict[str, str], + scale_up_params: _ScaleUpParams, + instance_type_filters: Sequence[FilterTypeDef], + run_against_moto: bool, + spied_cluster_analysis: MockType, +): + # we have nothing running now + all_instances = await ec2_client.describe_instances(Filters=instance_type_filters) + assert not all_instances["Reservations"] + + assert ( + scale_up_params.expected_num_instances == 1 + ), "This test is not made to work with more than 1 expected instance. 
so please adapt if needed" + + # create the service(s) + created_docker_services = await create_services_batch(scale_up_params) + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + assert_cluster_state( + spied_cluster_analysis, expected_calls=1, expected_num_machines=0 + ) + + with log_context(logging.INFO, "wait for EC2 instances to be running") as ctx: + + @tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay(5 if run_against_moto else 120), + retry=tenacity.retry_if_exception_type(AssertionError), + reraise=True, + before_sleep=tenacity.before_sleep_log(ctx.logger, logging.INFO), + after=tenacity.after_log(ctx.logger, logging.INFO), + ) + async def _assert_wait_for_ec2_instances_running() -> list[InstanceTypeDef]: + # check the instance was started and we have exactly 1 + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + + # as the new node is already running, but is not yet connected, hence not tagged and drained + mock_find_node_with_name_returns_fake_node.assert_not_called() + mock_docker_tag_node.assert_not_called() + mock_docker_set_node_availability.assert_not_called() + mock_compute_node_used_resources.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + instances_running=0, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + + return instances + + created_instances = await _assert_wait_for_ec2_instances_running() + + # 2. 
running this again should not scale again, but tag the node and make it available + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + assert_cluster_state( + spied_cluster_analysis, expected_calls=1, expected_num_machines=1 + ) + + fake_attached_node = deepcopy(fake_node) + assert fake_attached_node.spec + fake_attached_node.spec.availability = ( + Availability.active if with_drain_nodes_labelled else Availability.drain + ) + assert fake_attached_node.spec.labels + assert app_settings.AUTOSCALING_NODES_MONITORING + expected_docker_node_tags = { + tag_key: "true" + for tag_key in ( + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS + + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS + ) + } | { + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY: scale_up_params.expected_instance_type + } + fake_attached_node.spec.labels |= expected_docker_node_tags | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false" + } + + # the node is tagged and made active right away since we still have the pending task + mock_find_node_with_name_returns_fake_node.assert_called_once() + mock_find_node_with_name_returns_fake_node.reset_mock() + + assert mock_docker_tag_node.call_count == 3 + assert fake_node.spec + assert fake_node.spec.labels + # check attach call + assert mock_docker_tag_node.call_args_list[0] == mock.call( + get_docker_client(initialized_app), + fake_node, + tags=fake_node.spec.labels + | expected_docker_node_tags + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=with_drain_nodes_labelled, + ) + # update our fake node + fake_attached_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + mock_docker_tag_node.call_args_list[2][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + # check the activate time is later than attach time + assert arrow.get( + mock_docker_tag_node.call_args_list[1][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) > arrow.get( + mock_docker_tag_node.call_args_list[0][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + fake_attached_node.spec.availability = Availability.active + mock_compute_node_used_resources.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + ) + mock_compute_node_used_resources.reset_mock() + # check activate call + + assert mock_docker_tag_node.call_args_list[2] == mock.call( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_node.spec.labels + | expected_docker_node_tags + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "true", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=True, + ) + # update our fake node + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + mock_docker_tag_node.call_args_list[1][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) + mock_docker_tag_node.reset_mock() + mock_docker_set_node_availability.assert_not_called() + + # check the number of instances did not change and is still running + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + 
instance_filters=instance_type_filters, + ) + assert created_instances == instances + assert len(instances) == scale_up_params.expected_num_instances + assert "PrivateDnsName" in instances[0] + internal_dns_name = instances[0]["PrivateDnsName"].removesuffix(".ec2.internal") + + # check rabbit messages were sent, we do have worker + assert fake_attached_node.description + assert fake_attached_node.description.resources + assert fake_attached_node.description.resources.nano_cp_us + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + nodes_total=scale_up_params.expected_num_instances, + nodes_active=scale_up_params.expected_num_instances, + cluster_total_resources={ + "cpus": fake_attached_node.description.resources.nano_cp_us / 1e9, + "ram": fake_attached_node.description.resources.memory_bytes, + }, + cluster_used_resources={ + "cpus": float(0), + "ram": 0, + }, + instances_running=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + + # now we have 1 monitored node that needs to be mocked + fake_attached_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" + fake_attached_node.status = NodeStatus( + state=NodeState.ready, message=None, addr=None + ) + fake_attached_node.spec.availability = Availability.active + fake_attached_node.description.hostname = internal_dns_name + + auto_scaling_mode = DynamicAutoscaling() + mocker.patch.object( + auto_scaling_mode, + "get_monitored_nodes", + autospec=True, + return_value=[fake_attached_node], + ) + + # 3. calling this multiple times should do nothing + num_useless_calls = 10 + for _ in range(num_useless_calls): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=auto_scaling_mode + ) + mock_compute_node_used_resources.assert_called() + assert mock_compute_node_used_resources.call_count == num_useless_calls * 2 + mock_compute_node_used_resources.reset_mock() + mock_find_node_with_name_returns_fake_node.assert_not_called() + assert mock_docker_tag_node.call_count == num_useless_calls + mock_docker_tag_node.reset_mock() + mock_docker_set_node_availability.assert_not_called() + # check the number of instances did not change and is still running + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert created_instances == instances + + # check rabbit messages were sent + mock_rabbitmq_post_message.assert_called() + assert mock_rabbitmq_post_message.call_count == num_useless_calls + mock_rabbitmq_post_message.reset_mock() + + # + # 4. 
now scaling down by removing the docker service + # + await asyncio.gather( + *( + async_docker_client.services.delete(d.id) + for d in created_docker_services + if d.id + ) + ) + + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + # check the number of instances did not change and is still running + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert created_instances == instances + # the node shall be waiting before draining + mock_docker_set_node_availability.assert_not_called() + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=True, + ) + mock_docker_tag_node.reset_mock() + + # now update the fake node to have the required label as expected + assert app_settings.AUTOSCALING_EC2_INSTANCES + fake_attached_node.spec.labels[_OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY] = ( + arrow.utcnow() + .shift( + seconds=-app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING.total_seconds() + - 1 + ) + .datetime.isoformat() + ) + + # now it will drain + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mock_docker_set_node_availability.assert_not_called() + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=with_drain_nodes_labelled, + ) + # check the datetime was updated + assert arrow.get( + mock_docker_tag_node.call_args_list[0][1]["tags"][ + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY + ] + ) > arrow.get( + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] + ) + mock_docker_tag_node.reset_mock() + + # calling again does the exact same + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mock_docker_set_node_availability.assert_not_called() + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=with_drain_nodes_labelled, + ) + mock_docker_tag_node.reset_mock() + + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert created_instances == instances + + # we artifically set the node to drain + if not with_drain_nodes_labelled: + fake_attached_node.spec.availability = Availability.drain + fake_attached_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "false" + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + datetime.datetime.now(tz=datetime.UTC).isoformat() + ) + + # the node will not 
be terminated before the timeout triggers + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert ( + datetime.timedelta(seconds=5) + < app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + ) + mocked_docker_remove_node = mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.remove_nodes", + return_value=None, + autospec=True, + ) + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_not_called() + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert created_instances == instances + + # now changing the last update timepoint will trigger the node removal process + fake_attached_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + datetime.datetime.now(tz=datetime.UTC) + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION + - datetime.timedelta(seconds=1) + ).isoformat() + # first making sure the node is drained, then terminate it after a delay to let it drain + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_not_called() + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert created_instances == instances + mock_docker_tag_node.assert_called_once_with( + get_docker_client(initialized_app), + fake_attached_node, + tags=fake_attached_node.spec.labels + | { + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY: mock.ANY, + }, + available=False, + ) + mock_docker_tag_node.reset_mock() + # set the fake node to drain + fake_attached_node.spec.availability = Availability.drain + fake_attached_node.spec.labels[_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY] = ( + arrow.utcnow() + .shift( + seconds=-app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_FINAL_TERMINATION.total_seconds() + - 1 + ) + .datetime.isoformat() + ) + + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + mocked_docker_remove_node.assert_called_once_with( + mock.ANY, nodes=[fake_attached_node], force=True + ) + # we need to check for the right instance here + + with log_context(logging.INFO, "wait for EC2 instances to be terminated") as ctx: + + @tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay(5 if run_against_moto else 120), + retry=tenacity.retry_if_exception_type(AssertionError), + reraise=True, + before_sleep=tenacity.before_sleep_log(ctx.logger, logging.INFO), + after=tenacity.after_log(ctx.logger, logging.INFO), + ) + async def _assert_wait_for_ec2_instances_terminated() -> None: + assert created_instances[0] + assert "InstanceId" in created_instances[0] + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + 
expected_instance_type=scale_up_params.expected_instance_type,
+                expected_instance_state="terminated",
+                expected_additional_tag_keys=list(ec2_instance_custom_tags),
+                instance_filters=[
+                    FilterTypeDef(
+                        Name="instance-id", Values=[created_instances[0]["InstanceId"]]
+                    )
+                ],
+            )
+
+    await _assert_wait_for_ec2_instances_terminated()
+
+
+@pytest.mark.acceptance_test
+@pytest.mark.parametrize(
+    "scale_up_params",
+    [
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type=None,
+                service_resources=Resources(
+                    cpus=4, ram=TypeAdapter(ByteSize).validate_python("128Gib")
+                ),
+                num_services=1,
+                expected_instance_type="r5n.4xlarge",
+                expected_num_instances=1,
+            ),
+            id="No explicit instance defined",
+        ),
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type="t2.xlarge",
+                service_resources=Resources(
+                    cpus=4, ram=TypeAdapter(ByteSize).validate_python("4Gib")
+                ),
+                num_services=1,
+                expected_instance_type="t2.xlarge",
+                expected_num_instances=1,
+            ),
+            id="Explicitly ask for t2.xlarge",
+        ),
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type="r5n.8xlarge",
+                service_resources=Resources(
+                    cpus=4, ram=TypeAdapter(ByteSize).validate_python("128Gib")
+                ),
+                num_services=1,
+                expected_instance_type="r5n.8xlarge",
+                expected_num_instances=1,
+            ),
+            id="Explicitly ask for r5n.8xlarge",
+        ),
+    ],
+)
+async def test_cluster_scaling_up_and_down(
+    minimal_configuration: None,
+    app_settings: ApplicationSettings,
+    initialized_app: FastAPI,
+    create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]],
+    ec2_client: EC2Client,
+    mock_docker_tag_node: mock.Mock,
+    fake_node: Node,
+    mock_rabbitmq_post_message: mock.Mock,
+    mock_find_node_with_name_returns_fake_node: mock.Mock,
+    mock_docker_set_node_availability: mock.Mock,
+    mock_compute_node_used_resources: mock.Mock,
+    mocker: MockerFixture,
+    async_docker_client: aiodocker.Docker,
+    with_drain_nodes_labelled: bool,
+    ec2_instance_custom_tags: dict[str, str],
+    instance_type_filters: Sequence[FilterTypeDef],
+    scale_up_params: _ScaleUpParams,
+    spied_cluster_analysis: MockType,
+):
+    await _test_cluster_scaling_up_and_down(
+        app_settings=app_settings,
+        initialized_app=initialized_app,
+        create_services_batch=create_services_batch,
+        ec2_client=ec2_client,
+        mock_docker_tag_node=mock_docker_tag_node,
+        fake_node=fake_node,
+        mock_rabbitmq_post_message=mock_rabbitmq_post_message,
+        mock_find_node_with_name_returns_fake_node=mock_find_node_with_name_returns_fake_node,
+        mock_docker_set_node_availability=mock_docker_set_node_availability,
+        mock_compute_node_used_resources=mock_compute_node_used_resources,
+        mocker=mocker,
+        async_docker_client=async_docker_client,
+        with_drain_nodes_labelled=with_drain_nodes_labelled,
+        ec2_instance_custom_tags=ec2_instance_custom_tags,
+        scale_up_params=scale_up_params,
+        instance_type_filters=instance_type_filters,
+        run_against_moto=True,
+        spied_cluster_analysis=spied_cluster_analysis,
+    )
+
+
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_docker_join_drained",
+    ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_drain_nodes_labelled",
+    ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    "scale_up_params",
+    [
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type=None,
+                service_resources=Resources(
+                    cpus=4, ram=TypeAdapter(ByteSize).validate_python("62Gib")
+                ),
+                num_services=1,
+                expected_instance_type="r6a.2xlarge",
+                expected_num_instances=1,
+            ),
+            id="No explicit instance defined",
+        ),
+    ],
+)
+async def test_cluster_scaling_up_and_down_against_aws(
+    skip_if_external_envfile_dict: None,
+    external_ec2_instances_allowed_types: None | dict[str, EC2InstanceBootSpecific],
+    with_labelize_drain_nodes: EnvVarsDict,
+    app_with_docker_join_drained: EnvVarsDict,
+    docker_swarm: None,
+    disabled_rabbitmq: None,
+    disable_autoscaling_background_task: None,
+    disable_buffers_pool_background_task: None,
+    mocked_redis_server: None,
+    external_envfile_dict: EnvVarsDict,
+    app_settings: ApplicationSettings,
+    initialized_app: FastAPI,
+    create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]],
+    ec2_client: EC2Client,
+    mock_docker_tag_node: mock.Mock,
+    fake_node: Node,
+    mock_rabbitmq_post_message: mock.Mock,
+    mock_find_node_with_name_returns_fake_node: mock.Mock,
+    mock_docker_set_node_availability: mock.Mock,
+    mock_compute_node_used_resources: mock.Mock,
+    mocker: MockerFixture,
+    async_docker_client: aiodocker.Docker,
+    with_drain_nodes_labelled: bool,
+    ec2_instance_custom_tags: dict[str, str],
+    instance_type_filters: Sequence[FilterTypeDef],
+    scale_up_params: _ScaleUpParams,
+    spied_cluster_analysis: MockType,
+):
+    # ensure we run a test that makes sense
+    assert external_ec2_instances_allowed_types
+    assert (
+        scale_up_params.expected_instance_type in external_ec2_instances_allowed_types
+    ), (
+        f"ensure the expected created instance is at least allowed: you expect {scale_up_params.expected_instance_type}."
+        f" The passed external ENV allows for {list(external_ec2_instances_allowed_types)}"
+    )
+    await _test_cluster_scaling_up_and_down(
+        app_settings=app_settings,
+        initialized_app=initialized_app,
+        create_services_batch=create_services_batch,
+        ec2_client=ec2_client,
+        mock_docker_tag_node=mock_docker_tag_node,
+        fake_node=fake_node,
+        mock_rabbitmq_post_message=mock_rabbitmq_post_message,
+        mock_find_node_with_name_returns_fake_node=mock_find_node_with_name_returns_fake_node,
+        mock_docker_set_node_availability=mock_docker_set_node_availability,
+        mock_compute_node_used_resources=mock_compute_node_used_resources,
+        mocker=mocker,
+        async_docker_client=async_docker_client,
+        with_drain_nodes_labelled=with_drain_nodes_labelled,
+        ec2_instance_custom_tags=ec2_instance_custom_tags,
+        scale_up_params=scale_up_params,
+        instance_type_filters=instance_type_filters,
+        run_against_moto=False,
+        spied_cluster_analysis=spied_cluster_analysis,
+    )
+
+
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_docker_join_drained",
+    ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_drain_nodes_labelled",
+    ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    "scale_up_params",
+    [
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type=None,
+                service_resources=Resources(
+                    cpus=5, ram=TypeAdapter(ByteSize).validate_python("36Gib")
+                ),
+                num_services=10,
+                expected_instance_type="r5n.4xlarge",  # 16 CPUs, 128GiB
+                expected_num_instances=4,
+            ),
+            id="sim4life-light",
+        ),
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type="g4dn.8xlarge",
+                service_resources=Resources(
+                    cpus=5, ram=TypeAdapter(ByteSize).validate_python("20480MB")
+                ),
+                num_services=7,
+                expected_instance_type="g4dn.8xlarge",  # 1 GPU, 32 CPUs, 128GiB
+                expected_num_instances=2,
+            ),
+            id="sim4life",
+        ),
+    ],
+)
+async def test_cluster_scaling_up_starts_multiple_instances(
+    patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock,
+    minimal_configuration: None,
+    app_settings: ApplicationSettings,
+    initialized_app: FastAPI,
+    create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]],
+    ec2_client: EC2Client,
+    mock_docker_tag_node: mock.Mock,
+    scale_up_params: _ScaleUpParams,
+    mock_rabbitmq_post_message: mock.Mock,
+    mock_find_node_with_name_returns_fake_node: mock.Mock,
+    mock_docker_set_node_availability: mock.Mock,
+    ec2_instance_custom_tags: dict[str, str],
+    instance_type_filters: Sequence[FilterTypeDef],
+):
+    # we have nothing running now
+    all_instances = await ec2_client.describe_instances()
+    assert not all_instances["Reservations"]
+
+    # create several tasks that need more power
+    await create_services_batch(scale_up_params)
+
+    # run the code
+    await auto_scale_cluster(
+        app=initialized_app, auto_scaling_mode=DynamicAutoscaling()
+    )
+
+    # check the instances were started
+    await assert_autoscaled_dynamic_ec2_instances(
+        ec2_client,
+        expected_num_reservations=1,
+        expected_num_instances=scale_up_params.expected_num_instances,
+        expected_instance_type=scale_up_params.expected_instance_type,
+        expected_instance_state="running",
+        expected_additional_tag_keys=list(ec2_instance_custom_tags),
+        instance_filters=instance_type_filters,
+    )
+
+    # as the new node is already running, but is not yet connected, hence not tagged and drained
+    mock_find_node_with_name_returns_fake_node.assert_not_called()
+    mock_docker_tag_node.assert_not_called()
+    mock_docker_set_node_availability.assert_not_called()
+    # check rabbit messages were sent
+    _assert_rabbit_autoscaling_message_sent(
+        mock_rabbitmq_post_message,
+        app_settings,
+        initialized_app,
+        instances_pending=scale_up_params.expected_num_instances,
+    )
+    mock_rabbitmq_post_message.reset_mock()
+
+
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_docker_join_drained",
+    ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options
+    "with_drain_nodes_labelled",
+    ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"],
+    indirect=True,
+)
+@pytest.mark.parametrize(
+    "scale_up_params1, scale_up_params2",
+    [
+        pytest.param(
+            _ScaleUpParams(
+                imposed_instance_type="g4dn.2xlarge",  # 1 GPU, 8 CPUs, 32GiB
+                service_resources=Resources(
+                    cpus=8, ram=TypeAdapter(ByteSize).validate_python("15Gib")
+                ),
+                num_services=12,
+                expected_instance_type="g4dn.2xlarge",  # 1 GPU, 8 CPUs, 32GiB
+                expected_num_instances=10,
+            ),
+            _ScaleUpParams(
+                imposed_instance_type="g4dn.8xlarge",  # 32 CPUs, 128GiB
+                service_resources=Resources(
+                    cpus=32, ram=TypeAdapter(ByteSize).validate_python("20480MB")
+                ),
+                num_services=7,
+                expected_instance_type="g4dn.8xlarge",  # 32 CPUs, 128GiB
+                expected_num_instances=7,
+            ),
+            id="A batch of services requiring g4dn.2xlarge and a batch requiring g4dn.8xlarge",
+        ),
+    ],
+)
+async def test_cluster_adapts_machines_on_the_fly(  # noqa: PLR0915
+    patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock,
+    minimal_configuration: None,
+    ec2_client: EC2Client,
+    initialized_app: FastAPI,
+    app_settings: 
ApplicationSettings, + create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]], + ec2_instance_custom_tags: dict[str, str], + instance_type_filters: Sequence[FilterTypeDef], + async_docker_client: aiodocker.Docker, + scale_up_params1: _ScaleUpParams, + scale_up_params2: _ScaleUpParams, + mocked_associate_ec2_instances_with_nodes: mock.Mock, + create_fake_node: Callable[..., Node], + mock_docker_tag_node: mock.Mock, + mock_compute_node_used_resources: mock.Mock, + spied_cluster_analysis: MockType, + mocker: MockerFixture, +): + # pre-requisites + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES > 0 + assert ( + scale_up_params1.num_services + >= app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ), "this test requires to run a first batch of more services than the maximum number of instances allowed" + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # + # 1. create the first batch of services requiring the initial machines + first_batch_services = await create_services_batch(scale_up_params1) + + # it will only scale once and do nothing else + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params1.expected_num_instances, + expected_instance_type=scale_up_params1.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=0, + ) + mocked_associate_ec2_instances_with_nodes.assert_called_once_with([], []) + mocked_associate_ec2_instances_with_nodes.reset_mock() + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, None, None + ) + + # + # 2. now the machines are associated + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + mocked_associate_ec2_instances_with_nodes.assert_called_once() + mock_docker_tag_node.assert_called() + assert ( + mock_docker_tag_node.call_count + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ) + assert analyzed_cluster.active_nodes + + # + # 3. 
now we start the second batch of services requiring a different type of machines + await create_services_batch(scale_up_params2) + + # scaling will do nothing since we have hit the maximum number of machines + for _ in range(3): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params1.expected_num_instances, + expected_instance_type=scale_up_params1.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=3, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + + # + # 4.now we simulate that some of the services in the 1st batch have completed and that we are 1 below the max + # a machine should switch off and another type should be started + completed_services_to_stop = random.sample( + first_batch_services, + scale_up_params1.num_services + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + + 1, + ) + await asyncio.gather( + *( + async_docker_client.services.delete(s.id) + for s in completed_services_to_stop + if s.id + ) + ) + + # first call to auto_scale_cluster will mark 1 node as empty + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.set_node_found_empty", + autospec=True, + ) as mock_docker_set_node_found_empty: + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + # the last machine is found empty + mock_docker_set_node_found_empty.assert_called_with( + mock.ANY, + analyzed_cluster.active_nodes[-1].node, + empty=True, + ) + + # now we mock the get_node_found_empty so the next call will actually drain the machine + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_empty_since", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_DRAINING, + ) as mocked_get_node_empty_since: + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mocked_get_node_empty_since.assert_called_once() + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + # now scaling again should find the drained machine + drained_machine_instance_id = analyzed_cluster.active_nodes[-1].ec2_instance.id + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, drained_machine_instance_id, None + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + 
expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert analyzed_cluster.drained_nodes + + # this will initiate termination now + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_last_readyness_update", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION, + ): + mock_docker_tag_node.reset_mock() + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + mock_docker_tag_node.assert_called_with( + mock.ANY, + analyzed_cluster.drained_nodes[-1].node, + tags=mock.ANY, + available=False, + ) + + # scaling again should find the terminating machine + mocked_associate_ec2_instances_with_nodes.side_effect = create_fake_association( + create_fake_node, drained_machine_instance_id, drained_machine_instance_id + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES, + ) + assert analyzed_cluster.active_nodes + assert not analyzed_cluster.drained_nodes + assert analyzed_cluster.terminating_nodes + + # now this will terminate it and straight away start a new machine type + with mock.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.get_node_termination_started_since", + autospec=True, + return_value=arrow.utcnow().datetime + - 1.5 + * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_TIME_BEFORE_TERMINATION, + ): + mocked_docker_remove_node = mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_core.utils_docker.remove_nodes", + return_value=None, + autospec=True, + ) + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mocked_docker_remove_node.assert_called_once() + + # now let's check what we have + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == 2, "there should be 2 Reservations" + reservation1 = all_instances["Reservations"][0] + assert "Instances" in reservation1 + assert len(reservation1["Instances"]) == ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES + ), f"expected {app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES} EC2 instances, found {len(reservation1['Instances'])}" + for instance in reservation1["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == scale_up_params1.expected_instance_type + assert "InstanceId" in instance + assert "State" in instance + assert "Name" in instance["State"] + if instance["InstanceId"] == drained_machine_instance_id: + assert instance["State"]["Name"] == "terminated" + else: + assert instance["State"]["Name"] == "running" + + reservation2 = all_instances["Reservations"][1] + assert "Instances" in reservation2 + assert ( + len(reservation2["Instances"]) == 1 + ), f"expected 1 EC2 instances, found {len(reservation2['Instances'])}" + for instance in reservation2["Instances"]: + assert "InstanceType" in instance + assert instance["InstanceType"] == scale_up_params2.expected_instance_type + + 
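+# NOTE: the test below covers the "broken EC2" case: an instance that stays pending longer
+# than EC2_INSTANCES_MAX_START_TIME never joins the swarm, so the autoscaler shall terminate
+# it and directly start a replacement instance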
+@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +@pytest.mark.parametrize( + "scale_up_params", + [ + pytest.param( + _ScaleUpParams( + imposed_instance_type=None, + service_resources=Resources( + cpus=4, ram=TypeAdapter(ByteSize).validate_python("128Gib") + ), + num_services=1, + expected_instance_type="r5n.4xlarge", + expected_num_instances=1, + ), + id="No explicit instance defined", + ), + ], +) +async def test_long_pending_ec2_is_detected_as_broken_terminated_and_restarted( + with_short_ec2_instances_max_start_time: EnvVarsDict, + minimal_configuration: None, + app_settings: ApplicationSettings, + initialized_app: FastAPI, + create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]], + ec2_client: EC2Client, + mock_find_node_with_name_returns_none: mock.Mock, + mock_docker_tag_node: mock.Mock, + mock_rabbitmq_post_message: mock.Mock, + short_ec2_instance_max_start_time: datetime.timedelta, + ec2_instance_custom_tags: dict[str, str], + instance_type_filters: Sequence[FilterTypeDef], + scale_up_params: _ScaleUpParams, +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert ( + short_ec2_instance_max_start_time + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + await create_services_batch(scale_up_params) + + # this should trigger a scaling up as we have no nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + + # check the instance was started and we have exactly 1 + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + + # as the new node is already running, but is not yet connected, hence not tagged and drained + mock_find_node_with_name_returns_none.assert_not_called() + mock_docker_tag_node.assert_not_called() + # check rabbit messages were sent + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + instances_running=0, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + + assert instances + assert "LaunchTime" in instances[0] + original_instance_launch_time: datetime.datetime = deepcopy( + instances[0]["LaunchTime"] + ) + await asyncio.sleep(1) # NOTE: we wait here since AWS does not keep microseconds + now = arrow.utcnow().datetime + + assert now > original_instance_launch_time + assert now < ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + + # 2. 
running again several times the autoscaler, the node does not join + for i in range(7): + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + # there should be no scaling up, since there is already a pending instance + instances = await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=scale_up_params.expected_num_instances, + expected_instance_type=scale_up_params.expected_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + assert mock_find_node_with_name_returns_none.call_count == i + 1 + mock_docker_tag_node.assert_not_called() + _assert_rabbit_autoscaling_message_sent( + mock_rabbitmq_post_message, + app_settings, + initialized_app, + instances_running=0, + instances_pending=scale_up_params.expected_num_instances, + ) + mock_rabbitmq_post_message.reset_mock() + assert instances + assert "LaunchTime" in instances[0] + assert instances[0]["LaunchTime"] == original_instance_launch_time + + # 3. wait for the instance max start time and try again, shall terminate the instance + now = arrow.utcnow().datetime + sleep_time = ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + - now + ).total_seconds() + 1 + print( + f"--> waiting now for {sleep_time}s for the pending EC2 to be deemed as unworthy" + ) + await asyncio.sleep(sleep_time) + now = arrow.utcnow().datetime + assert now > ( + original_instance_launch_time + + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME + ) + # scaling now will terminate the broken ec2 that did not connect, and directly create a replacement + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + # we have therefore 2 reservations, first instance is terminated and a second one started + all_instances = await ec2_client.describe_instances() + assert len(all_instances["Reservations"]) == 2 + assert "Instances" in all_instances["Reservations"][0] + assert ( + len(all_instances["Reservations"][0]["Instances"]) + == scale_up_params.expected_num_instances + ) + assert "State" in all_instances["Reservations"][0]["Instances"][0] + assert "Name" in all_instances["Reservations"][0]["Instances"][0]["State"] + assert ( + all_instances["Reservations"][0]["Instances"][0]["State"]["Name"] + == "terminated" + ) + + assert "Instances" in all_instances["Reservations"][1] + assert ( + len(all_instances["Reservations"][1]["Instances"]) + == scale_up_params.expected_num_instances + ) + assert "State" in all_instances["Reservations"][1]["Instances"][0] + assert "Name" in all_instances["Reservations"][1]["Instances"][0]["State"] + assert ( + all_instances["Reservations"][1]["Instances"][0]["State"]["Name"] == "running" + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test__find_terminateable_nodes_with_no_hosts( + minimal_configuration: None, + initialized_app: FastAPI, + cluster: Callable[..., Cluster], + host_node: Node, + fake_ec2_instance_data: Callable[..., EC2InstanceData], 
+): + # there is no node to terminate here since nothing is drained + active_cluster = cluster( + active_nodes=[ + AssociatedInstance(node=host_node, ec2_instance=fake_ec2_instance_data()) + ], + drained_nodes=[], + buffer_drained_nodes=[ + AssociatedInstance(node=host_node, ec2_instance=fake_ec2_instance_data()) + ], + ) + assert await _find_terminateable_instances(initialized_app, active_cluster) == [] + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test__try_scale_down_cluster_with_no_nodes( + minimal_configuration: None, + with_valid_time_before_termination: datetime.timedelta, + initialized_app: FastAPI, + cluster: Callable[..., Cluster], + mock_remove_nodes: mock.Mock, + host_node: Node, + drained_host_node: Node, + create_associated_instance: Callable[[Node, bool], AssociatedInstance], +): + active_cluster = cluster( + active_nodes=[create_associated_instance(host_node, True)], # noqa: FBT003 + drained_nodes=[ + create_associated_instance(drained_host_node, False) # noqa: FBT003 + ], + buffer_drained_nodes=[ + create_associated_instance(drained_host_node, True) # noqa: FBT003 + ], + ) + updated_cluster = await _try_scale_down_cluster(initialized_app, active_cluster) + assert updated_cluster == active_cluster + mock_remove_nodes.assert_not_called() + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test__activate_drained_nodes_with_no_tasks( + minimal_configuration: None, + with_valid_time_before_termination: datetime.timedelta, + initialized_app: FastAPI, + host_node: Node, + drained_host_node: Node, + mock_docker_tag_node: mock.Mock, + cluster: Callable[..., Cluster], + create_associated_instance: Callable[[Node, bool], AssociatedInstance], +): + # no tasks, does nothing and returns True + empty_cluster = cluster() + updated_cluster = await _activate_drained_nodes(initialized_app, empty_cluster) + assert updated_cluster == empty_cluster + + active_cluster = cluster( + active_nodes=[create_associated_instance(host_node, True)], # noqa: FBT003 + drained_nodes=[ + create_associated_instance(drained_host_node, True) # noqa: FBT003 + ], + buffer_drained_nodes=[ + create_associated_instance(drained_host_node, True) # noqa: FBT003 + ], + ) + updated_cluster = await _activate_drained_nodes(initialized_app, active_cluster) + assert updated_cluster == active_cluster + mock_docker_tag_node.assert_not_called() + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) 
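+# NOTE: the cluster below has active nodes only (no drained nodes), so
+# _activate_drained_nodes is expected to return it unchanged and not re-tag any node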
+async def test__activate_drained_nodes_with_no_drained_nodes( + minimal_configuration: None, + with_valid_time_before_termination: datetime.timedelta, + autoscaling_docker: AutoscalingDocker, + initialized_app: FastAPI, + host_node: Node, + mock_docker_tag_node: mock.Mock, + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] + ], + task_template: dict[str, Any], + create_task_reservations: Callable[[int, int], dict[str, Any]], + host_cpu_count: int, + cluster: Callable[..., Cluster], + create_associated_instance: Callable[[Node, bool], AssociatedInstance], +): + task_template_that_runs = task_template | create_task_reservations( + int(host_cpu_count / 2 + 1), 0 + ) + service_with_no_reservations = await create_service( + task_template_that_runs, {}, "running" + ) + assert service_with_no_reservations.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await autoscaling_docker.tasks.list( + filters={"service": service_with_no_reservations.spec.name} + ) + ) + assert service_tasks + assert len(service_tasks) == 1 + + cluster_without_drained_nodes = cluster( + active_nodes=[create_associated_instance(host_node, True)] # noqa: FBT003 + ) + updated_cluster = await _activate_drained_nodes( + initialized_app, cluster_without_drained_nodes + ) + assert updated_cluster == cluster_without_drained_nodes + mock_docker_tag_node.assert_not_called() + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test__activate_drained_nodes_with_drained_node( + minimal_configuration: None, + with_valid_time_before_termination: datetime.timedelta, + autoscaling_docker: AutoscalingDocker, + initialized_app: FastAPI, + instance_type_filters: Sequence[FilterTypeDef], + drained_host_node: Node, + mock_docker_tag_node: mock.Mock, + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] + ], + task_template: dict[str, Any], + create_task_reservations: Callable[[int, int], dict[str, Any]], + host_cpu_count: int, + cluster: Callable[..., Cluster], + create_associated_instance: Callable[[Node, bool], AssociatedInstance], +): + # task with no drain nodes returns False + task_template_that_runs = task_template | create_task_reservations( + int(host_cpu_count / 2 + 1), 0 + ) + service_with_no_reservations = await create_service( + task_template_that_runs, {}, "pending" + ) + assert service_with_no_reservations.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await autoscaling_docker.tasks.list( + filters={"service": service_with_no_reservations.spec.name} + ) + ) + assert service_tasks + assert len(service_tasks) == 1 + + cluster_with_drained_nodes = cluster( + drained_nodes=[ + create_associated_instance(drained_host_node, True) # noqa: FBT003 + ] + ) + cluster_with_drained_nodes.drained_nodes[0].assign_task( + service_tasks[0], Resources(cpus=int(host_cpu_count / 2 + 1), ram=ByteSize(0)) + ) + + updated_cluster = await _activate_drained_nodes( + initialized_app, cluster_with_drained_nodes + ) + # they are the same nodes, but the availability might have changed here + assert updated_cluster.active_nodes != 
cluster_with_drained_nodes.drained_nodes + assert ( + updated_cluster.active_nodes[0].assigned_tasks + == cluster_with_drained_nodes.drained_nodes[0].assigned_tasks + ) + assert ( + updated_cluster.active_nodes[0].ec2_instance + == cluster_with_drained_nodes.drained_nodes[0].ec2_instance + ) + + assert drained_host_node.spec + mock_docker_tag_node.assert_called_once_with( + mock.ANY, + drained_host_node, + tags={ + _OSPARC_SERVICE_READY_LABEL_KEY: "true", + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY: mock.ANY, + }, + available=True, + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_warm_buffers_are_started_to_replace_missing_hot_buffers( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + with_instances_machines_hot_buffer: EnvVarsDict, + ec2_client: EC2Client, + initialized_app: FastAPI, + app_settings: ApplicationSettings, + ec2_instance_custom_tags: dict[str, str], + buffer_count: int, + create_buffer_machines: Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag] | None], + Awaitable[list[str]], + ], + spied_cluster_analysis: MockType, + instance_type_filters: Sequence[FilterTypeDef], + mock_find_node_with_name_returns_fake_node: mock.Mock, + mock_compute_node_used_resources: mock.Mock, + mock_docker_tag_node: mock.Mock, +): + # pre-requisites + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER > 0 + + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # have a few warm buffers ready with the same type as the hot buffer machines + buffer_machines = await create_buffer_machines( + buffer_count, + cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + "stopped", + None, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + expected_instance_state="stopped", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=None, + ) + + # let's autoscale, this should move the warm buffers to hot buffers + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mock_docker_tag_node.assert_not_called() + # at analysis time, we had no machines running + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=0, + ) + assert not analyzed_cluster.active_nodes + assert analyzed_cluster.buffer_ec2s + assert len(analyzed_cluster.buffer_ec2s) == len(buffer_machines) + + # now we should have a warm buffer moved to the hot buffer + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + 
expected_instance_type=cast( + InstanceTypeType, + next( + iter(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES) + ), + ), + expected_instance_state="running", + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + BUFFER_MACHINE_TAG_KEY, + ], + instance_filters=instance_type_filters, + expected_user_data=[], + ) + + # let's autoscale again, to check the cluster analysis and tag the nodes + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + mock_docker_tag_node.assert_called() + assert ( + mock_docker_tag_node.call_count + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + # at analysis time, we had no machines running + analyzed_cluster = assert_cluster_state( + spied_cluster_analysis, + expected_calls=1, + expected_num_machines=app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + ) + assert not analyzed_cluster.active_nodes + assert len(analyzed_cluster.buffer_ec2s) == max( + 0, + buffer_count + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER, + ), ( + "the warm buffers were not used as expected there should be" + f" {buffer_count - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER} remaining, " + f"found {len(analyzed_cluster.buffer_ec2s)}" + ) + assert ( + len(analyzed_cluster.pending_ec2s) + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + + +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_docker_join_drained", + ["without_AUTOSCALING_DOCKER_JOIN_DRAINED"], + indirect=True, +) +@pytest.mark.parametrize( + # NOTE: only the main test test_cluster_scaling_up_and_down is run with all options + "with_drain_nodes_labelled", + ["with_AUTOSCALING_DRAIN_NODES_WITH_LABELS"], + indirect=True, +) +async def test_warm_buffers_only_replace_hot_buffer_if_service_is_started_issue7071( + patch_ec2_client_launch_instances_min_number_of_instances: mock.Mock, + minimal_configuration: None, + with_instances_machines_hot_buffer: EnvVarsDict, + with_drain_nodes_labelled: bool, + ec2_client: EC2Client, + initialized_app: FastAPI, + app_settings: ApplicationSettings, + ec2_instance_custom_tags: dict[str, str], + buffer_count: int, + create_buffer_machines: Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag] | None], + Awaitable[list[str]], + ], + create_services_batch: Callable[[_ScaleUpParams], Awaitable[list[Service]]], + hot_buffer_instance_type: InstanceTypeType, + spied_cluster_analysis: MockType, + instance_type_filters: Sequence[FilterTypeDef], + stopped_instance_type_filters: Sequence[FilterTypeDef], + mock_find_node_with_name_returns_fake_node: mock.Mock, + mock_compute_node_used_resources: mock.Mock, + mock_docker_tag_node: mock.Mock, + mocker: MockerFixture, + fake_node: Node, +): + # NOTE: https://github.com/ITISFoundation/osparc-simcore/issues/7071 + + # + # PRE-requisites + # + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER > 0 + num_hot_buffer = ( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + + # we have nothing running now + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # ensure we get our running hot buffer + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await 
assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_hot_buffer, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + # this brings a new analysis + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + spied_cluster = assert_cluster_state( + spied_cluster_analysis, expected_calls=2, expected_num_machines=5 + ) + # calling again should attach the new nodes to the reserve, but nothing should start + fake_attached_node_base = deepcopy(fake_node) + assert fake_attached_node_base.spec + fake_attached_node_base.spec.availability = ( + Availability.active if with_drain_nodes_labelled else Availability.drain + ) + assert fake_attached_node_base.spec.labels + assert app_settings.AUTOSCALING_NODES_MONITORING + expected_docker_node_tags = { + tag_key: "true" + for tag_key in ( + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS + + app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS + ) + } | { + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY: f"{hot_buffer_instance_type}" + } + fake_attached_node_base.spec.labels |= expected_docker_node_tags | { + _OSPARC_SERVICE_READY_LABEL_KEY: "false" + } + assert fake_attached_node_base.status + fake_attached_node_base.status.state = NodeState.ready + fake_hot_buffer_nodes = [] + for i in range(num_hot_buffer): + node = fake_attached_node_base.model_copy(deep=True) + assert node.description + node.description.hostname = node_host_name_from_ec2_private_dns( + spied_cluster.pending_ec2s[i].ec2_instance + ) + fake_hot_buffer_nodes.append(node) + auto_scaling_mode = DynamicAutoscaling() + mocker.patch.object( + auto_scaling_mode, + "get_monitored_nodes", + autospec=True, + return_value=fake_hot_buffer_nodes, + ) + + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_hot_buffer, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + spied_cluster = assert_cluster_state( + spied_cluster_analysis, expected_calls=1, expected_num_machines=5 + ) + assert len(spied_cluster.buffer_drained_nodes) == num_hot_buffer + assert not spied_cluster.buffer_ec2s + + # have a few warm buffers ready with the same type as the hot buffer machines + await create_buffer_machines( + buffer_count, + hot_buffer_instance_type, + "stopped", + None, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="stopped", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=stopped_instance_type_filters, + ) + + # calling again should do nothing + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=num_hot_buffer, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="running", + 
expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="stopped", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=stopped_instance_type_filters, + ) + spied_cluster = assert_cluster_state( + spied_cluster_analysis, expected_calls=1, expected_num_machines=5 + ) + assert len(spied_cluster.buffer_drained_nodes) == num_hot_buffer + assert len(spied_cluster.buffer_ec2s) == buffer_count + + # + # BUG REPRODUCTION + # + # start a service that imposes same type as the hot buffer + assert ( + hot_buffer_instance_type == "t2.xlarge" + ), "the test is hard-coded for this type and accordingly resource. If this changed then the resource shall be changed too" + scale_up_params = _ScaleUpParams( + imposed_instance_type=hot_buffer_instance_type, + service_resources=Resources( + cpus=2, ram=TypeAdapter(ByteSize).validate_python("1Gib") + ), + num_services=1, + expected_instance_type="t2.xlarge", + expected_num_instances=1, + ) + await create_services_batch(scale_up_params) + + # this should trigger usage of the hot buffer and the warm buffers should replace the hot buffer + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + await assert_autoscaled_dynamic_ec2_instances( + ec2_client, + expected_num_reservations=2, + check_reservation_index=0, + expected_num_instances=num_hot_buffer, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + instance_filters=instance_type_filters, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=2, + check_reservation_index=1, + expected_num_instances=1, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=instance_type_filters, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count - 1, + expected_instance_type=hot_buffer_instance_type, + expected_instance_state="stopped", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=stopped_instance_type_filters, + ) + # simulate one of the hot buffer is not drained anymore and took the pending service + random_fake_node = random.choice(fake_hot_buffer_nodes) # noqa: S311 + random_fake_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" + random_fake_node.spec.labels[_OSPARC_SERVICES_READY_DATETIME_LABEL_KEY] = ( + arrow.utcnow().isoformat() + ) + random_fake_node.spec.availability = Availability.active + # simulate the fact that the warm buffer that just started is not yet visible + mock_find_node_with_name_returns_fake_node.return_value = None + + # get the new analysis + await auto_scale_cluster(app=initialized_app, auto_scaling_mode=auto_scaling_mode) + spied_cluster = assert_cluster_state( + spied_cluster_analysis, expected_calls=2, expected_num_machines=6 + ) + assert len(spied_cluster.buffer_drained_nodes) == num_hot_buffer - 1 + assert len(spied_cluster.buffer_ec2s) == buffer_count - 1 + assert 
len(spied_cluster.active_nodes) == 1 + assert len(spied_cluster.pending_ec2s) == 1 + + # running it again shall do nothing + @tenacity.retry( + retry=tenacity.retry_always, + reraise=True, + wait=tenacity.wait_fixed(0.1), + stop=tenacity.stop_after_attempt(10), + ) + async def _check_autoscaling_is_stable() -> None: + await auto_scale_cluster( + app=initialized_app, auto_scaling_mode=auto_scaling_mode + ) + spied_cluster = assert_cluster_state( + spied_cluster_analysis, expected_calls=1, expected_num_machines=6 + ) + assert len(spied_cluster.buffer_drained_nodes) == num_hot_buffer - 1 + assert len(spied_cluster.buffer_ec2s) == buffer_count - 1 + assert len(spied_cluster.active_nodes) == 1 + assert len(spied_cluster.pending_ec2s) == 1 + + with pytest.raises(tenacity.RetryError): + await _check_autoscaling_is_stable() diff --git a/services/autoscaling/tests/unit/test_modules_auto_scaling_task.py b/services/autoscaling/tests/unit/test_modules_auto_scaling_task.py new file mode 100644 index 00000000000..4a3d3e85bae --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_auto_scaling_task.py @@ -0,0 +1,81 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import datetime +from typing import Final +from unittest import mock + +import pytest +from fastapi import FastAPI +from pydantic import TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_autoscaling.core.settings import ApplicationSettings + +_FAST_POLL_INTERVAL: Final[int] = 1 + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # fast interval + monkeypatch.setenv( + "AUTOSCALING_POLL_INTERVAL", + f"{TypeAdapter(datetime.timedelta).validate_python(_FAST_POLL_INTERVAL)}", + ) + app_environment["AUTOSCALING_POLL_INTERVAL"] = f"{_FAST_POLL_INTERVAL}" + return app_environment + + +@pytest.fixture +def mock_background_task(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_autoscaling.modules.auto_scaling_task.auto_scale_cluster", + autospec=True, + ) + + +async def test_auto_scaling_task_not_created_if_no_mode_defined( + app_environment: EnvVarsDict, + mock_background_task: mock.Mock, + initialized_app: FastAPI, + app_settings: ApplicationSettings, +): + assert app_settings.AUTOSCALING_POLL_INTERVAL.total_seconds() == _FAST_POLL_INTERVAL + assert not hasattr(initialized_app.state, "autoscaler_task") + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_not_called() + + +async def test_auto_scaling_task_created_and_deleted_with_dynamic_mode( + enabled_dynamic_mode: EnvVarsDict, + mock_background_task: mock.Mock, + initialized_app: FastAPI, + app_settings: ApplicationSettings, +): + assert app_settings.AUTOSCALING_POLL_INTERVAL.total_seconds() == _FAST_POLL_INTERVAL + assert hasattr(initialized_app.state, "autoscaler_task") + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_called() + + +async def test_auto_scaling_task_created_and_deleted_with_computational_mode( + enabled_computational_mode: EnvVarsDict, + mock_background_task: mock.Mock, + initialized_app: FastAPI, + app_settings: ApplicationSettings, +): + assert 
app_settings.AUTOSCALING_POLL_INTERVAL.total_seconds() == _FAST_POLL_INTERVAL + assert hasattr(initialized_app.state, "autoscaler_task") + await asyncio.sleep(5 * _FAST_POLL_INTERVAL) + mock_background_task.assert_called() diff --git a/services/autoscaling/tests/unit/test_modules_buffer_machine_core.py b/services/autoscaling/tests/unit/test_modules_buffer_machine_core.py new file mode 100644 index 00000000000..fc976c94e68 --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_buffer_machine_core.py @@ -0,0 +1,558 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import datetime +import json +import logging +import random +from collections.abc import Awaitable, Callable, Sequence +from dataclasses import dataclass +from typing import Any, get_args +from unittest import mock + +import pytest +import tenacity +from aws_library.ec2 import AWSTagKey +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.docker import DockerGenericTag +from pydantic import TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.aws_ec2 import ( + assert_autoscaled_dynamic_warm_pools_ec2_instances, +) +from pytest_simcore.helpers.logging_tools import log_context +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from simcore_service_autoscaling.constants import PRE_PULLED_IMAGES_EC2_TAG_KEY +from simcore_service_autoscaling.modules.auto_scaling_mode_dynamic import ( + DynamicAutoscaling, +) +from simcore_service_autoscaling.modules.buffer_machines_pool_core import ( + monitor_buffer_machines, +) +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType, InstanceTypeType +from types_aiobotocore_ec2.type_defs import FilterTypeDef + + +@pytest.fixture +def with_ec2_instance_allowed_types_env( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], +) -> EnvVarsDict: + envs = setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + jsonable_encoder(ec2_instances_allowed_types_with_only_1_buffered) + ), + }, + ) + return app_environment | envs + + +@pytest.fixture +def minimal_configuration( + disabled_rabbitmq: None, + disable_autoscaling_background_task: None, + disable_buffers_pool_background_task: None, + enabled_dynamic_mode: EnvVarsDict, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ec2_instances_envs: EnvVarsDict, + with_enabled_buffer_pools: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + with_ec2_instance_allowed_types_env: EnvVarsDict, + mocked_redis_server: None, +) -> None: + pass + + +@pytest.fixture(autouse=True) +def set_log_levels_for_noisy_libraries() -> None: + # Reduce the log level for 'werkzeug' + logging.getLogger("werkzeug").setLevel(logging.WARNING) + + +@pytest.mark.xfail( + reason="moto does not handle mocking of SSM SendCommand completely. " + "TIP: if this test passes, it will mean Moto now handles it." 
+ " Delete 'mocked_ssm_send_command' fixture if that is the case and remove this test" +) +async def test_if_send_command_is_mocked_by_moto( + minimal_configuration: None, + initialized_app: FastAPI, + ec2_client: EC2Client, + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + buffer_count: int, +): + all_instances = await ec2_client.describe_instances() + assert not all_instances["Reservations"] + + # 1. run, this will create as many buffer machines as needed + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next( + iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state="running", + expected_additional_tag_keys=[], + expected_pre_pulled_images=[], + instance_filters=None, + ) + + # 2. this should generate a failure as current version of moto does not handle this + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + + +@pytest.fixture +def mock_wait_for_has_instance_completed_cloud_init( + external_envfile_dict: EnvVarsDict, initialized_app: FastAPI, mocker: MockerFixture +) -> mock.Mock | None: + if external_envfile_dict: + return None + return mocker.patch( + "aws_library.ssm._client.SimcoreSSMAPI.wait_for_has_instance_completed_cloud_init", + autospec=True, + return_value=True, + ) + + +@pytest.fixture +def instance_type_filters( + ec2_instance_custom_tags: dict[str, str], +) -> Sequence[FilterTypeDef]: + return [ + *[ + FilterTypeDef( + Name="tag-key", + Values=[tag_key], + ) + for tag_key in ec2_instance_custom_tags + ], + FilterTypeDef( + Name="instance-state-name", + Values=["pending", "running", "stopped"], + ), + ] + + +async def _test_monitor_buffer_machines( + *, + ec2_client: EC2Client, + instance_type_filters: Sequence[FilterTypeDef], + initialized_app: FastAPI, + buffer_count: int, + pre_pulled_images: list[DockerGenericTag], + ec2_instances_allowed_types: dict[InstanceTypeType, Any], + ec2_instance_custom_tags: dict[str, str], + run_against_moto: bool, +): + # 0. we have no instances now + all_instances = await ec2_client.describe_instances(Filters=instance_type_filters) + assert not all_instances[ + "Reservations" + ], f"There should be no instances at the start of the test. Found following instance ids: {[i['InstanceId'] for r in all_instances['Reservations'] if 'Instances' in r for i in r['Instances'] if 'InstanceId' in i]}" + + # 1. 
run, this will create as many buffer machines as needed + with log_context(logging.INFO, "create buffer machines"): + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + with log_context( + logging.INFO, f"waiting for {buffer_count} buffer instances to be running" + ) as ctx: + + @tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay(5 if run_against_moto else 120), + retry=tenacity.retry_if_exception_type(AssertionError), + reraise=True, + before_sleep=tenacity.before_sleep_log(ctx.logger, logging.INFO), + after=tenacity.after_log(ctx.logger, logging.INFO), + ) + async def _assert_buffer_machines_running() -> None: + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next(iter(ec2_instances_allowed_types)), + expected_instance_state="running", + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + ], + expected_pre_pulled_images=None, + instance_filters=instance_type_filters, + ) + + await _assert_buffer_machines_running() + + # 2. this should now run a SSM command for pulling + with log_context(logging.INFO, "run SSM commands for pulling") as ctx: + + @tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay(5 if run_against_moto else 120), + retry=tenacity.retry_if_exception_type(AssertionError), + reraise=True, + before_sleep=tenacity.before_sleep_log(ctx.logger, logging.INFO), + after=tenacity.after_log(ctx.logger, logging.INFO), + ) + async def _assert_run_ssm_command_for_pulling() -> None: + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next(iter(ec2_instances_allowed_types)), + expected_instance_state="running", + expected_additional_tag_keys=[ + "pulling", + "ssm-command-id", + *list(ec2_instance_custom_tags), + ], + expected_pre_pulled_images=None, + instance_filters=instance_type_filters, + ) + + if pre_pulled_images: + await _assert_run_ssm_command_for_pulling() + + # 3. is the command finished? 
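+ # (completion here means the instances are stopped again and tagged with the
+ # pre-pulled images under PRE_PULLED_IMAGES_EC2_TAG_KEY)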
+ with log_context( + logging.INFO, "wait for SSM commands and the machine to be stopped to finish" + ) as ctx: + + @tenacity.retry( + wait=tenacity.wait_fixed(5), + stop=tenacity.stop_after_delay( + 5 if run_against_moto else datetime.timedelta(minutes=10) + ), + retry=tenacity.retry_if_exception_type(AssertionError), + reraise=True, + before_sleep=tenacity.before_sleep_log(ctx.logger, logging.INFO), + after=tenacity.after_log(ctx.logger, logging.INFO), + ) + async def _assert_wait_for_ssm_command_to_finish() -> None: + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next(iter(ec2_instances_allowed_types)), + expected_instance_state="stopped", + expected_additional_tag_keys=[ + PRE_PULLED_IMAGES_EC2_TAG_KEY, + *list(ec2_instance_custom_tags), + ], + expected_pre_pulled_images=pre_pulled_images, + instance_filters=instance_type_filters, + ) + + await _assert_wait_for_ssm_command_to_finish() + + +async def test_monitor_buffer_machines( + minimal_configuration: None, + ec2_client: EC2Client, + buffer_count: int, + pre_pull_images: list[DockerGenericTag], + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + instance_type_filters: Sequence[FilterTypeDef], + ec2_instance_custom_tags: dict[str, str], + mock_wait_for_has_instance_completed_cloud_init: mock.Mock | None, + initialized_app: FastAPI, +): + await _test_monitor_buffer_machines( + ec2_client=ec2_client, + instance_type_filters=instance_type_filters, + initialized_app=initialized_app, + buffer_count=buffer_count, + pre_pulled_images=pre_pull_images, + ec2_instances_allowed_types=ec2_instances_allowed_types_with_only_1_buffered, + ec2_instance_custom_tags=ec2_instance_custom_tags, + run_against_moto=True, + ) + + +@dataclass +class _BufferMachineParams: + instance_state_name: InstanceStateNameType + pre_pulled_images: list[DockerGenericTag] | None + tag_keys: list[AWSTagKey] + + +@pytest.mark.parametrize( + "expected_buffer_params", + [ + _BufferMachineParams("running", None, []), + _BufferMachineParams( + "stopped", + [], + [ + TypeAdapter(AWSTagKey).validate_python( + "io.simcore.autoscaling.pre_pulled_images" + ) + ], + ), + ], +) +async def test_monitor_buffer_machines_terminates_supernumerary_instances( + minimal_configuration: None, + fake_pre_pull_images: list[DockerGenericTag], + ec2_client: EC2Client, + buffer_count: int, + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + instance_type_filters: Sequence[FilterTypeDef], + ec2_instance_custom_tags: dict[str, str], + initialized_app: FastAPI, + create_buffer_machines: Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag]], + Awaitable[list[str]], + ], + expected_buffer_params: _BufferMachineParams, +): + # dirty hack + if expected_buffer_params.pre_pulled_images == []: + expected_buffer_params.pre_pulled_images = fake_pre_pull_images + # have too many machines of accepted type + buffer_machines = await create_buffer_machines( + buffer_count + 5, + next(iter(list(ec2_instances_allowed_types_with_only_1_buffered))), + expected_buffer_params.instance_state_name, + fake_pre_pull_images, + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=len(buffer_machines), + expected_instance_type=next( + 
iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state=expected_buffer_params.instance_state_name, + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + *expected_buffer_params.tag_keys, + ], + expected_pre_pulled_images=expected_buffer_params.pre_pulled_images, + instance_filters=instance_type_filters, + ) + # this will terminate the supernumerary instances and start new ones + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next( + iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state=expected_buffer_params.instance_state_name, + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + *expected_buffer_params.tag_keys, + ], + expected_pre_pulled_images=expected_buffer_params.pre_pulled_images, + instance_filters=instance_type_filters, + ) + + +async def test_monitor_buffer_machines_terminates_instances_with_incorrect_pre_pulled_images( + minimal_configuration: None, + ec2_client: EC2Client, + buffer_count: int, + pre_pull_images: list[DockerGenericTag], + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + instance_type_filters: Sequence[FilterTypeDef], + ec2_instance_custom_tags: dict[str, str], + initialized_app: FastAPI, + create_buffer_machines: Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag]], + Awaitable[list[str]], + ], +): + # have machines of correct type with missing pre-pulled images + assert ( + len(pre_pull_images) > 1 + ), "this test relies on pre-pulled images being filled with more than 1 image" + buffer_machines = await create_buffer_machines( + buffer_count + 5, + next(iter(list(ec2_instances_allowed_types_with_only_1_buffered))), + "stopped", + pre_pull_images[:-1], + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=len(buffer_machines), + expected_instance_type=next( + iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state="stopped", + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + "io.simcore.autoscaling.pre_pulled_images", + ], + expected_pre_pulled_images=pre_pull_images[:-1], + instance_filters=instance_type_filters, + ) + # this will terminate the wrong instances and start new ones and pre-pull the new set of images + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next( + iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, # NOTE: these are not pre-pulled yet, just started + instance_filters=instance_type_filters, + ) + + +@pytest.fixture +def unneeded_instance_type( + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], +) -> InstanceTypeType: + random_type = next(iter(ec2_instances_allowed_types_with_only_1_buffered)) + while random_type in ec2_instances_allowed_types_with_only_1_buffered: + random_type = random.choice(get_args(InstanceTypeType)) # noqa: S311 + return random_type + + 
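+# NOTE: the test below relies on the `unneeded_instance_type` fixture above: it creates a warm
+# buffer pool of an instance type that is not part of EC2_INSTANCES_ALLOWED_TYPES and expects
+# monitor_buffer_machines to terminate that pool and replace it with the configured one.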
+@pytest.mark.flaky(max_runs=3) +@pytest.mark.parametrize( + "expected_buffer_params", + [ + _BufferMachineParams("running", None, []), + _BufferMachineParams( + "stopped", + [], + [ + TypeAdapter(AWSTagKey).validate_python( + "io.simcore.autoscaling.pre_pulled_images" + ) + ], + ), + ], +) +async def test_monitor_buffer_machines_terminates_unneeded_pool( + minimal_configuration: None, + ec2_client: EC2Client, + buffer_count: int, + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + instance_type_filters: Sequence[FilterTypeDef], + ec2_instance_custom_tags: dict[str, str], + initialized_app: FastAPI, + create_buffer_machines: Callable[ + [int, InstanceTypeType, InstanceStateNameType, list[DockerGenericTag]], + Awaitable[list[str]], + ], + unneeded_instance_type: InstanceTypeType, + expected_buffer_params: _BufferMachineParams, +): + # have machines of unneeded type + buffer_machines_unneeded = await create_buffer_machines( + 5, unneeded_instance_type, expected_buffer_params.instance_state_name, [] + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=len(buffer_machines_unneeded), + expected_instance_type=unneeded_instance_type, + expected_instance_state=expected_buffer_params.instance_state_name, + expected_additional_tag_keys=[ + *list(ec2_instance_custom_tags), + *expected_buffer_params.tag_keys, + ], + expected_pre_pulled_images=expected_buffer_params.pre_pulled_images, + instance_filters=instance_type_filters, + ) + + # this will terminate the unwanted buffer pool and replace with the expected ones + await monitor_buffer_machines( + initialized_app, auto_scaling_mode=DynamicAutoscaling() + ) + await assert_autoscaled_dynamic_warm_pools_ec2_instances( + ec2_client, + expected_num_reservations=1, + expected_num_instances=buffer_count, + expected_instance_type=next( + iter(ec2_instances_allowed_types_with_only_1_buffered) + ), + expected_instance_state="running", + expected_additional_tag_keys=list(ec2_instance_custom_tags), + expected_pre_pulled_images=None, + instance_filters=instance_type_filters, + ) + + +@pytest.fixture +def pre_pull_images( + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], +) -> list[DockerGenericTag]: + allowed_ec2_types = ec2_instances_allowed_types_with_only_1_buffered + allowed_ec2_types_with_pre_pull_images_defined = dict( + filter( + lambda instance_type_and_settings: instance_type_and_settings[ + 1 + ].pre_pull_images, + allowed_ec2_types.items(), + ) + ) + assert ( + len(allowed_ec2_types_with_pre_pull_images_defined) <= 1 + ), "more than one type with pre-pulled-images is disallowed in this test!" 
+ + if allowed_ec2_types_with_pre_pull_images_defined: + return next( + iter(allowed_ec2_types_with_pre_pull_images_defined.values()) + ).pre_pull_images + return [] + + +async def test_monitor_buffer_machines_against_aws( + skip_if_external_envfile_dict: None, + disable_buffers_pool_background_task: None, + disable_autoscaling_background_task: None, + disabled_rabbitmq: None, + mocked_redis_server: None, + external_envfile_dict: EnvVarsDict, + ec2_client: EC2Client, + buffer_count: int, + pre_pull_images: list[DockerGenericTag], + ec2_instances_allowed_types_with_only_1_buffered: dict[InstanceTypeType, Any], + instance_type_filters: Sequence[FilterTypeDef], + ec2_instance_custom_tags: dict[str, str], + initialized_app: FastAPI, +): + await _test_monitor_buffer_machines( + ec2_client=ec2_client, + instance_type_filters=instance_type_filters, + initialized_app=initialized_app, + buffer_count=buffer_count, + pre_pulled_images=pre_pull_images, + ec2_instances_allowed_types=ec2_instances_allowed_types_with_only_1_buffered, + ec2_instance_custom_tags=ec2_instance_custom_tags, + run_against_moto=False, + ) diff --git a/services/autoscaling/tests/unit/test_modules_dask.py b/services/autoscaling/tests/unit/test_modules_dask.py new file mode 100644 index 00000000000..9c53865cfa3 --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_dask.py @@ -0,0 +1,372 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +from collections.abc import Callable +from typing import Any, Final + +import distributed +import pytest +from arrow import utcnow +from aws_library.ec2 import Resources +from faker import Faker +from models_library.clusters import ( + ClusterAuthentication, + NoAuthentication, + TLSAuthentication, +) +from pydantic import AnyUrl, ByteSize, TypeAdapter +from pytest_simcore.helpers.host import get_localhost_ip +from simcore_service_autoscaling.core.errors import ( + DaskNoWorkersError, + DaskSchedulerNotFoundError, + DaskWorkerNotFoundError, + Ec2InvalidDnsNameError, +) +from simcore_service_autoscaling.models import ( + DaskTaskId, + DaskTaskResources, + EC2InstanceData, +) +from simcore_service_autoscaling.modules.dask import ( + DaskTask, + _scheduler_client, + get_worker_still_has_results_in_memory, + get_worker_used_resources, + list_processing_tasks_per_worker, + list_unrunnable_tasks, +) +from tenacity import retry, stop_after_delay, wait_fixed + +_authentication_types = [ + NoAuthentication(), + TLSAuthentication.model_construct( + **TLSAuthentication.model_json_schema()["examples"][0] + ), +] + + +@pytest.mark.parametrize( + "authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}" +) +async def test__scheduler_client_with_wrong_url( + faker: Faker, authentication: ClusterAuthentication +): + with pytest.raises(DaskSchedulerNotFoundError): + async with _scheduler_client( + TypeAdapter(AnyUrl).validate_python( + f"tcp://{faker.ipv4()}:{faker.port_number()}" + ), + authentication, + ): + ... 
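+
+# NOTE: the fixtures below wire this module to the local dask test cluster provided by
+# pytest-simcore: a scheduler without authentication and a single worker exposing 2 CPUs.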
+ + +@pytest.fixture +def scheduler_url(dask_spec_local_cluster: distributed.SpecCluster) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ) + + +@pytest.fixture +def scheduler_authentication() -> ClusterAuthentication: + return NoAuthentication() + + +@pytest.fixture +def dask_workers_config() -> dict[str, Any]: + # NOTE: override of pytest-simcore dask_workers_config to have only 1 worker + return { + "single-cpu_worker": { + "cls": distributed.Worker, + "options": { + "nthreads": 2, + "resources": {"CPU": 2, "RAM": 48e9}, + "name": f"dask-sidecar_ip-{get_localhost_ip().replace('.', '-')}_{utcnow()}", + }, + } + } + + +async def test__scheduler_client( + scheduler_url: AnyUrl, scheduler_authentication: ClusterAuthentication +): + async with _scheduler_client(scheduler_url, scheduler_authentication): + ... + + +async def test_list_unrunnable_tasks_with_no_workers( + dask_local_cluster_without_workers: distributed.SpecCluster, +): + scheduler_url = TypeAdapter(AnyUrl).validate_python( + dask_local_cluster_without_workers.scheduler_address + ) + assert await list_unrunnable_tasks(scheduler_url, NoAuthentication()) == [] + + +async def test_list_unrunnable_tasks( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + create_dask_task: Callable[[DaskTaskResources], distributed.Future], +): + # we have nothing running now + assert await list_unrunnable_tasks(scheduler_url, scheduler_authentication) == [] + # start a task that cannot run + dask_task_impossible_resources = {"XRAM": 213} + future = create_dask_task(dask_task_impossible_resources) + assert future + assert await list_unrunnable_tasks(scheduler_url, scheduler_authentication) == [ + DaskTask(task_id=future.key, required_resources=dask_task_impossible_resources) + ] + # remove that future, will remove the task + del future + assert await list_unrunnable_tasks(scheduler_url, scheduler_authentication) == [] + + +_REMOTE_FCT_SLEEP_TIME_S: Final[int] = 3 + + +async def test_list_processing_tasks( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + dask_spec_cluster_client: distributed.Client, +): + def _add_fct(x: int, y: int) -> int: + import time + + time.sleep(_REMOTE_FCT_SLEEP_TIME_S) + return x + y + + # there is nothing now + assert ( + await list_processing_tasks_per_worker(scheduler_url, scheduler_authentication) + == {} + ) + + # this function will be queued and executed as there are no specific resources needed + future_queued_task = dask_spec_cluster_client.submit(_add_fct, 2, 5) + assert future_queued_task + + assert await list_processing_tasks_per_worker( + scheduler_url, scheduler_authentication + ) == { + next(iter(dask_spec_cluster_client.scheduler_info()["workers"])): [ + DaskTask(task_id=DaskTaskId(future_queued_task.key), required_resources={}) + ] + } + + result = await future_queued_task.result(timeout=_REMOTE_FCT_SLEEP_TIME_S + 4) # type: ignore + assert result == 7 + + # nothing processing anymore + assert ( + await list_processing_tasks_per_worker(scheduler_url, scheduler_authentication) + == {} + ) + + +_DASK_SCHEDULER_REACTION_TIME_S: Final[int] = 4 + + +@retry(stop=stop_after_delay(_DASK_SCHEDULER_REACTION_TIME_S), wait=wait_fixed(1)) +async def _wait_for_task_done(future: distributed.Future) -> None: + assert future.done() is True + + +async def _wait_for_dask_scheduler_to_change_state() -> None: + # NOTE: I know this is kind of stupid + await asyncio.sleep(_DASK_SCHEDULER_REACTION_TIME_S) + + +@pytest.fixture 
+def fake_ec2_instance_data_with_invalid_ec2_name( + fake_ec2_instance_data: Callable[..., EC2InstanceData], faker: Faker +) -> EC2InstanceData: + return fake_ec2_instance_data(aws_private_dns=faker.name()) + + +async def test_get_worker_still_has_results_in_memory_with_invalid_ec2_name_raises( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData, +): + with pytest.raises(Ec2InvalidDnsNameError): + await get_worker_still_has_results_in_memory( + scheduler_url, + scheduler_authentication, + fake_ec2_instance_data_with_invalid_ec2_name, + ) + + +async def test_get_worker_still_has_results_in_memory_with_no_workers_raises( + dask_local_cluster_without_workers: distributed.SpecCluster, + fake_localhost_ec2_instance_data: EC2InstanceData, +): + scheduler_url = TypeAdapter(AnyUrl).validate_python( + dask_local_cluster_without_workers.scheduler_address + ) + with pytest.raises(DaskNoWorkersError): + await get_worker_still_has_results_in_memory( + scheduler_url, NoAuthentication(), fake_localhost_ec2_instance_data + ) + + +async def test_get_worker_still_has_results_in_memory_with_invalid_worker_host_raises( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + ec2_instance_data = fake_ec2_instance_data() + with pytest.raises(DaskWorkerNotFoundError): + await get_worker_still_has_results_in_memory( + scheduler_url, scheduler_authentication, ec2_instance_data + ) + + +@pytest.mark.parametrize("fct_shall_err", [True, False], ids=str) +async def test_get_worker_still_has_results_in_memory( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + dask_spec_cluster_client: distributed.Client, + fake_localhost_ec2_instance_data: EC2InstanceData, + fct_shall_err: bool, +): + # nothing ran, so it's 0 + assert ( + await get_worker_still_has_results_in_memory( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == 0 + ) + + # now run something quickly + def _add_fct(x: int, y: int) -> int: + if fct_shall_err: + msg = "BAM" + raise RuntimeError(msg) + return x + y + + # this will run right away and remain in memory until we fetch it + future_queued_task = dask_spec_cluster_client.submit(_add_fct, 2, 5) + assert future_queued_task + await _wait_for_task_done(future_queued_task) + assert ( + await get_worker_still_has_results_in_memory( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == 1 + ) + + # get the result will NOT bring the data back + if fct_shall_err: + exc = await future_queued_task.exception( # type: ignore + timeout=_DASK_SCHEDULER_REACTION_TIME_S + ) + assert isinstance(exc, RuntimeError) + else: + result = await future_queued_task.result( + timeout=_DASK_SCHEDULER_REACTION_TIME_S + ) # type: ignore + assert result == 7 + + await _wait_for_dask_scheduler_to_change_state() + assert ( + await get_worker_still_has_results_in_memory( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == 1 + ) + + # this should remove the memory + del future_queued_task + await _wait_for_dask_scheduler_to_change_state() + assert ( + await get_worker_still_has_results_in_memory( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == 0 + ) + + +async def test_worker_used_resources_with_invalid_ec2_name_raises( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + 
fake_ec2_instance_data_with_invalid_ec2_name: EC2InstanceData, +): + with pytest.raises(Ec2InvalidDnsNameError): + await get_worker_used_resources( + scheduler_url, + scheduler_authentication, + fake_ec2_instance_data_with_invalid_ec2_name, + ) + + +async def test_worker_used_resources_with_no_workers_raises( + dask_local_cluster_without_workers: distributed.SpecCluster, + fake_localhost_ec2_instance_data: EC2InstanceData, +): + scheduler_url = TypeAdapter(AnyUrl).validate_python( + dask_local_cluster_without_workers.scheduler_address + ) + with pytest.raises(DaskNoWorkersError): + await get_worker_used_resources( + scheduler_url, NoAuthentication(), fake_localhost_ec2_instance_data + ) + + +async def test_worker_used_resources_with_invalid_worker_host_raises( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + ec2_instance_data = fake_ec2_instance_data() + with pytest.raises(DaskWorkerNotFoundError): + await get_worker_used_resources( + scheduler_url, scheduler_authentication, ec2_instance_data + ) + + +async def test_worker_used_resources( + scheduler_url: AnyUrl, + scheduler_authentication: ClusterAuthentication, + dask_spec_cluster_client: distributed.Client, + fake_localhost_ec2_instance_data: EC2InstanceData, +): + # initial state + assert ( + await get_worker_used_resources( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == Resources.create_as_empty() + ) + + def _add_fct(x: int, y: int) -> int: + import time + + time.sleep(_DASK_SCHEDULER_REACTION_TIME_S * 2) + return x + y + + # run something that uses resources + num_cpus = 2 + future_queued_task = dask_spec_cluster_client.submit( + _add_fct, 2, 5, resources={"CPU": num_cpus} + ) + assert future_queued_task + await _wait_for_dask_scheduler_to_change_state() + assert await get_worker_used_resources( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) == Resources(cpus=num_cpus, ram=ByteSize(0)) + + result = await future_queued_task.result(timeout=_DASK_SCHEDULER_REACTION_TIME_S) # type: ignore + assert result == 7 + + # back to no use + assert ( + await get_worker_used_resources( + scheduler_url, scheduler_authentication, fake_localhost_ec2_instance_data + ) + == Resources.create_as_empty() + ) diff --git a/services/autoscaling/tests/unit/test_modules_ec2.py b/services/autoscaling/tests/unit/test_modules_ec2.py index f412e40f36d..aab1747983a 100644 --- a/services/autoscaling/tests/unit/test_modules_ec2.py +++ b/services/autoscaling/tests/unit/test_modules_ec2.py @@ -1,317 +1,212 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable +# pylint: disable=protected-access -from typing import Callable, cast +from collections.abc import Callable +from typing import TypedDict -import botocore.exceptions import pytest +from aws_library.ec2 import EC2InstanceConfig, EC2InstanceType, Resources from faker import Faker from fastapi import FastAPI -from moto.server import ThreadedMotoServer -from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_envs import EnvVarsDict -from simcore_service_autoscaling.core.errors import ( - ConfigurationError, - Ec2InstanceNotFoundError, - Ec2TooManyInstancesError, +from prometheus_client.metrics import MetricWrapperBase +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_autoscaling.core.errors import ConfigurationError +from 
simcore_service_autoscaling.modules.ec2 import get_ec2_client +from simcore_service_autoscaling.modules.instrumentation import ( + get_instrumentation, + has_instrumentation, ) -from simcore_service_autoscaling.core.settings import ApplicationSettings, EC2Settings -from simcore_service_autoscaling.modules.ec2 import ( - AutoscalingEC2, - EC2InstanceData, - get_ec2_client, -) -from types_aiobotocore_ec2 import EC2Client from types_aiobotocore_ec2.literals import InstanceTypeType -@pytest.fixture -def ec2_settings( - app_environment: EnvVarsDict, -) -> EC2Settings: - return EC2Settings.create_from_envs() - - -@pytest.fixture -def app_settings( - app_environment: EnvVarsDict, -) -> ApplicationSettings: - return ApplicationSettings.create_from_envs() - - -async def test_ec2_client_lifespan(ec2_settings: EC2Settings): - ec2 = await AutoscalingEC2.create(settings=ec2_settings) - assert ec2 - assert ec2.client - assert ec2.exit_stack - assert ec2.session - - await ec2.close() - - -async def test_ec2_client_raises_when_no_connection_available(ec2_client: EC2Client): - with pytest.raises( - botocore.exceptions.ClientError, match=r".+ AWS was not able to validate .+" - ): - await ec2_client.describe_account_attributes(DryRun=True) - - -async def test_ec2_client_with_mock_server( - mocked_aws_server_envs: None, ec2_client: EC2Client -): - # passes without exception - await ec2_client.describe_account_attributes(DryRun=True) - - async def test_ec2_does_not_initialize_if_deactivated( disabled_rabbitmq: None, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, ): assert hasattr(initialized_app.state, "ec2_client") - assert initialized_app.state.ec2_client == None + assert initialized_app.state.ec2_client is None with pytest.raises(ConfigurationError): get_ec2_client(initialized_app) -async def test_ec2_client_when_ec2_server_goes_up_and_down( - mocked_aws_server: ThreadedMotoServer, - mocked_aws_server_envs: None, - ec2_client: EC2Client, -): - # passes without exception - await ec2_client.describe_account_attributes(DryRun=True) - mocked_aws_server.stop() - with pytest.raises(botocore.exceptions.EndpointConnectionError): - await ec2_client.describe_account_attributes(DryRun=True) - - # restart - mocked_aws_server.start() - # passes without exception - await ec2_client.describe_account_attributes(DryRun=True) - - -async def test_ping( - mocked_aws_server: ThreadedMotoServer, - mocked_aws_server_envs: None, - aws_allowed_ec2_instance_type_names: list[str], - app_settings: ApplicationSettings, - autoscaling_ec2: AutoscalingEC2, -): - assert await autoscaling_ec2.ping() is True - mocked_aws_server.stop() - assert await autoscaling_ec2.ping() is False - mocked_aws_server.start() - assert await autoscaling_ec2.ping() is True - - -async def test_get_ec2_instance_capabilities( - mocked_aws_server_envs: None, - aws_allowed_ec2_instance_type_names: list[str], - app_settings: ApplicationSettings, - autoscaling_ec2: AutoscalingEC2, -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - assert app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES - instance_types = await autoscaling_ec2.get_ec2_instance_capabilities( - cast( - set[InstanceTypeType], - set(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES), - ) - ) - assert instance_types - assert len(instance_types) == len( - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES - ) - - # all the instance names are found and valid - assert all( - i.name in 
app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES - for i in instance_types - ) - for ( - instance_type_name - ) in app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES: - assert any(i.name == instance_type_name for i in instance_types) - - -async def test_start_aws_instance( - mocked_aws_server_envs: None, - aws_vpc_id: str, - aws_subnet_id: str, - aws_security_group_id: str, - aws_ami_id: str, - ec2_client: EC2Client, - autoscaling_ec2: AutoscalingEC2, - app_settings: ApplicationSettings, +@pytest.fixture +def create_ec2_instance_config( faker: Faker, - mocker: MockerFixture, -): - assert app_settings.AUTOSCALING_EC2_ACCESS - assert app_settings.AUTOSCALING_EC2_INSTANCES - # we have nothing running now in ec2 - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - - instance_type = faker.pystr() - tags = faker.pydict(allowed_types=(str,)) - startup_script = faker.pystr() - await autoscaling_ec2.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - instance_type, - tags=tags, - startup_script=startup_script, - number_of_instances=1, - ) - - # check we have that now in ec2 - all_instances = await ec2_client.describe_instances() - assert len(all_instances["Reservations"]) == 1 - running_instance = all_instances["Reservations"][0] - assert "Instances" in running_instance - assert len(running_instance["Instances"]) == 1 - running_instance = running_instance["Instances"][0] - assert "InstanceType" in running_instance - assert running_instance["InstanceType"] == instance_type - assert "Tags" in running_instance - assert running_instance["Tags"] == [ - {"Key": key, "Value": value} for key, value in tags.items() - ] - - -async def test_start_aws_instance_is_limited_in_number_of_instances( - mocked_aws_server_envs: None, - aws_vpc_id: str, aws_subnet_id: str, aws_security_group_id: str, aws_ami_id: str, - ec2_client: EC2Client, - autoscaling_ec2: AutoscalingEC2, - app_settings: ApplicationSettings, - faker: Faker, - mocker: MockerFixture, -): - assert app_settings.AUTOSCALING_EC2_ACCESS - assert app_settings.AUTOSCALING_EC2_INSTANCES - # we have nothing running now in ec2 - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - - # create as many instances as we can - tags = faker.pydict(allowed_types=(str,)) - startup_script = faker.pystr() - for _ in range(app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_INSTANCES): - await autoscaling_ec2.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - faker.pystr(), - tags=tags, - startup_script=startup_script, - number_of_instances=1, +) -> Callable[[InstanceTypeType], EC2InstanceConfig]: + def _(instance_type: InstanceTypeType) -> EC2InstanceConfig: + return EC2InstanceConfig( + type=EC2InstanceType( + name=instance_type, resources=Resources.create_as_empty() + ), + tags=faker.pydict(allowed_types=(str,)), + startup_script=faker.pystr(), + ami_id=aws_ami_id, + key_name=faker.pystr(), + security_group_ids=[aws_security_group_id], + subnet_id=aws_subnet_id, + iam_instance_profile="", ) - # now creating one more shall fail - with pytest.raises(Ec2TooManyInstancesError): - await autoscaling_ec2.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - faker.pystr(), - tags=tags, - startup_script=startup_script, - number_of_instances=1, - ) - - -async def test_get_instances( - mocked_aws_server_envs: None, - aws_vpc_id: str, - aws_subnet_id: str, - aws_security_group_id: str, - aws_ami_id: str, - 
ec2_client: EC2Client, - autoscaling_ec2: AutoscalingEC2, - app_settings: ApplicationSettings, + return _ + + +class _ExpectedSample(TypedDict): + name: str + value: float + labels: dict[str, str] + + +def _assert_metrics( + metrics_to_collect: MetricWrapperBase, + *, + expected_num_samples: int, + check_sample_index: int | None, + expected_sample: _ExpectedSample | None +) -> None: + collected_metrics = list(metrics_to_collect.collect()) + assert len(collected_metrics) == 1 + assert collected_metrics[0] + metrics = collected_metrics[0] + assert len(metrics.samples) == expected_num_samples + if expected_num_samples > 0: + assert check_sample_index is not None + assert expected_sample is not None + sample_1 = metrics.samples[check_sample_index] + assert sample_1.name == expected_sample["name"] + assert sample_1.value == expected_sample["value"] + assert sample_1.labels == expected_sample["labels"] + + +async def test_ec2_with_instrumentation_enabled( + disabled_rabbitmq: None, + disabled_ssm: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_redis_server: None, + initialized_app: FastAPI, + create_ec2_instance_config: Callable[[InstanceTypeType], EC2InstanceConfig], faker: Faker, - mocker: MockerFixture, ): - assert app_settings.AUTOSCALING_EC2_INSTANCES - # we have nothing running now in ec2 - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - assert ( - await autoscaling_ec2.get_instances(app_settings.AUTOSCALING_EC2_INSTANCES, {}) - == [] + assert hasattr(initialized_app.state, "ec2_client") + assert initialized_app.state.ec2_client + ec2_client = get_ec2_client(initialized_app) + assert has_instrumentation(initialized_app) + + # check current metrics (should be 0) + instrumentation = get_instrumentation(initialized_app) + _assert_metrics( + instrumentation.ec2_client_metrics.launched_instances, + expected_num_samples=0, + check_sample_index=None, + expected_sample=None, ) - # create some instance - instance_type = faker.pystr() - tags = faker.pydict(allowed_types=(str,)) - startup_script = faker.pystr() - created_instances = await autoscaling_ec2.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - instance_type, - tags=tags, - startup_script=startup_script, - number_of_instances=1, + # create some EC2s + a1_2xlarge_config = create_ec2_instance_config("a1.2xlarge") + num_a1_2xlarge = faker.pyint(min_value=1, max_value=12) + a1_2xlarge_instances = await ec2_client.launch_instances( + a1_2xlarge_config, + min_number_of_instances=num_a1_2xlarge, + number_of_instances=num_a1_2xlarge, + max_total_number_of_instances=500, ) - assert len(created_instances) == 1 - instance_received = await autoscaling_ec2.get_instances( - app_settings.AUTOSCALING_EC2_INSTANCES, - tags=tags, + # now the metrics shall increase + _assert_metrics( + instrumentation.ec2_client_metrics.launched_instances, + expected_num_samples=2, + check_sample_index=0, + expected_sample={ + "name": "simcore_service_autoscaling_computational_launched_instances_total", + "value": num_a1_2xlarge, + "labels": {"instance_type": "a1.2xlarge"}, + }, ) - assert created_instances == instance_received + # create some other EC2s + c5ad_12xlarge_config = create_ec2_instance_config("c5ad.12xlarge") + num_c5ad_12xlarge = faker.pyint(min_value=1, max_value=123) + c5ad_12xlarge_instances = await ec2_client.launch_instances( + c5ad_12xlarge_config, + min_number_of_instances=num_c5ad_12xlarge, + number_of_instances=num_c5ad_12xlarge, + max_total_number_of_instances=500, + ) + # we should 
get additional metrics with different labels + _assert_metrics( + instrumentation.ec2_client_metrics.launched_instances, + expected_num_samples=4, + check_sample_index=2, + expected_sample={ + "name": "simcore_service_autoscaling_computational_launched_instances_total", + "value": num_c5ad_12xlarge, + "labels": {"instance_type": "c5ad.12xlarge"}, + }, + ) -async def test_terminate_instance( - mocked_aws_server_envs: None, - aws_vpc_id: str, - aws_subnet_id: str, - aws_security_group_id: str, - aws_ami_id: str, - ec2_client: EC2Client, - autoscaling_ec2: AutoscalingEC2, - app_settings: ApplicationSettings, - faker: Faker, - mocker: MockerFixture, -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - # we have nothing running now in ec2 - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - # create some instance - instance_type = faker.pystr() - tags = faker.pydict(allowed_types=(str,)) - startup_script = faker.pystr() - created_instances = await autoscaling_ec2.start_aws_instance( - app_settings.AUTOSCALING_EC2_INSTANCES, - instance_type, - tags=tags, - startup_script=startup_script, - number_of_instances=1, + # now we stop the last ones created + await ec2_client.stop_instances(c5ad_12xlarge_instances) + + # we get the stopped metrics increased now + _assert_metrics( + instrumentation.ec2_client_metrics.stopped_instances, + expected_num_samples=2, + check_sample_index=0, + expected_sample={ + "name": "simcore_service_autoscaling_computational_stopped_instances_total", + "value": num_c5ad_12xlarge, + "labels": {"instance_type": "c5ad.12xlarge"}, + }, ) - assert len(created_instances) == 1 - # terminate the instance - await autoscaling_ec2.terminate_instances(created_instances) - # calling it several times is ok, the instance stays a while - await autoscaling_ec2.terminate_instances(created_instances) + # now we start it again + await ec2_client.start_instances(c5ad_12xlarge_instances) + + # we get the stopped metrics increased now + _assert_metrics( + instrumentation.ec2_client_metrics.started_instances, + expected_num_samples=2, + check_sample_index=0, + expected_sample={ + "name": "simcore_service_autoscaling_computational_started_instances_total", + "value": num_c5ad_12xlarge, + "labels": {"instance_type": "c5ad.12xlarge"}, + }, + ) + # we terminate them + await ec2_client.terminate_instances(c5ad_12xlarge_instances) + + # we get the terminated metrics increased now + _assert_metrics( + instrumentation.ec2_client_metrics.terminated_instances, + expected_num_samples=2, + check_sample_index=0, + expected_sample={ + "name": "simcore_service_autoscaling_computational_terminated_instances_total", + "value": num_c5ad_12xlarge, + "labels": {"instance_type": "c5ad.12xlarge"}, + }, + ) -async def test_terminate_instance_not_existing_raises( - mocked_aws_server_envs: None, - aws_vpc_id: str, - aws_subnet_id: str, - aws_security_group_id: str, - aws_ami_id: str, - ec2_client: EC2Client, - autoscaling_ec2: AutoscalingEC2, - app_settings: ApplicationSettings, - fake_ec2_instance_data: Callable[..., EC2InstanceData], -): - assert app_settings.AUTOSCALING_EC2_INSTANCES - # we have nothing running now in ec2 - all_instances = await ec2_client.describe_instances() - assert not all_instances["Reservations"] - with pytest.raises(Ec2InstanceNotFoundError): - await autoscaling_ec2.terminate_instances([fake_ec2_instance_data()]) + # we terminate the rest + await ec2_client.terminate_instances(a1_2xlarge_instances) + + # we get the terminated metrics increased now 
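+ # (the terminated counter is cumulative, so both instance types now show up in the samples)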
+    _assert_metrics(
+        instrumentation.ec2_client_metrics.terminated_instances,
+        expected_num_samples=4,
+        check_sample_index=2,
+        expected_sample={
+            "name": "simcore_service_autoscaling_computational_terminated_instances_total",
+            "value": num_a1_2xlarge,
+            "labels": {"instance_type": "a1.2xlarge"},
+        },
+    )
diff --git a/services/autoscaling/tests/unit/test_modules_instrumentation_models.py b/services/autoscaling/tests/unit/test_modules_instrumentation_models.py
new file mode 100644
index 00000000000..78824bd8fb5
--- /dev/null
+++ b/services/autoscaling/tests/unit/test_modules_instrumentation_models.py
@@ -0,0 +1,35 @@
+from dataclasses import is_dataclass
+
+import pytest
+from simcore_service_autoscaling.models import BufferPool, Cluster
+from simcore_service_autoscaling.modules.instrumentation._constants import (
+    BUFFER_POOLS_METRICS_DEFINITIONS,
+    CLUSTER_METRICS_DEFINITIONS,
+)
+from simcore_service_autoscaling.modules.instrumentation._models import (
+    BufferPoolsMetrics,
+    ClusterMetrics,
+)
+
+
+@pytest.mark.parametrize(
+    "class_name, metrics_class_name, metrics_definitions",
+    [
+        (Cluster, ClusterMetrics, CLUSTER_METRICS_DEFINITIONS),
+        (BufferPool, BufferPoolsMetrics, BUFFER_POOLS_METRICS_DEFINITIONS),
+    ],
+)
+def test_models_are_in_sync(
+    class_name: type,
+    metrics_class_name: type,
+    metrics_definitions: dict[str, tuple[str, tuple[str, ...]]],
+):
+    assert is_dataclass(class_name)
+    assert is_dataclass(metrics_class_name)
+    for field in class_name.__dataclass_fields__:
+        assert (
+            field in metrics_definitions
+        ), f"metrics definitions are missing {field}"
+        assert hasattr(
+            metrics_class_name, field
+        ), f"{metrics_class_name.__qualname__} is missing {field}"
diff --git a/services/autoscaling/tests/unit/test_modules_instrumentation_utils.py b/services/autoscaling/tests/unit/test_modules_instrumentation_utils.py
new file mode 100644
index 00000000000..31a19701f8e
--- /dev/null
+++ b/services/autoscaling/tests/unit/test_modules_instrumentation_utils.py
@@ -0,0 +1,90 @@
+from collections.abc import Callable
+from typing import TypedDict
+
+from aws_library.ec2._models import EC2InstanceData
+from prometheus_client import CollectorRegistry
+from prometheus_client.metrics import MetricWrapperBase
+from simcore_service_autoscaling.modules.instrumentation._constants import (
+    EC2_INSTANCE_LABELS,
+)
+from simcore_service_autoscaling.modules.instrumentation._utils import create_gauge
+
+
+class _ExpectedSample(TypedDict):
+    name: str
+    value: float
+    labels: dict[str, str]
+
+
+def _assert_metrics(
+    metrics_to_collect: MetricWrapperBase,
+    *,
+    expected_num_samples: int,
+    check_sample_index: int | None,
+    expected_sample: _ExpectedSample | None
+) -> None:
+    collected_metrics = list(metrics_to_collect.collect())
+    assert len(collected_metrics) == 1
+    assert collected_metrics[0]
+    metrics = collected_metrics[0]
+    assert len(metrics.samples) == expected_num_samples
+    if expected_num_samples > 0:
+        assert check_sample_index is not None
+        assert expected_sample is not None
+        sample_1 = metrics.samples[check_sample_index]
+        assert sample_1.name == expected_sample["name"]
+        assert sample_1.value == expected_sample["value"]
+        assert sample_1.labels == expected_sample["labels"]
+
+
+def test_update_gauge_sets_old_entries_to_0(
+    fake_ec2_instance_data: Callable[..., EC2InstanceData]
+):
+    # Create a Gauge with example labels
+    registry = CollectorRegistry()
+    tracked_gauge = create_gauge(
+        field_name="example_gauge",
+        definition=("An example gauge", 
EC2_INSTANCE_LABELS), + subsystem="whatever", + registry=registry, + ) + + ec2_instance_type_1 = fake_ec2_instance_data() + + # Update the gauge with some values + tracked_gauge.update_from_instances([ec2_instance_type_1]) + _assert_metrics( + tracked_gauge.gauge, + expected_num_samples=1, + check_sample_index=0, + expected_sample=_ExpectedSample( + name="simcore_service_autoscaling_whatever_example_gauge", + value=1, + labels={"instance_type": ec2_instance_type_1.type}, + ), + ) + + # ensure we show an explicit 0 so that prometheus correctly updates + ec2_instance_type_2 = fake_ec2_instance_data() + assert ec2_instance_type_1.type != ec2_instance_type_2.type + tracked_gauge.update_from_instances([ec2_instance_type_2]) + _assert_metrics( + tracked_gauge.gauge, + expected_num_samples=2, + check_sample_index=0, + expected_sample=_ExpectedSample( + name="simcore_service_autoscaling_whatever_example_gauge", + value=0, + labels={"instance_type": ec2_instance_type_1.type}, + ), + ) + _assert_metrics( + tracked_gauge.gauge, + expected_num_samples=2, + check_sample_index=1, + expected_sample=_ExpectedSample( + name="simcore_service_autoscaling_whatever_example_gauge", + value=1, + labels={"instance_type": ec2_instance_type_2.type}, + ), + ) diff --git a/services/autoscaling/tests/unit/test_modules_rabbitmq.py b/services/autoscaling/tests/unit/test_modules_rabbitmq.py index 245d50a960f..51991fa0d93 100644 --- a/services/autoscaling/tests/unit/test_modules_rabbitmq.py +++ b/services/autoscaling/tests/unit/test_modules_rabbitmq.py @@ -3,7 +3,8 @@ # pylint:disable=redefined-outer-name import asyncio -from typing import Any, Mapping +from collections.abc import Callable, Mapping +from typing import Any import aiodocker import pytest @@ -15,7 +16,7 @@ RabbitMessageBase, ) from pytest_mock.plugin import MockerFixture -from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq import BIND_TO_ALL_TOPICS, RabbitMQClient from settings_library.rabbit import RabbitSettings from simcore_service_autoscaling.core.errors import ConfigurationError from simcore_service_autoscaling.modules.rabbitmq import ( @@ -23,17 +24,17 @@ post_message, ) from tenacity import retry -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -_TENACITY_RETRY_PARAMS = dict( - reraise=True, - retry=retry_if_exception_type(AssertionError), - stop=stop_after_delay(30), - wait=wait_fixed(0.1), -) +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "stop": stop_after_delay(30), + "wait": wait_fixed(0.1), +} # Selection of core and tool services started in this swarm fixture (integration) pytest_simcore_core_services_selection = [ @@ -61,8 +62,8 @@ def rabbit_autoscaling_message(faker: Faker) -> RabbitAutoscalingStatusMessage: def rabbit_log_message(faker: Faker) -> LoggerRabbitMessage: return LoggerRabbitMessage( user_id=faker.pyint(min_value=1), - project_id=faker.uuid4(), - node_id=faker.uuid4(), + project_id=faker.uuid4(cast_to=None), # type: ignore + node_id=faker.uuid4(cast_to=None), # type: ignore messages=faker.pylist(allowed_types=(str,)), ) @@ -82,11 +83,12 @@ def rabbit_message( def test_rabbitmq_does_not_initialize_if_deactivated( disabled_rabbitmq: None, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, ): assert hasattr(initialized_app.state, "rabbitmq_client") 
- assert initialized_app.state.rabbitmq_client == None + assert initialized_app.state.rabbitmq_client is None with pytest.raises(ConfigurationError): get_rabbitmq_client(initialized_app) @@ -94,6 +96,7 @@ def test_rabbitmq_does_not_initialize_if_deactivated( def test_rabbitmq_initializes( enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, ): @@ -103,17 +106,23 @@ def test_rabbitmq_initializes( async def test_post_message( - disable_dynamic_service_background_task, + disable_autoscaling_background_task, enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, rabbit_message: RabbitMessageBase, - rabbit_client: RabbitMQClient, + create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture, ): mocked_message_handler = mocker.AsyncMock(return_value=True) - await rabbit_client.subscribe(rabbit_message.channel_name, mocked_message_handler) + client = create_rabbitmq_client("pytest_consumer") + await client.subscribe( + rabbit_message.channel_name, + mocked_message_handler, + topics=[BIND_TO_ALL_TOPICS] if rabbit_message.routing_key() else None, + ) await post_message(initialized_app, message=rabbit_message) async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): @@ -122,7 +131,7 @@ async def test_post_message( f"--> checking for message in rabbit exchange {rabbit_message.channel_name}, {attempt.retry_state.retry_object.statistics}" ) mocked_message_handler.assert_called_once_with( - rabbit_message.json().encode() + rabbit_message.model_dump_json().encode() ) print("... message received") @@ -130,6 +139,7 @@ async def test_post_message( async def test_post_message_with_disabled_rabbit_does_not_raise( disabled_rabbitmq: None, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, rabbit_message: RabbitMessageBase, @@ -168,6 +178,7 @@ async def _check_service_task_gone(service: Mapping[str, Any]) -> None: async def test_post_message_when_rabbit_disconnected( enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, rabbit_autoscaling_message: RabbitAutoscalingStatusMessage, diff --git a/services/autoscaling/tests/unit/test_modules_redis.py b/services/autoscaling/tests/unit/test_modules_redis.py index f317ca8e584..efd43f989a8 100644 --- a/services/autoscaling/tests/unit/test_modules_redis.py +++ b/services/autoscaling/tests/unit/test_modules_redis.py @@ -10,6 +10,7 @@ async def test_redis_raises_if_missing( disabled_rabbitmq: None, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, ): diff --git a/services/autoscaling/tests/unit/test_modules_ssm.py b/services/autoscaling/tests/unit/test_modules_ssm.py new file mode 100644 index 00000000000..93b46ec57ce --- /dev/null +++ b/services/autoscaling/tests/unit/test_modules_ssm.py @@ -0,0 +1,23 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=protected-access + + +import pytest +from fastapi import FastAPI +from simcore_service_autoscaling.core.errors import ConfigurationError +from simcore_service_autoscaling.modules.ssm import get_ssm_client + + +async def test_ssm_does_not_initialize_if_deactivated( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, +): + assert 
hasattr(initialized_app.state, "ssm_client") + assert initialized_app.state.ssm_client is None + with pytest.raises(ConfigurationError): + get_ssm_client(initialized_app) diff --git a/services/autoscaling/tests/unit/test_utils_auto_scaling_core.py b/services/autoscaling/tests/unit/test_utils_auto_scaling_core.py new file mode 100644 index 00000000000..54d4f1b44e0 --- /dev/null +++ b/services/autoscaling/tests/unit/test_utils_auto_scaling_core.py @@ -0,0 +1,405 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import datetime +import json +import re +from collections.abc import Callable +from random import choice, shuffle + +import arrow +import pytest +from aws_library.ec2 import EC2InstanceType +from faker import Faker +from models_library.docker import DockerGenericTag +from models_library.generated_models.docker_rest_api import Node as DockerNode +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_autoscaling.core.errors import Ec2InvalidDnsNameError +from simcore_service_autoscaling.core.settings import ApplicationSettings +from simcore_service_autoscaling.models import AssociatedInstance, EC2InstanceData +from simcore_service_autoscaling.utils.auto_scaling_core import ( + associate_ec2_instances_with_nodes, + ec2_startup_script, + get_machine_buffer_type, + node_host_name_from_ec2_private_dns, + sort_drained_nodes, +) +from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY, +) + + +@pytest.fixture +def node(faker: Faker) -> Callable[..., DockerNode]: + def _creator(**overrides) -> DockerNode: + return DockerNode( + **( + { + "ID": faker.uuid4(), + "CreatedAt": f"{faker.date_time()}", + "UpdatedAt": f"{faker.date_time()}", + "Description": {"Hostname": faker.pystr()}, + } + | overrides + ) + ) + + return _creator + + +@pytest.mark.parametrize( + "aws_private_dns, expected_host_name", + [ + ("ip-10-12-32-3.internal-data", "ip-10-12-32-3"), + ("ip-10-12-32-32.internal-data", "ip-10-12-32-32"), + ("ip-10-0-3-129.internal-data", "ip-10-0-3-129"), + ("ip-10-0-3-12.internal-data", "ip-10-0-3-12"), + ], +) +def test_node_host_name_from_ec2_private_dns( + fake_ec2_instance_data: Callable[..., EC2InstanceData], + aws_private_dns: str, + expected_host_name: str, +): + instance = fake_ec2_instance_data( + aws_private_dns=aws_private_dns, + ) + assert node_host_name_from_ec2_private_dns(instance) == expected_host_name + + +def test_node_host_name_from_ec2_private_dns_raises_with_invalid_name( + fake_ec2_instance_data: Callable[..., EC2InstanceData], faker: Faker +): + instance = fake_ec2_instance_data(aws_private_dns=faker.name()) + with pytest.raises(Ec2InvalidDnsNameError): + node_host_name_from_ec2_private_dns(instance) + + +@pytest.mark.parametrize("valid_ec2_dns", [True, False]) +async def test_associate_ec2_instances_with_nodes_with_no_correspondence( + fake_ec2_instance_data: Callable[..., EC2InstanceData], + node: Callable[..., DockerNode], + valid_ec2_dns: bool, +): + nodes = [node() for _ in range(10)] + ec2_instances = [ + ( + fake_ec2_instance_data(aws_private_dns=f"ip-10-12-32-{n + 1}.internal-data") + if valid_ec2_dns + else fake_ec2_instance_data() + ) + for n in range(10) + ] + + ( + associated_instances, + non_associated_instances, + ) = await 
associate_ec2_instances_with_nodes(nodes, ec2_instances) + + assert not associated_instances + assert non_associated_instances + assert len(non_associated_instances) == len(ec2_instances) + + +async def test_associate_ec2_instances_with_corresponding_nodes( + fake_ec2_instance_data: Callable[..., EC2InstanceData], + node: Callable[..., DockerNode], +): + nodes = [] + ec2_instances = [] + for n in range(10): + host_name = f"ip-10-12-32-{n + 1}" + nodes.append(node(Description={"Hostname": host_name})) + ec2_instances.append( + fake_ec2_instance_data(aws_private_dns=f"{host_name}.internal-data") + ) + + ( + associated_instances, + non_associated_instances, + ) = await associate_ec2_instances_with_nodes(nodes, ec2_instances) + + assert associated_instances + assert not non_associated_instances + assert len(associated_instances) == len(ec2_instances) + assert len(associated_instances) == len(nodes) + for associated_instance in associated_instances: + assert associated_instance.node.description + assert associated_instance.node.description.hostname + assert ( + associated_instance.node.description.hostname + in associated_instance.ec2_instance.aws_private_dns + ) + + +@pytest.fixture +def minimal_configuration( + docker_swarm: None, + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + disable_autoscaling_background_task: None, + mocked_redis_server: None, +) -> None: ... + + +@pytest.fixture +def ec2_instances_boot_just_ami( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker +) -> EnvVarsDict: + envs = setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + {"t2.micro": {"ami_id": faker.pystr()}} + ), + }, + ) + return app_environment | envs + + +async def test_ec2_startup_script_just_ami( + minimal_configuration: None, + ec2_instances_boot_just_ami: EnvVarsDict, + app_settings: ApplicationSettings, +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + instance_boot_specific = next( + iter( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.values() + ) + ) + assert not instance_boot_specific.pre_pull_images + assert instance_boot_specific.pre_pull_images_cron_interval == datetime.timedelta( + minutes=30 + ) + startup_script = await ec2_startup_script(instance_boot_specific, app_settings) + assert len(startup_script.split("&&")) == 1 + assert re.fullmatch( + r"^docker swarm join --availability=drain --token .*$", startup_script + ) + + +@pytest.fixture +def ec2_instances_boot_ami_scripts( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker +) -> list[str]: + custom_scripts = faker.pylist(allowed_types=(str,)) + setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + "t2.micro": { + "ami_id": faker.pystr(), + "custom_boot_scripts": custom_scripts, + } + } + ), + }, + ) + return custom_scripts + + +@pytest.fixture +def ec2_instances_boot_ami_pre_pull( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker +) -> EnvVarsDict: + images = TypeAdapter(list[DockerGenericTag]).validate_python( + [ + "nginx:latest", + "itisfoundation/my-very-nice-service:latest", + "simcore/services/dynamic/another-nice-one:2.4.5", + "asd", + ] + ) + envs = setenvs_from_dict( + monkeypatch, + { + "EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + {"t2.micro": {"ami_id": faker.pystr(), "pre_pull_images": images}} + ), + }, + ) + return app_environment | envs + + +@pytest.fixture +def disabled_registry(monkeypatch: pytest.MonkeyPatch) -> None: + 
monkeypatch.delenv("REGISTRY_AUTH") + + +async def test_ec2_startup_script_with_pre_pulling( + minimal_configuration: None, + ec2_instances_boot_ami_pre_pull: EnvVarsDict, + app_settings: ApplicationSettings, +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + instance_boot_specific = next( + iter( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.values() + ) + ) + assert instance_boot_specific.pre_pull_images + assert instance_boot_specific.pre_pull_images_cron_interval + startup_script = await ec2_startup_script(instance_boot_specific, app_settings) + assert len(startup_script.split("&&")) == 7 + assert re.fullmatch( + r"^(docker swarm join [^&&]+) && (echo [^\s]+ \| docker login [^&&]+) && (echo [^&&]+) && (echo [^&&]+) && (chmod \+x [^&&]+) && (./docker-pull-script.sh) && (echo .+)$", + startup_script, + ), f"{startup_script=}" + + +async def test_ec2_startup_script_with_custom_scripts( + minimal_configuration: None, + ec2_instances_boot_ami_scripts: list[str], + app_settings: ApplicationSettings, +): + for _ in range(3): + assert app_settings.AUTOSCALING_EC2_INSTANCES + instance_boot_specific = next( + iter( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.values() + ) + ) + assert not instance_boot_specific.pre_pull_images + assert instance_boot_specific.pre_pull_images_cron_interval + startup_script = await ec2_startup_script(instance_boot_specific, app_settings) + assert len(startup_script.split("&&")) == 1 + len( + ec2_instances_boot_ami_scripts + ) + assert re.fullmatch( + rf"^([^&&]+ &&){{{len(ec2_instances_boot_ami_scripts)}}} (docker swarm join .+)$", + startup_script, + ), f"{startup_script=}" + + +async def test_ec2_startup_script_with_pre_pulling_but_no_registry( + minimal_configuration: None, + ec2_instances_boot_ami_pre_pull: EnvVarsDict, + disabled_registry: None, + app_settings: ApplicationSettings, +): + assert app_settings.AUTOSCALING_EC2_INSTANCES + instance_boot_specific = next( + iter( + app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_ALLOWED_TYPES.values() + ) + ) + assert instance_boot_specific.pre_pull_images + assert instance_boot_specific.pre_pull_images_cron_interval + startup_script = await ec2_startup_script(instance_boot_specific, app_settings) + assert len(startup_script.split("&&")) == 1 + assert re.fullmatch( + r"^docker swarm join --availability=drain --token .*$", startup_script + ) + + +def test_get_machine_buffer_type( + random_fake_available_instances: list[EC2InstanceType], +): + assert ( + get_machine_buffer_type(random_fake_available_instances) + == random_fake_available_instances[0] + ) + + +def test_sort_empty_drained_nodes( + minimal_configuration: None, + app_settings: ApplicationSettings, + random_fake_available_instances: list[EC2InstanceType], +): + assert sort_drained_nodes(app_settings, [], random_fake_available_instances) == ( + [], + [], + [], + ) + + +def test_sort_drained_nodes( + with_instances_machines_hot_buffer: EnvVarsDict, + minimal_configuration: None, + app_settings: ApplicationSettings, + random_fake_available_instances: list[EC2InstanceType], + create_fake_node: Callable[..., DockerNode], + create_associated_instance: Callable[..., AssociatedInstance], +): + machine_buffer_type = get_machine_buffer_type(random_fake_available_instances) + _NUM_DRAINED_NODES = 20 + _NUM_NODE_WITH_TYPE_BUFFER = ( + 3 * app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + _NUM_NODES_TERMINATING = 13 + fake_drained_nodes = [] + for _ in range(_NUM_DRAINED_NODES): + fake_node = 
create_fake_node() + fake_associated_instance = create_associated_instance( + fake_node, + terminateable_time=False, + fake_ec2_instance_data_override={ + "type": choice( # noqa: S311 + [ + i + for i in random_fake_available_instances + if i != machine_buffer_type + ] + ).name + }, + ) + fake_drained_nodes.append(fake_associated_instance) + + for _ in range(_NUM_NODE_WITH_TYPE_BUFFER): + fake_node = create_fake_node() + fake_associated_instance = create_associated_instance( + fake_node, + terminateable_time=False, + fake_ec2_instance_data_override={"type": machine_buffer_type.name}, + ) + fake_drained_nodes.append(fake_associated_instance) + + for _ in range(_NUM_NODES_TERMINATING): + fake_node = create_fake_node() + assert fake_node.spec + assert fake_node.spec.labels + fake_node.spec.labels[_OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY] = ( + arrow.utcnow().datetime.isoformat() + ) + fake_associated_instance = create_associated_instance( + fake_node, + terminateable_time=False, + fake_ec2_instance_data_override={"type": machine_buffer_type.name}, + ) + fake_drained_nodes.append(fake_associated_instance) + shuffle(fake_drained_nodes) + + assert ( + len(fake_drained_nodes) + == _NUM_DRAINED_NODES + _NUM_NODE_WITH_TYPE_BUFFER + _NUM_NODES_TERMINATING + ) + ( + sorted_drained_nodes, + sorted_buffer_drained_nodes, + terminating_nodes, + ) = sort_drained_nodes( + app_settings, fake_drained_nodes, random_fake_available_instances + ) + assert app_settings.AUTOSCALING_EC2_INSTANCES + assert len(sorted_drained_nodes) == ( + _NUM_DRAINED_NODES + + _NUM_NODE_WITH_TYPE_BUFFER + - app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + assert ( + len(sorted_buffer_drained_nodes) + == app_settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MACHINES_BUFFER + ) + assert len(terminating_nodes) == _NUM_NODES_TERMINATING + for n in terminating_nodes: + assert n.node.spec + assert n.node.spec.labels + assert _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY in n.node.spec.labels diff --git a/services/autoscaling/tests/unit/test_utils_buffer_machines_pool_core.py b/services/autoscaling/tests/unit/test_utils_buffer_machines_pool_core.py new file mode 100644 index 00000000000..19cc33c2575 --- /dev/null +++ b/services/autoscaling/tests/unit/test_utils_buffer_machines_pool_core.py @@ -0,0 +1,176 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +import pytest +from aws_library.ec2 import AWSTagKey, AWSTagValue, EC2Tags +from faker import Faker +from fastapi import FastAPI +from models_library.docker import DockerGenericTag +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_autoscaling.constants import ( + ACTIVATED_BUFFER_MACHINE_EC2_TAGS, + BUFFER_MACHINE_TAG_KEY, + DEACTIVATED_BUFFER_MACHINE_EC2_TAGS, + PRE_PULLED_IMAGES_EC2_TAG_KEY, +) +from simcore_service_autoscaling.modules.auto_scaling_mode_computational import ( + ComputationalAutoscaling, +) +from simcore_service_autoscaling.modules.auto_scaling_mode_dynamic import ( + DynamicAutoscaling, +) +from simcore_service_autoscaling.utils.buffer_machines_pool_core import ( + dump_pre_pulled_images_as_tags, + get_activated_buffer_ec2_tags, + get_deactivated_buffer_ec2_tags, + is_buffer_machine, + load_pre_pulled_images_from_tags, +) + + +def test_get_activated_buffer_ec2_tags_dynamic( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + 
initialized_app: FastAPI, +): + auto_scaling_mode = DynamicAutoscaling() + activated_buffer_tags = get_activated_buffer_ec2_tags( + initialized_app, auto_scaling_mode + ) + assert ( + auto_scaling_mode.get_ec2_tags(initialized_app) + | ACTIVATED_BUFFER_MACHINE_EC2_TAGS + ) == activated_buffer_tags + + +def test_get_deactivated_buffer_ec2_tags_dynamic( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + initialized_app: FastAPI, +): + auto_scaling_mode = DynamicAutoscaling() + deactivated_buffer_tags = get_deactivated_buffer_ec2_tags( + initialized_app, auto_scaling_mode + ) + # when deactivated the buffer EC2 name has an additional -buffer suffix + expected_tags = ( + auto_scaling_mode.get_ec2_tags(initialized_app) + | DEACTIVATED_BUFFER_MACHINE_EC2_TAGS + ) + assert "Name" in expected_tags + expected_tags[AWSTagKey("Name")] = TypeAdapter(AWSTagValue).validate_python( + str(expected_tags[AWSTagKey("Name")]) + "-buffer" + ) + assert expected_tags == deactivated_buffer_tags + + +def test_get_activated_buffer_ec2_tags_computational( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_computational_mode: EnvVarsDict, + initialized_app: FastAPI, +): + auto_scaling_mode = ComputationalAutoscaling() + activated_buffer_tags = get_activated_buffer_ec2_tags( + initialized_app, auto_scaling_mode + ) + assert ( + auto_scaling_mode.get_ec2_tags(initialized_app) + | ACTIVATED_BUFFER_MACHINE_EC2_TAGS + ) == activated_buffer_tags + + +def test_get_deactivated_buffer_ec2_tags_computational( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_computational_mode: EnvVarsDict, + initialized_app: FastAPI, +): + auto_scaling_mode = ComputationalAutoscaling() + deactivated_buffer_tags = get_deactivated_buffer_ec2_tags( + initialized_app, auto_scaling_mode + ) + # when deactivated the buffer EC2 name has an additional -buffer suffix + expected_tags = ( + auto_scaling_mode.get_ec2_tags(initialized_app) + | DEACTIVATED_BUFFER_MACHINE_EC2_TAGS + ) + assert "Name" in expected_tags + expected_tags[AWSTagKey("Name")] = TypeAdapter(AWSTagValue).validate_python( + str(expected_tags[AWSTagKey("Name")]) + "-buffer" + ) + assert expected_tags == deactivated_buffer_tags + + +@pytest.mark.parametrize( + "tags, expected_is_buffer", + [ + ({"whatever_key": "whatever_value"}, False), + ({BUFFER_MACHINE_TAG_KEY: "whatever_value"}, True), + ], +) +def test_is_buffer_machine(tags: EC2Tags, expected_is_buffer: bool): + assert is_buffer_machine(tags) is expected_is_buffer + + +@pytest.mark.parametrize( + "images, expected_tags", + [ + pytest.param( + [ + "itisfoundation/dynamic-sidecar:latest", + "itisfoundation/agent:latest", + "registry.pytest.com/simcore/services/dynamic/ti-postpro:2.0.34", + "registry.pytest.com/simcore/services/dynamic/ti-simu:1.0.12", + "registry.pytest.com/simcore/services/dynamic/ti-pers:1.0.19", + "registry.pytest.com/simcore/services/dynamic/sim4life-postpro:2.0.106", + "registry.pytest.com/simcore/services/dynamic/s4l-core-postpro:2.0.106", + "registry.pytest.com/simcore/services/dynamic/s4l-core-stream:2.0.106", + "registry.pytest.com/simcore/services/dynamic/sym-server-8-0-0-dy:2.0.106", + "registry.pytest.com/simcore/services/dynamic/sim4life-8-0-0-modeling:3.2.34", + "registry.pytest.com/simcore/services/dynamic/s4l-core-8-0-0-modeling:3.2.34", + 
"registry.pytest.com/simcore/services/dynamic/s4l-stream-8-0-0-dy:3.2.34", + "registry.pytest.com/simcore/services/dynamic/sym-server-8-0-0-dy:3.2.34", + ], + { + f"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_0": '["itisfoundation/dynamic-sidecar:latest","itisfoundation/agent:latest","registry.pytest.com/simcore/services/dynamic/ti-postpro:2.0.34","registry.pytest.com/simcore/services/dynamic/ti-simu:1.0.12","registry.pytest.com/simcore/services/dynamic/ti-pers:1.0.', + f"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_1": '19","registry.pytest.com/simcore/services/dynamic/sim4life-postpro:2.0.106","registry.pytest.com/simcore/services/dynamic/s4l-core-postpro:2.0.106","registry.pytest.com/simcore/services/dynamic/s4l-core-stream:2.0.106","registry.pytest.com/simcore/services', + f"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_2": '/dynamic/sym-server-8-0-0-dy:2.0.106","registry.pytest.com/simcore/services/dynamic/sim4life-8-0-0-modeling:3.2.34","registry.pytest.com/simcore/services/dynamic/s4l-core-8-0-0-modeling:3.2.34","registry.pytest.com/simcore/services/dynamic/s4l-stream-8-0-0', + f"{PRE_PULLED_IMAGES_EC2_TAG_KEY}_3": '-dy:3.2.34","registry.pytest.com/simcore/services/dynamic/sym-server-8-0-0-dy:3.2.34"]', + }, + id="many images that get chunked to AWS Tag max length", + ), + pytest.param( + ["itisfoundation/dynamic-sidecar:latest", "itisfoundation/agent:latest"], + { + PRE_PULLED_IMAGES_EC2_TAG_KEY: '["itisfoundation/dynamic-sidecar:latest","itisfoundation/agent:latest"]' + }, + id="<256 characters jsonized number of images does not get chunked", + ), + pytest.param( + [], + {PRE_PULLED_IMAGES_EC2_TAG_KEY: "[]"}, + id="empty list", + ), + ], +) +def test_dump_load_pre_pulled_images_as_tags( + images: list[DockerGenericTag], expected_tags: EC2Tags +): + assert dump_pre_pulled_images_as_tags(images) == expected_tags + assert load_pre_pulled_images_from_tags(expected_tags) == images + + +def test_load_pre_pulled_images_as_tags_no_tag_present_returns_empty_list(faker: Faker): + assert load_pre_pulled_images_from_tags(faker.pydict(allowed_types=(str,))) == [] diff --git a/services/autoscaling/tests/unit/test_utils_computational_scaling.py b/services/autoscaling/tests/unit/test_utils_computational_scaling.py new file mode 100644 index 00000000000..b5744f17053 --- /dev/null +++ b/services/autoscaling/tests/unit/test_utils_computational_scaling.py @@ -0,0 +1,57 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +from aws_library.ec2 import Resources +from pydantic import ByteSize, TypeAdapter +from simcore_service_autoscaling.models import DaskTask, DaskTaskResources +from simcore_service_autoscaling.utils.computational_scaling import ( + _DEFAULT_MAX_CPU, + _DEFAULT_MAX_RAM, + resources_from_dask_task, +) + + +@pytest.mark.parametrize( + "dask_task, expected_resource", + [ + pytest.param( + DaskTask(task_id="fake", required_resources=DaskTaskResources()), + Resources( + cpus=_DEFAULT_MAX_CPU, + ram=TypeAdapter(ByteSize).validate_python(_DEFAULT_MAX_RAM), + ), + id="missing resources returns defaults", + ), + pytest.param( + DaskTask(task_id="fake", required_resources={"CPU": 2.5}), + Resources( + cpus=2.5, ram=TypeAdapter(ByteSize).validate_python(_DEFAULT_MAX_RAM) + ), + id="only cpus defined", + ), + pytest.param( + DaskTask( + task_id="fake", + required_resources={"CPU": 2.5, "RAM": 2 * 1024 * 1024 * 1024}, + ), + Resources(cpus=2.5, ram=TypeAdapter(ByteSize).validate_python("2GiB")), + id="cpu and ram 
defined", + ), + pytest.param( + DaskTask( + task_id="fake", + required_resources={"CPU": 2.5, "ram": 2 * 1024 * 1024 * 1024}, + ), + Resources( + cpus=2.5, ram=TypeAdapter(ByteSize).validate_python(_DEFAULT_MAX_RAM) + ), + id="invalid naming", + ), + ], +) +def test_resources_from_dask_task(dask_task: DaskTask, expected_resource: Resources): + assert resources_from_dask_task(dask_task) == expected_resource diff --git a/services/autoscaling/tests/unit/test_utils_docker.py b/services/autoscaling/tests/unit/test_utils_docker.py index 939ab9c9dba..cae93f3402e 100644 --- a/services/autoscaling/tests/unit/test_utils_docker.py +++ b/services/autoscaling/tests/unit/test_utils_docker.py @@ -1,3 +1,4 @@ +# pylint: disable=no-member # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable @@ -6,43 +7,74 @@ import datetime import itertools import random +from collections.abc import AsyncIterator, Awaitable, Callable from copy import deepcopy -from typing import Any, AsyncIterator, Awaitable, Callable +from typing import Any import aiodocker +import arrow import pytest +from aws_library.ec2 import EC2InstanceData, Resources from deepdiff import DeepDiff from faker import Faker -from models_library.docker import DockerGenericTag, DockerLabelKey +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + DockerGenericTag, + DockerLabelKey, +) from models_library.generated_models.docker_rest_api import ( Availability, + NodeDescription, + NodeSpec, NodeState, + NodeStatus, Service, Task, ) -from pydantic import ByteSize, parse_obj_as +from pydantic import ByteSize, TypeAdapter from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict from servicelib.docker_utils import to_datetime -from simcore_service_autoscaling.models import Resources +from settings_library.docker_registry import RegistrySettings +from simcore_service_autoscaling.core.settings import ApplicationSettings from simcore_service_autoscaling.modules.docker import AutoscalingDocker from simcore_service_autoscaling.utils.utils_docker import ( + _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY, + _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY, + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, Node, _by_created_dt, + attach_node, compute_cluster_total_resources, compute_cluster_used_resources, compute_node_used_resources, compute_tasks_needed_resources, find_node_with_name, + get_docker_login_on_start_bash_command, get_docker_pull_images_crontab, get_docker_pull_images_on_start_bash_command, get_docker_swarm_join_bash_command, get_max_resources_from_docker_task, get_monitored_nodes, + get_new_node_docker_tags, + get_node_empty_since, + get_node_last_readyness_update, + get_node_termination_started_since, get_node_total_resources, + get_task_instance_restriction, + get_worker_nodes, + is_node_osparc_ready, + is_node_ready_and_available, pending_service_tasks_with_insufficient_resources, remove_nodes, + set_node_availability, + set_node_begin_termination_process, + set_node_found_empty, + set_node_osparc_ready, tag_node, ) +from types_aiobotocore_ec2.literals import InstanceTypeType @pytest.fixture @@ -50,27 +82,26 @@ async def create_node_labels( host_node: Node, async_docker_client: aiodocker.Docker, ) -> AsyncIterator[Callable[[list[str]], Awaitable[None]]]: - assert host_node.Spec - old_labels = deepcopy(host_node.Spec.Labels) + assert host_node.spec + old_labels = 
deepcopy(host_node.spec.labels) async def _creator(labels: list[str]) -> None: - assert host_node.ID - assert host_node.Version - assert host_node.Version.Index - assert host_node.Spec - assert host_node.Spec.Role - assert host_node.Spec.Availability + assert host_node.id + assert host_node.version + assert host_node.version.index + assert host_node.spec + assert host_node.spec.role + assert host_node.spec.availability await async_docker_client.nodes.update( - node_id=host_node.ID, - version=host_node.Version.Index, + node_id=host_node.id, + version=host_node.version.index, spec={ "Name": "foo", - "Availability": host_node.Spec.Availability.value, - "Role": host_node.Spec.Role.value, + "Availability": host_node.spec.availability.value, + "Role": host_node.spec.role.value, "Labels": {f"{label}": "true" for label in labels}, }, ) - return yield _creator # revert labels @@ -116,41 +147,74 @@ async def test_get_monitored_nodes_with_valid_label( create_node_labels: Callable[[list[str]], Awaitable[None]], ): labels = faker.pylist(allowed_types=(str,)) - await create_node_labels(labels) + await create_node_labels( + [ + *labels, + _OSPARC_SERVICE_READY_LABEL_KEY, + _OSPARC_SERVICES_READY_DATETIME_LABEL_KEY, + ] + ) monitored_nodes = await get_monitored_nodes(autoscaling_docker, node_labels=labels) assert len(monitored_nodes) == 1 # this is the host node with some keys slightly changed EXCLUDED_KEYS = { - "Index": True, - "UpdatedAt": True, - "Version": True, - "Spec": {"Labels", "Name"}, + "index": True, + "updated_at": True, + "version": True, + "spec": {"labels", "name"}, } - assert host_node.dict(exclude=EXCLUDED_KEYS) == monitored_nodes[0].dict( + assert host_node.model_dump(exclude=EXCLUDED_KEYS) == monitored_nodes[0].model_dump( exclude=EXCLUDED_KEYS ) +async def test_get_monitored_nodes_are_sorted_according_to_creation_date( + mocker: MockerFixture, + autoscaling_docker: AutoscalingDocker, + create_fake_node: Callable[..., Node], + faker: Faker, +): + fake_nodes = [ + create_fake_node(CreatedAt=faker.date_time(tzinfo=datetime.UTC).isoformat()) + for _ in range(10) + ] + mocked_aiodocker = mocker.patch.object(autoscaling_docker, "nodes", autospec=True) + mocked_aiodocker.list.return_value = fake_nodes + monitored_nodes = await get_monitored_nodes(autoscaling_docker, node_labels=[]) + assert len(monitored_nodes) == len(fake_nodes) + sorted_fake_nodes = sorted(fake_nodes, key=lambda node: arrow.get(node.created_at)) + assert monitored_nodes == sorted_fake_nodes + assert monitored_nodes[0].created_at < monitored_nodes[1].created_at + + +async def test_worker_nodes( + autoscaling_docker: AutoscalingDocker, + host_node: Node, +): + worker_nodes = await get_worker_nodes(autoscaling_docker) + assert not worker_nodes + + async def test_remove_monitored_down_nodes_with_empty_list_does_nothing( autoscaling_docker: AutoscalingDocker, ): - assert await remove_nodes(autoscaling_docker, []) == [] + assert await remove_nodes(autoscaling_docker, nodes=[]) == [] async def test_remove_monitored_down_nodes_of_non_down_node_does_nothing( autoscaling_docker: AutoscalingDocker, host_node: Node, ): - assert await remove_nodes(autoscaling_docker, [host_node]) == [] + assert await remove_nodes(autoscaling_docker, nodes=[host_node]) == [] @pytest.fixture def fake_docker_node(host_node: Node, faker: Faker) -> Node: - fake_node = host_node.copy(deep=True) - fake_node.ID = faker.uuid4() + fake_node = host_node.model_copy(deep=True) + fake_node.id = faker.uuid4(cast_to=str) assert ( - host_node.ID != fake_node.ID + 
host_node.id != fake_node.id ), "this should never happen, or you are really unlucky" return fake_node @@ -161,15 +225,15 @@ async def test_remove_monitored_down_nodes_of_down_node( mocker: MockerFixture, ): mocked_aiodocker = mocker.patch.object(autoscaling_docker, "nodes", autospec=True) - assert fake_docker_node.Status - fake_docker_node.Status.State = NodeState.down - assert fake_docker_node.Status.State == NodeState.down - assert await remove_nodes(autoscaling_docker, [fake_docker_node]) == [ + assert fake_docker_node.status + fake_docker_node.status.state = NodeState.down + assert fake_docker_node.status.state == NodeState.down + assert await remove_nodes(autoscaling_docker, nodes=[fake_docker_node]) == [ fake_docker_node ] # NOTE: this is the same as calling with aiodocker.Docker() as docker: docker.nodes.remove() mocked_aiodocker.remove.assert_called_once_with( - node_id=fake_docker_node.ID, force=False + node_id=fake_docker_node.id, force=False ) @@ -177,10 +241,10 @@ async def test_remove_monitored_down_node_with_unexpected_state_does_nothing( autoscaling_docker: AutoscalingDocker, fake_docker_node: Node, ): - assert fake_docker_node.Status - fake_docker_node.Status = None - assert not fake_docker_node.Status - assert await remove_nodes(autoscaling_docker, [fake_docker_node]) == [] + assert fake_docker_node.status + fake_docker_node.status = None + assert not fake_docker_node.status + assert await remove_nodes(autoscaling_docker, nodes=[fake_docker_node]) == [] async def test_pending_service_task_with_insufficient_resources_with_no_service( @@ -232,7 +296,7 @@ async def test_pending_service_task_with_placement_constrain_is_skipped( service_with_too_many_resources = await create_service( task_template_with_too_many_resource, {}, "pending" ) - assert service_with_too_many_resources.Spec + assert service_with_too_many_resources.spec pending_tasks = await pending_service_tasks_with_insufficient_resources( autoscaling_docker, service_labels=[] @@ -268,13 +332,12 @@ async def test_pending_service_task_with_insufficient_resources_with_service_lac service_with_too_many_resources = await create_service( task_template_with_too_many_resource, {}, "pending" ) - assert service_with_too_many_resources.Spec + assert service_with_too_many_resources.spec - service_tasks = parse_obj_as( - list[Task], + service_tasks = TypeAdapter(list[Task]).validate_python( await autoscaling_docker.tasks.list( - filters={"service": service_with_too_many_resources.Spec.Name} - ), + filters={"service": service_with_too_many_resources.spec.name} + ) ) assert service_tasks assert len(service_tasks) == 1 @@ -288,12 +351,12 @@ async def test_pending_service_task_with_insufficient_resources_with_service_lac diff = DeepDiff( pending_tasks[0], service_tasks[0], - exclude_paths={ + exclude_paths=[ "UpdatedAt", "Version", "root['Status']['Err']", "root['Status']['Timestamp']", - }, + ], ) assert not diff, f"{diff}" @@ -324,7 +387,7 @@ async def test_pending_service_task_with_insufficient_resources_with_labelled_se # start a service with a part of the labels, we should not find it partial_service_labels = dict(itertools.islice(service_labels.items(), 2)) - _service_with_partial_labels = await create_service( + await create_service( task_template_with_too_many_resource, partial_service_labels, "pending" ) @@ -338,16 +401,15 @@ async def test_pending_service_task_with_insufficient_resources_with_labelled_se service_with_labels = await create_service( task_template_with_too_many_resource, service_labels, "pending" ) - assert 
service_with_labels.Spec + assert service_with_labels.spec pending_tasks = await pending_service_tasks_with_insufficient_resources( autoscaling_docker, service_labels=list(service_labels) ) - service_tasks = parse_obj_as( - list[Task], + service_tasks = TypeAdapter(list[Task]).validate_python( await autoscaling_docker.tasks.list( - filters={"service": service_with_labels.Spec.Name} - ), + filters={"service": service_with_labels.spec.name} + ) ) assert service_tasks assert len(service_tasks) == 1 @@ -356,12 +418,12 @@ async def test_pending_service_task_with_insufficient_resources_with_labelled_se diff = DeepDiff( pending_tasks[0], service_tasks[0], - exclude_paths={ + exclude_paths=[ "UpdatedAt", "Version", "root['Status']['Err']", "root['Status']['Timestamp']", - }, + ], ) assert not diff, f"{diff}" @@ -394,21 +456,16 @@ async def test_pending_service_task_with_insufficient_resources_properly_sorts_t assert len(pending_tasks) == len(services) # check sorting is done by creation date - last_date = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( - days=1 - ) + last_date = datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1) for task in pending_tasks: - assert task.CreatedAt # NOTE: in this case they are but they might be None - assert ( - to_datetime(task.CreatedAt).replace(tzinfo=datetime.timezone.utc) - > last_date - ) - last_date = to_datetime(task.CreatedAt).replace(tzinfo=datetime.timezone.utc) + assert task.created_at # NOTE: in this case they are but they might be None + assert to_datetime(task.created_at).replace(tzinfo=datetime.UTC) > last_date + last_date = to_datetime(task.created_at).replace(tzinfo=datetime.UTC) def test_safe_sort_key_callback(): tasks_with_faulty_timestamp = [ - Task(ID=n, CreatedAt=value) # type: ignore + Task(ID=f"{n}", CreatedAt=value) for n, value in enumerate( [ # SEE test_to_datetime_conversion_known_errors @@ -416,7 +473,7 @@ def test_safe_sort_key_callback(): "2023-03-15 09:20:58.123456", "2023-03-15T09:20:58.123456", "2023-03-15T09:20:58.123456Z", - f"{datetime.datetime.now(datetime.timezone.utc)}", + f"{datetime.datetime.now(datetime.UTC)}", "corrupted string", ] ) @@ -424,16 +481,16 @@ def test_safe_sort_key_callback(): sorted_tasks = sorted(tasks_with_faulty_timestamp, key=_by_created_dt) assert len(sorted_tasks) == len(tasks_with_faulty_timestamp) - assert {t.ID for t in sorted_tasks} == {t.ID for t in tasks_with_faulty_timestamp} + assert {t.id for t in sorted_tasks} == {t.id for t in tasks_with_faulty_timestamp} def test_get_node_total_resources(host_node: Node): resources = get_node_total_resources(host_node) - assert host_node.Description - assert host_node.Description.Resources - assert host_node.Description.Resources.NanoCPUs - assert resources.cpus == (host_node.Description.Resources.NanoCPUs / 10**9) - assert resources.ram == host_node.Description.Resources.MemoryBytes + assert host_node.description + assert host_node.description.resources + assert host_node.description.resources.nano_cp_us + assert resources.cpus == (host_node.description.resources.nano_cp_us / 10**9) + assert resources.ram == host_node.description.resources.memory_bytes async def test_compute_cluster_total_resources_with_no_nodes_returns_0( @@ -458,12 +515,11 @@ async def test_get_resources_from_docker_task_with_no_reservation_returns_0( task_template: dict[str, Any], ): service_with_no_resources = await create_service(task_template, {}, "running") - assert service_with_no_resources.Spec - service_tasks = parse_obj_as( - list[Task], + assert 
service_with_no_resources.spec + service_tasks = TypeAdapter(list[Task]).validate_python( await autoscaling_docker.tasks.list( - filters={"service": service_with_no_resources.Spec.Name} - ), + filters={"service": service_with_no_resources.spec.name} + ) ) assert service_tasks assert len(service_tasks) == 1 @@ -487,10 +543,9 @@ async def test_get_resources_from_docker_task_with_reservations( NUM_CPUS, 0 ) service = await create_service(task_template_with_reservations, {}, "running") - assert service.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list(filters={"service": service.Spec.Name}), + assert service.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await async_docker_client.tasks.list(filters={"service": service.spec.name}) ) assert service_tasks assert len(service_tasks) == 1 @@ -515,20 +570,73 @@ async def test_get_resources_from_docker_task_with_reservations_and_limits_retur NUM_CPUS, 0 ) task_template_with_reservations["Resources"] |= create_task_limits( - host_cpu_count, parse_obj_as(ByteSize, "100Mib") + host_cpu_count, TypeAdapter(ByteSize).validate_python("100Mib") )["Resources"] service = await create_service(task_template_with_reservations, {}, "running") - assert service.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list(filters={"service": service.Spec.Name}), + assert service.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await async_docker_client.tasks.list(filters={"service": service.spec.name}) ) assert service_tasks assert len(service_tasks) == 1 assert get_max_resources_from_docker_task(service_tasks[0]) == Resources( - cpus=host_cpu_count, ram=parse_obj_as(ByteSize, "100Mib") + cpus=host_cpu_count, ram=TypeAdapter(ByteSize).validate_python("100Mib") + ) + + +@pytest.mark.parametrize( + "placement_constraints, expected_instance_type", + [ + (None, None), + (["blahblah==true", "notsoblahblah!=true"], None), + (["blahblah==true", "notsoblahblah!=true", "node.labels.blahblah==true"], None), + ( + [ + "blahblah==true", + "notsoblahblah!=true", + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}==true", + ], + None, + ), + ( + [ + "blahblah==true", + "notsoblahblah!=true", + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}==t3.medium", + ], + "t3.medium", + ), + ], +) +async def test_get_task_instance_restriction( + autoscaling_docker: AutoscalingDocker, + host_node: Node, + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str] | None, str, list[str] | None], + Awaitable[Service], + ], + task_template: dict[str, Any], + create_task_reservations: Callable[[int, int], dict[str, Any]], + faker: Faker, + placement_constraints: list[str] | None, + expected_instance_type: InstanceTypeType | None, +): + # this one has no instance restriction + service = await create_service( + task_template, + None, + "pending" if placement_constraints else "running", + placement_constraints, + ) + assert service.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await autoscaling_docker.tasks.list(filters={"service": service.spec.name}) + ) + instance_type_or_none = await get_task_instance_restriction( + autoscaling_docker, service_tasks[0] ) + assert instance_type_or_none == expected_instance_type async def test_compute_tasks_needed_resources( @@ -543,12 +651,11 @@ async def test_compute_tasks_needed_resources( faker: Faker, ): service_with_no_resources = await create_service(task_template, {}, "running") - 
assert service_with_no_resources.Spec - service_tasks = parse_obj_as( - list[Task], + assert service_with_no_resources.spec + service_tasks = TypeAdapter(list[Task]).validate_python( await autoscaling_docker.tasks.list( - filters={"service": service_with_no_resources.Spec.Name} - ), + filters={"service": service_with_no_resources.spec.name} + ) ) assert compute_tasks_needed_resources(service_tasks) == Resources.create_as_empty() @@ -563,9 +670,9 @@ async def test_compute_tasks_needed_resources( ) all_tasks = service_tasks for s in services: - service_tasks = parse_obj_as( - list[Task], - await autoscaling_docker.tasks.list(filters={"service": s.Spec.Name}), + assert s.spec + service_tasks = TypeAdapter(list[Task]).validate_python( + await autoscaling_docker.tasks.list(filters={"service": s.spec.name}) ) assert compute_tasks_needed_resources(service_tasks) == Resources( cpus=1, ram=ByteSize(0) @@ -595,7 +702,7 @@ async def test_compute_node_used_resources_with_service( faker: Faker, ): # 1. if we have services with no defined reservations, then we cannot know what they use... - service_with_no_resources = await create_service(task_template, {}, "running") + await create_service(task_template, {}, "running") node_used_resources = await compute_node_used_resources( autoscaling_docker, host_node ) @@ -621,14 +728,14 @@ async def test_compute_node_used_resources_with_service( # 3. if we look for services with some other label, they should then become invisible again node_used_resources = await compute_node_used_resources( - autoscaling_docker, host_node, service_labels=[faker.pystr()] + autoscaling_docker, host_node, service_labels=[DockerLabelKey(faker.pystr())] ) assert node_used_resources == Resources(cpus=0, ram=ByteSize(0)) # 4. if we look for services with 1 correct label, they should then become visible again node_used_resources = await compute_node_used_resources( autoscaling_docker, host_node, - service_labels=[random.choice(list(service_labels.keys()))], + service_labels=[random.choice(list(service_labels.keys()))], # noqa: S311 ) assert node_used_resources == Resources(cpus=host_cpu_count, ram=ByteSize(0)) # 4. 
if we look for services with all the correct labels, they should then become visible again @@ -721,10 +828,14 @@ async def test_compute_cluster_used_resources_with_services_running( async def test_get_docker_swarm_join_script(host_node: Node): - join_script = await get_docker_swarm_join_bash_command() + join_script = await get_docker_swarm_join_bash_command(join_as_drained=True) assert join_script.startswith("docker swarm join") assert "--availability=drain" in join_script + join_script = await get_docker_swarm_join_bash_command(join_as_drained=False) + assert join_script.startswith("docker swarm join") + assert "--availability=active" in join_script + async def test_get_docker_swarm_join_script_bad_return_code_raises( host_node: Node, @@ -740,7 +851,7 @@ async def test_get_docker_swarm_join_script_bad_return_code_raises( ) mocked_asyncio_process.return_value.returncode = 137 with pytest.raises(RuntimeError, match=r"unexpected error .+"): - await get_docker_swarm_join_bash_command() + await get_docker_swarm_join_bash_command(join_as_drained=True) # NOTE: the sleep here is to provide some time for asyncio to properly close its process communication # to silence the warnings await asyncio.sleep(2) @@ -760,20 +871,31 @@ async def test_get_docker_swarm_join_script_returning_unexpected_command_raises( ) mocked_asyncio_process.return_value.returncode = 0 with pytest.raises(RuntimeError, match=r"expected docker .+"): - await get_docker_swarm_join_bash_command() + await get_docker_swarm_join_bash_command(join_as_drained=True) # NOTE: the sleep here is to provide some time for asyncio to properly close its process communication # to silence the warnings await asyncio.sleep(2) +def test_get_docker_login_on_start_bash_command(): + registry_settings = RegistrySettings( + **RegistrySettings.model_config["json_schema_extra"]["examples"][0] + ) + returned_command = get_docker_login_on_start_bash_command(registry_settings) + assert ( + f'echo "{registry_settings.REGISTRY_PW.get_secret_value()}" | docker login --username {registry_settings.REGISTRY_USER} --password-stdin {registry_settings.resolved_registry_url}' + == returned_command + ) + + async def test_try_get_node_with_name( autoscaling_docker: AutoscalingDocker, host_node: Node ): - assert host_node.Description - assert host_node.Description.Hostname + assert host_node.description + assert host_node.description.hostname received_node = await find_node_with_name( - autoscaling_docker, host_node.Description.Hostname + autoscaling_docker, host_node.description.hostname ) assert received_node == host_node @@ -781,38 +903,150 @@ async def test_try_get_node_with_name( async def test_try_get_node_with_name_fake( autoscaling_docker: AutoscalingDocker, fake_node: Node ): - assert fake_node.Description - assert fake_node.Description.Hostname + assert fake_node.description + assert fake_node.description.hostname received_node = await find_node_with_name( - autoscaling_docker, fake_node.Description.Hostname + autoscaling_docker, fake_node.description.hostname ) assert received_node is None +async def test_find_node_with_name_with_common_prefixed_nodes( + autoscaling_docker: AutoscalingDocker, + mocker: MockerFixture, + create_fake_node: Callable[..., Node], +): + common_prefix = "ip-10-0-1-" + mocked_aiodocker = mocker.patch.object(autoscaling_docker, "nodes", autospec=True) + mocked_aiodocker.list.return_value = [ + create_fake_node( + Description=NodeDescription(Hostname=f"{common_prefix}{'1' * (i + 1)}") + ) + for i in range(3) + ] + needed_host_name = 
f"{common_prefix}11" + found_node = await find_node_with_name(autoscaling_docker, needed_host_name) + assert found_node + assert found_node.description + assert found_node.description.hostname == needed_host_name + + +async def test_find_node_with_smaller_name_with_common_prefixed_nodes_returns_none( + autoscaling_docker: AutoscalingDocker, + mocker: MockerFixture, + create_fake_node: Callable[..., Node], +): + common_prefix = "ip-10-0-1-" + mocked_aiodocker = mocker.patch.object(autoscaling_docker, "nodes", autospec=True) + mocked_aiodocker.list.return_value = [ + create_fake_node( + Description=NodeDescription(Hostname=f"{common_prefix}{'1' * (i + 1)}") + ) + for i in range(3) + ] + needed_host_name = f"{common_prefix}" + found_node = await find_node_with_name(autoscaling_docker, needed_host_name) + assert found_node is None + + async def test_tag_node( autoscaling_docker: AutoscalingDocker, host_node: Node, faker: Faker ): - assert host_node.Description - assert host_node.Description.Hostname + assert host_node.description + assert host_node.description.hostname tags = faker.pydict(allowed_types=(str,)) await tag_node(autoscaling_docker, host_node, tags=tags, available=False) updated_node = await find_node_with_name( - autoscaling_docker, host_node.Description.Hostname + autoscaling_docker, host_node.description.hostname ) assert updated_node - assert updated_node.Spec - assert updated_node.Spec.Availability == Availability.drain - assert updated_node.Spec.Labels == tags + assert updated_node.spec + assert updated_node.spec.availability == Availability.drain + assert updated_node.spec.labels == tags await tag_node(autoscaling_docker, updated_node, tags={}, available=True) updated_node = await find_node_with_name( - autoscaling_docker, host_node.Description.Hostname + autoscaling_docker, host_node.description.hostname + ) + assert updated_node + assert updated_node.spec + assert updated_node.spec.availability == Availability.active + assert updated_node.spec.labels == {} + + +async def test_tag_node_out_of_sequence_error( + autoscaling_docker: AutoscalingDocker, host_node: Node, faker: Faker +): + assert host_node.description + assert host_node.description.hostname + tags = faker.pydict(allowed_types=(str,)) + # this works + updated_node = await tag_node( + autoscaling_docker, host_node, tags=tags, available=False ) assert updated_node - assert updated_node.Spec - assert updated_node.Spec.Availability == Availability.active - assert updated_node.Spec.Labels == {} + assert host_node.version + assert host_node.version.index + assert updated_node.version + assert updated_node.version.index + assert host_node.version.index < updated_node.version.index + + # running the same call with the old node should not raise an out of sequence error + updated_node2 = await tag_node( + autoscaling_docker, host_node, tags=tags, available=True + ) + assert updated_node2 + assert updated_node2.version + assert updated_node2.version.index + assert updated_node2.version.index > updated_node.version.index + + +async def test_set_node_availability( + autoscaling_docker: AutoscalingDocker, host_node: Node, faker: Faker +): + assert is_node_ready_and_available(host_node, availability=Availability.active) + updated_node = await set_node_availability( + autoscaling_docker, host_node, available=False + ) + assert is_node_ready_and_available(updated_node, availability=Availability.drain) + updated_node = await set_node_availability( + autoscaling_docker, host_node, available=True + ) + assert 
is_node_ready_and_available(updated_node, availability=Availability.active) + + +def test_get_new_node_docker_tags( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + disable_autoscaling_background_task: None, + app_settings: ApplicationSettings, + fake_ec2_instance_data: Callable[..., EC2InstanceData], +): + ec2_instance_data = fake_ec2_instance_data() + node_docker_tags = get_new_node_docker_tags(app_settings, ec2_instance_data) + assert node_docker_tags + assert DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY in node_docker_tags + assert app_settings.AUTOSCALING_NODES_MONITORING + for ( + tag_key + ) in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS: + assert tag_key in node_docker_tags + for ( + tag_key + ) in app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS: + assert tag_key in node_docker_tags + + all_keys = [ + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + *app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NODE_LABELS, + *app_settings.AUTOSCALING_NODES_MONITORING.NODES_MONITORING_NEW_NODES_LABELS, + ] + for tag_key in node_docker_tags: + assert tag_key in all_keys @pytest.mark.parametrize( @@ -820,11 +1054,11 @@ async def test_tag_node( [ ( ["nginx", "itisfoundation/simcore/services/dynamic/service:23.5.5"], - 'echo "services:\n pre-pull-image-0:\n image: nginx\n pre-pull-image-1:\n ' - 'image: itisfoundation/simcore/services/dynamic/service:23.5.5\nversion: \'"3.8"\'\n"' + 'echo "services:\n nginx:\n image: nginx\n service-23.5.5:\n ' + 'image: itisfoundation/simcore/services/dynamic/service:23.5.5\n"' " > /docker-pull.compose.yml" " && " - 'echo "#!/bin/sh\necho Pulling started at \\$(date)\ndocker compose --file=/docker-pull.compose.yml pull" > /docker-pull-script.sh' + 'echo "#!/bin/sh\necho Pulling started at \\$(date)\ndocker compose --project-name=autoscaleprepull --file=/docker-pull.compose.yml pull --ignore-pull-failures" > /docker-pull-script.sh' " && " "chmod +x /docker-pull-script.sh" " && " @@ -868,3 +1102,212 @@ def test_get_docker_pull_images_crontab( interval: datetime.timedelta, expected_cmd: str ): assert get_docker_pull_images_crontab(interval) == expected_cmd + + +def test_is_node_ready_and_available(create_fake_node: Callable[..., Node]): + # check not ready state return false + for node_status in [ + NodeStatus(State=s, Message=None, Addr=None) + for s in NodeState + if s is not NodeState.ready + ]: + fake_node = create_fake_node(Status=node_status) + assert not is_node_ready_and_available( + fake_node, availability=Availability.drain + ) + + node_ready_status = NodeStatus(State=NodeState.ready, Message=None, Addr=None) + fake_drained_node = create_fake_node( + Status=node_ready_status, + Spec=NodeSpec( + Name=None, + Labels=None, + Role=None, + Availability=Availability.drain, + ), + ) + assert is_node_ready_and_available( + fake_drained_node, availability=Availability.drain + ) + assert not is_node_ready_and_available( + fake_drained_node, availability=Availability.active + ) + assert not is_node_ready_and_available( + fake_drained_node, availability=Availability.pause + ) + + +def test_is_node_osparc_ready(create_fake_node: Callable[..., Node], faker: Faker): + fake_node = create_fake_node() + assert fake_node.spec + assert fake_node.spec.availability is Availability.drain + # no labels, not ready and drained + assert not is_node_osparc_ready(fake_node) + # no labels, not ready, but active + 
fake_node.spec.availability = Availability.active + assert not is_node_osparc_ready(fake_node) + # no labels, ready and active + fake_node.status = NodeStatus(State=NodeState.ready, Message=None, Addr=None) + assert not is_node_osparc_ready(fake_node) + # add some random labels + assert fake_node.spec + fake_node.spec.labels = faker.pydict(allowed_types=(str,)) + assert not is_node_osparc_ready(fake_node) + # add the expected label + fake_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "false" + assert not is_node_osparc_ready(fake_node) + # make it ready + fake_node.spec.labels[_OSPARC_SERVICE_READY_LABEL_KEY] = "true" + assert is_node_osparc_ready(fake_node) + + +async def test_set_node_osparc_ready( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + disable_autoscaling_background_task: None, + app_settings: ApplicationSettings, + autoscaling_docker: AutoscalingDocker, + host_node: Node, +): + # initial state + assert is_node_ready_and_available(host_node, availability=Availability.active) + host_node_last_readyness_update = get_node_last_readyness_update(host_node) + assert host_node_last_readyness_update + # set the node to drain + updated_node = await set_node_availability( + autoscaling_docker, host_node, available=False + ) + assert is_node_ready_and_available(updated_node, availability=Availability.drain) + # the node is also not osparc ready + assert not is_node_osparc_ready(updated_node) + # the node readyness label was not updated here + updated_last_readyness = get_node_last_readyness_update(updated_node) + assert updated_last_readyness == host_node_last_readyness_update + + # this implicitely make the node active as well + updated_node = await set_node_osparc_ready( + app_settings, autoscaling_docker, host_node, ready=True + ) + assert is_node_ready_and_available(updated_node, availability=Availability.active) + assert is_node_osparc_ready(updated_node) + updated_last_readyness = get_node_last_readyness_update(updated_node) + assert updated_last_readyness > host_node_last_readyness_update + # make it not osparc ready + updated_node = await set_node_osparc_ready( + app_settings, autoscaling_docker, host_node, ready=False + ) + assert not is_node_osparc_ready(updated_node) + assert is_node_ready_and_available(updated_node, availability=Availability.drain) + assert get_node_last_readyness_update(updated_node) > updated_last_readyness + + +async def test_set_node_found_empty( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + disable_autoscaling_background_task: None, + host_node: Node, + autoscaling_docker: AutoscalingDocker, +): + # initial state + assert is_node_ready_and_available(host_node, availability=Availability.active) + assert host_node.spec + assert host_node.spec.labels + assert _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY not in host_node.spec.labels + + # the date does not exist as nothing was done + node_empty_since = await get_node_empty_since(host_node) + assert node_empty_since is None + + # now we set it to empty + updated_node = await set_node_found_empty(autoscaling_docker, host_node, empty=True) + assert updated_node.spec + assert updated_node.spec.labels + assert _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY in updated_node.spec.labels + + # we can get that empty date back + node_empty_since = await get_node_empty_since(updated_node) + assert node_empty_since is not None + assert 
node_empty_since < arrow.utcnow().datetime + + # now we remove the empty label + updated_node = await set_node_found_empty( + autoscaling_docker, host_node, empty=False + ) + assert updated_node.spec + assert updated_node.spec.labels + assert _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY not in updated_node.spec.labels + + # we can't get a date anymore + node_empty_since = await get_node_empty_since(updated_node) + assert node_empty_since is None + + +async def test_set_node_begin_termination_process( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + disable_autoscaling_background_task: None, + host_node: Node, + autoscaling_docker: AutoscalingDocker, +): + # initial state + assert is_node_ready_and_available(host_node, availability=Availability.active) + assert host_node.spec + assert host_node.spec.labels + assert _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY not in host_node.spec.labels + + # the termination was not started, therefore no date + assert get_node_termination_started_since(host_node) is None + + updated_node = await set_node_begin_termination_process( + autoscaling_docker, host_node + ) + assert updated_node.spec + assert updated_node.spec.labels + assert _OSPARC_NODE_TERMINATION_PROCESS_LABEL_KEY in updated_node.spec.labels + + await asyncio.sleep(1) + + returned_termination_started_at = get_node_termination_started_since(updated_node) + assert returned_termination_started_at is not None + assert arrow.utcnow().datetime > returned_termination_started_at + + +async def test_attach_node( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + enabled_dynamic_mode: EnvVarsDict, + disable_autoscaling_background_task: None, + app_settings: ApplicationSettings, + autoscaling_docker: AutoscalingDocker, + host_node: Node, + faker: Faker, +): + # initial state + assert is_node_ready_and_available(host_node, availability=Availability.active) + # set the node to drain + updated_node = await set_node_availability( + autoscaling_docker, host_node, available=False + ) + assert is_node_ready_and_available(updated_node, availability=Availability.drain) + # now attach the node + updated_node = await attach_node( + app_settings, + autoscaling_docker, + updated_node, + tags=faker.pydict(allowed_types=(str,)), + ) + # expected the node to be active + assert is_node_ready_and_available(host_node, availability=Availability.active) + # but not osparc ready + assert not is_node_osparc_ready(updated_node) diff --git a/services/autoscaling/tests/unit/test_utils_dynamic_scaling.py b/services/autoscaling/tests/unit/test_utils_dynamic_scaling.py deleted file mode 100644 index 9a06f5a9bd7..00000000000 --- a/services/autoscaling/tests/unit/test_utils_dynamic_scaling.py +++ /dev/null @@ -1,286 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -import json -import re -from datetime import timedelta -from typing import Callable, Iterator - -import pytest -from faker import Faker -from models_library.docker import DockerGenericTag -from models_library.generated_models.docker_rest_api import Node, Task -from pydantic import ByteSize, parse_obj_as -from pytest_mock import MockerFixture -from simcore_service_autoscaling.core.errors import Ec2InvalidDnsNameError -from simcore_service_autoscaling.core.settings import ApplicationSettings 
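The empty-node bookkeeping exercised in the test above round-trips a UTC timestamp through a docker node label: setting empty=True writes a timestamp label, setting empty=False removes it, and get_node_empty_since reads it back. A minimal sketch of that idea, with a hypothetical label key and helper names standing in for the private _OSPARC_NODE_EMPTY_DATETIME_LABEL_KEY constant and the production helpers (which may differ in detail):

import datetime
import arrow

# hypothetical label key used only for this illustration
EMPTY_SINCE_LABEL = "io.simcore.osparc-node-found-empty"

def mark_node_empty(labels: dict[str, str], *, empty: bool) -> dict[str, str]:
    # record the current UTC time when the node is found empty,
    # drop the marker again otherwise
    updated = dict(labels)
    if empty:
        updated[EMPTY_SINCE_LABEL] = arrow.utcnow().isoformat()
    else:
        updated.pop(EMPTY_SINCE_LABEL, None)
    return updated

def node_empty_since(labels: dict[str, str]) -> datetime.datetime | None:
    # returns a timezone-aware datetime or None, mirroring what the
    # test above asserts about get_node_empty_since
    if EMPTY_SINCE_LABEL not in labels:
        return None
    return arrow.get(labels[EMPTY_SINCE_LABEL]).datetime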
-from simcore_service_autoscaling.models import EC2InstanceType -from simcore_service_autoscaling.modules.ec2 import EC2InstanceData -from simcore_service_autoscaling.utils.dynamic_scaling import ( - associate_ec2_instances_with_nodes, - ec2_startup_script, - node_host_name_from_ec2_private_dns, - try_assigning_task_to_pending_instances, -) - - -@pytest.fixture -def node(faker: Faker) -> Callable[..., Node]: - def _creator(**overrides) -> Node: - return Node( - **( - { - "ID": faker.uuid4(), - "CreatedAt": f"{faker.date_time()}", - "UpdatedAt": f"{faker.date_time()}", - "Description": {"Hostname": faker.pystr()}, - } - | overrides - ) - ) - - return _creator - - -@pytest.fixture -def fake_task(faker: Faker) -> Callable[..., Task]: - def _creator(**overrides) -> Task: - return Task( - **({"ID": faker.uuid4(), "Name": faker.pystr(), "Spec": {}} | overrides) - ) - - return _creator - - -def test_node_host_name_from_ec2_private_dns( - fake_ec2_instance_data: Callable[..., EC2InstanceData] -): - instance = fake_ec2_instance_data( - aws_private_dns="ip-10-12-32-3.internal-data", - ) - assert node_host_name_from_ec2_private_dns(instance) == "ip-10-12-32-3" - - -def test_node_host_name_from_ec2_private_dns_raises_with_invalid_name( - fake_ec2_instance_data: Callable[..., EC2InstanceData] -): - instance = fake_ec2_instance_data() - with pytest.raises(Ec2InvalidDnsNameError): - node_host_name_from_ec2_private_dns(instance) - - -@pytest.mark.parametrize("valid_ec2_dns", [True, False]) -async def test_associate_ec2_instances_with_nodes_with_no_correspondence( - fake_ec2_instance_data: Callable[..., EC2InstanceData], - node: Callable[..., Node], - valid_ec2_dns: bool, -): - nodes = [node() for _ in range(10)] - ec2_instances = [ - fake_ec2_instance_data(aws_private_dns=f"ip-10-12-32-{n+1}.internal-data") - if valid_ec2_dns - else fake_ec2_instance_data() - for n in range(10) - ] - - ( - associated_instances, - non_associated_instances, - ) = await associate_ec2_instances_with_nodes(nodes, ec2_instances) - - assert not associated_instances - assert non_associated_instances - assert len(non_associated_instances) == len(ec2_instances) - - -async def test_associate_ec2_instances_with_corresponding_nodes( - fake_ec2_instance_data: Callable[..., EC2InstanceData], - node: Callable[..., Node], -): - nodes = [] - ec2_instances = [] - for n in range(10): - host_name = f"ip-10-12-32-{n+1}" - nodes.append(node(Description={"Hostname": host_name})) - ec2_instances.append( - fake_ec2_instance_data(aws_private_dns=f"{host_name}.internal-data") - ) - - ( - associated_instances, - non_associated_instances, - ) = await associate_ec2_instances_with_nodes(nodes, ec2_instances) - - assert associated_instances - assert not non_associated_instances - assert len(associated_instances) == len(ec2_instances) - assert len(associated_instances) == len(nodes) - for associated_instance in associated_instances: - assert associated_instance.node.Description - assert associated_instance.node.Description.Hostname - assert ( - associated_instance.node.Description.Hostname - in associated_instance.ec2_instance.aws_private_dns - ) - - -async def test_try_assigning_task_to_pending_instances_with_no_instances( - mocker: MockerFixture, - fake_task: Callable[..., Task], - fake_ec2_instance_data: Callable[..., EC2InstanceData], -): - fake_app = mocker.Mock() - pending_task = fake_task() - assert ( - await try_assigning_task_to_pending_instances(fake_app, pending_task, [], {}) - is False - ) - - -async def 
test_try_assigning_task_to_pending_instances( - mocker: MockerFixture, - fake_task: Callable[..., Task], - fake_ec2_instance_data: Callable[..., EC2InstanceData], -): - fake_app = mocker.Mock() - fake_app.state.settings.AUTOSCALING_EC2_INSTANCES.EC2_INSTANCES_MAX_START_TIME = ( - timedelta(minutes=1) - ) - pending_task = fake_task( - Spec={"Resources": {"Reservations": {"NanoCPUs": 2 * 1e9}}} - ) - fake_instance = fake_ec2_instance_data() - pending_instance_to_tasks: list[tuple[EC2InstanceData, list[Task]]] = [ - (fake_instance, []) - ] - type_to_instance_map = { - fake_instance.type: EC2InstanceType( - name=fake_instance.type, cpus=4, ram=ByteSize(1024 * 1024) - ) - } - # calling once should allow to add that task to the instance - assert ( - await try_assigning_task_to_pending_instances( - fake_app, pending_task, pending_instance_to_tasks, type_to_instance_map - ) - is True - ) - # calling a second time as well should allow to add that task to the instance - assert ( - await try_assigning_task_to_pending_instances( - fake_app, pending_task, pending_instance_to_tasks, type_to_instance_map - ) - is True - ) - # calling a third time should fail - assert ( - await try_assigning_task_to_pending_instances( - fake_app, pending_task, pending_instance_to_tasks, type_to_instance_map - ) - is False - ) - - -@pytest.fixture -def minimal_configuration( - docker_swarm: None, - disabled_rabbitmq: None, - disabled_ec2: None, - disable_dynamic_service_background_task: None, - mocked_redis_server: None, -) -> Iterator[None]: - yield - - -async def test_ec2_startup_script_no_pre_pulling( - minimal_configuration: None, app_settings: ApplicationSettings -): - startup_script = await ec2_startup_script(app_settings) - assert len(startup_script.split("&&")) == 1 - assert re.fullmatch( - r"^docker swarm join --availability=drain --token .*$", startup_script - ) - - -@pytest.fixture -def enabled_pre_pull_images( - minimal_configuration: None, monkeypatch: pytest.MonkeyPatch -) -> list[DockerGenericTag]: - images = parse_obj_as( - list[DockerGenericTag], - [ - "io.simcore.some234.cool.label", - "com.example.some-label", - "nginx:latest", - "itisfoundation/my-very-nice-service:latest", - "simcore/services/dynamic/another-nice-one:2.4.5", - "asd", - ], - ) - monkeypatch.setenv( - "EC2_INSTANCES_PRE_PULL_IMAGES", - json.dumps(images), - ) - return images - - -@pytest.fixture -def enabled_custom_boot_scripts( - minimal_configuration: None, monkeypatch: pytest.MonkeyPatch, faker: Faker -) -> list[str]: - custom_scripts = faker.pylist(allowed_types=(str,)) - monkeypatch.setenv( - "EC2_INSTANCES_CUSTOM_BOOT_SCRIPTS", - json.dumps(custom_scripts), - ) - return custom_scripts - - -@pytest.fixture -def disabled_registry(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.delenv("REGISTRY_AUTH") - - -async def test_ec2_startup_script_with_pre_pulling( - minimal_configuration: None, - enabled_pre_pull_images: None, - app_settings: ApplicationSettings, -): - startup_script = await ec2_startup_script(app_settings) - assert len(startup_script.split("&&")) == 7 - assert re.fullmatch( - r"^(docker swarm join [^&&]+) && (echo [^\s]+ \| docker login [^&&]+) && (echo [^&&]+) && (echo [^&&]+) && (chmod \+x [^&&]+) && (./docker-pull-script.sh) && (echo .+)$", - startup_script, - ), f"{startup_script=}" - - -async def test_ec2_startup_script_with_custom_scripts( - minimal_configuration: None, - enabled_pre_pull_images: None, - enabled_custom_boot_scripts: list[str], - app_settings: ApplicationSettings, -): - for _ in range(3): - 
startup_script = await ec2_startup_script(app_settings) - assert len(startup_script.split("&&")) == 7 + len(enabled_custom_boot_scripts) - assert re.fullmatch( - rf"^([^&&]+ &&){{{len(enabled_custom_boot_scripts)}}} (docker swarm join [^&&]+) && (echo [^\s]+ \| docker login [^&&]+) && (echo [^&&]+) && (echo [^&&]+) && (chmod \+x [^&&]+) && (./docker-pull-script.sh) && (echo .+)$", - startup_script, - ), f"{startup_script=}" - - -async def test_ec2_startup_script_with_pre_pulling_but_no_registry( - minimal_configuration: None, - enabled_pre_pull_images: None, - disabled_registry: None, - app_settings: ApplicationSettings, -): - startup_script = await ec2_startup_script(app_settings) - assert len(startup_script.split("&&")) == 1 - assert re.fullmatch( - r"^docker swarm join --availability=drain --token .*$", startup_script - ) diff --git a/services/autoscaling/tests/unit/test_utils_ec2.py b/services/autoscaling/tests/unit/test_utils_ec2.py index 3ca06a3da63..23c5981acd2 100644 --- a/services/autoscaling/tests/unit/test_utils_ec2.py +++ b/services/autoscaling/tests/unit/test_utils_ec2.py @@ -3,18 +3,15 @@ # pylint: disable=unused-variable -import random - import pytest +from aws_library.ec2 import EC2InstanceType, Resources from faker import Faker from pydantic import ByteSize from simcore_service_autoscaling.core.errors import ( ConfigurationError, - Ec2InstanceNotFoundError, + TaskBestFittingInstanceNotFoundError, ) -from simcore_service_autoscaling.models import Resources -from simcore_service_autoscaling.utils.ec2 import ( - EC2InstanceType, +from simcore_service_autoscaling.utils.utils_ec2 import ( closest_instance_policy, compose_user_data, find_best_fitting_ec2_instance, @@ -30,24 +27,10 @@ async def test_find_best_fitting_ec2_instance_with_no_instances_raises(): ) -@pytest.fixture -def random_fake_available_instances(faker: Faker) -> list[EC2InstanceType]: - list_of_instances = [ - EC2InstanceType( - name=faker.pystr(), - cpus=n, - ram=ByteSize(n), - ) - for n in range(1, 30) - ] - random.shuffle(list_of_instances) - return list_of_instances - - async def test_find_best_fitting_ec2_instance_closest_instance_policy_with_resource_0_raises( random_fake_available_instances: list[EC2InstanceType], ): - with pytest.raises(Ec2InstanceNotFoundError): + with pytest.raises(TaskBestFittingInstanceNotFoundError): find_best_fitting_ec2_instance( allowed_ec2_instances=random_fake_available_instances, resources=Resources(cpus=0, ram=ByteSize(0)), @@ -60,10 +43,13 @@ async def test_find_best_fitting_ec2_instance_closest_instance_policy_with_resou [ ( Resources(cpus=n, ram=ByteSize(n)), - EC2InstanceType(name="fake", cpus=n, ram=ByteSize(n)), + EC2InstanceType( + name="c5ad.12xlarge", resources=Resources(cpus=n, ram=ByteSize(n)) + ), ) for n in range(1, 30) ], + ids=str, ) async def test_find_best_fitting_ec2_instance_closest_instance_policy( needed_resources: Resources, @@ -76,10 +62,7 @@ async def test_find_best_fitting_ec2_instance_closest_instance_policy( score_type=closest_instance_policy, ) - SKIPPED_KEYS = ["name"] - for k in found_instance.__dict__.keys(): - if k not in SKIPPED_KEYS: - assert getattr(found_instance, k) == getattr(expected_ec2_instance, k) + assert found_instance.resources == expected_ec2_instance.resources def test_compose_user_data(faker: Faker): diff --git a/services/autoscaling/tests/unit/test_utils_rabbitmq.py b/services/autoscaling/tests/unit/test_utils_rabbitmq.py index 5b5bb32e9b8..f9949d1d112 100644 --- a/services/autoscaling/tests/unit/test_utils_rabbitmq.py +++ 
b/services/autoscaling/tests/unit/test_utils_rabbitmq.py @@ -1,40 +1,59 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument +# pylint: disable=too-many-positional-arguments # pylint:disable=redefined-outer-name # pylint:disable=too-many-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable -from typing import Any, Awaitable, Callable +from collections.abc import Awaitable, Callable +from typing import Any +from unittest.mock import AsyncMock import aiodocker +import pytest +from dask_task_models_library.container_tasks.utils import generate_dask_job_id from faker import Faker from fastapi import FastAPI -from models_library.docker import DockerLabelKey, SimcoreServiceDockerLabelKeys +from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels from models_library.generated_models.docker_rest_api import Service, Task +from models_library.progress_bar import ProgressReport +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID from models_library.rabbitmq_messages import ( LoggerRabbitMessage, ProgressRabbitMessageNode, ProgressType, ) -from pydantic import parse_obj_as +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter from pytest_mock.plugin import MockerFixture -from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq import BIND_TO_ALL_TOPICS, RabbitMQClient from settings_library.rabbit import RabbitSettings +from simcore_service_autoscaling.models import DaskTask from simcore_service_autoscaling.utils.rabbitmq import ( - post_task_log_message, - post_task_progress_message, + post_tasks_log_message, + post_tasks_progress_message, ) -from tenacity._asyncio import AsyncRetrying +from tenacity import RetryError, retry_always +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -_TENACITY_RETRY_PARAMS = dict( - reraise=True, - retry=retry_if_exception_type(AssertionError), - stop=stop_after_delay(30), - wait=wait_fixed(0.1), -) +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "stop": stop_after_delay(30), + "wait": wait_fixed(0.1), +} + +_TENACITY_STABLE_RETRY_PARAMS = { + "reraise": True, + "retry": retry_always, + "stop": stop_after_delay(3), + "wait": wait_fixed(1), +} # Selection of core and tool services started in this swarm fixture (integration) @@ -45,47 +64,169 @@ pytest_simcore_ops_services_selection = [] -async def test_post_task_log_message( - disable_dynamic_service_background_task, +@pytest.fixture +async def logs_rabbitmq_consumer( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocker: MockerFixture, +) -> AsyncMock: + mocked_message_handler = mocker.AsyncMock(return_value=True) + client = create_rabbitmq_client("pytest_consumer") + await client.subscribe( + LoggerRabbitMessage.get_channel_name(), + mocked_message_handler, + topics=[BIND_TO_ALL_TOPICS], + ) + return mocked_message_handler + + +@pytest.fixture +async def progress_rabbitmq_consumer( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocker: MockerFixture, +) -> AsyncMock: + mocked_message_handler = mocker.AsyncMock(return_value=True) + client = create_rabbitmq_client("pytest_consumer") + await client.subscribe( + ProgressRabbitMessageNode.get_channel_name(), + mocked_message_handler, + 
topics=[BIND_TO_ALL_TOPICS], + ) + return mocked_message_handler + + +@pytest.fixture +async def running_service_tasks( + create_service: Callable[ + [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] + ], + task_template: dict[str, Any], + async_docker_client: aiodocker.Docker, +) -> Callable[[dict[DockerLabelKey, str]], Awaitable[list[Task]]]: + async def _(labels: dict[DockerLabelKey, str]) -> list[Task]: + # Simulate a running service + service = await create_service( + task_template, + labels, + "running", + ) + assert service.spec + + docker_tasks = TypeAdapter(list[Task]).validate_python( + await async_docker_client.tasks.list(filters={"service": service.spec.name}) + ) + assert docker_tasks + assert len(docker_tasks) == 1 + return docker_tasks + + return _ + + +@pytest.fixture +def service_version() -> ServiceVersion: + return "1.0.0" + + +@pytest.fixture +def service_key() -> ServiceKey: + return "simcore/services/dynamic/test" + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def project_id(faker: Faker) -> ProjectID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def user_id(faker: Faker) -> UserID: + return faker.pyint(min_value=1) + + +@pytest.fixture +def dask_task( + service_key: ServiceKey, + service_version: ServiceVersion, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> DaskTask: + dask_key = generate_dask_job_id( + service_key, service_version, user_id, project_id, node_id + ) + return DaskTask(task_id=dask_key, required_resources={}) + + +@pytest.fixture +def dask_task_with_invalid_key( + faker: Faker, +) -> DaskTask: + dask_key = faker.pystr() + return DaskTask(task_id=dask_key, required_resources={}) + + +async def test_post_task_empty_tasks( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, - rabbit_client: RabbitMQClient, - mocker: MockerFixture, - async_docker_client: aiodocker.Docker, - create_service: Callable[[dict[str, Any], dict[str, str], str], Awaitable[Service]], - task_template: dict[str, Any], - osparc_docker_label_keys: SimcoreServiceDockerLabelKeys, - faker: Faker, + logs_rabbitmq_consumer: AsyncMock, + progress_rabbitmq_consumer: AsyncMock, ): - mocked_message_handler = mocker.AsyncMock(return_value=True) - await rabbit_client.subscribe( - LoggerRabbitMessage.get_channel_name(), mocked_message_handler + await post_tasks_log_message(initialized_app, tasks=[], message="no tasks") + await post_tasks_progress_message( + initialized_app, + tasks=[], + progress=0, + progress_type=ProgressType.CLUSTER_UP_SCALING, ) - service_with_labels = await create_service( - task_template, osparc_docker_label_keys.to_docker_labels(), "running" - ) - assert service_with_labels.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list( - filters={"service": service_with_labels.Spec.Name} - ), - ) - assert service_tasks - assert len(service_tasks) == 1 + with pytest.raises(RetryError): # noqa: PT012 + async for attempt in AsyncRetrying(**_TENACITY_STABLE_RETRY_PARAMS): + with attempt: + print( + f"--> checking for message in rabbit exchange {LoggerRabbitMessage.get_channel_name()}, {attempt.retry_state.retry_object.statistics}" + ) + + logs_rabbitmq_consumer.assert_not_called() + progress_rabbitmq_consumer.assert_not_called() + print("... 
no message received") + +async def test_post_task_log_message_docker( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + running_service_tasks: Callable[[dict[DockerLabelKey, str]], Awaitable[list[Task]]], + osparc_docker_label_keys: StandardSimcoreDockerLabels, + faker: Faker, + logs_rabbitmq_consumer: AsyncMock, +): + docker_tasks = await running_service_tasks( + osparc_docker_label_keys.to_simcore_runtime_docker_labels() + ) + assert len(docker_tasks) == 1 log_message = faker.pystr() - await post_task_log_message(initialized_app, service_tasks[0], log_message, 0) + await post_tasks_log_message( + initialized_app, tasks=docker_tasks, message=log_message, level=0 + ) async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): with attempt: print( f"--> checking for message in rabbit exchange {LoggerRabbitMessage.get_channel_name()}, {attempt.retry_state.retry_object.statistics}" ) - mocked_message_handler.assert_called_once_with( + logs_rabbitmq_consumer.assert_called_once_with( LoggerRabbitMessage( node_id=osparc_docker_label_keys.node_id, project_id=osparc_docker_label_keys.project_id, @@ -93,121 +234,187 @@ async def test_post_task_log_message( messages=[f"[cluster] {log_message}"], log_level=0, ) - .json() + .model_dump_json() .encode() ) print("... message received") -async def test_post_task_log_message_does_not_raise_if_service_has_no_labels( - disable_dynamic_service_background_task, +async def test_post_task_log_message_dask( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, - async_docker_client: aiodocker.Docker, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], + dask_task: DaskTask, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, faker: Faker, + logs_rabbitmq_consumer: AsyncMock, ): - service_without_labels = await create_service(task_template, {}, "running") - assert service_without_labels.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list( - filters={"service": service_without_labels.Spec.Name} - ), + log_message = faker.pystr() + await post_tasks_log_message( + initialized_app, tasks=[dask_task], message=log_message, level=0 ) - assert service_tasks - assert len(service_tasks) == 1 - # this shall not raise any exception even if the task does not contain - # the necessary labels - await post_task_log_message(initialized_app, service_tasks[0], faker.pystr(), 0) + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + print( + f"--> checking for message in rabbit exchange {LoggerRabbitMessage.get_channel_name()}, {attempt.retry_state.retry_object.statistics}" + ) + logs_rabbitmq_consumer.assert_called_once_with( + LoggerRabbitMessage( + node_id=node_id, + project_id=project_id, + user_id=user_id, + messages=[f"[cluster] {log_message}"], + log_level=0, + ) + .model_dump_json() + .encode() + ) + print("... 
message received") -async def test_post_task_progress_message( - disable_dynamic_service_background_task, +async def test_post_task_progress_message_docker( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, - rabbit_client: RabbitMQClient, - mocker: MockerFixture, - async_docker_client: aiodocker.Docker, - create_service: Callable[[dict[str, Any], dict[str, str], str], Awaitable[Service]], - task_template: dict[str, Any], - osparc_docker_label_keys: SimcoreServiceDockerLabelKeys, + running_service_tasks: Callable[[dict[DockerLabelKey, str]], Awaitable[list[Task]]], + osparc_docker_label_keys: StandardSimcoreDockerLabels, faker: Faker, + progress_rabbitmq_consumer: AsyncMock, ): - mocked_message_handler = mocker.AsyncMock(return_value=True) - await rabbit_client.subscribe( - ProgressRabbitMessageNode.get_channel_name(), mocked_message_handler + docker_tasks = await running_service_tasks( + osparc_docker_label_keys.to_simcore_runtime_docker_labels(), ) - - service_with_labels = await create_service( - task_template, osparc_docker_label_keys.to_docker_labels(), "running" - ) - assert service_with_labels.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list( - filters={"service": service_with_labels.Spec.Name} - ), - ) - assert service_tasks - assert len(service_tasks) == 1 + assert len(docker_tasks) == 1 progress_value = faker.pyfloat(min_value=0) - await post_task_progress_message(initialized_app, service_tasks[0], progress_value) + await post_tasks_progress_message( + initialized_app, + tasks=docker_tasks, + progress=progress_value, + progress_type=ProgressType.CLUSTER_UP_SCALING, + ) async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): with attempt: print( f"--> checking for message in rabbit exchange {ProgressRabbitMessageNode.get_channel_name()}, {attempt.retry_state.retry_object.statistics}" ) - mocked_message_handler.assert_called_once_with( + progress_rabbitmq_consumer.assert_called_once_with( ProgressRabbitMessageNode( node_id=osparc_docker_label_keys.node_id, project_id=osparc_docker_label_keys.project_id, user_id=osparc_docker_label_keys.user_id, - progress=progress_value, progress_type=ProgressType.CLUSTER_UP_SCALING, + report=ProgressReport(actual_value=progress_value, total=1), ) - .json() + .model_dump_json() .encode() ) print("... 
message received") -async def test_post_task_progress_does_not_raise_if_service_has_no_labels( - disable_dynamic_service_background_task, +async def test_post_task_progress_message_dask( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, enabled_rabbitmq: RabbitSettings, disabled_ec2: None, + disabled_ssm: None, mocked_redis_server: None, initialized_app: FastAPI, - async_docker_client: aiodocker.Docker, - create_service: Callable[ - [dict[str, Any], dict[DockerLabelKey, str], str], Awaitable[Service] - ], - task_template: dict[str, Any], + dask_task: DaskTask, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, faker: Faker, + progress_rabbitmq_consumer: AsyncMock, ): - service_without_labels = await create_service(task_template, {}, "running") - assert service_without_labels.Spec - service_tasks = parse_obj_as( - list[Task], - await async_docker_client.tasks.list( - filters={"service": service_without_labels.Spec.Name} - ), + progress_value = faker.pyfloat(min_value=0) + await post_tasks_progress_message( + initialized_app, + tasks=[dask_task], + progress=progress_value, + progress_type=ProgressType.CLUSTER_UP_SCALING, ) - assert service_tasks - assert len(service_tasks) == 1 + + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + print( + f"--> checking for message in rabbit exchange {ProgressRabbitMessageNode.get_channel_name()}, {attempt.retry_state.retry_object.statistics}" + ) + progress_rabbitmq_consumer.assert_called_once_with( + ProgressRabbitMessageNode( + node_id=node_id, + project_id=project_id, + user_id=user_id, + progress_type=ProgressType.CLUSTER_UP_SCALING, + report=ProgressReport(actual_value=progress_value, total=1), + ) + .model_dump_json() + .encode() + ) + print("... 
message received") + + +async def test_post_task_messages_does_not_raise_if_service_has_no_labels( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + running_service_tasks: Callable[[dict[DockerLabelKey, str]], Awaitable[list[Task]]], + faker: Faker, +): + docker_tasks = await running_service_tasks({}) + assert len(docker_tasks) == 1 # this shall not raise any exception even if the task does not contain # the necessary labels - await post_task_progress_message( - initialized_app, service_tasks[0], faker.pyfloat(min_value=0) + await post_tasks_log_message( + initialized_app, tasks=docker_tasks, message=faker.pystr(), level=0 + ) + await post_tasks_progress_message( + initialized_app, + tasks=docker_tasks, + progress=faker.pyfloat(min_value=0), + progress_type=ProgressType.CLUSTER_UP_SCALING, + ) + + +async def test_post_task_messages_does_not_raise_if_dask_task_key_is_invalid( + disable_autoscaling_background_task, + disable_buffers_pool_background_task, + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + dask_task_with_invalid_key: DaskTask, + faker: Faker, +): + # this shall not raise any exception even if the task does not contain + # the necessary labels + await post_tasks_log_message( + initialized_app, + tasks=[dask_task_with_invalid_key], + message=faker.pystr(), + level=0, + ) + await post_tasks_progress_message( + initialized_app, + tasks=[dask_task_with_invalid_key], + progress=faker.pyfloat(min_value=0), + progress_type=ProgressType.CLUSTER_UP_SCALING, ) diff --git a/services/catalog/.env-devel b/services/catalog/.env-devel deleted file mode 100644 index fb37f2033f7..00000000000 --- a/services/catalog/.env-devel +++ /dev/null @@ -1,28 +0,0 @@ -# -# Environment variables used to configure this service -# - -CATALOG_DEV_FEATURES_ENABLED=1 - -LOG_LEVEL=DEBUG - -DIRECTOR_ENABLED=1 -DIRECTOR_HOST=localhost -DIRECTOR_PORT=28080 - -POSTGRES_USER=test -POSTGRES_PASSWORD=test -POSTGRES_DB=test -POSTGRES_HOST=localhost - -REGISTRY_AUTH=False -REGISTRY_PW=adminadmin -REGISTRY_SSL=False -REGISTRY_URL=172.17.0.1:5000 -REGISTRY_USER=admin -DIRECTOR_REGISTRY_CACHING=True -DIRECTOR_REGISTRY_CACHING_TTL=10 - -CATALOG_BACKGROUND_TASK_REST_TIME=60 - -SC_BOOT_MODE=debug-ptvsd diff --git a/services/catalog/Dockerfile b/services/catalog/Dockerfile index 8b2ea325bd7..a18034f05e2 100644 --- a/services/catalog/Dockerfile +++ b/services/catalog/Dockerfile @@ -1,5 +1,18 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # # USAGE: @@ -11,12 +24,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=pcrespov -RUN set -eux && \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f 
/etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ apt-get update && \ - apt-get install -y gosu && \ - rm -rf /var/lib/apt/lists/* && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -52,31 +71,34 @@ EXPOSE 3000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip install --no-cache-dir --upgrade \ - pip~=23.0 \ + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/catalog/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt + # --------------------------Prod-depends-only stage ------------------- @@ -85,17 +107,19 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/catalog [scu:scu] WORKDIR # -FROM build as prod-only-deps +FROM build AS prod-only-deps -ENV SC_BUILD_TARGET prod-only-deps - -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/catalog /build/services/catalog +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/catalog -RUN pip3 --no-cache-dir install -r requirements/prod.txt &&\ - pip3 --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/catalog,target=/build/services/catalog,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -105,15 +129,19 @@ RUN pip3 --no-cache-dir install -r requirements/prod.txt &&\ # + /home/scu $HOME = WORKDIR # + services/catalog [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -121,11 +149,13 @@ COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} COPY --chown=scu:scu services/catalog/docker services/catalog/docker RUN chmod +x services/catalog/docker/*.sh - -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ +# https://docs.docker.com/reference/dockerfile/#healthcheck 
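The tightened HEALTHCHECK that follows calls a small Python probe shipped with the image. A minimal sketch of such a probe, using only the standard library and assuming a plain HTTP GET against the service root; the repository's actual services/catalog/docker/healthcheck.py may differ:

import sys
from urllib.error import URLError
from urllib.request import urlopen

def is_healthy(url: str) -> bool:
    # a 2xx answer from the service root counts as healthy
    try:
        with urlopen(url, timeout=2) as response:
            return 200 <= response.status < 300
    except (URLError, OSError):
        return False

if __name__ == "__main__":
    target = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:8000/"
    sys.exit(0 if is_healthy(target) else 1)

Docker marks the container unhealthy after --retries consecutive failures; the new --start-interval lets it probe more frequently during the --start-period while the service boots.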
+HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/catalog/docker/healthcheck.py", "http://localhost:8000/"] ENTRYPOINT [ "/bin/sh", "services/catalog/docker/entrypoint.sh" ] @@ -140,7 +170,7 @@ CMD ["/bin/sh", "services/catalog/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development diff --git a/services/catalog/Makefile b/services/catalog/Makefile index 3981b930704..31b3a327698 100644 --- a/services/catalog/Makefile +++ b/services/catalog/Makefile @@ -5,60 +5,19 @@ include ../../scripts/common.Makefile include ../../scripts/common-service.Makefile -.PHONY: requirements reqs -requirements reqs: ## (or reqs) compiles pip requirements (.in -> .txt) - @$(MAKE_C) requirements reqs +.env-ignore: + $(APP_CLI_NAME) echo-dotenv > $@ - - -# DEVELOPMENT ######## - -.env: - cp .env-devel $@ - - -.PHONY: run-devel up-extra down down-extra - -up-extra: .env down-extra ## creates and starts adjacent services and migrates postgres database - # starting all adjacent services - docker-compose -f docker-compose-extra.yml up --detach - sleep 1 - # discovering postgres services - @export $(shell grep -v '^#' .env | xargs) && sc-pg discover - @sc-pg info - # upgrading postgres database to HEAD version - @sc-pg upgrade - -down down-extra: docker-compose-extra.yml ## stops pg fixture - # cleanup discover cache - -@sc-pg clean - # stopping extra services - -@docker-compose -f docker-compose-extra.yml down - # killing any process using port 8000 - -@fuser --kill --verbose --namespace tcp 8000 - - -run-devel: .env up-extra ## starts app with extra stack - # start app (within $<) in devel mode - uvicorn $(APP_PACKAGE_NAME).__main__:the_app \ - --reload --reload-dir $(SRC_DIR) \ - --port=8000 --host=0.0.0.0 - -run-prod: .env up-extra - # start app (within $<) in prod mode - $(APP_CLI_NAME) - - - -# BUILD ##################### - .PHONY: openapi-specs openapi.json openapi-specs: openapi.json -openapi.json: .env - # generating openapi specs file +openapi.json: .env-ignore + # generating openapi specs file (need to have the environment set for this) + @set -o allexport; \ + source $<; \ + set +o allexport; \ python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ + # validates OAS file: $@ - @cd $(CURDIR); \ - $(SCRIPTS_DIR)/openapi-generator-cli.bash validate --input-spec /local/$@ + $(call validate_openapi_specs,$@) diff --git a/services/catalog/README.md b/services/catalog/README.md index 3eb7770af7a..1495a3d9f44 100644 --- a/services/catalog/README.md +++ b/services/catalog/README.md @@ -1,44 +1,3 @@ # catalog -[![image-size]](https://microbadger.com/images/itisfoundation/catalog. "More on itisfoundation/catalog:staging-latest image") -[![image-badge]](https://microbadger.com/images/itisfoundation/catalog "More on Components Catalog Service image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/catalog "More on Components Catalog Service image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/catalog "More on Components Catalog Service image in registry") - Manages and maintains a catalog of all published components (e.g. 
macro-algorithms, scripts, etc) - -## Development - -Typical development workflow: - -```cmd -make devenv -source .venv/bin/activate - -cd services/api-service -make install-dev -``` - -Then -```cmd -make run-devel -``` -will start the service in development-mode together with a postgres db initialized with test data. The API can be query using -- http://127.0.0.1:8000/api/docs: swagger-UI API doc - - -Finally -```cmd -make tests -make build-devel -make build -``` - - - - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/catalog./staging-latest.svg?label=catalog.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/catalog.svg -[image-version]:https://images.microbadger.com/badges/version/itisfoundation/catalog.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/catalog.svg - diff --git a/services/catalog/VERSION b/services/catalog/VERSION index 60a2d3e96c8..c18d72be303 100644 --- a/services/catalog/VERSION +++ b/services/catalog/VERSION @@ -1 +1 @@ -0.4.0 \ No newline at end of file +0.8.1 \ No newline at end of file diff --git a/services/catalog/docker-compose-extra.yml b/services/catalog/docker-compose-extra.yml deleted file mode 100644 index 68fc041013a..00000000000 --- a/services/catalog/docker-compose-extra.yml +++ /dev/null @@ -1,79 +0,0 @@ -# -# Includes all adjancent services for testing/development purposes -# -version: "3.8" -services: - postgres: - image: postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce - init: true - environment: - - POSTGRES_USER=${POSTGRES_USER:-test} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-test} - - POSTGRES_DB=${POSTGRES_PASSWORD:-test} - - POSTGRES_HOST=${POSTGRES_HOST:-localhost} - - POSTGRES_PORT=${POSTGRES_PORT:-5432} - ports: - - "5432:5432" - # https://www.postgresql.org/docs/10/runtime-config-logging.html#GUC-LOG-STATEMENT - command: - [ - "postgres", - "-c", - "log_connections=true", - "-c", - "log_disconnections=true", - "-c", - "log_duration=true", - "-c", - "log_line_prefix=[%p] [%a] [%c] [%x] " - ] - adminer: - image: adminer - init: true - ports: - - 18080:8080 - depends_on: - - postgres - director: - image: local/director:production - init: true - environment: - - REGISTRY_URL=${REGISTRY_URL} - - REGISTRY_AUTH=${REGISTRY_AUTH} - - REGISTRY_USER=${REGISTRY_USER} - - REGISTRY_PW=${REGISTRY_PW} - - REGISTRY_SSL=${REGISTRY_SSL} - - DIRECTOR_REGISTRY_CACHING=${DIRECTOR_REGISTRY_CACHING} - - DIRECTOR_REGISTRY_CACHING_TTL=${DIRECTOR_REGISTRY_CACHING_TTL} - - DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=${DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_ID=${DIRECTOR_SELF_SIGNED_SSL_SECRET_ID} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME=${DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME} - - DIRECTOR_SELF_SIGNED_SSL_FILENAME=${DIRECTOR_SELF_SIGNED_SSL_FILENAME} - - POSTGRES_ENDPOINT=${POSTGRES_ENDPOINT} - - POSTGRES_USER=${POSTGRES_USER} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_HOST=${POSTGRES_HOST} - - POSTGRES_PORT=${POSTGRES_PORT} - - S3_ENDPOINT=${S3_ENDPOINT} - - S3_ACCESS_KEY=${S3_ACCESS_KEY} - - S3_SECRET_KEY=${S3_SECRET_KEY} - - S3_BUCKET_NAME=${S3_BUCKET_NAME} - - STORAGE_ENDPOINT=${STORAGE_ENDPOINT} - - EXTRA_HOSTS_SUFFIX=${EXTRA_HOSTS_SUFFIX:-undefined} - - SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet - - MONITORING_ENABLED=${MONITORING_ENABLED:-True} - - TRACING_ENABLED=${TRACING_ENABLED:-True} - - 
TRACING_ZIPKIN_ENDPOINT=${TRACING_ZIPKIN_ENDPOINT:-http://jaeger:9411} - - TRAEFIK_SIMCORE_ZONE=${TRAEFIK_SIMCORE_ZONE:-internal_simcore_stack} - - LOGLEVEL=${LOG_LEVEL:-WARNING} - - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore} - volumes: - - "/var/run/docker.sock:/var/run/docker.sock" - ports: - - "28080:8080" - registry: - image: registry:2 - init: true - ports: - - "5000:5000" diff --git a/services/catalog/docker/boot.sh b/services/catalog/docker/boot.sh index c16af406d60..9db9967c98f 100755 --- a/services/catalog/docker/boot.sh +++ b/services/catalog/docker/boot.sh @@ -18,25 +18,33 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/catalog || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/catalog + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list fi +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi # RUNNING application ---------------------------------------- APP_LOG_LEVEL=${CATALOG_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/catalog/src/simcore_service_catalog && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${CATALOG_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/catalog/docker/entrypoint.sh b/services/catalog/docker/entrypoint.sh index 9e734b5db40..63e9249be31 100755 --- a/services/catalog/docker/entrypoint.sh +++ b/services/catalog/docker/entrypoint.sh @@ -63,11 +63,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - echo "$INFO Starting $* ..." echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" diff --git a/services/catalog/docker/healthcheck.py b/services/catalog/docker/healthcheck.py old mode 100644 new mode 100755 index 551868d3cc1..808782f3261 --- a/services/catalog/docker/healthcheck.py +++ b/services/catalog/docker/healthcheck.py @@ -6,9 +6,10 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: diff --git a/services/catalog/openapi.json b/services/catalog/openapi.json index e3e1672450b..4295d0ebd1c 100644 --- a/services/catalog/openapi.json +++ b/services/catalog/openapi.json @@ -1,151 +1,34 @@ { - "openapi": "3.0.2", + "openapi": "3.1.0", "info": { "title": "simcore-service-catalog", - "description": " Manages and maintains a catalog of all published components (e.g. macro-algorithms, scripts, etc)", - "version": "0.4.0" + "description": "Manages and maintains a catalog of all published components (e.g. 
macro-algorithms, scripts, etc)", + "version": "0.8.1" }, "paths": { - "/v0/meta": { - "get": { - "tags": [ - "meta" - ], - "summary": "Get Service Metadata", - "operationId": "get_service_metadata_v0_meta_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Meta" - } - } - } - } - } - } - }, - "/v0/dags": { + "/": { "get": { - "tags": [ - "DAG" - ], - "summary": "List Dags", - "operationId": "list_dags_v0_dags_get", - "parameters": [ - { - "description": "Requests a specific page of the list results", - "required": false, - "schema": { - "title": "Page Token", - "type": "string", - "description": "Requests a specific page of the list results" - }, - "name": "page_token", - "in": "query" - }, - { - "description": "Maximum number of results to be returned by the server", - "required": false, - "schema": { - "title": "Page Size", - "minimum": 0.0, - "type": "integer", - "description": "Maximum number of results to be returned by the server", - "default": 0 - }, - "name": "page_size", - "in": "query" - }, - { - "description": "Sorts in ascending order comma-separated fields", - "required": false, - "schema": { - "title": "Order By", - "type": "string", - "description": "Sorts in ascending order comma-separated fields" - }, - "name": "order_by", - "in": "query" - } - ], + "summary": "Check Service Health", + "operationId": "check_service_health__get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { - "schema": { - "title": "Response List Dags V0 Dags Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/DAGOut" - } - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "post": { - "tags": [ - "DAG" - ], - "summary": "Create Dag", - "operationId": "create_dag_v0_dags_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DAGIn" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "Successfully created", - "content": { - "application/json": { - "schema": { - "title": "Response Create Dag V0 Dags Post", - "type": "integer" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } + "schema": {} } } } } } }, - "/v0/dags:batchGet": { + "/v0/": { "get": { "tags": [ - "DAG" + "diagnostics" ], - "summary": "Batch Get Dags", - "operationId": "batch_get_dags_v0_dags_batchGet_get", + "summary": "Check Service Health", + "operationId": "check_service_health_v0__get", "responses": { "200": { "description": "Successful Response", @@ -158,100 +41,84 @@ } } }, - "/v0/dags:search": { + "/v0/meta": { "get": { "tags": [ - "DAG" + "meta" ], - "summary": "Search Dags", - "operationId": "search_dags_v0_dags_search_get", + "summary": "Get Service Metadata", + "operationId": "get_service_metadata_v0_meta_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "$ref": "#/components/schemas/BaseMeta" + } } } } } } }, - "/v0/dags/{dag_id}": { + "/v0/services/{service_key}/{service_version}/resources": { "get": { "tags": [ - "DAG" + "services" ], - "summary": "Get Dag", - "operationId": "get_dag_v0_dags__dag_id__get", + "summary": "Get 
Service Resources", + "operationId": "get_service_resources_v0_services__service_key___service_version__resources_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Dag Id", - "type": "integer" - }, - "name": "dag_id", - "in": "path" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DAGOut" - } - } + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" } }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "put": { - "tags": [ - "DAG" - ], - "summary": "Replace Dag", - "operationId": "replace_dag_v0_dags__dag_id__put", - "parameters": [ { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Dag Id", - "type": "integer" + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" + } + }, + { + "name": "user_id", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "description": "if passed, and that user has custom resources, they will be merged with default resources and returned.", + "title": "User Id" }, - "name": "dag_id", - "in": "path" + "description": "if passed, and that user has custom resources, they will be merged with default resources and returned." 
} ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DAGIn" - } - } - }, - "required": true - }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/DAGOut" + "type": "object", + "title": "Response Get Service Resources V0 Services Service Key Service Version Resources Get" } } } @@ -267,73 +134,45 @@ } } } - }, - "delete": { + } + }, + "/v0/services/{service_key}/{service_version}/labels": { + "get": { "tags": [ - "DAG" + "services" ], - "summary": "Delete Dag", - "operationId": "delete_dag_v0_dags__dag_id__delete", + "summary": "Get Service Labels", + "operationId": "get_service_labels_v0_services__service_key___service_version__labels_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Dag Id", - "type": "integer" - }, - "name": "dag_id", - "in": "path" - } - ], - "responses": { - "204": { - "description": "Successfully deleted" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" } - } - } - }, - "patch": { - "tags": [ - "DAG" - ], - "summary": "Udpate Dag", - "operationId": "udpate_dag_v0_dags__dag_id__patch", - "parameters": [ + }, { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Dag Id", - "type": "integer" - }, - "name": "dag_id", - "in": "path" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DAGIn" - } + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" } } - }, + ], "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/DAGOut" + "type": "object", + "title": "Response Get Service Labels V0 Services Service Key Service Version Labels Get" } } } @@ -351,33 +190,33 @@ } } }, - "/v0/services/{service_key}/{service_version}/resources": { + "/v0/services/{service_key}/{service_version}/extras": { "get": { "tags": [ "services" ], - "summary": "Get Service Resources", - "operationId": "get_service_resources_v0_services__service_key___service_version__resources_get", + "summary": "Get Service Extras", + "operationId": "get_service_extras_v0_services__service_key___service_version__extras_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "[\\w/-]+", - "type": "string" - }, - "name": "service_key", - "in": "path" + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } }, { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Service Version", - "pattern": "[\\w/.]+", - "type": "string" - }, - "name": "service_version", - "in": "path" + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service 
Version" + } } ], "responses": { @@ -386,8 +225,7 @@ "content": { "application/json": { "schema": { - "title": "Response Get Service Resources V0 Services Service Key Service Version Resources Get", - "type": "object" + "$ref": "#/components/schemas/ServiceExtras" } } } @@ -414,47 +252,47 @@ "operationId": "get_service_specifications_v0_services__service_key___service_version__specifications_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string" - }, - "name": "service_key", - "in": "path" + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } }, { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Service Version", + "type": "string", "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "service_version", - "in": "path" + "title": "Service Version" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "User Id", - "exclusiveMinimum": true, "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", "minimum": 0 - }, - "name": "user_id", - "in": "query" + } }, { - "description": "if True only the version specs will be retrieved, if False the latest version will be used instead", + "name": "strict", + "in": "query", "required": false, "schema": { - "title": "Strict", "type": "boolean", "description": "if True only the version specs will be retrieved, if False the latest version will be used instead", - "default": false + "default": false, + "title": "Strict" }, - "name": "strict", - "in": "query" + "description": "if True only the version specs will be retrieved, if False the latest version will be used instead" } ], "responses": { @@ -491,42 +329,42 @@ "operationId": "list_service_ports_v0_services__service_key___service_version__ports_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string" - }, - "name": "service_key", - "in": "path" + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } }, { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Service Version", + "type": "string", "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "service_version", - "in": "path" + "title": "Service Version" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "User Id", - "type": "integer" - }, - "name": "user_id", - "in": "query" + "type": "integer", + "title": "User Id" + } }, { + "name": "x-simcore-products-name", + "in": "header", "required": false, "schema": { - "title": "X-Simcore-Products-Name", - "type": "string" - }, - "name": "x-simcore-products-name", - "in": "header" + "type": "string", + "title": "X-Simcore-Products-Name" + } } ], "responses": { @@ -535,11 +373,11 @@ "content": { "application/json": { "schema": { - 
"title": "Response List Service Ports V0 Services Service Key Service Version Ports Get", "type": "array", "items": { "$ref": "#/components/schemas/ServicePortGet" - } + }, + "title": "Response List Service Ports V0 Services Service Key Service Version Ports Get" } } } @@ -557,43 +395,52 @@ } } }, - "/v0/services": { + "/v0/services/{service_key}/{service_version}/accessRights": { "get": { "tags": [ "services" ], - "summary": "List Services", - "operationId": "list_services_v0_services_get", + "summary": "Get Service Access Rights", + "description": "Returns access rights information for provided service and product", + "operationId": "get_service_access_rights_v0_services__service_key___service_version__accessRights_get", "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } }, { - "required": false, + "name": "service_version", + "in": "path", + "required": true, "schema": { - "title": "Details", - "type": "boolean", - "default": true - }, - "name": "details", - "in": "query" + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "X-Simcore-Products-Name", - "type": "string" - }, + "type": "integer", + "title": "User Id" + } + }, + { "name": "x-simcore-products-name", - "in": "header" + "in": "header", + "required": true, + "schema": { + "type": "string", + "title": "X-Simcore-Products-Name" + } } ], "responses": { @@ -602,11 +449,7 @@ "content": { "application/json": { "schema": { - "title": "Response List Services V0 Services Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/ServiceGet" - } + "$ref": "#/components/schemas/ServiceAccessRightsGet" } } } @@ -624,51 +467,45 @@ } } }, - "/v0/services/{service_key}/{service_version}": { + "/v0/services": { "get": { "tags": [ "services" ], - "summary": "Get Service", - "operationId": "get_service_v0_services__service_key___service_version__get", + "summary": "List Services", + "description": "Use instead rpc._service.list_services_paginated -> PageRpcServicesGetV2", + "operationId": "list_services_v0_services_get", + "deprecated": true, "parameters": [ { + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string" - }, - "name": "service_key", - "in": "path" + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + } }, { - "required": true, + "name": "details", + "in": "query", + "required": false, "schema": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "service_version", - "in": "path" + "type": "boolean", + "default": true, + "title": "Details" + } }, { + "name": "x-simcore-products-name", + "in": "header", "required": true, "schema": { - "title": "User Id", - "type": "integer" - }, - "name": 
"user_id", - "in": "query" - }, - { - "required": false, - "schema": { - "title": "X-Simcore-Products-Name", - "type": "string" - }, - "name": "x-simcore-products-name", - "in": "header" + "type": "string", + "title": "X-Simcore-Products-Name" + } } ], "responses": { @@ -677,7 +514,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ServiceGet" + "type": "array", + "items": { + "$ref": "#/components/schemas/ServiceGet" + }, + "title": "Response List Services V0 Services Get" } } } @@ -693,63 +534,57 @@ } } } - }, - "patch": { + } + }, + "/v0/services/{service_key}/{service_version}": { + "get": { "tags": [ "services" ], - "summary": "Update Service", - "operationId": "update_service_v0_services__service_key___service_version__patch", + "summary": "Get Service", + "description": "Use instead rpc._service.get_service -> ServiceGetV2", + "operationId": "get_service_v0_services__service_key___service_version__get", + "deprecated": true, "parameters": [ { + "name": "service_key", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string" - }, - "name": "service_key", - "in": "path" + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } }, { + "name": "service_version", + "in": "path", "required": true, "schema": { - "title": "Service Version", + "type": "string", "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "name": "service_version", - "in": "path" + "title": "Service Version" + } }, { + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "User Id", - "type": "integer" - }, - "name": "user_id", - "in": "query" + "type": "integer", + "title": "User Id" + } }, { + "name": "x-simcore-products-name", + "in": "header", "required": false, "schema": { - "title": "X-Simcore-Products-Name", - "type": "string" - }, - "name": "x-simcore-products-name", - "in": "header" + "type": "string", + "title": "X-Simcore-Products-Name" + } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ServiceUpdate" - } - } - }, - "required": true - }, "responses": { "200": { "description": "Successful Response", @@ -777,2801 +612,3547 @@ }, "components": { "schemas": { - "AccessEnum": { - "title": "AccessEnum", - "enum": [ - "ReadAndWrite", - "Invisible", - "ReadOnly" - ], - "type": "string", - "description": "An enumeration." 
- }, "Author": { - "title": "Author", - "required": [ - "name", - "email" - ], - "type": "object", "properties": { "name": { - "title": "Name", "type": "string", - "description": "Name of the author", - "example": "Jim Knopf" + "title": "Name", + "description": "Name of the author" }, "email": { - "title": "Email", "type": "string", - "description": "Email address", - "format": "email" + "format": "email", + "title": "Email", + "description": "Email address" }, "affiliation": { - "title": "Affiliation", - "type": "string", - "description": "Affiliation of the author" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Affiliation" } }, - "additionalProperties": false - }, - "Badge": { - "title": "Badge", + "type": "object", "required": [ "name", - "image", - "url" + "email" ], - "type": "object", + "title": "Author" + }, + "Badge": { "properties": { "name": { - "title": "Name", "type": "string", + "title": "Name", "description": "Name of the subject" }, "image": { - "title": "Image", + "type": "string", "maxLength": 2083, "minLength": 1, - "type": "string", - "description": "Url to the badge", - "format": "uri" + "format": "uri", + "title": "Image", + "description": "Url to the badge" }, "url": { - "title": "Url", + "type": "string", "maxLength": 2083, "minLength": 1, + "format": "uri", + "title": "Url", + "description": "Link to the status" + } + }, + "type": "object", + "required": [ + "name", + "image", + "url" + ], + "title": "Badge", + "example": { + "image": "https://img.shields.io/website-up-down-green-red/https/itisfoundation.github.io.svg?label=documentation", + "name": "osparc.io", + "url": "https://itisfoundation.github.io/" + } + }, + "BaseMeta": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "version": { "type": "string", - "description": "Link to the status", - "format": "uri" + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + }, + "released": { + "anyOf": [ + { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Released", + "description": "Maps every route's path tag with a released version" } }, - "additionalProperties": false + "type": "object", + "required": [ + "name", + "version" + ], + "title": "BaseMeta", + "example": { + "name": "simcore_service_foo", + "released": { + "v1": "1.3.4", + "v2": "2.4.45" + }, + "version": "2.4.45" + } }, "BindOptions": { - "title": "BindOptions", - "type": "object", "properties": { "Propagation": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/Propagation" + }, + { + "type": "null" } ], "description": "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." }, "NonRecursive": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], "title": "Nonrecursive", - "type": "boolean", "description": "Disable recursive bind mount.", "default": false } }, + "type": "object", + "title": "BindOptions", "description": "Optional configuration for the `bind` type." 
}, "BootChoice": { - "title": "BootChoice", - "required": [ - "label", - "description" - ], - "type": "object", "properties": { "label": { - "title": "Label", - "type": "string" + "type": "string", + "title": "Label" }, "description": { - "title": "Description", - "type": "string" + "type": "string", + "title": "Description" } - } - }, - "BootOption": { - "title": "BootOption", + }, + "type": "object", "required": [ "label", - "description", - "default", - "items" + "description" ], - "type": "object", + "title": "BootChoice" + }, + "BootMode": { + "type": "string", + "enum": [ + "CPU", + "GPU", + "MPI" + ], + "title": "BootMode" + }, + "BootOption": { "properties": { "label": { - "title": "Label", - "type": "string" + "type": "string", + "title": "Label" }, "description": { - "title": "Description", - "type": "string" + "type": "string", + "title": "Description" }, "default": { - "title": "Default", - "type": "string" + "type": "string", + "title": "Default" }, "items": { - "title": "Items", - "type": "object", "additionalProperties": { "$ref": "#/components/schemas/BootChoice" - } + }, + "type": "object", + "title": "Items" } - } + }, + "type": "object", + "required": [ + "label", + "description", + "default", + "items" + ], + "title": "BootOption" }, "Condition": { - "title": "Condition", + "type": "string", "enum": [ "none", "on-failure", "any" ], + "title": "Condition", "description": "Condition for restart." }, "Config1": { - "title": "Config1", - "type": "object", "properties": { "File": { - "title": "File", - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/File1" + }, + { + "type": "null" } ], "description": "File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive\n" }, "Runtime": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], "title": "Runtime", - "type": "object", "description": "Runtime represents a target that is not mounted into the\ncontainer but is used by the task\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually\n> exclusive\n" }, "ConfigID": { - "title": "Configid", - "type": "string", - "description": "ConfigID represents the ID of the specific config that we're\nreferencing.\n" - }, - "ConfigName": { - "title": "Configname", - "type": "string", - "description": "ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n" - } - } - }, - "ContainerSpec": { - "title": "ContainerSpec", - "type": "object", - "properties": { - "Image": { - "title": "Image", - "type": "string", - "description": "The image name to use for the container" - }, - "Labels": { - "title": "Labels", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "User-defined key/value data." - }, - "Command": { - "title": "Command", - "type": "array", - "items": { - "type": "string" - }, - "description": "The command to be run in the image." - }, - "Args": { - "title": "Args", - "type": "array", - "items": { - "type": "string" - }, - "description": "Arguments to the command." - }, - "Hostname": { - "title": "Hostname", - "type": "string", - "description": "The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n" - }, - "Env": { - "title": "Env", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of environment variables in the form `VAR=value`.\n" - }, - "Dir": { - "title": "Dir", - "type": "string", - "description": "The working directory for commands to run in." - }, - "User": { - "title": "User", - "type": "string", - "description": "The user inside the container." - }, - "Groups": { - "title": "Groups", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of additional groups that the container process will run as.\n" - }, - "Privileges": { - "title": "Privileges", - "allOf": [ + "anyOf": [ { - "$ref": "#/components/schemas/Privileges" - } - ], - "description": "Security options for the container" - }, - "TTY": { - "title": "Tty", - "type": "boolean", - "description": "Whether a pseudo-TTY should be allocated." - }, - "OpenStdin": { - "title": "Openstdin", - "type": "boolean", - "description": "Open `stdin`" - }, - "ReadOnly": { - "title": "Readonly", - "type": "boolean", - "description": "Mount the container's root filesystem as read only." - }, - "Mounts": { - "title": "Mounts", - "type": "array", - "items": { - "$ref": "#/components/schemas/Mount" - }, - "description": "Specification for mounts to be added to containers created as part\nof the service.\n" - }, - "StopSignal": { - "title": "Stopsignal", - "type": "string", - "description": "Signal to stop the container." - }, - "StopGracePeriod": { - "title": "Stopgraceperiod", - "type": "integer", - "description": "Amount of time to wait for the container to terminate before\nforcefully killing it.\n" - }, - "HealthCheck": { - "$ref": "#/components/schemas/HealthConfig" - }, - "Hosts": { - "title": "Hosts", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of hostname/IP mappings to add to the container's `hosts`\nfile. 
The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n" - }, - "DNSConfig": { - "title": "Dnsconfig", - "allOf": [ + "type": "string" + }, { - "$ref": "#/components/schemas/DNSConfig" + "type": "null" } ], - "description": "Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n" - }, - "Secrets": { - "title": "Secrets", - "type": "array", - "items": { - "$ref": "#/components/schemas/Secret" - }, - "description": "Secrets contains references to zero or more secrets that will be\nexposed to the service.\n" - }, - "Configs": { - "title": "Configs", - "type": "array", - "items": { - "$ref": "#/components/schemas/Config1" - }, - "description": "Configs contains references to zero or more configs that will be\nexposed to the service.\n" + "title": "Configid", + "description": "ConfigID represents the ID of the specific config that we're\nreferencing.\n" }, - "Isolation": { - "allOf": [ + "ConfigName": { + "anyOf": [ + { + "type": "string" + }, { - "$ref": "#/components/schemas/Isolation" + "type": "null" } ], - "description": "Isolation technology of the containers running the service.\n(Windows only)\n" - }, - "Init": { - "title": "Init", - "type": "boolean", - "description": "Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n" - }, - "Sysctls": { - "title": "Sysctls", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n" - }, - "CapabilityAdd": { - "title": "Capabilityadd", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of kernel capabilities to add to the default set\nfor the container.\n", - "example": [ - "CAP_NET_RAW", - "CAP_SYS_ADMIN", - "CAP_SYS_CHROOT", - "CAP_SYSLOG" - ] - }, - "CapabilityDrop": { - "title": "Capabilitydrop", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of kernel capabilities to drop from the default set\nfor the container.\n", - "example": [ - "CAP_NET_RAW" - ] - }, - "Ulimits": { - "title": "Ulimits", - "type": "array", - "items": { - "$ref": "#/components/schemas/Ulimit1" - }, - "description": "A list of resource limits to set in the container. For example: `{\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048}`\"\n" + "title": "Configname", + "description": "ConfigName is the name of the config that this references,\nbut this is just provided for lookup/display purposes. The\nconfig in the reference will be identified by its ID.\n" } }, - "description": " Container spec for the service.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." + "type": "object", + "title": "Config1" }, "CredentialSpec": { - "title": "CredentialSpec", - "type": "object", "properties": { "Config": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Config", - "type": "string", - "description": "Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", - "example": "0bt9dmxjvjiqermk6xrop3ekq" + "description": "Load credential spec from a Swarm Config with the given ID.\nThe specified config must also be present in the Configs\nfield with the Runtime property set.\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n" }, "File": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "File", - "type": "string", - "description": "Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n", - "example": "spec.json" + "description": "Load credential spec from this file. The file is read by\nthe daemon, and must be present in the `CredentialSpecs`\nsubdirectory in the docker data directory, which defaults\nto `C:\\ProgramData\\Docker\\` on Windows.\n\nFor example, specifying `spec.json` loads\n`C:\\ProgramData\\Docker\\CredentialSpecs\\spec.json`.\n\n
\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n" }, "Registry": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Registry", - "type": "string", "description": "Load credential spec from this value in the Windows\nregistry. The specified registry value must be located in:\n\n`HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Virtualization\\Containers\\CredentialSpecs`\n\n
\n\n\n> **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,\n> and `CredentialSpec.Config` are mutually exclusive.\n" } }, + "type": "object", + "title": "CredentialSpec", "description": "CredentialSpec for managed service account (Windows only)" }, - "DAGIn": { - "title": "DAGIn", - "required": [ - "key", - "version", - "name" - ], - "type": "object", + "DiscreteResourceSpec": { "properties": { - "key": { - "title": "Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string", - "example": "simcore/services/frontend/nodes-group/macros/1" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "example": "1.0.0" - }, - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - }, - "contact": { - "title": "Contact", - "type": "string", - "format": "email" + "Kind": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Kind" }, - "workbench": { - "title": "Workbench", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Node" - } + "Value": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Value" } - } - }, - "DAGOut": { - "title": "DAGOut", - "required": [ - "key", - "version", - "name", - "id" - ], + }, "type": "object", - "properties": { - "key": { - "title": "Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string", - "example": "simcore/services/frontend/nodes-group/macros/1" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "example": "1.0.0" - }, - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - }, - "contact": { - "title": "Contact", - "type": "string", - "format": "email" - }, - "id": { - "title": "Id", - "type": "integer" - }, - "workbench": { - "title": "Workbench", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Node" - } - } - } + "title": "DiscreteResourceSpec" }, - "DNSConfig": { - "title": "DNSConfig", - "type": "object", + "DnsConfig": { "properties": { "Nameservers": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], "title": "Nameservers", - "type": "array", - "items": { - "type": "string" - }, "description": "The IP addresses of the name servers." }, "Search": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], "title": "Search", - "type": "array", - "items": { - "type": "string" - }, "description": "A search list for host-name lookup." }, "Options": { - "title": "Options", - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n" - } - }, - "description": " Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`)." 
- }, - "DatCoreFileLink": { - "title": "DatCoreFileLink", - "required": [ - "store", - "path", - "label", - "dataset" - ], - "type": "object", - "properties": { - "store": { - "title": "Store", - "type": "integer", - "description": "The store identifier: 0 for simcore S3, 1 for datcore" - }, - "path": { - "title": "Path", "anyOf": [ { - "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$", - "type": "string" + "items": { + "type": "string" + }, + "type": "array" }, { - "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$", - "type": "string" + "type": "null" } ], - "description": "The path to the file in the storage provider domain" - }, - "label": { - "title": "Label", - "type": "string", - "description": "The real file name" - }, - "eTag": { - "title": "Etag", - "type": "string", - "description": "Entity tag that uniquely represents the file. The method to generate the tag is not specified (black box)." - }, - "dataset": { - "title": "Dataset", - "type": "string", - "description": "Unique identifier to access the dataset on datcore (REQUIRED for datcore)" + "title": "Options", + "description": "A list of internal resolver variables to be modified (e.g.,\n`debug`, `ndots:3`, etc.).\n" } }, - "additionalProperties": false, - "description": "I/O port type to hold a link to a file in DATCORE storage", - "example": { - "store": 1, - "dataset": "N:dataset:ea2325d8-46d7-4fbd-a644-30f6433070b4", - "path": "N:package:32df09ba-e8d6-46da-bd54-f696157de6ce", - "label": "initial_WTstates" - } - }, - "DiscreteResourceSpec": { - "title": "DiscreteResourceSpec", - "type": "object", - "properties": { - "Kind": { - "title": "Kind", - "type": "string" - }, - "Value": { - "title": "Value", - "type": "integer" - } - } - }, - "DownloadLink": { - "title": "DownloadLink", - "required": [ - "downloadLink" - ], "type": "object", - "properties": { - "downloadLink": { - "title": "Downloadlink", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "format": "uri" - }, - "label": { - "title": "Label", - "type": "string" - } - }, - "additionalProperties": false, - "description": "I/O port type to hold a generic download link to a file (e.g. S3 pre-signed link, etc)" + "title": "DnsConfig", + "description": "Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`)." }, "DriverConfig": { - "title": "DriverConfig", - "type": "object", "properties": { "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Name", - "type": "string", "description": "Name of the driver to use to create the volume." }, "Options": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], "title": "Options", - "type": "object", - "additionalProperties": { - "type": "string" - }, "description": "key/value map of driver specific options." 
} }, + "type": "object", + "title": "DriverConfig", "description": "Map of driver specific options" }, "EndpointPortConfig": { - "title": "EndpointPortConfig", - "type": "object", "properties": { "Name": { - "title": "Name", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" }, "Protocol": { - "$ref": "#/components/schemas/Type" + "anyOf": [ + { + "$ref": "#/components/schemas/Type" + }, + { + "type": "null" + } + ] }, "TargetPort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Targetport", - "type": "integer", "description": "The port inside the container." }, "PublishedPort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Publishedport", - "type": "integer", "description": "The port on the swarm hosts." }, "PublishMode": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/PublishMode" + }, + { + "type": "null" } ], "description": "The mode in which port is published.\n\n
\n\n- \"ingress\" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- \"host\" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running.\n", - "default": "ingress", - "example": "ingress" + "default": "ingress" } - } + }, + "type": "object", + "title": "EndpointPortConfig" }, "EndpointSpec": { - "title": "EndpointSpec", - "type": "object", "properties": { "Mode": { - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/Mode1" + }, + { + "type": "null" } ], "description": "The mode of resolution to use for internal load balancing between tasks.\n", "default": "vip" }, "Ports": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/EndpointPortConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], "title": "Ports", - "type": "array", - "items": { - "$ref": "#/components/schemas/EndpointPortConfig" - }, "description": "List of exposed ports that this service is accessible on from the\noutside. Ports can only be provided if `vip` resolution mode is used.\n" } }, + "type": "object", + "title": "EndpointSpec", "description": "Properties that can be configured to access and load balance a service." }, "FailureAction": { - "title": "FailureAction", + "type": "string", "enum": [ "continue", "pause", "rollback" ], - "description": " Action to take if an updated task fails to run, or stops running\nduring the update." + "title": "FailureAction", + "description": "Action to take if an updated task fails to run, or stops running\nduring the update." }, "FailureAction1": { - "title": "FailureAction1", + "type": "string", "enum": [ "continue", "pause" ], - "description": " Action to take if an rolled back task fails to run, or stops\nrunning during the rollback." + "title": "FailureAction1", + "description": "Action to take if an rolled back task fails to run, or stops\nrunning during the rollback." }, "File": { - "title": "File", - "type": "object", "properties": { "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Name", - "type": "string", "description": "Name represents the final filename in the filesystem.\n" }, "UID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Uid", - "type": "string", "description": "UID represents the file UID." }, "GID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Gid", - "type": "string", "description": "GID represents the file GID." }, "Mode": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Mode", - "type": "integer", "description": "Mode represents the FileMode of the file." } }, + "type": "object", + "title": "File", "description": "File represents a specific target that is backed by a file." }, "File1": { - "title": "File1", - "type": "object", "properties": { "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Name", - "type": "string", "description": "Name represents the final filename in the filesystem.\n" }, "UID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Uid", - "type": "string", "description": "UID represents the file UID." }, "GID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Gid", - "type": "string", "description": "GID represents the file GID." 
}, "Mode": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], "title": "Mode", - "type": "integer", "description": "Mode represents the FileMode of the file." } }, - "description": " File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive" + "type": "object", + "title": "File1", + "description": "File represents a specific target that is backed by a file.\n\n
\n\n> **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive" }, "GenericResource": { - "title": "GenericResource", - "type": "object", "properties": { "NamedResourceSpec": { - "$ref": "#/components/schemas/NamedResourceSpec" + "anyOf": [ + { + "$ref": "#/components/schemas/NamedResourceSpec" + }, + { + "type": "null" + } + ] }, "DiscreteResourceSpec": { - "$ref": "#/components/schemas/DiscreteResourceSpec" + "anyOf": [ + { + "$ref": "#/components/schemas/DiscreteResourceSpec" + }, + { + "type": "null" + } + ] } - } + }, + "type": "object", + "title": "GenericResource" }, "GenericResources": { - "title": "GenericResources", - "type": "array", "items": { "$ref": "#/components/schemas/GenericResource" }, - "description": "User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`).\n", - "example": [ - { - "DiscreteResourceSpec": { - "Kind": "SSD", - "Value": 3 - } + "type": "array", + "title": "GenericResources", + "description": "User-defined resources can be either Integer resources (e.g, `SSD=3`) or\nString resources (e.g, `GPU=UUID1`)." + }, + "HTTPValidationError": { + "properties": { + "errors": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Validation errors" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "HealthConfig": { + "properties": { + "Test": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Test", + "description": "The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `[\"NONE\"]` disable healthcheck\n- `[\"CMD\", args...]` exec arguments directly\n- `[\"CMD-SHELL\", command]` run command with system's default shell\n" }, - { - "NamedResourceSpec": { - "Kind": "GPU", - "Value": "UUID1" - } + "Interval": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Interval", + "description": "The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n" }, - { - "NamedResourceSpec": { - "Kind": "GPU", - "Value": "UUID2" + "Timeout": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Timeout", + "description": "The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n" + }, + "Retries": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Retries", + "description": "The number of consecutive failures needed to consider a container as\nunhealthy. 0 means inherit.\n" + }, + "StartPeriod": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Startperiod", + "description": "Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n" + } + }, + "type": "object", + "title": "HealthConfig", + "description": "A test to perform to check that the container is healthy." 
+ }, + "ImageResources": { + "properties": { + "image": { + "type": "string", + "pattern": "^(?:([a-z0-9-]+(?:\\.[a-z0-9-]+)+(?::\\d+)?|[a-z0-9-]+:\\d+)/)?((?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])(?::([\\w][\\w.-]{0,127}))?(\\@sha256:[a-fA-F0-9]{32,64})?$", + "title": "Image", + "description": "Used by the frontend to provide a context for the users.Services with a docker-compose spec will have multiple entries.Using the `image:version` instead of the docker-compose spec is more helpful for the end user." + }, + "resources": { + "additionalProperties": { + "$ref": "#/components/schemas/ResourceValue" + }, + "type": "object", + "title": "Resources" + }, + "boot_modes": { + "items": { + "$ref": "#/components/schemas/BootMode" + }, + "type": "array", + "title": "Boot Modes", + "description": "describe how a service shall be booted, using CPU, MPI, openMP or GPU", + "default": [ + "CPU" + ] + } + }, + "type": "object", + "required": [ + "image", + "resources" + ], + "title": "ImageResources", + "example": { + "image": "simcore/service/dynamic/pretty-intense:1.0.0", + "resources": { + "AIRAM": { + "limit": 1, + "reservation": 1 + }, + "ANY_resource": { + "limit": "some_value", + "reservation": "some_value" + }, + "CPU": { + "limit": 4, + "reservation": 0.1 + }, + "RAM": { + "limit": 103079215104, + "reservation": 536870912 + }, + "VRAM": { + "limit": 1, + "reservation": 1 } } - ] - }, - "HTTPValidationError": { - "title": "HTTPValidationError", + } + }, + "Isolation1": { + "type": "string", + "enum": [ + "default", + "process", + "hyperv" + ], + "title": "Isolation1", + "description": "Isolation technology of the containers running the service.\n(Windows only)" + }, + "Limit": { + "properties": { + "NanoCPUs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Nanocpus" + }, + "MemoryBytes": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Memorybytes" + }, + "Pids": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Pids", + "description": "Limits the maximum number of PIDs in the container. Set `0` for unlimited.\n", + "default": 0 + } + }, + "type": "object", + "title": "Limit", + "description": "An object describing a limit on resources which can be requested by a task." + }, + "LogDriver1": { + "properties": { + "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "Options": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Options" + } + }, + "type": "object", + "title": "LogDriver1", + "description": "Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified." 
+ }, + "Mode": { + "properties": { + "Replicated": { + "anyOf": [ + { + "$ref": "#/components/schemas/Replicated" + }, + { + "type": "null" + } + ] + }, + "Global": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Global" + }, + "ReplicatedJob": { + "anyOf": [ + { + "$ref": "#/components/schemas/ReplicatedJob" + }, + { + "type": "null" + } + ], + "description": "The mode used for services with a finite number of tasks that run\nto a completed state.\n" + }, + "GlobalJob": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Globaljob", + "description": "The mode used for services which run a task to the completed state\non each valid node.\n" + } + }, + "type": "object", + "title": "Mode", + "description": "Scheduling mode for the service." + }, + "Mode1": { + "type": "string", + "enum": [ + "vip", + "dnsrr" + ], + "title": "Mode1", + "description": "The mode of resolution to use for internal load balancing between tasks." + }, + "Mount": { + "properties": { + "Target": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Target", + "description": "Container path." + }, + "Source": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source", + "description": "Mount source (e.g. a volume name, a host path)." + }, + "Type": { + "anyOf": [ + { + "$ref": "#/components/schemas/Type2" + }, + { + "type": "null" + } + ], + "description": "The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.\n" + }, + "ReadOnly": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Readonly", + "description": "Whether the mount should be read-only." + }, + "Consistency": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Consistency", + "description": "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + }, + "BindOptions": { + "anyOf": [ + { + "$ref": "#/components/schemas/BindOptions" + }, + { + "type": "null" + } + ], + "description": "Optional configuration for the `bind` type." + }, + "VolumeOptions": { + "anyOf": [ + { + "$ref": "#/components/schemas/VolumeOptions" + }, + { + "type": "null" + } + ], + "description": "Optional configuration for the `volume` type." + }, + "TmpfsOptions": { + "anyOf": [ + { + "$ref": "#/components/schemas/TmpfsOptions" + }, + { + "type": "null" + } + ], + "description": "Optional configuration for the `tmpfs` type." 
+ } + }, + "type": "object", + "title": "Mount" + }, + "NamedResourceSpec": { + "properties": { + "Kind": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Kind" + }, + "Value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Value" + } + }, + "type": "object", + "title": "NamedResourceSpec" + }, + "NetworkAttachmentConfig": { + "properties": { + "Target": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Target", + "description": "The target network for attachment. Must be a network name or ID.\n" + }, + "Aliases": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Aliases", + "description": "Discoverable alternate names for the service on this network.\n" + }, + "DriverOpts": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Driveropts", + "description": "Driver attachment options for the network target.\n" + } + }, + "type": "object", + "title": "NetworkAttachmentConfig", + "description": "Specifies how a service should be attached to a particular network." + }, + "NetworkAttachmentSpec": { + "properties": { + "ContainerID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Containerid", + "description": "ID of the container represented by this task" + } + }, + "type": "object", + "title": "NetworkAttachmentSpec", + "description": "Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." + }, + "NodeRequirements": { + "properties": { + "CPU": { + "type": "number", + "exclusiveMinimum": true, + "title": "Cpu", + "description": "defines the required (maximum) CPU shares for running the services", + "minimum": 0.0 + }, + "GPU": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Gpu", + "description": "defines the required (maximum) GPU for running the services" + }, + "RAM": { + "type": "integer", + "minimum": 0, + "title": "Ram", + "description": "defines the required (maximum) amount of RAM for running the services" + }, + "VRAM": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Vram", + "description": "defines the required (maximum) amount of VRAM for running the services" + } + }, + "type": "object", + "required": [ + "CPU", + "RAM" + ], + "title": "NodeRequirements" + }, + "Order": { + "type": "string", + "enum": [ + "stop-first", + "start-first" + ], + "title": "Order", + "description": "The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down." + }, + "Order1": { + "type": "string", + "enum": [ + "stop-first", + "start-first" + ], + "title": "Order1", + "description": "The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down." + }, + "Placement": { + "properties": { + "Constraints": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Constraints", + "description": "An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n" + }, + "Preferences": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Preference" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Preferences", + "description": "Preferences provide a way to make the scheduler aware of factors\nsuch as topology. 
They are provided in order from highest to\nlowest precedence.\n" + }, + "MaxReplicas": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Maxreplicas", + "description": "Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n", + "default": 0 + }, + "Platforms": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Platform" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Platforms", + "description": "Platforms stores all the platforms that the service's image can\nrun on. This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n" + } + }, + "type": "object", + "title": "Placement" + }, + "Platform": { + "properties": { + "Architecture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Architecture", + "description": "Architecture represents the hardware architecture (for example,\n`x86_64`).\n" + }, + "OS": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Os", + "description": "OS represents the Operating System (for example, `linux` or `windows`).\n" + } + }, + "type": "object", + "title": "Platform", + "description": "Platform represents the platform (Arch/OS)." + }, + "PluginPrivilege": { + "properties": { + "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "Description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "Value": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Value" + } + }, + "type": "object", + "title": "PluginPrivilege", + "description": "Describes a permission the user has to accept upon installing\nthe plugin." + }, + "PluginSpec": { + "properties": { + "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "The name or 'alias' to use for the plugin." + }, + "Remote": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Remote", + "description": "The plugin image reference to use." + }, + "Disabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Disabled", + "description": "Disable the plugin once scheduled." + }, + "PluginPrivilege": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/PluginPrivilege" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Pluginprivilege" + } + }, + "type": "object", + "title": "PluginSpec", + "description": "Plugin spec for the service. *(Experimental release only.)*\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." + }, + "Preference": { + "properties": { + "Spread": { + "anyOf": [ + { + "$ref": "#/components/schemas/Spread" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Preference" + }, + "Privileges": { + "properties": { + "CredentialSpec": { + "anyOf": [ + { + "$ref": "#/components/schemas/CredentialSpec" + }, + { + "type": "null" + } + ], + "description": "CredentialSpec for managed service account (Windows only)" + }, + "SELinuxContext": { + "anyOf": [ + { + "$ref": "#/components/schemas/SeLinuxContext" + }, + { + "type": "null" + } + ], + "description": "SELinux labels of the container" + } + }, + "type": "object", + "title": "Privileges", + "description": "Security options for the container" + }, + "Propagation": { + "type": "string", + "enum": [ + "private", + "rprivate", + "shared", + "rshared", + "slave", + "rslave" + ], + "title": "Propagation", + "description": "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + }, + "PublishMode": { + "type": "string", + "enum": [ + "ingress", + "host" + ], + "title": "PublishMode", + "description": "The mode in which port is published.\n\n


\n\n- \"ingress\" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- \"host\" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running." + }, + "Replicated": { + "properties": { + "Replicas": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Replicas" + } + }, + "type": "object", + "title": "Replicated" + }, + "ReplicatedJob": { + "properties": { + "MaxConcurrent": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Maxconcurrent", + "description": "The maximum number of replicas to run simultaneously.\n", + "default": 1 + }, + "TotalCompletions": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Totalcompletions", + "description": "The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n" + } + }, + "type": "object", + "title": "ReplicatedJob", + "description": "The mode used for services with a finite number of tasks that run\nto a completed state." + }, + "ResourceObject": { + "properties": { + "NanoCPUs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Nanocpus" + }, + "MemoryBytes": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Memorybytes" + }, + "GenericResources": { + "anyOf": [ + { + "$ref": "#/components/schemas/GenericResources" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "ResourceObject", + "description": "An object describing the resources which can be advertised by a node and\nrequested by a task." + }, + "ResourceValue": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "title": "Limit" + }, + "reservation": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "title": "Reservation" + } + }, + "type": "object", + "required": [ + "limit", + "reservation" + ], + "title": "ResourceValue" + }, + "Resources1": { + "properties": { + "Limits": { + "anyOf": [ + { + "$ref": "#/components/schemas/Limit" + }, + { + "type": "null" + } + ], + "description": "Define resources limits." + }, + "Reservations": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResourceObject" + }, + { + "type": "null" + } + ], + "description": "Define resources reservation." + } + }, + "type": "object", + "title": "Resources1", + "description": "Resource requirements which apply to each individual container created\nas part of the service." + }, + "RestartPolicy1": { + "properties": { + "Condition": { + "anyOf": [ + { + "$ref": "#/components/schemas/Condition" + }, + { + "type": "null" + } + ], + "description": "Condition for restart." + }, + "Delay": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Delay", + "description": "Delay between restart attempts." 
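The ResourceObject, ResourceValue and Resources1 schemas above express CPU capacity in NanoCPUs (10^-9 CPU shares) and memory in plain bytes. A minimal sketch of a Resources1-shaped payload under that convention; the field names come from the schemas above, while the concrete numbers are only illustrative:

```python
# Sketch of a Resources1-shaped payload as described by the schemas above.
# NanoCPUs are 1e-9 CPU shares and MemoryBytes are plain bytes; values are illustrative.
task_resources = {
    "Limits": {
        "NanoCPUs": 4_000_000_000,      # 4 CPUs
        "MemoryBytes": 8_272_408_576,   # ~7.7 GiB
    },
    "Reservations": {
        "NanoCPUs": 100_000_000,        # 0.1 CPU
        "MemoryBytes": 536_870_912,     # 512 MiB
    },
}

def nano_cpus_to_cpus(nano: int) -> float:
    """Convert NanoCPUs (as used by these Docker-style schemas) to CPU cores."""
    return nano / 1_000_000_000

assert nano_cpus_to_cpus(task_resources["Limits"]["NanoCPUs"]) == 4.0
```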
+ }, + "MaxAttempts": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Maxattempts", + "description": "Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n", + "default": 0 + }, + "Window": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Window", + "description": "Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n", + "default": 0 + } + }, + "type": "object", + "title": "RestartPolicy1", + "description": "Specification for the restart policy which applies to containers\ncreated as part of this service." + }, + "RollbackConfig": { + "properties": { + "Parallelism": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Parallelism", + "description": "Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n" + }, + "Delay": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Delay", + "description": "Amount of time between rollback iterations, in nanoseconds.\n" + }, + "FailureAction": { + "anyOf": [ + { + "$ref": "#/components/schemas/FailureAction1" + }, + { + "type": "null" + } + ], + "description": "Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n" + }, + "Monitor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Monitor", + "description": "Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n" + }, + "MaxFailureRatio": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Maxfailureratio", + "description": "The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", + "default": 0 + }, + "Order": { + "anyOf": [ + { + "$ref": "#/components/schemas/Order1" + }, + { + "type": "null" + } + ], + "description": "The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n" + } + }, + "type": "object", + "title": "RollbackConfig", + "description": "Specification for the rollback strategy of the service." 
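RestartPolicy1 and RollbackConfig above take their Delay, Window and Monitor values in nanoseconds, which is easy to get wrong when composing a payload by hand. A small sketch under that assumption; field names are taken from the schemas above, the enum values marked in comments are typical Docker choices and the numbers are illustrative:

```python
# Durations in the Docker-style schemas above are nanoseconds.
NS_PER_SECOND = 1_000_000_000

def seconds(n: float) -> int:
    """Convert seconds to the nanosecond integers these schemas expect."""
    return int(n * NS_PER_SECOND)

restart_policy = {
    "Condition": "on-failure",  # Condition enum is defined elsewhere in this spec; typical Docker value
    "Delay": seconds(5),
    "MaxAttempts": 3,           # default 0 means the field is ignored, per the schema above
    "Window": seconds(60),
}

rollback_config = {
    "Parallelism": 1,           # 0 would mean unlimited parallelism
    "Delay": seconds(10),
    "FailureAction": "pause",   # FailureAction1 enum is defined elsewhere; typical Docker value
    "Monitor": seconds(30),
    "MaxFailureRatio": 0.2,
    "Order": "stop-first",      # Order1: stop-first | start-first
}
```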
+ }, + "SeLinuxContext": { + "properties": { + "Disable": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Disable", + "description": "Disable SELinux" + }, + "User": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "User", + "description": "SELinux user label" + }, + "Role": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Role", + "description": "SELinux role label" + }, + "Type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Type", + "description": "SELinux type label" + }, + "Level": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Level", + "description": "SELinux level label" + } + }, + "type": "object", + "title": "SeLinuxContext", + "description": "SELinux labels of the container" + }, + "Secret": { + "properties": { + "File": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "File represents a specific target that is backed by a file.\n" + }, + "SecretID": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Secretid", + "description": "SecretID represents the ID of the specific secret that we're\nreferencing.\n" + }, + "SecretName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Secretname", + "description": "SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n" + } + }, "type": "object", - "properties": { - "errors": { - "title": "Validation errors", - "type": "array", - "items": { - "$ref": "#/components/schemas/ValidationError" - } - } - } + "title": "Secret" }, - "HealthConfig": { - "title": "HealthConfig", - "type": "object", + "SelectBox": { "properties": { - "Test": { - "title": "Test", - "type": "array", + "structure": { "items": { - "type": "string" + "$ref": "#/components/schemas/Structure" }, - "description": "The test to perform. Possible values are:\n\n- `[]` inherit healthcheck from image or parent image\n- `[\"NONE\"]` disable healthcheck\n- `[\"CMD\", args...]` exec arguments directly\n- `[\"CMD-SHELL\", command]` run command with system's default shell\n" - }, - "Interval": { - "title": "Interval", - "type": "integer", - "description": "The time to wait between checks in nanoseconds. It should be 0 or at\nleast 1000000 (1 ms). 0 means inherit.\n" - }, - "Timeout": { - "title": "Timeout", - "type": "integer", - "description": "The time to wait before considering the check to have hung. It should\nbe 0 or at least 1000000 (1 ms). 0 means inherit.\n" - }, - "Retries": { - "title": "Retries", - "type": "integer", - "description": "The number of consecutive failures needed to consider a container as\nunhealthy. 0 means inherit.\n" - }, - "StartPeriod": { - "title": "Startperiod", - "type": "integer", - "description": "Start period for the container to initialize before starting\nhealth-retries countdown in nanoseconds. It should be 0 or at least\n1000000 (1 ms). 0 means inherit.\n" + "type": "array", + "minItems": 1, + "title": "Structure" } }, - "description": "A test to perform to check that the container is healthy." 
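The HealthConfig block above (removed here only because the surrounding schemas were regenerated) describes the usual Docker healthcheck fields: times in nanoseconds that must be 0 (inherit) or at least 1 ms, and the special `Test` forms `[]`, `["NONE"]`, `["CMD", ...]` and `["CMD-SHELL", ...]`. A minimal, illustrative sketch of such a payload:

```python
# Illustrative HealthConfig-shaped payload following the field descriptions above.
# All times are nanoseconds and must be 0 (inherit) or >= 1_000_000 (1 ms).
MS = 1_000_000

healthcheck = {
    "Test": ["CMD-SHELL", "curl --fail http://localhost:8080/health || exit 1"],
    "Interval": 30_000 * MS,     # 30 s between checks
    "Timeout": 5_000 * MS,       # consider the check hung after 5 s
    "Retries": 3,                # consecutive failures before "unhealthy"
    "StartPeriod": 60_000 * MS,  # grace period before retries start counting
}

# Disabling the healthcheck altogether uses the special form described above:
no_healthcheck = {"Test": ["NONE"]}
```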
- }, - "ImageResources": { - "title": "ImageResources", + "additionalProperties": false, + "type": "object", "required": [ - "image", - "resources" + "structure" ], - "type": "object", + "title": "SelectBox" + }, + "ServiceAccessRightsGet": { "properties": { - "image": { - "title": "Image", - "pattern": "[\\w/-]+:[\\w.@]+", + "service_key": { "type": "string", - "description": "Used by the frontend to provide a context for the users.Services with a docker-compose spec will have multiple entries.Using the `image:version` instead of the docker-compose spec is more helpful for the end user." + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" }, - "resources": { - "title": "Resources", - "type": "object", + "service_version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" + }, + "gids_with_access_rights": { "additionalProperties": { - "$ref": "#/components/schemas/ResourceValue" - } - } - }, - "example": { - "image": "simcore/service/dynamic/pretty-intense:1.0.0", - "resources": { - "CPU": { - "limit": 4, - "reservation": 0.1 - }, - "RAM": { - "limit": 103079215104, - "reservation": 536870912 - }, - "VRAM": { - "limit": 1, - "reservation": 1 - }, - "AIRAM": { - "limit": 1, - "reservation": 1 + "additionalProperties": { + "type": "boolean" + }, + "type": "object" }, - "ANY_resource": { - "limit": "some_value", - "reservation": "some_value" - } + "type": "object", + "title": "Gids With Access Rights" } - } - }, - "Isolation": { - "title": "Isolation", - "enum": [ - "default", - "process", - "hyperv" + }, + "type": "object", + "required": [ + "service_key", + "service_version", + "gids_with_access_rights" ], - "description": "Isolation technology of the container. (Windows only)" + "title": "ServiceAccessRightsGet" }, - "Limit": { - "title": "Limit", - "type": "object", + "ServiceBuildDetails": { "properties": { - "NanoCPUs": { - "title": "Nanocpus", - "type": "integer", - "example": 4000000000 + "build_date": { + "type": "string", + "title": "Build Date" }, - "MemoryBytes": { - "title": "Memorybytes", - "type": "integer", - "example": 8272408576 + "vcs_ref": { + "type": "string", + "title": "Vcs Ref" }, - "Pids": { - "title": "Pids", - "type": "integer", - "description": "Limits the maximum number of PIDs in the container. Set `0` for unlimited.\n", - "default": 0, - "example": 100 + "vcs_url": { + "type": "string", + "title": "Vcs Url" } }, - "description": "An object describing a limit on resources which can be requested by a task." - }, - "LogDriver1": { - "title": "LogDriver1", "type": "object", - "properties": { - "Name": { - "title": "Name", - "type": "string" - }, - "Options": { - "title": "Options", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "description": " Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified." 
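ServiceAccessRightsGet above maps each group id to a dictionary of boolean rights, using the same execute/write flag names as ServiceGroupAccessRights. A sketch of that shape; the service key, version and group ids are made up for illustration:

```python
# Hypothetical ServiceAccessRightsGet-shaped payload (shape from the schema above;
# the concrete service and group ids are illustrative).
access_rights = {
    "service_key": "simcore/services/comp/my-solver",
    "service_version": "1.0.0",
    "gids_with_access_rights": {
        "1": {"execute_access": True, "write_access": False},
        "4": {"execute_access": True, "write_access": True},
    },
}

# Groups that may run the service but not modify it:
executors_only = [
    gid
    for gid, rights in access_rights["gids_with_access_rights"].items()
    if rights.get("execute_access") and not rights.get("write_access")
]
assert executors_only == ["1"]
```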
- }, - "Meta": { - "title": "Meta", "required": [ - "name", - "version" + "build_date", + "vcs_ref", + "vcs_url" ], - "type": "object", - "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "released": { - "title": "Released", - "type": "object", - "additionalProperties": { - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" - }, - "description": "Maps every route's path tag with a released version" - } - }, - "example": { - "name": "simcore_service_foo", - "version": "2.4.45", - "released": { - "v1": "1.3.4", - "v2": "2.4.45" - } - } + "title": "ServiceBuildDetails" }, - "Mode": { - "title": "Mode", - "type": "object", + "ServiceExtras": { "properties": { - "Replicated": { - "$ref": "#/components/schemas/Replicated" - }, - "Global": { - "title": "Global", - "type": "object" + "node_requirements": { + "$ref": "#/components/schemas/NodeRequirements" }, - "ReplicatedJob": { - "title": "Replicatedjob", - "allOf": [ + "service_build_details": { + "anyOf": [ { - "$ref": "#/components/schemas/ReplicatedJob" + "$ref": "#/components/schemas/ServiceBuildDetails" + }, + { + "type": "null" } - ], - "description": "The mode used for services with a finite number of tasks that run\nto a completed state.\n" + ] }, - "GlobalJob": { - "title": "Globaljob", - "type": "object", - "description": "The mode used for services which run a task to the completed state\non each valid node.\n" + "container_spec": { + "anyOf": [ + { + "$ref": "#/components/schemas/models_library__service_settings_labels__ContainerSpec" + }, + { + "type": "null" + } + ] } }, - "description": "Scheduling mode for the service." - }, - "Mode1": { - "title": "Mode1", - "enum": [ - "vip", - "dnsrr" + "type": "object", + "required": [ + "node_requirements" ], - "description": "The mode of resolution to use for internal load balancing between tasks." + "title": "ServiceExtras" }, - "Mount": { - "title": "Mount", - "type": "object", + "ServiceGet": { "properties": { - "Target": { - "title": "Target", + "name": { "type": "string", - "description": "Container path." + "title": "Name", + "description": "Display name: short, human readable name for the node" }, - "Source": { - "title": "Source", - "type": "string", - "description": "Mount source (e.g. a volume name, a host path)." + "thumbnail": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Thumbnail", + "description": "URL to the service thumbnail" }, - "Type": { - "allOf": [ + "icon": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, { - "$ref": "#/components/schemas/Type1" + "type": "null" } ], - "description": "The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. 
Must exist prior to creating the container.\n" + "title": "Icon", + "description": "URL to the service icon" }, - "ReadOnly": { - "title": "Readonly", + "description": { + "type": "string", + "title": "Description", + "description": "human readable description of the purpose of the node" + }, + "description_ui": { "type": "boolean", - "description": "Whether the mount should be read-only." + "title": "Description Ui", + "description": "A flag to enable the `description` to be presented as a single web page (=true) or in another structured format (default=false).", + "default": false }, - "Consistency": { - "title": "Consistency", - "type": "string", - "description": "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + "version_display": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version Display", + "description": "A user-friendly or marketing name for the release.This can be used to reference the release in a more readable and recognizable format, such as 'Matterhorn Release,' 'Spring Update,' or 'Holiday Edition.' This name is not used for version comparison but is useful for communication and documentation purposes." }, - "BindOptions": { - "title": "Bindoptions", - "allOf": [ + "deprecated": { + "anyOf": [ { - "$ref": "#/components/schemas/BindOptions" + "type": "string", + "format": "date-time" + }, + { + "type": "null" } ], - "description": "Optional configuration for the `bind` type." + "title": "Deprecated", + "description": "Owner can set the date to retire the service. Three possibilities:If None, the service is marked as `published`;If now=deprecated, the service is retired" }, - "VolumeOptions": { - "title": "Volumeoptions", - "allOf": [ + "classifiers": { + "anyOf": [ { - "$ref": "#/components/schemas/VolumeOptions" + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" } ], - "description": "Optional configuration for the `volume` type." + "title": "Classifiers" }, - "TmpfsOptions": { - "title": "Tmpfsoptions", - "allOf": [ + "quality": { + "type": "object", + "title": "Quality", + "default": {} + }, + "accessRights": { + "anyOf": [ { - "$ref": "#/components/schemas/TmpfsOptions" + "additionalProperties": { + "$ref": "#/components/schemas/ServiceGroupAccessRights" + }, + "type": "object" + }, + { + "type": "null" } ], - "description": "Optional configuration for the `tmpfs` type." - } - } - }, - "NamedResourceSpec": { - "title": "NamedResourceSpec", - "type": "object", - "properties": { - "Kind": { - "title": "Kind", - "type": "string" - }, - "Value": { - "title": "Value", - "type": "string" - } - } - }, - "NetworkAttachmentConfig": { - "title": "NetworkAttachmentConfig", - "type": "object", - "properties": { - "Target": { - "title": "Target", - "type": "string", - "description": "The target network for attachment. Must be a network name or ID.\n" - }, - "Aliases": { - "title": "Aliases", - "type": "array", - "items": { - "type": "string" - }, - "description": "Discoverable alternate names for the service on this network.\n" + "title": "Accessrights", + "description": "service access rights per group id" }, - "DriverOpts": { - "title": "Driveropts", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Driver attachment options for the network target.\n" - } - }, - "description": "Specifies how a service should be attached to a particular network." 
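The Mount schema above distinguishes `bind`, `volume`, `tmpfs` and `npipe` mounts, each with optional per-type options. A small illustrative sketch of the three common variants; paths, names and sizes are placeholders:

```python
# Illustrative Mount-shaped payloads following the descriptions above.
bind_mount = {
    "Type": "bind",                      # host path must already exist
    "Source": "/etc/my-service/config",  # placeholder host path
    "Target": "/config",                 # container path
    "ReadOnly": True,
}

volume_mount = {
    "Type": "volume",                    # named volume, not removed with the container
    "Source": "my-data",                 # placeholder volume name
    "Target": "/data",
}

tmpfs_mount = {
    "Type": "tmpfs",                     # no Source may be specified for tmpfs
    "Target": "/scratch",
    "TmpfsOptions": {"SizeBytes": 64 * 1024 * 1024, "Mode": 0o1777},
}
```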
- }, - "NetworkAttachmentSpec": { - "title": "NetworkAttachmentSpec", - "type": "object", - "properties": { - "ContainerID": { - "title": "Containerid", - "type": "string", - "description": "ID of the container represented by this task" - } - }, - "description": " Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." - }, - "Node": { - "title": "Node", - "required": [ - "key", - "version", - "label" - ], - "type": "object", - "properties": { "key": { - "title": "Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Key", "description": "distinctive name for the node based on the docker registry path" }, "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "description": "semantic version number of the node" - }, - "label": { - "title": "Label", "type": "string", - "description": "The short name of the node" - }, - "progress": { - "title": "Progress", - "maximum": 100.0, - "minimum": 0.0, - "type": "number", - "description": "the node progress value" - }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "url of the latest screenshot of the node", - "format": "uri" + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version", + "description": "service version number" }, - "runHash": { + "release_date": { "anyOf": [ { - "type": "null" + "type": "string", + "format": "date-time" }, { - "title": "Runhash", - "type": "string", - "description": "the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated" + "type": "null" } - ] - }, - "inputs": { - "title": "Inputs", - "type": "object", - "description": "values of input properties" - }, - "inputsUnits": { - "title": "Inputsunits", - "type": "object", - "description": "Overrides default unit (if any) defined in the service for each port" - }, - "inputAccess": { - "type": "object", - "description": "map with key - access level pairs" + ], + "title": "Release Date", + "description": "A timestamp when the specific version of the service was released. This field helps in tracking the timeline of releases and understanding the sequence of updates. 
A timestamp string should be formatted as YYYY-MM-DD[T]HH:MM[:SS[.ffffff]][Z or [\u00b1]HH[:]MM]" }, - "inputNodes": { - "title": "Inputnodes", - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "description": "node IDs of where the node is connected to" + "integration-version": { + "anyOf": [ + { + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, + { + "type": "null" + } + ], + "title": "Integration-Version", + "description": "This version is used to maintain backward compatibility when there are changes in the way a service is integrated into the framework" }, - "outputs": { - "title": "Outputs", - "type": "object", - "description": "values of output properties" + "type": { + "$ref": "#/components/schemas/ServiceType", + "description": "service type" }, - "outputNode": { - "title": "Outputnode", - "type": "boolean", + "badges": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Badge" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Badges", "deprecated": true }, - "outputNodes": { - "title": "Outputnodes", - "type": "array", + "authors": { "items": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/Author" }, - "description": "Used in group-nodes. Node IDs of those connected to the output" + "type": "array", + "minItems": 1, + "title": "Authors" + }, + "contact": { + "type": "string", + "format": "email", + "title": "Contact", + "description": "email to correspond to the authors about the node" }, - "parent": { + "inputs": { "anyOf": [ { - "type": "null" + "type": "object" }, { - "title": "Parent", - "type": "string", - "description": "Parent's (group-nodes') node ID s. Used to group", - "format": "uuid" + "type": "null" } - ] + ], + "title": "Inputs", + "description": "definition of the inputs of this node" }, - "position": { - "title": "Position", - "allOf": [ + "outputs": { + "anyOf": [ + { + "type": "object" + }, { - "$ref": "#/components/schemas/Position" + "type": "null" } ], - "description": "Use projects_ui.WorkbenchUI.position instead", - "deprecated": true + "title": "Outputs", + "description": "definition of the outputs of this node" }, - "state": { - "title": "State", - "allOf": [ + "boot-options": { + "anyOf": [ + { + "type": "object" + }, { - "$ref": "#/components/schemas/NodeState" + "type": "null" } ], - "description": "The node's state object" - }, - "bootOptions": { - "title": "Bootoptions", - "type": "object", - "description": "Some services provide alternative parameters to be injected at boot time. The user selection should be stored here, and it will overwrite the services's defaults." - } - }, - "additionalProperties": false - }, - "NodeState": { - "title": "NodeState", - "type": "object", - "properties": { - "modified": { - "title": "Modified", - "type": "boolean", - "description": "true if the node's outputs need to be re-computed", - "default": true - }, - "dependencies": { - "title": "Dependencies", - "uniqueItems": true, - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "description": "contains the node inputs dependencies if they need to be computed first" + "title": "Boot-Options", + "description": "Service defined boot options. These get injected in the service as env variables." 
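The ServiceGet schema above constrains `key` to the simcore/services/... pattern and `version` to semantic versioning. A quick sketch that checks candidate values against the exact patterns quoted above; the candidate key and versions are examples only (the file-picker key also appears in the frontend example further down):

```python
import re

# Patterns copied verbatim from the ServiceGet schema above.
KEY_RE = re.compile(
    r"^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$"
)
SEMVER_RE = re.compile(
    r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}"
    r"(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?"
    r"(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$"
)

assert KEY_RE.match("simcore/services/frontend/file-picker")
assert SEMVER_RE.match("1.0.0")
assert SEMVER_RE.match("2.4.45-rc1+build.5")
assert not KEY_RE.match("simcore/services/unknown/thing")
```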
}, - "currentStatus": { - "allOf": [ + "min-visible-inputs": { + "anyOf": [ + { + "type": "integer", + "minimum": 0 + }, { - "$ref": "#/components/schemas/RunningState" + "type": "null" } ], - "description": "the node's current state", - "default": "NOT_STARTED" - } - }, - "additionalProperties": false - }, - "Order": { - "title": "Order", - "enum": [ - "stop-first", - "start-first" - ], - "description": " The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down." - }, - "Placement": { - "title": "Placement", - "type": "object", - "properties": { - "Constraints": { - "title": "Constraints", - "type": "array", - "items": { - "type": "string" - }, - "description": "An array of constraint expressions to limit the set of nodes where\na task can be scheduled. Constraint expressions can either use a\n_match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find\nnodes that satisfy every expression (AND match). Constraints can\nmatch node or Docker Engine labels as follows:\n\nnode attribute | matches | example\n---------------------|--------------------------------|-----------------------------------------------\n`node.id` | Node ID | `node.id==2ivku8v2gvtg4`\n`node.hostname` | Node hostname | `node.hostname!=node-2`\n`node.role` | Node role (`manager`/`worker`) | `node.role==manager`\n`node.platform.os` | Node operating system | `node.platform.os==windows`\n`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`\n`node.labels` | User-defined node labels | `node.labels.security==high`\n`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`\n\n`engine.labels` apply to Docker Engine labels like operating system,\ndrivers, etc. Swarm administrators add `node.labels` for operational\npurposes by using the [`node update endpoint`](#operation/NodeUpdate).\n", - "example": [ - "node.hostname!=node3.corp.example.com", - "node.role!=manager", - "node.labels.type==production", - "node.platform.os==linux", - "node.platform.arch==x86_64" - ] + "title": "Min-Visible-Inputs", + "description": "The number of 'data type inputs' displayed by default in the UI. When None all 'data type inputs' are displayed." }, - "Preferences": { - "title": "Preferences", - "type": "array", - "items": { - "$ref": "#/components/schemas/Preference" - }, - "description": "Preferences provide a way to make the scheduler aware of factors\nsuch as topology. They are provided in order from highest to\nlowest precedence.\n", - "example": [ + "progress_regexp": { + "anyOf": [ { - "Spread": { - "SpreadDescriptor": "node.labels.datacenter" - } + "type": "string" }, { - "Spread": { - "SpreadDescriptor": "node.labels.rack" - } + "type": "null" } - ] - }, - "MaxReplicas": { - "title": "Maxreplicas", - "type": "integer", - "description": "Maximum number of replicas for per node (default value is 0, which\nis unlimited)\n", - "default": 0 - }, - "Platforms": { - "title": "Platforms", - "type": "array", - "items": { - "$ref": "#/components/schemas/Platform" - }, - "description": "Platforms stores all the platforms that the service's image can\nrun on. 
This field is used in the platform filter for scheduling.\nIf empty, then the platform filter is off, meaning there are no\nscheduling restrictions.\n" - } - } - }, - "Platform": { - "title": "Platform", - "type": "object", - "properties": { - "Architecture": { - "title": "Architecture", - "type": "string", - "description": "Architecture represents the hardware architecture (for example,\n`x86_64`).\n", - "example": "x86_64" + ], + "title": "Progress Regexp", + "description": "regexp pattern for detecting computational service's progress" }, - "OS": { - "title": "Os", - "type": "string", - "description": "OS represents the Operating System (for example, `linux` or `windows`).\n", - "example": "linux" + "image_digest": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Digest", + "description": "Image manifest digest. Note that this is NOT injected as an image label" + }, + "owner": { + "anyOf": [ + { + "type": "string", + "format": "email" + }, + { + "type": "null" + } + ], + "title": "Owner", + "description": "None when the owner email cannot be found in the database" } }, - "description": "Platform represents the platform (Arch/OS)." - }, - "PluginPrivilege": { - "title": "PluginPrivilege", "type": "object", + "required": [ + "name", + "description", + "classifiers", + "key", + "version", + "type", + "authors", + "contact", + "inputs", + "outputs", + "owner" + ], + "title": "ServiceGet" + }, + "ServiceGroupAccessRights": { "properties": { - "Name": { - "title": "Name", - "type": "string", - "example": "network" - }, - "Description": { - "title": "Description", - "type": "string" + "execute_access": { + "type": "boolean", + "title": "Execute Access", + "description": "defines whether the group can execute the service", + "default": false }, - "Value": { - "title": "Value", - "type": "array", - "items": { - "type": "string" - }, - "example": [ - "host" - ] + "write_access": { + "type": "boolean", + "title": "Write Access", + "description": "defines whether the group can modify the service", + "default": false } }, - "description": " Describes a permission the user has to accept upon installing\nthe plugin." - }, - "PluginSpec": { - "title": "PluginSpec", "type": "object", + "title": "ServiceGroupAccessRights" + }, + "ServiceInput": { "properties": { - "Name": { - "title": "Name", - "type": "string", - "description": "The name or 'alias' to use for the plugin." + "displayOrder": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", + "deprecated": true }, - "Remote": { - "title": "Remote", + "label": { "type": "string", - "description": "The plugin image reference to use." - }, - "Disabled": { - "title": "Disabled", - "type": "boolean", - "description": "Disable the plugin once scheduled." + "title": "Label", + "description": "short name for the property" }, - "PluginPrivilege": { - "title": "Pluginprivilege", - "type": "array", - "items": { - "$ref": "#/components/schemas/PluginPrivilege" - } - } - }, - "description": " Plugin spec for the service. *(Experimental release only.)*\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." - }, - "PortLink": { - "title": "PortLink", - "required": [ - "nodeUuid", - "output" - ], - "type": "object", - "properties": { - "nodeUuid": { - "title": "Nodeuuid", + "description": { "type": "string", - "description": "The node to get the port output from", - "format": "uuid" + "title": "Description", + "description": "description of the property" }, - "output": { - "title": "Output", - "pattern": "^[-_a-zA-Z0-9]+$", + "type": { "type": "string", - "description": "The port key in the node given by nodeUuid" - } - }, - "additionalProperties": false, - "description": "I/O port type to reference to an output port of another node in the same project" - }, - "Position": { - "title": "Position", - "required": [ - "x", - "y" - ], - "type": "object", - "properties": { - "x": { - "title": "X", - "type": "integer", - "description": "The x position", - "example": [ - "12" - ] + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed" }, - "y": { - "title": "Y", - "type": "integer", - "description": "The y position", - "example": [ - "15" - ] - } - }, - "additionalProperties": false - }, - "Preference": { - "title": "Preference", - "type": "object", - "properties": { - "Spread": { - "$ref": "#/components/schemas/Spread" - } - } - }, - "Privileges": { - "title": "Privileges", - "type": "object", - "properties": { - "CredentialSpec": { - "title": "Credentialspec", - "allOf": [ + "contentSchema": { + "anyOf": [ { - "$ref": "#/components/schemas/CredentialSpec" + "type": "object" + }, + { + "type": "null" } ], - "description": "CredentialSpec for managed service account (Windows only)" + "title": "Contentschema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" }, - "SELinuxContext": { - "title": "Selinuxcontext", - "allOf": [ + "fileToKeyMap": { + "anyOf": [ + { + "type": "object" + }, { - "$ref": "#/components/schemas/SELinuxContext" + "type": "null" } ], - "description": "SELinux labels of the container" - } - }, - "description": "Security options for the container" - }, - "Propagation": { - "title": "Propagation", - "enum": [ - "private", - "rprivate", - "shared", - "rshared", - "slave", - "rslave" - ], - "description": "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - }, - "PublishMode": { - "title": "PublishMode", - "enum": [ - "ingress", - "host" - ], - "description": " The mode in which port is published.\n\n


\n\n- \"ingress\" makes the target port accessible on every node,\n regardless of whether there is a task for the service running on\n that node or not.\n- \"host\" bypasses the routing mesh and publish the port directly on\n the swarm node where that service is running." - }, - "Replicated": { - "title": "Replicated", - "type": "object", - "properties": { - "Replicas": { - "title": "Replicas", - "type": "integer" - } - } - }, - "ReplicatedJob": { - "title": "ReplicatedJob", - "type": "object", - "properties": { - "MaxConcurrent": { - "title": "Maxconcurrent", - "type": "integer", - "description": "The maximum number of replicas to run simultaneously.\n", - "default": 1 + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files" }, - "TotalCompletions": { - "title": "Totalcompletions", - "type": "integer", - "description": "The total number of replicas desired to reach the Completed\nstate. If unset, will default to the value of `MaxConcurrent`\n" - } - }, - "description": " The mode used for services with a finite number of tasks that run\nto a completed state." - }, - "ResourceObject": { - "title": "ResourceObject", - "type": "object", - "properties": { - "NanoCPUs": { - "title": "Nanocpus", - "type": "integer", - "example": 4000000000 + "unit": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "deprecated": true }, - "MemoryBytes": { - "title": "Memorybytes", - "type": "integer", - "example": 8272408576 + "defaultValue": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Defaultvalue", + "deprecated": true }, - "GenericResources": { - "$ref": "#/components/schemas/GenericResources" + "widget": { + "anyOf": [ + { + "$ref": "#/components/schemas/Widget" + }, + { + "type": "null" + } + ], + "description": "custom widget to use instead of the default one determined from the data-type" } }, - "description": " An object describing the resources which can be advertised by a node and\nrequested by a task." - }, - "ResourceValue": { - "title": "ResourceValue", + "additionalProperties": false, + "type": "object", "required": [ - "limit", - "reservation" + "label", + "description", + "type" ], - "type": "object", + "title": "ServiceInput", + "description": "Metadata on a service input port" + }, + "ServiceOutput": { "properties": { - "limit": { - "title": "Limit", + "displayOrder": { "anyOf": [ - { - "type": "integer" - }, { "type": "number" }, { - "type": "string" + "type": "null" } - ] + ], + "title": "Displayorder", + "description": "DEPRECATED: new display order is taken from the item position. 
This will be removed.", + "deprecated": true }, - "reservation": { - "title": "Reservation", + "label": { + "type": "string", + "title": "Label", + "description": "short name for the property" + }, + "description": { + "type": "string", + "title": "Description", + "description": "description of the property" + }, + "type": { + "type": "string", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "title": "Type", + "description": "data type expected on this input glob matching for data type is allowed" + }, + "contentSchema": { "anyOf": [ { - "type": "integer" + "type": "object" }, { - "type": "number" + "type": "null" + } + ], + "title": "Contentschema", + "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" + }, + "fileToKeyMap": { + "anyOf": [ + { + "type": "object" }, { - "type": "string" + "type": "null" } - ] - } - } - }, - "Resources1": { - "title": "Resources1", - "type": "object", - "properties": { - "Limits": { - "title": "Limits", - "allOf": [ + ], + "title": "Filetokeymap", + "description": "Place the data associated with the named keys in files" + }, + "unit": { + "anyOf": [ { - "$ref": "#/components/schemas/Limit" + "type": "string" + }, + { + "type": "null" } ], - "description": "Define resources limits." + "title": "Unit", + "description": "Units, when it refers to a physical quantity", + "deprecated": true }, - "Reservation": { - "title": "Reservation", - "allOf": [ + "widget": { + "anyOf": [ { - "$ref": "#/components/schemas/ResourceObject" + "$ref": "#/components/schemas/Widget" + }, + { + "type": "null" } ], - "description": "Define resources reservation." + "description": "custom widget to use instead of the default one determined from the data-type", + "deprecated": true } }, - "description": " Resource requirements which apply to each individual container created\nas part of the service." - }, - "RestartPolicy1": { - "title": "RestartPolicy1", + "additionalProperties": false, "type": "object", + "required": [ + "label", + "description", + "type" + ], + "title": "ServiceOutput" + }, + "ServicePortGet": { "properties": { - "Condition": { - "allOf": [ + "key": { + "type": "string", + "pattern": "^[^_\\W0-9]\\w*$", + "title": "Key name", + "description": "Port identifier name" + }, + "kind": { + "type": "string", + "enum": [ + "input", + "output" + ], + "title": "Kind" + }, + "content_media_type": { + "anyOf": [ { - "$ref": "#/components/schemas/Condition" + "type": "string" + }, + { + "type": "null" } ], - "description": "Condition for restart." - }, - "Delay": { - "title": "Delay", - "type": "integer", - "description": "Delay between restart attempts." - }, - "MaxAttempts": { - "title": "Maxattempts", - "type": "integer", - "description": "Maximum attempts to restart a given container before giving up\n(default value is 0, which is ignored).\n", - "default": 0 + "title": "Content Media Type" }, - "Window": { - "title": "Window", - "type": "integer", - "description": "Windows is the time window used to evaluate the restart policy\n(default value is 0, which is unbounded).\n", - "default": 0 + "content_schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Content Schema", + "description": "jsonschema for the port's value. SEE https://json-schema.org/understanding-json-schema/" } }, - "description": " Specification for the restart policy which applies to containers\ncreated as part of this service." 
- }, - "RollbackConfig": { - "title": "RollbackConfig", "type": "object", - "properties": { - "Parallelism": { - "title": "Parallelism", - "type": "integer", - "description": "Maximum number of tasks to be rolled back in one iteration (0 means\nunlimited parallelism).\n" - }, - "Delay": { - "title": "Delay", + "required": [ + "key", + "kind" + ], + "title": "ServicePortGet", + "example": { + "content_schema": { + "maximum": 5, + "minimum": 0, + "title": "Sleep interval", "type": "integer", - "description": "Amount of time between rollback iterations, in nanoseconds.\n" + "x_unit": "second" }, - "FailureAction": { - "allOf": [ + "key": "input_1", + "kind": "input" + } + }, + "ServiceSpec": { + "properties": { + "Name": { + "anyOf": [ { - "$ref": "#/components/schemas/FailureAction1" + "type": "string" + }, + { + "type": "null" } ], - "description": "Action to take if an rolled back task fails to run, or stops\nrunning during the rollback.\n" + "title": "Name", + "description": "Name of the service." }, - "Monitor": { - "title": "Monitor", - "type": "integer", - "description": "Amount of time to monitor each rolled back task for failures, in\nnanoseconds.\n" + "Labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Labels", + "description": "User-defined key/value metadata." }, - "MaxFailureRatio": { - "title": "Maxfailureratio", - "type": "number", - "description": "The fraction of tasks that may fail during a rollback before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", - "default": 0 + "TaskTemplate": { + "anyOf": [ + { + "$ref": "#/components/schemas/TaskSpec" + }, + { + "type": "null" + } + ] }, - "Order": { - "allOf": [ + "Mode": { + "anyOf": [ { - "$ref": "#/components/schemas/Order" + "$ref": "#/components/schemas/Mode" + }, + { + "type": "null" } ], - "description": "The order of operations when rolling back a task. Either the old\ntask is shut down before the new task is started, or the new task\nis started before the old task is shut down.\n" - } - }, - "description": "Specification for the rollback strategy of the service." - }, - "RunningState": { - "title": "RunningState", - "enum": [ - "UNKNOWN", - "PUBLISHED", - "NOT_STARTED", - "PENDING", - "STARTED", - "RETRY", - "SUCCESS", - "FAILED", - "ABORTED" - ], - "type": "string", - "description": "State of execution of a project's computational workflow\n\nSEE StateType for task state" - }, - "SELinuxContext": { - "title": "SELinuxContext", - "type": "object", - "properties": { - "Disable": { - "title": "Disable", - "type": "boolean", - "description": "Disable SELinux" + "description": "Scheduling mode for the service." }, - "User": { - "title": "User", - "type": "string", - "description": "SELinux user label" + "UpdateConfig": { + "anyOf": [ + { + "$ref": "#/components/schemas/UpdateConfig" + }, + { + "type": "null" + } + ], + "description": "Specification for the update strategy of the service." }, - "Role": { - "title": "Role", - "type": "string", - "description": "SELinux role label" + "RollbackConfig": { + "anyOf": [ + { + "$ref": "#/components/schemas/RollbackConfig" + }, + { + "type": "null" + } + ], + "description": "Specification for the rollback strategy of the service." 
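The ServicePortGet example above ships a `content_schema` that is plain JSON Schema, as its description notes. A sketch of validating a port value against it, assuming the third-party `jsonschema` package (the spec itself only points at json-schema.org); the payload is copied from the example above:

```python
# Validating a port value against ServicePortGet.content_schema.
# Assumes the `jsonschema` package; unknown keywords such as x_unit are ignored by validators.
import jsonschema

sleep_interval_port = {
    "key": "input_1",
    "kind": "input",
    "content_schema": {
        "title": "Sleep interval",
        "type": "integer",
        "x_unit": "second",
        "minimum": 0,
        "maximum": 5,
    },
}

# A value of 3 seconds is accepted...
jsonschema.validate(instance=3, schema=sleep_interval_port["content_schema"])

# ...while 42 violates the maximum and raises jsonschema.ValidationError.
try:
    jsonschema.validate(instance=42, schema=sleep_interval_port["content_schema"])
except jsonschema.ValidationError as err:
    print(f"rejected: {err.message}")
```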
}, - "Type": { - "title": "Type", - "type": "string", - "description": "SELinux type label" + "Networks": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/NetworkAttachmentConfig" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Networks", + "description": "Specifies which networks the service should attach to." }, - "Level": { - "title": "Level", - "type": "string", - "description": "SELinux level label" + "EndpointSpec": { + "anyOf": [ + { + "$ref": "#/components/schemas/EndpointSpec" + }, + { + "type": "null" + } + ] } }, - "description": "SELinux labels of the container" - }, - "Secret": { - "title": "Secret", "type": "object", + "title": "ServiceSpec", + "description": "User modifiable configuration for a service." + }, + "ServiceSpecificationsGet": { "properties": { - "File": { - "title": "File", - "allOf": [ + "sidecar": { + "anyOf": [ { - "$ref": "#/components/schemas/File" + "$ref": "#/components/schemas/ServiceSpec" + }, + { + "type": "null" } ], - "description": "File represents a specific target that is backed by a file.\n" - }, - "SecretID": { - "title": "Secretid", - "type": "string", - "description": "SecretID represents the ID of the specific secret that we're\nreferencing.\n" + "description": "schedule-time specifications for the service sidecar (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.25/#operation/ServiceCreate)" }, - "SecretName": { - "title": "Secretname", - "type": "string", - "description": "SecretName is the name of the secret that this references,\nbut this is just provided for lookup/display purposes. The\nsecret in the reference will be identified by its ID.\n" + "service": { + "anyOf": [ + { + "$ref": "#/components/schemas/ServiceSpec" + }, + { + "type": "null" + } + ], + "description": "schedule-time specifications specifications for the service (follows Docker Service creation API (specifically only the Resources part), see https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate" } - } + }, + "type": "object", + "title": "ServiceSpecificationsGet" }, - "SelectBox": { - "title": "SelectBox", - "required": [ - "structure" + "ServiceType": { + "type": "string", + "enum": [ + "computational", + "dynamic", + "frontend", + "backend" ], - "type": "object", + "title": "ServiceType" + }, + "Spread": { "properties": { - "structure": { - "title": "Structure", - "minItems": 1, - "type": "array", - "items": { - "$ref": "#/components/schemas/Structure" - } + "SpreadDescriptor": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Spreaddescriptor", + "description": "label descriptor, such as `engine.labels.az`.\n" } }, - "additionalProperties": false - }, - "ServiceGet": { - "title": "ServiceGet", - "required": [ - "name", - "description", - "key", - "version", - "type", - "authors", - "contact", - "inputs", - "outputs" - ], "type": "object", + "title": "Spread" + }, + "Structure": { "properties": { - "name": { - "title": "Name", - "type": "string", - "description": "short, human readable name for the node", - "example": "Fast Counter" - }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "url to the thumbnail", - "format": "uri" - }, - "description": { - "title": "Description", - "type": "string", - "description": "human readable description of the purpose of the node" - }, - "deprecated": { - "title": "Deprecated", - "type": "string", - "description": "If filled with a date, 
then the service is to be deprecated at that date (e.g. cannot start anymore)", - "format": "date-time" - }, - "classifiers": { - "title": "Classifiers", - "type": "array", - "items": { - "type": "string" - } - }, - "quality": { - "title": "Quality", - "type": "object", - "default": {} - }, - "access_rights": { - "title": "Access Rights", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ServiceGroupAccessRights" - }, - "description": "service access rights per group id" - }, "key": { - "title": "Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string", - "description": "distinctive name for the node based on the docker registry path" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "description": "service version number" - }, - "integration-version": { - "title": "Integration-Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "description": "integration version number" - }, - "type": { - "allOf": [ + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, { - "$ref": "#/components/schemas/ServiceType" + "type": "number" } ], - "description": "service type" - }, - "badges": { - "title": "Badges", - "type": "array", - "items": { - "$ref": "#/components/schemas/Badge" - } - }, - "authors": { - "title": "Authors", - "minItems": 1, - "type": "array", - "items": { - "$ref": "#/components/schemas/Author" - } - }, - "contact": { - "title": "Contact", - "type": "string", - "description": "email to correspond to the authors about the node", - "format": "email" - }, - "inputs": { - "title": "Inputs", - "type": "object", - "description": "definition of the inputs of this node" + "title": "Key" }, - "outputs": { - "title": "Outputs", - "type": "object", - "description": "definition of the outputs of this node" - }, - "boot-options": { - "title": "Boot-Options", - "type": "object", - "description": "Service defined boot options. These get injected in the service as env variables." 
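ServiceSpecificationsGet, defined a little further up, wraps two optional ServiceSpec documents (`sidecar` and `service`) that follow the Docker service-creation API. A rough sketch of how the pieces nest, reusing the ServiceSpec, TaskSpec and Resources1 field names from this file; everything concrete is illustrative:

```python
# Illustrative ServiceSpecificationsGet-shaped payload. Field names follow the
# ServiceSpec / TaskSpec / Resources1 schemas in this file; values are made up.
service_specifications = {
    "sidecar": {
        "Labels": {"maintainer": "ops@example.com"},
        "TaskTemplate": {
            "Resources": {
                "Limits": {"NanoCPUs": 2_000_000_000, "MemoryBytes": 2 * 1024**3},
                "Reservations": {"NanoCPUs": 100_000_000, "MemoryBytes": 256 * 1024**2},
            },
        },
    },
    "service": {
        "TaskTemplate": {
            "Resources": {
                "Limits": {"NanoCPUs": 4_000_000_000, "MemoryBytes": 8 * 1024**3},
            },
        },
    },
}
```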
- }, - "owner": { - "title": "Owner", + "label": { "type": "string", - "format": "email" + "title": "Label" } }, - "description": "Static metadata for a service injected in the image labels\n\nThis is one to one with node-meta-v0.0.1.json", - "example": { - "name": "File Picker", - "description": "File Picker", - "classifiers": [], - "quality": {}, - "access_rights": { - "1": { - "execute_access": true, - "write_access": false - }, - "4": { - "execute_access": true, - "write_access": true - } - }, - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "type": "dynamic", - "authors": [ - { - "name": "Red Pandas", - "email": "redpandas@wonderland.com" - } - ], - "contact": "redpandas@wonderland.com", - "inputs": {}, - "outputs": { - "outFile": { - "displayOrder": 0, - "label": "File", - "description": "Chosen File", - "type": "data:*/*" - } - }, - "owner": "redpandas@wonderland.com" - } - }, - "ServiceGroupAccessRights": { - "title": "ServiceGroupAccessRights", + "additionalProperties": false, "type": "object", - "properties": { - "execute_access": { - "title": "Execute Access", - "type": "boolean", - "description": "defines whether the group can execute the service", - "default": false - }, - "write_access": { - "title": "Write Access", - "type": "boolean", - "description": "defines whether the group can modify the service", - "default": false - } - } - }, - "ServiceInput": { - "title": "ServiceInput", "required": [ - "label", - "description", - "type" + "key", + "label" ], - "type": "object", - "properties": { - "displayOrder": { - "title": "Displayorder", - "type": "number", - "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", - "deprecated": true - }, - "label": { - "title": "Label", - "type": "string", - "description": "short name for the property", - "example": "Age" - }, - "description": { - "title": "Description", - "type": "string", - "description": "description of the property", - "example": "Age in seconds since 1970" + "title": "Structure" + }, + "TaskSpec": { + "properties": { + "PluginSpec": { + "anyOf": [ + { + "$ref": "#/components/schemas/PluginSpec" + }, + { + "type": "null" + } + ], + "description": "Plugin spec for the service. *(Experimental release only.)*\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" }, - "type": { - "title": "Type", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", - "type": "string", - "description": "data type expected on this input glob matching for data type is allowed" + "ContainerSpec": { + "anyOf": [ + { + "$ref": "#/components/schemas/models_library__generated_models__docker_rest_api__ContainerSpec" + }, + { + "type": "null" + } + ], + "description": "Container spec for the service.\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" }, - "contentSchema": { - "title": "Contentschema", - "type": "object", - "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" + "NetworkAttachmentSpec": { + "anyOf": [ + { + "$ref": "#/components/schemas/NetworkAttachmentSpec" + }, + { + "type": "null" + } + ], + "description": "Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n


\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" }, - "fileToKeyMap": { - "title": "Filetokeymap", - "type": "object", - "description": "Place the data associated with the named keys in files" + "Resources": { + "anyOf": [ + { + "$ref": "#/components/schemas/Resources1" + }, + { + "type": "null" + } + ], + "description": "Resource requirements which apply to each individual container created\nas part of the service.\n" }, - "unit": { - "title": "Unit", - "type": "string", - "description": "Units, when it refers to a physical quantity" + "RestartPolicy": { + "anyOf": [ + { + "$ref": "#/components/schemas/RestartPolicy1" + }, + { + "type": "null" + } + ], + "description": "Specification for the restart policy which applies to containers\ncreated as part of this service.\n" }, - "defaultValue": { - "title": "Defaultvalue", + "Placement": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/Placement" }, + { + "type": "null" + } + ] + }, + "ForceUpdate": { + "anyOf": [ { "type": "integer" }, { - "type": "number" - }, + "type": "null" + } + ], + "title": "Forceupdate", + "description": "A counter that triggers an update even if no relevant parameters have\nbeen changed.\n" + }, + "Runtime": { + "anyOf": [ { "type": "string" + }, + { + "type": "null" } - ] + ], + "title": "Runtime", + "description": "Runtime is the type of runtime specified for the task executor.\n" }, - "widget": { - "title": "Widget", - "allOf": [ + "Networks": { + "anyOf": [ { - "$ref": "#/components/schemas/Widget" + "items": { + "$ref": "#/components/schemas/NetworkAttachmentConfig" + }, + "type": "array" + }, + { + "type": "null" } ], - "description": "custom widget to use instead of the default one determined from the data-type" + "title": "Networks", + "description": "Specifies which networks the service should attach to." + }, + "LogDriver": { + "anyOf": [ + { + "$ref": "#/components/schemas/LogDriver1" + }, + { + "type": "null" + } + ], + "description": "Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n" } }, - "additionalProperties": false, - "description": "Metadata on a service input port" + "type": "object", + "title": "TaskSpec", + "description": "User modifiable task configuration." }, - "ServiceOutput": { - "title": "ServiceOutput", + "TextArea": { + "properties": { + "minHeight": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Minheight", + "description": "minimum Height of the textarea", + "minimum": 0 + } + }, + "additionalProperties": false, + "type": "object", "required": [ - "label", - "description", - "type" + "minHeight" ], - "type": "object", + "title": "TextArea" + }, + "TmpfsOptions": { "properties": { - "displayOrder": { - "title": "Displayorder", - "type": "number", - "description": "DEPRECATED: new display order is taken from the item position. 
This will be removed.", - "deprecated": true - }, - "label": { - "title": "Label", - "type": "string", - "description": "short name for the property", - "example": "Age" - }, - "description": { - "title": "Description", - "type": "string", - "description": "description of the property", - "example": "Age in seconds since 1970" - }, - "type": { - "title": "Type", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", - "type": "string", - "description": "data type expected on this input glob matching for data type is allowed" - }, - "contentSchema": { - "title": "Contentschema", - "type": "object", - "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" - }, - "fileToKeyMap": { - "title": "Filetokeymap", - "type": "object", - "description": "Place the data associated with the named keys in files" - }, - "unit": { - "title": "Unit", - "type": "string", - "description": "Units, when it refers to a physical quantity" + "SizeBytes": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Sizebytes", + "description": "The size for the tmpfs mount in bytes." }, - "widget": { - "title": "Widget", - "allOf": [ + "Mode": { + "anyOf": [ { - "$ref": "#/components/schemas/Widget" + "type": "integer" + }, + { + "type": "null" } ], - "description": "custom widget to use instead of the default one determined from the data-type", - "deprecated": true + "title": "Mode", + "description": "The permission mode for the tmpfs mount in an integer." } }, - "additionalProperties": false, - "description": "Base class for service input/outputs" + "type": "object", + "title": "TmpfsOptions", + "description": "Optional configuration for the `tmpfs` type." }, - "ServicePortGet": { - "title": "ServicePortGet", - "required": [ - "key", - "kind" + "Type": { + "type": "string", + "enum": [ + "tcp", + "udp", + "sctp" ], - "type": "object", + "title": "Type" + }, + "Type2": { + "type": "string", + "enum": [ + "bind", + "volume", + "tmpfs", + "npipe" + ], + "title": "Type2", + "description": "The mount type. Available types:\n\n- `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.\n- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.\n- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.\n- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container." + }, + "Ulimit": { "properties": { - "key": { - "title": "Key name", - "pattern": "^[^_\\W0-9]\\w*$", - "type": "string", - "description": "port identifier name" + "Name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "Name of ulimit" }, - "kind": { - "title": "Kind", - "enum": [ - "input", - "output" + "Soft": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Soft", + "description": "Soft limit" + }, + "Hard": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } ], - "type": "string" - }, - "content_media_type": { - "title": "Content Media Type", - "type": "string" - }, - "content_schema": { - "title": "Content Schema", - "type": "object", - "description": "jsonschema for the port's value. 
SEE https://json-schema.org/understanding-json-schema/" + "title": "Hard", + "description": "Hard limit" } }, - "example": { - "key": "input_1", - "kind": "input", - "content_schema": { - "title": "Sleep interval", - "type": "integer", - "x_unit": "second", - "minimum": 0, - "maximum": 5 - } - } - }, - "ServiceSpec": { - "title": "ServiceSpec", "type": "object", + "title": "Ulimit" + }, + "UpdateConfig": { "properties": { - "Name": { - "title": "Name", - "type": "string", - "description": "Name of the service." - }, - "Labels": { - "title": "Labels", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "User-defined key/value metadata." - }, - "TaskTemplate": { - "$ref": "#/components/schemas/TaskSpec" + "Parallelism": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Parallelism", + "description": "Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n" }, - "Mode": { - "title": "Mode", - "allOf": [ + "Delay": { + "anyOf": [ { - "$ref": "#/components/schemas/Mode" + "type": "integer" + }, + { + "type": "null" } ], - "description": "Scheduling mode for the service." + "title": "Delay", + "description": "Amount of time between updates, in nanoseconds." }, - "UpdateConfig": { - "title": "Updateconfig", - "allOf": [ + "FailureAction": { + "anyOf": [ { - "$ref": "#/components/schemas/UpdateConfig" + "$ref": "#/components/schemas/FailureAction" + }, + { + "type": "null" } ], - "description": "Specification for the update strategy of the service." + "description": "Action to take if an updated task fails to run, or stops running\nduring the update.\n" }, - "RollbackConfig": { - "title": "Rollbackconfig", - "allOf": [ + "Monitor": { + "anyOf": [ { - "$ref": "#/components/schemas/RollbackConfig" + "type": "integer" + }, + { + "type": "null" } ], - "description": "Specification for the rollback strategy of the service." + "title": "Monitor", + "description": "Amount of time to monitor each updated task for failures, in\nnanoseconds.\n" }, - "Networks": { - "title": "Networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/NetworkAttachmentConfig" - }, - "description": "Specifies which networks the service should attach to." + "MaxFailureRatio": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Maxfailureratio", + "description": "The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", + "default": 0 }, - "EndpointSpec": { - "$ref": "#/components/schemas/EndpointSpec" - } - }, - "description": "User modifiable configuration for a service." - }, - "ServiceSpecificationsGet": { - "title": "ServiceSpecificationsGet", - "type": "object", - "properties": { - "sidecar": { - "title": "Sidecar", - "allOf": [ + "Order": { + "anyOf": [ { - "$ref": "#/components/schemas/ServiceSpec" + "$ref": "#/components/schemas/Order" + }, + { + "type": "null" } ], - "description": "schedule-time specifications for the service sidecar (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.25/#operation/ServiceCreate)" + "description": "The order of operations when rolling out an updated task. 
Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n" } - } - }, - "ServiceType": { - "title": "ServiceType", - "enum": [ - "computational", - "dynamic", - "frontend", - "backend" - ], - "type": "string", - "description": "An enumeration." - }, - "ServiceUpdate": { - "title": "ServiceUpdate", + }, "type": "object", + "title": "UpdateConfig", + "description": "Specification for the update strategy of the service." + }, + "ValidationError": { "properties": { - "access_rights": { - "title": "Access Rights", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ServiceGroupAccessRights" + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] }, - "description": "service access rights per group id" - }, - "name": { - "title": "Name", - "type": "string" + "type": "array", + "title": "Location" }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, + "msg": { "type": "string", - "format": "uri" - }, - "description": { - "title": "Description", - "type": "string" + "title": "Message" }, - "deprecated": { - "title": "Deprecated", + "type": { "type": "string", - "description": "If filled with a date, then the service is to be deprecated at that date (e.g. cannot start anymore)", - "format": "date-time" - }, - "classifiers": { - "title": "Classifiers", - "type": "array", - "items": { - "type": "string" - } - }, - "quality": { - "title": "Quality", - "type": "object", - "default": {} + "title": "Error Type" } }, - "example": { - "access_rights": { - "1": { - "execute_access": false, - "write_access": false - }, - "2": { - "execute_access": true, - "write_access": true - }, - "44": { - "execute_access": false, - "write_access": false - } - }, - "name": "My Human Readable Service Name", - "description": "An interesting service that does something", - "classifiers": [ - "RRID:SCR_018997", - "RRID:SCR_019001" - ], - "quality": { - "tsr": { - "r01": { - "level": 3, - "references": "" - }, - "r02": { - "level": 2, - "references": "" - }, - "r03": { - "level": 0, - "references": "" - }, - "r04": { - "level": 0, - "references": "" - }, - "r05": { - "level": 2, - "references": "" - }, - "r06": { - "level": 0, - "references": "" - }, - "r07": { - "level": 0, - "references": "" - }, - "r08": { - "level": 1, - "references": "" - }, - "r09": { - "level": 0, - "references": "" - }, - "r10": { - "level": 0, - "references": "" - } - }, - "enabled": true, - "annotations": { - "vandv": "", - "purpose": "", - "standards": "", - "limitations": "", - "documentation": "", - "certificationLink": "", - "certificationStatus": "Uncertified" - } - } - } - }, - "SimCoreFileLink": { - "title": "SimCoreFileLink", + "type": "object", "required": [ - "store", - "path" + "loc", + "msg", + "type" ], - "type": "object", + "title": "ValidationError" + }, + "VolumeOptions": { "properties": { - "store": { - "title": "Store", - "type": "integer", - "description": "The store identifier: 0 for simcore S3, 1 for datcore" - }, - "path": { - "title": "Path", + "NoCopy": { "anyOf": [ { - "pattern": "^(api|([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}))\\/([0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12})\\/(.+)$", - "type": "string" + "type": "boolean" }, { - "pattern": "^N:package:[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$", - "type": "string" + "type": "null" } ], - 
"description": "The path to the file in the storage provider domain" + "title": "Nocopy", + "description": "Populate volume with data from the target.", + "default": false }, - "label": { - "title": "Label", - "type": "string", - "description": "The real file name" + "Labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Labels", + "description": "User-defined key/value metadata." }, - "eTag": { - "title": "Etag", - "type": "string", - "description": "Entity tag that uniquely represents the file. The method to generate the tag is not specified (black box)." + "DriverConfig": { + "anyOf": [ + { + "$ref": "#/components/schemas/DriverConfig" + }, + { + "type": "null" + } + ], + "description": "Map of driver specific options" + } + }, + "type": "object", + "title": "VolumeOptions", + "description": "Optional configuration for the `volume` type." + }, + "Widget": { + "properties": { + "type": { + "$ref": "#/components/schemas/WidgetType", + "description": "type of the property" }, - "dataset": { - "title": "Dataset", - "type": "string", - "deprecated": true + "details": { + "anyOf": [ + { + "$ref": "#/components/schemas/TextArea" + }, + { + "$ref": "#/components/schemas/SelectBox" + } + ], + "title": "Details" } }, "additionalProperties": false, - "description": "I/O port type to hold a link to a file in simcore S3 storage", - "example": { - "store": 0, - "path": "94453a6a-c8d4-52b3-a22d-ccbf81f8d636/0a3b2c56-dbcd-4871-b93b-d454b7883f9f/input.txt", - "eTag": "859fda0cb82fc4acb4686510a172d9a9-1", - "label": "input.txt" - } - }, - "Spread": { - "title": "Spread", "type": "object", - "properties": { - "SpreadDescriptor": { - "title": "Spreaddescriptor", - "type": "string", - "description": "label descriptor, such as `engine.labels.az`.\n" - } - } - }, - "Structure": { - "title": "Structure", "required": [ - "key", - "label" + "type", + "details" ], - "type": "object", + "title": "Widget" + }, + "WidgetType": { + "type": "string", + "enum": [ + "TextArea", + "SelectBox" + ], + "title": "WidgetType" + }, + "models_library__generated_models__docker_rest_api__ContainerSpec": { "properties": { - "key": { - "title": "Key", + "Image": { "anyOf": [ { "type": "string" }, { - "type": "boolean" + "type": "null" + } + ], + "title": "Image", + "description": "The image name to use for the container" + }, + "Labels": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" }, { - "type": "number" + "type": "null" } - ] + ], + "title": "Labels", + "description": "User-defined key/value data." }, - "label": { - "title": "Label", - "type": "string" - } - }, - "additionalProperties": false - }, - "TaskSpec": { - "title": "TaskSpec", - "type": "object", - "properties": { - "PluginSpec": { - "title": "Pluginspec", - "allOf": [ + "Command": { + "anyOf": [ { - "$ref": "#/components/schemas/PluginSpec" + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" } ], - "description": "Plugin spec for the service. *(Experimental release only.)*\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" + "title": "Command", + "description": "The command to be run in the image." }, - "ContainerSpec": { - "title": "Containerspec", - "allOf": [ + "Args": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, { - "$ref": "#/components/schemas/ContainerSpec" + "type": "null" } ], - "description": "Container spec for the service.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" + "title": "Args", + "description": "Arguments to the command." }, - "NetworkAttachmentSpec": { - "title": "Networkattachmentspec", - "allOf": [ + "Hostname": { + "anyOf": [ { - "$ref": "#/components/schemas/NetworkAttachmentSpec" + "type": "string" + }, + { + "type": "null" } ], - "description": "Read-only spec type for non-swarm containers attached to swarm overlay\nnetworks.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`.\n" + "title": "Hostname", + "description": "The hostname to use for the container, as a valid\n[RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.\n" }, - "Resources": { - "title": "Resources", - "allOf": [ + "Env": { + "anyOf": [ { - "$ref": "#/components/schemas/Resources1" + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" } ], - "description": "Resource requirements which apply to each individual container created\nas part of the service.\n" + "title": "Env", + "description": "A list of environment variables in the form `VAR=value`.\n" }, - "RestartPolicy": { - "title": "Restartpolicy", - "allOf": [ + "Dir": { + "anyOf": [ { - "$ref": "#/components/schemas/RestartPolicy1" + "type": "string" + }, + { + "type": "null" } ], - "description": "Specification for the restart policy which applies to containers\ncreated as part of this service.\n" - }, - "Placement": { - "$ref": "#/components/schemas/Placement" + "title": "Dir", + "description": "The working directory for commands to run in." }, - "ForceUpdate": { - "title": "Forceupdate", - "type": "integer", - "description": "A counter that triggers an update even if no relevant parameters have\nbeen changed.\n" + "User": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "User", + "description": "The user inside the container." }, - "Runtime": { - "title": "Runtime", - "type": "string", - "description": "Runtime is the type of runtime specified for the task executor.\n" + "Groups": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Groups", + "description": "A list of additional groups that the container process will run as.\n" }, - "Networks": { - "title": "Networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/NetworkAttachmentConfig" - }, - "description": "Specifies which networks the service should attach to." + "Privileges": { + "anyOf": [ + { + "$ref": "#/components/schemas/Privileges" + }, + { + "type": "null" + } + ], + "description": "Security options for the container" }, - "LogDriver": { - "title": "Logdriver", - "allOf": [ + "TTY": { + "anyOf": [ { - "$ref": "#/components/schemas/LogDriver1" + "type": "boolean" + }, + { + "type": "null" } ], - "description": "Specifies the log driver to use for tasks created from this spec. If\nnot present, the default one for the swarm will be used, finally\nfalling back to the engine default if not specified.\n" - } - }, - "description": "User modifiable task configuration." - }, - "TextArea": { - "title": "TextArea", - "required": [ - "minHeight" - ], - "type": "object", - "properties": { - "minHeight": { - "title": "Minheight", - "exclusiveMinimum": true, - "type": "integer", - "description": "minimum Height of the textarea", - "minimum": 0 - } - }, - "additionalProperties": false - }, - "TmpfsOptions": { - "title": "TmpfsOptions", - "type": "object", - "properties": { - "SizeBytes": { - "title": "Sizebytes", - "type": "integer", - "description": "The size for the tmpfs mount in bytes." + "title": "Tty", + "description": "Whether a pseudo-TTY should be allocated." }, - "Mode": { - "title": "Mode", - "type": "integer", - "description": "The permission mode for the tmpfs mount in an integer." 
- } - }, - "description": "Optional configuration for the `tmpfs` type." - }, - "Type": { - "title": "Type", - "enum": [ - "tcp", - "udp", - "sctp" - ], - "description": "An enumeration." - }, - "Type1": { - "title": "Type1", - "enum": [ - "bind", - "volume", - "tmpfs", - "npipe" - ], - "description": " The mount type:\n\n- `bind` a mount of a file or directory from the host into the container.\n- `volume` a docker volume with the given `Name`.\n- `tmpfs` a `tmpfs`.\n- `npipe` a named pipe from the host into the container." - }, - "Ulimit1": { - "title": "Ulimit1", - "type": "object", - "properties": { - "Name": { - "title": "Name", - "type": "string", - "description": "Name of ulimit" + "OpenStdin": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Openstdin", + "description": "Open `stdin`" }, - "Soft": { - "title": "Soft", - "type": "integer", - "description": "Soft limit" + "ReadOnly": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Readonly", + "description": "Mount the container's root filesystem as read only." }, - "Hard": { - "title": "Hard", - "type": "integer", - "description": "Hard limit" - } - } - }, - "UpdateConfig": { - "title": "UpdateConfig", - "type": "object", - "properties": { - "Parallelism": { - "title": "Parallelism", - "type": "integer", - "description": "Maximum number of tasks to be updated in one iteration (0 means\nunlimited parallelism).\n" + "Mounts": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Mount" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Mounts", + "description": "Specification for mounts to be added to containers created as part\nof the service.\n" }, - "Delay": { - "title": "Delay", - "type": "integer", - "description": "Amount of time between updates, in nanoseconds." + "StopSignal": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Stopsignal", + "description": "Signal to stop the container." }, - "FailureAction": { - "allOf": [ + "StopGracePeriod": { + "anyOf": [ { - "$ref": "#/components/schemas/FailureAction" + "type": "integer" + }, + { + "type": "null" } ], - "description": "Action to take if an updated task fails to run, or stops running\nduring the update.\n" + "title": "Stopgraceperiod", + "description": "Amount of time to wait for the container to terminate before\nforcefully killing it.\n" }, - "Monitor": { - "title": "Monitor", - "type": "integer", - "description": "Amount of time to monitor each updated task for failures, in\nnanoseconds.\n" + "HealthCheck": { + "anyOf": [ + { + "$ref": "#/components/schemas/HealthConfig" + }, + { + "type": "null" + } + ] }, - "MaxFailureRatio": { - "title": "Maxfailureratio", - "type": "number", - "description": "The fraction of tasks that may fail during an update before the\nfailure action is invoked, specified as a floating point number\nbetween 0 and 1.\n", - "default": 0 + "Hosts": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Hosts", + "description": "A list of hostname/IP mappings to add to the container's `hosts`\nfile. 
The format of extra hosts is specified in the\n[hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)\nman page:\n\n IP_address canonical_hostname [aliases...]\n" }, - "Order": { - "allOf": [ + "DNSConfig": { + "anyOf": [ { - "$ref": "#/components/schemas/Order" + "$ref": "#/components/schemas/DnsConfig" + }, + { + "type": "null" } ], - "description": "The order of operations when rolling out an updated task. Either\nthe old task is shut down before the new task is started, or the\nnew task is started before the old task is shut down.\n" - } - }, - "description": "Specification for the update strategy of the service." - }, - "ValidationError": { - "title": "ValidationError", - "required": [ - "loc", - "msg", - "type" - ], - "type": "object", - "properties": { - "loc": { - "title": "Location", - "type": "array", - "items": { - "anyOf": [ - { - "type": "string" + "description": "Specification for DNS related configurations in resolver configuration\nfile (`resolv.conf`).\n" + }, + "Secrets": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Secret" }, - { - "type": "integer" - } - ] - } + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Secrets", + "description": "Secrets contains references to zero or more secrets that will be\nexposed to the service.\n" }, - "msg": { - "title": "Message", - "type": "string" + "Configs": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Config1" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Configs", + "description": "Configs contains references to zero or more configs that will be\nexposed to the service.\n" }, - "type": { - "title": "Error Type", - "type": "string" - } - } - }, - "VolumeOptions": { - "title": "VolumeOptions", - "type": "object", - "properties": { - "NoCopy": { - "title": "Nocopy", - "type": "boolean", - "description": "Populate volume with data from the target.", - "default": false + "Isolation": { + "anyOf": [ + { + "$ref": "#/components/schemas/Isolation1" + }, + { + "type": "null" + } + ], + "description": "Isolation technology of the containers running the service.\n(Windows only)\n" }, - "Labels": { - "title": "Labels", - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "User-defined key/value metadata." + "Init": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Init", + "description": "Run an init inside the container that forwards signals and reaps\nprocesses. This field is omitted if empty, and the default (as\nconfigured on the daemon) is used.\n" }, - "DriverConfig": { - "title": "Driverconfig", - "allOf": [ + "Sysctls": { + "anyOf": [ { - "$ref": "#/components/schemas/DriverConfig" + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" } ], - "description": "Map of driver specific options" - } - }, - "description": "Optional configuration for the `volume` type." - }, - "Widget": { - "title": "Widget", - "required": [ - "type", - "details" - ], - "type": "object", - "properties": { - "type": { - "allOf": [ + "title": "Sysctls", + "description": "Set kernel namedspaced parameters (sysctls) in the container.\nThe Sysctls option on services accepts the same sysctls as the\nare supported on containers. 
Note that while the same sysctls are\nsupported, no guarantees or checks are made about their\nsuitability for a clustered environment, and it's up to the user\nto determine whether a given sysctl will work properly in a\nService.\n" + }, + "CapabilityAdd": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, { - "$ref": "#/components/schemas/WidgetType" + "type": "null" } ], - "description": "type of the property" + "title": "Capabilityadd", + "description": "A list of kernel capabilities to add to the default set\nfor the container.\n" }, - "details": { - "title": "Details", + "CapabilityDrop": { "anyOf": [ { - "$ref": "#/components/schemas/TextArea" + "items": { + "type": "string" + }, + "type": "array" }, { - "$ref": "#/components/schemas/SelectBox" + "type": "null" } - ] + ], + "title": "Capabilitydrop", + "description": "A list of kernel capabilities to drop from the default set\nfor the container.\n" + }, + "Ulimits": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/Ulimit" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Ulimits", + "description": "A list of resource limits to set in the container. For example: `{\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048}`\"\n" } }, - "additionalProperties": false + "type": "object", + "title": "ContainerSpec", + "description": "Container spec for the service.\n\n
\n\n> **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are\n> mutually exclusive. PluginSpec is only used when the Runtime field\n> is set to `plugin`. NetworkAttachmentSpec is used when the Runtime\n> field is set to `attachment`." }, - "WidgetType": { - "title": "WidgetType", - "enum": [ - "TextArea", - "SelectBox" + "models_library__service_settings_labels__ContainerSpec": { + "properties": { + "Command": { + "items": { + "type": "string" + }, + "type": "array", + "maxItems": 2, + "minItems": 1, + "title": "Command", + "description": "Used to override the container's command" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "Command" ], - "type": "string", - "description": "An enumeration." + "title": "ContainerSpec", + "description": "Implements entries that can be overriden for https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate\nrequest body: TaskTemplate -> ContainerSpec" } } } diff --git a/services/catalog/requirements/_base.in b/services/catalog/requirements/_base.in index 9d4caa6ec66..7d985b6a1bc 100644 --- a/services/catalog/requirements/_base.in +++ b/services/catalog/requirements/_base.in @@ -6,6 +6,7 @@ --constraint ../../../requirements/constraints.txt --constraint constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/postgres-database/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in @@ -14,21 +15,10 @@ --requirement ../../../packages/service-library/requirements/_fastapi.in -# fastapi and extensions -fastapi[all] -# data models -pydantic[dotenv] - -# database -asyncpg -sqlalchemy[asyncio] - -# web client -httpx - -# other aiocache[redis,msgpack] -tenacity +asyncpg # database packaging +pydantic[dotenv] # data models pyyaml +tenacity diff --git a/services/catalog/requirements/_base.txt b/services/catalog/requirements/_base.txt index f2917984c48..d87cbb51b88 100644 --- a/services/catalog/requirements/_base.txt +++ b/services/catalog/requirements/_base.txt @@ -1,229 +1,608 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.5 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aiocache==0.11.1 - # via -r requirements/_base.in -aiodebug==2.3.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aiofiles==0.8.0 +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -aioredis==2.0.1 - # via aiocache -aiormq==6.4.2 + # -r requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 # via aio-pika -alembic==1.8.1 +aiosignal==1.3.2 + # via aiohttp +alembic==1.15.1 # via -r requirements/../../../packages/postgres-database/requirements/_base.in -anyio==3.6.1 +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette - # watchgod -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -asgiref==3.5.2 - # via uvicorn -async-timeout==4.0.2 + # watchfiles +arrow==1.3.0 # via - # aioredis - # redis -asyncpg==0.25.0 - # via -r requirements/_base.in -attrs==21.4.0 + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt - # jsonschema -certifi==2022.12.7 + # -r requirements/_base.in + # sqlalchemy +attrs==25.2.0 # via + # aiohttp + # jsonschema + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx # requests -charset-normalizer==2.0.12 +charset-normalizer==3.4.1 # via requests -click==8.1.3 +click==8.1.8 # via + # rich-toolkit # typer # uvicorn -dnspython==2.1.0 +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -email-validator==1.2.1 +email-validator==2.2.0 # via # fastapi # pydantic -fastapi==0.85.0 +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -greenlet==1.1.2 +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.69.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 # via sqlalchemy -h11==0.12.0 +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 # via # httpcore # uvicorn -httpcore==0.15.0 +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.7 # via httpx -httptools==0.2.0 +httptools==0.6.4 # via uvicorn -httpx==0.23.0 - # via +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -idna==2.10 + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator + # httpx # requests - # rfc3986 # yarl -itsdangerous==1.1.0 - # via fastapi -jaeger-client==4.8.0 - # via fastapi-contrib -jinja2==3.1.2 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt +importlib-metadata==8.6.1 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # fastapi -jsonschema==3.2.0 +jsonschema==4.23.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt # -r requirements/../../../packages/models-library/requirements/_base.in -mako==1.2.2 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.1 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via # jinja2 # mako -msgpack==1.0.3 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 # via aiocache -multidict==6.0.2 - # via yarl -opentracing==2.4.0 +multidict==6.1.0 # via - # fastapi-contrib - # jaeger-client -orjson==3.7.2 - # via fastapi -packaging==21.3 - # via -r requirements/_base.in -pamqp==3.2.1 - # via aiormq -psycopg2-binary==2.9.3 - # via sqlalchemy -pydantic==1.9.0 + # aiohttp + # yarl +opentelemetry-api==1.31.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # 
opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.31.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.31.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.31.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.31.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.52b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.52b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.52b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.31.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.31.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.52b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.52b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via # -r requirements/_base.in - # fastapi -pyinstrument==4.1.1 + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -pyparsing==3.0.9 - # via packaging -pyrsistent==0.18.1 - # via jsonschema -python-dateutil==2.8.2 + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.3 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 # via arrow -python-dotenv==0.20.0 
+python-dotenv==1.0.1 # via - # pydantic + # pydantic-settings # uvicorn -python-multipart==0.0.5 +python-multipart==0.0.20 # via fastapi -pyyaml==5.4.1 - # via +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in - # fastapi # uvicorn -redis==4.4.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in -requests==2.27.1 - # via fastapi -rfc3986==1.4.0 - # via httpx -six==1.15.0 - # via + # aiocache +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil - # python-multipart - # thrift -sniffio==1.2.0 - # via - # anyio - # httpcore - # httpx -sqlalchemy==1.4.37 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.13.2 + # via fastapi-cli +rpds-py==0.23.1 # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio 
+sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/postgres-database/requirements/_base.in - # -r requirements/_base.in # alembic -starlette==0.20.4 - # via fastapi -tenacity==8.0.1 +starlette==0.46.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.1 - # via - # jaeger-client - # threadloop -tqdm==4.64.0 +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.4.1 - # via -r requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.3.0 + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug - # aioredis - # pydantic - # starlette -ujson==5.5.0 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # alembic + # anyio # fastapi -urllib3==1.26.9 - # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # requests -uvicorn==0.15.0 +uvicorn==0.34.2 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in # fastapi -uvloop==0.16.0 + # fastapi-cli +uvloop==0.21.0 # via uvicorn -watchgod==0.8.2 +watchfiles==1.0.4 # via uvicorn -websockets==10.1 +websockets==15.0.1 # via uvicorn -yarl==1.7.2 +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika + # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/catalog/requirements/_test.in b/services/catalog/requirements/_test.in index 079b52ae51f..c6189773150 100644 --- a/services/catalog/requirements/_test.in +++ b/services/catalog/requirements/_test.in @@ -4,15 +4,17 @@ # --constraint ../../../requirements/constraints.txt --constraint constraints.txt + # Adds base AS CONSTRAINT specs, not requirement. 
# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt # + --constraint _base.txt + alembic # migration due to pytest_simcore.postgres_service +asgi_lifespan click -codecov -coveralls docker Faker jsonschema # 'services/catalog/tests/unit' dependencies @@ -25,4 +27,6 @@ pytest-docker pytest-mock pytest-runner respx -sqlalchemy[asyncio, mypy] +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types-psycopg2 +types-PyYAML diff --git a/services/catalog/requirements/_test.txt b/services/catalog/requirements/_test.txt index 4b3f675e080..30015c3c870 100644 --- a/services/catalog/requirements/_test.txt +++ b/services/catalog/requirements/_test.txt @@ -1,130 +1,127 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 +aiohappyeyeballs==2.6.1 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 # via # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # pytest-aiohttp -aiosignal==1.3.1 - # via aiohttp -alembic==1.8.1 - # via -r requirements/_test.in -anyio==3.6.1 +aiosignal==1.3.2 # via # -c requirements/_base.txt - # httpcore -async-timeout==4.0.2 + # aiohttp +alembic==1.15.1 # via # -c requirements/_base.txt - # aiohttp -attrs==21.4.0 + # -r requirements/_test.in +anyio==4.8.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.2.0 # via # -c requirements/_base.txt # aiohttp # jsonschema - # pytest # pytest-docker -certifi==2022.12.7 + # referencing +certifi==2025.1.31 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # httpcore # httpx # requests -charset-normalizer==2.0.12 +charset-normalizer==3.4.1 # via # -c requirements/_base.txt - # aiohttp # requests -click==8.1.3 - # via -r requirements/_test.in -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 +click==8.1.8 # via - # codecov - # coveralls - # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -docker==6.0.1 + # -c requirements/_base.txt + # -r requirements/_test.in +coverage==7.6.12 + # via pytest-cov +docker==7.1.0 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 +faker==37.0.0 # via -r requirements/_test.in -frozenlist==1.3.3 +frozenlist==1.5.0 # via + # -c requirements/_base.txt # aiohttp # aiosignal -greenlet==1.1.2 +greenlet==3.1.1 # via # -c requirements/_base.txt # sqlalchemy -h11==0.12.0 +h11==0.14.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.15.0 +httpcore==1.0.7 # via # -c requirements/_base.txt # httpx -httpx==0.23.0 +httpx==0.28.1 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # respx -idna==2.10 +idna==3.10 # via # -c requirements/_base.txt # anyio + # httpx # requests - # rfc3986 # yarl iniconfig==2.0.0 # via pytest -jsonschema==3.2.0 - # via -r requirements/_test.in -mako==1.2.2 +jsonschema==4.23.0 # via # -c requirements/_base.txt + # -r requirements/_test.in +jsonschema-specifications==2024.10.1 + # via + # -c requirements/_base.txt + # jsonschema +mako==1.3.9 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # alembic -markupsafe==2.1.1 +markupsafe==3.0.2 # via # -c 
requirements/_base.txt # mako -multidict==6.0.2 +multidict==6.1.0 # via # -c requirements/_base.txt # aiohttp # yarl -mypy==1.1.1 +mypy==1.15.0 # via sqlalchemy mypy-extensions==1.0.0 # via mypy -packaging==21.3 +packaging==24.2 # via # -c requirements/_base.txt - # docker # pytest -pluggy==1.0.0 +pluggy==1.5.0 # via pytest +propcache==0.3.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl ptvsd==4.3.2 # via -r requirements/_test.in py-cpuinfo==9.0.0 # via pytest-benchmark -pyparsing==3.0.9 - # via - # -c requirements/_base.txt - # packaging -pyrsistent==0.18.1 - # via - # -c requirements/_base.txt - # jsonschema -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in # pytest-aiohttp @@ -133,74 +130,70 @@ pytest==7.2.1 # pytest-cov # pytest-docker # pytest-mock -pytest-aiohttp==1.0.4 +pytest-aiohttp==1.1.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via pytest-aiohttp -pytest-benchmark==4.0.0 +pytest-benchmark==5.1.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-docker==1.0.1 +pytest-docker==3.2.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +referencing==0.35.1 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # faker -requests==2.27.1 + # jsonschema + # jsonschema-specifications +requests==2.32.3 # via # -c requirements/_base.txt - # codecov - # coveralls # docker -respx==0.20.1 +respx==0.22.0 # via -r requirements/_test.in -rfc3986==1.4.0 - # via - # -c requirements/_base.txt - # httpx -six==1.15.0 +rpds-py==0.23.1 # via # -c requirements/_base.txt # jsonschema - # python-dateutil -sniffio==1.2.0 + # referencing +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio - # httpcore - # httpx -sqlalchemy==1.4.37 + # asgi-lifespan +sqlalchemy==1.4.54 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -r requirements/_test.in # alembic -sqlalchemy2-stubs==0.0.2a32 +sqlalchemy2-stubs==0.0.2a38 # via sqlalchemy -tomli==2.0.1 - # via - # coverage - # mypy - # pytest -typing-extensions==4.3.0 +types-psycopg2==2.9.21.20250121 + # via -r requirements/_test.in +types-pyyaml==6.0.12.20241230 + # via -r requirements/_test.in +typing-extensions==4.12.2 # via # -c requirements/_base.txt + # alembic + # anyio # mypy # sqlalchemy2-stubs -urllib3==1.26.9 +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # docker # requests -websocket-client==1.5.1 - # via docker -yarl==1.7.2 +yarl==1.18.3 # via # -c requirements/_base.txt # aiohttp - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/services/catalog/requirements/_tools.txt b/services/catalog/requirements/_tools.txt index 96ec943c6ae..4ef3f43c67d 100644 --- a/services/catalog/requirements/_tools.txt +++ b/services/catalog/requirements/_tools.txt @@ -1,98 +1,88 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.9 # via pylint -black==22.12.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools 
bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.9 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 # via # -c requirements/_test.txt # black -nodeenv==1.7.0 + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==21.3 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt + # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.5 # via -r requirements/../../../requirements/devenv.txt -pyparsing==3.0.9 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # packaging -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 + # build + # pip-tools +pyyaml==6.0.2 # via # -c requirements/_base.txt # pre-commit # watchdog -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 +ruff==0.9.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==76.0.0 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.3.0 +typing-extensions==4.12.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.29.3 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/catalog/requirements/ci.txt b/services/catalog/requirements/ci.txt index 5387291c154..68ad56caa9a 100644 --- a/services/catalog/requirements/ci.txt +++ b/services/catalog/requirements/ci.txt @@ -9,13 +9,15 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/models-library -../../packages/postgres-database -../../packages/pytest-simcore/ -../../packages/service-library[fastapi] -../../packages/settings-library +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library # installs current package -. +simcore-service-catalog @ . 
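The ci.txt and prod.txt changes above layer the frozen requirement sets (_base.txt, _test.txt, _tools.txt) and switch the repo's local packages to PEP 508 direct references ("name @ ../relative/path"). As a minimal, illustrative sketch of how such a layered file can be reduced to its third-party pins while skipping directives and local-path packages (in the spirit of the read_reqs() helper defined in services/catalog/setup.py further below; the function name, regex, and example path here are assumptions for illustration, not the repo's exact implementation):

import re
from pathlib import Path


def read_pinned_reqs(reqs_path: Path) -> set[str]:
    """Return only the `name==version` pins from a pip-compile style file.

    Comment lines, '-c'/'-r'/'--requirement'/'--constraint' directives and
    PEP 508 direct references to local packages ('pkg @ ../path') are skipped.
    Illustrative sketch only; setup.py ships its own read_reqs() for this.
    """
    pins: set[str] = set()
    for raw in reqs_path.read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith(("#", "-")):
            continue  # skip comments and pip directives such as -r/-c/--requirement
        if " @ " in line:
            continue  # skip local packages installed from relative paths
        if re.match(r"^[\w.\[\],-]+==", line):
            pins.add(line)
    return pins


if __name__ == "__main__":
    # Hypothetical usage against the catalog service's frozen test requirements
    print(sorted(read_pinned_reqs(Path("services/catalog/requirements/_test.txt"))))

Using named direct references instead of bare relative paths keeps pip's resolver aware of which distribution each local path provides, which avoids duplicate installs when the same package is also pulled in as a dependency of another local package.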
diff --git a/services/catalog/requirements/dev.txt b/services/catalog/requirements/dev.txt index dccc4f79f39..c9df003398e 100644 --- a/services/catalog/requirements/dev.txt +++ b/services/catalog/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/postgres-database --editable ../../packages/pytest-simcore/ diff --git a/services/catalog/requirements/prod.txt b/services/catalog/requirements/prod.txt index a7ea4f35c59..96a80690986 100644 --- a/services/catalog/requirements/prod.txt +++ b/services/catalog/requirements/prod.txt @@ -10,10 +10,11 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library -../../packages/postgres-database/ -../../packages/service-library[fastapi] -../../packages/settings-library +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library # installs current package -. +simcore-service-catalog @ . diff --git a/services/catalog/setup.cfg b/services/catalog/setup.cfg index e17423c5d32..812aa1c836c 100644 --- a/services/catalog/setup.cfg +++ b/services/catalog/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.4.0 +current_version = 0.8.1 commit = True message = services/catalog version: {current_version} β†’ {new_version} tag = False @@ -9,5 +9,11 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function markers = testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/services/catalog/setup.py b/services/catalog/setup.py index d724da7744e..bcb8e076346 100644 --- a/services/catalog/setup.py +++ b/services/catalog/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -32,29 +31,35 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-service-catalog", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "name": "simcore-service-catalog", + "version": (CURRENT_DIR / "VERSION").read_text().strip(), + "author": ", ".join( ( "Pedro Crespo-Valero (pcrespov)", "Sylvain Anderegg (sanderegg)", ) ), - description="Manages and maintains a catalog of all published components (e.g. macro-algorithms, scripts, etc)", - long_description=(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ + "description": "Manages and maintains a catalog of all published components (e.g. 
macro-algorithms, scripts, etc)", + "long_description": (CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, -) + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-catalog=simcore_service_catalog.cli:main", + "simcore-service=simcore_service_catalog.cli:main", + ], + }, +} if __name__ == "__main__": setup(**SETUP) diff --git a/services/catalog/src/simcore_service_catalog/__init__.py b/services/catalog/src/simcore_service_catalog/__init__.py index 68863c1682a..3c882b121f4 100644 --- a/services/catalog/src/simcore_service_catalog/__init__.py +++ b/services/catalog/src/simcore_service_catalog/__init__.py @@ -1,4 +1,4 @@ """ Python package for the simcore_service_catalog. """ -from .meta import __version__ +from ._meta import __version__ diff --git a/services/catalog/src/simcore_service_catalog/_constants.py b/services/catalog/src/simcore_service_catalog/_constants.py new file mode 100644 index 00000000000..bc500c24e18 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/_constants.py @@ -0,0 +1,18 @@ +from typing import Any, Final + +# These are equivalent to pydantic export models but for responses +# SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeldict +# SEE https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter +RESPONSE_MODEL_POLICY: Final[dict[str, Any]] = { + "response_model_by_alias": True, + "response_model_exclude_unset": True, + "response_model_exclude_defaults": False, + "response_model_exclude_none": False, +} + +SECOND: Final[int] = 1 +MINUTE: Final[int] = 60 * SECOND +DIRECTOR_CACHING_TTL: Final[int] = 5 * MINUTE +LIST_SERVICES_CACHING_TTL: Final[int] = 30 * SECOND + +SIMCORE_SERVICE_SETTINGS_LABELS: Final[str] = "simcore.service.settings" diff --git a/services/catalog/src/simcore_service_catalog/_meta.py b/services/catalog/src/simcore_service_catalog/_meta.py new file mode 100644 index 00000000000..770d24a4e28 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/_meta.py @@ -0,0 +1,32 @@ +from typing import Final + +from models_library.basic_types import VersionStr +from packaging.version import Version +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore-service-catalog") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +API_VTAG: Final[str] = info.api_prefix_path_tag +APP_NAME: Final[str] = info.project_name +SUMMARY: Final[str] = info.get_summary() + + +# NOTE: https://patorjk.com/software/taag/#p=display&h=0&f=Ogre&t=Catalog +APP_STARTED_BANNER_MSG = r""" + ___ _ _ + / __\ __ _ | |_ __ _ | | ___ __ _ + / / / _` || __| / _` || | / _ \ / _` | +/ /___ | (_| || |_ | (_| || || (_) || (_| | +\____/ \__,_| \__| \__,_||_| \___/ \__, | + |___/ {} +""".format( + f"v{__version__}" +) + + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/catalog/src/simcore_service_catalog/api/_dependencies/__init__.py 
b/services/catalog/src/simcore_service_catalog/api/_dependencies/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/catalog/src/simcore_service_catalog/api/_dependencies/director.py b/services/catalog/src/simcore_service_catalog/api/_dependencies/director.py new file mode 100644 index 00000000000..f6cf9655263 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/_dependencies/director.py @@ -0,0 +1,13 @@ +from typing import Annotated + +from fastapi import Depends, FastAPI +from servicelib.fastapi.dependencies import get_app + +from ...clients.director import DirectorClient + + +def get_director_client( + app: Annotated[FastAPI, Depends(get_app)], +) -> DirectorClient: + director: DirectorClient = app.state.director_api + return director diff --git a/services/catalog/src/simcore_service_catalog/api/_dependencies/repository.py b/services/catalog/src/simcore_service_catalog/api/_dependencies/repository.py new file mode 100644 index 00000000000..b0ad2dcacb4 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/_dependencies/repository.py @@ -0,0 +1,31 @@ +import logging +from collections.abc import AsyncGenerator, Callable +from typing import Annotated + +from fastapi import Depends +from fastapi.requests import Request +from sqlalchemy.ext.asyncio import AsyncEngine + +from ...repository._base import BaseRepository + +_logger = logging.getLogger(__name__) + + +def _get_db_engine(request: Request) -> AsyncEngine: + engine: AsyncEngine = request.app.state.engine + assert engine # nosec + return engine + + +def get_repository(repo_type: type[BaseRepository]) -> Callable: + async def _get_repo( + engine: Annotated[AsyncEngine, Depends(_get_db_engine)], + ) -> AsyncGenerator[BaseRepository, None]: + # NOTE: 2 different ideas were tried here with not so good + # 1st one was acquiring a connection per repository which lead to the following issue https://github.com/ITISFoundation/osparc-simcore/pull/1966 + # 2nd one was acquiring a connection per request which works but blocks the director-v2 responsiveness once + # the max amount of connections is reached + # now the current solution is to connect connection when needed. 
+ yield repo_type(db_engine=engine) + + return _get_repo diff --git a/services/catalog/src/simcore_service_catalog/api/_dependencies/services.py b/services/catalog/src/simcore_service_catalog/api/_dependencies/services.py new file mode 100644 index 00000000000..eb29eecfe38 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/_dependencies/services.py @@ -0,0 +1,113 @@ +import logging +from dataclasses import dataclass +from typing import Annotated, cast + +from fastapi import Depends, FastAPI, Header, HTTPException, status +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.services_metadata_published import ServiceMetaDataPublished +from models_library.services_resources import ResourcesDict +from models_library.services_types import ServiceKey, ServiceVersion +from pydantic import ValidationError +from servicelib.fastapi.dependencies import get_app + +from ...clients.director import DirectorClient +from ...core.settings import ApplicationSettings +from ...repository.groups import GroupsRepository +from ...repository.services import ServicesRepository +from ...service import manifest +from .director import get_director_client +from .repository import get_repository + +_logger = logging.getLogger(__name__) + + +def get_default_service_resources( + app: Annotated[FastAPI, Depends(get_app)], +) -> ResourcesDict: + app_settings: ApplicationSettings = app.state.settings + return app_settings.CATALOG_SERVICES_DEFAULT_RESOURCES + + +def get_default_service_specifications( + app: Annotated[FastAPI, Depends(get_app)], +) -> ServiceSpecifications: + app_settings: ApplicationSettings = app.state.settings + return app_settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS + + +@dataclass(frozen=True) +class AccessInfo: + uid: int + gid: list[int] + product: str + + +async def check_service_read_access( + user_id: int, + service_key: ServiceKey, + service_version: ServiceVersion, + groups_repository: Annotated[ + GroupsRepository, Depends(get_repository(GroupsRepository)) + ], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + x_simcore_products_name: str = Header(None), +) -> AccessInfo: + # get the user's groups + user_groups = await groups_repository.list_user_groups(user_id) + if not user_groups: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You have unsufficient rights to access the service", + ) + + # check the user has access to this service and to which extent + if not await services_repo.get_service( + service_key, + service_version, + gids=[group.gid for group in user_groups], + product_name=x_simcore_products_name, + ): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Cannot access this service. 
It is either not published or not exposed to this user.", + ) + + return AccessInfo( + uid=user_id, + gid=[group.gid for group in user_groups], + product=x_simcore_products_name, + ) + + +async def get_service_from_manifest( + service_key: ServiceKey, + service_version: ServiceVersion, + director_client: Annotated[DirectorClient, Depends(get_director_client)], +) -> ServiceMetaDataPublished: + """ + Retrieves service metadata from the docker registry via the director + """ + try: + return cast( + ServiceMetaDataPublished, + await manifest.get_service( + director_client=director_client, + key=service_key, + version=service_version, + ), + ) + + except ValidationError as exc: + _logger.warning( + "Invalid service metadata in registry. Audit registry data for %s %s", + f"{service_key=}", + f"{service_version=}", + ) + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Service {service_key}:{service_version} not found", + ) from exc diff --git a/services/catalog/src/simcore_service_catalog/api/_dependencies/user_groups.py b/services/catalog/src/simcore_service_catalog/api/_dependencies/user_groups.py new file mode 100644 index 00000000000..f5471aaffd6 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/_dependencies/user_groups.py @@ -0,0 +1,23 @@ +from typing import Annotated + +from fastapi import Depends, Query +from models_library.groups import GroupAtDB +from models_library.users import UserID + +from ...repository.groups import GroupsRepository +from .repository import get_repository + + +async def list_user_groups( + groups_repository: Annotated[ + GroupsRepository, Depends(get_repository(GroupsRepository)) + ], + user_id: Annotated[ + UserID | None, + Query( + description="if passed, and that user has custom resources, " + "they will be merged with default resources and returned.", + ), + ] = None, +) -> list[GroupAtDB]: + return await groups_repository.list_user_groups(user_id) if user_id else [] diff --git a/services/catalog/src/simcore_service_catalog/api/dependencies/database.py b/services/catalog/src/simcore_service_catalog/api/dependencies/database.py deleted file mode 100644 index ed12ca5afa1..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/dependencies/database.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging -from typing import AsyncGenerator, Callable - -from fastapi import Depends -from fastapi.requests import Request -from sqlalchemy.ext.asyncio import AsyncEngine - -from ...db.repositories._base import BaseRepository - -logger = logging.getLogger(__name__) - - -def _get_db_engine(request: Request) -> AsyncEngine: - return request.app.state.engine - - -def get_repository(repo_type: type[BaseRepository]) -> Callable: - async def _get_repo( - engine: AsyncEngine = Depends(_get_db_engine), - ) -> AsyncGenerator[BaseRepository, None]: - # NOTE: 2 different ideas were tried here with not so good - # 1st one was acquiring a connection per repository which lead to the following issue https://github.com/ITISFoundation/osparc-simcore/pull/1966 - # 2nd one was acquiring a connection per request which works but blocks the director-v2 responsiveness once - # the max amount of connections is reached - # now the current solution is to connect connection when needed. 
- yield repo_type(db_engine=engine) - - return _get_repo diff --git a/services/catalog/src/simcore_service_catalog/api/dependencies/director.py b/services/catalog/src/simcore_service_catalog/api/dependencies/director.py deleted file mode 100644 index f55b55ac68e..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/dependencies/director.py +++ /dev/null @@ -1,15 +0,0 @@ -from fastapi import Depends, FastAPI -from fastapi.requests import Request - -from ...services.director import DirectorApi - - -def _get_app(request: Request) -> FastAPI: - return request.app - - -def get_director_api( - app: FastAPI = Depends(_get_app), -) -> DirectorApi: - director: DirectorApi = app.state.director_api - return director diff --git a/services/catalog/src/simcore_service_catalog/api/dependencies/services.py b/services/catalog/src/simcore_service_catalog/api/dependencies/services.py deleted file mode 100644 index 02bb97fee65..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/dependencies/services.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -import urllib.parse -from dataclasses import dataclass -from typing import Any, cast - -from fastapi import Depends, Header, HTTPException, status -from fastapi.requests import Request -from models_library.services import ServiceKey, ServiceVersion -from models_library.services_resources import ResourcesDict -from pydantic import ValidationError - -from ...core.settings import AppSettings -from ...db.repositories.groups import GroupsRepository -from ...db.repositories.services import ServicesRepository -from ...models.schemas.services import ServiceGet -from ...models.schemas.services_specifications import ServiceSpecifications -from ...services.director import DirectorApi -from ...services.function_services import get_function_service, is_function_service -from .database import get_repository -from .director import get_director_api - - -def get_default_service_resources(request: Request) -> ResourcesDict: - app_settings: AppSettings = request.app.state.settings - return app_settings.CATALOG_SERVICES_DEFAULT_RESOURCES - - -def get_default_service_specifications(request: Request) -> ServiceSpecifications: - app_settings: AppSettings = request.app.state.settings - return app_settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - - -@dataclass(frozen=True) -class AccessInfo: - uid: int - gid: list[int] - product: str - - -async def check_service_read_access( - user_id: int, - service_key: ServiceKey, - service_version: ServiceVersion, - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - x_simcore_products_name: str = Header(None), -) -> AccessInfo: - - # get the user's groups - user_groups = await groups_repository.list_user_groups(user_id) - if not user_groups: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to access the service", - ) - - # check the user has access to this service and to which extent - if not await services_repo.get_service( - service_key, - service_version, - gids=[group.gid for group in user_groups], - product_name=x_simcore_products_name, - ): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="Cannot access this service. 
It is either not published or not exposed to this user.", - ) - - return AccessInfo( - uid=user_id, - gid=[group.gid for group in user_groups], - product=x_simcore_products_name, - ) - - -logger = logging.getLogger(__name__) - - -async def get_service_from_registry( - service_key: ServiceKey, - service_version: ServiceVersion, - director_client: DirectorApi = Depends(get_director_api), -) -> ServiceGet: - """ - Retrieves service metadata from the docker registry via the director - """ - try: - if is_function_service(service_key): - frontend_service: dict[str, Any] = get_function_service( - key=service_key, version=service_version - ) - _service_data = frontend_service - else: - # NOTE: raises HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) on ANY failure - services_in_registry = cast( - list[Any], - await director_client.get( - f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}" - ), - ) - _service_data = services_in_registry[0] - - service: ServiceGet = ServiceGet.parse_obj(_service_data) - return service - - except ValidationError as exc: - logger.warning( - "Invalid service metadata in registry. Audit registry data for %s %s", - f"{service_key=}", - f"{service_version=}", - ) - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Service {service_key}:{service_version} not found", - ) from exc diff --git a/services/catalog/src/simcore_service_catalog/api/dependencies/user_groups.py b/services/catalog/src/simcore_service_catalog/api/dependencies/user_groups.py deleted file mode 100644 index be13c8a58ac..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/dependencies/user_groups.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Optional - -from fastapi import Depends, Query -from models_library.users import UserID - -from ...db.repositories.groups import GroupsRepository -from ...models.domain.group import GroupAtDB -from .database import get_repository - - -async def list_user_groups( - user_id: Optional[UserID] = Query( - default=None, - description="if passed, and that user has custom resources, " - "they will be merged with default resources and returned.", - ), - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), -) -> list[GroupAtDB]: - return await groups_repository.list_user_groups(user_id) if user_id else [] diff --git a/services/catalog/src/simcore_service_catalog/api/errors/http_error.py b/services/catalog/src/simcore_service_catalog/api/errors/http_error.py deleted file mode 100644 index cd6466b7033..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/errors/http_error.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Callable, Type - -from fastapi import HTTPException -from fastapi.encoders import jsonable_encoder -from starlette.requests import Request -from starlette.responses import JSONResponse - - -async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": [exc.detail]}), status_code=exc.status_code - ) - - -def make_http_error_handler_for_exception( - status_code: int, exception_cls: Type[BaseException] -) -> Callable[[Request, Type[BaseException]], JSONResponse]: - """ - Produces a handler for BaseException-type exceptions which converts them - into an error JSON response with a given status code - - SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions - """ - - async def _http_error_handler(_: Request, exc: Type[BaseException]) -> JSONResponse: - assert 
isinstance(exc, exception_cls) # nosec - return JSONResponse( - content=jsonable_encoder({"errors": [str(exc)]}), status_code=status_code - ) - - return _http_error_handler diff --git a/services/catalog/src/simcore_service_catalog/api/errors/validation_error.py b/services/catalog/src/simcore_service_catalog/api/errors/validation_error.py deleted file mode 100644 index fb70f6791ac..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/errors/validation_error.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Union - -from fastapi.encoders import jsonable_encoder -from fastapi.exceptions import RequestValidationError -from fastapi.openapi.constants import REF_PREFIX -from fastapi.openapi.utils import validation_error_response_definition -from pydantic import ValidationError -from starlette.requests import Request -from starlette.responses import JSONResponse -from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY - - -async def http422_error_handler( - _: Request, - exc: Union[RequestValidationError, ValidationError], -) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": exc.errors()}), - status_code=HTTP_422_UNPROCESSABLE_ENTITY, - ) - - -validation_error_response_definition["properties"] = { - "errors": { - "title": "Validation errors", - "type": "array", - "items": {"$ref": f"{REF_PREFIX}ValidationError"}, - }, -} diff --git a/services/catalog/src/simcore_service_catalog/api/rest/__init__.py b/services/catalog/src/simcore_service_catalog/api/rest/__init__.py new file mode 100644 index 00000000000..0d506564106 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/__init__.py @@ -0,0 +1,10 @@ +from fastapi import FastAPI + +from .errors import setup_rest_api_error_handlers +from .routes import setup_rest_api_routes + + +def initialize_rest_api(app: FastAPI): + + setup_rest_api_routes(app) + setup_rest_api_error_handlers(app) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_health.py b/services/catalog/src/simcore_service_catalog/api/rest/_health.py new file mode 100644 index 00000000000..a4360dff292 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_health.py @@ -0,0 +1,10 @@ +import datetime + +from fastapi import APIRouter + +router = APIRouter() + + +@router.get("/") +async def check_service_health(): + return f"{__name__}@{datetime.datetime.now(tz=datetime.UTC).isoformat()}" diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_meta.py b/services/catalog/src/simcore_service_catalog/api/rest/_meta.py new file mode 100644 index 00000000000..e78a06ddb61 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_meta.py @@ -0,0 +1,15 @@ +from fastapi import APIRouter +from models_library.api_schemas__common.meta import BaseMeta + +from ..._meta import API_VERSION, API_VTAG + +router = APIRouter() + + +@router.get("", response_model=BaseMeta) +async def get_service_metadata(): + return BaseMeta( + name=__name__.split(".")[0], + version=API_VERSION, + released={API_VTAG: API_VERSION}, + ) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services.py b/services/catalog/src/simcore_service_catalog/api/rest/_services.py new file mode 100644 index 00000000000..55108b8c2c6 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services.py @@ -0,0 +1,270 @@ +# pylint: disable=too-many-arguments + +import asyncio +import logging +from typing import Annotated, Any, TypeAlias, cast + +from aiocache import cached # type: 
ignore[import-untyped] +from fastapi import APIRouter, Depends, Header, HTTPException, status +from models_library.api_schemas_catalog.services import ServiceGet +from models_library.services import ServiceType +from models_library.services_authoring import Author +from models_library.services_metadata_published import ServiceMetaDataPublished +from pydantic import ValidationError +from pydantic.types import PositiveInt +from servicelib.fastapi.requests_decorators import cancel_on_disconnect +from starlette.requests import Request + +from ..._constants import ( + DIRECTOR_CACHING_TTL, + LIST_SERVICES_CACHING_TTL, + RESPONSE_MODEL_POLICY, +) +from ...clients.director import DirectorClient +from ...models.services_db import ServiceAccessRightsDB, ServiceMetaDataDBGet +from ...repository.groups import GroupsRepository +from ...repository.services import ServicesRepository +from .._dependencies.director import get_director_client +from .._dependencies.repository import get_repository +from .._dependencies.services import get_service_from_manifest + +_logger = logging.getLogger(__name__) + +ServicesSelection: TypeAlias = set[tuple[str, str]] + + +def _compose_service_details( + service_in_registry: dict[str, Any], # published part + service_in_db: ServiceMetaDataDBGet, # editable part + service_access_rights_in_db: list[ServiceAccessRightsDB], + service_owner: str | None, +) -> ServiceGet | None: + # compose service from registry and DB + service = service_in_registry + service.update( + service_in_db.model_dump(exclude_unset=True, exclude={"owner"}), + access_rights={rights.gid: rights for rights in service_access_rights_in_db}, + owner=service_owner if service_owner else None, + ) + + # validate the service + try: + return ServiceGet(**service) + except ValidationError as exc: + _logger.warning( + "Could not validate service [%s:%s]: %s", + service.get("key"), + service.get("version"), + exc, + ) + return None + + +def _build_cache_key(fct, *_, **kwargs): + return f"{fct.__name__}_{kwargs['user_id']}_{kwargs['x_simcore_products_name']}_{kwargs['details']}" + + +router = APIRouter() + + +@router.get( + "", + response_model=list[ServiceGet], + **RESPONSE_MODEL_POLICY, + deprecated=True, + description="Use instead rpc._service.list_services_paginated -> PageRpcServicesGetV2", +) +@cancel_on_disconnect +@cached( + ttl=LIST_SERVICES_CACHING_TTL, + key_builder=_build_cache_key, + # NOTE: this call is pretty expensive and can be called several times + # (when e2e runs or by the webserver when listing projects) therefore + # a cache is setup here +) +async def list_services( + request: Request, # pylint:disable=unused-argument + *, + user_id: PositiveInt, + director_client: Annotated[DirectorClient, Depends(get_director_client)], + groups_repository: Annotated[ + GroupsRepository, Depends(get_repository(GroupsRepository)) + ], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + x_simcore_products_name: Annotated[str, Header(...)], + details: bool = True, +): + # Access layer + user_groups = await groups_repository.list_user_groups(user_id) + if not user_groups: + # deny access + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You have unsufficient rights to access the services", + ) + + # now get the executable or writable services + services_in_db = { + (s.key, s.version): s + for s in await services_repo.list_services( + gids=[group.gid for group in user_groups], + execute_access=True, + write_access=True, + 
combine_access_with_and=False, + product_name=x_simcore_products_name, + ) + } + # Non-detailed views from the services_repo database + if not details: + # only return a stripped down version + # NOTE: here validation is not necessary since key,version were already validated + # in terms of time, this takes the most + return [ + ServiceGet.model_construct( + key=key, + version=version, + name="nodetails", + description="nodetails", + type=ServiceType.COMPUTATIONAL, + authors=[ + Author.model_construct( + name="nodetails", email="nodetails@nodetails.com" + ) + ], + contact="nodetails@nodetails.com", + inputs={}, + outputs={}, + deprecated=services_in_db[(key, version)].deprecated, + classifiers=[], + owner=None, + ) + for key, version in services_in_db + ] + + # caching this steps brings down the time to generate it at the expense of being sometimes a bit out of date + @cached(ttl=DIRECTOR_CACHING_TTL) + async def cached_registry_services() -> dict[str, Any]: + return cast(dict[str, Any], await director_client.get("/services")) + + ( + services_in_registry, + services_access_rights, + services_owner_emails, + ) = await asyncio.gather( + cached_registry_services(), + services_repo.batch_get_services_access_rights( + key_versions=services_in_db, + product_name=x_simcore_products_name, + ), + groups_repository.list_user_emails_from_gids( + {s.owner for s in services_in_db.values() if s.owner} + ), + ) + + # NOTE: for the details of the services: + # 1. we get all the services from the director-v0 (TODO: move the registry to the catalog) + # 2. we filter the services using the visible ones from the db + # 3. then we compose the final service using as a base the registry service, overriding with the same + # service from the database, adding also the access rights and the owner as email address instead of gid + # NOTE: This step takes the bulk of the time to generate the list + services_details = await asyncio.gather( + *[ + asyncio.get_event_loop().run_in_executor( + None, + _compose_service_details, + s, + services_in_db[s["key"], s["version"]], + services_access_rights[s["key"], s["version"]], + services_owner_emails.get( + services_in_db[s["key"], s["version"]].owner or 0 + ), + ) + for s in ( + request.app.state.frontend_services_catalog + services_in_registry + ) + if (s.get("key"), s.get("version")) in services_in_db + ] + ) + return [s for s in services_details if s is not None] + + +@router.get( + "/{service_key:path}/{service_version}", + response_model=ServiceGet, + **RESPONSE_MODEL_POLICY, + deprecated=True, + description="Use instead rpc._service.get_service -> ServiceGetV2", +) +async def get_service( + user_id: int, + service_in_manifest: Annotated[ + ServiceMetaDataPublished, Depends(get_service_from_manifest) + ], + groups_repository: Annotated[ + GroupsRepository, Depends(get_repository(GroupsRepository)) + ], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + x_simcore_products_name: str = Header(None), +): + service_data: dict[str, Any] = {"owner": None} + + # get the user groups + user_groups = await groups_repository.list_user_groups(user_id) + if not user_groups: + # deny access + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You have unsufficient rights to access the service", + ) + # check the user has access to this service and to which extent + service_in_db = await services_repo.get_service( + service_in_manifest.key, + service_in_manifest.version, + gids=[group.gid for group in user_groups], 
+ write_access=True, + product_name=x_simcore_products_name, + ) + if service_in_db: + # we have full access, let's add the access to the output + service_access_rights: list[ServiceAccessRightsDB] = ( + await services_repo.get_service_access_rights( + service_in_manifest.key, + service_in_manifest.version, + product_name=x_simcore_products_name, + ) + ) + service_data["access_rights"] = { + rights.gid: rights for rights in service_access_rights + } + else: + # check if we have executable rights + service_in_db = await services_repo.get_service( + service_in_manifest.key, + service_in_manifest.version, + gids=[group.gid for group in user_groups], + execute_access=True, + product_name=x_simcore_products_name, + ) + if not service_in_db: + # we have no access here + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You have insufficient rights to access the service", + ) + + # the owner shall be converted to an email address + if service_in_db.owner: + service_data["owner"] = await groups_repository.get_user_email_from_gid( + service_in_db.owner + ) + + # access is allowed, override some of the values with what is in the db + service_data.update( + service_in_manifest.model_dump(exclude_unset=True, by_alias=True) + | service_in_db.model_dump(exclude_unset=True, exclude={"owner"}) + ) + return service_data diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_access_rights.py b/services/catalog/src/simcore_service_catalog/api/rest/_services_access_rights.py new file mode 100644 index 00000000000..2c35b5e5051 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services_access_rights.py @@ -0,0 +1,52 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, Header +from models_library.api_schemas_catalog.service_access_rights import ( + ServiceAccessRightsGet, +) +from models_library.services import ServiceKey, ServiceVersion + +from ..._constants import RESPONSE_MODEL_POLICY +from ...repository.services import ServicesRepository +from .._dependencies.repository import get_repository +from .._dependencies.services import AccessInfo, check_service_read_access + +_logger = logging.getLogger(__name__) + +router = APIRouter() + + +@router.get( + "/{service_key:path}/{service_version}/accessRights", + response_model=ServiceAccessRightsGet, + description="Returns access rights information for provided service and product", + **RESPONSE_MODEL_POLICY, +) +async def get_service_access_rights( + service_key: ServiceKey, + service_version: ServiceVersion, + _user: Annotated[AccessInfo, Depends(check_service_read_access)], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + x_simcore_products_name: Annotated[str, Header(...)], +): + service_access_rights = await services_repo.get_service_access_rights( + key=service_key, + version=service_version, + product_name=x_simcore_products_name, + ) + + gids_with_access_rights = {} + for s in service_access_rights: + gids_with_access_rights[s.gid] = { + "execute_access": s.execute_access, + "write_access": s.write_access, + } + + return ServiceAccessRightsGet( + service_key=service_key, + service_version=service_version, + gids_with_access_rights=gids_with_access_rights, + ) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_extras.py b/services/catalog/src/simcore_service_catalog/api/rest/_services_extras.py new file mode 100644 index 00000000000..94a70baf8cc --- /dev/null +++ 
b/services/catalog/src/simcore_service_catalog/api/rest/_services_extras.py @@ -0,0 +1,23 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.api_schemas_directorv2.services import ServiceExtras +from models_library.services import ServiceKey, ServiceVersion + +from ...clients.director import DirectorClient +from ...service import catalog_services +from .._dependencies.director import get_director_client + +router = APIRouter() + + +@router.get("/{service_key:path}/{service_version}/extras") +async def get_service_extras( + service_key: ServiceKey, + service_version: ServiceVersion, + director_client: Annotated[DirectorClient, Depends(get_director_client)], +) -> ServiceExtras: + + return await catalog_services.get_catalog_service_extras( + director_client, service_key, service_version + ) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_labels.py b/services/catalog/src/simcore_service_catalog/api/rest/_services_labels.py new file mode 100644 index 00000000000..accd516f781 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services_labels.py @@ -0,0 +1,18 @@ +from typing import Annotated, Any + +from fastapi import APIRouter, Depends +from models_library.services import ServiceKey, ServiceVersion + +from ...clients.director import DirectorClient +from .._dependencies.director import get_director_client + +router = APIRouter() + + +@router.get("/{service_key:path}/{service_version}/labels") +async def get_service_labels( + service_key: ServiceKey, + service_version: ServiceVersion, + director_client: Annotated[DirectorClient, Depends(get_director_client)], +) -> dict[str, Any]: + return await director_client.get_service_labels(service_key, service_version) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_ports.py b/services/catalog/src/simcore_service_catalog/api/rest/_services_ports.py new file mode 100644 index 00000000000..5b6d55306a6 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services_ports.py @@ -0,0 +1,48 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.services_metadata_published import ServiceMetaDataPublished + +from ..._constants import RESPONSE_MODEL_POLICY +from .._dependencies.services import ( + AccessInfo, + check_service_read_access, + get_service_from_manifest, +) + +_logger = logging.getLogger(__name__) + +router = APIRouter() + + +@router.get( + "/{service_key:path}/{service_version}/ports", + response_model=list[ServicePortGet], + description="Returns a list of service ports starting with inputs and followed by outputs", + **RESPONSE_MODEL_POLICY, +) +async def list_service_ports( + _user: Annotated[AccessInfo, Depends(check_service_read_access)], + service: Annotated[ServiceMetaDataPublished, Depends(get_service_from_manifest)], +): + ports: list[ServicePortGet] = [] + + if service.inputs: + for name, input_port in service.inputs.items(): + ports.append( + ServicePortGet.from_domain_model( + kind="input", key=name, port=input_port + ) + ) + + if service.outputs: + for name, output_port in service.outputs.items(): + ports.append( + ServicePortGet.from_domain_model( + kind="output", key=name, port=output_port + ) + ) + + return ports diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_resources.py 
b/services/catalog/src/simcore_service_catalog/api/rest/_services_resources.py new file mode 100644 index 00000000000..d5c5156feb7 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services_resources.py @@ -0,0 +1,285 @@ +import logging +import urllib.parse +from copy import deepcopy +from typing import Annotated, Any, Final + +import yaml +from fastapi import APIRouter, Depends, HTTPException, status +from models_library.docker import DockerGenericTag +from models_library.groups import GroupAtDB +from models_library.service_settings_labels import ( + ComposeSpecLabelDict, + SimcoreServiceSettingLabelEntry, +) +from models_library.services import ServiceKey, ServiceVersion +from models_library.services_resources import ( + BootMode, + ImageResources, + ResourcesDict, + ServiceResourcesDict, + ServiceResourcesDictHelpers, +) +from models_library.utils.docker_compose import replace_env_vars_in_compose_spec +from pydantic import TypeAdapter + +from ..._constants import RESPONSE_MODEL_POLICY, SIMCORE_SERVICE_SETTINGS_LABELS +from ...clients.director import DirectorClient +from ...repository.services import ServicesRepository +from ...service.function_services import is_function_service +from ...utils.service_resources import ( + merge_service_resources_with_user_specs, + parse_generic_resource, +) +from .._dependencies.director import get_director_client +from .._dependencies.repository import get_repository +from .._dependencies.services import get_default_service_resources +from .._dependencies.user_groups import list_user_groups + +router = APIRouter() +_logger = logging.getLogger(__name__) + +SIMCORE_SERVICE_COMPOSE_SPEC_LABEL: Final[str] = "simcore.service.compose-spec" +_DEPRECATED_RESOURCES: Final[list[str]] = ["MPI"] +_BOOT_MODE_TO_RESOURCE_NAME_MAP: Final[dict[str, str]] = {"MPI": "MPI", "GPU": "VRAM"} + + +def _compute_service_available_boot_modes( + settings: list[SimcoreServiceSettingLabelEntry], + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[BootMode]: + """returns the service boot-modes. + currently this uses the simcore.service.settings labels if available for backwards compatibility. + if MPI is found, then boot mode is set to MPI, if GPU is found then boot mode is set to GPU, else to CPU. + In the future a dedicated label might be used, for example to add openMP support and to avoid abusing the resources of a service.
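For illustration, with _BOOT_MODE_TO_RESOURCE_NAME_MAP above this means: a generic resource reservation named "VRAM" selects the GPU boot mode, one named "MPI" selects the MPI boot mode, and with neither the result defaults to [BootMode.CPU].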
+ Also these will be used in a project to allow the user to choose among different boot modes + """ + + resource_entries = filter(lambda entry: entry.name.lower() == "resources", settings) + generic_resources: ResourcesDict = {} + for entry in resource_entries: + if not isinstance(entry.value, dict): + _logger.warning( + "resource %s for %s got invalid type", + f"{entry.model_dump()!r}", + f"{service_key}:{service_version}", + ) + continue + generic_resources |= parse_generic_resource( + entry.value.get("Reservations", {}).get("GenericResources", []), + ) + # currently these are unique boot modes + for mode in BootMode: + if ( + _BOOT_MODE_TO_RESOURCE_NAME_MAP.get(mode.value, mode.value) + in generic_resources + ): + return [mode] + + return [BootMode.CPU] + + +def _remove_deprecated_resources(resources: ResourcesDict) -> ResourcesDict: + for res_name in _DEPRECATED_RESOURCES: + resources.pop(res_name, None) + return resources + + +def _resources_from_settings( + settings: list[SimcoreServiceSettingLabelEntry], + default_service_resources: ResourcesDict, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> ResourcesDict: + # filter resource entries + resource_entries = filter(lambda entry: entry.name.lower() == "resources", settings) + # get the service resources + service_resources = deepcopy(default_service_resources) + for entry in resource_entries: + if not isinstance(entry.value, dict): + _logger.warning( + "resource %s for %s got invalid type", + f"{entry.model_dump()!r}", + f"{service_key}:{service_version}", + ) + continue + if nano_cpu_limit := entry.value.get("Limits", {}).get("NanoCPUs"): + service_resources["CPU"].limit = nano_cpu_limit / 1.0e09 + if nano_cpu_reservation := entry.value.get("Reservations", {}).get("NanoCPUs"): + # NOTE: if the limit was below, it needs to be increased as well + service_resources["CPU"].limit = max( + service_resources["CPU"].limit, nano_cpu_reservation / 1.0e09 + ) + service_resources["CPU"].reservation = nano_cpu_reservation / 1.0e09 + if ram_limit := entry.value.get("Limits", {}).get("MemoryBytes"): + service_resources["RAM"].limit = ram_limit + if ram_reservation := entry.value.get("Reservations", {}).get("MemoryBytes"): + # NOTE: if the limit was below, it needs to be increased as well + service_resources["RAM"].limit = max( + service_resources["RAM"].limit, ram_reservation + ) + service_resources["RAM"].reservation = ram_reservation + + service_resources |= parse_generic_resource( + entry.value.get("Reservations", {}).get("GenericResources", []), + ) + + return _remove_deprecated_resources(service_resources) + + +async def _get_service_labels( + director_client: DirectorClient, key: ServiceKey, version: ServiceVersion +) -> dict[str, Any] | None: + try: + service_labels = await director_client.get_service_labels(key, version) + _logger.debug( + "received for %s %s", + f"/services/{urllib.parse.quote_plus(key)}/{version}/labels", + f"{service_labels=}", + ) + return service_labels + except HTTPException as err: + # NOTE: some services will fail validation, eg: + # `busybox:latest` or `traefik:latest` because + # the director-v0 cannot extract labels from them + # and will fail validating the key or the version + if err.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY: + return None + raise + + +def _get_service_settings( + labels: dict[str, Any], +) -> list[SimcoreServiceSettingLabelEntry]: + service_settings = TypeAdapter(list[SimcoreServiceSettingLabelEntry]).validate_json( + labels.get(SIMCORE_SERVICE_SETTINGS_LABELS, 
"[]"), + ) + _logger.debug("received %s", f"{service_settings=}") + return service_settings + + +@router.get( + "/{service_key:path}/{service_version}/resources", + response_model=ServiceResourcesDict, + **RESPONSE_MODEL_POLICY, +) +async def get_service_resources( + service_key: ServiceKey, + service_version: ServiceVersion, + director_client: Annotated[DirectorClient, Depends(get_director_client)], + default_service_resources: Annotated[ + ResourcesDict, Depends(get_default_service_resources) + ], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + user_groups: Annotated[list[GroupAtDB], Depends(list_user_groups)], +) -> ServiceResourcesDict: + image_version = TypeAdapter(DockerGenericTag).validate_python( + f"{service_key}:{service_version}" + ) + if is_function_service(service_key): + return ServiceResourcesDictHelpers.create_from_single_service( + image_version, default_service_resources + ) + + service_labels: dict[str, Any] | None = await _get_service_labels( + director_client, service_key, service_version + ) + + if not service_labels: + return ServiceResourcesDictHelpers.create_from_single_service( + image_version, default_service_resources + ) + + service_spec: ComposeSpecLabelDict | None = TypeAdapter( + ComposeSpecLabelDict | None + ).validate_json(service_labels.get(SIMCORE_SERVICE_COMPOSE_SPEC_LABEL, "null")) + _logger.debug("received %s", f"{service_spec=}") + + if service_spec is None: + # no compose specifications -> single service + service_settings = _get_service_settings(service_labels) + service_resources = _resources_from_settings( + service_settings, default_service_resources, service_key, service_version + ) + service_boot_modes = _compute_service_available_boot_modes( + service_settings, service_key, service_version + ) + + user_specific_service_specs = await services_repo.get_service_specifications( + service_key, + service_version, + tuple(user_groups), + allow_use_latest_service_version=True, + ) + if user_specific_service_specs and user_specific_service_specs.service: + service_resources = merge_service_resources_with_user_specs( + service_resources, user_specific_service_specs.service + ) + + return ServiceResourcesDictHelpers.create_from_single_service( + image_version, service_resources, service_boot_modes + ) + + # compose specifications available, potentially multiple services + stringified_service_spec = replace_env_vars_in_compose_spec( + service_spec=service_spec, + replace_simcore_registry="", + replace_service_version=service_version, + ) + full_service_spec: ComposeSpecLabelDict = yaml.safe_load(stringified_service_spec) + + service_to_resources: ServiceResourcesDict = TypeAdapter( + ServiceResourcesDict + ).validate_python({}) + + for spec_key, spec_data in full_service_spec["services"].items(): + # image can be: + # - `/simcore/service/dynamic/service-name:0.0.1` + # - `traefik:0.0.1` + # leading slashes must be stripped + image = spec_data["image"].lstrip("/") + key, version = image.split(":") + spec_service_labels: dict[str, Any] | None = await _get_service_labels( + director_client, key, version + ) + + spec_service_resources: ResourcesDict + + if not spec_service_labels: + spec_service_resources = default_service_resources + service_boot_modes = [BootMode.CPU] + else: + spec_service_settings = _get_service_settings(spec_service_labels) + spec_service_resources = _resources_from_settings( + spec_service_settings, + default_service_resources, + service_key, + service_version, + ) + 
service_boot_modes = _compute_service_available_boot_modes( + spec_service_settings, service_key, service_version + ) + user_specific_service_specs = ( + await services_repo.get_service_specifications( + key, + version, + tuple(user_groups), + allow_use_latest_service_version=True, + ) + ) + if user_specific_service_specs and user_specific_service_specs.service: + spec_service_resources = merge_service_resources_with_user_specs( + spec_service_resources, user_specific_service_specs.service + ) + + service_to_resources[spec_key] = ImageResources.model_validate( + { + "image": image, + "resources": spec_service_resources, + "boot_modes": service_boot_modes, + } + ) + + return service_to_resources diff --git a/services/catalog/src/simcore_service_catalog/api/rest/_services_specifications.py b/services/catalog/src/simcore_service_catalog/api/rest/_services_specifications.py new file mode 100644 index 00000000000..7f937a2b0df --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/_services_specifications.py @@ -0,0 +1,76 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, + ServiceSpecificationsGet, +) +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID + +from ..._constants import RESPONSE_MODEL_POLICY +from ...repository.groups import GroupsRepository +from ...repository.services import ServicesRepository +from ...service.function_services import is_function_service +from .._dependencies.repository import get_repository +from .._dependencies.services import get_default_service_specifications + +router = APIRouter() +_logger = logging.getLogger(__name__) + + +@router.get( + "/{service_key:path}/{service_version}/specifications", + response_model=ServiceSpecificationsGet, + **RESPONSE_MODEL_POLICY, +) +async def get_service_specifications( + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + groups_repository: Annotated[ + GroupsRepository, Depends(get_repository(GroupsRepository)) + ], + services_repo: Annotated[ + ServicesRepository, Depends(get_repository(ServicesRepository)) + ], + default_service_specifications: Annotated[ + ServiceSpecifications, Depends(get_default_service_specifications) + ], + *, + strict: Annotated[ + bool, + Query( + description="if True only the version specs will be retrieved, if False the latest version will be used instead", + ), + ] = False, +): + _logger.debug("getting specifications for '%s:%s'", service_key, service_version) + + if is_function_service(service_key): + # There is no specification for these, return empty specs + return ServiceSpecifications() + + # Access layer + user_groups = await groups_repository.list_user_groups(user_id) + if not user_groups: + # deny access, but this should not happen + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You have insufficient rights to access the services", + ) + + service_specs = await services_repo.get_service_specifications( + service_key, + service_version, + tuple(user_groups), + allow_use_latest_service_version=not strict, + ) + + if not service_specs: + # nothing found, let's return the default then + service_specs = default_service_specifications.model_copy() + + _logger.debug("returning %s", f"{service_specs=}") + return service_specs diff --git a/services/catalog/src/simcore_service_catalog/api/rest/errors.py
b/services/catalog/src/simcore_service_catalog/api/rest/errors.py new file mode 100644 index 00000000000..78189bdad91 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/errors.py @@ -0,0 +1,6 @@ +from fastapi import FastAPI +from servicelib.fastapi.http_error import set_app_default_http_error_handlers + + +def setup_rest_api_error_handlers(app: FastAPI): + set_app_default_http_error_handlers(app) diff --git a/services/catalog/src/simcore_service_catalog/api/rest/routes.py b/services/catalog/src/simcore_service_catalog/api/rest/routes.py new file mode 100644 index 00000000000..156fdb668f0 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rest/routes.py @@ -0,0 +1,84 @@ +from enum import Enum + +from fastapi import APIRouter, FastAPI + +from ..._meta import ( + API_VTAG, +) +from . import ( + _health, + _meta, + _services, + _services_access_rights, + _services_extras, + _services_labels, + _services_ports, + _services_resources, + _services_specifications, +) + +v0_router = APIRouter() + +# health +health_router = _health.router +v0_router.include_router( + _health.router, + tags=["diagnostics"], +) + +# meta +v0_router.include_router( + _meta.router, + tags=["meta"], + prefix="/meta", +) + +# services +_SERVICE_PREFIX = "/services" +_SERVICE_TAGS: list[str | Enum] = [ + "services", +] +v0_router.include_router( + _services_resources.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) +v0_router.include_router( + _services_labels.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) +v0_router.include_router( + _services_extras.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) +v0_router.include_router( + _services_specifications.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) +v0_router.include_router( + _services_ports.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) +v0_router.include_router( + _services_access_rights.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) + +# NOTE: that this router must come after resources/specifications/ports/access_rights +v0_router.include_router( + _services.router, + tags=_SERVICE_TAGS, + prefix=_SERVICE_PREFIX, +) + + +def setup_rest_api_routes(app: FastAPI): + # healthcheck at / and at /v0/ + app.include_router(health_router) + # api under /v* + app.include_router(v0_router, prefix=f"/{API_VTAG}") diff --git a/services/catalog/src/simcore_service_catalog/api/root.py b/services/catalog/src/simcore_service_catalog/api/root.py deleted file mode 100644 index 9b5200fd2c2..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/root.py +++ /dev/null @@ -1,35 +0,0 @@ -from fastapi import APIRouter - -from .routes import ( - dags, - health, - meta, - services, - services_ports, - services_resources, - services_specifications, -) - -router = APIRouter() -router.include_router(health.router) - -# API -router.include_router(meta.router, tags=["meta"], prefix="/meta") -router.include_router(dags.router, tags=["DAG"], prefix="/dags") - -SERVICE_PREFIX = "/services" -SERVICE_TAGS = [ - "services", -] - -router.include_router( - services_resources.router, tags=SERVICE_TAGS, prefix=SERVICE_PREFIX -) -router.include_router( - services_specifications.router, tags=SERVICE_TAGS, prefix=SERVICE_PREFIX -) - -router.include_router(services_ports.router, tags=SERVICE_TAGS, prefix=SERVICE_PREFIX) - -# NOTE: that this router must come after resources/specifications/ports -router.include_router(services.router, tags=SERVICE_TAGS, prefix=SERVICE_PREFIX) diff --git 
a/services/catalog/src/simcore_service_catalog/api/routes/dags.py b/services/catalog/src/simcore_service_catalog/api/routes/dags.py deleted file mode 100644 index cf22a9d9c81..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/dags.py +++ /dev/null @@ -1,133 +0,0 @@ -import logging -from typing import Optional - -from fastapi import APIRouter, Body, Depends, HTTPException, Query -from starlette.status import ( - HTTP_201_CREATED, - HTTP_204_NO_CONTENT, - HTTP_409_CONFLICT, - HTTP_501_NOT_IMPLEMENTED, -) - -from ...db.repositories.dags import DAGsRepository -from ...models.schemas.dag import DAGIn, DAGOut -from ..dependencies.database import get_repository - -router = APIRouter() -log = logging.getLogger(__name__) - - -@router.get("", response_model=list[DAGOut]) -async def list_dags( - page_token: Optional[str] = Query( - None, description="Requests a specific page of the list results" - ), - page_size: int = Query( - 0, ge=0, description="Maximum number of results to be returned by the server" - ), - order_by: Optional[str] = Query( - None, description="Sorts in ascending order comma-separated fields" - ), - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - - # List is suited to data from a single collection that is bounded in size and not cached - - # Applicable common patterns - # SEE pagination: https://cloud.google.com/apis/design/design_patterns#list_pagination - # SEE sorting https://cloud.google.com/apis/design/design_patterns#sorting_order - - # Applicable naming conventions - # TODO: filter: https://cloud.google.com/apis/design/naming_convention#list_filter_field - # SEE response: https://cloud.google.com/apis/design/naming_convention#list_response - log.debug("%s %s %s", page_token, page_size, order_by) - dags = await dags_repo.list_dags() - return dags - - -@router.get(":batchGet") -async def batch_get_dags(): - raise HTTPException( - status_code=HTTP_501_NOT_IMPLEMENTED, detail="Still not implemented" - ) - - -@router.get(":search") -async def search_dags(): - # A method that takes multiple resource IDs and returns an object for each of those IDs - # Alternative to List for fetching data that does not adhere to List semantics, such as services.search. 
- # https://cloud.google.com/apis/design/standard_methods#list - raise HTTPException( - status_code=HTTP_501_NOT_IMPLEMENTED, detail="Still not implemented" - ) - - -@router.get("/{dag_id}", response_model=DAGOut) -async def get_dag( - dag_id: int, - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - dag = await dags_repo.get_dag(dag_id) - return dag - - -@router.post( - "", - response_model=int, - status_code=HTTP_201_CREATED, - response_description="Successfully created", -) -async def create_dag( - dag: DAGIn = Body(...), - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - assert dag # nosec - - if dag.version == "0.0.0" and dag.key == "foo": - # client-assigned resouce name - raise HTTPException( - status_code=HTTP_409_CONFLICT, - detail=f"DAG {dag.key}:{dag.version} already exists", - ) - - # FIXME: conversion DAG (issue with workbench being json in orm and dict in schema) - dag_id = await dags_repo.create_dag(dag) - # TODO: no need to return since there is not extra info?, perhaps return - return dag_id - - -@router.patch("/{dag_id}", response_model=DAGOut) -async def udpate_dag( - dag_id: int, - dag: DAGIn = Body(None), - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - async with dags_repo.db_engine.begin(): - await dags_repo.update_dag(dag_id, dag) - updated_dag = await dags_repo.get_dag(dag_id) - - return updated_dag - - -@router.put("/{dag_id}", response_model=Optional[DAGOut]) -async def replace_dag( - dag_id: int, - dag: DAGIn = Body(...), - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - await dags_repo.replace_dag(dag_id, dag) - - -@router.delete( - "/{dag_id}", - status_code=HTTP_204_NO_CONTENT, - response_description="Successfully deleted", -) -async def delete_dag( - dag_id: int, - dags_repo: DAGsRepository = Depends(get_repository(DAGsRepository)), -): - # If the Delete method immediately removes the resource, it should return an empty response. - # If the Delete method initiates a long-running operation, it should return the long-running operation. - # If the Delete method only marks the resource as being deleted, it should return the updated resource. 
- await dags_repo.delete_dag(dag_id) diff --git a/services/catalog/src/simcore_service_catalog/api/routes/health.py b/services/catalog/src/simcore_service_catalog/api/routes/health.py deleted file mode 100644 index 39569335684..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/health.py +++ /dev/null @@ -1,10 +0,0 @@ -from datetime import datetime - -from fastapi import APIRouter - -router = APIRouter() - - -@router.get("/", include_in_schema=False) -async def check_service_health(): - return f"{__name__}@{datetime.utcnow().isoformat()}" diff --git a/services/catalog/src/simcore_service_catalog/api/routes/meta.py b/services/catalog/src/simcore_service_catalog/api/routes/meta.py deleted file mode 100644 index 5d984d4cd36..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/meta.py +++ /dev/null @@ -1,16 +0,0 @@ -from fastapi import APIRouter -from pydantic import parse_obj_as - -from ...meta import API_VERSION, API_VTAG -from ...models.schemas.meta import Meta, VersionStr - -router = APIRouter() - - -@router.get("", response_model=Meta) -async def get_service_metadata(): - return Meta( - name=__name__.split(".")[0], - version=parse_obj_as(VersionStr, API_VERSION), - released={API_VTAG: parse_obj_as(VersionStr, API_VERSION)}, - ) diff --git a/services/catalog/src/simcore_service_catalog/api/routes/services.py b/services/catalog/src/simcore_service_catalog/api/routes/services.py deleted file mode 100644 index 4def397a34b..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/services.py +++ /dev/null @@ -1,356 +0,0 @@ -# pylint: disable=too-many-arguments - -import asyncio -import logging -import urllib.parse -from typing import Any, Optional, cast - -from aiocache import cached -from fastapi import APIRouter, Depends, Header, HTTPException, status -from models_library.services import ServiceKey, ServiceType, ServiceVersion -from models_library.services_db import ServiceAccessRightsAtDB, ServiceMetaDataAtDB -from pydantic import ValidationError -from pydantic.types import PositiveInt -from starlette.requests import Request - -from ...db.repositories.groups import GroupsRepository -from ...db.repositories.services import ServicesRepository -from ...models.schemas.constants import ( - DIRECTOR_CACHING_TTL, - LIST_SERVICES_CACHING_TTL, - RESPONSE_MODEL_POLICY, -) -from ...models.schemas.services import ServiceGet, ServiceUpdate -from ...services.director import DirectorApi -from ...services.function_services import is_function_service -from ...utils.requests_decorators import cancellable_request -from ..dependencies.database import get_repository -from ..dependencies.director import get_director_api -from ..dependencies.services import get_service_from_registry - -logger = logging.getLogger(__name__) - -ServicesSelection = set[tuple[str, str]] - - -def _prepare_service_details( - service_in_registry: dict[str, Any], - service_in_db: ServiceMetaDataAtDB, - service_access_rights_in_db: list[ServiceAccessRightsAtDB], - service_owner: Optional[str], -) -> Optional[ServiceGet]: - # compose service from registry and DB - composed_service = service_in_registry - composed_service.update( - service_in_db.dict(exclude_unset=True, exclude={"owner"}), - access_rights={rights.gid: rights for rights in service_access_rights_in_db}, - owner=service_owner if service_owner else None, - ) - - # validate the service - validated_service = None - try: - validated_service = ServiceGet(**composed_service) - except ValidationError as exc: - 
logger.warning( - "could not validate service [%s:%s]: %s", - composed_service.get("key"), - composed_service.get("version"), - exc, - ) - return validated_service - - -def _build_cache_key(fct, *_, **kwargs): - return f"{fct.__name__}_{kwargs['user_id']}_{kwargs['x_simcore_products_name']}_{kwargs['details']}" - - -# -# Routes -# - -router = APIRouter() - - -# NOTE: this call is pretty expensive and can be called several times -# (when e2e runs or by the webserver when listing projects) therefore -# a cache is setup here -@router.get("", response_model=list[ServiceGet], **RESPONSE_MODEL_POLICY) -@cancellable_request -@cached( - ttl=LIST_SERVICES_CACHING_TTL, - key_builder=_build_cache_key, -) -async def list_services( - request: Request, # pylint:disable=unused-argument - user_id: PositiveInt, - details: Optional[bool] = True, - director_client: DirectorApi = Depends(get_director_api), - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - x_simcore_products_name: str = Header(...), -): - # Access layer - user_groups = await groups_repository.list_user_groups(user_id) - if not user_groups: - # deny access - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to access the services", - ) - - # now get the executable or writable services - services_in_db = { - (s.key, s.version): s - for s in await services_repo.list_services( - gids=[group.gid for group in user_groups], - execute_access=True, - write_access=True, - combine_access_with_and=False, - product_name=x_simcore_products_name, - ) - } - # Non-detailed views from the services_repo database - if not details: - # only return a stripped down version - # FIXME: add name, ddescription, type, etc... - # NOTE: here validation is not necessary since key,version were already validated - # in terms of time, this takes the most - services_overview = [ - ServiceGet.construct( - key=key, - version=version, - name="nodetails", - description="nodetails", - type=ServiceType.COMPUTATIONAL, - authors=[{"name": "nodetails", "email": "nodetails@nodetails.com"}], - contact="nodetails@nodetails.com", - inputs={}, - outputs={}, - deprecated=services_in_db[(key, version)].deprecated, - ) - for key, version in services_in_db - ] - return services_overview - - # caching this steps brings down the time to generate it at the expense of being sometimes a bit out of date - @cached(ttl=DIRECTOR_CACHING_TTL) - async def cached_registry_services() -> dict[str, Any]: - return cast(dict[str, Any], await director_client.get("/services")) - - ( - services_in_registry, - services_access_rights, - services_owner_emails, - ) = await asyncio.gather( - cached_registry_services(), - services_repo.list_services_access_rights( - key_versions=services_in_db, - product_name=x_simcore_products_name, - ), - groups_repository.list_user_emails_from_gids( - {s.owner for s in services_in_db.values() if s.owner} - ), - ) - - # NOTE: for the details of the services: - # 1. we get all the services from the director-v0 (TODO: move the registry to the catalog) - # 2. we filter the services using the visible ones from the db - # 3. 
then we compose the final service using as a base the registry service, overriding with the same - # service from the database, adding also the access rights and the owner as email address instead of gid - # NOTE: This step takes the bulk of the time to generate the list - services_details = await asyncio.gather( - *[ - asyncio.get_event_loop().run_in_executor( - None, - _prepare_service_details, - s, - services_in_db[s["key"], s["version"]], - services_access_rights[s["key"], s["version"]], - services_owner_emails.get( - services_in_db[s["key"], s["version"]].owner or 0 - ), - ) - for s in ( - request.app.state.frontend_services_catalog + services_in_registry - ) - if (s.get("key"), s.get("version")) in services_in_db - ] - ) - return [s for s in services_details if s is not None] - - -@router.get( - "/{service_key:path}/{service_version}", - response_model=ServiceGet, - **RESPONSE_MODEL_POLICY, -) -async def get_service( - user_id: int, - service: ServiceGet = Depends(get_service_from_registry), - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - x_simcore_products_name: str = Header(None), -): - # get the user groups - user_groups = await groups_repository.list_user_groups(user_id) - if not user_groups: - # deny access - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to access the service", - ) - # check the user has access to this service and to which extent - service_in_db = await services_repo.get_service( - service.key, - service.version, - gids=[group.gid for group in user_groups], - write_access=True, - product_name=x_simcore_products_name, - ) - if service_in_db: - # we have full access, let's add the access to the output - service_access_rights: list[ - ServiceAccessRightsAtDB - ] = await services_repo.get_service_access_rights( - service.key, service.version, product_name=x_simcore_products_name - ) - service.access_rights = {rights.gid: rights for rights in service_access_rights} - else: - # check if we have executable rights - service_in_db = await services_repo.get_service( - service.key, - service.version, - gids=[group.gid for group in user_groups], - execute_access=True, - product_name=x_simcore_products_name, - ) - if not service_in_db: - # we have no access here - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have insufficient rights to access the service", - ) - # access is allowed, override some of the values with what is in the db - service = service.copy( - update=service_in_db.dict(exclude_unset=True, exclude={"owner"}) - ) - # the owner shall be converted to an email address - if service_in_db.owner: - service.owner = await groups_repository.get_user_email_from_gid( - service_in_db.owner - ) - - return service - - -@router.patch( - "/{service_key:path}/{service_version}", - response_model=ServiceGet, - **RESPONSE_MODEL_POLICY, -) -async def update_service( - # pylint: disable=too-many-arguments - user_id: int, - service_key: ServiceKey, - service_version: ServiceVersion, - updated_service: ServiceUpdate, - director_client: DirectorApi = Depends(get_director_api), - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - x_simcore_products_name: str = Header(None), -): - if is_function_service(service_key): - # NOTE: this is a temporary decision after 
discussing with OM - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="Cannot update front-end services", - ) - - # check the service exists - await director_client.get( - f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}" - ) - # the director client already raises an exception if not found - - # get the user groups - user_groups = await groups_repository.list_user_groups(user_id) - if not user_groups: - # deny access - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to access the service", - ) - # check the user has write access to this service - writable_service = await services_repo.get_service( - service_key, - service_version, - gids=[group.gid for group in user_groups], - write_access=True, - product_name=x_simcore_products_name, - ) - if not writable_service: - # deny access - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to modify the service", - ) - - # let's modify the service then - await services_repo.update_service( - ServiceMetaDataAtDB( - key=service_key, - version=service_version, - **updated_service.dict(exclude_unset=True), - ) - ) - # let's modify the service access rights (they can be added/removed/modified) - current_gids_in_db = [ - r.gid - for r in await services_repo.get_service_access_rights( - service_key, service_version, product_name=x_simcore_products_name - ) - ] - - if updated_service.access_rights: - # start by updating/inserting new entries - new_access_rights = [ - ServiceAccessRightsAtDB( - key=service_key, - version=service_version, - gid=gid, - execute_access=rights.execute_access, - write_access=rights.write_access, - product_name=x_simcore_products_name, - ) - for gid, rights in updated_service.access_rights.items() - ] - await services_repo.upsert_service_access_rights(new_access_rights) - - # then delete the ones that were removed - removed_gids = [ - gid - for gid in current_gids_in_db - if gid not in updated_service.access_rights - ] - deleted_access_rights = [ - ServiceAccessRightsAtDB( - key=service_key, - version=service_version, - gid=gid, - product_name=x_simcore_products_name, - ) - for gid in removed_gids - ] - await services_repo.delete_service_access_rights(deleted_access_rights) - - # now return the service - return await get_service( - user_id=user_id, - service=await get_service_from_registry( - service_key, service_version, director_client - ), - groups_repository=groups_repository, - services_repo=services_repo, - x_simcore_products_name=x_simcore_products_name, - ) diff --git a/services/catalog/src/simcore_service_catalog/api/routes/services_ports.py b/services/catalog/src/simcore_service_catalog/api/routes/services_ports.py deleted file mode 100644 index de6148dc5e6..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/services_ports.py +++ /dev/null @@ -1,44 +0,0 @@ -import logging - -from fastapi import APIRouter, Depends - -from ...models.schemas.constants import RESPONSE_MODEL_POLICY -from ...models.schemas.services import ServiceGet -from ...models.schemas.services_ports import ServicePortGet -from ..dependencies.services import ( - AccessInfo, - check_service_read_access, - get_service_from_registry, -) - -logger = logging.getLogger(__name__) - - -# -# Routes ----------------------------------------------------------------------------------------------- -# - -router = APIRouter() - - -@router.get( - "/{service_key:path}/{service_version}/ports", - 
response_model=list[ServicePortGet], - description="Returns a list of service ports starting with inputs and followed by outputs", - **RESPONSE_MODEL_POLICY, -) -async def list_service_ports( - _user: AccessInfo = Depends(check_service_read_access), - service: ServiceGet = Depends(get_service_from_registry), -): - ports: list[ServicePortGet] = [] - - if service.inputs: - for name, port in service.inputs.items(): - ports.append(ServicePortGet.from_service_io("input", name, port)) - - if service.outputs: - for name, port in service.outputs.items(): - ports.append(ServicePortGet.from_service_io("output", name, port)) - - return ports diff --git a/services/catalog/src/simcore_service_catalog/api/routes/services_resources.py b/services/catalog/src/simcore_service_catalog/api/routes/services_resources.py deleted file mode 100644 index 8f5c1051bf2..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/services_resources.py +++ /dev/null @@ -1,285 +0,0 @@ -import logging -import urllib.parse -from copy import deepcopy -from typing import Any, Final, Optional, cast - -import yaml -from fastapi import APIRouter, Depends, HTTPException, status -from models_library.docker import DockerGenericTag -from models_library.service_settings_labels import ( - ComposeSpecLabel, - SimcoreServiceSettingLabelEntry, -) -from models_library.services import ServiceKey, ServiceVersion -from models_library.services_resources import ( - BootMode, - ImageResources, - ResourcesDict, - ServiceResourcesDict, - ServiceResourcesDictHelpers, -) -from models_library.utils.docker_compose import replace_env_vars_in_compose_spec -from pydantic import parse_obj_as, parse_raw_as - -from ...db.repositories.services import ServicesRepository -from ...models.domain.group import GroupAtDB -from ...models.schemas.constants import ( - RESPONSE_MODEL_POLICY, - SIMCORE_SERVICE_SETTINGS_LABELS, -) -from ...services.director import DirectorApi -from ...services.function_services import is_function_service -from ...utils.service_resources import ( - merge_service_resources_with_user_specs, - parse_generic_resource, -) -from ..dependencies.database import get_repository -from ..dependencies.director import get_director_api -from ..dependencies.services import get_default_service_resources -from ..dependencies.user_groups import list_user_groups - -router = APIRouter() -logger = logging.getLogger(__name__) - -SIMCORE_SERVICE_COMPOSE_SPEC_LABEL: Final[str] = "simcore.service.compose-spec" -_DEPRECATED_RESOURCES: Final[list[str]] = ["MPI"] -_BOOT_MODE_TO_RESOURCE_NAME_MAP: Final[dict[str, str]] = {"MPI": "MPI", "GPU": "VRAM"} - - -def _compute_service_available_boot_modes( - settings: list[SimcoreServiceSettingLabelEntry], - service_key: ServiceKey, - service_version: ServiceVersion, -) -> list[BootMode]: - """returns the service boot-modes. - currently this uses the simcore.service.settings labels if available for backwards compatiblity. - if MPI is found, then boot mode is set to MPI, if GPU is found then boot mode is set to GPU, else to CPU. - In the future a dedicated label might be used, to add openMP for example. and to not abuse the resources of a service. 
- Also these will be used in a project to allow the user to choose among different boot modes - """ - - resource_entries = filter(lambda entry: entry.name.lower() == "resources", settings) - generic_resources = {} - for entry in resource_entries: - if not isinstance(entry.value, dict): - logger.warning( - "resource %s for %s got invalid type", - f"{entry.dict()!r}", - f"{service_key}:{service_version}", - ) - continue - generic_resources |= parse_generic_resource( - entry.value.get("Reservations", {}).get("GenericResources", []), - ) - # currently these are unique boot modes - for mode in BootMode: - if ( - _BOOT_MODE_TO_RESOURCE_NAME_MAP.get(mode.value, mode.value) - in generic_resources - ): - return [mode] - - return [BootMode.CPU] - - -def _remove_deprecated_resources(resources: ResourcesDict) -> ResourcesDict: - for res_name in _DEPRECATED_RESOURCES: - resources.pop(res_name, None) - return resources - - -def _resources_from_settings( - settings: list[SimcoreServiceSettingLabelEntry], - default_service_resources: ResourcesDict, - service_key: ServiceKey, - service_version: ServiceVersion, -) -> ResourcesDict: - # filter resource entries - resource_entries = filter(lambda entry: entry.name.lower() == "resources", settings) - # get the service resources - service_resources = deepcopy(default_service_resources) - for entry in resource_entries: - if not isinstance(entry.value, dict): - logger.warning( - "resource %s for %s got invalid type", - f"{entry.dict()!r}", - f"{service_key}:{service_version}", - ) - continue - if nano_cpu_limit := entry.value.get("Limits", {}).get("NanoCPUs"): - service_resources["CPU"].limit = nano_cpu_limit / 1.0e09 - if nano_cpu_reservation := entry.value.get("Reservations", {}).get("NanoCPUs"): - # NOTE: if the limit was below, it needs to be increased as well - service_resources["CPU"].limit = max( - service_resources["CPU"].limit, nano_cpu_reservation / 1.0e09 - ) - service_resources["CPU"].reservation = nano_cpu_reservation / 1.0e09 - if ram_limit := entry.value.get("Limits", {}).get("MemoryBytes"): - service_resources["RAM"].limit = ram_limit - if ram_reservation := entry.value.get("Reservations", {}).get("MemoryBytes"): - # NOTE: if the limit was below, it needs to be increased as well - service_resources["RAM"].limit = max( - service_resources["RAM"].limit, ram_reservation - ) - service_resources["RAM"].reservation = ram_reservation - - service_resources |= parse_generic_resource( - entry.value.get("Reservations", {}).get("GenericResources", []), - ) - - return _remove_deprecated_resources(service_resources) - - -async def _get_service_labels( - director_client: DirectorApi, key: ServiceKey, version: ServiceVersion -) -> Optional[dict[str, Any]]: - try: - service_labels = cast( - dict[str, Any], - await director_client.get( - f"/services/{urllib.parse.quote_plus(key)}/{version}/labels" - ), - ) - logger.debug( - "received for %s %s", - f"/services/{urllib.parse.quote_plus(key)}/{version}/labels", - f"{service_labels=}", - ) - return service_labels - except HTTPException as err: - # NOTE: some services will fail validation, eg: - # `busybox:latest` or `traefik:latest` because - # the director-v0 cannot extract labels from them - # and will fail validating the key or the version - if err.status_code == status.HTTP_400_BAD_REQUEST: - return None - raise err - - -def _get_service_settings( - labels: dict[str, Any] -) -> list[SimcoreServiceSettingLabelEntry]: - service_settings = parse_raw_as( - list[SimcoreServiceSettingLabelEntry], - 
labels.get(SIMCORE_SERVICE_SETTINGS_LABELS, "[]"), - ) - logger.debug("received %s", f"{service_settings=}") - return service_settings - - -@router.get( - "/{service_key:path}/{service_version}/resources", - response_model=ServiceResourcesDict, - **RESPONSE_MODEL_POLICY, -) -async def get_service_resources( - service_key: ServiceKey, - service_version: ServiceVersion, - director_client: DirectorApi = Depends(get_director_api), - default_service_resources: ResourcesDict = Depends(get_default_service_resources), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - user_groups: list[GroupAtDB] = Depends(list_user_groups), -) -> ServiceResourcesDict: - image_version = parse_obj_as(DockerGenericTag, f"{service_key}:{service_version}") - if is_function_service(service_key): - return ServiceResourcesDictHelpers.create_from_single_service( - image_version, default_service_resources - ) - - service_labels: Optional[dict[str, Any]] = await _get_service_labels( - director_client, service_key, service_version - ) - - if not service_labels: - return ServiceResourcesDictHelpers.create_from_single_service( - image_version, default_service_resources - ) - - service_spec: Optional[ComposeSpecLabel] = parse_raw_as( - Optional[ComposeSpecLabel], - service_labels.get(SIMCORE_SERVICE_COMPOSE_SPEC_LABEL, "null"), - ) - logger.debug("received %s", f"{service_spec=}") - - if service_spec is None: - # no compose specifications -> single service - service_settings = _get_service_settings(service_labels) - service_resources = _resources_from_settings( - service_settings, default_service_resources, service_key, service_version - ) - service_boot_modes = _compute_service_available_boot_modes( - service_settings, service_key, service_version - ) - - user_specific_service_specs = await services_repo.get_service_specifications( - service_key, - service_version, - tuple(user_groups), - allow_use_latest_service_version=True, - ) - if user_specific_service_specs and user_specific_service_specs.service: - service_resources = merge_service_resources_with_user_specs( - service_resources, user_specific_service_specs.service - ) - - return ServiceResourcesDictHelpers.create_from_single_service( - image_version, service_resources, service_boot_modes - ) - - # compose specifications available, potentially multiple services - stringified_service_spec = replace_env_vars_in_compose_spec( - service_spec=service_spec, - replace_simcore_registry="", - replace_service_version=service_version, - ) - full_service_spec: ComposeSpecLabel = yaml.safe_load(stringified_service_spec) - - service_to_resources: ServiceResourcesDict = parse_obj_as(ServiceResourcesDict, {}) - - for spec_key, spec_data in full_service_spec["services"].items(): - # image can be: - # - `/simcore/service/dynamic/service-name:0.0.1` - # - `traefik:0.0.1` - # leading slashes must be stripped - image = spec_data["image"].lstrip("/") - key, version = image.split(":") - spec_service_labels: Optional[dict[str, Any]] = await _get_service_labels( - director_client, key, version - ) - - if not spec_service_labels: - spec_service_resources: ResourcesDict = default_service_resources - service_boot_modes = [BootMode.CPU] - else: - spec_service_settings = _get_service_settings(spec_service_labels) - spec_service_resources: ResourcesDict = _resources_from_settings( - spec_service_settings, - default_service_resources, - service_key, - service_version, - ) - service_boot_modes = _compute_service_available_boot_modes( - spec_service_settings, 
service_key, service_version - ) - user_specific_service_specs = ( - await services_repo.get_service_specifications( - key, - version, - tuple(user_groups), - allow_use_latest_service_version=True, - ) - ) - if user_specific_service_specs and user_specific_service_specs.service: - spec_service_resources = merge_service_resources_with_user_specs( - spec_service_resources, user_specific_service_specs.service - ) - - service_to_resources[spec_key] = ImageResources.parse_obj( - { - "image": image, - "resources": spec_service_resources, - "boot_modes": service_boot_modes, - } - ) - - return service_to_resources diff --git a/services/catalog/src/simcore_service_catalog/api/routes/services_specifications.py b/services/catalog/src/simcore_service_catalog/api/routes/services_specifications.py deleted file mode 100644 index 9c04c8402c6..00000000000 --- a/services/catalog/src/simcore_service_catalog/api/routes/services_specifications.py +++ /dev/null @@ -1,68 +0,0 @@ -import logging - -from fastapi import APIRouter, Depends, HTTPException, Query, status -from models_library.services import ServiceKey, ServiceVersion -from models_library.users import UserID - -from ...db.repositories.groups import GroupsRepository -from ...db.repositories.services import ServicesRepository -from ...models.schemas.constants import RESPONSE_MODEL_POLICY -from ...models.schemas.services_specifications import ( - ServiceSpecifications, - ServiceSpecificationsGet, -) -from ...services.function_services import is_function_service -from ..dependencies.database import get_repository -from ..dependencies.services import get_default_service_specifications - -router = APIRouter() -logger = logging.getLogger(__name__) - - -@router.get( - "/{service_key:path}/{service_version}/specifications", - response_model=ServiceSpecificationsGet, - **RESPONSE_MODEL_POLICY, -) -async def get_service_specifications( - user_id: UserID, - service_key: ServiceKey, - service_version: ServiceVersion, - strict: bool = Query( - False, - description="if True only the version specs will be retrieved, if False the latest version will be used instead", - ), - groups_repository: GroupsRepository = Depends(get_repository(GroupsRepository)), - services_repo: ServicesRepository = Depends(get_repository(ServicesRepository)), - default_service_specifications: ServiceSpecifications = Depends( - get_default_service_specifications - ), -): - logger.debug("getting specifications for '%s:%s'", service_key, service_version) - - if is_function_service(service_key): - # There is no specification for these, return empty specs - return ServiceSpecifications() - - # Access layer - user_groups = await groups_repository.list_user_groups(user_id) - if not user_groups: - # deny access, but this should not happen - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You have unsufficient rights to access the services", - ) - - service_specs = await services_repo.get_service_specifications( - service_key, - service_version, - tuple(user_groups), - allow_use_latest_service_version=not strict, - ) - - if not service_specs: - # nothing found, let's return the default then - service_specs = default_service_specifications.copy() - - logger.debug("returning %s", f"{service_specs=}") - return service_specs diff --git a/services/catalog/src/simcore_service_catalog/api/rpc/__init__.py b/services/catalog/src/simcore_service_catalog/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/services/catalog/src/simcore_service_catalog/api/rpc/_services.py b/services/catalog/src/simcore_service_catalog/api/rpc/_services.py new file mode 100644 index 00000000000..73d5bc3e562 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rpc/_services.py @@ -0,0 +1,369 @@ +import functools +import logging +from typing import cast + +from fastapi import FastAPI +from models_library.api_schemas_catalog.services import ( + MyServiceGet, + PageRpcLatestServiceGet, + PageRpcServiceRelease, + PageRpcServiceSummary, + ServiceGetV2, + ServiceListFilters, + ServiceUpdateV2, +) +from models_library.api_schemas_catalog.services_ports import ServicePortGet +from models_library.products import ProductName +from models_library.rest_pagination import PageOffsetInt +from models_library.rpc_pagination import DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, PageLimitInt +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter, ValidationError, validate_call +from pyinstrument import Profiler +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.catalog.errors import ( + CatalogForbiddenError, + CatalogItemNotFoundError, +) + +from ...models.services_db import ServiceDBFilters +from ...repository.groups import GroupsRepository +from ...repository.services import ServicesRepository +from ...service import catalog_services +from .._dependencies.director import get_director_client + +_logger = logging.getLogger(__name__) + +router = RPCRouter() + + +def _profile_rpc_call(coro): + @functools.wraps(coro) + async def _wrapper(app: FastAPI, **kwargs): + profile_enabled = ( + (settings := getattr(app.state, "settings", None)) + and settings.CATALOG_PROFILING + and _logger.isEnabledFor(logging.INFO) + ) + if profile_enabled: + with Profiler() as profiler: + result = await coro(app, **kwargs) + profiler_output = profiler.output_text(unicode=True, color=False) + _logger.info("[PROFILING]: %s", profiler_output) + return result + + # bypasses w/o profiling + return await coro(app, **kwargs) + + return _wrapper + + +@router.expose(reraise_if_error_type=(CatalogForbiddenError, ValidationError)) +@_profile_rpc_call +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_services_paginated( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcLatestServiceGet: + assert app.state.engine # nosec + + total_count, items = await catalog_services.list_latest_catalog_services( + repo=ServicesRepository(app.state.engine), + director_api=get_director_client(app), + product_name=product_name, + user_id=user_id, + limit=limit, + offset=offset, + filters=TypeAdapter(ServiceDBFilters | None).validate_python( + filters, from_attributes=True + ), + ) + + assert len(items) <= total_count # nosec + assert len(items) <= limit # nosec + + return cast( + PageRpcLatestServiceGet, + PageRpcLatestServiceGet.create( + items, + total=total_count, + limit=limit, + offset=offset, + ), + ) + + +@router.expose( + reraise_if_error_type=( + CatalogItemNotFoundError, + CatalogForbiddenError, + ValidationError, + ) +) +@log_decorator(_logger, level=logging.DEBUG) +@_profile_rpc_call +@validate_call(config={"arbitrary_types_allowed": True}) +async def get_service( + app: FastAPI, + *, + product_name: 
ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> ServiceGetV2: + assert app.state.engine # nosec + + service = await catalog_services.get_catalog_service( + repo=ServicesRepository(app.state.engine), + director_api=get_director_client(app), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + + assert service.key == service_key # nosec + assert service.version == service_version # nosec + + return service + + +@router.expose( + reraise_if_error_type=( + CatalogItemNotFoundError, + CatalogForbiddenError, + ValidationError, + ) +) +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def update_service( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + update: ServiceUpdateV2, +) -> ServiceGetV2: + """Updates editable fields of a service""" + + assert app.state.engine # nosec + + service = await catalog_services.update_catalog_service( + repo=ServicesRepository(app.state.engine), + director_api=get_director_client(app), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + update=update, + ) + + assert service.key == service_key # nosec + assert service.version == service_version # nosec + + return service + + +@router.expose( + reraise_if_error_type=( + CatalogItemNotFoundError, + CatalogForbiddenError, + ValidationError, + ) +) +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def check_for_service( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> None: + """Checks whether service exists and can be accessed, otherwise it raise""" + assert app.state.engine # nosec + + await catalog_services.check_catalog_service_permissions( + repo=ServicesRepository(app.state.engine), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + permission="read", + ) + + +@router.expose(reraise_if_error_type=(CatalogForbiddenError, ValidationError)) +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def batch_get_my_services( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + ids: list[ + tuple[ + ServiceKey, + ServiceVersion, + ] + ], +) -> list[MyServiceGet]: + assert app.state.engine # nosec + + services_batch = await catalog_services.batch_get_user_services( + repo=ServicesRepository(app.state.engine), + groups_repo=GroupsRepository(app.state.engine), + product_name=product_name, + user_id=user_id, + ids=ids, + ) + + assert [(sv.key, sv.release.version) for sv in services_batch] == ids # nosec + + return services_batch + + +@router.expose(reraise_if_error_type=(ValidationError,)) +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_my_service_history_latest_first( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcServiceRelease: + """sorts service releases by version (latest first)""" + assert app.state.engine # nosec + + total_count, items = await 
catalog_services.list_user_service_release_history( + repo=ServicesRepository(app.state.engine), + product_name=product_name, + user_id=user_id, + service_key=service_key, + pagination_limit=limit, + pagination_offset=offset, + filters=TypeAdapter(ServiceDBFilters | None).validate_python( + filters, from_attributes=True + ), + ) + + assert len(items) <= total_count # nosec + assert len(items) <= limit # nosec + + return cast( + PageRpcServiceRelease, + PageRpcServiceRelease.create( + items, + total=total_count, + limit=limit, + offset=offset, + ), + ) + + +@router.expose( + reraise_if_error_type=( + CatalogItemNotFoundError, + CatalogForbiddenError, + ValidationError, + ) +) +@log_decorator(_logger, level=logging.DEBUG) +@validate_call(config={"arbitrary_types_allowed": True}) +async def get_service_ports( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[ServicePortGet]: + """Get service ports (inputs and outputs) for a specific service version""" + assert app.state.engine # nosec + + service_ports = await catalog_services.get_user_services_ports( + repo=ServicesRepository(app.state.engine), + director_api=get_director_client(app), + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + + return [ + ServicePortGet.from_domain_model( + kind=port.kind, + key=port.key, + port=port.port, + ) + for port in service_ports + ] + + +@router.expose(reraise_if_error_type=(CatalogForbiddenError, ValidationError)) +@_profile_rpc_call +@validate_call(config={"arbitrary_types_allowed": True}) +async def list_all_services_summaries_paginated( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt = DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + offset: PageOffsetInt = 0, + filters: ServiceListFilters | None = None, +) -> PageRpcServiceSummary: + """Lists all services with pagination, including all versions of each service. + + Returns a lightweight summary view of services for better performance compared to + full service details. This is useful for listings where complete details aren't needed. 
+ + Args: + app: FastAPI application + product_name: Product name + user_id: User ID + limit: Maximum number of items to return + offset: Number of items to skip + filters: Optional filters to apply + + Returns: + Paginated list of all services as summaries + """ + assert app.state.engine # nosec + + total_count, items = await catalog_services.list_all_service_summaries( + repo=ServicesRepository(app.state.engine), + director_api=get_director_client(app), + product_name=product_name, + user_id=user_id, + limit=limit, + offset=offset, + filters=TypeAdapter(ServiceDBFilters | None).validate_python( + filters, from_attributes=True + ), + ) + + assert len(items) <= total_count # nosec + assert len(items) <= limit if limit is not None else True # nosec + + return cast( + PageRpcServiceSummary, + PageRpcServiceSummary.create( + items, + total=total_count, + limit=limit, + offset=offset, + ), + ) diff --git a/services/catalog/src/simcore_service_catalog/api/rpc/events.py b/services/catalog/src/simcore_service_catalog/api/rpc/events.py new file mode 100644 index 00000000000..dd8f879fb09 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/api/rpc/events.py @@ -0,0 +1,21 @@ +import logging +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from models_library.api_schemas_catalog import CATALOG_RPC_NAMESPACE + +from ...clients.rabbitmq import get_rabbitmq_rpc_server +from . import _services + +_logger = logging.getLogger(__name__) + + +async def rpc_api_lifespan(app: FastAPI) -> AsyncIterator[State]: + rpc_server = get_rabbitmq_rpc_server(app) + await rpc_server.register_router(_services.router, CATALOG_RPC_NAMESPACE, app) + try: + yield {} + finally: + # No specific cleanup required for now + pass diff --git a/services/catalog/src/simcore_service_catalog/cli.py b/services/catalog/src/simcore_service_catalog/cli.py new file mode 100644 index 00000000000..0d4fbf5107b --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/cli.py @@ -0,0 +1,95 @@ +import logging +import os + +import typer +from settings_library.http_client_request import ClientRequestSettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.utils_cli import ( + create_settings_command, + create_version_callback, + print_as_envfile, +) + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings, DirectorSettings + +_logger = logging.getLogger(__name__) + +# NOTE: 'main' variable is referred in the setup's entrypoint! +main = typer.Typer(name=PROJECT_NAME) + +main.command()( + create_settings_command(settings_cls=ApplicationSettings, logger=_logger) +) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def run(): + """Runs application""" + typer.secho("Sorry, this entrypoint is intentionally disabled. 
Use instead") + typer.secho( + "$ uvicorn simcore_service_catalog.main:the_app", + fg=typer.colors.BLUE, + ) + + +@main.command() +def echo_dotenv(ctx: typer.Context, *, minimal: bool = True) -> None: + """Generates and displays a valid environment variables file (also known as dot-envfile) + + Usage: + $ simcore-service echo-dotenv > .env + $ cat .env + $ set -o allexport; source .env; set +o allexport + """ + assert ctx # nosec + + # NOTE: we normally DO NOT USE `os.environ` to capture env vars but this is a special case + # The idea here is to have a command that can generate a **valid** `.env` file that can be used + # to initialized the app. For that reason we fill required fields of the `ApplicationSettings` with + # "fake" but valid values (e.g. generating a password or adding tags as `replace-with-api-key). + # Nonetheless, if the caller of this CLI has already some **valid** env vars in the environment we want to use them ... + # and that is why we use `os.environ`. + + settings = ApplicationSettings.create_from_envs( + CATALOG_POSTGRES=os.environ.get( + "CATALOG_POSTGRES", + PostgresSettings.create_from_envs( + POSTGRES_HOST=os.environ.get( + "POSTGRES_HOST", "replace-with-postgres-host" + ), + POSTGRES_USER=os.environ.get( + "POSTGRES_USER", "replace-with-postgres-user" + ), + POSTGRES_DB=os.environ.get("POSTGRES_DB", "replace-with-postgres-db"), + POSTGRES_PASSWORD=os.environ.get( + "POSTGRES_PASSWORD", "replace-with-postgres-password" + ), + ), + ), + CATALOG_RABBITMQ=os.environ.get( + "CATALOG_RABBITMQ", + RabbitSettings.create_from_envs( + RABBIT_HOST=os.environ.get("RABBIT_HOST", "replace-with-rabbit-host"), + RABBIT_SECURE=os.environ.get("RABBIT_SECURE", "True"), + RABBIT_USER=os.environ.get("RABBIT_USER", "replace-with-rabbit-user"), + RABBIT_PASSWORD=os.environ.get( + "RABBIT_PASSWORD", "replace-with-rabbit-password" + ), + ), + ), + CATALOG_DIRECTOR=DirectorSettings.create_from_envs( + DIRECTOR_HOST=os.environ.get("DIRECTOR_HOST", "fake-director") + ), + CATALOG_CLIENT_REQUEST=ClientRequestSettings.create_from_envs(), + ) + + print_as_envfile( + settings, + compact=False, + verbose=True, + show_secrets=True, + exclude_unset=minimal, + ) diff --git a/services/catalog/src/simcore_service_catalog/clients/__init__.py b/services/catalog/src/simcore_service_catalog/clients/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/catalog/src/simcore_service_catalog/clients/director.py b/services/catalog/src/simcore_service_catalog/clients/director.py new file mode 100644 index 00000000000..4d9fe37a3df --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/clients/director.py @@ -0,0 +1,323 @@ +import asyncio +import functools +import logging +import urllib.parse +from collections.abc import AsyncIterator, Awaitable, Callable +from contextlib import suppress +from pprint import pformat +from typing import Any, Final + +import httpx +from common_library.json_serialization import json_dumps, json_loads +from fastapi import FastAPI, HTTPException +from fastapi_lifespan_manager import State +from models_library.api_schemas_directorv2.services import ServiceExtras +from models_library.services_metadata_published import ServiceMetaDataPublished +from models_library.services_types import ServiceKey, ServiceVersion +from pydantic import NonNegativeInt, TypeAdapter +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from servicelib.logging_utils import log_catch, log_context +from starlette import status +from tenacity.asyncio 
import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_random + +from ..core.settings import ApplicationSettings, DirectorSettings +from ..errors import DirectorUnresponsiveError + +_logger = logging.getLogger(__name__) + +_MINUTE: Final[NonNegativeInt] = 60 + + +_SERVICE_RUNTIME_SETTINGS: Final[str] = "simcore.service.settings" +_ORG_LABELS_TO_SCHEMA_LABELS: Final[dict[str, str]] = { + "org.label-schema.build-date": "build_date", + "org.label-schema.vcs-ref": "vcs_ref", + "org.label-schema.vcs-url": "vcs_url", +} + +_CONTAINER_SPEC_ENTRY_NAME = "ContainerSpec".lower() +_RESOURCES_ENTRY_NAME = "Resources".lower() + + +def _validate_kind(entry_to_validate: dict[str, Any], kind_name: str): + for element in ( + entry_to_validate.get("value", {}) + .get("Reservations", {}) + .get("GenericResources", []) + ): + if element.get("DiscreteResourceSpec", {}).get("Kind") == kind_name: + return True + return False + + +_director_startup_retry_policy: dict[str, Any] = { + # Random service startup order in swarm. + # wait_random prevents saturating other services during startup + # + "wait": wait_random(2, 5), + "stop": stop_after_delay(2 * _MINUTE), + "before_sleep": before_sleep_log(_logger, logging.WARNING), + "reraise": True, +} + + +def _return_data_or_raise_error( + request_func: Callable[..., Awaitable[httpx.Response]], +) -> Callable[..., Awaitable[list[Any] | dict[str, Any]]]: + """ + Creates a context for safe inter-process communication (IPC) + """ + assert asyncio.iscoroutinefunction(request_func) + + def _unenvelope_or_raise_error( + resp: httpx.Response, + ) -> list[Any] | dict[str, Any]: + """ + Director responses are enveloped + If successful response, we un-envelop it and return data as a dict + If error, it raises an HTTPException + """ + body = resp.json() + + assert "data" in body or "error" in body # nosec + data = body.get("data") + error = body.get("error") + + if httpx.codes.is_server_error(resp.status_code): + _logger.error( + "director error %d [%s]: %s", + resp.status_code, + resp.reason_phrase, + error, + ) + raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) + + if httpx.codes.is_client_error(resp.status_code): + msg = error or resp.reason_phrase + raise HTTPException(resp.status_code, detail=msg) + + if isinstance(data, list): + return data + + return data or {} + + @functools.wraps(request_func) + async def request_wrapper( + zelf: "DirectorClient", path: str, *args, **kwargs + ) -> list[Any] | dict[str, Any]: + normalized_path = path.lstrip("/") + try: + resp = await request_func(zelf, path=normalized_path, *args, **kwargs) + except Exception as err: + _logger.exception( + "Failed request %s to %s%s", + request_func.__name__, + zelf.client.base_url, + normalized_path, + ) + raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err + + return _unenvelope_or_raise_error(resp) + + return request_wrapper + + +class DirectorClient: + """ + - wrapper around thin-client to simplify director's API + - sets endpoint upon construction + - MIME type: application/json + - processes responses, returning data or raising formatted HTTP exception + + SEE services/catalog/src/simcore_service_catalog/api/dependencies/director.py + """ + + def __init__(self, base_url: str, app: FastAPI): + settings: ApplicationSettings = app.state.settings + + assert settings.CATALOG_CLIENT_REQUEST # nosec + self.client = httpx.AsyncClient( + base_url=base_url, 
timeout=settings.CATALOG_CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, + ) + if settings.CATALOG_TRACING: + setup_httpx_client_tracing(self.client) + + assert settings.CATALOG_DIRECTOR # nosec + self.vtag = settings.CATALOG_DIRECTOR.DIRECTOR_VTAG + + self.default_max_memory = settings.DIRECTOR_DEFAULT_MAX_MEMORY + self.default_max_nano_cpus = settings.DIRECTOR_DEFAULT_MAX_NANO_CPUS + + async def close(self): + await self.client.aclose() + + # + # Low level API + # + + @_return_data_or_raise_error + async def get(self, path: str) -> httpx.Response: + # temp solution: default timeout increased to 20" + return await self.client.get(path, timeout=20.0) + + # + # High level API + # + + async def is_responsive(self) -> bool: + try: + _logger.debug("checking director-v0 is responsive") + health_check_path: str = "/" + response = await self.client.head(health_check_path, timeout=1.0) + response.raise_for_status() + return True + except (httpx.HTTPStatusError, httpx.RequestError, httpx.TimeoutException): + return False + + async def get_service( + self, service_key: ServiceKey, service_version: ServiceVersion + ) -> ServiceMetaDataPublished: + data = await self.get( + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}" + ) + # NOTE: the fact that it returns a list of one element is a defect of the director API + assert isinstance(data, list) # nosec + assert len(data) == 1 # nosec + return ServiceMetaDataPublished.model_validate(data[0]) + + async def get_service_labels( + self, + service_key: ServiceKey, + service_version: ServiceVersion, + ) -> dict[str, Any]: + response = await self.get( + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}/labels" + ) + assert isinstance(response, dict) # nosec + return response + + async def get_service_extras( + self, + service_key: ServiceKey, + service_version: ServiceVersion, + ) -> ServiceExtras: + # NOTE: SEE https://github.com/ITISFoundation/osparc-simcore/issues/7509 + + # check physical node requirements + # all nodes require "CPU" + result: dict[str, Any] = { + "node_requirements": { + "CPU": self.default_max_nano_cpus / 1.0e09, + "RAM": self.default_max_memory, + } + } + + labels = await self.get_service_labels(service_key, service_version) + _logger.debug("Compiling service extras from labels %s", pformat(labels)) + + if _SERVICE_RUNTIME_SETTINGS in labels: + service_settings: list[dict[str, Any]] = json_loads( + labels[_SERVICE_RUNTIME_SETTINGS] + ) + for entry in service_settings: + entry_name = entry.get("name", "").lower() + entry_value = entry.get("value") + invalid_with_msg = None + + if entry_name == _RESOURCES_ENTRY_NAME: + if entry_value and isinstance(entry_value, dict): + res_limit = entry_value.get("Limits", {}) + res_reservation = entry_value.get("Reservations", {}) + # CPU + result["node_requirements"]["CPU"] = ( + float(res_limit.get("NanoCPUs", 0)) + or float(res_reservation.get("NanoCPUs", 0)) + or self.default_max_nano_cpus + ) / 1.0e09 + # RAM + result["node_requirements"]["RAM"] = ( + res_limit.get("MemoryBytes", 0) + or res_reservation.get("MemoryBytes", 0) + or self.default_max_memory + ) + else: + invalid_with_msg = f"invalid type for resource [{entry_value}]" + + # discrete resources (custom made ones) --- + # check if the service requires GPU support + if not invalid_with_msg and _validate_kind(entry, "VRAM"): + + result["node_requirements"]["GPU"] = 1 + if not invalid_with_msg and _validate_kind(entry, "MPI"): + result["node_requirements"]["MPI"] = 1 + + elif entry_name == 
_CONTAINER_SPEC_ENTRY_NAME: + # NOTE: some minor validation + # expects {'name': 'ContainerSpec', 'type': 'ContainerSpec', 'value': {'Command': [...]}} + if ( + entry_value + and isinstance(entry_value, dict) + and "Command" in entry_value + ): + result["container_spec"] = entry_value + else: + invalid_with_msg = f"invalid container_spec [{entry_value}]" + + if invalid_with_msg: + _logger.warning( + "%s entry [%s] encoded in settings labels of service image %s:%s", + invalid_with_msg, + entry, + service_key, + service_version, + ) + + # get org labels + result.update( + { + sl: labels[dl] + for dl, sl in _ORG_LABELS_TO_SCHEMA_LABELS.items() + if dl in labels + } + ) + + _logger.debug("Following service extras were compiled: %s", pformat(result)) + + return TypeAdapter(ServiceExtras).validate_python(result) + + +async def director_lifespan(app: FastAPI) -> AsyncIterator[State]: + client: DirectorClient | None = None + settings = app.state.settings.CATALOG_DIRECTOR + + assert isinstance(settings, DirectorSettings) # nosec + + with log_context( + _logger, logging.DEBUG, "Setup director at %s", f"{settings.base_url=}" + ): + async for attempt in AsyncRetrying(**_director_startup_retry_policy): + with attempt: + client = DirectorClient(base_url=settings.base_url, app=app) + if not await client.is_responsive(): + with suppress(Exception): + await client.close() + raise DirectorUnresponsiveError + + _logger.info( + "Connection to director-v0 succeeded [%s]", + json_dumps(attempt.retry_state.retry_object.statistics), + ) + + # set when connected + app.state.director_api = client + + try: + yield {} + finally: + if client: + with log_catch(_logger, reraise=False): + await asyncio.wait_for(client.close(), timeout=10) diff --git a/services/catalog/src/simcore_service_catalog/clients/rabbitmq.py b/services/catalog/src/simcore_service_catalog/clients/rabbitmq.py new file mode 100644 index 00000000000..ba46173b647 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/clients/rabbitmq.py @@ -0,0 +1,38 @@ +import logging +from collections.abc import AsyncIterator +from typing import cast + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.rabbitmq import RabbitMQRPCClient, wait_till_rabbitmq_responsive +from settings_library.rabbit import RabbitSettings + +from .._meta import PROJECT_NAME + +_logger = logging.getLogger(__name__) + + +def get_rabbitmq_settings(app: FastAPI) -> RabbitSettings: + settings: RabbitSettings = app.state.settings.CATALOG_RABBITMQ + return settings + + +async def rabbitmq_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: RabbitSettings = get_rabbitmq_settings(app) + await wait_till_rabbitmq_responsive(settings.dsn) + + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name=f"{PROJECT_NAME}_rpc_server", settings=settings + ) + + try: + yield {} + finally: + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + app.state.rabbitmq_rpc_server = None + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) diff --git a/services/catalog/src/simcore_service_catalog/core/application.py b/services/catalog/src/simcore_service_catalog/core/application.py index 46045a81c1d..3f726883066 100644 --- a/services/catalog/src/simcore_service_catalog/core/application.py +++ b/services/catalog/src/simcore_service_catalog/core/application.py @@ -1,43 +1,53 @@ import 
logging -import time -from typing import Callable, Optional -from fastapi import FastAPI, Request -from fastapi.exceptions import RequestValidationError +from fastapi import FastAPI from fastapi.middleware.gzip import GZipMiddleware from models_library.basic_types import BootModeEnum +from servicelib.fastapi import timing_middleware +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) from servicelib.fastapi.openapi import override_fastapi_openapi_method -from starlette import status -from starlette.exceptions import HTTPException +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) from starlette.middleware.base import BaseHTTPMiddleware -from ..api.errors.http_error import ( - http_error_handler, - make_http_error_handler_for_exception, +from .._meta import ( + API_VERSION, + API_VTAG, + APP_NAME, + PROJECT_NAME, + SUMMARY, ) -from ..api.errors.validation_error import http422_error_handler -from ..api.root import router as api_router -from ..api.routes.health import router as health_router -from ..meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY -from ..services.function_services import setup_function_services -from .events import ( - create_start_app_handler, - create_stop_app_handler, - on_shutdown, - on_startup, +from ..api.rest import initialize_rest_api +from . import events +from .settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS = ( + "aio_pika", + "aiobotocore", + "aiormq", + "botocore", + "httpcore", + "werkzeug", ) -from .settings import AppSettings -logger = logging.getLogger(__name__) +def create_app() -> FastAPI: + # keep mostly quiet noisy loggers + quiet_level: int = max( + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + ) + for name in _NOISY_LOGGERS: + logging.getLogger(name).setLevel(quiet_level) -def init_app(settings: Optional[AppSettings] = None) -> FastAPI: - if settings is None: - settings = AppSettings.create_from_envs() - assert settings # nosec - logging.basicConfig(level=settings.CATALOG_LOG_LEVEL.value) - logging.root.setLevel(settings.CATALOG_LOG_LEVEL.value) - logger.debug(settings.json(indent=2)) + settings = ApplicationSettings.create_from_envs() + _logger.debug(settings.model_dump_json(indent=2)) app = FastAPI( debug=settings.SC_BOOT_MODE @@ -48,57 +58,31 @@ def init_app(settings: Optional[AppSettings] = None) -> FastAPI: openapi_url=f"/api/{API_VTAG}/openapi.json", docs_url="/dev/doc", redoc_url=None, # default disabled + lifespan=events.create_app_lifespan(), ) override_fastapi_openapi_method(app) + # STATE app.state.settings = settings - setup_function_services(app) - - # events - app.add_event_handler("startup", on_startup) - app.add_event_handler("startup", create_start_app_handler(app)) - - app.add_event_handler("shutdown", on_shutdown) - app.add_event_handler("shutdown", create_stop_app_handler(app)) - - # exception handlers - app.add_exception_handler(HTTPException, http_error_handler) - app.add_exception_handler(RequestValidationError, http422_error_handler) - # SEE https://docs.python.org/3/library/exceptions.html#exception-hierarchy - app.add_exception_handler( - NotImplementedError, - make_http_error_handler_for_exception( - status.HTTP_501_NOT_IMPLEMENTED, NotImplementedError - ), - ) - app.add_exception_handler( - Exception, - make_http_error_handler_for_exception( - status.HTTP_500_INTERNAL_SERVER_ERROR, Exception - ), - ) - - # 
Routing + # MIDDLEWARES + if settings.CATALOG_TRACING: + setup_tracing(app, settings.CATALOG_TRACING, APP_NAME) + if settings.CATALOG_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) - # healthcheck at / and at /v0/ - app.include_router(health_router) + if settings.CATALOG_TRACING: + initialize_fastapi_app_tracing(app) - # api under /v* - app.include_router(api_router, prefix=f"/{API_VTAG}") - # middleware to time requests (ONLY for development) if settings.SC_BOOT_MODE != BootModeEnum.PRODUCTION: + # middleware to time requests (ONLY for development) + app.add_middleware( + BaseHTTPMiddleware, dispatch=timing_middleware.add_process_time_header + ) - async def _add_process_time_header(request: Request, call_next: Callable): - start_time = time.time() - response = await call_next(request) - process_time = time.time() - start_time - response.headers["X-Process-Time"] = str(process_time) - return response - - app.add_middleware(BaseHTTPMiddleware, dispatch=_add_process_time_header) - - # gzip middleware app.add_middleware(GZipMiddleware) + # ROUTES & ERROR HANDLERS + initialize_rest_api(app) + return app diff --git a/services/catalog/src/simcore_service_catalog/core/background_tasks.py b/services/catalog/src/simcore_service_catalog/core/background_tasks.py index d77cb11c88a..657f255bc85 100644 --- a/services/catalog/src/simcore_service_catalog/core/background_tasks.py +++ b/services/catalog/src/simcore_service_catalog/core/background_tasks.py @@ -11,62 +11,33 @@ import asyncio import logging +from collections.abc import AsyncIterator from contextlib import suppress from pprint import pformat -from typing import Any, Final, cast +from typing import Final -from fastapi import FastAPI -from models_library.function_services_catalog.api import iter_service_docker_data -from models_library.services import ServiceDockerData -from models_library.services_db import ServiceAccessRightsAtDB, ServiceMetaDataAtDB +from fastapi import FastAPI, HTTPException +from fastapi_lifespan_manager import State +from models_library.services import ServiceMetaDataPublished +from models_library.services_types import ServiceKey, ServiceVersion from packaging.version import Version from pydantic import ValidationError +from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.ext.asyncio import AsyncEngine -from ..api.dependencies.director import get_director_api -from ..db.repositories.groups import GroupsRepository -from ..db.repositories.projects import ProjectsRepository -from ..db.repositories.services import ServicesRepository -from ..services import access_rights +from ..api._dependencies.director import get_director_client +from ..models.services_db import ServiceAccessRightsDB, ServiceMetaDataDBCreate +from ..repository.groups import GroupsRepository +from ..repository.projects import ProjectsRepository +from ..repository.services import ServicesRepository +from ..service import access_rights, manifest -logger = logging.getLogger(__name__) - -# NOTE: by PC I tried to unify with models_library.services but there are other inconsistencies so I leave if for another time! 
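Stepping back to the application factory in core/application.py above: the previous inline request-timing middleware is replaced by servicelib.fastapi.timing_middleware.add_process_time_header, which is assumed here to behave like the removed code. A minimal, self-contained sketch of that pattern, based on the deleted _add_process_time_header rather than on the servicelib implementation:

import time

from fastapi import FastAPI, Request
from starlette.middleware.base import BaseHTTPMiddleware


async def add_process_time_header(request: Request, call_next):
    # measure wall-clock time spent handling the request ...
    start_time = time.time()
    response = await call_next(request)
    # ... and expose it (in seconds) as a response header
    response.headers["X-Process-Time"] = str(time.time() - start_time)
    return response


app = FastAPI()
# development-only middleware, mirroring the SC_BOOT_MODE guard in create_app()
app.add_middleware(BaseHTTPMiddleware, dispatch=add_process_time_header)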
-ServiceKey = str -ServiceVersion = str -ServiceDockerDataMap = dict[tuple[ServiceKey, ServiceVersion], ServiceDockerData] - - -async def _list_services_in_registry( - app: FastAPI, -) -> ServiceDockerDataMap: - client = get_director_api(app) - registry_services = cast(list[dict[str, Any]], await client.get("/services")) - - services: ServiceDockerDataMap = { - # services w/o associated image - (s.key, s.version): s - for s in iter_service_docker_data() - } - for service in registry_services: - try: - service_data = ServiceDockerData.parse_obj(service) - services[(service_data.key, service_data.version)] = service_data - - except ValidationError as exc: - logger.warning( - "Skipping %s:%s from the catalog of services:\n%s", - service.get("key"), - service.get("version"), - exc, - ) - - return services +_logger = logging.getLogger(__name__) async def _list_services_in_database( db_engine: AsyncEngine, -) -> set[tuple[ServiceKey, ServiceVersion]]: +): services_repo = ServicesRepository(db_engine=db_engine) return { (service.key, service.version) @@ -77,7 +48,9 @@ async def _list_services_in_database( async def _create_services_in_database( app: FastAPI, service_keys: set[tuple[ServiceKey, ServiceVersion]], - services_in_registry: dict[tuple[ServiceKey, ServiceVersion], ServiceDockerData], + services_in_registry: dict[ + tuple[ServiceKey, ServiceVersion], ServiceMetaDataPublished + ], ) -> None: """Adds a new service in the database @@ -92,38 +65,47 @@ def _by_version(t: tuple[ServiceKey, ServiceVersion]) -> Version: sorted_services = sorted(service_keys, key=_by_version) for service_key, service_version in sorted_services: - service_metadata: ServiceDockerData = services_in_registry[ + + service_metadata: ServiceMetaDataPublished = services_in_registry[ (service_key, service_version) ] - ## Set deprecation date to null (is valid date value for postgres) - - # DEFAULT policies - ( - owner_gid, - service_access_rights, - ) = await access_rights.evaluate_default_policy(app, service_metadata) + try: + ## Set deprecation date to null (is valid date value for postgres) - # AUTO-UPGRADE PATCH policy - inherited_access_rights = await access_rights.evaluate_auto_upgrade_policy( - service_metadata, services_repo - ) + # DEFAULT policies + ( + owner_gid, + service_access_rights, + ) = await access_rights.evaluate_default_policy(app, service_metadata) - service_access_rights += inherited_access_rights - service_access_rights = access_rights.reduce_access_rights( - service_access_rights - ) + # AUTO-UPGRADE PATCH policy + inherited_access_rights = await access_rights.evaluate_auto_upgrade_policy( + service_metadata, services_repo + ) - service_metadata_dict = service_metadata.dict() - # set the service in the DB - await services_repo.create_service( - ServiceMetaDataAtDB(**service_metadata_dict, owner=owner_gid), - service_access_rights, - ) + service_access_rights += inherited_access_rights + service_access_rights = access_rights.reduce_access_rights( + service_access_rights + ) + # set the service in the DB + await services_repo.create_or_update_service( + ServiceMetaDataDBCreate( + **service_metadata.model_dump(exclude_unset=True), owner=owner_gid + ), + service_access_rights, + ) -async def update_latest_versions_cache(app: FastAPI): - services_repo = ServicesRepository(app.state.engine) - await services_repo.update_latest_versions_cache() + except (HTTPException, ValidationError, SQLAlchemyError) as err: + # Resilient to single failures: errors in individual (service,key) should not prevent the 
evaluation of the rest + # and stop the background task from running. + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/6318 + _logger.warning( + "Skipping '%s:%s' due to %s", + service_key, + service_version, + err, + ) async def _ensure_registry_and_database_are_synced(app: FastAPI) -> None: @@ -131,30 +113,26 @@ async def _ensure_registry_and_database_are_synced(app: FastAPI) -> None: Notice that a services here refers to a 2-tuple (key, version) """ - services_in_registry: dict[ - tuple[ServiceKey, ServiceVersion], ServiceDockerData - ] = await _list_services_in_registry(app) + director_api = get_director_client(app) + services_in_manifest_map = await manifest.get_services_map(director_api) - services_in_db: set[ - tuple[ServiceKey, ServiceVersion] - ] = await _list_services_in_database(app.state.engine) + services_in_db: set[tuple[ServiceKey, ServiceVersion]] = ( + await _list_services_in_database(app.state.engine) + ) # check that the db has all the services at least once - missing_services_in_db = set(services_in_registry.keys()) - services_in_db + missing_services_in_db = set(services_in_manifest_map.keys()) - services_in_db if missing_services_in_db: - logger.debug( + _logger.debug( "Missing services in db: %s", pformat(missing_services_in_db), ) # update db await _create_services_in_database( - app, missing_services_in_db, services_in_registry + app, missing_services_in_db, services_in_manifest_map ) - # will account for new service updates - await update_latest_versions_cache(app) - async def _ensure_published_templates_accessible( db_engine: AsyncEngine, default_product_name: str @@ -180,9 +158,9 @@ async def _ensure_published_templates_accessible( missing_services = published_services - available_services missing_services_access_rights = [ - ServiceAccessRightsAtDB( - key=service[0], - version=service[1], + ServiceAccessRightsDB( + key=ServiceKey(service[0]), + version=ServiceVersion(service[1]), gid=everyone_gid, execute_access=True, product_name=default_product_name, @@ -190,45 +168,44 @@ async def _ensure_published_templates_accessible( for service in missing_services ] if missing_services_access_rights: - logger.info( + _logger.info( "Adding access rights for published templates\n: %s", missing_services_access_rights, ) await services_repo.upsert_service_access_rights(missing_services_access_rights) -async def _sync_services_task(app: FastAPI) -> None: +async def _run_sync_services(app: FastAPI): default_product: Final[str] = app.state.default_product_name engine: AsyncEngine = app.state.engine - first_run = True - while app.state.registry_syncer_running: - try: - logger.debug("Syncing services between registry and database...") + # check that the list of services is in sync with the registry + await _ensure_registry_and_database_are_synced(app) - if first_run: - await update_latest_versions_cache(app) + # check that the published services are available to everyone + # (templates are published to GUESTs, so their services must be also accessible) + await _ensure_published_templates_accessible(engine, default_product) - # check that the list of services is in sync with the registry - await _ensure_registry_and_database_are_synced(app) - # check that the published services are available to everyone - # (templates are published to GUESTs, so their services must be also accessible) - await _ensure_published_templates_accessible(engine, default_product) +async def _sync_services_task(app: FastAPI) -> None: + while app.state.registry_syncer_running: + try: + 
_logger.debug("Syncing services between registry and database...") + + await _run_sync_services(app) await asyncio.sleep(app.state.settings.CATALOG_BACKGROUND_TASK_REST_TIME) - first_run = False - except asyncio.CancelledError: + except asyncio.CancelledError: # noqa: PERF203 # task is stopped - logger.info("registry syncing task cancelled") + _logger.info("registry syncing task cancelled") raise except Exception: # pylint: disable=broad-except if not app.state.registry_syncer_running: - logger.warning("registry syncing task forced to stop") + _logger.warning("registry syncing task forced to stop") break - logger.exception( + _logger.exception( "Unexpected error while syncing registry entries, restarting now..." ) # wait a bit before retrying, so it does not block everything until the director is up @@ -245,7 +222,7 @@ async def start_registry_sync_task(app: FastAPI) -> None: app.state.registry_syncer_running = True task = asyncio.create_task(_sync_services_task(app)) app.state.registry_sync_task = task - logger.info("registry syncing task started") + _logger.info("registry syncing task started") async def stop_registry_sync_task(app: FastAPI) -> None: @@ -255,4 +232,12 @@ async def stop_registry_sync_task(app: FastAPI) -> None: task.cancel() await task app.state.registry_sync_task = None - logger.info("registry syncing task stopped") + _logger.info("registry syncing task stopped") + + +async def background_task_lifespan(app: FastAPI) -> AsyncIterator[State]: + await start_registry_sync_task(app) + try: + yield {} + finally: + await stop_registry_sync_task(app) diff --git a/services/catalog/src/simcore_service_catalog/core/events.py b/services/catalog/src/simcore_service_catalog/core/events.py index 45d2084bece..8695b10f15e 100644 --- a/services/catalog/src/simcore_service_catalog/core/events.py +++ b/services/catalog/src/simcore_service_catalog/core/events.py @@ -1,81 +1,81 @@ import logging -from typing import Callable +from collections.abc import AsyncIterator from fastapi import FastAPI -from models_library.basic_types import BootModeEnum -from servicelib.fastapi.tracing import setup_tracing - -from ..db.events import close_db_connection, connect_to_db, setup_default_product -from ..meta import PROJECT_NAME, __version__ -from ..services.director import close_director, setup_director -from ..services.remote_debug import setup_remote_debugging -from .background_tasks import start_registry_sync_task, stop_registry_sync_task - -logger = logging.getLogger(__name__) - -# -# SEE https://patorjk.com/software/taag/#p=display&h=0&f=Ogre&t=Catalog -# -WELCOME_MSG = r""" - ___ _ _ - / __\ __ _ | |_ __ _ | | ___ __ _ - / / / _` || __| / _` || | / _ \ / _` | -/ /___ | (_| || |_ | (_| || || (_) || (_| | -\____/ \__,_| \__| \__,_||_| \___/ \__, | - |___/ {} -""".format( - f"v{__version__}" +from fastapi_lifespan_manager import LifespanManager, State +from servicelib.fastapi.monitoring import ( + create_prometheus_instrumentationmain_input_state, + prometheus_instrumentation_lifespan, ) +from servicelib.fastapi.postgres_lifespan import ( + create_postgres_database_input_state, +) + +from .._meta import APP_FINISHED_BANNER_MSG, APP_STARTED_BANNER_MSG +from ..api.rpc.events import rpc_api_lifespan +from ..clients.director import director_lifespan +from ..clients.rabbitmq import rabbitmq_lifespan +from ..repository.events import repository_lifespan_manager +from ..service.function_services import function_services_lifespan +from .background_tasks import background_task_lifespan +from .settings import 
ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def _flush_started_banner() -> None: + # WARNING: this function is spied in the tests + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + + +def _flush_finished_banner() -> None: + # WARNING: this function is spied in the tests + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + +async def _banners_lifespan(_) -> AsyncIterator[State]: + _flush_started_banner() + yield {} + _flush_finished_banner() -def on_startup() -> None: - print(WELCOME_MSG, flush=True) +async def _settings_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSettings = app.state.settings -def on_shutdown() -> None: - msg = PROJECT_NAME + f" v{__version__} SHUT DOWN" - print(f"{msg:=^100}", flush=True) + yield { + **create_postgres_database_input_state(settings.CATALOG_POSTGRES), + **create_prometheus_instrumentationmain_input_state( + enabled=settings.CATALOG_PROMETHEUS_INSTRUMENTATION_ENABLED + ), + } -def create_start_app_handler(app: FastAPI) -> Callable: - async def start_app() -> None: - logger.info("Application started") +def create_app_lifespan() -> LifespanManager: + # WARNING: order matters + app_lifespan = LifespanManager() + app_lifespan.add(_settings_lifespan) - # setup connection to remote debugger (if applies) - setup_remote_debugging( - force_enabled=app.state.settings.SC_BOOT_MODE == BootModeEnum.DEBUG - ) + # - postgres + app_lifespan.include(repository_lifespan_manager) - # setup connection to pg db - if app.state.settings.CATALOG_POSTGRES: - await connect_to_db(app) - await setup_default_product(app) + # - rabbitmq + app_lifespan.add(rabbitmq_lifespan) - if app.state.settings.CATALOG_DIRECTOR: - # setup connection to director - await setup_director(app) + # - rpc api routes + app_lifespan.add(rpc_api_lifespan) - # FIXME: check director service is in place and ready. Hand-shake?? 
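The rewritten core/events.py above moves from FastAPI "startup"/"shutdown" event handlers to composable lifespans: each lifespan is an async generator that takes the app, performs its setup, yields a State dict, and runs its cleanup after the yield, while LifespanManager chains them in the order they are registered (hence the "order matters" note). A minimal sketch of the pattern, assuming the same fastapi_lifespan_manager API used in this diff; the helper names (build_app_lifespan, db_lifespan, banner_lifespan) are illustrative only:

from collections.abc import AsyncIterator

from fastapi import FastAPI
from fastapi_lifespan_manager import LifespanManager, State


async def db_lifespan(app: FastAPI) -> AsyncIterator[State]:
    app.state.db = "connected"  # setup, runs on application startup
    try:
        yield {"db_ready": True}  # contributes to the shared lifespan state
    finally:
        app.state.db = None  # cleanup, runs on application shutdown


async def banner_lifespan(app: FastAPI) -> AsyncIterator[State]:
    print("started", flush=True)
    yield {}
    print("finished", flush=True)


def build_app_lifespan() -> LifespanManager:
    manager = LifespanManager()
    manager.add(db_lifespan)  # registration order defines startup order
    manager.add(banner_lifespan)
    return manager


app = FastAPI(lifespan=build_app_lifespan())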
- # SEE https://github.com/ITISFoundation/osparc-simcore/issues/1728 - await start_registry_sync_task(app) + # - director + app_lifespan.add(director_lifespan) - if app.state.settings.CATALOG_TRACING: - setup_tracing(app, app.state.settings.CATALOG_TRACING) + # - function services + app_lifespan.add(function_services_lifespan) - return start_app + # - background task + app_lifespan.add(background_task_lifespan) + # - prometheus instrumentation + app_lifespan.add(prometheus_instrumentation_lifespan) -def create_stop_app_handler(app: FastAPI) -> Callable: - async def stop_app() -> None: - logger.info("Application stopping") - if app.state.settings.CATALOG_DIRECTOR: - try: - await stop_registry_sync_task(app) - await close_director(app) - await close_db_connection(app) - except Exception: # pylint: disable=broad-except - logger.exception( - "Unexpected error while closing application", exc_info=True - ) + app_lifespan.add(_banners_lifespan) - return stop_app + return app_lifespan diff --git a/services/catalog/src/simcore_service_catalog/core/settings.py b/services/catalog/src/simcore_service_catalog/core/settings.py index ea86e690c9e..5581bf4ba99 100644 --- a/services/catalog/src/simcore_service_catalog/core/settings.py +++ b/services/catalog/src/simcore_service_catalog/core/settings.py @@ -1,20 +1,31 @@ import logging from functools import cached_property -from typing import Final, Optional +from typing import Annotated, Final -from models_library.basic_types import BootModeEnum, BuildTargetEnum, LogLevel -from models_library.services_resources import ResourcesDict -from pydantic import ByteSize, Field, PositiveInt, parse_obj_as +from common_library.basic_types import DEFAULT_FACTORY +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.basic_types import LogLevel +from models_library.services_resources import ResourcesDict, ResourceValue +from pydantic import ( + AliasChoices, + ByteSize, + Field, + NonNegativeInt, + PositiveInt, + TypeAdapter, +) +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings from settings_library.base import BaseCustomSettings from settings_library.http_client_request import ClientRequestSettings from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings -from simcore_service_catalog.models.schemas.services_specifications import ( - ServiceSpecifications, -) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) class DirectorSettings(BaseCustomSettings): @@ -27,51 +38,93 @@ def base_url(self) -> str: return f"http://{self.DIRECTOR_HOST}:{self.DIRECTOR_PORT}/{self.DIRECTOR_VTAG}" -_DEFAULT_RESOURCES: Final[ResourcesDict] = parse_obj_as( - ResourcesDict, - { - "CPU": {"limit": 0.1, "reservation": 0.1}, - "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), - }, - }, -) - -_DEFAULT_SERVICE_SPECIFICATIONS: Final[ - ServiceSpecifications -] = ServiceSpecifications.parse_obj({}) - +_in_bytes = TypeAdapter(ByteSize).validate_python -class AppSettings(BaseCustomSettings, MixinLoggingSettings): - # docker environs - SC_BOOT_MODE: Optional[BootModeEnum] - SC_BOOT_TARGET: Optional[BuildTargetEnum] +_DEFAULT_RESOURCES: Final[ResourcesDict] = ResourcesDict( + CPU=ResourceValue(limit=0.1, 
reservation=0.1), + RAM=ResourceValue(limit=_in_bytes("2Gib"), reservation=_in_bytes("2Gib")), +) - CATALOG_LOG_LEVEL: LogLevel = Field( - LogLevel.INFO.value, - env=["CATALOG_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"], - ) - CATALOG_DEV_FEATURES_ENABLED: bool = Field( - False, - description="Enables development features. WARNING: make sure it is disabled in production .env file!", - ) - CATALOG_POSTGRES: Optional[PostgresSettings] = Field(auto_default_from_env=True) +_DEFAULT_SERVICE_SPECIFICATIONS: Final[ServiceSpecifications] = ( + ServiceSpecifications.model_validate({}) +) - CATALOG_CLIENT_REQUEST: Optional[ClientRequestSettings] = Field( - auto_default_from_env=True - ) - CATALOG_DIRECTOR: Optional[DirectorSettings] = Field(auto_default_from_env=True) +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "CATALOG_LOG_LEVEL", "CATALOG_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + CATALOG_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "CATALOG_LOG_FORMAT_LOCAL_DEV_ENABLED", "LOG_FORMAT_LOCAL_DEV_ENABLED" + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + CATALOG_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "CATALOG_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + CATALOG_DEV_FEATURES_ENABLED: Annotated[ + bool, + Field( + description="Enables development features. 
WARNING: make sure it is disabled in production .env file!", + ), + ] = False + + CATALOG_POSTGRES: Annotated[ + PostgresSettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CATALOG_RABBITMQ: Annotated[ + RabbitSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + CATALOG_CLIENT_REQUEST: Annotated[ + ClientRequestSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CATALOG_DIRECTOR: Annotated[ + DirectorSettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CATALOG_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + CATALOG_PROFILING: bool = False # BACKGROUND TASK CATALOG_BACKGROUND_TASK_REST_TIME: PositiveInt = 60 CATALOG_BACKGROUND_TASK_WAIT_AFTER_FAILURE: PositiveInt = 5 # secs - CATALOG_TRACING: Optional[TracingSettings] = None - CATALOG_SERVICES_DEFAULT_RESOURCES: ResourcesDict = _DEFAULT_RESOURCES CATALOG_SERVICES_DEFAULT_SPECIFICATIONS: ServiceSpecifications = ( _DEFAULT_SERVICE_SPECIFICATIONS ) + CATALOG_TRACING: Annotated[ + TracingSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ), + ] + + DIRECTOR_DEFAULT_MAX_MEMORY: NonNegativeInt = 0 + DIRECTOR_DEFAULT_MAX_NANO_CPUS: NonNegativeInt = 0 diff --git a/services/catalog/src/simcore_service_catalog/db/errors.py b/services/catalog/src/simcore_service_catalog/db/errors.py deleted file mode 100644 index 1f01735568e..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/errors.py +++ /dev/null @@ -1,6 +0,0 @@ -""" Custom db.repository errors -""" - - -class RepositoryError(Exception): - pass diff --git a/services/catalog/src/simcore_service_catalog/db/events.py b/services/catalog/src/simcore_service_catalog/db/events.py deleted file mode 100644 index 3e7e075c151..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/events.py +++ /dev/null @@ -1,65 +0,0 @@ -import logging - -from fastapi import FastAPI -from servicelib.retry_policies import PostgresRetryPolicyUponInitialization -from settings_library.postgres import PostgresSettings -from simcore_postgres_database.utils_aiosqlalchemy import ( - get_pg_engine_stateinfo, - raise_if_migration_not_ready, -) -from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine -from tenacity import retry - -from .repositories.products import ProductsRepository - -logger = logging.getLogger(__name__) - - -@retry(**PostgresRetryPolicyUponInitialization(logger).kwargs) -async def connect_to_db(app: FastAPI) -> None: - logger.debug("Connecting db ...") - cfg: PostgresSettings = app.state.settings.CATALOG_POSTGRES - - engine: AsyncEngine = create_async_engine( - cfg.dsn_with_async_sqlalchemy, - pool_size=cfg.POSTGRES_MINSIZE, - max_overflow=cfg.POSTGRES_MAXSIZE - cfg.POSTGRES_MINSIZE, - connect_args={ - "server_settings": {"application_name": cfg.POSTGRES_CLIENT_NAME} - }, - pool_pre_ping=True, # https://docs.sqlalchemy.org/en/14/core/pooling.html#dealing-with-disconnects - future=True, # this uses sqlalchemy 2.0 API, shall be removed when sqlalchemy 2.0 is released - ) - - logger.debug("Connected to %s", engine.url) # pylint: disable=no-member - - logger.debug("Checking db migration...") - try: - await raise_if_migration_not_ready(engine) - except Exception: - # NOTE: engine must be closed because retry will create a new engine - await engine.dispose() - raise - - logger.debug("Migration up-to-date") - - app.state.engine = engine - - logger.debug( - "Setup engine: %s", - await 
get_pg_engine_stateinfo(engine), - ) - - -async def close_db_connection(app: FastAPI) -> None: - logger.debug("Disconnecting db ...") - - if engine := app.state.engine: - await engine.dispose() - - logger.debug("Disconnected from %s", engine.url) # pylint: disable=no-member - - -async def setup_default_product(app: FastAPI): - repo = ProductsRepository(db_engine=app.state.engine) - app.state.default_product_name = await repo.get_default_product_name() diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/__init__.py b/services/catalog/src/simcore_service_catalog/db/repositories/__init__.py deleted file mode 100644 index a5eeffe1ff5..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ._base import BaseRepository diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/dags.py b/services/catalog/src/simcore_service_catalog/db/repositories/dags.py deleted file mode 100644 index d381d0b25fe..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/dags.py +++ /dev/null @@ -1,61 +0,0 @@ -import json -from typing import Optional - -import sqlalchemy as sa - -from ...models.domain.dag import DAGAtDB -from ...models.schemas.dag import DAGIn -from ..tables import dags -from ._base import BaseRepository - - -class DAGsRepository(BaseRepository): - async def list_dags(self) -> list[DAGAtDB]: - dagraphs = [] - async with self.db_engine.connect() as conn: - async for row in await conn.stream(dags.select()): - dagraphs.append(DAGAtDB.parse_obj(row)) - return dagraphs - - async def get_dag(self, dag_id: int) -> Optional[DAGAtDB]: - async with self.db_engine.connect() as conn: - result = await conn.execute(dags.select().where(dags.c.id == dag_id)) - row = result.first() - if row: - return DAGAtDB.from_orm(row) - return None - - async def create_dag(self, dag: DAGIn) -> int: - async with self.db_engine.begin() as conn: - new_id: int = await conn.scalar( - dags.insert().values( - workbench=dag.json(include={"workbench"}), - **dag.dict(exclude={"workbench"}) - ) - ) - - return new_id - - async def replace_dag(self, dag_id: int, dag: DAGIn) -> None: - async with self.db_engine.begin() as conn: - await conn.execute( - dags.update() - .values( - workbench=dag.json(include={"workbench"}), - **dag.dict(exclude={"workbench"}) - ) - .where(dags.c.id == dag_id) - ) - - async def update_dag(self, dag_id: int, dag: DAGIn) -> None: - patch = dag.dict(exclude_unset=True, exclude={"workbench"}) - if "workbench" in dag.__fields_set__: - patch["workbench"] = json.dumps(patch["workbench"]) - async with self.db_engine.begin() as conn: - await conn.execute( - sa.update(dags).values(**patch).where(dags.c.id == dag_id) - ) - - async def delete_dag(self, dag_id: int) -> None: - async with self.db_engine.begin() as conn: - await conn.execute(sa.delete(dags).where(dags.c.id == dag_id)) diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/groups.py b/services/catalog/src/simcore_service_catalog/db/repositories/groups.py deleted file mode 100644 index 8bcec96c420..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/groups.py +++ /dev/null @@ -1,83 +0,0 @@ -from typing import Optional, cast - -import sqlalchemy as sa -from models_library.emails import LowerCaseEmailStr -from pydantic.types import PositiveInt - -from ...models.domain.group import GroupAtDB -from ..errors import RepositoryError -from ..tables import GroupType, groups, user_to_groups, users 
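The connect_to_db/close_db_connection pair deleted above (db/events.py) is superseded by the postgres lifespan wired into create_app_lifespan(). Its core engine lifecycle can be expressed as a single lifespan; the sketch below keeps only the essentials from the deleted code and omits the retry policy, the migration check, and the servicelib helpers (the function name postgres_engine_lifespan is illustrative, not the actual servicelib API):

from collections.abc import AsyncIterator

from fastapi import FastAPI
from fastapi_lifespan_manager import State
from settings_library.postgres import PostgresSettings
from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine


async def postgres_engine_lifespan(app: FastAPI) -> AsyncIterator[State]:
    cfg: PostgresSettings = app.state.settings.CATALOG_POSTGRES

    engine: AsyncEngine = create_async_engine(
        cfg.dsn_with_async_sqlalchemy,
        pool_size=cfg.POSTGRES_MINSIZE,
        max_overflow=cfg.POSTGRES_MAXSIZE - cfg.POSTGRES_MINSIZE,
        pool_pre_ping=True,  # drop stale connections transparently
    )
    app.state.engine = engine  # repositories read the engine from app.state
    try:
        yield {}
    finally:
        await engine.dispose()  # close the connection pool on shutdown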
-from ._base import BaseRepository - - -class GroupsRepository(BaseRepository): - async def list_user_groups(self, user_id: int) -> list[GroupAtDB]: - groups_in_db = [] - async with self.db_engine.connect() as conn: - async for row in await conn.stream( - sa.select([groups]) - .select_from( - user_to_groups.join(groups, user_to_groups.c.gid == groups.c.gid), - ) - .where(user_to_groups.c.uid == user_id) - ): - groups_in_db.append(GroupAtDB.from_orm(row)) - return groups_in_db - - async def get_everyone_group(self) -> GroupAtDB: - async with self.db_engine.connect() as conn: - result = await conn.execute( - sa.select([groups]).where(groups.c.type == GroupType.EVERYONE) - ) - row = result.first() - if not row: - raise RepositoryError(f"{GroupType.EVERYONE} groups was never initialized") - return GroupAtDB.from_orm(row) - - async def get_user_gid_from_email( - self, user_email: LowerCaseEmailStr - ) -> Optional[PositiveInt]: - async with self.db_engine.connect() as conn: - return cast( - Optional[PositiveInt], - await conn.scalar( - sa.select([users.c.primary_gid]).where(users.c.email == user_email) - ), - ) - - async def get_gid_from_affiliation(self, affiliation: str) -> Optional[PositiveInt]: - async with self.db_engine.connect() as conn: - return cast( - Optional[PositiveInt], - await conn.scalar( - sa.select([groups.c.gid]).where(groups.c.name == affiliation) - ), - ) - - async def get_user_email_from_gid( - self, gid: PositiveInt - ) -> Optional[LowerCaseEmailStr]: - async with self.db_engine.connect() as conn: - return cast( - Optional[LowerCaseEmailStr], - await conn.scalar( - sa.select([users.c.email]).where(users.c.primary_gid == gid) - ), - ) - - async def list_user_emails_from_gids( - self, gids: set[PositiveInt] - ) -> dict[PositiveInt, Optional[LowerCaseEmailStr]]: - service_owners = {} - async with self.db_engine.connect() as conn: - async for row in await conn.stream( - sa.select([users.c.primary_gid, users.c.email]).where( - users.c.primary_gid.in_(gids) - ) - ): - service_owners[row[users.c.primary_gid]] = ( - LowerCaseEmailStr(row[users.c.email]) - if row[users.c.email] - else None - ) - return service_owners diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/products.py b/services/catalog/src/simcore_service_catalog/db/repositories/products.py deleted file mode 100644 index 57b036150d2..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/products.py +++ /dev/null @@ -1,10 +0,0 @@ -from simcore_postgres_database.utils_products import get_default_product_name - -from ._base import BaseRepository - - -class ProductsRepository(BaseRepository): - async def get_default_product_name(self) -> str: - async with self.db_engine.begin() as conn: - product_name: str = await get_default_product_name(conn) - return product_name diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/projects.py b/services/catalog/src/simcore_service_catalog/db/repositories/projects.py deleted file mode 100644 index 9d8fa4e491f..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/projects.py +++ /dev/null @@ -1,41 +0,0 @@ -import logging -from typing import List - -import sqlalchemy as sa -from models_library.services import ServiceKeyVersion -from pydantic import ValidationError - -from ..tables import ProjectType, projects -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - - -class ProjectsRepository(BaseRepository): - async def list_services_from_published_templates(self) -> 
List[ServiceKeyVersion]: - list_of_published_services: List[ServiceKeyVersion] = [] - async with self.db_engine.connect() as conn: - async for row in await conn.stream( - sa.select([projects]).where( - (projects.c.type == ProjectType.TEMPLATE) - & (projects.c.published == True) - ) - ): - project_workbench = row.workbench - for node in project_workbench: - service = project_workbench[node] - try: - if ( - "file-picker" in service["key"] - or "nodes-group" in service["key"] - ): - # these 2 are not going to pass the validation tests, they are frontend only nodes. - continue - list_of_published_services.append(ServiceKeyVersion(**service)) - except ValidationError: - logger.warning( - "service %s could not be validated", service, exc_info=True - ) - continue - - return list_of_published_services diff --git a/services/catalog/src/simcore_service_catalog/db/repositories/services.py b/services/catalog/src/simcore_service_catalog/db/repositories/services.py deleted file mode 100644 index 8e912301448..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/repositories/services.py +++ /dev/null @@ -1,487 +0,0 @@ -import logging -from collections import defaultdict -from itertools import chain -from typing import Any, Iterable, Optional, cast - -import packaging.version -import sqlalchemy as sa -from models_library.services import ServiceKey, ServiceVersion -from models_library.services_db import ServiceAccessRightsAtDB, ServiceMetaDataAtDB -from models_library.users import GroupID -from psycopg2.errors import ForeignKeyViolation -from pydantic import ValidationError -from simcore_postgres_database.models.groups import GroupType -from simcore_postgres_database.models.services import services_latest -from simcore_service_catalog.models.domain.service_specifications import ( - ServiceSpecificationsAtDB, -) -from simcore_service_catalog.models.schemas.services_specifications import ( - ServiceSpecifications, -) -from sqlalchemy import literal_column -from sqlalchemy.dialects.postgresql import insert as pg_insert -from sqlalchemy.sql import and_, or_ -from sqlalchemy.sql.expression import tuple_ -from sqlalchemy.sql.selectable import Select - -from ...models.domain.group import GroupAtDB -from ..tables import services_access_rights, services_meta_data, services_specifications -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - - -def _make_list_services_query( - gids: Optional[list[int]] = None, - execute_access: Optional[bool] = None, - write_access: Optional[bool] = None, - combine_access_with_and: Optional[bool] = True, - product_name: Optional[str] = None, -) -> Select: - query = sa.select([services_meta_data]) - if gids or execute_access or write_access: - logic_operator = and_ if combine_access_with_and else or_ - default = ( - True # pylint: disable=simplifiable-if-expression - if combine_access_with_and - else False - ) - access_query_part = logic_operator( - services_access_rights.c.execute_access if execute_access else default, - services_access_rights.c.write_access if write_access else default, - ) - query = ( - sa.select( - [services_meta_data], - ) - .distinct(services_meta_data.c.key, services_meta_data.c.version) - .select_from(services_meta_data.join(services_access_rights)) - .where( - and_( - or_(*[services_access_rights.c.gid == gid for gid in gids]) - if gids - else True, - access_query_part, - (services_access_rights.c.product_name == product_name) - if product_name - else True, - ) - ) - .order_by(services_meta_data.c.key, 
services_meta_data.c.version) - ) - return query - - -class ServicesRepository(BaseRepository): - """ - API that operates on services_access_rights and services_meta_data tables - """ - - async def list_services( - self, - *, - gids: Optional[list[int]] = None, - execute_access: Optional[bool] = None, - write_access: Optional[bool] = None, - combine_access_with_and: Optional[bool] = True, - product_name: Optional[str] = None, - ) -> list[ServiceMetaDataAtDB]: - services_in_db = [] - - async with self.db_engine.connect() as conn: - async for row in await conn.stream( - _make_list_services_query( - gids, - execute_access, - write_access, - combine_access_with_and, - product_name, - ) - ): - services_in_db.append(ServiceMetaDataAtDB.from_orm(row)) - return services_in_db - - async def list_service_releases( - self, - key: str, - *, - major: Optional[int] = None, - minor: Optional[int] = None, - limit_count: Optional[int] = None, - ) -> list[ServiceMetaDataAtDB]: - """Lists LAST n releases of a given service, sorted from latest first - - major, minor is used to filter as major.minor.* or major.* - limit_count limits returned value. None or non-positive values returns all matches - """ - if minor is not None and major is None: - raise ValueError("Expected only major.*.* or major.minor.*") - - search_condition = services_meta_data.c.key == key - if major is not None: - if minor is not None: - # All patches - search_condition &= services_meta_data.c.version.like( - f"{major}.{minor}.%" - ) - else: - # All minor and patches - search_condition &= services_meta_data.c.version.like(f"{major}.%") - - query = ( - sa.select([services_meta_data]) - .where(search_condition) - .order_by(sa.desc(services_meta_data.c.version)) - ) - - if limit_count and limit_count > 0: - query = query.limit(limit_count) - - releases = [] - async with self.db_engine.connect() as conn: - async for row in await conn.stream(query): - releases.append(ServiceMetaDataAtDB.from_orm(row)) - - # Now sort naturally from latest first: (This is lame, the sorting should be done in the db) - def _by_version(x: ServiceMetaDataAtDB) -> packaging.version.Version: - return cast(packaging.version.Version, packaging.version.parse(x.version)) - - releases_sorted = sorted(releases, key=_by_version, reverse=True) - return releases_sorted - - async def get_latest_release(self, key: str) -> Optional[ServiceMetaDataAtDB]: - """Returns last release or None if service was never released""" - query = ( - sa.select(services_meta_data) - .select_from( - services_latest.join( - services_meta_data, - (services_meta_data.c.key == services_latest.c.key) - & (services_meta_data.c.version == services_latest.c.version), - ) - ) - .where(services_latest.c.key == key) - ) - async with self.db_engine.connect() as conn: - result = await conn.execute(query) - row = result.first() - if row: - return ServiceMetaDataAtDB.from_orm(row) - return None # mypy - - async def get_service( - self, - key: str, - version: str, - *, - gids: Optional[list[int]] = None, - execute_access: Optional[bool] = None, - write_access: Optional[bool] = None, - product_name: Optional[str] = None, - ) -> Optional[ServiceMetaDataAtDB]: - query = sa.select([services_meta_data]).where( - (services_meta_data.c.key == key) - & (services_meta_data.c.version == version) - ) - if gids or execute_access or write_access: - query = ( - sa.select([services_meta_data]) - .select_from(services_meta_data.join(services_access_rights)) - .where( - and_( - (services_meta_data.c.key == key), - 
(services_meta_data.c.version == version), - or_(*[services_access_rights.c.gid == gid for gid in gids]) - if gids - else True, - services_access_rights.c.execute_access - if execute_access - else True, - services_access_rights.c.write_access if write_access else True, - (services_access_rights.c.product_name == product_name) - if product_name - else True, - ) - ) - ) - async with self.db_engine.connect() as conn: - result = await conn.execute(query) - row = result.first() - if row: - return ServiceMetaDataAtDB.from_orm(row) - return None # mypy - - async def create_service( - self, - new_service: ServiceMetaDataAtDB, - new_service_access_rights: list[ServiceAccessRightsAtDB], - ) -> ServiceMetaDataAtDB: - for access_rights in new_service_access_rights: - if ( - access_rights.key != new_service.key - or access_rights.version != new_service.version - ): - raise ValueError( - f"{access_rights} does not correspond to service {new_service.key}:{new_service.version}" - ) - async with self.db_engine.begin() as conn: - # NOTE: this ensure proper rollback in case of issue - result = await conn.execute( - # pylint: disable=no-value-for-parameter - services_meta_data.insert() - .values(**new_service.dict(by_alias=True)) - .returning(literal_column("*")) - ) - row = result.first() - assert row # nosec - created_service = ServiceMetaDataAtDB.from_orm(row) - - for access_rights in new_service_access_rights: - insert_stmt = pg_insert(services_access_rights).values( - **access_rights.dict(by_alias=True) - ) - await conn.execute(insert_stmt) - return created_service - - async def update_service( - self, patched_service: ServiceMetaDataAtDB - ) -> ServiceMetaDataAtDB: - # update the services_meta_data table - async with self.db_engine.begin() as conn: - result = await conn.execute( - # pylint: disable=no-value-for-parameter - services_meta_data.update() - .where( - (services_meta_data.c.key == patched_service.key) - & (services_meta_data.c.version == patched_service.version) - ) - .values(**patched_service.dict(by_alias=True, exclude_unset=True)) - .returning(literal_column("*")) - ) - row = result.first() - assert row # nosec - updated_service = ServiceMetaDataAtDB.from_orm(row) - return updated_service - - async def get_service_access_rights( - self, - key: str, - version: str, - product_name: Optional[str] = None, - ) -> list[ServiceAccessRightsAtDB]: - """ - - If product_name is not specificed, then all are considered in the query - """ - services_in_db = [] - search_expression = (services_access_rights.c.key == key) & ( - services_access_rights.c.version == version - ) - if product_name: - search_expression &= services_access_rights.c.product_name == product_name - - query = sa.select([services_access_rights]).where(search_expression) - - async with self.db_engine.connect() as conn: - async for row in await conn.stream(query): - services_in_db.append(ServiceAccessRightsAtDB.from_orm(row)) - return services_in_db - - async def list_services_access_rights( - self, - key_versions: Iterable[tuple[str, str]], - product_name: Optional[str] = None, - ) -> dict[tuple[str, str], list[ServiceAccessRightsAtDB]]: - """Batch version of get_service_access_rights""" - service_to_access_rights = defaultdict(list) - query = ( - sa.select([services_access_rights]) - .select_from(services_access_rights) - .where( - tuple_( - services_access_rights.c.key, services_access_rights.c.version - ).in_(key_versions) - & (services_access_rights.c.product_name == product_name) - if product_name - else True - ) - ) - async with 
self.db_engine.connect() as conn: - async for row in await conn.stream(query): - service_to_access_rights[ - ( - row[services_access_rights.c.key], - row[services_access_rights.c.version], - ) - ].append(ServiceAccessRightsAtDB.from_orm(row)) - return service_to_access_rights - - async def upsert_service_access_rights( - self, new_access_rights: list[ServiceAccessRightsAtDB] - ) -> None: - # update the services_access_rights table (some might be added/removed/modified) - for rights in new_access_rights: - insert_stmt = pg_insert(services_access_rights).values( - **rights.dict(by_alias=True) - ) - on_update_stmt = insert_stmt.on_conflict_do_update( - index_elements=[ - services_access_rights.c.key, - services_access_rights.c.version, - services_access_rights.c.gid, - services_access_rights.c.product_name, - ], - set_=rights.dict( - by_alias=True, - exclude_unset=True, - exclude={"key", "version", "gid", "product_name"}, - ), - ) - try: - async with self.db_engine.begin() as conn: - result = await conn.execute(on_update_stmt) - assert result # nosec - except ForeignKeyViolation: - logger.warning( - "The service %s:%s is missing from services_meta_data", - rights.key, - rights.version, - ) - - async def delete_service_access_rights( - self, delete_access_rights: list[ServiceAccessRightsAtDB] - ) -> None: - async with self.db_engine.begin() as conn: - for rights in delete_access_rights: - await conn.execute( - # pylint: disable=no-value-for-parameter - services_access_rights.delete().where( - (services_access_rights.c.key == rights.key) - & (services_access_rights.c.version == rights.version) - & (services_access_rights.c.gid == rights.gid) - & (services_access_rights.c.product_name == rights.product_name) - ) - ) - - async def get_service_specifications( - self, - key: ServiceKey, - version: ServiceVersion, - groups: tuple[GroupAtDB, ...], - allow_use_latest_service_version: bool = False, - ) -> Optional[ServiceSpecifications]: - """returns the service specifications for service 'key:version' and for 'groups' - returns None if nothing found - - :param allow_use_latest_service_version: if True, then the latest version of the specs will be returned, defaults to False - """ - logger.debug( - "getting specifications from db for %s", f"{key}:{version} for {groups=}" - ) - gid_to_group_map = {group.gid: group for group in groups} - - everyone_specs = None - primary_specs = None - teams_specs: dict[GroupID, ServiceSpecificationsAtDB] = {} - - queried_version = packaging.version.parse(version) - # we should instead use semver enabled postgres [https://pgxn.org/dist/semver/doc/semver.html] - async with self.db_engine.connect() as conn: - async for row in await conn.stream( - sa.select([services_specifications]).where( - (services_specifications.c.service_key == key) - & ( - (services_specifications.c.service_version == version) - if not allow_use_latest_service_version - else True - ) - & (services_specifications.c.gid.in_(group.gid for group in groups)) - ), - ): - try: - logger.debug("found following %s", f"{row=}") - # validate the specs first - db_service_spec = ServiceSpecificationsAtDB.from_orm(row) - db_spec_version = packaging.version.parse( - db_service_spec.service_version - ) - if allow_use_latest_service_version and ( - db_spec_version > queried_version - ): - # NOTE: in this case we look for the latest version only (e.g <=queried_version) - # and we skip them if they are above - continue - # filter by group type - group = gid_to_group_map[row.gid] - if (group.group_type == 
GroupType.STANDARD) and _is_newer( - teams_specs.get(db_service_spec.gid), - db_service_spec, - ): - teams_specs[db_service_spec.gid] = db_service_spec - elif (group.group_type == GroupType.EVERYONE) and _is_newer( - everyone_specs, db_service_spec - ): - everyone_specs = db_service_spec - elif (group.group_type == GroupType.PRIMARY) and _is_newer( - primary_specs, db_service_spec - ): - primary_specs = db_service_spec - - except ValidationError as exc: - logger.warning( - "skipping service specifications for group '%s' as invalid: %s", - f"{row.gid}", - f"{exc}", - ) - - if merged_specifications := _merge_specs( - everyone_specs, teams_specs, primary_specs - ): - return ServiceSpecifications.parse_obj(merged_specifications) - return None # mypy - - async def update_latest_versions_cache(self): - # Select query for latest - latest_select_subquery = sa.select( - services_meta_data.c.key, - sa.text( - "array_to_string(MAX(string_to_array(version, '.')::int[]), '.') AS version" - ), - ).group_by(services_meta_data.c.key) - - insert_query = pg_insert(services_latest).from_select( - [services_latest.c.key, services_latest.c.version], - latest_select_subquery, - ) - upsert_query = insert_query.on_conflict_do_update( - index_elements=[ - services_latest.c.key, - ], - set_=dict(version=insert_query.excluded.version), - ) - - async with self.db_engine.begin() as conn: - result = await conn.execute(upsert_query) - - assert result # nosec - - -def _is_newer( - old: Optional[ServiceSpecificationsAtDB], - new: ServiceSpecificationsAtDB, -): - return old is None or ( - packaging.version.parse(old.service_version) - < packaging.version.parse(new.service_version) - ) - - -def _merge_specs( - everyone_spec: Optional[ServiceSpecificationsAtDB], - team_specs: dict[GroupID, ServiceSpecificationsAtDB], - user_spec: Optional[ServiceSpecificationsAtDB], -) -> dict[str, Any]: - merged_spec = {} - for spec in chain([everyone_spec], team_specs.values(), [user_spec]): - if spec is not None: - merged_spec.update(spec.dict(include={"sidecar", "service"})) - return merged_spec diff --git a/services/catalog/src/simcore_service_catalog/db/tables.py b/services/catalog/src/simcore_service_catalog/db/tables.py deleted file mode 100644 index 27735fb203e..00000000000 --- a/services/catalog/src/simcore_service_catalog/db/tables.py +++ /dev/null @@ -1,24 +0,0 @@ -from simcore_postgres_database.models.direct_acyclic_graphs import dags -from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups -from simcore_postgres_database.models.projects import ProjectType, projects -from simcore_postgres_database.models.services import ( - services_access_rights, - services_meta_data, -) -from simcore_postgres_database.models.services_specifications import ( - services_specifications, -) -from simcore_postgres_database.models.users import users - -__all__ = ( - "dags", - "services_meta_data", - "services_access_rights", - "services_specifications", - "users", - "user_to_groups", - "groups", - "GroupType", - "projects", - "ProjectType", -) diff --git a/services/catalog/src/simcore_service_catalog/errors.py b/services/catalog/src/simcore_service_catalog/errors.py new file mode 100644 index 00000000000..7e33eb08d0d --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/errors.py @@ -0,0 +1,22 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class CatalogBaseError(OsparcErrorMixin, Exception): ... 
+ + +class RepositoryError(CatalogBaseError): + msg_template = "Unexpected error in {repo_cls}" + + +class UninitializedGroupError(RepositoryError): + msg_template = "{group} group was never initialized" + + +class BaseDirectorError(CatalogBaseError): ... + + +class DirectorUnresponsiveError(BaseDirectorError): + msg_template = "Director-v0 is not responsive" + + +class DirectorStatusError(BaseDirectorError): ... diff --git a/services/catalog/src/simcore_service_catalog/main.py b/services/catalog/src/simcore_service_catalog/main.py index f775b602a5a..52bd949a542 100644 --- a/services/catalog/src/simcore_service_catalog/main.py +++ b/services/catalog/src/simcore_service_catalog/main.py @@ -1,7 +1,24 @@ """Main application to be deployed in for example uvicorn. """ + +import logging + from fastapi import FastAPI -from simcore_service_catalog.core.application import init_app +from servicelib.logging_utils import config_all_loggers +from simcore_service_catalog.core.application import create_app +from simcore_service_catalog.core.settings import ApplicationSettings + +_the_settings = ApplicationSettings.create_from_envs() + +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=_the_settings.log_level) # NOSONAR +logging.root.setLevel(_the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=_the_settings.CATALOG_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=_the_settings.CATALOG_LOG_FILTER_MAPPING, + tracing_settings=_the_settings.CATALOG_TRACING, +) + # SINGLETON FastAPI app -the_app: FastAPI = init_app() +the_app: FastAPI = create_app() diff --git a/services/catalog/src/simcore_service_catalog/meta.py b/services/catalog/src/simcore_service_catalog/meta.py deleted file mode 100644 index 8b9b74e3e67..00000000000 --- a/services/catalog/src/simcore_service_catalog/meta.py +++ /dev/null @@ -1,22 +0,0 @@ -""" Package Metadata - -""" -import pkg_resources - -_current_distribution = pkg_resources.get_distribution("simcore_service_catalog") - -PROJECT_NAME: str = _current_distribution.project_name - -API_VERSION: str = _current_distribution.version -MAJOR, MINOR, PATCH = _current_distribution.version.split(".") -API_VTAG: str = f"v{MAJOR}" - -__version__ = _current_distribution.version - - -try: - metadata = _current_distribution.get_metadata_lines("METADATA") -except FileNotFoundError: - metadata = _current_distribution.get_metadata_lines("PKG-INFO") - -SUMMARY: str = next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] diff --git a/services/catalog/src/simcore_service_catalog/models/__init__.py b/services/catalog/src/simcore_service_catalog/models/__init__.py index e69de29bb2d..d699149a31c 100644 --- a/services/catalog/src/simcore_service_catalog/models/__init__.py +++ b/services/catalog/src/simcore_service_catalog/models/__init__.py @@ -0,0 +1,4 @@ +""" +This package is intended for domain (i.e. 
internal) models +Any common/base and API (schema) models go in models_library or models_library.api_schemas_catalog +""" diff --git a/services/catalog/src/simcore_service_catalog/models/domain/dag.py b/services/catalog/src/simcore_service_catalog/models/domain/dag.py deleted file mode 100644 index 19dffa0e675..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/domain/dag.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Optional - -from models_library.basic_regex import VERSION_RE -from models_library.emails import LowerCaseEmailStr -from models_library.projects_nodes import Node -from models_library.services import SERVICE_KEY_RE -from pydantic import BaseModel, Field, Json - - -class DAGBase(BaseModel): - key: str = Field( - ..., - regex=SERVICE_KEY_RE, - example="simcore/services/frontend/nodes-group/macros/1", - ) - version: str = Field(..., regex=VERSION_RE, example="1.0.0") - name: str - description: Optional[str] - contact: Optional[LowerCaseEmailStr] - - -class DAGAtDB(DAGBase): - id: int - workbench: Json[dict[str, Node]] # pylint: disable=unsubscriptable-object - - class Config: - orm_mode = True - - -class DAGData(DAGAtDB): - workbench: Optional[dict[str, Node]] diff --git a/services/catalog/src/simcore_service_catalog/models/domain/group.py b/services/catalog/src/simcore_service_catalog/models/domain/group.py deleted file mode 100644 index b8eb35b34c0..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/domain/group.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Optional - -from pydantic import BaseModel, Field -from pydantic.types import PositiveInt - -from ...db.tables import GroupType - - -class Group(BaseModel): - gid: PositiveInt - name: str - description: str - group_type: GroupType = Field(..., alias="type") - thumbnail: Optional[str] - - -class GroupAtDB(Group): - class Config: - orm_mode = True - - schema_extra = { - "example": { - "gid": 218, - "name": "Friends group", - "description": "Joey, Ross, Rachel, Monica, Phoeby and Chandler", - "type": GroupType.STANDARD, - "thumbnail": "https://image.flaticon.com/icons/png/512/23/23374.png", - } - } diff --git a/services/catalog/src/simcore_service_catalog/models/domain/service_specifications.py b/services/catalog/src/simcore_service_catalog/models/domain/service_specifications.py deleted file mode 100644 index d0966f0d0aa..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/domain/service_specifications.py +++ /dev/null @@ -1,13 +0,0 @@ -from models_library.services import ServiceKey, ServiceVersion -from models_library.users import GroupID - -from ..schemas.services_specifications import ServiceSpecifications - - -class ServiceSpecificationsAtDB(ServiceSpecifications): - service_key: ServiceKey - service_version: ServiceVersion - gid: GroupID - - class Config(ServiceSpecifications.Config): - orm_mode: bool = True diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/constants.py b/services/catalog/src/simcore_service_catalog/models/schemas/constants.py deleted file mode 100644 index 0f3a8f3aff8..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/constants.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import Final - -# These are equivalent to pydantic export models but for responses -# SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeldict -# SEE https://fastapi.tiangolo.com/tutorial/response-model/#use-the-response_model_exclude_unset-parameter -RESPONSE_MODEL_POLICY: Final[dict[str, bool]] 
= { - "response_model_by_alias": True, - "response_model_exclude_unset": True, - "response_model_exclude_defaults": False, - "response_model_exclude_none": False, -} - -SECOND: Final[int] = 1 -MINUTE: Final[int] = 60 * SECOND -DIRECTOR_CACHING_TTL: Final[int] = 5 * MINUTE -LIST_SERVICES_CACHING_TTL: Final[int] = 30 * SECOND - -SIMCORE_SERVICE_SETTINGS_LABELS: Final[str] = "simcore.service.settings" diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/dag.py b/services/catalog/src/simcore_service_catalog/models/schemas/dag.py deleted file mode 100644 index 1ebc66db648..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/dag.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Dict, Optional - -from models_library.projects_nodes import Node - -from ..domain.dag import DAGBase, DAGData - - -class DAGIn(DAGBase): - workbench: Optional[Dict[str, Node]] - - -class DAGInPath(DAGBase): - version: str - name: str - description: Optional[str] - contact: Optional[str] - workbench: Optional[Dict[str, Node]] - - -class DAGOut(DAGData): - pass diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/meta.py b/services/catalog/src/simcore_service_catalog/models/schemas/meta.py deleted file mode 100644 index 3cb7da165d4..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/meta.py +++ /dev/null @@ -1,31 +0,0 @@ -import re -from typing import Optional - -from pydantic import BaseModel, ConstrainedStr, Field - -# TODO: review this RE -# use https://www.python.org/dev/peps/pep-0440/#version-scheme -# or https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions -# -VERSION_RE = r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z]+)*)?$" - - -class VersionStr(ConstrainedStr): - regex = re.compile(VERSION_RE) - - -class Meta(BaseModel): - name: str - version: VersionStr - released: Optional[dict[str, VersionStr]] = Field( - None, description="Maps every route's path tag with a released version" - ) - - class Config: - schema_extra = { - "example": { - "name": "simcore_service_foo", - "version": "2.4.45", - "released": {"v1": "1.3.4", "v2": "2.4.45"}, - } - } diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/services.py b/services/catalog/src/simcore_service_catalog/models/schemas/services.py deleted file mode 100644 index d7bef5f214f..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/services.py +++ /dev/null @@ -1,140 +0,0 @@ -from typing import Optional - -from models_library.emails import LowerCaseEmailStr -from models_library.services import ServiceDockerData, ServiceMetaData -from models_library.services_access import ServiceAccessRights -from models_library.services_resources import ServiceResourcesDict -from pydantic import Extra -from pydantic.main import BaseModel - - -# OpenAPI models (contain both service metadata and access rights) -class ServiceUpdate(ServiceMetaData, ServiceAccessRights): - class Config: - schema_extra = { - "example": { - # ServiceAccessRights - "accessRights": { - 1: { - "execute_access": False, - "write_access": False, - }, - 2: { - "execute_access": True, - "write_access": True, - }, - 44: { - "execute_access": False, - "write_access": False, - }, - }, - # ServiceMetaData = ServiceCommonData + - "name": "My Human Readable Service Name", - "thumbnail": None, - "description": "An interesting service that 
does something", - "classifiers": ["RRID:SCR_018997", "RRID:SCR_019001"], - "quality": { - "tsr": { - "r01": {"level": 3, "references": ""}, - "r02": {"level": 2, "references": ""}, - "r03": {"level": 0, "references": ""}, - "r04": {"level": 0, "references": ""}, - "r05": {"level": 2, "references": ""}, - "r06": {"level": 0, "references": ""}, - "r07": {"level": 0, "references": ""}, - "r08": {"level": 1, "references": ""}, - "r09": {"level": 0, "references": ""}, - "r10": {"level": 0, "references": ""}, - }, - "enabled": True, - "annotations": { - "vandv": "", - "purpose": "", - "standards": "", - "limitations": "", - "documentation": "", - "certificationLink": "", - "certificationStatus": "Uncertified", - }, - }, - } - } - - -class ServiceGet( - ServiceDockerData, ServiceAccessRights, ServiceMetaData -): # pylint: disable=too-many-ancestors - owner: Optional[LowerCaseEmailStr] - - class Config: - allow_population_by_field_name = True - extra = Extra.ignore - schema_extra = { - "example": { - "name": "File Picker", - "thumbnail": None, - "description": "File Picker", - "classifiers": [], - "quality": {}, - "accessRights": { - "1": {"execute_access": True, "write_access": False}, - "4": {"execute_access": True, "write_access": True}, - }, - "key": "simcore/services/frontend/file-picker", - "version": "1.0.0", - "integration-version": None, - "type": "dynamic", - "badges": None, - "authors": [ - { - "name": "Red Pandas", - "email": "redpandas@wonderland.com", - "affiliation": None, - } - ], - "contact": "redpandas@wonderland.com", - "inputs": {}, - "outputs": { - "outFile": { - "displayOrder": 0, - "label": "File", - "description": "Chosen File", - "type": "data:*/*", - "fileToKeyMap": None, - "widget": None, - } - }, - "owner": "redpandas@wonderland.com", - } - } - - -# TODO: prototype for next iteration -# Items are non-detailed version of resources listed -class ServiceItem(BaseModel): - class Config: - extra = Extra.ignore - schema_extra = { - "example": { - "title": "File Picker", # NEW: rename 'name' as title (so it is not confused with an identifier!) 
- "thumbnail": None, # optional - "description": "File Picker", - "classifiers_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/classifiers", - "quality": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/quality", - "access_rights_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/access_rights", - "key_id": "simcore/services/frontend/file-picker", # NEW: renames key -> key_id - "version": "1.0.0", - "id": "a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4", # NEW: alternative identifier to key_id:version - "integration-version": "1.0.0", - "type": "dynamic", - "badges_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/badges", - "authors_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/authors", - "inputs_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/inputs", - "outputs_url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4/outputs", - "owner": "maiz@itis.swiss", # NEW, replaces "contact": "maiz@itis.swiss" - "url": "https://catalog:8080/services/a8f5a503-01d5-40bc-b416-f5b7cc5d1fa4", # NEW self - } - } - - -ServiceResourcesGet = ServiceResourcesDict diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/services_ports.py b/services/catalog/src/simcore_service_catalog/models/schemas/services_ports.py deleted file mode 100644 index 1df64110e11..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/services_ports.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Any, Literal, Optional, Union - -from models_library.basic_regex import PUBLIC_VARIABLE_NAME_RE -from models_library.services import ServiceInput, ServiceOutput -from models_library.utils.services_io import ( - get_service_io_json_schema, - guess_media_type, - update_schema_doc, -) -from pydantic import BaseModel, Field - -PortKindStr = Literal["input", "output"] - - -# -# Model ------------------------------------------------------------------------------- -# - - -class ServicePortGet(BaseModel): - key: str = Field( - ..., - description="port identifier name", - regex=PUBLIC_VARIABLE_NAME_RE, - title="Key name", - ) - kind: PortKindStr - content_media_type: Optional[str] = None - content_schema: Optional[dict[str, Any]] = Field( - None, - description="jsonschema for the port's value. 
SEE https://json-schema.org/understanding-json-schema/", - ) - - class Config: - schema_extra = { - "example": { - "key": "input_1", - "kind": "input", - "content_schema": { - "title": "Sleep interval", - "type": "integer", - "x_unit": "second", - "minimum": 0, - "maximum": 5, - }, - } - } - - @classmethod - def from_service_io( - cls, - kind: PortKindStr, - key: str, - port: Union[ServiceInput, ServiceOutput], - ) -> "ServicePortGet": - kwargs: dict[str, Any] = {"key": key, "kind": kind} - - # Convert old format into schemas - schema = port.content_schema - if not schema: - schema = get_service_io_json_schema(port) - - # Deduce media_type - if port.property_type.startswith("data:"): - kwargs["content_media_type"] = guess_media_type(port) - # Based on https://swagger.io/docs/specification/describing-request-body/file-upload/ - schema = update_schema_doc({"type": "string"}, port) - - kwargs["content_schema"] = schema - return cls(**kwargs) diff --git a/services/catalog/src/simcore_service_catalog/models/schemas/services_specifications.py b/services/catalog/src/simcore_service_catalog/models/schemas/services_specifications.py deleted file mode 100644 index 000903e56d2..00000000000 --- a/services/catalog/src/simcore_service_catalog/models/schemas/services_specifications.py +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Optional - -from models_library.generated_models.docker_rest_api import ( - ServiceSpec as DockerServiceSpec, -) -from pydantic import BaseModel, Field - - -class ServiceSpecifications(BaseModel): - sidecar: Optional[DockerServiceSpec] = Field( - default=None, - description="schedule-time specifications for the service sidecar (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.25/#operation/ServiceCreate)", - ) - service: Optional[DockerServiceSpec] = Field( - default=None, - description="schedule-time specifications specifications for the service (follows Docker Service creation API (specifically only the Resources part), see https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate", - ) - - class Config: - pass - - -class ServiceSpecificationsGet(ServiceSpecifications): - ... 
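The schema modules removed above follow pydantic v1 conventions (class Config with orm_mode and schema_extra), whereas the model modules added below use pydantic v2 (ConfigDict(from_attributes=True), json_schema_extra, model_validate). The snippet below is a minimal illustrative sketch of that migration pattern, assuming pydantic v2 is installed; ExampleDBGet and _FakeRow are hypothetical names used only for illustration and are not part of this changeset.

from pydantic import BaseModel, ConfigDict


class ExampleDBGet(BaseModel):
    # pydantic v2 equivalent of the removed v1 "class Config: orm_mode = True"
    model_config = ConfigDict(from_attributes=True)

    key: str
    version: str
    thumbnail: str | None = None


class _FakeRow:
    # stand-in for a SQLAlchemy result row exposing columns as attributes
    key = "simcore/services/dynamic/example"
    version = "1.0.0"
    thumbnail = None


# model_validate() with from_attributes enabled replaces the v1 from_orm()
example = ExampleDBGet.model_validate(_FakeRow())
assert example.version == "1.0.0"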
diff --git a/services/catalog/src/simcore_service_catalog/models/services_db.py b/services/catalog/src/simcore_service_catalog/models/services_db.py new file mode 100644 index 00000000000..3c76eb60a21 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/models/services_db.py @@ -0,0 +1,276 @@ +from datetime import datetime +from typing import Annotated, Any + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import IdInt +from models_library.groups import GroupID +from models_library.products import ProductName +from models_library.rest_filters import Filters +from models_library.services_access import ServiceGroupAccessRights +from models_library.services_base import ServiceKeyVersion +from models_library.services_enums import ServiceType +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.utils.common_validators import empty_str_to_none_pre_validator +from pydantic import ( + BaseModel, + BeforeValidator, + ConfigDict, + Field, + HttpUrl, + field_validator, +) +from pydantic.config import JsonDict +from simcore_postgres_database.models.services_compatibility import CompatiblePolicyDict + + +class ServiceMetaDataDBGet(BaseModel): + # primary-keys + key: ServiceKey + version: ServiceVersion + + # ownership + owner: GroupID | None + + # display + name: str + description: str + description_ui: bool + thumbnail: str | None + icon: str | None + version_display: str | None + + # tagging + classifiers: list[str] + quality: dict[str, Any] + + # lifecycle + created: datetime + modified: datetime + deprecated: datetime | None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "key": "simcore/services/dynamic/reading", + "version": "1.0.9", + "owner": 8, + "name": "reading", + "description": "example for service metadata db GET", + "description_ui": False, + "thumbnail": None, + "icon": "https://picsum.photos/50", + "version_display": "S4L X", + "classifiers": ["foo", "bar"], + "quality": { + "enabled": True, + "tsr_target": { + f"r{n:02d}": {"level": 4, "references": ""} + for n in range(1, 11) + }, + "annotations": { + "vandv": "", + "limitations": "", + "certificationLink": "", + "certificationStatus": "Uncertified", + }, + "tsr_current": { + f"r{n:02d}": {"level": 0, "references": ""} + for n in range(1, 11) + }, + }, + "created": "2021-01-18 12:46:57.7315", + "modified": "2021-01-19 12:45:00", + "deprecated": "2099-01-19 12:45:00", + } + } + ) + + model_config = ConfigDict( + from_attributes=True, json_schema_extra=_update_json_schema_extra + ) + + +def _httpurl_to_str(value: HttpUrl | str | None) -> str | None: + if isinstance(value, HttpUrl): + return f"{value}" + return value + + +class ServiceMetaDataDBCreate(BaseModel): + # primary-keys + key: ServiceKey + version: ServiceVersion + + # ownership + owner: IdInt | None = None + + # display + name: str + description: str + description_ui: bool = False + thumbnail: str | None = None + icon: Annotated[str | None, BeforeValidator(_httpurl_to_str)] = None + version_display: str | None = None + + # tagging + classifiers: Annotated[list[str], Field(default_factory=list)] = DEFAULT_FACTORY + quality: Annotated[dict[str, Any], Field(default_factory=dict)] = DEFAULT_FACTORY + + # lifecycle + deprecated: datetime | None = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "examples": [ + # minimal w/ required values + { + "key": 
"simcore/services/dynamic/creating", + "version": "1.0.9", + "name": "creating", + "description": "example for service metadata db CREATE", + } + ] + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + _prevent_empty_strings_in_nullable_string_cols = field_validator( + "icon", "thumbnail", "version_display", mode="before" + )(empty_str_to_none_pre_validator) + + +class ServiceMetaDataDBPatch(BaseModel): + # ownership + owner: IdInt | None = None + + # display + name: str | None = None + description: str | None = None + description_ui: bool = False + version_display: str | None = None + thumbnail: str | None = None + icon: str | None = None + + # tagging + classifiers: Annotated[list[str], Field(default_factory=list)] = DEFAULT_FACTORY + quality: Annotated[dict[str, Any], Field(default_factory=dict)] = DEFAULT_FACTORY + + # lifecycle + deprecated: datetime | None = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "name": "patching", + "description": "example for service metadata db PATCH", + "thumbnail": "https://picsum.photos/200", + "icon": "https://picsum.photos/50", + "version_display": "S4L X", + } + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) + + _prevent_empty_strings_in_nullable_string_cols = field_validator( + "icon", "thumbnail", "version_display", mode="before" + )(empty_str_to_none_pre_validator) + + +class ReleaseDBGet(BaseModel): + version: ServiceVersion + version_display: str | None + deprecated: datetime | None + created: datetime + compatibility_policy: CompatiblePolicyDict | None + + +class ServiceWithHistoryDBGet(BaseModel): + key: ServiceKey + version: ServiceVersion + # display + name: str + description: str + description_ui: bool + thumbnail: str | None + icon: str | None + version_display: str | None + # ownership + owner_email: str | None + # tags + classifiers: list[str] + quality: dict[str, Any] + # lifetime + created: datetime + modified: datetime + deprecated: datetime | None + # releases + history: list[ReleaseDBGet] + + +assert ( # nosec + set(ReleaseDBGet.model_fields) + .difference({"compatibility_policy"}) + .issubset(set(ServiceWithHistoryDBGet.model_fields)) +) + + +class ServiceAccessRightsDB(ServiceKeyVersion, ServiceGroupAccessRights): + gid: GroupID + product_name: ProductName + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "key": "simcore/services/dynamic/sim4life", + "version": "1.0.9", + "gid": 8, + "execute_access": True, + "write_access": True, + "product_name": "osparc", + "created": "2021-01-18 12:46:57.7315", + "modified": "2021-01-19 12:45:00", + } + } + ) + + model_config = ConfigDict( + from_attributes=True, json_schema_extra=_update_json_schema_extra + ) + + +class ServiceDBFilters(Filters): + service_type: ServiceType | None = None + service_key_pattern: str | None = None + version_display_pattern: str | None = None + + @staticmethod + def _update_json_schema_extra(schema: JsonDict) -> None: + schema.update( + { + "example": { + "service_type": "computational", + }, + "examples": [ + { + "service_key_pattern": "simcore/services/dynamic/*", + "version_display_pattern": "S4L X", + }, + { + "service_type": "computational", + "version_display_pattern": "S4L X", + }, + ], + } + ) + + model_config = ConfigDict(json_schema_extra=_update_json_schema_extra) diff --git a/services/catalog/src/simcore_service_catalog/models/services_ports.py 
b/services/catalog/src/simcore_service_catalog/models/services_ports.py new file mode 100644 index 00000000000..24e26749dc4 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/models/services_ports.py @@ -0,0 +1,15 @@ +from typing import Annotated, Literal + +from models_library.services_io import ServiceInput, ServiceOutput +from pydantic import BaseModel, Field + + +class ServicePort(BaseModel): + kind: Annotated[ + Literal["input", "output"], + Field(description="Whether this is an input or output port"), + ] + key: Annotated[ + str, Field(description="The unique identifier for this port within the service") + ] + port: ServiceInput | ServiceOutput diff --git a/services/catalog/src/simcore_service_catalog/models/services_specifications.py b/services/catalog/src/simcore_service_catalog/models/services_specifications.py new file mode 100644 index 00000000000..fc03805537f --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/models/services_specifications.py @@ -0,0 +1,14 @@ +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.groups import GroupID +from models_library.services import ServiceKey, ServiceVersion +from pydantic import ConfigDict + + +class ServiceSpecificationsAtDB(ServiceSpecifications): + service_key: ServiceKey + service_version: ServiceVersion + gid: GroupID + + model_config = ConfigDict(from_attributes=True) diff --git a/services/api-server/src/simcore_service_api_server/db/repositories/__init__.py b/services/catalog/src/simcore_service_catalog/repository/__init__.py similarity index 100% rename from services/api-server/src/simcore_service_api_server/db/repositories/__init__.py rename to services/catalog/src/simcore_service_catalog/repository/__init__.py diff --git a/services/catalog/src/simcore_service_catalog/repository/_base.py b/services/catalog/src/simcore_service_catalog/repository/_base.py new file mode 100644 index 00000000000..4a20b37c735 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/_base.py @@ -0,0 +1,12 @@ +from dataclasses import dataclass + +from sqlalchemy.ext.asyncio import AsyncEngine + + +@dataclass +class BaseRepository: + """ + Repositories are pulled at every request + """ + + db_engine: AsyncEngine diff --git a/services/catalog/src/simcore_service_catalog/repository/_services_sql.py b/services/catalog/src/simcore_service_catalog/repository/_services_sql.py new file mode 100644 index 00000000000..ca05641a592 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/_services_sql.py @@ -0,0 +1,514 @@ +from typing import Any + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.services_regex import ( + SERVICE_TYPE_TO_PREFIX_MAP, +) +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from simcore_postgres_database.models.groups import user_to_groups +from simcore_postgres_database.models.services import ( + services_access_rights, + services_meta_data, +) +from simcore_postgres_database.models.services_compatibility import ( + services_compatibility, +) +from simcore_postgres_database.models.users import users +from simcore_postgres_database.utils_repos import get_columns_from_db_model +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER, array_agg +from sqlalchemy.sql import and_, or_ +from sqlalchemy.sql.expression import func +from sqlalchemy.sql.selectable import Select + +from ..models.services_db 
import ServiceDBFilters, ServiceMetaDataDBGet + +SERVICES_META_DATA_COLS = get_columns_from_db_model( + services_meta_data, ServiceMetaDataDBGet +) + + +def list_services_stmt( + *, + gids: list[int] | None = None, + execute_access: bool | None = None, + write_access: bool | None = None, + combine_access_with_and: bool | None = True, + product_name: str | None = None, +) -> Select: + stmt = sa.select(*SERVICES_META_DATA_COLS) + if gids or execute_access or write_access: + conditions: list[Any] = [] + + # access rights + logic_operator = and_ if combine_access_with_and else or_ + default = bool(combine_access_with_and) + + access_query_part = logic_operator( # type: ignore[type-var] + services_access_rights.c.execute_access if execute_access else default, + services_access_rights.c.write_access if write_access else default, + ) + conditions.append(access_query_part) + + # on groups + if gids: + conditions.append( + or_(*[services_access_rights.c.gid == gid for gid in gids]) + ) + + # and product name + if product_name: + conditions.append(services_access_rights.c.product_name == product_name) + + stmt = stmt.distinct( + services_meta_data.c.key, services_meta_data.c.version + ).select_from(services_meta_data.join(services_access_rights)) + if conditions: + stmt = stmt.where(and_(*conditions)) + stmt = stmt.order_by(services_meta_data.c.key, services_meta_data.c.version) + + return stmt + + +def by_version(column_or_value): + # converts version value string to array[integer] that can be compared + # i.e. '1.2.3' -> [1, 2, 3] + return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) + + +class AccessRightsClauses: + can_execute = services_access_rights.c.execute_access + can_read = ( + services_access_rights.c.execute_access | services_access_rights.c.write_access + ) + can_edit = services_access_rights.c.write_access + is_owner = ( + services_access_rights.c.execute_access & services_access_rights.c.write_access + ) + + +def _join_services_with_access_rights(): + # services_meta_data | services_access_rights | user_to_groups + return services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + + +def _has_access_rights( + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + service_key: ServiceKey, + service_version: ServiceVersion, +): + return ( + (services_meta_data.c.key == service_key) + & (services_meta_data.c.version == service_version) + & (user_to_groups.c.uid == user_id) + & (services_access_rights.c.product_name == product_name) + & access_rights + ) + + +def apply_services_filters( + stmt: sa.sql.Select, + filters: ServiceDBFilters, +) -> sa.sql.Select: + conditions = [] + + if filters.service_type: + prefix = SERVICE_TYPE_TO_PREFIX_MAP.get(filters.service_type) + if prefix is None: + msg = f"Undefined service type {filters.service_type}. 
Please update prefix expressions" + raise ValueError(msg) + + assert not prefix.endswith("/") # nosec + conditions.append(services_meta_data.c.key.like(f"{prefix}/%")) + + if filters.service_key_pattern: + # Convert glob pattern to SQL LIKE pattern + sql_pattern = filters.service_key_pattern.replace("*", "%") + conditions.append(services_meta_data.c.key.like(sql_pattern)) + + if filters.version_display_pattern: + # Convert glob pattern to SQL LIKE pattern and handle NULL values + sql_pattern = filters.version_display_pattern.replace("*", "%") + version_display_condition = services_meta_data.c.version_display.like( + sql_pattern + ) + + if sql_pattern == "%": + conditions.append( + sa.or_( + version_display_condition, + # If pattern==*, also match NULL when rest is empty + services_meta_data.c.version_display.is_(None), + ) + ) + else: + conditions.append(version_display_condition) + + if conditions: + stmt = stmt.where(sa.and_(*conditions)) + + return stmt + + +def latest_services_total_count_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + filters: ServiceDBFilters | None = None, +): + stmt = ( + sa.select(func.count(sa.distinct(services_meta_data.c.key))) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version) + & (services_access_rights.c.product_name == product_name), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid) + & (user_to_groups.c.uid == user_id), + ) + ) + .where(access_rights) + ) + + if filters: + stmt = apply_services_filters(stmt, filters) + + return stmt + + +def list_latest_services_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + limit: int | None, + offset: int | None, + filters: ServiceDBFilters | None = None, +): + # get all distinct services key fitting a page + # and its corresponding latest version + cte_stmt = ( + sa.select( + services_meta_data.c.key, + services_meta_data.c.version.label("latest_version"), + ) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version) + & (services_access_rights.c.product_name == product_name), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid) + & (user_to_groups.c.uid == user_id), + ) + ) + .where(access_rights) + .order_by( + services_meta_data.c.key, + sa.desc(by_version(services_meta_data.c.version)), # latest first + ) + .distinct(services_meta_data.c.key) # get only first + .limit(limit) + .offset(offset) + ) + + if filters: + cte_stmt = apply_services_filters(cte_stmt, filters) + + cte = cte_stmt.cte("cte") + + # get all information of latest's services listed in CTE + latest_stmt = ( + sa.select( + services_meta_data.c.key, + services_meta_data.c.version, + users.c.email.label("owner_email"), + services_meta_data.c.name, + services_meta_data.c.description, + services_meta_data.c.description_ui, + services_meta_data.c.thumbnail, + services_meta_data.c.icon, + services_meta_data.c.version_display, + services_meta_data.c.classifiers, + services_meta_data.c.created, + services_meta_data.c.modified, + services_meta_data.c.deprecated, + services_meta_data.c.quality, + ) + .join( + cte, + (services_meta_data.c.key == cte.c.key) + & (services_meta_data.c.version == 
cte.c.latest_version), + ) + # NOTE: owner can be NULL + .join( + user_to_groups, + services_meta_data.c.owner == user_to_groups.c.gid, + isouter=True, + ) + .join(users, user_to_groups.c.uid == users.c.id, isouter=True) + .subquery("latest_sq") + ) + + return sa.select( + latest_stmt.c.key, + latest_stmt.c.version, + # display + latest_stmt.c.name, + latest_stmt.c.description, + latest_stmt.c.description_ui, + latest_stmt.c.thumbnail, + latest_stmt.c.icon, + latest_stmt.c.version_display, + # ownership + latest_stmt.c.owner_email, + # tags + latest_stmt.c.classifiers, + latest_stmt.c.quality, + # lifetime + latest_stmt.c.created, + latest_stmt.c.modified, + latest_stmt.c.deprecated, + ).order_by(latest_stmt.c.key) + + +def can_get_service_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + service_key: ServiceKey, + service_version: ServiceVersion, +): + subquery = ( + sa.select(1) + .select_from(_join_services_with_access_rights()) + .where( + _has_access_rights( + product_name=product_name, + user_id=user_id, + access_rights=access_rights, + service_key=service_key, + service_version=service_version, + ) + ) + .limit(1) + ) + + return sa.select(sa.exists(subquery)) + + +def get_service_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + service_key: ServiceKey, + service_version: ServiceVersion, +): + owner_subquery = ( + sa.select(users.c.email) + .select_from(user_to_groups.join(users, user_to_groups.c.uid == users.c.id)) + .where(user_to_groups.c.gid == services_meta_data.c.owner) + .limit(1) + .scalar_subquery() + ) + + return ( + sa.select( + services_meta_data.c.key, + services_meta_data.c.version, + # display + services_meta_data.c.name, + services_meta_data.c.description, + services_meta_data.c.description_ui, + services_meta_data.c.thumbnail, + services_meta_data.c.icon, + services_meta_data.c.version_display, + # ownership + owner_subquery.label("owner_email"), + # tags + services_meta_data.c.classifiers, + services_meta_data.c.quality, + # lifetime + services_meta_data.c.created, + services_meta_data.c.modified, + services_meta_data.c.deprecated, + # w/o releases history! 
+ ) + .select_from(_join_services_with_access_rights()) + .where( + _has_access_rights( + product_name=product_name, + user_id=user_id, + access_rights=access_rights, + service_key=service_key, + service_version=service_version, + ) + ) + .limit(1) + ) + + +def get_service_history_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: sa.sql.ClauseElement, + service_key: ServiceKey, +): + _sq = ( + sa.select( + services_meta_data.c.key, + services_meta_data.c.version, + services_meta_data.c.version_display, + services_meta_data.c.deprecated, + services_meta_data.c.created, + services_compatibility.c.custom_policy, # CompatiblePolicyDict | None + ) + .select_from( + # joins because access-rights might change per version + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version), + ) + .join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + .outerjoin( + services_compatibility, + (services_meta_data.c.key == services_compatibility.c.key) + & (services_meta_data.c.version == services_compatibility.c.version), + ) + ) + .where( + (services_meta_data.c.key == service_key) + & (services_access_rights.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + & access_rights + ) + .distinct() + ).subquery() + + history_subquery = ( + sa.select(_sq) + .order_by( + sa.desc(by_version(_sq.c.version)), # latest version first + ) + .alias("history_subquery") + ) + + return ( + sa.select( + array_agg( + func.json_build_object( + "version", + history_subquery.c.version, + "version_display", + history_subquery.c.version_display, + "deprecated", + history_subquery.c.deprecated, + "created", + history_subquery.c.created, + "compatibility_policy", # NOTE: this is the `policy` + history_subquery.c.custom_policy, + ) + ).label("history"), + ) + .select_from(history_subquery) + .group_by(history_subquery.c.key) + ) + + +def all_services_total_count_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: AccessRightsClauses, + filters: ServiceDBFilters | None = None, +) -> sa.sql.Select: + """Statement to count all services""" + stmt = ( + sa.select(sa.func.count()) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + ) + .where( + (services_access_rights.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + & access_rights + ) + ) + + if filters: + stmt = apply_services_filters(stmt, filters) + + return stmt + + +def list_all_services_stmt( + *, + product_name: ProductName, + user_id: UserID, + access_rights: AccessRightsClauses, + limit: int | None = None, + offset: int | None = None, + filters: ServiceDBFilters | None = None, +) -> sa.sql.Select: + """Statement to list all services with pagination""" + stmt = ( + sa.select(*SERVICES_META_DATA_COLS) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & (services_meta_data.c.version == services_access_rights.c.version), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + ) + .where( + (services_access_rights.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + 
& access_rights + ) + .order_by( + services_meta_data.c.key, sa.desc(by_version(services_meta_data.c.version)) + ) + ) + + if filters: + stmt = apply_services_filters(stmt, filters) + + if offset is not None: + stmt = stmt.offset(offset) + if limit is not None: + stmt = stmt.limit(limit) + + return stmt diff --git a/services/catalog/src/simcore_service_catalog/repository/events.py b/services/catalog/src/simcore_service_catalog/repository/events.py new file mode 100644 index 00000000000..af6e77f178b --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/events.py @@ -0,0 +1,28 @@ +import logging +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from servicelib.fastapi.postgres_lifespan import ( + PostgresLifespanState, + postgres_database_lifespan, +) + +from .products import ProductsRepository + +_logger = logging.getLogger(__name__) + + +async def _database_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]: + app.state.engine = state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE] + + repo = ProductsRepository(db_engine=app.state.engine) + + app.state.default_product_name = await repo.get_default_product_name() + + yield {} + + +repository_lifespan_manager = LifespanManager() +repository_lifespan_manager.add(postgres_database_lifespan) +repository_lifespan_manager.add(_database_lifespan) diff --git a/services/catalog/src/simcore_service_catalog/repository/groups.py b/services/catalog/src/simcore_service_catalog/repository/groups.py new file mode 100644 index 00000000000..076ed0baf8b --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/groups.py @@ -0,0 +1,87 @@ +from typing import cast + +import sqlalchemy as sa +from models_library.emails import LowerCaseEmailStr +from models_library.groups import GroupAtDB +from pydantic import TypeAdapter +from pydantic.types import PositiveInt +from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups +from simcore_postgres_database.models.users import users + +from ..errors import UninitializedGroupError +from ._base import BaseRepository + + +class GroupsRepository(BaseRepository): + async def list_user_groups(self, user_id: int) -> list[GroupAtDB]: + async with self.db_engine.connect() as conn: + return [ + GroupAtDB.model_validate(row) + async for row in await conn.stream( + sa.select(groups) + .select_from( + user_to_groups.join( + groups, user_to_groups.c.gid == groups.c.gid + ), + ) + .where(user_to_groups.c.uid == user_id) + ) + ] + + async def get_everyone_group(self) -> GroupAtDB: + async with self.db_engine.connect() as conn: + result = await conn.execute( + sa.select(groups).where(groups.c.type == GroupType.EVERYONE) + ) + row = result.first() + if not row: + raise UninitializedGroupError( + group=GroupType.EVERYONE, repo_cls=GroupsRepository + ) + return GroupAtDB.model_validate(row) + + async def get_user_gid_from_email( + self, user_email: LowerCaseEmailStr + ) -> PositiveInt | None: + async with self.db_engine.connect() as conn: + return cast( + PositiveInt | None, + await conn.scalar( + sa.select(users.c.primary_gid).where(users.c.email == user_email) + ), + ) + + async def get_gid_from_affiliation(self, affiliation: str) -> PositiveInt | None: + async with self.db_engine.connect() as conn: + return cast( + PositiveInt | None, + await conn.scalar( + sa.select(groups.c.gid).where(groups.c.name == affiliation) + ), + ) + + async def get_user_email_from_gid( + self, gid: 
PositiveInt + ) -> LowerCaseEmailStr | None: + async with self.db_engine.connect() as conn: + email = await conn.scalar( + sa.select(users.c.email).where(users.c.primary_gid == gid) + ) + return email or None + + async def list_user_emails_from_gids( + self, gids: set[PositiveInt] + ) -> dict[PositiveInt, LowerCaseEmailStr | None]: + service_owners = {} + async with self.db_engine.connect() as conn: + async for row in await conn.stream( + sa.select([users.c.primary_gid, users.c.email]).where( + users.c.primary_gid.in_(gids) + ) + ): + service_owners[row[users.c.primary_gid]] = ( + TypeAdapter(LowerCaseEmailStr).validate_python(row[users.c.email]) + if row[users.c.email] + else None + ) + return service_owners diff --git a/services/catalog/src/simcore_service_catalog/repository/products.py b/services/catalog/src/simcore_service_catalog/repository/products.py new file mode 100644 index 00000000000..ea59f9dab05 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/products.py @@ -0,0 +1,10 @@ +from simcore_postgres_database.utils_products import get_default_product_name + +from ._base import BaseRepository + + +class ProductsRepository(BaseRepository): + async def get_default_product_name(self) -> str: + async with self.db_engine.connect() as conn: + product_name: str = await get_default_product_name(conn) + return product_name diff --git a/services/catalog/src/simcore_service_catalog/repository/projects.py b/services/catalog/src/simcore_service_catalog/repository/projects.py new file mode 100644 index 00000000000..48de3867a6c --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/projects.py @@ -0,0 +1,40 @@ +import logging + +import sqlalchemy as sa +from models_library.services import ServiceKeyVersion +from pydantic import ValidationError +from simcore_postgres_database.models.projects import ProjectType, projects + +from ._base import BaseRepository + +_logger = logging.getLogger(__name__) + + +class ProjectsRepository(BaseRepository): + async def list_services_from_published_templates(self) -> list[ServiceKeyVersion]: + list_of_published_services: list[ServiceKeyVersion] = [] + async with self.db_engine.connect() as conn: + async for row in await conn.stream( + sa.select(projects).where( + (projects.c.type == ProjectType.TEMPLATE) + & (projects.c.published.is_(True)) + ) + ): + project_workbench = row.workbench + for node in project_workbench: + service = project_workbench[node] + try: + if ( + "file-picker" in service["key"] + or "nodes-group" in service["key"] + ): + # these 2 are not going to pass the validation tests, they are frontend only nodes. 
+ continue + list_of_published_services.append(ServiceKeyVersion(**service)) + except ValidationError: + _logger.warning( + "service %s could not be validated", service, exc_info=True + ) + continue + + return list_of_published_services diff --git a/services/catalog/src/simcore_service_catalog/repository/services.py b/services/catalog/src/simcore_service_catalog/repository/services.py new file mode 100644 index 00000000000..12e0d88a0fe --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/repository/services.py @@ -0,0 +1,810 @@ +import itertools +import logging +from collections import defaultdict +from collections.abc import Iterable +from typing import Any + +import packaging.version +import sqlalchemy as sa +from common_library.groups_enums import GroupType +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.groups import GroupAtDB, GroupID +from models_library.products import ProductName +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID +from psycopg2.errors import ForeignKeyViolation +from pydantic import PositiveInt, TypeAdapter, ValidationError +from simcore_postgres_database.models.groups import user_to_groups +from simcore_postgres_database.models.services import ( + services_access_rights, + services_meta_data, +) +from simcore_postgres_database.models.services_compatibility import ( + services_compatibility, +) +from simcore_postgres_database.models.services_specifications import ( + services_specifications, +) +from simcore_postgres_database.utils_repos import pass_or_acquire_connection +from simcore_postgres_database.utils_services import create_select_latest_services_query +from sqlalchemy import sql +from sqlalchemy.dialects.postgresql import insert as pg_insert + +from ..models.services_db import ( + ReleaseDBGet, + ServiceAccessRightsDB, + ServiceDBFilters, + ServiceMetaDataDBCreate, + ServiceMetaDataDBGet, + ServiceMetaDataDBPatch, + ServiceWithHistoryDBGet, +) +from ..models.services_specifications import ServiceSpecificationsAtDB +from . 
import _services_sql +from ._base import BaseRepository +from ._services_sql import ( + SERVICES_META_DATA_COLS, + AccessRightsClauses, +) + +_logger = logging.getLogger(__name__) + + +def _is_newer( + old: ServiceSpecificationsAtDB | None, + new: ServiceSpecificationsAtDB, +) -> bool: + return old is None or ( + packaging.version.parse(old.service_version) + < packaging.version.parse(new.service_version) + ) + + +def _merge_specs( + everyone_spec: ServiceSpecificationsAtDB | None, + team_specs: dict[GroupID, ServiceSpecificationsAtDB], + user_spec: ServiceSpecificationsAtDB | None, +) -> dict[str, Any]: + merged_spec = {} + for spec in itertools.chain([everyone_spec], team_specs.values(), [user_spec]): + if spec is not None: + merged_spec.update(spec.model_dump(include={"sidecar", "service"})) + return merged_spec + + +class ServicesRepository(BaseRepository): + """ + API that operates on services_access_rights and services_meta_data tables + """ + + async def list_services( + self, + *, + gids: list[int] | None = None, + execute_access: bool | None = None, + write_access: bool | None = None, + combine_access_with_and: bool | None = True, + product_name: str | None = None, + ) -> list[ServiceMetaDataDBGet]: + + async with self.db_engine.connect() as conn: + return [ + ServiceMetaDataDBGet.model_validate(row) + async for row in await conn.stream( + _services_sql.list_services_stmt( + gids=gids, + execute_access=execute_access, + write_access=write_access, + combine_access_with_and=combine_access_with_and, + product_name=product_name, + ) + ) + ] + + async def list_service_releases( + self, + key: str, + *, + major: int | None = None, + minor: int | None = None, + limit_count: int | None = None, + ) -> list[ServiceMetaDataDBGet]: + """Lists LAST n releases of a given service, sorted from latest first + + major, minor is used to filter as major.minor.* or major.* + limit_count limits returned value. 
None or non-positive values returns all matches + """ + if minor is not None and major is None: + msg = "Expected only major.*.* or major.minor.*" + raise ValueError(msg) + + search_condition = services_meta_data.c.key == key + if major is not None: + if minor is not None: + # All patches + search_condition &= services_meta_data.c.version.like( + f"{major}.{minor}.%" + ) + else: + # All minor and patches + search_condition &= services_meta_data.c.version.like(f"{major}.%") + + query = ( + sa.select(*SERVICES_META_DATA_COLS) + .where(search_condition) + .order_by(sa.desc(services_meta_data.c.version)) + ) + + if limit_count and limit_count > 0: + query = query.limit(limit_count) + + async with self.db_engine.connect() as conn: + releases = [ + ServiceMetaDataDBGet.model_validate(row) + async for row in await conn.stream(query) + ] + + # Now sort naturally from latest first: (This is lame, the sorting should be done in the db) + def _by_version(x: ServiceMetaDataDBGet) -> packaging.version.Version: + return packaging.version.parse(x.version) + + return sorted(releases, key=_by_version, reverse=True) + + async def get_latest_release(self, key: str) -> ServiceMetaDataDBGet | None: + """Returns last release or None if service was never released""" + services_latest = create_select_latest_services_query().alias("services_latest") + + query = ( + sa.select(*SERVICES_META_DATA_COLS) + .select_from( + services_latest.join( + services_meta_data, + (services_meta_data.c.key == services_latest.c.key) + & (services_meta_data.c.version == services_latest.c.latest), + ) + ) + .where(services_latest.c.key == key) + ) + async with self.db_engine.connect() as conn: + result = await conn.execute(query) + row = result.first() + if row: + return ServiceMetaDataDBGet.model_validate(row) + return None # mypy + + async def get_service( + self, + key: str, + version: str, + *, + gids: list[int] | None = None, + execute_access: bool | None = None, + write_access: bool | None = None, + product_name: str | None = None, + ) -> ServiceMetaDataDBGet | None: + + query = sa.select(*SERVICES_META_DATA_COLS) + + if gids or execute_access or write_access: + conditions = [ + services_meta_data.c.key == key, + services_meta_data.c.version == version, + ] + if gids: + conditions.append( + sql.or_(*[services_access_rights.c.gid == gid for gid in gids]) + ) + if execute_access is not None: + conditions.append(services_access_rights.c.execute_access) + if write_access is not None: + conditions.append(services_access_rights.c.write_access) + if product_name: + conditions.append(services_access_rights.c.product_name == product_name) + + query = query.select_from( + services_meta_data.join(services_access_rights) + ).where(sql.and_(*conditions)) + else: + query = query.where( + (services_meta_data.c.key == key) + & (services_meta_data.c.version == version) + ) + + async with self.db_engine.connect() as conn: + result = await conn.execute(query) + row = result.first() + if row: + return ServiceMetaDataDBGet.model_validate(row) + return None # mypy + + async def create_or_update_service( + self, + new_service: ServiceMetaDataDBCreate, + new_service_access_rights: list[ServiceAccessRightsDB], + ) -> ServiceMetaDataDBGet: + for access_rights in new_service_access_rights: + if ( + access_rights.key != new_service.key + or access_rights.version != new_service.version + ): + msg = f"{access_rights} does not correspond to service {new_service.key}:{new_service.version}" + raise ValueError(msg) + + async with self.db_engine.begin() as conn: + 
# NOTE: this ensure proper rollback in case of issue + result = await conn.execute( + # pylint: disable=no-value-for-parameter + services_meta_data.insert() + .values(**new_service.model_dump(exclude_unset=True)) + .returning(*SERVICES_META_DATA_COLS) + ) + row = result.first() + assert row # nosec + created_service = ServiceMetaDataDBGet.model_validate(row) + + for access_rights in new_service_access_rights: + insert_stmt = pg_insert(services_access_rights).values( + **jsonable_encoder(access_rights, by_alias=True) + ) + await conn.execute(insert_stmt) + return created_service + + async def update_service( + self, + service_key: ServiceKey, + service_version: ServiceVersion, + patched_service: ServiceMetaDataDBPatch, + ) -> None: + + stmt_update = ( + services_meta_data.update() + .where( + (services_meta_data.c.key == service_key) + & (services_meta_data.c.version == service_version) + ) + .values( + **patched_service.model_dump( + by_alias=True, + exclude_unset=True, + exclude={"key", "version"}, + ) + ) + ) + async with self.db_engine.begin() as conn: + await conn.execute(stmt_update) + + async def can_get_service( + self, + # access-rights + product_name: ProductName, + user_id: UserID, + # get args + key: ServiceKey, + version: ServiceVersion, + ) -> bool: + """Returns False if it cannot get the service i.e. not found or does not have access""" + async with self.db_engine.begin() as conn: + result = await conn.execute( + _services_sql.can_get_service_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=key, + service_version=version, + ) + ) + return bool(result.scalar()) + + async def can_update_service( + self, + # access-rights + product_name: ProductName, + user_id: UserID, + # get args + key: ServiceKey, + version: ServiceVersion, + ) -> bool: + async with self.db_engine.begin() as conn: + result = await conn.execute( + _services_sql.can_get_service_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_edit, + service_key=key, + service_version=version, + ) + ) + return bool(result.scalar()) + + async def get_service_with_history( + self, + # access-rights + product_name: ProductName, + user_id: UserID, + # get args + key: ServiceKey, + version: ServiceVersion, + ) -> ServiceWithHistoryDBGet | None: + + stmt_get = _services_sql.get_service_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=key, + service_version=version, + ) + + async with self.db_engine.begin() as conn: + result = await conn.execute(stmt_get) + row = result.one_or_none() + + if row: + stmt_history = _services_sql.get_service_history_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=key, + ) + async with self.db_engine.begin() as conn: + result = await conn.execute(stmt_history) + row_h = result.one_or_none() + + return ServiceWithHistoryDBGet( + key=row.key, + version=row.version, + # display + name=row.name, + description=row.description, + description_ui=row.description_ui, + icon=row.icon, + thumbnail=row.thumbnail, + version_display=row.version_display, + # ownership + owner_email=row.owner_email, + # tagging + classifiers=row.classifiers, + quality=row.quality, + # lifetime + created=row.created, + modified=row.modified, + deprecated=row.deprecated, + # releases + history=row_h.history if row_h else [], + ) + return None + + async def list_all_services( + self, + *, + # 
access-rights + product_name: ProductName, + user_id: UserID, + # list args: pagination + pagination_limit: int | None = None, + pagination_offset: int | None = None, + filters: ServiceDBFilters | None = None, + ) -> tuple[PositiveInt, list[ServiceMetaDataDBGet]]: + # Create base query that's common to both count and content queries + base_query = ( + sa.select(services_meta_data.c.key, services_meta_data.c.version) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & ( + services_meta_data.c.version == services_access_rights.c.version + ), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + ) + .where( + (services_access_rights.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + & AccessRightsClauses.can_read + ) + .distinct() + ) + + if filters: + base_query = _services_sql.apply_services_filters(base_query, filters) + + # Subquery for efficient counting and further joins + subquery = base_query.subquery() + + # Count query - only counts distinct key/version pairs + stmt_total = sa.select(sa.func.count()).select_from(subquery) + + # Content query - gets all details with pagination + stmt_page = ( + sa.select(*SERVICES_META_DATA_COLS) + .select_from( + subquery.join( + services_meta_data, + (subquery.c.key == services_meta_data.c.key) + & (subquery.c.version == services_meta_data.c.version), + ) + ) + .order_by( + services_meta_data.c.key, + sa.desc(_services_sql.by_version(services_meta_data.c.version)), + ) + ) + + # Apply pagination to content query + if pagination_offset is not None: + stmt_page = stmt_page.offset(pagination_offset) + if pagination_limit is not None: + stmt_page = stmt_page.limit(pagination_limit) + + # Execute both queries + async with self.db_engine.connect() as conn: + result = await conn.execute(stmt_total) + total_count = result.scalar() or 0 + + items_page = [ + ServiceMetaDataDBGet.model_validate(row) + async for row in await conn.stream(stmt_page) + ] + + return (total_count, items_page) + + async def list_latest_services( + self, + *, + # access-rights + product_name: ProductName, + user_id: UserID, + # list args: pagination + pagination_limit: int | None = None, + pagination_offset: int | None = None, + filters: ServiceDBFilters | None = None, + ) -> tuple[PositiveInt, list[ServiceWithHistoryDBGet]]: + + # get page + stmt_total = _services_sql.latest_services_total_count_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + filters=filters, + ) + stmt_page = _services_sql.list_latest_services_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + limit=pagination_limit, + offset=pagination_offset, + filters=filters, + ) + + async with self.db_engine.connect() as conn: + result = await conn.execute(stmt_total) + total_count = result.scalar() or 0 + + result = await conn.execute(stmt_page) + rows = result.fetchall() + assert len(rows) <= total_count # nosec + + # compose history with latest + items_page = [ + ServiceWithHistoryDBGet( + key=r.key, + version=r.version, + # display + name=r.name, + description=r.description, + description_ui=r.description_ui, + thumbnail=r.thumbnail, + icon=r.icon, + version_display=r.version_display, + # ownership + owner_email=r.owner_email, + # tagging + classifiers=r.classifiers, + quality=r.quality, + # lifetime + created=r.created, + modified=r.modified, + deprecated=r.deprecated, + # 
releases + history=[], # NOTE: for listing we will not add history. Only get service will produce history + ) + for r in rows + ] + + return (total_count, items_page) + + async def get_service_history( + self, + # access-rights + product_name: ProductName, + user_id: UserID, + # get args + key: ServiceKey, + ) -> list[ReleaseDBGet]: + """ + DEPRECATED: use get_service_history_page instead! + """ + stmt_history = _services_sql.get_service_history_stmt( + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=key, + ) + async with self.db_engine.connect() as conn: + result = await conn.execute(stmt_history) + row = result.one_or_none() + + return ( + TypeAdapter(list[ReleaseDBGet]).validate_python(row.history) if row else [] + ) + + async def get_service_history_page( + self, + *, + # access-rights + product_name: ProductName, + user_id: UserID, + # get args + key: ServiceKey, + # list args: pagination + pagination_limit: int | None = None, + pagination_offset: int | None = None, + filters: ServiceDBFilters | None = None, + ) -> tuple[PositiveInt, list[ReleaseDBGet]]: + + base_stmt = ( + # Search on service (key, *) for (product_name, user_id w/ access) + sql.select( + services_meta_data.c.key, + services_meta_data.c.version, + ) + .select_from( + services_meta_data.join( + services_access_rights, + (services_meta_data.c.key == services_access_rights.c.key) + & ( + services_meta_data.c.version == services_access_rights.c.version + ), + ).join( + user_to_groups, + (user_to_groups.c.gid == services_access_rights.c.gid), + ) + ) + .where( + (services_meta_data.c.key == key) + & (services_access_rights.c.product_name == product_name) + & (user_to_groups.c.uid == user_id) + & AccessRightsClauses.can_read + ) + ) + + if filters: + base_stmt = _services_sql.apply_services_filters(base_stmt, filters) + + base_subquery = base_stmt.subquery() + + # Query to count the TOTAL number of rows + count_query = sql.select(sql.func.count()).select_from(base_subquery) + + # Query to retrieve page with additional columns, ordering, offset, and limit + page_query = ( + sql.select( + services_meta_data.c.key, + services_meta_data.c.version, + services_meta_data.c.version_display, + services_meta_data.c.deprecated, + services_meta_data.c.created, + # CompatiblePolicyDict | None + services_compatibility.c.custom_policy.label("compatibility_policy"), + ) + .select_from( + # NOTE: these joins are avoided in count_query + base_subquery.join( + services_meta_data, + (base_subquery.c.key == services_meta_data.c.key) + & (base_subquery.c.version == services_meta_data.c.version), + ).outerjoin( + services_compatibility, + (services_meta_data.c.key == services_compatibility.c.key) + & ( + services_meta_data.c.version == services_compatibility.c.version + ), + ) + ) + .order_by(sql.desc(_services_sql.by_version(services_meta_data.c.version))) + .offset(pagination_offset) + .limit(pagination_limit) + ) + + async with pass_or_acquire_connection(self.db_engine) as conn: + total_count: PositiveInt = await conn.scalar(count_query) or 0 + + result = await conn.stream(page_query) + items: list[ReleaseDBGet] = [ + ReleaseDBGet.model_validate(row, from_attributes=True) + async for row in result + ] + + return total_count, items + + # Service Access Rights ---- + + async def get_service_access_rights( + self, + key: str, + version: str, + product_name: str | None = None, + ) -> list[ServiceAccessRightsDB]: + """ + - If product_name is not specified, then all are considered in the
query + """ + search_expression = (services_access_rights.c.key == key) & ( + services_access_rights.c.version == version + ) + if product_name: + search_expression &= services_access_rights.c.product_name == product_name + + query = sa.select(services_access_rights).where(search_expression) + + async with self.db_engine.connect() as conn: + return [ + ServiceAccessRightsDB.model_validate(row) + async for row in await conn.stream(query) + ] + + async def batch_get_services_access_rights( + self, + key_versions: Iterable[tuple[str, str]], + product_name: str | None = None, + ) -> dict[tuple[str, str], list[ServiceAccessRightsDB]]: + """Batch version of get_service_access_rights""" + service_to_access_rights = defaultdict(list) + query = ( + sa.select(services_access_rights) + .select_from(services_access_rights) + .where( + sql.tuple_( + services_access_rights.c.key, services_access_rights.c.version + ).in_(key_versions) + & (services_access_rights.c.product_name == product_name) + if product_name + else True + ) + ) + async with self.db_engine.connect() as conn: + async for row in await conn.stream(query): + service_to_access_rights[(row.key, row.version)].append( + ServiceAccessRightsDB.model_validate(row) + ) + return service_to_access_rights + + async def upsert_service_access_rights( + self, new_access_rights: list[ServiceAccessRightsDB] + ) -> None: + # update the services_access_rights table (some might be added/removed/modified) + for rights in new_access_rights: + insert_stmt = pg_insert(services_access_rights).values( + **rights.model_dump(by_alias=True) + ) + on_update_stmt = insert_stmt.on_conflict_do_update( + index_elements=[ + services_access_rights.c.key, + services_access_rights.c.version, + services_access_rights.c.gid, + services_access_rights.c.product_name, + ], + set_=rights.model_dump( + by_alias=True, + exclude_unset=True, + exclude={"key", "version", "gid", "product_name"}, + ), + ) + try: + async with self.db_engine.begin() as conn: + result = await conn.execute(on_update_stmt) + assert result # nosec + except ForeignKeyViolation: + _logger.warning( + "The service %s:%s is missing from services_meta_data", + rights.key, + rights.version, + ) + + async def delete_service_access_rights( + self, delete_access_rights: list[ServiceAccessRightsDB] + ) -> None: + async with self.db_engine.begin() as conn: + for rights in delete_access_rights: + await conn.execute( + # pylint: disable=no-value-for-parameter + services_access_rights.delete().where( + (services_access_rights.c.key == rights.key) + & (services_access_rights.c.version == rights.version) + & (services_access_rights.c.gid == rights.gid) + & (services_access_rights.c.product_name == rights.product_name) + ) + ) + + # Service Specs --- + async def get_service_specifications( + self, + key: ServiceKey, + version: ServiceVersion, + groups: tuple[GroupAtDB, ...], + *, + allow_use_latest_service_version: bool = False, + ) -> ServiceSpecifications | None: + """returns the service specifications for service 'key:version' and for 'groups' + returns None if nothing found + + :param allow_use_latest_service_version: if True, then the latest version of the specs will be returned, defaults to False + """ + _logger.debug( + "getting specifications from db for %s", f"{key}:{version} for {groups=}" + ) + gid_to_group_map = {group.gid: group for group in groups} + + everyone_specs = None + primary_specs = None + teams_specs: dict[GroupID, ServiceSpecificationsAtDB] = {} + + queried_version = packaging.version.parse(version) + # 
we should instead use semver enabled postgres [https://pgxn.org/dist/semver/doc/semver.html] + async with self.db_engine.connect() as conn: + async for row in await conn.stream( + sa.select(services_specifications).where( + (services_specifications.c.service_key == key) + & ( + (services_specifications.c.service_version == version) + if not allow_use_latest_service_version + else True + ) + & (services_specifications.c.gid.in_(group.gid for group in groups)) + ), + ): + try: + _logger.debug("found following %s", f"{row=}") + # validate the specs first + db_service_spec = ServiceSpecificationsAtDB.model_validate(row) + db_spec_version = packaging.version.parse( + db_service_spec.service_version + ) + if allow_use_latest_service_version and ( + db_spec_version > queried_version + ): + # NOTE: in this case we look for the latest version only (e.g <=queried_version) + # and we skip them if they are above + continue + # filter by group type + group = gid_to_group_map[row.gid] + if (group.group_type == GroupType.STANDARD) and _is_newer( + teams_specs.get(db_service_spec.gid), + db_service_spec, + ): + teams_specs[db_service_spec.gid] = db_service_spec + elif (group.group_type == GroupType.EVERYONE) and _is_newer( + everyone_specs, db_service_spec + ): + everyone_specs = db_service_spec + elif (group.group_type == GroupType.PRIMARY) and _is_newer( + primary_specs, db_service_spec + ): + primary_specs = db_service_spec + + except ValidationError as exc: + _logger.warning( + "skipping service specifications for group '%s' as invalid: %s", + f"{row.gid}", + f"{exc}", + ) + + if merged_specifications := _merge_specs( + everyone_specs, teams_specs, primary_specs + ): + return ServiceSpecifications.model_validate(merged_specifications) + return None # mypy diff --git a/services/catalog/src/simcore_service_catalog/service/__init__.py b/services/catalog/src/simcore_service_catalog/service/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/catalog/src/simcore_service_catalog/service/access_rights.py b/services/catalog/src/simcore_service_catalog/service/access_rights.py new file mode 100644 index 00000000000..8e3adbeb429 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/service/access_rights.py @@ -0,0 +1,193 @@ +"""Services Access Rights policies""" + +import logging +import operator +from collections.abc import Callable +from datetime import UTC, datetime +from typing import cast + +import arrow +from fastapi import FastAPI +from models_library.services import ServiceMetaDataPublished +from models_library.services_types import ServiceKey, ServiceVersion +from packaging.version import Version +from pydantic.types import PositiveInt +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..api._dependencies.director import get_director_client +from ..models.services_db import ServiceAccessRightsDB +from ..repository.groups import GroupsRepository +from ..repository.services import ServicesRepository +from ..utils.versioning import as_version, is_patch_release + +_logger = logging.getLogger(__name__) + +_LEGACY_SERVICES_DATE: datetime = datetime(year=2020, month=8, day=19, tzinfo=UTC) + + +def _is_frontend_service(service: ServiceMetaDataPublished) -> bool: + return "/frontend/" in service.key + + +async def _is_old_service(app: FastAPI, service: ServiceMetaDataPublished) -> bool: + # NOTE: https://github.com/ITISFoundation/osparc-simcore/pull/6003#discussion_r1658200909 + # get service build date + client = get_director_client(app) + + data = await 
client.get_service_extras(service.key, service.version) + if not data or data.service_build_details is None: + return True + service_build_data = arrow.get(data.service_build_details.build_date).datetime + return bool(service_build_data < _LEGACY_SERVICES_DATE) + + +async def evaluate_default_policy( + app: FastAPI, service: ServiceMetaDataPublished +) -> tuple[PositiveInt | None, list[ServiceAccessRightsDB]]: + """Given a service, it returns the owner's group-id (gid) and a list of access rights following + default access-rights policies + + - DEFAULT Access Rights policies: + 1. All services published in osparc prior to 19.08.2020 will be visible to everyone (referred to as 'old service'). + 2. Services published after 19.08.2020 will be visible ONLY to their owner + 3. Front-end services have execute-access to everyone + + + Raises: + HTTPException: from calls to director's rest API. Maps director errors into catalog's server error + SQLAlchemyError: from access to pg database + ValidationError: from pydantic model errors + """ + db_engine: AsyncEngine = app.state.engine + + groups_repo = GroupsRepository(db_engine) + owner_gid = None + group_ids: list[PositiveInt] = [] + + if _is_frontend_service(service) or await _is_old_service(app, service): + everyone_gid = (await groups_repo.get_everyone_group()).gid + _logger.debug("service %s:%s is old or frontend", service.key, service.version) + # let's make that one available to everyone + group_ids.append(everyone_gid) + + # try to find the owner + possible_owner_email = [service.contact] + [ + author.email for author in service.authors + ] + + for user_email in possible_owner_email: + possible_gid = await groups_repo.get_user_gid_from_email(user_email) + if possible_gid and not owner_gid: + owner_gid = possible_gid + if not owner_gid: + _logger.warning("service %s:%s has no owner", service.key, service.version) + else: + group_ids.append(owner_gid) + + # we add the owner with full rights, unless it's everyone + default_access_rights = [ + ServiceAccessRightsDB( + key=service.key, + version=service.version, + gid=gid, + execute_access=True, + write_access=(gid == owner_gid), + product_name=app.state.default_product_name, + ) + for gid in set(group_ids) + ] + + return (owner_gid, default_access_rights) + + +async def evaluate_auto_upgrade_policy( + service_metadata: ServiceMetaDataPublished, services_repo: ServicesRepository +) -> list[ServiceAccessRightsDB]: + # AUTO-UPGRADE PATCH policy: + # + # - Any new patch released inherits the access rights from the previous compatible version + # - IDEA: add as option in the publication contract, i.e. in ServiceDockerData?
+ # - Does NOT apply to front-end services + # + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/2244) + # + if _is_frontend_service(service_metadata): + return [] + + service_access_rights = [] + new_version: Version = as_version(service_metadata.version) + latest_releases = await services_repo.list_service_releases( + service_metadata.key, + major=new_version.major, + minor=new_version.minor, + ) + + previous_release = None + for release in latest_releases: + # NOTE: latest_release is sorted from newer to older + # Here we search for the previous version patched by new-version + if is_patch_release(new_version, release.version): + previous_release = release + break + + if previous_release: + previous_access_rights = await services_repo.get_service_access_rights( + previous_release.key, previous_release.version + ) + + service_access_rights = [ + access.model_copy( + update={"version": service_metadata.version}, + deep=True, + ) + for access in previous_access_rights + ] + return service_access_rights + + +def reduce_access_rights( + access_rights: list[ServiceAccessRightsDB], + reduce_operation: Callable = operator.ior, +) -> list[ServiceAccessRightsDB]: + """ + Reduces a list of access-rights per target + By default, the reduction is OR (i.e. preserves True flags) + """ + # TODO: probably a lot of room to optimize + # helper functions to simplify operation of access rights + + def _get_target(access: ServiceAccessRightsDB) -> tuple[str | int, ...]: + """Hashable identifier of the resource the access rights apply to""" + return (access.key, access.version, access.gid, access.product_name) + + def _get_flags(access: ServiceAccessRightsDB) -> dict[str, bool]: + """Extracts only""" + flags = access.model_dump(include={"execute_access", "write_access"}) + return cast(dict[str, bool], flags) + + access_flags_map: dict[tuple[str | int, ...], dict[str, bool]] = {} + for access in access_rights: + target = _get_target(access) + access_flags = access_flags_map.get(target) + + if access_flags: + # applies reduction on flags + for key, value in _get_flags(access).items(): + access_flags[key] = reduce_operation( # defaults to a |= b + access_flags[key], value + ) + else: + access_flags_map[target] = _get_flags(access) + + reduced_access_rights: list[ServiceAccessRightsDB] = [ + ServiceAccessRightsDB( + key=ServiceKey(f"{target[0]}"), + version=ServiceVersion(f"{target[1]}"), + gid=int(target[2]), + product_name=f"{target[3]}", + **access_flags_map[target], + ) + for target in access_flags_map + ] + + return reduced_access_rights diff --git a/services/catalog/src/simcore_service_catalog/service/catalog_services.py b/services/catalog/src/simcore_service_catalog/service/catalog_services.py new file mode 100644 index 00000000000..d1377bb4db6 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/service/catalog_services.py @@ -0,0 +1,769 @@ +"""Includes manifest (services in the registry) and function-service (front-end/back-end services)""" + +import logging +from contextlib import suppress +from typing import Literal, TypeVar + +from models_library.api_schemas_catalog.services import ( + LatestServiceGet, + MyServiceGet, + ServiceGetV2, + ServiceSummary, + ServiceUpdateV2, +) +from models_library.api_schemas_directorv2.services import ServiceExtras +from models_library.basic_types import VersionStr +from models_library.groups import GroupID +from models_library.products import ProductName +from models_library.rest_pagination import PageLimitInt, PageOffsetInt, PageTotalCount 
+from models_library.services_access import ServiceGroupAccessRightsV2 +from models_library.services_history import Compatibility, ServiceRelease +from models_library.services_metadata_published import ServiceMetaDataPublished +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import HttpUrl +from servicelib.logging_errors import ( + create_troubleshotting_log_kwargs, +) +from servicelib.rabbitmq.rpc_interfaces.catalog.errors import ( + CatalogForbiddenError, + CatalogInconsistentError, + CatalogItemNotFoundError, +) + +from ..clients.director import DirectorClient +from ..models.services_db import ( + ServiceAccessRightsDB, + ServiceDBFilters, + ServiceMetaDataDBGet, + ServiceMetaDataDBPatch, + ServiceWithHistoryDBGet, +) +from ..models.services_ports import ServicePort +from ..repository.groups import GroupsRepository +from ..repository.services import ServicesRepository +from . import manifest +from .compatibility import evaluate_service_compatibility_map +from .function_services import is_function_service + +_logger = logging.getLogger(__name__) + +# Type variable for service models that can be returned from list functions +T = TypeVar("T", ServiceGetV2, LatestServiceGet) + + +def _aggregate( + service_db: ServiceWithHistoryDBGet | ServiceMetaDataDBGet, + access_rights_db: list[ServiceAccessRightsDB], + service_manifest: ServiceMetaDataPublished, +) -> dict: + return { + "key": service_db.key, + "version": service_db.version, + "name": service_db.name, + "thumbnail": HttpUrl(service_db.thumbnail) if service_db.thumbnail else None, + "icon": HttpUrl(service_db.icon) if service_db.icon else None, + "description": service_db.description, + "description_ui": service_db.description_ui, + "version_display": service_db.version_display, + "service_type": service_manifest.service_type, + "contact": service_manifest.contact, + "authors": service_manifest.authors, + # Check if owner attribute is available in the service_db object + "owner": getattr(service_db, "owner_email", None), + "inputs": service_manifest.inputs or {}, + "outputs": service_manifest.outputs or {}, + "boot_options": service_manifest.boot_options, + "min_visible_inputs": service_manifest.min_visible_inputs, + "access_rights": { + a.gid: ServiceGroupAccessRightsV2.model_construct( + execute=a.execute_access, + write=a.write_access, + ) + for a in access_rights_db + }, + "classifiers": service_db.classifiers, + "quality": service_db.quality, + # NOTE: history/release field is removed + } + + +def _aggregate_summary( + service_db: ServiceWithHistoryDBGet | ServiceMetaDataDBGet, + service_manifest: ServiceMetaDataPublished, +) -> dict: + """Creates a minimal dictionary with only the fields needed for ServiceSummary""" + return { + "key": service_db.key, + "version": service_db.version, + "name": service_db.name, + "description": service_db.description, + "version_display": service_db.version_display, + "contact": service_manifest.contact, + } + + +def _to_latest_get_schema( + service_db: ServiceWithHistoryDBGet, + access_rights_db: list[ServiceAccessRightsDB], + service_manifest: ServiceMetaDataPublished, +) -> LatestServiceGet: + + assert len(service_db.history) == 0 # nosec + + return LatestServiceGet.model_validate( + { + **_aggregate(service_db, access_rights_db, service_manifest), + "release": ServiceRelease.model_construct( + version=service_db.version, + version_display=service_db.version_display, + released=service_db.created, + 
retired=service_db.deprecated, + compatibility=None, + ), + } + ) + + +def _to_get_schema( + service_db: ServiceWithHistoryDBGet, + access_rights_db: list[ServiceAccessRightsDB], + service_manifest: ServiceMetaDataPublished, + compatibility_map: dict[ServiceVersion, Compatibility | None] | None = None, +) -> ServiceGetV2: + compatibility_map = compatibility_map or {} + + return ServiceGetV2.model_validate( + { + **_aggregate(service_db, access_rights_db, service_manifest), + "history": [ + ServiceRelease.model_construct( + version=h.version, + version_display=h.version_display, + released=h.created, + retired=h.deprecated, + compatibility=compatibility_map.get(h.version), + ) + for h in service_db.history + ], + } + ) + + +async def _get_services_with_access_rights( + repo: ServicesRepository, + services: list[ServiceWithHistoryDBGet] | list[ServiceMetaDataDBGet], + product_name: ProductName, + user_id: UserID, +) -> dict[tuple[str, str], list[ServiceAccessRightsDB]]: + """Common function to get access rights for a list of services. + + Args: + repo: Repository for services + services: List of services to get access rights for + product_name: Product name + user_id: User ID + + Returns: + Dictionary mapping (key, version) to list of access rights + + Raises: + CatalogForbiddenError: If no access rights are found for any service + """ + if not services: + return {} + + # Inject access-rights + access_rights = await repo.batch_get_services_access_rights( + ((sc.key, sc.version) for sc in services), product_name=product_name + ) + if not access_rights: + raise CatalogForbiddenError( + name="any service", + user_id=user_id, + product_name=product_name, + ) + + return access_rights + + +async def _get_services_manifests( + services: list[ServiceWithHistoryDBGet] | list[ServiceMetaDataDBGet], + access_rights: dict[tuple[str, str], list[ServiceAccessRightsDB]], + director_api: DirectorClient, + product_name: ProductName, + user_id: UserID, + filters: ServiceDBFilters | None, + limit: PageLimitInt | None, + offset: PageOffsetInt | None, +) -> dict[tuple[str, str], ServiceMetaDataPublished]: + """Common function to get service manifests from director. 
+ + Args: + services: List of services to get manifests for + access_rights: Dictionary mapping (key, version) to list of access rights + director_api: Director API client + product_name: Product name + user_id: User ID + filters: Filters that were applied + limit: Pagination limit that was applied + offset: Pagination offset that was applied + + Returns: + Dictionary mapping (key, version) to manifest + + Raises: + CatalogInconsistentError: Logs warning if some services are missing from manifest + """ + # Get manifest of those with access rights + got = await manifest.get_batch_services( + [ + (sc.key, sc.version) + for sc in services + if access_rights.get((sc.key, sc.version)) + ], + director_api, + ) + service_manifest = { + (sc.key, sc.version): sc + for sc in got + if isinstance(sc, ServiceMetaDataPublished) + } + + # Log a warning for missing services + missing_services = [ + (sc.key, sc.version) + for sc in services + if (sc.key, sc.version) not in service_manifest + ] + if missing_services: + msg = f"Found {len(missing_services)} services that are in the database but missing in the registry manifest" + _logger.warning( + **create_troubleshotting_log_kwargs( + msg, + error=CatalogInconsistentError( + missing_services=missing_services, + user_id=user_id, + product_name=product_name, + filters=filters, + limit=limit, + offset=offset, + ), + tip="This might be due to malfunction of the background-task or that this call was done while the sync was taking place", + ) + ) + # NOTE: tests should fail if this happens but it is not a critical error so it is ignored in production + assert len(missing_services) == 0, msg # nosec + + return service_manifest + + +async def list_all_service_summaries( + repo: ServicesRepository, + director_api: DirectorClient, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt | None, + offset: PageOffsetInt = 0, + filters: ServiceDBFilters | None = None, +) -> tuple[PageTotalCount, list[ServiceSummary]]: + """Lists all catalog services with minimal information. + + This is different from list_latest_catalog_services which only returns the latest version of each service + and includes complete service information. 
+ + Args: + repo: Repository for services + director_api: Director API client + product_name: Product name + user_id: User ID + limit: Pagination limit + offset: Pagination offset + filters: Filters to apply + + Returns: + Tuple of total count and list of service summaries + """ + # Get all services with pagination + total_count, services = await repo.list_all_services( + product_name=product_name, + user_id=user_id, + pagination_limit=limit, + pagination_offset=offset, + filters=filters, + ) + + # Get access rights and manifests + access_rights = await _get_services_with_access_rights( + repo, services, product_name, user_id + ) + service_manifest = await _get_services_manifests( + services, + access_rights, + director_api, + product_name, + user_id, + filters, + limit, + offset, + ) + + # Create service summaries + items = [] + for sc in services: + sm = service_manifest.get((sc.key, sc.version)) + if access_rights.get((sc.key, sc.version)) and sm: + # Create a minimal ServiceSummary + service_data = _aggregate_summary( + service_db=sc, + service_manifest=sm, + ) + items.append(ServiceSummary.model_validate(service_data)) + + return total_count, items + + +async def list_latest_catalog_services( + repo: ServicesRepository, + director_api: DirectorClient, + *, + product_name: ProductName, + user_id: UserID, + limit: PageLimitInt | None, + offset: PageOffsetInt = 0, + filters: ServiceDBFilters | None = None, +) -> tuple[PageTotalCount, list[LatestServiceGet]]: + """Lists latest versions of catalog services. + + Args: + repo: Repository for services + director_api: Director API client + product_name: Product name + user_id: UserID + limit: Pagination limit + offset: Pagination offset + filters: Filters to apply + + Returns: + Tuple of total count and list of latest services + """ + # defines the order + total_count, services = await repo.list_latest_services( + product_name=product_name, + user_id=user_id, + pagination_limit=limit, + pagination_offset=offset, + filters=filters, + ) + + # Get access rights and manifests using shared functions + access_rights = await _get_services_with_access_rights( + repo, services, product_name, user_id + ) + service_manifest = await _get_services_manifests( + services, + access_rights, + director_api, + product_name, + user_id, + filters, + limit, + offset, + ) + + # Aggregate the services manifest and access-rights + items = [ + _to_latest_get_schema( + service_db=sc, + access_rights_db=ar, + service_manifest=sm, + ) + for sc in services + if ( + (ar := access_rights.get((sc.key, sc.version))) + and (sm := service_manifest.get((sc.key, sc.version))) + ) + ] + + return total_count, items + + +async def get_catalog_service( + repo: ServicesRepository, + director_api: DirectorClient, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> ServiceGetV2: + + access_rights = await check_catalog_service_permissions( + repo=repo, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + permission="read", + ) + + service = await repo.get_service_with_history( + product_name=product_name, + user_id=user_id, + key=service_key, + version=service_version, + ) + if not service: + # no service found provided `access_rights` + raise CatalogForbiddenError( + name=f"{service_key}:{service_version}", + service_key=service_key, + service_version=service_version, + user_id=user_id, + product_name=product_name, + ) + + service_manifest = await 
manifest.get_service( + key=service_key, + version=service_version, + director_client=director_api, + ) + + compatibility_map = await evaluate_service_compatibility_map( + repo, + product_name=product_name, + user_id=user_id, + service_release_history=service.history, + ) + + return _to_get_schema(service, access_rights, service_manifest, compatibility_map) + + +async def update_catalog_service( + repo: ServicesRepository, + director_api: DirectorClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + update: ServiceUpdateV2, +) -> ServiceGetV2: + + if is_function_service(service_key): + raise CatalogForbiddenError( + name=f"function service {service_key}:{service_version}", + service_key=service_key, + service_version=service_version, + user_id=user_id, + product_name=product_name, + ) + + # Check access rights first + access_rights = await check_catalog_service_permissions( + repo=repo, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + permission="write", + ) + + # Updates service_meta_data + await repo.update_service( + service_key, + service_version, + ServiceMetaDataDBPatch.model_validate( + update.model_dump( + exclude_unset=True, exclude={"access_rights"}, mode="json" + ), + ), + ) + + # Updates service_access_rights (they can be added/removed/modified) + if update.access_rights: + + # before + previous_gids = [r.gid for r in access_rights] + + # new + new_access_rights = [ + ServiceAccessRightsDB( + key=service_key, + version=service_version, + gid=gid, + execute_access=rights.execute, + write_access=rights.write, + product_name=product_name, + ) + for gid, rights in update.access_rights.items() + ] + await repo.upsert_service_access_rights(new_access_rights) + + # then delete the ones that were removed + removed_access_rights = [ + ServiceAccessRightsDB( + key=service_key, + version=service_version, + gid=gid, + product_name=product_name, + ) + for gid in previous_gids + if gid not in update.access_rights + ] + await repo.delete_service_access_rights(removed_access_rights) + + return await get_catalog_service( + repo=repo, + director_api=director_api, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + + +async def check_catalog_service_permissions( + repo: ServicesRepository, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + permission: Literal["read", "write"], +) -> list[ServiceAccessRightsDB]: + """Raises if the service cannot be accessed with the specified permission level + + Args: + repo: Repository for services + product_name: Product name + user_id: User ID + service_key: Service key + service_version: Service version + permission: Permission level to check, either "read" or "write". 
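+ + Returns: + The service's access-rights entries for the given product, one entry per group that has rights on this service version.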
+ + Raises: + CatalogItemNotFoundError: service (key,version) not found + CatalogForbiddenError: insufficient access rights to get the requested access + """ + + access_rights = await repo.get_service_access_rights( + key=service_key, + version=service_version, + product_name=product_name, + ) + if not access_rights: + raise CatalogItemNotFoundError( + name=f"{service_key}:{service_version}", + service_key=service_key, + service_version=service_version, + user_id=user_id, + product_name=product_name, + ) + + has_permission = False + if permission == "read": + has_permission = await repo.can_get_service( + product_name=product_name, + user_id=user_id, + key=service_key, + version=service_version, + ) + elif permission == "write": + has_permission = await repo.can_update_service( + product_name=product_name, + user_id=user_id, + key=service_key, + version=service_version, + ) + + if not has_permission: + raise CatalogForbiddenError( + name=f"{service_key}:{service_version}", + service_key=service_key, + service_version=service_version, + user_id=user_id, + product_name=product_name, + ) + + return access_rights + + +async def batch_get_user_services( + repo: ServicesRepository, + groups_repo: GroupsRepository, + *, + product_name: ProductName, + user_id: UserID, + ids: list[ + tuple[ + ServiceKey, + ServiceVersion, + ] + ], +) -> list[MyServiceGet]: + + services_access_rights = await repo.batch_get_services_access_rights( + key_versions=ids, product_name=product_name + ) + + user_groups = await groups_repo.list_user_groups(user_id=user_id) + my_group_ids = {g.gid for g in user_groups} + + my_services = [] + for service_key, service_version in ids: + + # Evaluate user's access-rights to this service key:version + access_rights = services_access_rights.get((service_key, service_version), []) + my_access_rights = ServiceGroupAccessRightsV2(execute=False, write=False) + for ar in access_rights: + if ar.gid in my_group_ids: + my_access_rights.execute |= ar.execute_access + my_access_rights.write |= ar.write_access + + # Get service metadata + service_db = await repo.get_service( + product_name=product_name, + key=service_key, + version=service_version, + ) + assert service_db # nosec + + # Find service owner (if defined!) + owner: GroupID | None = service_db.owner + if not owner: + # NOTE can be more than one. Just get first. + with suppress(StopIteration): + owner = next( + ar.gid + for ar in access_rights + if ar.write_access and ar.execute_access + ) + + # Evaluate `compatibility` + compatibility: Compatibility | None = None + if my_access_rights.execute or my_access_rights.write: + history = await repo.get_service_history( + # NOTE: that the service history might be different for each user + # since access rights are defined on a version basis (i.e. 
one user can have access to v1 but not to v2) + product_name=product_name, + user_id=user_id, + key=service_key, + ) + assert history # nosec + + compatibility_map = await evaluate_service_compatibility_map( + repo, + product_name=product_name, + user_id=user_id, + service_release_history=history, + ) + + compatibility = compatibility_map.get(service_db.version) + + my_services.append( + MyServiceGet( + key=service_db.key, + release=ServiceRelease( + version=service_db.version, + version_display=service_db.version_display, + released=service_db.created, + retired=service_db.deprecated, + compatibility=compatibility, + ), + owner=owner, + my_access_rights=my_access_rights, + ) + ) + + return my_services + + +async def list_user_service_release_history( + repo: ServicesRepository, + *, + # access-rights + product_name: ProductName, + user_id: UserID, + # target service + service_key: ServiceKey, + # pagination + pagination_limit: PageLimitInt | None = None, + pagination_offset: PageOffsetInt | None = None, + # filters + filters: ServiceDBFilters | None = None, + # result options + include_compatibility: bool = False, +) -> tuple[PageTotalCount, list[ServiceRelease]]: + + total_count, history = await repo.get_service_history_page( + # NOTE: that the service history might be different for each user + # since access rights are defined on a version basis (i.e. one user can have access to v1 but not to v2) + product_name=product_name, + user_id=user_id, + key=service_key, + pagination_limit=pagination_limit, + pagination_offset=pagination_offset, + filters=filters, + ) + + compatibility_map: dict[ServiceVersion, Compatibility | None] = {} + if include_compatibility: + msg = "This operation is heavy and for the moment is not necessary" + raise NotImplementedError(msg) + + items = [ + # domain -> domain + ServiceRelease.model_construct( + version=h.version, + version_display=h.version_display, + released=h.created, + retired=h.deprecated, + compatibility=compatibility_map.get(h.version), + ) + for h in history + ] + + return total_count, items + + +async def get_user_services_ports( + repo: ServicesRepository, + director_api: DirectorClient, + *, + product_name: ProductName, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[ServicePort]: + """Get service ports (inputs and outputs) for a specific service version.
+ + Raises: + CatalogItemNotFoundError: When service is not found + CatalogForbiddenError: When user doesn't have access rights + """ + + # Check access rights first + await check_catalog_service_permissions( + repo=repo, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + permission="read", + ) + + # Get service ports from manifest + return await manifest.get_service_ports( + director_client=director_api, + key=service_key, + version=service_version, + ) + + +async def get_catalog_service_extras( + director_api: DirectorClient, service_key: ServiceKey, service_version: VersionStr +) -> ServiceExtras: + return await director_api.get_service_extras( + service_key=service_key, service_version=service_version + ) diff --git a/services/catalog/src/simcore_service_catalog/service/compatibility.py b/services/catalog/src/simcore_service_catalog/service/compatibility.py new file mode 100644 index 00000000000..696726e5ef0 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/service/compatibility.py @@ -0,0 +1,124 @@ +"""Manages service compatibility policies""" + +from models_library.products import ProductName +from models_library.services_history import Compatibility, CompatibleService +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from packaging.specifiers import SpecifierSet +from packaging.version import Version +from simcore_service_catalog.utils.versioning import as_version + +from ..models.services_db import ReleaseDBGet +from ..repository.services import ServicesRepository + + +def _get_default_compatibility_specs(target: ServiceVersion | Version) -> SpecifierSet: + """Default policy: + A version is compatible with target X.Y.Z if `>X.Y.Z, ~=X.Y.Z` (i.e. any patch released newer than the target) + SEE https://packaging.python.org/en/latest/specifications/version-specifiers/#id5 + """ + version = as_version(target) + return SpecifierSet( + f">{version}, ~={version.major}.{version.minor}.{version.micro}" + ) + + +def _get_latest_compatible_version( + target: ServiceVersion | Version, + service_versions: list[Version], + compatibility_specs: SpecifierSet | None = None, +) -> Version | None: + """ + Returns latest version in history that satisfies `>X.Y.Z, ~=X.Y.Z` (default policy if compatibility_specs=None) or compatibility_specs + Returns None if no version in history satisfies specs. 
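As a quick sanity check of the default policy above (compatible = any newer patch release of the same X.Y line), here is a self-contained sketch using only the packaging library; the demo_* names are illustrative and not part of the codebase:

from packaging.specifiers import SpecifierSet
from packaging.version import Version


def demo_default_specs(target: str) -> SpecifierSet:
    v = Version(target)
    # ">X.Y.Z, ~=X.Y.Z" keeps only patch releases strictly newer than the target
    return SpecifierSet(f">{v}, ~={v.major}.{v.minor}.{v.micro}")


def demo_latest_compatible(target: str, candidates: list[str]) -> Version | None:
    compatible = [v for v in map(Version, candidates) if v in demo_default_specs(target)]
    return max(compatible, default=None)


# 1.2.4 and 1.2.10 are patch upgrades of 1.2.3; 1.2.2, 1.3.0 and 2.0.0 are not
assert str(demo_latest_compatible("1.2.3", ["1.2.2", "1.2.4", "1.2.10", "1.3.0", "2.0.0"])) == "1.2.10"
# no compatible release -> None
assert demo_latest_compatible("1.2.3", ["1.2.3", "1.3.0"]) is None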
+ """ + compatibility_specs = compatibility_specs or _get_default_compatibility_specs( + target + ) + compatible_versions = [v for v in service_versions if v in compatibility_specs] + return max(compatible_versions, default=None) + + +def _convert_to_versions(service_history: list[ReleaseDBGet]) -> list[Version]: + return sorted( + (as_version(h.version) for h in service_history if not h.deprecated), + reverse=True, # latest first + ) + + +async def _evaluate_custom_compatibility( + repo: ServicesRepository, + product_name: ProductName, + user_id: UserID, + target_version: ServiceVersion, + released_versions: list[Version], + compatibility_policy: dict, +) -> Compatibility | None: + other_service_key = compatibility_policy.get("other_service_key") + other_service_versions = [] + + if other_service_key and ( + other_service_history := await repo.get_service_history( + product_name=product_name, + user_id=user_id, + key=ServiceKey(other_service_key), + ) + ): + other_service_versions = _convert_to_versions(other_service_history) + + versions_specifier = SpecifierSet(compatibility_policy["versions_specifier"]) + versions_to_check = other_service_versions or released_versions + + if latest_version := _get_latest_compatible_version( + target_version, versions_to_check, versions_specifier + ): + if other_service_key: + return Compatibility( + can_update_to=CompatibleService( + key=other_service_key, + version=f"{latest_version}", + ) + ) + return Compatibility( + can_update_to=CompatibleService( + version=f"{latest_version}", + ) + ) + + return None + + +async def evaluate_service_compatibility_map( + repo: ServicesRepository, + product_name: ProductName, + user_id: UserID, + service_release_history: list[ReleaseDBGet], +) -> dict[ServiceVersion, Compatibility | None]: + """ + Evaluates the compatibility among a list of service releases for a given product and user. 
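A custom compatibility_policy (handled by _evaluate_custom_compatibility above) replaces the default rule with an explicit "versions_specifier", optionally pointing at another service key. A hedged sketch of the narrowing effect; the concrete specifier strings below are made up for illustration:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

released = [Version(v) for v in ("1.0.0", "1.0.1", "1.0.2", "1.1.0")]

# the default rule for target 1.0.0 would pick the newest patch, i.e. 1.0.2 ...
default_specs = SpecifierSet(">1.0.0, ~=1.0.0")
assert str(max(v for v in released if v in default_specs)) == "1.0.2"

# ... while a policy such as {"versions_specifier": ">1.0.0, <=1.0.1"} caps the upgrade at 1.0.1
custom_specs = SpecifierSet(">1.0.0, <=1.0.1")
assert str(max(v for v in released if v in custom_specs)) == "1.0.1"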
+ + """ + compatibility_map: dict[ServiceVersion, Compatibility | None] = {} + + released_versions = _convert_to_versions(service_release_history) + for release in service_release_history: + compatibility = None + if release.compatibility_policy: + compatibility = await _evaluate_custom_compatibility( + product_name=product_name, + user_id=user_id, + repo=repo, + target_version=release.version, + released_versions=released_versions, + compatibility_policy=dict(release.compatibility_policy), + ) + elif latest_version := _get_latest_compatible_version( + release.version, + released_versions, + ): + compatibility = Compatibility( + can_update_to=CompatibleService(version=f"{latest_version}") + ) + compatibility_map[release.version] = compatibility + + return compatibility_map diff --git a/services/catalog/src/simcore_service_catalog/service/function_services.py b/services/catalog/src/simcore_service_catalog/service/function_services.py new file mode 100644 index 00000000000..c5f326fec4b --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/service/function_services.py @@ -0,0 +1,52 @@ +from collections.abc import AsyncIterator + +# mypy: disable-error-code=truthy-function +from typing import Any + +from fastapi import status +from fastapi.applications import FastAPI +from fastapi.exceptions import HTTPException +from fastapi_lifespan_manager import State +from models_library.function_services_catalog import ( + is_function_service, + iter_service_docker_data, +) +from models_library.services import ServiceMetaDataPublished + +assert is_function_service # nosec + + +def _as_dict(model_instance: ServiceMetaDataPublished) -> dict[str, Any]: + return model_instance.model_dump(by_alias=True, exclude_unset=True) + + +def get_function_service(key, version) -> ServiceMetaDataPublished: + try: + return next( + sc + for sc in iter_service_docker_data() + if sc.key == key and sc.version == version + ) + except StopIteration as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Frontend service '{key}:{version}' not found", + ) from err + + +async def function_services_lifespan(app: FastAPI) -> AsyncIterator[State]: + app.state.frontend_services_catalog = [ + _as_dict(metadata) for metadata in iter_service_docker_data() + ] + + try: + yield {} + finally: + app.state.frontend_services_catalog = None + + +__all__: tuple[str, ...] = ( + "get_function_service", + "is_function_service", + "function_services_lifespan", +) diff --git a/services/catalog/src/simcore_service_catalog/service/manifest.py b/services/catalog/src/simcore_service_catalog/service/manifest.py new file mode 100644 index 00000000000..6173d261529 --- /dev/null +++ b/services/catalog/src/simcore_service_catalog/service/manifest.py @@ -0,0 +1,163 @@ +"""Services Manifest API Documentation + +The `service.manifest` module provides a read-only API to access the services catalog. The term "Manifest" refers to a detailed, finalized list, +traditionally used to denote items that are recorded as part of an official inventory or log, emphasizing the immutable nature of the data. + +### Service Registration +Services are registered within the manifest in two distinct methods: + +1. **Docker Registry Integration:** + - Services can be registered by pushing a Docker image, complete with appropriate labels and tags, to a Docker registry. + - These are generally services registered through the Docker registry method, catering primarily to end-user functionalities. 
+ - Example services include user-oriented applications like `sleeper`. + +2. **Function Service Definition:** + - Services can also be directly defined in the codebase as function services, which typically support framework operations. + - These services are usually defined programmatically within the code and are integral to the framework's infrastructure. + - Examples include utility services like `FilePicker`. + + +### Usage +This API is designed for read-only interactions, allowing users to retrieve information about registered services but not to modify the registry. +This ensures data integrity and consistency across the system. + + +""" + +import logging +from typing import Any, TypeAlias, cast + +from aiocache import cached # type: ignore[import-untyped] +from models_library.function_services_catalog.api import iter_service_docker_data +from models_library.services_metadata_published import ServiceMetaDataPublished +from models_library.services_types import ServiceKey, ServiceVersion +from pydantic import ValidationError +from servicelib.utils import limited_gather + +from .._constants import DIRECTOR_CACHING_TTL +from ..clients.director import DirectorClient +from ..models.services_ports import ServicePort +from .function_services import get_function_service, is_function_service + +_logger = logging.getLogger(__name__) + + +ServiceMetaDataPublishedDict: TypeAlias = dict[ + tuple[ServiceKey, ServiceVersion], ServiceMetaDataPublished +] + + +_error_already_logged: set[tuple[str | None, str | None]] = set() + + +async def get_services_map( + director_client: DirectorClient, +) -> ServiceMetaDataPublishedDict: + + # NOTE: using Low-level API to avoid validation + services_in_registry = cast( + list[dict[str, Any]], await director_client.get("/services") + ) + + # NOTE: functional-services are services w/o associated image + services: ServiceMetaDataPublishedDict = { + (sc.key, sc.version): sc for sc in iter_service_docker_data() + } + for service in services_in_registry: + try: + service_data = ServiceMetaDataPublished.model_validate(service) + services[(service_data.key, service_data.version)] = service_data + + except ValidationError: # noqa: PERF203 + # NOTE: this is necessary since registry DOES NOT provides any guarantee of the meta-data + # in the labels, i.e. it is not validated + errored_service = (service.get("key"), service.get("version")) + if errored_service not in _error_already_logged: + _logger.warning( + "Skipping '%s:%s' from the catalog of services! 
So far %s invalid services in registry.", + *errored_service, + len(_error_already_logged) + 1, + exc_info=True, + ) + _error_already_logged.add(errored_service) + + return services + + +@cached( + ttl=DIRECTOR_CACHING_TTL, + namespace=__name__, + key_builder=lambda f, *ag, **kw: f"{f.__name__}/{kw['key']}/{kw['version']}", +) +async def get_service( + director_client: DirectorClient, + *, + key: ServiceKey, + version: ServiceVersion, +) -> ServiceMetaDataPublished: + """ + Retrieves service metadata from the docker registry via the director and accounting + + raises if does not exist or if validation fails + """ + if is_function_service(key): + service = get_function_service(key=key, version=version) + else: + service = await director_client.get_service( + service_key=key, service_version=version + ) + return service + + +async def get_batch_services( + selection: list[tuple[ServiceKey, ServiceVersion]], + director_client: DirectorClient, +) -> list[ServiceMetaDataPublished | BaseException]: + + batch: list[ServiceMetaDataPublished | BaseException] = await limited_gather( + *( + get_service(key=k, version=v, director_client=director_client) + for k, v in selection + ), + reraise=False, + log=_logger, + tasks_group_prefix="manifest.get_batch_services", + ) + return batch + + +async def get_service_ports( + director_client: DirectorClient, + *, + key: ServiceKey, + version: ServiceVersion, +) -> list[ServicePort]: + """Retrieves all ports (inputs and outputs) from a service""" + ports = [] + service = await get_service( + director_client=director_client, + key=key, + version=version, + ) + + if service.inputs: + for input_name, service_input in service.inputs.items(): + ports.append( + ServicePort( + kind="input", + key=input_name, + port=service_input, + ) + ) + + if service.outputs: + for output_name, service_output in service.outputs.items(): + ports.append( + ServicePort( + kind="output", + key=output_name, + port=service_output, + ) + ) + + return ports diff --git a/services/catalog/src/simcore_service_catalog/services/access_rights.py b/services/catalog/src/simcore_service_catalog/services/access_rights.py deleted file mode 100644 index 037ab20121f..00000000000 --- a/services/catalog/src/simcore_service_catalog/services/access_rights.py +++ /dev/null @@ -1,194 +0,0 @@ -""" Services Access Rights policies - -""" -import logging -import operator -from datetime import datetime -from typing import Any, Callable, Optional, Union, cast -from urllib.parse import quote_plus - -from fastapi import FastAPI -from models_library.services import ServiceDockerData -from models_library.services_db import ServiceAccessRightsAtDB -from packaging.version import Version -from pydantic.types import PositiveInt -from sqlalchemy.ext.asyncio import AsyncEngine - -from ..api.dependencies.director import get_director_api -from ..db.repositories.groups import GroupsRepository -from ..db.repositories.services import ServicesRepository -from ..utils.versioning import as_version, is_patch_release - -logger = logging.getLogger(__name__) - -OLD_SERVICES_DATE: datetime = datetime(2020, 8, 19) - - -def _is_frontend_service(service: ServiceDockerData) -> bool: - return "/frontend/" in service.key - - -async def _is_old_service(app: FastAPI, service: ServiceDockerData) -> bool: - # get service build date - client = get_director_api(app) - data = cast( - dict[str, Any], - await client.get( - f"/service_extras/{quote_plus(service.key)}/{service.version}" - ), - ) - if not data or "build_date" not in data: - return True - 
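Note on caching: manifest.get_service above memoizes per (key, version) through aiocache with a custom key_builder. A self-contained sketch of that pattern, assuming only that aiocache is installed; names like fetch_metadata are illustrative:

import asyncio

from aiocache import cached

calls = 0


@cached(
    ttl=60,
    namespace="demo",
    key_builder=lambda f, *args, **kw: f"{f.__name__}/{kw['key']}/{kw['version']}",
)
async def fetch_metadata(*, key: str, version: str) -> dict:
    global calls  # counts real (non-cached) invocations
    calls += 1
    return {"key": key, "version": version}


async def main() -> None:
    await fetch_metadata(key="simcore/services/comp/demo", version="1.0.0")
    await fetch_metadata(key="simcore/services/comp/demo", version="1.0.0")  # served from cache
    await fetch_metadata(key="simcore/services/comp/demo", version="1.0.1")  # different cache key
    assert calls == 2


asyncio.run(main())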
- logger.debug("retrieved service extras are %s", data) - - service_build_data = datetime.strptime(data["build_date"], "%Y-%m-%dT%H:%M:%SZ") - return service_build_data < OLD_SERVICES_DATE - - -async def evaluate_default_policy( - app: FastAPI, service: ServiceDockerData -) -> tuple[Optional[PositiveInt], list[ServiceAccessRightsAtDB]]: - """Given a service, it returns the owner's group-id (gid) and a list of access rights following - default access-rights policies - - - DEFAULT Access Rights policies: - 1. All services published in osparc prior 19.08.2020 will be visible to everyone (refered as 'old service'). - 2. Services published after 19.08.2020 will be visible ONLY to his/her owner - 3. Front-end services are have execute-access to everyone - """ - db_engine: AsyncEngine = app.state.engine - - groups_repo = GroupsRepository(db_engine) - owner_gid = None - group_ids: list[PositiveInt] = [] - - if _is_frontend_service(service) or await _is_old_service(app, service): - everyone_gid = (await groups_repo.get_everyone_group()).gid - logger.debug("service %s:%s is old or frontend", service.key, service.version) - # let's make that one available to everyone - group_ids.append(everyone_gid) - - # try to find the owner - possible_owner_email = [service.contact] + [ - author.email for author in service.authors - ] - - for user_email in possible_owner_email: - possible_gid = await groups_repo.get_user_gid_from_email(user_email) - if possible_gid: - if not owner_gid: - owner_gid = possible_gid - if not owner_gid: - logger.warning("service %s:%s has no owner", service.key, service.version) - else: - group_ids.append(owner_gid) - - # we add the owner with full rights, unless it's everyone - default_access_rights = [ - ServiceAccessRightsAtDB( - key=service.key, - version=service.version, - gid=gid, - execute_access=True, - write_access=(gid == owner_gid), - product_name=app.state.default_product_name, - ) - for gid in set(group_ids) - ] - - return (owner_gid, default_access_rights) - - -async def evaluate_auto_upgrade_policy( - service_metadata: ServiceDockerData, services_repo: ServicesRepository -) -> list[ServiceAccessRightsAtDB]: - # AUTO-UPGRADE PATCH policy: - # - # - Any new patch released, inherits the access rights from previous compatible version - # - TODO: add as option in the publication contract, i.e. 
in ServiceDockerData - # - Does NOT apply to front-end services - # - # SEE https://github.com/ITISFoundation/osparc-simcore/issues/2244) - # - if _is_frontend_service(service_metadata): - return [] - - service_access_rights = [] - new_version: Version = as_version(service_metadata.version) - latest_releases = await services_repo.list_service_releases( - service_metadata.key, - major=new_version.major, - minor=new_version.minor, - ) - - previous_release = None - for release in latest_releases: - # NOTE: latest_release is sorted from newer to older - # Here we search for the previous version patched by new-version - if is_patch_release(new_version, release.version): - previous_release = release - break - - if previous_release: - previous_access_rights = await services_repo.get_service_access_rights( - previous_release.key, previous_release.version - ) - - for access in previous_access_rights: - service_access_rights.append( - access.copy( - exclude={"created", "modified"}, - update={"version": service_metadata.version}, - deep=True, - ) - ) - - return service_access_rights - - -def reduce_access_rights( - access_rights: list[ServiceAccessRightsAtDB], - reduce_operation: Callable = operator.ior, -) -> list[ServiceAccessRightsAtDB]: - """ - Reduces a list of access-rights per target - By default, the reduction is OR (i.e. preserves True flags) - """ - # TODO: probably a lot of room to optimize - # helper functions to simplify operation of access rights - - def get_target(access: ServiceAccessRightsAtDB) -> tuple[Union[str, int], ...]: - """Hashable identifier of the resource the access rights apply to""" - return tuple([access.key, access.version, access.gid, access.product_name]) - - def get_flags(access: ServiceAccessRightsAtDB) -> dict[str, bool]: - """Extracts only""" - flags = access.dict(include={"execute_access", "write_access"}) - return cast(dict[str, bool], flags) - - access_flags_map: dict[tuple[Union[str, int], ...], dict[str, bool]] = {} - for access in access_rights: - target = get_target(access) - access_flags = access_flags_map.get(target) - - if access_flags: - # applies reduction on flags - for key, value in get_flags(access).items(): - access_flags[key] = reduce_operation(access_flags[key], value) # a |= b - else: - access_flags_map[target] = get_flags(access) - - reduced_access_rights = [] - for target in access_flags_map: - reduced_access_rights.append( - ServiceAccessRightsAtDB( - key=f"{target[0]}", - version=f"{target[1]}", - gid=int(target[2]), - product_name=f"{target[3]}", - **access_flags_map[target], - ) - ) - - return reduced_access_rights diff --git a/services/catalog/src/simcore_service_catalog/services/director.py b/services/catalog/src/simcore_service_catalog/services/director.py deleted file mode 100644 index 7c9a4cf4d02..00000000000 --- a/services/catalog/src/simcore_service_catalog/services/director.py +++ /dev/null @@ -1,171 +0,0 @@ -import asyncio -import functools -import logging -from typing import Any, Awaitable, Callable, Optional, Union - -import httpx -from fastapi import FastAPI, HTTPException -from servicelib.json_serialization import json_dumps -from starlette import status -from tenacity._asyncio import AsyncRetrying -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_random - -logger = logging.getLogger(__name__) - -MINUTE = 60 - -director_startup_retry_policy = dict( - # Random service startup order in swarm. 
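For reference, the removed reduce_access_rights above merges duplicate rows per (key, version, gid, product_name) by OR-ing their boolean flags. A minimal sketch of that reduction over plain dicts (the DB model is replaced by a dict purely for illustration):

import operator
from typing import Callable

Row = dict  # keys: "key", "version", "gid", "product_name", "execute_access", "write_access"


def reduce_flags(rows: list[Row], op: Callable[[bool, bool], bool] = operator.ior) -> list[Row]:
    merged: dict[tuple, dict[str, bool]] = {}
    for row in rows:
        target = (row["key"], row["version"], row["gid"], row["product_name"])
        flags = {k: row[k] for k in ("execute_access", "write_access")}
        if target in merged:
            for name, value in flags.items():
                merged[target][name] = op(merged[target][name], value)  # a |= b by default
        else:
            merged[target] = flags
    return [
        {"key": t[0], "version": t[1], "gid": t[2], "product_name": t[3], **f}
        for t, f in merged.items()
    ]


rows = [
    {"key": "simcore/services/comp/x", "version": "1.0.0", "gid": 1, "product_name": "osparc",
     "execute_access": True, "write_access": False},
    {"key": "simcore/services/comp/x", "version": "1.0.0", "gid": 1, "product_name": "osparc",
     "execute_access": False, "write_access": True},
]
assert reduce_flags(rows) == [
    {"key": "simcore/services/comp/x", "version": "1.0.0", "gid": 1, "product_name": "osparc",
     "execute_access": True, "write_access": True}
]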
- # wait_random prevents saturating other services while startup - # - wait=wait_random(2, 5), - stop=stop_after_delay(2 * MINUTE), - before_sleep=before_sleep_log(logger, logging.WARNING), - reraise=True, -) - - -class UnresponsiveService(RuntimeError): - pass - - -async def setup_director(app: FastAPI) -> None: - if settings := app.state.settings.CATALOG_DIRECTOR: - # init client-api - logger.debug("Setup director at %s ...", f"{settings.base_url=}") - client = DirectorApi(base_url=settings.base_url, app=app) - - # check that the director is accessible - try: - async for attempt in AsyncRetrying(**director_startup_retry_policy): - with attempt: - if not await client.is_responsive(): - raise UnresponsiveService("Director-v0 is not responsive") - - logger.info( - "Connection to director-v0 succeded [%s]", - json_dumps(attempt.retry_state.retry_object.statistics), - ) - except UnresponsiveService: - await client.close() - raise - - app.state.director_api = client - - -async def close_director(app: FastAPI) -> None: - client: Optional[DirectorApi] - if client := app.state.director_api: - await client.close() - - logger.debug("Director client closed successfully") - - -# DIRECTOR API CLASS --------------------------------------------- - - -def safe_request( - request_func: Callable[..., Awaitable[httpx.Response]] -) -> Callable[..., Awaitable[Union[list[Any], dict[str, Any]]]]: - """ - Creates a context for safe inter-process communication (IPC) - """ - assert asyncio.iscoroutinefunction(request_func) - - def _unenvelope_or_raise_error( - resp: httpx.Response, - ) -> Union[list[Any], dict[str, Any]]: - """ - Director responses are enveloped - If successful response, we un-envelop it and return data as a dict - If error, it raise an HTTPException - """ - body = resp.json() - - assert "data" in body or "error" in body # nosec - data = body.get("data") - error = body.get("error") - - if httpx.codes.is_server_error(resp.status_code): - logger.error( - "director error %d [%s]: %s", - resp.status_code, - resp.reason_phrase, - error, - ) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) - - if httpx.codes.is_client_error(resp.status_code): - msg = error or resp.reason_phrase - raise HTTPException(resp.status_code, detail=msg) - - if isinstance(data, list): - return data - - return data or {} - - @functools.wraps(request_func) - async def request_wrapper( - zelf: "DirectorApi", path: str, *args, **kwargs - ) -> Union[list[Any], dict[str, Any]]: - normalized_path = path.lstrip("/") - try: - resp = await request_func(zelf, path=normalized_path, *args, **kwargs) - except Exception as err: - logger.exception( - "Failed request %s to %s%s", - request_func.__name__, - zelf.client.base_url, - normalized_path, - ) - raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err - - return _unenvelope_or_raise_error(resp) - - return request_wrapper - - -class DirectorApi: - """ - - wrapper around thin-client to simplify director's API - - sets endspoint upon construction - - MIME type: application/json - - processes responses, returning data or raising formatted HTTP exception - - SEE services/catalog/src/simcore_service_catalog/api/dependencies/director.py - """ - - def __init__(self, base_url: str, app: FastAPI): - self.client = httpx.AsyncClient( - base_url=base_url, - timeout=app.state.settings.CATALOG_CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, - ) - self.vtag = app.state.settings.CATALOG_DIRECTOR.DIRECTOR_VTAG - - async def close(self): - await self.client.aclose() - - # OPERATIONS - # 
TODO: policy to retry if NetworkError/timeout? - # TODO: add ping to healthcheck - - @safe_request - async def get(self, path: str) -> httpx.Response: - # temp solution: default timeout increased to 20" - return await self.client.get(path, timeout=20.0) - - @safe_request - async def put(self, path: str, body: dict) -> httpx.Response: - return await self.client.put(path, json=body) - - async def is_responsive(self) -> bool: - try: - logger.debug("checking director-v0 is responsive") - health_check_path: str = "/" - result = await self.client.head(health_check_path, timeout=1.0) - result.raise_for_status() - return True - except (httpx.HTTPStatusError, httpx.RequestError, httpx.TimeoutException): - return False diff --git a/services/catalog/src/simcore_service_catalog/services/function_services.py b/services/catalog/src/simcore_service_catalog/services/function_services.py deleted file mode 100644 index c8bd85f2952..00000000000 --- a/services/catalog/src/simcore_service_catalog/services/function_services.py +++ /dev/null @@ -1,58 +0,0 @@ -""" - Catalog of i/o metadata for functions implemented in the front-end -""" - -from typing import Any, cast - -from fastapi import status -from fastapi.applications import FastAPI -from fastapi.exceptions import HTTPException -from models_library.function_services_catalog import ( - is_function_service, - iter_service_docker_data, -) -from models_library.services import ServiceDockerData - -assert is_function_service # nosec - - -def _as_dict(model_instance: ServiceDockerData) -> dict[str, Any]: - # FIXME: In order to convert to ServiceOut, now we have to convert back to front-end service because of alias - # FIXME: set the same policy for f/e and director datasets! - return cast(dict[str, Any], model_instance.dict(by_alias=True, exclude_unset=True)) - - -def get_function_service(key, version) -> dict[str, Any]: - try: - found = next( - s - for s in iter_service_docker_data() - if s.key == key and s.version == version - ) - return _as_dict(found) - except StopIteration as err: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=f"Frontend service '{key}:{version}' not found", - ) from err - - -def setup_function_services(app: FastAPI): - """ - Setup entrypoint for this app module. - - Used in core.application.init_app - """ - - def _on_startup() -> None: - catalog = [_as_dict(metadata) for metadata in iter_service_docker_data()] - app.state.frontend_services_catalog = catalog - - app.add_event_handler("startup", _on_startup) - - -__all__: tuple[str, ...] 
= ( - "get_function_service", - "is_function_service", - "setup_function_services", -) diff --git a/services/catalog/src/simcore_service_catalog/services/remote_debug.py b/services/catalog/src/simcore_service_catalog/services/remote_debug.py deleted file mode 100644 index 5f98524ef12..00000000000 --- a/services/catalog/src/simcore_service_catalog/services/remote_debug.py +++ /dev/null @@ -1,41 +0,0 @@ -""" Setup remote debugger with Python Tools for Visual Studio (PTVSD) - -""" - -import logging -import os - -logger = logging.getLogger(__name__) - -REMOTE_DEBUG_PORT = 3000 - - -def setup_remote_debugging(force_enabled=False, *, boot_mode=None): - """ - Programaticaly enables remote debugging if SC_BOOT_MODE==debug-ptvsd - """ - boot_mode = boot_mode or os.environ.get("SC_BOOT_MODE") - if boot_mode == "debug-ptvsd" or force_enabled: - try: - logger.debug("Enabling attach ptvsd ...") - # - # SEE https://github.com/microsoft/ptvsd#enabling-debugging - # - import ptvsd - - ptvsd.enable_attach( - address=("0.0.0.0", REMOTE_DEBUG_PORT), # nosec - ) # nosec - except ImportError as err: - raise ValueError( - "Cannot enable remote debugging. Please install ptvsd first" - ) from err - - logger.info("Remote debugging enabled: listening port %s", REMOTE_DEBUG_PORT) - else: - logger.debug( - "Booting without remote debugging since SC_BOOT_MODE=%s", boot_mode - ) - - -__all__ = ["setup_remote_debugging"] diff --git a/services/catalog/src/simcore_service_catalog/utils/pools.py b/services/catalog/src/simcore_service_catalog/utils/pools.py deleted file mode 100644 index 39d6f5e9801..00000000000 --- a/services/catalog/src/simcore_service_catalog/utils/pools.py +++ /dev/null @@ -1,40 +0,0 @@ -from concurrent.futures import ProcessPoolExecutor -from contextlib import contextmanager -from typing import Iterator - -# only gets created on use and is guaranteed to be the s -# ame for the entire lifetime of the application -__shared_process_pool_executor = {} - - -def get_shared_process_pool_executor(**kwargs) -> ProcessPoolExecutor: - # sometimes a pool requires a specific configuration - # the key helps to distinguish between them in the same application - key = "".join(sorted("_".join((k, str(v))) for k, v in kwargs.items())) - - if key not in __shared_process_pool_executor: - # pylint: disable=consider-using-with - __shared_process_pool_executor[key] = ProcessPoolExecutor(**kwargs) - - return __shared_process_pool_executor[key] - - -# because there is no shared fastapi library, this is a -# duplicate of servicelib.pools.non_blocking_process_pool_executor -@contextmanager -def non_blocking_process_pool_executor(**kwargs) -> Iterator[ProcessPoolExecutor]: - """ - Avoids default context manger behavior which calls - shutdown with wait=True an blocks. 
- """ - executor = get_shared_process_pool_executor(**kwargs) - try: - yield executor - finally: - # due to an issue in cpython https://bugs.python.org/issue34073 - # bypassing shutdown and using a shared pool - # remove call to get_shared_process_pool_executor and replace with - # a new instance when the issue is fixed - # FIXME: uncomment below line when the issue is fixed - # executor.shutdown(wait=False) - pass diff --git a/services/catalog/src/simcore_service_catalog/utils/requests_decorators.py b/services/catalog/src/simcore_service_catalog/utils/requests_decorators.py deleted file mode 100644 index 534ed7f2d12..00000000000 --- a/services/catalog/src/simcore_service_catalog/utils/requests_decorators.py +++ /dev/null @@ -1,47 +0,0 @@ -import asyncio -import logging -from contextlib import suppress -from functools import wraps -from typing import Any, Callable, Coroutine - -from fastapi import Request, Response - -logger = logging.getLogger(__name__) - -_DEFAULT_CHECK_INTERVAL_S: float = 0.5 - - -async def _cancel_task_if_client_disconnected( - request: Request, task: asyncio.Task, interval: float = _DEFAULT_CHECK_INTERVAL_S -) -> None: - with suppress(asyncio.CancelledError): - while True: - if await request.is_disconnected(): - logger.warning("client %s disconnected!", request.client) - task.cancel() - break - await asyncio.sleep(interval) - - -def cancellable_request(handler: Callable[..., Coroutine[Any, Any, Any]]): - """this decorator periodically checks if the client disconnected and then will cancel the request and return a 499 code (a la nginx).""" - - @wraps(handler) - async def decorator(request: Request, *args, **kwargs) -> Response: - handler_task = asyncio.get_event_loop().create_task( - handler(request, *args, **kwargs) - ) - auto_cancel_task = asyncio.get_event_loop().create_task( - _cancel_task_if_client_disconnected(request, handler_task) - ) - try: - return await handler_task - except asyncio.CancelledError: - logger.warning( - "request %s was cancelled by client %s!", request.url, request.client - ) - return Response("Oh No!", status_code=499) - finally: - auto_cancel_task.cancel() - - return decorator diff --git a/services/catalog/src/simcore_service_catalog/utils/service_resources.py b/services/catalog/src/simcore_service_catalog/utils/service_resources.py index 1b6b7ddcbc9..1e61dfffbe5 100644 --- a/services/catalog/src/simcore_service_catalog/utils/service_resources.py +++ b/services/catalog/src/simcore_service_catalog/utils/service_resources.py @@ -42,12 +42,15 @@ def merge_service_resources_with_user_specs( service_resources: ResourcesDict, user_specific_spec: ServiceSpec ) -> ResourcesDict: if ( - not user_specific_spec.TaskTemplate - or not user_specific_spec.TaskTemplate.Resources + not user_specific_spec.task_template + or not user_specific_spec.task_template.resources ): return service_resources - user_specific_resources = user_specific_spec.dict( - include={"TaskTemplate": {"Resources"}} + + assert "task_template" in user_specific_spec.model_fields # nosec + + user_specific_resources = user_specific_spec.model_dump( + include={"task_template": {"resources"}}, by_alias=True )["TaskTemplate"]["Resources"] merged_resources = deepcopy(service_resources) @@ -58,25 +61,29 @@ def merge_service_resources_with_user_specs( # res_name: NanoCPUs, MemoryBytes, Pids, GenericResources if res_value is None: continue + if res_name == "GenericResources": # special case here merged_resources |= parse_generic_resource(res_value) continue + if res_name not in 
_DOCKER_TO_OSPARC_RESOURCE_MAP: continue - if _DOCKER_TO_OSPARC_RESOURCE_MAP[res_name] in merged_resources: - # upgrade - merged_resources[_DOCKER_TO_OSPARC_RESOURCE_MAP[res_name]].__setattr__( - osparc_res_attr, - res_value * _DOCKER_TO_OSPARC_RESOURCE_CONVERTER[res_name], - ) + + scale = _DOCKER_TO_OSPARC_RESOURCE_CONVERTER[res_name] + key = _DOCKER_TO_OSPARC_RESOURCE_MAP[res_name] + if key in merged_resources: + # updates. + # NOTE: do not use assignment! + # SEE test_reservation_is_cap_by_limit_on_assigment_pydantic_2_bug + data = merged_resources[key].model_dump() + data[osparc_res_attr] = res_value * scale + merged_resources[key] = ResourceValue(**data) else: - merged_resources[ - _DOCKER_TO_OSPARC_RESOURCE_MAP[res_name] - ] = ResourceValue( - limit=res_value * _DOCKER_TO_OSPARC_RESOURCE_CONVERTER[res_name], - reservation=res_value - * _DOCKER_TO_OSPARC_RESOURCE_CONVERTER[res_name], + # constructs + merged_resources[key] = ResourceValue( + limit=res_value * scale, + reservation=res_value * scale, ) return merged_resources diff --git a/services/catalog/src/simcore_service_catalog/utils/versioning.py b/services/catalog/src/simcore_service_catalog/utils/versioning.py index 5aedea6c8b0..d577b81bd75 100644 --- a/services/catalog/src/simcore_service_catalog/utils/versioning.py +++ b/services/catalog/src/simcore_service_catalog/utils/versioning.py @@ -2,20 +2,20 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from typing import Union +from typing import TypeAlias import packaging.version from packaging.version import Version -_VersionT = Union[Version, str] +_VersionOrStr: TypeAlias = Version | str -def as_version(v: _VersionT) -> Version: +def as_version(v: _VersionOrStr) -> Version: return packaging.version.Version(v) if isinstance(v, str) else v -def is_patch_release(version: _VersionT, reference: _VersionT) -> bool: +def is_patch_release(version: _VersionOrStr, reference: _VersionOrStr) -> bool: """Returns True if version is a patch release from reference""" v: Version = as_version(version) r: Version = as_version(reference) - return v.major == r.major and v.minor == r.minor and r.micro < v.micro # type: ignore + return v.major == r.major and v.minor == r.minor and r.micro < v.micro diff --git a/services/catalog/tests/unit/conftest.py b/services/catalog/tests/unit/conftest.py index 078f111e349..296cc47bd19 100644 --- a/services/catalog/tests/unit/conftest.py +++ b/services/catalog/tests/unit/conftest.py @@ -1,42 +1,62 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable -import sys -from copy import deepcopy + +import hashlib +from collections.abc import AsyncIterator, Awaitable, Callable, Iterator from pathlib import Path -from typing import Dict +from typing import Any, NamedTuple +import httpx import pytest +import respx import simcore_service_catalog +import simcore_service_catalog.core.application +import simcore_service_catalog.core.events +import simcore_service_catalog.repository +import simcore_service_catalog.repository.events +import yaml +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI, status +from fastapi.testclient import TestClient +from models_library.api_schemas_directorv2.services import ServiceExtras +from 
packaging.version import Version +from pydantic import EmailStr, TypeAdapter +from pytest_mock import MockerFixture, MockType +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from simcore_service_catalog.core.application import create_app +from simcore_service_catalog.core.settings import ApplicationSettings pytest_plugins = [ + "pytest_simcore.cli_runner", "pytest_simcore.docker_compose", "pytest_simcore.docker_registry", "pytest_simcore.docker_swarm", - "pytest_simcore.monkeypatch_extra", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_products_data", + "pytest_simcore.faker_users_data", "pytest_simcore.postgres_service", "pytest_simcore.pydantic_models", - "pytest_simcore.repository_paths", - "pytest_simcore.schemas", - "pytest_simcore.service_environs", - "pytest_simcore.tmp_path_extra", "pytest_simcore.pytest_global_environs", + "pytest_simcore.rabbit_service", + "pytest_simcore.repository_paths", ] -current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent - - -## FOLDER LAYOUT --------------------------------------------------------------------- - - @pytest.fixture(scope="session") -def project_slug_dir() -> Path: - folder = current_dir.parent.parent - assert folder.exists() - assert any(folder.glob("src/simcore_service_catalog")) - return folder +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "catalog" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_catalog")) + return service_folder @pytest.fixture(scope="session") @@ -49,72 +69,462 @@ def package_dir() -> Path: return dirpath -# FAKE DATA ------ - - -@pytest.fixture() -def fake_data_dag_in() -> Dict: - DAG_DATA_IN_DICT = { - "key": "simcore/services/frontend/nodes-group/macros/1", - "version": "1.0.0", - "name": "string", - "description": "string", - "contact": "user@example.com", - "workbench": { - "additionalProp1": { - "key": "simcore/services/comp/sleeper", - "version": "6.2.0", - "label": "string", - "progress": 0, - "thumbnail": "https://string.com", - "inputs": {}, - "inputAccess": { - "additionalProp1": "ReadAndWrite", - "additionalProp2": "ReadAndWrite", - "additionalProp3": "ReadAndWrite", - }, - "inputNodes": ["ba8e4558-1088-49b1-8fe6-f591634089e5"], - "outputs": {}, - "outputNodes": ["ba8e4558-1088-49b1-8fe6-f591634089e5"], - "parent": "ba8e4558-1088-49b1-8fe6-f591634089e5", - "position": {"x": 0, "y": 0}, +@pytest.fixture(scope="session") +def env_devel_dict( + env_devel_dict: EnvVarsDict, external_envfile_dict: EnvVarsDict +) -> EnvVarsDict: + if external_envfile_dict: + assert "CATALOG_DEV_FEATURES_ENABLED" in external_envfile_dict + assert "CATALOG_SERVICES_DEFAULT_RESOURCES" in external_envfile_dict + return external_envfile_dict + return env_devel_dict + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + docker_compose_service_environment_dict: EnvVarsDict, +) -> EnvVarsDict: + """Produces testing environment for the app + by replicating the environment defined in the docker-compose + when initialized with .env-devel + """ + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_environment_dict, + "CATALOG_TRACING": "null", + }, + ) + + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 + + +@pytest.fixture +def 
app_settings(app_environment: EnvVarsDict) -> ApplicationSettings: + assert app_environment + return ApplicationSettings.create_from_envs() + + +class AppLifeSpanSpyTargets(NamedTuple): + on_startup: MockType + on_shutdown: MockType + + +@pytest.fixture +def spy_app(mocker: MockerFixture) -> AppLifeSpanSpyTargets: + # Used to ensure startup/teardown workflows using different fixtures + # work as expected + return AppLifeSpanSpyTargets( + on_startup=mocker.spy( + simcore_service_catalog.core.events, + "_flush_started_banner", + ), + on_shutdown=mocker.spy( + simcore_service_catalog.core.events, + "_flush_finished_banner", + ), + ) + + +@pytest.fixture +async def app( + app_settings: ApplicationSettings, + is_pdb_enabled: bool, + spy_app: AppLifeSpanSpyTargets, +) -> AsyncIterator[FastAPI]: + """ + NOTE that this app was started when the fixture is setup + and shutdown when the fixture is tear-down + """ + + # create instance + assert app_environment + app_under_test = create_app() + + assert spy_app.on_startup.call_count == 0 + assert spy_app.on_shutdown.call_count == 0 + + async with LifespanManager( + app_under_test, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 0 + + yield app_under_test + + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 1 + + +@pytest.fixture +def client( + app_settings: ApplicationSettings, spy_app: AppLifeSpanSpyTargets +) -> Iterator[TestClient]: + # NOTE: DO NOT add `app` as a dependency since it is already initialized + + # create instance + assert app_environment + app_under_test = create_app() + + assert ( + spy_app.on_startup.call_count == 0 + ), "TIP: Remove dependencies from `app` fixture and get it via `client.app`" + assert spy_app.on_shutdown.call_count == 0 + + with TestClient(app_under_test) as cli: + + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 0 + + yield cli + + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 1 + + +@pytest.fixture +async def aclient( + app: FastAPI, spy_app: AppLifeSpanSpyTargets +) -> AsyncIterator[httpx.AsyncClient]: + # NOTE: Avoids TestClient since `app` fixture already runs LifespanManager + # Otherwise `with TestClient` will call twice start/shutdown events + + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 0 + + async with httpx.AsyncClient( + base_url="http://catalog.testserver.io", + headers={"Content-Type": "application/json"}, + transport=httpx.ASGITransport(app=app), + ) as acli: + assert isinstance(acli._transport, httpx.ASGITransport) # noqa: SLF001 + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 0 + + yield acli + + assert spy_app.on_startup.call_count == 1 + assert spy_app.on_shutdown.call_count == 0 + + +@pytest.fixture +def service_caching_disabled(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("AIOCACHE_DISABLE", "1") + + +@pytest.fixture +def repository_lifespan_disabled(mocker: MockerFixture): + mocker.patch.object( + simcore_service_catalog.core.events, + "repository_lifespan_manager", + autospec=True, + ) + + +@pytest.fixture +def background_task_lifespan_disabled(mocker: MockerFixture) -> None: + class MockedBackgroundTaskContextManager: + async def __aenter__(self): + print( + "TEST", + 
background_task_lifespan_disabled.__name__, + "Disabled background tasks. Skipping execution of __aenter__", + ) + + async def __aexit__(self, exc_type, exc_value, traceback): + print( + "TEST", + background_task_lifespan_disabled.__name__, + "Disabled background tasks. Skipping execution of __aexit__", + ) + + mocker.patch.object( + simcore_service_catalog.core.events, + "background_task_lifespan", + autospec=True, + return_value=MockedBackgroundTaskContextManager(), + ) + + +# +# rabbit-MQ +# + + +@pytest.fixture +def rabbitmq_and_rpc_setup_disabled(mocker: MockerFixture): + # The following services are affected if rabbitmq is not in place + mocker.patch.object( + simcore_service_catalog.core.events, "rabbitmq_lifespan", autospec=True + ) + mocker.patch.object( + simcore_service_catalog.core.events, "rpc_api_lifespan", autospec=True + ) + + +@pytest.fixture +async def rpc_client( + faker: Faker, rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]] +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client(f"catalog-client-{faker.word()}") + + +# +# director +# + + +@pytest.fixture +def director_lifespan_disabled(mocker: MockerFixture) -> None: + mocker.patch.object( + simcore_service_catalog.core.events, "director_lifespan", autospec=True + ) + + +@pytest.fixture +def director_rest_openapi_specs( + osparc_simcore_services_dir: Path, +) -> dict[str, Any]: + openapi_path = ( + osparc_simcore_services_dir + / "director" + / "src" + / "simcore_service_director" + / "api" + / "v0" + / "openapi.yaml" + ) + return yaml.safe_load(openapi_path.read_text()) + + +@pytest.fixture +def expected_director_rest_api_list_services( + user_email: EmailStr, user_first_name: str, user_last_name: str +) -> list[dict[str, Any]]: + """This fixture has at least TWO purposes: + + 1. can be used as a reference to check the results at the other end + 2. 
can be used to change responses of the director API downstream (override fixture) + + """ + return [ + { + "image_digest": hashlib.sha256( + f"simcore/services/comp/ans-model:{major}".encode() + ).hexdigest(), + "authors": [ + { + "name": f"{user_first_name} {user_last_name}", + "email": user_email, + "affiliation": "ACME", + } + ], + "contact": user_email, + "description": "Autonomous Nervous System Network model", + "inputs": { + "input_1": { + "displayOrder": 1, + "label": "Simulation time", + "description": "Duration of the simulation", + "type": "ref_contentSchema", + "contentSchema": { + "type": "number", + "x_unit": "milli-second", + }, + "defaultValue": 2, + } }, - "additionalProp2": { - "key": "simcore/services/comp/sleeper", - "version": "6.2.0", - "label": "string", - "progress": 0, - "thumbnail": "https://string.com", - "inputs": {}, - "inputAccess": { - "additionalProp1": "ReadAndWrite", - "additionalProp2": "ReadAndWrite", - "additionalProp3": "ReadAndWrite", + "integration-version": "1.0.0", + "key": "simcore/services/comp/ans-model", + "name": "Autonomous Nervous System Network model", + "outputs": { + "output_1": { + "displayOrder": 1, + "label": "ANS output", + "description": "Output of simulation of Autonomous Nervous System Network model", + "type": "data:*/*", + "fileToKeyMap": {"ANS_output.txt": "output_1"}, + }, + "output_2": { + "displayOrder": 2, + "label": "Stimulation parameters", + "description": "stim_param.txt file containing the input provided in the inputs port", + "type": "data:*/*", + "fileToKeyMap": {"ANS_stim_param.txt": "output_2"}, }, - "inputNodes": ["ba8e4558-1088-49b1-8fe6-f591634089e5"], - "outputs": {}, - "outputNodes": ["ba8e4558-1088-49b1-8fe6-f591634089e5"], - "parent": "ba8e4558-1088-49b1-8fe6-f591634089e5", - "position": {"x": 0, "y": 0}, }, - "additionalProp3": { - "key": "simcore/services/comp/sleeper", - "version": "6.2.0", - "label": "string", - "progress": 0, - "thumbnail": "https://string.com", - "inputs": {}, - "inputAccess": { - "additionalProp1": "ReadAndWrite", - "additionalProp2": "ReadOnly", - "additionalProp3": "ReadAndWrite", + "thumbnail": "https://www.statnews.com/wp-content/uploads/2020/05/3D-rat-heart.-iScience--768x432.png", + "type": "computational", + "version": f"{major}.0.0", + } + for major in range(1, 4) + ] + + +@pytest.fixture +def mocked_director_rest_api_base( + app_settings: ApplicationSettings, + director_rest_openapi_specs: dict[str, Any], +) -> Iterator[respx.MockRouter]: + """ + BASIC fixture to mock director service API + + Use `mocked_director_service_api_base` to customize the mocks + + """ + assert ( + app_settings.CATALOG_DIRECTOR + ), "Check dependency on fixture `director_setup_disabled`" + + # NOTE: this MUST be in sync with services/director/src/simcore_service_director/api/v0/openapi.yaml + openapi = director_rest_openapi_specs + assert Version(openapi["info"]["version"]) == Version("0.1.0") + + with respx.mock( + base_url=app_settings.CATALOG_DIRECTOR.base_url, # NOTE: it include v0/ + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + + # HEATHCHECK + assert openapi["paths"].get("/") + respx_mock.head("/", name="healthcheck").respond( + status.HTTP_200_OK, + json={ + "data": { + "name": "simcore-service-director", + "status": "SERVICE_RUNNING", + "api_version": "0.1.0", + "version": "0.1.0", + } + }, + ) + + yield respx_mock + + +@pytest.fixture +def get_mocked_service_labels() -> Callable[[str, str], dict]: + def _(service_key: str, service_version: str) -> dict: + return { + 
"io.simcore.authors": '{"authors": [{"name": "John Smith", "email": "john@acme.com", "affiliation": "ACME\'IS Foundation"}]}', + "io.simcore.contact": '{"contact": "john@acme.com"}', + "io.simcore.description": '{"description": "Autonomous Nervous System Network model"}', + "io.simcore.inputs": '{"inputs": {"input_1": {"displayOrder": 1.0, "label": "Simulation time", "description": "Duration of the simulation", "type": "ref_contentSchema", "contentSchema": {"type": "number", "x_unit": "milli-second"}, "defaultValue": 2.0}}}', + "io.simcore.integration-version": '{"integration-version": "1.0.0"}', + "io.simcore.key": '{"key": "xxxxx"}'.replace("xxxxx", service_key), + "io.simcore.name": '{"name": "Autonomous Nervous System Network model"}', + "io.simcore.outputs": '{"outputs": {"output_1": {"displayOrder": 1.0, "label": "ANS output", "description": "Output of simulation of Autonomous Nervous System Network model", "type": "data:*/*", "fileToKeyMap": {"ANS_output.txt": "output_1"}}, "output_2": {"displayOrder": 2.0, "label": "Stimulation parameters", "description": "stim_param.txt file containing the input provided in the inputs port", "type": "data:*/*", "fileToKeyMap": {"ANS_stim_param.txt": "output_2"}}}}', + "io.simcore.thumbnail": '{"thumbnail": "https://www.statnews.com/wp-content/uploads/2020/05/3D-rat-heart.-iScience--768x432.png"}', + "io.simcore.type": '{"type": "computational"}', + "io.simcore.version": '{"version": "xxxxx"}'.replace( + "xxxxx", service_version + ), + "maintainer": "johnsmith", + "org.label-schema.build-date": "2023-04-17T08:04:15Z", + "org.label-schema.schema-version": "1.0", + "org.label-schema.vcs-ref": "4d79449a2e79f8a3b3b2e1dd0290af9f3d1a8792", + "org.label-schema.vcs-url": "https://github.com/ITISFoundation/jupyter-math.git", + "simcore.service.restart-policy": "no-restart", + "simcore.service.settings": '[{"name": "Resources", "type": "Resources", "value": {"Limits": {"NanoCPUs": 1000000000, "MemoryBytes": 4194304}, "Reservations": {"NanoCPUs": 4000000000, "MemoryBytes": 2147483648}}}]', + } + + return _ + + +@pytest.fixture +def mock_service_extras() -> ServiceExtras: + return TypeAdapter(ServiceExtras).validate_python( + ServiceExtras.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def mocked_director_rest_api( + mocked_director_rest_api_base: respx.MockRouter, + director_rest_openapi_specs: dict[str, Any], + expected_director_rest_api_list_services: list[dict[str, Any]], + get_mocked_service_labels: Callable[[str, str], dict], + mock_service_extras: ServiceExtras, +) -> respx.MockRouter: + """ + STANDARD fixture to mock director service API + + To customize the mock responses use `mocked_director_service_api_base` instead + """ + # alias + openapi = director_rest_openapi_specs + respx_mock = mocked_director_rest_api_base + + def _search(service_key, service_version): + try: + return next( + s + for s in expected_director_rest_api_list_services + if (s["key"] == service_key and s["version"] == service_version) + ) + except StopIteration: + return None + + # LIST + assert openapi["paths"].get("/services") + + respx_mock.get(path__regex=r"/services$", name="list_services").respond( + status.HTTP_200_OK, json={"data": expected_director_rest_api_list_services} + ) + + # GET + assert openapi["paths"].get("/services/{service_key}/{service_version}") + + @respx_mock.get( + path__regex=r"^/services/(?P[/\w-]+)/(?P[0-9.]+)$", + name="get_service", + ) + def _get_service(request: httpx.Request, service_key, service_version): + if found := 
_search(service_key, service_version): + # NOTE: this is a defect in director's API + single_service_list = [found] + return httpx.Response( + status.HTTP_200_OK, json={"data": single_service_list} + ) + return httpx.Response( + status.HTTP_404_NOT_FOUND, + json={ + "data": { + "status": status.HTTP_404_NOT_FOUND, + "message": f"The service {service_key}:{service_version} does not exist", + } + }, + ) + + # GET LABELS + assert openapi["paths"].get("/services/{service_key}/{service_version}/labels") + + @respx_mock.get( + path__regex=r"^/services/(?P[/\w-]+)/(?P[0-9\.]+)/labels$", + name="get_service_labels", + ) + def _get_service_labels(request, service_key, service_version): + if found := _search(service_key, service_version): + return httpx.Response( + status_code=status.HTTP_200_OK, + json={ + "data": get_mocked_service_labels(found["key"], found["version"]) }, - "inputNodes": [], - "outputs": {}, - "outputNodes": [], - "parent": None, - "position": {"x": 0, "y": 0}, + ) + return httpx.Response( + status.HTTP_404_NOT_FOUND, + json={ + "data": { + "status": status.HTTP_404_NOT_FOUND, + "message": f"The service {service_key}:{service_version} does not exist", + } }, - }, - } - return deepcopy(DAG_DATA_IN_DICT) + ) + + return respx_mock diff --git a/services/catalog/tests/unit/test__model_examples.py b/services/catalog/tests/unit/test__model_examples.py new file mode 100644 index 00000000000..763c5944de7 --- /dev/null +++ b/services/catalog/tests/unit/test__model_examples.py @@ -0,0 +1,64 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from typing import Any + +import pytest +import simcore_service_catalog.models +from models_library.api_schemas_catalog.services import ( + ServiceListFilters, +) +from models_library.services_enums import ServiceType +from pydantic import BaseModel, TypeAdapter +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) +from simcore_service_catalog.models.services_db import ServiceDBFilters + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(simcore_service_catalog.models), +) +def test_catalog_service_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) + + +@pytest.mark.parametrize( + "filters", + [ + pytest.param( + None, + id="no filters", + ), + pytest.param( + ServiceListFilters( + service_type=ServiceType.COMPUTATIONAL, + service_key_pattern="*", + version_display_pattern="*", + ), + id="all filters", + ), + pytest.param( + ServiceListFilters( + service_type=ServiceType.COMPUTATIONAL, + service_key_pattern="*", + version_display_pattern="*", + ), + id="all filters with regex", + ), + ], +) +def test_adapter_to_domain_model( + filters: ServiceListFilters | None, +): + + TypeAdapter(ServiceDBFilters | None).validate_python(filters, from_attributes=True) diff --git a/services/catalog/tests/unit/test_api_rest.py b/services/catalog/tests/unit/test_api_rest.py new file mode 100644 index 00000000000..ca40dc60a5b --- /dev/null +++ b/services/catalog/tests/unit/test_api_rest.py @@ -0,0 +1,37 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import httpx +from fastapi import status 
+from fastapi.testclient import TestClient + + +def test_sync_client( + repository_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + background_task_lifespan_disabled: None, + director_lifespan_disabled: None, + client: TestClient, +): + + response = client.get("/v0/") + assert response.status_code == status.HTTP_200_OK + + response = client.get("/v0/meta") + assert response.status_code == status.HTTP_200_OK + + +async def test_async_client( + repository_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + background_task_lifespan_disabled: None, + director_lifespan_disabled: None, + aclient: httpx.AsyncClient, +): + response = await aclient.get("/v0/") + assert response.status_code == status.HTTP_200_OK + + response = await aclient.get("/v0/meta") + assert response.status_code == status.HTTP_200_OK diff --git a/services/catalog/tests/unit/test_cli.py b/services/catalog/tests/unit/test_cli.py new file mode 100644 index 00000000000..95d4794306d --- /dev/null +++ b/services/catalog/tests/unit/test_cli.py @@ -0,0 +1,35 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import os + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_catalog._meta import API_VERSION +from simcore_service_catalog.cli import main +from simcore_service_catalog.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def test_cli_help_and_version(cli_runner: CliRunner): + result = cli_runner.invoke(main, "--help") + assert result.exit_code == os.EX_OK, result.output + + result = cli_runner.invoke(main, "--version") + assert result.exit_code == os.EX_OK, result.output + assert result.stdout.strip() == API_VERSION + + +def test_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK + + print(result.output) + settings = ApplicationSettings(result.output) + assert settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() + + +def test_run(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["run"]) + assert result.exit_code == 0 + assert "disabled" in result.stdout diff --git a/services/catalog/tests/unit/test_clients_director.py b/services/catalog/tests/unit/test_clients_director.py new file mode 100644 index 00000000000..3c91d9bc52a --- /dev/null +++ b/services/catalog/tests/unit/test_clients_director.py @@ -0,0 +1,86 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import urllib.parse +from typing import Any + +import pytest +from fastapi import FastAPI +from models_library.services_metadata_published import ServiceMetaDataPublished +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx.router import MockRouter +from simcore_service_catalog.api._dependencies.director import get_director_client +from simcore_service_catalog.clients.director import DirectorClient + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, app_environment: EnvVarsDict +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + "SC_BOOT_MODE": "local-development", + }, + ) + + +async def test_director_client_high_level_api( + 
repository_lifespan_disabled: None, + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + expected_director_rest_api_list_services: list[dict[str, Any]], + mocked_director_rest_api: MockRouter, + app: FastAPI, +): + # gets director client as used in handlers + director_api = get_director_client(app) + + assert app.state.director_api == director_api + assert isinstance(director_api, DirectorClient) + + # PING + assert await director_api.is_responsive() + + # GET + expected_service = ServiceMetaDataPublished( + **expected_director_rest_api_list_services[0] + ) + assert ( + await director_api.get_service(expected_service.key, expected_service.version) + == expected_service + ) + # TODO: error handling! + + +async def test_director_client_low_level_api( + repository_lifespan_disabled: None, + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + expected_director_rest_api_list_services: list[dict[str, Any]], + app: FastAPI, +): + director_api = get_director_client(app) + + expected_service = expected_director_rest_api_list_services[0] + key = expected_service["key"] + version = expected_service["version"] + + service_labels = await director_api.get( + f"/services/{urllib.parse.quote_plus(key)}/{version}/labels" + ) + + assert service_labels + + service = await director_api.get( + f"/services/{urllib.parse.quote_plus(key)}/{version}" + ) + assert service diff --git a/services/catalog/tests/unit/test_core_settings.py b/services/catalog/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..9f94c6c3588 --- /dev/null +++ b/services/catalog/tests/unit/test_core_settings.py @@ -0,0 +1,22 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_catalog.core.settings import ApplicationSettings + + +def test_valid_web_application_settings(app_environment: EnvVarsDict): + """ + We can validate actual .env files (also referred to as `repo.config` files) by passing them via the CLI + + $ ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + $ pytest --external-envfile=.secrets --pdb tests/unit/test_core_settings.py + + """ + settings = ApplicationSettings() # type: ignore + assert settings + + assert settings == ApplicationSettings.create_from_envs() diff --git a/services/catalog/tests/unit/test_models_domain_groups.py b/services/catalog/tests/unit/test_models_domain_groups.py deleted file mode 100644 index 84ef885f13c..00000000000 --- a/services/catalog/tests/unit/test_models_domain_groups.py +++ /dev/null @@ -1,17 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -from pprint import pformat - -import pytest -from simcore_service_catalog.models.domain.group import GroupAtDB - - -@pytest.mark.parametrize("model_cls", (GroupAtDB,)) -def test_service_api_models_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" diff --git a/services/catalog/tests/unit/test_models_schemas.py b/services/catalog/tests/unit/test_models_schemas.py deleted file mode 100644 index adb7e226e99..00000000000 --- a/services/catalog/tests/unit/test_models_schemas.py +++ /dev/null @@ -1,89
+0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -from pprint import pformat - -import pytest -from models_library.services import ServiceInput -from simcore_service_catalog.models.schemas.services import ( - ServiceGet, - ServiceItem, - ServiceUpdate, -) -from simcore_service_catalog.models.schemas.services_ports import ServicePortGet - - -@pytest.mark.parametrize( - "model_cls", - ( - ServiceGet, - ServiceUpdate, - ServiceItem, - ServicePortGet, - ), -) -def test_service_api_models_examples(model_cls, model_cls_examples): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -def test_service_port_with_file(): - - io = ServiceInput.parse_obj( - { - "displayOrder": 1, - "label": "Input files", - "description": "Files downloaded from service connected at the input", - "type": "data:*/*", # < --- generic mimetype! - "fileToKeyMap": { - "single_number.txt": "input_1" - }, # <-- provides a file with an extension - } - ) - - port = ServicePortGet.from_service_io("input", "input_1", io).dict( - exclude_unset=True - ) - - assert port == { - "key": "input_1", - "kind": "input", - "content_media_type": "text/plain", # <-- deduced from extension - "content_schema": { - "type": "string", - "title": "Input files", - "description": "Files downloaded from service connected at the input", - }, - } - - -def test_service_port_with_boolean(): - - io = ServiceInput.parse_obj( - { - "displayOrder": 3, - "label": "Same title and description is more usual than you might think", - "description": "Same title and description is more usual than you might think", # <- same label and description! 
- "type": "boolean", - "defaultValue": False, # <- has a default - } - ) - - port = ServicePortGet.from_service_io("input", "input_1", io).dict( - exclude_unset=True - ) - - assert port == { - "key": "input_1", - "kind": "input", - # "content_media_type": None, # <-- no content media - "content_schema": { - "type": "boolean", - "title": "Same title and description is more usual than you might think", # <-- no description - "default": False, # <-- - }, - } diff --git a/services/catalog/tests/unit/test_repository_services_sql.py b/services/catalog/tests/unit/test_repository_services_sql.py new file mode 100644 index 00000000000..7acc31c585f --- /dev/null +++ b/services/catalog/tests/unit/test_repository_services_sql.py @@ -0,0 +1,77 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from simcore_postgres_database.utils import as_postgres_sql_query_str +from simcore_service_catalog.repository._services_sql import ( + AccessRightsClauses, + can_get_service_stmt, + get_service_history_stmt, + get_service_stmt, + latest_services_total_count_stmt, + list_latest_services_stmt, +) + + +def test_building_services_sql_statements(): + def _check(func_smt, **kwargs): + print(f"{func_smt.__name__:*^100}") + stmt = func_smt(**kwargs) + print() + print(as_postgres_sql_query_str(stmt)) + print() + + # some data + product_name = "osparc" + user_id = 425 # 425 (guidon) # 4 (odei) + service_key = "simcore/services/comp/isolve" + service_version = "2.0.85" + + service_key = "simcore/services/dynamic/raw-graphs" + service_version = "2.11.2" + + service_key = "simcore/services/dynamic/s4l-core-8-0-0-dy" + service_version = "3.2.39" + + _check( + get_service_history_stmt, + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=service_key, + ) + + _check( + can_get_service_stmt, + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=service_key, + service_version=service_version, + ) + + _check( + get_service_stmt, + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + service_key=service_key, + service_version=service_version, + ) + + _check( + list_latest_services_stmt, + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + limit=15, + offset=80, + ) + + _check( + latest_services_total_count_stmt, + product_name=product_name, + user_id=user_id, + access_rights=AccessRightsClauses.can_read, + ) diff --git a/services/catalog/tests/unit/test_service_compatibility.py b/services/catalog/tests/unit/test_service_compatibility.py new file mode 100644 index 00000000000..30cbb673358 --- /dev/null +++ b/services/catalog/tests/unit/test_service_compatibility.py @@ -0,0 +1,284 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import arrow +import pytest +from models_library.services_types import ServiceVersion +from models_library.users import UserID +from packaging.specifiers import SpecifierSet +from packaging.version import Version +from pytest_mock import MockerFixture, MockType +from simcore_service_catalog.models.services_db import ReleaseDBGet +from simcore_service_catalog.repository.services import ServicesRepository +from simcore_service_catalog.service.compatibility import ( + _get_latest_compatible_version, + evaluate_service_compatibility_map, +) + +# 
References + # + # - Semantic versioning: + # - https://semver.org/ + # - Python Packaging User Guide: + # - https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers + # - `packaging` library + # - https://packaging.pypa.io/en/stable/version.html + # - https://packaging.pypa.io/en/stable/specifiers.html + # + + +def test_compatible_with_minor_release(): + """Testing https://packaging.python.org/en/latest/specifications/version-specifiers/#compatible-release + + The following groups of version clauses are equivalent: + ~= 2.2 + >= 2.2, == 2.* + """ + minor_compatible_spec = SpecifierSet("~=2.2") + + assert "2.2" in minor_compatible_spec + assert "2.2.0" in minor_compatible_spec + + assert Version("2.2") == Version("2.2.0") + + # bigger patch -> compatible + assert "2.2.1" in minor_compatible_spec + + # bigger minor -> compatible + assert "2.3" in minor_compatible_spec + + # bigger major -> INcompatible + assert "3.3" not in minor_compatible_spec + + # smaller major -> INcompatible + assert "2.1" not in minor_compatible_spec + assert "1.0" not in minor_compatible_spec + assert "0.1.5" not in minor_compatible_spec + + +def test_compatible_with_patch_release(): + """Testing https://packaging.python.org/en/latest/specifications/version-specifiers/#compatible-release + + The following groups of version clauses are equivalent: + ~= 1.4.5 + >= 1.4.5, == 1.4.* + """ + patch_compatible_spec = SpecifierSet("~=1.4.5") + + assert "1.4.5" in patch_compatible_spec + + # bigger patch -> compatible + assert "1.4.6" in patch_compatible_spec + + # smaller patch -> INcompatible + assert "1.4.4" not in patch_compatible_spec + + # bigger minor -> INcompatible! + assert "1.5" not in patch_compatible_spec + assert "1.5.1" not in patch_compatible_spec + + # smaller major -> INcompatible + assert "0.1.5" not in patch_compatible_spec + assert "1.0" not in patch_compatible_spec + assert "1.1" not in patch_compatible_spec + assert "1.3" not in patch_compatible_spec + + +@pytest.fixture +def versions_history() -> list[Version]: + return sorted( + Version(f"{M}.{m}.{p}") + for M in range(10) + for m in range(0, 5, 2) + for p in range(0, 10, 4) + ) + + +def test_version_specifiers(versions_history: list[Version]): + # given a list of versions, test the first compatibility starting from the latest + # If I have ">1.2.23,~=1.2.23" + + version = Version("1.2.3") + + # >1.2.3 + newer_version_spec = SpecifierSet(f">{version}") + + # >= 1.2, == 1.* + minor_compatible_spec = SpecifierSet(f"~={version.major}.{version.minor}") + + # >= 1.2.3, == 1.2.* + patch_compatible_spec = SpecifierSet( + f"~={version.major}.{version.minor}.{version.micro}" + ) + + compatible = list( + (minor_compatible_spec & newer_version_spec).filter(versions_history) + ) + assert version not in compatible + assert all(v > version for v in compatible) + assert all(v.major == version.major for v in compatible) + + latest_compatible = compatible[-1] + assert version < latest_compatible + + compatible = list( + (patch_compatible_spec & newer_version_spec).filter(versions_history) + ) + assert version not in compatible + assert all(v > version for v in compatible) + assert all( + v.major == version.major and v.minor == version.minor for v in compatible + ) + latest_compatible = compatible[-1] + assert version < latest_compatible + + +def test_get_latest_compatible_version(versions_history: list[Version]): + latest_first_releases = sorted(versions_history, reverse=True) + + # cannot upgrade to anything + latest = 
latest_first_releases[0] + assert _get_latest_compatible_version(latest, latest_first_releases) is None + + # bump MAJOR + not_released = Version(f"{latest.major+1}") + assert _get_latest_compatible_version(not_released, latest_first_releases) is None + + # decrease patch + target = Version(f"{latest.major}.{latest.minor}.{latest.micro-1}") + assert _get_latest_compatible_version(target, latest_first_releases) == latest + + # decrease minor (with default compatibility specs) + target = Version(f"{latest.major}.{latest.minor-2}.0") + latest_compatible = _get_latest_compatible_version(target, latest_first_releases) + assert latest_compatible + assert latest_compatible < latest + + +def _create_as(cls, **overrides): + kwargs = { + "compatibility_policy": None, + "created": arrow.now().datetime, + "deprecated": None, + "version_display": None, + } + kwargs.update(overrides) + return cls(**kwargs) + + +@pytest.fixture +def mock_repo(mocker: MockerFixture) -> MockType: + return mocker.AsyncMock(ServicesRepository) + + +async def test_evaluate_service_compatibility_map_with_default_policy( + mock_repo: MockType, user_id: UserID +): + service_release_history = [ + _create_as(ReleaseDBGet, version="1.0.0"), + _create_as(ReleaseDBGet, version="1.0.1"), + _create_as(ReleaseDBGet, version="1.1.0"), + _create_as(ReleaseDBGet, version="2.0.0"), + ] + + compatibility_map = await evaluate_service_compatibility_map( + mock_repo, "product_name", user_id, service_release_history + ) + + assert len(compatibility_map) == 4 + assert compatibility_map[ServiceVersion("1.0.0")].can_update_to.version == "1.0.1" + assert compatibility_map[ServiceVersion("1.0.1")] is None + assert compatibility_map[ServiceVersion("1.1.0")] is None + assert compatibility_map[ServiceVersion("2.0.0")] is None + + +async def test_evaluate_service_compatibility_map_with_custom_policy( + mock_repo: MockType, user_id: UserID +): + service_release_history = [ + _create_as(ReleaseDBGet, version="1.0.0"), + _create_as( + ReleaseDBGet, + version="1.0.1", + compatibility_policy={"versions_specifier": ">1.1.0,<=2.0.0"}, + ), + _create_as(ReleaseDBGet, version="1.2.0"), + _create_as(ReleaseDBGet, version="2.0.0"), + ] + + compatibility_map = await evaluate_service_compatibility_map( + mock_repo, "product_name", user_id, service_release_history + ) + + assert len(compatibility_map) == 4 + assert ( + compatibility_map[ServiceVersion("1.0.0")].can_update_to.version == "1.0.1" + ) # default + assert ( + compatibility_map[ServiceVersion("1.0.1")].can_update_to.version == "2.0.0" + ) # version customized + assert compatibility_map[ServiceVersion("1.2.0")] is None + assert compatibility_map[ServiceVersion("2.0.0")] is None + + +async def test_evaluate_service_compatibility_map_with_other_service( + mock_repo: MockType, user_id: UserID +): + service_release_history = [ + _create_as(ReleaseDBGet, version="1.0.0"), + _create_as( + ReleaseDBGet, + version="1.0.1", + compatibility_policy={ + "other_service_key": "simcore/services/comp/other_service", + "versions_specifier": "<=5.1.0", + }, + ), + ] + + mock_repo.get_service_history.return_value = [ + _create_as(ReleaseDBGet, version="5.0.0"), + _create_as(ReleaseDBGet, version="5.1.0"), + _create_as(ReleaseDBGet, version="5.2.0"), + ] + + compatibility_map = await evaluate_service_compatibility_map( + mock_repo, "product_name", user_id, service_release_history + ) + + assert len(compatibility_map) == 2 + assert compatibility_map[ServiceVersion("1.0.0")].can_update_to.version == "1.0.1" + # NOTE: 1.0.1 is also 
upgradable but it is not evaluated as such because our algorithm only + # checks compatibility once instead of recursively + + assert ( + compatibility_map[ServiceVersion("1.0.1")].can_update_to.key + == "simcore/services/comp/other_service" + ) + assert compatibility_map[ServiceVersion("1.0.1")].can_update_to.version == "5.1.0" + + +async def test_evaluate_service_compatibility_map_with_deprecated_versions( + mock_repo: MockType, user_id: UserID +): + service_release_history = [ + _create_as(ReleaseDBGet, version="1.0.0"), + _create_as(ReleaseDBGet, version="1.0.1", deprecated=arrow.now().datetime), + _create_as(ReleaseDBGet, version="1.2.0"), + _create_as(ReleaseDBGet, version="1.2.5"), + ] + + compatibility_map = await evaluate_service_compatibility_map( + mock_repo, "product_name", user_id, service_release_history + ) + + assert len(compatibility_map) == 4 + assert ( + compatibility_map[ServiceVersion("1.0.0")] is None + ) # cannot upgrade to deprecated 1.0.1 + assert compatibility_map[ServiceVersion("1.0.1")] is None # Deprecated version + assert compatibility_map[ServiceVersion("1.2.0")].can_update_to.version == "1.2.5" + assert compatibility_map[ServiceVersion("1.2.5")] is None diff --git a/services/catalog/tests/unit/test_service_function_services.py b/services/catalog/tests/unit/test_service_function_services.py new file mode 100644 index 00000000000..06853bcf715 --- /dev/null +++ b/services/catalog/tests/unit/test_service_function_services.py @@ -0,0 +1,23 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from models_library.api_schemas_catalog.services import ServiceMetaDataPublished +from simcore_service_catalog.service.function_services import ( + is_function_service, + iter_service_docker_data, +) + + +@pytest.mark.parametrize( + "image_metadata", iter_service_docker_data(), ids=lambda obj: obj.name +) +def test_create_services_metadata(image_metadata: ServiceMetaDataPublished): + assert isinstance(image_metadata, ServiceMetaDataPublished) + + assert is_function_service(image_metadata.key) diff --git a/services/catalog/tests/unit/test_service_manifest.py b/services/catalog/tests/unit/test_service_manifest.py new file mode 100644 index 00000000000..d2a57c098dc --- /dev/null +++ b/services/catalog/tests/unit/test_service_manifest.py @@ -0,0 +1,140 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +import toolz +from fastapi import FastAPI +from models_library.function_services_catalog.api import is_function_service +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx.router import MockRouter +from simcore_service_catalog.api._dependencies.director import get_director_client +from simcore_service_catalog.clients.director import DirectorClient +from simcore_service_catalog.service import manifest + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, app_environment: EnvVarsDict +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + "SC_BOOT_MODE": "local-development", + }, + ) + + +@pytest.fixture +async def director_client(
repository_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + app: FastAPI, +) -> DirectorClient: + _client = get_director_client(app) + assert app.state.director_api == _client + assert isinstance(_client, DirectorClient) + return _client + + +@pytest.fixture +async def all_services_map( + director_client: DirectorClient, +) -> manifest.ServiceMetaDataPublishedDict: + return await manifest.get_services_map(director_client) + + +async def test_get_services_map( + mocked_director_rest_api: MockRouter, + director_client: DirectorClient, +): + all_services_map = await manifest.get_services_map(director_client) + assert mocked_director_rest_api["list_services"].called + + for service in all_services_map.values(): + if is_function_service(service.key): + assert service.image_digest is None + else: + assert service.image_digest is not None + + services_image_digest = { + s.image_digest for s in all_services_map.values() if s.image_digest + } + assert len(services_image_digest) < len(all_services_map) + + +async def test_get_service( + mocked_director_rest_api: MockRouter, + director_client: DirectorClient, + all_services_map: manifest.ServiceMetaDataPublishedDict, +): + + for expected_service in all_services_map.values(): + service = await manifest.get_service( + key=expected_service.key, + version=expected_service.version, + director_client=director_client, + ) + + assert service == expected_service + if not is_function_service(service.key): + assert mocked_director_rest_api["get_service"].called + + +async def test_get_service_ports( + director_client: DirectorClient, + all_services_map: manifest.ServiceMetaDataPublishedDict, +): + + for expected_service in all_services_map.values(): + ports = await manifest.get_service_ports( + key=expected_service.key, + version=expected_service.version, + director_client=director_client, + ) + + # Verify all ports are properly retrieved + assert isinstance(ports, list) + + # Check input ports + input_ports = [p for p in ports if p.kind == "input"] + if expected_service.inputs: + assert len(input_ports) == len(expected_service.inputs) + for port in input_ports: + assert port.key in expected_service.inputs + assert port.port == expected_service.inputs[port.key] + else: + assert not input_ports + + # Check output ports + output_ports = [p for p in ports if p.kind == "output"] + if expected_service.outputs: + assert len(output_ports) == len(expected_service.outputs) + for port in output_ports: + assert port.key in expected_service.outputs + assert port.port == expected_service.outputs[port.key] + else: + assert not output_ports + + +async def test_get_batch_services( + director_client: DirectorClient, + all_services_map: manifest.ServiceMetaDataPublishedDict, +): + + for expected_services in toolz.partition(2, all_services_map.values()): + selection = [(s.key, s.version) for s in expected_services] + got_services = await manifest.get_batch_services(selection, director_client) + + assert [(s.key, s.version) for s in got_services] == selection + + # NOTE: simpler to visualize + for got, expected in zip(got_services, expected_services, strict=True): + assert got == expected diff --git a/services/catalog/tests/unit/test_services_director.py b/services/catalog/tests/unit/test_services_director.py deleted file mode 100644 index 707fe1a6693..00000000000 --- a/services/catalog/tests/unit/test_services_director.py +++ /dev/null @@ -1,77 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument
-# pylint:disable=redefined-outer-name -# pylint:disable=protected-access -# pylint:disable=not-context-manager - - -from typing import Iterator - -import pytest -import respx -from fastapi import FastAPI -from fastapi.testclient import TestClient -from pytest import MonkeyPatch -from respx.router import MockRouter -from simcore_service_catalog.api.dependencies.director import get_director_api -from simcore_service_catalog.core.application import init_app -from simcore_service_catalog.services.director import DirectorApi - - -@pytest.fixture -def minimal_app( - monkeypatch: MonkeyPatch, testing_environ_vars: dict[str, str] -) -> Iterator[FastAPI]: - # disable a couple of subsystems - monkeypatch.setenv("CATALOG_POSTGRES", "null") - monkeypatch.setenv("CATALOG_TRACING", "null") - monkeypatch.setenv("SC_BOOT_MODE", "local-development") - - app = init_app() - - yield app - - -@pytest.fixture() -def client(minimal_app: FastAPI) -> Iterator[TestClient]: - # NOTE: this way we ensure the events are run in the application - # since it starts the app on a test server - with TestClient(minimal_app) as client: - yield client - - -@pytest.fixture -def mocked_director_service_api(minimal_app: FastAPI) -> Iterator[MockRouter]: - with respx.mock( - base_url=minimal_app.state.settings.CATALOG_DIRECTOR.base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - respx_mock.head("/", name="healthcheck").respond(200, json={"health": "OK"}) - respx_mock.get("/services", name="list_services").respond( - 200, json={"data": ["one", "two"]} - ) - - yield respx_mock - - -async def test_director_client_setup( - mocked_director_service_api: MockRouter, - minimal_app: FastAPI, - client: TestClient, -): - - # gets director client as used in handlers - director_api = get_director_api(minimal_app) - - assert minimal_app.state.director_api == director_api - assert isinstance(director_api, DirectorApi) - - # use it - data = await director_api.get("/services") - - # director entry-point has hit - assert mocked_director_service_api["list_services"].called - - # returns un-enveloped response - assert data == ["one", "two"] diff --git a/services/catalog/tests/unit/test_services_function_services.py b/services/catalog/tests/unit/test_services_function_services.py deleted file mode 100644 index 798e414c1d6..00000000000 --- a/services/catalog/tests/unit/test_services_function_services.py +++ /dev/null @@ -1,20 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=protected-access - -import pytest -from simcore_service_catalog.models.schemas.services import ServiceDockerData -from simcore_service_catalog.services.function_services import ( - is_function_service, - iter_service_docker_data, -) - - -@pytest.mark.parametrize( - "image_metadata", iter_service_docker_data(), ids=lambda obj: obj.name -) -def test_create_services_metadata(image_metadata: ServiceDockerData): - assert isinstance(image_metadata, ServiceDockerData) - - assert is_function_service(image_metadata.key) diff --git a/services/catalog/tests/unit/test_utils_service_extras.py b/services/catalog/tests/unit/test_utils_service_extras.py new file mode 100644 index 00000000000..15db550e774 --- /dev/null +++ b/services/catalog/tests/unit/test_utils_service_extras.py @@ -0,0 +1,35 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from unittest.mock import AsyncMock + +import pytest +from fastapi import FastAPI, status +from httpx import 
AsyncClient +from models_library.api_schemas_directorv2.services import ServiceExtras +from pydantic import TypeAdapter +from respx import MockRouter + + +@pytest.fixture +def mock_engine(app: FastAPI) -> None: + app.state.engine = AsyncMock() + + +async def test_get_service_extras( + repository_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + background_task_lifespan_disabled: None, + mock_engine: None, + mock_service_extras: ServiceExtras, + aclient: AsyncClient, +): + service_key = "simcore/services/comp/ans-model" + service_version = "3.0.0" + result = await aclient.get(f"/v0/services/{service_key}/{service_version}/extras") + assert result.status_code == status.HTTP_200_OK, result.text + + assert ( + TypeAdapter(ServiceExtras).validate_python(result.json()) == mock_service_extras + ) diff --git a/services/catalog/tests/unit/test_utils_service_labels.py b/services/catalog/tests/unit/test_utils_service_labels.py new file mode 100644 index 00000000000..577fb5f0457 --- /dev/null +++ b/services/catalog/tests/unit/test_utils_service_labels.py @@ -0,0 +1,31 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from collections.abc import Callable +from unittest.mock import AsyncMock + +import pytest +from fastapi import FastAPI, status +from httpx import AsyncClient +from respx import MockRouter + + +@pytest.fixture +def mock_engine(app: FastAPI) -> None: + app.state.engine = AsyncMock() + + +async def test_get_service_labels( + repository_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + background_task_lifespan_disabled: None, + mock_engine: None, + get_mocked_service_labels: Callable[[str, str], dict], + aclient: AsyncClient, +): + service_key = "simcore/services/comp/ans-model" + service_version = "3.0.0" + result = await aclient.get(f"/v0/services/{service_key}/{service_version}/labels") + assert result.status_code == status.HTTP_200_OK, result.text + assert result.json() == get_mocked_service_labels(service_key, service_version) diff --git a/services/catalog/tests/unit/test_utils_service_resources.py b/services/catalog/tests/unit/test_utils_service_resources.py index 2b73bdfc40f..3fc329d2f50 100644 --- a/services/catalog/tests/unit/test_utils_service_resources.py +++ b/services/catalog/tests/unit/test_utils_service_resources.py @@ -1,3 +1,11 @@ +# pylint: disable=not-context-manager +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + from typing import Any import pytest @@ -69,12 +77,6 @@ def test_parse_generic_resources( ResourcesDict(), id="empty task resource spec", ), - pytest.param( - ResourcesDict(), - ServiceSpec(TaskTemplate=TaskSpec(Resources=Resources1())), # type: ignore - ResourcesDict(), - id="empty task resource spec", - ), pytest.param( ResourcesDict(), ServiceSpec(TaskTemplate=TaskSpec(Resources=Resources1(Limits=Limit()))), # type: ignore @@ -285,10 +287,10 @@ def test_merge_service_resources_with_user_specs( merged_resources = merge_service_resources_with_user_specs( service_resources, user_specs ) - assert all(key in expected_resources for key in merged_resources.keys()) - assert all(key in merged_resources for key in expected_resources.keys()) + assert all(key in expected_resources for key in merged_resources) + assert all(key in merged_resources for key in expected_resources) for 
resource_key, resource_value in merged_resources.items(): # NOTE: so that float values are compared correctly - assert resource_value.dict() == pytest.approx( - expected_resources[resource_key].dict() + assert resource_value.model_dump() == pytest.approx( + expected_resources[resource_key].model_dump() ) diff --git a/services/catalog/tests/unit/with_dbs/conftest.py b/services/catalog/tests/unit/with_dbs/conftest.py index 8c5a39540ea..7be70290d6f 100644 --- a/services/catalog/tests/unit/with_dbs/conftest.py +++ b/services/catalog/tests/unit/with_dbs/conftest.py @@ -1,105 +1,85 @@ # pylint: disable=not-context-manager +# pylint: disable=protected-access # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable import itertools -import random +from collections.abc import AsyncIterator, Awaitable, Callable from copy import deepcopy from datetime import datetime -from random import randint -from typing import Any, AsyncIterator, Awaitable, Callable, Iterable, Iterator, Optional +from typing import Any import pytest -import respx import sqlalchemy as sa from faker import Faker -from fastapi import FastAPI -from models_library.services import ServiceDockerData +from fastapi.encoders import jsonable_encoder +from models_library.products import ProductName +from models_library.services import ServiceMetaDataPublished from models_library.users import UserID -from pytest import MonkeyPatch -from pytest_mock.plugin import MockerFixture +from pydantic import ConfigDict, TypeAdapter +from pytest_simcore.helpers.catalog_services import CreateFakeServiceDataCallable +from pytest_simcore.helpers.faker_factories import ( + random_service_access_rights, + random_service_meta_data, + random_user, +) +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import ( + PostgresTestConfig, + insert_and_get_row_lifespan, +) +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_postgres_database.models.groups import groups from simcore_postgres_database.models.products import products -from simcore_postgres_database.models.users import UserRole, UserStatus, users -from simcore_service_catalog.core.application import init_app -from simcore_service_catalog.db.tables import ( - groups, +from simcore_postgres_database.models.services import ( services_access_rights, services_meta_data, ) -from sqlalchemy import tuple_ +from simcore_postgres_database.models.users import users +from simcore_service_catalog.core.settings import ApplicationSettings +from sqlalchemy import sql from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.ext.asyncio import AsyncEngine -from starlette.testclient import TestClient - -@pytest.fixture() -async def products_names( - sqlalchemy_async_engine: AsyncEngine, -) -> AsyncIterator[list[str]]: - """Inits products db table and returns product names""" - data = [ - # already upon creation: ("osparc", r"([\.-]{0,1}osparc[\.-])"), - ("s4l", r"(^s4l[\.-])|(^sim4life\.)|(^api.s4l[\.-])|(^api.sim4life\.)"), - ("tis", r"(^tis[\.-])|(^ti-solutions\.)"), - ] - - # pylint: disable=no-value-for-parameter - async with sqlalchemy_async_engine.begin() as conn: - # NOTE: The 'default' dialect with current database version settings does not support in-place multirow inserts - for n, (name, regex) in enumerate(data): - stmt = products.insert().values(name=name, host_regex=regex, priority=n) - await conn.execute(stmt) - - names = [ - "osparc", - ] + [items[0] for 
items in data] - - yield names - - async with sqlalchemy_async_engine.begin() as conn: - await conn.execute(products.delete()) +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "SC_BOOT_MODE": "local-development", + "POSTGRES_CLIENT_NAME": "pytest_client", + }, + ) @pytest.fixture -def app( - monkeypatch: MonkeyPatch, - mocker: MockerFixture, - service_test_environ: None, +async def app_settings( # starts postgres service before app starts postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - products_names: list[str], -) -> Iterable[FastAPI]: + postgres_host_config: PostgresTestConfig, + app_settings: ApplicationSettings, +) -> ApplicationSettings: + # Database is init BEFORE app + assert postgres_db print("database started:", postgres_host_config) - print("database w/products in table:", products_names) - - monkeypatch.setenv("CATALOG_TRACING", "null") - monkeypatch.setenv("SC_BOOT_MODE", "local-development") - monkeypatch.setenv("POSTGRES_CLIENT_NAME", "pytest_client") - app = init_app() - yield app - - -@pytest.fixture -def client(app: FastAPI) -> Iterator[TestClient]: - with TestClient(app) as cli: - # Note: this way we ensure the events are run in the application - yield cli - -@pytest.fixture() -def director_mockup(app: FastAPI) -> Iterator[respx.MockRouter]: - with respx.mock( - base_url=app.state.settings.CATALOG_DIRECTOR.base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - respx_mock.head("/", name="healthcheck").respond(200, json={"health": "OK"}) - respx_mock.get("/services", name="list_services").respond( - 200, json={"data": []} - ) - yield respx_mock + # Ensures both postgres service and app environs are the same! 
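+ # NOTE: the assertions below cross-check the Postgres test-container configuration (user, database, password) against the application's CATALOG_POSTGRES settings, so a mismatch fails fast at fixture setup rather than as an obscure connection error later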
+ assert app_settings + assert app_settings.CATALOG_POSTGRES + assert postgres_host_config["user"] == app_settings.CATALOG_POSTGRES.POSTGRES_USER + assert postgres_host_config["database"] == app_settings.CATALOG_POSTGRES.POSTGRES_DB + assert ( + app_settings.CATALOG_POSTGRES.POSTGRES_PASSWORD.get_secret_value() + == postgres_host_config["password"] + ) + return app_settings # DATABASE tables fixtures ----------------------------------- @@ -121,40 +101,87 @@ def director_mockup(app: FastAPI) -> Iterator[respx.MockRouter]: # -@pytest.fixture(scope="session") -def user_id() -> UserID: - return UserID(randint(1, 10000)) +@pytest.fixture +async def product( + product: dict[str, Any], + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[dict[str, Any]]: + """ + injects product in db + """ + # NOTE: this fixture ignores products' group-id but it is fine for this test context + assert product["group_id"] is None + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + sqlalchemy_async_engine, + table=products, + values=product, + pk_col=products.c.name, + pk_value=product["name"], + ) as row: + yield row -@pytest.fixture() -def user_db(postgres_db: sa.engine.Engine, user_id: UserID) -> Iterator[dict]: - with postgres_db.connect() as con: - # removes all users before continuing - con.execute(users.delete()) - con.execute( - users.insert() - .values( - id=user_id, - name="test user", - email="test@user.com", - password_hash="testhash", - status=UserStatus.ACTIVE, - role=UserRole.USER, - ) - .returning(sa.literal_column("*")) - ) - # this is needed to get the primary_gid correctly - result = con.execute(sa.select([users]).where(users.c.id == user_id)) - user = result.first() - assert user - yield dict(user) +@pytest.fixture +def target_product(product: dict[str, Any], product_name: ProductName) -> ProductName: + assert product_name == TypeAdapter(ProductName).validate_python(product["name"]) + return product_name - con.execute(users.delete().where(users.c.id == user_id)) + +@pytest.fixture +def other_product(product: dict[str, Any]) -> ProductName: + other = TypeAdapter(ProductName).validate_python("osparc") + assert other != product["name"] + return other + + +@pytest.fixture +def products_names( + target_product: ProductName, other_product: ProductName +) -> list[str]: + return [other_product, target_product] + + +@pytest.fixture +async def user( + user: dict[str, Any], + user_id: UserID, + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[dict[str, Any]]: + """ + injects a user in db + """ + assert user_id == user["id"] + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + sqlalchemy_async_engine, + table=users, + values=user, + pk_col=users.c.id, + pk_value=user["id"], + ) as row: + yield row + + +@pytest.fixture +async def other_user( + user_id: UserID, + sqlalchemy_async_engine: AsyncEngine, + faker: Faker, +) -> AsyncIterator[dict[str, Any]]: + + _user = random_user(fake=faker, id=user_id + 1) + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + sqlalchemy_async_engine, + table=users, + values=_user, + pk_col=users.c.id, + pk_value=_user["id"], + ) as row: + yield row @pytest.fixture() async def user_groups_ids( - sqlalchemy_async_engine: AsyncEngine, user_db: dict[str, Any] + sqlalchemy_async_engine: AsyncEngine, user: dict[str, Any] ) -> AsyncIterator[list[int]]: """Inits groups table and returns group identifiers""" @@ -167,15 +194,17 
@@ async def user_groups_ids( "STANDARD", "http://mib.org", {"email": "@(foo|testers|mib)+.(org|com)$"}, - ), + ) ] # pylint: disable=no-value-for-parameter async with sqlalchemy_async_engine.begin() as conn: for row in data: # NOTE: The 'default' dialect with current database version settings does not support in-place multirow inserts - await conn.execute(groups.insert().values(**dict(zip(cols, row)))) + await conn.execute( + groups.insert().values(**dict(zip(cols, row, strict=False))) + ) - gids = [1, user_db["primary_gid"]] + [items[0] for items in data] + gids = [1, user["primary_gid"]] + [items[0] for items in data] yield gids @@ -191,19 +220,19 @@ async def services_db_tables_injector( """Returns a helper function to init services_meta_data and services_access_rights tables - Can use service_catalog_faker to generate inputs + Can use `create_fake_service_data` to generate inputs Example: await services_db_tables_injector( [ - service_catalog_faker( + create_fake_service_data( "simcore/services/dynamic/jupyterlab", "0.0.1", team_access=None, everyone_access=None, product=target_product, ), - service_catalog_faker( + create_fake_service_data( "simcore/services/dynamic/jupyterlab", "0.0.7", team_access=None, @@ -216,7 +245,7 @@ async def services_db_tables_injector( # pylint: disable=no-value-for-parameter inserted_services: set[tuple[str, str]] = set() - async def inject_in_db(fake_catalog: list[tuple]): + async def _inject_in_db(fake_catalog: list[tuple]): # [(service, ar1, ...), (service2, ar1, ...) ] async with sqlalchemy_async_engine.begin() as conn: @@ -237,12 +266,12 @@ async def inject_in_db(fake_catalog: list[tuple]): stmt_access = services_access_rights.insert().values(access_rights) await conn.execute(stmt_access) - yield inject_in_db + yield _inject_in_db async with sqlalchemy_async_engine.begin() as conn: await conn.execute( services_meta_data.delete().where( - tuple_(services_meta_data.c.key, services_meta_data.c.version).in_( + sql.tuple_(services_meta_data.c.key, services_meta_data.c.version).in_( inserted_services ) ) @@ -337,73 +366,68 @@ def _fake_factory(**overrides): data = deepcopy(template) data.update(**overrides) - assert ServiceDockerData.parse_obj(data), "Invalid fake data. Out of sync!" + assert ServiceMetaDataPublished.model_validate( + data + ), "Invalid fake data. Out of sync!" 
return data return _fake_factory -@pytest.fixture() -async def service_catalog_faker( +@pytest.fixture +async def create_fake_service_data( user_groups_ids: list[int], products_names: list[str], faker: Faker, -) -> Callable: +) -> CreateFakeServiceDataCallable: """Returns a fake factory that creates catalog DATA that can be used to fill both services_meta_data and services_access_rights tables Example: - fake_service, *fake_access_rights = service_catalog_faker( + fake_service, *fake_access_rights = create_fake_service_data( "simcore/services/dynamic/jupyterlab", "0.0.1", - team_access=None, - everyone_access=None, + team_access="xw", + everyone_access="x", product=target_product, ), owner_access, team_access, everyone_access = fake_access_rights """ - everyone_gid, user_gid, team_gid = user_groups_ids + everyone_gid, user_primary_gid, team_standard_gid = user_groups_ids def _random_service(**overrides) -> dict[str, Any]: - data = dict( - key=f"simcore/services/{random.choice(['dynamic', 'computational'])}/{faker.name()}", - version=".".join([str(faker.pyint()) for _ in range(3)]), - owner=user_gid, - name=faker.name(), - description=faker.sentence(), - thumbnail=random.choice([faker.image_url(), None]), - classifiers=[], - quality={}, - deprecated=None, + return random_service_meta_data( + owner_primary_gid=user_primary_gid, + fake=faker, + **overrides, ) - data.update(overrides) - return data def _random_access(service, **overrides) -> dict[str, Any]: - data = dict( + return random_service_access_rights( key=service["key"], version=service["version"], - gid=random.choice(user_groups_ids), - execute_access=faker.pybool(), - write_access=faker.pybool(), - product_name=random.choice(products_names), + fake=faker, + **overrides, ) - data.update(overrides) - return data def _fake_factory( key, version, - team_access=None, - everyone_access=None, - product=products_names[0], - deprecated: Optional[datetime] = None, + team_access: str | None = None, + everyone_access: str | None = None, + product: ProductName = products_names[0], + deprecated: datetime | None = None, + version_display: str | None = None, ) -> tuple[dict[str, Any], ...]: - - service = _random_service(key=key, version=version, deprecated=deprecated) + service = _random_service( + key=key, + version=version, + deprecated=deprecated, + version_display=version_display, + ) # owner always has full-access owner_access = _random_access( @@ -422,7 +446,7 @@ def _fake_factory( fakes.append( _random_access( service, - gid=team_gid, + gid=team_standard_gid, execute_access="x" in team_access, write_access="w" in team_access, product_name=product, @@ -444,15 +468,36 @@ def _fake_factory( @pytest.fixture -def mock_catalog_background_task(mocker: MockerFixture) -> None: - """patch the setup of the background task so we can call it manually""" - mocker.patch( - "simcore_service_catalog.core.events.start_registry_sync_task", - return_value=None, - autospec=True, - ) - mocker.patch( - "simcore_service_catalog.core.events.stop_registry_sync_task", - return_value=None, - autospec=True, - ) +def create_director_list_services_from() -> ( + Callable[[list[dict[str, Any]], list], list[dict[str, Any]]] +): + """Convenience function to merge outputs of + - `create_fake_service_data` callable with those of + - `expected_director_rest_api_list_services` fixture + + to produce a new expected_director_rest_api_list_services + """ + + class _Loader(ServiceMetaDataPublished): + model_config = ConfigDict(extra="ignore", populate_by_name=True) + + def _( + 
expected_director_rest_api_list_services: list[dict[str, Any]], + fake_services_data: list, + ): + return [ + jsonable_encoder( + _Loader.model_validate( + { + **next( + itertools.cycle(expected_director_rest_api_list_services) + ), + **data[0], # service, **access_rights = data + } + ), + exclude_unset=True, + ) + for data in fake_services_data + ] + + return _ diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services__get.py b/services/catalog/tests/unit/with_dbs/test_api_rest_services__get.py new file mode 100644 index 00000000000..ae320377eab --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services__get.py @@ -0,0 +1,97 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=not-an-iterable +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import urllib.parse +from collections.abc import Callable +from typing import Any + +import pytest +import respx +from fastapi.testclient import TestClient +from models_library.api_schemas_catalog.services import ServiceGet +from models_library.products import ProductName +from models_library.users import UserID +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +async def expected_service( + expected_director_rest_api_list_services: list[dict[str, Any]], + user: dict[str, Any], + services_db_tables_injector: Callable, + target_product: ProductName, +) -> dict[str, Any]: + # Just selected one of the list provided by the director (i.e. emulated from registry) + service = expected_director_rest_api_list_services[-1] + + # Emulates sync of registry with db and injects the expected response model + # of the director (i.e. 
coming from the registry) in the database + await services_db_tables_injector( + [ + ( # service + { + "key": service["key"], + "version": service["version"], + "owner": user["primary_gid"], + "name": service["name"], + "description": service["description"], + "thumbnail": service["thumbnail"], + }, + # owner_access, + { + "key": service["key"], + "version": service["version"], + "gid": user["primary_gid"], + "execute_access": True, + "write_access": True, + "product_name": target_product, + }, + # team_access, everyone_access [optional] + ) + ] + ) + return service + + +def test_get_service_with_details( + service_caching_disabled: None, + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: respx.MockRouter, + user_id: UserID, + expected_service: dict[str, Any], + target_product: ProductName, + client: TestClient, +): + service_key = expected_service["key"] + service_version = expected_service["version"] + + url = URL( + f"/v0/services/{urllib.parse.quote_plus(service_key)}/{service_version}" + ).with_query({"user_id": user_id}) + + response = client.get( + f"{url}", + headers={ + "x-simcore-products-name": target_product, + }, + ) + + assert response.status_code == 200 + + got = ServiceGet.model_validate(response.json()) + assert got.key == service_key + assert got.version == service_version + + assert mocked_director_rest_api["get_service"].called diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services__list.py b/services/catalog/tests/unit/with_dbs/test_api_rest_services__list.py new file mode 100644 index 00000000000..9f942cca334 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services__list.py @@ -0,0 +1,290 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=not-an-iterable +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import re +from collections.abc import Callable +from datetime import datetime, timedelta + +from models_library.api_schemas_catalog.services import ServiceGet +from models_library.products import ProductName +from models_library.services import ServiceMetaDataPublished +from models_library.users import UserID +from pydantic import TypeAdapter +from respx.router import MockRouter +from starlette import status +from starlette.testclient import TestClient +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_list_services_with_details( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api_base: MockRouter, + user_id: UserID, + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, + benchmark, +): + # create some fake services + NUM_SERVICES = 1000 + fake_services = [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + # injects fake data in db + await services_db_tables_injector(fake_services) + + url = URL("/v0/services").with_query({"user_id": user_id, "details": "true"}) + + # now fake the director such that it returns half the services + fake_registry_service_data = ServiceMetaDataPublished.model_json_schema()[ + "examples" + ][0] + + 
mocked_director_rest_api_base.get("/services", name="list_services").respond( + 200, + json={ + "data": [ + { + **fake_registry_service_data, + "key": s[0]["key"], + "version": s[0]["version"], + } + for s in fake_services[::2] + ] + }, + ) + + response = benchmark( + client.get, f"{url}", headers={"x-simcore-products-name": target_product} + ) + + assert response.status_code == 200 + data = response.json() + assert len(data) == round(NUM_SERVICES / 2) + + +async def test_list_services_without_details( + background_task_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user_id: int, + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, + benchmark, +): + + # injects fake data in db + NUM_SERVICES = 1000 + SERVICE_KEY = "simcore/services/dynamic/jupyterlab" + await services_db_tables_injector( + [ + create_fake_service_data( + SERVICE_KEY, + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + ) + + url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) + response = benchmark( + client.get, f"{url}", headers={"x-simcore-products-name": target_product} + ) + assert response.status_code == 200 + data = response.json() + assert len(data) == NUM_SERVICES + for service in data: + assert service["key"] == SERVICE_KEY + assert re.match("1.0.[0-9]+", service["version"]) is not None + assert service["name"] == "nodetails" + assert service["description"] == "nodetails" + assert service["contact"] == "nodetails@nodetails.com" + + +async def test_list_services_without_details_with_wrong_user_id_returns_403( + service_caching_disabled, + background_task_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user_id: int, + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, +): + + # injects fake data in db + NUM_SERVICES = 1 + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + ) + + url = URL("/v0/services").with_query({"user_id": user_id + 1, "details": "false"}) + response = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) + assert response.status_code == 403 + + +async def test_list_services_without_details_with_another_product_returns_other_services( + service_caching_disabled: None, + background_task_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user_id: int, + target_product: ProductName, + other_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, +): + NUM_SERVICES = 15 + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + ) + + url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) + response = client.get(f"{url}", headers={"x-simcore-products-name": other_product}) + assert response.status_code == 200 + data = response.json() + assert len(data) == 0 + + +async def 
test_list_services_without_details_with_wrong_product_returns_0_service( + service_caching_disabled, + background_task_lifespan_disabled, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user_id: int, + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, +): + + # injects fake data in db + NUM_SERVICES = 1 + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + ) + + url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) + response = client.get( + f"{url}", headers={"x-simcore-products-name": "no valid product"} + ) + assert response.status_code == 200 + data = response.json() + assert len(data) == 0 + + +async def test_list_services_that_are_deprecated( + service_caching_disabled, + background_task_lifespan_disabled, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api_base: MockRouter, + user_id: int, + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, +): + + # injects fake data in db + deprecation_date = datetime.utcnow() + timedelta( # NOTE: old offset-naive column + days=1 + ) + deprecated_service = create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.1", + team_access=None, + everyone_access=None, + product=target_product, + deprecated=deprecation_date, + ) + await services_db_tables_injector([deprecated_service]) + + # check without details + url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) + resp = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) + assert resp.status_code == status.HTTP_200_OK + list_of_services = TypeAdapter(list[ServiceGet]).validate_python(resp.json()) + assert list_of_services + assert len(list_of_services) == 1 + received_service = list_of_services[0] + assert received_service.deprecated == deprecation_date + + # for details, the director must return the same service + fake_registry_service_data = ServiceMetaDataPublished.model_json_schema()[ + "examples" + ][0] + mocked_director_rest_api_base.get("/services", name="list_services").respond( + 200, + json={ + "data": [ + { + **fake_registry_service_data, + "key": deprecated_service[0]["key"], + "version": deprecated_service[0]["version"], + } + ] + }, + ) + + url = URL("/v0/services").with_query({"user_id": user_id, "details": "true"}) + resp = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) + assert resp.status_code == status.HTTP_200_OK + list_of_services = TypeAdapter(list[ServiceGet]).validate_python(resp.json()) + assert list_of_services + assert len(list_of_services) == 1 + received_service = list_of_services[0] + assert received_service.deprecated == deprecation_date diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services_access_rights.py b/services/catalog/tests/unit/with_dbs/test_api_rest_services_access_rights.py new file mode 100644 index 00000000000..7804f6de60d --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services_access_rights.py @@ -0,0 +1,118 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=not-an-iterable +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: 
disable=unused-variable +# type: ignore + +import random +from collections.abc import Callable +from typing import Any + +from models_library.api_schemas_catalog.service_access_rights import ( + ServiceAccessRightsGet, +) +from models_library.products import ProductName +from pydantic import TypeAdapter +from respx.router import MockRouter +from starlette.testclient import TestClient +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_get_service_access_rights( + background_task_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user: dict[str, Any], + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + client: TestClient, +): + user_id = user["id"] + user_primary_gid = user["primary_gid"] + + # create some fake services + NUM_SERVICES = 3 + fake_services = [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + f"1.0.{s}", + team_access=None, + everyone_access=None, + product=target_product, + ) + for s in range(NUM_SERVICES) + ] + # injects fake data in db + await services_db_tables_injector(fake_services) + + service_to_check = fake_services[random.choice(range(NUM_SERVICES))][ + 0 + ] # --> service_meta_data table format + url = URL( + f"/v0/services/{service_to_check['key']}/{service_to_check['version']}/accessRights" + ).with_query({"user_id": user_id}) + response = client.get( + f"{url}", + headers={"x-simcore-products-name": target_product}, + ) + assert response.status_code == 200 + data = TypeAdapter(ServiceAccessRightsGet).validate_python(response.json()) + assert data.service_key == service_to_check["key"] + assert data.service_version == service_to_check["version"] + assert data.gids_with_access_rights == { + user_primary_gid: {"execute_access": True, "write_access": True} + } + + +async def test_get_service_access_rights_with_more_gids( + background_task_lifespan_disabled: None, + mocked_director_rest_api: MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + user: dict[str, Any], + other_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + user_groups_ids: list[int], + client: TestClient, +): + user_id = user["id"] + user_primary_gid = user["primary_gid"] + everyone_gid, user_gid, team_gid = user_groups_ids + + fake_service = create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.1", + team_access="x", + everyone_access="x", + product=other_product, + ) + # injects fake data in db + await services_db_tables_injector([fake_service]) + + service_to_check = fake_service[0] # --> service_meta_data table format + url = URL( + f"/v0/services/{service_to_check['key']}/{service_to_check['version']}/accessRights" + ).with_query({"user_id": user_id}) + response = client.get( + f"{url}", + headers={"x-simcore-products-name": other_product}, + ) + assert response.status_code == 200 + data = TypeAdapter(ServiceAccessRightsGet).validate_python(response.json()) + assert data.service_key == service_to_check["key"] + assert data.service_version == service_to_check["version"] + assert data.gids_with_access_rights == { + 1: {"execute_access": True, "write_access": False}, + user_primary_gid: {"execute_access": True, "write_access": True}, + team_gid: {"execute_access": True, "write_access": False}, + } diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services_ports.py 
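# --- Illustrative sketch (not part of the patch): building the request path with
# --- yarl's URL.with_query before handing it to the Starlette TestClient, as the
# --- tests above do.
from yarl import URL

url = URL("/v0/services").with_query({"user_id": 42, "details": "false"})
print(f"{url}")  # -> /v0/services?user_id=42&details=false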
b/services/catalog/tests/unit/with_dbs/test_api_rest_services_ports.py new file mode 100644 index 00000000000..2bc0f98781e --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services_ports.py @@ -0,0 +1,179 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import urllib.parse +from collections.abc import Callable +from typing import Any + +import pytest +import simcore_service_catalog.api._dependencies.services +from pytest_mock.plugin import MockerFixture +from respx.router import MockRouter +from starlette import status +from starlette.testclient import TestClient +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def service_key() -> str: + return "simcore/services/comp/itis/fake_sleeper" + + +@pytest.fixture +def service_version() -> str: + return "1.2.3" + + +@pytest.fixture +def service_metadata( + service_key: str, + service_version: str, + service_metadata_faker: Callable, +) -> dict[str, Any]: + return service_metadata_faker(key=service_key, version=service_version) + + +@pytest.fixture +async def mocked_check_service_read_access( + mocker: MockerFixture, user_groups_ids: dict[str, Any] +): + # MOCKS functionality inside "simcore_service_catalog.api.dependencies.services.check_service_read_access" + # to provide read access to a service to user_groups_ids + # + assert user_groups_ids + + mocker.patch.object( + simcore_service_catalog.api._dependencies.services.ServicesRepository, + "get_service", + autospec=True, + return_value=True, + ) + + +@pytest.fixture +async def mocked_director_service_api( + mocked_director_rest_api_base: MockRouter, + service_key: str, + service_version: str, + service_metadata: dict[str, Any], +): + # SEE services/director/src/simcore_service_director/api/v0/openapi.yaml + mocked_director_rest_api_base.get( + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}", + name="services_by_key_version_get", + ).respond( + status.HTTP_200_OK, + json={ + "data": [ + service_metadata, + ], + }, + ) + + +async def test_list_service_ports( + service_caching_disabled: None, + background_task_lifespan_disabled: None, + mocked_check_service_read_access: None, + mocked_director_service_api: None, + rabbitmq_and_rpc_setup_disabled: None, + client: TestClient, + product_name: str, + user_id: int, + service_key: str, + service_version: str, + service_metadata: dict[str, Any], # expected + benchmark, +): + url = URL(f"/v0/services/{service_key}/{service_version}/ports").with_query( + {"user_id": user_id} + ) + response = benchmark( + client.get, f"{url}", headers={"x-simcore-products-name": product_name} + ) + assert response.status_code == 200 + ports = response.json() + + # same order and name identifier + expected_inputs = service_metadata["inputs"] + expected_outputs = service_metadata["outputs"] + + assert [p["key"] for p in ports if p["kind"] == "input"] == list( + expected_inputs.keys() + ) + assert [p["key"] for p in ports if p["kind"] == "output"] == list( + expected_outputs.keys() + ) + + assert ports == [ + { + "key": "input_1", + "kind": "input", + "content_media_type": "text/plain", + "content_schema": { + "type": "string", + "title": "File with int number", + "description": "Pick a file containing only one integer", + }, + }, + { + "key": "input_2", + "kind": "input", + 
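# --- Illustrative sketch (not part of the patch): patching a repository method with
# --- pytest-mock's mocker.patch.object(..., autospec=True), as the
# --- mocked_check_service_read_access fixture above does for ServicesRepository.get_service.
# --- The class here is a stand-in, not the real repository.
class _FakeServicesRepository:
    def get_service(self, key: str, version: str) -> bool:
        raise RuntimeError("would hit the database")


def test_read_access_is_granted(mocker):  # `mocker` is provided by pytest-mock
    mocker.patch.object(
        _FakeServicesRepository, "get_service", autospec=True, return_value=True
    )
    repo = _FakeServicesRepository()
    assert repo.get_service("simcore/services/dynamic/jupyterlab", "1.0.0") is True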
"content_schema": { + "title": "Sleep interval", + "type": "integer", + "x_unit": "second", + "minimum": 0, + "maximum": 5, + }, + }, + { + "key": "input_3", + "kind": "input", + "content_schema": { + "title": "Fail after sleep", + "type": "boolean", + "description": "If set to true will cause service to fail after it sleeps", + "default": False, + }, + }, + { + "key": "input_4", + "kind": "input", + "content_schema": { + "title": "Distance to bed", + "type": "integer", + "x_unit": "meter", + }, + }, + { + "key": "output_1", + "kind": "output", + "content_media_type": "text/plain", + "content_schema": { + "type": "string", + "title": "File containing one random integer", + "description": "Integer is generated in range [1-9]", + }, + }, + { + "key": "output_2", + "kind": "output", + "content_schema": { + "title": "Random sleep interval", + "type": "integer", + "x_unit": "second", + }, + }, + ] diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services_resources.py b/services/catalog/tests/unit/with_dbs/test_api_rest_services_resources.py new file mode 100644 index 00000000000..fe11f2ee165 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services_resources.py @@ -0,0 +1,340 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import urllib.parse +from collections.abc import Callable +from copy import deepcopy +from dataclasses import dataclass +from typing import Any + +import httpx +import pytest +import respx +from faker import Faker +from fastapi.encoders import jsonable_encoder +from models_library.docker import DockerGenericTag +from models_library.services_resources import ( + BootMode, + ResourcesDict, + ResourceValue, + ServiceResourcesDict, + ServiceResourcesDictHelpers, +) +from pydantic import ByteSize, TypeAdapter +from respx.models import Route +from simcore_service_catalog.core.settings import _DEFAULT_RESOURCES +from starlette.testclient import TestClient +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def mocked_director_service_labels( + mocked_director_rest_api_base: respx.MockRouter, +) -> Route: + """ + Customizes mock for labels entrypoints at the director service's API + """ + slash = urllib.parse.quote_plus("/") + return mocked_director_rest_api_base.get( + url__regex=rf"v0/services/simcore{slash}services{slash}(comp|dynamic|frontend)({slash}[\w{slash}-]+)+/[0-9]+.[0-9]+.[0-9]+/labels", + name="get_service_labels", + ).respond(200, json={"data": {}}) + + +@pytest.fixture +def service_labels(faker: Faker) -> Callable[..., dict[str, Any]]: + def creator(): + return {faker.get_label(): faker.text()} + + return creator + + +@pytest.fixture +def service_key(faker: Faker) -> str: + return f"simcore/services/{faker.random_element(['comp', 'dynamic','frontend'])}/jupyter-math" + + +@pytest.fixture +def service_version(faker: Faker) -> str: + return ( + f"{faker.random_int(0,100)}.{faker.random_int(0,100)}.{faker.random_int(0,100)}" + ) + + +@pytest.fixture +def mock_service_labels() -> dict[str, Any]: + return { + "simcore.service.settings": '[ {"name": "ports", "type": "int", "value": 8888}, {"name": "constraints", "type": "string", "value": ["node.platform.os == linux"]}, {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 } } } ]', + } + + +def _update_copy(dict_data: dict, update: dict) -> dict: 
+ dict_data_copy = deepcopy(dict_data) + dict_data_copy.update(update) + return dict_data_copy + + +@dataclass +class _ServiceResourceParams: + simcore_service_label: dict[str, str] + expected_resources: ResourcesDict + expected_boot_modes: list[BootMode] + + +@pytest.mark.parametrize( + "params", + [ + pytest.param( + _ServiceResourceParams({}, _DEFAULT_RESOURCES, [BootMode.CPU]), + id="nothing_defined_returns_default_resources", + ), + pytest.param( + _ServiceResourceParams( + { + "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 } } } ]', + }, + _update_copy( + _DEFAULT_RESOURCES, + { + "CPU": ResourceValue( + limit=4.0, + reservation=_DEFAULT_RESOURCES["CPU"].reservation, + ), + "RAM": ResourceValue( + limit=ByteSize(17179869184), + reservation=_DEFAULT_RESOURCES["RAM"].reservation, + ), + }, + ), + [BootMode.CPU], + ), + id="only_limits_defined_returns_default_reservations", + ), + pytest.param( + _ServiceResourceParams( + { + "simcore.service.settings": '[ {"name": "constraints", "type": "string", "value": [ "node.platform.os == linux" ]}, {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 }, "Reservations": { "NanoCPUs": 100000000, "MemoryBytes": 536870912, "GenericResources": [ { "DiscreteResourceSpec": { "Kind": "VRAM", "Value": 1 } }, { "NamedResourceSpec": { "Kind": "AIRAM", "Value": "some_string" } } ] } } } ]' + }, + _update_copy( + _DEFAULT_RESOURCES, + { + "CPU": ResourceValue(limit=4.0, reservation=0.1), + "RAM": ResourceValue( + limit=ByteSize(17179869184), reservation=ByteSize(536870912) + ), + "VRAM": ResourceValue(limit=1, reservation=1), + "AIRAM": ResourceValue(limit=0, reservation="some_string"), + }, + ), + [BootMode.GPU], + ), + id="everything_rightly_defined", + ), + pytest.param( + _ServiceResourceParams( + { + "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 100000000, "MemoryBytes": 536870912, "GenericResources": [ ] } } } ]' + }, + _update_copy( + _DEFAULT_RESOURCES, + { + "CPU": ResourceValue( + limit=_DEFAULT_RESOURCES["CPU"].limit, + reservation=0.1, + ), + "RAM": ResourceValue( + limit=_DEFAULT_RESOURCES["RAM"].limit, + reservation=ByteSize(536870912), + ), + }, + ), + [BootMode.CPU], + ), + id="no_limits_defined_returns_default_limits", + ), + pytest.param( + _ServiceResourceParams( + { + "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 10000000000, "MemoryBytes": 53687091232, "GenericResources": [ { "DiscreteResourceSpec": { "Kind": "VRAM", "Value": 1 } } ] } } } ]' + }, + _update_copy( + _DEFAULT_RESOURCES, + { + "CPU": ResourceValue( + limit=10.0, + reservation=10.0, + ), + "RAM": ResourceValue( + limit=ByteSize(53687091232), + reservation=ByteSize(53687091232), + ), + "VRAM": ResourceValue(limit=1, reservation=1), + }, + ), + [BootMode.GPU], + ), + id="no_limits_with_reservations_above_default_returns_same_as_reservation", + ), + ], +) +async def test_get_service_resources( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_service_labels: Route, + client: TestClient, + params: _ServiceResourceParams, + service_key: str, + service_version: str, +) -> None: + + mocked_director_service_labels.respond(json={"data": params.simcore_service_label}) + url = 
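# --- Illustrative sketch (not part of the patch): grouping parametrized test inputs
# --- in a dataclass with per-case ids, mirroring _ServiceResourceParams above.
# --- The numeric values are arbitrary for the example.
from dataclasses import dataclass

import pytest


@dataclass
class _CpuCase:
    nano_cpus: int | None
    expected_cpu_limit: float


@pytest.mark.parametrize(
    "case",
    [
        pytest.param(_CpuCase(None, 0.1), id="default_limit"),
        pytest.param(_CpuCase(4_000_000_000, 4.0), id="explicit_limit"),
    ],
)
def test_cpu_limit(case: _CpuCase):
    nano = case.nano_cpus if case.nano_cpus is not None else 100_000_000
    assert nano / 1e9 == case.expected_cpu_limit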
URL(f"/v0/services/{service_key}/{service_version}/resources") + response = client.get(f"{url}") + assert response.status_code == 200, f"{response.text}" + data = response.json() + received_resources: ServiceResourcesDict = ServiceResourcesDict(**data) + assert isinstance(received_resources, dict) + + expected_service_resources = ServiceResourcesDictHelpers.create_from_single_service( + TypeAdapter(DockerGenericTag).validate_python( + f"{service_key}:{service_version}" + ), + params.expected_resources, + boot_modes=params.expected_boot_modes, + ) + assert isinstance(expected_service_resources, dict) + assert received_resources == jsonable_encoder(expected_service_resources) + + +@pytest.fixture +def create_mock_director_service_labels( + mocked_director_rest_api_base: respx.MockRouter, +) -> Callable: + def factory(services_labels: dict[str, dict[str, Any]]) -> None: + for service_name, data in services_labels.items(): + encoded_key = urllib.parse.quote_plus( + f"simcore/services/dynamic/{service_name}" + ) + for k, mock_key in enumerate((encoded_key, service_name)): + mocked_director_rest_api_base.get( + url__regex=rf"v0/services/{mock_key}/[\w/.]+/labels", + name=f"get_service_labels_for_{service_name}_{k}", + ).respond(200, json={"data": data}) + + return factory + + +@pytest.mark.parametrize( + "mapped_services_labels, expected_service_resources, service_key, service_version", + [ + pytest.param( + { + "sim4life-dy": { + "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 300000000, "MemoryBytes": 53687091232 } } } ]', + "simcore.service.compose-spec": '{"version": "2.3", "services": {"rt-web-dy":{"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life-dy:${SERVICE_VERSION}","init": true, "depends_on": ["s4l-core"]}, "s4l-core": {"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core-dy:${SERVICE_VERSION}","runtime": "nvidia", "init": true, "environment": ["DISPLAY=${DISPLAY}"],"volumes": ["/tmp/.X11-unix:/tmp/.X11-unix"]}, "sym-server": {"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sym-server:${SERVICE_VERSION}","init": true}}}', + }, + "s4l-core-dy": { + "simcore.service.settings": '[{"name": "env", "type": "string", "value": ["DISPLAY=:0"]},{"name": "env", "type": "string", "value": ["SYM_SERVER_HOSTNAME=%%container_name.sym-server%%"]},{"name": "mount", "type": "object", "value": [{"ReadOnly": true, "Source":"/tmp/.X11-unix", "Target": "/tmp/.X11-unix", "Type": "bind"}]}, {"name":"constraints", "type": "string", "value": ["node.platform.os == linux"]},{"name": "Resources", "type": "Resources", "value": {"Limits": {"NanoCPUs":4000000000, "MemoryBytes": 17179869184}, "Reservations": {"NanoCPUs": 100000000,"MemoryBytes": 536870912, "GenericResources": [{"DiscreteResourceSpec":{"Kind": "VRAM", "Value": 1}}]}}}]' + }, + "sym-server": {"simcore.service.settings": "[]"}, + }, + TypeAdapter(ServiceResourcesDict).validate_python( + ServiceResourcesDictHelpers.model_config["json_schema_extra"][ + "examples" + ][1] + ), + "simcore/services/dynamic/sim4life-dy", + "3.0.0", + id="s4l_case", + ), + pytest.param( + { + "jupyter-math": { + "simcore.service.settings": "[]", + "simcore.service.compose-spec": '{"version": "2.3", "services": {"jupyter-math":{"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/jupyter-math:${SERVICE_VERSION}"}, "busybox": {"image": "busybox:1.2.3"}}}', + }, + "busybox": {"simcore.service.settings": "[]"}, + }, + TypeAdapter(ServiceResourcesDict).validate_python( + { + 
"jupyter-math": { + "image": "simcore/services/dynamic/jupyter-math:2.0.5", + "resources": { + "CPU": {"limit": 0.1, "reservation": 0.1}, + "RAM": { + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": TypeAdapter(ByteSize).validate_python( + "2Gib" + ), + }, + }, + }, + "busybox": { + "image": "busybox:1.2.3", + "resources": { + "CPU": {"limit": 0.1, "reservation": 0.1}, + "RAM": { + "limit": TypeAdapter(ByteSize).validate_python("2Gib"), + "reservation": TypeAdapter(ByteSize).validate_python( + "2Gib" + ), + }, + }, + }, + }, + ), + "simcore/services/dynamic/jupyter-math", + "2.0.5", + id="using_an_external_image", + ), + ], +) +async def test_get_service_resources_sim4life_case( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + create_mock_director_service_labels: Callable, + client: TestClient, + mapped_services_labels: dict[str, dict[str, Any]], + expected_service_resources: ServiceResourcesDict, + service_key: str, + service_version: str, +) -> None: + create_mock_director_service_labels(mapped_services_labels) + + url = URL(f"/v0/services/{service_key}/{service_version}/resources") + response = client.get(f"{url}") + assert response.status_code == 200, f"{response.text}" + data = response.json() + received_service_resources = TypeAdapter(ServiceResourcesDict).validate_python(data) + + assert received_service_resources == expected_service_resources + + +async def test_get_service_resources_raises_errors( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_service_labels: Route, + client: TestClient, + service_key: str, + service_version: str, +) -> None: + + url = URL(f"/v0/services/{service_key}/{service_version}/resources") + # simulate a communication error + mocked_director_service_labels.side_effect = httpx.HTTPError + response = client.get(f"{url}") + assert response.status_code == httpx.codes.SERVICE_UNAVAILABLE, f"{response.text}" + # simulate a missing service + mocked_director_service_labels.respond( + httpx.codes.NOT_FOUND, json={"error": "service not found"} + ) + response = client.get(f"{url}") + assert response.status_code == httpx.codes.NOT_FOUND, f"{response.text}" diff --git a/services/catalog/tests/unit/with_dbs/test_api_rest_services_specifications.py b/services/catalog/tests/unit/with_dbs/test_api_rest_services_specifications.py new file mode 100644 index 00000000000..554167b48d9 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rest_services_specifications.py @@ -0,0 +1,393 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Any + +import pytest +import respx +from faker import Faker +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.generated_models.docker_rest_api import ( + DiscreteResourceSpec, + GenericResource, + GenericResources, + Limit, + NamedResourceSpec, + ResourceObject, +) +from models_library.generated_models.docker_rest_api import ( + Resources1 as ServiceTaskResources, +) +from models_library.generated_models.docker_rest_api import ( + ServiceSpec, + TaskSpec, +) +from models_library.products import ProductName +from models_library.users import UserID +from 
simcore_postgres_database.models.groups import user_to_groups +from simcore_postgres_database.models.services_specifications import ( + services_specifications, +) +from simcore_service_catalog.models.services_specifications import ( + ServiceSpecificationsAtDB, +) +from sqlalchemy.ext.asyncio import AsyncEngine +from starlette import status +from starlette.testclient import TestClient +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +async def services_specifications_injector( + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[Callable[[ServiceSpecificationsAtDB], Awaitable[None]]]: + inserted_specs: list[ServiceSpecificationsAtDB] = [] + + async def _injector( + service_spec: ServiceSpecificationsAtDB, + ): + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + services_specifications.insert().values(jsonable_encoder(service_spec)) + ) + inserted_specs.append(service_spec) + + yield _injector + + # clean up + async with sqlalchemy_async_engine.begin() as conn: + for spec in inserted_specs: + await conn.execute( + services_specifications.delete().where( + (services_specifications.c.service_key == spec.service_key) + & ( + services_specifications.c.service_version + == spec.service_version + ) + & (services_specifications.c.gid == spec.gid) + ) + ) + + +@pytest.fixture +def create_service_specifications( + faker: Faker, +) -> Callable[..., ServiceSpecificationsAtDB]: + def _creator(service_key, service_version, gid) -> ServiceSpecificationsAtDB: + return ServiceSpecificationsAtDB( + service_key=service_key, + service_version=service_version, + gid=gid, + sidecar=ServiceSpec(Labels=faker.pydict(allowed_types=(str,))), + service=ServiceSpec( + TaskTemplate=TaskSpec( + Resources=ServiceTaskResources( + Limits=Limit( + NanoCPUs=faker.pyint(), + MemoryBytes=faker.pyint(), + Pids=faker.pyint(), + ), + Reservations=ResourceObject( + NanoCPUs=faker.pyint(), + MemoryBytes=faker.pyint(), + GenericResources=GenericResources( + root=[ + GenericResource( + NamedResourceSpec=NamedResourceSpec( + Kind=faker.pystr(), Value=faker.pystr() + ), + DiscreteResourceSpec=DiscreteResourceSpec( + Kind=faker.pystr(), Value=faker.pyint() + ), + ) + ] + ), + ), + ) + ) + ), + ) + + return _creator + + +async def test_get_service_specifications_returns_403_if_user_does_not_exist( + background_task_lifespan_disabled, + mocked_director_rest_api: respx.MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + client: TestClient, + user_id: UserID, + faker: Faker, +): + service_key = ( + f"simcore/services/{faker.random_element(['comp', 'dynamic'])}/jupyter-math" + ) + service_version = ( + f"{faker.random_int(0,100)}.{faker.random_int(0,100)}.{faker.random_int(0,100)}" + ) + url = URL( + f"/v0/services/{service_key}/{service_version}/specifications" + ).with_query(user_id=user_id) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_403_FORBIDDEN + + +async def test_get_service_specifications_of_unknown_service_returns_default_specs( + background_task_lifespan_disabled, + mocked_director_rest_api: respx.MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + client: TestClient, + user_id: UserID, + user: dict[str, Any], + faker: Faker, +): + service_key = f"simcore/services/{faker.random_element(['comp', 'dynamic'])}/{faker.pystr().lower()}" + service_version = ( + f"{faker.random_int(0,100)}.{faker.random_int(0,100)}.{faker.random_int(0,100)}" + ) + url 
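# --- Illustrative sketch (not part of the patch): the inject-then-clean-up shape of
# --- services_specifications_injector above, reduced to an in-memory "table" instead
# --- of a SQLAlchemy engine. Assumes pytest-asyncio with asyncio_mode = "auto".
import pytest

_FAKE_TABLE: list[dict] = []  # stand-in for the services_specifications table


@pytest.fixture
async def specs_injector():
    inserted: list[dict] = []

    async def _inject(spec: dict) -> None:
        _FAKE_TABLE.append(spec)
        inserted.append(spec)

    yield _inject

    # teardown: remove exactly the rows this test inserted
    for spec in inserted:
        _FAKE_TABLE.remove(spec)


async def test_injector_cleans_up(specs_injector):
    await specs_injector({"service_key": "simcore/services/dynamic/jupyterlab", "gid": 1})
    assert len(_FAKE_TABLE) == 1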
= URL( + f"/v0/services/{service_key}/{service_version}/specifications" + ).with_query(user_id=user_id) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + + assert ( + service_specs.model_dump() + == client.app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS.model_dump() + ) + + +async def test_get_service_specifications( + background_task_lifespan_disabled, + mocked_director_rest_api: respx.MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + client: TestClient, + user_id: UserID, + user: dict[str, Any], + user_groups_ids: list[int], + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + services_specifications_injector: Callable, + sqlalchemy_async_engine: AsyncEngine, + create_service_specifications: Callable[..., ServiceSpecificationsAtDB], +): + SERVICE_KEY = "simcore/services/dynamic/jupyterlab" + SERVICE_VERSION = "0.0.1" + await services_db_tables_injector( + [ + create_fake_service_data( + SERVICE_KEY, + SERVICE_VERSION, + team_access=None, + everyone_access=None, + product=target_product, + ) + ] + ) + + url = URL( + f"/v0/services/{SERVICE_KEY}/{SERVICE_VERSION}/specifications" + ).with_query(user_id=user_id) + + # this should now return default specs since there are none in the db + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert ( + service_specs.model_dump() + == client.app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS.model_dump() + ) + + everyone_gid, user_gid, team_gid = user_groups_ids + # let's inject some rights for everyone group + everyone_service_specs = create_service_specifications( + SERVICE_KEY, SERVICE_VERSION, everyone_gid + ) + await services_specifications_injector(everyone_service_specs) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + everyone_service_specs.model_dump() + ) + + # let's inject some rights in a standard group, user is not part of that group yet, so it should still return only everyone + standard_group_service_specs = create_service_specifications( + SERVICE_KEY, SERVICE_VERSION, team_gid + ) + await services_specifications_injector(standard_group_service_specs) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + everyone_service_specs.model_dump() + ) + + # put the user in that group now and try again + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute(user_to_groups.insert().values(uid=user_id, gid=team_gid)) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + standard_group_service_specs.model_dump() + ) + + # now add some other spec in the primary gid, this takes precedence + user_group_service_specs = create_service_specifications( + SERVICE_KEY, SERVICE_VERSION, user_gid + ) + await 
services_specifications_injector(user_group_service_specs) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + user_group_service_specs.model_dump() + ) + + +async def test_get_service_specifications_are_passed_to_newer_versions_of_service( + background_task_lifespan_disabled, + mocked_director_rest_api: respx.MockRouter, + rabbitmq_and_rpc_setup_disabled: None, + client: TestClient, + user_id: UserID, + user: dict[str, Any], + user_groups_ids: list[int], + target_product: ProductName, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, + services_specifications_injector: Callable, + create_service_specifications: Callable[..., ServiceSpecificationsAtDB], +): + SERVICE_KEY = "simcore/services/dynamic/jupyterlab" + sorted_versions = [ + "0.0.1", + "0.0.2", + "0.1.0", + "0.1.1", + "0.2.3", + "1.0.0", + "1.0.1", + "1.0.10", + "1.1.1", + "1.10.1", + "1.11.1", + "10.0.0", + ] + await asyncio.gather( + *[ + services_db_tables_injector( + [ + create_fake_service_data( + SERVICE_KEY, + version, + team_access=None, + everyone_access=None, + product=target_product, + ) + ] + ) + for version in sorted_versions + ] + ) + + everyone_gid, user_gid, team_gid = user_groups_ids + # let's inject some rights for everyone group ONLY for some versions + INDEX_FIRST_SERVICE_VERSION_WITH_SPEC = 2 + INDEX_SECOND_SERVICE_VERSION_WITH_SPEC = 6 + versions_with_specs = [ + sorted_versions[INDEX_FIRST_SERVICE_VERSION_WITH_SPEC], + sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC], + ] + version_speced: list[ServiceSpecificationsAtDB] = [] + + for version in versions_with_specs: + specs = create_service_specifications(SERVICE_KEY, version, everyone_gid) + await services_specifications_injector(specs) + version_speced.append(specs) + + # check versions before first speced service return the default + for version in sorted_versions[:INDEX_FIRST_SERVICE_VERSION_WITH_SPEC]: + url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( + user_id=user_id + ) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert ( + service_specs.model_dump() + == client.app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS.model_dump() + ) + + # check version between first index and second all return the specs of the first + for version in sorted_versions[ + INDEX_FIRST_SERVICE_VERSION_WITH_SPEC:INDEX_SECOND_SERVICE_VERSION_WITH_SPEC + ]: + url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( + user_id=user_id + ) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + version_speced[0].model_dump() + ), f"specifications for {version=} are not passed down from {sorted_versions[INDEX_FIRST_SERVICE_VERSION_WITH_SPEC]}" + + # check version from second to last use the second version + for version in sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC:]: + url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( + user_id=user_id + ) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + 
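# --- Illustrative sketch (not part of the patch): checking that a version list like
# --- sorted_versions above really is in semantic (not lexicographic) order. Uses the
# --- third-party `packaging` library as one possible comparator; this is not
# --- necessarily how the catalog service itself orders versions.
from packaging.version import Version

versions = ["1.10.1", "1.2.3", "10.0.0", "1.0.10"]
print(sorted(versions))               # lexicographic: ['1.0.10', '1.10.1', '1.2.3', '10.0.0']
print(sorted(versions, key=Version))  # semantic:      ['1.0.10', '1.2.3', '1.10.1', '10.0.0']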
service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + assert service_specs == ServiceSpecifications.model_validate( + version_speced[1].model_dump() + ), f"specifications for {version=} are not passed down from {sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC]}" + + # if we call with the strict parameter set to true, then we should only get the specs for the one that were specified + for version in sorted_versions: + url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( + user_id=user_id, strict=1 + ) + response = client.get(f"{url}") + assert response.status_code == status.HTTP_200_OK + service_specs = ServiceSpecifications.model_validate(response.json()) + assert service_specs + if version in versions_with_specs: + assert ( + service_specs + != client.app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS + ) + else: + assert ( + service_specs + == client.app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS + ) diff --git a/services/catalog/tests/unit/with_dbs/test_api_routes_dags.py b/services/catalog/tests/unit/with_dbs/test_api_routes_dags.py deleted file mode 100644 index 6d91e57e469..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_api_routes_dags.py +++ /dev/null @@ -1,80 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -from typing import Any - -import pytest -from fastapi import FastAPI -from respx.router import MockRouter -from simcore_service_catalog.meta import API_VERSION -from simcore_service_catalog.models.schemas.meta import Meta -from starlette.testclient import TestClient - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -def test_read_healthcheck(director_mockup: MockRouter, client: TestClient): - response = client.get("/") - assert response.status_code == 200 - assert response.text - - -def test_read_meta(director_mockup: MockRouter, app: FastAPI, client: TestClient): - response = client.get("/v0/meta") - assert response.status_code == 200 - meta = Meta(**response.json()) - assert meta.version == API_VERSION - assert meta.name == "simcore_service_catalog" - - -def test_list_dags(director_mockup: MockRouter, app: FastAPI, client: TestClient): - response = client.get("/v0/dags") - assert response.status_code == 200 - assert response.json() == [] - - # inject three dagin - response = client.get("/v0/dags") - assert response.status_code == 200 - # TODO: assert i can list them as dagouts - - # TODO: assert dagout have identifiers now - - -@pytest.mark.skip(reason="does not work") -def test_standard_operations_on_resource( - director_mockup: MockRouter, - app: FastAPI, - client: TestClient, - fake_data_dag_in: dict[str, Any], -): - - response = client.post("/v0/dags", json=fake_data_dag_in) - assert response.status_code == 201 - assert response.json() == 1 - - # list - response = client.get("/v0/dags") - assert response.status_code == 200 - got = response.json() - - assert isinstance(got, list) - assert len(got) == 1 - - # TODO: data_in is not the same as data_out?? 
- data_out = got[0] - assert data_out["id"] == 1 # extra key, once in db - - # get - response = client.get("/v0/dags/1") - assert response.status_code == 200 - assert response.json() == data_out - - # delete - response = client.delete("/v0/dags/1") - assert response.status_code == 204 diff --git a/services/catalog/tests/unit/with_dbs/test_api_routes_services.py b/services/catalog/tests/unit/with_dbs/test_api_routes_services.py deleted file mode 100644 index 53c6eaa8a58..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_api_routes_services.py +++ /dev/null @@ -1,296 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=not-an-iterable -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import re -from datetime import datetime, timedelta -from typing import Callable - -import pytest -from models_library.services import ServiceDockerData -from pydantic import parse_obj_as -from respx.router import MockRouter -from simcore_service_catalog.models.schemas.services import ServiceGet -from starlette import status -from starlette.testclient import TestClient -from yarl import URL - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -def disable_service_caching(monkeypatch): - monkeypatch.setenv("AIOCACHE_DISABLE", 1) - - -async def test_list_services_with_details( - mock_catalog_background_task: None, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, - benchmark, -): - target_product = products_names[-1] - # create some fake services - NUM_SERVICES = 1000 - fake_services = [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - f"1.0.{s}", - team_access=None, - everyone_access=None, - product=target_product, - ) - for s in range(NUM_SERVICES) - ] - # injects fake data in db - await services_db_tables_injector(fake_services) - - url = URL("/v0/services").with_query({"user_id": user_id, "details": "true"}) - - # now fake the director such that it returns half the services - fake_registry_service_data = ServiceDockerData.Config.schema_extra["examples"][0] - - director_mockup.get("/services", name="list_services").respond( - 200, - json={ - "data": [ - { - **fake_registry_service_data, - **{"key": s[0]["key"], "version": s[0]["version"]}, - } - for s in fake_services[::2] - ] - }, - ) - - response = benchmark( - client.get, f"{url}", headers={"x-simcore-products-name": target_product} - ) - - assert response.status_code == 200 - data = response.json() - assert len(data) == round(NUM_SERVICES / 2) - - -async def test_list_services_without_details( - mock_catalog_background_task: None, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, - benchmark, -): - target_product = products_names[-1] - # injects fake data in db - NUM_SERVICES = 1000 - SERVICE_KEY = "simcore/services/dynamic/jupyterlab" - await services_db_tables_injector( - [ - service_catalog_faker( - SERVICE_KEY, - f"1.0.{s}", - team_access=None, - everyone_access=None, - product=target_product, - ) - for s in range(NUM_SERVICES) - ] - ) - - url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) - response = benchmark( - client.get, f"{url}", 
headers={"x-simcore-products-name": target_product} - ) - assert response.status_code == 200 - data = response.json() - assert len(data) == NUM_SERVICES - for service in data: - assert service["key"] == SERVICE_KEY - assert re.match("1.0.[0-9]+", service["version"]) is not None - assert service["name"] == "nodetails" - assert service["description"] == "nodetails" - assert service["contact"] == "nodetails@nodetails.com" - - -async def test_list_services_without_details_with_wrong_user_id_returns_403( - disable_service_caching, - mock_catalog_background_task: None, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, -): - target_product = products_names[-1] - # injects fake data in db - NUM_SERVICES = 1 - await services_db_tables_injector( - [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - f"1.0.{s}", - team_access=None, - everyone_access=None, - product=target_product, - ) - for s in range(NUM_SERVICES) - ] - ) - - url = URL("/v0/services").with_query({"user_id": user_id + 1, "details": "false"}) - response = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) - assert response.status_code == 403 - - -async def test_list_services_without_details_with_another_product_returns_other_services( - disable_service_caching: None, - mock_catalog_background_task: None, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, -): - target_product = products_names[-1] - assert ( - len(products_names) > 1 - ), "please adjust the fixture to have the right number of products" - # injects fake data in db - NUM_SERVICES = 15 - await services_db_tables_injector( - [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - f"1.0.{s}", - team_access=None, - everyone_access=None, - product=target_product, - ) - for s in range(NUM_SERVICES) - ] - ) - - url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) - response = client.get( - f"{url}", headers={"x-simcore-products-name": products_names[0]} - ) - assert response.status_code == 200 - data = response.json() - assert len(data) == 0 - - -async def test_list_services_without_details_with_wrong_product_returns_0_service( - disable_service_caching, - mock_catalog_background_task, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, -): - target_product = products_names[-1] - assert ( - len(products_names) > 1 - ), "please adjust the fixture to have the right number of products" - # injects fake data in db - NUM_SERVICES = 1 - await services_db_tables_injector( - [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - f"1.0.{s}", - team_access=None, - everyone_access=None, - product=target_product, - ) - for s in range(NUM_SERVICES) - ] - ) - - url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) - response = client.get( - f"{url}", headers={"x-simcore-products-name": "no valid product"} - ) - assert response.status_code == 200 - data = response.json() - assert len(data) == 0 - - -async def test_list_services_that_are_deprecated( - disable_service_caching, - mock_catalog_background_task, - director_mockup: MockRouter, - client: TestClient, - user_id: int, - products_names: list[str], - 
service_catalog_faker: Callable, - services_db_tables_injector: Callable, -): - target_product = products_names[-1] - assert ( - len(products_names) > 1 - ), "please adjust the fixture to have the right number of products" - # injects fake data in db - deprecation_date = datetime.utcnow() + timedelta(days=1) - deprecated_service = service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "1.0.1", - team_access=None, - everyone_access=None, - product=target_product, - deprecated=deprecation_date, - ) - await services_db_tables_injector([deprecated_service]) - - # check without details - url = URL("/v0/services").with_query({"user_id": user_id, "details": "false"}) - resp = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) - assert resp.status_code == status.HTTP_200_OK - list_of_services = parse_obj_as(list[ServiceGet], resp.json()) - assert list_of_services - assert len(list_of_services) == 1 - received_service = list_of_services[0] - assert received_service.deprecated == deprecation_date - - # for details, the director must return the same service - fake_registry_service_data = ServiceDockerData.Config.schema_extra["examples"][0] - director_mockup.get("/services", name="list_services").respond( - 200, - json={ - "data": [ - { - **fake_registry_service_data, - **{ - "key": deprecated_service[0]["key"], - "version": deprecated_service[0]["version"], - }, - } - ] - }, - ) - - url = URL("/v0/services").with_query({"user_id": user_id, "details": "true"}) - resp = client.get(f"{url}", headers={"x-simcore-products-name": target_product}) - assert resp.status_code == status.HTTP_200_OK - list_of_services = parse_obj_as(list[ServiceGet], resp.json()) - assert list_of_services - assert len(list_of_services) == 1 - received_service = list_of_services[0] - assert received_service.deprecated == deprecation_date diff --git a/services/catalog/tests/unit/with_dbs/test_api_routes_services_ports.py b/services/catalog/tests/unit/with_dbs/test_api_routes_services_ports.py deleted file mode 100644 index b06623a0332..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_api_routes_services_ports.py +++ /dev/null @@ -1,187 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - -import urllib.parse -from typing import Any, Callable - -import pytest -from pytest_mock.plugin import MockerFixture -from respx.router import MockRouter -from starlette import status -from starlette.testclient import TestClient -from yarl import URL - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -def disable_service_caching(monkeypatch: pytest.MonkeyPatch): - monkeypatch.setenv("AIOCACHE_DISABLE", "1") - - -@pytest.fixture -def product_name( - products_names: list[str], -) -> str: - target_product = products_names[-1] - assert target_product - return target_product - - -@pytest.fixture -def service_key() -> str: - return "simcore/services/comp/itis/fake_sleeper" - - -@pytest.fixture -def service_version() -> str: - return "1.2.3" - - -@pytest.fixture -def service_metadata( - service_key: str, - service_version: str, - service_metadata_faker: Callable, -) -> dict[str, Any]: - return service_metadata_faker(key=service_key, version=service_version) - - -@pytest.fixture -async def mock_check_service_read_access( - mocker: MockerFixture, user_groups_ids: dict[str, Any] -): - # MOCKS functionality inside 
"simcore_service_catalog.api.dependencies.services.check_service_read_access" - # to provide read access to a service to user_groups_ids - # - print(user_groups_ids) - - mocker.patch( - "simcore_service_catalog.api.dependencies.services.ServicesRepository.get_service", - autospec=True, - return_value=True, - ) - - -@pytest.fixture -async def mock_director_service_api( - director_mockup: MockRouter, - service_key: str, - service_version: str, - service_metadata: dict[str, Any], -): - # SEE services/director/src/simcore_service_director/api/v0/openapi.yaml - director_mockup.get( - f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}", - name="services_by_key_version_get", - ).respond( - status.HTTP_200_OK, - json={ - "data": [ - service_metadata, - ], - }, - ) - - -async def test_list_service_ports( - disable_service_caching: None, - mock_catalog_background_task: None, - mock_check_service_read_access: None, - mock_director_service_api: None, - client: TestClient, - product_name: str, - user_id: int, - service_key: str, - service_version: str, - service_metadata: dict[str, Any], # expected - benchmark, -): - url = URL(f"/v0/services/{service_key}/{service_version}/ports").with_query( - {"user_id": user_id} - ) - response = benchmark( - client.get, f"{url}", headers={"x-simcore-products-name": product_name} - ) - assert response.status_code == 200 - ports = response.json() - - # same order and name identifier - expected_inputs = service_metadata["inputs"] - expected_outputs = service_metadata["outputs"] - - assert [p["key"] for p in ports if p["kind"] == "input"] == list( - expected_inputs.keys() - ) - assert [p["key"] for p in ports if p["kind"] == "output"] == list( - expected_outputs.keys() - ) - - assert ports == [ - { - "key": "input_1", - "kind": "input", - "content_media_type": "text/plain", - "content_schema": { - "type": "string", - "title": "File with int number", - "description": "Pick a file containing only one integer", - }, - }, - { - "key": "input_2", - "kind": "input", - "content_schema": { - "title": "Sleep interval", - "type": "integer", - "x_unit": "second", - "minimum": 0, - "maximum": 5, - }, - }, - { - "key": "input_3", - "kind": "input", - "content_schema": { - "title": "Fail after sleep", - "type": "boolean", - "description": "If set to true will cause service to fail after it sleeps", - "default": False, - }, - }, - { - "key": "input_4", - "kind": "input", - "content_schema": { - "title": "Distance to bed", - "type": "integer", - "x_unit": "meter", - }, - }, - { - "key": "output_1", - "kind": "output", - "content_media_type": "text/plain", - "content_schema": { - "type": "string", - "title": "File containing one random integer", - "description": "Integer is generated in range [1-9]", - }, - }, - { - "key": "output_2", - "kind": "output", - "content_schema": { - "title": "Random sleep interval", - "type": "integer", - "x_unit": "second", - }, - }, - ] diff --git a/services/catalog/tests/unit/with_dbs/test_api_routes_services_resources.py b/services/catalog/tests/unit/with_dbs/test_api_routes_services_resources.py deleted file mode 100644 index 7e324b10f84..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_api_routes_services_resources.py +++ /dev/null @@ -1,327 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import urllib.parse -from copy import deepcopy -from dataclasses import dataclass -from random import choice, randint -from typing import Any, Callable - -import 
httpx -import pytest -import respx -from faker import Faker -from fastapi import FastAPI -from models_library.docker import DockerGenericTag -from models_library.services_resources import ( - BootMode, - ResourcesDict, - ResourceValue, - ServiceResourcesDict, - ServiceResourcesDictHelpers, -) -from pydantic import ByteSize, parse_obj_as -from respx.models import Route -from simcore_service_catalog.core.settings import _DEFAULT_RESOURCES -from starlette.testclient import TestClient -from yarl import URL - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -def mock_director_service_labels( - director_mockup: respx.MockRouter, app: FastAPI -) -> Route: - slash = urllib.parse.quote_plus("/") - mock_route = director_mockup.get( - url__regex=rf"v0/services/simcore{slash}services{slash}(comp|dynamic|frontend)({slash}[\w{slash}-]+)+/[0-9]+.[0-9]+.[0-9]+/labels", - name="get_service_labels", - ).respond(200, json={"data": {}}) - - return mock_route - - -@pytest.fixture -def service_labels(faker: Faker) -> Callable[..., dict[str, Any]]: - def creator(): - return {faker.get_label(): faker.text()} - - return creator - - -@pytest.fixture -def service_key(faker: Faker) -> str: - return f"simcore/services/{choice(['comp', 'dynamic','frontend'])}/jupyter-math" - - -@pytest.fixture -def service_version() -> str: - return f"{randint(0,100)}.{randint(0,100)}.{randint(0,100)}" - - -@pytest.fixture -def mock_service_labels(faker: Faker) -> dict[str, Any]: - return { - "simcore.service.settings": '[ {"name": "ports", "type": "int", "value": 8888}, {"name": "constraints", "type": "string", "value": ["node.platform.os == linux"]}, {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 } } } ]', - } - - -def _update_copy(dict_data: dict, update: dict) -> dict: - dict_data_copy = deepcopy(dict_data) - dict_data_copy.update(update) - return dict_data_copy - - -@dataclass -class _ServiceResourceParams: - simcore_service_label: dict[str, str] - expected_resources: ResourcesDict - expected_boot_modes: list[BootMode] - - -@pytest.mark.parametrize( - "params", - [ - pytest.param( - _ServiceResourceParams({}, _DEFAULT_RESOURCES, [BootMode.CPU]), - id="nothing_defined_returns_default_resources", - ), - pytest.param( - _ServiceResourceParams( - { - "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 } } } ]', - }, - _update_copy( - _DEFAULT_RESOURCES, - { - "CPU": ResourceValue( - limit=4.0, - reservation=_DEFAULT_RESOURCES["CPU"].reservation, - ), - "RAM": ResourceValue( - limit=ByteSize(17179869184), - reservation=_DEFAULT_RESOURCES["RAM"].reservation, - ), - }, - ), - [BootMode.CPU], - ), - id="only_limits_defined_returns_default_reservations", - ), - pytest.param( - _ServiceResourceParams( - { - "simcore.service.settings": '[ {"name": "constraints", "type": "string", "value": [ "node.platform.os == linux" ]}, {"name": "Resources", "type": "Resources", "value": { "Limits": { "NanoCPUs": 4000000000, "MemoryBytes": 17179869184 }, "Reservations": { "NanoCPUs": 100000000, "MemoryBytes": 536870912, "GenericResources": [ { "DiscreteResourceSpec": { "Kind": "VRAM", "Value": 1 } }, { "NamedResourceSpec": { "Kind": "AIRAM", "Value": "some_string" } } ] } } } ]' - }, - _update_copy( - _DEFAULT_RESOURCES, - { - "CPU": ResourceValue(limit=4.0, reservation=0.1), - "RAM": ResourceValue( - 
limit=ByteSize(17179869184), reservation=ByteSize(536870912) - ), - "VRAM": ResourceValue(limit=1, reservation=1), - "AIRAM": ResourceValue(limit=0, reservation="some_string"), - }, - ), - [BootMode.GPU], - ), - id="everything_rightly_defined", - ), - pytest.param( - _ServiceResourceParams( - { - "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 100000000, "MemoryBytes": 536870912, "GenericResources": [ ] } } } ]' - }, - _update_copy( - _DEFAULT_RESOURCES, - { - "CPU": ResourceValue( - limit=_DEFAULT_RESOURCES["CPU"].limit, - reservation=0.1, - ), - "RAM": ResourceValue( - limit=_DEFAULT_RESOURCES["RAM"].limit, - reservation=ByteSize(536870912), - ), - }, - ), - [BootMode.CPU], - ), - id="no_limits_defined_returns_default_limits", - ), - pytest.param( - _ServiceResourceParams( - { - "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 10000000000, "MemoryBytes": 53687091232, "GenericResources": [ { "DiscreteResourceSpec": { "Kind": "VRAM", "Value": 1 } } ] } } } ]' - }, - _update_copy( - _DEFAULT_RESOURCES, - { - "CPU": ResourceValue( - limit=10.0, - reservation=10.0, - ), - "RAM": ResourceValue( - limit=ByteSize(53687091232), - reservation=ByteSize(53687091232), - ), - "VRAM": ResourceValue(limit=1, reservation=1), - }, - ), - [BootMode.GPU], - ), - id="no_limits_with_reservations_above_default_returns_same_as_reservation", - ), - ], -) -async def test_get_service_resources( - mock_catalog_background_task, - mock_director_service_labels: Route, - client: TestClient, - params: _ServiceResourceParams, -) -> None: - service_key = f"simcore/services/{choice(['comp', 'dynamic'])}/jupyter-math" - service_version = f"{randint(0,100)}.{randint(0,100)}.{randint(0,100)}" - mock_director_service_labels.respond(json={"data": params.simcore_service_label}) - url = URL(f"/v0/services/{service_key}/{service_version}/resources") - response = client.get(f"{url}") - assert response.status_code == 200, f"{response.text}" - data = response.json() - received_resources: ServiceResourcesDict = parse_obj_as(ServiceResourcesDict, data) - assert isinstance(received_resources, dict) - - expected_service_resources = ServiceResourcesDictHelpers.create_from_single_service( - parse_obj_as(DockerGenericTag, f"{service_key}:{service_version}"), - params.expected_resources, - boot_modes=params.expected_boot_modes, - ) - assert isinstance(expected_service_resources, dict) - assert received_resources == expected_service_resources - - -@pytest.fixture -def create_mock_director_service_labels( - director_mockup: respx.MockRouter, app: FastAPI -) -> Callable: - def factory(services_labels: dict[str, dict[str, Any]]) -> None: - for service_name, data in services_labels.items(): - encoded_key = urllib.parse.quote_plus( - f"simcore/services/dynamic/{service_name}" - ) - for k, mock_key in enumerate((encoded_key, service_name)): - director_mockup.get( - url__regex=rf"v0/services/{mock_key}/[\w/.]+/labels", - name=f"get_service_labels_for_{service_name}_{k}", - ).respond(200, json={"data": data}) - - return factory - - -@pytest.mark.parametrize( - "mapped_services_labels, expected_service_resources, service_key, service_version", - [ - pytest.param( - { - "sim4life-dy": { - "simcore.service.settings": '[ {"name": "Resources", "type": "Resources", "value": { "Reservations": { "NanoCPUs": 300000000, "MemoryBytes": 53687091232 } } } ]', - "simcore.service.compose-spec": '{"version": "2.3", "services": 
{"rt-web-dy":{"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life-dy:${SERVICE_VERSION}","init": true, "depends_on": ["s4l-core"]}, "s4l-core": {"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core-dy:${SERVICE_VERSION}","runtime": "nvidia", "init": true, "environment": ["DISPLAY=${DISPLAY}"],"volumes": ["/tmp/.X11-unix:/tmp/.X11-unix"]}, "sym-server": {"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/sym-server:${SERVICE_VERSION}","init": true}}}', - }, - "s4l-core-dy": { - "simcore.service.settings": '[{"name": "env", "type": "string", "value": ["DISPLAY=:0"]},{"name": "env", "type": "string", "value": ["SYM_SERVER_HOSTNAME=%%container_name.sym-server%%"]},{"name": "mount", "type": "object", "value": [{"ReadOnly": true, "Source":"/tmp/.X11-unix", "Target": "/tmp/.X11-unix", "Type": "bind"}]}, {"name":"constraints", "type": "string", "value": ["node.platform.os == linux"]},{"name": "Resources", "type": "Resources", "value": {"Limits": {"NanoCPUs":4000000000, "MemoryBytes": 17179869184}, "Reservations": {"NanoCPUs": 100000000,"MemoryBytes": 536870912, "GenericResources": [{"DiscreteResourceSpec":{"Kind": "VRAM", "Value": 1}}]}}}]' - }, - "sym-server": {"simcore.service.settings": "[]"}, - }, - parse_obj_as( - ServiceResourcesDict, - ServiceResourcesDictHelpers.Config.schema_extra["examples"][1], - ), - "simcore/services/dynamic/sim4life-dy", - "3.0.0", - id="s4l_case", - ), - pytest.param( - { - "jupyter-math": { - "simcore.service.settings": "[]", - "simcore.service.compose-spec": '{"version": "2.3", "services": {"jupyter-math":{"image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/jupyter-math:${SERVICE_VERSION}"}, "busybox": {"image": "busybox:latest"}}}', - }, - "busybox": {"simcore.service.settings": "[]"}, - }, - parse_obj_as( - ServiceResourcesDict, - { - "jupyter-math": { - "image": "simcore/services/dynamic/jupyter-math:2.0.5", - "resources": { - "CPU": {"limit": 0.1, "reservation": 0.1}, - "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), - }, - }, - }, - "busybox": { - "image": "busybox:latest", - "resources": { - "CPU": {"limit": 0.1, "reservation": 0.1}, - "RAM": { - "limit": parse_obj_as(ByteSize, "2Gib"), - "reservation": parse_obj_as(ByteSize, "2Gib"), - }, - }, - }, - }, - ), - "simcore/services/dynamic/jupyter-math", - "2.0.5", - id="using_an_external_image", - ), - ], -) -async def test_get_service_resources_sim4life_case( - mock_catalog_background_task, - create_mock_director_service_labels: Callable, - client: TestClient, - mapped_services_labels: dict[str, dict[str, Any]], - expected_service_resources: ServiceResourcesDict, - service_key: str, - service_version: str, -) -> None: - create_mock_director_service_labels(mapped_services_labels) - - url = URL(f"/v0/services/{service_key}/{service_version}/resources") - response = client.get(f"{url}") - assert response.status_code == 200, f"{response.text}" - data = response.json() - received_service_resources = parse_obj_as(ServiceResourcesDict, data) - - assert received_service_resources == expected_service_resources - - -async def test_get_service_resources_raises_errors( - mock_catalog_background_task, - mock_director_service_labels: Route, - client: TestClient, -) -> None: - - service_key = f"simcore/services/{choice(['comp', 'dynamic'])}/jupyter-math" - service_version = f"{randint(0,100)}.{randint(0,100)}.{randint(0,100)}" - url = URL(f"/v0/services/{service_key}/{service_version}/resources") - # simulate a communication error - 
mock_director_service_labels.side_effect = httpx.HTTPError - response = client.get(f"{url}") - assert response.status_code == httpx.codes.SERVICE_UNAVAILABLE, f"{response.text}" - # simulate a missing service - mock_director_service_labels.respond( - httpx.codes.NOT_FOUND, json={"error": "service not found"} - ) - response = client.get(f"{url}") - assert response.status_code == httpx.codes.NOT_FOUND, f"{response.text}" diff --git a/services/catalog/tests/unit/with_dbs/test_api_routes_services_specifications.py b/services/catalog/tests/unit/with_dbs/test_api_routes_services_specifications.py deleted file mode 100644 index 820a7b1188b..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_api_routes_services_specifications.py +++ /dev/null @@ -1,374 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=too-many-arguments - - -import asyncio -from random import choice, randint -from typing import Any, AsyncIterator, Awaitable, Callable - -import pytest -import respx -from faker import Faker -from fastapi import FastAPI -from fastapi.encoders import jsonable_encoder -from models_library.generated_models.docker_rest_api import ( - DiscreteResourceSpec, - GenericResource, - GenericResources, - Limit, - NamedResourceSpec, - ResourceObject, -) -from models_library.generated_models.docker_rest_api import ( - Resources1 as ServiceTaskResources, -) -from models_library.generated_models.docker_rest_api import ServiceSpec -from models_library.users import UserID -from simcore_postgres_database.models.groups import user_to_groups -from simcore_postgres_database.models.services_specifications import ( - services_specifications, -) -from simcore_service_catalog.models.domain.service_specifications import ( - ServiceSpecificationsAtDB, -) -from simcore_service_catalog.models.schemas.services_specifications import ( - ServiceSpecifications, - ServiceSpecificationsGet, -) -from sqlalchemy.ext.asyncio import AsyncEngine -from starlette import status -from starlette.testclient import TestClient -from yarl import URL - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -async def services_specifications_injector( - sqlalchemy_async_engine: AsyncEngine, -) -> AsyncIterator[Callable[[ServiceSpecificationsAtDB], Awaitable[None]]]: - inserted_specs: list[ServiceSpecificationsAtDB] = [] - - async def _injector( - service_spec: ServiceSpecificationsAtDB, - ): - async with sqlalchemy_async_engine.begin() as conn: - await conn.execute( - services_specifications.insert().values(jsonable_encoder(service_spec)) - ) - inserted_specs.append(service_spec) - - yield _injector - - # clean up - async with sqlalchemy_async_engine.begin() as conn: - for spec in inserted_specs: - await conn.execute( - services_specifications.delete().where( - (services_specifications.c.service_key == spec.service_key) - & ( - services_specifications.c.service_version - == spec.service_version - ) - & (services_specifications.c.gid == spec.gid) - ) - ) - - -@pytest.fixture -def create_service_specifications( - faker: Faker, -) -> Callable[..., ServiceSpecificationsAtDB]: - def _creator(service_key, service_version, gid) -> ServiceSpecificationsAtDB: - return ServiceSpecificationsAtDB( - service_key=service_key, - service_version=service_version, - gid=gid, - sidecar=ServiceSpec(Labels=faker.pydict(allowed_types=(str,))), # type: ignore - service=ServiceTaskResources( - 
Limits=Limit( - NanoCPUs=faker.pyint(), - MemoryBytes=faker.pyint(), - Pids=faker.pyint(), - ), - Reservations=ResourceObject( - NanoCPUs=faker.pyint(), - MemoryBytes=faker.pyint(), - GenericResources=GenericResources( - __root__=[ - GenericResource( - NamedResourceSpec=NamedResourceSpec( - Kind=faker.pystr(), Value=faker.pystr() - ), - DiscreteResourceSpec=DiscreteResourceSpec( - Kind=faker.pystr(), Value=faker.pyint() - ), - ) - ] - ), - ), - ), - ) - - return _creator - - -async def test_get_service_specifications_returns_403_if_user_does_not_exist( - mock_catalog_background_task, - director_mockup: respx.MockRouter, - client: TestClient, - user_id: UserID, -): - service_key = f"simcore/services/{choice(['comp', 'dynamic'])}/jupyter-math" - service_version = f"{randint(0,100)}.{randint(0,100)}.{randint(0,100)}" - url = URL( - f"/v0/services/{service_key}/{service_version}/specifications" - ).with_query(user_id=user_id) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_403_FORBIDDEN - - -async def test_get_service_specifications_of_unknown_service_returns_default_specs( - mock_catalog_background_task, - director_mockup: respx.MockRouter, - app: FastAPI, - client: TestClient, - user_id: UserID, - user_db: dict[str, Any], - faker: Faker, -): - service_key = f"simcore/services/{choice(['comp', 'dynamic'])}/{faker.pystr()}" - service_version = f"{randint(0,100)}.{randint(0,100)}.{randint(0,100)}" - url = URL( - f"/v0/services/{service_key}/{service_version}/specifications" - ).with_query(user_id=user_id) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - - assert service_specs == app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - - -async def test_get_service_specifications( - mock_catalog_background_task, - director_mockup: respx.MockRouter, - app: FastAPI, - client: TestClient, - user_id: UserID, - user_db: dict[str, Any], - user_groups_ids: list[int], - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, - services_specifications_injector: Callable, - sqlalchemy_async_engine: AsyncEngine, - create_service_specifications: Callable[..., ServiceSpecificationsAtDB], -): - target_product = products_names[-1] - SERVICE_KEY = "simcore/services/dynamic/jupyterlab" - SERVICE_VERSION = "0.0.1" - await services_db_tables_injector( - [ - service_catalog_faker( - SERVICE_KEY, - SERVICE_VERSION, - team_access=None, - everyone_access=None, - product=target_product, - ) - ] - ) - - url = URL( - f"/v0/services/{SERVICE_KEY}/{SERVICE_VERSION}/specifications" - ).with_query(user_id=user_id) - - # this should now return default specs since there are none in the db - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - - everyone_gid, user_gid, team_gid = user_groups_ids - # let's inject some rights for everyone group - everyone_service_specs = create_service_specifications( - SERVICE_KEY, SERVICE_VERSION, everyone_gid - ) - await services_specifications_injector(everyone_service_specs) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs 
== ServiceSpecifications.parse_obj( - everyone_service_specs.dict() - ) - - # let's inject some rights in a standard group, user is not part of that group yet, so it should still return only everyone - standard_group_service_specs = create_service_specifications( - SERVICE_KEY, SERVICE_VERSION, team_gid - ) - await services_specifications_injector(standard_group_service_specs) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == ServiceSpecifications.parse_obj( - everyone_service_specs.dict() - ) - - # put the user in that group now and try again - async with sqlalchemy_async_engine.begin() as conn: - await conn.execute(user_to_groups.insert().values(uid=user_id, gid=team_gid)) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == ServiceSpecifications.parse_obj( - standard_group_service_specs.dict() - ) - - # now add some other spec in the primary gid, this takes precedence - user_group_service_specs = create_service_specifications( - SERVICE_KEY, SERVICE_VERSION, user_gid - ) - await services_specifications_injector(user_group_service_specs) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == ServiceSpecifications.parse_obj( - user_group_service_specs.dict() - ) - - -async def test_get_service_specifications_are_passed_to_newer_versions_of_service( - mock_catalog_background_task, - director_mockup: respx.MockRouter, - app: FastAPI, - client: TestClient, - user_id: UserID, - user_db: dict[str, Any], - user_groups_ids: list[int], - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, - services_specifications_injector: Callable, - create_service_specifications: Callable[..., ServiceSpecificationsAtDB], -): - target_product = products_names[-1] - SERVICE_KEY = "simcore/services/dynamic/jupyterlab" - sorted_versions = [ - "0.0.1", - "0.0.2", - "0.1.0", - "0.1.1", - "0.2.3", - "1.0.0", - "1.0.1", - "1.0.10", - "1.1.1", - "1.10.1", - "1.11.1", - "10.0.0", - ] - await asyncio.gather( - *[ - services_db_tables_injector( - [ - service_catalog_faker( - SERVICE_KEY, - version, - team_access=None, - everyone_access=None, - product=target_product, - ) - ] - ) - for version in sorted_versions - ] - ) - - everyone_gid, user_gid, team_gid = user_groups_ids - # let's inject some rights for everyone group ONLY for some versions - INDEX_FIRST_SERVICE_VERSION_WITH_SPEC = 2 - INDEX_SECOND_SERVICE_VERSION_WITH_SPEC = 6 - versions_with_specs = [ - sorted_versions[INDEX_FIRST_SERVICE_VERSION_WITH_SPEC], - sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC], - ] - version_speced: list[ServiceSpecificationsAtDB] = [] - - for version in versions_with_specs: - specs = create_service_specifications(SERVICE_KEY, version, everyone_gid) - await services_specifications_injector(specs) - version_speced.append(specs) - - # check versions before first speced service return the default - for version in sorted_versions[:INDEX_FIRST_SERVICE_VERSION_WITH_SPEC]: - url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( - user_id=user_id - ) - response = client.get(f"{url}") - assert response.status_code == 
status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert ( - service_specs == app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - ) - - # check version between first index and second all return the specs of the first - for version in sorted_versions[ - INDEX_FIRST_SERVICE_VERSION_WITH_SPEC:INDEX_SECOND_SERVICE_VERSION_WITH_SPEC - ]: - url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( - user_id=user_id - ) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == ServiceSpecifications.parse_obj( - version_speced[0].dict() - ), f"specifications for {version=} are not passed down from {sorted_versions[INDEX_FIRST_SERVICE_VERSION_WITH_SPEC]}" - - # check version from second to last use the second version - for version in sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC:]: - url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( - user_id=user_id - ) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - assert service_specs == ServiceSpecifications.parse_obj( - version_speced[1].dict() - ), f"specifications for {version=} are not passed down from {sorted_versions[INDEX_SECOND_SERVICE_VERSION_WITH_SPEC]}" - - # if we call with the strict parameter set to true, then we should only get the specs for the one that were specified - for version in sorted_versions: - url = URL(f"/v0/services/{SERVICE_KEY}/{version}/specifications").with_query( - user_id=user_id, strict=1 - ) - response = client.get(f"{url}") - assert response.status_code == status.HTTP_200_OK - service_specs = ServiceSpecificationsGet.parse_obj(response.json()) - assert service_specs - if version in versions_with_specs: - assert ( - service_specs - != app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - ) - else: - assert ( - service_specs - == app.state.settings.CATALOG_SERVICES_DEFAULT_SPECIFICATIONS - ) diff --git a/services/catalog/tests/unit/with_dbs/test_api_rpc.py b/services/catalog/tests/unit/with_dbs/test_api_rpc.py new file mode 100644 index 00000000000..5b43b728261 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_api_rpc.py @@ -0,0 +1,1043 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Callable +from typing import Any + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_catalog.services import ( + ServiceListFilters, + ServiceUpdateV2, +) +from models_library.products import ProductName +from models_library.rest_pagination import ( + DEFAULT_NUMBER_OF_ITEMS_PER_PAGE, + MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE, +) +from models_library.services_enums import ServiceType +from models_library.services_history import ServiceRelease +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from packaging import version +from pydantic import ValidationError +from pytest_simcore.helpers.faker_factories import random_icon_url +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env 
import EnvVarsDict +from respx.router import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.catalog import services as catalog_rpc +from servicelib.rabbitmq.rpc_interfaces.catalog.errors import ( + CatalogForbiddenError, + CatalogItemNotFoundError, +) + +pytest_simcore_core_services_selection = [ + "rabbit", + "postgres", +] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service +) -> EnvVarsDict: + monkeypatch.delenv("CATALOG_RABBITMQ", raising=False) + return setenvs_from_dict( + monkeypatch, + {**app_environment, **rabbit_env_vars_dict}, + ) + + +@pytest.fixture +def num_services() -> int: + return 5 + + +@pytest.fixture +def num_versions_per_service() -> int: + return 20 + + +@pytest.fixture +def fake_data_for_services( + target_product: ProductName, + create_fake_service_data: Callable, + num_services: int, + num_versions_per_service: int, +) -> list: + return [ + create_fake_service_data( + f"simcore/services/comp/test-api-rpc-service-{n}", + f"{v}.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + for n in range(num_services) + for v in range(num_versions_per_service) + ] + + +@pytest.fixture +def expected_director_rest_api_list_services( + expected_director_rest_api_list_services: list[dict[str, Any]], + fake_data_for_services: list, + create_director_list_services_from: Callable, +) -> list[dict[str, Any]]: + # OVERRIDES: Changes the values returned by the mocked_director_service_api + + return create_director_list_services_from( + expected_director_rest_api_list_services, fake_data_for_services + ) + + +@pytest.fixture +async def background_sync_task_mocked( + background_task_lifespan_disabled: None, + services_db_tables_injector: Callable, + fake_data_for_services: list, +) -> None: + # inject db services (typically done by the sync background task) + await services_db_tables_injector(fake_data_for_services) + + +async def test_rpc_list_services_paginated_with_no_services_returns_empty_page( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + user_id: UserID, + app: FastAPI, +): + assert app + + page = await catalog_rpc.list_services_paginated( + rpc_client, product_name="not_existing_returns_no_services", user_id=user_id + ) + assert page.data == [] + assert page.links.next is None + assert page.links.prev is None + assert page.meta.count == 0 + assert page.meta.total == 0 + + +async def test_rpc_list_services_paginated_with_filters( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, +): + assert app + + # only computational services introduced by the background_sync_task_mocked + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters={"service_type": "computational"}, + ) + # Fixed: Count might be capped by page limit + assert page.meta.count <= page.meta.total + assert page.meta.total > 0 + + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters(service_type=ServiceType.DYNAMIC), + ) + assert page.meta.total == 0 + + +@pytest.mark.skip( + reason="Issue with mocked_director_rest_api 
fixture. Urgent feature in master needed. Will follow up." +) +async def test_rpc_list_services_paginated_with_filter_combinations( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, +): + """Tests all combinations of filters for list_services_paginated""" + # Setup: Create test services with different patterns and types + test_services = [ + # Computational services + create_fake_service_data( + "simcore/services/comp/test-service1", + "1.0.0", + team_access=None, + everyone_access=None, + product=product_name, + version_display="2023 Release", + ), + create_fake_service_data( + "simcore/services/comp/test-service2", + "1.0.0", + team_access=None, + everyone_access=None, + product=product_name, + version_display=None, + ), + # Dynamic services + create_fake_service_data( + "simcore/services/dynamic/jupyter-lab", + "1.0.0", + team_access=None, + everyone_access=None, + product=product_name, + version_display="2024 Beta", + ), + create_fake_service_data( + "simcore/services/dynamic/jupyter-python", + "1.0.0", + team_access=None, + everyone_access=None, + product=product_name, + version_display=None, + ), + ] + await services_db_tables_injector(test_services) + + # Test 1: Filter by service type only + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters(service_type=ServiceType.COMPUTATIONAL), + ) + assert page.meta.total == 2 + assert all("services/comp/" in item.key for item in page.data) + + # Test 2: Filter by key pattern only + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters(service_key_pattern="*/jupyter-*"), + ) + assert page.meta.total == 2 + assert all("jupyter-" in item.key for item in page.data) + + # Test 3: Filter by version display pattern only + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters(version_display_pattern="*2023*"), + ) + assert page.meta.total == 1 + assert page.data[0].version_display == "2023 Release" + + # Test 4: Combined filters - type and key pattern + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters( + service_type=ServiceType.DYNAMIC, service_key_pattern="*/jupyter-*" + ), + ) + assert page.meta.total == 2 + assert all( + "services/dynamic/" in item.key and "jupyter-" in item.key for item in page.data + ) + + # Test 5: Combined filters with version display pattern + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters( + service_type=ServiceType.DYNAMIC, + service_key_pattern="*/jupyter-*", + version_display_pattern="*2024*", + ), + ) + assert page.meta.total == 1 + assert page.data[0].key == "simcore/services/dynamic/jupyter-lab" + assert page.data[0].version_display == "2024 Beta" + page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters( + service_type=ServiceType.DYNAMIC, + service_key_pattern="*/jupyter-*", + version_display_pattern="*2024*", + ), + ) + assert page.meta.total == 1 + assert page.data[0].version_display == 
"2024 Beta" + + +async def test_rpc_catalog_client_workflow( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + faker: Faker, +): + assert app + + page = await catalog_rpc.list_services_paginated( + rpc_client, product_name=product_name, user_id=user_id + ) + + assert page.data + service_key = page.data[0].key + service_version = page.data[0].version + + with pytest.raises(ValidationError): + await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + limit=MAXIMUM_NUMBER_OF_ITEMS_PER_PAGE + 1, + ) + + got = await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + assert got.key == service_key + assert got.version == service_version + + assert got.model_dump(exclude={"history"}) == next( + item.model_dump(exclude={"release"}) + for item in page.data + if (item.key == service_key and item.version == service_version) + ) + + updated = await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + update=ServiceUpdateV2( + name="foo", + description="bar", + icon=random_icon_url(faker), + version_display="this is a nice version", + description_ui=True, # owner activates wiki view + ), + ) + + assert updated.key == got.key + assert updated.version == got.version + assert updated.name == "foo" + assert updated.description == "bar" + assert updated.description_ui + assert updated.version_display == "this is a nice version" + assert updated.icon is not None + assert not updated.classifiers + + got = await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + assert got == updated + + +async def test_rpc_get_service_not_found_error( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, +): + + with pytest.raises(CatalogItemNotFoundError, match="unknown"): + await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key="simcore/services/dynamic/unknown", + service_version="1.0.0", + ) + + +async def test_rpc_get_service_validation_error( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, +): + + with pytest.raises(ValidationError, match="service_key"): + await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key="wrong-format/unknown", + service_version="1.0.0", + ) + + +async def test_rpc_check_for_service( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, +): + with pytest.raises(CatalogItemNotFoundError, match="unknown"): + await catalog_rpc.check_for_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key="simcore/services/dynamic/unknown", + service_version="1.0.0", + ) + + +async def test_rpc_get_service_access_rights( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: 
ProductName, + user: dict[str, Any], + user_id: UserID, + other_user: dict[str, Any], + app: FastAPI, +): + assert app + assert user["id"] == user_id + + # user_id owns a service (created in background_sync_task_mocked) + service_key = ServiceKey("simcore/services/comp/test-api-rpc-service-0") + service_version = ServiceVersion("0.0.0") + + service = await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + assert service + assert service.access_rights + assert service.access_rights[user["primary_gid"]].write + assert service.access_rights[user["primary_gid"]].execute + + assert other_user["primary_gid"] not in service.access_rights + + # other_user does not have EXECUTE access ----------------- + with pytest.raises(CatalogForbiddenError, match=service_key): + await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + ) + + # other_user does not have WRITE access + with pytest.raises(CatalogForbiddenError, match=service_key): + await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + update={ + "name": "foo", + "description": "bar", + }, + ) + + # user_id gives "x access" to other_user ------------ + assert service.access_rights is not None + await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + update={ + "access_rights": { + **service.access_rights, + other_user["primary_gid"]: { + "execute": True, + "write": False, + }, + } + }, + ) + + # other user can now GET but NOT UPDATE + await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + ) + + with pytest.raises(CatalogForbiddenError, match=service_key): + await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + update={ + "name": "foo", + "description": "bar", + }, + ) + + # user_id gives "xw access" to other_user ------------------ + assert service.access_rights is not None + await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + update={ + "access_rights": { + **service.access_rights, + other_user["primary_gid"]: { + "execute": True, + "write": True, + }, + } + }, + ) + + # other_user can now update and get + await catalog_rpc.update_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + update={ + "name": "foo", + "description": "bar", + }, + ) + updated_service = await catalog_rpc.get_service( + rpc_client, + product_name=product_name, + user_id=other_user["id"], + service_key=service_key, + service_version=service_version, + ) + assert updated_service.model_dump(include={"name", "description"}) == { + "name": "foo", + "description": "bar", + } + + +async def test_rpc_batch_get_my_services( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user: dict[str, Any], + user_id: UserID, + app: FastAPI, + 
create_fake_service_data: Callable, + services_db_tables_injector: Callable, +): + # Create fake services data + service_key = "simcore/services/comp/test-batch-service" + service_version_1 = "1.0.0" + service_version_2 = "1.0.5" + + other_service_key = "simcore/services/comp/other-batch-service" + other_service_version = "1.0.0" + + fake_service_1 = create_fake_service_data( + service_key, + service_version_1, + team_access=None, + everyone_access=None, + product=product_name, + ) + fake_service_2 = create_fake_service_data( + service_key, + service_version_2, + team_access="x", + everyone_access=None, + product=product_name, + ) + fake_service_3 = create_fake_service_data( + other_service_key, + other_service_version, + team_access=None, + everyone_access=None, + product=product_name, + ) + + # Inject fake services into the database + await services_db_tables_injector([fake_service_1, fake_service_2, fake_service_3]) + + # Batch get my services: project with two, not three + ids = [ + (service_key, service_version_1), + (other_service_key, other_service_version), + ] + + my_services = await catalog_rpc.batch_get_my_services( + rpc_client, + product_name=product_name, + user_id=user_id, + ids=ids, + ) + + assert len(my_services) == 2 + + # Check access rights to all of them + assert my_services[0].my_access_rights.model_dump() == { + "execute": True, + "write": True, + } + assert my_services[0].owner == user["primary_gid"] + assert my_services[0].key == service_key + assert my_services[0].release.version == service_version_1 + assert my_services[0].release.compatibility + assert ( + my_services[0].release.compatibility.can_update_to.version == service_version_2 + ) + + assert my_services[1].my_access_rights.model_dump() == { + "execute": True, + "write": True, + } + assert my_services[1].owner == user["primary_gid"] + assert my_services[1].key == other_service_key + assert my_services[1].release.version == other_service_version + + +async def test_rpc_list_my_service_history_paginated( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, +): + assert app + + service_key = "simcore/services/comp/test-service-release-history" + service_version_1 = "1.0.0" + service_version_2 = "1.1.0" + + assert version.Version(service_version_1) < version.Version(service_version_2) + + # Inject fake service releases for the target service + fake_releases = [ + create_fake_service_data( + service_key, + srv_version, + team_access=None, + everyone_access=None, + product=product_name, + ) + for srv_version in (service_version_1, service_version_2) + ] + + # Inject unrelated fake service releases + unrelated_service_key_1 = "simcore/services/comp/unrelated-service-1" + unrelated_service_key_2 = "simcore/services/comp/unrelated-service-2" + unrelated_releases = [ + *[ + create_fake_service_data( + unrelated_service_key_1, + srv_version, + team_access=None, + everyone_access=None, + product=product_name, + ) + for srv_version in (service_version_1, service_version_2) + ], + create_fake_service_data( + unrelated_service_key_2, + "2.0.0", + team_access=None, + everyone_access=None, + product=product_name, + ), + ] + + await services_db_tables_injector(fake_releases + unrelated_releases) + + # Call the RPC function + page = await catalog_rpc.list_my_service_history_latest_first( + rpc_client, + 
product_name=product_name, + user_id=user_id, + service_key=service_key, + ) + release_history: list[ServiceRelease] = page.data + + # Validate the response + assert isinstance(release_history, list) + assert len(release_history) == 2 + assert release_history[0].version == service_version_2, "expected newest first" + assert release_history[1].version == service_version_1 + + +async def test_rpc_get_service_ports_successful_retrieval( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + expected_director_rest_api_list_services: list[dict[str, Any]], +): + """Tests successful retrieval of service ports for a specific service version""" + assert app + + # Create a service with known ports + expected_service = expected_director_rest_api_list_services[0] + service_key = expected_service["key"] + service_version = expected_service["version"] + + # Call the RPC function to get service ports + ports = await catalog_rpc.get_service_ports( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=service_key, + service_version=service_version, + ) + + # Validate the response + expected_inputs = expected_service["inputs"] + expected_outputs = expected_service["outputs"] + assert len(ports) == len(expected_inputs) + len(expected_outputs) + + +async def test_rpc_get_service_ports_not_found( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, +): + """Tests that appropriate error is raised when service does not exist""" + assert app + + service_version = "1.0.0" + non_existent_key = "simcore/services/comp/non-existent-service" + + # Test service not found scenario + with pytest.raises(CatalogItemNotFoundError, match="non-existent-service"): + await catalog_rpc.get_service_ports( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key=non_existent_key, + service_version=service_version, + ) + + +async def test_rpc_get_service_ports_permission_denied( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user: dict[str, Any], + user_id: UserID, + other_user: dict[str, Any], + app: FastAPI, + create_fake_service_data: Callable, + services_db_tables_injector: Callable, +): + """Tests that appropriate error is raised when user doesn't have permission""" + assert app + + assert other_user["id"] != user_id + assert user["id"] == user_id + + # Create a service with restricted access + restricted_service_key = "simcore/services/comp/restricted-service" + service_version = "1.0.0" + + fake_restricted_service = create_fake_service_data( + restricted_service_key, + service_version, + team_access=None, + everyone_access=None, + product=product_name, + ) + + # Modify access rights to restrict access + # Remove user's access if present + if ( + "access_rights" in fake_restricted_service + and user["primary_gid"] in fake_restricted_service["access_rights"] + ): + fake_restricted_service["access_rights"].pop(user["primary_gid"]) + + await services_db_tables_injector([fake_restricted_service]) + + # Attempt to access without permission + with pytest.raises(CatalogForbiddenError): + await catalog_rpc.get_service_ports( + rpc_client, + product_name=product_name, + user_id=other_user["id"], # Use a different user ID + service_key=restricted_service_key, + 
service_version=service_version, + ) + + +async def test_rpc_get_service_ports_validation_error( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, +): + """Tests validation error handling for get_service_ports.""" + assert app + + # Test with invalid service key format + with pytest.raises(ValidationError, match="service_key"): + await catalog_rpc.get_service_ports( + rpc_client, + product_name=product_name, + user_id=user_id, + service_key="invalid-service-key-format", + service_version="1.0.0", + ) + + +async def test_rpc_list_all_services_summaries_paginated_with_no_services_returns_empty_page( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + user_id: UserID, + app: FastAPI, +): + """Tests that requesting summaries for non-existing services returns an empty page.""" + assert app + + page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, product_name="not_existing_returns_no_services", user_id=user_id + ) + assert page.data == [] + assert page.links.next is None + assert page.links.prev is None + assert page.meta.count == 0 + assert page.meta.total == 0 + + +async def test_rpc_list_all_services_summaries_paginated_with_filters( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, +): + """Tests that service summaries can be filtered by service type.""" + assert app + + # Get all computational services introduced by the background_sync_task_mocked + page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters={"service_type": "computational"}, + ) + # Fixed: Count might be capped by page limit + assert page.meta.count <= page.meta.total + assert page.meta.total > 0 + + # All items should be service summaries with the expected minimal fields + for item in page.data: + assert "key" in item.model_dump() + assert "name" in item.model_dump() + assert "version" in item.model_dump() + assert "description" in item.model_dump() + + # Filter for a service type that doesn't exist + page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + filters=ServiceListFilters(service_type=ServiceType.DYNAMIC), + ) + assert page.meta.total == 0 + + +async def test_rpc_list_all_services_summaries_paginated_with_pagination( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + num_services: int, + num_versions_per_service: int, +): + """Tests pagination of service summaries.""" + assert app + + total_services = num_services * num_versions_per_service + + # Get first page with default page size + first_page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + ) + + # Verify total count is correct + assert first_page.meta.total == total_services + + # Maximum items per page is constrained by DEFAULT_NUMBER_OF_ITEMS_PER_PAGE + assert len(first_page.data) <= DEFAULT_NUMBER_OF_ITEMS_PER_PAGE + + # Test with small page size + page_size = 5 + first_small_page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, +
product_name=product_name, + user_id=user_id, + limit=page_size, + offset=0, + ) + assert len(first_small_page.data) == page_size + assert first_small_page.meta.total == total_services + assert first_small_page.links.next is not None + assert first_small_page.links.prev is None + + # Get next page and verify different content + next_page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + limit=page_size, + offset=page_size, + ) + assert len(next_page.data) == page_size + assert next_page.meta.total == first_small_page.meta.total + + # Check that first and second page contain different items + first_page_keys = {(item.key, item.version) for item in first_small_page.data} + next_page_keys = {(item.key, item.version) for item in next_page.data} + assert not first_page_keys.intersection(next_page_keys) + + +async def test_rpc_compare_latest_vs_all_services_summaries( + background_sync_task_mocked: None, + mocked_director_rest_api: MockRouter, + rpc_client: RabbitMQRPCClient, + product_name: ProductName, + user_id: UserID, + app: FastAPI, + num_services: int, + num_versions_per_service: int, +): + """Compares results of list_services_paginated vs list_all_services_summaries_paginated.""" + assert app + + total_expected_services = num_services * num_versions_per_service + + # Get all latest services (should fit in one page) + latest_page = await catalog_rpc.list_services_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + ) + assert latest_page.meta.total == num_services + + # For all services (all versions), we might need multiple requests + # First page to get metadata + first_all_page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + ) + assert first_all_page.meta.total == total_expected_services + + # Collect all items across multiple pages if needed + all_items = list(first_all_page.data) + offset = len(all_items) + + # Continue fetching pages until we have all items + while offset < total_expected_services: + next_page = await catalog_rpc.list_all_services_summaries_paginated( + rpc_client, + product_name=product_name, + user_id=user_id, + offset=offset, + ) + all_items.extend(next_page.data) + offset += len(next_page.data) + if not next_page.links.next: + break + + # Verify we got all items + assert len(all_items) == total_expected_services + + # Collect unique keys from both responses + latest_keys = {item.key for item in latest_page.data} + all_keys = {item.key for item in all_items} + + # All service keys in latest should be in all services + assert latest_keys.issubset(all_keys) + + # For each key in latest, there should be exactly num_versions_per_service entries in all + for key in latest_keys: + versions_in_all = [item.version for item in all_items if item.key == key] + assert len(versions_in_all) == num_versions_per_service + + # Get the latest version from latest_page + latest_version = next( + item.version for item in latest_page.data if item.key == key + ) + + # Verify this version exists in versions_in_all + assert latest_version in versions_in_all + + # Verify all items are ServiceSummary objects with just the essential fields + for item in all_items: + item_dict = item.model_dump() + assert "key" in item_dict + assert "version" in item_dict + assert "name" in item_dict + assert "description" in item_dict + assert "thumbnail" not in item_dict + assert "service_type" not in item_dict + assert "inputs" not in 
item_dict + assert "outputs" not in item_dict + assert "access_rights" not in item_dict diff --git a/services/catalog/tests/unit/with_dbs/test_core_background_task__sync.py b/services/catalog/tests/unit/with_dbs/test_core_background_task__sync.py new file mode 100644 index 00000000000..8e61016ad06 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_core_background_task__sync.py @@ -0,0 +1,98 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=not-an-iterable +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from typing import Any + +import pytest +import simcore_service_catalog.service.access_rights +from fastapi import FastAPI, HTTPException, status +from pytest_mock import MockerFixture +from respx.router import MockRouter +from simcore_postgres_database.models.services import services_meta_data +from simcore_service_catalog.core.background_tasks import _run_sync_services +from simcore_service_catalog.repository.services import ServicesRepository +from sqlalchemy.ext.asyncio.engine import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def services_repo(app: FastAPI) -> ServicesRepository: + # depends on client so the app has a state ready + assert len(app.state._state) > 0 # noqa: SLF001 + return ServicesRepository(app.state.engine) + + +@pytest.fixture +async def cleanup_service_meta_data_db_content(sqlalchemy_async_engine: AsyncEngine): + # NOTE: necessary because _run_sync_services fills tables + yield + + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute(services_meta_data.delete()) + + +@pytest.mark.parametrize("director_fails", [False, True]) +async def test_registry_sync_task( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + expected_director_rest_api_list_services: list[dict[str, Any]], + user: dict[str, Any], + app: FastAPI, + services_repo: ServicesRepository, + cleanup_service_meta_data_db_content: None, + mocker: MockerFixture, + director_fails: bool, +): + assert app.state + + if director_fails: + # Emulates issue https://github.com/ITISFoundation/osparc-simcore/issues/6318 + mocker.patch.object( + simcore_service_catalog.service.access_rights, + "_is_old_service", + side_effect=HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="fake director error" + ), + ) + + service_key = expected_director_rest_api_list_services[0]["key"] + service_version = expected_director_rest_api_list_services[0]["version"] + + # in registry but NOT in db + got_from_db = await services_repo.get_service_with_history( + product_name="osparc", + user_id=user["id"], + key=service_key, + version=service_version, + ) + assert not got_from_db + + # let's sync + await _run_sync_services(app) + + # after sync, it should be in db as well + got_from_db = await services_repo.get_service_with_history( + product_name="osparc", + user_id=user["id"], + key=service_key, + version=service_version, + ) + + if director_fails: + assert not got_from_db + else: + assert got_from_db + assert got_from_db.key == service_key + assert got_from_db.version == service_version diff --git a/services/catalog/tests/unit/with_dbs/test_db_repositories.py b/services/catalog/tests/unit/with_dbs/test_db_repositories.py deleted file mode 100644 index ed2ecc3d260..00000000000 --- 
a/services/catalog/tests/unit/with_dbs/test_db_repositories.py +++ /dev/null @@ -1,265 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from dataclasses import dataclass, field -from typing import Callable - -import pytest -from models_library.services_db import ServiceAccessRightsAtDB, ServiceMetaDataAtDB -from packaging import version -from simcore_service_catalog.db.repositories.services import ServicesRepository -from simcore_service_catalog.utils.versioning import is_patch_release -from sqlalchemy.ext.asyncio import AsyncEngine - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -def services_repo(sqlalchemy_async_engine: AsyncEngine): - repo = ServicesRepository(sqlalchemy_async_engine) - return repo - - -@dataclass -class FakeCatalogInfo: - jupyter_service_key: str = "simcore/services/dynamic/jupyterlab" - expected_services_count: int = 5 - expected_latest: str = "1.1.3" - expected_1_1_x: list[str] = field(default_factory=list) - expected_0_x_x: list[str] = field(default_factory=list) - - -@pytest.fixture() -async def fake_catalog_with_jupyterlab( - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, -) -> FakeCatalogInfo: - target_product = products_names[-1] - - # injects fake data in db - await services_db_tables_injector( - [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "0.0.1", - team_access=None, - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "0.0.7", - team_access=None, - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "0.10.0", - team_access="x", - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "1.1.0", - team_access="xw", - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "1.1.3", - team_access=None, - everyone_access=None, - product=target_product, - ), - ] - ) - - info = FakeCatalogInfo( - expected_services_count=5, - expected_latest="1.1.3", - expected_1_1_x=["1.1.3", "1.1.0"], - expected_0_x_x=["0.10.0", "0.0.7", "0.0.1"], - ) - return info - - -async def test_create_services( - services_repo: ServicesRepository, service_catalog_faker: Callable -): - # creates fake data - fake_service, *fake_access_rights = service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "1.0.0", - team_access=None, - everyone_access=None, - ) - - # validation - service = ServiceMetaDataAtDB.parse_obj(fake_service) - service_access_rights = [ - ServiceAccessRightsAtDB.parse_obj(a) for a in fake_access_rights - ] - - new_service = await services_repo.create_service(service, service_access_rights) - - assert new_service.dict(include=set(fake_service.keys())) == service.dict() - - -async def test_read_services( - services_repo: ServicesRepository, - user_groups_ids: list[int], - products_names: list[str], - service_catalog_faker: Callable, - services_db_tables_injector: Callable, -): - target_product = products_names[-1] - - # injects fake data in db - await services_db_tables_injector( - [ - service_catalog_faker( - "simcore/services/dynamic/jupyterlab", - "1.0.0", - team_access=None, - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - 
"simcore/services/dynamic/jupyterlab", - "1.0.2", - team_access="x", - everyone_access=None, - product=target_product, - ), - ] - ) - - # list - services = await services_repo.list_services() - assert len(services) == 2 - - everyone_gid, user_gid, team_gid = user_groups_ids - assert everyone_gid == 1 - - services = await services_repo.list_services( - gids=[ - user_gid, - ] - ) - assert len(services) == 2 - - services = await services_repo.list_services( - gids=[ - team_gid, - ] - ) - assert len(services) == 1 - - # get 1.0.0 - service = await services_repo.get_service( - "simcore/services/dynamic/jupyterlab", "1.0.0" - ) - assert service - - access_rights = await services_repo.get_service_access_rights( - product_name=target_product, **service.dict(include={"key", "version"}) - ) - assert { - user_gid, - } == {a.gid for a in access_rights} - - # get patched version - service = await services_repo.get_service( - "simcore/services/dynamic/jupyterlab", "1.0.2" - ) - assert service - - access_rights = await services_repo.get_service_access_rights( - product_name=target_product, **service.dict(include={"key", "version"}) - ) - assert {user_gid, team_gid} == {a.gid for a in access_rights} - - -async def test_list_service_releases( - fake_catalog_with_jupyterlab: FakeCatalogInfo, - services_repo: ServicesRepository, -): - services: list[ServiceMetaDataAtDB] = await services_repo.list_service_releases( - "simcore/services/dynamic/jupyterlab" - ) - assert len(services) == fake_catalog_with_jupyterlab.expected_services_count - - vs = [version.Version(s.version) for s in services] - assert sorted(vs, reverse=True) == vs - - # list all patches w.r.t latest - patches = [v for v in vs if is_patch_release("1.1.4", v)] - assert len(patches) == 2 - - # check limit - releases = await services_repo.list_service_releases( - "simcore/services/dynamic/jupyterlab", limit_count=2 - ) - - assert len(releases) == 2 - last_release, previous_release = releases - - assert is_patch_release(last_release.version, previous_release.version) - - await services_repo.update_latest_versions_cache() - assert last_release == await services_repo.get_latest_release( - "simcore/services/dynamic/jupyterlab" - ) - - -async def test_list_service_releases_version_filtered( - fake_catalog_with_jupyterlab: FakeCatalogInfo, - services_repo: ServicesRepository, -): - await services_repo.update_latest_versions_cache() - latest = await services_repo.get_latest_release( - "simcore/services/dynamic/jupyterlab" - ) - assert latest - assert latest.version == fake_catalog_with_jupyterlab.expected_latest - - releases_1_1_x: list[ - ServiceMetaDataAtDB - ] = await services_repo.list_service_releases( - "simcore/services/dynamic/jupyterlab", major=1, minor=1 - ) - assert [ - s.version for s in releases_1_1_x - ] == fake_catalog_with_jupyterlab.expected_1_1_x - - expected_0_x_x: list[ - ServiceMetaDataAtDB - ] = await services_repo.list_service_releases( - "simcore/services/dynamic/jupyterlab", major=0 - ) - assert [ - s.version for s in expected_0_x_x - ] == fake_catalog_with_jupyterlab.expected_0_x_x - - -async def test_update_latest_versions_cache( - services_repo: ServicesRepository, fake_catalog_with_jupyterlab: FakeCatalogInfo -): - await services_repo.update_latest_versions_cache() - - latest = await services_repo.get_latest_release( - "simcore/services/dynamic/jupyterlab" - ) - - assert latest - assert latest.version == fake_catalog_with_jupyterlab.expected_latest diff --git a/services/catalog/tests/unit/with_dbs/test_repositories.py 
b/services/catalog/tests/unit/with_dbs/test_repositories.py new file mode 100644 index 00000000000..ec8fca12825 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_repositories.py @@ -0,0 +1,1162 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import logging +import random +from collections import Counter +from collections.abc import Callable +from contextlib import AsyncExitStack +from dataclasses import dataclass, field +from typing import Any + +import pytest +from models_library.products import ProductName +from models_library.services_enums import ServiceType # Import ServiceType enum +from models_library.services_regex import ( + COMPUTATIONAL_SERVICE_KEY_PREFIX, + DYNAMIC_SERVICE_KEY_PREFIX, + SERVICE_TYPE_TO_PREFIX_MAP, +) +from models_library.users import UserID +from packaging import version +from pydantic import EmailStr, HttpUrl, TypeAdapter +from pytest_simcore.helpers.catalog_services import CreateFakeServiceDataCallable +from pytest_simcore.helpers.faker_factories import random_project +from pytest_simcore.helpers.postgres_tools import insert_and_get_row_lifespan +from simcore_postgres_database.models.projects import ProjectType, projects +from simcore_service_catalog.models.services_db import ( + ServiceAccessRightsDB, + ServiceDBFilters, + ServiceMetaDataDBCreate, + ServiceMetaDataDBGet, + ServiceMetaDataDBPatch, +) +from simcore_service_catalog.repository.projects import ProjectsRepository +from simcore_service_catalog.repository.services import ServicesRepository +from simcore_service_catalog.utils.versioning import is_patch_release +from sqlalchemy.ext.asyncio import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def services_repo(sqlalchemy_async_engine: AsyncEngine) -> ServicesRepository: + return ServicesRepository(sqlalchemy_async_engine) + + +@pytest.fixture +def projects_repo(sqlalchemy_async_engine: AsyncEngine) -> ProjectsRepository: + return ProjectsRepository(sqlalchemy_async_engine) + + +@dataclass +class FakeCatalogInfo: + jupyter_service_key: str = "simcore/services/dynamic/jupyterlab" + expected_services_count: int = 5 + expected_latest: str = "1.1.3" + expected_1_1_x: list[str] = field(default_factory=list) + expected_0_x_x: list[str] = field(default_factory=list) + + +@pytest.fixture +async def fake_catalog_with_jupyterlab( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, +) -> FakeCatalogInfo: + + # injects fake data in db + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "0.0.1", + team_access=None, + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "0.0.7", + team_access=None, + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "0.10.0", + team_access="x", + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.1.0", + team_access="xw", + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.1.3", + team_access=None, + everyone_access=None, + product=target_product, + ), + ] + ) + + return 
FakeCatalogInfo( + expected_services_count=5, + expected_latest="1.1.3", + expected_1_1_x=["1.1.3", "1.1.0"], + expected_0_x_x=["0.10.0", "0.0.7", "0.0.1"], + ) + + +async def test_create_services( + services_repo: ServicesRepository, + create_fake_service_data: CreateFakeServiceDataCallable, +): + # creates fake data + fake_service, *fake_access_rights = create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.0", + team_access="x", + everyone_access="x", + ) + + # validation + service_db_create = ServiceMetaDataDBCreate.model_validate(fake_service) + service_access_rights = [ + ServiceAccessRightsDB.model_validate(a) for a in fake_access_rights + ] + + new_service = await services_repo.create_or_update_service( + service_db_create, service_access_rights + ) + + assert new_service.model_dump( + include=service_db_create.model_fields_set + ) == service_db_create.model_dump(exclude_unset=True) + + +@pytest.mark.parametrize( + "url_object", + [ + "https://github.com/some/path/to/image.png?raw=true", + TypeAdapter(HttpUrl).validate_python( + "https://github.com/some/path/to/image.png?raw=true" + ), + "", + None, + ], +) +async def test_regression_service_meta_data_db_create( + create_fake_service_data: CreateFakeServiceDataCallable, + url_object: str | HttpUrl | None, +): + fake_service, *_ = create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.0", + team_access="x", + everyone_access="x", + ) + + fake_service["icon"] = url_object + assert ServiceMetaDataDBCreate.model_validate(fake_service) + + +async def test_read_services( + services_repo: ServicesRepository, + user_groups_ids: list[int], + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, +): + + # injects fake data in db + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + "simcore/services/dynamic/jupyterlab", + "1.0.2", + team_access="x", + everyone_access=None, + product=target_product, + ), + ] + ) + + # list + services = await services_repo.list_services() + assert len(services) == 2 + + everyone_gid, user_gid, team_gid = user_groups_ids + assert everyone_gid == 1 + + services = await services_repo.list_services( + gids=[ + user_gid, + ] + ) + assert len(services) == 2 + + services = await services_repo.list_services( + gids=[ + team_gid, + ] + ) + assert len(services) == 1 + + # get 1.0.0 + service = await services_repo.get_service( + "simcore/services/dynamic/jupyterlab", "1.0.0" + ) + assert service + + access_rights = await services_repo.get_service_access_rights( + product_name=target_product, **service.model_dump(include={"key", "version"}) + ) + assert { + user_gid, + } == {a.gid for a in access_rights} + + # get patched version + service = await services_repo.get_service( + "simcore/services/dynamic/jupyterlab", "1.0.2" + ) + assert service + + access_rights = await services_repo.get_service_access_rights( + product_name=target_product, **service.model_dump(include={"key", "version"}) + ) + assert {user_gid, team_gid} == {a.gid for a in access_rights} + + +async def test_list_service_releases( + fake_catalog_with_jupyterlab: FakeCatalogInfo, + services_repo: ServicesRepository, +): + services: list[ServiceMetaDataDBGet] = await services_repo.list_service_releases( + "simcore/services/dynamic/jupyterlab" + ) + assert len(services) == 
fake_catalog_with_jupyterlab.expected_services_count + + vs = [version.Version(s.version) for s in services] + assert sorted(vs, reverse=True) == vs + + # list all patches w.r.t latest + patches = [v for v in vs if is_patch_release("1.1.4", v)] + assert len(patches) == 2 + + # check limit + releases = await services_repo.list_service_releases( + "simcore/services/dynamic/jupyterlab", limit_count=2 + ) + + assert len(releases) == 2 + last_release, previous_release = releases + + assert is_patch_release(last_release.version, previous_release.version) + + assert last_release == await services_repo.get_latest_release( + "simcore/services/dynamic/jupyterlab" + ) + + +async def test_list_service_releases_version_filtered( + fake_catalog_with_jupyterlab: FakeCatalogInfo, + services_repo: ServicesRepository, +): + latest = await services_repo.get_latest_release( + "simcore/services/dynamic/jupyterlab" + ) + assert latest + assert latest.version == fake_catalog_with_jupyterlab.expected_latest + + releases_1_1_x: list[ServiceMetaDataDBGet] = ( + await services_repo.list_service_releases( + "simcore/services/dynamic/jupyterlab", major=1, minor=1 + ) + ) + assert [ + s.version for s in releases_1_1_x + ] == fake_catalog_with_jupyterlab.expected_1_1_x + + expected_0_x_x: list[ServiceMetaDataDBGet] = ( + await services_repo.list_service_releases( + "simcore/services/dynamic/jupyterlab", major=0 + ) + ) + assert [ + s.version for s in expected_0_x_x + ] == fake_catalog_with_jupyterlab.expected_0_x_x + + +async def test_get_latest_release( + services_repo: ServicesRepository, fake_catalog_with_jupyterlab: FakeCatalogInfo +): + + latest = await services_repo.get_latest_release( + "simcore/services/dynamic/jupyterlab" + ) + + assert latest + assert latest.version == fake_catalog_with_jupyterlab.expected_latest + + +async def test_list_latest_services( + target_product: ProductName, + user_id: UserID, + services_repo: ServicesRepository, + fake_catalog_with_jupyterlab: FakeCatalogInfo, +): + + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id + ) + assert len(services_items) == 1 + assert total_count == 1 + + # latest + assert services_items[0].key == "simcore/services/dynamic/jupyterlab" + assert services_items[0].version == fake_catalog_with_jupyterlab.expected_latest + + assert ( + len(services_items[0].history) == 0 + ), "list_latest_service does NOT show history" + + +async def test_list_latest_services_with_no_services( + target_product: ProductName, + services_repo: ServicesRepository, + user_id: UserID, +): + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id + ) + assert len(services_items) == 0 + assert total_count == 0 + + +async def test_list_latest_services_with_pagination( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + # inject services + num_services = 5 + num_versions_per_service = 20 + await services_db_tables_injector( + [ + create_fake_service_data( + f"simcore/services/dynamic/some-service-{n}", + f"{v}.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + for n in range(num_services) + for v in range(num_versions_per_service) + ] + ) + expected_latest_version = f"{num_versions_per_service-1}.0.0" + + total_count, services_items = await services_repo.list_latest_services( + 
product_name=target_product, user_id=user_id + ) + assert len(services_items) == num_services + assert total_count == num_services + + for service in services_items: + assert len(service.history) == 0, "Do not show history in listing" + assert service.version == expected_latest_version + + _, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, pagination_limit=2 + ) + assert len(services_items) == 2 + + for service in services_items: + assert len(service.history) == 0, "Do not show history in listing" + + assert TypeAdapter(EmailStr).validate_python( + service.owner_email + ), "resolved own'es email" + + duplicates = [ + service_key + for service_key, count in Counter( + service.key for service in services_items + ).items() + if count > 1 + ] + assert ( + not duplicates + ), f"list of latest versions of services cannot have duplicates, found: {duplicates}" + + +async def test_list_latest_services_with_filters( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + # Setup: Inject services with different service types + await services_db_tables_injector( + [ + create_fake_service_data( + f"{DYNAMIC_SERVICE_KEY_PREFIX}/service-name-a-{i}", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + for i in range(3) + ] + + [ + create_fake_service_data( + f"{COMPUTATIONAL_SERVICE_KEY_PREFIX}/service-name-b-{i}", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + for i in range(2) + ] + ) + + # Test: Apply filter for ServiceType.DYNAMIC + filters = ServiceDBFilters(service_type=ServiceType.DYNAMIC) + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 3 + assert len(services_items) == 3 + assert all( + service.key.startswith(DYNAMIC_SERVICE_KEY_PREFIX) for service in services_items + ) + + # Test: Apply filter for ServiceType.COMPUTATIONAL + filters = ServiceDBFilters(service_type=ServiceType.COMPUTATIONAL) + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 2 + assert len(services_items) == 2 + assert all( + service.key.startswith(COMPUTATIONAL_SERVICE_KEY_PREFIX) + for service in services_items + ) + + +async def test_list_latest_services_with_pattern_filters( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + # Setup: Inject services with different patterns + await services_db_tables_injector( + [ + create_fake_service_data( + "simcore/services/dynamic/jupyter-lab", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + version_display="2023 Release", + ), + create_fake_service_data( + "simcore/services/dynamic/jupyter-r", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + version_display="2024 Beta", + ), + create_fake_service_data( + "simcore/services/dynamic/jupyter-python", + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ), + ] + ) + + # Test: Filter by service key pattern + filters = ServiceDBFilters(service_key_pattern="*/jupyter-*") + total_count, services_items = await 
services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 3 + assert len(services_items) == 3 + assert all( + service.key.endswith("jupyter-lab") + or service.key.endswith("jupyter-r") + or service.key.endswith("jupyter-python") + for service in services_items + ) + + # Test: More specific pattern + filters = ServiceDBFilters(service_key_pattern="*/jupyter-l*") + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 1 + assert len(services_items) == 1 + assert services_items[0].key.endswith("jupyter-lab") + + # Test: Filter by version display pattern + filters = ServiceDBFilters(version_display_pattern="*2023*") + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 1 + assert len(services_items) == 1 + assert services_items[0].version_display == "2023 Release" + + # Test: Filter by version display pattern with NULL handling + filters = ServiceDBFilters(version_display_pattern="*") + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 3 # Should match all, including NULL version_display + assert len(services_items) == 3 + + # Test: Combined filters + filters = ServiceDBFilters( + service_key_pattern="*/jupyter-*", version_display_pattern="*2024*" + ) + total_count, services_items = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + assert total_count == 1 + assert len(services_items) == 1 + assert services_items[0].version_display == "2024 Beta" + assert services_items[0].key.endswith("jupyter-r") + + +async def test_get_and_update_service_meta_data( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + + # inject service + service_key = "simcore/services/dynamic/some-service" + service_version = "1.2.3" + await services_db_tables_injector( + [ + create_fake_service_data( + service_key, + service_version, + team_access=None, + everyone_access=None, + product=target_product, + ) + ] + ) + + got = await services_repo.get_service(service_key, service_version) + assert got is not None + assert got.key == service_key + assert got.version == service_version + + await services_repo.update_service( + service_key, + service_version, + ServiceMetaDataDBPatch(name="foo"), + ) + updated = await services_repo.get_service(service_key, service_version) + assert updated + + expected = got.model_copy(update={"name": "foo", "modified": updated.modified}) + assert updated == expected + + +async def test_can_get_service( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + + # inject service + service_key = "simcore/services/dynamic/some-service" + service_version = "1.2.3" + await services_db_tables_injector( + [ + create_fake_service_data( + service_key, + service_version, + team_access=None, + everyone_access=None, + product=target_product, + ) + ] + ) + + # have access + assert await services_repo.can_get_service( + product_name=target_product, + user_id=user_id, + 
key=service_key, + version=service_version, + ) + + # not found + assert not await services_repo.can_get_service( + product_name=target_product, + user_id=user_id, + key=service_key, + version="0.1.0", + ) + + # has no access + assert not await services_repo.can_get_service( + product_name=target_product, + user_id=5, # OTHER user + key=service_key, + version=service_version, + ) + + +def _create_fake_release_versions(num_versions: int) -> set[str]: + release_versions = set() + while len(release_versions) < num_versions: + release_versions.add( + f"{random.randint(0, 2)}.{random.randint(0, 9)}.{random.randint(0, 9)}" # noqa: S311 + ) + return release_versions + + +async def test_get_service_history_page( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + # inject services with multiple versions + service_key = "simcore/services/dynamic/test-some-service" + num_versions = 10 + + release_versions = _create_fake_release_versions(num_versions) + await services_db_tables_injector( + [ + create_fake_service_data( + service_key, + service_version, + team_access=None, + everyone_access=None, + product=target_product, + ) + for service_version in release_versions + ] + ) + # sorted AFTER injecting + release_versions = sorted(release_versions, key=version.Version, reverse=True) + + assert version.Version(release_versions[0]) > version.Version(release_versions[-1]) + + # fetch full history using get_service_history_page + total_count, history = await services_repo.get_service_history_page( + product_name=target_product, + user_id=user_id, + key=service_key, + ) + assert total_count == num_versions + assert len(history) == num_versions + assert [release.version for release in history] == release_versions + + # fetch full history using deprecated get_service_history + deprecated_history = await services_repo.get_service_history( + product_name=target_product, + user_id=user_id, + key=service_key, + ) + assert len(deprecated_history) == len(history) + assert [release.version for release in deprecated_history] == [ + release.version for release in history + ] + + # fetch paginated history + limit = 3 + offset = 2 + total_count, paginated_history = await services_repo.get_service_history_page( + product_name=target_product, + user_id=user_id, + key=service_key, + pagination_limit=limit, + pagination_offset=offset, + ) + assert total_count == num_versions + assert len(paginated_history) == limit + assert [release.version for release in paginated_history] == release_versions[ + offset : offset + limit + ] + + # compare paginated results with the corresponding slice of the full history + assert paginated_history == history[offset : offset + limit] + + +@pytest.mark.parametrize( + "expected_service_type,service_prefix", SERVICE_TYPE_TO_PREFIX_MAP.items() +) +async def test_get_service_history_page_with_filters( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, + expected_service_type: ServiceType, + service_prefix: str, +): + # Setup: Inject services with multiple versions and types + service_key = f"{service_prefix}/test-service" + num_versions = 10 + + release_versions = _create_fake_release_versions(num_versions) + + await services_db_tables_injector( + [ + create_fake_service_data( + service_key, + service_version, + 
team_access=None, + everyone_access=None, + product=target_product, + ) + for _, service_version in enumerate(release_versions) + ] + ) + # Sort versions after injecting + release_versions = sorted(release_versions, key=version.Version, reverse=True) + + # Test: Fetch full history with no filters + total_count, history = await services_repo.get_service_history_page( + product_name=target_product, + user_id=user_id, + key=service_key, + ) + assert total_count == num_versions + assert len(history) == num_versions + assert [release.version for release in history] == release_versions + + # Test: Apply filter for + filters = ServiceDBFilters(service_type=expected_service_type) + total_count, filtered_history = await services_repo.get_service_history_page( + product_name=target_product, + user_id=user_id, + key=service_key, + filters=filters, + ) + assert total_count == num_versions + assert len(filtered_history) == num_versions + assert [release.version for release in filtered_history] == release_versions + + # Final check: filter by a different service type expecting no results + different_service_type = ( + ServiceType.COMPUTATIONAL + if expected_service_type != ServiceType.COMPUTATIONAL + else ServiceType.DYNAMIC + ) + filters = ServiceDBFilters(service_type=different_service_type) + total_count, no_history = await services_repo.get_service_history_page( + product_name=target_product, + user_id=user_id, + key=service_key, + filters=filters, + ) + assert total_count == 0 + assert no_history == [] + + +async def test_list_services_from_published_templates( + user: dict[str, Any], + projects_repo: ProjectsRepository, + sqlalchemy_async_engine: AsyncEngine, +): + # Setup: Use AsyncExitStack to manage multiple insert_and_get_row_lifespan + async with AsyncExitStack() as stack: + await stack.enter_async_context( + insert_and_get_row_lifespan( + sqlalchemy_async_engine, + table=projects, + values=random_project( + uuid="template-1", + type=ProjectType.TEMPLATE, + published=True, + prj_owner=user["id"], + workbench={ + "node-1": { + "key": "simcore/services/dynamic/jupyterlab", + "version": "1.0.0", + }, + "node-2": { + "key": "simcore/services/frontend/file-picker", + "version": "1.0.0", + }, + }, + ), + pk_col=projects.c.uuid, + pk_value="template-1", + ) + ) + await stack.enter_async_context( + insert_and_get_row_lifespan( + sqlalchemy_async_engine, + table=projects, + values=random_project( + uuid="template-2", + type=ProjectType.TEMPLATE, + published=False, + prj_owner=user["id"], + workbench={ + "node-1": { + "key": "simcore/services/dynamic/some-service", + "version": "2.0.0", + }, + }, + ), + pk_col=projects.c.uuid, + pk_value="template-2", + ) + ) + + # Act: Call the method + services = await projects_repo.list_services_from_published_templates() + + # Assert: Validate the results + assert len(services) == 1 + assert services[0].key == "simcore/services/dynamic/jupyterlab" + assert services[0].version == "1.0.0" + + +async def test_list_services_from_published_templates_with_invalid_service( + user: dict[str, Any], + projects_repo: ProjectsRepository, + sqlalchemy_async_engine: AsyncEngine, + caplog, +): + # Setup: Use AsyncExitStack to manage insert_and_get_row_lifespan + async with AsyncExitStack() as stack: + await stack.enter_async_context( + insert_and_get_row_lifespan( + sqlalchemy_async_engine, + table=projects, + values=random_project( + uuid="template-1", + type=ProjectType.TEMPLATE, + published=True, + prj_owner=user["id"], + workbench={ + "node-1": { + "key": 
"simcore/services/frontend/file-picker", + "version": "1.0.0", + }, + "node-2": { + "key": "simcore/services/dynamic/invalid-service", + "version": "invalid", + }, + }, + ), + pk_col=projects.c.uuid, + pk_value="template-1", + ) + ) + + # Act: Call the method and capture logs + with caplog.at_level(logging.WARNING): + services = await projects_repo.list_services_from_published_templates() + + # Assert: Validate the results + assert len(services) == 0 # No valid services should be returned + assert ( + "service {'key': 'simcore/services/dynamic/invalid-service', 'version': 'invalid'} could not be validated" + in caplog.text + ) + + +async def test_compare_list_all_and_latest_services( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + # Setup: Create multiple versions of the same service and a few distinct services + service_data: list[tuple] = [] + + # Service 1 with multiple versions + service_key_1 = "simcore/services/dynamic/multi-version" + service_versions_1 = ["1.0.0", "1.1.0", "2.0.0"] + service_data.extend( + [ + create_fake_service_data( + service_key_1, + version_, + team_access=None, + everyone_access=None, + product=target_product, + ) + for version_ in service_versions_1 + ] + ) + + # Service 2 with single version + service_key_2 = "simcore/services/dynamic/single-version" + service_data.append( + create_fake_service_data( + service_key_2, + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + ) + + # Service 3 with computational type + service_key_3 = "simcore/services/comp/computational-service" + service_versions_3 = ["0.5.0", "1.0.0"] + service_data.extend( + [ + create_fake_service_data( + service_key_3, + version_, + team_access=None, + everyone_access=None, + product=target_product, + ) + for version_ in service_versions_3 + ] + ) + + await services_db_tables_injector(service_data) + + # Test 1: Compare all services vs latest without filters + total_all, all_services = await services_repo.list_all_services( + product_name=target_product, user_id=user_id + ) + total_latest, latest_services = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id + ) + + # Verify counts + # All services should be 6 (3 versions of service 1, 1 of service 2, 2 of service 3) + assert total_all == 6 + # Latest services should be 3 (one latest for each distinct service key) + assert total_latest == 3 + + # Verify latest services are contained in all services + latest_key_versions = {(s.key, s.version) for s in latest_services} + all_key_versions = {(s.key, s.version) for s in all_services} + assert latest_key_versions.issubset(all_key_versions) + + # Verify latest versions are correct + latest_versions_by_key = {s.key: s.version for s in latest_services} + assert latest_versions_by_key[service_key_1] == "2.0.0" + assert latest_versions_by_key[service_key_2] == "1.0.0" + assert latest_versions_by_key[service_key_3] == "1.0.0" + + # Test 2: Using service_type filter to get only dynamic services + filters = ServiceDBFilters(service_type=ServiceType.DYNAMIC) + + total_all_filtered, all_services_filtered = await services_repo.list_all_services( + product_name=target_product, user_id=user_id, filters=filters + ) + total_latest_filtered, latest_services_filtered = ( + await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + ) + + # Verify 
counts with filter + assert total_all_filtered == 4 # 3 versions of service 1, 1 of service 2 + assert total_latest_filtered == 2 # 1 latest each for service 1 and 2 + + # Verify service types are correct after filtering + assert all( + s.key.startswith(DYNAMIC_SERVICE_KEY_PREFIX) for s in all_services_filtered + ) + assert all( + s.key.startswith(DYNAMIC_SERVICE_KEY_PREFIX) for s in latest_services_filtered + ) + + # Verify latest versions are correct + latest_versions_by_key = {s.key: s.version for s in latest_services_filtered} + assert latest_versions_by_key[service_key_1] == "2.0.0" + assert latest_versions_by_key[service_key_2] == "1.0.0" + assert service_key_3 not in latest_versions_by_key # Filtered out + + # Test 3: Using service_key_pattern to find specific service + filters = ServiceDBFilters(service_key_pattern="*/multi-*") + + total_all_filtered, all_services_filtered = await services_repo.list_all_services( + product_name=target_product, user_id=user_id, filters=filters + ) + total_latest_filtered, latest_services_filtered = ( + await services_repo.list_latest_services( + product_name=target_product, user_id=user_id, filters=filters + ) + ) + + # Verify counts with key pattern filter + assert total_all_filtered == 3 # All 3 versions of service 1 + assert total_latest_filtered == 1 # Only latest version of service 1 + + # Verify service key pattern is matched + assert all(s.key == service_key_1 for s in all_services_filtered) + assert all(s.key == service_key_1 for s in latest_services_filtered) + + # Test 4: Pagination + # Get first page (limit=2) + total_all_page1, all_services_page1 = await services_repo.list_all_services( + product_name=target_product, + user_id=user_id, + pagination_limit=2, + pagination_offset=0, + ) + + # Get second page (limit=2, offset=2) + total_all_page2, all_services_page2 = await services_repo.list_all_services( + product_name=target_product, + user_id=user_id, + pagination_limit=2, + pagination_offset=2, + ) + + # Verify pagination + assert total_all_page1 == 6 # Total count should still be total + assert total_all_page2 == 6 + assert len(all_services_page1) == 2 # But only 2 items on first page + assert len(all_services_page2) == 2 # And 2 items on second page + + # Ensure pages have different items + page1_key_versions = {(s.key, s.version) for s in all_services_page1} + page2_key_versions = {(s.key, s.version) for s in all_services_page2} + assert not page1_key_versions.intersection(page2_key_versions) + + +async def test_list_all_services_empty_database( + target_product: ProductName, + services_repo: ServicesRepository, + user_id: UserID, +): + """Test list_all_services and list_latest_services with an empty database.""" + # Test with empty database + total_all, all_services = await services_repo.list_all_services( + product_name=target_product, user_id=user_id + ) + total_latest, latest_services = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id + ) + + assert total_all == 0 + assert len(all_services) == 0 + assert total_latest == 0 + assert len(latest_services) == 0 + + +async def test_list_all_services_deprecated_versions( + target_product: ProductName, + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, + services_repo: ServicesRepository, + user_id: UserID, +): + """Test that list_all_services includes deprecated versions while list_latest_services ignores them.""" + from datetime import datetime, timedelta + + # Create a service with regular and 
deprecated versions + service_key = "simcore/services/dynamic/with-deprecated" + service_data = [] + + # Add regular version + service_data.append( + create_fake_service_data( + service_key, + "1.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + ) + + # Add deprecated version (with higher version number) + deprecated_service = create_fake_service_data( + service_key, + "2.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + # Set deprecated timestamp to yesterday + deprecated_service[0]["deprecated"] = datetime.now() - timedelta(days=1) + service_data.append(deprecated_service) + + # Add newer non-deprecated version + service_data.append( + create_fake_service_data( + service_key, + "3.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + ) + + await services_db_tables_injector(service_data) + + # Get all services - should include both deprecated and non-deprecated + total_all, all_services = await services_repo.list_all_services( + product_name=target_product, user_id=user_id + ) + + # Get latest services - should only show latest non-deprecated + total_latest, latest_services = await services_repo.list_latest_services( + product_name=target_product, user_id=user_id + ) + + # Verify counts + assert total_all == 3 # All 3 versions + + # Verify latest is the newest non-deprecated version + assert len(latest_services) == 1 + assert latest_services[0].key == service_key + assert latest_services[0].version == "3.0.0" + + # Get versions from all services + versions = [s.version for s in all_services if s.key == service_key] + assert sorted(versions) == ["1.0.0", "2.0.0", "3.0.0"] + + # Verify the deprecated status is correctly set + for service in all_services: + if service.key == service_key and service.version == "2.0.0": + assert service.deprecated is not None + else: + assert service.deprecated is None diff --git a/services/catalog/tests/unit/with_dbs/test_service_access_rights.py b/services/catalog/tests/unit/with_dbs/test_service_access_rights.py new file mode 100644 index 00000000000..eeef6bc8bb5 --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_service_access_rights.py @@ -0,0 +1,205 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import Callable + +import simcore_service_catalog.service.access_rights +from fastapi import FastAPI +from models_library.groups import GroupAtDB +from models_library.products import ProductName +from models_library.services import ServiceMetaDataPublished, ServiceVersion +from pydantic import TypeAdapter +from simcore_service_catalog.models.services_db import ServiceAccessRightsDB +from simcore_service_catalog.repository.services import ServicesRepository +from simcore_service_catalog.service.access_rights import ( + evaluate_auto_upgrade_policy, + evaluate_default_policy, + reduce_access_rights, +) +from sqlalchemy.ext.asyncio import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +def test_reduce_access_rights(): + sample = ServiceAccessRightsDB.model_validate( + { + "key": "simcore/services/dynamic/sim4life", + "version": "1.0.9", + "gid": 8, + "execute_access": True, + "write_access": True, + "product_name": "osparc", + } + ) + + # fixture with overrides and with other products + reduced = reduce_access_rights( + [ + sample.model_copy(deep=True), + sample.model_copy(deep=True), + 
sample.model_copy(update={"execute_access": False}, deep=True), + sample.model_copy(update={"product_name": "s4l"}, deep=True), + ] + ) + + # two products with the same flags + assert len(reduced) == 2 + assert reduced[0].model_dump(include={"execute_access", "write_access"}) == { + "execute_access": True, + "write_access": True, + } + assert reduced[1].model_dump(include={"execute_access", "write_access"}) == { + "execute_access": True, + "write_access": True, + } + + # two gids with the different falgs + reduced = reduce_access_rights( + [ + sample.model_copy(deep=True), + sample.model_copy( + update={"gid": 1, "execute_access": True, "write_access": False}, + deep=True, + ), + ] + ) + + assert len(reduced) == 2 + assert reduced[0].model_dump(include={"execute_access", "write_access"}) == { + "execute_access": True, + "write_access": True, + } + assert reduced[1].model_dump(include={"execute_access", "write_access"}) == { + "execute_access": True, + "write_access": False, + } + + +async def test_auto_upgrade_policy( + sqlalchemy_async_engine: AsyncEngine, + user_groups_ids: list[int], + target_product: ProductName, + other_product: ProductName, + services_db_tables_injector: Callable, + create_fake_service_data: Callable, + mocker, +): + everyone_gid, user_gid, team_gid = user_groups_ids + + # Avoids calls to director API + mocker.patch.object( + simcore_service_catalog.service.access_rights, + "_is_old_service", + return_value=False, + ) + # Avoids creating a users + user_to_group table + data = GroupAtDB.model_json_schema()["example"] + data["gid"] = everyone_gid + mocker.patch.object( + simcore_service_catalog.service.access_rights.GroupsRepository, + "get_everyone_group", + return_value=GroupAtDB.model_validate(data), + ) + mocker.patch.object( + simcore_service_catalog.service.access_rights.GroupsRepository, + "get_user_gid_from_email", + return_value=user_gid, + ) + + # SETUP --- + MOST_UPDATED_EXAMPLE = -1 + new_service_metadata = ServiceMetaDataPublished.model_validate( + ServiceMetaDataPublished.model_json_schema()["examples"][MOST_UPDATED_EXAMPLE] + ) + new_service_metadata.version = TypeAdapter(ServiceVersion).validate_python("1.0.11") + + # we have three versions of the service in the database for which the sorting matters: (1.0.11 should inherit from 1.0.10 not 1.0.9) + await services_db_tables_injector( + [ + create_fake_service_data( + new_service_metadata.key, + "1.0.1", + team_access=None, + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + new_service_metadata.key, + "1.0.9", + team_access=None, + everyone_access=None, + product=target_product, + ), + # new release is a patch on released 1.0.X + # which were released in two different product + create_fake_service_data( + new_service_metadata.key, + "1.0.10", + team_access="x", + everyone_access=None, + product=target_product, + ), + create_fake_service_data( + new_service_metadata.key, + "1.0.10", + team_access="x", + everyone_access=None, + product=other_product, + ), + ] + ) + # ------------ + + app = FastAPI() + app.state.engine = sqlalchemy_async_engine + app.state.settings = mocker.Mock() + app.state.default_product_name = target_product + + services_repo = ServicesRepository(app.state.engine) + + # DEFAULT policies + owner_gid, service_access_rights = await evaluate_default_policy( + app, new_service_metadata + ) + assert owner_gid == user_gid + assert len(service_access_rights) == 1 + assert {a.gid for a in service_access_rights} == {owner_gid} + assert 
service_access_rights[0].model_dump() == { + "key": new_service_metadata.key, + "version": new_service_metadata.version, + "gid": user_gid, + "product_name": target_product, + "execute_access": True, + "write_access": True, + } + assert service_access_rights[0].product_name == target_product + + # AUTO-UPGRADE PATCH policy + inherited_access_rights = await evaluate_auto_upgrade_policy( + new_service_metadata, services_repo + ) + + assert len(inherited_access_rights) == 4 + assert {a.gid for a in inherited_access_rights} == {team_gid, owner_gid} + assert {a.product_name for a in inherited_access_rights} == { + target_product, + other_product, + } + + # ALL + service_access_rights += inherited_access_rights + service_access_rights = reduce_access_rights(service_access_rights) + + assert len(service_access_rights) == 4 + assert {a.gid for a in service_access_rights} == {team_gid, owner_gid} + assert {a.product_name for a in service_access_rights} == { + target_product, + other_product, + } diff --git a/services/catalog/tests/unit/with_dbs/test_service_catalog_services.py b/services/catalog/tests/unit/with_dbs/test_service_catalog_services.py new file mode 100644 index 00000000000..5faaf6b384d --- /dev/null +++ b/services/catalog/tests/unit/with_dbs/test_service_catalog_services.py @@ -0,0 +1,353 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from collections.abc import Callable +from datetime import datetime, timedelta +from typing import Any + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_catalog.services import MyServiceGet, ServiceSummary +from models_library.products import ProductName +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.catalog_services import CreateFakeServiceDataCallable +from respx.router import MockRouter +from simcore_service_catalog.api._dependencies.director import get_director_client +from simcore_service_catalog.clients.director import DirectorClient +from simcore_service_catalog.repository.groups import GroupsRepository +from simcore_service_catalog.repository.services import ServicesRepository +from simcore_service_catalog.service import catalog_services, manifest +from sqlalchemy.ext.asyncio import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def services_repo(sqlalchemy_async_engine: AsyncEngine): + return ServicesRepository(sqlalchemy_async_engine) + + +@pytest.fixture +def groups_repo(sqlalchemy_async_engine: AsyncEngine): + return GroupsRepository(sqlalchemy_async_engine) + + +@pytest.fixture +def num_services() -> int: + return 5 + + +@pytest.fixture +def num_versions_per_service() -> int: + return 20 + + +@pytest.fixture +def fake_services_data( + target_product: ProductName, + create_fake_service_data: Callable, + num_services: int, + num_versions_per_service: int, +) -> list: + return [ + create_fake_service_data( + f"simcore/services/comp/some-service-{n}", + f"{v}.0.0", + team_access=None, + everyone_access=None, + product=target_product, + ) + for n in range(num_services) + for v in range(num_versions_per_service) + ] + + +@pytest.fixture +def expected_director_rest_api_list_services( + expected_director_rest_api_list_services: list[dict[str, Any]], + fake_services_data: list, + create_director_list_services_from: Callable, +) -> list[dict[str, Any]]: 
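+    """Overrides the same-named fixture (presumably provided by a shared conftest) so that
+    the mocked director REST API lists entries built from `fake_services_data`.
+    """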
+ # OVERRIDES: Changes the values returned by the mocked_director_service_api + + return create_director_list_services_from( + expected_director_rest_api_list_services, fake_services_data + ) + + +@pytest.fixture +async def background_sync_task_mocked( + background_task_lifespan_disabled: None, + services_db_tables_injector: Callable, + fake_services_data: list, +) -> None: + """ + Emulates a sync backgroundtask that injects + some services in the db + """ + await services_db_tables_injector(fake_services_data) + + +@pytest.fixture +async def director_client(app: FastAPI) -> DirectorClient: + director_api = get_director_client(app) + + # ensures manifest API cache is reset + assert hasattr(manifest.get_service, "cache") + assert await manifest.get_service.cache.clear() + + return director_api + + +async def test_list_latest_catalog_services( + background_sync_task_mocked: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + target_product: ProductName, + services_repo: ServicesRepository, + user_id: UserID, + director_client: DirectorClient, + num_services: int, +): + + offset = 1 + limit = 2 + assert limit < num_services + + assert not mocked_director_rest_api["get_service"].called + + total_count, page_items = await catalog_services.list_latest_catalog_services( + services_repo, + director_client, + product_name=target_product, + user_id=user_id, + limit=limit, + offset=offset, + ) + + assert total_count == num_services + assert page_items + assert len(page_items) <= limit + assert mocked_director_rest_api["get_service"].called + assert mocked_director_rest_api["get_service"].call_count == limit + + for item in page_items: + assert item.access_rights + assert item.owner is not None + + got = await catalog_services.get_catalog_service( + services_repo, + director_client, + product_name=target_product, + user_id=user_id, + service_key=item.key, + service_version=item.version, + ) + + assert got.model_dump(exclude={"history"}) == item.model_dump( + exclude={"release"} + ) + assert item.release in got.history + + # since it is cached, it should only call it `limit` times + assert mocked_director_rest_api["get_service"].call_count == limit + + +async def test_batch_get_my_services( + background_task_lifespan_disabled: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + target_product: ProductName, + services_repo: ServicesRepository, + groups_repo: GroupsRepository, + user_id: UserID, + user: dict[str, Any], + other_user: dict[str, Any], + create_fake_service_data: CreateFakeServiceDataCallable, + services_db_tables_injector: Callable, +): + # catalog + service_key = "simcore/services/comp/some-service" + service_version_1 = "1.0.0" # can upgrade to 1.0.1 + service_version_2 = "1.0.10" # latest + + other_service_key = "simcore/services/comp/other-service" + other_service_version = "2.1.2" + + expected_retirement = datetime.utcnow() + timedelta( + days=1 + ) # NOTE: old offset-naive column + + # Owned by user + fake_service_1 = create_fake_service_data( + service_key, + service_version_1, + team_access=None, + everyone_access=None, + product=target_product, + deprecated=expected_retirement, + ) + fake_service_2 = create_fake_service_data( + service_key, + service_version_2, + team_access="x", + everyone_access=None, + product=target_product, + ) + + # Owned by other-user + fake_service_3 = create_fake_service_data( + other_service_key, + other_service_version, + team_access=None, + everyone_access=None, + 
product=target_product, + ) + _service, _owner_access = fake_service_3 + _service["owner"] = other_user["primary_gid"] + _owner_access["gid"] = other_user["primary_gid"] + + # Inject fake services into the database + await services_db_tables_injector([fake_service_1, fake_service_2, fake_service_3]) + + # UNDER TEST ------------------------------- + + # Batch get services e.g. services in a project + services_ids = [ + (service_key, service_version_1), + (other_service_key, other_service_version), + ] + + my_services = await catalog_services.batch_get_user_services( + services_repo, + groups_repo, + product_name=target_product, + user_id=user_id, + ids=services_ids, + ) + + # CHECKS ------------------------------- + + # assert returned order and length as ids + assert services_ids == [(sc.key, sc.release.version) for sc in my_services] + + assert my_services == TypeAdapter(list[MyServiceGet]).validate_python( + [ + { + "key": "simcore/services/comp/some-service", + "release": { + "version": service_version_1, + "version_display": None, + "released": my_services[0].release.released, + "retired": expected_retirement, + "compatibility": { + "can_update_to": {"version": service_version_2} + }, # can be updated + }, + "owner": user["primary_gid"], + "my_access_rights": {"execute": True, "write": True}, # full access + }, + { + "key": "simcore/services/comp/other-service", + "release": { + "version": other_service_version, + "version_display": None, + "released": my_services[1].release.released, + "retired": None, + "compatibility": None, # cannot be updated + }, + "owner": other_user["primary_gid"], # needs to request access + "my_access_rights": { + "execute": False, + "write": False, + }, + }, + ] + ) + + +async def test_list_all_vs_latest_services( + background_sync_task_mocked: None, + rabbitmq_and_rpc_setup_disabled: None, + mocked_director_rest_api: MockRouter, + target_product: ProductName, + services_repo: ServicesRepository, + user_id: UserID, + director_client: DirectorClient, + num_services: int, + num_versions_per_service: int, +): + """Test that list_all_catalog_services returns all services as summaries while + list_latest_catalog_services returns only the latest version of each service with full details. 
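+    (Exercised here via `list_latest_catalog_services` and `list_all_service_summaries`.)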
+ """ + # No pagination to get all services + limit = None + offset = 0 + + # Get latest services first + latest_total_count, latest_items = ( + await catalog_services.list_latest_catalog_services( + services_repo, + director_client, + product_name=target_product, + user_id=user_id, + limit=limit, + offset=offset, + ) + ) + + # Get all services as summaries + all_total_count, all_items = await catalog_services.list_all_service_summaries( + services_repo, + director_client, + product_name=target_product, + user_id=user_id, + limit=limit, + offset=offset, + ) + + # Verify counts + # - latest_total_count should equal num_services since we only get the latest version of each service + # - all_total_count should equal num_services * num_versions_per_service since we get all versions + assert latest_total_count == num_services + assert all_total_count == num_services * num_versions_per_service + + # Verify we got the expected number of items + assert len(latest_items) == num_services + assert len(all_items) == num_services * num_versions_per_service + + # Collect all service keys from latest items + latest_keys = {item.key for item in latest_items} + + # Verify all returned items have the expected structure + for item in all_items: + # Each summary should have the basic fields + assert item.key in latest_keys + assert item.name + assert item.description is not None + assert isinstance(item, ServiceSummary) + + # Group all items by key + key_to_all_versions = {} + for item in all_items: + if item.key not in key_to_all_versions: + key_to_all_versions[item.key] = [] + key_to_all_versions[item.key].append(item) + + # For each service key, verify we have the expected number of versions + for key, versions in key_to_all_versions.items(): + assert len(versions) == num_versions_per_service + + # Find this service in latest_items + latest_item = next(item for item in latest_items if item.key == key) + # Verify there's a summary item with the same version as the latest + assert any(item.version == latest_item.version for item in versions) diff --git a/services/catalog/tests/unit/with_dbs/test_services_access_rights.py b/services/catalog/tests/unit/with_dbs/test_services_access_rights.py deleted file mode 100644 index f5bc2c06b5a..00000000000 --- a/services/catalog/tests/unit/with_dbs/test_services_access_rights.py +++ /dev/null @@ -1,199 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from typing import Callable - -from fastapi import FastAPI -from models_library.services import ServiceDockerData -from models_library.services_db import ServiceAccessRightsAtDB -from simcore_service_catalog.db.repositories.services import ServicesRepository -from simcore_service_catalog.models.domain.group import GroupAtDB -from simcore_service_catalog.services.access_rights import ( - evaluate_auto_upgrade_policy, - evaluate_default_policy, - reduce_access_rights, -) -from sqlalchemy.ext.asyncio import AsyncEngine - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -def test_reduce_access_rights(): - sample = ServiceAccessRightsAtDB.parse_obj( - { - "key": "simcore/services/dynamic/sim4life", - "version": "1.0.9", - "gid": 8, - "execute_access": True, - "write_access": True, - "product_name": "osparc", - } - ) - - # fixture with overrides and with other products - reduced = reduce_access_rights( - [ - sample.copy(deep=True), - sample.copy(deep=True), - 
sample.copy(update={"execute_access": False}, deep=True), - sample.copy(update={"product_name": "s4l"}, deep=True), - ] - ) - - # two products with the same flags - assert len(reduced) == 2 - assert reduced[0].dict(include={"execute_access", "write_access"}) == { - "execute_access": True, - "write_access": True, - } - assert reduced[1].dict(include={"execute_access", "write_access"}) == { - "execute_access": True, - "write_access": True, - } - - # two gids with the different falgs - reduced = reduce_access_rights( - [ - sample.copy(deep=True), - sample.copy( - update={"gid": 1, "execute_access": True, "write_access": False}, - deep=True, - ), - ] - ) - - assert len(reduced) == 2 - assert reduced[0].dict(include={"execute_access", "write_access"}) == { - "execute_access": True, - "write_access": True, - } - assert reduced[1].dict(include={"execute_access", "write_access"}) == { - "execute_access": True, - "write_access": False, - } - - -async def test_auto_upgrade_policy( - sqlalchemy_async_engine: AsyncEngine, - user_groups_ids: list[int], - products_names: list[str], - services_db_tables_injector: Callable, - service_catalog_faker: Callable, - mocker, -): - everyone_gid, user_gid, team_gid = user_groups_ids - target_product = products_names[0] - - # Avoids calls to director API - mocker.patch( - "simcore_service_catalog.services.access_rights._is_old_service", - return_value=False, - ) - # Avoids creating a users + user_to_group table - data = GroupAtDB.Config.schema_extra["example"] - data["gid"] = everyone_gid - mocker.patch( - "simcore_service_catalog.services.access_rights.GroupsRepository.get_everyone_group", - return_value=GroupAtDB.parse_obj(data), - ) - mocker.patch( - "simcore_service_catalog.services.access_rights.GroupsRepository.get_user_gid_from_email", - return_value=user_gid, - ) - - # SETUP --- - MOST_UPDATED_EXAMPLE = -1 - new_service_metadata = ServiceDockerData.parse_obj( - ServiceDockerData.Config.schema_extra["examples"][MOST_UPDATED_EXAMPLE] - ) - new_service_metadata.version = "1.0.11" - - # we have three versions of the service in the database for which the sorting matters: (1.0.11 should inherit from 1.0.10 not 1.0.9) - await services_db_tables_injector( - [ - service_catalog_faker( - new_service_metadata.key, - "1.0.1", - team_access=None, - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - new_service_metadata.key, - "1.0.9", - team_access=None, - everyone_access=None, - product=target_product, - ), - # new release is a patch on released 1.0.X - # which were released in two different product - service_catalog_faker( - new_service_metadata.key, - "1.0.10", - team_access="x", - everyone_access=None, - product=target_product, - ), - service_catalog_faker( - new_service_metadata.key, - "1.0.10", - team_access="x", - everyone_access=None, - product=products_names[-1], - ), - ] - ) - # ------------ - - app = FastAPI() - app.state.engine = sqlalchemy_async_engine - app.state.settings = mocker.Mock() - app.state.default_product_name = target_product - - services_repo = ServicesRepository(app.state.engine) - - # DEFAULT policies - owner_gid, service_access_rights = await evaluate_default_policy( - app, new_service_metadata - ) - assert owner_gid == user_gid - assert len(service_access_rights) == 1 - assert {a.gid for a in service_access_rights} == {owner_gid} - assert service_access_rights[0].dict() == { - "key": new_service_metadata.key, - "version": new_service_metadata.version, - "gid": user_gid, - "product_name": target_product, - 
"execute_access": True, - "write_access": True, - } - assert service_access_rights[0].product_name == target_product - - # AUTO-UPGRADE PATCH policy - inherited_access_rights = await evaluate_auto_upgrade_policy( - new_service_metadata, services_repo - ) - - assert len(inherited_access_rights) == 4 - assert {a.gid for a in inherited_access_rights} == {team_gid, owner_gid} - assert {a.product_name for a in inherited_access_rights} == { - target_product, - products_names[-1], - } - - # ALL - service_access_rights += inherited_access_rights - service_access_rights = reduce_access_rights(service_access_rights) - - assert len(service_access_rights) == 4 - assert {a.gid for a in service_access_rights} == {team_gid, owner_gid} - assert {a.product_name for a in service_access_rights} == { - target_product, - products_names[-1], - } diff --git a/services/clusters-keeper/.env-devel b/services/clusters-keeper/.env-devel new file mode 100644 index 00000000000..1c103bc8dd2 --- /dev/null +++ b/services/clusters-keeper/.env-devel @@ -0,0 +1,21 @@ +CLUSTERS_KEEPER_DEBUG=true +CLUSTERS_KEEPER_LOGLEVEL=INFO +CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION=60 +CLUSTERS_KEEPER_TASK_INTERVAL=30 +EC2_CLUSTERS_KEEPER_ACCESS_KEY_ID=XXXXXXXXXX +PRIMARY_EC2_INSTANCES_ALLOWED_TYPES='{"t2.medium":"ami_id": "XXXXXXXXXX", "custom_boot_scripts": ["whoami"]}}' +PRIMARY_EC2_INSTANCES_KEY_NAME=XXXXXXXXXX +PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS=XXXXXXXXXX +PRIMARY_EC2_INSTANCES_SUBNET_ID=XXXXXXXXXX +EC2_CLUSTERS_KEEPER_SECRET_ACCESS_KEY=XXXXXXXXXX +CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX="testing" +LOG_FORMAT_LOCAL_DEV_ENABLED=True +RABBIT_HOST=rabbit +RABBIT_PASSWORD=test +RABBIT_PORT=5672 +RABBIT_SECURE=false +RABBIT_USER=test +REDIS_HOST=redis +REDIS_PORT=6379 +SC_BOOT_MODE=debug +SC_BUILD_TARGET=development diff --git a/services/clusters-keeper/Dockerfile b/services/clusters-keeper/Dockerfile new file mode 100644 index 00000000000..4df6845c6fd --- /dev/null +++ b/services/clusters-keeper/Dockerfile @@ -0,0 +1,202 @@ +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd sercices/clusters-keeper +# docker build -f Dockerfile -t clusters-keeper:prod --target production ../../ +# docker run clusters-keeper:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=sanderegg + +# NOTE: to list the latest version run `make` inside `scripts/apt-packages-versions` +ENV DOCKER_APT_VERSION="5:26.1.4-1~debian.12~bookworm" + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + gosu \ + ca-certificates \ + curl \ + gnupg \ + lsb-release \ + && mkdir -p /etc/apt/keyrings \ + && curl -fsSL 
https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + # only the cli is needed and we remove the unnecessary stuff again + docker-ce-cli=${DOCKER_APT_VERSION} \ + && apt-get remove -y\ + gnupg \ + curl \ + lsb-release \ + && apt-get clean -y\ + # verify that the binary works + && gosu nobody true + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/scu/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv. +ENV PATH="${VIRTUAL_ENV}/bin:$PATH" + +EXPOSE 8000 +EXPOSE 3000 + +# -------------------------- Build stage ------------------- +# Installs build/package management tools and third party dependencies +# +# + /build WORKDIR +# +FROM base AS build + +ENV SC_BUILD_TARGET=build + +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools + +WORKDIR /build + +# install base 3rd party dependencies + + + +# --------------------------Prod-depends-only stage ------------------- +# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) +# +# + /build +# + services/clusters-keeper [scu:scu] WORKDIR +# +FROM build AS prod-only-deps + +ENV SC_BUILD_TARGET=prod-only-deps + +WORKDIR /build/services/clusters-keeper + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/clusters-keeper,target=/build/services/clusters-keeper,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + + +# --------------------------Production stage ------------------- +# Final cleanup up to reduce image size and startup setup +# Runs as scu (non-root user) +# +# + /home/scu $HOME = WORKDIR +# + services/clusters-keeper [scu:scu] +# +FROM base AS production + +ENV SC_BUILD_TARGET=production \ + SC_BOOT_MODE=production + +ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + +WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu + +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +# Copies booting scripts +COPY --chown=scu:scu 
services/clusters-keeper/docker services/clusters-keeper/docker +RUN chmod +x services/clusters-keeper/docker/*.sh + + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "services/clusters-keeper/docker/healthcheck.py", "http://localhost:8000/"] + +ENTRYPOINT [ "/bin/sh", "services/clusters-keeper/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/clusters-keeper/docker/boot.sh"] + + +# --------------------------Development stage ------------------- +# Source code accessible in host but runs in container +# Runs as scu with the same gid/uid as the host user +# Placed at the end to speed up the build when targeting production images +# +# + /devel WORKDIR +# + services (mounted volume) +# +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/clusters-keeper + +WORKDIR /devel + +RUN chown -R scu:scu "${VIRTUAL_ENV}" + +ENTRYPOINT ["/bin/sh", "services/clusters-keeper/docker/entrypoint.sh"] +CMD ["/bin/sh", "services/clusters-keeper/docker/boot.sh"] diff --git a/services/clusters-keeper/Makefile b/services/clusters-keeper/Makefile new file mode 100644 index 00000000000..ea96e6102ed --- /dev/null +++ b/services/clusters-keeper/Makefile @@ -0,0 +1,5 @@ +# +# DEVELOPMENT recipes for Clusters-keeper service +# +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile diff --git a/services/clusters-keeper/README.md b/services/clusters-keeper/README.md new file mode 100644 index 00000000000..d7a73745ba3 --- /dev/null +++ b/services/clusters-keeper/README.md @@ -0,0 +1,39 @@ +# clusters-keeper + + +Service to automatically create on-demand computational clusters + + +```mermaid + +sequenceDiagram + box simcore + participant director-v2 + participant clusters-keeper + end + box external-cluster + participant primary + participant worker + end + Note over primary: dask-scheduler<br/>autoscaling<br/>redis + Note over worker: dask-sidecar + director-v2->>+clusters-keeper: get or create on demand cluster + clusters-keeper-->>+primary: create or get primary EC2 for user_id/wallet_id + Note over clusters-keeper,primary: EC2 + clusters-keeper-->>-director-v2: scheduler url + + director-v2->>+primary: send computational job + primary->>worker: autoscaling: create workers if needed + Note over primary,worker: EC2 + worker->worker: execute job + worker-->>director-v2: return job results + primary->>worker: autoscaling: remove unused workers + Note over primary,worker: EC2 + + clusters-keeper-->>primary: terminate unused clusters + Note over clusters-keeper,primary: EC2 + + + + +``` diff --git a/services/clusters-keeper/VERSION b/services/clusters-keeper/VERSION new file mode 100644 index 00000000000..3eefcb9dd5b --- /dev/null +++ b/services/clusters-keeper/VERSION @@ -0,0 +1 @@ +1.0.0 diff --git a/services/clusters-keeper/docker/boot.sh b/services/clusters-keeper/docker/boot.sh new file mode 100755 index 00000000000..384133c7a87 --- /dev/null +++ b/services/clusters-keeper/docker/boot.sh @@ -0,0 +1,66 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" + +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "Environment :" + printenv | sed 's/=/: /' | sed 's/^/ /' | sort + echo "$INFO" "Python :" + python --version | sed 's/^/ /' + command -v python | sed 's/^/ /' + + cd services/clusters-keeper + uv pip --quiet sync requirements/dev.txt + cd - + echo "$INFO" "PIP :" + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-install debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + +# +# RUNNING application +# + +APP_LOG_LEVEL=${CLUSTERS_KEEPER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/clusters-keeper/src/simcore_service_clusters_keeper && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${CLUSTERS_KEEPER_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --reload \ + $reload_dir_packages + --reload-dir . 
\ + --log-level \"${SERVER_LOG_LEVEL}\" + " +else + exec uvicorn simcore_service_clusters_keeper.main:the_app \ + --host 0.0.0.0 \ + --log-level "${SERVER_LOG_LEVEL}" +fi diff --git a/services/clusters-keeper/docker/entrypoint.sh b/services/clusters-keeper/docker/entrypoint.sh new file mode 100755 index 00000000000..66fbfba9200 --- /dev/null +++ b/services/clusters-keeper/docker/entrypoint.sh @@ -0,0 +1,94 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " +WARNING="WARNING: [$(basename "$0")] " +ERROR="ERROR: [$(basename "$0")] " + +# Read self-signed SSL certificates (if applicable) +# +# In case clusters-keeper must access a docker registry in a secure way using +# non-standard certificates (e.g. self-signed certificates), this call is needed. +# It needs to be executed as root. It is also required for other secured access, e.g. to rabbitmq. +update-ca-certificates + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gid and assigns them to the user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..." + adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." 
+ adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi +fi + + +# Appends docker group if socket is mounted +DOCKER_MOUNT=/var/run/docker.sock +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker + + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." + # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" +fi + +echo "$INFO Starting $* ..." +echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" +echo " local dir : $(ls -al)" + +exec gosu "$SC_USER_NAME" "$@" diff --git a/services/clusters-keeper/docker/healthcheck.py b/services/clusters-keeper/docker/healthcheck.py new file mode 100755 index 00000000000..cb51ed2399e --- /dev/null +++ b/services/clusters-keeper/docker/healthcheck.py @@ -0,0 +1,42 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=20s \ + --start-interval=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + +# Adds a base-path if defined in environ +SIMCORE_NODE_BASEPATH = os.environ.get("SIMCORE_NODE_BASEPATH", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception): + with urlopen(f"{sys.argv[1]}{SIMCORE_NODE_BASEPATH}") as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/clusters-keeper/requirements/Makefile b/services/clusters-keeper/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/clusters-keeper/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/clusters-keeper/requirements/_base.in b/services/clusters-keeper/requirements/_base.in new file mode 100644 index 00000000000..b89686645e2 --- /dev/null +++ b/services/clusters-keeper/requirements/_base.in @@ -0,0 +1,21 @@ +# +# Specifies third-party dependencies for 'services/clusters-keeper/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt +--constraint ../../../services/dask-sidecar/requirements/_dask-distributed.txt + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/aws-library/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + + +dask[distributed] +packaging diff --git a/services/clusters-keeper/requirements/_base.txt b/services/clusters-keeper/requirements/_base.txt new file mode 100644 index 00000000000..08f39756613 --- /dev/null +++ b/services/clusters-keeper/requirements/_base.txt @@ -0,0 +1,874 @@ +aio-pika==9.5.5 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aioboto3==14.3.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +aiobotocore==2.22.0 + # via aioboto3 +aiocache==0.12.3 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aioboto3 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiobotocore + # aiodocker +aioitertools==0.12.0 + # via aiobotocore +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.9.0 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==25.3.0 + # via + # aiohttp + # jsonschema + # referencing +boto3==1.37.3 + # via aiobotocore +botocore==1.37.3 + # via + # aiobotocore + # boto3 + # s3transfer +botocore-stubs==1.38.19 + # via types-aiobotocore +certifi==2025.4.26 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.2 + # via requests +click==8.1.8 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # distributed + # rich-toolkit + # typer + # uvicorn +cloudpickle==3.1.1 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # distributed +dask==2025.5.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/_base.in + # distributed +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +distributed==2025.5.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.3.0 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.41 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.6.0 + # via + # aiohttp + # aiosignal +fsspec==2025.3.2 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +googleapis-common-protos==1.70.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.16.0 + # via + # httpcore + # uvicorn +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.9 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +importlib-metadata==8.6.1 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed + # fastapi +jmespath==1.0.1 + # via + # aiobotocore + # boto3 + # botocore +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2025.4.1 + # via jsonschema +locket==1.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed + # partd +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # jinja2 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +multidict==6.4.4 + # via + # aiobotocore + # aiohttp + # yarl +opentelemetry-api==1.33.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-propagator-aws-xray + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.33.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.54b1 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # 
opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.54b1 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-botocore==0.54b1 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.54b1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-propagator-aws-xray==1.0.2 + # via opentelemetry-instrumentation-botocore +opentelemetry-proto==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.33.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.54b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.54b1 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.18 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==25.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/_base.in + # dask + # distributed + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +partd==1.4.2 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask +prometheus-client==0.22.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==5.29.4 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # distributed +pycryptodome==3.23.0 + # via stream-zip +pydantic==2.11.4 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-extra-types==2.10.4 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via + # aiobotocore + # arrow + # botocore +python-dotenv==1.1.0 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # dask + # distributed + # uvicorn +redis==6.1.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==14.0.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.25.0 + # via + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +sh==2.2.2 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sortedcontainers==2.4.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +starlette==0.46.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +tblib==3.1.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +tenacity==9.1.2 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # dask + # distributed + # partd +tornado==6.5 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +tqdm==4.67.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.4 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-aiobotocore==2.22.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +types-aiobotocore-ec2==2.22.0 + # via types-aiobotocore +types-aiobotocore-s3==2.22.0 + # via types-aiobotocore +types-aiobotocore-ssm==2.22.0 + # via types-aiobotocore +types-awscrt==0.27.2 + # via botocore-stubs +types-python-dateutil==2.9.0.20250516 + # via arrow +typing-extensions==4.13.2 + # via + # aiodebug + # anyio + # exceptiongroup + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer + # types-aiobotocore + # types-aiobotocore-ec2 + # types-aiobotocore-s3 + # types-aiobotocore-ssm + # typing-inspection +typing-inspection==0.4.0 + # via pydantic +urllib3==2.4.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # botocore + # distributed + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.20.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zict==3.0.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # distributed +zipp==3.21.0 + # via + # -c requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # importlib-metadata diff --git a/services/clusters-keeper/requirements/_test.in b/services/clusters-keeper/requirements/_test.in new file mode 100644 index 00000000000..04a702b3cef --- /dev/null +++ b/services/clusters-keeper/requirements/_test.in @@ -0,0 +1,33 @@ +# +# Specifies dependencies required to run 'services/api-server/test' +# both for unit 
and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +aiodocker +asgi-lifespan +coverage +debugpy +deepdiff +docker +faker +fakeredis[lua] +httpx +moto[server] +parse +psutil +pytest +pytest-asyncio +pytest-cov +pytest-mock +pytest-runner +python-dotenv +respx +types-PyYAML diff --git a/services/clusters-keeper/requirements/_test.txt b/services/clusters-keeper/requirements/_test.txt new file mode 100644 index 00000000000..41eacfca34a --- /dev/null +++ b/services/clusters-keeper/requirements/_test.txt @@ -0,0 +1,351 @@ +aiodocker==0.24.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +aiohappyeyeballs==2.6.1 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aiodocker +aiosignal==1.3.2 + # via + # -c requirements/_base.txt + # aiohttp +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.9.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.3.0 + # via + # -c requirements/_base.txt + # aiohttp + # jsonschema + # referencing +aws-sam-translator==1.97.0 + # via cfn-lint +aws-xray-sdk==2.14.0 + # via moto +blinker==1.9.0 + # via flask +boto3==1.37.3 + # via + # -c requirements/_base.txt + # aws-sam-translator + # moto +botocore==1.37.3 + # via + # -c requirements/_base.txt + # aws-xray-sdk + # boto3 + # moto + # s3transfer +certifi==2025.4.26 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +cffi==1.17.1 + # via cryptography +cfn-lint==1.35.1 + # via moto +charset-normalizer==3.4.2 + # via + # -c requirements/_base.txt + # requests +click==8.1.8 + # via + # -c requirements/_base.txt + # flask +coverage==7.8.0 + # via + # -r requirements/_test.in + # pytest-cov +cryptography==45.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # joserfc + # moto +debugpy==1.8.14 + # via -r requirements/_test.in +deepdiff==8.5.0 + # via -r requirements/_test.in +docker==7.1.0 + # via + # -r requirements/_test.in + # moto +faker==37.3.0 + # via -r requirements/_test.in +fakeredis==2.29.0 + # via -r requirements/_test.in +flask==3.1.1 + # via + # flask-cors + # moto +flask-cors==6.0.0 + # via moto +frozenlist==1.6.0 + # via + # -c requirements/_base.txt + # aiohttp + # aiosignal +graphql-core==3.2.6 + # via moto +h11==0.16.0 + # via + # -c requirements/_base.txt + # httpcore +httpcore==1.0.9 + # via + # -c requirements/_base.txt + # httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in + # respx +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests + # yarl +iniconfig==2.1.0 + # via pytest +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # flask + # moto +jmespath==1.0.1 + # via + # -c requirements/_base.txt + # boto3 + # botocore +joserfc==1.0.4 + # via moto +jsonpatch==1.33 + # via cfn-lint +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 + # via jsonpatch +jsonschema==4.23.0 + # via + # 
-c requirements/_base.txt + # aws-sam-translator + # openapi-schema-validator + # openapi-spec-validator +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2025.4.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.11.0 + # via openapi-spec-validator +lupa==2.4 + # via fakeredis +markupsafe==3.0.2 + # via + # -c requirements/_base.txt + # flask + # jinja2 + # werkzeug +moto==5.1.4 + # via -r requirements/_test.in +mpmath==1.3.0 + # via sympy +multidict==6.4.4 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +networkx==3.4.2 + # via cfn-lint +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 + # via moto +orderly-set==5.4.1 + # via deepdiff +packaging==25.0 + # via + # -c requirements/_base.txt + # pytest +parse==1.20.2 + # via -r requirements/_test.in +pathable==0.4.4 + # via jsonschema-path +pluggy==1.6.0 + # via pytest +ply==3.11 + # via jsonpath-ng +propcache==0.3.1 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +psutil==7.0.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 + # via cffi +pydantic==2.11.4 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aws-sam-translator +pydantic-core==2.33.2 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.2.3 + # via moto +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-mock +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.1.1 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 + # via + # -c requirements/_base.txt + # botocore + # moto +python-dotenv==1.1.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # cfn-lint + # jsonschema-path + # moto + # responses +redis==6.1.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # fakeredis +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker + # jsonschema-path + # moto + # responses +responses==0.25.7 + # via moto +respx==0.22.0 + # via -r requirements/_test.in +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.25.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 + # via + # -c requirements/_base.txt + # boto3 +setuptools==80.7.1 + # via moto +six==1.17.0 + # via + # -c requirements/_base.txt + # python-dateutil + # rfc3339-validator +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan +sortedcontainers==2.4.0 + # via + # -c requirements/_base.txt + # fakeredis +sympy==1.14.0 + # via cfn-lint +types-pyyaml==6.0.12.20250516 + # via -r requirements/_test.in +typing-extensions==4.13.2 + # via + # -c requirements/_base.txt + # anyio + # aws-sam-translator + # cfn-lint + # pydantic + # pydantic-core + # typing-inspection +typing-inspection==0.4.0 + # via + # -c requirements/_base.txt + # pydantic +tzdata==2025.2 + # via faker +urllib3==2.4.0 + 
# via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore + # docker + # requests + # responses +werkzeug==3.1.3 + # via + # flask + # flask-cors + # moto +wrapt==1.17.2 + # via + # -c requirements/_base.txt + # aws-xray-sdk +xmltodict==0.14.2 + # via moto +yarl==1.20.0 + # via + # -c requirements/_base.txt + # aiohttp diff --git a/services/clusters-keeper/requirements/_tools.in b/services/clusters-keeper/requirements/_tools.in new file mode 100644 index 00000000000..52a9a39d162 --- /dev/null +++ b/services/clusters-keeper/requirements/_tools.in @@ -0,0 +1,7 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt + +watchdog[watchmedo] diff --git a/services/clusters-keeper/requirements/_tools.txt b/services/clusters-keeper/requirements/_tools.txt new file mode 100644 index 00000000000..c76d3992bbe --- /dev/null +++ b/services/clusters-keeper/requirements/_tools.txt @@ -0,0 +1,89 @@ +astroid==3.3.10 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # pip-tools +dill==0.4.0 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.18.0 + # via virtualenv +identify==2.6.10 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==25.0 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.1.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.8 + # via + # black + # pylint + # virtualenv +pre-commit==4.2.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.7 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_test.txt + # pre-commit + # watchdog +ruff==0.11.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==80.7.1 + # via + # -c requirements/_test.txt + # pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.13.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.31.2 + # via pre-commit +watchdog==6.0.0 + # via -r requirements/_tools.in +wheel==0.45.1 + # via pip-tools diff --git a/services/clusters-keeper/requirements/ci.txt b/services/clusters-keeper/requirements/ci.txt new file mode 100644 index 00000000000..22fd83a2698 --- /dev/null +++ b/services/clusters-keeper/requirements/ci.txt @@ -0,0 +1,24 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'services/clusters-keeper' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ 
../../packages/common-library +simcore-dask-task-models-library @ ../../packages/dask-task-models-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library + +# installs current package +simcore-service-clusters-keeper @ . diff --git a/services/clusters-keeper/requirements/constraints.txt b/services/clusters-keeper/requirements/constraints.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/requirements/dev.txt b/services/clusters-keeper/requirements/dev.txt new file mode 100644 index 00000000000..004ee6c6241 --- /dev/null +++ b/services/clusters-keeper/requirements/dev.txt @@ -0,0 +1,24 @@ +# Shortcut to install all packages needed to develop 'services/clusters-keeper' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/aws-library +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library +--editable ../../packages/dask-task-models-library + +# installs current package +--editable . diff --git a/services/clusters-keeper/requirements/prod.txt b/services/clusters-keeper/requirements/prod.txt new file mode 100644 index 00000000000..a0337e60a07 --- /dev/null +++ b/services/clusters-keeper/requirements/prod.txt @@ -0,0 +1,19 @@ +# Shortcut to install 'services/clusters-keeper' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +# installs current package +simcore-service-clusters-keeper @ . 
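
The requirements shortcuts above (_base/_test/_tools compiled with pip-tools, then ci.txt/dev.txt/prod.txt as install entrypoints) rely on constraint layering: _test.in pulls _base.txt in as a constraint rather than a requirement, so the compiled _test.txt only freezes the extra test packages while any pin it shares with _base.txt (e.g. boto3, redis, pyyaml) is forced to the same version. Below is a minimal stand-alone sketch of that invariant; it is illustrative only and not part of the repo, and it assumes it is run from services/clusters-keeper next to the requirements/ folder, with both files being standard pip-compile output.

import re
from pathlib import Path

# matches `name==version` pins at the start of a line, skipping "# via ..." comment lines
_PIN = re.compile(r"^([A-Za-z0-9._-]+)==([^\s;]+)", re.MULTILINE)


def read_pins(path: Path) -> dict[str, str]:
    """Return {package-name: version} for every pin in a pip-compile output file."""
    return {name.lower(): version for name, version in _PIN.findall(path.read_text())}


if __name__ == "__main__":
    reqs_dir = Path("requirements")
    base = read_pins(reqs_dir / "_base.txt")
    test = read_pins(reqs_dir / "_test.txt")
    shared = sorted(base.keys() & test.keys())
    # thanks to `--constraint _base.txt` in _test.in, shared pins cannot drift apart
    drifted = {name: (base[name], test[name]) for name in shared if base[name] != test[name]}
    assert not drifted, f"pins drifted from _base.txt: {drifted}"
    print(f"{len(shared)} shared pins match _base.txt")
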
diff --git a/services/clusters-keeper/setup.cfg b/services/clusters-keeper/setup.cfg new file mode 100644 index 00000000000..e3236d8ebb0 --- /dev/null +++ b/services/clusters-keeper/setup.cfg @@ -0,0 +1,18 @@ +[bumpversion] +current_version = 1.0.0 +commit = True +message = services/clusters-keeper version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/services/clusters-keeper/setup.py b/services/clusters-keeper/setup.py new file mode 100755 index 00000000000..dde5ed2898b --- /dev/null +++ b/services/clusters-keeper/setup.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-clusters-keeper" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Sylvain Anderegg (sanderegg)",) +DESCRIPTION = "Service to autoscale swarm resources" +README = (CURRENT_DIR / "README.md").read_text() + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-aws-library", + "simcore-models-library", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "package_data": {"": ["data/*.yml"]}, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-clusters-keeper = simcore_service_clusters_keeper.cli:main", + "simcore-service = simcore_service_clusters_keeper.cli:main", + ], + }, +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/__init__.py new file mode 100644 index 00000000000..f513c971cca --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/__init__.py @@ -0,0 +1,3 @@ +from ._meta import __version__ + +assert __version__ # nosec diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/_meta.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/_meta.py new file mode 100644 index 00000000000..58d79f3b9ba --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/_meta.py @@ -0,0 +1,63 @@ +""" Application's metadata + +""" + +from importlib.metadata import distribution, version +from importlib.resources import files +from pathlib import Path +from typing import Final + +from models_library.basic_types import VersionStr, VersionTag +from packaging.version import Version +from pydantic import TypeAdapter + 
+_current_distribution = distribution("simcore-service-clusters-keeper") +__version__: str = version("simcore-service-clusters-keeper") + + +APP_NAME: Final[str] = _current_distribution.metadata["Name"] +API_VERSION: Final[VersionStr] = TypeAdapter(VersionStr).validate_python(__version__) +VERSION: Final[Version] = Version(__version__) +API_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + f"v{VERSION.major}" +) +RPC_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + f"v{VERSION.major}" +) + + +def get_summary() -> str: + return _current_distribution.metadata.get_all("Summary", [""])[-1] + + +SUMMARY: Final[str] = get_summary() +PACKAGE_DATA_FOLDER: Final[Path] = Path(f'{files(APP_NAME.replace("-", "_")) / "data"}') + +# https://patorjk.com/software/taag/#p=testall&f=Avatar&t=clusters_keeper +APP_STARTED_BANNER_MSG = r""" + + _______ _ _______ _________ _______ _______ _______ _ _______ _______ _______ _______ _______ +( ____ \( \ |\ /|( ____ \\__ __/( ____ \( ____ )( ____ \ | \ /\( ____ \( ____ \( ____ )( ____ \( ____ ) +| ( \/| ( | ) ( || ( \/ ) ( | ( \/| ( )|| ( \/ | \ / /| ( \/| ( \/| ( )|| ( \/| ( )| +| | | | | | | || (_____ | | | (__ | (____)|| (_____ _____ | (_/ / | (__ | (__ | (____)|| (__ | (____)| +| | | | | | | |(_____ ) | | | __) | __)(_____ )(_____)| _ ( | __) | __) | _____)| __) | __) +| | | | | | | | ) | | | | ( | (\ ( ) | | ( \ \ | ( | ( | ( | ( | (\ ( +| (____/\| (____/\| (___) |/\____) | | | | (____/\| ) \ \__/\____) | | / \ \| (____/\| (____/\| ) | (____/\| ) \ \__ +(_______/(_______/(_______)\_______) )_( (_______/|/ \__/\_______) |_/ \/(_______/(_______/|/ (_______/|/ \__/ + {} +""".format( + f"v{__version__}" +) + +APP_STARTED_DISABLED_BANNER_MSG = r""" + _ _ _ _ _ + | |(_) | | | | | | + __| | _ ___ __ _ | |__ | | ___ __| | + / _` || |/ __| / _` || '_ \ | | / _ \ / _` | + | (_| || |\__ \| (_| || |_) || || __/| (_| | + \__,_||_||___/ \__,_||_.__/ |_| \___| \__,_| +""" + +APP_FINISHED_BANNER_MSG = "{:=^100}".format( + f"πŸŽ‰ App {APP_NAME}=={__version__} shutdown completed πŸŽ‰" +) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/api/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/api/dependencies/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/dependencies/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/api/dependencies/application.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/dependencies/application.py new file mode 100644 index 00000000000..02823f24864 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/dependencies/application.py @@ -0,0 +1,7 @@ +from typing import cast + +from fastapi import FastAPI, Request + + +def get_app(request: Request) -> FastAPI: + return cast(FastAPI, request.app) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/api/health.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/health.py new file mode 100644 index 00000000000..a971a551e4e --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/health.py @@ -0,0 +1,66 @@ +""" +All entrypoints used for operations + +for instance: service health-check (w/ different variants), diagnostics, debugging, status, etc +""" + 
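
The module docstring above groups the operational entrypoints; the plain-text route on "/" defined just below is the one a container liveness probe is expected to hit (see the NOTE pointing at docker/healthcheck.py). The following is a minimal sketch of such a probe, illustrative only and not the repository's actual healthcheck script; the http://localhost:8000 address is an assumption.

import sys
import urllib.request

try:
    # the health route replies 200 with a plain-text timestamp; anything else counts as unhealthy
    with urllib.request.urlopen("http://localhost:8000/", timeout=5) as response:
        sys.exit(0 if response.status == 200 else 1)
except Exception:  # any network/HTTP failure also counts as unhealthy
    sys.exit(1)
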
+import datetime +from typing import Annotated + +from fastapi import APIRouter, Depends, FastAPI +from fastapi.responses import PlainTextResponse +from pydantic import BaseModel + +from ..modules.rabbitmq import get_rabbitmq_client, is_rabbitmq_enabled +from ..modules.redis import get_redis_client +from .dependencies.application import get_app + +router = APIRouter() + + +@router.get("/", include_in_schema=True, response_class=PlainTextResponse) +async def health_check(): + # NOTE: sync url in docker/healthcheck.py with this entrypoint! + return f"{__name__}.health_check@{datetime.datetime.now(datetime.UTC).isoformat()}" + + +class _ComponentStatus(BaseModel): + is_enabled: bool + is_responsive: bool + + +class _StatusGet(BaseModel): + rabbitmq: _ComponentStatus + ec2: _ComponentStatus + redis_client_sdk: _ComponentStatus + ssm: _ComponentStatus + + +@router.get("/status", include_in_schema=True, response_model=_StatusGet) +async def get_status(app: Annotated[FastAPI, Depends(get_app)]) -> _StatusGet: + return _StatusGet( + rabbitmq=_ComponentStatus( + is_enabled=is_rabbitmq_enabled(app), + is_responsive=( + await get_rabbitmq_client(app).ping() + if is_rabbitmq_enabled(app) + else False + ), + ), + ec2=_ComponentStatus( + is_enabled=bool(app.state.ec2_client), + is_responsive=( + await app.state.ec2_client.ping() if app.state.ec2_client else False + ), + ), + redis_client_sdk=_ComponentStatus( + is_enabled=bool(app.state.redis_client_sdk), + is_responsive=await get_redis_client(app).ping(), + ), + ssm=_ComponentStatus( + is_enabled=(app.state.ssm_client is not None), + is_responsive=( + await app.state.ssm_client.ping() if app.state.ssm_client else False + ), + ), + ) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/api/routes.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/routes.py new file mode 100644 index 00000000000..acfcc55e970 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/api/routes.py @@ -0,0 +1,17 @@ +from fastapi import APIRouter, FastAPI + +from .._meta import API_VTAG +from . import health + + +def setup_api_routes(app: FastAPI): + """ + Composes resources/sub-resources routers + """ + router = APIRouter() + + # include operations in / + app.include_router(health.router, tags=["operations"]) + + # include the rest under /vX + app.include_router(router, prefix=f"/{API_VTAG}") diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/cli.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/cli.py new file mode 100644 index 00000000000..b65355463c4 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/cli.py @@ -0,0 +1,24 @@ +import logging + +import typer +from settings_library.utils_cli import create_settings_command + +from ._meta import APP_NAME +from .core.settings import ApplicationSettings + +log = logging.getLogger(__name__) + +# NOTE: 'main' variable is referred in the setup's entrypoint! +main = typer.Typer(name=APP_NAME) + +main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) + + +@main.command() +def run(): + """Runs application""" + typer.secho("Sorry, this entrypoint is intentionally disabled. 
Use instead") + typer.secho( + "$ uvicorn simcore_service_clusters_keeper.main:the_app", + fg=typer.colors.BLUE, + ) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/constants.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/constants.py new file mode 100644 index 00000000000..a5d4f3636da --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/constants.py @@ -0,0 +1,21 @@ +from typing import Final + +from aws_library.ec2._models import AWSTagKey, AWSTagValue +from pydantic import TypeAdapter + +DOCKER_STACK_DEPLOY_COMMAND_NAME: Final[str] = "private cluster docker deploy" +DOCKER_STACK_DEPLOY_COMMAND_EC2_TAG_KEY: Final[AWSTagKey] = TypeAdapter( + AWSTagKey +).validate_python("io.simcore.clusters-keeper.private_cluster_docker_deploy") + +USER_ID_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python("user_id") +WALLET_ID_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python( + "wallet_id" +) +ROLE_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python("role") +WORKER_ROLE_TAG_VALUE: Final[AWSTagValue] = TypeAdapter(AWSTagValue).validate_python( + "worker" +) +MANAGER_ROLE_TAG_VALUE: Final[AWSTagValue] = TypeAdapter(AWSTagValue).validate_python( + "manager" +) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/application.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/application.py new file mode 100644 index 00000000000..bbda1b456a4 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/application.py @@ -0,0 +1,104 @@ +import logging + +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) + +from .._meta import ( + API_VERSION, + API_VTAG, + APP_FINISHED_BANNER_MSG, + APP_NAME, + APP_STARTED_BANNER_MSG, + APP_STARTED_DISABLED_BANNER_MSG, +) +from ..api.routes import setup_api_routes +from ..modules.clusters_management_task import setup as setup_clusters_management +from ..modules.ec2 import setup as setup_ec2 +from ..modules.rabbitmq import setup as setup_rabbitmq +from ..modules.redis import setup as setup_redis +from ..modules.ssm import setup as setup_ssm +from ..rpc.rpc_routes import setup_rpc_routes +from .settings import ApplicationSettings + +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS = ( + "aiobotocore", + "aio_pika", + "aiormq", + "botocore", + "werkzeug", +) + +_logger = logging.getLogger(__name__) + + +def create_app(settings: ApplicationSettings) -> FastAPI: + # keep mostly quiet noisy loggers + quiet_level: int = max( + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + ) + for name in _NOISY_LOGGERS: + logging.getLogger(name).setLevel(quiet_level) + + _logger.info("app settings: %s", settings.model_dump_json(indent=1)) + + app = FastAPI( + debug=settings.CLUSTERS_KEEPER_DEBUG, + title=APP_NAME, + description="Service to keep external clusters alive", + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url="/dev/doc", + redoc_url=None, # default disabled + ) + # STATE + app.state.settings = settings + assert app.state.settings.API_VERSION == 
API_VERSION # nosec + + if app.state.settings.CLUSTERS_KEEPER_TRACING: + setup_tracing( + app, + app.state.settings.CLUSTERS_KEEPER_TRACING, + APP_NAME, + ) + if app.state.settings.CLUSTERS_KEEPER_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) + + # PLUGINS SETUP + setup_api_routes(app) + setup_rabbitmq(app) + setup_rpc_routes(app) + setup_ec2(app) + setup_ssm(app) + setup_redis(app) + setup_clusters_management(app) + + if app.state.settings.CLUSTERS_KEEPER_TRACING: + initialize_fastapi_app_tracing(app) + # ERROR HANDLERS + + # EVENTS + async def _on_startup() -> None: + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + if any( + s is None + for s in [ + settings.CLUSTERS_KEEPER_EC2_ACCESS, + settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES, + ] + ): + print(APP_STARTED_DISABLED_BANNER_MSG, flush=True) # noqa: T201 + + async def _on_shutdown() -> None: + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + return app diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/errors.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/errors.py new file mode 100644 index 00000000000..02824102d43 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/errors.py @@ -0,0 +1,9 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class ClustersKeeperRuntimeError(OsparcErrorMixin, RuntimeError): + msg_template: str = "clusters-keeper unexpected error" + + +class ConfigurationError(ClustersKeeperRuntimeError): + msg_template: str = "Application misconfiguration: {msg}" diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py new file mode 100644 index 00000000000..525148fa257 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/core/settings.py @@ -0,0 +1,467 @@ +import datetime +from functools import cached_property +from typing import Annotated, Final, Literal, cast + +from aws_library.ec2 import EC2InstanceBootSpecific, EC2Tags +from common_library.basic_types import DEFAULT_FACTORY +from fastapi import FastAPI +from models_library.basic_types import ( + BootModeEnum, + BuildTargetEnum, + LogLevel, + VersionTag, +) +from models_library.clusters import ClusterAuthentication +from pydantic import ( + AliasChoices, + Field, + NonNegativeFloat, + NonNegativeInt, + PositiveInt, + SecretStr, + TypeAdapter, + field_validator, +) +from pydantic_settings import SettingsConfigDict +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.base import BaseCustomSettings +from settings_library.docker_registry import RegistrySettings +from settings_library.ec2 import EC2Settings +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.ssm import SSMSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings +from types_aiobotocore_ec2.literals import InstanceTypeType + +from .._meta import API_VERSION, API_VTAG, APP_NAME + +CLUSTERS_KEEPER_ENV_PREFIX: Final[str] = "CLUSTERS_KEEPER_" + + +class ClustersKeeperEC2Settings(EC2Settings): + model_config = SettingsConfigDict( + env_prefix=CLUSTERS_KEEPER_ENV_PREFIX, + json_schema_extra={ + "examples": [ + { + 
f"{CLUSTERS_KEEPER_ENV_PREFIX}EC2_ACCESS_KEY_ID": "my_access_key_id", + f"{CLUSTERS_KEEPER_ENV_PREFIX}EC2_ENDPOINT": "https://my_ec2_endpoint.com", + f"{CLUSTERS_KEEPER_ENV_PREFIX}EC2_REGION_NAME": "us-east-1", + f"{CLUSTERS_KEEPER_ENV_PREFIX}EC2_SECRET_ACCESS_KEY": "my_secret_access_key", + } + ], + }, + ) + + +class ClustersKeeperSSMSettings(SSMSettings): + model_config = SettingsConfigDict( + env_prefix=CLUSTERS_KEEPER_ENV_PREFIX, + json_schema_extra={ + "examples": [ + { + f"{CLUSTERS_KEEPER_ENV_PREFIX}{key}": var + for key, var in example.items() # type:ignore[union-attr] + } + for example in SSMSettings.model_config[ # type:ignore[union-attr,index] + "json_schema_extra" + ][ + "examples" + ] + ], + }, + ) + + +class WorkersEC2InstancesSettings(BaseCustomSettings): + WORKERS_EC2_INSTANCES_ALLOWED_TYPES: Annotated[ + dict[str, EC2InstanceBootSpecific], + Field( + description="Defines which EC2 instances are considered as candidates for new EC2 instance and their respective boot specific parameters", + ), + ] + + WORKERS_EC2_INSTANCES_KEY_NAME: Annotated[ + str, + Field( + min_length=1, + description="SSH key filename (without ext) to access the instance through SSH" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)," + "this is required to start a new EC2 instance", + ), + ] + # BUFFER is not exposed since we set it to 0 + WORKERS_EC2_INSTANCES_MAX_START_TIME: Annotated[ + datetime.timedelta, + Field( + description="Usual time taken an EC2 instance with the given AMI takes to join the cluster " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)." + "NOTE: be careful that this time should always be a factor larger than the real time, as EC2 instances" + "that take longer than this time will be terminated as sometimes it happens that EC2 machine fail on start.", + ), + ] = datetime.timedelta(minutes=1) + WORKERS_EC2_INSTANCES_MAX_INSTANCES: Annotated[ + int, + Field( + description="Defines the maximum number of instances the clusters_keeper app may create", + ), + ] = 10 + # NAME PREFIX is not exposed since we override it anyway + WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS: Annotated[ + list[str], + Field( + min_length=1, + description="A security group acts as a virtual firewall for your EC2 instances to control incoming and outgoing traffic" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html), " + " this is required to start a new EC2 instance", + ), + ] + WORKERS_EC2_INSTANCES_SUBNET_ID: Annotated[ + str, + Field( + min_length=1, + description="A subnet is a range of IP addresses in your VPC " + " (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), " + "this is required to start a new EC2 instance", + ), + ] + + WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING: Annotated[ + datetime.timedelta, + Field( + description="Time after which an EC2 instance may be terminated (min 0 max 1 minute) " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(minutes=1) + + WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION: Annotated[ + datetime.timedelta, + Field( + description="Time after which an EC2 instance may be terminated (min 0, max 59 minutes) " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(minutes=3) + + WORKERS_EC2_INSTANCES_CUSTOM_TAGS: Annotated[ + EC2Tags, + 
Field( + description="Allows to define tags that should be added to the created EC2 instance default tags. " + "a tag must have a key and an optional value. see [https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html]", + ), + ] + + @field_validator("WORKERS_EC2_INSTANCES_ALLOWED_TYPES") + @classmethod + def _check_valid_instance_names( + cls, value: dict[str, EC2InstanceBootSpecific] + ) -> dict[str, EC2InstanceBootSpecific]: + # NOTE: needed because of a flaw in BaseCustomSettings + # issubclass raises TypeError if used on Aliases + TypeAdapter(list[InstanceTypeType]).validate_python(list(value)) + return value + + +class PrimaryEC2InstancesSettings(BaseCustomSettings): + PRIMARY_EC2_INSTANCES_ALLOWED_TYPES: Annotated[ + dict[str, EC2InstanceBootSpecific], + Field( + description="Defines which EC2 instances are considered as candidates for new EC2 instance and their respective boot specific parameters", + ), + ] + + PRIMARY_EC2_INSTANCES_MAX_INSTANCES: Annotated[ + int, + Field( + description="Defines the maximum number of instances the clusters_keeper app may create", + ), + ] = 10 + PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS: Annotated[ + list[str], + Field( + min_length=1, + description="A security group acts as a virtual firewall for your EC2 instances to control incoming and outgoing traffic" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html), " + " this is required to start a new EC2 instance", + ), + ] + PRIMARY_EC2_INSTANCES_SUBNET_ID: Annotated[ + str, + Field( + min_length=1, + description="A subnet is a range of IP addresses in your VPC " + " (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), " + "this is required to start a new EC2 instance", + ), + ] + + PRIMARY_EC2_INSTANCES_KEY_NAME: Annotated[ + str, + Field( + min_length=1, + description="SSH key filename (without ext) to access the instance through SSH" + " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)," + "this is required to start a new EC2 instance", + ), + ] + PRIMARY_EC2_INSTANCES_CUSTOM_TAGS: Annotated[ + EC2Tags, + Field( + description="Allows to define tags that should be added to the created EC2 instance default tags. " + "a tag must have a key and an optional value. 
see [https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html]", + ), + ] + PRIMARY_EC2_INSTANCES_ATTACHED_IAM_PROFILE: Annotated[ + str, + Field( + description="ARN the EC2 instance should be attached to (example: arn:aws:iam::XXXXX:role/NAME), to disable pass an empty string", + ), + ] + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA: Annotated[ + str, Field(description="Name of the dask TLC CA in AWS Parameter Store") + ] + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT: Annotated[ + str, + Field(description="Name of the dask TLC certificate in AWS Parameter Store"), + ] + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY: Annotated[ + str, Field(description="Name of the dask TLC key in AWS Parameter Store") + ] + PRIMARY_EC2_INSTANCES_PROMETHEUS_USERNAME: Annotated[ + str, Field(description="Username for accessing prometheus data") + ] + PRIMARY_EC2_INSTANCES_PROMETHEUS_PASSWORD: Annotated[ + SecretStr, Field(description="Password for accessing prometheus data") + ] + + PRIMARY_EC2_INSTANCES_MAX_START_TIME: Annotated[ + datetime.timedelta, + Field( + description="Usual time taken an EC2 instance with the given AMI takes to startup and be ready to receive jobs " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)." + "NOTE: be careful that this time should always be a factor larger than the real time, as EC2 instances" + "that take longer than this time will be terminated as sometimes it happens that EC2 machine fail on start.", + ), + ] = datetime.timedelta(minutes=2) + + PRIMARY_EC2_INSTANCES_DOCKER_DEFAULT_ADDRESS_POOL: Annotated[ + str, + Field( + description="defines the docker swarm default address pool in CIDR format " + "(see https://docs.docker.com/reference/cli/docker/swarm/init/)", + ), + ] = "172.20.0.0/14" # nosec + + PRIMARY_EC2_INSTANCES_RABBIT: Annotated[ + RabbitSettings | None, + Field( + description="defines the Rabbit settings for the primary instance (may be disabled)", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + @field_validator("PRIMARY_EC2_INSTANCES_ALLOWED_TYPES") + @classmethod + def _check_valid_instance_names( + cls, value: dict[str, EC2InstanceBootSpecific] + ) -> dict[str, EC2InstanceBootSpecific]: + # NOTE: needed because of a flaw in BaseCustomSettings + # issubclass raises TypeError if used on Aliases + TypeAdapter(list[InstanceTypeType]).validate_python(list(value)) + return value + + @field_validator("PRIMARY_EC2_INSTANCES_ALLOWED_TYPES") + @classmethod + def _check_only_one_value( + cls, value: dict[str, EC2InstanceBootSpecific] + ) -> dict[str, EC2InstanceBootSpecific]: + if len(value) != 1: + msg = "Only one exact value is accepted (empty or multiple is invalid)" + raise ValueError(msg) + + return value + + +class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): + # CODE STATICS --------------------------------------------------------- + API_VERSION: str = API_VERSION + APP_NAME: str = APP_NAME + API_VTAG: VersionTag = API_VTAG + + # IMAGE BUILDTIME ------------------------------------------------------ + # @Makefile + SC_BUILD_DATE: str | None = None + SC_BUILD_TARGET: BuildTargetEnum | None = None + SC_VCS_REF: str | None = None + SC_VCS_URL: str | None = None + + # @Dockerfile + SC_BOOT_MODE: BootModeEnum | None = None + SC_BOOT_TARGET: BuildTargetEnum | None = None + SC_HEALTHCHECK_TIMEOUT: Annotated[ + PositiveInt | None, + Field( + description="If a single run of the check takes longer than timeout seconds " + "then the check is considered to have failed." 
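+            # NOTE: this wording mirrors Docker's HEALTHCHECK --timeout/--retries semantics
+            # (the value is presumably consumed by the service image's HEALTHCHECK directive)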
+ "It takes retries consecutive failures of the health check for the container to be considered unhealthy.", + ), + ] = None + SC_USER_ID: int | None = None + SC_USER_NAME: str | None = None + + # RUNTIME ----------------------------------------------------------- + CLUSTERS_KEEPER_DEBUG: Annotated[ + bool, + Field( + default=False, + description="Debug mode", + validation_alias=AliasChoices("CLUSTERS_KEEPER_DEBUG", "DEBUG"), + ), + ] = False + CLUSTERS_KEEPER_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "CLUSTERS_KEEPER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + CLUSTERS_KEEPER_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "CLUSTERS_KEEPER_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + CLUSTERS_KEEPER_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "CLUSTERS_KEEPER_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + CLUSTERS_KEEPER_EC2_ACCESS: Annotated[ + ClustersKeeperEC2Settings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CLUSTERS_KEEPER_SSM_ACCESS: Annotated[ + ClustersKeeperSSMSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES: Annotated[ + PrimaryEC2InstancesSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES: Annotated[ + WorkersEC2InstancesSettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX: Annotated[ + str, + Field( + description="set a prefix to all machines created (useful for testing)", + ), + ] + + CLUSTERS_KEEPER_RABBITMQ: Annotated[ + RabbitSettings | None, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + CLUSTERS_KEEPER_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + CLUSTERS_KEEPER_REDIS: Annotated[ + RedisSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + + CLUSTERS_KEEPER_REGISTRY: Annotated[ + RegistrySettings | None, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + CLUSTERS_KEEPER_TASK_INTERVAL: Annotated[ + datetime.timedelta, + Field( + description="interval between each clusters clean check " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(seconds=30) + + SERVICE_TRACKING_HEARTBEAT: Annotated[ + datetime.timedelta, + Field( + description="Service heartbeat interval (everytime a heartbeat is sent into RabbitMQ) " + "(default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(seconds=60) + + CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION: Annotated[ + NonNegativeInt, + Field( + description="Max number of missed heartbeats before a cluster is terminated", + ), + ] = 5 + + CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG: Annotated[ + str, + Field( + description="defines the image tag to use for the 
computational backend sidecar image (NOTE: it currently defaults to use itisfoundation organisation in Dockerhub)", + ), + ] + + CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: Annotated[ + ClusterAuthentication, + Field( + description="defines the authentication of the clusters created via clusters-keeper (can be None or TLS)", + ), + ] + + CLUSTERS_KEEPER_DASK_NTHREADS: Annotated[ + NonNegativeInt, + Field( + description="overrides the default number of threads in the dask-sidecars, setting it to 0 will use the default (see description in dask-sidecar)", + ), + ] + + CLUSTERS_KEEPER_DASK_WORKER_SATURATION: Annotated[ + NonNegativeFloat | Literal["inf"], + Field( + description="override the dask scheduler 'worker-saturation' field" + ", see https://selectfrom.dev/deep-dive-into-dask-distributed-scheduler-9fdb3b36b7c7", + ), + ] = "inf" + + CLUSTERS_KEEPER_TRACING: Annotated[ + TracingSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ), + ] + + SWARM_STACK_NAME: Annotated[ + str, Field(description="Stack name defined upon deploy (see main Makefile)") + ] + + @cached_property + def LOG_LEVEL(self) -> LogLevel: # noqa: N802 + return self.CLUSTERS_KEEPER_LOGLEVEL + + @field_validator("CLUSTERS_KEEPER_LOGLEVEL", mode="before") + @classmethod + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +def get_application_settings(app: FastAPI) -> ApplicationSettings: + return cast(ApplicationSettings, app.state.settings) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/data/docker-compose.yml b/services/clusters-keeper/src/simcore_service_clusters_keeper/data/docker-compose.yml new file mode 100644 index 00000000000..6ba13f58eac --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/data/docker-compose.yml @@ -0,0 +1,221 @@ +services: + dask-scheduler: + image: ${DOCKER_REGISTRY:-itisfoundation}/dask-sidecar:${DOCKER_IMAGE_TAG} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + volumes: + - computational_shared_data:${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} + networks: + - cluster + environment: + DASK_TLS_CA_FILE: ${DASK_TLS_CA_FILE} + DASK_TLS_CERT: ${DASK_TLS_CERT} + DASK_TLS_KEY: ${DASK_TLS_KEY} + DASK_SIDECAR_RABBITMQ: ${AUTOSCALING_RABBITMQ} + DASK_SCHEDULER_URL: tls://dask-scheduler:8786 + DASK_START_AS_SCHEDULER: 1 + DASK_WORKER_SATURATION: ${DASK_WORKER_SATURATION} + LOG_LEVEL: ${LOG_LEVEL} + + SIDECAR_COMP_SERVICES_SHARED_FOLDER: /home/scu/computational_shared_data + SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME: computational_shared_data + + ports: + - 8786:8786 # dask-scheduler access + - 8787:8787 # dashboard + deploy: + labels: + prometheus-job: scheduler + prometheus-port: 8787 + placement: + constraints: + - "node.role==manager" + resources: + limits: + memory: 2048M + secrets: + - source: dask_tls_ca + target: ${DASK_TLS_CA_FILE} + mode: 444 + - source: dask_tls_key + target: ${DASK_TLS_KEY} + mode: 444 + - source: dask_tls_cert + target: ${DASK_TLS_CERT} + mode: 444 + + dask-sidecar: + image: ${DOCKER_REGISTRY:-itisfoundation}/dask-sidecar:${DOCKER_IMAGE_TAG} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + volumes: + - computational_shared_data:${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} + - /var/run/docker.sock:/var/run/docker.sock:ro + - ${ETC_HOSTNAME:-/etc/hostname}:/home/scu/hostname:ro + networks: + - cluster 
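+    # NOTE: the ${...} placeholders below are not defined in this file; the clusters-keeper
+    # injects them at deploy time through the environment list it prefixes to
+    # "docker stack deploy" (see _prepare_environment_variables / create_deploy_cluster_stack_script)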
+ environment: + DASK_LOG_FORMAT_LOCAL_DEV_ENABLED: 1 + DASK_NPROCS: 1 + DASK_NTHREADS: ${DASK_NTHREADS} + DASK_SCHEDULER_URL: tls://dask-scheduler:8786 + DASK_SIDECAR_NON_USABLE_RAM: 0 + DASK_SIDECAR_NUM_NON_USABLE_CPUS: 0 + DASK_SIDECAR_RABBITMQ: ${AUTOSCALING_RABBITMQ} + DASK_TLS_CA_FILE: ${DASK_TLS_CA_FILE} + DASK_TLS_CERT: ${DASK_TLS_CERT} + DASK_TLS_KEY: ${DASK_TLS_KEY} + DASK_WORKER_SATURATION: ${DASK_WORKER_SATURATION} + LOG_LEVEL: ${LOG_LEVEL} + + SIDECAR_COMP_SERVICES_SHARED_FOLDER: /home/scu/computational_shared_data + SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME: computational_shared_data + deploy: + mode: global + labels: + prometheus-job: sidecars + prometheus-port: 8787 + placement: + constraints: + - "node.role==worker" + secrets: + - source: dask_tls_ca + target: ${DASK_TLS_CA_FILE} + mode: 444 + - source: dask_tls_key + target: ${DASK_TLS_KEY} + mode: 444 + - source: dask_tls_cert + target: ${DASK_TLS_CERT} + mode: 444 + + autoscaling: + image: ${DOCKER_REGISTRY:-itisfoundation}/autoscaling:${DOCKER_IMAGE_TAG} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + environment: + AUTOSCALING_EC2_ACCESS_KEY_ID: ${CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID} + AUTOSCALING_EC2_ENDPOINT: ${CLUSTERS_KEEPER_EC2_ENDPOINT} + AUTOSCALING_EC2_REGION_NAME: ${CLUSTERS_KEEPER_EC2_REGION_NAME} + AUTOSCALING_EC2_SECRET_ACCESS_KEY: ${CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY} + AUTOSCALING_NODES_MONITORING: null + AUTOSCALING_POLL_INTERVAL: 00:00:10 + AUTOSCALING_RABBITMQ: ${AUTOSCALING_RABBITMQ} + DASK_MONITORING_URL: tls://dask-scheduler:8786 + DASK_SCHEDULER_AUTH: '{"type":"tls","tls_ca_file":"${DASK_TLS_CA_FILE}","tls_client_cert":"${DASK_TLS_CERT}","tls_client_key":"${DASK_TLS_KEY}"}' + EC2_INSTANCES_ALLOWED_TYPES: ${WORKERS_EC2_INSTANCES_ALLOWED_TYPES} + EC2_INSTANCES_CUSTOM_TAGS: ${WORKERS_EC2_INSTANCES_CUSTOM_TAGS} + EC2_INSTANCES_ATTACHED_IAM_PROFILE: "" + EC2_INSTANCES_KEY_NAME: ${WORKERS_EC2_INSTANCES_KEY_NAME} + EC2_INSTANCES_MACHINES_BUFFER: 0 + EC2_INSTANCES_MAX_INSTANCES: ${WORKERS_EC2_INSTANCES_MAX_INSTANCES} + EC2_INSTANCES_NAME_PREFIX: ${EC2_INSTANCES_NAME_PREFIX} + EC2_INSTANCES_SECURITY_GROUP_IDS: ${WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS} + EC2_INSTANCES_SUBNET_ID: ${WORKERS_EC2_INSTANCES_SUBNET_ID} + EC2_INSTANCES_TIME_BEFORE_DRAINING: ${WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING} + EC2_INSTANCES_TIME_BEFORE_TERMINATION: ${WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION} + LOG_FORMAT_LOCAL_DEV_ENABLED: 1 + LOG_LEVEL: ${LOG_LEVEL} + REDIS_HOST: redis + REDIS_PORT: 6379 + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + networks: + - cluster + deploy: + labels: + prometheus-job: autoscaling + prometheus-port: 8000 + placement: + constraints: + - "node.role==manager" + resources: + limits: + memory: 512M + secrets: + - source: dask_tls_ca + target: ${DASK_TLS_CA_FILE} + mode: 444 + - source: dask_tls_key + target: ${DASK_TLS_KEY} + mode: 444 + - source: dask_tls_cert + target: ${DASK_TLS_CERT} + mode: 444 + + redis: + # NOTE: currently autoscaling requires redis to run + image: "redis:6.2.6@sha256:4bed291aa5efb9f0d77b76ff7d4ab71eee410962965d052552db1fb80576431d" + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 30s + retries: 50 + volumes: + - redis-data:/data + networks: + - cluster + deploy: + placement: + constraints: + - "node.role==manager" + resources: + limits: + memory: 512M + cpus: "0.5" + + prometheus: + image: 
prom/prometheus:v2.51.0@sha256:5ccad477d0057e62a7cd1981ffcc43785ac10c5a35522dc207466ff7e7ec845f + command: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--web.config.file=/etc/prometheus/web.yml" + - "--storage.tsdb.path=/prometheus" + - "--storage.tsdb.retention.size=1GB" + ports: + - 9090:9090 + configs: + - source: prometheus-config + target: /etc/prometheus/prometheus.yml + - source: prometheus-web-config + target: /etc/prometheus/web.yml + volumes: + - prometheus-data:/prometheus + - /var/run/docker.sock:/var/run/docker.sock:ro + user: root # because of docker + networks: + - cluster + deploy: + placement: + constraints: + - "node.role==manager" + resources: + limits: + memory: 1024M + cpus: "1.0" + +networks: + cluster: + + +configs: + prometheus-config: + file: ./prometheus.yml + prometheus-web-config: + file: ./prometheus-web.yml + +volumes: + computational_shared_data: + name: computational_shared_data + redis-data: + prometheus-data: + + +secrets: + dask_tls_ca: + file: ${DASK_TLS_CA_FILE} + dask_tls_key: + file: ${DASK_TLS_KEY} + dask_tls_cert: + file: ${DASK_TLS_CERT} diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/data/prometheus.yml b/services/clusters-keeper/src/simcore_service_clusters_keeper/data/prometheus.yml new file mode 100644 index 00000000000..33e8212f477 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/data/prometheus.yml @@ -0,0 +1,50 @@ +global: + scrape_interval: "29s" +scrape_configs: + # Create a job for Docker Swarm containers. + - job_name: 'docker nodes' + dockerswarm_sd_configs: + - host: unix:///var/run/docker.sock + role: nodes + relabel_configs: + # Fetch metrics on port 9323. + - source_labels: [__meta_dockerswarm_node_address] + target_label: __address__ + replacement: $1:9323 + # Set hostname as instance label + - source_labels: [__meta_dockerswarm_node_hostname] + target_label: instance + # Create a job for Docker Swarm containers. + - job_name: 'docker tasks' + dockerswarm_sd_configs: + - host: unix:///var/run/docker.sock + role: tasks + relabel_configs: + # Set hostname as instance label + - source_labels: [__meta_dockerswarm_node_hostname] + target_label: instance + # Only keep containers that should be running. + - source_labels: [__meta_dockerswarm_task_desired_state] + regex: running + action: keep + # Only keep tasks with a `prometheus_port` label. + - source_labels: [__meta_dockerswarm_service_label_prometheus_port] + regex: .+ + action: keep + # Only keep containers that have a `prometheus-job` label. + - source_labels: [__meta_dockerswarm_service_label_prometheus_job] + regex: .+ + action: keep + # Use the prometheus-job Swarm label as Prometheus job label. + - source_labels: [__meta_dockerswarm_service_label_prometheus_job] + target_label: job + # Specify the metric path if needed (optional) + - source_labels: [__meta_dockerswarm_service_label_prometheus_path] + target_label: __metrics_path__ + regex: (.+) + # Use the `prometheus_port` Swarm label to set the __address__ for scraping. 
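+      # Illustrative example: __address__ "10.0.0.5:9323" and prometheus-port "8787" are joined
+      # with the default ";" separator into "10.0.0.5:9323;8787", which the regex below rewrites
+      # to "10.0.0.5:8787" (keep the host, swap in the labelled port).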
+ - source_labels: [__address__, __meta_dockerswarm_service_label_prometheus_port] + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + action: replace diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/main.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/main.py new file mode 100644 index 00000000000..b2844bde6af --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/main.py @@ -0,0 +1,22 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_clusters_keeper.core.application import create_app +from simcore_service_clusters_keeper.core.settings import ApplicationSettings + +the_settings = ApplicationSettings.create_from_envs() +logging.basicConfig(level=the_settings.log_level) +logging.root.setLevel(the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=the_settings.CLUSTERS_KEEPER_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=the_settings.CLUSTERS_KEEPER_LOG_FILTER_MAPPING, + tracing_settings=the_settings.CLUSTERS_KEEPER_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(the_settings) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters.py new file mode 100644 index 00000000000..89860549fd3 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters.py @@ -0,0 +1,166 @@ +import logging +from collections.abc import Iterable + +import arrow +from aws_library.ec2 import ( + AWSTagKey, + AWSTagValue, + EC2InstanceBootSpecific, + EC2InstanceConfig, + EC2InstanceData, + EC2InstanceType, + SimcoreEC2API, +) +from aws_library.ec2._errors import EC2InstanceNotFoundError +from fastapi import FastAPI +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.logging_utils import log_context + +from ..core.settings import ApplicationSettings, get_application_settings +from ..utils.clusters import create_startup_script +from ..utils.ec2 import ( + HEARTBEAT_TAG_KEY, + all_created_ec2_instances_filter, + creation_ec2_tags, + ec2_instances_for_user_wallet_filter, + get_cluster_name, +) +from .ec2 import get_ec2_client + +_logger = logging.getLogger(__name__) + + +async def _get_primary_ec2_params( + app_settings: ApplicationSettings, ec2_client: SimcoreEC2API +) -> tuple[EC2InstanceType, EC2InstanceBootSpecific]: + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + assert ( + len( + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_ALLOWED_TYPES + ) + == 1 + ) # nosec + ec2_type_name, ec2_boot_specs = next( + iter( + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_ALLOWED_TYPES.items() + ) + ) + ec2_instance_types: list[ + EC2InstanceType + ] = await ec2_client.get_ec2_instance_capabilities( + instance_type_names={ec2_type_name} + ) + assert ec2_instance_types # nosec + assert len(ec2_instance_types) == 1 # nosec + return ec2_instance_types[0], ec2_boot_specs + + +async def create_cluster( + app: FastAPI, *, user_id: 
UserID, wallet_id: WalletID | None +) -> list[EC2InstanceData]: + ec2_client = get_ec2_client(app) + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + + ec2_instance_type, ec2_instance_boot_specs = await _get_primary_ec2_params( + app_settings, ec2_client + ) + + instance_config = EC2InstanceConfig( + type=ec2_instance_type, + tags=creation_ec2_tags(app_settings, user_id=user_id, wallet_id=wallet_id), + startup_script=create_startup_script( + app_settings, + ec2_boot_specific=ec2_instance_boot_specs, + ), + ami_id=ec2_instance_boot_specs.ami_id, + key_name=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_KEY_NAME, + security_group_ids=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS, + subnet_id=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_SUBNET_ID, + iam_instance_profile=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_ATTACHED_IAM_PROFILE, + ) + new_ec2_instance_data: list[EC2InstanceData] = await ec2_client.launch_instances( + instance_config, + min_number_of_instances=1, + number_of_instances=1, + max_total_number_of_instances=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_MAX_INSTANCES, + ) + return new_ec2_instance_data + + +async def get_all_clusters(app: FastAPI) -> set[EC2InstanceData]: + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + ec2_instance_data: set[EC2InstanceData] = set( + await get_ec2_client(app).get_instances( + key_names=[ + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_KEY_NAME + ], + tags=all_created_ec2_instances_filter(app_settings), + state_names=["running"], + ) + ) + return ec2_instance_data + + +async def get_cluster( + app: FastAPI, *, user_id: UserID, wallet_id: WalletID | None +) -> EC2InstanceData: + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + if instances := await get_ec2_client(app).get_instances( + key_names=[ + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_KEY_NAME + ], + tags=ec2_instances_for_user_wallet_filter( + app_settings, user_id=user_id, wallet_id=wallet_id + ), + ): + assert len(instances) == 1 # nosec + return instances[0] + raise EC2InstanceNotFoundError + + +async def get_cluster_workers( + app: FastAPI, *, user_id: UserID, wallet_id: WalletID | None +) -> list[EC2InstanceData]: + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES # nosec + ec2_instance_data: list[EC2InstanceData] = await get_ec2_client(app).get_instances( + key_names=[ + app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_KEY_NAME + ], + tags={ + AWSTagKey("Name"): AWSTagValue( + f"{get_cluster_name(app_settings, user_id=user_id, wallet_id=wallet_id, is_manager=False)}*" + ) + }, + ) + return ec2_instance_data + + +async def cluster_heartbeat( + app: FastAPI, *, user_id: UserID, wallet_id: WalletID | None +) -> None: + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + instance = await get_cluster(app, user_id=user_id, wallet_id=wallet_id) + await set_instance_heartbeat(app, instance=instance) + + +async def set_instance_heartbeat(app: FastAPI, *, instance: EC2InstanceData) -> None: + with log_context( + _logger, logging.DEBUG, msg=f"set 
instance heartbeat for {instance.id}" + ): + ec2_client = get_ec2_client(app) + await ec2_client.set_instances_tags( + [instance], + tags={HEARTBEAT_TAG_KEY: AWSTagValue(arrow.utcnow().datetime.isoformat())}, + ) + + +async def delete_clusters( + app: FastAPI, *, instances: Iterable[EC2InstanceData] +) -> None: + await get_ec2_client(app).terminate_instances(instances) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_core.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_core.py new file mode 100644 index 00000000000..f3ebe712b9a --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_core.py @@ -0,0 +1,249 @@ +import datetime +import logging +from collections.abc import Iterable +from typing import Final + +import arrow +from aws_library.ec2 import AWSTagKey, EC2InstanceData +from aws_library.ec2._models import AWSTagValue +from fastapi import FastAPI +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from servicelib.logging_utils import log_catch +from servicelib.utils import limited_gather + +from ..constants import ( + DOCKER_STACK_DEPLOY_COMMAND_EC2_TAG_KEY, + DOCKER_STACK_DEPLOY_COMMAND_NAME, + ROLE_TAG_KEY, + USER_ID_TAG_KEY, + WALLET_ID_TAG_KEY, + WORKER_ROLE_TAG_VALUE, +) +from ..core.settings import get_application_settings +from ..modules.clusters import ( + delete_clusters, + get_all_clusters, + get_cluster_workers, + set_instance_heartbeat, +) +from ..utils.clusters import create_deploy_cluster_stack_script +from ..utils.dask import get_scheduler_auth, get_scheduler_url +from ..utils.ec2 import ( + HEARTBEAT_TAG_KEY, + get_cluster_name, + user_id_from_instance_tags, + wallet_id_from_instance_tags, +) +from .dask import is_scheduler_busy, ping_scheduler +from .ec2 import get_ec2_client +from .ssm import get_ssm_client + +_logger = logging.getLogger(__name__) + + +def _get_instance_last_heartbeat(instance: EC2InstanceData) -> datetime.datetime | None: + if last_heartbeat := instance.tags.get( + HEARTBEAT_TAG_KEY, + ): + last_heartbeat_time: datetime.datetime = arrow.get(last_heartbeat).datetime + return last_heartbeat_time + + return None + + +_USER_ID_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python("user_id") +_WALLET_ID_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python( + "wallet_id" +) + + +async def _get_all_associated_worker_instances( + app: FastAPI, + primary_instances: Iterable[EC2InstanceData], +) -> set[EC2InstanceData]: + worker_instances: set[EC2InstanceData] = set() + for instance in primary_instances: + assert "user_id" in instance.tags # nosec + user_id = UserID(instance.tags[_USER_ID_TAG_KEY]) + assert "wallet_id" in instance.tags # nosec + # NOTE: wallet_id can be None + wallet_id = ( + WalletID(instance.tags[_WALLET_ID_TAG_KEY]) + if instance.tags[_WALLET_ID_TAG_KEY] != "None" + else None + ) + + worker_instances.update( + await get_cluster_workers(app, user_id=user_id, wallet_id=wallet_id) + ) + return worker_instances + + +async def _find_terminateable_instances( + app: FastAPI, instances: Iterable[EC2InstanceData] +) -> set[EC2InstanceData]: + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + + # get the corresponding ec2 instance data + terminateable_instances: set[EC2InstanceData] = set() + + time_to_wait_before_termination = ( + 
app_settings.CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION
+        * app_settings.SERVICE_TRACKING_HEARTBEAT
+    )
+    startup_delay = (
+        app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_MAX_START_TIME
+    )
+    for instance in instances:
+        if last_heartbeat := _get_instance_last_heartbeat(instance):
+            elapsed_time_since_heartbeat = arrow.utcnow().datetime - last_heartbeat
+            allowed_time_to_wait = time_to_wait_before_termination
+            if elapsed_time_since_heartbeat >= allowed_time_to_wait:
+                terminateable_instances.add(instance)
+            else:
+                _logger.info(
+                    "%s still has %ss before being terminateable",
+                    f"{instance.id=}",
+                    f"{(allowed_time_to_wait - elapsed_time_since_heartbeat).total_seconds()}",
+                )
+        else:
+            elapsed_time_since_startup = arrow.utcnow().datetime - instance.launch_time
+            allowed_time_to_wait = startup_delay
+            if elapsed_time_since_startup >= allowed_time_to_wait:
+                terminateable_instances.add(instance)
+
+    # get the worker instances associated with the terminateable primary instances
+    worker_instances = await _get_all_associated_worker_instances(
+        app, terminateable_instances
+    )
+
+    return terminateable_instances.union(worker_instances)
+
+
+async def check_clusters(app: FastAPI) -> None:
+    primary_instances = await get_all_clusters(app)
+
+    connected_instances = {
+        instance
+        for instance in primary_instances
+        if await ping_scheduler(get_scheduler_url(instance), get_scheduler_auth(app))
+    }
+
+    # set the instance heartbeat if the scheduler is busy
+    for instance in connected_instances:
+        with log_catch(_logger, reraise=False):
+            # NOTE: a connected instance could in theory break between these 2 calls; this is
+            # therefore silenced and will be handled in the next call to check_clusters
+            if await is_scheduler_busy(
+                get_scheduler_url(instance), get_scheduler_auth(app)
+            ):
+                _logger.info(
+                    "%s is running tasks",
+                    f"{instance.id=} for {instance.tags=}",
+                )
+                await set_instance_heartbeat(app, instance=instance)
+    # clean any cluster that is not doing anything
+    if terminateable_instances := await _find_terminateable_instances(
+        app, connected_instances
+    ):
+        await delete_clusters(app, instances=terminateable_instances)
+
+    # analyse disconnected instances (currently starting or broken)
+    disconnected_instances = primary_instances - connected_instances
+
+    # starting instances do not have a heartbeat set but sometimes might fail and should be terminated
+    starting_instances = {
+        instance
+        for instance in disconnected_instances
+        if _get_instance_last_heartbeat(instance) is None
+    }
+    # remove instances that were starting for too long
+    if terminateable_instances := await _find_terminateable_instances(
+        app, starting_instances
+    ):
+        _logger.warning(
+            "The following clusters' primary EC2 instances were starting for too long and will be terminated now "
+            "(either because a cluster was started and is not needed anymore, or there is an issue): %s",
+            f"{[i.id for i in terminateable_instances]}",
+        )
+        await delete_clusters(app, instances=terminateable_instances)
+
+    # NOTE: transmit the command to start the docker swarm/stack if needed
+    # once the instance is connected to the SSM server,
+    # use the ssm client to send the command to these instances;
+    # the command contains:
+    # the docker-compose file in binary,
+    # the call to init the docker swarm and the call to deploy the stack
+    instances_in_need_of_deployment = {
+        i
+        for i in starting_instances - terminateable_instances
+        if DOCKER_STACK_DEPLOY_COMMAND_EC2_TAG_KEY not in i.tags
+    }
+
+    if 
instances_in_need_of_deployment: + app_settings = get_application_settings(app) + ssm_client = get_ssm_client(app) + ec2_client = get_ec2_client(app) + instances_in_need_of_deployment_ssm_connection_state = await limited_gather( + *[ + ssm_client.is_instance_connected_to_ssm_server(i.id) + for i in instances_in_need_of_deployment + ], + reraise=False, + log=_logger, + limit=20, + ) + ec2_connected_to_ssm_server = [ + i + for i, c in zip( + instances_in_need_of_deployment, + instances_in_need_of_deployment_ssm_connection_state, + strict=True, + ) + if c is True + ] + started_instances_ready_for_command = ec2_connected_to_ssm_server + if started_instances_ready_for_command: + # we need to send 1 command per machine here, as the user_id/wallet_id changes + for i in started_instances_ready_for_command: + ssm_command = await ssm_client.send_command( + [i.id], + command=create_deploy_cluster_stack_script( + app_settings, + cluster_machines_name_prefix=get_cluster_name( + app_settings, + user_id=user_id_from_instance_tags(i.tags), + wallet_id=wallet_id_from_instance_tags(i.tags), + is_manager=False, + ), + additional_custom_tags={ + USER_ID_TAG_KEY: i.tags[USER_ID_TAG_KEY], + WALLET_ID_TAG_KEY: i.tags[WALLET_ID_TAG_KEY], + ROLE_TAG_KEY: WORKER_ROLE_TAG_VALUE, + }, + ), + command_name=DOCKER_STACK_DEPLOY_COMMAND_NAME, + ) + await ec2_client.set_instances_tags( + started_instances_ready_for_command, + tags={ + DOCKER_STACK_DEPLOY_COMMAND_EC2_TAG_KEY: AWSTagValue( + ssm_command.command_id + ), + }, + ) + + # the remaining instances are broken (they were at some point connected but now not anymore) + broken_instances = disconnected_instances - starting_instances + if terminateable_instances := await _find_terminateable_instances( + app, broken_instances + ): + _logger.error( + "The following clusters'primary EC2 were found as unresponsive " + "(TIP: there is something wrong here, please inform support) and will be terminated now: '%s", + f"{[i.id for i in terminateable_instances]}", + ) + await delete_clusters(app, instances=terminateable_instances) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_task.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_task.py new file mode 100644 index 00000000000..c540d7b160f --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/clusters_management_task.py @@ -0,0 +1,60 @@ +import json +import logging +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.redis import exclusive + +from .._meta import APP_NAME +from ..core.settings import ApplicationSettings +from ..modules.redis import get_redis_client +from .clusters_management_core import check_clusters + +_TASK_NAME = "Clusters-keeper EC2 instances management" + +logger = logging.getLogger(__name__) + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + app_settings: ApplicationSettings = app.state.settings + + lock_key = f"{APP_NAME}:clusters-management_lock" + lock_value = json.dumps({}) + app.state.clusters_cleaning_task = create_periodic_task( + exclusive(get_redis_client(app), lock_key=lock_key, lock_value=lock_value)( + check_clusters + ), + interval=app_settings.CLUSTERS_KEEPER_TASK_INTERVAL, + task_name=_TASK_NAME, + app=app, + ) + + return _startup + + +def 
on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + await cancel_wait_task(app.state.clusters_cleaning_task, max_delay=5) + + return _stop + + +def setup(app: FastAPI): + app_settings: ApplicationSettings = app.state.settings + if any( + s is None + for s in [ + app_settings.CLUSTERS_KEEPER_EC2_ACCESS, + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES, + app_settings.CLUSTERS_KEEPER_SSM_ACCESS, + ] + ): + logger.warning( + "the clusters management background task is disabled by settings, nothing will happen!" + ) + return + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py new file mode 100644 index 00000000000..0641e812777 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/dask.py @@ -0,0 +1,74 @@ +import logging +from collections.abc import Coroutine +from typing import Any, Final + +import distributed +from models_library.clusters import ClusterAuthentication, TLSAuthentication +from pydantic import AnyUrl + +_logger = logging.getLogger(__name__) + + +async def _wrap_client_async_routine( + client_coroutine: Coroutine[Any, Any, Any] | Any | None +) -> Any: + """Dask async behavior does not go well with Pylance as it returns + a union of types. this wrapper makes both mypy and pylance happy""" + assert client_coroutine # nosec + return await client_coroutine + + +_CONNECTION_TIMEOUT: Final[str] = "5" + + +async def ping_scheduler(url: AnyUrl, authentication: ClusterAuthentication) -> bool: + try: + security = distributed.Security() + if isinstance(authentication, TLSAuthentication): + security = distributed.Security( + tls_ca_file=f"{authentication.tls_ca_file}", + tls_client_cert=f"{authentication.tls_client_cert}", + tls_client_key=f"{authentication.tls_client_key}", + require_encryption=True, + ) + async with distributed.Client( + f"{url}", asynchronous=True, timeout=_CONNECTION_TIMEOUT, security=security + ): + ... 
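+        # getting this far means the client connected (and, with TLSAuthentication, that the
+        # TLS handshake succeeded); connection failures surface as the OSError handled below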
+ return True + except OSError: + _logger.info( + "osparc-dask-scheduler %s ping timed-out, the machine is likely still starting/hanged or broken...", + url, + ) + + return False + + +async def is_scheduler_busy(url: AnyUrl, authentication: ClusterAuthentication) -> bool: + security = distributed.Security() + if isinstance(authentication, TLSAuthentication): + security = distributed.Security( + tls_ca_file=f"{authentication.tls_ca_file}", + tls_client_cert=f"{authentication.tls_client_cert}", + tls_client_key=f"{authentication.tls_client_key}", + require_encryption=True, + ) + async with distributed.Client( + f"{url}", asynchronous=True, timeout=_CONNECTION_TIMEOUT, security=security + ) as client: + datasets_on_scheduler = await _wrap_client_async_routine(client.list_datasets()) + _logger.info("cluster currently has %s datasets", len(datasets_on_scheduler)) + num_processing_tasks = 0 + if worker_to_processing_tasks := await _wrap_client_async_routine( + client.processing() + ): + _logger.info( + "cluster current workers: %s", worker_to_processing_tasks.keys() + ) + num_processing_tasks = sum( + len(tasks) for tasks in worker_to_processing_tasks.values() + ) + _logger.info("cluster currently processes %s tasks", num_processing_tasks) + + return bool(datasets_on_scheduler or num_processing_tasks) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ec2.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ec2.py new file mode 100644 index 00000000000..c0ac5126e1e --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ec2.py @@ -0,0 +1,57 @@ +import logging +from typing import cast + +from aws_library.ec2 import SimcoreEC2API +from aws_library.ec2._errors import EC2NotConnectedError +from fastapi import FastAPI +from settings_library.ec2 import EC2Settings +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_random_exponential + +from ..core.errors import ConfigurationError +from ..core.settings import get_application_settings + +logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.ec2_client = None + + settings: EC2Settings | None = get_application_settings( + app + ).CLUSTERS_KEEPER_EC2_ACCESS + + if not settings: + logger.warning("EC2 client is de-activated in the settings") + return + + app.state.ec2_client = client = await SimcoreEC2API.create(settings) + + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(120), + wait=wait_random_exponential(max=30), + before_sleep=before_sleep_log(logger, logging.WARNING), + ): + with attempt: + connected = await client.ping() + if not connected: + raise EC2NotConnectedError # pragma: no cover + + async def on_shutdown() -> None: + if app.state.ec2_client: + await cast(SimcoreEC2API, app.state.ec2_client).close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_ec2_client(app: FastAPI) -> SimcoreEC2API: + if not app.state.ec2_client: + raise ConfigurationError( + msg="EC2 client is not available. Please check the configuration." 
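+            # NOTE: typically reached when CLUSTERS_KEEPER_EC2_ACCESS is unset, in which
+            # case on_startup() above leaves app.state.ec2_client as None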
+ ) + return cast(SimcoreEC2API, app.state.ec2_client) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/rabbitmq.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/rabbitmq.py new file mode 100644 index 00000000000..c0ff928fe71 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/rabbitmq.py @@ -0,0 +1,73 @@ +import contextlib +import logging +from typing import cast + +from fastapi import FastAPI +from models_library.rabbitmq_messages import RabbitMessageBase +from servicelib.logging_utils import log_catch +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) +from settings_library.rabbit import RabbitSettings + +from ..core.errors import ConfigurationError +from ..core.settings import get_application_settings + +logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.rabbitmq_client = None + app.state.rabbitmq_rpc_server = None + settings: RabbitSettings | None = get_application_settings( + app + ).CLUSTERS_KEEPER_RABBITMQ + if not settings: + logger.warning("Rabbit MQ client is de-activated in the settings") + return + await wait_till_rabbitmq_responsive(settings.dsn) + # create the clients + app.state.rabbitmq_client = RabbitMQClient( + client_name="clusters_keeper", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="clusters_keeper_rpc_server", settings=settings + ) + + async def on_shutdown() -> None: + if app.state.rabbitmq_client: + await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + if not app.state.rabbitmq_client: + raise ConfigurationError( + msg="RabbitMQ client is not available. Please check the configuration." + ) + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def is_rabbitmq_enabled(app: FastAPI) -> bool: + return app.state.rabbitmq_client is not None + + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + if not app.state.rabbitmq_rpc_server: + raise ConfigurationError( + msg="RabbitMQ client for RPC is not available. Please check the configuration." 
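+            # NOTE: raised when CLUSTERS_KEEPER_RABBITMQ is unset, so the RPC server was
+            # never created in on_startup()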
+ ) + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) + + +async def post_message(app: FastAPI, message: RabbitMessageBase) -> None: + with log_catch(logger, reraise=False), contextlib.suppress(ConfigurationError): + # NOTE: if rabbitmq was not initialized the error does not need to flood the logs + await get_rabbitmq_client(app).publish(message.channel_name, message) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/redis.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/redis.py new file mode 100644 index 00000000000..8e2d5b71e33 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/redis.py @@ -0,0 +1,33 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase, RedisSettings + +from .._meta import APP_NAME +from ..core.settings import get_application_settings + +logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.redis_client_sdk = None + settings: RedisSettings = get_application_settings(app).CLUSTERS_KEEPER_REDIS + redis_locks_dsn = settings.build_redis_dsn(RedisDatabase.LOCKS) + app.state.redis_client_sdk = RedisClientSDK( + redis_locks_dsn, client_name=APP_NAME + ) + + async def on_shutdown() -> None: + redis_client_sdk: None | RedisClientSDK = app.state.redis_client_sdk + if redis_client_sdk: + await redis_client_sdk.shutdown() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_redis_client(app: FastAPI) -> RedisClientSDK: + return cast(RedisClientSDK, app.state.redis_client_sdk) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ssm.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ssm.py new file mode 100644 index 00000000000..218812d5523 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/modules/ssm.py @@ -0,0 +1,56 @@ +import logging +from typing import cast + +from aws_library.ssm import SimcoreSSMAPI +from aws_library.ssm._errors import SSMNotConnectedError +from fastapi import FastAPI +from settings_library.ssm import SSMSettings +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_random_exponential + +from ..core.errors import ConfigurationError +from ..core.settings import get_application_settings + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.ssm_client = None + settings: SSMSettings | None = get_application_settings( + app + ).CLUSTERS_KEEPER_SSM_ACCESS + + if not settings: + _logger.warning("SSM client is de-activated in the settings") + return + + app.state.ssm_client = client = await SimcoreSSMAPI.create(settings) + + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(120), + wait=wait_random_exponential(max=30), + before_sleep=before_sleep_log(_logger, logging.WARNING), + ): + with attempt: + connected = await client.ping() + if not connected: + raise SSMNotConnectedError # pragma: no cover + + async def on_shutdown() -> None: + if app.state.ssm_client: + await cast(SimcoreSSMAPI, app.state.ssm_client).close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def 
get_ssm_client(app: FastAPI) -> SimcoreSSMAPI: + if not app.state.ssm_client: + raise ConfigurationError( + msg="SSM client is not available. Please check the configuration." + ) + return cast(SimcoreSSMAPI, app.state.ssm_client) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/clusters.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/clusters.py new file mode 100644 index 00000000000..82f84b9d471 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/clusters.py @@ -0,0 +1,90 @@ +import datetime + +from aws_library.ec2 import EC2InstanceData +from aws_library.ec2._errors import EC2InstanceNotFoundError +from fastapi import FastAPI +from models_library.api_schemas_clusters_keeper.clusters import OnDemandCluster +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.rabbitmq import RPCRouter +from servicelib.redis import RedisClientSDK, exclusive + +from ..core.settings import get_application_settings +from ..modules import clusters +from ..modules.dask import ping_scheduler +from ..modules.redis import get_redis_client +from ..utils.clusters import create_cluster_from_ec2_instance +from ..utils.dask import get_scheduler_auth, get_scheduler_url + +router = RPCRouter() + + +def _get_app_from_args(*args, **kwargs) -> FastAPI: + assert kwargs is not None # nosec + if args: + app = args[0] + else: + assert "app" in kwargs # nosec + app = kwargs["app"] + assert isinstance(app, FastAPI) # nosec + return app + + +def _get_redis_client_from_app(*args, **kwargs) -> RedisClientSDK: + app = _get_app_from_args(*args, **kwargs) + return get_redis_client(app) + + +def _get_redis_lock_key(*_args, user_id: UserID, wallet_id: WalletID | None) -> str: + return f"get_or_create_cluster-{user_id=}-{wallet_id=}" + + +@router.expose() +@exclusive( + _get_redis_client_from_app, + lock_key=_get_redis_lock_key, + blocking=True, + blocking_timeout=datetime.timedelta(seconds=10), +) +async def get_or_create_cluster( + app: FastAPI, *, user_id: UserID, wallet_id: WalletID | None +) -> OnDemandCluster: + """Get or create cluster for user_id and wallet_id + This function will create a new instance on AWS if needed or return the already running one. + It will also check that the underlying computational backend is up and running. + Calling several time will always return the same cluster. 
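+    Concurrent calls for the same user_id/wallet_id are serialized through the Redis
+    exclusive lock declared above (blocking for up to 10 seconds).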
+ """ + ec2_instance: EC2InstanceData | None = None + dask_scheduler_ready = False + cluster_auth = get_scheduler_auth(app) + + try: + ec2_instance = await clusters.get_cluster( + app, user_id=user_id, wallet_id=wallet_id + ) + except EC2InstanceNotFoundError: + new_ec2_instances = await clusters.create_cluster( + app, user_id=user_id, wallet_id=wallet_id + ) + assert new_ec2_instances # nosec + assert len(new_ec2_instances) == 1 # nosec + ec2_instance = new_ec2_instances[0] + + dask_scheduler_ready = bool( + ec2_instance.state == "running" + and await ping_scheduler(get_scheduler_url(ec2_instance), cluster_auth) + ) + if dask_scheduler_ready: + await clusters.cluster_heartbeat(app, user_id=user_id, wallet_id=wallet_id) + + assert ec2_instance is not None # nosec + app_settings = get_application_settings(app) + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + return create_cluster_from_ec2_instance( + ec2_instance, + user_id, + wallet_id, + dask_scheduler_ready=dask_scheduler_ready, + cluster_auth=cluster_auth, + max_cluster_start_time=app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_MAX_START_TIME, + ) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/ec2_instances.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/ec2_instances.py new file mode 100644 index 00000000000..0b1e6a4c5a5 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/ec2_instances.py @@ -0,0 +1,23 @@ +from typing import Literal + +from aws_library.ec2 import EC2InstanceType +from fastapi import FastAPI +from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet +from servicelib.rabbitmq import RPCRouter + +from ..modules.ec2 import get_ec2_client + +router = RPCRouter() + + +@router.expose() +async def get_instance_type_details( + app: FastAPI, *, instance_type_names: set[str] | Literal["ALL"] +) -> list[EC2InstanceTypeGet]: + instance_capabilities: list[EC2InstanceType] = await get_ec2_client( + app + ).get_ec2_instance_capabilities(instance_type_names) + return [ + EC2InstanceTypeGet(name=t.name, cpus=t.resources.cpus, ram=t.resources.ram) + for t in instance_capabilities + ] diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/rpc_routes.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/rpc_routes.py new file mode 100644 index 00000000000..6bce8825d80 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/rpc/rpc_routes.py @@ -0,0 +1,32 @@ +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from models_library.api_schemas_clusters_keeper import CLUSTERS_KEEPER_RPC_NAMESPACE + +from ..modules.rabbitmq import get_rabbitmq_rpc_client, is_rabbitmq_enabled +from .clusters import router as clusters_router +from .ec2_instances import router as ec2_instances_router + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _start() -> None: + if is_rabbitmq_enabled(app): + rpc_client = get_rabbitmq_rpc_client(app) + for router in [clusters_router, ec2_instances_router]: + await rpc_client.register_router( + router, CLUSTERS_KEEPER_RPC_NAMESPACE, app + ) + + return _start + + +def on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + assert app # nosec + + return _stop + + +def setup_rpc_routes(app: FastAPI) -> None: + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", 
on_app_shutdown(app)) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/__init__.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py new file mode 100644 index 00000000000..d2820ef2b88 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/clusters.py @@ -0,0 +1,212 @@ +import base64 +import datetime +import functools +import json +from pathlib import Path +from typing import Any, Final + +import arrow +import yaml +from aws_library.ec2 import EC2InstanceBootSpecific, EC2InstanceData, EC2Tags +from aws_library.ec2._models import CommandStr +from common_library.serialization import model_dump_with_secrets +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_clusters_keeper.clusters import ( + ClusterState, + OnDemandCluster, +) +from models_library.clusters import ClusterAuthentication, TLSAuthentication +from models_library.users import UserID +from models_library.wallets import WalletID +from types_aiobotocore_ec2.literals import InstanceStateNameType + +from .._meta import PACKAGE_DATA_FOLDER +from ..core.settings import ApplicationSettings +from .dask import get_scheduler_url + +_DOCKER_COMPOSE_FILE_NAME: Final[str] = "docker-compose.yml" +_PROMETHEUS_FILE_NAME: Final[str] = "prometheus.yml" +_PROMETHEUS_WEB_FILE_NAME: Final[str] = "prometheus-web.yml" +_HOST_DOCKER_COMPOSE_PATH: Final[Path] = Path(f"/{_DOCKER_COMPOSE_FILE_NAME}") +_HOST_PROMETHEUS_PATH: Final[Path] = Path(f"/{_PROMETHEUS_FILE_NAME}") +_HOST_PROMETHEUS_WEB_PATH: Final[Path] = Path(f"/{_PROMETHEUS_WEB_FILE_NAME}") +_HOST_CERTIFICATES_BASE_PATH: Final[Path] = Path("/.dask-sidecar-certificates") +_HOST_TLS_CA_FILE_PATH: Final[Path] = _HOST_CERTIFICATES_BASE_PATH / "tls_dask_ca.pem" +_HOST_TLS_CERT_FILE_PATH: Final[Path] = ( + _HOST_CERTIFICATES_BASE_PATH / "tls_dask_cert.pem" +) +_HOST_TLS_KEY_FILE_PATH: Final[Path] = _HOST_CERTIFICATES_BASE_PATH / "tls_dask_key.pem" + + +def _base_64_encode(file: Path) -> str: + assert file.exists() # nosec + with file.open("rb") as f: + return base64.b64encode(f.read()).decode("utf-8") + + +@functools.lru_cache +def _docker_compose_yml_base64_encoded() -> str: + file_path = PACKAGE_DATA_FOLDER / _DOCKER_COMPOSE_FILE_NAME + return _base_64_encode(file_path) + + +@functools.lru_cache +def _prometheus_yml_base64_encoded() -> str: + file_path = PACKAGE_DATA_FOLDER / _PROMETHEUS_FILE_NAME + return _base_64_encode(file_path) + + +@functools.lru_cache +def _prometheus_basic_auth_yml_base64_encoded( + prometheus_username: str, prometheus_password: str +) -> str: + web_config = {"basic_auth_users": {prometheus_username: prometheus_password}} + yaml_content = yaml.safe_dump(web_config) + base64_bytes = base64.b64encode(yaml_content.encode("utf-8")) + return base64_bytes.decode("utf-8") + + +def _prepare_environment_variables( + app_settings: ApplicationSettings, + *, + cluster_machines_name_prefix: str, + additional_custom_tags: EC2Tags, +) -> list[str]: + assert app_settings.CLUSTERS_KEEPER_EC2_ACCESS # nosec + assert app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES # nosec + + def _convert_to_env_list(entries: list[Any]) -> str: + entries_as_str = ",".join(rf"\"{k}\"" for k in entries) + return f"[{entries_as_str}]" + + def _convert_to_env_dict(entries: 
dict[str, Any]) -> str: + return f"'{json.dumps(jsonable_encoder(entries))}'" + + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + + return [ + f"CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID={app_settings.CLUSTERS_KEEPER_EC2_ACCESS.EC2_ACCESS_KEY_ID}", + f"CLUSTERS_KEEPER_EC2_ENDPOINT={app_settings.CLUSTERS_KEEPER_EC2_ACCESS.EC2_ENDPOINT or 'null'}", + f"CLUSTERS_KEEPER_EC2_REGION_NAME={app_settings.CLUSTERS_KEEPER_EC2_ACCESS.EC2_REGION_NAME}", + f"CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY={app_settings.CLUSTERS_KEEPER_EC2_ACCESS.EC2_SECRET_ACCESS_KEY}", + f"DASK_NTHREADS={app_settings.CLUSTERS_KEEPER_DASK_NTHREADS or ''}", + f"DASK_TLS_CA_FILE={_HOST_TLS_CA_FILE_PATH}", + f"DASK_TLS_CERT={_HOST_TLS_CERT_FILE_PATH}", + f"DASK_TLS_KEY={_HOST_TLS_KEY_FILE_PATH}", + f"DASK_WORKER_SATURATION={app_settings.CLUSTERS_KEEPER_DASK_WORKER_SATURATION}", + f"DOCKER_IMAGE_TAG={app_settings.CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG}", + f"EC2_INSTANCES_NAME_PREFIX={cluster_machines_name_prefix}", + f"LOG_LEVEL={app_settings.LOG_LEVEL}", + f"WORKERS_EC2_INSTANCES_ALLOWED_TYPES={_convert_to_env_dict(app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_ALLOWED_TYPES)}", + f"WORKERS_EC2_INSTANCES_CUSTOM_TAGS={_convert_to_env_dict(app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_CUSTOM_TAGS | additional_custom_tags)}", + f"WORKERS_EC2_INSTANCES_KEY_NAME={app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_KEY_NAME}", + f"WORKERS_EC2_INSTANCES_MAX_INSTANCES={app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_MAX_INSTANCES}", + f"WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS={_convert_to_env_list(app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS)}", + f"WORKERS_EC2_INSTANCES_SUBNET_ID={app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_SUBNET_ID}", + f"WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING={app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING}", + f"WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION={app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION}", + f"AUTOSCALING_RABBITMQ={_convert_to_env_dict(model_dump_with_secrets(app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_RABBIT, show_secrets=True)) if app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_RABBIT else 'null'}", + ] + + +def create_startup_script( + app_settings: ApplicationSettings, + *, + ec2_boot_specific: EC2InstanceBootSpecific, +) -> str: + assert app_settings.CLUSTERS_KEEPER_EC2_ACCESS # nosec + assert app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES # nosec + + startup_commands = ec2_boot_specific.custom_boot_scripts.copy() + return "\n".join(startup_commands) + + +def create_deploy_cluster_stack_script( + app_settings: ApplicationSettings, + *, + cluster_machines_name_prefix: str, + additional_custom_tags: EC2Tags, +) -> str: + deploy_script: list[CommandStr] = [] + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + if isinstance( + app_settings.CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH, + TLSAuthentication, + ): + # get the dask certificates + download_certificates_commands = [ + f"mkdir --parents {_HOST_CERTIFICATES_BASE_PATH}", + f'aws ssm get-parameter --name "{app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA}" --region us-east-1 --with-decryption --query 
"Parameter.Value" --output text > {_HOST_TLS_CA_FILE_PATH}', + f'aws ssm get-parameter --name "{app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT}" --region us-east-1 --with-decryption --query "Parameter.Value" --output text > {_HOST_TLS_CERT_FILE_PATH}', + f'aws ssm get-parameter --name "{app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY}" --region us-east-1 --with-decryption --query "Parameter.Value" --output text > {_HOST_TLS_KEY_FILE_PATH}', + ] + deploy_script.extend(download_certificates_commands) + + environment_variables = _prepare_environment_variables( + app_settings, + cluster_machines_name_prefix=cluster_machines_name_prefix, + additional_custom_tags=additional_custom_tags, + ) + + deploy_script.extend( + [ + # NOTE: https://stackoverflow.com/questions/41203492/solving-redis-warnings-on-overcommit-memory-and-transparent-huge-pages-for-ubunt + "sysctl vm.overcommit_memory=1", + f"echo '{_docker_compose_yml_base64_encoded()}' | base64 -d > {_HOST_DOCKER_COMPOSE_PATH}", + f"echo '{_prometheus_yml_base64_encoded()}' | base64 -d > {_HOST_PROMETHEUS_PATH}", + f"echo '{_prometheus_basic_auth_yml_base64_encoded(app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_PROMETHEUS_USERNAME, app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_PROMETHEUS_PASSWORD.get_secret_value())}' | base64 -d > {_HOST_PROMETHEUS_WEB_PATH}", + # NOTE: --default-addr-pool is necessary in order to prevent conflicts with AWS node IPs + f"docker swarm init --default-addr-pool {app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_DOCKER_DEFAULT_ADDRESS_POOL}", + f"{' '.join(environment_variables)} docker stack deploy --with-registry-auth --compose-file={_HOST_DOCKER_COMPOSE_PATH} dask_stack", + ] + ) + return "\n".join(deploy_script) + + +def _convert_ec2_state_to_cluster_state( + ec2_state: InstanceStateNameType, +) -> ClusterState: + match ec2_state: + case "pending": + return ClusterState.STARTED + case "running": + return ClusterState.RUNNING + case _: + return ClusterState.STOPPED + + +def _create_eta( + instance_launch_time: datetime.datetime, + *, + dask_scheduler_ready: bool, + max_cluster_start_time: datetime.timedelta, +) -> datetime.timedelta: + now = arrow.utcnow().datetime + estimated_time_to_running = instance_launch_time + max_cluster_start_time - now + if dask_scheduler_ready is True: + estimated_time_to_running = datetime.timedelta(seconds=0) + return estimated_time_to_running + + +def create_cluster_from_ec2_instance( + instance: EC2InstanceData, + user_id: UserID, + wallet_id: WalletID | None, + *, + dask_scheduler_ready: bool, + cluster_auth: ClusterAuthentication, + max_cluster_start_time: datetime.timedelta, +) -> OnDemandCluster: + return OnDemandCluster( + endpoint=get_scheduler_url(instance), + authentication=cluster_auth, + state=_convert_ec2_state_to_cluster_state(instance.state), + user_id=user_id, + wallet_id=wallet_id, + dask_scheduler_ready=dask_scheduler_ready, + eta=_create_eta( + instance.launch_time, + dask_scheduler_ready=dask_scheduler_ready, + max_cluster_start_time=max_cluster_start_time, + ), + ) diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py new file mode 100644 index 00000000000..6d32010cdd9 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/dask.py @@ -0,0 +1,18 @@ +from 
aws_library.ec2 import EC2InstanceData +from fastapi import FastAPI +from models_library.clusters import ClusterAuthentication +from pydantic import AnyUrl, TypeAdapter + +from ..core.settings import get_application_settings + + +def get_scheduler_url(ec2_instance: EC2InstanceData) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + f"tls://{ec2_instance.aws_private_dns}:8786" + ) + + +def get_scheduler_auth(app: FastAPI) -> ClusterAuthentication: + return get_application_settings( + app + ).CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH diff --git a/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/ec2.py b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/ec2.py new file mode 100644 index 00000000000..1d4534ff025 --- /dev/null +++ b/services/clusters-keeper/src/simcore_service_clusters_keeper/utils/ec2.py @@ -0,0 +1,102 @@ +from textwrap import dedent +from typing import Final + +from aws_library.ec2 import AWSTagKey, AWSTagValue, EC2Tags +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter + +from .._meta import VERSION +from ..constants import ( + MANAGER_ROLE_TAG_VALUE, + ROLE_TAG_KEY, + USER_ID_TAG_KEY, + WALLET_ID_TAG_KEY, +) +from ..core.settings import ApplicationSettings + +_APPLICATION_TAG_KEY: Final[str] = "io.simcore.clusters-keeper" +_APPLICATION_VERSION_TAG: Final[EC2Tags] = TypeAdapter(EC2Tags).validate_python( + {f"{_APPLICATION_TAG_KEY}.version": f"{VERSION}"} +) + +HEARTBEAT_TAG_KEY: Final[AWSTagKey] = TypeAdapter(AWSTagKey).validate_python( + "last_heartbeat" +) +CLUSTER_NAME_PREFIX: Final[str] = "osparc-computational-cluster-" + + +def get_cluster_name( + app_settings: ApplicationSettings, + *, + user_id: UserID, + wallet_id: WalletID | None, + is_manager: bool, +) -> str: + return f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}{CLUSTER_NAME_PREFIX}{'manager' if is_manager else 'worker'}-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:{wallet_id}" + + +def _minimal_identification_tag(app_settings: ApplicationSettings) -> EC2Tags: + return { + AWSTagKey(".".join([_APPLICATION_TAG_KEY, "deploy",])): AWSTagValue( + f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}{app_settings.SWARM_STACK_NAME}" + ) + } + + +def creation_ec2_tags( + app_settings: ApplicationSettings, *, user_id: UserID, wallet_id: WalletID | None +) -> EC2Tags: + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES # nosec + return ( + _minimal_identification_tag(app_settings) + | _APPLICATION_VERSION_TAG + | { + # NOTE: this one gets special treatment in AWS GUI and is applied to the name of the instance + AWSTagKey("Name"): AWSTagValue( + get_cluster_name( + app_settings, user_id=user_id, wallet_id=wallet_id, is_manager=True + ) + ), + USER_ID_TAG_KEY: AWSTagValue(f"{user_id}"), + WALLET_ID_TAG_KEY: AWSTagValue(f"{wallet_id}"), + ROLE_TAG_KEY: MANAGER_ROLE_TAG_VALUE, + } + | app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_CUSTOM_TAGS + ) + + +def all_created_ec2_instances_filter(app_settings: ApplicationSettings) -> EC2Tags: + return _minimal_identification_tag(app_settings) + + +def ec2_instances_for_user_wallet_filter( + app_settings: ApplicationSettings, *, user_id: UserID, wallet_id: WalletID | None +) -> EC2Tags: + return ( + _minimal_identification_tag(app_settings) + | {USER_ID_TAG_KEY: AWSTagValue(f"{user_id}")} + | {WALLET_ID_TAG_KEY: AWSTagValue(f"{wallet_id}")} + ) + + +def compose_user_data(bash_command: str) -> str: + 
    return dedent(
+        f"""\
+#!/bin/bash
+echo "started user data bash script"
+{bash_command}
+echo "completed user data bash script"
+"""
+    )
+
+
+def wallet_id_from_instance_tags(tags: EC2Tags) -> WalletID | None:
+    wallet_id_str = tags[WALLET_ID_TAG_KEY]
+    if wallet_id_str == "None":
+        return None
+    return WalletID(wallet_id_str)
+
+
+def user_id_from_instance_tags(tags: EC2Tags) -> UserID:
+    return UserID(tags[USER_ID_TAG_KEY])
diff --git a/services/clusters-keeper/tests/integration/.gitkeep b/services/clusters-keeper/tests/integration/.gitkeep
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/services/clusters-keeper/tests/manual/README.md b/services/clusters-keeper/tests/manual/README.md
new file mode 100644
index 00000000000..4ef8e0bd72c
--- /dev/null
+++ b/services/clusters-keeper/tests/manual/README.md
@@ -0,0 +1,97 @@
+# clusters-keeper manual testing
+
+This describes how to set up manual testing of a local simcore deployment against AWS, using the clusters-keeper and its external on-demand clusters.
+
+## architecture
+
+For local testing (i.e. a locally deployed osparc-simcore with external clusters) to work:
+- the same AWS S3 bucket must be accessible from both simcore and the cluster,
+- the same Docker registry must be accessible from both simcore and the cluster,
+- the same AWS EC2 must be accessible from both simcore and the cluster.
+
+```mermaid
+flowchart TD
+    S3-bucket
+    EC2
+    DockerRegistry
+
+    S3-bucket --> Simcore
+    DockerRegistry --> Simcore
+    EC2 --> Simcore
+
+    S3-bucket --> On-demand-Cluster
+    DockerRegistry --> On-demand-Cluster
+    EC2 --> On-demand-Cluster
+```
+
+## requirements
+1. AWS S3 access
+2. AWS EC2 access
+
+
+## instructions
+
+1. build simcore
+```bash
+git clone https://github.com/ITISFoundation/osparc-simcore.git
+cd osparc-simcore
+make .env # generate initial .env file
+make build-devel # build for development mode or
+make build # for production mode
+```
+
+2. prepare the docker registry (it **must** be accessible from both simcore and the cluster): edit the .env file and change the following ENVs:
+```bash
+REGISTRY_AUTH=True
+REGISTRY_PW=XXXXXXX
+REGISTRY_SSL=True
+REGISTRY_URL=XXXXXXX
+REGISTRY_USER=XXXXXXX
+```
+
+3. prepare S3 access in AWS (a dedicated, accessible bucket is required):
+```bash
+S3_ACCESS_KEY=XXXXXXX
+S3_BUCKET_NAME=XXXXXXX
+S3_ENDPOINT=https://s3.amazonaws.com
+S3_SECRET_KEY=XXXXXXX
+```
+
+4. prepare clusters-keeper:
+```bash
+CLUSTERS_KEEPER_EC2_ACCESS={}
+CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID=XXXXXXX
+CLUSTERS_KEEPER_EC2_ENDPOINT=https://ec2.amazonaws.com
+CLUSTERS_KEEPER_EC2_REGION_NAME=us-east-1
+CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY=XXXXXXX
+
+CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES={}
+PRIMARY_EC2_INSTANCES_ALLOWED_TYPES='{"t2.medium": {"ami_id": "XXXXXXXX", "custom_boot_scripts": ["whoami"]}}'
+PRIMARY_EC2_INSTANCES_KEY_NAME=XXXXXXX
+PRIMARY_EC2_INSTANCES_MAX_INSTANCES=10
+PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS="[\"XXXXXXX\"]"
+PRIMARY_EC2_INSTANCES_SUBNET_ID=XXXXXXX
+
+CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES={}
+WORKERS_EC2_INSTANCES_ALLOWED_TYPES='{"g4dn.xlarge": {"ami_id": "XXXXXXXX", "custom_boot_scripts": ["whoami"], "pre_pull_images": ["ubuntu:latest"]}}'
+WORKERS_EC2_INSTANCES_KEY_NAME=XXXXXXX
+WORKERS_EC2_INSTANCES_MAX_INSTANCES=10
+WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS="[\"XXXXXXX\"]"
+WORKERS_EC2_INSTANCES_SUBNET_ID=XXXXXXX
+WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING="00:00:20"
+WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION="00:03:00"
+WORKERS_EC2_INSTANCES_CUSTOM_TAGS='{"osparc-tag": "some fun tag value"}'
+```
+
+5. prepare the dask TLS certificates
+NOTE: the dask TLS certificates are stored in AWS and must be copied into the local stack so that the director-v2 can access the clusters.
+They are defined by PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA, PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT and PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY:
+  1. go to the AWS Parameter Store (SSM),
+  2. find these entries and copy their contents into services/dask-sidecar/.dask-certificates/dask-cert.pem and services/dask-sidecar/.dask-certificates/dask-key.pem, respectively.
+
+
+6. start osparc
+```bash
+make up-devel # for devel mode
+make up-prod # for prod mode
+```
diff --git a/services/clusters-keeper/tests/unit/conftest.py b/services/clusters-keeper/tests/unit/conftest.py
new file mode 100644
index 00000000000..a80776951de
--- /dev/null
+++ b/services/clusters-keeper/tests/unit/conftest.py
@@ -0,0 +1,372 @@
+# pylint:disable=unused-variable
+# pylint:disable=unused-argument
+# pylint:disable=redefined-outer-name
+
+import importlib.resources
+import json
+import random
+from collections.abc import AsyncIterator, Awaitable, Callable, Iterator
+from pathlib import Path
+from typing import Any
+
+import aiodocker
+import httpx
+import pytest
+import simcore_service_clusters_keeper
+import simcore_service_clusters_keeper.data
+import yaml
+from asgi_lifespan import LifespanManager
+from aws_library.ec2 import EC2InstanceBootSpecific
+from faker import Faker
+from fakeredis.aioredis import FakeRedis
+from fastapi import FastAPI
+from models_library.users import UserID
+from models_library.wallets import WalletID
+from pydantic import SecretStr
+from pytest_mock.plugin import MockerFixture
+from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict
+from servicelib.rabbitmq import RabbitMQRPCClient
+from settings_library.ec2 import EC2Settings
+from settings_library.rabbit import RabbitSettings
+from settings_library.ssm import SSMSettings
+from simcore_service_clusters_keeper.core.application import create_app
+from simcore_service_clusters_keeper.core.settings import (
+    CLUSTERS_KEEPER_ENV_PREFIX,
+    ApplicationSettings,
+)
+from simcore_service_clusters_keeper.utils.ec2 import get_cluster_name
+from types_aiobotocore_ec2.client import EC2Client
+from types_aiobotocore_ec2.literals import InstanceTypeType
+
+pytest_plugins = [
+    "pytest_simcore.aws_ec2_service",
+    "pytest_simcore.aws_server",
+    "pytest_simcore.docker",
+    "pytest_simcore.dask_scheduler",
+    "pytest_simcore.docker_compose",
+    "pytest_simcore.docker_swarm",
+    "pytest_simcore.environment_configs",
+    "pytest_simcore.faker_users_data",
+    "pytest_simcore.rabbit_service",
+    "pytest_simcore.repository_paths",
+    "pytest_simcore.simcore_service_library_fixtures",
+]
+
+
+@pytest.fixture(scope="session")
+def project_slug_dir(osparc_simcore_root_dir: Path) -> Path:
+    # fixtures in pytest_simcore.environs
+    service_folder = osparc_simcore_root_dir / "services" / "clusters-keeper"
+    assert service_folder.exists()
+    assert any(service_folder.glob("src/simcore_service_clusters_keeper"))
+    return service_folder
+
+
+@pytest.fixture(scope="session")
+def installed_package_dir() -> Path:
+    dirpath = Path(simcore_service_clusters_keeper.__file__).resolve().parent
+    assert dirpath.exists()
+    return dirpath
+
+
+@pytest.fixture(scope="session")
+def ec2_instances() -> list[InstanceTypeType]:
+    # these are some examples
+    return ["t2.nano", "m5.12xlarge"]
+
+
+@pytest.fixture
+def mocked_ec2_server_envs(
+    mocked_ec2_server_settings: EC2Settings,
monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # NOTE: overrides the EC2Settings with what clusters-keeper expects + changed_envs: EnvVarsDict = { + f"{CLUSTERS_KEEPER_ENV_PREFIX}{k}": v + for k, v in mocked_ec2_server_settings.model_dump().items() + } + return setenvs_from_dict(monkeypatch, changed_envs) + + +@pytest.fixture +def mocked_ssm_server_envs( + mocked_ssm_server_settings: SSMSettings, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # NOTE: overrides the SSMSettings with what clusters-keeper expects + changed_envs: EnvVarsDict = { + f"{CLUSTERS_KEEPER_ENV_PREFIX}{k}": ( + v.get_secret_value() if isinstance(v, SecretStr) else v + ) + for k, v in mocked_ssm_server_settings.model_dump().items() + } + return setenvs_from_dict(monkeypatch, changed_envs) + + +@pytest.fixture +def ec2_settings(mocked_ec2_server_settings: EC2Settings) -> EC2Settings: + return mocked_ec2_server_settings + + +@pytest.fixture +def app_environment( + mock_env_devel_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, + ec2_instances: list[InstanceTypeType], +) -> EnvVarsDict: + # SEE https://faker.readthedocs.io/en/master/providers/faker.providers.internet.html?highlight=internet#faker-providers-internet + envs = setenvs_from_dict( + monkeypatch, + { + "CLUSTERS_KEEPER_TRACING": "null", + "CLUSTERS_KEEPER_EC2_ACCESS": "{}", + "CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID": faker.pystr(), + "CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY": faker.pystr(), + "CLUSTERS_KEEPER_SSM_ACCESS": "{}", + "CLUSTERS_KEEPER_SSM_ACCESS_KEY_ID": faker.pystr(), + "CLUSTERS_KEEPER_SSM_SECRET_ACCESS_KEY": faker.pystr(), + "CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES": "{}", + "CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX": faker.pystr(), + "CLUSTERS_KEEPER_DASK_NTHREADS": f"{faker.pyint(min_value=0)}", + "CLUSTERS_KEEPER_DASK_WORKER_SATURATION": f"{faker.pyfloat(min_value=0.1)}", + "CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH": "{}", + "PRIMARY_EC2_INSTANCES_KEY_NAME": faker.pystr(), + "PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS": json.dumps( + faker.pylist(allowed_types=(str,)) + ), + "PRIMARY_EC2_INSTANCES_SUBNET_ID": faker.pystr(), + "PRIMARY_EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + random.choice( # noqa: S311 + ec2_instances + ): EC2InstanceBootSpecific.model_config["json_schema_extra"][ + "examples" + ][ + 1 + ] # NOTE: we use example with custom script + } + ), + "PRIMARY_EC2_INSTANCES_CUSTOM_TAGS": json.dumps( + {"osparc-tag": "the pytest tag is here"} + ), + "PRIMARY_EC2_INSTANCES_ATTACHED_IAM_PROFILE": "", # must be empty since we would need to add it to moto as well + "PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA": faker.pystr(), + "PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT": faker.pystr(), + "PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY": faker.pystr(), + "PRIMARY_EC2_INSTANCES_PROMETHEUS_USERNAME": faker.user_name(), + "PRIMARY_EC2_INSTANCES_PROMETHEUS_PASSWORD": faker.password(), + "CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES": "{}", + "WORKERS_EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + ec2_type_name: random.choice( # noqa: S311 + EC2InstanceBootSpecific.model_config["json_schema_extra"][ + "examples" + ] + ) + for ec2_type_name in ec2_instances + } + ), + "WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS": json.dumps( + faker.pylist(allowed_types=(str,)) + ), + "WORKERS_EC2_INSTANCES_SUBNET_ID": faker.pystr(), + "WORKERS_EC2_INSTANCES_KEY_NAME": faker.pystr(), + "WORKERS_EC2_INSTANCES_CUSTOM_TAGS": json.dumps( + {"osparc-tag": "the pytest worker tag value is here"} + ), + }, + ) + return 
mock_env_devel_environment | envs + + +@pytest.fixture +def mocked_primary_ec2_instances_envs( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + aws_security_group_id: str, + aws_subnet_id: str, + aws_ami_id: str, +) -> EnvVarsDict: + envs = setenvs_from_dict( + monkeypatch, + { + "PRIMARY_EC2_INSTANCES_KEY_NAME": "osparc-pytest", + "PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS": json.dumps( + [aws_security_group_id] + ), + "PRIMARY_EC2_INSTANCES_SUBNET_ID": aws_subnet_id, + }, + ) + return app_environment | envs + + +@pytest.fixture +def disable_clusters_management_background_task( + mocker: MockerFixture, +) -> Iterator[None]: + start_background_task = mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_task.create_periodic_task", + autospec=True, + ) + + stop_background_task = mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_task.cancel_wait_task", + autospec=True, + ) + + yield + + start_background_task.assert_called_once() + stop_background_task.assert_called_once() + + +@pytest.fixture +def disabled_rabbitmq(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("CLUSTERS_KEEPER_RABBITMQ", "null") + monkeypatch.delenv("RABBIT_HOST", raising=False) + + +@pytest.fixture +def disabled_ec2(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("CLUSTERS_KEEPER_EC2_ACCESS", "null") + + +@pytest.fixture +def disabled_ssm(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("CLUSTERS_KEEPER_SSM_ACCESS", "null") + + +@pytest.fixture +def enabled_rabbitmq( + app_environment: EnvVarsDict, rabbit_service: RabbitSettings +) -> RabbitSettings: + return rabbit_service + + +@pytest.fixture +async def initialized_app( + app_environment: EnvVarsDict, is_pdb_enabled: bool +) -> AsyncIterator[FastAPI]: + settings = ApplicationSettings.create_from_envs() + app = create_app(settings) + async with LifespanManager(app, shutdown_timeout=None if is_pdb_enabled else 20): + yield app + + +@pytest.fixture +def app_settings(initialized_app: FastAPI) -> ApplicationSettings: + assert initialized_app.state.settings + return initialized_app.state.settings + + +@pytest.fixture +async def async_client(initialized_app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: + async with httpx.AsyncClient( + transport=httpx.ASGITransport(app=initialized_app), + base_url=f"http://{initialized_app.title}.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +async def mocked_redis_server(mocker: MockerFixture) -> None: + mock_redis = FakeRedis() + mocker.patch("redis.asyncio.from_url", return_value=mock_redis) + + +@pytest.fixture +async def async_docker_client() -> AsyncIterator[aiodocker.Docker]: + async with aiodocker.Docker() as docker_client: + yield docker_client + + +@pytest.fixture +def clusters_keeper_docker_compose_file(installed_package_dir: Path) -> Path: + docker_compose_path = installed_package_dir / "data" / "docker-compose.yml" + assert docker_compose_path.exists() + return docker_compose_path + + +@pytest.fixture +def clusters_keeper_docker_compose() -> dict[str, Any]: + data = ( + importlib.resources.files(simcore_service_clusters_keeper.data) + .joinpath("docker-compose.yml") + .read_text() + ) + assert data + return yaml.safe_load(data) + + +@pytest.fixture +async def clusters_keeper_rabbitmq_rpc_client( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> 
RabbitMQRPCClient: + rpc_client = await rabbitmq_rpc_client("pytest_clusters_keeper_rpc_client") + assert rpc_client + return rpc_client + + +@pytest.fixture +def create_ec2_workers( + aws_ami_id: str, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + app_settings: ApplicationSettings, +) -> Callable[[int], Awaitable[list[str]]]: + async def _do(num: int) -> list[str]: + instance_type: InstanceTypeType = "c3.8xlarge" + assert app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES + instances = await ec2_client.run_instances( + ImageId=aws_ami_id, + MinCount=num, + MaxCount=num, + InstanceType=instance_type, + KeyName=app_settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES.WORKERS_EC2_INSTANCES_KEY_NAME, + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + { + "Key": "Name", + "Value": f"{get_cluster_name(app_settings, user_id=user_id, wallet_id=wallet_id, is_manager=False)}_blahblah", + } + ], + } + ], + ) + print(f"--> created {num} new instances of {instance_type=}") + instance_ids = [ + i["InstanceId"] for i in instances["Instances"] if "InstanceId" in i + ] + waiter = ec2_client.get_waiter("instance_exists") + await waiter.wait(InstanceIds=instance_ids) + instances = await ec2_client.describe_instances(InstanceIds=instance_ids) + assert "Reservations" in instances + assert instances["Reservations"] + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == num + for instance in instances["Reservations"][0]["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == "running" + assert "Tags" in instance + for tags in instance["Tags"]: + assert "Key" in tags + if "Name" in tags["Key"]: + assert "Value" in tags + assert ( + get_cluster_name( + app_settings, + user_id=user_id, + wallet_id=wallet_id, + is_manager=False, + ) + in tags["Value"] + ) + return instance_ids + + return _do diff --git a/services/clusters-keeper/tests/unit/test__meta.py b/services/clusters-keeper/tests/unit/test__meta.py new file mode 100644 index 00000000000..cc3791539e4 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test__meta.py @@ -0,0 +1,6 @@ +from simcore_service_clusters_keeper._meta import PACKAGE_DATA_FOLDER + + +def test_access_to_docker_compose_yml_file(): + assert f"{PACKAGE_DATA_FOLDER}".endswith("data") + assert (PACKAGE_DATA_FOLDER / "docker-compose.yml").exists() diff --git a/services/clusters-keeper/tests/unit/test_api_health.py b/services/clusters-keeper/tests/unit/test_api_health.py new file mode 100644 index 00000000000..e1a5de4c6ce --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_api_health.py @@ -0,0 +1,92 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import httpx +import pytest +from moto.server import ThreadedMotoServer +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_clusters_keeper.api.health import _StatusGet +from starlette import status + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + enabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, +) -> EnvVarsDict: + return app_environment + + +async def test_healthcheck(async_client: httpx.AsyncClient): + response = await async_client.get("/") + response.raise_for_status() + 
assert response.status_code == status.HTTP_200_OK + assert "simcore_service_clusters_keeper" in response.text + + +async def test_status_no_rabbit( + disabled_rabbitmq: None, + async_client: httpx.AsyncClient, +): + response = await async_client.get("/status") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK + status_response = _StatusGet.model_validate(response.json()) + assert status_response + + assert status_response.rabbitmq.is_enabled is False + assert status_response.rabbitmq.is_responsive is False + + assert status_response.ec2.is_enabled is True + assert status_response.ec2.is_responsive is True + + +async def test_status( + mocked_aws_server: ThreadedMotoServer, + async_client: httpx.AsyncClient, +): + # stop the aws server... + mocked_aws_server.stop() + + response = await async_client.get("/status") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK + status_response = _StatusGet.model_validate(response.json()) + assert status_response + + assert status_response.rabbitmq.is_enabled is True + assert status_response.rabbitmq.is_responsive is True + + assert status_response.ec2.is_enabled is True + assert status_response.ec2.is_responsive is False + + assert status_response.ssm.is_enabled is True + assert status_response.ssm.is_responsive is False + + # restart the server + mocked_aws_server.start() + + response = await async_client.get("/status") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK + status_response = _StatusGet.model_validate(response.json()) + assert status_response + + assert status_response.rabbitmq.is_enabled is True + assert status_response.rabbitmq.is_responsive is True + + assert status_response.ec2.is_enabled is True + assert status_response.ec2.is_responsive is True + + assert status_response.ssm.is_enabled is True + assert status_response.ssm.is_responsive is True diff --git a/services/clusters-keeper/tests/unit/test_cli.py b/services/clusters-keeper/tests/unit/test_cli.py new file mode 100644 index 00000000000..aae19378ebe --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_cli.py @@ -0,0 +1,21 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from simcore_service_clusters_keeper.cli import main +from typer.testing import CliRunner + +runner = CliRunner() + + +def test_settings(app_environment): + result = runner.invoke(main, ["settings"]) + assert result.exit_code == 0 + assert "APP_NAME=simcore-service-clusters-keeper" in result.stdout + + +def test_run(): + result = runner.invoke(main, ["run"]) + assert result.exit_code == 0 + assert "disabled" in result.stdout diff --git a/services/clusters-keeper/tests/unit/test_core_settings.py b/services/clusters-keeper/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..021d7f4f107 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_core_settings.py @@ -0,0 +1,112 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import json +import random + +import pytest +from aws_library.ec2 import EC2InstanceBootSpecific +from pydantic import ValidationError +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from simcore_service_clusters_keeper.core.settings import ApplicationSettings +from types_aiobotocore_ec2.literals import InstanceTypeType + + +def test_settings(app_environment: EnvVarsDict): + settings = 
ApplicationSettings.create_from_envs() + assert settings.CLUSTERS_KEEPER_EC2_ACCESS + assert settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES + assert settings.CLUSTERS_KEEPER_RABBITMQ + assert settings.CLUSTERS_KEEPER_REDIS + assert settings.CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES + + +@pytest.mark.xfail( + reason="disabling till pydantic2 migration is complete see https://github.com/ITISFoundation/osparc-simcore/pull/6705" +) +def test_empty_primary_ec2_instances_raises( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +): + setenvs_from_dict( + monkeypatch, {"PRIMARY_EC2_INSTANCES_ALLOWED_TYPES": json.dumps({})} + ) + with pytest.raises(ValidationError, match="Only one exact value"): + ApplicationSettings.create_from_envs() + + +@pytest.mark.xfail( + reason="disabling till pydantic2 migration is complete see https://github.com/ITISFoundation/osparc-simcore/pull/6705" +) +def test_multiple_primary_ec2_instances_raises( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + ec2_instances: list[InstanceTypeType], +): + setenvs_from_dict( + monkeypatch, + { + "PRIMARY_EC2_INSTANCES_ALLOWED_TYPES": json.dumps( + { + ec2_type_name: random.choice( # noqa: S311 + EC2InstanceBootSpecific.model_config["json_schema_extra"][ + "examples" + ] + ) + for ec2_type_name in ec2_instances + } + ) + }, + ) + with pytest.raises(ValidationError, match="Only one exact value"): + ApplicationSettings.create_from_envs() + + +@pytest.mark.xfail( + reason="disabling till pydantic2 migration is complete see https://github.com/ITISFoundation/osparc-simcore/pull/6705" +) +@pytest.mark.parametrize( + "invalid_tag", + [ + {".": "single dot is invalid"}, + {"..": "single 2 dots is invalid"}, + {"": "empty tag key"}, + {"/": "slash is invalid"}, + {" ": "space is invalid"}, + ], + ids=str, +) +def test_invalid_primary_custom_tags_raises( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + invalid_tag: dict[str, str], +): + setenvs_from_dict( + monkeypatch, + {"PRIMARY_EC2_INSTANCES_CUSTOM_TAGS": json.dumps(invalid_tag)}, + ) + with pytest.raises(ValidationError): + ApplicationSettings.create_from_envs() + + +@pytest.mark.parametrize( + "valid_tag", + [ + {"...": "3 dots is valid"}, + {"..fdkjdlk..dsflkjsd=-lkjfie@": ""}, + {"abcdef-lsaj+-=._:@": "values are able to take almost anything"}, + ], + ids=str, +) +def test_valid_primary_custom_tags( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + valid_tag: dict[str, str], +): + setenvs_from_dict( + monkeypatch, + {"PRIMARY_EC2_INSTANCES_CUSTOM_TAGS": json.dumps(valid_tag)}, + ) + ApplicationSettings.create_from_envs() diff --git a/services/clusters-keeper/tests/unit/test_data_docker_compose.py b/services/clusters-keeper/tests/unit/test_data_docker_compose.py new file mode 100644 index 00000000000..a18edd0c793 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_data_docker_compose.py @@ -0,0 +1,45 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from typing import Any + + +def _get_service_from_compose( + compose: dict[str, Any], service_name: str +) -> dict[str, Any]: + services = compose.get("services") + assert services + assert isinstance(services, dict) + assert len(services) > 0 + assert service_name in services, f"{service_name} is missing from {services}" + return services[service_name] + + +def test_redis_version_same_as_main_docker_compose( + simcore_docker_compose: dict[str, Any], + clusters_keeper_docker_compose: dict[str, 
Any], +): + simcore_redis = _get_service_from_compose(simcore_docker_compose, "redis") + clusters_keeper_redis = _get_service_from_compose( + clusters_keeper_docker_compose, "redis" + ) + + assert simcore_redis["image"] == clusters_keeper_redis["image"] + + +def test_all_services_run_on_manager_but_dask_sidecar( + clusters_keeper_docker_compose: dict[str, Any] +): + for service_name, service_config in clusters_keeper_docker_compose[ + "services" + ].items(): + assert "deploy" in service_config + assert "placement" in service_config["deploy"] + assert "constraints" in service_config["deploy"]["placement"] + assert service_config["deploy"]["placement"]["constraints"] == [ + "node.role==worker" + if service_name == "dask-sidecar" + else "node.role==manager" + ] diff --git a/services/clusters-keeper/tests/unit/test_main.py b/services/clusters-keeper/tests/unit/test_main.py new file mode 100644 index 00000000000..96d7fb8507d --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_main.py @@ -0,0 +1,12 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict + + +def test_main_app(app_environment: EnvVarsDict): + from simcore_service_clusters_keeper.main import the_app, the_settings + + assert the_app.state.settings == the_settings diff --git a/services/clusters-keeper/tests/unit/test_modules_clusters.py b/services/clusters-keeper/tests/unit/test_modules_clusters.py new file mode 100644 index 00000000000..497b9e447e7 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_clusters.py @@ -0,0 +1,287 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import datetime +from collections.abc import Awaitable, Callable + +import arrow +import pytest +from aws_library.ec2 import EC2InstanceData +from aws_library.ec2._errors import EC2InstanceNotFoundError +from faker import Faker +from fastapi import FastAPI +from models_library.users import UserID +from models_library.wallets import WalletID +from parse import Result, search +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_clusters_keeper._meta import VERSION as APP_VERSION +from simcore_service_clusters_keeper.core.settings import ( + ApplicationSettings, + get_application_settings, +) +from simcore_service_clusters_keeper.modules.clusters import ( + cluster_heartbeat, + create_cluster, + delete_clusters, + get_cluster, + get_cluster_workers, +) +from simcore_service_clusters_keeper.utils.ec2 import ( + _APPLICATION_TAG_KEY, + CLUSTER_NAME_PREFIX, + HEARTBEAT_TAG_KEY, +) +from types_aiobotocore_ec2 import EC2Client + + +@pytest.fixture +def wallet_id(faker: Faker) -> WalletID: + return faker.pyint(min_value=1) + + +@pytest.fixture +def _base_configuration( + docker_swarm: None, + disabled_rabbitmq: None, + mocked_redis_server: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_primary_ec2_instances_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, +) -> None: + ... 
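+# NOTE (editor, hedged): the `_base_configuration` fixture above only aggregates the mocked
+# environments (docker swarm, mocked EC2/SSM servers, fake redis, disabled rabbitmq) so that the
+# application under test can start without touching real AWS services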
+ + +async def _assert_cluster_instance_created( + app_settings: ApplicationSettings, + ec2_client: EC2Client, + instance_id: str, + user_id: UserID, + wallet_id: WalletID, +) -> None: + instances = await ec2_client.describe_instances(InstanceIds=[instance_id]) + assert len(instances["Reservations"]) == 1 + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == 1 + assert "Tags" in instances["Reservations"][0]["Instances"][0] + instance_ec2_tags = instances["Reservations"][0]["Instances"][0]["Tags"] + assert len(instance_ec2_tags) == 7 + assert all("Key" in x for x in instance_ec2_tags) + assert all("Value" in x for x in instance_ec2_tags) + + _EXPECTED_TAGS: dict[str, str] = { + f"{_APPLICATION_TAG_KEY}.deploy": f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}{app_settings.SWARM_STACK_NAME}", + f"{_APPLICATION_TAG_KEY}.version": f"{APP_VERSION}", + "Name": f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}{CLUSTER_NAME_PREFIX}manager-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:{wallet_id}", + "user_id": f"{user_id}", + "wallet_id": f"{wallet_id}", + "role": "manager", + "osparc-tag": "the pytest tag is here", + } + for tag in instances["Reservations"][0]["Instances"][0]["Tags"]: + assert "Key" in tag + assert "Value" in tag + assert tag["Key"] in _EXPECTED_TAGS + assert tag["Value"] == _EXPECTED_TAGS[tag["Key"]] + + assert "Key" in instances["Reservations"][0]["Instances"][0]["Tags"][2] + assert instances["Reservations"][0]["Instances"][0]["Tags"][2]["Key"] == "Name" + assert "Value" in instances["Reservations"][0]["Instances"][0]["Tags"][2] + instance_name = instances["Reservations"][0]["Instances"][0]["Tags"][2]["Value"] + + parse_result = search("user_id:{user_id:d}-wallet_id:{wallet_id:d}", instance_name) + assert isinstance(parse_result, Result) + assert parse_result["user_id"] == user_id + assert parse_result["wallet_id"] == wallet_id + + +async def _create_cluster( + app: FastAPI, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, +) -> list[EC2InstanceData]: + created_clusters = await create_cluster(app, user_id=user_id, wallet_id=wallet_id) + assert len(created_clusters) == 1 + # check we do have a new machine in AWS + + await _assert_cluster_instance_created( + get_application_settings(app), + ec2_client, + created_clusters[0].id, + user_id, + wallet_id, + ) + return created_clusters + + +async def test_create_cluster( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + await _create_cluster(initialized_app, ec2_client, user_id, wallet_id) + + +async def test_get_cluster( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + # create multiple clusters for different users + user_ids = [user_id, user_id + 13, user_id + 456] + list_created_clusters = await asyncio.gather( + *( + _create_cluster(initialized_app, ec2_client, user_id=u, wallet_id=wallet_id) + for u in user_ids + ) + ) + for u, created_clusters in zip(user_ids, list_created_clusters, strict=True): + returned_cluster = await get_cluster( + initialized_app, user_id=u, wallet_id=wallet_id + ) + assert created_clusters[0] == returned_cluster + + +async def test_get_cluster_raises_if_not_found( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + with pytest.raises(EC2InstanceNotFoundError): + await 
get_cluster(initialized_app, user_id=user_id, wallet_id=wallet_id) + + +async def test_get_cluster_workers_returns_empty_if_no_workers( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + assert ( + await get_cluster_workers(initialized_app, user_id=user_id, wallet_id=wallet_id) + == [] + ) + + +async def test_get_cluster_workers_does_not_return_cluster_primary_machine( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + await _create_cluster(initialized_app, ec2_client, user_id, wallet_id) + assert ( + await get_cluster_workers(initialized_app, user_id=user_id, wallet_id=wallet_id) + == [] + ) + + +async def test_get_cluster_workers( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, + create_ec2_workers: Callable[[int], Awaitable[list[str]]], +): + created_instance_ids = await create_ec2_workers(10) + returned_ec2_instances = await get_cluster_workers( + initialized_app, user_id=user_id, wallet_id=wallet_id + ) + assert len(created_instance_ids) == len(returned_ec2_instances) + + +async def _assert_cluster_heartbeat_on_instance( + ec2_client: EC2Client, +) -> datetime.datetime: + instances = await ec2_client.describe_instances() + assert len(instances["Reservations"]) == 1 + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == 1 + assert "Tags" in instances["Reservations"][0]["Instances"][0] + instance_tags = instances["Reservations"][0]["Instances"][0]["Tags"] + assert len(instance_tags) == 8 + assert all("Key" in x for x in instance_tags) + list_of_heartbeats = list( + filter(lambda x: x["Key"] == HEARTBEAT_TAG_KEY, instance_tags) # type:ignore + ) + assert len(list_of_heartbeats) == 1 + assert "Value" in list_of_heartbeats[0] + this_heartbeat_time = arrow.get(list_of_heartbeats[0]["Value"]).datetime + assert this_heartbeat_time + return this_heartbeat_time + + +async def test_cluster_heartbeat( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + await _create_cluster(initialized_app, ec2_client, user_id, wallet_id) + + await cluster_heartbeat(initialized_app, user_id=user_id, wallet_id=wallet_id) + first_heartbeat_time = await _assert_cluster_heartbeat_on_instance(ec2_client) + + await asyncio.sleep(1) + + await cluster_heartbeat(initialized_app, user_id=user_id, wallet_id=wallet_id) + next_heartbeat_time = await _assert_cluster_heartbeat_on_instance(ec2_client) + + assert next_heartbeat_time > first_heartbeat_time + + +async def test_cluster_heartbeat_on_non_existing_cluster_raises( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + with pytest.raises(EC2InstanceNotFoundError): + await cluster_heartbeat(initialized_app, user_id=user_id, wallet_id=wallet_id) + + +async def _assert_all_clusters_terminated( + ec2_client: EC2Client, +) -> None: + described_instances = await ec2_client.describe_instances() + if "Reservations" not in described_instances: + print("no reservations on AWS. 
ok.") + return + + for reservation in described_instances["Reservations"]: + if "Instances" not in reservation: + print("no instance in reservation on AWS, weird but ok.") + continue + + for instance in reservation["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == "terminated" + + +async def test_delete_cluster( + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + initialized_app: FastAPI, +): + created_instances = await _create_cluster( + initialized_app, ec2_client, user_id, wallet_id + ) + await delete_clusters(initialized_app, instances=created_instances) + await _assert_all_clusters_terminated(ec2_client) diff --git a/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py b/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py new file mode 100644 index 00000000000..d06a6aaeedd --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_clusters_management_core.py @@ -0,0 +1,328 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +import dataclasses +import datetime +from collections.abc import Awaitable, Callable +from typing import Final +from unittest.mock import MagicMock + +import arrow +import pytest +from attr import dataclass +from aws_library.ec2 import EC2InstanceData +from faker import Faker +from fastapi import FastAPI +from models_library.users import UserID +from models_library.wallets import WalletID +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_clusters_keeper.core.settings import ApplicationSettings +from simcore_service_clusters_keeper.modules.clusters import ( + cluster_heartbeat, + create_cluster, +) +from simcore_service_clusters_keeper.modules.clusters_management_core import ( + check_clusters, +) +from types_aiobotocore_ec2 import EC2Client +from types_aiobotocore_ec2.literals import InstanceStateNameType + + +@pytest.fixture(params=("with_wallet", "without_wallet")) +def wallet_id(faker: Faker, request: pytest.FixtureRequest) -> WalletID | None: + return faker.pyint(min_value=1) if request.param == "with_wallet" else None + + +_FAST_TIME_BEFORE_TERMINATION_SECONDS: Final[datetime.timedelta] = datetime.timedelta( + seconds=10 +) + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # fast interval + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION": "1", + "SERVICE_TRACKING_HEARTBEAT": f"{_FAST_TIME_BEFORE_TERMINATION_SECONDS}", + }, + ) + + +@pytest.fixture +def _base_configuration( + disabled_rabbitmq: None, + mocked_redis_server: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_primary_ec2_instances_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, +) -> None: + ... 
+ + +async def _assert_cluster_exist_and_state( + ec2_client: EC2Client, + *, + instances: list[EC2InstanceData], + state: InstanceStateNameType, +) -> None: + described_instances = await ec2_client.describe_instances( + InstanceIds=[i.id for i in instances] + ) + assert described_instances + assert "Reservations" in described_instances + + for reservation in described_instances["Reservations"]: + assert "Instances" in reservation + + for instance in reservation["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == state + + +async def _assert_instances_state( + ec2_client: EC2Client, *, instance_ids: list[str], state: InstanceStateNameType +) -> None: + described_instances = await ec2_client.describe_instances(InstanceIds=instance_ids) + assert described_instances + assert "Reservations" in described_instances + + for reservation in described_instances["Reservations"]: + assert "Instances" in reservation + + for instance in reservation["Instances"]: + assert "State" in instance + assert "Name" in instance["State"] + assert instance["State"]["Name"] == state + + +@dataclass +class MockedDaskModule: + ping_scheduler: MagicMock + is_scheduler_busy: MagicMock + + +@pytest.fixture +def mocked_dask_ping_scheduler(mocker: MockerFixture) -> MockedDaskModule: + return MockedDaskModule( + ping_scheduler=mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_core.ping_scheduler", + autospec=True, + return_value=True, + ), + is_scheduler_busy=mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_core.is_scheduler_busy", + autospec=True, + return_value=True, + ), + ) + + +async def test_cluster_management_core_properly_removes_unused_instances( + disable_clusters_management_background_task: None, + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID | None, + initialized_app: FastAPI, + mocked_dask_ping_scheduler: MockedDaskModule, +): + created_clusters = await create_cluster( + initialized_app, user_id=user_id, wallet_id=wallet_id + ) + assert len(created_clusters) == 1 + + # running the cluster management task shall not remove anything + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() + + # running the cluster management task after the heartbeat came in shall not remove anything + await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1) + await cluster_heartbeat(initialized_app, user_id=user_id, wallet_id=wallet_id) + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() + + # after waiting the termination time, running the task shall remove the cluster + await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1) + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, 
state="terminated" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + + +async def test_cluster_management_core_properly_removes_workers_on_shutdown( + disable_clusters_management_background_task: None, + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID | None, + initialized_app: FastAPI, + mocked_dask_ping_scheduler: MockedDaskModule, + create_ec2_workers: Callable[[int], Awaitable[list[str]]], +): + created_clusters = await create_cluster( + initialized_app, user_id=user_id, wallet_id=wallet_id + ) + assert len(created_clusters) == 1 + + # running the cluster management task shall not remove anything + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() + + # create some workers + worker_instance_ids = await create_ec2_workers(10) + await _assert_instances_state( + ec2_client, instance_ids=worker_instance_ids, state="running" + ) + # after waiting the termination time, running the task shall remove the cluster + await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1) + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="terminated" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + # check workers were also terminated + await _assert_instances_state( + ec2_client, instance_ids=worker_instance_ids, state="terminated" + ) + + +async def test_cluster_management_core_removes_long_starting_clusters_after_some_delay( + disable_clusters_management_background_task: None, + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID | None, + initialized_app: FastAPI, + mocked_dask_ping_scheduler: MockedDaskModule, + app_settings: ApplicationSettings, + mocker: MockerFixture, +): + created_clusters = await create_cluster( + initialized_app, user_id=user_id, wallet_id=wallet_id + ) + assert len(created_clusters) == 1 + + # simulate unresponsive dask-scheduler + mocked_dask_ping_scheduler.ping_scheduler.return_value = False + + # running the cluster management task shall not remove anything + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_not_called() + + the_cluster = created_clusters[0] + + # running now the cluster management task shall remove the cluster + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES + mocked_get_all_clusters = mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_core.get_all_clusters", + autospec=True, + return_value={ + dataclasses.replace( + the_cluster, + launch_time=arrow.utcnow() + .shift( + seconds=-app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_MAX_START_TIME.total_seconds() + ) + .datetime, + ) + }, + ) + await check_clusters(initialized_app) + 
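+    # the patched launch_time makes the cluster appear older than
+    # PRIMARY_EC2_INSTANCES_MAX_START_TIME, so this management pass is expected to terminate it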
mocked_get_all_clusters.assert_called_once() + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="terminated" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_not_called() + + +async def test_cluster_management_core_removes_broken_clusters_after_some_delay( + disable_clusters_management_background_task: None, + _base_configuration: None, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID | None, + initialized_app: FastAPI, + mocked_dask_ping_scheduler: MockedDaskModule, + create_ec2_workers: Callable[[int], Awaitable[list[str]]], + app_settings: ApplicationSettings, + mocker: MockerFixture, +): + created_clusters = await create_cluster( + initialized_app, user_id=user_id, wallet_id=wallet_id + ) + assert len(created_clusters) == 1 + + # simulate a responsive dask-scheduler + mocked_dask_ping_scheduler.ping_scheduler.return_value = True + + # running the cluster management task shall not remove anything + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_called_once() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() + + # simulate now a non responsive dask-scheduler, which means it is broken + mocked_dask_ping_scheduler.ping_scheduler.return_value = False + + # running now the cluster management will not instantly remove the cluster, so now nothing will happen + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="running" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_not_called() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() + + # waiting for the termination time will now terminate the cluster + await asyncio.sleep(_FAST_TIME_BEFORE_TERMINATION_SECONDS.total_seconds() + 1) + await check_clusters(initialized_app) + await _assert_cluster_exist_and_state( + ec2_client, instances=created_clusters, state="terminated" + ) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + mocked_dask_ping_scheduler.is_scheduler_busy.assert_not_called() + mocked_dask_ping_scheduler.is_scheduler_busy.reset_mock() diff --git a/services/clusters-keeper/tests/unit/test_modules_clusters_management_task.py b/services/clusters-keeper/tests/unit/test_modules_clusters_management_task.py new file mode 100644 index 00000000000..66bbf12c42e --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_clusters_management_task.py @@ -0,0 +1,47 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +from unittest import mock + +import pytest +from fastapi import FastAPI +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from simcore_service_clusters_keeper.core.settings import ApplicationSettings + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + 
monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, {"CLUSTERS_KEEPER_TASK_INTERVAL": "00:00:01"} + ) + + +@pytest.fixture +def mock_background_task(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_clusters_keeper.modules.clusters_management_task.check_clusters", + autospec=True, + ) + + +async def test_clusters_management_task_created_and_deleted( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + mock_background_task: mock.Mock, + initialized_app: FastAPI, + app_settings: ApplicationSettings, +): + assert app_settings.CLUSTERS_KEEPER_TASK_INTERVAL.total_seconds() == 1 + assert hasattr(initialized_app.state, "clusters_cleaning_task") + await asyncio.sleep(5) + mock_background_task.assert_called() diff --git a/services/clusters-keeper/tests/unit/test_modules_dask.py b/services/clusters-keeper/tests/unit/test_modules_dask.py new file mode 100644 index 00000000000..bc8d8739f54 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_dask.py @@ -0,0 +1,96 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +import time + +import distributed +import pytest +from distributed import SpecCluster +from faker import Faker +from models_library.clusters import ( + ClusterAuthentication, + NoAuthentication, + TLSAuthentication, +) +from pydantic import AnyUrl, TypeAdapter +from simcore_service_clusters_keeper.modules.dask import ( + is_scheduler_busy, + ping_scheduler, +) +from tenacity import retry +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +_authentication_types = [ + NoAuthentication(), + TLSAuthentication.model_construct( + **TLSAuthentication.model_json_schema()["examples"][0] + ), +] + + +@pytest.mark.parametrize( + "authentication", _authentication_types, ids=lambda p: f"authentication-{p.type}" +) +async def test_ping_scheduler_non_existing_scheduler( + faker: Faker, authentication: ClusterAuthentication +): + assert ( + await ping_scheduler( + TypeAdapter(AnyUrl).validate_python( + f"tcp://{faker.ipv4()}:{faker.port_number()}" + ), + authentication, + ) + is False + ) + + +async def test_ping_scheduler(dask_spec_local_cluster: SpecCluster): + assert ( + await ping_scheduler( + TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ), + NoAuthentication(), + ) + is True + ) + + +@retry( + wait=wait_fixed(1), + stop=stop_after_delay(30), + retry=retry_if_exception_type(AssertionError), +) +async def _assert_scheduler_is_busy(url: AnyUrl, *, busy: bool) -> None: + print(f"--> waiting for osparc-dask-scheduler to become {busy=}") + assert await is_scheduler_busy(url, NoAuthentication()) is busy + print(f"scheduler is now {busy=}") + + +async def test_is_scheduler_busy( + dask_spec_local_cluster: distributed.SpecCluster, + dask_spec_cluster_client: distributed.Client, +): + # nothing runs right now + scheduler_address = TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ) + assert await is_scheduler_busy(scheduler_address, NoAuthentication()) is False + _SLEEP_TIME = 5 + + def _some_long_running_fct(sleep_time: int) -> str: + time.sleep(sleep_time) + return f"I slept for {sleep_time} seconds" + + future = dask_spec_cluster_client.submit(_some_long_running_fct, _SLEEP_TIME) + + await 
_assert_scheduler_is_busy( + url=scheduler_address, + busy=True, + ) + + result = await future.result(timeout=2 * _SLEEP_TIME) + assert "seconds" in result diff --git a/services/clusters-keeper/tests/unit/test_modules_ec2.py b/services/clusters-keeper/tests/unit/test_modules_ec2.py new file mode 100644 index 00000000000..439e54aaa2d --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_ec2.py @@ -0,0 +1,26 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_clusters_keeper.core.errors import ConfigurationError +from simcore_service_clusters_keeper.modules.ec2 import get_ec2_client +from simcore_service_clusters_keeper.modules.ssm import get_ssm_client + + +async def test_ec2_does_not_initialize_if_ec2_deactivated( + disabled_rabbitmq: None, + disabled_ec2: None, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "ec2_client") + assert initialized_app.state.ec2_client is None + with pytest.raises(ConfigurationError): + get_ec2_client(initialized_app) + + assert get_ssm_client(initialized_app) diff --git a/services/clusters-keeper/tests/unit/test_modules_rabbitmq.py b/services/clusters-keeper/tests/unit/test_modules_rabbitmq.py new file mode 100644 index 00000000000..e1ef5f850dc --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_rabbitmq.py @@ -0,0 +1,152 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +from collections.abc import Callable +from contextlib import AbstractAsyncContextManager + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.rabbitmq_messages import LoggerRabbitMessage, RabbitMessageBase +from pytest_mock.plugin import MockerFixture +from servicelib.rabbitmq import BIND_TO_ALL_TOPICS, RabbitMQClient +from settings_library.rabbit import RabbitSettings +from simcore_service_clusters_keeper.core.errors import ConfigurationError +from simcore_service_clusters_keeper.modules.rabbitmq import ( + get_rabbitmq_client, + get_rabbitmq_rpc_client, + is_rabbitmq_enabled, + post_message, +) +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "stop": stop_after_delay(30), + "wait": wait_fixed(0.1), +} + +# Selection of core and tool services started in this swarm fixture (integration) +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def rabbit_log_message(faker: Faker) -> LoggerRabbitMessage: + return LoggerRabbitMessage( + user_id=faker.pyint(min_value=1), + project_id=faker.uuid4(cast_to=None), + node_id=faker.uuid4(cast_to=None), + messages=faker.pylist(allowed_types=(str,)), + ) + + +@pytest.fixture(params=["rabbit_log_message"]) +def rabbit_message( + request: pytest.FixtureRequest, + rabbit_log_message: LoggerRabbitMessage, +) -> RabbitMessageBase: + return { + "rabbit_log_message": rabbit_log_message, + }[request.param] + + +def test_rabbitmq_does_not_initialize_if_deactivated( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + 
mocked_redis_server: None, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "rabbitmq_client") + assert initialized_app.state.rabbitmq_client is None + assert initialized_app.state.rabbitmq_rpc_server is None + with pytest.raises(ConfigurationError): + get_rabbitmq_client(initialized_app) + with pytest.raises(ConfigurationError): + get_rabbitmq_rpc_client(initialized_app) + assert is_rabbitmq_enabled(initialized_app) is False + + +def test_rabbitmq_initializes( + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "rabbitmq_client") + assert initialized_app.state.rabbitmq_client is not None + assert initialized_app.state.rabbitmq_rpc_server is not None + assert get_rabbitmq_client(initialized_app) == initialized_app.state.rabbitmq_client + assert ( + get_rabbitmq_rpc_client(initialized_app) + == initialized_app.state.rabbitmq_rpc_server + ) + assert is_rabbitmq_enabled(initialized_app) is True + + +async def test_post_message( + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + rabbit_message: RabbitMessageBase, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocker: MockerFixture, +): + mocked_message_handler = mocker.AsyncMock(return_value=True) + client = create_rabbitmq_client("pytest_consumer") + await client.subscribe( + rabbit_message.channel_name, + mocked_message_handler, + topics=[BIND_TO_ALL_TOPICS] if rabbit_message.routing_key() else None, + ) + await post_message(initialized_app, message=rabbit_message) + + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + print( + f"--> checking for message in rabbit exchange {rabbit_message.channel_name}, {attempt.retry_state.retry_object.statistics}" + ) + mocked_message_handler.assert_called_once_with( + rabbit_message.model_dump_json().encode() + ) + print("... message received") + + +async def test_post_message_with_disabled_rabbit_does_not_raise( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + rabbit_message: RabbitMessageBase, +): + await post_message(initialized_app, message=rabbit_message) + + +async def test_post_message_when_rabbit_disconnected_does_not_raise( + paused_container: Callable[[str], AbstractAsyncContextManager[None]], + enabled_rabbitmq: RabbitSettings, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, + rabbit_log_message: LoggerRabbitMessage, +): + # NOTE: if the connection is not initialized before pausing the container, then + # this test hangs forever!!! This needs investigations! 
+ await post_message(initialized_app, message=rabbit_log_message) + async with paused_container("rabbit"): + # now posting should not raise out + await post_message(initialized_app, message=rabbit_log_message) diff --git a/services/clusters-keeper/tests/unit/test_modules_redis.py b/services/clusters-keeper/tests/unit/test_modules_redis.py new file mode 100644 index 00000000000..44fb9a9f6ac --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_redis.py @@ -0,0 +1,18 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from fastapi import FastAPI +from simcore_service_clusters_keeper.modules.redis import get_redis_client + + +async def test_redis_raises_if_missing( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, +): + client = get_redis_client(initialized_app) + assert await client.ping() is True diff --git a/services/clusters-keeper/tests/unit/test_modules_remote_debug.py b/services/clusters-keeper/tests/unit/test_modules_remote_debug.py new file mode 100644 index 00000000000..3fe8b823d13 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_remote_debug.py @@ -0,0 +1,30 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "SC_BOOT_MODE": "debug", + }, + ) + + +def test_application_with_debug_enabled( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, +): + ... 
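Several of the tests in this diff (e.g. `_assert_scheduler_is_busy` and the RabbitMQ consumer check driven by `_TENACITY_RETRY_PARAMS`) poll an assertion with tenacity until it passes or a deadline expires. The following is a minimal, self-contained sketch of that retry-on-AssertionError pattern; the `condition_is_met` coroutine is a hypothetical stand-in for the real check (e.g. "the mocked consumer was called"), not code from the repository.

```python
import asyncio

from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed


async def condition_is_met() -> bool:
    # hypothetical stand-in: replace with the real check,
    # e.g. "the scheduler reports a running task" or "the mock was called"
    return True


async def wait_until_condition_holds() -> None:
    # retry the assertion every 0.1s for up to 30s, re-raising the last
    # AssertionError if the deadline expires (mirrors _TENACITY_RETRY_PARAMS)
    async for attempt in AsyncRetrying(
        reraise=True,
        retry=retry_if_exception_type(AssertionError),
        stop=stop_after_delay(30),
        wait=wait_fixed(0.1),
    ):
        with attempt:
            assert await condition_is_met()


if __name__ == "__main__":
    asyncio.run(wait_until_condition_holds())
```

This keeps the test body declarative (a plain `assert`) while tenacity handles the waiting and the final failure reporting.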
diff --git a/services/clusters-keeper/tests/unit/test_modules_ssm.py b/services/clusters-keeper/tests/unit/test_modules_ssm.py new file mode 100644 index 00000000000..3bcffb72661 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_modules_ssm.py @@ -0,0 +1,22 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from fastapi import FastAPI +from simcore_service_clusters_keeper.core.errors import ConfigurationError +from simcore_service_clusters_keeper.modules.ssm import get_ssm_client + + +async def test_ssm_does_not_initialize_if_ssm_deactivated( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "ssm_client") + assert initialized_app.state.ssm_client is None + with pytest.raises(ConfigurationError): + get_ssm_client(initialized_app) diff --git a/services/clusters-keeper/tests/unit/test_rpc_clusters.py b/services/clusters-keeper/tests/unit/test_rpc_clusters.py new file mode 100644 index 00000000000..a280cbb5338 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_rpc_clusters.py @@ -0,0 +1,172 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import datetime +from dataclasses import dataclass +from unittest.mock import MagicMock + +import arrow +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_clusters_keeper.clusters import OnDemandCluster +from models_library.users import UserID +from models_library.wallets import WalletID +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.clusters_keeper.clusters import ( + get_or_create_cluster, +) +from simcore_service_clusters_keeper.utils.ec2 import HEARTBEAT_TAG_KEY +from types_aiobotocore_ec2 import EC2Client + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def wallet_id(faker: Faker) -> WalletID: + return faker.pyint(min_value=1) + + +@pytest.fixture +def _base_configuration( + docker_swarm: None, + enabled_rabbitmq: None, + mocked_redis_server: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_primary_ec2_instances_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + initialized_app: FastAPI, + ensure_run_in_sequence_context_is_empty: None, +) -> None: + ... 
+ + +async def _assert_cluster_instance_created(ec2_client: EC2Client) -> None: + instances = await ec2_client.describe_instances() + assert len(instances["Reservations"]) == 1 + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == 1 + + +async def _assert_cluster_heartbeat_on_instance( + ec2_client: EC2Client, +) -> datetime.datetime: + instances = await ec2_client.describe_instances() + assert len(instances["Reservations"]) == 1 + assert "Instances" in instances["Reservations"][0] + assert len(instances["Reservations"][0]["Instances"]) == 1 + assert "Tags" in instances["Reservations"][0]["Instances"][0] + instance_tags = instances["Reservations"][0]["Instances"][0]["Tags"] + assert all("Key" in x for x in instance_tags) + list_of_heartbeats = list( + filter(lambda x: x["Key"] == HEARTBEAT_TAG_KEY, instance_tags) # type:ignore + ) + assert len(list_of_heartbeats) == 1 + assert "Value" in list_of_heartbeats[0] + this_heartbeat_time = arrow.get(list_of_heartbeats[0]["Value"]).datetime + assert this_heartbeat_time + return this_heartbeat_time + + +@dataclass +class MockedDaskModule: + ping_scheduler: MagicMock + + +@pytest.fixture +def mocked_dask_ping_scheduler(mocker: MockerFixture) -> MockedDaskModule: + return MockedDaskModule( + ping_scheduler=mocker.patch( + "simcore_service_clusters_keeper.rpc.clusters.ping_scheduler", + autospec=True, + return_value=True, + ), + ) + + +@pytest.fixture +def disable_get_or_create_cluster_caching(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("AIOCACHE_DISABLE", "1") + + +@pytest.mark.parametrize("use_wallet_id", [True, False]) +async def test_get_or_create_cluster( + disable_get_or_create_cluster_caching: None, + _base_configuration: None, + clusters_keeper_rabbitmq_rpc_client: RabbitMQRPCClient, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + use_wallet_id: bool, + mocked_dask_ping_scheduler: MockedDaskModule, +): + # send rabbitmq rpc to create_cluster + + rpc_response = await get_or_create_cluster( + clusters_keeper_rabbitmq_rpc_client, + user_id=user_id, + wallet_id=wallet_id if use_wallet_id else None, + ) + assert rpc_response + assert isinstance(rpc_response, OnDemandCluster) + created_cluster = rpc_response + # check we do have a new machine in AWS + await _assert_cluster_instance_created(ec2_client) + # it is called once as moto server creates instances instantly + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + mocked_dask_ping_scheduler.ping_scheduler.reset_mock() + + # calling it again returns the existing cluster + rpc_response = await get_or_create_cluster( + clusters_keeper_rabbitmq_rpc_client, + user_id=user_id, + wallet_id=wallet_id if use_wallet_id else None, + ) + assert rpc_response + assert isinstance(rpc_response, OnDemandCluster) + returned_cluster = rpc_response + # check we still have only 1 instance + await _assert_cluster_heartbeat_on_instance(ec2_client) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() + + assert created_cluster == returned_cluster + + +async def test_get_or_create_cluster_massive_calls( + _base_configuration: None, + clusters_keeper_rabbitmq_rpc_client: RabbitMQRPCClient, + ec2_client: EC2Client, + user_id: UserID, + wallet_id: WalletID, + mocked_dask_ping_scheduler: MockedDaskModule, +): + # NOTE: when a user starts many computational jobs in parallel + # the get_or_create_cluster is flooded with a lot of calls for the + # very same cluster (user_id/wallet_id) (e.g. 
for 256 jobs, that means 256 calls every 5 seconds) + # that means locking the distributed lock 256 times, then calling AWS API 256 times for the very same information + # therefore these calls are sequentialized *and* cached! Just sequentializing would make the call last + # forever otherwise. In effect this creates a rate limiter on this call. + num_calls = 2000 + results = await asyncio.gather( + *( + get_or_create_cluster( + clusters_keeper_rabbitmq_rpc_client, + user_id=user_id, + wallet_id=wallet_id, + ) + for i in range(num_calls) + ) + ) + + assert results + assert all(isinstance(response, OnDemandCluster) for response in results) + mocked_dask_ping_scheduler.ping_scheduler.assert_called_once() diff --git a/services/clusters-keeper/tests/unit/test_rpc_ec2_instances.py b/services/clusters-keeper/tests/unit/test_rpc_ec2_instances.py new file mode 100644 index 00000000000..a833d13743e --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_rpc_ec2_instances.py @@ -0,0 +1,70 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient, RPCServerError +from servicelib.rabbitmq.rpc_interfaces.clusters_keeper.ec2_instances import ( + get_instance_type_details, +) + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def _base_configuration( + docker_swarm: None, + enabled_rabbitmq: None, + mocked_redis_server: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + initialized_app: FastAPI, +) -> None: + ... 
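The NOTE in `test_get_or_create_cluster_massive_calls` above explains why concurrent `get_or_create_cluster` calls for the same user/wallet pair are sequentialized and cached, effectively rate-limiting the lock acquisitions and AWS lookups. Below is a minimal sketch of that sequentialize-and-cache idea using only the standard library; `_create_cluster_in_aws`, the per-key lock granularity and the TTL are illustrative assumptions, not the service's actual implementation (which uses a distributed lock and aiocache).

```python
import asyncio
import time
from typing import Any

_LOCKS: dict[tuple[int, int | None], asyncio.Lock] = {}
_CACHE: dict[tuple[int, int | None], tuple[float, Any]] = {}
_CACHE_TTL_S = 5.0  # illustrative TTL


async def _create_cluster_in_aws(user_id: int, wallet_id: int | None) -> dict:
    # hypothetical stand-in for the expensive AWS call
    await asyncio.sleep(0.1)
    return {"user_id": user_id, "wallet_id": wallet_id}


async def get_or_create_cluster_cached(user_id: int, wallet_id: int | None) -> dict:
    key = (user_id, wallet_id)
    lock = _LOCKS.setdefault(key, asyncio.Lock())
    async with lock:  # sequentialize concurrent callers for the same key
        cached = _CACHE.get(key)
        if cached and time.monotonic() - cached[0] < _CACHE_TTL_S:
            return cached[1]  # served from cache: no backend call
        result = await _create_cluster_in_aws(user_id, wallet_id)
        _CACHE[key] = (time.monotonic(), result)
        return result


async def _demo() -> None:
    # many concurrent calls collapse into a single backend call
    results = await asyncio.gather(
        *(get_or_create_cluster_cached(1, 2) for _ in range(256))
    )
    assert all(r == results[0] for r in results)


if __name__ == "__main__":
    asyncio.run(_demo())
```

The test asserts exactly this behaviour: despite 2000 parallel RPC calls, `ping_scheduler` is invoked only once.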
+ + +async def test_get_instance_type_details_all_options( + _base_configuration: None, + clusters_keeper_rabbitmq_rpc_client: RabbitMQRPCClient, +): + # an empty set returns all options + + rpc_response = await get_instance_type_details( + clusters_keeper_rabbitmq_rpc_client, instance_type_names="ALL" + ) + assert rpc_response + assert isinstance(rpc_response, list) + assert isinstance(rpc_response[0], EC2InstanceTypeGet) + + +async def test_get_instance_type_details_specific_type_names( + _base_configuration: None, + clusters_keeper_rabbitmq_rpc_client: RabbitMQRPCClient, +): + rpc_response = await get_instance_type_details( + clusters_keeper_rabbitmq_rpc_client, + instance_type_names={"t2.micro", "g4dn.xlarge"}, + ) + assert rpc_response + assert isinstance(rpc_response, list) + assert len(rpc_response) == 2 + assert rpc_response[1].name == "t2.micro" + assert rpc_response[0].name == "g4dn.xlarge" + + +async def test_get_instance_type_details_with_invalid_type_names( + _base_configuration: None, + clusters_keeper_rabbitmq_rpc_client: RabbitMQRPCClient, +): + with pytest.raises(RPCServerError): + await get_instance_type_details( + clusters_keeper_rabbitmq_rpc_client, + instance_type_names={"t2.micro", "g4dn.xlarge", "invalid.name"}, + ) diff --git a/services/clusters-keeper/tests/unit/test_utils_clusters.py b/services/clusters-keeper/tests/unit/test_utils_clusters.py new file mode 100644 index 00000000000..5a96d17cde0 --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_utils_clusters.py @@ -0,0 +1,385 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import random +import re +import subprocess +from collections.abc import Callable +from pathlib import Path +from typing import Any + +import pytest +from aws_library.ec2 import ( + AWSTagKey, + AWSTagValue, + EC2InstanceBootSpecific, + EC2InstanceData, +) +from common_library.json_serialization import json_dumps +from faker import Faker +from models_library.api_schemas_clusters_keeper.clusters import ClusterState +from models_library.clusters import ( + ClusterAuthentication, + NoAuthentication, + TLSAuthentication, +) +from pydantic import ByteSize, TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from settings_library.rabbit import RabbitSettings +from simcore_service_clusters_keeper.core.settings import ApplicationSettings +from simcore_service_clusters_keeper.utils.clusters import ( + _prepare_environment_variables, + create_cluster_from_ec2_instance, + create_deploy_cluster_stack_script, + create_startup_script, +) +from types_aiobotocore_ec2.literals import InstanceStateNameType + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def cluster_machines_name_prefix(faker: Faker) -> str: + return faker.pystr() + + +@pytest.fixture +def ec2_boot_specs(app_settings: ApplicationSettings) -> EC2InstanceBootSpecific: + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES + ec2_boot_specs = next( + iter( + app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_ALLOWED_TYPES.values() + ) + ) + assert isinstance(ec2_boot_specs, EC2InstanceBootSpecific) + return ec2_boot_specs + + +@pytest.fixture(params=[TLSAuthentication, NoAuthentication]) +def backend_cluster_auth( + request: pytest.FixtureRequest, +) -> ClusterAuthentication: + return request.param + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + 
monkeypatch: pytest.MonkeyPatch, + backend_cluster_auth: ClusterAuthentication, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + { + "CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH": json_dumps( + TLSAuthentication.model_json_schema()["examples"][0] + if isinstance(backend_cluster_auth, TLSAuthentication) + else NoAuthentication.model_json_schema()["examples"][0] + ) + }, + ) + + +def test_create_startup_script( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + ec2_boot_specs: EC2InstanceBootSpecific, +): + startup_script = create_startup_script( + app_settings, + ec2_boot_specific=ec2_boot_specs, + ) + assert isinstance(startup_script, str) + assert len(ec2_boot_specs.custom_boot_scripts) > 0 + for boot_script in ec2_boot_specs.custom_boot_scripts: + assert boot_script in startup_script + + +def test_create_deploy_cluster_stack_script( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + cluster_machines_name_prefix: str, + clusters_keeper_docker_compose: dict[str, Any], +): + additional_custom_tags = { + TypeAdapter(AWSTagKey) + .validate_python("pytest-tag-key"): TypeAdapter(AWSTagValue) + .validate_python("pytest-tag-value") + } + deploy_script = create_deploy_cluster_stack_script( + app_settings, + cluster_machines_name_prefix=cluster_machines_name_prefix, + additional_custom_tags=additional_custom_tags, + ) + assert isinstance(deploy_script, str) + # we have commands to pipe into a docker-compose file + assert " | base64 -d > /docker-compose.yml" in deploy_script + # we have commands to init a docker-swarm + assert "docker swarm init --default-addr-pool" in deploy_script + # we have commands to deploy a stack + assert ( + "docker stack deploy --with-registry-auth --compose-file=/docker-compose.yml dask_stack" + in deploy_script + ) + # before that we have commands that setup ENV variables, let's check we have all of them as defined in the docker-compose + # let's get what was set in the startup script and compare with the expected one of the docker-compose + startup_script_envs_definition = ( + deploy_script.splitlines()[-1].split("docker stack deploy")[0].strip() + ) + assert startup_script_envs_definition + # Use regular expression to split the string into key-value pairs (courtesy of chatGPT) + startup_script_key_value_pairs: list[tuple[str, str]] = re.findall( + r"(\S+)=([\S\s]+?)(?=\S+=|$)", startup_script_envs_definition + ) + startup_script_env_keys_names = [key for key, _ in startup_script_key_value_pairs] + # docker-compose expected values + docker_compose_expected_environment: dict[str, str] = {} + assert "services" in clusters_keeper_docker_compose + assert isinstance(clusters_keeper_docker_compose["services"], dict) + for service_details in clusters_keeper_docker_compose["services"].values(): + if "environment" in service_details: + assert isinstance(service_details["environment"], dict) + docker_compose_expected_environment |= service_details["environment"] + + # check the expected environment variables are set so the docker-compose will be complete (we define enough) + expected_env_keys = [ + v[2:-1].split(":")[0] + for v in docker_compose_expected_environment.values() + if isinstance(v, str) and v.startswith("${") + ] + ["DOCKER_IMAGE_TAG"] + for env_key in expected_env_keys: + assert ( 
+ env_key in startup_script_env_keys_names + ), f"{env_key} is missing from startup script! please adjust" + + # check we do not define "too much" + for env_key in startup_script_env_keys_names: + assert env_key in expected_env_keys + + # check lists have \" written in them + list_settings = [ + "WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS", + ] + assert all( + re.search(rf"{i}=\[(\\\".+\\\")*\]", deploy_script) for i in list_settings + ) + + # check dicts have \' in front + dict_settings = [ + "WORKERS_EC2_INSTANCES_ALLOWED_TYPES", + "WORKERS_EC2_INSTANCES_CUSTOM_TAGS", + ] + assert all( + re.search(rf"{i}=\'{{(\".+\":\s\".*\")+}}\'", deploy_script) + for i in dict_settings + ) + + # check that the RabbitMQ settings are null since rabbit is disabled + assert re.search(r"AUTOSCALING_RABBITMQ=null", deploy_script) + + # check the additional tags are in + assert all( + f'"{key}": "{value}"' in deploy_script + for key, value in additional_custom_tags.items() + ) + + +@pytest.fixture( + params=["default", "custom"], ids=["defaultRabbitMQ", "specialClusterRabbitMQ"] +) +def rabbitmq_settings_fixture( + app_environment: EnvVarsDict, + enabled_rabbitmq: RabbitSettings, + request: pytest.FixtureRequest, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, +) -> RabbitSettings | None: + if request.param == "custom": + # Create random RabbitMQ settings using faker + custom_rabbit_settings = random.choice( # noqa: S311 + RabbitSettings.model_json_schema()["examples"] + ) + monkeypatch.setenv( + "PRIMARY_EC2_INSTANCES_RABBIT", json_dumps(custom_rabbit_settings) + ) + return RabbitSettings.model_validate(custom_rabbit_settings) + assert request.param == "default" + return enabled_rabbitmq + + +def test_rabbitmq_settings_are_passed_with_pasword_clear( + docker_swarm: None, + rabbitmq_settings_fixture: RabbitSettings | None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + cluster_machines_name_prefix: str, + clusters_keeper_docker_compose: dict[str, Any], +): + assert app_settings.CLUSTERS_KEEPER_RABBITMQ + assert app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES + assert ( + rabbitmq_settings_fixture + == app_settings.CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES.PRIMARY_EC2_INSTANCES_RABBIT + ) + + additional_custom_tags = { + TypeAdapter(AWSTagKey) + .validate_python("pytest-tag-key"): TypeAdapter(AWSTagValue) + .validate_python("pytest-tag-value") + } + deploy_script = create_deploy_cluster_stack_script( + app_settings, + cluster_machines_name_prefix=cluster_machines_name_prefix, + additional_custom_tags=additional_custom_tags, + ) + assert isinstance(deploy_script, str) + + match = re.search(r"AUTOSCALING_RABBITMQ=\'({.*?})\'", deploy_script) + assert match, "AUTOSCALING_RABBITMQ is not present in the deploy script!" 
+ autoscaling_rabbitmq = match.group(1) + passed_settings = RabbitSettings.model_validate_json(autoscaling_rabbitmq) + assert passed_settings == rabbitmq_settings_fixture + + +def test_create_deploy_cluster_stack_script_below_64kb( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + cluster_machines_name_prefix: str, + clusters_keeper_docker_compose: dict[str, Any], +): + additional_custom_tags = { + TypeAdapter(AWSTagKey) + .validate_python("pytest-tag-key"): TypeAdapter(AWSTagValue) + .validate_python("pytest-tag-value") + } + deploy_script = create_deploy_cluster_stack_script( + app_settings, + cluster_machines_name_prefix=cluster_machines_name_prefix, + additional_custom_tags=additional_custom_tags, + ) + deploy_script_size_in_bytes = len(deploy_script.encode("utf-8")) + assert deploy_script_size_in_bytes < 64000, ( + f"script size is {deploy_script_size_in_bytes} bytes that exceeds the SSM command of 64KB. " + "TIP: split commands or reduce size." + ) + + +def test_create_startup_script_script_size_below_16kb( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + cluster_machines_name_prefix: str, + clusters_keeper_docker_compose: dict[str, Any], + ec2_boot_specs: EC2InstanceBootSpecific, +): + startup_script = create_startup_script( + app_settings, + ec2_boot_specific=ec2_boot_specs, + ) + script_size_in_bytes = len(startup_script.encode("utf-8")) + + print( + f"current script size is {TypeAdapter(ByteSize).validate_python(script_size_in_bytes).human_readable()}" + ) + # NOTE: EC2 user data cannot be above 16KB, we keep some margin here + assert script_size_in_bytes < 15 * 1024 + + +def test__prepare_environment_variables_defines_all_envs_for_docker_compose( + disabled_rabbitmq: None, + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + mocked_redis_server: None, + app_settings: ApplicationSettings, + cluster_machines_name_prefix: str, + clusters_keeper_docker_compose_file: Path, +): + additional_custom_tags = { + TypeAdapter(AWSTagKey) + .validate_python("pytest-tag-key"): TypeAdapter(AWSTagValue) + .validate_python("pytest-tag-value") + } + environment_variables = _prepare_environment_variables( + app_settings, + cluster_machines_name_prefix=cluster_machines_name_prefix, + additional_custom_tags=additional_custom_tags, + ) + assert environment_variables + process = subprocess.run( # noqa: S603 + [ # noqa: S607 + "docker", + "compose", + "--dry-run", + f"--file={clusters_keeper_docker_compose_file}", + "up", + ], + capture_output=True, + check=True, + env={ + e.split("=", maxsplit=1)[0]: e.split("=", maxsplit=1)[1] + for e in environment_variables + }, + ) + assert process + assert process.stderr + _ENV_VARIABLE_NOT_SET_ERROR = "variable is not set" + assert _ENV_VARIABLE_NOT_SET_ERROR not in process.stderr.decode() + assert process.stdout + + +@pytest.mark.parametrize( + "ec2_state, expected_cluster_state", + [ + ("pending", ClusterState.STARTED), + ("running", ClusterState.RUNNING), + ("shutting-down", ClusterState.STOPPED), + ("stopped", ClusterState.STOPPED), + ("stopping", ClusterState.STOPPED), + ("terminated", ClusterState.STOPPED), + ("whatever", ClusterState.STOPPED), + ], +) +@pytest.mark.parametrize( + "authentication", + [ + NoAuthentication(), + 
TLSAuthentication(**TLSAuthentication.model_json_schema()["examples"][0]), + ], +) +def test_create_cluster_from_ec2_instance( + fake_ec2_instance_data: Callable[..., EC2InstanceData], + faker: Faker, + ec2_state: InstanceStateNameType, + expected_cluster_state: ClusterState, + authentication: ClusterAuthentication, +): + instance_data = fake_ec2_instance_data(state=ec2_state) + cluster_instance = create_cluster_from_ec2_instance( + instance_data, + faker.pyint(), + faker.pyint(), + dask_scheduler_ready=faker.pybool(), + cluster_auth=authentication, + max_cluster_start_time=faker.time_delta(), + ) + assert cluster_instance + assert cluster_instance.state is expected_cluster_state + assert cluster_instance.authentication == authentication diff --git a/services/clusters-keeper/tests/unit/test_utils_ec2.py b/services/clusters-keeper/tests/unit/test_utils_ec2.py new file mode 100644 index 00000000000..125670475db --- /dev/null +++ b/services/clusters-keeper/tests/unit/test_utils_ec2.py @@ -0,0 +1,121 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from faker import Faker +from models_library.users import UserID +from models_library.wallets import WalletID +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_clusters_keeper.core.settings import ApplicationSettings +from simcore_service_clusters_keeper.utils.ec2 import ( + _APPLICATION_TAG_KEY, + all_created_ec2_instances_filter, + compose_user_data, + creation_ec2_tags, + get_cluster_name, +) + + +@pytest.fixture +def wallet_id(faker: Faker) -> WalletID: + return faker.pyint(min_value=1) + + +def test_get_cluster_name( + disabled_rabbitmq: None, + disabled_ec2: None, + disabled_ssm: None, + mocked_redis_server: None, + app_settings: ApplicationSettings, + user_id: UserID, + wallet_id: WalletID, +): + assert app_settings.SWARM_STACK_NAME + # manager + assert ( + get_cluster_name( + app_settings, user_id=user_id, wallet_id=wallet_id, is_manager=True + ) + == f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}osparc-computational-cluster-manager-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:{wallet_id}" + ) + # worker + assert ( + get_cluster_name( + app_settings, user_id=user_id, wallet_id=wallet_id, is_manager=False + ) + == f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}osparc-computational-cluster-worker-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:{wallet_id}" + ) + + assert ( + get_cluster_name(app_settings, user_id=user_id, wallet_id=None, is_manager=True) + == f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}osparc-computational-cluster-manager-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:None" + ) + assert ( + get_cluster_name( + app_settings, user_id=user_id, wallet_id=None, is_manager=False + ) + == f"{app_settings.CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX}osparc-computational-cluster-worker-{app_settings.SWARM_STACK_NAME}-user_id:{user_id}-wallet_id:None" + ) + + +def test_creation_ec2_tags( + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + disabled_rabbitmq: None, + mocked_redis_server: None, + app_settings: ApplicationSettings, + user_id: UserID, + wallet_id: WalletID, +): + received_tags = creation_ec2_tags( + app_settings, user_id=user_id, wallet_id=wallet_id + ) + assert received_tags + EXPECTED_TAG_KEY_NAMES = [ + f"{_APPLICATION_TAG_KEY}.deploy", + f"{_APPLICATION_TAG_KEY}.version", + "Name", + "user_id", + "wallet_id", + "role", + 
"osparc-tag", + ] + assert all( + tag_key_name in received_tags for tag_key_name in EXPECTED_TAG_KEY_NAMES + ), f"missing tag key names in {received_tags.keys()}, expected {EXPECTED_TAG_KEY_NAMES}" + assert all( + tag_key_name in EXPECTED_TAG_KEY_NAMES for tag_key_name in received_tags + ), f"non expected tag key names in {received_tags.keys()}, expected {EXPECTED_TAG_KEY_NAMES}" + + +def test_all_created_ec2_instances_filter( + mocked_ec2_server_envs: EnvVarsDict, + mocked_ssm_server_envs: EnvVarsDict, + disabled_rabbitmq: None, + mocked_redis_server: None, + app_settings: ApplicationSettings, +): + received_tags = all_created_ec2_instances_filter(app_settings) + assert len(received_tags) == 1 + EXPECTED_TAG_KEY_NAMES = [ + f"{_APPLICATION_TAG_KEY}.deploy", + ] + assert all( + tag_key_name in received_tags for tag_key_name in EXPECTED_TAG_KEY_NAMES + ), f"missing tag key names in {received_tags.keys()}, expected {EXPECTED_TAG_KEY_NAMES}" + assert all( + tag_key_name in EXPECTED_TAG_KEY_NAMES for tag_key_name in received_tags + ), f"non expected tag key names in {received_tags.keys()}, expected {EXPECTED_TAG_KEY_NAMES}" + + +@pytest.fixture +def bash_command(faker: Faker) -> str: + return faker.pystr() + + +def test_compose_user_data(bash_command: str): + received_user_data = compose_user_data(bash_command) + assert received_user_data + assert received_user_data.startswith("#!/bin/bash\n") + assert bash_command in received_user_data diff --git a/services/dask-sidecar/.gitignore b/services/dask-sidecar/.gitignore new file mode 100644 index 00000000000..37979bc3728 --- /dev/null +++ b/services/dask-sidecar/.gitignore @@ -0,0 +1 @@ +.dask-certificates diff --git a/services/dask-sidecar/Dockerfile b/services/dask-sidecar/Dockerfile index e00a9edc64c..45ef90d7e01 100644 --- a/services/dask-sidecar/Dockerfile +++ b/services/dask-sidecar/Dockerfile @@ -1,6 +1,11 @@ # syntax=docker/dockerfile:1 -ARG PYTHON_VERSION="3.9.12" -FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim-buster as base + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build +# we docker image is built based on debian +FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim-bookworm AS base ARG TARGETPLATFORM ARG BUILDPLATFORM RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" > /log @@ -14,15 +19,17 @@ RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" > /log LABEL maintainer=sanderegg -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ set -eux \ && apt-get update \ && apt-get install -y --no-install-recommends \ iputils-ping \ curl \ gosu \ - && apt-get clean \ + && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* \ # verify that the binary works && gosu nobody true @@ -50,48 +57,49 @@ ENV LANG=C.UTF-8 \ ENV PATH="${VIRTUAL_ENV}/bin:$PATH" # for ARM architecture this helps a lot VS building packages -ENV PIP_EXTRA_INDEX_URL=https://www.piwheels.org/simple +# NOTE: remove as this might create bad caching behaviour +# ENV 
PIP_EXTRA_INDEX_URL=https://www.piwheels.org/simple EXPOSE 8080 EXPOSE 8786 EXPOSE 8787 +# create dask configuration folder +RUN mkdir --parents /home/scu/.config/dask \ + && chown -R scu:scu /home/scu/.config # -------------------------- Build stage ------------------- # Installs build/package management tools and third party dependencies # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ set -eux \ && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install --upgrade \ - pip~=23.0 \ +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build -# install base 3rd party dependencies (NOTE: this speeds up devel mode) -COPY --chown=scu:scu services/dask-sidecar/requirements/_base.txt . -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install \ - --requirement _base.txt + # --------------------------Prod-depends-only stage ------------------- # This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) @@ -99,18 +107,19 @@ RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ # + /build # + services/dask-sidecar [scu:scu] WORKDIR # -FROM build as prod-only-deps +FROM build AS prod-only-deps ENV SC_BUILD_TARGET=prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/dask-sidecar /build/services/dask-sidecar - WORKDIR /build/services/dask-sidecar -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install \ - --requirement requirements/prod.txt +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/dask-sidecar,target=/build/services/dask-sidecar,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- # Final cleanup up to reduce image size and startup setup @@ -119,29 +128,37 @@ RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ # + /home/scu $HOME = WORKDIR # + services/dask-sidecar [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu -# bring installed package without build tools +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --from=prod-only-deps --chown=scu:scu ${VIRTUAL_ENV} ${VIRTUAL_ENV} -# copy docker entrypoint and boot scripts + +# Copies booting scripts COPY --chown=scu:scu services/dask-sidecar/docker services/dask-sidecar/docker +RUN chmod +x 
services/dask-sidecar/docker/*.sh # WARNING: This image is used for dask-scheduler and dask-worker. # In order to have the same healty entrypoint port # make sure dask worker is started as ``dask-worker --dashboard-address 8787``. # Otherwise the worker will take random ports to serve the /health entrypoint. +# https://docs.docker.com/reference/dockerfile/#healthcheck HEALTHCHECK \ --interval=10s \ --timeout=5s \ - --start-period=5s \ + --start-period=20s \ + --start-interval=1s \ --retries=5 \ CMD ["curl", "-Lf", "http://127.0.0.1:8787/health"] @@ -157,7 +174,7 @@ CMD ["/bin/sh", "services/dask-sidecar/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development diff --git a/services/dask-sidecar/Makefile b/services/dask-sidecar/Makefile index ff839e8ba81..07c06669cd8 100644 --- a/services/dask-sidecar/Makefile +++ b/services/dask-sidecar/Makefile @@ -7,12 +7,40 @@ include ../../scripts/common-service.Makefile TEMP_DIR := $(shell mktemp -d -t dask-docker-XXX) -PHONY: build-official-dask -# -# NOTE: At this moment, this does not seem to work https://docs.docker.com/engine/reference/commandline/build/ -# export DOCKER_BUILDKIT=0; docker build --tag local/dask:master https://github.com/dask/dask-docker.git#:base -# -build-official-dask: # builds official dask container from master branch repo - git clone --depth 1 https://github.com/dask/dask-docker.git ${TEMP_DIR} && \ - docker build --tag local/dask:master ${TEMP_DIR}/base && \ - rm -rf ${TEMP_DIR} +.PHONY: settings-schema.json +settings-schema.json: ## [container] dumps json-shcema of this service settings + # Dumping settings schema of ${DOCKER_REGISTRY}/${APP_NAME}:${DOCKER_IMAGE_TAG} + @docker run \ + --entrypoint="${APP_CLI_NAME}" \ + ${DOCKER_REGISTRY}/${APP_NAME}:${DOCKER_IMAGE_TAG} \ + settings --as-json-schema \ + | sed --expression='1,/{/ {/{/!d}' \ + > $@ + # Dumped '$(CURDIR)/$@' + +.dask-certificates: + # create new certificates + mkdir --parents $@ + # Set variables for the key and certificate paths + # Run openssl without prompts using the -subj argument to pass subject information + key_path="$@/dask-key.pem" && \ + cert_path="$@/dask-cert.pem" && \ + subj="/C=CH/ST=ZH/L=ZH/O=ITIS/OU=OSPARC/CN=osparc.io" && \ + openssl req -x509 -newkey rsa:4096 -nodes -keyout "$$key_path" -out "$$cert_path" -days 365 -subj "$$subj" + + +.PHONY: certificates info-certificates clean-certificates + +certificates: .dask-certificates ## creates a self-signed certificate for use with dask communication + # validating certificates + @openssl verify -CAfile $/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # RUNNING application ---------------------------------------- # # - If DASK_START_AS_SCHEDULER is set, then it boots as scheduler otherwise as worker -# - SEE https://docs.dask.org/en/latest/setup/cli.html -# - SEE https://stackoverflow.com/questions/3601515/how-to-check-if-a-variable-is-set-in-bash -# - FIXME: create command prefix: https://unix.stackexchange.com/questions/444946/how-can-we-run-a-command-stored-in-a-variable # +mkdir --parents /home/scu/.config/dask +cat >/home/scu/.config/dask/distributed.yaml </home/scu/.config/dask/distributed.yaml <>/home/scu/.config/dask/distributed.yaml <> /home/scu/.config/dask/distributed.yaml - - echo "$INFO" "Starting as dask scheduler:${scheduler_version}..." - if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then + print_info "Starting as dask scheduler:${scheduler_version}..." 
+ if [ "${SC_BOOT_MODE}" = "debug" ]; then exec watchmedo auto-restart \ - --recursive \ - --pattern="*.py;*/src/*" \ - --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" \ - --ignore-directories -- \ - dask scheduler + --recursive \ + --pattern="*.py;*/src/*" \ + --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" \ + --ignore-directories -- \ + dask scheduler \ + --preload simcore_service_dask_sidecar.scheduler else - exec dask scheduler + exec dask scheduler \ + --preload simcore_service_dask_sidecar.scheduler + fi else DASK_WORKER_VERSION=$(dask worker --version) - DASK_SCHEDULER_URL=${DASK_SCHEDULER_URL:="tcp://${DASK_SCHEDULER_HOST}:8786"} + DASK_SCHEDULER_URL=${DASK_SCHEDULER_URL:="tls://${DASK_SCHEDULER_HOST}:8786"} # # DASK RESOURCES DEFINITION @@ -63,6 +111,7 @@ else # GPU: number GPUs available (= num of GPUs if a nvidia-smi can be run inside a docker container) # RAM: amount of RAM available (= CPU/nproc * total virtual memory given by python psutil - DASK_SIDECAR_NON_USABLE_RAM) # VRAM: amount of VRAM available (in bytes) + # DASK_SIDECAR_CUSTOM_RESOURCES: any amount of anything (in name=NUMBER,name2=NUMBER2,..., see https://distributed.dask.org/en/stable/resources.html#worker-resources) # CPUs num_cpus=$(($(nproc) - ${DASK_SIDECAR_NUM_NON_USABLE_CPUS:-2})) @@ -72,7 +121,7 @@ else fi # GPUs - num_gpus=$(python -c "from simcore_service_dask_sidecar.utils import num_available_gpus; print(num_available_gpus());") + num_gpus=$(python -c "from simcore_service_dask_sidecar.utils.gpus import num_available_gpus; print(num_available_gpus());") # RAM (is computed similarly as the default dask-sidecar computation) _value=$(python -c "import psutil; print(int(psutil.virtual_memory().total * $num_cpus/$(nproc)))") @@ -83,10 +132,33 @@ else # add the GPUs if there are any if [ "$num_gpus" -gt 0 ]; then - total_vram=$(python -c "from simcore_service_dask_sidecar.utils import video_memory; print(video_memory());") + total_vram=$(python -c "from simcore_service_dask_sidecar.utils.gpus import video_memory; print(video_memory());") resources="$resources,GPU=$num_gpus,VRAM=$total_vram" fi + # check whether we might have an EC2 instance and retrieve its type + get_ec2_instance_type() { + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html + print_info "Finding out if we are running on an EC2 instance" + + # fetch headers only + if http_response=$(curl --max-time 5 --silent --head http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null); then + # Extract the HTTP status code (e.g., 200, 404) + http_status_code=$(echo "$http_response" | awk '/^HTTP/ {print $2}') + + if [ "$http_status_code" = "200" ]; then + # Instance type is available + ec2_instance_type=$(curl --max-time 5 --silent http://169.254.169.254/latest/meta-data/instance-type) + print_info "Running on an EC2 instance of type: $ec2_instance_type" + resources="$resources,EC2-INSTANCE-TYPE:$ec2_instance_type=1" + else + print_info "Not running on an EC2 instance. HTTP Status Code: $http_status_code" + fi + else + print_info "Failed to fetch instance type. Not running on an EC2 instance." 
+ fi + } + get_ec2_instance_type # # DASK RESOURCES DEFINITION --------------------------------- END @@ -99,13 +171,13 @@ else # 'daemonic processes are not allowed to have children' arises when running the sidecar.cli # because multi-processing library is used by the sidecar and the nanny does not like it # setting --no-nanny fixes this: see https://github.com/dask/distributed/issues/2142 - echo "$INFO" "Starting as a ${DASK_WORKER_VERSION} -> ${DASK_SCHEDULER_URL} ..." - echo "$INFO" "Worker resources set as: $resources" - if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then + print_info "Starting as a dask worker "${DASK_WORKER_VERSION}" -> "${DASK_SCHEDULER_URL}" ..." + print_info "Worker resources set as: "$resources"" + if [ "${SC_BOOT_MODE}" = "debug" ]; then exec watchmedo auto-restart --recursive --pattern="*.py;*/src/*" --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" --ignore-directories -- \ dask worker "${DASK_SCHEDULER_URL}" \ --local-directory /tmp/dask-sidecar \ - --preload simcore_service_dask_sidecar.tasks \ + --preload simcore_service_dask_sidecar.worker \ --nworkers ${DASK_NPROCS} \ --nthreads "${DASK_NTHREADS}" \ --dashboard-address 8787 \ @@ -115,7 +187,7 @@ else else exec dask worker "${DASK_SCHEDULER_URL}" \ --local-directory /tmp/dask-sidecar \ - --preload simcore_service_dask_sidecar.tasks \ + --preload simcore_service_dask_sidecar.worker \ --nworkers ${DASK_NPROCS} \ --nthreads "${DASK_NTHREADS}" \ --dashboard-address 8787 \ diff --git a/services/dask-sidecar/docker/entrypoint.sh b/services/dask-sidecar/docker/entrypoint.sh index 994b62c57cc..f69fd1a71d8 100755 --- a/services/dask-sidecar/docker/entrypoint.sh +++ b/services/dask-sidecar/docker/entrypoint.sh @@ -22,93 +22,77 @@ update-ca-certificates # *runs* as non-root user [scu] # echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." -echo User :"$(id "$(whoami)")" -echo Workdir :"$(pwd)" -echo scuUser :"$(id scu)" - - -if [ "${SC_BUILD_TARGET}" = "development" ] -then - echo "$INFO" "development mode detected..." - # NOTE: expects docker run ... -v $(pwd):/devel/services/dask-sidecar - DEVEL_MOUNT="/devel/services/dask-sidecar" - - stat $DEVEL_MOUNT > /dev/null 2>&1 || \ - (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1) - - echo "setting correct user id/group id..." - HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}") - HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}") - CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) - if [ "$HOST_USERID" -eq 0 ] - then - echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..." - adduser "$SC_USER_NAME" root +echo User :"$(id "$(whoami)")" +echo Workdir :"$(pwd)" +echo scuUser :"$(id scu)" + +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + # NOTE: expects docker run ... -v $(pwd):/devel/services/dask-sidecar + DEVEL_MOUNT="/devel/services/dask-sidecar" + + stat $DEVEL_MOUNT >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..." 
+ adduser "$SC_USER_NAME" root + else + echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "Creating new group my$SC_USER_NAME" + CONT_GROUPNAME=my$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" else - echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." - # take host's credentials in $SC_USER_NAME - if [ -z "$CONT_GROUPNAME" ] - then - echo "Creating new group my$SC_USER_NAME" - CONT_GROUPNAME=my$SC_USER_NAME - addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" - else - echo "group already exists" - fi - echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..." - adduser "$SC_USER_NAME" "$CONT_GROUPNAME" - - echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" - usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" - - echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; - # change user property of files already around - echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + echo "group already exists" fi -fi + echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..." + adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ] -then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd + echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi fi - if [ ${DASK_START_AS_SCHEDULER+x} ]; then echo "$INFO Starting $* as SCHEDULER ..." echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" - else # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock - if stat $DOCKER_MOUNT > /dev/null 2>&1 - then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker - - if ! addgroup --gid "$GROUPID" $GROUPNAME > /dev/null 2>&1 - then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" + if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker + + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." 
+ # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" fi echo "$INFO ensuring write rights on computational shared folder ..." mkdir --parents "${SIDECAR_COMP_SERVICES_SHARED_FOLDER}" chown --recursive "$SC_USER_NAME":"$GROUPNAME" "${SIDECAR_COMP_SERVICES_SHARED_FOLDER}" - echo "$INFO Starting $* as WORKER ..." echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" diff --git a/services/dask-sidecar/requirements/_base.in b/services/dask-sidecar/requirements/_base.in index a20fe813a0e..9571b106d4f 100644 --- a/services/dask-sidecar/requirements/_base.in +++ b/services/dask-sidecar/requirements/_base.in @@ -11,6 +11,7 @@ # - Added as constraints instead of requirements in order to avoid polluting base.txt # - Will be installed when prod.txt or dev.txt # +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/dask-task-models-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/service-library/requirements/_base.in @@ -21,7 +22,8 @@ aiodocker aiofiles blosc # for compression dask[distributed, diagnostics] -dask-gateway # needed for the osparc-dask-gateway to preload the module fsspec[http, s3] # sub types needed as we acces http and s3 here lz4 # for compression -pydantic[email,dotenv] +pydantic +prometheus_client +repro-zipfile diff --git a/services/dask-sidecar/requirements/_base.txt b/services/dask-sidecar/requirements/_base.txt index 065556f1ba4..d38bf44ab71 100644 --- a/services/dask-sidecar/requirements/_base.txt +++ b/services/dask-sidecar/requirements/_base.txt @@ -1,215 +1,601 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==9.0.4 +aio-pika==9.5.5 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiobotocore==2.4.2 +aiobotocore==2.22.0 # via s3fs +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in aiodebug==2.3.0 # via -r requirements/../../../packages/service-library/requirements/_base.in -aiodocker==0.21.0 - # via -r requirements/_base.in -aiofiles==23.1.0 +aiodocker==0.24.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +aiofiles==24.1.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiohttp==3.8.4 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # aiobotocore # aiodocker - # dask-gateway # fsspec # s3fs -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore -aiormq==6.7.3 +aiormq==6.8.1 # via aio-pika -aiosignal==1.3.1 +aiosignal==1.3.2 # via aiohttp -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 +annotated-types==0.7.0 + # via pydantic +anyio==4.9.0 # via - # aiohttp - # redis -attrs==21.4.0 + # fast-depends + # faststream +arrow==1.3.0 + # via + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +attrs==25.3.0 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt # aiohttp # jsonschema -blosc==1.11.1 + # referencing +blosc==1.11.3 # via -r requirements/_base.in -bokeh==2.4.3 +bokeh==3.7.3 # via dask -botocore==1.27.59 +botocore==1.37.3 # via aiobotocore -certifi==2022.12.7 - # via requests -charset-normalizer==3.1.0 +certifi==2025.4.26 # via - # aiohttp + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt 
+ # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # requests -click==8.1.3 +charset-normalizer==3.4.2 + # via requests +click==8.1.8 # via # dask - # dask-gateway # distributed # typer -cloudpickle==2.2.1 +cloudpickle==3.1.1 # via # dask # distributed -dask==2023.3.0 +contourpy==1.3.2 + # via bokeh +dask==2025.5.0 # via + # -c requirements/constraints.txt # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in # -r requirements/_base.in - # dask-gateway # distributed -dask-gateway==2023.1.1 - # via -r requirements/_base.in -distributed==2023.3.0 +deprecated==1.2.18 # via - # dask - # dask-gateway -dnspython==2.3.0 + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +distributed==2025.5.0 + # via dask +dnspython==2.7.0 # via email-validator -email-validator==1.3.1 +email-validator==2.2.0 # via pydantic -frozenlist==1.3.3 +exceptiongroup==1.3.0 + # via aio-pika +fast-depends==2.4.12 + # via faststream +faststream==0.5.41 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.6.0 # via # aiohttp # aiosignal -fsspec==2023.3.0 +fsspec==2025.3.2 # via # -r 
requirements/_base.in # dask # s3fs -heapdict==1.0.1 - # via zict -idna==3.4 +googleapis-common-protos==1.70.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +idna==3.10 # via + # anyio # email-validator # requests # yarl -jinja2==3.1.2 +importlib-metadata==8.6.1 + # via + # dask + # opentelemetry-api +jinja2==3.1.6 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # bokeh # dask # distributed jmespath==1.0.1 - # via botocore -jsonschema==3.2.0 # via - # -c requirements/../../../packages/service-library/requirements/./constraints.txt + # aiobotocore + # botocore +jsonschema==4.23.0 + # via # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2025.4.1 + # via jsonschema locket==1.0.0 # via # distributed # partd -lz4==4.3.2 +lz4==4.4.4 # via -r requirements/_base.in -markupsafe==2.1.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via jinja2 -msgpack==1.0.5 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 # via distributed -multidict==6.0.4 +multidict==6.4.4 # via + # aiobotocore # aiohttp # yarl -numpy==1.24.2 +narwhals==1.40.0 # via bokeh -packaging==23.0 +numpy==2.2.6 + # via + # bokeh + # contourpy + # pandas +opentelemetry-api==1.33.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.33.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.54b1 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-logging==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.33.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.54b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.54b1 + # via opentelemetry-instrumentation-requests +orjson==3.10.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==25.0 # via # bokeh # dask # distributed -pamqp==3.2.1 + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -partd==1.3.0 +pandas==2.2.3 + # via bokeh +partd==1.4.2 # via dask -pillow==9.4.0 +pillow==11.2.1 # via bokeh -psutil==5.9.4 - # via distributed -pydantic==1.10.2 +prometheus-client==0.22.0 + # via -r requirements/_base.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==5.29.4 # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # distributed +pycryptodome==3.23.0 + # via stream-zip +pydantic==2.11.4 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in -pyinstrument==4.4.0 + # fast-depends + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-extra-types==2.10.4 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via -r requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.19.3 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via + # aiobotocore # arrow # botocore -python-dotenv==1.0.0 - # via pydantic -pyyaml==5.4.1 + # pandas +python-dotenv==1.1.0 + # via pydantic-settings +pytz==2025.2 + # via pandas +pyyaml==6.0.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in # bokeh # dask - # dask-gateway # distributed -redis==4.5.1 - # via -r requirements/../../../packages/service-library/requirements/_base.in -requests==2.28.2 - # via fsspec -s3fs==2023.3.0 - # via fsspec -six==1.16.0 +redis==6.1.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + 
# -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +repro-zipfile==0.4.0 + # via -r requirements/_base.in +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==14.0.0 + # via + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.25.0 # via # jsonschema - # python-dateutil + # referencing +s3fs==2025.3.2 + # via fsspec +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio sortedcontainers==2.4.0 # via distributed -tblib==1.7.0 +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tblib==3.1.0 # via distributed -tenacity==8.2.2 +tenacity==9.1.2 # via -r requirements/../../../packages/service-library/requirements/_base.in -toolz==0.12.0 +toolz==1.0.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # dask # distributed # partd -tornado==6.2 +tornado==6.5 # via # bokeh - # dask-gateway # distributed -tqdm==4.65.0 +tqdm==4.67.1 # via -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.7.0 - # via -r requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.5.0 +typer==0.15.4 + # via + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # 
-r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20250516 + # via arrow +typing-extensions==4.13.2 # via # aiodebug - # aiodocker - # aioitertools - # bokeh + # anyio + # exceptiongroup + # faststream + # opentelemetry-sdk # pydantic -urllib3==1.26.14 + # pydantic-core + # pydantic-extra-types + # typer + # typing-inspection +typing-inspection==0.4.0 + # via pydantic +tzdata==2025.2 + # via pandas +urllib3==2.4.0 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # botocore # distributed # requests -wrapt==1.15.0 - # via aiobotocore -yarl==1.8.2 +wrapt==1.17.2 # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +xyzservices==2025.4.0 + # via bokeh +yarl==1.20.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq -zict==2.2.0 
+zict==3.0.0 # via distributed - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/dask-sidecar/requirements/_dask-distributed.in b/services/dask-sidecar/requirements/_dask-distributed.in index a8ca232b47b..c12b7c00715 100644 --- a/services/dask-sidecar/requirements/_dask-distributed.in +++ b/services/dask-sidecar/requirements/_dask-distributed.in @@ -10,3 +10,4 @@ dask[distributed] blosc # for compression lz4 # for compression +numpy diff --git a/services/dask-sidecar/requirements/_dask-distributed.txt b/services/dask-sidecar/requirements/_dask-distributed.txt index d41eed31872..35f16cb2d4e 100644 --- a/services/dask-sidecar/requirements/_dask-distributed.txt +++ b/services/dask-sidecar/requirements/_dask-distributed.txt @@ -1,38 +1,35 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_dask-distributed.txt --resolver=backtracking --strip-extras requirements/_dask-distributed.in -# -blosc==1.11.1 - # via -r requirements/_dask-distributed.in -click==8.1.3 +blosc==1.11.3 + # via + # -c requirements/./_base.txt + # -r requirements/_dask-distributed.in +click==8.1.8 # via # -c requirements/./_base.txt # dask # distributed -cloudpickle==2.2.1 +cloudpickle==3.1.1 # via # -c requirements/./_base.txt # dask # distributed -dask==2023.3.0 +dask==2025.5.0 # via + # -c requirements/./_base.txt # -r requirements/_dask-distributed.in # distributed -distributed==2023.3.0 +distributed==2025.5.0 # via # -c requirements/./_base.txt # dask -fsspec==2023.3.0 +fsspec==2025.3.2 # via # -c requirements/./_base.txt # dask -heapdict==1.0.1 +importlib-metadata==8.6.1 # via # -c requirements/./_base.txt - # zict -jinja2==3.1.2 + # dask +jinja2==3.1.6 # via # -c requirements/./_base.txt # distributed @@ -41,30 +38,36 @@ locket==1.0.0 # -c requirements/./_base.txt # distributed # partd -lz4==4.3.2 - # via -r requirements/_dask-distributed.in -markupsafe==2.1.2 +lz4==4.4.4 + # via + # -c requirements/./_base.txt + # -r requirements/_dask-distributed.in +markupsafe==3.0.2 # via # -c requirements/./_base.txt # jinja2 -msgpack==1.0.5 +msgpack==1.1.0 # via # -c requirements/./_base.txt # distributed -packaging==23.0 +numpy==2.2.6 + # via + # -c requirements/./_base.txt + # -r requirements/_dask-distributed.in +packaging==25.0 # via # -c requirements/./_base.txt # dask # distributed -partd==1.3.0 +partd==1.4.2 # via # -c requirements/./_base.txt # dask -psutil==5.9.4 +psutil==7.0.0 # via # -c requirements/./_base.txt # distributed -pyyaml==5.4.1 +pyyaml==6.0.2 # via # -c requirements/./_base.txt # dask @@ -73,25 +76,29 @@ sortedcontainers==2.4.0 # via # -c requirements/./_base.txt # distributed -tblib==1.7.0 +tblib==3.1.0 # via # -c requirements/./_base.txt # distributed -toolz==0.12.0 +toolz==1.0.0 # via # -c requirements/./_base.txt # dask # distributed # partd -tornado==6.2 +tornado==6.5 # via # -c requirements/./_base.txt # distributed -urllib3==1.26.14 +urllib3==2.4.0 # via # -c requirements/./_base.txt # distributed -zict==2.2.0 +zict==3.0.0 # via # -c requirements/./_base.txt # distributed +zipp==3.21.0 + # via + # -c requirements/./_base.txt + # importlib-metadata diff --git a/services/dask-sidecar/requirements/_test.in b/services/dask-sidecar/requirements/_test.in index d8eb6fc6503..791e6edaa35 100644 --- a/services/dask-sidecar/requirements/_test.in +++ b/services/dask-sidecar/requirements/_test.in @@ -7,13 +7,13 @@ # 
--constraint _base.txt +aioboto3 coverage -coveralls docker faker -minio +moto[server] pytest -pytest-aiohttp # incompatible with pytest-asyncio. See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-asyncio pytest-cov pytest-icdiff pytest-instafail @@ -21,3 +21,7 @@ pytest-localftpserver pytest-mock pytest-sugar python-dotenv +# mypy +types-aioboto3 +types-aiobotocore[s3] +types-aiofiles diff --git a/services/dask-sidecar/requirements/_test.txt b/services/dask-sidecar/requirements/_test.txt index 012c5dc0c14..91ad970abd0 100644 --- a/services/dask-sidecar/requirements/_test.txt +++ b/services/dask-sidecar/requirements/_test.txt @@ -1,98 +1,219 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 +aioboto3==14.3.0 + # via -r requirements/_test.in +aiobotocore==2.22.0 + # via + # -c requirements/_base.txt + # aioboto3 +aiofiles==24.1.0 # via # -c requirements/_base.txt - # pytest-aiohttp -aiosignal==1.3.1 + # aioboto3 +aiohappyeyeballs==2.6.1 # via # -c requirements/_base.txt # aiohttp -async-timeout==4.0.2 +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aiobotocore +aioitertools==0.12.0 + # via + # -c requirements/_base.txt + # aiobotocore +aiosignal==1.3.2 # via # -c requirements/_base.txt # aiohttp -attrs==21.4.0 +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +attrs==25.3.0 # via # -c requirements/_base.txt # aiohttp - # pytest -certifi==2022.12.7 + # jsonschema + # referencing +aws-sam-translator==1.97.0 + # via cfn-lint +aws-xray-sdk==2.14.0 + # via moto +blinker==1.9.0 + # via flask +boto3==1.37.3 + # via + # aiobotocore + # aws-sam-translator + # moto +botocore==1.37.3 # via # -c requirements/_base.txt - # minio + # aiobotocore + # aws-xray-sdk + # boto3 + # moto + # s3transfer +botocore-stubs==1.38.19 + # via + # types-aioboto3 + # types-aiobotocore +certifi==2025.4.26 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # requests -cffi==1.15.1 +cffi==1.17.1 # via cryptography -charset-normalizer==3.1.0 +cfn-lint==1.35.1 + # via moto +charset-normalizer==3.4.2 # via # -c requirements/_base.txt - # aiohttp # requests -coverage==6.5.0 +click==8.1.8 + # via + # -c requirements/_base.txt + # flask +coverage==7.8.0 # via # -r requirements/_test.in - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.2 +cryptography==45.0.2 # via # -c requirements/../../../requirements/constraints.txt + # joserfc + # moto # pyopenssl -docker==6.0.1 - # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.6.0 +docker==7.1.0 + # via + # -r requirements/_test.in + # moto +faker==37.3.0 # via -r requirements/_test.in -frozenlist==1.3.3 +flask==3.1.1 + # via + # flask-cors + # moto +flask-cors==6.0.0 + # via moto +frozenlist==1.6.0 # via # -c requirements/_base.txt # aiohttp # aiosignal -icdiff==2.0.6 +graphql-core==3.2.6 + # via moto +icdiff==2.0.7 # via pytest-icdiff -idna==3.4 +idna==3.10 # via # -c requirements/_base.txt # requests # yarl -iniconfig==2.0.0 +iniconfig==2.1.0 # via pytest -minio==7.0.4 +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c 
requirements/_base.txt + # flask + # moto +jmespath==1.0.1 + # via + # -c requirements/_base.txt + # aiobotocore + # boto3 + # botocore +joserfc==1.0.4 + # via moto +jsonpatch==1.33 + # via cfn-lint +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 + # via jsonpatch +jsonschema==4.23.0 + # via + # -c requirements/_base.txt + # aws-sam-translator + # openapi-schema-validator + # openapi-spec-validator +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2025.4.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.11.0 + # via openapi-spec-validator +markupsafe==3.0.2 + # via + # -c requirements/_base.txt + # flask + # jinja2 + # werkzeug +moto==5.1.4 # via -r requirements/_test.in -multidict==6.0.4 +mpmath==1.3.0 + # via sympy +multidict==6.4.4 # via # -c requirements/_base.txt + # aiobotocore # aiohttp # yarl -packaging==23.0 +networkx==3.4.2 + # via cfn-lint +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 + # via moto +packaging==25.0 # via # -c requirements/_base.txt - # docker # pytest # pytest-sugar -pluggy==1.0.0 +pathable==0.4.4 + # via jsonschema-path +pluggy==1.6.0 # via pytest +ply==3.11 + # via jsonpath-ng pprintpp==0.4.0 # via pytest-icdiff -pycparser==2.21 +propcache==0.3.1 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 # via cffi -pyftpdlib==1.5.7 +pydantic==2.11.4 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aws-sam-translator +pydantic-core==2.33.2 + # via + # -c requirements/_base.txt + # pydantic +pyftpdlib==2.0.1 # via pytest-localftpserver -pyopenssl==23.0.0 +pyopenssl==25.1.0 # via pytest-localftpserver -pytest==7.2.2 +pyparsing==3.2.3 + # via moto +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-icdiff @@ -100,52 +221,131 @@ pytest==7.2.2 # pytest-localftpserver # pytest-mock # pytest-sugar -pytest-aiohttp==1.0.4 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-cov==6.1.1 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-localftpserver==1.1.4 +pytest-localftpserver==1.3.2 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt - # faker -python-dotenv==1.0.0 - # via -r requirements/_test.in -requests==2.28.2 + # aiobotocore + # botocore + # moto +python-dotenv==1.1.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # cfn-lint + # jsonschema-path + # moto + # responses +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 # via # -c requirements/_base.txt - # coveralls # docker -six==1.16.0 + # jsonschema-path + # moto + # responses +responses==0.25.7 + # via moto +rfc3339-validator==0.1.4 + # via openapi-schema-validator 
+rpds-py==0.25.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +setuptools==80.7.1 + # via moto +six==1.17.0 # via # -c requirements/_base.txt # python-dateutil -termcolor==2.2.0 + # rfc3339-validator +sympy==1.14.0 + # via cfn-lint +termcolor==3.1.0 # via pytest-sugar -tomli==2.0.1 +types-aioboto3==14.3.0 + # via -r requirements/_test.in +types-aiobotocore==2.22.0 # via - # coverage - # pytest -urllib3==1.26.14 + # -r requirements/_test.in + # types-aioboto3 +types-aiobotocore-s3==2.22.0 + # via types-aiobotocore +types-aiofiles==24.1.0.20250516 + # via -r requirements/_test.in +types-awscrt==0.27.2 + # via botocore-stubs +types-s3transfer==0.12.0 + # via types-aioboto3 +typing-extensions==4.13.2 + # via + # -c requirements/_base.txt + # aws-sam-translator + # cfn-lint + # pydantic + # pydantic-core + # pyopenssl + # types-aioboto3 + # types-aiobotocore + # types-aiobotocore-s3 + # typing-inspection +typing-inspection==0.4.0 # via # -c requirements/_base.txt + # pydantic +tzdata==2025.2 + # via + # -c requirements/_base.txt + # faker +urllib3==2.4.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore # docker - # minio # requests -websocket-client==1.5.1 - # via docker -yarl==1.8.2 + # responses +werkzeug==3.1.3 + # via + # flask + # flask-cors + # moto +wrapt==1.17.2 + # via + # -c requirements/_base.txt + # aiobotocore + # aws-xray-sdk +xmltodict==0.14.2 + # via moto +yarl==1.20.0 # via # -c requirements/_base.txt # aiohttp diff --git a/services/dask-sidecar/requirements/_tools.txt b/services/dask-sidecar/requirements/_tools.txt index 4f55d6d7017..c76d3992bbe 100644 --- a/services/dask-sidecar/requirements/_tools.txt +++ b/services/dask-sidecar/requirements/_tools.txt @@ -1,95 +1,89 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.15.0 +astroid==3.3.10 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt + # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 +dill==0.4.0 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.18.0 # via virtualenv -identify==2.5.18 +identify==2.6.10 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==25.0 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.3 +pip==25.1.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.1.0 +platformdirs==4.3.8 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.2.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.4 +pylint==3.3.7 # via -r 
requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt + # -c requirements/_test.txt # pre-commit # watchdog -tomli==2.0.1 +ruff==0.11.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==80.7.1 # via # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.5.0 +typing-extensions==4.13.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.31.2 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via - # -c requirements/_base.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/dask-sidecar/requirements/ci.txt b/services/dask-sidecar/requirements/ci.txt index 00a2f8f637d..343964753b0 100644 --- a/services/dask-sidecar/requirements/ci.txt +++ b/services/dask-sidecar/requirements/ci.txt @@ -9,13 +9,15 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/dask-task-models-library/ -../../packages/models-library/ -../../packages/pytest-simcore/ -../../packages/service-library/ -../../packages/settings-library/ +simcore-common-library @ ../../packages/common-library/ +simcore-dask-task-models-library @ ../../packages/dask-task-models-library/ +simcore-models-library @ ../../packages/models-library/ +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library @ ../../packages/service-library/ +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-dask-sidecar @ . 
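Side note on the requirement lines above: the repo-local packages are now declared with the direct-reference form "<distribution-name> @ <location>" instead of a bare path, so each local install is tied to an explicit distribution name. A minimal sketch of the form, reusing names from this file (illustrative only, not part of the diff; exact resolution behavior depends on the pip version in use):
# <distribution-name> @ <local source tree>
simcore-models-library @ ../../packages/models-library/
simcore-service-dask-sidecar @ .
The declared name on the left is expected to match the distribution name in the package's own setup metadata.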
diff --git a/services/dask-sidecar/requirements/constraints.txt b/services/dask-sidecar/requirements/constraints.txt index b8767f58a98..d8a10d684a0 100644 --- a/services/dask-sidecar/requirements/constraints.txt +++ b/services/dask-sidecar/requirements/constraints.txt @@ -5,7 +5,7 @@ # # Breaking changes # - +dask[distributed]>=2024.4.2 # issue with publish_dataset: https://github.com/dask/distributed/issues/7859 # # Bugs # diff --git a/services/dask-sidecar/requirements/dev.txt b/services/dask-sidecar/requirements/dev.txt index 82fbeaefec6..6ad6237135b 100644 --- a/services/dask-sidecar/requirements/dev.txt +++ b/services/dask-sidecar/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library/ --editable ../../packages/dask-task-models-library/ --editable ../../packages/models-library/ --editable ../../packages/pytest-simcore/ diff --git a/services/dask-sidecar/requirements/prod.txt b/services/dask-sidecar/requirements/prod.txt index fd4759028c0..27de101557c 100644 --- a/services/dask-sidecar/requirements/prod.txt +++ b/services/dask-sidecar/requirements/prod.txt @@ -10,10 +10,11 @@ --requirement _base.txt # installs this repo's packages -../../packages/dask-task-models-library/ -../../packages/models-library/ -../../packages/service-library/ -../../packages/settings-library/ +simcore-dask-task-models-library @ ../../packages/dask-task-models-library/ +simcore-models-library @ ../../packages/models-library/ +simcore-common-library @ ../../packages/common-library/ +simcore-service-library @ ../../packages/service-library/ +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-dask-sidecar @ . diff --git a/services/dask-sidecar/setup.cfg b/services/dask-sidecar/setup.cfg index f588ec5e7fd..7100631fdfb 100644 --- a/services/dask-sidecar/setup.cfg +++ b/services/dask-sidecar/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.0-alpha +current_version = 0.1.1 commit = True message = services/dask-sidecar version: {current_version} β†’ {new_version} tag = False @@ -9,3 +9,4 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function diff --git a/services/dask-sidecar/setup.py b/services/dask-sidecar/setup.py index ab1d750e06d..d14fe17a9b1 100644 --- a/services/dask-sidecar/setup.py +++ b/services/dask-sidecar/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -24,29 +23,35 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-service-dask-sidecar", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author="Pedro Crespo-Valero (pcrespov)", - description="A dask-worker that runs as a sidecar", - classifiers=[ +SETUP = { + "name": "simcore-service-dask-sidecar", + "version": (CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Pedro Crespo-Valero (pcrespov)", + "description": "A dask-worker that runs as a sidecar", + "classifiers": [ "Development Status :: 1 - Planning", "License :: OSI Approved :: MIT License", "Natural Language :: English", - "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], - long_description=(CURRENT_DIR / 
"README.md").read_text(), - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ + "long_description": (CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - install_requires=INSTALL_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, -) + "install_requires": INSTALL_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-dask-sidecar = simcore_service_dask_sidecar.cli:main", + "simcore-service = simcore_service_dask_sidecar.cli:main", + ], + }, +} if __name__ == "__main__": diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/_meta.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/_meta.py new file mode 100644 index 00000000000..36e8a5c664b --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/_meta.py @@ -0,0 +1,47 @@ +""" Application's metadata + +""" + + +from typing import Final + +import dask +from models_library.basic_types import VersionStr +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore_service_dask_sidecar") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +API_VERSION: Final[VersionStr] = info.__version__ + +# https://patorjk.com/software/taag/#p=display&f=Standard&t=dask%20sidecar +DASK_SIDECAR_APP_STARTED_BANNER_MSG = rf""" + + _ _ _ _ + ___ ___ _ __ __ _ _ __ ___ __| | __ _ ___| | __ ___(_) __| | ___ ___ __ _ _ __ + / _ \/ __| '_ \ / _` | '__/ __| / _` |/ _` / __| |/ / / __| |/ _` |/ _ \/ __/ _` | '__| + | (_) \__ \ |_) | (_| | | | (__ | (_| | (_| \__ \ < \__ \ | (_| | __/ (_| (_| | | + \___/|___/ .__/ \__,_|_| \___| \__,_|\__,_|___/_|\_\ |___/_|\__,_|\___|\___\__,_|_| v{__version__} with dask=={dask.__version__} + |_| +""" + +DASK_SCHEDULER_APP_STARTED_BANNER_MSG = rf""" + + _ _ _ _ _ + ___ ___ _ __ __ _ _ __ ___ __| | __ _ ___| | __ ___ ___| |__ ___ __| |_ _| | ___ _ __ + / _ \/ __| '_ \ / _` | '__/ __| / _` |/ _` / __| |/ / / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \ '__| + | (_) \__ \ |_) | (_| | | | (__ | (_| | (_| \__ \ < \__ \ (__| | | | __/ (_| | |_| | | __/ | + \___/|___/ .__/ \__,_|_| \___| \__,_|\__,_|___/_|\_\ |___/\___|_| |_|\___|\__,_|\__,_|_|\___|_| v{__version__} with dask=={dask.__version__} + |_| + +""" + + +def print_dask_sidecar_banner() -> None: + print(DASK_SIDECAR_APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + + +def print_dask_scheduler_banner() -> None: + print(DASK_SCHEDULER_APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/cli.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/cli.py new file mode 100644 index 00000000000..55fef766232 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/cli.py @@ -0,0 +1,20 @@ +import logging + +import typer +from settings_library.utils_cli import create_settings_command, create_version_callback + +from ._meta import PROJECT_NAME, __version__ +from .settings import ApplicationSettings + +# SEE setup entrypoint 'simcore_service_dask_sidecar.cli:the_app' +_logger = logging.getLogger(__name__) + +main = typer.Typer(name=PROJECT_NAME) + +# +# COMMANDS +# +main.callback()(create_version_callback(__version__)) 
+main.command()(
+    create_settings_command(settings_cls=ApplicationSettings, logger=_logger)
+)
diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/constants.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/constants.py
new file mode 100644
index 00000000000..88a6b9028ee
--- /dev/null
+++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/constants.py
@@ -0,0 +1,10 @@
+import re
+from typing import Final
+
+LEGACY_SERVICE_LOG_FILE_NAME: Final[str] = "log.dat"
+PARSE_LOG_INTERVAL_S: Final[float] = 0.5
+
+DOCKER_LOG_REGEXP_WITH_TIMESTAMP: re.Pattern[str] = re.compile(
+    r"^(?P<timestamp>(?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)"
+    r"\s(?P<log>.*)$"
+)
diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/core.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/core.py
index b83895c0db6..2bd094306fb 100644
--- a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/core.py
+++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/core.py
@@ -1,62 +1,58 @@
 import asyncio
-import json
+import logging
 import os
 import socket
+from collections.abc import Coroutine
 from dataclasses import dataclass
 from pathlib import Path
 from pprint import pformat
 from types import TracebackType
-from typing import Coroutine, Optional, cast
+from typing import Final, cast
 from uuid import uuid4
 from aiodocker import Docker
+from common_library.json_serialization import json_dumps
 from dask_task_models_library.container_tasks.docker import DockerBasicAuth
-from dask_task_models_library.container_tasks.events import TaskLogEvent, TaskStateEvent
-from dask_task_models_library.container_tasks.io import (
-    FileUrl,
-    TaskInputData,
-    TaskOutputData,
-    TaskOutputDataSchema,
-)
-from models_library.projects_state import RunningState
-from models_library.services_resources import BootMode
+from dask_task_models_library.container_tasks.errors import ServiceRuntimeError
+from dask_task_models_library.container_tasks.io import FileUrl, TaskOutputData
+from dask_task_models_library.container_tasks.protocol import ContainerTaskParameters
+from models_library.progress_bar import ProgressReport
 from packaging import version
 from pydantic import ValidationError
 from pydantic.networks import AnyUrl
+from servicelib.logging_utils import LogLevelInt, LogMessageStr
+from servicelib.progress_bar import ProgressBarData
 from settings_library.s3 import S3Settings
 from yarl import URL
-from ..dask_utils import TaskPublisher, create_dask_worker_logger, publish_event
-from ..file_utils import pull_file_from_remote, push_file_to_remote
-from ..settings import Settings
+from ..settings import ApplicationSettings
+from ..utils.dask import TaskPublisher
+from ..utils.files import pull_file_from_remote, push_file_to_remote
 from .docker_utils import (
     create_container_config,
     get_computational_shared_data_mount_point,
-    get_integration_version,
+    get_image_labels,
     managed_container,
     managed_monitor_container_log_task,
     pull_image,
 )
-from .errors import ServiceBadFormattedOutputError, ServiceRunError
-from .models import LEGACY_INTEGRATION_VERSION
+from .errors import ServiceBadFormattedOutputError
+from .models import LEGACY_INTEGRATION_VERSION, ImageLabels
 from .task_shared_volume import TaskSharedVolumes
-logger = create_dask_worker_logger(__name__)
+_logger = logging.getLogger(__name__)
 CONTAINER_WAIT_TIME_SECS = 
2 +_TASK_PROCESSING_PROGRESS_WEIGHT: Final[float] = 0.99 -@dataclass -class ComputationalSidecar: # pylint: disable=too-many-instance-attributes +@dataclass(kw_only=True, frozen=True, slots=True) +class ComputationalSidecar: + task_parameters: ContainerTaskParameters docker_auth: DockerBasicAuth - service_key: str - service_version: str - input_data: TaskInputData - output_data_keys: TaskOutputDataSchema log_file_url: AnyUrl - boot_mode: BootMode task_max_resources: dict[str, float] task_publishers: TaskPublisher - s3_settings: Optional[S3Settings] + s3_settings: S3Settings | None async def _write_input_data( self, @@ -70,11 +66,11 @@ async def _write_input_data( local_input_data_file = {} download_tasks = [] - for input_key, input_params in self.input_data.items(): + for input_key, input_params in self.task_parameters.input_data.items(): if isinstance(input_params, FileUrl): file_name = ( input_params.file_mapping - or Path(URL(input_params.url).path.strip("/")).name + or Path(URL(f"{input_params.url}").path.strip("/")).name ) destination_path = task_volumes.inputs_folder / file_name @@ -96,8 +92,10 @@ async def _write_input_data( ) else: local_input_data_file[input_key] = input_params - await asyncio.gather(*download_tasks) - input_data_file.write_text(json.dumps(local_input_data_file)) + # NOTE: temporary solution until new version is created + for task in download_tasks: + await task + input_data_file.write_text(json_dumps(local_input_data_file)) await self._publish_sidecar_log("All the input data were downloaded.") @@ -108,22 +106,24 @@ async def _retrieve_output_data( ) -> TaskOutputData: try: await self._publish_sidecar_log("Retrieving output data...") - logger.debug( + _logger.debug( "following files are located in output folder %s:\n%s", task_volumes.outputs_folder, pformat(list(task_volumes.outputs_folder.rglob("*"))), ) - logger.debug( + _logger.debug( "following outputs will be searched for:\n%s", - self.output_data_keys.json(indent=1), + self.task_parameters.output_data_keys.model_dump_json(indent=1), ) output_data = TaskOutputData.from_task_output( - self.output_data_keys, + self.task_parameters.output_data_keys, task_volumes.outputs_folder, - "outputs.json" - if integration_version > LEGACY_INTEGRATION_VERSION - else "output.json", + ( + "outputs.json" + if integration_version > LEGACY_INTEGRATION_VERSION + else "output.json" + ), ) upload_tasks = [] @@ -131,7 +131,7 @@ async def _retrieve_output_data( if isinstance(output_params, FileUrl): assert ( # nosec output_params.file_mapping - ), f"{output_params.json(indent=1)} expected resolved in TaskOutputData.from_task_output" + ), f"{output_params.model_dump_json(indent=1)} expected resolved in TaskOutputData.from_task_output" src_path = task_volumes.outputs_folder / output_params.file_mapping upload_tasks.append( @@ -145,129 +145,150 @@ async def _retrieve_output_data( await asyncio.gather(*upload_tasks) await self._publish_sidecar_log("All the output data were uploaded.") - logger.info("retrieved outputs data:\n%s", output_data.json(indent=1)) + _logger.info( + "retrieved outputs data:\n%s", output_data.model_dump_json(indent=1) + ) return output_data except (ValueError, ValidationError) as exc: raise ServiceBadFormattedOutputError( - service_key=self.service_key, - service_version=self.service_version, + service_key=self.task_parameters.image, + service_version=self.task_parameters.tag, exc=exc, ) from exc - async def _publish_sidecar_log(self, log: str) -> None: - publish_event( - self.task_publishers.logs, - 
TaskLogEvent.from_dask_worker(log=f"[sidecar] {log}"), - ) - logger.info(log) - - async def _publish_sidecar_state( - self, state: RunningState, msg: Optional[str] = None + async def _publish_sidecar_log( + self, log: LogMessageStr, log_level: LogLevelInt = logging.INFO ) -> None: - publish_event( - self.task_publishers.state, - TaskStateEvent.from_dask_worker(state=state, msg=msg), + await self.task_publishers.publish_logs( + message=f"[sidecar] {log}", log_level=log_level ) async def run(self, command: list[str]) -> TaskOutputData: - await self._publish_sidecar_state(RunningState.STARTED) + # ensure we pass the initial logs and progress await self._publish_sidecar_log( - f"Starting task for {self.service_key}:{self.service_version} on {socket.gethostname()}..." + f"Starting task {self.task_parameters.image}:{self.task_parameters.tag} on {socket.gethostname()}..." ) + # NOTE: this is for tracing purpose + _logger.info("Running task owner: %s", self.task_parameters.task_owner) - settings = Settings.create_from_envs() + settings = ApplicationSettings.create_from_envs() run_id = f"{uuid4()}" - async with Docker() as docker_client, TaskSharedVolumes( - Path(f"{settings.SIDECAR_COMP_SERVICES_SHARED_FOLDER}/{run_id}") - ) as task_volumes: + async with ( + Docker() as docker_client, + TaskSharedVolumes( + Path(f"{settings.SIDECAR_COMP_SERVICES_SHARED_FOLDER}/{run_id}") + ) as task_volumes, + ProgressBarData( + num_steps=3, + step_weights=[5 / 100, 90 / 100, 5 / 100], + progress_report_cb=self.task_publishers.publish_progress, + description="running", + ) as progress_bar, + ): # PRE-PROCESSING await pull_image( docker_client, self.docker_auth, - self.service_key, - self.service_version, + self.task_parameters.image, + self.task_parameters.tag, self._publish_sidecar_log, ) - integration_version = await get_integration_version( - docker_client, self.docker_auth, self.service_key, self.service_version + image_labels: ImageLabels = await get_image_labels( + docker_client, + self.docker_auth, + self.task_parameters.image, + self.task_parameters.tag, ) computational_shared_data_mount_point = ( await get_computational_shared_data_mount_point(docker_client) ) config = await create_container_config( docker_registry=self.docker_auth.server_address, - service_key=self.service_key, - service_version=self.service_version, + image=self.task_parameters.image, + tag=self.task_parameters.tag, command=command, comp_volume_mount_point=f"{computational_shared_data_mount_point}/{run_id}", - boot_mode=self.boot_mode, + boot_mode=self.task_parameters.boot_mode, task_max_resources=self.task_max_resources, + envs=self.task_parameters.envs, + labels=self.task_parameters.labels, ) - await self._write_input_data(task_volumes, integration_version) - - # PROCESSING - async with managed_container(docker_client, config) as container: - async with managed_monitor_container_log_task( + await self._write_input_data( + task_volumes, image_labels.get_integration_version() + ) + await progress_bar.update() # NOTE: (1 step weighting 5%) + # PROCESSING (1 step weighted 90%) + async with ( + managed_container( + docker_client, + config, + name=f"{self.task_parameters.image.split(sep='/')[-1]}_{run_id}", + ) as container, + progress_bar.sub_progress( + 100, description="processing" + ) as processing_progress_bar, + managed_monitor_container_log_task( container=container, - service_key=self.service_key, - service_version=self.service_version, - progress_pub=self.task_publishers.progress, - logs_pub=self.task_publishers.logs, - 
integration_version=integration_version, + progress_regexp=image_labels.get_progress_regexp(), + service_key=self.task_parameters.image, + service_version=self.task_parameters.tag, + task_publishers=self.task_publishers, + integration_version=image_labels.get_integration_version(), task_volumes=task_volumes, log_file_url=self.log_file_url, log_publishing_cb=self._publish_sidecar_log, s3_settings=self.s3_settings, - ): - await container.start() - await self._publish_sidecar_log( - f"Container started as '{container.id}' on {socket.gethostname()}..." - ) - # wait until the container finished, either success or fail or timeout - while (container_data := await container.show())["State"][ - "Running" - ]: - await asyncio.sleep(CONTAINER_WAIT_TIME_SECS) - if container_data["State"]["ExitCode"] > os.EX_OK: - await self._publish_sidecar_state( - RunningState.FAILED, - msg=f"error while running container '{container.id}' for '{self.service_key}:{self.service_version}'", - ) - - raise ServiceRunError( - service_key=self.service_key, - service_version=self.service_version, - container_id=container.id, - exit_code=container_data["State"]["ExitCode"], - service_logs=await cast( - Coroutine, - container.log( - stdout=True, stderr=True, tail=20, follow=False - ), + progress_bar=processing_progress_bar, + ), + ): + await container.start() + await self._publish_sidecar_log( + f"Container started as '{container.id}' on {socket.gethostname()}..." + ) + # wait until the container finished, either success or fail or timeout + while (container_data := await container.show())["State"]["Running"]: + await asyncio.sleep(CONTAINER_WAIT_TIME_SECS) + if container_data["State"]["ExitCode"] > os.EX_OK: + raise ServiceRuntimeError( + service_key=self.task_parameters.image, + service_version=self.task_parameters.tag, + container_id=container.id, + exit_code=container_data["State"]["ExitCode"], + service_logs=await cast( + Coroutine, + container.log( + stdout=True, stderr=True, tail=20, follow=False ), - ) - await self._publish_sidecar_log("Container ran successfully.") + ), + ) + await self._publish_sidecar_log("Container ran successfully.") - # POST-PROCESSING + # POST-PROCESSING (1 step weighted 5%) results = await self._retrieve_output_data( - task_volumes, integration_version + task_volumes, image_labels.get_integration_version() ) await self._publish_sidecar_log("Task completed successfully.") return results async def __aenter__(self) -> "ComputationalSidecar": + # ensure we start publishing progress + self.task_publishers.publish_progress(ProgressReport(actual_value=0)) return self async def __aexit__( self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, ) -> None: if exc: - await self._publish_sidecar_log(f"Task error:\n{exc}") await self._publish_sidecar_log( - "There might be more information in the service log file" + f"Task error:\n{exc}", log_level=logging.ERROR + ) + await self._publish_sidecar_log( + "TIP: There might be more information in the service log file in the service outputs", ) + # ensure we pass the final progress + self.task_publishers.publish_progress(ProgressReport(actual_value=1)) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/docker_utils.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/docker_utils.py index 08ab96f3532..7b34ef409d2 100644 --- 
a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/docker_utils.py +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/docker_utils.py @@ -1,80 +1,96 @@ import asyncio import contextlib -import json import logging import re import socket -from pathlib import Path -from pprint import pformat -from typing import ( - Any, +from collections.abc import ( AsyncGenerator, AsyncIterator, Awaitable, Callable, Coroutine, - Final, - Optional, - cast, ) +from pathlib import Path +from pprint import pformat +from typing import Any, Final, cast import aiofiles import aiofiles.tempfile +import aiohttp +import arrow from aiodocker import Docker, DockerError from aiodocker.containers import DockerContainer from aiodocker.volumes import DockerVolume from dask_task_models_library.container_tasks.docker import DockerBasicAuth -from distributed.pubsub import Pub +from dask_task_models_library.container_tasks.protocol import ( + ContainerCommands, + ContainerEnvsDict, + ContainerImage, + ContainerLabelsDict, + ContainerTag, + LogFileUploadURL, +) from models_library.services_resources import BootMode +from models_library.utils.labels_annotations import OSPARC_LABEL_PREFIXES, from_labels from packaging import version -from pydantic import ByteSize -from pydantic.networks import AnyUrl -from servicelib.docker_utils import to_datetime -from servicelib.logging_utils import log_catch, log_context +from pydantic import ByteSize, TypeAdapter +from servicelib.logging_utils import ( + LogLevelInt, + LogMessageStr, + guess_message_log_level, + log_catch, + log_context, +) +from servicelib.progress_bar import ProgressBarData from settings_library.s3 import S3Settings -from ..dask_utils import LogType, create_dask_worker_logger, publish_task_logs -from ..file_utils import push_file_to_remote -from ..settings import Settings +from ..settings import ApplicationSettings +from ..utils.dask import TaskPublisher +from ..utils.files import push_file_to_remote +from .constants import LEGACY_SERVICE_LOG_FILE_NAME from .models import ( LEGACY_INTEGRATION_VERSION, ContainerHostConfig, DockerContainerConfig, + ImageLabels, ) from .task_shared_volume import TaskSharedVolumes -logger = create_dask_worker_logger(__name__) -LogPublishingCB = Callable[[str], Awaitable[None]] +logger = logging.getLogger(__name__) +LogPublishingCB = Callable[[LogMessageStr, LogLevelInt], Coroutine[Any, Any, None]] async def create_container_config( + *, docker_registry: str, - service_key: str, - service_version: str, - command: list[str], + image: ContainerImage, + tag: ContainerTag, + command: ContainerCommands, comp_volume_mount_point: str, boot_mode: BootMode, task_max_resources: dict[str, Any], + envs: ContainerEnvsDict, + labels: ContainerLabelsDict, ) -> DockerContainerConfig: nano_cpus_limit = int(task_max_resources.get("CPU", 1) * 1e9) memory_limit = ByteSize(task_max_resources.get("RAM", 1024**3)) + env_variables = [ + "INPUT_FOLDER=/inputs", + "OUTPUT_FOLDER=/outputs", + "LOG_FOLDER=/logs", + f"SC_COMP_SERVICES_SCHEDULED_AS={boot_mode.value}", + f"SIMCORE_NANO_CPUS_LIMIT={nano_cpus_limit}", + f"SIMCORE_MEMORY_BYTES_LIMIT={memory_limit}", + ] + if envs: + env_variables += [ + f"{env_key}={env_value}" for env_key, env_value in envs.items() + ] config = DockerContainerConfig( - Env=[ - *[ - f"{name.upper()}_FOLDER=/{name}s" - for name in [ - "input", - "output", - "log", - ] - ], - f"SC_COMP_SERVICES_SCHEDULED_AS={boot_mode.value}", - f"SIMCORE_NANO_CPUS_LIMIT={nano_cpus_limit}", - 
f"SIMCORE_MEMORY_BYTES_LIMIT={memory_limit}",
-        ],
+        Env=env_variables,
         Cmd=command,
-        Image=f"{docker_registry}/{service_key}:{service_version}",
-        Labels={},
+        Image=f"{docker_registry}/{image}:{tag}",
+        Labels=cast(dict[str, str], labels),
         HostConfig=ContainerHostConfig(
             Init=True,
             Binds=[
@@ -86,13 +102,13 @@ async def create_container_config(
             NanoCPUs=nano_cpus_limit,
         ),
     )
-    logger.debug("Container configuration: \n%s", pformat(config.dict()))
+    logger.debug("Container configuration: \n%s", pformat(config.model_dump()))
     return config
 @contextlib.asynccontextmanager
 async def managed_container(
-    docker_client: Docker, config: DockerContainerConfig, *, name: Optional[str] = None
+    docker_client: Docker, config: DockerContainerConfig, *, name: str | None = None
 ) -> AsyncIterator[DockerContainer]:
     container = None
     try:
@@ -100,7 +116,7 @@ async def managed_container(
             logger, logging.DEBUG, msg=f"managing container {name} for {config.image}"
         ):
             container = await docker_client.containers.create(
-                config.dict(by_alias=True), name=name
+                config.model_dump(by_alias=True), name=name
             )
             yield container
     except asyncio.CancelledError:
@@ -127,313 +143,261 @@ async def managed_container(
         raise
-_DOCKER_LOG_REGEXP: re.Pattern[str] = re.compile(
-    r"^(?P<timestamp>\d+-\d+-\d+T\d+:\d+:\d+\.\d+[^\s]+) (?P<log>.+)$"
-)
-_PROGRESS_REGEXP: re.Pattern[str] = re.compile(
-    r"\[?progress\]?:?\s*([0-1]?\.\d+|\d+(%)|\d+\s*(percent)|(\d+\/\d+))"
-)
-DEFAULT_TIME_STAMP = "2000-01-01T00:00:00.000000000Z"
+def _guess_progress_value(progress_match: re.Match[str]) -> float:
+    # can be anything from "23 percent", 23%, 23/234, 0.0-1.0
+    value_str = progress_match.group("value")
+    if progress_match.group("percent_sign"):
+        # this is of the 23% kind
+        return float(value_str.split("%")[0].strip()) / 100.0
+    if progress_match.group("percent_explicit"):
+        # this is of the 23 percent kind
+        return float(value_str.split("percent")[0].strip()) / 100.0
+    if progress_match.group("fraction"):
+        # this is of the 23/123 kind
+        nums = progress_match.group("fraction").strip().split("/")
+        return float(nums[0].strip()) / float(nums[1].strip())
+    # this is of the 0.0-1.0 kind
+    return float(value_str.strip())
-async def parse_line(line: str) -> tuple[LogType, str, str]:
-    match = re.search(_DOCKER_LOG_REGEXP, line)
-    if not match:
-        # default return as log
-        return (LogType.LOG, DEFAULT_TIME_STAMP, f"{line}")
+_OSPARC_LOG_NUM_PARTS: Final[int] = 2
-
-    log_type = LogType.LOG
-    timestamp = match.group("timestamp")
-    log = f"{match.group('log')}"
-    # now look for progress
-    match = re.search(_PROGRESS_REGEXP, log.lower())
-    if match:
-        try:
-            # can be anything from "23 percent", 23%, 23/234, 0.0-1.0
-            progress = match.group(1)
-            log_type = LogType.PROGRESS
-            if match.group(2):
-                # this is of the 23% kind
-                log = f"{float(progress.rstrip('%').strip()) / 100.0:.2f}"
-            elif match.group(3):
-                # this is of the 23 percent kind
-                log = f"{float(progress.rstrip('percent').strip()) / 100.0:.2f}"
-            elif match.group(4):
-                # this is of the 23/123 kind
-                nums = progress.strip().split("/")
-                log = f"{float(nums[0]) / float(nums[1]):.2f}"
-            else:
-                # this is of the 0.0-1.0 kind
-                log = f"{float(progress.strip()):.2f}"
-        except ValueError:
-            logger.exception("Could not extract progress from log line %s", line)
-    return (log_type, timestamp, log)
+async def _try_parse_progress(
+    line: str, *, progress_regexp: re.Pattern[str]
+) -> float | None:
+    with log_catch(logger, reraise=False):
+        # pattern might be like "timestamp log"
+        log = line.strip("\n")
+        splitted_log = 
log.split(" ", maxsplit=1) + with contextlib.suppress(arrow.ParserError, ValueError): + if len(splitted_log) == _OSPARC_LOG_NUM_PARTS and arrow.get( + splitted_log[0] + ): + log = splitted_log[1] + if match := re.search(progress_regexp, log): + return _guess_progress_value(match) -async def publish_container_logs( - service_key: str, - service_version: str, - container: DockerContainer, - container_name: str, - progress_pub: Pub, - logs_pub: Pub, - log_type: LogType, - message: str, + return None + + +async def _parse_and_publish_logs( + log_line: str, + *, + task_publishers: TaskPublisher, + progress_regexp: re.Pattern[str], + progress_bar: ProgressBarData, ) -> None: - return publish_task_logs( - progress_pub, - logs_pub, - log_type, - message_prefix=f"{service_key}:{service_version} - {container.id}{container_name}", - message=message, + progress_value = await _try_parse_progress( + log_line, progress_regexp=progress_regexp ) + if progress_value is not None: + await progress_bar.set_(round(progress_value * 100.0)) - -LEGACY_SERVICE_LOG_FILE_NAME: Final[str] = "log.dat" -PARSE_LOG_INTERVAL_S: Final[float] = 0.5 + await task_publishers.publish_logs( + message=log_line, log_level=guess_message_log_level(log_line) + ) -async def _parse_container_log_file( +async def _parse_container_log_file( # noqa: PLR0913 # pylint: disable=too-many-arguments + *, container: DockerContainer, - service_key: str, - service_version: str, + progress_regexp: re.Pattern[str], + service_key: ContainerImage, + service_version: ContainerTag, container_name: str, - progress_pub: Pub, - logs_pub: Pub, + task_publishers: TaskPublisher, task_volumes: TaskSharedVolumes, - log_file_url: AnyUrl, + log_file_url: LogFileUploadURL, log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], + s3_settings: S3Settings | None, + progress_bar: ProgressBarData, ) -> None: log_file = task_volumes.logs_folder / LEGACY_SERVICE_LOG_FILE_NAME - logger.debug("monitoring legacy-style container log file in %s", log_file) - - async with aiofiles.open(log_file, mode="r") as file_pointer: - logger.debug("monitoring legacy-style container log file: opened %s", log_file) - while (await container.show())["State"]["Running"]: - if line := await file_pointer.readline(): - log_type, _, message = await parse_line(line) - await publish_container_logs( - service_key=service_key, - service_version=service_version, - container=container, - container_name=container_name, - progress_pub=progress_pub, - logs_pub=logs_pub, - log_type=log_type, - message=message, + with log_context( + logger, + logging.DEBUG, + "started monitoring of pre-1.0 service - using log file in /logs folder", + ): + async with aiofiles.open(log_file, mode="rt") as file_pointer: + while (await container.show())["State"]["Running"]: + if line := await file_pointer.readline(): + logger.info( + "[%s]: %s", + f"{service_key}:{service_version} - {container.id}{container_name}", + line, + ) + await _parse_and_publish_logs( + line, + task_publishers=task_publishers, + progress_regexp=progress_regexp, + progress_bar=progress_bar, + ) + + # finish reading the logs if possible + async for line in file_pointer: + logger.info( + "[%s]: %s", + f"{service_key}:{service_version} - {container.id}{container_name}", + line, + ) + await _parse_and_publish_logs( + line, + task_publishers=task_publishers, + progress_regexp=progress_regexp, + progress_bar=progress_bar, ) - await asyncio.sleep(PARSE_LOG_INTERVAL_S) - # finish reading the logs if possible - async for line in file_pointer: - 
log_type, _, message = await parse_line(line) - await publish_container_logs( - service_key=service_key, - service_version=service_version, - container=container, - container_name=container_name, - progress_pub=progress_pub, - logs_pub=logs_pub, - log_type=log_type, - message=message, + # copy the log file to the log_file_url + await push_file_to_remote( + log_file, log_file_url, log_publishing_cb, s3_settings ) - logger.debug( - "monitoring legacy-style container log file: completed reading of %s", - log_file, - ) - logger.debug( - "monitoring legacy-style container log file: copying log file from %s to %s...", - log_file, - log_file_url, - ) - # copy the log file to the log_file_url - file_to_upload = log_file - await push_file_to_remote( - file_to_upload, log_file_url, log_publishing_cb, s3_settings - ) - logger.debug( - "monitoring legacy-style container log file: copying log file from %s to %s completed", - log_file, - log_file_url, - ) + +_MINUTE: Final[int] = 60 +_HOUR: Final[int] = 60 * _MINUTE +_AIODOCKER_LOGS_TIMEOUT_S: Final[int] = 1 * _HOUR async def _parse_container_docker_logs( + *, container: DockerContainer, - service_key: str, - service_version: str, + progress_regexp: re.Pattern[str], + service_key: ContainerImage, + service_version: ContainerTag, container_name: str, - progress_pub: Pub, - logs_pub: Pub, - log_file_url: AnyUrl, + task_publishers: TaskPublisher, + log_file_url: LogFileUploadURL, log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], + s3_settings: S3Settings | None, + progress_bar: ProgressBarData, ) -> None: - latest_log_timestamp = DEFAULT_TIME_STAMP - logger.debug( - "monitoring 1.0+ container logs from container %s:%s", - container.id, - container_name, - ) - # TODO: move that file somewhere else - async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: - log_file_path = ( - Path(tmp_dir) / f"{service_key.split(sep='/')[-1]}_{service_version}.logs" - ) - log_file_path.parent.mkdir(parents=True, exist_ok=True) - async with aiofiles.open(log_file_path, mode="wb+") as log_fp: - async for log_line in cast( - AsyncGenerator[str, None], - container.log(stdout=True, stderr=True, follow=True, timestamps=True), - ): - await log_fp.write(log_line.encode("utf-8")) - log_type, latest_log_timestamp, message = await parse_line(log_line) - await publish_container_logs( - service_key=service_key, - service_version=service_version, - container=container, - container_name=container_name, - progress_pub=progress_pub, - logs_pub=logs_pub, - log_type=log_type, - message=message, - ) - - logger.debug( - "monitoring 1.0+ container logs from container %s:%s: getting remaining logs", - container.id, - container_name, + with log_context( + logger, logging.DEBUG, "started monitoring of >=1.0 service - using docker logs" + ): + assert isinstance(container.docker.connector, aiohttp.UnixConnector) # nosec + async with Docker( + session=aiohttp.ClientSession( + connector=aiohttp.UnixConnector(container.docker.connector.path), + timeout=aiohttp.ClientTimeout(total=_AIODOCKER_LOGS_TIMEOUT_S), ) - # NOTE: The log stream may be interrupted before all the logs are gathered! 
- # therefore it is needed to get the remaining logs - missing_logs = await cast( - Coroutine, - container.log( - stdout=True, - stderr=True, - timestamps=True, - follow=False, - since=to_datetime(latest_log_timestamp).strftime("%s"), - ), + ) as docker_client_for_logs: + # NOTE: this is a workaround for aiodocker not being able to get the container + # logs when the container is not running + container_for_long_running_logs = ( + await docker_client_for_logs.containers.get(container.id) ) - for log_line in missing_logs: - await log_fp.write(log_line.encode("utf-8")) - log_type, latest_log_timestamp, message = await parse_line(log_line) - await publish_container_logs( - service_key=service_key, - service_version=service_version, - container=container, - container_name=container_name, - progress_pub=progress_pub, - logs_pub=logs_pub, - log_type=log_type, - message=message, + # NOTE: this is a workaround for aiodocker not being able to get the container + # logs when the container is not running + await container.show() + await container_for_long_running_logs.show() + async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: + log_file_path = ( + Path(tmp_dir) + / f"{service_key.split(sep='/')[-1]}_{service_version}.logs" + ) + log_file_path.parent.mkdir(parents=True, exist_ok=True) + async with aiofiles.open(log_file_path, mode="wb+") as log_fp: + async for log_line in cast( + AsyncGenerator[str, None], + container_for_long_running_logs.log( + stdout=True, + stderr=True, + follow=True, + timestamps=True, + ), + ): + log_msg_without_timestamp = log_line.split(" ", maxsplit=1)[1] + logger.info( + "[%s]: %s", + f"{service_key}:{service_version} - {container_for_long_running_logs.id}{container_name}", + log_msg_without_timestamp, + ) + await log_fp.write(log_line.encode("utf-8")) + # NOTE: here we remove the timestamp, only needed for the file + await _parse_and_publish_logs( + log_msg_without_timestamp, + task_publishers=task_publishers, + progress_regexp=progress_regexp, + progress_bar=progress_bar, + ) + + # copy the log file to the log_file_url + await push_file_to_remote( + log_file_path, log_file_url, log_publishing_cb, s3_settings ) - - logger.debug( - "monitoring 1.0+ container logs from container %s:%s: completed", - container.id, - container_name, - ) - - logger.debug( - "monitoring 1.0+ container logs from container %s:%s: copying log file from %s to %s...", - container.id, - container_name, - log_file_path, - log_file_url, - ) - - # copy the log file to the log_file_url - await push_file_to_remote( - log_file_path, log_file_url, log_publishing_cb, s3_settings - ) - - logger.debug( - "monitoring 1.0+ container logs from container %s:%s: copying log file to %s completed", - container.id, - container_name, - log_file_url, - ) -async def monitor_container_logs( +async def _monitor_container_logs( # noqa: PLR0913 # pylint: disable=too-many-arguments + *, container: DockerContainer, - service_key: str, - service_version: str, - progress_pub: Pub, - logs_pub: Pub, + progress_regexp: re.Pattern[str], + service_key: ContainerImage, + service_version: ContainerTag, + task_publishers: TaskPublisher, integration_version: version.Version, task_volumes: TaskSharedVolumes, - log_file_url: AnyUrl, + log_file_url: LogFileUploadURL, log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], + s3_settings: S3Settings | None, + progress_bar: ProgressBarData, ) -> None: """Services running with integration version 0.0.0 are logging into a file that must be available in task_volumes.log 
/ log.dat Services above are not creating a file and use the usual docker logging. These logs are retrieved using the usual cli 'docker logs CONTAINERID' """ + with log_catch(logger, reraise=False): container_info = await container.show() container_name = container_info.get("Name", "undefined") - logger.info( - "Starting to parse information of task [%s:%s - %s%s]", - service_key, - service_version, - container.id, - container_name, - ) - - if integration_version > LEGACY_INTEGRATION_VERSION: - await _parse_container_docker_logs( - container, - service_key, - service_version, - container_name, - progress_pub, - logs_pub, - log_file_url, - log_publishing_cb, - s3_settings, - ) - else: - await _parse_container_log_file( - container, - service_key, - service_version, - container_name, - progress_pub, - logs_pub, - task_volumes, - log_file_url, - log_publishing_cb, - s3_settings, - ) - - logger.info( - "Finished parsing information of task [%s:%s - %s%s]", - service_key, - service_version, - container.id, - container_name, - ) + with log_context( + logger, + logging.INFO, + f"parse logs of {service_key}:{service_version} - {container.id}-{container_name}", + ): + if integration_version > LEGACY_INTEGRATION_VERSION: + await _parse_container_docker_logs( + container=container, + progress_regexp=progress_regexp, + service_key=service_key, + service_version=service_version, + container_name=container_name, + task_publishers=task_publishers, + log_file_url=log_file_url, + log_publishing_cb=log_publishing_cb, + s3_settings=s3_settings, + progress_bar=progress_bar, + ) + else: + await _parse_container_log_file( + container=container, + progress_regexp=progress_regexp, + service_key=service_key, + service_version=service_version, + container_name=container_name, + task_publishers=task_publishers, + task_volumes=task_volumes, + log_file_url=log_file_url, + log_publishing_cb=log_publishing_cb, + s3_settings=s3_settings, + progress_bar=progress_bar, + ) @contextlib.asynccontextmanager -async def managed_monitor_container_log_task( +async def managed_monitor_container_log_task( # noqa: PLR0913 # pylint: disable=too-many-arguments container: DockerContainer, - service_key: str, - service_version: str, - progress_pub: Pub, - logs_pub: Pub, + progress_regexp: re.Pattern[str], + service_key: ContainerImage, + service_version: ContainerTag, + task_publishers: TaskPublisher, integration_version: version.Version, task_volumes: TaskSharedVolumes, - log_file_url: AnyUrl, + log_file_url: LogFileUploadURL, log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], + s3_settings: S3Settings | None, + progress_bar: ProgressBarData, ) -> AsyncIterator[Awaitable[None]]: monitoring_task = None try: @@ -441,36 +405,40 @@ async def managed_monitor_container_log_task( # NOTE: ensure the file is present before the container is started (necessary for old services) log_file = task_volumes.logs_folder / LEGACY_SERVICE_LOG_FILE_NAME log_file.touch() - monitoring_task = asyncio.create_task( - monitor_container_logs( - container, - service_key, - service_version, - progress_pub, - logs_pub, - integration_version, - task_volumes, - log_file_url, - log_publishing_cb, - s3_settings, - ), - name=f"{service_key}:{service_version}_{container.id}_monitoring_task", + monitoring_task = asyncio.shield( + asyncio.create_task( + _monitor_container_logs( + container=container, + progress_regexp=progress_regexp, + service_key=service_key, + service_version=service_version, + task_publishers=task_publishers, + 
integration_version=integration_version, + task_volumes=task_volumes, + log_file_url=log_file_url, + log_publishing_cb=log_publishing_cb, + s3_settings=s3_settings, + progress_bar=progress_bar, + ), + name=f"{service_key}:{service_version}_{container.id}_monitoring_task", + ) ) yield monitoring_task # wait for task to complete, so we get the complete log await monitoring_task finally: if monitoring_task: - monitoring_task.cancel() - with contextlib.suppress(asyncio.CancelledError): - await monitoring_task + with log_context(logger, logging.DEBUG, "cancel logs monitoring task"): + monitoring_task.cancel() + with contextlib.suppress(asyncio.CancelledError): + await monitoring_task async def pull_image( docker_client: Docker, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, + service_key: ContainerImage, + service_version: ContainerTag, log_publishing_cb: LogPublishingCB, ) -> None: async for pull_progress in docker_client.images.pull( @@ -482,51 +450,37 @@ async def pull_image( }, ): await log_publishing_cb( - f"Pulling {service_key}:{service_version}: {pull_progress}..." + f"Pulling {service_key}:{service_version}: {pull_progress}...", + logging.DEBUG, ) await log_publishing_cb( - f"Docker image for {service_key}:{service_version} ready on {socket.gethostname()}." + f"Docker image for {service_key}:{service_version} ready on {socket.gethostname()}.", + logging.INFO, ) -async def get_integration_version( +async def get_image_labels( docker_client: Docker, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, -) -> version.Version: + service_key: ContainerImage, + service_version: ContainerTag, +) -> ImageLabels: image_cfg = await docker_client.images.inspect( f"{docker_auth.server_address}/{service_key}:{service_version}" ) # NOTE: old services did not have the integration-version label - integration_version = LEGACY_INTEGRATION_VERSION # image labels are set to None when empty if image_labels := image_cfg["Config"].get("Labels"): logger.debug("found following image labels:\n%s", pformat(image_labels)) - service_integration_label = image_labels.get( - "io.simcore.integration-version", "{}" + data = from_labels( + image_labels, prefix_key=OSPARC_LABEL_PREFIXES[0], trim_key_head=False ) - - service_integration_label = json.loads(service_integration_label).get( - "integration-version", f"{LEGACY_INTEGRATION_VERSION}" - ) - logger.debug( - "found following integration version: %s", - pformat(service_integration_label), - ) - integration_version = version.Version(service_integration_label) - - logger.info( - "%s:%s has integration version %s", - service_key, - service_version, - integration_version, - ) - return integration_version + return TypeAdapter(ImageLabels).validate_python(data) + return ImageLabels() async def get_computational_shared_data_mount_point(docker_client: Docker) -> Path: - app_settings = Settings.create_from_envs() + app_settings = ApplicationSettings.create_from_envs() try: logger.debug( "getting computational shared data mount point for %s", diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/errors.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/errors.py index 5d9779044aa..009ae95f650 100644 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/errors.py +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/errors.py @@ -1,16 +1,4 @@ -from pydantic.errors import PydanticErrorMixin - - -class 
ComputationalSidecarRuntimeError(PydanticErrorMixin, RuntimeError):
-    ...
-
-
-class ServiceRunError(ComputationalSidecarRuntimeError):
-    msg_template = (
-        "The service {service_key}:{service_version} running "
-        "in container {container_id} failed with exit code {exit_code}\n"
-        "last logs: {service_logs}"
-    )
+from ..errors import ComputationalSidecarRuntimeError
 class ServiceBadFormattedOutputError(ComputationalSidecarRuntimeError):
diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/models.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/models.py
index 1665c9f8311..c505329af50 100644
--- a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/models.py
+++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/models.py
@@ -1,9 +1,28 @@
-from typing import Optional
+import re
+from typing import Self
+from models_library.basic_regex import SIMPLE_VERSION_RE
+from models_library.services import ServiceMetaDataPublished
 from packaging import version
-from pydantic import BaseModel, ByteSize, Field, validator
+from pydantic import (
+    BaseModel,
+    ByteSize,
+    ConfigDict,
+    Field,
+    field_validator,
+    model_validator,
+)
 LEGACY_INTEGRATION_VERSION = version.Version("0")
+PROGRESS_REGEXP: re.Pattern[str] = re.compile(
+    r"^(?:\[?PROGRESS\]?:?)?\s*"
+    r"(?P<value>[0-1]?\.\d+|"
+    r"\d+\s*(?:(?P<percent_sign>%)|"
+    r"\d+\s*"
+    r"(?P<percent_explicit>percent))|"
+    r"\[?(?P<fraction>\d+\/\d+)\]?"
+    r"|0|1)"
+)
 class ContainerHostConfig(BaseModel):
@@ -21,7 +40,7 @@ class ContainerHostConfig(BaseModel):
         description="Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used",
     )
     memory: ByteSize = Field(..., alias="Memory", description="Memory limit in bytes")
-    memory_swap: Optional[ByteSize] = Field(
+    memory_swap: ByteSize | None = Field(
         default=None,
         alias="MemorySwap",
         description="Total memory limit (memory + swap). 
Set as -1 to enable unlimited swap.", @@ -30,20 +49,15 @@ class ContainerHostConfig(BaseModel): ..., alias="NanoCPUs", description="CPU quota in units of 10-9 CPUs" ) - @validator("memory_swap", pre=True, always=True) - @classmethod - def ensure_no_memory_swap_means_no_swap(cls, v, values): - if v is None: - # if not set it will be the same value as memory to ensure swap is disabled - return values["memory"] - return v + @model_validator(mode="after") + def ensure_memory_swap_is_not_unlimited(self) -> Self: + if self.memory_swap is None: + self.memory_swap = self.memory - @validator("memory_swap") - @classmethod - def ensure_memory_swap_cannot_be_unlimited_nor_smaller_than_memory(cls, v, values): - if v < values["memory"]: - raise ValueError("Memory swap cannot be set to a smaller value than memory") - return v + if self.memory_swap < self.memory: + msg = "Memory swap cannot be set to a smaller value than memory" + raise ValueError(msg) + return self class DockerContainerConfig(BaseModel): @@ -52,3 +66,44 @@ class DockerContainerConfig(BaseModel): image: str = Field(..., alias="Image") labels: dict[str, str] = Field(..., alias="Labels") host_config: ContainerHostConfig = Field(..., alias="HostConfig") + + +class ImageLabels(BaseModel): + integration_version: str = Field( + default=str(LEGACY_INTEGRATION_VERSION), + alias="integration-version", + description="integration version number", + pattern=SIMPLE_VERSION_RE, + examples=["1.0.0"], + ) + progress_regexp: str = Field( + default=PROGRESS_REGEXP.pattern, + alias="progress_regexp", + description="regexp pattern for detecting computational service's progress", + ) + model_config = ConfigDict(extra="ignore") + + @field_validator("integration_version", mode="before") + @classmethod + def default_integration_version(cls, v): + if v is None: + return ImageLabels().integration_version + return v + + @field_validator("progress_regexp", mode="before") + @classmethod + def default_progress_regexp(cls, v): + if v is None: + return ImageLabels().progress_regexp + return v + + def get_integration_version(self) -> version.Version: + return version.Version(self.integration_version) + + def get_progress_regexp(self) -> re.Pattern[str]: + return re.compile(self.progress_regexp) + + +assert set(ImageLabels.model_fields).issubset( + ServiceMetaDataPublished.model_fields +), "ImageLabels must be compatible with ServiceDockerData" diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/task_shared_volume.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/task_shared_volume.py index f550c8dec74..a757c8b6306 100644 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/task_shared_volume.py +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/computational_sidecar/task_shared_volume.py @@ -1,13 +1,11 @@ import asyncio +import logging import shutil from dataclasses import dataclass from pathlib import Path from types import TracebackType -from typing import Optional, Type -from ..dask_utils import create_dask_worker_logger - -logger = create_dask_worker_logger(__name__) +logger = logging.getLogger(__name__) @dataclass(frozen=True) @@ -59,8 +57,8 @@ async def __aenter__(self) -> "TaskSharedVolumes": async def __aexit__( self, - exc_type: Optional[Type[BaseException]], - exc: Optional[BaseException], - tb: Optional[TracebackType], + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, ) -> None: await 
asyncio.get_event_loop().run_in_executor(None, self.cleanup) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/dask_utils.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/dask_utils.py deleted file mode 100644 index 4c68ac40eba..00000000000 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/dask_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -import asyncio -import contextlib -import logging -from dataclasses import dataclass, field -from enum import Enum -from typing import AsyncIterator, Final, Optional - -import distributed -from dask_task_models_library.container_tasks.errors import TaskCancelledError -from dask_task_models_library.container_tasks.events import ( - BaseTaskEvent, - TaskLogEvent, - TaskProgressEvent, - TaskStateEvent, -) -from dask_task_models_library.container_tasks.io import TaskCancelEventName -from distributed.worker import get_worker -from distributed.worker_state_machine import TaskState - - -def create_dask_worker_logger(name: str) -> logging.Logger: - return logging.getLogger(f"distributed.worker.{name}") - - -logger = create_dask_worker_logger(__name__) - - -def _get_current_task_state() -> Optional[TaskState]: - worker = get_worker() - logger.debug("current worker %s", f"{worker=}") - current_task = worker.get_current_task() - logger.debug("current task %s", f"{current_task=}") - return worker.state.tasks.get(current_task) - - -def is_current_task_aborted() -> bool: - task: Optional[TaskState] = _get_current_task_state() - logger.debug("found following TaskState: %s", task) - if task is None: - # the task was removed from the list of tasks this worker should work on, meaning it is aborted - # NOTE: this does not work in distributed mode, hence we need to use Events, Variables,or PubSub - logger.debug("%s shall be aborted", f"{task=}") - return True - - # NOTE: in distributed mode an event is necessary! - cancel_event = distributed.Event(name=TaskCancelEventName.format(task.key)) - if cancel_event.is_set(): - logger.debug("%s shall be aborted", f"{task=}") - return True - return False - - -_DEFAULT_MAX_RESOURCES: Final[dict[str, float]] = {"CPU": 1, "RAM": 1024**3} - - -def get_current_task_resources() -> dict[str, float]: - current_task_resources = _DEFAULT_MAX_RESOURCES - if task := _get_current_task_state(): - if task_resources := task.resource_restrictions: - current_task_resources.update(task_resources) - return current_task_resources - - -@dataclass() -class TaskPublisher: - state: distributed.Pub = field(init=False) - progress: distributed.Pub = field(init=False) - logs: distributed.Pub = field(init=False) - - def __post_init__(self): - self.state = distributed.Pub(TaskStateEvent.topic_name()) - self.progress = distributed.Pub(TaskProgressEvent.topic_name()) - self.logs = distributed.Pub(TaskLogEvent.topic_name()) - - -_TASK_ABORTION_INTERVAL_CHECK_S: int = 2 - - -@contextlib.asynccontextmanager -async def monitor_task_abortion( - task_name: str, log_publisher: distributed.Pub -) -> AsyncIterator[None]: - """This context manager periodically checks whether the client cancelled the - monitored task. If that is the case, the monitored task will be cancelled (e.g. - a asyncioCancelledError is raised in the task). 
The context manager will then - raise a TaskCancelledError exception which will be propagated back to the client.""" - - async def cancel_task(task_name: str) -> None: - if task := next( - (t for t in asyncio.all_tasks() if t.get_name() == task_name), None - ): - publish_event( - log_publisher, - TaskLogEvent.from_dask_worker(log="[sidecar] cancelling task..."), - ) - logger.debug("cancelling %s....................", f"{task=}") - task.cancel() - - async def periodicaly_check_if_aborted(task_name: str) -> None: - while await asyncio.sleep(_TASK_ABORTION_INTERVAL_CHECK_S, result=True): - logger.debug("checking if %s should be cancelled", f"{task_name=}") - if is_current_task_aborted(): - await cancel_task(task_name) - - periodically_checking_task = None - try: - periodically_checking_task = asyncio.create_task( - periodicaly_check_if_aborted(task_name), - name=f"{task_name}_monitor_task_abortion", - ) - - yield - except asyncio.CancelledError as exc: - publish_event( - log_publisher, - TaskLogEvent.from_dask_worker(log="[sidecar] task run was aborted"), - ) - raise TaskCancelledError from exc - finally: - if periodically_checking_task: - logger.debug( - "cancelling task cancellation checker for task '%s'", - task_name, - ) - periodically_checking_task.cancel() - with contextlib.suppress(asyncio.CancelledError): - await periodically_checking_task - - -def publish_event(dask_pub: distributed.Pub, event: BaseTaskEvent) -> None: - dask_pub.put(event.json()) - - -class LogType(Enum): - LOG = 1 - PROGRESS = 2 - INSTRUMENTATION = 3 - - -def publish_task_logs( - progress_pub: distributed.Pub, - logs_pub: distributed.Pub, - log_type: LogType, - message_prefix: str, - message: str, -) -> None: - logger.info("[%s - %s]: %s", message_prefix, log_type.name, message) - if log_type == LogType.PROGRESS: - publish_event( - progress_pub, - TaskProgressEvent.from_dask_worker(progress=float(message)), - ) - else: - publish_event(logs_pub, TaskLogEvent.from_dask_worker(log=message)) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/errors.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/errors.py new file mode 100644 index 00000000000..1400bf1a269 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/errors.py @@ -0,0 +1,8 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class ComputationalSidecarRuntimeError(OsparcErrorMixin, RuntimeError): ... 
+ + +class ConfigurationError(ComputationalSidecarRuntimeError): + msg_template: str = "Application misconfiguration: {msg}" diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/file_utils.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/file_utils.py deleted file mode 100644 index 50e766846d7..00000000000 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/file_utils.py +++ /dev/null @@ -1,267 +0,0 @@ -import asyncio -import functools -import mimetypes -import time -import zipfile -from io import BytesIO -from pathlib import Path -from typing import Any, Awaitable, Callable, Final, Optional, TypedDict, Union, cast - -import aiofiles -import aiofiles.tempfile -import fsspec -from pydantic import ByteSize, FileUrl, parse_obj_as -from pydantic.networks import AnyUrl -from settings_library.s3 import S3Settings -from yarl import URL - -from .dask_utils import create_dask_worker_logger - -logger = create_dask_worker_logger(__name__) - -HTTP_FILE_SYSTEM_SCHEMES: Final = ["http", "https"] -S3_FILE_SYSTEM_SCHEMES: Final = ["s3", "s3a"] - - -LogPublishingCB = Callable[[str], Awaitable[None]] - - -def _file_progress_cb( - size, - value, - log_publishing_cb: LogPublishingCB, - text_prefix: str, - main_loop: asyncio.AbstractEventLoop, - **kwargs, -): - asyncio.run_coroutine_threadsafe( - log_publishing_cb( - f"{text_prefix}" - f" {100.0 * float(value or 0)/float(size or 1):.1f}%" - f" ({ByteSize(value).human_readable() if value else 0} / {ByteSize(size).human_readable() if size else 'NaN'})" - ), - main_loop, - ) - - -CHUNK_SIZE = 4 * 1024 * 1024 - - -class ClientKWArgsDict(TypedDict): - endpoint_url: str - - -class S3FsSettingsDict(TypedDict): - key: str - secret: str - token: Optional[str] - use_ssl: bool - client_kwargs: ClientKWArgsDict - - -def _s3fs_settings_from_s3_settings(s3_settings: S3Settings) -> S3FsSettingsDict: - return { - "key": s3_settings.S3_ACCESS_KEY, - "secret": s3_settings.S3_SECRET_KEY, - "token": s3_settings.S3_ACCESS_TOKEN, - "use_ssl": s3_settings.S3_SECURE, - "client_kwargs": {"endpoint_url": s3_settings.S3_ENDPOINT}, - } - - -def _file_chunk_streamer(src: BytesIO, dst: BytesIO): - data = src.read(CHUNK_SIZE) - segment_len = dst.write(data) - return (data, segment_len) - - -# TODO: use filecaching to leverage fsspec local cache of files for future improvements -# TODO: use unzip from fsspec to simplify code - - -async def _copy_file( - src_url: AnyUrl, - dst_url: AnyUrl, - *, - log_publishing_cb: LogPublishingCB, - text_prefix: str, - src_storage_cfg: Optional[dict[str, Any]] = None, - dst_storage_cfg: Optional[dict[str, Any]] = None, -): - src_storage_kwargs = src_storage_cfg or {} - dst_storage_kwargs = dst_storage_cfg or {} - with fsspec.open(src_url, mode="rb", **src_storage_kwargs) as src_fp: - with fsspec.open(dst_url, "wb", **dst_storage_kwargs) as dst_fp: - file_size = getattr(src_fp, "size", None) - data_read = True - total_data_written = 0 - t = time.process_time() - while data_read: - ( - data_read, - data_written, - ) = await asyncio.get_event_loop().run_in_executor( - None, _file_chunk_streamer, src_fp, dst_fp - ) - elapsed_time = time.process_time() - t - total_data_written += data_written or 0 - await log_publishing_cb( - f"{text_prefix}" - f" {100.0 * float(total_data_written or 0)/float(file_size or 1):.1f}%" - f" ({ByteSize(total_data_written).human_readable() if total_data_written else 0} / {ByteSize(file_size).human_readable() if file_size else 'NaN'})" - f" [{ByteSize(total_data_written).to('MB')/elapsed_time:.2f} 
MBytes/s (avg)]" - ) - - -_ZIP_MIME_TYPE: Final[str] = "application/zip" - - -async def pull_file_from_remote( - src_url: AnyUrl, - target_mime_type: Optional[str], - dst_path: Path, - log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], -) -> None: - assert src_url.path # nosec - await log_publishing_cb( - f"Downloading '{src_url.path.strip('/')}' into local file '{dst_path.name}'..." - ) - if not dst_path.parent.exists(): - raise ValueError( - f"{dst_path.parent=} does not exist. It must be created by the caller" - ) - - src_mime_type, _ = mimetypes.guess_type(f"{src_url.path}") - if not target_mime_type: - target_mime_type, _ = mimetypes.guess_type(dst_path) - - storage_kwargs: Union[S3FsSettingsDict, dict[str, Any]] = {} - if s3_settings and src_url.scheme in S3_FILE_SYSTEM_SCHEMES: - storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - await _copy_file( - src_url, - parse_obj_as(FileUrl, dst_path.as_uri()), - src_storage_cfg=cast(dict[str, Any], storage_kwargs), - log_publishing_cb=log_publishing_cb, - text_prefix=f"Downloading '{src_url.path.strip('/')}':", - ) - - await log_publishing_cb( - f"Download of '{src_url.path.strip('/')}' into local file '{dst_path.name}' complete." - ) - - if src_mime_type == _ZIP_MIME_TYPE and target_mime_type != _ZIP_MIME_TYPE: - await log_publishing_cb(f"Uncompressing '{dst_path.name}'...") - logger.debug("%s is a zip file and will be now uncompressed", dst_path) - with zipfile.ZipFile(dst_path, "r") as zip_obj: - await asyncio.get_event_loop().run_in_executor( - None, zip_obj.extractall, dst_path.parents[0] - ) - # finally remove the zip archive - await log_publishing_cb(f"Uncompressing '{dst_path.name}' complete.") - dst_path.unlink() - - -async def _push_file_to_http_link( - file_to_upload: Path, dst_url: AnyUrl, log_publishing_cb: LogPublishingCB -): - # NOTE: special case for http scheme when uploading. this is typically a S3 put presigned link. - # Therefore, we need to use the http filesystem directly in order to call the put_file function. - # writing on httpfilesystem is disabled by default. 
- fs = fsspec.filesystem( - "http", - headers={ - "Content-Length": f"{file_to_upload.stat().st_size}", - }, - asynchronous=True, - ) - assert dst_url.path # nosec - await fs._put_file( # pylint: disable=protected-access - file_to_upload, - f"{dst_url}", - method="PUT", - callback=fsspec.Callback( - hooks={ - "progress": functools.partial( - _file_progress_cb, - log_publishing_cb=log_publishing_cb, - text_prefix=f"Uploading '{dst_url.path.strip('/')}':", - main_loop=asyncio.get_event_loop(), - ) - } - ), - ) - - -async def _push_file_to_remote( - file_to_upload: Path, - dst_url: AnyUrl, - log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], -): - logger.debug("Uploading %s to %s...", file_to_upload, dst_url) - assert dst_url.path # nosec - - storage_kwargs: Union[S3FsSettingsDict, dict[str, Any]] = {} - if s3_settings: - storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - - await _copy_file( - parse_obj_as(FileUrl, file_to_upload.as_uri()), - dst_url, - dst_storage_cfg=cast(dict[str, Any], storage_kwargs), - log_publishing_cb=log_publishing_cb, - text_prefix=f"Uploading '{dst_url.path.strip('/')}':", - ) - - -MIMETYPE_APPLICATION_ZIP = "application/zip" - - -async def push_file_to_remote( - src_path: Path, - dst_url: AnyUrl, - log_publishing_cb: LogPublishingCB, - s3_settings: Optional[S3Settings], -) -> None: - if not src_path.exists(): - raise ValueError(f"{src_path=} does not exist") - assert dst_url.path # nosec - async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: - file_to_upload = src_path - - dst_mime_type, _ = mimetypes.guess_type(f"{dst_url.path}") - src_mime_type, _ = mimetypes.guess_type(src_path) - - if dst_mime_type == _ZIP_MIME_TYPE and src_mime_type != _ZIP_MIME_TYPE: - archive_file_path = Path(tmp_dir) / Path(URL(dst_url).path).name - await log_publishing_cb( - f"Compressing '{src_path.name}' to '{archive_file_path.name}'..." - ) - with zipfile.ZipFile( - archive_file_path, mode="w", compression=zipfile.ZIP_DEFLATED - ) as zfp: - await asyncio.get_event_loop().run_in_executor( - None, zfp.write, src_path, src_path.name - ) - logger.debug("%s created.", archive_file_path) - assert archive_file_path.exists() # nosec - file_to_upload = archive_file_path - await log_publishing_cb( - f"Compression of '{src_path.name}' to '{archive_file_path.name}' complete." 
- ) - - await log_publishing_cb(f"Uploading '{file_to_upload.name}' to '{dst_url}'...") - - if dst_url.scheme in HTTP_FILE_SYSTEM_SCHEMES: - logger.debug("destination is a http presigned link") - await _push_file_to_http_link(file_to_upload, dst_url, log_publishing_cb) - else: - await _push_file_to_remote( - file_to_upload, dst_url, log_publishing_cb, s3_settings - ) - - await log_publishing_cb( - f"Upload of '{src_path.name}' to '{dst_url.path.strip('/')}' complete" - ) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/meta.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/meta.py deleted file mode 100644 index c73ad4fdbe1..00000000000 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/meta.py +++ /dev/null @@ -1,43 +0,0 @@ -from contextlib import suppress - -import dask -import pkg_resources - -current_distribution = pkg_resources.get_distribution("simcore_service_dask_sidecar") - -project_name: str = current_distribution.project_name - -api_version: str = current_distribution.version -major, minor, patch = current_distribution.version.split(".") -api_vtag: str = f"v{major}" - -__version__ = current_distribution.version - - -def get_summary() -> str: - with suppress(Exception): - try: - metadata = current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = current_distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" - - -summary: str = get_summary() - - -# https://patorjk.com/software/taag/#p=display&f=Standard&t=dask%20sidecar -BANNER_MESSAGE = rf""" - _ _ _ _ - __| | __ _ ___| | __ ___(_) __| | ___ ___ __ _ _ __ - / _` |/ _` / __| |/ / / __| |/ _` |/ _ \/ __/ _` | '__| - | (_| | (_| \__ \ < \__ \ | (_| | __/ (_| (_| | | - \__,_|\__,_|___/_|\_\ |___/_|\__,_|\___|\___\__,_|_| v{__version__} with dask=={dask.__version__} - -""" - - -def print_banner() -> None: - print(BANNER_MESSAGE, flush=True) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/rabbitmq_worker_plugin.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/rabbitmq_worker_plugin.py new file mode 100644 index 00000000000..ba4936284d7 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/rabbitmq_worker_plugin.py @@ -0,0 +1,159 @@ +import asyncio +import logging +import threading +from asyncio import AbstractEventLoop +from collections.abc import Awaitable +from typing import Final + +import distributed +from servicelib.async_utils import cancel_wait_task +from servicelib.logging_utils import log_catch, log_context +from servicelib.rabbitmq import RabbitMQClient, wait_till_rabbitmq_responsive +from servicelib.rabbitmq._models import RabbitMessage +from settings_library.rabbit import RabbitSettings + +from .errors import ConfigurationError + +_logger = logging.getLogger(__name__) + +_RABBITMQ_CONFIGURATION_ERROR: Final[str] = ( + "RabbitMQ client is not available. Please check the configuration." 
+) + + +class RabbitMQPlugin(distributed.WorkerPlugin): + """Dask Worker Plugin for RabbitMQ integration""" + + name = "rabbitmq_worker_plugin" + _main_thread_loop: AbstractEventLoop | None = None + _client: RabbitMQClient | None = None + _settings: RabbitSettings | None = None + _message_queue: asyncio.Queue | None = None + _message_processor: asyncio.Task | None = None + + def __init__(self, settings: RabbitSettings): + self._settings = settings + + async def _process_messages(self) -> None: + """Process messages from worker threads in the main thread""" + assert self._message_queue is not None # nosec + assert self._client is not None # nosec + + with log_context(_logger, logging.INFO, "RabbitMQ message processor"): + while True: + with log_catch(_logger, reraise=False): + exchange_name, message_data = await self._message_queue.get() + try: + await self._client.publish(exchange_name, message_data) + finally: + self._message_queue.task_done() + + def setup(self, worker: distributed.Worker) -> Awaitable[None]: + """Called when the plugin is attached to a worker""" + + async def _() -> None: + if not self._settings: + _logger.warning( + "RabbitMQ client is de-activated (no settings provided)" + ) + return + + if threading.current_thread() is not threading.main_thread(): + _logger.warning( + "RabbitMQ client plugin setup is not in the main thread! TIP: if in pytest it's ok." + ) + + with log_context( + _logger, + logging.INFO, + f"RabbitMQ client initialization for worker {worker.address}", + ): + self._main_thread_loop = asyncio.get_event_loop() + await wait_till_rabbitmq_responsive(self._settings.dsn) + self._client = RabbitMQClient( + client_name="dask-sidecar", settings=self._settings + ) + + self._message_queue = asyncio.Queue() + self._message_processor = asyncio.create_task( + self._process_messages(), name="rabbit_message_processor" + ) + + return _() + + def teardown(self, worker: distributed.Worker) -> Awaitable[None]: + """Called when the worker shuts down or the plugin is removed""" + + async def _() -> None: + with log_context( + _logger, + logging.INFO, + f"RabbitMQ client teardown for worker {worker.address}", + ): + if not self._client: + return + if threading.current_thread() is threading.main_thread(): + _logger.info( + "RabbitMQ client plugin setup is in the main thread! That is good." + ) + else: + _logger.warning( + "RabbitMQ client plugin setup is not the main thread! TIP: if in pytest it's ok." 
+ ) + + # Cancel the message processor task + if self._message_processor: + with log_catch(_logger, reraise=False): + await cancel_wait_task(self._message_processor, max_delay=5) + self._message_processor = None + + # close client + current_loop = asyncio.get_event_loop() + if self._main_thread_loop != current_loop: + _logger.warning("RabbitMQ client is de-activated (loop mismatch)") + assert self._main_thread_loop # nosec + with log_catch(_logger, reraise=False): + await asyncio.wait_for(self._client.close(), timeout=5.0) + + self._client = None + + return _() + + def get_client(self) -> RabbitMQClient: + """Returns the RabbitMQ client or raises an error if not available""" + if not self._client: + raise ConfigurationError(msg=_RABBITMQ_CONFIGURATION_ERROR) + return self._client + + async def publish_message_from_any_thread( + self, exchange_name: str, message_data: RabbitMessage + ) -> None: + """Enqueue a message to be published to RabbitMQ from any thread""" + assert self._message_queue # nosec + + if threading.current_thread() is threading.main_thread(): + # If we're in the main thread, add directly to the queue + await self._message_queue.put((exchange_name, message_data)) + return + + # If we're in a worker thread, we need to use a different approach + assert self._main_thread_loop # nosec + + # Create a Future in the main thread's event loop + future = asyncio.run_coroutine_threadsafe( + self._message_queue.put((exchange_name, message_data)), + self._main_thread_loop, + ) + + # waiting here is quick, just queueing + future.result() + + +def get_rabbitmq_client(worker: distributed.Worker) -> RabbitMQPlugin: + """Returns the RabbitMQ client or raises an error if not available""" + if not worker.plugins: + raise ConfigurationError(msg=_RABBITMQ_CONFIGURATION_ERROR) + rabbitmq_plugin = worker.plugins.get(RabbitMQPlugin.name) + if not isinstance(rabbitmq_plugin, RabbitMQPlugin): + raise ConfigurationError(msg=_RABBITMQ_CONFIGURATION_ERROR) + return rabbitmq_plugin diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/scheduler.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/scheduler.py new file mode 100644 index 00000000000..0813da01741 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/scheduler.py @@ -0,0 +1,34 @@ +import logging + +import distributed +from dask_task_models_library.plugins.task_life_cycle_scheduler_plugin import ( + TaskLifecycleSchedulerPlugin, +) +from servicelib.logging_utils import log_context + +from ._meta import print_dask_scheduler_banner +from .settings import ApplicationSettings +from .utils.logs import setup_app_logging + +_logger = logging.getLogger(__name__) + + +async def dask_setup(scheduler: distributed.Scheduler) -> None: + """This is a special function recognized by dask when starting with flag --preload""" + assert scheduler # nosec + + settings = ApplicationSettings.create_from_envs() + setup_app_logging(settings) + + with log_context(_logger, logging.INFO, "Launch dask scheduler"): + _logger.info("app settings: %s", settings.model_dump_json(indent=1)) + + scheduler.add_plugin(TaskLifecycleSchedulerPlugin()) + print_dask_scheduler_banner() + + +async def dask_teardown(scheduler: distributed.Scheduler) -> None: + with log_context( + _logger, logging.INFO, f"Tear down dask scheduler at {scheduler.address}" + ): + ... 
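Editor's note on the pattern introduced above: the RabbitMQ worker plugin has to bridge two worlds — dask may run task code in worker threads, while the RabbitMQ client lives on the event loop owned by the main thread — so messages are handed over with asyncio.run_coroutine_threadsafe onto an asyncio.Queue that a long-running task drains. For context, dask loads these modules through its preload mechanism (typically "dask scheduler --preload simcore_service_dask_sidecar.scheduler" and "dask worker --preload simcore_service_dask_sidecar.worker"), which is what triggers the dask_setup/dask_teardown hooks. Below is a minimal, self-contained sketch of that thread-to-loop handoff; the class and all names are illustrative stand-ins, not the osparc API.

import asyncio
import contextlib
import threading


class ThreadSafeQueuePublisher:
    """Toy stand-in for the handoff used by RabbitMQPlugin.publish_message_from_any_thread."""

    def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
        self._loop = loop  # event loop owned by the main thread
        self._queue: asyncio.Queue[str] = asyncio.Queue()

    async def consume(self) -> None:
        # runs on the main thread's loop; a real implementation would publish to RabbitMQ here
        while True:
            message = await self._queue.get()
            print(f"published: {message}")
            self._queue.task_done()

    def publish_from_any_thread(self, message: str) -> None:
        if threading.current_thread() is threading.main_thread():
            # already on the loop's thread: just schedule the put
            self._loop.create_task(self._queue.put(message))
            return
        # worker thread: hop onto the main loop and block only until the item is queued
        future = asyncio.run_coroutine_threadsafe(self._queue.put(message), self._loop)
        future.result()

    async def drain(self) -> None:
        await self._queue.join()


async def main() -> None:
    publisher = ThreadSafeQueuePublisher(asyncio.get_running_loop())
    consumer = asyncio.create_task(publisher.consume())
    # simulate a dask task running in one of the worker's threads
    await asyncio.to_thread(publisher.publish_from_any_thread, "hello from a worker thread")
    await publisher.drain()
    consumer.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await consumer


if __name__ == "__main__":
    asyncio.run(main())

Blocking on future.result() keeps the worker thread waiting only for the enqueue, not for the actual RabbitMQ publish, which mirrors the "waiting here is quick, just queueing" note in the plugin above.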
diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py index 65c32469d40..e0a3e41d3a5 100644 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/settings.py @@ -1,48 +1,67 @@ from pathlib import Path -from typing import Any, Optional, cast +from typing import Annotated, Any from models_library.basic_types import LogLevel -from pydantic import Field, validator -from settings_library.base import BaseCustomSettings +from pydantic import AliasChoices, Field, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.rabbit import RabbitSettings from settings_library.utils_logging import MixinLoggingSettings -class Settings(BaseCustomSettings, MixinLoggingSettings): - SC_BUILD_TARGET: Optional[str] = None - SC_BOOT_MODE: Optional[str] = None - LOG_LEVEL: LogLevel = Field( - LogLevel.INFO.value, - env=["DASK_SIDECAR_LOGLEVEL", "SIDECAR_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"], - ) - - # sidecar config --- +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + DASK_SIDECAR_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "DASK_SIDECAR_LOGLEVEL", "SIDECAR_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME: str SIDECAR_COMP_SERVICES_SHARED_FOLDER: Path - SIDECAR_INTERVAL_TO_CHECK_TASK_ABORTED_S: Optional[int] = 5 - - # dask config ---- + DASK_SIDECAR_INTERVAL_TO_CHECK_TASK_ABORTED_S: int | None = 5 - DASK_START_AS_SCHEDULER: Optional[bool] = Field( - False, description="If this env is set, then the app boots as scheduler" - ) + DASK_START_AS_SCHEDULER: Annotated[ + bool | None, + Field(description="If this env is set, then the app boots as scheduler"), + ] = False - DASK_SCHEDULER_HOST: Optional[str] = Field( - None, - description="Address of the scheduler to register (only if started as worker )", - ) + DASK_SCHEDULER_HOST: Annotated[ + str | None, + Field( + description="Address of the scheduler to register (only if started as worker )", + ), + ] = None - def as_scheduler(self) -> bool: - return bool(self.DASK_START_AS_SCHEDULER) + DASK_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "DASK_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. 
WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + DASK_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "DASK_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] - def as_worker(self) -> bool: - as_worker = not self.as_scheduler() - if as_worker: - assert self.DASK_SCHEDULER_HOST is not None # nosec - return as_worker + DASK_SIDECAR_RABBITMQ: Annotated[ + RabbitSettings | None, Field(json_schema_extra={"auto_default_from_env": True}) + ] - @validator("LOG_LEVEL", pre=True) + @field_validator("DASK_SIDECAR_LOGLEVEL", mode="before") @classmethod def _validate_loglevel(cls, value: Any) -> str: - return cast(str, cls.validate_log_level(f"{value}")) + return cls.validate_log_level(f"{value}") diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/tasks.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/tasks.py deleted file mode 100644 index 5c958e852bb..00000000000 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/tasks.py +++ /dev/null @@ -1,161 +0,0 @@ -import asyncio -import logging -import signal -import threading -from pprint import pformat -from typing import Optional - -import distributed -from dask_task_models_library.container_tasks.docker import DockerBasicAuth -from dask_task_models_library.container_tasks.io import ( - TaskInputData, - TaskOutputData, - TaskOutputDataSchema, -) -from distributed.worker import logger -from models_library.services_resources import BootMode -from pydantic.networks import AnyUrl -from settings_library.s3 import S3Settings - -from .computational_sidecar.core import ComputationalSidecar -from .dask_utils import ( - TaskPublisher, - create_dask_worker_logger, - get_current_task_resources, - monitor_task_abortion, -) -from .meta import print_banner -from .settings import Settings - -log = create_dask_worker_logger(__name__) - - -class GracefulKiller: - """this ensure the dask-worker is gracefully stopped. - the current implementation of distributed.dask_workers does not call close() on the - worker as it probably should. Note: this is still a work in progress though. 
- """ - - kill_now = False - worker = None - task = None - - def __init__(self, worker: distributed.Worker): - signal.signal(signal.SIGINT, self.exit_gracefully) - signal.signal(signal.SIGTERM, self.exit_gracefully) - self.worker = worker - - def exit_gracefully(self, *_args): - tasks = asyncio.all_tasks() - logger.warning( - "Application shutdown detected!\n %s", - pformat([t.get_name() for t in tasks]), - ) - self.kill_now = True - assert self.worker # nosec - self.task = asyncio.create_task( - self.worker.close(timeout=5), name="close_dask_worker_task" - ) - - -async def dask_setup(worker: distributed.Worker) -> None: - """This is a special function recognized by the dask worker when starting with flag --preload""" - settings = Settings.create_from_envs() - # set up logging - logging.basicConfig(level=settings.LOG_LEVEL.value) - logging.root.setLevel(level=settings.LOG_LEVEL.value) - logger.setLevel(level=settings.LOG_LEVEL.value) - - logger.info("Setting up worker...") - logger.info("Settings: %s", pformat(settings.dict())) - - print_banner() - - if threading.current_thread() is threading.main_thread(): - loop = asyncio.get_event_loop() - logger.info("We do have a running loop in the main thread: %s", f"{loop=}") - - if threading.current_thread() is threading.main_thread(): - GracefulKiller(worker) - - -async def dask_teardown(_worker: distributed.Worker) -> None: - logger.warning("Tearing down worker!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") - - -async def _run_computational_sidecar_async( - docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode, -) -> TaskOutputData: - task_publishers = TaskPublisher() - - log.debug( - "run_computational_sidecar %s", - f"{docker_auth=}, {service_key=}, {service_version=}, {input_data=}, {output_data_keys=}, {command=}, {s3_settings=}", - ) - current_task = asyncio.current_task() - assert current_task # nosec - async with monitor_task_abortion( - task_name=current_task.get_name(), log_publisher=task_publishers.logs - ): - task_max_resources = get_current_task_resources() - async with ComputationalSidecar( - service_key=service_key, - service_version=service_version, - input_data=input_data, - output_data_keys=output_data_keys, - log_file_url=log_file_url, - docker_auth=docker_auth, - boot_mode=boot_mode, - task_max_resources=task_max_resources, - task_publishers=task_publishers, - s3_settings=s3_settings, - ) as sidecar: - output_data = await sidecar.run(command=command) - log.debug("completed run of sidecar with result %s", f"{output_data=}") - return output_data - - -def run_computational_sidecar( - docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, -) -> TaskOutputData: - # NOTE: The event loop MUST BE created in the main thread prior to this - # Dask creates threads to run these calls, and the loop shall be created before - # else the loop might get closed by another thread running another task - - try: - _ = asyncio.get_event_loop() - except RuntimeError: - # NOTE: this happens in testing when the dask cluster runs INProcess - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - - result = asyncio.get_event_loop().run_until_complete( - 
_run_computational_sidecar_async( - docker_auth, - service_key, - service_version, - input_data, - output_data_keys, - log_file_url, - command, - s3_settings, - boot_mode, - ) - ) - return result diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils.py deleted file mode 100644 index 4115ccfa169..00000000000 --- a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils.py +++ /dev/null @@ -1,124 +0,0 @@ -import asyncio -import logging -import uuid -from typing import Any, Awaitable, Coroutine, Optional, cast - -import aiodocker -from aiodocker.containers import DockerContainer -from pydantic import ByteSize, parse_obj_as - -logger = logging.getLogger(__name__) - - -def _wrap_async_call(fct: Awaitable[Any]) -> Any: - return asyncio.get_event_loop().run_until_complete(fct) - - -def _nvidia_smi_docker_config(cmd: list[str]) -> dict[str, Any]: - return { - "Cmd": ["nvidia-smi"] + cmd, - "Image": "nvidia/cuda:10.0-base", - "AttachStdin": False, - "AttachStdout": False, - "AttachStderr": False, - "Tty": False, - "OpenStdin": False, - "HostConfig": { - "Init": True, - "AutoRemove": False, - }, # NOTE: The Init parameter shows a weird behavior: no exception thrown when the container fails - } - - -def num_available_gpus() -> int: - """Returns the number of available GPUs, 0 if not a gpu node""" - - async def async_num_available_gpus() -> int: - num_gpus = 0 - container: Optional[DockerContainer] = None - async with aiodocker.Docker() as docker: - spec_config = _nvidia_smi_docker_config(["--list-gpus"]) - try: - container = await docker.containers.run( - config=spec_config, name=f"sidecar_{uuid.uuid4()}_test_gpu" - ) - if not container: - return 0 - - container_data = await container.wait(timeout=10) - container_logs = await cast( - Coroutine, - container.log(stdout=True, stderr=True, follow=False), - ) - num_gpus = ( - len(container_logs) - if container_data.setdefault("StatusCode", 127) == 0 - else 0 - ) - except asyncio.TimeoutError as err: - logger.warning( - "num_gpus timedout while check-run %s: %s", spec_config, err - ) - except aiodocker.exceptions.DockerError as err: - logger.warning( - "num_gpus DockerError while check-run %s: %s", spec_config, err - ) - finally: - if container is not None: - # ensure container is removed - await container.delete() - - return num_gpus - - return cast(int, _wrap_async_call(async_num_available_gpus())) - - -def video_memory() -> int: - """Returns the amount of VRAM available in bytes. 
0 if no GPU available""" - - async def async_video_memory() -> int: - video_ram: ByteSize = ByteSize(0) - container: Optional[DockerContainer] = None - async with aiodocker.Docker() as docker: - spec_config = _nvidia_smi_docker_config( - [ - "--query-gpu=memory.total", - "--format=csv,noheader", - ] - ) - - try: - container = await docker.containers.run( - config=spec_config, name=f"sidecar_{uuid.uuid4()}_test_gpu_memory" - ) - if not container: - return 0 - - container_data = await container.wait(timeout=10) - container_logs = await cast( - Coroutine, - container.log(stdout=True, stderr=True, follow=False), - ) - video_ram = parse_obj_as(ByteSize, 0) - if container_data.setdefault("StatusCode", 127) == 0: - for line in container_logs: - video_ram = parse_obj_as( - ByteSize, video_ram + parse_obj_as(ByteSize, line) - ) - - except asyncio.TimeoutError as err: - logger.warning( - "num_gpus timedout while check-run %s: %s", spec_config, err - ) - except aiodocker.exceptions.DockerError as err: - logger.warning( - "num_gpus DockerError while check-run %s: %s", spec_config, err - ) - finally: - if container is not None: - # ensure container is removed - await container.delete() - - return video_ram - - return cast(int, _wrap_async_call(async_video_memory())) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/__init__.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/dask.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/dask.py new file mode 100644 index 00000000000..60e84053ed0 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/dask.py @@ -0,0 +1,188 @@ +import asyncio +import contextlib +import logging +from collections.abc import AsyncIterator +from dataclasses import dataclass +from typing import Final + +import distributed +from dask_task_models_library.container_tasks.errors import TaskCancelledError +from dask_task_models_library.container_tasks.events import ( + BaseTaskEvent, + TaskProgressEvent, +) +from dask_task_models_library.container_tasks.io import TaskCancelEventName +from dask_task_models_library.container_tasks.protocol import TaskOwner +from dask_task_models_library.models import TASK_RUNNING_PROGRESS_EVENT +from distributed.worker import get_worker +from distributed.worker_state_machine import TaskState +from models_library.progress_bar import ProgressReport +from models_library.rabbitmq_messages import LoggerRabbitMessage +from servicelib.logging_utils import LogLevelInt, LogMessageStr, log_catch, log_context + +from ..rabbitmq_worker_plugin import get_rabbitmq_client + +_logger = logging.getLogger(__name__) + + +def _get_current_task_state() -> TaskState | None: + worker = get_worker() + _logger.debug("current worker %s", f"{worker=}") + current_task = worker.get_current_task() + _logger.debug("current task %s", f"{current_task=}") + return worker.state.tasks.get(current_task) + + +def is_current_task_aborted() -> bool: + task: TaskState | None = _get_current_task_state() + _logger.debug("found following TaskState: %s", task) + if task is None: + # the task was removed from the list of tasks this worker should work on, meaning it is aborted + # NOTE: this does not work in distributed mode, hence we need to use Events, Variables,or PubSub + _logger.debug("%s shall be aborted", f"{task=}") + return True + + # NOTE: in distributed mode an event is 
necessary! + cancel_event = distributed.Event(name=TaskCancelEventName.format(task.key)) + if cancel_event.is_set(): + _logger.debug("%s shall be aborted", f"{task=}") + return True + return False + + +_DEFAULT_MAX_RESOURCES: Final[dict[str, float]] = {"CPU": 1, "RAM": 1024**3} + + +def get_current_task_resources() -> dict[str, float]: + current_task_resources = _DEFAULT_MAX_RESOURCES + if task := _get_current_task_state(): + if task_resources := task.resource_restrictions: + current_task_resources.update(task_resources) + return current_task_resources + + +@dataclass(slots=True, kw_only=True) +class TaskPublisher: + task_owner: TaskOwner + _last_published_progress_value: float = -1 + + def publish_progress(self, report: ProgressReport) -> None: + rounded_value = round(report.percent_value, ndigits=2) + if rounded_value > self._last_published_progress_value: + with ( + log_catch(logger=_logger, reraise=False), + log_context( + _logger, logging.DEBUG, msg=f"publish progress {rounded_value=}" + ), + ): + publish_event( + TaskProgressEvent.from_dask_worker( + progress=rounded_value, task_owner=self.task_owner + ), + ) + self._last_published_progress_value = rounded_value + + async def publish_logs( + self, + *, + message: LogMessageStr, + log_level: LogLevelInt, + ) -> None: + with log_catch(logger=_logger, reraise=False): + rabbitmq_client = get_rabbitmq_client(get_worker()) + base_message = LoggerRabbitMessage.model_construct( + user_id=self.task_owner.user_id, + project_id=self.task_owner.project_id, + node_id=self.task_owner.node_id, + messages=[message], + log_level=log_level, + ) + await rabbitmq_client.publish_message_from_any_thread( + base_message.channel_name, base_message + ) + if self.task_owner.has_parent: + assert self.task_owner.parent_project_id # nosec + assert self.task_owner.parent_node_id # nosec + parent_message = LoggerRabbitMessage.model_construct( + user_id=self.task_owner.user_id, + project_id=self.task_owner.parent_project_id, + node_id=self.task_owner.parent_node_id, + messages=[message], + log_level=log_level, + ) + await rabbitmq_client.publish_message_from_any_thread( + parent_message.channel_name, parent_message + ) + + _logger.log(log_level, message) + + +_TASK_ABORTION_INTERVAL_CHECK_S: int = 2 + + +@contextlib.asynccontextmanager +async def monitor_task_abortion( + task_name: str, task_publishers: TaskPublisher +) -> AsyncIterator[None]: + """This context manager periodically checks whether the client cancelled the + monitored task. If that is the case, the monitored task will be cancelled (e.g. + a asyncioCancelledError is raised in the task). 
The context manager will then + raise a TaskCancelledError exception which will be propagated back to the client.""" + + async def cancel_task(task_name: str) -> None: + if task := next( + (t for t in asyncio.all_tasks() if t.get_name() == task_name), None + ): + await task_publishers.publish_logs( + message="[sidecar] cancelling task...", log_level=logging.INFO + ) + task.cancel() + + async def periodicaly_check_if_aborted(task_name: str) -> None: + while await asyncio.sleep(_TASK_ABORTION_INTERVAL_CHECK_S, result=True): + _logger.debug("checking if %s should be cancelled", f"{task_name=}") + if is_current_task_aborted(): + await cancel_task(task_name) + + periodically_checking_task = None + try: + periodically_checking_task = asyncio.create_task( + periodicaly_check_if_aborted(task_name), + name=f"{task_name}_monitor_task_abortion", + ) + + yield + except asyncio.CancelledError as exc: + await task_publishers.publish_logs( + message="[sidecar] task run was aborted", log_level=logging.INFO + ) + + raise TaskCancelledError from exc + finally: + if periodically_checking_task: + _logger.debug( + "cancelling task cancellation checker for task '%s'", + task_name, + ) + periodically_checking_task.cancel() + with contextlib.suppress(asyncio.CancelledError): + await periodically_checking_task + + +def publish_event( + event: BaseTaskEvent, +) -> None: + """never reraises, only CancellationError""" + worker = get_worker() + _logger.debug("current worker %s", f"{worker=}") + with ( + log_catch(_logger, reraise=False), + log_context(_logger, logging.DEBUG, msg=f"publishing {event=}"), + ): + worker.log_event( + [ + TaskProgressEvent.topic_name(), + TASK_RUNNING_PROGRESS_EVENT.format(key=event.job_id), + ], + event.model_dump_json(), + ) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/files.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/files.py new file mode 100644 index 00000000000..2a108cc595f --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/files.py @@ -0,0 +1,301 @@ +import asyncio +import functools +import logging +import mimetypes +import time +import zipfile +from collections.abc import Callable, Coroutine +from io import IOBase +from pathlib import Path +from typing import Any, Final, TypedDict, cast + +import aiofiles +import aiofiles.tempfile +import fsspec # type: ignore[import-untyped] +import repro_zipfile +from pydantic import ByteSize, FileUrl, TypeAdapter +from pydantic.networks import AnyUrl +from servicelib.logging_utils import LogLevelInt, LogMessageStr +from settings_library.s3 import S3Settings +from yarl import URL + +logger = logging.getLogger(__name__) + +HTTP_FILE_SYSTEM_SCHEMES: Final = ["http", "https"] +S3_FILE_SYSTEM_SCHEMES: Final = ["s3", "s3a"] + + +LogPublishingCB = Callable[[LogMessageStr, LogLevelInt], Coroutine[Any, Any, None]] + + +def _file_progress_cb( + size, + value, + log_publishing_cb: LogPublishingCB, + text_prefix: str, + main_loop: asyncio.AbstractEventLoop, + **kwargs, # noqa: ARG001 +): + asyncio.run_coroutine_threadsafe( + log_publishing_cb( + f"{text_prefix}" + f" {100.0 * float(value or 0) / float(size or 1):.1f}%" + f" ({ByteSize(value).human_readable() if value else 0} / {ByteSize(size).human_readable() if size else 'NaN'})", + logging.DEBUG, + ), + main_loop, + ) + + +CHUNK_SIZE = 4 * 1024 * 1024 + + +class ClientKWArgsDict(TypedDict, total=False): + endpoint_url: str + region_name: str + + +class S3FsSettingsDict(TypedDict): + key: str + secret: str + client_kwargs: 
ClientKWArgsDict + config_kwargs: dict[str, str] # For botocore config options + + +_DEFAULT_AWS_REGION: Final[str] = "us-east-1" + + +def _s3fs_settings_from_s3_settings(s3_settings: S3Settings) -> S3FsSettingsDict: + s3fs_settings: S3FsSettingsDict = { + "key": s3_settings.S3_ACCESS_KEY, + "secret": s3_settings.S3_SECRET_KEY, + "client_kwargs": {}, + "config_kwargs": { + # This setting tells the S3 client to only calculate checksums when explicitly required + # by the operation. This avoids unnecessary checksum calculations for operations that + # don't need them, improving performance. + # See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3.html#calculating-checksums + "request_checksum_calculation": "when_required", + "signature_version": "s3v4", + }, + } + if s3_settings.S3_REGION != _DEFAULT_AWS_REGION: + # NOTE: see https://github.com/boto/boto3/issues/125 why this is so... (sic) + # setting it for the us-east-1 creates issue when creating buckets (which we do in tests) + s3fs_settings["client_kwargs"]["region_name"] = s3_settings.S3_REGION + if s3_settings.S3_ENDPOINT is not None: + s3fs_settings["client_kwargs"]["endpoint_url"] = f"{s3_settings.S3_ENDPOINT}" + return s3fs_settings + + +def _file_chunk_streamer(src: IOBase, dst: IOBase): + data = src.read(CHUNK_SIZE) + segment_len = dst.write(data) + return (data, segment_len) + + +async def _copy_file( + src_url: AnyUrl, + dst_url: AnyUrl, + *, + log_publishing_cb: LogPublishingCB, + text_prefix: str, + src_storage_cfg: dict[str, Any] | None = None, + dst_storage_cfg: dict[str, Any] | None = None, +): + src_storage_kwargs = src_storage_cfg or {} + dst_storage_kwargs = dst_storage_cfg or {} + with ( + fsspec.open( + f"{src_url}", mode="rb", expand=False, **src_storage_kwargs + ) as src_fp, + fsspec.open( + f"{dst_url}", mode="wb", expand=False, **dst_storage_kwargs + ) as dst_fp, + ): + assert isinstance(src_fp, IOBase) # nosec + assert isinstance(dst_fp, IOBase) # nosec + file_size = getattr(src_fp, "size", None) + data_read = True + total_data_written = 0 + t = time.process_time() + while data_read: + ( + data_read, + data_written, + ) = await asyncio.get_event_loop().run_in_executor( + None, _file_chunk_streamer, src_fp, dst_fp + ) + elapsed_time = time.process_time() - t + total_data_written += data_written or 0 + await log_publishing_cb( + f"{text_prefix}" + f" {100.0 * float(total_data_written or 0) / float(file_size or 1):.1f}%" + f" ({ByteSize(total_data_written).human_readable() if total_data_written else 0} / {ByteSize(file_size).human_readable() if file_size else 'NaN'})" + f" [{ByteSize(total_data_written).to('MB') / elapsed_time:.2f} MBytes/s (avg)]", + logging.DEBUG, + ) + + +_ZIP_MIME_TYPE: Final[str] = "application/zip" + + +async def pull_file_from_remote( + src_url: AnyUrl, + target_mime_type: str | None, + dst_path: Path, + log_publishing_cb: LogPublishingCB, + s3_settings: S3Settings | None, +) -> None: + assert src_url.path # nosec + await log_publishing_cb( + f"Downloading '{src_url}' into local file '{dst_path}'...", + logging.INFO, + ) + if not dst_path.parent.exists(): + msg = f"{dst_path.parent=} does not exist. 
It must be created by the caller" + raise ValueError(msg) + + src_mime_type, _ = mimetypes.guess_type(f"{src_url.path}") + if not target_mime_type: + target_mime_type, _ = mimetypes.guess_type(dst_path) + + storage_kwargs: S3FsSettingsDict | dict[str, Any] = {} + if s3_settings and src_url.scheme in S3_FILE_SYSTEM_SCHEMES: + storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + await _copy_file( + src_url, + TypeAdapter(FileUrl).validate_python(dst_path.as_uri()), + src_storage_cfg=cast(dict[str, Any], storage_kwargs), + log_publishing_cb=log_publishing_cb, + text_prefix=f"Downloading '{src_url.path.strip('/')}':", + ) + + await log_publishing_cb( + f"Download of '{src_url}' into local file '{dst_path}' complete.", + logging.INFO, + ) + + if src_mime_type == _ZIP_MIME_TYPE and target_mime_type != _ZIP_MIME_TYPE: + await log_publishing_cb(f"Uncompressing '{dst_path.name}'...", logging.INFO) + logger.debug("%s is a zip file and will be now uncompressed", dst_path) + with repro_zipfile.ReproducibleZipFile(dst_path, "r") as zip_obj: + await asyncio.get_event_loop().run_in_executor( + None, zip_obj.extractall, dst_path.parents[0] + ) + # finally remove the zip archive + await log_publishing_cb( + f"Uncompressing '{dst_path.name}' complete.", logging.INFO + ) + dst_path.unlink() + + +async def _push_file_to_http_link( + file_to_upload: Path, dst_url: AnyUrl, log_publishing_cb: LogPublishingCB +) -> None: + # NOTE: special case for http scheme when uploading. this is typically a S3 put presigned link. + # Therefore, we need to use the http filesystem directly in order to call the put_file function. + # writing on httpfilesystem is disabled by default. + fs = fsspec.filesystem( + "http", + headers={ + "Content-Length": f"{file_to_upload.stat().st_size}", + }, + asynchronous=True, + ) + assert dst_url.path # nosec + await fs._put_file( # pylint: disable=protected-access # noqa: SLF001 + file_to_upload, + f"{dst_url}", + method="PUT", + callback=fsspec.Callback( + hooks={ + "progress": functools.partial( + _file_progress_cb, + log_publishing_cb=log_publishing_cb, + text_prefix=f"Uploading '{dst_url.path.strip('/')}':", + main_loop=asyncio.get_event_loop(), + ) + } + ), + ) + + +async def _push_file_to_remote( + file_to_upload: Path, + dst_url: AnyUrl, + log_publishing_cb: LogPublishingCB, + s3_settings: S3Settings | None, +) -> None: + logger.debug("Uploading %s to %s...", file_to_upload, dst_url) + assert dst_url.path # nosec + + storage_kwargs: S3FsSettingsDict | dict[str, Any] = {} + if s3_settings: + storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + + await _copy_file( + TypeAdapter(FileUrl).validate_python(file_to_upload.as_uri()), + dst_url, + dst_storage_cfg=cast(dict[str, Any], storage_kwargs), + log_publishing_cb=log_publishing_cb, + text_prefix=f"Uploading '{dst_url.path.strip('/')}':", + ) + + +MIMETYPE_APPLICATION_ZIP = "application/zip" + + +async def push_file_to_remote( + src_path: Path, + dst_url: AnyUrl, + log_publishing_cb: LogPublishingCB, + s3_settings: S3Settings | None, +) -> None: + if not src_path.exists(): + msg = f"{src_path=} does not exist" + raise ValueError(msg) + assert dst_url.path # nosec + async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: + file_to_upload = src_path + + dst_mime_type, _ = mimetypes.guess_type(f"{dst_url.path}") + src_mime_type, _ = mimetypes.guess_type(src_path) + + if dst_mime_type == _ZIP_MIME_TYPE and src_mime_type != _ZIP_MIME_TYPE: + archive_file_path = Path(tmp_dir) / Path(URL(f"{dst_url}").path).name + 
await log_publishing_cb( + f"Compressing '{src_path.name}' to '{archive_file_path.name}'...", + logging.INFO, + ) + + with repro_zipfile.ReproducibleZipFile( + archive_file_path, mode="w", compression=zipfile.ZIP_STORED + ) as zfp: + await asyncio.get_event_loop().run_in_executor( + None, zfp.write, src_path, src_path.name + ) + logger.debug("%s created.", archive_file_path) + assert archive_file_path.exists() # nosec + file_to_upload = archive_file_path + await log_publishing_cb( + f"Compression of '{src_path.name}' to '{archive_file_path.name}' complete.", + logging.INFO, + ) + + await log_publishing_cb( + f"Uploading '{file_to_upload.name}' to '{dst_url}'...", logging.INFO + ) + + if dst_url.scheme in HTTP_FILE_SYSTEM_SCHEMES: + logger.debug("destination is a http presigned link") + await _push_file_to_http_link(file_to_upload, dst_url, log_publishing_cb) + else: + await _push_file_to_remote( + file_to_upload, dst_url, log_publishing_cb, s3_settings + ) + + await log_publishing_cb( + f"Upload of '{src_path.name}' to '{dst_url.path.strip('/')}' complete", + logging.INFO, + ) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/gpus.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/gpus.py new file mode 100644 index 00000000000..61481d32c0a --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/gpus.py @@ -0,0 +1,124 @@ +import asyncio +import logging +import uuid +from collections.abc import Awaitable, Coroutine +from typing import Any, cast + +import aiodocker +from aiodocker.containers import DockerContainer +from pydantic import ByteSize, TypeAdapter + +logger = logging.getLogger(__name__) + + +def _wrap_async_call(fct: Awaitable[Any]) -> Any: + return asyncio.get_event_loop().run_until_complete(fct) + + +def _nvidia_smi_docker_config(cmd: list[str]) -> dict[str, Any]: + return { + "Cmd": ["nvidia-smi", *cmd], + "Image": "nvidia/cuda:12.2.0-base-ubuntu22.04", + "AttachStdin": False, + "AttachStdout": False, + "AttachStderr": False, + "Tty": False, + "OpenStdin": False, + "HostConfig": { + "Init": True, + "AutoRemove": False, # NOTE: this cannot be True as we need the logs of the container before removing it + "LogConfig": {"Type": "json-file"}, + }, # NOTE: The Init parameter shows a weird behavior: no exception thrown when the container fails + } + + +def num_available_gpus() -> int: + """Returns the number of available GPUs, 0 if not a gpu node""" + + async def async_num_available_gpus() -> int: + num_gpus = 0 + container: DockerContainer | None = None + async with aiodocker.Docker() as docker: + spec_config = _nvidia_smi_docker_config(["--list-gpus"]) + try: + container = await docker.containers.run( + config=spec_config, name=f"sidecar_{uuid.uuid4()}_test_gpu" + ) + if not container: + return 0 + + container_data = await container.wait(timeout=10) + container_logs = await cast( + Coroutine, + container.log(stdout=True, stderr=True, follow=False), + ) + num_gpus = ( + len(container_logs) + if container_data.setdefault("StatusCode", 127) == 0 + else 0 + ) + except TimeoutError as err: + logger.warning( + "num_gpus timedout while check-run %s: %s", spec_config, err + ) + except aiodocker.exceptions.DockerError as err: + logger.warning( + "num_gpus DockerError while check-run %s: %s", spec_config, err + ) + finally: + if container is not None: + await container.delete(v=True, force=True) + + return num_gpus + + return cast(int, _wrap_async_call(async_num_available_gpus())) + + +def video_memory() -> int: + """Returns the 
amount of VRAM available in bytes. 0 if no GPU available""" + + async def async_video_memory() -> int: + video_ram: ByteSize = ByteSize(0) + container: DockerContainer | None = None + async with aiodocker.Docker() as docker: + spec_config = _nvidia_smi_docker_config( + [ + "--query-gpu=memory.total", + "--format=csv,noheader", + ] + ) + + try: + container = await docker.containers.run( + config=spec_config, name=f"sidecar_{uuid.uuid4()}_test_gpu_memory" + ) + if not container: + return 0 + + container_data = await container.wait(timeout=10) + container_logs = await cast( + Coroutine, + container.log(stdout=True, stderr=True, follow=False), + ) + video_ram = TypeAdapter(ByteSize).validate_python(0) + if container_data.setdefault("StatusCode", 127) == 0: + for line in container_logs: + video_ram = TypeAdapter(ByteSize).validate_python( + video_ram + TypeAdapter(ByteSize).validate_python(line) + ) + + except TimeoutError as err: + logger.warning( + "num_gpus timedout while check-run %s: %s", spec_config, err + ) + except aiodocker.exceptions.DockerError as err: + logger.warning( + "num_gpus DockerError while check-run %s: %s", spec_config, err + ) + finally: + if container is not None: + await container.delete(v=True, force=True) + + return video_ram + + return cast(int, _wrap_async_call(async_video_memory())) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/logs.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/logs.py new file mode 100644 index 00000000000..74b158de9e2 --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/utils/logs.py @@ -0,0 +1,20 @@ +import logging + +from servicelib.logging_utils import config_all_loggers + +from ..settings import ApplicationSettings + + +def setup_app_logging(settings: ApplicationSettings) -> None: + # set up logging + logging.basicConfig(level=settings.DASK_SIDECAR_LOGLEVEL.value) + logging.root.setLevel(level=settings.DASK_SIDECAR_LOGLEVEL.value) + # NOTE: Dask attaches a StreamHandler to the logger in distributed + # removing them solves dual propagation of logs + for handler in logging.getLogger("distributed").handlers: + logging.getLogger("distributed").removeHandler(handler) + config_all_loggers( + log_format_local_dev_enabled=settings.DASK_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=settings.DASK_LOG_FILTER_MAPPING, + tracing_settings=None, # no tracing for dask sidecar + ) diff --git a/services/dask-sidecar/src/simcore_service_dask_sidecar/worker.py b/services/dask-sidecar/src/simcore_service_dask_sidecar/worker.py new file mode 100644 index 00000000000..abaedd698ed --- /dev/null +++ b/services/dask-sidecar/src/simcore_service_dask_sidecar/worker.py @@ -0,0 +1,156 @@ +import asyncio +import logging +import signal +import threading +from pprint import pformat + +import distributed +from dask_task_models_library.container_tasks.docker import DockerBasicAuth +from dask_task_models_library.container_tasks.io import TaskOutputData +from dask_task_models_library.container_tasks.protocol import ( + ContainerTaskParameters, + LogFileUploadURL, +) +from dask_task_models_library.plugins.task_life_cycle_worker_plugin import ( + TaskLifecycleWorkerPlugin, +) +from servicelib.logging_utils import log_context +from settings_library.s3 import S3Settings + +from ._meta import print_dask_sidecar_banner +from .computational_sidecar.core import ComputationalSidecar +from .rabbitmq_worker_plugin import RabbitMQPlugin +from .settings import ApplicationSettings +from .utils.dask import ( + 
TaskPublisher, + get_current_task_resources, + monitor_task_abortion, +) +from .utils.logs import setup_app_logging + +_logger = logging.getLogger(__name__) + + +class GracefulKiller: + """this ensure the dask-worker is gracefully stopped. + the current implementation of distributed.dask_workers does not call close() on the + worker as it probably should. Note: this is still a work in progress though. + """ + + kill_now = False + worker = None + task = None + + def __init__(self, worker: distributed.Worker): + signal.signal(signal.SIGINT, self.exit_gracefully) + signal.signal(signal.SIGTERM, self.exit_gracefully) + self.worker = worker + + def exit_gracefully(self, *_args): + tasks = asyncio.all_tasks() + _logger.warning( + "Application shutdown detected!\n %s", + pformat([t.get_name() for t in tasks]), + ) + self.kill_now = True + assert self.worker # nosec + self.task = asyncio.create_task( + self.worker.close(timeout=5), name="close_dask_worker_task" + ) + + +async def dask_setup(worker: distributed.Worker) -> None: + """This is a special function recognized by dask when starting with flag --preload""" + settings = ApplicationSettings.create_from_envs() + setup_app_logging(settings) + + with log_context(_logger, logging.INFO, "Launch dask worker"): + _logger.info("app settings: %s", settings.model_dump_json(indent=1)) + + if threading.current_thread() is threading.main_thread(): + GracefulKiller(worker) + + loop = asyncio.get_event_loop() + _logger.info("We do have a running loop in the main thread: %s", f"{loop=}") + + if settings.DASK_SIDECAR_RABBITMQ: + try: + await worker.plugin_add( + RabbitMQPlugin(settings.DASK_SIDECAR_RABBITMQ), catch_errors=False + ) + except Exception: + await worker.close(reason="failed to add RabbitMQ plugin") + raise + try: + await worker.plugin_add(TaskLifecycleWorkerPlugin(), catch_errors=False) + except Exception: + await worker.close(reason="failed to add TaskLifecycleWorkerPlugin") + raise + + print_dask_sidecar_banner() + + +async def dask_teardown(worker: distributed.Worker) -> None: + with log_context( + _logger, logging.INFO, f"tear down dask worker at {worker.address}" + ): + ... 
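# Illustration (hypothetical call site): the client side -- director-v2 in this
# repository -- submits the synchronous entry point defined below to the cluster,
# and dask may execute it in a worker thread that has no running event loop yet,
# which is why run_computational_sidecar creates one on demand before driving the
# async sidecar with run_until_complete. A sketch of such a submission:
#
#     future = dask_client.submit(
#         run_computational_sidecar,
#         task_parameters=task_parameters,
#         docker_auth=docker_auth,
#         log_file_url=log_file_url,
#         s3_settings=s3_settings,
#     )
#     output_data: TaskOutputData = future.result()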
+ + +async def _run_computational_sidecar_async( + *, + task_parameters: ContainerTaskParameters, + docker_auth: DockerBasicAuth, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, +) -> TaskOutputData: + task_publishers = TaskPublisher(task_owner=task_parameters.task_owner) + + _logger.info( + "run_computational_sidecar %s", + f"{task_parameters.model_dump()=}, {docker_auth=}, {log_file_url=}, {s3_settings=}", + ) + current_task = asyncio.current_task() + assert current_task # nosec + async with monitor_task_abortion( + task_name=current_task.get_name(), task_publishers=task_publishers + ): + task_max_resources = get_current_task_resources() + async with ComputationalSidecar( + task_parameters=task_parameters, + docker_auth=docker_auth, + log_file_url=log_file_url, + s3_settings=s3_settings, + task_max_resources=task_max_resources, + task_publishers=task_publishers, + ) as sidecar: + output_data = await sidecar.run(command=task_parameters.command) + _logger.info("completed run of sidecar with result %s", f"{output_data=}") + return output_data + + +def run_computational_sidecar( + task_parameters: ContainerTaskParameters, + docker_auth: DockerBasicAuth, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, +) -> TaskOutputData: + # NOTE: The event loop MUST BE created in the main thread prior to this + # Dask creates threads to run these calls, and the loop shall be created before + # else the loop might get closed by another thread running another task + + try: + _ = asyncio.get_event_loop() + except RuntimeError: + # NOTE: this happens in testing when the dask cluster runs INProcess + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + return asyncio.get_event_loop().run_until_complete( + _run_computational_sidecar_async( + task_parameters=task_parameters, + docker_auth=docker_auth, + log_file_url=log_file_url, + s3_settings=s3_settings, + ) + ) diff --git a/services/dask-sidecar/tests/unit/conftest.py b/services/dask-sidecar/tests/unit/conftest.py index c9f4ede9854..2e3fb246f88 100644 --- a/services/dask-sidecar/tests/unit/conftest.py +++ b/services/dask-sidecar/tests/unit/conftest.py @@ -3,35 +3,47 @@ # pylint: disable=unused-variable # pylint: disable=too-many-arguments +from collections.abc import AsyncIterator, Callable, Iterator from pathlib import Path from pprint import pformat -from typing import Any, Callable, Iterable, Iterator, Optional +from typing import cast import dask +import dask.config import distributed import fsspec import pytest import simcore_service_dask_sidecar +from common_library.json_serialization import json_dumps +from common_library.serialization import model_dump_with_secrets +from dask_task_models_library.container_tasks.protocol import TaskOwner from faker import Faker -from minio import Minio -from pydantic import AnyUrl, parse_obj_as -from pytest import MonkeyPatch, TempPathFactory +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import AnyUrl, TypeAdapter from pytest_localftpserver.servers import ProcessFTPServer from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings from settings_library.s3 import S3Settings -from simcore_service_dask_sidecar.file_utils import _s3fs_settings_from_s3_settings +from 
simcore_service_dask_sidecar.utils.files import ( + _s3fs_settings_from_s3_settings, +) from yarl import URL pytest_plugins = [ + "pytest_simcore.aws_server", + "pytest_simcore.aws_s3_service", + "pytest_simcore.cli_runner", "pytest_simcore.docker_compose", "pytest_simcore.docker_registry", "pytest_simcore.docker_swarm", "pytest_simcore.environment_configs", - "pytest_simcore.minio_service", - "pytest_simcore.monkeypatch_extra", - "pytest_simcore.pytest_global_environs", + "pytest_simcore.faker_users_data", + "pytest_simcore.rabbit_service", "pytest_simcore.repository_paths", - "pytest_simcore.tmp_path_extra", ] @@ -51,43 +63,100 @@ def installed_package_dir() -> Path: return dirpath -@pytest.fixture() -def mock_service_envs( - mock_env_devel_environment: dict[str, Optional[str]], - monkeypatch: MonkeyPatch, +@pytest.fixture +def shared_data_folder( + tmp_path: Path, mocker: MockerFixture, - tmp_path_factory: TempPathFactory, -) -> None: - - # Variables directly define inside Dockerfile - monkeypatch.setenv("SC_BOOT_MODE", "debug-ptvsd") - - monkeypatch.setenv("SIDECAR_LOGLEVEL", "DEBUG") - monkeypatch.setenv( - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME", "simcore_computational_shared_data" - ) +) -> Path: + """Emulates shared folder mounted BEFORE app starts""" + shared_data_folder = tmp_path / "home/scu/computational_shared_data" + shared_data_folder.mkdir(parents=True, exist_ok=True) - shared_data_folder = tmp_path_factory.mktemp("pytest_comp_shared_data") assert shared_data_folder.exists() - monkeypatch.setenv("SIDECAR_COMP_SERVICES_SHARED_FOLDER", f"{shared_data_folder}") + mocker.patch( "simcore_service_dask_sidecar.computational_sidecar.core.get_computational_shared_data_mount_point", return_value=shared_data_folder, ) + return shared_data_folder @pytest.fixture -def dask_client(mock_service_envs: None) -> Iterable[distributed.Client]: +def app_environment( + monkeypatch: pytest.MonkeyPatch, + env_devel_dict: EnvVarsDict, + shared_data_folder: Path, + rabbit_service: RabbitSettings, +) -> EnvVarsDict: + # configured as worker + envs = setenvs_from_dict( + monkeypatch, + { + # .env-devel + **env_devel_dict, + # Variables directly define inside Dockerfile + "DASK_SIDECAR_RABBITMQ": json_dumps( + model_dump_with_secrets(rabbit_service, show_secrets=True) + ), + "SC_BOOT_MODE": "debug", + "DASK_SIDECAR_LOGLEVEL": "DEBUG", + "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": "simcore_computational_shared_data", + "SIDECAR_COMP_SERVICES_SHARED_FOLDER": f"{shared_data_folder}", + }, + ) + + # Variables passed upon start via services/docker-compose.yml file under dask-sidecar/scheduler + monkeypatch.delenv("DASK_START_AS_SCHEDULER", raising=False) + + return envs + + +@pytest.fixture +def local_cluster(app_environment: EnvVarsDict) -> Iterator[distributed.LocalCluster]: print(pformat(dask.config.get("distributed"))) with distributed.LocalCluster( worker_class=distributed.Worker, - **{ - "resources": {"CPU": 10, "GPU": 10}, - "preload": "simcore_service_dask_sidecar.tasks", - }, + resources={"CPU": 10, "GPU": 10}, + scheduler_kwargs={"preload": "simcore_service_dask_sidecar.scheduler"}, + preload="simcore_service_dask_sidecar.worker", + ) as cluster: + assert cluster + assert isinstance(cluster, distributed.LocalCluster) + print(cluster.workers) + yield cluster + + +@pytest.fixture +def dask_client( + local_cluster: distributed.LocalCluster, +) -> Iterator[distributed.Client]: + with distributed.Client(local_cluster) as client: + client.wait_for_workers(1, timeout=10) + yield client + + 
+@pytest.fixture +async def async_local_cluster( + app_environment: EnvVarsDict, +) -> AsyncIterator[distributed.LocalCluster]: + print(pformat(dask.config.get("distributed"))) + async with distributed.LocalCluster( + worker_class=distributed.Worker, + resources={"CPU": 10, "GPU": 10}, + preload="simcore_service_dask_sidecar.worker", + asynchronous=True, ) as cluster: - with distributed.Client(cluster) as client: - yield client + assert cluster + assert isinstance(cluster, distributed.LocalCluster) + yield cluster + + +@pytest.fixture +async def async_dask_client( + async_local_cluster: distributed.LocalCluster, +) -> AsyncIterator[distributed.Client]: + async with distributed.Client(async_local_cluster, asynchronous=True) as client: + yield client @pytest.fixture(scope="module") @@ -107,27 +176,17 @@ def ftp_server(ftpserver: ProcessFTPServer) -> list[URL]: @pytest.fixture -def s3_endpoint_url(minio_config: dict[str, Any]) -> AnyUrl: - return parse_obj_as( - AnyUrl, - f"http{'s' if minio_config['client']['secure'] else ''}://{minio_config['client']['endpoint']}", - ) - - -@pytest.fixture -def s3_settings(minio_config: dict[str, Any], minio_service: Minio) -> S3Settings: +def s3_settings(mocked_s3_server_envs: None) -> S3Settings: return S3Settings.create_from_envs() @pytest.fixture -def s3_remote_file_url( - minio_config: dict[str, Any], faker: Faker -) -> Callable[..., AnyUrl]: - def creator(file_path: Optional[Path] = None) -> AnyUrl: - file_path_with_bucket = Path(minio_config["bucket_name"]) / ( +def s3_remote_file_url(s3_settings: S3Settings, faker: Faker) -> Callable[..., AnyUrl]: + def creator(file_path: Path | None = None) -> AnyUrl: + file_path_with_bucket = Path(s3_settings.S3_BUCKET_NAME) / ( file_path or faker.file_name() ) - return parse_obj_as(AnyUrl, f"s3://{file_path_with_bucket}") + return TypeAdapter(AnyUrl).validate_python(f"s3://{file_path_with_bucket}") return creator @@ -143,10 +202,10 @@ def file_on_s3_server( def creator() -> AnyUrl: new_remote_file = s3_remote_file_url() - open_file = fsspec.open(new_remote_file, mode="wt", **s3_storage_kwargs) + open_file = fsspec.open(f"{new_remote_file}", mode="wt", **s3_storage_kwargs) with open_file as fp: fp.write( # type: ignore - f"This is the file contents of file #'{(len(list_of_created_files)+1):03}'" + f"This is the file contents of file #'{(len(list_of_created_files) + 1):03}'\n" ) for s in faker.sentences(5): fp.write(f"{s}\n") # type: ignore @@ -158,4 +217,44 @@ def creator() -> AnyUrl: # cleanup fs = fsspec.filesystem("s3", **s3_storage_kwargs) for file in list_of_created_files: - fs.delete(file.partition(f"{file.scheme}://")[2]) + fs.delete(f"{file}".partition(f"{file.scheme}://")[2]) + + +@pytest.fixture +def job_id() -> str: + return "some_incredible_string" + + +@pytest.fixture +def project_id(faker: Faker) -> ProjectID: + return cast(ProjectID, faker.uuid4(cast_to=None)) + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return cast(NodeID, faker.uuid4(cast_to=None)) + + +@pytest.fixture(params=["no_parent_node", "with_parent_node"]) +def task_owner( + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + request: pytest.FixtureRequest, + faker: Faker, +) -> TaskOwner: + return TaskOwner( + user_id=user_id, + project_id=project_id, + node_id=node_id, + parent_project_id=( + None + if request.param == "no_parent_node" + else cast(ProjectID, faker.uuid4(cast_to=None)) + ), + parent_node_id=( + None + if request.param == "no_parent_node" + else cast(NodeID, faker.uuid4(cast_to=None)) + ), + ) 
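For reference, the local_cluster / dask_client fixtures above exercise dask's preload mechanism: passing preload="simcore_service_dask_sidecar.worker" makes every worker import that module on startup and await its dask_setup(worker) hook (and dask_teardown(worker) on shutdown), which is what installs the RabbitMQ and task-lifecycle plugins. A minimal sketch of the same wiring outside pytest, assuming the package is installed and the environment variables prepared by app_environment (e.g. SIDECAR_COMP_SERVICES_SHARED_FOLDER, DASK_SIDECAR_RABBITMQ) are already exported, could look like the snippet below; _ping is a hypothetical placeholder task, whereas the real tests submit run_computational_sidecar.

import distributed


def _ping() -> str:
    # placeholder payload; the tests in this diff submit run_computational_sidecar instead
    return "pong"


if __name__ == "__main__":
    # mirrors the local_cluster fixture: declare resources and preload the worker
    # module, which triggers dask_setup()/dask_teardown() on each worker
    with distributed.LocalCluster(
        worker_class=distributed.Worker,
        resources={"CPU": 10, "GPU": 10},
        preload="simcore_service_dask_sidecar.worker",
    ) as cluster, distributed.Client(cluster) as client:
        client.wait_for_workers(1, timeout=10)
        assert client.submit(_ping, resources={}).result(timeout=30) == "pong"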
diff --git a/services/dask-sidecar/tests/unit/test__requirements.py b/services/dask-sidecar/tests/unit/test__requirements.py index 737f4417a9f..de6bd947e8c 100644 --- a/services/dask-sidecar/tests/unit/test__requirements.py +++ b/services/dask-sidecar/tests/unit/test__requirements.py @@ -4,6 +4,7 @@ import re from pathlib import Path +from typing import TypeAlias import pytest @@ -16,11 +17,13 @@ def requirements_folder(project_slug_dir: Path) -> Path: return reqs_dir +NameVersionTuple: TypeAlias = tuple[str, str] + + def test_dask_requirements_in_sync(requirements_folder: Path): """If this test fails, do update requirements to re-sync all listings""" REQS_ENTRY_REGEX = re.compile(r"(\w+)==([\.\w]+)") - NameVersionTuple = tuple[str, str] def get_reqs(fname: str) -> set[NameVersionTuple]: return set(REQS_ENTRY_REGEX.findall((requirements_folder / fname).read_text())) diff --git a/services/dask-sidecar/tests/unit/test_cli.py b/services/dask-sidecar/tests/unit/test_cli.py new file mode 100644 index 00000000000..09762400f4e --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_cli.py @@ -0,0 +1,36 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import os + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dask_sidecar._meta import API_VERSION +from simcore_service_dask_sidecar.cli import main +from simcore_service_dask_sidecar.settings import ApplicationSettings +from typer.testing import CliRunner + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +def test_cli_help_and_version(cli_runner: CliRunner): + # invitations-maker --help + result = cli_runner.invoke(main, "--help") + assert result.exit_code == os.EX_OK, result.output + + result = cli_runner.invoke(main, "--version") + assert result.exit_code == os.EX_OK, result.output + assert result.stdout.strip() == API_VERSION + + +def test_list_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK, result.output + + settings = ApplicationSettings(result.output) + assert settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() diff --git a/services/dask-sidecar/tests/unit/test_computational_docker_utils.py b/services/dask-sidecar/tests/unit/test_computational_docker_utils.py new file mode 100644 index 00000000000..4bc154edd95 --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_computational_docker_utils.py @@ -0,0 +1,271 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member + +import asyncio +import re +from typing import Any +from unittest.mock import call + +import aiodocker +import arrow +import pytest +from dask_task_models_library.container_tasks.protocol import ( + ContainerCommands, + ContainerEnvsDict, + ContainerImage, + ContainerLabelsDict, + ContainerTag, +) +from models_library.services_resources import BootMode +from pytest_mock.plugin import MockerFixture +from simcore_service_dask_sidecar.computational_sidecar.docker_utils import ( + _try_parse_progress, + create_container_config, + managed_container, +) +from simcore_service_dask_sidecar.computational_sidecar.models import PROGRESS_REGEXP + + +@pytest.fixture() +def docker_registry() -> str: + return "myregistry.local" + + +@pytest.fixture() +def image() 
-> str: + return "myfake/image" + + +@pytest.fixture() +def tag() -> str: + return "2.3.45" + + +@pytest.fixture() +def command() -> list[str]: + return ["sh", "-c", "some_app"] + + +@pytest.fixture() +def comp_volume_mount_point() -> str: + return "/some/fake/entrypoint" + + +@pytest.mark.parametrize( + "task_max_resources", + [{}, {"CPU": 12, "RAM": 2**9}, {"GPU": 4, "RAM": 1**6}], + ids=lambda x: f"task_resources={x}", +) +@pytest.mark.parametrize("boot_mode", list(BootMode), ids=lambda x: f"bootmode={x}") +@pytest.mark.parametrize( + "task_envs", + [{}, {"SOME_ENV": "whatever value that is"}], + ids=lambda x: f"task_envs={x}", +) +@pytest.mark.parametrize( + "task_labels", + [{}, {"some_label": "some_label value"}], + ids=lambda x: f"task_labels={x}", +) +async def test_create_container_config( + docker_registry: str, + image: ContainerImage, + tag: ContainerTag, + command: ContainerCommands, + comp_volume_mount_point: str, + boot_mode: BootMode, + task_max_resources: dict[str, Any], + task_envs: ContainerEnvsDict, + task_labels: ContainerLabelsDict, +): + container_config = await create_container_config( + docker_registry=docker_registry, + image=image, + tag=tag, + command=command, + comp_volume_mount_point=comp_volume_mount_point, + boot_mode=boot_mode, + task_max_resources=task_max_resources, + envs=task_envs, + labels=task_labels, + ) + assert container_config.model_dump(by_alias=True) == ( + { + "Env": [ + "INPUT_FOLDER=/inputs", + "OUTPUT_FOLDER=/outputs", + "LOG_FOLDER=/logs", + f"SC_COMP_SERVICES_SCHEDULED_AS={boot_mode.value}", + f"SIMCORE_NANO_CPUS_LIMIT={task_max_resources.get('CPU', 1) * 1e9:.0f}", + f"SIMCORE_MEMORY_BYTES_LIMIT={task_max_resources.get('RAM', 1024 ** 3)}", + *[f"{env_var}={env_value}" for env_var, env_value in task_envs.items()], + ], + "Cmd": command, + "Image": f"{docker_registry}/{image}:{tag}", + "Labels": task_labels, + "HostConfig": { + "Binds": [ + f"{comp_volume_mount_point}/inputs:/inputs", + f"{comp_volume_mount_point}/outputs:/outputs", + f"{comp_volume_mount_point}/logs:/logs", + ], + "Init": True, + "Memory": task_max_resources.get("RAM", 1024**3), + "MemorySwap": task_max_resources.get("RAM", 1024**3), + "NanoCPUs": task_max_resources.get("CPU", 1) * 1e9, + }, + } + ) + + +@pytest.mark.parametrize("with_timestamp", [True, False], ids=str) +@pytest.mark.parametrize( + "log_line, expected_progress_value, progress_regexp", + [ + ("hello from the logs", None, PROGRESS_REGEXP), + ( + "[PROGRESS] this is some whatever progress without number", + None, + PROGRESS_REGEXP, + ), + ("[PROGRESS] .34", 0.34, PROGRESS_REGEXP), + ("Progress: this is some progress", None, PROGRESS_REGEXP), + ("PROGRESS: .34", 0.34, PROGRESS_REGEXP), + ("PROGRESS: 44 percent done", 0.44, PROGRESS_REGEXP), + ("44 percent done", 0.44, PROGRESS_REGEXP), + ("PROGRESS: 44/150", 44.0 / 150.0, PROGRESS_REGEXP), + ("PROGRESS: 44/150...", 44.0 / 150.0, PROGRESS_REGEXP), + ("any kind of message even with progress inside", None, PROGRESS_REGEXP), + ("[PROGRESS]1.000000\n", 1.00, PROGRESS_REGEXP), + ("[PROGRESS] 1\n", 1.00, PROGRESS_REGEXP), + ("[PROGRESS] 0\n", 0.00, PROGRESS_REGEXP), + ( + "[PROGRESS]: 1% [ 10 / 624 ] Time Update, estimated remaining time 1 seconds @ 26.43 MCells/s", + 0.01, + PROGRESS_REGEXP, + ), + ("[warn]: this is some warning", None, PROGRESS_REGEXP), + ("err: this is some error", None, PROGRESS_REGEXP), + ( + "progress: 10/0 asd this is a 15% 10/asdf progress without progress it will not break the system", + None, + PROGRESS_REGEXP, + ), + ( + "[PROGRESS]: 21% [ 
1219946 / 5545233 ] Assembling matrix", + 0.21, + re.compile( + "^(?:\\[?PROGRESS\\]?:?)?\\s*(?P<value>[0-1]?\\.\\d+|\\d+\\s*(?P<percent_sign>%))" + ), + ), + ], +) +async def test__try_parse_progress( + with_timestamp: bool, + log_line: str, + expected_progress_value: float, + progress_regexp: re.Pattern[str], +): + expected_time_stamp = arrow.utcnow().datetime + if with_timestamp: + log_line = f"{expected_time_stamp.isoformat()} {log_line}" + + received_progress = await _try_parse_progress( + log_line, progress_regexp=progress_regexp + ) + assert received_progress == expected_progress_value + + +@pytest.mark.parametrize( + "exception_type", + [ + KeyError("testkey"), + asyncio.CancelledError("testcancel"), + aiodocker.DockerError(status=404, data={"message": None}), + ], + ids=str, +) +async def test_managed_container_always_removes_container( + docker_registry: str, + image: ContainerImage, + tag: ContainerTag, + command: ContainerCommands, + comp_volume_mount_point: str, + mocker: MockerFixture, + exception_type: Exception, +): + container_config = await create_container_config( + docker_registry=docker_registry, + image=image, + tag=tag, + command=command, + comp_volume_mount_point=comp_volume_mount_point, + boot_mode=BootMode.CPU, + task_max_resources={}, + envs={}, + labels={}, + ) + + mocked_aiodocker = mocker.patch("aiodocker.Docker", autospec=True) + async with aiodocker.Docker() as docker_client: + with pytest.raises(type(exception_type)): + async with managed_container( + docker_client=docker_client, config=container_config + ) as container: + mocked_aiodocker.assert_has_calls( + calls=[ + call(), + call().__aenter__(), + call() + .__aenter__() + .containers.create( + container_config.model_dump(by_alias=True), name=None + ), + ] + ) + mocked_aiodocker.reset_mock() + assert container is not None + + raise exception_type + # check the container was deleted + mocked_aiodocker.assert_has_calls( + calls=[ + call() + .__aenter__() + .containers.create() + .delete(remove=True, v=True, force=True) + ] + ) + + +async def test_managed_container_with_broken_container_raises_docker_exception( + docker_registry: str, + image: ContainerImage, + tag: ContainerTag, + command: ContainerCommands, + comp_volume_mount_point: str, + mocker: MockerFixture, +): + container_config = await create_container_config( + docker_registry=docker_registry, + image=image, + tag=tag, + command=command, + comp_volume_mount_point=comp_volume_mount_point, + boot_mode=BootMode.CPU, + task_max_resources={}, + envs={}, + labels={}, + ) + mocked_aiodocker = mocker.patch("aiodocker.Docker", autospec=True) + mocked_aiodocker.return_value.__aenter__.return_value.containers.create.return_value.delete.side_effect = aiodocker.DockerError( + "bad", {"message": "pytest fake bad message"} + ) + async with aiodocker.Docker() as docker_client: + with pytest.raises(aiodocker.DockerError, match="pytest fake bad message"): + async with managed_container( + docker_client=docker_client, config=container_config + ) as container: + assert container is not None diff --git a/services/dask-sidecar/tests/unit/test_computational_models.py b/services/dask-sidecar/tests/unit/test_computational_models.py new file mode 100644 index 00000000000..f9e80f67fa4 --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_computational_models.py @@ -0,0 +1,59 @@ +import pytest +from faker import Faker +from pydantic import ByteSize, ValidationError +from simcore_service_dask_sidecar.computational_sidecar.models import ( + ContainerHostConfig, +) + + +def 
test_container_host_config_sets_swap_same_as_memory_if_not_set(faker: Faker): + instance = ContainerHostConfig( + Binds=[faker.pystr() for _ in range(5)], + Memory=ByteSize(faker.pyint()), + NanoCPUs=faker.pyint(min_value=1), + ) + assert instance.memory == instance.memory_swap + + +def test_container_host_config_raises_if_set_negative( + faker: Faker, +): + with pytest.raises(ValidationError): + ContainerHostConfig( + Binds=[faker.pystr() for _ in range(5)], + Memory=ByteSize(faker.pyint(min_value=234)), + NanoCPUs=faker.pyint(min_value=1), + MemorySwap=ByteSize(faker.pyint(min_value=-84654, max_value=-1)), + ) + + +def test_container_host_config_raises_if_set_smaller_than_memory( + faker: Faker, +): + with pytest.raises(ValidationError): + ContainerHostConfig( + Binds=[faker.pystr() for _ in range(5)], + Memory=ByteSize(faker.pyint(min_value=234)), + NanoCPUs=faker.pyint(min_value=1), + MemorySwap=ByteSize(0), + ) + with pytest.raises(ValidationError): + ContainerHostConfig( + Binds=[faker.pystr() for _ in range(5)], + Memory=ByteSize(faker.pyint(min_value=234)), + NanoCPUs=faker.pyint(min_value=1), + MemorySwap=ByteSize(faker.pyint(min_value=1, max_value=233)), + ) + + +def test_container_host_config_sets_swap_if_set_bigger_than_memory( + faker: Faker, +): + instance = ContainerHostConfig( + Binds=[faker.pystr() for _ in range(5)], + Memory=ByteSize(faker.pyint(min_value=234, max_value=434234)), + NanoCPUs=faker.pyint(min_value=1), + MemorySwap=ByteSize(faker.pyint(min_value=434235, max_value=12343424234)), + ) + assert instance.memory_swap + assert instance.memory < instance.memory_swap diff --git a/services/dask-sidecar/tests/unit/test_task_shared_volume.py b/services/dask-sidecar/tests/unit/test_computational_sidecar_task_shared_volume.py similarity index 100% rename from services/dask-sidecar/tests/unit/test_task_shared_volume.py rename to services/dask-sidecar/tests/unit/test_computational_sidecar_task_shared_volume.py diff --git a/services/dask-sidecar/tests/unit/test_computational_sidecar_tasks.py b/services/dask-sidecar/tests/unit/test_computational_sidecar_tasks.py new file mode 100644 index 00000000000..57cf06de1ad --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_computational_sidecar_tasks.py @@ -0,0 +1,853 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member +# pylint: disable=too-many-instance-attributes + +import asyncio +import json +import logging +import re +import threading +from collections.abc import AsyncIterator, Callable, Iterable + +# copied out from dask +from dataclasses import dataclass +from pprint import pformat +from random import randint +from typing import Any +from unittest import mock + +import distributed +import fsspec +import pytest +from common_library.json_serialization import json_dumps +from dask_task_models_library.container_tasks.docker import DockerBasicAuth +from dask_task_models_library.container_tasks.errors import ServiceRuntimeError +from dask_task_models_library.container_tasks.events import TaskProgressEvent +from dask_task_models_library.container_tasks.io import ( + FileUrl, + TaskInputData, + TaskOutputData, + TaskOutputDataSchema, +) +from dask_task_models_library.container_tasks.protocol import ( + ContainerTaskParameters, + TaskOwner, +) +from faker import Faker +from models_library.basic_types import EnvVarKey +from models_library.rabbitmq_messages import LoggerRabbitMessage +from models_library.services import 
ServiceMetaDataPublished +from models_library.services_resources import BootMode +from packaging import version +from pydantic import AnyUrl, SecretStr, TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq._client import RabbitMQClient +from servicelib.rabbitmq._constants import BIND_TO_ALL_TOPICS +from settings_library.s3 import S3Settings +from simcore_service_dask_sidecar.computational_sidecar.docker_utils import ( + LEGACY_SERVICE_LOG_FILE_NAME, +) +from simcore_service_dask_sidecar.computational_sidecar.errors import ( + ServiceBadFormattedOutputError, +) +from simcore_service_dask_sidecar.computational_sidecar.models import ( + LEGACY_INTEGRATION_VERSION, + ImageLabels, +) +from simcore_service_dask_sidecar.utils.dask import _DEFAULT_MAX_RESOURCES +from simcore_service_dask_sidecar.utils.files import ( + _s3fs_settings_from_s3_settings, +) +from simcore_service_dask_sidecar.worker import run_computational_sidecar +from tenacity import ( + AsyncRetrying, + retry_if_exception_type, + stop_after_delay, + wait_fixed, +) + +_logger = logging.getLogger(__name__) + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture() +def dask_subsystem_mock( + mocker: MockerFixture, create_rabbitmq_client: Callable[[str], RabbitMQClient] +) -> dict[str, mock.Mock]: + # mock dask client + dask_client_mock = mocker.patch("distributed.Client", autospec=True) + + # mock tasks get worker and state + dask_distributed_worker_mock = mocker.patch( + "simcore_service_dask_sidecar.utils.dask.get_worker", autospec=True + ) + dask_task_mock = mocker.patch( + "simcore_service_dask_sidecar.utils.dask.TaskState", autospec=True + ) + dask_task_mock.resource_restrictions = {} + dask_distributed_worker_mock.return_value.state.tasks.get.return_value = ( + dask_task_mock + ) + + # ensure dask logger propagates + logging.getLogger("distributed").propagate = True + + # mock dask event worker + dask_distributed_worker_events_mock = mocker.patch( + "dask_task_models_library.container_tasks.events.get_worker", autospec=True + ) + dask_distributed_worker_events_mock.return_value.get_current_task.return_value = ( + "pytest_jobid" + ) + # mock dask event publishing + mocker.patch( + "simcore_service_dask_sidecar.utils.dask.is_current_task_aborted", + autospec=True, + return_value=False, + ) + # mock dask rabbitmq plugin + mock_dask_rabbitmq_plugin = mocker.patch( + "simcore_service_dask_sidecar.rabbitmq_worker_plugin.RabbitMQPlugin", + autospec=True, + ) + mock_rabbitmq_client = create_rabbitmq_client("pytest_dask_sidecar_logs_publisher") + mock_dask_rabbitmq_plugin.get_client.return_value = mock_rabbitmq_client + mock_dask_rabbitmq_plugin.publish_message_from_any_thread = ( + mock_rabbitmq_client.publish + ) + + mocker.patch( + "simcore_service_dask_sidecar.utils.dask.get_rabbitmq_client", + autospec=True, + return_value=mock_dask_rabbitmq_plugin, + ) + + return { + "dask_client": dask_client_mock, + "dask_task_state": dask_task_mock, + } + + +@dataclass(slots=True, kw_only=True) +class ServiceExampleParam: + docker_basic_auth: DockerBasicAuth + service_key: str + service_version: str + command: list[str] + input_data: TaskInputData + output_data_keys: TaskOutputDataSchema + log_file_url: AnyUrl + expected_output_data: TaskOutputData + expected_logs: list[str] + integration_version: version.Version + task_envs: dict[EnvVarKey, str] + task_owner: TaskOwner + boot_mode: BootMode + s3_settings: S3Settings + + def 
sidecar_params(self) -> dict[str, Any]: + return { + "task_parameters": ContainerTaskParameters( + image=self.service_key, + tag=self.service_version, + input_data=self.input_data, + output_data_keys=self.output_data_keys, + command=self.command, + envs=self.task_envs, + labels={}, + task_owner=self.task_owner, + boot_mode=self.boot_mode, + ), + "docker_auth": self.docker_basic_auth, + "log_file_url": self.log_file_url, + "s3_settings": self.s3_settings, + } + + +def _bash_check_env_exist(variable_name: str, variable_value: str) -> list[str]: + return [ + f"if [ -z ${{{variable_name}+x}} ];then echo {variable_name} does not exist && exit 9;fi", + f'if [ "${{{variable_name}}}" != "{variable_value}" ];then echo expected "{variable_value}" and found "${{{variable_name}}}" && exit 9;fi', + ] + + +@pytest.fixture( + params=list(BootMode), + ids=lambda v: f"boot_mode.{v.name}", +) +def boot_mode(request: pytest.FixtureRequest) -> BootMode: + return request.param + + +@pytest.fixture( + # NOTE: legacy version comes second as it is less easy to debug issues with that one + params=[ + "1.0.0", + f"{LEGACY_INTEGRATION_VERSION}", + ], + ids=lambda v: f"integration.version.{v}", +) +def integration_version(request: pytest.FixtureRequest) -> version.Version: + print("--> Using service integration:", request.param) + return version.Version(request.param) + + +@pytest.fixture +def additional_envs(faker: Faker) -> dict[EnvVarKey, str]: + return TypeAdapter(dict[EnvVarKey, str]).validate_python( + faker.pydict(allowed_types=(str,)) + ) + + +@pytest.fixture +def sleeper_task( + integration_version: version.Version, + file_on_s3_server: Callable[..., AnyUrl], + s3_remote_file_url: Callable[..., AnyUrl], + boot_mode: BootMode, + additional_envs: dict[EnvVarKey, str], + faker: Faker, + task_owner: TaskOwner, + s3_settings: S3Settings, +) -> ServiceExampleParam: + """Creates a console task in an ubuntu distro that checks for the expected files and error in case they are missing""" + # let's have some input files on the file server + NUM_FILES = 12 + list_of_files = [file_on_s3_server() for _ in range(NUM_FILES)] + + # defines the inputs of the task + input_data = TaskInputData.model_validate( + { + "input_1": 23, + "input_23": "a string input", + "the_input_43": 15.0, + "the_bool_input_54": False, + **{ + f"some_file_input_{index + 1}": FileUrl(url=file) + for index, file in enumerate(list_of_files) + }, + **{ + f"some_file_input_with_mapping{index + 1}": FileUrl( + url=file, + file_mapping=f"{index + 1}/some_file_input_{index + 1}", + ) + for index, file in enumerate(list_of_files) + }, + } + ) + # check in the console that the expected files are present in the expected INPUT folder (set as ${INPUT_FOLDER} in the service) + file_names = [file.path for file in list_of_files] + list_of_bash_commands = [ + "echo User: $(id $(whoami))", + "echo Inputs:", + "ls -tlah -R ${INPUT_FOLDER}", + "echo Outputs:", + "ls -tlah -R ${OUTPUT_FOLDER}", + "echo Logs:", + "ls -tlah -R ${LOG_FOLDER}", + "echo Envs:", + "printenv", + ] + + # check expected ENVS are set + list_of_bash_commands += _bash_check_env_exist( + variable_name="SC_COMP_SERVICES_SCHEDULED_AS", + variable_value=f"{boot_mode.value}", + ) + list_of_bash_commands += _bash_check_env_exist( + variable_name="SIMCORE_NANO_CPUS_LIMIT", + variable_value=f"{int(_DEFAULT_MAX_RESOURCES['CPU'] * 1e9)}", + ) + list_of_bash_commands += _bash_check_env_exist( + variable_name="SIMCORE_MEMORY_BYTES_LIMIT", + variable_value=f"{_DEFAULT_MAX_RESOURCES['RAM']}", + ) + for env_name, 
env_value in additional_envs.items(): + list_of_bash_commands += _bash_check_env_exist( + variable_name=env_name, variable_value=env_value + ) + + # check input files + list_of_bash_commands += [ + f"(test -f ${{INPUT_FOLDER}}/{file} || (echo ${{INPUT_FOLDER}}/{file} does not exist && exit 1))" + for file in file_names + ] + [f"echo $(cat ${{INPUT_FOLDER}}/{file})" for file in file_names] + + input_json_file_name = ( + "inputs.json" + if integration_version > LEGACY_INTEGRATION_VERSION + else "input.json" + ) + + list_of_bash_commands += [ + f"echo '{faker.text(max_nb_chars=17216)}'", + f"(test -f ${{INPUT_FOLDER}}/{input_json_file_name} || (echo ${{INPUT_FOLDER}}/{input_json_file_name} file does not exists && exit 1))", + f"echo $(cat ${{INPUT_FOLDER}}/{input_json_file_name})", + f"sleep {randint(1, 4)}", # noqa: S311 + ] + + # defines the expected outputs + jsonable_outputs = { + "pytest_string": "is quite an amazing feat", + "pytest_integer": 432, + "pytest_float": 3.2, + "pytest_bool": False, + } + output_file_url = s3_remote_file_url(file_path="output_file") + expected_output_keys = TaskOutputDataSchema.model_validate( + { + **( + {k: {"required": True} for k in jsonable_outputs} + | { + "pytest_file": { + "required": True, + "mapping": "a_outputfile", + "url": f"{output_file_url}", + }, + "pytest_file_with_mapping": { + "required": True, + "mapping": "subfolder/a_outputfile", + "url": f"{output_file_url}", + }, + } + ), + } + ) + expected_output_data = TaskOutputData.model_validate( + { + **( + jsonable_outputs + | { + "pytest_file": { + "url": f"{output_file_url}", + "file_mapping": "a_outputfile", + }, + "pytest_file_with_mapping": { + "url": f"{output_file_url}", + "file_mapping": "subfolder/a_outputfile", + }, + } + ), + } + ) + jsonized_outputs = json.dumps(jsonable_outputs).replace('"', '\\"') + output_json_file_name = ( + "outputs.json" + if integration_version > LEGACY_INTEGRATION_VERSION + else "output.json" + ) + + # check for the log file if legacy version + list_of_bash_commands += [ + "echo $(ls -tlah ${LOG_FOLDER})", + f"(test {'!' if integration_version > LEGACY_INTEGRATION_VERSION else ''} -f ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} || (echo ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} file does {'' if integration_version > LEGACY_INTEGRATION_VERSION else 'not'} exists && exit 1))", + ] + if integration_version == LEGACY_INTEGRATION_VERSION: + list_of_bash_commands = [ + f"{c} >> ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME}" + for c in list_of_bash_commands + ] + # set the final command to generate the output file(s) (files and json output) + list_of_bash_commands += [ + f"echo {jsonized_outputs} > ${{OUTPUT_FOLDER}}/{output_json_file_name}", + "echo 'some data for the output file' > ${OUTPUT_FOLDER}/a_outputfile", + "mkdir -p ${OUTPUT_FOLDER}/subfolder", + "echo 'some data for the output file' > ${OUTPUT_FOLDER}/subfolder/a_outputfile", + ] + + log_file_url = s3_remote_file_url(file_path="log.dat") + + return ServiceExampleParam( + docker_basic_auth=DockerBasicAuth( + server_address="docker.io", username="pytest", password=SecretStr("") + ), + # + # NOTE: we use sleeper because it defines a user + # that can write in outputs and the + # sidecar can remove the outputs dirs + # it is based on ubuntu though but the bad part is that now it uses sh instead of bash... 
+ # cause the entrypoint uses sh + service_key="itisfoundation/sleeper", + service_version="2.1.2", + command=[ + "/bin/bash", + "-c", + " && ".join(list_of_bash_commands), + ], + input_data=input_data, + output_data_keys=expected_output_keys, + log_file_url=log_file_url, + expected_output_data=expected_output_data, + expected_logs=[ + json_dumps( + { + "input_1": 23, + "input_23": "a string input", + "the_input_43": 15.0, + "the_bool_input_54": False, + } + ), + "This is the file contents of file #'001'", + "This is the file contents of file #'002'", + "This is the file contents of file #'003'", + "This is the file contents of file #'004'", + "This is the file contents of file #'005'", + ], + integration_version=integration_version, + task_envs=additional_envs, + task_owner=task_owner, + boot_mode=boot_mode, + s3_settings=s3_settings, + ) + + +@pytest.fixture() +def sidecar_task( + integration_version: version.Version, + file_on_s3_server: Callable[..., AnyUrl], + s3_remote_file_url: Callable[..., AnyUrl], + boot_mode: BootMode, + faker: Faker, + task_owner: TaskOwner, + s3_settings: S3Settings, +) -> Callable[..., ServiceExampleParam]: + def _creator(command: list[str] | None = None) -> ServiceExampleParam: + return ServiceExampleParam( + docker_basic_auth=DockerBasicAuth( + server_address="docker.io", username="pytest", password=SecretStr("") + ), + service_key="ubuntu", + service_version="latest", + command=command + or ["/bin/bash", "-c", "echo 'hello I'm an empty ubuntu task!"], + input_data=TaskInputData.model_validate({}), + output_data_keys=TaskOutputDataSchema.model_validate({}), + log_file_url=s3_remote_file_url(file_path="log.dat"), + expected_output_data=TaskOutputData.model_validate({}), + expected_logs=[], + integration_version=integration_version, + task_envs={}, + task_owner=task_owner, + boot_mode=boot_mode, + s3_settings=s3_settings, + ) + + return _creator + + +@pytest.fixture() +def failing_ubuntu_task( + sidecar_task: Callable[..., ServiceExampleParam], +) -> ServiceExampleParam: + return sidecar_task(command=["/bin/bash", "-c", "some stupid failing command"]) + + +@pytest.fixture() +def sleeper_task_unexpected_output( + sleeper_task: ServiceExampleParam, +) -> ServiceExampleParam: + sleeper_task.command = ["/bin/bash", "-c", "echo we create nothingness"] + return sleeper_task + + +@pytest.fixture() +def caplog_info_level( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: + with caplog.at_level(logging.INFO, logger="simcore_service_dask_sidecar"): + yield caplog + + +@pytest.fixture +def mocked_get_image_labels( + integration_version: version.Version, mocker: MockerFixture +) -> mock.Mock: + assert "json_schema_extra" in ServiceMetaDataPublished.model_config + labels: ImageLabels = TypeAdapter(ImageLabels).validate_python( + ServiceMetaDataPublished.model_json_schema()["examples"][0], + ) + labels.integration_version = f"{integration_version}" + return mocker.patch( + "simcore_service_dask_sidecar.computational_sidecar.core.get_image_labels", + autospec=True, + return_value=labels, + ) + + +@pytest.fixture +async def log_rabbit_client_parser( + create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture +) -> AsyncIterator[mock.AsyncMock]: + # Create a threading event to track when subscription is ready + ready_event = threading.Event() + shutdown_event = threading.Event() + the_mock = mocker.AsyncMock(return_value=True) + + # Worker function to process messages in a separate thread + def message_processor(a_mock: 
mock.AsyncMock): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + client = create_rabbitmq_client("dask_sidecar_pytest_logs_consumer") + + async def subscribe_and_process(a_mock: mock.AsyncMock): + queue_name, _ = await client.subscribe( + LoggerRabbitMessage.get_channel_name(), + a_mock, + exclusive_queue=False, + topics=[BIND_TO_ALL_TOPICS], + ) + ready_event.set() + + # Wait until the test is done + while not shutdown_event.is_set(): # noqa: ASYNC110 + await asyncio.sleep(0.1) + + # Cleanup + await client.unsubscribe(queue_name) + + loop.run_until_complete(subscribe_and_process(a_mock)) + loop.run_until_complete(client.close()) + loop.close() + + # Start the worker thread + worker = threading.Thread( + target=message_processor, kwargs={"a_mock": the_mock}, daemon=False + ) + worker.start() + + # Wait for subscription to be ready + assert ready_event.wait(timeout=10), "Failed to initialize RabbitMQ subscription" + + try: + yield the_mock + finally: + # Signal the worker thread to shut down + shutdown_event.set() + worker.join(timeout=5) + if worker.is_alive(): + _logger.warning("RabbitMQ worker thread did not terminate properly") + + +def test_run_computational_sidecar_real_fct( + caplog_info_level: pytest.LogCaptureFixture, + app_environment: EnvVarsDict, + dask_subsystem_mock: dict[str, mock.Mock], + sleeper_task: ServiceExampleParam, + mocked_get_image_labels: mock.Mock, + s3_settings: S3Settings, + log_rabbit_client_parser: mock.AsyncMock, +): + output_data = run_computational_sidecar( + **sleeper_task.sidecar_params(), + ) + mocked_get_image_labels.assert_called_once_with( + mock.ANY, + sleeper_task.docker_basic_auth, + sleeper_task.service_key, + sleeper_task.service_version, + ) + assert log_rabbit_client_parser.called + + # check that the task produces expected logs + for log in sleeper_task.expected_logs: + r = re.compile( + rf"\[{sleeper_task.service_key}:{sleeper_task.service_version} - .+\/.+\]: ({log})" + ) + search_results = list(filter(r.search, caplog_info_level.messages)) + assert ( + len(search_results) > 0 + ), f"Could not find '{log}' in worker_logs:\n {pformat(caplog_info_level.messages, width=240)}" + for log in sleeper_task.expected_logs: + assert re.search( + rf"\[{sleeper_task.service_key}:{sleeper_task.service_version} - .+\/.+\]: ({log})", + caplog_info_level.text, + ) + # check that the task produce the expected data, not less not more + for k, v in sleeper_task.expected_output_data.items(): + assert k in output_data + assert output_data[k] == v + + s3_storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + + for k, v in output_data.items(): + assert k in sleeper_task.expected_output_data + assert v == sleeper_task.expected_output_data[k] + + # if there are file urls in the output, check they exist + if isinstance(v, FileUrl): + with fsspec.open(f"{v.url}", **s3_storage_kwargs) as fp: + assert fp.details.get("size") > 0 # type: ignore + + # check the task has created a log file + with fsspec.open( + f"{sleeper_task.log_file_url}", mode="rt", **s3_storage_kwargs + ) as fp: + saved_logs = fp.read() # type: ignore + assert saved_logs + for log in sleeper_task.expected_logs: + assert log in saved_logs + + +@pytest.mark.parametrize( + "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True +) +def test_run_multiple_computational_sidecar_dask( + dask_client: distributed.Client, + sleeper_task: ServiceExampleParam, + mocked_get_image_labels: mock.Mock, +): + NUMBER_OF_TASKS = 50 + + futures = [ + dask_client.submit( + 
run_computational_sidecar, + **sleeper_task.sidecar_params(), + resources={}, + ) + for _ in range(NUMBER_OF_TASKS) + ] + + results = dask_client.gather(futures) + assert results + assert isinstance(results, list) + # for result in results: + # check that the task produce the expected data, not less not more + for output_data in results: + for k, v in sleeper_task.expected_output_data.items(): + assert k in output_data + assert output_data[k] == v + + mocked_get_image_labels.assert_called() + + +@pytest.fixture +def progress_event_handler(dask_client: distributed.Client) -> mock.Mock: + mocked_parser = mock.Mock() + dask_client.subscribe_topic(TaskProgressEvent.topic_name(), mocked_parser) + return mocked_parser + + +def _assert_parse_progresses_from_progress_event_handler( + progress_event_handler: mock.Mock, +) -> list[float]: + assert progress_event_handler.called + worker_progresses = [ + TaskProgressEvent.model_validate_json(msg.args[0][1]).progress + for msg in progress_event_handler.call_args_list + ] + assert worker_progresses == sorted( + set(worker_progresses) + ), "ordering of progress values incorrectly sorted!" + assert worker_progresses[0] == 0, "missing/incorrect initial progress value" + assert worker_progresses[-1] == 1, "missing/incorrect final progress value" + return worker_progresses + + +@pytest.mark.parametrize( + "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True +) +async def test_run_computational_sidecar_dask( + app_environment: EnvVarsDict, + sleeper_task: ServiceExampleParam, + progress_event_handler: mock.Mock, + mocked_get_image_labels: mock.Mock, + s3_settings: S3Settings, + log_rabbit_client_parser: mock.AsyncMock, + dask_client: distributed.Client, +): + future = dask_client.submit( + run_computational_sidecar, + **sleeper_task.sidecar_params(), + resources={}, + ) + + worker_name = next(iter(dask_client.scheduler_info()["workers"])) + assert worker_name + output_data = future.result() + assert output_data + assert isinstance(output_data, TaskOutputData) + + # check that the task produces expected logs + _assert_parse_progresses_from_progress_event_handler(progress_event_handler) + + async for attempt in AsyncRetrying( + wait=wait_fixed(1), + stop=stop_after_delay(30), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert log_rabbit_client_parser.called + worker_logs = [ + message + for msg in log_rabbit_client_parser.call_args_list + for message in LoggerRabbitMessage.model_validate_json( + msg.args[0] + ).messages + ] + + print(f"<-- we got {len(worker_logs)} lines of logs") + + for log in sleeper_task.expected_logs: + r = re.compile(rf"^({log}).*") + search_results = list(filter(r.search, worker_logs)) + assert ( + len(search_results) > 0 + ), f"Could not find {log} in worker_logs:\n {pformat(worker_logs, width=240)}" + + # check that the task produce the expected data, not less not more + assert isinstance(output_data, TaskOutputData) + for k, v in sleeper_task.expected_output_data.items(): + assert k in output_data + assert output_data[k] == v + + s3_storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + for k, v in output_data.items(): + assert k in sleeper_task.expected_output_data + assert v == sleeper_task.expected_output_data[k] + + # if there are file urls in the output, check they exist + if isinstance(v, FileUrl): + with fsspec.open(f"{v.url}", **s3_storage_kwargs) as fp: + assert fp.details.get("size") > 0 # type: ignore + mocked_get_image_labels.assert_called() + + 
+@pytest.mark.parametrize( + "integration_version, boot_mode, task_owner", + [("1.0.0", BootMode.CPU, "no_parent_node")], + indirect=True, +) +async def test_run_computational_sidecar_dask_does_not_lose_messages_with_pubsub( + dask_client: distributed.Client, + sidecar_task: Callable[..., ServiceExampleParam], + progress_event_handler: mock.Mock, + mocked_get_image_labels: mock.Mock, + log_rabbit_client_parser: mock.AsyncMock, +): + mocked_get_image_labels.assert_not_called() + NUMBER_OF_LOGS = 20000 + future = dask_client.submit( + run_computational_sidecar, + **sidecar_task( + command=[ + "/bin/bash", + "-c", + " && ".join( + [ + f'N={NUMBER_OF_LOGS}; for ((i=1; i<=N; i++));do echo "This is iteration $i"; echo "progress: $i/{NUMBER_OF_LOGS}"; done ' + ] + ), + ], + ).sidecar_params(), + resources={}, + ) + output_data = future.result() + assert output_data is not None + assert isinstance(output_data, TaskOutputData) + + # check that the task produces expected logs + _assert_parse_progresses_from_progress_event_handler(progress_event_handler) + + async for attempt in AsyncRetrying( + wait=wait_fixed(1), + stop=stop_after_delay(30), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert log_rabbit_client_parser.called + + worker_logs = [ + message + for msg in log_rabbit_client_parser.call_args_list + for message in LoggerRabbitMessage.model_validate_json( + msg.args[0] + ).messages + ] + # check all the awaited logs are in there + filtered_worker_logs = filter( + lambda log: "This is iteration" in log, worker_logs + ) + assert len(list(filtered_worker_logs)) == NUMBER_OF_LOGS + mocked_get_image_labels.assert_called() + + +@pytest.mark.parametrize( + "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True +) +def test_failing_service_raises_exception( + caplog_info_level: pytest.LogCaptureFixture, + app_environment: EnvVarsDict, + dask_subsystem_mock: dict[str, mock.Mock], + failing_ubuntu_task: ServiceExampleParam, + mocked_get_image_labels: mock.Mock, +): + with pytest.raises(ServiceRuntimeError): + run_computational_sidecar(**failing_ubuntu_task.sidecar_params()) + + +@pytest.mark.parametrize( + "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True +) +def test_running_service_that_generates_unexpected_data_raises_exception( + caplog_info_level: pytest.LogCaptureFixture, + app_environment: EnvVarsDict, + dask_subsystem_mock: dict[str, mock.Mock], + sleeper_task_unexpected_output: ServiceExampleParam, +): + with pytest.raises(ServiceBadFormattedOutputError): + run_computational_sidecar( + **sleeper_task_unexpected_output.sidecar_params(), + ) + + +@pytest.mark.parametrize( + "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True +) +def test_delayed_logging_with_small_timeout_raises_exception( + caplog: pytest.LogCaptureFixture, + app_environment: EnvVarsDict, + dask_subsystem_mock: dict[str, mock.Mock], + sidecar_task: Callable[..., ServiceExampleParam], + mocked_get_image_labels: mock.Mock, + mocker: MockerFixture, +): + """https://github.com/aio-libs/aiodocker/issues/901""" + # Mock the timeout with a very small value + mocker.patch( + "simcore_service_dask_sidecar.computational_sidecar.docker_utils._AIODOCKER_LOGS_TIMEOUT_S", + 0.5, # Small timeout that should cause failure + ) + + # Configure the task to sleep first and then generate logs + waiting_task = sidecar_task( + command=[ + "/bin/bash", + "-c", + 'echo "Starting task"; sleep 5; echo "After sleep"', + ] + ) + + # Execute the 
task and expect a timeout exception in the logs + with caplog.at_level(logging.ERROR, logger="simcore_service_dask_sidecar"): + run_computational_sidecar(**waiting_task.sidecar_params()) + assert len(caplog.records) == 1 + record = caplog.records[0] + assert record.exc_info + assert isinstance(record.exc_info[1], TimeoutError) + caplog.clear() + mocker.patch( + "simcore_service_dask_sidecar.computational_sidecar.docker_utils._AIODOCKER_LOGS_TIMEOUT_S", + 10, # larger timeout to avoid issues + ) + with caplog.at_level(logging.ERROR, logger="simcore_service_dask_sidecar"): + run_computational_sidecar(**waiting_task.sidecar_params()) + assert len(caplog.records) == 0 diff --git a/services/dask-sidecar/tests/unit/test_dask_utils.py b/services/dask-sidecar/tests/unit/test_dask_utils.py deleted file mode 100644 index b75e5366a50..00000000000 --- a/services/dask-sidecar/tests/unit/test_dask_utils.py +++ /dev/null @@ -1,159 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=no-member - - -import asyncio -import concurrent.futures -import time -from typing import Any - -import distributed -import pytest -from dask_task_models_library.container_tasks.errors import TaskCancelledError -from dask_task_models_library.container_tasks.events import TaskLogEvent -from dask_task_models_library.container_tasks.io import TaskCancelEventName -from simcore_service_dask_sidecar.dask_utils import ( - _DEFAULT_MAX_RESOURCES, - get_current_task_resources, - is_current_task_aborted, - monitor_task_abortion, - publish_event, -) -from tenacity._asyncio import AsyncRetrying -from tenacity.retry import retry_if_exception_type -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -DASK_TASK_STARTED_EVENT = "task_started" -DASK_TESTING_TIMEOUT_S = 25 - - -async def test_publish_event(dask_client: distributed.Client): - dask_pub = distributed.Pub("some_topic") - dask_sub = distributed.Sub("some_topic") - async for attempt in AsyncRetrying( - reraise=True, - retry=retry_if_exception_type(AssertionError), - wait=wait_fixed(0.01), - stop=stop_after_delay(60), - ): - with attempt: - print( - f"waiting for subscribers... 
attempt={attempt.retry_state.attempt_number}" - ) - assert dask_pub.subscribers - print("we do have subscribers!") - - event_to_publish = TaskLogEvent(job_id="some_fake_job_id", log="the log") - publish_event(dask_pub=dask_pub, event=event_to_publish) - # NOTE: this tests runs a sync dask client, - # and the CI seems to have sometimes difficulties having this run in a reasonable time - # hence the long time out - message = dask_sub.get(timeout=DASK_TESTING_TIMEOUT_S) - assert message is not None - received_task_log_event = TaskLogEvent.parse_raw(message) # type: ignore - assert received_task_log_event == event_to_publish - - -def _wait_for_task_to_start(): - start_event = distributed.Event(DASK_TASK_STARTED_EVENT) - start_event.wait(timeout=DASK_TESTING_TIMEOUT_S) - - -def _notify_task_is_started_and_ready(): - start_event = distributed.Event(DASK_TASK_STARTED_EVENT) - start_event.set() - - -def _some_long_running_task() -> int: - assert is_current_task_aborted() == False - _notify_task_is_started_and_ready() - - for i in range(300): - print("running iteration", i) - time.sleep(0.1) - if is_current_task_aborted(): - print("task is aborted") - return -1 - assert is_current_task_aborted() - return 12 - - -def test_task_is_aborted(dask_client: distributed.Client): - """Tests aborting a task without using an event. In theory once - the future is cancelled, the dask worker shall 'forget' the task. Sadly this does - not work in distributed mode where an Event is necessary.""" - # NOTE: this works because the cluster is in the same machine - future = dask_client.submit(_some_long_running_task) - _wait_for_task_to_start() - future.cancel() - assert future.cancelled() - with pytest.raises(concurrent.futures.CancelledError): - future.result(timeout=DASK_TESTING_TIMEOUT_S) - - -def test_task_is_aborted_using_event(dask_client: distributed.Client): - job_id = "myfake_job_id" - future = dask_client.submit(_some_long_running_task, key=job_id) - _wait_for_task_to_start() - - dask_event = distributed.Event(TaskCancelEventName.format(job_id)) - dask_event.set() - - result = future.result(timeout=2) - assert result == -1 - - -def _some_long_running_task_with_monitoring() -> int: - assert is_current_task_aborted() == False - # we are started now - start_event = distributed.Event(DASK_TASK_STARTED_EVENT) - start_event.set() - - async def _long_running_task_async() -> int: - log_publisher = distributed.Pub(TaskLogEvent.topic_name()) - _notify_task_is_started_and_ready() - async with monitor_task_abortion(task_name=asyncio.current_task().get_name(), log_publisher=log_publisher): # type: ignore - for i in range(300): - print("running iteration", i) - await asyncio.sleep(0.5) - return 12 - - try: - loop = asyncio.get_event_loop() - except RuntimeError: - # NOTE: this happens in testing when the dask cluster runs INProcess - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - return asyncio.get_event_loop().run_until_complete(_long_running_task_async()) - - -def test_monitor_task_abortion(dask_client: distributed.Client): - job_id = "myfake_job_id" - future = dask_client.submit(_some_long_running_task_with_monitoring, key=job_id) - _wait_for_task_to_start() - # trigger cancellation - dask_event = distributed.Event(TaskCancelEventName.format(job_id)) - dask_event.set() - with pytest.raises(TaskCancelledError): - future.result(timeout=DASK_TESTING_TIMEOUT_S) - - -@pytest.mark.parametrize( - "resources", - [ - ({"CPU": 2}), - ({"GPU": 5.0}), - ], -) -def test_task_resources( - dask_client: 
distributed.Client, - resources: dict[str, Any], -): - future = dask_client.submit(get_current_task_resources, resources=resources) - received_resources = future.result(timeout=DASK_TESTING_TIMEOUT_S) - current_resources = _DEFAULT_MAX_RESOURCES - current_resources.update(resources) - assert received_resources == current_resources diff --git a/services/dask-sidecar/tests/unit/test_deployment.py b/services/dask-sidecar/tests/unit/test_deployment.py index 08beb0cd2e7..ee4fedf3d38 100644 --- a/services/dask-sidecar/tests/unit/test_deployment.py +++ b/services/dask-sidecar/tests/unit/test_deployment.py @@ -1,8 +1,8 @@ -from typing import Any, Dict +from typing import Any def test_sidecar_service_is_deployed_in_global_mode( - simcore_docker_compose: Dict[str, Any] + simcore_docker_compose: dict[str, Any], ): dask_sidecar_deploy_config = simcore_docker_compose["services"]["dask-sidecar"][ "deploy" diff --git a/services/dask-sidecar/tests/unit/test_docker_utils.py b/services/dask-sidecar/tests/unit/test_docker_utils.py deleted file mode 100644 index 69d3d68a0a1..00000000000 --- a/services/dask-sidecar/tests/unit/test_docker_utils.py +++ /dev/null @@ -1,284 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=no-member - -import asyncio -from typing import Any -from unittest.mock import call - -import aiodocker -import pytest -from models_library.services_resources import BootMode -from pytest_mock.plugin import MockerFixture -from simcore_service_dask_sidecar.computational_sidecar.docker_utils import ( - DEFAULT_TIME_STAMP, - LogType, - create_container_config, - managed_container, - parse_line, -) - - -@pytest.fixture() -def docker_registry() -> str: - return "myregistry.local" - - -@pytest.fixture() -def service_key() -> str: - return "myfake/service_key" - - -@pytest.fixture() -def service_version() -> str: - return "2.3.45" - - -@pytest.fixture() -def command() -> list[str]: - return ["sh", "-c", "some_app"] - - -@pytest.fixture() -def comp_volume_mount_point() -> str: - return "/some/fake/entrypoint" - - -@pytest.mark.parametrize( - "task_max_resources", [{}, {"CPU": 12, "RAM": 2**9}, {"GPU": 4, "RAM": 1**6}] -) -@pytest.mark.parametrize("boot_mode", list(BootMode)) -async def test_create_container_config( - docker_registry: str, - service_key: str, - service_version: str, - command: list[str], - comp_volume_mount_point: str, - boot_mode: BootMode, - task_max_resources: dict[str, Any], -): - container_config = await create_container_config( - docker_registry, - service_key, - service_version, - command, - comp_volume_mount_point, - boot_mode, - task_max_resources, - ) - assert container_config.dict(by_alias=True) == ( - { - "Env": [ - "INPUT_FOLDER=/inputs", - "OUTPUT_FOLDER=/outputs", - "LOG_FOLDER=/logs", - f"SC_COMP_SERVICES_SCHEDULED_AS={boot_mode.value}", - f"SIMCORE_NANO_CPUS_LIMIT={task_max_resources.get('CPU', 1) * 1e9:.0f}", - f"SIMCORE_MEMORY_BYTES_LIMIT={task_max_resources.get('RAM', 1024 ** 3)}", - ], - "Cmd": command, - "Image": f"{docker_registry}/{service_key}:{service_version}", - "Labels": {}, - "HostConfig": { - "Binds": [ - f"{comp_volume_mount_point}/inputs:/inputs", - f"{comp_volume_mount_point}/outputs:/outputs", - f"{comp_volume_mount_point}/logs:/logs", - ], - "Init": True, - "Memory": task_max_resources.get("RAM", 1024**3), - "MemorySwap": task_max_resources.get("RAM", 1024**3), - "NanoCPUs": task_max_resources.get("CPU", 1) * 1e9, - }, - } - ) - - -@pytest.mark.parametrize( - 
"log_line, expected_parsing", - [ - ( - "2021-10-05T09:53:48.873236400Z hello from the logs", - ( - LogType.LOG, - "2021-10-05T09:53:48.873236400Z", - "hello from the logs", - ), - ), - ( - "This is not an expected docker log", - ( - LogType.LOG, - DEFAULT_TIME_STAMP, - "This is not an expected docker log", - ), - ), - ( - "2021-10-05T09:53:48.873236400Z [progress] this is some whatever progress without number", - ( - LogType.LOG, - "2021-10-05T09:53:48.873236400Z", - "[progress] this is some whatever progress without number", - ), - ), - ( - "2021-10-05T09:53:48.873236400Z [Progress] 34%", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.34"), - ), - ( - "2021-10-05T09:53:48.873236400Z [PROGRESS] .34", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.34"), - ), - ( - "2021-10-05T09:53:48.873236400Z [progress] 0.44", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.44"), - ), - ( - "2021-10-05T09:53:48.873236400Z [progress] 44 percent done", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.44"), - ), - ( - "2021-10-05T09:53:48.873236400Z [progress] 44/150", - ( - LogType.PROGRESS, - "2021-10-05T09:53:48.873236400Z", - f"{(44.0 / 150.0):.2f}", - ), - ), - ( - "2021-10-05T09:53:48.873236400Z Progress: this is some progress", - ( - LogType.LOG, - "2021-10-05T09:53:48.873236400Z", - "Progress: this is some progress", - ), - ), - ( - "2021-10-05T09:53:48.873236400Z progress: 34%", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.34"), - ), - ( - "2021-10-05T09:53:48.873236400Z PROGRESS: .34", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.34"), - ), - ( - "2021-10-05T09:53:48.873236400Z progress: 0.44", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.44"), - ), - ( - "2021-10-05T09:53:48.873236400Z progress: 44 percent done", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.44"), - ), - ( - "2021-10-05T09:53:48.873236400Z progress: 44/150", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", f"{(44.0/150.0):.2f}"), - ), - ( - "2021-10-05T09:53:48.873236400Z any kind of message even with progress inside", - ( - LogType.LOG, - "2021-10-05T09:53:48.873236400Z", - "any kind of message even with progress inside", - ), - ), - ( - "2021-10-05T09:53:48.873236400Z [PROGRESS]1.000000\n", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "1.00"), - ), - ( - "2021-10-05T09:53:48.873236400Z [PROGRESS]: 1% [ 10 / 624 ] Time Update, estimated remaining time 1 seconds @ 26.43 MCells/s", - (LogType.PROGRESS, "2021-10-05T09:53:48.873236400Z", "0.01"), - ), - ], -) -async def test_parse_line(log_line: str, expected_parsing: tuple[LogType, str, str]): - assert await parse_line(log_line) == expected_parsing - - -@pytest.mark.parametrize( - "exception_type", - [ - KeyError("testkey"), - asyncio.CancelledError("testcancel"), - aiodocker.DockerError(status=404, data={"message": None}), - ], -) -async def test_managed_container_always_removes_container( - docker_registry: str, - service_key: str, - service_version: str, - command: list[str], - comp_volume_mount_point: str, - mocker: MockerFixture, - exception_type: Exception, -): - container_config = await create_container_config( - docker_registry, - service_key, - service_version, - command, - comp_volume_mount_point, - boot_mode=BootMode.CPU, - task_max_resources={}, - ) - - mocked_aiodocker = mocker.patch("aiodocker.Docker", autospec=True) - async with aiodocker.Docker() as docker_client: - with pytest.raises(type(exception_type)): - async with managed_container( - 
docker_client=docker_client, config=container_config - ) as container: - mocked_aiodocker.assert_has_calls( - calls=[ - call(), - call().__aenter__(), - call() - .__aenter__() - .containers.create( - container_config.dict(by_alias=True), name=None - ), - ] - ) - mocked_aiodocker.reset_mock() - assert container is not None - - raise exception_type - # check the container was deleted - mocked_aiodocker.assert_has_calls( - calls=[ - call() - .__aenter__() - .containers.create() - .delete(remove=True, v=True, force=True) - ] - ) - - -async def test_managed_container_with_broken_container_raises_docker_exception( - docker_registry: str, - service_key: str, - service_version: str, - command: list[str], - comp_volume_mount_point: str, - mocker: MockerFixture, -): - container_config = await create_container_config( - docker_registry, - service_key, - service_version, - command, - comp_volume_mount_point, - boot_mode=BootMode.CPU, - task_max_resources={}, - ) - mocked_aiodocker = mocker.patch("aiodocker.Docker", autospec=True) - mocked_aiodocker.return_value.__aenter__.return_value.containers.create.return_value.delete.side_effect = aiodocker.DockerError( - "bad", {"message": "pytest fake bad message"} - ) - async with aiodocker.Docker() as docker_client: - with pytest.raises(aiodocker.DockerError, match="pytest fake bad message"): - async with managed_container( - docker_client=docker_client, config=container_config - ) as container: - assert container is not None diff --git a/services/dask-sidecar/tests/unit/test_file_utils.py b/services/dask-sidecar/tests/unit/test_file_utils.py deleted file mode 100644 index 8a0512146fd..00000000000 --- a/services/dask-sidecar/tests/unit/test_file_utils.py +++ /dev/null @@ -1,380 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import asyncio -import mimetypes -import zipfile -from dataclasses import dataclass -from pathlib import Path -from typing import Any, AsyncIterable, Optional, cast -from unittest import mock - -import fsspec -import pytest -from faker import Faker -from minio import Minio -from pydantic import AnyUrl, parse_obj_as -from pytest import FixtureRequest -from pytest_localftpserver.servers import ProcessFTPServer -from pytest_mock.plugin import MockerFixture -from settings_library.s3 import S3Settings -from simcore_service_dask_sidecar.file_utils import ( - _s3fs_settings_from_s3_settings, - pull_file_from_remote, - push_file_to_remote, -) - - -@pytest.fixture() -async def mocked_log_publishing_cb( - event_loop: asyncio.AbstractEventLoop, - mocker: MockerFixture, -) -> AsyncIterable[mock.AsyncMock]: - async with mocker.AsyncMock() as mocked_callback: - yield mocked_callback - - -pytest_simcore_core_services_selection = [ - "postgres" -] # TODO: unnecessary but test framework requires it, only minio is useful here -pytest_simcore_ops_services_selection = ["minio"] - - -@pytest.fixture -def s3_presigned_link_storage_kwargs( - minio_config: dict[str, Any], minio_service: Minio -) -> dict[str, Any]: - return {} - - -@pytest.fixture -def ftp_remote_file_url(ftpserver: ProcessFTPServer, faker: Faker) -> AnyUrl: - return parse_obj_as( - AnyUrl, f"{ftpserver.get_login_data(style='url')}/{faker.file_name()}" - ) - - -@pytest.fixture -def s3_presigned_link_remote_file_url( - minio_config: dict[str, Any], - minio_service: Minio, - faker: Faker, -) -> AnyUrl: - - return parse_obj_as( - AnyUrl, - minio_service.presigned_put_object( - minio_config["bucket_name"], faker.file_name() - ), 
- ) - - -@pytest.fixture -def s3_remote_file_url(minio_config: dict[str, Any], faker: Faker) -> AnyUrl: - return parse_obj_as( - AnyUrl, f"s3://{minio_config['bucket_name']}{faker.file_path()}" - ) - - -@dataclass(frozen=True) -class StorageParameters: - s3_settings: Optional[S3Settings] - remote_file_url: AnyUrl - - -@pytest.fixture(params=["ftp", "s3"]) -def remote_parameters( - request: FixtureRequest, - ftp_remote_file_url: AnyUrl, - s3_remote_file_url: AnyUrl, - s3_settings: S3Settings, -) -> StorageParameters: - return { - "ftp": StorageParameters(s3_settings=None, remote_file_url=ftp_remote_file_url), - "s3": StorageParameters( - s3_settings=s3_settings, remote_file_url=s3_remote_file_url - ), - }[ - request.param # type: ignore - ] - - -async def test_push_file_to_remote( - remote_parameters: StorageParameters, - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - # let's create some file with text inside - src_path = tmp_path / faker.file_name() - TEXT_IN_FILE = faker.text() - src_path.write_text(TEXT_IN_FILE) - assert src_path.exists() - # push it to the remote - await push_file_to_remote( - src_path, - remote_parameters.remote_file_url, - mocked_log_publishing_cb, - remote_parameters.s3_settings, - ) - - # check the remote is actually having the file in - storage_kwargs = {} - if remote_parameters.s3_settings: - storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) - - with cast( - fsspec.core.OpenFile, - fsspec.open( - remote_parameters.remote_file_url, - mode="rt", - **storage_kwargs, - ), - ) as fp: - assert fp.read() == TEXT_IN_FILE - mocked_log_publishing_cb.assert_called() - - -async def test_push_file_to_remote_s3_http_presigned_link( - s3_presigned_link_remote_file_url: AnyUrl, - s3_settings: S3Settings, - minio_config: dict[str, Any], - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - # let's create some file with text inside - src_path = tmp_path / faker.file_name() - TEXT_IN_FILE = faker.text() - src_path.write_text(TEXT_IN_FILE) - assert src_path.exists() - # push it to the remote - await push_file_to_remote( - src_path, - s3_presigned_link_remote_file_url, - mocked_log_publishing_cb, - s3_settings=None, - ) - - # check the remote is actually having the file in, but we need s3 access now - s3_remote_file_url = parse_obj_as( - AnyUrl, - f"s3:/{s3_presigned_link_remote_file_url.path}", - ) - - storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - with cast( - fsspec.core.OpenFile, - fsspec.open(s3_remote_file_url, mode="rt", **storage_kwargs), - ) as fp: - assert fp.read() == TEXT_IN_FILE - mocked_log_publishing_cb.assert_called() - - -async def test_push_file_to_remote_compresses_if_zip_destination( - remote_parameters: StorageParameters, - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - destination_url = parse_obj_as(AnyUrl, f"{remote_parameters.remote_file_url}.zip") - src_path = tmp_path / faker.file_name() - TEXT_IN_FILE = faker.text() - src_path.write_text(TEXT_IN_FILE) - assert src_path.exists() - - await push_file_to_remote( - src_path, - destination_url, - mocked_log_publishing_cb, - remote_parameters.s3_settings, - ) - - storage_kwargs = {} - if remote_parameters.s3_settings: - storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) - open_files = fsspec.open_files( - f"zip://*::{destination_url}", - mode="rt", - **{destination_url.scheme: storage_kwargs}, - ) - assert len(open_files) == 1 - with 
open_files[0] as fp: - assert fp.read() == TEXT_IN_FILE # type: ignore - mocked_log_publishing_cb.assert_called() - - -async def test_pull_file_from_remote( - remote_parameters: StorageParameters, - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - storage_kwargs = {} - if remote_parameters.s3_settings: - storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) - # put some file on the remote - TEXT_IN_FILE = faker.text() - with cast( - fsspec.core.OpenFile, - fsspec.open( - remote_parameters.remote_file_url, - mode="wt", - **storage_kwargs, - ), - ) as fp: - fp.write(TEXT_IN_FILE) - - # now let's get the file through the util - dst_path = tmp_path / faker.file_name() - await pull_file_from_remote( - src_url=remote_parameters.remote_file_url, - target_mime_type=None, - dst_path=dst_path, - log_publishing_cb=mocked_log_publishing_cb, - s3_settings=remote_parameters.s3_settings, - ) - assert dst_path.exists() - assert dst_path.read_text() == TEXT_IN_FILE - mocked_log_publishing_cb.assert_called() - - -async def test_pull_file_from_remote_s3_presigned_link( - s3_settings: S3Settings, - s3_remote_file_url: AnyUrl, - minio_service: Minio, - minio_config: dict[str, Any], - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - # put some file on the remote - TEXT_IN_FILE = faker.text() - with cast( - fsspec.core.OpenFile, - fsspec.open( - s3_remote_file_url, - mode="wt", - **storage_kwargs, - ), - ) as fp: - fp.write(TEXT_IN_FILE) - - # create a corresponding presigned get link - assert s3_remote_file_url.path - remote_file_url = parse_obj_as( - AnyUrl, - minio_service.presigned_get_object( - minio_config["bucket_name"], - s3_remote_file_url.path.removeprefix(f"/{minio_config['bucket_name']}/"), - ), - ) - # now let's get the file through the util - dst_path = tmp_path / faker.file_name() - await pull_file_from_remote( - src_url=remote_file_url, - target_mime_type=None, - dst_path=dst_path, - log_publishing_cb=mocked_log_publishing_cb, - s3_settings=None, - ) - assert dst_path.exists() - assert dst_path.read_text() == TEXT_IN_FILE - mocked_log_publishing_cb.assert_called() - - -async def test_pull_compressed_zip_file_from_remote( - remote_parameters: StorageParameters, - tmp_path: Path, - faker: Faker, - mocked_log_publishing_cb: mock.AsyncMock, -): - # put some zip file on the remote - local_zip_file_path = tmp_path / f"{faker.file_name()}.zip" - file_names_within_zip_file = set() - with zipfile.ZipFile( - local_zip_file_path, compression=zipfile.ZIP_DEFLATED, mode="w" - ) as zfp: - for file_number in range(5): - local_test_file = tmp_path / f"{file_number}_{faker.file_name()}" - local_test_file.write_text(faker.text()) - assert local_test_file.exists() - zfp.write(local_test_file, local_test_file.name) - file_names_within_zip_file.add(local_test_file.name) - - destination_url = parse_obj_as(AnyUrl, f"{remote_parameters.remote_file_url}.zip") - storage_kwargs = {} - if remote_parameters.s3_settings: - storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) - - with cast( - fsspec.core.OpenFile, - fsspec.open( - destination_url, - mode="wb", - **storage_kwargs, - ), - ) as dest_fp: - with local_zip_file_path.open("rb") as src_fp: - dest_fp.write(src_fp.read()) - - # now we want to download that file so it becomes the source - src_url = destination_url - - # USE-CASE 1: if destination is a zip then no decompression is done - 
download_folder = tmp_path / "download" - download_folder.mkdir(parents=True, exist_ok=True) - assert download_folder.exists() - dst_path = download_folder / f"{faker.file_name()}.zip" - - await pull_file_from_remote( - src_url=src_url, - target_mime_type=None, - dst_path=dst_path, - log_publishing_cb=mocked_log_publishing_cb, - s3_settings=remote_parameters.s3_settings, - ) - assert dst_path.exists() - dst_path.unlink() - mocked_log_publishing_cb.assert_called() - mocked_log_publishing_cb.reset_mock() - - # USE-CASE 2: if destination is not a zip, then we decompress - assert download_folder.exists() - dst_path = download_folder / faker.file_name() - await pull_file_from_remote( - src_url=src_url, - target_mime_type=None, - dst_path=dst_path, - log_publishing_cb=mocked_log_publishing_cb, - s3_settings=remote_parameters.s3_settings, - ) - assert not dst_path.exists() - for file in download_folder.glob("*"): - assert file.exists() - assert file.name in file_names_within_zip_file - mocked_log_publishing_cb.assert_called() - mocked_log_publishing_cb.reset_mock() - - # USE-CASE 3: if destination is a zip, but we pass a target mime type that is not, then we decompress - download_folder = tmp_path / "download2" - download_folder.mkdir(parents=True, exist_ok=True) - assert download_folder.exists() - dst_path = download_folder / f"{faker.file_name()}.zip" - mime_type, _ = mimetypes.guess_type( - faker.file_name() - ) # let's have a standard mime type - await pull_file_from_remote( - src_url=src_url, - target_mime_type=mime_type, - dst_path=dst_path, - log_publishing_cb=mocked_log_publishing_cb, - s3_settings=remote_parameters.s3_settings, - ) - assert not dst_path.exists() - for file in download_folder.glob("*"): - assert file.exists() - assert file.name in file_names_within_zip_file - mocked_log_publishing_cb.assert_called() diff --git a/services/dask-sidecar/tests/unit/test_models.py b/services/dask-sidecar/tests/unit/test_models.py deleted file mode 100644 index 65ec5304631..00000000000 --- a/services/dask-sidecar/tests/unit/test_models.py +++ /dev/null @@ -1,59 +0,0 @@ -import pytest -from faker import Faker -from pydantic import ByteSize, ValidationError -from simcore_service_dask_sidecar.computational_sidecar.models import ( - ContainerHostConfig, -) - - -def test_container_host_config_sets_swap_same_as_memory_if_not_set(faker: Faker): - instance = ContainerHostConfig( - Binds=[faker.pystr() for _ in range(5)], - Memory=ByteSize(faker.pyint()), - NanoCPUs=faker.pyfloat(min_value=0.1), - ) - assert instance.memory == instance.memory_swap - - -def test_container_host_config_raises_if_set_negative( - faker: Faker, -): - with pytest.raises(ValidationError): - ContainerHostConfig( - Binds=[faker.pystr() for _ in range(5)], - Memory=ByteSize(faker.pyint(min_value=234)), - NanoCPUs=faker.pyfloat(min_value=0.1), - MemorySwap=ByteSize(faker.pyint(min_value=-84654, max_value=-1)), - ) - - -def test_container_host_config_raises_if_set_smaller_than_memory( - faker: Faker, -): - with pytest.raises(ValidationError): - ContainerHostConfig( - Binds=[faker.pystr() for _ in range(5)], - Memory=ByteSize(faker.pyint(min_value=234)), - NanoCPUs=faker.pyfloat(min_value=0.1), - MemorySwap=ByteSize(0), - ) - with pytest.raises(ValidationError): - ContainerHostConfig( - Binds=[faker.pystr() for _ in range(5)], - Memory=ByteSize(faker.pyint(min_value=234)), - NanoCPUs=faker.pyfloat(min_value=0.1), - MemorySwap=ByteSize(faker.pyint(min_value=1, max_value=233)), - ) - - -def 
test_container_host_config_sets_swap_if_set_bigger_than_memory( - faker: Faker, -): - instance = ContainerHostConfig( - Binds=[faker.pystr() for _ in range(5)], - Memory=ByteSize(faker.pyint(min_value=234, max_value=434234)), - NanoCPUs=faker.pyfloat(min_value=0.1), - MemorySwap=ByteSize(faker.pyint(min_value=434235, max_value=12343424234)), - ) - assert instance.memory_swap - assert instance.memory < instance.memory_swap diff --git a/services/dask-sidecar/tests/unit/test_rabbitmq_plugin.py b/services/dask-sidecar/tests/unit/test_rabbitmq_plugin.py new file mode 100644 index 00000000000..6d2b56cf57e --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_rabbitmq_plugin.py @@ -0,0 +1,37 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member + +import asyncio +from unittest import mock + +import distributed +import pytest +from pytest_mock import MockerFixture + +# Selection of core and tool services started in this swarm fixture (integration) +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +def test_rabbitmq_plugin_initializes(dask_client: distributed.Client): ... + + +@pytest.fixture +def erroring_rabbitmq_plugin(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_dask_sidecar.rabbitmq_worker_plugin.RabbitMQPlugin", + autospec=True, + side_effect=RuntimeError("Pytest: RabbitMQ plugin initialization failed"), + ) + + +async def test_dask_worker_closes_if_plugin_fails_on_start( + erroring_rabbitmq_plugin: mock.Mock, + local_cluster: distributed.LocalCluster, +): + await asyncio.sleep(10) diff --git a/services/dask-sidecar/tests/unit/test_settings.py b/services/dask-sidecar/tests/unit/test_settings.py deleted file mode 100644 index 8c78e21712a..00000000000 --- a/services/dask-sidecar/tests/unit/test_settings.py +++ /dev/null @@ -1,40 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -from typing import Optional - -import pytest -from pytest import MonkeyPatch -from simcore_service_dask_sidecar.settings import Settings - - -@pytest.fixture -def mock_service_envs( - mock_env_devel_environment: dict[str, Optional[str]], monkeypatch: MonkeyPatch -) -> None: - - # Variables directly define inside Dockerfile - monkeypatch.setenv("SC_BOOT_MODE", "debug-ptvsd") - - # Variables passed upon start via services/docker-compose.yml file under dask-sidecar/scheduler - monkeypatch.setenv("DASK_START_AS_SCHEDULER", "1") - - monkeypatch.setenv("SIDECAR_LOGLEVEL", "DEBUG") - monkeypatch.setenv( - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME", "simcore_computational_shared_data" - ) - monkeypatch.setenv( - "SIDECAR_COMP_SERVICES_SHARED_FOLDER", "/home/scu/computational_shared_data" - ) - - -def test_settings(mock_service_envs: None, monkeypatch: MonkeyPatch): - - monkeypatch.delenv("DASK_START_AS_SCHEDULER") - settings = Settings.create_from_envs() - assert settings.as_worker() - - monkeypatch.setenv("DASK_START_AS_SCHEDULER", "1") - settings = Settings.create_from_envs() - assert settings.as_scheduler() diff --git a/services/dask-sidecar/tests/unit/test_tasks.py b/services/dask-sidecar/tests/unit/test_tasks.py deleted file mode 100644 index 8309c38cb53..00000000000 --- a/services/dask-sidecar/tests/unit/test_tasks.py +++ /dev/null @@ -1,602 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: 
disable=no-member -# pylint: disable=too-many-instance-attributes - -import asyncio -import json -import logging -import re - -# copied out from dask -from dataclasses import dataclass -from pprint import pformat -from random import randint -from typing import Callable, Coroutine, Iterable -from unittest import mock -from uuid import uuid4 - -import fsspec -import pytest -from dask_task_models_library.container_tasks.docker import DockerBasicAuth -from dask_task_models_library.container_tasks.events import ( - TaskLogEvent, - TaskProgressEvent, - TaskStateEvent, -) -from dask_task_models_library.container_tasks.io import ( - FileUrl, - TaskInputData, - TaskOutputData, - TaskOutputDataSchema, -) -from distributed import Client -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.services_resources import BootMode -from models_library.users import UserID -from packaging import version -from pydantic import AnyUrl, SecretStr -from pytest import FixtureRequest, LogCaptureFixture -from pytest_mock.plugin import MockerFixture -from settings_library.s3 import S3Settings -from simcore_service_dask_sidecar.computational_sidecar.docker_utils import ( - LEGACY_SERVICE_LOG_FILE_NAME, -) -from simcore_service_dask_sidecar.computational_sidecar.errors import ( - ServiceBadFormattedOutputError, - ServiceRunError, -) -from simcore_service_dask_sidecar.computational_sidecar.models import ( - LEGACY_INTEGRATION_VERSION, -) -from simcore_service_dask_sidecar.dask_utils import _DEFAULT_MAX_RESOURCES -from simcore_service_dask_sidecar.file_utils import _s3fs_settings_from_s3_settings -from simcore_service_dask_sidecar.tasks import run_computational_sidecar - -logger = logging.getLogger(__name__) - - -@pytest.fixture -def job_id() -> str: - return "some_incredible_string" - - -@pytest.fixture -def user_id() -> UserID: - return 1 - - -@pytest.fixture -def project_id() -> ProjectID: - return uuid4() - - -@pytest.fixture -def node_id() -> NodeID: - return uuid4() - - -@pytest.fixture() -def dask_subsystem_mock(mocker: MockerFixture) -> dict[str, MockerFixture]: - # mock dask client - dask_client_mock = mocker.patch("distributed.Client", autospec=True) - - # mock tasks get worker and state - dask_distributed_worker_mock = mocker.patch( - "simcore_service_dask_sidecar.dask_utils.get_worker", autospec=True - ) - dask_task_mock = mocker.patch( - "simcore_service_dask_sidecar.dask_utils.TaskState", autospec=True - ) - dask_task_mock.resource_restrictions = {} - dask_distributed_worker_mock.return_value.state.tasks.get.return_value = ( - dask_task_mock - ) - - # ensure dask logger propagates - logging.getLogger("distributed").propagate = True - - # mock dask event worker - dask_distributed_worker_events_mock = mocker.patch( - "dask_task_models_library.container_tasks.events.get_worker", autospec=True - ) - dask_distributed_worker_events_mock.return_value.get_current_task.return_value = ( - "pytest_jobid" - ) - # mock dask event publishing - dask_utils_publish_event_mock = mocker.patch( - "simcore_service_dask_sidecar.dask_utils.distributed.Pub", - autospec=True, - ) - mocker.patch( - "simcore_service_dask_sidecar.dask_utils.distributed.Sub", - autospec=True, - ) - mocker.patch( - "simcore_service_dask_sidecar.dask_utils.is_current_task_aborted", - autospec=True, - return_value=False, - ) - - return { - "dask_client": dask_client_mock, - "dask_task_state": dask_task_mock, - "dask_event_publish": dask_utils_publish_event_mock, - } - - -@dataclass -class 
ServiceExampleParam: - docker_basic_auth: DockerBasicAuth - service_key: str - service_version: str - command: list[str] - input_data: TaskInputData - output_data_keys: TaskOutputDataSchema - log_file_url: AnyUrl - expected_output_data: TaskOutputData - expected_logs: list[str] - integration_version: version.Version - - -pytest_simcore_core_services_selection = ["postgres"] -pytest_simcore_ops_services_selection = ["minio"] - - -def _bash_check_env_exist(variable_name: str, variable_value: str) -> list[str]: - return [ - f"if [ -z ${{{variable_name}+x}} ];then echo {variable_name} does not exist && exit 9;fi", - f'if [ "${{{variable_name}}}" != "{variable_value}" ];then echo expected "{variable_value}" and found "${{{variable_name}}}" && exit 9;fi', - ] - - -@pytest.fixture(params=list(BootMode), ids=str) -def boot_mode(request: FixtureRequest) -> BootMode: - return request.param - - -@pytest.fixture( - # NOTE: legacy version comes second as it is less easy to debug issues with that one - params=[ - "1.0.0", - f"{LEGACY_INTEGRATION_VERSION}", - ], - ids=lambda v: f"integration.version.{v}", -) -def integration_version(request: FixtureRequest) -> version.Version: - print("Using service integration:", request.param) - return version.Version(request.param) - - -@pytest.fixture -def ubuntu_task( - integration_version: version.Version, - file_on_s3_server: Callable[..., AnyUrl], - s3_remote_file_url: Callable[..., AnyUrl], - boot_mode: BootMode, -) -> ServiceExampleParam: - """Creates a console task in an ubuntu distro that checks for the expected files and error in case they are missing""" - # let's have some input files on the file server - NUM_FILES = 12 - list_of_files = [file_on_s3_server() for _ in range(NUM_FILES)] - - # defines the inputs of the task - input_data = TaskInputData.parse_obj( - { - "input_1": 23, - "input_23": "a string input", - "the_input_43": 15.0, - "the_bool_input_54": False, - **{ - f"some_file_input_{index+1}": FileUrl(url=file) - for index, file in enumerate(list_of_files) - }, - **{ - f"some_file_input_with_mapping{index+1}": FileUrl( - url=file, - file_mapping=f"{index+1}/some_file_input_{index+1}", - ) - for index, file in enumerate(list_of_files) - }, - } - ) - # check in the console that the expected files are present in the expected INPUT folder (set as ${INPUT_FOLDER} in the service) - file_names = [file.path for file in list_of_files] - list_of_commands = [ - "echo User: $(id $(whoami))", - "echo Inputs:", - "ls -tlah -R ${INPUT_FOLDER}", - "echo Outputs:", - "ls -tlah -R ${OUTPUT_FOLDER}", - "echo Logs:", - "ls -tlah -R ${LOG_FOLDER}", - "echo Envs:", - "printenv", - ] - - # check expected ENVS are set - list_of_commands += _bash_check_env_exist( - variable_name="SC_COMP_SERVICES_SCHEDULED_AS", variable_value=boot_mode.value - ) - list_of_commands += _bash_check_env_exist( - variable_name="SIMCORE_NANO_CPUS_LIMIT", - variable_value=f"{int(_DEFAULT_MAX_RESOURCES['CPU']*1e9)}", - ) - list_of_commands += _bash_check_env_exist( - variable_name="SIMCORE_MEMORY_BYTES_LIMIT", - variable_value=f"{_DEFAULT_MAX_RESOURCES['RAM']}", - ) - - # check input files - list_of_commands += [ - f"(test -f ${{INPUT_FOLDER}}/{file} || (echo ${{INPUT_FOLDER}}/{file} does not exist && exit 1))" - for file in file_names - ] + [f"echo $(cat ${{INPUT_FOLDER}}/{file})" for file in file_names] - - input_json_file_name = ( - "inputs.json" - if integration_version > LEGACY_INTEGRATION_VERSION - else "input.json" - ) - - list_of_commands += [ - f"(test -f 
${{INPUT_FOLDER}}/{input_json_file_name} || (echo ${{INPUT_FOLDER}}/{input_json_file_name} file does not exists && exit 1))", - f"echo $(cat ${{INPUT_FOLDER}}/{input_json_file_name})", - f"sleep {randint(1,4)}", - ] - - # defines the expected outputs - jsonable_outputs = { - "pytest_string": "is quite an amazing feat", - "pytest_integer": 432, - "pytest_float": 3.2, - "pytest_bool": False, - } - output_file_url = s3_remote_file_url(file_path="output_file") - expected_output_keys = TaskOutputDataSchema.parse_obj( - { - **{k: {"required": True} for k in jsonable_outputs.keys()}, - **{ - "pytest_file": { - "required": True, - "mapping": "a_outputfile", - "url": f"{output_file_url}", - }, - "pytest_file_with_mapping": { - "required": True, - "mapping": "subfolder/a_outputfile", - "url": f"{output_file_url}", - }, - }, - } - ) - expected_output_data = TaskOutputData.parse_obj( - { - **jsonable_outputs, - **{ - "pytest_file": { - "url": f"{output_file_url}", - "file_mapping": "a_outputfile", - }, - "pytest_file_with_mapping": { - "url": f"{output_file_url}", - "file_mapping": "subfolder/a_outputfile", - }, - }, - } - ) - jsonized_outputs = json.dumps(jsonable_outputs).replace('"', '\\"') - output_json_file_name = ( - "outputs.json" - if integration_version > LEGACY_INTEGRATION_VERSION - else "output.json" - ) - - # check for the log file if legacy version - list_of_commands += [ - "echo $(ls -tlah ${LOG_FOLDER})", - f"(test {'!' if integration_version > LEGACY_INTEGRATION_VERSION else ''} -f ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} || (echo ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME} file does {'' if integration_version > LEGACY_INTEGRATION_VERSION else 'not'} exists && exit 1))", - ] - if integration_version == LEGACY_INTEGRATION_VERSION: - list_of_commands = [ - f"{c} >> ${{LOG_FOLDER}}/{LEGACY_SERVICE_LOG_FILE_NAME}" - for c in list_of_commands - ] - # set the final command to generate the output file(s) (files and json output) - list_of_commands += [ - f"echo {jsonized_outputs} > ${{OUTPUT_FOLDER}}/{output_json_file_name}", - "echo 'some data for the output file' > ${OUTPUT_FOLDER}/a_outputfile", - "mkdir -p ${OUTPUT_FOLDER}/subfolder", - "echo 'some data for the output file' > ${OUTPUT_FOLDER}/subfolder/a_outputfile", - ] - - log_file_url = s3_remote_file_url(file_path="log.dat") - - return ServiceExampleParam( - docker_basic_auth=DockerBasicAuth( - server_address="docker.io", username="pytest", password=SecretStr("") - ), - # - # NOTE: we use sleeper because it defines a user - # that can write in outputs and the - # sidecar can remove the outputs dirs - # it is based on ubuntu though but the bad part is that now it uses sh instead of bash... 
- # cause the entrypoint uses sh - service_key="itisfoundation/sleeper", - service_version="2.1.2", - command=[ - "/bin/bash", - "-c", - " && ".join(list_of_commands), - ], - input_data=input_data, - output_data_keys=expected_output_keys, - log_file_url=log_file_url, - expected_output_data=expected_output_data, - expected_logs=[ - '{"input_1": 23, "input_23": "a string input", "the_input_43": 15.0, "the_bool_input_54": false}', - "This is the file contents of file #'001'", - "This is the file contents of file #'002'", - "This is the file contents of file #'003'", - "This is the file contents of file #'004'", - "This is the file contents of file #'005'", - ], - integration_version=integration_version, - ) - - -@pytest.fixture() -def ubuntu_task_fail(ubuntu_task: ServiceExampleParam) -> ServiceExampleParam: - ubuntu_task.command = ["/bin/bash", "-c", "some stupid failing command"] - return ubuntu_task - - -@pytest.fixture() -def ubuntu_task_unexpected_output( - ubuntu_task: ServiceExampleParam, -) -> ServiceExampleParam: - ubuntu_task.command = ["/bin/bash", "-c", "echo we create nothingness"] - return ubuntu_task - - -@pytest.fixture() -def caplog_info_level(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]: - with caplog.at_level( - logging.INFO, - ): - yield caplog - - -def test_run_computational_sidecar_real_fct( - caplog_info_level: LogCaptureFixture, - event_loop: asyncio.AbstractEventLoop, - mock_service_envs: None, - dask_subsystem_mock: dict[str, MockerFixture], - ubuntu_task: ServiceExampleParam, - mocker: MockerFixture, - s3_settings: S3Settings, - boot_mode: BootMode, -): - mocked_get_integration_version = mocker.patch( - "simcore_service_dask_sidecar.computational_sidecar.core.get_integration_version", - autospec=True, - return_value=ubuntu_task.integration_version, - ) - output_data = run_computational_sidecar( - ubuntu_task.docker_basic_auth, - ubuntu_task.service_key, - ubuntu_task.service_version, - ubuntu_task.input_data, - ubuntu_task.output_data_keys, - ubuntu_task.log_file_url, - ubuntu_task.command, - s3_settings, - boot_mode, - ) - mocked_get_integration_version.assert_called_once_with( - mock.ANY, - ubuntu_task.docker_basic_auth, - ubuntu_task.service_key, - ubuntu_task.service_version, - ) - for event in [TaskProgressEvent, TaskStateEvent, TaskLogEvent]: - dask_subsystem_mock["dask_event_publish"].assert_any_call( # type: ignore - name=event.topic_name() - ) - - # check that the task produces expected logs - for log in ubuntu_task.expected_logs: - r = re.compile( - rf"\[{ubuntu_task.service_key}:{ubuntu_task.service_version} - .+\/.+ - .+\]: ({log})" - ) - search_results = list(filter(r.search, caplog_info_level.messages)) - assert ( - len(search_results) > 0 - ), f"Could not find '{log}' in worker_logs:\n {pformat(caplog_info_level.messages, width=240)}" - for log in ubuntu_task.expected_logs: - assert re.search( - rf"\[{ubuntu_task.service_key}:{ubuntu_task.service_version} - .+\/.+ - .+\]: ({log})", - caplog_info_level.text, - ) - # check that the task produce the expected data, not less not more - for k, v in ubuntu_task.expected_output_data.items(): - assert k in output_data - assert output_data[k] == v - - s3_storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - - for k, v in output_data.items(): - assert k in ubuntu_task.expected_output_data - assert v == ubuntu_task.expected_output_data[k] - - # if there are file urls in the output, check they exist - if isinstance(v, FileUrl): - with fsspec.open(f"{v.url}", **s3_storage_kwargs) as fp: - 
assert fp.details.get("size") > 0 # type: ignore - - # check the task has created a log file - with fsspec.open( - f"{ubuntu_task.log_file_url}", mode="rt", **s3_storage_kwargs - ) as fp: - saved_logs = fp.read() # type: ignore - assert saved_logs - for log in ubuntu_task.expected_logs: - assert log in saved_logs - - -@pytest.mark.parametrize( - "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True -) -def test_run_multiple_computational_sidecar_dask( - event_loop: asyncio.AbstractEventLoop, - dask_client: Client, - ubuntu_task: ServiceExampleParam, - mocker: MockerFixture, - s3_settings: S3Settings, - boot_mode: BootMode, -): - NUMBER_OF_TASKS = 50 - - mocker.patch( - "simcore_service_dask_sidecar.computational_sidecar.core.get_integration_version", - autospec=True, - return_value=ubuntu_task.integration_version, - ) - futures = [ - dask_client.submit( - run_computational_sidecar, - ubuntu_task.docker_basic_auth, - ubuntu_task.service_key, - ubuntu_task.service_version, - ubuntu_task.input_data, - ubuntu_task.output_data_keys, - ubuntu_task.log_file_url, - ubuntu_task.command, - s3_settings, - resources={}, - boot_mode=boot_mode, - ) - for _ in range(NUMBER_OF_TASKS) - ] - - results = dask_client.gather(futures) - assert results - assert not isinstance(results, Coroutine) - # for result in results: - # check that the task produce the expected data, not less not more - for output_data in results: - for k, v in ubuntu_task.expected_output_data.items(): - assert k in output_data - assert output_data[k] == v - - -@pytest.mark.parametrize( - "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True -) -def test_run_computational_sidecar_dask( - dask_client: Client, - ubuntu_task: ServiceExampleParam, - mocker: MockerFixture, - s3_settings: S3Settings, - boot_mode: BootMode, -): - mocker.patch( - "simcore_service_dask_sidecar.computational_sidecar.core.get_integration_version", - autospec=True, - return_value=ubuntu_task.integration_version, - ) - future = dask_client.submit( - run_computational_sidecar, - ubuntu_task.docker_basic_auth, - ubuntu_task.service_key, - ubuntu_task.service_version, - ubuntu_task.input_data, - ubuntu_task.output_data_keys, - ubuntu_task.log_file_url, - ubuntu_task.command, - s3_settings, - resources={}, - boot_mode=boot_mode, - ) - - worker_name = next(iter(dask_client.scheduler_info()["workers"])) - - output_data = future.result() - assert isinstance(output_data, TaskOutputData) - - # check that the task produces expected logs - worker_logs = [log for _, log in dask_client.get_worker_logs()[worker_name]] # type: ignore - worker_logs.reverse() - for log in ubuntu_task.expected_logs: - r = re.compile( - rf"\[{ubuntu_task.service_key}:{ubuntu_task.service_version} - [^\/]+\/[^\s]+ - [^\]]+\]: ({log})" - ) - search_results = list(filter(r.search, worker_logs)) - assert ( - len(search_results) > 0 - ), f"Could not find {log} in worker_logs:\n {pformat(worker_logs, width=240)}" - - # check that the task produce the expected data, not less not more - for k, v in ubuntu_task.expected_output_data.items(): - assert k in output_data - assert output_data[k] == v - - s3_storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) - for k, v in output_data.items(): - assert k in ubuntu_task.expected_output_data - assert v == ubuntu_task.expected_output_data[k] - - # if there are file urls in the output, check they exist - if isinstance(v, FileUrl): - with fsspec.open(f"{v.url}", **s3_storage_kwargs) as fp: - assert fp.details.get("size") > 0 
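Stripped of the sidecar specifics, the tests above submit run_computational_sidecar to a dask cluster with explicit worker resources and gather the results. A minimal, self-contained sketch of that submit/gather pattern, assuming only the distributed package and with a trivial stand-in function:

import distributed


def _square(x: int) -> int:
    # trivial stand-in for the computational sidecar function
    return x * x


if __name__ == "__main__":
    # a single local worker advertising a custom "CPU" resource
    with distributed.LocalCluster(
        n_workers=1, threads_per_worker=2, resources={"CPU": 2}
    ) as cluster, distributed.Client(cluster) as client:
        # each task reserves one "CPU" unit, so this worker runs at most two at a time
        futures = [
            client.submit(_square, i, resources={"CPU": 1}) for i in range(5)
        ]
        assert client.gather(futures) == [i * i for i in range(5)]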
# type: ignore - - -@pytest.mark.parametrize( - "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True -) -def test_failing_service_raises_exception( - caplog_info_level: LogCaptureFixture, - event_loop: asyncio.AbstractEventLoop, - mock_service_envs: None, - dask_subsystem_mock: dict[str, MockerFixture], - ubuntu_task_fail: ServiceExampleParam, - s3_settings: S3Settings, -): - with pytest.raises(ServiceRunError): - run_computational_sidecar( - ubuntu_task_fail.docker_basic_auth, - ubuntu_task_fail.service_key, - ubuntu_task_fail.service_version, - ubuntu_task_fail.input_data, - ubuntu_task_fail.output_data_keys, - ubuntu_task_fail.log_file_url, - ubuntu_task_fail.command, - s3_settings, - ) - - -@pytest.mark.parametrize( - "integration_version, boot_mode", [("1.0.0", BootMode.CPU)], indirect=True -) -def test_running_service_that_generates_unexpected_data_raises_exception( - caplog_info_level: LogCaptureFixture, - event_loop: asyncio.AbstractEventLoop, - mock_service_envs: None, - dask_subsystem_mock: dict[str, MockerFixture], - ubuntu_task_unexpected_output: ServiceExampleParam, - s3_settings: S3Settings, -): - with pytest.raises(ServiceBadFormattedOutputError): - run_computational_sidecar( - ubuntu_task_unexpected_output.docker_basic_auth, - ubuntu_task_unexpected_output.service_key, - ubuntu_task_unexpected_output.service_version, - ubuntu_task_unexpected_output.input_data, - ubuntu_task_unexpected_output.output_data_keys, - ubuntu_task_unexpected_output.log_file_url, - ubuntu_task_unexpected_output.command, - s3_settings, - ) diff --git a/services/dask-sidecar/tests/unit/test_tasks_life_cycle.py b/services/dask-sidecar/tests/unit/test_tasks_life_cycle.py new file mode 100644 index 00000000000..2b8b077cbe0 --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_tasks_life_cycle.py @@ -0,0 +1,82 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member + +import time +from collections.abc import Iterable + +import distributed +import pytest +from dask_task_models_library.models import TASK_LIFE_CYCLE_EVENT, TaskLifeCycleState +from models_library.projects_state import RunningState +from tenacity import Retrying, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +def test_task_state_lifecycle(local_cluster: distributed.LocalCluster) -> None: + def _some_task() -> int: + time.sleep(1) + return 2 + + def _some_failing_task() -> None: + time.sleep(1) + msg = "Some error" + raise RuntimeError(msg) + + local_cluster.scale(0) + for attempt in Retrying( + stop=stop_after_delay(10), wait=wait_fixed(1), reraise=True + ): + with attempt: + assert len(local_cluster.workers) == 0 + with distributed.Client(local_cluster) as dask_client: + # submit the task and wait until it goes into WAITING_FOR_RESOURCES + future = dask_client.submit(_some_task, resources={"CPU": 1}) + for attempt in Retrying( + stop=stop_after_delay(10), wait=wait_fixed(1), reraise=True + ): + with attempt: + events = dask_client.get_events( + TASK_LIFE_CYCLE_EVENT.format(key=future.key) + ) + assert isinstance(events, tuple) + assert len(events) >= 2 + parsed_events = [ + TaskLifeCycleState.model_validate(event[1]) for event in events + ] + assert parsed_events[0].state is RunningState.PENDING + assert parsed_events[-1].state is RunningState.WAITING_FOR_RESOURCES + + # now add a worker and wait for it to take the task + local_cluster.scale(1) + + # we basically wait for the tasks to 
finish + assert future.result(timeout=15) == 2 + + events = dask_client.get_events(TASK_LIFE_CYCLE_EVENT.format(key=future.key)) + assert isinstance(events, tuple) + parsed_events = [ + TaskLifeCycleState.model_validate(event[1]) for event in events + ] + assert parsed_events[0].state is RunningState.PENDING + assert RunningState.STARTED in {event.state for event in parsed_events} + assert RunningState.FAILED not in {event.state for event in parsed_events} + assert parsed_events[-1].state is RunningState.SUCCESS + + future = dask_client.submit(_some_failing_task) + with pytest.raises(RuntimeError): + future.result(timeout=10) + events = dask_client.get_events(TASK_LIFE_CYCLE_EVENT.format(key=future.key)) + assert isinstance(events, Iterable) + parsed_events = [ + TaskLifeCycleState.model_validate(event[1]) for event in events + ] + assert parsed_events[0].state is RunningState.PENDING + assert RunningState.STARTED in {event.state for event in parsed_events} + assert RunningState.FAILED in {event.state for event in parsed_events} + assert RunningState.SUCCESS not in {event.state for event in parsed_events} + assert parsed_events[-1].state is RunningState.FAILED diff --git a/services/dask-sidecar/tests/unit/test_utils.py b/services/dask-sidecar/tests/unit/test_utils.py deleted file mode 100644 index 00f39b39971..00000000000 --- a/services/dask-sidecar/tests/unit/test_utils.py +++ /dev/null @@ -1,117 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - - -import asyncio -from typing import Optional -from unittest import mock - -import aiodocker -import pytest -from pytest import MonkeyPatch -from pytest_mock.plugin import MockerFixture -from simcore_service_dask_sidecar.utils import num_available_gpus - - -@pytest.fixture -def mock_service_envs( - mock_env_devel_environment: dict[str, Optional[str]], monkeypatch: MonkeyPatch -) -> None: - monkeypatch.setenv( - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME", "simcore_computational_shared_data" - ) - monkeypatch.setenv( - "SIDECAR_COMP_SERVICES_SHARED_FOLDER", "/home/scu/computational_shared_data" - ) - - -@pytest.fixture(scope="function") -def mock_aiodocker(mocker: MockerFixture) -> mock.MagicMock: - mock_docker = mocker.patch( - "simcore_service_dask_sidecar.utils.aiodocker.Docker", autospec=True - ) - return mock_docker - - -def test_num_available_gpus_returns_0_when_container_not_created( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - mock_aiodocker: mock.MagicMock, -): - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value = ( - None - ) - - assert num_available_gpus() == 0 - - -def test_num_available_gpus_returns_0_when_container_throws_exception_on_run( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - mock_aiodocker: mock.MagicMock, -): - mock_aiodocker.return_value.__aenter__.return_value.containers.run.side_effect = ( - aiodocker.exceptions.DockerError( - status="testing bad status", data={"message": "error when running"} - ) - ) - assert num_available_gpus() == 0 - - -def test_num_available_gpus_returns_0_when_no_status_code_returned( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - mock_aiodocker: mock.MagicMock, -): - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { - "mistakeinthereturnvalue": "kdsfjh" - } - assert num_available_gpus() == 0 - - -def 
test_num_available_gpus_returns_0_when_bad_status_code_returned( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - mock_aiodocker: mock.MagicMock, -): - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { - "StatusCode": 1 - } - assert num_available_gpus() == 0 - - -def test_num_available_gpus_returns_0_when_container_wait_timesout( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - mock_aiodocker: mock.MagicMock, -): - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.side_effect = ( - asyncio.TimeoutError() - ) - assert num_available_gpus() == 0 - - -@pytest.mark.parametrize( - "container_logs, expected_num_gpus", - [([], 0), (["gpu1"], 1), (["gpu1", "gpu2", "gpu4"], 3)], -) -def test_num_available_gpus( - event_loop: asyncio.events.AbstractEventLoop, - mock_service_envs: None, - container_logs: list[str], - expected_num_gpus: int, - mock_aiodocker: mock.MagicMock, -): - # default with mock should return 0 gpus - assert num_available_gpus() == 0 - - # add the correct log - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.log.return_value = ( - container_logs - ) - # set the correct status code - mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { - "StatusCode": 0 - } - assert num_available_gpus() == expected_num_gpus diff --git a/services/dask-sidecar/tests/unit/test_utils_dask.py b/services/dask-sidecar/tests/unit/test_utils_dask.py new file mode 100644 index 00000000000..0e8193558be --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_utils_dask.py @@ -0,0 +1,216 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=no-member + + +import asyncio +import concurrent.futures +import logging +import time +from typing import Any +from unittest import mock + +import distributed +import pytest +from common_library.async_tools import maybe_await +from dask_task_models_library.container_tasks.errors import TaskCancelledError +from dask_task_models_library.container_tasks.events import TaskProgressEvent +from dask_task_models_library.container_tasks.io import TaskCancelEventName +from dask_task_models_library.container_tasks.protocol import TaskOwner +from pytest_simcore.helpers.logging_tools import log_context +from simcore_service_dask_sidecar.utils.dask import ( + _DEFAULT_MAX_RESOURCES, + TaskPublisher, + get_current_task_resources, + is_current_task_aborted, + monitor_task_abortion, + publish_event, +) +from tenacity import Retrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +DASK_TASK_STARTED_EVENT = "task_started" +DASK_TESTING_TIMEOUT_S = 25 + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture(params=["sync-dask-client", "async-dask-client"]) +def dask_client_multi( + request: pytest.FixtureRequest, + dask_client: distributed.Client, + async_dask_client: distributed.Client, +) -> distributed.Client: + if request.param == "sync-dask-client": + return dask_client + return async_dask_client + + +@pytest.mark.parametrize( + "handler", [mock.Mock(), mock.AsyncMock()], ids=["sync-handler", "async-handler"] +) +async def test_publish_event( + dask_client_multi: distributed.Client, + job_id: str, + task_owner: TaskOwner, + handler: mock.Mock | mock.AsyncMock, +): + 
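The publish/subscribe test that follows builds on dask's structured events API (log_event on the worker, subscribe_topic and get_events on the client). The sketch below shows that flow on its own, independent of the sidecar's publish_event helper and TaskProgressEvent model, assuming a plain local cluster:

import time

import distributed


def _emit_event() -> int:
    # inside a task: publish a structured event on a named topic
    distributed.get_worker().log_event("my-topic", {"msg": "hello from the worker"})
    return 2


with distributed.LocalCluster(n_workers=1) as cluster, distributed.Client(cluster) as client:
    received: list = []
    client.subscribe_topic("my-topic", received.append)  # only one handler per topic
    assert client.submit(_emit_event).result(timeout=30) == 2

    # events propagate asynchronously, so poll briefly before asserting
    events: tuple = ()
    for _ in range(50):
        events = client.get_events("my-topic")
        if events:
            break
        time.sleep(0.1)

    # each event is a (timestamp, payload) tuple
    assert any(payload == {"msg": "hello from the worker"} for _, payload in events)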
event_to_publish = TaskProgressEvent( + job_id=job_id, + msg="the log", + progress=1, + task_owner=task_owner, + ) + + # NOTE: only 1 handler per topic is allowed at a time + dask_client_multi.subscribe_topic(TaskProgressEvent.topic_name(), handler) + + def _worker_task() -> int: + with log_context(logging.INFO, "_worker_task"): + + async def _() -> int: + with log_context(logging.INFO, "_worker_task_async"): + publish_event(event_to_publish) + return 2 + + return asyncio.run(_()) + + future = dask_client_multi.submit(_worker_task) + assert await maybe_await(future.result(timeout=DASK_TESTING_TIMEOUT_S)) == 2 + + for attempt in Retrying( + wait=wait_fixed(0.2), + stop=stop_after_delay(15), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + events = await maybe_await( + dask_client_multi.get_events(TaskProgressEvent.topic_name()) + ) + assert events is not None, "No events received" + assert isinstance(events, tuple) + + handler.assert_called_with(events[-1]) + + assert isinstance(events, tuple) + assert len(events) == 1 + assert isinstance(events[0], tuple) + received_task_log_event = TaskProgressEvent.model_validate_json(events[0][1]) + assert received_task_log_event == event_to_publish + + +def _wait_for_task_to_start(dask_client: distributed.Client) -> None: + start_event = distributed.Event(DASK_TASK_STARTED_EVENT, dask_client) + start_event.wait(timeout=DASK_TESTING_TIMEOUT_S) + + +def _notify_task_is_started_and_ready(dask_client: distributed.Client) -> None: + start_event = distributed.Event(DASK_TASK_STARTED_EVENT, dask_client) + start_event.set() + + +def _some_long_running_task() -> int: + assert is_current_task_aborted() is False + dask_client = distributed.get_worker().client + _notify_task_is_started_and_ready(dask_client) + + for i in range(300): + print("running iteration", i) + time.sleep(0.1) + if is_current_task_aborted(): + print("task is aborted") + return -1 + assert is_current_task_aborted() + return 12 + + +def test_task_is_aborted(dask_client: distributed.Client): + """Tests aborting a task without using an event. In theory once + the future is cancelled, the dask worker shall 'forget' the task. 
Sadly this does + not work in distributed mode where an Event is necessary.""" + # NOTE: this works because the cluster is in the same machine + future = dask_client.submit(_some_long_running_task) + _wait_for_task_to_start(dask_client) + future.cancel() + assert future.cancelled() + with pytest.raises(concurrent.futures.CancelledError): + future.result(timeout=DASK_TESTING_TIMEOUT_S) + + +def test_task_is_aborted_using_event(dask_client: distributed.Client): + job_id = "myfake_job_id" + future = dask_client.submit(_some_long_running_task, key=job_id) + _wait_for_task_to_start(dask_client) + + dask_event = distributed.Event(TaskCancelEventName.format(job_id)) + dask_event.set() + + result = future.result(timeout=2) + assert result == -1 + + +def _some_long_running_task_with_monitoring(task_owner: TaskOwner) -> int: + assert is_current_task_aborted() is False + # we are started now + start_event = distributed.Event(DASK_TASK_STARTED_EVENT) + start_event.set() + + async def _long_running_task_async() -> int: + task_publishers = TaskPublisher(task_owner=task_owner) + worker = distributed.get_worker() + _notify_task_is_started_and_ready(worker.client) + current_task = asyncio.current_task() + assert current_task + async with monitor_task_abortion( + task_name=current_task.get_name(), task_publishers=task_publishers + ): + for i in range(300): + print("running iteration", i) + await asyncio.sleep(0.5) + return 12 + + try: + loop = asyncio.get_event_loop() + except RuntimeError: + # NOTE: this happens in testing when the dask cluster runs INProcess + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return asyncio.get_event_loop().run_until_complete(_long_running_task_async()) + + +def test_monitor_task_abortion( + dask_client: distributed.Client, job_id: str, task_owner: TaskOwner +): + future = dask_client.submit( + _some_long_running_task_with_monitoring, task_owner=task_owner, key=job_id + ) + _wait_for_task_to_start(dask_client) + # trigger cancellation + dask_event = distributed.Event(TaskCancelEventName.format(job_id)) + dask_event.set() + with pytest.raises(TaskCancelledError): + future.result(timeout=DASK_TESTING_TIMEOUT_S) + + +@pytest.mark.parametrize( + "resources", + [ + ({"CPU": 2}), + ({"GPU": 5.0}), + ], +) +def test_task_resources( + dask_client: distributed.Client, + resources: dict[str, Any], +): + future = dask_client.submit(get_current_task_resources, resources=resources) + received_resources = future.result(timeout=DASK_TESTING_TIMEOUT_S) + current_resources = _DEFAULT_MAX_RESOURCES + current_resources.update(resources) + assert received_resources == current_resources diff --git a/services/dask-sidecar/tests/unit/test_utils_files.py b/services/dask-sidecar/tests/unit/test_utils_files.py new file mode 100644 index 00000000000..f7c6e68f816 --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_utils_files.py @@ -0,0 +1,513 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +import hashlib +import mimetypes +import zipfile +from collections.abc import AsyncIterable +from dataclasses import dataclass +from pathlib import Path +from typing import Any, cast +from unittest import mock + +import fsspec +import pytest +from faker import Faker +from pydantic import AnyUrl, TypeAdapter +from pytest_localftpserver.servers import ProcessFTPServer +from pytest_mock.plugin import MockerFixture +from settings_library.s3 import S3Settings +from simcore_service_dask_sidecar.utils.files import ( 
+ _s3fs_settings_from_s3_settings, + pull_file_from_remote, + push_file_to_remote, +) +from types_aiobotocore_s3 import S3Client + + +@pytest.fixture() +async def mocked_log_publishing_cb( + mocker: MockerFixture, +) -> AsyncIterable[mock.AsyncMock]: + async with mocker.AsyncMock() as mocked_callback: + yield mocked_callback + + +pytest_simcore_core_services_selection = ["rabbit"] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def s3_presigned_link_storage_kwargs(s3_settings: S3Settings) -> dict[str, Any]: + return {} + + +@pytest.fixture +def ftp_remote_file_url(ftpserver: ProcessFTPServer, faker: Faker) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + f"{ftpserver.get_login_data(style='url')}/{faker.file_name()}" + ) + + +@pytest.fixture +async def s3_presigned_link_remote_file_url( + s3_settings: S3Settings, + s3_client: S3Client, + faker: Faker, +) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + await s3_client.generate_presigned_url( + "put_object", + Params={"Bucket": s3_settings.S3_BUCKET_NAME, "Key": faker.file_name()}, + ExpiresIn=30, + ), + ) + + +@pytest.fixture +def s3_remote_file_url(s3_settings: S3Settings, faker: Faker) -> AnyUrl: + return TypeAdapter(AnyUrl).validate_python( + f"s3://{s3_settings.S3_BUCKET_NAME}{faker.file_path()}" + ) + + +@dataclass(frozen=True) +class StorageParameters: + s3_settings: S3Settings | None + remote_file_url: AnyUrl + + +@pytest.fixture(params=["ftp", "s3"]) +def remote_parameters( + request: pytest.FixtureRequest, + ftp_remote_file_url: AnyUrl, + s3_remote_file_url: AnyUrl, + s3_settings: S3Settings, +) -> StorageParameters: + return { + "ftp": StorageParameters(s3_settings=None, remote_file_url=ftp_remote_file_url), + "s3": StorageParameters( + s3_settings=s3_settings, remote_file_url=s3_remote_file_url + ), + }[ + request.param # type: ignore + ] + + +async def test_push_file_to_remote( + remote_parameters: StorageParameters, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + # let's create some file with text inside + src_path = tmp_path / faker.file_name() + TEXT_IN_FILE = faker.text() + src_path.write_text(TEXT_IN_FILE) + assert src_path.exists() + # push it to the remote + await push_file_to_remote( + src_path, + remote_parameters.remote_file_url, + mocked_log_publishing_cb, + remote_parameters.s3_settings, + ) + + # check the remote is actually having the file in + storage_kwargs = {} + if remote_parameters.s3_settings: + storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) + + with cast( + fsspec.core.OpenFile, + fsspec.open( + f"{remote_parameters.remote_file_url}", + mode="rt", + **storage_kwargs, + ), + ) as fp: + assert fp.read() == TEXT_IN_FILE + mocked_log_publishing_cb.assert_called() + + +async def test_push_file_to_remote_s3_http_presigned_link( + s3_presigned_link_remote_file_url: AnyUrl, + s3_settings: S3Settings, + tmp_path: Path, + s3_bucket: str, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + # let's create some file with text inside + src_path = tmp_path / faker.file_name() + TEXT_IN_FILE = faker.text() + src_path.write_text(TEXT_IN_FILE) + assert src_path.exists() + # push it to the remote + await push_file_to_remote( + src_path, + s3_presigned_link_remote_file_url, + mocked_log_publishing_cb, + s3_settings=None, + ) + + # check the remote is actually having the file in, but we need s3 access now + s3_remote_file_url = TypeAdapter(AnyUrl).validate_python( + 
f"s3:/{s3_presigned_link_remote_file_url.path}", + ) + + storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + with cast( + fsspec.core.OpenFile, + fsspec.open(f"{s3_remote_file_url}", mode="rt", **storage_kwargs), + ) as fp: + assert fp.read() == TEXT_IN_FILE + mocked_log_publishing_cb.assert_called() + + +async def test_push_file_to_remote_compresses_if_zip_destination( + remote_parameters: StorageParameters, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + destination_url = TypeAdapter(AnyUrl).validate_python( + f"{remote_parameters.remote_file_url}.zip" + ) + src_path = tmp_path / faker.file_name() + TEXT_IN_FILE = faker.text() + src_path.write_text(TEXT_IN_FILE) + assert src_path.exists() + + await push_file_to_remote( + src_path, + destination_url, + mocked_log_publishing_cb, + remote_parameters.s3_settings, + ) + + storage_kwargs = {} + if remote_parameters.s3_settings: + storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) + open_files = fsspec.open_files( + f"zip://*::{destination_url}", + mode="rt", + **{destination_url.scheme: storage_kwargs}, + ) + assert len(open_files) == 1 + with open_files[0] as fp: + assert fp.read() == TEXT_IN_FILE # type: ignore + mocked_log_publishing_cb.assert_called() + + +async def test_pull_file_from_remote( + remote_parameters: StorageParameters, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + storage_kwargs = {} + if remote_parameters.s3_settings: + storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) + # put some file on the remote + TEXT_IN_FILE = faker.text() + with cast( + fsspec.core.OpenFile, + fsspec.open( + f"{remote_parameters.remote_file_url}", + mode="wt", + **storage_kwargs, + ), + ) as fp: + fp.write(TEXT_IN_FILE) + + # now let's get the file through the util + dst_path = tmp_path / faker.file_name() + await pull_file_from_remote( + src_url=remote_parameters.remote_file_url, + target_mime_type=None, + dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert dst_path.exists() + assert dst_path.read_text() == TEXT_IN_FILE + mocked_log_publishing_cb.assert_called() + + +async def test_pull_file_from_remote_s3_presigned_link( + s3_settings: S3Settings, + s3_remote_file_url: AnyUrl, + s3_client: S3Client, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + # put some file on the remote + TEXT_IN_FILE = faker.text() + with cast( + fsspec.core.OpenFile, + fsspec.open( + f"{s3_remote_file_url}", + mode="wt", + **storage_kwargs, + ), + ) as fp: + fp.write(TEXT_IN_FILE) + + # create a corresponding presigned get link + assert s3_remote_file_url.path + remote_file_url = TypeAdapter(AnyUrl).validate_python( + await s3_client.generate_presigned_url( + "get_object", + Params={ + "Bucket": s3_settings.S3_BUCKET_NAME, + "Key": f"{s3_remote_file_url.path.removeprefix('/')}", + }, + ExpiresIn=30, + ) + ) + assert remote_file_url.scheme.startswith("http") + print(f"remote_file_url: {remote_file_url}") + # now let's get the file through the util + dst_path = tmp_path / faker.file_name() + await pull_file_from_remote( + src_url=remote_file_url, + target_mime_type=None, + dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=None, + ) + assert dst_path.exists() + assert dst_path.read_text() == TEXT_IN_FILE + 
mocked_log_publishing_cb.assert_called() + + +async def test_pull_file_from_remote_s3_presigned_link_invalid_file( + s3_settings: S3Settings, + s3_remote_file_url: AnyUrl, + s3_client: S3Client, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + storage_kwargs = _s3fs_settings_from_s3_settings(s3_settings) + # put some file on the remote + TEXT_IN_FILE = faker.text() + with cast( + fsspec.core.OpenFile, + fsspec.open( + f"{s3_remote_file_url}", + mode="wt", + **storage_kwargs, + ), + ) as fp: + fp.write(TEXT_IN_FILE) + + # create a corresponding presigned get link + assert s3_remote_file_url.path + invalid_remote_file_url = TypeAdapter(AnyUrl).validate_python( + await s3_client.generate_presigned_url( + "get_object", + Params={ + "Bucket": s3_settings.S3_BUCKET_NAME, + "Key": f"{s3_remote_file_url.path.removeprefix('/')}_invalid", + }, + ExpiresIn=30, + ) + ) + assert invalid_remote_file_url.scheme.startswith("http") + print(f"remote_file_url: {invalid_remote_file_url}") + # now let's get the file through the util + dst_path = tmp_path / faker.file_name() + with pytest.raises( + FileNotFoundError, + match=rf"{s3_remote_file_url.path.removeprefix('/')}_invalid", + ): + await pull_file_from_remote( + src_url=invalid_remote_file_url, + target_mime_type=None, + dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=None, + ) + + assert not dst_path.exists() + mocked_log_publishing_cb.assert_called() + + +async def test_pull_compressed_zip_file_from_remote( + remote_parameters: StorageParameters, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + # put some zip file on the remote + local_zip_file_path = tmp_path / f"{faker.file_name()}.zip" + file_names_within_zip_file = set() + with zipfile.ZipFile( + local_zip_file_path, compression=zipfile.ZIP_DEFLATED, mode="w" + ) as zfp: + for file_number in range(5): + local_test_file = tmp_path / f"{file_number}_{faker.file_name()}" + local_test_file.write_text(faker.text()) + assert local_test_file.exists() + zfp.write(local_test_file, local_test_file.name) + file_names_within_zip_file.add(local_test_file.name) + + destination_url = TypeAdapter(AnyUrl).validate_python( + f"{remote_parameters.remote_file_url}.zip" + ) + storage_kwargs = {} + if remote_parameters.s3_settings: + storage_kwargs = _s3fs_settings_from_s3_settings(remote_parameters.s3_settings) + + with ( + cast( + fsspec.core.OpenFile, + fsspec.open( + f"{destination_url}", + mode="wb", + **storage_kwargs, + ), + ) as dest_fp, + local_zip_file_path.open("rb") as src_fp, + ): + dest_fp.write(src_fp.read()) + + # now we want to download that file so it becomes the source + src_url = destination_url + + # USE-CASE 1: if destination is a zip then no decompression is done + download_folder = tmp_path / "download" + download_folder.mkdir(parents=True, exist_ok=True) + assert download_folder.exists() + dst_path = download_folder / f"{faker.file_name()}.zip" + + await pull_file_from_remote( + src_url=src_url, + target_mime_type=None, + dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert dst_path.exists() + dst_path.unlink() + mocked_log_publishing_cb.assert_called() + mocked_log_publishing_cb.reset_mock() + + # USE-CASE 2: if destination is not a zip, then we decompress + assert download_folder.exists() + dst_path = download_folder / faker.file_name() + await pull_file_from_remote( + src_url=src_url, + target_mime_type=None, + 
dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert not dst_path.exists() + for file in download_folder.glob("*"): + assert file.exists() + assert file.name in file_names_within_zip_file + mocked_log_publishing_cb.assert_called() + mocked_log_publishing_cb.reset_mock() + + # USE-CASE 3: if destination is a zip, but we pass a target mime type that is not, then we decompress + download_folder = tmp_path / "download2" + download_folder.mkdir(parents=True, exist_ok=True) + assert download_folder.exists() + dst_path = download_folder / f"{faker.file_name()}.zip" + mime_type, _ = mimetypes.guess_type( + faker.file_name() + ) # let's have a standard mime type + await pull_file_from_remote( + src_url=src_url, + target_mime_type=mime_type, + dst_path=dst_path, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert not dst_path.exists() + for file in download_folder.glob("*"): + assert file.exists() + assert file.name in file_names_within_zip_file + mocked_log_publishing_cb.assert_called() + + +def _compute_hash(file_path: Path) -> str: + with file_path.open("rb") as file_to_hash: + file_hash = hashlib.sha256() + chunk = file_to_hash.read(8192) + while chunk: + file_hash.update(chunk) + chunk = file_to_hash.read(8192) + + return file_hash.hexdigest() + + +async def test_push_file_to_remote_creates_reproducible_zip_archive( + remote_parameters: StorageParameters, + tmp_path: Path, + faker: Faker, + mocked_log_publishing_cb: mock.AsyncMock, +): + destination_url1 = TypeAdapter(AnyUrl).validate_python( + f"{remote_parameters.remote_file_url}1.zip" + ) + destination_url2 = TypeAdapter(AnyUrl).validate_python( + f"{remote_parameters.remote_file_url}2.zip" + ) + src_path = tmp_path / faker.file_name() + TEXT_IN_FILE = faker.text() + src_path.write_text(TEXT_IN_FILE) + assert src_path.exists() + + # pushing 2 times should produce the same archive with the same hash + await push_file_to_remote( + src_path, + destination_url1, + mocked_log_publishing_cb, + remote_parameters.s3_settings, + ) + await asyncio.sleep( + 5 + ) # NOTE: we wait a bit to ensure the created zipfile has a different creation time (that is normally used for computing the hash) + await push_file_to_remote( + src_path, + destination_url2, + mocked_log_publishing_cb, + remote_parameters.s3_settings, + ) + + # now we pull both file and compare their hash + + # USE-CASE 1: if destination is a zip then no decompression is done + download_folder = tmp_path / "download" + download_folder.mkdir(parents=True, exist_ok=True) + assert download_folder.exists() + dst_path1 = download_folder / f"{faker.file_name()}1.zip" + dst_path2 = download_folder / f"{faker.file_name()}2.zip" + + await pull_file_from_remote( + src_url=destination_url1, + target_mime_type=None, + dst_path=dst_path1, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert dst_path1.exists() + + await pull_file_from_remote( + src_url=destination_url2, + target_mime_type=None, + dst_path=dst_path2, + log_publishing_cb=mocked_log_publishing_cb, + s3_settings=remote_parameters.s3_settings, + ) + assert dst_path2.exists() + + assert _compute_hash(dst_path1) == _compute_hash(dst_path2) diff --git a/services/dask-sidecar/tests/unit/test_utils_gpus.py b/services/dask-sidecar/tests/unit/test_utils_gpus.py new file mode 100644 index 00000000000..01492256990 --- /dev/null +++ b/services/dask-sidecar/tests/unit/test_utils_gpus.py 
@@ -0,0 +1,103 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name + + +from unittest import mock + +import aiodocker +import pytest +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dask_sidecar.utils.gpus import num_available_gpus + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def mock_aiodocker(mocker: MockerFixture) -> mock.MagicMock: + return mocker.patch( + "simcore_service_dask_sidecar.utils.gpus.aiodocker.Docker", autospec=True + ) + + +def test_num_available_gpus_returns_0_when_container_not_created( + app_environment: EnvVarsDict, + mock_aiodocker: mock.MagicMock, +): + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value = ( + None + ) + + assert num_available_gpus() == 0 + + +def test_num_available_gpus_returns_0_when_container_throws_exception_on_run( + app_environment: EnvVarsDict, + mock_aiodocker: mock.MagicMock, +): + mock_aiodocker.return_value.__aenter__.return_value.containers.run.side_effect = ( + aiodocker.exceptions.DockerError( + status="testing bad status", data={"message": "error when running"} + ) + ) + assert num_available_gpus() == 0 + + +def test_num_available_gpus_returns_0_when_no_status_code_returned( + app_environment: EnvVarsDict, + mock_aiodocker: mock.MagicMock, +): + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { + "mistakeinthereturnvalue": "kdsfjh" + } + assert num_available_gpus() == 0 + + +def test_num_available_gpus_returns_0_when_bad_status_code_returned( + app_environment: EnvVarsDict, + mock_aiodocker: mock.MagicMock, +): + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { + "StatusCode": 1 + } + assert num_available_gpus() == 0 + + +def test_num_available_gpus_returns_0_when_container_wait_timesout( + app_environment: EnvVarsDict, + mock_aiodocker: mock.MagicMock, +): + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.side_effect = ( + TimeoutError() + ) + assert num_available_gpus() == 0 + + +@pytest.mark.parametrize( + "container_logs, expected_num_gpus", + [([], 0), (["gpu1"], 1), (["gpu1", "gpu2", "gpu4"], 3)], +) +def test_num_available_gpus( + app_environment: EnvVarsDict, + container_logs: list[str], + expected_num_gpus: int, + mock_aiodocker: mock.MagicMock, +): + # default with mock should return 0 gpus + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { + "StatusCode": 0 + } + assert num_available_gpus() == 0 + + # add the correct log + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.log.return_value = ( + container_logs + ) + # set the correct status code + mock_aiodocker.return_value.__aenter__.return_value.containers.run.return_value.wait.return_value = { + "StatusCode": 0 + } + assert num_available_gpus() == expected_num_gpus diff --git a/services/datcore-adapter/.cookiecutterrc b/services/datcore-adapter/.cookiecutterrc deleted file mode 100644 index bee7aaa59cf..00000000000 --- a/services/datcore-adapter/.cookiecutterrc +++ /dev/null @@ -1,19 +0,0 @@ -# This file exists so you can easily regenerate your project. 
-# -# cookiecutter --overwrite-if-exists --config-file=.cookiecutterrc /home/anderegg/dev/github/cookiecutter-simcore-py-fastapi -# - -default_context: - - _extensions: ['jinja2_time.TimeExtension'] - _template: '/home/anderegg/dev/github/cookiecutter-simcore-py-fastapi' - detailed_doc: 'n' - distribution_name: 'simcore-service-datcore-adapter' - full_name: 'Sylvain Anderegg' - github_username: 'sanderegg' - package_name: 'simcore_service_datcore_adapter' - project_name: 'datcore-adapter' - project_short_description: 'Interfaces with datcore storage' - project_slug: 'datcore-adapter' - version: '0.1.0-alpha' - year: '2021' diff --git a/services/datcore-adapter/Dockerfile b/services/datcore-adapter/Dockerfile index 59e196c301d..dff4c2a4357 100644 --- a/services/datcore-adapter/Dockerfile +++ b/services/datcore-adapter/Dockerfile @@ -1,9 +1,22 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: -# cd sercices/datcore-adapter +# cd services/datcore-adapter # docker build -f Dockerfile -t datcore-adapter:prod --target production ../../ # docker run datcore-adapter:prod # @@ -11,12 +24,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=sanderegg -RUN set -eux && \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ apt-get update && \ - apt-get install -y --no-install-recommends gosu && \ - rm -rf /var/lib/apt/lists/* && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -52,32 +71,34 @@ EXPOSE 3000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu 
services/datcore-adapter/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt + # --------------------------Prod-depends-only stage ------------------- @@ -86,17 +107,19 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/datcore-adapter [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/datcore-adapter /build/services/datcore-adapter +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/datcore-adapter -RUN pip3 --no-cache-dir install -r requirements/prod.txt \ - && pip3 --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/datcore-adapter,target=/build/services/datcore-adapter,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -106,15 +129,19 @@ RUN pip3 --no-cache-dir install -r requirements/prod.txt \ # + /home/scu $HOME = WORKDIR # + services/datcore-adapter [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -123,10 +150,13 @@ COPY --chown=scu:scu services/datcore-adapter/docker services/datcore-adapter/do RUN chmod +x services/datcore-adapter/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/datcore-adapter/docker/healthcheck.py", "http://localhost:8000/v0/live"] ENTRYPOINT [ "/bin/sh", "services/datcore-adapter/docker/entrypoint.sh" ] @@ -141,7 +171,7 @@ CMD ["/bin/sh", "services/datcore-adapter/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development \ SC_DEVEL_MOUNT=/devel/services/datcore-adapter diff --git a/services/datcore-adapter/README.md b/services/datcore-adapter/README.md index 3b4492d5421..f6252d023cd 100644 --- a/services/datcore-adapter/README.md +++ b/services/datcore-adapter/README.md @@ -1,20 +1,5 @@ # datcore-adapter -[![image-size]](https://microbadger.com/images/itisfoundation/datcore-adapter. 
"More on itisfoundation/datcore-adapter.:staging-latest image") - -[![image-badge]](https://microbadger.com/images/itisfoundation/datcore-adapter "More on datcore-adapter image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/datcore-adapter "More on datcore-adapter image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/datcore-adapter "More on datcore-adapter image in registry") - -Interfaces with datcore storage - - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/datcore-adapter./staging-latest.svg?label=datcore-adapter.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/datcore-adapter.svg -[image-version]https://images.microbadger.com/badges/version/itisfoundation/datcore-adapter.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/datcore-adapter.svg - - ## Development Setup environment diff --git a/services/datcore-adapter/VERSION b/services/datcore-adapter/VERSION index 388bb06819f..17e51c385ea 100644 --- a/services/datcore-adapter/VERSION +++ b/services/datcore-adapter/VERSION @@ -1 +1 @@ -0.1.0-alpha +0.1.1 diff --git a/services/datcore-adapter/docker/boot.sh b/services/datcore-adapter/docker/boot.sh index 00ac1f94238..187ea506ba8 100755 --- a/services/datcore-adapter/docker/boot.sh +++ b/services/datcore-adapter/docker/boot.sh @@ -23,11 +23,20 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/datcore-adapter || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/datcore-adapter + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # RUNNING application ---------------------------------------- @@ -35,12 +44,12 @@ APP_LOG_LEVEL=${DATCORE_ADAPTER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/datcore-adapter/src/simcore_service_datcore_adapter && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${DATCORE_ADAPTER_REMOTE_DEBUG_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/datcore-adapter/docker/entrypoint.sh b/services/datcore-adapter/docker/entrypoint.sh index db6e2f4c61c..25153a6b2a2 100755 --- a/services/datcore-adapter/docker/entrypoint.sh +++ b/services/datcore-adapter/docker/entrypoint.sh @@ -20,7 +20,6 @@ echo "$INFO" "User : $(id scu)" echo "$INFO" "python : $(command -v python)" echo "$INFO" "pip : $(command -v pip)" - # # DEVELOPMENT MODE # - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT @@ -64,11 +63,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - echo "$INFO Starting $* ..." 
echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" diff --git a/services/datcore-adapter/docker/healthcheck.py b/services/datcore-adapter/docker/healthcheck.py old mode 100644 new mode 100755 index 7dff21be9f1..87f59876ed6 --- a/services/datcore-adapter/docker/healthcheck.py +++ b/services/datcore-adapter/docker/healthcheck.py @@ -6,9 +6,10 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: diff --git a/services/datcore-adapter/requirements/_base.in b/services/datcore-adapter/requirements/_base.in index de131dd6430..80620680281 100644 --- a/services/datcore-adapter/requirements/_base.in +++ b/services/datcore-adapter/requirements/_base.in @@ -4,6 +4,7 @@ # NOTE: ALL version constraints MUST be commented --constraint ../../../requirements/constraints.txt +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in # service-library[fastapi] @@ -14,9 +15,6 @@ aiocache boto3 aiofiles -fastapi fastapi-pagination -httpx[http2] -pydantic[email] +pydantic python-multipart # for fastapi multipart uploads -uvicorn[standard] diff --git a/services/datcore-adapter/requirements/_base.txt b/services/datcore-adapter/requirements/_base.txt index 4409ddebf41..3017b02e2a1 100644 --- a/services/datcore-adapter/requirements/_base.txt +++ b/services/datcore-adapter/requirements/_base.txt @@ -1,67 +1,126 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.5 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aiocache==0.11.1 - # via -r requirements/_base.in -aiodebug==2.3.0 +aio-pika==9.4.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.2 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in # -r requirements/../../../packages/service-library/requirements/_base.in -aiofiles==22.1.0 + # -r requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.21.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==23.2.1 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiormq==6.4.2 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.0 # via aio-pika -anyio==3.6.2 +aiosignal==1.3.1 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.3.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette # watchfiles -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 - # via redis -attrs==21.4.0 +arrow==1.3.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==23.2.0 + # via + # aiohttp # jsonschema -boto3==1.24.96 + # referencing +boto3==1.38.1 # via -r requirements/_base.in -botocore==1.27.96 +botocore==1.38.1 # via # boto3 # s3transfer -certifi==2022.12.7 +certifi==2024.2.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx -click==8.1.3 + # requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 # via + # rich-toolkit # typer # uvicorn -dnspython==2.2.1 +deprecated==1.2.14 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.6.1 # via email-validator -email-validator==1.3.0 - # via pydantic -fastapi==0.85.1 +email-validator==2.1.1 + # via + # fastapi + # pydantic +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib - # fastapi-pagination -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -fastapi-pagination==0.10.0 +fastapi-pagination==0.12.31 # via -r requirements/_base.in -h11==0.12.0 +faststream==0.5.31 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.65.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.66.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 # via # httpcore # uvicorn @@ -69,127 +128,411 @@ h2==4.1.0 # via httpx hpack==4.0.0 # via h2 -httpcore==0.15.0 +httpcore==1.0.5 # via httpx -httptools==0.5.0 +httptools==0.6.4 # via uvicorn -httpx==0.23.0 +httpx==0.27.0 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r 
requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in + # fastapi hyperframe==6.0.1 # via h2 -idna==3.4 +idna==3.6 # via # anyio # email-validator - # rfc3986 + # httpx + # requests # yarl -jaeger-client==4.8.0 - # via fastapi-contrib +importlib-metadata==8.0.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi jmespath==1.0.1 # via # boto3 # botocore -jsonschema==3.2.0 +jsonschema==4.21.1 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt # -r requirements/../../../packages/models-library/requirements/_base.in -multidict==6.0.3 - # via yarl -opentracing==2.4.0 + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2023.7.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.0.5 + # via + # aiohttp + # yarl +opentelemetry-api==1.26.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.26.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.26.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.26.0 + # via opentelemetry-exporter-otlp 
+opentelemetry-exporter-otlp-proto-http==1.26.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.47b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.47b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-fastapi==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.26.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.26.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.47b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.47b0 # via - # fastapi-contrib - # jaeger-client -pamqp==3.2.1 + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pamqp==3.3.0 # via aiormq -pydantic==1.10.2 +prometheus-client==0.20.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==4.25.4 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==6.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # 
-r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in + # fast-depends # fastapi # fastapi-pagination -pyinstrument==4.3.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.9.0 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.18.1 - # via jsonschema -python-dateutil==2.8.2 + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt 
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.17.2 + # via rich +pyinstrument==4.6.2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 # via # arrow # botocore -python-dotenv==0.21.0 - # via uvicorn -python-multipart==0.0.5 - # via -r requirements/_base.in -pyyaml==5.4.1 +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -r requirements/_base.in + # fastapi +pyyaml==6.0.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in # uvicorn -redis==4.4.0 +redis==5.2.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in -rfc3986==1.5.0 - # via httpx -s3transfer==0.6.0 - # via boto3 -six==1.16.0 +referencing==0.29.3 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil - # python-multipart - # thrift -sniffio==1.3.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.7.1 + # via + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.18.0 + # via + # jsonschema + # referencing +s3transfer==0.12.0 + # via boto3 +setuptools==74.0.0 + # via opentelemetry-instrumentation +shellingham==1.5.4 + # via typer +six==1.16.0 + # via python-dateutil +sniffio==1.3.1 # via # anyio - # httpcore # httpx -starlette==0.20.4 - # via fastapi -tenacity==8.1.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.2 +starlette==0.41.0 # via - # jaeger-client - # threadloop -tqdm==4.64.1 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==8.5.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==0.12.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.66.2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.12.3 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.6.1 - # via -r requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.4.0 + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20240316 + # via arrow +typing-extensions==4.12.2 # via # aiodebug + # 
aiodocker + # fastapi + # fastapi-pagination + # faststream + # opentelemetry-sdk # pydantic - # starlette -urllib3==1.26.12 + # pydantic-core + # rich-toolkit + # typer +urllib3==2.2.3 # via - # -c requirements/../../../packages/service-library/requirements/./../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # botocore -uvicorn==0.19.0 + # requests +uvicorn==0.34.2 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -uvloop==0.17.0 + # fastapi + # fastapi-cli +uvloop==0.21.0 # via uvicorn -watchfiles==0.18.0 +watchfiles==0.21.0 # via uvicorn -websockets==10.3 +websockets==12.0 # via uvicorn -yarl==1.8.2 +wrapt==1.16.0 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.20.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika + # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.20.1 + # via importlib-metadata diff --git a/services/datcore-adapter/requirements/_test.in b/services/datcore-adapter/requirements/_test.in index c8548cff045..be147167572 100644 --- a/services/datcore-adapter/requirements/_test.in +++ b/services/datcore-adapter/requirements/_test.in @@ -6,9 +6,9 @@ asgi_lifespan -codecov +botocore-stubs +boto3-stubs coverage -coveralls faker pytest pytest-asyncio @@ -19,4 +19,5 @@ pytest-mock pytest-runner pytest-sugar pytest-xdist +requests respx diff --git a/services/datcore-adapter/requirements/_test.txt b/services/datcore-adapter/requirements/_test.txt index 2a3ce2367f4..4ff7b32ba9a 100644 --- a/services/datcore-adapter/requirements/_test.txt +++ b/services/datcore-adapter/requirements/_test.txt @@ -1,76 +1,66 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile 
--output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -anyio==3.6.2 +anyio==4.3.0 # via # -c requirements/_base.txt - # httpcore -asgi-lifespan==2.0.0 + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +boto3-stubs==1.37.4 # via -r requirements/_test.in -attrs==21.4.0 +botocore-stubs==1.37.4 # via - # -c requirements/_base.txt - # pytest -certifi==2022.12.7 + # -r requirements/_test.in + # boto3-stubs +certifi==2024.2.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # httpcore # httpx # requests -charset-normalizer==3.0.1 - # via requests -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 +charset-normalizer==3.3.2 + # via + # -c requirements/_base.txt + # requests +coverage==7.6.12 # via # -r requirements/_test.in - # codecov - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -execnet==1.9.0 +execnet==2.1.1 # via pytest-xdist -faker==17.4.0 +faker==36.1.1 # via -r requirements/_test.in -h11==0.12.0 +h11==0.14.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.15.0 +httpcore==1.0.5 # via # -c requirements/_base.txt # httpx -httpx==0.23.0 +httpx==0.27.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # respx -icdiff==2.0.6 +icdiff==2.0.7 # via pytest-icdiff -idna==3.4 +idna==3.6 # via # -c requirements/_base.txt # anyio + # httpx # requests - # rfc3986 iniconfig==2.0.0 # via pytest -packaging==23.0 +packaging==24.2 # via # pytest # pytest-sugar -pluggy==1.0.0 +pluggy==1.5.0 # via pytest pprintpp==0.4.0 # via pytest-icdiff -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio @@ -80,54 +70,48 @@ pytest==7.2.1 # pytest-mock # pytest-sugar # pytest-xdist -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -pytest-xdist==3.2.0 +pytest-xdist==3.6.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +requests==2.32.3 # via # -c requirements/_base.txt - # faker -requests==2.28.2 - # via - # codecov - # coveralls -respx==0.20.1 + # -r requirements/_test.in +respx==0.22.0 # via -r requirements/_test.in -rfc3986==1.5.0 - # via - # -c requirements/_base.txt - # httpx -six==1.16.0 - # via - # -c requirements/_base.txt - # python-dateutil -sniffio==1.3.0 +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio # asgi-lifespan - # httpcore # httpx -termcolor==2.2.0 +termcolor==2.5.0 # via pytest-sugar -tomli==2.0.1 +types-awscrt==0.23.10 + # via botocore-stubs +types-s3transfer==0.11.3 + # via boto3-stubs +typing-extensions==4.12.2 # via - # coverage - # pytest -urllib3==1.26.12 + # -c requirements/_base.txt + # boto3-stubs +tzdata==2025.1 + # via faker +urllib3==2.2.3 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # requests diff --git a/services/datcore-adapter/requirements/_tools.txt b/services/datcore-adapter/requirements/_tools.txt index d9c99ce78d0..68ae37614ad 100644 --- 
a/services/datcore-adapter/requirements/_tools.txt +++ b/services/datcore-adapter/requirements/_tools.txt @@ -1,93 +1,86 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.7 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via + # build + # pip-tools +pyyaml==6.0.1 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # pre-commit # watchdog -tomli==2.0.1 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==74.0.0 # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # -c requirements/_base.txt + # pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.4.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/datcore-adapter/requirements/ci.txt b/services/datcore-adapter/requirements/ci.txt index 65c18eea1ea..95484d40524 100644 --- a/services/datcore-adapter/requirements/ci.txt +++ b/services/datcore-adapter/requirements/ci.txt @@ -9,12 +9,14 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/models-library -../../packages/pytest-simcore -../../packages/service-library[fastapi] -../../packages/settings-library/ +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ 
../../packages/settings-library/ # installs current package -. +simcore-service-datcore-adapter @ . diff --git a/services/datcore-adapter/requirements/dev.txt b/services/datcore-adapter/requirements/dev.txt index 73afce79c61..04e2ca59025 100644 --- a/services/datcore-adapter/requirements/dev.txt +++ b/services/datcore-adapter/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/pytest-simcore --editable ../../packages/service-library[fastapi] diff --git a/services/datcore-adapter/requirements/prod.txt b/services/datcore-adapter/requirements/prod.txt index 1b9fa94bd0f..2ca94d67b8b 100644 --- a/services/datcore-adapter/requirements/prod.txt +++ b/services/datcore-adapter/requirements/prod.txt @@ -10,9 +10,10 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library -../../packages/service-library[fastapi] -../../packages/settings-library/ +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-datcore-adapter @ . diff --git a/services/datcore-adapter/setup.cfg b/services/datcore-adapter/setup.cfg index 4bdffb8d9b6..8a5fd76af1f 100644 --- a/services/datcore-adapter/setup.cfg +++ b/services/datcore-adapter/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.0-alpha +current_version = 0.1.1 commit = True message = services/datcore-adapter version: {current_version} β†’ {new_version} tag = False @@ -9,3 +9,8 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +[mypy] +plugins = + pydantic.mypy diff --git a/services/datcore-adapter/setup.py b/services/datcore-adapter/setup.py index 7d31111d7a2..e7550e6b8a2 100644 --- a/services/datcore-adapter/setup.py +++ b/services/datcore-adapter/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -31,23 +30,29 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-service-datcore-adapter", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author="Sylvain Anderegg (sanderegg)", - description="Interfaces with datcore storage", - long_description=(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ +SETUP = { + "name": "simcore-service-datcore-adapter", + "version": (CURRENT_DIR / "VERSION").read_text().strip(), + "author": "Sylvain Anderegg (sanderegg)", + "description": "Interfaces with datcore storage", + "long_description": (CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, -) + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": 
TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-datcore-adapter=simcore_service_datcore_adapter.cli:main", + "simcore-service=simcore_service_datcore_adapter.cli:main", + ], + }, +} if __name__ == "__main__": diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/__init__.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/__init__.py index f69e279affd..94fc632e7af 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/__init__.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/__init__.py @@ -1 +1 @@ -from .meta import __version__ +from ._meta import __version__ diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/_meta.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/_meta.py new file mode 100644 index 00000000000..db004a8a9d3 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/_meta.py @@ -0,0 +1,25 @@ +""" Application's metadata + +""" + +from importlib.metadata import distribution, version +from typing import Final + +from models_library.basic_types import VersionStr +from pydantic import TypeAdapter + +current_distribution = distribution("simcore_service_datcore_adapter") +__version__ = version("simcore_service_datcore_adapter") + +API_VERSION: Final[VersionStr] = TypeAdapter(VersionStr).validate_python(__version__) +MAJOR, MINOR, PATCH = __version__.split(".") +API_VTAG: Final[str] = f"v{MAJOR}" +APP_NAME: Final[str] = current_distribution.metadata["Name"] +PROJECT_NAME: Final[str] = current_distribution.metadata["Name"] + + +def get_summary() -> str: + return current_distribution.metadata.get_all("Summary", [""])[-1] + + +summary: str = get_summary() diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/application.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/application.py index ef24be18231..337738ecf46 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/application.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/application.py @@ -1,11 +1,11 @@ -from typing import Any, Callable, cast +# mypy: disable-error-code=truthy-function -from fastapi import Request +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper +assert get_reverse_url_mapper # nosec +assert get_app # nosec -def get_reverse_url_mapper(request: Request) -> Callable[..., str]: - def reverse_url_mapper(name: str, **path_params: Any) -> str: - # NOTE: the cast appears to be needed by mypy - return cast(str, request.url_for(name, **path_params)) - - return reverse_url_mapper +__all__: tuple[str, ...] 
= ( + "get_app", + "get_reverse_url_mapper", +) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/pennsieve.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/pennsieve.py index 8e1378004a1..c7bc55f1561 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/pennsieve.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/dependencies/pennsieve.py @@ -1,4 +1,4 @@ -from typing import cast +from typing import Annotated, cast from fastapi import Depends, FastAPI from fastapi.requests import Request @@ -7,11 +7,11 @@ def _get_app(request: Request) -> FastAPI: - return request.app + return cast(FastAPI, request.app) def get_pennsieve_api_client( - app: FastAPI = Depends(_get_app), + app: Annotated[FastAPI, Depends(_get_app)], ) -> PennsieveApiClient: client = PennsieveApiClient.get_instance(app) assert client # nosec diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/http_error.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/http_error.py deleted file mode 100644 index 6b8dcd0796e..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/http_error.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Callable, Optional - -from fastapi import HTTPException -from fastapi.encoders import jsonable_encoder -from starlette.requests import Request -from starlette.responses import JSONResponse - - -async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": [exc.detail]}), status_code=exc.status_code - ) - - -def make_http_error_handler_for_exception( - status_code: int, - exception_cls: type[BaseException], - *, - override_detail_message: Optional[str] = None, -) -> Callable: - """ - Produces a handler for BaseException-type exceptions which converts them - into an error JSON response with a given status code - - SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions - """ - - async def _http_error_handler(_: Request, exc: type[BaseException]) -> JSONResponse: - assert isinstance(exc, exception_cls) # nosec - details = override_detail_message or f"{exc}" - return JSONResponse( - content=jsonable_encoder({"errors": [details]}), status_code=status_code - ) - - return _http_error_handler diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/pennsieve_error.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/pennsieve_error.py deleted file mode 100644 index ab71c949cd1..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/pennsieve_error.py +++ /dev/null @@ -1,21 +0,0 @@ -from fastapi.encoders import jsonable_encoder -from starlette.requests import Request -from starlette.responses import JSONResponse -from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_500_INTERNAL_SERVER_ERROR - -from botocore.exceptions import ClientError - - -async def botocore_exceptions_handler( - _: Request, - exc: ClientError, -) -> JSONResponse: - if exc.response["Error"]["Code"] == "NotAuthorizedException": - return JSONResponse( - content=jsonable_encoder({"errors": exc.response["Error"]}), - status_code=HTTP_401_UNAUTHORIZED, - ) - return JSONResponse( - content=jsonable_encoder({"errors": exc.response["Error"]}), - status_code=HTTP_500_INTERNAL_SERVER_ERROR, - ) diff --git 
a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/validation_error.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/validation_error.py deleted file mode 100644 index fb70f6791ac..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/errors/validation_error.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Union - -from fastapi.encoders import jsonable_encoder -from fastapi.exceptions import RequestValidationError -from fastapi.openapi.constants import REF_PREFIX -from fastapi.openapi.utils import validation_error_response_definition -from pydantic import ValidationError -from starlette.requests import Request -from starlette.responses import JSONResponse -from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY - - -async def http422_error_handler( - _: Request, - exc: Union[RequestValidationError, ValidationError], -) -> JSONResponse: - return JSONResponse( - content=jsonable_encoder({"errors": exc.errors()}), - status_code=HTTP_422_UNPROCESSABLE_ENTITY, - ) - - -validation_error_response_definition["properties"] = { - "errors": { - "title": "Validation errors", - "type": "array", - "items": {"$ref": f"{REF_PREFIX}ValidationError"}, - }, -} diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/middleware_timing.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/middleware_timing.py deleted file mode 100644 index 131f5fd3285..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/middleware_timing.py +++ /dev/null @@ -1,14 +0,0 @@ -import time -import logging -from fastapi import Request - -logger = logging.getLogger(__name__) - - -async def add_process_time_header(request: Request, call_next): - start_time = time.time() - response = await call_next(request) - process_time = time.time() - start_time - logger.debug("time to process %.2fs", process_time) - response.headers["X-Process-Time"] = str(process_time) - return response diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/module_setup.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/module_setup.py deleted file mode 100644 index d27dccf4feb..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/module_setup.py +++ /dev/null @@ -1,31 +0,0 @@ -""" - api app module -""" -from botocore.exceptions import ClientError -from fastapi import APIRouter, FastAPI -from fastapi.exceptions import HTTPException, RequestValidationError - -from ..meta import api_vtag -from .errors.http_error import http_error_handler -from .errors.pennsieve_error import botocore_exceptions_handler -from .errors.validation_error import http422_error_handler -from .middleware_timing import add_process_time_header -from .routes import datasets, files, health, user - - -def setup_api(app: FastAPI): - router = APIRouter() - - app.include_router(router, prefix=f"/{api_vtag}") - app.include_router(health.router, tags=["healthcheck"], prefix=f"/{api_vtag}") - app.include_router(user.router, tags=["user"], prefix=f"/{api_vtag}") - app.include_router(datasets.router, tags=["datasets"], prefix=f"/{api_vtag}") - app.include_router(files.router, tags=["files"], prefix=f"/{api_vtag}") - - # exception handlers - app.add_exception_handler(HTTPException, http_error_handler) - app.add_exception_handler(RequestValidationError, http422_error_handler) - app.add_exception_handler(ClientError, botocore_exceptions_handler) - - # middlewares - 
app.middleware("http")(add_process_time_header) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/__init__.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/datasets.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/datasets.py new file mode 100644 index 00000000000..17ff729ead8 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/datasets.py @@ -0,0 +1,191 @@ +import logging +from typing import Annotated, Final, TypeAlias, TypeVar + +from aiocache import cached # type: ignore[import-untyped] +from fastapi import APIRouter, Depends, Header, Query, Request +from fastapi_pagination import LimitOffsetPage, Params +from fastapi_pagination.api import create_page, resolve_params +from fastapi_pagination.bases import RawParams +from fastapi_pagination.customization import CustomizedPage, UseParamsFields +from models_library.api_schemas_datcore_adapter.datasets import ( + DatasetMetaData, + FileMetaData, +) +from models_library.api_schemas_storage.storage_schemas import ( + DEFAULT_NUMBER_OF_PATHS_PER_PAGE, + MAX_NUMBER_OF_PATHS_PER_PAGE, +) +from servicelib.fastapi.requests_decorators import cancel_on_disconnect +from starlette import status + +from ...modules.pennsieve import PennsieveApiClient +from ..dependencies.pennsieve import get_pennsieve_api_client + +router = APIRouter() +log = logging.getLogger(__file__) + +_MINUTE: Final[int] = 60 +_PENNSIEVE_CACHING_TTL_S: Final[int] = ( + 5 * _MINUTE +) # NOTE: this caching time is arbitrary + + +_T = TypeVar("_T") +_CustomPage = CustomizedPage[ + LimitOffsetPage[_T], + UseParamsFields( + limit=Query( + DEFAULT_NUMBER_OF_PATHS_PER_PAGE, ge=1, le=MAX_NUMBER_OF_PATHS_PER_PAGE + ), + ), +] + +_CustomizedPageParams: TypeAlias = _CustomPage.__params_type__ # type: ignore + + +@router.get( + "/datasets", + summary="list datasets", + status_code=status.HTTP_200_OK, + response_model=_CustomPage[DatasetMetaData], +) +@cancel_on_disconnect +@cached( + ttl=_PENNSIEVE_CACHING_TTL_S, + key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['page_params']}", +) +async def list_datasets( + request: Request, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], + page_params: Annotated[_CustomizedPageParams, Depends()], +): + assert request # nosec + assert page_params.limit is not None # nosec + assert page_params.offset is not None # nosec + datasets, total = await pennsieve_client.list_datasets( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + limit=page_params.limit, + offset=page_params.offset, + ) + return create_page(datasets, total=total, params=page_params) + + +@router.get( + "/datasets/{dataset_id}", + status_code=status.HTTP_200_OK, + response_model=DatasetMetaData, +) +@cancel_on_disconnect +async def get_dataset( + request: Request, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], + params: 
Annotated[Params, Depends()], + dataset_id: str, +) -> DatasetMetaData: + assert request # nosec + raw_params: RawParams = resolve_params(params).to_raw_params() + assert raw_params.limit is not None # nosec + assert raw_params.offset is not None # nosec + return await pennsieve_client.get_dataset( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + dataset_id=dataset_id, + ) + + +@router.get( + "/datasets/{dataset_id}/files", + summary="list top level files/folders in a dataset", + status_code=status.HTTP_200_OK, + response_model=_CustomPage[FileMetaData], +) +@cancel_on_disconnect +@cached( + ttl=_PENNSIEVE_CACHING_TTL_S, + key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}_{kwargs['page_params']}", +) +async def list_dataset_top_level_files( + request: Request, + dataset_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], + page_params: Annotated[_CustomizedPageParams, Depends()], +): + assert request # nosec + + assert page_params.limit is not None # nosec + assert page_params.offset is not None # nosec + file_metas, total = await pennsieve_client.list_packages_in_dataset( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + dataset_id=dataset_id, + limit=page_params.limit, + offset=page_params.offset, + ) + return create_page(file_metas, total=total, params=page_params) + + +@router.get( + "/datasets/{dataset_id}/files/{collection_id}", + summary="list top level files/folders in a collection in a dataset", + status_code=status.HTTP_200_OK, + response_model=_CustomPage[FileMetaData], +) +@cancel_on_disconnect +@cached( + ttl=_PENNSIEVE_CACHING_TTL_S, + key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}_{kwargs['collection_id']}_{kwargs['page_params']}", +) +async def list_dataset_collection_files( + request: Request, + dataset_id: str, + collection_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], + page_params: Annotated[_CustomizedPageParams, Depends()], +): + assert request # nosec + assert page_params.limit is not None # nosec + assert page_params.offset is not None # nosec + file_metas, total = await pennsieve_client.list_packages_in_collection( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + limit=page_params.limit, + offset=page_params.offset, + dataset_id=dataset_id, + collection_id=collection_id, + ) + return create_page(file_metas, total=total, params=page_params) + + +@router.get( + "/datasets/{dataset_id}/files_legacy", + summary="list all file meta data in dataset", + status_code=status.HTTP_200_OK, + response_model=list[FileMetaData], +) +@cancel_on_disconnect +@cached( + ttl=_PENNSIEVE_CACHING_TTL_S, + key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}", +) +async def list_dataset_files_legacy( + request: Request, + dataset_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: 
Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], +) -> list[FileMetaData]: + assert request # nosec + return await pennsieve_client.list_all_dataset_files( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + dataset_id=dataset_id, + ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/files.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/files.py new file mode 100644 index 00000000000..c69cb6d0e0c --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/files.py @@ -0,0 +1,86 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, Header, Request +from models_library.api_schemas_datcore_adapter.datasets import PackageMetaData +from pydantic import AnyUrl, TypeAdapter +from servicelib.fastapi.requests_decorators import cancel_on_disconnect +from starlette import status + +from ...models.files import FileDownloadOut +from ...modules.pennsieve import PennsieveApiClient +from ..dependencies.pennsieve import get_pennsieve_api_client + +router = APIRouter() +_logger = logging.getLogger(__file__) + + +@router.get( + "/files/{file_id}", + summary="returns a pre-signed download link for the file", + status_code=status.HTTP_200_OK, + response_model=FileDownloadOut, +) +@cancel_on_disconnect +async def download_file( + request: Request, + file_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], +) -> FileDownloadOut: + assert request # nosec + presigned_download_link = await pennsieve_client.get_presigned_download_link( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + package_id=file_id, + ) + return FileDownloadOut( + link=TypeAdapter(AnyUrl).validate_python(f"{presigned_download_link}") + ) + + +@router.delete( + "/files/{file_id}", summary="deletes a file", status_code=status.HTTP_204_NO_CONTENT +) +@cancel_on_disconnect +async def delete_file( + request: Request, + file_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], +): + assert request # nosec + await pennsieve_client.delete_object( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + obj_id=file_id, + ) + + +@router.get( + "/packages/{package_id}/files", + summary="returns a package (i.e. 
a file)", + status_code=status.HTTP_200_OK, + response_model=list[PackageMetaData], +) +@cancel_on_disconnect +async def get_package( + request: Request, + package_id: str, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], +) -> list[PackageMetaData]: + assert request # nosec + + data = await pennsieve_client.get_package_files( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + package_id=package_id, + limit=1, + offset=0, + fill_path=True, + ) + return [_.to_api_model() for _ in data] diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/health.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/health.py new file mode 100644 index 00000000000..120767f3d11 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/health.py @@ -0,0 +1,41 @@ +import logging +from collections.abc import Callable +from datetime import UTC, datetime +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.app_diagnostics import AppStatusCheck +from starlette import status +from starlette.responses import PlainTextResponse + +from ..._meta import API_VERSION, PROJECT_NAME +from ...modules.pennsieve import PennsieveApiClient +from ..dependencies.application import get_reverse_url_mapper +from ..dependencies.pennsieve import get_pennsieve_api_client + +router = APIRouter() +log = logging.getLogger(__file__) + + +@router.get( + "/live", + summary="return service health", + response_class=PlainTextResponse, + status_code=status.HTTP_200_OK, +) +async def get_service_alive(): + return f"{__name__}@{datetime.now(UTC).isoformat()}" + + +@router.get("/ready", status_code=status.HTTP_200_OK, response_model=AppStatusCheck) +async def get_service_ready( + pennsieve_client: Annotated[PennsieveApiClient, Depends(get_pennsieve_api_client)], + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + pennsieve_health_ok = await pennsieve_client.is_responsive() + return AppStatusCheck( + app_name=PROJECT_NAME, + version=API_VERSION, + services={"pennsieve": pennsieve_health_ok}, + url=url_for("get_service_ready"), + ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/user.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/user.py new file mode 100644 index 00000000000..dea213f5ec7 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/rest/user.py @@ -0,0 +1,33 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, Header, Request +from servicelib.fastapi.requests_decorators import cancel_on_disconnect +from starlette import status + +from ...models.user import Profile +from ...modules.pennsieve import PennsieveApiClient +from ..dependencies.pennsieve import get_pennsieve_api_client + +router = APIRouter() +log = logging.getLogger(__file__) + + +@router.get( + "/user/profile", + summary="returns the user profile", + status_code=status.HTTP_200_OK, + response_model=Profile, +) +@cancel_on_disconnect +async def get_user_profile( + request: Request, + x_datcore_api_key: Annotated[str, Header(..., description="Datcore API Key")], + x_datcore_api_secret: Annotated[str, Header(..., description="Datcore API Secret")], + pennsieve_client: Annotated[PennsieveApiClient, 
Depends(get_pennsieve_api_client)], +): + assert request # nosec + return await pennsieve_client.get_user_profile( + api_key=x_datcore_api_key, + api_secret=x_datcore_api_secret, + ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes.py new file mode 100644 index 00000000000..d316434bc98 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes.py @@ -0,0 +1,18 @@ +""" +api app module +""" + +from fastapi import APIRouter, FastAPI + +from .._meta import API_VTAG +from .rest import datasets, files, health, user + + +def setup_rest_api_routes(app: FastAPI) -> None: + router = APIRouter() + + app.include_router(router, prefix=f"/{API_VTAG}") + app.include_router(health.router, tags=["healthcheck"], prefix=f"/{API_VTAG}") + app.include_router(user.router, tags=["user"], prefix=f"/{API_VTAG}") + app.include_router(datasets.router, tags=["datasets"], prefix=f"/{API_VTAG}") + app.include_router(files.router, tags=["files"], prefix=f"/{API_VTAG}") diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/datasets.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/datasets.py deleted file mode 100644 index 73004d7932a..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/datasets.py +++ /dev/null @@ -1,144 +0,0 @@ -import logging -from typing import Final - -from aiocache import cached -from fastapi import APIRouter, Depends, Header, Request -from fastapi_pagination import Page, Params -from fastapi_pagination.api import create_page, resolve_params -from fastapi_pagination.bases import RawParams -from servicelib.fastapi.requests_decorators import cancel_on_disconnect -from starlette import status - -from ...models.domains.datasets import DatasetsOut, FileMetaDataOut -from ...modules.pennsieve import PennsieveApiClient -from ..dependencies.pennsieve import get_pennsieve_api_client - -router = APIRouter() -log = logging.getLogger(__file__) - -_MINUTE: Final[int] = 60 -_PENNSIEVE_CACHING_TTL_S: Final[int] = ( - 5 * _MINUTE -) # NOTE: this caching time is arbitrary - - -@router.get( - "/datasets", - summary="list datasets", - status_code=status.HTTP_200_OK, - response_model=Page[DatasetsOut], -) -@cancel_on_disconnect -@cached( - ttl=_PENNSIEVE_CACHING_TTL_S, - key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['params']}", -) -async def list_datasets( - request: Request, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), - params: Params = Depends(), -) -> Page[DatasetsOut]: - assert request # nosec - raw_params: RawParams = resolve_params(params).to_raw_params() - datasets, total = await pennsieve_client.list_datasets( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - limit=raw_params.limit, - offset=raw_params.offset, - ) - return create_page(items=datasets, total=total, params=params) - - -@router.get( - "/datasets/{dataset_id}/files", - summary="list top level files/folders in a dataset", - status_code=status.HTTP_200_OK, - response_model=Page[FileMetaDataOut], -) -@cancel_on_disconnect -@cached( - ttl=_PENNSIEVE_CACHING_TTL_S, - key_builder=lambda f, *args, **kwargs: 
f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}_{kwargs['params']}", -) -async def list_dataset_top_level_files( - request: Request, - dataset_id: str, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), - params: Params = Depends(), -) -> Page[FileMetaDataOut]: - assert request # nosec - raw_params: RawParams = resolve_params(params).to_raw_params() - - file_metas, total = await pennsieve_client.list_packages_in_dataset( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - dataset_id=dataset_id, - limit=raw_params.limit, - offset=raw_params.offset, - ) - return create_page(items=file_metas, total=total, params=params) - - -@router.get( - "/datasets/{dataset_id}/files/{collection_id}", - summary="list top level files/folders in a collection in a dataset", - status_code=status.HTTP_200_OK, - response_model=Page[FileMetaDataOut], -) -@cancel_on_disconnect -@cached( - ttl=_PENNSIEVE_CACHING_TTL_S, - key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}_{kwargs['collection_id']}_{kwargs['params']}", -) -async def list_dataset_collection_files( - request: Request, - dataset_id: str, - collection_id: str, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), - params: Params = Depends(), -) -> Page[FileMetaDataOut]: - assert request # nosec - raw_params: RawParams = resolve_params(params).to_raw_params() - - file_metas, total = await pennsieve_client.list_packages_in_collection( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - limit=raw_params.limit, - offset=raw_params.offset, - dataset_id=dataset_id, - collection_id=collection_id, - ) - return create_page(items=file_metas, total=total, params=params) - - -@router.get( - "/datasets/{dataset_id}/files_legacy", - summary="list all file meta data in dataset", - status_code=status.HTTP_200_OK, - response_model=list[FileMetaDataOut], -) -@cancel_on_disconnect -@cached( - ttl=_PENNSIEVE_CACHING_TTL_S, - key_builder=lambda f, *args, **kwargs: f"{f.__name__}_{kwargs['x_datcore_api_key']}_{kwargs['x_datcore_api_secret']}_{kwargs['dataset_id']}", -) -async def list_dataset_files_legacy( - request: Request, - dataset_id: str, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), -) -> list[FileMetaDataOut]: - assert request # nosec - file_metas = await pennsieve_client.list_all_dataset_files( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - dataset_id=dataset_id, - ) - return file_metas diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/files.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/files.py deleted file mode 100644 index 8e96349219d..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/files.py +++ /dev/null @@ -1,55 +0,0 @@ -import logging - -from fastapi import APIRouter, Depends, Header, Request -from pydantic import AnyUrl, parse_obj_as -from 
servicelib.fastapi.requests_decorators import cancel_on_disconnect -from starlette import status - -from ...models.domains.files import FileDownloadOut -from ...modules.pennsieve import PennsieveApiClient -from ..dependencies.pennsieve import get_pennsieve_api_client - -router = APIRouter() -log = logging.getLogger(__file__) - - -@router.get( - "/files/{file_id}", - summary="returns a pre-signed download link for the file", - status_code=status.HTTP_200_OK, - response_model=FileDownloadOut, -) -@cancel_on_disconnect -async def download_file( - request: Request, - file_id: str, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), -) -> FileDownloadOut: - assert request # nosec - presigned_download_link = await pennsieve_client.get_presigned_download_link( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - package_id=file_id, - ) - return FileDownloadOut(link=parse_obj_as(AnyUrl, f"{presigned_download_link}")) - - -@router.delete( - "/files/{file_id}", summary="deletes a file", status_code=status.HTTP_204_NO_CONTENT -) -@cancel_on_disconnect -async def delete_file( - request: Request, - file_id: str, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), -): - assert request # nosec - await pennsieve_client.delete_object( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - obj_id=file_id, - ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/health.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/health.py deleted file mode 100644 index a775627ca91..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/health.py +++ /dev/null @@ -1,40 +0,0 @@ -import logging -from datetime import datetime -from typing import Callable - -from fastapi import APIRouter, Depends -from models_library.app_diagnostics import AppStatusCheck -from starlette import status -from starlette.responses import PlainTextResponse - -from ...meta import api_version, project_name -from ...modules.pennsieve import PennsieveApiClient -from ..dependencies.application import get_reverse_url_mapper -from ..dependencies.pennsieve import get_pennsieve_api_client - -router = APIRouter() -log = logging.getLogger(__file__) - - -@router.get( - "/live", - summary="return service health", - response_class=PlainTextResponse, - status_code=status.HTTP_200_OK, -) -async def get_service_alive(): - return f"{__name__}@{datetime.utcnow().isoformat()}" - - -@router.get("/ready", status_code=status.HTTP_200_OK, response_model=AppStatusCheck) -async def get_service_ready( - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), - url_for: Callable = Depends(get_reverse_url_mapper), -): - pennsieve_health_ok = await pennsieve_client.is_responsive() - return AppStatusCheck( - app_name=project_name, - version=api_version, - services={"pennsieve": pennsieve_health_ok}, - url=url_for("get_service_ready"), - ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/user.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/user.py deleted file mode 100644 index 1b983029f2c..00000000000 --- 
a/services/datcore-adapter/src/simcore_service_datcore_adapter/api/routes/user.py +++ /dev/null @@ -1,32 +0,0 @@ -import logging - -from fastapi import APIRouter, Depends, Header, Request -from servicelib.fastapi.requests_decorators import cancel_on_disconnect -from starlette import status - -from ...models.domains.user import Profile -from ...modules.pennsieve import PennsieveApiClient -from ..dependencies.pennsieve import get_pennsieve_api_client - -router = APIRouter() -log = logging.getLogger(__file__) - - -@router.get( - "/user/profile", - summary="returns the user profile", - status_code=status.HTTP_200_OK, - response_model=Profile, -) -@cancel_on_disconnect -async def get_user_profile( - request: Request, - x_datcore_api_key: str = Header(..., description="Datcore API Key"), - x_datcore_api_secret: str = Header(..., description="Datcore API Secret"), - pennsieve_client: PennsieveApiClient = Depends(get_pennsieve_api_client), -): - assert request # nosec - return await pennsieve_client.get_user_profile( - api_key=x_datcore_api_key, - api_secret=x_datcore_api_secret, - ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/cli.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/cli.py new file mode 100644 index 00000000000..60839168e97 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/cli.py @@ -0,0 +1,25 @@ +import logging + +import typer +from settings_library.utils_cli import create_settings_command, create_version_callback + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings + +log = logging.getLogger(__name__) + +# NOTE: 'main' variable is referred in the setup's entrypoint! +main = typer.Typer(name=PROJECT_NAME) + +main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def run() -> None: + """Runs application""" + typer.secho("Sorry, this entrypoint is intentionally disabled. 
Use instead") + typer.secho( + f"$ uvicorn {PROJECT_NAME}.main:the_app", + fg=typer.colors.BLUE, + ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/application.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/application.py index e7527db21d9..50fd0b8b888 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/application.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/application.py @@ -1,15 +1,23 @@ import logging -from typing import Optional -from fastapi import FastAPI, HTTPException -from fastapi.exceptions import RequestValidationError +from common_library.basic_types import BootModeEnum +from fastapi import FastAPI +from fastapi.middleware.gzip import GZipMiddleware +from fastapi_pagination import add_pagination +from servicelib.fastapi import timing_middleware +from servicelib.fastapi.http_error import set_app_default_http_error_handlers +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) from servicelib.fastapi.openapi import override_fastapi_openapi_method -from servicelib.fastapi.tracing import setup_tracing +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) +from starlette.middleware.base import BaseHTTPMiddleware -from ..api.errors.http_error import http_error_handler -from ..api.errors.validation_error import http422_error_handler -from ..api.module_setup import setup_api -from ..meta import api_version, api_vtag +from .._meta import API_VERSION, API_VTAG, APP_NAME +from ..api.routes import setup_rest_api_routes from ..modules import pennsieve from .events import ( create_start_app_handler, @@ -17,7 +25,7 @@ on_shutdown, on_startup, ) -from .settings import Settings +from .settings import ApplicationSettings LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR NOISY_LOGGERS = ( @@ -26,16 +34,10 @@ "hpack", ) -logger = logging.getLogger(__name__) - +_logger = logging.getLogger(__name__) -def create_app(settings: Optional[Settings] = None) -> FastAPI: - if settings is None: - settings = Settings.create_from_envs() - assert settings # nosec - logging.basicConfig(level=settings.LOG_LEVEL.value) - logging.root.setLevel(settings.LOG_LEVEL.value) +def create_app(settings: ApplicationSettings) -> FastAPI: # keep mostly quiet noisy loggers quiet_level: int = max( min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING @@ -43,21 +45,43 @@ def create_app(settings: Optional[Settings] = None) -> FastAPI: for name in NOISY_LOGGERS: logging.getLogger(name).setLevel(quiet_level) - logger.debug("App settings:\n%s", settings.json(indent=2)) + + _logger.debug("App settings:\n%s", settings.model_dump_json(indent=1)) app = FastAPI( - debug=settings.debug, - title="Datcore Adapter Service", + debug=settings.SC_BOOT_MODE + in [BootModeEnum.DEBUG, BootModeEnum.DEVELOPMENT, BootModeEnum.LOCAL], + title=APP_NAME, description="Interfaces with Pennsieve storage service", - version=api_version, - openapi_url=f"/api/{api_vtag}/openapi.json", + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", docs_url="/dev/doc", redoc_url=None, # default disabled ) override_fastapi_openapi_method(app) + add_pagination(app) app.state.settings = settings + if app.state.settings.DATCORE_ADAPTER_TRACING: + setup_tracing( + app, + app.state.settings.DATCORE_ADAPTER_TRACING, + APP_NAME, + ) + if app.state.settings.DATCORE_ADAPTER_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) + + if 
settings.SC_BOOT_MODE != BootModeEnum.PRODUCTION: + # middleware to time requests (ONLY for development) + app.add_middleware( + BaseHTTPMiddleware, dispatch=timing_middleware.add_process_time_header + ) + app.add_middleware(GZipMiddleware) + + if app.state.settings.DATCORE_ADAPTER_TRACING: + initialize_fastapi_app_tracing(app) + # events app.add_event_handler("startup", on_startup) app.add_event_handler("startup", create_start_app_handler(app)) @@ -65,15 +89,11 @@ def create_app(settings: Optional[Settings] = None) -> FastAPI: app.add_event_handler("shutdown", on_shutdown) # Routing - setup_api(app) + setup_rest_api_routes(app) if settings.PENNSIEVE.PENNSIEVE_ENABLED: pennsieve.setup(app, settings.PENNSIEVE) - if settings.DATCORE_ADAPTER_TRACING: - setup_tracing(app, settings.DATCORE_ADAPTER_TRACING) - - app.add_exception_handler(HTTPException, http_error_handler) - app.add_exception_handler(RequestValidationError, http422_error_handler) + set_app_default_http_error_handlers(app) return app diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/events.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/events.py index 69be9a9e7cb..9c41df9aad5 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/events.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/events.py @@ -2,10 +2,8 @@ from typing import Callable from fastapi import FastAPI -from models_library.basic_types import BootModeEnum -from ..meta import __version__, project_name -from ..modules.remote_debug import setup_remote_debugging +from .._meta import PROJECT_NAME, __version__ logger = logging.getLogger(__name__) @@ -30,19 +28,14 @@ def on_startup() -> None: def on_shutdown() -> None: - msg = project_name + f" v{__version__} SHUT DOWN" + msg = PROJECT_NAME + f" v{__version__} SHUT DOWN" print(f"{msg:=^100}", flush=True) -def create_start_app_handler(app: FastAPI) -> Callable: +def create_start_app_handler(_app: FastAPI) -> Callable: async def start_app() -> None: logger.info("Application started") - # setup connection to remote debugger (if applies) - setup_remote_debugging( - force_enabled=app.state.settings.SC_BOOT_MODE == BootModeEnum.DEBUG - ) - return start_app diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py index 14915357e11..98f091c76e9 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/core/settings.py @@ -1,9 +1,11 @@ -from functools import cached_property -from typing import Optional, cast +from typing import Annotated -from models_library.basic_types import BootModeEnum, LogLevel -from pydantic import Field, parse_obj_as, validator +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import LogLevel +from pydantic import AliasChoices, Field, TypeAdapter, field_validator from pydantic.networks import AnyUrl +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings from settings_library.base import BaseCustomSettings from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings @@ -12,41 +14,61 @@ class PennsieveSettings(BaseCustomSettings): PENNSIEVE_ENABLED: bool = True - PENNSIEVE_API_URL: AnyUrl = parse_obj_as(AnyUrl, 
"https://api.pennsieve.io") + PENNSIEVE_API_URL: AnyUrl = TypeAdapter(AnyUrl).validate_python( + "https://api.pennsieve.io" + ) PENNSIEVE_API_GENERAL_TIMEOUT: float = 20.0 PENNSIEVE_HEALTCHCHECK_TIMEOUT: float = 1.0 -class Settings(BaseCustomSettings, MixinLoggingSettings): - # DOCKER - SC_BOOT_MODE: Optional[BootModeEnum] - - LOG_LEVEL: LogLevel = Field( - LogLevel.INFO.value, - env=[ - "DATCORE_ADAPTER_LOGLEVEL", - "DATCORE_ADAPTER_LOG_LEVEL", - "LOG_LEVEL", - "LOGLEVEL", - ], - ) +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "DATCORE_ADAPTER_LOGLEVEL", + "DATCORE_ADAPTER_LOG_LEVEL", + "LOG_LEVEL", + "LOGLEVEL", + ), + ), + ] = LogLevel.INFO - PENNSIEVE: PennsieveSettings = Field(auto_default_from_env=True) + PENNSIEVE: Annotated[ + PennsieveSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] - DATCORE_ADAPTER_TRACING: Optional[TracingSettings] = Field( - auto_default_from_env=True - ) + DATCORE_ADAPTER_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "DATCORE_ADAPTER_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + DATCORE_ADAPTER_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "DATCORE_ADAPTER_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY - @cached_property - def debug(self) -> bool: - """If True, debug tracebacks should be returned on errors.""" - return self.SC_BOOT_MODE in [ - BootModeEnum.DEBUG, - BootModeEnum.DEVELOPMENT, - BootModeEnum.LOCAL, - ] + DATCORE_ADAPTER_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + DATCORE_ADAPTER_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] - @validator("LOG_LEVEL", pre=True) + @field_validator("LOG_LEVEL", mode="before") @classmethod - def _validate_loglevel(cls, value) -> str: - return cast(str, cls.validate_log_level(value)) + def _validate_loglevel(cls, value: str) -> str: + return cls.validate_log_level(value) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/errors/__init__.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/errors/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/errors/handlers.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/errors/handlers.py new file mode 100644 index 00000000000..90561e459f6 --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/errors/handlers.py @@ -0,0 +1,32 @@ +from botocore.exceptions import ClientError +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from servicelib.fastapi.http_error import set_app_default_http_error_handlers +from starlette.requests import Request +from starlette.responses import JSONResponse +from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_500_INTERNAL_SERVER_ERROR + + +async def botocore_exceptions_handler( + _: Request, + exc: 
Exception, +) -> JSONResponse: + assert isinstance(exc, ClientError) # nosec + assert "Error" in exc.response # nosec + assert "Code" in exc.response["Error"] # nosec + error_content = {"errors": [f"{exc}"]} + if exc.response["Error"]["Code"] == "NotAuthorizedException": + return JSONResponse( + content=jsonable_encoder({"error": error_content}), + status_code=HTTP_401_UNAUTHORIZED, + ) + return JSONResponse( + content=jsonable_encoder({"error": error_content}), + status_code=HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +def set_exception_handlers(app: FastAPI) -> None: + set_app_default_http_error_handlers(app) + + app.add_exception_handler(ClientError, botocore_exceptions_handler) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/main.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/main.py index d163148a200..7bd6a787163 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/main.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/main.py @@ -1,7 +1,22 @@ -"""Main application to be deployed in for example uvicorn -""" +"""Main application to be deployed in for example uvicorn""" + +import logging + from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers from simcore_service_datcore_adapter.core.application import create_app +from simcore_service_datcore_adapter.core.settings import ApplicationSettings + +_the_settings = ApplicationSettings.create_from_envs() + +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=_the_settings.log_level) # NOSONAR +logging.root.setLevel(_the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=_the_settings.DATCORE_ADAPTER_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=_the_settings.DATCORE_ADAPTER_LOG_FILTER_MAPPING, + tracing_settings=_the_settings.DATCORE_ADAPTER_TRACING, +) # SINGLETON FastAPI app -the_app: FastAPI = create_app() +the_app: FastAPI = create_app(_the_settings) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/meta.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/meta.py deleted file mode 100644 index 06a92e0eaab..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/meta.py +++ /dev/null @@ -1,29 +0,0 @@ -""" Application's metadata - -""" -from contextlib import suppress - -import pkg_resources - -current_distribution = pkg_resources.get_distribution("simcore_service_datcore_adapter") - -__version__ = current_distribution.version -api_version: str = __version__ -major, minor, patch = __version__.split(".") -api_vtag: str = f"v{major}" - -project_name: str = current_distribution.project_name - - -def get_summary() -> str: - with suppress(Exception): - try: - metadata = current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = current_distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" - - -summary: str = get_summary() diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/datasets.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/datasets.py deleted file mode 100644 index e91d632d30d..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/datasets.py +++ /dev/null @@ -1,6 +0,0 @@ -from ..schemas.datasets import DatasetMetaData, FileMetaData - - -DatasetsOut = DatasetMetaData - 
-FileMetaDataOut = FileMetaData diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/files.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/files.py deleted file mode 100644 index a125faaa5fd..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/files.py +++ /dev/null @@ -1,5 +0,0 @@ -from pydantic import AnyUrl, BaseModel - - -class FileDownloadOut(BaseModel): - link: AnyUrl diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/files.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/files.py new file mode 100644 index 00000000000..8275315b42b --- /dev/null +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/files.py @@ -0,0 +1,36 @@ +import datetime +from pathlib import Path +from typing import Annotated + +from models_library.api_schemas_datcore_adapter.datasets import PackageMetaData +from pydantic import AnyUrl, BaseModel, ByteSize, Field + + +class FileDownloadOut(BaseModel): + link: AnyUrl + + +class DatCorePackageMetaData(BaseModel): + id: int + path: Path + display_path: Path + package_id: Annotated[str, Field(alias="packageId")] + name: str + filename: str + s3_bucket: Annotated[str, Field(alias="s3bucket")] + size: ByteSize + created_at: Annotated[datetime.datetime, Field(alias="createdAt")] + updated_at: Annotated[datetime.datetime, Field(alias="updatedAt")] + + def to_api_model(self) -> PackageMetaData: + return PackageMetaData( + path=self.path, + display_path=self.display_path, + package_id=self.package_id, + name=self.name, + filename=self.filename, + s3_bucket=self.s3_bucket, + size=self.size, + created_at=self.created_at, + updated_at=self.updated_at, + ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/schemas/datasets.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/schemas/datasets.py deleted file mode 100644 index 4d5190c5512..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/schemas/datasets.py +++ /dev/null @@ -1,58 +0,0 @@ -from datetime import datetime -from enum import Enum, unique -from pathlib import Path -from typing import Any - -from pydantic import BaseModel - - -class DatasetMetaData(BaseModel): - id: str - display_name: str - - -@unique -class DataType(str, Enum): - FILE = "FILE" - FOLDER = "FOLDER" - - -class FileMetaData(BaseModel): - dataset_id: str - package_id: str - id: str - name: str - type: str - path: Path - size: int - created_at: datetime - last_modified_at: datetime - data_type: DataType - - @classmethod - def from_pennsieve_package( - cls, package: dict[str, Any], files: list[dict[str, Any]], base_path: Path - ): - """creates a FileMetaData from a pennsieve data structure.""" - pck_name: str = package["content"]["name"] - if "extension" in package and not pck_name.endswith(package["extension"]): - pck_name += ".".join((pck_name, package["extension"])) - - file_size = 0 - if package["content"]["packageType"] != "Collection" and files: - file_size = files[0]["content"]["size"] - - return cls( - dataset_id=package["content"]["datasetNodeId"], - package_id=package["content"]["nodeId"], - id=package["content"]["id"], - name=pck_name, - path=base_path / pck_name, - type=package["content"]["packageType"], - size=file_size, - created_at=package["content"]["createdAt"], - last_modified_at=package["content"]["updatedAt"], - data_type=DataType.FOLDER - if 
package["content"]["packageType"] == "Collection" - else DataType.FILE, - ) diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/user.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/models/user.py similarity index 100% rename from services/datcore-adapter/src/simcore_service_datcore_adapter/models/domains/user.py rename to services/datcore-adapter/src/simcore_service_datcore_adapter/models/user.py diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/pennsieve.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/pennsieve.py index 6b2b7192025..79148b72f7c 100644 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/pennsieve.py +++ b/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/pennsieve.py @@ -4,11 +4,17 @@ from dataclasses import dataclass from itertools import islice from pathlib import Path -from typing import Any, Final, Optional, TypedDict, cast +from typing import Any, Final, TypedDict, cast import boto3 -from aiocache import SimpleMemoryCache +from aiocache import SimpleMemoryCache # type: ignore[import-untyped] from fastapi.applications import FastAPI +from models_library.api_schemas_datcore_adapter.datasets import ( + DatasetMetaData, + DataType, + FileMetaData, +) +from pydantic import ByteSize from servicelib.logging_utils import log_context from servicelib.utils import logged_gather from starlette import status @@ -19,8 +25,8 @@ from tenacity.stop import stop_after_attempt from ..core.settings import PennsieveSettings -from ..models.domains.user import Profile -from ..models.schemas.datasets import DatasetMetaData, FileMetaData +from ..models.files import DatCorePackageMetaData +from ..models.user import Profile from ..utils.client_base import BaseServiceClientApi, setup_client_instance logger = logging.getLogger(__name__) @@ -29,6 +35,36 @@ _GATHER_MAX_CONCURRENCY = 10 +def _to_file_meta_data( + package: dict[str, Any], files: list[DatCorePackageMetaData], base_path: Path +) -> FileMetaData: + """creates a FileMetaData from a pennsieve data structure.""" + pck_name: str = package["content"]["name"] + if "extension" in package and not pck_name.endswith(package["extension"]): + pck_name += ".".join((pck_name, package["extension"])) + + file_size = 0 + if package["content"]["packageType"] != "Collection" and files: + file_size = files[0].size + + return FileMetaData( + dataset_id=package["content"]["datasetNodeId"], + package_id=package["content"]["nodeId"], + id=f"{package['content']['id']}", + name=pck_name, + path=base_path / pck_name, + type=package["content"]["packageType"], + size=file_size, + created_at=package["content"]["createdAt"], + last_modified_at=package["content"]["updatedAt"], + data_type=( + DataType.FOLDER + if package["content"]["packageType"] == "Collection" + else DataType.FILE + ), + ) + + def _compute_file_path( all_packages: dict[str, dict[str, Any]], pck: dict[str, Any] ) -> Path: @@ -46,9 +82,9 @@ class PennsieveAuthorizationHeaders(TypedDict): Authorization: str -_TTL_CACHE_AUTHORIZATION_HEADERS_SECONDS: Final[ - int -] = 3530 # NOTE: observed while developing this code, pennsieve authorizes 3600 seconds, so we cache a bit less +_TTL_CACHE_AUTHORIZATION_HEADERS_SECONDS: Final[int] = ( + 3530 # NOTE: observed while developing this code, pennsieve authorizes 3600 seconds, so we cache a bit less +) ExpirationTimeSecs = int @@ -128,8 +164,8 @@ async def _request( api_secret: str, method: str, path: str, - 
params: Optional[dict[str, Any]] = None, - json: Optional[dict[str, Any]] = None, + params: dict[str, Any] | None = None, + json: dict[str, Any] | None = None, ) -> Any: response = await self.client.request( method, @@ -181,7 +217,7 @@ async def _get_dataset_packages( page_size: int, cursor: str, ) -> dict[str, Any]: - return cast( + packages = cast( dict[str, Any], await self._request( api_key, @@ -195,6 +231,10 @@ async def _get_dataset_packages( }, ), ) + packages["packages"] = [ + f for f in packages["packages"] if f["content"]["state"] != "DELETED" + ] + return packages async def _get_package( self, api_key: str, api_secret: str, package_id: str @@ -210,28 +250,67 @@ async def _get_package( ), ) - async def _get_package_files( - self, api_key: str, api_secret: str, package_id: str, limit: int, offset: int - ) -> list[dict[str, Any]]: - return cast( - list[dict[str, Any]], - await self._request( - api_key, - api_secret, - "GET", - f"/packages/{package_id}/files", - params={"limit": limit, "offset": offset}, - ), + async def get_package_files( + self, + *, + api_key: str, + api_secret: str, + package_id: str, + limit: int, + offset: int, + fill_path: bool, + ) -> list[DatCorePackageMetaData]: + raw_data = await self._request( + api_key, + api_secret, + "GET", + f"/packages/{package_id}/files", + params={"limit": limit, "offset": offset}, ) + path = display_path = Path() + if fill_path: + package_info = await self._get_package(api_key, api_secret, package_id) + dataset_id = package_info["content"]["datasetId"] + dataset = await self._get_dataset(api_key, api_secret, dataset_id) + + path = ( + Path(dataset_id) + / Path( + "/".join( + ancestor["content"]["id"] + for ancestor in package_info.get("ancestors", []) + ) + ) + / Path(package_info["content"]["name"]) + ) + display_path = ( + Path(dataset["content"]["name"]) + / Path( + "/".join( + ancestor["content"]["name"] + for ancestor in package_info.get("ancestors", []) + ) + ) + / Path(package_info["content"]["name"]) + ) + + return [ + DatCorePackageMetaData(**_["content"], path=path, display_path=display_path) + for _ in raw_data + ] async def _get_pck_id_files( self, api_key: str, api_secret: str, pck_id: str, pck: dict[str, Any] - ) -> tuple[str, list[dict[str, Any]]]: - + ) -> tuple[str, list[DatCorePackageMetaData]]: return ( pck_id, - await self._get_package_files( - api_key, api_secret, pck["content"]["nodeId"], limit=1, offset=0 + await self.get_package_files( + api_key=api_key, + api_secret=api_secret, + package_id=pck["content"]["nodeId"], + limit=1, + offset=0, + fill_path=False, ), ) @@ -268,12 +347,29 @@ async def list_datasets( DatasetMetaData( id=d["content"]["id"], display_name=d["content"]["name"], + size=( + ByteSize(sz) + if (sz := d.get("storage", 0)) > 0 # NOSONAR + else None + ), ) for d in dataset_page["datasets"] ], dataset_page["totalCount"], ) + async def get_dataset( + self, api_key: str, api_secret: str, dataset_id: str + ) -> DatasetMetaData: + dataset_pck = await self._get_dataset(api_key, api_secret, dataset_id) + return DatasetMetaData( + id=dataset_pck["content"]["id"], + display_name=dataset_pck["content"]["name"], + size=( + ByteSize(dataset_pck["storage"]) if dataset_pck["storage"] > 0 else None + ), + ) + async def list_packages_in_dataset( self, api_key: str, @@ -289,7 +385,7 @@ async def list_packages_in_dataset( for pck in islice(dataset_pck["children"], offset, offset + limit) if pck["content"]["packageType"] != "Collection" ] - package_files = dict( + package_files: dict[str, 
list[DatCorePackageMetaData]] = dict( await logged_gather( *package_files_tasks, log=logger, @@ -298,11 +394,13 @@ async def list_packages_in_dataset( ) return ( [ - FileMetaData.from_pennsieve_package( + _to_file_meta_data( pck, - package_files[pck["content"]["id"]] - if pck["content"]["packageType"] != "Collection" - else [], + ( + package_files[pck["content"]["id"]] + if pck["content"]["packageType"] != "Collection" + else [] + ), base_path=Path(dataset_pck["content"]["name"]), ) for pck in islice(dataset_pck["children"], offset, offset + limit) @@ -347,11 +445,13 @@ async def list_packages_in_collection( return ( [ - FileMetaData.from_pennsieve_package( + _to_file_meta_data( pck, - package_files[pck["content"]["id"]] - if pck["content"]["packageType"] != "Collection" - else [], + ( + package_files[pck["content"]["id"]] + if pck["content"]["packageType"] != "Collection" + else [] + ), base_path=base_path, ) for pck in islice(collection_pck["children"], offset, offset + limit) @@ -380,7 +480,8 @@ async def list_all_dataset_files( while resp := await self._get_dataset_packages( api_key, api_secret, dataset_id, PAGE_SIZE, cursor ): - cursor = resp.get("cursor") + cursor = resp.get("cursor") # type: ignore[assignment] + assert isinstance(cursor, str | None) # nosec all_packages.update( {p["content"]["id"]: p for p in resp.get("packages", [])} ) @@ -424,7 +525,7 @@ async def list_all_dataset_files( file_path = base_path / _compute_file_path(all_packages, package) file_meta_data.append( - FileMetaData.from_pennsieve_package( + _to_file_meta_data( package, package_files[package_id], file_path.parent ) ) @@ -435,12 +536,17 @@ async def get_presigned_download_link( self, api_key: str, api_secret: str, package_id: str ) -> URL: """returns the presigned download link of the first file in the package""" - files = await self._get_package_files( - api_key, api_secret, package_id, limit=1, offset=0 + files = await self.get_package_files( + api_key=api_key, + api_secret=api_secret, + package_id=package_id, + limit=1, + offset=0, + fill_path=False, ) # NOTE: this was done like this in the original dsm. we might encounter a problem when there are more than one files assert len(files) == 1 # nosec - file_id = files[0]["content"]["id"] + file_id = files[0].id file_link = cast( dict[str, Any], await self._request( diff --git a/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/remote_debug.py b/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/remote_debug.py deleted file mode 100644 index afe86012143..00000000000 --- a/services/datcore-adapter/src/simcore_service_datcore_adapter/modules/remote_debug.py +++ /dev/null @@ -1,41 +0,0 @@ -""" Setup remote debugger with Python Tools for Visual Studio (PTVSD) - -""" - -import logging -import os - -logger = logging.getLogger(__name__) - -REMOTE_DEBUG_PORT = 3000 - - -def setup_remote_debugging(force_enabled=False, *, boot_mode=None): - """ - Programaticaly enables remote debugging if SC_BOOT_MODE==debug-ptvsd - """ - boot_mode = boot_mode or os.environ.get("SC_BOOT_MODE") - if boot_mode == "debug-ptvsd" or force_enabled: - try: - logger.debug("Enabling attach ptvsd ...") - # - # SEE https://github.com/microsoft/ptvsd#enabling-debugging - # - import ptvsd - - ptvsd.enable_attach( - address=("0.0.0.0", REMOTE_DEBUG_PORT), # nosec - ) # nosec - except ImportError as err: - raise ValueError( - "Cannot enable remote debugging. 
Please install ptvsd first" - ) from err - - logger.info("Remote debugging enabled: listening port %s", REMOTE_DEBUG_PORT) - else: - logger.debug( - "Booting without remote debugging since SC_BOOT_MODE=%s", boot_mode - ) - - -__all__ = ("setup_remote_debugging",) diff --git a/services/datcore-adapter/tests/unit/conftest.py b/services/datcore-adapter/tests/unit/conftest.py index 2255753fa2a..6090efe85ae 100644 --- a/services/datcore-adapter/tests/unit/conftest.py +++ b/services/datcore-adapter/tests/unit/conftest.py @@ -3,8 +3,9 @@ # pylint:disable=redefined-outer-name import json +from collections.abc import AsyncIterator, Callable from pathlib import Path -from typing import Any, AsyncIterator, Callable, Optional +from typing import Any from uuid import uuid4 import faker @@ -14,8 +15,9 @@ import simcore_service_datcore_adapter from asgi_lifespan import LifespanManager from fastapi.applications import FastAPI -from pytest import MonkeyPatch +from models_library.utils.fastapi_encoders import jsonable_encoder from pytest_mock import MockFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict from simcore_service_datcore_adapter.modules.pennsieve import ( PennsieveAuthorizationHeaders, ) @@ -23,6 +25,8 @@ from starlette.testclient import TestClient pytest_plugins = [ + "pytest_simcore.cli_runner", + "pytest_simcore.environment_configs", "pytest_simcore.repository_paths", "pytest_simcore.pytest_global_environs", ] @@ -62,7 +66,9 @@ def pennsieve_mock_dataset_packages(mocks_dir: Path) -> dict[str, Any]: @pytest.fixture() -def minimal_app() -> FastAPI: +def minimal_app( + app_environment: None, +) -> FastAPI: from simcore_service_datcore_adapter.main import the_app return the_app @@ -75,24 +81,30 @@ def client(minimal_app: FastAPI) -> TestClient: @pytest.fixture -def app_envs(monkeypatch: MonkeyPatch): - # disable tracing as together with LifespanManager, it does not remove itself nicely - monkeypatch.setenv("DATCORE_ADAPTER_TRACING", "null") +def app_environment( + mock_env_devel_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **mock_env_devel_environment, + "DATCORE_ADAPTER_TRACING": "null", + }, + ) @pytest.fixture() async def initialized_app( - app_envs: None, minimal_app: FastAPI + app_environment: None, minimal_app: FastAPI ) -> AsyncIterator[FastAPI]: async with LifespanManager(minimal_app): yield minimal_app -@pytest.fixture(scope="function") +@pytest.fixture async def async_client(initialized_app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: - async with httpx.AsyncClient( - app=initialized_app, + transport=httpx.ASGITransport(app=initialized_app), base_url="http://datcore-adapter.testserver.io", headers={"Content-Type": "application/json"}, ) as client: @@ -217,18 +229,23 @@ def pennsieve_api_headers( def pennsieve_random_fake_datasets( create_pennsieve_fake_dataset_id: Callable, ) -> dict[str, Any]: - datasets = { + return { "datasets": [ - {"content": {"id": create_pennsieve_fake_dataset_id(), "name": fake.text()}} + { + "content": { + "id": create_pennsieve_fake_dataset_id(), + "name": fake.text(), + }, + "storage": fake.pyint(), + } for _ in range(10) ], "totalCount": 20, } - return datasets @pytest.fixture -def disable_aiocache(monkeypatch: MonkeyPatch): +def disable_aiocache(monkeypatch: pytest.MonkeyPatch): monkeypatch.setenv("AIOCACHE_DISABLE", "1") @@ -262,7 +279,7 @@ async def pennsieve_subsystem_mock( pennsieve_collection_id: str, pennsieve_file_id: str, faker: 
faker.Faker, -) -> AsyncIterator[Optional[respx.MockRouter]]: +) -> AsyncIterator[respx.MockRouter | None]: if use_real_pennsieve_interface: yield else: @@ -297,7 +314,11 @@ async def pennsieve_subsystem_mock( ).respond( status.HTTP_200_OK, json={ - "content": {"name": "Some dataset name that is awesome"}, + "content": { + "name": "Some dataset name that is awesome", + "id": pennsieve_dataset_id, + }, + "storage": fake.pyint(), "children": pennsieve_mock_dataset_packages["packages"], }, ) @@ -308,15 +329,40 @@ async def pennsieve_subsystem_mock( # get collection packages mock.get( - f"https://api.pennsieve.io/packages/{pennsieve_collection_id}" + rf"https://api.pennsieve.io/packages/{pennsieve_collection_id}" ).respond( status.HTTP_200_OK, json={ "content": {"name": "this package name is also awesome"}, "children": pennsieve_mock_dataset_packages["packages"], "ancestors": [ - {"content": {"name": "Bigger guy"}}, - {"content": {"name": "Big guy"}}, + { + "content": { + "name": "Bigger guy", + } + }, + { + "content": { + "name": "Big guy", + } + }, + ], + }, + ) + # get package ancestry + mock.get( + url__regex=rf"https://api.pennsieve.io/packages/{pennsieve_file_id}\?includeAncestors=(?P.+)$" + ).respond( + status.HTTP_200_OK, + json={ + "content": { + "datasetId": pennsieve_dataset_id, + "name": pennsieve_file_id, + }, + "ancestors": [ + {"content": {"id": faker.pystr(), "name": faker.name()}}, + {"content": {"id": faker.pystr(), "name": faker.name()}}, + {"content": {"id": faker.pystr(), "name": faker.name()}}, ], }, ) @@ -325,7 +371,22 @@ async def pennsieve_subsystem_mock( url__regex=r"https://api.pennsieve.io/packages/.+/files\?limit=1&offset=0$" ).respond( status.HTTP_200_OK, - json=[{"content": {"size": 12345, "id": "fake_file_id"}}], + json=[ + jsonable_encoder( + { + "content": { + "size": 12345, + "id": faker.pyint(), + "packageId": "N:package:475beff2-03c8-4dca-a221-d1d02e17f064", + "name": faker.file_name(), + "filename": faker.file_name(), + "s3bucket": faker.pystr(), + "createdAt": faker.date_time(), + "updatedAt": faker.date_time(), + } + } + ) + ], ) # download file diff --git a/services/datcore-adapter/tests/unit/test_cli.py b/services/datcore-adapter/tests/unit/test_cli.py new file mode 100644 index 00000000000..ef7b2b8a4f6 --- /dev/null +++ b/services/datcore-adapter/tests/unit/test_cli.py @@ -0,0 +1,35 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import os + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_datcore_adapter._meta import API_VERSION +from simcore_service_datcore_adapter.cli import main +from simcore_service_datcore_adapter.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def test_cli_help_and_version(cli_runner: CliRunner): + result = cli_runner.invoke(main, "--help") + assert result.exit_code == os.EX_OK, result.output + + result = cli_runner.invoke(main, "--version") + assert result.exit_code == os.EX_OK, result.output + assert result.stdout.strip() == API_VERSION + + +def test_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK + + print(result.output) + settings = ApplicationSettings(result.output) + assert settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() + + +def test_run(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["run"]) + assert result.exit_code == 
0 + assert "disabled" in result.stdout diff --git a/services/datcore-adapter/tests/unit/test_core_settings.py b/services/datcore-adapter/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..6ab82562ad2 --- /dev/null +++ b/services/datcore-adapter/tests/unit/test_core_settings.py @@ -0,0 +1,42 @@ +# pylint: disable=unused-variable +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name + + +import pytest +from pytest_simcore.helpers.monkeypatch_envs import ( + EnvVarsDict, + delenvs_from_dict, + setenvs_from_dict, +) +from simcore_service_datcore_adapter.core.settings import ApplicationSettings + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + external_envfile_dict: EnvVarsDict, +) -> EnvVarsDict: + """ + NOTE: To run against repo.config in osparc-config repo + + ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + pytest --external-envfile=.secrets tests/unit/test_core_settings.py + + """ + if external_envfile_dict: + delenvs_from_dict(monkeypatch, app_environment, raising=False) + return setenvs_from_dict( + monkeypatch, + {**external_envfile_dict}, + ) + return app_environment + + +def test_unit_app_environment(app_environment: EnvVarsDict): + assert app_environment + settings = ApplicationSettings.create_from_envs() + print("captured settings: \n", settings.model_dump_json(indent=2)) + + assert settings.PENNSIEVE diff --git a/services/datcore-adapter/tests/unit/test_exceptions_handlers.py b/services/datcore-adapter/tests/unit/test_exceptions_handlers.py new file mode 100644 index 00000000000..53a28bb736c --- /dev/null +++ b/services/datcore-adapter/tests/unit/test_exceptions_handlers.py @@ -0,0 +1,162 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator + +import httpx +import pytest +from botocore.exceptions import ClientError +from fastapi import FastAPI, HTTPException, status +from fastapi.exceptions import RequestValidationError +from httpx import AsyncClient +from pydantic import ValidationError +from pytest_simcore.helpers.httpx_assert_checks import assert_status +from simcore_service_datcore_adapter.errors.handlers import set_exception_handlers + + +@pytest.fixture +def initialized_app() -> FastAPI: + app = FastAPI() + set_exception_handlers(app) + return app + + +@pytest.fixture +async def client(initialized_app: FastAPI) -> AsyncIterator[AsyncClient]: + async with AsyncClient( + transport=httpx.ASGITransport(app=initialized_app), + base_url="http://test", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.mark.parametrize( + "exception, status_code", + [ + ( + ClientError( + { + "Status": "pytest status", + "StatusReason": "pytest", + "Error": { + "Code": "NotAuthorizedException", + "Message": "pytest message", + }, + }, + operation_name="pytest operation", + ), + status.HTTP_401_UNAUTHORIZED, + ), + ( + ClientError( + { + "Status": "pytest status", + "StatusReason": "pytest", + "Error": { + "Code": "Whatever", + "Message": "pytest message", + }, + }, + operation_name="pytest operation", + ), + status.HTTP_500_INTERNAL_SERVER_ERROR, + ), + ( + NotImplementedError("pytest not implemented error"), + status.HTTP_501_NOT_IMPLEMENTED, + ), + ], + ids=str, +) +async def test_exception_handlers( + initialized_app: FastAPI, + client: AsyncClient, 
+ exception: Exception, + status_code: int, +): + @initialized_app.get("/test") + async def test_endpoint(): + raise exception + + response = await client.get("/test") + assert_status( + response, + status_code, + None, + expected_msg=f"{exception}".replace("(", "\\(").replace(")", "\\)"), + ) + + +async def test_generic_http_exception_handler( + initialized_app: FastAPI, client: AsyncClient +): + @initialized_app.get("/test") + async def test_endpoint(): + raise HTTPException(status_code=status.HTTP_410_GONE) + + response = await client.get("/test") + assert_status(response, status.HTTP_410_GONE, None, expected_msg="Gone") + + +async def test_request_validation_error_handler( + initialized_app: FastAPI, client: AsyncClient +): + _error_msg = "pytest request validation error" + + @initialized_app.get("/test") + async def test_endpoint(): + raise RequestValidationError(errors=[_error_msg]) + + response = await client.get("/test") + assert_status( + response, + status.HTTP_422_UNPROCESSABLE_ENTITY, + None, + expected_msg=_error_msg, + ) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + + +async def test_validation_error_handler(initialized_app: FastAPI, client: AsyncClient): + _error_msg = "pytest request validation error" + + @initialized_app.get("/test") + async def test_endpoint(): + raise ValidationError.from_exception_data( + _error_msg, + line_errors=[], + ) + + response = await client.get("/test") + assert_status( + response, + status.HTTP_500_INTERNAL_SERVER_ERROR, + None, + expected_msg=f"0 validation errors for {_error_msg}", + ) + + +@pytest.mark.xfail( + reason="Generic exception handler is not working as expected as shown in https://github.com/ITISFoundation/osparc-simcore/blob/5732a12e07e63d5ce55010ede9b9ab543bb9b278/packages/service-library/tests/fastapi/test_exceptions_utils.py" +) +async def test_generic_exception_handler(initialized_app: FastAPI, client: AsyncClient): + _error_msg = "Generic pytest exception" + + @initialized_app.get("/test") + async def test_endpoint(): + raise Exception( # pylint: disable=broad-exception-raised # noqa: TRY002 + _error_msg + ) + + response = await client.get("/test") + assert_status( + response, + status.HTTP_500_INTERNAL_SERVER_ERROR, + None, + expected_msg=_error_msg, + ) diff --git a/services/datcore-adapter/tests/unit/test_route_datasets.py b/services/datcore-adapter/tests/unit/test_route_datasets.py index 2c9c98b20f4..afa4c7e1769 100644 --- a/services/datcore-adapter/tests/unit/test_route_datasets.py +++ b/services/datcore-adapter/tests/unit/test_route_datasets.py @@ -3,22 +3,37 @@ # pylint:disable=redefined-outer-name -from typing import Optional - import httpx import respx -from fastapi_pagination import Page -from pydantic import parse_obj_as -from simcore_service_datcore_adapter.models.schemas.datasets import ( +from fastapi_pagination import LimitOffsetPage +from models_library.api_schemas_datcore_adapter.datasets import ( DatasetMetaData, FileMetaData, ) +from pydantic import TypeAdapter from starlette import status +async def test_get_dataset_entrypoint( + async_client: httpx.AsyncClient, + pennsieve_dataset_id: str, + pennsieve_subsystem_mock: respx.MockRouter | None, + pennsieve_api_headers: dict[str, str], +): + response = await async_client.get( + f"v0/datasets/{pennsieve_dataset_id}", + headers=pennsieve_api_headers, + ) + + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + TypeAdapter(DatasetMetaData).validate_python(data) + + async def 
test_list_datasets_entrypoint( async_client: httpx.AsyncClient, - pennsieve_subsystem_mock: Optional[respx.MockRouter], + pennsieve_subsystem_mock: respx.MockRouter | None, pennsieve_api_headers: dict[str, str], ): response = await async_client.get( @@ -29,7 +44,7 @@ async def test_list_datasets_entrypoint( assert response.status_code == status.HTTP_200_OK data = response.json() assert data - parse_obj_as(Page[DatasetMetaData], data) + TypeAdapter(LimitOffsetPage[DatasetMetaData]).validate_python(data) async def test_list_dataset_files_legacy_entrypoint( @@ -47,7 +62,7 @@ async def test_list_dataset_files_legacy_entrypoint( assert response.status_code == status.HTTP_200_OK data = response.json() assert data - parse_obj_as(list[FileMetaData], data) + TypeAdapter(list[FileMetaData]).validate_python(data) async def test_list_dataset_top_level_files_entrypoint( @@ -65,7 +80,7 @@ async def test_list_dataset_top_level_files_entrypoint( assert response.status_code == status.HTTP_200_OK data = response.json() assert data - parse_obj_as(Page[FileMetaData], data) + TypeAdapter(LimitOffsetPage[FileMetaData]).validate_python(data) async def test_list_dataset_collection_files_entrypoint( @@ -85,4 +100,4 @@ async def test_list_dataset_collection_files_entrypoint( assert response.status_code == status.HTTP_200_OK data = response.json() assert data - parse_obj_as(Page[FileMetaData], data) + TypeAdapter(LimitOffsetPage[FileMetaData]).validate_python(data) diff --git a/services/datcore-adapter/tests/unit/test_route_files.py b/services/datcore-adapter/tests/unit/test_route_files.py index e32bb15edd8..1a083d71daa 100644 --- a/services/datcore-adapter/tests/unit/test_route_files.py +++ b/services/datcore-adapter/tests/unit/test_route_files.py @@ -5,8 +5,8 @@ from unittest.mock import Mock import httpx -from pydantic import parse_obj_as -from simcore_service_datcore_adapter.models.domains.files import FileDownloadOut +from pydantic import TypeAdapter +from simcore_service_datcore_adapter.models.files import FileDownloadOut from starlette import status @@ -23,7 +23,7 @@ async def test_download_file_entrypoint( assert response.status_code == status.HTTP_200_OK data = response.json() assert data - parse_obj_as(FileDownloadOut, data) + TypeAdapter(FileDownloadOut).validate_python(data) async def test_delete_file_entrypoint( @@ -38,3 +38,20 @@ async def test_delete_file_entrypoint( ) assert response.status_code == status.HTTP_204_NO_CONTENT assert response.num_bytes_downloaded == 0 + + +async def test_package_file_entrypoint( + async_client: httpx.AsyncClient, + pennsieve_subsystem_mock: Mock, + pennsieve_api_headers: dict[str, str], + pennsieve_file_id: str, +): + response = await async_client.get( + f"v0/packages/{pennsieve_file_id}/files", + headers=pennsieve_api_headers, + params={"limit": 1, "offset": 0}, + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + assert len(data) == 1 diff --git a/services/datcore-adapter/tests/unit/test_route_health.py b/services/datcore-adapter/tests/unit/test_route_health.py index 7ab697612c4..65f04aece0c 100644 --- a/services/datcore-adapter/tests/unit/test_route_health.py +++ b/services/datcore-adapter/tests/unit/test_route_health.py @@ -17,8 +17,7 @@ async def test_live_entrypoint(async_client: httpx.AsyncClient): assert response.text assert datetime.fromisoformat(response.text.split("@")[1]) assert ( - response.text.split("@")[0] - == "simcore_service_datcore_adapter.api.routes.health" + response.text.split("@")[0] == 
"simcore_service_datcore_adapter.api.rest.health" ) @@ -31,7 +30,7 @@ async def test_check_subsystem_health(async_client: httpx.AsyncClient): assert pennsieve_health_route.called assert response.status_code == status.HTTP_200_OK - app_status = AppStatusCheck.parse_obj(response.json()) + app_status = AppStatusCheck.model_validate(response.json()) assert app_status assert app_status.app_name == "simcore-service-datcore-adapter" assert app_status.services == {"pennsieve": True} @@ -43,7 +42,7 @@ async def test_check_subsystem_health(async_client: httpx.AsyncClient): assert pennsieve_health_route.called assert response.status_code == status.HTTP_200_OK - app_status = AppStatusCheck.parse_obj(response.json()) + app_status = AppStatusCheck.model_validate(response.json()) assert app_status assert app_status.app_name == "simcore-service-datcore-adapter" assert app_status.services == {"pennsieve": False} diff --git a/services/director-v2/.env-devel b/services/director-v2/.env-devel index ef0c1bc55ae..33425caf303 100644 --- a/services/director-v2/.env-devel +++ b/services/director-v2/.env-devel @@ -6,13 +6,16 @@ # # Variables directly define inside Dockerfile -SC_BOOT_MODE=debug-ptvsd +SC_BOOT_MODE=debug # Variables typically passed upon start via services/docker-compose.yml files -EXTRA_HOSTS_SUFFIX=undefined + +COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL=tcp://dask-scheduler:8786 +COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH='{}' DYNAMIC_SIDECAR_IMAGE=local/dynamic-sidecar:development +DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS={} # old director DIRECTOR_HOST=director @@ -22,9 +25,11 @@ DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID=1234 DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME=1234 DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME=filename +DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS='{}' + LOG_LEVEL=DEBUG -POSTGRES_ENDPOINT=${POSTGRES_ENDPOINT} +POSTGRES_ENDPOINT=postgres:5432 POSTGRES_USER=test POSTGRES_PASSWORD=test POSTGRES_DB=test @@ -33,6 +38,7 @@ POSTGRES_PORT=5432 RABBIT_HOST=rabbit RABBIT_USER=admin +RABBIT_SECURE=false RABBIT_PASSWORD=adminadmin REGISTRY_AUTH=True @@ -40,19 +46,22 @@ REGISTRY_PW=adminadmin REGISTRY_SSL=True REGISTRY_URL=registry.osparc-master.speag.com REGISTRY_USER=admin +DIRECTOR_V2_DOCKER_HUB_REGISTRY=null SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet SWARM_STACK_NAME=simcore # S3 configuration -S3_ENDPOINT=172.17.0.1:9001 +S3_ENDPOINT=http://172.17.0.1:9001 S3_ACCESS_KEY=12345678 +S3_REGION=us-east-1 S3_SECRET_KEY=12345678 S3_BUCKET_NAME=simcore -S3_SECURE=0 R_CLONE_PROVIDER=MINIO +R_CLONE_OPTION_TRANSFERS=5 +R_CLONE_OPTION_RETRIES=3 +R_CLONE_OPTION_BUFFER_SIZE=16M -TRACING_ENABLED=True -TRACING_ZIPKIN_ENDPOINT=http://jaeger:9411 +TRACING_OBSERVABILITY_BACKEND_ENDPOINT=http://jaeger:9411 TRAEFIK_SIMCORE_ZONE=internal_simcore_stack diff --git a/services/director-v2/Dockerfile b/services/director-v2/Dockerfile index 14f1bc683a6..858cab03760 100644 --- a/services/director-v2/Dockerfile +++ b/services/director-v2/Dockerfile @@ -1,5 +1,18 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM 
python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: @@ -11,12 +24,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=pcrespov -RUN set -eux && \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ apt-get update && \ - apt-get install -y --no-install-recommends gosu && \ - rm -rf /var/lib/apt/lists/* && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -52,32 +71,34 @@ EXPOSE 3000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/director-v2/requirements/_base.txt . 
-RUN pip --no-cache-dir install -r _base.txt + # --------------------------Prod-depends-only stage ------------------- @@ -86,17 +107,19 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/director-v2 [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/director-v2 /build/services/director-v2 +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/director-v2 -RUN pip3 --no-cache-dir install -r requirements/prod.txt \ - && pip3 --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/director-v2,target=/build/services/director-v2,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -106,15 +129,19 @@ RUN pip3 --no-cache-dir install -r requirements/prod.txt \ # + /home/scu $HOME = WORKDIR # + services/director-v2 [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -123,10 +150,13 @@ COPY --chown=scu:scu services/director-v2/docker services/director-v2/docker RUN chmod +x services/director-v2/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/director-v2/docker/healthcheck.py", "http://localhost:8000/"] ENTRYPOINT [ "/bin/sh", "services/director-v2/docker/entrypoint.sh" ] @@ -141,7 +171,7 @@ CMD ["/bin/sh", "services/director-v2/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development \ SC_DEVEL_MOUNT=/devel/services/director-v2 diff --git a/services/director-v2/Makefile b/services/director-v2/Makefile index 01b22e17ba7..030084bcb4e 100644 --- a/services/director-v2/Makefile +++ b/services/director-v2/Makefile @@ -52,14 +52,14 @@ REGISTRY_NAMESPACE=$(subst http://,,${REGISTRY_URL}) up-extra: .env down # starts pg stack fixture # resolving $@ under environ $< - docker-compose -f docker-compose-extra.yml config + docker compose -f docker-compose-extra.yml config # starting database stack as defined in ... 
- docker-compose -f docker-compose-extra.yml up --detach + docker compose -f docker-compose-extra.yml up --detach down down-extra: ## stops extra stack # stopping extra services - -@docker-compose -f docker-compose-extra.yml down + -@docker compose -f docker-compose-extra.yml down diff --git a/services/director-v2/README.md b/services/director-v2/README.md index 1afd78f0356..29bd8832d3d 100644 --- a/services/director-v2/README.md +++ b/services/director-v2/README.md @@ -1,38 +1,8 @@ # director-v2 -[![image-size]](https://microbadger.com/images/itisfoundation/director-v2 "More on itisfoundation/director-v2.:staging-latest image") - -[![image-badge]](https://microbadger.com/images/itisfoundation/director-v2 "More on director-v2 image in registry") -[![image-version]](https://microbadger.com/images/itisfoundation/director-v2 "More on director-v2 image in registry") -[![image-commit]](https://microbadger.com/images/itisfoundation/director-v2 "More on director-v2 image in registry") - Director service in simcore stack - -[image-size]:https://img.shields.io/microbadger/image-size/itisfoundation/director-v2./staging-latest.svg?label=director-v2.&style=flat -[image-badge]:https://images.microbadger.com/badges/image/itisfoundation/director-v2.svg -[image-version]https://images.microbadger.com/badges/version/itisfoundation/director-v2.svg -[image-commit]:https://images.microbadger.com/badges/commit/itisfoundation/director-v2.svg - - ## Development -Setup environment - -```cmd -make devenv -source .venv/bin/activate -cd services/director-v2 -make install-dev -make info -``` - -Then - -```cmd -make run-devel -``` - -The latter will start the director-v2 service in development-mode together with a postgres db initialized with test data. Open the following sites and use the test credentials ``user=key, password=secret`` to manually test the API: - -- http://127.0.0.1:8000/dev/doc: swagger type of documentation +Since services are often heavily interconnected, it's best to build and run the entire osparc repo in the development mode. +Instruction can be found in the [development build](../../README.md#development-build) section of the main README. 
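Note on the Makefile hunk above: the targets move from the legacy standalone `docker-compose` binary to the `docker compose` CLI plugin. A minimal, illustrative sketch of the plugin-style invocation for the extra stack used during director-v2 development (the compose file name and service set are taken from the targets and compose file in this diff; this sketch is not itself part of the PR):

```cmd
# validate / resolve the extra stack definition
docker compose -f docker-compose-extra.yml config

# start the extra stack (postgres, rabbit) in the background
docker compose -f docker-compose-extra.yml up --detach

# tear it down again
docker compose -f docker-compose-extra.yml down
```

The commands are equivalent to the former `docker-compose ...` calls; only the invocation of the Compose tooling changes.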
diff --git a/services/director-v2/VERSION b/services/director-v2/VERSION index ccbccc3dc62..276cbf9e285 100644 --- a/services/director-v2/VERSION +++ b/services/director-v2/VERSION @@ -1 +1 @@ -2.2.0 +2.3.0 diff --git a/services/director-v2/docker-compose-extra.yml b/services/director-v2/docker-compose-extra.yml index 835409c3775..5923b000384 100644 --- a/services/director-v2/docker-compose-extra.yml +++ b/services/director-v2/docker-compose-extra.yml @@ -1,4 +1,3 @@ -version: "3.8" services: postgres: image: postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce @@ -25,7 +24,7 @@ services: "log_line_prefix=[%p] [%a] [%c] [%x] " ] rabbit: - image: itisfoundation/rabbitmq:3.11.2-management + image: itisfoundation/rabbitmq:3.13.7-management init: true environment: - RABBITMQ_DEFAULT_USER=${RABBIT_USER} diff --git a/services/director-v2/docker/boot.sh b/services/director-v2/docker/boot.sh index 99b049bb4ad..1af7ab240de 100755 --- a/services/director-v2/docker/boot.sh +++ b/services/director-v2/docker/boot.sh @@ -23,11 +23,20 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/director-v2 || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/director-v2 + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # @@ -37,13 +46,12 @@ APP_LOG_LEVEL=${DIRECTOR_V2_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" - -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/director-v2/src/simcore_service_director_v2 && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${DIRECTOR_V2_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/director-v2/docker/entrypoint.sh b/services/director-v2/docker/entrypoint.sh index 5c7fa62cd17..42995c6ae78 100755 --- a/services/director-v2/docker/entrypoint.sh +++ b/services/director-v2/docker/entrypoint.sh @@ -27,7 +27,6 @@ echo "$INFO" "User : $(id scu)" echo "$INFO" "python : $(command -v python)" echo "$INFO" "pip : $(command -v pip)" - # # DEVELOPMENT MODE # - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT @@ -71,27 +70,20 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock -if stat $DOCKER_MOUNT > /dev/null 2>&1 -then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker - if ! 
addgroup --gid "$GROUPID" $GROUPNAME > /dev/null 2>&1 - then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." + # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" fi echo "$INFO Starting $* ..." diff --git a/services/director-v2/docker/healthcheck.py b/services/director-v2/docker/healthcheck.py old mode 100644 new mode 100755 index 8df0bcbd649..87f59876ed6 --- a/services/director-v2/docker/healthcheck.py +++ b/services/director-v2/docker/healthcheck.py @@ -6,15 +6,17 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: 1. why not to use curl instead of a python script? - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ """ + import os import sys from urllib.request import urlopen diff --git a/services/director-v2/openapi.json b/services/director-v2/openapi.json index 392b055bbf4..5f57b836564 100644 --- a/services/director-v2/openapi.json +++ b/services/director-v2/openapi.json @@ -1,9 +1,9 @@ { - "openapi": "3.0.2", + "openapi": "3.1.0", "info": { "title": "simcore-service-director-v2", - "description": " Orchestrates the pipeline of services defined by the user", - "version": "2.2.0" + "description": "Orchestrates the pipeline of services defined by the user", + "version": "2.3.0" }, "servers": [ { @@ -33,7 +33,9 @@ "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "$ref": "#/components/schemas/HealthCheckGet" + } } } } @@ -50,7 +52,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Meta" + "$ref": "#/components/schemas/BaseMeta" } } } @@ -58,86 +60,85 @@ } } }, - "/v0/services": { - "get": { + "/v2/computations": { + "post": { "tags": [ - "services" - ], - "summary": "List Services", - "description": "Lists services available in the deployed registry", - "operationId": "list_services_v0_services_get", - "parameters": [ - { - "description": "The service type:\n - computational - a computational service\n - interactive - an interactive service\n", - "required": false, - "schema": { - "allOf": [ - { - "$ref": "#/components/schemas/ServiceType" - } - ], - "description": "The service type:\n - computational - a computational service\n - interactive - an interactive service\n" - }, - "name": "service_type", - "in": "query" - } + "computations" ], + "summary": "Create Computation", + "description": "Create and optionally start a new computation", + "operationId": "create_computation_v2_computations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ComputationCreate" + } + } + }, + "required": true + }, "responses": { - "200": { + "201": { "description": 
"Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ServicesArrayEnveloped" + "$ref": "#/components/schemas/ComputationGet" } } } }, + "404": { + "description": "Project or pricing details not found" + }, + "406": { + "description": "Cluster not found" + }, + "503": { + "description": "Service not available" + }, "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } + "description": "Configuration error" + }, + "402": { + "description": "Payment required" + }, + "409": { + "description": "Project already started" } } } }, - "/v0/services/{service_key}/{service_version}/extras": { + "/v2/computations/{project_id}": { "get": { "tags": [ - "services" + "computations" ], - "summary": "Get Extra Service Versioned", - "description": "Returns the service extras", - "operationId": "get_extra_service_versioned_v0_services__service_key___service_version__extras_get", + "summary": "Get Computation", + "description": "Returns a computation pipeline state", + "operationId": "get_computation_v2_computations__project_id__get", "parameters": [ { - "description": "Distinctive name for the node based on the docker registry path", + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", "type": "string", - "description": "Distinctive name for the node based on the docker registry path" - }, - "name": "service_key", - "in": "path" + "format": "uuid", + "title": "Project Id" + } }, { - "description": "The tag/version of the service", + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "description": "The tag/version of the service" - }, - "name": "service_version", - "in": "path" + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + } } ], "responses": { @@ -146,7 +147,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ServiceExtrasEnveloped" + "$ref": "#/components/schemas/ComputationGet" } } } @@ -162,49 +163,90 @@ } } } - } - }, - "/v0/services/{service_key}/{service_version}": { - "get": { + }, + "delete": { "tags": [ - "services" + "computations" ], - "summary": "Get Service Versioned", - "description": "Returns details of the selected service if available in the platform", - "operationId": "get_service_versioned_v0_services__service_key___service_version__get", + "summary": "Delete Computation", + "description": "Deletes a computation pipeline", + "operationId": "delete_computation_v2_computations__project_id__delete", "parameters": [ { - "description": "Distinctive name for the node based on the docker registry path", + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", "type": "string", - "description": "Distinctive name for the node based on the docker registry path" - }, - "name": "service_key", - "in": "path" + "format": "uuid", + "title": "Project Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ComputationDelete" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/computations/{project_id}:stop": { + "post": { + "tags": [ + "computations" + ], + "summary": "Stop Computation", + "description": "Stops a computation pipeline", + "operationId": "stop_computation_v2_computations__project_id__stop_post", + "parameters": [ { - "description": "The tag/version of the service", + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", "type": "string", - "description": "The tag/version of the service" - }, - "name": "service_version", - "in": "path" + "format": "uuid", + "title": "Project Id" + } } ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ComputationStop" + } + } + } + }, "responses": { - "200": { + "202": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ServicesArrayEnveloped" + "$ref": "#/components/schemas/ComputationGet" } } } @@ -222,36 +264,35 @@ } } }, - "/v0/running_interactive_services": { + "/v2/computations/{project_id}/tasks/-/logfile": { "get": { "tags": [ - "services" + "computations" ], - "summary": "List Running Interactive Services", - "description": "Lists of running interactive services", - "operationId": "list_running_interactive_services_v0_running_interactive_services_get", + "summary": "Gets computation task logs file after is done", + "description": "Returns download links to log-files of each task in a computation.\nEach log is only available when the corresponding task is done", + "operationId": "get_all_tasks_log_files_v2_computations__project_id__tasks___logfile_get", "parameters": [ { - "description": "The ID of the user that starts the service", + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "User Id", "type": "string", - "description": "The ID of the user that starts the service" - }, - "name": "user_id", - "in": "query" + "format": "uuid", + "title": "Project Id" + } }, { - "description": "The ID of the project in which the service starts", + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "Project Id", - "type": "string", - "description": "The ID of the project in which the service starts" - }, - "name": "project_id", - "in": "query" + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + } } ], "responses": { @@ -260,7 +301,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RunningServicesDetailsArrayEnveloped" + "type": "array", + "items": { + "$ref": "#/components/schemas/TaskLogFileGet" + }, + "title": "Response Get All Tasks Log Files V2 Computations Project Id Tasks Logfile Get" } } } @@ -276,97 +321,57 @@ } } } - }, - "post": { + } + }, + "/v2/computations/{project_id}/tasks/{node_uuid}/logfile": { + "get": { "tags": [ - "services" + "computations" ], - "summary": "Start Interactive Service", - "description": "Starts an interactive service in the platform", - "operationId": 
"start_interactive_service_v0_running_interactive_services_post", + "summary": "Gets computation task logs file after is done", + "description": "Returns a link to download logs file of a give task.\nThe log is only available when the task is done", + "operationId": "get_task_log_file_v2_computations__project_id__tasks__node_uuid__logfile_get", "parameters": [ { - "description": "The ID of the user that starts the service", - "required": true, - "schema": { - "title": "User Id", - "type": "string", - "description": "The ID of the user that starts the service" - }, - "name": "user_id", - "in": "query" - }, - { - "description": "The ID of the project in which the service starts", - "required": true, - "schema": { - "title": "Project Id", - "type": "string", - "description": "The ID of the project in which the service starts" - }, "name": "project_id", - "in": "query" - }, - { - "description": "distinctive name for the node based on the docker registry path", + "in": "path", "required": true, "schema": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", "type": "string", - "description": "distinctive name for the node based on the docker registry path" - }, - "example": [ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer" - ], - "name": "service_key", - "in": "query" + "format": "uuid", + "title": "Project Id" + } }, { - "description": "The tag/version of the service", + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Service Tag", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", "type": "string", - "description": "The tag/version of the service" - }, - "example": "1.0.0", - "name": "service_tag", - "in": "query" + "format": "uuid", + "title": "Node Uuid" + } }, { - "description": "The uuid to assign the service with", + "name": "user_id", + "in": "query", "required": true, "schema": { - "title": "Service Uuid", - "type": "string", - "description": "The uuid to assign the service with" - }, - "name": "service_uuid", - "in": "query" - }, - { - "description": "predefined basepath for the backend service otherwise uses root", - "required": false, - "schema": { - "title": "Service Base Path", - "type": "string", - "description": "predefined basepath for the backend service otherwise uses root", - "default": "" - }, - "example": "/x/EycCXbU0H/", - "name": "service_base_path", - "in": "query" + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + } } ], "responses": { - "201": { + "200": { "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "$ref": "#/components/schemas/TaskLogFileGet" + } } } }, @@ -383,69 +388,49 @@ } } }, - "/v0/running_interactive_services/{service_uuid}": { - "delete": { + "/v2/computations/{project_id}/tasks/-/outputs:batchGet": { + "post": { "tags": [ - "services" + "computations" ], - "summary": "Stop Interactive Service", - "operationId": "stop_interactive_service_v0_running_interactive_services__service_uuid__delete", + "summary": "Gets all outputs for selected tasks", + "operationId": "get_batch_tasks_outputs_v2_computations__project_id__tasks___outputs_batchGet_post", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Service Uuid", - "type": "string" - }, - "name": "service_uuid", - "in": "path" - } 
- ], - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } + "type": "string", + "format": "uuid", + "title": "Project Id" } } - } - } - }, - "/v2/computations": { - "post": { - "tags": [ - "computations" ], - "summary": "Create and optionally start a new computation", - "operationId": "create_computation_v2_computations_post", "requestBody": { + "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ComputationCreate" + "$ref": "#/components/schemas/TasksSelection" } } - }, - "required": true + } }, "responses": { - "201": { + "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ComputationGet" + "$ref": "#/components/schemas/TasksOutputs" } } } }, + "404": { + "description": "Cannot find computation or the tasks in it" + }, "422": { "description": "Validation Error", "content": { @@ -459,34 +444,48 @@ } } }, - "/v2/computations/{project_id}": { + "/v2/dynamic_services": { "get": { "tags": [ - "computations" + "dynamic services" ], - "summary": "Returns a computation pipeline state", - "operationId": "get_computation_v2_computations__project_id__get", + "summary": "returns a list of running interactive services filtered by user_id and/or project_idboth legacy (director-v0) and modern (director-v2)", + "operationId": "list_tracked_dynamic_services_v2_dynamic_services_get", "parameters": [ { - "required": true, + "name": "user_id", + "in": "query", + "required": false, "schema": { - "title": "Project Id", - "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "User Id" + } }, { - "required": true, + "name": "project_id", + "in": "query", + "required": false, "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Project Id" + } } ], "responses": { @@ -495,7 +494,11 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ComputationGet" + "type": "array", + "items": { + "$ref": "#/components/schemas/RunningDynamicServiceDetails" + }, + "title": "Response List Tracked Dynamic Services V2 Dynamic Services Get" } } } @@ -512,87 +515,58 @@ } } }, - "delete": { + "post": { "tags": [ - "computations" + "dynamic services" ], - "summary": "Deletes a computation pipeline", - "operationId": "delete_computation_v2_computations__project_id__delete", + "summary": "creates & starts the dynamic service", + "operationId": "create_dynamic_service_v2_dynamic_services_post", "parameters": [ { + "name": "x-dynamic-sidecar-request-dns", + "in": "header", "required": true, "schema": { - "title": "Project Id", "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ComputationDelete" - } + "title": "X-Dynamic-Sidecar-Request-Dns" } }, - "required": true - }, - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { 
- "$ref": "#/components/schemas/HTTPValidationError" - } - } + { + "name": "x-dynamic-sidecar-request-scheme", + "in": "header", + "required": true, + "schema": { + "type": "string", + "title": "X-Dynamic-Sidecar-Request-Scheme" } - } - } - } - }, - "/v2/computations/{project_id}:stop": { - "post": { - "tags": [ - "computations" - ], - "summary": "Stops a computation pipeline", - "operationId": "stop_computation_v2_computations__project_id__stop_post", - "parameters": [ + }, { + "name": "x-simcore-user-agent", + "in": "header", "required": true, "schema": { - "title": "Project Id", "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" + "title": "X-Simcore-User-Agent" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ComputationStop" + "$ref": "#/components/schemas/DynamicServiceCreate" } } - }, - "required": true + } }, "responses": { - "202": { + "201": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ComputationGet" + "$ref": "#/components/schemas/RunningDynamicServiceDetails" } } } @@ -610,35 +584,23 @@ } } }, - "/v2/computations/{project_id}/tasks/-/logfile": { + "/v2/dynamic_services/{node_uuid}": { "get": { "tags": [ - "computations" + "dynamic services" ], - "summary": "Gets computation task logs file after is done", - "description": "Returns download links to log-files of each task in a computation.\nEach log is only available when the corresponding task is done", - "operationId": "get_all_tasks_log_files_v2_computations__project_id__tasks___logfile_get", + "summary": "assembles the status for the dynamic-sidecar", + "operationId": "get_dynamic_sidecar_status_v2_dynamic_services__node_uuid__get", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Project Id", "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { @@ -647,11 +609,7 @@ "content": { "application/json": { "schema": { - "title": "Response Get All Tasks Log Files V2 Computations Project Id Tasks Logfile Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/TaskLogFileGet" - } + "$ref": "#/components/schemas/RunningDynamicServiceDetails" } } } @@ -667,59 +625,45 @@ } } } - } - }, - "/v2/computations/{project_id}/tasks/{node_uuid}/logfile": { - "get": { + }, + "delete": { "tags": [ - "computations" + "dynamic services" ], - "summary": "Gets computation task logs file after is done", - "description": "Returns a link to download logs file of a give task.\nThe log is only available when the task is done", - "operationId": "get_task_log_file_v2_computations__project_id__tasks__node_uuid__logfile_get", + "summary": "stops previously spawned dynamic-sidecar", + "operationId": "stop_dynamic_service_v2_dynamic_services__node_uuid__delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Project Id", - "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "Node Uuid", "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" + "format": "uuid", + "title": "Node Uuid" + } }, { - "required": 
true, + "name": "can_save", + "in": "query", + "required": false, "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": true, + "title": "Can Save" + } } ], "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/TaskLogFileGet" - } - } - } + "204": { + "description": "Successful Response" }, "422": { "description": "Validation Error", @@ -734,47 +678,42 @@ } } }, - "/v2/dynamic_services": { - "get": { + "/v2/dynamic_services/{node_uuid}:retrieve": { + "post": { "tags": [ "dynamic services" ], - "summary": "returns a list of running interactive services filtered by user_id and/or project_idboth legacy (director-v0) and modern (director-v2)", - "operationId": "list_tracked_dynamic_services_v2_dynamic_services_get", + "summary": "Calls the dynamic service's retrieve endpoint with optional port_keys", + "operationId": "service_retrieve_data_on_ports_v2_dynamic_services__node_uuid__retrieve_post", "parameters": [ { - "required": false, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - }, - { - "required": false, + "name": "node_uuid", + "in": "path", + "required": true, "schema": { - "title": "Project Id", "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "query" + "format": "uuid", + "title": "Node Uuid" + } } ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RetrieveDataIn" + } + } + } + }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "title": "Response List Tracked Dynamic Services V2 Dynamic Services Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/RunningDynamicServiceDetails" - } + "$ref": "#/components/schemas/RetrieveDataOutEnveloped" } } } @@ -790,53 +729,30 @@ } } } - }, + } + }, + "/v2/dynamic_services/{node_uuid}:restart": { "post": { "tags": [ "dynamic services" ], - "summary": "creates & starts the dynamic service", - "operationId": "create_dynamic_service_v2_dynamic_services_post", + "summary": "Calls the dynamic service's restart containers endpoint", + "operationId": "service_restart_containers_v2_dynamic_services__node_uuid__restart_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "X-Dynamic-Sidecar-Request-Dns", - "type": "string" - }, - "name": "x-dynamic-sidecar-request-dns", - "in": "header" - }, - { - "required": true, - "schema": { - "title": "X-Dynamic-Sidecar-Request-Scheme", - "type": "string" - }, - "name": "x-dynamic-sidecar-request-scheme", - "in": "header" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DynamicServiceCreate" - } - } - }, - "required": true - }, "responses": { - "201": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunningDynamicServiceDetails" - } - } - } + "204": { + "description": "Successful Response" }, "422": { "description": "Validation Error", @@ -851,38 +767,31 @@ } } }, - "/v2/dynamic_services/{node_uuid}": { - "get": { + 
"/v2/dynamic_services/projects/{project_id}/-/networks": { + "patch": { "tags": [ "dynamic services" ], - "summary": "assembles the status for the dynamic-sidecar", - "operationId": "get_dynamic_sidecar_status_v2_dynamic_services__node_uuid__get", + "summary": "Updates the project networks according to the current project's workbench", + "operationId": "update_projects_networks_v2_dynamic_services_projects__project_id____networks_patch", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Node Uuid", "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" + "format": "uuid", + "title": "Project Id" + } } ], "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunningDynamicServiceDetails" - } - } - } - }, - "422": { - "description": "Validation Error", + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", "content": { "application/json": { "schema": { @@ -892,33 +801,35 @@ } } } - }, - "delete": { + } + }, + "/v2/dynamic_services/projects/{project_id}/inactivity": { + "get": { "tags": [ "dynamic services" ], - "summary": "stops previously spawned dynamic-sidecar", - "operationId": "stop_dynamic_service_v2_dynamic_services__node_uuid__delete", + "summary": "returns if the project is inactive", + "operationId": "get_project_inactivity_v2_dynamic_services_projects__project_id__inactivity_get", "parameters": [ { + "name": "project_id", + "in": "path", "required": true, "schema": { - "title": "Node Uuid", "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" + "format": "uuid", + "title": "Project Id" + } }, { - "required": false, + "name": "max_inactivity_seconds", + "in": "query", + "required": true, "schema": { - "title": "Can Save", - "type": "boolean", - "default": true - }, - "name": "can_save", - "in": "query" + "type": "number", + "minimum": 0.0, + "title": "Max Inactivity Seconds" + } } ], "responses": { @@ -926,13 +837,12 @@ "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "$ref": "#/components/schemas/GetProjectInactivityResponse" + } } } }, - "204": { - "description": "No Content" - }, "422": { "description": "Validation Error", "content": { @@ -946,45 +856,38 @@ } } }, - "/v2/dynamic_services/{node_uuid}:retrieve": { - "post": { + "/v2/dynamic_scheduler/services/{node_uuid}/observation": { + "patch": { "tags": [ - "dynamic services" + "dynamic scheduler" ], - "summary": "Calls the dynamic service's retrieve endpoint with optional port_keys", - "operationId": "service_retrieve_data_on_ports_v2_dynamic_services__node_uuid__retrieve_post", + "summary": "Enable/disable observation of the service", + "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Node Uuid", "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" + "format": "uuid", + "title": "Node Uuid" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RetrieveDataIn" + "$ref": "#/components/schemas/ObservationItem" } } - }, - "required": true + } }, "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/RetrieveDataOutEnveloped" - } - } - } + "204": { + "description": "Successful Response" }, "422": { "description": "Validation Error", @@ -999,28 +902,39 @@ } } }, - "/v2/dynamic_services/{node_uuid}:restart": { - "post": { + "/v2/dynamic_scheduler/services/{node_uuid}/containers": { + "delete": { "tags": [ - "dynamic services" + "dynamic scheduler" ], - "summary": "Calls the dynamic service's restart containers endpoint", - "operationId": "service_restart_containers_v2_dynamic_services__node_uuid__restart_post", + "summary": "Removes the service's user services", + "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Node Uuid", "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { - "204": { - "description": "Successful Response" + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string", + "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete" + } + } + } + }, + "409": { + "description": "Task already running, cannot start a new one" }, "422": { "description": "Validation Error", @@ -1035,28 +949,35 @@ } } }, - "/v2/dynamic_services/projects/{project_id}/-/networks": { - "patch": { + "/v2/dynamic_scheduler/services/{node_uuid}/state": { + "get": { "tags": [ - "dynamic services" + "dynamic scheduler" ], - "summary": "Updates the project networks according to the current project's workbench", - "operationId": "update_projects_networks_v2_dynamic_services_projects__project_id____networks_patch", + "summary": "Returns the internals of the scheduler for the given service", + "operationId": "get_service_state_v2_dynamic_scheduler_services__node_uuid__state_get", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Project Id", "type": "string", - "format": "uuid" - }, - "name": "project_id", - "in": "path" + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { - "204": { - "description": "Successful Response" + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SchedulerData" + } + } + } }, "422": { "description": "Validation Error", @@ -1071,41 +992,40 @@ } } }, - "/v2/clusters": { - "get": { + "/v2/dynamic_scheduler/services/{node_uuid}/state:save": { + "post": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Lists clusters for user", - "operationId": "list_clusters_v2_clusters_get", + "summary": "Starts the saving of the state for the service", + "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { - "200": { + "202": { "description": "Successful Response", "content": { "application/json": { "schema": { - "title": "Response List Clusters V2 Clusters Get", - "type": "array", - "items": { - "$ref": "#/components/schemas/ClusterGet" - } + "type": "string", + "title": "Response Save Service State V2 Dynamic Scheduler Services Node 
Uuid State Save Post" } } } }, + "409": { + "description": "Task already running, cannot start a new one" + }, "422": { "description": "Validation Error", "content": { @@ -1117,47 +1037,42 @@ } } } - }, + } + }, + "/v2/dynamic_scheduler/services/{node_uuid}/outputs:push": { "post": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Create a new cluster for a user", - "operationId": "create_cluster_v2_clusters_post", + "summary": "Starts the pushing of the outputs for the service", + "operationId": "push_service_outputs_v2_dynamic_scheduler_services__node_uuid__outputs_push_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterCreate" - } - } - }, - "required": true - }, "responses": { - "201": { + "202": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ClusterGet" + "type": "string", + "title": "Response Push Service Outputs V2 Dynamic Scheduler Services Node Uuid Outputs Push Post" } } } }, + "409": { + "description": "Task already running, cannot start a new one" + }, "422": { "description": "Validation Error", "content": { @@ -1171,68 +1086,40 @@ } } }, - "/v2/clusters/default": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the default cluster", - "operationId": "get_default_cluster_v2_clusters_default_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterGet" - } - } - } - } - } - } - }, - "/v2/clusters/{cluster_id}": { - "get": { + "/v2/dynamic_scheduler/services/{node_uuid}/docker-resources": { + "delete": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Get one cluster for user", - "operationId": "get_cluster_v2_clusters__cluster_id__get", + "summary": "Removes the service's sidecar, proxy and docker networks & volumes", + "operationId": "delete_service_docker_resources_v2_dynamic_scheduler_services__node_uuid__docker_resources_delete", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { - "200": { + "202": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ClusterGet" + "type": "string", + "title": "Response Delete Service Docker Resources V2 Dynamic Scheduler Services Node Uuid Docker Resources Delete" } } } }, + "409": { + "description": "Task already running, cannot start a new one" + }, "422": { "description": "Validation Error", "content": { @@ -1244,34 +1131,25 @@ } } } - }, - "delete": { + } + }, + "/v2/dynamic_scheduler/services/{node_uuid}/disk/reserved:free": { + "post": { "tags": [ - "clusters" + "dynamic scheduler" ], - "summary": "Remove a cluster for user", - "operationId": "delete_cluster_v2_clusters__cluster_id__delete", + "summary": 
"Free up reserved disk space", + "operationId": "free_reserved_disk_space_v2_dynamic_scheduler_services__node_uuid__disk_reserved_free_post", "parameters": [ { + "name": "node_uuid", + "in": "path", "required": true, "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" + "type": "string", + "format": "uuid", + "title": "Node Uuid" + } } ], "responses": { @@ -1289,1179 +1167,716 @@ } } } - }, - "patch": { - "tags": [ - "clusters" - ], - "summary": "Modify a cluster for user", - "operationId": "update_cluster_v2_clusters__cluster_id__patch", - "parameters": [ - { - "required": true, - "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "path" + } + } + }, + "components": { + "schemas": { + "BaseMeta": { + "properties": { + "name": { + "type": "string", + "title": "Name" }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterPatch" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/default/details": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the cluster details", - "operationId": "get_default_cluster_details_v2_clusters_default_details_get", - "parameters": [ - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterDetailsGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/{cluster_id}/details": { - "get": { - "tags": [ - "clusters" - ], - "summary": "Returns the cluster details", - "operationId": "get_cluster_details_v2_clusters__cluster_id__details_get", - "parameters": [ - { - "required": true, - "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterDetailsGet" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster 
connection", - "operationId": "test_cluster_connection_v2_clusters_ping_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ClusterPing" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/clusters/default:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster connection", - "operationId": "test_default_cluster_connection_v2_clusters_default_ping_post", - "responses": { - "204": { - "description": "Successful Response" - } - } - } - }, - "/v2/clusters/{cluster_id}:ping": { - "post": { - "tags": [ - "clusters" - ], - "summary": "Test cluster connection", - "operationId": "test_specific_cluster_connection_v2_clusters__cluster_id__ping_post", - "parameters": [ - { - "required": true, - "schema": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer" - }, - "name": "cluster_id", - "in": "path" - }, - { - "required": true, - "schema": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "name": "user_id", - "in": "query" - } - ], - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/observation": { - "patch": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Enable/disable observation of the service", - "operationId": "update_service_observation_v2_dynamic_scheduler_services__node_uuid__observation_patch", - "parameters": [ - { - "required": true, - "schema": { - "title": "Node Uuid", - "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ObservationItem" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "Successful Response" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/containers": { - "delete": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Removes the service's user services", - "operationId": "delete_service_containers_v2_dynamic_scheduler_services__node_uuid__containers_delete", - "parameters": [ - { - "required": true, - "schema": { - "title": "Node Uuid", - "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "202": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "title": "Response Delete Service Containers V2 Dynamic Scheduler Services Node Uuid Containers Delete", - "type": "string" - } - } - } - }, - "409": { - "description": "Task already running, cannot start a new one" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/state:save": { - "post": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Starts the saving of 
the state for the service", - "operationId": "save_service_state_v2_dynamic_scheduler_services__node_uuid__state_save_post", - "parameters": [ - { - "required": true, - "schema": { - "title": "Node Uuid", - "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "202": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "title": "Response Save Service State V2 Dynamic Scheduler Services Node Uuid State Save Post", - "type": "string" - } - } - } - }, - "409": { - "description": "Task already running, cannot start a new one" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/outputs:push": { - "post": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Starts the pushing of the outputs for the service", - "operationId": "push_service_outputs_v2_dynamic_scheduler_services__node_uuid__outputs_push_post", - "parameters": [ - { - "required": true, - "schema": { - "title": "Node Uuid", - "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "202": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "title": "Response Push Service Outputs V2 Dynamic Scheduler Services Node Uuid Outputs Push Post", - "type": "string" - } - } - } - }, - "409": { - "description": "Task already running, cannot start a new one" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/v2/dynamic_scheduler/services/{node_uuid}/docker-resources": { - "delete": { - "tags": [ - "dynamic scheduler" - ], - "summary": "Removes the service's sidecar, proxy and docker networks & volumes", - "operationId": "delete_service_docker_resources_v2_dynamic_scheduler_services__node_uuid__docker_resources_delete", - "parameters": [ - { - "required": true, - "schema": { - "title": "Node Uuid", - "type": "string", - "format": "uuid" - }, - "name": "node_uuid", - "in": "path" - } - ], - "responses": { - "202": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "title": "Response Delete Service Docker Resources V2 Dynamic Scheduler Services Node Uuid Docker Resources Delete", - "type": "string" - } - } - } - }, - "409": { - "description": "Task already running, cannot start a new one" - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - } - }, - "components": { - "schemas": { - "Author": { - "title": "Author", - "required": [ - "name", - "email" - ], - "type": "object", - "properties": { - "name": { - "title": "Name", - "type": "string", - "description": "Name of the author", - "example": "Jim Knopf" - }, - "email": { - "title": "Email", + "version": { "type": "string", - "description": "Email address", - "format": "email" + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" }, - "affiliation": { - "title": "Affiliation", - "type": "string", - "description": "Affiliation of the author" + "released": { + "anyOf": [ 
+ { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Released", + "description": "Maps every route's path tag with a released version" } }, - "additionalProperties": false - }, - "Badge": { - "title": "Badge", - "required": [ - "name", - "image", - "url" - ], "type": "object", - "properties": { - "name": { - "title": "Name", - "type": "string", - "description": "Name of the subject" - }, - "image": { - "title": "Image", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "Url to the badge", - "format": "uri" - }, - "url": { - "title": "Url", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "Link to the status", - "format": "uri" - } - }, - "additionalProperties": false - }, - "BootChoice": { - "title": "BootChoice", "required": [ - "label", - "description" + "name", + "version" ], - "type": "object", - "properties": { - "label": { - "title": "Label", - "type": "string" + "title": "BaseMeta", + "example": { + "name": "simcore_service_foo", + "released": { + "v1": "1.3.4", + "v2": "2.4.45" }, - "description": { - "title": "Description", - "type": "string" - } + "version": "2.4.45" } }, "BootMode": { - "title": "BootMode", + "type": "string", "enum": [ "CPU", "GPU", "MPI" ], - "type": "string", - "description": "An enumeration." + "title": "BootMode" }, - "BootOption": { - "title": "BootOption", - "required": [ - "label", - "description", - "default", - "items" - ], - "type": "object", + "CallbacksMapping": { "properties": { - "label": { - "title": "Label", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" + "metrics": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserServiceCommand" + }, + { + "type": "null" + } + ], + "description": "command to recover prometheus metrics from a specific user service" }, - "default": { - "title": "Default", - "type": "string" + "before_shutdown": { + "items": { + "$ref": "#/components/schemas/UserServiceCommand" + }, + "type": "array", + "title": "Before Shutdown", + "description": "commands to run before shutting down the user servicescommands get executed first to last, multiple commands for the sameuser services are allowed" }, - "items": { - "title": "Items", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BootChoice" - } + "inactivity": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserServiceCommand" + }, + { + "type": "null" + } + ], + "description": "command used to figure out for how much time the user service(s) were inactive for" } - } - }, - "ClusterAccessRights": { - "title": "ClusterAccessRights", - "required": [ - "read", - "write", - "delete" - ], + }, + "additionalProperties": false, "type": "object", + "title": "CallbacksMapping" + }, + "ComputationCreate": { "properties": { - "read": { - "title": "Read", - "type": "boolean", - "description": "allows to run pipelines on that cluster" + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 }, - "write": { - "title": "Write", - "type": "boolean", - "description": "allows to modify the cluster" + "project_id": { + "type": "string", + "format": "uuid", + "title": "Project Id" + }, + "start_pipeline": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + 
"title": "Start Pipeline", + "description": "if True the computation pipeline will start right away", + "default": false + }, + "product_name": { + "type": "string", + "title": "Product Name" + }, + "product_api_base_url": { + "type": "string", + "minLength": 1, + "format": "uri", + "title": "Product Api Base Url", + "description": "Base url of the product" + }, + "subgraph": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Subgraph", + "description": "An optional set of nodes that must be executed, if empty the whole pipeline is executed" + }, + "force_restart": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Force Restart", + "description": "if True will force re-running all dependent nodes", + "default": false }, - "delete": { - "title": "Delete", + "simcore_user_agent": { + "type": "string", + "title": "Simcore User Agent", + "default": "" + }, + "use_on_demand_clusters": { "type": "boolean", - "description": "allows to delete a cluster" + "title": "Use On Demand Clusters", + "description": "if True, a cluster will be created as necessary (wallet_id cannot be None)", + "default": false + }, + "wallet_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/WalletInfo-Input" + }, + { + "type": "null" + } + ], + "description": "contains information about the wallet used to bill the running service" } }, - "additionalProperties": false - }, - "ClusterCreate": { - "title": "ClusterCreate", + "type": "object", "required": [ - "name", - "type", - "endpoint", - "authentication" + "user_id", + "project_id", + "product_name", + "product_api_base_url" ], + "title": "ComputationCreate" + }, + "ComputationDelete": { + "properties": { + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + }, + "force": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Force", + "description": "if True then the pipeline will be removed even if it is running", + "default": false + } + }, "type": "object", + "required": [ + "user_id" + ], + "title": "ComputationDelete" + }, + "ComputationGet": { "properties": { - "name": { - "title": "Name", + "id": { "type": "string", - "description": "The human readable name of the cluster" + "format": "uuid", + "title": "Id", + "description": "the id of the computation task" }, - "description": { - "title": "Description", - "type": "string" + "state": { + "$ref": "#/components/schemas/RunningState", + "description": "the state of the computational task" }, - "type": { - "$ref": "#/components/schemas/ClusterType" + "result": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Result", + "description": "the result of the computational task" }, - "owner": { - "title": "Owner", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 + "pipeline_details": { + "$ref": "#/components/schemas/PipelineDetails", + "description": "the details of the generated pipeline" + }, + "iteration": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Iteration", + "description": "the iteration id of the computation task (none if no task ran yet)" + }, + "started": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Started", + "description": "the timestamp when the computation was started or None if not started yet" + 
}, + "stopped": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Stopped", + "description": "the timestamp when the computation was stopped or None if not started nor stopped yet" }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "url to the image describing this cluster", - "format": "uri" + "submitted": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Submitted", + "description": "task last modification timestamp or None if the there is no task" }, - "endpoint": { - "title": "Endpoint", - "maxLength": 65536, - "minLength": 1, + "url": { "type": "string", - "format": "uri" + "minLength": 1, + "format": "uri", + "title": "Url", + "description": "the link where to get the status of the task" }, - "authentication": { - "title": "Authentication", + "stop_url": { "anyOf": [ { - "$ref": "#/components/schemas/SimpleAuthentication" - }, - { - "$ref": "#/components/schemas/KerberosAuthentication" + "type": "string", + "minLength": 1, + "format": "uri" }, { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" + "type": "null" } - ] - }, - "accessRights": { - "title": "Accessrights", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - } + ], + "title": "Stop Url", + "description": "the link where to stop the task" } }, - "additionalProperties": false - }, - "ClusterDetailsGet": { - "title": "ClusterDetailsGet", + "type": "object", "required": [ - "scheduler", - "dashboard_link" + "id", + "state", + "pipeline_details", + "iteration", + "started", + "stopped", + "submitted", + "url" ], - "type": "object", + "title": "ComputationGet" + }, + "ComputationStop": { "properties": { - "scheduler": { - "title": "Scheduler", - "allOf": [ - { - "$ref": "#/components/schemas/Scheduler" - } - ], - "description": "This contains dask scheduler information given by the underlying dask library" - }, - "dashboard_link": { - "title": "Dashboard Link", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "description": "Link to this scheduler's dashboard", - "format": "uri" + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 } - } - }, - "ClusterGet": { - "title": "ClusterGet", + }, + "type": "object", "required": [ - "name", - "type", - "owner", - "endpoint", - "authentication", - "id" + "user_id" ], - "type": "object", + "title": "ComputationStop" + }, + "ContainerState": { "properties": { - "name": { - "title": "Name", - "type": "string", - "description": "The human readable name of the cluster" - }, - "description": { - "title": "Description", - "type": "string" - }, - "type": { - "$ref": "#/components/schemas/ClusterType" - }, - "owner": { - "title": "Owner", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "url to the image describing this cluster", - "format": "uri" - }, - "endpoint": { - "title": "Endpoint", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "format": "uri" - }, - "authentication": { - "title": "Authentication", + "Status": { "anyOf": [ { - "$ref": "#/components/schemas/SimpleAuthentication" + "$ref": "#/components/schemas/Status2" }, { - "$ref": "#/components/schemas/KerberosAuthentication" - }, + "type": "null" + } + ], + "description": 
"String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\".\n" + }, + "Running": { + "anyOf": [ { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" + "type": "boolean" }, { - "$ref": "#/components/schemas/NoAuthentication" + "type": "null" } ], - "description": "Dask gateway authentication" - }, - "accessRights": { - "title": "Accessrights", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - } - }, - "id": { - "title": "Id", - "minimum": 0.0, - "type": "integer", - "description": "The cluster ID" - } - }, - "additionalProperties": false - }, - "ClusterPatch": { - "title": "ClusterPatch", - "type": "object", - "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - }, - "type": { - "$ref": "#/components/schemas/ClusterType" - }, - "owner": { - "title": "Owner", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "format": "uri" + "title": "Running", + "description": "Whether this container is running.\n\nNote that a running container can be _paused_. The `Running` and `Paused`\nbooleans are not mutually exclusive:\n\nWhen pausing a container (on Linux), the freezer cgroup is used to suspend\nall processes in the container. Freezing the process requires the process to\nbe running. As a result, paused containers are both `Running` _and_ `Paused`.\n\nUse the `Status` field instead to determine if a container's state is \"running\".\n" }, - "endpoint": { - "title": "Endpoint", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "format": "uri" - }, - "authentication": { - "title": "Authentication", + "Paused": { "anyOf": [ { - "$ref": "#/components/schemas/SimpleAuthentication" + "type": "boolean" }, { - "$ref": "#/components/schemas/KerberosAuthentication" + "type": "null" + } + ], + "title": "Paused", + "description": "Whether this container is paused." + }, + "Restarting": { + "anyOf": [ + { + "type": "boolean" }, { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" + "type": "null" } - ] - }, - "accessRights": { - "title": "Accessrights", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ClusterAccessRights" - } - } - }, - "additionalProperties": false - }, - "ClusterPing": { - "title": "ClusterPing", - "required": [ - "endpoint", - "authentication" - ], - "type": "object", - "properties": { - "endpoint": { - "title": "Endpoint", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "format": "uri" + ], + "title": "Restarting", + "description": "Whether this container is restarting." 
}, - "authentication": { - "title": "Authentication", + "OOMKilled": { "anyOf": [ { - "$ref": "#/components/schemas/SimpleAuthentication" + "type": "boolean" }, { - "$ref": "#/components/schemas/KerberosAuthentication" - }, + "type": "null" + } + ], + "title": "Oomkilled", + "description": "Whether this container has been killed because it ran out of memory.\n" + }, + "Dead": { + "anyOf": [ { - "$ref": "#/components/schemas/JupyterHubTokenAuthentication" + "type": "boolean" }, { - "$ref": "#/components/schemas/NoAuthentication" + "type": "null" } ], - "description": "Dask gateway authentication" - } - } - }, - "ClusterType": { - "title": "ClusterType", - "enum": [ - "AWS", - "ON_PREMISE" - ], - "description": "An enumeration." - }, - "ComputationCreate": { - "title": "ComputationCreate", - "required": [ - "user_id", - "project_id", - "product_name" - ], - "type": "object", - "properties": { - "user_id": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 - }, - "project_id": { - "title": "Project Id", - "type": "string", - "format": "uuid" + "title": "Dead" }, - "start_pipeline": { - "title": "Start Pipeline", - "type": "boolean", - "description": "if True the computation pipeline will start right away", - "default": false + "Pid": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Pid", + "description": "The process ID of this container" }, - "product_name": { - "title": "Product Name", - "type": "string" + "ExitCode": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Exitcode", + "description": "The last exit code of this container" }, - "subgraph": { - "title": "Subgraph", - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "description": "An optional set of nodes that must be executed, if empty the whole pipeline is executed" + "Error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Error" }, - "force_restart": { - "title": "Force Restart", - "type": "boolean", - "description": "if True will force re-running all dependent nodes", - "default": false + "StartedAt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Startedat", + "description": "The time when this container was last started." }, - "cluster_id": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer", - "description": "the computation shall use the cluster described by its id, 0 is the default cluster" - } - } - }, - "ComputationDelete": { - "title": "ComputationDelete", - "required": [ - "user_id" - ], - "type": "object", - "properties": { - "user_id": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 + "FinishedAt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Finishedat", + "description": "The time when this container last exited." 
}, - "force": { - "title": "Force", - "type": "boolean", - "description": "if True then the pipeline will be removed even if it is running", - "default": false + "Health": { + "anyOf": [ + { + "$ref": "#/components/schemas/Health" + }, + { + "type": "null" + } + ] } - } - }, - "ComputationGet": { - "title": "ComputationGet", - "required": [ - "id", - "state", - "pipeline_details", - "iteration", - "cluster_id", - "url" - ], + }, "type": "object", - "properties": { - "id": { - "title": "Id", - "type": "string", - "description": "the id of the computation task", - "format": "uuid" - }, - "state": { - "allOf": [ + "title": "ContainerState", + "description": "ContainerState stores container's running state. It's part of ContainerJSONBase\nand will be returned by the \"inspect\" command." + }, + "DNSResolver": { + "properties": { + "address": { + "anyOf": [ + { + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" + }, { - "$ref": "#/components/schemas/RunningState" + "type": "string" } ], - "description": "the state of the computational task" + "title": "Address", + "description": "this is not an url address is derived from IP address" }, - "result": { - "title": "Result", - "type": "string", - "description": "the result of the computational task" - }, - "pipeline_details": { - "title": "Pipeline Details", - "allOf": [ + "port": { + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, { - "$ref": "#/components/schemas/PipelineDetails" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" } ], - "description": "the details of the generated pipeline" - }, - "iteration": { - "title": "Iteration", - "exclusiveMinimum": true, - "type": "integer", - "description": "the iteration id of the computation task (none if no task ran yet)", - "minimum": 0 - }, - "cluster_id": { - "title": "Cluster Id", - "minimum": 0.0, - "type": "integer", - "description": "the cluster on which the computaional task runs/ran (none if no task ran yet)" - }, - "url": { - "title": "Url", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "description": "the link where to get the status of the task", - "format": "uri" - }, - "stop_url": { - "title": "Stop Url", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "description": "the link where to stop the task", - "format": "uri" + "title": "Port" } - } - }, - "ComputationStop": { - "title": "ComputationStop", + }, + "additionalProperties": true, + "type": "object", "required": [ - "user_id" + "address", + "port" ], - "type": "object", + "title": "DNSResolver" + }, + "DelayedExceptionHandler": { "properties": { - "user_id": { - "title": "User Id", - "exclusiveMinimum": true, - "type": "integer", - "minimum": 0 + "delay_for": { + "type": "number", + "minimum": 0.0, + "title": "Delay For", + "description": "interval of time during which exceptions are ignored" } - } - }, - "ContainerSpec": { - "title": "ContainerSpec", + }, + "type": "object", "required": [ - "Command" + "delay_for" ], - "type": "object", + "title": "DelayedExceptionHandler", + "description": "Allows to ignore an exception for an established\nperiod of time after which it is raised.\n\nThis use case most commonly occurs when dealing with\nexternal systems.\nFor example, due to poor network performance or\nnetwork congestion, an external system which is healthy,\ncurrently is not reachable any longer.\nA possible solution:\n- 
ignore exceptions for an interval in which the\n system usually is reachable again by not\n raising the error\n- if the error persist give up and raise it\n\nExample code usage:\n\n delayed_handler_external_service = DelayedExceptionHandler(\n delay_for=60\n )\n try:\n function_called_periodically_accessing_external_service()\n except TargetException as e:\n delayed_handler_external_service.try_to_raise(e)\n else:\n delayed_handler_external_service.else_reset()" + }, + "DockerContainerInspect": { "properties": { - "Command": { - "title": "Command", - "maxItems": 2, - "minItems": 1, - "type": "array", - "items": { - "type": "string" - }, - "description": "Used to override the container's command" + "container_state": { + "$ref": "#/components/schemas/ContainerState", + "description": "current state of container" + }, + "name": { + "type": "string", + "title": "Name", + "description": "docker name of the container" + }, + "id": { + "type": "string", + "title": "Id", + "description": "docker id of the container" } }, - "additionalProperties": false, - "description": "Implements entries that can be overriden for https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate\nrequest body: TaskTemplate -> ContainerSpec" - }, - "DictModel_str__PositiveFloat_": { - "title": "DictModel[str, PositiveFloat]", "type": "object", - "additionalProperties": { - "exclusiveMinimum": true, - "type": "number", - "minimum": 0.0 - } - }, - "DynamicServiceCreate": { - "title": "DynamicServiceCreate", "required": [ - "service_key", - "service_version", - "user_id", - "project_id", - "service_uuid", - "service_resources", - "product_name" + "container_state", + "name", + "id" ], - "type": "object", + "title": "DockerContainerInspect" + }, + "DynamicServiceCreate": { "properties": { "service_key": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/dynamic(/[\\w/-]+)+$", "type": "string", + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key", "description": "distinctive name for the node based on the docker registry path" }, "service_version": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version", "description": "semantic version number of the node" }, "user_id": { - "title": "User Id", - "exclusiveMinimum": true, "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", "minimum": 0 }, "project_id": { - "title": "Project Id", "type": "string", - "format": "uuid" + "format": "uuid", + "title": "Project Id" }, "service_uuid": { - "title": "Service Uuid", "type": "string", - "format": "uuid" + "format": "uuid", + "title": "Service Uuid" }, "service_basepath": { + "anyOf": [ + { + "type": "string", + "format": "path" + }, + { + "type": "null" + } + ], "title": "Service Basepath", - "type": "string", - "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint.", - "format": "path" + "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint." 
}, "service_resources": { - "title": "Service Resources", - "type": "object" + "type": "object", + "title": "Service Resources" }, "product_name": { - "title": "Product Name", "type": "string", + "title": "Product Name", "description": "Current product name" + }, + "product_api_base_url": { + "type": "string", + "title": "Product Api Base Url", + "description": "Current product API base URL" + }, + "can_save": { + "type": "boolean", + "title": "Can Save", + "description": "the service data must be saved when closing" + }, + "wallet_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/WalletInfo-Input" + }, + { + "type": "null" + } + ], + "description": "contains information about the wallet used to bill the running service" + }, + "pricing_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/PricingInfo" + }, + { + "type": "null" + } + ], + "description": "contains pricing information (ex. pricing plan and unit ids)" + }, + "hardware_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/HardwareInfo" + }, + { + "type": "null" + } + ], + "description": "contains hardware information (ex. aws_ec2_instances)" } }, + "type": "object", + "required": [ + "service_key", + "service_version", + "user_id", + "project_id", + "service_uuid", + "service_resources", + "product_name", + "product_api_base_url", + "can_save" + ], + "title": "DynamicServiceCreate", "example": { + "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "can_save": true, + "hardware_info": { + "aws_ec2_instances": [ + "c6a.4xlarge" + ] + }, "key": "simcore/services/dynamic/3dviewer", - "version": "2.4.5", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", + "pricing_info": { + "pricing_plan_id": 1, + "pricing_unit_cost_id": 1, + "pricing_unit_id": 1 + }, + "product_api_base_url": "https://api.local/", "product_name": "osparc", + "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", "service_resources": { "container": { + "boot_modes": [ + "CPU" + ], "image": "simcore/services/dynamic/jupyter-math:2.0.5", "resources": { "CPU": { @@ -2472,62 +1887,395 @@ "limit": 2147483648, "reservation": 2147483648 } + } + } + }, + "user_id": 234, + "version": "2.4.5", + "wallet_info": { + "wallet_credit_amount": "10", + "wallet_id": 1, + "wallet_name": "My Wallet" + } + } + }, + "DynamicSidecar": { + "properties": { + "status": { + "$ref": "#/components/schemas/simcore_service_director_v2__models__dynamic_services_scheduler__Status", + "description": "status of the service sidecar also with additional information", + "default": { + "current": "ok", + "info": "" + } + }, + "is_ready": { + "type": "boolean", + "title": "Is Ready", + "description": "is True while the health check on the dynamic-sidecar is responding. 
Meaning that the dynamic-sidecar is reachable and can accept requests", + "default": false + }, + "was_compose_spec_submitted": { + "type": "boolean", + "title": "Was Compose Spec Submitted", + "description": "if the docker-compose spec was already submitted, this field is True", + "default": false + }, + "containers_inspect": { + "items": { + "$ref": "#/components/schemas/DockerContainerInspect" + }, + "type": "array", + "title": "Containers Inspect", + "description": "docker inspect results from all the containers, collected at regular intervals", + "default": [] + }, + "was_dynamic_sidecar_started": { + "type": "boolean", + "title": "Was Dynamic Sidecar Started", + "default": false + }, + "is_healthy": { + "type": "boolean", + "title": "Is Healthy", + "default": false + }, + "were_containers_created": { + "type": "boolean", + "title": "Were Containers Created", + "description": "when True, the Docker API will no longer be used to check if the services were started", + "default": false + }, + "is_project_network_attached": { + "type": "boolean", + "title": "Is Project Network Attached", + "description": "When True, all containers were in running state and project networks were attached. Waiting for the containers to be in running state guarantees all containers have been created", + "default": false + }, + "is_service_environment_ready": { + "type": "boolean", + "title": "Is Service Environment Ready", + "description": "True when the environment setup required by the dynamic-sidecar's created services was completed. Example: nodeports data downloaded, globally shared service data fetched, etc.", + "default": false + }, + "service_removal_state": { + "$ref": "#/components/schemas/ServiceRemovalState", + "description": "stores information used during service removal from the dynamic-sidecar scheduler" + }, + "wait_for_manual_intervention_after_error": { + "type": "boolean", + "title": "Wait For Manual Intervention After Error", + "description": "Marks the sidecar as untouchable since there was an error and important data might be lost. 
Awaits manual intervention.", + "default": false + }, + "wait_for_manual_intervention_logged": { + "type": "boolean", + "title": "Wait For Manual Intervention Logged", + "description": "True if a related message was logged", + "default": false + }, + "were_state_and_outputs_saved": { + "type": "boolean", + "title": "Were State And Outputs Saved", + "description": "set True if the dy-sidecar saves the state and uploads the outputs", + "default": false + }, + "instrumentation": { + "$ref": "#/components/schemas/ServicesInstrumentation", + "description": "keeps track of times for various operations" + }, + "dynamic_sidecar_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" }, - "boot_modes": [ - "CPU" - ] + { + "type": "null" + } + ], + "title": "Dynamic Sidecar Id", + "description": "returned by the docker engine; used for starting the proxy" + }, + "dynamic_sidecar_network_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" + }, + { + "type": "null" + } + ], + "title": "Dynamic Sidecar Network Id", + "description": "returned by the docker engine; used for starting the proxy" + }, + "swarm_network_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 25, + "pattern": "[A-Za-z0-9]{25}" + }, + { + "type": "null" + } + ], + "title": "Swarm Network Id", + "description": "returned by the docker engine; used for starting the proxy" + }, + "swarm_network_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Swarm Network Name", + "description": "used for starting the proxy" + }, + "docker_node_id": { + "anyOf": [ + { + "type": "string", + "pattern": "[a-zA-Z0-9]" + }, + { + "type": "null" + } + ], + "title": "Docker Node Id", + "description": "contains the node id of the docker node where all services and created containers are started" + }, + "inspect_error_handler": { + "$ref": "#/components/schemas/DelayedExceptionHandler", + "description": "Set when the dy-sidecar can no longer be reached by the director-v2. 
If it will be possible to reach the dy-sidecar again, this value will be set to None.", + "default": { + "delay_for": 0.0 } } + }, + "type": "object", + "title": "DynamicSidecar" + }, + "DynamicSidecarStatus": { + "type": "string", + "enum": [ + "ok", + "failing" + ], + "title": "DynamicSidecarStatus" + }, + "GetProjectInactivityResponse": { + "properties": { + "is_inactive": { + "type": "boolean", + "title": "Is Inactive" + } + }, + "type": "object", + "required": [ + "is_inactive" + ], + "title": "GetProjectInactivityResponse", + "example": { + "is_inactive": "false" } }, "HTTPValidationError": { - "title": "HTTPValidationError", - "type": "object", "properties": { "errors": { - "title": "Validation errors", - "type": "array", "items": { "$ref": "#/components/schemas/ValidationError" - } + }, + "type": "array", + "title": "Validation errors" } - } + }, + "type": "object", + "title": "HTTPValidationError" }, - "ImageResources": { - "title": "ImageResources", + "HardwareInfo": { + "properties": { + "aws_ec2_instances": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Aws Ec2 Instances" + } + }, + "type": "object", "required": [ - "image", - "resources" + "aws_ec2_instances" + ], + "title": "HardwareInfo" + }, + "Health": { + "properties": { + "Status": { + "anyOf": [ + { + "$ref": "#/components/schemas/models_library__generated_models__docker_rest_api__Status" + }, + { + "type": "null" + } + ], + "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem\n" + }, + "FailingStreak": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Failingstreak", + "description": "FailingStreak is the number of consecutive failures" + }, + "Log": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/HealthcheckResult" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Log", + "description": "Log contains the last few results (oldest first)\n" + } + }, + "type": "object", + "title": "Health", + "description": "Health stores information about the container's healthcheck results." 
+ }, + "HealthCheckGet": { + "properties": { + "timestamp": { + "type": "string", + "title": "Timestamp" + } + }, + "type": "object", + "required": [ + "timestamp" ], + "title": "HealthCheckGet", + "example": { + "timestamp": "simcore_service_directorv2.api.routes.health@2023-07-03T12:59:12.024551+00:00" + } + }, + "HealthcheckResult": { + "properties": { + "Start": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Start", + "description": "Date and time at which this check started in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n" + }, + "End": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "End", + "description": "Date and time at which this check ended in\n[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.\n" + }, + "ExitCode": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Exitcode", + "description": "ExitCode meanings:\n\n- `0` healthy\n- `1` unhealthy\n- `2` reserved (considered unhealthy)\n- other values: error running probe\n" + }, + "Output": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Output", + "description": "Output from last check" + } + }, "type": "object", + "title": "HealthcheckResult", + "description": "HealthcheckResult stores information about a single run of a healthcheck probe" + }, + "ImageResources": { "properties": { "image": { - "title": "Image", - "pattern": "^(?P(?:(?:(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(?::\\d+)?)|[a-zA-Z0-9-]+:\\d+))?(?:/)?(?P(?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])(?::(?P[\\w][\\w.-]{0,126}[\\w]))?(?P\\@sha256:[a-fA-F0-9]{64})?$", "type": "string", + "pattern": "^(?:([a-z0-9-]+(?:\\.[a-z0-9-]+)+(?::\\d+)?|[a-z0-9-]+:\\d+)/)?((?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])(?::([\\w][\\w.-]{0,127}))?(\\@sha256:[a-fA-F0-9]{32,64})?$", + "title": "Image", "description": "Used by the frontend to provide a context for the users.Services with a docker-compose spec will have multiple entries.Using the `image:version` instead of the docker-compose spec is more helpful for the end user." 
}, "resources": { - "title": "Resources", - "type": "object", "additionalProperties": { "$ref": "#/components/schemas/ResourceValue" - } + }, + "type": "object", + "title": "Resources" }, "boot_modes": { - "type": "array", "items": { "$ref": "#/components/schemas/BootMode" }, + "type": "array", + "title": "Boot Modes", "description": "describe how a service shall be booted, using CPU, MPI, openMP or GPU", "default": [ "CPU" ] } }, + "type": "object", + "required": [ + "image", + "resources" + ], + "title": "ImageResources", "example": { "image": "simcore/service/dynamic/pretty-intense:1.0.0", "resources": { + "AIRAM": { + "limit": 1, + "reservation": 1 + }, + "ANY_resource": { + "limit": "some_value", + "reservation": "some_value" + }, "CPU": { "limit": 4, "reservation": 0.1 @@ -2539,224 +2287,293 @@ "VRAM": { "limit": 1, "reservation": 1 - }, - "AIRAM": { - "limit": 1, - "reservation": 1 - }, - "ANY_resource": { - "limit": "some_value", - "reservation": "some_value" } } } }, - "JupyterHubTokenAuthentication": { - "title": "JupyterHubTokenAuthentication", - "required": [ - "api_token" - ], - "type": "object", + "LegacyState": { "properties": { - "type": { - "title": "Type", - "enum": [ - "jupyterhub" - ], + "old_state_path": { "type": "string", - "default": "jupyterhub" + "format": "path", + "title": "Old State Path" }, - "api_token": { - "title": "Api Token", - "type": "string" - } - }, - "additionalProperties": false - }, - "KerberosAuthentication": { - "title": "KerberosAuthentication", - "type": "object", - "properties": { - "type": { - "title": "Type", - "enum": [ - "kerberos" - ], + "new_state_path": { "type": "string", - "default": "kerberos" + "format": "path", + "title": "New State Path" } }, - "additionalProperties": false - }, - "Meta": { - "title": "Meta", + "type": "object", "required": [ - "name", - "version" + "old_state_path", + "new_state_path" ], - "type": "object", + "title": "LegacyState" + }, + "NATRule": { "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "version": { - "title": "Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" + "hostname": { + "anyOf": [ + { + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" + }, + { + "type": "string" + } + ], + "title": "Hostname" }, - "released": { - "title": "Released", - "type": "object", - "additionalProperties": { - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string" + "tcp_ports": { + "items": { + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, + { + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" + }, + { + "$ref": "#/components/schemas/_PortRange" + } + ] }, - "description": "Maps every route's path tag with a released version" + "type": "array", + "title": "Tcp Ports" + }, + "dns_resolver": { + "$ref": "#/components/schemas/DNSResolver", + "description": "specify a DNS resolver address and port" } }, - "example": { - "name": "simcore_service_foo", - "version": "2.4.45", - "released": { - "v1": "1.3.4", - "v2": "2.4.45" - } - } - }, - "NoAuthentication": { - "title": "NoAuthentication", "type": "object", - "properties": { 
- "type": { - "title": "Type", - "enum": [ - "none" - ], - "type": "string", - "default": "none" - } - }, - "additionalProperties": false - }, - "NodeRequirements": { - "title": "NodeRequirements", "required": [ - "CPU", - "RAM" + "hostname", + "tcp_ports" ], - "type": "object", - "properties": { - "CPU": { - "title": "Cpu", - "exclusiveMinimum": true, - "type": "number", - "description": "defines the required (maximum) CPU shares for running the services", - "minimum": 0.0 - }, - "GPU": { - "title": "Gpu", - "minimum": 0.0, - "type": "integer", - "description": "defines the required (maximum) GPU for running the services" - }, - "RAM": { - "title": "Ram", - "type": "integer", - "description": "defines the required (maximum) amount of RAM for running the services" - }, - "VRAM": { - "title": "Vram", - "type": "integer", - "description": "defines the required (maximum) amount of VRAM for running the services" - } - } + "title": "NATRule", + "description": "Content of \"simcore.service.containers-allowed-outgoing-permit-list\" label" }, "NodeState": { - "title": "NodeState", - "type": "object", "properties": { "modified": { - "title": "Modified", "type": "boolean", + "title": "Modified", "description": "true if the node's outputs need to be re-computed", "default": true }, "dependencies": { - "title": "Dependencies", - "uniqueItems": true, - "type": "array", "items": { "type": "string", "format": "uuid" }, + "type": "array", + "uniqueItems": true, + "title": "Dependencies", "description": "contains the node inputs dependencies if they need to be computed first" }, - "currentStatus": { - "allOf": [ + "currentStatus": { + "$ref": "#/components/schemas/RunningState", + "description": "the node's current state", + "default": "NOT_STARTED" + }, + "progress": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Progress", + "description": "current progress of the task if available (None if not started or not a computational task)", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "title": "NodeState" + }, + "ObservationItem": { + "properties": { + "is_disabled": { + "type": "boolean", + "title": "Is Disabled" + } + }, + "type": "object", + "required": [ + "is_disabled" + ], + "title": "ObservationItem" + }, + "PathMappingsLabel": { + "properties": { + "inputs_path": { + "type": "string", + "format": "path", + "title": "Inputs Path", + "description": "folder path where the service expects all the inputs" + }, + "outputs_path": { + "type": "string", + "format": "path", + "title": "Outputs Path", + "description": "folder path where the service is expected to provide all its outputs" + }, + "state_paths": { + "items": { + "type": "string", + "format": "path" + }, + "type": "array", + "title": "State Paths", + "description": "optional list of paths which contents need to be persisted" + }, + "state_exclude": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], + "title": "State Exclude", + "description": "optional list unix shell rules used to exclude files from the state" + }, + "volume_size_limits": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, { - "$ref": "#/components/schemas/RunningState" + "type": "null" } ], - "description": "the node's current state", - "default": "NOT_STARTED" + "title": "Volume Size Limits", + "description": "Apply volume size limits to entries in: 
`inputs_path`, `outputs_path` and `state_paths`. Limits must be parsable by Pydantic's ByteSize." + }, + "legacy_state": { + "anyOf": [ + { + "$ref": "#/components/schemas/LegacyState" + }, + { + "type": "null" + } + ], + "description": "if present, the service needs to first try to download the legacy statecoming from a different path." } }, - "additionalProperties": false - }, - "ObservationItem": { - "title": "ObservationItem", + "additionalProperties": false, + "type": "object", "required": [ - "is_disabled" + "inputs_path", + "outputs_path" ], - "type": "object", - "properties": { - "is_disabled": { - "title": "Is Disabled", - "type": "boolean" - } - } + "title": "PathMappingsLabel", + "description": "Content of \"simcore.service.paths-mapping\" label" }, "PipelineDetails": { - "title": "PipelineDetails", - "required": [ - "adjacency_list", - "node_states" - ], - "type": "object", "properties": { "adjacency_list": { - "title": "Adjacency List", - "type": "object", "additionalProperties": { - "type": "array", "items": { "type": "string", "format": "uuid" - } + }, + "type": "array" + }, + "propertyNames": { + "format": "uuid" }, + "type": "object", + "title": "Adjacency List", "description": "The adjacency list of the current pipeline in terms of {NodeID: [successor NodeID]}" }, + "progress": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Progress", + "description": "the progress of the pipeline (None if there are no computational tasks)" + }, "node_states": { - "title": "Node States", - "type": "object", "additionalProperties": { "$ref": "#/components/schemas/NodeState" }, + "propertyNames": { + "format": "uuid" + }, + "type": "object", + "title": "Node States", "description": "The states of each of the computational nodes in the pipeline" } - } - }, - "ResourceValue": { - "title": "ResourceValue", + }, + "type": "object", "required": [ - "limit", - "reservation" + "adjacency_list", + "progress", + "node_states" ], + "title": "PipelineDetails" + }, + "PricingInfo": { + "properties": { + "pricing_plan_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Plan Id", + "minimum": 0 + }, + "pricing_unit_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Unit Id", + "minimum": 0 + }, + "pricing_unit_cost_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Unit Cost Id", + "minimum": 0 + } + }, "type": "object", + "required": [ + "pricing_plan_id", + "pricing_unit_id", + "pricing_unit_cost_id" + ], + "title": "PricingInfo" + }, + "ResourceValue": { "properties": { "limit": { - "title": "Limit", "anyOf": [ { "type": "integer" @@ -2767,10 +2584,10 @@ { "type": "string" } - ] + ], + "title": "Limit" }, "reservation": { - "title": "Reservation", "anyOf": [ { "type": "integer" @@ -2781,796 +2598,695 @@ { "type": "string" } - ] + ], + "title": "Reservation" } - } - }, - "RetrieveDataIn": { - "title": "RetrieveDataIn", + }, + "type": "object", "required": [ - "port_keys" + "limit", + "reservation" ], - "type": "object", + "title": "ResourceValue" + }, + "RestartPolicy": { + "type": "string", + "enum": [ + "no-restart", + "on-inputs-downloaded" + ], + "title": "RestartPolicy", + "description": "Content of \"simcore.service.restart-policy\" label" + }, + "RetrieveDataIn": { "properties": { "port_keys": { - "title": "Port Keys", - "type": "array", "items": { - "pattern": "^[-_a-zA-Z0-9]+$", - "type": "string" + "type": "string", + "pattern": "^[-_a-zA-Z0-9]+$" 
}, + "type": "array", + "title": "Port Keys", "description": "The port keys to retrieve data from" } - } - }, - "RetrieveDataOut": { - "title": "RetrieveDataOut", + }, + "type": "object", "required": [ - "size_bytes" + "port_keys" ], - "type": "object", + "title": "RetrieveDataIn" + }, + "RetrieveDataOut": { "properties": { "size_bytes": { - "title": "Size Bytes", "type": "integer", + "minimum": 0, + "title": "Size Bytes", "description": "The amount of data transferred by the retrieve call" } - } - }, - "RetrieveDataOutEnveloped": { - "title": "RetrieveDataOutEnveloped", + }, + "type": "object", "required": [ - "data" + "size_bytes" ], - "type": "object", + "title": "RetrieveDataOut" + }, + "RetrieveDataOutEnveloped": { "properties": { "data": { "$ref": "#/components/schemas/RetrieveDataOut" } - } - }, - "RunningDynamicServiceDetails": { - "title": "RunningDynamicServiceDetails", + }, + "type": "object", "required": [ - "service_key", - "service_version", - "user_id", - "project_id", - "service_uuid", - "service_host", - "service_port", - "service_state" + "data" ], - "type": "object", + "title": "RetrieveDataOutEnveloped" + }, + "RunningDynamicServiceDetails": { "properties": { "service_key": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/dynamic(/[\\w/-]+)+$", "type": "string", + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key", "description": "distinctive name for the node based on the docker registry path" }, "service_version": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version", "description": "semantic version number of the node" }, "user_id": { - "title": "User Id", - "exclusiveMinimum": true, "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", "minimum": 0 }, "project_id": { - "title": "Project Id", "type": "string", - "format": "uuid" + "format": "uuid", + "title": "Project Id" }, "service_uuid": { - "title": "Service Uuid", "type": "string", - "format": "uuid" + "format": "uuid", + "title": "Service Uuid" }, "service_basepath": { - "title": "Service Basepath", - "type": "string", - "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint.", - "format": "path" - }, - "boot_type": { - "allOf": [ + "anyOf": [ + { + "type": "string", + "format": "path" + }, { - "$ref": "#/components/schemas/ServiceBootType" + "type": "null" } ], + "title": "Service Basepath", + "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint." 
+ }, + "boot_type": { + "$ref": "#/components/schemas/ServiceBootType", "description": "Describes how the dynamic services was started (legacy=V0, modern=V2).Since legacy services do not have this label it defaults to V0.", "default": "V0" }, "service_host": { - "title": "Service Host", "type": "string", + "title": "Service Host", "description": "the service swarm internal host name" }, "service_port": { - "title": "Service Port", + "type": "integer", "exclusiveMaximum": true, "exclusiveMinimum": true, - "type": "integer", + "title": "Service Port", "description": "the service swarm internal port", "maximum": 65535, "minimum": 0 }, "published_port": { - "title": "Published Port", - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "type": "integer", - "description": "the service swarm published port if any", - "deprecated": true, - "maximum": 65535, - "minimum": 0 - }, - "entry_point": { - "title": "Entry Point", - "type": "string", - "description": "if empty the service entrypoint is on the root endpoint.", - "deprecated": true - }, - "service_state": { - "allOf": [ + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, { - "$ref": "#/components/schemas/ServiceState" + "type": "null" } ], - "description": "service current state" - }, - "service_message": { - "title": "Service Message", - "type": "string", - "description": "additional information related to service state" - } - } - }, - "RunningServiceDetails": { - "title": "RunningServiceDetails", - "required": [ - "entry_point", - "service_uuid", - "service_key", - "service_version", - "service_host", - "service_basepath", - "service_state", - "service_message" - ], - "type": "object", - "properties": { - "published_port": { "title": "Published Port", - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "type": "integer", - "description": "The ports where the service provides its interface on the docker swarm", - "deprecated": true, - "maximum": 65535, - "minimum": 0 + "description": "the service swarm published port if any", + "deprecated": true }, "entry_point": { - "title": "Entry Point", - "type": "string", - "description": "The entry point where the service provides its interface" - }, - "service_uuid": { - "title": "Service Uuid", - "pattern": "^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$", - "type": "string", - "description": "The node UUID attached to the service" - }, - "service_key": { - "title": "Service Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", - "type": "string", - "description": "distinctive name for the node based on the docker registry path", - "example": [ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer" - ] - }, - "service_version": { - "title": "Service Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", - "type": "string", - "description": "service version number", - "example": [ - "1.0.0", - "0.0.1" - ] - }, - "service_host": { - "title": "Service Host", - "type": "string", - "description": "service host name within the network" - }, - "service_port": { - "title": "Service Port", - "exclusiveMaximum": true, - "exclusiveMinimum": true, - "type": "integer", - "description": "port to access the service within the network", - "default": 80, - "maximum": 65535, - "minimum": 0 - }, - 
"service_basepath": { - "title": "Service Basepath", - "type": "string", - "description": "the service base entrypoint where the service serves its contents" - }, - "service_state": { - "allOf": [ + "anyOf": [ + { + "type": "string" + }, { - "$ref": "#/components/schemas/ServiceState" + "type": "null" } ], - "description": "the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start * 'stopping' - The service is stopping" + "title": "Entry Point", + "description": "if empty the service entrypoint is on the root endpoint.", + "deprecated": true + }, + "service_state": { + "$ref": "#/components/schemas/ServiceState", + "description": "service current state" }, "service_message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Service Message", - "type": "string", - "description": "the service message" + "description": "additional information related to service state" } - } - }, - "RunningServicesDetailsArray": { - "title": "RunningServicesDetailsArray", - "type": "array", - "items": { - "$ref": "#/components/schemas/RunningServiceDetails" - } - }, - "RunningServicesDetailsArrayEnveloped": { - "title": "RunningServicesDetailsArrayEnveloped", + }, + "type": "object", "required": [ - "data" + "service_key", + "service_version", + "user_id", + "project_id", + "service_uuid", + "service_host", + "service_port", + "service_state" ], - "type": "object", - "properties": { - "data": { - "$ref": "#/components/schemas/RunningServicesDetailsArray" - } - } + "title": "RunningDynamicServiceDetails" }, "RunningState": { - "title": "RunningState", + "type": "string", "enum": [ "UNKNOWN", "PUBLISHED", "NOT_STARTED", "PENDING", + "WAITING_FOR_RESOURCES", "STARTED", - "RETRY", "SUCCESS", "FAILED", - "ABORTED" + "ABORTED", + "WAITING_FOR_CLUSTER" ], - "type": "string", + "title": "RunningState", "description": "State of execution of a project's computational workflow\n\nSEE StateType for task state" }, - "Scheduler": { - "title": "Scheduler", - "required": [ - "status" - ], - "type": "object", + "SchedulerData": { "properties": { - "status": { - "title": "Status", - "type": "string", - "description": "The running status of the scheduler" + "paths_mapping": { + "$ref": "#/components/schemas/PathMappingsLabel" }, - "workers": { - "$ref": "#/components/schemas/WorkersDict" - } - } - }, - "SelectBox": { - "title": "SelectBox", - "required": [ - "structure" - ], - "type": "object", - "properties": { - "structure": { - "title": "Structure", - "minItems": 1, - "type": "array", - "items": { - "$ref": "#/components/schemas/Structure" - } - } - }, - "additionalProperties": false - }, - "ServiceBootType": { - "title": "ServiceBootType", - "enum": [ - "V0", - "V2" - ], - "type": "string", - "description": "An enumeration." - }, - "ServiceBuildDetails": { - "title": "ServiceBuildDetails", - "required": [ - "build_date", - "vcs_ref", - "vcs_url" - ], - "type": "object", - "properties": { - "build_date": { - "title": "Build Date", - "type": "string" + "simcore.service.compose-spec": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Simcore.Service.Compose-Spec", + "description": "json encoded docker-compose specifications. 
see https://docs.docker.com/compose/compose-file/, only used by dynamic-sidecar." }, - "vcs_ref": { - "title": "Vcs Ref", - "type": "string" + "simcore.service.container-http-entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Simcore.Service.Container-Http-Entrypoint", + "description": "When a docker-compose specifications is provided, the container where the traffic must flow has to be specified. Required by dynamic-sidecar when compose_spec is set." }, - "vcs_url": { - "title": "Vcs Url", - "type": "string" - } - } - }, - "ServiceDockerData": { - "title": "ServiceDockerData", - "required": [ - "name", - "description", - "key", - "version", - "type", - "authors", - "contact", - "inputs", - "outputs" - ], - "type": "object", - "properties": { - "name": { - "title": "Name", + "user_preferences_path": { + "anyOf": [ + { + "type": "string", + "format": "path" + }, + { + "type": "null" + } + ], + "title": "User Preferences Path" + }, + "simcore.service.restart-policy": { + "$ref": "#/components/schemas/RestartPolicy", + "description": "the dynamic-sidecar can restart all running containers on certain events. Supported events:\n- `no-restart` default\n- `on-inputs-downloaded` after inputs are downloaded\n", + "default": "no-restart" + }, + "simcore.service.containers-allowed-outgoing-permit-list": { + "anyOf": [ + { + "additionalProperties": { + "items": { + "$ref": "#/components/schemas/NATRule" + }, + "type": "array" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Simcore.Service.Containers-Allowed-Outgoing-Permit-List", + "description": "allow internet access to certain domain names and ports per container" + }, + "simcore.service.containers-allowed-outgoing-internet": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], + "title": "Simcore.Service.Containers-Allowed-Outgoing-Internet", + "description": "allow complete internet access to containers in here" + }, + "callbacks_mapping": { + "$ref": "#/components/schemas/CallbacksMapping" + }, + "service_key": { "type": "string", - "description": "short, human readable name for the node", - "example": "Fast Counter" + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key", + "description": "distinctive name for the node based on the docker registry path" }, - "thumbnail": { - "title": "Thumbnail", - "maxLength": 2083, - "minLength": 1, + "service_version": { "type": "string", - "description": "url to the thumbnail", - "format": "uri" + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version", + "description": "semantic version number of the node" + }, + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 }, - "description": { - "title": "Description", + "project_id": { "type": "string", - "description": "human readable description of the purpose of the node" + "format": "uuid", + "title": "Project Id" }, - "key": { - "title": "Key", - "pattern": "^(simcore)/(services)/(comp|dynamic|frontend)(/[\\w/-]+)+$", + "service_uuid": { "type": "string", - "description": "distinctive name for the node based on the docker registry path" + "format": "uuid", + "title": "Service Uuid" }, - "version": { - "title": "Version", - "pattern": 
"^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "service_name": { "type": "string", - "description": "service version number" + "minLength": 2, + "title": "Service Name", + "description": "Name of the current dynamic-sidecar being observed" }, - "integration-version": { - "title": "Integration-Version", - "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "run_id": { "type": "string", - "description": "integration version number" + "title": "Run Id", + "description": "Uniquely identify the dynamic sidecar session (a.k.a. 2 subsequent exact same services will have a different run_id)" }, - "type": { - "allOf": [ - { - "$ref": "#/components/schemas/ServiceType" - } - ], - "description": "service type" + "hostname": { + "type": "string", + "title": "Hostname", + "description": "dy-sidecar's service hostname (provided by docker-swarm)" }, - "badges": { - "title": "Badges", - "type": "array", - "items": { - "$ref": "#/components/schemas/Badge" - } + "port": { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "title": "Port", + "description": "dynamic-sidecar port", + "default": 8000, + "maximum": 65535, + "minimum": 0 }, - "authors": { - "title": "Authors", - "minItems": 1, - "type": "array", - "items": { - "$ref": "#/components/schemas/Author" - } + "dynamic_sidecar": { + "$ref": "#/components/schemas/DynamicSidecar", + "description": "stores information fetched from the dynamic-sidecar" }, - "contact": { - "title": "Contact", + "dynamic_sidecar_network_name": { "type": "string", - "description": "email to correspond to the authors about the node", - "format": "email" + "title": "Dynamic Sidecar Network Name", + "description": "overlay network biding the proxy to the container spaned by the dynamic-sidecar" }, - "inputs": { - "title": "Inputs", - "type": "object", - "description": "definition of the inputs of this node" + "simcore_traefik_zone": { + "type": "string", + "title": "Simcore Traefik Zone", + "description": "required for Traefik to correctly route requests to the spawned container" }, - "outputs": { - "title": "Outputs", - "type": "object", - "description": "definition of the outputs of this node" + "service_port": { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "title": "Service Port", + "description": "port where the service is exposed defined by the service; NOTE: temporary default because it will be changed once the service is started, this value is fetched from the service start spec", + "default": 65534, + "maximum": 65535, + "minimum": 0 }, - "boot-options": { - "title": "Boot-Options", + "service_resources": { "type": "object", - "description": "Service defined boot options. These get injected in the service as env variables." 
- } - }, - "additionalProperties": false, - "description": "Static metadata for a service injected in the image labels\n\nThis is one to one with node-meta-v0.0.1.json" - }, - "ServiceExtras": { - "title": "ServiceExtras", - "required": [ - "node_requirements" - ], - "type": "object", - "properties": { - "node_requirements": { - "$ref": "#/components/schemas/NodeRequirements" - }, - "service_build_details": { - "$ref": "#/components/schemas/ServiceBuildDetails" - }, - "container_spec": { - "$ref": "#/components/schemas/ContainerSpec" - } - } - }, - "ServiceExtrasEnveloped": { - "title": "ServiceExtrasEnveloped", - "required": [ - "data" - ], - "type": "object", - "properties": { - "data": { - "$ref": "#/components/schemas/ServiceExtras" - } - } - }, - "ServiceInput": { - "title": "ServiceInput", - "required": [ - "label", - "description", - "type" - ], - "type": "object", - "properties": { - "displayOrder": { - "title": "Displayorder", - "type": "number", - "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", - "deprecated": true + "title": "Service Resources", + "description": "service resources used to enforce limits" }, - "label": { - "title": "Label", + "request_dns": { "type": "string", - "description": "short name for the property", - "example": "Age" + "title": "Request Dns", + "description": "used when configuring the CORS options on the proxy" }, - "description": { - "title": "Description", + "request_scheme": { "type": "string", - "description": "description of the property", - "example": "Age in seconds since 1970" + "title": "Request Scheme", + "description": "used when configuring the CORS options on the proxy" }, - "type": { - "title": "Type", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "request_simcore_user_agent": { "type": "string", - "description": "data type expected on this input glob matching for data type is allowed" - }, - "contentSchema": { - "title": "Contentschema", - "type": "object", - "description": "jsonschema of this input/output. 
Required when type='ref_contentSchema'" - }, - "fileToKeyMap": { - "title": "Filetokeymap", - "type": "object", - "description": "Place the data associated with the named keys in files" + "title": "Request Simcore User Agent", + "description": "used as label to filter out the metrics from the cAdvisor prometheus metrics" }, - "unit": { - "title": "Unit", + "proxy_service_name": { "type": "string", - "description": "Units, when it refers to a physical quantity" + "title": "Proxy Service Name", + "description": "service name given to the proxy" }, - "defaultValue": { - "title": "Defaultvalue", + "proxy_admin_api_port": { "anyOf": [ { - "type": "boolean" + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 }, { - "type": "integer" - }, + "type": "null" + } + ], + "title": "Proxy Admin Api Port", + "description": "used as the admin endpoint API port" + }, + "wallet_info": { + "anyOf": [ { - "type": "number" + "$ref": "#/components/schemas/WalletInfo-Output" }, { - "type": "string" + "type": "null" } - ] + ], + "description": "contains information about the wallet used to bill the running service" }, - "widget": { - "title": "Widget", - "allOf": [ + "pricing_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/PricingInfo" + }, { - "$ref": "#/components/schemas/Widget" + "type": "null" } ], - "description": "custom widget to use instead of the default one determined from the data-type" - } - }, - "additionalProperties": false, - "description": "Metadata on a service input port" - }, - "ServiceOutput": { - "title": "ServiceOutput", - "required": [ - "label", - "description", - "type" - ], - "type": "object", - "properties": { - "displayOrder": { - "title": "Displayorder", - "type": "number", - "description": "DEPRECATED: new display order is taken from the item position. This will be removed.", - "deprecated": true - }, - "label": { - "title": "Label", - "type": "string", - "description": "short name for the property", - "example": "Age" - }, - "description": { - "title": "Description", - "type": "string", - "description": "description of the property", - "example": "Age in seconds since 1970" + "description": "contains pricing information so we know what is the cost of running the service" }, - "type": { - "title": "Type", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", - "type": "string", - "description": "data type expected on this input glob matching for data type is allowed" - }, - "contentSchema": { - "title": "Contentschema", - "type": "object", - "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" - }, - "fileToKeyMap": { - "title": "Filetokeymap", - "type": "object", - "description": "Place the data associated with the named keys in files" + "hardware_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/HardwareInfo" + }, + { + "type": "null" + } + ], + "description": "contains hardware information so we know on which hardware to run the service" }, - "unit": { - "title": "Unit", - "type": "string", - "description": "Units, when it refers to a physical quantity" + "product_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Product Name", + "description": "Current product upon which this service is scheduled. If set to None, the current product is undefined. 
Mostly for backwards compatibility" }, - "widget": { - "title": "Widget", - "allOf": [ + "product_api_base_url": { + "anyOf": [ + { + "type": "string" + }, { - "$ref": "#/components/schemas/Widget" + "type": "null" } ], - "description": "custom widget to use instead of the default one determined from the data-type", - "deprecated": true + "title": "Product Api Base Url", + "description": "Base URL for the current product's API." } }, - "additionalProperties": false, - "description": "Base class for service input/outputs" - }, - "ServiceState": { - "title": "ServiceState", - "enum": [ - "pending", - "pulling", - "starting", - "running", - "complete", - "failed", - "stopping" + "additionalProperties": true, + "type": "object", + "required": [ + "paths_mapping", + "service_key", + "service_version", + "user_id", + "project_id", + "service_uuid", + "service_name", + "hostname", + "dynamic_sidecar", + "dynamic_sidecar_network_name", + "simcore_traefik_zone", + "service_resources", + "request_dns", + "request_scheme", + "request_simcore_user_agent", + "proxy_service_name" ], - "description": "An enumeration." + "title": "SchedulerData" }, - "ServiceType": { - "title": "ServiceType", - "enum": [ - "computational", - "dynamic", - "frontend", - "backend" - ], + "ServiceBootType": { "type": "string", - "description": "An enumeration." - }, - "ServicesArrayEnveloped": { - "title": "ServicesArrayEnveloped", - "required": [ - "data" + "enum": [ + "V0", + "V2" ], - "type": "object", - "properties": { - "data": { - "title": "Data", - "type": "array", - "items": { - "$ref": "#/components/schemas/ServiceDockerData" - } - } - } + "title": "ServiceBootType" }, - "SimpleAuthentication": { - "title": "SimpleAuthentication", - "required": [ - "username", - "password" - ], - "type": "object", + "ServiceRemovalState": { "properties": { - "type": { - "title": "Type", - "enum": [ - "simple" - ], - "type": "string", - "default": "simple" + "can_remove": { + "type": "boolean", + "title": "Can Remove", + "description": "when True, marks the service as ready to be removed", + "default": false }, - "username": { - "title": "Username", - "type": "string" + "can_save": { + "type": "boolean", + "title": "Can Save", + "description": "when True, saves the internal state and upload outputs of the service", + "default": false }, - "password": { - "title": "Password", - "type": "string", - "format": "password", - "writeOnly": true + "was_removed": { + "type": "boolean", + "title": "Was Removed", + "description": "Will be True when the removal finished. 
Used primarily to cancel retrying long running operations.", + "default": false } }, - "additionalProperties": false + "type": "object", + "title": "ServiceRemovalState" }, - "Structure": { - "title": "Structure", - "required": [ - "key", - "label" + "ServiceState": { + "type": "string", + "enum": [ + "failed", + "pending", + "pulling", + "starting", + "running", + "stopping", + "complete", + "idle" ], - "type": "object", + "title": "ServiceState" + }, + "ServicesInstrumentation": { "properties": { - "key": { - "title": "Key", + "start_requested_at": { "anyOf": [ { - "type": "string" + "type": "string", + "format": "date-time" }, { - "type": "boolean" + "type": "null" + } + ], + "title": "Start Requested At", + "description": "moment in which the process of starting the service was requested" + }, + "close_requested_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" }, { - "type": "number" + "type": "null" } - ] - }, - "label": { - "title": "Label", - "type": "string" + ], + "title": "Close Requested At", + "description": "moment in which the process of stopping the service was requested" } }, - "additionalProperties": false - }, - "TaskCounts": { - "title": "TaskCounts", "type": "object", - "properties": { - "error": { - "title": "Error", - "type": "integer", - "default": 0 - }, - "memory": { - "title": "Memory", - "type": "integer", - "default": 0 - }, - "executing": { - "title": "Executing", - "type": "integer", - "default": 0 - } - } + "title": "ServicesInstrumentation" }, - "TaskLogFileGet": { - "title": "TaskLogFileGet", - "required": [ - "task_id" + "Status2": { + "type": "string", + "enum": [ + "created", + "running", + "paused", + "restarting", + "removing", + "exited", + "dead" ], - "type": "object", + "title": "Status2", + "description": "String representation of the container state. Can be one of \"created\",\n\"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\"." 
+ }, + "TaskLogFileGet": { "properties": { "task_id": { - "title": "Task Id", "type": "string", - "format": "uuid" + "format": "uuid", + "title": "Task Id" }, "download_link": { + "anyOf": [ + { + "type": "string", + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], "title": "Download Link", - "maxLength": 65536, - "minLength": 1, - "type": "string", - "description": "Presigned link for log file or None if still not available", - "format": "uri" + "description": "Presigned link for log file or None if still not available" } - } - }, - "TextArea": { - "title": "TextArea", + }, + "type": "object", "required": [ - "minHeight" + "task_id" ], - "type": "object", + "title": "TaskLogFileGet" + }, + "TasksOutputs": { "properties": { - "minHeight": { - "title": "Minheight", - "exclusiveMinimum": true, - "type": "integer", - "description": "minimum Height of the textarea", - "minimum": 0 + "nodes_outputs": { + "additionalProperties": { + "additionalProperties": true, + "propertyNames": { + "maxLength": 100, + "minLength": 1 + }, + "type": "object" + }, + "propertyNames": { + "format": "uuid" + }, + "type": "object", + "title": "Nodes Outputs" } }, - "additionalProperties": false - }, - "UsedResources": { - "title": "UsedResources", "type": "object", - "additionalProperties": { - "minimum": 0.0, - "type": "number" - } + "required": [ + "nodes_outputs" + ], + "title": "TasksOutputs" }, - "ValidationError": { - "title": "ValidationError", + "TasksSelection": { + "properties": { + "nodes_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Nodes Ids" + } + }, + "type": "object", "required": [ - "loc", - "msg", - "type" + "nodes_ids" ], + "title": "TasksSelection" + }, + "UserServiceCommand": { + "properties": { + "service": { + "type": "string", + "title": "Service", + "description": "name of the docker-compose service in the docker-compose spec" + }, + "command": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Command", + "description": "command to run in container" + }, + "timeout": { + "type": "number", + "minimum": 0.0, + "title": "Timeout", + "description": "after this interval the command will be timed-out" + } + }, + "additionalProperties": false, "type": "object", + "required": [ + "service", + "command", + "timeout" + ], + "title": "UserServiceCommand" + }, + "ValidationError": { "properties": { "loc": { - "title": "Location", - "type": "array", "items": { "anyOf": [ { @@ -3580,134 +3296,157 @@ "type": "integer" } ] - } + }, + "type": "array", + "title": "Location" }, "msg": { - "title": "Message", - "type": "string" + "type": "string", + "title": "Message" }, "type": { - "title": "Error Type", - "type": "string" + "type": "string", + "title": "Error Type" } - } - }, - "Widget": { - "title": "Widget", + }, + "type": "object", "required": [ - "type", - "details" + "loc", + "msg", + "type" ], - "type": "object", + "title": "ValidationError" + }, + "WalletInfo-Input": { "properties": { - "type": { - "allOf": [ - { - "$ref": "#/components/schemas/WidgetType" - } - ], - "description": "type of the property" + "wallet_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 + }, + "wallet_name": { + "type": "string", + "title": "Wallet Name" }, - "details": { - "title": "Details", + "wallet_credit_amount": { "anyOf": [ { - "$ref": "#/components/schemas/TextArea" + "type": "number" }, { - "$ref": "#/components/schemas/SelectBox" 
+ "type": "string" } - ] + ], + "title": "Wallet Credit Amount" } }, - "additionalProperties": false - }, - "WidgetType": { - "title": "WidgetType", - "enum": [ - "TextArea", - "SelectBox" - ], - "type": "string", - "description": "An enumeration." - }, - "Worker": { - "title": "Worker", + "type": "object", "required": [ - "id", - "name", - "resources", - "used_resources", - "memory_limit", - "metrics" + "wallet_id", + "wallet_name", + "wallet_credit_amount" ], - "type": "object", + "title": "WalletInfo" + }, + "WalletInfo-Output": { "properties": { - "id": { - "title": "Id", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "resources": { - "$ref": "#/components/schemas/DictModel_str__PositiveFloat_" - }, - "used_resources": { - "$ref": "#/components/schemas/UsedResources" + "wallet_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 }, - "memory_limit": { - "title": "Memory Limit", - "type": "integer" + "wallet_name": { + "type": "string", + "title": "Wallet Name" }, - "metrics": { - "$ref": "#/components/schemas/WorkerMetrics" + "wallet_credit_amount": { + "type": "string", + "title": "Wallet Credit Amount" } - } - }, - "WorkerMetrics": { - "title": "WorkerMetrics", + }, + "type": "object", "required": [ - "cpu", - "memory", - "num_fds", - "task_counts" + "wallet_id", + "wallet_name", + "wallet_credit_amount" ], - "type": "object", + "title": "WalletInfo" + }, + "_PortRange": { "properties": { - "cpu": { - "title": "Cpu", - "type": "number", - "description": "consumed % of cpus" - }, - "memory": { - "title": "Memory", - "type": "integer", - "description": "consumed memory" - }, - "num_fds": { - "title": "Num Fds", - "type": "integer", - "description": "consumed file descriptors" + "lower": { + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, + { + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" + } + ], + "title": "Lower" }, - "task_counts": { - "title": "Task Counts", - "allOf": [ + "upper": { + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, { - "$ref": "#/components/schemas/TaskCounts" + "type": "string", + "pattern": "^\\${1,2}(?:\\{)?OSPARC_VARIABLE_[A-Za-z0-9_]+(?:\\})?(:-.+)?$" } ], - "description": "task details" + "title": "Upper" } - } + }, + "type": "object", + "required": [ + "lower", + "upper" + ], + "title": "_PortRange", + "description": "`lower` and `upper` are included" + }, + "models_library__generated_models__docker_rest_api__Status": { + "type": "string", + "enum": [ + "none", + "starting", + "healthy", + "unhealthy" + ], + "title": "Status", + "description": "Status is one of `none`, `starting`, `healthy` or `unhealthy`\n\n- \"none\" Indicates there is no healthcheck\n- \"starting\" Starting indicates that the container is not yet ready\n- \"healthy\" Healthy indicates that the container is running correctly\n- \"unhealthy\" Unhealthy indicates that the container has a problem" }, - "WorkersDict": { - "title": "WorkersDict", + "simcore_service_director_v2__models__dynamic_services_scheduler__Status": { + "properties": { + "current": { + "$ref": "#/components/schemas/DynamicSidecarStatus", + "description": "status of the service" + }, + "info": { + "type": "string", + "title": "Info", + "description": "additional information for the user" + } + }, "type": "object", - 
"additionalProperties": { - "$ref": "#/components/schemas/Worker" - } + "required": [ + "current", + "info" + ], + "title": "Status", + "description": "Generated from data from docker container inspect API" } } } diff --git a/services/director-v2/requirements/_base.in b/services/director-v2/requirements/_base.in index 3662d8e6a00..7a30881a331 100644 --- a/services/director-v2/requirements/_base.in +++ b/services/director-v2/requirements/_base.in @@ -6,6 +6,7 @@ --constraint ./constraints.txt # NOTE: Make sure they are added in setup.install_requires +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/dask-task-models-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/postgres-database/requirements/_base.in @@ -21,14 +22,11 @@ aio-pika aiocache[redis,msgpack] aiodocker -aiopg[sa] -dask-gateway -fastapi[all] -httpx networkx -orderedset +ordered-set orjson pydantic[dotenv] +python-socketio redis rich tenacity diff --git a/services/director-v2/requirements/_base.txt b/services/director-v2/requirements/_base.txt index f3e2e0aaaa2..2eded29c106 100644 --- a/services/director-v2/requirements/_base.txt +++ b/services/director-v2/requirements/_base.txt @@ -1,358 +1,1111 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.4 +aio-pika==9.5.5 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiocache==0.11.1 +aiocache==0.12.3 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in aiodebug==2.3.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -aiodocker==0.19.1 - # via -r requirements/_base.in -aiofiles==0.8.0 +aiodocker==0.24.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in - # -r requirements/../../../packages/simcore-sdk/requirements/_base.in -aiohttp==3.8.3 + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +aiofiles==24.1.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in - # aiodocker - # dask-gateway -aiopg==1.3.3 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/simcore-sdk/requirements/_base.in - # -r requirements/_base.in -aioredis==2.0.1 - # via aiocache -aiormq==6.4.2 + # aiodocker +aiormq==6.8.1 # via aio-pika -aiosignal==1.2.0 +aiosignal==1.3.2 # via aiohttp -alembic==1.8.1 +alembic==1.15.2 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in -anyio==3.6.1 +annotated-types==0.7.0 + # via pydantic +anyio==4.9.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette - # watchgod -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -asgiref==3.5.2 - # via uvicorn -async-timeout==4.0.2 + # watchfiles +arrow==1.3.0 # via - # aiohttp - # aiopg - # aioredis - # redis -attrs==21.4.0 + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.3.0 # via - # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/./constraints.txt # aiohttp # jsonschema -blosc==1.11.1 + # referencing +bidict==0.23.1 + # via python-socketio +blosc==1.11.3 # via -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt -certifi==2022.12.7 +certifi==2025.4.26 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx # requests -charset-normalizer==2.0.12 - # via - # aiohttp - # requests -click==8.1.3 +charset-normalizer==3.4.2 + # via requests +click==8.1.8 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway # distributed + # rich-toolkit # typer # uvicorn -cloudpickle==2.2.1 +cloudpickle==3.1.1 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask # distributed -commonmark==0.9.1 - # via rich -dask==2023.3.0 +dask==2025.5.0 # via # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway # distributed -dask-gateway==2023.1.1 - # via -r requirements/_base.in -decorator==4.4.2 - # via networkx -distributed==2023.3.0 +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +distributed==2025.5.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway -dnspython==2.1.0 +dnspython==2.7.0 # via email-validator -email-validator==1.2.1 +email-validator==2.2.0 # via # fastapi # pydantic -fastapi==0.85.0 +exceptiongroup==1.3.0 + # 
via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -frozenlist==1.3.0 +faststream==0.5.41 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +frozenlist==1.6.0 # via # aiohttp # aiosignal -fsspec==2023.3.0 +fsspec==2025.3.2 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask -greenlet==1.1.2 +googleapis-common-protos==1.70.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.2.2 # via sqlalchemy -h11==0.12.0 +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.16.0 # via # httpcore # uvicorn -heapdict==1.0.1 - # via - # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt - # zict -httpcore==0.15.0 + # wsproto +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.9 # via httpx -httptools==0.2.0 +httptools==0.6.4 # via uvicorn -httpx==0.23.0 +httpx==0.28.1 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -idna==2.10 + # fastapi 
+hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator + # httpx # requests - # rfc3986 # yarl -itsdangerous==1.1.0 - # via fastapi -jaeger-client==4.8.0 - # via fastapi-contrib -jinja2==3.1.2 +importlib-metadata==8.6.1 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # dask + # opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed # fastapi -jsonschema==3.2.0 +jsonschema==4.23.0 # via # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2025.4.1 + # via jsonschema locket==1.0.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed # partd -lz4==4.3.2 +lz4==4.4.4 # via -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt -mako==1.2.2 +mako==1.3.10 # via + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # jinja2 # mako -msgpack==1.0.5 +mdurl==0.1.2 + # via markdown-it-py +msgpack==1.1.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # aiocache # distributed -multidict==6.0.2 +multidict==6.4.4 # via # aiohttp # yarl -networkx==2.5.1 +networkx==3.4.2 # via -r requirements/_base.in -opentracing==2.4.0 +numpy==2.2.6 + # via -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt +opentelemetry-api==1.33.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.33.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.33.1 + # via + # 
opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.33.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.54b1 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.54b1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.54b1 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.54b1 # via - # fastapi-contrib - # jaeger-client -orderedset==2.0.3 + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.54b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.54b1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.54b1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.54b1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.33.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.33.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.54b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.54b1 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +ordered-set==4.1.0 # via -r requirements/_base.in -orjson==3.7.2 +orjson==3.10.18 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/_base.in - # fastapi -packaging==23.0 +packaging==25.0 # via # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask # distributed -pamqp==3.2.1 + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -partd==1.3.0 +partd==1.4.2 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask -pint==0.19.2 +pint==0.24.4 # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in -psutil==5.9.4 +platformdirs==4.3.8 + # via pint +prometheus-client==0.22.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 # via + # aiohttp + # yarl +protobuf==5.29.4 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed -psycopg2-binary==2.9.3 - # via - # aiopg - # sqlalchemy -pydantic==1.9.0 +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.23.0 + # via stream-zip +pydantic==2.11.4 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in + # fast-depends # fastapi -pygments==2.13.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.2 + # via pydantic +pydantic-extra-types==2.10.4 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 # via rich -pyinstrument==4.1.1 +pyinstrument==5.0.1 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.18.1 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -python-dotenv==0.20.0 +python-dotenv==1.1.0 # via - # pydantic + # pydantic-settings # uvicorn -python-multipart==0.0.5 +python-engineio==4.12.1 + # via python-socketio +python-multipart==0.0.20 # via fastapi -pyyaml==5.4.1 +python-socketio==5.13.0 + # via -r requirements/_base.in +pyyaml==6.0.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask - # dask-gateway # distributed - # fastapi # uvicorn -redis==4.4.0 +redis==6.1.0 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -requests==2.27.1 - # via fastapi -rfc3986==1.4.0 - # via httpx -rich==12.5.1 - # via -r requirements/_base.in -six==1.15.0 + # aiocache +referencing==0.35.1 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil - # python-multipart - # thrift -sniffio==1.2.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==14.0.0 # via - # anyio - # httpcore - # httpx + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.6 + # via fastapi-cli +rpds-py==0.25.0 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +simple-websocket==1.1.0 + # via python-engineio +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio 
sortedcontainers==2.4.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed -sqlalchemy==1.4.37 +sqlalchemy==1.4.54 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/postgres-database/requirements/_base.in - # aiopg + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # alembic -starlette==0.20.4 - # via fastapi -tblib==1.7.0 +starlette==0.46.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +tblib==3.1.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed -tenacity==8.0.1 +tenacity==9.1.2 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -toolz==0.12.0 +toolz==1.0.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # dask # distributed # partd -tornado==6.2 +tornado==6.5 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway # distributed - # jaeger-client - # threadloop -tqdm==4.64.0 +tqdm==4.67.1 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in -typer==0.4.1 +typer==0.15.4 # via + # -r requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.3.0 + # fastapi-cli +types-python-dateutil==2.9.0.20250516 + # via arrow +typing-extensions==4.13.2 # via # aiodebug - # aiodocker - # aioredis + # alembic + # anyio + # exceptiongroup + # fastapi + # faststream + # flexcache + # flexparser + # opentelemetry-sdk + # pint # pydantic - # starlette -ujson==5.5.0 + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer + # typing-inspection +typing-inspection==0.4.0 + # via pydantic 
+urllib3==2.4.0 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/dask-task-models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt - # fastapi -urllib3==1.26.14 - # via + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed # requests -uvicorn==0.15.0 +uvicorn==0.34.2 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in # fastapi -uvloop==0.16.0 + # fastapi-cli +uvloop==0.21.0 # via uvicorn -watchgod==0.8.2 +watchfiles==1.0.5 # via uvicorn -websockets==10.1 +websockets==15.0.1 # via uvicorn -yarl==1.7.2 +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +wsproto==1.2.0 + # via simple-websocket +yarl==1.20.0 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq -zict==2.2.0 +zict==3.0.0 # via # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt # distributed - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via + # -r requirements/../../../services/dask-sidecar/requirements/_dask-distributed.txt + # importlib-metadata diff --git 
a/services/director-v2/requirements/_test.in b/services/director-v2/requirements/_test.in index b40ecc4b9dd..2fb831189ba 100644 --- a/services/director-v2/requirements/_test.in +++ b/services/director-v2/requirements/_test.in @@ -14,16 +14,12 @@ aioboto3 alembic # migration due to pytest_simcore.postgres_service2 asgi_lifespan async-asgi-testclient # replacement for fastapi.testclient.TestClient [see b) below] -codecov -coveralls -dask-gateway-server[local] +dask[distributed,diagnostics] docker Faker flaky -minio -pylint pytest -pytest-aiohttp +pytest-asyncio pytest-cov pytest-docker pytest-icdiff @@ -31,20 +27,7 @@ pytest-mock pytest-runner pytest-xdist respx - - -# NOTE: What test client to use for fastapi-based apps? -# -# fastapi comes with a default test client: fatapi.testclient.TestClient (SEE https://fastapi.tiangolo.com/tutorial/testing/) -# which is essentially an indirection to starlette.testclient (SEE https://www.starlette.io/testclient/) -# -# the limitation of that client is that it is fd synchronous. -# -# There are two options in place: -# a) fastapi recommends to use httpx and create your own AsyncTestClient: https://fastapi.tiangolo.com/advanced/async-tests/ -# PROS: can use respx to mock responses, used to httpx API -# CONS: do it yourself, does not include app member out-of-the-box -# b) use generic Async ASGI TestClient library: https://github.com/vinissimus/async-asgi-testclient -# PROS: generic closed solution, has 'app' member , requests-like API (i.e. equivalent to starletter TESTClient) -# CONS: basically does not have the PROS from a), adds extra deps to 'requests' lib. -# +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types-networkx +types-psycopg2 +types-PyYAML diff --git a/services/director-v2/requirements/_test.txt b/services/director-v2/requirements/_test.txt index 6759b6caf56..b556d2b115e 100644 --- a/services/director-v2/requirements/_test.txt +++ b/services/director-v2/requirements/_test.txt @@ -1,275 +1,356 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aio-pika==8.2.4 - # via -r requirements/_test.in -aioboto3==10.4.0 +aio-pika==9.5.5 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +aioboto3==14.3.0 # via -r requirements/_test.in -aiobotocore==2.4.2 +aiobotocore==2.22.0 # via aioboto3 -aiohttp==3.8.3 +aiofiles==24.1.0 # via # -c requirements/_base.txt + # aioboto3 +aiohappyeyeballs==2.6.1 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # aiobotocore - # dask-gateway-server - # pytest-aiohttp -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore -aiormq==6.4.2 +aiormq==6.8.1 # via # -c requirements/_base.txt # aio-pika -aiosignal==1.2.0 +aiosignal==1.3.2 # via # -c requirements/_base.txt # aiohttp -alembic==1.8.1 - # via -r requirements/_test.in -anyio==3.6.1 +alembic==1.15.2 # via # -c requirements/_base.txt - # httpcore -asgi-lifespan==2.0.0 + # -r requirements/_test.in +anyio==4.9.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 # via -r requirements/_test.in -astroid==2.14.2 - # via pylint async-asgi-testclient==1.4.11 # via -r requirements/_test.in -async-timeout==4.0.2 - # via - # -c requirements/_base.txt - # aiohttp -attrs==21.4.0 
+attrs==25.3.0 # via # -c requirements/_base.txt # aiohttp - # pytest # pytest-docker -boto3==1.24.59 +bokeh==3.7.3 + # via dask +boto3==1.37.3 # via aiobotocore -botocore==1.27.59 +botocore==1.37.3 # via # aiobotocore # boto3 # s3transfer -certifi==2022.12.7 +certifi==2025.4.26 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # httpcore # httpx - # minio # requests -cffi==1.15.1 - # via cryptography -charset-normalizer==2.0.12 +charset-normalizer==3.4.2 # via # -c requirements/_base.txt - # aiohttp # requests -codecov==2.1.12 - # via -r requirements/_test.in -colorlog==6.7.0 - # via dask-gateway-server -coverage==6.5.0 +click==8.1.8 # via - # codecov - # coveralls - # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -cryptography==39.0.1 + # -c requirements/_base.txt + # dask + # distributed +cloudpickle==3.1.1 # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server -dask-gateway-server==2023.1.1 - # via -r requirements/_test.in -dill==0.3.6 - # via pylint -docker==6.0.1 + # -c requirements/_base.txt + # dask + # distributed +contourpy==1.3.2 + # via bokeh +coverage==7.8.0 + # via pytest-cov +dask==2025.5.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in + # distributed +distributed==2025.5.0 + # via + # -c requirements/_base.txt + # dask +docker==7.1.0 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -execnet==1.9.0 +exceptiongroup==1.3.0 + # via + # -c requirements/_base.txt + # aio-pika +execnet==2.1.1 # via pytest-xdist -faker==17.4.0 +faker==37.3.0 # via -r requirements/_test.in -flaky==3.7.0 +flaky==3.8.1 # via -r requirements/_test.in -frozenlist==1.3.0 +frozenlist==1.6.0 # via # -c requirements/_base.txt # aiohttp # aiosignal -greenlet==1.1.2 +fsspec==2025.3.2 + # via + # -c requirements/_base.txt + # dask +greenlet==3.2.2 # via # -c requirements/_base.txt # sqlalchemy -h11==0.12.0 +h11==0.16.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.15.0 +httpcore==1.0.9 # via # -c requirements/_base.txt # httpx -httpx==0.23.0 +httpx==0.28.1 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # respx -icdiff==2.0.6 +icdiff==2.0.7 # via pytest-icdiff -idna==2.10 +idna==3.10 # via # -c requirements/_base.txt # anyio + # httpx # requests - # rfc3986 # yarl -iniconfig==2.0.0 +importlib-metadata==8.6.1 + # via + # -c requirements/_base.txt + # dask +iniconfig==2.1.0 # via pytest -isort==5.12.0 - # via pylint +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # bokeh + # dask + # distributed jmespath==1.0.1 # via + # aiobotocore # boto3 # botocore -lazy-object-proxy==1.9.0 - # via astroid -mako==1.2.2 +locket==1.0.0 + # via + # -c requirements/_base.txt + # distributed + # partd +mako==1.3.10 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # alembic -markupsafe==2.1.2 +markupsafe==3.0.2 # via # -c requirements/_base.txt + # jinja2 # mako -mccabe==0.7.0 - # via pylint -minio==7.0.4 - # via -r requirements/_test.in -multidict==6.0.2 +msgpack==1.1.0 + # via + # -c requirements/_base.txt + # distributed +multidict==6.4.4 # via # -c requirements/_base.txt + # aiobotocore # aiohttp # async-asgi-testclient # yarl -packaging==23.0 +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.1.0 + # via mypy +narwhals==1.40.0 + # via bokeh +numpy==2.2.6 # via # -c requirements/_base.txt - # docker + # 
bokeh + # contourpy + # pandas + # types-networkx +packaging==25.0 + # via + # -c requirements/_base.txt + # bokeh + # dask + # distributed # pytest -pamqp==3.2.1 +pamqp==3.3.0 # via # -c requirements/_base.txt # aiormq -platformdirs==3.0.0 - # via pylint -pluggy==1.0.0 +pandas==2.2.3 + # via bokeh +partd==1.4.2 + # via + # -c requirements/_base.txt + # dask +pillow==11.2.1 + # via bokeh +pluggy==1.6.0 # via pytest pprintpp==0.4.0 # via pytest-icdiff -pycparser==2.21 - # via cffi -pylint==2.16.2 - # via -r requirements/_test.in -pytest==7.2.1 +propcache==0.3.1 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +psutil==7.0.0 + # via + # -c requirements/_base.txt + # distributed +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp # pytest-asyncio # pytest-cov # pytest-docker # pytest-icdiff # pytest-mock # pytest-xdist -pytest-aiohttp==1.0.4 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-asyncio==0.20.3 - # via pytest-aiohttp -pytest-cov==4.0.0 +pytest-cov==6.1.1 # via -r requirements/_test.in -pytest-docker==1.0.1 +pytest-docker==3.2.1 # via -r requirements/_test.in -pytest-icdiff==0.6 +pytest-icdiff==0.9 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-xdist==3.2.0 +pytest-xdist==3.6.1 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements/_base.txt + # aiobotocore # botocore - # faker -requests==2.27.1 + # pandas +pytz==2025.2 + # via pandas +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # bokeh + # dask + # distributed +requests==2.32.3 # via # -c requirements/_base.txt # async-asgi-testclient - # codecov - # coveralls # docker -respx==0.20.1 +respx==0.22.0 # via -r requirements/_test.in -rfc3986==1.4.0 - # via - # -c requirements/_base.txt - # httpx -s3transfer==0.6.0 +s3transfer==0.11.3 # via boto3 -six==1.15.0 +six==1.17.0 # via # -c requirements/_base.txt # python-dateutil -sniffio==1.2.0 +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio # asgi-lifespan - # httpcore - # httpx -sqlalchemy==1.4.37 +sortedcontainers==2.4.0 # via # -c requirements/_base.txt + # distributed +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in # alembic - # dask-gateway-server -tomli==2.0.1 +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +tblib==3.1.0 # via - # coverage - # pylint - # pytest -tomlkit==0.11.6 - # via pylint -traitlets==5.9.0 - # via dask-gateway-server -typing-extensions==4.3.0 + # -c requirements/_base.txt + # distributed +toolz==1.0.0 + # via + # -c requirements/_base.txt + # dask + # distributed + # partd +tornado==6.5 # via # -c requirements/_base.txt - # aioitertools - # astroid - # pylint -urllib3==1.26.14 + # bokeh + # distributed +types-networkx==3.4.2.20250515 + # via -r requirements/_test.in +types-psycopg2==2.9.21.20250516 + # via -r requirements/_test.in +types-pyyaml==6.0.12.20250516 + # via -r requirements/_test.in +typing-extensions==4.13.2 # via # -c requirements/_base.txt + # alembic + # anyio + # exceptiongroup + # mypy + # sqlalchemy2-stubs +tzdata==2025.2 + # via + # faker + # pandas +urllib3==2.4.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # botocore + # distributed # docker - # minio # requests -websocket-client==1.5.1 - # 
via docker -wrapt==1.15.0 +wrapt==1.17.2 # via + # -c requirements/_base.txt # aiobotocore - # astroid -yarl==1.7.2 +xyzservices==2025.4.0 + # via bokeh +yarl==1.20.0 # via # -c requirements/_base.txt # aio-pika # aiohttp # aiormq +zict==3.0.0 + # via + # -c requirements/_base.txt + # distributed +zipp==3.21.0 + # via + # -c requirements/_base.txt + # importlib-metadata diff --git a/services/director-v2/requirements/_tools.txt b/services/director-v2/requirements/_tools.txt index 7aa2da34ce7..19dcf22bed6 100644 --- a/services/director-v2/requirements/_tools.txt +++ b/services/director-v2/requirements/_tools.txt @@ -1,105 +1,91 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 - # via - # -c requirements/_test.txt - # pylint -black==22.12.0 +astroid==3.3.10 + # via pylint +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt + # -c requirements/_test.txt # black # pip-tools -dill==0.3.6 - # via - # -c requirements/_test.txt - # pylint -distlib==0.3.6 +dill==0.4.0 + # via pylint +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.18.0 # via virtualenv -identify==2.5.18 +identify==2.6.10 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 +mccabe==0.7.0 + # via pylint +mypy==1.15.0 # via # -c requirements/_test.txt - # astroid -mccabe==0.7.0 + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 # via # -c requirements/_test.txt - # pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==25.0 # via + # -c requirements/_base.txt # -c requirements/_test.txt + # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.1.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.8 # via - # -c requirements/_test.txt + # -c requirements/_base.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.2.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.7 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via - # -c requirements/_base.txt - # pre-commit - # watchdog -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # -c requirements/_test.txt - # pylint -typing-extensions==4.3.0 + # pre-commit + # watchdog +ruff==0.11.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==80.7.1 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.13.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # mypy +virtualenv==20.31.2 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 
- # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/director-v2/requirements/ci.txt b/services/director-v2/requirements/ci.txt index bb142c7d5cc..17eacb4cfda 100644 --- a/services/director-v2/requirements/ci.txt +++ b/services/director-v2/requirements/ci.txt @@ -10,16 +10,18 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/dask-task-models-library/ -../../packages/models-library -../../packages/postgres-database -../../packages/pytest-simcore/ -../../packages/service-library[fastapi] -../../packages/settings-library/ -../../packages/simcore-sdk/ +simcore-common-library @ ../../packages/common-library/ +simcore-dask-task-models-library @ ../../packages/dask-task-models-library/ +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ +simcore-sdk @ ../../packages/simcore-sdk/ # installs current package -. +simcore-service-director-v2 @ . diff --git a/services/director-v2/requirements/dev.txt b/services/director-v2/requirements/dev.txt index 6d932514ae9..f183201fd55 100644 --- a/services/director-v2/requirements/dev.txt +++ b/services/director-v2/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library/ --editable ../../packages/dask-task-models-library/ --editable ../../packages/models-library --editable ../../packages/postgres-database/ diff --git a/services/director-v2/requirements/prod.txt b/services/director-v2/requirements/prod.txt index dd2894e17ef..8a770919b4f 100644 --- a/services/director-v2/requirements/prod.txt +++ b/services/director-v2/requirements/prod.txt @@ -10,12 +10,13 @@ --requirement _base.txt # installs this repo's packages -../../packages/dask-task-models-library/ -../../packages/models-library -../../packages/postgres-database/ -../../packages/service-library[fastapi] -../../packages/settings-library/ -../../packages/simcore-sdk/ +simcore-dask-task-models-library @ ../../packages/dask-task-models-library/ +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ +simcore-sdk @ ../../packages/simcore-sdk/ # installs current package -. +simcore-service-director-v2 @ . diff --git a/services/director-v2/setup.cfg b/services/director-v2/setup.cfg index de804c8e71c..f84ced2849b 100644 --- a/services/director-v2/setup.cfg +++ b/services/director-v2/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 2.2.0 +current_version = 2.3.0 commit = True message = services/director-v2 version: {current_version} β†’ {new_version} tag = False @@ -9,5 +9,12 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function markers = testit: "marks test to run during development" + acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
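Editor's note: a minimal, hypothetical sketch of how tests can opt into the markers registered just above (select them with pytest -m acceptance_test); the test names and bodies are illustrative only, not tests that exist in the repository:

import pytest


@pytest.mark.testit
def test_quick_check_during_development() -> None:
    assert True


@pytest.mark.acceptance_test
async def test_user_can_create_and_stop_a_computation() -> None:
    # asyncio_mode = auto (set above) collects plain async tests without an
    # explicit pytest-asyncio decorator
    ...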
+ +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/services/director-v2/setup.py b/services/director-v2/setup.py index 1f57f74e031..d9976937794 100644 --- a/services/director-v2/setup.py +++ b/services/director-v2/setup.py @@ -1,12 +1,11 @@ import re import sys from pathlib import Path -from typing import Set from setuptools import find_packages, setup -def read_reqs(reqs_path: Path) -> Set[str]: +def read_reqs(reqs_path: Path) -> set[str]: return { r for r in re.findall( @@ -36,33 +35,34 @@ def read_reqs(reqs_path: Path) -> Set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-service-director-v2", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "name": "simcore-service-director-v2", + "version": (CURRENT_DIR / "VERSION").read_text().strip(), + "author": ", ".join( ( "Pedro Crespo-Valero (pcrespov)", "Sylvain Anderegg (sanderegg)", ) ), - description="Orchestrates the pipeline of services defined by the user", - long_description=(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ + "description": "Orchestrates the pipeline of services defined by the user", + "long_description": (CURRENT_DIR / "README.md").read_text(), + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { "console_scripts": [ "simcore-service-director-v2=simcore_service_director_v2.cli:main", + "simcore-service=simcore_service_director_v2.cli:main", ], }, -) +} if __name__ == "__main__": diff --git a/services/director-v2/src/simcore_service_director_v2/__init__.py b/services/director-v2/src/simcore_service_director_v2/__init__.py index d689cb28b1f..f047bb136bd 100644 --- a/services/director-v2/src/simcore_service_director_v2/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/__init__.py @@ -1,4 +1,4 @@ """ Python package for the simcore_service_director_v2. 
""" -from .meta import __version__ +from ._meta import __version__ diff --git a/services/director-v2/src/simcore_service_director_v2/_meta.py b/services/director-v2/src/simcore_service_director_v2/_meta.py new file mode 100644 index 00000000000..4ebfef7135c --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/_meta.py @@ -0,0 +1,19 @@ +""" Application's metadata + +""" +from typing import Final + +from models_library.basic_types import VersionStr +from packaging.version import Version +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore-service-director-v2") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +API_VTAG: Final[str] = info.api_prefix_path_tag +APP_NAME: Final[str] = PROJECT_NAME +SUMMARY: Final[str] = info.get_summary() diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/__init__.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/__init__.py index d5abd4b019f..02823f24864 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/__init__.py @@ -1,5 +1,7 @@ +from typing import cast + from fastapi import FastAPI, Request def get_app(request: Request) -> FastAPI: - return request.app + return cast(FastAPI, request.app) diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/database.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/database.py index 3ec9a64fb91..949ef83bbdf 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/database.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/database.py @@ -1,45 +1,52 @@ import logging -from typing import AsyncGenerator, Callable, Type +from collections.abc import AsyncGenerator, Callable +from typing import Annotated, TypeVar, cast -from aiopg.sa import Engine from fastapi import Depends from fastapi.requests import Request +from sqlalchemy.ext.asyncio import AsyncEngine from ...modules.db.repositories import BaseRepository logger = logging.getLogger(__name__) -def _get_db_engine(request: Request) -> Engine: - return request.app.state.engine +RepoType = TypeVar("RepoType", bound=BaseRepository) -def get_base_repository( - engine: Engine, repo_type: Type[BaseRepository] -) -> BaseRepository: +def _get_db_engine(request: Request) -> AsyncEngine: + return cast(AsyncEngine, request.app.state.engine) + + +def get_base_repository(engine: AsyncEngine, repo_type: type[RepoType]) -> RepoType: # NOTE: 2 different ideas were tried here with not so good # 1st one was acquiring a connection per repository which lead to the following issue https://github.com/ITISFoundation/osparc-simcore/pull/1966 # 2nd one was acquiring a connection per request which works but blocks the director-v2 responsiveness once # the max amount of connections is reached # now the current solution is to acquire connection when needed. 
- available_engines = engine.maxsize - (engine.size - engine.freesize) - if available_engines <= 1: + # Get pool metrics + checkedin = engine.pool.checkedin() # type: ignore # connections available in pool + checkedout = engine.pool.checkedout() # type: ignore # connections in use + total_size = engine.pool.size() # type: ignore # current total connections + + if (checkedin < 2) and (total_size > 1): # noqa: PLR2004 logger.warning( - "Low pg connections available in pool: pool size=%d, acquired=%d, free=%d, reserved=[%d, %d]", - engine.size, - engine.size - engine.freesize, - engine.freesize, - engine.minsize, - engine.maxsize, + "Database connection pool near limits: total=%d, in_use=%d, available=%d", + total_size, + checkedout, + checkedin, ) + return repo_type(db_engine=engine) -def get_repository(repo_type: Type[BaseRepository]) -> Callable: +def get_repository( + repo_type: type[RepoType], +) -> Callable[..., AsyncGenerator[RepoType, None]]: async def _get_repo( - engine: Engine = Depends(_get_db_engine), - ) -> AsyncGenerator[BaseRepository, None]: + engine: Annotated[AsyncEngine, Depends(_get_db_engine)], + ) -> AsyncGenerator[RepoType, None]: yield get_base_repository(engine=engine, repo_type=repo_type) return _get_repo diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/director_v0.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/director_v0.py index c0e8b4580bc..b003b8f465b 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/director_v0.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/director_v0.py @@ -1,16 +1,7 @@ -from fastapi import Depends, Request, Response +from fastapi import Request from ...modules.director_v0 import DirectorV0Client def get_director_v0_client(request: Request) -> DirectorV0Client: - client = DirectorV0Client.instance(request.app) - return client - - -async def forward_to_director_v0( - request: Request, - response: Response, - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), -) -> Response: - return await director_v0_client.forward(request, response) + return DirectorV0Client.instance(request.app) diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_services.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_services.py index 9aa964fa942..ecb3e0e2e12 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_services.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_services.py @@ -1,15 +1,19 @@ import logging +from typing import Annotated from fastapi import Depends, Request -from models_library.projects_nodes import NodeID +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.projects_nodes_io import NodeID +from servicelib.logging_utils import log_decorator from starlette.datastructures import URL -from ...core.settings import DynamicServicesSettings -from ...models.schemas.dynamic_services import RunningDynamicServiceDetails +from ...core.dynamic_services_settings import DynamicServicesSettings +from ...modules.director_v0 import DirectorV0Client from ...modules.dynamic_services import ServicesClient from ...modules.dynamic_sidecar.scheduler import DynamicSidecarsScheduler -from ...utils.logging_utils import log_decorator -from .director_v0 import DirectorV0Client, get_director_v0_client +from 
.director_v0 import get_director_v0_client logger = logging.getLogger(__name__) @@ -17,9 +21,8 @@ @log_decorator(logger=logger) async def get_service_base_url( node_uuid: NodeID, - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], ) -> URL: - # get the service details service_details: RunningDynamicServiceDetails = ( await director_v0_client.get_running_service_details(node_uuid) @@ -29,13 +32,14 @@ async def get_service_base_url( @log_decorator(logger=logger) def get_services_client(request: Request) -> ServicesClient: - client = ServicesClient.instance(request.app) - return client + return ServicesClient.instance(request.app) def get_dynamic_services_settings(request: Request) -> DynamicServicesSettings: - return request.app.state.settings.DYNAMIC_SERVICES + settings: DynamicServicesSettings = request.app.state.settings.DYNAMIC_SERVICES + return settings def get_scheduler(request: Request) -> DynamicSidecarsScheduler: - return request.app.state.dynamic_sidecar_scheduler + scheduler: DynamicSidecarsScheduler = request.app.state.dynamic_sidecar_scheduler + return scheduler diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_sidecar.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_sidecar.py index fb01e279967..cece0284a2f 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_sidecar.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/dynamic_sidecar.py @@ -1,17 +1,16 @@ from fastapi import Request -from ...core.settings import DynamicSidecarSettings -from ...modules.dynamic_sidecar.api_client import DynamicSidecarClient +from ...core.dynamic_services_settings.sidecar import DynamicSidecarSettings from ...modules.dynamic_sidecar.scheduler import DynamicSidecarsScheduler -def get_dynamic_sidecar_client(request: Request) -> DynamicSidecarClient: - return request.app.state.dynamic_sidecar_api_client - - def get_dynamic_sidecar_scheduler(request: Request) -> DynamicSidecarsScheduler: - return request.app.state.dynamic_sidecar_scheduler + scheduler: DynamicSidecarsScheduler = request.app.state.dynamic_sidecar_scheduler + return scheduler def get_dynamic_sidecar_settings(request: Request) -> DynamicSidecarSettings: - return request.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + settings: DynamicSidecarSettings = ( + request.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + ) + return settings diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py index fa4238a8133..07aa0272e67 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py @@ -1,7 +1,12 @@ from fastapi import Request +from servicelib.rabbitmq import RabbitMQRPCClient -from ...modules.rabbitmq import RabbitMQClient +from ...modules.rabbitmq import get_rabbitmq_client, get_rabbitmq_rpc_client -def get_rabbitmq_client(request: Request) -> RabbitMQClient: - return request.app.state.rabbitmq_client +def get_rabbitmq_client_from_request(request: Request): + return get_rabbitmq_client(request.app) + + +def rabbitmq_rpc_client(request: Request) -> RabbitMQRPCClient: + return get_rabbitmq_rpc_client(request.app) diff --git 
a/services/director-v2/src/simcore_service_director_v2/api/dependencies/rut_client.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/rut_client.py new file mode 100644 index 00000000000..70bc94b7ad2 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/rut_client.py @@ -0,0 +1,7 @@ +from fastapi import Request + +from ...modules.resource_usage_tracker_client import ResourceUsageTrackerClient + + +def get_rut_client(request: Request) -> ResourceUsageTrackerClient: + return ResourceUsageTrackerClient.get_from_state(request.app) diff --git a/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py b/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py index 6382a78adab..e480d204d3b 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py +++ b/services/director-v2/src/simcore_service_director_v2/api/dependencies/scheduler.py @@ -1,12 +1,15 @@ -from fastapi import Depends, FastAPI, Request - -from ...modules.comp_scheduler.base_scheduler import BaseCompScheduler -from . import get_app +from typing import Annotated +from fastapi import Depends, FastAPI -def get_scheduler(request: Request) -> BaseCompScheduler: - return request.app.state.scheduler +from ...core.settings import ComputationalBackendSettings +from . import get_app -def get_scheduler_settings(app: FastAPI = Depends(get_app)): - return app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND +def get_scheduler_settings( + app: Annotated[FastAPI, Depends(get_app)] +) -> ComputationalBackendSettings: + settings: ComputationalBackendSettings = ( + app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND + ) + return settings diff --git a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py index bbc457c66f9..df3d607049c 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py +++ b/services/director-v2/src/simcore_service_director_v2/api/entrypoints.py @@ -1,16 +1,13 @@ from fastapi import APIRouter -from ..meta import API_VTAG +from .._meta import API_VTAG from .routes import ( - clusters, computations, computations_tasks, dynamic_scheduler, dynamic_services, health, meta, - running_interactive, - services, ) # Info @@ -18,15 +15,6 @@ meta_router.include_router(health.router) meta_router.include_router(meta.router, prefix="/meta") -# API v0 (Legacy) -v0_router = APIRouter() -v0_router.include_router(services.router, tags=["services"], prefix="/services") -v0_router.include_router( - running_interactive.router, - tags=["services"], - prefix="/running_interactive_services", -) - # Latest API v2_router = APIRouter() v2_router.include_router( @@ -38,7 +26,6 @@ v2_router.include_router( dynamic_services.router, tags=["dynamic services"], prefix="/dynamic_services" ) -v2_router.include_router(clusters.router, tags=["clusters"], prefix="/clusters") v2_router.include_router( dynamic_scheduler.router, tags=["dynamic scheduler"], prefix="/dynamic_scheduler" @@ -48,7 +35,6 @@ # root api_router = APIRouter() api_router.include_router(meta_router) -api_router.include_router(v0_router, prefix="/v0") api_router.include_router(v2_router, prefix=f"/{API_VTAG}") __all__ = ["api_router"] diff --git a/services/director-v2/src/simcore_service_director_v2/api/errors/http_error.py b/services/director-v2/src/simcore_service_director_v2/api/errors/http_error.py index cd6466b7033..5edfb25aa20 
100644 --- a/services/director-v2/src/simcore_service_director_v2/api/errors/http_error.py +++ b/services/director-v2/src/simcore_service_director_v2/api/errors/http_error.py @@ -1,4 +1,4 @@ -from typing import Callable, Type +from typing import Awaitable, Callable from fastapi import HTTPException from fastapi.encoders import jsonable_encoder @@ -6,15 +6,17 @@ from starlette.responses import JSONResponse -async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse: +async def http_error_handler(_: Request, exc: Exception) -> JSONResponse: + assert isinstance(exc, HTTPException) + return JSONResponse( content=jsonable_encoder({"errors": [exc.detail]}), status_code=exc.status_code ) def make_http_error_handler_for_exception( - status_code: int, exception_cls: Type[BaseException] -) -> Callable[[Request, Type[BaseException]], JSONResponse]: + status_code: int, exception_cls: type[BaseException] +) -> Callable[[Request, Exception], Awaitable[JSONResponse]]: """ Produces a handler for BaseException-type exceptions which converts them into an error JSON response with a given status code @@ -22,7 +24,7 @@ def make_http_error_handler_for_exception( SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions """ - async def _http_error_handler(_: Request, exc: Type[BaseException]) -> JSONResponse: + async def _http_error_handler(_: Request, exc: Exception) -> JSONResponse: assert isinstance(exc, exception_cls) # nosec return JSONResponse( content=jsonable_encoder({"errors": [str(exc)]}), status_code=status_code diff --git a/services/director-v2/src/simcore_service_director_v2/api/errors/validation_error.py b/services/director-v2/src/simcore_service_director_v2/api/errors/validation_error.py index fb70f6791ac..cbdc2243701 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/errors/validation_error.py +++ b/services/director-v2/src/simcore_service_director_v2/api/errors/validation_error.py @@ -1,5 +1,3 @@ -from typing import Union - from fastapi.encoders import jsonable_encoder from fastapi.exceptions import RequestValidationError from fastapi.openapi.constants import REF_PREFIX @@ -12,8 +10,10 @@ async def http422_error_handler( _: Request, - exc: Union[RequestValidationError, ValidationError], + exc: Exception, ) -> JSONResponse: + assert isinstance(exc, RequestValidationError | ValidationError) + return JSONResponse( content=jsonable_encoder({"errors": exc.errors()}), status_code=HTTP_422_UNPROCESSABLE_ENTITY, diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py b/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py deleted file mode 100644 index ae857d1c2c2..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/clusters.py +++ /dev/null @@ -1,248 +0,0 @@ -import logging -from asyncio.log import logger -from typing import Final - -from aiocache import cached -from fastapi import APIRouter, Depends, HTTPException -from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster, ClusterID -from models_library.users import UserID -from simcore_service_director_v2.api.dependencies.scheduler import ( - get_scheduler_settings, -) -from simcore_service_director_v2.utils.dask_client_utils import test_scheduler_endpoint -from starlette import status - -from ...core.errors import ( - ClusterInvalidOperationError, - ConfigurationError, - DaskClientAcquisisitonError, -) -from ...core.settings import ComputationalBackendSettings -from ...models.schemas.clusters import ( - 
ClusterCreate, - ClusterDetails, - ClusterDetailsGet, - ClusterGet, - ClusterPatch, - ClusterPing, -) -from ...modules.dask_clients_pool import DaskClientsPool -from ...modules.db.repositories.clusters import ClustersRepository -from ..dependencies.dask import get_dask_clients_pool -from ..dependencies.database import get_repository - -router = APIRouter() -log = logging.getLogger(__name__) - - -GET_CLUSTER_DETAILS_CACHING_TTL: Final[int] = 3 - - -def _build_cache_key(fct, *_, **kwargs): - return f"{fct.__name__}_{kwargs['cluster_id']}" - - -@cached(ttl=GET_CLUSTER_DETAILS_CACHING_TTL, key_builder=_build_cache_key) -async def _get_cluster_details_with_id( - settings: ComputationalBackendSettings, - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository, - dask_clients_pool: DaskClientsPool, -) -> ClusterDetails: - log.debug("Getting details for cluster '%s'", cluster_id) - cluster: Cluster = settings.default_cluster - if cluster_id != DEFAULT_CLUSTER_ID: - cluster = await clusters_repo.get_cluster(user_id, cluster_id) - async with dask_clients_pool.acquire(cluster) as client: - cluster_details = await client.get_cluster_details() - - return cluster_details - - -@router.post( - "", - summary="Create a new cluster for a user", - response_model=ClusterGet, - status_code=status.HTTP_201_CREATED, -) -async def create_cluster( - user_id: UserID, - new_cluster: ClusterCreate, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - return await clusters_repo.create_cluster(user_id, new_cluster) - - -@router.get("", summary="Lists clusters for user", response_model=list[ClusterGet]) -async def list_clusters( - user_id: UserID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - default_cluster = settings.default_cluster - return [default_cluster] + await clusters_repo.list_clusters(user_id) - - -@router.get( - "/default", - summary="Returns the default cluster", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async def get_default_cluster( - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - cluster = settings.default_cluster - return cluster - - -@router.get( - "/{cluster_id}", - summary="Get one cluster for user", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async def get_cluster( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - return await clusters_repo.get_cluster(user_id, cluster_id) - - -@router.patch( - "/{cluster_id}", - summary="Modify a cluster for user", - response_model=ClusterGet, - status_code=status.HTTP_200_OK, -) -async def update_cluster( - user_id: UserID, - cluster_id: ClusterID, - updated_cluster: ClusterPatch, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - try: - return await clusters_repo.update_cluster(user_id, cluster_id, updated_cluster) - except ClusterInvalidOperationError as e: - raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=f"{e}") from e - - -@router.delete( - "/{cluster_id}", - summary="Remove a cluster for user", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def delete_cluster( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - await clusters_repo.delete_cluster(user_id, 
cluster_id) - - -@router.get( - "/default/details", - summary="Returns the cluster details", - response_model=ClusterDetailsGet, - status_code=status.HTTP_200_OK, -) -async def get_default_cluster_details( - user_id: UserID, - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool), -): - - default_cluster = await _get_cluster_details_with_id( - settings=settings, - user_id=user_id, - cluster_id=DEFAULT_CLUSTER_ID, - clusters_repo=clusters_repo, - dask_clients_pool=dask_clients_pool, - ) - logger.debug("found followind %s", f"{default_cluster=!r}") - return default_cluster - - -@router.get( - "/{cluster_id}/details", - summary="Returns the cluster details", - response_model=ClusterDetailsGet, - status_code=status.HTTP_200_OK, -) -async def get_cluster_details( - user_id: UserID, - cluster_id: ClusterID, - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), - dask_clients_pool: DaskClientsPool = Depends(get_dask_clients_pool), -): - try: - cluster_details = await _get_cluster_details_with_id( - settings=settings, - user_id=user_id, - cluster_id=cluster_id, - clusters_repo=clusters_repo, - dask_clients_pool=dask_clients_pool, - ) - logger.debug("found following %s", f"{cluster_details=!r}") - return cluster_details - except DaskClientAcquisisitonError as exc: - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, detail=f"{exc}" - ) from exc - - -@router.post( - ":ping", - summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_cluster_connection( - cluster_auth: ClusterPing, -): - try: - return await test_scheduler_endpoint( - endpoint=cluster_auth.endpoint, authentication=cluster_auth.authentication - ) - - except ConfigurationError as e: - raise HTTPException( - status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=f"{e}" - ) from e - - -@router.post( - "/default:ping", - summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_default_cluster_connection( - settings: ComputationalBackendSettings = Depends(get_scheduler_settings), -): - cluster = settings.default_cluster - return await test_scheduler_endpoint( - endpoint=cluster.endpoint, authentication=cluster.authentication - ) - - -@router.post( - "/{cluster_id}:ping", - summary="Test cluster connection", - response_model=None, - status_code=status.HTTP_204_NO_CONTENT, -) -async def test_specific_cluster_connection( - user_id: UserID, - cluster_id: ClusterID, - clusters_repo: ClustersRepository = Depends(get_repository(ClustersRepository)), -): - cluster = await clusters_repo.get_cluster(user_id, cluster_id) - return await test_scheduler_endpoint( - endpoint=cluster.endpoint, authentication=cluster.authentication - ) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py index ab648b20353..9f096911030 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations.py @@ -1,4 +1,4 @@ -""" CRUD operations on a "computation" resource +"""CRUD operations on a "computation" resource A computation is a 
resource that represents a running pipeline of computational services in a give project Therefore, @@ -11,22 +11,34 @@ - the task ID is the same as the associated node uuid """ -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments +# pylint: disable=too-many-statements import contextlib import logging -from typing import Any, Optional +from datetime import timedelta +from typing import Annotated, Any, Final import networkx as nx -from fastapi import APIRouter, Depends, HTTPException -from models_library.clusters import DEFAULT_CLUSTER_ID +from fastapi import APIRouter, Depends, FastAPI, HTTPException +from models_library.api_schemas_directorv2.computations import ( + ComputationCreate, + ComputationDelete, + ComputationGet, + ComputationStop, +) from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState from models_library.services import ServiceKeyVersion from models_library.users import UserID from models_library.utils.fastapi_encoders import jsonable_encoder -from pydantic import AnyHttpUrl, parse_obj_as +from pydantic import AnyHttpUrl, TypeAdapter from servicelib.async_utils import run_sequentially_in_context +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQRPCClient +from simcore_postgres_database.utils_projects_metadata import DBProjectNotFoundError from starlette import status from starlette.requests import Request from tenacity import retry @@ -36,34 +48,32 @@ from tenacity.wait import wait_random from ...core.errors import ( + ClusterNotFoundError, + ClustersKeeperNotAvailableError, ComputationalRunNotFoundError, + ComputationalSchedulerError, + ConfigurationError, + PricingPlanUnitNotFoundError, ProjectNotFoundError, - SchedulerError, -) -from ...models.domains.comp_pipelines import CompPipelineAtDB -from ...models.domains.comp_runs import CompRunsAtDB -from ...models.domains.comp_tasks import CompTaskAtDB -from ...models.schemas.comp_tasks import ( - ComputationCreate, - ComputationDelete, - ComputationGet, - ComputationStop, + WalletNotEnoughCreditsError, ) +from ...models.comp_pipelines import CompPipelineAtDB +from ...models.comp_runs import CompRunsAtDB, ProjectMetadataDict, RunMetadataDict +from ...models.comp_tasks import CompTaskAtDB from ...modules.catalog import CatalogClient -from ...modules.comp_scheduler.base_scheduler import BaseCompScheduler +from ...modules.comp_scheduler import run_new_pipeline, stop_pipeline from ...modules.db.repositories.comp_pipelines import CompPipelinesRepository from ...modules.db.repositories.comp_runs import CompRunsRepository from ...modules.db.repositories.comp_tasks import CompTasksRepository from ...modules.db.repositories.projects import ProjectsRepository -from ...modules.director_v0 import DirectorV0Client -from ...utils.computations import ( - find_deprecated_tasks, - get_pipeline_state_from_task_states, - is_pipeline_running, - is_pipeline_stopped, -) +from ...modules.db.repositories.projects_metadata import ProjectsMetadataRepository +from ...modules.db.repositories.users import UsersRepository +from ...modules.resource_usage_tracker_client import ResourceUsageTrackerClient +from ...utils import computations as utils from ...utils.dags import ( compute_pipeline_details, + compute_pipeline_started_timestamp, + compute_pipeline_stopped_timestamp, create_complete_dag, create_complete_dag_from_tasks, create_minimal_computational_graph_based_on_selection, @@ 
-71,39 +81,217 @@ ) from ..dependencies.catalog import get_catalog_client from ..dependencies.database import get_repository -from ..dependencies.director_v0 import get_director_v0_client -from ..dependencies.scheduler import get_scheduler +from ..dependencies.rabbitmq import rabbitmq_rpc_client +from ..dependencies.rut_client import get_rut_client from .computations_tasks import analyze_pipeline -PIPELINE_ABORT_TIMEOUT_S = 10 +_PIPELINE_ABORT_TIMEOUT_S: Final[timedelta] = timedelta(seconds=30) -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) router = APIRouter() +async def _check_pipeline_not_running_or_raise_409( + comp_tasks_repo: CompTasksRepository, computation: ComputationCreate +) -> None: + pipeline_state = utils.get_pipeline_state_from_task_states( + await comp_tasks_repo.list_computational_tasks(computation.project_id) + ) + if utils.is_pipeline_running(pipeline_state): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Project {computation.project_id} already started, current state is {pipeline_state}", + ) + + +async def _check_pipeline_startable( + pipeline_dag: nx.DiGraph, + computation: ComputationCreate, + catalog_client: CatalogClient, +) -> None: + assert computation.product_name # nosec + if deprecated_tasks := await utils.find_deprecated_tasks( + computation.user_id, + computation.product_name, + [ + ServiceKeyVersion(key=node[1]["key"], version=node[1]["version"]) + for node in pipeline_dag.nodes.data() + ], + catalog_client, + ): + raise HTTPException( + status_code=status.HTTP_406_NOT_ACCEPTABLE, + detail=f"Project {computation.project_id} cannot run since it contains deprecated tasks {jsonable_encoder(deprecated_tasks)}", + ) + + +_UNKNOWN_NODE: Final[str] = "unknown node" + + +@log_decorator(_logger) +async def _get_project_metadata( + project_id: ProjectID, + project_repo: ProjectsRepository, + projects_metadata_repo: ProjectsMetadataRepository, +) -> ProjectMetadataDict: + try: + project_ancestors = await projects_metadata_repo.get_project_ancestors( + project_id + ) + if project_ancestors.parent_project_uuid is None: + _logger.debug("no parent found for project %s", project_id) + return {} + + assert project_ancestors.parent_node_id is not None # nosec + assert project_ancestors.root_project_uuid is not None # nosec + assert project_ancestors.root_node_id is not None # nosec + + async def _get_project_node_names( + project_uuid: ProjectID, node_id: NodeID + ) -> tuple[str, str]: + prj = await project_repo.get_project(project_uuid) + node_id_str = f"{node_id}" + if node_id_str not in prj.workbench: + _logger.error( + "%s not found in %s. it is an ancestor of %s. 
Please check!", + f"{node_id=}", + f"{prj.uuid=}", + f"{project_id=}", + ) + return prj.name, _UNKNOWN_NODE + return prj.name, prj.workbench[node_id_str].label + + parent_project_name, parent_node_name = await _get_project_node_names( + project_ancestors.parent_project_uuid, project_ancestors.parent_node_id + ) + root_parent_project_name, root_parent_node_name = await _get_project_node_names( + project_ancestors.root_project_uuid, project_ancestors.root_node_id + ) + return ProjectMetadataDict( + parent_node_id=project_ancestors.parent_node_id, + parent_node_name=parent_node_name, + parent_project_id=project_ancestors.parent_project_uuid, + parent_project_name=parent_project_name, + root_parent_node_id=project_ancestors.root_node_id, + root_parent_node_name=root_parent_node_name, + root_parent_project_id=project_ancestors.root_project_uuid, + root_parent_project_name=root_parent_project_name, + ) + + except DBProjectNotFoundError: + _logger.exception("Could not find project: %s", f"{project_id=}") + except ProjectNotFoundError as exc: + _logger.exception( + "Could not find parent project: %s", exc.error_context().get("project_id") + ) + + return {} + + +async def _try_start_pipeline( + app: FastAPI, + *, + project_repo: ProjectsRepository, + computation: ComputationCreate, + complete_dag: nx.DiGraph, + minimal_dag: nx.DiGraph, + project: ProjectAtDB, + users_repo: UsersRepository, + projects_metadata_repo: ProjectsMetadataRepository, +) -> None: + if not minimal_dag.nodes(): + # 2 options here: either we have cycles in the graph or it's really done + if find_computational_node_cycles(complete_dag): + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Project {computation.project_id} contains cycles with computational services which are currently not supported! 
Please remove them.", + ) + # there is nothing else to be run here, so we are done + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Project {computation.project_id} has no computational services", + ) + + # Billing info + wallet_id = None + wallet_name = None + if computation.wallet_info: + wallet_id = computation.wallet_info.wallet_id + wallet_name = computation.wallet_info.wallet_name + + await run_new_pipeline( + app, + user_id=computation.user_id, + project_id=computation.project_id, + run_metadata=RunMetadataDict( + node_id_names_map={ + NodeID(node_idstr): node_data.label + for node_idstr, node_data in project.workbench.items() + }, + product_name=computation.product_name, + project_name=project.name, + simcore_user_agent=computation.simcore_user_agent, + user_email=await users_repo.get_user_email(computation.user_id), + wallet_id=wallet_id, + wallet_name=wallet_name, + project_metadata=await _get_project_metadata( + computation.project_id, project_repo, projects_metadata_repo + ), + ) + or {}, + use_on_demand_clusters=computation.use_on_demand_clusters, + ) + + @router.post( "", - summary="Create and optionally start a new computation", + description="Create and optionally start a new computation", response_model=ComputationGet, status_code=status.HTTP_201_CREATED, + responses={ + status.HTTP_404_NOT_FOUND: { + "description": "Project or pricing details not found", + }, + status.HTTP_406_NOT_ACCEPTABLE: { + "description": "Cluster not found", + }, + status.HTTP_503_SERVICE_UNAVAILABLE: { + "description": "Service not available", + }, + status.HTTP_422_UNPROCESSABLE_ENTITY: { + "description": "Configuration error", + }, + status.HTTP_402_PAYMENT_REQUIRED: {"description": "Payment required"}, + status.HTTP_409_CONFLICT: {"description": "Project already started"}, + }, ) # NOTE: in case of a burst of calls to that endpoint, we might end up in a weird state. 
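Editor's note: to make the NOTE above concrete, a simplified approximation (not the servicelib implementation) of the guarantee run_sequentially_in_context provides here: calls sharing the same project_id are serialized behind a per-key lock, while different projects still proceed concurrently.

import asyncio
from collections import defaultdict

_locks: defaultdict[str, asyncio.Lock] = defaultdict(asyncio.Lock)


async def create_computation_for(project_id: str) -> None:
    async with _locks[project_id]:
        # only one creation per project runs at a time, so a burst of POSTs
        # for the same project cannot interleave and leave the pipeline in a
        # half-created state
        await asyncio.sleep(0)  # placeholder for the real work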
@run_sequentially_in_context(target_args=["computation.project_id"]) -async def create_computation( +async def create_computation( # noqa: PLR0913 # pylint: disable=too-many-positional-arguments computation: ComputationCreate, request: Request, - project_repo: ProjectsRepository = Depends(get_repository(ProjectsRepository)), - comp_pipelines_repo: CompPipelinesRepository = Depends( - get_repository(CompPipelinesRepository) - ), - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), - comp_runs_repo: CompRunsRepository = Depends(get_repository(CompRunsRepository)), - director_client: DirectorV0Client = Depends(get_director_v0_client), - scheduler: BaseCompScheduler = Depends(get_scheduler), - catalog_client: CatalogClient = Depends(get_catalog_client), + project_repo: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], + comp_pipelines_repo: Annotated[ + CompPipelinesRepository, Depends(get_repository(CompPipelinesRepository)) + ], + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], + comp_runs_repo: Annotated[ + CompRunsRepository, Depends(get_repository(CompRunsRepository)) + ], + users_repo: Annotated[UsersRepository, Depends(get_repository(UsersRepository))], + projects_metadata_repo: Annotated[ + ProjectsMetadataRepository, Depends(get_repository(ProjectsMetadataRepository)) + ], + catalog_client: Annotated[CatalogClient, Depends(get_catalog_client)], + rut_client: Annotated[ResourceUsageTrackerClient, Depends(get_rut_client)], + rpc_client: Annotated[RabbitMQRPCClient, Depends(rabbitmq_rpc_client)], ) -> ComputationGet: - log.debug( + _logger.debug( "User %s is creating a new computation from project %s", f"{computation.user_id=}", f"{computation.project_id=}", @@ -112,18 +300,8 @@ async def create_computation( # get the project project: ProjectAtDB = await project_repo.get_project(computation.project_id) - # FIXME: this could not be valid anymore if the user deletes the project in between right? 
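Editor's note: the complete DAG built from project.workbench a few lines below follows the usual networkx pattern; this is a hedged, self-contained sketch of the idea only (the workbench dict and port names are invented, and create_complete_dag's real implementation may differ):

import networkx as nx

workbench = {
    "node-a": {"key": "simcore/services/comp/x", "version": "1.0.0", "inputs": {}},
    "node-b": {
        "key": "simcore/services/comp/y",
        "version": "1.0.0",
        "inputs": {"in_1": {"nodeUuid": "node-a", "output": "out_1"}},
    },
}

dag = nx.DiGraph()
for node_id, node in workbench.items():
    dag.add_node(node_id, key=node["key"], version=node["version"])
for node_id, node in workbench.items():
    for port_value in node["inputs"].values():
        if isinstance(port_value, dict) and "nodeUuid" in port_value:
            # an input wired to another node's output becomes a dependency edge
            dag.add_edge(port_value["nodeUuid"], node_id)

assert list(nx.topological_sort(dag)) == ["node-a", "node-b"]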
- # check if current state allow to modify the computation - comp_tasks: list[CompTaskAtDB] = await comp_tasks_repo.get_comp_tasks( - computation.project_id - ) - pipeline_state = get_pipeline_state_from_task_states(comp_tasks) - if is_pipeline_running(pipeline_state): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail=f"Project {computation.project_id} already started, current state is {pipeline_state}", - ) + await _check_pipeline_not_running_or_raise_409(comp_tasks_repo, computation) # create the complete DAG graph complete_dag = create_complete_dag(project.workbench) @@ -137,20 +315,10 @@ async def create_computation( ) if computation.start_pipeline: - assert computation.product_name # nosec - if deprecated_tasks := await find_deprecated_tasks( - computation.user_id, - computation.product_name, - [ - ServiceKeyVersion(key=node[1]["key"], version=node[1]["version"]) - for node in minimal_computational_dag.nodes.data() - ], - catalog_client, - ): - raise HTTPException( - status_code=status.HTTP_406_NOT_ACCEPTABLE, - detail=f"Project {computation.project_id} cannot run since it contains deprecated tasks {jsonable_encoder( deprecated_tasks)}", - ) + await _check_pipeline_startable( + minimal_computational_dag, computation, catalog_client + ) + # ok so put the tasks in the db await comp_pipelines_repo.upsert_pipeline( project.uuid, @@ -158,48 +326,42 @@ async def create_computation( publish=computation.start_pipeline or False, ) assert computation.product_name # nosec - inserted_comp_tasks = await comp_tasks_repo.upsert_tasks_from_project( - project, - catalog_client, - director_client, - published_nodes=list(minimal_computational_dag.nodes()) - if computation.start_pipeline - else [], + min_computation_nodes: list[NodeID] = [ + NodeID(n) for n in minimal_computational_dag.nodes() + ] + comp_tasks = await comp_tasks_repo.upsert_tasks_from_project( + project=project, + catalog_client=catalog_client, + published_nodes=min_computation_nodes if computation.start_pipeline else [], user_id=computation.user_id, product_name=computation.product_name, + rut_client=rut_client, + wallet_info=computation.wallet_info, + rabbitmq_rpc_client=rpc_client, ) if computation.start_pipeline: - if not minimal_computational_dag.nodes(): - # 2 options here: either we have cycles in the graph or it's really done - list_of_cycles = find_computational_node_cycles(complete_dag) - if list_of_cycles: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail=f"Project {computation.project_id} contains cycles with computational services which are currently not supported! 
Please remove them.", - ) - # there is nothing else to be run here, so we are done - raise HTTPException( - status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, - detail=f"Project {computation.project_id} has no computational services", - ) - - await scheduler.run_new_pipeline( - computation.user_id, - computation.project_id, - computation.cluster_id or DEFAULT_CLUSTER_ID, + await _try_start_pipeline( + request.app, + project_repo=project_repo, + computation=computation, + complete_dag=complete_dag, + minimal_dag=minimal_computational_dag, + project=project, + users_repo=users_repo, + projects_metadata_repo=projects_metadata_repo, ) # filter the tasks by the effective pipeline filtered_tasks = [ t - for t in inserted_comp_tasks + for t in comp_tasks if f"{t.node_id}" in set(minimal_computational_dag.nodes()) ] - pipeline_state = get_pipeline_state_from_task_states(filtered_tasks) + pipeline_state = utils.get_pipeline_state_from_task_states(filtered_tasks) # get run details if any - last_run: Optional[CompRunsAtDB] = None + last_run: CompRunsAtDB | None = None with contextlib.suppress(ComputationalRunNotFoundError): last_run = await comp_runs_repo.get( user_id=computation.user_id, project_id=computation.project_id @@ -209,30 +371,54 @@ async def create_computation( id=computation.project_id, state=pipeline_state, pipeline_details=await compute_pipeline_details( - complete_dag, minimal_computational_dag, inserted_comp_tasks + complete_dag, minimal_computational_dag, comp_tasks ), - url=parse_obj_as( - AnyHttpUrl, + url=TypeAdapter(AnyHttpUrl).validate_python( f"{request.url}/{computation.project_id}?user_id={computation.user_id}", ), - stop_url=parse_obj_as( - AnyHttpUrl, - f"{request.url}/{computation.project_id}:stop?user_id={computation.user_id}", - ) - if computation.start_pipeline - else None, + stop_url=( + TypeAdapter(AnyHttpUrl).validate_python( + f"{request.url}/{computation.project_id}:stop?user_id={computation.user_id}", + ) + if computation.start_pipeline + else None + ), iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, + started=compute_pipeline_started_timestamp( + minimal_computational_dag, comp_tasks + ), + stopped=compute_pipeline_stopped_timestamp( + minimal_computational_dag, comp_tasks + ), + submitted=last_run.created if last_run else None, ) except ProjectNotFoundError as e: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"{e}") from e + except ClusterNotFoundError as e: + raise HTTPException( + status_code=status.HTTP_406_NOT_ACCEPTABLE, detail=f"{e}" + ) from e + except PricingPlanUnitNotFoundError as e: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"{e}") from e + except ClustersKeeperNotAvailableError as e: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=f"{e}" + ) from e + except ConfigurationError as e: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=f"{e}" + ) from e + except WalletNotEnoughCreditsError as e: + raise HTTPException( + status_code=status.HTTP_402_PAYMENT_REQUIRED, detail=f"{e}" + ) from e @router.get( "/{project_id}", - summary="Returns a computation pipeline state", + description="Returns a computation pipeline state", response_model=ComputationGet, status_code=status.HTTP_200_OK, ) @@ -240,14 +426,20 @@ async def get_computation( user_id: UserID, project_id: ProjectID, request: Request, - project_repo: ProjectsRepository = Depends(get_repository(ProjectsRepository)), - 
comp_pipelines_repo: CompPipelinesRepository = Depends( - get_repository(CompPipelinesRepository) - ), - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), - comp_runs_repo: CompRunsRepository = Depends(get_repository(CompRunsRepository)), + project_repo: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], + comp_pipelines_repo: Annotated[ + CompPipelinesRepository, Depends(get_repository(CompPipelinesRepository)) + ], + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], + comp_runs_repo: Annotated[ + CompRunsRepository, Depends(get_repository(CompRunsRepository)) + ], ) -> ComputationGet: - log.debug( + _logger.debug( "User %s getting computation status for project %s", f"{user_id=}", f"{project_id=}", @@ -260,9 +452,11 @@ async def get_computation( project_id, comp_pipelines_repo, comp_tasks_repo ) - pipeline_state = get_pipeline_state_from_task_states(filtered_tasks) + pipeline_state: RunningState = utils.get_pipeline_state_from_task_states( + filtered_tasks + ) - log.debug( + _logger.debug( "Computational task status by %s for %s has %s", f"{user_id=}", f"{project_id=}", @@ -276,29 +470,34 @@ async def get_computation( ) # get run details if any - last_run: Optional[CompRunsAtDB] = None + last_run: CompRunsAtDB | None = None with contextlib.suppress(ComputationalRunNotFoundError): last_run = await comp_runs_repo.get(user_id=user_id, project_id=project_id) self_url = request.url.remove_query_params("user_id") - task_out = ComputationGet( + return ComputationGet( id=project_id, state=pipeline_state, pipeline_details=pipeline_details, - url=parse_obj_as(AnyHttpUrl, f"{request.url}"), - stop_url=parse_obj_as(AnyHttpUrl, f"{self_url}:stop?user_id={user_id}") - if pipeline_state.is_running() - else None, + url=TypeAdapter(AnyHttpUrl).validate_python(f"{request.url}"), + stop_url=( + TypeAdapter(AnyHttpUrl).validate_python( + f"{self_url}:stop?user_id={user_id}" + ) + if pipeline_state.is_running() + else None + ), iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, + started=compute_pipeline_started_timestamp(pipeline_dag, all_tasks), + stopped=compute_pipeline_stopped_timestamp(pipeline_dag, all_tasks), + submitted=last_run.created if last_run else None, ) - return task_out @router.post( "/{project_id}:stop", - summary="Stops a computation pipeline", + description="Stops a computation pipeline", response_model=ComputationGet, status_code=status.HTTP_202_ACCEPTED, ) @@ -306,15 +505,20 @@ async def stop_computation( computation_stop: ComputationStop, project_id: ProjectID, request: Request, - project_repo: ProjectsRepository = Depends(get_repository(ProjectsRepository)), - comp_pipelines_repo: CompPipelinesRepository = Depends( - get_repository(CompPipelinesRepository) - ), - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), - comp_runs_repo: CompRunsRepository = Depends(get_repository(CompRunsRepository)), - scheduler: BaseCompScheduler = Depends(get_scheduler), + project_repo: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], + comp_pipelines_repo: Annotated[ + CompPipelinesRepository, Depends(get_repository(CompPipelinesRepository)) + ], + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], + comp_runs_repo: Annotated[ + CompRunsRepository, Depends(get_repository(CompRunsRepository)) + ], ) -> 
ComputationGet: - log.debug( + _logger.debug( "User %s stopping computation for project %s", computation_stop.user_id, project_id, @@ -328,20 +532,22 @@ async def stop_computation( ) pipeline_dag: nx.DiGraph = pipeline_at_db.get_graph() # get the project task states - tasks: list[CompTaskAtDB] = await comp_tasks_repo.get_all_tasks(project_id) + tasks: list[CompTaskAtDB] = await comp_tasks_repo.list_tasks(project_id) # create the complete DAG graph complete_dag = create_complete_dag_from_tasks(tasks) # filter the tasks by the effective pipeline filtered_tasks = [ t for t in tasks if f"{t.node_id}" in set(pipeline_dag.nodes()) ] - pipeline_state = get_pipeline_state_from_task_states(filtered_tasks) + pipeline_state = utils.get_pipeline_state_from_task_states(filtered_tasks) - if is_pipeline_running(pipeline_state): - await scheduler.stop_pipeline(computation_stop.user_id, project_id) + if utils.is_pipeline_running(pipeline_state): + await stop_pipeline( + request.app, user_id=computation_stop.user_id, project_id=project_id + ) # get run details if any - last_run: Optional[CompRunsAtDB] = None + last_run: CompRunsAtDB | None = None with contextlib.suppress(ComputationalRunNotFoundError): last_run = await comp_runs_repo.get( user_id=computation_stop.user_id, project_id=project_id @@ -353,44 +559,50 @@ async def stop_computation( pipeline_details=await compute_pipeline_details( complete_dag, pipeline_dag, tasks ), - url=parse_obj_as(AnyHttpUrl, f"{request.url}"), + url=TypeAdapter(AnyHttpUrl).validate_python(f"{request.url}"), stop_url=None, iteration=last_run.iteration if last_run else None, - cluster_id=last_run.cluster_id if last_run else None, result=None, + started=compute_pipeline_started_timestamp(pipeline_dag, tasks), + stopped=compute_pipeline_stopped_timestamp(pipeline_dag, tasks), + submitted=last_run.created if last_run else None, ) except ProjectNotFoundError as e: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"{e}") from e - except SchedulerError as e: + except ComputationalSchedulerError as e: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"{e}") from e @router.delete( "/{project_id}", - summary="Deletes a computation pipeline", + description="Deletes a computation pipeline", response_model=None, status_code=status.HTTP_204_NO_CONTENT, ) async def delete_computation( computation_stop: ComputationDelete, project_id: ProjectID, - project_repo: ProjectsRepository = Depends(get_repository(ProjectsRepository)), - comp_pipelines_repo: CompPipelinesRepository = Depends( - get_repository(CompPipelinesRepository) - ), - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), - scheduler: BaseCompScheduler = Depends(get_scheduler), + request: Request, + project_repo: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], + comp_pipelines_repo: Annotated[ + CompPipelinesRepository, Depends(get_repository(CompPipelinesRepository)) + ], + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], ) -> None: try: # get the project project: ProjectAtDB = await project_repo.get_project(project_id) # check if current state allow to stop the computation - comp_tasks: list[CompTaskAtDB] = await comp_tasks_repo.get_comp_tasks( + comp_tasks: list[CompTaskAtDB] = await comp_tasks_repo.list_computational_tasks( project_id ) - pipeline_state = get_pipeline_state_from_task_states(comp_tasks) - if is_pipeline_running(pipeline_state): + pipeline_state = 
utils.get_pipeline_state_from_task_states(comp_tasks) + if utils.is_pipeline_running(pipeline_state): if not computation_stop.force: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, @@ -398,9 +610,11 @@ async def delete_computation( ) # abort the pipeline first try: - await scheduler.stop_pipeline(computation_stop.user_id, project_id) - except SchedulerError as e: - log.warning( + await stop_pipeline( + request.app, user_id=computation_stop.user_id, project_id=project_id + ) + except ComputationalSchedulerError as e: + _logger.warning( "Project %s could not be stopped properly.\n reason: %s", project_id, e, @@ -411,32 +625,32 @@ def return_last_value(retry_state: Any) -> Any: return retry_state.outcome.result() @retry( - stop=stop_after_delay(PIPELINE_ABORT_TIMEOUT_S), + stop=stop_after_delay(_PIPELINE_ABORT_TIMEOUT_S.total_seconds()), wait=wait_random(0, 2), retry_error_callback=return_last_value, retry=retry_if_result(lambda result: result is False), reraise=False, - before_sleep=before_sleep_log(log, logging.INFO), + before_sleep=before_sleep_log(_logger, logging.INFO), ) async def check_pipeline_stopped() -> bool: - comp_tasks: list[CompTaskAtDB] = await comp_tasks_repo.get_comp_tasks( - project_id + comp_tasks: list[CompTaskAtDB] = ( + await comp_tasks_repo.list_computational_tasks(project_id) ) - pipeline_state = get_pipeline_state_from_task_states( + pipeline_state = utils.get_pipeline_state_from_task_states( comp_tasks, ) - return is_pipeline_stopped(pipeline_state) + return utils.is_pipeline_stopped(pipeline_state) # wait for the pipeline to be stopped if not await check_pipeline_stopped(): - log.error( - "pipeline %s could not be stopped properly after %ss", + _logger.error( + "pipeline %s could not be stopped properly after %s", project_id, - PIPELINE_ABORT_TIMEOUT_S, + _PIPELINE_ABORT_TIMEOUT_S, ) # delete the pipeline now - await comp_tasks_repo.delete_tasks_from_project(project) + await comp_tasks_repo.delete_tasks_from_project(project.uuid) await comp_pipelines_repo.delete_pipeline(project_id) except ProjectNotFoundError as e: diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/computations_tasks.py b/services/director-v2/src/simcore_service_director_v2/api/routes/computations_tasks.py index 2b728da08e0..45f24d13835 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/computations_tasks.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/computations_tasks.py @@ -1,4 +1,4 @@ -""" CRUD operations on a computation's tasks sub-resource +"""CRUD operations on a computation's tasks sub-resource A task is computation sub-resource that respresents a running computational service in the pipeline described above Therefore, @@ -7,24 +7,26 @@ """ import logging -from typing import NamedTuple +from typing import Annotated, NamedTuple import networkx as nx from fastapi import APIRouter, Depends, HTTPException +from models_library.api_schemas_directorv2.computations import ( + TaskLogFileGet, + TasksOutputs, + TasksSelection, +) from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.users import UserID from servicelib.utils import logged_gather -from simcore_sdk.node_ports_common.exceptions import NodeportsException -from simcore_sdk.node_ports_v2 import FileLinkType from starlette import status -from ...models.domains.comp_pipelines import CompPipelineAtDB -from ...models.domains.comp_tasks import CompTaskAtDB -from ...models.schemas.comp_tasks 
import TaskLogFileGet +from ...models.comp_pipelines import CompPipelineAtDB +from ...models.comp_tasks import CompTaskAtDB from ...modules.db.repositories.comp_pipelines import CompPipelinesRepository from ...modules.db.repositories.comp_tasks import CompTasksRepository -from ...utils.dask import get_service_log_file_download_link +from ...utils import dask as dask_utils from ..dependencies.database import get_repository log = logging.getLogger(__name__) @@ -60,7 +62,7 @@ async def analyze_pipeline( pipeline_dag: nx.DiGraph = pipeline_at_db.get_graph() # get the project task states - all_tasks: list[CompTaskAtDB] = await comp_tasks_repo.get_all_tasks(project_id) + all_tasks: list[CompTaskAtDB] = await comp_tasks_repo.list_tasks(project_id) # filter the tasks by the effective pipeline filtered_tasks = [ @@ -77,32 +79,6 @@ async def analyze_pipeline( return PipelineInfo(pipeline_dag, all_tasks, filtered_tasks) -async def _get_task_log_file( - user_id: UserID, project_id: ProjectID, node_id: NodeID -) -> TaskLogFileGet: - try: - - log_file_url = await get_service_log_file_download_link( - user_id, project_id, node_id, file_link_type=FileLinkType.PRESIGNED - ) - - except NodeportsException as err: - # Unexpected error: Cannot determine the cause of failure - # to get donwload link and cannot handle it automatically. - # Will treat it as "not available" and log a warning - log_file_url = None - log.warning( - "Failed to get log-file of %s: %s.", - f"{user_id=}/{project_id=}/{node_id=}", - err, - ) - - return TaskLogFileGet( - task_id=node_id, - download_link=log_file_url, - ) - - # ROUTES HANDLERS -------------------------------------------------------------- @@ -114,10 +90,12 @@ async def _get_task_log_file( async def get_all_tasks_log_files( user_id: UserID, project_id: ProjectID, - comp_pipelines_repo: CompPipelinesRepository = Depends( - get_repository(CompPipelinesRepository) - ), - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), + comp_pipelines_repo: Annotated[ + CompPipelinesRepository, Depends(get_repository(CompPipelinesRepository)) + ], + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], ) -> list[TaskLogFileGet]: """Returns download links to log-files of each task in a computation. Each log is only available when the corresponding task is done @@ -126,14 +104,15 @@ async def get_all_tasks_log_files( info = await analyze_pipeline(project_id, comp_pipelines_repo, comp_tasks_repo) iter_task_ids = (t.node_id for t in info.filtered_tasks) - return await logged_gather( + tasks_logs_files: list[TaskLogFileGet] = await logged_gather( *[ - _get_task_log_file(user_id, project_id, node_id) + dask_utils.get_task_log_file(user_id, project_id, node_id) for node_id in iter_task_ids ], reraise=True, log=log, ) + return tasks_logs_files @router.get( @@ -145,37 +124,45 @@ async def get_task_log_file( user_id: UserID, project_id: ProjectID, node_uuid: NodeID, - comp_tasks_repo: CompTasksRepository = Depends(get_repository(CompTasksRepository)), + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], ) -> TaskLogFileGet: """Returns a link to download logs file of a give task. 
The log is only available when the task is done """ - if not await comp_tasks_repo.check_task_exists(project_id, node_uuid): + if not await comp_tasks_repo.task_exists(project_id, node_uuid): raise HTTPException( status.HTTP_404_NOT_FOUND, detail=[f"No task_id={node_uuid} found under computation {project_id}"], ) - return await _get_task_log_file(user_id, project_id, node_uuid) + return await dask_utils.get_task_log_file(user_id, project_id, node_uuid) -# NOTE: This handler function is NOT ACTIVE -# but still kept as reference for future extensions that will tackle -# real-time log streaming (instead of logfile download) -# -# @router.get( -# "/{project_id}/tasks/{node_uuid}/logs", -# summary="Gets computation task log", -# ) -# - Implement close as possible to https://docs.docker.com/engine/api/v1.41/#operation/ContainerTop -async def get_task_logs( - user_id: UserID, +@router.post( + "/{project_id}/tasks/-/outputs:batchGet", + summary="Gets all outputs for selected tasks", + response_model=TasksOutputs, + responses={ + status.HTTP_404_NOT_FOUND: { + "description": "Cannot find computation or the tasks in it" + } + }, +) +async def get_batch_tasks_outputs( project_id: ProjectID, - node_uuid: NodeID, -) -> str: - """Gets ``stdout`` and ``stderr`` logs from a computation task. - It can return a list of the tail or stream live - """ + selection: TasksSelection, + comp_tasks_repo: Annotated[ + CompTasksRepository, Depends(get_repository(CompTasksRepository)) + ], +): + nodes_outputs = await comp_tasks_repo.get_outputs_from_tasks( + project_id, set(selection.nodes_ids) + ) + + if not nodes_outputs: + raise HTTPException(status.HTTP_404_NOT_FOUND) - raise NotImplementedError(f"/{project_id=}/tasks/{node_uuid=}/logs") + return TasksOutputs(nodes_outputs=nodes_outputs) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_scheduler.py b/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_scheduler.py index 17e2be1991f..dadfdc3cdfc 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_scheduler.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_scheduler.py @@ -1,6 +1,9 @@ +import logging +from typing import Annotated, Final + from fastapi import APIRouter, Depends, HTTPException, status -from models_library.projects_nodes import NodeID -from pydantic import BaseModel +from models_library.projects_nodes_io import NodeID +from pydantic import BaseModel, PositiveInt from servicelib.fastapi.long_running_tasks.client import ( ProgressMessage, ProgressPercent, @@ -13,11 +16,21 @@ get_tasks_manager, start_task, ) +from tenacity import retry +from tenacity.before_sleep import before_sleep_log +from tenacity.retry import retry_if_result +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_random_exponential +from ...models.dynamic_services_scheduler import SchedulerData from ...modules.dynamic_sidecar.scheduler import DynamicSidecarsScheduler from ...utils.routes import NoContentResponse from ..dependencies.dynamic_sidecar import get_dynamic_sidecar_scheduler +_logger = logging.getLogger(__name__) + +_MINUTE: Final[PositiveInt] = 60 + class ObservationItem(BaseModel): is_disabled: bool @@ -26,6 +39,23 @@ class ObservationItem(BaseModel): router = APIRouter() +@retry( + wait=wait_random_exponential(max=10), + stop=stop_after_delay(1 * _MINUTE), + retry=retry_if_result(lambda result: result is False), + reraise=False, + before_sleep=before_sleep_log(_logger, logging.WARNING, 
exc_info=True), +) +def _toggle_observation_succeeded( + dynamic_sidecars_scheduler: DynamicSidecarsScheduler, + node_uuid: NodeID, + *, + is_disabled: bool, +) -> bool: + # returns True if the `toggle_observation` operation succeeded + return dynamic_sidecars_scheduler.toggle_observation(node_uuid, disable=is_disabled) + + @router.patch( "/services/{node_uuid}/observation", summary="Enable/disable observation of the service", @@ -34,12 +64,14 @@ class ObservationItem(BaseModel): async def update_service_observation( node_uuid: NodeID, observation_item: ObservationItem, - dynamic_sidecars_scheduler: DynamicSidecarsScheduler = Depends( - get_dynamic_sidecar_scheduler - ), + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], ) -> NoContentResponse: - if dynamic_sidecars_scheduler.toggle_observation( - node_uuid, observation_item.is_disabled + if _toggle_observation_succeeded( + dynamic_sidecars_scheduler=dynamic_sidecars_scheduler, + node_uuid=node_uuid, + is_disabled=observation_item.is_disabled, ): return NoContentResponse() @@ -62,16 +94,16 @@ async def update_service_observation( ) async def delete_service_containers( node_uuid: NodeID, - tasks_manager: TasksManager = Depends(get_tasks_manager), - dynamic_sidecars_scheduler: DynamicSidecarsScheduler = Depends( - get_dynamic_sidecar_scheduler - ), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], ): async def _task_remove_service_containers( task_progress: TaskProgress, node_uuid: NodeID ) -> None: async def _progress_callback( - message: ProgressMessage, percent: ProgressPercent, _: TaskId + message: ProgressMessage, percent: ProgressPercent | None, _: TaskId ) -> None: task_progress.update(message=message, percent=percent) @@ -80,17 +112,33 @@ async def _progress_callback( ) try: - task_id = start_task( + return start_task( tasks_manager, - task=_task_remove_service_containers, + task=_task_remove_service_containers, # type: ignore[arg-type] unique=True, node_uuid=node_uuid, ) - return task_id except TaskAlreadyRunningError as e: raise HTTPException(status.HTTP_409_CONFLICT, detail=f"{e}") from e +@router.get( + "/services/{node_uuid}/state", + summary="Returns the internals of the scheduler for the given service", + status_code=status.HTTP_200_OK, + response_model=SchedulerData, +) +async def get_service_state( + node_uuid: NodeID, + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], +): + return dynamic_sidecars_scheduler.scheduler.get_scheduler_data( # noqa: SLF001 + node_uuid + ) + + @router.post( "/services/{node_uuid}/state:save", summary="Starts the saving of the state for the service", @@ -104,17 +152,17 @@ async def _progress_callback( ) async def save_service_state( node_uuid: NodeID, - tasks_manager: TasksManager = Depends(get_tasks_manager), - dynamic_sidecars_scheduler: DynamicSidecarsScheduler = Depends( - get_dynamic_sidecar_scheduler - ), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], ): async def _task_save_service_state( task_progress: TaskProgress, node_uuid: NodeID, ) -> None: async def _progress_callback( - message: ProgressMessage, percent: ProgressPercent, _: TaskId + message: ProgressMessage, percent: ProgressPercent | None, _: 
TaskId ) -> None: task_progress.update(message=message, percent=percent) @@ -123,13 +171,12 @@ async def _progress_callback( ) try: - task_id = start_task( + return start_task( tasks_manager, - task=_task_save_service_state, + task=_task_save_service_state, # type: ignore[arg-type] unique=True, node_uuid=node_uuid, ) - return task_id except TaskAlreadyRunningError as e: raise HTTPException(status.HTTP_409_CONFLICT, detail=f"{e}") from e @@ -147,16 +194,16 @@ async def _progress_callback( ) async def push_service_outputs( node_uuid: NodeID, - tasks_manager: TasksManager = Depends(get_tasks_manager), - dynamic_sidecars_scheduler: DynamicSidecarsScheduler = Depends( - get_dynamic_sidecar_scheduler - ), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], ): async def _task_push_service_outputs( task_progress: TaskProgress, node_uuid: NodeID ) -> None: async def _progress_callback( - message: ProgressMessage, percent: ProgressPercent, _: TaskId + message: ProgressMessage, percent: ProgressPercent | None, _: TaskId ) -> None: task_progress.update(message=message, percent=percent) @@ -165,13 +212,12 @@ async def _progress_callback( ) try: - task_id = start_task( + return start_task( tasks_manager, - task=_task_push_service_outputs, + task=_task_push_service_outputs, # type: ignore[arg-type] unique=True, node_uuid=node_uuid, ) - return task_id except TaskAlreadyRunningError as e: raise HTTPException(status.HTTP_409_CONFLICT, detail=f"{e}") from e @@ -189,10 +235,10 @@ async def _progress_callback( ) async def delete_service_docker_resources( node_uuid: NodeID, - tasks_manager: TasksManager = Depends(get_tasks_manager), - dynamic_sidecars_scheduler: DynamicSidecarsScheduler = Depends( - get_dynamic_sidecar_scheduler - ), + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], ): async def _task_cleanup_service_docker_resources( task_progress: TaskProgress, node_uuid: NodeID @@ -202,12 +248,25 @@ async def _task_cleanup_service_docker_resources( ) try: - task_id = start_task( + return start_task( tasks_manager, - task=_task_cleanup_service_docker_resources, + task=_task_cleanup_service_docker_resources, # type: ignore[arg-type] unique=True, node_uuid=node_uuid, ) - return task_id except TaskAlreadyRunningError as e: raise HTTPException(status.HTTP_409_CONFLICT, detail=f"{e}") from e + + +@router.post( + "/services/{node_uuid}/disk/reserved:free", + summary="Free up reserved disk space", + status_code=status.HTTP_204_NO_CONTENT, +) +async def free_reserved_disk_space( + node_uuid: NodeID, + dynamic_sidecars_scheduler: Annotated[ + DynamicSidecarsScheduler, Depends(get_dynamic_sidecar_scheduler) + ], +): + await dynamic_sidecars_scheduler.free_reserved_disk_space(node_id=node_uuid) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_services.py b/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_services.py index f13aae3005e..c7562ccf5cd 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_services.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/dynamic_services.py @@ -1,55 +1,64 @@ import asyncio import logging -from typing import Coroutine, Optional, Union, cast +from typing import Annotated, Final import httpx -from fastapi import 
APIRouter, Depends, Header, Request +from common_library.json_serialization import json_dumps +from fastapi import APIRouter, Depends, Header, HTTPException, Request from fastapi.responses import RedirectResponse -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceCreate, + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataIn, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_sidecar.containers import ActivityInfoOrNone +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import SimcoreServiceLabels -from models_library.services import ServiceKeyVersion from models_library.users import UserID +from pydantic import NonNegativeFloat, NonNegativeInt from servicelib.fastapi.requests_decorators import cancel_on_disconnect -from servicelib.json_serialization import json_dumps +from servicelib.logging_utils import log_decorator +from servicelib.rabbitmq import RabbitMQClient +from servicelib.utils import logged_gather from starlette import status from starlette.datastructures import URL from tenacity import RetryCallState, TryAgain -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed +from ...api.dependencies.catalog import get_catalog_client from ...api.dependencies.database import get_repository -from ...api.dependencies.rabbitmq import get_rabbitmq_client -from ...core.settings import DynamicServicesSettings, DynamicSidecarSettings -from ...models.domains.dynamic_services import ( - DynamicServiceCreate, - DynamicServiceGet, - RetrieveDataIn, - RetrieveDataOutEnveloped, -) +from ...api.dependencies.rabbitmq import get_rabbitmq_client_from_request +from ...core.dynamic_services_settings import DynamicServicesSettings +from ...core.dynamic_services_settings.scheduler import DynamicServicesSchedulerSettings from ...modules import projects_networks +from ...modules.catalog import CatalogClient from ...modules.db.repositories.projects import ProjectsRepository from ...modules.db.repositories.projects_networks import ProjectsNetworksRepository +from ...modules.director_v0 import DirectorV0Client +from ...modules.dynamic_services import ServicesClient from ...modules.dynamic_sidecar.docker_api import is_sidecar_running from ...modules.dynamic_sidecar.errors import ( DynamicSidecarNotFoundError, LegacyServiceIsNotSupportedError, ) from ...modules.dynamic_sidecar.scheduler import DynamicSidecarsScheduler -from ...modules.rabbitmq import RabbitMQClient -from ...utils.logging_utils import log_decorator from ...utils.routes import NoContentResponse -from ..dependencies.director_v0 import DirectorV0Client, get_director_v0_client +from ..dependencies.director_v0 import get_director_v0_client from ..dependencies.dynamic_services import ( - ServicesClient, get_dynamic_services_settings, get_scheduler, get_service_base_url, get_services_client, ) +_MAX_PARALLELISM: Final[NonNegativeInt] = 10 + router = APIRouter() logger = logging.getLogger(__name__) @@ -65,17 +74,16 @@ ), ) async def list_tracked_dynamic_services( - user_id: Optional[UserID] = None, - project_id: Optional[ProjectID] = None, - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - scheduler: 
DynamicSidecarsScheduler = Depends(get_scheduler), + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], + user_id: UserID | None = None, + project_id: ProjectID | None = None, ) -> list[DynamicServiceGet]: - legacy_running_services: list[DynamicServiceGet] = cast( - list[DynamicServiceGet], - await director_v0_client.get_running_services(user_id, project_id), + legacy_running_services = await director_v0_client.get_running_services( + user_id, project_id ) - get_stack_statuse_tasks: list[Coroutine] = [ + get_stack_statuse_tasks = [ scheduler.get_stack_status(service_uuid) for service_uuid in scheduler.list_services( user_id=user_id, project_id=project_id @@ -83,9 +91,7 @@ async def list_tracked_dynamic_services( ] # NOTE: Review error handling https://github.com/ITISFoundation/osparc-simcore/issues/3194 - dynamic_sidecar_running_services: list[DynamicServiceGet] = cast( - list[DynamicServiceGet], await asyncio.gather(*get_stack_statuse_tasks) - ) + dynamic_sidecar_running_services = await asyncio.gather(*get_stack_statuse_tasks) return legacy_running_services + dynamic_sidecar_running_services @@ -99,19 +105,18 @@ async def list_tracked_dynamic_services( @log_decorator(logger=logger) async def create_dynamic_service( service: DynamicServiceCreate, + catalog_client: Annotated[CatalogClient, Depends(get_catalog_client)], + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], + dynamic_services_settings: Annotated[ + DynamicServicesSettings, Depends(get_dynamic_services_settings) + ], + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], x_dynamic_sidecar_request_dns: str = Header(...), x_dynamic_sidecar_request_scheme: str = Header(...), x_simcore_user_agent: str = Header(...), - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - dynamic_services_settings: DynamicServicesSettings = Depends( - get_dynamic_services_settings - ), - scheduler: DynamicSidecarsScheduler = Depends(get_scheduler), -) -> Union[DynamicServiceGet, RedirectResponse]: +) -> DynamicServiceGet | RedirectResponse: simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion(key=service.key, version=service.version) - ) + await catalog_client.get_service_labels(service.key, service.version) ) # LEGACY (backwards compatibility) @@ -131,9 +136,8 @@ async def create_dynamic_service( logger.debug("Redirecting %s", redirect_url_with_query) return RedirectResponse(str(redirect_url_with_query)) - # if not await is_sidecar_running( - service.node_uuid, dynamic_services_settings.DYNAMIC_SIDECAR + service.node_uuid, dynamic_services_settings.DYNAMIC_SCHEDULER.SWARM_STACK_NAME ): await scheduler.add_service( service=service, @@ -142,9 +146,10 @@ async def create_dynamic_service( request_dns=x_dynamic_sidecar_request_dns, request_scheme=x_dynamic_sidecar_request_scheme, request_simcore_user_agent=x_simcore_user_agent, + can_save=service.can_save, ) - return cast(DynamicServiceGet, await scheduler.get_stack_status(service.node_uuid)) + return await scheduler.get_stack_status(service.node_uuid) @router.get( @@ -154,11 +159,11 @@ async def create_dynamic_service( ) async def get_dynamic_sidecar_status( node_uuid: NodeID, - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - scheduler: DynamicSidecarsScheduler = Depends(get_scheduler), -) -> Union[DynamicServiceGet, 
RedirectResponse]: + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], +) -> DynamicServiceGet | RedirectResponse: try: - return cast(DynamicServiceGet, await scheduler.get_stack_status(node_uuid)) + return await scheduler.get_stack_status(node_uuid) except DynamicSidecarNotFoundError: # legacy service? if it's not then a 404 will anyway be received # forward to director-v0 @@ -171,20 +176,22 @@ async def get_dynamic_sidecar_status( @router.delete( "/{node_uuid}", - responses={204: {"model": None}}, + status_code=status.HTTP_204_NO_CONTENT, + response_model=None, summary="stops previously spawned dynamic-sidecar", ) @cancel_on_disconnect async def stop_dynamic_service( request: Request, node_uuid: NodeID, - can_save: Optional[bool] = True, - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - scheduler: DynamicSidecarsScheduler = Depends(get_scheduler), - dynamic_services_settings: DynamicServicesSettings = Depends( - get_dynamic_services_settings - ), -) -> Union[NoContentResponse, RedirectResponse]: + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], + dynamic_services_settings: Annotated[ + DynamicServicesSettings, Depends(get_dynamic_services_settings) + ], + *, + can_save: bool | None = True, +) -> NoContentResponse | RedirectResponse: assert request # nosec try: @@ -199,14 +206,16 @@ async def stop_dynamic_service( return RedirectResponse(str(redirection_url)) + if await scheduler.is_service_awaiting_manual_intervention(node_uuid): + raise HTTPException(status.HTTP_409_CONFLICT, detail="waiting_for_intervention") + # Service was marked for removal, the scheduler will # take care of stopping cleaning up all allocated resources: # services, containers, volumes and networks. 
# Once the service is no longer being tracked this can return - dynamic_sidecar_settings: DynamicSidecarSettings = ( - dynamic_services_settings.DYNAMIC_SIDECAR + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + dynamic_services_settings.DYNAMIC_SCHEDULER ) - _STOPPED_CHECK_INTERVAL = 1.0 def _log_error(retry_state: RetryCallState): logger.error( @@ -216,9 +225,9 @@ def _log_error(retry_state: RetryCallState): ) async for attempt in AsyncRetrying( - wait=wait_fixed(_STOPPED_CHECK_INTERVAL), + wait=wait_fixed(1.0), stop=stop_after_delay( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP ), before_sleep=before_sleep_log(logger=logger, log_level=logging.INFO), reraise=False, @@ -241,12 +250,12 @@ def _log_error(retry_state: RetryCallState): async def service_retrieve_data_on_ports( node_uuid: NodeID, retrieve_settings: RetrieveDataIn, - scheduler: DynamicSidecarsScheduler = Depends(get_scheduler), - dynamic_services_settings: DynamicServicesSettings = Depends( - get_dynamic_services_settings - ), - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - services_client: ServicesClient = Depends(get_services_client), + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], + dynamic_services_settings: Annotated[ + DynamicServicesSettings, Depends(get_dynamic_services_settings) + ], + director_v0_client: Annotated[DirectorV0Client, Depends(get_director_v0_client)], + services_client: Annotated[ServicesClient, Depends(get_services_client)], ) -> RetrieveDataOutEnveloped: try: return await scheduler.retrieve_service_inputs( @@ -260,24 +269,24 @@ async def service_retrieve_data_on_ports( node_uuid, director_v0_client ) - dynamic_sidecar_settings: DynamicSidecarSettings = ( - dynamic_services_settings.DYNAMIC_SIDECAR + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + dynamic_services_settings.DYNAMIC_SCHEDULER ) timeout = httpx.Timeout( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, - connect=dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), + connect=dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, ) # this call waits for the service to download data response = await services_client.request( "POST", f"{service_base_url}/retrieve", - content=retrieve_settings.json(by_alias=True), + content=retrieve_settings.model_dump_json(by_alias=True), timeout=timeout, ) # validate and return - return RetrieveDataOutEnveloped.parse_obj(response.json()) + return RetrieveDataOutEnveloped.model_validate(response.json()) @router.post( @@ -287,12 +296,13 @@ async def service_retrieve_data_on_ports( ) @log_decorator(logger=logger) async def service_restart_containers( - node_uuid: NodeID, scheduler: DynamicSidecarsScheduler = Depends(get_scheduler) + node_uuid: NodeID, + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], ) -> NoContentResponse: try: await scheduler.restart_containers(node_uuid) except DynamicSidecarNotFoundError as error: - raise LegacyServiceIsNotSupportedError() from error + raise LegacyServiceIsNotSupportedError from error return NoContentResponse() @@ -307,21 +317,72 @@ async def service_restart_containers( @log_decorator(logger=logger) async def update_projects_networks( project_id: ProjectID, - projects_networks_repository: 
ProjectsNetworksRepository = Depends( - get_repository(ProjectsNetworksRepository) - ), - projects_repository: ProjectsRepository = Depends( - get_repository(ProjectsRepository) - ), - scheduler: DynamicSidecarsScheduler = Depends(get_scheduler), - director_v0_client: DirectorV0Client = Depends(get_director_v0_client), - rabbitmq_client: RabbitMQClient = Depends(get_rabbitmq_client), + projects_networks_repository: Annotated[ + ProjectsNetworksRepository, Depends(get_repository(ProjectsNetworksRepository)) + ], + projects_repository: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], + catalog_client: Annotated[CatalogClient, Depends(get_catalog_client)], + rabbitmq_client: Annotated[ + RabbitMQClient, Depends(get_rabbitmq_client_from_request) + ], ) -> None: + # NOTE: This needs to be called to update networks only when adding, removing, or renaming a node. await projects_networks.update_from_workbench( projects_networks_repository=projects_networks_repository, projects_repository=projects_repository, scheduler=scheduler, - director_v0_client=director_v0_client, + catalog_client=catalog_client, rabbitmq_client=rabbitmq_client, project_id=project_id, ) + + +def is_service_inactive_since( + activity_info: ActivityInfoOrNone, threshold: float +) -> bool: + if activity_info is None: + # services which do not support inactivity are treated as being inactive + return True + + is_inactive: bool = activity_info.seconds_inactive >= threshold + return is_inactive + + +@router.get( + "/projects/{project_id}/inactivity", summary="returns if the project is inactive" +) +@log_decorator(logger=logger) +async def get_project_inactivity( + project_id: ProjectID, + max_inactivity_seconds: NonNegativeFloat, + scheduler: Annotated[DynamicSidecarsScheduler, Depends(get_scheduler)], + projects_repository: Annotated[ + ProjectsRepository, Depends(get_repository(ProjectsRepository)) + ], +) -> GetProjectInactivityResponse: + # A project is considered inactive when all it's services are inactive for + # more than `max_inactivity_seconds`. + # A `service` which does not support the inactivity callback is considered + # inactive. 
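+    # Illustration only (hypothetical threshold, not part of this changeset): with
+    # max_inactivity_seconds=1800, the project is reported inactive only when every
+    # tracked service either reports seconds_inactive >= 1800 or returns no activity
+    # info at all (None is treated as inactive by is_service_inactive_since).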
+ + project: ProjectAtDB = await projects_repository.get_project(project_id) + + inactivity_responses: list[ActivityInfoOrNone] = await logged_gather( + *[ + scheduler.get_service_activity(NodeID(node_id)) + for node_id in project.workbench + # NOTE: only new style services expose service inactivity information + # director-v2 only tracks internally new style services + if scheduler.is_service_tracked(NodeID(node_id)) + ], + max_concurrency=_MAX_PARALLELISM, + ) + + all_services_inactive = all( + is_service_inactive_since(r, max_inactivity_seconds) + for r in inactivity_responses + ) + return GetProjectInactivityResponse(is_inactive=all_services_inactive) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/health.py b/services/director-v2/src/simcore_service_director_v2/api/routes/health.py index cc2d21f7669..9ce8dc97ef6 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/health.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/health.py @@ -1,11 +1,30 @@ -from datetime import datetime -from typing import Dict +import datetime +from typing import Annotated + +from fastapi import APIRouter, Depends +from models_library.api_schemas__common.health import HealthCheckGet +from models_library.errors import RABBITMQ_CLIENT_UNHEALTHY_MSG +from servicelib.rabbitmq import RabbitMQClient + +from ...api.dependencies.rabbitmq import get_rabbitmq_client_from_request + + +class HealthCheckError(RuntimeError): + """Failed a health check""" -from fastapi import APIRouter router = APIRouter() -@router.get("/") -async def check_service_health() -> Dict[str, str]: - return {"timestamp": f"{__name__}@{datetime.utcnow().isoformat()}"} +@router.get("/", response_model=HealthCheckGet) +async def check_service_health( + rabbitmq_client: Annotated[ + RabbitMQClient, Depends(get_rabbitmq_client_from_request) + ] +): + if not rabbitmq_client.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + return { + "timestamp": f"{__name__}@{datetime.datetime.now(tz=datetime.timezone.utc).isoformat()}" + } diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/meta.py b/services/director-v2/src/simcore_service_director_v2/api/routes/meta.py index da1c58694dd..775a6f0b65c 100644 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/meta.py +++ b/services/director-v2/src/simcore_service_director_v2/api/routes/meta.py @@ -1,15 +1,16 @@ from fastapi import APIRouter +from models_library.api_schemas__common.meta import BaseMeta +from models_library.basic_types import VersionStr -from ...meta import API_VERSION, API_VTAG, __version__ -from ...models.schemas.meta import Meta +from ..._meta import API_VERSION, API_VTAG router = APIRouter() -@router.get("", response_model=Meta) -async def get_service_metadata(): - return Meta( +@router.get("", response_model=BaseMeta) +async def get_service_metadata() -> BaseMeta: + return BaseMeta( name=__name__.split(".")[0], - version=API_VERSION, - released={API_VTAG: API_VERSION, "v0": "0.1.0"}, + version=VersionStr(API_VERSION), + released={API_VTAG: VersionStr(API_VERSION), "v0": VersionStr("0.1.0")}, ) diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/running_interactive.py b/services/director-v2/src/simcore_service_director_v2/api/routes/running_interactive.py deleted file mode 100644 index c40592543bb..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/running_interactive.py +++ /dev/null @@ -1,74 +0,0 @@ -# 
pylint: disable=unused-argument -from fastapi import APIRouter, Depends, Query, Response, status -from models_library.services import KEY_RE, VERSION_RE - -from ...models.schemas.services import RunningServicesDetailsArrayEnveloped -from ..dependencies.director_v0 import forward_to_director_v0 - -router = APIRouter() - - -UserIdQuery = Query( - ..., - description="The ID of the user that starts the service", -) -ProjectIdQuery = Query( - ..., description="The ID of the project in which the service starts" -) - - -@router.get( - "", - description="Lists of running interactive services", - response_model=RunningServicesDetailsArrayEnveloped, -) -async def list_running_interactive_services( - user_id: str = UserIdQuery, - project_id: str = ProjectIdQuery, - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request - - -@router.post( - "", - description="Starts an interactive service in the platform", - status_code=status.HTTP_201_CREATED, -) -async def start_interactive_service( - user_id: str = UserIdQuery, - project_id: str = ProjectIdQuery, - service_key: str = Query( - ..., - description="distinctive name for the node based on the docker registry path", - regex=KEY_RE, - example=[ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer", - ], - ), - service_tag: str = Query( - ..., - description="The tag/version of the service", - regex=VERSION_RE, - example="1.0.0", - ), - service_uuid: str = Query(..., description="The uuid to assign the service with"), - service_base_path: str = Query( - "", - description="predefined basepath for the backend service otherwise uses root", - example="/x/EycCXbU0H/", - ), - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request - - -@router.delete( - "/{service_uuid}", status_code=status.HTTP_204_NO_CONTENT, response_model=None -) -async def stop_interactive_service( - service_uuid: str, - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request diff --git a/services/director-v2/src/simcore_service_director_v2/api/routes/services.py b/services/director-v2/src/simcore_service_director_v2/api/routes/services.py deleted file mode 100644 index 1423dc07caf..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/api/routes/services.py +++ /dev/null @@ -1,65 +0,0 @@ -# pylint: disable=unused-argument -from typing import Optional - -from fastapi import APIRouter, Depends, Path, Query, Response -from models_library.services import KEY_RE, VERSION_RE, ServiceType - -from ...models.schemas.services import ServiceExtrasEnveloped, ServicesArrayEnveloped -from ..dependencies.director_v0 import forward_to_director_v0 - -router = APIRouter() - - -@router.get( - "", - description="Lists services available in the deployed registry", - response_model=ServicesArrayEnveloped, -) -async def list_services( - service_type: Optional[ServiceType] = Query( - None, - description=( - "The service type:\n" - " - computational - a computational service\n" - " - interactive - an interactive service\n" - ), - ), - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request - - -ServiceKeyPath = Path( - ..., - description="Distinctive name for the node based on the docker registry path", - regex=KEY_RE, -) -ServiceKeyVersionPath = Path( - ..., description="The tag/version of the service", regex=VERSION_RE -) - - -@router.get( - "/{service_key:path}/{service_version}/extras", - description="Returns the service extras", - 
response_model=ServiceExtrasEnveloped, -) -async def get_extra_service_versioned( - service_key: str = ServiceKeyPath, - service_version: str = ServiceKeyVersionPath, - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request - - -@router.get( - "/{service_key:path}/{service_version}", - description="Returns details of the selected service if available in the platform", - response_model=ServicesArrayEnveloped, -) -async def get_service_versioned( - service_key: str = ServiceKeyPath, - service_version: str = ServiceKeyVersionPath, - forward_request: Response = Depends(forward_to_director_v0), -): - return forward_request diff --git a/services/director-v2/src/simcore_service_director_v2/api/rpc/__init__.py b/services/director-v2/src/simcore_service_director_v2/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director-v2/src/simcore_service_director_v2/api/rpc/_computations.py b/services/director-v2/src/simcore_service_director_v2/api/rpc/_computations.py new file mode 100644 index 00000000000..eeb270c46cb --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/api/rpc/_computations.py @@ -0,0 +1,184 @@ +# pylint: disable=too-many-arguments +from fastapi import FastAPI +from models_library.api_schemas_directorv2.comp_runs import ( + ComputationRunRpcGetPage, + ComputationTaskRpcGet, + ComputationTaskRpcGetPage, +) +from models_library.api_schemas_directorv2.computations import TaskLogFileGet +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.rest_ordering import OrderBy +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from servicelib.rabbitmq import RPCRouter +from servicelib.utils import limited_gather + +from ...core.errors import ComputationalRunNotFoundError +from ...models.comp_runs import CompRunsAtDB +from ...models.comp_tasks import ComputationTaskForRpcDBGet +from ...modules.db.repositories.comp_runs import CompRunsRepository +from ...modules.db.repositories.comp_tasks import CompTasksRepository +from ...utils import dask as dask_utils + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=()) +async def list_computations_latest_iteration_page( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + # filters + filter_only_running: bool = False, + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationRunRpcGetPage: + comp_runs_repo = CompRunsRepository.instance(db_engine=app.state.engine) + total, comp_runs_output = ( + await comp_runs_repo.list_for_user__only_latest_iterations( + product_name=product_name, + user_id=user_id, + filter_only_running=filter_only_running, + offset=offset, + limit=limit, + order_by=order_by, + ) + ) + return ComputationRunRpcGetPage( + items=comp_runs_output, + total=total, + ) + + +@router.expose(reraise_if_error_type=()) +async def list_computations_iterations_page( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + project_ids: list[ProjectID], + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationRunRpcGetPage: + comp_runs_repo = CompRunsRepository.instance(db_engine=app.state.engine) + total, comp_runs_output = ( + await comp_runs_repo.list_for_user_and_project_all_iterations( + product_name=product_name, + user_id=user_id, + project_ids=project_ids, + offset=offset, + 
limit=limit, + order_by=order_by, + ) + ) + return ComputationRunRpcGetPage( + items=comp_runs_output, + total=total, + ) + + +async def _fetch_task_log( + user_id: UserID, task: ComputationTaskForRpcDBGet +) -> TaskLogFileGet | None: + if not task.state.is_running(): + return await dask_utils.get_task_log_file( + user_id=user_id, + project_id=task.project_uuid, + node_id=task.node_id, + ) + return None + + +async def _get_latest_run_or_none( + comp_runs_repo: CompRunsRepository, + user_id: UserID, + project_uuid: ProjectID, +) -> CompRunsAtDB | None: + try: + return await comp_runs_repo.get( + user_id=user_id, project_id=project_uuid, iteration=None + ) + except ComputationalRunNotFoundError: + return None + + +@router.expose(reraise_if_error_type=()) +async def list_computations_latest_iteration_tasks_page( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + project_ids: list[ProjectID], + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ComputationTaskRpcGetPage: + assert product_name # nosec NOTE: Whether project_id belong to the product_name was checked in the webserver + assert user_id # nosec NOTE: Whether user_id has access to the project was checked in the webserver + + comp_tasks_repo = CompTasksRepository.instance(db_engine=app.state.engine) + comp_runs_repo = CompRunsRepository.instance(db_engine=app.state.engine) + + total, comp_tasks = await comp_tasks_repo.list_computational_tasks_rpc_domain( + project_ids=project_ids, + offset=offset, + limit=limit, + order_by=order_by, + ) + + # Get unique set of all project_uuids from comp_tasks + unique_project_uuids = {task.project_uuid for task in comp_tasks} + + # Fetch latest run for each project concurrently + latest_runs = await limited_gather( + *[ + _get_latest_run_or_none(comp_runs_repo, user_id, project_uuid) + for project_uuid in unique_project_uuids + ], + limit=20, + ) + # Build a dict: project_uuid -> iteration + project_uuid_to_iteration = { + run.project_uuid: run.iteration for run in latest_runs if run is not None + } + + # Run all log fetches concurrently + log_files = await limited_gather( + *[_fetch_task_log(user_id, task) for task in comp_tasks], + limit=20, + ) + + comp_tasks_output = [ + ComputationTaskRpcGet( + project_uuid=task.project_uuid, + node_id=task.node_id, + state=task.state, + progress=task.progress, + image=task.image, + started_at=task.started_at, + ended_at=task.ended_at, + log_download_link=log_file.download_link if log_file else None, + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, + task.project_uuid, + task.node_id, + project_uuid_to_iteration[task.project_uuid], + ), + ) + for task, log_file in zip(comp_tasks, log_files, strict=True) + ] + + return ComputationTaskRpcGetPage( + items=comp_tasks_output, + total=total, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/api/rpc/routes.py b/services/director-v2/src/simcore_service_director_v2/api/rpc/routes.py new file mode 100644 index 00000000000..ad6bdba28c7 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/api/rpc/routes.py @@ -0,0 +1,34 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_directorv2 import ( + DIRECTOR_V2_RPC_NAMESPACE, +) +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RPCRouter + +from ...modules.rabbitmq import get_rabbitmq_rpc_server +from . 
import ( + _computations, +) + +_logger = logging.getLogger(__name__) + + +ROUTERS: list[RPCRouter] = [ + _computations.router, +] + + +def setup_rpc_api_routes(app: FastAPI) -> None: + async def startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="Director-v2 startup RPC API Routes", + ): + rpc_server = get_rabbitmq_rpc_server(app) + for router in ROUTERS: + await rpc_server.register_router(router, DIRECTOR_V2_RPC_NAMESPACE, app) + + app.add_event_handler("startup", startup) diff --git a/services/director-v2/src/simcore_service_director_v2/cli/__init__.py b/services/director-v2/src/simcore_service_director_v2/cli/__init__.py index 5fbda07f926..f33d5972260 100644 --- a/services/director-v2/src/simcore_service_director_v2/cli/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/cli/__init__.py @@ -2,15 +2,22 @@ import logging from typing import Final +import rich import typer from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from settings_library.utils_cli import create_settings_command +from .._meta import PROJECT_NAME from ..core.settings import AppSettings -from ..meta import PROJECT_NAME +from ..modules.osparc_variables import substitutions from ._close_and_save_service import async_close_and_save_service -from ._core import async_project_save_state, async_project_state +from ._core import ( + async_free_service_disk_space, + async_project_save_state, + async_project_state, + async_service_state, +) DEFAULT_NODE_SAVE_ATTEMPTS: Final[int] = 3 DEFAULT_STATE_UPDATE_INTERVAL_S: Final[int] = 5 @@ -19,10 +26,15 @@ DEFAULT_OUTPUTS_PUSH_ATTEMPTS: Final[int] = 3 DEFAULT_TASK_UPDATE_INTERVAL_S: Final[int] = 1 -main = typer.Typer(name=PROJECT_NAME) +main = typer.Typer( + name=PROJECT_NAME, + pretty_exceptions_enable=False, + pretty_exceptions_show_locals=False, +) -log = logging.getLogger(__name__) -main.command()(create_settings_command(settings_cls=AppSettings, logger=log)) +_logger = logging.getLogger(__name__) + +main.command()(create_settings_command(settings_cls=AppSettings, logger=_logger)) @main.command() @@ -50,6 +62,22 @@ def project_state( asyncio.run(async_project_state(project_id, blocking, update_interval)) +@main.command() +def service_state(node_id: NodeID): + """ + Prints the state of a services as tracked by director-v2 + """ + asyncio.run(async_service_state(node_id)) + + +@main.command() +def free_reserved_disk_space(node_id: NodeID): + """ + Frees service's reserved disk space + """ + asyncio.run(async_free_service_disk_space(node_id)) + + @main.command() def close_and_save_service( node_id: NodeID, @@ -101,3 +129,10 @@ def close_and_save_service( update_interval, ) ) + + +@main.command() +def osparc_variables(): + """Lists all registered osparc session variables""" + for name in substitutions.list_osparc_session_variables(): + rich.print(name) diff --git a/services/director-v2/src/simcore_service_director_v2/cli/_client.py b/services/director-v2/src/simcore_service_director_v2/cli/_client.py index 7bcff75ce7c..872c08f3b5f 100644 --- a/services/director-v2/src/simcore_service_director_v2/cli/_client.py +++ b/services/director-v2/src/simcore_service_director_v2/cli/_client.py @@ -1,62 +1,71 @@ -import logging - from fastapi import status -from httpx import AsyncClient, Response, Timeout - -from ..modules.dynamic_sidecar.api_client._base import ( +from httpx import Response, Timeout +from servicelib.fastapi.http_client_thin import ( BaseThinClient, expect_status, retry_on_errors, ) -logger = 
logging.getLogger(__name__) - class ThinDV2LocalhostClient(BaseThinClient): - BASE_ADDRESS: str = "http://localhost:8000" + BASE_ADDRESS: str = "http://localhost:8000" # NOSONAR def __init__(self): - self.client = AsyncClient(timeout=Timeout(5)) - - super().__init__(request_timeout=10) + super().__init__( + total_retry_interval=10, + default_http_client_timeout=Timeout(5), + tracing_settings=None, + ) def _get_url(self, postfix: str) -> str: return f"{self.BASE_ADDRESS}/v2/dynamic_scheduler{postfix}" - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_204_NO_CONTENT) async def toggle_service_observation( self, node_uuid: str, *, is_disabled: bool ) -> Response: return await self.client.patch( self._get_url(f"/services/{node_uuid}/observation"), - json=dict(is_disabled=is_disabled), + json={"is_disabled": is_disabled}, ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def delete_service_containers(self, node_uuid: str) -> Response: return await self.client.delete( self._get_url(f"/services/{node_uuid}/containers") ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def save_service_state(self, node_uuid: str) -> Response: return await self.client.post( self._get_url(f"/services/{node_uuid}/state:save") ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def push_service_outputs(self, node_uuid: str) -> Response: return await self.client.post( self._get_url(f"/services/{node_uuid}/outputs:push") ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def delete_service_docker_resources(self, node_uuid: str) -> Response: return await self.client.delete( self._get_url(f"/services/{node_uuid}/docker-resources") ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_service_state(self, node_uuid: str) -> Response: + return await self.client.get(self._get_url(f"/services/{node_uuid}/state")) + + @retry_on_errors() + @expect_status(status.HTTP_204_NO_CONTENT) + async def free_service_reserved_disk_space(self, node_uuid: str) -> Response: + return await self.client.post( + self._get_url(f"/services/{node_uuid}/disk/reserved:free") + ) diff --git a/services/director-v2/src/simcore_service_director_v2/cli/_close_and_save_service.py b/services/director-v2/src/simcore_service_director_v2/cli/_close_and_save_service.py index 636a8696c52..fb8f70bf62f 100644 --- a/services/director-v2/src/simcore_service_director_v2/cli/_close_and_save_service.py +++ b/services/director-v2/src/simcore_service_director_v2/cli/_close_and_save_service.py @@ -1,11 +1,11 @@ -import logging +from collections.abc import AsyncIterator from contextlib import asynccontextmanager -from typing import AsyncIterator, Final +from typing import Final import rich from fastapi import FastAPI from models_library.projects_nodes_io import NodeID -from pydantic import AnyHttpUrl, PositiveFloat, parse_obj_as +from pydantic import AnyHttpUrl, PositiveFloat, TypeAdapter from rich.progress import ( BarColumn, Progress, @@ -13,6 +13,7 @@ TextColumn, TimeElapsedColumn, ) +from servicelib.fastapi.http_client_thin import UnexpectedStatusError from servicelib.fastapi.long_running_tasks.client import ( Client, ProgressMessage, @@ -21,19 +22,16 @@ periodic_task_result, setup, ) -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt from 
tenacity.wait import wait_fixed -from ..modules.dynamic_sidecar.api_client._errors import UnexpectedStatusError from ._client import ThinDV2LocalhostClient _MIN: Final[PositiveFloat] = 60 HEADING: Final[str] = "[green]*[/green]" -logger = logging.getLogger(__name__) - @asynccontextmanager async def _minimal_app() -> AsyncIterator[FastAPI]: @@ -62,7 +60,7 @@ async def _track_and_display( task = progress.add_task("...", total=1.0, visible=True) async def _debug_progress_callback( - message: ProgressMessage, percent: ProgressPercent, _: TaskId + message: ProgressMessage, percent: ProgressPercent | None, _: TaskId ) -> None: progress.update(task, completed=percent, description=message, visible=True) @@ -87,13 +85,12 @@ async def async_close_and_save_service( outputs_push_retry_attempts: int, update_interval: int, ) -> None: - async with _minimal_app() as app: + task_id: TaskId + async with _minimal_app() as app, ThinDV2LocalhostClient() as thin_dv2_localhost_client: rich.print( f"[yellow]Starting[/yellow] cleanup for service [green]{node_id}[/green]" ) - thin_dv2_localhost_client = ThinDV2LocalhostClient() - rich.print(f"{HEADING} disabling service observation") async for attempt in AsyncRetrying( wait=wait_fixed(1), @@ -109,7 +106,7 @@ async def async_close_and_save_service( client = Client( app=app, async_client=thin_dv2_localhost_client.client, - base_url=parse_obj_as(AnyHttpUrl, thin_dv2_localhost_client.BASE_ADDRESS), + base_url=f"{TypeAdapter(AnyHttpUrl).validate_python(thin_dv2_localhost_client.BASE_ADDRESS)}", ) if not skip_container_removal: @@ -117,7 +114,7 @@ async def async_close_and_save_service( response = await thin_dv2_localhost_client.delete_service_containers( f"{node_id}" ) - task_id: TaskId = response.json() + task_id = response.json() await _track_and_display( client, task_id, update_interval, task_timeout=5 * _MIN ) @@ -133,7 +130,7 @@ async def async_close_and_save_service( response = await thin_dv2_localhost_client.save_service_state( f"{node_id}" ) - task_id: TaskId = response.json() + task_id = response.json() await _track_and_display( client, task_id, update_interval, task_timeout=60 * _MIN ) @@ -149,7 +146,7 @@ async def async_close_and_save_service( response = await thin_dv2_localhost_client.push_service_outputs( f"{node_id}" ) - task_id: TaskId = response.json() + task_id = response.json() await _track_and_display( client, task_id, update_interval, task_timeout=60 * _MIN ) @@ -161,7 +158,7 @@ async def async_close_and_save_service( response = await thin_dv2_localhost_client.delete_service_docker_resources( f"{node_id}" ) - task_id: TaskId = response.json() + task_id = response.json() await _track_and_display( client, task_id, update_interval, task_timeout=5 * _MIN ) diff --git a/services/director-v2/src/simcore_service_director_v2/cli/_core.py b/services/director-v2/src/simcore_service_director_v2/cli/_core.py index 2328179accd..bc7cc095898 100644 --- a/services/director-v2/src/simcore_service_director_v2/cli/_core.py +++ b/services/director-v2/src/simcore_service_director_v2/cli/_core.py @@ -1,50 +1,51 @@ import asyncio import sys +from collections.abc import AsyncIterator from contextlib import asynccontextmanager from enum import Enum -from typing import AsyncIterator, Optional import typer from fastapi import FastAPI, status from httpx import AsyncClient, HTTPError -from models_library.projects import NodeIDStr, ProjectID -from models_library.projects_nodes_io import NodeID +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet 
+from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, NodeIDStr from models_library.services import ServiceType -from pydantic import AnyHttpUrl, BaseModel, PositiveInt, parse_obj_as +from models_library.services_enums import ServiceBootType, ServiceState +from pydantic import AnyHttpUrl, BaseModel, PositiveInt, TypeAdapter from rich.live import Live from rich.table import Table from servicelib.services_utils import get_service_from_key -from tenacity._asyncio import AsyncRetrying +from simcore_service_director_v2.modules.catalog import CatalogClient +from tenacity.asyncio import AsyncRetrying from tenacity.stop import stop_after_attempt from tenacity.wait import wait_random_exponential from ..core.application import create_base_app from ..core.settings import AppSettings -from ..models.domains.dynamic_services import DynamicServiceGet -from ..models.schemas.dynamic_services import ( - DynamicSidecarNamesHelper, - ServiceBootType, - ServiceState, -) +from ..models.dynamic_services_scheduler import DynamicSidecarNamesHelper from ..modules import db, director_v0, dynamic_sidecar from ..modules.db.repositories.projects import ProjectsRepository -from ..modules.director_v0 import DirectorV0Client from ..modules.dynamic_sidecar import api_client from ..modules.projects_networks import requires_dynamic_sidecar from ..utils.db import get_repository +from ._client import ThinDV2LocalhostClient @asynccontextmanager async def _initialized_app(only_db: bool = False) -> AsyncIterator[FastAPI]: app = create_base_app() settings: AppSettings = app.state.settings - # Initialize minimal required components for the application db.setup(app, settings.POSTGRES) if not only_db: dynamic_sidecar.setup(app) - director_v0.setup(app, settings.DIRECTOR_V0) + director_v0.setup( + app, + director_v0_settings=settings.DIRECTOR_V0, + tracing_settings=settings.DIRECTOR_V2_TRACING, + ) await app.router.startup() yield app @@ -60,12 +61,15 @@ def _get_dynamic_sidecar_endpoint( dynamic_sidecar_names = DynamicSidecarNamesHelper.make(NodeID(node_id)) hostname = dynamic_sidecar_names.service_name_dynamic_sidecar port = settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_PORT - return parse_obj_as(AnyHttpUrl, f"http://{hostname}:{port}") # NOSONAR + url: AnyHttpUrl = TypeAdapter(AnyHttpUrl).validate_python( + f"http://{hostname}:{port}" + ) + return url async def _save_node_state( app, - dynamic_sidecar_client: api_client.DynamicSidecarClient, + sidecars_client: api_client.SidecarsClient, save_attempts: int, node_uuid: NodeIDStr, label: str, @@ -78,7 +82,7 @@ async def _save_node_state( ): with attempt: typer.echo(f"Attempting to save {node_uuid} {label}") - await dynamic_sidecar_client.save_service_state( + await sidecars_client.save_service_state( _get_dynamic_sidecar_endpoint(app.state.settings, node_uuid) ) @@ -91,22 +95,20 @@ async def async_project_save_state(project_id: ProjectID, save_attempts: int) -> project_at_db = await projects_repository.get_project(project_id) typer.echo(f"Saving project '{project_at_db.uuid}' - '{project_at_db.name}'") - - dynamic_sidecar_client = api_client.get_dynamic_sidecar_client(app) nodes_failed_to_save: list[NodeIDStr] = [] for node_uuid, node_content in project_at_db.workbench.items(): # onl dynamic-sidecars are used if not await requires_dynamic_sidecar( service_key=node_content.key, service_version=node_content.version, - director_v0_client=DirectorV0Client.instance(app), + catalog_client=CatalogClient.instance(app), ): continue 
try: await _save_node_state( app, - dynamic_sidecar_client, + await api_client.get_sidecars_client(app, node_uuid), save_attempts, node_uuid, node_content.label, @@ -146,7 +148,7 @@ class RenderData(BaseModel): async def _get_dy_service_state( client: AsyncClient, node_uuid: NodeIDStr -) -> Optional[DynamicServiceGet]: +) -> DynamicServiceGet | None: try: result = await client.get( f"http://localhost:8000/v2/dynamic_services/{node_uuid}", # NOSONAR @@ -216,14 +218,19 @@ async def _to_render_data( ) +def _get_node_id(x: RenderData) -> NodeIDStr: + return x.node_uuid + + async def _get_nodes_render_data( app: FastAPI, project_id: ProjectID, ) -> list[RenderData]: projects_repository: ProjectsRepository = get_repository(app, ProjectsRepository) + project_at_db = await projects_repository.get_project(project_id) - render_data = [] + render_data: list[RenderData] = [] async with AsyncClient() as client: for node_uuid, node_content in project_at_db.workbench.items(): service_type = get_service_from_key(service_key=node_content.key) @@ -232,7 +239,8 @@ async def _get_nodes_render_data( client, node_uuid, node_content.label, service_type ) ) - return sorted(render_data, key=lambda x: x.node_uuid) + sorted_render_data: list[RenderData] = sorted(render_data, key=_get_node_id) + return sorted_render_data async def _display( @@ -271,3 +279,15 @@ async def async_project_state( await _display( app, project_id, update_interval=update_interval, blocking=blocking ) + + +async def async_service_state(node_id: NodeID) -> None: + async with ThinDV2LocalhostClient() as client: + result = await client.get_service_state(node_id) + typer.echo(f"Service state: {result.text}") + + +async def async_free_service_disk_space(node_id: NodeID) -> None: + async with ThinDV2LocalhostClient() as client: + await client.free_service_reserved_disk_space(node_id) + typer.echo("Done freeing reserved disk space!") diff --git a/services/director-v2/src/simcore_service_director_v2/constants.py b/services/director-v2/src/simcore_service_director_v2/constants.py new file mode 100644 index 00000000000..d4a5690d9bb --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/constants.py @@ -0,0 +1,27 @@ +from typing import Final + +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_PROXY_SERVICE_PREFIX, + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) + +# dynamic services + +# label storing scheduler_data to allow service +# monitoring recovery after director-v2 reboots +DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: Final[str] = "io.simcore.scheduler-data" + +# This matches registries by: +# - local +# - itisfoundation +# - 10.0.0.0:8473 (IP & Port) +DYNAMIC_SIDECAR_DOCKER_IMAGE_RE = ( + r"^(([_a-zA-Z0-9:.-]+)/)?(dynamic-sidecar):([_a-zA-Z0-9.-]+)$" +) + +REGEX_DY_SERVICE_SIDECAR = rf"^{DYNAMIC_SIDECAR_SERVICE_PREFIX}_[a-zA-Z0-9-_]*" +REGEX_DY_SERVICE_PROXY = rf"^{DYNAMIC_PROXY_SERVICE_PREFIX}_[a-zA-Z0-9-_]*" + +UNDEFINED_STR_METADATA = "undefined-metadata" +UNDEFINED_DOCKER_LABEL = "undefined-label" +UNDEFINED_API_BASE_URL = "https://api.local" diff --git a/services/director-v2/src/simcore_service_director_v2/core/application.py b/services/director-v2/src/simcore_service_director_v2/core/application.py index 6247ea95e67..5031c74a618 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/application.py +++ b/services/director-v2/src/simcore_service_director_v2/core/application.py @@ -1,5 +1,4 @@ import logging -from typing import Optional from fastapi import FastAPI, HTTPException, status from 
fastapi.exceptions import RequestValidationError @@ -7,15 +6,21 @@ get_common_oas_options, override_fastapi_openapi_method, ) -from servicelib.fastapi.tracing import setup_tracing +from servicelib.fastapi.profiler import initialize_profiler +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) +from servicelib.logging_utils import config_all_loggers +from .._meta import API_VERSION, API_VTAG, APP_NAME, PROJECT_NAME, SUMMARY from ..api.entrypoints import api_router from ..api.errors.http_error import ( http_error_handler, make_http_error_handler_for_exception, ) from ..api.errors.validation_error import http422_error_handler -from ..meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY +from ..api.rpc.routes import setup_rpc_api_routes from ..modules import ( catalog, comp_scheduler, @@ -24,22 +29,25 @@ director_v0, dynamic_services, dynamic_sidecar, - node_rights, + instrumentation, + notifier, rabbitmq, - remote_debug, + redis, + resource_usage_tracker_client, + socketio, storage, ) -from ..utils.logging_utils import config_all_loggers +from ..modules.osparc_variables import substitutions from .errors import ( - ClusterAccessForbiddenError, ClusterNotFoundError, PipelineNotFoundError, + ProjectNetworkNotFoundError, ProjectNotFoundError, ) from .events import on_shutdown, on_startup -from .settings import AppSettings, BootModeEnum +from .settings import AppSettings -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) def _set_exception_handlers(app: FastAPI): @@ -53,21 +61,21 @@ def _set_exception_handlers(app: FastAPI): ), ) app.add_exception_handler( - PipelineNotFoundError, + ProjectNetworkNotFoundError, make_http_error_handler_for_exception( - status.HTTP_404_NOT_FOUND, PipelineNotFoundError + status.HTTP_404_NOT_FOUND, ProjectNetworkNotFoundError ), ) app.add_exception_handler( - ClusterNotFoundError, + PipelineNotFoundError, make_http_error_handler_for_exception( - status.HTTP_404_NOT_FOUND, ClusterNotFoundError + status.HTTP_404_NOT_FOUND, PipelineNotFoundError ), ) app.add_exception_handler( - ClusterAccessForbiddenError, + ClusterNotFoundError, make_http_error_handler_for_exception( - status.HTTP_403_FORBIDDEN, ClusterAccessForbiddenError + status.HTTP_404_NOT_FOUND, ClusterNotFoundError ), ) @@ -86,37 +94,44 @@ def _set_exception_handlers(app: FastAPI): ) -LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR -NOISY_LOGGERS = ( +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS = ( "aio_pika", "aiormq", + "httpcore", ) -def create_base_app(settings: Optional[AppSettings] = None) -> FastAPI: +def create_base_app(settings: AppSettings | None = None) -> FastAPI: if settings is None: settings = AppSettings.create_from_envs() assert settings # nosec logging.basicConfig(level=settings.LOG_LEVEL.value) logging.root.setLevel(settings.LOG_LEVEL.value) - logger.debug(settings.json(indent=2)) + config_all_loggers( + log_format_local_dev_enabled=settings.DIRECTOR_V2_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=settings.DIRECTOR_V2_LOG_FILTER_MAPPING, + tracing_settings=settings.DIRECTOR_V2_TRACING, + ) + _logger.debug(settings.model_dump_json(indent=2)) # keep mostly quiet noisy loggers quiet_level: int = max( - min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING ) - for name in NOISY_LOGGERS: + for name in _NOISY_LOGGERS: logging.getLogger(name).setLevel(quiet_level) + assert settings.SC_BOOT_MODE # 
nosec app = FastAPI( debug=settings.SC_BOOT_MODE.is_devel_mode(), title=PROJECT_NAME, description=SUMMARY, version=API_VERSION, openapi_url=f"/api/{API_VTAG}/openapi.json", - **get_common_oas_options(settings.SC_BOOT_MODE.is_devel_mode()), + **get_common_oas_options(is_devel_mode=settings.SC_BOOT_MODE.is_devel_mode()), ) override_fastapi_openapi_method(app) app.state.settings = settings @@ -125,29 +140,48 @@ def create_base_app(settings: Optional[AppSettings] = None) -> FastAPI: return app -def init_app(settings: Optional[AppSettings] = None) -> FastAPI: +def init_app(settings: AppSettings | None = None) -> FastAPI: app = create_base_app(settings) if settings is None: settings = app.state.settings assert settings # nosec - if settings.SC_BOOT_MODE == BootModeEnum.DEBUG: - remote_debug.setup(app) + substitutions.setup(app) + + if settings.DIRECTOR_V2_TRACING: + setup_tracing(app, settings.DIRECTOR_V2_TRACING, APP_NAME) + + if settings.DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED: + instrumentation.setup(app) - if settings.DIRECTOR_V0.DIRECTOR_V0_ENABLED: - director_v0.setup(app, settings.DIRECTOR_V0) + if settings.DIRECTOR_V0.DIRECTOR_ENABLED: + director_v0.setup( + app, + director_v0_settings=settings.DIRECTOR_V0, + tracing_settings=settings.DIRECTOR_V2_TRACING, + ) if settings.DIRECTOR_V2_STORAGE: - storage.setup(app, settings.DIRECTOR_V2_STORAGE) + storage.setup( + app, + storage_settings=settings.DIRECTOR_V2_STORAGE, + tracing_settings=settings.DIRECTOR_V2_TRACING, + ) if settings.DIRECTOR_V2_CATALOG: - catalog.setup(app, settings.DIRECTOR_V2_CATALOG) + catalog.setup( + app, + catalog_settings=settings.DIRECTOR_V2_CATALOG, + tracing_settings=settings.DIRECTOR_V2_TRACING, + ) - if settings.POSTGRES.DIRECTOR_V2_POSTGRES_ENABLED: - db.setup(app, settings.POSTGRES) + db.setup(app, settings.POSTGRES) + + if settings.DIRECTOR_V2_TRACING: + initialize_fastapi_app_tracing(app) if settings.DYNAMIC_SERVICES.DIRECTOR_V2_DYNAMIC_SERVICES_ENABLED: - dynamic_services.setup(app, settings.DYNAMIC_SERVICES) + dynamic_services.setup(app, tracing_settings=settings.DIRECTOR_V2_TRACING) dynamic_scheduler_enabled = settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR and ( settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER @@ -159,9 +193,13 @@ def init_app(settings: Optional[AppSettings] = None) -> FastAPI: ) if dynamic_scheduler_enabled or computational_backend_enabled: rabbitmq.setup(app) + setup_rpc_api_routes(app) # Requires rabbitmq to be setup first + redis.setup(app) if dynamic_scheduler_enabled: dynamic_sidecar.setup(app) + socketio.setup(app) + notifier.setup(app) if ( settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED @@ -171,16 +209,14 @@ def init_app(settings: Optional[AppSettings] = None) -> FastAPI: if computational_backend_enabled: comp_scheduler.setup(app) - node_rights.setup(app) + resource_usage_tracker_client.setup(app) - if settings.DIRECTOR_V2_TRACING: - setup_tracing(app, settings.DIRECTOR_V2_TRACING) + if settings.DIRECTOR_V2_PROFILING: + initialize_profiler(app) # setup app -- app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) _set_exception_handlers(app) - config_all_loggers() - return app diff --git a/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/__init__.py b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/__init__.py new file mode 100644 index 00000000000..c3ed002edd6 --- /dev/null +++ 
b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/__init__.py @@ -0,0 +1,34 @@ +from pydantic import Field +from settings_library.base import BaseCustomSettings +from settings_library.webserver import WebServerSettings + +from .egress_proxy import EgressProxySettings +from .proxy import DynamicSidecarProxySettings +from .scheduler import DynamicServicesSchedulerSettings +from .sidecar import DynamicSidecarSettings, PlacementSettings + + +class DynamicServicesSettings(BaseCustomSettings): + DIRECTOR_V2_DYNAMIC_SERVICES_ENABLED: bool = Field( + default=True, description="Enables/Disables the dynamic_sidecar submodule" + ) + + DYNAMIC_SIDECAR: DynamicSidecarSettings = Field(json_schema_extra={"auto_default_from_env": True}) + + DYNAMIC_SCHEDULER: DynamicServicesSchedulerSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_PROXY_SETTINGS: DynamicSidecarProxySettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_EGRESS_PROXY_SETTINGS: EgressProxySettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_PLACEMENT_SETTINGS: PlacementSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + WEBSERVER_SETTINGS: WebServerSettings = Field(json_schema_extra={"auto_default_from_env": True}) diff --git a/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/egress_proxy.py b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/egress_proxy.py new file mode 100644 index 00000000000..fe7b25029d4 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/egress_proxy.py @@ -0,0 +1,31 @@ +from enum import auto + +from models_library.docker import DockerGenericTag +from models_library.utils.enums import StrAutoEnum +from pydantic import Field +from settings_library.base import BaseCustomSettings + + +class EnvoyLogLevel(StrAutoEnum): + TRACE = auto() + DEBUG = auto() + INFO = auto() + WARNING = auto() + ERROR = auto() + CRITICAL = auto() + + def to_log_level(self) -> str: + assert isinstance(self.value, str) # nosec + lower_log_level: str = self.value.lower() + return lower_log_level + + +class EgressProxySettings(BaseCustomSettings): + DYNAMIC_SIDECAR_ENVOY_IMAGE: DockerGenericTag = Field( + "envoyproxy/envoy:v1.25-latest", + description="envoy image to use", + ) + DYNAMIC_SIDECAR_ENVOY_LOG_LEVEL: EnvoyLogLevel = Field( + default=EnvoyLogLevel.ERROR, + description="log level for envoy proxy service", + ) diff --git a/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/proxy.py b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/proxy.py new file mode 100644 index 00000000000..06da8acb6e8 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/proxy.py @@ -0,0 +1,20 @@ +from models_library.basic_types import PortInt +from pydantic import Field +from servicelib.utils_secrets import secure_randint +from settings_library.base import BaseCustomSettings + + +class DynamicSidecarProxySettings(BaseCustomSettings): + DYNAMIC_SIDECAR_CADDY_VERSION: str = Field( + "2.6.4-alpine", + description="current version of the Caddy image to be pulled and used from dockerhub", + ) + DYNAMIC_SIDECAR_CADDY_ADMIN_API_PORT: PortInt = Field( + default_factory=lambda: secure_randint(1025, 65535), + description="port where to expose the proxy's admin 
API", + ) + + PROXY_EXPOSE_PORT: bool = Field( + default=False, + description="exposes the proxy on localhost for debugging and testing", + ) diff --git a/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/scheduler.py b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/scheduler.py new file mode 100644 index 00000000000..5072a365af6 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/scheduler.py @@ -0,0 +1,178 @@ +from datetime import timedelta +from typing import Final + +from common_library.pydantic_validators import validate_numeric_string_as_timedelta +from models_library.projects_networks import DockerNetworkName +from pydantic import Field, NonNegativeInt, PositiveFloat +from settings_library.base import BaseCustomSettings + +_MINUTE: Final[NonNegativeInt] = 60 + + +class DynamicServicesSchedulerSettings(BaseCustomSettings): + DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED: bool = True + + DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL: timedelta = Field( + timedelta(seconds=5), + description="interval at which the scheduler cycle is repeated", + ) + + DIRECTOR_V2_DYNAMIC_SCHEDULER_PENDING_VOLUME_REMOVAL_INTERVAL_S: PositiveFloat = ( + Field( + 30 * _MINUTE, + description="interval at which cleaning of unused dy-sidecar " + "docker volume removal services is executed", + ) + ) + + SIMCORE_SERVICES_NETWORK_NAME: DockerNetworkName = Field( + ..., + description="network all dynamic services are connected to", + ) + + DYNAMIC_SIDECAR_DOCKER_COMPOSE_VERSION: str = Field( + "3.8", + description="docker-compose spec version used in the compose-specs", + deprecated=True, + ) + + DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS: bool = Field( + default=False, + description="enables support for limiting service's volume size", + ) + + SWARM_STACK_NAME: str = Field( + ..., + description="in case there are several deployments on the same docker swarm, it is attached as a label on all spawned services", + ) + + TRAEFIK_SIMCORE_ZONE: str = Field( + ..., + description="Names the traefik zone for services that must be accessible from platform http entrypoint", + ) + + DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS: dict[str, str] = Field( + ..., + description=( + "Provided by ops, are injected as service labels when starting the dy-sidecar, " + "and Prometheus identifies the service as to be scraped" + ), + ) + + DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS: list[str] = Field( + default_factory=list, + description="Prometheus will scrape service placed on these networks", + ) + + DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED: ( + bool + ) = Field( + default=True, + description=( + "when the message indicating there are no more credits left in a wallet " + "the director-v2 will shutdown the services via the help of the frontend" + ), + ) + + # + # TIMEOUTS AND RETRY dark worlds + # + DYNAMIC_SIDECAR_API_REQUEST_TIMEOUT: PositiveFloat = Field( + 15.0, + description=( + "the default timeout each request to the dynamic-sidecar API in seconds; as per " + "design, all requests should answer quite quickly, in theory a few seconds or less" + ), + ) + + DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT: PositiveFloat = Field( + 5.0, + description=( + "Connections to the dynamic-sidecars in the same swarm deployment should be very fast." 
+ ), + + DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S: PositiveFloat = Field( + 60 * _MINUTE, + description=( + "After starting the dynamic-sidecar its docker_node_id is required. " + "This operation can be slow based on system load, sometimes docker " + "swarm takes more than a few seconds to assign the node. " + "Autoscaling of nodes takes time, it is required to wait longer " + "for nodes to be assigned." + ), + ) + + DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT: timedelta = Field( + timedelta(hours=1), + description=( + "When saving and restoring the state of a dynamic service, depending on the payload " + "some services take longer or shorter to save and restore. Across the " + "platform this value is set to 1 hour." + ), + ) + + DYNAMIC_SIDECAR_API_USER_SERVICES_PULLING_TIMEOUT: PositiveFloat = Field( + 60.0 * _MINUTE, + description="time to pull all the images in parallel before starting the user services", + ) + + DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT: PositiveFloat = Field( + 1.0 * _MINUTE, + description=( + "Restarts all started containers. During this operation, no data " + "stored in the container will be lost as docker compose restart " + "will not alter the state of the files on the disk nor its environment." + ), + ) + + DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START: PositiveFloat = Field( + 60.0 * _MINUTE, + description=( + "When starting containers (`docker compose up`), images might " + "require pulling before containers are started." + ), + ) + + DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP: PositiveFloat = Field( + 60.0 * _MINUTE, + description=( + "When stopping a service, depending on the amount of data to store, " + "the operation might be very long. Also all related resources: " + "services, containers, volumes and networks need to be removed. " + ), + ) + + DYNAMIC_SIDECAR_PROJECT_NETWORKS_ATTACH_DETACH_S: PositiveFloat = Field( + 3.0 * _MINUTE, + description=( + "timeout for attaching/detaching project networks to/from a container" + ), + ) + + DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S: PositiveFloat = Field( + 1 * _MINUTE, + description=( + "Connectivity between director-v2 and a dy-sidecar can be " + "temporarily disrupted if network between swarm nodes has " + "issues. To avoid the sidecar being marked as failed, " + "allow for some time to pass before declaring it failed."
+ ), + ) + + # + # DEBUG + # + + DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL: timedelta = Field( + timedelta(0), description="time to sleep before removing a container" + ) + + _validate_director_v2_dynamic_scheduler_interval = ( + validate_numeric_string_as_timedelta("DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL") + ) + _validate_director_v2_dynamic_sidecar_sleep_after_container_removal = ( + validate_numeric_string_as_timedelta( + "DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL" + ) + ) diff --git a/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/sidecar.py b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/sidecar.py new file mode 100644 index 00000000000..fa0d0e670b7 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/core/dynamic_services_settings/sidecar.py @@ -0,0 +1,206 @@ +import logging +import warnings +from enum import Enum +from pathlib import Path +from typing import Annotated + +from models_library.basic_types import BootModeEnum, PortInt +from models_library.docker import DockerPlacementConstraint +from models_library.utils.common_validators import ( + ensure_unique_dict_values_validator, + ensure_unique_list_values_validator, +) +from pydantic import AliasChoices, Field, PositiveInt, ValidationInfo, field_validator +from settings_library.aws_s3_cli import AwsS3CliSettings +from settings_library.base import BaseCustomSettings +from settings_library.efs import AwsEfsSettings +from settings_library.r_clone import RCloneSettings as SettingsLibraryRCloneSettings +from settings_library.utils_logging import MixinLoggingSettings +from settings_library.utils_service import DEFAULT_FASTAPI_PORT + +from ...constants import DYNAMIC_SIDECAR_DOCKER_IMAGE_RE + +_logger = logging.getLogger(__name__) + + +class VFSCacheMode(str, Enum): + __slots__ = () + + OFF = "off" + MINIMAL = "minimal" + WRITES = "writes" + FULL = "full" + + +class RCloneSettings(SettingsLibraryRCloneSettings): + R_CLONE_DIR_CACHE_TIME_SECONDS: PositiveInt = Field( + 10, + description="time to cache directory entries for", + ) + R_CLONE_POLL_INTERVAL_SECONDS: PositiveInt = Field( + 9, + description="time to wait between polling for changes", + ) + R_CLONE_VFS_CACHE_MODE: VFSCacheMode = Field( + VFSCacheMode.MINIMAL, # SEE https://rclone.org/commands/rclone_mount/#vfs-file-caching + description="VFS operation mode, defines how and when the disk cache is synced", + ) + + @field_validator("R_CLONE_POLL_INTERVAL_SECONDS") + @classmethod + def enforce_r_clone_requirement(cls, v: int, info: ValidationInfo) -> PositiveInt: + dir_cache_time = info.data["R_CLONE_DIR_CACHE_TIME_SECONDS"] + if v >= dir_cache_time: + msg = f"R_CLONE_POLL_INTERVAL_SECONDS={v} must be lower than R_CLONE_DIR_CACHE_TIME_SECONDS={dir_cache_time}" + raise ValueError(msg) + return v + + +class PlacementSettings(BaseCustomSettings): + # This is just a service placement constraint, see + # https://docs.docker.com/engine/swarm/services/#control-service-placement. 
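The enforce_r_clone_requirement validator above illustrates the pydantic v2 pattern of checking one field against an already-validated sibling via ValidationInfo.data. A small self-contained sketch of that pattern with illustrative names (plain BaseModel instead of the project's settings base class):

    from pydantic import BaseModel, PositiveInt, ValidationInfo, field_validator

    class PollSettings(BaseModel):  # hypothetical model, for illustration only
        DIR_CACHE_TIME_SECONDS: PositiveInt = 10
        POLL_INTERVAL_SECONDS: PositiveInt = 9

        @field_validator("POLL_INTERVAL_SECONDS")
        @classmethod
        def _lower_than_cache_time(cls, v: int, info: ValidationInfo) -> int:
            # info.data only contains fields declared (and validated) before this one
            if v >= info.data["DIR_CACHE_TIME_SECONDS"]:
                msg = "POLL_INTERVAL_SECONDS must be lower than DIR_CACHE_TIME_SECONDS"
                raise ValueError(msg)
            return v

The same ordering caveat applies to the validators in the settings classes that follow: a field_validator can only see siblings declared before the field it validates.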
+ DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS: list[DockerPlacementConstraint] = Field( + default_factory=list, + examples=['["node.labels.region==east", "one!=yes"]'], + ) + + DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS: dict[ + str, DockerPlacementConstraint + ] = Field( + default_factory=dict, + description=( + "Use placement constraints in place of generic resources, for details " + "see https://github.com/ITISFoundation/osparc-simcore/issues/5250 " + "When `None` (default), uses generic resources" + ), + examples=['{"AIRAM": "node.labels.custom==true"}'], + ) + + _unique_custom_constraints = field_validator( + "DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS", + )(ensure_unique_list_values_validator) + + _unique_resource_placement_constraints_substitutions = field_validator( + "DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS", + )(ensure_unique_dict_values_validator) + + @field_validator("DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS") + @classmethod + def warn_if_any_values_provided(cls, value: dict) -> dict: + if len(value) > 0: + warnings.warn( # noqa: B028 + "Generic resources will be replaced by the following " + f"placement constraints {value}. This is a workaround " + "for https://github.com/moby/swarmkit/pull/3162", + UserWarning, + ) + return value + + +class DynamicSidecarSettings(BaseCustomSettings, MixinLoggingSettings): + DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED: bool = Field( # doc: https://docs.docker.com/engine/swarm/networking/#configure-service-discovery + default=False, + validation_alias=AliasChoices( + "DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED" + ), + description="dynamic-sidecar's service 'endpoint_spec' with {'Mode': 'dnsrr'}", + ) + DYNAMIC_SIDECAR_SC_BOOT_MODE: Annotated[ + BootModeEnum, + Field( + ..., + description="Boot mode used for the dynamic-sidecar services" + "By defaults, it uses the same boot mode set for the director-v2", + validation_alias=AliasChoices( + "DYNAMIC_SIDECAR_SC_BOOT_MODE", "SC_BOOT_MODE" + ), + ), + ] + + DYNAMIC_SIDECAR_LOG_LEVEL: str = Field( + "WARNING", + description="log level of the dynamic sidecar" + "If defined, it captures global env vars LOG_LEVEL and LOGLEVEL from the director-v2 service", + validation_alias=AliasChoices( + "DYNAMIC_SIDECAR_LOG_LEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ) + + DYNAMIC_SIDECAR_IMAGE: str = Field( + ..., + pattern=DYNAMIC_SIDECAR_DOCKER_IMAGE_RE, + description="used by the director to start a specific version of the dynamic-sidecar", + ) + + DYNAMIC_SIDECAR_R_CLONE_SETTINGS: RCloneSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_AWS_S3_CLI_SETTINGS: AwsS3CliSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + DYNAMIC_SIDECAR_EFS_SETTINGS: AwsEfsSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_PLACEMENT_SETTINGS: PlacementSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + # + # DEVELOPMENT ONLY config + # + + DYNAMIC_SIDECAR_MOUNT_PATH_DEV: Path | None = Field( + None, + description="Host path to the dynamic-sidecar project. 
Used as source path to mount to the dynamic-sidecar [DEVELOPMENT ONLY]", + examples=["osparc-simcore/services/dynamic-sidecar"], + ) + + DYNAMIC_SIDECAR_PORT: PortInt = Field( + DEFAULT_FASTAPI_PORT, + description="port on which the webserver for the dynamic-sidecar is exposed [DEVELOPMENT ONLY]", + ) + + DYNAMIC_SIDECAR_EXPOSE_PORT: bool = Field( + default=False, + description="Publishes the service on localhost for debugging and testing [DEVELOPMENT ONLY] " + "Can be used to access swagger doc from the host as http://127.0.0.1:30023/dev/doc " + "where 30023 is the host published port", + validate_default=True, + ) + + @field_validator("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", mode="before") + @classmethod + def auto_disable_if_production(cls, v, info: ValidationInfo): + if ( + v + and info.data.get("DYNAMIC_SIDECAR_SC_BOOT_MODE") == BootModeEnum.PRODUCTION + ): + _logger.warning( + "In production DYNAMIC_SIDECAR_MOUNT_PATH_DEV cannot be set to %s, enforcing None", + v, + ) + return None + return v + + @field_validator("DYNAMIC_SIDECAR_EXPOSE_PORT", mode="before") + @classmethod + def auto_enable_if_development(cls, v, info: ValidationInfo): + if ( + boot_mode := info.data.get("DYNAMIC_SIDECAR_SC_BOOT_MODE") + ) and boot_mode.is_devel_mode(): + # Can be used to access swagger doc from the host as http://127.0.0.1:30023/dev/doc + return True + return v + + @field_validator("DYNAMIC_SIDECAR_IMAGE", mode="before") + @classmethod + def strip_leading_slashes(cls, v: str) -> str: + return v.lstrip("/") + + @field_validator("DYNAMIC_SIDECAR_LOG_LEVEL") + @classmethod + def _validate_log_level(cls, value) -> str: + log_level: str = cls.validate_log_level(value) + return log_level diff --git a/services/director-v2/src/simcore_service_director_v2/core/errors.py b/services/director-v2/src/simcore_service_director_v2/core/errors.py index b648760ffc9..cd992d5155f 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/errors.py +++ b/services/director-v2/src/simcore_service_director_v2/core/errors.py @@ -1,4 +1,4 @@ -""" Defines the different exceptions that may arise in the director +"""Defines the different exceptions that may arise in the director TODO: Exceptions should provide all info to create Error instances of the API model @@ -19,246 +19,151 @@ } """ -from typing import Optional +from typing import Any +from common_library.errors_classes import OsparcErrorMixin from models_library.errors import ErrorDict from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID -from pydantic.errors import PydanticErrorMixin -class DirectorException(Exception): - """Basic exception""" +class DirectorError(OsparcErrorMixin, RuntimeError): + msg_template: str = "Director-v2 unexpected error" - def message(self) -> str: - return self.args[0] +class ConfigurationError(DirectorError): + msg_template: str = "Application misconfiguration: {msg}" -class ConfigurationError(DirectorException): - """An error in the director-v2 configuration""" - def __init__(self, msg: Optional[str] = None): - super().__init__( - msg or "Invalid configuration of the director-v2 application. Please check."
- ) +class UserNotFoundError(DirectorError): + msg_template: str = "user {user_id} not found" -class GenericDockerError(DirectorException): - """Generic docker library error""" +class ProjectNotFoundError(DirectorError): + msg_template: str = "project {project_id} not found" - def __init__(self, msg: str, original_exception: Exception): - super().__init__(msg + f": {original_exception}") - self.original_exception = original_exception +class ProjectNetworkNotFoundError(DirectorError): + msg_template: str = "no networks found for project {project_id}" -class ServiceNotAvailableError(DirectorException): - """Service not found""" - def __init__(self, service_name: str, service_tag: Optional[str] = None): - service_tag = service_tag or "UNDEFINED" - super().__init__(f"The service {service_name}:{service_tag} does not exist") - self.service_name = service_name - self.service_tag = service_tag +class PricingPlanUnitNotFoundError(DirectorError): + msg_template: str = "pricing plan not found {msg}" -class ServiceUUIDNotFoundError(DirectorException): - """Service not found""" +class PipelineNotFoundError(DirectorError): + msg_template: str = "pipeline {pipeline_id} not found" - def __init__(self, service_uuid: str): - super().__init__(f"The service with uuid {service_uuid} was not found") - self.service_uuid = service_uuid - -class ServiceUUIDInUseError(DirectorException): - """Service UUID is already in use""" - - def __init__(self, service_uuid: str): - super().__init__(f"The service uuid {service_uuid} is already in use") - self.service_uuid = service_uuid - - -class ServiceStartTimeoutError(DirectorException): - """The service was created but never run (time-out)""" - - def __init__(self, service_name: str, service_uuid: str): - super().__init__(f"Service {service_name}:{service_uuid} failed to start ") - self.service_name = service_name - self.service_uuid = service_uuid - - -class ProjectNotFoundError(DirectorException): - """Project not found error""" - - def __init__(self, project_id: ProjectID): - super().__init__(f"project {project_id} not found") - - -class PipelineNotFoundError(DirectorException): - """Pipeline not found error""" - - def __init__(self, pipeline_id: str): - super().__init__(f"pipeline {pipeline_id} not found") +class ComputationalRunNotFoundError(DirectorError): + msg_template = "Computational run not found" -class ComputationalRunNotFoundError(PydanticErrorMixin, DirectorException): - msg_template = "Computational run not found" +class ComputationalTaskNotFoundError(DirectorError): + msg_template = "Computational task {node_id} not found" -class NodeRightsAcquireError(PydanticErrorMixin, DirectorException): - msg_template = "Could not acquire a lock for {docker_node_id} since all {slots} slots are used." +class WalletNotEnoughCreditsError(DirectorError): + msg_template = "Wallet '{wallet_name}' has {wallet_credit_amount} credits." 
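The replacements above trade hand-written __init__ messages for declarative msg_template errors built on OsparcErrorMixin. A rough, self-contained sketch of how such template-formatted exceptions behave; the mixin below is an illustrative stand-in, not the actual common_library implementation:

    from typing import Any

    class TemplatedErrorMixin:  # stand-in for OsparcErrorMixin, for illustration only
        msg_template: str = "unexpected error"

        def __init__(self, **ctx: Any) -> None:
            self.__dict__.update(ctx)  # keep the raise-time context reachable as attributes
            super().__init__(self.msg_template.format(**ctx))

    class ProjectNotFoundError(TemplatedErrorMixin, RuntimeError):
        msg_template = "project {project_id} not found"

    assert str(ProjectNotFoundError(project_id="abc")) == "project abc not found"

Raising with keyword context, e.g. raise ProjectNotFoundError(project_id=project_id), is what lets f"{err}" render a complete message in the handlers further down (such as TaskSchedulingError.get_errors).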
# # SCHEDULER ERRORS # +class ComputationalSchedulerError(DirectorError): + msg_template = "Computational scheduler unexpected error {msg}" -class SchedulerError(DirectorException): - code = "scheduler_error" - - def __init__(self, msg: Optional[str] = None): - super().__init__(msg or "Unexpected error in the scheduler") - +class InvalidPipelineError(ComputationalSchedulerError): + msg_template = "Computational scheduler: Invalid configuration of pipeline {pipeline_id}: {msg}" -class InvalidPipelineError(SchedulerError): - """A pipeline is misconfigured""" - def __init__(self, pipeline_id: str, msg: Optional[str] = None): - super().__init__(msg or f"Invalid configuration of pipeline {pipeline_id}") +class TaskSchedulingError(ComputationalSchedulerError): + msg_template = "Computational scheduler: Task {node_id} in project {project_id} could not be scheduled {msg}" - -class TaskSchedulingError(SchedulerError): - """A task cannot be scheduled""" - - def __init__( - self, project_id: ProjectID, node_id: NodeID, msg: Optional[str] = None - ): - super().__init__(msg=msg) + def __init__(self, project_id: ProjectID, node_id: NodeID, **ctx: Any) -> None: + super().__init__(project_id=project_id, node_id=node_id, **ctx) self.project_id = project_id self.node_id = node_id def get_errors(self) -> list[ErrorDict]: # default implementation + return [ { "loc": ( f"{self.project_id}", f"{self.node_id}", ), - "msg": self.message(), + "msg": f"{self}", "type": self.code, }, ] -class MissingComputationalResourcesError(TaskSchedulingError): - """A task cannot be scheduled because the cluster does not have the required resources""" - - code = "scheduler_error.missing_resources" - - def __init__( - self, project_id: ProjectID, node_id: NodeID, msg: Optional[str] = None - ): - super().__init__(project_id, node_id, msg=msg) - - -class InsuficientComputationalResourcesError(TaskSchedulingError): - """A task cannot be scheduled because the cluster does not have *enough* of the required resources""" +class MissingComputationalResourcesError( + TaskSchedulingError +): # pylint: disable=too-many-ancestors + msg_template = ( + "Service {service_name}:{service_version} cannot be scheduled " + "on cluster: task needs '{task_resources}', " + "cluster has {cluster_resources}" + ) - code = "scheduler_error.insuficient_resources" - def __init__( - self, project_id: ProjectID, node_id: NodeID, msg: Optional[str] = None - ): - super().__init__(project_id, node_id, msg=msg) +class InsuficientComputationalResourcesError( + TaskSchedulingError +): # pylint: disable=too-many-ancestors + msg_template: str = ( + "Insufficient computational resources to run {service_name}:{service_version} with {service_requested_resources} on cluster." + "Cluster available workers: {cluster_available_resources}" + "TIP: Reduce service required resources or contact oSparc support" + ) -class PortsValidationError(TaskSchedulingError): - """ - Gathers all validation errors raised while checking input/output - ports in a project's node. 
- """ +class PortsValidationError(TaskSchedulingError): # pylint: disable=too-many-ancestors + msg_template: str = ( + "Node {node_id} in {project_id} with ports having invalid values {errors_list}" + ) - def __init__(self, project_id: ProjectID, node_id: NodeID, errors: list[ErrorDict]): - super().__init__( - project_id, - node_id, - msg=f"Node with {len(errors)} ports having invalid values", - ) - self.errors = errors - def get_errors(self) -> list[ErrorDict]: - """Returns 'public errors': filters only value_error.port_validation errors for the client. - The rest only shown as number - """ - value_errors = [] - for error in self.errors: - # NOTE: should I filter? if error["type"].startswith("value_error."): - - loc_tail = [] - if port_key := error.get("ctx", {}).get("port_key"): - loc_tail.append(f"{port_key}") - - if schema_error_path := error.get("ctx", {}).get("schema_error_path"): - loc_tail += list(schema_error_path) - - # WARNING: error in a node, might come from the previous node's port - # DO NOT remove project/node/port hiearchy - value_errors.append( - { - "loc": ( - f"{self.project_id}", - f"{self.node_id}", - ) - + tuple(loc_tail), - "msg": error["msg"], - # NOTE: here we list the codes of the PydanticValueErrors collected in ValidationError - "type": error["type"], - } - ) - return value_errors - - -class ComputationalSchedulerChangedError(PydanticErrorMixin, SchedulerError): - code = "computational_backend.scheduler_changed" +class ComputationalSchedulerChangedError(ComputationalSchedulerError): msg_template = "The dask scheduler ID changed from '{original_scheduler_id}' to '{current_scheduler_id}'" -class ComputationalBackendNotConnectedError(PydanticErrorMixin, SchedulerError): - code = "computational_backend.not_connected" +class ComputationalBackendNotConnectedError(ComputationalSchedulerError): msg_template = "The dask computational backend is not connected" -class ComputationalBackendNoS3AccessError(PydanticErrorMixin, SchedulerError): +class ComputationalBackendNoS3AccessError(ComputationalSchedulerError): msg_template = "The S3 backend is not ready, please try again later" -class ComputationalBackendTaskNotFoundError(PydanticErrorMixin, SchedulerError): - code = "computational_backend.task_not_found" +class ComputationalBackendTaskNotFoundError(ComputationalSchedulerError): msg_template = ( "The dask computational backend does not know about the task '{job_id}'" ) -class ComputationalBackendTaskResultsNotReadyError(PydanticErrorMixin, SchedulerError): - code = "computational_backend.task_result_not_ready" +class ComputationalBackendTaskResultsNotReadyError(ComputationalSchedulerError): msg_template = "The task result is not ready yet for job '{job_id}'" -# -# SCHEDULER/CLUSTER ERRORS -# -class ClusterNotFoundError(PydanticErrorMixin, SchedulerError): - code = "cluster.not_found" - msg_template = "The cluster '{cluster_id}' not found" +class ClustersKeeperNotAvailableError(ComputationalSchedulerError): + msg_template = "clusters-keeper service is not available!" -class ClusterAccessForbiddenError(PydanticErrorMixin, SchedulerError): - msg_template = "Insufficient rights to access cluster '{cluster_id}'" +class ComputationalBackendOnDemandNotReadyError(ComputationalSchedulerError): + msg_template = ( + "The on demand computational cluster is not ready 'est. 
remaining time: {eta}'" + ) -class ClusterInvalidOperationError(PydanticErrorMixin, SchedulerError): - msg_template = "Invalid operation on cluster '{cluster_id}'" +# +# SCHEDULER/CLUSTER ERRORS +# +class ClusterNotFoundError(ComputationalSchedulerError): + msg_template = "The cluster '{cluster_id}' not found" # @@ -266,25 +171,21 @@ class ClusterInvalidOperationError(PydanticErrorMixin, SchedulerError): # -class DaskClientRequestError(PydanticErrorMixin, SchedulerError): - code = "dask_client.request.error" +class DaskClientRequestError(ComputationalSchedulerError): msg_template = ( "The dask client to cluster on '{endpoint}' did an invalid request '{error}'" ) -class DaskClusterError(PydanticErrorMixin, SchedulerError): - code = "cluster.error" +class DaskClusterError(ComputationalSchedulerError): msg_template = "The dask cluster on '{endpoint}' encountered an error: '{error}'" -class DaskGatewayServerError(PydanticErrorMixin, SchedulerError): - code = "gateway.error" +class DaskGatewayServerError(ComputationalSchedulerError): msg_template = "The dask gateway on '{endpoint}' encountered an error: '{error}'" -class DaskClientAcquisisitonError(PydanticErrorMixin, SchedulerError): - code = "dask_client.acquisition.error" +class DaskClientAcquisisitonError(ComputationalSchedulerError): msg_template = ( "The dask client to cluster '{cluster}' encountered an error '{error}'" ) diff --git a/services/director-v2/src/simcore_service_director_v2/core/events.py b/services/director-v2/src/simcore_service_director_v2/core/events.py index 9f79abfc69b..13af6ca3009 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/events.py +++ b/services/director-v2/src/simcore_service_director_v2/core/events.py @@ -1,6 +1,6 @@ from servicelib.async_utils import cancel_sequential_workers -from ..meta import __version__, info +from .._meta import __version__, info # # SEE https://patorjk.com/software/taag/#p=display&f=Small&t=Director diff --git a/services/director-v2/src/simcore_service_director_v2/core/settings.py b/services/director-v2/src/simcore_service_director_v2/core/settings.py index 9b1e1e72b71..03f256b01b0 100644 --- a/services/director-v2/src/simcore_service_director_v2/core/settings.py +++ b/services/director-v2/src/simcore_service_director_v2/core/settings.py @@ -2,457 +2,73 @@ # pylint: disable=no-self-use -import logging -import re -from enum import Enum, auto +import datetime from functools import cached_property -from pathlib import Path -from typing import Optional - -from models_library.basic_types import ( - BootModeEnum, - BuildTargetEnum, - LogLevel, - PortInt, - VersionTag, -) +from typing import Annotated, cast + +from common_library.pydantic_validators import validate_numeric_string_as_timedelta +from fastapi import FastAPI +from models_library.basic_types import LogLevel, PortInt from models_library.clusters import ( - DEFAULT_CLUSTER_ID, - Cluster, + BaseCluster, ClusterAuthentication, + ClusterTypeInModel, NoAuthentication, ) -from models_library.docker import DockerGenericTag -from models_library.projects_networks import SERVICE_NETWORK_RE -from models_library.utils.enums import StrAutoEnum from pydantic import ( - AnyHttpUrl, + AliasChoices, AnyUrl, - ConstrainedStr, Field, - PositiveFloat, + NonNegativeInt, PositiveInt, - validator, + field_validator, ) +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings from settings_library.base import BaseCustomSettings from 
settings_library.catalog import CatalogSettings +from settings_library.director_v0 import DirectorV0Settings from settings_library.docker_registry import RegistrySettings from settings_library.http_client_request import ClientRequestSettings +from settings_library.node_ports import ( + NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS_DEFAULT_VALUE, + StorageAuthSettings, +) from settings_library.postgres import PostgresSettings -from settings_library.r_clone import RCloneSettings from settings_library.rabbit import RabbitSettings from settings_library.redis import RedisSettings +from settings_library.resource_usage_tracker import ( + DEFAULT_RESOURCE_USAGE_HEARTBEAT_INTERVAL, + ResourceUsageTrackerSettings, +) +from settings_library.storage import StorageSettings from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings -from settings_library.utils_service import DEFAULT_FASTAPI_PORT -from simcore_postgres_database.models.clusters import ClusterType from simcore_sdk.node_ports_v2 import FileLinkType -from ..meta import API_VTAG -from ..models.schemas.constants import DYNAMIC_SIDECAR_DOCKER_IMAGE_RE - -logger = logging.getLogger(__name__) - - -MINS = 60 -API_ROOT: str = "api" - -SERVICE_RUNTIME_SETTINGS: str = "simcore.service.settings" -SERVICE_REVERSE_PROXY_SETTINGS: str = "simcore.service.reverse-proxy-settings" -SERVICE_RUNTIME_BOOTSETTINGS: str = "simcore.service.bootsettings" - -ORG_LABELS_TO_SCHEMA_LABELS: dict[str, str] = { - "org.label-schema.build-date": "build_date", - "org.label-schema.vcs-ref": "vcs_ref", - "org.label-schema.vcs-url": "vcs_url", -} - -SUPPORTED_TRAEFIK_LOG_LEVELS: set[str] = {"info", "debug", "warn", "error"} - - -class PlacementConstraintStr(ConstrainedStr): - strip_whitespace = True - regex = re.compile( - r"^(?!-)(?![.])(?!.*--)(?!.*[.][.])[a-zA-Z0-9.-]*(? 
str: - return self.value.lower() - - -class RCloneSettings(RCloneSettings): # pylint: disable=function-redefined - R_CLONE_DIR_CACHE_TIME_SECONDS: PositiveInt = Field( - 10, - description="time to cache directory entries for", - ) - R_CLONE_POLL_INTERVAL_SECONDS: PositiveInt = Field( - 9, - description="time to wait between polling for changes", - ) - R_CLONE_VFS_CACHE_MODE: VFSCacheMode = Field( - VFSCacheMode.MINIMAL, - description="used primarly for easy testing without requiring requiring code changes", - ) - - @validator("R_CLONE_POLL_INTERVAL_SECONDS") - @classmethod - def enforce_r_clone_requirement(cls, v, values) -> PositiveInt: - dir_cache_time = values["R_CLONE_DIR_CACHE_TIME_SECONDS"] - if not v < dir_cache_time: - raise ValueError( - f"R_CLONE_POLL_INTERVAL_SECONDS={v} must be lower " - f"than R_CLONE_DIR_CACHE_TIME_SECONDS={dir_cache_time}" - ) - return v - - -class StorageSettings(BaseCustomSettings): - STORAGE_HOST: str = "storage" - STORAGE_PORT: int = 8080 - STORAGE_VTAG: str = "v0" - - @cached_property - def endpoint(self) -> str: - return AnyHttpUrl.build( - scheme="http", - host=self.STORAGE_HOST, - port=f"{self.STORAGE_PORT}", - path=f"/{self.STORAGE_VTAG}", - ) - - -class DirectorV0Settings(BaseCustomSettings): - DIRECTOR_V0_ENABLED: bool = True - - DIRECTOR_HOST: str = "director" - DIRECTOR_PORT: PortInt = 8080 - DIRECTOR_V0_VTAG: VersionTag = Field( - default="v0", description="Director-v0 service API's version tag" - ) - - @cached_property - def endpoint(self) -> str: - return AnyHttpUrl.build( - scheme="http", - host=self.DIRECTOR_HOST, - port=f"{self.DIRECTOR_PORT}", - path=f"/{self.DIRECTOR_V0_VTAG}", - ) - - -class DynamicSidecarProxySettings(BaseCustomSettings): - DYNAMIC_SIDECAR_CADDY_VERSION: str = Field( - "2.4.5-alpine", - description="current version of the Caddy image to be pulled and used from dockerhub", - ) - - -class DynamicSidecarEgressSettings(BaseCustomSettings): - DYNAMIC_SIDECAR_ENVOY_IMAGE: DockerGenericTag = Field( - "envoyproxy/envoy:v1.25-latest", - description="envoy image to use", - ) - DYNAMIC_SIDECAR_ENVOY_LOG_LEVEL: EnvoyLogLevel = Field( - EnvoyLogLevel.ERROR, description="log level for envoy proxy service" - ) - - -class DynamicSidecarSettings(BaseCustomSettings): - DYNAMIC_SIDECAR_SC_BOOT_MODE: BootModeEnum = Field( - ..., - description="Boot mode used for the dynamic-sidecar services" - "By defaults, it uses the same boot mode set for the director-v2", - env=["DYNAMIC_SIDECAR_SC_BOOT_MODE", "SC_BOOT_MODE"], - ) - - DYNAMIC_SIDECAR_LOG_LEVEL: str = Field( - "WARNING", - description="log level of the dynamic sidecar" - "If defined, it captures global env vars LOG_LEVEL and LOGLEVEL from the director-v2 service", - env=["DYNAMIC_SIDECAR_LOG_LEVEL", "LOG_LEVEL", "LOGLEVEL"], - ) - - DYNAMIC_SIDECAR_IMAGE: str = Field( - ..., - regex=DYNAMIC_SIDECAR_DOCKER_IMAGE_RE, - description="used by the director to start a specific version of the dynamic-sidecar", - ) - - SIMCORE_SERVICES_NETWORK_NAME: str = Field( - ..., - regex=SERVICE_NETWORK_RE, - description="network all dynamic services are connected to", - ) - - DYNAMIC_SIDECAR_DOCKER_COMPOSE_VERSION: str = Field( - "3.8", description="docker-compose version used in the compose-specs" - ) - - SWARM_STACK_NAME: str = Field( - ..., - description="in case there are several deployments on the same docker swarm, it is attached as a label on all spawned services", - ) - - TRAEFIK_SIMCORE_ZONE: str = Field( - ..., - description="Names the traefik zone for services that must be accessible from 
platform http entrypoint", - ) - - DYNAMIC_SIDECAR_PROXY_SETTINGS: DynamicSidecarProxySettings = Field( - auto_default_from_env=True - ) - - DYNAMIC_SIDECAR_EGRESS_PROXY_SETTINGS: DynamicSidecarEgressSettings = Field( - auto_default_from_env=True - ) - - DYNAMIC_SIDECAR_R_CLONE_SETTINGS: RCloneSettings = Field(auto_default_from_env=True) - - # - # TIMEOUTS AND RETRY dark worlds - # - - DYNAMIC_SIDECAR_API_REQUEST_TIMEOUT: PositiveFloat = Field( - 15.0, - description=( - "the default timeout each request to the dynamic-sidecar API in seconds; as per " - "design, all requests should answer quite quickly, in theory a few seconds or less" - ), - ) - DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT: PositiveFloat = Field( - 5.0, - description=( - "Connections to the dynamic-sidecars in the same swarm deployment should be very fast." - ), - ) - DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S: PositiveFloat = Field( - 60 * MINS, - description=( - "After starting the dynamic-sidecar its docker_node_id is required. " - "This operation can be slow based on system load, sometimes docker " - "swarm takes more than seconds to assign the node." - "Autoscaling of nodes takes time, it is required to wait longer" - "for nodes to be assigned." - ), - ) - DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT: PositiveFloat = Field( - 60.0 * MINS, - description=( - "When saving and restoring the state of a dynamic service, depending on the payload " - "some services take longer or shorter to save and restore. Across the " - "platform this value is set to 1 hour." - ), - ) - DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT: PositiveFloat = Field( - 1.0 * MINS, - description=( - "Restarts all started containers. During this operation, no data " - "stored in the container will be lost as docker-compose restart " - "will not alter the state of the files on the disk nor its environment." - ), - ) - DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START: PositiveFloat = Field( - 60.0 * MINS, - description=( - "When starting container (`docker-compose up`), images might " - "require pulling before containers are started." - ), - ) - DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP: PositiveFloat = Field( - 60.0 * MINS, - description=( - "When stopping a service, depending on the amount of data to store, " - "the operation might be very long. Also all relative created resources: " - "services, containsers, volumes and networks need to be removed. " - ), - ) - - DYNAMIC_SIDECAR_PROJECT_NETWORKS_ATTACH_DETACH_S: PositiveFloat = Field( - 3.0 * MINS, - description=( - "timeout for attaching/detaching project networks to/from a container" - ), - ) - DYNAMIC_SIDECAR_VOLUMES_REMOVAL_TIMEOUT_S: PositiveFloat = Field( - 1.0 * MINS, - description=( - "time to wait before giving up on removing dynamic-sidecar's volumes" - ), - ) - DYNAMIC_SIDECAR_STATUS_API_TIMEOUT_S: PositiveFloat = Field( - 1.0, - description=( - "when requesting the status of a service this is the " - "maximum amount of time the request can last" - ), - ) - - DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S: PositiveFloat = Field( - 1 * MINS, - description=( - "Connectivity between director-v2 and a dy-sidecar can be " - "temporarily disrupted if network between swarm nodes has " - "issues. To avoid the sidecar being marked as failed, " - "allow for some time to pass before declaring it failed." - ), - ) - - # - # DEVELOPMENT ONLY config - # - - DYNAMIC_SIDECAR_MOUNT_PATH_DEV: Optional[Path] = Field( - None, - description="Host path to the dynamic-sidecar project. 
Used as source path to mount to the dynamic-sidecar [DEVELOPMENT ONLY]", - example="osparc-simcore/services/dynamic-sidecar", - ) - - DYNAMIC_SIDECAR_PORT: PortInt = Field( - DEFAULT_FASTAPI_PORT, - description="port on which the webserver for the dynamic-sidecar is exposed [DEVELOPMENT ONLY]", - ) - - DYNAMIC_SIDECAR_EXPOSE_PORT: bool = Field( - False, - description="Publishes the service on localhost for debuging and testing [DEVELOPMENT ONLY]" - "Can be used to access swagger doc from the host as http://127.0.0.1:30023/dev/doc " - "where 30023 is the host published port", - ) - - PROXY_EXPOSE_PORT: bool = Field( - False, - description="exposes the proxy on localhost for debuging and testing", - ) - - DYNAMIC_SIDECAR_DOCKER_NODE_RESOURCE_LIMITS_ENABLED: bool = Field( - False, - description=( - "Limits concurrent service saves for a docker node. Guarantees " - "that no more than X services use a resource together. " - "NOTE: A node can end up with all the services from a single study. " - "When the study is closed/opened all the services will try to " - "upload/download their data. This causes a lot of disk " - "and network stress (especially for low power nodes like in AWS). " - "Some nodes collapse under load or behave unexpectedly." - ), - ) - DYNAMIC_SIDECAR_DOCKER_NODE_CONCURRENT_RESOURCE_SLOTS: PositiveInt = Field( - 2, description="Amount of slots per resource on a node" - ) - DYNAMIC_SIDECAR_DOCKER_NODE_SAVES_LOCK_TIMEOUT_S: PositiveFloat = Field( - 10, - description=( - "Lifetime of the lock. Allows the system to recover a lock " - "in case of crash, the lock will expire and result as released." - ), - ) - - @validator("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", pre=True) - @classmethod - def auto_disable_if_production(cls, v, values): - if v and values.get("DYNAMIC_SIDECAR_SC_BOOT_MODE") == BootModeEnum.PRODUCTION: - logger.warning( - "In production DYNAMIC_SIDECAR_MOUNT_PATH_DEV cannot be set to %s, enforcing None", - v, - ) - return None - return v - - @validator("DYNAMIC_SIDECAR_EXPOSE_PORT", pre=True, always=True) - @classmethod - def auto_enable_if_development(cls, v, values): - if ( - boot_mode := values.get("DYNAMIC_SIDECAR_SC_BOOT_MODE") - ) and boot_mode.is_devel_mode(): - # Can be used to access swagger doc from the host as http://127.0.0.1:30023/dev/doc - return True - return v - - @validator("DYNAMIC_SIDECAR_IMAGE", pre=True) - @classmethod - def strip_leading_slashes(cls, v) -> str: - return v.lstrip("/") - - @validator("DYNAMIC_SIDECAR_LOG_LEVEL") - @classmethod - def validate_log_level(cls, v) -> str: - valid_log_levels = {"DEBUG", "INFO", "WARNING", "ERROR"} - if v not in valid_log_levels: - raise ValueError(f"Log level must be one of {valid_log_levels} not {v}") - return v - - -class DynamicServicesSchedulerSettings(BaseCustomSettings): - DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED: bool = True - - DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS: PositiveFloat = Field( - 5.0, description="interval at which the scheduler cycle is repeated" - ) - - DIRECTOR_V2_DYNAMIC_SCHEDULER_PENDING_VOLUME_REMOVAL_INTERVAL_S: PositiveFloat = ( - Field( - 30 * MINS, - description="interval at which cleaning of unused dy-sidecar " - "docker volume removal services is executed", - ) - ) - - -class DynamicServicesSettings(BaseCustomSettings): - # TODO: PC->ANE: refactor dynamic-sidecar settings. 
One settings per app module - # WARNING: THIS IS NOT the same module as dynamic-sidecar - DIRECTOR_V2_DYNAMIC_SERVICES_ENABLED: bool = Field( - True, description="Enables/Disables the dynamic_sidecar submodule" - ) - - DYNAMIC_SIDECAR: DynamicSidecarSettings = Field(auto_default_from_env=True) - - DYNAMIC_SCHEDULER: DynamicServicesSchedulerSettings = Field( - auto_default_from_env=True - ) - - -class PGSettings(PostgresSettings): - DIRECTOR_V2_POSTGRES_ENABLED: bool = Field( - True, - description="Enables/Disables connection with service", - ) +from .dynamic_services_settings import DynamicServicesSettings class ComputationalBackendSettings(BaseCustomSettings): COMPUTATIONAL_BACKEND_ENABLED: bool = Field( - True, + default=True, + ) + COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY: PositiveInt = Field( + default=50, + description="defines how many pipelines the application can schedule concurrently", ) COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED: bool = Field( - True, + default=True, ) COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL: AnyUrl = Field( - "tcp://dask-scheduler:8786", + ..., description="This is the cluster that will be used by default" " when submitting computational services (typically " - "tcp://dask-scheduler:8786 for the internal cluster, or " - "http(s)/GATEWAY_IP:8000 for a osparc-dask-gateway)", + "tcp://dask-scheduler:8786, tls://dask-scheduler:8786 for the internal cluster", ) - COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: Optional[ClusterAuthentication] = Field( - NoAuthentication(), - description="Empty for the internal cluster, must be one " - "of simple/kerberos/jupyterhub for the osparc-dask-gateway", + COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ClusterAuthentication = Field( + default=..., + description="this is the cluster authentication that will be used by default", ) COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE: FileLinkType = Field( FileLinkType.S3, @@ -462,38 +78,58 @@ class ComputationalBackendSettings(BaseCustomSettings): FileLinkType.PRESIGNED, description=f"Default file link type to use with computational backend '{list(FileLinkType)}'", ) + COMPUTATIONAL_BACKEND_ON_DEMAND_CLUSTERS_FILE_LINK_TYPE: FileLinkType = Field( + FileLinkType.PRESIGNED, + description=f"Default file link type to use with computational backend on-demand clusters '{list(FileLinkType)}'", + ) @cached_property - def default_cluster(self): - return Cluster( - id=DEFAULT_CLUSTER_ID, + def default_cluster(self) -> BaseCluster: + return BaseCluster( name="Default cluster", endpoint=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL, authentication=self.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH, owner=1, # NOTE: currently this is a soft hack (the group of everyone is the group 1) - type=ClusterType.ON_PREMISE, - ) # type: ignore + type=ClusterTypeInModel.ON_PREMISE, + ) - @validator("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", pre=True) - def empty_auth_is_none(v): + @field_validator("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", mode="before") + @classmethod + def _empty_auth_is_none(cls, v): if not v: return NoAuthentication() return v -class AppSettings(BaseCustomSettings, MixinLoggingSettings): - # docker environs - SC_BOOT_MODE: BootModeEnum - SC_BOOT_TARGET: Optional[BuildTargetEnum] +class AppSettings(BaseApplicationSettings, MixinLoggingSettings): + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "DIRECTOR_V2_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO - LOG_LEVEL: LogLevel = Field( - LogLevel.INFO.value, - 
env=["DIRECTOR_V2_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"], + DIRECTOR_V2_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field( + default=False, + validation_alias=AliasChoices( + "DIRECTOR_V2_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ) + DIRECTOR_V2_LOG_FILTER_MAPPING: dict[LoggerName, list[MessageSubstring]] = Field( + default_factory=dict, + validation_alias=AliasChoices( + "DIRECTOR_V2_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", ) DIRECTOR_V2_DEV_FEATURES_ENABLED: bool = False DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: bool = Field( - False, + default=False, description=( "Under development feature. If enabled state " "is saved using rclone docker volumes." @@ -502,81 +138,114 @@ class AppSettings(BaseCustomSettings, MixinLoggingSettings): # for passing self-signed certificate to spawned services DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID: str = Field( - "", + default="", description="ID of the docker secret containing the self-signed certificate", ) DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME: str = Field( - "", + default="", description="Name of the docker secret containing the self-signed certificate", ) DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME: str = Field( - "", + default="", description="Filepath to self-signed osparc.crt file *as mounted inside the container*, empty strings disables it", ) + DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + DIRECTOR_V2_PROFILING: bool = False - # extras - EXTRA_HOSTS_SUFFIX: str = Field("undefined", env="EXTRA_HOSTS_SUFFIX") - PUBLISHED_HOSTS_NAME: str = Field("", env="PUBLISHED_HOSTS_NAME") - SWARM_STACK_NAME: str = Field("undefined-please-check", env="SWARM_STACK_NAME") + DIRECTOR_V2_REMOTE_DEBUGGING_PORT: PortInt | None = Field(default=None) - NODE_SCHEMA_LOCATION: str = Field( - f"{API_ROOT}/{API_VTAG}/schemas/node-meta-v0.0.1.json", - description="used when in devel mode vs release mode", + # extras + SWARM_STACK_NAME: str = Field(default="undefined-please-check") + SERVICE_TRACKING_HEARTBEAT: datetime.timedelta = Field( + default=DEFAULT_RESOURCE_USAGE_HEARTBEAT_INTERVAL, + description="Service scheduler heartbeat (everytime a heartbeat is sent into RabbitMQ)" + " (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", ) - SIMCORE_SERVICES_NETWORK_NAME: Optional[str] = Field( - None, + SIMCORE_SERVICES_NETWORK_NAME: str | None = Field( + default=None, description="used to find the right network name", ) - SIMCORE_SERVICES_PREFIX: Optional[str] = Field( + SIMCORE_SERVICES_PREFIX: str | None = Field( "simcore/services", description="useful when developing with an alternative registry namespace", ) - # monitoring - MONITORING_ENABLED: bool = False - - # fastappi app settings - DIRECTOR_V2_DEBUG: bool = False - - # ptvsd settings - DIRECTOR_V2_REMOTE_DEBUG_PORT: PortInt = 3000 + DIRECTOR_V2_NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS: NonNegativeInt = Field( + default=NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS_DEFAULT_VALUE, + description="forwarded to sidecars which use nodeports", + ) - CLIENT_REQUEST: ClientRequestSettings = Field(auto_default_from_env=True) + # debug settings + CLIENT_REQUEST: ClientRequestSettings = Field( + json_schema_extra={"auto_default_from_env": True} 
+ ) # App modules settings --------------------- - DIRECTOR_V2_STORAGE: StorageSettings = Field(auto_default_from_env=True) + DIRECTOR_V2_STORAGE: Annotated[ + StorageSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH: StorageAuthSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) - DIRECTOR_V2_CATALOG: Optional[CatalogSettings] = Field(auto_default_from_env=True) + DIRECTOR_V2_CATALOG: Annotated[ + CatalogSettings | None, Field(json_schema_extra={"auto_default_from_env": True}) + ] - DIRECTOR_V0: DirectorV0Settings = Field(auto_default_from_env=True) + DIRECTOR_V0: DirectorV0Settings = Field( + json_schema_extra={"auto_default_from_env": True} + ) - DYNAMIC_SERVICES: DynamicServicesSettings = Field(auto_default_from_env=True) + DYNAMIC_SERVICES: Annotated[ + DynamicServicesSettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] - POSTGRES: PGSettings = Field(auto_default_from_env=True) + POSTGRES: Annotated[ + PostgresSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] - REDIS: RedisSettings = Field(auto_default_from_env=True) + REDIS: RedisSettings = Field(json_schema_extra={"auto_default_from_env": True}) - DIRECTOR_V2_RABBITMQ: RabbitSettings = Field(auto_default_from_env=True) + DIRECTOR_V2_RABBITMQ: RabbitSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) TRAEFIK_SIMCORE_ZONE: str = Field("internal_simcore_stack") DIRECTOR_V2_COMPUTATIONAL_BACKEND: ComputationalBackendSettings = Field( - auto_default_from_env=True + json_schema_extra={"auto_default_from_env": True} ) - DIRECTOR_V2_TRACING: Optional[TracingSettings] = Field(auto_default_from_env=True) + DIRECTOR_V2_DOCKER_REGISTRY: RegistrySettings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for the private registry deployed with the platform", + ) + DIRECTOR_V2_DOCKER_HUB_REGISTRY: RegistrySettings | None = Field( + default=None, description="public DockerHub registry settings" + ) - DIRECTOR_V2_DOCKER_REGISTRY: RegistrySettings = Field(auto_default_from_env=True) + DIRECTOR_V2_RESOURCE_USAGE_TRACKER: ResourceUsageTrackerSettings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="resource usage tracker service client's plugin", + ) - # This is just a service placement constraint, see - # https://docs.docker.com/engine/swarm/services/#control-service-placement. 
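# The field removed just below carried raw docker swarm placement expressions
# (see the linked docs in the comment above). A minimal sketch of how such
# constraint strings are typically consumed, using the docker SDK for Python
# (docker-py) with an illustrative image and service name -- this is not this
# repo's code, and the exact `constraints=` parameter is assumed from that
# SDK's documented services.create() signature.
import docker


def start_pinned_service(constraints: list[str]) -> None:
    """Create a swarm service restricted to nodes matching the constraints."""
    client = docker.from_env()
    client.services.create(
        "nginx:alpine",  # placeholder image, purely for illustration
        name="constrained-example",
        constraints=constraints,  # e.g. ["node.labels.region==east", "node.role==worker"]
    )


# start_pinned_service(["node.labels.region==east"])  # requires a swarm manager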
- DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS: list[PlacementConstraintStr] = Field( - default_factory=list, - example='["node.labels.region==east", "one!=yes"]', + DIRECTOR_V2_TRACING: TracingSettings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", ) - @validator("LOG_LEVEL", pre=True) + @field_validator("LOG_LEVEL", mode="before") @classmethod - def _validate_loglevel(cls, value) -> str: - return cls.validate_log_level(value) + def _validate_loglevel(cls, value: str) -> str: + log_level: str = cls.validate_log_level(value) + return log_level + + _validate_service_tracking_heartbeat = validate_numeric_string_as_timedelta( + "SERVICE_TRACKING_HEARTBEAT" + ) + + +def get_application_settings(app: FastAPI) -> AppSettings: + return cast(AppSettings, app.state.settings) diff --git a/services/director-v2/src/simcore_service_director_v2/meta.py b/services/director-v2/src/simcore_service_director_v2/meta.py deleted file mode 100644 index 8f81276fa52..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/meta.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Application's metadata - -""" -from typing import Final - -from packaging.version import Version -from servicelib.utils_meta import PackageInfo - -info: Final = PackageInfo(package_name="simcore-service-director-v2") -__version__: Final[str] = info.__version__ - - -PROJECT_NAME: Final[str] = info.project_name -VERSION: Final[Version] = info.version -API_VERSION: Final[str] = info.__version__ -API_VTAG: Final[str] = info.api_prefix_path_tag -SUMMARY: Final[str] = info.get_summary() diff --git a/services/director-v2/src/simcore_service_director_v2/models/__init__.py b/services/director-v2/src/simcore_service_director_v2/models/__init__.py index e69de29bb2d..166628e0516 100644 --- a/services/director-v2/src/simcore_service_director_v2/models/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/models/__init__.py @@ -0,0 +1,4 @@ +""" +This packages is intended for domain (i.e. 
internal) models +Any common/base and API (schema) models go in models_library or models_library.api_schemas_directorv2 +""" diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py b/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py new file mode 100644 index 00000000000..63017ee62e7 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/models/comp_pipelines.py @@ -0,0 +1,63 @@ +from contextlib import suppress +from typing import cast + +import networkx as nx +from models_library.projects import ProjectID +from models_library.projects_state import RunningState +from pydantic import BaseModel, ConfigDict, field_validator +from simcore_postgres_database.models.comp_pipeline import StateType + +from ..utils.db import DB_TO_RUNNING_STATE + + +class CompPipelineAtDB(BaseModel): + project_id: ProjectID + dag_adjacency_list: dict[str, list[str]] # json serialization issue if using NodeID + state: RunningState + + @field_validator("state", mode="before") + @classmethod + def _convert_state_from_state_type_enum_if_needed(cls, v): + if isinstance(v, str): + # try to convert to a StateType, if it fails the validations will continue + # and pydantic will try to convert it to a RunninState later on + with suppress(ValueError): + v = StateType(v) + if isinstance(v, StateType): + return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) + return v + + @field_validator("dag_adjacency_list", mode="before") + @classmethod + def _auto_convert_dag(cls, v): + # this enforcement is here because the serialization using json is not happy with non str Dict keys, also comparison gets funny if the lists are having sometimes UUIDs or str. + # NOTE: this might not be necessary anymore once we have something fully defined + return {str(key): [str(n) for n in value] for key, value in v.items()} + + def get_graph(self) -> nx.DiGraph: + return cast( + nx.DiGraph, + nx.convert.from_dict_of_lists( + self.dag_adjacency_list, create_using=nx.DiGraph # type: ignore[arg-type] # list is an Iterable but dict is Invariant + ), + ) + + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ + # DB model + { + "project_id": "65fee9d2-e030-452c-a29c-45d288577ca5", + "dag_adjacency_list": { + "539531c4-afb9-4ca8-bda3-06ad3d7bc339": [ + "f98e20e5-b235-43ed-a63d-15b71bc7c762" + ], + "f98e20e5-b235-43ed-a63d-15b71bc7c762": [], + "5332fcde-b043-41f5-8786-a3a359b110ad": [], + }, + "state": "NOT_STARTED", + } + ] + }, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py new file mode 100644 index 00000000000..ab17131186d --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/models/comp_runs.py @@ -0,0 +1,161 @@ +import datetime +from contextlib import suppress +from typing import TypeAlias + +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.users import UserID +from pydantic import BaseModel, ConfigDict, PositiveInt, field_validator +from simcore_postgres_database.models.comp_pipeline import StateType +from typing_extensions import ( # https://docs.pydantic.dev/latest/api/standard_library_types/#typeddict + TypedDict, +) + +from ..utils.db import DB_TO_RUNNING_STATE + + +class ProjectMetadataDict(TypedDict, total=False): + parent_node_id: NodeID + parent_node_name: str + 
parent_project_id: ProjectID + parent_project_name: str + root_parent_project_id: ProjectID + root_parent_project_name: str + root_parent_node_id: NodeID + root_parent_node_name: str + + +class RunMetadataDict(TypedDict, total=False): + node_id_names_map: dict[NodeID, str] + project_name: str + product_name: str + product_api_base_url: str + simcore_user_agent: str + user_email: str + wallet_id: int | None + wallet_name: str | None + project_metadata: ProjectMetadataDict + + +Iteration: TypeAlias = PositiveInt + + +class CompRunsAtDB(BaseModel): + run_id: PositiveInt + project_uuid: ProjectID + user_id: UserID + iteration: Iteration + result: RunningState + created: datetime.datetime + modified: datetime.datetime + started: datetime.datetime | None + ended: datetime.datetime | None + cancelled: datetime.datetime | None + metadata: RunMetadataDict = RunMetadataDict() + use_on_demand_clusters: bool + scheduled: datetime.datetime | None + processed: datetime.datetime | None + + @field_validator("result", mode="before") + @classmethod + def convert_result_from_state_type_enum_if_needed(cls, v): + if isinstance(v, str): + # try to convert to a StateType, if it fails the validations will continue + # and pydantic will try to convert it to a RunninState later on + with suppress(ValueError): + v = StateType(v) + if isinstance(v, StateType): + return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) + return v + + @field_validator("created", "modified", "started", "ended") + @classmethod + def ensure_utc(cls, v: datetime.datetime | None) -> datetime.datetime | None: + if v is not None and v.tzinfo is None: + v = v.replace(tzinfo=datetime.UTC) + return v + + @field_validator("metadata", mode="before") + @classmethod + def convert_null_to_empty_metadata(cls, v): + if v is None: + v = RunMetadataDict() + return v + + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ + # DB model + { + "run_id": 432, + "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", + "user_id": 132, + "iteration": 42, + "result": "UNKNOWN", + "started": None, + "ended": None, + "created": "2021-03-01T13:07:34.191610", + "modified": "2021-03-01T13:07:34.191610", + "cancelled": None, + "use_on_demand_clusters": False, + "scheduled": None, + "processed": None, + }, + { + "run_id": 432, + "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", + "user_id": 132, + "iteration": 42, + "result": "NOT_STARTED", + "started": None, + "ended": None, + "created": "2021-03-01T13:07:34.191610", + "modified": "2021-03-01T13:07:34.191610", + "cancelled": None, + "use_on_demand_clusters": False, + "scheduled": None, + "processed": None, + }, + { + "run_id": 43243, + "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", + "user_id": 132, + "iteration": 12, + "result": "SUCCESS", + "created": "2021-03-01T13:07:34.191610", + "modified": "2021-03-01T13:07:34.191610", + "started": "2021-03-01T08:07:34.191610", + "ended": "2021-03-01T13:07:34.10", + "cancelled": None, + "metadata": { + "node_id_names_map": {}, + "product_name": "osparc", + "project_name": "my awesome project", + "simcore_user_agent": "undefined", + "some-other-metadata-which-is-an-array": [1, 3, 4], + }, + "use_on_demand_clusters": False, + "scheduled": None, + "processed": None, + }, + { + "run_id": 43243, + "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", + "user_id": 132, + "iteration": 12, + "result": "SUCCESS", + "created": "2021-03-01T13:07:34.191610", + "modified": "2021-03-01T13:07:34.191610", + "started": 
"2021-03-01T08:07:34.191610", + "ended": "2021-03-01T13:07:34.10", + "cancelled": None, + "metadata": None, + "use_on_demand_clusters": False, + "scheduled": None, + "processed": None, + }, + ] + }, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py b/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py new file mode 100644 index 00000000000..3d10ddc2070 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/models/comp_tasks.py @@ -0,0 +1,276 @@ +import datetime as dt +from contextlib import suppress +from typing import Annotated, Any + +from dask_task_models_library.container_tasks.protocol import ContainerEnvsDict +from models_library.api_schemas_directorv2.services import NodeRequirements +from models_library.basic_regex import SIMPLE_VERSION_RE +from models_library.errors import ErrorDict +from models_library.projects import ProjectID +from models_library.projects_nodes import InputsDict, OutputsDict +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.resource_tracker import HardwareInfo +from models_library.services import ServiceInputsDict, ServiceOutput, ServicePortKey +from models_library.services_regex import SERVICE_KEY_RE +from models_library.services_resources import BootMode +from pydantic import ( + BaseModel, + BeforeValidator, + ByteSize, + ConfigDict, + Field, + PositiveInt, + TypeAdapter, + ValidationInfo, + field_validator, +) +from simcore_postgres_database.models.comp_pipeline import StateType +from simcore_postgres_database.models.comp_tasks import NodeClass + +from ..utils.db import DB_TO_RUNNING_STATE, RUNNING_STATE_TO_DB + + +class Image(BaseModel): + name: str = Field(..., pattern=SERVICE_KEY_RE.pattern) + tag: str = Field(..., pattern=SIMPLE_VERSION_RE) + + requires_gpu: bool | None = Field( + default=None, deprecated=True, description="Use instead node_requirements" + ) + requires_mpi: bool | None = Field( + default=None, deprecated=True, description="Use instead node_requirements" + ) + node_requirements: NodeRequirements | None = Field( + default=None, + description="the requirements for the service to run on a node", + validate_default=True, + ) + boot_mode: BootMode = BootMode.CPU + command: list[str] = Field( + default=[ + "run", + ], + description="command to run container. Can override using ContainerSpec service labels", + ) + envs: ContainerEnvsDict = Field( + default_factory=dict, description="The environment to use to run the service" + ) + + @field_validator("node_requirements", mode="before") + @classmethod + def _migrate_from_requirements(cls, v, info: ValidationInfo): + if v is None: + # NOTE: 'node_requirements' field's default=None although is NOT declared as nullable. + # Then this validator with `pre=True, always=True` is used to create a default + # based on that accounts for an old version. 
+ # This strategy guarantees backwards compatibility + v = NodeRequirements( + CPU=1.0, + GPU=1 if info.data.get("requires_gpu") else 0, + RAM=TypeAdapter(ByteSize).validate_python("128 MiB"), + ) + return v + + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ # type: ignore + { + "name": "simcore/services/dynamic/jupyter-octave-python-math", + "tag": "1.3.1", + "node_requirements": node_req_example, + } + for node_req_example in NodeRequirements.model_config[ # type: ignore + "json_schema_extra" + ]["examples"] + ] + + + # old version + [ + { + "name": "simcore/services/dynamic/jupyter-octave-python-math", + "tag": "0.0.1", + "requires_gpu": True, + "requires_mpi": False, + } + ] + }, + ) + + +class _ServiceOutputOverride(ServiceOutput): + # NOTE: for a long time defaultValue field was added to ServiceOutput wrongly in the DB. + # this flags allows parsing of the outputs without error. This MUST not leave the director-v2! + model_config = ConfigDict(extra="ignore") + + +_ServiceOutputsOverride = dict[ServicePortKey, _ServiceOutputOverride] + + +class NodeSchema(BaseModel): + inputs: ServiceInputsDict = Field(..., description="the inputs scheam") + outputs: _ServiceOutputsOverride = Field(..., description="the outputs schema") + model_config = ConfigDict(extra="forbid", from_attributes=True) + + +class CompTaskAtDB(BaseModel): + project_id: ProjectID + node_id: NodeID + job_id: str | None = Field(default=None, description="The worker job ID") + node_schema: NodeSchema = Field(..., alias="schema") + inputs: InputsDict | None = Field(..., description="the inputs payload") + outputs: Annotated[ + OutputsDict | None, + Field(default_factory=dict, description="the outputs payload"), + ] + run_hash: str | None = Field( + default=None, + description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated", + ) + image: Image + start: dt.datetime | None = None + end: dt.datetime | None = None + state: RunningState + task_id: PositiveInt | None = None + internal_id: PositiveInt + node_class: NodeClass + errors: list[ErrorDict] | None = None + progress: float | None = Field( + default=None, + ge=0.0, + le=1.0, + description="current progress of the task if available", + ) + last_heartbeat: dt.datetime | None = Field( + ..., description="Last time the running task was checked by the backend" + ) + created: dt.datetime + modified: dt.datetime + # Additional information about price and hardware (ex. 
AWS EC2 instance type) + pricing_info: dict | None + hardware_info: HardwareInfo + + submit: dt.datetime | None = Field( + default=None, deprecated=True, description="Required for legacy services" + ) + + @field_validator("state", mode="before") + @classmethod + def _convert_state_from_state_type_enum_if_needed(cls, v): + if isinstance(v, str): + # try to convert to a StateType, if it fails the validations will continue + # and pydantic will try to convert it to a RunninState later on + with suppress(ValueError): + v = StateType(v) + if isinstance(v, StateType): + return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) + return v + + @field_validator("start", "end") + @classmethod + def _ensure_utc(cls, v: dt.datetime | None) -> dt.datetime | None: + if v is not None and v.tzinfo is None: + v = v.replace(tzinfo=dt.UTC) + return v + + @field_validator("hardware_info", mode="before") + @classmethod + def _backward_compatible_null_value(cls, v: HardwareInfo | None) -> HardwareInfo: + if v is None: + return HardwareInfo(aws_ec2_instances=[]) + return v + + def to_db_model(self, **exclusion_rules) -> dict[str, Any]: + # mode json is used to ensure the UUIDs are converted to strings + comp_task_dict = self.model_dump( + mode="json", by_alias=True, exclude_unset=True, **exclusion_rules + ) + # Convert state to DB enum value + if "state" in comp_task_dict: + comp_task_dict["state"] = RUNNING_STATE_TO_DB[comp_task_dict["state"]].value + # but now the datetimes are strings which are not compatible with the DB + # so we need to convert them back to datetime objects + for field in ["start", "end", "last_heartbeat", "created", "modified"]: + if field in comp_task_dict and isinstance(comp_task_dict[field], str): + comp_task_dict[field] = dt.datetime.fromisoformat(comp_task_dict[field]) + return comp_task_dict + + model_config = ConfigDict( + extra="forbid", + from_attributes=True, + json_schema_extra={ + "examples": [ + # DB model + { + "task_id": 324, + "project_id": "341351c4-23d1-4366-95d0-bc01386001a7", + "node_id": "7f62be0e-1298-4fe4-be76-66b6e859c260", + "job_id": None, + "internal_id": 3, + "schema": { + "inputs": { + "input_1": { + "label": "input_files", + "description": "Any input files. 
One or serveral files compressed in a zip will be downloaded in an inputs folder.", + "type": "data:*/*", + "displayOrder": 1.0, + } + }, + "outputs": { + "output_1": { + "label": "Output files", + "description": "Output files uploaded from the outputs folder", + "type": "data:*/*", + "displayOrder": 1.0, + } + }, + }, + "inputs": { + "input_1": { + "nodeUuid": "48a7ac7a-cfc3-44a6-ba9b-5a1a578b922c", + "output": "output_1", + } + }, + "outputs": { + "output_1": { + "store": 0, + "path": "341351c4-23d1-4366-95d0-bc01386001a7/7f62be0e-1298-4fe4-be76-66b6e859c260/output_1.zip", + } + }, + "image": image_example, + "node_class": "INTERACTIVE", + "state": "NOT_STARTED", + "progress": 0.44, + "last_heartbeat": None, + "created": "2022-05-20 13:28:31.139", + "modified": "2023-06-23 15:58:32.833081", + "pricing_info": { + "pricing_plan_id": 1, + "pricing_unit_id": 1, + "pricing_unit_cost_id": 1, + }, + "hardware_info": next( + iter(HardwareInfo.model_config["json_schema_extra"]["examples"]) # type: ignore + ), + } + for image_example in Image.model_config["json_schema_extra"]["examples"] # type: ignore + ] + }, + ) + + +def _none_to_zero_float_pre_validator(value: Any): + if value is None: + return 0.0 + return value + + +class ComputationTaskForRpcDBGet(BaseModel): + project_uuid: ProjectID + node_id: NodeID + state: RunningState + progress: Annotated[float, BeforeValidator(_none_to_zero_float_pre_validator)] + image: dict[str, Any] + started_at: dt.datetime | None + ended_at: dt.datetime | None diff --git a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_pipelines.py b/services/director-v2/src/simcore_service_director_v2/models/domains/comp_pipelines.py deleted file mode 100644 index eab9da588d6..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_pipelines.py +++ /dev/null @@ -1,58 +0,0 @@ -from contextlib import suppress -from typing import Dict, List - -import networkx as nx -from models_library.projects import ProjectID -from models_library.projects_state import RunningState -from pydantic import BaseModel, validator -from simcore_postgres_database.models.comp_pipeline import StateType - -from ...utils.db import DB_TO_RUNNING_STATE - - -class CompPipelineAtDB(BaseModel): - project_id: ProjectID - dag_adjacency_list: Dict[str, List[str]] # json serialization issue if using NodeID - state: RunningState - - @validator("state", pre=True) - @classmethod - def convert_state_from_state_type_enum_if_needed(cls, v): - if isinstance(v, str): - # try to convert to a StateType, if it fails the validations will continue - # and pydantic will try to convert it to a RunninState later on - with suppress(ValueError): - v = StateType(v) - if isinstance(v, StateType): - return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) - return v - - @validator("dag_adjacency_list", pre=True) - @classmethod - def auto_convert_dag(cls, v): - # this enforcement is here because the serialization using json is not happy with non str Dict keys, also comparison gets funny if the lists are having sometimes UUIDs or str. 
- # NOTE: this might not be necessary anymore once we have something fully defined - return {str(key): [str(n) for n in value] for key, value in v.items()} - - def get_graph(self) -> nx.DiGraph: - return nx.from_dict_of_lists(self.dag_adjacency_list, create_using=nx.DiGraph) - - class Config: - orm_mode = True - - schema_extra = { - "examples": [ - # DB model - { - "project_id": "65fee9d2-e030-452c-a29c-45d288577ca5", - "dag_adjacency_list": { - "539531c4-afb9-4ca8-bda3-06ad3d7bc339": [ - "f98e20e5-b235-43ed-a63d-15b71bc7c762" - ], - "f98e20e5-b235-43ed-a63d-15b71bc7c762": [], - "5332fcde-b043-41f5-8786-a3a359b110ad": [], - }, - "state": "NOT_STARTED", - } - ] - } diff --git a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/models/domains/comp_runs.py deleted file mode 100644 index 9bf5afdf516..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_runs.py +++ /dev/null @@ -1,74 +0,0 @@ -from contextlib import suppress -from datetime import datetime -from typing import Optional - -from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID -from models_library.projects import ProjectID -from models_library.projects_state import RunningState -from models_library.users import UserID -from pydantic import BaseModel, PositiveInt, validator -from simcore_postgres_database.models.comp_pipeline import StateType - -from ...utils.db import DB_TO_RUNNING_STATE - - -class CompRunsAtDB(BaseModel): - run_id: PositiveInt - project_uuid: ProjectID - user_id: UserID - cluster_id: Optional[ClusterID] - iteration: PositiveInt - result: RunningState - created: datetime - modified: datetime - started: Optional[datetime] - ended: Optional[datetime] - - @validator("result", pre=True) - @classmethod - def convert_result_from_state_type_enum_if_needed(cls, v): - if isinstance(v, str): - # try to convert to a StateType, if it fails the validations will continue - # and pydantic will try to convert it to a RunninState later on - with suppress(ValueError): - v = StateType(v) - if isinstance(v, StateType): - return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) - return v - - @validator("cluster_id", pre=True) - @classmethod - def concert_null_to_default_cluster_id(cls, v): - if v is None: - v = DEFAULT_CLUSTER_ID - return v - - class Config: - orm_mode = True - schema_extra = { - "examples": [ - # DB model - { - "run_id": 432, - "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", - "user_id": 132, - "cluster_id": 0, - "iteration": 42, - "result": "NOT_STARTED", - "created": "2021-03-01 13:07:34.19161", - "modified": "2021-03-01 13:07:34.19161", - }, - { - "run_id": 43243, - "project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5", - "user_id": 132, - "cluster_id": 123, - "iteration": 12, - "result": "SUCCESS", - "created": "2021-03-01 13:07:34.19161", - "modified": "2021-03-01 13:07:34.19161", - "started": "2021-03-01 8:07:34.19161", - "ended": "2021-03-01 13:07:34.10", - }, - ] - } diff --git a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_tasks.py b/services/director-v2/src/simcore_service_director_v2/models/domains/comp_tasks.py deleted file mode 100644 index 7403a9b33a8..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/domains/comp_tasks.py +++ /dev/null @@ -1,192 +0,0 @@ -from contextlib import suppress -from datetime import datetime -from typing import Any, Optional - -from models_library.basic_regex import VERSION_RE -from 
models_library.errors import ErrorDict -from models_library.projects import ProjectID -from models_library.projects_nodes import InputsDict, NodeID, OutputsDict -from models_library.projects_state import RunningState -from models_library.services import ( - KEY_RE, - ServiceInputsDict, - ServiceOutput, - ServicePortKey, -) -from models_library.services_resources import BootMode -from pydantic import BaseModel, ByteSize, Extra, Field, parse_obj_as, validator -from pydantic.types import PositiveInt -from simcore_postgres_database.models.comp_tasks import NodeClass, StateType - -from ...utils.db import DB_TO_RUNNING_STATE, RUNNING_STATE_TO_DB -from ..schemas.services import NodeRequirements - - -class Image(BaseModel): - name: str = Field(..., regex=KEY_RE) - tag: str = Field(..., regex=VERSION_RE) - - requires_gpu: Optional[bool] = Field( - None, deprecated=True, description="Use instead node_requirements" - ) - requires_mpi: Optional[bool] = Field( - None, deprecated=True, description="Use instead node_requirements" - ) - node_requirements: Optional[NodeRequirements] = Field( - None, description="the requirements for the service to run on a node" - ) - boot_mode: BootMode = BootMode.CPU - command: list[str] = Field( - default=[ - "run", - ], - description="command to run container. Can override using ContainerSpec service labels", - ) - - @validator("node_requirements", pre=True, always=True) - @classmethod - def migrate_from_requirements(cls, v, values): - if v is None: - # NOTE: 'node_requirements' field's default=None although is NOT declared as nullable. - # Then this validator with `pre=True, always=True` is used to create a default - # based on that accounts for an old version. - # This strategy guarantees backwards compatibility - v = NodeRequirements( - CPU=1.0, - GPU=1 if values.get("requires_gpu") else 0, - RAM=parse_obj_as(ByteSize, "128 MiB"), - ) - return v - - class Config: - orm_mode = True - schema_extra = { - "examples": [ - { - "name": "simcore/services/dynamic/jupyter-octave-python-math", - "tag": "1.3.1", - "node_requirements": node_req_example, - } - for node_req_example in NodeRequirements.Config.schema_extra["examples"] - ] - + - # old version - [ - { - "name": "simcore/services/dynamic/jupyter-octave-python-math", - "tag": "0.0.1", - "requires_gpu": True, - "requires_mpi": False, - } - ] - } - - -# NOTE: for a long time defaultValue field was added to ServiceOutput wrongly in the DB. -# this flags allows parsing of the outputs without error. This MUST not leave the director-v2! 
-class _ServiceOutputOverride(ServiceOutput): - class Config(ServiceOutput.Config): - extra = Extra.ignore - - -_ServiceOutputsOverride = dict[ServicePortKey, _ServiceOutputOverride] - - -class NodeSchema(BaseModel): - inputs: ServiceInputsDict = Field(..., description="the inputs scheam") - outputs: _ServiceOutputsOverride = Field(..., description="the outputs schema") - - class Config: - extra = Extra.forbid - orm_mode = True - - -class CompTaskAtDB(BaseModel): - project_id: ProjectID - node_id: NodeID - job_id: Optional[str] = Field(default=None, description="The worker job ID") - node_schema: NodeSchema = Field(..., alias="schema") - inputs: Optional[InputsDict] = Field(..., description="the inputs payload") - outputs: Optional[OutputsDict] = Field({}, description="the outputs payload") - run_hash: Optional[str] = Field( - default=None, - description="the hex digest of the resolved inputs +outputs hash at the time when the last outputs were generated", - ) - image: Image - submit: datetime - start: Optional[datetime] = Field(default=None) - end: Optional[datetime] = Field(default=None) - state: RunningState - task_id: Optional[PositiveInt] = Field(default=None) - internal_id: PositiveInt - node_class: NodeClass - errors: Optional[list[ErrorDict]] = Field(default=None) - - @validator("state", pre=True) - @classmethod - def convert_state_from_state_type_enum_if_needed(cls, v): - if isinstance(v, str): - # try to convert to a StateType, if it fails the validations will continue - # and pydantic will try to convert it to a RunninState later on - with suppress(ValueError): - v = StateType(v) - if isinstance(v, StateType): - return RunningState(DB_TO_RUNNING_STATE[StateType(v)]) - return v - - def to_db_model(self, **exclusion_rules) -> dict[str, Any]: - comp_task_dict = self.dict(by_alias=True, exclude_unset=True, **exclusion_rules) - if "state" in comp_task_dict: - comp_task_dict["state"] = RUNNING_STATE_TO_DB[comp_task_dict["state"]].value - return comp_task_dict - - class Config: - extra = Extra.forbid - orm_mode = True - schema_extra = { - "examples": [ - # DB model - { - "task_id": 324, - "project_id": "341351c4-23d1-4366-95d0-bc01386001a7", - "node_id": "7f62be0e-1298-4fe4-be76-66b6e859c260", - "job_id": None, - "internal_id": 3, - "schema": { - "inputs": { - "input_1": { - "label": "input_files", - "description": "Any input files. 
One or serveral files compressed in a zip will be downloaded in an inputs folder.", - "type": "data:*/*", - "displayOrder": 1.0, - } - }, - "outputs": { - "output_1": { - "label": "Output files", - "description": "Output files uploaded from the outputs folder", - "type": "data:*/*", - "displayOrder": 1.0, - } - }, - }, - "inputs": { - "input_1": { - "nodeUuid": "48a7ac7a-cfc3-44a6-ba9b-5a1a578b922c", - "output": "output_1", - } - }, - "outputs": { - "output_1": { - "store": 0, - "path": "341351c4-23d1-4366-95d0-bc01386001a7/7f62be0e-1298-4fe4-be76-66b6e859c260/output_1.zip", - } - }, - "image": image_example, - "submit": "2021-03-01 13:07:34.19161", - "node_class": "INTERACTIVE", - "state": "NOT_STARTED", - } - for image_example in Image.Config.schema_extra["examples"] - ] - } diff --git a/services/director-v2/src/simcore_service_director_v2/models/domains/dynamic_services.py b/services/director-v2/src/simcore_service_director_v2/models/domains/dynamic_services.py deleted file mode 100644 index 612c88660ec..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/domains/dynamic_services.py +++ /dev/null @@ -1,59 +0,0 @@ -from models_library.services import ServicePortKey -from models_library.services_resources import ( - ServiceResourcesDict, - ServiceResourcesDictHelpers, -) -from pydantic import BaseModel, ByteSize, Field - -from ..schemas.dynamic_services import RunningDynamicServiceDetails, ServiceDetails - - -class RetrieveDataIn(BaseModel): - port_keys: list[ServicePortKey] = Field( - ..., description="The port keys to retrieve data from" - ) - - -class RetrieveDataOut(BaseModel): - size_bytes: ByteSize = Field( - ..., description="The amount of data transferred by the retrieve call" - ) - - -class RetrieveDataOutEnveloped(BaseModel): - data: RetrieveDataOut - - @classmethod - def from_transferred_bytes( - cls, transferred_bytes: int - ) -> "RetrieveDataOutEnveloped": - return cls(data=RetrieveDataOut(size_bytes=ByteSize(transferred_bytes))) - - class Config: - schema_extra = {"examples": [{"data": {"size_bytes": 42}}]} - - -class DynamicServiceCreate(ServiceDetails): - - service_resources: ServiceResourcesDict - - product_name: str = Field(..., description="Current product name") - - class Config: - schema_extra = { - "example": { - "key": "simcore/services/dynamic/3dviewer", - "version": "2.4.5", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "product_name": "osparc", - "service_resources": ServiceResourcesDictHelpers.Config.schema_extra[ - "examples" - ][0], - } - } - - -DynamicServiceGet = RunningDynamicServiceDetails diff --git a/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py b/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py new file mode 100644 index 00000000000..d0888dd1acf --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/models/dynamic_services_scheduler.py @@ -0,0 +1,558 @@ +import logging +from collections.abc import Mapping +from datetime import datetime +from enum import Enum +from functools import cached_property +from pathlib import Path +from typing import Annotated, Any, TypeAlias +from uuid import UUID + +import arrow +from common_library.error_codes import ErrorCodeStr +from common_library.json_serialization import json_dumps +from models_library.api_schemas_directorv2.dynamic_services import 
DynamicServiceCreate +from models_library.api_schemas_directorv2.dynamic_services_service import ( + CommonServiceDetails, +) +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_PROXY_SERVICE_PREFIX, + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) +from models_library.basic_types import PortInt +from models_library.callbacks_mapping import CallbacksMapping +from models_library.docker import DockerNodeID +from models_library.generated_models.docker_rest_api import ContainerState, Status2 +from models_library.projects_nodes_io import NodeID +from models_library.resource_tracker import HardwareInfo, PricingInfo +from models_library.service_settings_labels import ( + DynamicSidecarServiceLabels, + PathMappingsLabel, + SimcoreServiceLabels, +) +from models_library.services import ServiceRunID +from models_library.services_resources import ServiceResourcesDict +from models_library.wallets import WalletInfo +from pydantic import ( + AnyHttpUrl, + BaseModel, + BeforeValidator, + ConfigDict, + Field, + StringConstraints, + TypeAdapter, + field_validator, +) +from servicelib.exception_utils import DelayedExceptionHandler + +from ..constants import ( + DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL, + REGEX_DY_SERVICE_PROXY, + REGEX_DY_SERVICE_SIDECAR, +) + +TEMPORARY_PORT_NUMBER = 65_534 + +MAX_ALLOWED_SERVICE_NAME_LENGTH: int = 63 + + +DockerStatus: TypeAlias = Status2 + + +DockerId: TypeAlias = Annotated[ + str, StringConstraints(max_length=25, pattern=r"[A-Za-z0-9]{25}") +] + +ServiceId: TypeAlias = DockerId +NetworkId: TypeAlias = DockerId + + +ServiceName: TypeAlias = Annotated[ + str, StringConstraints(min_length=2, strip_whitespace=True) +] + + +logger = logging.getLogger() + + +def _strip_service_name(service_name: str) -> str: + """returns: the maximum allowed service name in docker swarm""" + return service_name[:MAX_ALLOWED_SERVICE_NAME_LENGTH] + + +def assemble_service_name(service_prefix: str, node_uuid: NodeID) -> str: + return _strip_service_name("_".join([service_prefix, str(node_uuid)])) + + +class DynamicSidecarStatus(str, Enum): + OK = "ok" # running as expected + FAILING = "failing" # requests to the sidecar API are failing service should be cosnidered as unavailable + + +class Status(BaseModel): + """Generated from data from docker container inspect API""" + + current: DynamicSidecarStatus = Field(..., description="status of the service") + info: str = Field(..., description="additional information for the user") + + def _update(self, new_status: DynamicSidecarStatus, new_info: str) -> None: + self.current = new_status + self.info = new_info + + def update_ok_status(self, info: str) -> None: + self._update(DynamicSidecarStatus.OK, info) + + def update_failing_status( + self, user_msg: str, error_code: ErrorCodeStr | None = None + ) -> None: + next_info = f"{user_msg}" + if error_code: + next_info = f"{user_msg} [{error_code}]" + + self._update(DynamicSidecarStatus.FAILING, next_info) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Status): + return NotImplemented + return self.current == other.current and self.info == other.info + + @classmethod + def create_as_initially_ok(cls) -> "Status": + # the service is initially ok when started + initial_state: Status = cls(current=DynamicSidecarStatus.OK, info="") + return initial_state + + +class DockerContainerInspect(BaseModel): + container_state: Annotated[ + ContainerState, Field(..., description="current state of container") + ] + name: str = Field(..., description="docker name of the container") + id: str = 
Field(..., description="docker id of the container") + + @cached_property + def status(self) -> DockerStatus: + assert self.container_state.status # nosec + result: DockerStatus = self.container_state.status + return result + + @classmethod + def from_container(cls, container: dict[str, Any]) -> "DockerContainerInspect": + return cls( + container_state=ContainerState(**container["State"]), + name=container["Name"], + id=container["Id"], + ) + + model_config = ConfigDict(ignored_types=(cached_property,), frozen=True) + + +class ServiceRemovalState(BaseModel): + can_remove: bool = Field( + default=False, + description="when True, marks the service as ready to be removed", + ) + can_save: bool = Field( + default=False, + description="when True, saves the internal state and upload outputs of the service", + ) + was_removed: bool = Field( + default=False, + description=( + "Will be True when the removal finished. Used primarily " + "to cancel retrying long running operations." + ), + ) + + def mark_to_remove(self, *, can_save: bool) -> None: + self.can_remove = True + self.can_save = can_save + + def mark_removed(self) -> None: + self.can_remove = False + self.was_removed = True + + +class ServicesInstrumentation(BaseModel): + start_requested_at: datetime | None = Field( + None, + description="moment in which the process of starting the service was requested", + ) + close_requested_at: datetime | None = Field( + None, + description="moment in which the process of stopping the service was requested", + ) + + def elapsed_since_start_request(self) -> float | None: + if self.start_requested_at is None: + return None + + return (arrow.utcnow().datetime - self.start_requested_at).total_seconds() + + def elapsed_since_close_request(self) -> float | None: + if self.close_requested_at is None: + return None + return (arrow.utcnow().datetime - self.close_requested_at).total_seconds() + + +class DynamicSidecar(BaseModel): + status: Status = Field( + Status.create_as_initially_ok(), + description="status of the service sidecar also with additional information", + ) + + is_ready: bool = Field( + default=False, + description=( + "is True while the health check on the dynamic-sidecar is responding. " + "Meaning that the dynamic-sidecar is reachable and can accept requests" + ), + ) + + @property + def compose_spec_submitted(self) -> bool: + """ + If the director-v2 is rebooted was_compose_spec_submitted is False + If the compose-spec is submitted it can be safely assumed that the + containers_inspect contains some elements. + """ + return self.was_compose_spec_submitted or len(self.containers_inspect) > 0 + + was_compose_spec_submitted: bool = Field( + default=False, + description="if the docker-compose spec was already submitted this fields is True", + ) + + containers_inspect: list[DockerContainerInspect] = Field( + [], + description="docker inspect results from all the container ran at regular intervals", + ) + + was_dynamic_sidecar_started: bool = False + is_healthy: bool = False + were_containers_created: bool = Field( + default=False, + description=( + "when True no longer will the Docker api " + "be used to check if the services were started" + ), + ) + is_project_network_attached: bool = Field( + default=False, + description=( + "When True, all containers were in running state and project " + "networks were attached. 
Waiting for the container sto be in " + "running state guarantees all containers have been created" + ), + ) + + is_service_environment_ready: bool = Field( + default=False, + description=( + "True when the environment setup required by the " + "dynamic-sidecars created services was completed." + "Example: nodeports data downloaded, globally " + "shared service data fetched, etc.." + ), + ) + + service_removal_state: ServiceRemovalState = Field( + default_factory=ServiceRemovalState, + description=( + "stores information used during service removal " + "from the dynamic-sidecar scheduler" + ), + ) + + wait_for_manual_intervention_after_error: bool = Field( + default=False, + description=( + "Marks the sidecar as untouchable since there was an error and " + "important data might be lost. awaits for manual intervention." + ), + ) + wait_for_manual_intervention_logged: bool = Field( + default=False, description="True if a relative message was logged" + ) + were_state_and_outputs_saved: bool = Field( + default=False, + description="set True if the dy-sidecar saves the state and uploads the outputs", + ) + + instrumentation: ServicesInstrumentation = Field( + default_factory=lambda: ServicesInstrumentation.model_validate({}), + description="keeps track times for various operations", + ) + + # below had already been validated and + # used only to start the proxy + dynamic_sidecar_id: ServiceId | None = Field( + default=None, + description="returned by the docker engine; used for starting the proxy", + ) + dynamic_sidecar_network_id: NetworkId | None = Field( + default=None, + description="returned by the docker engine; used for starting the proxy", + ) + swarm_network_id: NetworkId | None = Field( + default=None, + description="returned by the docker engine; used for starting the proxy", + ) + swarm_network_name: str | None = Field( + default=None, description="used for starting the proxy" + ) + + docker_node_id: DockerNodeID | None = Field( + default=None, + description=( + "contains node id of the docker node where all services " + "and created containers are started" + ), + ) + + inspect_error_handler: DelayedExceptionHandler = Field( + default=DelayedExceptionHandler(delay_for=0), + description=( + "Set when the dy-sidecar can no longer be reached by the " + "director-v2. If it will be possible to reach the dy-sidecar again, " + "this value will be set to None." + ), + ) + model_config = ConfigDict(validate_assignment=True) + + +class DynamicSidecarNamesHelper(BaseModel): + """ + Service naming schema: + NOTE: name is max 63 characters + dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf + dy-proxy_4dde07ea-73be-4c44-845a-89479d1556cf + + dynamic sidecar structure + 0. a network is created: dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf + 1. 
a dynamic-sidecar is started: dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf + a traefik instance: dy-proxy_4dde07ea-73be-4c44-845a-89479d1556cf + """ + + service_name_dynamic_sidecar: str = Field( + ..., + pattern=REGEX_DY_SERVICE_SIDECAR, + max_length=MAX_ALLOWED_SERVICE_NAME_LENGTH, + description="unique name of the dynamic-sidecar service", + ) + proxy_service_name: str = Field( + ..., + pattern=REGEX_DY_SERVICE_PROXY, + max_length=MAX_ALLOWED_SERVICE_NAME_LENGTH, + description="name of the proxy for the dynamic-sidecar", + ) + + simcore_traefik_zone: str = Field( + ..., + pattern=REGEX_DY_SERVICE_SIDECAR, + description="unique name for the traefik constraints", + ) + dynamic_sidecar_network_name: str = Field( + ..., + pattern=REGEX_DY_SERVICE_SIDECAR, + description="based on the node_id and project_id", + ) + + @classmethod + def make(cls, node_uuid: UUID) -> "DynamicSidecarNamesHelper": + return cls( + service_name_dynamic_sidecar=assemble_service_name( + DYNAMIC_SIDECAR_SERVICE_PREFIX, node_uuid + ), + proxy_service_name=assemble_service_name( + DYNAMIC_PROXY_SERVICE_PREFIX, node_uuid + ), + simcore_traefik_zone=f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}", + dynamic_sidecar_network_name=f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}", + ) + + +class SchedulerData(CommonServiceDetails, DynamicSidecarServiceLabels): + # TODO: ANE this object is just the context of the dy-sidecar. Should + # be called like so and subcontexts for different handlers should + # also be added. It will make keeping track of env vars more easily + + service_name: ServiceName = Field( + ..., + description="Name of the current dynamic-sidecar being observed", + ) + run_id: ServiceRunID = Field( + default_factory=ServiceRunID.get_resource_tracking_run_id_for_dynamic, + description=( + "Uniquely identify the dynamic sidecar session (a.k.a. 
2 " + "subsequent exact same services will have a different run_id)" + ), + ) + hostname: str = Field( + ..., description="dy-sidecar's service hostname (provided by docker-swarm)" + ) + port: PortInt = Field(default=8000, description="dynamic-sidecar port") + + @property + def endpoint(self) -> AnyHttpUrl: + """endpoint where all the services are exposed""" + return AnyHttpUrl.build( # pylint: disable=no-member + scheme="http", host=self.hostname, port=self.port + ) + + dynamic_sidecar: DynamicSidecar = Field( + ..., + description="stores information fetched from the dynamic-sidecar", + ) + + paths_mapping: PathMappingsLabel # overwrites in DynamicSidecarServiceLabels + + user_preferences_path: Path | None = None + callbacks_mapping: Annotated[CallbacksMapping, Field(default_factory=dict)] + + dynamic_sidecar_network_name: str = Field( + ..., + description="overlay network biding the proxy to the container spaned by the dynamic-sidecar", + ) + + simcore_traefik_zone: str = Field( + ..., + description="required for Traefik to correctly route requests to the spawned container", + ) + + service_port: PortInt = Field( + default=TEMPORARY_PORT_NUMBER, + description=( + "port where the service is exposed defined by the service; " + "NOTE: temporary default because it will be changed once the service " + "is started, this value is fetched from the service start spec" + ), + ) + + service_resources: ServiceResourcesDict = Field( + ..., description="service resources used to enforce limits" + ) + + request_dns: str = Field( + ..., description="used when configuring the CORS options on the proxy" + ) + request_scheme: str = Field( + ..., description="used when configuring the CORS options on the proxy" + ) + request_simcore_user_agent: str = Field( + ..., + description="used as label to filter out the metrics from the cAdvisor prometheus metrics", + ) + proxy_service_name: str = Field(description="service name given to the proxy") + proxy_admin_api_port: PortInt | None = Field( + default=None, description="used as the admin endpoint API port" + ) + wallet_info: WalletInfo | None = Field( + default=None, + description="contains information about the wallet used to bill the running service", + ) + pricing_info: PricingInfo | None = Field( + default=None, + description="contains pricing information so we know what is the cost of running of the service", + ) + hardware_info: HardwareInfo | None = Field( + default=None, + description="contains harware information so we know on which hardware to run the service", + ) + + @property + def get_proxy_endpoint(self) -> AnyHttpUrl: + """get the endpoint where the proxy's admin API is exposed""" + assert self.proxy_admin_api_port # nosec + url: AnyHttpUrl = TypeAdapter(AnyHttpUrl).validate_python( + f"http://{self.proxy_service_name}:{self.proxy_admin_api_port}", # nosec # NOSONAR + ) + return url + + product_name: Annotated[ + str | None, + Field( + description="Current product upon which this service is scheduled" + "If set to None, the current product is undefined. 
Mostly for backwards compatibility", + ), + ] = None + + product_api_base_url: Annotated[ + str | None, + BeforeValidator(lambda v: f"{AnyHttpUrl(v)}"), + Field( + description="Base URL for the current product's API.", + ), + ] = None + + @classmethod + def from_http_request( + # pylint: disable=too-many-arguments + cls, + service: DynamicServiceCreate, + simcore_service_labels: SimcoreServiceLabels, + port: PortInt, + request_dns: str, + request_scheme: str, + request_simcore_user_agent: str, + can_save: bool, + run_id: ServiceRunID | None = None, + ) -> "SchedulerData": + # This constructor method sets current product + names_helper = DynamicSidecarNamesHelper.make(service.node_uuid) + + obj_dict = { + "service_name": names_helper.service_name_dynamic_sidecar, + "hostname": names_helper.service_name_dynamic_sidecar, + "port": port, + "node_uuid": service.node_uuid, + "project_id": service.project_id, + "user_id": service.user_id, + "key": service.key, + "version": service.version, + "service_resources": service.service_resources, + "product_name": service.product_name, + "product_api_base_url": service.product_api_base_url, + "paths_mapping": simcore_service_labels.paths_mapping, + "callbacks_mapping": simcore_service_labels.callbacks_mapping, + "compose_spec": json_dumps(simcore_service_labels.compose_spec), + "container_http_entry": simcore_service_labels.container_http_entry, + "restart_policy": simcore_service_labels.restart_policy, + "dynamic_sidecar_network_name": names_helper.dynamic_sidecar_network_name, + "simcore_traefik_zone": names_helper.simcore_traefik_zone, + "request_dns": request_dns, + "request_scheme": request_scheme, + "user_preferences_path": simcore_service_labels.user_preferences_path, + "proxy_service_name": names_helper.proxy_service_name, + "request_simcore_user_agent": request_simcore_user_agent, + "dynamic_sidecar": {"service_removal_state": {"can_save": can_save}}, + "wallet_info": service.wallet_info, + "pricing_info": service.pricing_info, + "hardware_info": service.hardware_info, + } + if run_id: + obj_dict["run_id"] = run_id + return cls.model_validate(obj_dict) + + @field_validator("user_preferences_path", mode="before") + @classmethod + def strip_path_serialization_to_none(cls, v): + if v == "None": + return None + return v + + @classmethod + def from_service_inspect( + cls, service_inspect: Mapping[str, Any] + ) -> "SchedulerData": + labels = service_inspect["Spec"]["Labels"] + return cls.model_validate_json(labels[DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL]) + + def as_label_data(self) -> str: + # compose_spec needs to be json encoded before encoding it to json + # and storing it in the label + return self.model_copy( + update={"compose_spec": json_dumps(self.compose_spec)}, + deep=True, + ).model_dump_json() + + model_config = ConfigDict(extra="allow", populate_by_name=True) diff --git a/services/director-v2/src/simcore_service_director_v2/models/pricing.py b/services/director-v2/src/simcore_service_director_v2/models/pricing.py new file mode 100644 index 00000000000..43ade424954 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/models/pricing.py @@ -0,0 +1,28 @@ +from decimal import Decimal + +from models_library.resource_tracker import ( + PricingPlanId, + PricingUnitCostId, + PricingUnitId, +) +from pydantic import BaseModel, ConfigDict + + +class PricingInfo(BaseModel): + pricing_plan_id: PricingPlanId + pricing_unit_id: PricingUnitId + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + + model_config = 
ConfigDict( + json_schema_extra={ + "examples": [ + { + "pricing_plan_id": 1, + "pricing_unit_id": 1, + "pricing_unit_cost_id": 1, + "pricing_unit_cost": Decimal(10), # type: ignore[dict-item] + } + ] + } + ) diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/clusters.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/clusters.py deleted file mode 100644 index c2f240fee5c..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/clusters.py +++ /dev/null @@ -1,203 +0,0 @@ -from typing import Optional - -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - BaseCluster, - Cluster, - ClusterAccessRights, - ClusterAuthentication, - ClusterType, - ExternalClusterAuthentication, -) -from models_library.generics import DictModel -from models_library.users import GroupID -from pydantic import ( - AnyHttpUrl, - BaseModel, - Field, - HttpUrl, - NonNegativeFloat, - root_validator, - validator, -) -from pydantic.networks import AnyUrl -from pydantic.types import ByteSize, PositiveFloat - - -class TaskCounts(BaseModel): - error: int = 0 - memory: int = 0 - executing: int = 0 - - -class WorkerMetrics(BaseModel): - cpu: float = Field(..., description="consumed % of cpus") - memory: ByteSize = Field(..., description="consumed memory") - num_fds: int = Field(..., description="consumed file descriptors") - task_counts: TaskCounts = Field(..., description="task details") - - -AvailableResources = DictModel[str, PositiveFloat] - - -class UsedResources(DictModel[str, NonNegativeFloat]): - @root_validator(pre=True) - @classmethod - def ensure_negative_value_is_zero(cls, values): - # dasks adds/remove resource values and sometimes - # they end up being negative instead of 0 - if v := values.get("__root__", {}): - for res_key, res_value in v.items(): - if res_value < 0: - v[res_key] = 0 - return values - - -class Worker(BaseModel): - id: str - name: str - resources: AvailableResources - used_resources: UsedResources - memory_limit: ByteSize - metrics: WorkerMetrics - - -class WorkersDict(DictModel[AnyUrl, Worker]): - ... - - -class Scheduler(BaseModel): - status: str = Field(..., description="The running status of the scheduler") - workers: Optional[WorkersDict] = Field(default_factory=dict) - - @validator("workers", pre=True, always=True) - @classmethod - def ensure_workers_is_empty_dict(cls, v): - if v is None: - return {} - return v - - -class ClusterDetails(BaseModel): - scheduler: Scheduler = Field( - ..., - description="This contains dask scheduler information given by the underlying dask library", - ) - dashboard_link: AnyUrl = Field( - ..., description="Link to this scheduler's dashboard" - ) - - -class ClusterGet(Cluster): - access_rights: dict[GroupID, ClusterAccessRights] = Field( - alias="accessRights", default_factory=dict - ) - - class Config(Cluster.Config): - allow_population_by_field_name = True - - @root_validator(pre=True) - @classmethod - def ensure_access_rights_converted(cls, values): - if "access_rights" in values: - access_rights = values.pop("access_rights") - values["accessRights"] = access_rights - return values - - -class ClusterDetailsGet(ClusterDetails): - ... 
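The new PricingInfo model above, like the reworked SchedulerData earlier in this diff, leans on two pydantic v2 idioms that replace the v1 patterns still visible in the deleted schema files: ConfigDict(json_schema_extra=...) instead of `class Config: schema_extra`, and TypeAdapter(...).validate_python(...) instead of parse_obj_as. The sketch below is a minimal, self-contained illustration of both idioms; the model name, integer field types and URL value are simplified stand-ins for this example, not the repository classes.

# Illustrative sketch only: PricingInfoExample is a hypothetical stand-in for
# the PricingInfo model shown above (its id fields are plain ints here).
from decimal import Decimal

from pydantic import AnyHttpUrl, BaseModel, ConfigDict, TypeAdapter


class PricingInfoExample(BaseModel):
    pricing_plan_id: int
    pricing_unit_id: int
    pricing_unit_cost_id: int
    pricing_unit_cost: Decimal

    # pydantic v2: examples are attached via ConfigDict instead of `class Config: schema_extra`
    model_config = ConfigDict(
        json_schema_extra={
            "examples": [
                {
                    "pricing_plan_id": 1,
                    "pricing_unit_id": 1,
                    "pricing_unit_cost_id": 1,
                    "pricing_unit_cost": Decimal(10),
                }
            ]
        }
    )


# validate the documented example against the model itself
example = PricingInfoExample.model_config["json_schema_extra"]["examples"][0]
pricing = PricingInfoExample.model_validate(example)

# ad-hoc validation of a plain type, the v2 replacement for parse_obj_as
# (same pattern as SchedulerData.get_proxy_endpoint; host name is illustrative)
url: AnyHttpUrl = TypeAdapter(AnyHttpUrl).validate_python("http://dy-proxy-example:8888")
print(pricing.pricing_unit_cost, url)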
- - -class ClusterCreate(BaseCluster): - owner: Optional[GroupID] - authentication: ExternalClusterAuthentication - access_rights: dict[GroupID, ClusterAccessRights] = Field( - alias="accessRights", default_factory=dict - ) - - @validator("thumbnail", always=True, pre=True) - @classmethod - def set_default_thumbnail_if_empty(cls, v, values): - if v is None: - cluster_type = values["type"] - default_thumbnails = { - ClusterType.AWS.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Amazon_Web_Services_Logo.svg/250px-Amazon_Web_Services_Logo.svg.png", - ClusterType.ON_PREMISE.value: "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Crystal_Clear_app_network_local.png/120px-Crystal_Clear_app_network_local.png", - } - return default_thumbnails[cluster_type] - return v - - class Config(BaseCluster.Config): - schema_extra = { - "examples": [ - { - "name": "My awesome cluster", - "type": ClusterType.ON_PREMISE, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - }, - { - "name": "My AWS cluster", - "description": "a AWS cluster administered by me", - "type": ClusterType.AWS, - "owner": 154, - "endpoint": "https://registry.osparc-development.fake.dev", - "authentication": { - "type": "simple", - "username": "someuser", - "password": "somepassword", - }, - "accessRights": { - 154: CLUSTER_ADMIN_RIGHTS, - 12: CLUSTER_MANAGER_RIGHTS, - 7899: CLUSTER_USER_RIGHTS, - }, - }, - ] - } - - -class ClusterPatch(BaseCluster): - name: Optional[str] - description: Optional[str] - type: Optional[ClusterType] - owner: Optional[GroupID] - thumbnail: Optional[HttpUrl] - endpoint: Optional[AnyUrl] - authentication: Optional[ExternalClusterAuthentication] - access_rights: Optional[dict[GroupID, ClusterAccessRights]] = Field( - alias="accessRights" - ) - - class Config(BaseCluster.Config): - schema_extra = { - "examples": [ - { - "name": "Changing the name of my cluster", - }, - { - "description": "adding a better description", - }, - { - "accessRights": { - 154: CLUSTER_ADMIN_RIGHTS, - 12: CLUSTER_MANAGER_RIGHTS, - 7899: CLUSTER_USER_RIGHTS, - }, - }, - ] - } - - -class ClusterPing(BaseModel): - endpoint: AnyHttpUrl - authentication: ClusterAuthentication = Field( - ..., description="Dask gateway authentication" - ) diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/comp_tasks.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/comp_tasks.py deleted file mode 100644 index 47b2eb7d8d9..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/comp_tasks.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import Optional - -from models_library.clusters import ClusterID -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.projects_pipeline import ComputationTask -from models_library.users import UserID -from pydantic import AnyHttpUrl, AnyUrl, BaseModel, Field, validator - - -class ComputationGet(ComputationTask): - url: AnyHttpUrl = Field( - ..., description="the link where to get the status of the task" - ) - stop_url: Optional[AnyHttpUrl] = Field( - None, description="the link where to stop the task" - ) - - -class ComputationCreate(BaseModel): - user_id: UserID - project_id: ProjectID - start_pipeline: Optional[bool] = Field( - default=False, - description="if True the computation pipeline will start right away", - ) - product_name: str - 
subgraph: Optional[list[NodeID]] = Field( - default=None, - description="An optional set of nodes that must be executed, if empty the whole pipeline is executed", - ) - force_restart: Optional[bool] = Field( - default=False, description="if True will force re-running all dependent nodes" - ) - cluster_id: Optional[ClusterID] = Field( - default=None, - description="the computation shall use the cluster described by its id, 0 is the default cluster", - ) - - @validator("product_name", always=True) - @classmethod - def ensure_product_name_defined_if_computation_starts(cls, v, values): - if "start_pipeline" in values and values["start_pipeline"] and v is None: - raise ValueError("product_name must be set if computation shall start!") - return v - - -class ComputationStop(BaseModel): - user_id: UserID - - -class ComputationDelete(ComputationStop): - force: Optional[bool] = Field( - False, - description="if True then the pipeline will be removed even if it is running", - ) - - -class TaskLogFileGet(BaseModel): - task_id: NodeID - download_link: Optional[AnyUrl] = Field( - None, description="Presigned link for log file or None if still not available" - ) diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/constants.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/constants.py deleted file mode 100644 index 8a2b7e8e94c..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/constants.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Final - -# dynamic services - -DYNAMIC_SIDECAR_SERVICE_PREFIX: Final[str] = "dy-sidecar" -DYNAMIC_PROXY_SERVICE_PREFIX: Final[str] = "dy-proxy" -DYNAMIC_VOLUME_REMOVER_PREFIX: Final[str] = "dy-volrm" - -# label storing scheduler_data to allow service -# monitoring recovery after director-v2 reboots -DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: Final[str] = "io.simcore.scheduler-data" - -# This matches registries by: -# - local -# - itisfoundation -# - 10.0.0.0:8473 (IP & Port) -DYNAMIC_SIDECAR_DOCKER_IMAGE_RE = ( - r"(^([_a-zA-Z0-9:.-]+)/)?(dynamic-sidecar):([_a-zA-Z0-9.-]+$)" -) - -REGEX_DY_SERVICE_SIDECAR = rf"^{DYNAMIC_SIDECAR_SERVICE_PREFIX}_[a-zA-Z0-9-_]*" -REGEX_DY_SERVICE_PROXY = rf"^{DYNAMIC_PROXY_SERVICE_PREFIX}_[a-zA-Z0-9-_]*" diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py deleted file mode 100644 index db7fb1894e1..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .scheduler import * -from .service import * diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/scheduler.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/scheduler.py deleted file mode 100644 index 1303d2e4021..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/scheduler.py +++ /dev/null @@ -1,472 +0,0 @@ -import json -import logging -import warnings -from enum import Enum -from functools import cached_property -from typing import Any, Mapping, Optional -from uuid import UUID, uuid4 - -from models_library.basic_types import PortInt -from models_library.generated_models.docker_rest_api import ContainerState -from models_library.generated_models.docker_rest_api import Status2 as DockerStatus -from 
models_library.projects_nodes_io import NodeID -from models_library.service_settings_labels import ( - DynamicSidecarServiceLabels, - PathMappingsLabel, - SimcoreServiceLabels, -) -from models_library.services import RunID -from models_library.services_resources import ServiceResourcesDict -from pydantic import ( - AnyHttpUrl, - BaseModel, - Extra, - Field, - constr, - parse_obj_as, - root_validator, -) -from servicelib.error_codes import ErrorCodeStr -from servicelib.exception_utils import DelayedExceptionHandler - -from ..constants import ( - DYNAMIC_PROXY_SERVICE_PREFIX, - DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL, - DYNAMIC_SIDECAR_SERVICE_PREFIX, - REGEX_DY_SERVICE_PROXY, - REGEX_DY_SERVICE_SIDECAR, -) -from .service import CommonServiceDetails - -TEMPORARY_PORT_NUMBER = 65_534 - -MAX_ALLOWED_SERVICE_NAME_LENGTH: int = 63 - -DockerId = constr(max_length=25, regex=r"[A-Za-z0-9]{25}") -ServiceId = DockerId -NetworkId = DockerId -ServiceName = constr(strip_whitespace=True, min_length=2) - -logger = logging.getLogger() - - -def _strip_service_name(service_name: str) -> str: - """returns: the maximum allowed service name in docker swarm""" - return service_name[:MAX_ALLOWED_SERVICE_NAME_LENGTH] - - -def assemble_service_name(service_prefix: str, node_uuid: NodeID) -> str: - return _strip_service_name("_".join([service_prefix, str(node_uuid)])) - - -class DynamicSidecarStatus(str, Enum): - OK = "ok" # running as expected - FAILING = "failing" # requests to the sidecar API are failing service should be cosnidered as unavailable - - -class Status(BaseModel): - """Generated from data from docker container inspect API""" - - current: DynamicSidecarStatus = Field(..., description="status of the service") - info: str = Field(..., description="additional information for the user") - - def _update(self, new_status: DynamicSidecarStatus, new_info: str) -> None: - self.current = new_status - self.info = new_info - - def update_ok_status(self, info: str) -> None: - self._update(DynamicSidecarStatus.OK, info) - - def update_failing_status( - self, user_msg: str, error_code: Optional[ErrorCodeStr] = None - ) -> None: - next_info = f"{user_msg}" - if error_code: - next_info = f"{user_msg} [{error_code}]" - - self._update(DynamicSidecarStatus.FAILING, next_info) - - def __eq__(self, other: "Status") -> bool: - return self.current == other.current and self.info == other.info - - @classmethod - def create_as_initially_ok(cls) -> "Status": - # the service is initially ok when started - initial_state = cls(current=DynamicSidecarStatus.OK, info="") - return initial_state - - -class DockerContainerInspect(BaseModel): - container_state: ContainerState = Field( - ..., description="current state of container" - ) - name: str = Field(..., description="docker name of the container") - id: str = Field(..., description="docker id of the container") - - @cached_property - def status(self) -> DockerStatus: - assert self.container_state.Status # nosec - return self.container_state.Status - - @classmethod - def from_container(cls, container: dict[str, Any]) -> "DockerContainerInspect": - return cls( - container_state=ContainerState(**container["State"]), - name=container["Name"], - id=container["Id"], - ) - - class Config: - keep_untouched = (cached_property,) - allow_mutation = False - - -class ServiceRemovalState(BaseModel): - can_remove: bool = Field( - False, - description="when True, marks the service as ready to be removed", - ) - can_save: Optional[bool] = Field( - None, - description="when True, saves the internal 
state and upload outputs of the service", - ) - was_removed: bool = Field( - False, - description=( - "Will be True when the removal finished. Used primarily " - "to cancel retrying long running operations." - ), - ) - - def mark_to_remove(self, can_save: Optional[bool]) -> None: - self.can_remove = True - self.can_save = can_save - - def mark_removed(self) -> None: - self.can_remove = False - self.was_removed = True - - -class DynamicSidecar(BaseModel): - status: Status = Field( - Status.create_as_initially_ok(), - description="status of the service sidecar also with additional information", - ) - - is_ready: bool = Field( - False, - scription=( - "is True while the health check on the dynamic-sidecar is responding. " - "Meaning that the dynamic-sidecar is reachable and can accept requests" - ), - ) - - @property - def compose_spec_submitted(self) -> bool: - """ - If the director-v2 is rebooted was_compose_spec_submitted is False - If the compose-spec is submitted it can be safely assumed that the - containers_inspect contains some elements. - """ - return self.was_compose_spec_submitted or len(self.containers_inspect) > 0 - - was_compose_spec_submitted: bool = Field( - False, - description="if the docker-compose spec was already submitted this fields is True", - ) - - containers_inspect: list[DockerContainerInspect] = Field( - [], - scription="docker inspect results from all the container ran at regular intervals", - ) - - was_dynamic_sidecar_started: bool = False - is_healthy: bool = False - were_containers_created: bool = Field( - False, - description=( - "when True no longer will the Docker api " - "be used to check if the services were started" - ), - ) - is_project_network_attached: bool = Field( - False, - description=( - "When True, all containers were in running state and project " - "networks were attached. Waiting for the container sto be in " - "running state guarantees all containers have been created" - ), - ) - - is_service_environment_ready: bool = Field( - False, - description=( - "True when the environment setup required by the " - "dynamic-sidecars created services was completed." - "Example: nodeports data downloaded, globally " - "shared service data fetched, etc.." - ), - ) - - service_removal_state: ServiceRemovalState = Field( - default_factory=ServiceRemovalState, - description=( - "stores information used during service removal " - "from the dynamic-sidecar scheduler" - ), - ) - - wait_for_manual_intervention_after_error: bool = Field( - False, - description=( - "Marks the sidecar as untouchable since there was an error and " - "important data might be lost. awaits for manual intervention." 
- ), - ) - were_state_and_outputs_saved: bool = Field( - False, - description="set True if the dy-sidecar saves the state and uploads the outputs", - ) - - # below had already been validated and - # used only to start the proxy - dynamic_sidecar_id: Optional[ServiceId] = Field( - None, description="returned by the docker engine; used for starting the proxy" - ) - dynamic_sidecar_network_id: Optional[NetworkId] = Field( - None, description="returned by the docker engine; used for starting the proxy" - ) - swarm_network_id: Optional[NetworkId] = Field( - None, description="returned by the docker engine; used for starting the proxy" - ) - swarm_network_name: Optional[str] = Field( - None, description="used for starting the proxy" - ) - - docker_node_id: Optional[str] = Field( - None, - description=( - "contains node id of the docker node where all services " - "and created containers are started" - ), - ) - - inspect_error_handler: DelayedExceptionHandler = Field( - DelayedExceptionHandler(delay_for=0), - description=( - "Set when the dy-sidecar can no longer be reached by the " - "director-v2. If it will be possible to reach the dy-sidecar again, " - "this value will be set to None." - ), - ) - - class Config: - validate_assignment = True - - -class DynamicSidecarNamesHelper(BaseModel): - """ - Service naming schema: - NOTE: name is max 63 characters - dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf - dy-proxy_4dde07ea-73be-4c44-845a-89479d1556cf - - dynamic sidecar structure - 0. a network is created: dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf - 1. a dynamic-sidecar is started: dy-sidecar_4dde07ea-73be-4c44-845a-89479d1556cf - a traefik instance: dy-proxy_4dde07ea-73be-4c44-845a-89479d1556cf - """ - - service_name_dynamic_sidecar: str = Field( - ..., - regex=REGEX_DY_SERVICE_SIDECAR, - max_length=MAX_ALLOWED_SERVICE_NAME_LENGTH, - description="unique name of the dynamic-sidecar service", - ) - proxy_service_name: str = Field( - ..., - regex=REGEX_DY_SERVICE_PROXY, - max_length=MAX_ALLOWED_SERVICE_NAME_LENGTH, - description="name of the proxy for the dynamic-sidecar", - ) - - simcore_traefik_zone: str = Field( - ..., - regex=REGEX_DY_SERVICE_SIDECAR, - description="unique name for the traefik constraints", - ) - dynamic_sidecar_network_name: str = Field( - ..., - regex=REGEX_DY_SERVICE_SIDECAR, - description="based on the node_id and project_id", - ) - - @classmethod - def make(cls, node_uuid: UUID) -> "DynamicSidecarNamesHelper": - return cls( - service_name_dynamic_sidecar=assemble_service_name( - DYNAMIC_SIDECAR_SERVICE_PREFIX, node_uuid - ), - proxy_service_name=assemble_service_name( - DYNAMIC_PROXY_SERVICE_PREFIX, node_uuid - ), - simcore_traefik_zone=f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}", - dynamic_sidecar_network_name=f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}", - ) - - -class SchedulerData(CommonServiceDetails, DynamicSidecarServiceLabels): - # TODO: ANE this object is just the context of the dy-sidecar. Should - # be called like so and subcontexts for different handlers should - # also be added. It will make keeping track of env vars more easily - - service_name: ServiceName = Field( - ..., - description="Name of the current dynamic-sidecar being observed", - ) - run_id: RunID = Field( - default_factory=uuid4, - description=( - "Uniquely identify the dynamic sidecar session (a.k.a. 
2 " - "subsequent exact same services will have a different run_id)" - ), - ) - hostname: str = Field( - ..., description="dy-sidecar's service hostname (provided by docker-swarm)" - ) - port: PortInt = Field(8000, description="dynamic-sidecar port") - - @property - def endpoint(self) -> AnyHttpUrl: - """endpoint where all the services are exposed""" - return parse_obj_as( - AnyHttpUrl, f"http://{self.hostname}:{self.port}" # NOSONAR - ) - - dynamic_sidecar: DynamicSidecar = Field( - ..., - description="stores information fetched from the dynamic-sidecar", - ) - - paths_mapping: PathMappingsLabel # overwrites in DynamicSidecarServiceLabels - - dynamic_sidecar_network_name: str = Field( - ..., - description="overlay network biding the proxy to the container spaned by the dynamic-sidecar", - ) - - simcore_traefik_zone: str = Field( - ..., - description="required for Traefik to correctly route requests to the spawned container", - ) - - service_port: PortInt = Field( - TEMPORARY_PORT_NUMBER, - description=( - "port where the service is exposed defined by the service; " - "NOTE: temporary default because it will be changed once the service " - "is started, this value is fetched from the service start spec" - ), - ) - - service_resources: ServiceResourcesDict = Field( - ..., description="service resources used to enforce limits" - ) - - request_dns: str = Field( - ..., description="used when configuring the CORS options on the proxy" - ) - request_scheme: str = Field( - ..., description="used when configuring the CORS options on the proxy" - ) - request_simcore_user_agent: str = Field( - ..., - description="used as label to filter out the metrics from the cAdvisor prometheus metrics", - ) - proxy_service_name: str = Field(None, description="service name given to the proxy") - - product_name: str = Field( - None, - description="Current product upon which this service is scheduled. " - "If set to None, the current product is undefined. Mostly for backwards compatibility", - ) - - @root_validator(pre=True) - @classmethod - def _ensure_legacy_format_compatibility(cls, values): - warnings.warn( - ( - "Once https://github.com/ITISFoundation/osparc-simcore/pull/3990 " - "reaches production this entire root_validator function " - "can be safely removed. 
Please check " - "https://github.com/ITISFoundation/osparc-simcore/issues/3996" - ), - DeprecationWarning, - stacklevel=2, - ) - request_simcore_user_agent: Optional[str] = values.get( - "request_simcore_user_agent" - ) - if not request_simcore_user_agent: - values["request_simcore_user_agent"] = "" - return values - - @classmethod - def from_http_request( - # pylint: disable=too-many-arguments - cls, - service: "DynamicServiceCreate", - simcore_service_labels: SimcoreServiceLabels, - port: PortInt, - request_dns: str, - request_scheme: str, - request_simcore_user_agent: str, - run_id: Optional[UUID] = None, - ) -> "SchedulerData": - # This constructor method sets current product - names_helper = DynamicSidecarNamesHelper.make(service.node_uuid) - - obj_dict = dict( - service_name=names_helper.service_name_dynamic_sidecar, - hostname=names_helper.service_name_dynamic_sidecar, - port=port, - node_uuid=service.node_uuid, - project_id=service.project_id, - user_id=service.user_id, - key=service.key, - version=service.version, - service_resources=service.service_resources, - product_name=service.product_name, - paths_mapping=simcore_service_labels.paths_mapping, - compose_spec=json.dumps(simcore_service_labels.compose_spec), - container_http_entry=simcore_service_labels.container_http_entry, - restart_policy=simcore_service_labels.restart_policy, - dynamic_sidecar_network_name=names_helper.dynamic_sidecar_network_name, - simcore_traefik_zone=names_helper.simcore_traefik_zone, - request_dns=request_dns, - request_scheme=request_scheme, - proxy_service_name=names_helper.proxy_service_name, - request_simcore_user_agent=request_simcore_user_agent, - dynamic_sidecar={}, - ) - if run_id: - obj_dict["run_id"] = run_id - return cls.parse_obj(obj_dict) - - @classmethod - def from_service_inspect( - cls, service_inspect: Mapping[str, Any] - ) -> "SchedulerData": - labels = service_inspect["Spec"]["Labels"] - return cls.parse_raw(labels[DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL]) - - def as_label_data(self) -> str: - # compose_spec needs to be json encoded before encoding it to json - # and storing it in the label - return self.copy( - update={"compose_spec": json.dumps(self.compose_spec)}, deep=True - ).json() - - class Config: - extra = Extra.allow - allow_population_by_field_name = True diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/service.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/service.py deleted file mode 100644 index 9bccb93eaee..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/service.py +++ /dev/null @@ -1,204 +0,0 @@ -from enum import Enum, unique -from functools import cached_property, lru_cache, total_ordering -from pathlib import Path -from typing import Optional - -from models_library.basic_types import PortInt -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.services import DYNAMIC_SERVICE_KEY_RE, VERSION_RE -from models_library.users import UserID -from pydantic import BaseModel, Field - -from ....meta import API_VTAG - - -@unique -class ServiceType(Enum): - """ - Used to filter out services spawned by this service in the stack. - The version was added to distinguish from the ones spawned by director-v0 - These values are attached to the dynamic-sidecar and its relative proxy. 
- """ - - MAIN = f"main-{API_VTAG}" - DEPENDENCY = f"dependency-{API_VTAG}" - - -class CommonServiceDetails(BaseModel): - key: str = Field( - ..., - description="distinctive name for the node based on the docker registry path", - regex=DYNAMIC_SERVICE_KEY_RE, - examples=[ - "simcore/services/dynamic/3dviewer", - ], - alias="service_key", - ) - version: str = Field( - ..., - description="semantic version number of the node", - regex=VERSION_RE, - examples=["1.0.0", "0.0.1"], - alias="service_version", - ) - - user_id: UserID - project_id: ProjectID - node_uuid: NodeID = Field(..., alias="service_uuid") - - -class ServiceDetails(CommonServiceDetails): - basepath: Path = Field( - default=None, - description="predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint.", - alias="service_basepath", - ) - - class Config: - allow_population_by_field_name = True - schema_extra = { - "example": { - "key": "simcore/services/dynamic/3dviewer", - "version": "2.4.5", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", - } - } - - -@unique -class ServiceBootType(str, Enum): - V0 = "V0" - V2 = "V2" - - -@total_ordering -@unique -class ServiceState(Enum): - PENDING = "pending" - PULLING = "pulling" - STARTING = "starting" - RUNNING = "running" - COMPLETE = "complete" - FAILED = "failed" - STOPPING = "stopping" - - def __lt__(self, other): - if self.__class__ is other.__class__: - comparison_order = ServiceState.comparison_order() - self_index = comparison_order[self] - other_index = comparison_order[other] - return self_index < other_index - return NotImplemented - - @staticmethod - @lru_cache(maxsize=2) - def comparison_order() -> dict["ServiceState", int]: - """States are comparable to supportmin() on a list of ServiceState""" - return { - ServiceState.FAILED: 0, - ServiceState.PENDING: 1, - ServiceState.PULLING: 2, - ServiceState.STARTING: 3, - ServiceState.RUNNING: 4, - ServiceState.STOPPING: 5, - ServiceState.COMPLETE: 6, - } - - -class RunningDynamicServiceDetails(ServiceDetails): - boot_type: ServiceBootType = Field( - default=ServiceBootType.V0, - description=( - "Describes how the dynamic services was started (legacy=V0, modern=V2)." - "Since legacy services do not have this label it defaults to V0." 
- ), - ) - - host: str = Field( - ..., description="the service swarm internal host name", alias="service_host" - ) - internal_port: PortInt = Field( - ..., description="the service swarm internal port", alias="service_port" - ) - published_port: PortInt = Field( - default=None, - description="the service swarm published port if any", - deprecated=True, - ) - - entry_point: Optional[str] = Field( - default=None, - description="if empty the service entrypoint is on the root endpoint.", - deprecated=True, - ) - state: ServiceState = Field( - ..., description="service current state", alias="service_state" - ) - message: Optional[str] = Field( - default=None, - description="additional information related to service state", - alias="service_message", - ) - - @cached_property - def legacy_service_url(self) -> str: - return f"http://{self.host}:{self.internal_port}{self.basepath}" - - @classmethod - def from_scheduler_data( - cls, - node_uuid: NodeID, - scheduler_data: "SchedulerData", - service_state: ServiceState, - service_message: str, - ) -> "RunningDynamicServiceDetails": - return cls( - boot_type=ServiceBootType.V2, - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - service_uuid=node_uuid, - service_key=scheduler_data.key, - service_version=scheduler_data.version, - service_host=scheduler_data.service_name, - service_port=scheduler_data.service_port, - service_state=service_state.value, - service_message=service_message, - ) - - class Config(ServiceDetails.Config): - keep_untouched = (cached_property,) - schema_extra = { - "examples": [ - { - "boot_type": "V0", - "key": "simcore/services/dynamic/3dviewer", - "version": "2.4.5", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "basepath": "/x/75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "host": "3dviewer_75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "internal_port": 8888, - "state": "running", - "message": "", - "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - }, - { - "boot_type": "V2", - "key": "simcore/services/dynamic/dy-static-file-viewer-dynamic-sidecar", - "version": "1.0.0", - "user_id": 234, - "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "host": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "internal_port": 80, - "state": "running", - "message": "", - "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - }, - ] - } diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/errors.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/errors.py deleted file mode 100644 index 9b04c308f6c..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/errors.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel, Field - - -class Error(BaseModel): - code: Optional[str] = Field(None, description="Server Exception") - - -class ErrorType(BaseModel): - message: str = Field(..., description="Error message") - errors: Optional[List[Error]] = None - status: int = Field(..., description="Error code") - - -class ErrorEnveloped(BaseModel): - error: ErrorType diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/meta.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/meta.py deleted file mode 100644 index 1163bcb2cbe..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/meta.py +++ /dev/null 
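The ServiceState enum removed above is decorated with functools.total_ordering and a fixed comparison_order map so that, as its docstring notes, states are comparable and min() over a list of task states yields the least-advanced (worst) one, with FAILED sorting lowest and COMPLETE highest. The following compact sketch shows that technique with a trimmed, hypothetical stand-in enum rather than the repository class.

# Illustrative sketch of the ordering technique used by ServiceState:
# a total ordering over enum members lets min() pick the "worst" state.
import functools
from enum import Enum, unique


@functools.total_ordering
@unique
class MiniServiceState(Enum):
    FAILED = "failed"
    PENDING = "pending"
    STARTING = "starting"
    RUNNING = "running"
    COMPLETE = "complete"

    def __lt__(self, other):
        if self.__class__ is other.__class__:
            order = {
                MiniServiceState.FAILED: 0,
                MiniServiceState.PENDING: 1,
                MiniServiceState.STARTING: 2,
                MiniServiceState.RUNNING: 3,
                MiniServiceState.COMPLETE: 4,
            }
            return order[self] < order[other]
        return NotImplemented


# the least-advanced task state dominates when aggregating over a pipeline
task_states = [MiniServiceState.COMPLETE, MiniServiceState.RUNNING, MiniServiceState.PENDING]
assert min(task_states) is MiniServiceState.PENDING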
@@ -1,23 +0,0 @@ -from typing import Dict, Optional - -from models_library.basic_regex import VERSION_RE -from pydantic import BaseModel, Field, constr - -VersionStr = constr(regex=VERSION_RE) - - -class Meta(BaseModel): - name: str - version: VersionStr - released: Optional[Dict[str, VersionStr]] = Field( - None, description="Maps every route's path tag with a released version" - ) - - class Config: - schema_extra = { - "example": { - "name": "simcore_service_foo", - "version": "2.4.45", - "released": {"v1": "1.3.4", "v2": "2.4.45"}, - } - } diff --git a/services/director-v2/src/simcore_service_director_v2/models/schemas/services.py b/services/director-v2/src/simcore_service_director_v2/models/schemas/services.py deleted file mode 100644 index 9d411f6a1d3..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/models/schemas/services.py +++ /dev/null @@ -1,164 +0,0 @@ -from typing import Optional - -from models_library.basic_regex import UUID_RE -from models_library.basic_types import PortInt -from models_library.service_settings_labels import ContainerSpec -from models_library.services import KEY_RE, VERSION_RE, ServiceDockerData -from pydantic import BaseModel, Field, validator -from pydantic.types import ByteSize, NonNegativeInt - -from .dynamic_services import ServiceState - - -class ServiceBuildDetails(BaseModel): - build_date: str - vcs_ref: str - vcs_url: str - - -class NodeRequirements(BaseModel): - cpu: float = Field( - ..., - description="defines the required (maximum) CPU shares for running the services", - alias="CPU", - gt=0.0, - ) - gpu: Optional[NonNegativeInt] = Field( - None, - description="defines the required (maximum) GPU for running the services", - alias="GPU", - ) - ram: ByteSize = Field( - ..., - description="defines the required (maximum) amount of RAM for running the services", - alias="RAM", - ) - vram: Optional[ByteSize] = Field( - default=None, - description="defines the required (maximum) amount of VRAM for running the services", - alias="VRAM", - ) - - @validator("vram", "gpu", always=True, pre=True) - @classmethod - def check_0_is_none(cls, v): - if v == 0: - v = None - return v - - class Config: - schema_extra = { - "examples": [ - {"CPU": 1.0, "RAM": 4194304}, - {"CPU": 1.0, "GPU": 1, "RAM": 4194304}, - { - "CPU": 1.0, - "RAM": 4194304, - }, - ] - } - - -class ServiceExtras(BaseModel): - node_requirements: NodeRequirements - service_build_details: Optional[ServiceBuildDetails] = None - container_spec: Optional[ContainerSpec] = None - - class Config: - schema_extra = { - "examples": [ - {"node_requirements": node_example} - for node_example in NodeRequirements.Config.schema_extra["examples"] - ] - + [ - { - "node_requirements": node_example, - "service_build_details": { - "build_date": "2021-08-13T12:56:28Z", - "vcs_ref": "8251ade", - "vcs_url": "git@github.com:ITISFoundation/osparc-simcore.git", - }, - } - for node_example in NodeRequirements.Config.schema_extra["examples"] - ] - + [ - { - "node_requirements": node_example, - "service_build_details": { - "build_date": "2021-08-13T12:56:28Z", - "vcs_ref": "8251ade", - "vcs_url": "git@github.com:ITISFoundation/osparc-simcore.git", - }, - "container_spec": {"Command": ["run", "subcommand"]}, - } - for node_example in NodeRequirements.Config.schema_extra["examples"] - ] - } - - -class ServiceExtrasEnveloped(BaseModel): - data: ServiceExtras - - -class RunningServiceDetails(BaseModel): - published_port: Optional[PortInt] = Field( - None, - description="The ports where the service provides its 
interface on the docker swarm", - deprecated=True, - ) - entry_point: str = Field( - ..., - description="The entry point where the service provides its interface", - ) - service_uuid: str = Field( - ..., regex=UUID_RE, description="The node UUID attached to the service" - ) - service_key: str = Field( - ..., - regex=KEY_RE, - description="distinctive name for the node based on the docker registry path", - example=[ - "simcore/services/comp/itis/sleeper", - "simcore/services/dynamic/3dviewer", - ], - ) - service_version: str = Field( - ..., - regex=VERSION_RE, - description="service version number", - example=["1.0.0", "0.0.1"], - ) - service_host: str = Field(..., description="service host name within the network") - service_port: PortInt = Field( - 80, description="port to access the service within the network" - ) - service_basepath: str = Field( - ..., - description="the service base entrypoint where the service serves its contents", - ) - service_state: ServiceState = Field( - ..., - description=( - "the service state" - " * 'pending' - The service is waiting for resources to start" - " * 'pulling' - The service is being pulled from the registry" - " * 'starting' - The service is starting" - " * 'running' - The service is running" - " * 'complete' - The service completed" - " * 'failed' - The service failed to start" - " * 'stopping' - The service is stopping" - ), - ) - service_message: str = Field(..., description="the service message") - - -class RunningServicesDetailsArray(BaseModel): - __root__: list[RunningServiceDetails] - - -class RunningServicesDetailsArrayEnveloped(BaseModel): - data: RunningServicesDetailsArray - - -class ServicesArrayEnveloped(BaseModel): - data: list[ServiceDockerData] diff --git a/services/director-v2/src/simcore_service_director_v2/modules/catalog.py b/services/director-v2/src/simcore_service_director_v2/modules/catalog.py index 2d404bb28ad..d9d4c3e6144 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/catalog.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/catalog.py @@ -5,28 +5,43 @@ import httpx from fastapi import FastAPI, HTTPException, status +from models_library.api_schemas_directorv2.services import ServiceExtras +from models_library.service_settings_labels import SimcoreServiceLabels from models_library.services import ServiceKey, ServiceVersion +from models_library.services_resources import ServiceResourcesDict from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from settings_library.catalog import CatalogSettings +from settings_library.tracing import TracingSettings -from ..core.settings import CatalogSettings from ..utils.client_decorators import handle_errors, handle_retry logger = logging.getLogger(__name__) -def setup(app: FastAPI, settings: CatalogSettings) -> None: - if not settings: - settings = CatalogSettings() +def setup( + app: FastAPI, + catalog_settings: CatalogSettings | None, + tracing_settings: TracingSettings | None, +) -> None: + + if not catalog_settings: + catalog_settings = CatalogSettings() async def on_startup() -> None: + client = httpx.AsyncClient( + base_url=f"{catalog_settings.api_base_url}", + timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, + ) + if tracing_settings: + setup_httpx_client_tracing(client=client) + CatalogClient.create( app, - client=httpx.AsyncClient( - base_url=f"{settings.api_base_url}", - 
timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, - ), + client=client, ) - logger.debug("created client for catalog: %s", settings.api_base_url) + logger.debug("created client for catalog: %s", catalog_settings.api_base_url) # Here we currently do not ensure the catalog is up on start # This will need to be assessed. @@ -51,6 +66,7 @@ def create(cls, app: FastAPI, **kwargs): @classmethod def instance(cls, app: FastAPI) -> "CatalogClient": + assert isinstance(app.state.catalog_client, CatalogClient) # nosec return app.state.catalog_client @handle_errors("Catalog", logger) @@ -67,26 +83,53 @@ async def get_service( ) -> dict[str, Any]: resp = await self.request( "GET", - f"/services/{urllib.parse.quote( service_key, safe='')}/{service_version}", + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}", params={"user_id": user_id}, headers={"X-Simcore-Products-Name": product_name}, ) resp.raise_for_status() if resp.status_code == status.HTTP_200_OK: - return resp.json() + json_response: dict[str, Any] = resp.json() + return json_response raise HTTPException(status_code=resp.status_code, detail=resp.content) async def get_service_resources( self, user_id: UserID, service_key: ServiceKey, service_version: ServiceVersion - ) -> dict[str, Any]: + ) -> ServiceResourcesDict: resp = await self.request( "GET", - f"/services/{urllib.parse.quote( service_key, safe='')}/{service_version}/resources", + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}/resources", params={"user_id": user_id}, ) resp.raise_for_status() if resp.status_code == status.HTTP_200_OK: - return resp.json() + json_response: ServiceResourcesDict = TypeAdapter( + ServiceResourcesDict + ).validate_python(resp.json()) + return json_response + raise HTTPException(status_code=resp.status_code, detail=resp.content) + + async def get_service_labels( + self, service_key: ServiceKey, service_version: ServiceVersion + ) -> SimcoreServiceLabels: + resp = await self.request( + "GET", + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}/labels", + ) + resp.raise_for_status() + if resp.status_code == status.HTTP_200_OK: + return SimcoreServiceLabels.model_validate(resp.json()) + raise HTTPException(status_code=resp.status_code, detail=resp.content) + + async def get_service_extras( + self, service_key: ServiceKey, service_version: ServiceVersion + ) -> ServiceExtras: + resp = await self.request( + "GET", + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}/extras", + ) + if resp.status_code == status.HTTP_200_OK: + return ServiceExtras.model_validate(resp.json()) raise HTTPException(status_code=resp.status_code, detail=resp.content) async def get_service_specifications( @@ -94,12 +137,13 @@ async def get_service_specifications( ) -> dict[str, Any]: resp = await self.request( "GET", - f"/services/{urllib.parse.quote( service_key, safe='')}/{service_version}/specifications", + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}/specifications", params={"user_id": user_id}, ) resp.raise_for_status() if resp.status_code == status.HTTP_200_OK: - return resp.json() + json_response: dict[str, Any] = resp.json() + return json_response raise HTTPException(status_code=resp.status_code, detail=resp.content) async def is_responsive(self) -> bool: diff --git a/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py new file 
mode 100644 index 00000000000..01f5586fc35 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/clusters_keeper.py @@ -0,0 +1,56 @@ +import logging + +from models_library.api_schemas_clusters_keeper.clusters import ClusterState +from models_library.clusters import BaseCluster, ClusterTypeInModel +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.rabbitmq import ( + RabbitMQRPCClient, + RemoteMethodNotRegisteredError, + RPCServerError, +) +from servicelib.rabbitmq.rpc_interfaces.clusters_keeper.clusters import ( + get_or_create_cluster, +) +from servicelib.utils_formatting import timedelta_as_minute_second + +from ..core.errors import ( + ClustersKeeperNotAvailableError, + ComputationalBackendOnDemandNotReadyError, +) + +_logger = logging.getLogger(__name__) + + +async def get_or_create_on_demand_cluster( + rabbitmq_rpc_client: RabbitMQRPCClient, + *, + user_id: UserID, + wallet_id: WalletID | None, +) -> BaseCluster: + try: + returned_cluster = await get_or_create_cluster( + rabbitmq_rpc_client, user_id=user_id, wallet_id=wallet_id + ) + _logger.info("received cluster: %s", returned_cluster) + if returned_cluster.state is not ClusterState.RUNNING: + raise ComputationalBackendOnDemandNotReadyError( + eta=timedelta_as_minute_second(returned_cluster.eta) + ) + if not returned_cluster.dask_scheduler_ready: + raise ComputationalBackendOnDemandNotReadyError( + eta=timedelta_as_minute_second(returned_cluster.eta) + ) + + return BaseCluster( + name=f"{user_id=}on-demand-cluster", + type=ClusterTypeInModel.ON_DEMAND, + owner=user_id, + endpoint=returned_cluster.endpoint, + authentication=returned_cluster.authentication, + ) + except RemoteMethodNotRegisteredError as exc: + # no clusters-keeper, that is not going to work! 
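+        # NOTE: both this case (the clusters-keeper RPC method is not registered,
+        # i.e. the service is not deployed) and the generic RPCServerError below are
+        # mapped to ClustersKeeperNotAvailableError, so callers handle a single domain error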
+ raise ClustersKeeperNotAvailableError from exc + except RPCServerError as exc: + raise ClustersKeeperNotAvailableError from exc diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py index 2a42e228510..cf3370f4da8 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/__init__.py @@ -1 +1,46 @@ -from .background_task import setup +import logging +from collections.abc import Callable, Coroutine +from typing import Any + +from fastapi import FastAPI +from servicelib.logging_utils import log_context + +from ._constants import MODULE_NAME_SCHEDULER +from ._manager import run_new_pipeline, setup_manager, shutdown_manager, stop_pipeline +from ._worker import setup_worker, shutdown_worker + +_logger = logging.getLogger(__name__) + + +def on_app_startup(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]: + async def start_scheduler() -> None: + with log_context( + _logger, level=logging.INFO, msg=f"starting {MODULE_NAME_SCHEDULER}" + ): + await setup_worker(app) + await setup_manager(app) + + return start_scheduler + + +def on_app_shutdown(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]: + async def stop_scheduler() -> None: + with log_context( + _logger, level=logging.INFO, msg=f"stopping {MODULE_NAME_SCHEDULER}" + ): + await shutdown_manager(app) + await shutdown_worker(app) + + return stop_scheduler + + +def setup(app: FastAPI): + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) + + +__all__: tuple[str, ...] = ( + "setup", + "run_new_pipeline", + "stop_pipeline", +) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py new file mode 100644 index 00000000000..45efe93f0b0 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_constants.py @@ -0,0 +1,7 @@ +import datetime +from typing import Final + +MODULE_NAME_SCHEDULER: Final[str] = "computational-distributed-scheduler" +MODULE_NAME_WORKER: Final[str] = "computational-distributed-worker" +SCHEDULER_INTERVAL: Final[datetime.timedelta] = datetime.timedelta(seconds=5) +MAX_CONCURRENT_PIPELINE_SCHEDULING: Final[int] = 10 diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py new file mode 100644 index 00000000000..bf859070b8a --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_manager.py @@ -0,0 +1,170 @@ +import logging +from typing import Final + +import networkx as nx +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.users import UserID +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.exception_utils import silence_exceptions +from servicelib.logging_utils import log_context +from servicelib.redis import CouldNotAcquireLockError, exclusive +from servicelib.utils import limited_gather +from sqlalchemy.ext.asyncio import AsyncEngine + +from ...models.comp_runs import RunMetadataDict +from ...utils.rabbitmq import 
publish_project_log +from ..db import get_db_engine +from ..db.repositories.comp_pipelines import CompPipelinesRepository +from ..db.repositories.comp_runs import CompRunsRepository +from ..rabbitmq import get_rabbitmq_client +from ._constants import ( + MAX_CONCURRENT_PIPELINE_SCHEDULING, + MODULE_NAME_SCHEDULER, + SCHEDULER_INTERVAL, +) +from ._publisher import request_pipeline_scheduling +from ._utils import SCHEDULED_STATES, get_redis_client_from_app, get_redis_lock_key + +_logger = logging.getLogger(__name__) + + +async def run_new_pipeline( + app: FastAPI, + *, + user_id: UserID, + project_id: ProjectID, + run_metadata: RunMetadataDict, + use_on_demand_clusters: bool, +) -> None: + """Sets a new pipeline to be scheduled on the computational resources.""" + # ensure the pipeline exists and is populated with something + db_engine = get_db_engine(app) + dag = await _get_pipeline_dag(project_id, db_engine) + if not dag: + _logger.warning( + "project %s has no computational dag defined. not scheduled for a run.", + f"{project_id=}", + ) + return + + new_run = await CompRunsRepository.instance(db_engine).create( + user_id=user_id, + project_id=project_id, + metadata=run_metadata, + use_on_demand_clusters=use_on_demand_clusters, + ) + + rabbitmq_client = get_rabbitmq_client(app) + await request_pipeline_scheduling( + rabbitmq_client, + db_engine, + user_id=new_run.user_id, + project_id=new_run.project_uuid, + iteration=new_run.iteration, + ) + await publish_project_log( + rabbitmq_client, + user_id, + project_id, + log=f"Project pipeline scheduled using {'on-demand clusters' if use_on_demand_clusters else 'pre-defined clusters'}, starting soon...", + log_level=logging.INFO, + ) + + +async def stop_pipeline( + app: FastAPI, + *, + user_id: UserID, + project_id: ProjectID, + iteration: int | None = None, +) -> None: + db_engine = get_db_engine(app) + comp_run = await CompRunsRepository.instance(db_engine).get( + user_id, project_id, iteration + ) + + # mark the scheduled pipeline for stopping + updated_comp_run = await CompRunsRepository.instance( + db_engine + ).mark_for_cancellation( + user_id=user_id, project_id=project_id, iteration=comp_run.iteration + ) + if updated_comp_run: + # ensure the scheduler starts right away + rabbitmq_client = get_rabbitmq_client(app) + await request_pipeline_scheduling( + rabbitmq_client, + db_engine, + user_id=updated_comp_run.user_id, + project_id=updated_comp_run.project_uuid, + iteration=updated_comp_run.iteration, + ) + + +async def _get_pipeline_dag( + project_id: ProjectID, db_engine: AsyncEngine +) -> nx.DiGraph: + comp_pipeline_repo = CompPipelinesRepository.instance(db_engine) + pipeline_at_db = await comp_pipeline_repo.get_pipeline(project_id) + return pipeline_at_db.get_graph() + + +_LOST_TASKS_FACTOR: Final[int] = 10 + + +@exclusive( + get_redis_client_from_app, + lock_key=get_redis_lock_key(MODULE_NAME_SCHEDULER, unique_lock_key_builder=None), +) +async def schedule_all_pipelines(app: FastAPI) -> None: + with log_context(_logger, logging.DEBUG, msg="scheduling pipelines"): + db_engine = get_db_engine(app) + runs_to_schedule = await CompRunsRepository.instance(db_engine).list_( + filter_by_state=SCHEDULED_STATES, + never_scheduled=True, + processed_since=SCHEDULER_INTERVAL, + ) + possibly_lost_scheduled_pipelines = await CompRunsRepository.instance( + db_engine + ).list_( + filter_by_state=SCHEDULED_STATES, + scheduled_since=SCHEDULER_INTERVAL * _LOST_TASKS_FACTOR, + ) + if possibly_lost_scheduled_pipelines: + _logger.error( + "found %d lost 
pipelines, they will be re-scheduled now", + len(possibly_lost_scheduled_pipelines), + ) + + rabbitmq_client = get_rabbitmq_client(app) + with log_context(_logger, logging.DEBUG, msg="distributing pipelines"): + await limited_gather( + *( + request_pipeline_scheduling( + rabbitmq_client, + db_engine, + user_id=run.user_id, + project_id=run.project_uuid, + iteration=run.iteration, + ) + for run in runs_to_schedule + possibly_lost_scheduled_pipelines + ), + limit=MAX_CONCURRENT_PIPELINE_SCHEDULING, + ) + if runs_to_schedule: + _logger.debug("distributed %d pipelines", len(runs_to_schedule)) + + +async def setup_manager(app: FastAPI) -> None: + app.state.scheduler_manager = create_periodic_task( + silence_exceptions((CouldNotAcquireLockError,))(schedule_all_pipelines), + interval=SCHEDULER_INTERVAL, + task_name=MODULE_NAME_SCHEDULER, + app=app, + ) + + +async def shutdown_manager(app: FastAPI) -> None: + await cancel_wait_task(app.state.scheduler_manager) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py new file mode 100644 index 00000000000..28dca04dc53 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_models.py @@ -0,0 +1,19 @@ +from typing import Literal + +from models_library.projects import ProjectID +from models_library.rabbitmq_messages import RabbitMessageBase +from models_library.users import UserID + +from ...models.comp_runs import Iteration + + +class SchedulePipelineRabbitMessage(RabbitMessageBase): + channel_name: Literal[ + "simcore.services.director-v2.scheduling" + ] = "simcore.services.director-v2.scheduling" + user_id: UserID + project_id: ProjectID + iteration: Iteration + + def routing_key(self) -> str | None: # pylint: disable=no-self-use # abstract + return None diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py new file mode 100644 index 00000000000..42c4b1d7938 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_publisher.py @@ -0,0 +1,32 @@ +from models_library.projects import ProjectID +from models_library.users import UserID +from servicelib.rabbitmq import RabbitMQClient +from sqlalchemy.ext.asyncio import AsyncEngine + +from ...models.comp_runs import Iteration +from ..db.repositories.comp_runs import CompRunsRepository +from ._models import SchedulePipelineRabbitMessage + + +async def request_pipeline_scheduling( + rabbitmq_client: RabbitMQClient, + db_engine: AsyncEngine, + *, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, +) -> None: + # NOTE: we should use the transaction and the asyncpg engine here to ensure 100% consistency + # https://github.com/ITISFoundation/osparc-simcore/issues/6818 + # async with transaction_context(get_asyncpg_engine(app)) as connection: + await rabbitmq_client.publish( + SchedulePipelineRabbitMessage.get_channel_name(), + SchedulePipelineRabbitMessage( + user_id=user_id, + project_id=project_id, + iteration=iteration, + ), + ) + await CompRunsRepository.instance(db_engine).mark_for_scheduling( + user_id=user_id, project_id=project_id, iteration=iteration + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py 
b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py new file mode 100644 index 00000000000..8ee9733a953 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_base.py @@ -0,0 +1,892 @@ +"""The scheduler shall be run as a background task. +Based on oSparc pipelines, it monitors when to start the next worker task(s), either one at a time or as a group of tasks. + +In principle the Scheduler maintains the comp_runs table in the database. +It contains how the pipeline was run and by whom. +It also contains the final result of the pipeline run. + +When a pipeline is scheduled first all the tasks contained in the DAG are set to PUBLISHED state. +Once the scheduler determines a task shall run, its state is set to PENDING, so that the sidecar can pick up the task. +The sidecar will then change the state to STARTED, then to SUCCESS or FAILED. + +""" + +import asyncio +import datetime +import logging +from abc import ABC, abstractmethod +from collections.abc import Callable +from dataclasses import dataclass +from typing import Final + +import arrow +import networkx as nx +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, NodeIDStr +from models_library.projects_state import RunningState +from models_library.services import ServiceType +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from networkx.classes.reportviews import InDegreeView +from servicelib.common_headers import UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE +from servicelib.logging_utils import log_catch, log_context +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient +from servicelib.redis import RedisClientSDK +from sqlalchemy.ext.asyncio import AsyncEngine + +from ...constants import UNDEFINED_STR_METADATA +from ...core.errors import ( + ClustersKeeperNotAvailableError, + ComputationalBackendNotConnectedError, + ComputationalBackendOnDemandNotReadyError, + ComputationalSchedulerChangedError, + DaskClientAcquisisitonError, + InvalidPipelineError, + PipelineNotFoundError, + TaskSchedulingError, +) +from ...core.settings import ComputationalBackendSettings +from ...models.comp_pipelines import CompPipelineAtDB +from ...models.comp_runs import CompRunsAtDB, Iteration, RunMetadataDict +from ...models.comp_tasks import CompTaskAtDB +from ...utils.computations import get_pipeline_state_from_task_states +from ...utils.rabbitmq import ( + publish_project_log, + publish_service_resource_tracking_heartbeat, + publish_service_resource_tracking_started, + publish_service_started_metrics, +) +from ..db.repositories.comp_pipelines import CompPipelinesRepository +from ..db.repositories.comp_runs import CompRunsRepository +from ..db.repositories.comp_tasks import CompTasksRepository +from ._publisher import request_pipeline_scheduling +from ._utils import ( + COMPLETED_STATES, + PROCESSING_STATES, + RUNNING_STATES, + TASK_TO_START_STATES, + WAITING_FOR_START_STATES, + create_service_resources_from_task, +) + +_logger = logging.getLogger(__name__) + + +_Previous = CompTaskAtDB +_Current = CompTaskAtDB +_MAX_WAITING_FOR_CLUSTER_TIMEOUT_IN_MIN: Final[int] = 10 + + +def _auto_schedule_callback( + loop: asyncio.AbstractEventLoop, + db_engine: AsyncEngine, + rabbit_mq_client: RabbitMQClient, + *, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, +) -> Callable[[], None]: + """this function is called via Dask-backend from a separate 
thread. + Therefore the need to use run_coroutine_threadsafe to request a new + pipeline scheduling""" + + def _cb() -> None: + async def _async_cb() -> None: + await request_pipeline_scheduling( + rabbit_mq_client, + db_engine, + user_id=user_id, + project_id=project_id, + iteration=iteration, + ) + + future = asyncio.run_coroutine_threadsafe(_async_cb(), loop) + with log_catch(_logger, reraise=False): + future.result(timeout=10) + + return _cb + + +@dataclass(frozen=True, slots=True) +class SortedTasks: + started: list[CompTaskAtDB] + completed: list[CompTaskAtDB] + waiting: list[CompTaskAtDB] + potentially_lost: list[CompTaskAtDB] + + +_MAX_WAITING_TIME_FOR_UNKNOWN_TASKS: Final[datetime.timedelta] = datetime.timedelta( + seconds=30 +) + + +async def _triage_changed_tasks( + changed_tasks: list[tuple[_Previous, _Current]], +) -> SortedTasks: + started_tasks = [ + current + for previous, current in changed_tasks + if current.state in RUNNING_STATES + or ( + previous.state in WAITING_FOR_START_STATES + and current.state in COMPLETED_STATES + ) + ] + + completed_tasks = [ + current for _, current in changed_tasks if current.state in COMPLETED_STATES + ] + + waiting_for_resources_tasks = [ + current + for previous, current in changed_tasks + if current.state in WAITING_FOR_START_STATES + ] + + lost_tasks = [ + current + for previous, current in changed_tasks + if (current.state is RunningState.UNKNOWN) + and ( + (arrow.utcnow().datetime - previous.modified) + > _MAX_WAITING_TIME_FOR_UNKNOWN_TASKS + ) + ] + if lost_tasks: + _logger.warning( + "%s are currently in unknown state. TIP: If they are running in an external cluster and it is not yet ready, that might explain it. But inform @sanderegg nevertheless!", + [t.node_id for t in lost_tasks], + ) + + return SortedTasks( + started_tasks, + completed_tasks, + waiting_for_resources_tasks, + lost_tasks, + ) + + +@dataclass +class BaseCompScheduler(ABC): + db_engine: AsyncEngine + rabbitmq_client: RabbitMQClient + rabbitmq_rpc_client: RabbitMQRPCClient + settings: ComputationalBackendSettings + service_runtime_heartbeat_interval: datetime.timedelta + redis_client: RedisClientSDK + + async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph: + comp_pipeline_repo = CompPipelinesRepository.instance(self.db_engine) + pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline( + project_id + ) + dag = pipeline_at_db.get_graph() + _logger.debug("%s: current %s", f"{project_id=}", f"{dag=}") + return dag + + async def _get_pipeline_tasks( + self, project_id: ProjectID, pipeline_dag: nx.DiGraph + ) -> dict[NodeIDStr, CompTaskAtDB]: + comp_tasks_repo = CompTasksRepository.instance(self.db_engine) + pipeline_comp_tasks: dict[NodeIDStr, CompTaskAtDB] = { + f"{t.node_id}": t + for t in await comp_tasks_repo.list_computational_tasks(project_id) + if (f"{t.node_id}" in list(pipeline_dag.nodes())) + } + if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()): # type: ignore[arg-type] + msg = ( + f"The tasks defined for {project_id} do not contain all" + f" the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check." 
+ ) + raise InvalidPipelineError(pipeline_id=project_id, msg=msg) + return pipeline_comp_tasks + + async def _update_run_result_from_tasks( + self, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + pipeline_tasks: dict[NodeIDStr, CompTaskAtDB], + ) -> RunningState: + pipeline_state_from_tasks = get_pipeline_state_from_task_states( + list(pipeline_tasks.values()), + ) + _logger.debug( + "pipeline %s is currently in %s", + f"{user_id=}_{project_id=}_{iteration=}", + f"{pipeline_state_from_tasks}", + ) + await self._set_run_result( + user_id, project_id, iteration, pipeline_state_from_tasks + ) + return pipeline_state_from_tasks + + async def _set_run_result( + self, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + run_result: RunningState, + ) -> None: + comp_runs_repo = CompRunsRepository.instance(self.db_engine) + await comp_runs_repo.set_run_result( + user_id=user_id, + project_id=project_id, + iteration=iteration, + result_state=run_result, + final_state=(run_result in COMPLETED_STATES), + ) + + async def _set_schedule_done( + self, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + ) -> None: + await CompRunsRepository.instance(self.db_engine).mark_as_processed( + user_id=user_id, + project_id=project_id, + iteration=iteration, + ) + + async def _set_states_following_failed_to_aborted( + self, project_id: ProjectID, dag: nx.DiGraph + ) -> dict[NodeIDStr, CompTaskAtDB]: + tasks = await self._get_pipeline_tasks(project_id, dag) + # Perform a reverse topological sort to ensure tasks are ordered from last to first + sorted_node_ids = list(reversed(list(nx.topological_sort(dag)))) + tasks = { + node_id: tasks[node_id] for node_id in sorted_node_ids if node_id in tasks + } + # we need the tasks ordered from the last task to the first + node_ids_to_set_as_aborted: set[NodeIDStr] = set() + for task in tasks.values(): + if task.state == RunningState.FAILED: + node_ids_to_set_as_aborted.update(nx.bfs_tree(dag, f"{task.node_id}")) + node_ids_to_set_as_aborted.remove(f"{task.node_id}") + for node_id in node_ids_to_set_as_aborted: + tasks[f"{node_id}"].state = RunningState.ABORTED + if node_ids_to_set_as_aborted: + # update the current states back in DB + comp_tasks_repo = CompTasksRepository.instance(self.db_engine) + await comp_tasks_repo.update_project_tasks_state( + project_id, + [NodeID(n) for n in node_ids_to_set_as_aborted], + RunningState.ABORTED, + optional_progress=1.0, + optional_stopped=arrow.utcnow().datetime, + ) + return tasks + + async def _send_running_tasks_heartbeat( + self, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + dag: nx.DiGraph, + ) -> None: + utc_now = arrow.utcnow().datetime + + def _need_heartbeat(task: CompTaskAtDB) -> bool: + if task.state not in RUNNING_STATES: + return False + if task.last_heartbeat is None: + assert task.start # nosec + return bool( + (utc_now - task.start.replace(tzinfo=datetime.UTC)) + > self.service_runtime_heartbeat_interval + ) + return bool( + (utc_now - task.last_heartbeat) + > self.service_runtime_heartbeat_interval + ) + + tasks: dict[NodeIDStr, CompTaskAtDB] = await self._get_pipeline_tasks( + project_id, dag + ) + if running_tasks := [t for t in tasks.values() if _need_heartbeat(t)]: + await asyncio.gather( + *( + publish_service_resource_tracking_heartbeat( + self.rabbitmq_client, + ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, t.project_id, t.node_id, iteration + ), + ) + for t in running_tasks + ) + ) + comp_tasks_repo = 
CompTasksRepository(self.db_engine) + await asyncio.gather( + *( + comp_tasks_repo.update_project_task_last_heartbeat( + t.project_id, t.node_id, utc_now + ) + for t in running_tasks + ) + ) + + async def _get_changed_tasks_from_backend( + self, + user_id: UserID, + processing_tasks: list[CompTaskAtDB], + comp_run: CompRunsAtDB, + ) -> tuple[list[tuple[_Previous, _Current]], list[CompTaskAtDB]]: + tasks_backend_status = await self._get_tasks_status( + user_id, processing_tasks, comp_run + ) + + return ( + [ + ( + task, + task.model_copy(update={"state": backend_state}), + ) + for task, backend_state in zip( + processing_tasks, tasks_backend_status, strict=True + ) + if task.state is not backend_state + ], + [ + task + for task, backend_state in zip( + processing_tasks, tasks_backend_status, strict=True + ) + if task.state is backend_state is RunningState.STARTED + ], + ) + + async def _process_started_tasks( + self, + tasks: list[CompTaskAtDB], + *, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + run_metadata: RunMetadataDict, + ) -> None: + utc_now = arrow.utcnow().datetime + + # resource tracking + await asyncio.gather( + *( + publish_service_resource_tracking_started( + self.rabbitmq_client, + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, t.project_id, t.node_id, iteration + ), + wallet_id=run_metadata.get("wallet_id"), + wallet_name=run_metadata.get("wallet_name"), + pricing_plan_id=( + t.pricing_info.get("pricing_plan_id") + if t.pricing_info + else None + ), + pricing_unit_id=( + t.pricing_info.get("pricing_unit_id") + if t.pricing_info + else None + ), + pricing_unit_cost_id=( + t.pricing_info.get("pricing_unit_cost_id") + if t.pricing_info + else None + ), + product_name=run_metadata.get( + "product_name", UNDEFINED_STR_METADATA + ), + simcore_user_agent=run_metadata.get( + "simcore_user_agent", UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE + ), + user_id=user_id, + user_email=run_metadata.get("user_email", UNDEFINED_STR_METADATA), + project_id=t.project_id, + project_name=run_metadata.get("project_metadata", {}).get( # type: ignore[arg-type] + "project_name", UNDEFINED_STR_METADATA + ), + node_id=t.node_id, + node_name=run_metadata.get("node_id_names_map", {}).get( + t.node_id, UNDEFINED_STR_METADATA + ), + parent_project_id=run_metadata.get("project_metadata", {}).get( + "parent_project_id" + ), + parent_node_id=run_metadata.get("project_metadata", {}).get( + "parent_node_id" + ), + root_parent_project_id=run_metadata.get("project_metadata", {}).get( + "root_parent_project_id" + ), + root_parent_project_name=run_metadata.get( + "project_metadata", {} + ).get("root_parent_project_name"), + root_parent_node_id=run_metadata.get("project_metadata", {}).get( + "root_parent_node_id" + ), + service_key=t.image.name, + service_version=t.image.tag, + service_type=ServiceType.COMPUTATIONAL, + service_resources=create_service_resources_from_task(t), + service_additional_metadata={}, + ) + for t in tasks + ) + ) + # instrumentation + await asyncio.gather( + *( + publish_service_started_metrics( + self.rabbitmq_client, + user_id=user_id, + simcore_user_agent=run_metadata.get( + "simcore_user_agent", UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE + ), + task=t, + ) + for t in tasks + ) + ) + + # update DB + comp_tasks_repo = CompTasksRepository(self.db_engine) + await asyncio.gather( + *( + comp_tasks_repo.update_project_tasks_state( + t.project_id, + [t.node_id], + t.state, + optional_started=utc_now, + optional_progress=t.progress, + ) 
+ for t in tasks + ) + ) + await CompRunsRepository.instance(self.db_engine).mark_as_started( + user_id=user_id, + project_id=project_id, + iteration=iteration, + started_time=utc_now, + ) + + async def _process_waiting_tasks(self, tasks: list[CompTaskAtDB]) -> None: + comp_tasks_repo = CompTasksRepository(self.db_engine) + await asyncio.gather( + *( + comp_tasks_repo.update_project_tasks_state( + t.project_id, + [t.node_id], + t.state, + ) + for t in tasks + ) + ) + + async def _update_states_from_comp_backend( + self, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + pipeline_dag: nx.DiGraph, + comp_run: CompRunsAtDB, + ) -> None: + tasks = await self._get_pipeline_tasks(project_id, pipeline_dag) + tasks_inprocess = [t for t in tasks.values() if t.state in PROCESSING_STATES] + if not tasks_inprocess: + return + + # get the tasks which state actually changed since last check + ( + tasks_with_changed_states, + executing_tasks, + ) = await self._get_changed_tasks_from_backend( + user_id, tasks_inprocess, comp_run + ) + # NOTE: typical states a task goes through + # NOT_STARTED (initial state) -> PUBLISHED (user press run/API call) -> PENDING -> WAITING_FOR_CLUSTER (cluster creation) -> + # PENDING -> WAITING_FOR_RESOURCES (workers creation or missing) -> PENDING -> STARTED (worker started processing the task) -> SUCCESS/FAILED + # or ABORTED (user cancelled) or UNKNOWN (lost task - it might be transient, be careful with this one) + sorted_tasks = await _triage_changed_tasks(tasks_with_changed_states) + _logger.debug("found the following %s tasks with changed states", sorted_tasks) + # now process the tasks + if sorted_tasks.started: + # NOTE: the dask-scheduler cannot differentiate between tasks that are effectively computing and + # tasks that are only queued and accepted by a dask-worker. We use dask plugins to report on tasks states + # states are published to log_event, and we directly publish into RabbitMQ the sidecar and services logs. 
+ # tasks_started should therefore be mostly empty but for cases where + # - dask log_event/subscribe_topic mechanism failed, the tasks goes from PENDING -> SUCCESS/FAILED/ABORTED without STARTED + # - the task finished so fast that the STARTED state was skipped between 2 runs of the dv-2 comp scheduler + await self._process_started_tasks( + sorted_tasks.started, + user_id=user_id, + project_id=project_id, + iteration=iteration, + run_metadata=comp_run.metadata, + ) + + if sorted_tasks.completed or sorted_tasks.potentially_lost: + await self._process_completed_tasks( + user_id, + sorted_tasks.completed + sorted_tasks.potentially_lost, + iteration, + comp_run=comp_run, + ) + + if sorted_tasks.waiting: + await self._process_waiting_tasks(sorted_tasks.waiting) + + if executing_tasks: + await self._process_executing_tasks(user_id, executing_tasks, comp_run) + + @abstractmethod + async def _start_tasks( + self, + *, + user_id: UserID, + project_id: ProjectID, + scheduled_tasks: dict[NodeID, CompTaskAtDB], + comp_run: CompRunsAtDB, + wake_up_callback: Callable[[], None], + ) -> None: + """start tasks in the 3rd party backend""" + + @abstractmethod + async def _get_tasks_status( + self, user_id: UserID, tasks: list[CompTaskAtDB], comp_run: CompRunsAtDB + ) -> list[RunningState]: + """returns tasks status from the 3rd party backend""" + + @abstractmethod + async def _stop_tasks( + self, user_id: UserID, tasks: list[CompTaskAtDB], comp_run: CompRunsAtDB + ) -> None: + """stop tasks in the 3rd party backend""" + + @abstractmethod + async def _process_completed_tasks( + self, + user_id: UserID, + tasks: list[CompTaskAtDB], + iteration: Iteration, + comp_run: CompRunsAtDB, + ) -> None: + """process tasks from the 3rd party backend""" + + @abstractmethod + async def _process_executing_tasks( + self, + user_id: UserID, + tasks: list[CompTaskAtDB], + comp_run: CompRunsAtDB, + ) -> None: + """process executing tasks from the 3rd party backend""" + + async def apply( + self, + *, + user_id: UserID, + project_id: ProjectID, + iteration: Iteration, + ) -> None: + """apply the scheduling of a pipeline for a given user, project and iteration.""" + with log_context( + _logger, + level=logging.INFO, + msg=f"scheduling pipeline {user_id=}:{project_id=}:{iteration=}", + ): + dag: nx.DiGraph = nx.DiGraph() + try: + comp_run = await CompRunsRepository.instance(self.db_engine).get( + user_id, project_id, iteration + ) + dag = await self._get_pipeline_dag(project_id) + # 1. Update our list of tasks with data from backend (state, results) + await self._update_states_from_comp_backend( + user_id, project_id, iteration, dag, comp_run + ) + # 2. Any task following a FAILED task shall be ABORTED + comp_tasks = await self._set_states_following_failed_to_aborted( + project_id, dag + ) + # 3. do we want to stop the pipeline now? + if comp_run.cancelled: + await self._schedule_tasks_to_stop( + user_id, project_id, comp_tasks, comp_run + ) + else: + # let's get the tasks to schedule then + comp_tasks = await self._schedule_tasks_to_start( + user_id=user_id, + project_id=project_id, + comp_tasks=comp_tasks, + dag=dag, + comp_run=comp_run, + wake_up_callback=_auto_schedule_callback( + asyncio.get_running_loop(), + self.db_engine, + self.rabbitmq_client, + user_id=user_id, + project_id=project_id, + iteration=iteration, + ), + ) + # 4. timeout if waiting for cluster has been there for more than X minutes + comp_tasks = await self._timeout_if_waiting_for_cluster_too_long( + user_id, project_id, comp_tasks + ) + # 5. 
send a heartbeat + await self._send_running_tasks_heartbeat( + user_id, project_id, iteration, dag + ) + + # 6. Update the run result + pipeline_result = await self._update_run_result_from_tasks( + user_id, project_id, iteration, comp_tasks + ) + + # 7. Are we done scheduling that pipeline? + if not dag.nodes() or pipeline_result in COMPLETED_STATES: + # there is nothing left, the run is completed, we're done here + _logger.info( + "pipeline %s scheduling completed with result %s", + f"{project_id=}", + f"{pipeline_result=}", + ) + except PipelineNotFoundError: + _logger.warning( + "pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler", + f"{project_id=}", + ) + await self._set_run_result( + user_id, project_id, iteration, RunningState.ABORTED + ) + except InvalidPipelineError as exc: + _logger.warning( + "pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s", + f"{project_id=}", + exc, + ) + await self._set_run_result( + user_id, project_id, iteration, RunningState.ABORTED + ) + except (DaskClientAcquisisitonError, ClustersKeeperNotAvailableError): + _logger.exception( + "Unexpected error while connecting with computational backend, aborting pipeline" + ) + tasks: dict[NodeIDStr, CompTaskAtDB] = await self._get_pipeline_tasks( + project_id, dag + ) + comp_tasks_repo = CompTasksRepository(self.db_engine) + await comp_tasks_repo.update_project_tasks_state( + project_id, + [t.node_id for t in tasks.values()], + RunningState.FAILED, + ) + await self._set_run_result( + user_id, project_id, iteration, RunningState.FAILED + ) + except ComputationalBackendNotConnectedError: + _logger.exception("Computational backend is not connected!") + finally: + await self._set_schedule_done(user_id, project_id, iteration) + + async def _schedule_tasks_to_stop( + self, + user_id: UserID, + project_id: ProjectID, + comp_tasks: dict[NodeIDStr, CompTaskAtDB], + comp_run: CompRunsAtDB, + ) -> None: + # get any running task and stop them + comp_tasks_repo = CompTasksRepository.instance(self.db_engine) + await ( + comp_tasks_repo.mark_project_published_waiting_for_cluster_tasks_as_aborted( + project_id + ) + ) + # stop any remaining running task, these are already submitted + if tasks_to_stop := [ + t for t in comp_tasks.values() if t.state in PROCESSING_STATES + ]: + await self._stop_tasks(user_id, tasks_to_stop, comp_run) + + async def _schedule_tasks_to_start( # noqa: C901 + self, + user_id: UserID, + project_id: ProjectID, + comp_tasks: dict[NodeIDStr, CompTaskAtDB], + dag: nx.DiGraph, + comp_run: CompRunsAtDB, + wake_up_callback: Callable[[], None], + ) -> dict[NodeIDStr, CompTaskAtDB]: + # filter out the successfully completed tasks + dag.remove_nodes_from( + { + node_id + for node_id, t in comp_tasks.items() + if t.state == RunningState.SUCCESS + } + ) + dag_in_degree = dag.in_degree() + assert isinstance(dag_in_degree, InDegreeView) # nosec + next_task_node_ids = [ + node_id for node_id, degree in dag_in_degree if degree == 0 + ] + + # get the tasks to start + tasks_ready_to_start: dict[NodeID, CompTaskAtDB] = { + node_id: comp_tasks[f"{node_id}"] + for node_id in next_task_node_ids + if comp_tasks[f"{node_id}"].state in TASK_TO_START_STATES + } + + if not tasks_ready_to_start: + # nothing to do + return comp_tasks + + try: + await self._start_tasks( + user_id=user_id, + project_id=project_id, + scheduled_tasks=tasks_ready_to_start, + comp_run=comp_run, + wake_up_callback=wake_up_callback, + ) + except ( + 
ComputationalBackendNotConnectedError, + ComputationalSchedulerChangedError, + ): + _logger.exception( + "Issue with computational backend. Tasks are set back " + "to WAITING_FOR_CLUSTER state until scheduler comes back!", + ) + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + list(tasks_ready_to_start.keys()), + RunningState.WAITING_FOR_CLUSTER, + ) + for task in tasks_ready_to_start: + comp_tasks[f"{task}"].state = RunningState.WAITING_FOR_CLUSTER + + except ComputationalBackendOnDemandNotReadyError as exc: + _logger.info( + "The on demand computational backend is not ready yet: %s", exc + ) + await publish_project_log( + self.rabbitmq_client, + user_id, + project_id, + log=f"{exc}", + log_level=logging.INFO, + ) + + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + list(tasks_ready_to_start.keys()), + RunningState.WAITING_FOR_CLUSTER, + ) + for task in tasks_ready_to_start: + comp_tasks[f"{task}"].state = RunningState.WAITING_FOR_CLUSTER + except ClustersKeeperNotAvailableError: + _logger.exception("Unexpected error while starting tasks:") + await publish_project_log( + self.rabbitmq_client, + user_id, + project_id, + log="Unexpected error while scheduling computational tasks! TIP: contact osparc support.", + log_level=logging.ERROR, + ) + + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + list(tasks_ready_to_start.keys()), + RunningState.FAILED, + optional_progress=1.0, + optional_stopped=arrow.utcnow().datetime, + ) + for task in tasks_ready_to_start: + comp_tasks[f"{task}"].state = RunningState.FAILED + raise + except TaskSchedulingError as exc: + _logger.exception( + "Project '%s''s task '%s' could not be scheduled", + exc.project_id, + exc.node_id, + ) + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + [exc.node_id], + RunningState.FAILED, + exc.get_errors(), + optional_progress=1.0, + optional_stopped=arrow.utcnow().datetime, + ) + comp_tasks[f"{exc.node_id}"].state = RunningState.FAILED + except Exception: + _logger.exception( + "Unexpected error for %s with %s on %s happened when scheduling %s:", + f"{comp_run.user_id=}", + f"{comp_run.project_uuid=}", + f"{comp_run.use_on_demand_clusters=}", + f"{tasks_ready_to_start.keys()=}", + ) + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + list(tasks_ready_to_start.keys()), + RunningState.FAILED, + optional_progress=1.0, + optional_stopped=arrow.utcnow().datetime, + ) + for task in tasks_ready_to_start: + comp_tasks[f"{task}"].state = RunningState.FAILED + raise + + return comp_tasks + + async def _timeout_if_waiting_for_cluster_too_long( + self, + user_id: UserID, + project_id: ProjectID, + comp_tasks: dict[NodeIDStr, CompTaskAtDB], + ) -> dict[NodeIDStr, CompTaskAtDB]: + if all( + c.state is RunningState.WAITING_FOR_CLUSTER for c in comp_tasks.values() + ): + # get latest modified task + latest_modified_of_all_tasks = max( + comp_tasks.values(), key=lambda task: task.modified + ).modified + + if ( + arrow.utcnow().datetime - latest_modified_of_all_tasks + ) > datetime.timedelta(minutes=_MAX_WAITING_FOR_CLUSTER_TIMEOUT_IN_MIN): + await CompTasksRepository.instance( + self.db_engine + ).update_project_tasks_state( + project_id, + [NodeID(idstr) for idstr in comp_tasks], + RunningState.FAILED, + optional_progress=1.0, + optional_stopped=arrow.utcnow().datetime, + ) + for task in 
comp_tasks.values(): + task.state = RunningState.FAILED + msg = "Timed-out waiting for computational cluster! Please try again and/or contact Osparc support." + _logger.error(msg) + await publish_project_log( + self.rabbitmq_client, + user_id, + project_id, + log=msg, + log_level=logging.ERROR, + ) + return comp_tasks diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py new file mode 100644 index 00000000000..7f1d4747275 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_dask.py @@ -0,0 +1,408 @@ +import asyncio +import contextlib +import logging +from collections.abc import AsyncIterator, Callable +from contextlib import asynccontextmanager +from dataclasses import dataclass +from typing import Any + +import arrow +from dask_task_models_library.container_tasks.errors import TaskCancelledError +from dask_task_models_library.container_tasks.events import ( + TaskProgressEvent, +) +from dask_task_models_library.container_tasks.io import TaskOutputData +from dask_task_models_library.container_tasks.utils import parse_dask_job_id +from models_library.clusters import BaseCluster +from models_library.errors import ErrorDict +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.rabbitmq_messages import SimcorePlatformStatus +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from servicelib.common_headers import UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE +from servicelib.logging_utils import log_catch + +from ...core.errors import ( + ComputationalBackendNotConnectedError, + ComputationalBackendOnDemandNotReadyError, + TaskSchedulingError, +) +from ...models.comp_runs import CompRunsAtDB, Iteration, RunMetadataDict +from ...models.comp_tasks import CompTaskAtDB +from ...utils.dask import ( + clean_task_output_and_log_files_if_invalid, + parse_output_data, +) +from ...utils.dask_client_utils import TaskHandlers, UnixTimestamp +from ...utils.rabbitmq import ( + publish_service_progress, + publish_service_resource_tracking_stopped, + publish_service_stopped_metrics, +) +from ..clusters_keeper import get_or_create_on_demand_cluster +from ..dask_client import DaskClient, PublishedComputationTask +from ..dask_clients_pool import DaskClientsPool +from ..db.repositories.comp_runs import ( + CompRunsRepository, +) +from ..db.repositories.comp_tasks import CompTasksRepository +from ._scheduler_base import BaseCompScheduler +from ._utils import ( + WAITING_FOR_START_STATES, +) + +_logger = logging.getLogger(__name__) + + +@asynccontextmanager +async def _cluster_dask_client( + user_id: UserID, + scheduler: "DaskScheduler", + *, + use_on_demand_clusters: bool, + run_metadata: RunMetadataDict, +) -> AsyncIterator[DaskClient]: + cluster: BaseCluster = scheduler.settings.default_cluster + if use_on_demand_clusters: + cluster = await get_or_create_on_demand_cluster( + scheduler.rabbitmq_rpc_client, + user_id=user_id, + wallet_id=run_metadata.get("wallet_id"), + ) + async with scheduler.dask_clients_pool.acquire(cluster) as client: + yield client + + +@dataclass +class DaskScheduler(BaseCompScheduler): + dask_clients_pool: DaskClientsPool + + def __post_init__(self) -> None: + self.dask_clients_pool.register_handlers( + TaskHandlers( + 
self._task_progress_change_handler, + ) + ) + + async def _start_tasks( + self, + *, + user_id: UserID, + project_id: ProjectID, + scheduled_tasks: dict[NodeID, CompTaskAtDB], + comp_run: CompRunsAtDB, + wake_up_callback: Callable[[], None], + ) -> None: + # now transfer the pipeline to the dask scheduler + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + # Change the tasks state to PENDING + comp_tasks_repo = CompTasksRepository.instance(self.db_engine) + await comp_tasks_repo.update_project_tasks_state( + project_id, + list(scheduled_tasks.keys()), + RunningState.PENDING, + ) + # each task is started independently + results: list[list[PublishedComputationTask]] = await asyncio.gather( + *( + client.send_computation_tasks( + user_id=user_id, + project_id=project_id, + tasks={node_id: task.image}, + hardware_info=task.hardware_info, + callback=wake_up_callback, + metadata=comp_run.metadata, + resource_tracking_run_id=ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, project_id, node_id, comp_run.iteration + ), + ) + for node_id, task in scheduled_tasks.items() + ), + ) + + # update the database so we do have the correct job_ids there + await asyncio.gather( + *( + comp_tasks_repo.update_project_task_job_id( + project_id, task.node_id, task.job_id + ) + for task_sents in results + for task in task_sents + ) + ) + + async def _get_tasks_status( + self, + user_id: UserID, + tasks: list[CompTaskAtDB], + comp_run: CompRunsAtDB, + ) -> list[RunningState]: + try: + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + return await client.get_tasks_status([f"{t.job_id}" for t in tasks]) + + except ComputationalBackendOnDemandNotReadyError: + _logger.info("The on demand computational backend is not ready yet...") + return [RunningState.WAITING_FOR_CLUSTER] * len(tasks) + + async def _process_executing_tasks( + self, + user_id: UserID, + tasks: list[CompTaskAtDB], + comp_run: CompRunsAtDB, + ) -> None: + task_progresses = [] + try: + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + task_progresses = await client.get_tasks_progress( + [f"{t.job_id}" for t in tasks], + ) + for task_progress_event in task_progresses: + if task_progress_event: + await CompTasksRepository( + self.db_engine + ).update_project_task_progress( + task_progress_event.task_owner.project_id, + task_progress_event.task_owner.node_id, + task_progress_event.progress, + ) + + except ComputationalBackendOnDemandNotReadyError: + _logger.info("The on demand computational backend is not ready yet...") + + comp_tasks_repo = CompTasksRepository(self.db_engine) + await asyncio.gather( + *( + comp_tasks_repo.update_project_task_progress( + t.task_owner.project_id, + t.task_owner.node_id, + t.progress, + ) + for t in task_progresses + if t + ) + ) + await asyncio.gather( + *( + publish_service_progress( + self.rabbitmq_client, + user_id=t.task_owner.user_id, + project_id=t.task_owner.project_id, + node_id=t.task_owner.node_id, + progress=t.progress, + ) + for t in task_progresses + if t + ) + ) + + async def _stop_tasks( + self, user_id: UserID, tasks: list[CompTaskAtDB], comp_run: CompRunsAtDB + ) -> None: + # NOTE: if this exception raises, it means the backend was anyway not up + with 
contextlib.suppress(ComputationalBackendOnDemandNotReadyError): + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + await asyncio.gather( + *[ + client.abort_computation_task(t.job_id) + for t in tasks + if t.job_id + ] + ) + # tasks that have no-worker must be unpublished as these are blocking forever + tasks_with_no_worker = [ + t for t in tasks if t.state is RunningState.WAITING_FOR_RESOURCES + ] + await asyncio.gather( + *[ + client.release_task_result(t.job_id) + for t in tasks_with_no_worker + if t.job_id + ] + ) + + async def _process_completed_tasks( + self, + user_id: UserID, + tasks: list[CompTaskAtDB], + iteration: Iteration, + comp_run: CompRunsAtDB, + ) -> None: + try: + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + tasks_results = await asyncio.gather( + *[client.get_task_result(t.job_id or "undefined") for t in tasks], + return_exceptions=True, + ) + await asyncio.gather( + *[ + self._process_task_result( + task, result, comp_run.metadata, iteration + ) + for task, result in zip(tasks, tasks_results, strict=True) + ] + ) + finally: + async with _cluster_dask_client( + user_id, + self, + use_on_demand_clusters=comp_run.use_on_demand_clusters, + run_metadata=comp_run.metadata, + ) as client: + await asyncio.gather( + *[client.release_task_result(t.job_id) for t in tasks if t.job_id] + ) + + async def _process_task_result( + self, + task: CompTaskAtDB, + result: BaseException | TaskOutputData, + run_metadata: RunMetadataDict, + iteration: Iteration, + ) -> None: + _logger.debug("received %s result: %s", f"{task=}", f"{result=}") + task_final_state = RunningState.FAILED + simcore_platform_status = SimcorePlatformStatus.OK + errors: list[ErrorDict] = [] + + if task.job_id is not None: + ( + _service_key, + _service_version, + user_id, + project_id, + node_id, + ) = parse_dask_job_id(task.job_id) + + assert task.project_id == project_id # nosec + assert task.node_id == node_id # nosec + + try: + if isinstance(result, TaskOutputData): + # success! 
+ await parse_output_data( + self.db_engine, + task.job_id, + result, + ) + task_final_state = RunningState.SUCCESS + + else: + if isinstance(result, TaskCancelledError): + task_final_state = RunningState.ABORTED + else: + task_final_state = RunningState.FAILED + errors.append( + { + "loc": ( + f"{task.project_id}", + f"{task.node_id}", + ), + "msg": f"{result}", + "type": "runtime", + } + ) + if isinstance(result, ComputationalBackendNotConnectedError): + simcore_platform_status = SimcorePlatformStatus.BAD + # we need to remove any invalid files in the storage + await clean_task_output_and_log_files_if_invalid( + self.db_engine, user_id, project_id, node_id + ) + except TaskSchedulingError as err: + task_final_state = RunningState.FAILED + simcore_platform_status = SimcorePlatformStatus.BAD + errors = err.get_errors() + _logger.debug( + "Unexpected failure while processing results of %s: %s", + f"{task=}", + f"{errors=}", + ) + + # resource tracking + await publish_service_resource_tracking_stopped( + self.rabbitmq_client, + ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, project_id, node_id, iteration + ), + simcore_platform_status=simcore_platform_status, + ) + # instrumentation + await publish_service_stopped_metrics( + self.rabbitmq_client, + user_id=user_id, + simcore_user_agent=run_metadata.get( + "simcore_user_agent", UNDEFINED_DEFAULT_SIMCORE_USER_AGENT_VALUE + ), + task=task, + task_final_state=task_final_state, + ) + + await CompTasksRepository(self.db_engine).update_project_tasks_state( + task.project_id, + [task.node_id], + task_final_state, + errors=errors, + optional_progress=1, + optional_stopped=arrow.utcnow().datetime, + ) + + async def _task_progress_change_handler( + self, event: tuple[UnixTimestamp, Any] + ) -> None: + with log_catch(_logger, reraise=False): + task_progress_event = TaskProgressEvent.model_validate_json(event[1]) + _logger.debug("received task progress update: %s", task_progress_event) + user_id = task_progress_event.task_owner.user_id + project_id = task_progress_event.task_owner.project_id + node_id = task_progress_event.task_owner.node_id + comp_tasks_repo = CompTasksRepository(self.db_engine) + task = await comp_tasks_repo.get_task(project_id, node_id) + if task.state in WAITING_FOR_START_STATES: + task.state = RunningState.STARTED + task.progress = task_progress_event.progress + run = await CompRunsRepository(self.db_engine).get(user_id, project_id) + await self._process_started_tasks( + [task], + user_id=user_id, + project_id=project_id, + iteration=run.iteration, + run_metadata=run.metadata, + ) + else: + await comp_tasks_repo.update_project_task_progress( + project_id, node_id, task_progress_event.progress + ) + await publish_service_progress( + self.rabbitmq_client, + user_id=user_id, + project_id=project_id, + node_id=node_id, + progress=task_progress_event.progress, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py new file mode 100644 index 00000000000..edda456f303 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_scheduler_factory.py @@ -0,0 +1,31 @@ +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import log_context +from settings_library.redis import RedisDatabase + +from ...core.settings import AppSettings +from ..dask_clients_pool import DaskClientsPool +from ..db import 
get_db_engine +from ..rabbitmq import get_rabbitmq_client, get_rabbitmq_rpc_client +from ..redis import get_redis_client_manager +from ._scheduler_base import BaseCompScheduler +from ._scheduler_dask import DaskScheduler + +_logger = logging.getLogger(__name__) + + +def create_scheduler(app: FastAPI) -> BaseCompScheduler: + with log_context( + _logger, logging.INFO, msg="Creating Dask-based computational scheduler" + ): + app_settings: AppSettings = app.state.settings + return DaskScheduler( + settings=app_settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, + dask_clients_pool=DaskClientsPool.instance(app), + rabbitmq_client=get_rabbitmq_client(app), + rabbitmq_rpc_client=get_rabbitmq_rpc_client(app), + redis_client=get_redis_client_manager(app).client(RedisDatabase.LOCKS), + db_engine=get_db_engine(app), + service_runtime_heartbeat_interval=app_settings.SERVICE_TRACKING_HEARTBEAT, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py new file mode 100644 index 00000000000..dc414376db0 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_utils.py @@ -0,0 +1,97 @@ +from collections.abc import Callable + +from fastapi import FastAPI +from models_library.docker import DockerGenericTag +from models_library.projects_state import RunningState +from models_library.services_resources import ( + ResourceValue, + ServiceResourcesDict, + ServiceResourcesDictHelpers, +) +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase + +from ...models.comp_tasks import CompTaskAtDB +from ..redis import get_redis_client_manager + +SCHEDULED_STATES: set[RunningState] = { + RunningState.PUBLISHED, + RunningState.PENDING, + RunningState.WAITING_FOR_RESOURCES, + RunningState.STARTED, + RunningState.WAITING_FOR_CLUSTER, +} + +TASK_TO_START_STATES: set[RunningState] = { + RunningState.PUBLISHED, + RunningState.WAITING_FOR_CLUSTER, +} + +WAITING_FOR_START_STATES: set[RunningState] = { + RunningState.PUBLISHED, + RunningState.PENDING, + RunningState.WAITING_FOR_RESOURCES, + RunningState.WAITING_FOR_CLUSTER, +} + +PROCESSING_STATES: set[RunningState] = { + RunningState.PENDING, + RunningState.WAITING_FOR_RESOURCES, + RunningState.STARTED, +} + +RUNNING_STATES: set[RunningState] = { + RunningState.STARTED, +} + +COMPLETED_STATES: set[RunningState] = { + RunningState.ABORTED, + RunningState.SUCCESS, + RunningState.FAILED, +} + + +def create_service_resources_from_task(task: CompTaskAtDB) -> ServiceResourcesDict: + assert task.image.node_requirements # nosec + return ServiceResourcesDictHelpers.create_from_single_service( + DockerGenericTag(f"{task.image.name}:{task.image.tag}"), + { + res_name: ResourceValue(limit=res_value, reservation=res_value) + for res_name, res_value in task.image.node_requirements.model_dump( + by_alias=True + ).items() + if res_value is not None + }, + [task.image.boot_mode], + ) + + +def _get_app_from_args(*args, **kwargs) -> FastAPI: + assert kwargs is not None # nosec + if args: + app = args[0] + else: + assert "app" in kwargs # nosec + app = kwargs["app"] + assert isinstance(app, FastAPI) # nosec + return app + + +def get_redis_client_from_app(*args, **kwargs) -> RedisClientSDK: + app = _get_app_from_args(*args, **kwargs) + return get_redis_client_manager(app).client(RedisDatabase.LOCKS) + + +def get_redis_lock_key( + suffix: str, *, unique_lock_key_builder: Callable[..., str] | None 
+) -> Callable[..., str]: + def _(*args, **kwargs) -> str: + app = _get_app_from_args(*args, **kwargs) + unique_lock_part = ( + unique_lock_key_builder(*args, **kwargs) if unique_lock_key_builder else "" + ) + if unique_lock_part: + unique_lock_part = f"-{unique_lock_part}" + return f"{app.title}-{suffix}{unique_lock_part}" + + return _ diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py new file mode 100644 index 00000000000..914629bcef5 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/_worker.py @@ -0,0 +1,92 @@ +import asyncio +import contextlib +import functools +import logging +from typing import cast + +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.users import UserID +from servicelib.logging_utils import log_context +from servicelib.redis import CouldNotAcquireLockError, exclusive + +from ...core.settings import get_application_settings +from ...models.comp_runs import Iteration +from ..rabbitmq import get_rabbitmq_client +from ._constants import MODULE_NAME_WORKER +from ._models import SchedulePipelineRabbitMessage +from ._scheduler_base import BaseCompScheduler +from ._scheduler_factory import create_scheduler +from ._utils import get_redis_client_from_app, get_redis_lock_key + +_logger = logging.getLogger(__name__) + + +def _get_scheduler_worker(app: FastAPI) -> BaseCompScheduler: + return cast(BaseCompScheduler, app.state.scheduler_worker) + + +def _unique_key_builder( + _app, user_id: UserID, project_id: ProjectID, iteration: Iteration +) -> str: + return f"{user_id}:{project_id}:{iteration}" + + +@exclusive( + get_redis_client_from_app, + lock_key=get_redis_lock_key( + MODULE_NAME_WORKER, unique_lock_key_builder=_unique_key_builder + ), +) +async def _exclusively_schedule_pipeline( + app: FastAPI, *, user_id: UserID, project_id: ProjectID, iteration: Iteration +) -> None: + await _get_scheduler_worker(app).apply( + user_id=user_id, + project_id=project_id, + iteration=iteration, + ) + + +async def _handle_apply_distributed_schedule(app: FastAPI, data: bytes) -> bool: + with log_context(_logger, logging.DEBUG, msg="handling scheduling"): + to_schedule_pipeline = SchedulePipelineRabbitMessage.model_validate_json(data) + with contextlib.suppress(CouldNotAcquireLockError): + await _exclusively_schedule_pipeline( + app, + user_id=to_schedule_pipeline.user_id, + project_id=to_schedule_pipeline.project_id, + iteration=to_schedule_pipeline.iteration, + ) + return True + + +async def setup_worker(app: FastAPI) -> None: + app_settings = get_application_settings(app) + rabbitmq_client = get_rabbitmq_client(app) + app.state.scheduler_worker_consumers = await asyncio.gather( + *( + rabbitmq_client.subscribe( + SchedulePipelineRabbitMessage.get_channel_name(), + functools.partial(_handle_apply_distributed_schedule, app), + exclusive_queue=False, + ) + for _ in range( + app_settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY + ) + ) + ) + + app.state.scheduler_worker = create_scheduler(app) + + +async def shutdown_worker(app: FastAPI) -> None: + assert app.state.scheduler_worker # nosec + rabbitmq_client = get_rabbitmq_client(app) + await asyncio.gather( + *( + rabbitmq_client.unsubscribe_consumer(*consumer) + for consumer in app.state.scheduler_worker_consumers + ), + return_exceptions=False, + ) diff --git 
a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/background_task.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/background_task.py deleted file mode 100644 index b6fc18efbce..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/background_task.py +++ /dev/null @@ -1,73 +0,0 @@ -import asyncio -import logging -from asyncio import CancelledError -from contextlib import suppress -from typing import Any, Callable, Coroutine - -from fastapi import FastAPI - -from . import factory - -logger = logging.getLogger(__name__) - -_DEFAULT_TIMEOUT_S: int = 5 - - -async def scheduler_task(app: FastAPI) -> None: - scheduler = app.state.scheduler - while app.state.comp_scheduler_running: - try: - logger.debug("Computational scheduler task running...") - await scheduler.schedule_all_pipelines() - with suppress(asyncio.TimeoutError): - await asyncio.wait_for( - scheduler.wake_up_event.wait(), timeout=_DEFAULT_TIMEOUT_S - ) - except CancelledError: - logger.info("Computational scheduler task cancelled") - raise - except Exception: # pylint: disable=broad-except - if not app.state.comp_scheduler_running: - logger.warning("Forced to stop computational scheduler") - break - logger.exception( - "Unexpected error in computational scheduler task, restarting scheduler now..." - ) - # wait a bit before restarting the task - await asyncio.sleep(_DEFAULT_TIMEOUT_S) - - -def on_app_startup(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]: - async def start_scheduler() -> None: - # FIXME: added this variable to overcome the state in which the - # task cancelation is ignored and the exceptions enter in a loop - # that never stops the background task. This flag is an additional - # mechanism to enforce stopping the background task - app.state.comp_scheduler_running = True - app.state.scheduler = await factory.create_from_db(app) - app.state.scheduler_task = asyncio.create_task( - scheduler_task(app), name="computational services scheduler" - ) - logger.info("Computational services Scheduler started") - - return start_scheduler - - -def on_app_shutdown(app: FastAPI) -> Callable[[], Coroutine[Any, Any, None]]: - async def stop_scheduler() -> None: - logger.info("Computational services Scheduler stopping...") - task = app.state.scheduler_task - with suppress(CancelledError): - app.state.comp_scheduler_running = False - task.cancel() - await task - app.state.scheduler = None - app.state.scheduler_task = None - logger.info("Computational services Scheduler stopped") - - return stop_scheduler - - -def setup(app: FastAPI): - app.add_event_handler("startup", on_app_startup(app)) - app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py deleted file mode 100644 index 42996ad7b74..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py +++ /dev/null @@ -1,486 +0,0 @@ -"""The scheduler shall be run as a background task. -Based on oSparc pipelines, it monitors when to start the next worker task(s), either one at a time or as a group of tasks. - -In principle the Scheduler maintains the comp_runs table in the database. -It contains how the pipeline was run and by whom. -It also contains the final result of the pipeline run. 
- -When a pipeline is scheduled first all the tasks contained in the DAG are set to PUBLISHED state. -Once the scheduler determines a task shall run, its state is set to PENDING, so that the sidecar can pick up the task. -The sidecar will then change the state to STARTED, then to SUCCESS or FAILED. - -""" -import asyncio -import logging -import traceback -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Set, Tuple, cast - -import networkx as nx -from aiopg.sa.engine import Engine -from models_library.clusters import ClusterID -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID, NodeIDStr -from models_library.projects_state import RunningState -from models_library.users import UserID -from pydantic import PositiveInt - -from ...core.errors import ( - ComputationalBackendNotConnectedError, - ComputationalSchedulerChangedError, - InvalidPipelineError, - PipelineNotFoundError, - SchedulerError, - TaskSchedulingError, -) -from ...models.domains.comp_pipelines import CompPipelineAtDB -from ...models.domains.comp_runs import CompRunsAtDB -from ...models.domains.comp_tasks import CompTaskAtDB, Image -from ...utils.computations import get_pipeline_state_from_task_states -from ...utils.scheduler import COMPLETED_STATES, Iteration, get_repository -from ..db.repositories.comp_pipelines import CompPipelinesRepository -from ..db.repositories.comp_runs import CompRunsRepository -from ..db.repositories.comp_tasks import CompTasksRepository - -logger = logging.getLogger(__name__) - - -@dataclass -class ScheduledPipelineParams: - cluster_id: ClusterID - mark_for_cancellation: bool = False - - -@dataclass -class BaseCompScheduler(ABC): - scheduled_pipelines: Dict[ - Tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams - ] - db_engine: Engine - wake_up_event: asyncio.Event = field(default_factory=asyncio.Event, init=False) - - async def run_new_pipeline( - self, user_id: UserID, project_id: ProjectID, cluster_id: ClusterID - ) -> None: - """Sets a new pipeline to be scheduled on the computational resources. - Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct - the scheduler to run the tasks on the defined cluster""" - # ensure the pipeline exists and is populated with something - dag = await self._get_pipeline_dag(project_id) - if not dag: - logger.warning( - "project %s has no computational dag defined. 
not scheduled for a run.", - f"{project_id=}", - ) - return - - runs_repo: CompRunsRepository = get_repository( - self.db_engine, CompRunsRepository - ) # type: ignore - new_run: CompRunsAtDB = await runs_repo.create( - user_id=user_id, - project_id=project_id, - cluster_id=cluster_id, - ) - self.scheduled_pipelines[ - (user_id, project_id, new_run.iteration) - ] = ScheduledPipelineParams(cluster_id=cluster_id) - # ensure the scheduler starts right away - self._wake_up_scheduler_now() - - async def stop_pipeline( - self, user_id: UserID, project_id: ProjectID, iteration: Optional[int] = None - ) -> None: - if not iteration: - # if no iteration given find the latest one in the list - possible_iterations = { - it - for u_id, p_id, it in self.scheduled_pipelines - if u_id == user_id and p_id == project_id - } - if not possible_iterations: - raise SchedulerError( - f"There are no pipeline scheduled for {user_id}:{project_id}" - ) - iteration = max(possible_iterations) - - # mark the scheduled pipeline for stopping - self.scheduled_pipelines[ - (user_id, project_id, iteration) - ].mark_for_cancellation = True - # ensure the scheduler starts right away - self._wake_up_scheduler_now() - - async def schedule_all_pipelines(self) -> None: - self.wake_up_event.clear() - # if one of the task throws, the other are NOT cancelled which is what we want - await asyncio.gather( - *[ - self._schedule_pipeline( - user_id, - project_id, - pipeline_params.cluster_id, - iteration, - pipeline_params.mark_for_cancellation, - ) - for ( - user_id, - project_id, - iteration, - ), pipeline_params in self.scheduled_pipelines.items() - ] - ) - - async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph: - comp_pipeline_repo: CompPipelinesRepository = get_repository( - self.db_engine, CompPipelinesRepository - ) # type: ignore - pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline( - project_id - ) - dag = pipeline_at_db.get_graph() - logger.debug("%s: current %s", f"{project_id=}", f"{dag=}") - return dag - - async def _get_pipeline_tasks( - self, project_id: ProjectID, pipeline_dag: nx.DiGraph - ) -> Dict[str, CompTaskAtDB]: - comp_tasks_repo: CompTasksRepository = get_repository( - self.db_engine, CompTasksRepository - ) # type: ignore - pipeline_comp_tasks: Dict[str, CompTaskAtDB] = { - str(t.node_id): t - for t in await comp_tasks_repo.get_comp_tasks(project_id) - if (str(t.node_id) in list(pipeline_dag.nodes())) - } - if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()): - raise InvalidPipelineError( - f"{project_id}" - f"The tasks defined for {project_id} do not contain all the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check." 
- ) - return pipeline_comp_tasks - - async def _update_run_result_from_tasks( - self, - user_id: UserID, - project_id: ProjectID, - iteration: PositiveInt, - pipeline_tasks: Dict[str, CompTaskAtDB], - ) -> RunningState: - - pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states( - list(pipeline_tasks.values()), - ) - await self._set_run_result( - user_id, project_id, iteration, pipeline_state_from_tasks - ) - return pipeline_state_from_tasks - - async def _set_run_result( - self, - user_id: UserID, - project_id: ProjectID, - iteration: PositiveInt, - run_result: RunningState, - ) -> None: - comp_runs_repo: CompRunsRepository = get_repository( - self.db_engine, CompRunsRepository - ) # type: ignore - await comp_runs_repo.set_run_result( - user_id=user_id, - project_id=project_id, - iteration=iteration, - result_state=run_result, - final_state=(run_result in COMPLETED_STATES), - ) - - async def _set_states_following_failed_to_aborted( - self, project_id: ProjectID, dag: nx.DiGraph - ) -> Dict[str, CompTaskAtDB]: - tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks(project_id, dag) - tasks_to_set_aborted: Set[NodeIDStr] = set() - for task in tasks.values(): - if task.state == RunningState.FAILED: - tasks_to_set_aborted.update(nx.bfs_tree(dag, f"{task.node_id}")) - tasks_to_set_aborted.remove(f"{task.node_id}") - for task in tasks_to_set_aborted: - tasks[f"{task}"].state = RunningState.ABORTED - if tasks_to_set_aborted: - # update the current states back in DB - comp_tasks_repo: CompTasksRepository = cast( - CompTasksRepository, - get_repository(self.db_engine, CompTasksRepository), - ) - await comp_tasks_repo.set_project_tasks_state( - project_id, - [NodeID(n) for n in tasks_to_set_aborted], - RunningState.ABORTED, - ) - return tasks - - async def _update_states_from_comp_backend( - self, - user_id: UserID, - cluster_id: ClusterID, - project_id: ProjectID, - pipeline_dag: nx.DiGraph, - ): - pipeline_tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks( - project_id, pipeline_dag - ) - tasks_completed: List[CompTaskAtDB] = [] - if tasks_supposedly_processing := [ - task - for task in pipeline_tasks.values() - if task.state in [RunningState.STARTED, RunningState.PENDING] - ]: - logger.debug( - "Currently pending/running tasks are: %s", - f"{((task.node_id, task.state) for task in tasks_supposedly_processing)}", - ) - # ensure these tasks still exist in the backend, if not we abort these - tasks_backend_status = await self._get_tasks_status( - user_id, cluster_id, tasks_supposedly_processing - ) - logger.debug("Computational states: %s", f"{tasks_backend_status=}") - for task, backend_state in zip( - tasks_supposedly_processing, tasks_backend_status - ): - if backend_state == RunningState.UNKNOWN: - tasks_completed.append(task) - # these tasks should be running but they are not available in the backend, something bad happened - logger.error( - "Project %s: %s has %s. The task disappeared from the dask-scheduler" - ", aborting the computational pipeline!\n" - "TIP: Check if the connected dask-scheduler was restarted.", - f"{project_id}", - f"{task=}", - f"{backend_state=}", - ) - elif backend_state in COMPLETED_STATES: - tasks_completed.append(task) - if tasks_completed: - await self._process_completed_tasks(user_id, cluster_id, tasks_completed) - - @abstractmethod - async def _start_tasks( - self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - scheduled_tasks: Dict[NodeID, Image], - ) -> None: - ... 
- - @abstractmethod - async def _get_tasks_status( - self, user_id: UserID, cluster_id: ClusterID, tasks: List[CompTaskAtDB] - ) -> List[RunningState]: - ... - - @abstractmethod - async def _stop_tasks( - self, user_id: UserID, cluster_id: ClusterID, tasks: List[CompTaskAtDB] - ) -> None: - ... - - @abstractmethod - async def _process_completed_tasks( - self, user_id: UserID, cluster_id: ClusterID, tasks: List[CompTaskAtDB] - ) -> None: - ... - - async def _schedule_pipeline( - self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - iteration: PositiveInt, - marked_for_stopping: bool, - ) -> None: - logger.debug( - "checking run of project [%s:%s] for user [%s]", - f"{project_id=}", - f"{iteration=}", - f"{user_id=}", - ) - - try: - dag: nx.DiGraph = await self._get_pipeline_dag(project_id) - # 1. Update our list of tasks with data from backend (state, results) - await self._update_states_from_comp_backend( - user_id, cluster_id, project_id, dag - ) - # 2. Any task following a FAILED task shall be ABORTED - comp_tasks = await self._set_states_following_failed_to_aborted( - project_id, dag - ) - # 3. do we want to stop the pipeline now? - if marked_for_stopping: - await self._schedule_tasks_to_stop( - user_id, project_id, cluster_id, comp_tasks - ) - else: - # let's get the tasks to schedule then - await self._schedule_tasks_to_start( - user_id, project_id, cluster_id, comp_tasks, dag - ) - # 4. Update the run result - pipeline_result = await self._update_run_result_from_tasks( - user_id, project_id, iteration, comp_tasks - ) - # 5. Are we done scheduling that pipeline? - if not dag.nodes() or pipeline_result in COMPLETED_STATES: - # there is nothing left, the run is completed, we're done here - self.scheduled_pipelines.pop((user_id, project_id, iteration), None) - logger.info( - "pipeline %s scheduling completed with result %s", - f"{project_id=}", - f"{pipeline_result=}", - ) - except PipelineNotFoundError: - logger.warning( - "pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler", - f"{project_id=}", - ) - await self._set_run_result( - user_id, project_id, iteration, RunningState.ABORTED - ) - self.scheduled_pipelines.pop((user_id, project_id, iteration), None) - except InvalidPipelineError as exc: - logger.warning( - "pipeline %s appears to be misconfigured, it will be removed from scheduler. 
Please check pipeline:\n%s", - f"{project_id=}", - exc, - ) - await self._set_run_result( - user_id, project_id, iteration, RunningState.ABORTED - ) - self.scheduled_pipelines.pop((user_id, project_id, iteration), None) - - async def _schedule_tasks_to_stop( - self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - comp_tasks: Dict[str, CompTaskAtDB], - ) -> None: - # get any running task and stop them - comp_tasks_repo: CompTasksRepository = get_repository( - self.db_engine, CompTasksRepository - ) # type: ignore - await comp_tasks_repo.mark_project_published_tasks_as_aborted(project_id) - # stop any remaining running task, these are already submitted - tasks_to_stop = [ - t - for t in comp_tasks.values() - if t.state - in [RunningState.STARTED, RunningState.RETRY, RunningState.PENDING] - ] - await self._stop_tasks(user_id, cluster_id, tasks_to_stop) - - async def _schedule_tasks_to_start( - self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - comp_tasks: Dict[str, CompTaskAtDB], - dag: nx.DiGraph, - ): - # filter out the successfully completed tasks - dag.remove_nodes_from( - { - node_id - for node_id, t in comp_tasks.items() - if t.state == RunningState.SUCCESS - } - ) - next_task_node_ids = [node_id for node_id, degree in dag.in_degree() if degree == 0] # type: ignore - - # get the tasks to start - tasks_ready_to_start: Dict[NodeID, CompTaskAtDB] = { - node_id: comp_tasks[f"{node_id}"] - for node_id in next_task_node_ids - if comp_tasks[f"{node_id}"].state == RunningState.PUBLISHED - } - - if not tasks_ready_to_start: - # nothing to do - return - - # Change the tasks state to PENDING - comp_tasks_repo: CompTasksRepository = get_repository( - self.db_engine, CompTasksRepository - ) # type: ignore - await comp_tasks_repo.set_project_tasks_state( - project_id, list(tasks_ready_to_start.keys()), RunningState.PENDING - ) - - # we pass the tasks to the dask-client in a gather such that each task can be stopped independently - results = await asyncio.gather( - *[ - self._start_tasks( - user_id, - project_id, - cluster_id, - scheduled_tasks={node_id: task.image}, - ) - for node_id, task in tasks_ready_to_start.items() - ], - return_exceptions=True, - ) - # Handling errors raised when _start_tasks(...) - for r, t in zip(results, tasks_ready_to_start): - if isinstance(r, TaskSchedulingError): - logger.error( - "Project '%s''s task '%s' could not be scheduled due to the following: %s", - r.project_id, - r.node_id, - f"{r}", - ) - - await comp_tasks_repo.set_project_tasks_state( - project_id, - [r.node_id], - RunningState.FAILED, - r.get_errors(), - ) - elif isinstance( - r, - ( - ComputationalBackendNotConnectedError, - ComputationalSchedulerChangedError, - ), - ): - logger.error( - "Issue with computational backend: %s. Tasks are set back " - "to PUBLISHED state until scheduler comes back!", - r, - ) - # we should try re-connecting. 
- # in the meantime we cannot schedule tasks on the scheduler, - # let's put these tasks back to PUBLISHED, so they might be re-submitted later - await asyncio.gather( - comp_tasks_repo.set_project_tasks_state( - project_id, - list(tasks_ready_to_start.keys()), - RunningState.PUBLISHED, - ), - ) - elif isinstance(r, Exception): - logger.error( - "Unexpected error for %s with %s on %s happened when scheduling %s:\n%s\n%s", - f"{user_id=}", - f"{project_id=}", - f"{cluster_id=}", - f"{tasks_ready_to_start.keys()=}", - f"{r}", - "".join(traceback.format_tb(r.__traceback__)), - ) - await comp_tasks_repo.set_project_tasks_state( - project_id, [t], RunningState.FAILED - ) - - def _wake_up_scheduler_now(self) -> None: - self.wake_up_event.set() diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/dask_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/dask_scheduler.py deleted file mode 100644 index ffa24390cb8..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/dask_scheduler.py +++ /dev/null @@ -1,259 +0,0 @@ -import asyncio -import logging -from contextlib import asynccontextmanager -from dataclasses import dataclass -from typing import AsyncIterator, Union - -from dask_task_models_library.container_tasks.errors import TaskCancelledError -from dask_task_models_library.container_tasks.events import ( - TaskLogEvent, - TaskProgressEvent, - TaskStateEvent, -) -from dask_task_models_library.container_tasks.io import TaskOutputData -from models_library.clusters import DEFAULT_CLUSTER_ID, Cluster, ClusterID -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.projects_state import RunningState -from models_library.rabbitmq_messages import ( - InstrumentationRabbitMessage, - LoggerRabbitMessage, - ProgressRabbitMessageNode, -) -from models_library.users import UserID -from simcore_postgres_database.models.comp_tasks import NodeClass -from simcore_service_director_v2.core.errors import TaskSchedulingError - -from ...core.settings import ComputationalBackendSettings -from ...models.domains.comp_tasks import CompTaskAtDB, Image -from ...modules.dask_client import DaskClient, TaskHandlers -from ...modules.dask_clients_pool import DaskClientsPool -from ...modules.db.repositories.clusters import ClustersRepository -from ...utils.dask import ( - clean_task_output_and_log_files_if_invalid, - parse_dask_job_id, - parse_output_data, -) -from ...utils.scheduler import get_repository -from ..db.repositories.comp_tasks import CompTasksRepository -from ..rabbitmq import RabbitMQClient -from .base_scheduler import BaseCompScheduler - -logger = logging.getLogger(__name__) - - -@asynccontextmanager -async def _cluster_dask_client( - user_id: UserID, cluster_id: ClusterID, scheduler: "DaskScheduler" -) -> AsyncIterator[DaskClient]: - cluster: Cluster = scheduler.settings.default_cluster - if cluster_id != DEFAULT_CLUSTER_ID: - clusters_repo: ClustersRepository = get_repository( - scheduler.db_engine, ClustersRepository - ) # type: ignore - cluster = await clusters_repo.get_cluster(user_id, cluster_id) - async with scheduler.dask_clients_pool.acquire(cluster) as client: - yield client - - -@dataclass -class DaskScheduler(BaseCompScheduler): - settings: ComputationalBackendSettings - dask_clients_pool: DaskClientsPool - rabbitmq_client: RabbitMQClient - - def __post_init__(self): - self.dask_clients_pool.register_handlers( - 
TaskHandlers( - self._task_state_change_handler, - self._task_progress_change_handler, - self._task_log_change_handler, - ) - ) - - async def _start_tasks( - self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - scheduled_tasks: dict[NodeID, Image], - ): - # now transfer the pipeline to the dask scheduler - async with _cluster_dask_client(user_id, cluster_id, self) as client: - task_job_ids: list[ - tuple[NodeID, str] - ] = await client.send_computation_tasks( - user_id=user_id, - project_id=project_id, - cluster_id=cluster_id, - tasks=scheduled_tasks, - callback=self._wake_up_scheduler_now, - ) - logger.debug( - "started following tasks (node_id, job_id)[%s] on cluster %s", - f"{task_job_ids=}", - f"{cluster_id=}", - ) - # update the database so we do have the correct job_ids there - comp_tasks_repo: CompTasksRepository = get_repository( - self.db_engine, CompTasksRepository - ) # type: ignore - await asyncio.gather( - *[ - comp_tasks_repo.set_project_task_job_id(project_id, node_id, job_id) - for node_id, job_id in task_job_ids - ] - ) - - async def _get_tasks_status( - self, user_id: UserID, cluster_id: ClusterID, tasks: list[CompTaskAtDB] - ) -> list[RunningState]: - async with _cluster_dask_client(user_id, cluster_id, self) as client: - return await client.get_tasks_status([f"{t.job_id}" for t in tasks]) - - async def _stop_tasks( - self, user_id: UserID, cluster_id: ClusterID, tasks: list[CompTaskAtDB] - ) -> None: - async with _cluster_dask_client(user_id, cluster_id, self) as client: - await asyncio.gather( - *[client.abort_computation_task(t.job_id) for t in tasks if t.job_id] - ) - - async def _process_completed_tasks( - self, user_id: UserID, cluster_id: ClusterID, tasks: list[CompTaskAtDB] - ) -> None: - try: - async with _cluster_dask_client(user_id, cluster_id, self) as client: - tasks_results = await asyncio.gather( - *[client.get_task_result(t.job_id or "undefined") for t in tasks], - return_exceptions=True, - ) - await asyncio.gather( - *[ - self._process_task_result(task, result) - for task, result in zip(tasks, tasks_results) - ] - ) - finally: - async with _cluster_dask_client(user_id, cluster_id, self) as client: - await asyncio.gather( - *[client.release_task_result(t.job_id) for t in tasks if t.job_id] - ) - - async def _process_task_result( - self, task: CompTaskAtDB, result: Union[Exception, TaskOutputData] - ) -> None: - logger.debug("received %s result: %s", f"{task=}", f"{result=}") - task_final_state = RunningState.FAILED - errors = None - - if task.job_id is not None: - ( - service_key, - service_version, - user_id, - project_id, - node_id, - ) = parse_dask_job_id(task.job_id) - - assert task.project_id == project_id # nosec - assert task.node_id == node_id # nosec - - try: - if isinstance(result, TaskOutputData): - # success! 
- await parse_output_data( - self.db_engine, - task.job_id, - result, - ) - task_final_state = RunningState.SUCCESS - - else: - if isinstance(result, TaskCancelledError): - task_final_state = RunningState.ABORTED - else: - task_final_state = RunningState.FAILED - # we need to remove any invalid files in the storage - await clean_task_output_and_log_files_if_invalid( - self.db_engine, user_id, project_id, node_id - ) - except TaskSchedulingError as err: - task_final_state = RunningState.FAILED - errors = err.get_errors() - logger.debug( - "Unexpected failure while processing results of %s: %s", - f"{task=}", - f"{errors=}", - ) - - # instrumentation - message = InstrumentationRabbitMessage( - metrics="service_stopped", - user_id=user_id, - project_id=task.project_id, - node_id=task.node_id, - service_uuid=task.node_id, - service_type=NodeClass.COMPUTATIONAL.value, - service_key=service_key, - service_tag=service_version, - result=task_final_state, - ) - await self.rabbitmq_client.publish(message.channel_name, message.json()) - - await CompTasksRepository(self.db_engine).set_project_tasks_state( - task.project_id, [task.node_id], task_final_state, errors=errors - ) - - async def _task_state_change_handler(self, event: str) -> None: - task_state_event = TaskStateEvent.parse_raw(event) - logger.debug( - "received task state update: %s", - task_state_event, - ) - service_key, service_version, user_id, project_id, node_id = parse_dask_job_id( - task_state_event.job_id - ) - - if task_state_event.state == RunningState.STARTED: - message = InstrumentationRabbitMessage( - metrics="service_started", - user_id=user_id, - project_id=project_id, - node_id=node_id, - service_uuid=node_id, - service_type=NodeClass.COMPUTATIONAL.value, - service_key=service_key, - service_tag=service_version, - ) - await self.rabbitmq_client.publish(message.channel_name, message.json()) - - await CompTasksRepository(self.db_engine).set_project_tasks_state( - project_id, [node_id], task_state_event.state - ) - - async def _task_progress_change_handler(self, event: str) -> None: - task_progress_event = TaskProgressEvent.parse_raw(event) - logger.debug("received task progress update: %s", task_progress_event) - *_, user_id, project_id, node_id = parse_dask_job_id(task_progress_event.job_id) - message = ProgressRabbitMessageNode( - user_id=user_id, - project_id=project_id, - node_id=node_id, - progress=task_progress_event.progress, - ) - await self.rabbitmq_client.publish(message.channel_name, message.json()) - - async def _task_log_change_handler(self, event: str) -> None: - task_log_event = TaskLogEvent.parse_raw(event) - logger.debug("received task log update: %s", task_log_event) - *_, user_id, project_id, node_id = parse_dask_job_id(task_log_event.job_id) - message = LoggerRabbitMessage( - user_id=user_id, - project_id=project_id, - node_id=node_id, - messages=[task_log_event.log], - log_level=logging.INFO, - ) - - await self.rabbitmq_client.publish(message.channel_name, message.json()) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/factory.py b/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/factory.py deleted file mode 100644 index 87346d69aa8..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/factory.py +++ /dev/null @@ -1,54 +0,0 @@ -import logging -from typing import cast - -from fastapi import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID -from 
simcore_service_director_v2.modules.dask_clients_pool import DaskClientsPool - -from ...core.errors import ConfigurationError -from ...models.domains.comp_runs import CompRunsAtDB -from ...modules.rabbitmq import get_rabbitmq_client -from ...utils.scheduler import SCHEDULED_STATES, get_repository -from ..db.repositories.comp_runs import CompRunsRepository -from .base_scheduler import BaseCompScheduler, ScheduledPipelineParams -from .dask_scheduler import DaskScheduler - -logger = logging.getLogger(__name__) - - -async def create_from_db(app: FastAPI) -> BaseCompScheduler: - if not hasattr(app.state, "engine"): - raise ConfigurationError( - "Database connection is missing. Please check application configuration." - ) - db_engine = app.state.engine - runs_repository: CompRunsRepository = cast( - CompRunsRepository, get_repository(db_engine, CompRunsRepository) - ) - - # get currently scheduled runs - runs: list[CompRunsAtDB] = await runs_repository.list( - filter_by_state=SCHEDULED_STATES - ) - - logger.debug( - "Following scheduled comp_runs found still to be scheduled: %s", - runs if runs else "NONE", - ) - - logger.info("Creating Dask-based scheduler...") - return DaskScheduler( - settings=app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, - dask_clients_pool=DaskClientsPool.instance(app), - rabbitmq_client=get_rabbitmq_client(app), - db_engine=db_engine, - scheduled_pipelines={ - (r.user_id, r.project_uuid, r.iteration): ScheduledPipelineParams( - cluster_id=r.cluster_id - if r.cluster_id is not None - else DEFAULT_CLUSTER_ID, - mark_for_cancellation=False, - ) - for r in runs - }, - ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py index 81d0f4ef7f7..849640eebfe 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dask_client.py @@ -1,5 +1,5 @@ """The dask client is the osparc part that communicates with a -dask-scheduler/worker backend directly or through a dask-gateway. +dask-scheduler/worker backend. 
From dask documentation any Data or function must follow the criteria to be usable in dask [http://distributed.dask.org/en/stable/limitations.html?highlight=cloudpickle#assumptions-on-functions-and-data]: @@ -9,36 +9,61 @@ """ import asyncio -import json import logging import traceback -from collections import deque -from dataclasses import dataclass, field +from collections.abc import Callable, Iterable +from copy import deepcopy +from dataclasses import dataclass from http.client import HTTPException -from typing import Any, Callable, Deque, Final, Optional +from typing import Any, Final, cast import distributed +from aiohttp import ClientResponseError +from common_library.json_serialization import json_dumps from dask_task_models_library.container_tasks.docker import DockerBasicAuth from dask_task_models_library.container_tasks.errors import TaskCancelledError +from dask_task_models_library.container_tasks.events import TaskProgressEvent from dask_task_models_library.container_tasks.io import ( TaskCancelEventName, TaskInputData, TaskOutputData, TaskOutputDataSchema, ) +from dask_task_models_library.container_tasks.protocol import ( + ContainerEnvsDict, + ContainerLabelsDict, + ContainerRemoteFct, + ContainerTaskParameters, + LogFileUploadURL, + TaskOwner, +) +from dask_task_models_library.container_tasks.utils import generate_dask_job_id +from dask_task_models_library.models import ( + TASK_LIFE_CYCLE_EVENT, + TASK_RUNNING_PROGRESS_EVENT, + DaskJobID, + DaskResources, + TaskLifeCycleState, +) +from dask_task_models_library.resource_constraints import ( + create_ec2_resource_constraint_key, +) from fastapi import FastAPI -from models_library.clusters import ClusterAuthentication, ClusterID +from models_library.api_schemas_directorv2.clusters import ClusterDetails, Scheduler +from models_library.clusters import ClusterAuthentication, ClusterTypeInModel from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState -from models_library.services_resources import BootMode +from models_library.resource_tracker import HardwareInfo +from models_library.services import ServiceRunID from models_library.users import UserID -from pydantic import parse_obj_as +from pydantic import TypeAdapter, ValidationError from pydantic.networks import AnyUrl +from servicelib.logging_utils import log_catch, log_context from settings_library.s3 import S3Settings +from simcore_sdk.node_ports_common.exceptions import NodeportsException from simcore_sdk.node_ports_v2 import FileLinkType -from simcore_service_director_v2.modules.storage import StorageClient -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed @@ -47,64 +72,34 @@ ComputationalBackendNoS3AccessError, ComputationalBackendTaskNotFoundError, ComputationalBackendTaskResultsNotReadyError, + TaskSchedulingError, ) from ..core.settings import AppSettings, ComputationalBackendSettings -from ..models.domains.comp_tasks import Image -from ..models.schemas.clusters import ClusterDetails, Scheduler -from ..utils.dask import ( - check_communication_with_scheduler_is_open, - check_if_cluster_is_able_to_run_pipeline, - check_scheduler_is_still_the_same, - check_scheduler_status, - compute_input_data, - compute_output_data_schema, - compute_service_log_file_upload_link, - create_node_ports, - 
dask_sub_consumer_task, - from_node_reqs_to_dask_resources, - generate_dask_job_id, -) +from ..models.comp_runs import RunMetadataDict +from ..models.comp_tasks import Image +from ..modules.storage import StorageClient +from ..utils import dask as dask_utils from ..utils.dask_client_utils import ( DaskSubSystem, TaskHandlers, - create_internal_client_based_on_auth, + UnixTimestamp, + connect_to_dask_scheduler, ) +from .db import get_db_engine -logger = logging.getLogger(__name__) - - -_DASK_TASK_STATUS_RUNNING_STATE_MAP = { - "new": RunningState.PENDING, - "released": RunningState.PENDING, - "waiting": RunningState.PENDING, - "no-worker": RunningState.PENDING, - "processing": RunningState.STARTED, - "memory": RunningState.SUCCESS, - "erred": RunningState.FAILED, -} - -DASK_DEFAULT_TIMEOUT_S = 1 - - -ServiceKey = str -ServiceVersion = str -LogFileUploadURL = AnyUrl -Commands = list[str] -RemoteFct = Callable[ - [ - DockerBasicAuth, - ServiceKey, - ServiceVersion, - TaskInputData, - TaskOutputDataSchema, - LogFileUploadURL, - Commands, - Optional[S3Settings], - BootMode, - ], - TaskOutputData, -] -UserCallbackInSepThread = Callable[[], None] +_logger = logging.getLogger(__name__) + + +_DASK_DEFAULT_TIMEOUT_S: Final[int] = 5 + + +_UserCallbackInSepThread = Callable[[], None] + + +@dataclass(frozen=True, kw_only=True, slots=True) +class PublishedComputationTask: + node_id: NodeID + job_id: DaskJobID @dataclass @@ -112,9 +107,8 @@ class DaskClient: app: FastAPI backend: DaskSubSystem settings: ComputationalBackendSettings - tasks_file_link_type: Final[FileLinkType] - - _subscribed_tasks: list[asyncio.Task] = field(default_factory=list) + tasks_file_link_type: FileLinkType + cluster_type: ClusterTypeInModel @classmethod async def create( @@ -124,111 +118,175 @@ async def create( endpoint: AnyUrl, authentication: ClusterAuthentication, tasks_file_link_type: FileLinkType, + cluster_type: ClusterTypeInModel, ) -> "DaskClient": - logger.info( - "Initiating connection to %s with auth: %s", - f"dask-scheduler/gateway at {endpoint}", + _logger.info( + "Initiating connection to %s with auth: %s, type: %s", + f"dask-scheduler at {endpoint}", authentication, + cluster_type, ) async for attempt in AsyncRetrying( reraise=True, - before_sleep=before_sleep_log(logger, logging.INFO), + before_sleep=before_sleep_log(_logger, logging.INFO), wait=wait_fixed(0.3), stop=stop_after_attempt(3), ): with attempt: - logger.debug( + _logger.debug( "Connecting to %s, attempt %s...", endpoint, attempt.retry_state.attempt_number, ) - backend = await create_internal_client_based_on_auth( - endpoint, authentication - ) - check_scheduler_status(backend.client) + backend = await connect_to_dask_scheduler(endpoint, authentication) + dask_utils.check_scheduler_status(backend.client) instance = cls( app=app, backend=backend, settings=settings, tasks_file_link_type=tasks_file_link_type, + cluster_type=cluster_type, ) - logger.info( + _logger.info( "Connection to %s succeeded [%s]", - f"dask-scheduler/gateway at {endpoint}", - json.dumps(attempt.retry_state.retry_object.statistics), + f"dask-scheduler at {endpoint}", + json_dumps(attempt.retry_state.retry_object.statistics), ) - logger.info( + _logger.info( "Scheduler info:\n%s", - json.dumps(backend.client.scheduler_info(), indent=2), + json_dumps(backend.client.scheduler_info(), indent=2), ) return instance # this is to satisfy pylance - raise ValueError("Could not create client") + err_msg = "Could not create client" + raise ValueError(err_msg) async def delete(self) -> None: - 
logger.debug("closing dask client...") - for task in self._subscribed_tasks: - task.cancel() - await asyncio.gather(*self._subscribed_tasks, return_exceptions=True) - await self.backend.close() - logger.info("dask client properly closed") + with log_context(_logger, logging.INFO, msg="close dask client"): + await self.backend.close() def register_handlers(self, task_handlers: TaskHandlers) -> None: - _EVENT_CONSUMER_MAP = [ - (self.backend.state_sub, task_handlers.task_change_handler), - (self.backend.progress_sub, task_handlers.task_progress_handler), - (self.backend.logs_sub, task_handlers.task_log_handler), - ] - self._subscribed_tasks = [ - asyncio.create_task( - dask_sub_consumer_task(dask_sub, handler), - name=f"{dask_sub.name}_dask_sub_consumer_task", - ) - for dask_sub, handler in _EVENT_CONSUMER_MAP + _event_consumer_map = [ + (TaskProgressEvent.topic_name(), task_handlers.task_progress_handler), ] + for topic_name, handler in _event_consumer_map: + self.backend.client.subscribe_topic(topic_name, handler) - async def send_computation_tasks( + async def _publish_in_dask( # noqa: PLR0913 # pylint: disable=too-many-arguments self, - user_id: UserID, - project_id: ProjectID, - cluster_id: ClusterID, - tasks: dict[NodeID, Image], - callback: UserCallbackInSepThread, - remote_fct: Optional[RemoteFct] = None, - ) -> list[tuple[NodeID, str]]: - """actually sends the function remote_fct to be remotely executed. if None is kept then the default - function that runs container will be started.""" - + *, + remote_fct: ContainerRemoteFct | None = None, + node_image: Image, + input_data: TaskInputData, + output_data_keys: TaskOutputDataSchema, + log_file_url: AnyUrl, + task_envs: ContainerEnvsDict, + task_labels: ContainerLabelsDict, + task_owner: TaskOwner, + s3_settings: S3Settings | None, + dask_resources: DaskResources, + node_id: NodeID, + job_id: DaskJobID, + callback: _UserCallbackInSepThread, + ) -> PublishedComputationTask: def _comp_sidecar_fct( + *, + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: """This function is serialized by the Dask client and sent over to the Dask sidecar(s) Therefore, (screaming here) DO NOT MOVE THAT IMPORT ANYWHERE ELSE EVER!!""" - from simcore_service_dask_sidecar.tasks import run_computational_sidecar - - return run_computational_sidecar( - docker_auth, - service_key, - service_version, - input_data, - output_data_keys, - log_file_url, - command, - s3_settings, - boot_mode, + from simcore_service_dask_sidecar.worker import ( # type: ignore[import-not-found] # this runs inside the dask-sidecar + run_computational_sidecar, + ) + + return run_computational_sidecar( # type: ignore[no-any-return] # this runs inside the dask-sidecar + task_parameters=task_parameters, + docker_auth=docker_auth, + log_file_url=log_file_url, + s3_settings=s3_settings, ) if remote_fct is None: remote_fct = _comp_sidecar_fct - list_of_node_id_to_job_id: list[tuple[NodeID, str]] = [] + try: + assert self.app.state # nosec + assert self.app.state.settings # nosec + settings: AppSettings = self.app.state.settings + task_future = self.backend.client.submit( + remote_fct, + task_parameters=ContainerTaskParameters( + image=node_image.name, + 
tag=node_image.tag, + input_data=input_data, + output_data_keys=output_data_keys, + command=node_image.command, + envs=task_envs, + labels=task_labels, + boot_mode=node_image.boot_mode, + task_owner=task_owner, + ), + docker_auth=DockerBasicAuth( + server_address=settings.DIRECTOR_V2_DOCKER_REGISTRY.resolved_registry_url, + username=settings.DIRECTOR_V2_DOCKER_REGISTRY.REGISTRY_USER, + password=settings.DIRECTOR_V2_DOCKER_REGISTRY.REGISTRY_PW, + ), + log_file_url=log_file_url, + s3_settings=s3_settings, + key=job_id, + resources=dask_resources, + retries=0, + pure=False, + ) + # NOTE: the callback is running in a secondary thread, and takes a future as arg + task_future.add_done_callback(lambda _: callback()) + await distributed.Variable(job_id, client=self.backend.client).set( + task_future + ) + + await dask_utils.wrap_client_async_routine( + self.backend.client.publish_dataset(task_future, name=job_id) + ) + + _logger.debug( + "Dask task %s started [%s]", + f"{task_future.key=}", + f"{node_image.command=}", + ) + return PublishedComputationTask(node_id=node_id, job_id=DaskJobID(job_id)) + except Exception: + # Dask raises a base Exception here in case of connection error, this will raise a more precise one + dask_utils.check_scheduler_status(self.backend.client) + # if the connection is good, then the problem is different, so we re-raise + raise + + async def send_computation_tasks( + self, + *, + user_id: UserID, + project_id: ProjectID, + tasks: dict[NodeID, Image], + callback: _UserCallbackInSepThread, + remote_fct: ContainerRemoteFct | None = None, + metadata: RunMetadataDict, + hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, + ) -> list[PublishedComputationTask]: + """actually sends the function remote_fct to be remotely executed. if None is kept then the default + function that runs container will be started. + + Raises: + - ComputationalBackendNoS3AccessError when storage is not accessible + - ComputationalSchedulerChangedError when expected scheduler changed + - ComputationalBackendNotConnectedError when scheduler is not connected/running + - MissingComputationalResourcesError (only for internal cluster) + - InsuficientComputationalResourcesError (only for internal cluster) + - TaskSchedulingError when any other error happens + """ + + list_of_node_id_to_job_id: list[PublishedComputationTask] = [] for node_id, node_image in tasks.items(): job_id = generate_dask_job_id( service_key=node_image.name, @@ -238,29 +296,33 @@ def _comp_sidecar_fct( node_id=node_id, ) assert node_image.node_requirements # nosec - dask_resources = from_node_reqs_to_dask_resources( + dask_resources = dask_utils.from_node_reqs_to_dask_resources( node_image.node_requirements ) + if hardware_info.aws_ec2_instances: + dask_resources[ + create_ec2_resource_constraint_key( + hardware_info.aws_ec2_instances[0] + ) + ] = 1 - check_scheduler_is_still_the_same( + dask_utils.check_scheduler_is_still_the_same( self.backend.scheduler_id, self.backend.client ) - check_communication_with_scheduler_is_open(self.backend.client) - check_scheduler_status(self.backend.client) - # NOTE: in case it's a gateway we do not check a priori if the task + dask_utils.check_communication_with_scheduler_is_open(self.backend.client) + dask_utils.check_scheduler_status(self.backend.client) + # NOTE: in case it is an on-demand cluster + # we do not check a priori if the task # is runnable because we CAN'T. A cluster might auto-scale, the worker(s) - # might also auto-scale and the gateway does not know that a priori. 
+ # might also auto-scale we do not know that a priori. # So, we'll just send the tasks over and see what happens after a while. - # TODO: one idea is to do a lazy checking. A cluster might take a few seconds to run a - # sidecar, which will then populate the scheduler with resources available on the cluster - if not self.backend.gateway: - check_if_cluster_is_able_to_run_pipeline( + if self.cluster_type != ClusterTypeInModel.ON_DEMAND: + dask_utils.check_if_cluster_is_able_to_run_pipeline( project_id=project_id, node_id=node_id, scheduler_info=self.backend.client.scheduler_info(), task_resources=dask_resources, node_image=node_image, - cluster_id=cluster_id, ) s3_settings = None @@ -270,127 +332,151 @@ def _comp_sidecar_fct( user_id ) except HTTPException as err: - raise ComputationalBackendNoS3AccessError() from err - - # This instance is created only once so it can be reused in calls below - node_ports = await create_node_ports( - db_engine=self.app.state.engine, - user_id=user_id, - project_id=project_id, - node_id=node_id, - ) - # NOTE: for download there is no need to go with S3 links - input_data = await compute_input_data( - self.app, - user_id, - project_id, - node_id, - ports=node_ports, - file_link_type=FileLinkType.PRESIGNED, - ) - output_data_keys = await compute_output_data_schema( - self.app, - user_id, - project_id, - node_id, - ports=node_ports, - file_link_type=self.tasks_file_link_type, - ) - log_file_url = await compute_service_log_file_upload_link( - user_id, - project_id, - node_id, - file_link_type=self.tasks_file_link_type, - ) + raise ComputationalBackendNoS3AccessError from err try: - assert self.app.state # nosec - assert self.app.state.settings # nosec - settings: AppSettings = self.app.state.settings - task_future = self.backend.client.submit( - remote_fct, - docker_auth=DockerBasicAuth( - server_address=settings.DIRECTOR_V2_DOCKER_REGISTRY.resolved_registry_url, - username=settings.DIRECTOR_V2_DOCKER_REGISTRY.REGISTRY_USER, - password=settings.DIRECTOR_V2_DOCKER_REGISTRY.REGISTRY_PW, - ), - service_key=node_image.name, - service_version=node_image.tag, - input_data=input_data, - output_data_keys=output_data_keys, - log_file_url=log_file_url, - command=node_image.command, - s3_settings=s3_settings, - boot_mode=node_image.boot_mode, - key=job_id, - resources=dask_resources, - retries=0, + # This instance is created only once so it can be reused in calls below + node_ports = await dask_utils.create_node_ports( + db_engine=get_db_engine(self.app), + user_id=user_id, + project_id=project_id, + node_id=node_id, + ) + # NOTE: for download there is no need to go with S3 links + input_data = await dask_utils.compute_input_data( + project_id=project_id, + node_id=node_id, + node_ports=node_ports, + file_link_type=FileLinkType.PRESIGNED, + ) + output_data_keys = await dask_utils.compute_output_data_schema( + user_id=user_id, + project_id=project_id, + node_id=node_id, + node_ports=node_ports, + file_link_type=self.tasks_file_link_type, + ) + log_file_url = await dask_utils.compute_service_log_file_upload_link( + user_id, + project_id, + node_id, + file_link_type=self.tasks_file_link_type, + ) + task_labels = dask_utils.compute_task_labels( + user_id=user_id, + project_id=project_id, + node_id=node_id, + run_metadata=metadata, + node_requirements=node_image.node_requirements, ) - # NOTE: the callback is running in a secondary thread, and takes a future as arg - task_future.add_done_callback(lambda _: callback()) - - list_of_node_id_to_job_id.append((node_id, job_id)) - 
await self.backend.client.publish_dataset( - task_future, name=job_id - ) # type: ignore - - logger.debug( - "Dask task %s started [%s]", - f"{task_future.key=}", - f"{node_image.command=}", + task_envs = await dask_utils.compute_task_envs( + self.app, + user_id=user_id, + project_id=project_id, + node_id=node_id, + node_image=node_image, + metadata=metadata, + resource_tracking_run_id=resource_tracking_run_id, + wallet_id=metadata.get("wallet_id"), + ) + task_owner = dask_utils.compute_task_owner( + user_id, project_id, node_id, metadata.get("project_metadata", {}) ) - except Exception: - # Dask raises a base Exception here in case of connection error, this will raise a more precise one - check_scheduler_status(self.backend.client) - # if the connection is good, then the problem is different, so we re-raise - raise + list_of_node_id_to_job_id.append( + await self._publish_in_dask( + remote_fct=remote_fct, + node_image=node_image, + input_data=input_data, + output_data_keys=output_data_keys, + log_file_url=log_file_url, + task_envs=task_envs, + task_labels=task_labels, + task_owner=task_owner, + s3_settings=s3_settings, + dask_resources=dask_resources, + node_id=node_id, + job_id=job_id, + callback=callback, + ) + ) + except (NodeportsException, ValidationError, ClientResponseError) as exc: + raise TaskSchedulingError( + project_id=project_id, node_id=node_id, msg=f"{exc}" + ) from exc + return list_of_node_id_to_job_id - async def get_task_status(self, job_id: str) -> RunningState: - return (await self.get_tasks_status(job_ids=[job_id]))[0] + async def get_tasks_progress( + self, job_ids: list[str] + ) -> list[TaskProgressEvent | None]: + dask_utils.check_scheduler_is_still_the_same( + self.backend.scheduler_id, self.backend.client + ) + dask_utils.check_communication_with_scheduler_is_open(self.backend.client) + dask_utils.check_scheduler_status(self.backend.client) - async def get_tasks_status(self, job_ids: list[str]) -> list[RunningState]: - check_scheduler_is_still_the_same( + async def _get_task_progress(job_id: str) -> TaskProgressEvent | None: + dask_events: tuple[tuple[UnixTimestamp, str], ...] 
= ( + await self.backend.client.get_events( + TASK_RUNNING_PROGRESS_EVENT.format(key=job_id) + ) + ) + if not dask_events: + return None + # we are interested in the last event + return TaskProgressEvent.model_validate_json(dask_events[-1][1]) + + return await asyncio.gather(*(_get_task_progress(job_id) for job_id in job_ids)) + + async def get_tasks_status(self, job_ids: Iterable[str]) -> list[RunningState]: + dask_utils.check_scheduler_is_still_the_same( self.backend.scheduler_id, self.backend.client ) - check_communication_with_scheduler_is_open(self.backend.client) - check_scheduler_status(self.backend.client) - # try to get the task from the scheduler - task_statuses = await self.backend.client.run_on_scheduler( - lambda dask_scheduler: dask_scheduler.get_task_status(keys=job_ids) - ) # type: ignore - logger.debug("found dask task statuses: %s", f"{task_statuses=}") - - running_states: Deque[RunningState] = deque() - for job_id in job_ids: - dask_status = task_statuses.get(job_id, "lost") - if dask_status == "erred": - # find out if this was a cancellation - exception = await distributed.Future(job_id).exception(timeout=DASK_DEFAULT_TIMEOUT_S) # type: ignore - - if isinstance(exception, TaskCancelledError): - running_states.append(RunningState.ABORTED) - else: + dask_utils.check_communication_with_scheduler_is_open(self.backend.client) + dask_utils.check_scheduler_status(self.backend.client) + + async def _get_task_state(job_id: str) -> RunningState: + dask_events: tuple[tuple[UnixTimestamp, str], ...] = ( + await self.backend.client.get_events( + TASK_LIFE_CYCLE_EVENT.format(key=job_id) + ) + ) + if not dask_events: + return RunningState.UNKNOWN + # we are interested in the last event + parsed_event = TaskLifeCycleState.model_validate(dask_events[-1][1]) + + if parsed_event.state == RunningState.FAILED: + try: + # find out if this was a cancellation + var = distributed.Variable(job_id, client=self.backend.client) + future: distributed.Future = await var.get( + timeout=_DASK_DEFAULT_TIMEOUT_S + ) + exception = await future.exception(timeout=_DASK_DEFAULT_TIMEOUT_S) + assert isinstance(exception, Exception) # nosec + + if isinstance(exception, TaskCancelledError): + return RunningState.ABORTED assert exception # nosec - logger.warning( + _logger.warning( "Task %s completed in error:\n%s\nTrace:\n%s", job_id, exception, - "".join( - traceback.format_exception( - exception.__class__, exception, exception.__traceback__ - ) - ), + "".join(traceback.format_exception(exception)), ) - running_states.append(RunningState.FAILED) - else: - running_states.append( - _DASK_TASK_STATUS_RUNNING_STATE_MAP.get( - dask_status, RunningState.UNKNOWN + return RunningState.FAILED + except TimeoutError: + _logger.warning( + "Task %s could not be retrieved from dask-scheduler, it is lost\n" + "TIP:If the task was unpublished this can happen, or if the dask-scheduler was restarted.", + job_id, ) - ) + return RunningState.UNKNOWN + + return parsed_event.state - return list(running_states) + return await asyncio.gather(*(_get_task_state(job_id) for job_id in job_ids)) async def abort_computation_task(self, job_id: str) -> None: # Dask future may be cancelled, but only a future that was not already taken by @@ -398,45 +484,68 @@ async def abort_computation_task(self, job_id: str) -> None: # If the sidecar has already taken the job, then the cancellation must be user-defined. 
# therefore the dask PUB is used, and the dask-sidecar will then let the abort # process, and report when it is finished and properly cancelled. - logger.debug("cancelling task with %s", f"{job_id=}") + _logger.debug("cancelling task with %s", f"{job_id=}") try: - task_future: distributed.Future = await self.backend.client.get_dataset(name=job_id) # type: ignore + task_future: distributed.Future = ( + await dask_utils.wrap_client_async_routine( + self.backend.client.get_dataset(name=job_id) + ) + ) # NOTE: It seems there is a bug in the pubsub system in dask # Event are more robust to connections/disconnections cancel_event = await distributed.Event( name=TaskCancelEventName.format(job_id), client=self.backend.client ) - await cancel_event.set() # type: ignore - await task_future.cancel() # type: ignore - logger.debug("Dask task %s cancelled", task_future.key) + await dask_utils.wrap_client_async_routine(cancel_event.set()) + await dask_utils.wrap_client_async_routine(task_future.cancel()) + _logger.debug("Dask task %s cancelled", task_future.key) except KeyError: - logger.warning("Unknown task cannot be aborted: %s", f"{job_id=}") + _logger.warning("Unknown task cannot be aborted: %s", f"{job_id=}") async def get_task_result(self, job_id: str) -> TaskOutputData: - logger.debug("getting result of %s", f"{job_id=}") + _logger.debug("getting result of %s", f"{job_id=}") try: - task_future = await self.backend.client.get_dataset(name=job_id) # type: ignore - return await task_future.result(timeout=DASK_DEFAULT_TIMEOUT_S) # type: ignore + task_future: distributed.Future = ( + await dask_utils.wrap_client_async_routine( + self.backend.client.get_dataset(name=job_id) + ) + ) + return cast( + TaskOutputData, + await task_future.result(timeout=_DASK_DEFAULT_TIMEOUT_S), + ) except KeyError as exc: raise ComputationalBackendTaskNotFoundError(job_id=job_id) from exc except distributed.TimeoutError as exc: - raise ComputationalBackendTaskResultsNotReadyError from exc + raise ComputationalBackendTaskResultsNotReadyError(job_id=job_id) from exc async def release_task_result(self, job_id: str) -> None: - logger.debug("releasing results for %s", f"{job_id=}") + _logger.debug("releasing results for %s", f"{job_id=}") try: + # NOTE: The distributed Variable holds the future of the tasks in the dask-scheduler + # Alas, deleting the variable is done asynchronously and there is no way to ensure + # the variable was effectively deleted. + # This is annoying as one can re-create the variable without error. 
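# --- Editor's note (illustrative sketch, not part of this diff) -----------------
# The new client code keeps a named distributed.Variable plus a published dataset
# per job id, so the task's Future can be recovered later and then released.
# The self-contained function below exercises that same lifecycle against an
# in-process cluster; "job-demo" and the summed payload are made-up examples,
# only the dask.distributed calls mirror what this module does.
def _demo_job_handle_lifecycle() -> None:
    from distributed import Client, Variable

    with Client(processes=False) as client:  # local in-process scheduler/workers
        future = client.submit(sum, [1, 2, 3], key="job-demo", pure=False)
        # keep a named handle to the future, as _publish_in_dask() does
        Variable("job-demo", client=client).set(future)
        client.publish_dataset(future, name="job-demo")

        # later, the future can be recovered by job id and its result fetched
        recovered = Variable("job-demo", client=client).get(timeout=5)
        assert recovered.result(timeout=5) == 6

        # release, as release_task_result() does: Variable.delete is fire-and-forget
        Variable("job-demo", client=client).delete()
        client.unpublish_dataset(name="job-demo")
# --------------------------------------------------------------------------------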
+ var = distributed.Variable(job_id, client=self.backend.client) + await asyncio.get_event_loop().run_in_executor(None, var.delete) # first check if the key exists - await self.backend.client.get_dataset(name=job_id) # type: ignore - await self.backend.client.unpublish_dataset(name=job_id) # type: ignore + await dask_utils.wrap_client_async_routine( + self.backend.client.get_dataset(name=job_id) + ) + + await dask_utils.wrap_client_async_routine( + self.backend.client.unpublish_dataset(name=job_id) + ) + except KeyError: - logger.warning("Unknown task cannot be unpublished: %s", f"{job_id=}") + _logger.warning("Unknown task cannot be unpublished: %s", f"{job_id=}") async def get_cluster_details(self) -> ClusterDetails: - check_scheduler_is_still_the_same( + dask_utils.check_scheduler_is_still_the_same( self.backend.scheduler_id, self.backend.client ) - check_communication_with_scheduler_is_open(self.backend.client) - check_scheduler_status(self.backend.client) + dask_utils.check_communication_with_scheduler_is_open(self.backend.client) + dask_utils.check_scheduler_status(self.backend.client) scheduler_info = self.backend.client.scheduler_info() scheduler_status = self.backend.client.status dashboard_link = self.backend.client.dashboard_link @@ -445,22 +554,35 @@ def _get_worker_used_resources( dask_scheduler: distributed.Scheduler, ) -> dict[str, dict]: used_resources = {} - for worker_name in dask_scheduler.workers: - worker = dask_scheduler.workers[worker_name] - used_resources[worker_name] = worker.used_resources + for worker_name, worker_state in dask_scheduler.workers.items(): + used_resources[worker_name] = worker_state.used_resources return used_resources - used_resources_per_worker: dict[ - str, dict[str, Any] - ] = await self.backend.client.run_on_scheduler( - _get_worker_used_resources - ) # type: ignore + with log_catch(_logger, reraise=False): + # NOTE: this runs directly on the dask-scheduler and may rise exceptions + used_resources_per_worker: dict[str, dict[str, Any]] = ( + await dask_utils.wrap_client_async_routine( + self.backend.client.run_on_scheduler(_get_worker_used_resources) + ) + ) - for k, v in used_resources_per_worker.items(): - scheduler_info.get("workers", {}).get(k, {}).update(used_resources=v) + # let's update the scheduler info, with default to 0s since sometimes + # workers are destroyed/created without us knowing right away + for worker_name, worker_info in scheduler_info.get("workers", {}).items(): + used_resources: dict[str, float] = deepcopy( + worker_info.get("resources", {}) + ) + # reset default values + for res_name in used_resources: + used_resources[res_name] = 0 + # if the scheduler has info, let's override them + used_resources = used_resources_per_worker.get( + worker_name, used_resources + ) + worker_info.update(used_resources=used_resources) assert dashboard_link # nosec return ClusterDetails( scheduler=Scheduler(status=scheduler_status, **scheduler_info), - dashboard_link=parse_obj_as(AnyUrl, dashboard_link), + dashboard_link=TypeAdapter(AnyUrl).validate_python(dashboard_link), ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dask_clients_pool.py b/services/director-v2/src/simcore_service_director_v2/modules/dask_clients_pool.py index 56c717b8a89..31177b5a616 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dask_clients_pool.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dask_clients_pool.py @@ -1,11 +1,13 @@ import asyncio import logging +from collections.abc import 
AsyncIterator from contextlib import asynccontextmanager from dataclasses import dataclass, field -from typing import AsyncIterator, Optional +from typing import TypeAlias from fastapi import FastAPI -from models_library.clusters import Cluster, ClusterID +from models_library.clusters import BaseCluster, ClusterTypeInModel +from pydantic import AnyUrl from ..core.errors import ( ComputationalBackendNotConnectedError, @@ -14,18 +16,22 @@ DaskClientAcquisisitonError, ) from ..core.settings import ComputationalBackendSettings -from .dask_client import DaskClient, TaskHandlers +from ..utils.dask_client_utils import TaskHandlers +from .dask_client import DaskClient logger = logging.getLogger(__name__) +_ClusterUrl: TypeAlias = AnyUrl + + @dataclass class DaskClientsPool: app: FastAPI settings: ComputationalBackendSettings _client_acquisition_lock: asyncio.Lock = field(init=False) - _cluster_to_client_map: dict[ClusterID, DaskClient] = field(default_factory=dict) - _task_handlers: Optional[TaskHandlers] = None + _cluster_to_client_map: dict[_ClusterUrl, DaskClient] = field(default_factory=dict) + _task_handlers: TaskHandlers | None = None def __post_init__(self): # NOTE: to ensure the correct loop is used @@ -43,10 +49,10 @@ async def create( @staticmethod def instance(app: FastAPI) -> "DaskClientsPool": if not hasattr(app.state, "dask_clients_pool"): - raise ConfigurationError( - "Dask clients pool is not available. Please check the configuration." - ) - return app.state.dask_clients_pool + msg = "Dask clients pool is not available. Please check the configuration." + raise ConfigurationError(msg=msg) + dask_clients_pool: DaskClientsPool = app.state.dask_clients_pool + return dask_clients_pool async def delete(self) -> None: await asyncio.gather( @@ -55,15 +61,16 @@ async def delete(self) -> None: ) @asynccontextmanager - async def acquire(self, cluster: Cluster) -> AsyncIterator[DaskClient]: + async def acquire(self, cluster: BaseCluster) -> AsyncIterator[DaskClient]: async def _concurently_safe_acquire_client() -> DaskClient: async with self._client_acquisition_lock: - assert isinstance(cluster.id, int) # nosec - dask_client = self._cluster_to_client_map.get(cluster.id) + dask_client = self._cluster_to_client_map.get(cluster.endpoint) # we create a new client if that cluster was never used before logger.debug( - "acquiring connection to cluster %s:%s", cluster.id, cluster.name + "acquiring connection to cluster %s:%s", + cluster.endpoint, + cluster.name, ) if not dask_client: tasks_file_link_type = ( @@ -73,14 +80,19 @@ async def _concurently_safe_acquire_client() -> DaskClient: tasks_file_link_type = ( self.settings.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE ) + if cluster.type == ClusterTypeInModel.ON_DEMAND.value: + tasks_file_link_type = ( + self.settings.COMPUTATIONAL_BACKEND_ON_DEMAND_CLUSTERS_FILE_LINK_TYPE + ) self._cluster_to_client_map[ - cluster.id + cluster.endpoint ] = dask_client = await DaskClient.create( app=self.app, settings=self.settings, endpoint=cluster.endpoint, authentication=cluster.authentication, tasks_file_link_type=tasks_file_link_type, + cluster_type=cluster.type, ) if self._task_handlers: dask_client.register_handlers(self._task_handlers) @@ -106,7 +118,7 @@ async def _concurently_safe_acquire_client() -> DaskClient: ComputationalSchedulerChangedError, ): # cleanup and re-raise - if dask_client := self._cluster_to_client_map.pop(cluster.id, None): + if dask_client := self._cluster_to_client_map.pop(cluster.endpoint, None): # type: ignore[arg-type] # 
https://github.com/python/mypy/issues/10152 await dask_client.delete() raise diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py index 09b6cbc7c58..34a955cdfc2 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/__init__.py @@ -1,13 +1,13 @@ from fastapi import FastAPI +from servicelib.fastapi.db_asyncpg_engine import ( + close_db_connection, + connect_to_db, +) +from servicelib.fastapi.db_asyncpg_engine import get_engine as get_db_engine +from settings_library.postgres import PostgresSettings -from ...core.settings import PostgresSettings -from .events import close_db_connection, connect_to_db - - -def setup(app: FastAPI, settings: PostgresSettings): - if not settings: - settings = PostgresSettings() +def setup(app: FastAPI, settings: PostgresSettings) -> None: async def on_startup() -> None: await connect_to_db(app, settings) @@ -16,3 +16,9 @@ async def on_shutdown() -> None: app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) + + +__all__: tuple[str, ...] = ( + "get_db_engine", + "setup", +) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/events.py b/services/director-v2/src/simcore_service_director_v2/modules/db/events.py deleted file mode 100644 index f6d7632b0ae..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/events.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging -from typing import Any - -import orjson -from aiopg.sa import Engine, create_engine -from aiopg.sa.engine import get_dialect -from fastapi import FastAPI -from servicelib.retry_policies import PostgresRetryPolicyUponInitialization -from settings_library.postgres import PostgresSettings -from simcore_postgres_database.utils_aiopg import ( - close_engine, - get_pg_engine_info, - raise_if_migration_not_ready, -) -from tenacity import retry - -logger = logging.getLogger(__name__) - - -def json_serializer(o: Any) -> str: - return str(orjson.dumps(o), "utf-8") - - -@retry(**PostgresRetryPolicyUponInitialization(logger).kwargs) -async def connect_to_db(app: FastAPI, settings: PostgresSettings) -> None: - """ - Creates an engine to communicate to the db and retries until - the db is ready - """ - logger.debug("Connecting db ...") - engine: Engine = await create_engine( - str(settings.dsn), - application_name=f"{__name__}_{id(app)}", # unique identifier per app - minsize=settings.POSTGRES_MINSIZE, - maxsize=settings.POSTGRES_MAXSIZE, - dialect=get_dialect(json_serializer=json_serializer), - ) - logger.debug("Connected to %s", engine.dsn) - - logger.debug("Checking db migrationn ...") - try: - await raise_if_migration_not_ready(engine) - except Exception: - # NOTE: engine must be closed because retry will create a new engine - await close_engine(engine) - raise - - logger.debug("Migration up-to-date") - - app.state.engine = engine - logger.debug("Setup engine: %s", get_pg_engine_info(engine)) - - -async def close_db_connection(app: FastAPI) -> None: - logger.debug("Disconnecting db ...") - - if engine := app.state.engine: - await close_engine(engine) - - logger.debug("Disconnected from %s", engine.dsn) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/__init__.py index 
a5eeffe1ff5..93da4003de3 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/__init__.py @@ -1 +1,3 @@ from ._base import BaseRepository + +__all__: tuple[str, ...] = ("BaseRepository",) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/_base.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/_base.py index 1f7cc57a069..7f0530abaef 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/_base.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/_base.py @@ -1,6 +1,9 @@ from dataclasses import dataclass +from typing import TypeVar -from aiopg.sa import Engine +from sqlalchemy.ext.asyncio import AsyncEngine + +RepositoryType = TypeVar("RepositoryType", bound="BaseRepository") @dataclass @@ -9,4 +12,8 @@ class BaseRepository: Repositories are pulled at every request """ - db_engine: Engine = None + db_engine: AsyncEngine + + @classmethod + def instance(cls: type[RepositoryType], db_engine: AsyncEngine) -> RepositoryType: + return cls(db_engine=db_engine) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py deleted file mode 100644 index 9a09a8f0577..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/clusters.py +++ /dev/null @@ -1,284 +0,0 @@ -import logging -from typing import Dict, Iterable, List, Optional - -import psycopg2 -import sqlalchemy as sa -from aiopg.sa import connection -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_NO_RIGHTS, - CLUSTER_USER_RIGHTS, - Cluster, - ClusterAccessRights, - ClusterID, -) -from models_library.users import UserID -from pydantic.types import PositiveInt -from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups -from simcore_postgres_database.models.clusters import clusters -from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups -from simcore_postgres_database.models.users import users -from sqlalchemy.dialects.postgresql import insert as pg_insert - -from ....core.errors import ( - ClusterAccessForbiddenError, - ClusterInvalidOperationError, - ClusterNotFoundError, -) -from ....models.schemas.clusters import ClusterCreate, ClusterPatch -from ....utils.db import to_clusters_db -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - - -async def _clusters_from_cluster_ids( - conn: connection.SAConnection, - cluster_ids: Iterable[PositiveInt], - offset: int = 0, - limit: Optional[int] = None, -) -> List[Cluster]: - cluster_id_to_cluster: Dict[PositiveInt, Cluster] = {} - async for row in conn.execute( - sa.select( - [ - clusters, - cluster_to_groups.c.gid, - cluster_to_groups.c.read, - cluster_to_groups.c.write, - cluster_to_groups.c.delete, - ] - ) - .select_from( - clusters.join( - cluster_to_groups, - clusters.c.id == cluster_to_groups.c.cluster_id, - ) - ) - .where(clusters.c.id.in_(cluster_ids)) - .offset(offset) - .limit(limit) - ): - cluster_access_rights = { - row[cluster_to_groups.c.gid]: ClusterAccessRights( - **{ - "read": row[cluster_to_groups.c.read], - "write": row[cluster_to_groups.c.write], - "delete": row[cluster_to_groups.c.delete], - } - ) - } - - cluster_id = 
row[clusters.c.id] - if cluster_id not in cluster_id_to_cluster: - cluster_id_to_cluster[cluster_id] = Cluster( - id=cluster_id, - name=row[clusters.c.name], - description=row[clusters.c.description], - type=row[clusters.c.type], - owner=row[clusters.c.owner], - endpoint=row[clusters.c.endpoint], - authentication=row[clusters.c.authentication], - thumbnail=row[clusters.c.thumbnail], - access_rights=cluster_access_rights, - ) - else: - cluster_id_to_cluster[cluster_id].access_rights.update( - cluster_access_rights - ) - - return list(cluster_id_to_cluster.values()) - - -async def _compute_user_access_rights( - conn: connection.SAConnection, user_id: UserID, cluster: Cluster -) -> ClusterAccessRights: - result = await conn.execute( - sa.select([user_to_groups.c.gid, groups.c.type]) - .where(user_to_groups.c.uid == user_id) - .order_by(groups.c.type) - .join(groups) - ) - user_groups = await result.fetchall() - - # get the primary group first, as it has precedence - if primary_group_row := next( - filter(lambda ugrp: ugrp[1] == GroupType.PRIMARY, user_groups), None - ): - if primary_grp_rights := cluster.access_rights.get(primary_group_row.gid): - return primary_grp_rights - - solved_rights = CLUSTER_NO_RIGHTS.dict() - for group_row in filter(lambda ugrp: ugrp[1] != GroupType.PRIMARY, user_groups): - grp_access = cluster.access_rights.get(group_row.gid, CLUSTER_NO_RIGHTS).dict() - for operation in ["read", "write", "delete"]: - solved_rights[operation] |= grp_access[operation] - return ClusterAccessRights(**solved_rights) - - -class ClustersRepository(BaseRepository): - async def create_cluster(self, user_id, new_cluster: ClusterCreate) -> Cluster: - async with self.db_engine.acquire() as conn: - user_primary_gid = await conn.scalar( - sa.select([users.c.primary_gid]).where(users.c.id == user_id) - ) - new_cluster.owner = user_primary_gid - new_cluster_id = await conn.scalar( - sa.insert( - clusters, values=to_clusters_db(new_cluster, only_update=False) - ).returning(clusters.c.id) - ) - assert new_cluster_id # nosec - return await self.get_cluster(user_id, new_cluster_id) - - async def list_clusters(self, user_id: UserID) -> List[Cluster]: - async with self.db_engine.acquire() as conn: - result = await conn.execute( - sa.select([clusters.c.id], distinct=True) - .where( - cluster_to_groups.c.gid.in_( - # get the groups of the user where he/she has read access - sa.select([groups.c.gid]) - .where((user_to_groups.c.uid == user_id)) - .order_by(groups.c.gid) - .select_from(groups.join(user_to_groups)) - ) - & cluster_to_groups.c.read - ) - .join(cluster_to_groups) - ) - cluster_ids = await result.fetchall() - return await _clusters_from_cluster_ids(conn, {c.id for c in cluster_ids}) - - async def get_cluster(self, user_id: UserID, cluster_id: ClusterID) -> Cluster: - async with self.db_engine.acquire() as conn: - clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{access_rights=}", - ) - if not access_rights.read: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - return the_cluster - - async def update_cluster( # pylint: disable=too-many-branches - self, user_id: UserID, cluster_id: ClusterID, updated_cluster: ClusterPatch - ) -> Cluster: - async with self.db_engine.acquire() as conn: 
- clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if len(clusters_list) != 1: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - this_user_access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{this_user_access_rights=}", - ) - - if not this_user_access_rights.write: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - if updated_cluster.owner and updated_cluster.owner != the_cluster.owner: - # if the user wants to change the owner, we need more rights here - if this_user_access_rights != CLUSTER_ADMIN_RIGHTS: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - # ensure the new owner has admin rights, too - if not updated_cluster.access_rights: - updated_cluster.access_rights = { - updated_cluster.owner: CLUSTER_ADMIN_RIGHTS - } - else: - updated_cluster.access_rights[ - updated_cluster.owner - ] = CLUSTER_ADMIN_RIGHTS - - # resolve access rights changes - resolved_access_rights = the_cluster.access_rights - if updated_cluster.access_rights: - # if the user is a manager he/she may ONLY add/remove users - if this_user_access_rights == CLUSTER_MANAGER_RIGHTS: - for grp, rights in updated_cluster.access_rights.items(): - if grp == the_cluster.owner or rights not in [ - CLUSTER_USER_RIGHTS, - CLUSTER_NO_RIGHTS, - ]: - # a manager cannot change the owner abilities or create - # managers/admins - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - resolved_access_rights.update(updated_cluster.access_rights) - # ensure the user is not trying to mess around owner admin rights - if ( - resolved_access_rights.setdefault( - the_cluster.owner, CLUSTER_ADMIN_RIGHTS - ) - != CLUSTER_ADMIN_RIGHTS - ): - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - - # ok we can update now - try: - await conn.execute( - sa.update(clusters) - .where(clusters.c.id == the_cluster.id) - .values(to_clusters_db(updated_cluster, only_update=True)) - ) - except psycopg2.DatabaseError as e: - raise ClusterInvalidOperationError(cluster_id=cluster_id) from e - # upsert the rights - if updated_cluster.access_rights: - for grp, rights in resolved_access_rights.items(): - insert_stmt = pg_insert(cluster_to_groups).values( - **rights.dict(by_alias=True), gid=grp, cluster_id=the_cluster.id - ) - on_update_stmt = insert_stmt.on_conflict_do_update( - index_elements=[ - cluster_to_groups.c.cluster_id, - cluster_to_groups.c.gid, - ], - set_=rights.dict(by_alias=True), - ) - await conn.execute(on_update_stmt) - - clusters_list: List[Cluster] = await _clusters_from_cluster_ids( - conn, {cluster_id} - ) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - return the_cluster - - async def delete_cluster(self, user_id: UserID, cluster_id: ClusterID) -> None: - async with self.db_engine.acquire() as conn: - clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) - if not clusters_list: - raise ClusterNotFoundError(cluster_id=cluster_id) - the_cluster = clusters_list[0] - - access_rights = await _compute_user_access_rights( - conn, user_id, the_cluster - ) - logger.debug( - "found cluster in DB: %s, with computed %s", - f"{the_cluster=}", - f"{access_rights=}", - ) - if not access_rights.delete: - raise ClusterAccessForbiddenError(cluster_id=cluster_id) - await conn.execute(sa.delete(clusters).where(clusters.c.id == cluster_id)) diff --git 
a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_pipelines.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_pipelines.py index d651b1296ab..b01886564c9 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_pipelines.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_pipelines.py @@ -1,15 +1,13 @@ import logging -from typing import Optional import networkx as nx import sqlalchemy as sa -from aiopg.sa.result import RowProxy from models_library.projects import ProjectID from models_library.projects_state import RunningState from sqlalchemy.dialects.postgresql import insert from ....core.errors import PipelineNotFoundError -from ....models.domains.comp_pipelines import CompPipelineAtDB +from ....models.comp_pipelines import CompPipelineAtDB from ..tables import comp_pipeline from ._base import BaseRepository @@ -18,16 +16,16 @@ class CompPipelinesRepository(BaseRepository): async def get_pipeline(self, project_id: ProjectID) -> CompPipelineAtDB: - async with self.db_engine.acquire() as conn: + async with self.db_engine.connect() as conn: result = await conn.execute( - sa.select([comp_pipeline]).where( + sa.select(comp_pipeline).where( comp_pipeline.c.project_id == str(project_id) ) ) - row: Optional[RowProxy] = await result.fetchone() + row = result.one_or_none() if not row: - raise PipelineNotFoundError(str(project_id)) - return CompPipelineAtDB.from_orm(row) + raise PipelineNotFoundError(pipeline_id=project_id) + return CompPipelineAtDB.model_validate(row) async def upsert_pipeline( self, @@ -35,28 +33,32 @@ async def upsert_pipeline( dag_graph: nx.DiGraph, publish: bool, ) -> None: - pipeline_at_db = CompPipelineAtDB( project_id=project_id, dag_adjacency_list=nx.to_dict_of_lists(dag_graph), state=RunningState.PUBLISHED if publish else RunningState.NOT_STARTED, ) - insert_stmt = insert(comp_pipeline).values(**pipeline_at_db.dict(by_alias=True)) + insert_stmt = insert(comp_pipeline).values( + **pipeline_at_db.model_dump(mode="json", by_alias=True) + ) # FIXME: This is not a nice thing. this part of the information should be kept in comp_runs. 
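The hunks above migrate `CompPipelinesRepository` from aiopg's `acquire()` to SQLAlchemy's asyncio engine: `connect()` for reads, `begin()` for transactional writes, `Result.one_or_none()` instead of `fetchone()`, and Pydantic v2 `model_validate()` instead of `from_orm()`. A minimal, self-contained sketch of that connection pattern follows; the DSN and the `pipelines` table are placeholders, not the real `comp_pipeline` schema.

import asyncio

import sqlalchemy as sa
from sqlalchemy.ext.asyncio import create_async_engine

metadata = sa.MetaData()
pipelines = sa.Table(
    "pipelines",
    metadata,
    sa.Column("project_id", sa.String, primary_key=True),
    sa.Column("state", sa.String),
)


async def main() -> None:
    # placeholder DSN: a reachable PostgreSQL is needed to actually run this
    engine = create_async_engine("postgresql+asyncpg://user:secret@localhost/db")

    async with engine.connect() as conn:  # plain connection for reads
        result = await conn.execute(
            sa.select(pipelines).where(pipelines.c.project_id == "proj-1")
        )
        row = result.one_or_none()  # None or exactly one row, raises if several
        print(row)

    async with engine.begin() as conn:  # commits on success, rolls back on error
        await conn.execute(
            sa.update(pipelines)
            .where(pipelines.c.project_id == "proj-1")
            .values(state="PUBLISHED")
        )

    await engine.dispose()


asyncio.run(main())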
update_exclusion_policy = set() if not dag_graph.nodes(): update_exclusion_policy.add("dag_adjacency_list") on_update_stmt = insert_stmt.on_conflict_do_update( index_elements=[comp_pipeline.c.project_id], - set_=pipeline_at_db.dict( - by_alias=True, exclude_unset=True, exclude=update_exclusion_policy + set_=pipeline_at_db.model_dump( + mode="json", + by_alias=True, + exclude_unset=True, + exclude=update_exclusion_policy, ), ) - async with self.db_engine.acquire() as conn: + async with self.db_engine.begin() as conn: await conn.execute(on_update_stmt) async def delete_pipeline(self, project_id: ProjectID) -> None: - async with self.db_engine.acquire() as conn: + async with self.db_engine.begin() as conn: await conn.execute( sa.delete(comp_pipeline).where( comp_pipeline.c.project_id == str(project_id) @@ -66,7 +68,7 @@ async def delete_pipeline(self, project_id: ProjectID) -> None: async def mark_pipeline_state( self, project_id: ProjectID, state: RunningState ) -> None: - async with self.db_engine.acquire() as conn: + async with self.db_engine.begin() as conn: await conn.execute( sa.update(comp_pipeline) .where(comp_pipeline.c.project_id == str(project_id)) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py index 82b5e1840a0..75e1639ad84 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_runs.py @@ -1,43 +1,110 @@ +import datetime import logging -from collections import deque -from datetime import datetime -from typing import List, Optional, Set +from typing import Any, Final, cast +import arrow +import asyncpg # type: ignore[import-untyped] import sqlalchemy as sa -from aiopg.sa.result import RowProxy -from models_library.clusters import DEFAULT_CLUSTER_ID, ClusterID +import sqlalchemy.exc as sql_exc +from models_library.api_schemas_directorv2.comp_runs import ComputationRunRpcGet +from models_library.basic_types import IDStr from models_library.projects import ProjectID from models_library.projects_state import RunningState +from models_library.rest_ordering import OrderBy, OrderDirection from models_library.users import UserID +from models_library.utils.fastapi_encoders import jsonable_encoder from pydantic import PositiveInt +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.dialects.postgresql.asyncpg import AsyncAdapt_asyncpg_dbapi +from sqlalchemy.ext.asyncio import AsyncConnection from sqlalchemy.sql import or_ from sqlalchemy.sql.elements import literal_column from sqlalchemy.sql.expression import desc -from ....core.errors import ComputationalRunNotFoundError -from ....models.domains.comp_runs import CompRunsAtDB -from ....utils.db import RUNNING_STATE_TO_DB +from ....core.errors import ( + ComputationalRunNotFoundError, + DirectorError, + ProjectNotFoundError, + UserNotFoundError, +) +from ....models.comp_runs import CompRunsAtDB, RunMetadataDict +from ....utils.db import DB_TO_RUNNING_STATE, RUNNING_STATE_TO_DB from ..tables import comp_runs from ._base import BaseRepository logger = logging.getLogger(__name__) +_POSTGRES_FK_COLUMN_TO_ERROR_MAP: Final[ + dict[sa.Column, tuple[type[DirectorError], tuple[str, ...]]] +] = { + comp_runs.c.user_id: (UserNotFoundError, ("users", "user_id")), + comp_runs.c.project_uuid: ( + 
ProjectNotFoundError, + ("projects", "project_id"), + ), +} + + +async def _get_next_iteration( + conn: AsyncConnection, user_id: UserID, project_id: ProjectID +) -> PositiveInt: + """Calculate the next iteration number for a project""" + last_iteration = await conn.scalar( + sa.select(comp_runs.c.iteration) + .where( + (comp_runs.c.user_id == user_id) + & (comp_runs.c.project_uuid == f"{project_id}") + ) + .order_by(desc(comp_runs.c.iteration)) + ) + return cast(PositiveInt, (last_iteration or 0) + 1) + + +def _handle_foreign_key_violation( + exc: sql_exc.IntegrityError, **error_keys: Any +) -> None: + """Handle foreign key violation errors and raise appropriate exceptions""" + if not isinstance(exc.orig, AsyncAdapt_asyncpg_dbapi.IntegrityError): + return + + if ( + not hasattr(exc.orig, "pgcode") + or exc.orig.pgcode != asyncpg.ForeignKeyViolationError.sqlstate + ): + return + + if not isinstance( + exc.orig.__cause__, asyncpg.ForeignKeyViolationError + ) or not hasattr(exc.orig.__cause__, "constraint_name"): + return + + constraint_name = exc.orig.__cause__.constraint_name + + for foreign_key in comp_runs.foreign_keys: + if constraint_name == foreign_key.name and foreign_key.parent is not None: + exc_type, exc_keys = _POSTGRES_FK_COLUMN_TO_ERROR_MAP[foreign_key.parent] + raise exc_type(**{k: error_keys.get(k) for k in exc_keys}) + class CompRunsRepository(BaseRepository): async def get( self, user_id: UserID, project_id: ProjectID, - iteration: Optional[PositiveInt] = None, + iteration: PositiveInt | None = None, ) -> CompRunsAtDB: """returns the run defined by user_id, project_id and iteration In case iteration is None then returns the last iteration :raises ComputationalRunNotFoundError: no entry found """ - async with self.db_engine.acquire() as conn: + + async with pass_or_acquire_connection(self.db_engine) as conn: result = await conn.execute( - sa.select([comp_runs]) + sa.select(comp_runs) .where( (comp_runs.c.user_id == user_id) & (comp_runs.c.project_uuid == f"{project_id}") @@ -46,97 +113,353 @@ async def get( .order_by(desc(comp_runs.c.iteration)) .limit(1) ) - row: Optional[RowProxy] = await result.first() + row = result.one_or_none() if not row: - raise ComputationalRunNotFoundError() - return CompRunsAtDB.from_orm(row) - - async def list( - self, filter_by_state: Optional[Set[RunningState]] = None - ) -> List[CompRunsAtDB]: - if not filter_by_state: - filter_by_state = set() - runs_in_db = deque() - async with self.db_engine.acquire() as conn: - async for row in conn.execute( - sa.select([comp_runs]).where( - or_( - *[ - comp_runs.c.result == RUNNING_STATE_TO_DB[s] - for s in filter_by_state - ] + raise ComputationalRunNotFoundError + return CompRunsAtDB.model_validate(row) + + async def list_( + self, + *, + filter_by_state: set[RunningState] | None = None, + never_scheduled: bool = False, + processed_since: datetime.timedelta | None = None, + scheduled_since: datetime.timedelta | None = None, + ) -> list[CompRunsAtDB]: + """lists the computational runs: + filter_by_state AND (never_scheduled OR processed_since OR scheduled_since) + + + Keyword Arguments: + filter_by_state -- will return only the runs with result in filter_by_state (default: {None}) + never_scheduled -- will return the runs which were never scheduled (default: {False}) + processed_since -- will return the runs which were processed since X, which are not re-scheduled since then (default: {None}) + scheduled_since -- will return the runs which were scheduled since X, which are not processed since then 
(default: {None}) + """ + + conditions = [] + if filter_by_state: + conditions.append( + or_( + *[ + comp_runs.c.result == RUNNING_STATE_TO_DB[s] + for s in filter_by_state + ] + ) + ) + + scheduling_or_conditions = [] + if never_scheduled: + scheduling_or_conditions.append(comp_runs.c.scheduled.is_(None)) + if scheduled_since is not None: + # a scheduled run is a run that has been scheduled but not processed yet + # e.g. the processing timepoint is either null or before the scheduling timepoint + scheduled_cutoff = arrow.utcnow().datetime - scheduled_since + scheduling_filter = ( + comp_runs.c.scheduled.is_not(None) + & ( + comp_runs.c.processed.is_(None) + | (comp_runs.c.scheduled > comp_runs.c.processed) + ) + & (comp_runs.c.scheduled <= scheduled_cutoff) + ) + scheduling_or_conditions.append(scheduling_filter) + + if processed_since is not None: + # a processed run is a run that has been scheduled and processed + # and the processing timepoint is after the scheduling timepoint + processed_cutoff = arrow.utcnow().datetime - processed_since + processed_filter = ( + comp_runs.c.processed.is_not(None) + & (comp_runs.c.processed > comp_runs.c.scheduled) + & (comp_runs.c.processed <= processed_cutoff) + ) + + scheduling_or_conditions.append(processed_filter) + + if scheduling_or_conditions: + conditions.append(sa.or_(*scheduling_or_conditions)) + + async with self.db_engine.connect() as conn: + return [ + CompRunsAtDB.model_validate(row) + async for row in await conn.stream( + sa.select(comp_runs).where( + sa.and_(True, *conditions) # noqa: FBT003 ) ) - ): - runs_in_db.append(CompRunsAtDB.from_orm(row)) - return list(runs_in_db) + ] + + _COMPUTATION_RUNS_RPC_GET_COLUMNS = [ # noqa: RUF012 + comp_runs.c.project_uuid, + comp_runs.c.iteration, + comp_runs.c.result.label("state"), + comp_runs.c.metadata.label("info"), + comp_runs.c.created.label("submitted_at"), + comp_runs.c.started.label("started_at"), + comp_runs.c.ended.label("ended_at"), + ] + + async def list_for_user__only_latest_iterations( + self, + *, + product_name: str, + user_id: UserID, + # filters + filter_only_running: bool, + # pagination + offset: int, + limit: int, + # ordering + order_by: OrderBy | None = None, + ) -> tuple[int, list[ComputationRunRpcGet]]: + # NOTE: Currently, we list only pipelines created by the user themselves. + # If we want to list all pipelines that the user has read access to + # via project access rights, we need to join the `projects_to_groups` + # and `workspaces_access_rights` tables (which will make it slower, but we do + # the same for listing projects). 
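The query assembled just below keeps, per project of the user, only the row with the highest iteration: a grouped `max(iteration)` subquery joined back against `comp_runs`, followed by a separate count query and offset/limit pagination. A standalone sketch of that greatest-per-group pattern, using an illustrative `runs` table rather than the real schema:

import sqlalchemy as sa

metadata = sa.MetaData()
runs = sa.Table(
    "runs",
    metadata,
    sa.Column("run_id", sa.Integer, primary_key=True),
    sa.Column("project_uuid", sa.String),
    sa.Column("iteration", sa.Integer),
    sa.Column("result", sa.String),
)

# 1) per project, find the latest iteration
latest = (
    sa.select(
        runs.c.project_uuid,
        sa.func.max(runs.c.iteration).label("latest_iteration"),
    )
    .group_by(runs.c.project_uuid)
    .subquery("latest_runs")
)

# 2) join back to fetch the full row of that latest iteration
base_query = sa.select(runs).select_from(
    latest.join(
        runs,
        sa.and_(
            runs.c.project_uuid == latest.c.project_uuid,
            runs.c.iteration == latest.c.latest_iteration,
        ),
    )
)

# 3) total count and a paginated page both derive from the same base query
count_query = sa.select(sa.func.count()).select_from(base_query.subquery())
page_query = base_query.order_by(runs.c.run_id).offset(0).limit(20)

print(count_query)
print(page_query)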
+ if order_by is None: + order_by = OrderBy(field=IDStr("run_id")) # default ordering + + _latest_runs = ( + sa.select( + comp_runs.c.project_uuid, + sa.func.max(comp_runs.c.iteration).label( + "latest_iteration" + ), # <-- NOTE: We might create a boolean column with latest iteration for fast retrieval + ) + .where( + (comp_runs.c.user_id == user_id) + & ( + comp_runs.c.metadata["product_name"].astext == product_name + ) # <-- NOTE: We might create a separate column for this for fast retrieval + ) + .group_by(comp_runs.c.project_uuid) + ) + if filter_only_running: + _latest_runs = _latest_runs.where( + comp_runs.c.result.in_( + [ + RUNNING_STATE_TO_DB[item] + for item in RunningState.list_running_states() + ] + ) + ) + _latest_runs_subquery = _latest_runs.subquery().alias("latest_runs") + + base_select_query = sa.select( + *self._COMPUTATION_RUNS_RPC_GET_COLUMNS + ).select_from( + _latest_runs_subquery.join( + comp_runs, + sa.and_( + comp_runs.c.project_uuid == _latest_runs_subquery.c.project_uuid, + comp_runs.c.iteration == _latest_runs_subquery.c.latest_iteration, + ), + ) + ) + + # Select total count from base_query + count_query = sa.select(sa.func.count()).select_from( + base_select_query.subquery() + ) + + # Ordering and pagination + if order_by.direction == OrderDirection.ASC: + list_query = base_select_query.order_by( + sa.asc(getattr(comp_runs.c, order_by.field)), comp_runs.c.run_id + ) + else: + list_query = base_select_query.order_by( + desc(getattr(comp_runs.c, order_by.field)), comp_runs.c.run_id + ) + list_query = list_query.offset(offset).limit(limit) + + async with pass_or_acquire_connection(self.db_engine) as conn: + total_count = await conn.scalar(count_query) + + items = [ + ComputationRunRpcGet.model_validate( + { + **row, + "state": DB_TO_RUNNING_STATE[row["state"]], + } + ) + async for row in await conn.stream(list_query) + ] + + return cast(int, total_count), items + + async def list_for_user_and_project_all_iterations( + self, + *, + product_name: str, + user_id: UserID, + project_ids: list[ProjectID], + # pagination + offset: int, + limit: int, + # ordering + order_by: OrderBy | None = None, + ) -> tuple[int, list[ComputationRunRpcGet]]: + if order_by is None: + order_by = OrderBy(field=IDStr("run_id")) # default ordering + + base_select_query = sa.select( + *self._COMPUTATION_RUNS_RPC_GET_COLUMNS, + ).where( + (comp_runs.c.user_id == user_id) + & ( + comp_runs.c.project_uuid.in_( + [f"{project_id}" for project_id in project_ids] + ) + ) + & ( + comp_runs.c.metadata["product_name"].astext == product_name + ) # <-- NOTE: We might create a separate column for this for fast retrieval + ) + + # Select total count from base_query + count_query = sa.select(sa.func.count()).select_from( + base_select_query.subquery() + ) + + # Ordering and pagination + if order_by.direction == OrderDirection.ASC: + list_query = base_select_query.order_by( + sa.asc(getattr(comp_runs.c, order_by.field)), comp_runs.c.run_id + ) + else: + list_query = base_select_query.order_by( + desc(getattr(comp_runs.c, order_by.field)), comp_runs.c.run_id + ) + list_query = list_query.offset(offset).limit(limit) + + async with pass_or_acquire_connection(self.db_engine) as conn: + total_count = await conn.scalar(count_query) + + items = [ + ComputationRunRpcGet.model_validate( + { + **row, + "state": DB_TO_RUNNING_STATE[row["state"]], + } + ) + async for row in await conn.stream(list_query) + ] + + return cast(int, total_count), items async def create( self, + *, user_id: UserID, project_id: ProjectID, - 
cluster_id: ClusterID, - iteration: Optional[PositiveInt] = None, + iteration: PositiveInt | None = None, + metadata: RunMetadataDict, + use_on_demand_clusters: bool, ) -> CompRunsAtDB: - async with self.db_engine.acquire() as conn: - if iteration is None: - # let's get the latest if it exists - last_iteration = await conn.scalar( - sa.select([comp_runs.c.iteration]) - .where( - (comp_runs.c.user_id == user_id) - & (comp_runs.c.project_uuid == str(project_id)) - ) - .order_by(desc(comp_runs.c.iteration)) - ) - iteration = (last_iteration or 0) + 1 + try: + async with transaction_context(self.db_engine) as conn: + if iteration is None: + iteration = await _get_next_iteration(conn, user_id, project_id) - result = await conn.execute( - comp_runs.insert() # pylint: disable=no-value-for-parameter - .values( - user_id=user_id, - project_uuid=f"{project_id}", - cluster_id=cluster_id if cluster_id != DEFAULT_CLUSTER_ID else None, - iteration=iteration, - result=RUNNING_STATE_TO_DB[RunningState.PUBLISHED], - started=datetime.utcnow(), + result = await conn.execute( + comp_runs.insert() # pylint: disable=no-value-for-parameter + .values( + user_id=user_id, + project_uuid=f"{project_id}", + iteration=iteration, + result=RUNNING_STATE_TO_DB[RunningState.PUBLISHED], + metadata=jsonable_encoder(metadata), + use_on_demand_clusters=use_on_demand_clusters, + ) + .returning(literal_column("*")) ) - .returning(literal_column("*")) - ) - row = await result.first() - return CompRunsAtDB.from_orm(row) + row = result.one() + return CompRunsAtDB.model_validate(row) + except sql_exc.IntegrityError as exc: + _handle_foreign_key_violation(exc, project_id=project_id, user_id=user_id) + raise DirectorError from exc async def update( self, user_id: UserID, project_id: ProjectID, iteration: PositiveInt, **values - ) -> Optional[CompRunsAtDB]: - async with self.db_engine.acquire() as conn: + ) -> CompRunsAtDB | None: + async with transaction_context(self.db_engine) as conn: result = await conn.execute( sa.update(comp_runs) .where( (comp_runs.c.project_uuid == f"{project_id}") - & (comp_runs.c.user_id == f"{user_id}") + & (comp_runs.c.user_id == user_id) & (comp_runs.c.iteration == iteration) ) .values(**values) .returning(literal_column("*")) ) - row: RowProxy = await result.first() - return CompRunsAtDB.from_orm(row) if row else None + row = result.one_or_none() + return CompRunsAtDB.model_validate(row) if row else None async def set_run_result( self, + *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt, result_state: RunningState, - final_state: Optional[bool] = False, - ) -> Optional[CompRunsAtDB]: - values = {"result": RUNNING_STATE_TO_DB[result_state]} + final_state: bool | None = False, + ) -> CompRunsAtDB | None: + values: dict[str, Any] = {"result": RUNNING_STATE_TO_DB[result_state]} if final_state: - values.update({"ended": datetime.utcnow()}) + values.update({"ended": arrow.utcnow().datetime}) return await self.update( user_id, project_id, iteration, **values, ) + + async def mark_as_started( + self, + *, + user_id: UserID, + project_id: ProjectID, + iteration: PositiveInt, + started_time: datetime.datetime, + ) -> CompRunsAtDB | None: + return await self.update( + user_id, + project_id, + iteration, + started=started_time, + ) + + async def mark_for_cancellation( + self, *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt + ) -> CompRunsAtDB | None: + return await self.update( + user_id, + project_id, + iteration, + cancelled=arrow.utcnow().datetime, + ) + + async def 
mark_for_scheduling( + self, *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt + ) -> CompRunsAtDB | None: + return await self.update( + user_id, + project_id, + iteration, + scheduled=arrow.utcnow().datetime, + processed=None, + ) + + async def mark_as_processed( + self, *, user_id: UserID, project_id: ProjectID, iteration: PositiveInt + ) -> CompRunsAtDB | None: + return await self.update( + user_id, + project_id, + iteration, + processed=arrow.utcnow().datetime, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks.py deleted file mode 100644 index 1c3cc4d2c4d..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks.py +++ /dev/null @@ -1,318 +0,0 @@ -import asyncio -import logging -from datetime import datetime -from typing import Any, Optional - -import sqlalchemy as sa -from models_library.function_services_catalog import iter_service_docker_data -from models_library.projects import ProjectAtDB, ProjectID -from models_library.projects_nodes import Node -from models_library.projects_nodes_io import NodeID -from models_library.projects_state import RunningState -from models_library.services import ServiceDockerData -from models_library.services_resources import BootMode -from models_library.users import UserID -from sqlalchemy import literal_column -from sqlalchemy.dialects.postgresql import insert - -from ....core.errors import ErrorDict -from ....models.domains.comp_tasks import CompTaskAtDB, Image, NodeSchema -from ....models.schemas.services import NodeRequirements, ServiceExtras -from ....utils.computations import to_node_class -from ....utils.db import RUNNING_STATE_TO_DB -from ...catalog import CatalogClient -from ...director_v0 import DirectorV0Client -from ..tables import NodeClass, StateType, comp_tasks -from ._base import BaseRepository - -logger = logging.getLogger(__name__) - -# -# This is a catalog of front-end services that are translated as tasks -# -# The evaluation of this task is already done in the front-end -# The front-end sets the outputs in the node payload and therefore -# no evaluation is expected in the backend. 
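As the comment above (kept verbatim in the new `_utils.py` later in this diff) explains, front-end services such as the file-picker are already evaluated in the client, so only the remaining nodes become backend tasks. A rough sketch of that split; the key-prefix mapping here is an assumption for illustration, the real classification lives in `utils.computations.to_node_class`:

from enum import Enum


class NodeClass(str, Enum):
    FRONTEND = "FRONTEND"
    COMPUTATIONAL = "COMPUTATIONAL"
    INTERACTIVE = "INTERACTIVE"


def to_node_class(service_key: str) -> NodeClass:
    # assumed mapping, for illustration only
    if "/frontend/" in service_key:
        return NodeClass.FRONTEND
    if "/comp/" in service_key:
        return NodeClass.COMPUTATIONAL
    return NodeClass.INTERACTIVE


workbench = {
    "node-1": "simcore/services/frontend/file-picker",
    "node-2": "simcore/services/comp/itis/sleeper",
}
backend_nodes = {
    node_id: key
    for node_id, key in workbench.items()
    if to_node_class(key) is not NodeClass.FRONTEND
}
print(backend_nodes)  # only the computational node needs backend evaluation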
-# -# Examples are nodes like file-picker or parameter/* -# -_FRONTEND_SERVICES_CATALOG: dict[str, ServiceDockerData] = { - meta.key: meta for meta in iter_service_docker_data() -} - - -async def _get_service_details( - catalog_client: CatalogClient, user_id: UserID, product_name: str, node: Node -) -> ServiceDockerData: - service_details = await catalog_client.get_service( - user_id, - node.key, - node.version, - product_name, - ) - return ServiceDockerData.construct(**service_details) - - -def _compute_node_requirements(node_resources: dict[str, Any]) -> NodeRequirements: - node_defined_resources = {} - - for image_data in node_resources.values(): - for resource_name, resource_value in image_data.get("resources", {}).items(): - node_defined_resources[resource_name] = node_defined_resources.get( - resource_name, 0 - ) + min(resource_value["limit"], resource_value["reservation"]) - return NodeRequirements.parse_obj(node_defined_resources) - - -def _compute_node_boot_mode(node_resources: dict[str, Any]) -> BootMode: - for image_data in node_resources.values(): - return BootMode(image_data.get("boot_modes")[0]) - raise RuntimeError("No BootMode") - - -async def _generate_tasks_list_from_project( - project: ProjectAtDB, - catalog_client: CatalogClient, - director_client: DirectorV0Client, - published_nodes: list[NodeID], - user_id: UserID, - product_name: str, -) -> list[CompTaskAtDB]: - list_comp_tasks = [] - for internal_id, node_id in enumerate(project.workbench, 1): - node: Node = project.workbench[node_id] - - # get node infos - node_class = to_node_class(node.key) - node_details: Optional[ServiceDockerData] = None - node_resources: Optional[dict[str, Any]] = None - node_extras: Optional[ServiceExtras] = None - if node_class == NodeClass.FRONTEND: - node_details = _FRONTEND_SERVICES_CATALOG.get(node.key, None) - else: - node_details, node_resources, node_extras = await asyncio.gather( - _get_service_details(catalog_client, user_id, product_name, node), - catalog_client.get_service_resources(user_id, node.key, node.version), - director_client.get_service_extras(node.key, node.version), - ) - - if not node_details: - continue - - # aggregates node_details and node_extras into Image - data: dict[str, Any] = { - "name": node.key, - "tag": node.version, - } - - if node_resources: - data.update(node_requirements=_compute_node_requirements(node_resources)) - data["boot_mode"] = _compute_node_boot_mode(node_resources) - if node_extras and node_extras.container_spec: - data.update(command=node_extras.container_spec.command) - image = Image.parse_obj(data) - - assert node.state is not None # nosec - task_state = node.state.current_status - if node_id in published_nodes and node_class == NodeClass.COMPUTATIONAL: - task_state = RunningState.PUBLISHED - - task_db = CompTaskAtDB( - project_id=project.uuid, - node_id=NodeID(node_id), - schema=NodeSchema.parse_obj( - node_details.dict( - exclude_unset=True, by_alias=True, include={"inputs", "outputs"} - ) - ), - inputs=node.inputs, - outputs=node.outputs, - image=image, - submit=datetime.utcnow(), - state=task_state, - internal_id=internal_id, - node_class=node_class, - ) - - list_comp_tasks.append(task_db) - return list_comp_tasks - - -class CompTasksRepository(BaseRepository): - async def get_all_tasks( - self, - project_id: ProjectID, - ) -> list[CompTaskAtDB]: - tasks: list[CompTaskAtDB] = [] - async with self.db_engine.acquire() as conn: - async for row in conn.execute( - sa.select([comp_tasks]).where( - comp_tasks.c.project_id == f"{project_id}" - 
) - ): - task_db = CompTaskAtDB.from_orm(row) - tasks.append(task_db) - - return tasks - - async def get_comp_tasks( - self, - project_id: ProjectID, - ) -> list[CompTaskAtDB]: - tasks: list[CompTaskAtDB] = [] - async with self.db_engine.acquire() as conn: - async for row in conn.execute( - sa.select([comp_tasks]).where( - (comp_tasks.c.project_id == f"{project_id}") - & (comp_tasks.c.node_class == NodeClass.COMPUTATIONAL) - ) - ): - task_db = CompTaskAtDB.from_orm(row) - tasks.append(task_db) - return tasks - - async def check_task_exists(self, project_id: ProjectID, node_id: NodeID) -> bool: - async with self.db_engine.acquire() as conn: - nid: Optional[str] = await conn.scalar( - sa.select([comp_tasks.c.node_id]).where( - (comp_tasks.c.project_id == f"{project_id}") - & (comp_tasks.c.node_id == f"{node_id}") - ) - ) - return nid is not None - - async def upsert_tasks_from_project( - self, - project: ProjectAtDB, - catalog_client: CatalogClient, - director_client: DirectorV0Client, - published_nodes: list[NodeID], - user_id: UserID, - product_name: str, - ) -> list[CompTaskAtDB]: - # NOTE: really do an upsert here because of issue https://github.com/ITISFoundation/osparc-simcore/issues/2125 - list_of_comp_tasks_in_project: list[ - CompTaskAtDB - ] = await _generate_tasks_list_from_project( - project, - catalog_client, - director_client, - published_nodes, - user_id, - product_name, - ) - async with self.db_engine.acquire() as conn: - # get current tasks - result = await conn.execute( - sa.select([comp_tasks.c.node_id]).where( - comp_tasks.c.project_id == str(project.uuid) - ) - ) - # remove the tasks that were removed from project workbench - node_ids_to_delete = [ - t.node_id - for t in await result.fetchall() - if t.node_id not in project.workbench - ] - for node_id in node_ids_to_delete: - await conn.execute( - sa.delete(comp_tasks).where( - (comp_tasks.c.project_id == str(project.uuid)) - & (comp_tasks.c.node_id == node_id) - ) - ) - - # insert or update the remaining tasks - # NOTE: comp_tasks DB only trigger a notification to the webserver if an UPDATE on comp_tasks.outputs or comp_tasks.state is done - # NOTE: an exception to this is when a frontend service changes its output since there is no node_ports, the UPDATE must be done here. 
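Both the removed implementation above and its replacement later in this diff first reconcile `comp_tasks` with the project workbench: rows whose node id no longer appears in the workbench are deleted, and the remaining tasks are inserted or updated. A minimal sketch of that reconciliation step, with illustrative names:

import sqlalchemy as sa

metadata = sa.MetaData()
tasks = sa.Table(
    "tasks",
    metadata,
    sa.Column("project_id", sa.String, primary_key=True),
    sa.Column("node_id", sa.String, primary_key=True),
    sa.Column("state", sa.String),
)

node_ids_in_db = {"node-1", "node-2"}
workbench_node_ids = {"node-2", "node-3"}

stale_node_ids = node_ids_in_db - workbench_node_ids  # {'node-1'}
delete_stmt = sa.delete(tasks).where(
    (tasks.c.project_id == "proj-1") & (tasks.c.node_id.in_(stale_node_ids))
)
print(delete_stmt)  # DELETE FROM tasks WHERE project_id = ... AND node_id IN (...)
# the surviving workbench nodes then go through the upsert shown in these hunks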
- inserted_comp_tasks_db: list[CompTaskAtDB] = [] - for comp_task_db in list_of_comp_tasks_in_project: - insert_stmt = insert(comp_tasks).values(**comp_task_db.to_db_model()) - - exclusion_rule = ( - {"state"} - if str(comp_task_db.node_id) not in published_nodes - else set() - ) - if to_node_class(comp_task_db.image.name) != NodeClass.FRONTEND: - exclusion_rule.add("outputs") - on_update_stmt = insert_stmt.on_conflict_do_update( - index_elements=[comp_tasks.c.project_id, comp_tasks.c.node_id], - set_=comp_task_db.to_db_model(exclude=exclusion_rule), - ).returning(literal_column("*")) - result = await conn.execute(on_update_stmt) - row = await result.fetchone() - assert row # nosec - inserted_comp_tasks_db.append(CompTaskAtDB.from_orm(row)) - logger.debug( - "inserted the following tasks in comp_tasks: %s", - f"{inserted_comp_tasks_db=}", - ) - return inserted_comp_tasks_db - - async def mark_project_published_tasks_as_aborted( - self, project_id: ProjectID - ) -> None: - # block all pending tasks, so the sidecars stop taking them - async with self.db_engine.acquire() as conn: - await conn.execute( - sa.update(comp_tasks) - .where( - (comp_tasks.c.project_id == f"{project_id}") - & (comp_tasks.c.node_class == NodeClass.COMPUTATIONAL) - & (comp_tasks.c.state == StateType.PUBLISHED) - ) - .values(state=StateType.ABORTED) - ) - logger.debug("marked project %s published tasks as aborted", f"{project_id=}") - - async def set_project_task_job_id( - self, project_id: ProjectID, task: NodeID, job_id: str - ) -> None: - async with self.db_engine.acquire() as conn: - await conn.execute( - sa.update(comp_tasks) - .where( - (comp_tasks.c.project_id == f"{project_id}") - & (comp_tasks.c.node_id == f"{task}") - ) - .values(job_id=job_id) - ) - logger.debug( - "set project %s task %s with job id: %s", - f"{project_id=}", - f"{task=}", - f"{job_id=}", - ) - - async def set_project_tasks_state( - self, - project_id: ProjectID, - tasks: list[NodeID], - state: RunningState, - errors: Optional[list[ErrorDict]] = None, - ) -> None: - async with self.db_engine.acquire() as conn: - await conn.execute( - sa.update(comp_tasks) - .where( - (comp_tasks.c.project_id == f"{project_id}") - & (comp_tasks.c.node_id.in_([str(t) for t in tasks])) - ) - .values(state=RUNNING_STATE_TO_DB[state], errors=errors) - ) - logger.debug( - "set project %s tasks %s with state %s", - f"{project_id=}", - f"{tasks=}", - f"{state=}", - ) - - async def delete_tasks_from_project(self, project: ProjectAtDB) -> None: - async with self.db_engine.acquire() as conn: - await conn.execute( - sa.delete(comp_tasks).where( - comp_tasks.c.project_id == str(project.uuid) - ) - ) - logger.debug("deleted tasks from project %s", f"{project.uuid=}") diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/__init__.py new file mode 100644 index 00000000000..458e5a23a84 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/__init__.py @@ -0,0 +1,3 @@ +from ._core import CompTasksRepository + +__all__: tuple[str, ...] 
= ("CompTasksRepository",) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_core.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_core.py new file mode 100644 index 00000000000..67f19db7e0e --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_core.py @@ -0,0 +1,349 @@ +import logging +from datetime import datetime +from typing import Any, cast + +import arrow +import sqlalchemy as sa +from models_library.basic_types import IDStr +from models_library.errors import ErrorDict +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.rest_ordering import OrderBy, OrderDirection +from models_library.users import UserID +from models_library.wallets import WalletInfo +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.utils import logged_gather +from sqlalchemy import literal_column +from sqlalchemy.dialects.postgresql import insert + +from .....core.errors import ComputationalTaskNotFoundError +from .....models.comp_tasks import CompTaskAtDB, ComputationTaskForRpcDBGet +from .....modules.resource_usage_tracker_client import ResourceUsageTrackerClient +from .....utils.computations import to_node_class +from .....utils.db import DB_TO_RUNNING_STATE, RUNNING_STATE_TO_DB +from ....catalog import CatalogClient +from ...tables import NodeClass, StateType, comp_tasks +from .._base import BaseRepository +from . import _utils + +_logger = logging.getLogger(__name__) + + +class CompTasksRepository(BaseRepository): + async def get_task(self, project_id: ProjectID, node_id: NodeID) -> CompTaskAtDB: + async with self.db_engine.connect() as conn: + result = await conn.execute( + sa.select(comp_tasks).where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_id == f"{node_id}") + ) + ) + row = result.one_or_none() + if not row: + raise ComputationalTaskNotFoundError(node_id=node_id) + return CompTaskAtDB.model_validate(row) + + async def list_tasks( + self, + project_id: ProjectID, + ) -> list[CompTaskAtDB]: + tasks: list[CompTaskAtDB] = [] + async with self.db_engine.connect() as conn: + async for row in await conn.stream( + sa.select(comp_tasks).where(comp_tasks.c.project_id == f"{project_id}") + ): + task_db = CompTaskAtDB.model_validate(row) + tasks.append(task_db) + + return tasks + + async def list_computational_tasks( + self, + project_id: ProjectID, + ) -> list[CompTaskAtDB]: + tasks: list[CompTaskAtDB] = [] + async with self.db_engine.connect() as conn: + async for row in await conn.stream( + sa.select(comp_tasks).where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_class == NodeClass.COMPUTATIONAL) + ) + ): + task_db = CompTaskAtDB.model_validate(row) + tasks.append(task_db) + return tasks + + async def list_computational_tasks_rpc_domain( + self, + *, + project_ids: list[ProjectID], + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, + ) -> tuple[int, list[ComputationTaskForRpcDBGet]]: + if order_by is None: + order_by = OrderBy(field=IDStr("task_id")) # default ordering + + base_select_query = ( + sa.select( + comp_tasks.c.project_id.label("project_uuid"), + comp_tasks.c.node_id, + comp_tasks.c.state, + comp_tasks.c.progress, + comp_tasks.c.image, + 
comp_tasks.c.start.label("started_at"), + comp_tasks.c.end.label("ended_at"), + ) + .select_from(comp_tasks) + .where( + ( + comp_tasks.c.project_id.in_( + [f"{project_id}" for project_id in project_ids] + ) + ) + & (comp_tasks.c.node_class == NodeClass.COMPUTATIONAL) + ) + ) + + # Select total count from base_query + count_query = sa.select(sa.func.count()).select_from( + base_select_query.subquery() + ) + + # Ordering and pagination + if order_by.direction == OrderDirection.ASC: + list_query = base_select_query.order_by( + sa.asc(getattr(comp_tasks.c, order_by.field)), comp_tasks.c.task_id + ) + else: + list_query = base_select_query.order_by( + sa.desc(getattr(comp_tasks.c, order_by.field)), comp_tasks.c.task_id + ) + list_query = list_query.offset(offset).limit(limit) + + async with self.db_engine.connect() as conn: + total_count = await conn.scalar(count_query) + + items = [ + ComputationTaskForRpcDBGet.model_validate( + { + **row, + "state": DB_TO_RUNNING_STATE[row["state"]], # Convert the state + } + ) + async for row in await conn.stream(list_query) + ] + return cast(int, total_count), items + + async def task_exists(self, project_id: ProjectID, node_id: NodeID) -> bool: + async with self.db_engine.connect() as conn: + nid: str | None = await conn.scalar( + sa.select(comp_tasks.c.node_id).where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_id == f"{node_id}") + ) + ) + return nid is not None + + async def upsert_tasks_from_project( + self, + *, + project: ProjectAtDB, + catalog_client: CatalogClient, + published_nodes: list[NodeID], + user_id: UserID, + product_name: str, + rut_client: ResourceUsageTrackerClient, + wallet_info: WalletInfo | None, + rabbitmq_rpc_client: RabbitMQRPCClient, + ) -> list[CompTaskAtDB]: + # NOTE: really do an upsert here because of issue https://github.com/ITISFoundation/osparc-simcore/issues/2125 + async with self.db_engine.begin() as conn: + list_of_comp_tasks_in_project: list[CompTaskAtDB] = ( + await _utils.generate_tasks_list_from_project( + project=project, + catalog_client=catalog_client, + published_nodes=published_nodes, + user_id=user_id, + product_name=product_name, + connection=conn, + rut_client=rut_client, + wallet_info=wallet_info, + rabbitmq_rpc_client=rabbitmq_rpc_client, + ) + ) + # get current tasks + result = await conn.execute( + sa.select(comp_tasks.c.node_id).where( + comp_tasks.c.project_id == str(project.uuid) + ) + ) + # remove the tasks that were removed from project workbench + if all_nodes := result.all(): + node_ids_to_delete = [ + t.node_id for t in all_nodes if t.node_id not in project.workbench + ] + for node_id in node_ids_to_delete: + await conn.execute( + sa.delete(comp_tasks).where( + (comp_tasks.c.project_id == str(project.uuid)) + & (comp_tasks.c.node_id == node_id) + ) + ) + + # insert or update the remaining tasks + # NOTE: comp_tasks DB only trigger a notification to the webserver if an UPDATE on comp_tasks.outputs or comp_tasks.state is done + # NOTE: an exception to this is when a frontend service changes its output since there is no node_ports, the UPDATE must be done here. 
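In the new `_core.py`, which columns the `ON CONFLICT DO UPDATE` may overwrite depends on the task: tasks that are not being (re)published keep their `state` and `progress`, and non-frontend tasks also keep their `outputs`, since those arrive via node-ports. A standalone sketch of that exclusion logic against an illustrative table (not the real `comp_tasks` model):

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import insert as pg_insert

metadata = sa.MetaData()
tasks = sa.Table(
    "tasks",
    metadata,
    sa.Column("project_id", sa.String, primary_key=True),
    sa.Column("node_id", sa.String, primary_key=True),
    sa.Column("state", sa.String),
    sa.Column("progress", sa.Float),
    sa.Column("outputs", sa.JSON),
)

task_values = {
    "project_id": "proj-1",
    "node_id": "node-1",
    "state": "PUBLISHED",
    "progress": None,
    "outputs": {},
}
is_published = True
is_frontend = False

exclusion = set() if is_published else {"state", "progress"}
if not is_frontend:
    exclusion.add("outputs")  # written through node-ports, not by this upsert

insert_stmt = pg_insert(tasks).values(**task_values)
upsert_stmt = insert_stmt.on_conflict_do_update(
    index_elements=[tasks.c.project_id, tasks.c.node_id],
    set_={k: v for k, v in task_values.items() if k not in exclusion},
)
print(upsert_stmt.compile(dialect=postgresql.dialect()))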
+ inserted_comp_tasks_db: list[CompTaskAtDB] = [] + for comp_task_db in list_of_comp_tasks_in_project: + insert_stmt = insert(comp_tasks).values( + **comp_task_db.to_db_model(exclude={"created", "modified"}) + ) + + exclusion_rule = ( + {"state", "progress"} + if comp_task_db.node_id not in published_nodes + else set() + ) + update_values = ( + {"progress": None} + if comp_task_db.node_id in published_nodes + else {} + ) + + if to_node_class(comp_task_db.image.name) != NodeClass.FRONTEND: + exclusion_rule.add("outputs") + else: + update_values = {} + on_update_stmt = insert_stmt.on_conflict_do_update( + index_elements=[comp_tasks.c.project_id, comp_tasks.c.node_id], + set_=comp_task_db.to_db_model(exclude=exclusion_rule) + | update_values, + ).returning(literal_column("*")) + result = await conn.execute(on_update_stmt) + row = result.one() + inserted_comp_tasks_db.append(CompTaskAtDB.model_validate(row)) + _logger.debug( + "inserted the following tasks in comp_tasks: %s", + f"{inserted_comp_tasks_db=}", + ) + return inserted_comp_tasks_db + + async def _update_task( + self, project_id: ProjectID, task: NodeID, **task_kwargs + ) -> CompTaskAtDB: + with log_context( + _logger, + logging.DEBUG, + msg=f"update task {project_id=}:{task=} with '{task_kwargs}'", + ): + async with self.db_engine.begin() as conn: + result = await conn.execute( + sa.update(comp_tasks) + .where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_id == f"{task}") + ) + .values(**task_kwargs) + .returning(literal_column("*")) + ) + row = result.one() + return CompTaskAtDB.model_validate(row) + + async def mark_project_published_waiting_for_cluster_tasks_as_aborted( + self, project_id: ProjectID + ) -> None: + # block all pending tasks, so the sidecars stop taking them + async with self.db_engine.begin() as conn: + await conn.execute( + sa.update(comp_tasks) + .where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_class == NodeClass.COMPUTATIONAL) + & ( + (comp_tasks.c.state == StateType.PUBLISHED) + | (comp_tasks.c.state == StateType.WAITING_FOR_CLUSTER) + ) + ) + .values( + state=StateType.ABORTED, progress=1.0, end=arrow.utcnow().datetime + ) + ) + _logger.debug("marked project %s published tasks as aborted", f"{project_id=}") + + async def update_project_task_job_id( + self, project_id: ProjectID, task: NodeID, job_id: str + ) -> None: + await self._update_task(project_id, task, job_id=job_id) + + async def update_project_tasks_state( + self, + project_id: ProjectID, + tasks: list[NodeID], + state: RunningState, + errors: list[ErrorDict] | None = None, + *, + optional_progress: float | None = None, + optional_started: datetime | None = None, + optional_stopped: datetime | None = None, + ) -> None: + """update the task state values in the database + passing None for the optional arguments will not update the respective values in the database + Keyword Arguments: + errors -- _description_ (default: {None}) + optional_progress -- _description_ (default: {None}) + optional_started -- _description_ (default: {None}) + optional_stopped -- _description_ (default: {None}) + """ + update_values: dict[str, Any] = { + "state": RUNNING_STATE_TO_DB[state], + "errors": errors, + } + if optional_progress is not None: + update_values["progress"] = optional_progress + if optional_started is not None: + update_values["start"] = optional_started + if optional_stopped is not None: + update_values["end"] = optional_stopped + await logged_gather( + *( + self._update_task(project_id, task_id, 
**update_values) + for task_id in tasks + ) + ) + + async def update_project_task_progress( + self, project_id: ProjectID, node_id: NodeID, progress: float + ) -> None: + await self._update_task(project_id, node_id, progress=progress) + + async def update_project_task_last_heartbeat( + self, project_id: ProjectID, node_id: NodeID, heartbeat_time: datetime + ) -> None: + await self._update_task(project_id, node_id, last_heartbeat=heartbeat_time) + + async def delete_tasks_from_project(self, project_id: ProjectID) -> None: + async with self.db_engine.begin() as conn: + await conn.execute( + sa.delete(comp_tasks).where(comp_tasks.c.project_id == f"{project_id}") + ) + + async def get_outputs_from_tasks( + self, project_id: ProjectID, node_ids: set[NodeID] + ) -> dict[NodeID, dict[IDStr, Any]]: + selection = list(map(str, node_ids)) + query = sa.select(comp_tasks.c.node_id, comp_tasks.c.outputs).where( + (comp_tasks.c.project_id == f"{project_id}") + & (comp_tasks.c.node_id.in_(selection)) + ) + async with self.db_engine.connect() as conn: + result = await conn.execute(query) + rows = result.all() + if rows: + assert set(selection) == {f"{_.node_id}" for _ in rows} # nosec + return {NodeID(_.node_id): _.outputs or {} for _ in rows} + return {} diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py new file mode 100644 index 00000000000..7b23eb3451a --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/comp_tasks/_utils.py @@ -0,0 +1,454 @@ +import asyncio +import logging +from decimal import Decimal +from typing import Any, Final, cast + +import arrow +from dask_task_models_library.container_tasks.protocol import ContainerEnvsDict +from models_library.api_schemas_catalog.services import ServiceGet +from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet +from models_library.api_schemas_directorv2.services import ( + NodeRequirements, + ServiceExtras, +) +from models_library.function_services_catalog import iter_service_docker_data +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes import Node +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.resource_tracker import HardwareInfo +from models_library.service_settings_labels import ( + SimcoreServiceLabels, + SimcoreServiceSettingsLabel, +) +from models_library.services import ( + ServiceKey, + ServiceKeyVersion, + ServiceMetaDataPublished, + ServiceVersion, +) +from models_library.services_resources import ( + DEFAULT_SINGLE_SERVICE_NAME, + BootMode, + ImageResources, + ServiceResourcesDict, + ServiceResourcesDictHelpers, +) +from models_library.users import UserID +from models_library.wallets import ZERO_CREDITS, WalletInfo +from pydantic import TypeAdapter +from servicelib.rabbitmq import ( + RabbitMQRPCClient, + RemoteMethodNotRegisteredError, + RPCServerError, +) +from servicelib.rabbitmq.rpc_interfaces.clusters_keeper.ec2_instances import ( + get_instance_type_details, +) +from simcore_postgres_database.utils_projects_nodes import ProjectNodesRepo +from sqlalchemy.ext.asyncio import AsyncConnection + +from .....core.errors import ( + ClustersKeeperNotAvailableError, + ConfigurationError, + WalletNotEnoughCreditsError, +) +from .....models.comp_tasks import CompTaskAtDB, Image, NodeSchema 
+from .....models.pricing import PricingInfo +from .....modules.resource_usage_tracker_client import ResourceUsageTrackerClient +from .....utils.computations import to_node_class +from ....catalog import CatalogClient +from ....comp_scheduler._utils import COMPLETED_STATES +from ...tables import NodeClass + +_logger = logging.getLogger(__name__) + +# +# This is a catalog of front-end services that are translated as tasks +# +# The evaluation of this task is already done in the front-end +# The front-end sets the outputs in the node payload and therefore +# no evaluation is expected in the backend. +# +# Examples are nodes like file-picker or parameter/* +# +_FRONTEND_SERVICES_CATALOG: dict[str, ServiceMetaDataPublished] = { + meta.key: meta for meta in iter_service_docker_data() +} + + +async def _get_service_details( + catalog_client: CatalogClient, + user_id: UserID, + product_name: str, + node: ServiceKeyVersion, +) -> ServiceMetaDataPublished: + service_details = await catalog_client.get_service( + user_id, + node.key, + node.version, + product_name, + ) + obj: ServiceMetaDataPublished = ServiceGet(**service_details) + return obj + + +def _compute_node_requirements( + node_resources: ServiceResourcesDict, +) -> NodeRequirements: + node_defined_resources: dict[str, Any] = {} + + for image_data in node_resources.values(): + for resource_name, resource_value in image_data.resources.items(): + node_defined_resources[resource_name] = node_defined_resources.get( + resource_name, 0 + ) + min(resource_value.limit, resource_value.reservation) + return NodeRequirements(**node_defined_resources) + + +def _compute_node_boot_mode(node_resources: ServiceResourcesDict) -> BootMode: + for image_data in node_resources.values(): + return image_data.boot_modes[0] + msg = "No BootMode" + raise RuntimeError(msg) + + +_VALID_ENV_VALUE_NUM_PARTS: Final[int] = 2 + + +def _compute_node_envs(node_labels: SimcoreServiceLabels) -> ContainerEnvsDict: + node_envs = {} + for service_setting in cast(SimcoreServiceSettingsLabel, node_labels.settings): + if service_setting.name == "env": + for complete_env in service_setting.value: + parts = complete_env.split("=") + if len(parts) == _VALID_ENV_VALUE_NUM_PARTS: + node_envs[parts[0]] = parts[1] + + return node_envs + + +async def _get_node_infos( + catalog_client: CatalogClient, + user_id: UserID, + product_name: str, + node: ServiceKeyVersion, +) -> tuple[ + ServiceMetaDataPublished | None, ServiceExtras | None, SimcoreServiceLabels | None +]: + if to_node_class(node.key) == NodeClass.FRONTEND: + return ( + _FRONTEND_SERVICES_CATALOG.get(node.key, None), + None, + None, + ) + + result: tuple[ServiceMetaDataPublished, ServiceExtras, SimcoreServiceLabels] = ( + await asyncio.gather( + _get_service_details(catalog_client, user_id, product_name, node), + catalog_client.get_service_extras(node.key, node.version), + catalog_client.get_service_labels(node.key, node.version), + ) + ) + return result + + +async def _generate_task_image( + *, + catalog_client: CatalogClient, + connection: AsyncConnection, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + node: Node, + node_extras: ServiceExtras | None, + node_labels: SimcoreServiceLabels | None, +) -> Image: + # aggregates node_details and node_extras into Image + data: dict[str, Any] = { + "name": node.key, + "tag": node.version, + } + project_nodes_repo = ProjectNodesRepo(project_uuid=project_id) + project_node = await project_nodes_repo.get(connection, node_id=node_id) + node_resources = 
TypeAdapter(ServiceResourcesDict).validate_python( + project_node.required_resources + ) + if not node_resources: + node_resources = await catalog_client.get_service_resources( + user_id, node.key, node.version + ) + + if node_resources: + data.update(node_requirements=_compute_node_requirements(node_resources)) + data.update(boot_mode=_compute_node_boot_mode(node_resources)) + if node_labels: + data.update(envs=_compute_node_envs(node_labels)) + if node_extras and node_extras.container_spec: + data.update(command=node_extras.container_spec.command) + return Image(**data) + + +async def _get_pricing_and_hardware_infos( + connection: AsyncConnection, + rut_client: ResourceUsageTrackerClient, + *, + is_wallet: bool, + project_id: ProjectID, + node_id: NodeID, + product_name: str, + node_key: ServiceKey, + node_version: ServiceVersion, +) -> tuple[PricingInfo | None, HardwareInfo]: + if not is_wallet or (to_node_class(node_key) == NodeClass.FRONTEND): + # NOTE: frontend services have no pricing plans, therefore no need to call RUT + return None, HardwareInfo(aws_ec2_instances=[]) + project_nodes_repo = ProjectNodesRepo(project_uuid=project_id) + output = await project_nodes_repo.get_project_node_pricing_unit_id( + connection, node_uuid=node_id + ) + # NOTE: this is some kind of lazy insertion of the pricing unit + # the projects_nodes is already in at this time, and not in sync with the hardware info + # this will need to move away and be in sync. + if output: + pricing_plan_id, pricing_unit_id = output + else: + ( + pricing_plan_id, + pricing_unit_id, + _, + _, + ) = await rut_client.get_default_pricing_and_hardware_info( + product_name, node_key, node_version + ) + await project_nodes_repo.connect_pricing_unit_to_project_node( + connection, + node_uuid=node_id, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + ) + + pricing_unit_get = await rut_client.get_pricing_unit( + product_name, pricing_plan_id, pricing_unit_id + ) + pricing_unit_cost_id = pricing_unit_get.current_cost_per_unit_id + aws_ec2_instances = pricing_unit_get.specific_info.aws_ec2_instances + + pricing_info = PricingInfo( + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + pricing_unit_cost_id=pricing_unit_cost_id, + pricing_unit_cost=pricing_unit_get.current_cost_per_unit, + ) + hardware_info = HardwareInfo(aws_ec2_instances=aws_ec2_instances) + return pricing_info, hardware_info + + +_RAM_SAFE_MARGIN_RATIO: Final[float] = ( + 0.1 # NOTE: machines always have less available RAM than advertised +) +_CPUS_SAFE_MARGIN: Final[float] = 0.1 + + +async def _update_project_node_resources_from_hardware_info( + connection: AsyncConnection, + *, + is_wallet: bool, + project_id: ProjectID, + node_id: NodeID, + hardware_info: HardwareInfo, + rabbitmq_rpc_client: RabbitMQRPCClient, +) -> None: + if not is_wallet: + return + if not hardware_info.aws_ec2_instances: + return + try: + unordered_list_ec2_instance_types: list[EC2InstanceTypeGet] = ( + await get_instance_type_details( + rabbitmq_rpc_client, + instance_type_names=set(hardware_info.aws_ec2_instances), + ) + ) + + assert unordered_list_ec2_instance_types # nosec + + # NOTE: with the current implementation, there is no use to get the instance past the first one + def _by_type_name(ec2: EC2InstanceTypeGet) -> bool: + return bool(ec2.name == hardware_info.aws_ec2_instances[0]) + + selected_ec2_instance_type = next( + iter(filter(_by_type_name, unordered_list_ec2_instance_types)) + ) + + # now update the project node required resources + # 
NOTE: we keep a safe margin with the RAM as the dask-sidecar "sees" + # less memory than the machine theoretical amount + project_nodes_repo = ProjectNodesRepo(project_uuid=project_id) + node = await project_nodes_repo.get(connection, node_id=node_id) + node_resources = TypeAdapter(ServiceResourcesDict).validate_python( + node.required_resources + ) + if DEFAULT_SINGLE_SERVICE_NAME in node_resources: + image_resources: ImageResources = node_resources[ + DEFAULT_SINGLE_SERVICE_NAME + ] + image_resources.resources["CPU"].set_value( + float(selected_ec2_instance_type.cpus) - _CPUS_SAFE_MARGIN + ) + image_resources.resources["RAM"].set_value( + int( + selected_ec2_instance_type.ram + - _RAM_SAFE_MARGIN_RATIO * selected_ec2_instance_type.ram + ) + ) + + await project_nodes_repo.update( + connection, + node_id=node_id, + required_resources=ServiceResourcesDictHelpers.create_jsonable( + node_resources + ), + ) + else: + _logger.warning( + "Services resource override not implemented yet for multi-container services!!!" + ) + except StopIteration as exc: + msg = ( + f"invalid EC2 type name selected {set(hardware_info.aws_ec2_instances)}." + " TIP: adjust product configuration" + ) + raise ConfigurationError(msg=msg) from exc + except ( + RemoteMethodNotRegisteredError, + RPCServerError, + TimeoutError, + ) as exc: + raise ClustersKeeperNotAvailableError from exc + + +async def generate_tasks_list_from_project( + *, + project: ProjectAtDB, + catalog_client: CatalogClient, + published_nodes: list[NodeID], + user_id: UserID, + product_name: str, + connection: AsyncConnection, + rut_client: ResourceUsageTrackerClient, + wallet_info: WalletInfo | None, + rabbitmq_rpc_client: RabbitMQRPCClient, +) -> list[CompTaskAtDB]: + list_comp_tasks = [] + + unique_service_key_versions: set[ServiceKeyVersion] = { + ServiceKeyVersion( + key=node.key, version=node.version + ) # the service key version is frozen + for node in project.workbench.values() + } + + key_version_to_node_infos = { + key_version: await _get_node_infos( + catalog_client, + user_id, + product_name, + key_version, + ) + for key_version in unique_service_key_versions + } + + for internal_id, node_id in enumerate(project.workbench, 1): + node: Node = project.workbench[node_id] + node_key_version = ServiceKeyVersion(key=node.key, version=node.version) + node_details, node_extras, node_labels = key_version_to_node_infos.get( + node_key_version, + (None, None, None), + ) + + if not node_details: + continue + + assert node.state is not None # nosec + task_state = node.state.current_status + task_progress = None + if task_state in COMPLETED_STATES: + task_progress = node.state.progress + if ( + NodeID(node_id) in published_nodes + and to_node_class(node.key) == NodeClass.COMPUTATIONAL + ): + task_state = RunningState.PUBLISHED + + pricing_info, hardware_info = await _get_pricing_and_hardware_infos( + connection, + rut_client, + is_wallet=bool(wallet_info), + project_id=project.uuid, + node_id=NodeID(node_id), + product_name=product_name, + node_key=node.key, + node_version=node.version, + ) + # Check for zero credits (if pricing unit is greater than 0). 
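The override above persists the selected EC2 instance's capacity as the node's required resources, shrunk by safety margins because machines expose less usable CPU/RAM than advertised. A rough sketch of that margin arithmetic; the instance figures are made up, while the margin constants mirror the ones defined in this module:

_CPUS_SAFE_MARGIN = 0.1
_RAM_SAFE_MARGIN_RATIO = 0.1  # machines expose less usable RAM than advertised

instance_cpus = 8.0
instance_ram_bytes = 32 * 1024**3  # 32 GiB, made-up figure

effective_cpus = instance_cpus - _CPUS_SAFE_MARGIN
effective_ram = int(instance_ram_bytes - _RAM_SAFE_MARGIN_RATIO * instance_ram_bytes)

print(effective_cpus)  # 7.9 CPUs stored as the node's CPU requirement
print(effective_ram)   # 90% of the advertised RAM stored as the RAM requirement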
+ if ( + wallet_info + and pricing_info + and pricing_info.pricing_unit_cost > Decimal(0) + and wallet_info.wallet_credit_amount <= ZERO_CREDITS + ): + raise WalletNotEnoughCreditsError( + wallet_name=wallet_info.wallet_name, + wallet_credit_amount=wallet_info.wallet_credit_amount, + ) + + assert rabbitmq_rpc_client # nosec + await _update_project_node_resources_from_hardware_info( + connection, + is_wallet=bool(wallet_info), + project_id=project.uuid, + node_id=NodeID(node_id), + hardware_info=hardware_info, + rabbitmq_rpc_client=rabbitmq_rpc_client, + ) + + image = await _generate_task_image( + catalog_client=catalog_client, + connection=connection, + user_id=user_id, + project_id=project.uuid, + node_id=NodeID(node_id), + node=node, + node_extras=node_extras, + node_labels=node_labels, + ) + + task_db = CompTaskAtDB( + project_id=project.uuid, + node_id=NodeID(node_id), + schema=NodeSchema( + **node_details.model_dump( + exclude_unset=True, by_alias=True, include={"inputs", "outputs"} + ) + ), + inputs=node.inputs, + outputs=node.outputs, + image=image, + state=task_state, + internal_id=internal_id, + node_class=to_node_class(node.key), + progress=task_progress, + last_heartbeat=None, + created=arrow.utcnow().datetime, + modified=arrow.utcnow().datetime, + pricing_info=( + pricing_info.model_dump(exclude={"pricing_unit_cost"}) + if pricing_info + else None + ), + hardware_info=hardware_info, + ) + + list_comp_tasks.append(task_db) + return list_comp_tasks diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/groups_extra_properties.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/groups_extra_properties.py index 41ea99e2fdc..b0a1d2a0bdf 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/groups_extra_properties.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/groups_extra_properties.py @@ -1,28 +1,52 @@ -import sqlalchemy as sa -from models_library.products import ProductName -from models_library.users import UserID +from pydantic import BaseModel +from simcore_postgres_database.utils_groups_extra_properties import ( + GroupExtraProperties, + GroupExtraPropertiesRepo, +) -from ..tables import groups_extra_properties, user_to_groups from ._base import BaseRepository +class UserExtraProperties(BaseModel): + is_internet_enabled: bool + is_telemetry_enabled: bool + is_efs_enabled: bool + + class GroupsExtraPropertiesRepository(BaseRepository): - async def has_internet_access( - self, user_id: UserID, product_name: ProductName - ) -> bool: - async with self.db_engine.acquire() as conn: - # checks if one of the groups which the user is part of has internet access - select_stmt = sa.select( - [groups_extra_properties.c.internet_access] - ).select_from( - user_to_groups.join( - groups_extra_properties, - (groups_extra_properties.c.group_id == user_to_groups.c.gid) - & (user_to_groups.c.uid == user_id) - & (groups_extra_properties.c.internet_access == True) - & (groups_extra_properties.c.product_name == product_name), - ) + async def _get_aggregated_properties_for_user( + self, + *, + user_id: int, + product_name: str, + ) -> GroupExtraProperties: + async with self.db_engine.connect() as conn: + return await GroupExtraPropertiesRepo.get_aggregated_properties_for_user( + conn, user_id=user_id, product_name=product_name ) - user_with_access = await conn.scalar(select_stmt) - return user_with_access is not None + async def has_internet_access(self, *, 
user_id: int, product_name: str) -> bool: + group_extra_properties = await self._get_aggregated_properties_for_user( + user_id=user_id, product_name=product_name + ) + internet_access: bool = group_extra_properties.internet_access + return internet_access + + async def is_telemetry_enabled(self, *, user_id: int, product_name: str) -> bool: + group_extra_properties = await self._get_aggregated_properties_for_user( + user_id=user_id, product_name=product_name + ) + telemetry_enabled: bool = group_extra_properties.enable_telemetry + return telemetry_enabled + + async def get_user_extra_properties( + self, *, user_id: int, product_name: str + ) -> UserExtraProperties: + group_extra_properties = await self._get_aggregated_properties_for_user( + user_id=user_id, product_name=product_name + ) + return UserExtraProperties( + is_internet_enabled=group_extra_properties.internet_access, + is_telemetry_enabled=group_extra_properties.enable_telemetry, + is_efs_enabled=group_extra_properties.enable_efs, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects.py index 75ee34ff3be..2935b6ec251 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects.py @@ -1,9 +1,9 @@ import logging import sqlalchemy as sa -from aiopg.sa.result import RowProxy from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID +from simcore_postgres_database.utils_projects_nodes import ProjectNodesRepo from ....core.errors import ProjectNotFoundError from ..tables import projects @@ -14,15 +14,15 @@ class ProjectsRepository(BaseRepository): async def get_project(self, project_id: ProjectID) -> ProjectAtDB: - async with self.db_engine.acquire() as conn: - row: RowProxy = await ( + async with self.db_engine.connect() as conn: + row = ( await conn.execute( - sa.select([projects]).where(projects.c.uuid == str(project_id)) + sa.select(projects).where(projects.c.uuid == str(project_id)) ) - ).first() + ).one_or_none() if not row: - raise ProjectNotFoundError(project_id) - return ProjectAtDB.from_orm(row) + raise ProjectNotFoundError(project_id=project_id) + return ProjectAtDB.model_validate(row) async def is_node_present_in_workbench( self, project_id: ProjectID, node_uuid: NodeID @@ -32,3 +32,9 @@ async def is_node_present_in_workbench( return f"{node_uuid}" in project.workbench except ProjectNotFoundError: return False + + async def get_project_id_from_node(self, node_id: NodeID) -> ProjectID: + async with self.db_engine.connect() as conn: + return await ProjectNodesRepo.get_project_id_from_node_id( + conn, node_id=node_id + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_metadata.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_metadata.py new file mode 100644 index 00000000000..2b87d4c8e68 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_metadata.py @@ -0,0 +1,33 @@ +from dataclasses import dataclass + +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from simcore_postgres_database.utils_projects_metadata import ( + get as projects_metadata_get, +) + +from ._base import BaseRepository + + +@dataclass(frozen=True, 
kw_only=True, slots=True) +class ProjectAncestors: + parent_project_uuid: ProjectID | None + parent_node_id: NodeID | None + root_project_uuid: ProjectID | None + root_node_id: NodeID | None + + +class ProjectsMetadataRepository(BaseRepository): + async def get_project_ancestors(self, project_id: ProjectID) -> ProjectAncestors: + """ + Raises: + DBProjectNotFoundError: project not found + """ + async with self.db_engine.connect() as conn: + project_metadata = await projects_metadata_get(conn, project_id) + return ProjectAncestors( + parent_project_uuid=project_metadata.parent_project_uuid, + parent_node_id=project_metadata.parent_node_id, + root_project_uuid=project_metadata.root_parent_project_uuid, + root_node_id=project_metadata.root_parent_node_id, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_networks.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_networks.py index b9e296a68e4..2896e765d4a 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_networks.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/projects_networks.py @@ -1,39 +1,37 @@ -import json - import sqlalchemy as sa -from aiopg.sa.result import RowProxy +from common_library.json_serialization import json_loads from models_library.projects import ProjectID from models_library.projects_networks import NetworksWithAliases, ProjectsNetworks from sqlalchemy.dialects.postgresql import insert as pg_insert -from ....core.errors import ProjectNotFoundError +from ....core.errors import ProjectNetworkNotFoundError from ..tables import projects_networks from ._base import BaseRepository class ProjectsNetworksRepository(BaseRepository): async def get_projects_networks(self, project_id: ProjectID) -> ProjectsNetworks: - async with self.db_engine.acquire() as conn: - row: RowProxy = await ( + async with self.db_engine.connect() as conn: + row = ( await conn.execute( - sa.select([projects_networks]).where( + sa.select(projects_networks).where( projects_networks.c.project_uuid == f"{project_id}" ) ) - ).first() + ).one_or_none() if not row: - raise ProjectNotFoundError(project_id) - return ProjectsNetworks.from_orm(row) + raise ProjectNetworkNotFoundError(project_id=project_id) + return ProjectsNetworks.model_validate(row) async def upsert_projects_networks( self, project_id: ProjectID, networks_with_aliases: NetworksWithAliases ) -> None: - projects_networks_to_insert = ProjectsNetworks.parse_obj( - dict(project_uuid=project_id, networks_with_aliases=networks_with_aliases) + projects_networks_to_insert = ProjectsNetworks.model_validate( + {"project_uuid": project_id, "networks_with_aliases": networks_with_aliases} ) - async with self.db_engine.acquire() as conn: - row_data = json.loads(projects_networks_to_insert.json()) + async with self.db_engine.begin() as conn: + row_data = json_loads(projects_networks_to_insert.model_dump_json()) insert_stmt = pg_insert(projects_networks).values(**row_data) upsert_snapshot = insert_stmt.on_conflict_do_update( constraint=projects_networks.primary_key, set_=row_data diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/services_environments.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/services_environments.py new file mode 100644 index 00000000000..ddfb93d11e1 --- /dev/null +++ 
b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/services_environments.py @@ -0,0 +1,42 @@ +from typing import Literal + +from models_library.products import ProductName +from models_library.services import ServiceKey, ServiceVersion +from simcore_postgres_database.models.services_environments import VENDOR_SECRET_PREFIX +from simcore_postgres_database.utils_services_environments import ( + VendorSecret, + get_vendor_secrets, +) + +from ._base import BaseRepository + + +class ServicesEnvironmentsRepository(BaseRepository): + """ + Access to Vendor settings for a service + """ + + async def get_vendor_secrets( + self, + service_key: ServiceKey, + service_version: ServiceVersion | Literal["latest"], + product_name: ProductName, + ) -> dict[str, VendorSecret]: + """Fetches vendor secrets for a service using normalized names""" + async with self.db_engine.connect() as conn: + vendor_secrets = await get_vendor_secrets( + conn, + product_name=product_name, + vendor_service_key=service_key, + vendor_service_version=service_version, + normalize_names=True, + ) + assert all( # nosec + self.is_vendor_secret_identifier(key) for key in vendor_secrets + ) + + return vendor_secrets + + @classmethod + def is_vendor_secret_identifier(cls, identifier: str) -> bool: + return identifier.startswith(VENDOR_SECRET_PREFIX) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/user_preferences_frontend.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/user_preferences_frontend.py new file mode 100644 index 00000000000..e5c969f4c42 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/user_preferences_frontend.py @@ -0,0 +1,35 @@ +from models_library.products import ProductName +from models_library.user_preferences import FrontendUserPreference, PreferenceName +from models_library.users import UserID +from simcore_postgres_database.utils_user_preferences import FrontendUserPreferencesRepo + +from ._base import BaseRepository + + +def _get_user_preference_name(user_id: UserID, preference_name: PreferenceName) -> str: + return f"{user_id}/{preference_name}" + + +class UserPreferencesFrontendRepository(BaseRepository): + async def get_user_preference( + self, + *, + user_id: UserID, + product_name: ProductName, + preference_class: type[FrontendUserPreference], + ) -> FrontendUserPreference | None: + async with self.db_engine.connect() as conn: + preference_payload: dict | None = await FrontendUserPreferencesRepo.load( + conn, + user_id=user_id, + preference_name=_get_user_preference_name( + user_id, preference_class.get_preference_name() + ), + product_name=product_name, + ) + + return ( + None + if preference_payload is None + else preference_class.model_validate(preference_payload) + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/users.py b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/users.py new file mode 100644 index 00000000000..80118e2f1b6 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/repositories/users.py @@ -0,0 +1,17 @@ +from models_library.users import UserID +from pydantic import EmailStr, TypeAdapter +from simcore_postgres_database.models.users import UserRole +from simcore_postgres_database.utils_users import UsersRepo + +from ._base import BaseRepository + + +class UsersRepository(BaseRepository): + async def get_user_email(self, user_id: UserID) -> 
EmailStr: + async with self.db_engine.connect() as conn: + email = await UsersRepo.get_email(conn, user_id) + return TypeAdapter(EmailStr).validate_python(email) + + async def get_user_role(self, user_id: UserID) -> UserRole: + async with self.db_engine.connect() as conn: + return await UsersRepo().get_role(conn, user_id=user_id) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/db/tables.py b/services/director-v2/src/simcore_service_director_v2/modules/db/tables.py index 52021907af0..03a6a87b97b 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/db/tables.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/db/tables.py @@ -1,6 +1,6 @@ -from simcore_postgres_database.models.comp_pipeline import comp_pipeline +from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline from simcore_postgres_database.models.comp_runs import comp_runs -from simcore_postgres_database.models.comp_tasks import NodeClass, StateType, comp_tasks +from simcore_postgres_database.models.comp_tasks import NodeClass, comp_tasks from simcore_postgres_database.models.groups import user_to_groups from simcore_postgres_database.models.groups_extra_properties import ( groups_extra_properties, diff --git a/services/director-v2/src/simcore_service_director_v2/modules/director_v0.py b/services/director-v2/src/simcore_service_director_v2/modules/director_v0.py index 9ca20d4208f..8b8d1046812 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/director_v0.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/director_v0.py @@ -1,61 +1,59 @@ -""" Module that takes care of communications with director v0 service +"""Module that takes care of communications with director v0 service""" - -""" import logging -import urllib.parse from dataclasses import dataclass -from typing import Any, Optional, cast +from typing import Any, cast import httpx import yarl -from fastapi import FastAPI, HTTPException, Request, Response -from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.service_settings_labels import SimcoreServiceLabels -from models_library.services import ( - ServiceDockerData, - ServiceKey, - ServiceKeyVersion, - ServiceVersion, +from fastapi import FastAPI, HTTPException, status +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, ) +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID from models_library.users import UserID +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from servicelib.logging_utils import log_decorator +from settings_library.director_v0 import DirectorV0Settings +from settings_library.tracing import TracingSettings -# Module's business logic --------------------------------------------- -from starlette import status -from starlette.datastructures import URL - -from ..core.settings import DirectorV0Settings -from ..models.schemas.dynamic_services import RunningDynamicServiceDetails -from ..models.schemas.services import ServiceExtras from ..utils.client_decorators import handle_errors, handle_retry from ..utils.clients import unenvelope_or_raise_error -from ..utils.logging_utils import log_decorator logger = logging.getLogger(__name__) # Module's setup logic --------------------------------------------- -def setup(app: FastAPI, settings: Optional[DirectorV0Settings]): - if not settings: - settings = 
DirectorV0Settings() +def setup( + app: FastAPI, + director_v0_settings: DirectorV0Settings | None, + tracing_settings: TracingSettings | None, +): + if not director_v0_settings: + director_v0_settings = DirectorV0Settings() def on_startup() -> None: + client = httpx.AsyncClient( + base_url=f"{director_v0_settings.endpoint}", + timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, + ) + if tracing_settings: + setup_httpx_client_tracing(client=client) DirectorV0Client.create( app, - client=httpx.AsyncClient( - base_url=f"{settings.endpoint}", - timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, - ), + client=client, + ) + logger.debug( + "created client for director-v0: %s", director_v0_settings.endpoint ) - logger.debug("created client for director-v0: %s", settings.endpoint) async def on_shutdown() -> None: client = DirectorV0Client.instance(app).client await client.aclose() del client - logger.debug("delete client for director-v0: %s", settings.endpoint) + logger.debug("delete client for director-v0: %s", director_v0_settings.endpoint) app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) @@ -66,93 +64,36 @@ class DirectorV0Client: client: httpx.AsyncClient @classmethod - def create(cls, app: FastAPI, **kwargs): + def create(cls, app: FastAPI, **kwargs) -> "DirectorV0Client": app.state.director_v0_client = cls(**kwargs) return cls.instance(app) @classmethod - def instance(cls, app: FastAPI): - return app.state.director_v0_client + def instance(cls, app: FastAPI) -> "DirectorV0Client": + client: DirectorV0Client = app.state.director_v0_client + return client @handle_errors("Director", logger) @handle_retry(logger) - async def request(self, method: str, tail_path: str, **kwargs) -> httpx.Response: + async def _request(self, method: str, tail_path: str, **kwargs) -> httpx.Response: return await self.client.request(method, tail_path, **kwargs) - async def forward(self, request: Request, response: Response) -> Response: - url_tail = URL( - path=request.url.path.replace("/v0", ""), - fragment=request.url.fragment, - ) - body: bytes = await request.body() - - resp = await self.client.request( - request.method, - str(url_tail), - params=dict(request.query_params), - content=body, - headers=dict(request.headers), - ) - - # Prepared response - response.body = resp.content - response.status_code = resp.status_code - response.headers.update(resp.headers) - - # NOTE: the response is NOT validated! 
- return response - - @log_decorator(logger=logger) - async def get_service_details( - self, service: ServiceKeyVersion - ) -> ServiceDockerData: - resp = await self.request( - "GET", f"/services/{urllib.parse.quote_plus(service.key)}/{service.version}" - ) - if resp.status_code == status.HTTP_200_OK: - data = cast(list[dict[str, Any]], unenvelope_or_raise_error(resp)) - return ServiceDockerData.parse_obj(data[0]) - raise HTTPException(status_code=resp.status_code, detail=resp.content) - - @log_decorator(logger=logger) - async def get_service_extras( - self, service_key: ServiceKey, service_version: ServiceVersion - ) -> ServiceExtras: - resp = await self.request( - "GET", - f"/service_extras/{urllib.parse.quote_plus(service_key)}/{service_version}", - ) - if resp.status_code == status.HTTP_200_OK: - return ServiceExtras.parse_obj(unenvelope_or_raise_error(resp)) - raise HTTPException(status_code=resp.status_code, detail=resp.content) - @log_decorator(logger=logger) async def get_running_service_details( self, service_uuid: NodeID ) -> RunningDynamicServiceDetails: - resp = await self.request("GET", f"running_interactive_services/{service_uuid}") + resp = await self._request( + "GET", f"running_interactive_services/{service_uuid}" + ) if resp.status_code == status.HTTP_200_OK: - return RunningDynamicServiceDetails.parse_obj( + return RunningDynamicServiceDetails.model_validate( unenvelope_or_raise_error(resp) ) raise HTTPException(status_code=resp.status_code, detail=resp.content) - @log_decorator(logger=logger) - async def get_service_labels( - self, service: ServiceKeyVersion - ) -> SimcoreServiceLabels: - resp = await self.request( - "GET", - f"services/{urllib.parse.quote_plus(service.key)}/{service.version}/labels", - ) - resp.raise_for_status() - if resp.status_code == status.HTTP_200_OK: - return SimcoreServiceLabels.parse_obj(unenvelope_or_raise_error(resp)) - raise HTTPException(status_code=resp.status_code, detail=resp.content) - @log_decorator(logger=logger) async def get_running_services( - self, user_id: Optional[UserID] = None, project_id: Optional[ProjectID] = None + self, user_id: UserID | None = None, project_id: ProjectID | None = None ) -> list[RunningDynamicServiceDetails]: query_params = {} if user_id is not None: @@ -161,7 +102,7 @@ async def get_running_services( query_params["study_id"] = f"{project_id}" request_url = yarl.URL("running_interactive_services").with_query(query_params) - resp = await self.request("GET", str(request_url)) + resp = await self._request("GET", str(request_url)) resp.raise_for_status() if resp.status_code == status.HTTP_200_OK: diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_services.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_services.py index 40352833dca..acbc08849a6 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_services.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_services.py @@ -2,28 +2,30 @@ """ + import logging from dataclasses import dataclass import httpx -from fastapi import FastAPI, Response +from fastapi import FastAPI +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from settings_library.tracing import TracingSettings -from ..core.settings import DynamicServicesSettings from ..utils.client_decorators import handle_errors, handle_retry logger = logging.getLogger(__name__) -def setup(app: FastAPI, settings: DynamicServicesSettings): - if not settings: - settings = 
DynamicServicesSettings() - +def setup(app: FastAPI, tracing_settings: TracingSettings | None) -> None: def on_startup() -> None: + client = httpx.AsyncClient( + timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT + ) + if tracing_settings: + setup_httpx_client_tracing(client=client) ServicesClient.create( app, - client=httpx.AsyncClient( - timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT - ), + client=client, ) async def on_shutdown() -> None: @@ -40,15 +42,16 @@ class ServicesClient: client: httpx.AsyncClient @classmethod - def create(cls, app: FastAPI, **kwargs): + def create(cls, app: FastAPI, **kwargs) -> "ServicesClient": app.state.dynamic_services_client = cls(**kwargs) return cls.instance(app) @classmethod - def instance(cls, app: FastAPI): - return app.state.dynamic_services_client + def instance(cls, app: FastAPI) -> "ServicesClient": + client: ServicesClient = app.state.dynamic_services_client + return client @handle_errors("DynamicService", logger) @handle_retry(logger) - async def request(self, method: str, tail_path: str, **kwargs) -> Response: + async def request(self, method: str, tail_path: str, **kwargs) -> httpx.Response: return await self.client.request(method, tail_path, **kwargs) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/__init__.py index 5be08e96d06..04634602dda 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/__init__.py @@ -6,3 +6,5 @@ """ from .module_setup import setup + +__all__: tuple[str, ...] = ("setup",) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/_namespace.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/_namespace.py index caa43a669d6..37dc914451c 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/_namespace.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/_namespace.py @@ -1,6 +1,7 @@ -from models_library.projects_nodes import NodeID - -from ...models.schemas.constants import DYNAMIC_SIDECAR_SERVICE_PREFIX +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) +from models_library.projects_nodes_io import NodeID def get_compose_namespace(node_uuid: NodeID) -> str: diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/__init__.py index a116e134865..444e3798643 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/__init__.py @@ -1,19 +1,17 @@ -from ._errors import BaseClientHTTPError, ClientHttpError, UnexpectedStatusError from ._public import ( - DynamicSidecarClient, - get_dynamic_sidecar_client, + SidecarsClient, get_dynamic_sidecar_service_health, + get_sidecars_client, + remove_sidecars_client, setup, shutdown, ) __all__: tuple[str, ...] 
= ( - "BaseClientHTTPError", - "ClientHttpError", - "DynamicSidecarClient", - "get_dynamic_sidecar_client", "get_dynamic_sidecar_service_health", + "get_sidecars_client", + "remove_sidecars_client", "setup", "shutdown", - "UnexpectedStatusError", + "SidecarsClient", ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_base.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_base.py deleted file mode 100644 index 47701abd1d2..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_base.py +++ /dev/null @@ -1,166 +0,0 @@ -import asyncio -import functools -import inspect -import logging -from typing import Any, Awaitable, Callable, Optional - -from httpx import AsyncClient, ConnectError, HTTPError, PoolTimeout, Response -from httpx._types import TimeoutTypes, URLTypes -from tenacity import RetryCallState -from tenacity._asyncio import AsyncRetrying -from tenacity.before_sleep import before_sleep_log -from tenacity.retry import retry_if_exception_type -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_exponential - -from ._errors import ClientHttpError, UnexpectedStatusError, _WrongReturnType - -logger = logging.getLogger(__name__) - - -def _log_pool_status(client: AsyncClient, event_name: str) -> None: - # pylint: disable=protected-access - logger.warning( - "Pool status @ '%s': requests(%s)=%s, connections(%s)=%s", - event_name.upper(), - len(client._transport._pool._requests), - [ - (id(r), r.request.method, r.request.url, r.request.headers) - for r in client._transport._pool._requests - ], - len(client._transport._pool.connections), - [(id(c), c.__dict__) for c in client._transport._pool.connections], - ) - - -def _after_log(log: logging.Logger) -> Callable[[RetryCallState], None]: - def log_it(retry_state: RetryCallState) -> None: - # pylint: disable=protected-access - - assert retry_state.outcome # nosec - e = retry_state.outcome.exception() - assert isinstance(e, HTTPError) # nosec - log.error( - "Request timed-out after %s attempts with an unexpected error: '%s':%s", - retry_state.attempt_number, - f"{e.request=}", - f"{e=}", - ) - - return log_it - - -def retry_on_errors( - request_func: Callable[..., Awaitable[Response]] -) -> Callable[..., Awaitable[Response]]: - """ - Will retry the request on `ConnectError` and `PoolTimeout`. 
- Also wraps `httpx.HTTPError` - raises: - - `ClientHttpError` - """ - assert asyncio.iscoroutinefunction(request_func) - - RETRY_ERRORS = (ConnectError, PoolTimeout) - - @functools.wraps(request_func) - async def request_wrapper(zelf: "BaseThinClient", *args, **kwargs) -> Response: - # pylint: disable=protected-access - try: - async for attempt in AsyncRetrying( - stop=stop_after_delay(zelf.request_timeout), - wait=wait_exponential(min=1), - retry=retry_if_exception_type(RETRY_ERRORS), - before_sleep=before_sleep_log(logger, logging.WARNING), - after=_after_log(logger), - reraise=True, - ): - with attempt: - r: Response = await request_func(zelf, *args, **kwargs) - return r - except HTTPError as e: - if isinstance(e, PoolTimeout): - _log_pool_status(zelf.client, "pool timeout") - raise ClientHttpError(e) from e - - return request_wrapper - - -def expect_status(expected_code: int): - """ - raises an `UnexpectedStatusError` if the request's status is different - from `expected_code` - NOTE: always apply after `retry_on_errors` - - raises: - - `UnexpectedStatusError` - - `ClientHttpError` - """ - - def decorator( - request_func: Callable[..., Awaitable[Response]] - ) -> Callable[..., Awaitable[Response]]: - assert asyncio.iscoroutinefunction(request_func) - - @functools.wraps(request_func) - async def request_wrapper(zelf: "BaseThinClient", *args, **kwargs) -> Response: - response = await request_func(zelf, *args, **kwargs) - if response.status_code != expected_code: - raise UnexpectedStatusError(response, expected_code) - - return response - - return request_wrapper - - return decorator - - -class BaseThinClient: - SKIP_METHODS: set[str] = {"close"} - - def __init__( - self, - *, - request_timeout: int, - base_url: Optional[URLTypes] = None, - timeout: Optional[TimeoutTypes] = None, - ) -> None: - self.request_timeout: int = request_timeout - - client_args: dict[str, Any] = { - # NOTE: the default httpx pool limit configurations look good - # https://www.python-httpx.org/advanced/#pool-limit-configuration - # instruct the remote uvicorn web server to close the connections - # https://www.uvicorn.org/server-behavior/#http-headers - "headers": { - "Connection": "Close", - } - } - if base_url: - client_args["base_url"] = base_url - if timeout: - client_args["timeout"] = timeout - self.client = AsyncClient(**client_args) - - # ensure all user defined public methods return `httpx.Response` - # NOTE: ideally these checks should be ran at import time! 
- public_methods = [ - t[1] - for t in inspect.getmembers(self, predicate=inspect.ismethod) - if not (t[0].startswith("_") or t[0] in self.SKIP_METHODS) - ] - - for method in public_methods: - signature = inspect.signature(method) - if signature.return_annotation != Response: - raise _WrongReturnType(method, signature.return_annotation) - - async def close(self) -> None: - _log_pool_status(self.client, "closing") - await self.client.aclose() - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_t, exc_v, exc_tb): - await self.close() diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_errors.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_errors.py deleted file mode 100644 index e867005af65..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_errors.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Exception hierarchy: - -* BaseClientError - x BaseRequestError - + ClientHttpError - + UnexpectedStatusError - x WrongReturnType -""" - -from httpx import Response - - -class BaseClientError(Exception): - """ - Used as based for all the raised errors - """ - - -class _WrongReturnType(BaseClientError): - """ - used internally to signal the user that the defined method - has an invalid return time annotation - """ - - def __init__(self, method, return_annotation) -> None: - super().__init__( - ( - f"{method=} should return an instance " - f"of {Response}, not '{return_annotation}'!" - ) - ) - - -class BaseClientHTTPError(BaseClientError): - """Base class to wrap all http related client errors""" - - -class ClientHttpError(BaseClientHTTPError): - """used to captures all httpx.HttpError""" - - def __init__(self, error: Exception) -> None: - super().__init__() - self.error: Exception = error - - -class UnexpectedStatusError(BaseClientHTTPError): - """raised when the status of the request is not the one it was expected""" - - def __init__(self, response: Response, expecting: int) -> None: - message = ( - f"Expected status: {expecting}, got {response.status_code} for: {response.url}: " - f"headers={response.headers}, body='{response.text}'" - ) - super().__init__(message) - self.response = response diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_public.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_public.py index 5e22f18833d..5945e07b8e3 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_public.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_public.py @@ -1,13 +1,27 @@ import logging from collections import deque +from collections.abc import Coroutine from functools import cached_property -from typing import Any, Final, Optional +from typing import Any, Final from fastapi import FastAPI, status from httpx import AsyncClient +from models_library.api_schemas_dynamic_sidecar.containers import ( + ActivityInfo, + ActivityInfoOrNone, +) +from models_library.basic_types import PortInt from models_library.projects import ProjectID from models_library.projects_networks import DockerNetworkAlias +from models_library.projects_nodes_io import NodeID +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from models_library.services_types import ServicePortKey +from models_library.sidecar_volumes import VolumeCategory, 
VolumeStatus from pydantic import AnyHttpUrl, PositiveFloat +from servicelib.fastapi.http_client_thin import ( + BaseHttpClientError, + UnexpectedStatusError, +) from servicelib.fastapi.long_running_tasks.client import ( Client, ProgressCallback, @@ -16,39 +30,55 @@ TaskId, periodic_task_result, ) +from servicelib.logging_utils import log_context, log_decorator from servicelib.utils import logged_gather -from simcore_service_director_v2.core.settings import DynamicSidecarSettings -from ....models.schemas.dynamic_services import SchedulerData +from ....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from ....models.dynamic_services_scheduler import SchedulerData from ....modules.dynamic_sidecar.docker_api import get_or_create_networks_ids -from ....utils.logging_utils import log_decorator from ..errors import EntrypointContainerNotFoundError -from ._errors import BaseClientHTTPError, UnexpectedStatusError -from ._thin import ThinDynamicSidecarClient +from ._thin import ThinSidecarsClient -STATUS_POLL_INTERVAL: Final[PositiveFloat] = 1 +_logger = logging.getLogger(__name__) -logger = logging.getLogger(__name__) +_STATUS_POLL_INTERVAL: Final[PositiveFloat] = 1 async def _debug_progress_callback( - message: ProgressMessage, percent: ProgressPercent, task_id: TaskId + message: ProgressMessage, percent: ProgressPercent | None, task_id: TaskId ) -> None: - logger.debug("%s: %.2f %s", task_id, percent, message) + _logger.debug("%s: %.2f %s", task_id, percent, message) -class DynamicSidecarClient: +class SidecarsClient: # pylint: disable=too-many-public-methods + """ + API client used for talking with: + - dynamic-sidecar + - caddy proxy + """ + def __init__(self, app: FastAPI): self._app = app - self._thin_client: ThinDynamicSidecarClient = ThinDynamicSidecarClient(app) + self._thin_client = ThinSidecarsClient(app) + + async def teardown(self) -> None: + await self._thin_client.teardown_client() + + async def setup(self) -> None: + await self._thin_client.setup_client() @cached_property def _async_client(self) -> AsyncClient: return self._thin_client.client @cached_property - def _dynamic_sidecar_settings(self) -> DynamicSidecarSettings: - return self._app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + def _dynamic_services_scheduler_settings(self) -> DynamicServicesSchedulerSettings: + settings: DynamicServicesSchedulerSettings = ( + self._app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + return settings async def is_healthy( self, dynamic_sidecar_endpoint: AnyHttpUrl, *, with_retry: bool = True @@ -62,8 +92,9 @@ async def is_healthy( response = await self._thin_client.get_health_no_retry( dynamic_sidecar_endpoint ) - return response.json()["is_healthy"] - except BaseClientHTTPError: + result: bool = response.json()["is_healthy"] + return result + except BaseHttpClientError: return False async def containers_inspect( @@ -76,9 +107,10 @@ async def containers_inspect( response = await self._thin_client.get_containers( dynamic_sidecar_endpoint, only_status=False ) - return response.json() + result: dict[str, Any] = response.json() + return result - @log_decorator(logger=logger) + @log_decorator(logger=_logger) async def containers_docker_status( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> dict[str, dict[str, str]]: @@ -86,27 +118,26 @@ async def containers_docker_status( response = await self._thin_client.get_containers( dynamic_sidecar_endpoint, only_status=True ) - return response.json() + result: dict[str, dict[str, str]] = 
response.json() + return result except UnexpectedStatusError: return {} - @log_decorator(logger=logger) - async def disable_service_outputs_watcher( - self, dynamic_sidecar_endpoint: AnyHttpUrl - ) -> None: - await self._thin_client.patch_containers_outputs_watcher( - dynamic_sidecar_endpoint, is_enabled=False - ) - - @log_decorator(logger=logger) - async def enable_service_outputs_watcher( - self, dynamic_sidecar_endpoint: AnyHttpUrl + @log_decorator(logger=_logger) + async def toggle_service_ports_io( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + *, + enable_outputs: bool, + enable_inputs: bool, ) -> None: - await self._thin_client.patch_containers_outputs_watcher( - dynamic_sidecar_endpoint, is_enabled=True + await self._thin_client.patch_containers_ports_io( + dynamic_sidecar_endpoint, + enable_outputs=enable_outputs, + enable_inputs=enable_inputs, ) - @log_decorator(logger=logger) + @log_decorator(logger=_logger) async def service_outputs_create_dirs( self, dynamic_sidecar_endpoint: AnyHttpUrl, outputs_labels: dict[str, Any] ) -> None: @@ -114,7 +145,7 @@ async def service_outputs_create_dirs( dynamic_sidecar_endpoint, outputs_labels=outputs_labels ) - @log_decorator(logger=logger) + @log_decorator(logger=_logger) async def get_entrypoint_container_name( self, dynamic_sidecar_endpoint: AnyHttpUrl, dynamic_sidecar_network_name: str ) -> str: @@ -128,11 +159,15 @@ async def get_entrypoint_container_name( dynamic_sidecar_endpoint, dynamic_sidecar_network_name=dynamic_sidecar_network_name, ) - return response.json() + container_name: str = response.json() + return container_name except UnexpectedStatusError as e: - if e.response.status_code == status.HTTP_404_NOT_FOUND: - raise EntrypointContainerNotFoundError() from e - raise e + if ( + e.response.status_code # type: ignore[attr-defined] # pylint: disable=no-member # type: ignore + == status.HTTP_404_NOT_FOUND + ): + raise EntrypointContainerNotFoundError from e + raise async def _attach_container_to_network( self, @@ -170,7 +205,7 @@ async def attach_service_containers_to_project_network( containers_status = await self.containers_docker_status( dynamic_sidecar_endpoint=dynamic_sidecar_endpoint ) - except BaseClientHTTPError: + except BaseHttpClientError: # if no containers are found it is ok to skip the operations, # there are no containers to attach the network to return @@ -192,7 +227,7 @@ async def attach_service_containers_to_project_network( ) network_id = network_names_to_ids[project_network] - tasks = deque() + coroutines: deque[Coroutine] = deque() for k, container_name in enumerate(sorted_container_names): # by default we attach `alias-0`, `alias-1`, etc... 
@@ -202,7 +237,7 @@ async def attach_service_containers_to_project_network( # by definition the entrypoint container will be exposed as the `alias` aliases.append(network_alias) - tasks.append( + coroutines.append( self._attach_container_to_network( dynamic_sidecar_endpoint=dynamic_sidecar_endpoint, container_id=container_name, @@ -211,7 +246,7 @@ async def attach_service_containers_to_project_network( ) ) - await logged_gather(*tasks) + await logged_gather(*coroutines) async def detach_service_containers_from_project_network( self, @@ -224,7 +259,7 @@ async def detach_service_containers_from_project_network( containers_status = await self.containers_docker_status( dynamic_sidecar_endpoint=dynamic_sidecar_endpoint ) - except BaseClientHTTPError: + except BaseHttpClientError: # if no containers are found it is ok to skip the operations, # there are no containers to detach the network from return @@ -245,11 +280,20 @@ async def detach_service_containers_from_project_network( ] ) + async def submit_docker_compose_spec( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + compose_spec: str, + ) -> None: + await self._thin_client.post_containers_compose_spec( + dynamic_sidecar_endpoint, compose_spec=compose_spec + ) + def _get_client(self, dynamic_sidecar_endpoint: AnyHttpUrl) -> Client: return Client( app=self._app, async_client=self._async_client, - base_url=dynamic_sidecar_endpoint, + base_url=f"{dynamic_sidecar_endpoint}", ) async def _await_for_result( @@ -257,40 +301,41 @@ async def _await_for_result( task_id: TaskId, dynamic_sidecar_endpoint: AnyHttpUrl, task_timeout: PositiveFloat, - progress_callback: Optional[ProgressCallback] = None, - ) -> Optional[Any]: + progress_callback: ProgressCallback | None = None, + ) -> Any | None: async with periodic_task_result( self._get_client(dynamic_sidecar_endpoint), task_id, task_timeout=task_timeout, progress_callback=progress_callback, - status_poll_interval=STATUS_POLL_INTERVAL, + status_poll_interval=_STATUS_POLL_INTERVAL, ) as result: - logger.debug("Task %s finished", task_id) + _logger.debug("Task %s finished", task_id) return result async def create_containers( self, dynamic_sidecar_endpoint: AnyHttpUrl, - compose_spec: str, - progress_callback: Optional[ProgressCallback] = None, + metrics_params: CreateServiceMetricsAdditionalParams, + progress_callback: ProgressCallback | None = None, ) -> None: response = await self._thin_client.post_containers_tasks( - dynamic_sidecar_endpoint, compose_spec=compose_spec + dynamic_sidecar_endpoint, + metrics_params=metrics_params, ) task_id: TaskId = response.json() await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START, progress_callback, ) async def stop_service( self, dynamic_sidecar_endpoint: AnyHttpUrl, - progress_callback: Optional[ProgressCallback] = None, + progress_callback: ProgressCallback | None = None, ) -> None: response = await self._thin_client.post_containers_tasks_down( dynamic_sidecar_endpoint @@ -300,44 +345,63 @@ async def stop_service( await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_WAIT_FOR_SERVICE_TO_STOP, progress_callback, ) - async def restore_service_state(self, dynamic_sidecar_endpoint: AnyHttpUrl) -> None: + async def restore_service_state(self, 
dynamic_sidecar_endpoint: AnyHttpUrl) -> int: response = await self._thin_client.post_containers_tasks_state_restore( dynamic_sidecar_endpoint ) task_id: TaskId = response.json() + result: Any | None = await self._await_for_result( + task_id, + dynamic_sidecar_endpoint, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), + _debug_progress_callback, + ) + assert isinstance(result, int) # nosec + return result + + async def pull_user_services_images( + self, dynamic_sidecar_endpoint: AnyHttpUrl + ) -> None: + response = await self._thin_client.post_containers_images_pull( + dynamic_sidecar_endpoint + ) + task_id: TaskId = response.json() + await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_USER_SERVICES_PULLING_TIMEOUT, _debug_progress_callback, ) async def save_service_state( self, dynamic_sidecar_endpoint: AnyHttpUrl, - progress_callback: Optional[ProgressCallback] = None, - ) -> None: + progress_callback: ProgressCallback | None = None, + ) -> int: response = await self._thin_client.post_containers_tasks_state_save( dynamic_sidecar_endpoint ) task_id: TaskId = response.json() - await self._await_for_result( + result: Any | None = await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), progress_callback, ) + assert isinstance(result, int) # nosec + return result async def pull_service_input_ports( self, dynamic_sidecar_endpoint: AnyHttpUrl, - port_keys: Optional[list[str]] = None, + port_keys: list[ServicePortKey] | None = None, ) -> int: response = await self._thin_client.post_containers_tasks_ports_inputs_pull( dynamic_sidecar_endpoint, port_keys @@ -347,7 +411,7 @@ async def pull_service_input_ports( transferred_bytes = await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), _debug_progress_callback, ) return transferred_bytes or 0 @@ -355,24 +419,26 @@ async def pull_service_input_ports( async def pull_service_output_ports( self, dynamic_sidecar_endpoint: AnyHttpUrl, - port_keys: Optional[list[str]] = None, - ) -> None: + port_keys: list[str] | None = None, + ) -> int: response = await self._thin_client.post_containers_tasks_ports_outputs_pull( dynamic_sidecar_endpoint, port_keys ) task_id: TaskId = response.json() - await self._await_for_result( + result: Any | None = await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), _debug_progress_callback, ) + assert isinstance(result, int) # nosec + return result async def push_service_output_ports( self, dynamic_sidecar_endpoint: AnyHttpUrl, - progress_callback: Optional[ProgressCallback] = None, + progress_callback: ProgressCallback | None = None, ) -> None: response = await self._thin_client.post_containers_tasks_ports_outputs_push( dynamic_sidecar_endpoint @@ -382,7 +448,7 @@ async def push_service_output_ports( await self._await_for_result( 
task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), progress_callback, ) @@ -395,34 +461,115 @@ async def restart_containers(self, dynamic_sidecar_endpoint: AnyHttpUrl) -> None await self._await_for_result( task_id, dynamic_sidecar_endpoint, - self._dynamic_sidecar_settings.DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT, + self._dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT, _debug_progress_callback, ) + async def update_volume_state( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + volume_category: VolumeCategory, + volume_status: VolumeStatus, + ) -> None: + await self._thin_client.put_volumes( + dynamic_sidecar_endpoint, + volume_category=volume_category, + volume_status=volume_status, + ) + + async def configure_proxy( + self, + proxy_endpoint: AnyHttpUrl, + entrypoint_container_name: str, + service_port: PortInt, + ) -> None: + proxy_configuration = _get_proxy_configuration( + entrypoint_container_name, service_port + ) + await self._thin_client.proxy_config_load(proxy_endpoint, proxy_configuration) + + async def get_service_activity( + self, dynamic_sidecar_endpoint: AnyHttpUrl + ) -> ActivityInfoOrNone: + response = await self._thin_client.get_containers_activity( + dynamic_sidecar_endpoint + ) + decoded_response = response.json() + return ( + ActivityInfo.model_validate(decoded_response) if decoded_response else None + ) + + async def free_reserved_disk_space( + self, dynamic_sidecar_endpoint: AnyHttpUrl + ) -> None: + await self._thin_client.post_disk_reserved_free(dynamic_sidecar_endpoint) + + +def _get_proxy_configuration( + entrypoint_container_name: str, service_port: PortInt +) -> dict[str, Any]: + return { + # NOTE: the admin endpoint is not present any more. + # This avoids user services from being able to access it. 
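+ # NOTE (illustrative example): with entrypoint_container_name="my-entrypoint" and service_port=8888 + # (hypothetical values), the Caddy configuration below listens on 0.0.0.0:80 and reverse-proxies every + # request to the upstream "my-entrypoint:8888"; configure_proxy() pushes it via proxy_config_load().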
+ "apps": { + "http": { + "servers": { + "userservice": { + "listen": ["0.0.0.0:80"], + "routes": [ + { + "handle": [ + { + "handler": "reverse_proxy", + "upstreams": [ + { + "dial": f"{entrypoint_container_name}:{service_port}" + } + ], + } + ] + } + ], + } + } + } + }, + } + async def setup(app: FastAPI) -> None: - logger.debug("dynamic-sidecar api client setup") - app.state.dynamic_sidecar_api_client = DynamicSidecarClient(app) + with log_context(_logger, logging.DEBUG, "dynamic-sidecar api client setup"): + app.state.sidecars_api_clients = {} async def shutdown(app: FastAPI) -> None: - logger.debug("dynamic-sidecar api client closing...") - client: Optional[DynamicSidecarClient] - if client := app.state.dynamic_sidecar_api_client: - await client._thin_client.close() # pylint: disable=protected-access + with log_context(_logger, logging.DEBUG, "dynamic-sidecar api client closing..."): + await logged_gather( + *(client.teardown() for client in app.state.sidecars_api_clients.values()), + reraise=False, + ) + + +async def get_sidecars_client(app: FastAPI, node_id: str | NodeID) -> SidecarsClient: + str_node_id = f"{node_id}" + + if str_node_id not in app.state.sidecars_api_clients: + sidecars_client = SidecarsClient(app) + app.state.sidecars_api_clients[str_node_id] = sidecars_client + await sidecars_client.setup() + + client: SidecarsClient = app.state.sidecars_api_clients[str_node_id] + return client -def get_dynamic_sidecar_client(app: FastAPI) -> DynamicSidecarClient: - assert app.state.dynamic_sidecar_api_client # nosec - return app.state.dynamic_sidecar_api_client +def remove_sidecars_client(app: FastAPI, node_id: NodeID) -> None: + app.state.sidecars_api_clients.pop(f"{node_id}", None) async def get_dynamic_sidecar_service_health( app: FastAPI, scheduler_data: SchedulerData, *, with_retry: bool = True ) -> bool: - api_client = get_dynamic_sidecar_client(app) - service_endpoint = scheduler_data.endpoint + api_client = await get_sidecars_client(app, scheduler_data.node_uuid) # update service health - is_healthy = await api_client.is_healthy(service_endpoint, with_retry=with_retry) - return is_healthy + return await api_client.is_healthy(scheduler_data.endpoint, with_retry=with_retry) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_thin.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_thin.py index 42a0b83e2a5..3a3cc1c3118 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_thin.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/api_client/_thin.py @@ -1,19 +1,25 @@ -import json -import logging -from typing import Any, Optional +from typing import Any +from common_library.json_serialization import json_dumps from fastapi import FastAPI, status from httpx import Response, Timeout +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus from pydantic import AnyHttpUrl from servicelib.docker_constants import SUFFIX_EGRESS_PROXY_NAME +from servicelib.fastapi.http_client_thin import ( + BaseThinClient, + expect_status, + retry_on_errors, +) +from settings_library.tracing import TracingSettings -from ....core.settings import DynamicSidecarSettings -from ._base import BaseThinClient, expect_status, retry_on_errors +from ....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) 
-logger = logging.getLogger(__name__) - -class ThinDynamicSidecarClient(BaseThinClient): +class ThinSidecarsClient(BaseThinClient): # pylint: disable=too-many-public-methods """ NOTE: all calls can raise the following errors. - `UnexpectedStatusError` @@ -23,31 +29,35 @@ class ThinDynamicSidecarClient(BaseThinClient): API_VERSION = "v1" def __init__(self, app: FastAPI): - settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + tracing_settings: TracingSettings | None = ( + app.state.settings.DIRECTOR_V2_TRACING ) # timeouts self._health_request_timeout = Timeout(1.0, connect=1.0) self._save_restore_timeout = Timeout( - settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT, - connect=settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, + scheduler_settings.DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT.total_seconds(), + connect=scheduler_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, ) self._restart_containers_timeout = Timeout( - settings.DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT, - connect=settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, + scheduler_settings.DYNAMIC_SIDECAR_API_RESTART_CONTAINERS_TIMEOUT, + connect=scheduler_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, ) self._attach_detach_network_timeout = Timeout( - settings.DYNAMIC_SIDECAR_PROJECT_NETWORKS_ATTACH_DETACH_S, - connect=settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, + scheduler_settings.DYNAMIC_SIDECAR_PROJECT_NETWORKS_ATTACH_DETACH_S, + connect=scheduler_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, ) super().__init__( - request_timeout=settings.DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S, - timeout=Timeout( - settings.DYNAMIC_SIDECAR_API_REQUEST_TIMEOUT, - connect=settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, + total_retry_interval=scheduler_settings.DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S, + default_http_client_timeout=Timeout( + scheduler_settings.DYNAMIC_SIDECAR_API_REQUEST_TIMEOUT, + connect=scheduler_settings.DYNAMIC_SIDECAR_API_CONNECT_TIMEOUT, ), + tracing_settings=tracing_settings, ) def _get_url( @@ -58,16 +68,16 @@ def _get_url( no_api_version: bool = False, ) -> str: """formats and returns an url for the request""" - api_version = "" if no_api_version else f"/{self.API_VERSION}" + api_version = "" if no_api_version else f"{self.API_VERSION}/" return f"{dynamic_sidecar_endpoint}{api_version}{postfix}" async def _get_health_common( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/health", no_api_version=True) + url = self._get_url(dynamic_sidecar_endpoint, "health", no_api_version=True) return await self.client.get(url, timeout=self._health_request_timeout) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_200_OK) async def get_health(self, dynamic_sidecar_endpoint: AnyHttpUrl) -> Response: return await self._get_health_common(dynamic_sidecar_endpoint) @@ -78,47 +88,53 @@ async def get_health_no_retry( ) -> Response: return await self._get_health_common(dynamic_sidecar_endpoint) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_200_OK) async def get_containers( self, dynamic_sidecar_endpoint: AnyHttpUrl, *, only_status: bool ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers") - return await self.client.get(url, params=dict(only_status=only_status)) + url = self._get_url(dynamic_sidecar_endpoint, "containers") + return await self.client.get(url, 
params={"only_status": only_status}) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_204_NO_CONTENT) - async def patch_containers_outputs_watcher( - self, dynamic_sidecar_endpoint: AnyHttpUrl, *, is_enabled: bool + async def patch_containers_ports_io( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + *, + enable_outputs: bool, + enable_inputs: bool, ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers/directory-watcher") - return await self.client.patch(url, json=dict(is_enabled=is_enabled)) + url = self._get_url(dynamic_sidecar_endpoint, "containers/ports/io") + return await self.client.patch( + url, json={"enable_outputs": enable_outputs, "enable_inputs": enable_inputs} + ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_204_NO_CONTENT) async def post_containers_ports_outputs_dirs( self, dynamic_sidecar_endpoint: AnyHttpUrl, *, outputs_labels: dict[str, Any] ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers/ports/outputs/dirs") - return await self.client.post(url, json=dict(outputs_labels=outputs_labels)) + url = self._get_url(dynamic_sidecar_endpoint, "containers/ports/outputs/dirs") + return await self.client.post(url, json={"outputs_labels": outputs_labels}) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_200_OK) async def get_containers_name( self, dynamic_sidecar_endpoint: AnyHttpUrl, *, dynamic_sidecar_network_name: str ) -> Response: - filters = json.dumps( + filters = json_dumps( { "network": dynamic_sidecar_network_name, "exclude": SUFFIX_EGRESS_PROXY_NAME, } ) url = self._get_url( - dynamic_sidecar_endpoint, f"/containers/name?filters={filters}" + dynamic_sidecar_endpoint, f"containers/name?filters={filters}" ) return await self.client.get(url=url) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_204_NO_CONTENT) async def post_containers_networks_attach( self, @@ -129,15 +145,15 @@ async def post_containers_networks_attach( network_aliases: list[str], ) -> Response: url = self._get_url( - dynamic_sidecar_endpoint, f"/containers/{container_id}/networks:attach" + dynamic_sidecar_endpoint, f"containers/{container_id}/networks:attach" ) return await self.client.post( url, - json=dict(network_id=network_id, network_aliases=network_aliases), + json={"network_id": network_id, "network_aliases": network_aliases}, timeout=self._attach_detach_network_timeout, ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_204_NO_CONTENT) async def post_containers_networks_detach( self, @@ -147,81 +163,142 @@ async def post_containers_networks_detach( network_id: str, ) -> Response: url = self._get_url( - dynamic_sidecar_endpoint, f"/containers/{container_id}/networks:detach" + dynamic_sidecar_endpoint, f"containers/{container_id}/networks:detach" ) return await self.client.post( url, - json=dict(network_id=network_id), + json={"network_id": network_id}, timeout=self._attach_detach_network_timeout, ) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) - async def post_containers_tasks( - self, dynamic_sidecar_endpoint: AnyHttpUrl, *, compose_spec: str + async def post_containers_compose_spec( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + *, + compose_spec: str, ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers") - # change introduce in OAS version==1.1.0 + url = self._get_url(dynamic_sidecar_endpoint, "containers/compose-spec") return await self.client.post(url, json={"docker_compose_yaml": 
compose_spec}) - @retry_on_errors + @retry_on_errors() + @expect_status(status.HTTP_202_ACCEPTED) + async def post_containers_tasks( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + *, + metrics_params: CreateServiceMetricsAdditionalParams, + ) -> Response: + url = self._get_url(dynamic_sidecar_endpoint, "containers") + return await self.client.post( + url, json={"metrics_params": metrics_params.model_dump()} + ) + + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_down( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers:down") + url = self._get_url(dynamic_sidecar_endpoint, "containers:down") return await self.client.post(url) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_state_restore( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers/state:restore") + url = self._get_url(dynamic_sidecar_endpoint, "containers/state:restore") return await self.client.post(url) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_state_save( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers/state:save") + url = self._get_url(dynamic_sidecar_endpoint, "containers/state:save") + return await self.client.post(url) + + @retry_on_errors() + @expect_status(status.HTTP_202_ACCEPTED) + async def post_containers_images_pull( + self, dynamic_sidecar_endpoint: AnyHttpUrl + ) -> Response: + url = self._get_url(dynamic_sidecar_endpoint, "containers/images:pull") return await self.client.post(url) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_ports_inputs_pull( self, dynamic_sidecar_endpoint: AnyHttpUrl, - port_keys: Optional[list[str]] = None, + port_keys: list[str] | None = None, ) -> Response: port_keys = [] if port_keys is None else port_keys - url = self._get_url(dynamic_sidecar_endpoint, "/containers/ports/inputs:pull") + url = self._get_url(dynamic_sidecar_endpoint, "containers/ports/inputs:pull") return await self.client.post(url, json=port_keys) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_ports_outputs_pull( self, dynamic_sidecar_endpoint: AnyHttpUrl, - port_keys: Optional[list[str]] = None, + port_keys: list[str] | None = None, ) -> Response: port_keys = [] if port_keys is None else port_keys - url = self._get_url(dynamic_sidecar_endpoint, "/containers/ports/outputs:pull") + url = self._get_url(dynamic_sidecar_endpoint, "containers/ports/outputs:pull") return await self.client.post(url, json=port_keys) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_ports_outputs_push( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers/ports/outputs:push") + url = self._get_url(dynamic_sidecar_endpoint, "containers/ports/outputs:push") return await self.client.post(url) - @retry_on_errors + @retry_on_errors() @expect_status(status.HTTP_202_ACCEPTED) async def post_containers_tasks_restart( self, dynamic_sidecar_endpoint: AnyHttpUrl ) -> Response: - url = self._get_url(dynamic_sidecar_endpoint, "/containers:restart") + url = self._get_url(dynamic_sidecar_endpoint, "containers:restart") + return 
await self.client.post(url) + + @retry_on_errors() + @expect_status(status.HTTP_204_NO_CONTENT) + async def put_volumes( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + volume_category: VolumeCategory, + volume_status: VolumeStatus, + ) -> Response: + url = self._get_url(dynamic_sidecar_endpoint, f"volumes/{volume_category}") + + return await self.client.put(url, json={"status": volume_status}) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def proxy_config_load( + self, proxy_endpoint: AnyHttpUrl, proxy_configuration: dict[str, Any] + ) -> Response: + url = self._get_url(proxy_endpoint, "load", no_api_version=True) + return await self.client.post(url, json=proxy_configuration) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_containers_activity( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + ) -> Response: + url = self._get_url(dynamic_sidecar_endpoint, "containers/activity") + return await self.client.get(url) + + @retry_on_errors() + @expect_status(status.HTTP_204_NO_CONTENT) + async def post_disk_reserved_free( + self, + dynamic_sidecar_endpoint: AnyHttpUrl, + ) -> Response: + url = self._get_url(dynamic_sidecar_endpoint, "disk/reserved:free") return await self.client.post(url) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/__init__.py index 2fd7fa99031..5fb63db124b 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/__init__.py @@ -9,16 +9,13 @@ get_or_create_networks_ids, get_projects_networks_containers, get_swarm_network, - inspect_service, is_dynamic_sidecar_stack_missing, is_sidecar_running, - list_dynamic_sidecar_services, remove_dynamic_sidecar_network, remove_dynamic_sidecar_stack, try_to_remove_network, update_scheduler_data_label, ) -from ._volume import remove_pending_volume_removal_services, remove_volumes_from_node __all__: tuple[str, ...] 
= ( "are_sidecar_and_proxy_services_present", @@ -31,14 +28,10 @@ "get_or_create_networks_ids", "get_projects_networks_containers", "get_swarm_network", - "inspect_service", - "is_sidecar_running", "is_dynamic_sidecar_stack_missing", - "list_dynamic_sidecar_services", + "is_sidecar_running", "remove_dynamic_sidecar_network", "remove_dynamic_sidecar_stack", - "remove_pending_volume_removal_services", - "remove_volumes_from_node", "try_to_remove_network", "update_scheduler_data_label", ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_core.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_core.py index 72727b4145a..8a7e5d152d4 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_core.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_core.py @@ -1,29 +1,34 @@ -import json import logging -from typing import Any, Mapping, Optional, Union +import re +from collections.abc import Mapping +from typing import Any, Final import aiodocker from aiodocker.utils import clean_filters, clean_map -from fastapi import status +from common_library.json_serialization import json_dumps from fastapi.encoders import jsonable_encoder from models_library.aiodocker_api import AioDockerServiceSpec +from models_library.api_schemas_directorv2.services import ( + DYNAMIC_SIDECAR_SERVICE_PREFIX, +) +from models_library.docker import DockerNodeID, to_simcore_runtime_docker_label_key from models_library.projects import ProjectID +from models_library.projects_networks import DockerNetworkName from models_library.projects_nodes_io import NodeID -from models_library.users import UserID -from servicelib.json_serialization import json_dumps +from models_library.services_enums import ServiceState from servicelib.utils import logged_gather +from starlette import status from tenacity import TryAgain, retry -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_exponential, wait_random_exponential -from ....core.settings import DynamicSidecarSettings -from ....models.schemas.constants import ( - DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL, - DYNAMIC_SIDECAR_SERVICE_PREFIX, +from ....constants import DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL +from ....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, ) -from ....models.schemas.dynamic_services import SchedulerData, ServiceState, ServiceType +from ....models.dynamic_services_scheduler import NetworkId, SchedulerData, ServiceId from ....utils.dict_utils import get_leaf_key_paths, nested_update from ..docker_states import TASK_STATES_RUNNING, extract_task_state from ..errors import DockerServiceNotFoundError, DynamicSidecarError, GenericDockerError @@ -35,101 +40,97 @@ ServiceState.RUNNING, } - log = logging.getLogger(__name__) -async def get_swarm_network(dynamic_sidecar_settings: DynamicSidecarSettings) -> dict: +async def get_swarm_network(simcore_services_network_name: DockerNetworkName) -> dict: async with docker_client() as client: all_networks = await client.networks.list() - network_name = "_default" - if dynamic_sidecar_settings.SIMCORE_SERVICES_NETWORK_NAME: - network_name = dynamic_sidecar_settings.SIMCORE_SERVICES_NETWORK_NAME # try to find the network name (usually named STACKNAME_default) - networks = 
[ - x for x in all_networks if "swarm" in x["Scope"] and network_name in x["Name"] + networks: list[dict] = [ + x + for x in all_networks + if "swarm" in x["Scope"] and simcore_services_network_name in x["Name"] ] if not networks or len(networks) > 1: - raise DynamicSidecarError( - f"Swarm network name (searching for '*{network_name}*') is not configured." - f"Found following networks: {networks}" + msg = ( + f"Swarm network name (searching for '*{simcore_services_network_name}*') " + f"is not configured.Found following networks: {networks}" ) + raise DynamicSidecarError(msg=msg) return networks[0] -async def create_network(network_config: dict[str, Any]) -> str: +async def create_network(network_config: dict[str, Any]) -> NetworkId: async with docker_client() as client: try: docker_network = await client.networks.create(network_config) - return docker_network.id + docker_network_id: NetworkId = docker_network.id + return docker_network_id except aiodocker.exceptions.DockerError as e: network_name = network_config["Name"] # make sure the current error being trapped is network dose not exit if f"network with name {network_name} already exists" not in str(e): - raise e + raise # Fetch network name if network already exists. # The environment is trashed because there seems to be an issue # when stopping previous services. # It is not possible to immediately remove the network after - # a docker-compose down involving and external overlay network + # a docker compose down involving and external overlay network # has removed a container; it results as already attached for network_details in await client.networks.list(): if network_name == network_details["Name"]: - return network_details["Id"] + network_id: NetworkId = network_details["Id"] + return network_id # finally raise an error if a network cannot be spawned # pylint: disable=raise-missing-from - raise DynamicSidecarError( - f"Could not create or recover a network ID for {network_config}" - ) + msg = f"Could not create or recover a network ID for {network_config}" + raise DynamicSidecarError(msg=msg) from e + + +def _to_snake_case(string: str) -> str: + # Convert camelCase or PascalCase to snake_case + return re.sub(r"(? 
str: + create_service_data: AioDockerServiceSpec | dict[str, Any], +) -> ServiceId: # NOTE: ideally the argument should always be AioDockerServiceSpec # but for that we need get_dynamic_proxy_spec to return that type async with docker_client() as client: kwargs = jsonable_encoder( create_service_data, by_alias=True, exclude_unset=True ) - logging.debug("Creating service with\n%s", json.dumps(kwargs, indent=1)) + kwargs = {_to_snake_case(k): v for k, v in kwargs.items()} + + logging.debug("Creating service with\n%s", json_dumps(kwargs, indent=1)) service_start_result = await client.services.create(**kwargs) log.debug( "Started service %s with\n%s", service_start_result, - json.dumps(kwargs, indent=1), + json_dumps(kwargs, indent=1), ) if "ID" not in service_start_result: - raise DynamicSidecarError( - f"Error while starting service: {str(service_start_result)}" - ) - return service_start_result["ID"] - - -async def inspect_service(service_id: str) -> dict[str, Any]: - async with docker_client() as client: - return await client.services.inspect(service_id) + msg = f"Error while starting service: {service_start_result!s}" + raise DynamicSidecarError(msg=msg) + service_id: ServiceId = service_start_result["ID"] + return service_id -async def get_dynamic_sidecars_to_observe( - dynamic_sidecar_settings: DynamicSidecarSettings, -) -> list[SchedulerData]: +async def get_dynamic_sidecars_to_observe(swarm_stack_name: str) -> list[SchedulerData]: """called when scheduler is started to discover new services to observe""" async with docker_client() as client: - running_dynamic_sidecar_services: list[ - Mapping[str, Any] - ] = await client.services.list( - filters={ - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}" - ], - "name": [f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}"], - } + running_dynamic_sidecar_services = await _list_docker_services( + client, + node_id=None, + swarm_stack_name=swarm_stack_name, + return_only_sidecars=True, ) return [ SchedulerData.from_service_inspect(x) for x in running_dynamic_sidecar_services @@ -139,29 +140,36 @@ async def get_dynamic_sidecars_to_observe( async def _get_service_latest_task(service_id: str) -> Mapping[str, Any]: try: async with docker_client() as client: - running_services = await client.tasks.list( + service_associated_tasks = await client.tasks.list( filters={"service": f"{service_id}"} ) - if not running_services: + if not service_associated_tasks: raise DockerServiceNotFoundError(service_id=service_id) # The service might have more then one task because the # previous might have died out. # Only interested in the latest task as only one task per # service will be running. - sorted_tasks = sorted(running_services, key=lambda task: task["UpdatedAt"]) + sorted_tasks = sorted( + service_associated_tasks, + key=lambda task: task["UpdatedAt"], + ) - last_task = sorted_tasks[-1] + last_task: Mapping[str, Any] = sorted_tasks[-1] return last_task except GenericDockerError as err: - if err.original_exception.status == 404: + if ( + err.error_context()["original_exception"].status + == status.HTTP_404_NOT_FOUND + ): raise DockerServiceNotFoundError(service_id=service_id) from err raise async def get_dynamic_sidecar_placement( - service_id: str, dynamic_sidecar_settings: DynamicSidecarSettings -) -> str: + service_id: str, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, +) -> DockerNodeID: """ Waits until the service has a task in `running` state and returns it's `docker_node_id`. 
@@ -179,7 +187,7 @@ async def get_dynamic_sidecar_placement( @retry( wait=wait_random_exponential(multiplier=2, min=1, max=20), stop=stop_after_delay( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S ), ) async def _get_task_data_when_service_running(service_id: str) -> Mapping[str, Any]: @@ -191,17 +199,15 @@ async def _get_task_data_when_service_running(service_id: str) -> Mapping[str, A service_state = task["Status"]["State"] if service_state not in TASK_STATES_RUNNING: - raise TryAgain() + raise TryAgain return task task = await _get_task_data_when_service_running(service_id=service_id) - docker_node_id = task.get("NodeID", None) + docker_node_id: DockerNodeID | None = task.get("NodeID", None) if not docker_node_id: - raise DynamicSidecarError( - f"Could not find an assigned NodeID for service_id={service_id}. " - f"Last task inspect result: {task}" - ) + msg = f"Could not find an assigned NodeID for service_id={service_id}. Last task inspect result: {task}" + raise DynamicSidecarError(msg=msg) return docker_node_id @@ -212,57 +218,83 @@ async def get_dynamic_sidecar_state(service_id: str) -> tuple[ServiceState, str] return service_state, message -async def _get_dynamic_sidecar_stack_services( - node_uuid: NodeID, dynamic_sidecar_settings: DynamicSidecarSettings -) -> list[Mapping]: - filters = { - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - f"uuid={node_uuid}", - ] - } - async with docker_client() as client: - return await client.services.list(filters=filters) - - async def is_dynamic_sidecar_stack_missing( - node_uuid: NodeID, dynamic_sidecar_settings: DynamicSidecarSettings + node_uuid: NodeID, swarm_stack_name: str ) -> bool: """Check if the proxy and the dynamic-sidecar are absent""" - stack_services = await _get_dynamic_sidecar_stack_services( - node_uuid, dynamic_sidecar_settings - ) + async with docker_client() as client: + stack_services = await _list_docker_services( + client, + node_id=node_uuid, + swarm_stack_name=swarm_stack_name, + return_only_sidecars=False, + ) return len(stack_services) == 0 +_NUM_SIDECAR_STACK_SERVICES: Final[int] = 2 + + async def are_sidecar_and_proxy_services_present( - node_uuid: NodeID, dynamic_sidecar_settings: DynamicSidecarSettings + node_uuid: NodeID, swarm_stack_name: str ) -> bool: """ The dynamic-sidecar stack always expects to have 2 running services """ - stack_services = await _get_dynamic_sidecar_stack_services( - node_uuid, dynamic_sidecar_settings - ) - if len(stack_services) != 2: + async with docker_client() as client: + stack_services = await _list_docker_services( + client, + node_id=node_uuid, + swarm_stack_name=swarm_stack_name, + return_only_sidecars=False, + ) + if len(stack_services) != _NUM_SIDECAR_STACK_SERVICES: return False return True +async def _list_docker_services( + client: aiodocker.docker.Docker, + *, + node_id: NodeID | None, + swarm_stack_name: str, + return_only_sidecars: bool, +) -> list[Mapping]: + # NOTE: this is here for backward compatibility when first deploying this change. 
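get_dynamic_sidecar_placement above boils down to: poll the service's latest task until it reports a running state, then read its NodeID. A self-contained sketch of that tenacity retry loop (the 60 s timeout and the function name are illustrative stand-ins, not the module's actual settings):

    import aiodocker
    from tenacity import TryAgain, retry
    from tenacity.stop import stop_after_delay
    from tenacity.wait import wait_random_exponential

    _RUNNING_STATES = {"running"}


    @retry(
        wait=wait_random_exponential(multiplier=2, min=1, max=20),
        stop=stop_after_delay(60),  # stands in for DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S
    )
    async def wait_for_running_task(service_id: str) -> dict:
        # tenacity keeps retrying on TryAgain until the stop condition is reached
        client = aiodocker.Docker()
        try:
            tasks = await client.tasks.list(filters={"service": service_id})
            if not tasks:
                raise TryAgain  # swarm may not have scheduled the task yet
            latest = sorted(tasks, key=lambda t: t["UpdatedAt"])[-1]
            if latest["Status"]["State"] not in _RUNNING_STATES:
                raise TryAgain
            return latest
        finally:
            await client.close()

The caller then reads task.get("NodeID") from the returned mapping, as the real get_dynamic_sidecar_placement does above.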
+ # shall be removed after 1-2 releases without issues + # backwards compatibility part + + def _make_filters() -> Mapping[str, Any]: + filters = { + "label": [ + f"{to_simcore_runtime_docker_label_key('swarm_stack_name')}={swarm_stack_name}", + ], + } + if node_id: + filters["label"].append( + f"{to_simcore_runtime_docker_label_key('node_id')}={node_id}" + ) + if return_only_sidecars: + filters["name"] = [f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}"] + return filters + + services_list: list[Mapping] = await client.services.list(filters=_make_filters()) + return services_list + + async def remove_dynamic_sidecar_stack( - node_uuid: NodeID, dynamic_sidecar_settings: DynamicSidecarSettings + node_uuid: NodeID, swarm_stack_name: str ) -> None: """Removes all services from the stack, in theory there should only be 2 services""" async with docker_client() as client: - services_to_remove = await client.services.list( - filters={ - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - f"uuid={node_uuid}", - ] - } + services_to_remove = await _list_docker_services( + client, + node_id=node_uuid, + swarm_stack_name=swarm_stack_name, + return_only_sidecars=False, ) + if services_to_remove: await logged_gather( *( @@ -288,38 +320,13 @@ async def remove_dynamic_sidecar_network(network_name: str) -> bool: return False -async def list_dynamic_sidecar_services( - dynamic_sidecar_settings: DynamicSidecarSettings, - user_id: Optional[UserID] = None, - project_id: Optional[ProjectID] = None, -) -> list[dict[str, Any]]: - service_filters = { - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - ], - "name": [f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}"], - } - if user_id is not None: - service_filters["label"].append(f"user_id={user_id}") - if project_id is not None: - service_filters["label"].append(f"study_id={project_id}") - +async def is_sidecar_running(node_uuid: NodeID, swarm_stack_name: str) -> bool: async with docker_client() as client: - return await client.services.list(filters=service_filters) - - -async def is_sidecar_running( - node_uuid: NodeID, dynamic_sidecar_settings: DynamicSidecarSettings -) -> bool: - async with docker_client() as client: - sidecar_service_list = await client.services.list( - filters={ - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - f"type={ServiceType.MAIN.value}", - f"uuid={node_uuid}", - ] - } + sidecar_service_list = await _list_docker_services( + client, + node_id=node_uuid, + swarm_stack_name=swarm_stack_name, + return_only_sidecars=True, ) if len(sidecar_service_list) != 1: return False @@ -338,7 +345,8 @@ async def get_or_create_networks_ids( async def _get_id_from_name(client, network_name: str) -> str: network = await client.networks.get(network_name) network_inspect = await network.show() - return network_inspect["Id"] + network_id: str = network_inspect["Id"] + return network_id async with docker_client() as client: existing_networks_names = {x["Name"] for x in await client.networks.list()} @@ -373,7 +381,7 @@ async def _get_id_from_name(client, network_name: str) -> str: *[_get_id_from_name(client, network) for network in networks] ) - return dict(zip(networks, networks_ids)) + return dict(zip(networks, networks_ids, strict=True)) async def get_projects_networks_containers( @@ -387,14 +395,16 @@ async def get_projects_networks_containers( params = {"filters": clean_filters({"label": [f"project_id={project_id}"]})} filtered_networks = ( # pylint:disable=protected-access - await 
client.networks.docker._query_json("networks", params=params) + await client.networks.docker._query_json( # noqa: SLF001 + "networks", params=params + ) ) if not filtered_networks: return {} def _count_containers(item: dict[str, Any]) -> int: - containers: Optional[list] = item.get("Containers") + containers: list | None = item.get("Containers") return 0 if containers is None else len(containers) return {x["Name"]: _count_containers(x) for x in filtered_networks} @@ -450,7 +460,7 @@ async def _update_service_spec( include=get_leaf_key_paths(update_in_service_spec), ) - await client._query_json( # pylint: disable=protected-access + await client._query_json( # pylint: disable=protected-access # noqa: SLF001 f"services/{service_id}/update", method="POST", data=json_dumps(clean_map(updated_spec)), @@ -461,8 +471,8 @@ async def _update_service_spec( e.status == status.HTTP_500_INTERNAL_SERVER_ERROR and "out of sequence" in e.message ): - raise TryAgain() from e - raise e + raise TryAgain from e + raise async def update_scheduler_data_label(scheduler_data: SchedulerData) -> None: @@ -477,18 +487,20 @@ async def update_scheduler_data_label(scheduler_data: SchedulerData) -> None: ) except GenericDockerError as e: if e.original_exception.status == status.HTTP_404_NOT_FOUND: - log.warning( + log.info( "Skipped labels update for service '%s' which could not be found.", scheduler_data.service_name, ) -async def constrain_service_to_node(service_name: str, docker_node_id: str) -> None: +async def constrain_service_to_node( + service_name: str, docker_node_id: DockerNodeID +) -> None: await _update_service_spec( service_name, update_in_service_spec={ "TaskTemplate": { - "Placement": {"Constraints": [f"node.id == {docker_node_id}"]} + "Placement": {"Constraints": [f"node.id=={docker_node_id}"]} } }, ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_utils.py index ceb9d276c13..f625c2ea625 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_utils.py @@ -13,8 +13,7 @@ async def docker_client() -> AsyncIterator[aiodocker.docker.Docker]: client = aiodocker.Docker() yield client except aiodocker.exceptions.DockerError as e: - message = "Unexpected error from docker client" - raise GenericDockerError(message, e) from e + raise GenericDockerError(msg=f"{e.message}", original_exception=e) from e finally: if client is not None: await client.close() diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_volume.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_volume.py deleted file mode 100644 index be8218268a0..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_api/_volume.py +++ /dev/null @@ -1,161 +0,0 @@ -import logging -from datetime import datetime, timezone - -from fastapi.encoders import jsonable_encoder -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.users import UserID -from pydantic import parse_obj_as -from servicelib.docker_utils import to_datetime -from tenacity import TryAgain -from tenacity._asyncio import AsyncRetrying -from tenacity.retry import retry_if_exception_type -from 
tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -from ....core.settings import DynamicSidecarSettings -from ....models.schemas.constants import DYNAMIC_VOLUME_REMOVER_PREFIX -from ..docker_service_specs.volume_remover import ( - DockerVersion, - spec_volume_removal_service, -) -from ._utils import docker_client - -log = logging.getLogger(__name__) - -# FROM https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/ -SERVICE_FINISHED_STATES: set[str] = { - "complete", - "failed", - "shutdown", - "rejected", - "orphaned", - "remove", -} - - -async def remove_volumes_from_node( - dynamic_sidecar_settings: DynamicSidecarSettings, - volume_names: list[str], - docker_node_id: str, - user_id: UserID, - project_id: ProjectID, - node_uuid: NodeID, - *, - volume_removal_attempts: int = 15, - sleep_between_attempts_s: int = 2, -) -> bool: - """ - Starts a service at target docker node which will remove - all entries in the `volumes_names` list. - """ - - async with docker_client() as client: - # When running docker-dind makes sure to use the same image as the - # underlying docker-engine. - # Will start a container with the same version across the entire cluster. - # It is safe to assume the local docker engine version will be - # the same as the one on the targeted node. - version_request = await client._query_json( # pylint: disable=protected-access - "version", versioned_api=False - ) - docker_version: DockerVersion = parse_obj_as( - DockerVersion, version_request["Version"] - ) - - # Timeout for the runtime of the service is calculated based on the amount - # of attempts required to remove each individual volume, - # in the worst case scenario when all volumes are do not exit. - volume_removal_timeout_s = volume_removal_attempts * sleep_between_attempts_s - service_timeout_s = volume_removal_timeout_s * len(volume_names) - - service_spec = spec_volume_removal_service( - dynamic_sidecar_settings=dynamic_sidecar_settings, - docker_node_id=docker_node_id, - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - volume_names=volume_names, - docker_version=docker_version, - volume_removal_attempts=volume_removal_attempts, - sleep_between_attempts_s=sleep_between_attempts_s, - service_timeout_s=service_timeout_s, - ) - - volume_removal_service = await client.services.create( - **jsonable_encoder(service_spec, by_alias=True, exclude_unset=True) - ) - - service_id = volume_removal_service["ID"] - try: - async for attempt in AsyncRetrying( - stop=stop_after_delay(service_timeout_s), - wait=wait_fixed(0.5), - retry=retry_if_exception_type(TryAgain), - reraise=True, - ): - with attempt: - tasks = await client.tasks.list(filters={"service": service_id}) - # NOTE: the service will have at most 1 task, since there is no restart - # policy present - if len(tasks) != 1: - # Docker swarm needs a bit of time to startup the tasks - raise TryAgain( - f"Expected 1 task for service {service_id}, found {tasks=}" - ) - - task = tasks[0] - task_status = task["Status"] - log.debug("Service %s, %s", service_id, f"{task_status=}") - task_state = task_status["State"] - if task_state not in SERVICE_FINISHED_STATES: - raise TryAgain(f"Waiting for task to finish: {task_status=}") - - if not ( - task_state == "complete" - and task_status["ContainerStatus"]["ExitCode"] == 0 - ): - log.error( - "Service %s status: %s", service_id, f"{task_status=}" - ) - # NOTE: above implies the volumes will remain in the system and - # have to be manually removed. 
- return False - finally: - # NOTE: services created in swarm need to be removed, there is no way - # to instruct swarm to remove a service after it's created - # container/task finished - await client.services.delete(service_id) - - return True - - -async def remove_pending_volume_removal_services( - dynamic_sidecar_settings: DynamicSidecarSettings, -) -> None: - """ - Removes all pending volume removal services. Such a service - will be considered pending if it is running for longer than its - intended duration (defined in the `service_timeout_s` label). - """ - service_filters = { - "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - ], - "name": [f"{DYNAMIC_VOLUME_REMOVER_PREFIX}"], - } - async with docker_client() as client: - volume_removal_services = await client.services.list(filters=service_filters) - - for volume_removal_service in volume_removal_services: - service_timeout_s = int( - volume_removal_service["Spec"]["Labels"]["service_timeout_s"] - ) - created_at = to_datetime(volume_removal_services[0]["CreatedAt"]) - time_diff = datetime.now(tz=timezone.utc) - created_at - service_timed_out = time_diff.seconds > (service_timeout_s * 10) - if service_timed_out: - service_id = volume_removal_service["ID"] - service_name = volume_removal_service["Spec"]["Name"] - log.debug("Removing pending volume removal service %s", service_name) - await client.services.delete(service_id) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_egress_config.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_egress_config.py index 946270abaac..53dad10c062 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_egress_config.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_egress_config.py @@ -5,15 +5,16 @@ import yaml from models_library.basic_types import PortInt +from models_library.osparc_variable_identifier import raise_if_unresolved from models_library.service_settings_labels import ( - ComposeSpecLabel, - NATRule, + ComposeSpecLabelDict, SimcoreServiceLabels, ) -from orderedset import OrderedSet +from models_library.service_settings_nat_rule import NATRule +from ordered_set import OrderedSet from servicelib.docker_constants import SUFFIX_EGRESS_PROXY_NAME -from ...core.settings import DynamicSidecarEgressSettings +from ...core.dynamic_services_settings.egress_proxy import EgressProxySettings _DEFAULT_USER_SERVICES_NETWORK_WITH_INTERNET_NAME: Final[str] = "with-internet" @@ -161,9 +162,9 @@ def _get_egress_proxy_network_name(egress_proxy_name: str) -> str: def _add_egress_proxy_network( - service_spec: ComposeSpecLabel, egress_proxy_name: str + service_spec: ComposeSpecLabelDict, egress_proxy_name: str ) -> None: - networks = service_spec.get("networks") + networks = service_spec.get("networks", {}) networks[_get_egress_proxy_network_name(egress_proxy_name)] = {"internal": True} service_spec["networks"] = networks @@ -171,7 +172,7 @@ def _add_egress_proxy_network( def _get_egress_proxy_service_config( egress_proxy_rules: OrderedSet[_ProxyRule], network_with_internet: str, - egress_proxy_settings: DynamicSidecarEgressSettings, + egress_proxy_settings: EgressProxySettings, egress_proxy_name: str, ) -> dict[str, Any]: network_aliases: set[str] = {x[0].hostname for x in egress_proxy_rules} @@ -211,7 +212,7 @@ def _get_egress_proxy_dns_port_rules( ) -> 
list[OrderedSet[_ProxyRule]]: """returns a list of sets of rules to be applied to each proxy""" # 1. map all ports to hostnames to compute overlapping ports per proxy - port_to_hostname: dict[PortInt, set[_HostData]] = {} + port_to_hostname: dict[PortInt, OrderedSet[_HostData]] = {} for host_permit_list_policy in all_host_permit_list_policies: for port in host_permit_list_policy.iter_tcp_ports(): @@ -219,9 +220,13 @@ def _get_egress_proxy_dns_port_rules( port_to_hostname[port] = OrderedSet() port_to_hostname[port].add( _HostData( - hostname=host_permit_list_policy.hostname, - dns_resolver_address=host_permit_list_policy.dns_resolver.address, - dns_resolver_port=host_permit_list_policy.dns_resolver.port, + hostname=raise_if_unresolved(host_permit_list_policy.hostname), + dns_resolver_address=raise_if_unresolved( + host_permit_list_policy.dns_resolver.address + ), + dns_resolver_port=raise_if_unresolved( + host_permit_list_policy.dns_resolver.port + ), ) ) @@ -239,11 +244,11 @@ def _get_egress_proxy_dns_port_rules( grouped_proxy_rules.append(proxy_rules) - return list(sorted(grouped_proxy_rules)) + return sorted(grouped_proxy_rules) def _allow_outgoing_internet( - service_spec: ComposeSpecLabel, container_name: str + service_spec: ComposeSpecLabelDict, container_name: str ) -> None: # containers are allowed complete access to the internet by # connecting them to an isolated network (from the rest @@ -255,9 +260,9 @@ def _allow_outgoing_internet( def add_egress_configuration( - service_spec: ComposeSpecLabel, + service_spec: ComposeSpecLabelDict, simcore_service_labels: SimcoreServiceLabels, - egress_proxy_settings: DynamicSidecarEgressSettings, + egress_proxy_settings: EgressProxySettings, ) -> None: """ Each service defines rules to allow certain containers to gain access @@ -293,7 +298,7 @@ def add_egress_configuration( all_host_permit_list_policies: list[NATRule] = [] hostname_port_to_container_name: dict[tuple[str, PortInt], str] = {} - container_name_to_proxies_names: dict[str, set[set]] = {} + container_name_to_proxies_names: dict[str, set[str]] = {} for ( container_name, @@ -304,7 +309,10 @@ def add_egress_configuration( for port in host_permit_list_policy.iter_tcp_ports(): hostname_port_to_container_name[ - (host_permit_list_policy.hostname, port) + ( + raise_if_unresolved(host_permit_list_policy.hostname), + port, + ) ] = container_name # assemble proxy configuration based on all HostPermitListPolicy entries diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_specs.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_specs.py index 6d49a788b34..7ed0736d3cd 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_specs.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_compose_specs.py @@ -1,14 +1,15 @@ import logging from copy import deepcopy -from typing import Optional, Union +from typing import Any, Final, TypeAlias, TypedDict +from common_library.json_serialization import json_dumps from fastapi.applications import FastAPI -from models_library.docker import SimcoreServiceDockerLabelKeys +from models_library.docker import DockerGenericTag, StandardSimcoreDockerLabels from models_library.products import ProductName from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import ( - ComposeSpecLabel, + 
ComposeSpecLabelDict, PathMappingsLabel, SimcoreServiceLabels, ) @@ -19,23 +20,34 @@ ResourceValue, ServiceResourcesDict, ) +from models_library.services_types import ServiceRunID from models_library.users import UserID from models_library.utils.docker_compose import replace_env_vars_in_compose_spec -from servicelib.json_serialization import json_dumps +from models_library.wallets import WalletID +from pydantic import ByteSize from servicelib.resources import CPU_RESOURCE_LIMIT_KEY, MEM_RESOURCE_LIMIT_KEY from settings_library.docker_registry import RegistrySettings +from ...core.dynamic_services_settings.egress_proxy import EgressProxySettings +from ..osparc_variables.substitutions import ( + auto_inject_environments, + resolve_and_substitute_session_variables_in_model, + resolve_and_substitute_session_variables_in_specs, + substitute_vendor_secrets_in_model, + substitute_vendor_secrets_in_specs, +) from .docker_compose_egress_config import add_egress_configuration -EnvKeyEqValueList = list[str] -EnvVarsMap = dict[str, Optional[str]] +EnvKeyEqValueList: TypeAlias = list[str] +EnvVarsMap: TypeAlias = dict[str, str | None] +_COMPOSE_MAJOR_VERSION: Final[int] = 3 -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) def _update_networking_configuration( - service_spec: ComposeSpecLabel, + service_spec: ComposeSpecLabelDict, target_http_entrypoint_container: str, dynamic_sidecar_network_name: str, swarm_network_name: str, @@ -50,12 +62,14 @@ def _update_networking_configuration( networks = service_spec.get("networks", {}) # used by the proxy to contact the service http entrypoint networks[dynamic_sidecar_network_name] = { - "external": {"name": dynamic_sidecar_network_name}, + "name": dynamic_sidecar_network_name, + "external": True, "driver": "overlay", } # used by egress proxies to gain access to the internet networks[swarm_network_name] = { - "external": {"name": swarm_network_name}, + "name": swarm_network_name, + "external": True, "driver": "overlay", } service_spec["networks"] = networks @@ -67,8 +81,8 @@ def _update_networking_configuration( target_container_spec["networks"] = container_networks -class _environment_section: - """the 'environment' field in a docker-compose can be either a dict (EnvVarsMap) +class _EnvironmentSection: + """the 'environment' field in a docker-compose spec can be either a dict (EnvVarsMap) or a list of "key=value" (EnvKeyEqValueList) These helpers can resolve parsing and exporting between these formats @@ -77,14 +91,15 @@ class _environment_section: """ @staticmethod - def parse(environment: Union[EnvVarsMap, EnvKeyEqValueList]) -> EnvVarsMap: + def parse(environment: EnvVarsMap | EnvKeyEqValueList) -> EnvVarsMap: envs = {} if isinstance(environment, list): for key_eq_value in environment: assert isinstance(key_eq_value, str) # nosec - key, value, *_ = key_eq_value.split("=", maxsplit=1) + [ - None, - ] # type: ignore + key, value, *_ = key_eq_value.split("=", maxsplit=1) + [ # noqa: RUF005 + None + ] + assert key is not None # nosec envs[key] = value else: assert isinstance(environment, dict) # nosec @@ -103,33 +118,40 @@ def export_as_list(environment: EnvVarsMap) -> EnvKeyEqValueList: def _update_paths_mappings( - service_spec: ComposeSpecLabel, path_mappings: PathMappingsLabel + service_spec: ComposeSpecLabelDict, path_mappings: PathMappingsLabel ) -> None: for service_name in service_spec["services"]: service_content = service_spec["services"][service_name] - env_vars: EnvVarsMap = _environment_section.parse( + env_vars: 
EnvVarsMap = _EnvironmentSection.parse( service_content.get("environment", {}) ) env_vars["DY_SIDECAR_PATH_INPUTS"] = f"{path_mappings.inputs_path}" env_vars["DY_SIDECAR_PATH_OUTPUTS"] = f"{path_mappings.outputs_path}" - env_vars[ - "DY_SIDECAR_STATE_PATHS" - ] = f"{json_dumps( { f'{p}' for p in path_mappings.state_paths } )}" + env_vars["DY_SIDECAR_STATE_PATHS"] = ( + f"{json_dumps( { f'{p}' for p in path_mappings.state_paths } )}" + ) + + service_content["environment"] = _EnvironmentSection.export_as_list(env_vars) - service_content["environment"] = _environment_section.export_as_list(env_vars) + +class _AssignedLimits(TypedDict): + cpu: float + memory: int def _update_resource_limits_and_reservations( - service_resources: ServiceResourcesDict, service_spec: ComposeSpecLabel -) -> None: + service_resources: ServiceResourcesDict, service_spec: ComposeSpecLabelDict +) -> dict[DockerGenericTag, _AssignedLimits]: # example: '2.3' -> 2 ; '3.7' -> 3 + assigned_limits = {} docker_compose_major_version: int = int(service_spec["version"].split(".")[0]) for spec_service_key, spec in service_spec["services"].items(): if spec_service_key not in service_resources: continue + resources: ResourcesDict = service_resources[spec_service_key].resources - logger.debug("Resources for %s: %s", spec_service_key, f"{resources=}") + _logger.debug("Resources for %s: %s", spec_service_key, f"{resources=}") cpu: ResourceValue = resources["CPU"] memory: ResourceValue = resources["RAM"] @@ -138,26 +160,26 @@ def _update_resource_limits_and_reservations( mem_limits: str = "0" _NANO = 10**9 # cpu's in nano-cpu's - if docker_compose_major_version >= 3: + if docker_compose_major_version >= _COMPOSE_MAJOR_VERSION: # compos spec version 3 and beyond - deploy = spec.get("deploy", {}) - resources = deploy.get("resources", {}) - limits = resources.get("limits", {}) - reservations = resources.get("reservations", {}) + deploy: dict[str, Any] = spec.get("deploy", {}) + resources_v3: dict[str, Any] = deploy.get("resources", {}) + limits: dict[str, Any] = resources_v3.get("limits", {}) + reservations: dict[str, Any] = resources_v3.get("reservations", {}) # assign limits - limits["cpus"] = float(cpu.limit) + limits["cpus"] = f"{cpu.limit}" limits["memory"] = f"{memory.limit}" # assing reservations - reservations["cpus"] = float(cpu.reservation) + reservations["cpus"] = f"{cpu.reservation}" reservations["memory"] = f"{memory.reservation}" - resources["reservations"] = reservations - resources["limits"] = limits - deploy["resources"] = resources + resources_v3["reservations"] = reservations + resources_v3["limits"] = limits + deploy["resources"] = resources_v3 spec["deploy"] = deploy - nano_cpu_limits = limits["cpus"] + nano_cpu_limits = float(cpu.limit) mem_limits = limits["memory"] else: # compos spec version 2 @@ -187,50 +209,82 @@ def _update_resource_limits_and_reservations( environment.extend(resource_limits) spec["environment"] = environment + assigned_limits[spec_service_key] = _AssignedLimits( + cpu=nano_cpu_limits, memory=int(memory.limit) + ) + return assigned_limits + + +def _strip_service_quotas(service_spec: ComposeSpecLabelDict): + """ + When disk quotas are not supported by the node, it is required to remove + any reference from the docker-compose spec. 
+ """ + for spec in service_spec["services"].values(): + spec.pop("storage_opt", None) + def _update_container_labels( - service_spec: ComposeSpecLabel, + service_spec: ComposeSpecLabelDict, user_id: UserID, project_id: ProjectID, node_id: NodeID, simcore_user_agent: str, product_name: ProductName, + swarm_stack_name: str, + assigned_limits: dict[DockerGenericTag, _AssignedLimits], ) -> None: - for spec in service_spec["services"].values(): + default_limits = _AssignedLimits(memory=0, cpu=0) + for spec_service_key, spec in service_spec["services"].items(): labels: list[str] = spec.setdefault("labels", []) + container_limits: _AssignedLimits = assigned_limits.get( + spec_service_key, default_limits + ) - label_keys = SimcoreServiceDockerLabelKeys( - user_id=user_id, - study_id=project_id, - uuid=node_id, - simcore_user_agent=simcore_user_agent, - product_name=product_name, + label_keys = StandardSimcoreDockerLabels.model_validate( + { + "user_id": user_id, + "project_id": project_id, + "node_id": node_id, + "simcore_user_agent": simcore_user_agent, + "product_name": product_name, + "swarm_stack_name": swarm_stack_name, + "memory_limit": ByteSize(container_limits["memory"]), + "cpu_limit": container_limits["cpu"], + } ) - docker_labels = [f"{k}={v}" for k, v in label_keys.to_docker_labels().items()] + docker_labels = [ + f"{k}={v}" for k, v in label_keys.to_simcore_runtime_docker_labels().items() + ] for docker_label in docker_labels: if docker_label not in labels: labels.append(docker_label) -def assemble_spec( +async def assemble_spec( # pylint: disable=too-many-arguments # noqa: PLR0913 *, app: FastAPI, service_key: ServiceKey, service_version: ServiceVersion, paths_mapping: PathMappingsLabel, - compose_spec: Optional[ComposeSpecLabel], - container_http_entry: Optional[str], + compose_spec: ComposeSpecLabelDict | None, + container_http_entry: str | None, dynamic_sidecar_network_name: str, swarm_network_name: str, service_resources: ServiceResourcesDict, + has_quota_support: bool, simcore_service_labels: SimcoreServiceLabels, allow_internet_access: bool, product_name: ProductName, + product_api_base_url: str, user_id: UserID, project_id: ProjectID, node_id: NodeID, simcore_user_agent: str, + swarm_stack_name: str, + service_run_id: ServiceRunID, + wallet_id: WalletID | None, ) -> str: """ returns a docker-compose spec used by @@ -242,16 +296,17 @@ def assemble_spec( ) docker_compose_version = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_DOCKER_COMPOSE_VERSION + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DYNAMIC_SIDECAR_DOCKER_COMPOSE_VERSION ) - egress_proxy_settings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_EGRESS_PROXY_SETTINGS + egress_proxy_settings: EgressProxySettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR_EGRESS_PROXY_SETTINGS ) # when no compose yaml file was provided + container_name: str | None = None if compose_spec is None: - service_spec: ComposeSpecLabel = { + service_spec: ComposeSpecLabelDict = { "version": docker_compose_version, "services": { DEFAULT_SINGLE_SERVICE_NAME: { @@ -259,7 +314,7 @@ def assemble_spec( } }, } - container_name = DEFAULT_SINGLE_SERVICE_NAME + container_name = f"{DEFAULT_SINGLE_SERVICE_NAME}" else: service_spec = deepcopy(compose_spec) container_name = container_http_entry @@ -276,13 +331,37 @@ def assemble_spec( _update_paths_mappings(service_spec, paths_mapping) - _update_resource_limits_and_reservations( + assigned_limits = 
_update_resource_limits_and_reservations( service_resources=service_resources, service_spec=service_spec ) + if not has_quota_support: + _strip_service_quotas(service_spec) + if not allow_internet_access: - # NOTE: when service has no access to the internet, - # there could be some components that still require access + simcore_service_labels = await substitute_vendor_secrets_in_model( + app=app, + model=simcore_service_labels, + safe=True, + service_key=service_key, + service_version=service_version, + product_name=product_name, + ) + simcore_service_labels = await resolve_and_substitute_session_variables_in_model( + app=app, + model=simcore_service_labels, + # NOTE: at this point all OsparcIdentifiers have to be replaced + # an error will be raised otherwise + safe=False, + user_id=user_id, + product_name=product_name, + product_api_base_url=product_api_base_url, + project_id=project_id, + node_id=node_id, + service_run_id=service_run_id, + wallet_id=wallet_id, + ) + add_egress_configuration( service_spec=service_spec, simcore_service_labels=simcore_service_labels, @@ -296,12 +375,35 @@ def assemble_spec( node_id=node_id, product_name=product_name, simcore_user_agent=simcore_user_agent, + swarm_stack_name=swarm_stack_name, + assigned_limits=assigned_limits, ) - # TODO: will be used in next PR - assert product_name # nosec + # resolve service-spec + service_spec = auto_inject_environments(service_spec) + + service_spec = await substitute_vendor_secrets_in_specs( + app=app, + specs=service_spec, + safe=True, + service_key=service_key, + service_version=service_version, + product_name=product_name, + ) + service_spec = await resolve_and_substitute_session_variables_in_specs( + app=app, + specs=service_spec, + user_id=user_id, + safe=True, + product_name=product_name, + product_api_base_url=product_api_base_url, + project_id=project_id, + node_id=node_id, + service_run_id=service_run_id, + wallet_id=wallet_id, + ) - stringified_service_spec = replace_env_vars_in_compose_spec( + stringified_service_spec: str = replace_env_vars_in_compose_spec( service_spec=service_spec, replace_simcore_registry=docker_registry_settings.resolved_registry_url, replace_service_version=service_version, diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/__init__.py index 381f378a843..4c831825053 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/__init__.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/__init__.py @@ -1,6 +1,11 @@ from .proxy import get_dynamic_proxy_spec from .settings import merge_settings_before_use, update_service_params_from_settings -from .sidecar import ( - extract_service_port_from_compose_start_spec, - get_dynamic_sidecar_spec, +from .sidecar import extract_service_port_service_settings, get_dynamic_sidecar_spec + +__all__: tuple[str, ...] 
= ( + "extract_service_port_service_settings", + "get_dynamic_proxy_spec", + "get_dynamic_sidecar_spec", + "merge_settings_before_use", + "update_service_params_from_settings", ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py index 634cabb93b2..eb06fa02b79 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/proxy.py @@ -1,26 +1,33 @@ from typing import Any +from models_library.docker import StandardSimcoreDockerLabels from models_library.services_resources import ( CPU_10_PERCENT, CPU_100_PERCENT, MEMORY_50MB, MEMORY_250MB, ) -from pydantic.types import PositiveInt +from pydantic import ByteSize +from servicelib.common_headers import X_SIMCORE_USER_AGENT +from settings_library import webserver +from settings_library.utils_session import DEFAULT_SESSION_COOKIE_NAME -from ....core.settings import DynamicSidecarProxySettings, DynamicSidecarSettings -from ....models.schemas.dynamic_services import SchedulerData, ServiceType +from ....core.dynamic_services_settings import DynamicServicesSettings +from ....core.dynamic_services_settings.proxy import DynamicSidecarProxySettings +from ....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from ....core.dynamic_services_settings.sidecar import DynamicSidecarSettings +from ....models.dynamic_services_scheduler import SchedulerData from ._constants import DOCKER_CONTAINER_SPEC_RESTART_POLICY_DEFAULTS def get_dynamic_proxy_spec( scheduler_data: SchedulerData, - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_settings: DynamicServicesSettings, dynamic_sidecar_network_id: str, swarm_network_id: str, swarm_network_name: str, - entrypoint_container_name: str, - service_port: PositiveInt, ) -> dict[str, Any]: """ The Traefik proxy is the entrypoint which forwards @@ -28,6 +35,22 @@ def get_dynamic_proxy_spec( The proxy is used to create network isolation from the rest of the platform. """ + assert ( + scheduler_data.product_name is not None + ), "ONLY for legacy. 
This function should not be called with product_name==None" # nosec + + proxy_settings: DynamicSidecarProxySettings = ( + dynamic_services_settings.DYNAMIC_SIDECAR_PROXY_SETTINGS + ) + dynamic_sidecar_settings: DynamicSidecarSettings = ( + dynamic_services_settings.DYNAMIC_SIDECAR + ) + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + dynamic_services_settings.DYNAMIC_SCHEDULER + ) + webserver_settings: webserver.WebServerSettings = ( + dynamic_services_settings.WEBSERVER_SETTINGS + ) mounts = [ # docker socket needed to use the docker api @@ -38,25 +61,35 @@ def get_dynamic_proxy_spec( "ReadOnly": True, } ] - proxy_settings: DynamicSidecarProxySettings = ( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_PROXY_SETTINGS + caddy_file = ( + f"{{\n admin 0.0.0.0:{proxy_settings.DYNAMIC_SIDECAR_CADDY_ADMIN_API_PORT} \n}}" ) # expose this service on an empty port - endpint_spec = {} - if dynamic_sidecar_settings.PROXY_EXPOSE_PORT: - endpint_spec["Ports"] = [{"Protocol": "tcp", "TargetPort": 80}] + + ports = [] + if dynamic_sidecar_settings.DYNAMIC_SIDECAR_EXPOSE_PORT: + ports.append( + # server port + { + "Protocol": "tcp", + "TargetPort": proxy_settings.DYNAMIC_SIDECAR_CADDY_ADMIN_API_PORT, + } + ) + if proxy_settings.PROXY_EXPOSE_PORT: + ports.append({"Protocol": "tcp", "TargetPort": 80}) return { - "endpoint_spec": endpint_spec, + "endpoint_spec": {"Ports": ports} if ports else {}, "labels": { - # TODO: let's use a pydantic model with descriptions - "io.simcore.zone": f"{dynamic_sidecar_settings.TRAEFIK_SIMCORE_ZONE}", - "swarm_stack_name": dynamic_sidecar_settings.SWARM_STACK_NAME, - "traefik.docker.network": swarm_network_name, + "io.simcore.zone": f"{dynamic_services_scheduler_settings.TRAEFIK_SIMCORE_ZONE}", + "traefik.swarm.network": swarm_network_name, "traefik.enable": "true", + # security + f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.accesscontrolallowcredentials": "true", f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.customresponseheaders.Content-Security-Policy": f"frame-ancestors {scheduler_data.request_dns} {scheduler_data.node_uuid}.services.{scheduler_data.request_dns}", f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.accesscontrolallowmethods": "GET,OPTIONS,PUT,POST,DELETE,PATCH,HEAD", + f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.accesscontrolallowheaders": f"{X_SIMCORE_USER_AGENT},Set-Cookie", f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.accessControlAllowOriginList": ",".join( [ f"{scheduler_data.request_scheme}://{scheduler_data.request_dns}", @@ -65,17 +98,34 @@ def get_dynamic_proxy_spec( ), f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.accesscontrolmaxage": "100", f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-security-headers.headers.addvaryheader": "true", + # auth + f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-auth.forwardauth.address": f"{webserver_settings.api_base_url}/auth:check", + f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-auth.forwardauth.trustForwardHeader": "true", + f"traefik.http.middlewares.{scheduler_data.proxy_service_name}-auth.forwardauth.authResponseHeaders": f"Set-Cookie,{DEFAULT_SESSION_COOKIE_NAME}", + # routing f"traefik.http.services.{scheduler_data.proxy_service_name}.loadbalancer.server.port": "80", 
f"traefik.http.routers.{scheduler_data.proxy_service_name}.entrypoints": "http", f"traefik.http.routers.{scheduler_data.proxy_service_name}.priority": "10", - f"traefik.http.routers.{scheduler_data.proxy_service_name}.rule": f"hostregexp(`{scheduler_data.node_uuid}.services.{{host:.+}}`)", - f"traefik.http.routers.{scheduler_data.proxy_service_name}.middlewares": f"{dynamic_sidecar_settings.SWARM_STACK_NAME}_gzip@docker, {scheduler_data.proxy_service_name}-security-headers", - "type": ServiceType.DEPENDENCY.value, + f"traefik.http.routers.{scheduler_data.proxy_service_name}.rule": rf"HostRegexp(`{scheduler_data.node_uuid}\.services\.(?P.+)`)", + f"traefik.http.routers.{scheduler_data.proxy_service_name}.middlewares": ",".join( + [ + f"{dynamic_services_scheduler_settings.SWARM_STACK_NAME}_gzip@swarm", + f"{scheduler_data.proxy_service_name}-security-headers", + f"{scheduler_data.proxy_service_name}-auth", + ] + ), "dynamic_type": "dynamic-sidecar", # tagged as dynamic service - "study_id": f"{scheduler_data.project_id}", - "user_id": f"{scheduler_data.user_id}", - "uuid": f"{scheduler_data.node_uuid}", # needed for removal when project is closed - }, + } + | StandardSimcoreDockerLabels( + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + product_name=scheduler_data.product_name, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + memory_limit=ByteSize(MEMORY_50MB), + cpu_limit=float(CPU_10_PERCENT) / 1e9, + ).to_simcore_runtime_docker_labels(), "name": scheduler_data.proxy_service_name, "networks": [swarm_network_id, dynamic_sidecar_network_id], "task_template": { @@ -84,19 +134,22 @@ def get_dynamic_proxy_spec( "Hosts": [], "Image": f"caddy:{proxy_settings.DYNAMIC_SIDECAR_CADDY_VERSION}", "Init": True, - "Labels": { - # NOTE: these labels get on the tasks and that is also useful to trace - "study_id": f"{scheduler_data.project_id}", - "user_id": f"{scheduler_data.user_id}", - "uuid": f"{scheduler_data.node_uuid}", - }, + "Labels": StandardSimcoreDockerLabels( + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + product_name=scheduler_data.product_name, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + memory_limit=ByteSize(MEMORY_50MB), + cpu_limit=float(CPU_10_PERCENT) / 1e9, + ).to_simcore_runtime_docker_labels(), "Command": [ - "caddy", - "reverse-proxy", - "--from", - ":80", - "--to", - f"{entrypoint_container_name}:{service_port}", + "sh", + "-c", + f"echo -e '{caddy_file}' > /etc/caddy/Caddyfile && " + "cat /etc/caddy/Caddyfile && " + "caddy run --adapter caddyfile --config /etc/caddy/Caddyfile", ], "Mounts": mounts, }, diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/settings.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/settings.py index 07598eb4e22..49b9e0c5670 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/settings.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/settings.py @@ -1,16 +1,19 @@ -import json import logging from collections import deque -from typing import Any, Optional, cast - -from models_library.boot_options import BootOption, EnvVarKey 
+from typing import Any, cast + +from common_library.json_serialization import json_dumps, json_loads +from models_library.basic_types import EnvVarKey, PortInt +from models_library.boot_options import BootOption +from models_library.docker import ( + DockerPlacementConstraint, + to_simcore_runtime_docker_label_key, +) from models_library.service_settings_labels import ( - ComposeSpecLabel, SimcoreServiceLabels, SimcoreServiceSettingLabelEntry, SimcoreServiceSettingsLabel, ) -from models_library.services import ServiceKeyVersion from models_library.services_resources import ( CPU_100_PERCENT, DEFAULT_SINGLE_SERVICE_NAME, @@ -18,13 +21,14 @@ MEMORY_1GB, ServiceResourcesDict, ) +from models_library.services_types import ServiceKey, ServiceVersion from models_library.utils.docker_compose import ( MATCH_IMAGE_END, MATCH_IMAGE_START, MATCH_SERVICE_VERSION, ) -from ....api.dependencies.director_v0 import DirectorV0Client +from ....modules.catalog import CatalogClient from ..errors import DynamicSidecarError BOOT_OPTION_PREFIX = "DY_BOOT_OPTION" @@ -72,14 +76,35 @@ def _parse_env_settings(settings: list[str]) -> dict: return envs +def extract_service_port_from_settings( + labels_service_settings: SimcoreServiceSettingsLabel, +) -> PortInt: + param: SimcoreServiceSettingLabelEntry + for param in labels_service_settings: + # publishing port on the ingress network. + if param.name == "ports" and param.setting_type == "int": # backward comp + return PortInt(param.value) + # REST-API compatible + if ( + param.setting_type == "EndpointSpec" + and "Ports" in param.value + and ( + isinstance(param.value["Ports"], list) + and "TargetPort" in param.value["Ports"][0] + ) + ): + return PortInt(param.value["Ports"][0]["TargetPort"]) + msg = "service port not found!" + raise ValueError(msg) + + # pylint: disable=too-many-branches -# TODO: PC->ANE: i tend to agree with pylint, perhaps we can refactor this together def update_service_params_from_settings( labels_service_settings: SimcoreServiceSettingsLabel, create_service_params: dict[str, Any], ) -> None: + param: SimcoreServiceSettingLabelEntry for param in labels_service_settings: - param: SimcoreServiceSettingLabelEntry = param # NOTE: the below capitalize addresses a bug in a lot of already in use services # where Resources was written in lower case if param.setting_type.capitalize() == "Resources": @@ -105,31 +130,10 @@ def update_service_params_from_settings( # NOTE: The Docker REST API reads Reservation when actually it's Reservations create_service_params["task_template"]["Resources"].update(param.value) - # publishing port on the ingress network. 
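# NOTE (illustrative sketch, hypothetical values; not a hunk of this patch): the
# two "settings" entry shapes accepted by extract_service_port_from_settings().
# Real entries are SimcoreServiceSettingLabelEntry objects; plain dicts are used
# here only to show the expected structure.
_LEGACY_PORT_ENTRY = {"name": "ports", "type": "int", "value": 8888}
_ENDPOINT_SPEC_ENTRY = {
    "name": "ports",
    "type": "EndpointSpec",
    "value": {"Ports": [{"Protocol": "tcp", "TargetPort": 8888}]},
}
# For either shape the helper returns PortInt(8888); if no matching entry is
# found it raises ValueError("service port not found!").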
- elif param.name == "ports" and param.setting_type == "int": # backward comp - create_service_params["labels"]["port"] = create_service_params["labels"][ - "service_port" - ] = str(param.value) - # REST-API compatible - elif param.setting_type == "EndpointSpec": - if "Ports" in param.value: - if ( - isinstance(param.value["Ports"], list) - and "TargetPort" in param.value["Ports"][0] - ): - create_service_params["labels"]["port"] = create_service_params[ - "labels" - ]["service_port"] = str(param.value["Ports"][0]["TargetPort"]) - # placement constraints - elif param.name == "constraints": # python-API compatible - - create_service_params["task_template"]["Placement"][ - "Constraints" - ] += param.value - - elif param.setting_type == "Constraints": # REST-API compatible - + elif ( + param.name == "constraints" or param.setting_type == "Constraints" + ): # python-API compatible create_service_params["task_template"]["Placement"][ "Constraints" ] += param.value @@ -150,11 +154,32 @@ def update_service_params_from_settings( ].extend(mount_settings) container_spec = create_service_params["task_template"]["ContainerSpec"] - # set labels for CPU and Memory limits - container_spec["Labels"]["nano_cpus_limit"] = str( - create_service_params["task_template"]["Resources"]["Limits"]["NanoCPUs"] + # set labels for CPU and Memory limits, for both service and container labels + # NOTE: cpu-limit is a float not NanoCPUs!! + container_spec["Labels"][f"{to_simcore_runtime_docker_label_key('cpu-limit')}"] = ( + str( + float( + create_service_params["task_template"]["Resources"]["Limits"][ + "NanoCPUs" + ] + ) + / (1 * 10**9) + ) + ) + create_service_params["labels"][ + f"{to_simcore_runtime_docker_label_key('cpu-limit')}" + ] = str( + float(create_service_params["task_template"]["Resources"]["Limits"]["NanoCPUs"]) + / (1 * 10**9) ) - container_spec["Labels"]["mem_limit"] = str( + container_spec["Labels"][ + f"{to_simcore_runtime_docker_label_key('memory-limit')}" + ] = str( + create_service_params["task_template"]["Resources"]["Limits"]["MemoryBytes"] + ) + create_service_params["labels"][ + f"{to_simcore_runtime_docker_label_key('memory-limit')}" + ] = str( create_service_params["task_template"]["Resources"]["Limits"]["MemoryBytes"] ) @@ -171,7 +196,7 @@ def _assemble_key(service_key: str, service_tag: str) -> str: async def _extract_osparc_involved_service_labels( - director_v0_client: DirectorV0Client, + catalog_client: CatalogClient, service_key: str, service_tag: str, service_labels: SimcoreServiceLabels, @@ -194,13 +219,11 @@ async def _extract_osparc_involved_service_labels( # maps form image_name to compose_spec key reverse_mapping: dict[str, str] = {_default_key: DEFAULT_SINGLE_SERVICE_NAME} - def remap_to_compose_spec_key() -> dict[str, str]: + def remap_to_compose_spec_key() -> dict[str, SimcoreServiceLabels]: # remaps from image_name as key to compose_spec key return {reverse_mapping[k]: v for k, v in docker_image_name_by_services.items()} - compose_spec: Optional[ComposeSpecLabel] = cast( - ComposeSpecLabel, service_labels.compose_spec - ) + compose_spec = service_labels.compose_spec if compose_spec is None: return remap_to_compose_spec_key() @@ -232,10 +255,8 @@ def remap_to_compose_spec_key() -> dict[str, str]: reverse_mapping[involved_key] = compose_service_key simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion( - key=current_service_key, version=current_service_tag - ) + await catalog_client.get_service_labels( + 
current_service_key, current_service_tag ) ) docker_image_name_by_services[involved_key] = simcore_service_labels @@ -247,7 +268,7 @@ def remap_to_compose_spec_key() -> dict[str, str]: f"docker_image_name_by_services={docker_image_name_by_services}" ) log.error(message) - raise DynamicSidecarError(message) + raise DynamicSidecarError(msg=message) return remap_to_compose_spec_key() @@ -258,8 +279,7 @@ def _add_compose_destination_containers_to_settings_entries( def _inject_destination_container( item: SimcoreServiceSettingLabelEntry, ) -> SimcoreServiceSettingLabelEntry: - # pylint: disable=protected-access - item._destination_containers = destination_containers + item.set_destination_containers(destination_containers) return item return [_inject_destination_container(x) for x in settings] @@ -268,14 +288,16 @@ def _inject_destination_container( def _merge_resources_in_settings( settings: deque[SimcoreServiceSettingLabelEntry], service_resources: ServiceResourcesDict, + *, + placement_substitutions: dict[str, DockerPlacementConstraint], ) -> deque[SimcoreServiceSettingLabelEntry]: """All oSPARC services which have defined resource requirements will be added""" log.debug("MERGING\n%s\nAND\n%s", f"{settings=}", f"{service_resources}") result: deque[SimcoreServiceSettingLabelEntry] = deque() + entry: SimcoreServiceSettingLabelEntry for entry in settings: - entry: SimcoreServiceSettingLabelEntry = entry if entry.name == "Resources" and entry.setting_type == "Resources": # skipping resources continue @@ -283,11 +305,11 @@ def _merge_resources_in_settings( # merge all resources empty_resource_entry: SimcoreServiceSettingLabelEntry = ( - SimcoreServiceSettingLabelEntry.parse_obj( - dict( - name="Resources", - type="Resources", - value={ + SimcoreServiceSettingLabelEntry.model_validate( + { + "name": "Resources", + "type": "Resources", + "value": { "Limits": {"NanoCPUs": 0, "MemoryBytes": 0}, "Reservations": { "NanoCPUs": 0, @@ -295,11 +317,11 @@ def _merge_resources_in_settings( "GenericResources": [], }, }, - ) + } ) ) - for _, image_resources in service_resources.items(): + for image_resources in service_resources.values(): for resource_name, resource_value in image_resources.resources.items(): if resource_name == "CPU": empty_resource_entry.value["Limits"]["NanoCPUs"] += int( @@ -316,6 +338,9 @@ def _merge_resources_in_settings( "MemoryBytes" ] += resource_value.reservation else: # generic resources + if resource_name in placement_substitutions: + # NOTE: placement constraint will be used in favour of this generic resource + continue generic_resource = { "DiscreteResourceSpec": { "Kind": resource_name, @@ -349,19 +374,18 @@ def _patch_target_service_into_env_vars( def _format_env_var(env_var: str, destination_container: list[str]) -> str: var_name, var_payload = env_var.split("=") - json_encoded = json.dumps( - dict(destination_containers=destination_container, env_var=var_payload) + json_encoded = json_dumps( + {"destination_containers": destination_container, "env_var": var_payload} ) return f"{var_name}={json_encoded}" + entry: SimcoreServiceSettingLabelEntry for entry in settings: - entry: SimcoreServiceSettingLabelEntry = entry if entry.name == "env" and entry.setting_type == "string": # process entry list_of_env_vars = entry.value if entry.value else [] - # pylint: disable=protected-access - destination_containers: list[str] = entry._destination_containers + destination_containers: list[str] = entry.get_destination_containers() # transforms settings defined environment variables # from 
`ENV_VAR=PAYLOAD` @@ -375,22 +399,21 @@ def _format_env_var(env_var: str, destination_container: list[str]) -> str: def _get_boot_options( service_labels: SimcoreServiceLabels, -) -> Optional[dict[EnvVarKey, BootOption]]: - as_dict = service_labels.dict() +) -> dict[EnvVarKey, BootOption] | None: + as_dict = service_labels.model_dump() boot_options_encoded = as_dict.get("io.simcore.boot-options", None) if boot_options_encoded is None: return None - boot_options = json.loads(boot_options_encoded)["boot-options"] + boot_options = json_loads(boot_options_encoded)["boot-options"] log.debug("got boot_options=%s", boot_options) - return {k: BootOption.parse_obj(v) for k, v in boot_options.items()} + return {k: BootOption.model_validate(v) for k, v in boot_options.items()} def _assemble_env_vars_for_boot_options( boot_options: dict[EnvVarKey, BootOption], service_user_selection_boot_options: dict[EnvVarKey, str], ) -> SimcoreServiceSettingsLabel: - env_vars: deque[str] = deque() for env_var_key, boot_option in boot_options.items(): # fetch value selected by the user or use default if not present @@ -401,21 +424,21 @@ def _assemble_env_vars_for_boot_options( env_vars.append(f"{env_var_name}={value}") return SimcoreServiceSettingsLabel( - __root__=[ + root=[ SimcoreServiceSettingLabelEntry( - name="env", type="string", value=list(env_vars) + name="env", setting_type="string", value=list(env_vars) ) ] ) async def get_labels_for_involved_services( - director_v0_client: DirectorV0Client, service_key: str, service_tag: str + catalog_client: CatalogClient, + service_key: ServiceKey, + service_tag: ServiceVersion, ) -> dict[str, SimcoreServiceLabels]: simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion(key=service_key, version=service_tag) - ) + await catalog_client.get_service_labels(service_key, service_tag) ) log.info( "image=%s, tag=%s, labels=%s", service_key, service_tag, simcore_service_labels @@ -424,36 +447,38 @@ async def get_labels_for_involved_services( # paths_mapping express how to map dynamic-sidecar paths to the compose-spec volumes # where the service expects to find its certain folders - labels_for_involved_services: dict[ - str, SimcoreServiceLabels - ] = await _extract_osparc_involved_service_labels( - director_v0_client=director_v0_client, - service_key=service_key, - service_tag=service_tag, - service_labels=simcore_service_labels, + labels_for_involved_services: dict[str, SimcoreServiceLabels] = ( + await _extract_osparc_involved_service_labels( + catalog_client=catalog_client, + service_key=service_key, + service_tag=service_tag, + service_labels=simcore_service_labels, + ) ) logging.info("labels_for_involved_services=%s", labels_for_involved_services) return labels_for_involved_services async def merge_settings_before_use( - director_v0_client: DirectorV0Client, - service_key: str, - service_tag: str, + catalog_client: CatalogClient, + *, + service_key: ServiceKey, + service_tag: ServiceVersion, service_user_selection_boot_options: dict[EnvVarKey, str], service_resources: ServiceResourcesDict, + placement_substitutions: dict[str, DockerPlacementConstraint], ) -> SimcoreServiceSettingsLabel: labels_for_involved_services = await get_labels_for_involved_services( - director_v0_client=director_v0_client, + catalog_client=catalog_client, service_key=service_key, service_tag=service_tag, ) - settings: deque[SimcoreServiceSettingLabelEntry] = deque() # TODO: fix typing here + settings: 
deque[SimcoreServiceSettingLabelEntry] = deque() - boot_options_settings_env_vars: Optional[SimcoreServiceSettingsLabel] = None + boot_options_settings_env_vars: SimcoreServiceSettingsLabel | None = None # search for boot options first and inject to all containers - for compose_spec_key, service_labels in labels_for_involved_services.items(): + for service_labels in labels_for_involved_services.values(): labels_boot_options = _get_boot_options(service_labels) if labels_boot_options: # create a new setting from SimcoreServiceSettingsLabel as env var @@ -480,10 +505,12 @@ async def merge_settings_before_use( ) ) - settings = _merge_resources_in_settings(settings, service_resources) + settings = _merge_resources_in_settings( + settings, service_resources, placement_substitutions=placement_substitutions + ) settings = _patch_target_service_into_env_vars(settings) - return SimcoreServiceSettingsLabel.parse_obj(settings) + return SimcoreServiceSettingsLabel.model_validate(settings) __all__ = ["merge_settings_before_use", "update_service_params_from_settings"] diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py index 4632e6eb65b..d7d013208cb 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/sidecar.py @@ -1,47 +1,128 @@ import logging from copy import deepcopy +from typing import Any, NamedTuple +from common_library.json_serialization import json_dumps +from common_library.serialization import model_dump_with_secrets from models_library.aiodocker_api import AioDockerServiceSpec -from models_library.basic_types import BootModeEnum +from models_library.basic_types import BootModeEnum, PortInt +from models_library.callbacks_mapping import CallbacksMapping +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + DockerLabelKey, + DockerPlacementConstraint, + StandardSimcoreDockerLabels, + to_simcore_runtime_docker_label_key, +) +from models_library.resource_tracker import HardwareInfo from models_library.service_settings_labels import SimcoreServiceSettingsLabel -from pydantic import parse_obj_as -from servicelib.json_serialization import json_dumps +from pydantic import ByteSize, TypeAdapter +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.efs_guardian import efs_manager +from servicelib.utils import unused_port -from ....core.settings import AppSettings, DynamicSidecarSettings -from ....models.schemas.constants import DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL -from ....models.schemas.dynamic_services import SchedulerData, ServiceType +from ....constants import DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL +from ....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from ....core.dynamic_services_settings.sidecar import DynamicSidecarSettings +from ....core.settings import AppSettings +from ....models.dynamic_services_scheduler import SchedulerData +from ....modules.db.repositories.groups_extra_properties import UserExtraProperties from .._namespace import get_compose_namespace from ..volumes import DynamicSidecarVolumesPathsResolver from ._constants import DOCKER_CONTAINER_SPEC_RESTART_POLICY_DEFAULTS -from .settings import 
update_service_params_from_settings +from .settings import ( + extract_service_port_from_settings, + update_service_params_from_settings, +) -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -def extract_service_port_from_compose_start_spec( - create_service_params: AioDockerServiceSpec, -) -> int: - assert create_service_params.Labels # nosec - return parse_obj_as(int, create_service_params.Labels["service_port"]) +def extract_service_port_service_settings( + settings: SimcoreServiceSettingsLabel, +) -> PortInt: + return extract_service_port_from_settings(settings) + + +class _StorageConfig(NamedTuple): + host: str + port: str + username: str + password: str + secure: str + + +def _get_storage_config(app_settings: AppSettings) -> _StorageConfig: + host: str = app_settings.DIRECTOR_V2_STORAGE.STORAGE_HOST + port: str = f"{app_settings.DIRECTOR_V2_STORAGE.STORAGE_PORT}" + username: str = "null" + password: str = "null" + secure: str = "0" + + storage_auth_settings = app_settings.DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH + + if storage_auth_settings and storage_auth_settings.auth_required: + host = storage_auth_settings.STORAGE_HOST + port = f"{storage_auth_settings.STORAGE_PORT}" + assert storage_auth_settings.STORAGE_USERNAME # nosec + username = storage_auth_settings.STORAGE_USERNAME + assert storage_auth_settings.STORAGE_PASSWORD # nosec + password = storage_auth_settings.STORAGE_PASSWORD.get_secret_value() + secure = "1" if storage_auth_settings.STORAGE_SECURE else "0" + + return _StorageConfig( + host=host, + port=port, + username=username, + password=password, + secure=secure, + ) def _get_environment_variables( compose_namespace: str, scheduler_data: SchedulerData, app_settings: AppSettings, + *, allow_internet_access: bool, + metrics_collection_allowed: bool, + telemetry_enabled: bool, ) -> dict[str, str]: - registry_settings = app_settings.DIRECTOR_V2_DOCKER_REGISTRY rabbit_settings = app_settings.DIRECTOR_V2_RABBITMQ r_clone_settings = ( app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_R_CLONE_SETTINGS ) + dy_sidecar_aws_s3_cli_settings = None + if ( + app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_AWS_S3_CLI_SETTINGS + and app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_AWS_S3_CLI_SETTINGS.AWS_S3_CLI_S3 + ): + dy_sidecar_aws_s3_cli_settings = json_dumps( + model_dump_with_secrets( + app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_AWS_S3_CLI_SETTINGS, + show_secrets=True, + ) + ) state_exclude = set() if scheduler_data.paths_mapping.state_exclude is not None: state_exclude = scheduler_data.paths_mapping.state_exclude - return { + callbacks_mapping: CallbacksMapping = scheduler_data.callbacks_mapping + + if not metrics_collection_allowed: + _logger.info( + "user=%s disabled metrics collection, disable prometheus metrics for node_id=%s", + scheduler_data.user_id, + scheduler_data.node_uuid, + ) + callbacks_mapping.metrics = None + + storage_config = _get_storage_config(app_settings) + + envs: dict[str, str] = { # These environments will be captured by # services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/settings.py::ApplicationSettings # @@ -49,68 +130,119 @@ def _get_environment_variables( "DY_SIDECAR_PATH_INPUTS": f"{scheduler_data.paths_mapping.inputs_path}", "DY_SIDECAR_PATH_OUTPUTS": f"{scheduler_data.paths_mapping.outputs_path}", "DY_SIDECAR_PROJECT_ID": f"{scheduler_data.project_id}", - "DY_SIDECAR_RUN_ID": f"{scheduler_data.run_id}", + "DY_SIDECAR_RUN_ID": scheduler_data.run_id, 
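# NOTE (illustrative sketch, hypothetical values; not a hunk of this patch):
# nested settings such as the deployment registry are handed to the sidecar as a
# single JSON-encoded env var (see DY_DEPLOYMENT_REGISTRY_SETTINGS below);
# model_dump_with_secrets(show_secrets=True) is what unwraps SecretStr fields
# before json_dumps() serializes the mapping, roughly:
import json

_example_registry_settings = {
    "REGISTRY_URL": "registry.example.com",  # hypothetical
    "REGISTRY_AUTH": True,
    "REGISTRY_USER": "admin",
    "REGISTRY_PW": "not-a-real-password",  # SecretStr already unwrapped
    "REGISTRY_SSL": True,
}
_example_env = {
    "DY_DEPLOYMENT_REGISTRY_SETTINGS": json.dumps(_example_registry_settings)
}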
"DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS": f"{allow_internet_access}", + "DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE": f"{telemetry_enabled}", "DY_SIDECAR_STATE_EXCLUDE": json_dumps(f"{x}" for x in state_exclude), + "DY_SIDECAR_LEGACY_STATE": ( + "null" + if scheduler_data.paths_mapping.legacy_state is None + else scheduler_data.paths_mapping.legacy_state.model_dump_json() + ), + "DY_SIDECAR_CALLBACKS_MAPPING": callbacks_mapping.model_dump_json(), "DY_SIDECAR_STATE_PATHS": json_dumps( f"{x}" for x in scheduler_data.paths_mapping.state_paths ), "DY_SIDECAR_USER_ID": f"{scheduler_data.user_id}", + "DY_SIDECAR_AWS_S3_CLI_SETTINGS": dy_sidecar_aws_s3_cli_settings or "null", "DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": compose_namespace, "DYNAMIC_SIDECAR_LOG_LEVEL": app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_LOG_LEVEL, + "DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED": f"{app_settings.DIRECTOR_V2_LOG_FORMAT_LOCAL_DEV_ENABLED}", "POSTGRES_DB": f"{app_settings.POSTGRES.POSTGRES_DB}", "POSTGRES_ENDPOINT": f"{app_settings.POSTGRES.POSTGRES_HOST}:{app_settings.POSTGRES.POSTGRES_PORT}", "POSTGRES_HOST": f"{app_settings.POSTGRES.POSTGRES_HOST}", "POSTGRES_PASSWORD": f"{app_settings.POSTGRES.POSTGRES_PASSWORD.get_secret_value()}", "POSTGRES_PORT": f"{app_settings.POSTGRES.POSTGRES_PORT}", "POSTGRES_USER": f"{app_settings.POSTGRES.POSTGRES_USER}", - "R_CLONE_ENABLED": f"{r_clone_settings.R_CLONE_ENABLED}", "R_CLONE_PROVIDER": r_clone_settings.R_CLONE_PROVIDER, + "R_CLONE_OPTION_TRANSFERS": f"{r_clone_settings.R_CLONE_OPTION_TRANSFERS}", + "R_CLONE_OPTION_RETRIES": f"{r_clone_settings.R_CLONE_OPTION_RETRIES}", + "R_CLONE_OPTION_BUFFER_SIZE": r_clone_settings.R_CLONE_OPTION_BUFFER_SIZE, "RABBIT_HOST": f"{rabbit_settings.RABBIT_HOST}", "RABBIT_PASSWORD": f"{rabbit_settings.RABBIT_PASSWORD.get_secret_value()}", "RABBIT_PORT": f"{rabbit_settings.RABBIT_PORT}", "RABBIT_USER": f"{rabbit_settings.RABBIT_USER}", - "REGISTRY_AUTH": f"{registry_settings.REGISTRY_AUTH}", - "REGISTRY_PATH": f"{registry_settings.REGISTRY_PATH}", - "REGISTRY_PW": f"{registry_settings.REGISTRY_PW.get_secret_value()}", - "REGISTRY_SSL": f"{registry_settings.REGISTRY_SSL}", - "REGISTRY_URL": f"{registry_settings.REGISTRY_URL}", - "REGISTRY_USER": f"{registry_settings.REGISTRY_USER}", + "RABBIT_SECURE": f"{rabbit_settings.RABBIT_SECURE}", + "DY_DEPLOYMENT_REGISTRY_SETTINGS": ( + json_dumps( + model_dump_with_secrets( + app_settings.DIRECTOR_V2_DOCKER_REGISTRY, + show_secrets=True, + exclude={"resolved_registry_url", "api_url"}, + ) + ) + ), + "DY_DOCKER_HUB_REGISTRY_SETTINGS": ( + json_dumps( + model_dump_with_secrets( + app_settings.DIRECTOR_V2_DOCKER_HUB_REGISTRY, + show_secrets=True, + exclude={"resolved_registry_url", "api_url"}, + ) + ) + if app_settings.DIRECTOR_V2_DOCKER_HUB_REGISTRY + else "null" + ), "S3_ACCESS_KEY": r_clone_settings.R_CLONE_S3.S3_ACCESS_KEY, "S3_BUCKET_NAME": r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME, - "S3_ENDPOINT": r_clone_settings.R_CLONE_S3.S3_ENDPOINT, + "S3_REGION": r_clone_settings.R_CLONE_S3.S3_REGION, "S3_SECRET_KEY": r_clone_settings.R_CLONE_S3.S3_SECRET_KEY, - "S3_SECURE": f"{r_clone_settings.R_CLONE_S3.S3_SECURE}", "SC_BOOT_MODE": f"{app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_SC_BOOT_MODE}", "SSL_CERT_FILE": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME, + "DYNAMIC_SIDECAR_TRACING": ( + app_settings.DIRECTOR_V2_TRACING.json() + if app_settings.DIRECTOR_V2_TRACING + else "null" + ), # For background info on this special env-var above, see # - 
https://stackoverflow.com/questions/31448854/how-to-force-requests-use-the-certificates-on-my-ubuntu-system#comment78596389_37447847 "SIMCORE_HOST_NAME": scheduler_data.service_name, - "STORAGE_HOST": app_settings.DIRECTOR_V2_STORAGE.STORAGE_HOST, - "STORAGE_PORT": f"{app_settings.DIRECTOR_V2_STORAGE.STORAGE_PORT}", + "STORAGE_HOST": storage_config.host, + "STORAGE_PASSWORD": storage_config.password, + "STORAGE_PORT": storage_config.port, + "STORAGE_SECURE": storage_config.secure, + "STORAGE_USERNAME": storage_config.username, + "DY_SIDECAR_SERVICE_KEY": scheduler_data.key, + "DY_SIDECAR_SERVICE_VERSION": scheduler_data.version, + "DY_SIDECAR_USER_PREFERENCES_PATH": f"{scheduler_data.user_preferences_path}", + "DY_SIDECAR_PRODUCT_NAME": f"{scheduler_data.product_name}", + "NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS": f"{app_settings.DIRECTOR_V2_NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS}", } + if r_clone_settings.R_CLONE_S3.S3_ENDPOINT is not None: + envs["S3_ENDPOINT"] = f"{r_clone_settings.R_CLONE_S3.S3_ENDPOINT}" + return envs -def get_dynamic_sidecar_spec( +def get_prometheus_service_labels( + prometheus_service_labels: dict[str, str], callbacks_mapping: CallbacksMapping +) -> dict[str, str]: + # NOTE: if the service must be scraped it will expose a /metrics endpoint + # these labels instruct prometheus to scrape it. + enable_prometheus_scraping = callbacks_mapping.metrics is not None + return prometheus_service_labels if enable_prometheus_scraping else {} + + +def get_prometheus_monitoring_networks( + prometheus_networks: list[str], callbacks_mapping: CallbacksMapping +) -> list[dict[str, str]]: + return ( + [] + if callbacks_mapping.metrics is None + else [{"Target": network_name} for network_name in prometheus_networks] + ) + + +async def _get_mounts( + *, scheduler_data: SchedulerData, dynamic_sidecar_settings: DynamicSidecarSettings, - swarm_network_id: str, - settings: SimcoreServiceSettingsLabel, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, app_settings: AppSettings, - allow_internet_access: bool, -) -> AioDockerServiceSpec: - """ - The dynamic-sidecar is responsible for managing the lifecycle - of the dynamic service. The director-v2 directly coordinates with - the dynamic-sidecar for this purpose. 
- - returns: the compose the request body for service creation - SEE https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate - """ - compose_namespace = get_compose_namespace(scheduler_data.node_uuid) - - # MOUNTS ----------- - mounts = [ + has_quota_support: bool, + rpc_client: RabbitMQRPCClient, + is_efs_enabled: bool, +) -> list[dict[str, Any]]: + mounts: list[dict[str, Any]] = [ # docker socket needed to use the docker api { "Source": "/var/run/docker.sock", @@ -118,14 +250,21 @@ def get_dynamic_sidecar_spec( "Type": "bind", }, DynamicSidecarVolumesPathsResolver.mount_shared_store( - swarm_stack_name=dynamic_sidecar_settings.SWARM_STACK_NAME, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, node_uuid=scheduler_data.node_uuid, - run_id=scheduler_data.run_id, + service_run_id=scheduler_data.run_id, project_id=scheduler_data.project_id, user_id=scheduler_data.user_id, + has_quota_support=has_quota_support, ), ] + volume_size_limits = ( + scheduler_data.paths_mapping.volume_size_limits or {} + if has_quota_support + else {} + ) + # Docker does not allow mounting of subfolders from volumes as the following: # `volume_name/inputs:/target_folder/inputs` # `volume_name/outputs:/target_folder/inputs` @@ -140,26 +279,52 @@ def get_dynamic_sidecar_spec( scheduler_data.paths_mapping.inputs_path, scheduler_data.paths_mapping.outputs_path, ]: - mounts.append( + mounts.append( # noqa: PERF401 DynamicSidecarVolumesPathsResolver.mount_entry( - swarm_stack_name=dynamic_sidecar_settings.SWARM_STACK_NAME, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, path=path_to_mount, node_uuid=scheduler_data.node_uuid, - run_id=scheduler_data.run_id, + service_run_id=scheduler_data.run_id, project_id=scheduler_data.project_id, user_id=scheduler_data.user_id, + volume_size_limit=volume_size_limits.get(f"{path_to_mount}"), ) ) + # state paths now get mounted via different driver and are synced to s3 automatically for path_to_mount in scheduler_data.paths_mapping.state_paths: + if is_efs_enabled: + assert dynamic_sidecar_settings.DYNAMIC_SIDECAR_EFS_SETTINGS # nosec + + _storage_directory_name = DynamicSidecarVolumesPathsResolver.volume_name( + path_to_mount + ).strip("_") + await efs_manager.create_project_specific_data_dir( + rpc_client, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + storage_directory_name=_storage_directory_name, + ) + mounts.append( + DynamicSidecarVolumesPathsResolver.mount_efs( + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + path=path_to_mount, + node_uuid=scheduler_data.node_uuid, + service_run_id=scheduler_data.run_id, + project_id=scheduler_data.project_id, + user_id=scheduler_data.user_id, + efs_settings=dynamic_sidecar_settings.DYNAMIC_SIDECAR_EFS_SETTINGS, + storage_directory_name=_storage_directory_name, + ) + ) # for now only enable this with dev features enabled - if app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: + elif app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: mounts.append( DynamicSidecarVolumesPathsResolver.mount_r_clone( - swarm_stack_name=dynamic_sidecar_settings.SWARM_STACK_NAME, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, path=path_to_mount, node_uuid=scheduler_data.node_uuid, - run_id=scheduler_data.run_id, + service_run_id=scheduler_data.run_id, project_id=scheduler_data.project_id, user_id=scheduler_data.user_id, r_clone_settings=dynamic_sidecar_settings.DYNAMIC_SIDECAR_R_CLONE_SETTINGS, @@ 
-168,12 +333,13 @@ def get_dynamic_sidecar_spec( else: mounts.append( DynamicSidecarVolumesPathsResolver.mount_entry( - swarm_stack_name=dynamic_sidecar_settings.SWARM_STACK_NAME, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, path=path_to_mount, node_uuid=scheduler_data.node_uuid, - run_id=scheduler_data.run_id, + service_run_id=scheduler_data.run_id, project_id=scheduler_data.project_id, user_id=scheduler_data.user_id, + volume_size_limit=volume_size_limits.get(f"{path_to_mount}"), ) ) @@ -205,14 +371,33 @@ def get_dynamic_sidecar_spec( } ) - # PORTS ----------- - ports = [] # expose this service on an empty port + if scheduler_data.user_preferences_path: + mounts.append( + DynamicSidecarVolumesPathsResolver.mount_user_preferences( + user_preferences_path=scheduler_data.user_preferences_path, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + node_uuid=scheduler_data.node_uuid, + service_run_id=scheduler_data.run_id, + project_id=scheduler_data.project_id, + user_id=scheduler_data.user_id, + has_quota_support=has_quota_support, + ) + ) + return mounts + + +def _get_ports( + *, dynamic_sidecar_settings: DynamicSidecarSettings, app_settings: AppSettings +) -> list[dict[str, Any]]: + ports: list[dict[str, Any]] = [] # expose this service on an empty port if dynamic_sidecar_settings.DYNAMIC_SIDECAR_EXPOSE_PORT: ports.append( # server port { "Protocol": "tcp", "TargetPort": dynamic_sidecar_settings.DYNAMIC_SIDECAR_PORT, + "PublishedPort": unused_port(), + "PublishMode": "host", } ) @@ -221,69 +406,157 @@ def get_dynamic_sidecar_spec( # debugger port { "Protocol": "tcp", - "TargetPort": app_settings.DIRECTOR_V2_REMOTE_DEBUG_PORT, + "TargetPort": app_settings.DIRECTOR_V2_REMOTE_DEBUGGING_PORT, + "PublishedPort": unused_port(), + "PublishMode": "host", } ) + return ports + + +async def get_dynamic_sidecar_spec( # pylint:disable=too-many-arguments# noqa: PLR0913 + scheduler_data: SchedulerData, + dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, + swarm_network_id: str, + settings: SimcoreServiceSettingsLabel, + app_settings: AppSettings, + *, + has_quota_support: bool, + hardware_info: HardwareInfo | None, + metrics_collection_allowed: bool, + user_extra_properties: UserExtraProperties, + rpc_client: RabbitMQRPCClient, +) -> AioDockerServiceSpec: + """ + The dynamic-sidecar is responsible for managing the lifecycle + of the dynamic service. The director-v2 directly coordinates with + the dynamic-sidecar for this purpose. + + returns: the compose the request body for service creation + SEE https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate + """ + compose_namespace = get_compose_namespace(scheduler_data.node_uuid) + + mounts = await _get_mounts( + scheduler_data=scheduler_data, + dynamic_services_scheduler_settings=dynamic_services_scheduler_settings, + dynamic_sidecar_settings=dynamic_sidecar_settings, + app_settings=app_settings, + has_quota_support=has_quota_support, + rpc_client=rpc_client, + is_efs_enabled=user_extra_properties.is_efs_enabled, + ) + + ports = _get_ports( + dynamic_sidecar_settings=dynamic_sidecar_settings, app_settings=app_settings + ) + + assert ( + scheduler_data.product_name is not None + ), "ONLY for legacy. 
This function should not be called with product_name==None" # nosec + + standard_simcore_docker_labels: dict[DockerLabelKey, str] = ( + StandardSimcoreDockerLabels( + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + product_name=scheduler_data.product_name, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + memory_limit=ByteSize(0), # this should get overwritten + cpu_limit=0, # this should get overwritten + ).to_simcore_runtime_docker_labels() + ) + + service_labels: dict[str, str] = ( + { + DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: scheduler_data.as_label_data(), + to_simcore_runtime_docker_label_key("service_key"): scheduler_data.key, + to_simcore_runtime_docker_label_key( + "service_version" + ): scheduler_data.version, + } + | get_prometheus_service_labels( + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS, + scheduler_data.callbacks_mapping, + ) + | standard_simcore_docker_labels + ) + + placement_settings = ( + app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_PLACEMENT_SETTINGS + ) + placement_constraints = deepcopy( + placement_settings.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS + ) + # if service has a pricing plan apply constraints for autoscaling + if hardware_info and len(hardware_info.aws_ec2_instances) == 1: + ec2_instance_type: str = hardware_info.aws_ec2_instances[0] + placement_constraints.append( + TypeAdapter(DockerPlacementConstraint).validate_python( + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}=={ec2_instance_type}", + ) + ) + + placement_substitutions: dict[str, DockerPlacementConstraint] = ( + placement_settings.DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS + ) + for image_resources in scheduler_data.service_resources.values(): + for resource_name in image_resources.resources: + if resource_name in placement_substitutions: + placement_constraints.append(placement_substitutions[resource_name]) # ----------- create_service_params = { "endpoint_spec": {"Ports": ports} if ports else {}, - "labels": { - "type": ServiceType.MAIN.value, # required to be listed as an interactive service and be properly cleaned up - "user_id": f"{scheduler_data.user_id}", - "port": f"{dynamic_sidecar_settings.DYNAMIC_SIDECAR_PORT}", - "study_id": f"{scheduler_data.project_id}", - # the following are used for scheduling - "uuid": f"{scheduler_data.node_uuid}", # also needed for removal when project is closed - "swarm_stack_name": dynamic_sidecar_settings.SWARM_STACK_NAME, # required for listing services with uuid - DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: scheduler_data.as_label_data(), - "service_image": dynamic_sidecar_settings.DYNAMIC_SIDECAR_IMAGE, - }, + "labels": service_labels, "name": scheduler_data.service_name, - "networks": [{"Target": swarm_network_id}], + "networks": [ + {"Target": swarm_network_id}, + *get_prometheus_monitoring_networks( + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS, + scheduler_data.callbacks_mapping, + ), + ], "task_template": { "ContainerSpec": { "Env": _get_environment_variables( compose_namespace, scheduler_data, app_settings, - allow_internet_access, + allow_internet_access=user_extra_properties.is_internet_enabled, + metrics_collection_allowed=metrics_collection_allowed, + telemetry_enabled=user_extra_properties.is_telemetry_enabled, ), "Hosts": [], "Image": 
dynamic_sidecar_settings.DYNAMIC_SIDECAR_IMAGE, "Init": True, - "Labels": { - # NOTE: these labels get on the tasks and that is also useful to trace - "user_id": f"{scheduler_data.user_id}", - "study_id": f"{scheduler_data.project_id}", - "uuid": f"{scheduler_data.node_uuid}", - }, + "Labels": standard_simcore_docker_labels, "Mounts": mounts, - "Secrets": [ - { - "SecretID": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID, - "SecretName": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME, - "File": { - "Name": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME, - "Mode": 444, - "UID": "0", - "GID": "0", - }, - } - ] - if ( - app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME - and app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID - and app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME - and app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED - ) - else None, - }, - "Placement": { - "Constraints": deepcopy( - app_settings.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS - ) + "Secrets": ( + [ + { + "SecretID": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID, + "SecretName": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME, + "File": { + "Name": app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME, + "Mode": 444, + "UID": "0", + "GID": "0", + }, + } + ] + if ( + app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_FILENAME + and app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_ID + and app_settings.DIRECTOR_V2_SELF_SIGNED_SSL_SECRET_NAME + and app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED + ) + else None + ), }, + "Placement": {"Constraints": placement_constraints}, "RestartPolicy": DOCKER_CONTAINER_SPEC_RESTART_POLICY_DEFAULTS, # this will get overwritten "Resources": { @@ -293,9 +566,12 @@ def get_dynamic_sidecar_spec( }, } + if dynamic_sidecar_settings.DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED: + create_service_params["endpoint_spec"] = {"Mode": "dnsrr"} + update_service_params_from_settings( labels_service_settings=settings, create_service_params=create_service_params, ) - return AioDockerServiceSpec.parse_obj(create_service_params) + return AioDockerServiceSpec.model_validate(create_service_params) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/volume_remover.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/volume_remover.py deleted file mode 100644 index 9bbd4edef33..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_service_specs/volume_remover.py +++ /dev/null @@ -1,181 +0,0 @@ -import json -import re -from asyncio.log import logger -from uuid import uuid4 - -from models_library.aiodocker_api import AioDockerServiceSpec -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.services_resources import ( - CPU_10_PERCENT, - CPU_100_PERCENT, - MEMORY_50MB, - MEMORY_250MB, -) -from models_library.users import UserID - -from ....core.settings import DynamicSidecarSettings -from ....models.schemas.constants import DYNAMIC_VOLUME_REMOVER_PREFIX - - -class DockerVersion(str): - """ - Extracts `XX.XX.XX` where X is a range [0-9] from - a given docker version - """ - - @classmethod - def __get_validators__(cls): - yield cls.validate_docker_version - - @classmethod - def validate_docker_version(cls, docker_version: str) -> str: - try: - search_result = re.search(r"^\d\d.(\d\d|\d).(\d\d|\d)", docker_version) - return search_result.group() - except AttributeError: - raise 
ValueError( # pylint: disable=raise-missing-from - f"{docker_version} appears not to be a valid docker version" - ) - - -# NOTE: below `retry` function is inspired by -# https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746 -SH_SCRIPT_REMOVE_VOLUMES = """ -set -e; - -error_counter=0 - -function retry {{ - local retries=$1 - shift - - local count=0 - while true; - do - - local command_result - set +e - $($@ > /tmp/command_result 2>&1) - exit_code=$? - set -e - - command_result=$(cat /tmp/command_result) - echo "$command_result" - volume_name=$4 - - case "$command_result" in - *"Error: No such volume: $volume_name"*) - return 0 - ;; - esac - - if [ $exit_code -eq 0 ]; then - return 0 - fi - - count=$(($count + 1)) - if [ $count -lt $retries ]; then - echo "Retry $count/$retries exited $exit_code, retrying in {sleep} seconds..." - sleep {sleep} - else - echo "Retry $count/$retries exited $exit_code, no more retries left." - let error_counter=error_counter+1 - return 0 - fi - done - return 0 -}} - -for volume_name in {volume_names_seq} -do - retry {retries} docker volume rm "$volume_name" -done - -if [ "$error_counter" -ne "0" ]; then - echo "ERROR: Please check above logs, there was/were $error_counter error/s." - exit 1 -fi -""" - - -def spec_volume_removal_service( - dynamic_sidecar_settings: DynamicSidecarSettings, - docker_node_id: str, - user_id: UserID, - project_id: ProjectID, - node_uuid: NodeID, - volume_names: list[str], - docker_version: DockerVersion, - *, - volume_removal_attempts: int, - sleep_between_attempts_s: int, - service_timeout_s: int, -) -> AioDockerServiceSpec: - """ - Generates a service spec for with base image - `docker:{docker_version}-dind` running the above bash script. - - The bash script will attempt to remove each individual volume - a few times before giving up. - The script will exit with error if it is not capable of - removing the volume. - - NOTE: expect the container of the service to exit with code 0, - otherwise there was an error. - NOTE: the bash script will exit 1 if it cannot find a - volume to remove. - NOTE: service must be removed once it finishes or it will - remain in the system. - NOTE: when running docker-in-docker https://hub.docker.com/_/docker - selecting the same version as the actual docker engine running - on the current node allows to avoid possible incompatible - versions. It is assumed that the same version of docker - will be running in the entire swarm. 
- """ - - volume_names_seq = " ".join(volume_names) - formatted_command = SH_SCRIPT_REMOVE_VOLUMES.format( - volume_names_seq=volume_names_seq, - retries=volume_removal_attempts, - sleep=sleep_between_attempts_s, - ) - logger.debug("Service will run:\n%s", formatted_command) - command = ["sh", "-c", formatted_command] - - create_service_params = { - "labels": { - "volume_names": json.dumps(volume_names), - "volume_removal_attempts": f"{volume_removal_attempts}", - "sleep_between_attempts_s": f"{sleep_between_attempts_s}", - "service_timeout_s": f"{service_timeout_s}", - "swarm_stack_name": dynamic_sidecar_settings.SWARM_STACK_NAME, - "user_id": f"{user_id}", - "study_id": f"{project_id}", - "node_id": f"{node_uuid}", - }, - "name": f"{DYNAMIC_VOLUME_REMOVER_PREFIX}_{uuid4()}", - "task_template": { - "ContainerSpec": { - "Command": command, - "Image": f"docker:{docker_version}-dind", - "Mounts": [ - { - "Source": "/var/run/docker.sock", - "Target": "/var/run/docker.sock", - "Type": "bind", - } - ], - }, - "Placement": {"Constraints": [f"node.id == {docker_node_id}"]}, - "RestartPolicy": {"Condition": "none"}, - "Resources": { - "Reservations": { - "MemoryBytes": MEMORY_50MB, - "NanoCPUs": CPU_10_PERCENT, - }, - "Limits": {"MemoryBytes": MEMORY_250MB, "NanoCPUs": CPU_100_PERCENT}, - }, - }, - } - return AioDockerServiceSpec.parse_obj(create_service_params) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_states.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_states.py index fca9f89acf7..afd44dc0f59 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_states.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/docker_states.py @@ -4,9 +4,9 @@ import logging from models_library.generated_models.docker_rest_api import ContainerState +from models_library.services_enums import ServiceState -from ...models.schemas.dynamic_services import ServiceState -from ...models.schemas.dynamic_services.scheduler import DockerContainerInspect +from ...models.dynamic_services_scheduler import DockerContainerInspect logger = logging.getLogger(__name__) @@ -74,10 +74,10 @@ def extract_task_state(task_status: dict[str, str]) -> tuple[ServiceState, str]: def _extract_container_status( container_state: ContainerState, ) -> tuple[ServiceState, ServiceMessage]: - assert container_state.Status # nosec + assert container_state.status # nosec return ( - _CONTAINER_STATE_TO_SERVICE_STATE[container_state.Status], - container_state.Error if container_state.Error else "", + _CONTAINER_STATE_TO_SERVICE_STATE[container_state.status], + container_state.error if container_state.error else "", ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/errors.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/errors.py index 0b84c48e56b..3b0a400223b 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/errors.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/errors.py @@ -1,44 +1,37 @@ -from aiodocker.exceptions import DockerError -from models_library.projects_nodes import NodeID -from pydantic.errors import PydanticErrorMixin +from typing import Any -from ...core.errors import DirectorException +from aiodocker import DockerError +from ...core.errors import DirectorError -class DynamicSidecarError(DirectorException): - pass +class 
DynamicSidecarError(DirectorError): + msg_template: str = "Unexpected dynamic sidecar error: {msg}" -class GenericDockerError(DynamicSidecarError): - """Generic docker library error""" - def __init__(self, msg: str, original_exception: DockerError): - super().__init__(msg + f": {original_exception.message}") +class GenericDockerError(DynamicSidecarError): + def __init__(self, original_exception: DockerError, **ctx: Any) -> None: + super().__init__(original_exception=original_exception, **ctx) self.original_exception = original_exception + msg_template: str = "Unexpected error using docker client: {msg}" -class DynamicSidecarNotFoundError(DirectorException): - """Dynamic sidecar was not found""" - - def __init__(self, node_uuid: NodeID): - super().__init__(f"node {node_uuid} not found") +class DynamicSidecarNotFoundError(DirectorError): + msg_template: str = "node {node_uuid} not found" -class DockerServiceNotFoundError(DirectorException): - """Raised when an expected docker service is not found""" - def __init__(self, service_id: str): - super().__init__(f"docker service with {service_id=} not found") +class DockerServiceNotFoundError(DirectorError): + msg_template: str = "docker service with {service_id} not found" class EntrypointContainerNotFoundError(DynamicSidecarError): """Raised while the entrypoint container was nto yet started""" -class LegacyServiceIsNotSupportedError(DirectorException): +class LegacyServiceIsNotSupportedError(DirectorError): """This API is not implemented by the director-v0""" -class UnexpectedContainerStatusError(PydanticErrorMixin, DynamicSidecarError): - code = "dynamic_sidecar.container_status" - msg_template = "Unexpected status from containers: {containers_with_error}" +class UnexpectedContainerStatusError(DynamicSidecarError): + msg_template: str = "Unexpected status from containers: {containers_with_error}" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_abc.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_abc.py index 8093ff3f707..fc550e6a74d 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_abc.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_abc.py @@ -1,21 +1,23 @@ from abc import ABC, abstractmethod -from typing import Optional +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceCreate, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) from models_library.basic_types import PortInt from models_library.projects import ProjectID from models_library.projects_networks import DockerNetworkAlias from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services_types import ServicePortKey from models_library.users import UserID +from models_library.wallets import WalletID from servicelib.fastapi.long_running_tasks.client import ProgressCallback from servicelib.fastapi.long_running_tasks.server import TaskProgress -from ....models.domains.dynamic_services import ( - DynamicServiceCreate, - RetrieveDataOutEnveloped, -) -from ....models.schemas.dynamic_services import RunningDynamicServiceDetails - class SchedulerInternalsInterface(ABC): @abstractmethod @@ -29,7 +31,7 @@ async def shutdown(self): class SchedulerPublicInterface(ABC): @abstractmethod - def 
toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool: + def toggle_observation(self, node_uuid: NodeID, *, disable: bool) -> bool: """ Enables/disables the observation of the service temporarily. NOTE: Used by director-v2 cli. @@ -37,7 +39,7 @@ def toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool: @abstractmethod async def push_service_outputs( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: """ Push service outputs. @@ -46,7 +48,7 @@ async def push_service_outputs( @abstractmethod async def remove_service_containers( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: """ Removes all started service containers. @@ -64,7 +66,7 @@ async def remove_service_sidecar_proxy_docker_networks_and_volumes( @abstractmethod async def save_service_state( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: """ Saves the state of the service. @@ -80,6 +82,8 @@ async def add_service( request_dns: str, request_scheme: str, request_simcore_user_agent: str, + *, + can_save: bool, ) -> None: """ Adds a new service. @@ -89,11 +93,12 @@ async def add_service( def is_service_tracked(self, node_uuid: NodeID) -> bool: """returns True if service is being actively observed""" + @abstractmethod def list_services( self, *, - user_id: Optional[UserID] = None, - project_id: Optional[ProjectID] = None, + user_id: UserID | None = None, + project_id: ProjectID | None = None, ) -> list[NodeID]: """Returns the list of tracked service UUIDs""" @@ -101,18 +106,35 @@ def list_services( async def mark_service_for_removal( self, node_uuid: NodeID, - can_save: Optional[bool], + can_save: bool | None, + *, skip_observation_recreation: bool = False, ) -> None: """The service will be removed as soon as possible""" + @abstractmethod + async def mark_all_services_in_wallet_for_removal( + self, wallet_id: WalletID + ) -> None: + """When a certain threshold is reached a message for removing all the + services running under a certain wallet_id will be received. + """ + + @abstractmethod + async def is_service_awaiting_manual_intervention(self, node_uuid: NodeID) -> bool: + """ + returns True if services is waiting for manual intervention + A service will wait for manual intervention if there was an issue while saving + it's state or it's outputs. 
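# NOTE (illustrative sketch, hypothetical class names; not a hunk of this patch):
# because the members of SchedulerPublicInterface are @abstractmethod, a concrete
# scheduler must also implement the newly added coroutines
# (mark_all_services_in_wallet_for_removal, is_service_awaiting_manual_intervention,
# free_reserved_disk_space) before it can be instantiated.
from abc import ABC, abstractmethod


class _ExampleInterface(ABC):
    @abstractmethod
    async def is_service_awaiting_manual_intervention(self, node_uuid) -> bool: ...


class _ExampleScheduler(_ExampleInterface):
    async def is_service_awaiting_manual_intervention(self, node_uuid) -> bool:
        return False  # a concrete implementation is mandatory


# leaving the method out would make _ExampleScheduler() fail with
# "TypeError: Can't instantiate abstract class ..."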
+ """ + @abstractmethod async def get_stack_status(self, node_uuid: NodeID) -> RunningDynamicServiceDetails: """Polled by the frontend for the status of the service""" @abstractmethod async def retrieve_service_inputs( - self, node_uuid: NodeID, port_keys: list[str] + self, node_uuid: NodeID, port_keys: list[ServicePortKey] ) -> RetrieveDataOutEnveloped: """Pulls data from input ports for the service""" @@ -131,3 +153,7 @@ async def detach_project_network( @abstractmethod async def restart_containers(self, node_uuid: NodeID) -> None: """Restarts containers without saving or restoring the state or I/O ports""" + + @abstractmethod + async def free_reserved_disk_space(self, node_id: NodeID) -> None: + """Frees reserved disk space""" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_abc.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_abc.py index 98394de69da..bb2e9b92fb1 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_abc.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_abc.py @@ -4,7 +4,7 @@ from fastapi import FastAPI -from .....models.schemas.dynamic_services import SchedulerData +from .....models.dynamic_services_scheduler import SchedulerData class DynamicSchedulerEvent(ABC): diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py new file mode 100644 index 00000000000..6ea9efc4e37 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_event_create_sidecars.py @@ -0,0 +1,323 @@ +# pylint: disable=relative-beyond-top-level + +import logging +from typing import Any, Final + +from common_library.json_serialization import json_dumps +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.aiodocker_api import AioDockerServiceSpec +from models_library.progress_bar import ProgressReport +from models_library.projects import ProjectAtDB +from models_library.projects_nodes import Node +from models_library.projects_nodes_io import NodeIDStr +from models_library.rabbitmq_messages import ( + InstrumentationRabbitMessage, + ProgressRabbitMessageNode, + ProgressType, +) +from models_library.service_settings_labels import SimcoreServiceSettingsLabel +from models_library.services import ServiceRunID +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient +from simcore_postgres_database.models.comp_tasks import NodeClass + +from .....core.dynamic_services_settings import DynamicServicesSettings +from .....core.dynamic_services_settings.proxy import DynamicSidecarProxySettings +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from .....core.dynamic_services_settings.sidecar import ( + DynamicSidecarSettings, + PlacementSettings, +) +from .....models.dynamic_services_scheduler import NetworkId, SchedulerData +from .....utils.db import get_repository +from .....utils.dict_utils import nested_update +from ....catalog import CatalogClient +from ....db.repositories.groups_extra_properties import GroupsExtraPropertiesRepository +from ....db.repositories.projects import ProjectsRepository +from ...docker_api import ( + constrain_service_to_node, 
+ create_network, + create_service_and_get_id, + get_dynamic_sidecar_placement, + get_swarm_network, + is_dynamic_sidecar_stack_missing, +) +from ...docker_service_specs import ( + extract_service_port_service_settings, + get_dynamic_proxy_spec, + get_dynamic_sidecar_spec, + merge_settings_before_use, +) +from ._abc import DynamicSchedulerEvent +from ._events_utils import get_allow_metrics_collection + +_logger = logging.getLogger(__name__) + +_DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS: Final[tuple[list[str], ...]] = ( + ["labels"], + ["task_template", "container_spec", "env"], + ["task_template", "placement", "constraints"], + ["task_template", "resources", "reservation", "generic_resources"], + ["task_template", "resources", "limits"], + ["task_template", "resources", "reservation", "memory_bytes"], + ["task_template", "resources", "reservation", "nano_cp_us"], +) + + +def _merge_service_base_and_user_specs( + dynamic_sidecar_service_spec_base: AioDockerServiceSpec, + user_specific_service_spec: AioDockerServiceSpec, +) -> AioDockerServiceSpec: + # NOTE: since user_specific_service_spec follows Docker Service Spec and not Aio + # we do not use aliases when exporting dynamic_sidecar_service_spec_base + return AioDockerServiceSpec.model_validate( + nested_update( + jsonable_encoder( + dynamic_sidecar_service_spec_base, exclude_unset=True, by_alias=False + ), + jsonable_encoder( + user_specific_service_spec, exclude_unset=True, by_alias=False + ), + include=_DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS, + ) + ) + + +async def _create_proxy_service( + app, + *, + scheduler_data: SchedulerData, + dynamic_sidecar_network_id: NetworkId, + swarm_network_id: NetworkId, + swarm_network_name: str, +): + proxy_settings: DynamicSidecarProxySettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR_PROXY_SETTINGS + ) + scheduler_data.proxy_admin_api_port = ( + proxy_settings.DYNAMIC_SIDECAR_CADDY_ADMIN_API_PORT + ) + + dynamic_services_settings: DynamicServicesSettings = ( + app.state.settings.DYNAMIC_SERVICES + ) + + dynamic_sidecar_proxy_create_service_params: dict[ + str, Any + ] = get_dynamic_proxy_spec( + scheduler_data=scheduler_data, + dynamic_services_settings=dynamic_services_settings, + dynamic_sidecar_network_id=dynamic_sidecar_network_id, + swarm_network_id=swarm_network_id, + swarm_network_name=swarm_network_name, + ) + _logger.debug( + "dynamic-sidecar-proxy create_service_params %s", + json_dumps(dynamic_sidecar_proxy_create_service_params), + ) + + await create_service_and_get_id(dynamic_sidecar_proxy_create_service_params) + + +class CreateSidecars(DynamicSchedulerEvent): + """Created the dynamic-sidecar and the proxy.""" + + @classmethod + async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + # the call to is_dynamic_sidecar_stack_missing is expensive + # if the dynamic sidecar was started skip + if scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started: + return False + + settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + return await is_dynamic_sidecar_stack_missing( + node_uuid=scheduler_data.node_uuid, + swarm_stack_name=settings.SWARM_STACK_NAME, + ) + + @classmethod + async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: + # instrumentation + message = InstrumentationRabbitMessage( + metrics="service_started", + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + service_uuid=scheduler_data.node_uuid, + 
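For reference, `_merge_service_base_and_user_specs` above relies on `nested_update` with an `include` whitelist of key paths (`_DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS`): only those nested paths of the user-provided spec may extend the base sidecar spec. The project's `nested_update` is not part of this diff, so the stand-alone sketch below only illustrates the idea and simply overwrites whitelisted paths (the real helper may merge lists and dicts more finely):

```python
import copy
from typing import Any

# illustrative subset of _DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS
EXTENDABLE_PATHS: tuple[list[str], ...] = (
    ["labels"],
    ["task_template", "container_spec", "env"],
    ["task_template", "placement", "constraints"],
)


def _get(data: dict[str, Any], path: list[str]) -> Any:
    for key in path:
        if not isinstance(data, dict) or key not in data:
            return None
        data = data[key]
    return data


def _set(data: dict[str, Any], path: list[str], value: Any) -> None:
    for key in path[:-1]:
        data = data.setdefault(key, {})
    data[path[-1]] = value


def whitelist_merge(base: dict[str, Any], user: dict[str, Any]) -> dict[str, Any]:
    """Copy only the whitelisted nested paths of `user` on top of a copy of `base`."""
    merged = copy.deepcopy(base)
    for path in EXTENDABLE_PATHS:
        user_value = _get(user, path)
        if user_value is not None:
            _set(merged, path, user_value)
    return merged


base_spec = {"labels": {"a": "1"}, "task_template": {"container_spec": {"env": ["X=1"]}}}
user_spec = {"task_template": {"container_spec": {"env": ["Y=2"]}, "not_whitelisted": 1}}
print(whitelist_merge(base_spec, user_spec))
# -> {'labels': {'a': '1'}, 'task_template': {'container_spec': {'env': ['Y=2']}}}
```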
service_type=NodeClass.INTERACTIVE.value, + service_key=scheduler_data.key, + service_tag=scheduler_data.version, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + ) + rabbitmq_client: RabbitMQClient = app.state.rabbitmq_client + await rabbitmq_client.publish(message.channel_name, message) + + dynamic_sidecar_settings: DynamicSidecarSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + ) + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + dynamic_services_placement_settings: PlacementSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_PLACEMENT_SETTINGS + ) + + # the dynamic-sidecar should merge all the settings, especially: + # resources and placement derived from all the images in + # the provided docker-compose spec + # also other encodes the env vars to target the proper container + + # fetching project form DB and fetching user settings + projects_repository = get_repository(app, ProjectsRepository) + + project: ProjectAtDB = await projects_repository.get_project( + project_id=scheduler_data.project_id + ) + + node_uuid_str = NodeIDStr(scheduler_data.node_uuid) + node: Node | None = project.workbench.get(node_uuid_str) + boot_options = ( + node.boot_options + if node is not None and node.boot_options is not None + else {} + ) + _logger.info("%s", f"{boot_options=}") + + catalog_client = CatalogClient.instance(app) + + settings: SimcoreServiceSettingsLabel = await merge_settings_before_use( + catalog_client=catalog_client, + service_key=scheduler_data.key, + service_tag=scheduler_data.version, + service_user_selection_boot_options=boot_options, + service_resources=scheduler_data.service_resources, + placement_substitutions=dynamic_services_placement_settings.DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS, + ) + + groups_extra_properties = get_repository(app, GroupsExtraPropertiesRepository) + + assert ( + scheduler_data.product_name is not None + ), "ONLY for legacy. 
This function should not be called with product_name==None" # nosec + + user_extra_properties = await groups_extra_properties.get_user_extra_properties( + user_id=scheduler_data.user_id, product_name=scheduler_data.product_name + ) + + network_config = { + "Name": scheduler_data.dynamic_sidecar_network_name, + "Driver": "overlay", + "Labels": { + "io.simcore.zone": f"{dynamic_services_scheduler_settings.TRAEFIK_SIMCORE_ZONE}", + "com.simcore.description": f"interactive for node: {scheduler_data.node_uuid}", + "uuid": f"{scheduler_data.node_uuid}", # needed for removal when project is closed + }, + "Attachable": True, + "Internal": not user_extra_properties.is_internet_enabled, + } + dynamic_sidecar_network_id = await create_network(network_config) + + # attach the service to the swarm network dedicated to services + swarm_network: dict[str, Any] = await get_swarm_network( + dynamic_services_scheduler_settings.SIMCORE_SERVICES_NETWORK_NAME + ) + swarm_network_id: NetworkId = swarm_network["Id"] + swarm_network_name: str = swarm_network["Name"] + + metrics_collection_allowed: bool = await get_allow_metrics_collection( + app, + user_id=scheduler_data.user_id, + product_name=scheduler_data.product_name, + ) + + # start dynamic-sidecar and run the proxy on the same node + + # Each time a new dynamic-sidecar service is created + # generate a new `run_id` to avoid resource collisions + scheduler_data.run_id = ServiceRunID.get_resource_tracking_run_id_for_dynamic() + + rpc_client: RabbitMQRPCClient = app.state.rabbitmq_rpc_client + + # WARNING: do NOT log, this structure has secrets in the open + # If you want to log, please use an obfuscator + dynamic_sidecar_service_spec_base: AioDockerServiceSpec = await get_dynamic_sidecar_spec( + scheduler_data=scheduler_data, + dynamic_sidecar_settings=dynamic_sidecar_settings, + dynamic_services_scheduler_settings=dynamic_services_scheduler_settings, + swarm_network_id=swarm_network_id, + settings=settings, + app_settings=app.state.settings, + hardware_info=scheduler_data.hardware_info, + has_quota_support=dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS, + metrics_collection_allowed=metrics_collection_allowed, + user_extra_properties=user_extra_properties, + rpc_client=rpc_client, + ) + + user_specific_service_spec = ( + await catalog_client.get_service_specifications( + scheduler_data.user_id, scheduler_data.key, scheduler_data.version + ) + ).get("sidecar", {}) or {} + user_specific_service_spec = AioDockerServiceSpec.model_validate( + user_specific_service_spec + ) + dynamic_sidecar_service_final_spec = _merge_service_base_and_user_specs( + dynamic_sidecar_service_spec_base, user_specific_service_spec + ) + rabbit_message = ProgressRabbitMessageNode.model_construct( + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + progress_type=ProgressType.SIDECARS_PULLING, + report=ProgressReport(actual_value=0, total=1), + ) + await rabbitmq_client.publish(rabbit_message.channel_name, rabbit_message) + dynamic_sidecar_id = await create_service_and_get_id( + dynamic_sidecar_service_final_spec + ) + # constrain service to the same node + scheduler_data.dynamic_sidecar.docker_node_id = ( + await get_dynamic_sidecar_placement( + dynamic_sidecar_id, dynamic_services_scheduler_settings + ) + ) + + rabbit_message = ProgressRabbitMessageNode.model_construct( + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + 
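The `network_config` above is handed to the project's own async `create_network` wrapper (defined in `docker_api`, not shown in this diff). Purely as an illustration, an equivalent request issued with the synchronous Docker SDK (`docker-py`, an assumption made only for this sketch) would look roughly like this; note that an `overlay` network requires the node to be part of a swarm:

```python
import docker  # docker-py, used here only to illustrate the request

client = docker.from_env()

network = client.networks.create(
    name="dy-sidecar_<node_uuid>",  # scheduler_data.dynamic_sidecar_network_name
    driver="overlay",
    attachable=True,                # sidecar, proxy and user services attach later
    internal=True,                  # True when internet access is NOT enabled for the user
    labels={
        "io.simcore.zone": "<traefik zone>",
        "com.simcore.description": "interactive for node: <node_uuid>",
        "uuid": "<node_uuid>",      # used to find and remove the network on project close
    },
)
print(network.id)  # kept by the scheduler as dynamic_sidecar_network_id
```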
progress_type=ProgressType.SIDECARS_PULLING, + report=ProgressReport(actual_value=1, total=1), + ) + await rabbitmq_client.publish(rabbit_message.channel_name, rabbit_message) + + await constrain_service_to_node( + service_name=scheduler_data.service_name, + docker_node_id=scheduler_data.dynamic_sidecar.docker_node_id, + ) + + # update service_port and assign it to the status + # needed by CreateUserServices action + scheduler_data.service_port = extract_service_port_service_settings(settings) + + await _create_proxy_service( + app, + scheduler_data=scheduler_data, + dynamic_sidecar_network_id=dynamic_sidecar_network_id, + swarm_network_id=swarm_network_id, + swarm_network_name=swarm_network_name, + ) + + # finally mark services created + scheduler_data.dynamic_sidecar.dynamic_sidecar_id = dynamic_sidecar_id + scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id = ( + dynamic_sidecar_network_id + ) + scheduler_data.dynamic_sidecar.swarm_network_id = swarm_network_id + scheduler_data.dynamic_sidecar.swarm_network_name = swarm_network_name + scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started = True diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events.py index 86900388830..d7af228204a 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events.py @@ -1,275 +1,40 @@ # pylint: disable=relative-beyond-top-level import logging -from typing import Any, Final, Optional, cast -from uuid import uuid4 +from typing import Any from fastapi import FastAPI -from fastapi.encoders import jsonable_encoder -from models_library.aiodocker_api import AioDockerServiceSpec -from models_library.projects import ProjectAtDB -from models_library.projects_nodes import Node -from models_library.projects_nodes_io import NodeIDStr -from models_library.rabbitmq_messages import ( - InstrumentationRabbitMessage, - ProgressRabbitMessageNode, - ProgressType, -) -from models_library.service_settings_labels import ( - SimcoreServiceLabels, - SimcoreServiceSettingsLabel, +from servicelib.fastapi.http_client_thin import BaseHttpClientError + +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, ) -from models_library.services import ServiceKeyVersion -from pydantic import PositiveFloat -from servicelib.fastapi.long_running_tasks.client import TaskId -from servicelib.json_serialization import json_dumps -from simcore_postgres_database.models.comp_tasks import NodeClass -from simcore_service_director_v2.utils.dict_utils import nested_update -from tenacity._asyncio import AsyncRetrying -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -from .....core.settings import DynamicSidecarSettings -from .....models.schemas.dynamic_services import DynamicSidecarStatus, SchedulerData -from .....models.schemas.dynamic_services.scheduler import ( +from .....models.dynamic_services_scheduler import ( DockerContainerInspect, DockerStatus, + DynamicSidecarStatus, + SchedulerData, ) -from .....utils.db import get_repository -from ....catalog import CatalogClient -from ....db.repositories.groups_extra_properties import GroupsExtraPropertiesRepository -from ....db.repositories.projects 
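The two `ProgressRabbitMessageNode` publishes above bracket the slow part of the start-up (service creation plus placement) with a 0/1 and then a 1/1 report, so the frontend can show `SIDECARS_PULLING` progress. A minimal stand-alone sketch of that bracketing pattern, with plain stand-ins instead of the project's RabbitMQ models:

```python
import asyncio
from dataclasses import dataclass


@dataclass
class Progress:  # stand-in for ProgressRabbitMessageNode / ProgressReport
    node_id: str
    actual_value: float
    total: float = 1.0


async def publish(progress: Progress) -> None:
    # stand-in for rabbitmq_client.publish(message.channel_name, message)
    print(f"{progress.node_id}: {progress.actual_value}/{progress.total}")


async def create_sidecar_with_progress(node_id: str) -> None:
    await publish(Progress(node_id, actual_value=0))  # SIDECARS_PULLING started
    await asyncio.sleep(0.1)                          # create service + wait for placement
    await publish(Progress(node_id, actual_value=1))  # SIDECARS_PULLING finished


asyncio.run(create_sidecar_with_progress("node-1"))
```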
import ProjectsRepository -from ....director_v0 import DirectorV0Client -from ....rabbitmq import RabbitMQClient -from ...api_client import ( - BaseClientHTTPError, - get_dynamic_sidecar_client, - get_dynamic_sidecar_service_health, -) -from ...docker_api import ( - constrain_service_to_node, - create_network, - create_service_and_get_id, - get_dynamic_sidecar_placement, - get_swarm_network, - is_dynamic_sidecar_stack_missing, -) -from ...docker_compose_specs import assemble_spec -from ...docker_service_specs import ( - extract_service_port_from_compose_start_spec, - get_dynamic_proxy_spec, - get_dynamic_sidecar_spec, - merge_settings_before_use, -) -from ...errors import EntrypointContainerNotFoundError, UnexpectedContainerStatusError +from ...api_client import get_dynamic_sidecar_service_health, get_sidecars_client +from ...errors import UnexpectedContainerStatusError from ._abc import DynamicSchedulerEvent +from ._event_create_sidecars import CreateSidecars +from ._events_user_services import create_user_services, submit_compose_sepc from ._events_utils import ( are_all_user_services_containers_running, attach_project_networks, attempt_pod_removal_and_data_saving, - get_director_v0_client, parse_containers_inspect, prepare_services_environment, wait_for_sidecar_api, ) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS: Final[tuple[list[str], ...]] = ( - ["labels"], - ["task_template", "Resources", "Limits"], - ["task_template", "Resources", "Reservation", "MemoryBytes"], - ["task_template", "Resources", "Reservation", "NanoCPUs"], - ["task_template", "Placement", "Constraints"], - ["task_template", "ContainerSpec", "Env"], - ["task_template", "Resources", "Reservation", "GenericResources"], -) _EXPECTED_STATUSES: set[DockerStatus] = {DockerStatus.created, DockerStatus.running} -class CreateSidecars(DynamicSchedulerEvent): - """Created the dynamic-sidecar and the proxy.""" - - @classmethod - async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: - # the call to is_dynamic_sidecar_stack_missing is expensive - # if the dynamic sidecar was started skip - if scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started: - return False - - return await is_dynamic_sidecar_stack_missing( - node_uuid=scheduler_data.node_uuid, - dynamic_sidecar_settings=app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR, - ) - - @classmethod - async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: - # instrumentation - message = InstrumentationRabbitMessage( - metrics="service_started", - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - node_id=scheduler_data.node_uuid, - service_uuid=scheduler_data.node_uuid, - service_type=NodeClass.INTERACTIVE.value, - service_key=scheduler_data.key, - service_tag=scheduler_data.version, - ) - rabbitmq_client: RabbitMQClient = app.state.rabbitmq_client - await rabbitmq_client.publish(message.channel_name, message.json()) - - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) - # the dynamic-sidecar should merge all the settings, especially: - # resources and placement derived from all the images in - # the provided docker-compose spec - # also other encodes the env vars to target the proper container - director_v0_client: DirectorV0Client = get_director_v0_client(app) - # fetching project form DB and fetching user settings - projects_repository = cast( - ProjectsRepository, 
get_repository(app, ProjectsRepository) - ) - project: ProjectAtDB = await projects_repository.get_project( - project_id=scheduler_data.project_id - ) - - node_uuid_str = NodeIDStr(scheduler_data.node_uuid) - node: Optional[Node] = project.workbench.get(node_uuid_str) - boot_options = ( - node.boot_options - if node is not None and node.boot_options is not None - else {} - ) - logger.info("%s", f"{boot_options=}") - - settings: SimcoreServiceSettingsLabel = await merge_settings_before_use( - director_v0_client=director_v0_client, - service_key=scheduler_data.key, - service_tag=scheduler_data.version, - service_user_selection_boot_options=boot_options, - service_resources=scheduler_data.service_resources, - ) - - groups_extra_properties = cast( - GroupsExtraPropertiesRepository, - get_repository(app, GroupsExtraPropertiesRepository), - ) - assert scheduler_data.product_name is not None # nosec - allow_internet_access: bool = await groups_extra_properties.has_internet_access( - user_id=scheduler_data.user_id, product_name=scheduler_data.product_name - ) - - network_config = { - "Name": scheduler_data.dynamic_sidecar_network_name, - "Driver": "overlay", - "Labels": { - "io.simcore.zone": f"{dynamic_sidecar_settings.TRAEFIK_SIMCORE_ZONE}", - "com.simcore.description": f"interactive for node: {scheduler_data.node_uuid}", - "uuid": f"{scheduler_data.node_uuid}", # needed for removal when project is closed - }, - "Attachable": True, - "Internal": not allow_internet_access, - } - dynamic_sidecar_network_id = await create_network(network_config) - - # attach the service to the swarm network dedicated to services - swarm_network: dict[str, Any] = await get_swarm_network( - dynamic_sidecar_settings - ) - swarm_network_id: str = swarm_network["Id"] - swarm_network_name: str = swarm_network["Name"] - - # start dynamic-sidecar and run the proxy on the same node - - # Each time a new dynamic-sidecar service is created - # generate a new `run_id` to avoid resource collisions - scheduler_data.run_id = uuid4() - - # WARNING: do NOT log, this structure has secrets in the open - # If you want to log, please use an obfuscator - dynamic_sidecar_service_spec_base: AioDockerServiceSpec = ( - get_dynamic_sidecar_spec( - scheduler_data=scheduler_data, - dynamic_sidecar_settings=dynamic_sidecar_settings, - swarm_network_id=swarm_network_id, - settings=settings, - app_settings=app.state.settings, - allow_internet_access=allow_internet_access, - ) - ) - - catalog_client = CatalogClient.instance(app) - user_specific_service_spec = ( - await catalog_client.get_service_specifications( - scheduler_data.user_id, scheduler_data.key, scheduler_data.version - ) - ).get("sidecar", {}) or {} - user_specific_service_spec = AioDockerServiceSpec.parse_obj( - user_specific_service_spec - ) - # NOTE: since user_specific_service_spec follows Docker Service Spec and not Aio - # we do not use aliases when exporting dynamic_sidecar_service_spec_base - dynamic_sidecar_service_final_spec = AioDockerServiceSpec.parse_obj( - nested_update( - jsonable_encoder(dynamic_sidecar_service_spec_base, exclude_unset=True), - jsonable_encoder(user_specific_service_spec, exclude_unset=True), - include=DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS, - ) - ) - await rabbitmq_client.publish( - ProgressRabbitMessageNode.get_channel_name(), - ProgressRabbitMessageNode( - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - node_id=scheduler_data.node_uuid, - progress_type=ProgressType.SIDECARS_PULLING, - progress=0, - ).json(), - ) - 
dynamic_sidecar_id = await create_service_and_get_id( - dynamic_sidecar_service_final_spec - ) - # constrain service to the same node - scheduler_data.dynamic_sidecar.docker_node_id = ( - await get_dynamic_sidecar_placement( - dynamic_sidecar_id, dynamic_sidecar_settings - ) - ) - await rabbitmq_client.publish( - ProgressRabbitMessageNode.get_channel_name(), - ProgressRabbitMessageNode( - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - node_id=scheduler_data.node_uuid, - progress_type=ProgressType.SIDECARS_PULLING, - progress=1, - ).json(), - ) - - await constrain_service_to_node( - service_name=scheduler_data.service_name, - docker_node_id=scheduler_data.dynamic_sidecar.docker_node_id, - ) - - # update service_port and assign it to the status - # needed by CreateUserServices action - scheduler_data.service_port = extract_service_port_from_compose_start_spec( - dynamic_sidecar_service_final_spec - ) - - # finally mark services created - scheduler_data.dynamic_sidecar.dynamic_sidecar_id = dynamic_sidecar_id - scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id = ( - dynamic_sidecar_network_id - ) - scheduler_data.dynamic_sidecar.swarm_network_id = swarm_network_id - scheduler_data.dynamic_sidecar.swarm_network_name = swarm_network_name - scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started = True - - class WaitForSidecarAPI(DynamicSchedulerEvent): """ Waits for the sidecar to start and respond to API calls. @@ -277,6 +42,7 @@ class WaitForSidecarAPI(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return ( scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started and not scheduler_data.dynamic_sidecar.is_healthy @@ -294,6 +60,7 @@ class UpdateHealth(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return scheduler_data.dynamic_sidecar.was_dynamic_sidecar_started @classmethod @@ -313,6 +80,7 @@ class GetStatus(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return ( scheduler_data.dynamic_sidecar.status.current == DynamicSidecarStatus.OK and scheduler_data.dynamic_sidecar.is_ready @@ -320,22 +88,20 @@ async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool @classmethod async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: - dynamic_sidecar_client = get_dynamic_sidecar_client(app) + sidecars_client = await get_sidecars_client(app, scheduler_data.node_uuid) dynamic_sidecar_endpoint = scheduler_data.endpoint - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + dynamic_sidecars_scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) scheduler_data.dynamic_sidecar.inspect_error_handler.delay_for = ( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S + dynamic_sidecars_scheduler_settings.DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S ) try: containers_inspect: dict[ str, Any - ] = await dynamic_sidecar_client.containers_inspect( - dynamic_sidecar_endpoint - ) - except BaseClientHTTPError as e: + ] = await sidecars_client.containers_inspect(dynamic_sidecar_endpoint) + except BaseHttpClientError as e: were_service_containers_previously_present = ( len(scheduler_data.dynamic_sidecar.containers_inspect) > 0 ) @@ -348,14 
+114,6 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: # Adding a delay between when the error is first seen and when the # error is raised to avoid random shutdowns of dynamic-sidecar services. scheduler_data.dynamic_sidecar.inspect_error_handler.try_to_raise(e) - - # After the service creation it takes a bit of time for the container to start - # If the same message appears in the log multiple times in a row (for the same - # service) something might be wrong with the service. - logger.warning( - "No container present for %s. Please investigate.", - scheduler_data.service_name, - ) return scheduler_data.dynamic_sidecar.inspect_error_handler.else_reset() @@ -369,10 +127,11 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: # Extra containers (utilities like forward proxies) can also be present here, # these also are expected to be created or running. - containers_with_error: list[DockerContainerInspect] = [] - for container_inspect in scheduler_data.dynamic_sidecar.containers_inspect: - if container_inspect.status not in _EXPECTED_STATUSES: - containers_with_error.append(container_inspect) + containers_with_error: list[DockerContainerInspect] = [ + container_inspect + for container_inspect in scheduler_data.dynamic_sidecar.containers_inspect + if container_inspect.status not in _EXPECTED_STATUSES + ] if len(containers_with_error) > 0: raise UnexpectedContainerStatusError( @@ -380,17 +139,40 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: ) -class PrepareServicesEnvironment(DynamicSchedulerEvent): +class SendUserServicesSpec(DynamicSchedulerEvent): """ Triggered when the dynamic-sidecar is responding to http requests. This step runs before CreateUserServices. + Sends over the configuration that is used for all docker compose commands. + """ + + @classmethod + async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose + return ( + scheduler_data.dynamic_sidecar.status.current == DynamicSidecarStatus.OK + and scheduler_data.dynamic_sidecar.is_ready + and not scheduler_data.dynamic_sidecar.was_compose_spec_submitted + ) + + @classmethod + async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: + await submit_compose_sepc(app, scheduler_data) + + +class PrepareServicesEnvironment(DynamicSchedulerEvent): + """ + Triggered when the dynamic-sidecar has it's docker-copose spec loaded. + This step runs before SendUserServicesSpec. + Sets up the environment on the host required by the service. 
- restores service state """ @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return ( scheduler_data.dynamic_sidecar.status.current == DynamicSidecarStatus.OK and scheduler_data.dynamic_sidecar.is_ready @@ -411,151 +193,16 @@ class CreateUserServices(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return ( scheduler_data.dynamic_sidecar.is_service_environment_ready - and not scheduler_data.dynamic_sidecar.compose_spec_submitted + and not scheduler_data.dynamic_sidecar.were_containers_created + and scheduler_data.dynamic_sidecar.compose_spec_submitted ) @classmethod async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: - logger.debug( - "Getting docker compose spec for service %s", scheduler_data.service_name - ) - - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) - dynamic_sidecar_client = get_dynamic_sidecar_client(app) - dynamic_sidecar_endpoint = scheduler_data.endpoint - - # check values have been set by previous step - if ( - scheduler_data.dynamic_sidecar.dynamic_sidecar_id is None - or scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id is None - or scheduler_data.dynamic_sidecar.swarm_network_id is None - or scheduler_data.dynamic_sidecar.swarm_network_name is None - ): - raise ValueError( - "Expected a value for all the following values: " - f"{scheduler_data.dynamic_sidecar.dynamic_sidecar_id=} " - f"{scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id=} " - f"{scheduler_data.dynamic_sidecar.swarm_network_id=} " - f"{scheduler_data.dynamic_sidecar.swarm_network_name=}" - ) - - # Starts dynamic SIDECAR ------------------------------------- - # creates a docker compose spec given the service key and tag - # fetching project form DB and fetching user settings - - director_v0_client: DirectorV0Client = get_director_v0_client(app) - simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion( - key=scheduler_data.key, version=scheduler_data.version - ) - ) - ) - - groups_extra_properties = cast( - GroupsExtraPropertiesRepository, - get_repository(app, GroupsExtraPropertiesRepository), - ) - assert scheduler_data.product_name is not None # nosec - allow_internet_access: bool = await groups_extra_properties.has_internet_access( - user_id=scheduler_data.user_id, product_name=scheduler_data.product_name - ) - - compose_spec = assemble_spec( - app=app, - service_key=scheduler_data.key, - service_version=scheduler_data.version, - paths_mapping=scheduler_data.paths_mapping, - compose_spec=scheduler_data.compose_spec, - container_http_entry=scheduler_data.container_http_entry, - dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, - swarm_network_name=scheduler_data.dynamic_sidecar.swarm_network_name, - service_resources=scheduler_data.service_resources, - simcore_service_labels=simcore_service_labels, - allow_internet_access=allow_internet_access, - product_name=scheduler_data.product_name, - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - node_id=scheduler_data.node_uuid, - simcore_user_agent=scheduler_data.request_simcore_user_agent, - ) - - logger.debug( - "Starting containers %s with compose-specs:\n%s", - scheduler_data.service_name, - compose_spec, - ) - - async def progress_create_containers( - message: str, percent: 
PositiveFloat, task_id: TaskId - ) -> None: - # TODO: detect when images are pulling and change the status - # of the service to pulling - logger.debug("%s: %.2f %s", task_id, percent, message) - - await dynamic_sidecar_client.create_containers( - dynamic_sidecar_endpoint, compose_spec, progress_create_containers - ) - - await dynamic_sidecar_client.enable_service_outputs_watcher( - dynamic_sidecar_endpoint - ) - - # Starts PROXY ----------------------------------------------- - # The entrypoint container name was now computed - # continue starting the proxy - - async for attempt in AsyncRetrying( - stop=stop_after_delay( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START - ), - wait=wait_fixed(1), - retry_error_cls=EntrypointContainerNotFoundError, - before_sleep=before_sleep_log(logger, logging.WARNING), - ): - with attempt: - if scheduler_data.dynamic_sidecar.service_removal_state.was_removed: - # the service was removed while waiting for the operation to finish - logger.warning( - "Stopping `get_entrypoint_container_name` operation. " - "Will no try to start the service." - ) - return - - entrypoint_container = await dynamic_sidecar_client.get_entrypoint_container_name( - dynamic_sidecar_endpoint=dynamic_sidecar_endpoint, - dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, - ) - logger.info( - "Fetched container entrypoint name %s", entrypoint_container - ) - - dynamic_sidecar_proxy_create_service_params: dict[ - str, Any - ] = get_dynamic_proxy_spec( - scheduler_data=scheduler_data, - dynamic_sidecar_settings=dynamic_sidecar_settings, - dynamic_sidecar_network_id=scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id, - swarm_network_id=scheduler_data.dynamic_sidecar.swarm_network_id, - swarm_network_name=scheduler_data.dynamic_sidecar.swarm_network_name, - entrypoint_container_name=entrypoint_container, - service_port=scheduler_data.service_port, - ) - - logger.debug( - "dynamic-sidecar-proxy create_service_params %s", - json_dumps(dynamic_sidecar_proxy_create_service_params), - ) - - # no need for the id any longer - await create_service_and_get_id(dynamic_sidecar_proxy_create_service_params) - scheduler_data.dynamic_sidecar.were_containers_created = True - - scheduler_data.dynamic_sidecar.was_compose_spec_submitted = True + await create_user_services(app, scheduler_data) class AttachProjectsNetworks(DynamicSchedulerEvent): @@ -568,6 +215,7 @@ class AttachProjectsNetworks(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return ( scheduler_data.dynamic_sidecar.were_containers_created and not scheduler_data.dynamic_sidecar.is_project_network_attached @@ -596,6 +244,7 @@ class RemoveUserCreatedServices(DynamicSchedulerEvent): @classmethod async def will_trigger(cls, app: FastAPI, scheduler_data: SchedulerData) -> bool: + assert app # nose return scheduler_data.dynamic_sidecar.service_removal_state.can_remove @classmethod @@ -610,6 +259,7 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: WaitForSidecarAPI, UpdateHealth, GetStatus, + SendUserServicesSpec, PrepareServicesEnvironment, CreateUserServices, AttachProjectsNetworks, diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py new file mode 100644 index 00000000000..d7dd034134b 
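With `SendUserServicesSpec` registered between `GetStatus` and `PrepareServicesEnvironment` above, the start-up is now driven through one more gated step. The loop that consumes these events is not part of this diff; the sketch below only assumes the pattern suggested by the `will_trigger`/`action` classmethods, namely that registered events are visited in order and applied when their trigger condition on the scheduler data holds (all names are illustrative):

```python
import asyncio
from abc import ABC, abstractmethod
from dataclasses import dataclass, field


@dataclass
class State:  # stand-in for SchedulerData
    flags: set[str] = field(default_factory=set)


class SchedulerEvent(ABC):
    @classmethod
    @abstractmethod
    async def will_trigger(cls, state: State) -> bool: ...

    @classmethod
    @abstractmethod
    async def action(cls, state: State) -> None: ...


class CreateSidecar(SchedulerEvent):
    @classmethod
    async def will_trigger(cls, state: State) -> bool:
        return "sidecar_started" not in state.flags

    @classmethod
    async def action(cls, state: State) -> None:
        state.flags.add("sidecar_started")


class SubmitComposeSpec(SchedulerEvent):
    @classmethod
    async def will_trigger(cls, state: State) -> bool:
        return "sidecar_started" in state.flags and "spec_submitted" not in state.flags

    @classmethod
    async def action(cls, state: State) -> None:
        state.flags.add("spec_submitted")


REGISTERED_EVENTS: tuple[type[SchedulerEvent], ...] = (CreateSidecar, SubmitComposeSpec)


async def observe_once(state: State) -> None:
    # the real scheduler re-runs this on every observation cycle
    for event in REGISTERED_EVENTS:
        if await event.will_trigger(state):
            await event.action(state)


state = State()
asyncio.run(observe_once(state))
print(state.flags)  # {'sidecar_started', 'spec_submitted'}
```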
--- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_user_services.py @@ -0,0 +1,247 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_long_running_tasks.base import ProgressPercent +from models_library.projects import ProjectAtDB +from models_library.projects_nodes_io import NodeIDStr +from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services import ServiceVersion +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from pydantic import TypeAdapter +from servicelib.fastapi.long_running_tasks.client import TaskId +from tenacity import RetryError +from tenacity.asyncio import AsyncRetrying +from tenacity.before_sleep import before_sleep_log +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from .....models.dynamic_services_scheduler import SchedulerData +from .....modules.catalog import CatalogClient +from .....modules.instrumentation import get_instrumentation, get_metrics_labels +from .....utils.db import get_repository +from ....db.repositories.groups_extra_properties import GroupsExtraPropertiesRepository +from ....db.repositories.projects import ProjectsRepository +from ....db.repositories.users import UsersRepository +from ...api_client import get_sidecars_client +from ...docker_compose_specs import assemble_spec +from ...errors import EntrypointContainerNotFoundError + +_logger = logging.getLogger(__name__) + + +async def submit_compose_sepc(app: FastAPI, scheduler_data: SchedulerData) -> None: + _logger.debug( + "Getting docker compose spec for service %s", scheduler_data.service_name + ) + + sidecars_client = await get_sidecars_client(app, scheduler_data.node_uuid) + dynamic_sidecar_endpoint = scheduler_data.endpoint + + # check values have been set by previous step + if ( + scheduler_data.dynamic_sidecar.dynamic_sidecar_id is None + or scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id is None + or scheduler_data.dynamic_sidecar.swarm_network_id is None + or scheduler_data.dynamic_sidecar.swarm_network_name is None + or scheduler_data.proxy_admin_api_port is None + ): + msg = ( + "Did not expect None for any of the following: " + f"{scheduler_data.dynamic_sidecar.dynamic_sidecar_id=} " + f"{scheduler_data.dynamic_sidecar.dynamic_sidecar_network_id=} " + f"{scheduler_data.dynamic_sidecar.swarm_network_id=} " + f"{scheduler_data.dynamic_sidecar.swarm_network_name=} " + f"{scheduler_data.proxy_admin_api_port=}" + ) + raise ValueError(msg) + + # Starts dynamic SIDECAR ------------------------------------- + # creates a docker compose spec given the service key and tag + # fetching project form DB and fetching user settings + + catalog_client = CatalogClient.instance(app) + simcore_service_labels: SimcoreServiceLabels = ( + await catalog_client.get_service_labels( + scheduler_data.key, scheduler_data.version + ) + ) + + groups_extra_properties = get_repository(app, GroupsExtraPropertiesRepository) + assert ( + scheduler_data.product_name is not None # nosec + ), "ONLY for legacy. 
This function should not be called with product_name==None" + allow_internet_access: bool = await groups_extra_properties.has_internet_access( + user_id=scheduler_data.user_id, product_name=scheduler_data.product_name + ) + assert scheduler_data.product_api_base_url is not None # nosec + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + + compose_spec: str = await assemble_spec( + app=app, + service_key=scheduler_data.key, + service_version=scheduler_data.version, + paths_mapping=scheduler_data.paths_mapping, + compose_spec=scheduler_data.compose_spec, + container_http_entry=scheduler_data.container_http_entry, + dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, + swarm_network_name=scheduler_data.dynamic_sidecar.swarm_network_name, + service_resources=scheduler_data.service_resources, + has_quota_support=dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS, + simcore_service_labels=simcore_service_labels, + allow_internet_access=allow_internet_access, + product_name=scheduler_data.product_name, + product_api_base_url=scheduler_data.product_api_base_url, + user_id=scheduler_data.user_id, + project_id=scheduler_data.project_id, + node_id=scheduler_data.node_uuid, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, + service_run_id=scheduler_data.run_id, + wallet_id=( + scheduler_data.wallet_info.wallet_id if scheduler_data.wallet_info else None + ), + ) + + _logger.debug( + "Submitting to %s it's compose-specs:\n%s", + scheduler_data.service_name, + compose_spec, + ) + await sidecars_client.submit_docker_compose_spec( + dynamic_sidecar_endpoint, compose_spec=compose_spec + ) + scheduler_data.dynamic_sidecar.was_compose_spec_submitted = True + + +async def create_user_services( # pylint: disable=too-many-statements + app: FastAPI, scheduler_data: SchedulerData +) -> None: + assert ( + scheduler_data.product_name is not None # nosec + ), "ONLY for legacy. 
This function should not be called with product_name==None" + + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + sidecars_client = await get_sidecars_client(app, scheduler_data.node_uuid) + dynamic_sidecar_endpoint = scheduler_data.endpoint + + _logger.debug("Starting containers %s", scheduler_data.service_name) + + async def progress_create_containers( + message: str, percent: ProgressPercent | None, task_id: TaskId + ) -> None: + _logger.debug("%s: %.2f %s", task_id, percent, message) + + # data from project + projects_repository = get_repository(app, ProjectsRepository) + project: ProjectAtDB = await projects_repository.get_project( + project_id=scheduler_data.project_id + ) + project_name = project.name + node_name = project.workbench[NodeIDStr(scheduler_data.node_uuid)].label + + # data from user + users_repository = get_repository(app, UsersRepository) + user_email = await users_repository.get_user_email(scheduler_data.user_id) + + # Billing info + wallet_id = None + wallet_name = None + pricing_plan_id = None + pricing_unit_id = None + pricing_unit_cost_id = None + if scheduler_data.wallet_info: + wallet_id = scheduler_data.wallet_info.wallet_id + wallet_name = scheduler_data.wallet_info.wallet_name + assert scheduler_data.pricing_info # nosec + pricing_plan_id = scheduler_data.pricing_info.pricing_plan_id + pricing_unit_id = scheduler_data.pricing_info.pricing_unit_id + pricing_unit_cost_id = scheduler_data.pricing_info.pricing_unit_cost_id + + metrics_params = CreateServiceMetricsAdditionalParams( + wallet_id=wallet_id, + wallet_name=wallet_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + pricing_unit_cost_id=pricing_unit_cost_id, + product_name=scheduler_data.product_name, + simcore_user_agent=scheduler_data.request_simcore_user_agent, + user_email=user_email, + project_name=project_name, + node_name=node_name, + service_key=scheduler_data.key, + service_version=TypeAdapter(ServiceVersion).validate_python( + scheduler_data.version + ), + service_resources=scheduler_data.service_resources, + service_additional_metadata={}, + ) + await sidecars_client.create_containers( + dynamic_sidecar_endpoint, + metrics_params, + progress_create_containers, + ) + + # NOTE: when in READ ONLY mode disable the outputs watcher + enable_outputs = scheduler_data.dynamic_sidecar.service_removal_state.can_save + await sidecars_client.toggle_service_ports_io( + dynamic_sidecar_endpoint, enable_outputs=enable_outputs, enable_inputs=True + ) + + # Starts PROXY ----------------------------------------------- + # The entrypoint container name was now computed + # continue starting the proxy + + try: + async for attempt in AsyncRetrying( + stop=stop_after_delay( + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_WAIT_FOR_CONTAINERS_TO_START + ), + wait=wait_fixed(1), + before_sleep=before_sleep_log(_logger, logging.WARNING), + ): + with attempt: + if scheduler_data.dynamic_sidecar.service_removal_state.was_removed: + # the service was removed while waiting for the operation to finish + _logger.warning( + "Stopping `get_entrypoint_container_name` operation. " + "Will no try to start the service." 
+ ) + return + + entrypoint_container = await sidecars_client.get_entrypoint_container_name( + dynamic_sidecar_endpoint=dynamic_sidecar_endpoint, + dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, + ) + _logger.info( + "Fetched container entrypoint name %s", entrypoint_container + ) + except RetryError as err: + raise EntrypointContainerNotFoundError from err + + await sidecars_client.configure_proxy( + proxy_endpoint=scheduler_data.get_proxy_endpoint, + entrypoint_container_name=entrypoint_container, + service_port=scheduler_data.service_port, + ) + + scheduler_data.dynamic_sidecar.were_containers_created = True + + # NOTE: user services are already in running state, meaning it is safe to pull inputs + await sidecars_client.pull_service_input_ports(dynamic_sidecar_endpoint) + + start_duration = ( + scheduler_data.dynamic_sidecar.instrumentation.elapsed_since_start_request() + ) + if start_duration is not None: + get_instrumentation(app).dynamic_sidecar_metrics.start_time_duration.labels( + **get_metrics_labels(scheduler_data) + ).observe(start_duration) + + _logger.info("Internal state after creating user services %s", scheduler_data) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_utils.py index f0c91677943..93a3b1d6923 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_events_utils.py @@ -1,86 +1,110 @@ # pylint: disable=relative-beyond-top-level -import json +import asyncio import logging -from collections import deque -from typing import Any, Deque, Final, Optional, cast +from typing import TYPE_CHECKING, Any, cast +from common_library.json_serialization import json_loads from fastapi import FastAPI +from models_library.api_schemas_long_running_tasks.base import ProgressPercent +from models_library.products import ProductName from models_library.projects_networks import ProjectsNetworks -from models_library.projects_nodes import NodeID -from models_library.projects_nodes_io import NodeIDStr +from models_library.projects_nodes_io import NodeID, NodeIDStr from models_library.rabbitmq_messages import InstrumentationRabbitMessage +from models_library.rpc.webserver.auth.api_keys import generate_unique_api_key from models_library.service_settings_labels import SimcoreServiceLabels -from models_library.services import ServiceKeyVersion +from models_library.shared_user_preferences import ( + AllowMetricsCollectionFrontendUserPreference, +) +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from models_library.user_preferences import FrontendUserPreference +from models_library.users import UserID +from servicelib.fastapi.http_client_thin import BaseHttpClientError from servicelib.fastapi.long_running_tasks.client import ( ProgressCallback, TaskClientResultError, ) from servicelib.fastapi.long_running_tasks.server import TaskProgress -from servicelib.utils import logged_gather +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from servicelib.rabbitmq._errors import RemoteMethodNotRegisteredError +from servicelib.rabbitmq.rpc_interfaces.agent.containers import force_container_cleanup +from 
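The retry block above (and the one it replaces) follows a standard `tenacity` idiom: probe on a fixed interval until a deadline and convert the final `RetryError` into a domain error (here `EntrypointContainerNotFoundError`). A self-contained sketch of the same idiom with illustrative names:

```python
import asyncio
import logging

from tenacity import RetryError
from tenacity.asyncio import AsyncRetrying
from tenacity.before_sleep import before_sleep_log
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed

_logger = logging.getLogger(__name__)


class EntrypointNotFoundError(RuntimeError):
    ...


async def wait_for_entrypoint(probe, *, timeout_s: float = 5.0) -> str:
    try:
        async for attempt in AsyncRetrying(
            stop=stop_after_delay(timeout_s),
            wait=wait_fixed(1),
            before_sleep=before_sleep_log(_logger, logging.WARNING),
        ):
            with attempt:
                return await probe()  # raises until the entrypoint container shows up
    except RetryError as err:
        raise EntrypointNotFoundError from err
    raise AssertionError("unreachable")  # the loop either returns or raises RetryError


async def probe() -> str:
    return "entrypoint-container-name"


print(asyncio.run(wait_for_entrypoint(probe)))
```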
servicelib.rabbitmq.rpc_interfaces.agent.errors import ( + NoServiceVolumesFoundRPCError, +) +from servicelib.rabbitmq.rpc_interfaces.agent.volumes import ( + remove_volumes_without_backup_for_service, +) +from servicelib.utils import limited_gather, logged_gather from simcore_postgres_database.models.comp_tasks import NodeClass -from tenacity import TryAgain -from tenacity._asyncio import AsyncRetrying +from tenacity import RetryError, TryAgain +from tenacity.asyncio import AsyncRetrying from tenacity.before_sleep import before_sleep_log from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -from .....core.errors import NodeRightsAcquireError -from .....core.settings import AppSettings, DynamicSidecarSettings -from .....models.schemas.dynamic_services.scheduler import ( +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from .....core.settings import AppSettings +from .....models.dynamic_services_scheduler import ( DockerContainerInspect, DockerStatus, SchedulerData, ) +from .....modules.catalog import CatalogClient +from .....modules.instrumentation import ( + get_instrumentation, + get_metrics_labels, + get_rate, + track_duration, +) +from .....modules.osparc_variables._api_auth import create_unique_api_name_for from .....utils.db import get_repository from ....db.repositories.projects import ProjectsRepository from ....db.repositories.projects_networks import ProjectsNetworksRepository +from ....db.repositories.user_preferences_frontend import ( + UserPreferencesFrontendRepository, +) from ....director_v0 import DirectorV0Client -from ....node_rights import NodeRightsManager, ResourceName -from ....rabbitmq import RabbitMQClient +from ....osparc_variables._api_auth_rpc import delete_api_key_by_key from ...api_client import ( - BaseClientHTTPError, - DynamicSidecarClient, - get_dynamic_sidecar_client, + SidecarsClient, get_dynamic_sidecar_service_health, + get_sidecars_client, + remove_sidecars_client, ) from ...docker_api import ( get_projects_networks_containers, remove_dynamic_sidecar_network, remove_dynamic_sidecar_stack, - remove_volumes_from_node, try_to_remove_network, ) from ...errors import EntrypointContainerNotFoundError -from ...volumes import DY_SIDECAR_SHARED_STORE_PATH, DynamicSidecarVolumesPathsResolver -logger = logging.getLogger(__name__) +if TYPE_CHECKING: + # NOTE: TYPE_CHECKING is True when static type checkers are running, + # allowing for circular imports only for them (mypy, pylance, ruff) + from .._task import DynamicSidecarsScheduler - -# Used to ensure no more that X services per node pull or push data -# Locking is applied when: -# - study is being opened (state and outputs are pulled) -# - study is being closed (state and outputs are saved) -RESOURCE_STATE_AND_INPUTS: Final[ResourceName] = "state_and_inputs" +_logger = logging.getLogger(__name__) def get_director_v0_client(app: FastAPI) -> DirectorV0Client: - client = DirectorV0Client.instance(app) - return client + return DirectorV0Client.instance(app) def parse_containers_inspect( - containers_inspect: Optional[dict[str, Any]] + containers_inspect: dict[str, Any] | None, ) -> list[DockerContainerInspect]: - results: Deque[DockerContainerInspect] = deque() - if containers_inspect is None: return [] - for container_id in containers_inspect: - container_inspect_data = containers_inspect[container_id] - results.append(DockerContainerInspect.from_container(container_inspect_data)) - return list(results) + return [ + 
DockerContainerInspect.from_container(containers_inspect[container_id]) + for container_id in containers_inspect + ] def are_all_user_services_containers_running( @@ -92,119 +116,161 @@ def are_all_user_services_containers_running( def _get_scheduler_data(app: FastAPI, node_uuid: NodeID) -> SchedulerData: - dynamic_sidecars_scheduler: "DynamicSidecarsScheduler" = ( + dynamic_sidecars_scheduler: DynamicSidecarsScheduler = ( app.state.dynamic_sidecar_scheduler ) # pylint: disable=protected-access - return dynamic_sidecars_scheduler._scheduler.get_scheduler_data(node_uuid) + scheduler_data: SchedulerData = ( + dynamic_sidecars_scheduler.scheduler.get_scheduler_data(node_uuid) + ) + return scheduler_data async def service_remove_containers( app: FastAPI, node_uuid: NodeID, - dynamic_sidecar_client: DynamicSidecarClient, - progress_callback: Optional[ProgressCallback] = None, + sidecars_client: SidecarsClient, + progress_callback: ProgressCallback | None = None, ) -> None: scheduler_data: SchedulerData = _get_scheduler_data(app, node_uuid) try: - await dynamic_sidecar_client.stop_service( + await sidecars_client.stop_service( scheduler_data.endpoint, progress_callback=progress_callback ) - except (BaseClientHTTPError, TaskClientResultError) as e: - logger.warning( + except (BaseHttpClientError, TaskClientResultError) as e: + _logger.info( ( - "Could not remove service containers for " - "%s\n%s. Will continue to save the data from the service!" + "Could not remove service containers for %s. " + "Will continue to save the data from the service! Error: %s" ), scheduler_data.service_name, - f"{e}", + f"{type(e)}: {e}", + ) + + +async def service_free_reserved_disk_space( + app: FastAPI, node_id: NodeID, sidecars_client: SidecarsClient +) -> None: + scheduler_data: SchedulerData = _get_scheduler_data(app, node_id) + try: + await sidecars_client.free_reserved_disk_space(scheduler_data.endpoint) + except BaseHttpClientError as e: + _logger.info( + ( + "Could not remove service containers for %s. " + "Will continue to save the data from the service! 
Error: %s" + ), + scheduler_data.service_name, + f"{type(e)}: {e}", ) async def service_save_state( app: FastAPI, node_uuid: NodeID, - dynamic_sidecar_client: DynamicSidecarClient, - progress_callback: Optional[ProgressCallback] = None, + sidecars_client: SidecarsClient, + progress_callback: ProgressCallback | None = None, ) -> None: scheduler_data: SchedulerData = _get_scheduler_data(app, node_uuid) - await dynamic_sidecar_client.save_service_state( - scheduler_data.endpoint, progress_callback=progress_callback + + with track_duration() as duration: + size = await sidecars_client.save_service_state( + scheduler_data.endpoint, progress_callback=progress_callback + ) + if size and size > 0: + get_instrumentation(app).dynamic_sidecar_metrics.push_service_state_rate.labels( + **get_metrics_labels(scheduler_data) + ).observe(get_rate(size, duration.to_float())) + + await sidecars_client.update_volume_state( + scheduler_data.endpoint, + volume_category=VolumeCategory.STATES, + volume_status=VolumeStatus.CONTENT_WAS_SAVED, ) async def service_push_outputs( app: FastAPI, node_uuid: NodeID, - dynamic_sidecar_client: DynamicSidecarClient, - progress_callback: Optional[ProgressCallback] = None, + sidecars_client: SidecarsClient, + progress_callback: ProgressCallback | None = None, ) -> None: scheduler_data: SchedulerData = _get_scheduler_data(app, node_uuid) - await dynamic_sidecar_client.push_service_output_ports( + await sidecars_client.push_service_output_ports( scheduler_data.endpoint, progress_callback=progress_callback ) + await sidecars_client.update_volume_state( + scheduler_data.endpoint, + volume_category=VolumeCategory.OUTPUTS, + volume_status=VolumeStatus.CONTENT_WAS_SAVED, + ) async def service_remove_sidecar_proxy_docker_networks_and_volumes( task_progress: TaskProgress, app: FastAPI, node_uuid: NodeID, - dynamic_sidecar_settings: DynamicSidecarSettings, - set_were_state_and_outputs_saved: Optional[bool] = None, + swarm_stack_name: str, + set_were_state_and_outputs_saved: bool | None = None, ) -> None: scheduler_data: SchedulerData = _get_scheduler_data(app, node_uuid) + rabbit_rpc_client: RabbitMQRPCClient = app.state.rabbitmq_rpc_client if set_were_state_and_outputs_saved is not None: scheduler_data.dynamic_sidecar.were_state_and_outputs_saved = True - # remove the 2 services - task_progress.update(message="removing dynamic sidecar stack", percent=0.1) + task_progress.update( + message="removing dynamic sidecar stack", percent=ProgressPercent(0.1) + ) await remove_dynamic_sidecar_stack( node_uuid=scheduler_data.node_uuid, - dynamic_sidecar_settings=dynamic_sidecar_settings, + swarm_stack_name=swarm_stack_name, ) - # remove network - task_progress.update(message="removing network", percent=0.2) + if scheduler_data.dynamic_sidecar.docker_node_id: + await force_container_cleanup( + rabbit_rpc_client, + docker_node_id=scheduler_data.dynamic_sidecar.docker_node_id, + swarm_stack_name=swarm_stack_name, + node_id=scheduler_data.node_uuid, + ) + + task_progress.update(message="removing network", percent=ProgressPercent(0.2)) await remove_dynamic_sidecar_network(scheduler_data.dynamic_sidecar_network_name) if scheduler_data.dynamic_sidecar.were_state_and_outputs_saved: if scheduler_data.dynamic_sidecar.docker_node_id is None: - logger.warning( + _logger.warning( "Skipped volume removal for %s, since a docker_node_id was not found.", scheduler_data.node_uuid, ) else: # Remove all dy-sidecar associated volumes from node - task_progress.update(message="removing volumes", percent=0.3) - 
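`track_duration` and `get_rate` used in `service_save_state` above come from the service's instrumentation module, which is not included in this diff. The pattern they support is: time the state upload, divide the returned size by the elapsed seconds, and feed the resulting throughput to a histogram. A minimal stand-in under that assumption:

```python
import time
from contextlib import contextmanager
from dataclasses import dataclass, field


@dataclass
class _Duration:
    start: float = field(default_factory=time.perf_counter)
    stop: float | None = None

    def to_float(self) -> float:
        assert self.stop is not None  # only valid after the block has finished
        return self.stop - self.start


@contextmanager
def track_duration():
    duration = _Duration()
    try:
        yield duration
    finally:
        duration.stop = time.perf_counter()


def get_rate(size_bytes: int, seconds: float) -> float:
    return size_bytes / max(seconds, 1e-9)  # avoid division by zero


with track_duration() as duration:
    time.sleep(0.1)       # stand-in for `await sidecars_client.save_service_state(...)`
    size = 50_000_000     # stand-in for the size returned by the sidecar

print(f"{get_rate(size, duration.to_float()) / 1e6:.1f} MB/s")
# in the service this value is passed to a Prometheus-style histogram via .observe(...)
```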
unique_volume_names = [ - DynamicSidecarVolumesPathsResolver.source( - path=volume_path, - node_uuid=scheduler_data.node_uuid, - run_id=scheduler_data.run_id, - ) - for volume_path in [ - DY_SIDECAR_SHARED_STORE_PATH, - scheduler_data.paths_mapping.inputs_path, - scheduler_data.paths_mapping.outputs_path, - ] - + scheduler_data.paths_mapping.state_paths - ] - await remove_volumes_from_node( - dynamic_sidecar_settings=dynamic_sidecar_settings, - volume_names=unique_volume_names, - docker_node_id=scheduler_data.dynamic_sidecar.docker_node_id, - user_id=scheduler_data.user_id, - project_id=scheduler_data.project_id, - node_uuid=scheduler_data.node_uuid, + task_progress.update( + message="removing volumes", percent=ProgressPercent(0.3) ) + with log_context(_logger, logging.DEBUG, f"removing volumes '{node_uuid}'"): + try: + await remove_volumes_without_backup_for_service( + rabbit_rpc_client, + docker_node_id=scheduler_data.dynamic_sidecar.docker_node_id, + swarm_stack_name=swarm_stack_name, + node_id=scheduler_data.node_uuid, + ) + except ( + NoServiceVolumesFoundRPCError, + RemoteMethodNotRegisteredError, # happens when autoscaling node was removed + ) as e: + _logger.info("Could not remove volumes, because: '%s'", e) - logger.debug( + _logger.debug( "Removed dynamic-sidecar services and crated container for '%s'", scheduler_data.service_name, ) - task_progress.update(message="removing project networks", percent=0.8) + task_progress.update( + message="removing project networks", percent=ProgressPercent(0.8) + ) used_projects_networks = await get_projects_networks_containers( project_id=scheduler_data.project_id ) @@ -217,11 +283,13 @@ async def service_remove_sidecar_proxy_docker_networks_and_volumes( ) # pylint: disable=protected-access - await app.state.dynamic_sidecar_scheduler._scheduler.remove_service_from_observation( + scheduler_data.dynamic_sidecar.service_removal_state.mark_removed() + await app.state.dynamic_sidecar_scheduler.scheduler.remove_service_from_observation( scheduler_data.node_uuid ) - scheduler_data.dynamic_sidecar.service_removal_state.mark_removed() - task_progress.update(message="finished removing resources", percent=1) + task_progress.update( + message="finished removing resources", percent=ProgressPercent(1) + ) async def attempt_pod_removal_and_data_saving( @@ -229,100 +297,107 @@ async def attempt_pod_removal_and_data_saving( ) -> None: # invoke container cleanup at this point app_settings: AppSettings = app.state.settings - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + settings: DynamicServicesSchedulerSettings = ( + app_settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) - async def _remove_containers_save_state_and_outputs() -> None: - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client(app) + _logger.debug("removing service; scheduler_data=%s", scheduler_data) - await service_remove_containers( - app, scheduler_data.node_uuid, dynamic_sidecar_client - ) - - # only try to save the status if : - # - it is requested to save the state - # - the dynamic-sidecar has finished booting correctly + sidecars_client: SidecarsClient = await get_sidecars_client( + app, scheduler_data.node_uuid + ) - can_really_save: bool = False - if scheduler_data.dynamic_sidecar.service_removal_state.can_save: - # if node is not present in the workbench it makes no sense - # to try and save the data, nodeports will raise errors - # and sidecar will hang + await service_remove_containers(app, 
scheduler_data.node_uuid, sidecars_client) - projects_repository = cast( - ProjectsRepository, get_repository(app, ProjectsRepository) + if scheduler_data.product_name: + try: + display_name = create_unique_api_name_for( + scheduler_data.product_name, + scheduler_data.user_id, + scheduler_data.project_id, + scheduler_data.node_uuid, ) - can_really_save = await projects_repository.is_node_present_in_workbench( - project_id=scheduler_data.project_id, node_uuid=scheduler_data.node_uuid + api_key = generate_unique_api_key(display_name) + await delete_api_key_by_key( + app, + product_name=scheduler_data.product_name, + user_id=scheduler_data.user_id, + api_key=api_key, ) + except Exception: # pylint: disable=broad-except + _logger.warning("failed to delete api key %s", display_name) - if can_really_save and scheduler_data.dynamic_sidecar.were_containers_created: - dynamic_sidecar_client = get_dynamic_sidecar_client(app) + # used for debuug, normally sleeps 0 + await asyncio.sleep( + settings.DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL.total_seconds() + ) - logger.info("Calling into dynamic-sidecar to save: state and output ports") - try: - tasks = [ - service_push_outputs( - app, scheduler_data.node_uuid, dynamic_sidecar_client - ) - ] - - # When enabled no longer uploads state via nodeports - # It uses rclone mounted volumes for this task. - if not app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: - tasks.append( - service_save_state( - app, scheduler_data.node_uuid, dynamic_sidecar_client - ) - ) + # only try to save the status if : + # - it is requested to save the state + # - the dynamic-sidecar has finished booting correctly - await logged_gather(*tasks, max_concurrency=2) - scheduler_data.dynamic_sidecar.were_state_and_outputs_saved = True - - logger.info("dynamic-sidecar saved: state and output ports") - except (BaseClientHTTPError, TaskClientResultError) as e: - logger.error( - ( - "Could not contact dynamic-sidecar to save service " - "state or output ports %s\n%s" - ), - scheduler_data.service_name, - f"{e}", - ) - # ensure dynamic-sidecar does not get removed - # user data can be manually saved and manual - # cleanup of the dynamic-sidecar is required + can_really_save: bool = False + if scheduler_data.dynamic_sidecar.service_removal_state.can_save: + # if node is not present in the workbench it makes no sense + # to try and save the data, nodeports will raise errors + # and sidecar will hang - scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error = ( - True - ) - raise e + projects_repository: ProjectsRepository = get_repository( + app, ProjectsRepository + ) + + can_really_save = await projects_repository.is_node_present_in_workbench( + project_id=scheduler_data.project_id, node_uuid=scheduler_data.node_uuid + ) + + if can_really_save and scheduler_data.dynamic_sidecar.were_containers_created: + _logger.info("Calling into dynamic-sidecar to save: state and output ports") + + await service_free_reserved_disk_space( + app, scheduler_data.node_uuid, sidecars_client + ) - if dynamic_sidecar_settings.DYNAMIC_SIDECAR_DOCKER_NODE_RESOURCE_LIMITS_ENABLED: - node_rights_manager = NodeRightsManager.instance(app) - assert scheduler_data.dynamic_sidecar.docker_node_id # nosec try: - async with node_rights_manager.acquire( - scheduler_data.dynamic_sidecar.docker_node_id, - resource_name=RESOURCE_STATE_AND_INPUTS, - ): - await _remove_containers_save_state_and_outputs() - except NodeRightsAcquireError: - # Next observation cycle, the service will try 
again - logger.debug( - "Skip saving service state for %s. Docker node %s is busy. Will try later.", - scheduler_data.node_uuid, - scheduler_data.dynamic_sidecar.docker_node_id, + tasks = [ + service_push_outputs(app, scheduler_data.node_uuid, sidecars_client) + ] + + # When enabled no longer uploads state via nodeports + # It uses rclone mounted volumes for this task. + if not app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: + tasks.append( + service_save_state(app, scheduler_data.node_uuid, sidecars_client) + ) + + await logged_gather(*tasks, max_concurrency=2) + scheduler_data.dynamic_sidecar.were_state_and_outputs_saved = True + + _logger.info("dynamic-sidecar saved: state and output ports") + except (BaseHttpClientError, TaskClientResultError) as e: + _logger.error( # noqa: TRY400 + ( + "Could not contact dynamic-sidecar to save service " + "state or output ports %s\n%s" + ), + scheduler_data.service_name, + f"{e}", + ) + # ensure dynamic-sidecar does not get removed + # user data can be manually saved and manual + # cleanup of the dynamic-sidecar is required + + scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error = ( + True ) - return - else: - await _remove_containers_save_state_and_outputs() + raise await service_remove_sidecar_proxy_docker_networks_and_volumes( - TaskProgress.create(), app, scheduler_data.node_uuid, dynamic_sidecar_settings + TaskProgress.create(), app, scheduler_data.node_uuid, settings.SWARM_STACK_NAME ) + # remove sidecar's api client + remove_sidecars_client(app, scheduler_data.node_uuid) + # instrumentation message = InstrumentationRabbitMessage( metrics="service_stopped", @@ -333,20 +408,30 @@ async def _remove_containers_save_state_and_outputs() -> None: service_type=NodeClass.INTERACTIVE.value, service_key=scheduler_data.key, service_tag=scheduler_data.version, + simcore_user_agent=scheduler_data.request_simcore_user_agent, ) rabbitmq_client: RabbitMQClient = app.state.rabbitmq_client - await rabbitmq_client.publish(message.channel_name, message.json()) + await rabbitmq_client.publish(message.channel_name, message) + + # metrics + + stop_duration = ( + scheduler_data.dynamic_sidecar.instrumentation.elapsed_since_close_request() + ) + if stop_duration is not None: + get_instrumentation(app).dynamic_sidecar_metrics.stop_time_duration.labels( + **get_metrics_labels(scheduler_data) + ).observe(stop_duration) async def attach_project_networks(app: FastAPI, scheduler_data: SchedulerData) -> None: - logger.debug("Attaching project networks for %s", scheduler_data.service_name) + _logger.debug("Attaching project networks for %s", scheduler_data.service_name) - dynamic_sidecar_client = get_dynamic_sidecar_client(app) + sidecars_client = await get_sidecars_client(app, scheduler_data.node_uuid) dynamic_sidecar_endpoint = scheduler_data.endpoint - projects_networks_repository: ProjectsNetworksRepository = cast( - ProjectsNetworksRepository, - get_repository(app, ProjectsNetworksRepository), + projects_networks_repository: ProjectsNetworksRepository = get_repository( + app, ProjectsNetworksRepository ) projects_networks: ProjectsNetworks = ( @@ -360,7 +445,7 @@ async def attach_project_networks(app: FastAPI, scheduler_data: SchedulerData) - ) in projects_networks.networks_with_aliases.items(): network_alias = container_aliases.get(NodeIDStr(scheduler_data.node_uuid)) if network_alias is not None: - await dynamic_sidecar_client.attach_service_containers_to_project_network( + await 
sidecars_client.attach_service_containers_to_project_network( dynamic_sidecar_endpoint=dynamic_sidecar_endpoint, dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, project_network=network_name, @@ -372,87 +457,144 @@ async def attach_project_networks(app: FastAPI, scheduler_data: SchedulerData) - async def wait_for_sidecar_api(app: FastAPI, scheduler_data: SchedulerData) -> None: - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) - - async for attempt in AsyncRetrying( - stop=stop_after_delay( - dynamic_sidecar_settings.DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S - ), - wait=wait_fixed(1), - retry_error_cls=EntrypointContainerNotFoundError, - before_sleep=before_sleep_log(logger, logging.DEBUG), - ): - with attempt: - if not await get_dynamic_sidecar_service_health( - app, scheduler_data, with_retry=False - ): - raise TryAgain() - scheduler_data.dynamic_sidecar.is_healthy = True + try: + async for attempt in AsyncRetrying( + stop=stop_after_delay( + dynamic_services_scheduler_settings.DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S + ), + wait=wait_fixed(1), + before_sleep=before_sleep_log(_logger, logging.DEBUG), + ): + with attempt: + if not await get_dynamic_sidecar_service_health( + app, scheduler_data, with_retry=False + ): + raise TryAgain + scheduler_data.dynamic_sidecar.is_healthy = True + except RetryError as e: + raise EntrypointContainerNotFoundError from e async def prepare_services_environment( app: FastAPI, scheduler_data: SchedulerData ) -> None: app_settings: AppSettings = app.state.settings - dynamic_sidecar_client = get_dynamic_sidecar_client(app) + sidecars_client = await get_sidecars_client(app, scheduler_data.node_uuid) dynamic_sidecar_endpoint = scheduler_data.endpoint - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + + # Before starting, update the volume states. It is not always + # required to save the data from these volumes, eg: when services + # are opened in read only mode. 
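Stepping back to wait_for_sidecar_api above: it polls the sidecar health check for up to DYNAMIC_SIDECAR_STARTUP_TIMEOUT_S seconds and, once tenacity gives up, translates RetryError into the domain-specific EntrypointContainerNotFoundError. A minimal, self-contained sketch of that retry-and-translate pattern, assuming only tenacity; probe, timeout_s and StartupTimeoutError are illustrative names, not part of this codebase:

import logging

from tenacity import (
    AsyncRetrying,
    RetryError,
    TryAgain,
    before_sleep_log,
    stop_after_delay,
    wait_fixed,
)

_logger = logging.getLogger(__name__)


class StartupTimeoutError(RuntimeError):
    """Illustrative domain error raised when the probe never succeeds in time."""


async def wait_until_ready(probe, timeout_s: float) -> None:
    # probe: any async callable returning True once the target is healthy
    try:
        async for attempt in AsyncRetrying(
            stop=stop_after_delay(timeout_s),
            wait=wait_fixed(1),
            before_sleep=before_sleep_log(_logger, logging.DEBUG),
        ):
            with attempt:
                if not await probe():
                    raise TryAgain  # not healthy yet -> retry until the deadline
    except RetryError as exc:
        raise StartupTimeoutError from exc  # surface a domain error, not tenacity's

The real helper also flips scheduler_data.dynamic_sidecar.is_healthy once the check passes; that bookkeeping is left out of the sketch.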
+ volume_status: VolumeStatus = ( + VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED + if scheduler_data.dynamic_sidecar.service_removal_state.can_save + else VolumeStatus.CONTENT_NO_SAVE_REQUIRED + ) + await logged_gather( + *( + sidecars_client.update_volume_state( + scheduler_data.endpoint, + volume_category=VolumeCategory.STATES, + volume_status=volume_status, + ), + sidecars_client.update_volume_state( + scheduler_data.endpoint, + volume_category=VolumeCategory.OUTPUTS, + volume_status=volume_status, + ), + ) ) - async def _pull_outputs_and_state(): - tasks = [ - dynamic_sidecar_client.pull_service_output_ports(dynamic_sidecar_endpoint) - ] - # When enabled no longer downloads state via nodeports - # S3 is used to store state paths - if not app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: - tasks.append( - dynamic_sidecar_client.restore_service_state(dynamic_sidecar_endpoint) + async def _pull_output_ports_with_metrics() -> None: + with track_duration() as duration: + size: int = await sidecars_client.pull_service_output_ports( + dynamic_sidecar_endpoint + ) + if size and size > 0: + get_instrumentation( + app + ).dynamic_sidecar_metrics.output_ports_pull_rate.labels( + **get_metrics_labels(scheduler_data) + ).observe( + get_rate(size, duration.to_float()) ) - await logged_gather(*tasks, max_concurrency=2) + async def _pull_user_services_images_with_metrics() -> None: + with track_duration() as duration: + await sidecars_client.pull_user_services_images(dynamic_sidecar_endpoint) - # inside this directory create the missing dirs, fetch those form the labels - director_v0_client: DirectorV0Client = get_director_v0_client(app) - simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion( - key=scheduler_data.key, version=scheduler_data.version - ) - ) + get_instrumentation( + app + ).dynamic_sidecar_metrics.pull_user_services_images_duration.labels( + **get_metrics_labels(scheduler_data) + ).observe( + duration.to_float() ) - service_outputs_labels = json.loads( - simcore_service_labels.dict().get("io.simcore.outputs", "{}") - ).get("outputs", {}) - logger.debug( - "Creating dirs from service outputs labels: %s", - service_outputs_labels, - ) - await dynamic_sidecar_client.service_outputs_create_dirs( - dynamic_sidecar_endpoint, service_outputs_labels + + async def _restore_service_state_with_metrics() -> None: + with track_duration() as duration: + size = await sidecars_client.restore_service_state(dynamic_sidecar_endpoint) + + if size and size > 0: + get_instrumentation( + app + ).dynamic_sidecar_metrics.pull_service_state_rate.labels( + **get_metrics_labels(scheduler_data) + ).observe( + get_rate(size, duration.to_float()) + ) + + tasks = [ + _pull_user_services_images_with_metrics(), + _pull_output_ports_with_metrics(), + ] + # When enabled no longer downloads state via nodeports + # S3 is used to store state paths + if not app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED: + tasks.append(_restore_service_state_with_metrics()) + + await limited_gather(*tasks, limit=3) + + # inside this directory create the missing dirs, fetch those form the labels + catalog_client = CatalogClient.instance(app) + simcore_service_labels: SimcoreServiceLabels = ( + await catalog_client.get_service_labels( + scheduler_data.key, scheduler_data.version ) + ) + service_outputs_labels = json_loads( + simcore_service_labels.model_dump().get("io.simcore.outputs", "{}") + ).get("outputs", {}) + _logger.debug( + "Creating dirs from 
service outputs labels: %s", + service_outputs_labels, + ) + await sidecars_client.service_outputs_create_dirs( + dynamic_sidecar_endpoint, service_outputs_labels + ) - scheduler_data.dynamic_sidecar.is_service_environment_ready = True + scheduler_data.dynamic_sidecar.is_service_environment_ready = True - if dynamic_sidecar_settings.DYNAMIC_SIDECAR_DOCKER_NODE_RESOURCE_LIMITS_ENABLED: - node_rights_manager = NodeRightsManager.instance(app) - assert scheduler_data.dynamic_sidecar.docker_node_id # nosec - try: - async with node_rights_manager.acquire( - scheduler_data.dynamic_sidecar.docker_node_id, - resource_name=RESOURCE_STATE_AND_INPUTS, - ): - await _pull_outputs_and_state() - except NodeRightsAcquireError: - # Next observation cycle, the service will try again - logger.debug( - "Skip saving service state for %s. Docker node %s is busy. Will try later.", - scheduler_data.node_uuid, - scheduler_data.dynamic_sidecar.docker_node_id, - ) - return - else: - await _pull_outputs_and_state() + +async def get_allow_metrics_collection( + app: FastAPI, user_id: UserID, product_name: ProductName +) -> bool: + repo = get_repository(app, UserPreferencesFrontendRepository) + preference: FrontendUserPreference | None = await repo.get_user_preference( + user_id=user_id, + product_name=product_name, + preference_class=AllowMetricsCollectionFrontendUserPreference, + ) + + if preference is None: + return cast( + bool, AllowMetricsCollectionFrontendUserPreference.get_default_value() + ) + + allow_metrics_collection = ( + AllowMetricsCollectionFrontendUserPreference.model_validate(preference) + ) + return allow_metrics_collection.value diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_observer.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_observer.py index ffff436862c..949ba98f4fe 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_observer.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_observer.py @@ -5,15 +5,14 @@ from copy import deepcopy from math import floor +from common_library.error_codes import create_error_code from fastapi import FastAPI -from servicelib.error_codes import create_error_code +from servicelib.logging_errors import create_troubleshotting_log_kwargs -from .....core.settings import ( +from .....core.dynamic_services_settings.scheduler import ( DynamicServicesSchedulerSettings, - DynamicServicesSettings, - DynamicSidecarSettings, ) -from .....models.schemas.dynamic_services import ( +from .....models.dynamic_services_scheduler import ( DynamicSidecarStatus, SchedulerData, ServiceName, @@ -31,24 +30,24 @@ async def _apply_observation_cycle( - scheduler: "DynamicSidecarsScheduler", scheduler_data: SchedulerData + scheduler: "DynamicSidecarsScheduler", # type: ignore # noqa: F821 + scheduler_data: SchedulerData, ) -> None: """ fetches status for service and then processes all the registered events and updates the status back """ app: FastAPI = scheduler.app - dynamic_services_settings: DynamicServicesSettings = ( - app.state.settings.DYNAMIC_SERVICES + settings: DynamicServicesSchedulerSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) - # TODO: PC-> ANE: custom settings are frozen. in principle, no need to create copies. 
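Just below, _apply_observation_cycle snapshots the sidecar status with deepcopy before processing events, and observing_single_service later writes the scheduler data back to the docker service label only when it actually changed. A self-contained sketch of that snapshot-and-compare pattern, where TrackedState and persist are illustrative stand-ins for SchedulerData and update_scheduler_data_label:

from copy import deepcopy
from dataclasses import dataclass, field


@dataclass
class TrackedState:
    # stand-in for SchedulerData: any mutable, equality-comparable container
    status: str = "ok"
    containers: list[str] = field(default_factory=list)


async def observe_once(state: TrackedState, persist) -> None:
    snapshot = deepcopy(state)  # capture the state before the cycle runs

    # ... the observation cycle mutates `state` here ...
    state.containers.append("dy-sidecar")

    if state != snapshot:  # write back only on a real change
        await persist(state)

Running asyncio.run(observe_once(TrackedState(), persist)) with any async persist callable exercises the sketch.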
initial_status = deepcopy(scheduler_data.dynamic_sidecar.status) if ( # do not refactor, second part of "and condition" is skipped most times scheduler_data.dynamic_sidecar.were_containers_created and not await are_sidecar_and_proxy_services_present( node_uuid=scheduler_data.node_uuid, - dynamic_sidecar_settings=dynamic_services_settings.DYNAMIC_SIDECAR, + swarm_stack_name=settings.SWARM_STACK_NAME, ) ): # NOTE: once marked for removal the observation cycle needs @@ -85,10 +84,9 @@ def _trigger_every_30_seconds(observation_counter: int, wait_interval: float) -> async def observing_single_service( - scheduler: "DynamicSidecarsScheduler", + scheduler: "DynamicSidecarsScheduler", # type: ignore service_name: ServiceName, scheduler_data: SchedulerData, - dynamic_sidecar_settings: DynamicSidecarSettings, dynamic_scheduler: DynamicServicesSchedulerSettings, ) -> None: app: FastAPI = scheduler.app @@ -113,11 +111,11 @@ async def observing_single_service( # NOTE: do not change below order, reduces pressure on the # docker swarm engine API. _trigger_every_30_seconds( - scheduler._observation_counter, # pylint:disable=protected-access - dynamic_scheduler.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS, + scheduler._observation_counter, # pylint:disable=protected-access # noqa: SLF001 + dynamic_scheduler.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL.total_seconds(), ) and await is_dynamic_sidecar_stack_missing( - scheduler_data.node_uuid, dynamic_sidecar_settings + scheduler_data.node_uuid, dynamic_scheduler.SWARM_STACK_NAME ) ): # if both proxy and sidecar ar missing at this point it @@ -142,27 +140,38 @@ async def observing_single_service( logger.debug("completed observation cycle of %s", f"{service_name=}") except asyncio.CancelledError: # pylint: disable=try-except-raise raise # pragma: no cover - except Exception as e: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except service_name = scheduler_data.service_name # With unhandled errors, let's generate and ID and send it to the end-user # so that we can trace the logs and debug the issue. + user_error_msg = ( + f"The service ({service_name}) experienced a problem. " + "Our team has recorded the issue. " + "If the issue persists please report it." + ) + error_code = create_error_code(exc) - error_code = create_error_code(e) logger.exception( - "Observation of %s unexpectedly failed [%s]", - f"{service_name=} ", - f"{error_code}", - extra={"error_code": error_code}, + **create_troubleshotting_log_kwargs( + user_error_msg, + error=exc, + error_context={ + "service_name": service_name, + "user_id": scheduler_data.user_id, + }, + error_code=error_code, + tip=f"Observation of {service_name=} unexpectedly failed", + ) ) scheduler_data.dynamic_sidecar.status.update_failing_status( # This message must be human-friendly - f"Upss! 
This service ({service_name}) unexpectedly failed", + user_error_msg, error_code, ) finally: if scheduler_data_copy != scheduler_data: try: await update_scheduler_data_label(scheduler_data) - except GenericDockerError as e: - logger.warning("Skipped labels update, please check:\n %s", f"{e}") + except GenericDockerError as exc: + logger.warning("Skipped labels update, please check:\n %s", f"{exc}") diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler.py index 380dae8c35d..6860717238d 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler.py @@ -14,72 +14,160 @@ """ import asyncio +import contextlib import functools import logging -from asyncio import sleep -from contextlib import suppress -from dataclasses import dataclass -from typing import Optional, Union - +import time +from asyncio import Lock, Queue, Task +from dataclasses import dataclass, field +from typing import Final + +import arrow +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceCreate, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.api_schemas_dynamic_sidecar.containers import ActivityInfoOrNone from models_library.basic_types import PortInt from models_library.projects import ProjectID from models_library.projects_networks import DockerNetworkAlias from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import RestartPolicy, SimcoreServiceLabels +from models_library.services_types import ServicePortKey from models_library.users import UserID -from pydantic import AnyHttpUrl +from models_library.wallets import WalletID +from pydantic import NonNegativeFloat +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task from servicelib.fastapi.long_running_tasks.client import ProgressCallback from servicelib.fastapi.long_running_tasks.server import TaskProgress +from servicelib.redis import RedisClientsManager, exclusive +from settings_library.redis import RedisDatabase -from .....core.settings import DynamicServicesSchedulerSettings, DynamicSidecarSettings -from .....models.domains.dynamic_services import ( - DynamicServiceCreate, - RetrieveDataOutEnveloped, +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, ) -from .....models.schemas.dynamic_services import ( - DynamicSidecarStatus, - RunningDynamicServiceDetails, - SchedulerData, -) -from ...api_client import DynamicSidecarClient, get_dynamic_sidecar_client -from ...docker_api import ( - get_dynamic_sidecar_state, - get_dynamic_sidecars_to_observe, - remove_pending_volume_removal_services, - update_scheduler_data_label, -) -from ...docker_states import ServiceState, extract_containers_minimum_statuses -from ...errors import ( - DockerServiceNotFoundError, - DynamicSidecarError, - DynamicSidecarNotFoundError, +from .....models.dynamic_services_scheduler import SchedulerData, ServiceName +from .....modules.instrumentation import ( + get_instrumentation, + get_metrics_labels, + get_rate, ) +from ...api_client import 
SidecarsClient, get_sidecars_client +from ...docker_api import update_scheduler_data_label +from ...errors import DynamicSidecarError, DynamicSidecarNotFoundError from .._abc import SchedulerPublicInterface +from . import _scheduler_utils from ._events_utils import ( - service_push_outputs, service_remove_containers, service_remove_sidecar_proxy_docker_networks_and_volumes, service_save_state, ) from ._observer import observing_single_service -from ._scheduler_mixin import SchedulerInternalsMixin logger = logging.getLogger(__name__) _DISABLED_MARK = object() +_MAX_WAIT_TASKS_SHUTDOWN_S: Final[NonNegativeFloat] = 5 @dataclass -class Scheduler(SchedulerInternalsMixin, SchedulerPublicInterface): - def toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool: +class Scheduler( # pylint: disable=too-many-instance-attributes, too-many-public-methods + SchedulerPublicInterface +): + app: FastAPI + + _lock: Lock = field(default_factory=Lock) + _to_observe: dict[ServiceName, SchedulerData] = field(default_factory=dict) + _service_observation_task: dict[ServiceName, asyncio.Task | object | None] = field( + default_factory=dict + ) + _inverse_search_mapping: dict[NodeID, ServiceName] = field(default_factory=dict) + _scheduler_task: Task | None = None + _trigger_observation_queue_task: Task | None = None + _trigger_observation_queue: Queue = field(default_factory=Queue) + _observation_counter: int = 0 + + async def start(self) -> None: + # run as a background task + logger.info("Starting dynamic-sidecar scheduler") + + redis_clients_manager: RedisClientsManager = ( + self.app.state.redis_clients_manager + ) + + settings: DynamicServicesSchedulerSettings = ( + self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + self._scheduler_task = create_periodic_task( + exclusive( + redis_clients_manager.client(RedisDatabase.LOCKS), + lock_key=f"{__name__}.{self.__class__.__name__}", + )(self._run_scheduler_task), + interval=settings.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL, + task_name=f"{__name__}.{self.__class__.__name__}", + ) + + self._trigger_observation_queue_task = asyncio.create_task( + self._run_trigger_observation_queue_task(), + name="dynamic-scheduler-trigger-obs-queue", + ) + + await _scheduler_utils.discover_running_services(self) + + async def shutdown(self) -> None: + logger.info("Shutting down dynamic-sidecar scheduler") + self._inverse_search_mapping = {} + self._to_observe = {} + + if self._scheduler_task is not None: + await cancel_wait_task(self._scheduler_task, max_delay=5) + self._scheduler_task = None + + if self._trigger_observation_queue_task is not None: + await self._trigger_observation_queue.put(None) + + self._trigger_observation_queue_task.cancel() + with contextlib.suppress(asyncio.CancelledError): + await self._trigger_observation_queue_task + self._trigger_observation_queue_task = None + self._trigger_observation_queue = Queue() + + # let's properly cleanup remaining observation tasks + running_tasks = [ + x for x in self._service_observation_task.values() if isinstance(x, Task) + ] + for task in running_tasks: + task.cancel() + try: + results = await asyncio.wait_for( + asyncio.gather(*running_tasks, return_exceptions=True), + timeout=_MAX_WAIT_TASKS_SHUTDOWN_S, + ) + if bad_results := list(filter(lambda r: isinstance(r, Exception), results)): + logger.error( + "Following observation tasks completed with an unexpected error:%s", + f"{bad_results}", + ) + except TimeoutError: + logger.exception( + "Timed-out waiting for %s to complete. 
Action: Check why this is blocking", + f"{running_tasks=}", + ) + + def toggle_observation(self, node_uuid: NodeID, *, disable: bool) -> bool: """ returns True if it managed to enable/disable observation of the service raises DynamicSidecarNotFoundError """ if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] service_task = self._service_observation_task.get(service_name) @@ -97,54 +185,44 @@ def toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool: async def push_service_outputs( self, node_uuid: NodeID, - progress_callback: Optional[ProgressCallback] = None, + progress_callback: ProgressCallback | None = None, ) -> None: - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) - await service_push_outputs( - app=self.app, - node_uuid=node_uuid, - dynamic_sidecar_client=dynamic_sidecar_client, - progress_callback=progress_callback, + await _scheduler_utils.push_service_outputs( + self.app, node_uuid, progress_callback ) async def remove_service_containers( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_uuid) await service_remove_containers( app=self.app, node_uuid=node_uuid, - dynamic_sidecar_client=dynamic_sidecar_client, + sidecars_client=sidecars_client, progress_callback=progress_callback, ) async def remove_service_sidecar_proxy_docker_networks_and_volumes( self, task_progress: TaskProgress, node_uuid: NodeID ) -> None: - dynamic_sidecar_settings: DynamicSidecarSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings = ( + self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) await service_remove_sidecar_proxy_docker_networks_and_volumes( task_progress=task_progress, app=self.app, node_uuid=node_uuid, - dynamic_sidecar_settings=dynamic_sidecar_settings, + swarm_stack_name=dynamic_services_scheduler_settings.SWARM_STACK_NAME, ) async def save_service_state( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_uuid) await service_save_state( app=self.app, node_uuid=node_uuid, - dynamic_sidecar_client=dynamic_sidecar_client, + sidecars_client=sidecars_client, progress_callback=progress_callback, ) @@ -156,6 +234,8 @@ async def add_service( request_dns: str, request_scheme: str, request_simcore_user_agent: str, + *, + can_save: bool, ) -> None: """Invoked before the service is started""" scheduler_data = SchedulerData.from_http_request( @@ -165,10 +245,16 @@ async def add_service( request_dns=request_dns, request_scheme=request_scheme, request_simcore_user_agent=request_simcore_user_agent, + can_save=can_save, + ) + scheduler_data.dynamic_sidecar.instrumentation.start_requested_at = ( + arrow.utcnow().datetime ) - await self._add_service(scheduler_data) + await self.add_service_from_scheduler_data(scheduler_data) - async def 
_add_service(self, scheduler_data: SchedulerData) -> None: + async def add_service_from_scheduler_data( + self, scheduler_data: SchedulerData + ) -> None: # NOTE: Because we do not have all items require to compute the # service_name the node_uuid is used to keep track of the service # for faster searches. @@ -180,11 +266,11 @@ async def _add_service(self, scheduler_data: SchedulerData) -> None: return if scheduler_data.node_uuid in self._inverse_search_mapping: - raise DynamicSidecarError( - "node_uuids at a global level collided. A running " - f"service for node {scheduler_data.node_uuid} already exists. " - "Please checkout other projects which may have this issue." + msg = ( + f"node_uuids at a global level collided. A running service for node {scheduler_data.node_uuid} already exists." + " Please checkout other projects which may have this issue." ) + raise DynamicSidecarError(msg=msg) self._inverse_search_mapping[ scheduler_data.node_uuid @@ -198,15 +284,15 @@ def is_service_tracked(self, node_uuid: NodeID) -> bool: def get_scheduler_data(self, node_uuid: NodeID) -> SchedulerData: if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] return self._to_observe[service_name] def list_services( self, *, - user_id: Optional[UserID] = None, - project_id: Optional[ProjectID] = None, + user_id: UserID | None = None, + project_id: ProjectID | None = None, ) -> list[NodeID]: """ Returns the list of tracked service UUIDs @@ -239,68 +325,94 @@ def _is_scheduled(node_id: NodeID) -> bool: async def mark_service_for_removal( self, node_uuid: NodeID, - can_save: Optional[bool], + can_save: bool | None, + *, skip_observation_recreation: bool = False, ) -> None: """Marks service for removal, causing RemoveMarkedService to trigger""" async with self._lock: if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] if service_name not in self._to_observe: return current: SchedulerData = self._to_observe[service_name] - current.dynamic_sidecar.service_removal_state.mark_to_remove(can_save) + + # if service is already being removed no need to force a cancellation and removal of the service + if current.dynamic_sidecar.service_removal_state.can_remove: + logger.debug( + "Service %s is already being removed, will not cancel observation", + node_uuid, + ) + return + + current.dynamic_sidecar.instrumentation.close_requested_at = ( + arrow.utcnow().datetime + ) + + # PC-> ANE: could you please review what to do when can_save=None + assert can_save is not None # nosec + current.dynamic_sidecar.service_removal_state.mark_to_remove( + can_save=can_save + ) await update_scheduler_data_label(current) # cancel current observation task if service_name in self._service_observation_task: - service_task: Optional[ - Union[asyncio.Task, object] - ] = self._service_observation_task[service_name] + service_task: None | asyncio.Task | object = ( + self._service_observation_task[service_name] + ) if isinstance(service_task, asyncio.Task): - service_task.cancel() - - async def _await_task(task: asyncio.Task) -> None: - await task - - with suppress(asyncio.CancelledError): - try: - await asyncio.wait_for( - _await_task(service_task), timeout=10 - ) - except asyncio.TimeoutError: - pass + await cancel_wait_task(service_task, 
max_delay=10) if skip_observation_recreation: return # recreate new observation - dynamic_sidecar_settings: DynamicSidecarSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) dynamic_scheduler: DynamicServicesSchedulerSettings = ( self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) self._service_observation_task[ service_name - ] = self.__create_observation_task( - dynamic_sidecar_settings, dynamic_scheduler, service_name - ) + ] = self.__create_observation_task(dynamic_scheduler, service_name) logger.debug("Service '%s' marked for removal from scheduler", service_name) + async def mark_all_services_in_wallet_for_removal( + self, wallet_id: WalletID + ) -> None: + async with self._lock: + to_remove: list[SchedulerData] = [ + scheduler_data + for scheduler_data in self._to_observe.values() + if ( + scheduler_data.wallet_info + and scheduler_data.wallet_info.wallet_id == wallet_id + ) + ] + + for scheduler_data in to_remove: + await self.mark_service_for_removal( + scheduler_data.node_uuid, + can_save=scheduler_data.dynamic_sidecar.service_removal_state.can_save, + ) + + async def is_service_awaiting_manual_intervention(self, node_uuid: NodeID) -> bool: + """returns True if services is waiting for manual intervention""" + return await _scheduler_utils.service_awaits_manual_interventions( + self.get_scheduler_data(node_uuid) + ) + async def remove_service_from_observation(self, node_uuid: NodeID) -> None: - # TODO: this is used internally no need to be here exposed in the interface """ directly invoked from RemoveMarkedService once it's finished and removes the service from the observation cycle """ async with self._lock: if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] if service_name not in self._to_observe: @@ -317,106 +429,49 @@ async def remove_service_from_observation(self, node_uuid: NodeID) -> None: logger.debug("Removed service '%s' from scheduler", service_name) async def get_stack_status(self, node_uuid: NodeID) -> RunningDynamicServiceDetails: - # pylint: disable=too-many-return-statements """ raises DynamicSidecarNotFoundError """ if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] scheduler_data: SchedulerData = self._to_observe[service_name] - - # check if there was an error picked up by the scheduler - # and marked this service as failed - if scheduler_data.dynamic_sidecar.status.current != DynamicSidecarStatus.OK: - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=ServiceState.FAILED, - service_message=scheduler_data.dynamic_sidecar.status.info, - ) - - # is the service stopping? 
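In mark_service_for_removal above, a running observation task is now stopped via cancel_wait_task(service_task, max_delay=10) rather than the earlier hand-rolled cancel/wait_for/suppress sequence. A sketch of what such a bounded cancel-and-wait plausibly does; cancel_and_wait is an illustrative name, not servicelib's actual implementation:

import asyncio
import contextlib


async def cancel_and_wait(task: asyncio.Task, max_delay: float) -> None:
    # Request cancellation, then give the task at most `max_delay` seconds to
    # unwind; the expected CancelledError (or a timeout) is swallowed so the
    # caller can proceed with removal either way.
    task.cancel()
    with contextlib.suppress(asyncio.CancelledError, asyncio.TimeoutError):
        await asyncio.wait_for(task, timeout=max_delay)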
- if scheduler_data.dynamic_sidecar.service_removal_state.can_remove: - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=ServiceState.STOPPING, - service_message=scheduler_data.dynamic_sidecar.status.info, - ) - - # the service should be either running or starting - try: - sidecar_state, sidecar_message = await get_dynamic_sidecar_state( - # the service_name is unique and will not collide with other names - # it can be used in place of the service_id here, as the docker API accepts both - service_id=scheduler_data.service_name - ) - except DockerServiceNotFoundError: - # in this case, the service is starting, so state is pending - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=ServiceState.PENDING, - service_message=scheduler_data.dynamic_sidecar.status.info, - ) - - # while the dynamic-sidecar state is not RUNNING report it's state - if sidecar_state != ServiceState.RUNNING: - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=sidecar_state, - service_message=sidecar_message, - ) - - # NOTE: This will be repeatedly called until the - # user services are effectively started - - # wait for containers to start - if len(scheduler_data.dynamic_sidecar.containers_inspect) == 0: - # marks status as waiting for containers - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=ServiceState.STARTING, - service_message="", - ) - - # compute composed containers states - container_state, container_message = extract_containers_minimum_statuses( - scheduler_data.dynamic_sidecar.containers_inspect - ) - return RunningDynamicServiceDetails.from_scheduler_data( - node_uuid=node_uuid, - scheduler_data=scheduler_data, - service_state=container_state, - service_message=container_message, + return await _scheduler_utils.get_stack_status_from_scheduler_data( + scheduler_data ) async def retrieve_service_inputs( - self, node_uuid: NodeID, port_keys: list[str] + self, node_uuid: NodeID, port_keys: list[ServicePortKey] ) -> RetrieveDataOutEnveloped: """Pulls data from input ports for the service""" if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) service_name = self._inverse_search_mapping[node_uuid] scheduler_data: SchedulerData = self._to_observe[service_name] - dynamic_sidecar_endpoint: AnyHttpUrl = scheduler_data.endpoint - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + dynamic_sidecar_endpoint = scheduler_data.endpoint + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_uuid) - transferred_bytes = await dynamic_sidecar_client.pull_service_input_ports( + started = time.time() + transferred_bytes = await sidecars_client.pull_service_input_ports( dynamic_sidecar_endpoint, port_keys ) + duration = time.time() - started + + if transferred_bytes and transferred_bytes > 0: + get_instrumentation( + self.app + ).dynamic_sidecar_metrics.input_ports_pull_rate.labels( + **get_metrics_labels(scheduler_data) + ).observe( + get_rate(transferred_bytes, duration) + ) if scheduler_data.restart_policy == RestartPolicy.ON_INPUTS_DOWNLOADED: logger.info("Will restart containers") - await 
dynamic_sidecar_client.restart_containers(dynamic_sidecar_endpoint) + await sidecars_client.restart_containers(dynamic_sidecar_endpoint) return RetrieveDataOutEnveloped.from_transferred_bytes(transferred_bytes) @@ -429,11 +484,9 @@ async def attach_project_network( service_name = self._inverse_search_mapping[node_id] scheduler_data = self._to_observe[service_name] - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_id) - await dynamic_sidecar_client.attach_service_containers_to_project_network( + await sidecars_client.attach_service_containers_to_project_network( dynamic_sidecar_endpoint=scheduler_data.endpoint, dynamic_sidecar_network_name=scheduler_data.dynamic_sidecar_network_name, project_network=project_network, @@ -450,11 +503,9 @@ async def detach_project_network( service_name = self._inverse_search_mapping[node_id] scheduler_data = self._to_observe[service_name] - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_id) - await dynamic_sidecar_client.detach_service_containers_from_project_network( + await sidecars_client.detach_service_containers_from_project_network( dynamic_sidecar_endpoint=scheduler_data.endpoint, project_network=project_network, project_id=scheduler_data.project_id, @@ -463,25 +514,29 @@ async def detach_project_network( async def restart_containers(self, node_uuid: NodeID) -> None: """Restarts containers without saving or restoring the state or I/O ports""" if node_uuid not in self._inverse_search_mapping: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) - service_name = self._inverse_search_mapping[node_uuid] + service_name: ServiceName = self._inverse_search_mapping[node_uuid] scheduler_data: SchedulerData = self._to_observe[service_name] - dynamic_sidecar_client: DynamicSidecarClient = get_dynamic_sidecar_client( - self.app - ) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_uuid) + + await sidecars_client.restart_containers(scheduler_data.endpoint) + + async def get_service_activity(self, node_id: NodeID) -> ActivityInfoOrNone: + service_name: ServiceName = self._inverse_search_mapping[node_id] + scheduler_data: SchedulerData = self._to_observe[service_name] - await dynamic_sidecar_client.restart_containers(scheduler_data.endpoint) + sidecars_client: SidecarsClient = await get_sidecars_client(self.app, node_id) + return await sidecars_client.get_service_activity(scheduler_data.endpoint) def _enqueue_observation_from_service_name(self, service_name: str) -> None: self._trigger_observation_queue.put_nowait(service_name) def __create_observation_task( self, - dynamic_sidecar_settings: DynamicSidecarSettings, dynamic_scheduler: DynamicServicesSchedulerSettings, - service_name: str, + service_name: ServiceName, ) -> asyncio.Task: scheduler_data: SchedulerData = self._to_observe[service_name] observation_task = asyncio.create_task( @@ -489,7 +544,6 @@ def __create_observation_task( scheduler=self, service_name=service_name, scheduler_data=scheduler_data, - dynamic_sidecar_settings=dynamic_sidecar_settings, dynamic_scheduler=dynamic_scheduler, ), name=f"{__name__}.observe_{service_name}", @@ -505,14 +559,11 @@ def __create_observation_task( async def _run_trigger_observation_queue_task(self) -> None: """generates events at regular time interval""" - 
dynamic_sidecar_settings: DynamicSidecarSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) dynamic_scheduler: DynamicServicesSchedulerSettings = ( self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) - service_name: str + service_name: ServiceName while service_name := await self._trigger_observation_queue.get(): logger.info("Handling observation for %s", service_name) @@ -523,82 +574,29 @@ async def _run_trigger_observation_queue_task(self) -> None: continue if self._service_observation_task.get(service_name) is None: + logger.info("Create observation task for service %s", service_name) self._service_observation_task[ service_name - ] = self.__create_observation_task( - dynamic_sidecar_settings, dynamic_scheduler, service_name - ) + ] = self.__create_observation_task(dynamic_scheduler, service_name) logger.info("Scheduler 'trigger observation queue task' was shut down") async def _run_scheduler_task(self) -> None: - settings: DynamicServicesSchedulerSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER - ) - logger.debug( - "dynamic-sidecars observation interval %s", - settings.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS, - ) - - while self._keep_running: - logger.debug("Observing dynamic-sidecars %s", list(self._to_observe.keys())) - - try: - # prevent access to self._to_observe - async with self._lock: - for service_name in self._to_observe: - self._enqueue_observation_from_service_name(service_name) - except asyncio.CancelledError: # pragma: no cover - logger.info("Stopped dynamic scheduler") - raise - except Exception: # pylint: disable=broad-except - logger.exception( - "Unexpected error while scheduling sidecars observation" - ) - - await sleep(settings.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS) - self._observation_counter += 1 + logger.debug("Observing dynamic-sidecars %s", list(self._to_observe.keys())) - async def _discover_running_services(self) -> None: - """discover all services which were started before and add them to the scheduler""" - dynamic_sidecar_settings: DynamicSidecarSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) - services_to_observe: list[ - SchedulerData - ] = await get_dynamic_sidecars_to_observe(dynamic_sidecar_settings) - - logger.info( - "The following services need to be observed: %s", services_to_observe - ) - - for scheduler_data in services_to_observe: - await self._add_service(scheduler_data) + try: + # prevent access to self._to_observe + async with self._lock: + for service_name in self._to_observe: + self._enqueue_observation_from_service_name(service_name) + except Exception: # pylint: disable=broad-except + logger.exception("Unexpected error while scheduling sidecars observation") - async def _cleanup_volume_removal_services(self) -> None: - settings: DynamicServicesSchedulerSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER - ) - dynamic_sidecar_settings: DynamicSidecarSettings = ( - self.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) + self._observation_counter += 1 - logger.debug( - "dynamic-sidecars cleanup pending volume removal services every %s seconds", - settings.DIRECTOR_V2_DYNAMIC_SCHEDULER_PENDING_VOLUME_REMOVAL_INTERVAL_S, - ) - while await asyncio.sleep( - settings.DIRECTOR_V2_DYNAMIC_SCHEDULER_PENDING_VOLUME_REMOVAL_INTERVAL_S, - True, - ): - logger.debug("Removing pending volume removal services...") + async def free_reserved_disk_space(self, node_id: NodeID) -> None: + sidecars_client: SidecarsClient = await 
get_sidecars_client(self.app, node_id) + service_name = self._inverse_search_mapping[node_id] + scheduler_data: SchedulerData = self._to_observe[service_name] - try: - await remove_pending_volume_removal_services(dynamic_sidecar_settings) - except asyncio.CancelledError: - logger.info("Stopped pending volume removal services task") - raise - except Exception: # pylint: disable=broad-except - logger.exception( - "Unexpected error while cleaning up pending volume removal services" - ) + return await sidecars_client.free_reserved_disk_space(scheduler_data.endpoint) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_mixin.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_mixin.py deleted file mode 100644 index 21be6a591f8..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_mixin.py +++ /dev/null @@ -1,100 +0,0 @@ -import asyncio -import contextlib -import logging -from asyncio import Lock, Queue, Task -from dataclasses import dataclass, field -from typing import Optional, Union - -from fastapi import FastAPI -from models_library.projects_nodes_io import NodeID - -from .....models.schemas.dynamic_services import SchedulerData, ServiceName -from .._abc import SchedulerInternalsInterface - -logger = logging.getLogger(__name__) - - -@dataclass -class SchedulerInternalsMixin( # pylint: disable=too-many-instance-attributes - SchedulerInternalsInterface -): - app: FastAPI - - _lock: Lock = field(default_factory=Lock) - _to_observe: dict[ServiceName, SchedulerData] = field(default_factory=dict) - _service_observation_task: dict[ - ServiceName, Optional[Union[asyncio.Task, object]] - ] = field(default_factory=dict) - _keep_running: bool = False - _inverse_search_mapping: dict[NodeID, str] = field(default_factory=dict) - _scheduler_task: Optional[Task] = None - _cleanup_volume_removal_services_task: Optional[Task] = None - _trigger_observation_queue_task: Optional[Task] = None - _trigger_observation_queue: Queue = field(default_factory=Queue) - _observation_counter: int = 0 - - async def start(self) -> None: - # run as a background task - logger.info("Starting dynamic-sidecar scheduler") - self._keep_running = True - self._scheduler_task = asyncio.create_task( - self._run_scheduler_task(), name="dynamic-scheduler" - ) - self._trigger_observation_queue_task = asyncio.create_task( - self._run_trigger_observation_queue_task(), - name="dynamic-scheduler-trigger-obs-queue", - ) - - self._cleanup_volume_removal_services_task = asyncio.create_task( - self._cleanup_volume_removal_services(), - name="dynamic-scheduler-cleanup-volume-removal-services", - ) - await self._discover_running_services() - - async def shutdown(self): - logger.info("Shutting down dynamic-sidecar scheduler") - self._keep_running = False - self._inverse_search_mapping = {} - self._to_observe = {} - - if self._cleanup_volume_removal_services_task is not None: - self._cleanup_volume_removal_services_task.cancel() - with contextlib.suppress(asyncio.CancelledError): - await self._cleanup_volume_removal_services_task - self._cleanup_volume_removal_services_task = None - - if self._scheduler_task is not None: - self._scheduler_task.cancel() - with contextlib.suppress(asyncio.CancelledError): - await self._scheduler_task - self._scheduler_task = None - - if self._trigger_observation_queue_task is not None: - await 
self._trigger_observation_queue.put(None) - - self._trigger_observation_queue_task.cancel() - with contextlib.suppress(asyncio.CancelledError): - await self._trigger_observation_queue_task - self._trigger_observation_queue_task = None - self._trigger_observation_queue = Queue() - - # let's properly cleanup remaining observation tasks - running_tasks = self._service_observation_task.values() - for task in running_tasks: - task.cancel() - try: - MAX_WAIT_TIME_SECONDS = 5 - results = await asyncio.wait_for( - asyncio.gather(*running_tasks, return_exceptions=True), - timeout=MAX_WAIT_TIME_SECONDS, - ) - if bad_results := list(filter(lambda r: isinstance(r, Exception), results)): - logger.error( - "Following observation tasks completed with an unexpected error:%s", - f"{bad_results}", - ) - except asyncio.TimeoutError: - logger.error( - "Timed-out waiting for %s to complete. Action: Check why this is blocking", - f"{running_tasks=}", - ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_utils.py new file mode 100644 index 00000000000..5a4a011a874 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core/_scheduler_utils.py @@ -0,0 +1,183 @@ +import logging +from typing import Final + +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.projects_nodes_io import NodeID +from models_library.services_enums import ServiceBootType, ServiceState +from servicelib.fastapi.long_running_tasks.client import ProgressCallback + +from .....core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from .....models.dynamic_services_scheduler import DynamicSidecarStatus, SchedulerData +from ...api_client import SidecarsClient, get_sidecars_client +from ...docker_api import get_dynamic_sidecar_state, get_dynamic_sidecars_to_observe +from ...docker_states import extract_containers_minimum_statuses +from ...errors import DockerServiceNotFoundError +from ._events_utils import service_push_outputs + +_logger = logging.getLogger(__name__) + +# NOTE: take care in changing this message, part of it is used by +# graylog and it will break the notifications +LOG_MSG_MANUAL_INTERVENTION: Final[str] = "Service waiting for manual intervention" + + +async def push_service_outputs( + app: FastAPI, + node_uuid: NodeID, + progress_callback: ProgressCallback | None = None, +) -> None: + sidecars_client: SidecarsClient = await get_sidecars_client(app, node_uuid) + await service_push_outputs( + app=app, + node_uuid=node_uuid, + sidecars_client=sidecars_client, + progress_callback=progress_callback, + ) + + +async def service_awaits_manual_interventions(scheduler_data: SchedulerData) -> bool: + service_awaits_intervention = ( + scheduler_data.dynamic_sidecar.status.current == DynamicSidecarStatus.FAILING + and scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error + is True + ) + if ( + service_awaits_intervention + and not scheduler_data.dynamic_sidecar.wait_for_manual_intervention_logged + ): + scheduler_data.dynamic_sidecar.wait_for_manual_intervention_logged = True + _logger.warning(" %s %s", LOG_MSG_MANUAL_INTERVENTION, scheduler_data.node_uuid) + return service_awaits_intervention + + +async def discover_running_services(scheduler: 
"Scheduler") -> None: # type: ignore # noqa: F821 + """discover all services which were started before and add them to the scheduler""" + settings: DynamicServicesSchedulerSettings = ( + scheduler.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER + ) + services_to_observe: list[SchedulerData] = await get_dynamic_sidecars_to_observe( + settings.SWARM_STACK_NAME + ) + + _logger.info("The following services need to be observed: %s", services_to_observe) + + for scheduler_data in services_to_observe: + await scheduler.add_service_from_scheduler_data(scheduler_data) + + +def create_model_from_scheduler_data( + node_uuid: NodeID, + scheduler_data: SchedulerData, + service_state: ServiceState, + service_message: str, +) -> RunningDynamicServiceDetails: + return RunningDynamicServiceDetails.model_validate( + { + "boot_type": ServiceBootType.V2, + "user_id": scheduler_data.user_id, + "project_id": scheduler_data.project_id, + "service_uuid": node_uuid, + "service_key": scheduler_data.key, + "service_version": scheduler_data.version, + "service_host": scheduler_data.service_name, + "service_port": scheduler_data.service_port, + "service_state": service_state.value, + "service_message": service_message, + } + ) + + +async def get_stack_status_from_scheduler_data( + scheduler_data: SchedulerData, +) -> RunningDynamicServiceDetails: + # pylint: disable=too-many-return-statements + + # check if there was an error picked up by the scheduler + # and marked this service as failed + if scheduler_data.dynamic_sidecar.status.current != DynamicSidecarStatus.OK: + _logger.debug("sidecar issue: sidecar_data=%s", scheduler_data.dynamic_sidecar) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=ServiceState.FAILED, + service_message=scheduler_data.dynamic_sidecar.status.info, + ) + + # is the service stopping? 
+ if scheduler_data.dynamic_sidecar.service_removal_state.can_remove: + _logger.debug( + "stopping service sidecar_data=%s", scheduler_data.dynamic_sidecar + ) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=ServiceState.STOPPING, + service_message=scheduler_data.dynamic_sidecar.status.info, + ) + + # the service should be either running or starting + try: + sidecar_state, sidecar_message = await get_dynamic_sidecar_state( + # the service_name is unique and will not collide with other names + # it can be used in place of the service_id here, as the docker API accepts both + service_id=scheduler_data.service_name + ) + except DockerServiceNotFoundError: + # in this case, the service is starting, so state is pending + _logger.debug( + "docker service not found sidecar_data=%s", scheduler_data.dynamic_sidecar + ) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=ServiceState.PENDING, + service_message=scheduler_data.dynamic_sidecar.status.info, + ) + + # while the dynamic-sidecar state is not RUNNING report it's state + if sidecar_state != ServiceState.RUNNING: + _logger.debug( + "sidecar NOT running sidecar_data=%s, state=%s, message=%s", + scheduler_data.dynamic_sidecar, + sidecar_state, + sidecar_message, + ) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=sidecar_state, + service_message=sidecar_message, + ) + + # NOTE: This will be repeatedly called until the + # user services are effectively started + + # wait for containers to start + if len(scheduler_data.dynamic_sidecar.containers_inspect) == 0: + # marks status as waiting for containers + _logger.debug( + "waiting for containers sidecar_data=%s", scheduler_data.dynamic_sidecar + ) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=ServiceState.STARTING, + service_message="", + ) + + # compute composed containers states + container_state, container_message = extract_containers_minimum_statuses( + scheduler_data.dynamic_sidecar.containers_inspect + ) + _logger.debug("status at runtime sidecar_data=%s", scheduler_data.dynamic_sidecar) + return create_model_from_scheduler_data( + node_uuid=scheduler_data.node_uuid, + scheduler_data=scheduler_data, + service_state=container_state, + service_message=container_message, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/__init__.py deleted file mode 100644 index 87ac499569e..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from ._action import Action -from ._context_in_memory import InMemoryContext -from ._marker import mark_step -from ._models import ExceptionInfo -from ._workflow import Workflow -from ._workflow_context import WorkflowContext -from ._workflow_runner_manager import WorkflowRunnerManager - -__all__: tuple[str, ...] 
= ( - "Action", - "ExceptionInfo", - "InMemoryContext", - "mark_step", - "Workflow", - "WorkflowContext", - "WorkflowRunnerManager", -) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_action.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_action.py deleted file mode 100644 index 60f8a2b2c54..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_action.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Any, Callable, Optional - -from pydantic import BaseModel, Field, validator - -from ._models import ActionName - - -class Action(BaseModel): - """ - A sequence of steps (functions) - """ - - name: ActionName - steps: list[Callable] = Field( - ..., - description=( - "list awaitables marked as steps, the order in this list " - "is the order in which steps will be executed" - ), - ) - - next_action: Optional[ActionName] = Field( - ..., - description="optional, name of the action to run after this one", - ) - on_error_action: Optional[ActionName] = Field( - ..., - description="optional, name of the action to run after this one raises an unexpected error", - ) - - @property - def steps_names(self) -> list[str]: - return [x.__name__ for x in self.steps] - - @validator("steps") - @classmethod - def ensure_all_marked_as_step(cls, steps): - for step in steps: - for attr_name in ("input_types", "return_type"): - if not hasattr(step, attr_name): - raise ValueError( - f"Event handler {step.__name__} should expose `{attr_name}` " - "attribute. Was it decorated with @mark_step?" - ) - if type(getattr(step, "input_types")) != dict: - raise ValueError( - f"`{step.__name__}.input_types` should be of type {dict}" - ) - if getattr(step, "return_type") != dict[str, Any]: - raise ValueError( - f"`{step.__name__}.return_type` should be of type {dict[str, Any]}" - ) - return steps diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_base.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_base.py deleted file mode 100644 index 755c6b4dd07..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_base.py +++ /dev/null @@ -1,80 +0,0 @@ -from abc import ABC, abstractmethod -from enum import Enum, auto -from typing import Any, Optional - - -class ReservedContextKeys(str, Enum): - def _generate_next_value_(self, *_: Any) -> str: # pylint:disable=arguments-differ - return self.lower() - - APP = auto() - - WORKFLOW_NAME = auto() - WORKFLOW_ACTION_NAME = auto() - WORKFLOW_CURRENT_STEP_NAME = auto() - WORKFLOW_CURRENT_STEP_INDEX = auto() - - UNEXPECTED_RUNTIME_EXCEPTION = auto() - - @classmethod - def is_reserved(cls, key: str) -> bool: - return key.upper() in cls.__members__ - - @classmethod - def is_stored_locally(cls, key: str) -> bool: - return key in _STORED_LOCALLY - - -_STORED_LOCALLY: set[str] = { - f"{ReservedContextKeys.APP}", -} - - -class _ContextIOInterface(ABC): - """ - Used to save/load the context in bulk. - Useful for those types of stores which are not capable of guaranteeing - data persistance between reboots. (eg: in memory implementation) - Should become obsolete in the future if something like a Redis based - store will be used. 
- """ - - @abstractmethod - async def to_dict(self) -> dict[str, Any]: - """returns the context of a store as a dictionary""" - - @abstractmethod - async def update(self, incoming: dict[str, Any]) -> None: - """stores data from incoming deserialized data""" - - -class _ContextStorageInterface(ABC): - """ - Base interface for saving and loading data from a store. - """ - - @abstractmethod - async def save(self, key: str, value: Any) -> None: - """saves value to sore""" - - @abstractmethod - async def load(self, key: str) -> Optional[Any]: - """load value from store""" - - @abstractmethod - async def has_key(self, key: str) -> bool: - """is True if key is in store""" - - @abstractmethod - async def setup(self) -> None: - """run storage specific initializers""" - - @abstractmethod - async def teardown(self) -> None: - """run storage specific halt and cleanup""" - - -class ContextInterface(_ContextStorageInterface, _ContextIOInterface): - """ - This should be inherited when defining a new type of Context. - """ diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_in_memory.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_in_memory.py deleted file mode 100644 index 92cdc735607..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_context_in_memory.py +++ /dev/null @@ -1,36 +0,0 @@ -from copy import deepcopy -from typing import Any, Optional - -from ._context_base import ContextInterface - - -class InMemoryContext(ContextInterface): - """ - Very simple context to keep track of data. - NOTE: Does not support data persistance. Requires - external system to back it up. - """ - - def __init__(self) -> None: - self._context: dict[str, Any] = {} - - async def save(self, key: str, value: Any) -> None: - self._context[key] = value - - async def load(self, key: str) -> Optional[Any]: - return self._context[key] - - async def has_key(self, key: str) -> bool: - return key in self._context - - async def to_dict(self) -> dict[str, Any]: - return deepcopy(self._context) - - async def update(self, incoming: dict[str, Any]) -> None: - self._context.update(incoming) - - async def setup(self) -> None: - """nothing to do here""" - - async def teardown(self) -> None: - """nothing to do here""" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_errors.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_errors.py deleted file mode 100644 index cc36e6f2bcf..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_errors.py +++ /dev/null @@ -1,99 +0,0 @@ -from pydantic.errors import PydanticErrorMixin - - -class BaseSchedulerException(PydanticErrorMixin, RuntimeError): - """base for all exceptions here""" - - -class BaseContextException(BaseSchedulerException): - """use as base for all context related errors""" - - -class NotInContextError(BaseContextException): - code = "dynamic_sidecar.scheduler.v2.not_in_context" - msg_template = "Could not find a variable named '{key}' in context: {context}" - - -class SetTypeMismatchError(BaseContextException): - code = "dynamic_sidecar.scheduler.v2.set_type_mismatch" - msg_template = ( - "Found a variable named '{key}' of type='{existing_type}' and value=" - "'{existing_value}'. 
Trying to set to type='{new_type}' and value=" - "'{new_value}'" - ) - - -class GetTypeMismatchError(BaseContextException): - code = "dynamic_sidecar.scheduler.v2.get_type_mismatch" - msg_template = ( - "Found a variable named '{key}' of type='{existing_type}' and value=" - "'{existing_value}'. Expecting type='{expected_type}'" - ) - - -class NotAllowedContextKeyError(BaseContextException): - code = "dynamic_sidecar.scheduler.v2.key_not_allowed" - msg_template = ( - "Provided key='{key}' is reserved for internal usage, " - "please try using a different one." - ) - - -class UnexpectedStepReturnTypeError(BaseSchedulerException): - code = "dynamic_sidecar.scheduler.v2.unexpected_step_return_type" - msg_template = "Step should always return `dict[str, Any]`, returning: {type}" - - -class BaseWorkflowException(BaseSchedulerException): - """use as base for all workflow related errors""" - - -class WorkflowAlreadyExistingException(BaseWorkflowException): - code = "dynamic_sidecar.scheduler.v2.workflow_already_exists" - msg_template = "Another workflow named '{workflow_name}' already exists" - - -class WorkflowNotFoundException(BaseWorkflowException): - code = "dynamic_sidecar.scheduler.v2.workflow_not_found" - msg_template = "Workflow '{workflow_name}' not found" - - -class WorkflowNotInitializedException(BaseWorkflowException): - code = "dynamic_sidecar.scheduler.v2.workflow_not_initialized_found" - msg_template = "Workflow '{workflow_name}' has to be initialized before using this." - - -class InvalidSerializedContextException(BaseWorkflowException): - code = "dynamic_sidecar.scheduler.v2.invalid_serialized_context_for_workflow" - msg_template = ( - "Trying to resume workflow '{workflow_name}' from an invalid " - "context '{serialized_context}'" - ) - - -class BaseActionException(BaseSchedulerException): - """use as base for all action related errors""" - - -class ActionNotRegisteredException(BaseActionException): - code = "dynamic_sidecar.scheduler.v2.action_not_registered" - msg_template = ( - "Trying to start action '{action_name}' but these are the only" - "available actions {workflow}" - ) - - -class OnErrorActionNotInWorkflowException(BaseActionException): - code = "dynamic_sidecar.scheduler.v2.on_error_action_not_in_workflow" - msg_template = ( - "Action '{action_name}' defines an on_error_action '{on_error_action}'" - "that is not in the present in the workflow {workflow}" - ) - - -class NextActionNotInWorkflowException(BaseActionException): - code = "dynamic_sidecar.scheduler.v2.next_action_not_in_workflow" - msg_template = ( - "Action '{action_name}' defines an next_action '{next_action}'" - "that is not in the present in the workflow {workflow}" - ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_marker.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_marker.py deleted file mode 100644 index 1ba68be6e58..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_marker.py +++ /dev/null @@ -1,31 +0,0 @@ -import inspect -from functools import wraps -from typing import Any, Callable - -from ._errors import UnexpectedStepReturnTypeError - - -def mark_step(func: Callable) -> Callable: - """ - Decorate a coroutine as an step. - Return type must always be of type `dict[str, Any]` - Stores input types in `.input_types` and return type - in `.return_type` for later usage. 
- """ - - func_annotations = inspect.getfullargspec(func).annotations - - # ensure output type is correct, only support sone - return_type = func_annotations.pop("return", None) - if return_type != dict[str, Any]: - raise UnexpectedStepReturnTypeError(type=return_type) - - @wraps(func) - async def wrapped(*args, **kwargs) -> Any: - return await func(*args, **kwargs) - - # store input and return types for later usage - wrapped.return_type = return_type - wrapped.input_types = func_annotations - - return wrapped diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_models.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_models.py deleted file mode 100644 index 3bfed093260..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_models.py +++ /dev/null @@ -1,12 +0,0 @@ -from pydantic import BaseModel - -StepName = str -ActionName = str -WorkflowName = str - - -class ExceptionInfo(BaseModel): - exception_class: type - action_name: ActionName - step_name: StepName - serialized_traceback: str diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow.py deleted file mode 100644 index d147e1cc531..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow.py +++ /dev/null @@ -1,46 +0,0 @@ -from itertools import chain - -from ._action import Action -from ._errors import ( - NextActionNotInWorkflowException, - OnErrorActionNotInWorkflowException, -) -from ._models import ActionName - - -class Workflow: - """ - contains `Action` entries which define links to `next_action` and `on_error_action` - Links must exist, otherwise an error is raised upon creation. 
- """ - - def __init__(self, *actions: Action) -> None: - self._registry: dict[ActionName, Action] = {s.name: s for s in actions} - for action in actions: - if ( - action.on_error_action is not None - and action.on_error_action not in self._registry - ): - raise OnErrorActionNotInWorkflowException( - action_name=action.name, - on_error_action=action.on_error_action, - workflow=self._registry, - ) - if ( - action.next_action is not None - and action.next_action not in self._registry - ): - raise NextActionNotInWorkflowException( - action_name=action.name, - next_action=action.next_action, - workflow=self._registry, - ) - - def __contains__(self, item: ActionName) -> bool: - return item in self._registry - - def __getitem__(self, key: ActionName) -> Action: - return self._registry[key] - - def __add__(self, other: "Workflow") -> "Workflow": - return Workflow(*(chain(self._registry.values(), other._registry.values()))) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_context.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_context.py deleted file mode 100644 index 30edadf427a..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_context.py +++ /dev/null @@ -1,119 +0,0 @@ -from typing import Any, Optional - -from fastapi import FastAPI - -from ._context_base import ContextInterface, ReservedContextKeys -from ._errors import ( - GetTypeMismatchError, - NotAllowedContextKeyError, - NotInContextError, - SetTypeMismatchError, -) -from ._models import ActionName, WorkflowName - - -def _ensure_type_matches(key: str, existing_value: Any, value: Any) -> None: - # if a value previously existed, ensure it has a compatible type - existing_type = type(existing_value) - value_type = type(value) - if not isinstance(existing_value, value_type): - raise SetTypeMismatchError( - key=key, - existing_value=existing_value, - existing_type=existing_type, - new_value=value, - new_type=value_type, - ) - - -class WorkflowContext: - """ - Data container responsible for keeping track of the state of a workflow. - """ - - def __init__( - self, - context: ContextInterface, - app: FastAPI, - workflow_name: WorkflowName, - action_name: ActionName, - ) -> None: - self._context = context - self._app = app - self._workflow_name = workflow_name - self._action_name = action_name - - self._local_storage: dict[str, Any] = {} - - async def set(self, key: str, value: Any, *, set_reserved: bool = False) -> None: - """ - Stores a value. - NOTE: the type of the value is deduced the first time this was set. - """ - if ReservedContextKeys.is_reserved(key) and not set_reserved: - raise NotAllowedContextKeyError(key=key) - - if ReservedContextKeys.is_stored_locally(key): - if key in self._local_storage: - _ensure_type_matches( - key=key, existing_value=self._local_storage[key], value=value - ) - self._local_storage[key] = value - else: - if await self._context.has_key(key): - _ensure_type_matches( - key=key, existing_value=await self._context.load(key), value=value - ) - await self._context.save(key, value) - - async def get(self, key: str, expected_type: type) -> Optional[Any]: - """ - Loads a value. Raises an error if value is missing. 
- """ - if not ReservedContextKeys.is_stored_locally( - key - ) and not await self._context.has_key(key): - raise NotInContextError(key=key, context=await self._context.to_dict()) - - existing_value = ( - self._local_storage[key] - if ReservedContextKeys.is_stored_locally(key) - else await self._context.load(key) - ) - exiting_type = type(existing_value) - if not isinstance(existing_value, expected_type): - raise GetTypeMismatchError( - key=key, - existing_value=existing_value, - existing_type=exiting_type, - expected_type=expected_type, - ) - return existing_value - - async def get_serialized_context(self) -> dict[str, Any]: - return await self._context.to_dict() - - async def import_from_serialized_context( - self, serialized_context: dict[str, Any] - ) -> None: - await self._context.update(serialized_context) - - async def setup(self) -> None: - # adding app to context - await self.set(key=ReservedContextKeys.APP, value=self._app, set_reserved=True) - await self.set( - key=ReservedContextKeys.WORKFLOW_NAME, - value=self._workflow_name, - set_reserved=True, - ) - await self.set( - key=ReservedContextKeys.WORKFLOW_ACTION_NAME, - value=self._action_name, - set_reserved=True, - ) - await self.set( - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX, 0, set_reserved=True - ) - - async def teardown(self) -> None: - """no code required here""" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner.py deleted file mode 100644 index 306b34a2943..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner.py +++ /dev/null @@ -1,145 +0,0 @@ -import logging -import traceback -from typing import Any, Awaitable, Callable, Iterable, Optional - -from pydantic import NonNegativeInt -from servicelib.utils import logged_gather - -from ._action import Action -from ._context_base import ReservedContextKeys -from ._errors import NotInContextError -from ._models import ActionName, ExceptionInfo, StepName, WorkflowName -from ._workflow import Workflow -from ._workflow_context import WorkflowContext - -logger = logging.getLogger(__name__) - - -def _iter_index_step( - iterable: Iterable[Callable], *, index: NonNegativeInt = 0 -) -> Iterable[tuple[NonNegativeInt, Callable]]: - for i, value in enumerate(iterable): - if i >= index: - yield i, value - - -async def workflow_runner( - workflow: Workflow, - workflow_context: WorkflowContext, - *, - before_step_hook: Optional[ - Callable[[ActionName, StepName], Awaitable[None]] - ] = None, - after_step_hook: Optional[Callable[[ActionName, StepName], Awaitable[None]]] = None, -) -> None: - """ - Given a `Workflow` and a `WorkflowContext`: - - runs from a given starting action - - recovers from an already initialized `WorkflowContext` - """ - - action_name: ActionName = await workflow_context.get( - ReservedContextKeys.WORKFLOW_ACTION_NAME, ActionName - ) - action: Optional[Action] = workflow[action_name] - - start_from_index: int = 0 - try: - start_from_index = await workflow_context.get( - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX, int - ) - except NotInContextError: - pass - - while action is not None: - action_name = action.name - await workflow_context.set( - ReservedContextKeys.WORKFLOW_ACTION_NAME, action_name, set_reserved=True - ) - logger.debug("Running action='%s', step=%s", action_name, action.steps_names) 
- try: - for index, step in _iter_index_step(action.steps, index=start_from_index): - step_name = step.__name__ - - if before_step_hook: - await before_step_hook(action_name, step_name) - - # fetching inputs from context - inputs: dict[str, Any] = {} - if step.input_types: - get_inputs_results = await logged_gather( - *[ - workflow_context.get(var_name, var_type) - for var_name, var_type in step.input_types.items() - ] - ) - inputs = dict(zip(step.input_types, get_inputs_results)) - logger.debug("step='%s' inputs=%s", step_name, inputs) - - # running event handler - await workflow_context.set( - ReservedContextKeys.WORKFLOW_CURRENT_STEP_NAME, - step_name, - set_reserved=True, - ) - await workflow_context.set( - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX, - index, - set_reserved=True, - ) - result = await step(**inputs) - - # saving outputs to context - logger.debug("step='%s' result=%s", step_name, result) - await logged_gather( - *[ - workflow_context.set(key=var_name, value=var_value) - for var_name, var_value in result.items() - ] - ) - - if after_step_hook: - await after_step_hook(action_name, step_name) - except Exception as e: # pylint: disable=broad-except - logger.exception( - "Unexpected exception, deferring handling to action='%s'", - action.on_error_action, - ) - - if action.on_error_action is None: - # NOTE: since there is no state that takes care of the error - # just raise it here and halt the task - logger.error( - "workflow_context=%s", - await workflow_context.get_serialized_context(), - ) - raise - - # Storing exception to be possibly handled by the error state - exception_info = ExceptionInfo( - exception_class=e.__class__, - action_name=await workflow_context.get( - ReservedContextKeys.WORKFLOW_ACTION_NAME, WorkflowName - ), - step_name=await workflow_context.get( - ReservedContextKeys.WORKFLOW_CURRENT_STEP_NAME, ActionName - ), - serialized_traceback=traceback.format_exc(), - ) - await workflow_context.set( - ReservedContextKeys.UNEXPECTED_RUNTIME_EXCEPTION, - exception_info, - set_reserved=True, - ) - - action = ( - None - if action.on_error_action is None - else workflow[action.on_error_action] - ) - else: - action = ( - None if action.next_action is None else workflow[action.next_action] - ) - finally: - start_from_index = 0 diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner_manager.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner_manager.py deleted file mode 100644 index e43352d76d9..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_core2/_workflow_runner_manager.py +++ /dev/null @@ -1,192 +0,0 @@ -import asyncio -import logging -from asyncio import Task -from contextlib import suppress -from functools import partial -from typing import Any, Awaitable, Callable, Optional - -from fastapi import FastAPI - -from ._context_base import ContextInterface, ReservedContextKeys -from ._errors import ( - ActionNotRegisteredException, - InvalidSerializedContextException, - WorkflowAlreadyExistingException, - WorkflowNotFoundException, - WorkflowNotInitializedException, -) -from ._models import ActionName, StepName, WorkflowName -from ._workflow import Workflow -from ._workflow_context import WorkflowContext -from ._workflow_runner import workflow_runner - -logger = logging.getLogger(__name__) - - -async def _cancel_task(task: Optional[Task]) -> None: - if task is None: - return - 
- async def _await_task(task: Task) -> None: - await task - - task.cancel() - with suppress(asyncio.CancelledError): - try: - await asyncio.wait_for(_await_task(task), timeout=10) - except asyncio.TimeoutError: - logger.warning( - "Timed out while awaiting for cancellation of '%s'", task.get_name() - ) - - -class WorkflowRunnerManager: - """ - Keeps track of `workflow_runner` tasks and is responsible for: - starting, stopping and cancelling them. - """ - - def __init__( - self, - context_factory: Awaitable[ContextInterface], - app: FastAPI, - workflow: Workflow, - *, - before_step_hook: Optional[ - Callable[[ActionName, StepName], Awaitable[None]] - ] = None, - after_step_hook: Optional[ - Callable[[ActionName, StepName], Awaitable[None]] - ] = None, - ) -> None: - self.context_factory = context_factory - self.app = app - self.workflow = workflow - self.before_step_hook = before_step_hook - self.after_step_hook = after_step_hook - - self._workflow_tasks: dict[WorkflowName, Task] = {} - self._workflow_context: dict[WorkflowName, WorkflowContext] = {} - self._workflow_context_shutdown_tasks: dict[WorkflowName, Task] = {} - - def _add_workflow_runner_task( - self, workflow_runner_awaitable: Awaitable, workflow_name: WorkflowName - ) -> None: - workflow_task = self._workflow_tasks[workflow_name] = asyncio.create_task( - workflow_runner_awaitable, name=f"workflow_task_{workflow_name}" - ) - - def workflow_runner_complete(_: Task) -> None: - self._workflow_tasks.pop(workflow_name, None) - workflow_context: Optional[WorkflowContext] = self._workflow_context.pop( - workflow_name, None - ) - if workflow_context: - # shutting down context resolver and ensure task will not be pending - task = self._workflow_context_shutdown_tasks[ - workflow_name - ] = asyncio.create_task(workflow_context.teardown()) - task.add_done_callback( - partial( - lambda s, _: self._workflow_context_shutdown_tasks.pop(s, None), - workflow_name, - ) - ) - - # remove when task is done - workflow_task.add_done_callback(workflow_runner_complete) - - async def initialize_workflow_runner( - self, workflow_name: WorkflowName, action_name: ActionName - ) -> None: - """initializes a new workflow with a unique name""" - - if workflow_name in self._workflow_context: - raise WorkflowAlreadyExistingException(workflow_name=workflow_name) - if action_name not in self.workflow: - raise ActionNotRegisteredException( - action_name=action_name, workflow=self.workflow - ) - - self._workflow_context[workflow_name] = workflow_context = WorkflowContext( - context=await self.context_factory(), - app=self.app, - workflow_name=workflow_name, - action_name=action_name, - ) - await workflow_context.setup() - - def get_workflow_context(self, workflow_name: WorkflowName) -> WorkflowContext: - if workflow_name not in self._workflow_context: - raise WorkflowNotInitializedException(workflow_name=workflow_name) - - return self._workflow_context[workflow_name] - - async def start_workflow_runner(self, workflow_name: WorkflowName) -> None: - """starts an initialized workflow""" - if workflow_name not in self._workflow_context: - raise WorkflowNotInitializedException(workflow_name=workflow_name) - - workflow_runner_awaitable: Awaitable = workflow_runner( - workflow=self.workflow, - workflow_context=self._workflow_context[workflow_name], - before_step_hook=self.before_step_hook, - after_step_hook=self.after_step_hook, - ) - - self._add_workflow_runner_task(workflow_runner_awaitable, workflow_name) - - async def resume_workflow_runner( - self, workflow_name: 
WorkflowName, serialized_context: dict[str, Any] - ) -> None: - if workflow_name not in self._workflow_context: - raise WorkflowNotInitializedException(workflow_name=workflow_name) - if ( - ReservedContextKeys.WORKFLOW_NAME not in serialized_context - and ReservedContextKeys.WORKFLOW_ACTION_NAME not in serialized_context - and serialized_context.get(ReservedContextKeys.WORKFLOW_NAME) - != workflow_name - ): - raise InvalidSerializedContextException( - workflow_name=workflow_name, serialized_context=serialized_context - ) - - await self._workflow_context[workflow_name].import_from_serialized_context( - serialized_context - ) - await self.start_workflow_runner(workflow_name) - - async def wait_workflow_runner(self, workflow_name: WorkflowName) -> None: - """waits for action workflow task to finish""" - if workflow_name not in self._workflow_tasks: - raise WorkflowNotFoundException(workflow_name=workflow_name) - - workflow_task = self._workflow_tasks[workflow_name] - await workflow_task - - async def cancel_and_wait_workflow_runner( - self, workflow_name: WorkflowName - ) -> None: - """cancels current action workflow Task""" - if workflow_name not in self._workflow_tasks: - raise WorkflowNotFoundException(workflow_name=workflow_name) - - task = self._workflow_tasks[workflow_name] - await _cancel_task(task) - - async def teardown(self) -> None: - # NOTE: content can change while iterating - for key in set(self._workflow_context.keys()): - workflow_context: Optional[WorkflowContext] = self._workflow_context.get( - key, None - ) - if workflow_context: - await workflow_context.teardown() - - # NOTE: content can change while iterating - for key in set(self._workflow_context_shutdown_tasks.keys()): - task: Optional[Task] = self._workflow_context_shutdown_tasks.get(key, None) - await _cancel_task(task) - - async def setup(self) -> None: - """no code required""" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_task.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_task.py index 103b9dd60e0..e712958a32a 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_task.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_task.py @@ -1,22 +1,28 @@ import logging -from typing import Optional from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceCreate, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.api_schemas_dynamic_sidecar.containers import ActivityInfoOrNone from models_library.basic_types import PortInt from models_library.projects import ProjectID from models_library.projects_networks import DockerNetworkAlias from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services_types import ServicePortKey from models_library.users import UserID +from models_library.wallets import WalletID from servicelib.fastapi.long_running_tasks.client import ProgressCallback from servicelib.fastapi.long_running_tasks.server import TaskProgress -from ....core.settings import DynamicServicesSchedulerSettings -from ....models.domains.dynamic_services import ( - DynamicServiceCreate, - RetrieveDataOutEnveloped, +from ....core.dynamic_services_settings.scheduler import ( + 
DynamicServicesSchedulerSettings, ) -from ....models.schemas.dynamic_services import RunningDynamicServiceDetails from ._abc import SchedulerInternalsInterface, SchedulerPublicInterface from ._core._scheduler import Scheduler @@ -28,40 +34,40 @@ class DynamicSidecarsScheduler(SchedulerInternalsInterface, SchedulerPublicInter def __init__(self, app: FastAPI) -> None: self.app: FastAPI = app - self._scheduler = Scheduler(app=app) + self.scheduler = Scheduler(app=app) async def start(self) -> None: - return await self._scheduler.start() + return await self.scheduler.start() async def shutdown(self): - return await self._scheduler.shutdown() + return await self.scheduler.shutdown() - def toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool: - return self._scheduler.toggle_observation(node_uuid, disable) + def toggle_observation(self, node_uuid: NodeID, *, disable: bool) -> bool: + return self.scheduler.toggle_observation(node_uuid, disable=disable) async def push_service_outputs( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: - return await self._scheduler.push_service_outputs(node_uuid, progress_callback) + return await self.scheduler.push_service_outputs(node_uuid, progress_callback) async def remove_service_containers( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: - return await self._scheduler.remove_service_containers( + return await self.scheduler.remove_service_containers( node_uuid, progress_callback ) async def remove_service_sidecar_proxy_docker_networks_and_volumes( self, task_progress: TaskProgress, node_uuid: NodeID ) -> None: - return await self._scheduler.remove_service_sidecar_proxy_docker_networks_and_volumes( + return await self.scheduler.remove_service_sidecar_proxy_docker_networks_and_volumes( task_progress, node_uuid ) async def save_service_state( - self, node_uuid: NodeID, progress_callback: Optional[ProgressCallback] = None + self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None ) -> None: - return await self._scheduler.save_service_state(node_uuid, progress_callback) + return await self.scheduler.save_service_state(node_uuid, progress_callback) async def add_service( self, @@ -71,62 +77,77 @@ async def add_service( request_dns: str, request_scheme: str, request_simcore_user_agent: str, + *, + can_save: bool, ) -> None: - return await self._scheduler.add_service( - service, - simcore_service_labels, - port, - request_dns, - request_scheme, - request_simcore_user_agent, + return await self.scheduler.add_service( + service=service, + simcore_service_labels=simcore_service_labels, + port=port, + request_dns=request_dns, + request_scheme=request_scheme, + request_simcore_user_agent=request_simcore_user_agent, + can_save=can_save, ) def is_service_tracked(self, node_uuid: NodeID) -> bool: - return self._scheduler.is_service_tracked(node_uuid) + return self.scheduler.is_service_tracked(node_uuid) def list_services( - self, - *, - user_id: Optional[UserID] = None, - project_id: Optional[ProjectID] = None + self, *, user_id: UserID | None = None, project_id: ProjectID | None = None ) -> list[NodeID]: - return self._scheduler.list_services(user_id=user_id, project_id=project_id) + return self.scheduler.list_services(user_id=user_id, project_id=project_id) async def mark_service_for_removal( self, 
node_uuid: NodeID, - can_save: Optional[bool], + can_save: bool | None, + *, skip_observation_recreation: bool = False, ) -> None: - return await self._scheduler.mark_service_for_removal( - node_uuid, can_save, skip_observation_recreation + return await self.scheduler.mark_service_for_removal( + node_uuid, can_save, skip_observation_recreation=skip_observation_recreation ) + async def mark_all_services_in_wallet_for_removal( + self, wallet_id: WalletID + ) -> None: + await self.scheduler.mark_all_services_in_wallet_for_removal(wallet_id) + + async def is_service_awaiting_manual_intervention(self, node_uuid: NodeID) -> bool: + return await self.scheduler.is_service_awaiting_manual_intervention(node_uuid) + async def get_stack_status(self, node_uuid: NodeID) -> RunningDynamicServiceDetails: - return await self._scheduler.get_stack_status(node_uuid) + return await self.scheduler.get_stack_status(node_uuid) async def retrieve_service_inputs( - self, node_uuid: NodeID, port_keys: list[str] + self, node_uuid: NodeID, port_keys: list[ServicePortKey] ) -> RetrieveDataOutEnveloped: - return await self._scheduler.retrieve_service_inputs(node_uuid, port_keys) + return await self.scheduler.retrieve_service_inputs(node_uuid, port_keys) async def attach_project_network( self, node_id: NodeID, project_network: str, network_alias: DockerNetworkAlias ) -> None: - return await self._scheduler.attach_project_network( + return await self.scheduler.attach_project_network( node_id, project_network, network_alias ) async def detach_project_network( self, node_id: NodeID, project_network: str ) -> None: - return await self._scheduler.detach_project_network(node_id, project_network) + return await self.scheduler.detach_project_network(node_id, project_network) async def restart_containers(self, node_uuid: NodeID) -> None: - return await self._scheduler.restart_containers(node_uuid) + return await self.scheduler.restart_containers(node_uuid) + + async def get_service_activity(self, node_id: NodeID) -> ActivityInfoOrNone: + return await self.scheduler.get_service_activity(node_id) + + async def free_reserved_disk_space(self, node_id: NodeID) -> None: + await self.scheduler.free_reserved_disk_space(node_id) -async def setup_scheduler(app: FastAPI): +async def setup_scheduler(app: FastAPI) -> None: settings: DynamicServicesSchedulerSettings = ( app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) @@ -138,7 +159,7 @@ async def setup_scheduler(app: FastAPI): await scheduler.start() -async def shutdown_scheduler(app: FastAPI): +async def shutdown_scheduler(app: FastAPI) -> None: settings: DynamicServicesSchedulerSettings = ( app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER ) @@ -146,7 +167,8 @@ async def shutdown_scheduler(app: FastAPI): logger.warning("dynamic-sidecar scheduler not started, nothing to shutdown!!!") return - scheduler: Optional[DynamicSidecarsScheduler] = app.state.dynamic_sidecar_scheduler + scheduler: DynamicSidecarsScheduler | None = app.state.dynamic_sidecar_scheduler + assert scheduler is not None # nosec await scheduler.shutdown() diff --git a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/volumes.py b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/volumes.py index 63061e7c0b3..bf375b29eed 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/volumes.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/volumes.py @@ -2,14 +2,27 @@ from pathlib import Path from 
typing import Any +from models_library.api_schemas_directorv2.services import ( + CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME, +) from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID -from models_library.services import RunID +from models_library.services import ServiceRunID from models_library.users import UserID from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES +from settings_library.efs import ( + NFS_PROTOCOL, + NFS_REQUEST_TIMEOUT, + NUMBER_OF_RETRANSMISSIONS, + PORT_MODE, + READ_SIZE, + RECOVERY_MODE, + WRITE_SIZE, + AwsEfsSettings, +) from settings_library.r_clone import S3Provider -from ...core.settings import RCloneSettings +from ...core.dynamic_services_settings.sidecar import RCloneSettings from .errors import DynamicSidecarError DY_SIDECAR_SHARED_STORE_PATH = Path("/shared-store") @@ -22,13 +35,12 @@ def _get_s3_volume_driver_config( storage_directory_name: str, ) -> dict[str, Any]: assert "/" not in storage_directory_name # nosec - driver_config = { + driver_config: dict[str, Any] = { "Name": "rclone", "Options": { "type": "s3", "s3-access_key_id": r_clone_settings.R_CLONE_S3.S3_ACCESS_KEY, "s3-secret_access_key": r_clone_settings.R_CLONE_S3.S3_SECRET_KEY, - "s3-endpoint": r_clone_settings.R_CLONE_S3.S3_ENDPOINT, "path": f"{r_clone_settings.R_CLONE_S3.S3_BUCKET_NAME}/{project_id}/{node_uuid}/{storage_directory_name}", "allow-other": "true", "vfs-cache-mode": r_clone_settings.R_CLONE_VFS_CACHE_MODE.value, @@ -40,8 +52,12 @@ def _get_s3_volume_driver_config( "poll-interval": f"{r_clone_settings.R_CLONE_POLL_INTERVAL_SECONDS}s", }, } + if r_clone_settings.R_CLONE_S3.S3_ENDPOINT: + driver_config["Options"][ + "s3-endpoint" + ] = r_clone_settings.R_CLONE_S3.S3_ENDPOINT - extra_options = None + extra_options: dict[str, str] | None = None if r_clone_settings.R_CLONE_PROVIDER == S3Provider.MINIO: extra_options = { @@ -58,17 +74,34 @@ def _get_s3_volume_driver_config( elif r_clone_settings.R_CLONE_PROVIDER == S3Provider.AWS: extra_options = { "s3-provider": "AWS", - "s3-region": "us-east-1", + "s3-region": r_clone_settings.R_CLONE_S3.S3_REGION, "s3-acl": "private", } else: - raise DynamicSidecarError( - f"Unexpected, all {S3Provider.__name__} should be covered" - ) + msg = f"Unexpected, all {S3Provider.__name__} should be covered" + raise DynamicSidecarError(msg=msg) assert extra_options is not None # nosec - driver_config["Options"].update(extra_options) + options: dict[str, Any] = driver_config["Options"] + options.update(extra_options) + + return driver_config + +def _get_efs_volume_driver_config( + efs_settings: AwsEfsSettings, + project_id: ProjectID, + node_uuid: NodeID, + storage_directory_name: str, +) -> dict[str, Any]: + assert "/" not in storage_directory_name # nosec + driver_config: dict[str, Any] = { + "Options": { + "type": "nfs", + "o": f"addr={efs_settings.EFS_DNS_NAME},rw,nfsvers={NFS_PROTOCOL},rsize={READ_SIZE},wsize={WRITE_SIZE},{RECOVERY_MODE},timeo={NFS_REQUEST_TIMEOUT},retrans={NUMBER_OF_RETRANSMISSIONS},{PORT_MODE}", + "device": f":/{efs_settings.EFS_PROJECT_SPECIFIC_DATA_DIRECTORY}/{project_id}/{node_uuid}/{storage_directory_name}", + }, + } return driver_config @@ -82,11 +115,12 @@ def target(cls, path: Path) -> str: return f"{target_path}" @classmethod - def _volume_name(cls, path: Path) -> str: + def volume_name(cls, path: Path) -> str: + """Returns a volume name created from path. 
There is no possibility to go back to the original path from the volume name""" return f"{path}".replace(os.sep, "_") @classmethod - def source(cls, path: Path, node_uuid: NodeID, run_id: RunID) -> str: + def source(cls, path: Path, node_uuid: NodeID, service_run_id: ServiceRunID) -> str: """Returns a valid and unique volume name that is composed out of identifiers, namely - relative target path - node_uuid @@ -101,8 +135,13 @@ def source(cls, path: Path, node_uuid: NodeID, run_id: RunID) -> str: # NOTE: issues can occur when the paths of the mounted outputs, inputs # and state folders are very long and share the same subdirectory path. # Reversing volume name to prevent these issues from happening. - reversed_volume_name = cls._volume_name(path)[::-1] - unique_name = f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{run_id}_{node_uuid}_{reversed_volume_name}" + reversed_volume_name = cls.volume_name(path)[::-1] + + # ensure prefix size does not change + prefix = f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{service_run_id}_{node_uuid}" + assert len(prefix) == CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME - 1 # nosec + + unique_name = f"{prefix}_{reversed_volume_name}" return unique_name[:255] @classmethod @@ -111,45 +150,79 @@ def mount_entry( swarm_stack_name: str, path: Path, node_uuid: NodeID, - run_id: RunID, + service_run_id: ServiceRunID, project_id: ProjectID, user_id: UserID, + volume_size_limit: str | None, ) -> dict[str, Any]: """ Creates specification for mount to be added to containers created as part of a service """ return { - "Source": cls.source(path, node_uuid, run_id), + "Source": cls.source(path, node_uuid, service_run_id), "Target": cls.target(path), "Type": "volume", "VolumeOptions": { "Labels": { - "source": cls.source(path, node_uuid, run_id), - "run_id": f"{run_id}", + "source": cls.source(path, node_uuid, service_run_id), + "run_id": f"{service_run_id}", "node_uuid": f"{node_uuid}", "study_id": f"{project_id}", "user_id": f"{user_id}", "swarm_stack_name": swarm_stack_name, - } + }, + "DriverConfig": ( + {"Options": {"size": volume_size_limit}} + if volume_size_limit is not None + else None + ), }, } @classmethod def mount_shared_store( cls, - run_id: RunID, + service_run_id: ServiceRunID, node_uuid: NodeID, project_id: ProjectID, user_id: UserID, swarm_stack_name: str, + *, + has_quota_support: bool, ) -> dict[str, Any]: return cls.mount_entry( swarm_stack_name=swarm_stack_name, path=DY_SIDECAR_SHARED_STORE_PATH, node_uuid=node_uuid, - run_id=run_id, + service_run_id=service_run_id, + project_id=project_id, + user_id=user_id, + volume_size_limit="1M" if has_quota_support else None, + ) + + @classmethod + def mount_user_preferences( + cls, + user_preferences_path: Path, + service_run_id: ServiceRunID, + node_uuid: NodeID, + project_id: ProjectID, + user_id: UserID, + swarm_stack_name: str, + *, + has_quota_support: bool, + ): + return cls.mount_entry( + swarm_stack_name=swarm_stack_name, + path=user_preferences_path, + node_uuid=node_uuid, + service_run_id=service_run_id, project_id=project_id, user_id=user_id, + # NOTE: the contents of this volume will be zipped and must + be at most `_MAX_PREFERENCES_TOTAL_SIZE`, this 10M accounts + for files and data that can be compressed a lot + volume_size_limit="10M" if has_quota_support else None, ) @classmethod @@ -158,19 +231,19 @@ def mount_r_clone( swarm_stack_name: str, path: Path, node_uuid: NodeID, - run_id: RunID, + service_run_id: ServiceRunID, project_id: ProjectID, user_id: UserID, r_clone_settings: RCloneSettings, ) -> dict[str, Any]:
return { - "Source": cls.source(path, node_uuid, run_id), + "Source": cls.source(path, node_uuid, service_run_id), "Target": cls.target(path), "Type": "volume", "VolumeOptions": { "Labels": { - "source": cls.source(path, node_uuid, run_id), - "run_id": f"{run_id}", + "source": cls.source(path, node_uuid, service_run_id), + "run_id": f"{service_run_id}", "node_uuid": f"{node_uuid}", "study_id": f"{project_id}", "user_id": f"{user_id}", @@ -180,7 +253,41 @@ def mount_r_clone( r_clone_settings=r_clone_settings, project_id=project_id, node_uuid=node_uuid, - storage_directory_name=cls._volume_name(path).strip("_"), + storage_directory_name=cls.volume_name(path).strip("_"), + ), + }, + } + + @classmethod + def mount_efs( + cls, + swarm_stack_name: str, + path: Path, + node_uuid: NodeID, + service_run_id: ServiceRunID, + project_id: ProjectID, + user_id: UserID, + efs_settings: AwsEfsSettings, + storage_directory_name: str, + ) -> dict[str, Any]: + return { + "Source": cls.source(path, node_uuid, service_run_id), + "Target": cls.target(path), + "Type": "volume", + "VolumeOptions": { + "Labels": { + "source": cls.source(path, node_uuid, service_run_id), + "run_id": f"{service_run_id}", + "node_uuid": f"{node_uuid}", + "study_id": f"{project_id}", + "user_id": f"{user_id}", + "swarm_stack_name": swarm_stack_name, + }, + "DriverConfig": _get_efs_volume_driver_config( + efs_settings=efs_settings, + project_id=project_id, + node_uuid=node_uuid, + storage_directory_name=storage_directory_name, ), }, } diff --git a/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/__init__.py new file mode 100644 index 00000000000..8c08a824d3f --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/__init__.py @@ -0,0 +1,10 @@ +from ._setup import get_instrumentation, setup +from ._utils import get_metrics_labels, get_rate, track_duration + +__all__: tuple[str, ...] 
= ( + "get_instrumentation", + "get_metrics_labels", + "get_rate", + "setup", + "track_duration", +) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_models.py b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_models.py new file mode 100644 index 00000000000..85e56d52e63 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_models.py @@ -0,0 +1,145 @@ +from dataclasses import dataclass, field +from typing import Final + +from prometheus_client import CollectorRegistry, Histogram +from pydantic import ByteSize, TypeAdapter +from servicelib.instrumentation import MetricsBase, get_metrics_namespace + +from ..._meta import PROJECT_NAME + +_METRICS_NAMESPACE: Final[str] = get_metrics_namespace(PROJECT_NAME) +_INSTRUMENTATION_LABELS: Final[tuple[str, ...]] = ( + "user_id", + "wallet_id", + "service_key", + "service_version", +) + +_MINUTE: Final[int] = 60 +_BUCKETS_TIME_S: Final[tuple[float, ...]] = ( + 10, + 30, + 1 * _MINUTE, + 2 * _MINUTE, + 3 * _MINUTE, + 5 * _MINUTE, + 7 * _MINUTE, + 10 * _MINUTE, + 15 * _MINUTE, + 20 * _MINUTE, +) + + +_RATE_BPS_BUCKETS: Final[tuple[float, ...]] = tuple( + TypeAdapter(ByteSize).validate_python(f"{m}MiB") + for m in ( + 1, + 30, + 60, + 90, + 120, + 150, + 200, + 300, + 400, + 500, + 600, + ) +) + + +@dataclass(slots=True, kw_only=True) +class DynamiSidecarMetrics(MetricsBase): + start_time_duration: Histogram = field(init=False) + stop_time_duration: Histogram = field(init=False) + pull_user_services_images_duration: Histogram = field(init=False) + + # ingress rates + output_ports_pull_rate: Histogram = field(init=False) + input_ports_pull_rate: Histogram = field(init=False) + pull_service_state_rate: Histogram = field(init=False) + + # egress rates + # NOTE: input ports are never pushed + # NOTE: output ports are pushed by the dy-sidecar, upon change making recovering the metric very complicated + push_service_state_rate: Histogram = field(init=False) + + def __post_init__(self) -> None: + self.start_time_duration = Histogram( + "start_time_duration_seconds", + "time to start dynamic service (from start request in dv-2 till service containers are in running state (healthy))", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_BUCKETS_TIME_S, + subsystem=self.subsystem, + registry=self.registry, + ) + self.stop_time_duration = Histogram( + "stop_time_duration_seconds", + "time to stop dynamic service (from stop request in dv-2 till all allocated resources (services + dynamic-sidecar) are removed)", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_BUCKETS_TIME_S, + subsystem=self.subsystem, + registry=self.registry, + ) + self.pull_user_services_images_duration = Histogram( + "pull_user_services_images_duration_seconds", + "time to pull docker images", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_BUCKETS_TIME_S, + subsystem=self.subsystem, + registry=self.registry, + ) + + self.output_ports_pull_rate = Histogram( + "output_ports_pull_rate_bps", + "rate at which output ports were pulled", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_RATE_BPS_BUCKETS, + subsystem=self.subsystem, + registry=self.registry, + ) + self.input_ports_pull_rate = Histogram( + "input_ports_pull_rate_bps", + "rate at which input ports were pulled", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + 
buckets=_RATE_BPS_BUCKETS, + subsystem=self.subsystem, + registry=self.registry, + ) + self.pull_service_state_rate = Histogram( + "pull_service_state_rate_bps", + "rate at which service states were recovered", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_RATE_BPS_BUCKETS, + subsystem=self.subsystem, + registry=self.registry, + ) + + self.push_service_state_rate = Histogram( + "push_service_state_rate_bps", + "rate at which service states were saved", + labelnames=_INSTRUMENTATION_LABELS, + namespace=_METRICS_NAMESPACE, + buckets=_RATE_BPS_BUCKETS, + subsystem=self.subsystem, + registry=self.registry, + ) + + +@dataclass(slots=True, kw_only=True) +class DirectorV2Instrumentation: + registry: CollectorRegistry + dynamic_sidecar_metrics: DynamiSidecarMetrics = field(init=False) + + def __post_init__(self) -> None: + self.dynamic_sidecar_metrics = ( + DynamiSidecarMetrics( # pylint: disable=unexpected-keyword-arg + subsystem="dynamic_services", registry=self.registry + ) + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_setup.py b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_setup.py new file mode 100644 index 00000000000..bfc02835eac --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_setup.py @@ -0,0 +1,26 @@ +from typing import cast + +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) + +from ...core.errors import ConfigurationError +from ._models import DirectorV2Instrumentation + + +def setup(app: FastAPI) -> None: + registry = setup_prometheus_instrumentation(app) + + async def on_startup() -> None: + app.state.instrumentation = DirectorV2Instrumentation(registry=registry) + + app.add_event_handler("startup", on_startup) + + +def get_instrumentation(app: FastAPI) -> DirectorV2Instrumentation: + if not app.state.instrumentation: + raise ConfigurationError( + msg="Instrumentation not setup. Please check the configuration." + ) + return cast(DirectorV2Instrumentation, app.state.instrumentation) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_utils.py b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_utils.py new file mode 100644 index 00000000000..2ff4fd8e789 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/instrumentation/_utils.py @@ -0,0 +1,61 @@ +import time +from collections.abc import Iterator +from contextlib import contextmanager +from typing import Final + +from pydantic import NonNegativeFloat + +from ...models.dynamic_services_scheduler import SchedulerData + +_EPSILON: Final[NonNegativeFloat] = 1e9 + + +def get_metrics_labels(scheduler_data: "SchedulerData") -> dict[str, str]: + return { + "user_id": f"{scheduler_data.user_id}", + "wallet_id": ( + f"{scheduler_data.wallet_info.wallet_id}" + if scheduler_data.wallet_info + else "" + ), + "service_key": scheduler_data.key, + "service_version": scheduler_data.version, + } + + +def get_rate( + size: NonNegativeFloat | None, duration: NonNegativeFloat +) -> NonNegativeFloat: + if size is None or size <= 0: + size = _EPSILON + return size / duration + + +class DeferredFloat: + def __init__(self): + self._value: float | None = None + + def set_value(self, value): + if not isinstance(value, float | int): + msg = "Value must be a float or an int." 
+ raise TypeError(msg) + + self._value = float(value) + + def to_float(self) -> float: + if not isinstance(self._value, float): + msg = "Value must be a float or an int." + raise TypeError(msg) + + return self._value + + +@contextmanager +def track_duration() -> Iterator[DeferredFloat]: + duration = DeferredFloat() + start_time = time.time() + + yield duration + + end_time = time.time() + duration.set_value(end_time - start_time) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/node_rights.py b/services/director-v2/src/simcore_service_director_v2/modules/node_rights.py deleted file mode 100644 index 469fc86546d..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/node_rights.py +++ /dev/null @@ -1,236 +0,0 @@ -import asyncio -import logging -from contextlib import asynccontextmanager, suppress -from dataclasses import dataclass -from typing import AsyncIterator, Optional - -from fastapi import FastAPI -from pydantic import NonNegativeInt, PositiveFloat, PositiveInt -from redis.asyncio import Redis -from redis.asyncio.lock import Lock -from settings_library.redis import RedisSettings -from tenacity import retry -from tenacity.before_sleep import before_sleep_log -from tenacity.wait import wait_random - -from ..core.errors import ConfigurationError, NodeRightsAcquireError -from ..core.settings import DynamicSidecarSettings - -DockerNodeId = str -ResourceName = str - - -logger = logging.getLogger(__name__) - -redis_retry_policy = dict( - wait=wait_random(5, 10), - before_sleep=before_sleep_log(logger, logging.WARNING), - reraise=True, -) - - -def setup(app: FastAPI): - @retry(**redis_retry_policy) - async def on_startup() -> None: - app.state.node_rights_manager = await NodeRightsManager.create(app) - - async def on_shutdown() -> None: - node_rights_manager: NodeRightsManager = app.state.node_rights_manager - await node_rights_manager.close() - - app.add_event_handler("startup", on_startup) - app.add_event_handler("shutdown", on_shutdown) - - -class ExtendLock: - def __init__( - self, - lock: Lock, - timeout_s: PositiveFloat, - extend_interval_s: PositiveFloat, - ) -> None: - self.timeout_s: PositiveFloat = timeout_s - self.extend_interval_s: PositiveFloat = extend_interval_s - self._redis_lock: Lock = lock - self.task: Optional[asyncio.Task] = asyncio.create_task( - self._extend_task(), name=f"{self.__class__.__name__}" - ) - - async def _extend_task(self) -> None: - while True: - await asyncio.sleep(self.extend_interval_s) - await self._redis_lock.extend(self.timeout_s, replace_ttl=True) - - @property - def name(self) -> str: - return self._redis_lock.name - - async def initialize(self) -> None: - await self._redis_lock.do_reacquire() - - async def release(self) -> None: - await self._redis_lock.release() - - -# acquire the rights to use a docker swarm node -@dataclass -class NodeRightsManager: - """ - A `slot` is used to limit `resource` usage. It can be viewed as a token - which has to be returned, once the user finished using the `resource`. - - A slot can be reserved via the `acquire` context manger. If no - `NodeRightsAcquireError` is raised, the user is free to use - the locked `resource`. If an error is raised the - user must try again at a later time. 
- """ - - app: FastAPI - _redis: Redis - is_enabled: bool - lock_timeout_s: PositiveFloat - concurrent_resource_slots: PositiveInt - - @classmethod - async def create(cls, app: FastAPI) -> "NodeRightsManager": - redis_settings: RedisSettings = app.state.settings.REDIS - dynamic_sidecar_settings: DynamicSidecarSettings = ( - app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR - ) - return cls( - app=app, - _redis=Redis.from_url(redis_settings.dsn_locks), - is_enabled=dynamic_sidecar_settings.DYNAMIC_SIDECAR_DOCKER_NODE_RESOURCE_LIMITS_ENABLED, - concurrent_resource_slots=dynamic_sidecar_settings.DYNAMIC_SIDECAR_DOCKER_NODE_CONCURRENT_RESOURCE_SLOTS, - lock_timeout_s=dynamic_sidecar_settings.DYNAMIC_SIDECAR_DOCKER_NODE_SAVES_LOCK_TIMEOUT_S, - ) - - @classmethod - def instance(cls, app: FastAPI) -> "NodeRightsManager": - if not hasattr(app.state, "node_rights_manager"): - raise ConfigurationError( - "RedisLockManager client is not available. Please check the configuration." - ) - return app.state.node_rights_manager - - @classmethod - def _get_key(cls, docker_node_id: DockerNodeId, resource_name: ResourceName) -> str: - return f"{cls.__name__}.{docker_node_id}.{resource_name}.lock_slots" - - @classmethod - def _get_lock_name( - cls, - docker_node_id: DockerNodeId, - resource_name: ResourceName, - slot_index: NonNegativeInt, - ) -> str: - return f"{cls._get_key(docker_node_id, resource_name)}.{slot_index}" - - async def _get_node_slots( - self, docker_node_id: DockerNodeId, resource_name: ResourceName - ) -> int: - """ - get the total amount of slots available for the provided - resource on the node - """ - - node_slots_key = self._get_key(docker_node_id, resource_name) - slots: Optional[bytes] = await self._redis.get(node_slots_key) - if slots is not None: - return int(slots) - - await self._redis.set(node_slots_key, self.concurrent_resource_slots) - return self.concurrent_resource_slots - - @staticmethod - async def _release_extend_lock(extend_lock: ExtendLock) -> None: - """ - Releases a lock and all its related resources. - Cancels the extend_task - """ - - if extend_lock.task: - extend_lock.task.cancel() - with suppress(asyncio.CancelledError): - - async def _await_task(task: asyncio.Task) -> None: - await task - - # NOTE: When the extension task is awaited it sometimes blocks - # we can safely timeout the task and ignore the error. - # The **most probable* cause of the error is when the extend_task - # and the release are called at the same time. Some internal locking - # is involved and the task is blocked forever. - - # it should not take more than `extend_interval_s` to cancel task - try: - await asyncio.wait_for( - _await_task(extend_lock.task), - timeout=extend_lock.extend_interval_s * 2, - ) - except asyncio.TimeoutError: - logger.warning( - "Timed out while awaiting for cancellation of '%s'", - extend_lock.name, - ) - - extend_lock.task = None - logger.info("Lock '%s' released", extend_lock.name) - else: - # Below will appear in the logs only if the logs was released twice, - # in which case a `redis.exceptions.LockError: Cannot release an unlocked lock` - # will be raised. - # Otherwise there might be some issues. - logger.warning( - "Lock '%s' has no associated `extend_task`.", extend_lock.name - ) - - await extend_lock.release() - - @asynccontextmanager - async def acquire( - self, docker_node_id: DockerNodeId, *, resource_name: ResourceName - ) -> AsyncIterator[ExtendLock]: - """ - Context manger to helo with acquire and release. 
If it is not possible - - raises: `NodeRightsAcquireError` if the lock was not acquired. - """ - slots = await self._get_node_slots(docker_node_id, resource_name) - acquired_lock: Optional[Lock] = None - for slot in range(slots): - node_lock_name = self._get_lock_name(docker_node_id, resource_name, slot) - - lock = self._redis.lock(name=node_lock_name, timeout=self.lock_timeout_s) - lock_acquired = await lock.acquire(blocking=False) - - if lock_acquired: - acquired_lock = lock - logger.debug("Acquired %s/%s named '%s'", slot + 1, slots, lock.name) - break - - if acquired_lock is None: - raise NodeRightsAcquireError(docker_node_id=docker_node_id, slots=slots) - - # In order to avoid deadlock situations, where resources are not being - # released, a lock with a timeout will be acquired. - - # When the lock is acquired a background task which extends its - # validity will also be created. - - # Since the lifecycle of the extend task is tied to the one of the lock - # the task reference is attached to the lock. - extend_lock = ExtendLock( - lock=acquired_lock, - timeout_s=self.lock_timeout_s, - extend_interval_s=self.lock_timeout_s / 2, - ) - await extend_lock.initialize() - - try: - yield extend_lock - finally: - await self._release_extend_lock(extend_lock) - - async def close(self) -> None: - await self._redis.close(close_connection_pool=True) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/notifier.py b/services/director-v2/src/simcore_service_director_v2/modules/notifier.py new file mode 100644 index 00000000000..a0318b681dc --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/notifier.py @@ -0,0 +1,59 @@ +import contextlib + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_directorv2.notifications import ServiceNoMoreCredits +from models_library.api_schemas_directorv2.socketio import ( + SOCKET_IO_SERVICE_NO_MORE_CREDITS_EVENT, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.fastapi.app_state import SingletonInAppStateMixin + + +class Notifier(SingletonInAppStateMixin): + app_state_name: str = "notifier" + + def __init__(self, sio_manager: socketio.AsyncAioPikaManager): + self._sio_manager = sio_manager + + async def notify_shutdown_no_more_credits( + self, user_id: UserID, node_id: NodeID, wallet_id: WalletID + ) -> None: + await self._sio_manager.emit( + SOCKET_IO_SERVICE_NO_MORE_CREDITS_EVENT, + data=jsonable_encoder( + ServiceNoMoreCredits(node_id=node_id, wallet_id=wallet_id) + ), + room=SocketIORoomStr.from_user_id(user_id), + ) + + +async def publish_shutdown_no_more_credits( + app: FastAPI, *, user_id: UserID, node_id: NodeID, wallet_id: WalletID +) -> None: + notifier: Notifier = Notifier.get_from_app_state(app) + await notifier.notify_shutdown_no_more_credits( + user_id=user_id, node_id=node_id, wallet_id=wallet_id + ) + + +def setup(app: FastAPI): + async def _on_startup() -> None: + assert app.state.external_socketio # nosec + + notifier = Notifier( + sio_manager=app.state.external_socketio, + ) + notifier.set_to_app_state(app) + assert Notifier.get_from_app_state(app) == notifier # nosec + + async def _on_shutdown() -> None: + with contextlib.suppress(AttributeError): + Notifier.pop_from_app_state(app) + + 
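# Illustrative sketch (not part of the diff): emitting the "no more credits"
# event through the notifier defined above. `app`, `user_id`, `node_id` and
# `wallet_id` are assumed to be available in the caller; in this service the
# event is emitted from the RabbitMQ out-of-credits handler (modules/rabbitmq.py).
async def _example_notify_out_of_credits(
    app: FastAPI, user_id: UserID, node_id: NodeID, wallet_id: WalletID
) -> None:
    await publish_shutdown_no_more_credits(
        app, user_id=user_id, node_id=node_id, wallet_id=wallet_id
    )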
app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/__init__.py b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth.py b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth.py new file mode 100644 index 00000000000..18fb5f4ff17 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth.py @@ -0,0 +1,89 @@ +import logging +import uuid +from datetime import timedelta +from uuid import uuid5 + +from fastapi import FastAPI +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rpc.webserver.auth.api_keys import generate_unique_api_key +from models_library.users import UserID + +from ._api_auth_rpc import create_api_key as rpc_create_api_key +from ._api_auth_rpc import delete_api_key_by_key as rpc_delete_api_key + +_EXPIRATION_AUTO_KEYS = timedelta(weeks=4) + + +_logger = logging.getLogger(__name__) + + +def create_unique_api_name_for( + product_name: ProductName, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> str: + # NOTE: The namespace chosen doesn't significantly impact the resulting UUID + # as long as it's consistently used across the same context + return f"__auto_{uuid5(uuid.NAMESPACE_DNS, f'{product_name}/{user_id}/{project_id}/{node_id}')}" + + +async def create_user_api_key( + app: FastAPI, # pylint: disable=unused-argument + product_name: ProductName, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> str: + # NOTE: Given the display name, the API key is deterministically generated + return generate_unique_api_key( + create_unique_api_name_for(product_name, user_id, project_id, node_id) + ) + + +async def create_user_api_secret( + app: FastAPI, + product_name: ProductName, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> str: + display_name = create_unique_api_name_for( + product_name, user_id, project_id, node_id + ) + _logger.debug("Creating API key for %s", display_name) + data = await rpc_create_api_key( + app, + user_id=user_id, + product_name=product_name, + display_name=display_name, + expiration=_EXPIRATION_AUTO_KEYS, + ) + assert data.api_secret # nosec + assert isinstance(data.api_secret, str) # nosec + return data.api_secret + + +async def delete_api_key_by_key( + app: FastAPI, + product_name: ProductName, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> None: + api_key = create_unique_api_name_for(product_name, user_id, project_id, node_id) + await rpc_delete_api_key( + app=app, + product_name=product_name, + user_id=user_id, + api_key=api_key, + ) + + +__all__: tuple[str, ...] 
= ( + "create_user_api_key", + "create_user_api_secret", + "delete_api_key_by_key", +) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth_rpc.py b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth_rpc.py new file mode 100644 index 00000000000..d623305229d --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_api_auth_rpc.py @@ -0,0 +1,52 @@ +from datetime import timedelta + +from fastapi import FastAPI +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.products import ProductName +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rpc.webserver.auth.api_keys import ApiKeyGet +from models_library.users import UserID +from pydantic import TypeAdapter + +from ..rabbitmq import get_rabbitmq_rpc_client + +# +# RPC interface +# + + +async def create_api_key( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + display_name: str, + expiration: timedelta | None = None, +) -> ApiKeyGet: + rpc_client = get_rabbitmq_rpc_client(app) + result = await rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("create_api_key"), + product_name=product_name, + user_id=user_id, + display_name=display_name, + expiration=expiration, + ) + return ApiKeyGet.model_validate(result) + + +async def delete_api_key_by_key( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + api_key: str, +) -> None: + rpc_client = get_rabbitmq_rpc_client(app) + await rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_api_key_by_key"), + product_name=product_name, + user_id=user_id, + api_key=api_key, + ) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_user.py b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_user.py new file mode 100644 index 00000000000..10368ff4389 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/_user.py @@ -0,0 +1,17 @@ +from fastapi import FastAPI +from models_library.users import UserID +from simcore_postgres_database.models.users import UserRole + +from ...utils.db import get_repository +from ..db.repositories.users import UsersRepository + + +async def request_user_email(app: FastAPI, user_id: UserID) -> str: + repo = get_repository(app, UsersRepository) + return await repo.get_user_email(user_id=user_id) + + +async def request_user_role(app: FastAPI, user_id: UserID) -> str: + repo = get_repository(app, UsersRepository) + user_role: UserRole = await repo.get_user_role(user_id=user_id) + return f"{user_role.value}" diff --git a/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/substitutions.py b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/substitutions.py new file mode 100644 index 00000000000..0d458749ba3 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/osparc_variables/substitutions.py @@ -0,0 +1,285 @@ +"""Substitution of osparc variables and secrets""" + +import functools +import logging +from copy import deepcopy +from typing import Any, Final, TypeVar + +from fastapi import FastAPI +from models_library.osparc_variable_identifier import ( + UnresolvedOsparcVariableIdentifierError, + raise_if_unresolved_osparc_variable_identifier_found, + 
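# Illustrative sketch (not part of the diff): the display name produced by
# `create_unique_api_name_for` (defined in _api_auth.py above) is deterministic,
# which is what makes `create_user_api_key` reproducible without storing state.
# The product/user/project/node values below are hypothetical examples.
from uuid import UUID

_project_id = UUID("11111111-1111-1111-1111-111111111111")
_node_id = UUID("22222222-2222-2222-2222-222222222222")

_name_a = create_unique_api_name_for("osparc", 1, _project_id, _node_id)
_name_b = create_unique_api_name_for("osparc", 1, _project_id, _node_id)
assert _name_a == _name_b  # same inputs -> same "__auto_..." display name
# the matching secret is created server-side through the webserver RPC
# (`rpc_create_api_key` in _api_auth_rpc.py) and later removed with
# `rpc_delete_api_key`, addressed by the same deterministic name.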
replace_osparc_variable_identifier, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.service_settings_labels import ComposeSpecLabelDict +from models_library.services import ServiceKey, ServiceVersion +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.utils.specs_substitution import SpecsSubstitutionsResolver +from models_library.wallets import WalletID +from pydantic import BaseModel +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.logging_utils import log_context + +from ...utils.db import get_repository +from ...utils.osparc_variables import ( + ContextDict, + OsparcVariablesTable, + resolve_variables_from_context, +) +from ..db.repositories.services_environments import ServicesEnvironmentsRepository +from ._api_auth import create_user_api_key, create_user_api_secret +from ._user import request_user_email, request_user_role + +_logger = logging.getLogger(__name__) + +TBaseModel = TypeVar("TBaseModel", bound=BaseModel) + + +async def substitute_vendor_secrets_in_model( + app: FastAPI, + model: TBaseModel, + *, + safe: bool = True, + service_key: ServiceKey, + service_version: ServiceVersion, + product_name: ProductName, +) -> TBaseModel: + result: TBaseModel = model + try: + with log_context(_logger, logging.DEBUG, "substitute_vendor_secrets_in_model"): + # checks before to avoid unnecessary calls to pg + # if it raises an error vars need replacement + _logger.debug("model in which to replace model=%s", model) + raise_if_unresolved_osparc_variable_identifier_found(model) + except UnresolvedOsparcVariableIdentifierError as err: + repo = get_repository(app, ServicesEnvironmentsRepository) + vendor_secrets = await repo.get_vendor_secrets( + service_key=service_key, + service_version=service_version, + product_name=product_name, + ) + _logger.warning( + "Failed to resolve osparc variable identifiers in model (%s). Replacing vendor secrets", + err, + ) + result = replace_osparc_variable_identifier(model, vendor_secrets) + + if not safe: + raise_if_unresolved_osparc_variable_identifier_found(result) + + return result + + +async def substitute_vendor_secrets_in_specs( + app: FastAPI, + specs: dict[str, Any], + *, + safe: bool = True, + service_key: ServiceKey, + service_version: ServiceVersion, + product_name: ProductName, +) -> dict[str, Any]: + resolver = SpecsSubstitutionsResolver(specs, upgrade=False) + repo = get_repository(app, ServicesEnvironmentsRepository) + + _logger.debug( + "substitute_vendor_secrets_in_specs detected_identifiers=%s", + resolver.get_identifiers(), + ) + + if any(repo.is_vendor_secret_identifier(idr) for idr in resolver.get_identifiers()): + # checks before to avoid unnecessary calls to pg + vendor_secrets = await repo.get_vendor_secrets( + service_key=service_key, + service_version=service_version, + product_name=product_name, + ) + + # resolve substitutions + resolver.set_substitutions(mappings=vendor_secrets) + new_specs: dict[str, Any] = resolver.run(safe=safe) + return new_specs + + return deepcopy(specs) + + +class OsparcSessionVariablesTable(OsparcVariablesTable, SingletonInAppStateMixin): + app_state_name: str = "session_variables_table" + + @classmethod + def create(cls, app: FastAPI): + table = cls() + # Registers some session osparc_variables + # WARNING: context_name needs to match session_context! 
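# Illustrative sketch (not part of the diff): the resolver flow used by
# `substitute_vendor_secrets_in_specs` above. The identifier name and its value
# are hypothetical; in the service the mapping comes from
# `ServicesEnvironmentsRepository.get_vendor_secrets`.
_specs = {
    "environment": ["LICENSE_SERVER=$OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER"]
}
_resolver = SpecsSubstitutionsResolver(_specs, upgrade=False)
_resolver.set_substitutions(
    mappings={"OSPARC_VARIABLE_VENDOR_SECRET_LICENSE_SERVER": "license.example.com"}
)
_resolved = _resolver.run(safe=True)
# roughly expected: {"environment": ["LICENSE_SERVER=license.example.com"]}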
+ # NOTE: please keep alphabetically ordered + for name, context_name in [ + ("OSPARC_VARIABLE_NODE_ID", "node_id"), + ("OSPARC_VARIABLE_PRODUCT_NAME", "product_name"), + ("OSPARC_VARIABLE_STUDY_UUID", "project_id"), + ("OSPARC_VARIABLE_SERVICE_RUN_ID", "run_id"), + ("OSPARC_VARIABLE_WALLET_ID", "wallet_id"), + ("OSPARC_VARIABLE_USER_ID", "user_id"), + ("OSPARC_VARIABLE_API_HOST", "api_server_base_url"), + ]: + table.register_from_context(name, context_name) + + table.register_from_handler("OSPARC_VARIABLE_USER_EMAIL")(request_user_email) + table.register_from_handler("OSPARC_VARIABLE_USER_ROLE")(request_user_role) + table.register_from_handler("OSPARC_VARIABLE_API_KEY")(create_user_api_key) + table.register_from_handler("OSPARC_VARIABLE_API_SECRET")( + create_user_api_secret + ) + + _logger.debug( + "Registered session_variables_table=%s", sorted(table.variables_names()) + ) + table.set_to_app_state(app) + return table + + +_NEW_ENVIRONMENTS: Final = { + "OSPARC_API_BASE_URL": "$OSPARC_VARIABLE_API_HOST", + "OSPARC_API_KEY": "$OSPARC_VARIABLE_API_KEY", + "OSPARC_API_SECRET": "$OSPARC_VARIABLE_API_SECRET", + "OSPARC_STUDY_ID": "$OSPARC_VARIABLE_STUDY_UUID", + "OSPARC_NODE_ID": "$OSPARC_VARIABLE_NODE_ID", +} + + +def auto_inject_environments( + compose_spec: ComposeSpecLabelDict, +) -> ComposeSpecLabelDict: + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/5925 + for service in compose_spec.get("services", {}).values(): + current_environment = deepcopy(service.get("environment", {})) + + # if _NEW_ENVIRONMENTS are already defined, then do not change them + if isinstance(current_environment, dict): + service["environment"] = { + **_NEW_ENVIRONMENTS, + **current_environment, + } + elif isinstance(current_environment, list): + service["environment"] += [ + f"{name}={value}" + for name, value in _NEW_ENVIRONMENTS.items() + if not any(e.startswith(name) for e in current_environment) + ] + return compose_spec + + +async def resolve_and_substitute_session_variables_in_model( + app: FastAPI, + model: TBaseModel, + *, + safe: bool = True, + user_id: UserID, + product_name: str, + product_api_base_url: str | None, + project_id: ProjectID, + node_id: NodeID, + service_run_id: ServiceRunID, + wallet_id: WalletID | None, +) -> TBaseModel: + result: TBaseModel = model + try: + with log_context( + _logger, logging.DEBUG, "resolve_and_substitute_session_variables_in_model" + ): + # checks before to avoid unnecessary calls to pg + # if it raises an error vars need replacement + raise_if_unresolved_osparc_variable_identifier_found(model) + except UnresolvedOsparcVariableIdentifierError: + table = OsparcSessionVariablesTable.get_from_app_state(app) + identifiers = await resolve_variables_from_context( + table.copy(), + context=ContextDict( + app=app, + user_id=user_id, + product_name=product_name, + project_id=project_id, + node_id=node_id, + run_id=service_run_id, + wallet_id=wallet_id, + api_server_base_url=product_api_base_url, + ), + ) + _logger.debug("replacing with the identifiers=%s", identifiers) + result = replace_osparc_variable_identifier(model, identifiers) + + if not safe: + raise_if_unresolved_osparc_variable_identifier_found(result) + + return result + + +async def resolve_and_substitute_session_variables_in_specs( + app: FastAPI, + specs: dict[str, Any], + *, + safe: bool = True, + user_id: UserID, + product_name: str, + product_api_base_url: str, + project_id: ProjectID, + node_id: NodeID, + service_run_id: ServiceRunID, + wallet_id: WalletID | None, +) -> dict[str, Any]: + 
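# Illustrative sketch (not part of the diff): effect of `auto_inject_environments`
# above on a minimal compose spec. The service name and the pre-set variable are
# hypothetical; user-defined values always win over the injected defaults because
# the merge is `{**_NEW_ENVIRONMENTS, **current_environment}`.
_spec = {
    "services": {
        "jupyter": {"environment": {"OSPARC_API_KEY": "user-defined-key"}}
    }
}
_injected = auto_inject_environments(_spec)
# "jupyter" now also carries OSPARC_API_BASE_URL, OSPARC_API_SECRET,
# OSPARC_STUDY_ID and OSPARC_NODE_ID mapped to their $OSPARC_VARIABLE_*
# placeholders, while the pre-existing OSPARC_API_KEY value is left untouched.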
table = OsparcSessionVariablesTable.get_from_app_state(app) + resolver = SpecsSubstitutionsResolver(specs, upgrade=False) + + if requested := set(resolver.get_identifiers()): + available = set(table.variables_names()) + identifiers_to_replace = available.intersection(requested) + _logger.debug( + "resolve_and_substitute_session_variables_in_specs identifiers_to_replace=%s", + identifiers_to_replace, + ) + if identifiers_to_replace: + environs = await resolve_variables_from_context( + table.copy(include=identifiers_to_replace), + context=ContextDict( + app=app, + user_id=user_id, + product_name=product_name, + project_id=project_id, + node_id=node_id, + run_id=service_run_id, + wallet_id=wallet_id, + api_server_base_url=product_api_base_url, + ), + ) + + resolver.set_substitutions(mappings=environs) + new_specs: dict[str, Any] = resolver.run(safe=safe) + return new_specs + + return deepcopy(specs) + + +def setup(app: FastAPI): + """ + **o2sparc variables and secrets** are identifiers-value maps that are substituted on the service specs (e.g. docker-compose). + - **vendor secrets**: information set by a vendor on the platform. e.g. a vendor service license + - **session variables**: some session information as "current user email" or the "current product name" + - **lifespan variables**: produced before a service is started and cleaned up after it finishes (e.g. API tokens ) + """ + app.add_event_handler( + "startup", functools.partial(OsparcSessionVariablesTable.create, app) + ) + + +# +# CLI helpers +# + + +def list_osparc_session_variables() -> list[str]: + app = FastAPI() + table = OsparcSessionVariablesTable.create(app) + return sorted(table.variables_names()) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/projects_networks.py b/services/director-v2/src/simcore_service_director_v2/modules/projects_networks.py index 7249af54c49..f58a5ddcfbf 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/projects_networks.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/projects_networks.py @@ -3,7 +3,7 @@ from typing import NamedTuple from uuid import UUID -from models_library.projects import ProjectAtDB, ProjectID, Workbench +from models_library.projects import NodesDict, ProjectAtDB, ProjectID from models_library.projects_networks import ( PROJECT_NETWORK_PREFIX, ContainerAliases, @@ -17,12 +17,12 @@ from models_library.service_settings_labels import SimcoreServiceLabels from models_library.services import ServiceKeyVersion from models_library.users import UserID -from pydantic import ValidationError, parse_obj_as +from pydantic import TypeAdapter, ValidationError +from servicelib.rabbitmq import RabbitMQClient from servicelib.utils import logged_gather -from simcore_service_director_v2.core.errors import ProjectNotFoundError -from simcore_service_director_v2.modules.rabbitmq import RabbitMQClient -from ..api.dependencies.director_v0 import DirectorV0Client +from ..core.errors import ProjectNetworkNotFoundError +from ..modules.catalog import CatalogClient from ..modules.db.repositories.projects import ProjectsRepository from ..modules.db.repositories.projects_networks import ProjectsNetworksRepository from ..modules.dynamic_sidecar.scheduler import DynamicSidecarsScheduler @@ -40,18 +40,18 @@ class _ToAdd(NamedTuple): project_id: ProjectID node_id: NodeIDStr network_name: str - network_alias: str + network_alias: DockerNetworkAlias def _network_name(project_id: ProjectID, user_defined: str) -> DockerNetworkName: network_name = 
f"{PROJECT_NETWORK_PREFIX}_{project_id}_{user_defined}" - return parse_obj_as(DockerNetworkName, network_name) + return TypeAdapter(DockerNetworkName).validate_python(network_name) async def requires_dynamic_sidecar( service_key: str, service_version: str, - director_v0_client: DirectorV0Client, + catalog_client: CatalogClient, ) -> bool: decoded_service_key = urllib.parse.unquote_plus(service_key) @@ -62,12 +62,16 @@ async def requires_dynamic_sidecar( if service_type != "dynamic": return False + service_key_version = ServiceKeyVersion.model_validate( + {"key": decoded_service_key, "version": service_version} + ) simcore_service_labels: SimcoreServiceLabels = ( - await director_v0_client.get_service_labels( - service=ServiceKeyVersion(key=decoded_service_key, version=service_version) + await catalog_client.get_service_labels( + service_key_version.key, service_key_version.version ) ) - return simcore_service_labels.needs_dynamic_sidecar + requires_dynamic_sidecar_: bool = simcore_service_labels.needs_dynamic_sidecar + return requires_dynamic_sidecar_ async def _send_network_configuration_to_dynamic_sidecar( @@ -102,7 +106,7 @@ async def _send_network_configuration_to_dynamic_sidecar( # if alias is different remove the network for new_network_name, node_ids_and_aliases in new_networks_with_aliases.items(): existing_node_ids_and_aliases = existing_networks_with_aliases.get( - new_network_name, {} + new_network_name, {} # type: ignore[arg-type] # -> should refactor code to not use DictModel it is useless ) for node_id, alias in node_ids_and_aliases.items(): # node does not exist @@ -143,7 +147,7 @@ async def _send_network_configuration_to_dynamic_sidecar( # all aliases which are different or missing should be added for new_network_name, node_ids_and_aliases in new_networks_with_aliases.items(): existing_node_ids_and_aliases = existing_networks_with_aliases.get( - new_network_name, {} + new_network_name, {} # type: ignore[arg-type] # -> should refactor code to not use DictModel it is useless ) for node_id, alias in node_ids_and_aliases.items(): existing_alias = existing_node_ids_and_aliases.get(node_id) @@ -172,8 +176,8 @@ async def _send_network_configuration_to_dynamic_sidecar( async def _get_networks_with_aliases_for_default_network( project_id: ProjectID, user_id: UserID, - new_workbench: Workbench, - director_v0_client: DirectorV0Client, + new_workbench: NodesDict, + catalog_client: CatalogClient, rabbitmq_client: RabbitMQClient, ) -> NetworksWithAliases: """ @@ -181,24 +185,27 @@ async def _get_networks_with_aliases_for_default_network( be on the same network. 
Return an updated version of the projects_networks """ - new_networks_with_aliases: NetworksWithAliases = NetworksWithAliases.parse_obj({}) + new_networks_with_aliases: NetworksWithAliases = NetworksWithAliases.model_validate( + {} + ) default_network = _network_name(project_id, "default") - new_networks_with_aliases[default_network] = ContainerAliases.parse_obj({}) + new_networks_with_aliases[default_network] = ContainerAliases.model_validate({}) for node_uuid, node_content in new_workbench.items(): - # only add dynamic-sidecar nodes if not await requires_dynamic_sidecar( service_key=node_content.key, service_version=node_content.version, - director_v0_client=director_v0_client, + catalog_client=catalog_client, ): continue # only add if network label is valid, otherwise it will be skipped try: - network_alias = parse_obj_as(DockerNetworkAlias, node_content.label) + network_alias = TypeAdapter(DockerNetworkAlias).validate_python( + node_content.label + ) except ValidationError: message = LoggerRabbitMessage( user_id=user_id, @@ -215,12 +222,14 @@ async def _get_networks_with_aliases_for_default_network( f"Network name is {default_network}" ) ], - log_level=logging.INFO, + log_level=logging.WARNING, ) - await rabbitmq_client.publish(message.channel_name, message.json()) + await rabbitmq_client.publish(message.channel_name, message) continue - new_networks_with_aliases[default_network][f"{node_uuid}"] = network_alias + new_networks_with_aliases[default_network][ + NodeIDStr(f"{node_uuid}") + ] = network_alias return new_networks_with_aliases @@ -229,7 +238,7 @@ async def update_from_workbench( projects_networks_repository: ProjectsNetworksRepository, projects_repository: ProjectsRepository, scheduler: DynamicSidecarsScheduler, - director_v0_client: DirectorV0Client, + catalog_client: CatalogClient, rabbitmq_client: RabbitMQClient, project_id: ProjectID, ) -> None: @@ -243,9 +252,9 @@ async def update_from_workbench( project_id=project_id ) ) - except ProjectNotFoundError: - existing_projects_networks = ProjectsNetworks.parse_obj( - dict(project_uuid=project_id, networks_with_aliases={}) + except ProjectNetworkNotFoundError: + existing_projects_networks = ProjectsNetworks.model_validate( + {"project_uuid": project_id, "networks_with_aliases": {}} ) existing_networks_with_aliases = existing_projects_networks.networks_with_aliases @@ -258,7 +267,7 @@ async def update_from_workbench( project_id=project_id, user_id=project.prj_owner, new_workbench=project.workbench, - director_v0_client=director_v0_client, + catalog_client=catalog_client, rabbitmq_client=rabbitmq_client, ) logger.debug("%s", f"{existing_networks_with_aliases=}") diff --git a/services/director-v2/src/simcore_service_director_v2/modules/rabbitmq.py b/services/director-v2/src/simcore_service_director_v2/modules/rabbitmq.py index ea854eb93d0..86f6e703791 100644 --- a/services/director-v2/src/simcore_service_director_v2/modules/rabbitmq.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/rabbitmq.py @@ -1,14 +1,53 @@ import logging +from functools import partial from typing import cast from fastapi import FastAPI -from servicelib.rabbitmq import RabbitMQClient -from servicelib.rabbitmq_utils import wait_till_rabbitmq_responsive +from models_library.rabbitmq_messages import ( + CreditsLimit, + WalletCreditsLimitReachedMessage, +) +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) from settings_library.rabbit import RabbitSettings from ..core.errors import 
ConfigurationError +from ..core.settings import AppSettings +from .notifier import publish_shutdown_no_more_credits -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + + +async def handler_out_of_credits(app: FastAPI, data: bytes) -> bool: + message = WalletCreditsLimitReachedMessage.model_validate_json(data) + + scheduler: "DynamicSidecarsScheduler" = app.state.dynamic_sidecar_scheduler # type: ignore[name-defined] # noqa: F821 + settings: AppSettings = app.state.settings + + if ( + settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED + ): + _logger.warning( + "Notifying frontend to shutdown service: '%s' for user '%s' because wallet '%s' is out of credits.", + message.node_id, + message.user_id, + message.wallet_id, + ) + await publish_shutdown_no_more_credits( + app, + user_id=message.user_id, + node_id=message.node_id, + wallet_id=message.wallet_id, + ) + else: + await scheduler.mark_all_services_in_wallet_for_removal( + wallet_id=message.wallet_id + ) + + return True def setup(app: FastAPI) -> None: @@ -18,10 +57,27 @@ async def on_startup() -> None: app.state.rabbitmq_client = RabbitMQClient( client_name="director-v2", settings=settings ) + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="director-v2-rpc-client", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="director-v2-rpc-server", settings=settings + ) + + await app.state.rabbitmq_client.subscribe( + WalletCreditsLimitReachedMessage.get_channel_name(), + partial(handler_out_of_credits, app), + exclusive_queue=False, + topics=[f"*.{CreditsLimit.OUT_OF_CREDITS}"], + ) async def on_shutdown() -> None: if app.state.rabbitmq_client: await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_client: + await app.state.rabbitmq_rpc_client.close() + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) @@ -29,7 +85,20 @@ async def on_shutdown() -> None: def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: if not hasattr(app.state, "rabbitmq_client"): - raise ConfigurationError( - "RabbitMQ client is not available. Please check the configuration." - ) + msg = "RabbitMQ client is not available. Please check the configuration." + raise ConfigurationError(msg=msg) return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + if not hasattr(app.state, "rabbitmq_rpc_client"): + msg = ( + "RabbitMQ client for RPC is not available. Please check the configuration." 
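# Illustrative sketch (not part of the diff): the subscription pattern used by
# `setup()` above. The handler bound with `partial(handler, app)` receives the
# raw message bytes, validates them into the typed message and returns True, as
# `handler_out_of_credits` does. The handler name and log line are hypothetical.
async def _example_out_of_credits_logger(app: FastAPI, data: bytes) -> bool:
    message = WalletCreditsLimitReachedMessage.model_validate_json(data)
    _logger.info(
        "wallet %s for user %s ran out of credits (node %s)",
        message.wallet_id,
        message.user_id,
        message.node_id,
    )
    return True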
+ ) + raise ConfigurationError(msg=msg) + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/redis.py b/services/director-v2/src/simcore_service_director_v2/modules/redis.py new file mode 100644 index 00000000000..5928cc78e97 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/redis.py @@ -0,0 +1,37 @@ +from typing import cast + +from fastapi import FastAPI +from servicelib.redis import RedisClientsManager, RedisManagerDBConfig +from settings_library.redis import RedisDatabase + +from .._meta import APP_NAME +from ..core.settings import AppSettings + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + settings: AppSettings = app.state.settings + + app.state.redis_clients_manager = redis_clients_manager = RedisClientsManager( + databases_configs={ + RedisManagerDBConfig(database=db) + for db in ( + RedisDatabase.LOCKS, + RedisDatabase.DISTRIBUTED_IDENTIFIERS, + ) + }, + settings=settings.REDIS, + client_name=APP_NAME, + ) + await redis_clients_manager.setup() + + async def on_shutdown() -> None: + redis_clients_manager: RedisClientsManager = app.state.redis_clients_manager + await redis_clients_manager.shutdown() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_redis_client_manager(app: FastAPI) -> RedisClientsManager: + return cast(RedisClientsManager, app.state.redis_clients_manager) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/remote_debug.py b/services/director-v2/src/simcore_service_director_v2/modules/remote_debug.py deleted file mode 100644 index 8d6a6caa675..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/modules/remote_debug.py +++ /dev/null @@ -1,32 +0,0 @@ -""" Setup remote debugger with Python Tools for Visual Studio (PTVSD) - -""" -import logging - -from fastapi import FastAPI - -logger = logging.getLogger(__name__) - - -def setup(app: FastAPI): - remote_debug_port = app.state.settings.DIRECTOR_V2_REMOTE_DEBUG_PORT - - def on_startup() -> None: - try: - logger.debug("Enabling attach ptvsd ...") - # - # SEE https://github.com/microsoft/ptvsd#enabling-debugging - # - import ptvsd - - ptvsd.enable_attach( - address=("0.0.0.0", remote_debug_port), # nosec - ) # nosec - except ImportError as err: - raise RuntimeError( - "Cannot enable remote debugging. 
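# Illustrative sketch (not part of the diff): other modules retrieve the manager
# configured in redis.py above with `get_redis_client_manager(app)`. Obtaining a
# per-database client is assumed to go through an accessor such as `client(...)`
# on servicelib's RedisClientsManager; that accessor name is an assumption and is
# not shown in this diff.
def _example_get_locks_client(app: FastAPI):
    manager = get_redis_client_manager(app)
    return manager.client(RedisDatabase.LOCKS)  # assumed accessor, not shown here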
Please install ptvsd first" - ) from err - - logger.info("Remote debugging enabled: listening port %s", remote_debug_port) - - app.add_event_handler("startup", on_startup) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/resource_usage_tracker_client.py b/services/director-v2/src/simcore_service_director_v2/modules/resource_usage_tracker_client.py new file mode 100644 index 00000000000..550b2eddfef --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/resource_usage_tracker_client.py @@ -0,0 +1,183 @@ +""" Interface to communicate with the resource usage tracker +""" + +import contextlib +import logging +from dataclasses import dataclass +from typing import cast + +import httpx +from fastapi import FastAPI, status +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingPlanGet, + RutPricingUnitGet, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingAndHardwareInfoTuple, + PricingPlanId, + PricingUnitId, +) +from models_library.services import ServiceKey, ServiceVersion +from models_library.wallets import WalletID +from servicelib.fastapi.tracing import setup_httpx_client_tracing + +from ..core.errors import PricingPlanUnitNotFoundError +from ..core.settings import AppSettings + +_logger = logging.getLogger(__name__) + + +@dataclass +class ResourceUsageTrackerClient: + client: httpx.AsyncClient + exit_stack: contextlib.AsyncExitStack + + @classmethod + def create(cls, settings: AppSettings) -> "ResourceUsageTrackerClient": + client = httpx.AsyncClient( + base_url=settings.DIRECTOR_V2_RESOURCE_USAGE_TRACKER.api_base_url, + ) + if settings.DIRECTOR_V2_TRACING: + setup_httpx_client_tracing(client=client) + exit_stack = contextlib.AsyncExitStack() + + return cls(client=client, exit_stack=exit_stack) + + async def start(self): + await self.exit_stack.enter_async_context(self.client) + + async def close(self): + await self.exit_stack.aclose() + + # + # service diagnostics + # + async def ping(self) -> bool: + """Check whether server is reachable""" + try: + await self.client.get("/") + return True + except httpx.RequestError: + return False + + async def is_healhy(self) -> bool: + """Service is reachable and ready""" + try: + response = await self.client.get("/") + response.raise_for_status() + return True + except httpx.HTTPError: + return False + + # + # pricing plans methods + # + + async def get_default_service_pricing_plan( + self, + product_name: ProductName, + service_key: ServiceKey, + service_version: ServiceVersion, + ) -> RutPricingPlanGet: + response = await self.client.get( + f"/services/{service_key}/{service_version}/pricing-plan", + params={ + "product_name": product_name, + }, + ) + if response.status_code == status.HTTP_404_NOT_FOUND: + msg = "No pricing plan defined" + raise PricingPlanUnitNotFoundError(msg=msg) + + response.raise_for_status() + return RutPricingPlanGet.model_validate(response.json()) + + async def get_default_pricing_and_hardware_info( + self, + product_name: ProductName, + service_key: ServiceKey, + service_version: ServiceVersion, + ) -> PricingAndHardwareInfoTuple: + service_pricing_plan_get = await self.get_default_service_pricing_plan( + product_name=product_name, + service_key=service_key, + service_version=service_version, + ) + assert service_pricing_plan_get.pricing_units # nosec + for unit in 
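# Illustrative sketch (not part of the diff): standalone lifecycle of the
# ResourceUsageTrackerClient above. The product and service coordinates are
# hypothetical examples; inside the service the shared instance is registered on
# `app.state` by `setup()` further below.
async def _example_fetch_default_plan(settings: AppSettings) -> RutPricingPlanGet:
    rut_client = ResourceUsageTrackerClient.create(settings)
    await rut_client.start()
    try:
        return await rut_client.get_default_service_pricing_plan(
            product_name="osparc",  # hypothetical product
            service_key="simcore/services/comp/itis/sleeper",  # hypothetical service
            service_version="2.0.0",
        )
    finally:
        await rut_client.close()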
service_pricing_plan_get.pricing_units: + if unit.default: + return PricingAndHardwareInfoTuple( + service_pricing_plan_get.pricing_plan_id, + unit.pricing_unit_id, + unit.current_cost_per_unit_id, + unit.specific_info.aws_ec2_instances, + ) + msg = "Default pricing plan and unit does not exist" + raise PricingPlanUnitNotFoundError(msg=msg) + + async def get_pricing_unit( + self, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, + ) -> RutPricingUnitGet: + response = await self.client.get( + f"/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}", + params={ + "product_name": product_name, + }, + ) + response.raise_for_status() + return RutPricingUnitGet.model_validate(response.json()) + + async def get_wallet_credits( + self, + product_name: ProductName, + wallet_id: WalletID, + ) -> WalletTotalCredits: + response = await self.client.post( + "/credit-transactions/credits:sum", + params={"product_name": product_name, "wallet_id": wallet_id}, + ) + response.raise_for_status() + return WalletTotalCredits.model_validate(response.json()) + + # + # app + # + + @classmethod + def get_from_state(cls, app: FastAPI) -> "ResourceUsageTrackerClient": + return cast("ResourceUsageTrackerClient", app.state.resource_usage_api) + + @classmethod + def setup(cls, app: FastAPI): + assert app.state # nosec + if exists := getattr(app.state, "resource_usage_api", None): + _logger.warning( + "Skipping setup. Cannot setup more than once %s: %s", cls, exists + ) + return + + assert not hasattr(app.state, "resource_usage_api") # nosec + app_settings: AppSettings = app.state.settings + + app.state.resource_usage_api = api = cls.create(app_settings) + + async def on_startup(): + await api.start() + + async def on_shutdown(): + await api.close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def setup(app: FastAPI): + assert app.state # nosec + ResourceUsageTrackerClient.setup(app) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/socketio.py b/services/director-v2/src/simcore_service_director_v2/modules/socketio.py new file mode 100644 index 00000000000..5a6a561e973 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/modules/socketio.py @@ -0,0 +1,33 @@ +import logging + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from servicelib.socketio_utils import cleanup_socketio_async_pubsub_manager + +from ..core.settings import AppSettings + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI): + settings: AppSettings = app.state.settings + + async def _on_startup() -> None: + assert app.state.rabbitmq_client # nosec + + # Connect to the as an external process in write-only mode + # SEE https://python-socketio.readthedocs.io/en/stable/server.html#emitting-from-external-processes + app.state.external_socketio = socketio.AsyncAioPikaManager( + url=settings.DIRECTOR_V2_RABBITMQ.dsn, + logger=_logger, + write_only=True, + ) + + async def _on_shutdown() -> None: + if external_socketio := getattr(app.state, "external_socketio"): # noqa: B009 + await cleanup_socketio_async_pubsub_manager( + server_manager=external_socketio + ) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/director-v2/src/simcore_service_director_v2/modules/storage.py b/services/director-v2/src/simcore_service_director_v2/modules/storage.py index addad597151..08e18de0aeb 100644 --- 
a/services/director-v2/src/simcore_service_director_v2/modules/storage.py +++ b/services/director-v2/src/simcore_service_director_v2/modules/storage.py @@ -8,34 +8,44 @@ import httpx from fastapi import FastAPI, HTTPException from models_library.users import UserID +from servicelib.fastapi.tracing import setup_httpx_client_tracing +from servicelib.logging_utils import log_decorator from settings_library.s3 import S3Settings +from settings_library.storage import StorageSettings +from settings_library.tracing import TracingSettings # Module's business logic --------------------------------------------- from starlette import status -from ..core.settings import StorageSettings from ..utils.client_decorators import handle_errors, handle_retry from ..utils.clients import unenvelope_or_raise_error -from ..utils.logging_utils import log_decorator logger = logging.getLogger(__name__) # Module's setup logic --------------------------------------------- -def setup(app: FastAPI, settings: StorageSettings): - if not settings: - settings = StorageSettings() +def setup( + app: FastAPI, + storage_settings: StorageSettings | None, + tracing_settings: TracingSettings | None, +): + + if not storage_settings: + storage_settings = StorageSettings() def on_startup() -> None: + client = httpx.AsyncClient( + base_url=f"{storage_settings.api_base_url}", + timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, + ) + if tracing_settings: + setup_httpx_client_tracing(client=client) StorageClient.create( app, - client=httpx.AsyncClient( - base_url=f"{settings.endpoint}", - timeout=app.state.settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT, - ), + client=client, ) - logger.debug("created client for storage: %s", settings.endpoint) + logger.debug("created client for storage: %s", storage_settings.api_base_url) async def on_shutdown() -> None: client = StorageClient.instance(app).client @@ -57,7 +67,8 @@ def create(cls, app: FastAPI, **kwargs): @classmethod def instance(cls, app: FastAPI) -> "StorageClient": - return app.state.storage_client + client: StorageClient = app.state.storage_client + return client @handle_errors("Storage", logger) @handle_retry(logger) @@ -71,5 +82,5 @@ async def get_s3_access(self, user_id: UserID) -> S3Settings: ) resp.raise_for_status() if resp.status_code == status.HTTP_200_OK: - return S3Settings.parse_obj(unenvelope_or_raise_error(resp)) + return S3Settings.model_validate(unenvelope_or_raise_error(resp)) raise HTTPException(status_code=resp.status_code, detail=resp.content) diff --git a/services/director-v2/src/simcore_service_director_v2/utils/base_distributed_identifier.py b/services/director-v2/src/simcore_service_director_v2/utils/base_distributed_identifier.py new file mode 100644 index 00000000000..ea685777a0d --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/utils/base_distributed_identifier.py @@ -0,0 +1,286 @@ +import logging +from abc import ABC, abstractmethod +from asyncio import Task +from datetime import timedelta +from typing import Final, Generic, TypeVar + +from pydantic import NonNegativeInt +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.logging_utils import log_catch, log_context +from servicelib.redis import RedisClientSDK +from servicelib.utils import logged_gather +from settings_library.redis import RedisDatabase + +_logger = logging.getLogger(__name__) + +_REDIS_MAX_CONCURRENCY: Final[NonNegativeInt] = 10 
+_DEFAULT_CLEANUP_INTERVAL: Final[timedelta] = timedelta(minutes=1) + +Identifier = TypeVar("Identifier") +ResourceObject = TypeVar("ResourceObject") +CleanupContext = TypeVar("CleanupContext") + + +class BaseDistributedIdentifierManager( + ABC, Generic[Identifier, ResourceObject, CleanupContext] +): + """Used to implement managers for resources that require book keeping + in a distributed system. + + NOTE: that ``Identifier`` and ``CleanupContext`` are serialized and deserialized + to and from Redis. + + Generics: + Identifier -- a user defined object: used to uniquely identify the resource + ResourceObject -- a user defined object: referring to an existing resource + CleanupContext -- a user defined object: contains all necessary + arguments used for removal and cleanup. + """ + + def __init__( + self, + redis_client_sdk: RedisClientSDK, + *, + cleanup_interval: timedelta = _DEFAULT_CLEANUP_INTERVAL, + ) -> None: + """ + Arguments: + redis_client_sdk -- client connecting to Redis + + Keyword Arguments: + cleanup_interval -- interval at which cleanup for unused + resources runs (default: {_DEFAULT_CLEANUP_INTERVAL}) + """ + + if not redis_client_sdk.redis_dsn.endswith( + f"{RedisDatabase.DISTRIBUTED_IDENTIFIERS}" + ): + msg = ( + f"Redis endpoint {redis_client_sdk.redis_dsn} contains the wrong database." + f"Expected {RedisDatabase.DISTRIBUTED_IDENTIFIERS}" + ) + raise TypeError(msg) + + self._redis_client_sdk = redis_client_sdk + self.cleanup_interval = cleanup_interval + + self._cleanup_task: Task | None = None + + async def setup(self) -> None: + self._cleanup_task = create_periodic_task( + self._cleanup_unused_identifiers, + interval=self.cleanup_interval, + task_name="cleanup_unused_identifiers_task", + ) + + async def shutdown(self) -> None: + if self._cleanup_task: + await cancel_wait_task(self._cleanup_task, max_delay=5) + + @classmethod + def class_path(cls) -> str: + return f"{cls.__module__}.{cls.__name__}" + + @classmethod + def _redis_key_prefix(cls) -> str: + return f"{cls.class_path()}:" + + @classmethod + def _to_redis_key(cls, identifier: Identifier) -> str: + return f"{cls._redis_key_prefix()}{cls._serialize_identifier(identifier)}" + + @classmethod + def _from_redis_key(cls, redis_key: str) -> Identifier: + sad = redis_key.removeprefix(cls._redis_key_prefix()) + return cls._deserialize_identifier(sad) + + async def _get_identifier_context( + self, identifier: Identifier + ) -> CleanupContext | None: + raw: str | None = await self._redis_client_sdk.redis.get( + self._to_redis_key(identifier) + ) + return self._deserialize_cleanup_context(raw) if raw else None + + async def _get_tracked(self) -> dict[Identifier, CleanupContext]: + identifiers: list[Identifier] = [ + self._from_redis_key(redis_key) + for redis_key in await self._redis_client_sdk.redis.keys( + f"{self._redis_key_prefix()}*" + ) + ] + + cleanup_contexts: list[CleanupContext | None] = await logged_gather( + *(self._get_identifier_context(identifier) for identifier in identifiers), + max_concurrency=_REDIS_MAX_CONCURRENCY, + ) + + return { + identifier: cleanup_context + for identifier, cleanup_context in zip( + identifiers, cleanup_contexts, strict=True + ) + # NOTE: cleanup_context will be None if the key was removed before + # recovering all the cleanup_contexts + if cleanup_context is not None + } + + async def _cleanup_unused_identifiers(self) -> None: + # removes no longer used identifiers + tracked_data: dict[Identifier, CleanupContext] = await self._get_tracked() + _logger.info("Will remove unused %s", 
list(tracked_data.keys())) + + for identifier, cleanup_context in tracked_data.items(): + if await self.is_used(identifier, cleanup_context): + continue + + await self.remove(identifier) + + async def create( + self, *, cleanup_context: CleanupContext, **extra_kwargs + ) -> tuple[Identifier, ResourceObject]: + """Used for creating the resources + + Arguments: + cleanup_context -- user defined CleanupContext object + **extra_kwargs -- can be overloaded by the user + + Returns: + tuple[identifier for the resource, resource object] + """ + identifier, result = await self._create(**extra_kwargs) + await self._redis_client_sdk.redis.set( + self._to_redis_key(identifier), + self._serialize_cleanup_context(cleanup_context), + ) + return identifier, result + + async def remove(self, identifier: Identifier, *, reraise: bool = False) -> None: + """Attempts to remove the resource, if an error occurs it is logged. + + Arguments: + identifier -- user chosen identifier for the resource + reraise -- when True raises any exception raised by ``destroy`` (default: {False}) + """ + + cleanup_context = await self._get_identifier_context(identifier) + if cleanup_context is None: + _logger.warning( + "Something went wrong, did not find any context for %s", identifier + ) + return + + with ( + log_context( + _logger, logging.DEBUG, f"{self.__class__}: removing {identifier}" + ), + log_catch(_logger, reraise=reraise), + ): + await self._destroy(identifier, cleanup_context) + + await self._redis_client_sdk.redis.delete(self._to_redis_key(identifier)) + + @classmethod + @abstractmethod + def _deserialize_identifier(cls, raw: str) -> Identifier: + """User provided deserialization for the identifier + + Arguments: + raw -- stream to be deserialized + + Returns: + an identifier object + """ + + @classmethod + @abstractmethod + def _serialize_identifier(cls, identifier: Identifier) -> str: + """User provided serialization for the identifier + + Arguments: + cleanup_context -- user defined identifier object + + Returns: + object encoded as string + """ + + @classmethod + @abstractmethod + def _deserialize_cleanup_context(cls, raw: str) -> CleanupContext: + """User provided deserialization for the context + + Arguments: + raw -- stream to be deserialized + + Returns: + an object of the type chosen by the user + """ + + @classmethod + @abstractmethod + def _serialize_cleanup_context(cls, cleanup_context: CleanupContext) -> str: + """User provided serialization for the context + + Arguments: + cleanup_context -- user defined cleanup context object + + Returns: + object encoded as string + """ + + @abstractmethod + async def is_used( + self, identifier: Identifier, cleanup_context: CleanupContext + ) -> bool: + """Check if the resource associated to the ``identifier`` is + still being used. + # NOTE: a resource can be created but not in use. + + Arguments: + identifier -- user chosen identifier for the resource + cleanup_context -- user defined CleanupContext object + + Returns: + True if ``identifier`` is still being used + """ + + @abstractmethod + async def _create(self, **extra_kwargs) -> tuple[Identifier, ResourceObject]: + """Used INTERNALLY for creating the resources. + # NOTE: should not be used directly, use the public + version ``create`` instead. 
+ + Arguments: + **extra_kwargs -- can be overloaded by the user + + Returns: + tuple[identifier for the resource, resource object] + """ + + @abstractmethod + async def get( + self, identifier: Identifier, **extra_kwargs + ) -> ResourceObject | None: + """If exists, returns the resource. + + Arguments: + identifier -- user chosen identifier for the resource + **extra_kwargs -- can be overloaded by the user + + Returns: + None if the resource does not exit + """ + + @abstractmethod + async def _destroy( + self, identifier: Identifier, cleanup_context: CleanupContext + ) -> None: + """Used to destroy an existing resource + # NOTE: should not be used directly, use the public + version ``remove`` instead. + + Arguments: + identifier -- user chosen identifier for the resource + cleanup_context -- user defined CleanupContext object + """ diff --git a/services/director-v2/src/simcore_service_director_v2/utils/clients.py b/services/director-v2/src/simcore_service_director_v2/utils/clients.py index d01d38a1907..e12cf2d09f0 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/clients.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/clients.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Union +from typing import Any import httpx from fastapi import HTTPException @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -def unenvelope_or_raise_error(resp: httpx.Response) -> Union[list[Any], dict[str, Any]]: +def unenvelope_or_raise_error(resp: httpx.Response) -> list[Any] | dict[str, Any]: """ Director responses are enveloped If successful response, we un-envelop it and return data as a dict diff --git a/services/director-v2/src/simcore_service_director_v2/utils/computations.py b/services/director-v2/src/simcore_service_director_v2/utils/computations.py index 8167097c87f..bd04303dc02 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/computations.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/computations.py @@ -1,25 +1,37 @@ +import datetime as dt import logging -import re -from datetime import datetime from typing import Any +import arrow from models_library.projects_state import RunningState -from models_library.services import SERVICE_KEY_RE, ServiceKeyVersion +from models_library.services import ServiceKeyVersion +from models_library.services_regex import SERVICE_KEY_RE from models_library.users import UserID -from pydantic import parse_obj_as from servicelib.utils import logged_gather -from ..models.domains.comp_tasks import CompTaskAtDB +from ..models.comp_tasks import CompTaskAtDB from ..modules.catalog import CatalogClient from ..modules.db.tables import NodeClass -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) _COMPLETED_STATES = (RunningState.ABORTED, RunningState.FAILED, RunningState.SUCCESS) -_RUNNING_STATES = (RunningState.STARTED, RunningState.RETRY) +_RUNNING_STATES = (RunningState.STARTED,) _TASK_TO_PIPELINE_CONVERSIONS = { # tasks are initially in NOT_STARTED state, then they transition to published (RunningState.PUBLISHED, RunningState.NOT_STARTED): RunningState.PUBLISHED, + # if there are tasks waiting for clusters, then the pipeline is also waiting for a cluster + ( + RunningState.PUBLISHED, + RunningState.NOT_STARTED, + RunningState.WAITING_FOR_CLUSTER, + ): RunningState.WAITING_FOR_CLUSTER, + # if there are tasks waiting for resources and nothing is running/pending, then the pipeline is also waiting for resources + ( + RunningState.PUBLISHED, + 
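# Illustrative sketch (not part of the diff): a minimal concrete manager built on
# `BaseDistributedIdentifierManager` above. Everything here (the identifier being
# a plain string, the cleanup-context model, the in-memory "resource" store) is
# hypothetical and only shows how the abstract hooks fit together.
from pydantic import BaseModel
from servicelib.redis import RedisClientSDK


class _CleanupContext(BaseModel):
    owner: str


class _DummyManager(BaseDistributedIdentifierManager[str, dict, _CleanupContext]):
    def __init__(self, redis_client_sdk: RedisClientSDK) -> None:
        super().__init__(redis_client_sdk)
        self._resources: dict[str, dict] = {}  # stand-in for a real external resource

    @classmethod
    def _deserialize_identifier(cls, raw: str) -> str:
        return raw

    @classmethod
    def _serialize_identifier(cls, identifier: str) -> str:
        return identifier

    @classmethod
    def _deserialize_cleanup_context(cls, raw: str) -> _CleanupContext:
        return _CleanupContext.model_validate_json(raw)

    @classmethod
    def _serialize_cleanup_context(cls, cleanup_context: _CleanupContext) -> str:
        return cleanup_context.model_dump_json()

    async def is_used(self, identifier: str, cleanup_context: _CleanupContext) -> bool:
        return identifier in self._resources

    async def _create(self, **extra_kwargs) -> tuple[str, dict]:
        identifier = f"dummy-{len(self._resources)}"
        self._resources[identifier] = {"payload": extra_kwargs}
        return identifier, self._resources[identifier]

    async def get(self, identifier: str, **extra_kwargs) -> dict | None:
        return self._resources.get(identifier)

    async def _destroy(self, identifier: str, cleanup_context: _CleanupContext) -> None:
        self._resources.pop(identifier, None)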
RunningState.NOT_STARTED, + RunningState.WAITING_FOR_RESOURCES, + ): RunningState.WAITING_FOR_RESOURCES, # if there are PENDING states that means the pipeline was published and is awaiting sidecars ( RunningState.PENDING, @@ -39,7 +51,7 @@ ): RunningState.NOT_STARTED, # if there are only completed states with FAILED --> FAILED (*_COMPLETED_STATES,): RunningState.FAILED, - # if there are only completed states with FAILED --> NOT_STARTED + # if there are only completed states with FAILED and not started ones --> NOT_STARTED ( *_COMPLETED_STATES, RunningState.NOT_STARTED, @@ -52,12 +64,12 @@ RunningState.PUBLISHED, RunningState.PENDING, RunningState.NOT_STARTED, + RunningState.WAITING_FOR_CLUSTER, ): RunningState.STARTED, } def get_pipeline_state_from_task_states(tasks: list[CompTaskAtDB]) -> RunningState: - # compute pipeline state from task states if not tasks: return RunningState.UNKNOWN @@ -65,8 +77,7 @@ def get_pipeline_state_from_task_states(tasks: list[CompTaskAtDB]) -> RunningSta set_states: set[RunningState] = {task.state for task in tasks} if len(set_states) == 1: # there is only one state, so it's the one - the_state = next(iter(set_states)) - return the_state + return next(iter(set_states)) for option, result in _TASK_TO_PIPELINE_CONVERSIONS.items(): if set_states.issubset(option): @@ -75,7 +86,6 @@ def get_pipeline_state_from_task_states(tasks: list[CompTaskAtDB]) -> RunningSta return RunningState.UNKNOWN -_node_key_re = re.compile(SERVICE_KEY_RE) _STR_TO_NODECLASS = { "comp": NodeClass.COMPUTATIONAL, "dynamic": NodeClass.INTERACTIVE, @@ -84,16 +94,17 @@ def get_pipeline_state_from_task_states(tasks: list[CompTaskAtDB]) -> RunningSta def to_node_class(service_key: str) -> NodeClass: - match = _node_key_re.match(service_key) + match = SERVICE_KEY_RE.match(service_key) if match: - node_class = _STR_TO_NODECLASS.get(match.group(3)) + node_class = _STR_TO_NODECLASS.get(match.group("type")) if node_class: return node_class raise ValueError def is_pipeline_running(pipeline_state: RunningState) -> bool: - return pipeline_state.is_running() + is_running: bool = pipeline_state.is_running() + return is_running def is_pipeline_stopped(pipeline_state: RunningState) -> bool: @@ -106,8 +117,7 @@ async def find_deprecated_tasks( task_key_versions: list[ServiceKeyVersion], catalog_client: CatalogClient, ) -> list[ServiceKeyVersion]: - - task_services = await logged_gather( + services_details = await logged_gather( *( catalog_client.get_service( user_id=user_id, @@ -115,21 +125,28 @@ async def find_deprecated_tasks( service_version=key_version.version, product_name=product_name, ) - for key_version in task_key_versions + for key_version in set(task_key_versions) ) ) - today = datetime.utcnow() + service_key_version_to_details = { + ServiceKeyVersion.model_construct( + key=details["key"], version=details["version"] + ): details + for details in services_details + } + today = dt.datetime.now(tz=dt.UTC) def _is_service_deprecated(service: dict[str, Any]) -> bool: if deprecation_date := service.get("deprecated"): - deprecation_date = parse_obj_as(datetime, deprecation_date) - return today > deprecation_date + deprecation_date = arrow.get(deprecation_date).datetime.replace( + tzinfo=dt.UTC + ) + is_deprecated: bool = today > deprecation_date + return is_deprecated return False - deprecated_tasks = [ + return [ task - for task, service in zip(task_key_versions, task_services) - if _is_service_deprecated(service) + for task in task_key_versions + if 
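# Illustrative sketch (not part of the diff): how `get_pipeline_state_from_task_states`
# above aggregates task states. The distinct states are collected into a set and
# the first conversion-table entry that contains that set decides the pipeline
# state. The example states below are hypothetical (a partially finished pipeline).
_set_states = {RunningState.SUCCESS, RunningState.STARTED, RunningState.PENDING}
_pipeline_state = RunningState.UNKNOWN
for _option, _result in _TASK_TO_PIPELINE_CONVERSIONS.items():
    if _set_states.issubset(_option):
        _pipeline_state = _result
        break
# with the table above this is expected to resolve to RunningState.STARTED,
# i.e. the pipeline is reported as running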
_is_service_deprecated(service_key_version_to_details[task]) ] - - return deprecated_tasks diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dags.py b/services/director-v2/src/simcore_service_director_v2/utils/dags.py index a954d6545c1..a1ae4762278 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/dags.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/dags.py @@ -1,32 +1,34 @@ +import contextlib +import datetime import logging from copy import deepcopy -from typing import Any +from typing import Any, cast +import arrow import networkx as nx -from models_library.projects import Workbench -from models_library.projects_nodes import NodeID, NodeState -from models_library.projects_nodes_io import PortLink +from models_library.projects import NodesDict +from models_library.projects_nodes import NodeState +from models_library.projects_nodes_io import NodeID, NodeIDStr, PortLink from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState from models_library.utils.nodes import compute_node_hash -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB -from .computations import NodeClass, to_node_class +from ..models.comp_tasks import CompTaskAtDB +from ..modules.db.tables import NodeClass +from .computations import to_node_class -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -def _is_node_computational(node_key: str) -> bool: - try: - return to_node_class(node_key) == NodeClass.COMPUTATIONAL - except ValueError: - return False +kNODE_MODIFIED_STATE = "modified_state" +kNODE_DEPENDENCIES_TO_COMPUTE = "dependencies_state" -def create_complete_dag(workbench: Workbench) -> nx.DiGraph: +def create_complete_dag(workbench: NodesDict) -> nx.DiGraph: """creates a complete graph out of the project workbench""" - dag_graph = nx.DiGraph() + dag_graph: nx.DiGraph = nx.DiGraph() for node_id, node in workbench.items(): + assert node.state # nosec dag_graph.add_node( node_id, name=node.label, @@ -36,20 +38,22 @@ def create_complete_dag(workbench: Workbench) -> nx.DiGraph: run_hash=node.run_hash, outputs=node.outputs, state=node.state.current_status, + node_class=to_node_class(node.key), ) - for input_node_id in node.input_nodes: - predecessor_node = workbench.get(str(input_node_id)) - if predecessor_node: - dag_graph.add_edge(str(input_node_id), node_id) + if node.input_nodes: + for input_node_id in node.input_nodes: + predecessor_node = workbench.get(NodeIDStr(input_node_id)) + if predecessor_node: + dag_graph.add_edge(str(input_node_id), node_id) return dag_graph def create_complete_dag_from_tasks(tasks: list[CompTaskAtDB]) -> nx.DiGraph: - dag_graph = nx.DiGraph() + dag_graph: nx.DiGraph = nx.DiGraph() for task in tasks: dag_graph.add_node( - str(task.node_id), + f"{task.node_id}", name=task.job_id, key=task.image.name, version=task.image.tag, @@ -57,17 +61,20 @@ def create_complete_dag_from_tasks(tasks: list[CompTaskAtDB]) -> nx.DiGraph: run_hash=task.run_hash, outputs=task.outputs, state=task.state, + node_class=task.node_class, + progress=task.progress, ) - for input_data in task.inputs.values(): - if isinstance(input_data, PortLink): - dag_graph.add_edge(str(input_data.node_uuid), str(task.node_id)) + if task.inputs: + for input_data in task.inputs.values(): + if isinstance(input_data, PortLink): + dag_graph.add_edge(str(input_data.node_uuid), f"{task.node_id}") return dag_graph -async def compute_node_modified_state( - nodes_data_view: 
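# Illustrative sketch (not part of the diff): the timezone-aware deprecation check
# used by `find_deprecated_tasks` above, pulled out on its own. The deprecation
# date is a hypothetical value.
_service = {"deprecated": "2023-01-01T00:00:00"}
_deprecation_date = arrow.get(_service["deprecated"]).datetime.replace(tzinfo=dt.UTC)
_is_deprecated = dt.datetime.now(tz=dt.UTC) > _deprecation_date  # True once the date has passed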
nx.classes.reportviews.NodeDataView, node_id: NodeID +async def _compute_node_modified_state( + graph_data: nx.classes.reportviews.NodeDataView, node_id: NodeID ) -> bool: - node = nodes_data_view[str(node_id)] + node = graph_data[f"{node_id}"] # if the node state is in the modified state already if node["state"] in [ None, @@ -84,7 +91,8 @@ async def compute_node_modified_state( # maybe our inputs changed? let's compute the node hash and compare with the saved one async def get_node_io_payload_cb(node_id: NodeID) -> dict[str, Any]: - return nodes_data_view[str(node_id)] + result: dict[str, Any] = graph_data[f"{node_id}"] + return result computed_hash = await compute_node_hash(node_id, get_node_io_payload_cb) if computed_hash != node["run_hash"]: @@ -92,58 +100,53 @@ async def get_node_io_payload_cb(node_id: NodeID) -> dict[str, Any]: return False -async def compute_node_dependencies_state(nodes_data_view, node_id) -> set[NodeID]: - node = nodes_data_view[str(node_id)] +async def _compute_node_dependencies_state(graph_data, node_id) -> set[NodeID]: + node = graph_data[f"{node_id}"] # check if the previous node is outdated or waits for dependencies... in which case this one has to wait non_computed_dependencies: set[NodeID] = set() for input_port in node.get("inputs", {}).values(): if isinstance(input_port, PortLink): - if node_needs_computation(nodes_data_view, input_port.node_uuid): + if _node_needs_computation(graph_data, input_port.node_uuid): non_computed_dependencies.add(input_port.node_uuid) # all good. ready return non_computed_dependencies -kNODE_MODIFIED_STATE = "modified_state" -kNODE_DEPENDENCIES_TO_COMPUTE = "dependencies_state" - - -async def compute_node_states( - nodes_data_view: nx.classes.reportviews.NodeDataView, node_id: NodeID -): - node = nodes_data_view[str(node_id)] - node[kNODE_MODIFIED_STATE] = await compute_node_modified_state( - nodes_data_view, node_id - ) - node[kNODE_DEPENDENCIES_TO_COMPUTE] = await compute_node_dependencies_state( - nodes_data_view, node_id +async def _compute_node_states( + graph_data: nx.classes.reportviews.NodeDataView, node_id: NodeID +) -> None: + node = graph_data[f"{node_id}"] + node[kNODE_MODIFIED_STATE] = await _compute_node_modified_state(graph_data, node_id) + node[kNODE_DEPENDENCIES_TO_COMPUTE] = await _compute_node_dependencies_state( + graph_data, node_id ) -def node_needs_computation( - nodes_data_view: nx.classes.reportviews.NodeDataView, node_id: NodeID +def _node_needs_computation( + graph_data: nx.classes.reportviews.NodeDataView, node_id: NodeID ) -> bool: - node = nodes_data_view[str(node_id)] - return node.get(kNODE_MODIFIED_STATE, False) or node.get( + node = graph_data[f"{node_id}"] + needs_computation: bool = node.get(kNODE_MODIFIED_STATE, False) or node.get( kNODE_DEPENDENCIES_TO_COMPUTE, None ) + return needs_computation async def _set_computational_nodes_states(complete_dag: nx.DiGraph) -> None: - nodes_data_view: nx.classes.reportviews.NodeDataView = complete_dag.nodes.data() - for node in nx.topological_sort(complete_dag): - if _is_node_computational(nodes_data_view[node].get("key", "")): - await compute_node_states(nodes_data_view, node) + graph_data: nx.classes.reportviews.NodeDataView = complete_dag.nodes.data() + for node_id in nx.algorithms.dag.topological_sort(complete_dag): + if graph_data[node_id]["node_class"] is NodeClass.COMPUTATIONAL: + await _compute_node_states(graph_data, node_id) async def create_minimal_computational_graph_based_on_selection( complete_dag: nx.DiGraph, selected_nodes: 
list[NodeID], force_restart: bool ) -> nx.DiGraph: - nodes_data_view: nx.classes.reportviews.NodeDataView = complete_dag.nodes.data() + graph_data: nx.classes.reportviews.NodeDataView = complete_dag.nodes.data() try: # first pass, traversing in topological order to correctly get the dependencies, set the nodes states await _set_computational_nodes_states(complete_dag) - except nx.NetworkXUnfeasible: + except nx.exception.NetworkXUnfeasible: # not acyclic, return an empty graph return nx.DiGraph() @@ -154,9 +157,9 @@ async def create_minimal_computational_graph_based_on_selection( minimal_nodes_selection.update( { n - for n, _ in nodes_data_view - if _is_node_computational(nodes_data_view[n]["key"]) - and (force_restart or node_needs_computation(nodes_data_view, n)) + for n, _ in graph_data + if graph_data[n]["node_class"] is NodeClass.COMPUTATIONAL + and (force_restart or _node_needs_computation(graph_data, n)) } ) else: @@ -166,41 +169,92 @@ async def create_minimal_computational_graph_based_on_selection( { n for n in nx.bfs_tree(complete_dag, f"{node}", reverse=True) - if _is_node_computational(nodes_data_view[n]["key"]) - and node_needs_computation(nodes_data_view, n) + if graph_data[n]["node_class"] is NodeClass.COMPUTATIONAL + and _node_needs_computation(graph_data, n) } ) - if force_restart and _is_node_computational( - nodes_data_view[f"{node}"]["key"] + if ( + force_restart + and graph_data[f"{node}"]["node_class"] is NodeClass.COMPUTATIONAL ): minimal_nodes_selection.add(f"{node}") - return complete_dag.subgraph(minimal_nodes_selection) + return cast(nx.DiGraph, complete_dag.subgraph(minimal_nodes_selection)) + + +def compute_pipeline_started_timestamp( + pipeline_dag: nx.DiGraph, comp_tasks: list[CompTaskAtDB] +) -> datetime.datetime | None: + if not pipeline_dag.nodes: + return None + node_id_to_comp_task: dict[NodeIDStr, CompTaskAtDB] = { + NodeIDStr(f"{task.node_id}"): task for task in comp_tasks + } + TOMORROW = arrow.utcnow().shift(days=1).datetime + pipeline_started_at: datetime.datetime | None = min( + node_id_to_comp_task[node_id].start or TOMORROW + for node_id in pipeline_dag.nodes + ) + if pipeline_started_at == TOMORROW: + pipeline_started_at = None + return pipeline_started_at + + +def compute_pipeline_stopped_timestamp( + pipeline_dag: nx.DiGraph, comp_tasks: list[CompTaskAtDB] +) -> datetime.datetime | None: + if not pipeline_dag.nodes: + return None + node_id_to_comp_task: dict[NodeIDStr, CompTaskAtDB] = { + NodeIDStr(f"{task.node_id}"): task for task in comp_tasks + } + TOMORROW = arrow.utcnow().shift(days=1).datetime + pipeline_stopped_at: datetime.datetime | None = max( + node_id_to_comp_task[node_id].end or TOMORROW for node_id in pipeline_dag.nodes + ) + if pipeline_stopped_at == TOMORROW: + pipeline_stopped_at = None + return pipeline_stopped_at async def compute_pipeline_details( complete_dag: nx.DiGraph, pipeline_dag: nx.DiGraph, comp_tasks: list[CompTaskAtDB] ) -> PipelineDetails: - try: - # FIXME: this problem of cyclic graphs for control loops create all kinds of issues that must be fixed + with contextlib.suppress(nx.exception.NetworkXUnfeasible): + # NOTE: this problem of cyclic graphs for control loops create all kinds of issues that must be fixed # first pass, traversing in topological order to correctly get the dependencies, set the nodes states await _set_computational_nodes_states(complete_dag) - except nx.NetworkXUnfeasible: - # not acyclic - pass + + # NOTE: the latest progress is available in comp_tasks only + node_id_to_comp_task: 
dict[NodeIDStr, CompTaskAtDB] = { + NodeIDStr(f"{task.node_id}"): task for task in comp_tasks + } + pipeline_progress = None + if len(pipeline_dag.nodes) > 0: + + pipeline_progress = sum( + (node_id_to_comp_task[node_id].progress or 0) / len(pipeline_dag.nodes) + for node_id in pipeline_dag.nodes + if node_id_to_comp_task[node_id].progress is not None + ) + pipeline_progress = max(0.0, min(pipeline_progress, 1.0)) + return PipelineDetails( - adjacency_list=nx.to_dict_of_lists(pipeline_dag), + adjacency_list=nx.convert.to_dict_of_lists(pipeline_dag), + progress=pipeline_progress, node_states={ node_id: NodeState( modified=node_data.get(kNODE_MODIFIED_STATE, False), dependencies=node_data.get(kNODE_DEPENDENCIES_TO_COMPUTE, set()), - currentStatus=next( - (task.state for task in comp_tasks if str(task.node_id) == node_id), - RunningState.UNKNOWN, + current_status=node_id_to_comp_task[node_id].state, + progress=( + node_id_to_comp_task[node_id].progress + if node_id_to_comp_task[node_id].progress is not None + else None ), ) for node_id, node_data in complete_dag.nodes.data() - if _is_node_computational(node_data.get("key", "")) + if node_data["node_class"] is NodeClass.COMPUTATIONAL }, ) @@ -208,8 +262,11 @@ async def compute_pipeline_details( def find_computational_node_cycles(dag: nx.DiGraph) -> list[list[str]]: """returns a list of nodes part of a cycle and computational, which is currently forbidden.""" computational_node_cycles = [] - list_potential_cycles = nx.simple_cycles(dag) + list_potential_cycles = nx.algorithms.cycles.simple_cycles(dag) for cycle in list_potential_cycles: - if any(_is_node_computational(dag.nodes[node_id]["key"]) for node_id in cycle): + if any( + dag.nodes[node_id]["node_class"] is NodeClass.COMPUTATIONAL + for node_id in cycle + ): computational_node_cycles.append(deepcopy(cycle)) return computational_node_cycles diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask.py b/services/director-v2/src/simcore_service_director_v2/utils/dask.py index 8a1816a7a97..7ffbcd6e9f8 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/dask.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/dask.py @@ -1,34 +1,46 @@ -import asyncio import collections import logging -from typing import Any, Awaitable, Callable, Final, Iterable, Optional, Union, get_args -from uuid import uuid4 +from collections.abc import Coroutine, Generator +from typing import Any, ParamSpec, TypeVar, cast import distributed -from aiopg.sa.engine import Engine +from common_library.json_serialization import json_dumps from dask_task_models_library.container_tasks.io import ( FileUrl, - PortValue, TaskInputData, TaskOutputData, TaskOutputDataSchema, ) +from dask_task_models_library.container_tasks.protocol import ( + ContainerEnvsDict, + ContainerLabelsDict, + TaskOwner, +) +from dask_task_models_library.container_tasks.utils import parse_dask_job_id from fastapi import FastAPI -from models_library.clusters import ClusterID +from models_library.api_schemas_directorv2.computations import TaskLogFileGet +from models_library.api_schemas_directorv2.services import NodeRequirements +from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels from models_library.errors import ErrorDict -from models_library.projects import ProjectID -from models_library.projects_nodes_io import NodeID +from models_library.projects import ProjectID, ProjectIDStr +from models_library.projects_nodes_io import NodeID, NodeIDStr +from models_library.services import 
ServiceKey, ServiceVersion +from models_library.services_types import ServiceRunID from models_library.users import UserID -from pydantic import AnyUrl, ByteSize, ValidationError -from servicelib.json_serialization import json_dumps +from models_library.wallets import WalletID +from pydantic import AnyUrl, ByteSize, TypeAdapter, ValidationError from simcore_sdk import node_ports_v2 from simcore_sdk.node_ports_common.exceptions import ( + NodeportsException, S3InvalidPathError, StorageInvalidCall, + UnboundPortError, ) from simcore_sdk.node_ports_v2 import FileLinkType, Port, links, port_utils from simcore_sdk.node_ports_v2.links import ItemValue as _NPItemValue +from sqlalchemy.ext.asyncio import AsyncEngine +from ..constants import UNDEFINED_API_BASE_URL, UNDEFINED_DOCKER_LABEL from ..core.errors import ( ComputationalBackendNotConnectedError, ComputationalSchedulerChangedError, @@ -36,19 +48,17 @@ MissingComputationalResourcesError, PortsValidationError, ) -from ..models.domains.comp_tasks import Image -from ..models.schemas.services import NodeRequirements - -logger = logging.getLogger(__name__) +from ..models.comp_runs import ProjectMetadataDict, RunMetadataDict +from ..models.comp_tasks import Image +from ..modules.osparc_variables.substitutions import ( + resolve_and_substitute_session_variables_in_specs, + substitute_vendor_secrets_in_specs, +) -ServiceKeyStr = str -ServiceVersionStr = str +_logger = logging.getLogger(__name__) -_PVType = Optional[_NPItemValue] -assert len(get_args(_PVType)) == len( # nosec - get_args(PortValue) -), "Types returned by port.get_value() -> _PVType MUST map one-to-one to PortValue. See compute_input_data" +_PVType = _NPItemValue | None def _get_port_validation_errors(port_key: str, err: ValidationError) -> list[ErrorDict]: @@ -56,40 +66,14 @@ def _get_port_validation_errors(port_key: str, err: ValidationError) -> list[Err for error in errors: assert error["loc"][-1] != (port_key,) error["loc"] = error["loc"] + (port_key,) - return errors + return list(errors) -def generate_dask_job_id( - service_key: ServiceKeyStr, - service_version: ServiceVersionStr, +async def create_node_ports( + db_engine: AsyncEngine, user_id: UserID, project_id: ProjectID, node_id: NodeID, -) -> str: - """creates a dask job id: - The job ID shall contain the user_id, project_id, node_id - Also, it must be unique - and it is shown in the Dask scheduler dashboard website - """ - return f"{service_key}:{service_version}:userid_{user_id}:projectid_{project_id}:nodeid_{node_id}:uuid_{uuid4()}" - - -def parse_dask_job_id( - job_id: str, -) -> tuple[ServiceKeyStr, ServiceVersionStr, UserID, ProjectID, NodeID]: - parts = job_id.split(":") - assert len(parts) == 6 # nosec - return ( - parts[0], - parts[1], - UserID(parts[2][len("userid_") :]), - ProjectID(parts[3][len("projectid_") :]), - NodeID(parts[4][len("nodeid_") :]), - ) - - -async def create_node_ports( - db_engine: Engine, user_id: UserID, project_id: ProjectID, node_id: NodeID ) -> node_ports_v2.Nodeports: """ This function create a nodeports object by fetching the node state from the database @@ -107,19 +91,21 @@ async def create_node_ports( db_manager = node_ports_v2.DBManager(db_engine) return await node_ports_v2.ports( user_id=user_id, - project_id=f"{project_id}", - node_uuid=f"{node_id}", + project_id=ProjectIDStr(f"{project_id}"), + node_uuid=TypeAdapter(NodeIDStr).validate_python(f"{node_id}"), db_manager=db_manager, ) except ValidationError as err: - raise PortsValidationError(project_id, node_id, err.errors()) from 
err + raise PortsValidationError( + project_id=project_id, node_id=node_id, errors_list=list(err.errors()) + ) from err async def parse_output_data( - db_engine: Engine, + db_engine: AsyncEngine, job_id: str, data: TaskOutputData, - ports: Optional[node_ports_v2.Nodeports] = None, + ports: node_ports_v2.Nodeports | None = None, ) -> None: """ @@ -132,7 +118,7 @@ async def parse_output_data( project_id, node_id, ) = parse_dask_job_id(job_id) - logger.debug( + _logger.debug( "parsing output %s of dask task for %s:%s of user %s on project '%s' and node '%s'", json_dumps(data, indent=2), service_key, @@ -152,7 +138,7 @@ async def parse_output_data( ports_errors = [] for port_key, port_value in data.items(): - value_to_transfer: Optional[links.ItemValue] = None + value_to_transfer: links.ItemValue | None = None if isinstance(port_value, FileUrl): value_to_transfer = port_value.url else: @@ -162,45 +148,49 @@ async def parse_output_data( await (await ports.outputs)[port_key].set_value(value_to_transfer) except ValidationError as err: ports_errors.extend(_get_port_validation_errors(port_key, err)) + except UnboundPortError as err: + ports_errors.extend( + [ + { + "loc": ( + f"{project_id}", + f"{node_id}", + f"{port_key}", + ), + "msg": str(err), + "type": "unbound_port", + } + ] + ) if ports_errors: - raise PortsValidationError(project_id, node_id, ports_errors) + raise PortsValidationError( + project_id=project_id, node_id=node_id, errors_list=ports_errors + ) async def compute_input_data( - app: FastAPI, - user_id: UserID, + *, project_id: ProjectID, node_id: NodeID, file_link_type: FileLinkType, - ports: Optional[node_ports_v2.Nodeports] = None, + node_ports: node_ports_v2.Nodeports, ) -> TaskInputData: """Retrieves values registered to the inputs of project_id/node_id - - - ports is optional because - :raises PortsValidationError: when inputs ports validation fail """ - if ports is None: - ports = await create_node_ports( - db_engine=app.state.engine, - user_id=user_id, - project_id=project_id, - node_id=node_id, - ) - - input_data = {} + input_data: dict[str, Any] = {} ports_errors = [] port: Port - for port in (await ports.inputs).values(): + for port in (await node_ports.inputs).values(): try: value: _PVType = await port.get_value(file_link_type=file_link_type) # Mapping _PVType -> PortValue if isinstance(value, AnyUrl): - logger.debug("Creating file url for %s", f"{port=}") + _logger.debug("Creating file url for %s", f"{port=}") input_data[port.key] = FileUrl( url=value, file_mapping=( @@ -217,37 +207,28 @@ async def compute_input_data( ports_errors.extend(_get_port_validation_errors(port.key, err)) if ports_errors: - raise PortsValidationError(project_id, node_id, ports_errors) + raise PortsValidationError( + project_id=project_id, node_id=node_id, errors_list=ports_errors + ) - return TaskInputData.parse_obj(input_data) + return TaskInputData.model_validate(input_data) async def compute_output_data_schema( - app: FastAPI, + *, user_id: UserID, project_id: ProjectID, node_id: NodeID, file_link_type: FileLinkType, - ports: Optional[node_ports_v2.Nodeports] = None, + node_ports: node_ports_v2.Nodeports, ) -> TaskOutputDataSchema: """ :raises PortsValidationError """ - if ports is None: - # Based on when this function is normally called, - # it is very unlikely that NodePorts raise an exception here - # This function only needs the outputs but the design of NodePorts - # will validate all inputs and outputs. 
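Both `parse_output_data` and `compute_input_data` above accumulate per-port validation problems and re-raise them as a single `PortsValidationError`, tagging each error's `loc` with the offending port key. A rough standalone sketch of that accumulation pattern with plain pydantic; the `PortNumber` model and this local `PortsValidationError` are invented for the example and are not the project's classes:

```python
from typing import Any

from pydantic import BaseModel, ValidationError


class PortsValidationError(Exception):
    """aggregates validation errors from several ports (illustrative stand-in)"""

    def __init__(self, errors_list: list[dict[str, Any]]) -> None:
        super().__init__(f"{len(errors_list)} invalid port(s)")
        self.errors_list = errors_list


class PortNumber(BaseModel):
    # hypothetical port schema: each port value must be an integer
    value: int


def _tag_errors(port_key: str, err: ValidationError) -> list[dict[str, Any]]:
    # append the offending port key to each error location, as done above
    errors = err.errors()
    for error in errors:
        error["loc"] = error["loc"] + (port_key,)
    return list(errors)


def validate_ports(raw_ports: dict[str, Any]) -> dict[str, PortNumber]:
    validated: dict[str, PortNumber] = {}
    ports_errors: list[dict[str, Any]] = []
    for port_key, raw_value in raw_ports.items():
        try:
            validated[port_key] = PortNumber(value=raw_value)
        except ValidationError as err:
            ports_errors.extend(_tag_errors(port_key, err))
    if ports_errors:
        # raise once, carrying every collected port error
        raise PortsValidationError(ports_errors)
    return validated


try:
    validate_ports({"in_1": 3, "in_2": "not-a-number"})
except PortsValidationError as exc:
    print(exc.errors_list)  # each error's loc ends with the port key, e.g. (..., 'in_2')
```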
- ports = await create_node_ports( - db_engine=app.state.engine, - user_id=user_id, - project_id=project_id, - node_id=node_id, - ) - output_data_schema = {} - for port in (await ports.outputs).values(): + output_data_schema: dict[str, Any] = {} + for port in (await node_ports.outputs).values(): output_data_schema[port.key] = {"required": port.default_value is None} if port_utils.is_file_type(port.property_type): @@ -255,24 +236,29 @@ async def compute_output_data_schema( user_id=user_id, project_id=f"{project_id}", node_id=f"{node_id}", - file_name=next(iter(port.file_to_key_map)) - if port.file_to_key_map - else port.key, + file_name=( + next(iter(port.file_to_key_map)) + if port.file_to_key_map + else port.key + ), link_type=file_link_type, file_size=ByteSize(0), # will create a single presigned link + sha256_checksum=None, ) assert value_links.urls # nosec assert len(value_links.urls) == 1 # nosec output_data_schema[port.key].update( { - "mapping": next(iter(port.file_to_key_map)) - if port.file_to_key_map - else None, + "mapping": ( + next(iter(port.file_to_key_map)) + if port.file_to_key_map + else None + ), "url": f"{value_links.urls[0]}", } ) - return TaskOutputDataSchema.parse_obj(output_data_schema) + return TaskOutputDataSchema.model_validate(output_data_schema) _LOGS_FILE_NAME = "logs.zip" @@ -291,23 +277,101 @@ async def compute_service_log_file_upload_link( file_name=_LOGS_FILE_NAME, link_type=file_link_type, file_size=ByteSize(0), # will create a single presigned link + sha256_checksum=None, ) - return value_links.urls[0] + url: AnyUrl = value_links.urls[0] + return url -async def get_service_log_file_download_link( +def compute_task_labels( + *, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + run_metadata: RunMetadataDict, + node_requirements: NodeRequirements, +) -> ContainerLabelsDict: + """ + Raises: + ValidationError + """ + product_name = run_metadata.get("product_name", UNDEFINED_DOCKER_LABEL) + standard_simcore_labels = StandardSimcoreDockerLabels.model_validate( + { + "user_id": user_id, + "project_id": project_id, + "node_id": node_id, + "product_name": product_name, + "simcore_user_agent": run_metadata.get( + "simcore_user_agent", UNDEFINED_DOCKER_LABEL + ), + "swarm_stack_name": UNDEFINED_DOCKER_LABEL, # NOTE: there is currently no need for this label in the comp backend + "memory_limit": node_requirements.ram, + "cpu_limit": node_requirements.cpu, + } + ).to_simcore_runtime_docker_labels() + return standard_simcore_labels | TypeAdapter(ContainerLabelsDict).validate_python( + { + DockerLabelKey.from_key(k): f"{v}" + for k, v in run_metadata.items() + if k not in ["product_name", "simcore_user_agent"] + }, + ) + + +async def compute_task_envs( + app: FastAPI, + *, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + node_image: Image, + metadata: RunMetadataDict, + resource_tracking_run_id: ServiceRunID, + wallet_id: WalletID | None, +) -> ContainerEnvsDict: + product_name = metadata.get("product_name", UNDEFINED_DOCKER_LABEL) + product_api_base_url = metadata.get("product_api_base_url", UNDEFINED_API_BASE_URL) + task_envs = node_image.envs + if task_envs: + vendor_substituted_envs = await substitute_vendor_secrets_in_specs( + app, + cast(dict[str, Any], node_image.envs), + service_key=TypeAdapter(ServiceKey).validate_python(node_image.name), + service_version=TypeAdapter(ServiceVersion).validate_python(node_image.tag), + product_name=product_name, + ) + resolved_envs = await resolve_and_substitute_session_variables_in_specs( + app, + 
vendor_substituted_envs, + user_id=user_id, + product_name=product_name, + product_api_base_url=product_api_base_url, + project_id=project_id, + node_id=node_id, + service_run_id=resource_tracking_run_id, + wallet_id=wallet_id, + ) + # NOTE: see https://github.com/ITISFoundation/osparc-simcore/issues/3638 + # we currently do not validate as we are using illegal docker key names with underscores + task_envs = cast(ContainerEnvsDict, resolved_envs) + + return task_envs + + +async def _get_service_log_file_download_link( user_id: UserID, project_id: ProjectID, node_id: NodeID, file_link_type: FileLinkType, -) -> Optional[AnyUrl]: +) -> AnyUrl | None: """Returns None if log file is not available (e.g. when tasks is not done) : raises StorageServerIssue : raises NodeportsException """ try: - value_link = await port_utils.get_download_link_from_storage_overload( + value_link: AnyUrl = await port_utils.get_download_link_from_storage_overload( user_id=user_id, project_id=f"{project_id}", node_id=f"{node_id}", @@ -315,18 +379,42 @@ async def get_service_log_file_download_link( link_type=file_link_type, ) return value_link - except (S3InvalidPathError, StorageInvalidCall) as err: - logger.debug("Log for task %s not found: %s", f"{project_id=}/{node_id=}", err) + _logger.debug("Log for task %s not found: %s", f"{project_id=}/{node_id=}", err) return None +async def get_task_log_file( + user_id: UserID, project_id: ProjectID, node_id: NodeID +) -> TaskLogFileGet: + try: + log_file_url = await _get_service_log_file_download_link( + user_id, project_id, node_id, file_link_type=FileLinkType.PRESIGNED + ) + + except NodeportsException as err: + # Unexpected error: Cannot determine the cause of failure + # to get donwload link and cannot handle it automatically. + # Will treat it as "not available" and log a warning + log_file_url = None + _logger.warning( + "Failed to get log-file of %s: %s.", + f"{user_id=}/{project_id=}/{node_id=}", + err, + ) + + return TaskLogFileGet( + task_id=node_id, + download_link=log_file_url, + ) + + async def clean_task_output_and_log_files_if_invalid( - db_engine: Engine, + db_engine: AsyncEngine, user_id: UserID, project_id: ProjectID, node_id: NodeID, - ports: Optional[node_ports_v2.Nodeports] = None, + ports: node_ports_v2.Nodeports | None = None, ) -> None: """ @@ -347,7 +435,7 @@ async def clean_task_output_and_log_files_if_invalid( user_id, f"{project_id}", f"{node_id}", file_name ): continue - logger.debug("entry %s is invalid, cleaning...", port.key) + _logger.debug("entry %s is invalid, cleaning...", port.key) await port_utils.delete_target_link( user_id, f"{project_id}", f"{node_id}", file_name ) @@ -363,58 +451,23 @@ async def clean_task_output_and_log_files_if_invalid( ) -async def dask_sub_consumer( - dask_sub: distributed.Sub, - handler: Callable[[str], Awaitable[None]], -): - async for dask_event in dask_sub: - logger.debug( - "received dask event '%s' of topic %s", - dask_event, - dask_sub.name, - ) - await handler(dask_event) - await asyncio.sleep(0.010) - - -async def dask_sub_consumer_task( - dask_sub: distributed.Sub, - handler: Callable[[str], Awaitable[None]], -): - while True: - try: - logger.info("starting dask consumer task for topic '%s'", dask_sub.name) - await dask_sub_consumer(dask_sub, handler) - except asyncio.CancelledError: - logger.info("stopped dask consumer task for topic '%s'", dask_sub.name) - raise - except Exception: # pylint: disable=broad-except - _REST_TIMEOUT_S: Final[int] = 1 - logger.exception( - "unknown exception in dask 
consumer task for topic '%s', restarting task in %s sec...", - dask_sub.name, - _REST_TIMEOUT_S, - ) - await asyncio.sleep(_REST_TIMEOUT_S) - - def from_node_reqs_to_dask_resources( node_reqs: NodeRequirements, -) -> dict[str, Union[int, float]]: +) -> dict[str, int | float]: """Dask resources are set such as {"CPU": X.X, "GPU": Y.Y, "RAM": INT}""" - dask_resources = node_reqs.dict( + dask_resources: dict[str, int | float] = node_reqs.model_dump( exclude_unset=True, by_alias=True, exclude_none=True, ) - logger.debug("transformed to dask resources: %s", dask_resources) + _logger.debug("transformed to dask resources: %s", dask_resources) return dask_resources def check_scheduler_is_still_the_same( original_scheduler_id: str, client: distributed.Client ): - logger.debug("current %s", f"{client.scheduler_info()=}") + _logger.debug("current %s", f"{client.scheduler_info()=}") if "id" not in client.scheduler_info(): raise ComputationalSchedulerChangedError( original_scheduler_id=original_scheduler_id, @@ -422,7 +475,7 @@ def check_scheduler_is_still_the_same( ) current_scheduler_id = client.scheduler_info()["id"] if current_scheduler_id != original_scheduler_id: - logger.error("The computational backend changed!") + _logger.error("The computational backend changed!") raise ComputationalSchedulerChangedError( original_scheduler_id=original_scheduler_id, current_scheduler_id=current_scheduler_id, @@ -435,16 +488,60 @@ def check_communication_with_scheduler_is_open(client: distributed.Client): and client.scheduler_comm.comm is not None and client.scheduler_comm.comm.closed() ): - raise ComputationalBackendNotConnectedError() + raise ComputationalBackendNotConnectedError def check_scheduler_status(client: distributed.Client): client_status = client.status if client_status not in "running": - logger.error( + _logger.error( "The computational backend is not connected!", ) - raise ComputationalBackendNotConnectedError() + raise ComputationalBackendNotConnectedError + + +def _can_task_run_on_worker( + task_resources: dict[str, Any], worker_resources: dict[str, Any] +) -> bool: + def gen_check( + task_resources: dict[str, Any], worker_resources: dict[str, Any] + ) -> Generator[bool, None, None]: + for name, required_value in task_resources.items(): + if required_value is None: + yield True + elif worker_has := worker_resources.get(name): + yield worker_has >= required_value + else: + yield False + + return all(gen_check(task_resources, worker_resources)) + + +def _cluster_missing_resources( + task_resources: dict[str, Any], cluster_resources: dict[str, Any] +) -> list[str]: + return [r for r in task_resources if r not in cluster_resources] + + +def _to_human_readable_resource_values(resources: dict[str, Any]) -> dict[str, Any]: + human_readable_resources = {} + + for res_name, res_value in resources.items(): + if "RAM" in res_name: + try: + human_readable_resources[res_name] = ( + TypeAdapter(ByteSize).validate_python(res_value).human_readable() + ) + except ValidationError: + _logger.warning( + "could not parse %s:%s, please check what changed in how Dask prepares resources!", + f"{res_name=}", + res_value, + ) + human_readable_resources[res_name] = res_value + else: + human_readable_resources[res_name] = res_value + return human_readable_resources def check_if_cluster_is_able_to_run_pipeline( @@ -453,53 +550,33 @@ def check_if_cluster_is_able_to_run_pipeline( scheduler_info: dict[str, Any], task_resources: dict[str, Any], node_image: Image, - cluster_id: ClusterID, -): - logger.debug("Dask scheduler 
infos: %s", json_dumps(scheduler_info, indent=2)) +) -> None: + _logger.debug( + "Dask scheduler infos: %s", f"{scheduler_info}" + ) # NOTE: be careful not to json_dumps this as it sometimes contain keys that are tuples! + workers = scheduler_info.get("workers", {}) - def can_task_run_on_worker( - task_resources: dict[str, Any], worker_resources: dict[str, Any] - ) -> bool: - def gen_check( - task_resources: dict[str, Any], worker_resources: dict[str, Any] - ) -> Iterable[bool]: - for name, required_value in task_resources.items(): - if required_value is None: - yield True - elif worker_has := worker_resources.get(name): - yield worker_has >= required_value - else: - yield False - - return all(gen_check(task_resources, worker_resources)) - - def cluster_missing_resources( - task_resources: dict[str, Any], cluster_resources: dict[str, Any] - ) -> list[str]: - return [r for r in task_resources if r not in cluster_resources] - - cluster_resources_counter = collections.Counter() + cluster_resources_counter: collections.Counter = collections.Counter() can_a_worker_run_task = False for worker in workers: worker_resources = workers[worker].get("resources", {}) cluster_resources_counter.update(worker_resources) - if can_task_run_on_worker(task_resources, worker_resources): + if _can_task_run_on_worker(task_resources, worker_resources): can_a_worker_run_task = True all_available_resources_in_cluster = dict(cluster_resources_counter) - logger.debug( - "Dask scheduler total available resources in cluster %s: %s, task needed resources %s", - cluster_id, + _logger.debug( + "Dask scheduler total available resources in cluster: %s, task needed resources %s", json_dumps(all_available_resources_in_cluster, indent=2), json_dumps(task_resources, indent=2), ) - if can_a_worker_run_task: + if can_a_worker_run_task: # OsparcErrorMixin return # check if we have missing resources - if missing_resources := cluster_missing_resources( + if missing_resources := _cluster_missing_resources( task_resources, all_available_resources_in_cluster ): cluster_resources = ( @@ -511,17 +588,49 @@ def cluster_missing_resources( raise MissingComputationalResourcesError( project_id=project_id, node_id=node_id, - msg=f"Service {node_image.name}:{node_image.tag} cannot be scheduled " - f"on cluster {cluster_id}: task needs '{task_resources}', " - f"cluster has {cluster_resources}", + service_name=node_image.name, + service_version=node_image.tag, + task_resources=task_resources, + cluster_resources=cluster_resources, ) # well then our workers are not powerful enough raise InsuficientComputationalResourcesError( project_id=project_id, node_id=node_id, - msg=f"Service {node_image.name}:{node_image.tag} cannot be scheduled " - f"on cluster {cluster_id}: insuficient resources" - f"cluster has '{all_available_resources_in_cluster}', cluster has no worker with the" - " necessary computational resources for running the service! TIP: contact oSparc support", + service_name=node_image.name, + service_version=node_image.tag, + service_requested_resources=_to_human_readable_resource_values(task_resources), + cluster_available_resources=[ + _to_human_readable_resource_values(worker.get("resources", None)) + for worker in workers.values() + ], + ) + + +P = ParamSpec("P") +R = TypeVar("R") + + +async def wrap_client_async_routine( + client_coroutine: Coroutine[Any, Any, Any] | Any | None, +) -> Any: + """Dask async behavior does not go well with Pylance as it returns + a union of types. 
this wrapper makes both mypy and pylance happy""" + assert client_coroutine # nosec + return await client_coroutine + + +def compute_task_owner( + user_id: UserID, + project_id: ProjectID, + node_id: ProjectID, + project_metadata: ProjectMetadataDict, +) -> TaskOwner: + return TaskOwner( + user_id=user_id, + project_id=project_id, + node_id=node_id, + parent_node_id=project_metadata.get("parent_node_id"), + parent_project_id=project_metadata.get("parent_project_id"), ) diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py index fbd3d60b67d..a692b3abf16 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/dask_client_utils.py @@ -1,46 +1,23 @@ import logging import os import socket -from contextlib import suppress -from dataclasses import dataclass, field -from typing import Awaitable, Callable, Final, Optional, Union +from collections.abc import Awaitable, Callable +from dataclasses import dataclass +from typing import Any, TypeAlias -import dask_gateway import distributed -import httpx -from aiohttp import ClientConnectionError, ClientResponseError -from dask_task_models_library.container_tasks.events import ( - TaskLogEvent, - TaskProgressEvent, - TaskStateEvent, -) -from models_library.clusters import ( - ClusterAuthentication, - JupyterHubTokenAuthentication, - KerberosAuthentication, - NoAuthentication, - SimpleAuthentication, -) +from models_library.clusters import ClusterAuthentication, TLSAuthentication from pydantic import AnyUrl -from ..core.errors import ( - ConfigurationError, - DaskClientRequestError, - DaskClusterError, - DaskGatewayServerError, - SchedulerError, -) +from ..core.errors import ConfigurationError +from .dask import wrap_client_async_routine -DaskGatewayAuths = Union[ - dask_gateway.BasicAuth, dask_gateway.KerberosAuth, dask_gateway.JupyterHubAuth -] +UnixTimestamp: TypeAlias = float @dataclass class TaskHandlers: - task_change_handler: Callable[[str], Awaitable[None]] - task_progress_handler: Callable[[str], Awaitable[None]] - task_log_handler: Callable[[str], Awaitable[None]] + task_progress_handler: Callable[[tuple[UnixTimestamp, Any]], Awaitable[None]] logger = logging.getLogger(__name__) @@ -50,198 +27,34 @@ class TaskHandlers: class DaskSubSystem: client: distributed.Client scheduler_id: str - gateway: Optional[dask_gateway.Gateway] - gateway_cluster: Optional[dask_gateway.GatewayCluster] - state_sub: distributed.Sub = field(init=False) - progress_sub: distributed.Sub = field(init=False) - logs_sub: distributed.Sub = field(init=False) - def __post_init__(self) -> None: - self.state_sub = distributed.Sub( - TaskStateEvent.topic_name(), client=self.client - ) - self.progress_sub = distributed.Sub( - TaskProgressEvent.topic_name(), client=self.client - ) - self.logs_sub = distributed.Sub(TaskLogEvent.topic_name(), client=self.client) - - async def close(self): + async def close(self) -> None: # NOTE: if the Sub are deleted before closing the connection, # then the dask-scheduler goes in a bad state [https://github.com/dask/distributed/issues/3276] # closing the client appears to fix the issue and the dask-scheduler remains happy if self.client: - await self.client.close() # type: ignore - if self.gateway_cluster: - await self.gateway_cluster.close() # type: ignore - if self.gateway: - await self.gateway.close() # type: ignore + await 
wrap_client_async_routine(self.client.close()) -async def _connect_to_dask_scheduler(endpoint: AnyUrl) -> DaskSubSystem: +async def connect_to_dask_scheduler( + endpoint: AnyUrl, authentication: ClusterAuthentication +) -> DaskSubSystem: try: - client = await distributed.Client( # type: ignore + security = distributed.Security() + if isinstance(authentication, TLSAuthentication): + security = distributed.Security( + tls_ca_file=f"{authentication.tls_ca_file}", + tls_client_cert=f"{authentication.tls_client_cert}", + tls_client_key=f"{authentication.tls_client_key}", + require_encryption=True, + ) + client = await distributed.Client( f"{endpoint}", asynchronous=True, name=f"director-v2_{socket.gethostname()}_{os.getpid()}", + security=security, ) - return DaskSubSystem( - client=client, - scheduler_id=client.scheduler_info()["id"], - gateway=None, - gateway_cluster=None, - ) - except (TypeError) as exc: - raise ConfigurationError( - f"Scheduler has invalid configuration: {endpoint=}" - ) from exc - - -async def _connect_with_gateway_and_create_cluster( - endpoint: AnyUrl, auth_params: ClusterAuthentication -) -> DaskSubSystem: - try: - logger.debug( - "connecting with gateway at %s with %s", f"{endpoint!r}", f"{auth_params=}" - ) - gateway_auth = await get_gateway_auth_from_params(auth_params) - gateway = dask_gateway.Gateway( - address=f"{endpoint}", auth=gateway_auth, asynchronous=True - ) - - try: - # if there is already a cluster that means we can re-connect to it, - # and IT SHALL BE the first in the list - cluster_reports_list = await gateway.list_clusters() - logger.debug( - "current clusters on the gateway: %s", f"{cluster_reports_list=}" - ) - cluster = None - if cluster_reports_list: - assert ( - len(cluster_reports_list) == 1 - ), "More than 1 cluster at this location, that is unexpected!!" 
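With dask-gateway removed, `connect_to_dask_scheduler` above talks to the scheduler directly and, for a `TLSAuthentication`, hands `distributed.Client` a `distributed.Security` built from the CA, certificate and key paths. A minimal usage sketch assuming `dask.distributed` is installed and a TLS-enabled scheduler is reachable; the address and file paths are placeholders:

```python
import asyncio

import distributed


async def ping_tls_scheduler() -> dict:
    # build the same kind of Security object as above; file paths are placeholders
    security = distributed.Security(
        tls_ca_file="/secrets/ca.pem",
        tls_client_cert="/secrets/client-cert.pem",
        tls_client_key="/secrets/client-key.pem",
        require_encryption=True,
    )
    async with distributed.Client(
        "tls://dask-scheduler.example.com:8786",
        asynchronous=True,
        name="director-v2-sketch",
        security=security,
    ) as client:
        # scheduler_info() is what the code above uses to capture the scheduler id
        return client.scheduler_info()


if __name__ == "__main__":
    print(asyncio.run(ping_tls_scheduler()))
```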
# nosec - cluster = await gateway.connect( - cluster_reports_list[0].name, shutdown_on_close=False - ) - logger.debug("connected to %s", f"{cluster=}") - else: - cluster = await gateway.new_cluster(shutdown_on_close=False) - logger.debug("created %s", f"{cluster=}") - assert cluster # nosec - logger.info("Cluster dashboard available: %s", cluster.dashboard_link) - # NOTE: we scale to 1 worker as they are global - await cluster.adapt(active=True) - client = await cluster.get_client() # type: ignore - assert client # nosec - return DaskSubSystem( - client=client, - scheduler_id=client.scheduler_info()["id"], - gateway=gateway, - gateway_cluster=cluster, - ) - except Exception as exc: - # cleanup - with suppress(Exception): - await gateway.close() # type: ignore - raise exc - - except (TypeError) as exc: - raise ConfigurationError( - f"Cluster has invalid configuration: {endpoint=}, {auth_params=}" - ) from exc - except (ValueError) as exc: - # this is when a 404=NotFound,422=MalformedData comes up - raise DaskClientRequestError(endpoint=endpoint, error=exc) from exc - except (dask_gateway.GatewayClusterError) as exc: - # this is when a 409=Conflict/Cannot complete request comes up - raise DaskClusterError(endpoint=endpoint, error=exc) from exc - except (dask_gateway.GatewayServerError) as exc: - # this is when a 500 comes up - raise DaskGatewayServerError(endpoint=endpoint, error=exc) from exc - - -def _is_internal_scheduler(authentication: ClusterAuthentication) -> bool: - return isinstance(authentication, NoAuthentication) - - -async def create_internal_client_based_on_auth( - endpoint: AnyUrl, authentication: ClusterAuthentication -) -> DaskSubSystem: - if _is_internal_scheduler(authentication): - # if no auth then we go for a standard scheduler connection - return await _connect_to_dask_scheduler(endpoint) - # we do have some auth, so it is going through a gateway - return await _connect_with_gateway_and_create_cluster(endpoint, authentication) - - -async def get_gateway_auth_from_params( - auth_params: ClusterAuthentication, -) -> DaskGatewayAuths: - try: - if isinstance(auth_params, SimpleAuthentication): - return dask_gateway.BasicAuth( - username=auth_params.username, - password=auth_params.password.get_secret_value(), - ) - if isinstance(auth_params, KerberosAuthentication): - return dask_gateway.KerberosAuth() - if isinstance(auth_params, JupyterHubTokenAuthentication): - return dask_gateway.JupyterHubAuth(auth_params.api_token) - except (TypeError, ValueError) as exc: - raise ConfigurationError( - f"Cluster has invalid configuration: {auth_params}" - ) from exc - - raise ConfigurationError(f"Cluster has invalid configuration: {auth_params=}") - - -_PING_TIMEOUT_S: Final[int] = 5 -_DASK_SCHEDULER_RUNNING_STATE: Final[str] = "running" - - -async def test_scheduler_endpoint( - endpoint: AnyUrl, authentication: ClusterAuthentication -) -> None: - """This method will try to connect to a gateway endpoint and raise a ConfigurationError in case of problem - - :raises ConfigurationError: contians some information as to why the connection failed - """ - try: - if _is_internal_scheduler(authentication): - async with distributed.Client( - address=endpoint, timeout=_PING_TIMEOUT_S, asynchronous=True - ) as dask_client: - if not dask_client.status == _DASK_SCHEDULER_RUNNING_STATE: - raise SchedulerError("internal scheduler is not running!") - - else: - gateway_auth = await get_gateway_auth_from_params(authentication) - async with dask_gateway.Gateway( - address=f"{endpoint}", auth=gateway_auth, 
asynchronous=True - ) as gateway: - # this does not yet create any connection to the underlying gateway. - # since using a fct from dask gateway is going to timeout after a long time - # we bypass the pinging by calling in ourselves with a short timeout - async with httpx.AsyncClient( - transport=httpx.AsyncHTTPTransport(retries=2) - ) as httpx_client: - # try to get something the api shall return fast - response = await httpx_client.get( - f"{endpoint}/api/version", timeout=_PING_TIMEOUT_S - ) - response.raise_for_status() - # now we try to list the clusters to check the gateway responds in a sensible way - await gateway.list_clusters() - - logger.debug("Pinging %s, succeeded", f"{endpoint=}") - except ( - dask_gateway.GatewayServerError, - ClientConnectionError, - ClientResponseError, - httpx.HTTPError, - SchedulerError, - ) as exc: - logger.debug("Pinging %s, failed: %s", f"{endpoint=}", f"{exc=!r}") - raise ConfigurationError( - f"Could not connect to cluster in {endpoint}: error: {exc}" - ) from exc + return DaskSubSystem(client=client, scheduler_id=client.scheduler_info()["id"]) + except TypeError as exc: + msg = f"Scheduler has invalid configuration: {endpoint=}" + raise ConfigurationError(msg=msg) from exc diff --git a/services/director-v2/src/simcore_service_director_v2/utils/db.py b/services/director-v2/src/simcore_service_director_v2/utils/db.py index cbe333942d0..43e3a371089 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/db.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/db.py @@ -1,14 +1,10 @@ -import json -from typing import Any +import logging from fastapi import FastAPI -from models_library.clusters import BaseCluster from models_library.projects_state import RunningState -from settings_library.utils_cli import create_json_encoder_wo_secrets from simcore_postgres_database.models.comp_pipeline import StateType -from ..api.dependencies.database import get_base_repository -from ..modules.db.repositories import BaseRepository +from ..api.dependencies.database import RepoType, get_base_repository DB_TO_RUNNING_STATE = { StateType.FAILED: RunningState.FAILED, @@ -18,26 +14,16 @@ StateType.NOT_STARTED: RunningState.NOT_STARTED, StateType.RUNNING: RunningState.STARTED, StateType.ABORTED: RunningState.ABORTED, + StateType.WAITING_FOR_RESOURCES: RunningState.WAITING_FOR_RESOURCES, + StateType.WAITING_FOR_CLUSTER: RunningState.WAITING_FOR_CLUSTER, } -RUNNING_STATE_TO_DB = { - **{v: k for k, v in DB_TO_RUNNING_STATE.items()}, - **{RunningState.RETRY: StateType.RUNNING}, +RUNNING_STATE_TO_DB = {v: k for k, v in DB_TO_RUNNING_STATE.items()} | { + RunningState.UNKNOWN: StateType.FAILED } +_logger = logging.getLogger(__name__) -def to_clusters_db(cluster: BaseCluster, only_update: bool) -> dict[str, Any]: - db_model = json.loads( - cluster.json( - by_alias=True, - exclude={"id", "access_rights"}, - exclude_unset=only_update, - exclude_none=only_update, - encoder=create_json_encoder_wo_secrets(BaseCluster), - ) - ) - return db_model - -def get_repository(app: FastAPI, repo_type: type[BaseRepository]) -> BaseRepository: +def get_repository(app: FastAPI, repo_type: type[RepoType]) -> RepoType: return get_base_repository(engine=app.state.engine, repo_type=repo_type) diff --git a/services/director-v2/src/simcore_service_director_v2/utils/dict_utils.py b/services/director-v2/src/simcore_service_director_v2/utils/dict_utils.py index 1686281c3e5..37383d129bd 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/dict_utils.py +++ 
b/services/director-v2/src/simcore_service_director_v2/utils/dict_utils.py @@ -1,7 +1,7 @@ from copy import deepcopy -from typing import Any, Optional +from typing import Any -from toolz.dicttoolz import get_in, update_in +from toolz.dicttoolz import get_in, update_in # type: ignore[import-untyped] def nested_update( @@ -64,7 +64,7 @@ def get_leaf_key_paths(data: dict[str, Any]) -> tuple[list[str], ...]: """ def _get_parent_keys( - dict_data: dict[str, Any], parents: Optional[list[str]] + dict_data: dict[str, Any], parents: list[str] | None ) -> list[list[str]]: root_parents: list[str] = parents or [] diff --git a/services/director-v2/src/simcore_service_director_v2/utils/logging_utils.py b/services/director-v2/src/simcore_service_director_v2/utils/logging_utils.py deleted file mode 100644 index ffee9a932d6..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/utils/logging_utils.py +++ /dev/null @@ -1,172 +0,0 @@ -""" -NOTE: This is temporarily here. this is a copy from packages/service-library/src/servicelib/logging_utils.py - This should go in another library soon. - -This codes originates from this article (https://medium.com/swlh/add-log-decorators-to-your-python-project-84094f832181) -""" -import functools -import logging -import os -import sys -from asyncio import iscoroutinefunction -from inspect import getframeinfo, stack -from logging import Formatter -from typing import Callable, Optional - -log = logging.getLogger(__name__) - - -BLACK = "\033[0;30m" -BLUE = "\033[0;34m" -GREEN = "\033[0;32m" -CYAN = "\033[0;36m" -RED = "\033[0;31m" -ORANGE = "\033[48;2;255;165;0m" -PURPLE = "\033[0;35m" -BROWN = "\033[0;33m" -GRAY = "\033[0;37m" -BOLDGRAY = "\033[1;30m" -BOLDBLUE = "\033[1;34m" -BOLDGREEN = "\033[1;32m" -BOLDCYAN = "\033[1;36m" -BOLDRED = "\033[1;31m" -BOLDPURPLE = "\033[1;35m" -BOLDYELLOW = "\033[1;33m" -WHITE = "\033[1;37m" - -NORMAL = "\033[0m" - -COLORS = { - "WARNING": BOLDYELLOW, - "INFO": GREEN, - "DEBUG": GRAY, - "CRITICAL": ORANGE, - "ERROR": RED, -} - - -class CustomFormatter(logging.Formatter): - """Custom Formatter does these 2 things: - 1. Overrides 'funcName' with the value of 'func_name_override', if it exists. - 2. Overrides 'filename' with the value of 'file_name_override', if it exists. 
- """ - - def format(self, record): - if hasattr(record, "func_name_override"): - record.funcName = record.func_name_override - if hasattr(record, "file_name_override"): - record.filename = record.file_name_override - - # add color - levelname = record.levelname - if levelname in COLORS: - levelname_color = COLORS[levelname] + levelname + NORMAL - record.levelname = levelname_color - return super().format(record) - - -DEFAULT_FORMATTING = "%(levelname)s: [%(asctime)s/%(processName)s] [%(name)s:%(funcName)s(%(lineno)d)] %(message)s" - - -def config_all_loggers(): - the_manager: logging.Manager = logging.Logger.manager - - loggers = [logging.getLogger(name) for name in the_manager.loggerDict] - for logger in loggers: - set_logging_handler(logger) - - -def set_logging_handler( - logger: logging.Logger, - formatter_base: Optional[type[Formatter]] = None, - formatting: Optional[str] = None, -) -> None: - if not formatting: - formatting = DEFAULT_FORMATTING - if not formatter_base: - formatter_base = CustomFormatter - - for handler in logger.handlers: - handler.setFormatter( - formatter_base( - "%(levelname)s: %(name)s:%(funcName)s(%(lineno)s) - %(message)s" - ) - ) - - -def _log_arguments( - logger_obj: logging.Logger, func: Callable, *args, **kwargs -) -> dict[str, str]: - args_passed_in_function = [repr(a) for a in args] - kwargs_passed_in_function = [f"{k}={v!r}" for k, v in kwargs.items()] - - # The lists of positional and keyword arguments is joined together to form final string - formatted_arguments = ", ".join(args_passed_in_function + kwargs_passed_in_function) - - # Generate file name and function name for calling function. __func.name__ will give the name of the - # caller function ie. wrapper_log_info and caller file name ie log-decorator.py - # - In order to get actual function and file name we will use 'extra' parameter. - # - To get the file name we are using in-built module inspect.getframeinfo which returns calling file name - py_file_caller = getframeinfo(stack()[1][0]) - extra_args = { - "func_name_override": func.__name__, - "file_name_override": os.path.basename(py_file_caller.filename), - } - - # Before to the function execution, log function details. - logger_obj.debug( - "Arguments: %s - Begin function", - formatted_arguments, - extra=extra_args, - ) - - return extra_args - - -def log_decorator( - *, logger: Optional[logging.Logger] = None, log_exceptions: bool = False -): - """will automatically log entry/end of decorated function. - Args: - logger ([logging.Logger], optional): [description]. Defaults to None. - log_exceptions (bool, optional): [If True, then exceptions will be logged as errors, if False then exceptions will just be re-raised]. Defaults to False. 
- """ - # Build logger object - logger_obj = logger or log - - def decorator(func: Callable): - @functools.wraps(func) - async def async_wrapper(*args, **kwargs): - extra_args = _log_arguments(logger_obj, func, *args, **kwargs) - try: - # log return value from the function - value = await func(*args, **kwargs) - logger_obj.debug("Returned: - End function %r", value, extra=extra_args) - except: - # log exception if occurs in function - if log_exceptions: - logger_obj.error( - "Exception: %s", sys.exc_info()[1], extra=extra_args - ) - raise - # Return function value - return value - - @functools.wraps(func) - def sync_wrapper(*args, **kwargs): - extra_args = _log_arguments(logger_obj, func, *args, **kwargs) - try: - # log return value from the function - value = func(*args, **kwargs) - logger_obj.debug("Returned: - End function %r", value, extra=extra_args) - except: - # log exception if occurs in function - logger_obj.error("Exception: %s", sys.exc_info()[1], extra=extra_args) - raise - # Return function value - return value - - # wrapper - return async_wrapper if iscoroutinefunction(func) else sync_wrapper - - return decorator diff --git a/services/director-v2/src/simcore_service_director_v2/utils/osparc_variables.py b/services/director-v2/src/simcore_service_director_v2/utils/osparc_variables.py new file mode 100644 index 00000000000..28ce84c605d --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/utils/osparc_variables.py @@ -0,0 +1,139 @@ +import asyncio +import inspect +from collections.abc import Callable +from typing import Any, Final, NamedTuple, TypeAlias + +from models_library.utils.specs_substitution import SubstitutionValue +from pydantic import NonNegativeInt +from servicelib.utils import logged_gather + +ContextDict: TypeAlias = dict[str, Any] +ContextGetter: TypeAlias = Callable[[ContextDict], Any] + + +class CaptureError(ValueError): + ... + + +def factory_context_getter(parameter_name: str) -> ContextGetter: + """Factory that creates a function that gets a context as argument and gets a named parameter + + i.e. create_context_getter("foo")(context) == context["foo"] + """ + + def _get_or_raise(context: ContextDict) -> Any: + try: + return context[parameter_name] + except KeyError as err: + msg = f"{parameter_name=} missing from substitution context" + raise CaptureError(msg) from err + + # For context["foo"] -> return operator.methodcaller("__getitem__", keyname) + # For context.foo -> return operator.attrgetter("project_id") + return _get_or_raise + + +class RequestTuple(NamedTuple): + handler: Callable + kwargs: dict[str, Any] + + +def factory_handler(coro: Callable) -> Callable[[ContextDict], RequestTuple]: + assert inspect.iscoroutinefunction(coro) # nosec + + def _create(context: ContextDict): + # NOTE: we could delay this as well ... 
+ kwargs_from_context = { + param.name: factory_context_getter(param.name)(context) + for param in inspect.signature(coro).parameters.values() + } + return RequestTuple(handler=coro, kwargs=kwargs_from_context) + + return _create + + +class OsparcVariablesTable: + def __init__(self): + self._variables_getters: dict[str, ContextGetter] = {} + + def register(self, table: dict[str, Callable]): + assert all( # nosec + name.startswith("OSPARC_VARIABLE_") for name in table + ) # nosec + self._variables_getters.update(table) + + def register_from_context(self, name: str, context_name: str): + self.register({name: factory_context_getter(context_name)}) + + def register_from_handler(self, name: str): + def _decorator(coro: Callable): + assert inspect.iscoroutinefunction(coro) # nosec + self.register({name: factory_handler(coro)}) + + return _decorator + + def variables_names(self): + return self._variables_getters.keys() + + def copy( + self, include: set[str] | None = None, exclude: set[str] | None = None + ) -> dict[str, ContextGetter]: + all_ = set(self._variables_getters.keys()) + exclude = exclude or set() + include = include or all_ + + assert exclude.issubset(all_) # nosec + assert include.issubset(all_) # nosec + + selection = include.difference(exclude) + return {k: self._variables_getters[k] for k in selection} + + +_HANDLERS_TIMEOUT: Final[NonNegativeInt] = 4 + + +async def resolve_variables_from_context( + variables_getters: dict[str, ContextGetter], + context: ContextDict, + *, + resolve_in_parallel: bool = True, +) -> dict[str, SubstitutionValue]: + """Resolves variables given a list of handlers and a context + containing vars which can be used by the handlers. + + Arguments: + variables_getters -- mapping of awaitables which resolve the value + context -- variables which can be passed to the awaitables + + Keyword Arguments: + resolve_in_parallel -- sometimes the variable_getters cannot be ran in parallel, + for example due to race conditions, + for those situations set to False (default: {True}) + """ + # evaluate getters from context values + pre_environs: dict[str, SubstitutionValue | RequestTuple] = { + key: fun(context) for key, fun in variables_getters.items() + } + + environs: dict[str, SubstitutionValue] = {} + + coros = {} + for key, value in pre_environs.items(): + if isinstance(value, RequestTuple): + handler, kwargs = value + coro = handler(**kwargs) + # extra wrap to control timeout + coros[key] = asyncio.wait_for(coro, timeout=_HANDLERS_TIMEOUT) + else: + environs[key] = value + + # evaluates handlers + values = await logged_gather( + *coros.values(), + max_concurrency=0 if resolve_in_parallel else 1, + ) + for handler_key, handler_value in zip(coros.keys(), values, strict=True): + environs[handler_key] = handler_value + + assert set(environs.keys()) == set(variables_getters.keys()) # nosec + return environs diff --git a/services/director-v2/src/simcore_service_director_v2/utils/rabbitmq.py b/services/director-v2/src/simcore_service_director_v2/utils/rabbitmq.py new file mode 100644 index 00000000000..6f6e1693193 --- /dev/null +++ b/services/director-v2/src/simcore_service_director_v2/utils/rabbitmq.py @@ -0,0 +1,199 @@ +from typing import Any + +from models_library.progress_bar import ProgressReport +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.rabbitmq_messages import ( + InstrumentationRabbitMessage, + LoggerRabbitMessage, + 
ProgressRabbitMessageNode, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.services import ServiceKey, ServiceType, ServiceVersion +from models_library.services_resources import ServiceResourcesDict +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import NonNegativeFloat +from servicelib.logging_utils import LogLevelInt +from servicelib.rabbitmq import RabbitMQClient + +from ..models.comp_tasks import CompTaskAtDB + + +async def publish_service_started_metrics( + rabbitmq_client: RabbitMQClient, + *, + user_id: UserID, + simcore_user_agent: str, + task: CompTaskAtDB, +) -> None: + message = InstrumentationRabbitMessage.model_construct( + metrics="service_started", + user_id=user_id, + project_id=task.project_id, + node_id=task.node_id, + service_uuid=task.node_id, + service_type=task.node_class.value, + service_key=task.image.name, + service_tag=task.image.tag, + simcore_user_agent=simcore_user_agent, + ) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_stopped_metrics( + rabbitmq_client: RabbitMQClient, + *, + user_id: UserID, + simcore_user_agent: str, + task: CompTaskAtDB, + task_final_state: RunningState, +) -> None: + message = InstrumentationRabbitMessage.model_construct( + metrics="service_stopped", + user_id=user_id, + project_id=task.project_id, + node_id=task.node_id, + service_uuid=task.node_id, + service_type=task.node_class.value, + service_key=task.image.name, + service_tag=task.image.tag, + result=task_final_state, + simcore_user_agent=simcore_user_agent, + ) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_resource_tracking_started( # pylint: disable=too-many-arguments # noqa: PLR0913 + rabbitmq_client: RabbitMQClient, + service_run_id: ServiceRunID, + *, + wallet_id: WalletID | None, + wallet_name: str | None, + pricing_plan_id: int | None, + pricing_unit_id: int | None, + pricing_unit_cost_id: int | None, + product_name: str, + simcore_user_agent: str, + user_id: UserID, + user_email: str, + project_id: ProjectID, + project_name: str, + node_id: NodeID, + node_name: str, + parent_project_id: ProjectID | None, + parent_node_id: NodeID | None, + root_parent_project_id: ProjectID | None, + root_parent_project_name: str | None, + root_parent_node_id: NodeID | None, + service_key: ServiceKey, + service_version: ServiceVersion, + service_type: ServiceType, + service_resources: ServiceResourcesDict, + service_additional_metadata: dict[str, Any], +) -> None: + message = RabbitResourceTrackingStartedMessage( + service_run_id=service_run_id, + wallet_id=wallet_id, + wallet_name=wallet_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + pricing_unit_cost_id=pricing_unit_cost_id, + product_name=product_name, + simcore_user_agent=simcore_user_agent, + user_id=user_id, + user_email=user_email, + project_id=project_id, + project_name=project_name, + node_id=node_id, + node_name=node_name, + parent_project_id=parent_project_id or project_id, + root_parent_project_id=root_parent_project_id or project_id, + root_parent_project_name=root_parent_project_name or project_name, + parent_node_id=parent_node_id or node_id, + root_parent_node_id=root_parent_node_id or node_id, + service_key=service_key, + service_version=service_version, + 
service_type=service_type, + service_resources=service_resources, + service_additional_metadata=service_additional_metadata, + ) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_resource_tracking_stopped( + rabbitmq_client: RabbitMQClient, + service_run_id: ServiceRunID, + *, + simcore_platform_status: SimcorePlatformStatus, +) -> None: + message = RabbitResourceTrackingStoppedMessage( + service_run_id=service_run_id, simcore_platform_status=simcore_platform_status + ) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_resource_tracking_heartbeat( + rabbitmq_client: RabbitMQClient, service_run_id: ServiceRunID +) -> None: + message = RabbitResourceTrackingHeartbeatMessage(service_run_id=service_run_id) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_log( + rabbitmq_client: RabbitMQClient, + *, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + log: str, + log_level: LogLevelInt, +) -> None: + message = LoggerRabbitMessage.model_construct( + user_id=user_id, + project_id=project_id, + node_id=node_id, + messages=[log], + log_level=log_level, + ) + + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_service_progress( + rabbitmq_client: RabbitMQClient, + *, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + progress: NonNegativeFloat, +) -> None: + message = ProgressRabbitMessageNode.model_construct( + user_id=user_id, + project_id=project_id, + node_id=node_id, + report=ProgressReport(actual_value=progress, total=1), + ) + await rabbitmq_client.publish(message.channel_name, message) + + +async def publish_project_log( + rabbitmq_client: RabbitMQClient, + user_id: UserID, + project_id: ProjectID, + log: str, + log_level: LogLevelInt, +) -> None: + message = LoggerRabbitMessage.model_construct( + user_id=user_id, + project_id=project_id, + node_id=None, + messages=[log], + log_level=log_level, + ) + await rabbitmq_client.publish(message.channel_name, message) diff --git a/services/director-v2/src/simcore_service_director_v2/utils/routes.py b/services/director-v2/src/simcore_service_director_v2/utils/routes.py index 373ac703b86..88296bf19aa 100644 --- a/services/director-v2/src/simcore_service_director_v2/utils/routes.py +++ b/services/director-v2/src/simcore_service_director_v2/utils/routes.py @@ -6,8 +6,8 @@ class NoContentResponse(Response): def __init__( self, # pylint: disable=unused-argument status_code: int = 204, - headers: dict = None, - background: BackgroundTask = None, + headers: dict = None, # type: ignore[assignment] # ANE: mypy does not like that at all, this is implicit optional and PEP 484 does not support it + background: BackgroundTask = None, # type: ignore[assignment] # ANE: mypy does not like that at all, this is implicit optional and PEP 484 does not support it ) -> None: super().__init__( content=b"", status_code=status_code, headers=headers, background=background diff --git a/services/director-v2/src/simcore_service_director_v2/utils/scheduler.py b/services/director-v2/src/simcore_service_director_v2/utils/scheduler.py deleted file mode 100644 index 74fa9445500..00000000000 --- a/services/director-v2/src/simcore_service_director_v2/utils/scheduler.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Set, Type - -from aiopg.sa.engine import Engine -from models_library.projects_state import RunningState -from pydantic import PositiveInt - -from ..modules.db.repositories import 
BaseRepository - -SCHEDULED_STATES: Set[RunningState] = { - RunningState.PUBLISHED, - RunningState.PENDING, - RunningState.STARTED, - RunningState.RETRY, -} - -COMPLETED_STATES: Set[RunningState] = { - RunningState.ABORTED, - RunningState.SUCCESS, - RunningState.FAILED, -} - - -def get_repository(db_engine: Engine, repo_cls: Type[BaseRepository]) -> BaseRepository: - return repo_cls(db_engine=db_engine) - - -Iteration = PositiveInt diff --git a/services/director-v2/tests/conftest.py b/services/director-v2/tests/conftest.py index 979361c3f63..8335706ad7b 100644 --- a/services/director-v2/tests/conftest.py +++ b/services/director-v2/tests/conftest.py @@ -3,35 +3,49 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable +import functools import json import logging import os +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable from copy import deepcopy +from datetime import timedelta from pathlib import Path -from typing import Any, AsyncIterable, Iterable +from typing import Any from unittest.mock import AsyncMock import httpx import pytest import simcore_service_director_v2 from asgi_lifespan import LifespanManager +from faker import Faker from fastapi import FastAPI -from models_library.projects import Node, Workbench -from pytest import MonkeyPatch +from models_library.products import ProductName +from models_library.projects import Node, NodesDict +from models_library.rpc.webserver.auth.api_keys import ApiKeyGet +from models_library.users import UserID +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import ( + setenvs_from_dict, + setenvs_from_envfile, +) from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict, setenvs_from_envfile +from servicelib.rabbitmq import RabbitMQRPCClient +from settings_library.rabbit import RabbitSettings from simcore_service_director_v2.core.application import init_app from simcore_service_director_v2.core.settings import AppSettings from starlette.testclient import ASGI3App, TestClient pytest_plugins = [ + "pytest_simcore.dask_scheduler", "pytest_simcore.db_entries_mocks", "pytest_simcore.docker_compose", "pytest_simcore.docker_registry", "pytest_simcore.docker_swarm", "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_users_data", "pytest_simcore.minio_service", - "pytest_simcore.monkeypatch_extra", "pytest_simcore.postgres_service", "pytest_simcore.pydantic_models", "pytest_simcore.pytest_global_environs", @@ -42,7 +56,7 @@ "pytest_simcore.simcore_dask_service", "pytest_simcore.simcore_services", "pytest_simcore.simcore_storage_service", - "pytest_simcore.tmp_path_extra", + "pytest_simcore.socketio", ] logger = logging.getLogger(__name__) @@ -67,16 +81,15 @@ def package_dir() -> Path: return dirpath -@pytest.fixture(scope="function") +@pytest.fixture() def project_env_devel_environment( - monkeypatch: MonkeyPatch, project_slug_dir: Path + monkeypatch: pytest.MonkeyPatch, project_slug_dir: Path ) -> EnvVarsDict: env_devel_file = project_slug_dir / ".env-devel" assert env_devel_file.exists() - envs = setenvs_from_envfile( + return setenvs_from_envfile( monkeypatch, env_devel_file.read_text(), verbose=True, interpolate=True ) - return envs @pytest.fixture(scope="session") @@ -139,70 +152,91 @@ def dynamic_sidecar_docker_image_name() -> str: return f"{registry}/dynamic-sidecar:{image_tag}" -@pytest.fixture(scope="function") +@pytest.fixture def mock_env( - 
monkeypatch: MonkeyPatch, dynamic_sidecar_docker_image_name: str + monkeypatch: pytest.MonkeyPatch, + dynamic_sidecar_docker_image_name: str, + faker: Faker, ) -> EnvVarsDict: """This is the base mock envs used to configure the app. Do override/extend this fixture to change configurations """ - env_vars: EnvVarsDict = { - "DYNAMIC_SIDECAR_IMAGE": f"{dynamic_sidecar_docker_image_name}", - "SIMCORE_SERVICES_NETWORK_NAME": "test_network_name", - "TRAEFIK_SIMCORE_ZONE": "test_traefik_zone", - "SWARM_STACK_NAME": "test_swarm_name", - "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "false", - "COMPUTATIONAL_BACKEND_ENABLED": "false", - "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "false", - "RABBIT_HOST": "mocked_host", - "RABBIT_USER": "mocked_user", - "RABBIT_PASSWORD": "mocked_password", - "REGISTRY_AUTH": "false", - "REGISTRY_USER": "test", - "REGISTRY_PW": "test", - "REGISTRY_SSL": "false", - "R_CLONE_PROVIDER": "MINIO", - "DIRECTOR_V2_POSTGRES_ENABLED": "false", - "SC_BOOT_MODE": "production", - # disable tracing as together with LifespanManager, it does not remove itself nicely - "DIRECTOR_V2_TRACING": "null", - } - setenvs_from_dict(monkeypatch, env_vars) - return env_vars - - -@pytest.fixture(scope="function") -async def client(mock_env: EnvVarsDict) -> Iterable[TestClient]: - settings = AppSettings.create_from_envs() - app = init_app(settings) - print("Application settings\n", settings.json(indent=2)) - # NOTE: this way we ensure the events are run in the application - # since it starts the app on a test server - with TestClient(app, raise_server_exceptions=True) as test_client: - yield test_client + return setenvs_from_dict( + monkeypatch, + envs={ + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "false", + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH": "{}", + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL": f"{faker.url()}", + "COMPUTATIONAL_BACKEND_ENABLED": "false", + "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "false", + "DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED": "0", + "DYNAMIC_SIDECAR_IMAGE": f"{dynamic_sidecar_docker_image_name}", + "DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS": "{}", + "POSTGRES_DB": "test", + "POSTGRES_HOST": "test", + "POSTGRES_PASSWORD": "test", + "POSTGRES_USER": "test", + "R_CLONE_PROVIDER": "MINIO", + "RABBIT_HOST": "mocked_host", + "RABBIT_PASSWORD": "mocked_password", + "RABBIT_SECURE": "false", + "RABBIT_USER": "mocked_user", + "REGISTRY_AUTH": "false", + "REGISTRY_PW": "test", + "REGISTRY_SSL": "false", + "REGISTRY_USER": "test", + "REGISTRY_URL": faker.url(), + "SC_BOOT_MODE": "production", + "SIMCORE_SERVICES_NETWORK_NAME": "test_network_name", + "SWARM_STACK_NAME": "pytest-simcore", + "TRAEFIK_SIMCORE_ZONE": "test_traefik_zone", + "DIRECTOR_V2_TRACING": "null", + }, + ) -@pytest.fixture(scope="function") +@pytest.fixture() async def initialized_app(mock_env: EnvVarsDict) -> AsyncIterable[FastAPI]: settings = AppSettings.create_from_envs() app = init_app(settings) + print("Application settings\n", settings.model_dump_json(indent=2)) async with LifespanManager(app): yield app -@pytest.fixture(scope="function") -async def async_client(initialized_app: FastAPI) -> AsyncIterable[httpx.AsyncClient]: +@pytest.fixture() +async def client(mock_env: EnvVarsDict) -> AsyncIterator[TestClient]: + # NOTE: this way we ensure the events are run in the application + # since it starts the app on a test server + settings = AppSettings.create_from_envs() + app = init_app(settings) + # NOTE: we cannot use the initialized_app fixture here as the TestClient also creates it + 
print("Application settings\n", settings.model_dump_json(indent=2)) + with TestClient(app, raise_server_exceptions=True) as test_client: + yield test_client + +@pytest.fixture() +async def async_client(initialized_app: FastAPI) -> AsyncIterable[httpx.AsyncClient]: async with httpx.AsyncClient( - app=initialized_app, + transport=httpx.ASGITransport(app=initialized_app), base_url="http://director-v2.testserver.io", headers={"Content-Type": "application/json"}, ) as client: yield client -@pytest.fixture(scope="function") +@pytest.fixture +async def rpc_client( + rabbit_service: RabbitSettings, + initialized_app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") + + +@pytest.fixture() def minimal_app(client: TestClient) -> ASGI3App: # NOTICE that this app triggers events # SEE: https://fastapi.tiangolo.com/advanced/testing-events/ @@ -213,27 +247,26 @@ def minimal_app(client: TestClient) -> ASGI3App: @pytest.fixture -def fake_workbench(fake_workbench_file: Path) -> Workbench: +def fake_workbench(fake_workbench_file: Path) -> NodesDict: workbench_dict = json.loads(fake_workbench_file.read_text()) workbench = {} for node_id, node_data in workbench_dict.items(): - workbench[node_id] = Node.parse_obj(node_data) + workbench[node_id] = Node.model_validate(node_data) return workbench @pytest.fixture def fake_workbench_as_dict(fake_workbench_file: Path) -> dict[str, Any]: - workbench_dict = json.loads(fake_workbench_file.read_text()) - return workbench_dict + return json.loads(fake_workbench_file.read_text()) @pytest.fixture def fake_workbench_without_outputs( - fake_workbench_as_dict: dict[str, Any] + fake_workbench_as_dict: dict[str, Any], ) -> dict[str, Any]: workbench = deepcopy(fake_workbench_as_dict) # remove all the outputs from the workbench - for _, data in workbench.items(): + for data in workbench.values(): data["outputs"] = {} return workbench @@ -254,10 +287,96 @@ def fake_workbench_complete_adjacency( @pytest.fixture -def disable_rabbitmq(mocker) -> None: - def mock_setup(app: FastAPI) -> None: +def disable_rabbitmq(mocker: MockerFixture) -> None: + def rabbitmq_mock_setup(app: FastAPI) -> None: app.state.rabbitmq_client = AsyncMock() + def rpc_api_routes_mock_setup(app: FastAPI) -> None: + app.state.rabbitmq_rpc_server = AsyncMock() + + mocker.patch( + "simcore_service_director_v2.modules.rabbitmq.setup", + side_effect=rabbitmq_mock_setup, + ) + mocker.patch( + "simcore_service_director_v2.core.application.setup_rpc_api_routes", + side_effect=rpc_api_routes_mock_setup, + ) + + +@pytest.fixture +def mocked_service_awaits_manual_interventions(mocker: MockerFixture) -> None: + module_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler" + mocker.patch( + f"{module_base}._core._scheduler.Scheduler.is_service_awaiting_manual_intervention", + autospec=True, + return_value=False, + ) + + +@pytest.fixture +def mock_redis(mocker: MockerFixture) -> None: + def _mock_setup(app: FastAPI) -> None: + def _mock_client(*args, **kwargs) -> AsyncMock: + return AsyncMock() + + mock = AsyncMock() + mock.client = _mock_client + + async def on_startup() -> None: + app.state.redis_clients_manager = mock + + app.add_event_handler("startup", on_startup) + + mocker.patch( + "simcore_service_director_v2.modules.redis.setup", side_effect=_mock_setup + ) + + +@pytest.fixture +def mock_exclusive(mock_redis: None, mocker: MockerFixture) -> None: + def _mock_exclusive( + _: Any, *, lock_key: str, 
lock_value: bytes | str | None = None + ): + def decorator(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + return await func(*args, **kwargs) + + return wrapper + + return decorator + + module_base = ( + "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler" + ) + mocker.patch(f"{module_base}.exclusive", side_effect=_mock_exclusive) + + +@pytest.fixture +def mock_osparc_variables_api_auth_rpc(mocker: MockerFixture) -> None: + + fake_data = ApiKeyGet.model_validate(ApiKeyGet.model_json_schema()["examples"][0]) + + async def _create( + app: FastAPI, + *, + product_name: ProductName, + user_id: UserID, + display_name: str, + expiration: timedelta, + ): + assert app + assert product_name + assert user_id + assert expiration + + fake_data.display_name = display_name + return fake_data + + # mocks RPC interface mocker.patch( - "simcore_service_director_v2.modules.rabbitmq.setup", side_effect=mock_setup + "simcore_service_director_v2.modules.osparc_variables._api_auth.rpc_create_api_key", + side_effect=_create, + autospec=True, ) diff --git a/services/director-v2/tests/helpers/__init__.py b/services/director-v2/tests/helpers/__init__.py new file mode 100644 index 00000000000..c81922d7d4a --- /dev/null +++ b/services/director-v2/tests/helpers/__init__.py @@ -0,0 +1,4 @@ +import pytest + +# NOTE: this ensures that pytest rewrites the assertion so that comparison look nice in the console +pytest.register_assert_rewrite("helpers.shared_comp_utils") diff --git a/services/director-v2/tests/integration/shared_comp_utils.py b/services/director-v2/tests/helpers/shared_comp_utils.py similarity index 82% rename from services/director-v2/tests/integration/shared_comp_utils.py rename to services/director-v2/tests/helpers/shared_comp_utils.py index 645f0a389c0..f2ce2ff4283 100644 --- a/services/director-v2/tests/integration/shared_comp_utils.py +++ b/services/director-v2/tests/helpers/shared_comp_utils.py @@ -1,10 +1,9 @@ import json import time -from typing import Optional from uuid import UUID import httpx -from models_library.clusters import ClusterID +from models_library.api_schemas_directorv2.computations import ComputationGet from models_library.projects import ProjectAtDB from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState @@ -12,9 +11,8 @@ from pydantic import PositiveInt from pydantic.networks import AnyHttpUrl from pytest_simcore.helpers.constants import MINUTE -from simcore_service_director_v2.models.schemas.comp_tasks import ComputationGet from starlette import status -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed @@ -26,9 +24,8 @@ async def assert_computation_task_out_obj( project: ProjectAtDB, exp_task_state: RunningState, exp_pipeline_details: PipelineDetails, - iteration: Optional[PositiveInt], - cluster_id: Optional[ClusterID], -): + iteration: PositiveInt | None, +) -> None: assert task_out.id == project.uuid assert task_out.state == exp_task_state assert task_out.url.path == f"/v2/computations/{project.uuid}" @@ -42,9 +39,10 @@ async def assert_computation_task_out_obj( else: assert task_out.stop_url is None assert task_out.iteration == iteration - assert task_out.cluster_id == cluster_id # check pipeline details contents - assert task_out.pipeline_details.dict() == exp_pipeline_details.dict() + 
received_task_out_pipeline = task_out.pipeline_details.model_dump() + expected_task_out_pipeline = exp_pipeline_details.model_dump() + assert received_task_out_pipeline == expected_task_out_pipeline async def assert_and_wait_for_pipeline_status( @@ -52,7 +50,7 @@ async def assert_and_wait_for_pipeline_status( url: AnyHttpUrl, user_id: UserID, project_uuid: UUID, - wait_for_states: Optional[list[RunningState]] = None, + wait_for_states: list[RunningState] | None = None, ) -> ComputationGet: if not wait_for_states: wait_for_states = [ @@ -63,11 +61,11 @@ async def assert_and_wait_for_pipeline_status( MAX_TIMEOUT_S = 5 * MINUTE async def check_pipeline_state() -> ComputationGet: - response = await client.get(url, params={"user_id": user_id}) + response = await client.get(f"{url}", params={"user_id": user_id}) assert ( response.status_code == status.HTTP_200_OK ), f"response code is {response.status_code}, error: {response.text}" - task_out = ComputationGet.parse_obj(response.json()) + task_out = ComputationGet.model_validate(response.json()) assert task_out.id == project_uuid assert task_out.url.path == f"/v2/computations/{project_uuid}" print( @@ -99,4 +97,5 @@ async def check_pipeline_state() -> ComputationGet: return task_out # this is only to satisfy pylance - raise AssertionError("No computation task generated!") + msg = "No computation task generated!" + raise AssertionError(msg) diff --git a/services/director-v2/tests/integration/01/test_computation_api.py b/services/director-v2/tests/integration/01/test_computation_api.py index 20e15ffd49e..f16977bc1cf 100644 --- a/services/director-v2/tests/integration/01/test_computation_api.py +++ b/services/director-v2/tests/integration/01/test_computation_api.py @@ -7,27 +7,32 @@ import asyncio import json +from collections.abc import Awaitable, Callable from copy import deepcopy from dataclasses import dataclass from pathlib import Path -from typing import Any, Awaitable, Callable +from typing import Any import httpx import pytest import sqlalchemy as sa -from models_library.clusters import DEFAULT_CLUSTER_ID +from helpers.shared_comp_utils import ( + assert_and_wait_for_pipeline_status, + assert_computation_task_out_obj, +) +from models_library.api_schemas_directorv2.computations import ComputationGet +from models_library.clusters import ClusterAuthentication from models_library.projects import ProjectAtDB from models_library.projects_nodes import NodeState from models_library.projects_nodes_io import NodeID from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState -from pytest import MonkeyPatch +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import PostgresTestConfig +from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.rabbit import RabbitSettings -from shared_comp_utils import ( - assert_and_wait_for_pipeline_status, - assert_computation_task_out_obj, -) -from simcore_service_director_v2.models.schemas.comp_tasks import ComputationGet +from settings_library.redis import RedisSettings from starlette import status from starlette.testclient import TestClient from yarl import URL @@ -41,46 +46,52 @@ "postgres", "rabbit", "storage", + "redis", ] pytest_simcore_ops_services_selection = ["minio", "adminer"] -@pytest.fixture(scope="function") +@pytest.fixture def mock_env( - monkeypatch: MonkeyPatch, + mock_env: EnvVarsDict, + minimal_configuration: None, + 
monkeypatch: pytest.MonkeyPatch, dynamic_sidecar_docker_image_name: str, dask_scheduler_service: str, + dask_scheduler_auth: ClusterAuthentication, ) -> None: # used by the client fixture - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "1") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") - monkeypatch.setenv("DIRECTOR_V2_TRACING", "null") - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", - dask_scheduler_service, + setenvs_from_dict( + monkeypatch, + { + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "1", + "COMPUTATIONAL_BACKEND_ENABLED": "1", + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL": dask_scheduler_service, + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH": dask_scheduler_auth.model_dump_json(), + "DYNAMIC_SIDECAR_IMAGE": dynamic_sidecar_docker_image_name, + "SIMCORE_SERVICES_NETWORK_NAME": "test_swarm_network_name", + "SWARM_STACK_NAME": "test_mocked_stack_name", + "TRAEFIK_SIMCORE_ZONE": "test_mocked_simcore_zone", + "R_CLONE_PROVIDER": "MINIO", + "SC_BOOT_MODE": "production", + "DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS": "{}", + }, ) - monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", dynamic_sidecar_docker_image_name) - monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", "test_swarm_network_name") - monkeypatch.setenv("SWARM_STACK_NAME", "test_mocked_stack_name") - monkeypatch.setenv("TRAEFIK_SIMCORE_ZONE", "test_mocked_simcore_zone") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("SC_BOOT_MODE", "production") -@pytest.fixture() +@pytest.fixture def minimal_configuration( sleeper_service: dict[str, str], jupyter_service: dict[str, str], dask_scheduler_service: str, dask_sidecar_service: None, postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], + postgres_host_config: PostgresTestConfig, rabbit_service: RabbitSettings, + redis_service: RedisSettings, simcore_services_ready: None, storage_service: URL, -) -> None: - ... +) -> None: ... 
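
# Editor's note: the hunks in these test files repeatedly apply the Pydantic v1 -> v2 call renames
# (parse_obj -> model_validate, .dict() -> .model_dump(), .json() -> .model_dump_json()).
# Below is a minimal, self-contained sketch of that mapping for reference; FakePipelineDetails is a
# stand-in model invented for illustration and is NOT the real models_library PipelineDetails.
#
#   from pydantic import BaseModel
#
#   class FakePipelineDetails(BaseModel):
#       adjacency_list: dict[str, list[str]]
#       progress: float | None = None
#
#   payload = {"adjacency_list": {"n1": ["n2"], "n2": []}, "progress": 0.5}
#
#   # v1: FakePipelineDetails.parse_obj(payload)
#   details = FakePipelineDetails.model_validate(payload)
#
#   # v1: details.dict() / details.json(indent=2)
#   as_dict = details.model_dump()
#   as_json = details.model_dump_json(indent=2)
#
#   assert as_dict["progress"] == 0.5
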
@pytest.fixture(scope="session") @@ -97,8 +108,8 @@ def fake_workbench_computational_pipeline_details( ) -> PipelineDetails: adjacency_list = json.loads(fake_workbench_computational_adjacency_file.read_text()) node_states = json.loads(fake_workbench_node_states_file.read_text()) - return PipelineDetails.parse_obj( - {"adjacency_list": adjacency_list, "node_states": node_states} + return PipelineDetails.model_validate( + {"adjacency_list": adjacency_list, "node_states": node_states, "progress": 0} ) @@ -111,6 +122,8 @@ def fake_workbench_computational_pipeline_details_completed( node_state.modified = False node_state.dependencies = set() node_state.current_status = RunningState.SUCCESS + node_state.progress = 1 + completed_pipeline_details.progress = 1 return completed_pipeline_details @@ -122,6 +135,7 @@ def fake_workbench_computational_pipeline_details_not_started( for node_state in completed_pipeline_details.node_states.values(): node_state.modified = True node_state.current_status = RunningState.NOT_STARTED + node_state.progress = None return completed_pipeline_details @@ -136,6 +150,7 @@ def fake_workbench_computational_pipeline_details_not_started( "user_id": "some invalid id", "project_id": "not a uuid", "product_name": "not a product", + "product_api_base_url": "http://invalid", }, status.HTTP_422_UNPROCESSABLE_ENTITY, ), @@ -144,6 +159,7 @@ def fake_workbench_computational_pipeline_details_not_started( "user_id": 2, "project_id": "not a uuid", "product_name": "not a product", + "product_api_base_url": "http://invalid", }, status.HTTP_422_UNPROCESSABLE_ENTITY, ), @@ -152,13 +168,13 @@ def fake_workbench_computational_pipeline_details_not_started( "user_id": 3, "project_id": "16e60a5d-834e-4267-b44d-3af49171bf21", "product_name": "not a product", + "product_api_base_url": "http://invalid", }, status.HTTP_404_NOT_FOUND, ), ], ) def test_invalid_computation( - minimal_configuration: None, client: TestClient, body: dict, exp_response: int, @@ -174,15 +190,15 @@ def test_invalid_computation( async def test_start_empty_computation_is_refused( - minimal_configuration: None, async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() - empty_project = project(user) + user = create_registered_user() + empty_project = await project(user) with pytest.raises( httpx.HTTPStatusError, match=f"{status.HTTP_422_UNPROCESSABLE_ENTITY}" ): @@ -192,6 +208,7 @@ async def test_start_empty_computation_is_refused( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) @@ -217,18 +234,22 @@ class PartialComputationParams: "modified": True, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 2: { "modified": True, "dependencies": [1], + "progress": None, }, 3: { "modified": True, "dependencies": [], + "progress": None, }, 4: { "modified": True, "dependencies": [2, 3], + "progress": None, }, }, exp_node_states_after_run={ @@ -236,18 +257,22 @@ class PartialComputationParams: "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, 2: { "modified": True, "dependencies": [], + "progress": None, }, 3: { "modified": True, "dependencies": [], + "progress": None, }, 4: { "modified": True, "dependencies": [2, 3], + 
"progress": None, }, }, exp_pipeline_adj_list_after_force_run={1: []}, @@ -256,21 +281,25 @@ class PartialComputationParams: "modified": False, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 2: { "modified": True, "dependencies": [], "currentStatus": RunningState.NOT_STARTED, + "progress": None, }, 3: { "modified": True, "dependencies": [], "currentStatus": RunningState.NOT_STARTED, + "progress": None, }, 4: { "modified": True, "dependencies": [2, 3], "currentStatus": RunningState.NOT_STARTED, + "progress": None, }, }, ), @@ -285,21 +314,25 @@ class PartialComputationParams: "modified": True, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 2: { "modified": True, "dependencies": [1], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 3: { "modified": True, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 4: { "modified": True, "dependencies": [2, 3], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, }, exp_node_states_after_run={ @@ -307,21 +340,25 @@ class PartialComputationParams: "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, 2: { "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, 3: { "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, 4: { "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, }, exp_pipeline_adj_list_after_force_run={1: [2], 2: [4], 4: []}, @@ -330,21 +367,25 @@ class PartialComputationParams: "modified": False, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 2: { "modified": False, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, 3: { "modified": False, "dependencies": [], "currentStatus": RunningState.SUCCESS, + "progress": 1, }, 4: { "modified": False, "dependencies": [], "currentStatus": RunningState.PUBLISHED, + "progress": None, }, }, ), @@ -353,18 +394,20 @@ class PartialComputationParams: ], ) async def test_run_partial_computation( - minimal_configuration: None, + wait_for_catalog_service: Callable[[UserID, str], Awaitable[None]], async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], update_project_workbench_with_comp_tasks: Callable, fake_workbench_without_outputs: dict[str, Any], params: PartialComputationParams, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() - sleepers_project: ProjectAtDB = project( + user = create_registered_user() + await wait_for_catalog_service(user["id"], osparc_product_name) + sleepers_project: ProjectAtDB = await project( user, workbench=fake_workbench_without_outputs ) @@ -386,11 +429,18 @@ def _convert_to_pipeline_details( NodeID(workbench_node_uuids[dep_n]) for dep_n in s["dependencies"] }, currentStatus=s.get("currentStatus", RunningState.NOT_STARTED), + progress=s.get("progress"), ) for n, s in exp_node_states.items() } + pipeline_progress = 0 + for node_id in converted_adj_list: + node = converted_node_states[node_id] + pipeline_progress += (node.progress or 0) / len(converted_adj_list) return PipelineDetails( - adjacency_list=converted_adj_list, node_states=converted_node_states + adjacency_list=converted_adj_list, + 
node_states=converted_node_states, + progress=pipeline_progress, ) # convert the ids to the node uuids from the project @@ -405,6 +455,7 @@ def _convert_to_pipeline_details( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, subgraph=[ str(node_id) for index, node_id in enumerate(sleepers_project.workbench) @@ -418,7 +469,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # now wait for the computation to finish @@ -434,7 +484,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=expected_pipeline_details_after_run, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # run it a second time. the tasks are all up-to-date, nothing should be run @@ -450,6 +499,7 @@ def _convert_to_pipeline_details( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, expected_response_status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, subgraph=[ str(node_id) @@ -471,6 +521,7 @@ def _convert_to_pipeline_details( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, expected_response_status_code=status.HTTP_201_CREATED, subgraph=[ str(node_id) @@ -486,7 +537,6 @@ def _convert_to_pipeline_details( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details_forced, iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) # now wait for the computation to finish @@ -496,19 +546,21 @@ def _convert_to_pipeline_details( async def test_run_computation( - minimal_configuration: None, + wait_for_catalog_service: Callable[[UserID, str], Awaitable[None]], async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], fake_workbench_without_outputs: dict[str, Any], update_project_workbench_with_comp_tasks: Callable, fake_workbench_computational_pipeline_details: PipelineDetails, fake_workbench_computational_pipeline_details_completed: PipelineDetails, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() - sleepers_project = project(user, workbench=fake_workbench_without_outputs) + user = create_registered_user() + await wait_for_catalog_service(user["id"], osparc_product_name) + sleepers_project = await project(user, workbench=fake_workbench_without_outputs) # send a valid project with sleepers task_out = await create_pipeline( async_client, @@ -516,6 +568,7 @@ async def test_run_computation( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, expected_response_status_code=status.HTTP_201_CREATED, ) @@ -526,7 +579,6 @@ async def test_run_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait for the computation to start @@ -549,10 +601,9 @@ async def test_run_computation( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=fake_workbench_computational_pipeline_details_completed, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) - # FIXME: currently the webserver is the one updating the projects table so we need to fake this by copying 
the run_hash + # NOTE: currently the webserver is the one updating the projects table so we need to fake this by copying the run_hash update_project_workbench_with_comp_tasks(str(sleepers_project.uuid)) # run again should return a 422 cause everything is uptodate with pytest.raises( @@ -564,6 +615,7 @@ async def test_run_computation( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # now force run again @@ -577,12 +629,17 @@ async def test_run_computation( node_id ].current_status ) + node_data.progress = fake_workbench_computational_pipeline_details.node_states[ + node_id + ].progress + expected_pipeline_details_forced.progress = 0 task_out = await create_pipeline( async_client, project=sleepers_project, user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, force_restart=True, ) # check the contents is correct @@ -592,7 +649,6 @@ async def test_run_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=expected_pipeline_details_forced, # NOTE: here the pipeline already ran so its states are different iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait for the computation to finish @@ -605,28 +661,27 @@ async def test_run_computation( exp_task_state=RunningState.SUCCESS, exp_pipeline_details=fake_workbench_computational_pipeline_details_completed, iteration=2, - cluster_id=DEFAULT_CLUSTER_ID, ) async def test_abort_computation( - minimal_configuration: None, async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], fake_workbench_without_outputs: dict[str, Any], fake_workbench_computational_pipeline_details: PipelineDetails, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() + user = create_registered_user() # we need long running tasks to ensure cancellation is done properly for node in fake_workbench_without_outputs.values(): if "sleeper" in node["key"]: node["inputs"].setdefault("in_2", 120) if not isinstance(node["inputs"]["in_2"], dict): node["inputs"]["in_2"] = 120 - sleepers_project = project(user, workbench=fake_workbench_without_outputs) + sleepers_project = await project(user, workbench=fake_workbench_without_outputs) # send a valid project with sleepers task_out = await create_pipeline( async_client, @@ -634,6 +689,7 @@ async def test_abort_computation( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # check the contents is correctb @@ -643,7 +699,6 @@ async def test_abort_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait until the pipeline is started @@ -671,9 +726,9 @@ async def test_abort_computation( assert ( response.status_code == status.HTTP_202_ACCEPTED ), f"response code is {response.status_code}, error: {response.text}" - task_out = ComputationGet.parse_obj(response.json()) + task_out = ComputationGet.model_validate(response.json()) assert task_out.url.path == f"/v2/computations/{sleepers_project.uuid}:stop" - assert task_out.stop_url == None + assert task_out.stop_url is None # check that the pipeline is aborted/stopped task_out = await assert_and_wait_for_pipeline_status( @@ -689,18 
+744,18 @@ async def test_abort_computation( async def test_update_and_delete_computation( - minimal_configuration: None, async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], fake_workbench_without_outputs: dict[str, Any], fake_workbench_computational_pipeline_details_not_started: PipelineDetails, fake_workbench_computational_pipeline_details: PipelineDetails, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() - sleepers_project = project(user, workbench=fake_workbench_without_outputs) + user = create_registered_user() + sleepers_project = await project(user, workbench=fake_workbench_without_outputs) # send a valid project with sleepers task_out = await create_pipeline( async_client, @@ -708,6 +763,7 @@ async def test_update_and_delete_computation( user_id=user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # check the contents is correctb @@ -717,7 +773,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # update the pipeline @@ -727,6 +782,7 @@ async def test_update_and_delete_computation( user_id=user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # check the contents is correctb @@ -736,7 +792,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # update the pipeline @@ -746,6 +801,7 @@ async def test_update_and_delete_computation( user_id=user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # check the contents is correctb @@ -755,7 +811,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.NOT_STARTED, exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started, iteration=None, - cluster_id=None, ) # start it now @@ -765,6 +820,7 @@ async def test_update_and_delete_computation( user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # check the contents is correctb await assert_computation_task_out_obj( @@ -773,7 +829,6 @@ async def test_update_and_delete_computation( exp_task_state=RunningState.PUBLISHED, exp_pipeline_details=fake_workbench_computational_pipeline_details, iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) # wait until the pipeline is started @@ -789,18 +844,19 @@ async def test_update_and_delete_computation( ), f"pipeline is not in the expected starting state but in {task_out.state}" # now try to update the pipeline, is expected to be forbidden - with pytest.raises(httpx.HTTPStatusError, match=f"{status.HTTP_403_FORBIDDEN}"): + with pytest.raises(httpx.HTTPStatusError, match=f"{status.HTTP_409_CONFLICT}"): await create_pipeline( async_client, project=sleepers_project, user_id=user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # try to delete the pipeline, is expected to be forbidden if force parameter is false (default) response = await async_client.request( - 
"DELETE", task_out.url, json={"user_id": user["id"]} + "DELETE", f"{task_out.url}", json={"user_id": user["id"]} ) assert ( response.status_code == status.HTTP_403_FORBIDDEN @@ -808,7 +864,7 @@ async def test_update_and_delete_computation( # try again with force=True this should abort and delete the pipeline response = await async_client.request( - "DELETE", task_out.url, json={"user_id": user["id"], "force": True} + "DELETE", f"{task_out.url}", json={"user_id": user["id"], "force": True} ) assert ( response.status_code == status.HTTP_204_NO_CONTENT @@ -816,17 +872,17 @@ async def test_update_and_delete_computation( async def test_pipeline_with_no_computational_services_still_create_correct_comp_tasks_in_db( - minimal_configuration: None, async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], jupyter_service: dict[str, Any], osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() + user = create_registered_user() # create a workbench with just a dynamic service - project_with_dynamic_node = project( + project_with_dynamic_node = await project( user, workbench={ "39e92f80-9286-5612-85d1-639fa47ec57d": { @@ -847,6 +903,7 @@ async def test_pipeline_with_no_computational_services_still_create_correct_comp user_id=user["id"], start_pipeline=True, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) # still this pipeline shall be createable if we do not want to start it @@ -856,20 +913,21 @@ async def test_pipeline_with_no_computational_services_still_create_correct_comp user_id=user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) -def test_pipeline_with_control_loop_made_of_dynamic_services_is_allowed( - minimal_configuration: None, +async def test_pipeline_with_control_loop_made_of_dynamic_services_is_allowed( client: TestClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], jupyter_service: dict[str, Any], osparc_product_name: str, + osparc_product_api_base_url: str, ): - user = registered_user() + user = create_registered_user() # create a workbench with just 2 dynamic service in a cycle - project_with_dynamic_node = project( + project_with_dynamic_node = await project( user, workbench={ "39e92f80-9286-5612-85d1-639fa47ec57d": { @@ -907,6 +965,7 @@ def test_pipeline_with_control_loop_made_of_dynamic_services_is_allowed( "project_id": str(project_with_dynamic_node.uuid), "start_pipeline": True, "product_name": osparc_product_name, + "product_api_base_url": osparc_product_api_base_url, }, ) assert ( @@ -921,6 +980,7 @@ def test_pipeline_with_control_loop_made_of_dynamic_services_is_allowed( "project_id": str(project_with_dynamic_node.uuid), "start_pipeline": False, "product_name": osparc_product_name, + "product_api_base_url": osparc_product_api_base_url, }, ) assert ( @@ -928,18 +988,18 @@ def test_pipeline_with_control_loop_made_of_dynamic_services_is_allowed( ), f"response code is {response.status_code}, error: {response.text}" -def test_pipeline_with_cycle_containing_a_computational_service_is_forbidden( - minimal_configuration: None, +async def test_pipeline_with_cycle_containing_a_computational_service_is_forbidden( client: TestClient, - registered_user: Callable, - project: Callable, + 
create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], sleeper_service: dict[str, Any], jupyter_service: dict[str, Any], osparc_product_name: str, + osparc_product_api_base_url: str, ): - user = registered_user() + user = create_registered_user() # create a workbench with just 2 dynamic service in a cycle - project_with_cycly_and_comp_service = project( + project_with_cycly_and_comp_service = await project( user, workbench={ "39e92f80-9286-5612-85d1-639fa47ec57d": { @@ -989,10 +1049,11 @@ def test_pipeline_with_cycle_containing_a_computational_service_is_forbidden( "project_id": str(project_with_cycly_and_comp_service.uuid), "start_pipeline": True, "product_name": osparc_product_name, + "product_api_base_url": osparc_product_api_base_url, }, ) assert ( - response.status_code == status.HTTP_403_FORBIDDEN + response.status_code == status.HTTP_409_CONFLICT ), f"response code is {response.status_code}, error: {response.text}" # still this pipeline shall be createable if we do not want to start it @@ -1003,6 +1064,7 @@ def test_pipeline_with_cycle_containing_a_computational_service_is_forbidden( "project_id": str(project_with_cycly_and_comp_service.uuid), "start_pipeline": False, "product_name": osparc_product_name, + "product_api_base_url": osparc_product_api_base_url, }, ) assert ( @@ -1011,20 +1073,20 @@ def test_pipeline_with_cycle_containing_a_computational_service_is_forbidden( async def test_burst_create_computations( - minimal_configuration: None, async_client: httpx.AsyncClient, - registered_user: Callable, - project: Callable, + create_registered_user: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], fake_workbench_without_outputs: dict[str, Any], update_project_workbench_with_comp_tasks: Callable, fake_workbench_computational_pipeline_details: PipelineDetails, fake_workbench_computational_pipeline_details_completed: PipelineDetails, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ): - user = registered_user() - sleepers_project = project(user, workbench=fake_workbench_without_outputs) - sleepers_project2 = project(user, workbench=fake_workbench_without_outputs) + user = create_registered_user() + sleepers_project = await project(user, workbench=fake_workbench_without_outputs) + sleepers_project2 = await project(user, workbench=fake_workbench_without_outputs) NUMBER_OF_CALLS = 4 @@ -1038,6 +1100,7 @@ async def test_burst_create_computations( project=sleepers_project, user_id=user["id"], product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, start_pipeline=False, ) for _ in range(NUMBER_OF_CALLS) @@ -1048,6 +1111,7 @@ async def test_burst_create_computations( project=sleepers_project2, user_id=user["id"], product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, start_pipeline=False, ) ] @@ -1065,6 +1129,7 @@ async def test_burst_create_computations( project=sleepers_project, user_id=user["id"], product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, start_pipeline=True, ) for _ in range(NUMBER_OF_CALLS) @@ -1075,6 +1140,7 @@ async def test_burst_create_computations( project=sleepers_project2, user_id=user["id"], product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, start_pipeline=False, ) ] diff --git a/services/director-v2/tests/integration/02/conftest.py b/services/director-v2/tests/integration/02/conftest.py index 11be5a52c78..4e0b6a5b31f 100644 --- 
a/services/director-v2/tests/integration/02/conftest.py +++ b/services/director-v2/tests/integration/02/conftest.py @@ -1,16 +1,20 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument +from collections.abc import AsyncIterator from uuid import uuid4 import aiodocker import pytest +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingPlanGet, +) from models_library.projects_networks import ProjectsNetworks from models_library.services_resources import ( ServiceResourcesDict, ServiceResourcesDictHelpers, ) -from pydantic import parse_obj_as +from pydantic import TypeAdapter from pytest_mock.plugin import MockerFixture @@ -20,7 +24,9 @@ def network_name() -> str: @pytest.fixture -async def ensure_swarm_and_networks(network_name: str, docker_swarm: None): +async def ensure_swarm_and_networks( + network_name: str, docker_swarm: None +) -> AsyncIterator[None]: """ Make sure to always have a docker swarm network. If one is not present crete one. There can not be more then one. @@ -58,15 +64,28 @@ def mock_projects_networks_repository(mocker: MockerFixture) -> None: "simcore_service_director_v2.modules.db.repositories." "projects_networks.ProjectsNetworksRepository.get_projects_networks" ), - return_value=ProjectsNetworks.parse_obj( - dict(project_uuid=uuid4(), networks_with_aliases={}) + return_value=ProjectsNetworks.model_validate( + {"project_uuid": uuid4(), "networks_with_aliases": {}} ), ) @pytest.fixture def service_resources() -> ServiceResourcesDict: - return parse_obj_as( - ServiceResourcesDict, - ServiceResourcesDictHelpers.Config.schema_extra["examples"][0], + return TypeAdapter(ServiceResourcesDict).validate_python( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"][0], + ) + + +@pytest.fixture +def mock_resource_usage_tracker(mocker: MockerFixture) -> None: + base_module = "simcore_service_director_v2.modules.resource_usage_tracker_client" + service_pricing_plan = RutPricingPlanGet.model_validate( + RutPricingPlanGet.model_config["json_schema_extra"]["examples"][1] + ) + for unit in service_pricing_plan.pricing_units: + unit.specific_info.aws_ec2_instances.clear() + mocker.patch( + f"{base_module}.ResourceUsageTrackerClient.get_default_service_pricing_plan", + return_value=service_pricing_plan, ) diff --git a/services/director-v2/tests/integration/02/test_dynamic_services_routes.py b/services/director-v2/tests/integration/02/test_dynamic_services_routes.py index 95087e29978..e4a5cc39047 100644 --- a/services/director-v2/tests/integration/02/test_dynamic_services_routes.py +++ b/services/director-v2/tests/integration/02/test_dynamic_services_routes.py @@ -1,12 +1,15 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments import asyncio import json import logging +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable from contextlib import asynccontextmanager -from typing import Any, AsyncIterable, AsyncIterator, Callable -from unittest.mock import Mock +from typing import Any +from unittest.mock import MagicMock import aiodocker import pytest @@ -23,8 +26,9 @@ ) from models_library.users import UserID from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_docker import 
get_localhost_ip from servicelib.common_headers import ( X_DYNAMIC_SIDECAR_REQUEST_DNS, X_DYNAMIC_SIDECAR_REQUEST_SCHEME, @@ -34,11 +38,12 @@ from settings_library.redis import RedisSettings from simcore_service_director_v2.core.application import init_app from simcore_service_director_v2.core.settings import AppSettings -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed from utils import ensure_network_cleanup, patch_dynamic_service_url +from yarl import URL SERVICE_IS_READY_TIMEOUT = 2 * 60 @@ -47,15 +52,19 @@ logger = logging.getLogger(__name__) pytest_simcore_core_services_selection = [ + "agent", "catalog", "director", "migration", "postgres", "rabbit", "redis", + "redis", + "storage", ] pytest_simcore_ops_services_selection = [ "adminer", + "minio", ] @@ -67,31 +76,30 @@ def minimal_configuration( dy_static_file_server_dynamic_sidecar_service: dict, simcore_services_ready: None, rabbit_service: RabbitSettings, -): - ... + storage_service: URL, +): ... @pytest.fixture -def mock_env(mock_env: EnvVarsDict, monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: - monkeypatch.setenv("RABBIT_USER", "admin") - monkeypatch.setenv("RABBIT_PASSWORD", "adminadmin") - return mock_env | {"RABBIT_USER": "admin", "RABBIT_PASSWORD": "adminadmin"} +def mock_env(mock_env: EnvVarsDict, minimal_configuration) -> None: ... @pytest.fixture -def user_db(registered_user: Callable[..., dict[str, Any]]) -> dict[str, Any]: - user = registered_user() +def user_db(create_registered_user: Callable[..., dict[str, Any]]) -> dict[str, Any]: + user = create_registered_user() return user @pytest.fixture -def user_id(user_db) -> UserID: +def user_id(user_db: dict[str, Any]) -> UserID: return UserID(user_db["id"]) @pytest.fixture -def project_id(user_db, project: Callable[..., ProjectAtDB]) -> str: - prj = project(user=user_db) +async def project_id( + user_db: dict[str, Any], project: Callable[..., Awaitable[ProjectAtDB]] +) -> str: + prj = await project(user=user_db) return f"{prj.uuid}" @@ -109,17 +117,22 @@ def start_request_data( service_resources: ServiceResourcesDict, ensure_swarm_and_networks: None, osparc_product_name: str, + osparc_product_api_base_url: str, ) -> dict[str, Any]: - return dict( - user_id=user_id, - project_id=project_id, - product_name=osparc_product_name, - service_uuid=node_uuid, - service_key=dy_static_file_server_dynamic_sidecar_service["image"]["name"], - service_version=dy_static_file_server_dynamic_sidecar_service["image"]["tag"], - request_scheme="http", - request_dns="localhost:50000", - settings=[ + return { + "user_id": user_id, + "project_id": project_id, + "product_name": osparc_product_name, + "product_api_base_url": osparc_product_api_base_url, + "service_uuid": node_uuid, + "service_key": dy_static_file_server_dynamic_sidecar_service["image"]["name"], + "service_version": dy_static_file_server_dynamic_sidecar_service["image"][ + "tag" + ], + "request_scheme": "http", + "request_dns": "localhost:50000", + "can_save": True, + "settings": [ { "name": "resources", "type": "Resources", @@ -132,46 +145,53 @@ def start_request_data( "value": ["node.platform.os == linux"], }, ], - paths_mapping={"outputs_path": "/tmp/outputs", "inputs_path": "/tmp/inputs"}, - service_resources=ServiceResourcesDictHelpers.create_jsonable( + "paths_mapping": { + "outputs_path": "/tmp/outputs", # noqa: S108 + "inputs_path": "/tmp/inputs", # noqa: 
S108 + }, + "service_resources": ServiceResourcesDictHelpers.create_jsonable( service_resources ), - ) + } @pytest.fixture async def director_v2_client( - minimal_configuration: None, mock_env: EnvVarsDict, network_name: str, redis_settings: RedisSettings, monkeypatch: pytest.MonkeyPatch, + faker: Faker, ) -> AsyncIterable[TestClient]: - monkeypatch.setenv("SC_BOOT_MODE", "production") - monkeypatch.setenv("DYNAMIC_SIDECAR_EXPOSE_PORT", "true") - monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", network_name) + setenvs_from_dict( + monkeypatch, + { + "SC_BOOT_MODE": "production", + "DYNAMIC_SIDECAR_EXPOSE_PORT": "true", + "SIMCORE_SERVICES_NETWORK_NAME": network_name, + "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "true", + "DYNAMIC_SIDECAR_LOG_LEVEL": "DEBUG", + "DIRECTOR_V2_LOGLEVEL": "DEBUG", + "DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS": "{}", + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "false", + "COMPUTATIONAL_BACKEND_ENABLED": "false", + "R_CLONE_PROVIDER": "MINIO", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + # patch host for dynamic-sidecar, not reachable via localhost + # the dynamic-sidecar (running inside a container) will use + # this address to reach the rabbit service + "RABBIT_HOST": f"{get_localhost_ip()}", + "REDIS_HOST": redis_settings.REDIS_HOST, + "REDIS_PORT": f"{redis_settings.REDIS_PORT}", + "REDIS_PASSWORD": f"{redis_settings.REDIS_PASSWORD.get_secret_value()}", + "DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED": "1", + }, + ) monkeypatch.delenv("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", raising=False) - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - monkeypatch.setenv("DYNAMIC_SIDECAR_LOG_LEVEL", "DEBUG") - monkeypatch.setenv("DIRECTOR_V2_LOGLEVEL", "DEBUG") - - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "false") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "true") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - # patch host for dynamic-sidecar, not reachable via localhost - # the dynamic-sidecar (running inside a container) will use - # this address to reach the rabbit service - monkeypatch.setenv("RABBIT_HOST", f"{get_localhost_ip()}") - - monkeypatch.setenv("REDIS_HOST", redis_settings.REDIS_HOST) - monkeypatch.setenv("REDIS_PORT", f"{redis_settings.REDIS_PORT}") settings = AppSettings.create_from_envs() @@ -198,27 +218,34 @@ async def ensure_services_stopped( assert delete_result is True scheduler_interval = ( - director_v2_client.application.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS + director_v2_client.application.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL ) # sleep enough to ensure the observation cycle properly stopped the service - await asyncio.sleep(2 * scheduler_interval) + await asyncio.sleep(2 * scheduler_interval.total_seconds()) await ensure_network_cleanup(docker_client, project_id) @pytest.fixture def mock_project_repository(mocker: MockerFixture) -> None: + class ExtendedMagicMock(MagicMock): + @property + def name(self) -> str: + return "test_name" + + @property + def 
label(self) -> str: + return "test_label" + mocker.patch( f"{DIRECTOR_V2_MODULES}.db.repositories.projects.ProjectsRepository.get_project", - side_effect=lambda *args, **kwargs: Mock(), + side_effect=lambda *args, **kwargs: ExtendedMagicMock(), ) @pytest.fixture def mock_dynamic_sidecar_api_calls(mocker: MockerFixture) -> None: - class_path = ( - f"{DIRECTOR_V2_MODULES}.dynamic_sidecar.api_client.DynamicSidecarClient" - ) + class_path = f"{DIRECTOR_V2_MODULES}.dynamic_sidecar.api_client.SidecarsClient" for function_name, return_value in [ ("pull_service_output_ports", None), ("restore_service_state", None), @@ -268,6 +295,7 @@ async def key_version_expected( return results +@pytest.mark.flaky(max_runs=3) async def test_start_status_stop( director_v2_client: TestClient, node_uuid: str, @@ -277,6 +305,9 @@ async def test_start_status_stop( mock_dynamic_sidecar_api_calls: None, mock_projects_networks_repository: None, mock_projects_repository: None, + mocked_service_awaits_manual_interventions: None, + mock_resource_usage_tracker: None, + mock_osparc_variables_api_auth_rpc: None, ): # NOTE: this test does not like it when the catalog is not fully ready!!! diff --git a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py index f5509fc0e54..30563157d6d 100644 --- a/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py +++ b/services/director-v2/tests/integration/02/test_dynamic_sidecar_nodeports_integration.py @@ -1,38 +1,40 @@ # pylint: disable=protected-access # pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments # pylint: disable=unused-argument +# pylint:disable=too-many-positional-arguments import asyncio import hashlib import json import logging import os -from collections import namedtuple -from itertools import tee +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Coroutine from pathlib import Path -from typing import ( - Any, - AsyncIterable, - Awaitable, - Callable, - Iterable, - Iterator, - Optional, - cast, -) +from typing import Any, NamedTuple, cast from uuid import uuid4 import aioboto3 import aiodocker -import aiopg.sa import httpx import pytest import sqlalchemy as sa from aiodocker.containers import DockerContainer -from aiopg.sa import Engine +from faker import Faker from fastapi import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID -from models_library.projects import Node, ProjectAtDB, ProjectID, Workbench +from helpers.shared_comp_utils import ( + assert_and_wait_for_pipeline_status, + assert_computation_task_out_obj, +) +from models_library.api_schemas_directorv2.computations import ComputationGet +from models_library.clusters import ClusterAuthentication +from models_library.projects import ( + Node, + NodesDict, + ProjectAtDB, + ProjectID, + ProjectIDStr, +) from models_library.projects_networks import ( PROJECT_NETWORK_PREFIX, ContainerAliases, @@ -43,9 +45,11 @@ from models_library.projects_pipeline import PipelineDetails from models_library.projects_state import RunningState from models_library.users import UserID -from pytest import MonkeyPatch +from pydantic import AnyHttpUrl, TypeAdapter from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_docker import get_localhost_ip +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from 
pytest_simcore.helpers.typing_env import EnvVarsDict from servicelib.fastapi.long_running_tasks.client import ( Client, ProgressMessage, @@ -54,26 +58,28 @@ periodic_task_result, ) from servicelib.progress_bar import ProgressBarData +from servicelib.sequences_utils import pairwise from settings_library.rabbit import RabbitSettings from settings_library.redis import RedisSettings -from shared_comp_utils import ( - assert_and_wait_for_pipeline_status, - assert_computation_task_out_obj, -) +from settings_library.storage import StorageSettings +from settings_library.tracing import TracingSettings from simcore_postgres_database.models.comp_pipeline import comp_pipeline from simcore_postgres_database.models.comp_tasks import comp_tasks from simcore_postgres_database.models.projects_networks import projects_networks -from simcore_postgres_database.models.services import services_access_rights from simcore_sdk import node_ports_v2 from simcore_sdk.node_data import data_manager +from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB from simcore_sdk.node_ports_v2 import DBManager, Nodeports, Port -from simcore_service_director_v2.core.settings import AppSettings, RCloneSettings -from simcore_service_director_v2.models.schemas.comp_tasks import ComputationGet -from simcore_service_director_v2.models.schemas.constants import ( - DYNAMIC_SIDECAR_SERVICE_PREFIX, +from simcore_service_director_v2.constants import DYNAMIC_SIDECAR_SERVICE_PREFIX +from simcore_service_director_v2.core.dynamic_services_settings.sidecar import ( + RCloneSettings, ) +from simcore_service_director_v2.core.settings import AppSettings +from simcore_service_director_v2.modules import storage as dv2_modules_storage from sqlalchemy.dialects.postgresql import insert as pg_insert -from tenacity._asyncio import AsyncRetrying +from sqlalchemy.ext.asyncio import AsyncEngine +from tenacity import TryAgain +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt, stop_after_delay from tenacity.wait import wait_fixed @@ -93,6 +99,7 @@ from yarl import URL pytest_simcore_core_services_selection = [ + "agent", "catalog", "dask-scheduler", "dask-sidecar", @@ -100,18 +107,29 @@ "migration", "postgres", "rabbit", + "redis", "storage", + "sto-worker", "redis", ] pytest_simcore_ops_services_selection = [ "adminer", "minio", + "portainer", ] -ServicesNodeUUIDs = namedtuple("ServicesNodeUUIDs", "sleeper, dy, dy_compose_spec") -InputsOutputs = namedtuple("InputsOutputs", "inputs, outputs") +class ServicesNodeUUIDs(NamedTuple): + sleeper: str + dy: str + dy_compose_spec: str + + +class InputsOutputs(NamedTuple): + inputs: dict[str, Any] + outputs: dict[str, Any] + DY_VOLUMES: str = "/dy-volumes/" DY_SERVICES_STATE_PATH: Path = Path(DY_VOLUMES) / "workdir/generated-data" @@ -131,7 +149,8 @@ @pytest.fixture -def minimal_configuration( # pylint:disable=too-many-arguments +async def minimal_configuration( + wait_for_catalog_service: Callable[[UserID, str], Awaitable[None]], sleeper_service: dict, dy_static_file_server_dynamic_sidecar_service: dict, dy_static_file_server_dynamic_sidecar_compose_spec_service: dict, @@ -144,29 +163,15 @@ def minimal_configuration( # pylint:disable=too-many-arguments dask_scheduler_service: str, dask_sidecar_service: None, ensure_swarm_and_networks: None, + minio_s3_settings_envs: EnvVarsDict, + current_user: dict[str, Any], osparc_product_name: str, -) -> Iterator[None]: - +) -> AsyncIterator[None]: + await 
wait_for_catalog_service(current_user["id"], osparc_product_name) with postgres_db.connect() as conn: # pylint: disable=no-value-for-parameter conn.execute(comp_tasks.delete()) conn.execute(comp_pipeline.delete()) - # NOTE: ensure access to services to everyone [catalog access needed] - for service in ( - dy_static_file_server_dynamic_sidecar_service, - dy_static_file_server_dynamic_sidecar_compose_spec_service, - ): - service_image = service["image"] - conn.execute( - services_access_rights.insert().values( - key=service_image["name"], - version=service_image["tag"], - gid=1, - execute_access=1, - write_access=0, - product_name=osparc_product_name, - ) - ) yield @@ -217,13 +222,6 @@ def fake_dy_success(mocks_dir: Path) -> dict[str, Any]: return json.loads(fake_dy_status_success.read_text()) -@pytest.fixture -def fake_dy_published(mocks_dir: Path) -> dict[str, Any]: - fake_dy_status_published = mocks_dir / "fake_dy_status_published.json" - assert fake_dy_status_published.exists() - return json.loads(fake_dy_status_published.read_text()) - - @pytest.fixture def services_node_uuids( fake_dy_workbench: dict[str, Any], @@ -235,14 +233,16 @@ def _get_node_uuid(registry_service_data: dict) -> str: key = registry_service_data["schema"]["key"] version = registry_service_data["schema"]["version"] + found_node_uuid: str | None = None for node_uuid, workbench_service_data in fake_dy_workbench.items(): if ( workbench_service_data["key"] == key and workbench_service_data["version"] == version ): - return node_uuid - - assert False, f"No node_uuid found for {key}:{version}" + found_node_uuid = node_uuid + break + assert found_node_uuid is not None, f"No node_uuid found for {key}:{version}" + return found_node_uuid return ServicesNodeUUIDs( sleeper=_get_node_uuid(sleeper_service), @@ -254,21 +254,21 @@ def _get_node_uuid(registry_service_data: dict) -> str: @pytest.fixture -def current_user(registered_user: Callable) -> dict[str, Any]: - return registered_user() +def current_user(create_registered_user: Callable) -> dict[str, Any]: + return create_registered_user() @pytest.fixture async def current_study( current_user: dict[str, Any], - project: Callable[..., ProjectAtDB], + project: Callable[..., Awaitable[ProjectAtDB]], fake_dy_workbench: dict[str, Any], async_client: httpx.AsyncClient, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], ) -> ProjectAtDB: - - project_at_db = project(current_user, workbench=fake_dy_workbench) + project_at_db = await project(current_user, workbench=fake_dy_workbench) # create entries in comp_task table in order to pull output ports await create_pipeline( @@ -277,6 +277,7 @@ async def current_study( user_id=current_user["id"], start_pipeline=False, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, ) return project_at_db @@ -293,13 +294,12 @@ def workbench_dynamic_services( @pytest.fixture -async def db_manager(aiopg_engine: aiopg.sa.engine.Engine) -> DBManager: - return DBManager(aiopg_engine) +async def db_manager(sqlalchemy_async_engine: AsyncEngine) -> DBManager: + return DBManager(sqlalchemy_async_engine) def _is_docker_r_clone_plugin_installed() -> bool: - is_plugin_installed = "rclone:" in run_command("docker plugin ls") - return is_plugin_installed + return "rclone:" in run_command("docker plugin ls") @pytest.fixture( @@ -318,16 +318,36 @@ def dev_feature_r_clone_enabled(request) -> str: return request.param -@pytest.fixture(scope="function") +@pytest.fixture 
+async def patch_storage_setup( + mocker: MockerFixture, +) -> None: + local_settings = StorageSettings.create_from_envs() + + original_setup = dv2_modules_storage.setup + + def setup( + app: FastAPI, + storage_settings: StorageSettings, + tracing_settings: TracingSettings | None, + ) -> None: + original_setup( + app, storage_settings=local_settings, tracing_settings=tracing_settings + ) + + mocker.patch("simcore_service_director_v2.modules.storage.setup", side_effect=setup) + + +@pytest.fixture def mock_env( - monkeypatch: MonkeyPatch, - redis_service: RedisSettings, + mock_env: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, network_name: str, dev_feature_r_clone_enabled: str, - rabbit_service: RabbitSettings, dask_scheduler_service: str, - minio_config: dict[str, Any], - storage_service: URL, + dask_scheduler_auth: ClusterAuthentication, + minimal_configuration: None, + patch_storage_setup: None, ) -> None: # Works as below line in docker.compose.yml # ${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} @@ -337,45 +357,48 @@ def mock_env( image_name = f"{registry}/dynamic-sidecar:{image_tag}" + local_settings = StorageSettings.create_from_envs() + logger.warning("Patching to: DYNAMIC_SIDECAR_IMAGE=%s", image_name) - monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", image_name) - monkeypatch.setenv("TRAEFIK_SIMCORE_ZONE", "test_traefik_zone") - monkeypatch.setenv("SWARM_STACK_NAME", "test_swarm_name") - - monkeypatch.setenv("SC_BOOT_MODE", "production") - monkeypatch.setenv("DYNAMIC_SIDECAR_EXPOSE_PORT", "true") - monkeypatch.setenv("DYNAMIC_SIDECAR_LOG_LEVEL", "DEBUG") - monkeypatch.setenv("PROXY_EXPOSE_PORT", "true") - monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", network_name) - monkeypatch.delenv("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", raising=False) - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - monkeypatch.setenv("DIRECTOR_V2_LOGLEVEL", "DEBUG") - monkeypatch.setenv("DYNAMIC_SIDECAR_TRAEFIK_ACCESS_LOG", "true") - monkeypatch.setenv("DYNAMIC_SIDECAR_TRAEFIK_LOGLEVEL", "debug") - # patch host for dynamic-sidecar, not reachable via localhost - # the dynamic-sidecar (running inside a container) will use - # this address to reach the rabbit service - monkeypatch.setenv("RABBIT_HOST", f"{get_localhost_ip()}") - monkeypatch.setenv("POSTGRES_HOST", f"{get_localhost_ip()}") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", minio_config["client"]["endpoint"]) - monkeypatch.setenv("S3_ACCESS_KEY", minio_config["client"]["access_key"]) - monkeypatch.setenv("S3_SECRET_KEY", minio_config["client"]["secret_key"]) - monkeypatch.setenv("S3_BUCKET_NAME", minio_config["bucket_name"]) - monkeypatch.setenv("S3_SECURE", f"{minio_config['client']['secure']}") - monkeypatch.setenv( - "DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED", dev_feature_r_clone_enabled + setenvs_from_dict( + monkeypatch, + { + "STORAGE_HOST": "storage", + "STORAGE_PORT": "8080", + "NODE_PORTS_STORAGE_AUTH": json.dumps( + { + "STORAGE_HOST": local_settings.STORAGE_HOST, + "STORAGE_PORT": local_settings.STORAGE_PORT, + } + ), + "DYNAMIC_SIDECAR_IMAGE": image_name, + "DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS": "{}", + "TRAEFIK_SIMCORE_ZONE": "test_traefik_zone", + "SWARM_STACK_NAME": "pytest-simcore", + "SC_BOOT_MODE": "production", + "DYNAMIC_SIDECAR_EXPOSE_PORT": "true", + "DYNAMIC_SIDECAR_LOG_LEVEL": "DEBUG", + "PROXY_EXPOSE_PORT": "true", + "SIMCORE_SERVICES_NETWORK_NAME": network_name, + "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "true", + 
"DIRECTOR_V2_LOGLEVEL": "DEBUG", + "DYNAMIC_SIDECAR_TRAEFIK_ACCESS_LOG": "true", + "DYNAMIC_SIDECAR_TRAEFIK_LOGLEVEL": "debug", + # patch host for dynamic-sidecar, not reachable via localhost + # the dynamic-sidecar (running inside a container) will use + # this address to reach the rabbit service + "RABBIT_HOST": f"{get_localhost_ip()}", + "POSTGRES_HOST": f"{get_localhost_ip()}", + "R_CLONE_PROVIDER": "MINIO", + "DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED": dev_feature_r_clone_enabled, + "COMPUTATIONAL_BACKEND_ENABLED": "true", + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "true", + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL": dask_scheduler_service, + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH": dask_scheduler_auth.model_dump_json(), + "DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED": "1", + }, ) - monkeypatch.setenv("DIRECTOR_V2_TRACING", "null") - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", - dask_scheduler_service, - ) - monkeypatch.setenv("REDIS_HOST", redis_service.REDIS_HOST) - monkeypatch.setenv("REDIS_PORT", f"{redis_service.REDIS_PORT}") - - # always test the node limit feature, by default is disabled - monkeypatch.setenv("DYNAMIC_SIDECAR_DOCKER_NODE_RESOURCE_LIMITS_ENABLED", "true") + monkeypatch.delenv("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", raising=False) @pytest.fixture @@ -414,13 +437,13 @@ async def projects_networks_db( # NOTE: director-v2 does not have access to the webserver which creates this # injecting all dynamic-sidecar started services on a default networks - container_aliases: ContainerAliases = ContainerAliases.parse_obj({}) + container_aliases: ContainerAliases = ContainerAliases.model_validate({}) for k, (node_uuid, node) in enumerate(current_study.workbench.items()): if not is_legacy(node): container_aliases[node_uuid] = f"networkable_alias_{k}" - networks_with_aliases: NetworksWithAliases = NetworksWithAliases.parse_obj({}) + networks_with_aliases: NetworksWithAliases = NetworksWithAliases.model_validate({}) default_network_name = f"{PROJECT_NETWORK_PREFIX}_{current_study.uuid}_test" networks_with_aliases[default_network_name] = container_aliases @@ -428,10 +451,10 @@ async def projects_networks_db( project_uuid=current_study.uuid, networks_with_aliases=networks_with_aliases ) - engine: Engine = initialized_app.state.engine + engine: AsyncEngine = initialized_app.state.engine - async with engine.acquire() as conn: - row_data = projects_networks_to_insert.dict() + async with engine.begin() as conn: + row_data = projects_networks_to_insert.model_dump(mode="json") insert_stmt = pg_insert(projects_networks).values(**row_data) upsert_snapshot = insert_stmt.on_conflict_do_update( constraint=projects_networks.primary_key, set_=row_data @@ -439,16 +462,24 @@ async def projects_networks_db( await conn.execute(upsert_snapshot) +@pytest.fixture +def mock_io_log_redirect_cb() -> LogRedirectCB: + async def _mocked_function(*args, **kwargs) -> None: + pass + + return _mocked_function + + async def _get_mapped_nodeports_values( - user_id: UserID, project_id: str, workbench: Workbench, db_manager: DBManager + user_id: UserID, project_id: str, workbench: NodesDict, db_manager: DBManager ) -> dict[str, InputsOutputs]: result: dict[str, InputsOutputs] = {} for node_uuid in workbench: PORTS: Nodeports = await node_ports_v2.ports( user_id=user_id, - project_id=project_id, - node_uuid=NodeIDStr(node_uuid), + project_id=ProjectIDStr(project_id), + node_uuid=TypeAdapter(NodeIDStr).validate_python(node_uuid), db_manager=db_manager, ) result[str(node_uuid)] = 
InputsOutputs( @@ -494,8 +525,8 @@ async def _assert_port_values( # files - async def _int_value_port(port: Port) -> Optional[int]: - file_path = cast(Optional[Path], await port.get()) + async def _int_value_port(port: Port) -> int | None: + file_path = cast(Path | None, await port.get()) if file_path is None: return None return int(file_path.read_text()) @@ -609,30 +640,40 @@ async def _fetch_data_from_container( async def _fetch_data_via_data_manager( - dir_tag: str, user_id: UserID, project_id: str, service_uuid: str, temp_dir: Path + r_clone_settings: RCloneSettings, + dir_tag: str, + user_id: UserID, + project_id: ProjectID, + service_uuid: NodeID, + temp_dir: Path, + io_log_redirect_cb: LogRedirectCB, + faker: Faker, ) -> Path: save_to = temp_dir / f"data-manager_{dir_tag}_{uuid4()}" save_to.mkdir(parents=True, exist_ok=True) assert ( - await data_manager.exists( + await data_manager._state_metadata_entry_exists( # noqa: SLF001 user_id=user_id, project_id=project_id, node_uuid=service_uuid, - file_path=DY_SERVICES_STATE_PATH, + path=DY_SERVICES_STATE_PATH, + is_archive=False, ) is True ) - async with ProgressBarData(steps=1) as progress_bar: - await data_manager.pull( + async with ProgressBarData(num_steps=1, description=faker.pystr()) as progress_bar: + await data_manager._pull_directory( # noqa: SLF001 user_id=user_id, project_id=project_id, node_uuid=service_uuid, - file_or_folder=DY_SERVICES_STATE_PATH, + destination_path=DY_SERVICES_STATE_PATH, save_to=save_to, - io_log_redirect_cb=None, + io_log_redirect_cb=io_log_redirect_cb, + r_clone_settings=r_clone_settings, progress_bar=progress_bar, + aws_s3_cli_settings=None, ) return save_to @@ -642,7 +683,7 @@ async def _fetch_data_via_aioboto( r_clone_settings: RCloneSettings, dir_tag: str, temp_dir: Path, - node_id: NodeID, + node_id: NodeIDStr, project_id: ProjectID, ) -> Path: save_to = temp_dir / f"aioboto_{dir_tag}_{uuid4()}" @@ -671,6 +712,7 @@ async def _fetch_data_via_aioboto( async def _start_and_wait_for_dynamic_services_ready( director_v2_client: httpx.AsyncClient, product_name: str, + product_api_base_url: str, user_id: UserID, workbench_dynamic_services: dict[str, Node], current_study: ProjectAtDB, @@ -682,6 +724,7 @@ async def _start_and_wait_for_dynamic_services_ready( assert_start_service( director_v2_client=director_v2_client, product_name=product_name, + product_api_base_url=product_api_base_url, user_id=user_id, project_id=str(current_study.uuid), service_key=node.key, @@ -699,7 +742,7 @@ async def _start_and_wait_for_dynamic_services_ready( for service_uuid in workbench_dynamic_services: dynamic_service_url = await patch_dynamic_service_url( # pylint: disable=protected-access - app=director_v2_client._transport.app, + app=director_v2_client._transport.app, # type: ignore # noqa: SLF001 node_uuid=service_uuid, ) dynamic_services_urls[service_uuid] = dynamic_service_url @@ -720,39 +763,35 @@ async def _wait_for_dy_services_to_fully_stop( director_v2_client: httpx.AsyncClient, ) -> None: # pylint: disable=protected-access + app: FastAPI = director_v2_client._transport.app # type: ignore # noqa: SLF001 to_observe = ( - director_v2_client._transport.app.state.dynamic_sidecar_scheduler._scheduler._to_observe + app.state.dynamic_sidecar_scheduler.scheduler._to_observe # noqa: SLF001 ) - # TODO: ANE please use tenacity - for i in range(TIMEOUT_DETECT_DYNAMIC_SERVICES_STOPPED): - print( - f"Sleeping for {i+1}/{TIMEOUT_DETECT_DYNAMIC_SERVICES_STOPPED} " - "seconds while waiting for removal of all dynamic-sidecars" - ) - 
await asyncio.sleep(1) - if len(to_observe) == 0: - break - - if i == TIMEOUT_DETECT_DYNAMIC_SERVICES_STOPPED - 1: - assert False, "Timeout reached" - -def _pairwise(iterable) -> Iterable[tuple[Any, Any]]: - "s -> (s0,s1), (s1,s2), (s2, s3), ..." - a, b = tee(iterable) - next(b, None) - return zip(a, b) + async for attempt in AsyncRetrying( + stop=stop_after_delay(TIMEOUT_DETECT_DYNAMIC_SERVICES_STOPPED), + wait=wait_fixed(1), + reraise=True, + retry=retry_if_exception_type(TryAgain), + ): + with attempt: + print( + f"Sleeping for {attempt.retry_state.attempt_number}/{TIMEOUT_DETECT_DYNAMIC_SERVICES_STOPPED} " + "seconds while waiting for removal of all dynamic-sidecars" + ) + if len(to_observe) != 0: + raise TryAgain def _assert_same_set(*sets_to_compare: set[Any]) -> None: - for first, second in _pairwise(sets_to_compare): + for first, second in pairwise(sets_to_compare): assert first == second def _get_file_hashes_in_path(path_to_hash: Path) -> set[tuple[Path, str]]: def _hash_path(path: Path): sha256_hash = hashlib.sha256() - with open(path, "rb") as f: + with Path.open(path, "rb") as f: # Read and update hash string value in blocks of 4K for byte_block in iter(lambda: f.read(4096), b""): sha256_hash.update(byte_block) @@ -787,7 +826,7 @@ async def _assert_push_non_file_outputs( logger.debug("Going to poll task %s", task_id) async def _debug_progress_callback( - message: ProgressMessage, percent: ProgressPercent, task_id: TaskId + message: ProgressMessage, percent: ProgressPercent | None, task_id: TaskId ) -> None: logger.debug("%s: %.2f %s", task_id, percent, message) @@ -795,7 +834,9 @@ async def _debug_progress_callback( Client( app=initialized_app, async_client=director_v2_client, - base_url=director_v2_client.base_url, + base_url=TypeAdapter(AnyHttpUrl).validate_python( + f"{director_v2_client.base_url}" + ), ), task_id, task_timeout=60, @@ -835,17 +876,24 @@ async def _assert_retrieve_completed( container_id ) - logs = " ".join(await container.log(stdout=True, stderr=True)) + logs = " ".join( + await cast( + Coroutine[Any, Any, list[str]], + container.log(stdout=True, stderr=True), + ) + ) assert ( _CONTROL_TESTMARK_DY_SIDECAR_NODEPORT_UPLOADED_MESSAGE in logs ), "TIP: Message missing suggests that the data was never uploaded: look in services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/nodeports.py" +@pytest.mark.flaky(max_runs=3) async def test_nodeports_integration( - # pylint: disable=too-many-arguments - minimal_configuration: None, cleanup_services_and_networks: None, projects_networks_db: None, + mocked_service_awaits_manual_interventions: None, + mock_resource_usage_tracker: None, + mock_osparc_variables_api_auth_rpc: None, initialized_app: FastAPI, update_project_workbench_with_comp_tasks: Callable, async_client: httpx.AsyncClient, @@ -856,11 +904,12 @@ async def test_nodeports_integration( workbench_dynamic_services: dict[str, Node], services_node_uuids: ServicesNodeUUIDs, fake_dy_success: dict[str, Any], - fake_dy_published: dict[str, Any], tmp_path: Path, - mocker: MockerFixture, osparc_product_name: str, + osparc_product_api_base_url: str, create_pipeline: Callable[..., Awaitable[ComputationGet]], + mock_io_log_redirect_cb: LogRedirectCB, + faker: Faker, ) -> None: """ Creates a new project with where the following connections @@ -890,15 +939,16 @@ async def test_nodeports_integration( `aioboto` instead of `docker` or `storage-data_manager API`. 
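
# NOTE: a minimal, self-contained sketch of the AsyncRetrying + TryAgain polling
# pattern that replaces the hand-rolled "sleep and count" loop in the hunk above.
# The `condition_met` callable and the 30 s budget are illustrative assumptions,
# not values taken from this test suite.
from collections.abc import Callable

from tenacity import TryAgain
from tenacity.asyncio import AsyncRetrying
from tenacity.retry import retry_if_exception_type
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed


async def wait_until(condition_met: Callable[[], bool]) -> None:
    # polls once per second; reraise=True surfaces TryAgain if the 30 s budget runs out
    async for attempt in AsyncRetrying(
        stop=stop_after_delay(30),
        wait=wait_fixed(1),
        retry=retry_if_exception_type(TryAgain),
        reraise=True,
    ):
        with attempt:
            if not condition_met():
                raise TryAgain  # condition not met yet -> wait and retry
# usage (inside a running event loop): await wait_until(lambda: not to_observe)
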
""" # STEP 1 - dynamic_services_urls: dict[ - str, str - ] = await _start_and_wait_for_dynamic_services_ready( - director_v2_client=async_client, - product_name=osparc_product_name, - user_id=current_user["id"], - workbench_dynamic_services=workbench_dynamic_services, - current_study=current_study, - catalog_url=services_endpoint["catalog"], + dynamic_services_urls: dict[str, str] = ( + await _start_and_wait_for_dynamic_services_ready( + director_v2_client=async_client, + product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, + user_id=current_user["id"], + workbench_dynamic_services=workbench_dynamic_services, + current_study=current_study, + catalog_url=services_endpoint["catalog"], + ) ) # STEP 2 @@ -908,25 +958,7 @@ async def test_nodeports_integration( user_id=current_user["id"], start_pipeline=True, product_name=osparc_product_name, - ) - - # check the contents is correct: a pipeline that just started gets PUBLISHED - await assert_computation_task_out_obj( - task_out, - project=current_study, - exp_task_state=RunningState.PUBLISHED, - exp_pipeline_details=PipelineDetails.parse_obj(fake_dy_published), - iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, - ) - - # wait for the computation to start - await assert_and_wait_for_pipeline_status( - async_client, - task_out.url, - current_user["id"], - current_study.uuid, - wait_for_states=[RunningState.STARTED], + product_api_base_url=osparc_product_api_base_url, ) # wait for the computation to finish (either by failing, success or abort) @@ -938,9 +970,8 @@ async def test_nodeports_integration( task_out, project=current_study, exp_task_state=RunningState.SUCCESS, - exp_pipeline_details=PipelineDetails.parse_obj(fake_dy_success), + exp_pipeline_details=PipelineDetails.model_validate(fake_dy_success), iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, ) update_project_workbench_with_comp_tasks(str(current_study.uuid)) @@ -1000,7 +1031,7 @@ async def test_nodeports_integration( # STEP 4 - app_settings: AppSettings = async_client._transport.app.state.settings + app_settings: AppSettings = async_client._transport.app.state.settings # type: ignore r_clone_settings: RCloneSettings = ( app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_R_CLONE_SETTINGS ) @@ -1071,11 +1102,14 @@ async def test_nodeports_integration( ) if app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED else await _fetch_data_via_data_manager( + r_clone_settings=r_clone_settings, dir_tag="dy", user_id=current_user["id"], - project_id=str(current_study.uuid), - service_uuid=services_node_uuids.dy, + project_id=current_study.uuid, + service_uuid=NodeID(services_node_uuids.dy), temp_dir=tmp_path, + io_log_redirect_cb=mock_io_log_redirect_cb, + faker=faker, ) ) @@ -1089,11 +1123,14 @@ async def test_nodeports_integration( ) if app_settings.DIRECTOR_V2_DEV_FEATURE_R_CLONE_MOUNTS_ENABLED else await _fetch_data_via_data_manager( + r_clone_settings=r_clone_settings, dir_tag="dy_compose_spec", user_id=current_user["id"], - project_id=str(current_study.uuid), - service_uuid=services_node_uuids.dy_compose_spec, + project_id=current_study.uuid, + service_uuid=NodeID(services_node_uuids.dy_compose_spec), temp_dir=tmp_path, + io_log_redirect_cb=mock_io_log_redirect_cb, + faker=faker, ) ) @@ -1102,6 +1139,7 @@ async def test_nodeports_integration( await _start_and_wait_for_dynamic_services_ready( director_v2_client=async_client, product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, user_id=current_user["id"], 
workbench_dynamic_services=workbench_dynamic_services, current_study=current_study, diff --git a/services/director-v2/tests/integration/02/test_mixed_dynamic_sidecar_and_legacy_project.py b/services/director-v2/tests/integration/02/test_mixed_dynamic_sidecar_and_legacy_project.py index 8c28ca39974..61af3dd5823 100644 --- a/services/director-v2/tests/integration/02/test_mixed_dynamic_sidecar_and_legacy_project.py +++ b/services/director-v2/tests/integration/02/test_mixed_dynamic_sidecar_and_legacy_project.py @@ -1,28 +1,31 @@ -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=too-many-arguments +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable import asyncio import logging -import os +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable from contextlib import asynccontextmanager -from typing import Any, AsyncIterable, AsyncIterator, Callable, Iterable +from typing import Any, cast +from unittest import mock import aiodocker import httpx import pytest import sqlalchemy as sa -from asgi_lifespan import LifespanManager from faker import Faker +from fastapi import FastAPI from models_library.projects import ProjectAtDB from models_library.services_resources import ServiceResourcesDict -from pytest import MonkeyPatch +from models_library.users import UserID from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_docker import get_localhost_ip +from pytest_simcore.helpers.host import get_localhost_ip +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.rabbit import RabbitSettings from settings_library.redis import RedisSettings -from simcore_service_director_v2.core.application import init_app -from simcore_service_director_v2.core.settings import AppSettings from utils import ( assert_all_services_running, assert_services_reply_200, @@ -38,18 +41,68 @@ pytest_simcore_core_services_selection = [ + "agent", "catalog", "director", "migration", "postgres", "rabbit", - "storage", "redis", + "storage", ] -pytest_simcore_ops_services_selection = [ - "minio", -] +pytest_simcore_ops_services_selection = ["adminer", "minio", "portainer"] + + +@pytest.fixture() +def mock_env( + mock_env: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + redis_service: RedisSettings, + rabbit_service: RabbitSettings, + postgres_db: sa.engine.Engine, + postgres_host_config: dict[str, str], + minio_s3_settings_envs: EnvVarsDict, + storage_service: URL, + network_name: str, + services_endpoint: dict[str, URL], +) -> EnvVarsDict: + director_host = services_endpoint["director"].host + assert director_host + director_port = services_endpoint["director"].port + assert director_port + + catalog_host = services_endpoint["catalog"].host + assert catalog_host + catalog_port = services_endpoint["catalog"].port + assert catalog_port + + monkeypatch.delenv("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", raising=False) + mock_env.pop("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", None) + + return mock_env | setenvs_from_dict( + monkeypatch, + { + "DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS": "{}", + "TRAEFIK_SIMCORE_ZONE": "test_traefik_zone", + "SWARM_STACK_NAME": "pytest-simcore", + "DYNAMIC_SIDECAR_LOG_LEVEL": "DEBUG", + "SC_BOOT_MODE": "production", + "DYNAMIC_SIDECAR_EXPOSE_PORT": "true", + "PROXY_EXPOSE_PORT": "true", + 
"SIMCORE_SERVICES_NETWORK_NAME": network_name, + "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "true", + "POSTGRES_HOST": f"{get_localhost_ip()}", + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "false", + "COMPUTATIONAL_BACKEND_ENABLED": "false", + "R_CLONE_PROVIDER": "MINIO", + "DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED": "1", + "DIRECTOR_HOST": director_host, + "DIRECTOR_PORT": f"{director_port}", + "CATALOG_HOST": catalog_host, + "CATALOG_PORT": f"{catalog_port}", + }, + ) @pytest.fixture @@ -57,42 +110,36 @@ def minimal_configuration( dy_static_file_server_service: dict, dy_static_file_server_dynamic_sidecar_service: dict, dy_static_file_server_dynamic_sidecar_compose_spec_service: dict, - redis_service: RedisSettings, - postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - rabbit_service: RabbitSettings, simcore_services_ready: None, - storage_service: URL, ensure_swarm_and_networks: None, -): - ... +): ... @pytest.fixture def uuid_legacy(faker: Faker) -> str: - return faker.uuid4() + return cast(str, faker.uuid4()) @pytest.fixture def uuid_dynamic_sidecar(faker: Faker) -> str: - return faker.uuid4() + return cast(str, faker.uuid4()) @pytest.fixture def uuid_dynamic_sidecar_compose(faker: Faker) -> str: - return faker.uuid4() + return cast(str, faker.uuid4()) @pytest.fixture -def user_dict(registered_user: Callable) -> Iterable[dict[str, Any]]: - yield registered_user() +def user_dict(create_registered_user: Callable) -> dict[str, Any]: + return create_registered_user() @pytest.fixture async def dy_static_file_server_project( minimal_configuration: None, user_dict: dict[str, Any], - project: Callable, + project: Callable[..., Awaitable[ProjectAtDB]], dy_static_file_server_service: dict, dy_static_file_server_dynamic_sidecar_service: dict, dy_static_file_server_dynamic_sidecar_compose_spec_service: dict, @@ -107,7 +154,7 @@ def _assemble_node_data(spec: dict, label: str) -> dict[str, str]: "label": label, } - return project( + return await project( user=user_dict, workbench={ uuid_legacy: _assemble_node_data( @@ -126,66 +173,10 @@ def _assemble_node_data(spec: dict, label: str) -> dict[str, str]: ) -@pytest.fixture -async def director_v2_client( - redis_service: RedisSettings, - minimal_configuration: None, - minio_config: dict[str, Any], - storage_service: URL, - network_name: str, - monkeypatch: MonkeyPatch, -) -> AsyncIterable[httpx.AsyncClient]: - # Works as below line in docker.compose.yml - # ${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} - - registry = os.environ.get("DOCKER_REGISTRY", "local") - image_tag = os.environ.get("DOCKER_IMAGE_TAG", "production") - - image_name = f"{registry}/dynamic-sidecar:{image_tag}" - - logger.warning("Patching to: DYNAMIC_SIDECAR_IMAGE=%s", image_name) - monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", image_name) - monkeypatch.setenv("TRAEFIK_SIMCORE_ZONE", "test_traefik_zone") - monkeypatch.setenv("SWARM_STACK_NAME", "test_swarm_name") - monkeypatch.setenv("DYNAMIC_SIDECAR_LOG_LEVEL", "DEBUG") - - monkeypatch.setenv("SC_BOOT_MODE", "production") - monkeypatch.setenv("DYNAMIC_SIDECAR_EXPOSE_PORT", "true") - monkeypatch.setenv("PROXY_EXPOSE_PORT", "true") - monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", network_name) - monkeypatch.delenv("DYNAMIC_SIDECAR_MOUNT_PATH_DEV", raising=False) - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - - monkeypatch.setenv("POSTGRES_HOST", f"{get_localhost_ip()}") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "false") - 
monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "false") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", minio_config["client"]["endpoint"]) - monkeypatch.setenv("S3_ACCESS_KEY", minio_config["client"]["access_key"]) - monkeypatch.setenv("S3_SECRET_KEY", minio_config["client"]["secret_key"]) - monkeypatch.setenv("S3_BUCKET_NAME", minio_config["bucket_name"]) - monkeypatch.setenv("S3_SECURE", f"{minio_config['client']['secure']}") - - # patch host for dynamic-sidecar, not reachable via localhost - # the dynamic-sidecar (running inside a container) will use - # this address to reach the rabbit service - monkeypatch.setenv("RABBIT_HOST", f"{get_localhost_ip()}") - - monkeypatch.setenv("REDIS_HOST", redis_service.REDIS_HOST) - monkeypatch.setenv("REDIS_PORT", f"{redis_service.REDIS_PORT}") - - settings = AppSettings.create_from_envs() - - app = init_app(settings) - - async with LifespanManager(app): - async with httpx.AsyncClient(app=app, base_url="http://testserver") as client: - yield client - - @pytest.fixture async def ensure_services_stopped( - dy_static_file_server_project: ProjectAtDB, director_v2_client: httpx.AsyncClient + dy_static_file_server_project: ProjectAtDB, + initialized_app: FastAPI, ) -> AsyncIterable[None]: yield # ensure service cleanup when done testing @@ -204,20 +195,23 @@ async def ensure_services_stopped( # pylint: disable=protected-access scheduler_interval = ( - director_v2_client._transport.app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS + initialized_app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL ) # sleep enough to ensure the observation cycle properly stopped the service - await asyncio.sleep(2 * scheduler_interval) + await asyncio.sleep(2 * scheduler_interval.total_seconds()) await ensure_network_cleanup(docker_client, project_id) @pytest.fixture -def mock_dynamic_sidecar_client(mocker: MockerFixture) -> None: - class_path = "simcore_service_director_v2.modules.dynamic_sidecar.api_client.DynamicSidecarClient" +def mock_sidecars_client(mocker: MockerFixture) -> mock.Mock: + class_path = ( + "simcore_service_director_v2.modules.dynamic_sidecar.api_client.SidecarsClient" + ) for function_name, return_value in [ - ("pull_service_output_ports", None), - ("restore_service_state", None), + ("pull_service_output_ports", 0), + ("restore_service_state", 0), ("push_service_output_ports", None), + ("save_service_state", 0), ]: mocker.patch( f"{class_path}.{function_name}", @@ -231,7 +225,7 @@ def mock_dynamic_sidecar_client(mocker: MockerFixture) -> None: async def _mocked_context_manger(*args, **kwargs) -> AsyncIterator[None]: yield - mocker.patch( + return mocker.patch( "simcore_service_director_v2.modules.dynamic_sidecar.api_client._public.periodic_task_result", side_effect=_mocked_context_manger, ) @@ -239,15 +233,21 @@ async def _mocked_context_manger(*args, **kwargs) -> AsyncIterator[None]: @pytest.mark.flaky(max_runs=3) async def test_legacy_and_dynamic_sidecar_run( + initialized_app: FastAPI, + wait_for_catalog_service: Callable[[UserID, str], Awaitable[None]], dy_static_file_server_project: ProjectAtDB, user_dict: dict[str, Any], services_endpoint: dict[str, URL], - director_v2_client: httpx.AsyncClient, + async_client: httpx.AsyncClient, + osparc_product_name: str, + osparc_product_api_base_url: str, ensure_services_stopped: None, mock_projects_networks_repository: None, - mock_dynamic_sidecar_client: None, + 
mock_sidecars_client: mock.Mock, service_resources: ServiceResourcesDict, - osparc_product_name: str, + mocked_service_awaits_manual_interventions: None, + mock_resource_usage_tracker: None, + mock_osparc_variables_api_auth_rpc: None, ): """ The test will start 3 dynamic services in the same project and check @@ -258,15 +258,14 @@ async def test_legacy_and_dynamic_sidecar_run( - dy-static-file-server-dynamic-sidecar (sidecared w/ std config) - dy-static-file-server-dynamic-sidecar-compose (sidecared w/ docker-compose) """ - # FIXME: ANE can you instead parametrize this test? - # why do we need to run all these services at the same time? it would be simpler one by one - + await wait_for_catalog_service(user_dict["id"], osparc_product_name) await asyncio.gather( *( assert_start_service( - director_v2_client=director_v2_client, + director_v2_client=async_client, # context product_name=osparc_product_name, + product_api_base_url=osparc_product_api_base_url, user_id=user_dict["id"], project_id=str(dy_static_file_server_project.uuid), # service @@ -285,22 +284,18 @@ async def test_legacy_and_dynamic_sidecar_run( if is_legacy(node): continue - await patch_dynamic_service_url( - # pylint: disable=protected-access - app=director_v2_client._transport.app, - node_uuid=node_id, - ) + await patch_dynamic_service_url(app=initialized_app, node_uuid=node_id) assert len(dy_static_file_server_project.workbench) == 3 await assert_all_services_running( - director_v2_client, + async_client, workbench=dy_static_file_server_project.workbench, ) # query the service directly and check if it responding accordingly await assert_services_reply_200( - director_v2_client=director_v2_client, + director_v2_client=async_client, workbench=dy_static_file_server_project.workbench, ) @@ -308,7 +303,7 @@ async def test_legacy_and_dynamic_sidecar_run( await asyncio.gather( *( assert_stop_service( - director_v2_client=director_v2_client, + director_v2_client=async_client, service_uuid=service_uuid, ) for service_uuid in dy_static_file_server_project.workbench diff --git a/services/director-v2/tests/integration/02/utils.py b/services/director-v2/tests/integration/02/utils.py index 6aa25c70b19..69c7ca81d4d 100644 --- a/services/director-v2/tests/integration/02/utils.py +++ b/services/director-v2/tests/integration/02/utils.py @@ -5,33 +5,43 @@ import logging import os import urllib.parse -from typing import Any, Optional +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager, suppress +from typing import Any import aiodocker import httpx from fastapi import FastAPI -from models_library.projects import Node +from models_library.basic_types import PortInt +from models_library.projects import Node, NodesDict +from models_library.projects_nodes_io import NodeID from models_library.services_resources import ( ServiceResourcesDict, ServiceResourcesDictHelpers, ) from models_library.users import UserID -from pydantic import PositiveInt, parse_obj_as -from pytest_simcore.helpers.utils_docker import get_localhost_ip +from pydantic import PositiveInt, TypeAdapter +from pytest_simcore.helpers.host import get_localhost_ip from servicelib.common_headers import ( X_DYNAMIC_SIDECAR_REQUEST_DNS, X_DYNAMIC_SIDECAR_REQUEST_SCHEME, X_SIMCORE_USER_AGENT, ) -from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.schemas.constants import ( +from simcore_service_director_v2.constants import ( DYNAMIC_PROXY_SERVICE_PREFIX, DYNAMIC_SIDECAR_SERVICE_PREFIX, ) +from 
simcore_service_director_v2.core.dynamic_services_settings.proxy import ( + DynamicSidecarProxySettings, +) +from simcore_service_director_v2.core.dynamic_services_settings.sidecar import ( + DynamicSidecarSettings, +) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.scheduler import ( DynamicSidecarsScheduler, ) -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt, stop_after_delay from tenacity.wait import wait_fixed @@ -82,19 +92,27 @@ async def _get_volume_names() -> set[str]: async def ensure_network_cleanup( docker_client: aiodocker.Docker, project_id: str ) -> None: - async for attempt in AsyncRetrying( - reraise=False, - stop=stop_after_attempt(20), - wait=wait_fixed(5), - ): - with attempt: - for network_name in { - x["Name"] for x in await docker_client.networks.list() - }: - if project_id in network_name: - network = await docker_client.networks.get(network_name) - delete_result = await network.delete() - assert delete_result is True + async def _try_to_clean(): + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_attempt(20), + wait=wait_fixed(5), + ): + with attempt: + for network_name in { + x["Name"] for x in await docker_client.networks.list() + }: + if project_id in network_name: + network = await docker_client.networks.get(network_name) + delete_result = await network.delete() + assert delete_result is True + + # NOTE: since this is ONLY used for cleanup + # in the on fixture teardown, relaxing a bit + # this is mainly used for keeping the + # dev environment clean + with suppress(aiodocker.DockerError): + await _try_to_clean() async def _wait_for_service(service_name: str) -> None: @@ -120,9 +138,7 @@ async def _wait_for_service(service_name: str) -> None: ) -async def _get_service_published_port( - service_name: str, target_port: Optional[int] = None -) -> int: +async def _get_service_published_port(service_name: str, target_port: int) -> PortInt: # it takes a bit of time for the port to be auto generated # keep trying until it is there async with aiodocker.Docker() as docker_client: @@ -175,9 +191,8 @@ async def _get_service_published_port( if p.get("TargetPort") == target_port ) except StopIteration as e: - raise RuntimeError( - f"Cannot find {target_port} in {ports=} for {service_name=}" - ) from e + msg = f"Cannot find {target_port} in ports={ports!r} for service_name={service_name!r}" + raise RuntimeError(msg) from e else: assert len(ports) == 1, f"number of ports in {service_name=} is not 1!" published_port = ports[0]["PublishedPort"] @@ -190,47 +205,81 @@ async def _get_service_published_port( return published_port -async def patch_dynamic_service_url(app: FastAPI, node_uuid: str) -> str: +@asynccontextmanager +async def _disable_create_user_services( + scheduler_data: SchedulerData, +) -> AsyncIterator[None]: """ - Normally director-v2 talks via docker-netwoks with the dynamic-sidecar. 
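
# NOTE: `ensure_network_cleanup` above is deliberately best-effort: the retried
# network deletion is wrapped in contextlib.suppress(aiodocker.DockerError) so a
# Docker hiccup during fixture teardown cannot fail the test session. A generic
# sketch of that idea follows; the `prefix` filter is an illustrative assumption,
# not the repo's helper.
from contextlib import suppress

import aiodocker


async def best_effort_network_cleanup(prefix: str) -> None:
    # any DockerError is swallowed on purpose: teardown must never break the tests
    with suppress(aiodocker.DockerError):
        async with aiodocker.Docker() as client:
            for listed in await client.networks.list():
                if listed["Name"].startswith(prefix):
                    network = await client.networks.get(listed["Name"])
                    assert await network.delete() is True
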
- Since the director-v2 was started outside docker and is not - running in a container, the service port needs to be exposed and the - url needs to be changed to get_localhost_ip() + disables action to avoid proxy configuration from being updated + before the service is fully port forwarded + """ + scheduler_data.dynamic_sidecar.was_compose_spec_submitted = True + try: + yield None + finally: + scheduler_data.dynamic_sidecar.was_compose_spec_submitted = False - returns: the local endpoint + +async def patch_dynamic_service_url(app: FastAPI, node_uuid: str) -> str: """ - service_name = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}" + Calls from director-v2 to dy-sidecar and dy-proxy + are normally resolved via docker networks. + Since the director-v2 was started outside docker (it is not + running in a container) the service port needs to be exposed and the + url needs to be changed to get_localhost_ip(). - assert app.state - settings: AppSettings = app.state.settings - await _wait_for_service(service_name) + returns: the dy-sidecar's new endpoint + """ - published_port = await _get_service_published_port( - service_name, - target_port=settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_PORT, + # pylint: disable=protected-access + scheduler: DynamicSidecarsScheduler = app.state.dynamic_sidecar_scheduler + service_name = scheduler.scheduler._inverse_search_mapping[ # noqa: SLF001 + NodeID(node_uuid) + ] + scheduler_data: SchedulerData = scheduler.scheduler._to_observe[ # noqa: SLF001 + service_name + ] + + sidecar_settings: DynamicSidecarSettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR + ) + dynamic_sidecar_proxy_settings: DynamicSidecarProxySettings = ( + app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR_PROXY_SETTINGS ) - assert ( - published_port is not None - ), f"{settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.json()=}" - # patch the endppoint inside the scheduler - scheduler: DynamicSidecarsScheduler = app.state.dynamic_sidecar_scheduler - endpoint: Optional[str] = None - async with scheduler._scheduler._lock: # pylint: disable=protected-access - for ( - scheduler_data - ) in ( # pylint: disable=protected-access - scheduler._scheduler._to_observe.values() - ): - if scheduler_data.service_name == service_name: - scheduler_data.hostname = f"{get_localhost_ip()}" - scheduler_data.port = published_port + async with _disable_create_user_services(scheduler_data): + sidecar_service_name = f"{DYNAMIC_SIDECAR_SERVICE_PREFIX}_{node_uuid}" + proxy_service_name = f"{DYNAMIC_PROXY_SERVICE_PREFIX}_{node_uuid}" - endpoint = scheduler_data.endpoint - assert endpoint == f"http://{get_localhost_ip()}:{published_port}" - break + await _wait_for_service(sidecar_service_name) + await _wait_for_service(proxy_service_name) + + sidecar_published_port = await _get_service_published_port( + sidecar_service_name, + target_port=sidecar_settings.DYNAMIC_SIDECAR_PORT, + ) + + proxy_published_port = await _get_service_published_port( + proxy_service_name, + target_port=dynamic_sidecar_proxy_settings.DYNAMIC_SIDECAR_CADDY_ADMIN_API_PORT, + ) + assert ( + proxy_published_port is not None + ), f"{sidecar_settings.model_dump_json(warnings='none')=}" - assert endpoint is not None + async with scheduler.scheduler._lock: # noqa: SLF001 + localhost_ip = get_localhost_ip() + + # use prot forwarded address for dy-sidecar + scheduler_data.hostname = localhost_ip + scheduler_data.port = sidecar_published_port + + # use port forwarded address for dy-proxy + scheduler_data.proxy_service_name = localhost_ip + 
scheduler_data.proxy_admin_api_port = proxy_published_port + + endpoint = f"{scheduler_data.endpoint}".rstrip("/") + assert endpoint == f"http://{localhost_ip}:{sidecar_published_port}" return endpoint @@ -244,7 +293,7 @@ async def _get_proxy_port(node_uuid: str) -> PositiveInt: returns: the local endpoint """ service_name = f"{DYNAMIC_PROXY_SERVICE_PREFIX}_{node_uuid}" - port = await _get_service_published_port(service_name) + port = await _get_service_published_port(service_name, target_port=80) assert port is not None return port @@ -256,18 +305,36 @@ async def _get_service_resources( url = f"{catalog_url}/v0/services/{encoded_key}/{service_version}/resources" async with httpx.AsyncClient() as client: response = await client.get(f"{url}") - return parse_obj_as(ServiceResourcesDict, response.json()) + return TypeAdapter(ServiceResourcesDict).validate_python(response.json()) + + +async def _handle_redirection( + redirection_response: httpx.Response, *, method: str, **kwargs +) -> httpx.Response: + """since we are in a test environment with a test server, a real client must be used in order to get to an external server + i.e. the async_client used with the director test server is unable to follow redirects + """ + assert ( + redirection_response.next_request + ), f"no redirection set in {redirection_response}" + async with httpx.AsyncClient() as real_client: + response = await real_client.request( + method, f"{redirection_response.next_request.url}", **kwargs + ) + response.raise_for_status() + return response async def assert_start_service( director_v2_client: httpx.AsyncClient, product_name: str, + product_api_base_url: str, user_id: UserID, project_id: str, service_key: str, service_version: str, service_uuid: str, - basepath: Optional[str], + basepath: str | None, catalog_url: URL, ) -> None: service_resources: ServiceResourcesDict = await _get_service_resources( @@ -275,29 +342,41 @@ async def assert_start_service( service_key=service_key, service_version=service_version, ) - data = dict( - user_id=user_id, - project_id=project_id, - service_key=service_key, - service_version=service_version, - service_uuid=service_uuid, - basepath=basepath, - service_resources=ServiceResourcesDictHelpers.create_jsonable( + data = { + "user_id": user_id, + "project_id": project_id, + "service_key": service_key, + "service_version": service_version, + "service_uuid": service_uuid, + "can_save": True, + "basepath": basepath, + "service_resources": ServiceResourcesDictHelpers.create_jsonable( service_resources ), - product_name=product_name, - ) + "product_name": product_name, + "product_api_base_url": product_api_base_url, + } headers = { X_DYNAMIC_SIDECAR_REQUEST_DNS: director_v2_client.base_url.host, X_DYNAMIC_SIDECAR_REQUEST_SCHEME: director_v2_client.base_url.scheme, X_SIMCORE_USER_AGENT: "", } - result = await director_v2_client.post( - "/v2/dynamic_services", json=data, headers=headers, follow_redirects=True + response = await director_v2_client.post( + "/v2/dynamic_services", + json=data, + headers=headers, + follow_redirects=False, + timeout=30, ) - result.raise_for_status() - assert result.status_code == httpx.codes.CREATED, result.text + + if response.status_code == httpx.codes.TEMPORARY_REDIRECT: + response = await _handle_redirection( + response, method="POST", json=data, headers=headers, timeout=30 + ) + response.raise_for_status() + + assert response.status_code == httpx.codes.CREATED, response.text async def get_service_data( @@ -309,17 +388,13 @@ async def get_service_data( response = 
await director_v2_client.get( f"/v2/dynamic_services/{service_uuid}", follow_redirects=False ) + if response.status_code == httpx.codes.TEMPORARY_REDIRECT: - # NOTE: so we have a redirect, and it seems the director_v2_client does not like it at all - # moving from the testserver to the director in this GET call - # which is why we use a DIFFERENT httpx client for this... (sic). - # This actually works well when running inside the swarm... WTF??? - assert response.next_request - response = httpx.get(f"{response.next_request.url}") + response = await _handle_redirection(response, method="GET") + response.raise_for_status() assert response.status_code == httpx.codes.OK, response.text payload = response.json() - data = payload["data"] if is_legacy(node_data) else payload - return data + return payload["data"] if is_legacy(node_data) else payload async def _get_service_state( @@ -334,7 +409,7 @@ async def _get_service_state( async def assert_all_services_running( director_v2_client: httpx.AsyncClient, - workbench: dict[str, Node], + workbench: NodesDict, ) -> None: async for attempt in AsyncRetrying( reraise=True, @@ -354,7 +429,7 @@ async def assert_all_services_running( ) ) - assert all(x == "running" for x in service_states) + assert all(state == "running" for state in service_states) print("--> all services are up and running!") @@ -367,29 +442,39 @@ async def assert_retrieve_service( X_SIMCORE_USER_AGENT: "", } - result = await director_v2_client.post( + response = await director_v2_client.post( f"/v2/dynamic_services/{service_uuid}:retrieve", - json=dict(port_keys=[]), + json={"port_keys": []}, headers=headers, - follow_redirects=True, + follow_redirects=False, ) - assert result.status_code == httpx.codes.OK, result.text - json_result = result.json() + if response.status_code == httpx.codes.TEMPORARY_REDIRECT: + response = await _handle_redirection( + response, + method="POST", + json={"port_keys": []}, + headers=headers, + ) + response.raise_for_status() + assert response.status_code == httpx.codes.OK, response.text + json_result = response.json() print(f"{service_uuid}:retrieve result ", json_result) size_bytes = json_result["data"]["size_bytes"] assert size_bytes > 0 - assert type(size_bytes) == int + assert isinstance(size_bytes, int) async def assert_stop_service( director_v2_client: httpx.AsyncClient, service_uuid: str ) -> None: - result = await director_v2_client.delete( - f"/v2/dynamic_services/{service_uuid}", follow_redirects=True + response = await director_v2_client.delete( + f"/v2/dynamic_services/{service_uuid}", follow_redirects=False ) - assert result.status_code == httpx.codes.NO_CONTENT - assert result.text == "" + if response.status_code == httpx.codes.TEMPORARY_REDIRECT: + response = await _handle_redirection(response, method="DELETE") + assert response.status_code == httpx.codes.NO_CONTENT + assert response.text == "" async def _inspect_service_and_print_logs( @@ -448,13 +533,19 @@ async def _port_forward_legacy_service( # pylint: disable=redefined-outer-name # Legacy services are started --endpoint-mode dnsrr, it needs to # be changed to vip otherwise the port forward will not work result = run_command(f"docker service update {service_name} --endpoint-mode=vip") - assert "verify: Service converged" in result + assert ( + "verify: Service converged" in result + or f"verify: Service {service_name} converged" in result + ) # Finally forward the port on a random assigned port. 
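
# NOTE: a minimal sketch of the redirect handling used by `_handle_redirection`
# above: requests go out with follow_redirects=False and, on a 307, the call is
# replayed with a real httpx client against response.next_request.url, because
# the ASGI-bound test client cannot follow a redirect to an external server.
# The endpoint and payload in the usage line are made up.
import httpx


async def post_following_one_redirect(
    test_client: httpx.AsyncClient, url: str, payload: dict
) -> httpx.Response:
    response = await test_client.post(url, json=payload, follow_redirects=False)
    if response.status_code == httpx.codes.TEMPORARY_REDIRECT:
        assert response.next_request is not None  # filled in by httpx on redirects
        async with httpx.AsyncClient() as real_client:
            response = await real_client.post(
                f"{response.next_request.url}", json=payload
            )
    response.raise_for_status()
    return response
# usage: await post_following_one_redirect(async_client, "/v2/dynamic_services", {...})
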
result = run_command( f"docker service update {service_name} --publish-add :{internal_port}" ) - assert "verify: Service converged" in result + assert ( + "verify: Service converged" in result + or f"verify: Service {service_name} converged" in result + ) # inspect service and fetch the port async with aiodocker.Docker() as docker_client: @@ -462,8 +553,7 @@ async def _port_forward_legacy_service( # pylint: disable=redefined-outer-name ports = service_details["Endpoint"]["Ports"] assert len(ports) == 1, service_details - exposed_port = ports[0]["PublishedPort"] - return exposed_port + return ports[0]["PublishedPort"] async def assert_service_is_ready( # pylint: disable=redefined-outer-name @@ -491,7 +581,7 @@ async def assert_service_is_ready( # pylint: disable=redefined-outer-name async def assert_services_reply_200( director_v2_client: httpx.AsyncClient, - workbench: dict[str, Node], + workbench: NodesDict, ) -> None: print("Giving dy-proxies some time to start") await asyncio.sleep(PROXY_BOOT_TIME) diff --git a/services/director-v2/tests/integration/conftest.py b/services/director-v2/tests/integration/conftest.py index d40193d5c62..cc4c32899ae 100644 --- a/services/director-v2/tests/integration/conftest.py +++ b/services/director-v2/tests/integration/conftest.py @@ -1,29 +1,46 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-import + import asyncio -from typing import AsyncIterator, Awaitable, Callable, Iterator +from collections.abc import AsyncIterator, Awaitable, Callable from unittest.mock import AsyncMock import httpx import pytest import sqlalchemy as sa +from models_library.api_schemas_directorv2.computations import ComputationGet from models_library.projects import ProjectAtDB from models_library.users import UserID from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict from simcore_postgres_database.models.comp_tasks import comp_tasks from simcore_postgres_database.models.projects import projects -from simcore_service_director_v2.models.schemas.comp_tasks import ComputationGet from starlette import status +from tenacity import retry +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed +from yarl import URL + + +@pytest.fixture +def mock_env(mock_env: EnvVarsDict, minio_s3_settings_envs: EnvVarsDict) -> EnvVarsDict: + # overwrite to add minio real settings + return mock_env @pytest.fixture def update_project_workbench_with_comp_tasks( postgres_db: sa.engine.Engine, -) -> Iterator[Callable]: +) -> Callable: def updator(project_uuid: str): with postgres_db.connect() as con: result = con.execute( projects.select().where(projects.c.uuid == project_uuid) ) prj_row = result.first() + assert prj_row prj_workbench = prj_row.workbench result = con.execute( @@ -41,7 +58,7 @@ def updator(project_uuid: str): .where(projects.c.uuid == project_uuid) ) - yield updator + return updator @pytest.fixture(scope="session") @@ -50,6 +67,11 @@ def osparc_product_name() -> str: return "osparc" +@pytest.fixture(scope="session") +def osparc_product_api_base_url() -> str: + return "https://api.osparc.io" + + COMPUTATION_URL: str = "v2/computations" @@ -65,6 +87,7 @@ async def _creator( project: ProjectAtDB, user_id: UserID, product_name: str, + product_api_base_url: str, start_pipeline: bool, **kwargs, ) -> ComputationGet: @@ -75,13 +98,14 @@ async def _creator( "project_id": str(project.uuid), "start_pipeline": start_pipeline, 
"product_name": product_name, + "product_api_base_url": product_api_base_url, **kwargs, }, ) response.raise_for_status() assert response.status_code == status.HTTP_201_CREATED - computation_task = ComputationGet.parse_obj(response.json()) + computation_task = ComputationGet.model_validate(response.json()) created_comp_tasks.append((user_id, computation_task)) return computation_task @@ -91,12 +115,12 @@ async def _creator( responses: list[httpx.Response] = await asyncio.gather( *( async_client.request( - "DELETE", task.url, json={"user_id": user_id, "force": True} + "DELETE", f"{task.url}", json={"user_id": user_id, "force": True} ) for user_id, task in created_comp_tasks ) ) - assert all(r.raise_for_status() is None for r in responses) + assert all(isinstance(r.raise_for_status(), httpx.Response) for r in responses) @pytest.fixture @@ -109,3 +133,49 @@ def mock_projects_repository(mocker: MockerFixture) -> None: f"{module_base}.ProjectsRepository.is_node_present_in_workbench", return_value=mocked_obj, ) + + +@pytest.fixture +async def wait_for_catalog_service( + services_endpoint: dict[str, URL], +) -> Callable[[UserID, str], Awaitable[None]]: + async def _waiter(user_id: UserID, product_name: str) -> None: + catalog_endpoint = list( + filter( + lambda service_endpoint: "catalog" in service_endpoint[0], + services_endpoint.items(), + ) + ) + assert ( + len(catalog_endpoint) == 1 + ), f"no catalog service found! {services_endpoint=}" + catalog_endpoint = catalog_endpoint[0][1] + print(f"--> found catalog endpoint at {catalog_endpoint=}") + client = httpx.AsyncClient() + + @retry( + wait=wait_fixed(1), + stop=stop_after_delay(60), + retry=retry_if_exception_type(AssertionError) + | retry_if_exception_type(httpx.HTTPError), + ) + async def _ensure_catalog_services_answers() -> None: + print("--> checking catalog is up and ready...") + response = await client.get( + f"{catalog_endpoint}/v0/services", + params={"details": False, "user_id": user_id}, + headers={"x-simcore-products-name": product_name}, + timeout=1, + ) + assert ( + response.status_code == status.HTTP_200_OK + ), f"catalog is not ready {response.status_code}:{response.text}, TIP: migration not completed or catalog broken?" 
+ services = response.json() + assert services != [], "catalog is not ready: no services available" + print( + f"<-- catalog is up and ready, received {response.status_code}:{response.text}" + ) + + await _ensure_catalog_services_answers() + + return _waiter diff --git a/services/director-v2/tests/mocks/fake_dy_status_published.json b/services/director-v2/tests/mocks/fake_dy_status_published.json index 84bca67c1ae..a5d9d396a83 100644 --- a/services/director-v2/tests/mocks/fake_dy_status_published.json +++ b/services/director-v2/tests/mocks/fake_dy_status_published.json @@ -6,7 +6,9 @@ "e6becb37-4699-47f5-81ef-e58fbdf8a9e5": { "modified": true, "dependencies": [], - "currentStatus": "PUBLISHED" + "currentStatus": "PUBLISHED", + "progress": 0 } - } + }, + "progress": 0 } diff --git a/services/director-v2/tests/mocks/fake_dy_status_success.json b/services/director-v2/tests/mocks/fake_dy_status_success.json index c183993fd9a..0b47992af9a 100644 --- a/services/director-v2/tests/mocks/fake_dy_status_success.json +++ b/services/director-v2/tests/mocks/fake_dy_status_success.json @@ -6,7 +6,9 @@ "e6becb37-4699-47f5-81ef-e58fbdf8a9e5": { "modified": false, "dependencies": [], - "currentStatus": "SUCCESS" + "currentStatus": "SUCCESS", + "progress": 1.0 } - } + }, + "progress": 1.0 } diff --git a/services/director-v2/tests/mocks/fake_dy_workbench_template.json b/services/director-v2/tests/mocks/fake_dy_workbench_template.json index e395ad9859f..b6685974173 100644 --- a/services/director-v2/tests/mocks/fake_dy_workbench_template.json +++ b/services/director-v2/tests/mocks/fake_dy_workbench_template.json @@ -17,7 +17,7 @@ }, "80103e12-6b01-40f2-94b8-556bd6c3dd98": { "key": "simcore/services/dynamic/dy-static-file-server-dynamic-sidecar", - "version": "2.0.4", + "version": "2.0.7", "label": "dy-static-file-server-dynamic-sidecar", "inputs": { "string_input": "not the default value", @@ -41,7 +41,7 @@ }, "78f06db4-5feb-4ea3-ad1b-176310ac71a7": { "key": "simcore/services/dynamic/dy-static-file-server-dynamic-sidecar-compose-spec", - "version": "2.0.4", + "version": "2.0.7", "label": "dy-static-file-server-dynamic-sidecar-compose-spec", "inputs": { "string_input": { diff --git a/services/director-v2/tests/mocks/fake_scheduler_data.json b/services/director-v2/tests/mocks/fake_scheduler_data.json index 3efbfb9b8c9..0f7b03e3369 100644 --- a/services/director-v2/tests/mocks/fake_scheduler_data.json +++ b/services/director-v2/tests/mocks/fake_scheduler_data.json @@ -39,7 +39,7 @@ "is_service_environment_ready": true, "service_removal_state": { "can_remove": false, - "can_save": null, + "can_save": false, "was_removed": false }, "dynamic_sidecar_id": "sbig36r7lmciw0qvmbyjq8l87", diff --git a/services/director-v2/tests/mocks/fake_scheduler_data_compose_spec.json b/services/director-v2/tests/mocks/fake_scheduler_data_compose_spec.json index ff14ebb3247..e34daee2b2e 100644 --- a/services/director-v2/tests/mocks/fake_scheduler_data_compose_spec.json +++ b/services/director-v2/tests/mocks/fake_scheduler_data_compose_spec.json @@ -39,7 +39,7 @@ "is_service_environment_ready": false, "service_removal_state": { "can_remove": false, - "can_save": null, + "can_save": false, "was_removed": false }, "dynamic_sidecar_id": "mz4vljrbwcnj6ffoiu7rozkqb", diff --git a/services/director-v2/tests/mocks/fake_task.json b/services/director-v2/tests/mocks/fake_task.json index 298d85bdff5..57d7a4c2837 100644 --- a/services/director-v2/tests/mocks/fake_task.json +++ b/services/director-v2/tests/mocks/fake_task.json @@ -45,17 +45,23 @@ }, 
"inputs": {}, "image": { - "name": "simcore/services/dynamic/sdfkljhsdffsd121231/sdfkjhsdjf/12kdfsjlsj98u0923----dsdljklsd_", + "name": "simcore/services/dynamic/sdfkljhsdffsd121231/sdfkjhsdjf/12kdfsjlsj98u0923----dsdljklsd", "tag": "0.5824.51", "requires_gpu": false, "requires_mpi": true }, - "submit": "1994-11-10T19:23:02.115Z", "state": "PUBLISHED", "internal_id": 21107840, "node_class": "COMPUTATIONAL", "job_id": "voluptate amet non consectetur Lorem", "outputs": {}, "start": "1961-07-06T11:24:30.877Z", - "end": "2008-03-24T07:02:09.279Z" + "end": "2008-03-24T07:02:09.279Z", + "created": "1961-07-06T11:24:30.877Z", + "modified": "2008-03-24T07:02:09.279Z", + "pricing_info": null, + "last_heartbeat": null, + "hardware_info": { + "aws_ec2_instances": [] + } } diff --git a/services/director-v2/tests/mocks/fake_workbench_computational_node_states.json b/services/director-v2/tests/mocks/fake_workbench_computational_node_states.json index d8ed46e44f7..52bcf2271dd 100644 --- a/services/director-v2/tests/mocks/fake_workbench_computational_node_states.json +++ b/services/director-v2/tests/mocks/fake_workbench_computational_node_states.json @@ -2,19 +2,22 @@ "3a710d8b-565c-5f46-870b-b45ebe195fc7": { "modified": true, "dependencies": [], - "currentStatus": "PUBLISHED" + "currentStatus": "PUBLISHED", + "progress": null }, "e1e2ea96-ce8f-5abc-8712-b8ed312a782c": { "modified": true, "dependencies": [], - "currentStatus": "PUBLISHED" + "currentStatus": "PUBLISHED", + "progress": null }, "415fefd1-d08b-53c1-adb0-16bed3a687ef": { "modified": true, "dependencies": [ "3a710d8b-565c-5f46-870b-b45ebe195fc7" ], - "currentStatus": "PUBLISHED" + "currentStatus": "PUBLISHED", + "progress": null }, "6ede1209-b459-5735-91fc-761aa584808d": { "modified": true, @@ -22,6 +25,7 @@ "e1e2ea96-ce8f-5abc-8712-b8ed312a782c", "415fefd1-d08b-53c1-adb0-16bed3a687ef" ], - "currentStatus": "PUBLISHED" + "currentStatus": "PUBLISHED", + "progress": null } } diff --git a/services/director-v2/tests/mocks/legacy_scheduler_data_format.json b/services/director-v2/tests/mocks/legacy_scheduler_data_format.json index 95a3d67217b..cf3b1635cbc 100644 --- a/services/director-v2/tests/mocks/legacy_scheduler_data_format.json +++ b/services/director-v2/tests/mocks/legacy_scheduler_data_format.json @@ -89,6 +89,7 @@ }, "request_dns": "master.com", "request_scheme": "https", + "request_simcore_user_agent": "", "proxy_service_name": "dy-proxy_12fb3055-db35-4a34-a9c0-bff1267aa859" }, { @@ -185,7 +186,7 @@ "is_service_environment_ready": true, "service_removal_state": { "can_remove": false, - "can_save": null, + "can_save": false, "was_removed": false }, "wait_for_manual_intervention_after_error": false, @@ -249,6 +250,7 @@ }, "request_dns": "master.com", "request_scheme": "https", + "request_simcore_user_agent": "", "proxy_service_name": "dy-proxy_d14bf3ea-abcf-52f2-8146-fc244b70f307" } ] diff --git a/services/director-v2/tests/unit/_dask_helpers.py b/services/director-v2/tests/unit/_dask_helpers.py deleted file mode 100644 index 6b9b46fd19b..00000000000 --- a/services/director-v2/tests/unit/_dask_helpers.py +++ /dev/null @@ -1,59 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument - -from typing import Any, NamedTuple - -from dask_gateway_server.app import DaskGateway -from dask_task_models_library.container_tasks.docker import DockerBasicAuth -from dask_task_models_library.container_tasks.io import ( - TaskInputData, - TaskOutputData, - TaskOutputDataSchema, -) -from pydantic import AnyUrl - - -class 
DaskGatewayServer(NamedTuple): - address: str - proxy_address: str - password: str - server: DaskGateway - - -def fake_sidecar_fct( - docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - expected_annotations: dict[str, Any], -) -> TaskOutputData: - import time - - from dask.distributed import get_worker - - # sleep a bit in case someone is aborting us - time.sleep(1) - - # get the task data - worker = get_worker() - task = worker.state.tasks.get(worker.get_current_task()) - assert task is not None - assert task.annotations == expected_annotations - - return TaskOutputData.parse_obj({"some_output_key": 123}) - - -def fake_failing_sidecar_fct( - docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], -) -> TaskOutputData: - - raise ValueError("sadly we are failing to execute anything cause we are dumb...") diff --git a/services/director-v2/tests/unit/_helpers.py b/services/director-v2/tests/unit/_helpers.py index 10f9c9f9e45..aba737fe932 100644 --- a/services/director-v2/tests/unit/_helpers.py +++ b/services/director-v2/tests/unit/_helpers.py @@ -1,106 +1,42 @@ -import asyncio +from collections.abc import Callable from dataclasses import dataclass -from typing import Any, Dict, Iterator, List +from typing import Any -import aiopg +import sqlalchemy as sa from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState -from models_library.users import UserID -from pydantic.tools import parse_obj_as -from simcore_postgres_database.models.comp_pipeline import StateType +from pydantic import TypeAdapter from simcore_postgres_database.models.comp_runs import comp_runs from simcore_postgres_database.models.comp_tasks import comp_tasks -from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB -from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB -from simcore_service_director_v2.modules.comp_scheduler.base_scheduler import ( - BaseCompScheduler, -) +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_runs import CompRunsAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB +from sqlalchemy.ext.asyncio import AsyncEngine @dataclass class PublishedProject: + user: dict[str, Any] project: ProjectAtDB pipeline: CompPipelineAtDB - tasks: List[CompTaskAtDB] + tasks: list[CompTaskAtDB] -@dataclass +@dataclass(kw_only=True) class RunningProject(PublishedProject): runs: CompRunsAtDB - - -async def assert_comp_run_state( - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - user_id: UserID, - project_uuid: ProjectID, - exp_state: RunningState, -): - # check the database is correctly updated, the run is published - async with aiopg_engine.acquire() as conn: # type: ignore - result = await conn.execute( - comp_runs.select().where( - (comp_runs.c.user_id == user_id) - & (comp_runs.c.project_uuid == f"{project_uuid}") - ) # there is only one entry - ) - run_entry = CompRunsAtDB.parse_obj(await result.first()) - assert ( - run_entry.result == exp_state - ), f"comp_runs: expected state '{exp_state}, found 
'{run_entry.result}'" - - -async def assert_comp_tasks_state( - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - project_uuid: ProjectID, - task_ids: List[NodeID], - exp_state: RunningState, -): - # check the database is correctly updated, the run is published - async with aiopg_engine.acquire() as conn: # type: ignore - result = await conn.execute( - comp_tasks.select().where( - (comp_tasks.c.project_id == f"{project_uuid}") - & (comp_tasks.c.node_id.in_([f"{n}" for n in task_ids])) - ) # there is only one entry - ) - tasks = parse_obj_as(List[CompTaskAtDB], await result.fetchall()) - assert all( # pylint: disable=use-a-generator - [t.state == exp_state for t in tasks] - ), f"expected state: {exp_state}, found: {[t.state for t in tasks]}" - - -async def trigger_comp_scheduler(scheduler: BaseCompScheduler): - # trigger the scheduler - scheduler._wake_up_scheduler_now() # pylint: disable=protected-access - # let the scheduler be actually triggered - await asyncio.sleep(1) - - -async def manually_run_comp_scheduler(scheduler: BaseCompScheduler): - # trigger the scheduler - await scheduler.schedule_all_pipelines() - - -async def set_comp_task_state( - aiopg_engine: Iterator[aiopg.sa.engine.Engine], node_id: str, state: StateType # type: ignore -): - async with aiopg_engine.acquire() as conn: # type: ignore - await conn.execute( - # pylint: disable=no-value-for-parameter - comp_tasks.update() - .where(comp_tasks.c.node_id == node_id) - .values(state=state) - ) + task_to_callback_mapping: dict[NodeID, Callable[[], None]] async def set_comp_task_outputs( - aiopg_engine: aiopg.sa.engine.Engine, node_id: NodeID, outputs_schema: Dict[str, Any], outputs: Dict[str, Any] # type: ignore -): - async with aiopg_engine.acquire() as conn: # type: ignore + sqlalchemy_async_engine: AsyncEngine, + node_id: NodeID, + outputs_schema: dict[str, Any], + outputs: dict[str, Any], +) -> None: + async with sqlalchemy_async_engine.begin() as conn: await conn.execute( - # pylint: disable=no-value-for-parameter comp_tasks.update() .where(comp_tasks.c.node_id == f"{node_id}") .values(outputs=outputs, schema={"outputs": outputs_schema, "inputs": {}}) @@ -108,12 +44,66 @@ async def set_comp_task_outputs( async def set_comp_task_inputs( - aiopg_engine: aiopg.sa.engine.Engine, node_id: NodeID, inputs_schema: Dict[str, Any], inputs: Dict[str, Any] # type: ignore -): - async with aiopg_engine.acquire() as conn: # type: ignore + sqlalchemy_async_engine: AsyncEngine, + node_id: NodeID, + inputs_schema: dict[str, Any], + inputs: dict[str, Any], +) -> None: + async with sqlalchemy_async_engine.begin() as conn: await conn.execute( - # pylint: disable=no-value-for-parameter comp_tasks.update() .where(comp_tasks.c.node_id == f"{node_id}") .values(inputs=inputs, schema={"outputs": {}, "inputs": inputs_schema}) ) + + +async def assert_comp_runs( + sqlalchemy_async_engine: AsyncEngine, + *, + expected_total: int, + expected_state: RunningState | None = None, + where_statement: Any | None = None, +) -> list[CompRunsAtDB]: + async with sqlalchemy_async_engine.connect() as conn: + query = sa.select(comp_runs) + if where_statement is not None: + query = query.where(where_statement) + list_of_comp_runs = [ + CompRunsAtDB.model_validate(row) for row in await conn.execute(query) + ] + assert len(list_of_comp_runs) == expected_total + if list_of_comp_runs and expected_state: + assert all( + r.result is expected_state for r in list_of_comp_runs + ), f"expected state '{expected_state}', got {[r.result for r in list_of_comp_runs]}" 
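# NOTE: comparing with `is` is safe because RunningState members are enum singletons;
# the state check above is skipped when no rows matched or when no expected_state was given.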
+ return list_of_comp_runs + + +async def assert_comp_runs_empty(sqlalchemy_async_engine: AsyncEngine) -> None: + await assert_comp_runs(sqlalchemy_async_engine, expected_total=0) + + +async def assert_comp_tasks( + sqlalchemy_async_engine: AsyncEngine, + *, + project_uuid: ProjectID, + task_ids: list[NodeID], + expected_state: RunningState, + expected_progress: float | None, +) -> list[CompTaskAtDB]: + # check the database is correctly updated, the run is published + async with sqlalchemy_async_engine.connect() as conn: + result = await conn.execute( + comp_tasks.select().where( + (comp_tasks.c.project_id == f"{project_uuid}") + & (comp_tasks.c.node_id.in_([f"{n}" for n in task_ids])) + ) # there is only one entry + ) + tasks = TypeAdapter(list[CompTaskAtDB]).validate_python(result.fetchall()) + assert all( + t.state == expected_state for t in tasks + ), f"expected state: {expected_state}, found: {[t.state for t in tasks]}" + assert all( + t.progress == expected_progress for t in tasks + ), f"{expected_progress=}, found: {[t.progress for t in tasks]}" + return tasks diff --git a/services/director-v2/tests/unit/conftest.py b/services/director-v2/tests/unit/conftest.py index bcc2b376595..eb22767b34a 100644 --- a/services/director-v2/tests/unit/conftest.py +++ b/services/director-v2/tests/unit/conftest.py @@ -3,56 +3,60 @@ import json import logging -import random import urllib.parse -from typing import ( - Any, - AsyncIterable, - AsyncIterator, - Callable, - Iterable, - Iterator, - Mapping, -) +from collections.abc import AsyncIterable, Iterable, Iterator, Mapping +from typing import Any +from unittest import mock import aiodocker import pytest import respx -import traitlets.config -from _dask_helpers import DaskGatewayServer -from dask.distributed import Scheduler, Worker -from dask_gateway_server.app import DaskGateway -from dask_gateway_server.backends.local import UnsafeLocalBackend -from distributed.deploy.spec import SpecCluster from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceCreate +from models_library.api_schemas_directorv2.dynamic_services_service import ( + ServiceDetails, +) +from models_library.basic_types import PortInt +from models_library.callbacks_mapping import CallbacksMapping from models_library.generated_models.docker_rest_api import ( ServiceSpec as DockerServiceSpec, ) +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import SimcoreServiceLabels -from models_library.services import RunID, ServiceKeyVersion -from pydantic import parse_obj_as -from pydantic.types import NonNegativeInt -from pytest import LogCaptureFixture, MonkeyPatch +from models_library.services import ( + ServiceKey, + ServiceKeyVersion, + ServiceRunID, + ServiceVersion, +) +from models_library.services_enums import ServiceState +from models_library.users import UserID +from models_library.utils._original_fastapi_encoders import jsonable_encoder +from pydantic import TypeAdapter from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.s3 import S3Settings from simcore_sdk.node_ports_v2 import FileLinkType +from simcore_service_director_v2.constants import DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.domains.dynamic_services import ( - DynamicServiceCreate, -) 
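# NOTE: the schemas previously imported from `models.domains` / `models.schemas` now live
# in `models_library.api_schemas_directorv2` and the flattened
# `simcore_service_director_v2.models` package (see the added imports above).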
-from simcore_service_director_v2.models.schemas.constants import ( - DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL, -) -from simcore_service_director_v2.models.schemas.dynamic_services import ( - SchedulerData, - ServiceDetails, - ServiceState, -) -from simcore_service_director_v2.modules.dynamic_sidecar.docker_service_specs.volume_remover import ( - DockerVersion, -) -from yarl import URL +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData + + +@pytest.fixture +def disable_postgres(mocker) -> None: + fake_engine = mock.AsyncMock() + + def mock_setup(app: FastAPI, *args, **kwargs) -> None: + app.state.engine = fake_engine + + mocker.patch("simcore_service_director_v2.modules.db.setup", side_effect=mock_setup) + for module in [ + "simcore_service_director_v2.modules.db", + "simcore_service_director_v2.modules.dask_client", + ]: + mocker.patch(f"{module}.get_db_engine", return_value=fake_engine) @pytest.fixture @@ -62,26 +66,37 @@ def simcore_services_network_name() -> str: @pytest.fixture def simcore_service_labels() -> SimcoreServiceLabels: - return SimcoreServiceLabels.parse_obj( - SimcoreServiceLabels.Config.schema_extra["examples"][1] - ) + example = SimcoreServiceLabels.model_json_schema()["examples"][1] + + simcore_service_labels = SimcoreServiceLabels.model_validate(example) + simcore_service_labels.callbacks_mapping = CallbacksMapping.model_validate({}) + return simcore_service_labels @pytest.fixture def dynamic_service_create() -> DynamicServiceCreate: - return DynamicServiceCreate.parse_obj( - DynamicServiceCreate.Config.schema_extra["example"] + return DynamicServiceCreate.model_validate( + DynamicServiceCreate.model_json_schema()["example"] ) @pytest.fixture -def dynamic_sidecar_port() -> int: +def dynamic_sidecar_port() -> PortInt: return 1222 @pytest.fixture -def run_id(faker: Faker) -> RunID: - return faker.uuid4(cast_to=None) +def service_run_id() -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_dynamic() + + +@pytest.fixture +def resource_tracking_run_id( + user_id: UserID, project_id: ProjectID, node_id: NodeID +) -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_computational( + user_id, project_id, node_id, iteration=42 + ) @pytest.fixture @@ -94,6 +109,11 @@ def request_scheme() -> str: return "http" +@pytest.fixture +def can_save() -> bool: + return True + + @pytest.fixture def request_simcore_user_agent() -> str: return "python/test" @@ -103,11 +123,12 @@ def request_simcore_user_agent() -> str: def scheduler_data_from_http_request( dynamic_service_create: DynamicServiceCreate, simcore_service_labels: SimcoreServiceLabels, - dynamic_sidecar_port: int, + dynamic_sidecar_port: PortInt, request_dns: str, request_scheme: str, request_simcore_user_agent: str, - run_id: RunID, + can_save: bool, + service_run_id: ServiceRunID, ) -> SchedulerData: return SchedulerData.from_http_request( service=dynamic_service_create, @@ -116,7 +137,8 @@ def scheduler_data_from_http_request( request_dns=request_dns, request_scheme=request_scheme, request_simcore_user_agent=request_simcore_user_agent, - run_id=run_id, + can_save=can_save, + run_id=service_run_id, ) @@ -124,7 +146,7 @@ def scheduler_data_from_http_request( def mock_service_inspect( scheduler_data_from_http_request: ServiceDetails, ) -> Mapping[str, Any]: - service_details = json.loads(scheduler_data_from_http_request.json()) + service_details = json.loads(scheduler_data_from_http_request.model_dump_json()) service_details["compose_spec"] = 
json.dumps(service_details["compose_spec"]) return { "Spec": { @@ -137,7 +159,7 @@ def mock_service_inspect( @pytest.fixture def scheduler_data_from_service_inspect( - mock_service_inspect: Mapping[str, Any] + mock_service_inspect: Mapping[str, Any], ) -> SchedulerData: return SchedulerData.from_service_inspect(mock_service_inspect) @@ -159,112 +181,6 @@ def scheduler_data( }[request.param] -@pytest.fixture -def cluster_id() -> NonNegativeInt: - return random.randint(0, 10) - - -@pytest.fixture -async def dask_spec_local_cluster( - monkeypatch: MonkeyPatch, - unused_tcp_port_factory: Callable, -) -> AsyncIterable[SpecCluster]: - # in this mode we can precisely create a specific cluster - workers = { - "cpu-worker": { - "cls": Worker, - "options": { - "nthreads": 2, - "resources": {"CPU": 2, "RAM": 48e9}, - }, - }, - "gpu-worker": { - "cls": Worker, - "options": { - "nthreads": 1, - "resources": { - "CPU": 1, - "GPU": 1, - "RAM": 48e9, - }, - }, - }, - "bigcpu-worker": { - "cls": Worker, - "options": { - "nthreads": 1, - "resources": { - "CPU": 8, - "RAM": 768e9, - }, - }, - }, - } - scheduler = { - "cls": Scheduler, - "options": { - "port": unused_tcp_port_factory(), - "dashboard_address": f":{unused_tcp_port_factory()}", - }, - } - - async with SpecCluster( - workers=workers, scheduler=scheduler, asynchronous=True, name="pytest_cluster" - ) as cluster: - scheduler_address = URL(cluster.scheduler_address) - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", - f"{scheduler_address}" or "invalid", - ) - yield cluster - - -@pytest.fixture -def local_dask_gateway_server_config( - unused_tcp_port_factory: Callable, -) -> traitlets.config.Config: - c = traitlets.config.Config() - c.DaskGateway.backend_class = UnsafeLocalBackend # type: ignore - c.DaskGateway.address = f"127.0.0.1:{unused_tcp_port_factory()}" # type: ignore - c.Proxy.address = f"127.0.0.1:{unused_tcp_port_factory()}" # type: ignore - c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator" # type: ignore - c.SimpleAuthenticator.password = "qweqwe" # type: ignore - c.ClusterConfig.worker_cmd = [ # type: ignore - "dask-worker", - "--resources", - f"CPU=12,GPU=1,RAM={16e9}", - ] - # NOTE: This must be set such that the local unsafe backend creates a worker with enough cores/memory - c.ClusterConfig.worker_cores = 12 # type: ignore - c.ClusterConfig.worker_memory = "16G" # type: ignore - - c.DaskGateway.log_level = "DEBUG" # type: ignore - return c - - -@pytest.fixture -async def local_dask_gateway_server( - local_dask_gateway_server_config: traitlets.config.Config, -) -> AsyncIterator[DaskGatewayServer]: - print("--> creating local dask gateway server") - dask_gateway_server = DaskGateway(config=local_dask_gateway_server_config) - dask_gateway_server.initialize([]) # that is a shitty one! 
- print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - - print("--> local dask gateway server setup completed") - yield DaskGatewayServer( - f"http://{dask_gateway_server.backend.proxy.address}", - f"gateway://{dask_gateway_server.backend.proxy.tcp_address}", - local_dask_gateway_server_config.SimpleAuthenticator.password, # type: ignore - dask_gateway_server, - ) - print("--> local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") - - @pytest.fixture(params=list(FileLinkType)) def tasks_file_link_type(request) -> FileLinkType: """parametrized fixture on all FileLinkType enum variants""" @@ -277,14 +193,19 @@ def tasks_file_link_type(request) -> FileLinkType: @pytest.fixture def fake_s3_settings(faker: Faker) -> S3Settings: return S3Settings( - S3_ENDPOINT=faker.uri(), + S3_ENDPOINT=faker.url(), S3_ACCESS_KEY=faker.uuid4(), S3_SECRET_KEY=faker.uuid4(), - S3_ACCESS_TOKEN=faker.uuid4(), S3_BUCKET_NAME=faker.pystr(), + S3_REGION=faker.pystr(), ) +@pytest.fixture +def fake_s3_envs(fake_s3_settings: S3Settings) -> EnvVarsDict: + return fake_s3_settings.model_dump() + + @pytest.fixture def mocked_storage_service_api( fake_s3_settings: S3Settings, @@ -295,14 +216,14 @@ def mocked_storage_service_api( # pylint: disable=not-context-manager with respx.mock( # type: ignore - base_url=settings.DIRECTOR_V2_STORAGE.endpoint, + base_url=settings.DIRECTOR_V2_STORAGE.api_base_url, assert_all_called=False, assert_all_mocked=True, ) as respx_mock: respx_mock.post( "/simcore-s3:access", name="get_or_create_temporary_s3_access", - ).respond(json={"data": fake_s3_settings.dict(by_alias=True)}) + ).respond(json=jsonable_encoder({"data": fake_s3_settings}, by_alias=True)) yield respx_mock @@ -312,7 +233,12 @@ def mocked_storage_service_api( @pytest.fixture def mock_service_key_version() -> ServiceKeyVersion: - return ServiceKeyVersion(key="simcore/services/dynamic/myservice", version="1.4.5") + return ServiceKeyVersion( + key=TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/myservice" + ), + version=TypeAdapter(ServiceVersion).validate_python("1.4.5"), + ) @pytest.fixture @@ -320,7 +246,7 @@ def fake_service_specifications(faker: Faker) -> dict[str, Any]: # the service specifications follow the Docker service creation available # https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate return { - "sidecar": DockerServiceSpec.parse_obj( + "sidecar": DockerServiceSpec.model_validate( { "Labels": {"label_one": faker.pystr(), "label_two": faker.pystr()}, "TaskTemplate": { @@ -363,7 +289,7 @@ def fake_service_specifications(faker: Faker) -> dict[str, Any]: }, }, } - ).dict(by_alias=True, exclude_unset=True) + ).model_dump(by_alias=True, exclude_unset=True) } @@ -398,13 +324,17 @@ def mocked_catalog_service_api( @pytest.fixture() -def caplog_info_level(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]: +def caplog_info_level( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: with caplog.at_level(logging.INFO): yield caplog @pytest.fixture() -def caplog_debug_level(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]: +def caplog_debug_level( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: with caplog.at_level(logging.DEBUG): yield caplog @@ -413,7 +343,7 @@ def caplog_debug_level(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture] def 
mock_docker_api(mocker: MockerFixture) -> None: module_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler" mocker.patch( - f"{module_base}._core._scheduler.get_dynamic_sidecars_to_observe", + f"{module_base}._core._scheduler_utils.get_dynamic_sidecars_to_observe", autospec=True, return_value=[], ) @@ -423,7 +353,7 @@ def mock_docker_api(mocker: MockerFixture) -> None: return_value=True, ) mocker.patch( - f"{module_base}._core._scheduler.get_dynamic_sidecar_state", + f"{module_base}._core._scheduler_utils.get_dynamic_sidecar_state", return_value=(ServiceState.PENDING, ""), ) @@ -432,13 +362,3 @@ def mock_docker_api(mocker: MockerFixture) -> None: async def async_docker_client() -> AsyncIterable[aiodocker.Docker]: async with aiodocker.Docker() as docker_client: yield docker_client - - -@pytest.fixture -async def docker_version(async_docker_client: aiodocker.Docker) -> DockerVersion: - version_request = ( - await async_docker_client._query_json( # pylint: disable=protected-access - "version", versioned_api=False - ) - ) - return parse_obj_as(DockerVersion, version_request["Version"]) diff --git a/services/director-v2/tests/unit/dynamic_scheduler/conftest.py b/services/director-v2/tests/unit/dynamic_scheduler/conftest.py deleted file mode 100644 index 1ce64cbc284..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/conftest.py +++ /dev/null @@ -1,32 +0,0 @@ -# pylint: disable=redefined-outer-name - -from typing import Awaitable - -import pytest -from pytest import FixtureRequest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - ContextInterface, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_in_memory import ( - InMemoryContext, -) - - -@pytest.fixture(params=[InMemoryContext]) -def context_interface_type(request: FixtureRequest) -> type[ContextInterface]: - return request.param - - -@pytest.fixture -def context(context_interface_type: type[ContextInterface]) -> ContextInterface: - return context_interface_type() - - -@pytest.fixture -def context_interface_factory( - context_interface_type: type[ContextInterface], -) -> Awaitable[ContextInterface]: - async def _factory() -> Awaitable[ContextInterface]: - return context_interface_type() - - return _factory diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2.py deleted file mode 100644 index 29d3ec878ca..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2.py +++ /dev/null @@ -1,291 +0,0 @@ -# pylint: disable=redefined-outer-name - -import asyncio -import logging -from contextlib import asynccontextmanager -from typing import Any, AsyncIterator, Awaitable -from unittest.mock import AsyncMock, call - -import pytest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2 import ( - Action, - ExceptionInfo, - Workflow, - WorkflowRunnerManager, - mark_step, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - ContextInterface, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._models import ( - WorkflowName, -) - -logger = logging.getLogger(__name__) - - -# UTILS - - -@asynccontextmanager -async def workflow_runner_manager_lifecycle( - app: AsyncMock, - context_interface_factory: Awaitable[ContextInterface], - 
workflow: Workflow, -) -> AsyncIterator[WorkflowRunnerManager]: - workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=app, workflow=workflow - ) - await workflow_runner_manager.setup() - yield workflow_runner_manager - await workflow_runner_manager.teardown() - - -# FIXTURES - - -@pytest.fixture -def app() -> AsyncMock: - return AsyncMock() - - -@pytest.fixture -def workflow_name() -> WorkflowName: - return "test_workflow_name" - - -# TESTS - - -async def test_workflow_runs_in_expected_order_without_errors( - app: AsyncMock, - context_interface_factory: Awaitable[ContextInterface], - workflow_name: WorkflowName, -): - call_tracker = AsyncMock() - - @mark_step - async def initialize_workflow() -> dict[str, Any]: - await call_tracker(initialize_workflow) - return {"number_a": 10, "number_b": 2} - - @mark_step - async def compute_product(number_a: int, number_b: int) -> dict[str, Any]: - await call_tracker(compute_product) - product = number_a * number_b - return {"product": product} - - @mark_step - async def check_result(product: int) -> dict[str, Any]: - await call_tracker(check_result) - assert product == 20 - return {} - - workflow = Workflow( - Action( - name="initialize", - steps=[ - initialize_workflow, - ], - next_action="compute", - on_error_action=None, - ), - Action( - name="compute", - steps=[ - compute_product, - ], - next_action="validate", - on_error_action=None, - ), - Action( - name="validate", - steps=[ - check_result, - ], - next_action=None, - on_error_action=None, - ), - ) - - async with workflow_runner_manager_lifecycle( - app, context_interface_factory, workflow - ) as workflow_runner_manager: - - await workflow_runner_manager.initialize_workflow_runner( - workflow_name=workflow_name, action_name="initialize" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name=workflow_name) - await workflow_runner_manager.wait_workflow_runner(workflow_name) - - assert call_tracker.call_args_list == [ - call(initialize_workflow), - call(compute_product), - call(check_result), - ] - - -async def test_error_raised_but_handled_by_on_error_action( - app: AsyncMock, - context_interface_factory: Awaitable[ContextInterface], - workflow_name: WorkflowName, -): - call_tracker = AsyncMock() - - class HandledError(RuntimeError): - ... 
- - @mark_step - async def error_raising() -> dict[str, Any]: - await call_tracker(error_raising) - raise HandledError() - - @mark_step - async def handle_error( - unexpected_runtime_exception: ExceptionInfo, - ) -> dict[str, Any]: - await call_tracker(handle_error) - # NOTE: the users has a chance to do something here based on the - # generated exception - print("Raised exception data", unexpected_runtime_exception) - return {} - - workflow = Workflow( - Action( - name="raising_error", - steps=[error_raising], - next_action=None, - on_error_action="error_handling", - ), - Action( - name="error_handling", - steps=[handle_error], - next_action=None, - on_error_action=None, - ), - ) - async with workflow_runner_manager_lifecycle( - app, context_interface_factory, workflow - ) as workflow_runner_manager: - await workflow_runner_manager.initialize_workflow_runner( - workflow_name=workflow_name, action_name="raising_error" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name=workflow_name) - await workflow_runner_manager.wait_workflow_runner(workflow_name) - - assert call_tracker.call_args_list == [ - call(error_raising), - call(handle_error), - ] - - -async def test_error_raised_but_not_handled( - app: AsyncMock, - context_interface_factory: Awaitable[ContextInterface], - workflow_name: WorkflowName, -): - call_tracker = AsyncMock() - - class UnhandledError(RuntimeError): - ... - - @mark_step - async def error_raising() -> dict[str, Any]: - await call_tracker(error_raising) - raise UnhandledError() - - workflow = Workflow( - Action( - name="raising_error", - steps=[error_raising], - next_action=None, - on_error_action=None, - ) - ) - async with workflow_runner_manager_lifecycle( - app, context_interface_factory, workflow - ) as workflow_runner_manager: - await workflow_runner_manager.initialize_workflow_runner( - workflow_name=workflow_name, action_name="raising_error" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name=workflow_name) - with pytest.raises(UnhandledError): - await workflow_runner_manager.wait_workflow_runner(workflow_name) - - assert call_tracker.call_args_list == [ - call(error_raising), - ] - - -# TEST 4: test cancellation of very long pending event and schedule a new workflow which will finish -async def test_cancellation_of_current_workflow_and_changing_to_a_different_one( - app: AsyncMock, context_interface_factory: Awaitable[ContextInterface] -): - call_tracker = AsyncMock() - - # WORKFLOW_PENDING_FOREVER - - @mark_step - async def pending_forever() -> dict[str, Any]: - await call_tracker(pending_forever) - await asyncio.sleep(1e10) - return {} - - workflow_pending = Workflow( - Action( - name="pending", - steps=[pending_forever], - next_action=None, - on_error_action=None, - ) - ) - - # WORKFLOW_FINISHES_IMMEDIATELY - - @mark_step - async def print_something_and_finish() -> dict[str, Any]: - await call_tracker(print_something_and_finish) - print("a thing") - return {} - - workflow_finishing = Workflow( - Action( - name="finishing", - steps=[print_something_and_finish], - next_action=None, - on_error_action=None, - ) - ) - - async with workflow_runner_manager_lifecycle( - app=app, - context_interface_factory=context_interface_factory, - workflow=workflow_pending + workflow_finishing, - ) as workflow_runner_manager: - - # start the first workflow and cancel it immediately - await workflow_runner_manager.initialize_workflow_runner( - workflow_name="pending_workflow", action_name="pending" - ) - await 
workflow_runner_manager.start_workflow_runner( - workflow_name="pending_workflow" - ) - WAIT_FOR_STEP_TO_START = 0.1 - await asyncio.sleep(WAIT_FOR_STEP_TO_START) - await workflow_runner_manager.cancel_and_wait_workflow_runner( - "pending_workflow" - ) - - # start second workflow which wil finish afterwards - await workflow_runner_manager.initialize_workflow_runner( - workflow_name="finishing_workflow", action_name="finishing" - ) - await workflow_runner_manager.start_workflow_runner( - workflow_name="finishing_workflow" - ) - await workflow_runner_manager.wait_workflow_runner("finishing_workflow") - - assert call_tracker.call_args_list == [ - call(pending_forever), - call(print_something_and_finish), - ] diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__action.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__action.py deleted file mode 100644 index 573726f623e..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__action.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Any - -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._action import ( - Action, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._marker import ( - mark_step, -) - - -async def test_action_ok(): - @mark_step - async def print_info() -> dict[str, Any]: - print("some info") - return {} - - @mark_step - async def verify(x: float, y: int) -> dict[str, Any]: - assert type(x) == float - assert type(y) == int - return {} - - INFO_CHECK = Action( - name="test", - steps=[ - print_info, - verify, - ], - next_action=None, - on_error_action=None, - ) - assert INFO_CHECK diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_base.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_base.py deleted file mode 100644 index c1216c4b7eb..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_base.py +++ /dev/null @@ -1,10 +0,0 @@ -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - ReservedContextKeys, -) - - -def test_asd(): - assert ReservedContextKeys.is_reserved("app") is True - assert ReservedContextKeys.is_reserved("missing") is False - assert ReservedContextKeys.is_stored_locally("app") is True - assert ReservedContextKeys.is_reserved("missing") is False diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_in_memory.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_in_memory.py deleted file mode 100644 index cdbb551d045..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__context_in_memory.py +++ /dev/null @@ -1,36 +0,0 @@ -# pylint: disable=redefined-outer-name - -from typing import Any - -import pytest -from pytest import FixtureRequest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_in_memory import ( - InMemoryContext, -) - - -@pytest.fixture -def key_1() -> str: - return "key_1" - - -@pytest.fixture(params=[1, "a", {"a": {"1": 2}}, set()]) -def value(request: FixtureRequest) -> Any: - return request.param - - -async def 
test_in_memory_context(key_1: str, value: Any): - context = InMemoryContext() - - assert await context.has_key(key_1) is False - await context.save(key_1, value) - assert await context.has_key(key_1) is True - - stored_value = await context.load(key_1) - assert stored_value == value - - # ensure serialization is working - serialized_context = await context.to_dict() - new_context = InMemoryContext() - await new_context.update(serialized_context) - assert serialized_context == await new_context.to_dict() diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__marker.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__marker.py deleted file mode 100644 index 15ada9222fe..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__marker.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Any - -import pytest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._errors import ( - UnexpectedStepReturnTypeError, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._marker import ( - mark_step, -) - - -async def test_register_step_with_return_value(): - @mark_step - async def return_inputs(x: str, y: float, z: dict[str, int]) -> dict[str, Any]: - return {"x": x, "y": y, "z": z} - - assert return_inputs.input_types == {"x": str, "y": float, "z": dict[str, int]} - - assert await return_inputs(1, 2, {"a": 3}) == dict(x=1, y=2, z={"a": 3}) - - -async def test_register_step_wrong_return_type(): - with pytest.raises(UnexpectedStepReturnTypeError) as exec_info: - - @mark_step - async def wrong_return_type(x: str, y: float, z: dict[str, int]) -> str: - return {"x": x, "y": y, "z": z} - - assert ( - f"{exec_info.value}" - == "Step should always return `dict[str, Any]`, returning: " - ) diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_context.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_context.py deleted file mode 100644 index 17bda7ca4f3..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_context.py +++ /dev/null @@ -1,116 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name - -from typing import Any - -import pytest -from fastapi import FastAPI -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - _STORED_LOCALLY, - ContextInterface, - ReservedContextKeys, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._errors import ( - GetTypeMismatchError, - NotAllowedContextKeyError, - NotInContextError, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow_context import ( - WorkflowContext, -) - -WORKFLOW_NAME = "test_workflow" -WORKFLOW_ACTION_NAME = "test_workflow_action_name" - -EXTRA_WORKFLOW_CONTEXT_DATA: dict[str, str] = { - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX: 0, - ReservedContextKeys.WORKFLOW_NAME: WORKFLOW_NAME, - ReservedContextKeys.WORKFLOW_ACTION_NAME: WORKFLOW_ACTION_NAME, -} - - -@pytest.fixture -def key_1() -> str: - return "key_1" - - -@pytest.fixture -def app() -> FastAPI: - return FastAPI() - - -@pytest.fixture -async def workflow_context(app: FastAPI, context: ContextInterface) -> WorkflowContext: - workflow_context = WorkflowContext( 
- context=context, - app=app, - workflow_name=WORKFLOW_NAME, - action_name=WORKFLOW_ACTION_NAME, - ) - await workflow_context.setup() - yield workflow_context - await workflow_context.teardown() - - -async def test_workflow_context_local_values( - app: FastAPI, workflow_context: WorkflowContext -): - # check all locally stored values are available - for key in _STORED_LOCALLY: - assert key in workflow_context._local_storage - - for local_key, value_1, value_2, value_type in [ - # add future served below - (ReservedContextKeys.APP, app, FastAPI(), FastAPI), - ]: - # check local getter - assert value_1 == workflow_context._local_storage[local_key] - assert value_1 == await workflow_context.get(local_key, value_type) - # check local setter - await workflow_context.set(local_key, value_2, set_reserved=True) - assert value_2 == workflow_context._local_storage[local_key] - - -async def test_workflow_context_reserved_key(workflow_context: WorkflowContext): - with pytest.raises(NotAllowedContextKeyError): - await workflow_context.set( - ReservedContextKeys.UNEXPECTED_RUNTIME_EXCEPTION, "value" - ) - - await workflow_context.set( - ReservedContextKeys.UNEXPECTED_RUNTIME_EXCEPTION, "value", set_reserved=True - ) - - -async def test_key_not_found_in_workflow_context(workflow_context: WorkflowContext): - with pytest.raises(NotInContextError): - await workflow_context.get("for_sure_I_am_missing", str) - - -async def test_key_get_wrong_type(key_1: str, workflow_context: WorkflowContext): - await workflow_context.set(key_1, 4) - with pytest.raises(GetTypeMismatchError): - await workflow_context.get(key_1, str) - - -async def test_set_and_get_non_local(key_1: str, workflow_context: WorkflowContext): - await workflow_context.set(key_1, 4) - assert await workflow_context.get(key_1, int) == 4 - - -async def test_get_serialized_context(key_1: str, workflow_context: WorkflowContext): - await workflow_context.set(key_1, 4) - assert ( - await workflow_context.get_serialized_context() - == {key_1: 4} | EXTRA_WORKFLOW_CONTEXT_DATA - ) - - -async def test_import_from_serialized_context(workflow_context: WorkflowContext): - serialized_workflow_context: dict[str, Any] = {"1": 1, "d": dict(me=1.1)} - - await workflow_context.import_from_serialized_context(serialized_workflow_context) - assert ( - await workflow_context.get_serialized_context() - == serialized_workflow_context | EXTRA_WORKFLOW_CONTEXT_DATA - ) diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner.py deleted file mode 100644 index f65f07963ff..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner.py +++ /dev/null @@ -1,150 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name - -import logging -from typing import Any -from unittest.mock import AsyncMock - -import pytest -from pytest import LogCaptureFixture -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._action import ( - Action, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - ContextInterface, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._marker import ( - mark_step, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._models import ( - ActionName, - StepName, -) -from 
simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow import ( - Workflow, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow_context import ( - WorkflowContext, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow_runner import ( - _iter_index_step, - workflow_runner, -) - -logger = logging.getLogger(__name__) - - -# FIXTURES - - -@pytest.fixture -async def workflow_context( - context: ContextInterface, -) -> WorkflowContext: - workflow_context = WorkflowContext( - context=context, app=AsyncMock(), workflow_name="unique", action_name="first" - ) - await workflow_context.setup() - yield workflow_context - await workflow_context.teardown() - - -# TESTS - - -@pytest.mark.skip(reason="DEV") -async def test_iter_index_step(): - async def first(): - pass - - async def second(): - pass - - async def third(): - pass - - awaitables = [first, second, third] - step_sequence = list(enumerate(awaitables)) - - three_element_list = list(_iter_index_step(awaitables)) - assert three_element_list == step_sequence - assert len(three_element_list) == 3 - - three_element_list = list(_iter_index_step(awaitables, index=0)) - assert three_element_list == step_sequence - assert len(three_element_list) == 3 - - two_element_list = list(_iter_index_step(awaitables, index=1)) - assert two_element_list == step_sequence[1:] - assert len(two_element_list) == 2 - - one_element_list = list(_iter_index_step(awaitables, index=2)) - assert one_element_list == step_sequence[2:] - assert len(one_element_list) == 1 - - for out_of_bound_index in range(3, 10): - zero_element_list = list(_iter_index_step(awaitables, index=out_of_bound_index)) - assert zero_element_list == step_sequence[out_of_bound_index:] - assert len(zero_element_list) == 0 - - -# TESTS - - -async def test_workflow_runner( - workflow_context: WorkflowContext, caplog_info_level: LogCaptureFixture -): - @mark_step - async def initial() -> dict[str, Any]: - print("initial") - return {"x": 10, "y": 12.3} - - @mark_step - async def verify(x: int, y: float) -> dict[str, Any]: - assert type(x) == int - assert type(y) == float - return {"z": x + y} - - @mark_step - async def print_second() -> dict[str, Any]: - print("SECOND") - return {} - - FIRST_STATE = Action( - name="first", - steps=[ - initial, - verify, - ], - next_action="second", - on_error_action=None, - ) - SECOND_STATE = Action( - name="second", - steps=[ - print_second, - verify, - verify, - ], - next_action=None, - on_error_action=None, - ) - - workflow = Workflow(FIRST_STATE, SECOND_STATE) - - async def hook_before(action: ActionName, step: StepName) -> None: - logger.info("hook_before %s %s", f"{action=}", f"{step=}") - - async def hook_after(action: ActionName, step: StepName) -> None: - logger.info("hook_after %s %s", f"{action=}", f"{step=}") - - await workflow_runner( - workflow=workflow, - workflow_context=workflow_context, - before_step_hook=hook_before, - after_step_hook=hook_after, - ) - - # check hooks are working as expected - assert "hook_before action='first' step='initial'" in caplog_info_level.messages - assert "hook_after action='first' step='initial'" in caplog_info_level.messages diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner_manager.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner_manager.py deleted file mode 100644 index fb5fdacd6f1..00000000000 
--- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2__workflow_runner_manager.py +++ /dev/null @@ -1,408 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name - -import asyncio -from contextlib import asynccontextmanager -from typing import Any, AsyncIterable, Awaitable -from unittest.mock import AsyncMock, call - -import pytest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._action import ( - Action, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._context_base import ( - ContextInterface, - ReservedContextKeys, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._errors import ( - ActionNotRegisteredException, - InvalidSerializedContextException, - WorkflowAlreadyExistingException, - WorkflowNotFoundException, - WorkflowNotInitializedException, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._marker import ( - mark_step, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._models import ( - ExceptionInfo, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow import ( - Workflow, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow_context import ( - WorkflowContext, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow_runner_manager import ( - WorkflowRunnerManager, -) - -# UTILS - - -@asynccontextmanager -async def _workflow_runner_manager_lifecycle( - workflow_runner_manager: WorkflowRunnerManager, -) -> None: - try: - await workflow_runner_manager.setup() - yield None - finally: - await workflow_runner_manager.teardown() - - -# FIXTURES - - -@pytest.fixture -async def simple_workflow_runner_manager( - context_interface_factory: Awaitable[ContextInterface], -) -> AsyncIterable[WorkflowRunnerManager]: - workflow = Workflow( - Action(name="", steps=[], next_action=None, on_error_action=None) - ) - workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(workflow_runner_manager): - yield workflow_runner_manager - - -# TESTS - - -async def test_initialize_workflow_runner_exceptions( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - await simple_workflow_runner_manager.initialize_workflow_runner( - workflow_name="first", action_name="" - ) - with pytest.raises(WorkflowAlreadyExistingException): - await simple_workflow_runner_manager.initialize_workflow_runner( - workflow_name="first", action_name="" - ) - - with pytest.raises(ActionNotRegisteredException): - await simple_workflow_runner_manager.initialize_workflow_runner( - workflow_name="second", action_name="not_registered" - ) - - -async def test_get_workflow_context_exception( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - with pytest.raises(WorkflowNotInitializedException): - simple_workflow_runner_manager.get_workflow_context("not_existing_workflow") - - -async def test_start_workflow_runner_exception( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - with pytest.raises(WorkflowNotInitializedException): - await simple_workflow_runner_manager.start_workflow_runner("first") - - -async def test_resume_workflow_runner_exceptions( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - invalid_serialized_context = {} - - with 
pytest.raises(WorkflowNotInitializedException): - await simple_workflow_runner_manager.resume_workflow_runner( - "first", invalid_serialized_context - ) - - await simple_workflow_runner_manager.initialize_workflow_runner( - workflow_name="first", action_name="" - ) - with pytest.raises(InvalidSerializedContextException): - await simple_workflow_runner_manager.resume_workflow_runner( - "first", invalid_serialized_context - ) - - -async def test_wait_workflow_runner_exception( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - with pytest.raises(WorkflowNotFoundException): - await simple_workflow_runner_manager.wait_workflow_runner( - "not_started_workflow" - ) - - -async def test_cancel_and_wait_workflow_runner_exception( - simple_workflow_runner_manager: WorkflowRunnerManager, -): - with pytest.raises(WorkflowNotFoundException): - await simple_workflow_runner_manager.cancel_and_wait_workflow_runner( - "not_started_workflow" - ) - - -async def test_workflow_runner_manager( - context_interface_factory: Awaitable[ContextInterface], -): - @mark_step - async def initial_state() -> dict[str, Any]: - print("initial state") - return {"x": 10, "y": 12.3} - - @mark_step - async def verify(x: int, y: float) -> dict[str, Any]: - assert type(x) == int - assert type(y) == float - return {"z": x + y} - - @mark_step - async def print_second() -> dict[str, Any]: - print("SECOND") - return {} - - FIRST_ACTION = Action( - name="first", - steps=[ - initial_state, - verify, - ], - next_action="second", - on_error_action=None, - ) - SECOND_ACTION = Action( - name="second", - steps=[ - print_second, - verify, - verify, - ], - next_action=None, - on_error_action=None, - ) - - workflow = Workflow(FIRST_ACTION, SECOND_ACTION) - - workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(workflow_runner_manager): - # ok workflow_runner - await workflow_runner_manager.initialize_workflow_runner( - workflow_name="start_first", action_name="first" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name="start_first") - assert "start_first" in workflow_runner_manager._workflow_context - assert "start_first" in workflow_runner_manager._workflow_tasks - await workflow_runner_manager.wait_workflow_runner("start_first") - assert "start_first" not in workflow_runner_manager._workflow_context - assert "start_first" not in workflow_runner_manager._workflow_tasks - - # cancel workflow_runner - await workflow_runner_manager.initialize_workflow_runner( - workflow_name="start_first", action_name="first" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name="start_first") - await workflow_runner_manager.cancel_and_wait_workflow_runner("start_first") - assert "start_first" not in workflow_runner_manager._workflow_context - assert "start_first" not in workflow_runner_manager._workflow_tasks - with pytest.raises(WorkflowNotFoundException): - await workflow_runner_manager.wait_workflow_runner("start_first") - - -async def test_workflow_runner_error_handling( - context_interface_factory: Awaitable[ContextInterface], -): - ERROR_MARKER_IN_TB = "__this message must be present in the traceback__" - - @mark_step - async def error_raiser() -> dict[str, Any]: - raise RuntimeError(ERROR_MARKER_IN_TB) - - @mark_step - async def graceful_error_handler( - unexpected_runtime_exception: ExceptionInfo, - ) -> dict[str, Any]: - assert unexpected_runtime_exception.exception_class == 
RuntimeError - assert unexpected_runtime_exception.action_name in { - "case_1_rasing_error", - "case_2_rasing_error", - } - assert unexpected_runtime_exception.step_name == error_raiser.__name__ - assert ERROR_MARKER_IN_TB in unexpected_runtime_exception.serialized_traceback - await asyncio.sleep(0.1) - return {} - - # CASE 1 - # error is raised by first state, second state handles it -> no error raised - CASE_1_RAISING_ERROR = Action( - name="case_1_rasing_error", - steps=[ - error_raiser, - ], - next_action=None, - on_error_action="case_1_handling_error", - ) - CASE_1_HANDLING_ERROR = Action( - name="case_1_handling_error", - steps=[ - graceful_error_handler, - ], - next_action=None, - on_error_action=None, - ) - - # CASE 2 - # error is raised by first state -> raises error - CASE_2_RASING_ERROR = Action( - name="case_2_raising_error", - steps=[ - error_raiser, - ], - next_action=None, - on_error_action=None, - ) - - workflow = Workflow( - CASE_1_RAISING_ERROR, - CASE_1_HANDLING_ERROR, - CASE_2_RASING_ERROR, - ) - - workflow_name = "test_workflow_name" - # CASE 1 - workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(workflow_runner_manager): - await workflow_runner_manager.initialize_workflow_runner( - workflow_name=workflow_name, action_name="case_1_rasing_error" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name=workflow_name) - await workflow_runner_manager.wait_workflow_runner(workflow_name) - - # CASE 2 - workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(workflow_runner_manager): - await workflow_runner_manager.initialize_workflow_runner( - workflow_name=workflow_name, action_name="case_2_raising_error" - ) - await workflow_runner_manager.start_workflow_runner(workflow_name=workflow_name) - with pytest.raises(RuntimeError): - await workflow_runner_manager.wait_workflow_runner(workflow_name) - - -async def test_resume_workflow_runner_workflow( - context_interface_factory: Awaitable[ContextInterface], -): - call_tracker_1 = AsyncMock() - call_tracker_2 = AsyncMock() - - @mark_step - async def first_step() -> dict[str, Any]: - await call_tracker_1(first_step) - return {} - - @mark_step - async def optionally_long_sleep(sleep: bool) -> dict[str, Any]: - if sleep: - await call_tracker_1(optionally_long_sleep) - await asyncio.sleep(1e10) - await call_tracker_2(optionally_long_sleep) - return {} - - @mark_step - async def third_step() -> dict[str, Any]: - await call_tracker_2(third_step) - return {} - - workflow = Workflow( - Action( - name="initial", - steps=[ - first_step, - optionally_long_sleep, - third_step, - ], - next_action=None, - on_error_action=None, - ) - ) - - # start workflow which will wait forever on step `optionally_long_sleep` - first_workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(first_workflow_runner_manager): - await first_workflow_runner_manager.initialize_workflow_runner( - "test", action_name="initial" - ) - - # NOTE: allows the workflow to wait for forever - # this is also a way to initialize some data before - # starting the workflow_runner - # NOTE: after calling `cancel_and_wait_workflow_runner`, `WorkflowRunnerManager` - # will no longer keep track of 
the context of the workflow - first_context: WorkflowContext = ( - first_workflow_runner_manager.get_workflow_context("test") - ) - await first_context.set("sleep", True) - await first_workflow_runner_manager.start_workflow_runner("test") - - WAIT_TO_REACH_SECOND_STEP = 0.1 - await asyncio.sleep(WAIT_TO_REACH_SECOND_STEP) - await first_workflow_runner_manager.cancel_and_wait_workflow_runner("test") - - # ensure state as expected, stopped while handling `optionally_long_sleep`` - assert call_tracker_1.call_args_list == [ - call(first_step), - call(optionally_long_sleep), - ] - serialized_first_context_data = await first_context.get_serialized_context() - assert serialized_first_context_data == { - "sleep": True, - ReservedContextKeys.WORKFLOW_ACTION_NAME: "initial", - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX: 1, - ReservedContextKeys.WORKFLOW_CURRENT_STEP_NAME: "optionally_long_sleep", - ReservedContextKeys.WORKFLOW_NAME: "test", - } - - # resume workflow which rune from step `optionally_long_sleep` and finish - - second_workflow_runner_manager = WorkflowRunnerManager( - context_factory=context_interface_factory, app=AsyncMock(), workflow=workflow - ) - async with _workflow_runner_manager_lifecycle(second_workflow_runner_manager): - await second_workflow_runner_manager.initialize_workflow_runner( - "test", action_name="initial" - ) - # NOTE: after calling `wait_workflow_runner`, `WorkflowRunnerManager` - # will no longer keep track of the context of the workflow - second_context: WorkflowContext = ( - second_workflow_runner_manager.get_workflow_context("test") - ) - - # NOTE: allows the workflow to finish, normally the incoming - # serialized_context would not be touched, - # this is just required for the test - serialized_first_context_data["sleep"] = False - - workflow_name = serialized_first_context_data[ReservedContextKeys.WORKFLOW_NAME] - await second_workflow_runner_manager.resume_workflow_runner( - workflow_name=workflow_name, - serialized_context=serialized_first_context_data, - ) - await second_workflow_runner_manager.wait_workflow_runner(workflow_name) - - # ensure state as expected, finished on `third_step` - assert call_tracker_2.call_args_list == [ - call(optionally_long_sleep), - call(third_step), - ] - new_context_data = await second_context.get_serialized_context() - assert new_context_data == { - "sleep": False, - ReservedContextKeys.WORKFLOW_ACTION_NAME: "initial", - ReservedContextKeys.WORKFLOW_CURRENT_STEP_INDEX: 2, - ReservedContextKeys.WORKFLOW_CURRENT_STEP_NAME: "third_step", - ReservedContextKeys.WORKFLOW_NAME: "test", - } diff --git a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2_workflow.py b/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2_workflow.py deleted file mode 100644 index e309eac2e08..00000000000 --- a/services/director-v2/tests/unit/dynamic_scheduler/test_modules_dynamic_sidecar_scheduler__core2_workflow.py +++ /dev/null @@ -1,78 +0,0 @@ -import pytest -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._action import ( - Action, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._errors import ( - NextActionNotInWorkflowException, - OnErrorActionNotInWorkflowException, -) -from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core2._workflow import ( - Workflow, -) - - -def test_workflow(): - ACTION_ONE_NAME = "one" - ACTION_TWO_NAME = "two" - ACTION_MISSING_NAME = "not_existing_action" 
- - action_one = Action( - name=ACTION_ONE_NAME, steps=[], next_action=None, on_error_action=None - ) - acton_two = Action( - name=ACTION_TWO_NAME, steps=[], next_action=None, on_error_action=None - ) - - workflow = Workflow( - action_one, - acton_two, - ) - - # in operator - assert ACTION_ONE_NAME in workflow - assert ACTION_TWO_NAME in workflow - assert ACTION_MISSING_NAME not in workflow - - # get key operator - assert workflow[ACTION_ONE_NAME] == action_one - assert workflow[ACTION_TWO_NAME] == acton_two - with pytest.raises(KeyError): - workflow[ACTION_MISSING_NAME] # pylint:disable=pointless-statement - - -def test_workflow_missing_next_action(): - action = Action( - name="some_name", - steps=[], - next_action="missing_next_action", - on_error_action=None, - ) - with pytest.raises(NextActionNotInWorkflowException): - Workflow(action) - - -def test_workflow_missing_on_error_action(): - action = Action( - name="some_name", - steps=[], - next_action=None, - on_error_action="missing_on_error_action", - ) - with pytest.raises(OnErrorActionNotInWorkflowException): - Workflow(action) - - -def test_workflow_add(): - ACTION_ONE = "action_1" - ACTION_TWO = "action_2" - - action_1 = Action(name=ACTION_ONE, steps=[], next_action=None, on_error_action=None) - workflow_1 = Workflow(action_1) - action_2 = Action(name=ACTION_TWO, steps=[], next_action=None, on_error_action=None) - workflow_2 = Workflow(action_2) - - workflow = workflow_1 + workflow_2 - assert ACTION_ONE in workflow - assert ACTION_TWO in workflow - assert workflow[ACTION_ONE] == action_1 - assert workflow[ACTION_TWO] == action_2 diff --git a/services/director-v2/tests/unit/test_api_route_dynamic_scheduler.py b/services/director-v2/tests/unit/test_api_route_dynamic_scheduler.py index 829be0c8bfa..41abda858bb 100644 --- a/services/director-v2/tests/unit/test_api_route_dynamic_scheduler.py +++ b/services/director-v2/tests/unit/test_api_route_dynamic_scheduler.py @@ -3,22 +3,19 @@ # pylint: disable=unused-variable import asyncio -from typing import AsyncIterator +from collections.abc import AsyncIterator import pytest import respx +from faker import Faker from fastapi import status +from httpx import Response +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceCreate +from models_library.basic_types import PortInt from models_library.service_settings_labels import SimcoreServiceLabels -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict -from requests import Response -from simcore_service_director_v2.models.domains.dynamic_services import ( - DynamicServiceCreate, -) -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - SchedulerData, -) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.errors import ( DynamicSidecarNotFoundError, ) @@ -30,46 +27,58 @@ @pytest.fixture def mock_env( + mock_exclusive: None, disable_rabbitmq: None, + disable_postgres: None, mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, docker_swarm: None, + faker: Faker, ) -> None: monkeypatch.setenv("SC_BOOT_MODE", "default") monkeypatch.setenv("DIRECTOR_ENABLED", "false") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "false") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V2_TRACING", "null") 
monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - monkeypatch.setenv("POSTGRES_HOST", "mocked_host") - monkeypatch.setenv("POSTGRES_USER", "mocked_user") - monkeypatch.setenv("POSTGRES_PASSWORD", "mocked_password") - monkeypatch.setenv("POSTGRES_DB", "mocked_db") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "false") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) @pytest.fixture def dynamic_sidecar_scheduler(client: TestClient) -> DynamicSidecarsScheduler: - return client.app.state.dynamic_sidecar_scheduler + return client.app.state.dynamic_sidecar_scheduler # type: ignore + + +@pytest.fixture +def mock_apply_observation_cycle(mocker: MockerFixture) -> None: + module_base = ( + "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer" + ) + mocker.patch(f"{module_base}._apply_observation_cycle", autospec=True) + + +@pytest.fixture +def mock_free_reserved_disk_space(mocker: MockerFixture) -> None: + module_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._task" + mocker.patch( + f"{module_base}.DynamicSidecarsScheduler.free_reserved_disk_space", + autospec=True, + ) @pytest.fixture -async def mock_sidecar_api(scheduler_data: SchedulerData) -> AsyncIterator[None]: - with respx.mock( - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: +async def mock_sidecar_api( + scheduler_data: SchedulerData, +) -> AsyncIterator[None]: + with respx.mock(assert_all_called=False, assert_all_mocked=True) as respx_mock: respx_mock.get(f"{scheduler_data.endpoint}/health", name="is_healthy").respond( - json=dict(is_healthy=True) + json={"is_healthy": True} ) yield @@ -80,20 +89,21 @@ async def observed_service( dynamic_sidecar_scheduler: DynamicSidecarsScheduler, dynamic_service_create: DynamicServiceCreate, simcore_service_labels: SimcoreServiceLabels, - dynamic_sidecar_port: int, + dynamic_sidecar_port: PortInt, request_dns: str, request_scheme: str, + can_save: bool, ) -> SchedulerData: await dynamic_sidecar_scheduler.add_service( - dynamic_service_create, - simcore_service_labels, - dynamic_sidecar_port, - request_dns, - request_scheme, + service=dynamic_service_create, + simcore_service_labels=simcore_service_labels, + port=dynamic_sidecar_port, + request_dns=request_dns, + request_scheme=request_scheme, request_simcore_user_agent="", + can_save=can_save, ) - # pylint:disable=protected-access - return dynamic_sidecar_scheduler._scheduler.get_scheduler_data( + return dynamic_sidecar_scheduler.scheduler.get_scheduler_data( dynamic_service_create.node_uuid ) @@ -116,17 +126,20 @@ async def test_update_service_observation_node_not_found( with pytest.raises(DynamicSidecarNotFoundError): client.patch( f"/v2/dynamic_scheduler/services/{scheduler_data.node_uuid}/observation", - json=dict(is_disabled=False), + json={"is_disabled": False}, ) async def test_update_service_observation( - mock_sidecar_api: None, client: TestClient, observed_service: SchedulerData + mock_apply_observation_cycle: None, + mock_sidecar_api: None, + 
client: TestClient, + observed_service: SchedulerData, ): def _toggle(*, is_disabled: bool) -> Response: return client.patch( f"/v2/dynamic_scheduler/services/{observed_service.node_uuid}/observation", - json=dict(is_disabled=is_disabled), + json={"is_disabled": is_disabled}, ) # trying to lock the service @@ -174,6 +187,7 @@ def _toggle(*, is_disabled: bool) -> Response: ], ) async def test_409_response( + mock_apply_observation_cycle: None, mock_scheduler_service_shutdown_tasks: None, client: TestClient, observed_service: SchedulerData, @@ -197,3 +211,15 @@ async def test_409_response( ) assert response.status_code == status.HTTP_409_CONFLICT assert "must be unique" in response.text + + +async def test_free_reserved_disk_space( + mock_apply_observation_cycle: None, + mock_free_reserved_disk_space: None, + client: TestClient, + observed_service: SchedulerData, +): + response = client.post( + f"/v2/dynamic_scheduler/services/{observed_service.node_uuid}/disk/reserved:free", + ) + assert response.status_code == status.HTTP_204_NO_CONTENT diff --git a/services/director-v2/tests/unit/test_core_settings.py b/services/director-v2/tests/unit/test_core_settings.py index ea8ed76373c..2151d64cfa5 100644 --- a/services/director-v2/tests/unit/test_core_settings.py +++ b/services/director-v2/tests/unit/test_core_settings.py @@ -5,22 +5,24 @@ from typing import Any import pytest -from models_library.basic_types import LogLevel +from models_library.basic_types import BootModeEnum, LogLevel from pydantic import ValidationError -from pytest import FixtureRequest, MonkeyPatch from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.base import DefaultFromEnvFactoryError from settings_library.r_clone import S3Provider -from simcore_service_director_v2.core.settings import ( - AppSettings, - BootModeEnum, - DynamicSidecarSettings, +from simcore_service_director_v2.core.dynamic_services_settings.egress_proxy import ( EnvoyLogLevel, +) +from simcore_service_director_v2.core.dynamic_services_settings.sidecar import ( + DynamicSidecarSettings, + PlacementSettings, RCloneSettings, ) +from simcore_service_director_v2.core.settings import AppSettings def _get_backend_type_options() -> set[str]: - return {x for x in dir(S3Provider) if not x.startswith("_")} + return {x for x in S3Provider if not x.startswith("_")} def test_supported_backends_did_not_change() -> None: @@ -32,33 +34,7 @@ def test_supported_backends_did_not_change() -> None: ) -@pytest.mark.parametrize( - "endpoint, is_secure", - [ - ("localhost", False), - ("s3_aws", False), - ("https://ceph.home", True), - ("http://local.dev", False), - ], -) -def test_expected_s3_endpoint( - endpoint: str, is_secure: bool, monkeypatch: MonkeyPatch -) -> None: - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", endpoint) - monkeypatch.setenv("S3_SECURE", "true" if is_secure else "false") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - - r_clone_settings = RCloneSettings.create_from_envs() - - scheme = "https" if is_secure else "http" - assert r_clone_settings.R_CLONE_S3.S3_ENDPOINT.startswith(f"{scheme}://") - assert r_clone_settings.R_CLONE_S3.S3_ENDPOINT.endswith(endpoint) - - -def test_enforce_r_clone_requirement(monkeypatch: MonkeyPatch) -> None: +def test_enforce_r_clone_requirement(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") 
monkeypatch.setenv("R_CLONE_POLL_INTERVAL_SECONDS", "11") with pytest.raises(ValueError): @@ -68,7 +44,7 @@ def test_enforce_r_clone_requirement(monkeypatch: MonkeyPatch) -> None: def test_settings_with_project_env_devel(project_env_devel_environment: dict[str, Any]): # loads from environ settings = AppSettings.create_from_envs() - print("captured settings: \n", settings.json(indent=2)) + print("captured settings: \n", settings.model_dump_json(indent=2)) assert settings.SC_BOOT_MODE == BootModeEnum.DEBUG assert settings.LOG_LEVEL == LogLevel.DEBUG @@ -77,12 +53,15 @@ def test_settings_with_project_env_devel(project_env_devel_environment: dict[str def test_settings_with_repository_env_devel( - mock_env_devel_environment: dict[str, str], monkeypatch: MonkeyPatch + mock_env_devel_environment: dict[str, str], monkeypatch: pytest.MonkeyPatch ): monkeypatch.setenv("SC_BOOT_MODE", "production") # defined in Dockerfile + monkeypatch.setenv( + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", "{}" + ) # defined in docker-compose settings = AppSettings.create_from_envs() - print("captured settings: \n", settings.json(indent=2)) + print("captured settings: \n", settings.model_dump_json(indent=2)) assert settings @@ -111,9 +90,9 @@ def test_settings_with_repository_env_devel( ], ) def testing_environ_expected_success( - request: FixtureRequest, + request: pytest.FixtureRequest, project_env_devel_environment, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> str: container_path: str = request.param monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", container_path) @@ -122,8 +101,8 @@ def testing_environ_expected_success( def test_dynamic_sidecar_settings(testing_environ_expected_success: str) -> None: settings = DynamicSidecarSettings.create_from_envs() - assert settings.DYNAMIC_SIDECAR_IMAGE == testing_environ_expected_success.lstrip( - "/" + assert ( + testing_environ_expected_success.lstrip("/") == settings.DYNAMIC_SIDECAR_IMAGE ) @@ -134,9 +113,9 @@ def test_dynamic_sidecar_settings(testing_environ_expected_success: str) -> None ], ) def environment_with_invalid_values( - request: FixtureRequest, + request: pytest.FixtureRequest, project_env_devel_environment, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ): container_path: str = request.param monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", container_path) @@ -152,7 +131,7 @@ def test_expected_failure_dynamic_sidecar_settings( @pytest.mark.parametrize( "custom_constraints, expected", - ( + [ ("[]", []), ('["one==yes"]', ["one==yes"]), ('["two!=no"]', ["two!=no"]), @@ -163,23 +142,26 @@ def test_expected_failure_dynamic_sidecar_settings( '["node.labels.standard-worker==true"]', ["node.labels.standard-worker==true"], ), - ), + ], ) def test_services_custom_constraints( custom_constraints: str, expected: list[str], project_env_devel_environment: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: monkeypatch.setenv("DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS", custom_constraints) settings = AppSettings.create_from_envs() - assert type(settings.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS) == list - assert settings.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS == expected + custom_constraints = ( + settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR_PLACEMENT_SETTINGS.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS + ) + assert isinstance(custom_constraints, list) + assert expected == custom_constraints @pytest.mark.parametrize( "custom_constraints, expected", - ( + [ # Whitespaces in key are not allowed 
https://docs.docker.com/config/labels-custom-metadata/#label-keys-and-values ('["strips.white spaces==ok "]', ["strips.white spaces==ok"]), ('[".starting.trailing.dot.==forbidden"]', [".starting.dot==forbidden"]), @@ -195,28 +177,31 @@ def test_services_custom_constraints( '["node.labels.standard_worker==true"]', ["node.labels.standard_worker==true"], ), - ), + ], ) def test_services_custom_constraint_failures( custom_constraints: str, expected: list[str], project_env_devel_environment: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: monkeypatch.setenv("DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS", custom_constraints) - with pytest.raises(Exception) as excinfo: - settings = AppSettings.create_from_envs() + with pytest.raises(DefaultFromEnvFactoryError): + AppSettings.create_from_envs() def test_services_custom_constraints_default_empty_list( project_env_devel_environment: EnvVarsDict, ) -> None: settings = AppSettings.create_from_envs() - assert settings.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS == [] + assert ( + [] + == settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR_PLACEMENT_SETTINGS.DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS + ) def test_class_dynamicsidecarsettings_in_development( - monkeypatch: MonkeyPatch, project_env_devel_environment: EnvVarsDict + monkeypatch: pytest.MonkeyPatch, project_env_devel_environment: EnvVarsDict ): # assume in environ is set monkeypatch.setenv( @@ -236,7 +221,7 @@ def test_class_dynamicsidecarsettings_in_development( def test_class_dynamicsidecarsettings_in_production( - monkeypatch: MonkeyPatch, project_env_devel_environment: EnvVarsDict + monkeypatch: pytest.MonkeyPatch, project_env_devel_environment: EnvVarsDict ): # assume in environ is set monkeypatch.setenv( @@ -258,3 +243,16 @@ def test_class_dynamicsidecarsettings_in_production( def test_envoy_log_level(): for enum in (EnvoyLogLevel("WARNING"), EnvoyLogLevel.WARNING): assert enum.to_log_level() == "warning" + + +def test_placement_settings(monkeypatch: pytest.MonkeyPatch): + assert PlacementSettings() + + monkeypatch.setenv( + "DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS", "{}" + ) + placement_settings = PlacementSettings() + assert ( + placement_settings.DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS + == {} + ) diff --git a/services/director-v2/tests/unit/test_models_clusters.py b/services/director-v2/tests/unit/test_models_clusters.py index d7bb73b19f6..a974d680246 100644 --- a/services/director-v2/tests/unit/test_models_clusters.py +++ b/services/director-v2/tests/unit/test_models_clusters.py @@ -1,72 +1,34 @@ -from pprint import pformat -from typing import Any - -import pytest from faker import Faker -from pydantic import BaseModel, parse_obj_as -from simcore_service_director_v2.models.schemas.clusters import ( +from models_library.api_schemas_directorv2.clusters import ( AvailableResources, - ClusterCreate, - ClusterPatch, Scheduler, UsedResources, Worker, WorkerMetrics, - WorkersDict, -) - - -@pytest.mark.parametrize( - "model_cls", - [ClusterCreate, ClusterPatch], -) -def test_clusters_model_examples( - model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] -): - for name, example in model_cls_examples.items(): - print(name, ":", pformat(example)) - model_instance = model_cls(**example) - assert model_instance, f"Failed with {name}" - - -@pytest.mark.parametrize( - "model_cls", - [ - ClusterCreate, - ], ) -def test_cluster_creation_brings_default_thumbail( - model_cls: type[BaseModel], model_cls_examples: 
dict[str, dict[str, Any]] -): - for example in model_cls_examples.values(): - if "thumbnail" in example: - example.pop("thumbnail") - instance = model_cls(**example) - assert instance - assert instance.thumbnail +from pydantic import ByteSize, TypeAdapter def test_scheduler_constructor_with_default_has_correct_dict(faker: Faker): scheduler = Scheduler(status=faker.text()) - assert isinstance(scheduler.workers, WorkersDict) + assert scheduler.workers is not None assert len(scheduler.workers) == 0 def test_scheduler_constructor_with_no_workers_has_correct_dict(faker: Faker): scheduler = Scheduler(status=faker.text(), workers=None) - assert isinstance(scheduler.workers, WorkersDict) + assert scheduler.workers is not None assert len(scheduler.workers) == 0 def test_worker_constructor_corrects_negative_used_resources(faker: Faker): worker = Worker( - id=faker.pyint(min_value=1), + id=f"{faker.pyint(min_value=1)}", name=faker.name(), - resources=parse_obj_as(AvailableResources, {}), - used_resources=parse_obj_as(UsedResources, {"CPU": -0.0000234}), - memory_limit=faker.pyint(min_value=1), - metrics=parse_obj_as( - WorkerMetrics, + resources=TypeAdapter(AvailableResources).validate_python({}), + used_resources=TypeAdapter(UsedResources).validate_python({"CPU": -0.0000234}), + memory_limit=ByteSize(faker.pyint(min_value=1)), + metrics=WorkerMetrics.model_validate( { "cpu": faker.pyfloat(min_value=0), "memory": faker.pyint(min_value=0), diff --git a/services/director-v2/tests/unit/test_models_comp_pipelines.py b/services/director-v2/tests/unit/test_models_comp_pipelines.py index 642ff40ff22..98f7722b841 100644 --- a/services/director-v2/tests/unit/test_models_comp_pipelines.py +++ b/services/director-v2/tests/unit/test_models_comp_pipelines.py @@ -4,14 +4,14 @@ from copy import deepcopy from pprint import pformat -from typing import Any, Dict, Type +from typing import Any from uuid import UUID import networkx as nx import pytest from models_library.projects_state import RunningState from pydantic.main import BaseModel -from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB @pytest.mark.parametrize( @@ -19,7 +19,7 @@ (CompPipelineAtDB,), ) def test_computation_pipeline_model_examples( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) @@ -32,10 +32,12 @@ def test_computation_pipeline_model_examples( (CompPipelineAtDB,), ) def test_computation_pipeline_model_with_running_state_value_field( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): - example["state"] = RunningState.RETRY.value # this is a specific Runningstate + example[ + "state" + ] = RunningState.WAITING_FOR_RESOURCES.value # this is a specific Runningstate print(name, ":", pformat(example)) model_instance = model_cls(**example) assert model_instance, f"Failed with {name}" @@ -46,7 +48,7 @@ def test_computation_pipeline_model_with_running_state_value_field( (CompPipelineAtDB,), ) def test_computation_pipeline_model_with_uuids_in_dag_field( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for 
name, example in model_cls_examples.items(): old_dag_list = deepcopy(example["dag_adjacency_list"]) @@ -63,7 +65,7 @@ def test_computation_pipeline_model_with_uuids_in_dag_field( (CompPipelineAtDB,), ) def test_computation_pipeline_model_get_graph( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) diff --git a/services/director-v2/tests/unit/test_models_comp_runs.py b/services/director-v2/tests/unit/test_models_comp_runs.py index 078e2b26be9..5505982f2d1 100644 --- a/services/director-v2/tests/unit/test_models_comp_runs.py +++ b/services/director-v2/tests/unit/test_models_comp_runs.py @@ -3,20 +3,22 @@ # pylint:disable=redefined-outer-name from pprint import pformat -from typing import Any, Dict, Type +from typing import Any import pytest from models_library.projects_state import RunningState from pydantic.main import BaseModel -from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB +from simcore_service_director_v2.models.comp_runs import CompRunsAtDB @pytest.mark.parametrize( "model_cls", - (CompRunsAtDB,), + [ + CompRunsAtDB, + ], ) def test_computation_run_model_examples( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) @@ -26,13 +28,15 @@ def test_computation_run_model_examples( @pytest.mark.parametrize( "model_cls", - (CompRunsAtDB,), + [ + CompRunsAtDB, + ], ) def test_computation_run_model_with_run_result_value_field( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): - example["result"] = RunningState.RETRY.value + example["result"] = RunningState.WAITING_FOR_RESOURCES.value print(name, ":", pformat(example)) model_instance = model_cls(**example) assert model_instance, f"Failed with {name}" diff --git a/services/director-v2/tests/unit/test_models_comp_tasks.py b/services/director-v2/tests/unit/test_models_comp_tasks.py index 54e85d1a3b8..6898acface4 100644 --- a/services/director-v2/tests/unit/test_models_comp_tasks.py +++ b/services/director-v2/tests/unit/test_models_comp_tasks.py @@ -3,13 +3,13 @@ # pylint:disable=redefined-outer-name from pprint import pformat -from typing import Any, Dict, Type +from typing import Any import pytest from models_library.projects_state import RunningState from pydantic.main import BaseModel from simcore_postgres_database.models.comp_pipeline import StateType -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB @pytest.mark.parametrize( @@ -17,7 +17,7 @@ (CompTaskAtDB,), ) def test_computation_task_model_examples( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) @@ -27,10 +27,10 @@ def test_computation_task_model_examples( @pytest.mark.parametrize( "model_cls", - (CompTaskAtDB,), + [CompTaskAtDB], ) def test_computation_task_model_export_to_db_model( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, 
Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): print(name, ":", pformat(example)) @@ -41,18 +41,18 @@ def test_computation_task_model_export_to_db_model( db_model = model_instance.to_db_model() assert isinstance(db_model, dict) - StateType(db_model["state"]) + assert StateType(db_model["state"]) @pytest.mark.parametrize( "model_cls", - (CompTaskAtDB,), + [CompTaskAtDB], ) def test_computation_task_model_with_running_state_value_field( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): - example["state"] = RunningState.RETRY.value + example["state"] = RunningState.WAITING_FOR_RESOURCES.value print(name, ":", pformat(example)) model_instance = model_cls(**example) assert model_instance, f"Failed with {name}" @@ -60,10 +60,10 @@ def test_computation_task_model_with_running_state_value_field( @pytest.mark.parametrize( "model_cls", - (CompTaskAtDB,), + [CompTaskAtDB], ) def test_computation_task_model_with_wrong_default_value_field( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] + model_cls: type[BaseModel], model_cls_examples: dict[str, dict[str, Any]] ): for name, example in model_cls_examples.items(): for output_schema in example.get("schema", {}).get("outputs", {}).values(): diff --git a/services/director-v2/tests/unit/test_models_dynamic_services.py b/services/director-v2/tests/unit/test_models_dynamic_services.py index 03c9084f24b..99a22ece3bb 100644 --- a/services/director-v2/tests/unit/test_models_dynamic_services.py +++ b/services/director-v2/tests/unit/test_models_dynamic_services.py @@ -1,21 +1,28 @@ +# pylint: disable=protected-access # pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + import string from collections import namedtuple import pytest -from simcore_service_director_v2.models.schemas.dynamic_services import ( +from models_library.api_schemas_directorv2.dynamic_services import ( RunningDynamicServiceDetails, - SchedulerData, - ServiceBootType, - ServiceState, ) -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( +from models_library.services_enums import ServiceBootType, ServiceState +from simcore_service_director_v2.models.dynamic_services_scheduler import ( DockerContainerInspect, + SchedulerData, ) from simcore_service_director_v2.modules.dynamic_sidecar.docker_states import ( CONTAINER_STATUSES_UNEXPECTED, extract_containers_minimum_statuses, ) +from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler_utils import ( + create_model_from_scheduler_data, +) # the following is the predefined expected ordering, change below test only if # this order is not adequate anymore @@ -122,7 +129,7 @@ def _all_states() -> set[ServiceState]: def test_running_service_details_make_status( scheduler_data: SchedulerData, service_message: str, service_state: ServiceState ): - running_service_details = RunningDynamicServiceDetails.from_scheduler_data( + running_service_details = create_model_from_scheduler_data( node_uuid=scheduler_data.node_uuid, scheduler_data=scheduler_data, service_state=service_state, @@ -131,7 +138,7 @@ def test_running_service_details_make_status( print(running_service_details) assert running_service_details - running_service_details_dict = 
running_service_details.dict( + running_service_details_dict = running_service_details.model_dump( exclude_unset=True, by_alias=True ) @@ -212,7 +219,7 @@ def test_regression_legacy_service_compatibility() -> None: "user_id": "1", "project_id": "b1ec5c8e-f5bb-11eb-b1d5-02420a000006", } - service_details = RunningDynamicServiceDetails.parse_obj(api_response) + service_details = RunningDynamicServiceDetails.model_validate(api_response) assert service_details diff --git a/services/director-v2/tests/unit/test_models_schemas_dynamic_services_scheduler.py b/services/director-v2/tests/unit/test_models_schemas_dynamic_services_scheduler.py index a16449560fd..0bbd9bca526 100644 --- a/services/director-v2/tests/unit/test_models_schemas_dynamic_services_scheduler.py +++ b/services/director-v2/tests/unit/test_models_schemas_dynamic_services_scheduler.py @@ -5,8 +5,8 @@ from pathlib import Path import pytest -from pydantic import parse_file_as -from simcore_service_director_v2.models.schemas.dynamic_services import SchedulerData +from pydantic import TypeAdapter +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData @pytest.fixture @@ -20,13 +20,13 @@ def test_regression_as_label_data(scheduler_data: SchedulerData) -> None: # old tested implementation scheduler_data_copy = deepcopy(scheduler_data) scheduler_data_copy.compose_spec = json.dumps(scheduler_data_copy.compose_spec) - json_encoded = scheduler_data_copy.json() + json_encoded = scheduler_data_copy.model_dump_json() # using pydantic's internals label_data = scheduler_data.as_label_data() - parsed_json_encoded = SchedulerData.parse_raw(json_encoded) - parsed_label_data = SchedulerData.parse_raw(label_data) + parsed_json_encoded = SchedulerData.model_validate_json(json_encoded) + parsed_label_data = SchedulerData.model_validate_json(label_data) assert parsed_json_encoded == parsed_label_data @@ -35,4 +35,6 @@ def test_ensure_legacy_format_compatibility(legacy_scheduler_data_format: Path): # PRs applying changes to the legacy format: # - https://github.com/ITISFoundation/osparc-simcore/pull/3610 - assert parse_file_as(list[SchedulerData], legacy_scheduler_data_format) + assert TypeAdapter(list[SchedulerData]).validate_json( + legacy_scheduler_data_format.read_text() + ) diff --git a/services/director-v2/tests/unit/test_modules_catalog.py b/services/director-v2/tests/unit/test_modules_catalog.py index f967ab1d1d3..3dbf5b13235 100644 --- a/services/director-v2/tests/unit/test_modules_catalog.py +++ b/services/director-v2/tests/unit/test_modules_catalog.py @@ -7,30 +7,27 @@ import pytest import respx -from faker import Faker from fastapi import FastAPI from models_library.services import ServiceKeyVersion from models_library.users import UserID +from pytest_simcore.helpers.typing_env import EnvVarsDict from simcore_service_director_v2.modules.catalog import CatalogClient @pytest.fixture -def minimal_catalog_config(project_env_devel_environment, monkeypatch): +def minimal_catalog_config( + disable_postgres: None, + project_env_devel_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> None: """set a minimal configuration for testing the director connection only""" monkeypatch.setenv("DIRECTOR_ENABLED", "0") - monkeypatch.setenv("POSTGRES_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "0") + monkeypatch.setenv("DIRECTOR_ENABLED", "0") 
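
Aside (illustrative, not part of the diff): the hunks above consistently swap pydantic v1 helpers for their v2 equivalents (`.dict()` -> `.model_dump()`, `.json()` -> `.model_dump_json()`, `parse_raw` -> `model_validate_json`, `parse_obj_as`/`parse_file_as` -> `TypeAdapter(...)`). A minimal sketch of that mapping, using a hypothetical `Item` model rather than the real `SchedulerData`:

    from pydantic import BaseModel, TypeAdapter


    class Item(BaseModel):  # hypothetical stand-in for the models touched above
        name: str
        count: int = 0


    item = Item.model_validate({"name": "demo"})           # v1: Item.parse_obj(...)
    as_dict = item.model_dump(exclude_unset=True)          # v1: item.dict(exclude_unset=True)
    as_json = item.model_dump_json()                       # v1: item.json()
    again = Item.model_validate_json(as_json)              # v1: Item.parse_raw(...)
    many = TypeAdapter(list[Item]).validate_json('[{"name": "a"}]')  # v1: parse_file_as(list[Item], path)
    assert again == item and many[0].name == "a"
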
monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "0") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") -@pytest.fixture -def user_id(faker: Faker) -> UserID: - return UserID(faker.pyint(min_value=1)) - - def test_get_catalog_client_instance( minimal_catalog_config: None, mocked_catalog_service_api: respx.MockRouter, diff --git a/services/director-v2/tests/unit/test_modules_dask_client.py b/services/director-v2/tests/unit/test_modules_dask_client.py index 3aac6b498e7..909c3c238de 100644 --- a/services/director-v2/tests/unit/test_modules_dask_client.py +++ b/services/director-v2/tests/unit/test_modules_dask_client.py @@ -6,23 +6,22 @@ # pylint: disable=reimported import asyncio import functools +import logging import traceback +from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine from dataclasses import dataclass -from typing import Any, AsyncIterator, Awaitable, Callable, Optional +from typing import Any, NoReturn, cast from unittest import mock from uuid import uuid4 import distributed import pytest import respx -from _dask_helpers import DaskGatewayServer from dask.distributed import get_worker from dask_task_models_library.container_tasks.docker import DockerBasicAuth from dask_task_models_library.container_tasks.errors import TaskCancelledError from dask_task_models_library.container_tasks.events import ( - TaskLogEvent, TaskProgressEvent, - TaskStateEvent, ) from dask_task_models_library.container_tasks.io import ( TaskCancelEventName, @@ -30,20 +29,28 @@ TaskOutputData, TaskOutputDataSchema, ) +from dask_task_models_library.container_tasks.protocol import ( + ContainerEnvsDict, + ContainerLabelsDict, + ContainerTaskParameters, + LogFileUploadURL, +) from distributed import Event, Scheduler from distributed.deploy.spec import SpecCluster from faker import Faker from fastapi.applications import FastAPI -from models_library.clusters import ClusterID, NoAuthentication, SimpleAuthentication +from models_library.api_schemas_directorv2.services import NodeRequirements +from models_library.clusters import ClusterTypeInModel, NoAuthentication +from models_library.docker import to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID from models_library.projects_state import RunningState -from models_library.services_resources import BootMode +from models_library.resource_tracker import HardwareInfo +from models_library.services_types import ServiceRunID from models_library.users import UserID -from pydantic import AnyUrl, ByteSize, SecretStr -from pydantic.tools import parse_obj_as -from pytest import MonkeyPatch +from pydantic import AnyUrl, ByteSize, TypeAdapter from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.logging_tools import log_context from pytest_simcore.helpers.typing_env import EnvVarsDict from settings_library.s3 import S3Settings from simcore_sdk.node_ports_v2 import FileLinkType @@ -54,10 +61,10 @@ InsuficientComputationalResourcesError, MissingComputationalResourcesError, ) -from simcore_service_director_v2.models.domains.comp_tasks import Image -from simcore_service_director_v2.models.schemas.services import NodeRequirements +from simcore_service_director_v2.models.comp_runs import RunMetadataDict +from simcore_service_director_v2.models.comp_tasks import Image from simcore_service_director_v2.modules.dask_client import DaskClient, TaskHandlers -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying 
from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed, wait_random @@ -66,7 +73,7 @@ _ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS = 20 -async def _assert_wait_for_cb_call(mocked_fct, timeout: Optional[int] = None): +async def _assert_wait_for_cb_call(mocked_fct, timeout: int | None = None): async for attempt in AsyncRetrying( stop=stop_after_delay(timeout or 10), wait=wait_random(0, 1), @@ -86,7 +93,7 @@ async def _assert_wait_for_task_status( job_id: str, dask_client: DaskClient, expected_status: RunningState, - timeout: Optional[int] = None, + timeout: int | None = None, # noqa: ASYNC109 ): async for attempt in AsyncRetrying( reraise=True, @@ -99,29 +106,33 @@ async def _assert_wait_for_task_status( f"waiting for task to be {expected_status=}, " f"Attempt={attempt.retry_state.attempt_number}" ) - current_task_status = await dask_client.get_task_status(job_id) - assert isinstance(current_task_status, RunningState) - print(f"{current_task_status=} vs {expected_status=}") - assert current_task_status == expected_status + got = (await dask_client.get_tasks_status([job_id]))[0] + assert isinstance(got, RunningState) + print(f"{got=} vs {expected_status=}") + if got is RunningState.FAILED and expected_status not in [ + RunningState.FAILED, + RunningState.UNKNOWN, + ]: + try: + # we can fail fast here + # this will raise and we catch the Assertion to not reraise too long + await dask_client.get_task_result(job_id) + except AssertionError as exc: + raise RuntimeError from exc + assert got is expected_status @pytest.fixture -def user_id(faker: Faker) -> UserID: - return faker.pyint(min_value=1) - - -@pytest.fixture -def minimal_dask_config( +def _minimal_dask_config( + disable_postgres: None, mock_env: EnvVarsDict, project_env_devel_environment: dict[str, Any], - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """set a minimal configuration for testing the dask connection only""" monkeypatch.setenv("DIRECTOR_ENABLED", "0") - monkeypatch.setenv("POSTGRES_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "0") + monkeypatch.setenv("DIRECTOR_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_CATALOG", "null") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") @@ -130,106 +141,78 @@ def minimal_dask_config( @pytest.fixture async def create_dask_client_from_scheduler( - minimal_dask_config: None, - dask_spec_local_cluster: SpecCluster, + _minimal_dask_config: None, + dask_spec_local_cluster: distributed.SpecCluster, minimal_app: FastAPI, tasks_file_link_type: FileLinkType, ) -> AsyncIterator[Callable[[], Awaitable[DaskClient]]]: created_clients = [] async def factory() -> DaskClient: - client = await DaskClient.create( - app=minimal_app, - settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, - endpoint=parse_obj_as(AnyUrl, dask_spec_local_cluster.scheduler_address), - authentication=NoAuthentication(), - tasks_file_link_type=tasks_file_link_type, - ) - assert client - assert client.app == minimal_app - assert ( - client.settings - == minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) - assert not client._subscribed_tasks - - assert client.backend.client - assert not client.backend.gateway - assert not client.backend.gateway_cluster - scheduler_infos = 
client.backend.client.scheduler_info() # type: ignore - print( - f"--> Connected to scheduler via client {client=} to scheduler {scheduler_infos=}" - ) - created_clients.append(client) - return client - - yield factory - await asyncio.gather(*[client.delete() for client in created_clients]) - print(f"<-- Disconnected scheduler clients {created_clients=}") - + with log_context( + logging.INFO, + f"Create director-v2 DaskClient to {dask_spec_local_cluster.scheduler_address}", + ) as ctx: + client = await DaskClient.create( + app=minimal_app, + settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, + endpoint=TypeAdapter(AnyUrl).validate_python( + dask_spec_local_cluster.scheduler_address + ), + authentication=NoAuthentication(), + tasks_file_link_type=tasks_file_link_type, + cluster_type=ClusterTypeInModel.ON_PREMISE, + ) + assert client + assert client.app == minimal_app + assert ( + client.settings + == minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND + ) -@pytest.fixture -async def create_dask_client_from_gateway( - minimal_dask_config: None, - local_dask_gateway_server: DaskGatewayServer, - minimal_app: FastAPI, - tasks_file_link_type: FileLinkType, -) -> AsyncIterator[Callable[[], Awaitable[DaskClient]]]: - created_clients = [] + assert client.backend.client + scheduler_infos = client.backend.client.scheduler_info() # type: ignore + ctx.logger.info( + "%s", + f"--> Connected to scheduler via client {client=} to scheduler {scheduler_infos=}", + ) - async def factory() -> DaskClient: - client = await DaskClient.create( - app=minimal_app, - settings=minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, - endpoint=parse_obj_as(AnyUrl, local_dask_gateway_server.address), - authentication=SimpleAuthentication( - username="pytest_user", - password=SecretStr(local_dask_gateway_server.password), - ), - tasks_file_link_type=tasks_file_link_type, - ) - assert client - assert client.app == minimal_app - assert ( - client.settings - == minimal_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) - assert not client._subscribed_tasks - - assert client.backend.client - assert client.backend.gateway - assert client.backend.gateway_cluster - - scheduler_infos = client.backend.client.scheduler_info() # type: ignore - print(f"--> Connected to gateway {client.backend.gateway=}") - print(f"--> Cluster {client.backend.gateway_cluster=}") - print(f"--> Client {client=}") - print( - f"--> Cluster dashboard link {client.backend.gateway_cluster.dashboard_link}" - ) created_clients.append(client) return client yield factory - await asyncio.gather(*[client.delete() for client in created_clients]) - print(f"<-- Disconnected gateway clients {created_clients=}") + with log_context(logging.INFO, "Disconnect scheduler clients"): + await asyncio.gather(*[client.delete() for client in created_clients]) -@pytest.fixture( - params=["create_dask_client_from_scheduler", "create_dask_client_from_gateway"] -) + +@pytest.fixture(params=["create_dask_client_from_scheduler"]) async def dask_client( create_dask_client_from_scheduler: Callable[[], Awaitable[DaskClient]], - create_dask_client_from_gateway: Callable[[], Awaitable[DaskClient]], - request, + request: pytest.FixtureRequest, ) -> DaskClient: client: DaskClient = await { "create_dask_client_from_scheduler": create_dask_client_from_scheduler, - "create_dask_client_from_gateway": create_dask_client_from_gateway, }[request.param]() try: assert client.app.state.engine is not None + + # check we can run some simple python script + def 
_square(x): + return x**2 + + def neg(x): + return -x + + a = client.backend.client.map(_square, range(10)) + b = client.backend.client.map(neg, a) + total = client.backend.client.submit(sum, b) + future = total.result() + assert future + assert isinstance(future, Coroutine) + result = await future + assert result == -285 except AttributeError: # enforces existance of 'app.state.engine' and sets to None client.app.state.engine = None @@ -242,11 +225,6 @@ def project_id() -> ProjectID: return uuid4() -@pytest.fixture -def node_id() -> NodeID: - return uuid4() - - @dataclass class ImageParams: image: Image @@ -262,7 +240,7 @@ def cpu_image(node_id: NodeID) -> ImageParams: tag="1.5.5", node_requirements=NodeRequirements( CPU=1, - RAM=parse_obj_as(ByteSize, "128 MiB"), + RAM=TypeAdapter(ByteSize).validate_python("128 MiB"), GPU=None, ), ) # type: ignore @@ -290,7 +268,7 @@ def gpu_image(node_id: NodeID) -> ImageParams: node_requirements=NodeRequirements( CPU=1, GPU=1, - RAM=parse_obj_as(ByteSize, "256 MiB"), + RAM=TypeAdapter(ByteSize).validate_python("256 MiB"), ), ) # type: ignore return ImageParams( @@ -321,24 +299,24 @@ def image_params( }[request.param] -@pytest.fixture() -def mocked_node_ports(mocker: MockerFixture): +@pytest.fixture +def _mocked_node_ports(mocker: MockerFixture) -> None: mocker.patch( - "simcore_service_director_v2.modules.dask_client.create_node_ports", + "simcore_service_director_v2.modules.dask_client.dask_utils.create_node_ports", return_value=None, ) mocker.patch( - "simcore_service_director_v2.modules.dask_client.compute_input_data", - return_value=TaskInputData.parse_obj({}), + "simcore_service_director_v2.modules.dask_client.dask_utils.compute_input_data", + return_value=TaskInputData.model_validate({}), ) mocker.patch( - "simcore_service_director_v2.modules.dask_client.compute_output_data_schema", - return_value=TaskOutputDataSchema.parse_obj({}), + "simcore_service_director_v2.modules.dask_client.dask_utils.compute_output_data_schema", + return_value=TaskOutputDataSchema.model_validate({}), ) mocker.patch( - "simcore_service_director_v2.modules.dask_client.compute_service_log_file_upload_link", - return_value=parse_obj_as(AnyUrl, "file://undefined"), + "simcore_service_director_v2.modules.dask_client.dask_utils.compute_service_log_file_upload_link", + return_value=TypeAdapter(AnyUrl).validate_python("file://undefined"), ) @@ -367,19 +345,18 @@ def test_fct_add(x: int, y: int) -> int: async def test_dask_does_not_report_asyncio_cancelled_error_in_task( dask_client: DaskClient, ): - def fct_that_raise_cancellation_error(): + def fct_that_raise_cancellation_error() -> NoReturn: import asyncio - raise asyncio.CancelledError("task was cancelled, but dask does not care...") + cancel_msg = "task was cancelled, but dask does not care..." + raise asyncio.CancelledError(cancel_msg) future = dask_client.backend.client.submit(fct_that_raise_cancellation_error) # NOTE: Since asyncio.CancelledError is derived from BaseException and the worker code checks Exception only # this goes through... # The day this is fixed, this test should detect it... SAN would be happy to know about it. 
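
Aside (illustrative, not part of the diff): the sanity computation added to the `dask_client` fixture above squares 0..9, negates each value and sums them, so the expected constant is -(0 + 1 + 4 + ... + 81) = -285. A one-line local check of that value:

    assert sum(-(x**2) for x in range(10)) == -285  # matches the fixture's expected result
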
- assert ( - await future.exception(timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore - and future.cancelled() == True - ) + assert await future.exception(timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore + assert future.cancelled() is True @pytest.mark.xfail( @@ -389,20 +366,18 @@ def fct_that_raise_cancellation_error(): "dask_client", ["create_dask_client_from_scheduler"], indirect=True ) async def test_dask_does_not_report_base_exception_in_task(dask_client: DaskClient): - def fct_that_raise_base_exception(): - - raise BaseException( # pylint: disable=broad-exception-raised - "task triggers a base exception, but dask does not care..." + def fct_that_raise_base_exception() -> NoReturn: + err_msg = "task triggers a base exception, but dask does not care..." + raise BaseException( # pylint: disable=broad-exception-raised # noqa: TRY002 + err_msg ) future = dask_client.backend.client.submit(fct_that_raise_base_exception) # NOTE: Since asyncio.CancelledError is derived from BaseException and the worker code checks Exception only # this goes through... # The day this is fixed, this test should detect it... SAN would be happy to know about it. - assert ( - await future.exception(timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore - and future.cancelled() == True - ) + assert await future.exception(timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore + assert future.cancelled() is True @pytest.mark.parametrize("exc", [Exception, TaskCancelledError]) @@ -422,70 +397,135 @@ def fct_that_raise_exception(): ) # type: ignore assert task_exception assert isinstance(task_exception, exc) - task_traceback = await future.traceback(timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore + task_traceback = await future.traceback( + timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS + ) # type: ignore assert task_traceback - trace = traceback.format_exception( - type(task_exception), value=task_exception, tb=task_traceback - ) + trace = traceback.format_exception(task_exception) assert trace +@pytest.fixture +def comp_run_metadata(faker: Faker) -> RunMetadataDict: + return RunMetadataDict( + product_name=faker.pystr(), + simcore_user_agent=faker.pystr(), + ) | cast(RunMetadataDict, faker.pydict(allowed_types=(str,))) + + +@pytest.fixture +def task_labels(comp_run_metadata: RunMetadataDict) -> ContainerLabelsDict: + return TypeAdapter(ContainerLabelsDict).validate_python( + { + k.replace("_", "-").lower(): v + for k, v in comp_run_metadata.items() + if k not in ["product_name", "simcore_user_agent"] + }, + ) + + +@pytest.fixture +def hardware_info() -> HardwareInfo: + assert "json_schema_extra" in HardwareInfo.model_config + assert isinstance(HardwareInfo.model_config["json_schema_extra"], dict) + assert isinstance(HardwareInfo.model_config["json_schema_extra"]["examples"], list) + return HardwareInfo.model_validate( + HardwareInfo.model_config["json_schema_extra"]["examples"][0] + ) + + +@pytest.fixture +def empty_hardware_info() -> HardwareInfo: + return HardwareInfo(aws_ec2_instances=[]) + + async def test_send_computation_task( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, + node_id: NodeID, image_params: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + task_labels: ContainerLabelsDict, + empty_hardware_info: HardwareInfo, faker: Faker, + 
resource_tracking_run_id: ServiceRunID, ): _DASK_EVENT_NAME = faker.pystr() + # NOTE: this must be inlined so that the test works, # the dask-worker must be able to import the function def fake_sidecar_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode, - expected_annotations, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, + expected_annotations: dict[str, Any], + expected_envs: ContainerEnvsDict, + expected_labels: ContainerLabelsDict, ) -> TaskOutputData: # get the task data worker = get_worker() task = worker.state.tasks.get(worker.get_current_task()) assert task is not None assert task.annotations == expected_annotations - assert command == ["run"] + assert task_parameters.envs == expected_envs + assert task_parameters.labels == expected_labels + assert task_parameters.command == ["run"] event = distributed.Event(_DASK_EVENT_NAME) event.wait(timeout=25) - return TaskOutputData.parse_obj({"some_output_key": 123}) + return TaskOutputData.model_validate({"some_output_key": 123}) # NOTE: We pass another fct so it can run in our localy created dask cluster + # NOTE2: since there is only 1 task here, it's ok to pass the nodeID + node_params = image_params.fake_tasks[node_id] + assert node_params.node_requirements is not None + assert node_params.node_requirements.cpu + assert node_params.node_requirements.ram + assert "product_name" in comp_run_metadata + assert "simcore_user_agent" in comp_run_metadata + node_requirements = image_params.fake_tasks[node_id].node_requirements + assert node_requirements + node_id_to_job_ids = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=functools.partial( - fake_sidecar_fct, expected_annotations=image_params.expected_annotations + fake_sidecar_fct, + expected_annotations=image_params.expected_annotations, + expected_envs={}, + expected_labels=task_labels + | { + f"{to_simcore_runtime_docker_label_key('user-id')}": f"{user_id}", + f"{to_simcore_runtime_docker_label_key('project-id')}": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('node-id')}": f"{node_id}", + f"{to_simcore_runtime_docker_label_key('cpu-limit')}": f"{node_requirements.cpu}", + f"{to_simcore_runtime_docker_label_key('memory-limit')}": f"{node_requirements.ram}", + f"{to_simcore_runtime_docker_label_key('product-name')}": f"{comp_run_metadata['product_name']}", + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}": f"{comp_run_metadata['simcore_user_agent']}", + f"{to_simcore_runtime_docker_label_key('swarm-stack-name')}": "undefined-label", + }, # type: ignore ), + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) assert node_id_to_job_ids assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in image_params.fake_tasks + published_computation_task = node_id_to_job_ids[0] + assert published_computation_task.node_id in image_params.fake_tasks # check status goes to PENDING/STARTED await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.STARTED + published_computation_task.job_id, + dask_client, + expected_status=RunningState.STARTED, 
) # using the event we let the remote fct continue @@ -497,34 +537,41 @@ def fake_sidecar_fct( # check the task status await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.SUCCESS + published_computation_task.job_id, + dask_client, + expected_status=RunningState.SUCCESS, ) # check the results - task_result = await dask_client.get_task_result(job_id) + task_result = await dask_client.get_task_result(published_computation_task.job_id) assert isinstance(task_result, TaskOutputData) assert task_result.get("some_output_key") == 123 # now release the results - await dask_client.release_task_result(job_id) + await dask_client.release_task_result(published_computation_task.job_id) # check the status now await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.UNKNOWN, timeout=60 + published_computation_task.job_id, + dask_client, + expected_status=RunningState.UNKNOWN, + timeout=60, ) with pytest.raises(ComputationalBackendTaskNotFoundError): - await dask_client.get_task_result(job_id) + await dask_client.get_task_result(published_computation_task.job_id) async def test_computation_task_is_persisted_on_dask_scheduler( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): """rationale: When a task is submitted to the dask backend, a dask future is returned. @@ -535,61 +582,64 @@ async def test_computation_task_is_persisted_on_dask_scheduler( When submitting a computation task, the future corresponding to that task is "published" on the scheduler. 
""" + # NOTE: this must be inlined so that the test works, # the dask-worker must be able to import the function def fake_sidecar_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: # get the task data worker = get_worker() task = worker.state.tasks.get(worker.get_current_task()) assert task is not None - return TaskOutputData.parse_obj({"some_output_key": 123}) + return TaskOutputData.model_validate({"some_output_key": 123}) # NOTE: We pass another fct so it can run in our localy created dask cluster - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_sidecar_fct, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] + assert published_computation_task + assert len(published_computation_task) == 1 await _assert_wait_for_cb_call( mocked_user_completed_cb, timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS ) # check the task status await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.SUCCESS + published_computation_task[0].job_id, + dask_client, + expected_status=RunningState.SUCCESS, ) - assert node_id in image_params.fake_tasks + assert published_computation_task[0].node_id in image_params.fake_tasks # creating a new future shows that it is not done???? - assert not distributed.Future(job_id).done() + assert not distributed.Future( + published_computation_task[0].job_id, client=dask_client.backend.client + ).done() # as the task is published on the dask-scheduler when sending, it shall still be published on the dask scheduler list_of_persisted_datasets = await dask_client.backend.client.list_datasets() # type: ignore assert list_of_persisted_datasets assert isinstance(list_of_persisted_datasets, tuple) assert len(list_of_persisted_datasets) == 1 - assert job_id in list_of_persisted_datasets - assert list_of_persisted_datasets[0] == job_id + assert published_computation_task[0].job_id in list_of_persisted_datasets + assert list_of_persisted_datasets[0] == published_computation_task[0].job_id # get the persisted future from the scheduler back - task_future = await dask_client.backend.client.get_dataset(name=job_id) # type: ignore + task_future = await dask_client.backend.client.get_dataset( + name=published_computation_task[0].job_id + ) # type: ignore assert task_future assert isinstance(task_future, distributed.Future) - assert task_future.key == job_id + assert task_future.key == published_computation_task[0].job_id # NOTE: the future was persisted BEFORE the computation was completed.. therefore it is not updated # this is a bit weird, but it is so, this assertion demonstrates it. we need to await the results. 
assert not task_future.done() @@ -599,33 +649,33 @@ def fake_sidecar_fct( assert isinstance(task_result, TaskOutputData) assert task_result.get("some_output_key") == 123 # try to create another future and this one is already done - assert distributed.Future(job_id).done() + assert distributed.Future( + published_computation_task[0].job_id, client=dask_client.backend.client + ).done() async def test_abort_computation_tasks( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, faker: Faker, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): _DASK_EVENT_NAME = faker.pystr() + # NOTE: this must be inlined so that the test works, # the dask-worker must be able to import the function def fake_remote_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: # get the task data worker = get_worker() @@ -644,21 +694,27 @@ def fake_remote_fct( print("--> raising cancellation error now") raise TaskCancelledError - return TaskOutputData.parse_obj({"some_output_key": 123}) + return TaskOutputData.model_validate({"some_output_key": 123}) - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, + ) + assert published_computation_task + assert len(published_computation_task) == 1 + + assert published_computation_task[0].node_id in image_params.fake_tasks + await _assert_wait_for_task_status( + published_computation_task[0].job_id, + dask_client, + RunningState.STARTED, ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in image_params.fake_tasks - await _assert_wait_for_task_status(job_id, dask_client, RunningState.STARTED) # we wait to be sure the remote fct is started start_event = Event(_DASK_EVENT_NAME, client=dask_client.backend.client) @@ -666,67 +722,72 @@ def fake_remote_fct( # now let's abort the computation cancel_event = await distributed.Event( - name=TaskCancelEventName.format(job_id), client=dask_client.backend.client + name=TaskCancelEventName.format(published_computation_task[0].job_id), + client=dask_client.backend.client, ) - await dask_client.abort_computation_task(job_id) + await dask_client.abort_computation_task(published_computation_task[0].job_id) assert await cancel_event.is_set() # type: ignore await _assert_wait_for_cb_call(mocked_user_completed_cb) - await _assert_wait_for_task_status(job_id, dask_client, RunningState.ABORTED) + await _assert_wait_for_task_status( + published_computation_task[0].job_id, dask_client, RunningState.ABORTED + ) # getting the results should throw the cancellation error with pytest.raises(TaskCancelledError): - 
await dask_client.get_task_result(job_id) + await dask_client.get_task_result(published_computation_task[0].job_id) + await dask_client.release_task_result(published_computation_task[0].job_id) # after releasing the results, the task shall be UNKNOWN - await dask_client.release_task_result(job_id) + _ALLOW_TIME_FOR_LOCAL_DASK_SCHEDULER_TO_UPDATE_TIMEOUT_S = 5 + await asyncio.sleep(_ALLOW_TIME_FOR_LOCAL_DASK_SCHEDULER_TO_UPDATE_TIMEOUT_S) # NOTE: this change of status takes a very long time to happen and is not relied upon so we skip it since it # makes the test fail a lot for no gain (it's kept here in case it ever becomes an issue) - # await _assert_wait_for_task_status( - # job_id, dask_client, RunningState.UNKNOWN, timeout=120 - # ) + await _assert_wait_for_task_status( + published_computation_task[0].job_id, + dask_client, + RunningState.UNKNOWN, + timeout=10, + ) async def test_failed_task_returns_exceptions( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, gpu_image: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): # NOTE: this must be inlined so that the test works, # the dask-worker must be able to import the function def fake_failing_sidecar_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: + err_msg = "sadly we are failing to execute anything cause we are dumb..." + raise ValueError(err_msg) - raise ValueError( - "sadly we are failing to execute anything cause we are dumb..." 
- ) - - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=gpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_failing_sidecar_fct, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in gpu_image.fake_tasks + assert published_computation_task + assert len(published_computation_task) == 1 + + assert published_computation_task[0].node_id in gpu_image.fake_tasks # this waits for the computation to run await _assert_wait_for_cb_call( @@ -735,15 +796,17 @@ def fake_failing_sidecar_fct( # the computation status is FAILED await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.FAILED + published_computation_task[0].job_id, + dask_client, + expected_status=RunningState.FAILED, ) with pytest.raises( ValueError, match="sadly we are failing to execute anything cause we are dumb...", ): - await dask_client.get_task_result(job_id) + await dask_client.get_task_result(published_computation_task[0].job_id) assert len(await dask_client.backend.client.list_datasets()) > 0 # type: ignore - await dask_client.release_task_result(job_id) + await dask_client.release_task_result(published_computation_task[0].job_id) assert len(await dask_client.backend.client.list_datasets()) == 0 # type: ignore @@ -751,18 +814,19 @@ def fake_failing_sidecar_fct( @pytest.mark.parametrize( "dask_client", ["create_dask_client_from_scheduler"], indirect=True ) -async def test_missing_resource_send_computation_task( +async def test_send_computation_task_with_missing_resources_raises( dask_spec_local_cluster: SpecCluster, dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): - # remove the workers that can handle gpu scheduler_info = dask_client.backend.client.scheduler_info() assert scheduler_info @@ -783,10 +847,43 @@ async def test_missing_resource_send_computation_task( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, + ) + mocked_user_completed_cb.assert_not_called() + + +@pytest.mark.parametrize( + "dask_client", ["create_dask_client_from_scheduler"], indirect=True +) +async def test_send_computation_task_with_hardware_info_raises( + dask_spec_local_cluster: SpecCluster, + dask_client: DaskClient, + user_id: UserID, + project_id: ProjectID, + image_params: ImageParams, + _mocked_node_ports: None, + mocked_user_completed_cb: mock.AsyncMock, + mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, +): + # NOTE: running on the default cluster will raise missing resources + with pytest.raises(MissingComputationalResourcesError): + await 
dask_client.send_computation_tasks( + user_id=user_id, + project_id=project_id, + tasks=image_params.fake_tasks, + callback=mocked_user_completed_cb, + remote_fct=None, + metadata=comp_run_metadata, + hardware_info=hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) mocked_user_completed_cb.assert_not_called() @@ -799,10 +896,12 @@ async def test_too_many_resources_send_computation_task( user_id: UserID, project_id: ProjectID, node_id: NodeID, - cluster_id: ClusterID, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): # create an image that needs a huge amount of CPU image = Image( @@ -810,7 +909,7 @@ async def test_too_many_resources_send_computation_task( tag="1.4.5", node_requirements=NodeRequirements( CPU=10000000000000000, - RAM=parse_obj_as(ByteSize, "128 MiB"), + RAM=TypeAdapter(ByteSize).validate_python("128 MiB"), GPU=None, ), ) # type: ignore @@ -821,10 +920,12 @@ async def test_too_many_resources_send_computation_task( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=fake_task, callback=mocked_user_completed_cb, remote_fct=None, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) mocked_user_completed_cb.assert_not_called() @@ -832,28 +933,29 @@ async def test_too_many_resources_send_computation_task( async def test_disconnected_backend_raises_exception( dask_spec_local_cluster: SpecCluster, - local_dask_gateway_server: DaskGatewayServer, dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): # DISCONNECT THE CLUSTER await dask_spec_local_cluster.close() # type: ignore - await local_dask_gateway_server.server.cleanup() - # with pytest.raises(ComputationalBackendNotConnectedError): await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=None, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) mocked_user_completed_cb.assert_not_called() @@ -866,12 +968,14 @@ async def test_changed_scheduler_raises_exception( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, unused_tcp_port_factory: Callable, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): # change the scheduler (stop the current one and start another at the same address) scheduler_address = URL(dask_spec_local_cluster.scheduler_address) @@ -896,152 +1000,150 @@ async def test_changed_scheduler_raises_exception( await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, 
remote_fct=None, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) mocked_user_completed_cb.assert_not_called() -@pytest.mark.flaky(max_runs=3) @pytest.mark.parametrize("fail_remote_fct", [False, True]) async def test_get_tasks_status( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, faker: Faker, fail_remote_fct: bool, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): # NOTE: this must be inlined so that the test works, # the dask-worker must be able to import the function _DASK_EVENT_NAME = faker.pystr() def fake_remote_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: # wait here until the client allows us to continue start_event = Event(_DASK_EVENT_NAME) start_event.wait(timeout=5) if fail_remote_fct: - raise ValueError("We fail because we're told to!") - return TaskOutputData.parse_obj({"some_output_key": 123}) + err_msg = "We fail because we're told to!" + raise ValueError(err_msg) + return TaskOutputData.model_validate({"some_output_key": 123}) - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in cpu_image.fake_tasks - # let's get a dask future for the task here so dask will not remove the task from the scheduler at the end - computation_future = distributed.Future(key=job_id) - assert computation_future + assert published_computation_task + assert len(published_computation_task) == 1 + + assert published_computation_task[0].node_id in cpu_image.fake_tasks - await _assert_wait_for_task_status(job_id, dask_client, RunningState.STARTED) + await _assert_wait_for_task_status( + published_computation_task[0].job_id, + dask_client, + RunningState.STARTED, + ) # let the remote fct run through now start_event = Event(_DASK_EVENT_NAME, dask_client.backend.client) await start_event.set() # type: ignore # it will become successful hopefuly await _assert_wait_for_task_status( - job_id, + published_computation_task[0].job_id, dask_client, RunningState.FAILED if fail_remote_fct else RunningState.SUCCESS, ) # release the task results - await dask_client.release_task_result(job_id) - # the task is still present since we hold a future here - await _assert_wait_for_task_status( - job_id, - dask_client, - RunningState.FAILED if fail_remote_fct else RunningState.SUCCESS, - ) + await dask_client.release_task_result(published_computation_task[0].job_id) - # removing the future will let dask eventually delete the task from its 
memory, so its status becomes undefined - del computation_future + await asyncio.sleep( + 5 # NOTE: here we wait to be sure that the dask-scheduler properly updates its state + ) + # the task is gone, since the distributed Variable was removed above await _assert_wait_for_task_status( - job_id, dask_client, RunningState.UNKNOWN, timeout=60 + published_computation_task[0].job_id, + dask_client, + RunningState.UNKNOWN, + timeout=60, ) @pytest.fixture async def fake_task_handlers(mocker: MockerFixture) -> TaskHandlers: - return TaskHandlers(mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) + return TaskHandlers(task_progress_handler=mocker.MagicMock()) async def test_dask_sub_handlers( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, cpu_image: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, fake_task_handlers: TaskHandlers, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, + resource_tracking_run_id: ServiceRunID, ): dask_client.register_handlers(fake_task_handlers) _DASK_START_EVENT = "start" def fake_remote_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode = BootMode.CPU, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, ) -> TaskOutputData: - - state_pub = distributed.Pub(TaskStateEvent.topic_name()) - progress_pub = distributed.Pub(TaskProgressEvent.topic_name()) - logs_pub = distributed.Pub(TaskLogEvent.topic_name()) - state_pub.put("my name is state") - progress_pub.put("my name is progress") - logs_pub.put("my name is logs") + get_worker().log_event(TaskProgressEvent.topic_name(), "my name is progress") # tell the client we are done published_event = Event(name=_DASK_START_EVENT) published_event.set() - return TaskOutputData.parse_obj({"some_output_key": 123}) + return TaskOutputData.model_validate({"some_output_key": 123}) # run the computation - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=cpu_image.fake_tasks, callback=mocked_user_completed_cb, remote_fct=fake_remote_fct, + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, + ) + assert published_computation_task + assert len(published_computation_task) == 1 + + assert published_computation_task[0].node_id in cpu_image.fake_tasks + computation_future = distributed.Future( + published_computation_task[0].job_id, client=dask_client.backend.client ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in cpu_image.fake_tasks - computation_future = distributed.Future(job_id) print("--> waiting for job to finish...") - await distributed.wait(computation_future, timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS) # type: ignore + await distributed.wait( + computation_future, timeout=_ALLOW_TIME_FOR_GATEWAY_TO_CREATE_WORKERS + ) assert computation_future.done() print("job finished, now checking that we received the publications...") @@ -1056,13 +1158,9 @@ def fake_remote_fct( 
f"Attempt={attempt.retry_state.attempt_number}" ) # we should have received data in our TaskHandlers - fake_task_handlers.task_change_handler.assert_called_with( - "my name is state" - ) fake_task_handlers.task_progress_handler.assert_called_with( - "my name is progress" + (mock.ANY, "my name is progress") ) - fake_task_handlers.task_log_handler.assert_called_with("my name is logs") await _assert_wait_for_cb_call(mocked_user_completed_cb) @@ -1070,28 +1168,26 @@ async def test_get_cluster_details( dask_client: DaskClient, user_id: UserID, project_id: ProjectID, - cluster_id: ClusterID, image_params: ImageParams, - mocked_node_ports: None, + _mocked_node_ports: None, mocked_user_completed_cb: mock.AsyncMock, mocked_storage_service_api: respx.MockRouter, + comp_run_metadata: RunMetadataDict, + empty_hardware_info: HardwareInfo, faker: Faker, + resource_tracking_run_id: ServiceRunID, ): cluster_details = await dask_client.get_cluster_details() assert cluster_details _DASK_EVENT_NAME = faker.pystr() + # send a fct that uses resources def fake_sidecar_fct( + task_parameters: ContainerTaskParameters, docker_auth: DockerBasicAuth, - service_key: str, - service_version: str, - input_data: TaskInputData, - output_data_keys: TaskOutputDataSchema, - log_file_url: AnyUrl, - command: list[str], - s3_settings: Optional[S3Settings], - boot_mode: BootMode, + log_file_url: LogFileUploadURL, + s3_settings: S3Settings | None, expected_annotations, ) -> TaskOutputData: # get the task data @@ -1099,36 +1195,40 @@ def fake_sidecar_fct( task = worker.state.tasks.get(worker.get_current_task()) assert task is not None assert task.annotations == expected_annotations - assert command == ["run"] + assert task_parameters.command == ["run"] event = distributed.Event(_DASK_EVENT_NAME) event.wait(timeout=25) - return TaskOutputData.parse_obj({"some_output_key": 123}) + return TaskOutputData.model_validate({"some_output_key": 123}) # NOTE: We pass another fct so it can run in our localy created dask cluster - node_id_to_job_ids = await dask_client.send_computation_tasks( + published_computation_task = await dask_client.send_computation_tasks( user_id=user_id, project_id=project_id, - cluster_id=cluster_id, tasks=image_params.fake_tasks, callback=mocked_user_completed_cb, remote_fct=functools.partial( fake_sidecar_fct, expected_annotations=image_params.expected_annotations ), + metadata=comp_run_metadata, + hardware_info=empty_hardware_info, + resource_tracking_run_id=resource_tracking_run_id, ) - assert node_id_to_job_ids - assert len(node_id_to_job_ids) == 1 - node_id, job_id = node_id_to_job_ids[0] - assert node_id in image_params.fake_tasks + assert published_computation_task + assert len(published_computation_task) == 1 + + assert published_computation_task[0].node_id in image_params.fake_tasks # check status goes to PENDING/STARTED await _assert_wait_for_task_status( - job_id, dask_client, expected_status=RunningState.STARTED + published_computation_task[0].job_id, + dask_client, + expected_status=RunningState.STARTED, ) # check we have one worker using the resources # one of the workers should now get the job and use the resources - worker_with_the_task: Optional[AnyUrl] = None + worker_with_the_task: AnyUrl | None = None async for attempt in AsyncRetrying(reraise=True, stop=stop_after_delay(10)): with attempt: cluster_details = await dask_client.get_cluster_details() @@ -1152,7 +1252,9 @@ def fake_sidecar_fct( # wait for the task to complete await _assert_wait_for_task_status( - job_id, dask_client, 
expected_status=RunningState.SUCCESS + published_computation_task[0].job_id, + dask_client, + expected_status=RunningState.SUCCESS, ) # check the resources are released diff --git a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py index a548ee5260e..d3c6274fa7c 100644 --- a/services/director-v2/tests/unit/test_modules_dask_clients_pool.py +++ b/services/director-v2/tests/unit/test_modules_dask_clients_pool.py @@ -3,29 +3,25 @@ # pylint:disable=redefined-outer-name +from collections.abc import AsyncIterator, Callable +from pathlib import Path from random import choice -from typing import Any, AsyncIterator, Callable, get_args +from typing import Any, cast, get_args from unittest import mock import pytest -from _dask_helpers import DaskGatewayServer from distributed.deploy.spec import SpecCluster from faker import Faker +from fastapi import FastAPI from models_library.clusters import ( - DEFAULT_CLUSTER_ID, - Cluster, + BaseCluster, ClusterAuthentication, - JupyterHubTokenAuthentication, - KerberosAuthentication, + ClusterTypeInModel, NoAuthentication, - SimpleAuthentication, + TLSAuthentication, ) -from pydantic import SecretStr -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict -from settings_library.utils_cli import create_json_encoder_wo_secrets -from simcore_postgres_database.models.clusters import ClusterType from simcore_service_director_v2.core.application import init_app from simcore_service_director_v2.core.errors import ( ConfigurationError, @@ -38,16 +34,15 @@ @pytest.fixture def minimal_dask_config( + disable_postgres: None, mock_env: EnvVarsDict, project_env_devel_environment: dict[str, Any], - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """set a minimal configuration for testing the dask connection only""" monkeypatch.setenv("DIRECTOR_ENABLED", "0") - monkeypatch.setenv("POSTGRES_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "0") + monkeypatch.setenv("DIRECTOR_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_CATALOG", "null") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") @@ -55,15 +50,15 @@ def minimal_dask_config( def test_dask_clients_pool_missing_raises_configuration_error( - minimal_dask_config: None, monkeypatch: MonkeyPatch + minimal_dask_config: None, monkeypatch: pytest.MonkeyPatch ): monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "0") settings = AppSettings.create_from_envs() app = init_app(settings) - with TestClient(app, raise_server_exceptions=True) as client: + with TestClient(app, raise_server_exceptions=True): # noqa: SIM117 with pytest.raises(ConfigurationError): - DaskClientsPool.instance(client.app) + DaskClientsPool.instance(app) def test_dask_clients_pool_properly_setup_and_deleted( @@ -77,66 +72,43 @@ def test_dask_clients_pool_properly_setup_and_deleted( settings = AppSettings.create_from_envs() app = init_app(settings) - with TestClient(app, raise_server_exceptions=True) as client: + with TestClient(app, raise_server_exceptions=True): mocked_dask_clients_pool.create.assert_called_once() mocked_dask_clients_pool.delete.assert_called_once() @pytest.fixture -def fake_clusters(faker: Faker) -> Callable[[int], list[Cluster]]: - def 
creator(num_clusters: int) -> list[Cluster]: - fake_clusters = [] - for n in range(num_clusters): - fake_clusters.append( - Cluster.parse_obj( - { - "id": faker.pyint(), - "name": faker.name(), - "type": ClusterType.ON_PREMISE, - "owner": faker.pyint(), - "endpoint": faker.uri(), - "authentication": choice( - [ - NoAuthentication(), - SimpleAuthentication( - username=faker.user_name(), - password=faker.password(), - ), - KerberosAuthentication(), - JupyterHubTokenAuthentication(api_token=faker.uuid4()), - ] - ), - } - ) +def fake_clusters(faker: Faker) -> Callable[[int], list[BaseCluster]]: + def creator(num_clusters: int) -> list[BaseCluster]: + return [ + BaseCluster.model_validate( + { + "id": faker.pyint(), + "name": faker.name(), + "type": ClusterTypeInModel.ON_PREMISE, + "owner": faker.pyint(), + "endpoint": faker.uri(), + "authentication": choice( # noqa: S311 + [ + NoAuthentication(), + TLSAuthentication( + tls_client_cert=Path(faker.file_path()), + tls_client_key=Path(faker.file_path()), + tls_ca_file=Path(faker.file_path()), + ), + ] + ), + } ) - return fake_clusters - - return creator - - -@pytest.fixture() -def default_scheduler_set_as_osparc_gateway( - local_dask_gateway_server: DaskGatewayServer, monkeypatch: MonkeyPatch, faker: Faker -) -> Callable: - def creator(): - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", - local_dask_gateway_server.proxy_address, - ) - monkeypatch.setenv( - "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", - SimpleAuthentication( - username=faker.user_name(), - password=SecretStr(local_dask_gateway_server.password), - ).json(encoder=create_json_encoder_wo_secrets(SimpleAuthentication)), - ) + for _n in range(num_clusters) + ] return creator @pytest.fixture() def default_scheduler_set_as_dask_scheduler( - dask_spec_local_cluster: SpecCluster, monkeypatch: MonkeyPatch + dask_spec_local_cluster: SpecCluster, monkeypatch: pytest.MonkeyPatch ) -> Callable: def creator(): monkeypatch.setenv( @@ -150,17 +122,14 @@ def creator(): @pytest.fixture( params=[ "default_scheduler_set_as_dask_scheduler", - "default_scheduler_set_as_osparc_gateway", ] ) def default_scheduler( default_scheduler_set_as_dask_scheduler, - default_scheduler_set_as_osparc_gateway, request, ): { "default_scheduler_set_as_dask_scheduler": default_scheduler_set_as_dask_scheduler, - "default_scheduler_set_as_osparc_gateway": default_scheduler_set_as_osparc_gateway, }[request.param]() @@ -168,34 +137,38 @@ async def test_dask_clients_pool_acquisition_creates_client_on_demand( minimal_dask_config: None, mocker: MockerFixture, client: TestClient, - fake_clusters: Callable[[int], list[Cluster]], + fake_clusters: Callable[[int], list[BaseCluster]], ): + assert client.app + the_app = cast(FastAPI, client.app) mocked_dask_client = mocker.patch( "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", autospec=True, ) mocked_dask_client.create.return_value = mocked_dask_client - clients_pool = DaskClientsPool.instance(client.app) + clients_pool = DaskClientsPool.instance(the_app) mocked_dask_client.create.assert_not_called() mocked_dask_client.register_handlers.assert_not_called() clusters = fake_clusters(30) mocked_creation_calls = [] + assert isinstance(the_app.state.settings, AppSettings) for cluster in clusters: mocked_creation_calls.append( mock.call( app=client.app, - settings=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, + settings=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND, authentication=cluster.authentication, 
endpoint=cluster.endpoint, - tasks_file_link_type=client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE, + tasks_file_link_type=the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE, + cluster_type=ClusterTypeInModel.ON_PREMISE, ) ) - async with clients_pool.acquire(cluster) as dask_client: + async with clients_pool.acquire(cluster): # on start it is created mocked_dask_client.create.assert_has_calls(mocked_creation_calls) - async with clients_pool.acquire(cluster) as dask_client: + async with clients_pool.acquire(cluster): # the connection already exists, so there is no new call to create mocked_dask_client.create.assert_has_calls(mocked_creation_calls) @@ -209,14 +182,16 @@ async def test_acquiring_wrong_cluster_raises_exception( minimal_dask_config: None, mocker: MockerFixture, client: TestClient, - fake_clusters: Callable[[int], list[Cluster]], + fake_clusters: Callable[[int], list[BaseCluster]], ): + assert client.app + the_app = cast(FastAPI, client.app) mocked_dask_client = mocker.patch( "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", autospec=True, ) mocked_dask_client.create.side_effect = Exception - clients_pool = DaskClientsPool.instance(client.app) + clients_pool = DaskClientsPool.instance(the_app) mocked_dask_client.assert_not_called() non_existing_cluster = fake_clusters(1)[0] @@ -228,9 +203,9 @@ async def test_acquiring_wrong_cluster_raises_exception( def test_default_cluster_correctly_initialized( minimal_dask_config: None, default_scheduler: None, client: TestClient ): - dask_scheduler_settings = ( - client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) + assert client.app + the_app = cast(FastAPI, client.app) + dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND default_cluster = dask_scheduler_settings.default_cluster assert default_cluster assert ( @@ -238,7 +213,6 @@ def test_default_cluster_correctly_initialized( == dask_scheduler_settings.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL ) - assert default_cluster.id == DEFAULT_CLUSTER_ID assert isinstance(default_cluster.authentication, get_args(ClusterAuthentication)) @@ -248,8 +222,9 @@ async def dask_clients_pool( default_scheduler, client: TestClient, ) -> AsyncIterator[DaskClientsPool]: - - clients_pool = DaskClientsPool.instance(client.app) + assert client.app + the_app = cast(FastAPI, client.app) + clients_pool = DaskClientsPool.instance(the_app) assert clients_pool yield clients_pool await clients_pool.delete() @@ -260,9 +235,8 @@ async def test_acquire_default_cluster( client: TestClient, ): assert client.app - dask_scheduler_settings = ( - client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND - ) + the_app = cast(FastAPI, client.app) + dask_scheduler_settings = the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND default_cluster = dask_scheduler_settings.default_cluster assert default_cluster async with dask_clients_pool.acquire(default_cluster) as dask_client: @@ -272,9 +246,9 @@ def just_a_quick_fct(x, y): assert ( dask_client.tasks_file_link_type - == client.app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE + == the_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND.COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE ) future = dask_client.backend.client.submit(just_a_quick_fct, 12, 23) assert future - result = await future.result(timeout=10) # type: ignore + result = await 
future.result(timeout=10) assert result == 35 diff --git a/services/director-v2/tests/unit/test_modules_db_repositories_services_environments.py b/services/director-v2/tests/unit/test_modules_db_repositories_services_environments.py new file mode 100644 index 00000000000..32c39f416ee --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_db_repositories_services_environments.py @@ -0,0 +1,11 @@ +from models_library.osparc_variable_identifier import OsparcVariableIdentifier +from pydantic import TypeAdapter +from simcore_postgres_database.models.services_environments import VENDOR_SECRET_PREFIX + + +def test_vendor_secret_names_are_osparc_environments(): + # NOTE that this is tested here because the constants are defined in + # packages simcore_postgres_database and models_library which are indenpendent + assert VENDOR_SECRET_PREFIX.endswith("_") + + TypeAdapter(OsparcVariableIdentifier).validate_python(f"${VENDOR_SECRET_PREFIX}FAKE_SECRET") diff --git a/services/director-v2/tests/unit/test_modules_director_v0.py b/services/director-v2/tests/unit/test_modules_director_v0.py deleted file mode 100644 index 00b624ed943..00000000000 --- a/services/director-v2/tests/unit/test_modules_director_v0.py +++ /dev/null @@ -1,271 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import json -import re -import urllib.parse -from pathlib import Path -from random import choice -from typing import Any, NamedTuple -from uuid import uuid4 - -import pytest -import respx -from fastapi import FastAPI, status -from models_library.services import ServiceDockerData, ServiceKeyVersion -from simcore_service_director_v2.models.schemas.dynamic_services import ( - RunningDynamicServiceDetails, - SimcoreServiceLabels, -) -from simcore_service_director_v2.models.schemas.services import ServiceExtras -from simcore_service_director_v2.modules.director_v0 import DirectorV0Client - -MOCK_SERVICE_KEY = "simcore/services/dynamic/myservice" -MOCK_SERVICE_VERSION = "1.3.4" - - -@pytest.fixture -def minimal_director_config(project_env_devel_environment, monkeypatch): - """set a minimal configuration for testing the director connection only""" - monkeypatch.setenv("DIRECTOR_ENABLED", "1") - monkeypatch.setenv("POSTGRES_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "1") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_CATALOG", "null") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "0") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") - - -@pytest.fixture -def mocked_director_v0_service_api( - minimal_app, entrypoint, exp_data: dict, resp_alias: str -): - # pylint: disable=not-context-manager - with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - # lists services - respx_mock.get( - urllib.parse.unquote(entrypoint), - name=resp_alias, - ).respond(json=exp_data) - - yield respx_mock - - -@pytest.fixture -def mock_service_key_version() -> ServiceKeyVersion: - return ServiceKeyVersion(key=MOCK_SERVICE_KEY, version=MOCK_SERVICE_VERSION) - - -class ForwardToDirectorParams(NamedTuple): - entrypoint: str - exp_status: int - exp_data: dict[str, Any] - resp_alias: str - - -def _get_list_services_calls() -> list[ForwardToDirectorParams]: - return [ - ForwardToDirectorParams( - entrypoint="services", - 
exp_status=status.HTTP_200_OK, - exp_data={"data": ["service1", "service2"]}, - resp_alias="list_all_services", - ), - ForwardToDirectorParams( - entrypoint="services?service_type=computational", - exp_status=status.HTTP_200_OK, - exp_data={"data": ["service1", "service2"]}, - resp_alias="list_computational_services", - ), - ForwardToDirectorParams( - entrypoint="services?service_type=dynamic", - exp_status=status.HTTP_200_OK, - exp_data={"data": ["service1", "service2"]}, - resp_alias="list_dynamic_services", - ), - ] - - -def _get_service_version_calls() -> list[ForwardToDirectorParams]: - # TODO: here we see the return value is currently not validated - quoted_key = urllib.parse.quote_plus(MOCK_SERVICE_KEY) - return [ - ForwardToDirectorParams( - entrypoint=f"/services/{quoted_key}/{MOCK_SERVICE_VERSION}", - exp_status=status.HTTP_200_OK, - exp_data={"data": ["stuff about my service"]}, - resp_alias="get_service_version", - ), - ] - - -def _get_service_version_extras_calls() -> list[ForwardToDirectorParams]: - # TODO: here we see the return value is currently not validated - quoted_key = urllib.parse.quote_plus(MOCK_SERVICE_KEY) - return [ - ForwardToDirectorParams( - entrypoint=f"/services/{quoted_key}/{MOCK_SERVICE_VERSION}/extras", - exp_status=status.HTTP_200_OK, - exp_data={"data": "extra stuff about my service"}, - resp_alias="get_service_extras", - ), - ] - - -@pytest.mark.parametrize( - "entrypoint,exp_status,exp_data,resp_alias", - _get_list_services_calls() - + _get_service_version_calls() - + _get_service_version_extras_calls(), -) -def test_forward_to_director( - minimal_director_config: None, - client, - mocked_director_v0_service_api, - entrypoint, - exp_status, - exp_data: dict, - resp_alias, -): - response = client.get(f"v0/{entrypoint}") - - assert response.status_code == exp_status - assert response.json() == exp_data - assert mocked_director_v0_service_api[resp_alias].called - - -@pytest.fixture(scope="session") -def fake_service_details(mocks_dir: Path) -> ServiceDockerData: - fake_service_path = mocks_dir / "fake_service.json" - assert fake_service_path.exists() - fake_service_data = json.loads(fake_service_path.read_text()) - return ServiceDockerData(**fake_service_data) - - -@pytest.fixture(params=range(len(ServiceExtras.Config.schema_extra["examples"]))) -def fake_service_extras(request) -> ServiceExtras: - extra_example = ServiceExtras.Config.schema_extra["examples"][request.param] - random_extras = ServiceExtras(**extra_example) - assert random_extras is not None - return random_extras - - -@pytest.fixture -def fake_running_service_details() -> RunningDynamicServiceDetails: - sample_data = choice(RunningDynamicServiceDetails.Config.schema_extra["examples"]) - return RunningDynamicServiceDetails(**sample_data) - - -@pytest.fixture -def fake_service_labels() -> dict[str, Any]: - return choice(SimcoreServiceLabels.Config.schema_extra["examples"]) - - -@pytest.fixture -def mocked_director_service_fcts( - minimal_app: FastAPI, - mock_service_key_version: ServiceKeyVersion, - fake_service_details: ServiceDockerData, - fake_service_extras: ServiceExtras, - fake_service_labels: dict[str, Any], - fake_running_service_details: RunningDynamicServiceDetails, -): - # pylint: disable=not-context-manager - with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - quoted_key = urllib.parse.quote_plus(mock_service_key_version.key) - version = mock_service_key_version.version - - 
respx_mock.get( - f"/services/{quoted_key}/{version}", name="get_service_version" - ).respond(json={"data": [fake_service_details.dict(by_alias=True)]}) - - respx_mock.get( - f"/service_extras/{quoted_key}/{version}", name="get_service_extras" - ).respond(json={"data": fake_service_extras.dict(by_alias=True)}) - - respx_mock.get( - f"/services/{quoted_key}/{version}/labels", name="get_service_labels" - ).respond(json={"data": fake_service_labels}) - - respx_mock.get( - re.compile( - r"running_interactive_services/[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$" - ), - name="get_running_service_details", - ).respond( - json={"data": json.loads(fake_running_service_details.json(by_alias=True))} - ) - - yield respx_mock - - -async def test_get_service_details( - minimal_director_config: None, - minimal_app: FastAPI, - mocked_director_service_fcts, - mock_service_key_version: ServiceKeyVersion, - fake_service_details: ServiceDockerData, -): - director_client: DirectorV0Client = minimal_app.state.director_v0_client - service_details: ServiceDockerData = await director_client.get_service_details( - mock_service_key_version - ) - - assert mocked_director_service_fcts["get_service_version"].called - assert fake_service_details == service_details - - -async def test_get_service_extras( - minimal_director_config: None, - minimal_app: FastAPI, - mocked_director_service_fcts, - mock_service_key_version: ServiceKeyVersion, - fake_service_extras: ServiceExtras, -): - director_client: DirectorV0Client = minimal_app.state.director_v0_client - service_extras: ServiceExtras = await director_client.get_service_extras( - mock_service_key_version.key, mock_service_key_version.version - ) - assert mocked_director_service_fcts["get_service_extras"].called - assert fake_service_extras == service_extras - - -async def test_get_service_labels( - minimal_director_config: None, - minimal_app: FastAPI, - mocked_director_service_fcts, - fake_service_labels: dict[str, Any], - mock_service_key_version: ServiceKeyVersion, -): - director_client: DirectorV0Client = minimal_app.state.director_v0_client - - service_labels: SimcoreServiceLabels = await director_client.get_service_labels( - mock_service_key_version - ) - assert mocked_director_service_fcts["get_service_labels"].called - assert SimcoreServiceLabels(**fake_service_labels) == service_labels - - -async def test_get_running_service_details( - minimal_director_config: None, - minimal_app: FastAPI, - mocked_director_service_fcts, - fake_running_service_details: RunningDynamicServiceDetails, -): - director_client: DirectorV0Client = minimal_app.state.director_v0_client - - service_details: RunningDynamicServiceDetails = ( - await director_client.get_running_service_details(uuid4()) - ) - assert mocked_director_service_fcts["get_running_service_details"].called - assert fake_running_service_details == service_details diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_base.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_base.py deleted file mode 100644 index a6e61a9b5f5..00000000000 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_base.py +++ /dev/null @@ -1,206 +0,0 @@ -# pylint:disable=redefined-outer-name - -import pytest -from httpx import ( - ConnectError, - HTTPError, - PoolTimeout, - Request, - RequestError, - Response, - codes, -) -from pydantic import AnyHttpUrl, parse_obj_as -from pytest import LogCaptureFixture -from respx import 
MockRouter -from simcore_service_director_v2.modules.dynamic_sidecar.api_client._base import ( - BaseThinClient, - expect_status, - retry_on_errors, -) -from simcore_service_director_v2.modules.dynamic_sidecar.api_client._errors import ( - ClientHttpError, - UnexpectedStatusError, - _WrongReturnType, -) - -# UTILS - - -class FakeThickClient(BaseThinClient): - @retry_on_errors - async def get_provided_url(self, provided_url: str) -> Response: - return await self.client.get(provided_url) - - @retry_on_errors - async def get_retry_for_status(self) -> Response: - return await self.client.get("http://missing-host:1111") - - -def _assert_messages(messages: list[str]) -> None: - # check if the right amount of messages was captured by the logs - unexpected_counter = 1 - for log_message in messages: - if log_message.startswith("Retrying"): - assert "as it raised" in log_message - continue - assert log_message.startswith(f"Request timed-out after {unexpected_counter}") - unexpected_counter += 1 - - -@pytest.fixture -def request_timeout() -> int: - # below refer to exponential wait step duration - return 1 + 2 - - -@pytest.fixture -def thick_client(request_timeout: int) -> FakeThickClient: - return FakeThickClient(request_timeout=request_timeout) - - -@pytest.fixture -def test_url() -> AnyHttpUrl: - return parse_obj_as(AnyHttpUrl, "http://missing-host:1111") - - -async def test_base_with_async_context_manager( - test_url: AnyHttpUrl, request_timeout: int -) -> None: - async with FakeThickClient(request_timeout=request_timeout) as client: - with pytest.raises(ClientHttpError): - await client.get_provided_url(test_url) - - -async def test_connection_error( - thick_client: FakeThickClient, test_url: AnyHttpUrl -) -> None: - with pytest.raises(ClientHttpError) as exe_info: - await thick_client.get_provided_url(test_url) - - assert isinstance(exe_info.value, ClientHttpError) - assert isinstance(exe_info.value.error, ConnectError) - - -async def test_retry_on_errors( - request_timeout: int, - test_url: AnyHttpUrl, - caplog_info_level: LogCaptureFixture, -) -> None: - client = FakeThickClient(request_timeout=request_timeout) - - with pytest.raises(ClientHttpError): - await client.get_provided_url(test_url) - - _assert_messages(caplog_info_level.messages) - - -@pytest.mark.parametrize("error_class", [ConnectError, PoolTimeout]) -async def test_retry_on_errors_by_error_type( - error_class: type[RequestError], - caplog_info_level: LogCaptureFixture, - request_timeout: int, - test_url: AnyHttpUrl, -) -> None: - class ATestClient(BaseThinClient): - # pylint: disable=no-self-use - @retry_on_errors - async def raises_request_error(self) -> Response: - raise error_class( - "mock_connect_error", - request=Request(method="GET", url=test_url), - ) - - client = ATestClient(request_timeout=request_timeout) - - with pytest.raises(ClientHttpError): - await client.raises_request_error() - - if error_class == PoolTimeout: - _assert_messages(caplog_info_level.messages[:-1]) - connections_message = caplog_info_level.messages[-1] - assert ( - connections_message - == "Pool status @ 'POOL TIMEOUT': requests(0)=[], connections(0)=[]" - ) - else: - _assert_messages(caplog_info_level.messages) - - -async def test_retry_on_errors_raises_client_http_error( - request_timeout: int, -) -> None: - class ATestClient(BaseThinClient): - # pylint: disable=no-self-use - @retry_on_errors - async def raises_http_error(self) -> Response: - raise HTTPError("mock_http_error") - - client = ATestClient(request_timeout=request_timeout) - - with 
pytest.raises(ClientHttpError): - await client.raises_http_error() - - -async def test_methods_do_not_return_response( - request_timeout: int, -) -> None: - class OKTestClient(BaseThinClient): - async def public_method_ok(self) -> Response: # type: ignore - """this method will be ok even if no code is used""" - - # OK - OKTestClient(request_timeout=request_timeout) - - class FailWrongAnnotationTestClient(BaseThinClient): - async def public_method_wrong_annotation(self) -> None: - """this method will raise an error""" - - with pytest.raises(_WrongReturnType): - FailWrongAnnotationTestClient(request_timeout=request_timeout) - - class FailNoAnnotationTestClient(BaseThinClient): - async def public_method_no_annotation(self): - """this method will raise an error""" - - with pytest.raises(_WrongReturnType): - FailNoAnnotationTestClient(request_timeout=request_timeout) - - -async def test_expect_state_decorator( - test_url: AnyHttpUrl, - respx_mock: MockRouter, - request_timeout: int, -) -> None: - - url_get_200_ok = f"{test_url}/ok" - get_wrong_state = f"{test_url}/wrong-state" - error_status = codes.NOT_FOUND - - class ATestClient(BaseThinClient): - @expect_status(codes.OK) - async def get_200_ok(self) -> Response: - return await self.client.get(url_get_200_ok) - - @expect_status(error_status) - async def get_wrong_state(self) -> Response: - return await self.client.get(get_wrong_state) - - respx_mock.get(url_get_200_ok).mock(return_value=Response(codes.OK)) - respx_mock.get(get_wrong_state).mock(return_value=Response(codes.OK)) - - test_client = ATestClient(request_timeout=request_timeout) - - # OK - response = await test_client.get_200_ok() - assert response.status_code == codes.OK - - # RAISES EXPECTED ERROR - with pytest.raises(UnexpectedStatusError) as err_info: - await test_client.get_wrong_state() - - assert err_info.value.response.status_code == codes.OK - assert ( - f"{err_info.value}" - == f"Expected status: {error_status}, got {codes.OK} for: {get_wrong_state}: headers=Headers({{}}), body=''" - ) diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py index 037bec21fbf..c748fc1cd1b 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py @@ -2,24 +2,26 @@ # pylint:disable=redefined-outer-name from contextlib import contextmanager -from typing import Any, AsyncIterable, Callable, Iterator, Optional +from typing import Any, AsyncIterable, Callable, Iterator from unittest.mock import AsyncMock +from models_library.api_schemas_dynamic_sidecar.containers import ( + ActivityInfoOrNone +) import pytest +from common_library.json_serialization import json_dumps +from faker import Faker from fastapi import FastAPI, status from httpx import HTTPError, Response -from pydantic import AnyHttpUrl, parse_obj_as -from pytest import LogCaptureFixture, MonkeyPatch +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from pydantic import AnyHttpUrl, TypeAdapter from pytest_mock import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.fastapi.http_client_thin import ClientHttpError, UnexpectedStatusError from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.modules.dynamic_sidecar.api_client._errors import ( - ClientHttpError, - UnexpectedStatusError, -) 
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import ( - DynamicSidecarClient, - get_dynamic_sidecar_client, + SidecarsClient, + get_sidecars_client, ) from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import ( setup as api_client_setup, @@ -34,14 +36,17 @@ @pytest.fixture def dynamic_sidecar_endpoint() -> AnyHttpUrl: - return parse_obj_as(AnyHttpUrl, "http://missing-host:1111") + return TypeAdapter(AnyHttpUrl).validate_python("http://missing-host:1111") @pytest.fixture -def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> None: - monkeypatch.setenv("S3_ACCESS_KEY", "") - monkeypatch.setenv("S3_SECRET_KEY", "") - monkeypatch.setenv("S3_BUCKET_NAME", "") +def mock_env( + monkeypatch: pytest.MonkeyPatch, mock_env: EnvVarsDict, faker: Faker +) -> None: + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") monkeypatch.setenv("POSTGRES_HOST", "") @@ -51,19 +56,19 @@ def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> None: # reduce number of retries to make more reliable monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", "3") - monkeypatch.setenv("S3_ENDPOINT", "") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) @pytest.fixture -async def dynamic_sidecar_client( - mock_env: EnvVarsDict, -) -> AsyncIterable[DynamicSidecarClient]: +async def sidecars_client( + mock_env: EnvVarsDict, faker: Faker +) -> AsyncIterable[SidecarsClient]: app = FastAPI() app.state.settings = AppSettings.create_from_envs() # WARNING: pytest gets confused with 'setup', use instead alias 'api_client_setup' await api_client_setup(app) - yield get_dynamic_sidecar_client(app) + yield await get_sidecars_client(app, faker.uuid4()) await shutdown(app) @@ -75,27 +80,27 @@ def request_timeout() -> int: @pytest.fixture def raise_request_timeout( - monkeypatch: MonkeyPatch, request_timeout: int, mock_env: EnvVarsDict + monkeypatch: pytest.MonkeyPatch, request_timeout: int, mock_env: EnvVarsDict ) -> None: monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", f"{request_timeout}") @pytest.fixture def get_patched_client( - dynamic_sidecar_client: DynamicSidecarClient, mocker: MockerFixture + sidecars_client: SidecarsClient, mocker: MockerFixture ) -> Callable: @contextmanager def wrapper( method: str, - return_value: Optional[Any] = None, - side_effect: Optional[Callable] = None, - ) -> Iterator[DynamicSidecarClient]: + return_value: Any | None = None, + side_effect: Callable | None = None, + ) -> Iterator[SidecarsClient]: mocker.patch( - f"simcore_service_director_v2.modules.dynamic_sidecar.api_client._thin.ThinDynamicSidecarClient.{method}", + f"simcore_service_director_v2.modules.dynamic_sidecar.api_client._thin.ThinSidecarsClient.{method}", return_value=return_value, side_effect=side_effect, ) - yield dynamic_sidecar_client + yield sidecars_client return wrapper @@ -121,11 +126,11 @@ async def test_is_healthy( async def test_is_healthy_times_out( raise_request_timeout: None, - dynamic_sidecar_client: DynamicSidecarClient, + sidecars_client: SidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, - caplog_info_level: LogCaptureFixture, + caplog_info_level: pytest.LogCaptureFixture, ) -> None: - assert await dynamic_sidecar_client.is_healthy(dynamic_sidecar_endpoint) is False + assert await 
sidecars_client.is_healthy(dynamic_sidecar_endpoint) is False # check if the right amount of messages was captured by the logs unexpected_counter = 1 for log_message in caplog_info_level.messages: @@ -141,17 +146,17 @@ async def test_is_healthy_times_out( [ pytest.param( UnexpectedStatusError( - Response( + response=Response( status_code=status.HTTP_400_BAD_REQUEST, content="some mocked error", request=AsyncMock(), ), - status.HTTP_200_OK, + expecting=status.HTTP_200_OK, ), id="UnexpectedStatusError", ), pytest.param( - ClientHttpError(HTTPError("another mocked error")), id="HTTPError" + ClientHttpError(error=HTTPError("another mocked error")), id="HTTPError" ), ], ) @@ -164,7 +169,7 @@ async def test_is_healthy_api_error( "get_health", side_effect=side_effect, ) as client: - assert await client.is_healthy(dynamic_sidecar_endpoint) == False + assert await client.is_healthy(dynamic_sidecar_endpoint) is False async def test_containers_inspect( @@ -197,41 +202,35 @@ async def test_containers_docker_status_api_error( with get_patched_client( "get_containers", side_effect=UnexpectedStatusError( - Response( + response=Response( status_code=status.HTTP_400_BAD_REQUEST, content="some mocked error", request=AsyncMock(), ), - status.HTTP_200_OK, + expecting=status.HTTP_200_OK, ), ) as client: assert await client.containers_docker_status(dynamic_sidecar_endpoint) == {} -async def test_disable_service_outputs_watcher( +@pytest.mark.parametrize("enable_outputs", [True, False]) +@pytest.mark.parametrize("enable_inputs", [True, False]) +async def test_toggle_service_ports_io( get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl, + enable_outputs: bool, + enable_inputs: bool, ) -> None: with get_patched_client( - "patch_containers_outputs_watcher", + "patch_containers_ports_io", return_value=Response(status_code=status.HTTP_204_NO_CONTENT), ) as client: assert ( - await client.disable_service_outputs_watcher(dynamic_sidecar_endpoint) - is None - ) - - -async def test_enable_service_outputs_watcher( - get_patched_client: Callable, - dynamic_sidecar_endpoint: AnyHttpUrl, -) -> None: - with get_patched_client( - "patch_containers_outputs_watcher", - return_value=Response(status_code=status.HTTP_204_NO_CONTENT), - ) as client: - assert ( - await client.enable_service_outputs_watcher(dynamic_sidecar_endpoint) + await client.toggle_service_ports_io( + dynamic_sidecar_endpoint, + enable_outputs=enable_outputs, + enable_inputs=enable_inputs, + ) is None ) @@ -281,14 +280,15 @@ async def test_get_entrypoint_container_name_api_not_found( with get_patched_client( "get_containers_name", side_effect=UnexpectedStatusError( - Response(status_code=status.HTTP_404_NOT_FOUND, request=AsyncMock()), - status.HTTP_204_NO_CONTENT, + response=Response( + status_code=status.HTTP_404_NOT_FOUND, request=AsyncMock() + ), + expecting=status.HTTP_204_NO_CONTENT, ), - ) as client: - with pytest.raises(EntrypointContainerNotFoundError): - await client.get_entrypoint_container_name( - dynamic_sidecar_endpoint, dynamic_sidecar_network_name - ) + ) as client, pytest.raises(EntrypointContainerNotFoundError): + await client.get_entrypoint_container_name( + dynamic_sidecar_endpoint, dynamic_sidecar_network_name + ) @pytest.mark.parametrize("network_aliases", [[], ["an-alias"], ["alias-1", "alias-2"]]) @@ -303,7 +303,7 @@ async def test_attach_container_to_network( ) as client: assert ( # pylint:disable=protected-access - await client._attach_container_to_network( + await client._attach_container_to_network( # noqa: SLF001 
dynamic_sidecar_endpoint, container_id="container_id", network_id="network_id", @@ -323,10 +323,66 @@ async def test_detach_container_from_network( ) as client: assert ( # pylint:disable=protected-access - await client._detach_container_from_network( + await client._detach_container_from_network( # noqa: SLF001 dynamic_sidecar_endpoint, container_id="container_id", network_id="network_id", ) is None ) + + +@pytest.mark.parametrize("volume_category", VolumeCategory) +@pytest.mark.parametrize("volume_status", VolumeStatus) +async def test_update_volume_state( + get_patched_client: Callable, + dynamic_sidecar_endpoint: AnyHttpUrl, + volume_category: VolumeCategory, + volume_status: VolumeStatus, +) -> None: + with get_patched_client( + "put_volumes", + return_value=Response(status_code=status.HTTP_204_NO_CONTENT), + ) as client: + assert ( + await client.update_volume_state( + dynamic_sidecar_endpoint, + volume_category=volume_category, + volume_status=volume_status, + ) + is None + ) + + +@pytest.mark.parametrize( + "mock_dict", + [{"seconds_inactive": 1}, {"seconds_inactive": 0}, None], +) +async def test_get_service_activity( + get_patched_client: Callable, + dynamic_sidecar_endpoint: AnyHttpUrl, + mock_dict: dict[str, Any], +) -> None: + with get_patched_client( + "get_containers_activity", + return_value=Response( + status_code=status.HTTP_200_OK, text=json_dumps(mock_dict) + ), + ) as client: + assert await client.get_service_activity(dynamic_sidecar_endpoint) == TypeAdapter(ActivityInfoOrNone).validate_python(mock_dict) + + +async def test_free_reserved_disk_space( + get_patched_client: Callable, + dynamic_sidecar_endpoint: AnyHttpUrl, +) -> None: + with get_patched_client( + "post_disk_reserved_free", + return_value=Response(status_code=status.HTTP_204_NO_CONTENT), + ) as client: + assert ( + await client.free_reserved_disk_space( + dynamic_sidecar_endpoint, + ) + is None + ) diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_thin.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_thin.py index a1617dedcb3..7e9d4f429a4 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_thin.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_thin.py @@ -1,33 +1,33 @@ # pylint:disable=unused-argument # pylint:disable=redefined-outer-name -import json -from typing import Any, Callable, Optional +from collections.abc import AsyncIterable, Callable +from typing import Any import pytest +from common_library.json_serialization import json_dumps +from faker import Faker from fastapi import FastAPI, status from httpx import Response -from pydantic import AnyHttpUrl, parse_obj_as -from pytest import MonkeyPatch +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from pydantic import AnyHttpUrl, TypeAdapter from pytest_simcore.helpers.typing_env import EnvVarsDict from respx import MockRouter, Route from respx.types import SideEffectTypes from servicelib.docker_constants import SUFFIX_EGRESS_PROXY_NAME from simcore_service_director_v2.core.settings import AppSettings from simcore_service_director_v2.modules.dynamic_sidecar.api_client._thin import ( - ThinDynamicSidecarClient, + ThinSidecarsClient, ) -# NOTE: typing and callables cannot -MockRequestType = Callable[ - [str, str, Optional[Response], Optional[SideEffectTypes]], Route -] +MockRequestType = Callable[[str, str, Response | None, 
SideEffectTypes | None], Route] # UTILS -def assert_responses(mocked: Response, result: Optional[Response]) -> None: +def assert_responses(mocked: Response, result: Response | None) -> None: assert result is not None assert mocked.status_code == result.status_code assert mocked.headers == result.headers @@ -35,11 +35,14 @@ def assert_responses(mocked: Response, result: Optional[Response]) -> None: @pytest.fixture -def mocked_app(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> FastAPI: - monkeypatch.setenv("S3_ENDPOINT", "") - monkeypatch.setenv("S3_ACCESS_KEY", "") - monkeypatch.setenv("S3_SECRET_KEY", "") - monkeypatch.setenv("S3_BUCKET_NAME", "") +def mocked_app( + monkeypatch: pytest.MonkeyPatch, mock_env: EnvVarsDict, faker: Faker +) -> FastAPI: + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") monkeypatch.setenv("POSTGRES_HOST", "") @@ -53,35 +56,34 @@ def mocked_app(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> FastAPI: @pytest.fixture -def thin_client(mocked_app: FastAPI) -> ThinDynamicSidecarClient: - return ThinDynamicSidecarClient(mocked_app) +async def thin_client(mocked_app: FastAPI) -> AsyncIterable[ThinSidecarsClient]: + async with ThinSidecarsClient(mocked_app) as client: + yield client @pytest.fixture def dynamic_sidecar_endpoint() -> AnyHttpUrl: - return parse_obj_as(AnyHttpUrl, "http://missing-host:1111") + return TypeAdapter(AnyHttpUrl).validate_python("http://missing-host:1111") @pytest.fixture -def mock_request( - dynamic_sidecar_endpoint: AnyHttpUrl, respx_mock: MockRouter -) -> MockRequestType: +def mock_request(respx_mock: MockRouter) -> MockRequestType: def request_mock( method: str, path: str, - return_value: Optional[Response] = None, - side_effect: Optional[SideEffectTypes] = None, + return_value: Response | None = None, + side_effect: SideEffectTypes | None = None, ) -> Route: print(f"Mocking {path=}") - return respx_mock.request( - method=method, url=f"{dynamic_sidecar_endpoint}{path}" - ).mock(return_value=return_value, side_effect=side_effect) + return respx_mock.request(method=method, url=f"{path}").mock( + return_value=return_value, side_effect=side_effect + ) return request_mock async def test_get_health( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, ) -> None: @@ -93,7 +95,7 @@ async def test_get_health( async def test_get_health_no_retry( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, ): @@ -106,7 +108,7 @@ async def test_get_health_no_retry( @pytest.mark.parametrize("only_status", [False, True]) async def test_get_containers( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, only_status: bool, @@ -114,7 +116,7 @@ async def test_get_containers( mock_response = Response(status.HTTP_200_OK) mock_request( "GET", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}/containers?only_status={str(only_status).lower()}", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers?only_status={str(only_status).lower()}", mock_response, None, ) @@ -125,30 +127,34 @@ async def test_get_containers( 
assert_responses(mock_response, response) -@pytest.mark.parametrize("is_enabled", [False, True]) -async def test_post_patch_containers_outputs_watcher( - thin_client: ThinDynamicSidecarClient, +@pytest.mark.parametrize("enable_outputs", [False, True]) +@pytest.mark.parametrize("enable_inputs", [False, True]) +async def test_post_patch_containers_ports_io( + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, - is_enabled: bool, + enable_outputs: bool, + enable_inputs: bool, ) -> None: mock_response = Response(status.HTTP_204_NO_CONTENT) mock_request( "PATCH", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}/containers/directory-watcher", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/ports/io", mock_response, None, ) - response = await thin_client.patch_containers_outputs_watcher( - dynamic_sidecar_endpoint, is_enabled=is_enabled + response = await thin_client.patch_containers_ports_io( + dynamic_sidecar_endpoint, + enable_outputs=enable_outputs, + enable_inputs=enable_inputs, ) assert_responses(mock_response, response) @pytest.mark.parametrize("outputs_labels", [{}, {"some": "data"}]) async def test_post_containers_ports_outputs_dirs( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, outputs_labels: dict[str, Any], @@ -156,7 +162,7 @@ async def test_post_containers_ports_outputs_dirs( mock_response = Response(status.HTTP_204_NO_CONTENT) mock_request( "POST", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}/containers/ports/outputs/dirs", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/ports/outputs/dirs", mock_response, None, ) @@ -169,23 +175,23 @@ async def test_post_containers_ports_outputs_dirs( @pytest.mark.parametrize("dynamic_sidecar_network_name", ["test_nw_name"]) async def test_get_containers_name( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, dynamic_sidecar_network_name: str, ) -> None: mock_response = Response(status.HTTP_200_OK) - encoded_filters = json.dumps( - dict( - network=dynamic_sidecar_network_name, - exclude=SUFFIX_EGRESS_PROXY_NAME, - ) + encoded_filters = json_dumps( + { + "network": dynamic_sidecar_network_name, + "exclude": SUFFIX_EGRESS_PROXY_NAME, + } ) mock_request( "GET", ( - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}" + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}" f"/containers/name?filters={encoded_filters}" ), mock_response, @@ -201,7 +207,7 @@ async def test_get_containers_name( @pytest.mark.parametrize("network_aliases", [[], ["an_alias"], ["multuple_aliases"]]) async def test_post_containers_networks_attach( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, network_aliases: list[str], @@ -210,7 +216,7 @@ async def test_post_containers_networks_attach( container_id = "a_container_id" mock_request( "POST", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}/containers/{container_id}/networks:attach", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/{container_id}/networks:attach", mock_response, None, ) @@ -225,7 +231,7 @@ async def test_post_containers_networks_attach( async def test_post_containers_networks_detach( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: 
AnyHttpUrl, mock_request: MockRequestType, ) -> None: @@ -233,7 +239,7 @@ async def test_post_containers_networks_detach( container_id = "a_container_id" mock_request( "POST", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}/containers/{container_id}/networks:detach", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/{container_id}/networks:detach", mock_response, None, ) @@ -244,13 +250,46 @@ async def test_post_containers_networks_detach( assert_responses(mock_response, response) +@pytest.mark.parametrize("volume_category", VolumeCategory) +@pytest.mark.parametrize("volume_status", VolumeStatus) +async def test_put_volumes( + thin_client: ThinSidecarsClient, + dynamic_sidecar_endpoint: AnyHttpUrl, + mock_request: MockRequestType, + volume_category: str, + volume_status: VolumeStatus, +) -> None: + mock_response = Response(status.HTTP_204_NO_CONTENT) + mock_request( + "PUT", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/volumes/{volume_category}", + mock_response, + None, + ) + + response = await thin_client.put_volumes( + dynamic_sidecar_endpoint, + volume_category=volume_category, + volume_status=volume_status, + ) + assert_responses(mock_response, response) + + @pytest.mark.parametrize( "handler_name, mock_endpoint, extra_kwargs", [ pytest.param( "post_containers_tasks", "/containers", - dict(compose_spec="some_fake_compose_as_str"), + { + "metrics_params": TypeAdapter( + CreateServiceMetricsAdditionalParams + ).validate_python( + CreateServiceMetricsAdditionalParams.model_config[ + "json_schema_extra" + ]["example"], + ) + }, id="post_containers_tasks", ), pytest.param( @@ -259,6 +298,12 @@ async def test_post_containers_networks_detach( {}, id="down", ), + pytest.param( + "post_containers_images_pull", + "/containers/images:pull", + {}, + id="user_servces_images_pull", + ), pytest.param( "post_containers_tasks_state_restore", "/containers/state:restore", @@ -298,7 +343,7 @@ async def test_post_containers_networks_detach( ], ) async def test_post_containers_tasks( - thin_client: ThinDynamicSidecarClient, + thin_client: ThinSidecarsClient, dynamic_sidecar_endpoint: AnyHttpUrl, mock_request: MockRequestType, handler_name: str, @@ -308,7 +353,7 @@ async def test_post_containers_tasks( mock_response = Response(status.HTTP_202_ACCEPTED, json="mocked_task_id") mock_request( "POST", - f"{dynamic_sidecar_endpoint}/{thin_client.API_VERSION}{mock_endpoint}", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}{mock_endpoint}", mock_response, None, ) @@ -316,3 +361,56 @@ async def test_post_containers_tasks( thin_client_handler = getattr(thin_client, handler_name) response = await thin_client_handler(dynamic_sidecar_endpoint, **extra_kwargs) assert_responses(mock_response, response) + + +async def test_get_containers_inactivity( + thin_client: ThinSidecarsClient, + dynamic_sidecar_endpoint: AnyHttpUrl, + mock_request: MockRequestType, +) -> None: + mock_response = Response(status.HTTP_200_OK, json={}) + mock_request( + "GET", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/activity", + mock_response, + None, + ) + + response = await thin_client.get_containers_activity(dynamic_sidecar_endpoint) + assert_responses(mock_response, response) + + +async def test_post_disk_reserved_free( + thin_client: ThinSidecarsClient, + dynamic_sidecar_endpoint: AnyHttpUrl, + mock_request: MockRequestType, +) -> None: + mock_response = Response(status.HTTP_204_NO_CONTENT) + mock_request( + "POST", + 
f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/disk/reserved:free", + mock_response, + None, + ) + + response = await thin_client.post_disk_reserved_free(dynamic_sidecar_endpoint) + assert_responses(mock_response, response) + + +async def test_post_containers_compose_spec( + thin_client: ThinSidecarsClient, + dynamic_sidecar_endpoint: AnyHttpUrl, + mock_request: MockRequestType, +): + mock_response = Response(status.HTTP_202_ACCEPTED) + mock_request( + "POST", + f"{dynamic_sidecar_endpoint}{thin_client.API_VERSION}/containers/compose-spec", + mock_response, + None, + ) + + response = await thin_client.post_containers_compose_spec( + dynamic_sidecar_endpoint, compose_spec="some_fake_compose_as_str" + ) + assert_responses(mock_response, response) diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_egress_config.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_egress_config.py index 1bb15c9f02c..94f50850fb6 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_egress_config.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_egress_config.py @@ -7,13 +7,13 @@ import pytest import yaml from models_library.basic_types import PortInt -from models_library.service_settings_labels import ( +from models_library.service_settings_nat_rule import ( DEFAULT_DNS_SERVER_ADDRESS, DEFAULT_DNS_SERVER_PORT, NATRule, _PortRange, ) -from orderedset import OrderedSet +from ordered_set import OrderedSet from pydantic import NonNegativeInt from simcore_service_director_v2.modules.dynamic_sidecar.docker_compose_egress_config import ( _get_egress_proxy_dns_port_rules, diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py index 2324b38c3eb..94726173395 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_compose_specs.py @@ -1,22 +1,27 @@ # pylint: disable=redefined-outer-name # pylint: disable=protected-access - +import json from copy import deepcopy from typing import Any from uuid import uuid4 import pytest import yaml +from models_library.docker import to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID +from models_library.service_settings_labels import ( + ComposeSpecLabelDict, + SimcoreServiceLabels, +) from models_library.services_resources import ( DEFAULT_SINGLE_SERVICE_NAME, ResourcesDict, ServiceResourcesDict, ) from models_library.users import UserID -from pydantic import parse_obj_as +from pydantic import TypeAdapter from servicelib.resources import CPU_RESOURCE_LIMIT_KEY, MEM_RESOURCE_LIMIT_KEY from simcore_service_director_v2.modules.dynamic_sidecar import docker_compose_specs @@ -45,20 +50,20 @@ def test_parse_and_export_of_compose_environment_section(): assert isinstance(compose_as_list_str["environment"], list) - assert docker_compose_specs._environment_section.parse( + assert docker_compose_specs._EnvironmentSection.parse( compose_as_dict["environment"] - ) == docker_compose_specs._environment_section.parse( + ) == docker_compose_specs._EnvironmentSection.parse( compose_as_list_str["environment"] ) assert ( - docker_compose_specs._environment_section.parse( + docker_compose_specs._EnvironmentSection.parse( 
compose_as_list_str["environment"] ) == compose_as_dict["environment"] ) - envs = docker_compose_specs._environment_section.export_as_list( + envs = docker_compose_specs._EnvironmentSection.export_as_list( compose_as_dict["environment"] ) assert envs == compose_as_list_str["environment"] @@ -69,8 +74,7 @@ def test_parse_and_export_of_compose_environment_section(): [ pytest.param( {"version": "2.3", "services": {DEFAULT_SINGLE_SERVICE_NAME: {}}}, - parse_obj_as( - ServiceResourcesDict, + TypeAdapter(ServiceResourcesDict).validate_python( { DEFAULT_SINGLE_SERVICE_NAME: { "image": "simcore/services/dynamic/jupyter-math:2.0.5", @@ -85,13 +89,12 @@ def test_parse_and_export_of_compose_environment_section(): ), pytest.param( {"version": "3.7", "services": {DEFAULT_SINGLE_SERVICE_NAME: {}}}, - parse_obj_as( - ServiceResourcesDict, + TypeAdapter(ServiceResourcesDict).validate_python( { DEFAULT_SINGLE_SERVICE_NAME: { "image": "simcore/services/dynamic/jupyter-math:2.0.5", "resources": { - "CPU": {"limit": 1.1, "reservation": 4.0}, + "CPU": {"limit": "1.1", "reservation": "4.0"}, "RAM": {"limit": 17179869184, "reservation": 536870912}, }, }, @@ -128,7 +131,7 @@ async def test_inject_resource_limits_and_reservations( assert spec["deploy"]["resources"]["limits"]["memory"] == f"{memory.limit}" assert ( - f"{CPU_RESOURCE_LIMIT_KEY}={int(cpu.limit*10**9)}" + f"{CPU_RESOURCE_LIMIT_KEY}={int(float(cpu.limit)*10**9)}" in spec["environment"] ) assert f"{MEM_RESOURCE_LIMIT_KEY}={memory.limit}" in spec["environment"] @@ -146,12 +149,58 @@ async def test_inject_resource_limits_and_reservations( assert f"{MEM_RESOURCE_LIMIT_KEY}={memory.limit}" in spec["environment"] +@pytest.mark.parametrize( + "compose_spec, storage_opt_count", + [ + pytest.param( + json.loads( + SimcoreServiceLabels.model_json_schema()["examples"][2][ + "simcore.service.compose-spec" + ] + ), + 2, + id="two_storage_opt_entries", + ), + pytest.param( + { + "version": "2.3", + "services": { + "rt-web": {"storage_opt": None}, + "s4l-core": {}, + }, + }, + 1, + id="one_storage_opt_entry", + ), + pytest.param( + {"version": "2.3", "services": {"rt-web": {}, "s4l-core": {}}}, + 0, + id="no_storage_opt_entry", + ), + ], +) +@pytest.mark.parametrize("has_quota_support", [True, False]) +def test_update_service_quotas_storage( + compose_spec: ComposeSpecLabelDict, storage_opt_count: int, has_quota_support: bool +): + assert json.dumps(compose_spec).count("storage_opt") == storage_opt_count + if not has_quota_support: + docker_compose_specs._strip_service_quotas(service_spec=compose_spec) + + if has_quota_support: + assert json.dumps(compose_spec).count("storage_opt") == storage_opt_count + else: + assert "storage_opt" not in json.dumps(compose_spec) + + def test_regression_service_has_no_reservations(): service_spec: dict[str, Any] = { "version": "3.7", "services": {DEFAULT_SINGLE_SERVICE_NAME: {}}, } - service_resources: ServiceResourcesDict = parse_obj_as(ServiceResourcesDict, {}) + service_resources: ServiceResourcesDict = TypeAdapter( + ServiceResourcesDict + ).validate_python({}) spec_before = deepcopy(service_spec) docker_compose_specs._update_resource_limits_and_reservations( @@ -165,14 +214,23 @@ def test_regression_service_has_no_reservations(): NODE_ID: NodeID = uuid4() SIMCORE_USER_AGENT: str = "a-puppet" PRODUCT_NAME: str = "osparc" +SWARM_STACK_NAME: str = "mystackname" +CPU_LIMIT: float = 4.0 +RAM_LIMIT: int = 1233112423423 -EXPECTED_LABELS: list[str] = [ - f"product_name={PRODUCT_NAME}", - f"simcore_user_agent={SIMCORE_USER_AGENT}", - 
f"study_id={PROJECT_ID}", - f"user_id={USER_ID}", - f"uuid={NODE_ID}", -] + +EXPECTED_LABELS: list[str] = sorted( + [ + f"{to_simcore_runtime_docker_label_key('product-name')}={PRODUCT_NAME}", + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}={SIMCORE_USER_AGENT}", + f"{to_simcore_runtime_docker_label_key('project-id')}={PROJECT_ID}", + f"{to_simcore_runtime_docker_label_key('user-id')}={USER_ID}", + f"{to_simcore_runtime_docker_label_key('node-id')}={NODE_ID}", + f"{to_simcore_runtime_docker_label_key('swarm-stack-name')}={SWARM_STACK_NAME}", + f"{to_simcore_runtime_docker_label_key('cpu-limit')}=0.0", + f"{to_simcore_runtime_docker_label_key('memory-limit')}=0", + ] +) @pytest.mark.parametrize( @@ -199,6 +257,13 @@ async def test_update_container_labels( service_spec: dict[str, Any], expected_result: dict[str, Any] ): docker_compose_specs._update_container_labels( - service_spec, USER_ID, PROJECT_ID, NODE_ID, SIMCORE_USER_AGENT, PRODUCT_NAME + service_spec, + USER_ID, + PROJECT_ID, + NODE_ID, + SIMCORE_USER_AGENT, + PRODUCT_NAME, + SWARM_STACK_NAME, + {}, ) assert service_spec == expected_result diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_sidecar.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_sidecar.py index d16057dcc7e..27cdb831914 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_sidecar.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_sidecar.py @@ -2,71 +2,175 @@ # pylint: disable=unused-argument # pylint: disable=unused-variable -from typing import Any +from typing import Any, Final -from pytest import MonkeyPatch +import pytest +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - SchedulerData, -) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.docker_service_specs.sidecar import ( _get_environment_variables, + _get_storage_config, + _StorageConfig, ) # PLEASE keep alphabetical to simplify debugging -EXPECTED_DYNAMIC_SIDECAR_ENV_VAR_NAMES = { +EXPECTED_DYNAMIC_SIDECAR_ENV_VAR_NAMES: Final[set[str]] = { + "DY_DEPLOYMENT_REGISTRY_SETTINGS", + "DY_DOCKER_HUB_REGISTRY_SETTINGS", + "DY_SIDECAR_AWS_S3_CLI_SETTINGS", + "DY_SIDECAR_CALLBACKS_MAPPING", + "DY_SIDECAR_LEGACY_STATE", + "DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED", "DY_SIDECAR_NODE_ID", "DY_SIDECAR_PATH_INPUTS", "DY_SIDECAR_PATH_OUTPUTS", + "DY_SIDECAR_PRODUCT_NAME", "DY_SIDECAR_PROJECT_ID", "DY_SIDECAR_RUN_ID", + "DY_SIDECAR_SERVICE_KEY", + "DY_SIDECAR_SERVICE_VERSION", "DY_SIDECAR_STATE_EXCLUDE", "DY_SIDECAR_STATE_PATHS", + "DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE", "DY_SIDECAR_USER_ID", + "DY_SIDECAR_USER_PREFERENCES_PATH", "DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS", "DYNAMIC_SIDECAR_COMPOSE_NAMESPACE", "DYNAMIC_SIDECAR_LOG_LEVEL", + "DYNAMIC_SIDECAR_TRACING", + "NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS", "POSTGRES_DB", "POSTGRES_ENDPOINT", "POSTGRES_HOST", "POSTGRES_PASSWORD", "POSTGRES_PORT", "POSTGRES_USER", - "R_CLONE_ENABLED", + "R_CLONE_OPTION_BUFFER_SIZE", + "R_CLONE_OPTION_RETRIES", + "R_CLONE_OPTION_TRANSFERS", "R_CLONE_PROVIDER", "RABBIT_HOST", "RABBIT_PASSWORD", "RABBIT_PORT", + "RABBIT_SECURE", "RABBIT_USER", - "REGISTRY_AUTH", - "REGISTRY_PATH", - 
"REGISTRY_PW", - "REGISTRY_SSL", - "REGISTRY_URL", - "REGISTRY_USER", "S3_ACCESS_KEY", "S3_BUCKET_NAME", "S3_ENDPOINT", + "S3_REGION", "S3_SECRET_KEY", - "S3_SECURE", "SC_BOOT_MODE", "SIMCORE_HOST_NAME", "SSL_CERT_FILE", "STORAGE_HOST", + "STORAGE_PASSWORD", "STORAGE_PORT", + "STORAGE_SECURE", + "STORAGE_USERNAME", } def test_dynamic_sidecar_env_vars( - monkeypatch: MonkeyPatch, scheduler_data_from_http_request: SchedulerData, project_env_devel_environment: dict[str, Any], ): app_settings = AppSettings.create_from_envs() dynamic_sidecar_env_vars = _get_environment_variables( - "compose_namespace", scheduler_data_from_http_request, app_settings, False + "compose_namespace", + scheduler_data_from_http_request, + app_settings, + allow_internet_access=False, + metrics_collection_allowed=True, + telemetry_enabled=True, ) print("dynamic_sidecar_env_vars:", dynamic_sidecar_env_vars) assert set(dynamic_sidecar_env_vars) == EXPECTED_DYNAMIC_SIDECAR_ENV_VAR_NAMES + + +@pytest.mark.parametrize( + "env_vars, expected_storage_config", + [ + pytest.param( + {}, + _StorageConfig("storage", "8080", "null", "null", "0"), + id="no_env_vars", + ), + pytest.param( + { + "STORAGE_HOST": "just-storage", + "STORAGE_PORT": "123", + }, + _StorageConfig("just-storage", "123", "null", "null", "0"), + id="host-and-port", + ), + pytest.param( + { + "STORAGE_HOST": "storage-with-auth", + "STORAGE_PORT": "42", + "STORAGE_PASSWORD": "pass", + "STORAGE_USERNAME": "user", + }, + _StorageConfig("storage-with-auth", "42", "user", "pass", "0"), + id="host-port-pass-user", + ), + pytest.param( + { + "STORAGE_HOST": "storage-with-auth", + "STORAGE_PORT": "42", + "STORAGE_PASSWORD": "pass", + "STORAGE_USERNAME": "user", + "STORAGE_SECURE": "1", + }, + _StorageConfig("storage-with-auth", "42", "user", "pass", "1"), + id="host-port-pass-user-secure-true", + ), + pytest.param( + { + "STORAGE_HOST": "normal-storage", + "STORAGE_PORT": "8081", + "DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH": ( + "{" + '"STORAGE_USERNAME": "overwrite-user", ' + '"STORAGE_PASSWORD": "overwrite-passwd", ' + '"STORAGE_HOST": "overwrite-host", ' + '"STORAGE_PORT": "44", ' + '"STORAGE_SECURE": "1"' + "}" + ), + }, + _StorageConfig( + "overwrite-host", "44", "overwrite-user", "overwrite-passwd", "1" + ), + id="host-port-and-node-ports-config", + ), + pytest.param( + { + "DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH": ( + "{" + '"STORAGE_USERNAME": "overwrite-user", ' + '"STORAGE_PASSWORD": "overwrite-passwd", ' + '"STORAGE_HOST": "overwrite-host", ' + '"STORAGE_PORT": "44"' + "}" + ), + }, + _StorageConfig( + "overwrite-host", "44", "overwrite-user", "overwrite-passwd", "0" + ), + id="only-node-ports-config", + ), + ], +) +def test__get_storage_config( + project_env_devel_environment: dict[str, Any], + monkeypatch: pytest.MonkeyPatch, + env_vars: dict[str, str], + expected_storage_config: _StorageConfig, +): + setenvs_from_dict(monkeypatch, env_vars) + app_settings = AppSettings.create_from_envs() + + assert _get_storage_config(app_settings) == expected_storage_config diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_volume_remover.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_volume_remover.py deleted file mode 100644 index 8126d998617..00000000000 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_docker_service_specs_volume_remover.py +++ /dev/null @@ -1,231 +0,0 @@ -# pylint: disable=redefined-outer-name - -from pathlib import Path -from typing import AsyncIterator - 
-import pytest -from aiodocker import Docker, DockerError -from aiodocker.volumes import DockerVolume -from faker import Faker -from pydantic import parse_obj_as -from simcore_service_director_v2.modules.dynamic_sidecar.docker_service_specs.volume_remover import ( - SH_SCRIPT_REMOVE_VOLUMES, - DockerVersion, -) - -# UTILS - - -def _get_source(run_id: str, node_uuid: str, volume_path: Path) -> str: - reversed_path = f"{volume_path}"[::-1].replace("/", "_") - return f"dyv_{run_id}_{node_uuid}_{reversed_path}" - - -async def run_command( - async_docker_client: Docker, docker_version: DockerVersion, volume_names: list[str] -) -> str: - - volume_names_seq = " ".join(volume_names) - formatted_command = SH_SCRIPT_REMOVE_VOLUMES.format( - volume_names_seq=volume_names_seq, retries=3, sleep=0.1 - ) - print("Container will run:\n%s", formatted_command) - command = ["sh", "-c", formatted_command] - - container = await async_docker_client.containers.run( - config={ - "Cmd": command, - "Image": f"docker:{docker_version}-dind", - "HostConfig": {"Binds": ["/var/run/docker.sock:/var/run/docker.sock"]}, - }, - ) - await container.start() - await container.wait() - - logs = await container.log(stderr=True, stdout=True) - - await container.delete(force=True) - - return "".join(logs) - - -# FIXTURES - - -@pytest.fixture -def swarm_stack_name() -> str: - return "test_stack" - - -@pytest.fixture -def study_id(faker: Faker) -> str: - return faker.uuid4() - - -@pytest.fixture -def node_uuid(faker: Faker) -> str: - return faker.uuid4() - - -@pytest.fixture -def run_id(faker: Faker) -> str: - return faker.uuid4() - - -@pytest.fixture -def used_volume_path(tmp_path: Path) -> Path: - return tmp_path / "used_volume" - - -@pytest.fixture -def unused_volume_path(tmp_path: Path) -> Path: - return tmp_path / "unused_volume" - - -@pytest.fixture -async def unused_volume( - async_docker_client: Docker, - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, - unused_volume_path: Path, -) -> AsyncIterator[DockerVolume]: - source = _get_source(run_id, node_uuid, unused_volume_path) - volume = await async_docker_client.volumes.create( - { - "Name": source, - "Labels": { - "node_uuid": node_uuid, - "run_id": run_id, - "source": source, - "study_id": study_id, - "swarm_stack_name": swarm_stack_name, - "user_id": "1", - }, - } - ) - - yield volume - - try: - await volume.delete() - except DockerError: - pass - - -@pytest.fixture -async def used_volume( - async_docker_client: Docker, - swarm_stack_name: str, - study_id: str, - node_uuid: str, - run_id: str, - used_volume_path: Path, -) -> AsyncIterator[DockerVolume]: - source = _get_source(run_id, node_uuid, used_volume_path) - volume = await async_docker_client.volumes.create( - { - "Name": source, - "Labels": { - "node_uuid": node_uuid, - "run_id": run_id, - "source": source, - "study_id": study_id, - "swarm_stack_name": swarm_stack_name, - "user_id": "1", - }, - } - ) - - container = await async_docker_client.containers.run( - config={ - "Cmd": ["/bin/ash", "-c", "sleep 10000"], - "Image": "alpine:latest", - "HostConfig": {"Binds": [f"{volume.name}:{used_volume_path}"]}, - }, - name=f"using_volume_{volume.name}", - ) - await container.start() - - yield volume - - await container.delete(force=True) - await volume.delete() - - -@pytest.fixture -async def used_volume_name(used_volume: DockerVolume) -> str: - volume = await used_volume.show() - return volume["Name"] - - -@pytest.fixture -async def unused_volume_name(unused_volume: DockerVolume) -> str: - volume = 
await unused_volume.show() - return volume["Name"] - - -@pytest.fixture -def missing_volume_name(run_id: str, node_uuid: str) -> str: - return _get_source(run_id, node_uuid, Path("/MISSING/PATH")) - - -# TESTS - - -async def test_sh_script_error_if_volume_is_used( - async_docker_client: Docker, used_volume_name: str, docker_version: DockerVersion -): - command_stdout = await run_command( - async_docker_client, docker_version, volume_names=[used_volume_name] - ) - print(command_stdout) - assert "ERROR: Please check above logs, there was/were 1 error/s." in command_stdout - - -async def test_sh_script_removes_unused_volume( - async_docker_client: Docker, unused_volume_name: str, docker_version: DockerVersion -): - command_stdout = await run_command( - async_docker_client, docker_version, volume_names=[unused_volume_name] - ) - print(command_stdout) - assert "ERROR: Please check above logs, there was/were" not in command_stdout - assert command_stdout == f"{unused_volume_name}\n" - - -async def test_sh_script_no_error_if_volume_does_not_exist( - async_docker_client: Docker, missing_volume_name: str, docker_version: DockerVersion -): - command_stdout = await run_command( - async_docker_client, docker_version, volume_names=[missing_volume_name] - ) - print(command_stdout) - assert "ERROR: Please check above logs, there was/were" not in command_stdout - - -@pytest.mark.parametrize( - "docker_version", - [ - "20.10.17", - "20.10.17+azure-1-dind", # github workers - "20.10.17.", - "20.10.17asdjasjsaddas", - ], -) -def test_docker_version_strips_unwanted(docker_version: str): - assert parse_obj_as(DockerVersion, docker_version) == "20.10.17" - - -@pytest.mark.parametrize( - "invalid_docker_version", - [ - "nope", - ".20.10.17.", - ".20.10.17", - ], -) -def test_docker_version_invalid(invalid_docker_version: str): - with pytest.raises(ValueError): - parse_obj_as(DockerVersion, invalid_docker_version) diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_observer.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_observer.py index fe95864381d..19ba5f72bcf 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_observer.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_observer.py @@ -2,16 +2,16 @@ # pylint:disable=redefined-outer-name # pylint:disable=unused-argument -from typing import Optional +from collections.abc import AsyncIterator from unittest.mock import AsyncMock import pytest +from faker import Faker from fastapi import FastAPI -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_envs import EnvVarsDict, setenvs_from_dict +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.schemas.dynamic_services import SchedulerData +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.api_client import ( setup, shutdown, @@ -35,7 +35,9 @@ def disable_observation(mocker: MockerFixture) -> None: @pytest.fixture -def mock_are_sidecar_and_proxy_services_present(mocker: MockerFixture) -> None: +def mock_are_sidecar_and_proxy_services_present( + mocker: MockerFixture, +) -> None: mocker.patch( "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer.are_sidecar_and_proxy_services_present", autospec=True, @@ -64,9 +66,11 @@ def 
mock_events(mocker: MockerFixture) -> None: @pytest.fixture def mock_env( + disable_postgres: None, docker_swarm: None, mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, ) -> None: setenvs_from_dict( monkeypatch, @@ -74,16 +78,11 @@ def mock_env( "SIMCORE_SERVICES_NETWORK_NAME": "test_network", "DIRECTOR_HOST": "mocked_out", "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "true", - "S3_ENDPOINT": "endpoint", - "S3_ACCESS_KEY": "access_key", - "S3_SECRET_KEY": "secret_key", - "S3_BUCKET_NAME": "bucket_name", - "S3_SECURE": "false", - "DIRECTOR_V2_POSTGRES_ENABLED": "false", - "POSTGRES_HOST": "test", - "POSTGRES_USER": "test", - "POSTGRES_PASSWORD": "test", - "POSTGRES_DB": "test", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), }, ) @@ -91,13 +90,15 @@ def mock_env( @pytest.fixture def mocked_app(mock_env: None) -> FastAPI: app = FastAPI() - app.state.settings = AppSettings() + app.state.settings = AppSettings.create_from_envs() app.state.rabbitmq_client = AsyncMock() return app @pytest.fixture -async def dynamic_sidecar_scheduler(mocked_app: FastAPI) -> DynamicSidecarsScheduler: +async def dynamic_sidecar_scheduler( + mocked_app: FastAPI, +) -> AsyncIterator[DynamicSidecarsScheduler]: await setup_scheduler(mocked_app) await setup(mocked_app) @@ -113,21 +114,21 @@ def _is_observation_task_present( ) -> bool: return ( scheduler_data_from_http_request.service_name - in dynamic_sidecar_scheduler._scheduler._service_observation_task + in dynamic_sidecar_scheduler.scheduler._service_observation_task # noqa: SLF001 ) -@pytest.mark.parametrize("can_save", [None, False, True]) +@pytest.mark.parametrize("can_save", [False, True]) async def test_regression_break_endless_loop_cancellation_edge_case( disable_observation: None, mock_are_sidecar_and_proxy_services_present: None, mock_events: None, dynamic_sidecar_scheduler: DynamicSidecarsScheduler, scheduler_data_from_http_request: SchedulerData, - can_save: Optional[bool], + can_save: bool | None, ): # in this situation the scheduler would never end loops forever - await dynamic_sidecar_scheduler._scheduler._add_service( + await dynamic_sidecar_scheduler.scheduler.add_service_from_scheduler_data( scheduler_data_from_http_request ) @@ -154,9 +155,11 @@ async def test_regression_break_endless_loop_cancellation_edge_case( is True ) - await _apply_observation_cycle( - dynamic_sidecar_scheduler, scheduler_data_from_http_request - ) + # requires an extra pass to remove the service + for _ in range(3): + await _apply_observation_cycle( + dynamic_sidecar_scheduler, scheduler_data_from_http_request + ) assert ( _is_observation_task_present( diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py index bc2d7848bc5..77c1e033ef6 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler.py @@ -5,29 +5,29 @@ import logging import re -import urllib.parse +from collections.abc import AsyncGenerator, Awaitable, Callable, Iterator from contextlib import asynccontextmanager, contextmanager -from typing import AsyncGenerator, Awaitable, Callable, Iterator +from typing import Final from unittest.mock import AsyncMock import pytest import respx from faker import Faker from fastapi import FastAPI -from 
models_library.service_settings_labels import SimcoreServiceLabels -from pytest import LogCaptureFixture, MonkeyPatch +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.services_enums import ServiceState +from models_library.wallets import WalletID +from pydantic import NonNegativeFloat from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from respx.router import MockRouter -from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.schemas.dynamic_services import ( +from simcore_service_director_v2.models.dynamic_services_scheduler import ( + DockerContainerInspect, DynamicSidecarStatus, - RunningDynamicServiceDetails, SchedulerData, - ServiceState, -) -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - DockerContainerInspect, + ServiceName, ) from simcore_service_director_v2.modules.dynamic_sidecar.errors import ( DynamicSidecarError, @@ -37,7 +37,6 @@ DynamicSidecarsScheduler, ) from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._events import ( - REGISTERED_EVENTS, DynamicSchedulerEvent, ) from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer import ( @@ -46,10 +45,13 @@ from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler import ( Scheduler, ) +from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler_utils import ( + create_model_from_scheduler_data, +) # running scheduler at a hight rate to stress out the system # and ensure faster tests -TEST_SCHEDULER_INTERVAL_SECONDS = 0.1 +_TEST_SCHEDULER_INTERVAL_SECONDS: Final[NonNegativeFloat] = 0.1 log = logging.getLogger(__name__) @@ -74,7 +76,7 @@ def _mock_containers_docker_status( rf"^http://{scheduler_data.service_name}:{scheduler_data.port}/health" ), name="health", - ).respond(json=dict(is_healthy=True, error=None)) + ).respond(json={"is_healthy": True, "error": None}) mock.post( get_url(service_endpoint, "/v1/containers:down"), name="begin_service_destruction", @@ -91,11 +93,11 @@ async def _assert_get_dynamic_services_mocked( expected_status: str, ) -> AsyncGenerator[RunningDynamicServiceDetails, None]: with _mock_containers_docker_status(scheduler_data): - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) # put mocked data scheduler_data.dynamic_sidecar.containers_inspect = [ DockerContainerInspect.from_container( - dict(State=dict(Status=expected_status), Name="", Id="") + {"State": {"Status": expected_status}, "Name": "", "Id": ""} ) ] @@ -104,81 +106,71 @@ async def _assert_get_dynamic_services_mocked( yield stack_status - await scheduler.mark_service_for_removal(scheduler_data.node_uuid, True) - assert scheduler_data.service_name in scheduler._scheduler._to_observe - await scheduler._scheduler.remove_service_from_observation( + await scheduler.mark_service_for_removal( + scheduler_data.node_uuid, can_save=True + ) + assert ( + scheduler_data.service_name + in scheduler.scheduler._to_observe # noqa: SLF001 + ) + await scheduler.scheduler.remove_service_from_observation( scheduler_data.node_uuid ) - assert scheduler_data.service_name not in scheduler._scheduler._to_observe + assert ( + scheduler_data.service_name + not in scheduler.scheduler._to_observe # noqa: SLF001 + ) @pytest.fixture def mock_env( + mock_exclusive: None, + 
disable_postgres: None, disable_rabbitmq: None, mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, simcore_services_network_name: str, mock_docker_api: None, + faker: Faker, ) -> None: monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", simcore_services_network_name) monkeypatch.setenv("DIRECTOR_HOST", "mocked_out") monkeypatch.setenv( - "DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS", - str(TEST_SCHEDULER_INTERVAL_SECONDS), + "DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL", f"{_TEST_SCHEDULER_INTERVAL_SECONDS}" ) monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "false") - monkeypatch.setenv("POSTGRES_HOST", "test") - monkeypatch.setenv("POSTGRES_USER", "test") - monkeypatch.setenv("POSTGRES_PASSWORD", "test") - monkeypatch.setenv("POSTGRES_DB", "test") - - -@pytest.fixture -def mocked_director_v0( - minimal_config: AppSettings, scheduler_data: SchedulerData -) -> Iterator[MockRouter]: - endpoint = minimal_config.DIRECTOR_V0.endpoint - - with respx.mock as mock: - mock.get( - re.compile( - rf"^{endpoint}/services/{urllib.parse.quote_plus(scheduler_data.key)}/{scheduler_data.version}/labels" - ), - name="service labels", - ).respond( - json={"data": SimcoreServiceLabels.Config.schema_extra["examples"][0]} - ) - yield mock + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) @pytest.fixture -def mocked_dynamic_scheduler_events() -> None: +def mocked_dynamic_scheduler_events(mocker: MockerFixture) -> None: class AlwaysTriggersDynamicSchedulerEvent(DynamicSchedulerEvent): @classmethod async def will_trigger( - cls, app: FastAPI, scheduler_data: SchedulerData + cls, + app: FastAPI, # noqa: ARG003 + scheduler_data: SchedulerData, # noqa: ARG003 ) -> bool: return True @classmethod - async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: + async def action( + cls, + app: FastAPI, # noqa: ARG003 + scheduler_data: SchedulerData, # noqa: ARG003 + ) -> None: message = f"{cls.__name__} action triggered" log.warning(message) - test_defined_scheduler_events: list[type[DynamicSchedulerEvent]] = [ - AlwaysTriggersDynamicSchedulerEvent - ] - # replace REGISTERED EVENTS - REGISTERED_EVENTS.clear() - for event in test_defined_scheduler_events: - REGISTERED_EVENTS.append(event) + mocker.patch( + "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer.REGISTERED_EVENTS", + [AlwaysTriggersDynamicSchedulerEvent], + ) @pytest.fixture @@ -196,7 +188,7 @@ def mocked_api_client(scheduler_data: SchedulerData) -> Iterator[MockRouter]: service_endpoint = scheduler_data.endpoint with respx.mock as mock: mock.get(get_url(service_endpoint, "/health"), name="is_healthy").respond( - json=dict(is_healthy=True) + json={"is_healthy": True} ) mock.post( get_url(service_endpoint, "/v1/containers:down"), @@ -207,30 +199,20 @@ def mocked_api_client(scheduler_data: SchedulerData) -> Iterator[MockRouter]: @pytest.fixture -def mock_service_running(mock_docker_api, mocker: MockerFixture) -> Iterator[AsyncMock]: - mock = mocker.patch( - 
"simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler.get_dynamic_sidecar_state", +def mock_service_running(mock_docker_api, mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler_utils.get_dynamic_sidecar_state", return_value=(ServiceState.RUNNING, ""), ) - yield mock - @pytest.fixture -def mock_update_label(mocker: MockerFixture) -> Iterator[None]: +def mock_update_label(mocker: MockerFixture) -> None: mocker.patch( "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler.update_scheduler_data_label", return_value=None, ) - yield None - - -@pytest.fixture -def mock_max_status_api_duration(monkeypatch: MonkeyPatch) -> Iterator[None]: - monkeypatch.setenv("DYNAMIC_SIDECAR_STATUS_API_TIMEOUT_S", "0.0001") - yield - @pytest.fixture def disabled_scheduler_background_task(mocker: MockerFixture): @@ -244,10 +226,10 @@ def disabled_scheduler_background_task(mocker: MockerFixture): async def manually_trigger_scheduler( scheduler: DynamicSidecarsScheduler, scheduler_data: SchedulerData ) -> Callable[[], Awaitable[None]]: - async def _triggerer() -> None: + async def _coroutine() -> None: await _apply_observation_cycle(scheduler, scheduler_data) - return _triggerer + return _coroutine @pytest.mark.parametrize("with_observation_cycle", [True, False]) @@ -262,22 +244,28 @@ async def test_scheduler_add_remove( mock_update_label: None, with_observation_cycle: bool, ) -> None: - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) if with_observation_cycle: await manually_trigger_scheduler() - await scheduler.mark_service_for_removal(scheduler_data.node_uuid, True) + await scheduler.mark_service_for_removal(scheduler_data.node_uuid, can_save=True) if with_observation_cycle: await manually_trigger_scheduler() - assert scheduler_data.service_name in scheduler._scheduler._to_observe + assert ( + scheduler_data.service_name in scheduler.scheduler._to_observe # noqa: SLF001 + ) - await scheduler._scheduler.remove_service_from_observation(scheduler_data.node_uuid) + await scheduler.scheduler.remove_service_from_observation(scheduler_data.node_uuid) if with_observation_cycle: await manually_trigger_scheduler() - assert scheduler_data.service_name not in scheduler._scheduler._to_observe + assert ( + scheduler_data.service_name + not in scheduler.scheduler._to_observe # noqa: SLF001 + ) +@pytest.mark.flaky(max_runs=3) async def test_scheduler_removes_partially_started_services( disabled_scheduler_background_task: None, manually_trigger_scheduler: Callable[[], Awaitable[None]], @@ -287,7 +275,7 @@ async def test_scheduler_removes_partially_started_services( mock_docker_api: None, ) -> None: await manually_trigger_scheduler() - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) scheduler_data.dynamic_sidecar.were_containers_created = True await manually_trigger_scheduler() @@ -301,7 +289,7 @@ async def test_scheduler_is_failing( mocked_dynamic_scheduler_events: None, ) -> None: await manually_trigger_scheduler() - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) scheduler_data.dynamic_sidecar.status.current = DynamicSidecarStatus.FAILING await manually_trigger_scheduler() @@ -312,12 +300,10 @@ async def test_scheduler_health_timing_out( 
manually_trigger_scheduler: Callable[[], Awaitable[None]], scheduler: DynamicSidecarsScheduler, scheduler_data: SchedulerData, - mock_max_status_api_duration: None, mocked_dynamic_scheduler_events: None, ): - await manually_trigger_scheduler() - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) await manually_trigger_scheduler() assert scheduler_data.dynamic_sidecar.is_ready is False @@ -328,9 +314,11 @@ async def test_adding_service_two_times_does_not_raise( scheduler_data: SchedulerData, mocked_dynamic_scheduler_events: None, ): - await scheduler._scheduler._add_service(scheduler_data) - assert scheduler_data.service_name in scheduler._scheduler._to_observe - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) + assert ( + scheduler_data.service_name in scheduler.scheduler._to_observe # noqa: SLF001 + ) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) async def test_collition_at_global_level_raises( @@ -339,11 +327,11 @@ async def test_collition_at_global_level_raises( mocked_dynamic_scheduler_events: None, mock_docker_api: None, ): - scheduler._scheduler._inverse_search_mapping[ + scheduler.scheduler._inverse_search_mapping[ # noqa: SLF001 scheduler_data.node_uuid - ] = "mock_service_name" + ] = ServiceName("mock_service_name") with pytest.raises(DynamicSidecarError) as execinfo: - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) assert "collide" in str(execinfo.value) @@ -354,7 +342,9 @@ async def test_remove_missing_no_error( mock_docker_api: None, ) -> None: with pytest.raises(DynamicSidecarNotFoundError) as execinfo: - await scheduler.mark_service_for_removal(scheduler_data.node_uuid, True) + await scheduler.mark_service_for_removal( + scheduler_data.node_uuid, can_save=True + ) assert "not found" in str(execinfo.value) @@ -367,10 +357,10 @@ async def test_get_stack_status( mock_docker_api: None, ) -> None: await manually_trigger_scheduler() - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) stack_status = await scheduler.get_stack_status(scheduler_data.node_uuid) - assert stack_status == RunningDynamicServiceDetails.from_scheduler_data( + assert stack_status == create_model_from_scheduler_data( node_uuid=scheduler_data.node_uuid, scheduler_data=scheduler_data, service_state=ServiceState.PENDING, @@ -384,9 +374,10 @@ async def test_get_stack_status_missing( mocked_dynamic_scheduler_events: None, mock_docker_api: None, ) -> None: - with pytest.raises(DynamicSidecarNotFoundError) as execinfo: + with pytest.raises( + DynamicSidecarNotFoundError, match=rf"{scheduler_data.node_uuid} not found" + ): await scheduler.get_stack_status(scheduler_data.node_uuid) - assert f"{scheduler_data.node_uuid} not found" in str(execinfo) async def test_get_stack_status_failing_sidecar( @@ -398,10 +389,10 @@ async def test_get_stack_status_failing_sidecar( failing_message = "some_failing_message" scheduler_data.dynamic_sidecar.status.update_failing_status(failing_message) - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) stack_status = await scheduler.get_stack_status(scheduler_data.node_uuid) - assert stack_status == RunningDynamicServiceDetails.from_scheduler_data( + 
assert stack_status == create_model_from_scheduler_data( node_uuid=scheduler_data.node_uuid, scheduler_data=scheduler_data, service_state=ServiceState.FAILED, @@ -420,7 +411,7 @@ async def test_get_stack_status_containers_are_starting( async with _assert_get_dynamic_services_mocked( scheduler, scheduler_data, mock_service_running, expected_status="created" ) as stack_status: - assert stack_status == RunningDynamicServiceDetails.from_scheduler_data( + assert stack_status == create_model_from_scheduler_data( node_uuid=scheduler_data.node_uuid, scheduler_data=scheduler_data, service_state=ServiceState.STARTING, @@ -439,7 +430,7 @@ async def test_get_stack_status_ok( async with _assert_get_dynamic_services_mocked( scheduler, scheduler_data, mock_service_running, expected_status="running" ) as stack_status: - assert stack_status == RunningDynamicServiceDetails.from_scheduler_data( + assert stack_status == create_model_from_scheduler_data( node_uuid=scheduler_data.node_uuid, scheduler_data=scheduler_data, service_state=ServiceState.RUNNING, @@ -456,17 +447,17 @@ def mocked_app() -> AsyncMock: async def test_regression_remove_service_from_observation( mocked_app: AsyncMock, faker: Faker, - caplog_debug_level: LogCaptureFixture, + caplog_debug_level: pytest.LogCaptureFixture, missing_to_observe_entry: bool, ): scheduler = Scheduler(mocked_app) # emulate service was previously added node_uuid = faker.uuid4(cast_to=None) - service_name = f"service_{node_uuid}" - scheduler._inverse_search_mapping[node_uuid] = service_name + service_name = ServiceName(f"service_{node_uuid}") + scheduler._inverse_search_mapping[node_uuid] = service_name # noqa: SLF001 if not missing_to_observe_entry: - scheduler._to_observe[service_name] = AsyncMock() + scheduler._to_observe[service_name] = AsyncMock() # noqa: SLF001 await scheduler.remove_service_from_observation(node_uuid) # check log message @@ -474,3 +465,49 @@ async def test_regression_remove_service_from_observation( if missing_to_observe_entry: assert f"Unexpected: '{service_name}' not found in" in caplog_debug_level.text + + +@pytest.mark.parametrize("call_count", [1, 10]) +async def test_mark_all_services_in_wallet_for_removal( + disabled_scheduler_background_task: None, + scheduler: DynamicSidecarsScheduler, + scheduler_data: SchedulerData, + mocked_dynamic_scheduler_events: None, + faker: Faker, + call_count: int, +) -> None: + for wallet_id in [WalletID(1), WalletID(2)]: + for _ in range(2): + new_scheduler_data = scheduler_data.model_copy(deep=True) + new_scheduler_data.node_uuid = faker.uuid4(cast_to=None) + new_scheduler_data.service_name = ServiceName( + f"fake_{new_scheduler_data.node_uuid}" + ) + assert new_scheduler_data.wallet_info + new_scheduler_data.wallet_info.wallet_id = wallet_id + + await scheduler.scheduler.add_service_from_scheduler_data( + new_scheduler_data + ) + + assert len(scheduler.scheduler._to_observe) == 4 # noqa: SLF001 + # pylint: disable=redefined-argument-from-local + for scheduler_data in scheduler.scheduler._to_observe.values(): # noqa: SLF001 + assert scheduler_data.dynamic_sidecar.service_removal_state.can_remove is False + + for _ in range(call_count): + await scheduler.scheduler.mark_all_services_in_wallet_for_removal( + wallet_id=WalletID(1) + ) + + for scheduler_data in scheduler.scheduler._to_observe.values(): # noqa: SLF001 + assert scheduler_data.wallet_info + wallet_id = scheduler_data.wallet_info.wallet_id + can_remove = scheduler_data.dynamic_sidecar.service_removal_state.can_remove + match wallet_id: + case 1: + 
assert can_remove is True + case 2: + assert can_remove is False + case _: + pytest.fail("unexpected case") diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler__core__scheduler_utils.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler__core__scheduler_utils.py new file mode 100644 index 00000000000..2a004a29bbb --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler__core__scheduler_utils.py @@ -0,0 +1,34 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import pytest +from simcore_service_director_v2.models.dynamic_services_scheduler import ( + DynamicSidecarStatus, + SchedulerData, +) +from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._scheduler_utils import ( + LOG_MSG_MANUAL_INTERVENTION, + service_awaits_manual_interventions, +) + + +@pytest.fixture +def scheduler_data_manual_intervention(scheduler_data: SchedulerData) -> SchedulerData: + scheduler_data.dynamic_sidecar.status.current = DynamicSidecarStatus.FAILING + scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error = True + return scheduler_data + + +async def test_service_awaits_manual_interventions_logs_once( + caplog: pytest.LogCaptureFixture, scheduler_data_manual_intervention: SchedulerData +): + caplog.clear() + + for _ in range(10): + await service_awaits_manual_interventions(scheduler_data_manual_intervention) + + # message is only logged once + assert caplog.text.count(LOG_MSG_MANUAL_INTERVENTION) == 1 diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_core_events.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_core_events.py index d067bcb9ed5..2bd05535e6a 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_core_events.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_core_events.py @@ -2,23 +2,24 @@ # pylint:disable=unused-argument import asyncio +import logging +from collections.abc import Iterable from typing import Final import pytest +from faker import Faker from fastapi import FastAPI from pydantic import PositiveFloat, PositiveInt -from pytest import LogCaptureFixture, MonkeyPatch from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict -from simcore_service_director_v2.models.schemas.dynamic_services import SchedulerData -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( +from servicelib.exception_utils import _SKIPS_MESSAGE +from servicelib.fastapi.http_client_thin import BaseHttpClientError +from simcore_service_director_v2.models.dynamic_services_scheduler import ( ContainerState, DockerContainerInspect, DockerStatus, -) -from simcore_service_director_v2.modules.dynamic_sidecar.api_client import ( - BaseClientHTTPError, + SchedulerData, ) from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core import _events @@ -30,17 +31,19 @@ @pytest.fixture def mock_env( + disable_postgres: None, mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, ) -> None: setenvs_from_dict( monkeypatch, { - "S3_ENDPOINT": "", - "S3_ACCESS_KEY": "", - "S3_SECRET_KEY": "", - 
"S3_BUCKET_NAME": "", - "S3_SECURE": "false", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), "POSTGRES_HOST": "", "POSTGRES_USER": "", "POSTGRES_PASSWORD": "", @@ -51,33 +54,37 @@ def mock_env( @pytest.fixture -def mock_dynamic_sidecar_client_always_fail(mocker: MockerFixture) -> None: +def mock_sidecars_client_always_fail(mocker: MockerFixture) -> None: class MockedObj: @classmethod async def containers_inspect(cls, *args, **kwargs) -> None: - raise BaseClientHTTPError("will always fail") + _ = args + _ = kwargs + raise BaseHttpClientError(message="will always fail") - mocker.patch.object(_events, "get_dynamic_sidecar_client", return_value=MockedObj()) + mocker.patch.object(_events, "get_sidecars_client", return_value=MockedObj()) @pytest.fixture -def mock_dynamic_sidecar_client_stops_failing(mocker: MockerFixture) -> None: +def mock_sidecars_client_stops_failing(mocker: MockerFixture) -> None: class MockedObj: def __init__(self) -> None: self.counter = 0 async def containers_inspect(self, *args, **kwargs) -> None: + _ = args + _ = kwargs self.counter += 1 if self.counter < STEPS / 2: - raise BaseClientHTTPError("will always fail") + raise BaseHttpClientError(message="will always fail") - mocker.patch.object(_events, "get_dynamic_sidecar_client", return_value=MockedObj()) + mocker.patch.object(_events, "get_sidecars_client", return_value=MockedObj()) @pytest.fixture def docker_container_inspect() -> DockerContainerInspect: return DockerContainerInspect( - status=DockerStatus.dead, container_state=ContainerState(**{}), name="", id="" + status=DockerStatus.dead, container_state=ContainerState(), name="", id="" ) @@ -89,30 +96,39 @@ def scheduler_data( return scheduler_data -_CHECK_LOG_EXCEPTION_IS_SKIPPED = "skip(s) of exception" +@pytest.fixture() +def caplog_debug( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: + with caplog.at_level( + logging.DEBUG, + ): + yield caplog async def test_event_get_status_network_connectivity( - mock_dynamic_sidecar_client_always_fail: None, + mock_sidecars_client_always_fail: None, minimal_app: FastAPI, scheduler_data: SchedulerData, - caplog_info_level: LogCaptureFixture, + caplog_debug: pytest.LogCaptureFixture, ): - with pytest.raises(BaseClientHTTPError): + caplog_debug.clear() + with pytest.raises(BaseHttpClientError): # noqa: PT012 for _ in range(REPEAT_COUNT): await _events.GetStatus.action(minimal_app, scheduler_data) await asyncio.sleep(SLEEP_BETWEEN_CALLS) - assert caplog_info_level.text.count(_CHECK_LOG_EXCEPTION_IS_SKIPPED) > 1 + assert caplog_debug.text.count(_SKIPS_MESSAGE) > 1 async def test_event_get_status_recovers_after_error( - mock_dynamic_sidecar_client_stops_failing: None, + mock_sidecars_client_stops_failing: None, minimal_app: FastAPI, scheduler_data: SchedulerData, - caplog_info_level: LogCaptureFixture, + caplog_debug: pytest.LogCaptureFixture, ): + caplog_debug.clear() for _ in range(REPEAT_COUNT): await _events.GetStatus.action(minimal_app, scheduler_data) await asyncio.sleep(SLEEP_BETWEEN_CALLS) - assert caplog_info_level.text.count(_CHECK_LOG_EXCEPTION_IS_SKIPPED) >= 1 + assert caplog_debug.text.count(_SKIPS_MESSAGE) >= 1 diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_task.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_task.py index 4a90258387d..fd328bd66aa 100644 --- 
a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_task.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_scheduler_task.py @@ -4,24 +4,25 @@ import asyncio import re +from collections.abc import Iterator from dataclasses import dataclass -from typing import Final, Iterator +from typing import Final from unittest.mock import AsyncMock import httpx import pytest import respx +from faker import Faker from fastapi import FastAPI -from pytest import FixtureRequest, MonkeyPatch +from models_library.docker import DockerNodeID +from pydantic import TypeAdapter from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict from respx.router import MockRouter -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - SchedulerData, -) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import ( - DynamicSidecarClient, + SidecarsClient, ) from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core import ( _events_utils, @@ -42,18 +43,21 @@ @pytest.fixture def mock_env( + disable_postgres: None, disable_rabbitmq: None, mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, simcore_services_network_name: str, docker_swarm: None, mock_docker_api: None, + faker: Faker, ) -> None: disabled_services_envs = { - "S3_ENDPOINT": "", - "S3_ACCESS_KEY": "", - "S3_SECRET_KEY": "", - "S3_BUCKET_NAME": "", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), "POSTGRES_HOST": "", "POSTGRES_USER": "", "POSTGRES_PASSWORD": "", @@ -63,14 +67,16 @@ def mock_env( monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") monkeypatch.setenv( - "DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL_SECONDS", + "DIRECTOR_V2_DYNAMIC_SCHEDULER_INTERVAL", f"{SCHEDULER_INTERVAL_SECONDS}", ) @pytest.fixture def scheduler_data(scheduler_data_from_http_request: SchedulerData) -> SchedulerData: - scheduler_data_from_http_request.docker_node_id = "test_docker_node_id" + scheduler_data_from_http_request.dynamic_sidecar.docker_node_id = TypeAdapter( + DockerNodeID + ).validate_python("testdockernodeid") return scheduler_data_from_http_request @@ -87,17 +93,17 @@ def mock_containers_docker_status( name="containers_docker_status", ).mock(httpx.Response(200, json={})) mock.get(f"{service_endpoint}/health", name="is_healthy").respond( - json=dict(is_healthy=True) + json={"is_healthy": True} ) yield mock @pytest.fixture -def mock_dynamic_sidecar_client(mocker: MockerFixture) -> None: - mocker.patch.object(DynamicSidecarClient, "push_service_output_ports") - mocker.patch.object(DynamicSidecarClient, "save_service_state") - mocker.patch.object(DynamicSidecarClient, "stop_service") +def mock_sidecars_client(mocker: MockerFixture) -> None: + mocker.patch.object(SidecarsClient, "push_service_output_ports") + mocker.patch.object(SidecarsClient, "save_service_state") + mocker.patch.object(SidecarsClient, "stop_service") @pytest.fixture @@ -113,7 +119,7 @@ async def _return_false(*args, **kwargs) -> bool: @pytest.fixture def scheduler( mock_containers_docker_status: MockRouter, - mock_dynamic_sidecar_client: None, + mock_sidecars_client: None, 
mock_is_dynamic_sidecar_stack_missing: None, minimal_app: FastAPI, ) -> DynamicSidecarsScheduler: @@ -130,7 +136,7 @@ def increment(self) -> None: @pytest.fixture(params=[True, False]) -def error_raised_by_saving_state(request: FixtureRequest) -> bool: +def error_raised_by_saving_state(request: pytest.FixtureRequest) -> bool: return request.param # type: ignore @@ -193,7 +199,8 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error = ( use_case.wait_for_manual_intervention_after_error ) - raise RuntimeError("Failed as planned") + msg = "Failed as planned" + raise RuntimeError(msg) test_defined_scheduler_events: list[type[DynamicSchedulerEvent]] = [ AlwaysTriggersDynamicSchedulerEvent @@ -208,12 +215,14 @@ async def action(cls, app: FastAPI, scheduler_data: SchedulerData) -> None: @pytest.fixture -def mock_remove_calls(mocker: MockerFixture) -> None: - mocker.patch.object(_events_utils, "remove_volumes_from_node") +def mock_rpc_calls(mocker: MockerFixture, minimal_app: FastAPI) -> None: + minimal_app.state.rabbitmq_rpc_client = AsyncMock() + mocker.patch.object(_events_utils, "remove_volumes_without_backup_for_service") + mocker.patch.object(_events_utils, "force_container_cleanup") @pytest.fixture(params=[True, False]) -def node_present_in_db(request: FixtureRequest) -> bool: +def node_present_in_db(request: pytest.FixtureRequest) -> bool: return request.param @@ -229,6 +238,7 @@ def mock_projects_repository(mocker: MockerFixture, node_present_in_db: bool) -> async def test_skip_observation_cycle_after_error( + mock_exclusive: None, docker_swarm: None, minimal_app: FastAPI, mock_projects_repository: None, @@ -237,14 +247,15 @@ async def test_skip_observation_cycle_after_error( mocked_dynamic_scheduler_events: ACounter, error_raised_by_saving_state: bool, use_case: UseCase, - mock_remove_calls: None, + mock_rpc_calls: None, ): + # add a task, emulate an error make sure no observation cycle is # being triggered again assert mocked_dynamic_scheduler_events.count == 0 - await scheduler._scheduler._add_service(scheduler_data) + await scheduler.scheduler.add_service_from_scheduler_data(scheduler_data) # check it is being tracked - assert scheduler_data.node_uuid in scheduler._scheduler._inverse_search_mapping + assert scheduler_data.node_uuid in scheduler.scheduler._inverse_search_mapping # ensure observation cycle triggers a lot await asyncio.sleep(SCHEDULER_INTERVAL_SECONDS * 10) @@ -257,13 +268,13 @@ async def test_skip_observation_cycle_after_error( if use_case.outcome_service_removed: assert ( scheduler_data.node_uuid - not in scheduler._scheduler._inverse_search_mapping + not in scheduler.scheduler._inverse_search_mapping ) else: assert ( - scheduler_data.node_uuid in scheduler._scheduler._inverse_search_mapping + scheduler_data.node_uuid in scheduler.scheduler._inverse_search_mapping ) else: assert ( - scheduler_data.node_uuid not in scheduler._scheduler._inverse_search_mapping + scheduler_data.node_uuid not in scheduler.scheduler._inverse_search_mapping ) diff --git a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_volumes_resolver.py b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_volumes_resolver.py index 78d7b5fb526..4acacd3a4e4 100644 --- a/services/director-v2/tests/unit/test_modules_dynamic_sidecar_volumes_resolver.py +++ b/services/director-v2/tests/unit/test_modules_dynamic_sidecar_volumes_resolver.py @@ -9,8 +9,11 @@ import aiodocker import pytest from 
faker import Faker +from models_library.api_schemas_directorv2.services import ( + CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME, +) from models_library.projects import ProjectID -from models_library.services import RunID +from models_library.services import ServiceRunID from models_library.users import UserID from simcore_service_director_v2.modules.dynamic_sidecar.volumes import ( DynamicSidecarVolumesPathsResolver, @@ -33,8 +36,8 @@ def state_paths() -> list[Path]: @pytest.fixture -def run_id(faker: Faker) -> RunID: - return faker.uuid4(cast_to=None) +def service_run_id() -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_dynamic() @pytest.fixture @@ -42,16 +45,11 @@ def project_id(faker: Faker) -> ProjectID: return faker.uuid4(cast_to=None) -@pytest.fixture -def user_id() -> UserID: - return 42 - - @pytest.fixture def expected_volume_config( swarm_stack_name: str, node_uuid: UUID, - run_id: RunID, + service_run_id: ServiceRunID, project_id: ProjectID, user_id: UserID, ) -> Callable[[str, str], dict[str, Any]]: @@ -61,14 +59,15 @@ def _callable(source: str, target: str) -> dict[str, Any]: "Target": target, "Type": "volume", "VolumeOptions": { + "DriverConfig": None, "Labels": { "source": source, - "run_id": f"{run_id}", + "run_id": service_run_id, "study_id": f"{project_id}", "user_id": f"{user_id}", "swarm_stack_name": swarm_stack_name, "node_uuid": f"{node_uuid}", - } + }, }, } @@ -80,7 +79,7 @@ def test_expected_paths( node_uuid: UUID, state_paths: list[Path], expected_volume_config: Callable[[str, str], dict[str, Any]], - run_id: RunID, + service_run_id: ServiceRunID, project_id: ProjectID, user_id: UserID, ) -> None: @@ -88,26 +87,38 @@ def test_expected_paths( inputs_path = Path(fake.file_path(depth=3)).parent assert DynamicSidecarVolumesPathsResolver.mount_entry( - swarm_stack_name, inputs_path, node_uuid, run_id, project_id, user_id + swarm_stack_name, + inputs_path, + node_uuid, + service_run_id, + project_id, + user_id, + None, ) == expected_volume_config( - source=f"dyv_{run_id}_{node_uuid}_{f'{inputs_path}'.replace('/', '_')[::-1]}", + source=f"dyv_{service_run_id}_{node_uuid}_{f'{inputs_path}'.replace('/', '_')[::-1]}", target=str(Path("/dy-volumes") / inputs_path.relative_to("/")), ) outputs_path = Path(fake.file_path(depth=3)).parent assert DynamicSidecarVolumesPathsResolver.mount_entry( - swarm_stack_name, outputs_path, node_uuid, run_id, project_id, user_id + swarm_stack_name, + outputs_path, + node_uuid, + service_run_id, + project_id, + user_id, + None, ) == expected_volume_config( - source=f"dyv_{run_id}_{node_uuid}_{f'{outputs_path}'.replace('/', '_')[::-1]}", + source=f"dyv_{service_run_id}_{node_uuid}_{f'{outputs_path}'.replace('/', '_')[::-1]}", target=str(Path("/dy-volumes") / outputs_path.relative_to("/")), ) for path in state_paths: name_from_path = f"{path}".replace(os.sep, "_")[::-1] assert DynamicSidecarVolumesPathsResolver.mount_entry( - swarm_stack_name, path, node_uuid, run_id, project_id, user_id + swarm_stack_name, path, node_uuid, service_run_id, project_id, user_id, None ) == expected_volume_config( - source=f"dyv_{run_id}_{node_uuid}_{name_from_path}", + source=f"dyv_{service_run_id}_{node_uuid}_{name_from_path}", target=str(Path("/dy-volumes/") / path.relative_to("/")), ) @@ -131,7 +142,7 @@ async def test_unique_name_creation_and_removal(faker: Faker): unique_volume_name = DynamicSidecarVolumesPathsResolver.source( path=Path("/some/random/path/to/a/workspace/folder"), node_uuid=faker.uuid4(cast_to=None), - 
run_id=faker.uuid4(cast_to=None), + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_dynamic(), ) await assert_creation_and_removal(unique_volume_name) @@ -139,15 +150,20 @@ async def test_unique_name_creation_and_removal(faker: Faker): def test_volumes_get_truncated_as_expected(faker: Faker): node_uuid = faker.uuid4(cast_to=None) - run_id = faker.uuid4(cast_to=None) - assert node_uuid != run_id + service_run_id = ServiceRunID.get_resource_tracking_run_id_for_dynamic() + assert node_uuid != service_run_id unique_volume_name = DynamicSidecarVolumesPathsResolver.source( path=Path( - f"/home/user/a-{'-'.join(['very' for _ in range(31)])}-long-home-path/workspace" + f"/home/user/a-{'-'.join(['very' for _ in range(34)])}-long-home-path/workspace" ), node_uuid=node_uuid, - run_id=run_id, + service_run_id=service_run_id, ) + + # if the assert below fails, the agent will have issues; please check + constant_part = unique_volume_name[: CHARS_IN_VOLUME_NAME_BEFORE_DIR_NAME - 1] + assert constant_part == f"dyv_{service_run_id}_{node_uuid}" + assert len(unique_volume_name) == 255 - assert f"{run_id}" in unique_volume_name + assert f"{service_run_id}" in unique_volume_name assert f"{node_uuid}" in unique_volume_name diff --git a/services/director-v2/tests/unit/test_modules_instrumentation__utils.py b/services/director-v2/tests/unit/test_modules_instrumentation__utils.py new file mode 100644 index 00000000000..8eab58bdec5 --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_instrumentation__utils.py @@ -0,0 +1,10 @@ +import time + +from simcore_service_director_v2.modules.instrumentation._utils import track_duration + + +def test_track_duration(): + with track_duration() as duration: + time.sleep(0.1) + + assert duration.to_float() > 0.1 diff --git a/services/director-v2/tests/unit/test_modules_notifier.py b/services/director-v2/tests/unit/test_modules_notifier.py new file mode 100644 index 00000000000..357edc68af8 --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_notifier.py @@ -0,0 +1,182 @@ +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +from collections.abc import AsyncIterable, Callable +from contextlib import AsyncExitStack, _AsyncGeneratorContextManager +from unittest.mock import AsyncMock + +import pytest +import socketio +from faker import Faker +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_directorv2.notifications import ServiceNoMoreCredits +from models_library.api_schemas_directorv2.socketio import ( + SOCKET_IO_SERVICE_NO_MORE_CREDITS_EVENT, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import NonNegativeInt +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.utils import logged_gather +from settings_library.rabbit import RabbitSettings +from simcore_service_director_v2.core.settings import AppSettings +from simcore_service_director_v2.modules.notifier import ( + publish_shutdown_no_more_credits, +) +from socketio import AsyncServer +from tenacity import AsyncRetrying +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def disable_modules_setup(mock_exclusive: None, mocker: MockerFixture) ->
None: + module_base = "simcore_service_director_v2.core.application" + mocker.patch(f"{module_base}.db.setup", autospec=True, return_value=False) + mocker.patch( + f"{module_base}.resource_usage_tracker_client.setup", + autospec=True, + return_value=False, + ) + + +@pytest.fixture +def mock_env( + disable_modules_setup: None, + monkeypatch: pytest.MonkeyPatch, + mock_env: EnvVarsDict, + rabbit_service: RabbitSettings, + faker: Faker, +) -> EnvVarsDict: + setenvs_from_dict( + monkeypatch, + { + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + "DIRECTOR_ENABLED": "0", + "DIRECTOR_V2_CATALOG": "null", + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": "0", + "COMPUTATIONAL_BACKEND_ENABLED": "0", + "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "1", + }, + ) + return mock_env + + +@pytest.fixture +async def socketio_server( + initialized_app: FastAPI, + socketio_server_factory: Callable[ + [RabbitSettings], _AsyncGeneratorContextManager[AsyncServer] + ], +) -> AsyncIterable[AsyncServer]: + # Same configuration as simcore_service_webserver/socketio/server.py + settings: AppSettings = initialized_app.state.settings + assert settings.DIRECTOR_V2_RABBITMQ + + async with socketio_server_factory(settings.DIRECTOR_V2_RABBITMQ) as server: + yield server + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def room_name(user_id: UserID) -> SocketIORoomStr: + return SocketIORoomStr.from_user_id(user_id) + + +@pytest.fixture +def wallet_id(faker: Faker) -> WalletID: + return faker.pyint() + + +def _get_on_no_more_credits_event( + socketio_client: socketio.AsyncClient, +) -> AsyncMock: + # emulates front-end receiving message + + async def on_no_more_credits(data): + assert ServiceNoMoreCredits.model_validate(data) is not None + + on_event_spy = AsyncMock(wraps=on_no_more_credits) + socketio_client.on(SOCKET_IO_SERVICE_NO_MORE_CREDITS_EVENT, on_event_spy) + + return on_event_spy + + +async def _assert_call_count(mock: AsyncMock, *, call_count: int) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), stop=stop_after_attempt(500), reraise=True + ): + with attempt: + assert mock.call_count == call_count + + +async def test_notifier_publish_message( + socketio_server_events: dict[str, AsyncMock], + initialized_app: FastAPI, + user_id: UserID, + node_id: NodeID, + wallet_id: WalletID, + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], +): + # web server spy events + server_connect = socketio_server_events["connect"] + server_disconnect = socketio_server_events["disconnect"] + server_on_check = socketio_server_events["on_check"] + + number_of_clients: NonNegativeInt = 10 + async with AsyncExitStack() as socketio_frontend_clients: + frontend_clients: list[socketio.AsyncClient] = await logged_gather( + *[ + socketio_frontend_clients.enter_async_context(socketio_client_factory()) + for _ in range(number_of_clients) + ] + ) + await _assert_call_count(server_connect, call_count=number_of_clients) + + # client emits and check it was received + await logged_gather( + *[ + frontend_client.emit("check", data="an_event") + for frontend_client in frontend_clients + ] + ) + await _assert_call_count(server_on_check, call_count=number_of_clients) + + # attach spy to client + no_no_more_credits_events: list[AsyncMock] = [ + _get_on_no_more_credits_event(c) for c in frontend_clients + ] + + # 
server publishes a message + await publish_shutdown_no_more_credits( + initialized_app, user_id=user_id, node_id=node_id, wallet_id=wallet_id + ) + + # check that all clients received it + for on_no_more_credits_event in no_no_more_credits_events: + await _assert_call_count(on_no_more_credits_event, call_count=1) + on_no_more_credits_event.assert_awaited_once_with( + jsonable_encoder( + ServiceNoMoreCredits(node_id=node_id, wallet_id=wallet_id) + ) + ) + + await _assert_call_count(server_disconnect, call_count=number_of_clients * 2) diff --git a/services/director-v2/tests/unit/test_modules_osparc_variables.py b/services/director-v2/tests/unit/test_modules_osparc_variables.py new file mode 100644 index 00000000000..605cb32cb83 --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_osparc_variables.py @@ -0,0 +1,309 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +import json +from collections.abc import AsyncIterable +from contextlib import asynccontextmanager +from copy import deepcopy +from unittest.mock import AsyncMock, Mock + +import pytest +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from models_library.service_settings_labels import ComposeSpecLabelDict +from models_library.services import ServiceKey, ServiceVersion +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.utils.specs_substitution import SubstitutionValue +from models_library.utils.string_substitution import OSPARC_IDENTIFIER_PREFIX +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.faker_compose_specs import generate_fake_docker_compose +from simcore_postgres_database.models.services_environments import VENDOR_SECRET_PREFIX +from simcore_postgres_database.models.users import UserRole +from simcore_service_director_v2.api.dependencies.database import RepoType +from simcore_service_director_v2.modules.osparc_variables import substitutions +from simcore_service_director_v2.modules.osparc_variables.substitutions import ( + _NEW_ENVIRONMENTS, + OsparcSessionVariablesTable, + auto_inject_environments, + resolve_and_substitute_session_variables_in_specs, + substitute_vendor_secrets_in_specs, +) +from simcore_service_director_v2.utils.osparc_variables import ( + ContextDict, + OsparcVariablesTable, + factory_context_getter, + factory_handler, + resolve_variables_from_context, +) + + +@pytest.fixture +def session_context(faker: Faker) -> ContextDict: + return ContextDict( + app=FastAPI(), + service_key=TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/foo" + ), + service_version=TypeAdapter(ServiceVersion).validate_python("1.2.3"), + compose_spec=generate_fake_docker_compose(faker), + product_name=faker.word(), + project_id=faker.uuid4(), + user_id=faker.pyint(), + node_id=faker.uuid4(), + ) + + +@pytest.mark.acceptance_test() +async def test_resolve_session_environs(faker: Faker, session_context: ContextDict): + async def _request_user_role(app: FastAPI, user_id: UserID) -> SubstitutionValue: + print(app, user_id) + await asyncio.sleep(1) + return faker.random_element(elements=list(UserRole)).value + + # REGISTRATION ----- + osparc_variables_table = OsparcVariablesTable() + + # bulk registration + osparc_variables_table.register( + { 
+ "OSPARC_VARIABLE_PRODUCT_NAME": factory_context_getter("product_name"), + "OSPARC_VARIABLE_STUDY_UUID": factory_context_getter("project_id"), + "OSPARC_VARIABLE_USER_ROLE": factory_handler(_request_user_role), + } + ) + + # single entry + osparc_variables_table.register_from_context("OSPARC_VARIABLE_NODE_UUID", "node_id") + + # using decorator + @osparc_variables_table.register_from_handler("OSPARC_VARIABLE_USER_EMAIL") + async def request_user_email(app: FastAPI, user_id: UserID) -> SubstitutionValue: + print(app, user_id) + await asyncio.sleep(1) + return faker.email() + + # Some context given ---------------------------------------------------------- + # TODO: test pre errors handling + # TODO: test errors handling + # TODO: test validation errors handling + # TODO: test timeout error handling + + environs = await resolve_variables_from_context( + osparc_variables_table.copy(), session_context + ) + + assert set(environs.keys()) == set(osparc_variables_table.variables_names()) + + # All values extracted from the context MUST be SubstitutionValue + assert { + key: TypeAdapter(SubstitutionValue).validate_python(value) + for key, value in environs.items() + } + + for osparc_variable_name, context_name in [ + ("OSPARC_VARIABLE_PRODUCT_NAME", "product_name"), + ("OSPARC_VARIABLE_STUDY_UUID", "project_id"), + ("OSPARC_VARIABLE_NODE_UUID", "node_id"), + ]: + assert environs[osparc_variable_name] == session_context[context_name] + + print(json.dumps(environs, indent=1)) + + +@pytest.fixture +def mock_repo_db_engine(mocker: MockerFixture) -> None: + @asynccontextmanager + async def _connect(): + yield + + mocked_engine = AsyncMock() + mocked_engine.connect = _connect + + def _get_repository(app: FastAPI, repo_type: type[RepoType]) -> RepoType: + return repo_type(db_engine=mocked_engine) + + for target in ( + "simcore_service_director_v2.modules.osparc_variables.substitutions.get_repository", + "simcore_service_director_v2.modules.osparc_variables._user.get_repository", + ): + mocker.patch(target, side_effect=_get_repository, autospec=True) + + +@pytest.fixture +def mock_user_repo(mocker: MockerFixture, mock_repo_db_engine: None) -> None: + base = "simcore_service_director_v2.modules.db.repositories.users" + mocker.patch(f"{base}.UsersRepo.get_role", return_value=UserRole("USER")) + mocker.patch(f"{base}.UsersRepo.get_email", return_value="e@ma.il") + + +@pytest.fixture +async def fake_app(faker: Faker) -> AsyncIterable[FastAPI]: + app = FastAPI() + app.state.engine = AsyncMock() + + mock_settings = Mock() + app.state.settings = mock_settings + + substitutions.setup(app) + + async with LifespanManager(app): + yield app + + +@pytest.mark.parametrize("wallet_id", [None, 12]) +async def test_resolve_and_substitute_session_variables_in_specs( + mock_user_repo: None, + mock_osparc_variables_api_auth_rpc: None, + fake_app: FastAPI, + faker: Faker, + wallet_id: WalletID | None, +): + specs = { + "product_name": "${OSPARC_VARIABLE_PRODUCT_NAME}", + "study_uuid": "${OSPARC_VARIABLE_STUDY_UUID}", + "node_id": "${OSPARC_VARIABLE_NODE_ID}", + "user_id": "${OSPARC_VARIABLE_USER_ID}", + "email": "${OSPARC_VARIABLE_USER_EMAIL}", + "user_role": "${OSPARC_VARIABLE_USER_ROLE}", + "api_key": "${OSPARC_VARIABLE_API_KEY}", + "api_secret": "${OSPARC_VARIABLE_API_SECRET}", + "service_run_id": "${OSPARC_VARIABLE_SERVICE_RUN_ID}", + "wallet_id": "${OSPARC_VARIABLE_WALLET_ID}", + } + print("SPECS\n", specs) + + replaced_specs = await resolve_and_substitute_session_variables_in_specs( + fake_app, + specs=specs, + 
user_id=1, + product_name="a_product", + product_api_base_url=faker.url(), + project_id=faker.uuid4(cast_to=None), + node_id=faker.uuid4(cast_to=None), + service_run_id=ServiceRunID("some_run_id"), + wallet_id=wallet_id, + ) + print("REPLACED SPECS\n", replaced_specs) + + assert OSPARC_IDENTIFIER_PREFIX not in f"{replaced_specs}" + assert f"'wallet_id': '{wallet_id}'" in f"{replaced_specs}" + + +@pytest.fixture +def mock_get_vendor_secrets(mocker: MockerFixture, mock_repo_db_engine: None) -> None: + base = "simcore_service_director_v2.modules.db.repositories.services_environments" + mocker.patch( + f"{base}.get_vendor_secrets", + return_value={ + "OSPARC_VARIABLE_VENDOR_SECRET_ONE": 1, + "OSPARC_VARIABLE_VENDOR_SECRET_TWO": "two", + }, + ) + + +async def test_substitute_vendor_secrets_in_specs( + mock_get_vendor_secrets: None, fake_app: FastAPI, faker: Faker +): + specs = { + "vendor_secret_one": "${OSPARC_VARIABLE_VENDOR_SECRET_ONE}", + "vendor_secret_two": "${OSPARC_VARIABLE_VENDOR_SECRET_TWO}", + } + print("SPECS\n", specs) + + replaced_specs = await substitute_vendor_secrets_in_specs( + fake_app, + specs=specs, + product_name="a_product", + service_key=TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/fake" + ), + service_version=TypeAdapter(ServiceVersion).validate_python("0.0.1"), + ) + print("REPLACED SPECS\n", replaced_specs) + + assert VENDOR_SECRET_PREFIX not in f"{replaced_specs}" + + +@pytest.fixture +def compose_spec(): + return { + "version": "3.7", + "services": { + "jupyter-math": { + "environment": [ + "OSPARC_API_KEY=$OSPARC_VARIABLE_API_KEY", + "OSPARC_API_SECRET=$OSPARC_VARIABLE_API_SECRET", + "FOO=33", + ], + "image": "${SIMCORE_REGISTRY}/simcore/services/dynamic/jupyter-math:${SERVICE_VERSION}", + "networks": {"dy-sidecar_10e1b317-de62-44ca-979e-09bf15663834": None}, + "deploy": { + "resources": { + "reservations": {"cpus": "0.1", "memory": "2147483648"}, + "limits": {"cpus": "4.0", "memory": "17179869184"}, + } + }, + "labels": [ + "io.simcore.runtime.cpu-limit=4.0", + "io.simcore.runtime.memory-limit=17179869184", + "io.simcore.runtime.node-id=10e1b317-de62-44ca-979e-09bf15663834", + "io.simcore.runtime.product-name=osparc", + "io.simcore.runtime.project-id=e341df9e-2e38-11ef-894b-0242ac140025", + "io.simcore.runtime.simcore-user-agent=undefined", + "io.simcore.runtime.swarm-stack-name=master-simcore", + "io.simcore.runtime.user-id=1", + ], + } + }, + "networks": { + "dy-sidecar_10e1b317-de62-44ca-979e-09bf15663834": { + "name": "dy-sidecar_10e1b317-de62-44ca-979e-09bf15663834", + "external": True, + "driver": "overlay", + }, + "master-simcore_interactive_services_subnet": { + "name": "master-simcore_interactive_services_subnet", + "external": True, + "driver": "overlay", + }, + }, + } + + +def test_auto_inject_environments_added_to_all_services_in_compose( + compose_spec: ComposeSpecLabelDict, +): + before = deepcopy(compose_spec) + + after = auto_inject_environments(compose_spec) + + assert before != after + assert after == compose_spec + + auto_injected_envs = set(_NEW_ENVIRONMENTS.keys()) + for name, service in compose_spec.get("services", {}).items(): + # all services have environment specs + assert service["environment"], f"expected in {name} service" + + # injected? 
+ for env_name in auto_injected_envs: + assert env_name in str(service["environment"]) + + +def test_auto_inject_environments_are_registered(): + app = FastAPI() + table = OsparcSessionVariablesTable.create(app) + + registered_osparc_variables = set(table.variables_names()) + auto_injected_osparc_variables = {_.lstrip("$") for _ in _NEW_ENVIRONMENTS.values()} + + assert auto_injected_osparc_variables.issubset(registered_osparc_variables) diff --git a/services/director-v2/tests/unit/test_modules_project_networks.py b/services/director-v2/tests/unit/test_modules_project_networks.py index 962a76ab32c..2c233ad4297 100644 --- a/services/director-v2/tests/unit/test_modules_project_networks.py +++ b/services/director-v2/tests/unit/test_modules_project_networks.py @@ -3,12 +3,12 @@ import json from pathlib import Path -from typing import Any, Iterable +from typing import Any from unittest.mock import AsyncMock, call from uuid import UUID, uuid4 import pytest -from models_library.projects import ProjectID, Workbench +from models_library.projects import NodesDict, ProjectID from models_library.projects_networks import NetworksWithAliases from models_library.projects_nodes import Node from pydantic import BaseModel, PositiveInt @@ -40,8 +40,8 @@ def using( attach: list[Any], ) -> "Example": return cls( - existing_networks_with_aliases=NetworksWithAliases.parse_obj(existing), - new_networks_with_aliases=NetworksWithAliases.parse_obj(new), + existing_networks_with_aliases=NetworksWithAliases.model_validate(existing), + new_networks_with_aliases=NetworksWithAliases.model_validate(new), expected_calls=MockedCalls(detach=detach, attach=attach), ) @@ -159,7 +159,7 @@ def mock_scheduler() -> AsyncMock: @pytest.fixture -def mock_director_v0_client() -> AsyncMock: +def mock_catalog_client() -> AsyncMock: return AsyncMock() @@ -174,17 +174,17 @@ def project_id() -> ProjectID: @pytest.fixture -def dy_workbench_with_networkable_labels(mocks_dir: Path) -> Workbench: +def dy_workbench_with_networkable_labels(mocks_dir: Path) -> NodesDict: dy_workbench_template = mocks_dir / "fake_dy_workbench_template.json" assert dy_workbench_template.exists() dy_workbench = json.loads(dy_workbench_template.read_text()) - parsed_workbench: Workbench = {} + parsed_workbench: NodesDict = {} for node_uuid, node_data in dy_workbench.items(): node_data["label"] = f"label_{uuid4()}" - parsed_workbench[node_uuid] = Node.parse_obj(node_data) + parsed_workbench[node_uuid] = Node.model_validate(node_data) return parsed_workbench @@ -195,16 +195,11 @@ def fake_project_id() -> ProjectID: @pytest.fixture -def user_id() -> PositiveInt: - return 1 - - -@pytest.fixture -def mock_docker_calls(mocker: MockerFixture) -> Iterable[dict[str, AsyncMock]]: +def mock_docker_calls(mocker: MockerFixture) -> dict[str, AsyncMock]: requires_dynamic_sidecar_mock = AsyncMock() requires_dynamic_sidecar_mock.return_value = True class_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._task.DynamicSidecarsScheduler" - mocked_items = { + return { "attach": mocker.patch(f"{class_base}.attach_project_network", AsyncMock()), "detach": mocker.patch(f"{class_base}.detach_project_network", AsyncMock()), "requires_dynamic_sidecar": mocker.patch( @@ -213,8 +208,6 @@ def mock_docker_calls(mocker: MockerFixture) -> Iterable[dict[str, AsyncMock]]: ), } - yield mocked_items - async def test_send_network_configuration_to_dynamic_sidecar( mock_scheduler: AsyncMock, @@ -223,7 +216,6 @@ async def test_send_network_configuration_to_dynamic_sidecar( 
mock_docker_calls: dict[str, AsyncMock], ) -> None: for example in examples_factory: - await _send_network_configuration_to_dynamic_sidecar( scheduler=mock_scheduler, project_id=project_id, @@ -236,7 +228,7 @@ async def test_send_network_configuration_to_dynamic_sidecar( async def test_get_networks_with_aliases_for_default_network_is_json_serializable( - mock_director_v0_client: AsyncMock, + mock_catalog_client: AsyncMock, fake_project_id: ProjectID, dy_workbench_with_networkable_labels: dict[str, Any], user_id: PositiveInt, @@ -247,6 +239,6 @@ async def test_get_networks_with_aliases_for_default_network_is_json_serializabl project_id=fake_project_id, user_id=user_id, new_workbench=dy_workbench_with_networkable_labels, - director_v0_client=mock_director_v0_client, + catalog_client=mock_catalog_client, rabbitmq_client=rabbitmq_client, ) diff --git a/services/director-v2/tests/unit/test_modules_rabbitmq.py b/services/director-v2/tests/unit/test_modules_rabbitmq.py new file mode 100644 index 00000000000..972f836f575 --- /dev/null +++ b/services/director-v2/tests/unit/test_modules_rabbitmq.py @@ -0,0 +1,52 @@ +# pylint: disable=redefined-outer-name + +from decimal import Decimal +from unittest.mock import AsyncMock + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.rabbitmq_messages import ( + CreditsLimit, + WalletCreditsLimitReachedMessage, +) +from simcore_service_director_v2.modules.rabbitmq import handler_out_of_credits + + +@pytest.fixture(params=[True, False]) +def ignore_limits(request: pytest.FixtureRequest) -> bool: + return request.param + + +@pytest.fixture +async def mock_app(ignore_limits: bool) -> FastAPI: + mock = AsyncMock() + mock.state.settings.DYNAMIC_SERVICES.DYNAMIC_SCHEDULER.DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED = ( + ignore_limits + ) + mock.state.dynamic_sidecar_scheduler = AsyncMock() + return mock + + +@pytest.fixture +def message(faker: Faker) -> WalletCreditsLimitReachedMessage: + return WalletCreditsLimitReachedMessage( + service_run_id=faker.pystr(), + user_id=faker.pyint(), + project_id=faker.uuid4(cast_to=None), + node_id=faker.uuid4(cast_to=None), + wallet_id=faker.pyint(), + credits=Decimal(-10), + credits_limit=CreditsLimit(0), + ) + + +async def test_handler_out_of_credits( + mock_app: FastAPI, message: WalletCreditsLimitReachedMessage, ignore_limits +): + await handler_out_of_credits(mock_app, message.model_dump_json().encode()) + + removal_mark_count = ( + mock_app.state.dynamic_sidecar_scheduler.mark_all_services_in_wallet_for_removal.call_count + ) + assert removal_mark_count == (0 if ignore_limits else 1) diff --git a/services/director-v2/tests/unit/test_modules_storage.py b/services/director-v2/tests/unit/test_modules_storage.py index 74ce540d1c3..216a0414d00 100644 --- a/services/director-v2/tests/unit/test_modules_storage.py +++ b/services/director-v2/tests/unit/test_modules_storage.py @@ -4,7 +4,6 @@ # pylint:disable=protected-access import pytest -from faker import Faker from fastapi import FastAPI from models_library.users import UserID from settings_library.s3 import S3Settings @@ -12,23 +11,18 @@ @pytest.fixture -def minimal_storage_config(project_env_devel_environment, monkeypatch): +def minimal_storage_config( + disable_postgres: None, project_env_devel_environment, monkeypatch +): """set a minimal configuration for testing the director connection only""" monkeypatch.setenv("DIRECTOR_ENABLED", "0") - monkeypatch.setenv("POSTGRES_ENABLED", "0")
monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "0") + monkeypatch.setenv("DIRECTOR_ENABLED", "0") monkeypatch.setenv("DIRECTOR_V2_CATALOG", "null") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "0") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") -@pytest.fixture -def user_id(faker: Faker) -> UserID: - return UserID(faker.pyint(min_value=1)) - - def test_get_storage_client_instance( minimal_storage_config: None, minimal_app: FastAPI, diff --git a/services/director-v2/tests/unit/test_schemas_dynamic_services_scheduler.py b/services/director-v2/tests/unit/test_schemas_dynamic_services_scheduler.py index eab2f93727b..6347ebab5f4 100644 --- a/services/director-v2/tests/unit/test_schemas_dynamic_services_scheduler.py +++ b/services/director-v2/tests/unit/test_schemas_dynamic_services_scheduler.py @@ -1,14 +1,12 @@ # pylint: disable=redefined-outer-name +from collections.abc import Iterator from contextlib import contextmanager from copy import deepcopy from pathlib import Path -from typing import Iterator import pytest -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - SchedulerData, -) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData @pytest.fixture( @@ -42,11 +40,11 @@ def assert_copy_has_changes(original: SchedulerData) -> Iterator[SchedulerData]: async def test_parse_saved_fake_scheduler_data(fake_scheduler_data: str) -> None: - assert SchedulerData.parse_raw(fake_scheduler_data) + assert SchedulerData.model_validate_json(fake_scheduler_data) def test_nested_compare(fake_scheduler_data: str) -> None: - scheduler_data = SchedulerData.parse_raw(fake_scheduler_data) + scheduler_data = SchedulerData.model_validate_json(fake_scheduler_data) with assert_copy_has_changes(scheduler_data) as to_change: to_change.paths_mapping.inputs_path = Path("/tmp") diff --git a/services/director-v2/tests/unit/test_utils_client_decorators.py b/services/director-v2/tests/unit/test_utils_client_decorators.py index 066bedad11b..5b630f788c7 100644 --- a/services/director-v2/tests/unit/test_utils_client_decorators.py +++ b/services/director-v2/tests/unit/test_utils_client_decorators.py @@ -35,10 +35,10 @@ async def a_request(method: str, **kwargs) -> Response: await a_request( "POST", url=url, - params=dict(kettle="boiling"), - data=dict(kettle_number="royal_01"), + params={"kettle": "boiling"}, + data={"kettle_number": "royal_01"}, ) - assert status.HTTP_503_SERVICE_UNAVAILABLE == exec_info.value.status_code + assert exec_info.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE # ERROR test_utils_client_decorators:client_decorators.py:76 AService service error: # |Request| diff --git a/services/director-v2/tests/unit/test_utils_comp_scheduler.py b/services/director-v2/tests/unit/test_utils_comp_scheduler.py new file mode 100644 index 00000000000..e589d4a933f --- /dev/null +++ b/services/director-v2/tests/unit/test_utils_comp_scheduler.py @@ -0,0 +1,88 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +import pytest +from models_library.docker import DockerGenericTag +from models_library.projects_state import RunningState +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.modules.comp_scheduler._utils import ( + COMPLETED_STATES, + SCHEDULED_STATES, + TASK_TO_START_STATES, 
+ create_service_resources_from_task, +) + + +@pytest.mark.parametrize( + "state", + [ + RunningState.PUBLISHED, + RunningState.PENDING, + RunningState.STARTED, + ], +) +def test_scheduler_takes_care_of_runs_with_state(state: RunningState): + assert state in SCHEDULED_STATES + + +@pytest.mark.parametrize( + "state", + [ + RunningState.SUCCESS, + RunningState.ABORTED, + RunningState.FAILED, + ], +) +def test_scheduler_knows_these_are_completed_states(state: RunningState): + assert state in COMPLETED_STATES + + +def test_scheduler_knows_all_the_states(): + assert COMPLETED_STATES.union(SCHEDULED_STATES).union(TASK_TO_START_STATES).union( + {RunningState.NOT_STARTED, RunningState.UNKNOWN} + ) == set(RunningState) + + +@pytest.mark.parametrize( + "task", + [ + CompTaskAtDB.model_validate(example) + for example in CompTaskAtDB.model_config["json_schema_extra"]["examples"] + ], + ids=str, +) +def test_create_service_resources_from_task(task: CompTaskAtDB): + received_service_resources = create_service_resources_from_task(task) + assert received_service_resources + assert len(received_service_resources) == 1 + assert "container" in received_service_resources + service_resources = received_service_resources[DockerGenericTag("container")] + assert service_resources.boot_modes == [task.image.boot_mode] + assert service_resources.resources + # some requirements are compulsory such as CPU,RAM + assert "CPU" in service_resources.resources + assert "RAM" in service_resources.resources + # any set limit/reservation are the same + for res_data in service_resources.resources.values(): + assert res_data.limit == res_data.reservation + assert task.image.node_requirements + assert service_resources.resources["CPU"].limit == task.image.node_requirements.cpu + assert service_resources.resources["RAM"].limit == task.image.node_requirements.ram + if task.image.node_requirements.gpu: + assert "GPU" in service_resources.resources + assert ( + service_resources.resources["GPU"].limit == task.image.node_requirements.gpu + ) + else: + assert "GPU" not in service_resources.resources + + if task.image.node_requirements.vram: + assert "VRAM" in service_resources.resources + assert ( + service_resources.resources["VRAM"].limit + == task.image.node_requirements.vram + ) + else: + assert "VRAM" not in service_resources.resources diff --git a/services/director-v2/tests/unit/test_utils_computation.py b/services/director-v2/tests/unit/test_utils_computation.py index 1f55d85ab26..ef276d5bc69 100644 --- a/services/director-v2/tests/unit/test_utils_computation.py +++ b/services/director-v2/tests/unit/test_utils_computation.py @@ -4,12 +4,11 @@ # pylint:disable=protected-access from pathlib import Path -from typing import List import faker import pytest from models_library.projects_state import RunningState -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB from simcore_service_director_v2.utils.computations import ( get_pipeline_state_from_task_states, is_pipeline_running, @@ -28,7 +27,7 @@ def fake_task_file(mocks_dir: Path): @pytest.fixture(scope="session") def fake_task(fake_task_file: Path) -> CompTaskAtDB: - return CompTaskAtDB.parse_file(fake_task_file) + return CompTaskAtDB.model_validate_json(fake_task_file.read_text()) # NOTE: these parametrizations are made to mimic something like a sleepers project @@ -213,10 +212,9 @@ def fake_task(fake_task_file: Path) -> CompTaskAtDB: (RunningState.SUCCESS), (RunningState.FAILED), 
(RunningState.ABORTED), - (RunningState.RETRY), ], - RunningState.STARTED, - id="any number of success and 1 retry = started", + RunningState.FAILED, + id="any number of success", ), pytest.param( [ @@ -250,15 +248,33 @@ def fake_task(fake_task_file: Path) -> CompTaskAtDB: RunningState.UNKNOWN, id="empty tasks (empty project or full of dynamic services) = unknown", ), + pytest.param( + [ + (RunningState.WAITING_FOR_CLUSTER), + (RunningState.PUBLISHED), + (RunningState.PUBLISHED), + ], + RunningState.WAITING_FOR_CLUSTER, + id="published and waiting for cluster = waiting for cluster", + ), + pytest.param( + [ + (RunningState.WAITING_FOR_RESOURCES), + (RunningState.PUBLISHED), + (RunningState.PUBLISHED), + ], + RunningState.WAITING_FOR_RESOURCES, + id="published and waiting for resources = waiting for resources", + ), ], ) def test_get_pipeline_state_from_task_states( - task_states: List[RunningState], + task_states: list[RunningState], exp_pipeline_state: RunningState, fake_task: CompTaskAtDB, ): - tasks: List[CompTaskAtDB] = [ - fake_task.copy(deep=True, update={"state": s}) for s in task_states + tasks: list[CompTaskAtDB] = [ + fake_task.model_copy(deep=True, update={"state": s}) for s in task_states ] pipeline_state: RunningState = get_pipeline_state_from_task_states(tasks) @@ -275,7 +291,6 @@ def test_get_pipeline_state_from_task_states( (RunningState.NOT_STARTED, False), (RunningState.PENDING, True), (RunningState.STARTED, True), - (RunningState.RETRY, True), (RunningState.SUCCESS, False), (RunningState.FAILED, False), (RunningState.ABORTED, False), diff --git a/services/director-v2/tests/unit/test_utils_dags.py b/services/director-v2/tests/unit/test_utils_dags.py index 9434c69d85c..0fc17030a2d 100644 --- a/services/director-v2/tests/unit/test_utils_dags.py +++ b/services/director-v2/tests/unit/test_utils_dags.py @@ -5,14 +5,26 @@ # pylint:disable=no-value-for-parameter +import datetime from dataclasses import dataclass -from typing import Any, Dict, List +from typing import Any, Final +from uuid import uuid4 import networkx as nx import pytest -from models_library.projects import Workbench +from models_library.projects import NodesDict +from models_library.projects_nodes import NodeState from models_library.projects_nodes_io import NodeID +from models_library.projects_pipeline import PipelineDetails +from models_library.projects_state import RunningState +from simcore_postgres_database.models.comp_tasks import NodeClass +from simcore_service_director_v2.models.comp_tasks import ( + CompTaskAtDB, + Image, + NodeSchema, +) from simcore_service_director_v2.utils.dags import ( + compute_pipeline_details, create_complete_dag, create_minimal_computational_graph_based_on_selection, find_computational_node_cycles, @@ -20,8 +32,8 @@ def test_create_complete_dag_graph( - fake_workbench: Workbench, - fake_workbench_complete_adjacency: Dict[str, List[str]], + fake_workbench: NodesDict, + fake_workbench_complete_adjacency: dict[str, list[str]], ): dag_graph = create_complete_dag(fake_workbench) assert nx.is_directed_acyclic_graph(dag_graph) @@ -30,9 +42,9 @@ def test_create_complete_dag_graph( @dataclass class MinimalGraphTest: - subgraph: List[NodeID] - force_exp_dag: Dict[str, List[str]] - not_forced_exp_dag: Dict[str, List[str]] + subgraph: list[NodeID] + force_exp_dag: dict[str, list[str]] + not_forced_exp_dag: dict[str, list[str]] @pytest.mark.parametrize( @@ -186,7 +198,7 @@ class MinimalGraphTest: ), ], ) -async def test_create_minimal_graph(fake_workbench: Workbench, graph: 
MinimalGraphTest): +async def test_create_minimal_graph(fake_workbench: NodesDict, graph: MinimalGraphTest): """the workbench is made of file-picker and 4 sleepers. sleeper 1 has already run.""" complete_dag: nx.DiGraph = create_complete_dag(fake_workbench) @@ -214,9 +226,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra pytest.param( {"node_1": ["node_2", "node_3"], "node_2": ["node_3"], "node_3": []}, { - "node_1": {"key": "simcore/services/comp/fake"}, - "node_2": {"key": "simcore/services/comp/fake"}, - "node_3": {"key": "simcore/services/comp/fake"}, + "node_1": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_3": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, }, [], id="cycle less dag expect no cycle", @@ -228,9 +249,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra "node_3": ["node_1"], }, { - "node_1": {"key": "simcore/services/comp/fake"}, - "node_2": {"key": "simcore/services/comp/fake"}, - "node_3": {"key": "simcore/services/comp/fake"}, + "node_1": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_3": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, }, [["node_1", "node_2", "node_3"]], id="dag with 1 cycle", @@ -242,9 +272,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra "node_3": ["node_1"], }, { - "node_1": {"key": "simcore/services/comp/fake"}, - "node_2": {"key": "simcore/services/comp/fake"}, - "node_3": {"key": "simcore/services/comp/fake"}, + "node_1": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_3": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, }, [["node_1", "node_2", "node_3"], ["node_1", "node_2"]], id="dag with 2 cycles", @@ -256,9 +295,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra "node_3": ["node_1"], }, { - "node_1": {"key": "simcore/services/comp/fake"}, - "node_2": {"key": "simcore/services/comp/fake"}, - "node_3": {"key": "simcore/services/dynamic/fake"}, + "node_1": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_3": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, }, [["node_1", "node_2", "node_3"]], id="dag with 1 cycle and 1 dynamic services should fail", @@ -270,9 +318,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra "node_3": ["node_1"], }, { - "node_1": {"key": "simcore/services/dynamic/fake"}, - "node_2": {"key": "simcore/services/comp/fake"}, - "node_3": {"key": "simcore/services/dynamic/fake"}, + "node_1": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + }, + "node_3": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, }, [["node_1", "node_2", "node_3"]], id="dag 
with 1 cycle and 2 dynamic services should fail", @@ -284,9 +341,18 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra "node_3": ["node_1"], }, { - "node_1": {"key": "simcore/services/dynamic/fake"}, - "node_2": {"key": "simcore/services/dynamic/fake"}, - "node_3": {"key": "simcore/services/dynamic/fake"}, + "node_1": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, + "node_2": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, + "node_3": { + "key": "simcore/services/dynamic/fake", + "node_class": NodeClass.INTERACTIVE, + }, }, [], id="dag with 1 cycle and 3 dynamic services should be ok", @@ -294,9 +360,9 @@ async def test_create_minimal_graph(fake_workbench: Workbench, graph: MinimalGra ], ) def test_find_computational_node_cycles( - dag_adjacency: Dict[str, List[str]], - node_keys: Dict[str, Dict[str, Any]], - exp_cycles: List[List[str]], + dag_adjacency: dict[str, list[str]], + node_keys: dict[str, dict[str, Any]], + exp_cycles: list[list[str]], ): dag = nx.from_dict_of_lists(dag_adjacency, create_using=nx.DiGraph) # add node attributes @@ -307,3 +373,226 @@ def test_find_computational_node_cycles( assert len(list_of_cycles) == len(exp_cycles), "expected number of cycles not found" for cycle in list_of_cycles: assert sorted(cycle) in exp_cycles + + +@dataclass +class PipelineDetailsTestParams: + complete_dag: nx.DiGraph + pipeline_dag: nx.DiGraph + comp_tasks: list[CompTaskAtDB] + expected_pipeline_details: PipelineDetails + + +@pytest.fixture() +def pipeline_test_params( + dag_adjacency: dict[str, list[str]], + node_keys: dict[str, dict[str, Any]], + list_comp_tasks: list[CompTaskAtDB], + expected_pipeline_details_output: PipelineDetails, +) -> PipelineDetailsTestParams: + # check the inputs make sense + assert len(set(dag_adjacency)) == len(node_keys) == len(list_comp_tasks) + assert dag_adjacency.keys() == node_keys.keys() + assert len( + {t.node_id for t in list_comp_tasks}.intersection(node_keys.keys()) + ) == len(set(dag_adjacency)) + + # resolve the naming + node_name_to_uuid_map = {} + resolved_dag_adjacency: dict[str, list[str]] = {} + for node_a, next_nodes in dag_adjacency.items(): + resolved_dag_adjacency[ + node_name_to_uuid_map.setdefault(node_a, f"{uuid4()}") + ] = [node_name_to_uuid_map.setdefault(n, f"{uuid4()}") for n in next_nodes] + + # create the complete dag + complete_dag = nx.from_dict_of_lists( + resolved_dag_adjacency, create_using=nx.DiGraph + ) + # add node attributes + for non_resolved_key, values in node_keys.items(): + for attr, attr_value in values.items(): + complete_dag.nodes[node_name_to_uuid_map[non_resolved_key]][ + attr + ] = attr_value + + pipeline_dag = nx.from_dict_of_lists( + resolved_dag_adjacency, create_using=nx.DiGraph + ) + + # resolve the comp_tasks + resolved_list_comp_tasks = [ + c.model_copy(update={"node_id": node_name_to_uuid_map[c.node_id]}) + for c in list_comp_tasks + ] + + # resolved the expected output + + resolved_expected_pipeline_details = expected_pipeline_details_output.model_copy( + update={ + "adjacency_list": { + NodeID(node_name_to_uuid_map[node_a]): [ + NodeID(node_name_to_uuid_map[n]) for n in next_nodes + ] + for node_a, next_nodes in expected_pipeline_details_output.adjacency_list.items() + }, + "node_states": { + NodeID(node_name_to_uuid_map[node]): state + for node, state in expected_pipeline_details_output.node_states.items() + }, + } + ) + + return PipelineDetailsTestParams( + complete_dag=complete_dag, 
+ pipeline_dag=pipeline_dag, + comp_tasks=resolved_list_comp_tasks, + expected_pipeline_details=resolved_expected_pipeline_details, + ) + + +_MANY_NODES: Final[int] = 60 + + +@pytest.mark.parametrize( + "dag_adjacency, node_keys, list_comp_tasks, expected_pipeline_details_output", + [ + pytest.param( + {}, + {}, + [], + PipelineDetails(adjacency_list={}, progress=None, node_states={}), + id="empty dag", + ), + pytest.param( + {f"node_{x}": [] for x in range(_MANY_NODES)}, + { + f"node_{x}": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + "state": RunningState.NOT_STARTED, + "outputs": None, + } + for x in range(_MANY_NODES) + }, + [ + CompTaskAtDB.model_construct( + project_id=uuid4(), + node_id=f"node_{x}", + schema=NodeSchema(inputs={}, outputs={}), + inputs=None, + image=Image(name="simcore/services/comp/fake", tag="1.3.4"), + state=RunningState.NOT_STARTED, + internal_id=3, + node_class=NodeClass.COMPUTATIONAL, + created=datetime.datetime.now(tz=datetime.UTC), + modified=datetime.datetime.now(tz=datetime.UTC), + last_heartbeat=None, + progress=1.00, + ) + for x in range(_MANY_NODES) + ], + PipelineDetails.model_construct( + adjacency_list={f"node_{x}": [] for x in range(_MANY_NODES)}, + progress=1.0, + node_states={ + f"node_{x}": NodeState(modified=True, progress=1) + for x in range(_MANY_NODES) + }, + ), + id="when summing many node progresses there are issues with floating point pipeline progress", + ), + pytest.param( + {"node_1": ["node_2", "node_3"], "node_2": ["node_3"], "node_3": []}, + { + "node_1": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + "state": RunningState.NOT_STARTED, + "outputs": None, + }, + "node_2": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + "state": RunningState.NOT_STARTED, + "outputs": None, + }, + "node_3": { + "key": "simcore/services/comp/fake", + "node_class": NodeClass.COMPUTATIONAL, + "state": RunningState.NOT_STARTED, + "outputs": None, + }, + }, + [ + # NOTE: we use construct here to be able to use non uuid names to simplify test setup + CompTaskAtDB.model_construct( + project_id=uuid4(), + node_id="node_1", + schema=NodeSchema(inputs={}, outputs={}), + inputs=None, + image=Image(name="simcore/services/comp/fake", tag="1.3.4"), + state=RunningState.NOT_STARTED, + internal_id=3, + node_class=NodeClass.COMPUTATIONAL, + created=datetime.datetime.now(tz=datetime.UTC), + modified=datetime.datetime.now(tz=datetime.UTC), + last_heartbeat=None, + ), + CompTaskAtDB.model_construct( + project_id=uuid4(), + node_id="node_2", + schema=NodeSchema(inputs={}, outputs={}), + inputs=None, + image=Image(name="simcore/services/comp/fake", tag="1.3.4"), + state=RunningState.NOT_STARTED, + internal_id=3, + node_class=NodeClass.COMPUTATIONAL, + created=datetime.datetime.now(tz=datetime.UTC), + modified=datetime.datetime.now(tz=datetime.UTC), + last_heartbeat=None, + ), + CompTaskAtDB.model_construct( + project_id=uuid4(), + node_id="node_3", + schema=NodeSchema(inputs={}, outputs={}), + inputs=None, + image=Image(name="simcore/services/comp/fake", tag="1.3.4"), + state=RunningState.NOT_STARTED, + internal_id=3, + node_class=NodeClass.COMPUTATIONAL, + created=datetime.datetime.now(tz=datetime.UTC), + modified=datetime.datetime.now(tz=datetime.UTC), + last_heartbeat=None, + progress=1.00, + ), + ], + PipelineDetails.model_construct( + adjacency_list={ + "node_1": ["node_2", "node_3"], + "node_2": ["node_3"], + "node_3": [], + }, + progress=0.3333333333333333, + 
node_states={ + "node_1": NodeState(modified=True, progress=None), + "node_2": NodeState(modified=True, progress=None), + "node_3": NodeState(modified=True, progress=1), + }, + ), + id="proper dag", + ), + ], +) +async def test_compute_pipeline_details( + pipeline_test_params: PipelineDetailsTestParams, +): + received_details = await compute_pipeline_details( + pipeline_test_params.complete_dag, + pipeline_test_params.pipeline_dag, + pipeline_test_params.comp_tasks, + ) + assert ( + received_details.model_dump() + == pipeline_test_params.expected_pipeline_details.model_dump() + ) diff --git a/services/director-v2/tests/unit/test_utils_db.py b/services/director-v2/tests/unit/test_utils_db.py index 8f6a46aad75..4bb06b82085 100644 --- a/services/director-v2/tests/unit/test_utils_db.py +++ b/services/director-v2/tests/unit/test_utils_db.py @@ -1,31 +1,17 @@ -from contextlib import suppress -from typing import Any, Dict, Type, cast - import pytest -from models_library.clusters import BaseCluster, Cluster -from pydantic import BaseModel -from simcore_service_director_v2.utils.db import to_clusters_db - - -@pytest.mark.parametrize( - "model_cls", - (Cluster,), +from models_library.projects_state import RunningState +from simcore_postgres_database.models.comp_pipeline import StateType +from simcore_service_director_v2.utils.db import ( + DB_TO_RUNNING_STATE, + RUNNING_STATE_TO_DB, ) -def test_export_clusters_to_db( - model_cls: Type[BaseModel], model_cls_examples: Dict[str, Dict[str, Any]] -): - for example in model_cls_examples.values(): - owner_gid = example["owner"] - # remove the owner from the access rights if any - with suppress(KeyError): - example.get("access_rights", {}).pop(owner_gid) - instance = cast(BaseCluster, model_cls(**example)) - # for updates - cluster_db_dict = to_clusters_db(instance, only_update=True) - keys_not_in_db = ["id", "access_rights"] +@pytest.mark.parametrize("input_running_state", RunningState) +def test_running_state_to_db(input_running_state: RunningState): + assert input_running_state in RUNNING_STATE_TO_DB + - assert list(cluster_db_dict.keys()) == [ - x for x in example if x not in keys_not_in_db - ] +@pytest.mark.parametrize("input_state_type", StateType) +def test_db_to_running_state(input_state_type: StateType): + assert input_state_type in DB_TO_RUNNING_STATE diff --git a/services/director-v2/tests/unit/test_utils_distributed_identifier.py b/services/director-v2/tests/unit/test_utils_distributed_identifier.py new file mode 100644 index 00000000000..c7ad46b74a9 --- /dev/null +++ b/services/director-v2/tests/unit/test_utils_distributed_identifier.py @@ -0,0 +1,359 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name + +import asyncio +import string +from collections.abc import AsyncIterable, AsyncIterator +from dataclasses import dataclass +from secrets import choice +from typing import Final +from uuid import UUID, uuid4 + +import pytest +from pydantic import BaseModel, NonNegativeInt +from pytest_mock import MockerFixture +from servicelib.redis import RedisClientSDK +from servicelib.utils import logged_gather +from settings_library.redis import RedisDatabase, RedisSettings +from simcore_service_director_v2.utils.base_distributed_identifier import ( + BaseDistributedIdentifierManager, +) + +pytest_simcore_core_services_selection = [ + "redis", +] + +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + +# if this goes too high, max open file limit is reached +_MAX_REDIS_CONCURRENCY: Final[NonNegativeInt] = 1000 + + 
+class UserDefinedID: + # define a custom type of ID for the API + # by choice it is hard to serialize/deserialize + + def __init__(self, uuid: UUID | None = None) -> None: + self._id = uuid if uuid else uuid4() + + def __eq__(self, other: "UserDefinedID") -> bool: + return self._id == other._id + + # only necessary for nice looking IDs in the logs + def __repr__(self) -> str: + return f"<id={self._id}>" + + # only necessary for RandomTextAPI + def __hash__(self): + return hash(str(self._id)) + + +class RandomTextEntry(BaseModel): + text: str + + @classmethod + def create(cls, length: int) -> "RandomTextEntry": + letters_and_digits = string.ascii_letters + string.digits + text = "".join(choice(letters_and_digits) for _ in range(length)) + return cls(text=text) + + +class RandomTextAPI: + # Emulates an external API + # used to create resources + + def __init__(self) -> None: + self._created: dict[UserDefinedID, RandomTextEntry] = {} + + def create(self, length: int) -> tuple[UserDefinedID, RandomTextEntry]: + identifier = UserDefinedID(uuid4()) + self._created[identifier] = RandomTextEntry.create(length) + return identifier, self._created[identifier] + + def delete(self, identifier: UserDefinedID) -> None: + del self._created[identifier] + + def get(self, identifier: UserDefinedID) -> RandomTextEntry | None: + return self._created.get(identifier, None) + + +@dataclass +class ComponentUsingRandomText: + # Emulates another component in the system + # using the created resources + + _in_use: bool = True + + def is_used(self, an_id: UserDefinedID) -> bool: + _ = an_id + return self._in_use + + def toggle_usage(self, in_use: bool) -> None: + self._in_use = in_use + + +class AnEmptyTextCleanupContext(BaseModel): + # nothing is required during cleanup, so the context + # is an empty object. + # A ``pydantic.BaseModel`` is used for convenience + # this could have inherited from ``object`` + ... + + +class RandomTextResourcesManager( + BaseDistributedIdentifierManager[ + UserDefinedID, RandomTextEntry, AnEmptyTextCleanupContext + ] +): + # Implements a resource manager for handling the lifecycle of + # resources created by a service. + # It also comes with automatic cleanup in case the service owning + # the resources failed to remove them in the past.
+ + def __init__( + self, + redis_client_sdk: RedisClientSDK, + component_using_random_text: ComponentUsingRandomText, + ) -> None: + # THESE two systems would normally come stored in the `app` context + self.api = RandomTextAPI() + self.component_using_random_text = component_using_random_text + + super().__init__(redis_client_sdk) + + @classmethod + def _deserialize_identifier(cls, raw: str) -> UserDefinedID: + return UserDefinedID(UUID(raw)) + + @classmethod + def _serialize_identifier(cls, identifier: UserDefinedID) -> str: + return f"{identifier._id}" # noqa: SLF001 + + @classmethod + def _deserialize_cleanup_context( + cls, raw: str | bytes + ) -> AnEmptyTextCleanupContext: + return AnEmptyTextCleanupContext.model_validate_json(raw) + + @classmethod + def _serialize_cleanup_context( + cls, cleanup_context: AnEmptyTextCleanupContext + ) -> str: + return cleanup_context.model_dump_json() + + async def is_used( + self, identifier: UserDefinedID, cleanup_context: AnEmptyTextCleanupContext + ) -> bool: + _ = cleanup_context + return self.component_using_random_text.is_used(identifier) + + # NOTE: it is intended for the user to overwrite the **kwargs with custom names + # to provide a cleaner interface, tooling will complain slightly + async def _create( # pylint:disable=arguments-differ # type:ignore [override] + self, length: int + ) -> tuple[UserDefinedID, RandomTextEntry]: + return self.api.create(length) + + async def get(self, identifier: UserDefinedID, **_) -> RandomTextEntry | None: + return self.api.get(identifier) + + async def _destroy( + self, identifier: UserDefinedID, _: AnEmptyTextCleanupContext + ) -> None: + self.api.delete(identifier) + + +@pytest.fixture +async def redis_client_sdk( + redis_service: RedisSettings, +) -> AsyncIterator[RedisClientSDK]: + redis_resources_dns = redis_service.build_redis_dsn( + RedisDatabase.DISTRIBUTED_IDENTIFIERS + ) + + client = RedisClientSDK(redis_resources_dns, client_name="pytest") + assert client + assert client.redis_dsn == redis_resources_dns + # cleanup, previous run's leftovers + await client.redis.flushall() + + yield client + # cleanup, properly close the clients + await client.redis.flushall() + await client.shutdown() + + +@pytest.fixture +def component_using_random_text() -> ComponentUsingRandomText: + return ComponentUsingRandomText() + + +@pytest.fixture +async def manager_with_no_cleanup_task( + redis_client_sdk: RedisClientSDK, + component_using_random_text: ComponentUsingRandomText, +) -> RandomTextResourcesManager: + return RandomTextResourcesManager(redis_client_sdk, component_using_random_text) + + +@pytest.fixture +async def manager( + manager_with_no_cleanup_task: RandomTextResourcesManager, +) -> AsyncIterable[RandomTextResourcesManager]: + await manager_with_no_cleanup_task.setup() + yield manager_with_no_cleanup_task + await manager_with_no_cleanup_task.shutdown() + + +async def test_resource_is_missing(manager: RandomTextResourcesManager): + missing_identifier = UserDefinedID() + assert await manager.get(missing_identifier) is None + + +@pytest.mark.parametrize("delete_before_removal", [True, False]) +async def test_full_workflow( + manager: RandomTextResourcesManager, delete_before_removal: bool +): + # creation + identifier, _ = await manager.create( + cleanup_context=AnEmptyTextCleanupContext(), length=1 + ) + assert await manager.get(identifier) is not None + + # optional removal + if delete_before_removal: + await manager.remove(identifier) + + is_still_present = not delete_before_removal + assert (await 
manager.get(identifier) is not None) is is_still_present + + # safe remove the resource + await manager.remove(identifier) + + # resource no longer exists + assert await manager.get(identifier) is None + + +@pytest.mark.parametrize("reraise", [True, False]) +async def test_remove_raises_error( + mocker: MockerFixture, + manager: RandomTextResourcesManager, + caplog: pytest.LogCaptureFixture, + reraise: bool, +): + caplog.clear() + + error_message = "mock error during resource destroy" + mocker.patch.object(manager, "_destroy", side_effect=RuntimeError(error_message)) + + # after creation object is present + identifier, _ = await manager.create( + cleanup_context=AnEmptyTextCleanupContext(), length=1 + ) + assert await manager.get(identifier) is not None + + if reraise: + with pytest.raises(RuntimeError): + await manager.remove(identifier, reraise=reraise) + else: + await manager.remove(identifier, reraise=reraise) + # check logs in case of error + assert "Unhandled exception:" in caplog.text + assert error_message in caplog.text + + +async def _create_resources( + manager: RandomTextResourcesManager, count: int +) -> list[UserDefinedID]: + creation_results: list[tuple[UserDefinedID, RandomTextEntry]] = await logged_gather( + *[ + manager.create(cleanup_context=AnEmptyTextCleanupContext(), length=1) + for _ in range(count) + ], + max_concurrency=_MAX_REDIS_CONCURRENCY, + ) + return [x[0] for x in creation_results] + + +async def _assert_all_resources( + manager: RandomTextResourcesManager, + identifiers: list[UserDefinedID], + *, + exist: bool, +) -> None: + get_results: list[RandomTextEntry | None] = await logged_gather( + *[manager.get(identifier) for identifier in identifiers], + max_concurrency=_MAX_REDIS_CONCURRENCY, + ) + if exist: + assert all(x is not None for x in get_results) + else: + assert all(x is None for x in get_results) + + +@pytest.mark.parametrize("count", [1000]) +async def test_parallel_create_remove(manager: RandomTextResourcesManager, count: int): + # create resources + identifiers: list[UserDefinedID] = await _create_resources(manager, count) + await _assert_all_resources(manager, identifiers, exist=True) + + # safe remove the resources, they do not exist any longer + await asyncio.gather(*[manager.remove(identifier) for identifier in identifiers]) + await _assert_all_resources(manager, identifiers, exist=False) + + +async def test_background_removal_of_unused_resources( + manager_with_no_cleanup_task: RandomTextResourcesManager, + component_using_random_text: ComponentUsingRandomText, +): + # create resources + identifiers: list[UserDefinedID] = await _create_resources( + manager_with_no_cleanup_task, 10_000 + ) + await _assert_all_resources(manager_with_no_cleanup_task, identifiers, exist=True) + + # call cleanup, all resources still exist + await manager_with_no_cleanup_task._cleanup_unused_identifiers() # noqa: SLF001 + await _assert_all_resources(manager_with_no_cleanup_task, identifiers, exist=True) + + # make resources unused in external system + component_using_random_text.toggle_usage(in_use=False) + await manager_with_no_cleanup_task._cleanup_unused_identifiers() # noqa: SLF001 + await _assert_all_resources(manager_with_no_cleanup_task, identifiers, exist=False) + + +async def test_no_redis_key_overlap_when_inheriting( + redis_client_sdk: RedisClientSDK, + component_using_random_text: ComponentUsingRandomText, +): + class ChildRandomTextResourcesManager(RandomTextResourcesManager): + ... 
+ + parent_manager = RandomTextResourcesManager( + redis_client_sdk, component_using_random_text + ) + child_manager = ChildRandomTextResourcesManager( + redis_client_sdk, component_using_random_text + ) + + # create an entry in the child and one in the parent + + parent_identifier, _ = await parent_manager.create( + cleanup_context=AnEmptyTextCleanupContext(), length=1 + ) + child_identifier, _ = await child_manager.create( + cleanup_context=AnEmptyTextCleanupContext(), length=1 + ) + assert parent_identifier != child_identifier + + keys = await redis_client_sdk.redis.keys("*") + assert len(keys) == 2 + + # check keys contain the correct prefixes + key_prefixes: set[str] = {k.split(":")[0] for k in keys} + assert key_prefixes == { + RandomTextResourcesManager.class_path(), + ChildRandomTextResourcesManager.class_path(), + } diff --git a/services/director-v2/tests/unit/test_utils_scheduler.py b/services/director-v2/tests/unit/test_utils_scheduler.py deleted file mode 100644 index 8ff14074076..00000000000 --- a/services/director-v2/tests/unit/test_utils_scheduler.py +++ /dev/null @@ -1,42 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - - -import pytest -from models_library.projects_state import RunningState -from simcore_service_director_v2.utils.scheduler import ( - COMPLETED_STATES, - SCHEDULED_STATES, -) - - -@pytest.mark.parametrize( - "state", - [ - RunningState.PUBLISHED, - RunningState.PENDING, - RunningState.STARTED, - RunningState.RETRY, - ], -) -def test_scheduler_takes_care_of_runs_with_state(state: RunningState): - assert state in SCHEDULED_STATES - - -@pytest.mark.parametrize( - "state", - [ - RunningState.SUCCESS, - RunningState.ABORTED, - RunningState.FAILED, - ], -) -def test_scheduler_knows_these_are_completed_states(state: RunningState): - assert state in COMPLETED_STATES - - -def test_scheduler_knows_all_the_states(): - assert COMPLETED_STATES.union(SCHEDULED_STATES).union( - {RunningState.NOT_STARTED, RunningState.UNKNOWN} - ) == set(RunningState) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py new file mode 100644 index 00000000000..0804a848d35 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/conftest.py @@ -0,0 +1,71 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-value-for-parameter +# pylint:disable=protected-access +# pylint:disable=too-many-arguments +# pylint:disable=no-name-in-module +# pylint: disable=too-many-statements + + +from unittest import mock + +import pytest +import sqlalchemy as sa +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings + + +@pytest.fixture +def mock_env( + mock_env: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + fake_s3_envs: EnvVarsDict, + postgres_db: sa.engine.Engine, + postgres_host_config: dict[str, str], + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + rabbit_env_vars_dict: EnvVarsDict, +) -> EnvVarsDict: + return mock_env | setenvs_from_dict( + monkeypatch, + {k: f"{v}" for k, v in fake_s3_envs.items()} + | { + "COMPUTATIONAL_BACKEND_ENABLED": True, + "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED": True, + } + | 
rabbit_env_vars_dict, + ) + + +@pytest.fixture +def with_disabled_auto_scheduling(mocker: MockerFixture) -> mock.Mock: + mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler.shutdown_manager", + ) + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler.setup_manager", + ) + + +@pytest.fixture +def with_disabled_scheduler_worker(mocker: MockerFixture) -> mock.Mock: + mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler.shutdown_worker", + autospec=True, + ) + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler.setup_worker", + autospec=True, + ) + + +@pytest.fixture +def with_disabled_scheduler_publisher(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._manager.request_pipeline_scheduling", + autospec=True, + ) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py new file mode 100644 index 00000000000..633e8cd2a44 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations.py @@ -0,0 +1,1013 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-positional-arguments + +import datetime as dt +import json +import re +import urllib.parse +from collections.abc import Awaitable, Callable, Iterator +from decimal import Decimal +from pathlib import Path +from random import choice +from typing import Any +from unittest import mock + +import httpx +import pytest +import respx +from faker import Faker +from fastapi import FastAPI, status +from models_library.api_schemas_catalog.services import ServiceGet +from models_library.api_schemas_clusters_keeper.ec2_instances import EC2InstanceTypeGet +from models_library.api_schemas_directorv2.computations import ( + ComputationCreate, + ComputationGet, +) +from models_library.api_schemas_directorv2.services import ServiceExtras +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingPlanGet, + RutPricingUnitGet, +) +from models_library.projects import ProjectAtDB +from models_library.projects_nodes import NodeID, NodeState +from models_library.projects_pipeline import PipelineDetails +from models_library.projects_state import RunningState +from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services import ServiceMetaDataPublished +from models_library.services_resources import ( + DEFAULT_SINGLE_SERVICE_NAME, + ServiceResourcesDict, + ServiceResourcesDictHelpers, +) +from models_library.utils.fastapi_encoders import jsonable_encoder +from models_library.wallets import WalletInfo +from pydantic import AnyHttpUrl, ByteSize, PositiveInt, TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from simcore_postgres_database.models.comp_pipeline import StateType +from simcore_postgres_database.models.comp_tasks import NodeClass +from simcore_postgres_database.utils_projects_nodes import ProjectNodesRepo +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from 
simcore_service_director_v2.models.comp_runs import CompRunsAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.modules.db.repositories.comp_tasks._utils import ( + _CPUS_SAFE_MARGIN, + _RAM_SAFE_MARGIN_RATIO, +) +from simcore_service_director_v2.utils.computations import to_node_class +from sqlalchemy.ext.asyncio import AsyncEngine + +pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture() +def mocked_rabbit_mq_client(mocker: MockerFixture): + mocker.patch( + "simcore_service_director_v2.core.application.rabbitmq.RabbitMQClient", + autospec=True, + ) + + +@pytest.fixture() +def minimal_configuration( + mock_env: EnvVarsDict, + postgres_host_config: dict[str, str], + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, +): + monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") + monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") + monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "1") + monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) + + +@pytest.fixture(scope="session") +def fake_service_details(mocks_dir: Path) -> ServiceMetaDataPublished: + fake_service_path = mocks_dir / "fake_service.json" + assert fake_service_path.exists() + fake_service_data = json.loads(fake_service_path.read_text()) + return ServiceMetaDataPublished(**fake_service_data) + + +@pytest.fixture +def fake_service_extras() -> ServiceExtras: + extra_example = ServiceExtras.model_json_schema()["examples"][2] # type: ignore + random_extras = ServiceExtras(**extra_example) # type: ignore + assert random_extras is not None + return random_extras + + +@pytest.fixture +def fake_service_resources() -> ServiceResourcesDict: + return TypeAdapter(ServiceResourcesDict).validate_python( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"][0], # type: ignore + ) + + +@pytest.fixture +def fake_service_labels() -> dict[str, Any]: + return choice( # noqa: S311 + SimcoreServiceLabels.model_json_schema()["examples"] # type: ignore + ) + + +@pytest.fixture +def mocked_director_service_fcts( + minimal_app: FastAPI, + fake_service_details: ServiceMetaDataPublished, +) -> Iterator[respx.MockRouter]: + # pylint: disable=not-context-manager + with respx.mock( + base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + respx_mock.get( + re.compile( + r"/services/simcore%2Fservices%2F(comp|dynamic|frontend)%2F[^/]+/\d+.\d+.\d+$" + ), + name="get_service", + ).respond( + json={"data": [fake_service_details.model_dump(mode="json", by_alias=True)]} + ) + + yield respx_mock + + +@pytest.fixture +def mocked_catalog_service_fcts( + minimal_app: FastAPI, + fake_service_details: ServiceMetaDataPublished, + fake_service_resources: ServiceResourcesDict, + fake_service_labels: dict[str, Any], + fake_service_extras: ServiceExtras, +) -> Iterator[respx.MockRouter]: + def _mocked_service_resources(request) -> httpx.Response: + return httpx.Response( + httpx.codes.OK, 
json=jsonable_encoder(fake_service_resources, by_alias=True) + ) + + def _mocked_services_details( + request, service_key: str, service_version: str + ) -> httpx.Response: + data_published = fake_service_details.model_copy( + update={ + "key": urllib.parse.unquote(service_key), + "version": service_version, + } + ).model_dump(by_alias=True) + data = { + **ServiceGet.model_json_schema()["examples"][0], + **data_published, + } + payload = ServiceGet.model_validate(data) + return httpx.Response( + 200, + json=jsonable_encoder( + payload, + by_alias=True, + ), + ) + + # pylint: disable=not-context-manager + with respx.mock( + base_url=minimal_app.state.settings.DIRECTOR_V2_CATALOG.api_base_url, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + respx_mock.get( + re.compile( + r"services/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F[^/]+/[^\.]+.[^\.]+.[^\/]+/resources" + ), + name="get_service_resources", + ).mock(side_effect=_mocked_service_resources) + respx_mock.get( + re.compile( + r"/services/simcore%2Fservices%2F(comp|dynamic|frontend)%2F[^/]+/\d+.\d+.\d+/labels" + ), + name="get_service_labels", + ).respond(json=fake_service_labels) + respx_mock.get( + re.compile( + r"/services/simcore%2Fservices%2F(comp|dynamic|frontend)%2F[^/]+/\d+.\d+.\d+/extras" + ), + name="get_service_extras", + ).respond(json=fake_service_extras.model_dump(mode="json", by_alias=True)) + respx_mock.get( + re.compile( + r"services/(?P<service_key>simcore%2Fservices%2F(comp|dynamic|frontend)%2F[^/]+)/(?P<service_version>[^\.]+.[^\.]+.[^/\?]+).*" + ), + name="get_service", + ).mock(side_effect=_mocked_services_details) + + yield respx_mock + + +@pytest.fixture +def mocked_catalog_service_fcts_deprecated( + minimal_app: FastAPI, + fake_service_details: ServiceMetaDataPublished, + fake_service_extras: ServiceExtras, +) -> Iterator[respx.MockRouter]: + def _mocked_services_details( + request, service_key: str, service_version: str + ) -> httpx.Response: + data_published = fake_service_details.model_copy( + update={ + "key": urllib.parse.unquote(service_key), + "version": service_version, + "deprecated": ( + dt.datetime.now(tz=dt.UTC) - dt.timedelta(days=1) + ).isoformat(), + } + ).model_dump(by_alias=True) + + deprecated = { + "deprecated": ( + dt.datetime.now(tz=dt.UTC) - dt.timedelta(days=1) + ).isoformat() + } + + data = { + **ServiceGet.model_json_schema()["examples"][0], + **data_published, + **deprecated, + } # type: ignore + + payload = ServiceGet.model_validate(data) + + return httpx.Response( + httpx.codes.OK, + json=jsonable_encoder( + payload, + by_alias=True, + ), + ) + + # pylint: disable=not-context-manager + with respx.mock( + base_url=minimal_app.state.settings.DIRECTOR_V2_CATALOG.api_base_url, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + respx_mock.get( + re.compile( + r"services/(?P<service_key>simcore%2Fservices%2F(comp|dynamic|frontend)%2F[^/]+)/(?P<service_version>[^\.]+.[^\.]+.[^/\?]+).*" + ), + name="get_service", + ).mock(side_effect=_mocked_services_details) + + yield respx_mock + + +assert "json_schema_extra" in RutPricingPlanGet.model_config +assert isinstance(RutPricingPlanGet.model_json_schema(), dict) +assert isinstance(RutPricingPlanGet.model_json_schema()["examples"], list) + + +@pytest.fixture( + params=[ + RutPricingPlanGet.model_json_schema()["examples"][0], + RutPricingPlanGet.model_json_schema()["examples"][1], + ], + ids=["with ec2 restriction", "without"], +) +def default_pricing_plan(request: pytest.FixtureRequest) -> RutPricingPlanGet: + return RutPricingPlanGet(**request.param) + + 
+@pytest.fixture +def default_pricing_plan_aws_ec2_type( + default_pricing_plan: RutPricingPlanGet, +) -> str | None: + assert default_pricing_plan.pricing_units + for p in default_pricing_plan.pricing_units: + if p.default: + if p.specific_info.aws_ec2_instances: + return p.specific_info.aws_ec2_instances[0] + return None + pytest.fail("no default pricing plan defined!") + msg = "make pylint happy by raising here" + raise RuntimeError(msg) + + +@pytest.fixture +def mocked_resource_usage_tracker_service_fcts( + minimal_app: FastAPI, default_pricing_plan: RutPricingPlanGet +) -> Iterator[respx.MockRouter]: + def _mocked_service_default_pricing_plan( + request, service_key: str, service_version: str + ) -> httpx.Response: + # RUT only returns values if they are in the table resource_tracker_pricing_plan_to_service + # otherwise it returns 404s + if "frontend" in service_key: + # NOTE: there are typically no frontend services that have pricing plans + return httpx.Response(status_code=status.HTTP_404_NOT_FOUND) + return httpx.Response( + 200, json=jsonable_encoder(default_pricing_plan, by_alias=True) + ) + + def _mocked_get_pricing_unit(request, pricing_plan_id: int) -> httpx.Response: + assert "json_schema_extra" in RutPricingUnitGet.model_config + assert isinstance(RutPricingUnitGet.model_json_schema(), dict) + assert isinstance(RutPricingUnitGet.model_json_schema()["examples"], list) + return httpx.Response( + 200, + json=jsonable_encoder( + ( + default_pricing_plan.pricing_units[0] + if default_pricing_plan.pricing_units + else RutPricingUnitGet.model_json_schema()["examples"][0] + ), + by_alias=True, + ), + ) + + # pylint: disable=not-context-manager + with respx.mock( + base_url=minimal_app.state.settings.DIRECTOR_V2_RESOURCE_USAGE_TRACKER.api_base_url, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: + respx_mock.get( + re.compile( + r"services/(?P<service_key>simcore/services/(comp|dynamic|frontend)/[^/]+)/(?P<service_version>[^\.]+.[^\.]+.[^/\?]+)/pricing-plan.+" + ), + name="get_service_default_pricing_plan", + ).mock(side_effect=_mocked_service_default_pricing_plan) + + respx_mock.get( + re.compile(r"pricing-plans/(?P<pricing_plan_id>\d+)/pricing-units.+"), + name="get_pricing_unit", + ).mock(side_effect=_mocked_get_pricing_unit) + + yield respx_mock + + +@pytest.fixture +def product_name(faker: Faker) -> str: + return faker.name() + + +@pytest.fixture +def product_api_base_url(faker: Faker) -> AnyHttpUrl: + return TypeAdapter(AnyHttpUrl).validate_python(faker.url()) + + +async def test_computation_create_validators( + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + fake_workbench_without_outputs: dict[str, Any], + product_name: str, + product_api_base_url: AnyHttpUrl, + faker: Faker, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + product_api_base_url=product_api_base_url, + use_on_demand_clusters=True, + ) + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + product_api_base_url=product_api_base_url, + use_on_demand_clusters=False, + ) + + +async def test_create_computation( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: 
Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + product_api_base_url=product_api_base_url, + ) + ), + ) + assert response.status_code == status.HTTP_201_CREATED, response.text + + +@pytest.fixture +def wallet_info(faker: Faker) -> WalletInfo: + return WalletInfo( + wallet_id=faker.pyint(), + wallet_name=faker.name(), + wallet_credit_amount=Decimal(faker.pyint(min_value=12, max_value=129312)), + ) + + +@pytest.fixture +def fake_ec2_cpus() -> PositiveInt: + return 4 + + +@pytest.fixture +def fake_ec2_ram() -> ByteSize: + return TypeAdapter(ByteSize).validate_python("4GiB") + + +@pytest.fixture +def mocked_clusters_keeper_service_get_instance_type_details( + mocker: MockerFixture, + default_pricing_plan_aws_ec2_type: str, + fake_ec2_cpus: PositiveInt, + fake_ec2_ram: ByteSize, +) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.db.repositories.comp_tasks._utils.get_instance_type_details", + return_value=[ + EC2InstanceTypeGet( + name=default_pricing_plan_aws_ec2_type, + cpus=fake_ec2_cpus, + ram=fake_ec2_ram, + ) + ], + ) + + +@pytest.fixture +def mocked_clusters_keeper_service_get_instance_type_details_with_invalid_name( + mocker: MockerFixture, + faker: Faker, + fake_ec2_cpus: PositiveInt, + fake_ec2_ram: ByteSize, +) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.db.repositories.comp_tasks._utils.get_instance_type_details", + return_value=[ + EC2InstanceTypeGet( + name=faker.pystr(), + cpus=fake_ec2_cpus, + ram=fake_ec2_ram, + ) + ], + ) + + +assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config +assert isinstance(ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict) +assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list +) + + +@pytest.fixture( + params=ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"] +) +def project_nodes_overrides(request: pytest.FixtureRequest) -> dict[str, Any]: + return request.param + + +async def test_create_computation_with_wallet( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + mocked_resource_usage_tracker_service_fcts: respx.MockRouter, + mocked_clusters_keeper_service_get_instance_type_details: mock.Mock, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, + wallet_info: WalletInfo, + project_nodes_overrides: dict[str, Any], + default_pricing_plan_aws_ec2_type: str | None, + sqlalchemy_async_engine: AsyncEngine, + fake_ec2_cpus: PositiveInt, + fake_ec2_ram: ByteSize, +): + # In billable product a wallet is passed, with a selected pricing plan + # the pricing plan contains information about the hardware that should be used + # this will then override the original service resources + user = create_registered_user() + + proj = await project( + user, + project_nodes_overrides={"required_resources": 
project_nodes_overrides}, + workbench=fake_workbench_without_outputs, + ) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + product_api_base_url=product_api_base_url, + wallet_info=wallet_info, + ) + ), + ) + assert response.status_code == status.HTTP_201_CREATED, response.text + if default_pricing_plan_aws_ec2_type: + mocked_clusters_keeper_service_get_instance_type_details.assert_called() + assert ( + mocked_resource_usage_tracker_service_fcts.calls.call_count + == len( + [ + v + for v in proj.workbench.values() + if to_node_class(v.key) != NodeClass.FRONTEND + ] + ) + * 2 + ) + # check the project nodes were really overriden now + async with sqlalchemy_async_engine.connect() as connection: + project_nodes_repo = ProjectNodesRepo(project_uuid=proj.uuid) + for node in await project_nodes_repo.list(connection): + if ( + to_node_class(proj.workbench[f"{node.node_id}"].key) + != NodeClass.FRONTEND + ): + assert node.required_resources + if DEFAULT_SINGLE_SERVICE_NAME in node.required_resources: + assert node.required_resources[DEFAULT_SINGLE_SERVICE_NAME][ + "resources" + ] == { + "CPU": { + "limit": fake_ec2_cpus - _CPUS_SAFE_MARGIN, + "reservation": fake_ec2_cpus - _CPUS_SAFE_MARGIN, + }, + "RAM": { + "limit": int( + fake_ec2_ram - _RAM_SAFE_MARGIN_RATIO * fake_ec2_ram + ), + "reservation": int( + fake_ec2_ram - _RAM_SAFE_MARGIN_RATIO * fake_ec2_ram + ), + }, + } + elif "s4l-core" in node.required_resources: + # multi-container service, currently not supported + # hard-coded sim4life + assert "s4l-core" in node.required_resources + assert node.required_resources["s4l-core"]["resources"] == { + "CPU": {"limit": 4.0, "reservation": 0.1}, + "RAM": {"limit": 17179869184, "reservation": 536870912}, + "VRAM": {"limit": 1, "reservation": 1}, + } + else: + # multi-container service, currently not supported + # hard-coded jupyterlab + assert "jupyter-lab" in node.required_resources + assert node.required_resources["jupyter-lab"]["resources"] == { + "CPU": {"limit": 0.1, "reservation": 0.1}, + "RAM": {"limit": 2147483648, "reservation": 2147483648}, + } + + else: + mocked_clusters_keeper_service_get_instance_type_details.assert_not_called() + + +@pytest.mark.parametrize( + "default_pricing_plan", + [ + RutPricingPlanGet.model_validate( + RutPricingPlanGet.model_json_schema()["examples"][0] + ) + ], +) +async def test_create_computation_with_wallet_with_invalid_pricing_unit_name_raises_422( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + mocked_resource_usage_tracker_service_fcts: respx.MockRouter, + mocked_clusters_keeper_service_get_instance_type_details_with_invalid_name: mock.Mock, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, + wallet_info: WalletInfo, +): + user = create_registered_user() + proj = await project( + user, + workbench=fake_workbench_without_outputs, + ) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + 
product_api_base_url=product_api_base_url, + wallet_info=wallet_info, + ) + ), + ) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, response.text + mocked_clusters_keeper_service_get_instance_type_details_with_invalid_name.assert_called_once() + + +@pytest.mark.parametrize( + "default_pricing_plan", + [ + RutPricingPlanGet( + **RutPricingPlanGet.model_json_schema()["examples"][0] # type: ignore + ) + ], +) +async def test_create_computation_with_wallet_with_no_clusters_keeper_raises_503( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + mocked_resource_usage_tracker_service_fcts: respx.MockRouter, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, + wallet_info: WalletInfo, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + product_name=product_name, + product_api_base_url=product_api_base_url, + wallet_info=wallet_info, + ) + ), + ) + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE, response.text + + +async def test_start_computation_without_product_fails( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + product_name: str, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json={ + "user_id": f"{user['id']}", + "project_id": f"{proj.uuid}", + "start_pipeline": f"{True}", + }, + ) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, response.text + + +async def test_start_computation( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + start_pipeline=True, + product_name=product_name, + product_api_base_url=product_api_base_url, + ) + ), + ) + assert response.status_code == status.HTTP_201_CREATED, response.text + mocked_get_service_resources = mocked_catalog_service_fcts["get_service_resources"] + # there should be as many calls to the catalog as there are no defined resources by default + assert mocked_get_service_resources.call_count == len( + fake_workbench_without_outputs + ) + + +async def 
test_start_computation_with_project_node_resources_defined( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts: respx.MockRouter, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + assert "json_schema_extra" in ServiceResourcesDictHelpers.model_config + assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"], dict + ) + assert isinstance( + ServiceResourcesDictHelpers.model_config["json_schema_extra"]["examples"], list + ) + proj = await project( + user, + project_nodes_overrides={ + "required_resources": ServiceResourcesDictHelpers.model_config[ + "json_schema_extra" + ]["examples"][0] + }, + workbench=fake_workbench_without_outputs, + ) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + start_pipeline=True, + product_name=product_name, + product_api_base_url=product_api_base_url, + ) + ), + ) + assert response.status_code == status.HTTP_201_CREATED, response.text + mocked_get_service_resources = mocked_catalog_service_fcts["get_service_resources"] + # there should be no calls to the catalog as there are resources defined, so no need to call the catalog + assert mocked_get_service_resources.call_count == 0 + + +async def test_start_computation_with_deprecated_services_raises_406( + minimal_configuration: None, + mocked_director_service_fcts: respx.MockRouter, + mocked_catalog_service_fcts_deprecated: respx.MockRouter, + product_name: str, + product_api_base_url: AnyHttpUrl, + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + create_computation_url = httpx.URL("/v2/computations") + response = await async_client.post( + create_computation_url, + json=jsonable_encoder( + ComputationCreate( + user_id=user["id"], + project_id=proj.uuid, + start_pipeline=True, + product_name=product_name, + product_api_base_url=product_api_base_url, + ) + ), + ) + assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text + + +async def test_get_computation_from_empty_project( + minimal_configuration: None, + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + faker: Faker, + async_client: httpx.AsyncClient, +): + user = create_registered_user() + get_computation_url = httpx.URL( + f"/v2/computations/{faker.uuid4()}?user_id={user['id']}" + ) + # the project exists but there is no pipeline yet + response = await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_404_NOT_FOUND, response.text + # create the project + proj = await project(user, workbench=fake_workbench_without_outputs) + get_computation_url = httpx.URL( + f"/v2/computations/{proj.uuid}?user_id={user['id']}" + ) + response 
= await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_404_NOT_FOUND, response.text + # create an empty pipeline + await create_pipeline( + project_id=f"{proj.uuid}", + ) + response = await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_200_OK, response.text + returned_computation = ComputationGet.model_validate(response.json()) + assert returned_computation + expected_computation = ComputationGet( + id=proj.uuid, + state=RunningState.UNKNOWN, + pipeline_details=PipelineDetails( + adjacency_list={}, node_states={}, progress=None + ), + url=TypeAdapter(AnyHttpUrl).validate_python( + f"{async_client.base_url.join(get_computation_url)}" + ), + stop_url=None, + result=None, + iteration=None, + started=None, + stopped=None, + submitted=None, + ) + assert returned_computation.model_dump() == expected_computation.model_dump() + + +async def test_get_computation_from_not_started_computation_task( + minimal_configuration: None, + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + async_client: httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + get_computation_url = httpx.URL( + f"/v2/computations/{proj.uuid}?user_id={user['id']}" + ) + await create_pipeline( + project_id=f"{proj.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + # create no task this should trigger an exception + response = await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_409_CONFLICT, response.text + + # now create the expected tasks and the state is good again + comp_tasks = await create_tasks(user=user, project=proj) + response = await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_200_OK, response.text + returned_computation = ComputationGet.model_validate(response.json()) + assert returned_computation + expected_computation = ComputationGet( + id=proj.uuid, + state=RunningState.NOT_STARTED, + pipeline_details=PipelineDetails( + adjacency_list=TypeAdapter(dict[NodeID, list[NodeID]]).validate_python( + fake_workbench_adjacency + ), + progress=0, + node_states={ + t.node_id: NodeState( + modified=True, + currentStatus=RunningState.NOT_STARTED, + progress=None, + dependencies={ + NodeID(node) + for node, next_nodes in fake_workbench_adjacency.items() + if f"{t.node_id}" in next_nodes + }, + ) + for t in comp_tasks + if t.node_class == NodeClass.COMPUTATIONAL + }, + ), + url=TypeAdapter(AnyHttpUrl).validate_python( + f"{async_client.base_url.join(get_computation_url)}" + ), + stop_url=None, + result=None, + iteration=None, + started=None, + stopped=None, + submitted=None, + ) + assert returned_computation == expected_computation + + +async def test_get_computation_from_published_computation_task( + minimal_configuration: None, + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + async_client: 
httpx.AsyncClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + await create_pipeline( + project_id=f"{proj.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks( + user=user, project=proj, state=StateType.PUBLISHED, progress=0 + ) + comp_runs = await create_comp_run( + user=user, project=proj, result=StateType.PUBLISHED + ) + assert comp_runs + get_computation_url = httpx.URL( + f"/v2/computations/{proj.uuid}?user_id={user['id']}" + ) + response = await async_client.get(get_computation_url) + assert response.status_code == status.HTTP_200_OK, response.text + returned_computation = ComputationGet.model_validate(response.json()) + assert returned_computation + expected_stop_url = async_client.base_url.join( + f"/v2/computations/{proj.uuid}:stop?user_id={user['id']}" + ) + expected_computation = ComputationGet( + id=proj.uuid, + state=RunningState.PUBLISHED, + pipeline_details=PipelineDetails( + adjacency_list=TypeAdapter(dict[NodeID, list[NodeID]]).validate_python( + fake_workbench_adjacency + ), + node_states={ + t.node_id: NodeState( + modified=True, + currentStatus=RunningState.PUBLISHED, + dependencies={ + NodeID(node) + for node, next_nodes in fake_workbench_adjacency.items() + if f"{t.node_id}" in next_nodes + }, + progress=0, + ) + for t in comp_tasks + if t.node_class == NodeClass.COMPUTATIONAL + }, + progress=0, + ), + url=TypeAdapter(AnyHttpUrl).validate_python( + f"{async_client.base_url.join(get_computation_url)}" + ), + stop_url=TypeAdapter(AnyHttpUrl).validate_python(f"{expected_stop_url}"), + result=None, + iteration=1, + started=None, + stopped=None, + submitted=None, + ) + + _CHANGED_FIELDS = {"submitted"} + assert returned_computation.model_dump( + exclude=_CHANGED_FIELDS + ) == expected_computation.model_dump(exclude=_CHANGED_FIELDS) + assert returned_computation.model_dump( + include=_CHANGED_FIELDS + ) != expected_computation.model_dump(include=_CHANGED_FIELDS) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py new file mode 100644 index 00000000000..2c539a7c2b6 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_route_computations_tasks.py @@ -0,0 +1,215 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import Awaitable, Callable +from typing import Any, NamedTuple +from unittest import mock +from uuid import uuid4 + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_directorv2.computations import ( + TaskLogFileGet, + TasksOutputs, + TasksSelection, +) +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_director_v2.core.settings import AppSettings +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB + +pytest_simcore_core_services_selection = [ + "postgres", + 
"rabbit", + "redis", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def mock_env( + mock_env: EnvVarsDict, # sets default env vars + postgres_host_config, # sets postgres env vars + monkeypatch: pytest.MonkeyPatch, + faker: Faker, +): + return setenvs_from_dict( + monkeypatch, + { + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + }, + ) + + +@pytest.fixture +def client(async_client: httpx.AsyncClient) -> httpx.AsyncClient: + # overrides client + # WARNING: this is an httpx.AsyncClient and not a TestClient!! + def _get_app(async_client: httpx.AsyncClient) -> FastAPI: + app = async_client._transport.app # type: ignore + assert app + assert isinstance(app, FastAPI) + return app + + app = _get_app(async_client) + + settings: AppSettings = app.state.settings + assert settings + print(settings.model_dump_json(indent=1)) + + return async_client + + +@pytest.fixture +def mocked_nodeports_storage_client(mocker, faker: Faker) -> dict[str, mock.MagicMock]: + # NOTE: mocking storage API would require aioresponses since the access to storage + # is via node-ports which uses aiohttp-client! In order to avoid adding an extra + # dependency we will patch storage-client functions in simcore-sdk's nodeports + + class Loc(NamedTuple): + name: str + id: int + + return { + "get_download_file_link": mocker.patch( + "simcore_sdk.node_ports_common.storage_client.get_download_file_link", + autospec=True, + return_value=faker.url(), + ), + "list_storage_locations": mocker.patch( + "simcore_sdk.node_ports_common.storage_client.list_storage_locations", + autospec=True, + return_value=[ + Loc(name="simcore.s3", id=0), + ], + ), + } + + +@pytest.fixture +def user(create_registered_user: Callable[..., dict[str, Any]]) -> dict[str, Any]: + return create_registered_user() + + +@pytest.fixture +def user_id(user: dict[str, Any]): + return user["id"] + + +@pytest.fixture +async def project_id( + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + user: dict[str, Any], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], +) -> ProjectID: + """project uuid of a saved project (w/ tasks up-to-date)""" + + # insert project -> db + proj = await project(user, workbench=fake_workbench_without_outputs) + + # insert pipeline -> comp_pipeline + await create_pipeline( + project_id=f"{proj.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + # insert tasks -> comp_tasks + comp_tasks = await create_tasks(user=user, project=proj) + + return proj.uuid + + +@pytest.fixture +def node_id(fake_workbench_adjacency: dict[str, Any]) -> NodeID: + return NodeID(next(nid for nid in fake_workbench_adjacency)) + + +# - tests api routes +# - real postgres db with rows inserted in users, projects, comp_tasks and comp_pipelines +# - mocks responses from storage API patching nodeports +# + + +async def test_get_all_tasks_log_files( + mocked_nodeports_storage_client: dict[str, mock.MagicMock], + client: httpx.AsyncClient, + user_id: UserID, + project_id: ProjectID, +): + resp = await client.get( + f"/v2/computations/{project_id}/tasks/-/logfile", params={"user_id": user_id} + ) + + # calls storage + mocked_nodeports_storage_client["list_storage_locations"].assert_not_called() + assert 
mocked_nodeports_storage_client["get_download_file_link"].called + + # test expected response according to OAS! + assert resp.status_code == status.HTTP_200_OK + log_files = TypeAdapter(list[TaskLogFileGet]).validate_json(resp.text) + assert log_files + assert all(l.download_link for l in log_files) + + +async def test_get_task_logs_file( + mocked_nodeports_storage_client: dict[str, mock.MagicMock], + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + client: httpx.AsyncClient, +): + resp = await client.get( + f"/v2/computations/{project_id}/tasks/{node_id}/logfile", + params={"user_id": user_id}, + ) + assert resp.status_code == status.HTTP_200_OK + + log_file = TaskLogFileGet.model_validate_json(resp.text) + assert log_file.download_link + + +async def test_get_tasks_outputs( + project_id: ProjectID, node_id: NodeID, client: httpx.AsyncClient +): + selection = { + node_id, + } + resp = await client.post( + f"/v2/computations/{project_id}/tasks/-/outputs:batchGet", + json=jsonable_encoder(TasksSelection(nodes_ids=selection)), + ) + + assert resp.status_code == status.HTTP_200_OK + + tasks_outputs = TasksOutputs.model_validate(resp.json()) + + assert selection == set(tasks_outputs.nodes_outputs.keys()) + outputs = tasks_outputs.nodes_outputs[node_id] + assert outputs == {} + + +async def test_get_tasks_outputs_not_found(node_id: NodeID, client: httpx.AsyncClient): + invalid_project = uuid4() + resp = await client.post( + f"/v2/computations/{invalid_project}/tasks/-/outputs:batchGet", + json=jsonable_encoder(TasksSelection(nodes_ids={node_id})), + ) + assert resp.status_code == status.HTTP_404_NOT_FOUND diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_rpc_computations.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_rpc_computations.py new file mode 100644 index 00000000000..910679901e3 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_api_rpc_computations.py @@ -0,0 +1,207 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-positional-arguments + +from collections.abc import Awaitable, Callable +from datetime import UTC, datetime, timedelta +from typing import Any + +from models_library.api_schemas_directorv2.comp_runs import ( + ComputationRunRpcGetPage, + ComputationTaskRpcGetPage, +) +from models_library.projects import ProjectAtDB +from models_library.projects_state import RunningState +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.director_v2 import ( + computations as rpc_computations, +) +from simcore_postgres_database.models.comp_pipeline import StateType +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_runs import CompRunsAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB + +pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_rpc_list_computation_runs_and_tasks( + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: 
Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + rpc_client: RabbitMQRPCClient, +): + user = create_registered_user() + proj = await project(user, workbench=fake_workbench_without_outputs) + await create_pipeline( + project_id=f"{proj.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks( + user=user, project=proj, state=StateType.PUBLISHED, progress=None + ) + comp_runs = await create_comp_run( + user=user, project=proj, result=RunningState.PUBLISHED + ) + assert comp_runs + + output = await rpc_computations.list_computations_latest_iteration_page( + rpc_client, product_name="osparc", user_id=user["id"] + ) + assert output.total == 1 + assert isinstance(output, ComputationRunRpcGetPage) + assert output.items[0].iteration == 1 + + comp_runs_2 = await create_comp_run( + user=user, + project=proj, + result=RunningState.PENDING, + started=datetime.now(tz=UTC), + iteration=2, + ) + output = await rpc_computations.list_computations_latest_iteration_page( + rpc_client, product_name="osparc", user_id=user["id"] + ) + assert output.total == 1 + assert isinstance(output, ComputationRunRpcGetPage) + assert output.items[0].iteration == 2 + assert output.items[0].started_at is not None + assert output.items[0].ended_at is None + + comp_runs_3 = await create_comp_run( + user=user, + project=proj, + result=RunningState.SUCCESS, + started=datetime.now(tz=UTC), + ended=datetime.now(tz=UTC), + iteration=3, + ) + output = await rpc_computations.list_computations_latest_iteration_page( + rpc_client, product_name="osparc", user_id=user["id"] + ) + assert output.total == 1 + assert isinstance(output, ComputationRunRpcGetPage) + assert output.items[0].iteration == 3 + assert output.items[0].ended_at is not None + + # Tasks + + output = await rpc_computations.list_computations_latest_iteration_tasks_page( + rpc_client, product_name="osparc", user_id=user["id"], project_ids=[proj.uuid] + ) + assert output + assert output.total == 4 + assert isinstance(output, ComputationTaskRpcGetPage) + assert len(output.items) == 4 + + +async def test_rpc_list_computation_runs_with_filtering( + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + rpc_client: RabbitMQRPCClient, +): + user = create_registered_user() + + proj_1 = await project(user, workbench=fake_workbench_without_outputs) + await create_pipeline( + project_id=f"{proj_1.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks( + user=user, project=proj_1, state=StateType.PUBLISHED, progress=None + ) + comp_runs = await create_comp_run( + user=user, project=proj_1, result=RunningState.PUBLISHED + ) + + proj_2 = await project(user, workbench=fake_workbench_without_outputs) + await create_pipeline( + project_id=f"{proj_2.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks( + user=user, project=proj_2, state=StateType.SUCCESS, progress=None + ) + comp_runs = await create_comp_run( + user=user, project=proj_2, result=RunningState.SUCCESS + ) + + # Test default behaviour `filter_only_running=False` + output = await rpc_computations.list_computations_latest_iteration_page( 
+ rpc_client, product_name="osparc", user_id=user["id"] + ) + assert output.total == 2 + + # Test filtering + output = await rpc_computations.list_computations_latest_iteration_page( + rpc_client, product_name="osparc", user_id=user["id"], filter_only_running=True + ) + assert output.total == 1 + assert output.items[0].project_uuid == proj_1.uuid + + +async def test_rpc_list_computation_runs_history( + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + rpc_client: RabbitMQRPCClient, +): + user = create_registered_user() + + proj = await project(user, workbench=fake_workbench_without_outputs) + await create_pipeline( + project_id=f"{proj.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks( + user=user, project=proj, state=StateType.PUBLISHED, progress=None + ) + comp_runs_1 = await create_comp_run( + user=user, + project=proj, + result=RunningState.SUCCESS, + started=datetime.now(tz=UTC) - timedelta(minutes=120), + ended=datetime.now(tz=UTC) - timedelta(minutes=100), + iteration=1, + ) + comp_runs_2 = await create_comp_run( + user=user, + project=proj, + result=RunningState.SUCCESS, + started=datetime.now(tz=UTC) - timedelta(minutes=90), + ended=datetime.now(tz=UTC) - timedelta(minutes=60), + iteration=2, + ) + comp_runs_3 = await create_comp_run( + user=user, + project=proj, + result=RunningState.FAILED, + started=datetime.now(tz=UTC) - timedelta(minutes=50), + ended=datetime.now(tz=UTC), + iteration=3, + ) + + output = await rpc_computations.list_computations_iterations_page( + rpc_client, product_name="osparc", user_id=user["id"], project_ids=[proj.uuid] + ) + assert output.total == 3 + assert isinstance(output, ComputationRunRpcGetPage) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py new file mode 100644 index 00000000000..366f865e033 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_db_repositories_comp_runs.py @@ -0,0 +1,496 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +import datetime +import random +from collections.abc import Awaitable, Callable +from typing import cast + +import arrow +import pytest +from _helpers import PublishedProject +from faker import Faker +from models_library.projects import ProjectID +from models_library.projects_state import RunningState +from models_library.users import UserID +from simcore_service_director_v2.core.errors import ( + ComputationalRunNotFoundError, + ProjectNotFoundError, + UserNotFoundError, +) +from simcore_service_director_v2.models.comp_runs import CompRunsAtDB, RunMetadataDict +from simcore_service_director_v2.modules.comp_scheduler._constants import ( + SCHEDULER_INTERVAL, +) +from simcore_service_director_v2.modules.db.repositories.comp_runs import ( + CompRunsRepository, +) +from sqlalchemy.ext.asyncio.engine import AsyncEngine + +pytest_simcore_core_services_selection = [ + "postgres", +] 
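+# NOTE: the selections above/below are read by the pytest-simcore plugin and decide
+# which docker services get started for this test module: postgres is the only hard
+# requirement here; adminer (a small database web UI) is presumably listed only as a
+# debugging aid for inspecting the comp_runs rows manually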
+pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def fake_user_id(faker: Faker) -> UserID: + return faker.pyint(min_value=1) + + +@pytest.fixture +def fake_project_id(faker: Faker) -> ProjectID: + return ProjectID(f"{faker.uuid4(cast_to=None)}") + + +async def test_get( + sqlalchemy_async_engine: AsyncEngine, + fake_user_id: UserID, + fake_project_id: ProjectID, + publish_project: Callable[[], Awaitable[PublishedProject]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], +): + with pytest.raises(ComputationalRunNotFoundError): + await CompRunsRepository(sqlalchemy_async_engine).get( + fake_user_id, fake_project_id + ) + + published_project = await publish_project() + assert published_project.project.prj_owner + # there is still no comp run created + with pytest.raises(ComputationalRunNotFoundError): + await CompRunsRepository(sqlalchemy_async_engine).get( + published_project.project.prj_owner, published_project.project.uuid + ) + + await create_comp_run(published_project.user, published_project.project) + await CompRunsRepository(sqlalchemy_async_engine).get( + published_project.project.prj_owner, published_project.project.uuid + ) + + +async def test_list( + sqlalchemy_async_engine: AsyncEngine, + publish_project: Callable[[], Awaitable[PublishedProject]], + run_metadata: RunMetadataDict, + faker: Faker, +): + assert await CompRunsRepository(sqlalchemy_async_engine).list_() == [] + + published_project = await publish_project() + assert await CompRunsRepository(sqlalchemy_async_engine).list_() == [] + + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + assert await CompRunsRepository(sqlalchemy_async_engine).list_() == [created] + + created = [created] + await asyncio.gather( + *( + CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=created.iteration + n + 1, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + for n in range(50) + ) + ) + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_(), + key=lambda x: x.iteration, + ) == sorted(created, key=lambda x: x.iteration) + + # test with filter of state + any_state_but_published = { + s for s in RunningState if s is not RunningState.PUBLISHED + } + assert ( + await CompRunsRepository(sqlalchemy_async_engine).list_( + filter_by_state=any_state_but_published + ) + == [] + ) + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_( + filter_by_state={RunningState.PUBLISHED} + ), + key=lambda x: x.iteration, + ) == sorted(created, key=lambda x: x.iteration) + + # test with never scheduled filter, let's create a bunch of scheduled entries, + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_(never_scheduled=True), + key=lambda x: x.iteration, + ) == sorted(created, key=lambda x: x.iteration) + comp_runs_marked_for_scheduling = random.sample(created, k=25) + await asyncio.gather( + *( + CompRunsRepository(sqlalchemy_async_engine).mark_for_scheduling( + user_id=comp_run.user_id, + project_id=comp_run.project_uuid, + iteration=comp_run.iteration, + ) + for comp_run in comp_runs_marked_for_scheduling + ) + ) + # filter them away + created = [r for r in created if r not in comp_runs_marked_for_scheduling] + assert sorted( + await 
CompRunsRepository(sqlalchemy_async_engine).list_(never_scheduled=True), + key=lambda x: x.iteration, + ) == sorted(created, key=lambda x: x.iteration) + + # now mark a few of them as processed + comp_runs_marked_as_processed = random.sample(comp_runs_marked_for_scheduling, k=11) + await asyncio.gather( + *( + CompRunsRepository(sqlalchemy_async_engine).mark_as_processed( + user_id=comp_run.user_id, + project_id=comp_run.project_uuid, + iteration=comp_run.iteration, + ) + for comp_run in comp_runs_marked_as_processed + ) + ) + # filter them away + comp_runs_marked_for_scheduling = [ + r + for r in comp_runs_marked_for_scheduling + if r not in comp_runs_marked_as_processed + ] + # since they were just marked as processed now, we will get nothing + assert ( + sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_( + never_scheduled=False, processed_since=SCHEDULER_INTERVAL + ), + key=lambda x: x.iteration, + ) + == [] + ) + # now we artificially change the scheduled/processed time and set it 2x the scheduler interval + # these are correctly processed ones, so we should get them back + fake_scheduled_time = arrow.utcnow().datetime - 2 * SCHEDULER_INTERVAL + fake_processed_time = fake_scheduled_time + 0.5 * SCHEDULER_INTERVAL + comp_runs_marked_as_processed = ( + cast( # NOTE: the cast here is ok since gather will raise if there is an error + list[CompRunsAtDB], + await asyncio.gather( + *( + CompRunsRepository(sqlalchemy_async_engine).update( + user_id=comp_run.user_id, + project_id=comp_run.project_uuid, + iteration=comp_run.iteration, + scheduled=fake_scheduled_time, + processed=fake_processed_time, + ) + for comp_run in comp_runs_marked_as_processed + ) + ), + ) + ) + # now we should get them + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_( + never_scheduled=False, processed_since=SCHEDULER_INTERVAL + ), + key=lambda x: x.iteration, + ) == sorted(comp_runs_marked_as_processed, key=lambda x: x.iteration) + + # now some of them were never processed (e.g. 
processed time is either null or before schedule time) + comp_runs_waiting_for_processing_or_never_processed = random.sample( + comp_runs_marked_as_processed, k=6 + ) + comp_runs_marked_as_processed = [ + r + for r in comp_runs_marked_as_processed + if r not in comp_runs_waiting_for_processing_or_never_processed + ] + # now we artificially change the processed time to be before the scheduled time + comp_runs_waiting_for_processing_or_never_processed = cast( + list[CompRunsAtDB], + await asyncio.gather( + *( + CompRunsRepository(sqlalchemy_async_engine).update( + user_id=comp_run.user_id, + project_id=comp_run.project_uuid, + iteration=comp_run.iteration, + scheduled=fake_processed_time, # NOTE: we invert here the timings + processed=random.choice([fake_scheduled_time, None]), # noqa: S311 + ) + for comp_run in comp_runs_waiting_for_processing_or_never_processed + ) + ), + ) + # so the processed ones shall remain + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_( + never_scheduled=False, processed_since=SCHEDULER_INTERVAL + ), + key=lambda x: x.iteration, + ) == sorted(comp_runs_marked_as_processed, key=lambda x: x.iteration) + # the ones waiting for scheduling now + assert sorted( + await CompRunsRepository(sqlalchemy_async_engine).list_( + never_scheduled=False, scheduled_since=SCHEDULER_INTERVAL + ), + key=lambda x: x.iteration, + ) == sorted( + comp_runs_waiting_for_processing_or_never_processed, key=lambda x: x.iteration + ) + + +async def test_create( + sqlalchemy_async_engine: AsyncEngine, + fake_user_id: UserID, + fake_project_id: ProjectID, + run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + with pytest.raises(ProjectNotFoundError): + await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=fake_user_id, + project_id=fake_project_id, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + published_project = await publish_project() + with pytest.raises(UserNotFoundError): + await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=fake_user_id, + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + + # creating a second one auto increment the iteration + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + assert created != got + assert created.iteration == got.iteration + 1 + + # getting without specifying the iteration returns the latest + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + + +async def test_update( + sqlalchemy_async_engine: AsyncEngine, + fake_user_id: UserID, + fake_project_id: ProjectID, + run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + # this updates 
nothing but also does not complain + updated = await CompRunsRepository(sqlalchemy_async_engine).update( + fake_user_id, fake_project_id, faker.pyint(min_value=1) + ) + assert updated is None + # now let's create a valid one + published_project = await publish_project() + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + + updated = await CompRunsRepository(sqlalchemy_async_engine).update( + created.user_id, + created.project_uuid, + created.iteration, + scheduled=datetime.datetime.now(datetime.UTC), + ) + assert updated is not None + assert created != updated + assert created.scheduled is None + assert updated.scheduled is not None + + +async def test_set_run_result( + sqlalchemy_async_engine: AsyncEngine, + run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + published_project = await publish_project() + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + assert created.result is not RunningState.PENDING + assert created.ended is None + + updated = await CompRunsRepository(sqlalchemy_async_engine).set_run_result( + user_id=created.user_id, + project_id=created.project_uuid, + iteration=created.iteration, + result_state=RunningState.PENDING, + final_state=False, + ) + assert updated + assert updated != created + assert updated.result is RunningState.PENDING + assert updated.ended is None + + final_updated = await CompRunsRepository(sqlalchemy_async_engine).set_run_result( + user_id=created.user_id, + project_id=created.project_uuid, + iteration=created.iteration, + result_state=RunningState.ABORTED, + final_state=True, + ) + assert final_updated + assert final_updated != updated + assert final_updated.result is RunningState.ABORTED + assert final_updated.ended is not None + + +async def test_mark_for_cancellation( + sqlalchemy_async_engine: AsyncEngine, + run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + published_project = await publish_project() + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + assert created.cancelled is None + + updated = await CompRunsRepository(sqlalchemy_async_engine).mark_for_cancellation( + user_id=created.user_id, + project_id=created.project_uuid, + iteration=created.iteration, + ) + assert updated + assert updated != created + assert updated.cancelled is not None + + +async def test_mark_for_scheduling( + sqlalchemy_async_engine: AsyncEngine, + 
run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + published_project = await publish_project() + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + assert created.scheduled is None + assert created.processed is None + + updated = await CompRunsRepository(sqlalchemy_async_engine).mark_for_scheduling( + user_id=created.user_id, + project_id=created.project_uuid, + iteration=created.iteration, + ) + assert updated + assert updated != created + assert updated.scheduled is not None + assert updated.processed is None + + +async def test_mark_scheduling_done( + sqlalchemy_async_engine: AsyncEngine, + run_metadata: RunMetadataDict, + faker: Faker, + publish_project: Callable[[], Awaitable[PublishedProject]], +): + published_project = await publish_project() + created = await CompRunsRepository(sqlalchemy_async_engine).create( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + iteration=None, + metadata=run_metadata, + use_on_demand_clusters=faker.pybool(), + ) + got = await CompRunsRepository(sqlalchemy_async_engine).get( + user_id=published_project.user["id"], + project_id=published_project.project.uuid, + ) + assert created == got + assert created.scheduled is None + assert created.processed is None + + updated = await CompRunsRepository(sqlalchemy_async_engine).mark_as_processed( + user_id=created.user_id, + project_id=created.project_uuid, + iteration=created.iteration, + ) + assert updated + assert updated != created + assert updated.scheduled is None + assert updated.processed is not None diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py new file mode 100644 index 00000000000..f38b3302431 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_manager.py @@ -0,0 +1,362 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-value-for-parameter +# pylint:disable=protected-access +# pylint:disable=too-many-arguments +# pylint:disable=no-name-in-module +# pylint: disable=too-many-statements + + +import asyncio +import datetime +import logging +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Any +from unittest import mock + +import pytest +from _helpers import PublishedProject, assert_comp_runs, assert_comp_runs_empty +from fastapi import FastAPI +from models_library.projects import ProjectAtDB +from models_library.projects_state import RunningState +from pytest_mock.plugin import MockerFixture +from servicelib.rabbitmq._client import RabbitMQClient +from servicelib.redis import CouldNotAcquireLockError +from servicelib.utils import limited_gather +from simcore_service_director_v2.core.errors import PipelineNotFoundError +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_runs import RunMetadataDict +from simcore_service_director_v2.modules.comp_scheduler._manager import ( + _LOST_TASKS_FACTOR, + SCHEDULER_INTERVAL, + 
run_new_pipeline, + schedule_all_pipelines, + stop_pipeline, +) +from simcore_service_director_v2.modules.comp_scheduler._models import ( + SchedulePipelineRabbitMessage, +) +from simcore_service_director_v2.modules.db.repositories.comp_runs import ( + CompRunsRepository, +) +from sqlalchemy.ext.asyncio import AsyncEngine + +pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"] +pytest_simcore_ops_services_selection = ["adminer", "redis-commander"] + + +@pytest.fixture +async def scheduler_rabbit_client_parser( + create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture +) -> AsyncIterator[mock.AsyncMock]: + client = create_rabbitmq_client("scheduling_pytest_consumer") + mock = mocker.AsyncMock(return_value=True) + queue_name, _ = await client.subscribe( + SchedulePipelineRabbitMessage.get_channel_name(), mock, exclusive_queue=False + ) + yield mock + await client.unsubscribe(queue_name) + + +@pytest.fixture +def with_fast_scheduling(mocker: MockerFixture) -> None: + from simcore_service_director_v2.modules.comp_scheduler import _manager + + mocker.patch.object( + _manager, "SCHEDULER_INTERVAL", datetime.timedelta(seconds=0.01) + ) + + +@pytest.fixture +def mocked_schedule_all_pipelines(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._manager.schedule_all_pipelines", + autospec=True, + ) + + +async def test_manager_starts_and_auto_schedules_pipelines( + with_fast_scheduling: None, + with_disabled_scheduler_worker: mock.Mock, + mocked_schedule_all_pipelines: mock.Mock, + initialized_app: FastAPI, + sqlalchemy_async_engine: AsyncEngine, +): + await assert_comp_runs_empty(sqlalchemy_async_engine) + mocked_schedule_all_pipelines.assert_called() + + +async def test_schedule_all_pipelines_empty_db( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_worker: mock.Mock, + initialized_app: FastAPI, + scheduler_rabbit_client_parser: mock.AsyncMock, + sqlalchemy_async_engine: AsyncEngine, +): + await assert_comp_runs_empty(sqlalchemy_async_engine) + + await schedule_all_pipelines(initialized_app) + + # check nothing was distributed + scheduler_rabbit_client_parser.assert_not_called() + + # check comp_runs is still empty + await assert_comp_runs_empty(sqlalchemy_async_engine) + + +async def test_schedule_all_pipelines_concurently_runs_exclusively_and_raises( + with_disabled_auto_scheduling: mock.Mock, + initialized_app: FastAPI, + mocker: MockerFixture, +): + CONCURRENCY = 5 + # NOTE: this ensure no flakyness as empty scheduling is very fast + # so we slow down the limited_gather function + original_function = limited_gather + + async def slow_limited_gather(*args, **kwargs): + result = await original_function(*args, **kwargs) + await asyncio.sleep(3) # to ensure flakyness does not occur + return result + + mock_function = mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._manager.limited_gather", + autospec=True, + side_effect=slow_limited_gather, + ) + + results = await asyncio.gather( + *(schedule_all_pipelines(initialized_app) for _ in range(CONCURRENCY)), + return_exceptions=True, + ) + + assert results.count(None) == 1, f"Only one task should have run: {results}" + for r in results: + if r: + assert isinstance(r, CouldNotAcquireLockError) + mock_function.assert_called_once() + + +async def test_schedule_all_pipelines( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_worker: mock.Mock, + initialized_app: FastAPI, + published_project: 
PublishedProject, + sqlalchemy_async_engine: AsyncEngine, + run_metadata: RunMetadataDict, + scheduler_rabbit_client_parser: mock.AsyncMock, +): + await assert_comp_runs_empty(sqlalchemy_async_engine) + assert published_project.project.prj_owner + # now we schedule a pipeline + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + # this directly schedule a new pipeline + scheduler_rabbit_client_parser.assert_called_once_with( + SchedulePipelineRabbitMessage( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ).body() + ) + scheduler_rabbit_client_parser.reset_mock() + comp_run = (await assert_comp_runs(sqlalchemy_async_engine, expected_total=1))[0] + assert comp_run.project_uuid == published_project.project.uuid + assert comp_run.user_id == published_project.project.prj_owner + assert comp_run.iteration == 1 + assert comp_run.cancelled is None + assert comp_run.metadata == run_metadata + assert comp_run.result is RunningState.PUBLISHED + assert comp_run.scheduled is not None + assert comp_run.processed is None + start_schedule_time = comp_run.scheduled + start_modified_time = comp_run.modified + + # this will now not schedule the pipeline since it was already scheduled + await schedule_all_pipelines(initialized_app) + scheduler_rabbit_client_parser.assert_not_called() + comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1) + comp_run = comp_runs[0] + assert comp_run.scheduled + assert comp_run.scheduled == start_schedule_time, "scheduled time changed!" + assert comp_run.cancelled is None + assert comp_run.modified == start_modified_time + + # to simulate that the worker did its job we will set times in the past + await CompRunsRepository(sqlalchemy_async_engine).update( + user_id=comp_run.user_id, + project_id=comp_run.project_uuid, + iteration=comp_run.iteration, + scheduled=comp_run.scheduled - 1.5 * SCHEDULER_INTERVAL, + processed=comp_run.scheduled - 1.1 * SCHEDULER_INTERVAL, + ) + + # now we schedule a pipeline again, but we wait for the scheduler interval to pass + # this will trigger a new schedule + await schedule_all_pipelines(initialized_app) + scheduler_rabbit_client_parser.assert_called_once_with( + SchedulePipelineRabbitMessage( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ).body() + ) + scheduler_rabbit_client_parser.reset_mock() + comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1) + comp_run = comp_runs[0] + assert comp_run.scheduled is not None + assert comp_run.scheduled > start_schedule_time + last_schedule_time = comp_run.scheduled + assert comp_run.cancelled is None + assert comp_run.modified > start_modified_time + + # now we stop the pipeline, which should instantly trigger a schedule + await stop_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + ) + await schedule_all_pipelines(initialized_app) + scheduler_rabbit_client_parser.assert_called_once_with( + SchedulePipelineRabbitMessage( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ).body() + ) + scheduler_rabbit_client_parser.reset_mock() + comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1) + comp_run = comp_runs[0] + assert comp_run.scheduled 
is not None
+    assert comp_run.scheduled > last_schedule_time
+    assert comp_run.cancelled is not None
+
+
+async def test_schedule_all_pipelines_logs_error_if_it_find_old_pipelines(
+    with_disabled_auto_scheduling: mock.Mock,
+    with_disabled_scheduler_worker: mock.Mock,
+    initialized_app: FastAPI,
+    published_project: PublishedProject,
+    sqlalchemy_async_engine: AsyncEngine,
+    run_metadata: RunMetadataDict,
+    scheduler_rabbit_client_parser: mock.AsyncMock,
+    caplog: pytest.LogCaptureFixture,
+):
+    await assert_comp_runs_empty(sqlalchemy_async_engine)
+    assert published_project.project.prj_owner
+    # now we schedule a pipeline
+    await run_new_pipeline(
+        initialized_app,
+        user_id=published_project.project.prj_owner,
+        project_id=published_project.project.uuid,
+        run_metadata=run_metadata,
+        use_on_demand_clusters=False,
+    )
+    # this directly schedules a new pipeline
+    scheduler_rabbit_client_parser.assert_called_once_with(
+        SchedulePipelineRabbitMessage(
+            user_id=published_project.project.prj_owner,
+            project_id=published_project.project.uuid,
+            iteration=1,
+        ).body()
+    )
+    scheduler_rabbit_client_parser.reset_mock()
+    comp_run = (await assert_comp_runs(sqlalchemy_async_engine, expected_total=1))[0]
+    assert comp_run.project_uuid == published_project.project.uuid
+    assert comp_run.user_id == published_project.project.prj_owner
+    assert comp_run.iteration == 1
+    assert comp_run.cancelled is None
+    assert comp_run.metadata == run_metadata
+    assert comp_run.result is RunningState.PUBLISHED
+    assert comp_run.scheduled is not None
+    start_schedule_time = comp_run.scheduled
+    start_modified_time = comp_run.modified
+
+    # this will now not schedule the pipeline since it was already scheduled
+    await schedule_all_pipelines(initialized_app)
+    scheduler_rabbit_client_parser.assert_not_called()
+    comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1)
+    comp_run = comp_runs[0]
+    assert comp_run.scheduled == start_schedule_time, "scheduled time changed!"
+ assert comp_run.cancelled is None + assert comp_run.modified == start_modified_time + + # now we artificially set the last_schedule time well in the past + await CompRunsRepository(sqlalchemy_async_engine).update( + comp_run.user_id, + comp_run.project_uuid, + comp_run.iteration, + scheduled=datetime.datetime.now(tz=datetime.UTC) + - SCHEDULER_INTERVAL * (_LOST_TASKS_FACTOR + 1), + ) + with caplog.at_level(logging.ERROR): + await schedule_all_pipelines(initialized_app) + assert ( + "found 1 lost pipelines, they will be re-scheduled now" in caplog.messages + ) + scheduler_rabbit_client_parser.assert_called_once_with( + SchedulePipelineRabbitMessage( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ).body() + ) + scheduler_rabbit_client_parser.reset_mock() + comp_runs = await assert_comp_runs(sqlalchemy_async_engine, expected_total=1) + comp_run = comp_runs[0] + assert comp_run.scheduled is not None + assert comp_run.scheduled > start_schedule_time + assert comp_run.cancelled is None + assert comp_run.modified > start_modified_time + + +async def test_empty_pipeline_is_not_scheduled( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_worker: mock.Mock, + initialized_app: FastAPI, + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + run_metadata: RunMetadataDict, + sqlalchemy_async_engine: AsyncEngine, + scheduler_rabbit_client_parser: mock.AsyncMock, + caplog: pytest.LogCaptureFixture, +): + await assert_comp_runs_empty(sqlalchemy_async_engine) + user = create_registered_user() + empty_project = await project(user) + + # the project is not in the comp_pipeline, therefore scheduling it should fail + with pytest.raises(PipelineNotFoundError): + await run_new_pipeline( + initialized_app, + user_id=user["id"], + project_id=empty_project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + await assert_comp_runs_empty(sqlalchemy_async_engine) + scheduler_rabbit_client_parser.assert_not_called() + + # create the empty pipeline now + await create_pipeline(project_id=f"{empty_project.uuid}") + + # creating a run with an empty pipeline is useless, check the scheduler is not kicking in + with caplog.at_level(logging.WARNING): + await run_new_pipeline( + initialized_app, + user_id=user["id"], + project_id=empty_project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + assert len(caplog.records) == 1 + assert "no computational dag defined" in caplog.records[0].message + await assert_comp_runs_empty(sqlalchemy_async_engine) + scheduler_rabbit_client_parser.assert_not_called() diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py new file mode 100644 index 00000000000..17d43c47dcc --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_scheduler_dask.py @@ -0,0 +1,1945 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-value-for-parameter +# pylint:disable=protected-access +# pylint:disable=too-many-arguments +# pylint:disable=no-name-in-module +# pylint: disable=too-many-statements + + +import asyncio +import datetime +from collections.abc import AsyncIterator, Awaitable, Callable +from copy import deepcopy +from dataclasses import dataclass 
+from typing import Any, cast +from unittest import mock + +import arrow +import pytest +from _helpers import ( + PublishedProject, + RunningProject, + assert_comp_runs, + assert_comp_runs_empty, + assert_comp_tasks, +) +from dask_task_models_library.container_tasks.errors import TaskCancelledError +from dask_task_models_library.container_tasks.events import TaskProgressEvent +from dask_task_models_library.container_tasks.io import TaskOutputData +from dask_task_models_library.container_tasks.protocol import TaskOwner +from faker import Faker +from fastapi.applications import FastAPI +from models_library.projects import ProjectAtDB, ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.projects_state import RunningState +from models_library.rabbitmq_messages import ( + InstrumentationRabbitMessage, + RabbitResourceTrackingBaseMessage, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingMessages, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, +) +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_mock.plugin import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from simcore_postgres_database.models.comp_runs import comp_runs +from simcore_postgres_database.models.comp_tasks import NodeClass +from simcore_service_director_v2.core.errors import ( + ClustersKeeperNotAvailableError, + ComputationalBackendNotConnectedError, + ComputationalBackendOnDemandNotReadyError, + ComputationalBackendTaskNotFoundError, + ComputationalBackendTaskResultsNotReadyError, + ComputationalSchedulerChangedError, + ComputationalSchedulerError, +) +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_runs import CompRunsAtDB, RunMetadataDict +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB, Image +from simcore_service_director_v2.modules.comp_scheduler._manager import ( + run_new_pipeline, + stop_pipeline, +) +from simcore_service_director_v2.modules.comp_scheduler._scheduler_base import ( + BaseCompScheduler, +) +from simcore_service_director_v2.modules.comp_scheduler._scheduler_dask import ( + DaskScheduler, +) +from simcore_service_director_v2.modules.comp_scheduler._utils import COMPLETED_STATES +from simcore_service_director_v2.modules.comp_scheduler._worker import ( + _get_scheduler_worker, +) +from simcore_service_director_v2.modules.dask_client import ( + DaskJobID, + PublishedComputationTask, +) +from simcore_service_director_v2.utils.dask_client_utils import TaskHandlers +from sqlalchemy import and_ +from sqlalchemy.ext.asyncio import AsyncEngine +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +def _assert_dask_client_correctly_initialized( + mocked_dask_client: mock.MagicMock, scheduler: BaseCompScheduler +) -> None: + mocked_dask_client.create.assert_called_once_with( + app=mock.ANY, + settings=mock.ANY, + endpoint=mock.ANY, + authentication=mock.ANY, + tasks_file_link_type=mock.ANY, + cluster_type=mock.ANY, + ) + mocked_dask_client.register_handlers.assert_called_once_with( + TaskHandlers( + cast( # noqa: SLF001 + DaskScheduler, scheduler + )._task_progress_change_handler, + ) + ) + + +@pytest.fixture +def 
mocked_dask_client(mocker: MockerFixture) -> mock.Mock: + mocked_dask_client = mocker.patch( + "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", + autospec=True, + ) + mocked_dask_client.create.return_value = mocked_dask_client + return mocked_dask_client + + +@pytest.fixture +def mocked_parse_output_data_fct(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.parse_output_data", + autospec=True, + ) + + +@pytest.fixture +def mocked_clean_task_output_fct(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.clean_task_output_and_log_files_if_invalid", + return_value=None, + autospec=True, + ) + + +@pytest.fixture +def mocked_clean_task_output_and_log_files_if_invalid( + mocker: MockerFixture, +) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.clean_task_output_and_log_files_if_invalid", + autospec=True, + ) + + +@pytest.fixture +def scheduler_api(initialized_app: FastAPI) -> BaseCompScheduler: + return _get_scheduler_worker(initialized_app) + + +async def _assert_start_pipeline( + app: FastAPI, + *, + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + run_metadata: RunMetadataDict, +) -> tuple[CompRunsAtDB, list[CompTaskAtDB]]: + exp_published_tasks = deepcopy(published_project.tasks) + assert published_project.project.prj_owner + await run_new_pipeline( + app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + + # check the database is correctly updated, the run is published + runs = await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PUBLISHED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in exp_published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + return runs[0], exp_published_tasks + + +async def _assert_publish_in_dask_backend( + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + published_tasks: list[CompTaskAtDB], + mocked_dask_client: mock.MagicMock, + scheduler: BaseCompScheduler, +) -> tuple[list[CompTaskAtDB], dict[NodeID, Callable[[], None]]]: + expected_pending_tasks = [ + published_tasks[1], + published_tasks[3], + ] + for p in expected_pending_tasks: + published_tasks.remove(p) + + async def _return_tasks_pending(job_ids: list[str]) -> list[RunningState]: + return [RunningState.PENDING for job_id in job_ids] + + mocked_dask_client.get_tasks_status.side_effect = _return_tasks_pending + assert published_project.project.prj_owner + await scheduler.apply( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ) + _assert_dask_client_correctly_initialized(mocked_dask_client, scheduler) + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PUBLISHED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + 
project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + # the other tasks are still waiting in published state + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, # since we bypass the API entrypoint this is correct + ) + # tasks were send to the backend + assert published_project.project.prj_owner is not None + assert isinstance(mocked_dask_client.send_computation_tasks, mock.Mock) + assert isinstance(mocked_dask_client.get_tasks_status, mock.Mock) + assert isinstance(mocked_dask_client.get_task_result, mock.Mock) + mocked_dask_client.send_computation_tasks.assert_has_calls( + calls=[ + mock.call( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + tasks={f"{p.node_id}": p.image}, + callback=mock.ANY, + metadata=mock.ANY, + hardware_info=mock.ANY, + resource_tracking_run_id=mock.ANY, + ) + for p in expected_pending_tasks + ], + any_order=True, + ) + task_to_callback_mapping = { + task.node_id: mocked_dask_client.send_computation_tasks.call_args_list[ + i + ].kwargs["callback"] + for i, task in enumerate(expected_pending_tasks) + } + mocked_dask_client.send_computation_tasks.reset_mock() + mocked_dask_client.get_tasks_status.assert_not_called() + mocked_dask_client.get_task_result.assert_not_called() + # there is a second run of the scheduler to move comp_runs to pending, the rest does not change + await scheduler.apply( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ) + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PENDING, + where_statement=(comp_runs.c.user_id == published_project.project.prj_owner) + & (comp_runs.c.project_uuid == f"{published_project.project.uuid}"), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_has_calls( + calls=[mock.call([p.job_id for p in expected_pending_tasks])], any_order=True + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_not_called() + return expected_pending_tasks, task_to_callback_mapping + + +@pytest.fixture +async def instrumentation_rabbit_client_parser( + create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture +) -> AsyncIterator[mock.AsyncMock]: + client = create_rabbitmq_client("instrumentation_pytest_consumer") + mock = mocker.AsyncMock(return_value=True) + queue_name, _ = await client.subscribe( + InstrumentationRabbitMessage.get_channel_name(), mock + ) + yield mock + await client.unsubscribe(queue_name) + + +@pytest.fixture +async def resource_tracking_rabbit_client_parser( + create_rabbitmq_client: Callable[[str], RabbitMQClient], mocker: MockerFixture +) -> AsyncIterator[mock.AsyncMock]: + client = 
create_rabbitmq_client("resource_tracking_pytest_consumer") + mock = mocker.AsyncMock(return_value=True) + queue_name, _ = await client.subscribe( + RabbitResourceTrackingBaseMessage.get_channel_name(), mock + ) + yield mock + await client.unsubscribe(queue_name) + + +async def _assert_message_received( + mocked_message_parser: mock.AsyncMock, + expected_call_count: int, + message_parser: Callable, +) -> list: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + print( + f"--> waiting for rabbitmq message [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) + assert mocked_message_parser.call_count == expected_call_count + print( + f"<-- rabbitmq message received after [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) + parsed_messages = [ + message_parser(mocked_message_parser.call_args_list[c].args[0]) + for c in range(expected_call_count) + ] + + mocked_message_parser.reset_mock() + return parsed_messages + + +def _with_mock_send_computation_tasks( + tasks: list[CompTaskAtDB], mocked_dask_client: mock.MagicMock +) -> mock.Mock: + node_id_to_job_id_map = {task.node_id: task.job_id for task in tasks} + + async def _send_computation_tasks( + *args, tasks: dict[NodeID, Image], **kwargs + ) -> list[PublishedComputationTask]: + for node_id in tasks: + assert NodeID(f"{node_id}") in node_id_to_job_id_map + return [ + PublishedComputationTask( + node_id=NodeID(f"{node_id}"), + job_id=DaskJobID(node_id_to_job_id_map[NodeID(f"{node_id}")]), + ) + for node_id in tasks + ] # type: ignore + + mocked_dask_client.send_computation_tasks.side_effect = _send_computation_tasks + return mocked_dask_client.send_computation_tasks + + +async def _trigger_progress_event( + scheduler: BaseCompScheduler, + *, + job_id: str, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, +) -> None: + event = TaskProgressEvent( + job_id=job_id, + progress=0, + task_owner=TaskOwner( + user_id=user_id, + project_id=project_id, + node_id=node_id, + parent_project_id=None, + parent_node_id=None, + ), + ) + await cast(DaskScheduler, scheduler)._task_progress_change_handler( # noqa: SLF001 + (arrow.utcnow().timestamp(), event.model_dump_json()) + ) + + +async def test_proper_pipeline_is_scheduled( # noqa: PLR0915 + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + initialized_app: FastAPI, + mocked_dask_client: mock.MagicMock, + scheduler_api: BaseCompScheduler, + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + mocked_parse_output_data_fct: mock.Mock, + mocked_clean_task_output_and_log_files_if_invalid: mock.Mock, + instrumentation_rabbit_client_parser: mock.AsyncMock, + resource_tracking_rabbit_client_parser: mock.AsyncMock, + run_metadata: RunMetadataDict, +): + with_disabled_auto_scheduling.assert_called_once() + _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client) + + # + # Initiate new pipeline run + # + run_in_db, expected_published_tasks = await _assert_start_pipeline( + initialized_app, + sqlalchemy_async_engine=sqlalchemy_async_engine, + published_project=published_project, + run_metadata=run_metadata, + ) + with_disabled_scheduler_publisher.assert_called() + + # ------------------------------------------------------------------------------- + # 1. 
first run will move comp_tasks to PENDING so the dask-worker can take them + expected_pending_tasks, _ = await _assert_publish_in_dask_backend( + sqlalchemy_async_engine, + published_project, + expected_published_tasks, + mocked_dask_client, + scheduler_api, + ) + + # ------------------------------------------------------------------------------- + # 2.1. the dask-worker takes the task + exp_started_task = expected_pending_tasks[0] + expected_pending_tasks.remove(exp_started_task) + + async def _return_1st_task_running(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.STARTED + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_running + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.STARTED, + expected_progress=None, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, # since we bypass the API entrypoint this is correct + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in (exp_started_task, *expected_pending_tasks)], + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_not_called() + + # ------------------------------------------------------------------------------- + # 3. 
the dask-worker starts processing a task here we simulate a progress event + assert exp_started_task.job_id + assert exp_started_task.project_id + assert exp_started_task.node_id + assert published_project.project.prj_owner + await _trigger_progress_event( + scheduler_api, + job_id=exp_started_task.job_id, + user_id=published_project.project.prj_owner, + project_id=exp_started_task.project_id, + node_id=exp_started_task.node_id, + ) + + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + # comp_run, the comp_task switch to STARTED + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.STARTED, + expected_progress=0, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in (exp_started_task, *expected_pending_tasks)], + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_not_called() + # check the metrics are properly published + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 1, + InstrumentationRabbitMessage.model_validate_json, + ) + assert messages[0].metrics == "service_started" + assert messages[0].service_uuid == exp_started_task.node_id + + # check the RUT messages are properly published + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStartedMessage.model_validate_json, + ) + assert messages[0].node_id == exp_started_task.node_id + + # ------------------------------------------------------------------------------- + # 4. 
the dask-worker completed the task successfully + async def _return_1st_task_success(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.SUCCESS + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_success + + async def _return_random_task_result(job_id) -> TaskOutputData: + return TaskOutputData.model_validate({"out_1": None, "out_2": 45}) + + mocked_dask_client.get_task_result.side_effect = _return_random_task_result + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.SUCCESS, + expected_progress=1, + ) + # check metrics are published + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 1, + InstrumentationRabbitMessage.model_validate_json, + ) + assert messages[0].metrics == "service_stopped" + assert messages[0].service_uuid == exp_started_task.node_id + # check RUT messages are published + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStoppedMessage.model_validate_json, + ) + + completed_tasks = [exp_started_task] + next_pending_task = published_project.tasks[2] + expected_pending_tasks.append(next_pending_task) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[ + p.node_id + for p in published_project.tasks + if p not in expected_pending_tasks + completed_tasks + ], + expected_state=RunningState.PUBLISHED, + expected_progress=None, # since we bypass the API entrypoint this is correct + ) + mocked_dask_client.send_computation_tasks.assert_called_once_with( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + tasks={ + f"{next_pending_task.node_id}": next_pending_task.image, + }, + callback=mock.ANY, + metadata=mock.ANY, + hardware_info=mock.ANY, + resource_tracking_run_id=mock.ANY, + ) + mocked_dask_client.send_computation_tasks.reset_mock() + mocked_dask_client.get_tasks_status.assert_has_calls( + calls=[ + mock.call([p.job_id for p in completed_tasks + expected_pending_tasks[:1]]) + ], + any_order=True, + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_called_once_with( + completed_tasks[0].job_id + ) + mocked_dask_client.get_task_result.reset_mock() + mocked_parse_output_data_fct.assert_called_once_with( + mock.ANY, + completed_tasks[0].job_id, + await _return_random_task_result(completed_tasks[0].job_id), + ) + mocked_parse_output_data_fct.reset_mock() + + # ------------------------------------------------------------------------------- + # 6. 
the dask-worker starts processing a task + exp_started_task = next_pending_task + + async def _return_2nd_task_running(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.STARTED + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_2nd_task_running + # trigger the scheduler, run state should keep to STARTED, task should be as well + assert exp_started_task.job_id + await _trigger_progress_event( + scheduler_api, + job_id=exp_started_task.job_id, + user_id=published_project.project.prj_owner, + project_id=exp_started_task.project_id, + node_id=exp_started_task.node_id, + ) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.STARTED, + expected_progress=0, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + expected_pending_tasks.reverse() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in expected_pending_tasks] + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_not_called() + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 1, + InstrumentationRabbitMessage.model_validate_json, + ) + assert messages[0].metrics == "service_started" + assert messages[0].service_uuid == exp_started_task.node_id + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStartedMessage.model_validate_json, + ) + assert messages[0].node_id == exp_started_task.node_id + + # ------------------------------------------------------------------------------- + # 7. 
the task fails + async def _return_2nd_task_failed(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.FAILED + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_2nd_task_failed + mocked_dask_client.get_task_result.side_effect = None + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_clean_task_output_and_log_files_if_invalid.assert_called_once() + mocked_clean_task_output_and_log_files_if_invalid.reset_mock() + + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.FAILED, + expected_progress=1, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in expected_pending_tasks] + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_called_once_with(exp_started_task.job_id) + mocked_dask_client.get_task_result.reset_mock() + mocked_parse_output_data_fct.assert_not_called() + expected_pending_tasks.remove(exp_started_task) + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 1, + InstrumentationRabbitMessage.model_validate_json, + ) + assert messages[0].metrics == "service_stopped" + assert messages[0].service_uuid == exp_started_task.node_id + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStoppedMessage.model_validate_json, + ) + + # ------------------------------------------------------------------------------- + # 8. 
the last task shall succeed + exp_started_task = expected_pending_tasks[0] + + async def _return_3rd_task_success(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.SUCCESS + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_3rd_task_success + mocked_dask_client.get_task_result.side_effect = _return_random_task_result + + # trigger the scheduler, it should switch to FAILED, as we are done + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_clean_task_output_and_log_files_if_invalid.assert_not_called() + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.FAILED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.SUCCESS, + expected_progress=1, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in expected_pending_tasks] + ) + mocked_dask_client.get_task_result.assert_called_once_with(exp_started_task.job_id) + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 2, + InstrumentationRabbitMessage.model_validate_json, + ) + + # NOTE: the service was fast and went directly to success + def _parser(x) -> RabbitResourceTrackingMessages: + return TypeAdapter(RabbitResourceTrackingMessages).validate_json(x) + + assert messages[0].metrics == "service_started" + assert messages[0].service_uuid == exp_started_task.node_id + assert messages[1].metrics == "service_stopped" + assert messages[1].service_uuid == exp_started_task.node_id + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 2, + _parser, + ) + assert isinstance(messages[0], RabbitResourceTrackingStartedMessage) + assert isinstance(messages[1], RabbitResourceTrackingStoppedMessage) + + +@pytest.fixture +async def with_started_project( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + initialized_app: FastAPI, + sqlalchemy_async_engine: AsyncEngine, + publish_project: Callable[[], Awaitable[PublishedProject]], + mocked_dask_client: mock.Mock, + run_metadata: RunMetadataDict, + scheduler_api: BaseCompScheduler, + instrumentation_rabbit_client_parser: mock.AsyncMock, + resource_tracking_rabbit_client_parser: mock.AsyncMock, +) -> RunningProject: + with_disabled_auto_scheduling.assert_called_once() + published_project = await publish_project() + # + # 1. Initiate new pipeline run + # + run_in_db, expected_published_tasks = await _assert_start_pipeline( + initialized_app, + sqlalchemy_async_engine=sqlalchemy_async_engine, + published_project=published_project, + run_metadata=run_metadata, + ) + with_disabled_scheduler_publisher.assert_called_once() + + # + # 2. 
This runs the scheduler until the project is scheduled in the back-end
+    #
+    (
+        expected_pending_tasks,
+        task_to_callback_mapping,
+    ) = await _assert_publish_in_dask_backend(
+        sqlalchemy_async_engine,
+        published_project,
+        expected_published_tasks,
+        mocked_dask_client,
+        scheduler_api,
+    )
+
+    #
+    # The dask-worker can take a job when it is PENDING, but the dask scheduler makes
+    # no distinction between PENDING and STARTED
+    #
+    exp_started_task = expected_pending_tasks[0]
+    expected_pending_tasks.remove(exp_started_task)
+
+    async def _return_1st_task_running(job_ids: list[str]) -> list[RunningState]:
+        return [
+            (
+                RunningState.STARTED
+                if job_id == exp_started_task.job_id
+                else RunningState.PENDING
+            )
+            for job_id in job_ids
+        ]
+
+    assert isinstance(mocked_dask_client.get_tasks_status, mock.Mock)
+    assert isinstance(mocked_dask_client.send_computation_tasks, mock.Mock)
+    assert isinstance(mocked_dask_client.get_task_result, mock.Mock)
+    mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_running
+    await scheduler_api.apply(
+        user_id=run_in_db.user_id,
+        project_id=run_in_db.project_uuid,
+        iteration=run_in_db.iteration,
+    )
+    await assert_comp_runs(
+        sqlalchemy_async_engine,
+        expected_total=1,
+        expected_state=RunningState.STARTED,
+        where_statement=and_(
+            comp_runs.c.user_id == published_project.project.prj_owner,
+            comp_runs.c.project_uuid == f"{published_project.project.uuid}",
+        ),
+    )
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=published_project.project.uuid,
+        task_ids=[exp_started_task.node_id],
+        expected_state=RunningState.STARTED,
+        expected_progress=None,
+    )
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=published_project.project.uuid,
+        task_ids=[p.node_id for p in expected_pending_tasks],
+        expected_state=RunningState.PENDING,
+        expected_progress=None,
+    )
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=published_project.project.uuid,
+        task_ids=[p.node_id for p in expected_published_tasks],
+        expected_state=RunningState.PUBLISHED,
+        expected_progress=None,  # since we bypass the API entrypoint this is correct
+    )
+    mocked_dask_client.send_computation_tasks.assert_not_called()
+    mocked_dask_client.get_tasks_status.assert_called_once_with(
+        [p.job_id for p in (exp_started_task, *expected_pending_tasks)],
+    )
+    mocked_dask_client.get_tasks_status.reset_mock()
+    mocked_dask_client.get_task_result.assert_not_called()
+
+    # -------------------------------------------------------------------------------
+    # 4. 
the dask-worker starts processing a task here we simulate a progress event + assert exp_started_task.job_id + assert exp_started_task.project_id + assert exp_started_task.node_id + assert published_project.project.prj_owner + await _trigger_progress_event( + scheduler_api, + job_id=exp_started_task.job_id, + user_id=published_project.project.prj_owner, + project_id=exp_started_task.project_id, + node_id=exp_started_task.node_id, + ) + + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + # comp_run, the comp_task switch to STARTED + run_in_db = ( + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.STARTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + )[0] + tasks_in_db = await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[exp_started_task.node_id], + expected_state=RunningState.STARTED, + expected_progress=0, + ) + tasks_in_db += await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_pending_tasks], + expected_state=RunningState.PENDING, + expected_progress=None, + ) + tasks_in_db += await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[p.node_id for p in expected_published_tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + mocked_dask_client.send_computation_tasks.assert_not_called() + mocked_dask_client.get_tasks_status.assert_called_once_with( + [p.job_id for p in (exp_started_task, *expected_pending_tasks)], + ) + mocked_dask_client.get_tasks_status.reset_mock() + mocked_dask_client.get_task_result.assert_not_called() + # check the metrics are properly published + messages = await _assert_message_received( + instrumentation_rabbit_client_parser, + 1, + InstrumentationRabbitMessage.model_validate_json, + ) + assert messages[0].metrics == "service_started" + assert messages[0].service_uuid == exp_started_task.node_id + + # check the RUT messages are properly published + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStartedMessage.model_validate_json, + ) + assert messages[0].node_id == exp_started_task.node_id + + return RunningProject( + published_project.user, + published_project.project, + published_project.pipeline, + tasks_in_db, + runs=run_in_db, + task_to_callback_mapping=task_to_callback_mapping, + ) + + +@pytest.fixture +def mocked_worker_publisher(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_base.request_pipeline_scheduling", + autospec=True, + ) + + +async def test_completed_task_triggers_new_scheduling_task( + mocked_worker_publisher: mock.Mock, + with_started_project: RunningProject, +): + """When a pipeline job completes, the Dask backend provides a callback + that runs in a separate thread. 
We use that callback to ask the
+    director-v2 computational scheduler manager for a new scheduling round.
+    After digging through the distributed source code, this is a similar way to trigger that callback.
+    """
+    completed_node_id = with_started_project.tasks[0].node_id
+    callback = with_started_project.task_to_callback_mapping[completed_node_id]
+    await asyncio.to_thread(callback)
+
+    mocked_worker_publisher.assert_called_once_with(
+        mock.ANY,
+        mock.ANY,
+        user_id=with_started_project.runs.user_id,
+        project_id=with_started_project.runs.project_uuid,
+        iteration=with_started_project.runs.iteration,
+    )
+
+
+async def test_broken_pipeline_configuration_is_not_scheduled_and_aborted(
+    with_disabled_auto_scheduling: mock.Mock,
+    with_disabled_scheduler_publisher: mock.Mock,
+    initialized_app: FastAPI,
+    scheduler_api: BaseCompScheduler,
+    create_registered_user: Callable[..., dict[str, Any]],
+    project: Callable[..., Awaitable[ProjectAtDB]],
+    create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]],
+    fake_workbench_without_outputs: dict[str, Any],
+    fake_workbench_adjacency: dict[str, Any],
+    sqlalchemy_async_engine: AsyncEngine,
+    run_metadata: RunMetadataDict,
+):
+    """A pipeline whose comp_tasks are missing should not be scheduled.
+    It shall be aborted and shown as such in the comp_runs db"""
+    user = create_registered_user()
+    sleepers_project = await project(user, workbench=fake_workbench_without_outputs)
+    await create_pipeline(
+        project_id=f"{sleepers_project.uuid}",
+        dag_adjacency_list=fake_workbench_adjacency,
+    )
+    await assert_comp_runs_empty(sqlalchemy_async_engine)
+
+    #
+    # Initiate new pipeline scheduling
+    #
+    await run_new_pipeline(
+        initialized_app,
+        user_id=user["id"],
+        project_id=sleepers_project.uuid,
+        run_metadata=run_metadata,
+        use_on_demand_clusters=False,
+    )
+    with_disabled_scheduler_publisher.assert_called_once()
+    # we shall have a new comp_runs row with the new pipeline job
+    run_entry = (
+        await assert_comp_runs(
+            sqlalchemy_async_engine,
+            expected_total=1,
+            expected_state=RunningState.PUBLISHED,
+            where_statement=(comp_runs.c.user_id == user["id"])
+            & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}"),
+        )
+    )[0]
+
+    #
+    # Trigger scheduling manually. Since the pipeline is broken, it shall be aborted
+    #
+    await scheduler_api.apply(
+        user_id=run_entry.user_id,
+        project_id=run_entry.project_uuid,
+        iteration=run_entry.iteration,
+    )
+    await assert_comp_runs(
+        sqlalchemy_async_engine,
+        expected_total=1,
+        expected_state=RunningState.ABORTED,
+        where_statement=(comp_runs.c.user_id == user["id"])
+        & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}"),
+    )
+
+
+async def test_task_progress_triggers(
+    with_disabled_auto_scheduling: mock.Mock,
+    with_disabled_scheduler_publisher: mock.Mock,
+    initialized_app: FastAPI,
+    mocked_dask_client: mock.MagicMock,
+    scheduler_api: BaseCompScheduler,
+    sqlalchemy_async_engine: AsyncEngine,
+    published_project: PublishedProject,
+    mocked_parse_output_data_fct: mock.Mock,
+    mocked_clean_task_output_and_log_files_if_invalid: mock.Mock,
+    run_metadata: RunMetadataDict,
+):
+    _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client)
+    _run_in_db, expected_published_tasks = await _assert_start_pipeline(
+        initialized_app,
+        sqlalchemy_async_engine=sqlalchemy_async_engine,
+        published_project=published_project,
+        run_metadata=run_metadata,
+    )
+    # -------------------------------------------------------------------------------
+    # 1. 
first run will move comp_tasks to PENDING so the dask-worker can take them + expected_pending_tasks, _ = await _assert_publish_in_dask_backend( + sqlalchemy_async_engine, + published_project, + expected_published_tasks, + mocked_dask_client, + scheduler_api, + ) + + # send some progress + started_task = expected_pending_tasks[0] + assert started_task.job_id + assert published_project.project.prj_owner + for progress in [-1, 0, 0.3, 0.5, 1, 1.5, 0.7, 0, 20]: + progress_event = TaskProgressEvent( + job_id=started_task.job_id, + progress=progress, + task_owner=TaskOwner( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + node_id=started_task.node_id, + parent_node_id=None, + parent_project_id=None, + ), + ) + await cast( # noqa: SLF001 + DaskScheduler, scheduler_api + )._task_progress_change_handler( + (arrow.utcnow().timestamp(), progress_event.model_dump_json()) + ) + # NOTE: not sure whether it should switch to STARTED.. it would make sense + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[started_task.node_id], + expected_state=RunningState.STARTED, + expected_progress=min(max(0, progress), 1), + ) + + +@pytest.mark.parametrize( + "backend_error", + [ + ComputationalBackendNotConnectedError(msg="faked disconnected backend"), + ComputationalSchedulerChangedError( + original_scheduler_id="some_old_scheduler_id", + current_scheduler_id="some_new_scheduler_id", + ), + ], +) +async def test_handling_of_disconnected_scheduler_dask( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + initialized_app: FastAPI, + mocked_dask_client: mock.MagicMock, + scheduler_api: BaseCompScheduler, + sqlalchemy_async_engine: AsyncEngine, + mocker: MockerFixture, + published_project: PublishedProject, + backend_error: ComputationalSchedulerError, + run_metadata: RunMetadataDict, +): + # this will create a non connected backend issue that will trigger re-connection + mocked_dask_client_send_task = mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.DaskClient.send_computation_tasks", + side_effect=backend_error, + ) + assert mocked_dask_client_send_task + + # running the pipeline will now raise and the tasks are set back to PUBLISHED + assert published_project.project.prj_owner + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + + # since there is no cluster, there is no dask-scheduler, + # the tasks shall all still be in PUBLISHED state now + runs_in_db = await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PUBLISHED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + run_in_db = runs_in_db[0] + + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in published_project.tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + # on the next iteration of the pipeline it will try to re-connect + # now try to abort the tasks since we are wondering what is happening, this should auto-trigger the scheduler + await stop_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + 
) + # we ensure the scheduler was run + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + # after this step the tasks are marked as ABORTED + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[ + t.node_id + for t in published_project.tasks + if t.node_class == NodeClass.COMPUTATIONAL + ], + expected_state=RunningState.ABORTED, + expected_progress=1, + ) + # then we have another scheduler run + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + # now the run should be ABORTED + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.ABORTED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + + +@pytest.fixture +def with_disabled_unknown_max_time(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_base._MAX_WAITING_TIME_FOR_UNKNOWN_TASKS", + new=datetime.timedelta(0), + ) + + +@dataclass(frozen=True, kw_only=True) +class RebootState: + dask_task_status: RunningState + task_result: Exception | TaskOutputData + expected_task_state_group1: RunningState + expected_task_progress_group1: float + expected_task_state_group2: RunningState + expected_task_progress_group2: float + expected_run_state: RunningState + + +@pytest.mark.parametrize( + "reboot_state", + [ + pytest.param( + RebootState( + dask_task_status=RunningState.UNKNOWN, + task_result=ComputationalBackendTaskNotFoundError(job_id="fake_job_id"), + expected_task_state_group1=RunningState.FAILED, + expected_task_progress_group1=1, + expected_task_state_group2=RunningState.ABORTED, + expected_task_progress_group2=1, + expected_run_state=RunningState.FAILED, + ), + id="reboot with lost tasks", + ), + pytest.param( + RebootState( + dask_task_status=RunningState.ABORTED, + task_result=TaskCancelledError(job_id="fake_job_id"), + expected_task_state_group1=RunningState.ABORTED, + expected_task_progress_group1=1, + expected_task_state_group2=RunningState.ABORTED, + expected_task_progress_group2=1, + expected_run_state=RunningState.ABORTED, + ), + id="reboot with aborted tasks", + ), + pytest.param( + RebootState( + dask_task_status=RunningState.FAILED, + task_result=ValueError("some error during the call"), + expected_task_state_group1=RunningState.FAILED, + expected_task_progress_group1=1, + expected_task_state_group2=RunningState.ABORTED, + expected_task_progress_group2=1, + expected_run_state=RunningState.FAILED, + ), + id="reboot with failed tasks", + ), + pytest.param( + RebootState( + dask_task_status=RunningState.STARTED, + task_result=ComputationalBackendTaskResultsNotReadyError( + job_id="fake_job_id" + ), + expected_task_state_group1=RunningState.STARTED, + expected_task_progress_group1=0, + expected_task_state_group2=RunningState.STARTED, + expected_task_progress_group2=0, + expected_run_state=RunningState.STARTED, + ), + id="reboot with running tasks", + ), + pytest.param( + RebootState( + dask_task_status=RunningState.SUCCESS, + task_result=TaskOutputData.model_validate({"whatever_output": 123}), + expected_task_state_group1=RunningState.SUCCESS, + expected_task_progress_group1=1, + expected_task_state_group2=RunningState.SUCCESS, + expected_task_progress_group2=1, + 
expected_run_state=RunningState.SUCCESS,
+            ),
+            id="reboot with completed tasks",
+        ),
+    ],
+)
+async def test_handling_scheduled_tasks_after_director_reboots(
+    with_disabled_auto_scheduling: mock.Mock,
+    with_disabled_scheduler_publisher: mock.Mock,
+    with_disabled_unknown_max_time: None,
+    mocked_dask_client: mock.MagicMock,
+    sqlalchemy_async_engine: AsyncEngine,
+    running_project: RunningProject,
+    scheduler_api: BaseCompScheduler,
+    mocked_parse_output_data_fct: mock.Mock,
+    mocked_clean_task_output_fct: mock.Mock,
+    reboot_state: RebootState,
+):
+    """After the dask client is rebooted, or the director-v2 is restarted, the dv-2 internal scheduler
+    shall continue scheduling correctly, even though the task might have continued to run
+    in the dask-scheduler."""
+
+    async def mocked_get_tasks_status(job_ids: list[str]) -> list[RunningState]:
+        return [reboot_state.dask_task_status for j in job_ids]
+
+    mocked_dask_client.get_tasks_status.side_effect = mocked_get_tasks_status
+
+    async def mocked_get_task_result(_job_id: str) -> TaskOutputData:
+        if isinstance(reboot_state.task_result, Exception):
+            raise reboot_state.task_result
+        return reboot_state.task_result
+
+    mocked_dask_client.get_task_result.side_effect = mocked_get_task_result
+    assert running_project.project.prj_owner
+    await scheduler_api.apply(
+        user_id=running_project.project.prj_owner,
+        project_id=running_project.project.uuid,
+        iteration=1,
+    )
+    # the status will be called once for all RUNNING tasks
+    mocked_dask_client.get_tasks_status.assert_called_once()
+    if reboot_state.expected_run_state in COMPLETED_STATES:
+        mocked_dask_client.get_task_result.assert_has_calls(
+            [
+                mock.call(t.job_id)
+                for t in running_project.tasks
+                if t.node_class == NodeClass.COMPUTATIONAL
+            ],
+            any_order=True,
+        )
+    else:
+        mocked_dask_client.get_task_result.assert_not_called()
+    if reboot_state.expected_run_state in [RunningState.ABORTED, RunningState.FAILED]:
+        # the clean up of the outputs should be done
+        mocked_clean_task_output_fct.assert_has_calls(
+            [
+                mock.call(
+                    mock.ANY,
+                    running_project.project.prj_owner,
+                    running_project.project.uuid,
+                    t.node_id,
+                )
+                for t in running_project.tasks
+                if t.node_class == NodeClass.COMPUTATIONAL
+            ],
+            any_order=True,
+        )
+    else:
+        mocked_clean_task_output_fct.assert_not_called()
+
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=running_project.project.uuid,
+        task_ids=[
+            running_project.tasks[1].node_id,
+            running_project.tasks[3].node_id,
+        ],
+        expected_state=reboot_state.expected_task_state_group1,
+        expected_progress=reboot_state.expected_task_progress_group1,
+    )
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=running_project.project.uuid,
+        task_ids=[running_project.tasks[2].node_id, running_project.tasks[4].node_id],
+        expected_state=reboot_state.expected_task_state_group2,
+        expected_progress=reboot_state.expected_task_progress_group2,
+    )
+    assert running_project.project.prj_owner
+    await assert_comp_runs(
+        sqlalchemy_async_engine,
+        expected_total=1,
+        expected_state=reboot_state.expected_run_state,
+        where_statement=and_(
+            comp_runs.c.user_id == running_project.project.prj_owner,
+            comp_runs.c.project_uuid == f"{running_project.project.uuid}",
+        ),
+    )
+
+
+async def test_handling_cancellation_of_jobs_after_reboot(
+    with_disabled_auto_scheduling: mock.Mock,
+    with_disabled_scheduler_publisher: mock.Mock,
+    mocked_dask_client: mock.MagicMock,
+    sqlalchemy_async_engine: AsyncEngine,
+    running_project_mark_for_cancellation: 
RunningProject,
+    scheduler_api: BaseCompScheduler,
+    mocked_parse_output_data_fct: mock.Mock,
+    mocked_clean_task_output_fct: mock.Mock,
+):
+    """A running pipeline was cancelled by a user and the DV-2 was restarted BEFORE
+    it could actually cancel the task. On reboot the DV-2 shall recover
+    and actually cancel the pipeline properly"""
+
+    # check initial status
+    run_in_db = (
+        await assert_comp_runs(
+            sqlalchemy_async_engine,
+            expected_total=1,
+            expected_state=RunningState.STARTED,
+            where_statement=and_(
+                comp_runs.c.user_id
+                == running_project_mark_for_cancellation.project.prj_owner,
+                comp_runs.c.project_uuid
+                == f"{running_project_mark_for_cancellation.project.uuid}",
+            ),
+        )
+    )[0]
+
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=running_project_mark_for_cancellation.project.uuid,
+        task_ids=[t.node_id for t in running_project_mark_for_cancellation.tasks],
+        expected_state=RunningState.STARTED,
+        expected_progress=0,
+    )
+
+    # the backend shall report the tasks as running
+    async def mocked_get_tasks_status(job_ids: list[str]) -> list[RunningState]:
+        return [RunningState.STARTED for j in job_ids]
+
+    mocked_dask_client.get_tasks_status.side_effect = mocked_get_tasks_status
+    # Running the scheduler should actually cancel the run now
+    await scheduler_api.apply(
+        user_id=run_in_db.user_id,
+        project_id=run_in_db.project_uuid,
+        iteration=run_in_db.iteration,
+    )
+    mocked_dask_client.abort_computation_task.assert_called()
+    assert mocked_dask_client.abort_computation_task.call_count == len(
+        [
+            t.node_id
+            for t in running_project_mark_for_cancellation.tasks
+            if t.node_class == NodeClass.COMPUTATIONAL
+        ]
+    )
+    # in the DB they are still running, they will be stopped in the next iteration
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=running_project_mark_for_cancellation.project.uuid,
+        task_ids=[
+            t.node_id
+            for t in running_project_mark_for_cancellation.tasks
+            if t.node_class == NodeClass.COMPUTATIONAL
+        ],
+        expected_state=RunningState.STARTED,
+        expected_progress=0,
+    )
+    await assert_comp_runs(
+        sqlalchemy_async_engine,
+        expected_total=1,
+        expected_state=RunningState.STARTED,
+        where_statement=and_(
+            comp_runs.c.user_id
+            == running_project_mark_for_cancellation.project.prj_owner,
+            comp_runs.c.project_uuid
+            == f"{running_project_mark_for_cancellation.project.uuid}",
+        ),
+    )
+
+    # the backend shall now report the tasks as aborted
+    async def mocked_get_tasks_status_aborted(
+        job_ids: list[str],
+    ) -> list[RunningState]:
+        return [RunningState.ABORTED for j in job_ids]
+
+    mocked_dask_client.get_tasks_status.side_effect = mocked_get_tasks_status_aborted
+
+    async def _return_random_task_result(job_id) -> TaskOutputData:
+        raise TaskCancelledError
+
+    mocked_dask_client.get_task_result.side_effect = _return_random_task_result
+    await scheduler_api.apply(
+        user_id=run_in_db.user_id,
+        project_id=run_in_db.project_uuid,
+        iteration=run_in_db.iteration,
+    )
+    # now should be stopped
+    await assert_comp_tasks(
+        sqlalchemy_async_engine,
+        project_uuid=running_project_mark_for_cancellation.project.uuid,
+        task_ids=[
+            t.node_id
+            for t in running_project_mark_for_cancellation.tasks
+            if t.node_class == NodeClass.COMPUTATIONAL
+        ],
+        expected_state=RunningState.ABORTED,
+        expected_progress=1,
+    )
+    await assert_comp_runs(
+        sqlalchemy_async_engine,
+        expected_total=1,
+        expected_state=RunningState.ABORTED,
+        where_statement=and_(
+            comp_runs.c.user_id
+            == running_project_mark_for_cancellation.project.prj_owner,
+            
comp_runs.c.project_uuid + == f"{running_project_mark_for_cancellation.project.uuid}", + ), + ) + mocked_clean_task_output_fct.assert_called() + + +@pytest.fixture +def with_fast_service_heartbeat_s(monkeypatch: pytest.MonkeyPatch) -> int: + seconds = 1 + monkeypatch.setenv( + "SERVICE_TRACKING_HEARTBEAT", f"{datetime.timedelta(seconds=seconds)}" + ) + return seconds + + +async def test_running_pipeline_triggers_heartbeat( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + with_fast_service_heartbeat_s: int, + initialized_app: FastAPI, + mocked_dask_client: mock.MagicMock, + scheduler_api: BaseCompScheduler, + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + resource_tracking_rabbit_client_parser: mock.AsyncMock, + run_metadata: RunMetadataDict, +): + _with_mock_send_computation_tasks(published_project.tasks, mocked_dask_client) + run_in_db, expected_published_tasks = await _assert_start_pipeline( + initialized_app, + sqlalchemy_async_engine=sqlalchemy_async_engine, + published_project=published_project, + run_metadata=run_metadata, + ) + # ------------------------------------------------------------------------------- + # 1. first run will move comp_tasks to PENDING so the dask-worker can take them + expected_pending_tasks, _ = await _assert_publish_in_dask_backend( + sqlalchemy_async_engine, + published_project, + expected_published_tasks, + mocked_dask_client, + scheduler_api, + ) + # ------------------------------------------------------------------------------- + # 2. the "worker" starts processing a task + exp_started_task = expected_pending_tasks[0] + expected_pending_tasks.remove(exp_started_task) + + async def _return_1st_task_running(job_ids: list[str]) -> list[RunningState]: + return [ + ( + RunningState.STARTED + if job_id == exp_started_task.job_id + else RunningState.PENDING + ) + for job_id in job_ids + ] + + mocked_dask_client.get_tasks_status.side_effect = _return_1st_task_running + assert exp_started_task.job_id + assert published_project.project.prj_owner + await _trigger_progress_event( + scheduler_api, + job_id=exp_started_task.job_id, + user_id=published_project.project.prj_owner, + project_id=exp_started_task.project_id, + node_id=exp_started_task.node_id, + ) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingStartedMessage.model_validate_json, + ) + assert messages[0].node_id == exp_started_task.node_id + + # ------------------------------------------------------------------------------- + # 3. wait a bit and run again we should get another heartbeat, but only one! + await asyncio.sleep(with_fast_service_heartbeat_s + 1) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingHeartbeatMessage.model_validate_json, + ) + assert isinstance(messages[0], RabbitResourceTrackingHeartbeatMessage) + + # ------------------------------------------------------------------------------- + # 4. wait a bit and run again we should get another heartbeat, but only one! 
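+    #    NOTE: even though the scheduler is applied twice below, a single heartbeat is
+    #    expected, presumably because heartbeats are throttled by SERVICE_TRACKING_HEARTBEAT
+    #    (set to 1 second here via the with_fast_service_heartbeat_s fixture)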
+ await asyncio.sleep(with_fast_service_heartbeat_s + 1) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + messages = await _assert_message_received( + resource_tracking_rabbit_client_parser, + 1, + RabbitResourceTrackingHeartbeatMessage.model_validate_json, + ) + assert isinstance(messages[0], RabbitResourceTrackingHeartbeatMessage) + + +@pytest.fixture +async def mocked_get_or_create_cluster(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._scheduler_dask.get_or_create_on_demand_cluster", + autospec=True, + ) + + +async def test_pipeline_with_on_demand_cluster_with_not_ready_backend_waits( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + initialized_app: FastAPI, + scheduler_api: BaseCompScheduler, + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + run_metadata: RunMetadataDict, + mocked_get_or_create_cluster: mock.Mock, + faker: Faker, +): + mocked_get_or_create_cluster.side_effect = ( + ComputationalBackendOnDemandNotReadyError( + eta=faker.time_delta(datetime.timedelta(hours=1)) + ) + ) + # running the pipeline will trigger a call to the clusters-keeper + assert published_project.project.prj_owner + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=True, + ) + + # we ask to use an on-demand cluster, therefore the tasks are published first + run_in_db = ( + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PUBLISHED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + )[0] + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in published_project.tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + mocked_get_or_create_cluster.assert_not_called() + # now it should switch to waiting + expected_waiting_tasks = [ + published_project.tasks[1], + published_project.tasks[3], + ] + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_get_or_create_cluster.assert_called() + assert mocked_get_or_create_cluster.call_count == 1 + mocked_get_or_create_cluster.reset_mock() + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.WAITING_FOR_CLUSTER, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in expected_waiting_tasks], + expected_state=RunningState.WAITING_FOR_CLUSTER, + expected_progress=None, + ) + # again will trigger the same response + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_get_or_create_cluster.assert_called() + assert mocked_get_or_create_cluster.call_count == 1 + 
mocked_get_or_create_cluster.reset_mock() + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.WAITING_FOR_CLUSTER, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in expected_waiting_tasks], + expected_state=RunningState.WAITING_FOR_CLUSTER, + expected_progress=None, + ) + + +@pytest.mark.parametrize( + "get_or_create_exception", + [ClustersKeeperNotAvailableError], +) +async def test_pipeline_with_on_demand_cluster_with_no_clusters_keeper_fails( + with_disabled_auto_scheduling: mock.Mock, + with_disabled_scheduler_publisher: mock.Mock, + initialized_app: FastAPI, + scheduler_api: BaseCompScheduler, + sqlalchemy_async_engine: AsyncEngine, + published_project: PublishedProject, + run_metadata: RunMetadataDict, + mocked_get_or_create_cluster: mock.Mock, + get_or_create_exception: Exception, +): + # needs to change: https://github.com/ITISFoundation/osparc-simcore/issues/6817 + + mocked_get_or_create_cluster.side_effect = get_or_create_exception + # running the pipeline will trigger a call to the clusters-keeper + assert published_project.project.prj_owner + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=True, + ) + + # we ask to use an on-demand cluster, therefore the tasks are published first + run_in_db = ( + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.PUBLISHED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + )[0] + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in published_project.tasks], + expected_state=RunningState.PUBLISHED, + expected_progress=None, + ) + # now it should switch to failed, the run still runs until the next iteration + expected_failed_tasks = [ + published_project.tasks[1], + published_project.tasks[3], + ] + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_get_or_create_cluster.assert_called() + assert mocked_get_or_create_cluster.call_count == 1 + mocked_get_or_create_cluster.reset_mock() + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.FAILED, + where_statement=and_( + comp_runs.c.user_id == published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in expected_failed_tasks], + expected_state=RunningState.FAILED, + expected_progress=1.0, + ) + # again will not re-trigger the call to clusters-keeper + await scheduler_api.apply( + user_id=run_in_db.user_id, + project_id=run_in_db.project_uuid, + iteration=run_in_db.iteration, + ) + mocked_get_or_create_cluster.assert_not_called() + await assert_comp_runs( + sqlalchemy_async_engine, + expected_total=1, + expected_state=RunningState.FAILED, + where_statement=and_( + comp_runs.c.user_id == 
published_project.project.prj_owner, + comp_runs.c.project_uuid == f"{published_project.project.uuid}", + ), + ) + await assert_comp_tasks( + sqlalchemy_async_engine, + project_uuid=published_project.project.uuid, + task_ids=[t.node_id for t in expected_failed_tasks], + expected_state=RunningState.FAILED, + expected_progress=1.0, + ) diff --git a/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py new file mode 100644 index 00000000000..1970797e5d7 --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/comp_scheduler/test_worker.py @@ -0,0 +1,142 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=no-value-for-parameter +# pylint:disable=protected-access +# pylint:disable=too-many-arguments +# pylint:disable=no-name-in-module +# pylint: disable=too-many-statements + +import asyncio +from collections.abc import Awaitable, Callable +from unittest import mock + +import pytest +from _helpers import PublishedProject +from fastapi import FastAPI +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from simcore_service_director_v2.models.comp_runs import RunMetadataDict +from simcore_service_director_v2.modules.comp_scheduler._manager import run_new_pipeline +from simcore_service_director_v2.modules.comp_scheduler._models import ( + SchedulePipelineRabbitMessage, +) +from simcore_service_director_v2.modules.comp_scheduler._worker import ( + _get_scheduler_worker, +) +from tenacity import retry, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = ["postgres", "rabbit", "redis"] +pytest_simcore_ops_services_selection = ["adminer"] + + +async def test_worker_starts_and_stops(initialized_app: FastAPI): + assert _get_scheduler_worker(initialized_app) is not None + + +@pytest.fixture +def mock_schedule_pipeline(mocker: MockerFixture) -> mock.Mock: + mock_scheduler_worker = mock.Mock() + mock_scheduler_worker.schedule_pipeline = mocker.AsyncMock(return_value=True) + return mock_scheduler_worker + + +@pytest.fixture +def mocked_get_scheduler_worker( + mocker: MockerFixture, + mock_schedule_pipeline: mock.Mock, +) -> mock.Mock: + # Mock `_get_scheduler_worker` to return our mock scheduler + return mocker.patch( + "simcore_service_director_v2.modules.comp_scheduler._worker._get_scheduler_worker", + return_value=mock_schedule_pipeline, + ) + + +async def test_worker_properly_autocalls_scheduler_api( + with_disabled_auto_scheduling: mock.Mock, + initialized_app: FastAPI, + mocked_get_scheduler_worker: mock.Mock, + published_project: PublishedProject, + run_metadata: RunMetadataDict, +): + assert published_project.project.prj_owner + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + mocked_get_scheduler_worker.assert_called_once_with(initialized_app) + mocked_get_scheduler_worker.return_value.apply.assert_called_once_with( + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + iteration=1, + ) + + +@pytest.fixture +async def mocked_scheduler_api(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + 
"simcore_service_director_v2.modules.comp_scheduler._scheduler_base.BaseCompScheduler.apply" + ) + + +@pytest.fixture +def with_scheduling_concurrency( + mock_env: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, scheduling_concurrency: int +) -> EnvVarsDict: + return mock_env | setenvs_from_dict( + monkeypatch, + {"COMPUTATIONAL_BACKEND_SCHEDULING_CONCURRENCY": f"{scheduling_concurrency}"}, + ) + + +@pytest.mark.parametrize("scheduling_concurrency", [1, 50, 100]) +@pytest.mark.parametrize( + "queue_name", [SchedulePipelineRabbitMessage.get_channel_name()] +) +async def test_worker_scheduling_parallelism( + rabbit_service: RabbitSettings, + ensure_parametrized_queue_is_empty: None, + scheduling_concurrency: int, + with_scheduling_concurrency: EnvVarsDict, + with_disabled_auto_scheduling: mock.Mock, + mocked_scheduler_api: mock.Mock, + initialized_app: FastAPI, + publish_project: Callable[[], Awaitable[PublishedProject]], + run_metadata: RunMetadataDict, +): + with_disabled_auto_scheduling.assert_called_once() + + async def _side_effect(*args, **kwargs): + await asyncio.sleep(10) + + mocked_scheduler_api.side_effect = _side_effect + + async def _project_pipeline_creation_workflow() -> None: + published_project = await publish_project() + assert published_project.project.prj_owner + await run_new_pipeline( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + run_metadata=run_metadata, + use_on_demand_clusters=False, + ) + + # whatever scheduling concurrency we call in here, we shall always see the same number of calls to the scheduler + await asyncio.gather( + *(_project_pipeline_creation_workflow() for _ in range(scheduling_concurrency)) + ) + # the call to run the pipeline is async so we need to wait here + mocked_scheduler_api.assert_called() + + @retry(stop=stop_after_delay(5), reraise=True, wait=wait_fixed(0.5)) + def _assert_expected_called() -> None: + assert mocked_scheduler_api.call_count == scheduling_concurrency + + _assert_expected_called() diff --git a/services/director-v2/tests/unit/with_dbs/conftest.py b/services/director-v2/tests/unit/with_dbs/conftest.py index de612a6db04..24cc0546414 100644 --- a/services/director-v2/tests/unit/with_dbs/conftest.py +++ b/services/director-v2/tests/unit/with_dbs/conftest.py @@ -5,60 +5,65 @@ # pylint:disable=no-name-in-module -import json -from datetime import datetime -from typing import Any, Callable, Dict, Iterator, List +import datetime +from collections.abc import AsyncIterator, Awaitable, Callable +from typing import Any, cast from uuid import uuid4 +import arrow import pytest import sqlalchemy as sa from _helpers import PublishedProject, RunningProject -from models_library.clusters import Cluster -from models_library.projects import ProjectAtDB +from dask_task_models_library.container_tasks.utils import generate_dask_job_id +from faker import Faker +from fastapi.encoders import jsonable_encoder +from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID from pydantic.main import BaseModel -from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups -from simcore_postgres_database.models.clusters import clusters from simcore_postgres_database.models.comp_pipeline import StateType, comp_pipeline from simcore_postgres_database.models.comp_runs import comp_runs from simcore_postgres_database.models.comp_tasks import comp_tasks -from simcore_service_director_v2.models.domains.comp_pipelines import 
CompPipelineAtDB -from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB, Image +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_runs import ( + CompRunsAtDB, + ProjectMetadataDict, + RunMetadataDict, +) +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB, Image from simcore_service_director_v2.utils.computations import to_node_class -from simcore_service_director_v2.utils.dask import generate_dask_job_id -from simcore_service_director_v2.utils.db import to_clusters_db -from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncEngine @pytest.fixture -def pipeline( - postgres_db: sa.engine.Engine, -) -> Iterator[Callable[..., CompPipelineAtDB]]: - created_pipeline_ids: List[str] = [] +async def create_pipeline( + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[Callable[..., Awaitable[CompPipelineAtDB]]]: + created_pipeline_ids: list[str] = [] - def creator(**pipeline_kwargs) -> CompPipelineAtDB: + async def _(**pipeline_kwargs) -> CompPipelineAtDB: pipeline_config = { "project_id": f"{uuid4()}", "dag_adjacency_list": {}, "state": StateType.NOT_STARTED, } pipeline_config.update(**pipeline_kwargs) - with postgres_db.connect() as conn: - result = conn.execute( + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( comp_pipeline.insert() .values(**pipeline_config) .returning(sa.literal_column("*")) ) - new_pipeline = CompPipelineAtDB.parse_obj(result.first()) + assert result + + new_pipeline = CompPipelineAtDB.model_validate(result.first()) created_pipeline_ids.append(f"{new_pipeline.project_id}") return new_pipeline - yield creator + yield _ # cleanup - with postgres_db.connect() as conn: - conn.execute( + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( comp_pipeline.delete().where( comp_pipeline.c.project_id.in_(created_pipeline_ids) ) @@ -66,40 +71,53 @@ def creator(**pipeline_kwargs) -> CompPipelineAtDB: @pytest.fixture -def tasks(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., List[CompTaskAtDB]]]: - created_task_ids: List[int] = [] +async def create_tasks( + sqlalchemy_async_engine: AsyncEngine, +) -> AsyncIterator[Callable[..., Awaitable[list[CompTaskAtDB]]]]: + created_task_ids: list[int] = [] - def creator( - user: Dict[str, Any], project: ProjectAtDB, **overrides_kwargs - ) -> List[CompTaskAtDB]: - created_tasks: List[CompTaskAtDB] = [] + async def _( + user: dict[str, Any], project: ProjectAtDB, **overrides_kwargs + ) -> list[CompTaskAtDB]: + created_tasks: list[CompTaskAtDB] = [] for internal_id, (node_id, node_data) in enumerate(project.workbench.items()): task_config = { "project_id": f"{project.uuid}", "node_id": f"{node_id}", "schema": {"inputs": {}, "outputs": {}}, - "inputs": { - key: json.loads(value.json(by_alias=True, exclude_unset=True)) - if isinstance(value, BaseModel) - else value - for key, value in node_data.inputs.items() - } - if node_data.inputs - else {}, - "outputs": { - key: json.loads(value.json(by_alias=True, exclude_unset=True)) - if isinstance(value, BaseModel) - else value - for key, value in node_data.outputs.items() - } - if node_data.outputs - else {}, - "image": Image(name=node_data.key, tag=node_data.version).dict( # type: ignore + "inputs": ( + { + key: ( + value.model_dump( + mode="json", by_alias=True, exclude_unset=True + ) + if 
isinstance(value, BaseModel) + else value + ) + for key, value in node_data.inputs.items() + } + if node_data.inputs + else {} + ), + "outputs": ( + { + key: ( + value.model_dump( + mode="json", by_alias=True, exclude_unset=True + ) + if isinstance(value, BaseModel) + else value + ) + for key, value in node_data.outputs.items() + } + if node_data.outputs + else {} + ), + "image": Image(name=node_data.key, tag=node_data.version).model_dump( by_alias=True, exclude_unset=True - ), # type: ignore + ), "node_class": to_node_class(node_data.key), "internal_id": internal_id + 1, - "submit": datetime.utcnow(), "job_id": generate_dask_job_id( service_key=node_data.key, service_version=node_data.version, @@ -109,167 +127,206 @@ def creator( ), } task_config.update(**overrides_kwargs) - with postgres_db.connect() as conn: - result = conn.execute( + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( comp_tasks.insert() .values(**task_config) .returning(sa.literal_column("*")) ) - new_task = CompTaskAtDB.parse_obj(result.first()) + new_task = CompTaskAtDB.model_validate(result.first()) created_tasks.append(new_task) created_task_ids.extend([t.task_id for t in created_tasks if t.task_id]) return created_tasks - yield creator + yield _ # cleanup - with postgres_db.connect() as conn: - conn.execute( + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( comp_tasks.delete().where(comp_tasks.c.task_id.in_(created_task_ids)) ) @pytest.fixture -def runs(postgres_db: sa.engine.Engine) -> Iterator[Callable[..., CompRunsAtDB]]: - created_run_ids: List[int] = [] +def project_metadata(faker: Faker) -> ProjectMetadataDict: + return ProjectMetadataDict( + parent_node_id=cast(NodeID, faker.uuid4(cast_to=None)), + parent_node_name=faker.pystr(), + parent_project_id=cast(ProjectID, faker.uuid4(cast_to=None)), + parent_project_name=faker.pystr(), + root_parent_project_id=cast(ProjectID, faker.uuid4(cast_to=None)), + root_parent_project_name=faker.pystr(), + root_parent_node_id=cast(NodeID, faker.uuid4(cast_to=None)), + root_parent_node_name=faker.pystr(), + ) - def creator( - user: Dict[str, Any], project: ProjectAtDB, **run_kwargs + +@pytest.fixture +def run_metadata( + osparc_product_name: str, + simcore_user_agent: str, + project_metadata: ProjectMetadataDict, + faker: Faker, +) -> RunMetadataDict: + return RunMetadataDict( + node_id_names_map={}, + project_name=faker.name(), + product_name=osparc_product_name, + simcore_user_agent=simcore_user_agent, + user_email=faker.email(), + wallet_id=faker.pyint(min_value=1), + wallet_name=faker.name(), + project_metadata=project_metadata, + ) + + +@pytest.fixture +async def create_comp_run( + sqlalchemy_async_engine: AsyncEngine, run_metadata: RunMetadataDict +) -> AsyncIterator[Callable[..., Awaitable[CompRunsAtDB]]]: + created_run_ids: list[int] = [] + + async def _( + user: dict[str, Any], project: ProjectAtDB, **run_kwargs ) -> CompRunsAtDB: run_config = { "project_uuid": f"{project.uuid}", - "user_id": f"{user['id']}", + "user_id": user["id"], "iteration": 1, "result": StateType.NOT_STARTED, + "metadata": jsonable_encoder(run_metadata), + "use_on_demand_clusters": False, } run_config.update(**run_kwargs) - with postgres_db.connect() as conn: - result = conn.execute( + async with sqlalchemy_async_engine.begin() as conn: + result = await conn.execute( comp_runs.insert() .values(**run_config) .returning(sa.literal_column("*")) ) - new_run = CompRunsAtDB.parse_obj(result.first()) + new_run = 
CompRunsAtDB.model_validate(result.first()) created_run_ids.append(new_run.run_id) return new_run - yield creator + yield _ # cleanup - with postgres_db.connect() as conn: - conn.execute(comp_runs.delete().where(comp_runs.c.run_id.in_(created_run_ids))) + async with sqlalchemy_async_engine.begin() as conn: + await conn.execute( + comp_runs.delete().where(comp_runs.c.run_id.in_(created_run_ids)) + ) @pytest.fixture -def cluster( - postgres_db: sa.engine.Engine, -) -> Iterator[Callable[..., Cluster]]: - created_cluster_ids: List[str] = [] - - def creator(user: Dict[str, Any], **cluster_kwargs) -> Cluster: - cluster_config = Cluster.Config.schema_extra["examples"][1] - cluster_config["owner"] = user["primary_gid"] - cluster_config.update(**cluster_kwargs) - new_cluster = Cluster.parse_obj(cluster_config) - assert new_cluster - - with postgres_db.connect() as conn: - # insert basic cluster - created_cluster = conn.execute( - sa.insert(clusters) - .values(to_clusters_db(new_cluster, only_update=False)) - .returning(sa.literal_column("*")) - ).one() - created_cluster_ids.append(created_cluster.id) - if "access_rights" in cluster_kwargs: - for gid, rights in cluster_kwargs["access_rights"].items(): - conn.execute( - pg_insert(cluster_to_groups) - .values(cluster_id=created_cluster.id, gid=gid, **rights.dict()) - .on_conflict_do_update( - index_elements=["gid", "cluster_id"], set_=rights.dict() - ) - ) - access_rights_in_db = {} - for row in conn.execute( - sa.select( - [ - cluster_to_groups.c.gid, - cluster_to_groups.c.read, - cluster_to_groups.c.write, - cluster_to_groups.c.delete, - ] - ) - .select_from(clusters.join(cluster_to_groups)) - .where(clusters.c.id == created_cluster.id) - ): - access_rights_in_db[row.gid] = { - "read": row[cluster_to_groups.c.read], - "write": row[cluster_to_groups.c.write], - "delete": row[cluster_to_groups.c.delete], - } - - return Cluster( - id=created_cluster.id, - name=created_cluster.name, - description=created_cluster.description, - type=created_cluster.type, - owner=created_cluster.owner, - endpoint=created_cluster.endpoint, - authentication=created_cluster.authentication, - access_rights=access_rights_in_db, - thumbnail=None, - ) - - yield creator +async def publish_project( + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], +) -> Callable[[], Awaitable[PublishedProject]]: + user = create_registered_user() - # cleanup - with postgres_db.connect() as conn: - conn.execute( - # pylint: disable=no-value-for-parameter - clusters.delete().where(clusters.c.id.in_(created_cluster_ids)) + async def _() -> PublishedProject: + created_project = await project(user, workbench=fake_workbench_without_outputs) + return PublishedProject( + user=user, + project=created_project, + pipeline=await create_pipeline( + project_id=f"{created_project.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ), + tasks=await create_tasks( + user=user, project=created_project, state=StateType.PUBLISHED + ), ) + return _ + @pytest.fixture -def published_project( - registered_user: Callable[..., Dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., List[CompTaskAtDB]], - fake_workbench_without_outputs: Dict[str, Any], - fake_workbench_adjacency: 
Dict[str, Any], +async def published_project( + publish_project: Callable[[], Awaitable[PublishedProject]], ) -> PublishedProject: - user = registered_user() - created_project = project(user, workbench=fake_workbench_without_outputs) - return PublishedProject( + return await publish_project() + + +@pytest.fixture +async def running_project( + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], +) -> RunningProject: + user = create_registered_user() + created_project = await project(user, workbench=fake_workbench_without_outputs) + now_time = arrow.utcnow().datetime + return RunningProject( + user=user, project=created_project, - pipeline=pipeline( + pipeline=await create_pipeline( project_id=f"{created_project.uuid}", dag_adjacency_list=fake_workbench_adjacency, ), - tasks=tasks(user=user, project=created_project, state=StateType.PUBLISHED), + tasks=await create_tasks( + user=user, + project=created_project, + state=StateType.RUNNING, + progress=0.0, + start=now_time, + ), + runs=await create_comp_run( + user=user, + project=created_project, + started=now_time, + result=StateType.RUNNING, + ), + task_to_callback_mapping={}, ) @pytest.fixture -def running_project( - registered_user: Callable[..., Dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., List[CompTaskAtDB]], - runs: Callable[..., CompRunsAtDB], - fake_workbench_without_outputs: Dict[str, Any], - fake_workbench_adjacency: Dict[str, Any], +async def running_project_mark_for_cancellation( + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], + create_comp_run: Callable[..., Awaitable[CompRunsAtDB]], + fake_workbench_without_outputs: dict[str, Any], + fake_workbench_adjacency: dict[str, Any], ) -> RunningProject: - user = registered_user() - created_project = project(user, workbench=fake_workbench_without_outputs) + user = create_registered_user() + created_project = await project(user, workbench=fake_workbench_without_outputs) + now_time = arrow.utcnow().datetime return RunningProject( + user=user, project=created_project, - pipeline=pipeline( + pipeline=await create_pipeline( project_id=f"{created_project.uuid}", dag_adjacency_list=fake_workbench_adjacency, ), - tasks=tasks(user=user, project=created_project, state=StateType.RUNNING), - runs=runs(user=user, project=created_project, result=StateType.RUNNING), + tasks=await create_tasks( + user=user, + project=created_project, + state=StateType.RUNNING, + progress=0.0, + start=now_time, + ), + runs=await create_comp_run( + user=user, + project=created_project, + result=StateType.RUNNING, + started=now_time, + cancelled=now_time + datetime.timedelta(seconds=5), + ), + task_to_callback_mapping={}, ) + + +@pytest.fixture +def simcore_user_agent(faker: Faker) -> str: + return faker.pystr() diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py deleted file mode 100644 index 48703324a3a..00000000000 --- 
a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters.py +++ /dev/null @@ -1,819 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import json -import random -from typing import Any, Callable, Iterator - -import httpx -import pytest -import sqlalchemy as sa -from _dask_helpers import DaskGatewayServer -from distributed.deploy.spec import SpecCluster -from faker import Faker -from httpx import URL -from models_library.clusters import ( - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_NO_RIGHTS, - CLUSTER_USER_RIGHTS, - Cluster, - ClusterAccessRights, - ClusterAuthentication, - SimpleAuthentication, -) -from pydantic import AnyHttpUrl, SecretStr, parse_obj_as -from pytest import MonkeyPatch -from pytest_simcore.helpers.typing_env import EnvVarsDict -from settings_library.utils_cli import create_json_encoder_wo_secrets -from simcore_postgres_database.models.clusters import ClusterType, clusters -from simcore_service_director_v2.models.schemas.clusters import ( - ClusterCreate, - ClusterGet, - ClusterPatch, - ClusterPing, -) -from starlette import status - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture() -def clusters_config( - mock_env: EnvVarsDict, - postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - monkeypatch: MonkeyPatch, - dask_spec_local_cluster: SpecCluster, -): - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - -@pytest.fixture -def cluster_simple_authentication(faker: Faker) -> Callable[[], dict[str, Any]]: - def creator() -> dict[str, Any]: - simple_auth = { - "type": "simple", - "username": faker.user_name(), - "password": faker.password(), - } - assert SimpleAuthentication.parse_obj(simple_auth) - return simple_auth - - return creator - - -@pytest.fixture -def clusters_cleaner(postgres_db: sa.engine.Engine) -> Iterator: - yield - with postgres_db.connect() as conn: - conn.execute(sa.delete(clusters)) - - -async def test_list_clusters( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - list_clusters_url = URL(f"/v2/clusters?user_id={user_1['id']}") - # there is no cluster at the moment, the list shall contain the default cluster - response = await async_client.get(list_clusters_url) - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = parse_obj_as(list[ClusterGet], response.json()) - assert ( - len(returned_clusters_list) == 1 - ), f"no default cluster in {returned_clusters_list=}" - assert ( - returned_clusters_list[0].id == 0 - ), "default cluster id is not the one expected" - - # let's create some clusters - NUM_CLUSTERS = 111 - for n in range(NUM_CLUSTERS): - cluster(user_1, name=f"pytest cluster{n:04}") - - response = await async_client.get(list_clusters_url) - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = parse_obj_as(list[ClusterGet], response.json()) - assert ( - len(returned_clusters_list) == NUM_CLUSTERS + 1 - ) # the 
default cluster comes on top of the NUM_CLUSTERS - assert ( - returned_clusters_list[0].id == 0 - ), "the first cluster shall be the platform default cluster" - - # now create a second user and check the clusters are not seen by it BUT the default one - user_2 = registered_user() - response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}") - assert response.status_code == status.HTTP_200_OK - returned_clusters_list = parse_obj_as(list[ClusterGet], response.json()) - assert ( - len(returned_clusters_list) == 1 - ), f"no default cluster in {returned_clusters_list=}" - assert ( - returned_clusters_list[0].id == 0 - ), "default cluster id is not the one expected" - - # let's create a few more clusters owned by user_1 with specific rights - for rights, name in [ - (CLUSTER_NO_RIGHTS, "no rights"), - (CLUSTER_USER_RIGHTS, "user rights"), - (CLUSTER_MANAGER_RIGHTS, "manager rights"), - (CLUSTER_ADMIN_RIGHTS, "admin rights"), - ]: - cluster( - user_1, # cluster is owned by user_1 - name=f"cluster with {name}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: rights, - }, - ) - - response = await async_client.get(f"/v2/clusters?user_id={user_2['id']}") - assert response.status_code == status.HTTP_200_OK - user_2_clusters = parse_obj_as(list[ClusterGet], response.json()) - # we should find 3 clusters + the default cluster - assert len(user_2_clusters) == 3 + 1 - for name in [ - "cluster with user rights", - "cluster with manager rights", - "cluster with admin rights", - ]: - clusters = list( - filter( - lambda cluster, name=name: cluster.name == name, - user_2_clusters, - ), - ) - assert len(clusters) == 1, f"missing cluster with {name=}" - - -async def test_get_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # try to get one that does not exist - response = await async_client.get( - f"/v2/clusters/15615165165165?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_404_NOT_FOUND - # let's create some clusters - a_bunch_of_clusters = [ - cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - - # there is no cluster at the moment, the list is empty - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = parse_obj_as(ClusterGet, response.json()) - assert returned_cluster - assert the_cluster.dict(exclude={"authentication"}) == returned_cluster.dict( - exclude={"authentication"} - ) - - user_2 = registered_user() - # getting the same cluster for user 2 shall return 403 - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - # let's create a few cluster for user 2 and share some with user 1 - for rights, user_1_expected_access in [ - (CLUSTER_NO_RIGHTS, False), - (CLUSTER_USER_RIGHTS, True), - (CLUSTER_MANAGER_RIGHTS, True), - (CLUSTER_ADMIN_RIGHTS, True), - ]: - a_cluster = cluster( - user_2, # cluster is owned by user_2 - access_rights={ - user_2["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_1["primary_gid"]: rights, - }, - ) - # now let's check that user_1 can access only the correct ones - response = await async_client.get( - 
f"/v2/clusters/{a_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_200_OK - if user_1_expected_access - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_use", - [ - pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"), - pytest.param(CLUSTER_MANAGER_RIGHTS, True, id="SHARE_WITH_MANAGER_RIGHTS"), - pytest.param(CLUSTER_USER_RIGHTS, True, id="SHARE_WITH_USER_RIGHTS"), - pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"), - ], -) -async def test_get_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - async_client: httpx.AsyncClient, - cluster_sharing_rights: ClusterAccessRights, - can_use: bool, -): - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # try to get the cluster as user 2 - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_use - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -@pytest.mark.parametrize("with_query", [True, False]) -async def test_get_default_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, - with_query: bool, -): - user_1 = registered_user() - - get_cluster_url = URL("/v2/clusters/default") - if with_query: - get_cluster_url = URL(f"/v2/clusters/default?user_id={user_1['id']}") - response = await async_client.get(get_cluster_url) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = parse_obj_as(ClusterGet, response.json()) - assert returned_cluster - assert returned_cluster.id == 0 - assert returned_cluster.name == "Default cluster" - assert 1 in returned_cluster.access_rights # everyone group is always 1 - assert returned_cluster.access_rights[1] == CLUSTER_USER_RIGHTS - - -async def test_create_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - postgres_db: sa.engine.Engine, - clusters_cleaner, -): - user_1 = registered_user() - create_cluster_url = URL(f"/v2/clusters?user_id={user_1['id']}") - cluster_data = ClusterCreate( - endpoint=faker.uri(), - authentication=cluster_simple_authentication(), - name=faker.name(), - type=random.choice(list(ClusterType)), - ) - response = await async_client.post( - create_cluster_url, - json=json.loads( - cluster_data.json( - by_alias=True, - exclude_unset=True, - encoder=create_json_encoder_wo_secrets(ClusterCreate), - ) - ), - ) - assert response.status_code == status.HTTP_201_CREATED, f"received: {response.text}" - created_cluster = parse_obj_as(ClusterGet, response.json()) - assert created_cluster - - assert cluster_data.dict( - exclude={"id", "owner", "access_rights", "authentication"} - ) == created_cluster.dict( - exclude={"id", "owner", "access_rights", "authentication"} - ) - - assert created_cluster.id is not None - assert created_cluster.owner == user_1["primary_gid"] - assert created_cluster.access_rights == { - user_1["primary_gid"]: 
CLUSTER_ADMIN_RIGHTS - } - - # let's check that DB is correctly setup, there is one entry - with postgres_db.connect() as conn: - cluster_entry = conn.execute( - sa.select([clusters]).where(clusters.c.name == cluster_data.name) - ).one() - - -async def test_update_own_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, -): - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - # try to modify one that does not exist - response = await async_client.patch( - f"/v2/clusters/15615165165165?user_id={user_1['id']}", - json=json.loads( - ClusterPatch().json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ), - ) - assert response.status_code == status.HTTP_404_NOT_FOUND - # let's create some clusters - a_bunch_of_clusters = [ - cluster(user_1, name=f"pytest cluster{n:04}") for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # get the original one - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - original_cluster = parse_obj_as(ClusterGet, response.json()) - - # now we modify nothing - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=json.loads( - ClusterPatch().json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = parse_obj_as(ClusterGet, response.json()) - assert returned_cluster.dict() == original_cluster.dict() - - # modify some simple things - expected_modified_cluster = original_cluster.copy() - for cluster_patch in [ - ClusterPatch(name=faker.name()), - ClusterPatch(description=faker.text()), - ClusterPatch(type=ClusterType.ON_PREMISE), - ClusterPatch(thumbnail=faker.uri()), - ClusterPatch(endpoint=faker.uri()), - ClusterPatch(authentication=cluster_simple_authentication()), - ]: - jsonable_cluster_patch = json.loads( - cluster_patch.json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ) - print(f"--> patching cluster with {jsonable_cluster_patch}") - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=jsonable_cluster_patch, - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = parse_obj_as(ClusterGet, response.json()) - expected_modified_cluster = expected_modified_cluster.copy( - update=cluster_patch.dict(**_PATCH_EXPORT) - ) - assert returned_cluster.dict( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.dict(exclude={"authentication": {"password"}}) - - # we can change the access rights, the owner rights are always kept - user_2 = registered_user() - - for rights in [ - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - CLUSTER_USER_RIGHTS, - CLUSTER_NO_RIGHTS, - ]: - cluster_patch = ClusterPatch(accessRights={user_2["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=cluster_patch.dict(**_PATCH_EXPORT), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.parse_obj(response.json()) - - 
expected_modified_cluster.access_rights[user_2["primary_gid"]] = rights - assert returned_cluster.dict( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.dict(exclude={"authentication": {"password"}}) - # we can change the owner since we are admin - cluster_patch = ClusterPatch(owner=user_2["primary_gid"]) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=json.loads( - cluster_patch.json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ), - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - returned_cluster = ClusterGet.parse_obj(response.json()) - expected_modified_cluster.owner = user_2["primary_gid"] - expected_modified_cluster.access_rights[ - user_2["primary_gid"] - ] = CLUSTER_ADMIN_RIGHTS - assert returned_cluster.dict( - exclude={"authentication": {"password"}} - ) == expected_modified_cluster.dict(exclude={"authentication": {"password"}}) - - # we should not be able to reduce the rights of the new owner - cluster_patch = ClusterPatch( - accessRights={user_2["primary_gid"]: CLUSTER_NO_RIGHTS} - ) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}", - json=json.loads( - cluster_patch.json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ), - ) - assert ( - response.status_code == status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -async def test_update_default_cluster_fails( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, -): - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - # try to modify one that does not exist - response = await async_client.patch( - f"/v2/clusters/default?user_id={user_1['id']}", - json=json.loads( - ClusterPatch().json( - **_PATCH_EXPORT, encoder=create_json_encoder_wo_secrets(ClusterPatch) - ) - ), - ) - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_use, can_manage, can_administer", - [ - pytest.param( - CLUSTER_ADMIN_RIGHTS, True, True, True, id="SHARE_WITH_ADMIN_RIGHTS" - ), - pytest.param( - CLUSTER_MANAGER_RIGHTS, True, True, False, id="SHARE_WITH_MANAGER_RIGHTS" - ), - pytest.param( - CLUSTER_USER_RIGHTS, True, False, False, id="SHARE_WITH_USER_RIGHTS" - ), - pytest.param(CLUSTER_NO_RIGHTS, False, False, False, id="DENY_RIGHTS"), - ], -) -async def test_update_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_sharing_rights: ClusterAccessRights, - can_use: bool, - can_manage: bool, - can_administer: bool, -): - """user_1 is the owner and administrator, he/she gives some rights to user 2""" - - _PATCH_EXPORT = {"by_alias": True, "exclude_unset": True, "exclude_none": True} - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # get the original one - response = 
await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK, f"received {response.text}" - original_cluster = parse_obj_as(ClusterGet, response.json()) - - # let's try to modify stuff as we are user 2 - for cluster_patch in [ - ClusterPatch(name=faker.name()), - ClusterPatch(description=faker.text()), - ClusterPatch(type=ClusterType.ON_PREMISE), - ClusterPatch(thumbnail=faker.uri()), - ClusterPatch(endpoint=faker.uri()), - ClusterPatch(authentication=cluster_simple_authentication()), - ]: - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=json.loads( - cluster_patch.json( - **_PATCH_EXPORT, - encoder=create_json_encoder_wo_secrets(ClusterPatch), - ) - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_manage - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - # let's try to add/remove someone (reserved to managers) - user_3 = registered_user() - for rights in [ - CLUSTER_USER_RIGHTS, # add user - CLUSTER_NO_RIGHTS, # remove user - ]: - # try to add user 3 - cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=json.loads( - cluster_patch.json( - **_PATCH_EXPORT, - encoder=create_json_encoder_wo_secrets(ClusterPatch), - ) - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_manage - else status.HTTP_403_FORBIDDEN - ), f"received {response.text} while {'adding' if rights == CLUSTER_USER_RIGHTS else 'removing'} user" - - # modify rights to admin/manager (reserved to administrators) - for rights in [ - CLUSTER_ADMIN_RIGHTS, - CLUSTER_MANAGER_RIGHTS, - ]: - cluster_patch = ClusterPatch(accessRights={user_3["primary_gid"]: rights}) - response = await async_client.patch( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}", - json=json.loads( - cluster_patch.json( - **_PATCH_EXPORT, - encoder=create_json_encoder_wo_secrets(ClusterPatch), - ) - ), - ) - assert ( - response.status_code == status.HTTP_200_OK - if can_administer - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - - -async def test_delete_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # let's delete that cluster - response = await async_client.delete( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - ), f"received {response.text}" - # now check it is gone - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_404_NOT_FOUND - ), f"received {response.text}" - - -@pytest.mark.parametrize( - "cluster_sharing_rights, can_administer", - [ - pytest.param(CLUSTER_ADMIN_RIGHTS, True, id="SHARE_WITH_ADMIN_RIGHTS"), - pytest.param(CLUSTER_MANAGER_RIGHTS, False, id="SHARE_WITH_MANAGER_RIGHTS"), - pytest.param(CLUSTER_USER_RIGHTS, False, id="SHARE_WITH_USER_RIGHTS"), - pytest.param(CLUSTER_NO_RIGHTS, False, id="DENY_RIGHTS"), - ], -) -async def 
test_delete_another_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - cluster_simple_authentication: Callable, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_sharing_rights: ClusterAccessRights, - can_administer: bool, -): - user_1 = registered_user() - user_2 = registered_user() - # let's create some clusters - a_bunch_of_clusters = [ - cluster( - user_1, - name=f"pytest cluster{n:04}", - access_rights={ - user_1["primary_gid"]: CLUSTER_ADMIN_RIGHTS, - user_2["primary_gid"]: cluster_sharing_rights, - }, - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - # let's delete that cluster as user_2 - response = await async_client.delete( - f"/v2/clusters/{the_cluster.id}?user_id={user_2['id']}" - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - if can_administer - else status.HTTP_403_FORBIDDEN - ), f"received {response.text}" - # now check it is gone or still around - response = await async_client.get( - f"/v2/clusters/{the_cluster.id}?user_id={user_1['id']}" - ) - assert ( - response.status_code == status.HTTP_404_NOT_FOUND - if can_administer - else status.HTTP_200_OK - ), f"received {response.text}" - - -async def test_delete_default_cluster_fails( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - response = await async_client.delete(f"/v2/clusters/default?user_id={user_1['id']}") - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY - - -async def test_ping_invalid_cluster_raises_422( - clusters_config: None, - async_client: httpx.AsyncClient, - faker: Faker, - cluster_simple_authentication: Callable[[], dict[str, Any]], -): - # calling with wrong data raises - response = await async_client.post("/v2/clusters:ping", json={}) - with pytest.raises(httpx.HTTPStatusError): - response.raise_for_status() - - # calling with correct data but non existing cluster also raises - some_fake_cluster = ClusterPing( - endpoint=faker.uri(), - authentication=parse_obj_as( - ClusterAuthentication, cluster_simple_authentication() - ), - ) - response = await async_client.post( - "/v2/clusters:ping", - json=json.loads( - some_fake_cluster.json( - by_alias=True, encoder=create_json_encoder_wo_secrets(ClusterPing) - ) - ), - ) - with pytest.raises(httpx.HTTPStatusError): - response.raise_for_status() - - -async def test_ping_cluster( - clusters_config: None, - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, -): - valid_cluster = ClusterPing( - endpoint=parse_obj_as(AnyHttpUrl, local_dask_gateway_server.address), - authentication=SimpleAuthentication( - username="pytest_user", - password=parse_obj_as(SecretStr, local_dask_gateway_server.password), - ), - ) - response = await async_client.post( - "/v2/clusters:ping", - json=json.loads( - valid_cluster.json( - by_alias=True, - encoder=create_json_encoder_wo_secrets(SimpleAuthentication), - ) - ), - ) - response.raise_for_status() - assert response.status_code == status.HTTP_204_NO_CONTENT - - -async def test_ping_specific_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - cluster: Callable[..., Cluster], - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, -): - user_1 = registered_user() - # try to ping one that does not exist - response = await async_client.get( - f"/v2/clusters/15615165165165:ping?user_id={user_1['id']}" - ) - assert response.status_code 
== status.HTTP_422_UNPROCESSABLE_ENTITY - - # let's create some clusters and ping one - a_bunch_of_clusters = [ - cluster( - user_1, - name=f"pytest cluster{n:04}", - endpoint=local_dask_gateway_server.address, - authentication=SimpleAuthentication( - username="pytest_user", - password=parse_obj_as(SecretStr, local_dask_gateway_server.password), - ), - ) - for n in range(111) - ] - the_cluster = random.choice(a_bunch_of_clusters) - - response = await async_client.post( - f"/v2/clusters/{the_cluster.id}:ping?user_id={user_1['id']}", - ) - response.raise_for_status() - assert response.status_code == status.HTTP_204_NO_CONTENT - - -async def test_ping_default_cluster( - clusters_config: None, - registered_user: Callable[..., dict], - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - # try to ping one that does not exist - response = await async_client.post( - f"/v2/clusters/default:ping?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_204_NO_CONTENT diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py b/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py deleted file mode 100644 index 7f169131a14..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_clusters_details.py +++ /dev/null @@ -1,301 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - -import json -from typing import Any, AsyncIterator, Callable - -import httpx -import pytest -import sqlalchemy as sa -from _dask_helpers import DaskGatewayServer -from dask_gateway import Gateway, GatewayCluster, auth -from distributed import Client as DaskClient -from distributed.deploy.spec import SpecCluster -from faker import Faker -from models_library.clusters import Cluster, ClusterID, SimpleAuthentication -from models_library.users import UserID -from pydantic import SecretStr -from pytest import MonkeyPatch -from pytest_simcore.helpers.typing_env import EnvVarsDict -from simcore_service_director_v2.models.schemas.clusters import ClusterDetailsGet -from starlette import status -from tenacity._asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture() -def clusters_config( - mock_env: EnvVarsDict, - postgres_db: sa.engine.Engine, - postgres_host_config: dict[str, str], - monkeypatch: MonkeyPatch, - dask_spec_local_cluster: SpecCluster, -): - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - -@pytest.fixture -async def dask_gateway( - local_dask_gateway_server: DaskGatewayServer, -) -> Gateway: - async with Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=auth.BasicAuth("pytest_user", local_dask_gateway_server.password), - ) as gateway: - print(f"--> {gateway=} created") - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - 
print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}") - for option in cluster_options.items(): - print(f"--> {option=}") - return gateway - - -@pytest.fixture -async def dask_gateway_cluster(dask_gateway: Gateway) -> AsyncIterator[GatewayCluster]: - async with dask_gateway.new_cluster() as cluster: - yield cluster - - -@pytest.fixture -async def dask_gateway_cluster_client( - dask_gateway_cluster: GatewayCluster, -) -> AsyncIterator[DaskClient]: - async with dask_gateway_cluster.get_client() as client: - yield client - - -@pytest.fixture -def cluster_simple_authentication(faker: Faker) -> Callable[[], dict[str, Any]]: - def creator() -> dict[str, Any]: - simple_auth = { - "type": "simple", - "username": faker.user_name(), - "password": faker.password(), - } - assert SimpleAuthentication.parse_obj(simple_auth) - return simple_auth - - return creator - - -@pytest.mark.skip( - reason="test for helping developers understand how to use dask gateways" -) -async def test_local_dask_gateway_server(local_dask_gateway_server: DaskGatewayServer): - async with Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=auth.BasicAuth("pytest_user", local_dask_gateway_server.password), - ) as gateway: - print(f"--> {gateway=} created") - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - print(f"--> {gateway_versions=}, {cluster_options=}, {clusters_list=}") - for option in cluster_options.items(): - print(f"--> {option=}") - - async with gateway.new_cluster() as cluster: - assert cluster - print(f"--> created new cluster {cluster=}, {cluster.scheduler_info=}") - NUM_WORKERS = 10 - await cluster.scale(NUM_WORKERS) - print(f"--> scaling cluster {cluster=} to {NUM_WORKERS} workers") - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30) - ): - with attempt: - print( - f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))} worker(s)" - ) - assert len(cluster.scheduler_info.get("workers", 0)) == 10 - - async with cluster.get_client() as client: - print(f"--> created new client {client=}, submitting a job") - res = await client.submit(lambda x: x + 1, 1) # type: ignore - assert res == 2 - - print(f"--> scaling cluster {cluster=} back to 0") - await cluster.scale(0) - - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(0.24), stop=stop_after_delay(30) - ): - with attempt: - print( - f"cluster {cluster=} has now {len(cluster.scheduler_info.get('workers', []))}" - ) - assert len(cluster.scheduler_info.get("workers", 0)) == 0 - - -async def test_get_default_cluster_details( - clusters_config: None, - registered_user: Callable, - async_client: httpx.AsyncClient, -): - user_1 = registered_user() - - # This test checks that the default cluster is accessible - # the default cluster is the osparc internal cluster available through a dask-scheduler - response = await async_client.get( - f"/v2/clusters/default/details?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK - default_cluster_out = ClusterDetailsGet.parse_obj(response.json()) - response = await async_client.get( - f"/v2/clusters/{0}/details?user_id={user_1['id']}" - ) - assert response.status_code == status.HTTP_200_OK - assert default_cluster_out == ClusterDetailsGet.parse_obj(response.json()) - - -async def _get_cluster_details( - async_client: httpx.AsyncClient, 
user_id: UserID, cluster_id: ClusterID -) -> ClusterDetailsGet: - response = await async_client.get( - f"/v2/clusters/{cluster_id}/details?user_id={user_id}" - ) - assert response.status_code == status.HTTP_200_OK - print(f"<-- received cluster details response {response=}") - cluster_out = ClusterDetailsGet.parse_obj(response.json()) - assert cluster_out - print(f"<-- received cluster details {cluster_out=}") - assert cluster_out.scheduler, "the cluster's scheduler is not started!" - return cluster_out - - -async def test_get_cluster_details( - clusters_config: None, - registered_user: Callable[..., dict[str, Any]], - async_client: httpx.AsyncClient, - local_dask_gateway_server: DaskGatewayServer, - cluster: Callable[..., Cluster], - dask_gateway_cluster: GatewayCluster, - dask_gateway_cluster_client: DaskClient, -): - user_1 = registered_user() - # define the cluster in the DB - some_cluster = cluster( - user_1, - endpoint=local_dask_gateway_server.address, - authentication=SimpleAuthentication( - username="pytest_user", - password=SecretStr(local_dask_gateway_server.password), - ).dict(by_alias=True), - ) - # in its present state, the cluster should have no workers - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert not cluster_out.scheduler.workers, "the cluster should not have any worker!" - - # now let's scale the cluster - _NUM_WORKERS = 1 - await dask_gateway_cluster.scale(_NUM_WORKERS) - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers, "the cluster has no workers!" - assert ( - len(cluster_out.scheduler.workers) == _NUM_WORKERS - ), f"the cluster is missing {_NUM_WORKERS}, currently has {len(cluster_out.scheduler.workers)}" - print( - f"cluster now has its {_NUM_WORKERS}, after {json.dumps(attempt.retry_state.retry_object.statistics)}" - ) - print(f"!!> cluster dashboard link: {dask_gateway_cluster.dashboard_link}") - - # let's start some computation - _TASK_SLEEP_TIME = 55 - - def do_some_work(x: int): - import time - - time.sleep(x) - return True - - task = dask_gateway_cluster_client.submit(do_some_work, _TASK_SLEEP_TIME) - # wait for the computation to start, we should see this in the cluster infos - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(10), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.executing - == 1 - ), "worker is not executing the task" - print( - f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}" - ) - # let's wait for the result - result = task.result(timeout=_TASK_SLEEP_TIME + 5) - assert result - assert await result == True - # wait for the computation to effectively stop - async for attempt in AsyncRetrying( - reraise=True, stop=stop_after_delay(60), wait=wait_fixed(1) - ): - with attempt: - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - print( - f"!!> cluster metrics: {next(iter(cluster_out.scheduler.workers.values())).metrics=}" - ) - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.executing - == 0 - ), 
"worker is still executing the task" - assert ( - next( - iter(cluster_out.scheduler.workers.values()) - ).metrics.task_counts.memory - == 1 - ), "worker did not keep the result in memory" - # NOTE: this is a CPU percent use - assert ( - next(iter(cluster_out.scheduler.workers.values())).metrics.cpu < 5.0 - ), "worker did not update the cpu metrics" - - # since the task is completed the worker should have stopped executing - cluster_out = await _get_cluster_details( - async_client, user_1["id"], some_cluster.id - ) - assert cluster_out.scheduler.workers - worker_data = next(iter(cluster_out.scheduler.workers.values())) - assert worker_data.metrics.task_counts.executing == 0 - # in dask, the task remains in memory until the result is deleted - assert worker_data.metrics.task_counts.memory == 1 diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py b/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py deleted file mode 100644 index 44a4a9d2601..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_computations.py +++ /dev/null @@ -1,470 +0,0 @@ -# pylint: disable=no-value-for-parameter -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name -# pylint: disable=too-many-arguments -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import json -import re -from datetime import datetime, timedelta -from pathlib import Path -from typing import Any, Callable - -import httpx -import pytest -import respx -from faker import Faker -from fastapi import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID -from models_library.projects import ProjectAtDB -from models_library.projects_nodes import NodeID, NodeState -from models_library.projects_pipeline import PipelineDetails -from models_library.projects_state import RunningState -from models_library.services import ServiceDockerData -from models_library.services_resources import ( - ServiceResourcesDict, - ServiceResourcesDictHelpers, -) -from models_library.utils.fastapi_encoders import jsonable_encoder -from pydantic import AnyHttpUrl, parse_obj_as -from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.typing_env import EnvVarsDict -from settings_library.rabbit import RabbitSettings -from simcore_postgres_database.models.comp_pipeline import StateType -from simcore_postgres_database.models.comp_tasks import NodeClass -from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB -from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB -from simcore_service_director_v2.models.schemas.comp_tasks import ( - ComputationCreate, - ComputationGet, -) -from simcore_service_director_v2.models.schemas.services import ServiceExtras -from starlette import status - -pytest_simcore_core_services_selection = ["postgres", "rabbit"] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture() -def mocked_rabbit_mq_client(mocker: MockerFixture): - mocker.patch( - "simcore_service_director_v2.core.application.rabbitmq.RabbitMQClient", - autospec=True, - ) - - -@pytest.fixture() -def minimal_configuration( - mock_env: EnvVarsDict, - postgres_host_config: dict[str, str], - rabbit_service: RabbitSettings, - monkeypatch: pytest.MonkeyPatch, - mocked_rabbit_mq_client: None, -): - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") - 
monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - -@pytest.fixture(scope="session") -def fake_service_details(mocks_dir: Path) -> ServiceDockerData: - fake_service_path = mocks_dir / "fake_service.json" - assert fake_service_path.exists() - fake_service_data = json.loads(fake_service_path.read_text()) - return ServiceDockerData(**fake_service_data) - - -@pytest.fixture -def fake_service_extras() -> ServiceExtras: - extra_example = ServiceExtras.Config.schema_extra["examples"][2] - random_extras = ServiceExtras(**extra_example) - assert random_extras is not None - return random_extras - - -@pytest.fixture -def fake_service_resources() -> ServiceResourcesDict: - service_resources = parse_obj_as( - ServiceResourcesDict, - ServiceResourcesDictHelpers.Config.schema_extra["examples"][0], - ) - return service_resources - - -@pytest.fixture -def mocked_director_service_fcts( - minimal_app: FastAPI, - fake_service_details: ServiceDockerData, - fake_service_extras: ServiceExtras, -): - # pylint: disable=not-context-manager - with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - respx_mock.get( - re.compile( - r"/services/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F.+/(.+)" - ), - name="get_service", - ).respond(json={"data": [fake_service_details.dict(by_alias=True)]}) - - respx_mock.get( - re.compile( - r"/service_extras/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F.+/(.+)" - ), - name="get_service_extras", - ).respond(json={"data": fake_service_extras.dict(by_alias=True)}) - - yield respx_mock - - -@pytest.fixture -def mocked_catalog_service_fcts( - minimal_app: FastAPI, - fake_service_details: ServiceDockerData, - fake_service_extras: ServiceExtras, - fake_service_resources: ServiceResourcesDict, -): - # pylint: disable=not-context-manager - with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V2_CATALOG.api_base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - respx_mock.get( - re.compile( - r"services/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F[^/]+/[^\.]+.[^\.]+.[^\/]+/resources" - ), - name="get_service_resources", - ).respond(json=jsonable_encoder(fake_service_resources, by_alias=True)) - - respx_mock.get( - re.compile( - r"services/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F[^/]+/[^\.]+.[^\.]+.[^\/]+" - ), - name="get_service", - ).respond(json=fake_service_details.dict(by_alias=True)) - - yield respx_mock - - -@pytest.fixture -def mocked_catalog_service_fcts_deprecated( - minimal_app: FastAPI, - fake_service_details: ServiceDockerData, - fake_service_extras: ServiceExtras, -): - # pylint: disable=not-context-manager - with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V2_CATALOG.api_base_url, - assert_all_called=False, - assert_all_mocked=True, - ) as respx_mock: - respx_mock.get( - re.compile( - r"services/(simcore)%2F(services)%2F(comp|dynamic|frontend)%2F.+/(.+)" - ), - name="get_service", - ).respond( - json=fake_service_details.copy( - update={ - "deprecated": 
(datetime.utcnow() - timedelta(days=1)).isoformat() - } - ).dict(by_alias=True) - ) - - yield respx_mock - - -@pytest.fixture -def product_name(faker: Faker) -> str: - return faker.name() - - -async def test_create_computation( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - response = await async_client.post( - create_computation_url, - json=jsonable_encoder( - ComputationCreate( - user_id=user["id"], project_id=proj.uuid, product_name=product_name - ) - ), - ) - assert response.status_code == status.HTTP_201_CREATED, response.text - - -async def test_start_computation_without_product_fails( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - response = await async_client.post( - create_computation_url, - json={ - "user_id": f"{user['id']}", - "project_id": f"{proj.uuid}", - "start_pipeline": f"{True}", - }, - ) - assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, response.text - - -async def test_start_computation( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - response = await async_client.post( - create_computation_url, - json=jsonable_encoder( - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - start_pipeline=True, - product_name=product_name, - ) - ), - ) - assert response.status_code == status.HTTP_201_CREATED, response.text - - -async def test_start_computation_with_deprecated_services_raises_406( - minimal_configuration: None, - mocked_director_service_fcts, - mocked_catalog_service_fcts_deprecated, - product_name: str, - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - create_computation_url = httpx.URL("/v2/computations") - response = await async_client.post( - create_computation_url, - json=jsonable_encoder( - ComputationCreate( - user_id=user["id"], - project_id=proj.uuid, - start_pipeline=True, - product_name=product_name, - ) - ), - ) - assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE, response.text - - -async def test_get_computation_from_empty_project( - minimal_configuration: None, - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - registered_user: 
Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., list[CompTaskAtDB]], - faker: Faker, - async_client: httpx.AsyncClient, -): - user = registered_user() - get_computation_url = httpx.URL( - f"/v2/computations/{faker.uuid4()}?user_id={user['id']}" - ) - # the project exists but there is no pipeline yet - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_404_NOT_FOUND, response.text - # create the project - proj = project(user, workbench=fake_workbench_without_outputs) - get_computation_url = httpx.URL( - f"/v2/computations/{proj.uuid}?user_id={user['id']}" - ) - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_404_NOT_FOUND, response.text - # create an empty pipeline - pipeline( - project_id=proj.uuid, - ) - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_200_OK, response.text - returned_computation = ComputationGet.parse_obj(response.json()) - assert returned_computation - expected_computation = ComputationGet( - id=proj.uuid, - state=RunningState.UNKNOWN, - pipeline_details=PipelineDetails(adjacency_list={}, node_states={}), - url=parse_obj_as( - AnyHttpUrl, f"{async_client.base_url.join(get_computation_url)}" - ), - stop_url=None, - result=None, - iteration=None, - cluster_id=None, - ) - assert returned_computation.dict() == expected_computation.dict() - - -async def test_get_computation_from_not_started_computation_task( - minimal_configuration: None, - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., list[CompTaskAtDB]], - faker: Faker, - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - get_computation_url = httpx.URL( - f"/v2/computations/{proj.uuid}?user_id={user['id']}" - ) - pipeline( - project_id=proj.uuid, - dag_adjacency_list=fake_workbench_adjacency, - ) - # create no task this should trigger an exception - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_409_CONFLICT, response.text - - # now create the expected tasks and the state is good again - comp_tasks = tasks(user=user, project=proj) - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_200_OK, response.text - returned_computation = ComputationGet.parse_obj(response.json()) - assert returned_computation - expected_computation = ComputationGet( - id=proj.uuid, - state=RunningState.NOT_STARTED, - pipeline_details=PipelineDetails( - adjacency_list=parse_obj_as( - dict[NodeID, list[NodeID]], fake_workbench_adjacency - ), - node_states={ - t.node_id: NodeState( - modified=True, - currentStatus=RunningState.NOT_STARTED, - dependencies={ - NodeID(node) - for node, next_nodes in fake_workbench_adjacency.items() - if f"{t.node_id}" in next_nodes - }, - ) - for t in comp_tasks - if t.node_class == NodeClass.COMPUTATIONAL - }, - ), - url=parse_obj_as( - AnyHttpUrl, f"{async_client.base_url.join(get_computation_url)}" - ), - stop_url=None, - result=None, - iteration=None, - cluster_id=None, - ) - - assert returned_computation.dict() == expected_computation.dict() - - -async def 
test_get_computation_from_published_computation_task( - minimal_configuration: None, - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., list[CompTaskAtDB]], - runs: Callable[..., CompRunsAtDB], - async_client: httpx.AsyncClient, -): - user = registered_user() - proj = project(user, workbench=fake_workbench_without_outputs) - pipeline( - project_id=proj.uuid, - dag_adjacency_list=fake_workbench_adjacency, - ) - comp_tasks = tasks(user=user, project=proj, state=StateType.PUBLISHED) - comp_runs = runs(user=user, project=proj, result=StateType.PUBLISHED) - get_computation_url = httpx.URL( - f"/v2/computations/{proj.uuid}?user_id={user['id']}" - ) - response = await async_client.get(get_computation_url) - assert response.status_code == status.HTTP_200_OK, response.text - returned_computation = ComputationGet.parse_obj(response.json()) - assert returned_computation - expected_stop_url = async_client.base_url.join( - f"/v2/computations/{proj.uuid}:stop?user_id={user['id']}" - ) - expected_computation = ComputationGet( - id=proj.uuid, - state=RunningState.PUBLISHED, - pipeline_details=PipelineDetails( - adjacency_list=parse_obj_as( - dict[NodeID, list[NodeID]], fake_workbench_adjacency - ), - node_states={ - t.node_id: NodeState( - modified=True, - currentStatus=RunningState.PUBLISHED, - dependencies={ - NodeID(node) - for node, next_nodes in fake_workbench_adjacency.items() - if f"{t.node_id}" in next_nodes - }, - ) - for t in comp_tasks - if t.node_class == NodeClass.COMPUTATIONAL - }, - ), - url=parse_obj_as( - AnyHttpUrl, f"{async_client.base_url.join(get_computation_url)}" - ), - stop_url=parse_obj_as(AnyHttpUrl, f"{expected_stop_url}"), - result=None, - iteration=1, - cluster_id=DEFAULT_CLUSTER_ID, - ) - - assert returned_computation.dict() == expected_computation.dict() diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py b/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py deleted file mode 100644 index a935dd374d8..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_computations_tasks.py +++ /dev/null @@ -1,183 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - - -from typing import Any, Callable, NamedTuple -from unittest import mock - -import httpx -import pytest -from faker import Faker -from fastapi import FastAPI, status -from models_library.projects import ProjectAtDB, ProjectID -from models_library.projects_nodes_io import NodeID -from models_library.users import UserID -from pydantic import parse_raw_as -from pytest_simcore.helpers.typing_env import EnvVarsDict -from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB -from simcore_service_director_v2.models.schemas.comp_tasks import TaskLogFileGet - -pytest_simcore_core_services_selection = [ - "postgres", -] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -def get_app(async_client: httpx.AsyncClient) -> FastAPI: - # pylint: disable=protected-access - app = async_client._transport.app # type: ignore - assert app - assert isinstance(app, FastAPI) - return app - - -@pytest.fixture -def 
mock_env( - mock_env: EnvVarsDict, # sets default env vars - postgres_host_config, # sets postgres env vars - monkeypatch: pytest.MonkeyPatch, -): - # overrides mock_env - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "True") - - -@pytest.fixture -def client(async_client: httpx.AsyncClient, mocker) -> httpx.AsyncClient: - # overrides client - # WARNING: this is an httpx.AsyncClient and not a TestClient!! - app = get_app(async_client) - - settings: AppSettings = app.state.settings - assert settings - print(settings.json(indent=1)) - - return async_client - - -@pytest.fixture -def mocked_nodeports_storage_client(mocker, faker: Faker) -> dict[str, mock.MagicMock]: - # NOTE: mocking storage API would require aioresponses since the access to storage - # is via node-ports which uses aiohttp-client! In order to avoid adding an extra - # dependency we will patch storage-client functions in simcore-sdk's nodeports - - class Loc(NamedTuple): - name: str - id: int - - return { - "get_download_file_link": mocker.patch( - "simcore_sdk.node_ports_common.storage_client.get_download_file_link", - autospec=True, - return_value=faker.url(), - ), - "get_storage_locations": mocker.patch( - "simcore_sdk.node_ports_common.storage_client.get_storage_locations", - autospec=True, - return_value=[ - Loc(name="simcore.s3", id=0), - ], - ), - } - - -@pytest.fixture -def user(registered_user: Callable[..., dict[str, Any]]): - user = registered_user() - return user - - -@pytest.fixture -def user_id(user): - return user["id"] - - -@pytest.fixture -def project_id( - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - user: dict[str, Any], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - tasks: Callable[..., list[CompTaskAtDB]], -): - """project uuid of a saved project (w/ tasks up-to-date)""" - - # insert project -> db - proj = project(user, workbench=fake_workbench_without_outputs) - - # insert pipeline -> comp_pipeline - pipeline( - project_id=proj.uuid, - dag_adjacency_list=fake_workbench_adjacency, - ) - # insert tasks -> comp_tasks - comp_tasks = tasks(user=user, project=proj) - - return proj.uuid - - -@pytest.fixture -def node_id(fake_workbench_adjacency: dict[str, Any]) -> NodeID: - return NodeID(next(nid for nid in fake_workbench_adjacency.keys())) - - -# - tests api routes -# - real postgres db with rows inserted in users, projects, comp_tasks and comp_pipelines -# - mocks responses from storage API patching nodeports -# - - -async def test_get_all_tasks_log_files( - mocked_nodeports_storage_client: dict[str, mock.MagicMock], - client: httpx.AsyncClient, - user_id: UserID, - project_id: ProjectID, -): - resp = await client.get( - f"/v2/computations/{project_id}/tasks/-/logfile", params={"user_id": user_id} - ) - - # calls storage - mocked_nodeports_storage_client["get_storage_locations"].assert_not_called() - assert mocked_nodeports_storage_client["get_download_file_link"].called - - # test expected response according to OAS! 
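[Aside] The removed `mocked_nodeports_storage_client` fixture above avoids aioresponses by patching the simcore-sdk storage-client helpers directly with pytest-mock and then asserting on those patches instead of on real HTTP traffic. The following is only a rough, self-contained sketch of that general pattern; the patch target, endpoint and fixture names are invented for illustration and are not the ones removed by this diff (assumes the pytest-mock and Faker pytest plugins plus an async test plugin are installed).

import pytest

@pytest.fixture
def mocked_download_link(mocker, faker):
    # autospec=True keeps the original call signature, so a wrongly-shaped call fails loudly
    return mocker.patch(
        "my_pkg.storage_client.get_download_file_link",  # hypothetical patch target
        autospec=True,
        return_value=faker.url(),
    )

async def test_logfile_endpoint(mocked_download_link, client):
    # 'client' stands for any async test-client fixture provided elsewhere
    resp = await client.get("/v0/logfile")  # hypothetical route
    assert resp.status_code == 200
    # the endpoint resolved the link through the patched helper, not through real HTTP
    mocked_download_link.assert_called()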
- assert resp.status_code == status.HTTP_200_OK - log_files = parse_raw_as(list[TaskLogFileGet], resp.text) - assert log_files - assert all(l.download_link for l in log_files) - - -async def test_get_task_logs_file( - mocked_nodeports_storage_client: dict[str, mock.MagicMock], - user_id: UserID, - project_id: ProjectID, - node_id: NodeID, - client: httpx.AsyncClient, -): - resp = await client.get( - f"/v2/computations/{project_id}/tasks/{node_id}/logfile", - params={"user_id": user_id}, - ) - assert resp.status_code == status.HTTP_200_OK - - log_file = TaskLogFileGet.parse_raw(resp.text) - assert log_file.download_link - - -@pytest.mark.xfail -async def test_get_task_logs( - project_id: ProjectID, node_id: NodeID, client: httpx.AsyncClient -): - resp = await client.get(f"/{project_id}/tasks/{node_id}/logs") diff --git a/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py b/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py index 2cfee21cf47..fd1d43e25aa 100644 --- a/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py +++ b/services/director-v2/tests/unit/with_dbs/test_api_route_dynamic_services.py @@ -1,3 +1,4 @@ +# pylint: disable=no-self-use # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable @@ -6,17 +7,31 @@ import logging import os import urllib.parse +from collections.abc import AsyncIterator, Iterator from contextlib import asynccontextmanager -from typing import Any, AsyncIterator, Iterator, NamedTuple, Optional +from typing import Any, NamedTuple +from unittest.mock import Mock from uuid import UUID import pytest import respx +from faker import Faker from fastapi import FastAPI from httpx import URL, QueryParams +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceCreate, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.api_schemas_dynamic_sidecar.containers import ( + ActivityInfo, + ActivityInfoOrNone, +) +from models_library.projects import ProjectAtDB, ProjectID from models_library.projects_nodes_io import NodeID from models_library.service_settings_labels import SimcoreServiceLabels -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from respx import MockRouter @@ -25,19 +40,13 @@ X_DYNAMIC_SIDECAR_REQUEST_SCHEME, X_SIMCORE_USER_AGENT, ) -from simcore_service_director_v2.models.domains.dynamic_services import ( - DynamicServiceCreate, - RetrieveDataOutEnveloped, -) -from simcore_service_director_v2.models.schemas.dynamic_services import ( - RunningDynamicServiceDetails, -) -from simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( - SchedulerData, -) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.dynamic_sidecar.errors import ( DynamicSidecarNotFoundError, ) +from simcore_service_director_v2.modules.dynamic_sidecar.scheduler import ( + DynamicSidecarsScheduler, +) from starlette import status from starlette.testclient import TestClient @@ -64,14 +73,15 @@ def minimal_config( disable_rabbitmq: None, mock_env: EnvVarsDict, postgres_host_config: dict[str, str], - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, ) -> None: """set a minimal configuration for testing the director connection only""" 
monkeypatch.setenv("SC_BOOT_MODE", "default") monkeypatch.setenv("DIRECTOR_ENABLED", "1") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "0") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_TRACING", "null") + + monkeypatch.setenv("DIRECTOR_V2_PROMETHEUS_INSTRUMENTATION_ENABLED", "1") @pytest.fixture(scope="session") @@ -83,8 +93,15 @@ def dynamic_sidecar_headers() -> dict[str, str]: } -@pytest.fixture(scope="function") -def mock_env(disable_rabbitmq: None, monkeypatch: MonkeyPatch) -> None: +@pytest.fixture() +def mock_env( + mock_env: EnvVarsDict, + mock_exclusive: None, + disable_postgres: None, + disable_rabbitmq: None, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, +) -> None: # Works as below line in docker.compose.yml # ${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} @@ -95,16 +112,19 @@ def mock_env(disable_rabbitmq: None, monkeypatch: MonkeyPatch) -> None: logger.warning("Patching to: DYNAMIC_SIDECAR_IMAGE=%s", image_name) monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", image_name) + monkeypatch.setenv("DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS", "{}") monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", "test_network_name") monkeypatch.setenv("TRAEFIK_SIMCORE_ZONE", "test_traefik_zone") monkeypatch.setenv("SWARM_STACK_NAME", "test_swarm_name") monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "false") monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "false") + monkeypatch.setenv("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", f"{faker.url()}") + monkeypatch.setenv("COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH", "{}") monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "true") - monkeypatch.setenv("DIRECTOR_V2_TRACING", "null") monkeypatch.setenv("RABBIT_HOST", "mocked_host") + monkeypatch.setenv("RABBIT_SECURE", "false") monkeypatch.setenv("RABBIT_USER", "mocked_user") monkeypatch.setenv("RABBIT_PASSWORD", "mocked_password") @@ -117,16 +137,15 @@ def mock_env(disable_rabbitmq: None, monkeypatch: MonkeyPatch) -> None: monkeypatch.setenv("POSTGRES_USER", "mocked_user") monkeypatch.setenv("POSTGRES_PASSWORD", "mocked_password") monkeypatch.setenv("POSTGRES_DB", "mocked_db") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "false") monkeypatch.setenv("SC_BOOT_MODE", "production") monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) @pytest.fixture @@ -136,19 +155,19 @@ async def mock_retrieve_features( is_legacy: bool, scheduler_data_from_http_request: SchedulerData, mocker: MockerFixture, -) -> AsyncIterator[Optional[MockRouter]]: +) -> AsyncIterator[MockRouter | None]: # pylint: disable=not-context-manager with respx.mock( assert_all_called=False, assert_all_mocked=True, ) as respx_mock: if is_legacy: - service_details = RunningDynamicServiceDetails.parse_obj( - RunningDynamicServiceDetails.Config.schema_extra["examples"][0] + service_details = RunningDynamicServiceDetails.model_validate( + RunningDynamicServiceDetails.model_json_schema()["examples"][0] ) respx_mock.post( 
f"{service_details.legacy_service_url}/retrieve", name="retrieve" - ).respond(json=RetrieveDataOutEnveloped.Config.schema_extra["examples"][0]) + ).respond(json=RetrieveDataOutEnveloped.model_json_schema()["examples"][0]) yield respx_mock # no cleanup required @@ -158,15 +177,15 @@ async def mock_retrieve_features( service_name = "service_name" # pylint: disable=protected-access - dynamic_sidecar_scheduler._scheduler._inverse_search_mapping[ + dynamic_sidecar_scheduler.scheduler._inverse_search_mapping[ # noqa: SLF001 node_uuid ] = service_name - dynamic_sidecar_scheduler._scheduler._to_observe[ + dynamic_sidecar_scheduler.scheduler._to_observe[ # noqa: SLF001 service_name ] = scheduler_data_from_http_request respx_mock.post( - f"{scheduler_data_from_http_request.endpoint}/v1/containers/ports/inputs:pull", + f"{scheduler_data_from_http_request.endpoint}v1/containers/ports/inputs:pull", name="service_pull_input_ports", ).respond(json="mocked_task_id", status_code=status.HTTP_202_ACCEPTED) @@ -182,17 +201,21 @@ async def _mocked_context_manger(*args, **kwargs) -> AsyncIterator[int]: yield respx_mock - dynamic_sidecar_scheduler._scheduler._inverse_search_mapping.pop(node_uuid) - dynamic_sidecar_scheduler._scheduler._to_observe.pop(service_name) + dynamic_sidecar_scheduler.scheduler._inverse_search_mapping.pop( # noqa: SLF001 + node_uuid + ) + dynamic_sidecar_scheduler.scheduler._to_observe.pop( # noqa: SLF001 + service_name + ) @pytest.fixture -def mocked_director_v0_service_api( +def mocked_catalog_service_api( minimal_app: FastAPI, service: dict[str, Any], service_labels: dict[str, Any] ) -> Iterator[MockRouter]: # pylint: disable=not-context-manager with respx.mock( - base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, + base_url=minimal_app.state.settings.DIRECTOR_V2_CATALOG.api_base_url, assert_all_called=False, assert_all_mocked=True, ) as respx_mock: @@ -200,14 +223,27 @@ def mocked_director_v0_service_api( respx_mock.get( f"/services/{urllib.parse.quote_plus(service['key'])}/{service['version']}/labels", name="service labels", - ).respond(json={"data": service_labels}) + ).respond(json=service_labels) + yield respx_mock + + +@pytest.fixture +def mocked_director_v0_service_api( + minimal_app: FastAPI, service: dict[str, Any], service_labels: dict[str, Any] +) -> Iterator[MockRouter]: + # pylint: disable=not-context-manager + with respx.mock( + base_url=minimal_app.state.settings.DIRECTOR_V0.endpoint, + assert_all_called=False, + assert_all_mocked=True, + ) as respx_mock: respx_mock.get( f"/running_interactive_services/{service['node_uuid']}", name="running interactive service", ).respond( json={ - "data": RunningDynamicServiceDetails.Config.schema_extra["examples"][0] + "data": RunningDynamicServiceDetails.model_json_schema()["examples"][0] } ) @@ -221,10 +257,10 @@ def mocked_director_v2_scheduler(mocker: MockerFixture, exp_status_code: int) -> # MOCKING get_stack_status def get_stack_status(node_uuid: NodeID) -> RunningDynamicServiceDetails: if exp_status_code == status.HTTP_307_TEMPORARY_REDIRECT: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) - return RunningDynamicServiceDetails.parse_obj( - RunningDynamicServiceDetails.Config.schema_extra["examples"][0] + return RunningDynamicServiceDetails.model_validate( + RunningDynamicServiceDetails.model_json_schema()["examples"][0] ) module_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler" @@ -234,17 +270,19 @@ def get_stack_status(node_uuid: NodeID) -> 
RunningDynamicServiceDetails: ) # MOCKING remove_service - def remove_service(node_uuid: NodeID, can_save: Optional[bool]) -> None: + def remove_service(node_uuid: NodeID, *ars: Any, **kwargs: Any) -> None: if exp_status_code == status.HTTP_307_TEMPORARY_REDIRECT: - raise DynamicSidecarNotFoundError(node_uuid) + raise DynamicSidecarNotFoundError(node_uuid=node_uuid) mocker.patch( f"{module_base}._task.DynamicSidecarsScheduler.mark_service_for_removal", + autospec=True, side_effect=remove_service, ) mocker.patch( - f"{module_base}._core._scheduler.Scheduler._discover_running_services", + f"{module_base}._core._scheduler_utils.discover_running_services", + autospec=True, return_value=None, ) @@ -254,8 +292,8 @@ def remove_service(node_uuid: NodeID, can_save: Optional[bool]) -> None: [ pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][0], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0], exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT, is_legacy=True, ), @@ -263,8 +301,8 @@ def remove_service(node_uuid: NodeID, can_save: Optional[bool]) -> None: ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][1], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1], exp_status_code=status.HTTP_201_CREATED, is_legacy=False, ), @@ -272,8 +310,8 @@ def remove_service(node_uuid: NodeID, can_save: Optional[bool]) -> None: ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][2], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2], exp_status_code=status.HTTP_201_CREATED, is_legacy=False, ), @@ -284,6 +322,7 @@ def remove_service(node_uuid: NodeID, can_save: Optional[bool]) -> None: def test_create_dynamic_services( minimal_config: None, mocked_director_v0_service_api: MockRouter, + mocked_catalog_service_api: MockRouter, mocked_director_v2_scheduler: None, client: TestClient, dynamic_sidecar_headers: dict[str, str], @@ -291,12 +330,13 @@ def test_create_dynamic_services( exp_status_code: int, is_legacy: bool, ): - post_data = DynamicServiceCreate.parse_obj(service) + post_data = DynamicServiceCreate.model_validate(service) response = client.post( "/v2/dynamic_services", headers=dynamic_sidecar_headers, - json=json.loads(post_data.json()), + json=json.loads(post_data.model_dump_json()), + follow_redirects=False, ) assert ( response.status_code == exp_status_code @@ -325,8 +365,8 @@ def test_create_dynamic_services( [ pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][0], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0], exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT, is_legacy=True, ), @@ -334,8 +374,8 @@ def test_create_dynamic_services( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][1], + 
service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1], exp_status_code=status.HTTP_200_OK, is_legacy=False, ), @@ -343,8 +383,8 @@ def test_create_dynamic_services( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][2], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2], exp_status_code=status.HTTP_200_OK, is_legacy=False, ), @@ -354,6 +394,7 @@ def test_create_dynamic_services( ) def test_get_service_status( mocked_director_v0_service_api: MockRouter, + mocked_catalog_service_api: MockRouter, mocked_director_v2_scheduler: None, client: TestClient, service: dict[str, Any], @@ -362,7 +403,7 @@ def test_get_service_status( ): url = URL(f"/v2/dynamic_services/{service['node_uuid']}") - response = client.get(str(url), allow_redirects=False) + response = client.get(str(url), follow_redirects=False) assert ( response.status_code == exp_status_code ), f"expected status code {exp_status_code}, received {response.status_code}: {response.text}" @@ -383,8 +424,8 @@ def test_get_service_status( [ pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][0], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0], exp_status_code=status.HTTP_307_TEMPORARY_REDIRECT, is_legacy=True, ), @@ -392,8 +433,8 @@ def test_get_service_status( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][1], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1], exp_status_code=status.HTTP_204_NO_CONTENT, is_legacy=False, ), @@ -401,8 +442,8 @@ def test_get_service_status( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][2], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2], exp_status_code=status.HTTP_204_NO_CONTENT, is_legacy=False, ), @@ -413,21 +454,24 @@ def test_get_service_status( @pytest.mark.parametrize( "can_save, exp_save_state", [(None, True), (True, True), (False, False)] ) -def test_delete_service( +def test_delete_service( # pylint:disable=too-many-arguments + docker_swarm: None, mocked_director_v0_service_api: MockRouter, + mocked_catalog_service_api: MockRouter, mocked_director_v2_scheduler: None, + mocked_service_awaits_manual_interventions: None, client: TestClient, service: dict[str, Any], exp_status_code: int, is_legacy: bool, - can_save: Optional[bool], + can_save: bool | None, exp_save_state: bool, ): url = URL(f"/v2/dynamic_services/{service['node_uuid']}") if can_save is not None: url = url.copy_with(params={"can_save": can_save}) - response = client.delete(str(url), allow_redirects=False) + response = client.delete(str(url), follow_redirects=False) assert ( response.status_code == exp_status_code ), f"expected status code {exp_status_code}, received {response.status_code}: {response.text}" @@ -443,13 +487,66 @@ def test_delete_service( assert 
redirect_url.params == QueryParams(can_save=exp_save_state) +@pytest.fixture +def dynamic_sidecar_scheduler(minimal_app: FastAPI) -> DynamicSidecarsScheduler: + return minimal_app.state.dynamic_sidecar_scheduler + + @pytest.mark.parametrize( "service, service_labels, exp_status_code, is_legacy", [ pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][0], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1], + exp_status_code=status.HTTP_201_CREATED, + is_legacy=False, + ) + ), + ], +) +def test_delete_service_waiting_for_manual_intervention( + minimal_config: None, + mocked_director_v0_service_api: MockRouter, + mocked_catalog_service_api: MockRouter, + mocked_director_v2_scheduler: None, + client: TestClient, + dynamic_sidecar_headers: dict[str, str], + service: dict[str, Any], + exp_status_code: int, + is_legacy: bool, + dynamic_sidecar_scheduler: DynamicSidecarsScheduler, +): + post_data = DynamicServiceCreate.model_validate(service) + + response = client.post( + "/v2/dynamic_services", + headers=dynamic_sidecar_headers, + json=json.loads(post_data.model_dump_json()), + ) + assert ( + response.status_code == exp_status_code + ), f"expected status code {exp_status_code}, received {response.status_code}: {response.text}" + + # mark service as failed and waiting for human intervention + node_uuid = UUID(service["node_uuid"]) + scheduler_data = dynamic_sidecar_scheduler.scheduler.get_scheduler_data(node_uuid) + scheduler_data.dynamic_sidecar.status.update_failing_status("failed") + scheduler_data.dynamic_sidecar.wait_for_manual_intervention_after_error = True + + # check response + url = URL(f"/v2/dynamic_services/{node_uuid}") + stop_response = client.delete(str(url), follow_redirects=False) + assert stop_response.json()["errors"][0] == "waiting_for_intervention" + + +@pytest.mark.parametrize( + "service, service_labels, exp_status_code, is_legacy", + [ + pytest.param( + *ServiceParams( + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][0], exp_status_code=status.HTTP_200_OK, is_legacy=True, ), @@ -457,8 +554,8 @@ def test_delete_service( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][1], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][1], exp_status_code=status.HTTP_200_OK, is_legacy=False, ), @@ -466,8 +563,8 @@ def test_delete_service( ), pytest.param( *ServiceParams( - service=DynamicServiceCreate.Config.schema_extra["example"], - service_labels=SimcoreServiceLabels.Config.schema_extra["examples"][2], + service=DynamicServiceCreate.model_json_schema()["example"], + service_labels=SimcoreServiceLabels.model_json_schema()["examples"][2], exp_status_code=status.HTTP_200_OK, is_legacy=False, ), @@ -477,8 +574,9 @@ def test_delete_service( ) def test_retrieve( minimal_config: None, - mock_retrieve_features: Optional[MockRouter], + mock_retrieve_features: MockRouter | None, mocked_director_v0_service_api: MockRouter, + mocked_catalog_service_api: MockRouter, mocked_director_v2_scheduler: None, client: TestClient, service: dict[str, Any], @@ -486,10 +584,153 @@ def test_retrieve( is_legacy: bool, ) -> None: url = 
URL(f"/v2/dynamic_services/{service['node_uuid']}:retrieve") - response = client.post(str(url), json=dict(port_keys=[]), allow_redirects=False) + response = client.post(str(url), json={"port_keys": []}, follow_redirects=False) assert ( response.status_code == exp_status_code ), f"expected status code {exp_status_code}, received {response.status_code}: {response.text}" assert ( - response.json() == RetrieveDataOutEnveloped.Config.schema_extra["examples"][0] + response.json() == RetrieveDataOutEnveloped.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def mock_internals_inactivity( + mocker: MockerFixture, + faker: Faker, + services_activity: list[ActivityInfoOrNone], +): + module_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler" + mocker.patch( + f"{module_base}._core._scheduler_utils.get_dynamic_sidecars_to_observe", + return_value=[], + ) + + service_inactivity_map: dict[str, ActivityInfoOrNone] = { + faker.uuid4(): s for s in services_activity + } + + mock_project = Mock() + mock_project.workbench = list(service_inactivity_map.keys()) + + class MockProjectRepo: + async def get_project(self, _: ProjectID) -> ProjectAtDB: + return mock_project + + # patch get_project + mocker.patch( + "simcore_service_director_v2.api.dependencies.database.get_base_repository", + return_value=MockProjectRepo(), + ) + + async def get_service_activity(node_uuid: NodeID) -> ActivityInfoOrNone: + return service_inactivity_map[f"{node_uuid}"] + + mocker.patch( + f"{module_base}.DynamicSidecarsScheduler.get_service_activity", + side_effect=get_service_activity, + ) + mocker.patch( + f"{module_base}.DynamicSidecarsScheduler.is_service_tracked", return_value=True + ) + + +@pytest.mark.parametrize( + "services_activity, max_inactivity_seconds, is_project_inactive", + [ + *[ + pytest.param( + [ + ActivityInfo(seconds_inactive=x), + ], + 5, + False, + id=f"{x}_makes_project_active_with_threshold_5", + ) + for x in [*range(5)] + ], + pytest.param( + [ + ActivityInfo(seconds_inactive=6), + ], + 5, + True, + id="single_new_style_inactive", + ), + pytest.param( + [ + ActivityInfo(seconds_inactive=4), + ], + 5, + False, + id="single_new_style_not_yet_inactive", + ), + pytest.param( + [ + ActivityInfo(seconds_inactive=0), + ], + 5, + False, + id="single_new_style_active", + ), + pytest.param( + [ + ActivityInfo(seconds_inactive=6), + ActivityInfo(seconds_inactive=1), + ActivityInfo(seconds_inactive=0), + ], + 5, + False, + id="active_services_make_project_active", + ), + pytest.param( + [ + ActivityInfo(seconds_inactive=6), + ActivityInfo(seconds_inactive=6), + ], + 5, + True, + id="all_services_inactive", + ), + pytest.param( + [], + 5, + True, + id="no_services_in_project_it_results_inactive", + ), + pytest.param( + [ + None, + ], + 5, + True, + id="without_inactivity_support_considered_as_inactive", + ), + pytest.param( + [ + None, + ActivityInfo(seconds_inactive=6), + None, + ActivityInfo(seconds_inactive=6), + ], + 5, + True, + id="mix_without_inactivity_support_and_inactive_considered_inactive", + ), + ], +) +def test_get_project_inactivity( + mock_internals_inactivity: None, + mocker: MockerFixture, + client: TestClient, + is_project_inactive: bool, + max_inactivity_seconds: float, + faker: Faker, +): + url = URL(f"/v2/dynamic_services/projects/{faker.uuid4()}/inactivity") + response = client.get( + f"{url}", + params={"max_inactivity_seconds": max_inactivity_seconds}, + follow_redirects=False, ) + assert response.status_code == status.HTTP_200_OK + assert response.json() == 
{"is_inactive": is_project_inactive} diff --git a/services/director-v2/tests/unit/with_dbs/test_cli.py b/services/director-v2/tests/unit/with_dbs/test_cli.py index ddc83db9610..813bd93aa07 100644 --- a/services/director-v2/tests/unit/with_dbs/test_cli.py +++ b/services/director-v2/tests/unit/with_dbs/test_cli.py @@ -4,14 +4,20 @@ import os import re import traceback +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable from contextlib import asynccontextmanager -from typing import Any, AsyncIterable, AsyncIterator, Callable, Optional +from typing import Any +from unittest.mock import AsyncMock import pytest import respx from click.testing import Result from faker import Faker from fastapi import status +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) from models_library.projects import ProjectAtDB from models_library.projects_nodes_io import NodeID from pytest_mock.plugin import MockerFixture @@ -21,12 +27,6 @@ from simcore_service_director_v2.cli._close_and_save_service import ( ThinDV2LocalhostClient, ) -from simcore_service_director_v2.models.domains.dynamic_services import ( - DynamicServiceGet, -) -from simcore_service_director_v2.models.schemas.dynamic_services import ( - RunningDynamicServiceDetails, -) from typer.testing import CliRunner pytest_simcore_core_services_selection = [ @@ -42,15 +42,16 @@ def minimal_configuration( mock_env: EnvVarsDict, postgres_host_config: dict[str, str], monkeypatch: pytest.MonkeyPatch, + faker: Faker, ): monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) @pytest.fixture @@ -59,13 +60,13 @@ def cli_runner(minimal_configuration: None) -> CliRunner: @pytest.fixture -def project_at_db( - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], +async def project_at_db( + create_registered_user: Callable[..., dict[str, Any]], + project: Callable[..., Awaitable[ProjectAtDB]], fake_workbench_without_outputs: dict[str, Any], ) -> ProjectAtDB: - user = registered_user() - return project(user, workbench=fake_workbench_without_outputs) + user = create_registered_user() + return await project(user, workbench=fake_workbench_without_outputs) @pytest.fixture @@ -79,18 +80,20 @@ def mock_requires_dynamic_sidecar(mocker: MockerFixture) -> None: @pytest.fixture def mock_save_service_state(mocker: MockerFixture) -> None: mocker.patch( - "simcore_service_director_v2.modules.dynamic_sidecar.api_client._public.DynamicSidecarClient.save_service_state", + "simcore_service_director_v2.modules.dynamic_sidecar.api_client._public.SidecarsClient.save_service_state", spec=True, + return_value=0, ) @pytest.fixture def mock_save_service_state_as_failing(mocker: MockerFixture) -> None: async def _always_raise(*args, **kwargs) -> None: - raise Exception("I AM FAILING 
NOW") # pylint: disable=broad-exception-raised + msg = "I AM FAILING NOW" + raise Exception(msg) # pylint: disable=broad-exception-raised # noqa: TRY002 mocker.patch( - "simcore_service_director_v2.modules.dynamic_sidecar.api_client._public.DynamicSidecarClient.save_service_state", + "simcore_service_director_v2.modules.dynamic_sidecar.api_client._public.SidecarsClient.save_service_state", side_effect=_always_raise, ) @@ -104,8 +107,8 @@ def node_id(faker: Faker) -> NodeID: def mock_get_node_state(mocker: MockerFixture) -> None: mocker.patch( "simcore_service_director_v2.cli._core._get_dy_service_state", - return_value=DynamicServiceGet.parse_obj( - RunningDynamicServiceDetails.Config.schema_extra["examples"][0] + return_value=DynamicServiceGet.model_validate( + RunningDynamicServiceDetails.model_json_schema()["examples"][0] ), ) @@ -115,6 +118,14 @@ def task_id(faker: Faker) -> str: return f"tas_id.{faker.uuid4()}" +@pytest.fixture +def mock_catalog_instance(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_director_v2.modules.catalog.CatalogClient.instance", + return_value=AsyncMock(), + ) + + @pytest.fixture async def mock_close_service_routes( mocker: MockerFixture, task_id: str @@ -134,6 +145,9 @@ async def mock_close_service_routes( respx_mock.post( re.compile(f"{regex_base}/state:save"), name="save_service_state" ).respond(status_code=status.HTTP_202_ACCEPTED, json=task_id) + respx_mock.get( + re.compile(f"{regex_base}/state"), name="service_internal_state" + ).respond(status_code=status.HTTP_200_OK) respx_mock.post( re.compile(f"{regex_base}/outputs:push"), name="push_service_outputs" ).respond(status_code=status.HTTP_202_ACCEPTED, json=task_id) @@ -141,6 +155,10 @@ async def mock_close_service_routes( re.compile(f"{regex_base}/docker-resources"), name="delete_service_docker_resources", ).respond(status_code=status.HTTP_202_ACCEPTED, json=task_id) + respx_mock.post( + re.compile(f"{regex_base}/disk/reserved:free"), + name="free_reserved_disk_space", + ).respond(status_code=status.HTTP_204_NO_CONTENT) @asynccontextmanager async def _mocked_context_manger( @@ -148,7 +166,7 @@ async def _mocked_context_manger( task_id: Any, *, task_timeout: Any, - progress_callback: Optional[ProgressCallback] = None, + progress_callback: ProgressCallback | None = None, status_poll_interval: Any = 5, ) -> AsyncIterator[None]: assert progress_callback @@ -170,32 +188,49 @@ def _format_cli_error(result: Result) -> str: def test_project_save_state_ok( + mock_catalog_instance: None, mock_requires_dynamic_sidecar: None, mock_save_service_state: None, cli_runner: CliRunner, project_at_db: ProjectAtDB, + capsys: pytest.CaptureFixture, ): - result = cli_runner.invoke(main, ["project-save-state", f"{project_at_db.uuid}"]) + with capsys.disabled() as _disabled: + # NOTE: without this, the test does not pass see https://github.com/pallets/click/issues/824 + # also see this https://github.com/Stranger6667/pytest-click/issues/27 when using log-cli-level=DEBUG + result = cli_runner.invoke( + main, ["project-save-state", f"{project_at_db.uuid}"] + ) print(result.stdout) assert result.exit_code == os.EX_OK, _format_cli_error(result) assert result.stdout.endswith(f"Save complete for project {project_at_db.uuid}\n") for node_uuid, node_content in project_at_db.workbench.items(): assert f"Saving state for {node_uuid} {node_content.label}" in result.stdout - assert f"Saving project '{project_at_db.uuid}' - '{project_at_db.name}'" + assert ( + f"Saving project '{project_at_db.uuid}' - 
'{project_at_db.name}'" + in result.stdout + ) def test_project_save_state_retry_3_times_and_fails( + mock_catalog_instance: None, mock_requires_dynamic_sidecar: None, mock_save_service_state_as_failing: None, cli_runner: CliRunner, project_at_db: ProjectAtDB, + capsys: pytest.CaptureFixture, ): - result = cli_runner.invoke(main, ["project-save-state", f"{project_at_db.uuid}"]) + with capsys.disabled() as _disabled: + # NOTE: without this, the test does not pass see https://github.com/pallets/click/issues/824 + # also see this https://github.com/Stranger6667/pytest-click/issues/27 when using log-cli-level=DEBUG + result = cli_runner.invoke( + main, ["project-save-state", f"{project_at_db.uuid}"] + ) print(result.stdout) assert result.exit_code == 1, _format_cli_error(result) assert "The following nodes failed to save:" in result.stdout - for node_uuid in project_at_db.workbench.keys(): + for node_uuid in project_at_db.workbench: assert ( result.stdout.count(f"Attempting to save {node_uuid}") == DEFAULT_NODE_SAVE_ATTEMPTS @@ -220,3 +255,25 @@ def test_close_and_save_service( result = cli_runner.invoke(main, ["close-and-save-service", f"{node_id}"]) assert result.exit_code == os.EX_OK, _format_cli_error(result) print(result.stdout) + + +def test_service_state( + mock_close_service_routes: None, cli_runner: CliRunner, node_id: NodeID +): + result = cli_runner.invoke(main, ["service-state", f"{node_id}"]) + assert result.exit_code == os.EX_OK, _format_cli_error(result) + print(result.stdout) + + +def test_free_reserved_disk_space( + mock_close_service_routes: None, cli_runner: CliRunner, node_id: NodeID +): + result = cli_runner.invoke(main, ["free-reserved-disk-space", f"{node_id}"]) + assert result.exit_code == os.EX_OK, _format_cli_error(result) + print(result.stdout) + + +def test_osparc_variables(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["osparc-variables"]) + assert result.exit_code == os.EX_OK, _format_cli_error(result) + print(result.stdout) diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py b/services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py deleted file mode 100644 index b8f074580d5..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_modules_comp_scheduler_dask_scheduler.py +++ /dev/null @@ -1,724 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=no-value-for-parameter -# pylint:disable=protected-access -# pylint:disable=too-many-arguments -# pylint:disable=no-name-in-module -# pylint: disable=too-many-statements - - -from dataclasses import dataclass -from typing import Any, Callable, Iterator, Union -from unittest import mock - -import aiopg -import httpx -import pytest -from _helpers import ( - PublishedProject, - RunningProject, - assert_comp_run_state, - assert_comp_tasks_state, - manually_run_comp_scheduler, - set_comp_task_state, -) -from dask.distributed import SpecCluster -from dask_task_models_library.container_tasks.errors import TaskCancelledError -from dask_task_models_library.container_tasks.io import TaskOutputData -from fastapi.applications import FastAPI -from models_library.clusters import DEFAULT_CLUSTER_ID -from models_library.projects import ProjectAtDB -from models_library.projects_state import RunningState -from pytest import MonkeyPatch -from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.typing_env import EnvVarsDict -from 
settings_library.rabbit import RabbitSettings -from simcore_postgres_database.models.comp_pipeline import StateType -from simcore_postgres_database.models.comp_runs import comp_runs -from simcore_postgres_database.models.comp_tasks import NodeClass -from simcore_service_director_v2.core.application import init_app -from simcore_service_director_v2.core.errors import ( - ComputationalBackendNotConnectedError, - ComputationalBackendTaskNotFoundError, - ComputationalBackendTaskResultsNotReadyError, - ComputationalSchedulerChangedError, - ConfigurationError, - PipelineNotFoundError, - SchedulerError, -) -from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.models.domains.comp_pipelines import CompPipelineAtDB -from simcore_service_director_v2.models.domains.comp_runs import CompRunsAtDB -from simcore_service_director_v2.modules.comp_scheduler import background_task -from simcore_service_director_v2.modules.comp_scheduler.base_scheduler import ( - BaseCompScheduler, -) -from simcore_service_director_v2.utils.scheduler import COMPLETED_STATES -from starlette.testclient import TestClient - -pytest_simcore_core_services_selection = ["postgres", "rabbit"] -pytest_simcore_ops_services_selection = [ - "adminer", -] - - -@pytest.fixture -def minimal_dask_scheduler_config( - mock_env: EnvVarsDict, - postgres_host_config: dict[str, str], - monkeypatch: MonkeyPatch, - rabbit_service: RabbitSettings, -) -> None: - """set a minimal configuration for testing the dask connection only""" - monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SIDECAR_ENABLED", "false") - monkeypatch.setenv("DIRECTOR_V0_ENABLED", "0") - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") - monkeypatch.setenv("COMPUTATIONAL_BACKEND_ENABLED", "1") - monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") - - -@pytest.fixture -def scheduler( - minimal_dask_scheduler_config: None, - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - # dask_spec_local_cluster: SpecCluster, - minimal_app: FastAPI, -) -> BaseCompScheduler: - assert minimal_app.state.scheduler is not None - return minimal_app.state.scheduler - - -@pytest.fixture -def mocked_dask_client(mocker: MockerFixture) -> mock.MagicMock: - mocked_dask_client = mocker.patch( - "simcore_service_director_v2.modules.dask_clients_pool.DaskClient", - autospec=True, - ) - mocked_dask_client.create.return_value = mocked_dask_client - return mocked_dask_client - - -@pytest.fixture -def mocked_node_ports(mocker: MockerFixture): - mocker.patch( - "simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.parse_output_data", - return_value=None, - autospec=True, - ) - - -@pytest.fixture -def mocked_clean_task_output_fct(mocker: MockerFixture) -> mock.MagicMock: - return mocker.patch( - "simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.clean_task_output_and_log_files_if_invalid", - return_value=None, - autospec=True, - ) - - -@pytest.fixture -def mocked_scheduler_task(mocker: MockerFixture) -> None: - """disables the scheduler task, note that it needs to be triggered manually then""" - mocker.patch.object(background_task, "scheduler_task") - - -@pytest.fixture -async def minimal_app(async_client: 
httpx.AsyncClient) -> FastAPI: - # must use the minimal app from from the `async_client`` - # the`client` uses starlette's TestClient which spawns - # a new thread on which it creates a new loop - # causing issues downstream with coroutines not - # being created on the same loop - return async_client._transport.app - - -async def test_scheduler_gracefully_starts_and_stops( - minimal_dask_scheduler_config: None, - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - dask_spec_local_cluster: SpecCluster, - minimal_app: FastAPI, -): - # check it started correctly - assert minimal_app.state.scheduler_task is not None - - -@pytest.mark.parametrize( - "missing_dependency", - [ - "DIRECTOR_V2_POSTGRES_ENABLED", - "COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", - ], -) -def test_scheduler_raises_exception_for_missing_dependencies( - minimal_dask_scheduler_config: None, - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - dask_spec_local_cluster: SpecCluster, - monkeypatch: MonkeyPatch, - missing_dependency: str, -): - # disable the dependency - monkeypatch.setenv(missing_dependency, "0") - # create the client - settings = AppSettings.create_from_envs() - app = init_app(settings) - - with pytest.raises(ConfigurationError): - with TestClient(app, raise_server_exceptions=True) as _: - pass - - -async def test_empty_pipeline_is_not_scheduled( - mocked_scheduler_task: None, - scheduler: BaseCompScheduler, - minimal_app: FastAPI, - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore -): - user = registered_user() - empty_project = project(user) - - # the project is not in the comp_pipeline, therefore scheduling it should fail - with pytest.raises(PipelineNotFoundError): - await scheduler.run_new_pipeline( - user_id=user["id"], - project_id=empty_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - ) - # create the empty pipeline now - _empty_pipeline = pipeline(project_id=f"{empty_project.uuid}") - - # creating a run with an empty pipeline is useless, check the scheduler is not kicking in - await scheduler.run_new_pipeline( - user_id=user["id"], - project_id=empty_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - ) - assert len(scheduler.scheduled_pipelines) == 0 - assert ( - scheduler.wake_up_event.is_set() == False - ), "the scheduler was woken up on an empty pipeline!" - # check the database is empty - async with aiopg_engine.acquire() as conn: # type: ignore - result = await conn.scalar( - comp_runs.select().where( - (comp_runs.c.user_id == user["id"]) - & (comp_runs.c.project_uuid == f"{empty_project.uuid}") - ) # there is only one entry - ) - assert result == None - - -async def test_misconfigured_pipeline_is_not_scheduled( - mocked_scheduler_task: None, - scheduler: BaseCompScheduler, - minimal_app: FastAPI, - registered_user: Callable[..., dict[str, Any]], - project: Callable[..., ProjectAtDB], - pipeline: Callable[..., CompPipelineAtDB], - fake_workbench_without_outputs: dict[str, Any], - fake_workbench_adjacency: dict[str, Any], - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore -): - """A pipeline which comp_tasks are missing should not be scheduled. 
- It shall be aborted and shown as such in the comp_runs db""" - user = registered_user() - sleepers_project = project(user, workbench=fake_workbench_without_outputs) - sleepers_pipeline = pipeline( - project_id=f"{sleepers_project.uuid}", - dag_adjacency_list=fake_workbench_adjacency, - ) - # check the pipeline is correctly added to the scheduled pipelines - await scheduler.run_new_pipeline( - user_id=user["id"], - project_id=sleepers_project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - ) - assert len(scheduler.scheduled_pipelines) == 1 - assert ( - scheduler.wake_up_event.is_set() == True - ), "the scheduler was NOT woken up on the scheduled pipeline!" - for (u_id, p_id, it), params in scheduler.scheduled_pipelines.items(): - assert u_id == user["id"] - assert p_id == sleepers_project.uuid - assert it > 0 - assert params.mark_for_cancellation == False - # check the database was properly updated - async with aiopg_engine.acquire() as conn: # type: ignore - result = await conn.execute( - comp_runs.select().where( - (comp_runs.c.user_id == user["id"]) - & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}") - ) # there is only one entry - ) - run_entry = CompRunsAtDB.parse_obj(await result.first()) - assert run_entry.result == RunningState.PUBLISHED - # let the scheduler kick in - await manually_run_comp_scheduler(scheduler) - # check the scheduled pipelines is again empty since it's misconfigured - assert len(scheduler.scheduled_pipelines) == 0 - # check the database entry is correctly updated - async with aiopg_engine.acquire() as conn: # type: ignore - result = await conn.execute( - comp_runs.select().where( - (comp_runs.c.user_id == user["id"]) - & (comp_runs.c.project_uuid == f"{sleepers_project.uuid}") - ) # there is only one entry - ) - run_entry = CompRunsAtDB.parse_obj(await result.first()) - assert run_entry.result == RunningState.ABORTED - - -async def test_proper_pipeline_is_scheduled( - mocked_scheduler_task: None, - mocked_dask_client: mock.MagicMock, - scheduler: BaseCompScheduler, - minimal_app: FastAPI, - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - published_project: PublishedProject, -): - # This calls adds starts the scheduling of a pipeline - await scheduler.run_new_pipeline( - user_id=published_project.project.prj_owner, - project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - ) - assert len(scheduler.scheduled_pipelines) == 1, "the pipeline is not scheduled!" - assert ( - scheduler.wake_up_event.is_set() == True - ), "the scheduler was NOT woken up on the scheduled pipeline!" 
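[Aside] The two assertions above check the scheduler's bookkeeping: a new entry appears in `scheduled_pipelines` and the wake-up event is set so the background loop reacts immediately instead of waiting for its next polling tick. The removed tests exercise this through the real `BaseCompScheduler`; the snippet below is only a minimal illustration of the general asyncio.Event wake-up pattern, not the actual scheduler implementation.

import asyncio

class SketchScheduler:
    """Illustrative only: a background loop that sleeps until explicitly woken up."""

    def __init__(self) -> None:
        self.wake_up_event = asyncio.Event()
        self.scheduled_pipelines: dict[tuple, dict] = {}

    def run_new_pipeline(self, key: tuple) -> None:
        self.scheduled_pipelines[key] = {}
        self.wake_up_event.set()  # wake the loop right away

    async def _background_loop(self, poll_interval: float = 5.0) -> None:
        while True:
            # wait for either an explicit wake-up or the regular polling interval
            try:
                await asyncio.wait_for(self.wake_up_event.wait(), timeout=poll_interval)
            except asyncio.TimeoutError:
                pass
            self.wake_up_event.clear()
            # ... iterate self.scheduled_pipelines and advance their state here ...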
- for (u_id, p_id, it), params in scheduler.scheduled_pipelines.items(): - assert u_id == published_project.project.prj_owner - assert p_id == published_project.project.uuid - assert it > 0 - assert params.mark_for_cancellation == False - # check the database is correctly updated, the run is published - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - exp_state=RunningState.PUBLISHED, - ) - published_tasks = [ - published_project.tasks[1], - published_project.tasks[3], - ] - # trigger the scheduler - await manually_run_comp_scheduler(scheduler) - # the client should be created here - mocked_dask_client.create.assert_called_once_with( - app=mock.ANY, - settings=mock.ANY, - endpoint=mock.ANY, - authentication=mock.ANY, - tasks_file_link_type=mock.ANY, - ) - # the tasks are set to pending, so they are ready to be taken, and the dask client is triggered - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [p.node_id for p in published_tasks], - exp_state=RunningState.PENDING, - ) - # the other tasks are published - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [p.node_id for p in published_project.tasks if p not in published_tasks], - exp_state=RunningState.PUBLISHED, - ) - - mocked_dask_client.send_computation_tasks.assert_has_calls( - calls=[ - mock.call( - published_project.project.prj_owner, - project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - tasks={f"{p.node_id}": p.image}, - callback=scheduler._wake_up_scheduler_now, - ) - for p in published_tasks - ], - any_order=True, - ) - mocked_dask_client.send_computation_tasks.reset_mock() - - # trigger the scheduler - await manually_run_comp_scheduler(scheduler) - # let the scheduler kick in, it should switch to the run state to PENDING state, to reflect the tasks states - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - exp_state=RunningState.PENDING, - ) - # no change here - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [p.node_id for p in published_tasks], - exp_state=RunningState.PENDING, - ) - mocked_dask_client.send_computation_tasks.assert_not_called() - - # change 1 task to RUNNING - running_task_id = published_tasks[0].node_id - await set_comp_task_state( - aiopg_engine, - node_id=f"{running_task_id}", - state=StateType.RUNNING, - ) - # trigger the scheduler, comp_run is now STARTED, as is the task - await manually_run_comp_scheduler(scheduler) - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.STARTED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [running_task_id], - exp_state=RunningState.STARTED, - ) - mocked_dask_client.send_computation_tasks.assert_not_called() - - # change the task to SUCCESS - await set_comp_task_state( - aiopg_engine, - node_id=f"{running_task_id}", - state=StateType.SUCCESS, - ) - # trigger the scheduler, the run state is still STARTED, the task is completed - await manually_run_comp_scheduler(scheduler) - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.STARTED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [running_task_id], - exp_state=RunningState.SUCCESS, - ) - next_published_task = 
published_project.tasks[2] - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [next_published_task.node_id], - exp_state=RunningState.PENDING, - ) - mocked_dask_client.send_computation_tasks.assert_called_once_with( - user_id=published_project.project.prj_owner, - project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - tasks={ - f"{next_published_task.node_id}": next_published_task.image, - }, - callback=scheduler._wake_up_scheduler_now, - ) - mocked_dask_client.send_computation_tasks.reset_mock() - - # change 1 task to RUNNING - await set_comp_task_state( - aiopg_engine, - node_id=f"{next_published_task.node_id}", - state=StateType.RUNNING, - ) - # trigger the scheduler, run state should keep to STARTED, task should be as well - await manually_run_comp_scheduler(scheduler) - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.STARTED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [next_published_task.node_id], - exp_state=RunningState.STARTED, - ) - mocked_dask_client.send_computation_tasks.assert_not_called() - - # now change the task to FAILED - await set_comp_task_state( - aiopg_engine, - node_id=f"{next_published_task.node_id}", - state=StateType.FAILED, - ) - # trigger the scheduler, it should keep to STARTED state until it finishes - await manually_run_comp_scheduler(scheduler) - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.STARTED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [next_published_task.node_id], - exp_state=RunningState.FAILED, - ) - mocked_dask_client.send_computation_tasks.assert_not_called() - - # now change the other task to SUCCESS - other_task = published_tasks[1] - await set_comp_task_state( - aiopg_engine, - node_id=f"{other_task.node_id}", - state=StateType.SUCCESS, - ) - # trigger the scheduler, it should switch to FAILED, as we are done - await manually_run_comp_scheduler(scheduler) - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.FAILED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [other_task.node_id], - exp_state=RunningState.SUCCESS, - ) - mocked_dask_client.send_computation_tasks.assert_not_called() - # the scheduled pipeline shall be removed - assert scheduler.scheduled_pipelines == {} - - -@pytest.mark.parametrize( - "backend_error", - [ - ComputationalBackendNotConnectedError(msg="faked disconnected backend"), - ComputationalSchedulerChangedError( - original_scheduler_id="some_old_scheduler_id", - current_scheduler_id="some_new_scheduler_id", - ), - ], -) -async def test_handling_of_disconnected_dask_scheduler( - mocked_scheduler_task: None, - dask_spec_local_cluster: SpecCluster, - scheduler: BaseCompScheduler, - minimal_app: FastAPI, - aiopg_engine: Iterator[aiopg.sa.engine.Engine], # type: ignore - mocker: MockerFixture, - published_project: PublishedProject, - backend_error: SchedulerError, -): - # this will create a non connected backend issue that will trigger re-connection - mocked_dask_client_send_task = mocker.patch( - "simcore_service_director_v2.modules.comp_scheduler.dask_scheduler.DaskClient.send_computation_tasks", - side_effect=backend_error, - ) - - # running the pipeline will now raise and the tasks 
are set back to PUBLISHED - await scheduler.run_new_pipeline( - user_id=published_project.project.prj_owner, - project_id=published_project.project.uuid, - cluster_id=DEFAULT_CLUSTER_ID, - ) - - # since there is no cluster, there is no dask-scheduler, - # the tasks shall all still be in PUBLISHED state now - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.PUBLISHED, - ) - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [t.node_id for t in published_project.tasks], - exp_state=RunningState.PUBLISHED, - ) - # on the next iteration of the pipeline it will try to re-connect - # now try to abort the tasks since we are wondering what is happening, this should auto-trigger the scheduler - await scheduler.stop_pipeline( - user_id=published_project.project.prj_owner, - project_id=published_project.project.uuid, - ) - # we ensure the scheduler was run - await manually_run_comp_scheduler(scheduler) - # after this step the tasks are marked as ABORTED - await assert_comp_tasks_state( - aiopg_engine, - published_project.project.uuid, - [ - t.node_id - for t in published_project.tasks - if t.node_class == NodeClass.COMPUTATIONAL - ], - exp_state=RunningState.ABORTED, - ) - # then we have another scheduler run - await manually_run_comp_scheduler(scheduler) - # now the run should be ABORTED - await assert_comp_run_state( - aiopg_engine, - published_project.project.prj_owner, - published_project.project.uuid, - RunningState.ABORTED, - ) - - -@dataclass -class RebootState: - task_status: RunningState - task_result: Union[Exception, TaskOutputData] - expected_task_state_group1: RunningState - expected_task_state_group2: RunningState - expected_run_state: RunningState - - -@pytest.mark.parametrize( - "reboot_state", - [ - pytest.param( - RebootState( - RunningState.UNKNOWN, - ComputationalBackendTaskNotFoundError(job_id="fake_job_id"), - RunningState.FAILED, - RunningState.ABORTED, - RunningState.FAILED, - ), - id="reboot with lost tasks", - ), - pytest.param( - RebootState( - RunningState.ABORTED, - TaskCancelledError(job_id="fake_job_id"), - RunningState.ABORTED, - RunningState.ABORTED, - RunningState.ABORTED, - ), - id="reboot with aborted tasks", - ), - pytest.param( - RebootState( - RunningState.FAILED, - ValueError("some error during the call"), - RunningState.FAILED, - RunningState.ABORTED, - RunningState.FAILED, - ), - id="reboot with failed tasks", - ), - pytest.param( - RebootState( - RunningState.STARTED, - ComputationalBackendTaskResultsNotReadyError(job_id="fake_job_id"), - RunningState.STARTED, - RunningState.STARTED, - RunningState.STARTED, - ), - id="reboot with running tasks", - ), - pytest.param( - RebootState( - RunningState.SUCCESS, - TaskOutputData.parse_obj({"whatever_output": 123}), - RunningState.SUCCESS, - RunningState.SUCCESS, - RunningState.SUCCESS, - ), - id="reboot with completed tasks", - ), - ], -) -async def test_handling_scheduling_after_reboot( - mocked_scheduler_task: None, - mocked_dask_client: mock.MagicMock, - aiopg_engine: aiopg.sa.engine.Engine, # type: ignore - running_project: RunningProject, - scheduler: BaseCompScheduler, - minimal_app: FastAPI, - mocked_node_ports: None, - mocked_clean_task_output_fct: mock.MagicMock, - reboot_state: RebootState, -): - """After the dask client is rebooted, or that the director-v2 reboots the scheduler - shall continue scheduling correctly. 
Even though the task might have continued to run - in the dask-scheduler.""" - - async def mocked_get_tasks_status(job_ids: list[str]) -> list[RunningState]: - return [reboot_state.task_status for j in job_ids] - - mocked_dask_client.get_tasks_status.side_effect = mocked_get_tasks_status - - async def mocked_get_task_result(_job_id: str) -> TaskOutputData: - if isinstance(reboot_state.task_result, Exception): - raise reboot_state.task_result - return reboot_state.task_result - - mocked_dask_client.get_task_result.side_effect = mocked_get_task_result - - await manually_run_comp_scheduler(scheduler) - # the status will be called once for all RUNNING tasks - mocked_dask_client.get_tasks_status.assert_called_once() - if reboot_state.expected_run_state in COMPLETED_STATES: - mocked_dask_client.get_task_result.assert_has_calls( - [ - mock.call(t.job_id) - for t in running_project.tasks - if t.node_class == NodeClass.COMPUTATIONAL - ], - any_order=True, - ) - else: - mocked_dask_client.get_task_result.assert_not_called() - if reboot_state.expected_run_state in [RunningState.ABORTED, RunningState.FAILED]: - # the clean up of the outputs should be done - mocked_clean_task_output_fct.assert_has_calls( - [ - mock.call( - mock.ANY, - running_project.project.prj_owner, - running_project.project.uuid, - t.node_id, - ) - for t in running_project.tasks - if t.node_class == NodeClass.COMPUTATIONAL - ], - any_order=True, - ) - else: - mocked_clean_task_output_fct.assert_not_called() - - await assert_comp_tasks_state( - aiopg_engine, - running_project.project.uuid, - [ - running_project.tasks[1].node_id, - running_project.tasks[2].node_id, - running_project.tasks[3].node_id, - ], - exp_state=reboot_state.expected_task_state_group1, - ) - await assert_comp_tasks_state( - aiopg_engine, - running_project.project.uuid, - [running_project.tasks[4].node_id], - exp_state=reboot_state.expected_task_state_group2, - ) - await assert_comp_run_state( - aiopg_engine, - running_project.project.prj_owner, - running_project.project.uuid, - exp_state=reboot_state.expected_run_state, - ) diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_groups_extra_properties.py b/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_groups_extra_properties.py index 73a303ae2cb..2ac7d6cd0df 100644 --- a/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_groups_extra_properties.py +++ b/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_groups_extra_properties.py @@ -1,14 +1,15 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument -from typing import Any, Callable, Iterator, cast +from collections.abc import Callable, Iterator +from typing import Any, cast import pytest import sqlalchemy as sa +from faker import Faker from fastapi import FastAPI -from pytest import FixtureRequest, MonkeyPatch +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict from simcore_postgres_database.models.groups import groups from simcore_postgres_database.models.groups_extra_properties import ( groups_extra_properties, @@ -29,21 +30,25 @@ @pytest.fixture def mock_env( - monkeypatch: MonkeyPatch, - postgres_host_config: dict[str, str], + monkeypatch: pytest.MonkeyPatch, mock_env: EnvVarsDict, + postgres_host_config: dict[str, str], postgres_db: sa.engine.Engine, + faker: Faker, ) -> EnvVarsDict: """overrides 
unit/conftest:mock_env fixture""" env_vars = mock_env.copy() env_vars.update( { - "DIRECTOR_V2_POSTGRES_ENABLED": "true", "S3_ACCESS_KEY": "12345678", "S3_BUCKET_NAME": "simcore", "S3_ENDPOINT": "http://172.17.0.1:9001", + "S3_REGION": faker.pystr(), "S3_SECRET_KEY": "12345678", - "S3_SECURE": "False", + "POSTGRES_HOST": postgres_host_config["host"], + "POSTGRES_USER": postgres_host_config["user"], + "POSTGRES_PASSWORD": postgres_host_config["password"], + "POSTGRES_DB": postgres_host_config["database"], } ) setenvs_from_dict(monkeypatch, env_vars) @@ -81,7 +86,7 @@ def creator( ) # this is needed to get the primary_gid correctly result = con.execute( - sa.select([groups_extra_properties]).where( + sa.select(groups_extra_properties).where( groups_extra_properties.c.group_id == group_id ) ) @@ -104,18 +109,18 @@ def creator( @pytest.fixture(params=[True, False]) -def with_internet_access(request: FixtureRequest) -> bool: +def with_internet_access(request: pytest.FixtureRequest) -> bool: return request.param @pytest.fixture() async def user( mock_env: EnvVarsDict, - registered_user: Callable[..., dict], + create_registered_user: Callable[..., dict], give_internet_to_group: Callable[..., dict], with_internet_access: bool, ) -> dict[str, Any]: - user = registered_user() + user = create_registered_user() group_info = give_internet_to_group( group_id=user["primary_gid"], has_internet_access=with_internet_access ) diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_projects.py b/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_projects.py index 861fa97fd46..14ff015d790 100644 --- a/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_projects.py +++ b/services/director-v2/tests/unit/with_dbs/test_modules_db_repositories_projects.py @@ -1,16 +1,18 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument -from typing import Any, Callable, cast +from collections.abc import Awaitable, Callable +from typing import Any import pytest import sqlalchemy as sa from faker import Faker from fastapi import FastAPI from models_library.projects import ProjectAtDB -from pytest import MonkeyPatch +from models_library.projects_nodes_io import NodeID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict +from simcore_postgres_database.utils_projects_nodes import ProjectNodesNodeNotFoundError from simcore_service_director_v2.modules.db.repositories.projects import ( ProjectsRepository, ) @@ -26,21 +28,25 @@ @pytest.fixture def mock_env( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, postgres_host_config: dict[str, str], mock_env: EnvVarsDict, postgres_db: sa.engine.Engine, + faker: Faker, ) -> EnvVarsDict: """overrides unit/conftest:mock_env fixture""" env_vars = mock_env.copy() env_vars.update( { - "DIRECTOR_V2_POSTGRES_ENABLED": "true", "S3_ACCESS_KEY": "12345678", "S3_BUCKET_NAME": "simcore", "S3_ENDPOINT": "http://172.17.0.1:9001", + "S3_REGION": faker.pystr(), "S3_SECRET_KEY": "12345678", - "S3_SECURE": "False", + "POSTGRES_HOST": postgres_host_config["host"], + "POSTGRES_USER": postgres_host_config["user"], + "POSTGRES_PASSWORD": postgres_host_config["password"], + "POSTGRES_DB": postgres_host_config["database"], } ) setenvs_from_dict(monkeypatch, env_vars) @@ -65,25 +71,22 @@ def workbench() -> dict[str, Any]: @pytest.fixture() async def project( mock_env: EnvVarsDict, 
- registered_user: Callable[..., dict], - project: Callable[..., ProjectAtDB], + create_registered_user: Callable[..., dict], + project: Callable[..., Awaitable[ProjectAtDB]], workbench: dict[str, Any], ) -> ProjectAtDB: - return project(registered_user(), workbench=workbench) + return await project(create_registered_user(), workbench=workbench) async def test_is_node_present_in_workbench( initialized_app: FastAPI, project: ProjectAtDB, faker: Faker ): - project_repository = cast( - ProjectsRepository, - get_repository(initialized_app, ProjectsRepository), - ) + project_repository = get_repository(initialized_app, ProjectsRepository) for node_uuid in project.workbench: assert ( await project_repository.is_node_present_in_workbench( - project_id=project.uuid, node_uuid=node_uuid + project_id=project.uuid, node_uuid=NodeID(node_uuid) ) is True ) @@ -105,3 +108,18 @@ async def test_is_node_present_in_workbench( ) is False ) + + +async def test_get_project_id_from_node( + initialized_app: FastAPI, project: ProjectAtDB, faker: Faker +): + project_repository = get_repository(initialized_app, ProjectsRepository) + for node_uuid in project.workbench: + assert ( + await project_repository.get_project_id_from_node(NodeID(node_uuid)) + == project.uuid + ) + + not_existing_node_id = faker.uuid4(cast_to=None) + with pytest.raises(ProjectNodesNodeNotFoundError): + await project_repository.get_project_id_from_node(not_existing_node_id) diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py index 9f04bea3a3c..06a791cc745 100644 --- a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py +++ b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_api.py @@ -3,37 +3,38 @@ # pylint: disable=protected-access import asyncio +import datetime import logging import sys -from datetime import datetime -from typing import Any, AsyncIterable, AsyncIterator, Optional +from collections.abc import AsyncIterable, AsyncIterator +from typing import Any from uuid import UUID, uuid4 import aiodocker import pytest from aiodocker.utils import clean_filters -from aiodocker.volumes import DockerVolume from faker import Faker -from fastapi.encoders import jsonable_encoder +from models_library.docker import DockerNodeID, to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID +from models_library.services_enums import ServiceState from models_library.users import UserID -from pytest import FixtureRequest, MonkeyPatch -from pytest_simcore.helpers.utils_envs import EnvVarsDict -from simcore_service_director_v2.core.settings import DynamicSidecarSettings -from simcore_service_director_v2.models.schemas.constants import ( +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_director_v2.constants import ( DYNAMIC_PROXY_SERVICE_PREFIX, DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL, DYNAMIC_SIDECAR_SERVICE_PREFIX, - DYNAMIC_VOLUME_REMOVER_PREFIX, ) -from simcore_service_director_v2.models.schemas.dynamic_services import ( - SchedulerData, - ServiceState, - ServiceType, +from simcore_service_director_v2.core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from simcore_service_director_v2.core.dynamic_services_settings.sidecar import ( + DynamicSidecarSettings, ) -from 
simcore_service_director_v2.models.schemas.dynamic_services.scheduler import ( +from simcore_service_director_v2.models.dynamic_services_scheduler import ( DockerContainerInspect, + SchedulerData, SimcoreServiceLabels, ) from simcore_service_director_v2.modules.dynamic_sidecar import docker_api @@ -43,16 +44,12 @@ from simcore_service_director_v2.modules.dynamic_sidecar.docker_api._utils import ( docker_client, ) -from simcore_service_director_v2.modules.dynamic_sidecar.docker_service_specs.volume_remover import ( - DockerVersion, - spec_volume_removal_service, -) from simcore_service_director_v2.modules.dynamic_sidecar.errors import ( DynamicSidecarError, GenericDockerError, ) from tenacity import TryAgain -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed @@ -69,21 +66,29 @@ ] +@pytest.fixture +def dynamic_services_scheduler_settings( + monkeypatch: pytest.MonkeyPatch, mock_env: EnvVarsDict +) -> DynamicServicesSchedulerSettings: + monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", "test_network_name") + monkeypatch.setenv("SWARM_STACK_NAME", "test_swarm_name") + return DynamicServicesSchedulerSettings.create_from_envs() + + @pytest.fixture def dynamic_sidecar_settings( - monkeypatch: MonkeyPatch, mock_env: EnvVarsDict + monkeypatch: pytest.MonkeyPatch, mock_env: EnvVarsDict, faker: Faker ) -> DynamicSidecarSettings: monkeypatch.setenv("DYNAMIC_SIDECAR_IMAGE", "local/dynamic-sidecar:MOCKED") monkeypatch.setenv("DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED", "false") monkeypatch.setenv("TRAEFIK_SIMCORE_ZONE", "test_traefik_zone") - monkeypatch.setenv("SWARM_STACK_NAME", "test_swarm_name") - monkeypatch.setenv("SIMCORE_SERVICES_NETWORK_NAME", "test_network_name") + monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) return DynamicSidecarSettings.create_from_envs() @@ -172,7 +177,7 @@ def dynamic_sidecar_service_name() -> str: @pytest.fixture def dynamic_sidecar_service_spec( dynamic_sidecar_service_name: str, - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, scheduler_data_from_http_request: SchedulerData, ) -> dict[str, Any]: # "joseluisq/static-web-server" is ~2MB docker image @@ -182,16 +187,16 @@ def dynamic_sidecar_service_spec( "name": dynamic_sidecar_service_name, "task_template": {"ContainerSpec": {"Image": "joseluisq/static-web-server"}}, "labels": { - "swarm_stack_name": f"{dynamic_sidecar_settings.SWARM_STACK_NAME}", - "uuid": f"{uuid4()}", - "service_key": "simcore/services/dynamic/3dviewer", - "service_tag": "2.4.5", - "traefik.docker.network": "", + "traefik.swarm.network": "", "io.simcore.zone": "", - "service_port": "80", - "study_id": f"{uuid4()}", - "user_id": "123", - DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: scheduler_data_from_http_request.json(), + f"{to_simcore_runtime_docker_label_key('project_id')}": f"{uuid4()}", + f"{to_simcore_runtime_docker_label_key('user_id')}": "123", + 
f"{to_simcore_runtime_docker_label_key('node_id')}": f"{uuid4()}", + f"{to_simcore_runtime_docker_label_key('swarm_stack_name')}": f"{dynamic_services_scheduler_settings.SWARM_STACK_NAME}", + f"{to_simcore_runtime_docker_label_key('service_port')}": "80", + f"{to_simcore_runtime_docker_label_key('service_key')}": "simcore/services/dynamic/3dviewer", + f"{to_simcore_runtime_docker_label_key('service_version')}": "2.4.5", + DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL: scheduler_data_from_http_request.model_dump_json(), }, } @@ -207,11 +212,6 @@ async def cleanup_test_dynamic_sidecar_service( ) -@pytest.fixture -def user_id(faker: Faker) -> UserID: - return faker.pyint(min_value=1) - - @pytest.fixture def project_id(faker: Faker) -> ProjectID: return ProjectID(faker.uuid4()) @@ -227,7 +227,7 @@ def dynamic_sidecar_stack_specs( node_uuid: UUID, user_id: UserID, project_id: ProjectID, - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, ) -> list[dict[str, Any]]: return [ { @@ -236,11 +236,13 @@ def dynamic_sidecar_stack_specs( "ContainerSpec": {"Image": "joseluisq/static-web-server"} }, "labels": { - "swarm_stack_name": f"{dynamic_sidecar_settings.SWARM_STACK_NAME}", - "type": f"{ServiceType.DEPENDENCY.value}", - "uuid": f"{node_uuid}", - "user_id": f"{user_id}", - "study_id": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('project_id')}": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('user_id')}": f"{user_id}", + f"{to_simcore_runtime_docker_label_key('node_id')}": f"{node_uuid}", + f"{to_simcore_runtime_docker_label_key('swarm_stack_name')}": f"{dynamic_services_scheduler_settings.SWARM_STACK_NAME}", + f"{to_simcore_runtime_docker_label_key('service_port')}": "80", + f"{to_simcore_runtime_docker_label_key('service_key')}": "simcore/services/dynamic/3dviewer", + f"{to_simcore_runtime_docker_label_key('service_version')}": "2.4.5", }, }, { @@ -249,11 +251,13 @@ def dynamic_sidecar_stack_specs( "ContainerSpec": {"Image": "joseluisq/static-web-server"} }, "labels": { - "swarm_stack_name": f"{dynamic_sidecar_settings.SWARM_STACK_NAME}", - "type": f"{ServiceType.MAIN.value}", - "uuid": f"{node_uuid}", - "user_id": f"{user_id}", - "study_id": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('project_id')}": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('user_id')}": f"{user_id}", + f"{to_simcore_runtime_docker_label_key('node_id')}": f"{node_uuid}", + f"{to_simcore_runtime_docker_label_key('swarm_stack_name')}": f"{dynamic_services_scheduler_settings.SWARM_STACK_NAME}", + f"{to_simcore_runtime_docker_label_key('service_port')}": "80", + f"{to_simcore_runtime_docker_label_key('service_key')}": "simcore/services/dynamic/3dviewer", + f"{to_simcore_runtime_docker_label_key('service_version')}": "2.4.5", }, }, ] @@ -327,16 +331,18 @@ def service_name() -> str: @pytest.fixture( params=[ - SimcoreServiceLabels.parse_obj(example) - for example in SimcoreServiceLabels.Config.schema_extra["examples"] + SimcoreServiceLabels.model_validate(example) + for example in SimcoreServiceLabels.model_json_schema()["examples"] ], ) -def labels_example(request: FixtureRequest) -> SimcoreServiceLabels: +def labels_example(request: pytest.FixtureRequest) -> SimcoreServiceLabels: return request.param -@pytest.fixture(params=[None, datetime.utcnow()]) -def time_dy_sidecar_became_unreachable(request: FixtureRequest) -> Optional[datetime]: +@pytest.fixture(params=[None, datetime.datetime.now(tz=datetime.UTC)]) +def 
time_dy_sidecar_became_unreachable( + request: pytest.FixtureRequest, +) -> datetime.datetime | None: return request.param @@ -344,7 +350,7 @@ def time_dy_sidecar_became_unreachable(request: FixtureRequest) -> Optional[date def mock_scheduler_data( labels_example: SimcoreServiceLabels, scheduler_data: SchedulerData, - time_dy_sidecar_became_unreachable: Optional[datetime], + time_dy_sidecar_became_unreachable: datetime.datetime | None, service_name: str, ) -> SchedulerData: # test all possible cases @@ -379,54 +385,56 @@ async def mock_service( @pytest.mark.parametrize( "simcore_services_network_name", - ("n", "network", "with_underscore", "with-dash", "with-dash_with_underscore"), + ["n", "network", "with_underscore", "with-dash", "with-dash_with_underscore"], ) def test_settings__valid_network_names( simcore_services_network_name: str, - monkeypatch: MonkeyPatch, - dynamic_sidecar_settings: DynamicSidecarSettings, + monkeypatch: pytest.MonkeyPatch, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, ) -> None: - - items = dynamic_sidecar_settings.dict() + items = dynamic_services_scheduler_settings.model_dump() items["SIMCORE_SERVICES_NETWORK_NAME"] = simcore_services_network_name # validate network names - DynamicSidecarSettings.parse_obj(items) + DynamicServicesSchedulerSettings.model_validate(items) async def test_failed_docker_client_request(docker_swarm: None): - missing_network_name = "this_network_cannot_be_found" - with pytest.raises(GenericDockerError) as execinfo: + with pytest.raises( + GenericDockerError, + match=f"Unexpected error using docker client: network {missing_network_name} not found", + ): async with docker_client() as client: await client.networks.get(missing_network_name) - assert ( - str(execinfo.value) - == f"Unexpected error from docker client: network {missing_network_name} not found" - ) async def test_get_swarm_network_ok( - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, simcore_services_network_name: str, ensure_swarm_network: None, docker_swarm: None, ): - swarm_network = await docker_api.get_swarm_network(dynamic_sidecar_settings) + swarm_network = await docker_api.get_swarm_network( + dynamic_services_scheduler_settings.SIMCORE_SERVICES_NETWORK_NAME + ) assert swarm_network["Name"] == simcore_services_network_name async def test_get_swarm_network_missing_network( - dynamic_sidecar_settings: DynamicSidecarSettings, docker_swarm: None + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, + docker_swarm: None, ): - with pytest.raises(DynamicSidecarError) as excinfo: - await docker_api.get_swarm_network(dynamic_sidecar_settings) - - assert str(excinfo.value) == ( - "Swarm network name (searching for '*test_network_name*') is not configured." - "Found following networks: []" - ) + with pytest.raises( + DynamicSidecarError, + match=r"Unexpected dynamic sidecar error: " + r"Swarm network name \(searching for \'\*test_network_name\*\'\) is not configured." 
+ r"Found following networks: \[\]", + ): + await docker_api.get_swarm_network( + dynamic_services_scheduler_settings.SIMCORE_SERVICES_NETWORK_NAME + ) async def test_recreate_network_multiple_times( @@ -448,28 +456,10 @@ async def test_create_service( assert service_id -async def test_inspect_service( - service_spec: dict[str, Any], - cleanup_test_service_name: None, - docker_swarm: None, -): - service_id = await docker_api.create_service_and_get_id(service_spec) - assert service_id - - service_inspect = await docker_api.inspect_service(service_id) - - assert service_inspect["Spec"]["Labels"] == service_spec["labels"] - assert service_inspect["Spec"]["Name"] == service_spec["name"] - assert ( - service_inspect["Spec"]["TaskTemplate"]["ContainerSpec"]["Image"] - == service_spec["task_template"]["ContainerSpec"]["Image"] - ) - - async def test_services_to_observe_exist( dynamic_sidecar_service_name: str, dynamic_sidecar_service_spec: dict[str, Any], - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, cleanup_test_dynamic_sidecar_service: None, docker_swarm: None, ): @@ -479,7 +469,7 @@ async def test_services_to_observe_exist( assert service_id dynamic_services = await docker_api.get_dynamic_sidecars_to_observe( - dynamic_sidecar_settings + dynamic_services_scheduler_settings.SWARM_STACK_NAME ) assert len(dynamic_services) == 1 @@ -488,7 +478,7 @@ async def test_services_to_observe_exist( async def test_dynamic_sidecar_in_running_state_and_node_id_is_recovered( dynamic_sidecar_service_spec: dict[str, Any], - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, cleanup_test_dynamic_sidecar_service: None, docker_swarm: None, ): @@ -498,7 +488,7 @@ async def test_dynamic_sidecar_in_running_state_and_node_id_is_recovered( assert service_id node_id = await docker_api.get_dynamic_sidecar_placement( - service_id, dynamic_sidecar_settings + service_id, dynamic_services_scheduler_settings ) assert node_id @@ -514,7 +504,6 @@ async def test_dynamic_sidecar_get_dynamic_sidecar_sate_fail_to_schedule( cleanup_test_dynamic_sidecar_service: None, docker_swarm: None, ): - # set unachievable resource dynamic_sidecar_service_spec["task_template"]["Resources"] = { "Reservations": {"NanoCPUs": MAX_INT64, "MemoryBytes": MAX_INT64} @@ -537,14 +526,13 @@ async def test_dynamic_sidecar_get_dynamic_sidecar_sate_fail_to_schedule( async def test_is_dynamic_sidecar_stack_missing( node_uuid: UUID, - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, dynamic_sidecar_stack_specs: list[dict[str, Any]], cleanup_dynamic_sidecar_stack: None, docker_swarm: None, ): - services_are_missing = await docker_api.is_dynamic_sidecar_stack_missing( - node_uuid, dynamic_sidecar_settings + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME ) assert services_are_missing is True @@ -554,20 +542,20 @@ async def test_is_dynamic_sidecar_stack_missing( assert service_id services_are_missing = await docker_api.is_dynamic_sidecar_stack_missing( - node_uuid, dynamic_sidecar_settings + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME ) assert services_are_missing is False async def test_are_sidecar_and_proxy_services_present( node_uuid: UUID, - dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, dynamic_sidecar_stack_specs: list[dict[str, 
Any]], cleanup_dynamic_sidecar_stack: None, docker_swarm: None, ): services_are_missing = await docker_api.are_sidecar_and_proxy_services_present( - node_uuid, dynamic_sidecar_settings + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME ) assert services_are_missing is False @@ -577,7 +565,7 @@ async def test_are_sidecar_and_proxy_services_present( assert service_id services_are_missing = await docker_api.are_sidecar_and_proxy_services_present( - node_uuid, dynamic_sidecar_settings + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME ) assert services_are_missing is True @@ -588,6 +576,7 @@ async def test_remove_dynamic_sidecar_stack( dynamic_sidecar_stack_specs: list[dict[str, Any]], docker_swarm: None, async_docker_client: aiodocker.Docker, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, ): async def _count_services_in_stack( node_uuid: UUID, @@ -597,8 +586,8 @@ async def _count_services_in_stack( services = await async_docker_client.services.list( filters={ "label": [ - f"swarm_stack_name={dynamic_sidecar_settings.SWARM_STACK_NAME}", - f"uuid={node_uuid}", + f"{to_simcore_runtime_docker_label_key('swarm_stack_name')}={dynamic_services_scheduler_settings.SWARM_STACK_NAME}", + f"{to_simcore_runtime_docker_label_key('node_id')}={node_uuid}", ] } ) @@ -625,7 +614,9 @@ async def _count_services_in_stack( == 2 ) - await docker_api.remove_dynamic_sidecar_stack(node_uuid, dynamic_sidecar_settings) + await docker_api.remove_dynamic_sidecar_stack( + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME + ) assert ( await _count_services_in_stack( @@ -658,34 +649,18 @@ async def test_remove_dynamic_sidecar_network_fails( assert delete_result is False -async def test_list_dynamic_sidecar_services( - user_id: UserID, - project_id: ProjectID, - dynamic_sidecar_settings: DynamicSidecarSettings, - dynamic_sidecar_stack_specs: list[dict[str, Any]], - cleanup_dynamic_sidecar_stack: None, - docker_swarm: None, -): - # start 2 fake services to emulate the dynamic-sidecar stack - for dynamic_sidecar_stack in dynamic_sidecar_stack_specs: - service_id = await docker_api.create_service_and_get_id(dynamic_sidecar_stack) - assert service_id - - services = await docker_api.list_dynamic_sidecar_services( - dynamic_sidecar_settings, user_id=user_id, project_id=project_id - ) - assert len(services) == 1 - - async def test_is_sidecar_running( node_uuid: UUID, dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, dynamic_sidecar_stack_specs: list[dict[str, Any]], cleanup_dynamic_sidecar_stack: None, docker_swarm: None, ): assert ( - await docker_api.is_sidecar_running(node_uuid, dynamic_sidecar_settings) + await docker_api.is_sidecar_running( + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME + ) is False ) @@ -699,7 +674,7 @@ async def test_is_sidecar_running( ): with attempt: is_sidecar_running = await docker_api.is_sidecar_running( - node_uuid, dynamic_sidecar_settings + node_uuid, dynamic_services_scheduler_settings.SWARM_STACK_NAME ) print(f"Sidecar for service {node_uuid}: {is_sidecar_running=}") assert is_sidecar_running is True @@ -752,7 +727,7 @@ async def test_update_scheduler_data_label( # fetch stored data in labels service_inspect = await async_docker_client.services.inspect(mock_service) labels = service_inspect["Spec"]["Labels"] - scheduler_data = SchedulerData.parse_raw( + scheduler_data = SchedulerData.model_validate_json( 
labels[DYNAMIC_SIDECAR_SCHEDULER_DATA_LABEL] ) assert scheduler_data == mock_scheduler_data @@ -789,17 +764,16 @@ async def test_regression_update_service_update_out_of_sequence( @pytest.fixture -async def target_node_id(async_docker_client: aiodocker.Docker) -> str: +async def target_node_id(async_docker_client: aiodocker.Docker) -> DockerNodeID: # get a node's ID docker_nodes = await async_docker_client.nodes.list() - target_node_id = docker_nodes[0]["ID"] - return target_node_id + return TypeAdapter(DockerNodeID).validate_python(docker_nodes[0]["ID"]) async def test_constrain_service_to_node( async_docker_client: aiodocker.Docker, mock_service: str, - target_node_id: str, + target_node_id: DockerNodeID, docker_swarm: None, ): await docker_api.constrain_service_to_node( @@ -816,194 +790,3 @@ async def test_constrain_service_to_node( label, value = node_id_constraint.split("==") assert label.strip() == "node.id" assert value.strip() == target_node_id - - -@pytest.fixture -async def named_volumes( - async_docker_client: aiodocker.Docker, faker: Faker -) -> AsyncIterator[list[str]]: - named_volumes: list[DockerVolume] = [] - volume_names: list[str] = [] - for _ in range(10): - named_volume: DockerVolume = await async_docker_client.volumes.create( - {"Name": f"named-volume-{faker.uuid4()}"} - ) - volume_names.append(named_volume.name) - named_volumes.append(named_volume) - - yield volume_names - - # remove volume if still present - for named_volume in named_volumes: - try: - await named_volume.delete() - except aiodocker.DockerError: - pass - - -async def is_volume_present( - async_docker_client: aiodocker.Docker, volume_name: str -) -> bool: - docker_volume = DockerVolume(async_docker_client, volume_name) - try: - await docker_volume.show() - return True - except aiodocker.DockerError as e: - assert e.message == f"get {volume_name}: no such volume" - return False - - -async def test_remove_volume_from_node_ok( - docker_swarm: None, - async_docker_client: aiodocker.Docker, - named_volumes: list[str], - target_node_id: str, - user_id: UserID, - project_id: ProjectID, - node_uuid: NodeID, - dynamic_sidecar_settings: DynamicSidecarSettings, -): - for named_volume in named_volumes: - assert await is_volume_present(async_docker_client, named_volume) is True - - volume_removal_result = await docker_api.remove_volumes_from_node( - dynamic_sidecar_settings=dynamic_sidecar_settings, - volume_names=named_volumes, - docker_node_id=target_node_id, - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - ) - assert volume_removal_result is True - - for named_volume in named_volumes: - assert await is_volume_present(async_docker_client, named_volume) is False - - -async def test_remove_volume_from_node_no_volume_found( - docker_swarm: None, - async_docker_client: aiodocker.Docker, - named_volumes: list[str], - target_node_id: str, - user_id: UserID, - project_id: ProjectID, - node_uuid: NodeID, - dynamic_sidecar_settings: DynamicSidecarSettings, -): - missing_volume_name = "nope-i-am-fake-and-do-not-exist" - assert await is_volume_present(async_docker_client, missing_volume_name) is False - - # put the missing one in the middle of the sequence - volumes_to_remove = named_volumes[:1] + [missing_volume_name] + named_volumes[1:] - - volume_removal_result = await docker_api.remove_volumes_from_node( - dynamic_sidecar_settings=dynamic_sidecar_settings, - volume_names=volumes_to_remove, - docker_node_id=target_node_id, - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - 
volume_removal_attempts=2, - sleep_between_attempts_s=1, - ) - assert volume_removal_result is True - assert await is_volume_present(async_docker_client, missing_volume_name) is False - for named_volume in named_volumes: - assert await is_volume_present(async_docker_client, named_volume) is False - - -@pytest.fixture -def volume_removal_services_names(faker: Faker) -> set[str]: - return {f"{DYNAMIC_VOLUME_REMOVER_PREFIX}_{faker.uuid4()}" for _ in range(10)} - - -@pytest.fixture(params=[0, 2]) -def service_timeout_s(request: FixtureRequest) -> int: - return request.param # type: ignore - - -@pytest.fixture -async def ensure_fake_volume_removal_services( - async_docker_client: aiodocker.Docker, - docker_version: DockerVersion, - target_node_id: str, - user_id: UserID, - project_id: ProjectID, - node_uuid: NodeID, - volume_removal_services_names: list[str], - dynamic_sidecar_settings: DynamicSidecarSettings, - service_timeout_s: int, - docker_swarm: None, -) -> AsyncIterator[None]: - started_services_ids: list[str] = [] - - for service_name in volume_removal_services_names: - service_spec = spec_volume_removal_service( - dynamic_sidecar_settings=dynamic_sidecar_settings, - docker_node_id=target_node_id, - user_id=user_id, - project_id=project_id, - node_uuid=node_uuid, - volume_names=[], - docker_version=docker_version, - volume_removal_attempts=0, - sleep_between_attempts_s=0, - service_timeout_s=service_timeout_s, - ) - - # replace values - service_spec.Name = service_name - # use very long sleep command - service_spec.TaskTemplate.ContainerSpec.Command = ["sh", "-c", "sleep 3600"] - - started_service = await async_docker_client.services.create( - **jsonable_encoder(service_spec, by_alias=True, exclude_unset=True) - ) - started_services_ids.append(started_service["ID"]) - - yield None - - for service_id in started_services_ids: - try: - await async_docker_client.services.delete(service_id) - except aiodocker.exceptions.DockerError as e: - assert e.message == f"service {service_id} not found" - - -async def _get_pending_services(async_docker_client: aiodocker.Docker) -> list[str]: - service_filters = {"name": [f"{DYNAMIC_VOLUME_REMOVER_PREFIX}"]} - return [ - x["Spec"]["Name"] - for x in await async_docker_client.services.list(filters=service_filters) - ] - - -async def test_get_volume_removal_services( - ensure_fake_volume_removal_services: None, - async_docker_client: aiodocker.Docker, - volume_removal_services_names: set[str], - dynamic_sidecar_settings: DynamicSidecarSettings, - service_timeout_s: int, -): - # services will be detected as timed out after 1 second - sleep_for = 1.01 - await asyncio.sleep(sleep_for) - - pending_service_names = await _get_pending_services(async_docker_client) - assert len(pending_service_names) == len(volume_removal_services_names) - - # check services are present before removing timed out services - for service_name in pending_service_names: - assert service_name in volume_removal_services_names - - await docker_api.remove_pending_volume_removal_services(dynamic_sidecar_settings) - - # check that timed out services have been removed - pending_service_names = await _get_pending_services(async_docker_client) - services_have_timed_out = sleep_for > service_timeout_s - if services_have_timed_out: - assert len(pending_service_names) == 0 - else: - assert len(pending_service_names) == len(volume_removal_services_names) - for service_name in pending_service_names: - assert service_name in volume_removal_services_names diff --git 
a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py index 9b38e77a1d7..4618a9a9ba0 100644 --- a/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py +++ b/services/director-v2/tests/unit/with_dbs/test_modules_dynamic_sidecar_docker_service_specs.py @@ -5,35 +5,69 @@ import json from typing import Any, cast +from unittest.mock import Mock import pytest import respx +from common_library.json_serialization import json_dumps +from faker import Faker from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder from models_library.aiodocker_api import AioDockerServiceSpec +from models_library.callbacks_mapping import CallbacksMapping +from models_library.docker import ( + DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY, + to_simcore_runtime_docker_label_key, +) +from models_library.resource_tracker import HardwareInfo, PricingInfo from models_library.service_settings_labels import ( SimcoreServiceLabels, SimcoreServiceSettingsLabel, ) -from models_library.services import RunID, ServiceKeyVersion -from pytest import MonkeyPatch +from models_library.services import ServiceKeyVersion, ServiceRunID +from models_library.wallets import WalletInfo +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict -from servicelib.json_serialization import json_dumps -from simcore_service_director_v2.core.settings import DynamicSidecarSettings -from simcore_service_director_v2.models.schemas.dynamic_services import SchedulerData +from settings_library.s3 import S3Settings +from simcore_service_director_v2.core.dynamic_services_settings.scheduler import ( + DynamicServicesSchedulerSettings, +) +from simcore_service_director_v2.core.dynamic_services_settings.sidecar import ( + DynamicSidecarSettings, +) +from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData from simcore_service_director_v2.modules.catalog import CatalogClient +from simcore_service_director_v2.modules.db.repositories.groups_extra_properties import ( + UserExtraProperties, +) from simcore_service_director_v2.modules.dynamic_sidecar.docker_service_specs import ( get_dynamic_sidecar_spec, ) +from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._event_create_sidecars import ( + _DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS, + _merge_service_base_and_user_specs, +) from simcore_service_director_v2.utils.dict_utils import nested_update @pytest.fixture -def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> EnvVarsDict: +def mock_s3_settings() -> S3Settings: + return S3Settings.model_validate(S3Settings.model_json_schema()["examples"][0]) + + +@pytest.fixture +def mock_env( + monkeypatch: pytest.MonkeyPatch, + mock_env: EnvVarsDict, + disable_postgres: None, + mock_s3_settings: S3Settings, + faker: Faker, +) -> EnvVarsDict: """overrides unit/conftest:mock_env fixture""" env_vars = mock_env.copy() env_vars.update( { + "AWS_S3_CLI_S3": '{"S3_ACCESS_KEY":"12345678","S3_BUCKET_NAME":"simcore","S3_ENDPOINT":"http://172.17.0.1:9001","S3_REGION":"us-east-1","S3_SECRET_KEY":"12345678"}', "DYNAMIC_SIDECAR_IMAGE": "local/dynamic-sidecar:MOCK", "LOG_LEVEL": "DEBUG", "POSTGRES_DB": "test", @@ -42,26 +76,22 @@ def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> 
EnvVarsDict: "POSTGRES_PASSWORD": "test", "POSTGRES_PORT": "5432", "POSTGRES_USER": "test", - "R_CLONE_ENABLED": "False", "R_CLONE_PROVIDER": "MINIO", "RABBIT_HOST": "rabbit", "RABBIT_PASSWORD": "adminadmin", "RABBIT_PORT": "5672", "RABBIT_USER": "admin", + "RABBIT_SECURE": "false", "REGISTRY_AUTH": "false", "REGISTRY_PW": "test", "REGISTRY_SSL": "false", "REGISTRY_URL": "foo.bar.com", "REGISTRY_USER": "test", - "S3_ACCESS_KEY": "12345678", - "S3_BUCKET_NAME": "simcore", - "S3_ENDPOINT": "http://172.17.0.1:9001", - "S3_SECRET_KEY": "12345678", - "S3_SECURE": "False", "SC_BOOT_MODE": "production", "SIMCORE_SERVICES_NETWORK_NAME": "simcore_services_network_name", "SWARM_STACK_NAME": "test_swarm_name", "TRAEFIK_SIMCORE_ZONE": "test_traefik_zone", + **jsonable_encoder(mock_s3_settings, exclude_none=True), } ) setenvs_from_dict(monkeypatch, env_vars) @@ -73,6 +103,13 @@ def dynamic_sidecar_settings(mock_env: dict[str, str]) -> DynamicSidecarSettings return DynamicSidecarSettings.create_from_envs() +@pytest.fixture +def dynamic_services_scheduler_settings( + mock_env: dict[str, str], +) -> DynamicServicesSchedulerSettings: + return DynamicServicesSchedulerSettings.create_from_envs() + + @pytest.fixture def swarm_network_id() -> str: return "mocked_swarm_network_id" @@ -81,32 +118,42 @@ def swarm_network_id() -> str: @pytest.fixture def simcore_service_labels() -> SimcoreServiceLabels: # overwrites global fixture - return SimcoreServiceLabels.parse_obj( - SimcoreServiceLabels.Config.schema_extra["examples"][2] + return SimcoreServiceLabels.model_validate( + SimcoreServiceLabels.model_json_schema()["examples"][2] ) +@pytest.fixture +def hardware_info() -> HardwareInfo: + return HardwareInfo.model_validate(HardwareInfo.model_json_schema()["examples"][0]) + + @pytest.fixture def expected_dynamic_sidecar_spec( - run_id: RunID, osparc_product_name: str, request_simcore_user_agent: str + service_run_id: ServiceRunID, + osparc_product_name: str, + request_simcore_user_agent: str, + hardware_info: HardwareInfo, + faker: Faker, + mock_s3_settings: S3Settings, ) -> dict[str, Any]: return { "endpoint_spec": {}, "labels": { - "io.simcore.scheduler-data": SchedulerData.parse_obj( + "io.simcore.scheduler-data": SchedulerData.model_validate( { "compose_spec": '{"version": "2.3", "services": {"rt-web": {"image": ' '"${SIMCORE_REGISTRY}/simcore/services/dynamic/sim4life:${SERVICE_VERSION}", ' - '"init": true, "depends_on": ["s4l-core"]}, "s4l-core": ' - '{"image": ' + '"init": true, "depends_on": ["s4l-core"], "storage_opt": {"size": "10M"} }, ' + '"s4l-core": {"image": ' '"${SIMCORE_REGISTRY}/simcore/services/dynamic/s4l-core:${SERVICE_VERSION}", ' - '"runtime": "nvidia", "init": true, "environment": ' - '["DISPLAY=${DISPLAY}"], "volumes": ' + '"runtime": "nvidia", "storage_opt": {"size": "5G"}, "init": true, ' + '"environment": ["DISPLAY=${DISPLAY}"], "volumes": ' '["/tmp/.X11-unix:/tmp/.X11-unix"]}}}', "container_http_entry": "rt-web", "hostname": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "port": 1222, - "run_id": f"{run_id}", + "run_id": service_run_id, "dynamic_sidecar": { "containers_inspect": [], "dynamic_sidecar_id": None, @@ -116,7 +163,7 @@ def expected_dynamic_sidecar_spec( "is_service_environment_ready": False, "service_removal_state": { "can_remove": False, - "can_save": None, + "can_save": True, "was_removed": False, }, "status": {"current": "ok", "info": ""}, @@ -131,18 +178,25 @@ def expected_dynamic_sidecar_spec( "key": "simcore/services/dynamic/3dviewer", "node_uuid": 
"75c7f3f4-18f9-4678-8610-54a2ade78eaa", "paths_mapping": { - "inputs_path": "/tmp/inputs", - "outputs_path": "/tmp/outputs", - "state_exclude": ["/tmp/strip_me/*", "*.py"], - "state_paths": ["/tmp/save_1", "/tmp_save_2"], + "inputs_path": "/tmp/inputs", # noqa: S108 + "outputs_path": "/tmp/outputs", # noqa: S108 + "state_exclude": ["/tmp/strip_me/*"], # noqa: S108 + "state_paths": ["/tmp/save_1", "/tmp_save_2"], # noqa: S108 }, + "callbacks_mapping": CallbacksMapping.model_json_schema()[ + "examples" + ][3], "product_name": osparc_product_name, + "product_api_base_url": "https://api.local/", "project_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", "proxy_service_name": "dy-proxy_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "request_dns": "test-endpoint", "request_scheme": "http", "request_simcore_user_agent": request_simcore_user_agent, "restart_policy": "on-inputs-downloaded", + "wallet_info": WalletInfo.model_json_schema()["examples"][0], + "pricing_info": PricingInfo.model_json_schema()["examples"][0], + "hardware_info": hardware_info, "service_name": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "service_port": 65534, "service_resources": { @@ -159,14 +213,16 @@ def expected_dynamic_sidecar_spec( "version": "2.4.5", } ).as_label_data(), - "port": "8888", - "service_image": "local/dynamic-sidecar:MOCK", - "service_port": "8888", - "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "swarm_stack_name": "test_swarm_name", - "type": "main-v2", - "user_id": "234", - "uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + f"{to_simcore_runtime_docker_label_key('service-key')}": "simcore/services/dynamic/3dviewer", + f"{to_simcore_runtime_docker_label_key('service-version')}": "2.4.5", + f"{to_simcore_runtime_docker_label_key('memory-limit')}": "8589934592", + f"{to_simcore_runtime_docker_label_key('cpu-limit')}": "4.0", + f"{to_simcore_runtime_docker_label_key('project-id')}": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + f"{to_simcore_runtime_docker_label_key('user-id')}": "234", + f"{to_simcore_runtime_docker_label_key('node-id')}": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + f"{to_simcore_runtime_docker_label_key('product-name')}": "osparc", + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}": "python/test", + f"{to_simcore_runtime_docker_label_key('swarm-stack-name')}": "test_swarm_name", }, "name": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "networks": [{"Target": "mocked_swarm_network_id"}], @@ -175,18 +231,44 @@ def expected_dynamic_sidecar_spec( "Env": { "DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "DY_SIDECAR_NODE_ID": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", - "DY_SIDECAR_RUN_ID": f"{run_id}", - "DY_SIDECAR_PATH_INPUTS": "/tmp/inputs", - "DY_SIDECAR_PATH_OUTPUTS": "/tmp/outputs", + "DY_SIDECAR_RUN_ID": service_run_id, + "DY_SIDECAR_PATH_INPUTS": "/tmp/inputs", # noqa: S108 + "DY_SIDECAR_PATH_OUTPUTS": "/tmp/outputs", # noqa: S108 "DY_SIDECAR_PROJECT_ID": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "DY_SIDECAR_STATE_EXCLUDE": json_dumps({"*.py", "/tmp/strip_me/*"}), + "DY_SIDECAR_STATE_EXCLUDE": json_dumps( + ["/tmp/strip_me/*"] # noqa: S108 + ), "DY_SIDECAR_STATE_PATHS": json_dumps( - ["/tmp/save_1", "/tmp_save_2"] + ["/tmp/save_1", "/tmp_save_2"] # noqa: S108 ), "DY_SIDECAR_USER_ID": "234", "DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS": "False", + "DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE": "True", "FORWARD_ENV_DISPLAY": ":0", + "NODE_PORTS_400_REQUEST_TIMEOUT_ATTEMPTS": "3", "DYNAMIC_SIDECAR_LOG_LEVEL": "DEBUG", + 
"DYNAMIC_SIDECAR_TRACING": "null", + "DY_DEPLOYMENT_REGISTRY_SETTINGS": ( + '{"REGISTRY_AUTH":false,"REGISTRY_PATH":null,' + '"REGISTRY_URL":"foo.bar.com","REGISTRY_USER":' + '"test","REGISTRY_PW":"test","REGISTRY_SSL":false}' + ), + "DY_DOCKER_HUB_REGISTRY_SETTINGS": "null", + "DY_SIDECAR_AWS_S3_CLI_SETTINGS": ( + '{"AWS_S3_CLI_S3":{"S3_ACCESS_KEY":"12345678","S3_BUCKET_NAME":"simcore",' + '"S3_ENDPOINT":"http://172.17.0.1:9001/","S3_REGION":"us-east-1","S3_SECRET_KEY":"12345678"}}' + ), + "DY_SIDECAR_CALLBACKS_MAPPING": ( + '{"metrics":{"service":"rt-web","command":"ls","timeout":1.0},"before_shutdown"' + ':[{"service":"rt-web","command":"ls","timeout":1.0},{"service":"s4l-core",' + '"command":["ls","-lah"],"timeout":1.0}],"inactivity":null}' + ), + "DY_SIDECAR_SERVICE_KEY": "simcore/services/dynamic/3dviewer", + "DY_SIDECAR_SERVICE_VERSION": "2.4.5", + "DY_SIDECAR_PRODUCT_NAME": osparc_product_name, + "DY_SIDECAR_USER_PREFERENCES_PATH": "None", + "DY_SIDECAR_LEGACY_STATE": "null", + "DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED": "True", "POSTGRES_DB": "test", "POSTGRES_HOST": "localhost", "POSTGRES_PORT": "5432", @@ -197,34 +279,34 @@ def expected_dynamic_sidecar_spec( "RABBIT_PASSWORD": "adminadmin", "RABBIT_PORT": "5672", "RABBIT_USER": "admin", - "REGISTRY_AUTH": "False", - "REGISTRY_PATH": "None", - "REGISTRY_PW": "test", - "REGISTRY_SSL": "False", - "REGISTRY_URL": "foo.bar.com", - "REGISTRY_USER": "test", + "RABBIT_SECURE": "False", + "R_CLONE_OPTION_BUFFER_SIZE": "16M", + "R_CLONE_OPTION_RETRIES": "3", + "R_CLONE_OPTION_TRANSFERS": "5", "R_CLONE_PROVIDER": "MINIO", - "R_CLONE_ENABLED": "False", - "S3_ACCESS_KEY": "12345678", - "S3_BUCKET_NAME": "simcore", - "S3_ENDPOINT": "http://172.17.0.1:9001", - "S3_SECRET_KEY": "12345678", - "S3_SECURE": "False", "SC_BOOT_MODE": "production", "SIMCORE_HOST_NAME": "dy-sidecar_75c7f3f4-18f9-4678-8610-54a2ade78eaa", "SSL_CERT_FILE": "", + "STORAGE_USERNAME": "null", "STORAGE_HOST": "storage", + "STORAGE_PASSWORD": "null", + "STORAGE_SECURE": "0", "STORAGE_PORT": "8080", + **jsonable_encoder(mock_s3_settings, exclude_unset=True), }, + "CapabilityAdd": None, "Hosts": [], "Image": "local/dynamic-sidecar:MOCK", "Init": True, "Labels": { - "mem_limit": "8589934592", - "nano_cpus_limit": "4000000000", - "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "user_id": "234", - "uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + f"{to_simcore_runtime_docker_label_key('memory-limit')}": "8589934592", + f"{to_simcore_runtime_docker_label_key('cpu-limit')}": "4.0", + f"{to_simcore_runtime_docker_label_key('project-id')}": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", + f"{to_simcore_runtime_docker_label_key('user-id')}": "234", + f"{to_simcore_runtime_docker_label_key('node-id')}": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", + f"{to_simcore_runtime_docker_label_key('product-name')}": "osparc", + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}": "python/test", + f"{to_simcore_runtime_docker_label_key('swarm-stack-name')}": "test_swarm_name", }, "Mounts": [ { @@ -233,89 +315,95 @@ def expected_dynamic_sidecar_spec( "Type": "bind", }, { - "Source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_erots-derahs_", + "Source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_erots-derahs_", "Target": "/dy-volumes/shared-store", "Type": "volume", "VolumeOptions": { + "DriverConfig": None, "Labels": { "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "run_id": f"{run_id}", - "source": 
f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_erots-derahs_", + "run_id": service_run_id, + "source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_erots-derahs_", "swarm_stack_name": "test_swarm_name", "user_id": "234", - } + }, }, }, { "Target": "/dy-volumes/tmp/inputs", - "Source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stupni_pmt_", + "Source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stupni_pmt_", "Type": "volume", "VolumeOptions": { "Labels": { "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "run_id": f"{run_id}", - "source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stupni_pmt_", + "run_id": service_run_id, + "source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stupni_pmt_", "swarm_stack_name": "test_swarm_name", "user_id": "234", - } + }, }, }, { "Target": "/dy-volumes/tmp/outputs", - "Source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stuptuo_pmt_", + "Source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stuptuo_pmt_", "Type": "volume", "VolumeOptions": { "Labels": { "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "run_id": f"{run_id}", - "source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stuptuo_pmt_", + "run_id": service_run_id, + "source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_stuptuo_pmt_", "swarm_stack_name": "test_swarm_name", "user_id": "234", - } + }, }, }, { "Target": "/dy-volumes/tmp/save_1", - "Source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_1_evas_pmt_", + "Source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_1_evas_pmt_", "Type": "volume", "VolumeOptions": { "Labels": { "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "run_id": f"{run_id}", - "source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_1_evas_pmt_", + "run_id": service_run_id, + "source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_1_evas_pmt_", "swarm_stack_name": "test_swarm_name", "user_id": "234", - } + }, }, }, { "Target": "/dy-volumes/tmp_save_2", - "Source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_2_evas_pmt_", + "Source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_2_evas_pmt_", "Type": "volume", "VolumeOptions": { "Labels": { "node_uuid": "75c7f3f4-18f9-4678-8610-54a2ade78eaa", "study_id": "dd1d04d9-d704-4f7e-8f0f-1ca60cc771fe", - "run_id": f"{run_id}", - "source": f"dyv_{run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_2_evas_pmt_", + "run_id": service_run_id, + "source": f"dyv_{service_run_id}_75c7f3f4-18f9-4678-8610-54a2ade78eaa_2_evas_pmt_", "swarm_stack_name": "test_swarm_name", "user_id": "234", - } + }, }, }, { "ReadOnly": True, - "Source": "/tmp/.X11-unix", - "Target": "/tmp/.X11-unix", + "Source": "/tmp/.X11-unix", # noqa: S108 + "Target": "/tmp/.X11-unix", # noqa: S108 "Type": "bind", }, ], }, - "Placement": {"Constraints": ["node.platform.os == linux"]}, + "Placement": { + "Constraints": [ + f"node.labels.{DOCKER_TASK_EC2_INSTANCE_TYPE_PLACEMENT_CONSTRAINT_KEY}=={hardware_info.aws_ec2_instances[0]}", + "node.platform.os == linux", + ] + }, "Resources": { "Limits": {"MemoryBytes": 8589934592, "NanoCPUs": 4000000000}, "Reservations": { @@ -335,41 +423,52 @@ def expected_dynamic_sidecar_spec( } -def test_get_dynamic_proxy_spec( +async def test_get_dynamic_proxy_spec( mocked_catalog_service_api: 
respx.MockRouter, minimal_app: FastAPI, scheduler_data: SchedulerData, dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, swarm_network_id: str, simcore_service_labels: SimcoreServiceLabels, expected_dynamic_sidecar_spec: dict[str, Any], + hardware_info: HardwareInfo, ) -> None: dynamic_sidecar_spec_accumulated = None assert ( - dynamic_sidecar_settings.dict() - == minimal_app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.dict() + dynamic_sidecar_settings + == minimal_app.state.settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR ) - expected_dynamic_sidecar_spec_model = AioDockerServiceSpec.parse_obj( + + expected_dynamic_sidecar_spec_model = AioDockerServiceSpec.model_validate( expected_dynamic_sidecar_spec ) - assert expected_dynamic_sidecar_spec_model.TaskTemplate - assert expected_dynamic_sidecar_spec_model.TaskTemplate.ContainerSpec - assert expected_dynamic_sidecar_spec_model.TaskTemplate.ContainerSpec.Env + assert expected_dynamic_sidecar_spec_model.task_template + assert expected_dynamic_sidecar_spec_model.task_template.container_spec + assert expected_dynamic_sidecar_spec_model.task_template.container_spec.env for count in range(1, 11): # loop to check it does not repeat copies print(f"{count:*^50}") - dynamic_sidecar_spec: AioDockerServiceSpec = get_dynamic_sidecar_spec( + dynamic_sidecar_spec: AioDockerServiceSpec = await get_dynamic_sidecar_spec( scheduler_data=scheduler_data, dynamic_sidecar_settings=dynamic_sidecar_settings, + dynamic_services_scheduler_settings=dynamic_services_scheduler_settings, swarm_network_id=swarm_network_id, settings=cast(SimcoreServiceSettingsLabel, simcore_service_labels.settings), app_settings=minimal_app.state.settings, - allow_internet_access=False, + hardware_info=hardware_info, + has_quota_support=False, + metrics_collection_allowed=True, + user_extra_properties=UserExtraProperties( + is_internet_enabled=False, + is_telemetry_enabled=True, + is_efs_enabled=False, + ), + rpc_client=Mock(), ) - # NOTE: exclude_keys = { "Labels": True, "TaskTemplate": {"ContainerSpec": {"Env": True}}, @@ -378,46 +477,65 @@ def test_get_dynamic_proxy_spec( # NOTE: some flakiness here # state_exclude is a set and does not preserve order # when dumping to json it gets converted to a list - dynamic_sidecar_spec.TaskTemplate.ContainerSpec.Env[ + assert dynamic_sidecar_spec.task_template + assert dynamic_sidecar_spec.task_template.container_spec + assert dynamic_sidecar_spec.task_template.container_spec.env + assert dynamic_sidecar_spec.task_template.container_spec.env[ + "DY_SIDECAR_STATE_EXCLUDE" + ] + + dynamic_sidecar_spec.task_template.container_spec.env[ "DY_SIDECAR_STATE_EXCLUDE" - ] = sorted( - dynamic_sidecar_spec.TaskTemplate.ContainerSpec.Env[ - "DY_SIDECAR_STATE_EXCLUDE" - ] + ] = json.dumps( + sorted( + json.loads( + dynamic_sidecar_spec.task_template.container_spec.env[ + "DY_SIDECAR_STATE_EXCLUDE" + ] + ) + ) ) - expected_dynamic_sidecar_spec_model.TaskTemplate.ContainerSpec.Env[ + assert expected_dynamic_sidecar_spec_model.task_template.container_spec.env[ "DY_SIDECAR_STATE_EXCLUDE" - ] = sorted( - expected_dynamic_sidecar_spec_model.TaskTemplate.ContainerSpec.Env[ - "DY_SIDECAR_STATE_EXCLUDE" - ] + ] + expected_dynamic_sidecar_spec_model.task_template.container_spec.env[ + "DY_SIDECAR_STATE_EXCLUDE" + ] = json.dumps( + sorted( + json.loads( + expected_dynamic_sidecar_spec_model.task_template.container_spec.env[ + "DY_SIDECAR_STATE_EXCLUDE" + ] + ) + ) ) - assert dynamic_sidecar_spec.dict( - 
exclude=exclude_keys - ) == expected_dynamic_sidecar_spec_model.dict(exclude=exclude_keys) - - assert ( - dynamic_sidecar_spec.Labels.keys() - == expected_dynamic_sidecar_spec_model.Labels.keys() + assert dynamic_sidecar_spec.model_dump( + exclude=exclude_keys # type: ignore[arg-type] + ) == expected_dynamic_sidecar_spec_model.model_dump( + exclude=exclude_keys # type: ignore[arg-type] + ) + assert dynamic_sidecar_spec.labels + assert expected_dynamic_sidecar_spec_model.labels + assert sorted(dynamic_sidecar_spec.labels.keys()) == sorted( + expected_dynamic_sidecar_spec_model.labels.keys() ) assert ( - dynamic_sidecar_spec.Labels["io.simcore.scheduler-data"] - == expected_dynamic_sidecar_spec_model.Labels["io.simcore.scheduler-data"] + dynamic_sidecar_spec.labels["io.simcore.scheduler-data"] + == expected_dynamic_sidecar_spec_model.labels["io.simcore.scheduler-data"] ) - assert dynamic_sidecar_spec.Labels == expected_dynamic_sidecar_spec_model.Labels + assert dynamic_sidecar_spec.labels == expected_dynamic_sidecar_spec_model.labels dynamic_sidecar_spec_accumulated = dynamic_sidecar_spec # check reference after multiple runs assert dynamic_sidecar_spec_accumulated is not None assert ( - dynamic_sidecar_spec_accumulated.dict() - == expected_dynamic_sidecar_spec_model.dict() + dynamic_sidecar_spec_accumulated.model_dump() + == expected_dynamic_sidecar_spec_model.model_dump() ) - # TODO: finish test when working on https://github.com/ITISFoundation/osparc-simcore/issues/2454 async def test_merge_dynamic_sidecar_specs_with_user_specific_specs( @@ -425,70 +543,135 @@ async def test_merge_dynamic_sidecar_specs_with_user_specific_specs( minimal_app: FastAPI, scheduler_data: SchedulerData, dynamic_sidecar_settings: DynamicSidecarSettings, + dynamic_services_scheduler_settings: DynamicServicesSchedulerSettings, swarm_network_id: str, simcore_service_labels: SimcoreServiceLabels, expected_dynamic_sidecar_spec: dict[str, Any], mock_service_key_version: ServiceKeyVersion, + hardware_info: HardwareInfo, fake_service_specifications: dict[str, Any], ): - dynamic_sidecar_spec: AioDockerServiceSpec = get_dynamic_sidecar_spec( + dynamic_sidecar_spec: AioDockerServiceSpec = await get_dynamic_sidecar_spec( scheduler_data=scheduler_data, dynamic_sidecar_settings=dynamic_sidecar_settings, + dynamic_services_scheduler_settings=dynamic_services_scheduler_settings, swarm_network_id=swarm_network_id, settings=cast(SimcoreServiceSettingsLabel, simcore_service_labels.settings), app_settings=minimal_app.state.settings, - allow_internet_access=False, + hardware_info=hardware_info, + has_quota_support=False, + metrics_collection_allowed=True, + user_extra_properties=UserExtraProperties( + is_internet_enabled=False, + is_telemetry_enabled=True, + is_efs_enabled=False, + ), + rpc_client=Mock(), ) assert dynamic_sidecar_spec - dynamic_sidecar_spec_dict = dynamic_sidecar_spec.dict() - expected_dynamic_sidecar_spec_dict = AioDockerServiceSpec.parse_obj( + dynamic_sidecar_spec_dict = dynamic_sidecar_spec.model_dump() + expected_dynamic_sidecar_spec_dict = AioDockerServiceSpec.model_validate( expected_dynamic_sidecar_spec - ).dict() + ).model_dump() # ensure some entries are sorted the same to prevent flakyness for sorted_dict in [dynamic_sidecar_spec_dict, expected_dynamic_sidecar_spec_dict]: for key in ["DY_SIDECAR_STATE_EXCLUDE", "DY_SIDECAR_STATE_PATHS"]: # this is a json of a list assert isinstance( - sorted_dict["TaskTemplate"]["ContainerSpec"]["Env"][key], str + sorted_dict["task_template"]["container_spec"]["env"][key], 
str ) unsorted_list = json.loads( - sorted_dict["TaskTemplate"]["ContainerSpec"]["Env"][key] + sorted_dict["task_template"]["container_spec"]["env"][key] ) assert isinstance(unsorted_list, list) - sorted_dict["TaskTemplate"]["ContainerSpec"]["Env"][key] = json.dumps( + sorted_dict["task_template"]["container_spec"]["env"][key] = json.dumps( unsorted_list.sort() ) assert dynamic_sidecar_spec_dict == expected_dynamic_sidecar_spec_dict catalog_client = CatalogClient.instance(minimal_app) - user_service_specs: dict[ - str, Any - ] = await catalog_client.get_service_specifications( - scheduler_data.user_id, - mock_service_key_version.key, - mock_service_key_version.version, + user_service_specs: dict[str, Any] = ( + await catalog_client.get_service_specifications( + scheduler_data.user_id, + mock_service_key_version.key, + mock_service_key_version.version, + ) ) assert user_service_specs assert "sidecar" in user_service_specs - user_aiodocker_service_spec = AioDockerServiceSpec.parse_obj( + user_aiodocker_service_spec = AioDockerServiceSpec.model_validate( user_service_specs["sidecar"] ) assert user_aiodocker_service_spec - orig_dict = dynamic_sidecar_spec.dict(by_alias=True, exclude_unset=True) - user_dict = user_aiodocker_service_spec.dict(by_alias=True, exclude_unset=True) + orig_dict = dynamic_sidecar_spec.model_dump(by_alias=True, exclude_unset=True) + user_dict = user_aiodocker_service_spec.model_dump( + by_alias=True, exclude_unset=True + ) another_merged_dict = nested_update( orig_dict, user_dict, - include=( - ["labels"], - ["task_template", "Resources", "Limits"], - ["task_template", "Resources", "Reservation", "MemoryBytes"], - ["task_template", "Resources", "Reservation", "NanoCPUs"], - ["task_template", "Placement", "Constraints"], - ["task_template", "ContainerSpec", "Env"], - ["task_template", "Resources", "Reservation", "GenericResources"], - ), + include=_DYNAMIC_SIDECAR_SERVICE_EXTENDABLE_SPECS, ) assert another_merged_dict + + +def test_regression__merge_service_base_and_user_specs(): + mock_service_spec = AioDockerServiceSpec.model_validate( + {"Labels": {"l1": "false", "l0": "a"}} + ) + mock_catalog_constraints = AioDockerServiceSpec.model_validate( + { + "Labels": {"l1": "true", "l2": "a"}, + "TaskTemplate": { + "Placement": { + "Constraints": [ + "c1==true", + "c2==true", + ], + }, + "Resources": { + "Limits": {"MemoryBytes": 1, "NanoCPUs": 1}, + "Reservations": { + "GenericResources": [ + {"DiscreteResourceSpec": {"Kind": "VRAM", "Value": 1}} + ], + "MemoryBytes": 2, + "NanoCPUs": 2, + }, + }, + "ContainerSpec": { + "Env": [ + "key-1=value-1", + "key2-value2=a", + ] + }, + }, + } + ) + result = _merge_service_base_and_user_specs( + mock_service_spec, mock_catalog_constraints + ) + assert result.model_dump(by_alias=True, exclude_unset=True) == { + "Labels": {"l1": "true", "l2": "a", "l0": "a"}, + "TaskTemplate": { + "Placement": { + "Constraints": [ + "c1==true", + "c2==true", + ], + }, + "Resources": { + "Limits": {"MemoryBytes": 1, "NanoCPUs": 1}, + "Reservations": { + "GenericResources": [ + {"DiscreteResourceSpec": {"Kind": "VRAM", "Value": 1}} + ], + "MemoryBytes": 2, + "NanoCPUs": 2, + }, + }, + "ContainerSpec": {"Env": {"key-1": "value-1", "key2-value2": "a"}}, + }, + } diff --git a/services/director-v2/tests/unit/with_dbs/test_modules_redis.py b/services/director-v2/tests/unit/with_dbs/test_modules_redis.py deleted file mode 100644 index 49da188eb7a..00000000000 --- a/services/director-v2/tests/unit/with_dbs/test_modules_redis.py +++ /dev/null @@ -1,303 +0,0 @@ -# 
pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=protected-access - -import asyncio -from asyncio import CancelledError, Task -from contextlib import suppress -from typing import Any, AsyncIterable, Final - -import pytest -from asgi_lifespan import LifespanManager -from faker import Faker -from fastapi import FastAPI -from pydantic import PositiveFloat -from pytest import MonkeyPatch -from redis.exceptions import LockError, LockNotOwnedError -from settings_library.redis import RedisSettings -from simcore_service_director_v2.core.errors import NodeRightsAcquireError -from simcore_service_director_v2.core.settings import AppSettings -from simcore_service_director_v2.modules import node_rights -from simcore_service_director_v2.modules.node_rights import ( - DockerNodeId, - ExtendLock, - NodeRightsManager, - ResourceName, -) - -pytest_simcore_core_services_selection = [ - "redis", -] - - -# UTILS -TEST_RESOURCE: Final[ResourceName] = "a_resource" - - -async def _assert_lock_acquired_and_released( - node_rights_manager: NodeRightsManager, - docker_node_id: DockerNodeId, - resource_name: ResourceName, - *, - sleep_before_release: PositiveFloat, -) -> ExtendLock: - async with node_rights_manager.acquire( - docker_node_id, resource_name=resource_name - ) as extend_lock: - assert await extend_lock._redis_lock.locked() is True - assert await extend_lock._redis_lock.owned() is True - - # task is running and not cancelled - assert extend_lock.task - assert extend_lock.task.done() is False - assert extend_lock.task.cancelled() is False - - await asyncio.sleep(sleep_before_release) - - # task was canceled and lock is unlocked and not owned - assert extend_lock.task is None - assert await extend_lock._redis_lock.locked() is False - assert await extend_lock._redis_lock.owned() is False - - return extend_lock - - -@pytest.fixture -async def minimal_app( - project_env_devel_environment: dict[str, Any], - redis_settings: RedisSettings, - monkeypatch: MonkeyPatch, -) -> AsyncIterable[FastAPI]: - monkeypatch.setenv("REDIS_HOST", redis_settings.REDIS_HOST) - monkeypatch.setenv("REDIS_PORT", f"{redis_settings.REDIS_PORT}") - - app = FastAPI() - - # add expected redis_settings - app.state.settings = AppSettings.create_from_envs() - - # setup redis module - node_rights.setup(app) - - async with LifespanManager(app): - yield app - - -@pytest.fixture -async def node_rights_manager(minimal_app: FastAPI) -> AsyncIterable[NodeRightsManager]: - redis_lock_manger = NodeRightsManager.instance(minimal_app) - await redis_lock_manger._redis.flushall() - yield redis_lock_manger - await redis_lock_manger._redis.flushall() - - -@pytest.fixture -def docker_node_id(faker: Faker) -> str: - return faker.uuid4() - - -async def test_redis_lock_working_as_expected( - node_rights_manager: NodeRightsManager, docker_node_id -) -> None: - lock = node_rights_manager._redis.lock(docker_node_id) - - lock_acquired = await lock.acquire(blocking=False) - assert lock_acquired - assert await lock.locked() is True - - await lock.release() - assert await lock.locked() is False - - with pytest.raises(LockError): - await lock.release() - - -async def test_redis_two_lock_instances( - node_rights_manager: NodeRightsManager, docker_node_id: DockerNodeId -) -> None: - # NOTE: this test show cases how the locks work - # you have to acquire the lock from the same istance - # in order to avoid tricky situations - - lock = node_rights_manager._redis.lock(docker_node_id) - - lock_acquired = await 
lock.acquire(blocking=False) - assert lock_acquired - assert await lock.locked() is True - - # we get a different instance - second_lock = node_rights_manager._redis.lock(docker_node_id) - assert await second_lock.locked() is True - - # cannot release lock form different instance! - with pytest.raises(LockError): - await second_lock.release() - - assert await lock.locked() is True - # NOTE: this is confusing! One woudl expect the second lock to be unlocked - # but it actually is True - assert await second_lock.locked() is True - - await lock.release() - assert await lock.locked() is False - # NOTE: apparently it mirrors the first lock instance! - assert await second_lock.locked() is False - - -async def test_lock_extend_task_life_cycle( - node_rights_manager: NodeRightsManager, docker_node_id: DockerNodeId -) -> None: - extend_lock = await _assert_lock_acquired_and_released( - node_rights_manager, docker_node_id, TEST_RESOURCE, sleep_before_release=0 - ) - - # try to cancel again will not work! - with pytest.raises(LockError): - await node_rights_manager._release_extend_lock(extend_lock) - - -@pytest.mark.parametrize("resource_count", [1, 2, 10]) -async def test_no_more_locks_can_be_acquired( - node_rights_manager: NodeRightsManager, - docker_node_id: DockerNodeId, - resource_count: int, -) -> None: - # acquire all available locks - - async def _acquire_tasks(resource_name: ResourceName) -> tuple[int, list[Task]]: - slots = await node_rights_manager._get_node_slots(docker_node_id, resource_name) - assert slots == node_rights_manager.concurrent_resource_slots - - tasks = [ - asyncio.create_task( - _assert_lock_acquired_and_released( - node_rights_manager, - docker_node_id, - resource_name, - sleep_before_release=1, - ) - ) - for _ in range(slots) - ] - - # ensure locks are acquired - await asyncio.sleep(0.25) - - # no slots available - with pytest.raises(NodeRightsAcquireError) as exec_info: - await _assert_lock_acquired_and_released( - node_rights_manager, - docker_node_id, - resource_name, - sleep_before_release=0, - ) - assert ( - f"{exec_info.value}" - == f"Could not acquire a lock for {docker_node_id} since all {slots} slots are used." - ) - return slots, tasks - - tasks = [] - for r in range(resource_count): - resource_name = f"resource_{r}" - used_slots_r1, resource_tasks = await _acquire_tasks(resource_name) - assert len(resource_tasks) == used_slots_r1 - tasks += resource_tasks - - # wait for tasks to be released - await asyncio.gather(*tasks) - - -@pytest.mark.flaky(max_runs=3) -@pytest.mark.parametrize( - "locks_per_node", - [ - 4, - 10, - 100, - ], -) -async def test_acquire_all_available_node_locks_stress_test( - node_rights_manager: NodeRightsManager, - docker_node_id: DockerNodeId, - locks_per_node: int, -) -> None: - # NOTE: this test is designed to spot if there are any issues when - # acquiring and releasing locks in parallel with high concurrency - - # adds more stress with lower lock_timeout_s - node_rights_manager.lock_timeout_s = 0.5 - - node_rights_manager.concurrent_resource_slots = locks_per_node - - total_node_slots = await node_rights_manager._get_node_slots( - docker_node_id, TEST_RESOURCE - ) - assert total_node_slots == locks_per_node - - # THE extend task is causing things to hang!!! that is what is wrong here! 
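The deleted node_rights tests above pin down a redis-py subtlety worth keeping: two `Lock` objects created for the same name both report `locked() is True` once the key exists, yet only the instance that acquired the token may release it. A minimal sketch of that behavior, assuming redis-py 5.x and a Redis reachable on `localhost:6379` (both assumptions, not part of this patch):

```python
import asyncio

from redis.asyncio import Redis
from redis.exceptions import LockError


async def main() -> None:
    redis = Redis(host="localhost", port=6379)  # assumed local Redis instance
    first = redis.lock("some-docker-node-id")
    second = redis.lock("some-docker-node-id")  # same name, separate instance

    assert await first.acquire(blocking=False)
    # locked() only checks whether the key exists in Redis, so both report True
    assert await first.locked()
    assert await second.locked()

    try:
        await second.release()  # this instance holds no token -> refused
    except LockError:
        print("only the acquiring instance can release the lock")

    await first.release()
    # the key is gone, so both instances now report unlocked
    assert not await first.locked()
    assert not await second.locked()

    await redis.aclose()


if __name__ == "__main__":
    asyncio.run(main())
```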
- await asyncio.gather( - *[ - _assert_lock_acquired_and_released( - node_rights_manager, - docker_node_id, - TEST_RESOURCE, - sleep_before_release=node_rights_manager.lock_timeout_s * 2, - ) - for _ in range(total_node_slots) - ] - ) - print("all locks have been released") - - -async def test_lock_extension_expiration( - node_rights_manager: NodeRightsManager, docker_node_id: DockerNodeId -) -> None: - SHORT_INTERVAL = 0.10 - - node_rights_manager.lock_timeout_s = SHORT_INTERVAL - node_rights_manager.concurrent_resource_slots = 1 - - with pytest.raises(LockNotOwnedError, match="Cannot release a lock") as err_info: - async with node_rights_manager.acquire( - docker_node_id, resource_name=TEST_RESOURCE - ) as extend_lock: - # lock should have been extended at least 2 times - # and should still be locked - await asyncio.sleep(SHORT_INTERVAL * 4) - assert await extend_lock._redis_lock.locked() is True - assert await extend_lock._redis_lock.owned() is True - - # emulating process died (equivalent to no further renews) - assert extend_lock.task - extend_lock.task.cancel() - with suppress(CancelledError): - await extend_lock.task - - # lock is expected to be unlocked after timeout interval - await asyncio.sleep(node_rights_manager.lock_timeout_s) - assert await extend_lock._redis_lock.locked() is False - assert await extend_lock._redis_lock.owned() is False - - # the error must be raised by the release method inside the ExtendLock - assert ( - err_info.traceback[-2].statement.__str__().strip() - == "await self._redis_lock.release()" - ) - - -async def test_lock_raises_error_if_no_slots_are_available( - node_rights_manager: NodeRightsManager, docker_node_id: DockerNodeId -) -> None: - node_rights_manager.concurrent_resource_slots = 0 - - with pytest.raises(NodeRightsAcquireError) as err_info: - async with node_rights_manager.acquire( - docker_node_id, resource_name=TEST_RESOURCE - ): - pass - - assert f"{err_info.value}" == ( - f"Could not acquire a lock for {docker_node_id} since all " - f"{node_rights_manager.concurrent_resource_slots} slots are used." 
- ) diff --git a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py index 743243b3ebc..682e24825fc 100644 --- a/services/director-v2/tests/unit/with_dbs/test_utils_dask.py +++ b/services/director-v2/tests/unit/with_dbs/test_utils_dask.py @@ -7,12 +7,12 @@ # pylint:disable=no-name-in-module +from collections.abc import Callable +from copy import deepcopy from random import choice -from typing import Any, Callable +from typing import Any from unittest import mock -import aiopg -import httpx import pytest from _helpers import PublishedProject, set_comp_task_inputs, set_comp_task_outputs from dask_task_models_library.container_tasks.io import ( @@ -20,30 +20,47 @@ FileUrl, TaskOutputData, ) +from dask_task_models_library.container_tasks.protocol import ( + ContainerEnvsDict, + ContainerLabelsDict, +) +from dask_task_models_library.container_tasks.utils import generate_dask_job_id +from distributed import SpecCluster from faker import Faker -from models_library.api_schemas_storage import FileUploadLinks, FileUploadSchema +from fastapi import FastAPI +from models_library.api_schemas_directorv2.services import NodeRequirements +from models_library.api_schemas_storage.storage_schemas import ( + FileUploadLinks, + FileUploadSchema, +) +from models_library.docker import to_simcore_runtime_docker_label_key from models_library.projects import ProjectID from models_library.projects_nodes_io import NodeID, SimCoreFileLink, SimcoreS3FileID +from models_library.services import ServiceRunID from models_library.users import UserID -from pydantic import ByteSize +from pydantic import ByteSize, TypeAdapter from pydantic.networks import AnyUrl -from pydantic.tools import parse_obj_as -from pytest import MonkeyPatch from pytest_mock.plugin import MockerFixture from pytest_simcore.helpers.typing_env import EnvVarsDict from simcore_sdk.node_ports_v2 import FileLinkType -from simcore_service_director_v2.models.domains.comp_tasks import CompTaskAtDB -from simcore_service_director_v2.models.schemas.services import NodeRequirements +from simcore_service_director_v2.constants import UNDEFINED_DOCKER_LABEL +from simcore_service_director_v2.models.comp_runs import RunMetadataDict +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.modules.dask_clients_pool import DaskClientsPool from simcore_service_director_v2.utils.dask import ( _LOGS_FILE_NAME, + _to_human_readable_resource_values, + check_if_cluster_is_able_to_run_pipeline, clean_task_output_and_log_files_if_invalid, compute_input_data, compute_output_data_schema, + compute_task_envs, + compute_task_labels, + create_node_ports, from_node_reqs_to_dask_resources, - generate_dask_job_id, - parse_dask_job_id, parse_output_data, ) +from sqlalchemy.ext.asyncio import AsyncEngine from yarl import URL pytest_simcore_core_services_selection = [ @@ -78,16 +95,17 @@ async def mocked_node_ports_filemanager_fcts( 0, FileUploadSchema( urls=[ - parse_obj_as( - AnyUrl, - f"{URL(faker.uri()).with_scheme(choice(tasks_file_link_scheme))}", + TypeAdapter(AnyUrl).validate_python( + f"{URL(faker.uri()).with_scheme(choice(tasks_file_link_scheme))}", # noqa: S311 ) ], - chunk_size=parse_obj_as(ByteSize, "5GiB"), + chunk_size=TypeAdapter(ByteSize).validate_python("5GiB"), links=FileUploadLinks( - abort_upload=parse_obj_as(AnyUrl, "https://www.fakeabort.com"), - complete_upload=parse_obj_as( - AnyUrl, "https://www.fakecomplete.com" + 
abort_upload=TypeAdapter(AnyUrl).validate_python( + "https://www.fakeabort.com" + ), + complete_upload=TypeAdapter(AnyUrl).validate_python( + "https://www.fakecomplete.com" ), ), ), @@ -96,23 +114,6 @@ async def mocked_node_ports_filemanager_fcts( } -@pytest.fixture( - params=["simcore/service/comp/some/fake/service/key", "dockerhub-style/service_key"] -) -def service_key(request) -> str: - return request.param - - -@pytest.fixture() -def service_version() -> str: - return "1234.32432.2344" - - -@pytest.fixture -def user_id(faker: Faker) -> UserID: - return faker.pyint(min_value=1) - - @pytest.fixture def project_id(faker: Faker) -> ProjectID: return ProjectID(faker.uuid4()) @@ -123,34 +124,10 @@ def node_id(faker: Faker) -> NodeID: return NodeID(faker.uuid4()) -def test_dask_job_id_serialization( - service_key: str, - service_version: str, - user_id: UserID, - project_id: ProjectID, - node_id: NodeID, -): - dask_job_id = generate_dask_job_id( - service_key, service_version, user_id, project_id, node_id - ) - ( - parsed_service_key, - parsed_service_version, - parsed_user_id, - parsed_project_id, - parsed_node_id, - ) = parse_dask_job_id(dask_job_id) - assert service_key == parsed_service_key - assert service_version == parsed_service_version - assert user_id == parsed_user_id - assert project_id == parsed_project_id - assert node_id == parsed_node_id - - @pytest.fixture() def fake_io_config(faker: Faker) -> dict[str, str]: return { - f"pytest_io_key_{faker.pystr()}": choice( + f"pytest_io_key_{faker.pystr()}": choice( # noqa: S311 ["integer", "data:*/*", "boolean", "number", "string"] ) for n in range(20) @@ -186,7 +163,7 @@ def generate_simcore_file_link() -> dict[str, Any]: path=create_simcore_file_id( faker.uuid4(), faker.uuid4(), faker.file_name() ), - ).dict(by_alias=True, exclude_unset=True) + ).model_dump(by_alias=True, exclude_unset=True) TYPE_TO_FAKE_CALLABLE_MAP = { "number": faker.pyfloat, @@ -208,23 +185,27 @@ def fake_task_output_data( faker: Faker, ) -> TaskOutputData: converted_data = { - key: { - "url": faker.url(), - "file_mapping": next(iter(fake_io_schema[key]["fileToKeyMap"])) - if "fileToKeyMap" in fake_io_schema[key] - else None, - } - if fake_io_schema[key]["type"] == "data:*/*" - else value + key: ( + { + "url": faker.url(), + "file_mapping": ( + next(iter(fake_io_schema[key]["fileToKeyMap"])) + if "fileToKeyMap" in fake_io_schema[key] + else None + ), + } + if fake_io_schema[key]["type"] == "data:*/*" + else value + ) for key, value in fake_io_data.items() } - data = parse_obj_as(TaskOutputData, converted_data) + data = TypeAdapter(TaskOutputData).validate_python(converted_data) assert data return data async def test_parse_output_data( - aiopg_engine: aiopg.sa.engine.Engine, # type: ignore + sqlalchemy_async_engine: AsyncEngine, published_project: PublishedProject, user_id: UserID, fake_io_schema: dict[str, dict[str, str]], @@ -235,7 +216,7 @@ async def test_parse_output_data( sleeper_task: CompTaskAtDB = published_project.tasks[1] no_outputs = {} await set_comp_task_outputs( - aiopg_engine, sleeper_task.node_id, fake_io_schema, no_outputs + sqlalchemy_async_engine, sleeper_task.node_id, fake_io_schema, no_outputs ) # mock the set_value function so we can test it is called correctly mocked_node_ports_set_value_fct = mocker.patch( @@ -250,7 +231,7 @@ async def test_parse_output_data( published_project.project.uuid, sleeper_task.node_id, ) - await parse_output_data(aiopg_engine, dask_job_id, fake_task_output_data) + await parse_output_data(sqlalchemy_async_engine, 
dask_job_id, fake_task_output_data) # the FileUrl types are converted to a pure url expected_values = { @@ -263,24 +244,24 @@ async def test_parse_output_data( @pytest.fixture -def app_with_db( +def _app_config_with_db( mock_env: EnvVarsDict, - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, postgres_host_config: dict[str, str], + faker: Faker, ): - monkeypatch.setenv("DIRECTOR_V2_POSTGRES_ENABLED", "1") monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO") - monkeypatch.setenv("S3_ENDPOINT", "endpoint") - monkeypatch.setenv("S3_ACCESS_KEY", "access_key") - monkeypatch.setenv("S3_SECRET_KEY", "secret_key") - monkeypatch.setenv("S3_BUCKET_NAME", "bucket_name") - monkeypatch.setenv("S3_SECURE", "false") + monkeypatch.setenv("S3_ENDPOINT", faker.url()) + monkeypatch.setenv("S3_ACCESS_KEY", faker.pystr()) + monkeypatch.setenv("S3_REGION", faker.pystr()) + monkeypatch.setenv("S3_SECRET_KEY", faker.pystr()) + monkeypatch.setenv("S3_BUCKET_NAME", faker.pystr()) async def test_compute_input_data( - app_with_db: None, - aiopg_engine: aiopg.sa.engine.Engine, # type: ignore - async_client: httpx.AsyncClient, + _app_config_with_db: None, + sqlalchemy_async_engine: AsyncEngine, + initialized_app: FastAPI, user_id: UserID, published_project: PublishedProject, fake_io_schema: dict[str, dict[str, str]], @@ -294,24 +275,31 @@ async def test_compute_input_data( # set some fake inputs fake_inputs = { - key: SimCoreFileLink( - store=0, - path=create_simcore_file_id( - published_project.project.uuid, sleeper_task.node_id, faker.file_name() - ), - ).dict(by_alias=True, exclude_unset=True) - if value_type["type"] == "data:*/*" - else fake_io_data[key] + key: ( + SimCoreFileLink( + store=0, + path=create_simcore_file_id( + published_project.project.uuid, + sleeper_task.node_id, + faker.file_name(), + ), + ).model_dump(by_alias=True, exclude_unset=True) + if value_type["type"] == "data:*/*" + else fake_io_data[key] + ) for key, value_type in fake_io_schema.items() } await set_comp_task_inputs( - aiopg_engine, sleeper_task.node_id, fake_io_schema, fake_inputs + sqlalchemy_async_engine, sleeper_task.node_id, fake_io_schema, fake_inputs ) + # mock the get_value function so we can test it is called correctly def return_fake_input_value(*args, **kwargs): - for value, value_type in zip(fake_inputs.values(), fake_io_schema.values()): + for value, value_type in zip( + fake_inputs.values(), fake_io_schema.values(), strict=True + ): if value_type["type"] == "data:*/*": - yield parse_obj_as(AnyUrl, faker.url()) + yield TypeAdapter(AnyUrl).validate_python(faker.url()) else: yield value @@ -320,18 +308,20 @@ def return_fake_input_value(*args, **kwargs): autospec=True, side_effect=return_fake_input_value(), ) + node_ports = await create_node_ports( + db_engine=sqlalchemy_async_engine, + user_id=user_id, + project_id=published_project.project.uuid, + node_id=sleeper_task.node_id, + ) computed_input_data = await compute_input_data( - async_client._transport.app, - user_id, - published_project.project.uuid, - sleeper_task.node_id, + project_id=published_project.project.uuid, + node_id=sleeper_task.node_id, file_link_type=tasks_file_link_type, + node_ports=node_ports, ) mocked_node_ports_get_value_fct.assert_has_calls( - [ - mock.call(mock.ANY, file_link_type=tasks_file_link_type) - for n in fake_io_data.keys() - ] + [mock.call(mock.ANY, file_link_type=tasks_file_link_type) for n in fake_io_data] ) assert computed_input_data.keys() == fake_io_data.keys() @@ -342,13 +332,14 @@ def tasks_file_link_scheme(tasks_file_link_type: 
FileLinkType) -> tuple: return ("s3", "s3a") if tasks_file_link_type == FileLinkType.PRESIGNED: return ("http", "https") - assert False, "unknown file link type, need update of the fixture" + pytest.fail("unknown file link type, need update of the fixture") + return ("thankspylint",) async def test_compute_output_data_schema( - app_with_db: None, - aiopg_engine: aiopg.sa.engine.Engine, # type: ignore - async_client: httpx.AsyncClient, + _app_config_with_db: None, + sqlalchemy_async_engine: AsyncEngine, + initialized_app: FastAPI, user_id: UserID, published_project: PublishedProject, fake_io_schema: dict[str, dict[str, str]], @@ -360,15 +351,22 @@ async def test_compute_output_data_schema( # simulate pre-created file links no_outputs = {} await set_comp_task_outputs( - aiopg_engine, sleeper_task.node_id, fake_io_schema, no_outputs + sqlalchemy_async_engine, sleeper_task.node_id, fake_io_schema, no_outputs + ) + + node_ports = await create_node_ports( + db_engine=sqlalchemy_async_engine, + user_id=user_id, + project_id=published_project.project.uuid, + node_id=sleeper_task.node_id, ) output_schema = await compute_output_data_schema( - async_client._transport.app, - user_id, - published_project.project.uuid, - sleeper_task.node_id, + user_id=user_id, + project_id=published_project.project.uuid, + node_id=sleeper_task.node_id, file_link_type=tasks_file_link_type, + node_ports=node_ports, ) for port_key, port_schema in fake_io_schema.items(): assert port_key in output_schema @@ -389,7 +387,7 @@ async def test_compute_output_data_schema( @pytest.mark.parametrize("entry_exists_returns", [True, False]) async def test_clean_task_output_and_log_files_if_invalid( - aiopg_engine: aiopg.sa.engine.Engine, # type: ignore + sqlalchemy_async_engine: AsyncEngine, user_id: UserID, published_project: PublishedProject, mocked_node_ports_filemanager_fcts: dict[str, mock.MagicMock], @@ -402,9 +400,9 @@ async def test_clean_task_output_and_log_files_if_invalid( # BEFORE the task is actually run. In case there is a failure at running # the task, these entries shall be cleaned up. The way to check this is # by asking storage if these file really exist. If not they get deleted. 
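The comment above describes the cleanup rule exercised by this test: output entries are pre-created before the task runs, and any entry whose file was never actually uploaded must be removed after a failure. A rough sketch of that check-and-clean loop, using hypothetical coroutines that stand in for the mocked node-ports filemanager calls (the real signatures in simcore_sdk may differ):

```python
from collections.abc import Awaitable, Callable

# Hypothetical stand-ins for the filemanager functions mocked in the test above.
EntryExists = Callable[[str], Awaitable[bool]]
DeleteFile = Callable[[str], Awaitable[None]]

_LOGS_FILE_NAME = "logs.zip"  # assumption: the real constant lives in utils.dask


async def clean_invalid_outputs(
    project_id: str,
    node_id: str,
    file_output_keys: list[str],
    entry_exists: EntryExists,
    delete_file: DeleteFile,
) -> None:
    """Drop pre-created output entries whose files were never uploaded."""
    candidates = [
        f"{project_id}/{node_id}/{key}"
        for key in [*file_output_keys, _LOGS_FILE_NAME]
    ]
    for s3_object in candidates:
        if not await entry_exists(s3_object):
            # the task never produced this file: remove the dangling entry
            await delete_file(s3_object)
```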
- mocked_node_ports_filemanager_fcts[ - "entry_exists" - ].return_value = entry_exists_returns + mocked_node_ports_filemanager_fcts["entry_exists"].return_value = ( + entry_exists_returns + ) sleeper_task = published_project.tasks[1] @@ -415,16 +413,16 @@ async def test_clean_task_output_and_log_files_if_invalid( path=create_simcore_file_id( published_project.project.uuid, sleeper_task.node_id, faker.file_name() ), - ).dict(by_alias=True, exclude_unset=True) + ).model_dump(by_alias=True, exclude_unset=True) for key, value_type in fake_io_schema.items() if value_type["type"] == "data:*/*" } await set_comp_task_outputs( - aiopg_engine, sleeper_task.node_id, fake_io_schema, fake_outputs + sqlalchemy_async_engine, sleeper_task.node_id, fake_io_schema, fake_outputs ) # this should ask for the 2 files + the log file await clean_task_output_and_log_files_if_invalid( - aiopg_engine, + sqlalchemy_async_engine, user_id, published_project.project.uuid, published_project.tasks[1].node_id, @@ -433,9 +431,9 @@ async def test_clean_task_output_and_log_files_if_invalid( mock.call( user_id=user_id, store_id=0, - s3_object=f"{published_project.project.uuid}/{sleeper_task.node_id}/{next(iter(fake_io_schema[key].get('fileToKeyMap', {key:key})))}", + s3_object=f"{published_project.project.uuid}/{sleeper_task.node_id}/{next(iter(fake_io_schema[key].get('fileToKeyMap', {key: key})))}", ) - for key in fake_outputs.keys() + for key in fake_outputs ] + [ mock.call( user_id=user_id, @@ -443,7 +441,15 @@ async def test_clean_task_output_and_log_files_if_invalid( s3_object=f"{published_project.project.uuid}/{sleeper_task.node_id}/{_LOGS_FILE_NAME}", ) ] - mocked_node_ports_filemanager_fcts["entry_exists"].assert_has_calls(expected_calls) + + def _add_is_directory(entry: mock._Call) -> mock._Call: + new_kwargs: dict[str, Any] = deepcopy(entry.kwargs) + new_kwargs["is_directory"] = False + return mock.call(**new_kwargs) + + mocked_node_ports_filemanager_fcts["entry_exists"].assert_has_calls( + [_add_is_directory(x) for x in expected_calls] + ) if entry_exists_returns: mocked_node_ports_filemanager_fcts["delete_file"].assert_not_called() else: @@ -453,10 +459,10 @@ async def test_clean_task_output_and_log_files_if_invalid( @pytest.mark.parametrize( - "req_example", NodeRequirements.Config.schema_extra["examples"] + "req_example", NodeRequirements.model_config["json_schema_extra"]["examples"] ) def test_node_requirements_correctly_convert_to_dask_resources( - req_example: dict[str, Any] + req_example: dict[str, Any], ): node_reqs = NodeRequirements(**req_example) assert node_reqs @@ -464,5 +470,162 @@ def test_node_requirements_correctly_convert_to_dask_resources( # all the dask resources shall be of type: RESOURCE_NAME: VALUE for resource_key, resource_value in dask_resources.items(): assert isinstance(resource_key, str) - assert isinstance(resource_value, (int, float, str, bool)) + assert isinstance(resource_value, int | float | str | bool) assert resource_value is not None + + +@pytest.mark.parametrize( + "input_resources, expected_human_readable_resources", + [ + ({}, {}), + ( + {"CPU": 2.1, "RAM": 1024, "VRAM": 2097152}, + {"CPU": 2.1, "RAM": "1.0KiB", "VRAM": "2.0MiB"}, + ), + ], + ids=str, +) +def test__to_human_readable_resource_values( + input_resources: dict[str, Any], expected_human_readable_resources: dict[str, Any] +): + assert ( + _to_human_readable_resource_values(input_resources) + == expected_human_readable_resources + ) + + +@pytest.fixture +def _app_config_with_dask_client( + _app_config_with_db: None, 
+ dask_spec_local_cluster: SpecCluster, + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setenv("COMPUTATIONAL_BACKEND_DASK_CLIENT_ENABLED", "1") + monkeypatch.setenv( + "COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL", + dask_spec_local_cluster.scheduler_address, + ) + + +async def test_check_if_cluster_is_able_to_run_pipeline( + _app_config_with_dask_client: None, + project_id: ProjectID, + node_id: NodeID, + published_project: PublishedProject, + initialized_app: FastAPI, +): + sleeper_task: CompTaskAtDB = published_project.tasks[1] + dask_scheduler_settings = ( + initialized_app.state.settings.DIRECTOR_V2_COMPUTATIONAL_BACKEND + ) + default_cluster = dask_scheduler_settings.default_cluster + dask_clients_pool = DaskClientsPool.instance(initialized_app) + async with dask_clients_pool.acquire(default_cluster) as dask_client: + check_if_cluster_is_able_to_run_pipeline( + project_id=project_id, + node_id=node_id, + node_image=sleeper_task.image, + scheduler_info=dask_client.backend.client.scheduler_info(), + task_resources={}, + ) + + +@pytest.mark.parametrize( + "run_metadata, expected_additional_task_labels", + [ + ( + {}, + { + f"{to_simcore_runtime_docker_label_key('product-name')}": UNDEFINED_DOCKER_LABEL, + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}": UNDEFINED_DOCKER_LABEL, + }, + ), + ( + { + f"{to_simcore_runtime_docker_label_key('product-name')}": "the awesome osparc", + "some-crazy-additional-label": "with awesome value", + }, + { + f"{to_simcore_runtime_docker_label_key('product-name')}": "the awesome osparc", + f"{to_simcore_runtime_docker_label_key('simcore-user-agent')}": UNDEFINED_DOCKER_LABEL, + "some-crazy-additional-label": "with awesome value", + }, + ), + ], +) +async def test_compute_task_labels( + _app_config_with_db: None, + published_project: PublishedProject, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + run_metadata: RunMetadataDict, + expected_additional_task_labels: ContainerLabelsDict, + initialized_app: FastAPI, +): + sleeper_task = published_project.tasks[1] + assert sleeper_task.image + assert sleeper_task.image.node_requirements + task_labels = compute_task_labels( + user_id=user_id, + project_id=project_id, + node_id=node_id, + run_metadata=run_metadata, + node_requirements=sleeper_task.image.node_requirements, + ) + expected_task_labels = { + f"{to_simcore_runtime_docker_label_key('user-id')}": f"{user_id}", + f"{to_simcore_runtime_docker_label_key('project-id')}": f"{project_id}", + f"{to_simcore_runtime_docker_label_key('node-id')}": f"{node_id}", + f"{to_simcore_runtime_docker_label_key('swarm-stack-name')}": f"{UNDEFINED_DOCKER_LABEL}", + f"{to_simcore_runtime_docker_label_key('cpu-limit')}": f"{sleeper_task.image.node_requirements.cpu}", + f"{to_simcore_runtime_docker_label_key('memory-limit')}": f"{sleeper_task.image.node_requirements.ram}", + } | expected_additional_task_labels + assert task_labels == expected_task_labels + + +@pytest.mark.parametrize( + "run_metadata", + [ + {"product_name": "some amazing product name"}, + ], +) +@pytest.mark.parametrize( + "input_task_envs, expected_computed_task_envs", + [ + pytest.param({}, {}, id="empty envs"), + pytest.param( + {"SOME_FAKE_ENV": "this is my fake value"}, + {"SOME_FAKE_ENV": "this is my fake value"}, + id="standard env", + ), + pytest.param( + {"SOME_FAKE_ENV": "this is my $OSPARC_VARIABLE_PRODUCT_NAME value"}, + {"SOME_FAKE_ENV": "this is my some amazing product name value"}, + id="substituable env", + ), + ], +) +async def test_compute_task_envs( 
+ _app_config_with_db: None, + published_project: PublishedProject, + initialized_app: FastAPI, + run_metadata: RunMetadataDict, + input_task_envs: ContainerEnvsDict, + expected_computed_task_envs: ContainerEnvsDict, + resource_tracking_run_id: ServiceRunID, +): + sleeper_task: CompTaskAtDB = published_project.tasks[1] + sleeper_task.image.envs = input_task_envs + assert published_project.project.prj_owner is not None + task_envs = await compute_task_envs( + initialized_app, + user_id=published_project.project.prj_owner, + project_id=published_project.project.uuid, + node_id=sleeper_task.node_id, + node_image=sleeper_task.image, + metadata=run_metadata, + resource_tracking_run_id=resource_tracking_run_id, + wallet_id=run_metadata.get("wallet_id", None), + ) + assert task_envs == expected_computed_task_envs diff --git a/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py b/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py new file mode 100644 index 00000000000..cb3d81a910d --- /dev/null +++ b/services/director-v2/tests/unit/with_dbs/test_utils_rabbitmq.py @@ -0,0 +1,290 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=protected-access +# pylint:disable=too-many-statements + + +import datetime +import random +from collections.abc import Awaitable, Callable +from typing import Any +from unittest import mock + +import pytest +from faker import Faker +from models_library.projects import ProjectAtDB +from models_library.projects_nodes_io import NodeIDStr +from models_library.projects_state import RunningState +from models_library.rabbitmq_messages import ( + InstrumentationRabbitMessage, + RabbitResourceTrackingBaseMessage, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.services import ServiceKey, ServiceType, ServiceVersion +from pytest_mock import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from simcore_service_director_v2.models.comp_pipelines import CompPipelineAtDB +from simcore_service_director_v2.models.comp_tasks import CompTaskAtDB +from simcore_service_director_v2.utils.rabbitmq import ( + publish_service_resource_tracking_heartbeat, + publish_service_resource_tracking_started, + publish_service_resource_tracking_stopped, + publish_service_started_metrics, + publish_service_stopped_metrics, +) +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def mocked_message_parser(mocker: MockerFixture) -> mock.AsyncMock: + return mocker.AsyncMock(return_value=True) + + +async def _assert_message_received( + mocked_message_parser: mock.AsyncMock, + expected_call_count: int, + message_parser: Callable, +) -> list: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + print( + f"--> waiting for rabbitmq message [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) + assert mocked_message_parser.call_count == expected_call_count + print( + f"<-- rabbitmq message received after [{attempt.retry_state.attempt_number}, {attempt.retry_state.idle_for}]" + ) 
+ return [ + message_parser(mocked_message_parser.call_args_list[c].args[0]) + for c in range(expected_call_count) + ] + + +@pytest.fixture +def user(create_registered_user: Callable[..., dict]) -> dict: + return create_registered_user() + + +@pytest.fixture +async def project( + user: dict[str, Any], + fake_workbench_without_outputs: dict[str, Any], + project: Callable[..., Awaitable[ProjectAtDB]], +) -> ProjectAtDB: + return await project(user, workbench=fake_workbench_without_outputs) + + +@pytest.fixture +async def tasks( + user: dict[str, Any], + project: ProjectAtDB, + fake_workbench_adjacency: dict[str, Any], + create_pipeline: Callable[..., Awaitable[CompPipelineAtDB]], + create_tasks: Callable[..., Awaitable[list[CompTaskAtDB]]], +) -> list[CompTaskAtDB]: + await create_pipeline( + project_id=f"{project.uuid}", + dag_adjacency_list=fake_workbench_adjacency, + ) + comp_tasks = await create_tasks(user, project) + assert len(comp_tasks) > 0 + return comp_tasks + + +async def test_publish_service_started_metrics( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + user: dict[str, Any], + simcore_user_agent: str, + tasks: list[CompTaskAtDB], + mocked_message_parser: mock.AsyncMock, +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + + await consumer.subscribe( + InstrumentationRabbitMessage.get_channel_name(), mocked_message_parser + ) + await publish_service_started_metrics( + publisher, + user_id=user["id"], + simcore_user_agent=simcore_user_agent, + task=random.choice(tasks), # noqa: S311 + ) + await _assert_message_received( + mocked_message_parser, 1, InstrumentationRabbitMessage.model_validate_json + ) + + +async def test_publish_service_stopped_metrics( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + user: dict[str, Any], + simcore_user_agent: str, + tasks: list[CompTaskAtDB], + mocked_message_parser: mock.AsyncMock, +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + + await consumer.subscribe( + InstrumentationRabbitMessage.get_channel_name(), mocked_message_parser + ) + await publish_service_stopped_metrics( + publisher, + user_id=user["id"], + simcore_user_agent=simcore_user_agent, + task=random.choice(tasks), # noqa: S311 + task_final_state=random.choice(list(RunningState)), # noqa: S311 + ) + await _assert_message_received( + mocked_message_parser, 1, InstrumentationRabbitMessage.model_validate_json + ) + + +async def test_publish_service_resource_tracking_started( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + user: dict[str, Any], + project: ProjectAtDB, + simcore_user_agent: str, + tasks: list[CompTaskAtDB], + mocked_message_parser: mock.AsyncMock, + faker: Faker, + osparc_product_name: str, +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + + random_task = random.choice(tasks) # noqa: S311 + + await consumer.subscribe( + RabbitResourceTrackingBaseMessage.get_channel_name(), mocked_message_parser + ) + random_service_run_id = faker.pystr() + before_publication_time = datetime.datetime.now(datetime.UTC) + await publish_service_resource_tracking_started( + publisher, + service_run_id=random_service_run_id, + wallet_id=faker.pyint(min_value=1), + wallet_name=faker.pystr(), + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + product_name=osparc_product_name, + simcore_user_agent=simcore_user_agent, + user_id=user["id"], + user_email=faker.email(), + 
project_id=project.uuid, + project_name=project.name, + node_id=random_task.node_id, + node_name=project.workbench[NodeIDStr(f"{random_task.node_id}")].label, + parent_project_id=None, + parent_node_id=None, + root_parent_project_id=None, + root_parent_project_name=None, + root_parent_node_id=None, + service_key=ServiceKey(random_task.image.name), + service_version=ServiceVersion(random_task.image.tag), + service_type=ServiceType.COMPUTATIONAL, + service_resources={}, + service_additional_metadata=faker.pydict(), + ) + after_publication_time = datetime.datetime.now(datetime.UTC) + received_messages = await _assert_message_received( + mocked_message_parser, + 1, + RabbitResourceTrackingStartedMessage.model_validate_json, + ) + assert isinstance(received_messages[0], RabbitResourceTrackingStartedMessage) + assert received_messages[0].service_run_id == random_service_run_id + assert received_messages[0].created_at + assert ( + before_publication_time + < received_messages[0].created_at + < after_publication_time + ) + + +async def test_publish_service_resource_tracking_stopped( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocked_message_parser: mock.AsyncMock, + faker: Faker, +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + + await consumer.subscribe( + RabbitResourceTrackingBaseMessage.get_channel_name(), mocked_message_parser + ) + random_service_run_id = faker.pystr() + before_publication_time = datetime.datetime.now(datetime.UTC) + await publish_service_resource_tracking_stopped( + publisher, + service_run_id=random_service_run_id, + simcore_platform_status=random.choice( # noqa: S311 + list(SimcorePlatformStatus) + ), + ) + after_publication_time = datetime.datetime.now(datetime.UTC) + received_messages = await _assert_message_received( + mocked_message_parser, + 1, + RabbitResourceTrackingStoppedMessage.model_validate_json, + ) + assert isinstance(received_messages[0], RabbitResourceTrackingStoppedMessage) + assert received_messages[0].service_run_id == random_service_run_id + assert received_messages[0].created_at + assert ( + before_publication_time + < received_messages[0].created_at + < after_publication_time + ) + + +async def test_publish_service_resource_tracking_heartbeat( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocked_message_parser: mock.AsyncMock, + faker: Faker, +): + consumer = create_rabbitmq_client("consumer") + publisher = create_rabbitmq_client("publisher") + + await consumer.subscribe( + RabbitResourceTrackingBaseMessage.get_channel_name(), mocked_message_parser + ) + random_service_run_id = faker.pystr() + before_publication_time = datetime.datetime.now(datetime.UTC) + await publish_service_resource_tracking_heartbeat( + publisher, + service_run_id=random_service_run_id, + ) + after_publication_time = datetime.datetime.now(datetime.UTC) + received_messages = await _assert_message_received( + mocked_message_parser, + 1, + RabbitResourceTrackingHeartbeatMessage.model_validate_json, + ) + assert isinstance(received_messages[0], RabbitResourceTrackingHeartbeatMessage) + assert received_messages[0].service_run_id == random_service_run_id + assert received_messages[0].created_at + assert ( + before_publication_time + < received_messages[0].created_at + < after_publication_time + ) diff --git a/services/director/Dockerfile b/services/director/Dockerfile index 1df4e033ac2..3b7c8272c94 100644 --- a/services/director/Dockerfile +++ b/services/director/Dockerfile @@ -1,5 +1,19 @@ -ARG 
PYTHON_VERSION="3.6.10" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + # # USAGE: # cd sercices/director @@ -10,49 +24,46 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=sanderegg -RUN set -eux && \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ apt-get update && \ - apt-get install -y gosu && \ - rm -rf /var/lib/apt/lists/* && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ - SC_USER_NAME=scu \ - SC_BUILD_TARGET=base \ - SC_BOOT_MODE=default + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default RUN adduser \ - --uid ${SC_USER_ID} \ - --disabled-password \ - --gecos "" \ - --shell /bin/sh \ - --home /home/${SC_USER_NAME} \ - ${SC_USER_NAME} + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} # Sets utf-8 encoding for Python et al ENV LANG=C.UTF-8 # Turns off writing .pyc files; superfluous on an ephemeral container. ENV PYTHONDONTWRITEBYTECODE=1 \ - VIRTUAL_ENV=/home/scu/.venv + VIRTUAL_ENV=/home/scu/.venv + # Ensures that the python and pip executables used # in the image will be those from our virtualenv. 
ENV PATH="${VIRTUAL_ENV}/bin:$PATH" -# environment variables -ENV REGISTRY_AUTH='' \ - REGISTRY_USER='' \ - REGISTRY_PW='' \ - REGISTRY_URL='' \ - REGISTRY_VERSION='v2' \ - PUBLISHED_HOST_NAME='' \ - SIMCORE_SERVICES_NETWORK_NAME='' \ - EXTRA_HOSTS_SUFFIX='undefined' - - -EXPOSE 8080 +EXPOSE 8000 +EXPOSE 3000 # -------------------------- Build stage ------------------- # Installs build/package management tools and third party dependencies @@ -60,38 +71,31 @@ EXPOSE 8080 # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ -# NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=21.3 \ - wheel \ - setuptools -# copy director and dependencies -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/director /build/services/director -# install base 3rd party dependencies (NOTE: this speeds up devel mode) -RUN pip --no-cache-dir install -r /build/services/director/requirements/_base.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools -# FIXME: -# necessary to prevent duplicated files. 
-# Will be removed when director is refactored using cookiecutter as this will not be necessary anymore -COPY --chown=scu:scu api/specs/common/schemas/node-meta-v0.0.1.json \ - /build/services/director/src/simcore_service_director/api/v0/oas-parts/schemas/node-meta-v0.0.1.json +WORKDIR /build # --------------------------Prod-depends-only stage ------------------- # This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) @@ -99,11 +103,20 @@ COPY --chown=scu:scu api/specs/common/schemas/node-meta-v0.0.1.json \ # + /build # + services/director [scu:scu] WORKDIR # -FROM build as prod-only-deps +FROM build AS prod-only-deps -WORKDIR /build/services/director ENV SC_BUILD_TARGET=prod-only-deps -RUN pip --no-cache-dir install -r requirements/prod.txt + +WORKDIR /build/services/director + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/director,target=/build/services/director,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + # --------------------------Production stage ------------------- # Final cleanup up to reduce image size and startup setup @@ -112,28 +125,38 @@ RUN pip --no-cache-dir install -r requirements/prod.txt # + /home/scu $HOME = WORKDIR # + services/director [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ - SC_BOOT_MODE=production + SC_BOOT_MODE=production + ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu -# bring installed package without build tools -COPY --from=prod-only-deps --chown=scu:scu ${VIRTUAL_ENV} ${VIRTUAL_ENV} +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} -# copy docker entrypoint and boot scripts +# Copies booting scripts COPY --chown=scu:scu services/director/docker services/director/docker RUN chmod +x services/director/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=120s \ - --start-period=30s \ - --retries=3 \ - CMD ["python3", "/home/scu/services/director/docker/healthcheck.py", "http://localhost:8080/v0/"] -ENTRYPOINT [ "services/director/docker/entrypoint.sh" ] -CMD ["services/director/docker/boot.sh"] + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "/home/scu/services/director/docker/healthcheck.py", "http://localhost:8000/v0/"] + +ENTRYPOINT [ "/bin/sh", "services/director/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/director/docker/boot.sh"] # --------------------------Development stage ------------------- @@ -144,11 +167,14 @@ CMD ["services/director/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/director -ENV SC_BUILD_TARGET=development -ENV NODE_SCHEMA_LOCATION=../../../api/specs/common/schemas/node-meta-v0.0.1.json WORKDIR /devel + RUN chown -R scu:scu "${VIRTUAL_ENV}" + ENTRYPOINT [ "/bin/sh", "services/director/docker/entrypoint.sh" ] CMD ["/bin/sh", "services/director/docker/boot.sh"] diff --git a/services/director/Makefile 
b/services/director/Makefile index 0e91426d6d2..140d05c72d0 100644 --- a/services/director/Makefile +++ b/services/director/Makefile @@ -3,13 +3,3 @@ # include ../../scripts/common.Makefile include ../../scripts/common-service.Makefile - - -_check_python_version: - # Checking that runs with correct python version - @python3 -c "import sys; current_version=[int(d) for d in '3.6'.split('.')]; assert sys.version_info[:2]==tuple(current_version[:2]), f'Expected python $(EXPECTED_PYTHON_VERSION), got {sys.version_info}'" - - -.PHONY: openapi-specs -openapi-specs: ## updates and validates openapi specifications - $(MAKE) -C $(CURDIR)/src/simcore_service_${APP_NAME}/api $@ diff --git a/services/director/README.md b/services/director/README.md index 21146025f16..d919b4f4e98 100644 --- a/services/director/README.md +++ b/services/director/README.md @@ -1,86 +1 @@ # director - -[![Docker Pulls](https://img.shields.io/docker/pulls/itisfoundation/director.svg)](https://hub.docker.com/r/itisfoundation/director/tags) -[![](https://images.microbadger.com/badges/image/itisfoundation/director.svg)](https://microbadger.com/images/itisfoundation/director "More on service image in registry") -[![](https://images.microbadger.com/badges/version/itisfoundation/director.svg)](https://microbadger.com/images/itisfoundation/director "More on service image in registry") -[![](https://images.microbadger.com/badges/commit/itisfoundation/director.svg)](https://microbadger.com/images/itisfoundation/director "More on service image in registry") - - -## Usage - -```bash - # go to director folder - cd /services/director - # install - pip install . - # start director - simcore-service-director - # or - python -m simcore_service_director -``` - -## Development - -```bash - # go to director folder - cd /services/director - # install with symlinks - pip install -r requirements-dev.txt -``` - -The director implements a REST API defined in __/src/simcore_service_director/api/v1/openapi.yaml__. -First extend the API and validate the API before implementing any new route. - -## Current status - -End validation of the requests/responses is missing as some issues arose with using the openapi-core library. It seems it is not happy with referencing a json schema file. An issue was filed to see if something may be done quickly [github](https://github.com/p1c2u/openapi-core/issues/90). - -## docker - -- Uses multi-stage dockerfile to extend a common stack of layers into production or development images -- Main difference between development and production stages is whether the code gets copied or not inside of the image -- Development stage is set first to avoid re-building when files are changed -- ``boot.sh`` is necessary to activate the virtual environment inside of the docker - -```bash - - # development image - docker build --target development -t director:dev . - docker run -v %DIRECTOR_SRC_CODE:/home/scu/src director:dev - - # production image - docker build -t director:prod . - # or - docker build --target production -t director:prod . - docker run director:prod - -``` - -### local testing - -Using the main Makefile of the oSparc platform allows for testing the director: - -```bash - # go to root folder - make build-devel - # switch the docker swarm on in development mode - make up-devel -``` - -Then open [director-swagger-ui](http://localhost:8080/apidoc/) to see the director API and try out the different routes. 
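The README section above ends by pointing developers at the swagger UI once the devel stack is up. A scripted check of the same endpoint, as a sketch that assumes the legacy devel stack described in that README is running, that the director answers on `localhost:8080`, and that `httpx` is installed:

```python
import httpx  # assumption: httpx is available; any HTTP client works the same way

# Assumes the devel stack from the README above is running and the director
# API is reachable on localhost:8080 (the port used by the legacy docs).
response = httpx.get("http://localhost:8080/v0/", timeout=10)
response.raise_for_status()
print(response.status_code, response.text[:200])
```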
- -## code generation from REST API "server side" - -Execute the following script for generating the necessary code server side - -```bash -./codegen.sh -``` - -NOTE: Issue #3 must still be taken care of manually! - -### Issues - -1. SwaggerRouter must be created with __version_ui__ set to 3 or the swagger ui must be access with ?version=3 -2. SwaggerRouter.include needs to have the argument __basePath__ filled to serve the API at the right location (ndlr /v1) [Github bug entry](https://github.com/aamalev/aiohttp_apiset/issues/45) -3. The generated models need to be manually corrected when the properties are __nullable__ as the code generator does add a check for __None__ value that triggers a ValueError exception even though the value is allowed to be null [Python server models generation issue with __nullable: true__ on GitHub](https://github.com/OpenAPITools/openapi-generator/issues/579) diff --git a/services/director/VERSION b/services/director/VERSION index 6e8bf73aa55..3eefcb9dd5b 100644 --- a/services/director/VERSION +++ b/services/director/VERSION @@ -1 +1 @@ -0.1.0 +1.0.0 diff --git a/services/director/codegen.sh b/services/director/codegen.sh deleted file mode 100755 index bd5b6600cf6..00000000000 --- a/services/director/codegen.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!/bin/bash -# define the input specification file and the output directory -# typical structure: -# /src/package-name/.openapi/v1/package_api.yaml -- this is the input file -# /src/package-name/rest/generated_code -- this is the output directory -SOURCE_DIR=./src/simcore_service_director -API_VERSION=v0 -INPUT_SPEC=${SOURCE_DIR}/api/${API_VERSION}/openapi.yaml -OUTPUT_DIR=${SOURCE_DIR}/rest -OUTPUT_DIR_GEN=${SOURCE_DIR}/rest/generated_code -INIT_FILE_PATH=${OUTPUT_DIR}/__init__.py -HANDLERS_FILE_PATH=${OUTPUT_DIR}/handlers.py -ROUTING_FILE_PATH=${OUTPUT_DIR_GEN}/routing.py - -# create the folder for the output -mkdir -p $OUTPUT_DIR -# generate the python server models code -ABSOLUTE_INPUT_PATH=$(realpath "${INPUT_SPEC}") -ABSOLUTE_OUTPUT_DIR=$(realpath "${OUTPUT_DIR}") -ABSOLUTE_OUTPUT_DIR_GEN=$(realpath "${OUTPUT_DIR_GEN}") -../../scripts/openapi/openapi_python_server_codegen.sh -i ${ABSOLUTE_INPUT_PATH} -o ${ABSOLUTE_OUTPUT_DIR_GEN} -# replace import entries in python code -find ${OUTPUT_DIR_GEN}/models -type f -exec sed -i 's/openapi_server.models././g' {} \; -find ${OUTPUT_DIR_GEN}/models -type f -exec sed -i 's/openapi_server/../g' {} \; -find ${OUTPUT_DIR_GEN} -maxdepth 1 -type f -exec sed -i 's/openapi_server/./g' {} \; -# create __init__.py if always -cat > "${INIT_FILE_PATH}" << EOF -"""GENERATED CODE from codegen.sh -It is advisable to not modify this code if possible. -This will be overriden next time the code generator is called. -""" -from .generated_code import ( - models, - util, - routing -) -EOF - -# only generate stub if necessary -if [ ! -e "${HANDLERS_FILE_PATH}" ]; then - cat > "${HANDLERS_FILE_PATH}" << EOF -"""This is a generated stub of handlers to be connected to the paths defined in the API - -""" -import logging - -from aiohttp import web_exceptions - -log = logging.getLogger(__name__) - -# This module shall contain the handlers of the API (implementation side of the openapi server side). 
-# Each operation is typically defined as -# async def root_get(request): -# return "hello API world" - -# The API shall define a path where the entry operationId: -# operationId: root_get -EOF -fi - -# always generate routing -cat > "${ROUTING_FILE_PATH}" << EOF -"""GENERATED CODE from codegen.sh -It is advisable to not modify this code if possible. -This will be overriden next time the code generator is called. - -use create_web_app to initialise the web application using the specification file. -The base folder is the root of the package. -""" - - -import logging -from pathlib import Path - -from aiohttp import hdrs, web -from aiohttp_apiset import SwaggerRouter -from aiohttp_apiset.exceptions import ValidationError -from aiohttp_apiset.middlewares import Jsonify, jsonify -from aiohttp_apiset.swagger.loader import ExtendedSchemaFile -from aiohttp_apiset.swagger.operations import OperationIdMapping - -from .. import handlers -from .models.base_model_ import Model - -log = logging.getLogger(__name__) - -@web.middleware -async def __handle_errors(request, handler): - try: - log.debug("error middleware handling request %s to handler %s", request, handler) - response = await handler(request) - return response - except ValidationError as ex: - # aiohttp apiset errors - log.exception("error happened in handling route") - error = dict(status=ex.status, message=ex.to_tree()) - error_enveloped = dict(error=error) - return web.json_response(error_enveloped, status=ex.status) - except web.HTTPError as ex: - log.exception("error happened in handling route") - error = dict(status=ex.status, message=str(ex.reason)) - error_enveloped = dict(data=error) - return web.json_response(error_enveloped, status=ex.status) - - -def create_web_app(base_folder, spec_file, additional_middlewares = None): - # create the default mapping of the operationId to the implementation code in handlers - opmap = __create_default_operation_mapping(Path(base_folder / spec_file)) - - # generate a version 3 of the API documentation - router = SwaggerRouter( - swagger_ui='/apidoc/', - version_ui=3, # forces the use of version 3 by default - search_dirs=[base_folder], - default_validate=True, - ) - - # add automatic jsonification of the models located in generated code - jsonify.singleton = Jsonify(indent=3, ensure_ascii=False) - jsonify.singleton.add_converter(Model, lambda o: o.to_dict(), score=0) - - middlewares = [jsonify, __handle_errors] - if additional_middlewares: - middlewares.extend(additional_middlewares) - # create the web application using the API - app = web.Application( - router=router, - middlewares=middlewares, - ) - router.set_cors(app, domains='*', headers=( - (hdrs.ACCESS_CONTROL_EXPOSE_HEADERS, hdrs.AUTHORIZATION), - )) - - # Include our specifications in a router, - # is now available in the swagger-ui to the address http://localhost:8080/swagger/?spec=v1 - router.include( - spec=Path(base_folder / spec_file), - operationId_mapping=opmap, - name='v0', # name to access in swagger-ui, - basePath="/v0" # BUG: in apiset with openapi 3.0.0 [Github bug entry](https://github.com/aamalev/aiohttp_apiset/issues/45) - ) - - return app - -def __create_default_operation_mapping(specs_file): - operation_mapping = {} - yaml_specs = ExtendedSchemaFile(specs_file) - paths = yaml_specs['paths'] - for path in paths.items(): - for method in path[1].items(): # can be get, post, patch, put, delete... 
- op_str = "operationId" - if op_str not in method[1]: - raise Exception("The API %s does not contain the operationId tag for route %s %s" % (specs_file, path[0], method[0])) - operation_id = method[1][op_str] - operation_mapping[operation_id] = getattr(handlers, operation_id) - return OperationIdMapping(**operation_mapping) -EOF diff --git a/services/director/docker/boot.sh b/services/director/docker/boot.sh index f771974b095..af732895050 100755 --- a/services/director/docker/boot.sh +++ b/services/director/docker/boot.sh @@ -6,32 +6,60 @@ IFS=$(printf '\n\t') INFO="INFO: [$(basename "$0")] " -# BOOTING application --------------------------------------------- echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." -echo " User :$(id "$(whoami)")" -echo " Workdir :$(pwd)" +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# if [ "${SC_BUILD_TARGET}" = "development" ]; then echo "$INFO" "Environment :" printenv | sed 's/=/: /' | sed 's/^/ /' | sort echo "$INFO" "Python :" python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/director || exit 1 - # speedup for legacy service with all essential depnendcy pinned - # in this case `--no-deps` does the trick, for details see link - # https://stackoverflow.com/a/65793484/2855718 - pip install --no-cache-dir --no-deps -r requirements/dev.txt - cd - || exit 1 - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + + cd services/director + uv pip --quiet sync requirements/dev.txt + cd - + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi -# RUNNING application ---------------------------------------- -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - watchmedo auto-restart --recursive --pattern="*.py;*/src/*" --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" --ignore-directories -- \ - python3 -m ptvsd --host 0.0.0.0 --port 3000 -m \ - simcore_service_director --loglevel="${LOGLEVEL}" +# +# RUNNING application +# + +APP_LOG_LEVEL=${DIRECTOR_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/director/src/simcore_service_director && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${DIRECTOR_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --reload \ + $reload_dir_packages + --reload-dir . 
\ + --log-level \"${SERVER_LOG_LEVEL}\" + " else - exec simcore-service-director --loglevel="${LOGLEVEL}" + exec uvicorn simcore_service_director.main:the_app \ + --host 0.0.0.0 \ + --log-level "${SERVER_LOG_LEVEL}" fi diff --git a/services/director/docker/entrypoint.sh b/services/director/docker/entrypoint.sh index 232da22ba7e..ad982fd8d5c 100755 --- a/services/director/docker/entrypoint.sh +++ b/services/director/docker/entrypoint.sh @@ -1,4 +1,9 @@ #!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# set -o errexit set -o nounset @@ -10,86 +15,75 @@ ERROR="ERROR: [$(basename "$0")] " # Read self-signed SSH certificates (if applicable) # -# In case the director must access a docker registry in a secure way using +# In case the director must access a docker registry in a secure way using # non-standard certificates (e.g. such as self-signed certificates), this call is needed. -# It needs to be executed as root. +# It needs to be executed as root. Also required for any access, for example, to a secured rabbitmq. update-ca-certificates -# This entrypoint script: -# -# - Executes *inside* of the container upon start as --user [default root] -# - Notice that the container *starts* as --user [default root] but -# *runs* as non-root user [scu] -# echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." -echo "$INFO" "User :$(id "$(whoami)")" -echo "$INFO" "Workdir :$(pwd)" -echo scuUser :"$(id scu)" - -if [ "${SC_BUILD_TARGET}" = "development" ] -then - # NOTE: expects docker run ... -v $(pwd):/devel/services/director - DEVEL_MOUNT=/devel/services/director +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" - stat $DEVEL_MOUNT > /dev/null 2>&1 || \ - (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1) # FIXME: exit does not stop script +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gid and assigns to user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) - echo "setting correct user id/group id..." - HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}") - HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}") - CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) - if [ "$HOST_USERID" -eq 0 ] - then - echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..." - adduser "$SC_USER_NAME" root + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..." + adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..."
+ # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" else - echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." - # take host's credentials in $SC_USER_NAME - if [ -z "$CONT_GROUPNAME" ] - then - echo "Creating new group my$SC_USER_NAME" - CONT_GROUPNAME=my$SC_USER_NAME - addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" - else - echo "group already exists" - fi - echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..." - adduser "$SC_USER_NAME" "$CONT_GROUPNAME" - - echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" - usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" - - echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; - # change user property of files already around - echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + echo "$INFO" "group already exists" fi -fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." + adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ] -then - # NOTE: production does NOT pre-installs ptvsd - python3 -m pip install ptvsd + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi fi # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock -if stat $DOCKER_MOUNT > /dev/null 2>&1 -then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker - if ! addgroup --gid "$GROUPID" $GROUPNAME > /dev/null 2>&1 - then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." 
+ # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" fi echo "$INFO Starting $* ..." diff --git a/services/director/docker/healthcheck.py b/services/director/docker/healthcheck.py old mode 100644 new mode 100755 index b3a1e7e8cad..cb51ed2399e --- a/services/director/docker/healthcheck.py +++ b/services/director/docker/healthcheck.py @@ -6,7 +6,8 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` @@ -18,23 +19,24 @@ import os import sys +from contextlib import suppress from urllib.request import urlopen -SUCCESS, UNHEALTHY = 0, 1 +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") -# Disabled if boots with debugger -ok = os.environ.get("SC_BOOT_MODE") == "debug" +# Adds a base-path if defined in environ +SIMCORE_NODE_BASEPATH = os.environ.get("SIMCORE_NODE_BASEPATH", "") -# Queries host -# pylint: disable=consider-using-with -ok = ( - ok - or urlopen( - "{host}{baseurl}".format( - host=sys.argv[1], baseurl=os.environ.get("SIMCORE_NODE_BASEPATH", "") - ) # adds a base-path if defined in environ - ).getcode() - == 200 -) -sys.exit(SUCCESS if ok else UNHEALTHY) +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception): + with urlopen(f"{sys.argv[1]}{SIMCORE_NODE_BASEPATH}") as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/director/requirements/Makefile b/services/director/requirements/Makefile index 7aacec9e5ee..3f25442b790 100644 --- a/services/director/requirements/Makefile +++ b/services/director/requirements/Makefile @@ -4,10 +4,3 @@ include ../../../requirements/base.Makefile # Add here any extra explicit dependency: e.g. _migration.txt: _base.txt - - -_test.txt: _base.txt _test.in - ## NOTE: this recipe override has to be removed - ## to execute target upgrades e.g. due to vulnerability of - ## a library. - @echo INFO: test.txt is frozen. Skipping upgrade. 
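Note: the refactored `healthcheck.py` above reports health solely through its exit code: `os.EX_OK` (0) when the endpoint answers HTTP 200, or unconditionally when `SC_BOOT_MODE` contains `debug`, and `os.EX_UNAVAILABLE` otherwise. Below is a minimal sketch (not part of the diff) of how that contract can be exercised by hand; the script path and port are assumptions taken from the production stage above and may need adjusting to your setup.

```python
# Minimal sketch: run the healthcheck script manually and interpret its exit code
# the same way Docker's HEALTHCHECK does (exit code 0 == healthy).
import subprocess
import sys

# Assumed values, copied from the Dockerfile HEALTHCHECK above; adjust as needed.
HEALTHCHECK_SCRIPT = "services/director/docker/healthcheck.py"
TARGET_URL = "http://localhost:8000/v0/"

result = subprocess.run(
    [sys.executable, HEALTHCHECK_SCRIPT, TARGET_URL],
    check=False,  # we want the exit code, not an exception
)
print("healthy" if result.returncode == 0 else f"unhealthy (exit code {result.returncode})")
```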
diff --git a/services/director/requirements/_base.in b/services/director/requirements/_base.in index 0618d6c7759..4039c4ffb4a 100644 --- a/services/director/requirements/_base.in +++ b/services/director/requirements/_base.in @@ -1,70 +1,20 @@ # -# Specifies third-party dependencies for 'director' +# Specifies third-party dependencies for 'services/director' # +--constraint ../../../requirements/constraints.txt -# IMPORTANT: All requirements (including the packages in this repository) as FROZEN to those in itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0 -# - current service is going to be replaced by director-v2 -# -# - -# This list was obtained as follows -# -# $ docker pull itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0 -# master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0: Pulling from itisfoundation/director -# Digest: sha256:84ba999ca348bf9d56d9ef0af2e3494ede0cd06d357d289e2a09a4191e7a56d3 -# Status: Image is up to date for itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0 -# docker.io/itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0 -# -# $ docker inspect itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0| jq '.[0] | .RepoTags, .ContainerConfig.Labels' -# [ -# "itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0" -# ] -# { -# "io.osparc.api-version": "0.1.0", -# "maintainer": "sanderegg", -# "org.label-schema.build-date": "2020-11-05T14:02:31Z", -# "org.label-schema.schema-version": "1.0", -# "org.label-schema.vcs-ref": "c8669fb", -# "org.label-schema.vcs-url": "https://github.com/ITISFoundation/osparc-simcore.git" -# } -# -# $ docker run -it itisfoundation/director:master-2020-11-05--14-45.c8669fb52659b684514fefa4f3b4599f57f276a0 pip freeze -# + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in -aiodebug==1.1.2 -aiodocker==0.14.0 -aiohttp==3.3.2 -aiohttp-apiset @ git+https://github.com/ITISFoundation/aiohttp_apiset.git@5c8a61ceb6de7ed9e09db5b4609b458a0d3773df -aiopg==1.0.0 -aiozipkin==0.7.1 -async-generator==1.10 -async-timeout==3.0.1 -asyncio-extras==1.3.2 -attrs==20.2.0 -certifi==2019.3.9 -chardet==3.0.4 -dataclasses==0.7 -idna==2.8 -idna-ssl==1.1.0 -isodate==0.6.0 -jsonschema==2.6.0 -lazy-object-proxy==1.4.3 -multidict==4.5.2 -openapi-core==0.12.0 -openapi-spec-validator==0.2.9 -prometheus-client==0.8.0 -psycopg2-binary==2.8.6 -pydantic==1.7.2 -PyYAML==5.4 # CVE-2020-1747 -requests==2.27.1 # -simcore-service-library @ git+https://github.com/ITISFoundation/osparc-simcore.git@c8669fb52659b684514fefa4f3b4599f57f276a0#egg=simcore-service-library&subdirectory=packages/service-library -six==1.12.0 -SQLAlchemy==1.3.20 -strict-rfc3339==0.7 -tenacity==6.0.0 -trafaret==2.1.0 -ujson==4.0.1 -urllib3==1.26.5 # CVE-2021-33503 -Werkzeug==1.0.1 -yarl==1.3.0 +aiocache +aiodocker +httpx[http2] +prometheus-client +pydantic +tenacity diff --git a/services/director/requirements/_base.txt b/services/director/requirements/_base.txt index d8280bd4673..3ed85a0602e 100644 ---
a/services/director/requirements/_base.txt +++ b/services/director/requirements/_base.txt @@ -1,160 +1,519 @@ -# -# This file is autogenerated by pip-compile with python 3.6 -# To update, run: -# -# pip-compile --output-file=requirements/_base.txt --strip-extras requirements/_base.in -# -aiodebug==1.1.2 +aio-pika==9.4.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in - # simcore-service-library -aiodocker==0.14.0 - # via -r requirements/_base.in -aiohttp==3.3.2 +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.23.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in - # aiodocker - # aiohttp-apiset - # aiozipkin - # simcore-service-library -aiohttp-apiset @ git+https://github.com/ITISFoundation/aiohttp_apiset.git@5c8a61ceb6de7ed9e09db5b4609b458a0d3773df - # via -r requirements/_base.in -aiopg==1.0.0 - # via - # -r requirements/_base.in - # simcore-service-library -aiozipkin==0.7.1 +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.18 # via - # -r requirements/_base.in - # simcore-service-library -async-generator==1.10 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.1 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.6.2.post1 # via - # -r requirements/_base.in - # asyncio-extras -async-timeout==3.0.1 + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 # via - # -r requirements/_base.in - # aiohttp -asyncio-extras==1.3.2 - # via -r requirements/_base.in -attrs==20.2.0 + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==24.2.0 # via - # -r requirements/_base.in # aiohttp - # openapi-core - # simcore-service-library -certifi==2019.3.9 + # jsonschema + # referencing +certifi==2024.8.30 # via - # -r requirements/_base.in + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx # requests -chardet==3.0.4 - # via - # -r requirements/_base.in - # aiohttp -charset-normalizer==2.0.12 +charset-normalizer==3.4.0 # via requests -dataclasses==0.7 +click==8.1.7 # via - # -r requirements/_base.in + # typer + # uvicorn +deprecated==1.2.14 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via + # fastapi # pydantic -idna==2.8 +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.5 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.31 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.66.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.67.1 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # via + # httpcore + # uvicorn +h2==4.1.0 + # via httpx +hpack==4.0.0 + # via h2 +httpcore==1.0.6 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.27.2 # via + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in # -r requirements/_base.in - # idna-ssl + # fastapi +hyperframe==6.0.1 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx # requests # yarl -idna-ssl==1.1.0 +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # fastapi +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2023.7.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 # via - # -r requirements/_base.in # aiohttp -isodate==0.6.0 + # yarl +opentelemetry-api==1.28.1 # via - # -r requirements/_base.in - # openapi-core -jsonschema==2.6.0 + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.28.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.28.1 # via - # -r requirements/_base.in - # aiohttp-apiset - # openapi-spec-validator - # simcore-service-library -lazy-object-proxy==1.4.3 + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.28.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.28.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.49b1 # via - # -r requirements/_base.in - # openapi-core - # simcore-service-library -multidict==4.5.2 + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.49b1 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-fastapi==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.49b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.28.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.28.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.49b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + 
# opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.49b1 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.11 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +prometheus-client==0.21.0 # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in # -r requirements/_base.in +propcache==0.2.0 + # via # aiohttp # yarl -openapi-core==0.12.0 +protobuf==5.28.3 # via - # -r requirements/_base.in - # simcore-service-library -openapi-spec-validator==0.2.9 + # googleapis-common-protos + # opentelemetry-proto +psutil==6.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycryptodome==3.21.0 + # 
via stream-zip +pydantic==2.10.2 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in - # openapi-core -prometheus-client==0.8.0 + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.10.0 # via - # -r requirements/_base.in - # simcore-service-library -psycopg2-binary==2.8.6 + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 # via - # -r requirements/_base.in - # aiopg - # simcore-service-library -pydantic==1.7.2 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.18.0 + # via rich +pyinstrument==5.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 # via - # -r requirements/_base.in - # simcore-service-library -pyyaml==5.4 + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 # via - # -r requirements/_base.in - # aiohttp-apiset - # openapi-spec-validator - # simcore-service-library -requests==2.27.1 - # via -r requirements/_base.in -simcore-service-library @ git+https://github.com/ITISFoundation/osparc-simcore.git@c8669fb52659b684514fefa4f3b4599f57f276a0#subdirectory=packages/service-library - # via -r 
requirements/_base.in -six==1.12.0 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 # via - # -r requirements/_base.in - # isodate - # openapi-core - # openapi-spec-validator - # tenacity -sqlalchemy==1.3.20 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r 
requirements/../../../packages/service-library/requirements/_base.in +referencing==0.29.3 # via - # -r requirements/_base.in - # simcore-service-library -strict-rfc3339==0.7 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 # via - # -r requirements/_base.in - # openapi-core -tenacity==6.0.0 + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.21.0 # via - # -r requirements/_base.in - # simcore-service-library -trafaret==2.1.0 + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.16.0 + # via python-dateutil +sniffio==1.3.1 # via - # -r requirements/_base.in - # simcore-service-library -ujson==4.0.1 + # anyio + # httpx +starlette==0.41.3 # via - # -r requirements/_base.in - # simcore-service-library -urllib3==1.26.5 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.13.0 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241003 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # typer +urllib3==2.2.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # requests -werkzeug==1.0.1 +uvicorn==0.34.2 # via - # -r requirements/_base.in - # simcore-service-library -yarl==1.3.0 + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==0.24.0 + # via uvicorn +websockets==14.1 + # via uvicorn 
+wrapt==1.16.0 # via - # -r requirements/_base.in - # aiodocker + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.17.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/services/director/requirements/_test.in b/services/director/requirements/_test.in index ead790f296c..b5787990732 100644 --- a/services/director/requirements/_test.in +++ b/services/director/requirements/_test.in @@ -1,33 +1,26 @@ +# Specifies dependencies required to run 'services/api-server/test' +# both for unit and integration tests!! # -# Specifies dependencies required to run 'director' -# - -# frozen specs ---requirement _base.txt +--constraint ../../../requirements/constraints.txt -# NOTE: -# FROZEN (see notes in _base.in) -# DO NOT CHANGE ANYTHING HERE. -# IT WON'T HAVE ANY EFFECT +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt # - -# FROZEN as well (DO NOT CHANGE anything in pytest-simcore, it will have no effect in the director package) -pytest-simcore @ git+https://github.com/ITISFoundation/osparc-simcore.git@79f866219bf650c5eeb4fcdf8f017319087c92c7#egg=pytest-simcore&subdirectory=packages/pytest-simcore +--constraint _base.txt # testing +asgi_lifespan aioresponses -codecov -coverage==4.5.1 # TODO: Downgraded because of a bug https://github.com/nedbat/coveragepy/issues/716 -coveralls docker -openapi-spec-validator~=0.2 # TODO: this library is limiting jsonschema<3 -ptvsd -pylint +faker +jsonref pytest -pytest-aiohttp # incompatible with pytest-asyncio. 
See https://github.com/pytest-dev/pytest-asyncio/issues/76 +pytest-asyncio +pytest-benchmark pytest-cov +pytest-docker pytest-instafail pytest-mock pytest-runner pytest-sugar -python-dotenv +respx diff --git a/services/director/requirements/_test.txt b/services/director/requirements/_test.txt index acef70ab464..14f1a23345c 100644 --- a/services/director/requirements/_test.txt +++ b/services/director/requirements/_test.txt @@ -1,256 +1,144 @@ -# -# This file is autogenerated by pip-compile with python 3.6 -# To update, run: -# -# pip-compile --output-file=requirements/_test.txt --strip-extras requirements/_test.in -# -aiodebug==1.1.2 +aiohappyeyeballs==2.4.3 # via - # -r requirements/_base.txt - # simcore-service-library -aiodocker==0.14.0 - # via -r requirements/_base.txt -aiohttp==3.3.2 + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 # via - # -r requirements/_base.txt - # aiodocker - # aiohttp-apiset + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt # aioresponses - # aiozipkin - # pytest-aiohttp - # simcore-service-library -aiohttp-apiset @ git+https://github.com/ITISFoundation/aiohttp_apiset.git@5c8a61ceb6de7ed9e09db5b4609b458a0d3773df - # via -r requirements/_base.txt -aiopg==1.0.0 - # via - # -r requirements/_base.txt - # simcore-service-library -aioresponses==0.7.2 +aioresponses==0.7.8 # via -r requirements/_test.in -aiozipkin==0.7.1 - # via - # -r requirements/_base.txt - # simcore-service-library -astroid==2.4.2 - # via pylint -async-generator==1.10 - # via - # -r requirements/_base.txt - # asyncio-extras -async-timeout==3.0.1 - # via - # -r requirements/_base.txt - # aiohttp -asyncio-extras==1.3.2 - # via -r requirements/_base.txt -attrs==20.2.0 +aiosignal==1.3.1 # via - # -r requirements/_base.txt + # -c requirements/_base.txt # aiohttp - # openapi-core - # pytest - # simcore-service-library -certifi==2019.3.9 +anyio==4.6.2.post1 # via - # -r requirements/_base.txt - # requests -chardet==3.0.4 + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==24.2.0 # via - # -r requirements/_base.txt + # -c requirements/_base.txt # aiohttp -charset-normalizer==2.0.12 + # pytest-docker +certifi==2024.8.30 # via - # -r requirements/_base.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx # requests -codecov==2.1.10 - # via -r requirements/_test.in -coverage==4.5.1 +charset-normalizer==3.4.0 # via - # -r requirements/_test.in - # codecov - # coveralls - # pytest-cov -coveralls==2.1.2 + # -c requirements/_base.txt + # requests +coverage==7.6.12 + # via pytest-cov +docker==7.1.0 # via -r requirements/_test.in -dataclasses==0.7 - # via - # -r requirements/_base.txt - # pydantic -docker==4.3.1 +faker==36.1.1 # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -idna==2.8 - # via - # -r requirements/_base.txt - # idna-ssl - # requests - # yarl -idna-ssl==1.1.0 +frozenlist==1.5.0 # via - # -r requirements/_base.txt + # -c requirements/_base.txt # aiohttp -importlib-metadata==2.0.0 + # aiosignal +h11==0.14.0 # via - # pluggy - # pytest -iniconfig==1.1.1 - # via pytest -isodate==0.6.0 + # -c requirements/_base.txt + # httpcore +httpcore==1.0.6 # via - # -r requirements/_base.txt - # openapi-core -isort==5.6.4 - # via pylint -jsonschema==2.6.0 + # -c requirements/_base.txt + # httpx +httpx==0.27.2 # via - # -r requirements/_base.txt - # aiohttp-apiset - # openapi-spec-validator - # simcore-service-library -lazy-object-proxy==1.4.3 
+ # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # respx +idna==3.10 # via - # -r requirements/_base.txt - # astroid - # openapi-core - # simcore-service-library -mccabe==0.6.1 - # via pylint -multidict==4.5.2 + # -c requirements/_base.txt + # anyio + # httpx + # requests + # yarl +iniconfig==2.0.0 + # via pytest +jsonref==1.1.0 + # via -r requirements/_test.in +multidict==6.1.0 # via - # -r requirements/_base.txt + # -c requirements/_base.txt # aiohttp # yarl -openapi-core==0.12.0 - # via - # -r requirements/_base.txt - # simcore-service-library -openapi-spec-validator==0.2.9 - # via - # -r requirements/_base.txt - # -r requirements/_test.in - # openapi-core -packaging==20.4 +packaging==24.2 # via + # -c requirements/_base.txt + # aioresponses # pytest # pytest-sugar -pluggy==0.13.1 - # via pytest -prometheus-client==0.8.0 - # via - # -r requirements/_base.txt - # simcore-service-library -psycopg2-binary==2.8.6 - # via - # -r requirements/_base.txt - # aiopg - # simcore-service-library -ptvsd==4.3.2 - # via -r requirements/_test.in -py==1.9.0 +pluggy==1.5.0 # via pytest -pydantic==1.7.2 +propcache==0.2.0 # via - # -r requirements/_base.txt - # simcore-service-library -pylint==2.6.0 - # via -r requirements/_test.in -pyparsing==2.4.7 - # via packaging -pytest==6.1.2 + # -c requirements/_base.txt + # aiohttp + # yarl +py-cpuinfo==9.0.0 + # via pytest-benchmark +pytest==8.3.5 # via # -r requirements/_test.in - # pytest-aiohttp + # pytest-asyncio + # pytest-benchmark # pytest-cov + # pytest-docker # pytest-instafail # pytest-mock - # pytest-simcore # pytest-sugar -pytest-aiohttp==0.3.0 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==2.10.1 +pytest-benchmark==5.1.0 # via -r requirements/_test.in -pytest-instafail==0.4.2 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-mock==3.3.1 +pytest-docker==3.2.0 # via -r requirements/_test.in -pytest-runner==5.2 +pytest-instafail==0.5.0 # via -r requirements/_test.in -pytest-simcore @ git+https://github.com/ITISFoundation/osparc-simcore.git@79f866219bf650c5eeb4fcdf8f017319087c92c7#subdirectory=packages/pytest-simcore +pytest-mock==3.14.0 # via -r requirements/_test.in -pytest-sugar==0.9.4 +pytest-runner==6.0.1 # via -r requirements/_test.in -python-dotenv==0.15.0 +pytest-sugar==1.0.0 # via -r requirements/_test.in -pyyaml==5.4 - # via - # -r requirements/_base.txt - # aiohttp-apiset - # openapi-spec-validator - # simcore-service-library -requests==2.27.1 +requests==2.32.3 # via - # -r requirements/_base.txt - # codecov - # coveralls + # -c requirements/_base.txt # docker -simcore-service-library @ git+https://github.com/ITISFoundation/osparc-simcore.git@c8669fb52659b684514fefa4f3b4599f57f276a0#subdirectory=packages/service-library - # via -r requirements/_base.txt -six==1.12.0 - # via - # -r requirements/_base.txt - # astroid - # docker - # isodate - # openapi-core - # openapi-spec-validator - # packaging - # tenacity - # websocket-client -sqlalchemy==1.3.20 - # via - # -r requirements/_base.txt - # simcore-service-library -strict-rfc3339==0.7 - # via - # -r requirements/_base.txt - # openapi-core -tenacity==6.0.0 +respx==0.22.0 + # via -r requirements/_test.in +sniffio==1.3.1 # via - # -r requirements/_base.txt - # simcore-service-library -termcolor==1.1.0 + # -c requirements/_base.txt + # anyio + # asgi-lifespan + # httpx +termcolor==2.5.0 # via pytest-sugar -toml==0.10.2 +tzdata==2025.1 + # via faker +urllib3==2.2.3 # via - # pylint - # pytest -trafaret==2.1.0 - # via - # -r 
requirements/_base.txt - # simcore-service-library -typed-ast==1.4.1 - # via astroid -ujson==4.0.1 - # via - # -r requirements/_base.txt - # simcore-service-library -urllib3==1.26.5 - # via - # -r requirements/_base.txt + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # docker # requests -websocket-client==0.57.0 - # via docker -werkzeug==1.0.1 - # via - # -r requirements/_base.txt - # simcore-service-library -wrapt==1.12.1 - # via astroid -yarl==1.3.0 +yarl==1.17.1 # via - # -r requirements/_base.txt - # aiodocker + # -c requirements/_base.txt # aiohttp -zipp==3.4.0 - # via importlib-metadata diff --git a/services/director/requirements/_tools.in b/services/director/requirements/_tools.in index 05f1ab1646f..52a9a39d162 100644 --- a/services/director/requirements/_tools.in +++ b/services/director/requirements/_tools.in @@ -1,7 +1,7 @@ +--constraint ../../../requirements/constraints.txt --constraint _base.txt --constraint _test.txt +--requirement ../../../requirements/devenv.txt + watchdog[watchmedo] -black~=20.8b0 -pip-tools -bump2version diff --git a/services/director/requirements/_tools.txt b/services/director/requirements/_tools.txt index 38ed7220aed..9bc3fb1323c 100644 --- a/services/director/requirements/_tools.txt +++ b/services/director/requirements/_tools.txt @@ -1,66 +1,84 @@ -# -# This file is autogenerated by pip-compile with python 3.6 -# To update, run: -# -# pip-compile --output-file=requirements/_tools.txt --strip-extras requirements/_tools.in -# -appdirs==1.4.4 - # via black -black==20.8b1 - # via -r requirements/_tools.in +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools bump2version==1.0.1 - # via -r requirements/_tools.in -click==8.0.3 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.7 # via + # -c requirements/_base.txt # black # pip-tools -dataclasses==0.7 +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 # via - # -c requirements/_base.txt - # -c requirements/_test.txt # black -importlib-metadata==2.0.0 + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt - # click - # pep517 -mypy-extensions==0.4.3 - # via black -pathspec==0.9.0 + # black + # build +pathspec==0.12.1 # via black -pep517==0.12.0 +pip==25.0.1 # via pip-tools -pip-tools==6.4.0 - # via -r requirements/_tools.in -pyyaml==5.4 +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt - # -c requirements/_test.txt + # pre-commit # watchdog -regex==2022.1.18 - # via black -toml==0.10.2 - # via - # -c requirements/_test.txt - # black -tomli==1.2.3 - # via pep517 -typed-ast==1.4.1 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via 
pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 # via - # -c requirements/_test.txt - # black -typing-extensions==4.0.1 - # via black -watchdog==2.1.6 + # -c requirements/_base.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.37.1 +wheel==0.45.1 # via pip-tools -zipp==3.4.0 - # via - # -c requirements/_test.txt - # importlib-metadata - # pep517 - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/director/requirements/ci.txt b/services/director/requirements/ci.txt index 8edcd5f2bfe..dd8038caf3b 100644 --- a/services/director/requirements/ci.txt +++ b/services/director/requirements/ci.txt @@ -7,7 +7,16 @@ # # installs base + tests requirements +--requirement _base.txt --requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-director @ . diff --git a/services/director/requirements/dev.txt b/services/director/requirements/dev.txt index dac3f0a494b..80538c22580 100644 --- a/services/director/requirements/dev.txt +++ b/services/director/requirements/dev.txt @@ -12,5 +12,13 @@ --requirement _test.txt --requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/common-library/ +--editable ../../packages/models-library +--editable ../../packages/pytest-simcore/ +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library/ + # installs current package --editable . diff --git a/services/director/requirements/prod.txt b/services/director/requirements/prod.txt index dc0ec561efe..147a5b7ccba 100644 --- a/services/director/requirements/prod.txt +++ b/services/director/requirements/prod.txt @@ -9,5 +9,11 @@ # installs base requirements --requirement _base.txt +# installs this repo's packages +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ + # installs current package -. +simcore-service-director @ . 
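The ci.txt/dev.txt/prod.txt layering above pulls in the frozen _base.txt/_test.txt listings together with the in-repo packages, and the rewritten setup.py in the next hunk re-reads those frozen files with a small regex helper to build install_requires. A minimal sketch of that parsing idea in plain Python; the sample text and the function name below are illustrative assumptions, not taken from the repo, while the pattern itself mirrors the one used in the diff:

    import re

    # Same pattern as the read_reqs() helper introduced in setup.py:
    # keep lines that start a requirement specifier, skip comments,
    # '-r'/'-c' includes and the indented '# via ...' annotations.
    _REQ_PATTERN = re.compile(r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", re.MULTILINE)

    SAMPLE_FROZEN_TXT = """\
    # comments and pip options are skipped
    --constraint ../../../requirements/constraints.txt
    aiohttp==3.11.18
        # via aioresponses
    pytest-asyncio==0.26.0
    """

    def read_reqs_from_text(text: str) -> set[str]:
        # Returns the pinned specifiers found at the start of each line.
        return set(_REQ_PATTERN.findall(text))

    assert read_reqs_from_text(SAMPLE_FROZEN_TXT) == {
        "aiohttp==3.11.18",
        "pytest-asyncio==0.26.0",
    }

Keeping comments, '# via' annotations and '-r'/'-c' include lines out of the parsed set is what lets the frozen files double as pip-compile constraint inputs without leaking pip options into the setuptools metadata.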
diff --git a/services/director/setup.cfg b/services/director/setup.cfg index 8e7e8ea592f..7576ec6b459 100644 --- a/services/director/setup.cfg +++ b/services/director/setup.cfg @@ -1,14 +1,20 @@ [bumpversion] -current_version = 0.1.0 +current_version = 1.0.0 commit = True message = director api version: {current_version} β†’ {new_version} tag = False commit_args = --no-verify -[bumpversion:file:setup.py] -search = "{current_version}" -replace = "{new_version}" - [bumpversion:file:VERSION] -[bumpversion:file:../../api/specs/director/openapi.yaml] -[bumpversion:file:./src/simcore_service_director/api/v0/openapi.yaml] + + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" + + +[mypy] +plugins = + pydantic.mypy diff --git a/services/director/setup.py b/services/director/setup.py index b9fefde11b3..ce02ba7a1f2 100644 --- a/services/director/setup.py +++ b/services/director/setup.py @@ -4,62 +4,65 @@ from setuptools import find_packages, setup -here = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent -if not (sys.version_info.major == 3 and sys.version_info.minor == 6): - raise RuntimeError( - "Requires <=3.6, got %s. Did you forget to activate virtualenv?" - % sys.version_info - ) +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } -def read_reqs(reqs_path: Path): - reqs = re.findall( - r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", reqs_path.read_text(), re.MULTILINE - ) - # TODO: temporary excluding requirements using git - # https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support - return [r for r in reqs if not r.startswith("git")] +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent -install_requirements = read_reqs(here / "requirements" / "_base.txt") + [ - "aiohttp-apiset", - "simcore-service-library", -] +NAME = "simcore-service-director" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Sylvain Anderegg (sanderegg)",) +DESCRIPTION = "oSparc Director webserver service" +README = (CURRENT_DIR / "README.md").read_text() -test_requirements = read_reqs(here / "requirements" / "_test.txt") +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-models-library", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -_CONFIG = dict( - name="simcore-service-director", - version="0.1.0", - description="oSparc Director webserver service", - author="Sylvain Anderegg (sanderegg)", - python_requires="~=3.6", - packages=find_packages(where="src"), - package_dir={ + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=install_requirements, - tests_require=test_requirements, - setup_requires=["pytest-runner"], - package_data={ - "": ["api/v0/openapi.yaml", "api/v0/schemas/*.json"], - }, - entry_points={ + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + 
"entry_points": { "console_scripts": [ - "simcore-service-director = simcore_service_director.__main__:main", + "simcore-service-director = simcore_service_director.cli:main", + "simcore-service = simcore_service_director.cli:main", ], }, -) - - -def main(): - """Execute the setup commands.""" - setup(**_CONFIG) - return 0 # syccessful termination - +} if __name__ == "__main__": - raise SystemExit(main()) + setup(**SETUP) diff --git a/services/director/src/simcore_service_director/__main__.py b/services/director/src/simcore_service_director/__main__.py deleted file mode 100644 index 73227b1c129..00000000000 --- a/services/director/src/simcore_service_director/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python3 - -from .main import main - -main() diff --git a/services/director/src/simcore_service_director/_meta.py b/services/director/src/simcore_service_director/_meta.py new file mode 100644 index 00000000000..5bf4218d678 --- /dev/null +++ b/services/director/src/simcore_service_director/_meta.py @@ -0,0 +1,43 @@ +""" Application's metadata + +""" + +from typing import Final + +from models_library.basic_types import VersionStr, VersionTag +from packaging.version import Version +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore-service-director") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +APP_NAME: Final[str] = PROJECT_NAME +API_VTAG: Final[VersionTag] = VersionTag(info.api_prefix_path_tag) +SUMMARY: Final[str] = info.get_summary() + + +# NOTE: https://patorjk.com/software/taag/#p=display&f=Electronic&t=Director-v0 +APP_STARTED_BANNER_MSG = r""" + + β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„ β–„ β–„ β–„β–„β–„β–„β–„β–„β–„β–„β–„ +β–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œ +β–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–ˆβ–‘β–Œβ–€β–€β–€β–€β–ˆβ–‘β–ˆβ–€β–€β–€β–€ β–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–ˆβ–‘β–Œβ–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–€β–€ β–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€β–€β–€β–€β–ˆβ–‘β–ˆβ–€β–€β–€β–€ β–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–ˆβ–‘β–Œβ–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–ˆβ–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–ˆβ–‘β–ˆβ–€β–€β–€β–€β–€β–ˆβ–‘β–Œ +β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ +β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–ˆβ–‘β–Œβ–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–„β–„ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–ˆβ–‘β–Œ β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–„β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ +β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ +β–β–‘β–Œ β–β–‘β–Œ 
β–β–‘β–Œ β–β–‘β–ˆβ–€β–€β–€β–€β–ˆβ–‘β–ˆβ–€β–€ β–β–‘β–ˆβ–€β–€β–€β–€β–€β–€β–€β–€β–€ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–ˆβ–€β–€β–€β–€β–ˆβ–‘β–ˆβ–€β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ +β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œβ–β–‘β–Œ +β–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–ˆβ–‘β–Œβ–„β–„β–„β–„β–ˆβ–‘β–ˆβ–„β–„β–„β–„ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–„β–„ β–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–„β–„ β–β–‘β–Œ β–β–‘β–ˆβ–„β–„β–„β–„β–„β–„β–„β–ˆβ–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–β–‘β–Œ β–β–‘β–ˆβ–„β–„β–„β–„β–„β–ˆβ–‘β–ˆβ–‘β–Œ +β–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œ β–β–‘β–Œ β–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œβ–β–‘β–Œ β–β–‘β–Œ β–β–‘β–Œ β–β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–Œ + β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€ β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€β–€ β–€ β–€ β–€ β–€β–€β–€β–€β–€β–€β–€β–€β–€ + {} +""".format( + f"v{__version__}" +) + + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/director/src/simcore_service_director/api/Makefile b/services/director/src/simcore_service_director/api/Makefile index f3e3c172ddc..b9eb75b95b0 100644 --- a/services/director/src/simcore_service_director/api/Makefile +++ b/services/director/src/simcore_service_director/api/Makefile @@ -32,9 +32,3 @@ ${OAS_TARGET}: ${OAS_SOURCES} .update-schemas --outfile $@ \ --type yaml \ "${API_SPECS_DIR}/${APP_NAME}/openapi.yaml" - - -.PHONY: openapi-specs -openapi-specs: ${OAS_TARGET} ## creates and validates OpenAPI specs - # Validating bundled '${OAS_TARGET}' - @swagger-cli validate $< diff --git a/services/director/src/simcore_service_director/api/__init__.py b/services/director/src/simcore_service_director/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director/src/simcore_service_director/api/rest/__init__.py b/services/director/src/simcore_service_director/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director/src/simcore_service_director/api/rest/_health.py b/services/director/src/simcore_service_director/api/rest/_health.py new file mode 100644 index 00000000000..19a00014b33 --- /dev/null +++ b/services/director/src/simcore_service_director/api/rest/_health.py @@ -0,0 +1,16 @@ +import arrow +from fastapi import APIRouter +from fastapi.responses import PlainTextResponse + +router = APIRouter() + + +@router.api_route( + "/", + methods=["GET", "HEAD"], + include_in_schema=False, + response_class=PlainTextResponse, +) +async def health_check() -> str: + # NOTE: sync url in docker/healthcheck.py with this entrypoint! 
+ return f"{__name__}.health_check@{arrow.utcnow().isoformat()}" diff --git a/services/director/src/simcore_service_director/api/rest/_running_interactive_services.py b/services/director/src/simcore_service_director/api/rest/_running_interactive_services.py new file mode 100644 index 00000000000..61457413688 --- /dev/null +++ b/services/director/src/simcore_service_director/api/rest/_running_interactive_services.py @@ -0,0 +1,134 @@ +import logging +from pathlib import Path +from typing import Annotated, Any +from uuid import UUID + +from fastapi import APIRouter, Depends, FastAPI, Header, HTTPException, status +from models_library.generics import Envelope +from models_library.projects import ProjectID +from models_library.services_types import ServiceKey, ServiceVersion +from models_library.users import UserID +from servicelib.fastapi.dependencies import get_app + +from ... import producer +from ...core.errors import ( + RegistryConnectionError, + ServiceNotAvailableError, + ServiceUUIDInUseError, + ServiceUUIDNotFoundError, +) + +router = APIRouter() + +_logger = logging.getLogger(__name__) + + +@router.get("/running_interactive_services") +async def list_running_services( + the_app: Annotated[FastAPI, Depends(get_app)], + user_id: UserID | None = None, + project_id: ProjectID | None = None, +) -> Envelope[list[dict[str, Any]]]: + _logger.debug( + "Client does list_running_services request user_id %s, project_id %s", + user_id, + project_id, + ) + services = await producer.get_services_details( + the_app, + f"{user_id}" if user_id else None, + f"{project_id}" if project_id else None, + ) + return Envelope[list[dict[str, Any]]](data=services) + + +@router.post( + "/running_interactive_services", + status_code=status.HTTP_201_CREATED, +) +async def start_service( + the_app: Annotated[FastAPI, Depends(get_app)], + user_id: UserID, + project_id: ProjectID, + service_key: ServiceKey, + service_uuid: UUID, + service_basepath: Path = Path(), + service_tag: ServiceVersion | None = None, + x_simcore_user_agent: str = Header(...), +) -> Envelope[dict[str, Any]]: + _logger.debug( + "Client does start_service with user_id %s, project_id %s, service %s:%s, service_uuid %s, service_basepath %s, request_simcore_user_agent %s", + user_id, + project_id, + service_key, + service_tag, + service_uuid, + service_basepath, + x_simcore_user_agent, + ) + try: + service = await producer.start_service( + the_app, + f"{user_id}", + f"{project_id}", + service_key, + service_tag, + f"{service_uuid}", + f"{service_basepath}", + x_simcore_user_agent, + ) + return Envelope[dict[str, Any]](data=service) + except ServiceNotAvailableError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err + except ServiceUUIDInUseError as err: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, detail=f"{err}" + ) from err + except RegistryConnectionError as err: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{err}" + ) from err + + +@router.get("/running_interactive_services/{service_uuid}") +async def get_running_service( + the_app: Annotated[FastAPI, Depends(get_app)], + service_uuid: UUID, +) -> Envelope[dict[str, Any]]: + _logger.debug( + "Client does get_running_service with service_uuid %s", + service_uuid, + ) + try: + service = await producer.get_service_details(the_app, f"{service_uuid}") + return Envelope[dict[str, Any]](data=service) + except ServiceUUIDNotFoundError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, 
detail=f"{err}" + ) from err + + +@router.delete( + "/running_interactive_services/{service_uuid}", + status_code=status.HTTP_204_NO_CONTENT, +) +async def stop_service( + the_app: Annotated[FastAPI, Depends(get_app)], + service_uuid: UUID, + save_state: bool = True, +) -> None: + _logger.debug( + "Client does stop_service with service_uuid %s", + service_uuid, + ) + try: + await producer.stop_service( + the_app, node_uuid=f"{service_uuid}", save_state=save_state + ) + + except ServiceUUIDNotFoundError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err diff --git a/services/director/src/simcore_service_director/api/rest/_services.py b/services/director/src/simcore_service_director/api/rest/_services.py new file mode 100644 index 00000000000..a82699331ce --- /dev/null +++ b/services/director/src/simcore_service_director/api/rest/_services.py @@ -0,0 +1,124 @@ +import logging +from typing import Annotated, Any + +from fastapi import APIRouter, Depends, FastAPI, HTTPException, status +from models_library.generics import Envelope +from models_library.services_enums import ServiceType +from models_library.services_types import ServiceKey, ServiceVersion +from pydantic import BaseModel +from servicelib.fastapi.dependencies import get_app + +from ... import registry_proxy +from ...core.errors import RegistryConnectionError, ServiceNotAvailableError + +router = APIRouter() + +_logger = logging.getLogger(__name__) + + +class _ErrorMessage(BaseModel): + message: str + + +@router.get( + "/services", + response_model=Envelope[list[dict[str, Any]]], + responses={ + status.HTTP_401_UNAUTHORIZED: { + "model": _ErrorMessage, + "description": "Could not connect with Docker Registry", + }, + status.HTTP_500_INTERNAL_SERVER_ERROR: { + "model": _ErrorMessage, + "description": "Unexpected error", + }, + }, +) +async def list_services( + the_app: Annotated[FastAPI, Depends(get_app)], + service_type: ServiceType | None = None, +) -> Envelope[list[dict[str, Any]]]: + _logger.debug( + "Client does list_services request with service_type %s", + service_type, + ) + try: + services: list[dict[str, Any]] = [] + if not service_type: + services = await registry_proxy.list_services( + the_app, registry_proxy.ServiceType.ALL + ) + elif service_type is ServiceType.COMPUTATIONAL: + services = await registry_proxy.list_services( + the_app, registry_proxy.ServiceType.COMPUTATIONAL + ) + elif service_type is ServiceType.DYNAMIC: + services = await registry_proxy.list_services( + the_app, registry_proxy.ServiceType.DYNAMIC + ) + # NOTE: the validation is done in the catalog. This entrypoint IS and MUST BE only used by the catalog!! 
+ # NOTE2: the catalog will directly talk to the registry see case #2165 [https://github.com/ITISFoundation/osparc-simcore/issues/2165] + # services = node_validator.validate_nodes(services) + return Envelope[list[dict[str, Any]]](data=services) + except RegistryConnectionError as err: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{err}" + ) from err + + +# NOTE: be careful that /labels must be defined before the more generic get_service +@router.get("/services/{service_key:path}/{service_version}/labels") +async def list_service_labels( + the_app: Annotated[FastAPI, Depends(get_app)], + service_key: ServiceKey, + service_version: ServiceVersion, +) -> Envelope[dict[str, Any]]: + # NOTE: avoid using this directly via `director` service, call it via `catalog` service + _logger.debug( + "Retrieving service labels with service_key %s, service_version %s", + service_key, + service_version, + ) + try: + service_labels, _ = await registry_proxy.get_image_labels( + the_app, service_key, service_version + ) + return Envelope[dict[str, Any]](data=service_labels) + + except ServiceNotAvailableError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err + + except RegistryConnectionError as err: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{err}" + ) from err + + +@router.get("/services/{service_key:path}/{service_version}") +async def get_service( + the_app: Annotated[FastAPI, Depends(get_app)], + service_key: ServiceKey, + service_version: ServiceVersion, +) -> Envelope[list[dict[str, Any]]]: + _logger.debug( + "Client does get_service with service_key %s, service_version %s", + service_key, + service_version, + ) + try: + services = [ + await registry_proxy.get_image_details( + the_app, service_key, service_version + ) + ] + return Envelope[list[dict[str, Any]]](data=services) + except ServiceNotAvailableError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err + except RegistryConnectionError as err: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{err}" + ) from err diff --git a/services/director/src/simcore_service_director/api/rest/routes.py b/services/director/src/simcore_service_director/api/rest/routes.py new file mode 100644 index 00000000000..907b3d2bc74 --- /dev/null +++ b/services/director/src/simcore_service_director/api/rest/routes.py @@ -0,0 +1,29 @@ +from typing import Final + +from fastapi import APIRouter, FastAPI, HTTPException +from servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +from . 
import _health, _running_interactive_services, _services + +_V0_VTAG: Final[str] = "v0" + + +def setup_api_routes(app: FastAPI): + """ + Composes resources/sub-resources routers + """ + + app.include_router(_health.router, tags=["operations"]) + app.include_router(_health.router, tags=["operations"], prefix=f"/{_V0_VTAG}") + + # include the rest under /vX + api_router = APIRouter(prefix=f"/{_V0_VTAG}") + api_router.include_router(_services.router, tags=["services"]) + api_router.include_router(_running_interactive_services.router, tags=["services"]) + app.include_router(api_router) + + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) diff --git a/services/director/src/simcore_service_director/api/v0/openapi.yaml b/services/director/src/simcore_service_director/api/v0/openapi.yaml index 3e9f05493f2..daf98532f4d 100644 --- a/services/director/src/simcore_service_director/api/v0/openapi.yaml +++ b/services/director/src/simcore_service_director/api/v0/openapi.yaml @@ -466,6 +466,11 @@ paths: - default - items additionalProperties: true + image_digest: + title: Image Manifest digest + description: Provides a unique footprint (hash) of the image manifest + type: string + example: sha256:b7c8f6a401cb12d7fe36970b6927e03cb429b395fc9f2b0104291e12b81a5100 error: nullable: true default: null @@ -905,6 +910,11 @@ paths: - default - items additionalProperties: true + image_digest: + title: Image Manifest digest + description: Provides a unique footprint (hash) of the image manifest + type: string + example: sha256:b7c8f6a401cb12d7fe36970b6927e03cb429b395fc9f2b0104291e12b81a5100 error: nullable: true default: null @@ -2758,6 +2768,12 @@ components: - default - items additionalProperties: true + image_digest: + title: Image Manifest digest + description: Provides a unique footprint (hash) of the image manifest + type: string + example: sha256:b7c8f6a401cb12d7fe36970b6927e03cb429b395fc9f2b0104291e12b81a5100 + error: nullable: true default: null diff --git a/services/director/src/simcore_service_director/api/v0/schemas/node-meta-v0.0.1.json b/services/director/src/simcore_service_director/api/v0/schemas/node-meta-v0.0.1.json index a47f6c2ad3b..388939d5716 100644 --- a/services/director/src/simcore_service_director/api/v0/schemas/node-meta-v0.0.1.json +++ b/services/director/src/simcore_service_director/api/v0/schemas/node-meta-v0.0.1.json @@ -42,6 +42,10 @@ "0.0.1" ] }, + "version_display": { + "type": "string", + "description": "human readable version" + }, "type": { "type": "string", "description": "service type", @@ -472,6 +476,13 @@ ] } } + }, + "image_digest": { + "type": "string", + "description": "Image manifest digest. 
Provides a 'footprint' for the service image", + "examples": [ + "sha256:b7c8f6a401cb12d7fe36970b6927e03cb429b395fc9f2b0104291e12b81a5100" + ] } } } diff --git a/services/director/src/simcore_service_director/api/v0/schemas/project-v0.0.1.json b/services/director/src/simcore_service_director/api/v0/schemas/project-v0.0.1.json index 8c178845ccb..9b5a5552502 100644 --- a/services/director/src/simcore_service_director/api/v0/schemas/project-v0.0.1.json +++ b/services/director/src/simcore_service_director/api/v0/schemas/project-v0.0.1.json @@ -763,6 +763,33 @@ "type": "object", "title": "Quality", "description": "Object containing Quality Assessment related data" + }, + "workspaceId": { + "type": ["integer", "null"] + }, + "type": { + "type": "string", + "description": "project type", + "enum": [ + "STANDARD", + "TEMPLATE" + ] + }, + "templateType": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "project template type", + "enum": [ + "TEMPLATE", + "HYPERTOOL", + "TUTORIAL" + ] + } + ] } } } diff --git a/services/director/src/simcore_service_director/cache_request_decorator.py b/services/director/src/simcore_service_director/cache_request_decorator.py deleted file mode 100644 index 431a7216e90..00000000000 --- a/services/director/src/simcore_service_director/cache_request_decorator.py +++ /dev/null @@ -1,31 +0,0 @@ -from functools import wraps -from typing import Coroutine, Dict, Tuple - -from aiohttp import web -from simcore_service_director import config - - -def cache_requests(func: Coroutine, no_cache: bool = False): - @wraps(func) - async def wrapped( - app: web.Application, url: str, method: str, *args, **kwargs - ) -> Tuple[Dict, Dict]: - is_cache_enabled = config.DIRECTOR_REGISTRY_CACHING and method == "GET" - cache_key = f"{url}:{method}" - if is_cache_enabled and not no_cache: - cache_data = app[config.APP_REGISTRY_CACHE_DATA_KEY] - if cache_key in cache_data: - return cache_data[cache_key] - - resp_data, resp_headers = await func(app, url, method, *args, **kwargs) - - if is_cache_enabled and not no_cache: - cache_data = app[config.APP_REGISTRY_CACHE_DATA_KEY] - cache_data[cache_key] = (resp_data, resp_headers) - - return (resp_data, resp_headers) - - return wrapped - - -__all__ = ["cache_requests"] diff --git a/services/director/src/simcore_service_director/cli.py b/services/director/src/simcore_service_director/cli.py new file mode 100644 index 00000000000..f2e16f6b97e --- /dev/null +++ b/services/director/src/simcore_service_director/cli.py @@ -0,0 +1,26 @@ +import logging + +import typer +from settings_library.utils_cli import create_settings_command, create_version_callback + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + +main = typer.Typer(name=PROJECT_NAME) + +main.command()( + create_settings_command(settings_cls=ApplicationSettings, logger=_logger) +) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def run(): + """Runs application""" + typer.secho("Sorry, this entrypoint is intentionally disabled. 
Use instead") + typer.secho( + "$ uvicorn simcore_service_director.main:the_app", + fg=typer.colors.BLUE, + ) diff --git a/services/director/src/simcore_service_director/config.py b/services/director/src/simcore_service_director/config.py deleted file mode 100644 index 187c963a853..00000000000 --- a/services/director/src/simcore_service_director/config.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Director service configuration -""" - -import logging -import os -from distutils.util import strtobool -from typing import Dict, Optional - -from servicelib.client_session import ( # pylint: disable=no-name-in-module - APP_CLIENT_SESSION_KEY, -) - -LOGLEVEL_STR = os.environ.get("LOGLEVEL", "WARNING").upper() -log_level = getattr(logging, LOGLEVEL_STR) -logging.basicConfig( - level=log_level, - format="%(levelname)s:%(name)s-%(lineno)d: %(message)s", -) -logging.root.setLevel(log_level) - -# TODO: debug mode is define by the LOG-LEVEL and not the other way around. I leave it like that for the moment ... -DEBUG_MODE = log_level == logging.DEBUG - -API_VERSION: str = "v0" -API_ROOT: str = "api" - - -def _from_env_with_default(env: str, python_type, default): - env_value = python_type(os.environ.get(env, default)) - - return default if env_value <= 0 else env_value - - -# NOTE: these settings must be in sync with settings-library: comp_services.py (since the director is frozen) -DEFAULT_MAX_NANO_CPUS: int = _from_env_with_default( - "DEFAULT_MAX_NANO_CPUS", int, 1 * pow(10, 9) -) -DEFAULT_MAX_MEMORY: int = _from_env_with_default( - "DEFAULT_MAX_MEMORY", int, 2 * pow(1024, 3) -) # 2 GiB - -SERVICE_RUNTIME_SETTINGS: str = "simcore.service.settings" -SERVICE_REVERSE_PROXY_SETTINGS: str = "simcore.service.reverse-proxy-settings" -SERVICE_RUNTIME_BOOTSETTINGS: str = "simcore.service.bootsettings" - -ORG_LABELS_TO_SCHEMA_LABELS = { - "org.label-schema.build-date": "build_date", - "org.label-schema.vcs-ref": "vcs_ref", - "org.label-schema.vcs-url": "vcs_url", -} - -DIRECTOR_REGISTRY_CACHING: bool = strtobool( - os.environ.get("DIRECTOR_REGISTRY_CACHING", "True") -) -DIRECTOR_REGISTRY_CACHING_TTL: int = int( - os.environ.get("DIRECTOR_REGISTRY_CACHING_TTL", 15 * 60) -) - -DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS: str = os.environ.get( - "DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS", "" -) - -# for passing self-signed certificate to spawned services -DIRECTOR_SELF_SIGNED_SSL_SECRET_ID: str = os.environ.get( - "DIRECTOR_SELF_SIGNED_SSL_SECRET_ID", "" -) -DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME: str = os.environ.get( - "DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME", "" -) -DIRECTOR_SELF_SIGNED_SSL_FILENAME: str = os.environ.get( - "DIRECTOR_SELF_SIGNED_SSL_FILENAME", "" -) - -DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS: int = int( - os.environ.get("DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS", 10) -) -DIRECTOR_SERVICES_RESTART_POLICY_DELAY_S: int = int( - os.environ.get("DIRECTOR_SERVICES_RESTART_POLICY_DELAY_S", 12) -) - -DIRECTOR_SERVICES_STATE_MONITOR_S: int = int( - os.environ.get("DIRECTOR_SERVICES_STATE_MONITOR_S", 8) -) - -TRAEFIK_SIMCORE_ZONE: str = os.environ.get( - "TRAEFIK_SIMCORE_ZONE", "internal_simcore_stack" -) -APP_REGISTRY_CACHE_DATA_KEY: str = __name__ + "_registry_cache_data" - -REGISTRY_AUTH: bool = strtobool(os.environ.get("REGISTRY_AUTH", "False")) -REGISTRY_USER: str = os.environ.get("REGISTRY_USER", "") -REGISTRY_PW: str = os.environ.get("REGISTRY_PW", "") -REGISTRY_URL: str = os.environ.get("REGISTRY_URL", "") -REGISTRY_PATH: str = os.environ.get("REGISTRY_PATH", None) or os.environ.get( - "REGISTRY_URL", "" -) 
# This is useful in case of a local registry, where the registry url (path) is relative to the host docker engine -REGISTRY_SSL: bool = strtobool(os.environ.get("REGISTRY_SSL", "True")) - -EXTRA_HOSTS_SUFFIX: str = os.environ.get("EXTRA_HOSTS_SUFFIX", "undefined") - -# these are the envs passed to the dynamic services by default -SERVICES_DEFAULT_ENVS: Dict[str, str] = { - "POSTGRES_ENDPOINT": os.environ.get( - "POSTGRES_ENDPOINT", "undefined postgres endpoint" - ), - "POSTGRES_USER": os.environ.get("POSTGRES_USER", "undefined postgres user"), - "POSTGRES_PASSWORD": os.environ.get( - "POSTGRES_PASSWORD", "undefined postgres password" - ), - "POSTGRES_DB": os.environ.get("POSTGRES_DB", "undefined postgres db"), - "STORAGE_ENDPOINT": os.environ.get( - "STORAGE_ENDPOINT", "undefined storage endpoint" - ), -} - -# some services need to know the published host to be functional (paraview) -# TODO: please review if needed -PUBLISHED_HOST_NAME: str = os.environ.get("PUBLISHED_HOST_NAME", "") - -SWARM_STACK_NAME: str = os.environ.get("SWARM_STACK_NAME", "undefined-please-check") - -# used when in devel mode vs release mode -NODE_SCHEMA_LOCATION: str = os.environ.get( - "NODE_SCHEMA_LOCATION", f"{API_ROOT}/{API_VERSION}/schemas/node-meta-v0.0.1.json" -) -# used to find the right network name -SIMCORE_SERVICES_NETWORK_NAME: Optional[str] = os.environ.get( - "SIMCORE_SERVICES_NETWORK_NAME" -) -# useful when developing with an alternative registry namespace -SIMCORE_SERVICES_PREFIX: str = os.environ.get( - "SIMCORE_SERVICES_PREFIX", "simcore/services" -) - -# monitoring -# NOTE: keep disabled for unit-testing otherwise mocks will not hold -MONITORING_ENABLED: bool = strtobool(os.environ.get("MONITORING_ENABLED", "False")) - -# tracing -TRACING_ENABLED: bool = strtobool(os.environ.get("TRACING_ENABLED", "True")) -TRACING_ZIPKIN_ENDPOINT: str = os.environ.get( - "TRACING_ZIPKIN_ENDPOINT", "http://jaeger:9411" -) - -# resources: not taken from servicelib.resources since the director uses a fixed hash of that library -CPU_RESOURCE_LIMIT_KEY = "SIMCORE_NANO_CPUS_LIMIT" -MEM_RESOURCE_LIMIT_KEY = "SIMCORE_MEMORY_BYTES_LIMIT" - -__all__ = ["APP_CLIENT_SESSION_KEY"] diff --git a/services/director/src/simcore_service_director/constants.py b/services/director/src/simcore_service_director/constants.py new file mode 100644 index 00000000000..3c94c501d61 --- /dev/null +++ b/services/director/src/simcore_service_director/constants.py @@ -0,0 +1,17 @@ +from typing import Final + +SERVICE_RUNTIME_SETTINGS: Final[str] = "simcore.service.settings" +SERVICE_REVERSE_PROXY_SETTINGS: Final[str] = "simcore.service.reverse-proxy-settings" +SERVICE_RUNTIME_BOOTSETTINGS: Final[str] = "simcore.service.bootsettings" + + +CPU_RESOURCE_LIMIT_KEY: Final[str] = "SIMCORE_NANO_CPUS_LIMIT" +MEM_RESOURCE_LIMIT_KEY: Final[str] = "SIMCORE_MEMORY_BYTES_LIMIT" + +APP_REGISTRY_CACHE_DATA_KEY: Final[str] = __name__ + "_registry_cache_data" + +API_ROOT: Final[str] = "api" + +DIRECTOR_SIMCORE_SERVICES_PREFIX: Final[str] = "simcore/services" + +DATETIME_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%S.%f" diff --git a/services/director/src/simcore_service_director/core/__init__.py b/services/director/src/simcore_service_director/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director/src/simcore_service_director/core/application.py b/services/director/src/simcore_service_director/core/application.py new file mode 100644 index 00000000000..0baa557506f --- /dev/null +++ 
b/services/director/src/simcore_service_director/core/application.py @@ -0,0 +1,84 @@ +import logging +from typing import Final + +from fastapi import FastAPI +from servicelib.async_utils import cancel_sequential_workers +from servicelib.fastapi.client_session import setup_client_session +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) + +from .._meta import ( + API_VERSION, + API_VTAG, + APP_FINISHED_BANNER_MSG, + APP_NAME, + APP_STARTED_BANNER_MSG, +) +from ..api.rest.routes import setup_api_routes +from ..instrumentation import setup as setup_instrumentation +from ..registry_proxy import setup as setup_registry +from .settings import ApplicationSettings + +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS: Final[tuple[str]] = ("werkzeug",) + +_logger = logging.getLogger(__name__) + + +def create_app(settings: ApplicationSettings) -> FastAPI: + # keep mostly quiet noisy loggers + quiet_level: int = max( + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + ) + for name in _NOISY_LOGGERS: + logging.getLogger(name).setLevel(quiet_level) + + _logger.info("app settings: %s", settings.model_dump_json(indent=1)) + + app = FastAPI( + debug=settings.DIRECTOR_DEBUG, + title=APP_NAME, + description="Director-v0 service", + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url="/dev/doc", + redoc_url=None, # default disabled + ) + # STATE + app.state.settings = settings + assert app.state.settings.API_VERSION == API_VERSION # nosec + + # PLUGINS SETUP + if app.state.settings.DIRECTOR_TRACING: + setup_tracing(app, app.state.settings.DIRECTOR_TRACING, APP_NAME) + + setup_api_routes(app) + + setup_instrumentation(app) + + setup_client_session( + app, + max_keepalive_connections=settings.DIRECTOR_REGISTRY_CLIENT_MAX_KEEPALIVE_CONNECTIONS, + default_timeout=settings.DIRECTOR_REGISTRY_CLIENT_TIMEOUT, + ) + setup_registry(app) + + if app.state.settings.DIRECTOR_TRACING: + initialize_fastapi_app_tracing(app) + + # ERROR HANDLERS + + # EVENTS + async def _on_startup() -> None: + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + + async def _on_shutdown() -> None: + await cancel_sequential_workers() + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + return app diff --git a/services/director/src/simcore_service_director/core/errors.py b/services/director/src/simcore_service_director/core/errors.py new file mode 100644 index 00000000000..c7113baa402 --- /dev/null +++ b/services/director/src/simcore_service_director/core/errors.py @@ -0,0 +1,37 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class DirectorRuntimeError(OsparcErrorMixin, RuntimeError): + msg_template: str = "Director-v0 unexpected error: {msg}" + + +class ConfigurationError(DirectorRuntimeError): + msg_template: str = "Application misconfiguration: {msg}" + + +class GenericDockerError(DirectorRuntimeError): + msg_template: str = "Docker error: {err}" + + +class ServiceNotAvailableError(DirectorRuntimeError): + msg_template: str = "Service {service_name}:{service_tag} is not available" + + +class ServiceUUIDNotFoundError(DirectorRuntimeError): + msg_template: str = "Service with uuid {service_uuid} was not found" + + +class ServiceUUIDInUseError(DirectorRuntimeError): + msg_template: str = "Service with uuid {service_uuid} is already in use" + + +class ServiceStateSaveError(DirectorRuntimeError): + 
msg_template: str = "Failed to save state of service {service_uuid}: {reason}" + + +class RegistryConnectionError(DirectorRuntimeError): + msg_template: str = "Unexpected connection error while accessing registry: {msg}" + + +class ServiceStartTimeoutError(DirectorRuntimeError): + msg_template: str = "Service {service_name}:{service_uuid} failed to start in time" diff --git a/services/director/src/simcore_service_director/core/settings.py b/services/director/src/simcore_service_director/core/settings.py new file mode 100644 index 00000000000..5560de876fa --- /dev/null +++ b/services/director/src/simcore_service_director/core/settings.py @@ -0,0 +1,153 @@ +import datetime +import warnings +from typing import cast + +from fastapi import FastAPI +from models_library.basic_types import LogLevel, PortInt, VersionTag +from pydantic import AliasChoices, Field, NonNegativeInt, PositiveInt, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.docker_registry import RegistrySettings +from settings_library.postgres import PostgresSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + +from .._meta import API_VERSION, API_VTAG, APP_NAME + + +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + API_VERSION: str = API_VERSION + APP_NAME: str = APP_NAME + API_VTAG: VersionTag = API_VTAG + + DIRECTOR_DEBUG: bool = Field( + default=False, + description="Debug mode", + validation_alias=AliasChoices("DIRECTOR_DEBUG", "DEBUG"), + ) + DIRECTOR_REMOTE_DEBUG_PORT: PortInt = 3000 + + DIRECTOR_LOGLEVEL: LogLevel = Field( + ..., validation_alias=AliasChoices("DIRECTOR_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL") + ) + DIRECTOR_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field( + ..., + validation_alias=AliasChoices( + "DIRECTOR_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. 
WARNING: make sure it is disabled if you want to have structured logs!", + ) + DIRECTOR_LOG_FILTER_MAPPING: dict[LoggerName, list[MessageSubstring]] = Field( + default_factory=dict, + validation_alias=AliasChoices( + "DIRECTOR_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ) + DIRECTOR_TRACING: TracingSettings | None = Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ) + + DIRECTOR_DEFAULT_MAX_NANO_CPUS: NonNegativeInt = Field(default=0) + DIRECTOR_DEFAULT_MAX_MEMORY: NonNegativeInt = Field(default=0) + DIRECTOR_REGISTRY_CACHING: bool = Field( + ..., description="cache the docker registry internally" + ) + DIRECTOR_REGISTRY_CACHING_TTL: datetime.timedelta = Field( + ..., description="cache time to live value (defaults to 15 minutes)" + ) + + DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS: str | None + + DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS: dict[str, str] + + DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS: int = 10 + DIRECTOR_SERVICES_RESTART_POLICY_DELAY_S: int = 12 + DIRECTOR_SERVICES_STATE_MONITOR_S: int = 8 + + DIRECTOR_TRAEFIK_SIMCORE_ZONE: str = Field( + ..., + validation_alias=AliasChoices( + "DIRECTOR_TRAEFIK_SIMCORE_ZONE", "TRAEFIK_SIMCORE_ZONE" + ), + ) + + DIRECTOR_REGISTRY: RegistrySettings = Field( + description="settings for the private registry deployed with the platform", + json_schema_extra={"auto_default_from_env": True}, + ) + + DIRECTOR_POSTGRES: PostgresSettings = Field( + ..., json_schema_extra={"auto_default_from_env": True} + ) + STORAGE_ENDPOINT: str = Field(..., description="storage endpoint without scheme") + + DIRECTOR_PUBLISHED_HOST_NAME: str = Field( + ..., + validation_alias=AliasChoices( + "DIRECTOR_PUBLISHED_HOST_NAME", "PUBLISHED_HOST_NAME" + ), + ) + + DIRECTOR_SWARM_STACK_NAME: str = Field( + ..., + validation_alias=AliasChoices("DIRECTOR_SWARM_STACK_NAME", "SWARM_STACK_NAME"), + ) + + DIRECTOR_SIMCORE_SERVICES_NETWORK_NAME: str | None = Field( + # used to find the right network name + ..., + validation_alias=AliasChoices( + "DIRECTOR_SIMCORE_SERVICES_NETWORK_NAME", + "SIMCORE_SERVICES_NETWORK_NAME", + ), + ) + + DIRECTOR_MONITORING_ENABLED: bool = Field( + ..., + validation_alias=AliasChoices( + "DIRECTOR_MONITORING_ENABLED", "MONITORING_ENABLED" + ), + ) + + DIRECTOR_REGISTRY_CLIENT_MAX_KEEPALIVE_CONNECTIONS: NonNegativeInt = 5 + DIRECTOR_REGISTRY_CLIENT_TIMEOUT: datetime.timedelta = datetime.timedelta( + seconds=20 + ) + DIRECTOR_REGISTRY_CLIENT_MAX_CONCURRENT_CALLS: PositiveInt = 20 + DIRECTOR_REGISTRY_CLIENT_MAX_NUMBER_OF_RETRIEVED_OBJECTS: PositiveInt = 30 + + @field_validator("DIRECTOR_REGISTRY_CLIENT_TIMEOUT") + @classmethod + def _check_positive(cls, value: datetime.timedelta) -> datetime.timedelta: + if value.total_seconds() < 0: + msg = "DIRECTOR_REGISTRY_CLIENT_TIMEOUT must be positive" + raise ValueError(msg) + return value + + @field_validator("DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS") + @classmethod + def _validate_substitutions(cls, v): + if v: + warnings.warn( # noqa: B028 + "Generic resources will be replaced by the following " + f"placement constraints {v}. 
This is a workaround " + "for https://github.com/moby/swarmkit/pull/3162", + UserWarning, + ) + if len(v) != len(set(v.values())): + msg = f"Dictionary values must be unique, provided: {v}" + raise ValueError(msg) + + return v + + @field_validator("DIRECTOR_LOGLEVEL", mode="before") + @classmethod + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +def get_application_settings(app: FastAPI) -> ApplicationSettings: + return cast(ApplicationSettings, app.state.settings) diff --git a/services/director/src/simcore_service_director/docker_utils.py b/services/director/src/simcore_service_director/docker_utils.py index 56dfba1bc3a..7c1a832141a 100644 --- a/services/director/src/simcore_service_director/docker_utils.py +++ b/services/director/src/simcore_service_director/docker_utils.py @@ -1,40 +1,37 @@ import logging +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager import aiodocker -from asyncio_extras import async_contextmanager -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -@async_contextmanager -async def docker_client() -> aiodocker.docker.Docker: +@asynccontextmanager +async def docker_client() -> AsyncIterator[aiodocker.docker.Docker]: try: client = aiodocker.Docker() yield client except aiodocker.exceptions.DockerError: - log.exception(msg="Unexpected error with docker client") + _logger.exception(msg="Unexpected error with docker client") raise finally: await client.close() async def swarm_get_number_nodes() -> int: - async with docker_client() as client: # pylint: disable=not-async-context-manager + async with docker_client() as client: nodes = await client.nodes.list() return len(nodes) async def swarm_has_manager_nodes() -> bool: - async with docker_client() as client: # pylint: disable=not-async-context-manager + async with docker_client() as client: nodes = await client.nodes.list(filters={"role": "manager"}) - if nodes: - return True - return False + return bool(nodes) async def swarm_has_worker_nodes() -> bool: - async with docker_client() as client: # pylint: disable=not-async-context-manager + async with docker_client() as client: nodes = await client.nodes.list(filters={"role": "worker"}) - if nodes: - return True - return False + return bool(nodes) diff --git a/services/director/src/simcore_service_director/exceptions.py b/services/director/src/simcore_service_director/exceptions.py deleted file mode 100644 index cdb25145cb2..00000000000 --- a/services/director/src/simcore_service_director/exceptions.py +++ /dev/null @@ -1,87 +0,0 @@ -""" Defines the different exceptions that may arise in the director - - -TODO: Exceptions should provide all info to create Error instances of the API model -For instance, assume there is a ficticious exception class FieldValidationError, then it would -translate into something like - -// response - 422 -{ - "error": { - "status": 422, - "error": "FIELDS_VALIDATION_ERROR", - "description": "One or more fields raised validation errors." - "fields": { - "email": "Invalid email address.", - "password": "Password too short." 
- } - } -} -""" - -from typing import Optional - -from aiodocker.exceptions import DockerError - - -class DirectorException(Exception): - """Basic exception""" - - def __init__(self, msg: Optional[str] = None): - super().__init__(msg or "Unexpected error was triggered") - - -class GenericDockerError(DirectorException): - """Generic docker library error""" - - def __init__(self, msg: str, original_exception: DockerError): - super().__init__(msg + f": {original_exception.message}") - self.original_exception = original_exception - - -class ServiceNotAvailableError(DirectorException): - """Service not found""" - - def __init__(self, service_name: str, service_tag: Optional[str] = None): - service_tag = service_tag or "UNDEFINED" - super().__init__(f"The service {service_name}:{service_tag} does not exist") - self.service_name = service_name - self.service_tag = service_tag - - -class ServiceUUIDNotFoundError(DirectorException): - """Service not found""" - - def __init__(self, service_uuid: str): - super().__init__(f"The service with uuid {service_uuid} was not found") - self.service_uuid = service_uuid - - -class ServiceUUIDInUseError(DirectorException): - """Service UUID is already in use""" - - def __init__(self, service_uuid: str): - super().__init__(f"The service uuid {service_uuid} is already in use") - self.service_uuid = service_uuid - - -class ServiceStateSaveError(DirectorException): - def __init__(self, service_uuid: str, reason: str): - super().__init__(f"Failed to save state of service {service_uuid}: {reason}") - self.service_uuid = service_uuid - - -class RegistryConnectionError(DirectorException): - """Error while connecting to the docker regitry""" - - def __init__(self, msg: str): - super().__init__(msg or "Unexpected connection error while accessing registry") - - -class ServiceStartTimeoutError(DirectorException): - """The service was created but never run (time-out)""" - - def __init__(self, service_name: str, service_uuid: str): - super().__init__(f"Service {service_name}:{service_uuid} failed to start ") - self.service_name = service_name - self.service_uuid = service_uuid diff --git a/services/director/src/simcore_service_director/instrumentation.py b/services/director/src/simcore_service_director/instrumentation.py new file mode 100644 index 00000000000..40ee74d831a --- /dev/null +++ b/services/director/src/simcore_service_director/instrumentation.py @@ -0,0 +1,85 @@ +from dataclasses import dataclass, field +from typing import cast + +from fastapi import FastAPI +from prometheus_client import CollectorRegistry, Counter +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) +from servicelib.instrumentation import MetricsBase, get_metrics_namespace + +from ._meta import APP_NAME +from .core.errors import ConfigurationError +from .core.settings import get_application_settings + +MONITOR_SERVICE_STARTED_LABELS: list[str] = [ + "service_key", + "service_tag", + "simcore_user_agent", +] + +MONITOR_SERVICE_STOPPED_LABELS: list[str] = [ + "service_key", + "service_tag", + "result", + "simcore_user_agent", +] + + +@dataclass(slots=True, kw_only=True) +class DirectorV0Instrumentation(MetricsBase): + registry: CollectorRegistry + + services_started: Counter = field(init=False) + services_stopped: Counter = field(init=False) + + def __post_init__(self) -> None: + self.services_started = Counter( + name="services_started_total", + documentation="Counts the services started", + labelnames=MONITOR_SERVICE_STARTED_LABELS, + 
namespace=get_metrics_namespace(APP_NAME), + subsystem=self.subsystem, + registry=self.registry, + ) + + self.services_stopped = Counter( + name="services_stopped_total", + documentation="Counts the services stopped", + labelnames=MONITOR_SERVICE_STOPPED_LABELS, + namespace=get_metrics_namespace(APP_NAME), + subsystem=self.subsystem, + registry=self.registry, + ) + + +def setup(app: FastAPI) -> None: + app_settings = get_application_settings(app) + if not app_settings.DIRECTOR_MONITORING_ENABLED: + return + + # NOTE: this must be setup before application startup + registry = setup_prometheus_instrumentation(app) + + async def on_startup() -> None: + metrics_subsystem = "" + app.state.instrumentation = DirectorV0Instrumentation( + registry=registry, subsystem=metrics_subsystem + ) + + async def on_shutdown() -> None: ... + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_instrumentation(app: FastAPI) -> DirectorV0Instrumentation: + if not app.state.instrumentation: + raise ConfigurationError( + msg="Instrumentation not setup. Please check the configuration." + ) + return cast(DirectorV0Instrumentation, app.state.instrumentation) + + +def has_instrumentation(app: FastAPI) -> bool: + return hasattr(app.state, "instrumentation") diff --git a/services/director/src/simcore_service_director/main.py b/services/director/src/simcore_service_director/main.py index 0bf6edccc57..da0c480065f 100644 --- a/services/director/src/simcore_service_director/main.py +++ b/services/director/src/simcore_service_director/main.py @@ -1,42 +1,24 @@ -#!/usr/bin/env python3 -import logging - -from aiohttp import web - -# NOTE: notice that servicelib is frozen to c8669fb52659b684514fefa4f3b4599f57f276a0 -# pylint: disable=no-name-in-module -from servicelib.client_session import persistent_client_session -from simcore_service_director import registry_cache_task, resources -from simcore_service_director.monitoring import setup_app_monitoring -from simcore_service_director.rest import routing - -from .registry_proxy import setup_registry - -log = logging.getLogger(__name__) - +"""Main application to be deployed by uvicorn (or equivalent) server -def setup_app() -> web.Application: - api_spec_path = resources.get_path(resources.RESOURCE_OPEN_API) - app = routing.create_web_app(api_spec_path.parent, api_spec_path.name) +""" - # NOTE: ensure client session is context is run first, then any further get_client_sesions will be correctly closed - app.cleanup_ctx.append(persistent_client_session) - app.cleanup_ctx.append(setup_registry) - - registry_cache_task.setup(app) - - setup_app_monitoring(app, "simcore_service_director") - - # NOTE: removed tracing from director. 
Users old version of servicelib and - # in any case this service will be completely replaced - - return app +import logging +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_director.core.application import create_app +from simcore_service_director.core.settings import ApplicationSettings -def main() -> None: - app = setup_app() - web.run_app(app, port=8080) +_the_settings = ApplicationSettings.create_from_envs() +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=_the_settings.DIRECTOR_LOGLEVEL) +logging.root.setLevel(_the_settings.DIRECTOR_LOGLEVEL) +config_all_loggers( + log_format_local_dev_enabled=_the_settings.DIRECTOR_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=_the_settings.DIRECTOR_LOG_FILTER_MAPPING, + tracing_settings=_the_settings.DIRECTOR_TRACING, +) -if __name__ == "__main__": - main() +# SINGLETON FastAPI app +the_app: FastAPI = create_app(_the_settings) diff --git a/services/director/src/simcore_service_director/models/__init__.py b/services/director/src/simcore_service_director/models/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/director/src/simcore_service_director/monitoring.py b/services/director/src/simcore_service_director/monitoring.py deleted file mode 100644 index 403e4ef10ad..00000000000 --- a/services/director/src/simcore_service_director/monitoring.py +++ /dev/null @@ -1,32 +0,0 @@ -import prometheus_client -from aiohttp import web -from prometheus_client import CONTENT_TYPE_LATEST -from prometheus_client.registry import CollectorRegistry - - -from servicelib.monitor_services import ( # pylint: disable=no-name-in-module - add_instrumentation as add_services_instrumentation, -) - -from . import config - -kCOLLECTOR_REGISTRY = f"{__name__}.collector_registry" - - -async def metrics_handler(request: web.Request): - # TODO: prometheus_client.generate_latest blocking! 
-> Consider https://github.com/claws/aioprometheus - reg = request.app[kCOLLECTOR_REGISTRY] - resp = web.Response(body=prometheus_client.generate_latest(registry=reg)) - resp.content_type = CONTENT_TYPE_LATEST - return resp - - -def setup_app_monitoring(app: web.Application, app_name: str) -> None: - if not config.MONITORING_ENABLED: - return - # app-scope registry - app[kCOLLECTOR_REGISTRY] = reg = CollectorRegistry(auto_describe=True) - - add_services_instrumentation(app, reg, app_name) - - app.router.add_get("/metrics", metrics_handler) diff --git a/services/director/src/simcore_service_director/producer.py b/services/director/src/simcore_service_director/producer.py index a34f06508fc..eb8e3550815 100644 --- a/services/director/src/simcore_service_director/producer.py +++ b/services/director/src/simcore_service_director/producer.py @@ -1,44 +1,50 @@ import asyncio -import json +import contextlib import logging import re -from datetime import datetime, timedelta -from distutils.version import StrictVersion +from datetime import timedelta from enum import Enum -from http import HTTPStatus from pprint import pformat -from typing import Dict, List, Optional, Tuple +from typing import Any, Final, cast import aiodocker -import aiohttp +import aiodocker.networks +import arrow +import httpx import tenacity -from aiohttp import ( - ClientConnectionError, - ClientError, - ClientResponse, - ClientResponseError, - ClientSession, - web, -) +from common_library.json_serialization import json_dumps, json_loads +from fastapi import FastAPI, status +from packaging.version import Version from servicelib.async_utils import run_sequentially_in_context -from servicelib.monitor_services import service_started, service_stopped -from tenacity import retry +from servicelib.docker_utils import to_datetime +from servicelib.fastapi.client_session import get_client_session +from settings_library.docker_registry import RegistrySettings +from tenacity import retry, wait_random_exponential from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_attempt -from tenacity.wait import wait_fixed -from . import config, docker_utils, exceptions, registry_proxy -from .config import ( - APP_CLIENT_SESSION_KEY, +from . 
import docker_utils, registry_proxy +from .constants import ( CPU_RESOURCE_LIMIT_KEY, MEM_RESOURCE_LIMIT_KEY, + SERVICE_REVERSE_PROXY_SETTINGS, + SERVICE_RUNTIME_BOOTSETTINGS, + SERVICE_RUNTIME_SETTINGS, +) +from .core.errors import ( + DirectorRuntimeError, + GenericDockerError, + ServiceNotAvailableError, + ServiceStartTimeoutError, + ServiceStateSaveError, + ServiceUUIDInUseError, + ServiceUUIDNotFoundError, ) -from .exceptions import ServiceStateSaveError +from .core.settings import ApplicationSettings, get_application_settings +from .instrumentation import get_instrumentation from .services_common import ServicesCommonSettings -from .system_utils import get_system_extra_hosts_raw -from .utils import parse_as_datetime -log = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) class ServiceState(Enum): @@ -50,39 +56,43 @@ class ServiceState(Enum): FAILED = "failed" -async def _create_auth() -> Dict[str, str]: - return {"username": config.REGISTRY_USER, "password": config.REGISTRY_PW} +async def _create_auth(registry_settings: RegistrySettings) -> dict[str, str]: + return { + "username": registry_settings.REGISTRY_USER, + "password": registry_settings.REGISTRY_PW.get_secret_value(), + } async def _check_node_uuid_available( client: aiodocker.docker.Docker, node_uuid: str ) -> None: - log.debug("Checked if UUID %s is already in use", node_uuid) + _logger.debug("Checked if UUID %s is already in use", node_uuid) # check if service with same uuid already exists try: # not filtering by "swarm_stack_name" label because it's safer list_of_running_services_w_uuid = await client.services.list( - filters={"label": "uuid=" + node_uuid} + filters={ + "label": f"{_to_simcore_runtime_docker_label_key('node_id')}={node_uuid}" + } ) - except aiodocker.exceptions.DockerError as err: - log.exception("Error while retrieving services list") - raise exceptions.GenericDockerError( - "Error while retrieving services", err - ) from err + except aiodocker.DockerError as err: + msg = "Error while retrieving services" + raise GenericDockerError(err=msg) from err if list_of_running_services_w_uuid: - raise exceptions.ServiceUUIDInUseError(node_uuid) - log.debug("UUID %s is free", node_uuid) + raise ServiceUUIDInUseError(service_uuid=node_uuid) + _logger.debug("UUID %s is free", node_uuid) -def _check_setting_correctness(setting: Dict) -> None: +def _check_setting_correctness(setting: dict) -> None: if "name" not in setting or "type" not in setting or "value" not in setting: - raise exceptions.DirectorException("Invalid setting in %s" % setting) + msg = f"Invalid setting in {setting}" + raise DirectorRuntimeError(msg=msg) -def _parse_mount_settings(settings: List[Dict]) -> List[Dict]: +def _parse_mount_settings(settings: list[dict]) -> list[dict]: mounts = [] for s in settings: - log.debug("Retrieved mount settings %s", s) + _logger.debug("Retrieved mount settings %s", s) mount = {} mount["ReadOnly"] = True if "ReadOnly" in s and s["ReadOnly"] in ["false", "False", False]: @@ -92,46 +102,59 @@ def _parse_mount_settings(settings: List[Dict]) -> List[Dict]: if field in s: mount[field] = s[field] else: - log.warning( + _logger.warning( "Mount settings have wrong format. 
Required keys [Source, Target, Type]" ) continue - log.debug("Append mount settings %s", mount) + _logger.debug("Append mount settings %s", mount) mounts.append(mount) return mounts -def _parse_env_settings(settings: List[str]) -> Dict: +_ENV_NUM_ELEMENTS: Final[int] = 2 + + +def _parse_env_settings(settings: list[str]) -> dict: envs = {} for s in settings: - log.debug("Retrieved env settings %s", s) + _logger.debug("Retrieved env settings %s", s) if "=" in s: parts = s.split("=") - if len(parts) == 2: + if len(parts) == _ENV_NUM_ELEMENTS: envs.update({parts[0]: parts[1]}) - log.debug("Parsed env settings %s", s) + _logger.debug("Parsed env settings %s", s) return envs async def _read_service_settings( - app: web.Application, key: str, tag: str, settings_name: str -) -> Dict: - image_labels = await registry_proxy.get_image_labels(app, key, tag) - settings = ( - json.loads(image_labels[settings_name]) if settings_name in image_labels else {} + app: FastAPI, key: str, tag: str, settings_name: str +) -> dict[str, Any] | list[Any] | None: + image_labels, _ = await registry_proxy.get_image_labels(app, key, tag) + settings: dict[str, Any] | list[Any] | None = ( + json_loads(image_labels[settings_name]) + if settings_name in image_labels + else None ) - log.debug("Retrieved %s settings: %s", settings_name, pformat(settings)) + _logger.debug("Retrieved %s settings: %s", settings_name, pformat(settings)) return settings +_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX: str = "io.simcore.runtime." + + +def _to_simcore_runtime_docker_label_key(key: str) -> str: + return f"{_SIMCORE_RUNTIME_DOCKER_LABEL_PREFIX}{key.replace('_', '-').lower()}" + + # pylint: disable=too-many-branches async def _create_docker_service_params( - app: web.Application, + app: FastAPI, + *, client: aiodocker.docker.Docker, service_key: str, service_tag: str, @@ -140,144 +163,171 @@ async def _create_docker_service_params( node_uuid: str, project_id: str, node_base_path: str, - internal_network_id: Optional[str], -) -> Dict: + internal_network_id: str | None, + request_simcore_user_agent: str, +) -> dict: # pylint: disable=too-many-statements + app_settings = get_application_settings(app) + service_parameters_labels = await _read_service_settings( - app, service_key, service_tag, config.SERVICE_RUNTIME_SETTINGS + app, service_key, service_tag, SERVICE_RUNTIME_SETTINGS ) reverse_proxy_settings = await _read_service_settings( - app, service_key, service_tag, config.SERVICE_REVERSE_PROXY_SETTINGS + app, service_key, service_tag, SERVICE_REVERSE_PROXY_SETTINGS ) service_name = registry_proxy.get_service_last_names(service_key) + "_" + node_uuid - log.debug("Converting labels to docker runtime parameters") - container_spec = { - "Image": f"{config.REGISTRY_PATH}/{service_key}:{service_tag}", + _logger.debug("Converting labels to docker runtime parameters") + service_default_envs = { + # old services expect POSTGRES_ENDPOINT as hostname:port + "POSTGRES_ENDPOINT": f"{app_settings.DIRECTOR_POSTGRES.POSTGRES_HOST}:{app_settings.DIRECTOR_POSTGRES.POSTGRES_PORT}", + "POSTGRES_USER": app_settings.DIRECTOR_POSTGRES.POSTGRES_USER, + "POSTGRES_PASSWORD": app_settings.DIRECTOR_POSTGRES.POSTGRES_PASSWORD.get_secret_value(), + "POSTGRES_DB": app_settings.DIRECTOR_POSTGRES.POSTGRES_DB, + "STORAGE_ENDPOINT": app_settings.STORAGE_ENDPOINT, + } + container_spec: dict[str, Any] = { + "Image": f"{app_settings.DIRECTOR_REGISTRY.resolved_registry_url}/{service_key}:{service_tag}", "Env": { - **config.SERVICES_DEFAULT_ENVS, + **service_default_envs, 
"SIMCORE_USER_ID": user_id, "SIMCORE_NODE_UUID": node_uuid, "SIMCORE_PROJECT_ID": project_id, "SIMCORE_NODE_BASEPATH": node_base_path or "", "SIMCORE_HOST_NAME": service_name, }, - "Hosts": get_system_extra_hosts_raw(config.EXTRA_HOSTS_SUFFIX), "Init": True, "Labels": { - "user_id": user_id, - "study_id": project_id, - "node_id": node_uuid, - "swarm_stack_name": config.SWARM_STACK_NAME, + _to_simcore_runtime_docker_label_key("user_id"): user_id, + _to_simcore_runtime_docker_label_key("project_id"): project_id, + _to_simcore_runtime_docker_label_key("node_id"): node_uuid, + _to_simcore_runtime_docker_label_key( + "swarm_stack_name" + ): app_settings.DIRECTOR_SWARM_STACK_NAME, + _to_simcore_runtime_docker_label_key( + "simcore_user_agent" + ): request_simcore_user_agent, + _to_simcore_runtime_docker_label_key( + "product_name" + ): "osparc", # fixed no legacy available in other products + _to_simcore_runtime_docker_label_key("cpu_limit"): "0", + _to_simcore_runtime_docker_label_key("memory_limit"): "0", }, "Mounts": [], } - if ( - config.DIRECTOR_SELF_SIGNED_SSL_FILENAME - and config.DIRECTOR_SELF_SIGNED_SSL_SECRET_ID - and config.DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME - ): - # Note: this is useful for S3 client in case of self signed certificate - container_spec["Env"][ - "SSL_CERT_FILE" - ] = config.DIRECTOR_SELF_SIGNED_SSL_FILENAME - container_spec["Secrets"] = [ - { - "SecretID": config.DIRECTOR_SELF_SIGNED_SSL_SECRET_ID, - "SecretName": config.DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME, - "File": { - "Name": config.DIRECTOR_SELF_SIGNED_SSL_FILENAME, - "Mode": 444, - "UID": "0", - "GID": "0", - }, - } - ] - # SEE https://docs.docker.com/engine/api/v1.41/#operation/ServiceCreate - docker_params = { - "auth": await _create_auth() if config.REGISTRY_AUTH else {}, - "registry": config.REGISTRY_PATH if config.REGISTRY_AUTH else "", + docker_params: dict[str, Any] = { + "auth": ( + await _create_auth(app_settings.DIRECTOR_REGISTRY) + if app_settings.DIRECTOR_REGISTRY.REGISTRY_AUTH + else {} + ), + "registry": ( + app_settings.DIRECTOR_REGISTRY.resolved_registry_url + if app_settings.DIRECTOR_REGISTRY.REGISTRY_AUTH + else "" + ), "name": service_name, "task_template": { "ContainerSpec": container_spec, "Placement": { - "Constraints": ["node.role==worker"] - if await docker_utils.swarm_has_worker_nodes() - else [] + "Constraints": ( + ["node.role==worker"] + if await docker_utils.swarm_has_worker_nodes() + else [] + ) }, "RestartPolicy": { "Condition": "on-failure", - "Delay": config.DIRECTOR_SERVICES_RESTART_POLICY_DELAY_S * pow(10, 6), - "MaxAttempts": config.DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS, + "Delay": app_settings.DIRECTOR_SERVICES_RESTART_POLICY_DELAY_S + * pow(10, 6), + "MaxAttempts": app_settings.DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS, }, "Resources": { "Limits": { - "NanoCPUs": config.DEFAULT_MAX_NANO_CPUS, - "MemoryBytes": config.DEFAULT_MAX_MEMORY, + "NanoCPUs": app_settings.DIRECTOR_DEFAULT_MAX_NANO_CPUS, + "MemoryBytes": app_settings.DIRECTOR_DEFAULT_MAX_MEMORY, }, "Reservations": { - "NanoCPUs": config.DEFAULT_MAX_NANO_CPUS, - "MemoryBytes": config.DEFAULT_MAX_MEMORY, + "NanoCPUs": app_settings.DIRECTOR_DEFAULT_MAX_NANO_CPUS, + "MemoryBytes": app_settings.DIRECTOR_DEFAULT_MAX_MEMORY, }, }, }, "endpoint_spec": {"Mode": "dnsrr"}, "labels": { - "uuid": node_uuid, - "study_id": project_id, - "user_id": user_id, - "type": "main" if main_service else "dependency", - "swarm_stack_name": config.SWARM_STACK_NAME, - "io.simcore.zone": f"{config.TRAEFIK_SIMCORE_ZONE}", + 
_to_simcore_runtime_docker_label_key("user_id"): user_id, + _to_simcore_runtime_docker_label_key("project_id"): project_id, + _to_simcore_runtime_docker_label_key("node_id"): node_uuid, + _to_simcore_runtime_docker_label_key( + "swarm_stack_name" + ): app_settings.DIRECTOR_SWARM_STACK_NAME, + _to_simcore_runtime_docker_label_key( + "simcore_user_agent" + ): request_simcore_user_agent, + _to_simcore_runtime_docker_label_key( + "product_name" + ): "osparc", # fixed no legacy available in other products + _to_simcore_runtime_docker_label_key("cpu_limit"): "0", + _to_simcore_runtime_docker_label_key("memory_limit"): "0", + _to_simcore_runtime_docker_label_key("type"): ( + "main" if main_service else "dependency" + ), + "io.simcore.zone": f"{app_settings.DIRECTOR_TRAEFIK_SIMCORE_ZONE}", "traefik.enable": "true" if main_service else "false", f"traefik.http.services.{service_name}.loadbalancer.server.port": "8080", f"traefik.http.routers.{service_name}.rule": f"PathPrefix(`/x/{node_uuid}`)", f"traefik.http.routers.{service_name}.entrypoints": "http", f"traefik.http.routers.{service_name}.priority": "10", - f"traefik.http.routers.{service_name}.middlewares": f"{config.SWARM_STACK_NAME}_gzip@docker", + f"traefik.http.routers.{service_name}.middlewares": f"{app_settings.DIRECTOR_SWARM_STACK_NAME}_gzip@swarm", }, "networks": [internal_network_id] if internal_network_id else [], } - if config.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS: - log.debug( - "adding custom constraints %s ", config.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS + if app_settings.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS: + _logger.debug( + "adding custom constraints %s ", + app_settings.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS, ) docker_params["task_template"]["Placement"]["Constraints"] += [ - config.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS + app_settings.DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS ] - if reverse_proxy_settings: - # some services define strip_path:true if they need the path to be stripped away - if ( - "strip_path" in reverse_proxy_settings - and reverse_proxy_settings["strip_path"] - ): - docker_params["labels"][ - f"traefik.http.middlewares.{service_name}_stripprefixregex.stripprefixregex.regex" - ] = f"^/x/{node_uuid}" - docker_params["labels"][ - f"traefik.http.routers.{service_name}.middlewares" - ] += f", {service_name}_stripprefixregex" - + # some services define strip_path:true if they need the path to be stripped away + if ( + isinstance(reverse_proxy_settings, dict) + and reverse_proxy_settings + and reverse_proxy_settings.get("strip_path") + ): + docker_params["labels"][ + f"traefik.http.middlewares.{service_name}_stripprefixregex.stripprefixregex.regex" + ] = f"^/x/{node_uuid}" + docker_params["labels"][ + f"traefik.http.routers.{service_name}.middlewares" + ] += f", {service_name}_stripprefixregex" + + placement_constraints_to_substitute: list[str] = [] + placement_substitutions: dict[str, str] = ( + app_settings.DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS + ) + assert isinstance(service_parameters_labels, list) # nosec for param in service_parameters_labels: _check_setting_correctness(param) # replace %service_uuid% by the given uuid if str(param["value"]).find("%service_uuid%") != -1: - dummy_string = json.dumps(param["value"]) + dummy_string = json_dumps(param["value"]) dummy_string = dummy_string.replace("%service_uuid%", node_uuid) - param["value"] = json.loads(dummy_string) + param["value"] = json_loads(dummy_string) if param["type"] == "Resources": # python-API compatible for backward compatibility if 
"mem_limit" in param["value"]: - docker_params["task_template"]["Resources"]["Limits"][ - "MemoryBytes" - ] = param["value"]["mem_limit"] + docker_params["task_template"]["Resources"]["Limits"]["MemoryBytes"] = ( + param["value"]["mem_limit"] + ) if "cpu_limit" in param["value"]: - docker_params["task_template"]["Resources"]["Limits"][ - "NanoCPUs" - ] = param["value"]["cpu_limit"] + docker_params["task_template"]["Resources"]["Limits"]["NanoCPUs"] = ( + param["value"]["cpu_limit"] + ) if "mem_reservation" in param["value"]: docker_params["task_template"]["Resources"]["Reservations"][ "MemoryBytes" @@ -287,6 +337,32 @@ async def _create_docker_service_params( "NanoCPUs" ] = param["value"]["cpu_reservation"] # REST-API compatible + if ( + placement_substitutions + and "Reservations" in param["value"] + and "GenericResources" in param["value"]["Reservations"] + ): + # Use placement constraints in place of generic resources, for details + # see https://github.com/ITISFoundation/osparc-simcore/issues/5250 + # removing them form here + generic_resources: list = param["value"]["Reservations"][ + "GenericResources" + ] + + to_remove: set[str] = set() + for generic_resource in generic_resources: + kind = generic_resource["DiscreteResourceSpec"]["Kind"] + if kind in placement_substitutions: + placement_constraints_to_substitute.append(kind) + to_remove.add(kind) + + # only include generic resources which must not be substituted + param["value"]["Reservations"]["GenericResources"] = [ + x + for x in generic_resources + if x["DiscreteResourceSpec"]["Kind"] not in to_remove + ] + if "Limits" in param["value"] or "Reservations" in param["value"]: docker_params["task_template"]["Resources"].update(param["value"]) @@ -303,50 +379,59 @@ async def _create_docker_service_params( # publishing port on the ingress network. 
elif param["name"] == "ports" and param["type"] == "int": # backward comp - docker_params["labels"]["port"] = docker_params["labels"][ - f"traefik.http.services.{service_name}.loadbalancer.server.port" - ] = str(param["value"]) + docker_params["labels"][_to_simcore_runtime_docker_label_key("port")] = ( + docker_params["labels"][ + f"traefik.http.services.{service_name}.loadbalancer.server.port" + ] + ) = str( + param["value"] + ) # REST-API compatible elif param["type"] == "EndpointSpec": - if "Ports" in param["value"]: - if ( - isinstance(param["value"]["Ports"], list) - and "TargetPort" in param["value"]["Ports"][0] - ): - docker_params["labels"]["port"] = docker_params["labels"][ - f"traefik.http.services.{service_name}.loadbalancer.server.port" - ] = str(param["value"]["Ports"][0]["TargetPort"]) + if "Ports" in param["value"] and ( + isinstance(param["value"]["Ports"], list) + and "TargetPort" in param["value"]["Ports"][0] + ): + docker_params["labels"][ + _to_simcore_runtime_docker_label_key("port") + ] = docker_params["labels"][ + f"traefik.http.services.{service_name}.loadbalancer.server.port" + ] = str( + param["value"]["Ports"][0]["TargetPort"] + ) # placement constraints - elif param["name"] == "constraints": # python-API compatible - docker_params["task_template"]["Placement"]["Constraints"] += param["value"] - elif param["type"] == "Constraints": # REST-API compatible + elif ( + param["name"] == "constraints" or param["type"] == "Constraints" + ): # python-API compatible docker_params["task_template"]["Placement"]["Constraints"] += param["value"] elif param["name"] == "env": - log.debug("Found env parameter %s", param["value"]) + _logger.debug("Found env parameter %s", param["value"]) env_settings = _parse_env_settings(param["value"]) if env_settings: docker_params["task_template"]["ContainerSpec"]["Env"].update( env_settings ) elif param["name"] == "mount": - log.debug("Found mount parameter %s", param["value"]) - mount_settings: List[Dict] = _parse_mount_settings(param["value"]) + _logger.debug("Found mount parameter %s", param["value"]) + mount_settings: list[dict] = _parse_mount_settings(param["value"]) if mount_settings: docker_params["task_template"]["ContainerSpec"]["Mounts"].extend( mount_settings ) - # attach the service to the swarm network dedicated to services - try: - swarm_network = await _get_swarm_network(client) - swarm_network_id = swarm_network["Id"] - swarm_network_name = swarm_network["Name"] - docker_params["networks"].append(swarm_network_id) - docker_params["labels"]["traefik.docker.network"] = swarm_network_name + # add placement constraints based on what was found + for generic_resource_kind in placement_constraints_to_substitute: + docker_params["task_template"]["Placement"]["Constraints"] += [ + placement_substitutions[generic_resource_kind] + ] - except exceptions.DirectorException: - log.exception("Could not find swarm network") + # attach the service to the swarm network dedicated to services + swarm_network = await _get_swarm_network(client, app_settings=app_settings) + swarm_network_id = swarm_network["Id"] + swarm_network_name = swarm_network["Name"] + docker_params["networks"].append(swarm_network_id) + docker_params["labels"]["traefik.swarm.network"] = swarm_network_name # set labels for CPU and Memory limits nano_cpus_limit = str( @@ -355,8 +440,14 @@ async def _create_docker_service_params( mem_limit = str( docker_params["task_template"]["Resources"]["Limits"]["MemoryBytes"] ) - container_spec["Labels"]["nano_cpus_limit"] = nano_cpus_limit 
- container_spec["Labels"]["mem_limit"] = mem_limit + docker_params["labels"][ + _to_simcore_runtime_docker_label_key("cpu_limit") + ] = container_spec["Labels"][ + _to_simcore_runtime_docker_label_key("cpu_limit") + ] = f"{float(nano_cpus_limit) / 1e9}" + docker_params["labels"][_to_simcore_runtime_docker_label_key("memory_limit")] = ( + container_spec["Labels"][_to_simcore_runtime_docker_label_key("memory_limit")] + ) = mem_limit # and make the container aware of them via env variables resource_limits = { @@ -365,26 +456,31 @@ async def _create_docker_service_params( } docker_params["task_template"]["ContainerSpec"]["Env"].update(resource_limits) - log.debug( + _logger.debug( "Converted labels to docker runtime parameters: %s", pformat(docker_params) ) return docker_params -def _get_service_entrypoint(service_boot_parameters_labels: Dict) -> str: - log.debug("Getting service entrypoint") +def _get_service_entrypoint( + service_boot_parameters_labels: list[dict[str, Any]], +) -> str: + _logger.debug("Getting service entrypoint") for param in service_boot_parameters_labels: _check_setting_correctness(param) if param["name"] == "entry_point": - log.debug("Service entrypoint is %s", param["value"]) + _logger.debug("Service entrypoint is %s", param["value"]) + assert isinstance(param["value"], str) # nosec return param["value"] return "" -async def _get_swarm_network(client: aiodocker.docker.Docker) -> Dict: +async def _get_swarm_network( + client: aiodocker.docker.Docker, app_settings: ApplicationSettings +) -> dict: network_name = "_default" - if config.SIMCORE_SERVICES_NETWORK_NAME: - network_name = "{}".format(config.SIMCORE_SERVICES_NETWORK_NAME) + if app_settings.DIRECTOR_SIMCORE_SERVICES_NETWORK_NAME: + network_name = f"{app_settings.DIRECTOR_SIMCORE_SERVICES_NETWORK_NAME}" # try to find the network name (usually named STACKNAME_default) networks = [ x @@ -392,20 +488,20 @@ async def _get_swarm_network(client: aiodocker.docker.Docker) -> Dict: if "swarm" in x["Scope"] and network_name in x["Name"] ] if not networks or len(networks) > 1: - raise exceptions.DirectorException( + raise DirectorRuntimeError( msg=( "Swarm network name is not configured, found following networks " "(if there is more then 1 network, remove the one which has no " - f"containers attached and all is fixed): {networks}" + f"containers attached and all is fixed): {networks if networks else 'no swarm network!'}" ) ) return networks[0] async def _get_docker_image_port_mapping( - service: Dict, -) -> Tuple[Optional[str], Optional[int]]: - log.debug("getting port published by service: %s", service["Spec"]["Name"]) + service: dict, +) -> tuple[str | None, int | None]: + _logger.debug("getting port published by service: %s", service["Spec"]["Name"]) published_ports = [] target_ports = [] @@ -417,17 +513,18 @@ async def _get_docker_image_port_mapping( published_ports.append(port["PublishedPort"]) target_ports.append(port["TargetPort"]) - log.debug("Service %s publishes: %s ports", service["ID"], published_ports) + _logger.debug("Service %s publishes: %s ports", service["ID"], published_ports) published_port = None target_port = None if published_ports: published_port = published_ports[0] if target_ports: target_port = target_ports[0] - else: - # if empty no port is published but there might still be an internal port defined - if "port" in service["Spec"]["Labels"]: - target_port = int(service["Spec"]["Labels"]["port"]) + # if empty no port is published but there might still be an internal port defined + elif 
_to_simcore_runtime_docker_label_key("port") in service["Spec"]["Labels"]: + target_port = int( + service["Spec"]["Labels"][_to_simcore_runtime_docker_label_key("port")] + ) return published_port, target_port @@ -438,30 +535,30 @@ async def _get_docker_image_port_mapping( async def _pass_port_to_service( service_name: str, port: str, - service_boot_parameters_labels: Dict, - session: ClientSession, + service_boot_parameters_labels: list[Any], + session: httpx.AsyncClient, + app_settings: ApplicationSettings, ) -> None: for param in service_boot_parameters_labels: _check_setting_correctness(param) if param["name"] == "published_host": - # time.sleep(5) route = param["value"] - log.debug( + _logger.debug( "Service needs to get published host %s:%s using route %s", - config.PUBLISHED_HOST_NAME, + app_settings.DIRECTOR_PUBLISHED_HOST_NAME, port, route, ) - service_url = "http://" + service_name + "/" + route + service_url = "http://" + service_name + "/" + route # NOSONAR query_string = { - "hostname": str(config.PUBLISHED_HOST_NAME), + "hostname": app_settings.DIRECTOR_PUBLISHED_HOST_NAME, "port": str(port), } - log.debug("creating request %s and query %s", service_url, query_string) - async with session.post(service_url, data=query_string) as response: - log.debug("query response: %s", await response.text()) + _logger.debug("creating request %s and query %s", service_url, query_string) + response = await session.post(service_url, data=query_string) + _logger.debug("query response: %s", response.text) return - log.debug("service %s does not need to know its external port", service_name) + _logger.debug("service %s does not need to know its external port", service_name) async def _create_network_name(service_name: str, node_uuid: str) -> str: @@ -471,7 +568,7 @@ async def _create_network_name(service_name: str, node_uuid: str) -> str: async def _create_overlay_network_in_swarm( client: aiodocker.docker.Docker, service_name: str, node_uuid: str ) -> str: - log.debug( + _logger.debug( "Creating overlay network for service %s with uuid %s", service_name, node_uuid ) network_name = await _create_network_name(service_name, node_uuid) @@ -479,70 +576,64 @@ async def _create_overlay_network_in_swarm( network_config = { "Name": network_name, "Driver": "overlay", - "Labels": {"uuid": node_uuid}, + "Labels": {_to_simcore_runtime_docker_label_key("node_id"): node_uuid}, } docker_network = await client.networks.create(network_config) - log.debug( + _logger.debug( "Network %s created for service %s with uuid %s", network_name, service_name, node_uuid, ) - return docker_network.id - except aiodocker.exceptions.DockerError as err: - log.exception("Error while creating network for service %s", service_name) - raise exceptions.GenericDockerError( - "Error while creating network", err - ) from err + return cast(str, docker_network.id) + except aiodocker.DockerError as err: + msg = "Error while creating network" + raise GenericDockerError(err=msg) from err async def _remove_overlay_network_of_swarm( client: aiodocker.docker.Docker, node_uuid: str ) -> None: - log.debug("Removing overlay network for service with uuid %s", node_uuid) + _logger.debug("Removing overlay network for service with uuid %s", node_uuid) try: networks = await client.networks.list() networks = [ x for x in (await client.networks.list()) if x["Labels"] - and "uuid" in x["Labels"] - and x["Labels"]["uuid"] == node_uuid + and _to_simcore_runtime_docker_label_key("node_id") in x["Labels"] + and 
x["Labels"][_to_simcore_runtime_docker_label_key("node_id")] + == node_uuid ] - log.debug("Found %s networks with uuid %s", len(networks), node_uuid) + _logger.debug("Found %s networks with uuid %s", len(networks), node_uuid) # remove any network in the list (should be only one) for network in networks: docker_network = aiodocker.networks.DockerNetwork(client, network["Id"]) await docker_network.delete() - log.debug("Removed %s networks with uuid %s", len(networks), node_uuid) - except aiodocker.exceptions.DockerError as err: - log.exception( - "Error while removing networks for service with uuid: %s", node_uuid - ) - raise exceptions.GenericDockerError( - "Error while removing networks", err - ) from err + _logger.debug("Removed %s networks with uuid %s", len(networks), node_uuid) + except aiodocker.DockerError as err: + msg = "Error while removing networks" + raise GenericDockerError(err=msg) from err async def _get_service_state( - client: aiodocker.docker.Docker, service: Dict -) -> Tuple[ServiceState, str]: + client: aiodocker.docker.Docker, service: dict, app_settings: ApplicationSettings +) -> tuple[ServiceState, str]: # some times one has to wait until the task info is filled service_name = service["Spec"]["Name"] - log.debug("Getting service %s state", service_name) + _logger.debug("Getting service %s state", service_name) tasks = await client.tasks.list(filters={"service": service_name}) - async def _wait_for_tasks(tasks): - task_started_time = datetime.utcnow() - while (datetime.utcnow() - task_started_time) < timedelta(seconds=20): - tasks = await client.tasks.list(filters={"service": service_name}) - # only keep the ones with the right service ID (we're being a bit picky maybe) - tasks = [x for x in tasks if x["ServiceID"] == service["ID"]] - if tasks: - return - await asyncio.sleep(1) # let other events happen too - - await _wait_for_tasks(tasks) + # wait for tasks + task_started_time = arrow.utcnow().datetime + while (arrow.utcnow().datetime - task_started_time) < timedelta(seconds=20): + tasks = await client.tasks.list(filters={"service": service_name}) + # only keep the ones with the right service ID (we're being a bit picky maybe) + tasks = [x for x in tasks if x["ServiceID"] == service["ID"]] + if tasks: + break + await asyncio.sleep(1) # let other events happen too + if not tasks: return (ServiceState.FAILED, "getting state timed out") @@ -550,19 +641,17 @@ async def _wait_for_tasks(tasks): last_task = sorted(tasks, key=lambda task: task["UpdatedAt"])[-1] task_state = last_task["Status"]["State"] - log.debug("%s %s", service["ID"], task_state) + _logger.debug("%s %s", service["ID"], task_state) last_task_state = ServiceState.STARTING # default - last_task_error_msg = ( - last_task["Status"]["Err"] if "Err" in last_task["Status"] else "" - ) + last_task_error_msg = last_task["Status"].get("Err", "") if task_state in ("failed"): # check if it failed already the max number of attempts we allow for - if len(tasks) < config.DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS: - log.debug("number of tasks: %s", len(tasks)) + if len(tasks) < app_settings.DIRECTOR_SERVICES_RESTART_POLICY_MAX_ATTEMPTS: + _logger.debug("number of tasks: %s", len(tasks)) last_task_state = ServiceState.STARTING else: - log.error( + _logger.error( "service %s failed with %s after %s trials", service_name, last_task["Status"], @@ -570,7 +659,7 @@ async def _wait_for_tasks(tasks): ) last_task_state = ServiceState.FAILED elif task_state in ("rejected"): - log.error("service %s failed with %s", service_name, 
last_task["Status"]) + _logger.error("service %s failed with %s", service_name, last_task["Status"]) last_task_state = ServiceState.FAILED elif task_state in ("pending"): last_task_state = ServiceState.PENDING @@ -579,16 +668,16 @@ async def _wait_for_tasks(tasks): elif task_state in ("ready", "starting"): last_task_state = ServiceState.STARTING elif task_state in ("running"): - now = datetime.utcnow() + now = arrow.utcnow().datetime # NOTE: task_state_update_time is only used to discrimitate between 'starting' and 'running' - task_state_update_time = parse_as_datetime( - last_task["Status"]["Timestamp"], default=now - ) + task_state_update_time = to_datetime(last_task["Status"]["Timestamp"]) time_since_running = now - task_state_update_time - log.debug("Now is %s, time since running mode is %s", now, time_since_running) + _logger.debug( + "Now is %s, time since running mode is %s", now, time_since_running + ) if time_since_running > timedelta( - seconds=config.DIRECTOR_SERVICES_STATE_MONITOR_S + seconds=app_settings.DIRECTOR_SERVICES_STATE_MONITOR_S ): last_task_state = ServiceState.RUNNING else: @@ -596,16 +685,16 @@ async def _wait_for_tasks(tasks): elif task_state in ("complete", "shutdown"): last_task_state = ServiceState.COMPLETE - log.debug("service running state is %s", last_task_state) + _logger.debug("service running state is %s", last_task_state) return (last_task_state, last_task_error_msg) async def _wait_until_service_running_or_failed( - client: aiodocker.docker.Docker, service: Dict, node_uuid: str + client: aiodocker.docker.Docker, service: dict, node_uuid: str ) -> None: # some times one has to wait until the task info is filled service_name = service["Spec"]["Name"] - log.debug("Waiting for service %s to start", service_name) + _logger.debug("Waiting for service %s to start", service_name) while True: tasks = await client.tasks.list(filters={"service": service_name}) # only keep the ones with the right service ID (we're being a bit picky maybe) @@ -614,31 +703,31 @@ async def _wait_until_service_running_or_failed( if tasks: last_task = tasks[0] task_state = last_task["Status"]["State"] - log.debug("%s %s", service["ID"], task_state) + _logger.debug("%s %s", service["ID"], task_state) if task_state in ("failed", "rejected"): - log.error( + _logger.error( "Error while waiting for service with %s", last_task["Status"] ) - raise exceptions.ServiceStartTimeoutError(service_name, node_uuid) + raise ServiceStartTimeoutError( + service_name=service_name, service_uuid=node_uuid + ) if task_state in ("running", "complete"): break # allows dealing with other events instead of wasting time here await asyncio.sleep(1) # 1s - log.debug("Waited for service %s to start", service_name) + _logger.debug("Waited for service %s to start", service_name) -async def _get_repos_from_key( - app: web.Application, service_key: str -) -> Dict[str, List[Dict]]: +async def _get_repos_from_key(app: FastAPI, service_key: str) -> dict[str, list[str]]: # get the available image for the main service (syntax is image:tag) list_of_images = { service_key: await registry_proxy.list_image_tags(app, service_key) } - log.debug("entries %s", list_of_images) + _logger.debug("entries %s", list_of_images) if not list_of_images[service_key]: - raise exceptions.ServiceNotAvailableError(service_key) + raise ServiceNotAvailableError(service_name=service_key) - log.debug( + _logger.debug( "Service %s has the following list of images available: %s", service_key, list_of_images, @@ -648,52 +737,60 @@ async def 
_get_repos_from_key( async def _get_dependant_repos( - app: web.Application, service_key: str, service_tag: str -) -> List[Dict]: + app: FastAPI, service_key: str, service_tag: str +) -> list[dict]: list_of_images = await _get_repos_from_key(app, service_key) tag = await _find_service_tag(list_of_images, service_key, service_tag) # look for dependencies - dependent_repositories = await registry_proxy.list_interactive_service_dependencies( + return await registry_proxy.list_interactive_service_dependencies( app, service_key, tag ) - return dependent_repositories _TAG_REGEX = re.compile(r"^\d+\.\d+\.\d+$") _SERVICE_KEY_REGEX = re.compile( - r"^(simcore/services/(comp|dynamic|frontend)(/[\w/-]+)+):(\d+\.\d+\.\d+).*$" + r"^(?P<key>simcore/services/" + r"(?P<type>(comp|dynamic|frontend))/" + r"(?P<subdir>[a-z0-9][a-z0-9_.-]*/)*" + r"(?P<name>[a-z0-9-_]+[a-z0-9]))" + r"(?::(?P<version>[\w][\w.-]{0,127}))?" + r"(?P<digest>\@sha256:[a-fA-F0-9]{32,64})?$" ) async def _find_service_tag( - list_of_images: Dict, service_key: str, service_tag: str + list_of_images: dict, service_key: str, service_tag: str | None ) -> str: - if not service_key in list_of_images: - raise exceptions.ServiceNotAvailableError( + if service_key not in list_of_images: + raise ServiceNotAvailableError( service_name=service_key, service_tag=service_tag ) # filter incorrect chars filtered_tags_list = filter(_TAG_REGEX.search, list_of_images[service_key]) # sort them now - available_tags_list = sorted(filtered_tags_list, key=StrictVersion) + available_tags_list = sorted(filtered_tags_list, key=Version) # not tags available... probably an undefined service there... if not available_tags_list: - raise exceptions.ServiceNotAvailableError(service_key, service_tag) + raise ServiceNotAvailableError( + service_name=service_key, service_tag=service_tag + ) tag = service_tag if not service_tag or service_tag == "latest": # get latest tag tag = available_tags_list[len(available_tags_list) - 1] elif available_tags_list.count(service_tag) != 1: - raise exceptions.ServiceNotAvailableError( + raise ServiceNotAvailableError( service_name=service_key, service_tag=service_tag ) - log.debug("Service tag found is %s ", service_tag) + _logger.debug("Service tag found is %s ", service_tag) + assert tag is not None # nosec return tag async def _start_docker_service( - app: web.Application, + app: FastAPI, + *, client: aiodocker.docker.Docker, user_id: str, project_id: str, @@ -702,21 +799,24 @@ async def _start_docker_service( main_service: bool, node_uuid: str, node_base_path: str, - internal_network_id: Optional[str], -) -> Dict: # pylint: disable=R0913 + internal_network_id: str | None, + request_simcore_user_agent: str, +) -> dict: # pylint: disable=R0913 + app_settings = get_application_settings(app) service_parameters = await _create_docker_service_params( app, - client, - service_key, - service_tag, - main_service, - user_id, - node_uuid, - project_id, - node_base_path, - internal_network_id, + client=client, + service_key=service_key, + service_tag=service_tag, + main_service=main_service, + user_id=user_id, + node_uuid=node_uuid, + project_id=project_id, + node_base_path=node_base_path, + internal_network_id=internal_network_id, + request_simcore_user_agent=request_simcore_user_agent, ) - log.debug( + _logger.debug( "Starting docker service %s:%s using parameters %s", service_key, service_tag, @@ -727,34 +827,42 @@ async def _start_docker_service( service = await client.services.create(**service_parameters) if "ID" not in service: # error while starting service - raise 
exceptions.DirectorException( - "Error while starting service: {}".format(str(service)) - ) - log.debug("Service started now waiting for it to run") + msg = f"Error while starting service: {service!s}" + raise DirectorRuntimeError(msg=msg) + _logger.debug("Service started now waiting for it to run") # get the full info from docker service = await client.services.inspect(service["ID"]) service_name = service["Spec"]["Name"] - service_state, service_msg = await _get_service_state(client, service) + service_state, service_msg = await _get_service_state( + client, dict(service), app_settings=app_settings + ) # wait for service to start - # await _wait_until_service_running_or_failed(client, service, node_uuid) - log.debug("Service %s successfully started", service_name) + _logger.debug("Service %s successfully started", service_name) # the docker swarm maybe opened some random port to access the service, get the latest version of the service service = await client.services.inspect(service["ID"]) - published_port, target_port = await _get_docker_image_port_mapping(service) + published_port, target_port = await _get_docker_image_port_mapping( + dict(service) + ) # now pass boot parameters service_boot_parameters_labels = await _read_service_settings( - app, service_key, service_tag, config.SERVICE_RUNTIME_BOOTSETTINGS + app, service_key, service_tag, SERVICE_RUNTIME_BOOTSETTINGS ) - service_entrypoint = _get_service_entrypoint(service_boot_parameters_labels) - if published_port: - session = app[APP_CLIENT_SESSION_KEY] - await _pass_port_to_service( - service_name, published_port, service_boot_parameters_labels, session - ) + service_entrypoint = "" + if isinstance(service_boot_parameters_labels, list): + service_entrypoint = _get_service_entrypoint(service_boot_parameters_labels) + if published_port: + session = get_client_session(app) + await _pass_port_to_service( + service_name, + published_port, + service_boot_parameters_labels, + session, + app_settings=app_settings, + ) - container_meta_data = { + return { "published_port": published_port, "entry_point": service_entrypoint, "service_uuid": node_uuid, @@ -768,42 +876,42 @@ async def _start_docker_service( "user_id": user_id, "project_id": project_id, } - return container_meta_data - except exceptions.ServiceStartTimeoutError: - log.exception("Service failed to start") + except ServiceStartTimeoutError: + _logger.exception("Service failed to start") await _silent_service_cleanup(app, node_uuid) raise - except aiodocker.exceptions.DockerError as err: - log.exception("Unexpected error") + except aiodocker.DockerError as err: + _logger.exception("Unexpected error") await _silent_service_cleanup(app, node_uuid) - raise exceptions.ServiceNotAvailableError(service_key, service_tag) from err + raise ServiceNotAvailableError( + service_name=service_key, service_tag=service_tag + ) from err -async def _silent_service_cleanup(app: web.Application, node_uuid: str) -> None: - try: - await stop_service(app, node_uuid, False) - except exceptions.DirectorException: - pass +async def _silent_service_cleanup(app: FastAPI, node_uuid: str) -> None: + with contextlib.suppress(DirectorRuntimeError): + await stop_service(app, node_uuid=node_uuid, save_state=False) async def _create_node( - app: web.Application, + app: FastAPI, client: aiodocker.docker.Docker, user_id: str, project_id: str, - list_of_services: List[Dict], + list_of_services: list[dict], node_uuid: str, node_base_path: str, -) -> List[Dict]: # pylint: disable=R0913, R0915 - log.debug( + 
request_simcore_user_agent: str, +) -> list[dict]: # pylint: disable=R0913, R0915 + _logger.debug( "Creating %s docker services for node %s and base path %s for user %s", len(list_of_services), node_uuid, node_base_path, user_id, ) - log.debug("Services %s will be started", list_of_services) + _logger.debug("Services %s will be started", list_of_services) # if the service uses several docker images, a network needs to be setup to connect them together inter_docker_network_id = None @@ -812,21 +920,22 @@ async def _create_node( inter_docker_network_id = await _create_overlay_network_in_swarm( client, service_name, node_uuid ) - log.debug("Created docker network in swarm for service %s", service_name) + _logger.debug("Created docker network in swarm for service %s", service_name) containers_meta_data = [] for service in list_of_services: service_meta_data = await _start_docker_service( app, - client, - user_id, - project_id, - service["key"], - service["tag"], - list_of_services.index(service) == 0, - node_uuid, - node_base_path, - inter_docker_network_id, + client=client, + user_id=user_id, + project_id=project_id, + service_key=service["key"], + service_tag=service["tag"], + main_service=list_of_services.index(service) == 0, + node_uuid=node_uuid, + node_base_path=node_base_path, + internal_network_id=inter_docker_network_id, + request_simcore_user_agent=request_simcore_user_agent, ) containers_meta_data.append(service_meta_data) @@ -834,42 +943,45 @@ async def _create_node( async def _get_service_key_version_from_docker_service( - service: Dict, -) -> Tuple[str, str]: + service: dict, registry_settings: RegistrySettings +) -> tuple[str, str]: service_full_name = str(service["Spec"]["TaskTemplate"]["ContainerSpec"]["Image"]) - if not service_full_name.startswith(config.REGISTRY_PATH): - raise exceptions.DirectorException( - msg=f"Invalid service '{service_full_name}', it is missing {config.REGISTRY_PATH}" + if not service_full_name.startswith(registry_settings.resolved_registry_url): + raise DirectorRuntimeError( + msg=f"Invalid service '{service_full_name}', it is missing {registry_settings.resolved_registry_url}" ) - service_full_name = service_full_name[len(config.REGISTRY_PATH) :].strip("/") + service_full_name = service_full_name[ + len(registry_settings.resolved_registry_url) : + ].strip("/") service_re_match = _SERVICE_KEY_REGEX.match(service_full_name) if not service_re_match: - raise exceptions.DirectorException( + raise DirectorRuntimeError( msg=f"Invalid service '{service_full_name}', it does not follow pattern '{_SERVICE_KEY_REGEX.pattern}'" ) - service_key = service_re_match.group(1) - service_tag = service_re_match.group(4) + service_key = service_re_match.group("key") + service_tag = service_re_match.group("version") return service_key, service_tag -async def _get_service_basepath_from_docker_service(service: Dict) -> str: - envs_list = service["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] +async def _get_service_basepath_from_docker_service(service: dict[str, Any]) -> str: + envs_list: list[str] = service["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"] envs_dict = dict(x.split("=") for x in envs_list) return envs_dict["SIMCORE_NODE_BASEPATH"] async def start_service( - app: web.Application, + app: FastAPI, user_id: str, project_id: str, service_key: str, - service_tag: str, + service_tag: str | None, node_uuid: str, node_base_path: str, -) -> Dict: - # pylint: disable=C0103 - log.debug( + request_simcore_user_agent: str, +) -> dict: + app_settings = 
get_application_settings(app) + _logger.debug( "starting service %s:%s using uuid %s, basepath %s", service_key, service_tag, @@ -881,11 +993,11 @@ async def start_service( await _check_node_uuid_available(client, node_uuid) list_of_images = await _get_repos_from_key(app, service_key) service_tag = await _find_service_tag(list_of_images, service_key, service_tag) - log.debug("Found service to start %s:%s", service_key, service_tag) + _logger.debug("Found service to start %s:%s", service_key, service_tag) list_of_services_to_start = [{"key": service_key, "tag": service_tag}] # find the service dependencies list_of_dependencies = await _get_dependant_repos(app, service_key, service_tag) - log.debug("Found service dependencies: %s", list_of_dependencies) + _logger.debug("Found service dependencies: %s", list_of_dependencies) if list_of_dependencies: list_of_services_to_start.extend(list_of_dependencies) @@ -897,48 +1009,57 @@ async def start_service( list_of_services_to_start, node_uuid, node_base_path, + request_simcore_user_agent, ) node_details = containers_meta_data[0] - if config.MONITORING_ENABLED: - service_started( - app, - "undefined_user", # NOTE: to prevent high cardinality metrics this is disabled - service_key, - service_tag, - "DYNAMIC", - ) + if app_settings.DIRECTOR_MONITORING_ENABLED: + get_instrumentation(app).services_started.labels( + service_key=service_key, + service_tag=service_tag, + simcore_user_agent="undefined_user", + ).inc() + # we return only the info of the main service return node_details async def _get_node_details( - app: web.Application, client: aiodocker.docker.Docker, service: Dict -) -> Dict: + app: FastAPI, client: aiodocker.docker.Docker, service: dict +) -> dict: + app_settings = get_application_settings(app) service_key, service_tag = await _get_service_key_version_from_docker_service( - service + service, registry_settings=app_settings.DIRECTOR_REGISTRY ) # get boot parameters results = await asyncio.gather( _read_service_settings( - app, service_key, service_tag, config.SERVICE_RUNTIME_BOOTSETTINGS + app, service_key, service_tag, SERVICE_RUNTIME_BOOTSETTINGS ), _get_service_basepath_from_docker_service(service), - _get_service_state(client, service), + _get_service_state(client, service, app_settings=app_settings), ) service_boot_parameters_labels = results[0] - service_entrypoint = _get_service_entrypoint(service_boot_parameters_labels) + service_entrypoint = "" + if service_boot_parameters_labels and isinstance( + service_boot_parameters_labels, list + ): + service_entrypoint = _get_service_entrypoint(service_boot_parameters_labels) service_basepath = results[1] service_state, service_msg = results[2] service_name = service["Spec"]["Name"] - service_uuid = service["Spec"]["Labels"]["uuid"] - user_id = service["Spec"]["Labels"]["user_id"] - project_id = service["Spec"]["Labels"]["study_id"] + service_uuid = service["Spec"]["Labels"][ + _to_simcore_runtime_docker_label_key("node_id") + ] + user_id = service["Spec"]["Labels"][_to_simcore_runtime_docker_label_key("user_id")] + project_id = service["Spec"]["Labels"][ + _to_simcore_runtime_docker_label_key("project_id") + ] # get the published port published_port, target_port = await _get_docker_image_port_mapping(service) - node_details = { + return { "published_port": published_port, "entry_point": service_entrypoint, "service_uuid": service_uuid, @@ -952,118 +1073,119 @@ async def _get_node_details( "user_id": user_id, "project_id": project_id, } - return node_details async def get_services_details( 
- app: web.Application, user_id: Optional[str], study_id: Optional[str] -) -> List[Dict]: + app: FastAPI, user_id: str | None, project_id: str | None +) -> list[dict]: + app_settings = get_application_settings(app) async with docker_utils.docker_client() as client: # pylint: disable=not-async-context-manager try: - filters = ["type=main", f"swarm_stack_name={config.SWARM_STACK_NAME}"] + filters = [ + f"{_to_simcore_runtime_docker_label_key('type')}=main", + f"{_to_simcore_runtime_docker_label_key('swarm_stack_name')}={app_settings.DIRECTOR_SWARM_STACK_NAME}", + ] if user_id: - filters.append("user_id=" + user_id) - if study_id: - filters.append("study_id=" + study_id) + filters.append( + f"{_to_simcore_runtime_docker_label_key('user_id')}=" + user_id + ) + if project_id: + filters.append( + f"{_to_simcore_runtime_docker_label_key('project_id')}=" + + project_id + ) list_running_services = await client.services.list( filters={"label": filters} ) - services_details = [ - await _get_node_details(app, client, service) + return [ + await _get_node_details(app, client, dict(service)) for service in list_running_services ] - return services_details - except aiodocker.exceptions.DockerError as err: - log.exception( - "Error while listing services with user_id, study_id %s, %s", - user_id, - study_id, - ) - raise exceptions.GenericDockerError( - "Error while accessing container", err - ) from err + except aiodocker.DockerError as err: + msg = f"Error while accessing container for {user_id=}, {project_id=}" + raise GenericDockerError(err=msg) from err -async def get_service_details(app: web.Application, node_uuid: str) -> Dict: - async with docker_utils.docker_client() as client: # pylint: disable=not-async-context-manager +async def get_service_details(app: FastAPI, node_uuid: str) -> dict: + app_settings = get_application_settings(app) + async with docker_utils.docker_client() as client: try: list_running_services_with_uuid = await client.services.list( filters={ "label": [ - f"uuid={node_uuid}", - "type=main", - f"swarm_stack_name={config.SWARM_STACK_NAME}", + f"{_to_simcore_runtime_docker_label_key('node_id')}={node_uuid}", + f"{_to_simcore_runtime_docker_label_key('type')}=main", + f"{_to_simcore_runtime_docker_label_key('swarm_stack_name')}={app_settings.DIRECTOR_SWARM_STACK_NAME}", ] } ) # error if no service with such an id exists if not list_running_services_with_uuid: - raise exceptions.ServiceUUIDNotFoundError(node_uuid) + raise ServiceUUIDNotFoundError(service_uuid=node_uuid) if len(list_running_services_with_uuid) > 1: # someone did something fishy here - raise exceptions.DirectorException( + raise DirectorRuntimeError( msg="More than one docker service is labeled as main service" ) - node_details = await _get_node_details( - app, client, list_running_services_with_uuid[0] + return await _get_node_details( + app, client, dict(list_running_services_with_uuid[0]) ) - return node_details - except aiodocker.exceptions.DockerError as err: - log.exception("Error while accessing container with uuid: %s", node_uuid) - raise exceptions.GenericDockerError( - "Error while accessing container", err - ) from err + except aiodocker.DockerError as err: + msg = f"Error while accessing container {node_uuid=}" + raise GenericDockerError(err=msg) from err @retry( - wait=wait_fixed(2), + wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(3), reraise=True, - retry=retry_if_exception_type(ClientConnectionError), + retry=retry_if_exception_type(httpx.RequestError), ) -async def 
_save_service_state(service_host_name: str, session: aiohttp.ClientSession): - response: ClientResponse - async with session.post( - url=f"http://{service_host_name}/state", - timeout=ServicesCommonSettings().director_dynamic_service_save_timeout, - ) as response: - try: - response.raise_for_status() +async def _save_service_state( + service_host_name: str, session: httpx.AsyncClient +) -> None: + try: + response = await session.post( + url=f"http://{service_host_name}/state", # NOSONAR + timeout=ServicesCommonSettings().director_dynamic_service_save_timeout, + ) + response.raise_for_status() - except ClientResponseError as err: - if err.status in ( - HTTPStatus.METHOD_NOT_ALLOWED, - HTTPStatus.NOT_FOUND, - HTTPStatus.NOT_IMPLEMENTED, - ): - # NOTE: Legacy Override. Some old services do not have a state entrypoint defined - # therefore we assume there is nothing to be saved and do not raise exception - # Responses found so far: - # METHOD NOT ALLOWED https://httpstatuses.com/405 - # NOT FOUND https://httpstatuses.com/404 - # - log.warning( - "Service '%s' does not seem to implement save state functionality: %s. Skipping save", - service_host_name, - err, - ) - else: - # upss ... could service had troubles saving, reraise - raise - else: - log.info( - "Service '%s' successfully saved its state: %s", + except httpx.HTTPStatusError as err: + if err.response.status_code in ( + status.HTTP_405_METHOD_NOT_ALLOWED, + status.HTTP_404_NOT_FOUND, + status.HTTP_501_NOT_IMPLEMENTED, + ): + # NOTE: Legacy Override. Some old services do not have a state entrypoint defined + # therefore we assume there is nothing to be saved and do not raise exception + # Responses found so far: + # METHOD NOT ALLOWED https://httpstatuses.com/405 + # NOT FOUND https://httpstatuses.com/404 + # + _logger.warning( + "Service '%s' does not seem to implement save state functionality: %s. Skipping save", service_host_name, - f"{response}", + err, ) + else: + # upss ... 
could service had troubles saving, reraise + raise + else: + _logger.info( + "Service '%s' successfully saved its state: %s", + service_host_name, + f"{response}", + ) @run_sequentially_in_context(target_args=["node_uuid"]) -async def stop_service(app: web.Application, node_uuid: str, save_state: bool) -> None: - log.debug( +async def stop_service(app: FastAPI, *, node_uuid: str, save_state: bool) -> None: + app_settings = get_application_settings(app) + _logger.debug( "stopping service with node_uuid=%s, save_state=%s", node_uuid, save_state ) @@ -1073,82 +1195,80 @@ async def stop_service(app: web.Application, node_uuid: str, save_state: bool) - list_running_services_with_uuid = await client.services.list( filters={ "label": [ - f"uuid={node_uuid}", - f"swarm_stack_name={config.SWARM_STACK_NAME}", + f"{_to_simcore_runtime_docker_label_key('node_id')}={node_uuid}", + f"{_to_simcore_runtime_docker_label_key('swarm_stack_name')}={app_settings.DIRECTOR_SWARM_STACK_NAME}", ] } ) - except aiodocker.exceptions.DockerError as err: - log.exception("Error while stopping container with uuid: %s", node_uuid) - raise exceptions.GenericDockerError( - "Error while stopping container", err - ) from err + except aiodocker.DockerError as err: + msg = f"Error while stopping container {node_uuid=}" + raise GenericDockerError(err=msg) from err # error if no service with such an id exists if not list_running_services_with_uuid: - raise exceptions.ServiceUUIDNotFoundError(node_uuid) + raise ServiceUUIDNotFoundError(service_uuid=node_uuid) - log.debug("found service(s) with uuid %s", list_running_services_with_uuid) + _logger.debug("found service(s) with uuid %s", list_running_services_with_uuid) # save the state of the main service if it can service_details = await get_service_details(app, node_uuid) - # FIXME: the exception for the 3d-viewer shall be removed once the dy-sidecar comes in service_host_name = "{}:{}{}".format( service_details["service_host"], - service_details["service_port"] - if service_details["service_port"] - else "80", - service_details["service_basepath"] - if not "3d-viewer" in service_details["service_host"] - else "", + ( + service_details["service_port"] + if service_details["service_port"] + else "80" + ), + ( + service_details["service_basepath"] + if "3d-viewer" not in service_details["service_host"] + else "" + ), ) # If state save is enforced if save_state: - log.debug("saving state of service %s...", service_host_name) + _logger.debug("saving state of service %s...", service_host_name) try: await _save_service_state( - service_host_name, session=app[APP_CLIENT_SESSION_KEY] + service_host_name, session=get_client_session(app) ) - except ClientResponseError as err: + except httpx.HTTPStatusError as err: raise ServiceStateSaveError( - node_uuid, + service_uuid=node_uuid, reason=f"service {service_host_name} rejected to save state, " - f"responded {err.message} (status {err.status})." + f"responded {err.response.text} (status {err.response.status_code})." "Aborting stop service to prevent data loss.", ) from err - except ClientError as err: - log.warning( + except httpx.RequestError as err: + _logger.warning( "Could not save state because %s is unreachable [%s]." 
"Resuming stop_service.", service_host_name, - err + err.request, ) # remove the services try: - log.debug("removing services ...") + _logger.debug("removing services ...") for service in list_running_services_with_uuid: - log.debug("removing %s", service["Spec"]["Name"]) + _logger.debug("removing %s", service["Spec"]["Name"]) await client.services.delete(service["Spec"]["Name"]) - except aiodocker.exceptions.DockerError as err: - raise exceptions.GenericDockerError( - "Error while removing services", err - ) from err + except aiodocker.DockerError as err: + msg = f"Error while removing services {node_uuid=}" + raise GenericDockerError(err=msg) from err # remove network(s) - log.debug("removed services, now removing network...") + _logger.debug("removed services, now removing network...") await _remove_overlay_network_of_swarm(client, node_uuid) - log.debug("removed network") - - if config.MONITORING_ENABLED: - service_stopped( - app, - "undefined_user", - service_details["service_key"], - service_details["service_version"], - "DYNAMIC", - "SUCCESS", - ) + _logger.debug("removed network") + + if app_settings.DIRECTOR_MONITORING_ENABLED: + get_instrumentation(app).services_stopped.labels( + service_key=service_details["service_key"], + service_tag=service_details["service_version"], + simcore_user_agent="undefined_user", + result="SUCCESS", + ).inc() diff --git a/services/director/src/simcore_service_director/registry_cache_task.py b/services/director/src/simcore_service_director/registry_cache_task.py deleted file mode 100644 index 10eca38b2b7..00000000000 --- a/services/director/src/simcore_service_director/registry_cache_task.py +++ /dev/null @@ -1,79 +0,0 @@ -import asyncio -import logging -from typing import AsyncIterator - -from aiohttp import web -from servicelib.utils import logged_gather -from simcore_service_director import config, exceptions, registry_proxy -from simcore_service_director.config import APP_REGISTRY_CACHE_DATA_KEY - -_logger = logging.getLogger(__name__) - -TASK_NAME: str = __name__ + "_registry_caching_task" - - -async def registry_caching_task(app: web.Application) -> None: - try: - - _logger.info("%s: initializing cache...", TASK_NAME) - app[APP_REGISTRY_CACHE_DATA_KEY].clear() - await registry_proxy.list_services(app, registry_proxy.ServiceType.ALL) - _logger.info("%s: initialisation completed", TASK_NAME) - while True: - _logger.info("%s: waking up, refreshing cache...", TASK_NAME) - try: - keys = [] - refresh_tasks = [] - for key in app[APP_REGISTRY_CACHE_DATA_KEY]: - path, method = key.split(":") - _logger.debug("refresh %s:%s", method, path) - refresh_tasks.append( - registry_proxy.registry_request( - app, path, method, no_cache=True - ) - ) - keys = list(app[APP_REGISTRY_CACHE_DATA_KEY].keys()) - results = await logged_gather(*refresh_tasks) - - for key, result in zip(keys, results): - app[APP_REGISTRY_CACHE_DATA_KEY][key] = result - - except exceptions.DirectorException: - # if the registry is temporarily not available this might happen - _logger.exception( - "%s: exception while refreshing cache, clean cache...", TASK_NAME - ) - app[APP_REGISTRY_CACHE_DATA_KEY].clear() - - _logger.info( - "cache refreshed %s: sleeping for %ss...", - TASK_NAME, - config.DIRECTOR_REGISTRY_CACHING_TTL, - ) - await asyncio.sleep(config.DIRECTOR_REGISTRY_CACHING_TTL) - except asyncio.CancelledError: - _logger.info("%s: cancelling task...", TASK_NAME) - except Exception: # pylint: disable=broad-except - _logger.exception("%s: Unhandled exception while refreshing cache", 
TASK_NAME) - finally: - _logger.info("%s: finished task...clearing cache...", TASK_NAME) - app[APP_REGISTRY_CACHE_DATA_KEY].clear() - - -async def setup_registry_caching_task(app: web.Application) -> AsyncIterator[None]: - app[APP_REGISTRY_CACHE_DATA_KEY] = {} - app[TASK_NAME] = asyncio.get_event_loop().create_task(registry_caching_task(app)) - - yield - - task = app[TASK_NAME] - task.cancel() - await task - - -def setup(app: web.Application) -> None: - if config.DIRECTOR_REGISTRY_CACHING: - app.cleanup_ctx.append(setup_registry_caching_task) - - -__all__ = ["setup", "APP_REGISTRY_CACHE_DATA_KEY"] diff --git a/services/director/src/simcore_service_director/registry_proxy.py b/services/director/src/simcore_service_director/registry_proxy.py index 88c65f2315a..56b5d812f8c 100644 --- a/services/director/src/simcore_service_director/registry_proxy.py +++ b/services/director/src/simcore_service_director/registry_proxy.py @@ -1,35 +1,47 @@ -# pylint: disable=C0111 import asyncio import enum import json import logging import re -from http import HTTPStatus -from pprint import pformat -from typing import Any, AsyncIterator, Dict, List, Tuple - -from aiohttp import BasicAuth, ClientSession, client_exceptions, web -from aiohttp.client import ClientTimeout -from simcore_service_director import config, exceptions -from simcore_service_director.cache_request_decorator import cache_requests +from collections.abc import AsyncGenerator, Mapping +from typing import Any, Final, cast + +import httpx +from aiocache import Cache, SimpleMemoryCache # type: ignore[import-untyped] +from common_library.json_serialization import json_loads +from fastapi import FastAPI, status +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.fastapi.client_session import get_client_session +from servicelib.logging_utils import log_catch, log_context +from servicelib.utils import limited_as_completed from tenacity import retry from tenacity.before_sleep import before_sleep_log -from tenacity.retry import retry_if_result -from tenacity.wait import wait_fixed +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed, wait_random_exponential from yarl import URL -from .config import APP_CLIENT_SESSION_KEY +from .constants import DIRECTOR_SIMCORE_SERVICES_PREFIX +from .core.errors import ( + DirectorRuntimeError, + RegistryConnectionError, + ServiceNotAvailableError, +) +from .core.settings import ApplicationSettings, get_application_settings DEPENDENCIES_LABEL_KEY: str = "simcore.service.dependencies" -NUMBER_OF_RETRIEVED_REPOS: int = 50 -NUMBER_OF_RETRIEVED_TAGS: int = 50 - VERSION_REG = re.compile( r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$" ) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + +# +# NOTE: if you are refactoring this module, +# please consider reusing packages/pytest-simcore/src/pytest_simcore/helpers/docker_registry.py +# class ServiceType(enum.Enum): @@ -39,69 +51,70 @@ class ServiceType(enum.Enum): async def _basic_auth_registry_request( - app: web.Application, path: str, method: str, **session_kwargs -) -> Tuple[Dict, Dict]: - if not config.REGISTRY_URL: - raise exceptions.DirectorException("URL to registry is not defined") - - url = URL( - f"{'https' if config.REGISTRY_SSL else 'http'}://{config.REGISTRY_URL}{path}" - ) - 
logger.debug("Requesting registry using %s", url) + app: FastAPI, path: str, method: str, **session_kwargs +) -> tuple[dict, Mapping]: + app_settings = get_application_settings(app) # try the registry with basic authentication first, spare 1 call - resp_data: Dict = {} - resp_headers: Dict = {} + resp_data: dict = {} + resp_headers: Mapping = {} auth = ( - BasicAuth(login=config.REGISTRY_USER, password=config.REGISTRY_PW) - if config.REGISTRY_AUTH and config.REGISTRY_USER and config.REGISTRY_PW + httpx.BasicAuth( + username=app_settings.DIRECTOR_REGISTRY.REGISTRY_USER, + password=app_settings.DIRECTOR_REGISTRY.REGISTRY_PW.get_secret_value(), + ) + if app_settings.DIRECTOR_REGISTRY.REGISTRY_AUTH else None ) - session = app[APP_CLIENT_SESSION_KEY] - try: - async with session.request( - method.lower(), url, auth=auth, **session_kwargs - ) as response: - if response.status == HTTPStatus.UNAUTHORIZED: - logger.debug("Registry unauthorized request: %s", await response.text()) - # basic mode failed, test with other auth mode - resp_data, resp_headers = await _auth_registry_request( - url, method, response.headers, session, **session_kwargs - ) - - elif response.status == HTTPStatus.NOT_FOUND: - logger.exception("Path to registry not found: %s", url) - raise exceptions.ServiceNotAvailableError(str(path)) + request_url = URL(f"{app_settings.DIRECTOR_REGISTRY.api_url}").joinpath( + path, encoded=True + ) - elif response.status > 399: - logger.exception( - "Unknown error while accessing registry: %s", str(response) - ) - raise exceptions.RegistryConnectionError(str(response)) + session = get_client_session(app) + response = await session.request( + method.lower(), + f"{request_url}", + auth=auth, + **session_kwargs, + ) - else: - # registry that does not need an auth - resp_data = await response.json(content_type=None) - resp_headers = response.headers - - return (resp_data, resp_headers) - except client_exceptions.ClientError as exc: - logger.exception("Unknown error while accessing registry: %s", str(exc)) - raise exceptions.DirectorException( - f"Unknown error while accessing registry: {str(exc)}" + if response.status_code == status.HTTP_401_UNAUTHORIZED: + # basic mode failed, test with other auth mode + resp_data, resp_headers = await _auth_registry_request( + app_settings, + request_url, + method, + response.headers, + session, + **session_kwargs, ) + elif response.status_code == status.HTTP_404_NOT_FOUND: + raise ServiceNotAvailableError(service_name=path) -async def _auth_registry_request( - url: URL, method: str, auth_headers: Dict, session: ClientSession, **kwargs -) -> Tuple[Dict, Dict]: - if not config.REGISTRY_AUTH or not config.REGISTRY_USER or not config.REGISTRY_PW: - raise exceptions.RegistryConnectionError( - "Wrong configuration: Authentication to registry is needed!" 
- ) + elif response.status_code >= status.HTTP_400_BAD_REQUEST: + raise RegistryConnectionError(msg=str(response)) + + else: + # registry that does not need an auth + if method.lower() != "head": + resp_data = response.json() + resp_headers = response.headers + + return (resp_data, resp_headers) + + +async def _auth_registry_request( # noqa: C901 + app_settings: ApplicationSettings, + url: URL, + method: str, + auth_headers: Mapping, + session: httpx.AsyncClient, + **kwargs, +) -> tuple[dict, Mapping]: # auth issue let's try some authentication get the auth type auth_type = None - auth_details: Dict[str, str] = {} + auth_details: dict[str, str] = {} for key in auth_headers: if str(key).lower() == "www-authenticate": auth_type, auth_value = str(auth_headers[key]).split(" ", 1) @@ -111,10 +124,12 @@ async def _auth_registry_request( } break if not auth_type: - raise exceptions.RegistryConnectionError( - "Unknown registry type: cannot deduce authentication method!" - ) - auth = BasicAuth(login=config.REGISTRY_USER, password=config.REGISTRY_PW) + msg = "Unknown registry type: cannot deduce authentication method!" + raise RegistryConnectionError(msg=msg) + auth = httpx.BasicAuth( + username=app_settings.DIRECTOR_REGISTRY.REGISTRY_USER, + password=app_settings.DIRECTOR_REGISTRY.REGISTRY_PW.get_secret_value(), + ) # bearer type, it needs a token with all communications if auth_type == "Bearer": @@ -122,155 +137,286 @@ async def _auth_registry_request( token_url = URL(auth_details["realm"]).with_query( service=auth_details["service"], scope=auth_details["scope"] ) - async with session.get(token_url, auth=auth, **kwargs) as token_resp: - if not token_resp.status == HTTPStatus.OK: - raise exceptions.RegistryConnectionError( - "Unknown error while authentifying with registry: {}".format( - str(token_resp) - ) - ) - bearer_code = (await token_resp.json())["token"] - headers = {"Authorization": "Bearer {}".format(bearer_code)} - async with getattr(session, method.lower())( - url, headers=headers, **kwargs - ) as resp_wtoken: - if resp_wtoken.status == HTTPStatus.NOT_FOUND: - logger.exception("path to registry not found: %s", url) - raise exceptions.ServiceNotAvailableError(str(url)) - if resp_wtoken.status > 399: - logger.exception( - "Unknown error while accessing with token authorized registry: %s", - str(resp_wtoken), - ) - raise exceptions.RegistryConnectionError(str(resp_wtoken)) - resp_data = await resp_wtoken.json(content_type=None) - resp_headers = resp_wtoken.headers - return (resp_data, resp_headers) - elif auth_type == "Basic": + token_resp = await session.get(f"{token_url}", auth=auth, **kwargs) + if token_resp.status_code != status.HTTP_200_OK: + msg = f"Unknown error while authentifying with registry: {token_resp!s}" + raise RegistryConnectionError(msg=msg) + + bearer_code = (await token_resp.json())["token"] + headers = {"Authorization": f"Bearer {bearer_code}"} + resp_wtoken = await getattr(session, method.lower())( + url, headers=headers, **kwargs + ) + assert isinstance(resp_wtoken, httpx.Response) # nosec + if resp_wtoken.status_code == status.HTTP_404_NOT_FOUND: + raise ServiceNotAvailableError(service_name=f"{url}") + if resp_wtoken.status_code >= status.HTTP_400_BAD_REQUEST: + raise RegistryConnectionError(msg=f"{resp_wtoken}") + resp_data = await resp_wtoken.json(content_type=None) + resp_headers = resp_wtoken.headers + return (resp_data, resp_headers) + if auth_type == "Basic": # basic authentication should not be since we tried already... 
- async with getattr(session, method.lower())( - url, auth=auth, **kwargs - ) as resp_wbasic: - if resp_wbasic.status == HTTPStatus.NOT_FOUND: - logger.exception("path to registry not found: %s", url) - raise exceptions.ServiceNotAvailableError(str(url)) - if resp_wbasic.status > 399: - logger.exception( - "Unknown error while accessing with token authorized registry: %s", - str(resp_wbasic), - ) - raise exceptions.RegistryConnectionError(str(resp_wbasic)) - resp_data = await resp_wbasic.json(content_type=None) - resp_headers = resp_wbasic.headers - return (resp_data, resp_headers) - raise exceptions.RegistryConnectionError( - f"Unknown registry authentification type: {url}" - ) + resp_wbasic = await getattr(session, method.lower())( + str(url), auth=auth, **kwargs + ) + assert isinstance(resp_wbasic, httpx.Response) # nosec + if resp_wbasic.status_code == status.HTTP_404_NOT_FOUND: + raise ServiceNotAvailableError(service_name=f"{url}") + if resp_wbasic.status_code >= status.HTTP_400_BAD_REQUEST: + raise RegistryConnectionError(msg=f"{resp_wbasic}") + resp_data = await resp_wbasic.json(content_type=None) + resp_headers = resp_wbasic.headers + return (resp_data, resp_headers) + msg = f"Unknown registry authentification type: {url}" + raise RegistryConnectionError(msg=msg) + + +@retry( + retry=retry_if_exception_type((httpx.RequestError, TimeoutError)), + wait=wait_random_exponential(min=1, max=10), + stop=stop_after_delay(120), + before_sleep=before_sleep_log(_logger, logging.WARNING), + reraise=True, +) +async def _retried_request( + app: FastAPI, path: str, method: str, **session_kwargs +) -> tuple[dict, Mapping]: + return await _basic_auth_registry_request(app, path, method, **session_kwargs) async def registry_request( - app: web.Application, + app: FastAPI, + *, path: str, - method: str = "GET", - no_cache: bool = False, + method: str, + use_cache: bool, **session_kwargs, -) -> Tuple[Dict, Dict]: - logger.debug( - "Request to registry: path=%s, method=%s. 
no_cache=%s", path, method, no_cache - ) - return await cache_requests(_basic_auth_registry_request, no_cache)( - app, path, method, **session_kwargs - ) - - -async def is_registry_responsive(app: web.Application) -> bool: - path = "/v2/" +) -> tuple[dict, Mapping]: + cache: SimpleMemoryCache = app.state.registry_cache_memory + cache_key = f"{method}_{path}" + if use_cache and (cached_response := await cache.get(cache_key)): + assert isinstance(cached_response, tuple) # nosec + return cast(tuple[dict, Mapping], cached_response) + + app_settings = get_application_settings(app) try: - await registry_request( - app, path, no_cache=True, timeout=ClientTimeout(total=1.0) + response, response_headers = await _retried_request( + app, path, method.upper(), **session_kwargs + ) + except httpx.RequestError as exc: + msg = f"Unknown error while accessing registry: {exc!s} via {exc.request}" + raise DirectorRuntimeError(msg=msg) from exc + + if app_settings.DIRECTOR_REGISTRY_CACHING and method.upper() == "GET": + await cache.set( + cache_key, + (response, response_headers), + ttl=app_settings.DIRECTOR_REGISTRY_CACHING_TTL.total_seconds(), ) - return True - except (exceptions.DirectorException, asyncio.TimeoutError) as exc: - logger.debug("Registry not responsive: %s", exc) - return False + return response, response_headers -async def setup_registry(app: web.Application) -> AsyncIterator[None]: - logger.debug("pinging registry...") +async def _setup_registry(app: FastAPI) -> None: @retry( - wait=wait_fixed(2), - before_sleep=before_sleep_log(logger, logging.WARNING), - retry=retry_if_result(lambda result: result == False), + wait=wait_fixed(1), + before_sleep=before_sleep_log(_logger, logging.WARNING), + retry=retry_if_exception_type((httpx.RequestError, DirectorRuntimeError)), reraise=True, ) - async def wait_until_registry_responsive(app: web.Application) -> bool: - return await is_registry_responsive(app) - - await wait_until_registry_responsive(app) - logger.info("Connected to docker registry") - yield - - -async def _list_repositories(app: web.Application) -> List[str]: - logger.debug("listing repositories") - # if there are more repos, the Link will be available in the response headers until none available - path = f"/v2/_catalog?n={NUMBER_OF_RETRIEVED_REPOS}" - repos_list: List = [] - while True: - result, headers = await registry_request(app, path) - if result["repositories"]: - repos_list.extend(result["repositories"]) - if "Link" not in headers: - break - path = str(headers["Link"]).split(";")[0].strip("<>") - logger.debug("listed %s repositories", len(repos_list)) - return repos_list - - -async def list_image_tags(app: web.Application, image_key: str) -> List[str]: - logger.debug("listing image tags in %s", image_key) - image_tags: List = [] - # get list of image tags - path = f"/v2/{image_key}/tags/list?n={NUMBER_OF_RETRIEVED_TAGS}" - while True: - tags, headers = await registry_request(app, path) - if tags["tags"]: - image_tags.extend([tag for tag in tags["tags"] if VERSION_REG.match(tag)]) - if "Link" not in headers: - break - path = str(headers["Link"]).split(";")[0].strip("<>") - logger.debug("Found %s image tags in %s", len(image_tags), image_key) + async def _wait_until_registry_responsive(app: FastAPI) -> None: + await _basic_auth_registry_request(app, path="", method="HEAD", timeout=1.0) + + with log_context(_logger, logging.INFO, msg="Connecting to docker registry"): + await _wait_until_registry_responsive(app) + + +async def _list_all_services_task(*, app: FastAPI) -> None: + with 
log_context(_logger, logging.INFO, msg="Updating cache with services"): + await list_services(app, ServiceType.ALL, update_cache=True) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + cache = Cache(Cache.MEMORY) + assert isinstance(cache, SimpleMemoryCache) # nosec + app.state.registry_cache_memory = cache + await _setup_registry(app) + app_settings = get_application_settings(app) + app.state.auto_cache_task = None + if app_settings.DIRECTOR_REGISTRY_CACHING: + app.state.auto_cache_task = create_periodic_task( + _list_all_services_task, + interval=app_settings.DIRECTOR_REGISTRY_CACHING_TTL / 2, + task_name="director-auto-cache-task", + app=app, + ) + + async def on_shutdown() -> None: + if app.state.auto_cache_task: + await cancel_wait_task(app.state.auto_cache_task) + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def _get_prefix(service_type: ServiceType) -> str: + return f"{DIRECTOR_SIMCORE_SERVICES_PREFIX}/{service_type.value}/" + + +_SERVICE_TYPE_FILTER_MAP: Final[dict[ServiceType, tuple[str, ...]]] = { + ServiceType.DYNAMIC: (_get_prefix(ServiceType.DYNAMIC),), + ServiceType.COMPUTATIONAL: (_get_prefix(ServiceType.COMPUTATIONAL),), + ServiceType.ALL: ( + _get_prefix(ServiceType.DYNAMIC), + _get_prefix(ServiceType.COMPUTATIONAL), + ), +} + + +async def _list_repositories_gen( + app: FastAPI, service_type: ServiceType, *, update_cache: bool +) -> AsyncGenerator[list[str], None]: + with log_context(_logger, logging.DEBUG, msg="listing repositories"): + path = f"_catalog?n={get_application_settings(app).DIRECTOR_REGISTRY_CLIENT_MAX_NUMBER_OF_RETRIEVED_OBJECTS}" + result, headers = await registry_request( + app, path=path, method="GET", use_cache=not update_cache + ) # initial call + + while True: + if "Link" in headers: + next_path = ( + str(headers["Link"]).split(";")[0].strip("<>").removeprefix("/v2/") + ) + prefetch_task = asyncio.create_task( + registry_request( + app, path=next_path, method="GET", use_cache=not update_cache + ) + ) + else: + prefetch_task = None + + yield list( + filter( + lambda x: str(x).startswith(_SERVICE_TYPE_FILTER_MAP[service_type]), + result["repositories"], + ) + ) + if prefetch_task: + result, headers = await prefetch_task + else: + return + + +async def list_image_tags_gen( + app: FastAPI, image_key: str, *, update_cache=False +) -> AsyncGenerator[list[str], None]: + with log_context(_logger, logging.DEBUG, msg=f"listing image tags in {image_key}"): + path = f"{image_key}/tags/list?n={get_application_settings(app).DIRECTOR_REGISTRY_CLIENT_MAX_NUMBER_OF_RETRIEVED_OBJECTS}" + tags, headers = await registry_request( + app, path=path, method="GET", use_cache=not update_cache + ) # initial call + assert "tags" in tags # nosec + while True: + if "Link" in headers: + next_path = ( + str(headers["Link"]).split(";")[0].strip("<>").removeprefix("/v2/") + ) + prefetch_task = asyncio.create_task( + registry_request( + app, path=next_path, method="GET", use_cache=not update_cache + ) + ) + else: + prefetch_task = None + + yield ( + list( + filter( + VERSION_REG.match, + tags["tags"], + ) + ) + if tags["tags"] is not None + else [] + ) + if prefetch_task: + tags, headers = await prefetch_task + else: + return + + +async def list_image_tags(app: FastAPI, image_key: str) -> list[str]: + image_tags = [] + async for tags in list_image_tags_gen(app, image_key): + image_tags.extend(tags) return image_tags -async def get_image_labels(app: web.Application, image: str, tag: str) -> Dict: - 
logger.debug("getting image labels of %s:%s", image, tag) - path = f"/v2/{image}/manifests/{tag}" - request_result, _ = await registry_request(app, path) - v1_compatibility_key = json.loads(request_result["history"][0]["v1Compatibility"]) - container_config = v1_compatibility_key.get( +_DOCKER_CONTENT_DIGEST_HEADER = "Docker-Content-Digest" + + +async def get_image_digest(app: FastAPI, image: str, tag: str) -> str | None: + """Returns image manifest digest number or None if fails to obtain it + + The manifest digest is essentially a SHA256 hash of the image manifest + + SEE https://distribution.github.io/distribution/spec/api/#digest-header + """ + path = f"{image}/manifests/{tag}" + _, headers = await registry_request(app, path=path, method="GET", use_cache=True) + + headers = headers or {} + return headers.get(_DOCKER_CONTENT_DIGEST_HEADER, None) + + +async def get_image_labels( + app: FastAPI, image: str, tag: str, *, update_cache=False +) -> tuple[dict[str, str], str | None]: + """Returns image labels and the image manifest digest""" + + _logger.debug("getting image labels of %s:%s", image, tag) + path = f"{image}/manifests/{tag}" + request_result, headers = await registry_request( + app, path=path, method="GET", use_cache=not update_cache + ) + v1_compatibility_key = json_loads(request_result["history"][0]["v1Compatibility"]) + container_config: dict[str, Any] = v1_compatibility_key.get( "container_config", v1_compatibility_key["config"] ) - labels = container_config["Labels"] - logger.debug("retrieved labels of image %s:%s", image, tag) - return labels + labels: dict[str, str] = container_config["Labels"] + + headers = headers or {} + manifest_digest: str | None = headers.get(_DOCKER_CONTENT_DIGEST_HEADER, None) + + _logger.debug("retrieved labels of image %s:%s", image, tag) + + return (labels, manifest_digest) async def get_image_details( - app: web.Application, image_key: str, image_tag: str -) -> Dict: - image_tags: Dict = {} - labels = await get_image_labels(app, image_key, image_tag) + app: FastAPI, image_key: str, image_tag: str, *, update_cache=False +) -> dict[str, Any]: + image_details: dict = {} + labels, image_manifest_digest = await get_image_labels( + app, image_key, image_tag, update_cache=update_cache + ) + + if image_manifest_digest: + # Adds manifest as extra key in the response similar to org.opencontainers.image.base.digest + # SEE https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys + image_details.update({"image_digest": image_manifest_digest}) + if not labels: - return image_tags + return image_details for key in labels: if not key.startswith("io.simcore."): continue try: - label_data = json.loads(labels[key]) - for label_key in label_data.keys(): - image_tags[label_key] = label_data[label_key] + label_data = json_loads(labels[key]) + for label_key in label_data: + image_details[label_key] = label_data[label_key] except json.decoder.JSONDecodeError: logging.exception( "Error while decoding json formatted data from %s:%s", @@ -280,56 +426,70 @@ async def get_image_details( # silently skip this repo return {} - return image_tags + return image_details -async def get_repo_details(app: web.Application, image_key: str) -> List[Dict]: +async def get_repo_details( + app: FastAPI, image_key: str, *, update_cache=False +) -> list[dict[str, Any]]: repo_details = [] - image_tags = await list_image_tags(app, image_key) - tasks = [get_image_details(app, image_key, tag) for tag in image_tags] - results = await 
asyncio.gather(*tasks) - for image_details in results: - if image_details: - repo_details.append(image_details) + async for image_tags in list_image_tags_gen( + app, image_key, update_cache=update_cache + ): + async for image_details_future in limited_as_completed( + ( + get_image_details(app, image_key, tag, update_cache=update_cache) + for tag in image_tags + ), + limit=get_application_settings( + app + ).DIRECTOR_REGISTRY_CLIENT_MAX_CONCURRENT_CALLS, + ): + with log_catch(_logger, reraise=False): + if image_details := await image_details_future: + repo_details.append(image_details) return repo_details -async def list_services(app: web.Application, service_type: ServiceType) -> List[Dict]: - logger.debug("getting list of services") - repos = await _list_repositories(app) - # get the services repos - prefixes = [] - if service_type in [ServiceType.DYNAMIC, ServiceType.ALL]: - prefixes.append(_get_prefix(ServiceType.DYNAMIC)) - if service_type in [ServiceType.COMPUTATIONAL, ServiceType.ALL]: - prefixes.append(_get_prefix(ServiceType.COMPUTATIONAL)) - repos = [x for x in repos if str(x).startswith(tuple(prefixes))] - logger.debug("retrieved list of repos : %s", repos) - - # only list as service if it actually contains the necessary labels - tasks = [get_repo_details(app, repo) for repo in repos] - results = await asyncio.gather(*tasks, return_exceptions=True) - services = [] - for repo_details in results: - if repo_details and isinstance(repo_details, list): - services.extend(repo_details) - elif isinstance(repo_details, Exception): - logger.error("Exception occured while listing services %s", repo_details) - return services +async def list_services( + app: FastAPI, service_type: ServiceType, *, update_cache=False +) -> list[dict]: + with log_context(_logger, logging.DEBUG, msg="listing services"): + services = [] + concurrency_limit = get_application_settings( + app + ).DIRECTOR_REGISTRY_CLIENT_MAX_CONCURRENT_CALLS + async for repos in _list_repositories_gen( + app, service_type, update_cache=update_cache + ): + # only list as service if it actually contains the necessary labels + async for repo_details_future in limited_as_completed( + ( + get_repo_details(app, repo, update_cache=update_cache) + for repo in repos + ), + limit=concurrency_limit, + ): + with log_catch(_logger, reraise=False): + if repo_details := await repo_details_future: + services.extend(repo_details) + + return services async def list_interactive_service_dependencies( - app: web.Application, service_key: str, service_tag: str -) -> List[Dict]: - image_labels = await get_image_labels(app, service_key, service_tag) + app: FastAPI, service_key: str, service_tag: str +) -> list[dict]: + image_labels, _ = await get_image_labels(app, service_key, service_tag) dependency_keys = [] if DEPENDENCIES_LABEL_KEY in image_labels: try: - dependencies = json.loads(image_labels[DEPENDENCIES_LABEL_KEY]) - for dependency in dependencies: - dependency_keys.append( - {"key": dependency["key"], "tag": dependency["tag"]} - ) + dependencies = json_loads(image_labels[DEPENDENCIES_LABEL_KEY]) + dependency_keys = [ + {"key": dependency["key"], "tag": dependency["tag"]} + for dependency in dependencies + ] + except json.decoder.JSONDecodeError: logging.exception( "Incorrect json formatting in %s, skipping...", @@ -339,10 +499,6 @@ async def list_interactive_service_dependencies( return dependency_keys -def _get_prefix(service_type: ServiceType) -> str: - return "{}/{}/".format(config.SIMCORE_SERVICES_PREFIX, service_type.value) - - def 
get_service_first_name(image_key: str) -> str: if str(image_key).startswith(_get_prefix(ServiceType.DYNAMIC)): service_name_suffixes = str(image_key)[len(_get_prefix(ServiceType.DYNAMIC)) :] @@ -353,7 +509,7 @@ def get_service_first_name(image_key: str) -> str: else: return "invalid service" - logger.debug( + _logger.debug( "retrieved service name from repo %s : %s", image_key, service_name_suffixes ) return service_name_suffixes.split("/")[0] @@ -369,107 +525,7 @@ def get_service_last_names(image_key: str) -> str: else: return "invalid service" service_last_name = str(service_name_suffixes).replace("/", "_") - logger.debug( + _logger.debug( "retrieved service last name from repo %s : %s", image_key, service_last_name ) return service_last_name - - -CONTAINER_SPEC_ENTRY_NAME = "ContainerSpec".lower() -RESOURCES_ENTRY_NAME = "Resources".lower() - - -def _validate_kind(entry_to_validate: Dict[str, Any], kind_name: str): - for element in ( - entry_to_validate.get("value", {}) - .get("Reservations", {}) - .get("GenericResources", []) - ): - if element.get("DiscreteResourceSpec", {}).get("Kind") == kind_name: - return True - return False - - -async def get_service_extras( - app: web.Application, image_key: str, image_tag: str -) -> Dict[str, Any]: - # check physical node requirements - # all nodes require "CPU" - result = { - "node_requirements": { - "CPU": config.DEFAULT_MAX_NANO_CPUS / 1.0e09, - "RAM": config.DEFAULT_MAX_MEMORY, - } - } - - labels = await get_image_labels(app, image_key, image_tag) - logger.debug("Compiling service extras from labels %s", pformat(labels)) - - if config.SERVICE_RUNTIME_SETTINGS in labels: - service_settings = json.loads(labels[config.SERVICE_RUNTIME_SETTINGS]) - for entry in service_settings: - entry_name = entry.get("name", "").lower() - entry_value = entry.get("value") - invalid_with_msg = None - - if entry_name == RESOURCES_ENTRY_NAME: - if entry_value and isinstance(entry_value, dict): - res_limit = entry_value.get("Limits", {}) - res_reservation = entry_value.get("Reservations", {}) - # CPU - result["node_requirements"]["CPU"] = ( - float(res_limit.get("NanoCPUs", 0)) - or float(res_reservation.get("NanoCPUs", 0)) - or config.DEFAULT_MAX_NANO_CPUS - ) / 1.0e09 - # RAM - result["node_requirements"]["RAM"] = ( - res_limit.get("MemoryBytes", 0) - or res_reservation.get("MemoryBytes", 0) - or config.DEFAULT_MAX_MEMORY - ) - else: - invalid_with_msg = f"invalid type for resource [{entry_value}]" - - # discrete resources (custom made ones) --- - # TODO: this could be adjusted to separate between GPU and/or VRAM - # check if the service requires GPU support - if not invalid_with_msg and _validate_kind(entry, "VRAM"): - - result["node_requirements"]["GPU"] = 1 - if not invalid_with_msg and _validate_kind(entry, "MPI"): - result["node_requirements"]["MPI"] = 1 - - elif entry_name == CONTAINER_SPEC_ENTRY_NAME: - # NOTE: some minor validation - # expects {'name': 'ContainerSpec', 'type': 'ContainerSpec', 'value': {'Command': [...]}} - if ( - entry_value - and isinstance(entry_value, dict) - and "Command" in entry_value - ): - result["container_spec"] = entry_value - else: - invalid_with_msg = f"invalid container_spec [{entry_value}]" - - if invalid_with_msg: - logger.warning( - "%s entry [%s] encoded in settings labels of service image %s:%s", - invalid_with_msg, - entry, - image_key, - image_tag, - ) - - # get org labels - result.update( - { - sl: labels[dl] - for dl, sl in config.ORG_LABELS_TO_SCHEMA_LABELS.items() - if dl in labels - } - ) - - 
logger.debug("Following service extras were compiled: %s", pformat(result)) - - return result diff --git a/services/director/src/simcore_service_director/resources.py b/services/director/src/simcore_service_director/resources.py deleted file mode 100644 index d5471ce39d0..00000000000 --- a/services/director/src/simcore_service_director/resources.py +++ /dev/null @@ -1,36 +0,0 @@ -import functools -from pathlib import Path - -import pkg_resources -from simcore_service_director import config - - -RESOURCE_OPENAPI_ROOT: str = "api" -RESOURCE_OPEN_API: str = f"{RESOURCE_OPENAPI_ROOT}/{config.API_VERSION}/openapi.yaml" -RESOURCE_NODE_SCHEMA: str = config.NODE_SCHEMA_LOCATION - -""" - List of pkg_resources functions *bound* to current package with the following signature - - function(resource_name) - - Note that resource names must be /-separated paths and - cannot be absolute (i.e. no leading /) or contain relative names like "..". - Do not use os.path routines to manipulate resource paths, as they are not filesystem paths. - - Resources are read/only files/folders -""" -exists = functools.partial(pkg_resources.resource_exists, __name__) -stream = functools.partial(pkg_resources.resource_stream, __name__) -listdir = functools.partial(pkg_resources.resource_listdir, __name__) -isdir = functools.partial(pkg_resources.resource_isdir, __name__) - - -def get_path(resource_name: str) -> Path: - """ Returns a path to a resource - - WARNING: existence of file is not guaranteed. Use resources.exists - WARNING: resource files are supposed to be used as read-only! - """ - resource_path = Path(pkg_resources.resource_filename(__name__, resource_name)) - return resource_path diff --git a/services/director/src/simcore_service_director/rest/__init__.py b/services/director/src/simcore_service_director/rest/__init__.py deleted file mode 100644 index a7048f43474..00000000000 --- a/services/director/src/simcore_service_director/rest/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""GENERATED CODE from codegen.sh -It is advisable to not modify this code if possible. -This will be overriden next time the code generator is called. 
-""" -from .generated_code import ( - models, - util, - routing -) diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/__init__.py b/services/director/src/simcore_service_director/rest/generated_code/models/__init__.py deleted file mode 100644 index d3b3590c0d2..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -# import models into model package -from .error_enveloped import ErrorEnveloped -from .health_check_enveloped import HealthCheckEnveloped -from .inline_response200 import InlineResponse200 -from .inline_response200_data import InlineResponse200Data -from .inline_response201 import InlineResponse201 -from .inline_response2001 import InlineResponse2001 -from .inline_response2001_authors import InlineResponse2001Authors -from .inline_response2001_badges import InlineResponse2001Badges -from .inline_response2002 import InlineResponse2002 -from .inline_response2002_data import InlineResponse2002Data -from .inline_response2002_data_node_requirements import ( - InlineResponse2002DataNodeRequirements, -) -from .inline_response2002_data_service_build_details import ( - InlineResponse2002DataServiceBuildDetails, -) -from .inline_response2003 import InlineResponse2003 -from .inline_response2003_data import InlineResponse2003Data -from .inline_response_default import InlineResponseDefault -from .inline_response_default_error import InlineResponseDefaultError -from .running_service_enveloped import RunningServiceEnveloped -from .running_services_enveloped import RunningServicesEnveloped -from .service_extras_enveloped import ServiceExtrasEnveloped -from .services_enveloped import ServicesEnveloped -from .simcore_node import SimcoreNode diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/base_model_.py b/services/director/src/simcore_service_director/rest/generated_code/models/base_model_.py deleted file mode 100644 index 5d67f4e0a8e..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/base_model_.py +++ /dev/null @@ -1,66 +0,0 @@ -import pprint - -import typing - -from .. import util - -T = typing.TypeVar('T') - - -class Model(object): - # openapiTypes: The key is attribute name and the - # value is attribute type. - openapi_types = {} - - # attributeMap: The key is attribute name and the - # value is json key in definition. 
- attribute_map = {} - - @classmethod - def from_dict(cls: T, dikt: dict) -> T: - """Returns the dict as a model""" - return util.deserialize_model(dikt, cls) - - def to_dict(self) -> dict: - """Returns the model properties as a dict - """ - result = {} - - for attr_key, json_key in self.attribute_map.items(): - value = getattr(self, attr_key) - if value is None: - continue - if isinstance(value, list): - result[json_key] = list(map( - lambda x: x.to_dict() if hasattr(x, "to_dict") else x, - value - )) - elif hasattr(value, "to_dict"): - result[json_key] = value.to_dict() - elif isinstance(value, dict): - result[json_key] = dict(map( - lambda item: (item[0], item[1].to_dict()) - if hasattr(item[1], "to_dict") else item, - value.items() - )) - else: - result[json_key] = value - - return result - - def to_str(self) -> str: - """Returns the string representation of the model - """ - return pprint.pformat(self.to_dict()) - - def __repr__(self): - """For `print` and `pprint`""" - return self.to_str() - - def __eq__(self, other): - """Returns true if both objects are equal""" - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Returns true if both objects are not equal""" - return not self == other diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/error_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/error_enveloped.py deleted file mode 100644 index 80829e28b9e..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/error_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response_default_error import InlineResponseDefaultError -from .. import util - - -class ErrorEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: object=None, error: InlineResponseDefaultError=None): - """ErrorEnveloped - a model defined in OpenAPI - - :param data: The data of this ErrorEnveloped. - :param error: The error of this ErrorEnveloped. - """ - self.openapi_types = { - 'data': object, - 'error': InlineResponseDefaultError - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'ErrorEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The ErrorEnveloped of this ErrorEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this ErrorEnveloped. - - - :return: The data of this ErrorEnveloped. - :rtype: object - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this ErrorEnveloped. - - - :param data: The data of this ErrorEnveloped. - :type data: object - """ - - self._data = data - - @property - def error(self): - """Gets the error of this ErrorEnveloped. - - - :return: The error of this ErrorEnveloped. - :rtype: InlineResponseDefaultError - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this ErrorEnveloped. - - - :param error: The error of this ErrorEnveloped. 
- :type error: InlineResponseDefaultError - """ - if error is None: - raise ValueError("Invalid value for `error`, must not be `None`") - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/health_check_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/health_check_enveloped.py deleted file mode 100644 index 3906d343690..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/health_check_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response200_data import InlineResponse200Data -from .. import util - - -class HealthCheckEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: InlineResponse200Data=None, error: object=None): - """HealthCheckEnveloped - a model defined in OpenAPI - - :param data: The data of this HealthCheckEnveloped. - :param error: The error of this HealthCheckEnveloped. - """ - self.openapi_types = { - 'data': InlineResponse200Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'HealthCheckEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The HealthCheckEnveloped of this HealthCheckEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this HealthCheckEnveloped. - - - :return: The data of this HealthCheckEnveloped. - :rtype: InlineResponse200Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this HealthCheckEnveloped. - - - :param data: The data of this HealthCheckEnveloped. - :type data: InlineResponse200Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this HealthCheckEnveloped. - - - :return: The error of this HealthCheckEnveloped. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this HealthCheckEnveloped. - - - :param error: The error of this HealthCheckEnveloped. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200.py deleted file mode 100644 index 007a500aced..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response200_data import InlineResponse200Data -from .. import util - - -class InlineResponse200(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: InlineResponse200Data=None, error: object=None): - """InlineResponse200 - a model defined in OpenAPI - - :param data: The data of this InlineResponse200. - :param error: The error of this InlineResponse200. 
- """ - self.openapi_types = { - 'data': InlineResponse200Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse200': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200 of this InlineResponse200. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponse200. - - - :return: The data of this InlineResponse200. - :rtype: InlineResponse200Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponse200. - - - :param data: The data of this InlineResponse200. - :type data: InlineResponse200Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponse200. - - - :return: The error of this InlineResponse200. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponse200. - - - :param error: The error of this InlineResponse200. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001.py deleted file mode 100644 index 86c64116439..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .simcore_node import SimcoreNode -from .. import util - - -class InlineResponse2001(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: List[SimcoreNode]=None, error: object=None): - """InlineResponse2001 - a model defined in OpenAPI - - :param data: The data of this InlineResponse2001. - :param error: The error of this InlineResponse2001. - """ - self.openapi_types = { - 'data': List[SimcoreNode], - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2001': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_1 of this InlineResponse2001. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponse2001. - - - :return: The data of this InlineResponse2001. - :rtype: List[SimcoreNode] - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponse2001. - - - :param data: The data of this InlineResponse2001. - :type data: List[SimcoreNode] - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponse2001. - - - :return: The error of this InlineResponse2001. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponse2001. - - - :param error: The error of this InlineResponse2001. 
- :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_authors.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_authors.py deleted file mode 100644 index 2fd9d6d8983..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_authors.py +++ /dev/null @@ -1,120 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse2001Authors(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, name: str=None, email: str=None, affiliation: str=None): - """InlineResponse2001Authors - a model defined in OpenAPI - - :param name: The name of this InlineResponse2001Authors. - :param email: The email of this InlineResponse2001Authors. - :param affiliation: The affiliation of this InlineResponse2001Authors. - """ - self.openapi_types = { - 'name': str, - 'email': str, - 'affiliation': str - } - - self.attribute_map = { - 'name': 'name', - 'email': 'email', - 'affiliation': 'affiliation' - } - - self._name = name - self._email = email - self._affiliation = affiliation - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2001Authors': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_1_authors of this InlineResponse2001Authors. - """ - return util.deserialize_model(dikt, cls) - - @property - def name(self): - """Gets the name of this InlineResponse2001Authors. - - Name of the author - - :return: The name of this InlineResponse2001Authors. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this InlineResponse2001Authors. - - Name of the author - - :param name: The name of this InlineResponse2001Authors. - :type name: str - """ - if name is None: - raise ValueError("Invalid value for `name`, must not be `None`") - - self._name = name - - @property - def email(self): - """Gets the email of this InlineResponse2001Authors. - - Email address - - :return: The email of this InlineResponse2001Authors. - :rtype: str - """ - return self._email - - @email.setter - def email(self, email): - """Sets the email of this InlineResponse2001Authors. - - Email address - - :param email: The email of this InlineResponse2001Authors. - :type email: str - """ - if email is None: - raise ValueError("Invalid value for `email`, must not be `None`") - - self._email = email - - @property - def affiliation(self): - """Gets the affiliation of this InlineResponse2001Authors. - - Affiliation of the author - - :return: The affiliation of this InlineResponse2001Authors. - :rtype: str - """ - return self._affiliation - - @affiliation.setter - def affiliation(self, affiliation): - """Sets the affiliation of this InlineResponse2001Authors. - - Affiliation of the author - - :param affiliation: The affiliation of this InlineResponse2001Authors. 
- :type affiliation: str - """ - - self._affiliation = affiliation diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_badges.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_badges.py deleted file mode 100644 index 94121a17f3a..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_badges.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse2001Badges(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, name: str=None, image: str=None, url: str=None): - """InlineResponse2001Badges - a model defined in OpenAPI - - :param name: The name of this InlineResponse2001Badges. - :param image: The image of this InlineResponse2001Badges. - :param url: The url of this InlineResponse2001Badges. - """ - self.openapi_types = { - 'name': str, - 'image': str, - 'url': str - } - - self.attribute_map = { - 'name': 'name', - 'image': 'image', - 'url': 'url' - } - - self._name = name - self._image = image - self._url = url - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2001Badges': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_1_badges of this InlineResponse2001Badges. - """ - return util.deserialize_model(dikt, cls) - - @property - def name(self): - """Gets the name of this InlineResponse2001Badges. - - Name of the subject - - :return: The name of this InlineResponse2001Badges. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this InlineResponse2001Badges. - - Name of the subject - - :param name: The name of this InlineResponse2001Badges. - :type name: str - """ - if name is None: - raise ValueError("Invalid value for `name`, must not be `None`") - - self._name = name - - @property - def image(self): - """Gets the image of this InlineResponse2001Badges. - - Url to the shield - - :return: The image of this InlineResponse2001Badges. - :rtype: str - """ - return self._image - - @image.setter - def image(self, image): - """Sets the image of this InlineResponse2001Badges. - - Url to the shield - - :param image: The image of this InlineResponse2001Badges. - :type image: str - """ - if image is None: - raise ValueError("Invalid value for `image`, must not be `None`") - - self._image = image - - @property - def url(self): - """Gets the url of this InlineResponse2001Badges. - - Link to status - - :return: The url of this InlineResponse2001Badges. - :rtype: str - """ - return self._url - - @url.setter - def url(self, url): - """Sets the url of this InlineResponse2001Badges. - - Link to status - - :param url: The url of this InlineResponse2001Badges. 
- :type url: str - """ - if url is None: - raise ValueError("Invalid value for `url`, must not be `None`") - - self._url = url diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_data.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_data.py deleted file mode 100644 index fcfa3b0bf69..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2001_data.py +++ /dev/null @@ -1,332 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -import re -from .. import util - - -class InlineResponse2001Data(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, entry_point: str=None, published_port: int=None, service_basepath: str='', service_host: str=None, service_key: str=None, service_message: str=None, service_port: int=None, service_state: str=None, service_uuid: str=None, service_version: str=None): - """InlineResponse2001Data - a model defined in OpenAPI - - :param entry_point: The entry_point of this InlineResponse2001Data. - :param published_port: The published_port of this InlineResponse2001Data. - :param service_basepath: The service_basepath of this InlineResponse2001Data. - :param service_host: The service_host of this InlineResponse2001Data. - :param service_key: The service_key of this InlineResponse2001Data. - :param service_message: The service_message of this InlineResponse2001Data. - :param service_port: The service_port of this InlineResponse2001Data. - :param service_state: The service_state of this InlineResponse2001Data. - :param service_uuid: The service_uuid of this InlineResponse2001Data. - :param service_version: The service_version of this InlineResponse2001Data. - """ - self.openapi_types = { - 'entry_point': str, - 'published_port': int, - 'service_basepath': str, - 'service_host': str, - 'service_key': str, - 'service_message': str, - 'service_port': int, - 'service_state': str, - 'service_uuid': str, - 'service_version': str - } - - self.attribute_map = { - 'entry_point': 'entry_point', - 'published_port': 'published_port', - 'service_basepath': 'service_basepath', - 'service_host': 'service_host', - 'service_key': 'service_key', - 'service_message': 'service_message', - 'service_port': 'service_port', - 'service_state': 'service_state', - 'service_uuid': 'service_uuid', - 'service_version': 'service_version' - } - - self._entry_point = entry_point - self._published_port = published_port - self._service_basepath = service_basepath - self._service_host = service_host - self._service_key = service_key - self._service_message = service_message - self._service_port = service_port - self._service_state = service_state - self._service_uuid = service_uuid - self._service_version = service_version - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2001Data': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_1_data of this InlineResponse2001Data. - """ - return util.deserialize_model(dikt, cls) - - @property - def entry_point(self): - """Gets the entry_point of this InlineResponse2001Data. - - The entry point where the service provides its interface if specified - - :return: The entry_point of this InlineResponse2001Data. 
- :rtype: str - """ - return self._entry_point - - @entry_point.setter - def entry_point(self, entry_point): - """Sets the entry_point of this InlineResponse2001Data. - - The entry point where the service provides its interface if specified - - :param entry_point: The entry_point of this InlineResponse2001Data. - :type entry_point: str - """ - - self._entry_point = entry_point - - @property - def published_port(self): - """Gets the published_port of this InlineResponse2001Data. - - The ports where the service provides its interface - - :return: The published_port of this InlineResponse2001Data. - :rtype: int - """ - return self._published_port - - @published_port.setter - def published_port(self, published_port): - """Sets the published_port of this InlineResponse2001Data. - - The ports where the service provides its interface - - :param published_port: The published_port of this InlineResponse2001Data. - :type published_port: int - """ - if published_port is None: - raise ValueError("Invalid value for `published_port`, must not be `None`") - if published_port is not None and published_port < 1: - raise ValueError("Invalid value for `published_port`, must be a value greater than or equal to `1`") - - self._published_port = published_port - - @property - def service_basepath(self): - """Gets the service_basepath of this InlineResponse2001Data. - - different base path where current service is mounted otherwise defaults to root - - :return: The service_basepath of this InlineResponse2001Data. - :rtype: str - """ - return self._service_basepath - - @service_basepath.setter - def service_basepath(self, service_basepath): - """Sets the service_basepath of this InlineResponse2001Data. - - different base path where current service is mounted otherwise defaults to root - - :param service_basepath: The service_basepath of this InlineResponse2001Data. - :type service_basepath: str - """ - - self._service_basepath = service_basepath - - @property - def service_host(self): - """Gets the service_host of this InlineResponse2001Data. - - service host name within the network - - :return: The service_host of this InlineResponse2001Data. - :rtype: str - """ - return self._service_host - - @service_host.setter - def service_host(self, service_host): - """Sets the service_host of this InlineResponse2001Data. - - service host name within the network - - :param service_host: The service_host of this InlineResponse2001Data. - :type service_host: str - """ - if service_host is None: - raise ValueError("Invalid value for `service_host`, must not be `None`") - - self._service_host = service_host - - @property - def service_key(self): - """Gets the service_key of this InlineResponse2001Data. - - distinctive name for the node based on the docker registry path - - :return: The service_key of this InlineResponse2001Data. - :rtype: str - """ - return self._service_key - - @service_key.setter - def service_key(self, service_key): - """Sets the service_key of this InlineResponse2001Data. - - distinctive name for the node based on the docker registry path - - :param service_key: The service_key of this InlineResponse2001Data. 
- :type service_key: str - """ - if service_key is None: - raise ValueError("Invalid value for `service_key`, must not be `None`") - if service_key is not None and not re.search(r'^(simcore)\/(services)\/(comp|dynamic)(\/[^\s\/]+)+$', service_key): - raise ValueError("Invalid value for `service_key`, must be a follow pattern or equal to `/^(simcore)\/(services)\/(comp|dynamic)(\/[^\s\/]+)+$/`") - - self._service_key = service_key - - @property - def service_message(self): - """Gets the service_message of this InlineResponse2001Data. - - the service message - - :return: The service_message of this InlineResponse2001Data. - :rtype: str - """ - return self._service_message - - @service_message.setter - def service_message(self, service_message): - """Sets the service_message of this InlineResponse2001Data. - - the service message - - :param service_message: The service_message of this InlineResponse2001Data. - :type service_message: str - """ - - self._service_message = service_message - - @property - def service_port(self): - """Gets the service_port of this InlineResponse2001Data. - - port to access the service within the network - - :return: The service_port of this InlineResponse2001Data. - :rtype: int - """ - return self._service_port - - @service_port.setter - def service_port(self, service_port): - """Sets the service_port of this InlineResponse2001Data. - - port to access the service within the network - - :param service_port: The service_port of this InlineResponse2001Data. - :type service_port: int - """ - if service_port is None: - raise ValueError("Invalid value for `service_port`, must not be `None`") - if service_port is not None and service_port < 1: - raise ValueError("Invalid value for `service_port`, must be a value greater than or equal to `1`") - - self._service_port = service_port - - @property - def service_state(self): - """Gets the service_state of this InlineResponse2001Data. - - the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start - - :return: The service_state of this InlineResponse2001Data. - :rtype: str - """ - return self._service_state - - @service_state.setter - def service_state(self, service_state): - """Sets the service_state of this InlineResponse2001Data. - - the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start - - :param service_state: The service_state of this InlineResponse2001Data. - :type service_state: str - """ - allowed_values = ["pending", "pulling", "starting", "running", "complete", "failed"] - if service_state not in allowed_values: - raise ValueError( - "Invalid value for `service_state` ({0}), must be one of {1}" - .format(service_state, allowed_values) - ) - - self._service_state = service_state - - @property - def service_uuid(self): - """Gets the service_uuid of this InlineResponse2001Data. - - The UUID attached to this service - - :return: The service_uuid of this InlineResponse2001Data. - :rtype: str - """ - return self._service_uuid - - @service_uuid.setter - def service_uuid(self, service_uuid): - """Sets the service_uuid of this InlineResponse2001Data. 
- - The UUID attached to this service - - :param service_uuid: The service_uuid of this InlineResponse2001Data. - :type service_uuid: str - """ - if service_uuid is None: - raise ValueError("Invalid value for `service_uuid`, must not be `None`") - - self._service_uuid = service_uuid - - @property - def service_version(self): - """Gets the service_version of this InlineResponse2001Data. - - semantic version number - - :return: The service_version of this InlineResponse2001Data. - :rtype: str - """ - return self._service_version - - @service_version.setter - def service_version(self, service_version): - """Sets the service_version of this InlineResponse2001Data. - - semantic version number - - :param service_version: The service_version of this InlineResponse2001Data. - :type service_version: str - """ - if service_version is None: - raise ValueError("Invalid value for `service_version`, must not be `None`") - if service_version is not None and not re.search(r'^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$', service_version): - raise ValueError("Invalid value for `service_version`, must be a follow pattern or equal to `/^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$/`") - - self._service_version = service_version diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002.py deleted file mode 100644 index ffeb93d434d..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2002_data import InlineResponse2002Data -from .. import util - - -class InlineResponse2002(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: InlineResponse2002Data=None, error: object=None): - """InlineResponse2002 - a model defined in OpenAPI - - :param data: The data of this InlineResponse2002. - :param error: The error of this InlineResponse2002. - """ - self.openapi_types = { - 'data': InlineResponse2002Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2002': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2 of this InlineResponse2002. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponse2002. - - - :return: The data of this InlineResponse2002. - :rtype: InlineResponse2002Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponse2002. - - - :param data: The data of this InlineResponse2002. - :type data: InlineResponse2002Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponse2002. - - - :return: The error of this InlineResponse2002. 
- :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponse2002. - - - :param error: The error of this InlineResponse2002. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_authors.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_authors.py deleted file mode 100644 index 5a6d37c0b68..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_authors.py +++ /dev/null @@ -1,120 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse2002Authors(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, affiliation: str=None, email: str=None, name: str=None): - """InlineResponse2002Authors - a model defined in OpenAPI - - :param affiliation: The affiliation of this InlineResponse2002Authors. - :param email: The email of this InlineResponse2002Authors. - :param name: The name of this InlineResponse2002Authors. - """ - self.openapi_types = { - 'affiliation': str, - 'email': str, - 'name': str - } - - self.attribute_map = { - 'affiliation': 'affiliation', - 'email': 'email', - 'name': 'name' - } - - self._affiliation = affiliation - self._email = email - self._name = name - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2002Authors': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2_authors of this InlineResponse2002Authors. - """ - return util.deserialize_model(dikt, cls) - - @property - def affiliation(self): - """Gets the affiliation of this InlineResponse2002Authors. - - Affiliation of the author - - :return: The affiliation of this InlineResponse2002Authors. - :rtype: str - """ - return self._affiliation - - @affiliation.setter - def affiliation(self, affiliation): - """Sets the affiliation of this InlineResponse2002Authors. - - Affiliation of the author - - :param affiliation: The affiliation of this InlineResponse2002Authors. - :type affiliation: str - """ - - self._affiliation = affiliation - - @property - def email(self): - """Gets the email of this InlineResponse2002Authors. - - Email address - - :return: The email of this InlineResponse2002Authors. - :rtype: str - """ - return self._email - - @email.setter - def email(self, email): - """Sets the email of this InlineResponse2002Authors. - - Email address - - :param email: The email of this InlineResponse2002Authors. - :type email: str - """ - if email is None: - raise ValueError("Invalid value for `email`, must not be `None`") - - self._email = email - - @property - def name(self): - """Gets the name of this InlineResponse2002Authors. - - Name of the author - - :return: The name of this InlineResponse2002Authors. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this InlineResponse2002Authors. - - Name of the author - - :param name: The name of this InlineResponse2002Authors. 
- :type name: str - """ - if name is None: - raise ValueError("Invalid value for `name`, must not be `None`") - - self._name = name diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_badges.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_badges.py deleted file mode 100644 index 20fb1cf7741..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_badges.py +++ /dev/null @@ -1,122 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse2002Badges(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, image: str=None, name: str=None, url: str=None): - """InlineResponse2002Badges - a model defined in OpenAPI - - :param image: The image of this InlineResponse2002Badges. - :param name: The name of this InlineResponse2002Badges. - :param url: The url of this InlineResponse2002Badges. - """ - self.openapi_types = { - 'image': str, - 'name': str, - 'url': str - } - - self.attribute_map = { - 'image': 'image', - 'name': 'name', - 'url': 'url' - } - - self._image = image - self._name = name - self._url = url - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2002Badges': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2_badges of this InlineResponse2002Badges. - """ - return util.deserialize_model(dikt, cls) - - @property - def image(self): - """Gets the image of this InlineResponse2002Badges. - - Url to the shield - - :return: The image of this InlineResponse2002Badges. - :rtype: str - """ - return self._image - - @image.setter - def image(self, image): - """Sets the image of this InlineResponse2002Badges. - - Url to the shield - - :param image: The image of this InlineResponse2002Badges. - :type image: str - """ - if image is None: - raise ValueError("Invalid value for `image`, must not be `None`") - - self._image = image - - @property - def name(self): - """Gets the name of this InlineResponse2002Badges. - - Name of the subject - - :return: The name of this InlineResponse2002Badges. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this InlineResponse2002Badges. - - Name of the subject - - :param name: The name of this InlineResponse2002Badges. - :type name: str - """ - if name is None: - raise ValueError("Invalid value for `name`, must not be `None`") - - self._name = name - - @property - def url(self): - """Gets the url of this InlineResponse2002Badges. - - Link to status - - :return: The url of this InlineResponse2002Badges. - :rtype: str - """ - return self._url - - @url.setter - def url(self, url): - """Sets the url of this InlineResponse2002Badges. - - Link to status - - :param url: The url of this InlineResponse2002Badges. 
- :type url: str - """ - if url is None: - raise ValueError("Invalid value for `url`, must not be `None`") - - self._url = url diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data.py deleted file mode 100644 index d635bfd8354..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime -from typing import Dict, List, Type - -from .. import util -from .base_model_ import Model -from .inline_response2002_data_node_requirements import ( - InlineResponse2002DataNodeRequirements, -) -from .inline_response2002_data_service_build_details import ( - InlineResponse2002DataServiceBuildDetails, -) - - -class InlineResponse2002Data(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__( - self, - node_requirements: InlineResponse2002DataNodeRequirements = None, - service_build_details: InlineResponse2002DataServiceBuildDetails = None, - ): - """InlineResponse2002Data - a model defined in OpenAPI - - :param node_requirements: The node_requirements of this InlineResponse2002Data. - :param service_build_details: The service_build_details of this InlineResponse2002Data. - """ - self.openapi_types = { - "node_requirements": InlineResponse2002DataNodeRequirements, - "service_build_details": InlineResponse2002DataServiceBuildDetails, - } - - self.attribute_map = { - "node_requirements": "node_requirements", - "service_build_details": "service_build_details", - } - - self._node_requirements = node_requirements - self._service_build_details = service_build_details - - @classmethod - def from_dict(cls, dikt: dict) -> "InlineResponse2002Data": - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2_data of this InlineResponse2002Data. - """ - return util.deserialize_model(dikt, cls) - - @property - def node_requirements(self): - """Gets the node_requirements of this InlineResponse2002Data. - - - :return: The node_requirements of this InlineResponse2002Data. - :rtype: InlineResponse2002DataNodeRequirements - """ - return self._node_requirements - - @node_requirements.setter - def node_requirements(self, node_requirements): - """Sets the node_requirements of this InlineResponse2002Data. - - - :param node_requirements: The node_requirements of this InlineResponse2002Data. - :type node_requirements: InlineResponse2002DataNodeRequirements - """ - if node_requirements is None: - raise ValueError( - "Invalid value for `node_requirements`, must not be `None`" - ) - - self._node_requirements = node_requirements - - @property - def service_build_details(self): - """Gets the service_build_details of this InlineResponse2002Data. - - - :return: The service_build_details of this InlineResponse2002Data. - :rtype: InlineResponse2002DataServiceBuildDetails - """ - return self._service_build_details - - @service_build_details.setter - def service_build_details(self, service_build_details): - """Sets the service_build_details of this InlineResponse2002Data. - - - :param service_build_details: The service_build_details of this InlineResponse2002Data. 
- :type service_build_details: InlineResponse2002DataServiceBuildDetails - """ - - self._service_build_details = service_build_details diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_node_requirements.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_node_requirements.py deleted file mode 100644 index 40864748f45..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_node_requirements.py +++ /dev/null @@ -1,146 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime -from typing import Dict, List, Type - -from .. import util -from .base_model_ import Model - - -class InlineResponse2002DataNodeRequirements(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__( - self, cpu: float = 1, gpu: int = None, ram: int = None, mpi: int = None - ): - """InlineResponse2002DataNodeRequirements - a model defined in OpenAPI - - :param cpu: The cpu of this InlineResponse2002DataNodeRequirements. - :param gpu: The gpu of this InlineResponse2002DataNodeRequirements. - :param ram: The ram of this InlineResponse2002DataNodeRequirements. - :param mpi: The mpi of this InlineResponse2002DataNodeRequirements. - """ - self.openapi_types = {"cpu": float, "gpu": int, "ram": int, "mpi": int} - - self.attribute_map = {"cpu": "CPU", "gpu": "GPU", "ram": "RAM", "mpi": "MPI"} - - self._cpu = cpu - self._gpu = gpu - self._ram = ram - self._mpi = mpi - - @classmethod - def from_dict(cls, dikt: dict) -> "InlineResponse2002DataNodeRequirements": - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2_data_node_requirements of this InlineResponse2002DataNodeRequirements. - """ - return util.deserialize_model(dikt, cls) - - @property - def cpu(self): - """Gets the cpu of this InlineResponse2002DataNodeRequirements. - - - :return: The cpu of this InlineResponse2002DataNodeRequirements. - :rtype: float - """ - return self._cpu - - @cpu.setter - def cpu(self, cpu): - """Sets the cpu of this InlineResponse2002DataNodeRequirements. - - - :param cpu: The cpu of this InlineResponse2002DataNodeRequirements. - :type cpu: float - """ - if cpu is None: - raise ValueError("Invalid value for `cpu`, must not be `None`") - if cpu is not None and cpu < 1: - raise ValueError( - "Invalid value for `cpu`, must be a value greater than or equal to `1`" - ) - - self._cpu = cpu - - @property - def gpu(self): - """Gets the gpu of this InlineResponse2002DataNodeRequirements. - - - :return: The gpu of this InlineResponse2002DataNodeRequirements. - :rtype: int - """ - return self._gpu - - @gpu.setter - def gpu(self, gpu): - """Sets the gpu of this InlineResponse2002DataNodeRequirements. - - - :param gpu: The gpu of this InlineResponse2002DataNodeRequirements. - :type gpu: int - """ - if gpu is not None and gpu < 0: - raise ValueError( - "Invalid value for `gpu`, must be a value greater than or equal to `0`" - ) - - self._gpu = gpu - - @property - def ram(self): - """Gets the ram of this InlineResponse2002DataNodeRequirements. - - - :return: The ram of this InlineResponse2002DataNodeRequirements. - :rtype: int - """ - return self._ram - - @ram.setter - def ram(self, ram): - """Sets the ram of this InlineResponse2002DataNodeRequirements. - - - :param ram: The ram of this InlineResponse2002DataNodeRequirements. 
- :type ram: int - """ - if ram is None: - raise ValueError("Invalid value for `ram`, must not be `None`") - if ram is not None and ram < 1024: - raise ValueError( - "Invalid value for `ram`, must be a value greater than or equal to `1024`" - ) - - self._ram = ram - - @property - def mpi(self): - """Gets the mpi of this InlineResponse2002DataNodeRequirements. - - - :return: The mpi of this InlineResponse2002DataNodeRequirements. - :rtype: int - """ - return self._mpi - - @mpi.setter - def mpi(self, mpi): - """Sets the mpi of this InlineResponse2002DataNodeRequirements. - - - :param mpi: The mpi of this InlineResponse2002DataNodeRequirements. - :type mpi: int - """ - if mpi is not None and mpi > 1: - raise ValueError( - "Invalid value for `mpi`, must be a value less than or equal to `1`" - ) - - self._mpi = mpi diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_service_build_details.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_service_build_details.py deleted file mode 100644 index 35ab8473235..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2002_data_service_build_details.py +++ /dev/null @@ -1,110 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse2002DataServiceBuildDetails(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, build_date: str=None, vcs_ref: str=None, vcs_url: str=None): - """InlineResponse2002DataServiceBuildDetails - a model defined in OpenAPI - - :param build_date: The build_date of this InlineResponse2002DataServiceBuildDetails. - :param vcs_ref: The vcs_ref of this InlineResponse2002DataServiceBuildDetails. - :param vcs_url: The vcs_url of this InlineResponse2002DataServiceBuildDetails. - """ - self.openapi_types = { - 'build_date': str, - 'vcs_ref': str, - 'vcs_url': str - } - - self.attribute_map = { - 'build_date': 'build_date', - 'vcs_ref': 'vcs_ref', - 'vcs_url': 'vcs_url' - } - - self._build_date = build_date - self._vcs_ref = vcs_ref - self._vcs_url = vcs_url - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2002DataServiceBuildDetails': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_2_data_service_build_details of this InlineResponse2002DataServiceBuildDetails. - """ - return util.deserialize_model(dikt, cls) - - @property - def build_date(self): - """Gets the build_date of this InlineResponse2002DataServiceBuildDetails. - - - :return: The build_date of this InlineResponse2002DataServiceBuildDetails. - :rtype: str - """ - return self._build_date - - @build_date.setter - def build_date(self, build_date): - """Sets the build_date of this InlineResponse2002DataServiceBuildDetails. - - - :param build_date: The build_date of this InlineResponse2002DataServiceBuildDetails. - :type build_date: str - """ - - self._build_date = build_date - - @property - def vcs_ref(self): - """Gets the vcs_ref of this InlineResponse2002DataServiceBuildDetails. - - - :return: The vcs_ref of this InlineResponse2002DataServiceBuildDetails. - :rtype: str - """ - return self._vcs_ref - - @vcs_ref.setter - def vcs_ref(self, vcs_ref): - """Sets the vcs_ref of this InlineResponse2002DataServiceBuildDetails. 
- - - :param vcs_ref: The vcs_ref of this InlineResponse2002DataServiceBuildDetails. - :type vcs_ref: str - """ - - self._vcs_ref = vcs_ref - - @property - def vcs_url(self): - """Gets the vcs_url of this InlineResponse2002DataServiceBuildDetails. - - - :return: The vcs_url of this InlineResponse2002DataServiceBuildDetails. - :rtype: str - """ - return self._vcs_url - - @vcs_url.setter - def vcs_url(self, vcs_url): - """Sets the vcs_url of this InlineResponse2002DataServiceBuildDetails. - - - :param vcs_url: The vcs_url of this InlineResponse2002DataServiceBuildDetails. - :type vcs_url: str - """ - - self._vcs_url = vcs_url diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003.py deleted file mode 100644 index 3c527146f9d..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2003_data import InlineResponse2003Data -from .. import util - - -class InlineResponse2003(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: List[InlineResponse2003Data]=None, error: object=None): - """InlineResponse2003 - a model defined in OpenAPI - - :param data: The data of this InlineResponse2003. - :param error: The error of this InlineResponse2003. - """ - self.openapi_types = { - 'data': List[InlineResponse2003Data], - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2003': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_3 of this InlineResponse2003. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponse2003. - - - :return: The data of this InlineResponse2003. - :rtype: List[InlineResponse2003Data] - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponse2003. - - - :param data: The data of this InlineResponse2003. - :type data: List[InlineResponse2003Data] - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponse2003. - - - :return: The error of this InlineResponse2003. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponse2003. - - - :param error: The error of this InlineResponse2003. 
- :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003_data.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003_data.py deleted file mode 100644 index be3e6cff3ab..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response2003_data.py +++ /dev/null @@ -1,361 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -import re -from .. import util - - -class InlineResponse2003Data(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, published_port: int=None, entry_point: str=None, service_uuid: str=None, service_key: str=None, service_version: str=None, service_host: str=None, service_port: int=None, service_basepath: str='', service_state: str=None, service_message: str=None, user_id: str=None): - """InlineResponse2003Data - a model defined in OpenAPI - - :param published_port: The published_port of this InlineResponse2003Data. - :param entry_point: The entry_point of this InlineResponse2003Data. - :param service_uuid: The service_uuid of this InlineResponse2003Data. - :param service_key: The service_key of this InlineResponse2003Data. - :param service_version: The service_version of this InlineResponse2003Data. - :param service_host: The service_host of this InlineResponse2003Data. - :param service_port: The service_port of this InlineResponse2003Data. - :param service_basepath: The service_basepath of this InlineResponse2003Data. - :param service_state: The service_state of this InlineResponse2003Data. - :param service_message: The service_message of this InlineResponse2003Data. - :param user_id: The user_id of this InlineResponse2003Data. - """ - self.openapi_types = { - 'published_port': int, - 'entry_point': str, - 'service_uuid': str, - 'service_key': str, - 'service_version': str, - 'service_host': str, - 'service_port': int, - 'service_basepath': str, - 'service_state': str, - 'service_message': str, - 'user_id': str - } - - self.attribute_map = { - 'published_port': 'published_port', - 'entry_point': 'entry_point', - 'service_uuid': 'service_uuid', - 'service_key': 'service_key', - 'service_version': 'service_version', - 'service_host': 'service_host', - 'service_port': 'service_port', - 'service_basepath': 'service_basepath', - 'service_state': 'service_state', - 'service_message': 'service_message', - 'user_id': 'user_id' - } - - self._published_port = published_port - self._entry_point = entry_point - self._service_uuid = service_uuid - self._service_key = service_key - self._service_version = service_version - self._service_host = service_host - self._service_port = service_port - self._service_basepath = service_basepath - self._service_state = service_state - self._service_message = service_message - self._user_id = user_id - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse2003Data': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_3_data of this InlineResponse2003Data. - """ - return util.deserialize_model(dikt, cls) - - @property - def published_port(self): - """Gets the published_port of this InlineResponse2003Data. 
- - The ports where the service provides its interface - - :return: The published_port of this InlineResponse2003Data. - :rtype: int - """ - return self._published_port - - @published_port.setter - def published_port(self, published_port): - """Sets the published_port of this InlineResponse2003Data. - - The ports where the service provides its interface - - :param published_port: The published_port of this InlineResponse2003Data. - :type published_port: int - """ - if published_port is None: - raise ValueError("Invalid value for `published_port`, must not be `None`") - if published_port is not None and published_port < 1: - raise ValueError("Invalid value for `published_port`, must be a value greater than or equal to `1`") - - self._published_port = published_port - - @property - def entry_point(self): - """Gets the entry_point of this InlineResponse2003Data. - - The entry point where the service provides its interface if specified - - :return: The entry_point of this InlineResponse2003Data. - :rtype: str - """ - return self._entry_point - - @entry_point.setter - def entry_point(self, entry_point): - """Sets the entry_point of this InlineResponse2003Data. - - The entry point where the service provides its interface if specified - - :param entry_point: The entry_point of this InlineResponse2003Data. - :type entry_point: str - """ - - self._entry_point = entry_point - - @property - def service_uuid(self): - """Gets the service_uuid of this InlineResponse2003Data. - - The UUID attached to this service - - :return: The service_uuid of this InlineResponse2003Data. - :rtype: str - """ - return self._service_uuid - - @service_uuid.setter - def service_uuid(self, service_uuid): - """Sets the service_uuid of this InlineResponse2003Data. - - The UUID attached to this service - - :param service_uuid: The service_uuid of this InlineResponse2003Data. - :type service_uuid: str - """ - if service_uuid is None: - raise ValueError("Invalid value for `service_uuid`, must not be `None`") - - self._service_uuid = service_uuid - - @property - def service_key(self): - """Gets the service_key of this InlineResponse2003Data. - - distinctive name for the node based on the docker registry path - - :return: The service_key of this InlineResponse2003Data. - :rtype: str - """ - return self._service_key - - @service_key.setter - def service_key(self, service_key): - """Sets the service_key of this InlineResponse2003Data. - - distinctive name for the node based on the docker registry path - - :param service_key: The service_key of this InlineResponse2003Data. - :type service_key: str - """ - if service_key is None: - raise ValueError("Invalid value for `service_key`, must not be `None`") - if service_key is not None and not re.search(r'^(simcore)\/(services)\/(comp|dynamic)(\/[\w\/-]+)+$', service_key): - raise ValueError("Invalid value for `service_key`, must be a follow pattern or equal to `/^(simcore)\/(services)\/(comp|dynamic)(\/[\w\/-]+)+$/`") - - self._service_key = service_key - - @property - def service_version(self): - """Gets the service_version of this InlineResponse2003Data. - - semantic version number - - :return: The service_version of this InlineResponse2003Data. - :rtype: str - """ - return self._service_version - - @service_version.setter - def service_version(self, service_version): - """Sets the service_version of this InlineResponse2003Data. - - semantic version number - - :param service_version: The service_version of this InlineResponse2003Data. 
- :type service_version: str - """ - if service_version is None: - raise ValueError("Invalid value for `service_version`, must not be `None`") - if service_version is not None and not re.search(r'^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$', service_version): - raise ValueError("Invalid value for `service_version`, must be a follow pattern or equal to `/^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$/`") - - self._service_version = service_version - - @property - def service_host(self): - """Gets the service_host of this InlineResponse2003Data. - - service host name within the network - - :return: The service_host of this InlineResponse2003Data. - :rtype: str - """ - return self._service_host - - @service_host.setter - def service_host(self, service_host): - """Sets the service_host of this InlineResponse2003Data. - - service host name within the network - - :param service_host: The service_host of this InlineResponse2003Data. - :type service_host: str - """ - if service_host is None: - raise ValueError("Invalid value for `service_host`, must not be `None`") - - self._service_host = service_host - - @property - def service_port(self): - """Gets the service_port of this InlineResponse2003Data. - - port to access the service within the network - - :return: The service_port of this InlineResponse2003Data. - :rtype: int - """ - return self._service_port - - @service_port.setter - def service_port(self, service_port): - """Sets the service_port of this InlineResponse2003Data. - - port to access the service within the network - - :param service_port: The service_port of this InlineResponse2003Data. - :type service_port: int - """ - if service_port is None: - raise ValueError("Invalid value for `service_port`, must not be `None`") - if service_port is not None and service_port < 1: - raise ValueError("Invalid value for `service_port`, must be a value greater than or equal to `1`") - - self._service_port = service_port - - @property - def service_basepath(self): - """Gets the service_basepath of this InlineResponse2003Data. - - different base path where current service is mounted otherwise defaults to root - - :return: The service_basepath of this InlineResponse2003Data. - :rtype: str - """ - return self._service_basepath - - @service_basepath.setter - def service_basepath(self, service_basepath): - """Sets the service_basepath of this InlineResponse2003Data. - - different base path where current service is mounted otherwise defaults to root - - :param service_basepath: The service_basepath of this InlineResponse2003Data. - :type service_basepath: str - """ - - self._service_basepath = service_basepath - - @property - def service_state(self): - """Gets the service_state of this InlineResponse2003Data. - - the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start - - :return: The service_state of this InlineResponse2003Data. - :rtype: str - """ - return self._service_state - - @service_state.setter - def service_state(self, service_state): - """Sets the service_state of this InlineResponse2003Data. 
- - the service state * 'pending' - The service is waiting for resources to start * 'pulling' - The service is being pulled from the registry * 'starting' - The service is starting * 'running' - The service is running * 'complete' - The service completed * 'failed' - The service failed to start - - :param service_state: The service_state of this InlineResponse2003Data. - :type service_state: str - """ - allowed_values = ["pending", "pulling", "starting", "running", "complete", "failed"] - if service_state not in allowed_values: - raise ValueError( - "Invalid value for `service_state` ({0}), must be one of {1}" - .format(service_state, allowed_values) - ) - - self._service_state = service_state - - @property - def service_message(self): - """Gets the service_message of this InlineResponse2003Data. - - the service message - - :return: The service_message of this InlineResponse2003Data. - :rtype: str - """ - return self._service_message - - @service_message.setter - def service_message(self, service_message): - """Sets the service_message of this InlineResponse2003Data. - - the service message - - :param service_message: The service_message of this InlineResponse2003Data. - :type service_message: str - """ - - self._service_message = service_message - - @property - def user_id(self): - """Gets the user_id of this InlineResponse2003Data. - - the user that started the service - - :return: The user_id of this InlineResponse2003Data. - :rtype: str - """ - return self._user_id - - @user_id.setter - def user_id(self, user_id): - """Sets the user_id of this InlineResponse2003Data. - - the user that started the service - - :param user_id: The user_id of this InlineResponse2003Data. - :type user_id: str - """ - if user_id is None: - raise ValueError("Invalid value for `user_id`, must not be `None`") - - self._user_id = user_id diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200_data.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200_data.py deleted file mode 100644 index 1cc495dbbbd..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response200_data.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. import util - - -class InlineResponse200Data(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, name: str=None, status: str=None, api_version: str=None, version: str=None): - """InlineResponse200Data - a model defined in OpenAPI - - :param name: The name of this InlineResponse200Data. - :param status: The status of this InlineResponse200Data. - :param api_version: The api_version of this InlineResponse200Data. - :param version: The version of this InlineResponse200Data. - """ - self.openapi_types = { - 'name': str, - 'status': str, - 'api_version': str, - 'version': str - } - - self.attribute_map = { - 'name': 'name', - 'status': 'status', - 'api_version': 'api_version', - 'version': 'version' - } - - self._name = name - self._status = status - self._api_version = api_version - self._version = version - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse200Data': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_200_data of this InlineResponse200Data. 
- """ - return util.deserialize_model(dikt, cls) - - @property - def name(self): - """Gets the name of this InlineResponse200Data. - - - :return: The name of this InlineResponse200Data. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this InlineResponse200Data. - - - :param name: The name of this InlineResponse200Data. - :type name: str - """ - - self._name = name - - @property - def status(self): - """Gets the status of this InlineResponse200Data. - - - :return: The status of this InlineResponse200Data. - :rtype: str - """ - return self._status - - @status.setter - def status(self, status): - """Sets the status of this InlineResponse200Data. - - - :param status: The status of this InlineResponse200Data. - :type status: str - """ - - self._status = status - - @property - def api_version(self): - """Gets the api_version of this InlineResponse200Data. - - - :return: The api_version of this InlineResponse200Data. - :rtype: str - """ - return self._api_version - - @api_version.setter - def api_version(self, api_version): - """Sets the api_version of this InlineResponse200Data. - - - :param api_version: The api_version of this InlineResponse200Data. - :type api_version: str - """ - - self._api_version = api_version - - @property - def version(self): - """Gets the version of this InlineResponse200Data. - - - :return: The version of this InlineResponse200Data. - :rtype: str - """ - return self._version - - @version.setter - def version(self, version): - """Sets the version of this InlineResponse200Data. - - - :param version: The version of this InlineResponse200Data. - :type version: str - """ - - self._version = version diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response201.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response201.py deleted file mode 100644 index 221a60352b6..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response201.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2003_data import InlineResponse2003Data -from .. import util - - -class InlineResponse201(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: InlineResponse2003Data=None, error: object=None): - """InlineResponse201 - a model defined in OpenAPI - - :param data: The data of this InlineResponse201. - :param error: The error of this InlineResponse201. - """ - self.openapi_types = { - 'data': InlineResponse2003Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponse201': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_201 of this InlineResponse201. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponse201. - - - :return: The data of this InlineResponse201. - :rtype: InlineResponse2003Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponse201. - - - :param data: The data of this InlineResponse201. 
- :type data: InlineResponse2003Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponse201. - - - :return: The error of this InlineResponse201. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponse201. - - - :param error: The error of this InlineResponse201. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default.py deleted file mode 100644 index 3dd0b09b399..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response_default_error import InlineResponseDefaultError -from .. import util - - -class InlineResponseDefault(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: object=None, error: InlineResponseDefaultError=None): - """InlineResponseDefault - a model defined in OpenAPI - - :param data: The data of this InlineResponseDefault. - :param error: The error of this InlineResponseDefault. - """ - self.openapi_types = { - 'data': object, - 'error': InlineResponseDefaultError - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponseDefault': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_default of this InlineResponseDefault. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this InlineResponseDefault. - - - :return: The data of this InlineResponseDefault. - :rtype: object - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this InlineResponseDefault. - - - :param data: The data of this InlineResponseDefault. - :type data: object - """ - - self._data = data - - @property - def error(self): - """Gets the error of this InlineResponseDefault. - - - :return: The error of this InlineResponseDefault. - :rtype: InlineResponseDefaultError - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this InlineResponseDefault. - - - :param error: The error of this InlineResponseDefault. - :type error: InlineResponseDefaultError - """ - if error is None: - raise ValueError("Invalid value for `error`, must not be `None`") - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default_error.py b/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default_error.py deleted file mode 100644 index 95b5cf26175..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/inline_response_default_error.py +++ /dev/null @@ -1,118 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .. 
import util - - -class InlineResponseDefaultError(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, message: str=None, errors: List[object]=None, status: int=None): - """InlineResponseDefaultError - a model defined in OpenAPI - - :param message: The message of this InlineResponseDefaultError. - :param errors: The errors of this InlineResponseDefaultError. - :param status: The status of this InlineResponseDefaultError. - """ - self.openapi_types = { - 'message': str, - 'errors': List[object], - 'status': int - } - - self.attribute_map = { - 'message': 'message', - 'errors': 'errors', - 'status': 'status' - } - - self._message = message - self._errors = errors - self._status = status - - @classmethod - def from_dict(cls, dikt: dict) -> 'InlineResponseDefaultError': - """Returns the dict as a model - - :param dikt: A dict. - :return: The inline_response_default_error of this InlineResponseDefaultError. - """ - return util.deserialize_model(dikt, cls) - - @property - def message(self): - """Gets the message of this InlineResponseDefaultError. - - Error message - - :return: The message of this InlineResponseDefaultError. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message): - """Sets the message of this InlineResponseDefaultError. - - Error message - - :param message: The message of this InlineResponseDefaultError. - :type message: str - """ - if message is None: - raise ValueError("Invalid value for `message`, must not be `None`") - - self._message = message - - @property - def errors(self): - """Gets the errors of this InlineResponseDefaultError. - - - :return: The errors of this InlineResponseDefaultError. - :rtype: List[object] - """ - return self._errors - - @errors.setter - def errors(self, errors): - """Sets the errors of this InlineResponseDefaultError. - - - :param errors: The errors of this InlineResponseDefaultError. - :type errors: List[object] - """ - - self._errors = errors - - @property - def status(self): - """Gets the status of this InlineResponseDefaultError. - - Error code - - :return: The status of this InlineResponseDefaultError. - :rtype: int - """ - return self._status - - @status.setter - def status(self, status): - """Sets the status of this InlineResponseDefaultError. - - Error code - - :param status: The status of this InlineResponseDefaultError. - :type status: int - """ - if status is None: - raise ValueError("Invalid value for `status`, must not be `None`") - - self._status = status diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/running_service_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/running_service_enveloped.py deleted file mode 100644 index 2075fb9fd91..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/running_service_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2003_data import InlineResponse2003Data -from .. import util - - -class RunningServiceEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. 
- """ - - def __init__(self, data: InlineResponse2003Data=None, error: object=None): - """RunningServiceEnveloped - a model defined in OpenAPI - - :param data: The data of this RunningServiceEnveloped. - :param error: The error of this RunningServiceEnveloped. - """ - self.openapi_types = { - 'data': InlineResponse2003Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'RunningServiceEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The RunningServiceEnveloped of this RunningServiceEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this RunningServiceEnveloped. - - - :return: The data of this RunningServiceEnveloped. - :rtype: InlineResponse2003Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this RunningServiceEnveloped. - - - :param data: The data of this RunningServiceEnveloped. - :type data: InlineResponse2003Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this RunningServiceEnveloped. - - - :return: The error of this RunningServiceEnveloped. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this RunningServiceEnveloped. - - - :param error: The error of this RunningServiceEnveloped. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/running_services_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/running_services_enveloped.py deleted file mode 100644 index 104508d8de4..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/running_services_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2003_data import InlineResponse2003Data -from .. import util - - -class RunningServicesEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: List[InlineResponse2003Data]=None, error: object=None): - """RunningServicesEnveloped - a model defined in OpenAPI - - :param data: The data of this RunningServicesEnveloped. - :param error: The error of this RunningServicesEnveloped. - """ - self.openapi_types = { - 'data': List[InlineResponse2003Data], - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'RunningServicesEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The RunningServicesEnveloped of this RunningServicesEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this RunningServicesEnveloped. - - - :return: The data of this RunningServicesEnveloped. - :rtype: List[InlineResponse2003Data] - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this RunningServicesEnveloped. - - - :param data: The data of this RunningServicesEnveloped. 
- :type data: List[InlineResponse2003Data] - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this RunningServicesEnveloped. - - - :return: The error of this RunningServicesEnveloped. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this RunningServicesEnveloped. - - - :param error: The error of this RunningServicesEnveloped. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/service_extras_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/service_extras_enveloped.py deleted file mode 100644 index dcb444a8725..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/service_extras_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .inline_response2002_data import InlineResponse2002Data -from .. import util - - -class ServiceExtrasEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: InlineResponse2002Data=None, error: object=None): - """ServiceExtrasEnveloped - a model defined in OpenAPI - - :param data: The data of this ServiceExtrasEnveloped. - :param error: The error of this ServiceExtrasEnveloped. - """ - self.openapi_types = { - 'data': InlineResponse2002Data, - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'ServiceExtrasEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The ServiceExtrasEnveloped of this ServiceExtrasEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this ServiceExtrasEnveloped. - - - :return: The data of this ServiceExtrasEnveloped. - :rtype: InlineResponse2002Data - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this ServiceExtrasEnveloped. - - - :param data: The data of this ServiceExtrasEnveloped. - :type data: InlineResponse2002Data - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this ServiceExtrasEnveloped. - - - :return: The error of this ServiceExtrasEnveloped. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this ServiceExtrasEnveloped. - - - :param error: The error of this ServiceExtrasEnveloped. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/services_enveloped.py b/services/director/src/simcore_service_director/rest/generated_code/models/services_enveloped.py deleted file mode 100644 index b101b17ecb8..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/services_enveloped.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from datetime import date, datetime - -from typing import List, Dict, Type - -from .base_model_ import Model -from .simcore_node import SimcoreNode -from .. 
import util - - -class ServicesEnveloped(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__(self, data: List[SimcoreNode]=None, error: object=None): - """ServicesEnveloped - a model defined in OpenAPI - - :param data: The data of this ServicesEnveloped. - :param error: The error of this ServicesEnveloped. - """ - self.openapi_types = { - 'data': List[SimcoreNode], - 'error': object - } - - self.attribute_map = { - 'data': 'data', - 'error': 'error' - } - - self._data = data - self._error = error - - @classmethod - def from_dict(cls, dikt: dict) -> 'ServicesEnveloped': - """Returns the dict as a model - - :param dikt: A dict. - :return: The ServicesEnveloped of this ServicesEnveloped. - """ - return util.deserialize_model(dikt, cls) - - @property - def data(self): - """Gets the data of this ServicesEnveloped. - - - :return: The data of this ServicesEnveloped. - :rtype: List[SimcoreNode] - """ - return self._data - - @data.setter - def data(self, data): - """Sets the data of this ServicesEnveloped. - - - :param data: The data of this ServicesEnveloped. - :type data: List[SimcoreNode] - """ - if data is None: - raise ValueError("Invalid value for `data`, must not be `None`") - - self._data = data - - @property - def error(self): - """Gets the error of this ServicesEnveloped. - - - :return: The error of this ServicesEnveloped. - :rtype: object - """ - return self._error - - @error.setter - def error(self, error): - """Sets the error of this ServicesEnveloped. - - - :param error: The error of this ServicesEnveloped. - :type error: object - """ - - self._error = error diff --git a/services/director/src/simcore_service_director/rest/generated_code/models/simcore_node.py b/services/director/src/simcore_service_director/rest/generated_code/models/simcore_node.py deleted file mode 100644 index 541ee8a4b95..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/models/simcore_node.py +++ /dev/null @@ -1,414 +0,0 @@ -# coding: utf-8 - -import re -from datetime import date, datetime -from typing import Dict, List, Type - -from .. import util -from .base_model_ import Model -from .inline_response2001_authors import InlineResponse2001Authors -from .inline_response2001_badges import InlineResponse2001Badges - - -class SimcoreNode(Model): - """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). - - Do not edit the class manually. - """ - - def __init__( - self, - key: str = None, - integration_version: str = None, - version: str = None, - type: str = None, - name: str = None, - thumbnail: str = None, - badges: List[InlineResponse2001Badges] = None, - description: str = None, - authors: List[InlineResponse2001Authors] = None, - contact: str = None, - inputs: Dict[str, object] = None, - outputs: Dict[str, object] = None, - ): - """SimcoreNode - a model defined in OpenAPI - - :param key: The key of this SimcoreNode. - :param integration_version: The integration_version of this SimcoreNode. - :param version: The version of this SimcoreNode. - :param type: The type of this SimcoreNode. - :param name: The name of this SimcoreNode. - :param thumbnail: The thumbnail of this SimcoreNode. - :param badges: The badges of this SimcoreNode. - :param description: The description of this SimcoreNode. - :param authors: The authors of this SimcoreNode. - :param contact: The contact of this SimcoreNode. 
- :param inputs: The inputs of this SimcoreNode. - :param outputs: The outputs of this SimcoreNode. - """ - self.openapi_types = { - "key": str, - "integration_version": str, - "version": str, - "type": str, - "name": str, - "thumbnail": str, - "badges": List[InlineResponse2001Badges], - "description": str, - "authors": List[InlineResponse2001Authors], - "contact": str, - "inputs": Dict[str, object], - "outputs": Dict[str, object], - } - - self.attribute_map = { - "key": "key", - "integration_version": "integration-version", - "version": "version", - "type": "type", - "name": "name", - "thumbnail": "thumbnail", - "badges": "badges", - "description": "description", - "authors": "authors", - "contact": "contact", - "inputs": "inputs", - "outputs": "outputs", - } - - self._key = key - self._integration_version = integration_version - self._version = version - self._type = type - self._name = name - self._thumbnail = thumbnail - self._badges = badges - self._description = description - self._authors = authors - self._contact = contact - self._inputs = inputs - self._outputs = outputs - - @classmethod - def from_dict(cls, dikt: dict) -> "SimcoreNode": - """Returns the dict as a model - - :param dikt: A dict. - :return: The simcore_node of this SimcoreNode. - """ - return util.deserialize_model(dikt, cls) - - @property - def key(self): - """Gets the key of this SimcoreNode. - - distinctive name for the node based on the docker registry path - - :return: The key of this SimcoreNode. - :rtype: str - """ - return self._key - - @key.setter - def key(self, key): - """Sets the key of this SimcoreNode. - - distinctive name for the node based on the docker registry path - - :param key: The key of this SimcoreNode. - :type key: str - """ - if key is None: - raise ValueError("Invalid value for `key`, must not be `None`") - if key is not None and not re.search( - r"^(simcore)\/(services)\/(comp|dynamic|frontend)(\/[\w\/-]+)+$", key - ): - raise ValueError( - "Invalid value for `key`, must be a follow pattern or equal to `/^(simcore)\/(services)\/(comp|dynamic|frontend)(\/[\w\/-]+)+$/`" - ) - - self._key = key - - @property - def integration_version(self): - """Gets the integration_version of this SimcoreNode. - - integration version number - - :return: The integration_version of this SimcoreNode. - :rtype: str - """ - return self._integration_version - - @integration_version.setter - def integration_version(self, integration_version): - """Sets the integration_version of this SimcoreNode. - - integration version number - - :param integration_version: The integration_version of this SimcoreNode. - :type integration_version: str - """ - if integration_version is not None and not re.search( - r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$", - integration_version, - ): - raise ValueError( - "Invalid value for `integration_version`, must be a follow pattern or equal to `/^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$/`" - ) - - self._integration_version = integration_version - - @property - def version(self): - """Gets the version of this SimcoreNode. - - service version number - - :return: The version of this SimcoreNode. - :rtype: str - """ - return self._version - - @version.setter - def version(self, version): - """Sets the version of this SimcoreNode. 
- - service version number - - :param version: The version of this SimcoreNode. - :type version: str - """ - if version is None: - raise ValueError("Invalid value for `version`, must not be `None`") - if version is not None and not re.search( - r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$", - version, - ): - raise ValueError( - "Invalid value for `version`, must be a follow pattern or equal to `/^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$/`" - ) - - self._version = version - - @property - def type(self): - """Gets the type of this SimcoreNode. - - service type - - :return: The type of this SimcoreNode. - :rtype: str - """ - return self._type - - @type.setter - def type(self, type): - """Sets the type of this SimcoreNode. - - service type - - :param type: The type of this SimcoreNode. - :type type: str - """ - allowed_values = ["frontend", "computational", "dynamic"] - if type not in allowed_values: - raise ValueError( - "Invalid value for `type` ({0}), must be one of {1}".format( - type, allowed_values - ) - ) - - self._type = type - - @property - def name(self): - """Gets the name of this SimcoreNode. - - short, human readable name for the node - - :return: The name of this SimcoreNode. - :rtype: str - """ - return self._name - - @name.setter - def name(self, name): - """Sets the name of this SimcoreNode. - - short, human readable name for the node - - :param name: The name of this SimcoreNode. - :type name: str - """ - if name is None: - raise ValueError("Invalid value for `name`, must not be `None`") - - self._name = name - - @property - def thumbnail(self): - """Gets the thumbnail of this SimcoreNode. - - url to the thumbnail - - :return: The thumbnail of this SimcoreNode. - :rtype: str - """ - return self._thumbnail - - @thumbnail.setter - def thumbnail(self, thumbnail): - """Sets the thumbnail of this SimcoreNode. - - url to the thumbnail - - :param thumbnail: The thumbnail of this SimcoreNode. - :type thumbnail: str - """ - - self._thumbnail = thumbnail - - @property - def badges(self): - """Gets the badges of this SimcoreNode. - - - :return: The badges of this SimcoreNode. - :rtype: List[InlineResponse2001Badges] - """ - return self._badges - - @badges.setter - def badges(self, badges): - """Sets the badges of this SimcoreNode. - - - :param badges: The badges of this SimcoreNode. - :type badges: List[InlineResponse2001Badges] - """ - - self._badges = badges - - @property - def description(self): - """Gets the description of this SimcoreNode. - - human readable description of the purpose of the node - - :return: The description of this SimcoreNode. - :rtype: str - """ - return self._description - - @description.setter - def description(self, description): - """Sets the description of this SimcoreNode. - - human readable description of the purpose of the node - - :param description: The description of this SimcoreNode. - :type description: str - """ - if description is None: - raise ValueError("Invalid value for `description`, must not be `None`") - - self._description = description - - @property - def authors(self): - """Gets the authors of this SimcoreNode. - - - :return: The authors of this SimcoreNode. - :rtype: List[InlineResponse2001Authors] - """ - return self._authors - - @authors.setter - def authors(self, authors): - """Sets the authors of this SimcoreNode. 
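The `version` and `integration_version` setters deleted above both enforce the same semantic-versioning regular expression. A small stand-alone check using the pattern copied from the generated setters; the helper name is illustrative:

```python
import re

# Semantic-versioning pattern as used by the deleted SimcoreNode setters.
_SEMVER_RE = re.compile(
    r"^(0|[1-9]\d*)(\.(0|[1-9]\d*)){2}"
    r"(-(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*)(\.(0|[1-9]\d*|\d*[-a-zA-Z][-\da-zA-Z]*))*)?"
    r"(\+[-\da-zA-Z]+(\.[-\da-zA-Z-]+)*)?$"
)


def is_valid_service_version(version: str) -> bool:
    # illustrative helper, not part of the generated code
    return _SEMVER_RE.match(version) is not None


assert is_valid_service_version("1.0.0")
assert is_valid_service_version("2.1.3-beta.1+build42")
assert not is_valid_service_version("1.0")
```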
- - - :param authors: The authors of this SimcoreNode. - :type authors: List[InlineResponse2001Authors] - """ - if authors is None: - raise ValueError("Invalid value for `authors`, must not be `None`") - - self._authors = authors - - @property - def contact(self): - """Gets the contact of this SimcoreNode. - - email to correspond to the authors about the node - - :return: The contact of this SimcoreNode. - :rtype: str - """ - return self._contact - - @contact.setter - def contact(self, contact): - """Sets the contact of this SimcoreNode. - - email to correspond to the authors about the node - - :param contact: The contact of this SimcoreNode. - :type contact: str - """ - if contact is None: - raise ValueError("Invalid value for `contact`, must not be `None`") - - self._contact = contact - - @property - def inputs(self): - """Gets the inputs of this SimcoreNode. - - definition of the inputs of this node - - :return: The inputs of this SimcoreNode. - :rtype: Dict[str, object] - """ - return self._inputs - - @inputs.setter - def inputs(self, inputs): - """Sets the inputs of this SimcoreNode. - - definition of the inputs of this node - - :param inputs: The inputs of this SimcoreNode. - :type inputs: Dict[str, object] - """ - if inputs is None: - raise ValueError("Invalid value for `inputs`, must not be `None`") - - self._inputs = inputs - - @property - def outputs(self): - """Gets the outputs of this SimcoreNode. - - definition of the outputs of this node - - :return: The outputs of this SimcoreNode. - :rtype: Dict[str, object] - """ - return self._outputs - - @outputs.setter - def outputs(self, outputs): - """Sets the outputs of this SimcoreNode. - - definition of the outputs of this node - - :param outputs: The outputs of this SimcoreNode. - :type outputs: Dict[str, object] - """ - if outputs is None: - raise ValueError("Invalid value for `outputs`, must not be `None`") - - self._outputs = outputs diff --git a/services/director/src/simcore_service_director/rest/generated_code/routing.py b/services/director/src/simcore_service_director/rest/generated_code/routing.py deleted file mode 100644 index 3cf2a4d57b7..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/routing.py +++ /dev/null @@ -1,94 +0,0 @@ -"""GENERATED CODE from codegen.sh -It is advisable to not modify this code if possible. -This will be overriden next time the code generator is called. - -use create_web_app to initialise the web application using the specification file. -The base folder is the root of the package. -""" - - -import logging -from pathlib import Path - -from aiohttp import hdrs, web -from aiohttp_apiset import SwaggerRouter -from aiohttp_apiset.exceptions import ValidationError -from aiohttp_apiset.middlewares import Jsonify, jsonify -from aiohttp_apiset.swagger.loader import ExtendedSchemaFile -from aiohttp_apiset.swagger.operations import OperationIdMapping - -from .. 
import handlers -from .models.base_model_ import Model - -log = logging.getLogger(__name__) - -@web.middleware -async def __handle_errors(request, handler): - try: - log.debug("error middleware handling request %s to handler %s", request, handler) - response = await handler(request) - return response - except ValidationError as ex: - # aiohttp apiset errors - log.exception("error happened in handling route") - error = dict(status=ex.status, message=ex.to_tree()) - error_enveloped = dict(error=error) - return web.json_response(error_enveloped, status=ex.status) - except web.HTTPError as ex: - log.exception("error happened in handling route") - error = dict(status=ex.status, message=str(ex.reason)) - error_enveloped = dict(data=error) - return web.json_response(error_enveloped, status=ex.status) - - -def create_web_app(base_folder, spec_file, additional_middlewares = None): - # create the default mapping of the operationId to the implementation code in handlers - opmap = __create_default_operation_mapping(Path(base_folder / spec_file)) - - # generate a version 3 of the API documentation - router = SwaggerRouter( - swagger_ui='/apidoc/', - version_ui=3, # forces the use of version 3 by default - search_dirs=[base_folder], - default_validate=True, - ) - - # add automatic jsonification of the models located in generated code - jsonify.singleton = Jsonify(indent=3, ensure_ascii=False) - jsonify.singleton.add_converter(Model, lambda o: o.to_dict(), score=0) - - middlewares = [jsonify, __handle_errors] - if additional_middlewares: - middlewares.extend(additional_middlewares) - # create the web application using the API - app = web.Application( - router=router, - middlewares=middlewares, - ) - router.set_cors(app, domains='*', headers=( - (hdrs.ACCESS_CONTROL_EXPOSE_HEADERS, hdrs.AUTHORIZATION), - )) - - # Include our specifications in a router, - # is now available in the swagger-ui to the address http://localhost:8080/swagger/?spec=v1 - router.include( - spec=Path(base_folder / spec_file), - operationId_mapping=opmap, - name='v0', # name to access in swagger-ui, - basePath="/v0" # BUG: in apiset with openapi 3.0.0 [Github bug entry](https://github.com/aamalev/aiohttp_apiset/issues/45) - ) - - return app - -def __create_default_operation_mapping(specs_file): - operation_mapping = {} - yaml_specs = ExtendedSchemaFile(specs_file) - paths = yaml_specs['paths'] - for path in paths.items(): - for method in path[1].items(): # can be get, post, patch, put, delete... 
- op_str = "operationId" - if op_str not in method[1]: - raise Exception("The API %s does not contain the operationId tag for route %s %s" % (specs_file, path[0], method[0])) - operation_id = method[1][op_str] - operation_mapping[operation_id] = getattr(handlers, operation_id) - return OperationIdMapping(**operation_mapping) diff --git a/services/director/src/simcore_service_director/rest/generated_code/typing_utils.py b/services/director/src/simcore_service_director/rest/generated_code/typing_utils.py deleted file mode 100644 index 0563f81fd53..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/typing_utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding: utf-8 - -import sys - -if sys.version_info < (3, 7): - import typing - - def is_generic(klass): - """ Determine whether klass is a generic class """ - return type(klass) == typing.GenericMeta - - def is_dict(klass): - """ Determine whether klass is a Dict """ - return klass.__extra__ == dict - - def is_list(klass): - """ Determine whether klass is a List """ - return klass.__extra__ == list - -else: - - def is_generic(klass): - """ Determine whether klass is a generic class """ - return hasattr(klass, '__origin__') - - def is_dict(klass): - """ Determine whether klass is a Dict """ - return klass.__origin__ == dict - - def is_list(klass): - """ Determine whether klass is a List """ - return klass.__origin__ == list diff --git a/services/director/src/simcore_service_director/rest/generated_code/util.py b/services/director/src/simcore_service_director/rest/generated_code/util.py deleted file mode 100644 index a9ab1e81939..00000000000 --- a/services/director/src/simcore_service_director/rest/generated_code/util.py +++ /dev/null @@ -1,131 +0,0 @@ -import datetime - -import typing -from typing import Union -from . import typing_utils - -T = typing.TypeVar('T') -Class = typing.Type[T] - - -def _deserialize(data: Union[dict, list, str], klass: Union[Class, str]) -> Union[dict, list, Class, int, float, str, bool, datetime.date, datetime.datetime]: - """Deserializes dict, list, str into an object. - - :param data: dict, list or str. - :param klass: class literal, or string of class name. - - :return: object. - """ - if data is None: - return None - - if klass in (int, float, str, bool): - return _deserialize_primitive(data, klass) - elif klass == object: - return _deserialize_object(data) - elif klass == datetime.date: - return deserialize_date(data) - elif klass == datetime.datetime: - return deserialize_datetime(data) - elif typing_utils.is_generic(klass): - if typing_utils.is_list(klass): - return _deserialize_list(data, klass.__args__[0]) - if typing_utils.is_dict(klass): - return _deserialize_dict(data, klass.__args__[1]) - else: - return deserialize_model(data, klass) - - -def _deserialize_primitive(data, klass: Class) -> Union[Class, int, float, str, bool]: - """Deserializes to primitive type. - - :param data: data to deserialize. - :param klass: class literal. - - :return: int, float, str, bool. - """ - try: - value = klass(data) - except (UnicodeEncodeError, TypeError): - value = data - return value - - -def _deserialize_object(value: T) -> T: - """Return an original value. - - :return: object. - """ - return value - - -def deserialize_date(string: str) -> datetime.date: - """Deserializes string to date. - - :param string: str. - :return: date. 
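The deleted `_deserialize` helper above dispatches on the declared type: primitives are coerced, `List[...]` / `Dict[...]` recurse into their type arguments via the `typing_utils` shims, and anything else falls through to `deserialize_model`. A stand-alone sketch of that dispatch using `typing.get_origin`/`get_args` (Python ≥ 3.8), the stdlib equivalent of those shims; the function is illustrative and skips the model branch:

```python
from typing import Any, Dict, List, get_args, get_origin


def deserialize(data: Any, klass: Any) -> Any:
    # simplified dispatch mirroring the generated util._deserialize
    if data is None:
        return None
    if klass in (int, float, str, bool):
        return klass(data)
    origin = get_origin(klass)
    if origin is list:
        (item_type,) = get_args(klass)
        return [deserialize(item, item_type) for item in data]
    if origin is dict:
        _key_type, value_type = get_args(klass)
        return {k: deserialize(v, value_type) for k, v in data.items()}
    return data  # models and plain objects would be handled here


assert deserialize(["1", "2"], List[int]) == [1, 2]
assert deserialize({"cpu": "4"}, Dict[str, int]) == {"cpu": 4}
```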
- """ - try: - from dateutil.parser import parse - return parse(string).date() - except ImportError: - return string - - -def deserialize_datetime(string: str) -> datetime.datetime: - """Deserializes string to datetime. - - The string should be in iso8601 datetime format. - - :param string: str. - :return: datetime. - """ - try: - from dateutil.parser import parse - return parse(string) - except ImportError: - return string - - -def deserialize_model(data: Union[dict, list], klass: T) -> T: - """Deserializes list or dict to model. - - :param data: dict, list. - :param klass: class literal. - :return: model object. - """ - instance = klass() - - if not instance.openapi_types: - return data - - if data is not None and isinstance(data, (list, dict)): - for attr, attr_type in instance.openapi_types.items(): - attr_key = instance.attribute_map[attr] - if attr_key in data: - value = data[attr_key] - setattr(instance, attr, _deserialize(value, attr_type)) - - return instance - - -def _deserialize_list(data: list, boxed_type) -> list: - """Deserializes a list and its elements. - - :param data: list to deserialize. - :param boxed_type: class literal. - - :return: deserialized list. - """ - return [_deserialize(sub_data, boxed_type) for sub_data in data] - - -def _deserialize_dict(data: dict, boxed_type) -> dict: - """Deserializes a dict and its elements. - - :param data: dict to deserialize. - :param boxed_type: class literal. - - :return: deserialized dict. - """ - return {k: _deserialize(v, boxed_type) for k, v in data.items()} diff --git a/services/director/src/simcore_service_director/rest/handlers.py b/services/director/src/simcore_service_director/rest/handlers.py deleted file mode 100644 index 3b611df3031..00000000000 --- a/services/director/src/simcore_service_director/rest/handlers.py +++ /dev/null @@ -1,229 +0,0 @@ -# pylint:disable=too-many-arguments - -import logging -from typing import Optional - -import pkg_resources -import yaml -from aiohttp import web, web_exceptions -from simcore_service_director import exceptions, producer, registry_proxy, resources - -log = logging.getLogger(__name__) - - -async def root_get( - request: web.Request, -) -> web.Response: - log.debug("Client does root_get request %s", request) - distb = pkg_resources.get_distribution("simcore-service-director") - with resources.stream(resources.RESOURCE_OPEN_API) as file_ptr: - api_dict = yaml.safe_load(file_ptr) - - service_health = dict( - name=distb.project_name, - status="SERVICE_RUNNING", - api_version=api_dict["info"]["version"], - version=distb.version, - ) - return web.json_response(data=dict(data=service_health)) - - -async def services_get( - request: web.Request, service_type: Optional[str] = None -) -> web.Response: - log.debug( - "Client does services_get request %s with service_type %s", - request, - service_type, - ) - try: - services = [] - if not service_type: - services = await registry_proxy.list_services( - request.app, registry_proxy.ServiceType.ALL - ) - elif "computational" in service_type: - services = await registry_proxy.list_services( - request.app, registry_proxy.ServiceType.COMPUTATIONAL - ) - elif "interactive" in service_type: - services = await registry_proxy.list_services( - request.app, registry_proxy.ServiceType.DYNAMIC - ) - # NOTE: the validation is done in the catalog. This entrypoint IS and MUST BE only used by the catalog!! 
- # NOTE2: the catalog will directly talk to the registry see case #2165 [https://github.com/ITISFoundation/osparc-simcore/issues/2165] - # services = node_validator.validate_nodes(services) - return web.json_response(data=dict(data=services)) - except exceptions.RegistryConnectionError as err: - raise web_exceptions.HTTPUnauthorized(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def services_by_key_version_get( - request: web.Request, service_key: str, service_version: str -) -> web.Response: - log.debug( - "Client does services_get request %s with service_key %s, service_version %s", - request, - service_key, - service_version, - ) - try: - services = [ - await registry_proxy.get_image_details( - request.app, service_key, service_version - ) - ] - return web.json_response(data=dict(data=services)) - except exceptions.ServiceNotAvailableError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except exceptions.RegistryConnectionError as err: - raise web_exceptions.HTTPUnauthorized(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def get_service_labels( - request: web.Request, service_key: str, service_version: str -) -> web.Response: - log.debug( - "Retrieving service labels %s with service_key %s, service_version %s", - request, - service_key, - service_version, - ) - try: - service_labels = await registry_proxy.get_image_labels( - request.app, service_key, service_version - ) - return web.json_response(data=dict(data=service_labels)) - except exceptions.ServiceNotAvailableError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except exceptions.RegistryConnectionError as err: - raise web_exceptions.HTTPUnauthorized(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -# GET /service_extras/{service_key}/{service_version} -async def service_extras_by_key_version_get( - request: web.Request, service_key: str, service_version: str -) -> web.Response: - log.debug( - "Client does service_extras_by_key_version_get request %s with service_key %s, service_version %s", - request, - service_key, - service_version, - ) - try: - service_extras = await registry_proxy.get_service_extras( - request.app, service_key, service_version - ) - return web.json_response(data=dict(data=service_extras)) - except exceptions.ServiceNotAvailableError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except exceptions.RegistryConnectionError as err: - raise web_exceptions.HTTPUnauthorized(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def running_interactive_services_list_get( - request: web.Request, user_id: str, project_id: str -) -> web.Response: - log.debug( - "Client does running_interactive_services_list_get request %s, user_id %s, project_id %s", - request, - user_id, - project_id, - ) - try: - service = await producer.get_services_details(request.app, user_id, project_id) - return web.json_response(data=dict(data=service), status=200) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def running_interactive_services_post( - request: web.Request, - user_id: str, - project_id: str, - service_key: str, - service_uuid: str, - service_tag: str, - service_basepath: str, -) -> web.Response: - log.debug( - "Client does running_interactive_services_post 
request %s with user_id %s, project_id %s, service %s:%s, service_uuid %s, service_basepath %s", - request, - user_id, - project_id, - service_key, - service_tag, - service_uuid, - service_basepath, - ) - try: - service = await producer.start_service( - request.app, - user_id, - project_id, - service_key, - service_tag, - service_uuid, - service_basepath, - ) - return web.json_response(data=dict(data=service), status=201) - except exceptions.ServiceStartTimeoutError as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - except exceptions.ServiceNotAvailableError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except exceptions.ServiceUUIDInUseError as err: - raise web_exceptions.HTTPConflict(reason=str(err)) - except exceptions.RegistryConnectionError as err: - raise web_exceptions.HTTPUnauthorized(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def running_interactive_services_get( - request: web.Request, service_uuid: str -) -> web.Response: - log.debug( - "Client does running_interactive_services_get request %s with service_uuid %s", - request, - service_uuid, - ) - try: - service = await producer.get_service_details(request.app, service_uuid) - return web.json_response(data=dict(data=service), status=200) - except exceptions.ServiceUUIDNotFoundError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except Exception as err: - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - -async def running_interactive_services_delete( - request: web.Request, service_uuid: str, save_state: Optional[bool] = True -) -> web.Response: - log.debug( - "Client does running_interactive_services_delete request %s with service_uuid %s", - request, - service_uuid, - ) - try: - await producer.stop_service(request.app, service_uuid, save_state) - - except exceptions.ServiceUUIDNotFoundError as err: - raise web_exceptions.HTTPNotFound(reason=str(err)) - except Exception as err: - # server errors are logged (>=500) - log.exception( - "Failed to delete dynamic service %s (save_state=%s)", - service_uuid, - save_state, - ) - raise web_exceptions.HTTPInternalServerError(reason=str(err)) - - return web.json_response(status=204) diff --git a/services/director/src/simcore_service_director/services_common.py b/services/director/src/simcore_service_director/services_common.py index f1aef5ac668..638f6ddefe5 100644 --- a/services/director/src/simcore_service_director/services_common.py +++ b/services/director/src/simcore_service_director/services_common.py @@ -3,7 +3,8 @@ # since this service is frozen and MUST NOT ADD ANY MORE DEPENDENCIES # # -from pydantic import BaseSettings, Field, PositiveInt +from pydantic import Field, PositiveInt +from pydantic_settings import BaseSettings, SettingsConfigDict _BASE_TIMEOUT_FOR_STOPPING_SERVICES = 60 * 60 @@ -11,14 +12,14 @@ class ServicesCommonSettings(BaseSettings): # set this interval to 1 hour director_dynamic_service_save_timeout: PositiveInt = Field( - _BASE_TIMEOUT_FOR_STOPPING_SERVICES, + default=_BASE_TIMEOUT_FOR_STOPPING_SERVICES, description=( "When stopping a dynamic service, if it has " "big payloads it is important to have longer timeouts." 
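Every deleted handler above repeats the same try/except that translates director exceptions into aiohttp HTTP errors (service not available → 404, UUID in use → 409, registry connection error → 401, anything else → 500). A hedged sketch of that mapping factored into a decorator; the decorator name and the exception classes are illustrative stand-ins for the deleted `simcore_service_director.exceptions`:

```python
# Illustrative only: exception-to-HTTP mapping as repeated in the deleted handlers.
import functools
import logging

from aiohttp import web, web_exceptions

log = logging.getLogger(__name__)


class ServiceNotAvailableError(Exception): ...
class ServiceUUIDInUseError(Exception): ...
class RegistryConnectionError(Exception): ...


def handle_director_errors(handler):
    @functools.wraps(handler)
    async def wrapper(request: web.Request, *args, **kwargs) -> web.Response:
        try:
            return await handler(request, *args, **kwargs)
        except ServiceNotAvailableError as err:
            raise web_exceptions.HTTPNotFound(reason=str(err))
        except ServiceUUIDInUseError as err:
            raise web_exceptions.HTTPConflict(reason=str(err))
        except RegistryConnectionError as err:
            raise web_exceptions.HTTPUnauthorized(reason=str(err))
        except Exception as err:  # pylint: disable=broad-except
            log.exception("unexpected error in %s", handler.__name__)
            raise web_exceptions.HTTPInternalServerError(reason=str(err))

    return wrapper
```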
), ) webserver_director_stop_service_timeout: PositiveInt = Field( - _BASE_TIMEOUT_FOR_STOPPING_SERVICES + 10, + default=_BASE_TIMEOUT_FOR_STOPPING_SERVICES + 10, description=( "When the webserver invokes the director API to stop " "a service which has a very long timeout, it also " @@ -26,7 +27,7 @@ class ServicesCommonSettings(BaseSettings): ), ) storage_service_upload_download_timeout: PositiveInt = Field( - 60 * 60, + default=60 * 60, description=( "When dynamic services upload and download data from storage, " "sometimes very big payloads are involved. In order to handle " @@ -34,7 +35,6 @@ class ServicesCommonSettings(BaseSettings): "allow the service to finish the operation." ), ) - - class Config: - env_prefix = "SERVICES_COMMON_" - case_sensitive = False + model_config = SettingsConfigDict( + env_prefix="SERVICES_COMMON_", case_sensitive=False + ) diff --git a/services/director/src/simcore_service_director/system_utils.py b/services/director/src/simcore_service_director/system_utils.py deleted file mode 100644 index cc3ee25c114..00000000000 --- a/services/director/src/simcore_service_director/system_utils.py +++ /dev/null @@ -1,13 +0,0 @@ -from pathlib import Path -from typing import List - - -def get_system_extra_hosts_raw(extra_host_domain: str) -> List[str]: - extra_hosts = [] - hosts_path = Path("/etc/hosts") - if hosts_path.exists() and extra_host_domain != "undefined": - with hosts_path.open() as hosts: - for line in hosts: - if extra_host_domain in line: - extra_hosts.append(line.strip().replace("\t", " ")) - return extra_hosts diff --git a/services/director/src/simcore_service_director/utils.py b/services/director/src/simcore_service_director/utils.py deleted file mode 100644 index 486c46cce60..00000000000 --- a/services/director/src/simcore_service_director/utils.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging -from datetime import datetime -from typing import Optional - -log = logging.getLogger(__name__) - -DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" -_MAXLEN = len("2020-10-09T12:28:14.7710") - - -def parse_as_datetime(timestr: str, *, default: Optional[datetime] = None) -> datetime: - """ - default: if parsing is not possible, it returs default - """ - # datetime_str is typically '2020-10-09T12:28:14.771034099Z' - # - The T separates the date portion from the time-of-day portion - # - The Z on the end means UTC, that is, an offset-from-UTC - # The 099 before the Z is not clear, therefore we will truncate the last part - - try: - timestr = timestr.strip("Z ")[:_MAXLEN] - dt = datetime.strptime(timestr, DATETIME_FORMAT) - return dt - except ValueError as err: - log.debug("Failed to parse %s: %s", timestr, err) - if default is not None: - return default - raise diff --git a/services/director/temp_generate_openapi.sh b/services/director/temp_generate_openapi.sh deleted file mode 100755 index 533053087ef..00000000000 --- a/services/director/temp_generate_openapi.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0) -usage() -{ - echo "usage: temp_generate_openapi.sh [[[-i input]] | [-h help]]" -} - -apihub_specs_dir= -# process arguments -while [ "$1" != "" ]; do - case $1 in - -i | --input ) shift - apihub_specs_dir=$1 - ;; - -h | --help ) usage - exit - ;; - * ) usage - exit 1 - esac - shift -done - -if [ -z "$apihub_specs_dir" ]; then - echo "please define an apihub specs directory..." 
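The `services_common.py` hunk above shows the pydantic v1 → v2 migration pattern used in this PR: the inner `class Config` becomes `model_config = SettingsConfigDict(...)` and defaults are passed explicitly as `default=` to `Field`. A minimal stand-alone example of the same pattern; the setting name is illustrative:

```python
# Minimal pydantic-settings (v2) example mirroring the services_common.py change.
from pydantic import Field, PositiveInt
from pydantic_settings import BaseSettings, SettingsConfigDict


class ExampleCommonSettings(BaseSettings):
    # reads SERVICES_COMMON_EXAMPLE_TIMEOUT from the environment (case-insensitive)
    example_timeout: PositiveInt = Field(
        default=3600, description="illustrative timeout setting"
    )

    model_config = SettingsConfigDict(
        env_prefix="SERVICES_COMMON_", case_sensitive=False
    )


settings = ExampleCommonSettings()
assert settings.example_timeout == 3600
```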
- usage - exit 1 -fi - -docker run \ - -v $apihub_specs_dir:/input \ - -v ${PWD}/src/simcore_service_director/api/v0:/output \ - itisfoundation/oas_resolver \ - /input/director/v0/openapi.yaml \ - /output/openapi.yaml diff --git a/services/director/tests/conftest.py b/services/director/tests/conftest.py deleted file mode 100644 index 229960559d0..00000000000 --- a/services/director/tests/conftest.py +++ /dev/null @@ -1,116 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=unused-import -# pylint: disable=bare-except -# pylint: disable=redefined-outer-name - -import os -from pathlib import Path - -import pytest -import simcore_service_director -from aiohttp import ClientSession -from simcore_service_director import config, resources - -# NOTE: that all the changes in these pytest-plugins MUST by py3.6 compatible! -pytest_plugins = [ - "fixtures.fake_services", - "pytest_simcore.docker_compose", - "pytest_simcore.docker_registry", - "pytest_simcore.docker_swarm", - "pytest_simcore.repository_paths", - "pytest_simcore.tmp_path_extra", - "pytest_simcore.pytest_global_environs", -] - - -@pytest.fixture -def configure_swarm_stack_name(): - config.SWARM_STACK_NAME = "test_stack" - - -@pytest.fixture(scope="session") -def common_schemas_specs_dir(osparc_simcore_root_dir): - specs_dir = osparc_simcore_root_dir / "api" / "specs" / "common" / "schemas" - assert specs_dir.exists() - return specs_dir - - -@pytest.fixture(scope="session") -def package_dir(): - dirpath = Path(simcore_service_director.__file__).resolve().parent - assert dirpath.exists() - return dirpath - - -@pytest.fixture -def configure_schemas_location(package_dir, common_schemas_specs_dir): - config.NODE_SCHEMA_LOCATION = str( - common_schemas_specs_dir / "node-meta-v0.0.1.json" - ) - resources.RESOURCE_NODE_SCHEMA = os.path.relpath( - config.NODE_SCHEMA_LOCATION, package_dir - ) - - -@pytest.fixture -def configure_registry_access(docker_registry): - config.REGISTRY_URL = docker_registry - config.REGISTRY_PATH = docker_registry - config.REGISTRY_SSL = False - config.DIRECTOR_REGISTRY_CACHING = False - - -@pytest.fixture -def user_id(): - yield "some_user_id" - - -@pytest.fixture -def project_id(): - yield "some_project_id" - - -def pytest_addoption(parser): - parser.addoption("--registry_url", action="store", default="default url") - parser.addoption("--registry_user", action="store", default="default user") - parser.addoption("--registry_pw", action="store", default="default pw") - - -@pytest.fixture(scope="session") -def configure_custom_registry(pytestconfig): - # to set these values call - # pytest --registry_url myregistry --registry_user username --registry_pw password - config.REGISTRY_URL = pytestconfig.getoption("registry_url") - config.REGISTRY_AUTH = True - config.REGISTRY_USER = pytestconfig.getoption("registry_user") - config.REGISTRY_PW = pytestconfig.getoption("registry_pw") - config.DIRECTOR_REGISTRY_CACHING = False - - -@pytest.fixture -async def aiohttp_mock_app(loop, mocker): - print("client session started ...") - session = ClientSession() - - mock_app_storage = { - config.APP_CLIENT_SESSION_KEY: session, - config.APP_REGISTRY_CACHE_DATA_KEY: {}, - } - - def _get_item(self, key): - return mock_app_storage[key] - - aiohttp_app = mocker.patch("aiohttp.web.Application") - aiohttp_app.__getitem__ = _get_item - - yield aiohttp_app - - # cleanup session - await session.close() - print("client session closed") - - -@pytest.fixture -def api_version_prefix() -> str: - assert "v0" in 
resources.listdir(resources.RESOURCE_OPENAPI_ROOT) - return "v0" diff --git a/services/director/tests/fixtures/fake_services.py b/services/director/tests/fixtures/fake_services.py deleted file mode 100644 index e58f547f729..00000000000 --- a/services/director/tests/fixtures/fake_services.py +++ /dev/null @@ -1,242 +0,0 @@ -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name - - -import asyncio -import json -import logging -import random -from io import BytesIO -from pathlib import Path - -import pytest -import requests -from aiodocker import utils -from aiodocker.docker import Docker -from aiodocker.exceptions import DockerError -from simcore_service_director.config import DEFAULT_MAX_MEMORY, DEFAULT_MAX_NANO_CPUS - -_logger = logging.getLogger(__name__) - - -@pytest.fixture(scope="function") -def push_services(docker_registry, tmpdir): - registry_url = docker_registry - tmp_dir = Path(tmpdir) - - list_of_pushed_images_tags = [] - dependent_images = [] - - async def build_push_images( - number_of_computational_services, - number_of_interactive_services, - inter_dependent_services=False, - bad_json_format=False, - version="1.0.", - ): - try: - dependent_image = None - if inter_dependent_services: - dependent_image = await _build_push_image( - tmp_dir, - registry_url, - "computational", - "dependency", - "10.52.999999", - None, - bad_json_format=bad_json_format, - ) - dependent_images.append(dependent_image) - - images_to_build = [] - - for image_index in range(0, number_of_computational_services): - images_to_build.append( - _build_push_image( - tmp_dir, - registry_url, - "computational", - "test", - version + str(image_index), - dependent_image, - bad_json_format=bad_json_format, - ) - ) - - for image_index in range(0, number_of_interactive_services): - images_to_build.append( - _build_push_image( - tmp_dir, - registry_url, - "dynamic", - "test", - version + str(image_index), - dependent_image, - bad_json_format=bad_json_format, - ) - ) - results = await asyncio.gather(*images_to_build) - list_of_pushed_images_tags.extend(results) - except DockerError: - _logger.exception("Unexpected docker API error") - raise - - return list_of_pushed_images_tags - - yield build_push_images - _logger.info("clean registry") - _clean_registry(registry_url, list_of_pushed_images_tags) - _clean_registry(registry_url, dependent_images) - - -async def _build_push_image( - docker_dir, - registry_url, - service_type, - name, - tag, - dependent_image=None, - *, - bad_json_format=False, -): # pylint: disable=R0913 - - # crate image - service_description = _create_service_description(service_type, name, tag) - docker_labels = _create_docker_labels(service_description, bad_json_format) - additional_docker_labels = [ - {"name": "constraints", "type": "string", "value": ["node.role==manager"]} - ] - - internal_port = None - entry_point = "" - if service_type == "dynamic": - internal_port = random.randint(1, 65535) - additional_docker_labels.append( - {"name": "ports", "type": "int", "value": internal_port} - ) - entry_point = "/test/entry_point" - docker_labels["simcore.service.bootsettings"] = json.dumps( - [{"name": "entry_point", "type": "string", "value": entry_point}] - ) - docker_labels["simcore.service.settings"] = json.dumps(additional_docker_labels) - if bad_json_format: - docker_labels["simcore.service.settings"] = ( - "'fjks" + docker_labels["simcore.service.settings"] - ) - - if dependent_image is not None: - dependent_description = dependent_image["service_description"] - 
dependency_docker_labels = [ - { - "key": dependent_description["key"], - "tag": dependent_description["version"], - } - ] - docker_labels["simcore.service.dependencies"] = json.dumps( - dependency_docker_labels - ) - if bad_json_format: - docker_labels["simcore.service.dependencies"] = ( - "'fjks" + docker_labels["simcore.service.dependencies"] - ) - - # create the typical org.label-schema labels - service_extras = { - "node_requirements": { - "CPU": DEFAULT_MAX_NANO_CPUS / 1e9, - "RAM": DEFAULT_MAX_MEMORY, - }, - "build_date": "2020-08-19T15:36:27Z", - "vcs_ref": "ca180ef1", - "vcs_url": "git@github.com:ITISFoundation/osparc-simcore.git", - } - docker_labels["org.label-schema.build-date"] = service_extras["build_date"] - docker_labels["org.label-schema.schema-version"] = "1.0" - docker_labels["org.label-schema.vcs-ref"] = service_extras["vcs_ref"] - docker_labels["org.label-schema.vcs-url"] = service_extras["vcs_url"] - - image_tag = registry_url + "/{key}:{version}".format( - key=service_description["key"], version=tag - ) - await _create_base_image(docker_labels, image_tag) - - # push image to registry - docker = Docker() - await docker.images.push(image_tag) - await docker.close() - # remove image from host - # docker.images.remove(image_tag) - return { - "service_description": service_description, - "docker_labels": docker_labels, - "image_path": image_tag, - "internal_port": internal_port, - "entry_point": entry_point, - "service_extras": service_extras, - } - - -def _clean_registry(registry_url, list_of_images): - request_headers = {"accept": "application/vnd.docker.distribution.manifest.v2+json"} - for image in list_of_images: - service_description = image["service_description"] - # get the image digest - tag = service_description["version"] - url = "http://{host}/v2/{name}/manifests/{tag}".format( - host=registry_url, name=service_description["key"], tag=tag - ) - response = requests.get(url, headers=request_headers) - docker_content_digest = response.headers["Docker-Content-Digest"] - # remove the image from the registry - url = "http://{host}/v2/{name}/manifests/{digest}".format( - host=registry_url, - name=service_description["key"], - digest=docker_content_digest, - ) - response = requests.delete(url, headers=request_headers) - - -async def _create_base_image(labels, tag): - dockerfile = """ -FROM alpine -CMD while true; do sleep 10; done - """ - f = BytesIO(dockerfile.encode("utf-8")) - tar_obj = utils.mktar_from_dockerfile(f) - - # build docker base image - docker = Docker() - base_docker_image = await docker.images.build( - fileobj=tar_obj, encoding="gzip", rm=True, labels=labels, tag=tag - ) - await docker.close() - return base_docker_image[0] - - -def _create_service_description(service_type, name, tag): - file_name = "dummy_service_description-v1.json" - dummy_description_path = Path(__file__).parent / file_name - with dummy_description_path.open() as file_pt: - service_desc = json.load(file_pt) - - if service_type == "computational": - service_key_type = "comp" - elif service_type == "dynamic": - service_key_type = "dynamic" - service_desc["key"] = "simcore/services/" + service_key_type + "/" + name - service_desc["version"] = tag - service_desc["type"] = service_type - - return service_desc - - -def _create_docker_labels(service_description, bad_json_format): - docker_labels = {} - for key, value in service_description.items(): - docker_labels[".".join(["io", "simcore", key])] = json.dumps({key: value}) - if bad_json_format: - docker_labels[".".join(["io", "simcore", 
key])] = ( - "d32;'" + docker_labels[".".join(["io", "simcore", key])] - ) - - return docker_labels diff --git a/services/director/tests/helpers/json_schema_validator.py b/services/director/tests/helpers/json_schema_validator.py deleted file mode 100644 index 25088a192b8..00000000000 --- a/services/director/tests/helpers/json_schema_validator.py +++ /dev/null @@ -1,29 +0,0 @@ -import json -import logging -from pathlib import Path - -# NOTE: currently uses draft04 version -from jsonschema import SchemaError, ValidationError, validate - -_logger = logging.getLogger(__name__) - - -def validate_instance_object(json_instance: dict, json_schema: dict): - try: - validate(json_instance, json_schema) - except ValidationError: - _logger.exception("Node validation error:") - raise - except SchemaError: - _logger.exception("Schema validation error:") - raise - - -def validate_instance_path(json_instance: Path, json_schema: Path): - with json_instance.open() as file_pointer: - instance = json.load(file_pointer) - - with json_schema.open() as file_pointer: - schema = json.load(file_pointer) - - validate_instance_object(instance, schema) diff --git a/services/director/tests/test_docker_utils.py b/services/director/tests/test_docker_utils.py deleted file mode 100644 index f6cce146e4b..00000000000 --- a/services/director/tests/test_docker_utils.py +++ /dev/null @@ -1,67 +0,0 @@ -# pylint:disable=unused-variable -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -# pylint:disable=too-many-arguments -# pylint: disable=not-async-context-manager -from asyncio import sleep - -import pytest -from aiodocker.exceptions import DockerError -from simcore_service_director import docker_utils - - -async def test_docker_client(): - async with docker_utils.docker_client() as client: - await client.images.pull("alpine:latest") - container = await client.containers.create_or_replace( - config={ - "Cmd": ["/bin/ash", "-c", 'echo "hello world"'], - "Image": "alpine:latest", - }, - name="testing", - ) - await container.start() - await sleep(5) - logs = await container.log(stdout=True) - assert ( - "".join(logs) - ) == "hello world\n", f"running containers {client.containers.list()}" - await container.delete(force=True) - - -@pytest.mark.parametrize( - "fct", - [ - (docker_utils.swarm_get_number_nodes), - (docker_utils.swarm_has_manager_nodes), - (docker_utils.swarm_has_worker_nodes), - ], -) -async def test_swarm_method_with_no_swarm(fct): - # if this fails on your development machine run - # `docker swarm leave --force` to leave the swarm - with pytest.raises(DockerError): - await fct() - - -async def test_swarm_get_number_nodes(docker_swarm): - num_nodes = await docker_utils.swarm_get_number_nodes() - assert num_nodes == 1 - - -async def test_swarm_has_manager_nodes(docker_swarm): - assert (await docker_utils.swarm_has_manager_nodes()) == True - - -async def test_swarm_has_worker_nodes(docker_swarm): - assert (await docker_utils.swarm_has_worker_nodes()) == False - - -async def test_push_services( - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=3, number_of_interactive_services=3 - ) diff --git a/services/director/tests/test_dummy_services.py b/services/director/tests/test_dummy_services.py deleted file mode 100644 index f38cb848b22..00000000000 --- a/services/director/tests/test_dummy_services.py +++ /dev/null @@ -1,24 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=unused-import -# pylint: 
disable=bare-except -# pylint:disable=redefined-outer-name - -import pytest -import json -import logging -from helpers import json_schema_validator - -log = logging.getLogger(__name__) - - -async def test_services_conformity(configure_schemas_location, push_services): - from simcore_service_director import resources - - services = await push_services(1, 1) - with resources.stream(resources.RESOURCE_NODE_SCHEMA) as file_pt: - service_schema = json.load(file_pt) - for service in services: - # validate service - json_schema_validator.validate_instance_object( - service["service_description"], service_schema - ) diff --git a/services/director/tests/test_handlers.py b/services/director/tests/test_handlers.py deleted file mode 100644 index 43c1395cadf..00000000000 --- a/services/director/tests/test_handlers.py +++ /dev/null @@ -1,536 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=unused-import -# pylint: disable=bare-except -# pylint: disable=redefined-outer-name -# pylint: disable=R0915 -# pylint: disable=too-many-arguments - -import json -import uuid -from typing import Optional -from urllib.parse import quote - -import pytest -from aioresponses.core import CallbackResult, aioresponses -from helpers import json_schema_validator -from servicelib.rest_responses import ( # pylint: disable=no-name-in-module - unwrap_envelope, -) -from simcore_service_director import main, resources, rest - - -@pytest.fixture -def client( - loop, - aiohttp_client, - aiohttp_unused_port, - configure_schemas_location, - configure_registry_access, -): - app = main.setup_app() - server_kwargs = {"port": aiohttp_unused_port(), "host": "localhost"} - client = loop.run_until_complete(aiohttp_client(app, server_kwargs=server_kwargs)) - return client - - -async def test_root_get(client, api_version_prefix): - web_response = await client.get(f"/{api_version_prefix}/") - assert web_response.content_type == "application/json" - assert web_response.status == 200 - healthcheck_enveloped = await web_response.json() - assert "data" in healthcheck_enveloped - - assert isinstance(healthcheck_enveloped["data"], dict) - - healthcheck = healthcheck_enveloped["data"] - assert healthcheck["name"] == "simcore-service-director" - assert healthcheck["status"] == "SERVICE_RUNNING" - assert healthcheck["version"] == "0.1.0" - assert healthcheck["api_version"] == "0.1.0" - - -def _check_services(created_services, services, schema_version="v1"): - assert len(created_services) == len(services) - - created_service_descriptions = [x["service_description"] for x in created_services] - - json_schema_path = resources.get_path(resources.RESOURCE_NODE_SCHEMA) - assert json_schema_path.exists() == True - with json_schema_path.open() as file_pt: - service_schema = json.load(file_pt) - - for service in services: - if schema_version == "v1": - assert created_service_descriptions.count(service) == 1 - json_schema_validator.validate_instance_object(service, service_schema) - - -async def test_services_get(docker_registry, client, push_services, api_version_prefix): - # empty case - web_response = await client.get(f"/{api_version_prefix}/services") - assert web_response.status == 200 - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - _check_services([], services) - - # some services - created_services = await push_services(3, 2) - web_response = await client.get(f"/{api_version_prefix}/services") 
- assert web_response.status == 200 - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - _check_services(created_services, services) - - web_response = await client.get( - f"/{api_version_prefix}/services?service_type=blahblah" - ) - assert web_response.status == 400 - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - assert not "data" in services_enveloped - assert "error" in services_enveloped - - web_response = await client.get( - f"/{api_version_prefix}/services?service_type=computational" - ) - assert web_response.status == 200 - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - assert len(services) == 3 - - web_response = await client.get( - f"/{api_version_prefix}/services?service_type=interactive" - ) - assert web_response.status == 200 - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - assert len(services) == 2 - - -async def test_services_by_key_version_get( - client, push_services, api_version_prefix -): # pylint: disable=W0613, W0621 - web_response = await client.get( - f"/{api_version_prefix}/services/whatever/someversion" - ) - assert web_response.status == 400 - web_response = await client.get( - f"/{api_version_prefix}/services/simcore/services/dynamic/something/someversion" - ) - assert web_response.status == 404 - web_response = await client.get( - f"/{api_version_prefix}/services/simcore/services/dynamic/something/1.5.2" - ) - assert web_response.status == 404 - - created_services = await push_services(3, 2) - assert len(created_services) == 5 - - retrieved_services = [] - for created_service in created_services: - service_description = created_service["service_description"] - # note that it is very important to remove the safe="/" from quote!!!! - key, version = [ - quote(service_description[key], safe="") for key in ("key", "version") - ] - url = f"/{api_version_prefix}/services/{key}/{version}" - web_response = await client.get(url) - - assert ( - web_response.status == 200 - ), await web_response.text() # here the error is actually json. - assert web_response.content_type == "application/json" - services_enveloped = await web_response.json() - - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - assert len(services) == 1 - retrieved_services.append(services[0]) - _check_services(created_services, retrieved_services) - - -async def test_get_service_labels( - client, push_services, api_version_prefix -): # pylint: disable=W0613, W0621 - created_services = await push_services(3, 2) - - for service in created_services: - service_description = service["service_description"] - # note that it is very important to remove the safe="/" from quote!!!! 
- key, version = [ - quote(service_description[key], safe="") for key in ("key", "version") - ] - url = f"/{api_version_prefix}/services/{key}/{version}/labels" - web_response = await client.get(url) - assert web_response.status == 200, await web_response.text() - - services_enveloped = await web_response.json() - labels = services_enveloped["data"] - - assert service["docker_labels"] == labels - - -async def test_services_extras_by_key_version_get( - client, push_services, api_version_prefix -): # pylint: disable=W0613, W0621 - web_response = await client.get( - f"/{api_version_prefix}/service_extras/whatever/someversion" - ) - assert web_response.status == 400 - web_response = await client.get( - f"/{api_version_prefix}/service_extras/simcore/services/dynamic/something/someversion" - ) - assert web_response.status == 404 - web_response = await client.get( - f"/{api_version_prefix}/service_extras/simcore/services/dynamic/something/1.5.2" - ) - assert web_response.status == 404 - - created_services = await push_services(3, 2) - assert len(created_services) == 5 - - for created_service in created_services: - service_description = created_service["service_description"] - # note that it is very important to remove the safe="/" from quote!!!! - key, version = [ - quote(service_description[key], safe="") for key in ("key", "version") - ] - url = f"/{api_version_prefix}/service_extras/{key}/{version}" - web_response = await client.get(url) - - assert ( - web_response.status == 200 - ), await web_response.text() # here the error is actually json. - assert web_response.content_type == "application/json" - service_extras_enveloped = await web_response.json() - - assert isinstance(service_extras_enveloped["data"], dict) - service_extras = service_extras_enveloped["data"] - assert created_service["service_extras"] == service_extras - - -async def _start_get_stop_services( - client, - push_services, - user_id, - project_id, - api_version_prefix: str, - save_state: Optional[bool], - expected_save_state_call: bool, - mocker, -): - params = {} - web_response = await client.post( - f"/{api_version_prefix}/running_interactive_services", params=params - ) - assert web_response.status == 400 - - params = { - "user_id": "None", - "project_id": "None", - "service_uuid": "sdlfkj4", - "service_key": "None", - "service_tag": "None", # optional - "service_basepath": "None", # optional - } - web_response = await client.post( - f"/{api_version_prefix}/running_interactive_services", params=params - ) - data = await web_response.json() - assert web_response.status == 400, data - - params["service_key"] = "simcore/services/comp/somfunkyname-nhsd" - params["service_tag"] = "1.2.3" - web_response = await client.post( - f"/{api_version_prefix}/running_interactive_services", params=params - ) - data = await web_response.json() - assert web_response.status == 404, data - - created_services = await push_services(0, 2) - assert len(created_services) == 2 - for created_service in created_services: - service_description = created_service["service_description"] - params["user_id"] = user_id - params["project_id"] = project_id - params["service_key"] = service_description["key"] - params["service_tag"] = service_description["version"] - service_port = created_service["internal_port"] - service_entry_point = created_service["entry_point"] - params["service_basepath"] = "/i/am/a/basepath" - params["service_uuid"] = str(uuid.uuid4()) - # start the service - web_response = await client.post( - 
f"/{api_version_prefix}/running_interactive_services", params=params - ) - assert web_response.status == 201 - assert web_response.content_type == "application/json" - running_service_enveloped = await web_response.json() - assert isinstance(running_service_enveloped["data"], dict) - assert all( - k in running_service_enveloped["data"] - for k in [ - "service_uuid", - "service_key", - "service_version", - "published_port", - "entry_point", - "service_host", - "service_port", - "service_basepath", - ] - ) - assert ( - running_service_enveloped["data"]["service_uuid"] == params["service_uuid"] - ) - assert running_service_enveloped["data"]["service_key"] == params["service_key"] - assert ( - running_service_enveloped["data"]["service_version"] - == params["service_tag"] - ) - assert running_service_enveloped["data"]["service_port"] == service_port - service_published_port = running_service_enveloped["data"]["published_port"] - assert not service_published_port - assert service_entry_point == running_service_enveloped["data"]["entry_point"] - service_host = running_service_enveloped["data"]["service_host"] - assert service_host == f"test_{params['service_uuid']}" - service_basepath = running_service_enveloped["data"]["service_basepath"] - assert service_basepath == params["service_basepath"] - - # get the service - web_response = await client.request( - "GET", - f"/{api_version_prefix}/running_interactive_services/{params['service_uuid']}", - ) - assert web_response.status == 200 - text = await web_response.text() - assert web_response.content_type == "application/json", text - running_service_enveloped = await web_response.json() - assert isinstance(running_service_enveloped["data"], dict) - assert all( - k in running_service_enveloped["data"] - for k in [ - "service_uuid", - "service_key", - "service_version", - "published_port", - "entry_point", - ] - ) - assert ( - running_service_enveloped["data"]["service_uuid"] == params["service_uuid"] - ) - assert running_service_enveloped["data"]["service_key"] == params["service_key"] - assert ( - running_service_enveloped["data"]["service_version"] - == params["service_tag"] - ) - assert ( - running_service_enveloped["data"]["published_port"] - == service_published_port - ) - assert running_service_enveloped["data"]["entry_point"] == service_entry_point - assert running_service_enveloped["data"]["service_host"] == service_host - assert running_service_enveloped["data"]["service_port"] == service_port - assert running_service_enveloped["data"]["service_basepath"] == service_basepath - - # stop the service - query_params = {} - if save_state: - query_params.update({"save_state": "true" if save_state else "false"}) - - mocked_save_state_cb = mocker.MagicMock( - return_value=CallbackResult(status=200, payload={}) - ) - PASSTHROUGH_REQUESTS_PREFIXES = [ - "http://127.0.0.1", - "http://localhost", - "unix://", # docker engine - "ws://", # websockets - ] - with aioresponses(passthrough=PASSTHROUGH_REQUESTS_PREFIXES) as mock: - - # POST /http://service_host:service_port service_basepath/state ------------------------------------------------- - mock.post( - f"http://{service_host}:{service_port}{service_basepath}/state", - status=200, - callback=mocked_save_state_cb, - ) - web_response = await client.delete( - f"/{api_version_prefix}/running_interactive_services/{params['service_uuid']}", - params=query_params, - ) - if expected_save_state_call: - mocked_save_state_cb.assert_called_once() - - text = await web_response.text() - assert web_response.status == 
204, text - assert web_response.content_type == "application/json" - data = await web_response.json() - assert data is None - - -@pytest.mark.skip( - reason="docker_swarm fixture is a session fixture making it bad running together with other tests that require a swarm" -) -async def test_running_services_post_and_delete_no_swarm( - configure_swarm_stack_name, - client, - push_services, - user_id, - project_id, - api_version_prefix, -): - params = { - "user_id": "None", - "project_id": "None", - "service_uuid": "sdlfkj4", - "service_key": "simcore/services/comp/some-key", - } - web_response = await client.post( - f"/{api_version_prefix}/running_interactive_services", params=params - ) - data = await web_response.json() - assert web_response.status == 500, data - - -@pytest.mark.parametrize( - "save_state, expected_save_state_call", [(True, True), (False, False), (None, True)] -) -async def test_running_services_post_and_delete( - configure_swarm_stack_name, - client, - push_services, - docker_swarm, - user_id, - project_id, - api_version_prefix, - save_state: Optional[bool], - expected_save_state_call: bool, - mocker, -): - await _start_get_stop_services( - client, - push_services, - user_id, - project_id, - api_version_prefix, - save_state, - expected_save_state_call, - mocker, - ) - - -async def test_running_interactive_services_list_get( - client, push_services, docker_swarm -): - """Test case for running_interactive_services_list_get - - Returns a list of interactive services - """ - user_ids = ["first_user_id", "second_user_id"] - project_ids = ["first_project_id", "second_project_id", "third_project_id"] - # prepare services - NUM_SERVICES = 1 - created_services = await push_services(0, NUM_SERVICES) - assert len(created_services) == NUM_SERVICES - # start the services - for user_id in user_ids: - for project_id in project_ids: - for created_service in created_services: - service_description = created_service["service_description"] - params = {} - params["user_id"] = user_id - params["project_id"] = project_id - params["service_key"] = service_description["key"] - params["service_tag"] = service_description["version"] - params["service_uuid"] = str(uuid.uuid4()) - # start the service - web_response = await client.post( - "/v0/running_interactive_services", params=params - ) - assert web_response.status == 201 - # get the list of services - for user_id in user_ids: - for project_id in project_ids: - params = {} - # list by user_id - params["user_id"] = user_id - response = await client.get( - path="/v0/running_interactive_services", params=params - ) - assert response.status == 200, "Response body is : " + ( - await response.read() - ).decode("utf-8") - data, error = unwrap_envelope(await response.json()) - assert data - assert not error - services_list = data - assert len(services_list) == len(project_ids) * NUM_SERVICES - # list by user_id and project_id - params["project_id"] = project_id - response = await client.get( - path="/v0/running_interactive_services", params=params - ) - assert response.status == 200, "Response body is : " + ( - await response.read() - ).decode("utf-8") - data, error = unwrap_envelope(await response.json()) - assert data - assert not error - services_list = data - assert len(services_list) == NUM_SERVICES - # list by project_id - params = {} - params["project_id"] = project_id - response = await client.get( - path="/v0/running_interactive_services", params=params - ) - assert response.status == 200, "Response body is : " + ( - await response.read() - 
).decode("utf-8") - data, error = unwrap_envelope(await response.json()) - assert data - assert not error - services_list = data - assert len(services_list) == len(user_ids) * NUM_SERVICES - - -@pytest.mark.skip(reason="test needs credentials to real registry") -async def test_performance_get_services( - loop, configure_custom_registry, configure_schemas_location -): - import time - - fake_request = "fake request" - start_time = time.perf_counter() - number_of_calls = 1 - number_of_services = 0 - for i in range(number_of_calls): - print("calling iteration", i) - start_time_i = time.perf_counter() - web_response = await rest.handlers.services_get(fake_request) - assert web_response.status == 200 - assert web_response.content_type == "application/json" - services_enveloped = json.loads(web_response.text) - assert isinstance(services_enveloped["data"], list) - services = services_enveloped["data"] - number_of_services = len(services) - print("iteration completed in", (time.perf_counter() - start_time_i), "s") - stop_time = time.perf_counter() - print( - "Time to run {} times: {}s, #services {}, time per call {}s/service".format( - number_of_calls, - stop_time - start_time, - number_of_services, - (stop_time - start_time) / number_of_calls / number_of_services, - ) - ) diff --git a/services/director/tests/test_json_schemas.py b/services/director/tests/test_json_schemas.py deleted file mode 100644 index 6a45b1d0740..00000000000 --- a/services/director/tests/test_json_schemas.py +++ /dev/null @@ -1,30 +0,0 @@ -import json -from pathlib import Path - -import pytest -from jsonschema import SchemaError, ValidationError, validate - -from simcore_service_director import resources - -API_VERSIONS = resources.listdir(resources.RESOURCE_OPENAPI_ROOT) - - -def validate_individual_schemas(list_of_paths): - for spec_file_path in list_of_paths: - assert spec_file_path.exists() - with spec_file_path.open() as file_ptr: - schema_specs = json.load(file_ptr) - try: - dummy_instance = {} - with pytest.raises(ValidationError): - validate(dummy_instance, schema_specs) - except SchemaError as err: - pytest.fail(err.message) - - -@pytest.mark.parametrize("version", API_VERSIONS) -def test_valid_individual_json_schemas_specs(version): - name = f"{resources.RESOURCE_OPENAPI_ROOT}/{version}/schemas" - schemas_folder_path = resources.get_path(name) - - validate_individual_schemas(Path(schemas_folder_path).rglob("*.json")) diff --git a/services/director/tests/test_oas.py b/services/director/tests/test_oas.py deleted file mode 100644 index 86898604fa4..00000000000 --- a/services/director/tests/test_oas.py +++ /dev/null @@ -1,30 +0,0 @@ -# pylint: disable=redefined-outer-name - -import pytest -import yaml -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError - -from simcore_service_director import resources - - -def test_openapi_specs(): - openapi_path = resources.get_path(resources.RESOURCE_OPEN_API) - with resources.stream(resources.RESOURCE_OPEN_API) as fh: - specs = yaml.safe_load(fh) - try: - validate_spec(specs, spec_url=openapi_path.as_uri()) - except OpenAPIValidationError as err: - pytest.fail(err.message) - - -def test_server_specs(): - with resources.stream(resources.RESOURCE_OPEN_API) as fh: - specs = yaml.safe_load(fh) - - # client-sdk current limitation - # - hooks to first server listed in oas - default_server = specs["servers"][0] - assert ( - default_server["url"] == "http://{host}:{port}/{version}" - ), "Invalid convention" diff --git 
a/services/director/tests/test_openapi.py b/services/director/tests/test_openapi.py deleted file mode 100644 index 36b25d16073..00000000000 --- a/services/director/tests/test_openapi.py +++ /dev/null @@ -1,25 +0,0 @@ -from pathlib import Path - -import pkg_resources -import pytest -import simcore_service_director -import yaml -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError -from simcore_service_director.resources import RESOURCE_OPEN_API - - -def test_specifications(): - # pylint: disable=no-value-for-parameter - spec_path = Path( - pkg_resources.resource_filename( - simcore_service_director.__name__, RESOURCE_OPEN_API - ) - ) - - with spec_path.open() as fh: - specs = yaml.safe_load(fh) - try: - validate_spec(specs, spec_url=spec_path.as_uri()) - except OpenAPIValidationError as err: - pytest.fail(err.message) diff --git a/services/director/tests/test_openapi_schemas.py b/services/director/tests/test_openapi_schemas.py deleted file mode 100644 index 7849534fbcf..00000000000 --- a/services/director/tests/test_openapi_schemas.py +++ /dev/null @@ -1,70 +0,0 @@ -from pathlib import Path - -import pytest -import yaml - -from openapi_spec_validator import validate_spec -from openapi_spec_validator.exceptions import OpenAPIValidationError - -from simcore_service_director import resources - -API_VERSIONS = resources.listdir(resources.RESOURCE_OPENAPI_ROOT) - - -def correct_schema_local_references(schema_specs): - for key, value in schema_specs.items(): - if isinstance(value, dict): - correct_schema_local_references(value) - elif "$ref" in key: - if str(value).startswith("#/"): - # correct the reference - new_value = str(value).replace("#/", "#/components/schemas/") - schema_specs[key] = new_value - - -def add_namespace_for_converted_schemas(schema_specs): - # schemas converted from jsonschema do not have an overarching namespace. 
- # the openapi validator does not like this - # we use the jsonschema title to create a fake namespace - fake_schema_specs = {"FakeName": schema_specs} - return fake_schema_specs - - -def validate_individual_schemas(list_of_paths): - fake_openapi_headers = { - "openapi": "3.0.0", - "info": { - "title": "An include file to define sortable attributes", - "version": "1.0.0", - }, - "paths": {}, - "components": {"parameters": {}, "schemas": {}}, - } - - for spec_file_path in list_of_paths: - assert spec_file_path.exists() - # only consider schemas - if not "openapi.yaml" in str(spec_file_path.name) and "schemas" in str( - spec_file_path - ): - with spec_file_path.open() as file_ptr: - schema_specs = yaml.safe_load(file_ptr) - # correct local references - correct_schema_local_references(schema_specs) - if str(spec_file_path).endswith("-converted.yaml"): - schema_specs = add_namespace_for_converted_schemas(schema_specs) - fake_openapi_headers["components"]["schemas"] = schema_specs - try: - validate_spec(fake_openapi_headers, spec_url=spec_file_path.as_uri()) - except OpenAPIValidationError as err: - pytest.fail(err.message) - - -@pytest.mark.parametrize("version", API_VERSIONS) -def test_valid_individual_openapi_schemas_specs(version): - name = "{root}/{version}/schemas".format( - root=resources.RESOURCE_OPENAPI_ROOT, version=version - ) - schemas_folder_path = resources.get_path(name) - validate_individual_schemas(Path(schemas_folder_path).rglob("*.yaml")) - validate_individual_schemas(Path(schemas_folder_path).rglob("*.yml")) diff --git a/services/director/tests/test_producer.py b/services/director/tests/test_producer.py deleted file mode 100644 index 675aeece000..00000000000 --- a/services/director/tests/test_producer.py +++ /dev/null @@ -1,370 +0,0 @@ -# pylint:disable=protected-access -# pylint:disable=redefined-outer-name -# pylint:disable=too-many-arguments -# pylint:disable=unused-argument -# pylint:disable=unused-variable - -import json -import uuid -from dataclasses import dataclass -from typing import Callable - -import docker -import pytest -from simcore_service_director import config, exceptions, producer -from tenacity import Retrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - - -@pytest.fixture -def ensure_service_runs_in_ci(monkeypatch): - monkeypatch.setattr(config, "DEFAULT_MAX_MEMORY", int(25 * pow(1024, 2))) - monkeypatch.setattr(config, "DEFAULT_MAX_NANO_CPUS", int(0.01 * pow(10, 9))) - - -@pytest.fixture -async def run_services( - ensure_service_runs_in_ci, - aiohttp_mock_app, - configure_registry_access, - configure_schemas_location, - push_services, - docker_swarm, - user_id, - project_id, - docker_client: docker.client.DockerClient, -) -> Callable: - started_services = [] - - async def push_start_services(number_comp: int, number_dyn: int, dependant=False): - pushed_services = await push_services( - number_comp, number_dyn, inter_dependent_services=dependant - ) - assert len(pushed_services) == (number_comp + number_dyn) - for pushed_service in pushed_services: - service_description = pushed_service["service_description"] - service_key = service_description["key"] - service_version = service_description["version"] - service_port = pushed_service["internal_port"] - service_entry_point = pushed_service["entry_point"] - service_uuid = str(uuid.uuid1()) - service_basepath = "/my/base/path" - with pytest.raises(exceptions.ServiceUUIDNotFoundError): - await producer.get_service_details(aiohttp_mock_app, service_uuid) - # start the 
service - started_service = await producer.start_service( - aiohttp_mock_app, - user_id, - project_id, - service_key, - service_version, - service_uuid, - service_basepath, - ) - assert "published_port" in started_service - if service_description["type"] == "dynamic": - assert not started_service["published_port"] - assert "entry_point" in started_service - assert started_service["entry_point"] == service_entry_point - assert "service_uuid" in started_service - assert started_service["service_uuid"] == service_uuid - assert "service_key" in started_service - assert started_service["service_key"] == service_key - assert "service_version" in started_service - assert started_service["service_version"] == service_version - assert "service_port" in started_service - assert started_service["service_port"] == service_port - assert "service_host" in started_service - assert service_uuid in started_service["service_host"] - assert "service_basepath" in started_service - assert started_service["service_basepath"] == service_basepath - assert "service_state" in started_service - assert "service_message" in started_service - - # wait for service to be running - node_details = await producer.get_service_details( - aiohttp_mock_app, service_uuid - ) - max_time = 60 - for attempt in Retrying( - wait=wait_fixed(1), stop=stop_after_delay(max_time), reraise=True - ): - with attempt: - print( - f"--> waiting for {started_service['service_key']}:{started_service['service_version']} to run..." - ) - node_details = await producer.get_service_details( - aiohttp_mock_app, service_uuid - ) - print( - f"<-- {started_service['service_key']}:{started_service['service_version']} state is {node_details['service_state']} using {config.DEFAULT_MAX_MEMORY}Bytes, {config.DEFAULT_MAX_NANO_CPUS}nanocpus" - ) - for service in docker_client.services.list(): - tasks = service.tasks() - print( - f"service details {service.id}:{service.name}: {json.dumps( tasks, indent=2)}" - ) - assert ( - node_details["service_state"] == "running" - ), f"current state is {node_details['service_state']}" - - started_service["service_state"] = node_details["service_state"] - started_service["service_message"] = node_details["service_message"] - assert node_details == started_service - started_services.append(started_service) - return started_services - - yield push_start_services - # teardown stop the services - for service in started_services: - service_uuid = service["service_uuid"] - # NOTE: Fake services are not even web-services therefore we cannot - # even emulate a legacy dy-service that does not implement a save-state feature - # so here we must make save_state=False - await producer.stop_service(aiohttp_mock_app, service_uuid, save_state=False) - with pytest.raises(exceptions.ServiceUUIDNotFoundError): - await producer.get_service_details(aiohttp_mock_app, service_uuid) - - -async def test_find_service_tag(): - my_service_key = "myservice-key" - list_of_images = { - my_service_key: [ - "2.4.0", - "2.11.0", - "2.8.0", - "1.2.1", - "some wrong value", - "latest", - "1.2.0", - "1.2.3", - ] - } - with pytest.raises(exceptions.ServiceNotAvailableError): - await producer._find_service_tag(list_of_images, "some_wrong_key", None) - with pytest.raises(exceptions.ServiceNotAvailableError): - await producer._find_service_tag( - list_of_images, my_service_key, "some wrong key" - ) - # get the latest (e.g. 
2.11.0) - latest_version = await producer._find_service_tag( - list_of_images, my_service_key, None - ) - assert latest_version == "2.11.0" - latest_version = await producer._find_service_tag( - list_of_images, my_service_key, "latest" - ) - assert latest_version == "2.11.0" - # get a specific version - version = await producer._find_service_tag(list_of_images, my_service_key, "1.2.3") - - -async def test_start_stop_service(docker_network, run_services): - # standard test - await run_services(number_comp=1, number_dyn=1) - - -async def test_service_assigned_env_variables( - docker_network, run_services, user_id, project_id -): - started_services = await run_services(number_comp=1, number_dyn=1) - client = docker.from_env() - for service in started_services: - service_uuid = service["service_uuid"] - list_of_services = client.services.list( - filters={"label": "uuid=" + service_uuid} - ) - assert len(list_of_services) == 1 - docker_service = list_of_services[0] - # check env - docker_tasks = docker_service.tasks() - assert len(docker_tasks) > 0 - task = docker_tasks[0] - envs_list = task["Spec"]["ContainerSpec"]["Env"] - envs_dict = dict(x.split("=") for x in envs_list) - - assert "POSTGRES_ENDPOINT" in envs_dict - assert "POSTGRES_USER" in envs_dict - assert "POSTGRES_PASSWORD" in envs_dict - assert "POSTGRES_DB" in envs_dict - assert "STORAGE_ENDPOINT" in envs_dict - - assert "SIMCORE_USER_ID" in envs_dict - assert envs_dict["SIMCORE_USER_ID"] == user_id - assert "SIMCORE_NODE_UUID" in envs_dict - assert envs_dict["SIMCORE_NODE_UUID"] == service_uuid - assert "SIMCORE_PROJECT_ID" in envs_dict - assert envs_dict["SIMCORE_PROJECT_ID"] == project_id - assert "SIMCORE_NODE_BASEPATH" in envs_dict - assert envs_dict["SIMCORE_NODE_BASEPATH"] == service["service_basepath"] - assert "SIMCORE_HOST_NAME" in envs_dict - assert envs_dict["SIMCORE_HOST_NAME"] == docker_service.name - - assert config.MEM_RESOURCE_LIMIT_KEY in envs_dict - assert config.CPU_RESOURCE_LIMIT_KEY in envs_dict - - -async def test_interactive_service_published_port(docker_network, run_services): - running_dynamic_services = await run_services(number_comp=0, number_dyn=1) - assert len(running_dynamic_services) == 1 - - service = running_dynamic_services[0] - assert "published_port" in service - - service_port = service["published_port"] - # ports are not published anymore in production mode - assert not service_port - - client = docker.from_env() - service_uuid = service["service_uuid"] - list_of_services = client.services.list(filters={"label": "uuid=" + service_uuid}) - assert len(list_of_services) == 1 - - docker_service = list_of_services[0] - # no port open to the outside - assert not docker_service.attrs["Endpoint"]["Spec"] - # service is started with dnsrr (round-robin) mode - assert docker_service.attrs["Spec"]["EndpointSpec"]["Mode"] == "dnsrr" - - -@pytest.fixture -def docker_network( - docker_client: docker.client.DockerClient, docker_swarm: None -) -> docker.models.networks.Network: - network = docker_client.networks.create( - "test_network_default", driver="overlay", scope="swarm" - ) - print(f"--> docker network '{network.name}' created") - config.SIMCORE_SERVICES_NETWORK_NAME = network.name - yield network - - # cleanup - print(f"<-- removing docker network '{network.name}'...") - network.remove() - - for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(1)): - with attempt: - list_networks = docker_client.networks.list( - config.SIMCORE_SERVICES_NETWORK_NAME - ) - assert not list_networks - 
config.SIMCORE_SERVICES_NETWORK_NAME = None - print(f"<-- removed docker network '{network.name}'") - - -async def test_interactive_service_in_correct_network( - docker_network: docker.models.networks.Network, run_services -): - - running_dynamic_services = await run_services( - number_comp=0, number_dyn=2, dependant=False - ) - assert len(running_dynamic_services) == 2 - for service in running_dynamic_services: - client = docker.from_env() - service_uuid = service["service_uuid"] - list_of_services = client.services.list( - filters={"label": "uuid=" + service_uuid} - ) - assert list_of_services - assert len(list_of_services) == 1 - docker_service = list_of_services[0] - assert ( - docker_service.attrs["Spec"]["Networks"][0]["Target"] == docker_network.id - ) - - -async def test_dependent_services_have_common_network(docker_network, run_services): - running_dynamic_services = await run_services( - number_comp=0, number_dyn=2, dependant=True - ) - assert len(running_dynamic_services) == 2 - - for service in running_dynamic_services: - client = docker.from_env() - service_uuid = service["service_uuid"] - list_of_services = client.services.list( - filters={"label": "uuid=" + service_uuid} - ) - # there is one dependency per service - assert len(list_of_services) == 2 - # check they have same network - assert ( - list_of_services[0].attrs["Spec"]["Networks"][0]["Target"] - == list_of_services[1].attrs["Spec"]["Networks"][0]["Target"] - ) - - -@dataclass -class FakeDockerService: - service_str: str - expected_key: str - expected_tag: str - - -@pytest.mark.parametrize( - "fake_service", - [ - FakeDockerService( - "/simcore/services/dynamic/some/sub/folder/my_service-key:123.456.3214", - "simcore/services/dynamic/some/sub/folder/my_service-key", - "123.456.3214", - ), - FakeDockerService( - "/simcore/services/dynamic/some/sub/folder/my_service-key:123.456.3214@sha256:2aef165ab4f30fbb109e88959271d8b57489790ea13a77d27c02d8adb8feb20f", - "simcore/services/dynamic/some/sub/folder/my_service-key", - "123.456.3214", - ), - ], -) -async def test_get_service_key_version_from_docker_service( - fake_service: FakeDockerService, -): - docker_service_partial_inspect = { - "Spec": { - "TaskTemplate": { - "ContainerSpec": { - "Image": f"{config.REGISTRY_PATH}{fake_service.service_str}" - } - } - } - } - ( - service_key, - service_tag, - ) = await producer._get_service_key_version_from_docker_service( - docker_service_partial_inspect - ) - assert service_key == fake_service.expected_key - assert service_tag == fake_service.expected_tag - - - -@pytest.mark.parametrize( - "fake_service_str", - [ - "postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce", - "/simcore/postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce", - "itisfoundation/postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce", - "/simcore/services/stuff/postgres:10.11", - "/simcore/services/dynamic/postgres:10.11", - "/simcore/services/dynamic/postgres:10", - ], -) -async def test_get_service_key_version_from_docker_service_except_invalid_keys( - fake_service_str: str, -): - docker_service_partial_inspect = { - "Spec": { - "TaskTemplate": { - "ContainerSpec": { - "Image": f"{config.REGISTRY_PATH if fake_service_str.startswith('/') else ''}{fake_service_str}" - } - } - } - } - with pytest.raises(exceptions.DirectorException): - await producer._get_service_key_version_from_docker_service( - docker_service_partial_inspect - ) diff --git 
a/services/director/tests/test_registry_cache_task.py b/services/director/tests/test_registry_cache_task.py deleted file mode 100644 index 056462b9199..00000000000 --- a/services/director/tests/test_registry_cache_task.py +++ /dev/null @@ -1,67 +0,0 @@ -# pylint:disable=unused-argument -# pylint:disable=redefined-outer-name -from asyncio import sleep - -import pytest -from simcore_service_director import config, main, registry_cache_task, registry_proxy - - -@pytest.fixture -def client( - loop, - aiohttp_client, - aiohttp_unused_port, - configure_schemas_location, - configure_registry_access, -): - config.DIRECTOR_REGISTRY_CACHING = True - config.DIRECTOR_REGISTRY_CACHING_TTL = 5 - # config.DIRECTOR_REGISTRY_CACHING_TTL = 5 - app = main.setup_app() - server_kwargs = {"port": aiohttp_unused_port(), "host": "localhost"} - - registry_cache_task.setup(app) - - yield loop.run_until_complete(aiohttp_client(app, server_kwargs=server_kwargs)) - - -async def test_registry_caching_task(client, push_services): - app = client.app - assert app - - # check the task is started - assert registry_cache_task.TASK_NAME in app - # check the registry cache is empty (no calls yet) - assert registry_cache_task.APP_REGISTRY_CACHE_DATA_KEY in app - - # check we do not get any repository - list_of_services = await registry_proxy.list_services( - app, registry_proxy.ServiceType.ALL - ) - assert not list_of_services - assert app[registry_cache_task.APP_REGISTRY_CACHE_DATA_KEY] != {} - # create services in the registry - pushed_services = await push_services( - number_of_computational_services=1, number_of_interactive_services=1 - ) - # the services shall be updated - await sleep( - config.DIRECTOR_REGISTRY_CACHING_TTL * 1.1 - ) # NOTE: this can take some time. Sleep increased by 10%. - list_of_services = await registry_proxy.list_services( - app, registry_proxy.ServiceType.ALL - ) - assert len(list_of_services) == 2 - # add more - pushed_services = await push_services( - number_of_computational_services=2, - number_of_interactive_services=2, - version="2.0.", - ) - await sleep( - config.DIRECTOR_REGISTRY_CACHING_TTL * 1.1 - ) # NOTE: this sometimes takes a bit more. Sleep increased a 10%. 
- list_of_services = await registry_proxy.list_services( - app, registry_proxy.ServiceType.ALL - ) - assert len(list_of_services) == len(pushed_services) diff --git a/services/director/tests/test_registry_proxy.py b/services/director/tests/test_registry_proxy.py deleted file mode 100644 index fbbed5897a3..00000000000 --- a/services/director/tests/test_registry_proxy.py +++ /dev/null @@ -1,293 +0,0 @@ -# pylint: disable=W0613, W0621 -# pylint: disable=unused-variable - -import json -import time - -import pytest - -from simcore_service_director import config, registry_proxy - - -async def test_list_no_services_available( - aiohttp_mock_app, - docker_registry, - configure_registry_access, - configure_schemas_location, -): - - computational_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.COMPUTATIONAL - ) - assert not computational_services # it's empty - interactive_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.DYNAMIC - ) - assert not interactive_services - all_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.ALL - ) - assert not all_services - - -async def test_list_services_with_bad_json_formatting( - aiohttp_mock_app, - docker_registry, - configure_registry_access, - configure_schemas_location, - push_services, -): - # some services - created_services = await push_services( - number_of_computational_services=3, - number_of_interactive_services=2, - bad_json_format=True, - ) - assert len(created_services) == 5 - computational_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.COMPUTATIONAL - ) - assert not computational_services # it's empty - interactive_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.DYNAMIC - ) - assert not interactive_services - all_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.ALL - ) - assert not all_services - - -async def test_list_computational_services( - aiohttp_mock_app, - docker_registry, - push_services, - configure_registry_access, - configure_schemas_location, -): - await push_services( - number_of_computational_services=6, number_of_interactive_services=3 - ) - - computational_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.COMPUTATIONAL - ) - assert len(computational_services) == 6 - - -async def test_list_interactive_services( - aiohttp_mock_app, - docker_registry, - push_services, - configure_registry_access, - configure_schemas_location, -): - await push_services( - number_of_computational_services=5, number_of_interactive_services=4 - ) - interactive_services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.DYNAMIC - ) - assert len(interactive_services) == 4 - - -async def test_list_of_image_tags( - aiohttp_mock_app, - docker_registry, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=5, number_of_interactive_services=3 - ) - image_number = {} - for image in images: - service_description = image["service_description"] - key = service_description["key"] - if key not in image_number: - image_number[key] = 0 - image_number[key] = image_number[key] + 1 - - for key, number in image_number.items(): - list_of_image_tags = await registry_proxy.list_image_tags(aiohttp_mock_app, key) - assert 
len(list_of_image_tags) == number - - -async def test_list_interactive_service_dependencies( - aiohttp_mock_app, - docker_registry, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=2, - number_of_interactive_services=2, - inter_dependent_services=True, - ) - for image in images: - service_description = image["service_description"] - docker_labels = image["docker_labels"] - if "simcore.service.dependencies" in docker_labels: - docker_dependencies = json.loads( - docker_labels["simcore.service.dependencies"] - ) - image_dependencies = await registry_proxy.list_interactive_service_dependencies( - aiohttp_mock_app, - service_description["key"], - service_description["version"], - ) - assert isinstance(image_dependencies, list) - assert len(image_dependencies) == len(docker_dependencies) - assert image_dependencies[0]["key"] == docker_dependencies[0]["key"] - assert image_dependencies[0]["tag"] == docker_dependencies[0]["tag"] - - -async def test_get_image_labels( - aiohttp_mock_app, - docker_registry, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=1, number_of_interactive_services=1 - ) - for image in images: - service_description = image["service_description"] - labels = await registry_proxy.get_image_labels( - aiohttp_mock_app, service_description["key"], service_description["version"] - ) - assert "io.simcore.key" in labels - assert "io.simcore.version" in labels - assert "io.simcore.type" in labels - assert "io.simcore.name" in labels - assert "io.simcore.description" in labels - assert "io.simcore.authors" in labels - assert "io.simcore.contact" in labels - assert "io.simcore.inputs" in labels - assert "io.simcore.outputs" in labels - if service_description["type"] == "dynamic": - # dynamic services have this additional flag - assert "simcore.service.settings" in labels - - -def test_get_service_first_name(): - repo = "simcore/services/dynamic/myservice/modeler/my-sub-modeler" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/dynamic/myservice/modeler" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/dynamic/myservice" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/comp/myservice" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/comp/myservice/modeler" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/comp/myservice/modeler/blahblahblah" - assert registry_proxy.get_service_first_name(repo) == "myservice" - repo = "simcore/services/comp" - assert registry_proxy.get_service_first_name(repo) == "invalid service" - - repo = "services/myservice/modeler/my-sub-modeler" - assert registry_proxy.get_service_first_name(repo) == "invalid service" - - -def test_get_service_last_namess(): - repo = "simcore/services/dynamic/myservice/modeler/my-sub-modeler" - assert ( - registry_proxy.get_service_last_names(repo) - == "myservice_modeler_my-sub-modeler" - ) - repo = "simcore/services/dynamic/myservice/modeler" - assert registry_proxy.get_service_last_names(repo) == "myservice_modeler" - repo = "simcore/services/dynamic/myservice" - assert registry_proxy.get_service_last_names(repo) == "myservice" - repo = "simcore/services/dynamic" - assert registry_proxy.get_service_last_names(repo) 
== "invalid service" - repo = "simcore/services/comp/myservice/modeler" - assert registry_proxy.get_service_last_names(repo) == "myservice_modeler" - repo = "services/dynamic/modeler" - assert registry_proxy.get_service_last_names(repo) == "invalid service" - - -async def test_get_image_details( - aiohttp_mock_app, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=1, number_of_interactive_services=1 - ) - for image in images: - service_description = image["service_description"] - details = await registry_proxy.get_image_details( - aiohttp_mock_app, service_description["key"], service_description["version"] - ) - - assert details == service_description - - -async def test_registry_caching( - aiohttp_mock_app, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=1, number_of_interactive_services=1 - ) - config.DIRECTOR_REGISTRY_CACHING = True - start_time = time.perf_counter() - services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.ALL - ) - time_to_retrieve_without_cache = time.perf_counter() - start_time - assert len(services) == len(images) - start_time = time.perf_counter() - services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.ALL - ) - time_to_retrieve_with_cache = time.perf_counter() - start_time - assert len(services) == len(images) - assert time_to_retrieve_with_cache < time_to_retrieve_without_cache - - -@pytest.mark.skip(reason="test needs credentials to real registry") -async def test_get_services_performance( - aiohttp_mock_app, loop, configure_custom_registry -): - start_time = time.perf_counter() - services = await registry_proxy.list_services( - aiohttp_mock_app, registry_proxy.ServiceType.ALL - ) - stop_time = time.perf_counter() - print( - "\nTime to run getting services: {}s, #services {}, time per call {}s/service".format( - stop_time - start_time, - len(services), - (stop_time - start_time) / len(services), - ) - ) - - -async def test_generate_service_extras( - aiohttp_mock_app, - push_services, - configure_registry_access, - configure_schemas_location, -): - images = await push_services( - number_of_computational_services=1, number_of_interactive_services=1 - ) - - for image in images: - service_description = image["service_description"] - service_extras = image["service_extras"] - - extras = await registry_proxy.get_service_extras( - aiohttp_mock_app, service_description["key"], service_description["version"] - ) - - assert extras == service_extras diff --git a/services/director/tests/test_utils.py b/services/director/tests/test_utils.py deleted file mode 100644 index 3141d2f2baa..00000000000 --- a/services/director/tests/test_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -from datetime import datetime - -import pytest -from simcore_service_director.utils import parse_as_datetime - - -@pytest.mark.parametrize( - "timestr", - ( - # Samples taken from https://docs.docker.com/engine/reference/commandline/service_inspect/ - "2020-10-09T18:44:02.558012087Z", - "2020-10-09T12:28:14.771034099Z", - "2020-10-09T12:28:14.7710", - # found cases with spaces - "2020-10-09T12:28:14.77 Z", - " 2020-10-09T12:28:14.77 ", - ), -) -def test_parse_valid_time_strings(timestr): - - dt = parse_as_datetime(timestr) - assert isinstance(dt, datetime) - assert dt.year == 2020 - assert dt.month == 10 - assert dt.day == 9 - - -def 
test_parse_invalid_timestr(): - now = datetime.utcnow() - invalid_timestr = "2020-10-09T12:28" - - # w/ default, it should NOT raise - dt = parse_as_datetime(invalid_timestr, default=now) - assert dt == now - - # w/o default - with pytest.raises(ValueError): - parse_as_datetime(invalid_timestr) diff --git a/services/director/tests/unit/api/conftest.py b/services/director/tests/unit/api/conftest.py new file mode 100644 index 00000000000..e295a9dacd1 --- /dev/null +++ b/services/director/tests/unit/api/conftest.py @@ -0,0 +1,40 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import AsyncIterator + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI +from fixtures.fake_services import PushServicesCallable, ServiceInRegistryInfoDict +from httpx._transports.asgi import ASGITransport + + +@pytest.fixture +async def client(app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: + # - Needed for app to trigger start/stop event handlers + # - Prefer this client instead of fastapi.testclient.TestClient + async with httpx.AsyncClient( + transport=ASGITransport(app=app), + base_url="http://director.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + assert isinstance(getattr(client, "_transport", None), ASGITransport) + yield client + + +@pytest.fixture +async def created_services( + push_services: PushServicesCallable, +) -> list[ServiceInRegistryInfoDict]: + return await push_services( + number_of_computational_services=3, number_of_interactive_services=2 + ) + + +@pytest.fixture +def x_simcore_user_agent_header(faker: Faker) -> dict[str, str]: + return {"x-simcore-user-agent": faker.pystr()} diff --git a/services/director/tests/unit/api/test_rest_health.py b/services/director/tests/unit/api/test_rest_health.py new file mode 100644 index 00000000000..7a429c668ff --- /dev/null +++ b/services/director/tests/unit/api/test_rest_health.py @@ -0,0 +1,19 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import httpx +from fastapi import status + + +async def test_healthcheck( + configure_registry_access, + client: httpx.AsyncClient, + api_version_prefix: str, +): + resp = await client.get(f"/{api_version_prefix}/") + + assert resp.is_success + assert resp.status_code == status.HTTP_200_OK + assert "simcore_service_director" in resp.text diff --git a/services/director/tests/unit/api/test_rest_running_interactive_services.py b/services/director/tests/unit/api/test_rest_running_interactive_services.py new file mode 100644 index 00000000000..fa6fabd6948 --- /dev/null +++ b/services/director/tests/unit/api/test_rest_running_interactive_services.py @@ -0,0 +1,303 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import uuid + +import httpx +import pytest +import respx +from faker import Faker +from fastapi import status +from models_library.projects import ProjectID +from models_library.users import UserID +from pytest_simcore.helpers.typing_env import EnvVarsDict + + +def _assert_response_and_unwrap_envelope(got: httpx.Response): + assert got.headers["content-type"] == "application/json" + assert got.encoding == "utf-8" + + body = got.json() + assert isinstance(body, dict) + assert "data" in body or "error" in body + return 
body.get("data"), body.get("error") + + +@pytest.mark.parametrize( + "save_state, expected_save_state_call", [(True, True), (False, False), (None, True)] +) +async def test_running_services_post_and_delete( + configure_swarm_stack_name: EnvVarsDict, + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + client: httpx.AsyncClient, + push_services, + user_id: UserID, + project_id: ProjectID, + api_version_prefix: str, + save_state: bool | None, + expected_save_state_call: bool, + mocker, + faker: Faker, + x_simcore_user_agent_header: dict[str, str], + ensure_run_in_sequence_context_is_empty: None, +): + params = {} + resp = await client.post( + f"/{api_version_prefix}/running_interactive_services", params=params + ) + assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + + params = { + "user_id": f"{faker.pyint(min_value=1)}", + "project_id": f"{faker.uuid4()}", + "service_uuid": f"{faker.uuid4()}", + "service_key": "None", + "service_tag": "None", # optional + "service_basepath": "None", # optional + } + resp = await client.post( + f"/{api_version_prefix}/running_interactive_services", params=params + ) + data = resp.json() + assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, data + + params["service_key"] = "simcore/services/comp/somfunkyname-nhsd" + params["service_tag"] = "1.2.3" + resp = await client.post( + f"/{api_version_prefix}/running_interactive_services", + params=params, + headers=x_simcore_user_agent_header, + ) + data = resp.json() + assert resp.status_code == status.HTTP_404_NOT_FOUND, data + + created_services = await push_services( + number_of_computational_services=0, number_of_interactive_services=2 + ) + assert len(created_services) == 2 + for created_service in created_services: + service_description = created_service["service_description"] + params["user_id"] = f"{user_id}" + params["project_id"] = f"{project_id}" + params["service_key"] = service_description["key"] + params["service_tag"] = service_description["version"] + service_port = created_service["internal_port"] + service_entry_point = created_service["entry_point"] + params["service_basepath"] = "/i/am/a/basepath" + params["service_uuid"] = f"{faker.uuid4()}" + # start the service + resp = await client.post( + f"/{api_version_prefix}/running_interactive_services", + params=params, + headers=x_simcore_user_agent_header, + ) + assert resp.status_code == status.HTTP_201_CREATED, resp.text + assert resp.encoding == "utf-8" + assert resp.headers["content-type"] == "application/json" + running_service_enveloped = resp.json() + assert isinstance(running_service_enveloped["data"], dict) + assert all( + k in running_service_enveloped["data"] + for k in [ + "service_uuid", + "service_key", + "service_version", + "published_port", + "entry_point", + "service_host", + "service_port", + "service_basepath", + ] + ) + assert ( + running_service_enveloped["data"]["service_uuid"] == params["service_uuid"] + ) + assert running_service_enveloped["data"]["service_key"] == params["service_key"] + assert ( + running_service_enveloped["data"]["service_version"] + == params["service_tag"] + ) + assert running_service_enveloped["data"]["service_port"] == service_port + service_published_port = running_service_enveloped["data"]["published_port"] + assert not service_published_port + assert service_entry_point == running_service_enveloped["data"]["entry_point"] + service_host = running_service_enveloped["data"]["service_host"] + assert service_host == 
f"test_{params['service_uuid']}" + service_basepath = running_service_enveloped["data"]["service_basepath"] + assert service_basepath == params["service_basepath"] + + # get the service + resp = await client.request( + "GET", + f"/{api_version_prefix}/running_interactive_services/{params['service_uuid']}", + ) + assert resp.status_code == status.HTTP_200_OK + text = resp.text + assert resp.headers["content-type"] == "application/json" + assert resp.encoding == "utf-8", f"Got {text=}" + running_service_enveloped = resp.json() + assert isinstance(running_service_enveloped["data"], dict) + assert all( + k in running_service_enveloped["data"] + for k in [ + "service_uuid", + "service_key", + "service_version", + "published_port", + "entry_point", + ] + ) + assert ( + running_service_enveloped["data"]["service_uuid"] == params["service_uuid"] + ) + assert running_service_enveloped["data"]["service_key"] == params["service_key"] + assert ( + running_service_enveloped["data"]["service_version"] + == params["service_tag"] + ) + assert ( + running_service_enveloped["data"]["published_port"] + == service_published_port + ) + assert running_service_enveloped["data"]["entry_point"] == service_entry_point + assert running_service_enveloped["data"]["service_host"] == service_host + assert running_service_enveloped["data"]["service_port"] == service_port + assert running_service_enveloped["data"]["service_basepath"] == service_basepath + + # stop the service + query_params = {} + if save_state: + query_params.update({"save_state": "true" if save_state else "false"}) + + with respx.mock( + base_url=f"http://{service_host}:{service_port}{service_basepath}", + assert_all_called=False, + assert_all_mocked=False, + ) as respx_mock: + + def _save_me(request) -> httpx.Response: + return httpx.Response(status.HTTP_200_OK, json={}) + + respx_mock.post("/state", name="save_state").mock(side_effect=_save_me) + respx_mock.route(host="127.0.0.1", name="host").pass_through() + respx_mock.route(host="localhost", name="localhost").pass_through() + + resp = await client.delete( + f"/{api_version_prefix}/running_interactive_services/{params['service_uuid']}", + params=query_params, + ) + + text = resp.text + assert resp.status_code == status.HTTP_204_NO_CONTENT, text + assert resp.headers["content-type"] == "application/json" + assert resp.encoding == "utf-8" + + +async def test_running_interactive_services_list_get( + configure_swarm_stack_name: EnvVarsDict, + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + client: httpx.AsyncClient, + push_services, + x_simcore_user_agent_header: dict[str, str], + api_version_prefix: str, + ensure_run_in_sequence_context_is_empty: None, + faker: Faker, +): + """Test case for running_interactive_services_list_get + + Returns a list of interactive services + """ + user_ids = [faker.pyint(min_value=1), faker.pyint(min_value=1)] + project_ids = [faker.uuid4(), faker.uuid4(), faker.uuid4()] + # prepare services + NUM_SERVICES = 1 + available_services = await push_services( + number_of_computational_services=0, number_of_interactive_services=NUM_SERVICES + ) + assert len(available_services) == NUM_SERVICES + # start the services + created_services = [] + for user_id in user_ids: + for project_id in project_ids: + for created_service in available_services: + service_description = created_service["service_description"] + params = {} + params["user_id"] = user_id + params["project_id"] = project_id + params["service_key"] = service_description["key"] + 
params["service_tag"] = service_description["version"] + params["service_uuid"] = str(uuid.uuid4()) + # start the service + resp = await client.post( + "/v0/running_interactive_services", + params=params, + headers=x_simcore_user_agent_header, + ) + assert resp.status_code == 201, resp.text + created_services.append(resp.json()["data"]) + # get the list of services + for user_id in user_ids: + for project_id in project_ids: + params = {} + # list by user_id + params["user_id"] = user_id + response = await client.get( + "/v0/running_interactive_services", params=params + ) + assert ( + response.status_code == status.HTTP_200_OK + ), f"Response body is : {response.text}" + data, error = _assert_response_and_unwrap_envelope(response) + assert data + assert not error + services_list = data + assert len(services_list) == len(project_ids) * NUM_SERVICES + # list by user_id and project_id + params["project_id"] = project_id + response = await client.get( + "/v0/running_interactive_services", params=params + ) + assert ( + response.status_code == status.HTTP_200_OK + ), f"Response body is : {response.text}" + data, error = _assert_response_and_unwrap_envelope(response) + assert data + assert not error + services_list = data + assert len(services_list) == NUM_SERVICES + # list by project_id + params = {} + params["project_id"] = project_id + response = await client.get( + "/v0/running_interactive_services", params=params + ) + assert ( + response.status_code == status.HTTP_200_OK + ), f"Response body is : {response.text}" + data, error = _assert_response_and_unwrap_envelope(response) + assert data + assert not error + services_list = data + assert len(services_list) == len(user_ids) * NUM_SERVICES + # get all the running services + response = await client.get("/v0/running_interactive_services") + assert ( + response.status_code == status.HTTP_200_OK + ), f"Response body is : {response.text}" + data, error = _assert_response_and_unwrap_envelope(response) + assert data + assert not error + services_list = data + assert len(services_list) == len(user_ids) * len(project_ids) * NUM_SERVICES + + # cleanup + for service in created_services: + resp = await client.delete( + f"/{api_version_prefix}/running_interactive_services/{service['service_uuid']}", + params={"save_state": False}, + ) + assert resp.status_code == status.HTTP_204_NO_CONTENT, resp.text diff --git a/services/director/tests/unit/api/test_rest_services.py b/services/director/tests/unit/api/test_rest_services.py new file mode 100644 index 00000000000..e42edea2eff --- /dev/null +++ b/services/director/tests/unit/api/test_rest_services.py @@ -0,0 +1,199 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from urllib.parse import quote + +import httpx +from fastapi import status +from fixtures.fake_services import ServiceInRegistryInfoDict +from models_library.api_schemas_director.services import ServiceDataGet +from pytest_simcore.helpers.typing_env import EnvVarsDict + + +def _assert_response_and_unwrap_envelope(got: httpx.Response): + assert got.headers["content-type"] == "application/json" + assert got.encoding == "utf-8" + + body = got.json() + assert isinstance(body, dict) + assert "data" in body or "error" in body + return body.get("data"), body.get("error") + + +def _assert_services( + *, + expected: list[ServiceInRegistryInfoDict], + got: list[dict], + schema_version="v1", +): + assert len(expected) == len(got) + + 
expected_key_version_tuples = [
+        (s["service_description"]["key"], s["service_description"]["version"])
+        for s in expected
+    ]
+
+    for data in got:
+        service = ServiceDataGet.model_validate(data)
+        assert (
+            expected_key_version_tuples.count((f"{service.key}", f"{service.version}"))
+            == 1
+        )
+
+
+async def test_list_services_with_empty_registry(
+    docker_registry: str,
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    api_version_prefix: str,
+):
+    assert docker_registry, "docker-registry is not ready?"
+
+    # empty case
+    resp = await client.get(f"/{api_version_prefix}/services")
+    assert resp.status_code == status.HTTP_200_OK, f"Got {resp.text}"
+
+    services, error = _assert_response_and_unwrap_envelope(resp)
+    assert not error
+    assert isinstance(services, list)
+
+    _assert_services(expected=[], got=services)
+
+
+async def test_list_services(
+    docker_registry: str,
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    created_services: list[ServiceInRegistryInfoDict],
+    api_version_prefix: str,
+):
+    assert docker_registry, "docker-registry is not ready?"
+
+    resp = await client.get(f"/{api_version_prefix}/services")
+    assert resp.status_code == status.HTTP_200_OK, f"Got {resp.text}"
+
+    services, error = _assert_response_and_unwrap_envelope(resp)
+    assert not error
+    assert isinstance(services, list)
+
+    _assert_services(expected=created_services, got=services)
+
+
+async def test_get_service_bad_request(
+    docker_registry: str,
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    created_services: list[ServiceInRegistryInfoDict],
+    api_version_prefix: str,
+):
+    assert docker_registry, "docker-registry is not ready?"
+    assert len(created_services) > 0
+
+    resp = await client.get(f"/{api_version_prefix}/services?service_type=blahblah")
+    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, f"Got {resp.text}"
+
+    # NOTE: 422 validation errors are raised by FastAPI itself and are not enveloped
+
+
+async def test_list_services_by_service_type(
+    docker_registry: str,
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    created_services: list[ServiceInRegistryInfoDict],
+    api_version_prefix: str,
+):
+    assert docker_registry, "docker-registry is not ready?"
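+    # NOTE: the `created_services` fixture (see conftest.py) pushes 3 computational
+    # and 2 dynamic services to the registry, hence the counts asserted below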
+    assert len(created_services) == 5
+
+    resp = await client.get(
+        f"/{api_version_prefix}/services?service_type=computational"
+    )
+    assert resp.status_code == status.HTTP_200_OK, f"Got {resp.text}"
+
+    services, error = _assert_response_and_unwrap_envelope(resp)
+    assert not error
+    assert services
+    assert len(services) == 3
+
+    resp = await client.get(f"/{api_version_prefix}/services?service_type=dynamic")
+    assert resp.status_code == status.HTTP_200_OK, f"Got {resp.text}"
+
+    services, error = _assert_response_and_unwrap_envelope(resp)
+    assert not error
+    assert services
+    assert len(services) == 2
+
+
+async def test_get_services_by_key_and_version_with_empty_registry(
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    api_version_prefix: str,
+):
+    resp = await client.get(f"/{api_version_prefix}/services/whatever/someversion")
+    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, f"Got {resp.text}"
+
+    resp = await client.get(
+        f"/{api_version_prefix}/simcore/services/dynamic/something/someversion"
+    )
+    assert resp.status_code == status.HTTP_404_NOT_FOUND, f"Got {resp.text}"
+
+    resp = await client.get(
+        f"/{api_version_prefix}/simcore/services/dynamic/something/1.5.2"
+    )
+    assert resp.status_code == status.HTTP_404_NOT_FOUND, f"Got {resp.text}"
+
+
+async def test_get_services_by_key_and_version(
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    created_services: list[ServiceInRegistryInfoDict],
+    api_version_prefix: str,
+):
+    assert len(created_services) == 5
+
+    retrieved_services: list[dict] = []
+    for created_service in created_services:
+        service_description = created_service["service_description"]
+        # note that it is very important to remove the safe="/" from quote!!!!
+        key, version = (
+            quote(service_description[key], safe="") for key in ("key", "version")
+        )
+        url = f"/{api_version_prefix}/services/{key}/{version}"
+        resp = await client.get(url)
+
+        assert resp.status_code == status.HTTP_200_OK, f"Got {resp.text}"
+
+        services, error = _assert_response_and_unwrap_envelope(resp)
+        assert not error
+        assert isinstance(services, list)
+        assert len(services) == 1
+
+        retrieved_services.append(services[0])
+
+    _assert_services(expected=created_services, got=retrieved_services)
+
+
+async def test_get_service_labels(
+    configure_registry_access: EnvVarsDict,
+    client: httpx.AsyncClient,
+    created_services: list[ServiceInRegistryInfoDict],
+    api_version_prefix: str,
+):
+    assert len(created_services) == 5
+
+    for service in created_services:
+        service_description = service["service_description"]
+        # note that it is very important to remove the safe="/" from quote!!!!
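+        # (service keys contain "/" -- e.g. "simcore/services/dynamic/..." -- so they
+        #  must be percent-encoded to travel as a single path segment in the URL below)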
+ key, version = ( + quote(service_description[key], safe="") for key in ("key", "version") + ) + url = f"/{api_version_prefix}/services/{key}/{version}/labels" + resp = await client.get(url) + assert resp.status_code == status.HTTP_200_OK, f"Got f{resp.text}" + + labels, error = _assert_response_and_unwrap_envelope(resp) + assert not error + + assert service["docker_labels"] == labels diff --git a/services/director/tests/unit/conftest.py b/services/director/tests/unit/conftest.py new file mode 100644 index 00000000000..15b7627e29d --- /dev/null +++ b/services/director/tests/unit/conftest.py @@ -0,0 +1,189 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from collections.abc import AsyncIterator, Awaitable, Callable +from pathlib import Path +from typing import Any + +import pytest +import simcore_service_director +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.docker_registry import RegistrySettings +from simcore_service_director.core.application import create_app +from simcore_service_director.core.settings import ApplicationSettings + +pytest_plugins = [ + "fixtures.fake_services", + "pytest_simcore.cli_runner", + "pytest_simcore.docker", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_registry", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.repository_paths", + "pytest_simcore.simcore_service_library_fixtures", +] + + +@pytest.fixture(scope="session") +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "director" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_director")) + return service_folder + + +@pytest.fixture(scope="session") +def installed_package_dir() -> Path: + dirpath = Path(simcore_service_director.__file__).resolve().parent + assert dirpath.exists() + return dirpath + + +@pytest.fixture(scope="session") +def common_schemas_specs_dir(osparc_simcore_root_dir: Path) -> Path: + specs_dir = osparc_simcore_root_dir / "api" / "specs" / "director" / "schemas" + assert specs_dir.exists() + return specs_dir + + +@pytest.fixture +def configure_swarm_stack_name( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "SWARM_STACK_NAME": "test_stack", + }, + ) + + +@pytest.fixture +def configure_registry_access( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, docker_registry: str +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "REGISTRY_URL": docker_registry, + "REGISTRY_PATH": docker_registry, + "REGISTRY_SSL": False, + "DIRECTOR_REGISTRY_CACHING": False, + }, + ) + + +@pytest.fixture +def configure_external_registry_access( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + external_registry_settings: RegistrySettings | None, +) -> EnvVarsDict: + assert external_registry_settings + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + **external_registry_settings.model_dump(by_alias=True, exclude_none=True), + "REGISTRY_PW": 
external_registry_settings.REGISTRY_PW.get_secret_value(), + "DIRECTOR_REGISTRY_CACHING": False, + }, + ) + + +@pytest.fixture(scope="session") +def configure_custom_registry( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + pytestconfig: pytest.Config, +) -> EnvVarsDict: + # to set these values call + # pytest --registry_url myregistry --registry_user username --registry_pw password + registry_url = pytestconfig.getoption("registry_url") + assert registry_url + assert isinstance(registry_url, str) + registry_user = pytestconfig.getoption("registry_user") + assert registry_user + assert isinstance(registry_user, str) + registry_pw = pytestconfig.getoption("registry_pw") + assert registry_pw + assert isinstance(registry_pw, str) + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "REGISTRY_URL": registry_url, + "REGISTRY_AUTH": True, + "REGISTRY_USER": registry_user, + "REGISTRY_PW": registry_pw, + "REGISTRY_SSL": False, + "DIRECTOR_REGISTRY_CACHING": False, + }, + ) + + +@pytest.fixture +def api_version_prefix() -> str: + return "v0" + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + docker_compose_service_environment_dict: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_environment_dict, + "DIRECTOR_TRACING": "null", + }, + ) + + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 + + +@pytest.fixture +def app_settings(app_environment: EnvVarsDict) -> ApplicationSettings: + return ApplicationSettings.create_from_envs() + + +@pytest.fixture +async def app( + app_settings: ApplicationSettings, is_pdb_enabled: bool +) -> AsyncIterator[FastAPI]: + the_test_app = create_app(settings=app_settings) + async with LifespanManager( + the_test_app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + yield the_test_app + + +@pytest.fixture +async def with_docker_network( + docker_network: Callable[..., Awaitable[dict[str, Any]]], +) -> dict[str, Any]: + return await docker_network() + + +@pytest.fixture +def configured_docker_network( + with_docker_network: dict[str, Any], + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + {"DIRECTOR_SIMCORE_SERVICES_NETWORK_NAME": with_docker_network["Name"]}, + ) diff --git a/services/director/tests/fixtures/dummy_service_description-v1.json b/services/director/tests/unit/fixtures/dummy_service_description-v1.json similarity index 96% rename from services/director/tests/fixtures/dummy_service_description-v1.json rename to services/director/tests/unit/fixtures/dummy_service_description-v1.json index e7e0f4907ca..f68f21a15d6 100644 --- a/services/director/tests/fixtures/dummy_service_description-v1.json +++ b/services/director/tests/unit/fixtures/dummy_service_description-v1.json @@ -55,4 +55,4 @@ "type": "data:application/json" } } -} \ No newline at end of file +} diff --git a/services/director/tests/unit/fixtures/fake_services.py b/services/director/tests/unit/fixtures/fake_services.py new file mode 100644 index 00000000000..1edb799ee9c --- /dev/null +++ b/services/director/tests/unit/fixtures/fake_services.py @@ -0,0 +1,321 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import asyncio +import json +import logging 
+import random +import sys +from collections.abc import Awaitable, Iterator +from io import BytesIO +from pathlib import Path +from typing import Any, Literal, Protocol, TypedDict + +import pytest +import requests +from aiodocker import utils +from aiodocker.docker import Docker +from aiodocker.exceptions import DockerError +from simcore_service_director.core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + + +class NodeRequirementsDict(TypedDict): + CPU: float + RAM: float + + +class ServiceExtrasDict(TypedDict): + node_requirements: NodeRequirementsDict + build_date: str + vcs_ref: str + vcs_url: str + + +class ServiceDescriptionDict(TypedDict): + key: str + version: str + type: Literal["computational", "dynamic"] + + +class ServiceInRegistryInfoDict(TypedDict): + service_description: ServiceDescriptionDict + docker_labels: dict[str, Any] + image_path: str + internal_port: int | None + entry_point: str + service_extras: ServiceExtrasDict + + +def _create_service_description( + service_type: Literal["computational", "dynamic"], name: str, tag: str +) -> ServiceDescriptionDict: + service_desc = json.loads( + (CURRENT_DIR / "dummy_service_description-v1.json").read_text() + ) + + if service_type == "computational": + service_key_type = "comp" + elif service_type == "dynamic": + service_key_type = "dynamic" + else: + msg = f"Invalid {service_type=}" + raise ValueError(msg) + + service_desc["key"] = f"simcore/services/{service_key_type}/{name}" + service_desc["version"] = tag + service_desc["type"] = service_type + + return service_desc + + +def _create_docker_labels( + service_description: ServiceDescriptionDict, *, bad_json_format: bool +) -> dict[str, str]: + docker_labels = {} + for key, value in service_description.items(): + docker_labels[".".join(["io", "simcore", key])] = json.dumps({key: value}) + if bad_json_format: + docker_labels[".".join(["io", "simcore", key])] = ( + "d32;'" + docker_labels[".".join(["io", "simcore", key])] + ) + + return docker_labels + + +async def _create_base_image(labels, tag) -> dict[str, Any]: + dockerfile = """ +FROM alpine +CMD while true; do sleep 10; done + """ + f = BytesIO(dockerfile.encode("utf-8")) + tar_obj = utils.mktar_from_dockerfile(f) + + # build docker base image + docker = Docker() + base_docker_image = await docker.images.build( + fileobj=tar_obj, encoding="gzip", rm=True, labels=labels, tag=tag + ) + await docker.close() + return base_docker_image + + +async def _build_and_push_image( + registry_url: str, + service_type: Literal["computational", "dynamic"], + name: str, + tag: str, + dependent_image=None, + *, + bad_json_format: bool = False, + app_settings: ApplicationSettings, +) -> ServiceInRegistryInfoDict: + + # crate image + service_description = _create_service_description(service_type, name, tag) + docker_labels = _create_docker_labels( + service_description, bad_json_format=bad_json_format + ) + additional_docker_labels = [ + { + "name": "constraints", + "type": "string", + "value": ["node.role==manager"], + } + ] + + internal_port = None + entry_point = "" + if service_type == "dynamic": + internal_port = random.randint(1, 65535) # noqa: S311 + additional_docker_labels.append( + { + "name": "ports", + "type": "int", + "value": internal_port, + } + ) + entry_point = "/test/entry_point" + docker_labels["simcore.service.bootsettings"] = json.dumps( + [ + { + "name": "entry_point", + "type": "string", + 
"value": entry_point, + } + ] + ) + docker_labels["simcore.service.settings"] = json.dumps(additional_docker_labels) + if bad_json_format: + docker_labels["simcore.service.settings"] = ( + "'fjks" + docker_labels["simcore.service.settings"] + ) + + if dependent_image is not None: + dependent_description = dependent_image["service_description"] + dependency_docker_labels = [ + { + "key": dependent_description["key"], + "tag": dependent_description["version"], + } + ] + docker_labels["simcore.service.dependencies"] = json.dumps( + dependency_docker_labels + ) + if bad_json_format: + docker_labels["simcore.service.dependencies"] = ( + "'fjks" + docker_labels["simcore.service.dependencies"] + ) + + # create the typical org.label-schema labels + service_extras = ServiceExtrasDict( + node_requirements=NodeRequirementsDict( + CPU=app_settings.DIRECTOR_DEFAULT_MAX_NANO_CPUS / 1e9, + RAM=app_settings.DIRECTOR_DEFAULT_MAX_MEMORY, + ), + build_date="2020-08-19T15:36:27Z", + vcs_ref="ca180ef1", + vcs_url="git@github.com:ITISFoundation/osparc-simcore.git", + ) + docker_labels["org.label-schema.build-date"] = service_extras["build_date"] + docker_labels["org.label-schema.schema-version"] = "1.0" + docker_labels["org.label-schema.vcs-ref"] = service_extras["vcs_ref"] + docker_labels["org.label-schema.vcs-url"] = service_extras["vcs_url"] + + image_tag = registry_url + "/{key}:{version}".format( + key=service_description["key"], version=tag + ) + await _create_base_image(docker_labels, image_tag) + + # push image to registry + try: + docker = Docker() + await docker.images.push(image_tag) + finally: + await docker.close() + + # remove image from host + # docker.images.remove(image_tag) + + return ServiceInRegistryInfoDict( + service_description=service_description, + docker_labels=docker_labels, + image_path=image_tag, + internal_port=internal_port, + entry_point=entry_point, + service_extras=service_extras, + ) + + +def _clean_registry(registry_url: str, list_of_images: list[ServiceInRegistryInfoDict]): + request_headers = {"accept": "application/vnd.docker.distribution.manifest.v2+json"} + for image in list_of_images: + service_description = image["service_description"] + # get the image digest + tag = service_description["version"] + url = "http://{host}/v2/{name}/manifests/{tag}".format( + host=registry_url, name=service_description["key"], tag=tag + ) + response = requests.get(url, headers=request_headers, timeout=10) + docker_content_digest = response.headers["Docker-Content-Digest"] + # remove the image from the registry + url = "http://{host}/v2/{name}/manifests/{digest}".format( + host=registry_url, + name=service_description["key"], + digest=docker_content_digest, + ) + response = requests.delete(url, headers=request_headers, timeout=5) + + +class PushServicesCallable(Protocol): + async def __call__( + self, + *, + number_of_computational_services: int, + number_of_interactive_services: int, + inter_dependent_services: bool = False, + bad_json_format: bool = False, + version="1.0.", + ) -> list[ServiceInRegistryInfoDict]: + ... 
+ + +@pytest.fixture +def push_services( + docker_registry: str, app_settings: ApplicationSettings +) -> Iterator[PushServicesCallable]: + registry_url = docker_registry + list_of_pushed_images_tags: list[ServiceInRegistryInfoDict] = [] + dependent_images = [] + + async def _build_push_images_to_docker_registry( + *, + number_of_computational_services, + number_of_interactive_services, + inter_dependent_services=False, + bad_json_format=False, + version="1.0.", + ) -> list[ServiceInRegistryInfoDict]: + try: + dependent_image = None + if inter_dependent_services: + dependent_image = await _build_and_push_image( + registry_url=registry_url, + service_type="computational", + name="dependency", + tag="10.52.999999", + dependent_image=None, + bad_json_format=bad_json_format, + app_settings=app_settings, + ) + dependent_images.append(dependent_image) + + images_to_build: list[Awaitable] = [ + _build_and_push_image( + registry_url=registry_url, + service_type="computational", + name="test", + tag=f"{version}{image_index}", + dependent_image=dependent_image, + bad_json_format=bad_json_format, + app_settings=app_settings, + ) + for image_index in range(number_of_computational_services) + ] + + images_to_build.extend( + [ + _build_and_push_image( + registry_url=registry_url, + service_type="dynamic", + name="test", + tag=f"{version}{image_index}", + dependent_image=dependent_image, + bad_json_format=bad_json_format, + app_settings=app_settings, + ) + for image_index in range(number_of_interactive_services) + ] + ) + + results = await asyncio.gather(*images_to_build) + list_of_pushed_images_tags.extend(results) + + except DockerError: + _logger.exception("Docker API error while building and pushing images") + raise + + return list_of_pushed_images_tags + + yield _build_push_images_to_docker_registry + + _logger.info("clean registry") + _clean_registry(registry_url, list_of_pushed_images_tags) + _clean_registry(registry_url, dependent_images) diff --git a/services/director/tests/unit/test__model_examples.py b/services/director/tests/unit/test__model_examples.py new file mode 100644 index 00000000000..b304277c536 --- /dev/null +++ b/services/director/tests/unit/test__model_examples.py @@ -0,0 +1,27 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from typing import Any + +import pytest +import simcore_service_director.models +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(simcore_service_director.models), +) +def test_director_service_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/services/director/tests/unit/test_cli.py b/services/director/tests/unit/test_cli.py new file mode 100644 index 00000000000..92967a3b8a7 --- /dev/null +++ b/services/director/tests/unit/test_cli.py @@ -0,0 +1,35 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments +import os + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_director._meta import API_VERSION +from simcore_service_director.cli import main +from 
simcore_service_director.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def test_cli_help_and_version(cli_runner: CliRunner): + result = cli_runner.invoke(main, "--help") + assert result.exit_code == os.EX_OK, result.output + + result = cli_runner.invoke(main, "--version") + assert result.exit_code == os.EX_OK, result.output + assert result.stdout.strip() == API_VERSION + + +def test_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK + + print(result.output) + settings = ApplicationSettings(result.output) + assert settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() + + +def test_run(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["run"]) + assert result.exit_code == 0 + assert "disabled" in result.stdout diff --git a/services/director/tests/unit/test_core_settings.py b/services/director/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..5e8b42c1268 --- /dev/null +++ b/services/director/tests/unit/test_core_settings.py @@ -0,0 +1,166 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import datetime + +import pytest +from pydantic import ValidationError +from pytest_simcore.helpers.monkeypatch_envs import ( + setenvs_from_dict, + setenvs_from_envfile, +) +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_director.core.settings import ApplicationSettings + + +def test_valid_web_application_settings(app_environment: EnvVarsDict): + """ + We validate actual envfiles (e.g. repo.config files) by passing them via the CLI + + $ ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + $ pytest --external-envfile=.secrets --pdb tests/unit/test_core_settings.py + + """ + settings = ApplicationSettings() # type: ignore + assert settings + + assert settings == ApplicationSettings.create_from_envs() + + assert ( + str( + app_environment.get( + "DIRECTOR_DEFAULT_MAX_MEMORY", + ApplicationSettings.model_fields["DIRECTOR_DEFAULT_MAX_MEMORY"].default, + ) + ) + == f"{settings.DIRECTOR_DEFAULT_MAX_MEMORY}" + ) + + +def test_invalid_client_timeout_raises( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +): + monkeypatch.setenv( + "DIRECTOR_REGISTRY_CLIENT_TIMEOUT", f"{datetime.timedelta(seconds=-10)}" + ) + with pytest.raises(ValidationError): + ApplicationSettings.create_from_envs() + + +def test_docker_container_env_sample(monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("DIRECTOR_DEFAULT_MAX_MEMORY", raising=False) + + setenvs_from_envfile( + monkeypatch, + """ + DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS={} + DIRECTOR_REGISTRY_CACHING=True + DIRECTOR_REGISTRY_CACHING_TTL=00:15:00 + DIRECTOR_SELF_SIGNED_SSL_FILENAME= + DIRECTOR_SELF_SIGNED_SSL_SECRET_ID= + DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME= + DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=node.labels.io.simcore.autoscaled-node!=true + EXTRA_HOSTS_SUFFIX=undefined + GPG_KEY=0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + HOME=/root + HOSTNAME=osparc-master-01-2 + LANG=C.UTF-8 + LC_ALL=C.UTF-8 + LOGLEVEL=WARNING + MONITORING_ENABLED=True + PATH=/home/scu/.venv/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + POSTGRES_DB=simcoredb + POSTGRES_ENDPOINT=master_postgres:5432 + POSTGRES_HOST=master_postgres + POSTGRES_PASSWORD=z43 
+ POSTGRES_PORT=5432 + POSTGRES_USER=scu + PUBLISHED_HOST_NAME=osparc-master.speag.com + PWD=/home/scu + PYTHONDONTWRITEBYTECODE=1 + PYTHONOPTIMIZE=TRUE + PYTHON_GET_PIP_SHA256=adsfasdf + PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/eff16c878c7fd6b688b9b4c4267695cf1a0bf01b/get-pip.py + PYTHON_PIP_VERSION=20.1.1 + PYTHON_VERSION=3.6.10 + REGISTRY_AUTH=True + REGISTRY_PATH= + REGISTRY_PW=adsfasdf + REGISTRY_SSL=True + REGISTRY_URL=registry.osparc-master.speag.com + REGISTRY_USER=admin + REGISTRY_VERSION=v2 + S3_ACCESS_KEY=adsfasdf + S3_BUCKET_NAME=master-simcore + S3_ENDPOINT=https://ceph-prod-rgw.speag.com + S3_REGION=us-east-1 + S3_SECRET_KEY=asdf + SC_BOOT_MODE=production + SC_BUILD_TARGET=production + SC_USER_ID=8004 + SC_USER_NAME=scu + SHLVL=0 + SIMCORE_SERVICES_NETWORK_NAME=master-simcore_interactive_services_subnet + STORAGE_ENDPOINT=master_storage:8080 + SWARM_STACK_NAME=master-simcore + TERM=xterm + TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT=http://jaeger:4318 + TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE=50 + TRAEFIK_SIMCORE_ZONE=master_internal_simcore_stack + VIRTUAL_ENV=/home/scu/.venv + LOG_FORMAT_LOCAL_DEV_ENABLED=1 + """, + ) + + settings = ApplicationSettings.create_from_envs() + + assert settings.DIRECTOR_DEFAULT_MAX_MEMORY == 0, "default!" + + +def test_docker_compose_environment_sample( + monkeypatch: pytest.MonkeyPatch, app_environment: EnvVarsDict +): + + setenvs_from_dict( + monkeypatch, + { + **app_environment, + "DEFAULT_MAX_MEMORY": "0", + "DEFAULT_MAX_NANO_CPUS": "0", + "DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS": '{"VRAM": "node.labels.gpu==true"}', + "DIRECTOR_REGISTRY_CACHING": "True", + "DIRECTOR_REGISTRY_CACHING_TTL": "00:15:00", + "DIRECTOR_SELF_SIGNED_SSL_FILENAME": "", + "DIRECTOR_SELF_SIGNED_SSL_SECRET_ID": "", + "DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME": "", + "DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS": "", + "DIRECTOR_TRACING": "{}", + "EXTRA_HOSTS_SUFFIX": "undefined", + "LOGLEVEL": "DEBUG", + "MONITORING_ENABLED": "True", + "POSTGRES_DB": "simcoredb", + "POSTGRES_ENDPOINT": "osparc-dev.foo.com:5432", + "POSTGRES_HOST": "osparc-dev.foo.com", + "POSTGRES_PASSWORD": "adsfasdf", + "POSTGRES_PORT": "5432", + "POSTGRES_USER": "postgres", + "PUBLISHED_HOST_NAME": "osparc-master-zmt.click", + "REGISTRY_AUTH": "True", + "REGISTRY_PATH": "", + "REGISTRY_PW": "asdf", + "REGISTRY_SSL": "True", + "REGISTRY_URL": "registry.osparc-master-zmt.click", + "REGISTRY_USER": "admin", + "SIMCORE_SERVICES_NETWORK_NAME": "master-simcore_interactive_services_subnet", + "STORAGE_ENDPOINT": "master_storage:8080", + "SWARM_STACK_NAME": "master-simcore", + "TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT": "http://jaeger:4318", + "TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE": "50", + "TRAEFIK_SIMCORE_ZONE": "master_internal_simcore_stack", + }, + ) + + ApplicationSettings.create_from_envs() diff --git a/services/director/tests/unit/test_docker_utils.py b/services/director/tests/unit/test_docker_utils.py new file mode 100644 index 00000000000..81ad8299f31 --- /dev/null +++ b/services/director/tests/unit/test_docker_utils.py @@ -0,0 +1,40 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint: disable=not-async-context-manager +from asyncio import sleep + +from simcore_service_director import docker_utils + + +async def test_docker_client(): + async with docker_utils.docker_client() as client: + await 
client.images.pull("alpine:latest") + container = await client.containers.create_or_replace( + config={ + "Cmd": ["/bin/ash", "-c", 'echo "hello world"'], + "Image": "alpine:latest", + }, + name="testing", + ) + await container.start() + await sleep(5) + logs = await container.log(stdout=True) + assert ( + "".join(logs) + ) == "hello world\n", f"running containers {client.containers.list()}" + await container.delete(force=True) + + +async def test_swarm_get_number_nodes(docker_swarm: None): + num_nodes = await docker_utils.swarm_get_number_nodes() + assert num_nodes == 1 + + +async def test_swarm_has_manager_nodes(docker_swarm: None): + assert (await docker_utils.swarm_has_manager_nodes()) is True + + +async def test_swarm_has_worker_nodes(docker_swarm: None): + assert (await docker_utils.swarm_has_worker_nodes()) is False diff --git a/services/director/tests/unit/test_producer.py b/services/director/tests/unit/test_producer.py new file mode 100644 index 00000000000..4b729c424bb --- /dev/null +++ b/services/director/tests/unit/test_producer.py @@ -0,0 +1,401 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments +# pylint:disable=unused-argument +# pylint:disable=unused-variable + +import json +import uuid +from collections.abc import AsyncIterator, Awaitable, Callable +from dataclasses import dataclass +from typing import Any + +import docker +import pytest +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.docker_registry import RegistrySettings +from simcore_service_director import producer +from simcore_service_director.constants import ( + CPU_RESOURCE_LIMIT_KEY, + MEM_RESOURCE_LIMIT_KEY, +) +from simcore_service_director.core.errors import ( + DirectorRuntimeError, + ServiceNotAvailableError, + ServiceUUIDNotFoundError, +) +from simcore_service_director.core.settings import ApplicationSettings +from tenacity import Retrying +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + + +@pytest.fixture +def ensure_service_runs_in_ci( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "DIRECTOR_DEFAULT_MAX_MEMORY": f"{int(25 * pow(1024, 2))}", + "DIRECTOR_DEFAULT_MAX_NANO_CPUS": f"{int(0.01 * pow(10, 9))}", + }, + ) + + +@pytest.fixture +async def run_services( + ensure_service_runs_in_ci: EnvVarsDict, + configure_registry_access: EnvVarsDict, + app: FastAPI, + app_settings: ApplicationSettings, + push_services, + docker_swarm: None, + user_id: UserID, + project_id: ProjectID, + docker_client: docker.client.DockerClient, +) -> AsyncIterator[Callable[[int, int], Awaitable[list[dict[str, Any]]]]]: + started_services = [] + + async def push_start_services( + number_comp: int, number_dyn: int, dependant=False + ) -> list[dict[str, Any]]: + pushed_services = await push_services( + number_of_computational_services=number_comp, + number_of_interactive_services=number_dyn, + inter_dependent_services=dependant, + ) + assert len(pushed_services) == (number_comp + number_dyn) + for pushed_service in pushed_services: + service_description = pushed_service["service_description"] + service_key = service_description["key"] + service_version = service_description["version"] + service_port = 
pushed_service["internal_port"] + service_entry_point = pushed_service["entry_point"] + service_uuid = str(uuid.uuid1()) + service_basepath = "/my/base/path" + with pytest.raises(ServiceUUIDNotFoundError): + await producer.get_service_details(app, service_uuid) + # start the service + started_service = await producer.start_service( + app, + f"{user_id}", + f"{project_id}", + service_key, + service_version, + service_uuid, + service_basepath, + "", + ) + assert "published_port" in started_service + if service_description["type"] == "dynamic": + assert not started_service["published_port"] + assert "entry_point" in started_service + assert started_service["entry_point"] == service_entry_point + assert "service_uuid" in started_service + assert started_service["service_uuid"] == service_uuid + assert "service_key" in started_service + assert started_service["service_key"] == service_key + assert "service_version" in started_service + assert started_service["service_version"] == service_version + assert "service_port" in started_service + assert started_service["service_port"] == service_port + assert "service_host" in started_service + assert service_uuid in started_service["service_host"] + assert "service_basepath" in started_service + assert started_service["service_basepath"] == service_basepath + assert "service_state" in started_service + assert "service_message" in started_service + + # wait for service to be running + node_details = await producer.get_service_details(app, service_uuid) + max_time = 60 + for attempt in Retrying( + wait=wait_fixed(1), stop=stop_after_delay(max_time), reraise=True + ): + with attempt: + print( + f"--> waiting for {started_service['service_key']}:{started_service['service_version']} to run..." + ) + node_details = await producer.get_service_details(app, service_uuid) + print( + f"<-- {started_service['service_key']}:{started_service['service_version']} state is {node_details['service_state']} using {app_settings.DIRECTOR_DEFAULT_MAX_MEMORY}Bytes, {app_settings.DIRECTOR_DEFAULT_MAX_NANO_CPUS}nanocpus" + ) + for service in docker_client.services.list(): + tasks = service.tasks() + print( + f"service details {service.id}:{service.name}: {json.dumps( tasks, indent=2)}" + ) + assert ( + node_details["service_state"] == "running" + ), f"current state is {node_details['service_state']}" + + started_service["service_state"] = node_details["service_state"] + started_service["service_message"] = node_details["service_message"] + assert node_details == started_service + started_services.append(started_service) + return started_services + + yield push_start_services + # teardown stop the services + for service in started_services: + service_uuid = service["service_uuid"] + # NOTE: Fake services are not even web-services therefore we cannot + # even emulate a legacy dy-service that does not implement a save-state feature + # so here we must make save_state=False + await producer.stop_service(app, node_uuid=service_uuid, save_state=False) + with pytest.raises(ServiceUUIDNotFoundError): + await producer.get_service_details(app, service_uuid) + + +async def test_find_service_tag(): + my_service_key = "myservice-key" + list_of_images = { + my_service_key: [ + "2.4.0", + "2.11.0", + "2.8.0", + "1.2.1", + "some wrong value", + "latest", + "1.2.0", + "1.2.3", + ] + } + with pytest.raises(ServiceNotAvailableError): + await producer._find_service_tag( # noqa: SLF001 + list_of_images, "some_wrong_key", None + ) + with pytest.raises(ServiceNotAvailableError): + await 
producer._find_service_tag( # noqa: SLF001 + list_of_images, my_service_key, "some wrong key" + ) + # get the latest (e.g. 2.11.0) + latest_version = await producer._find_service_tag( # noqa: SLF001 + list_of_images, my_service_key, None + ) + assert latest_version == "2.11.0" + latest_version = await producer._find_service_tag( # noqa: SLF001 + list_of_images, my_service_key, "latest" + ) + assert latest_version == "2.11.0" + # get a specific version + await producer._find_service_tag( # noqa: SLF001 + list_of_images, my_service_key, "1.2.3" + ) + + +async def test_start_stop_service( + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + run_services: Callable[..., Awaitable[list[dict[str, Any]]]], +): + # standard test + await run_services(number_comp=1, number_dyn=1) + + +async def test_service_assigned_env_variables( + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + run_services: Callable[..., Awaitable[list[dict[str, Any]]]], + user_id: UserID, + project_id: ProjectID, +): + started_services = await run_services(number_comp=1, number_dyn=1) + client = docker.from_env() + for service in started_services: + service_uuid = service["service_uuid"] + list_of_services = client.services.list( + filters={"label": f"io.simcore.runtime.node-id={service_uuid}"} + ) + assert len(list_of_services) == 1 + docker_service = list_of_services[0] + # check env + docker_tasks = docker_service.tasks() + assert len(docker_tasks) > 0 + task = docker_tasks[0] + envs_list = task["Spec"]["ContainerSpec"]["Env"] + envs_dict = dict(x.split("=") for x in envs_list) + + assert "POSTGRES_ENDPOINT" in envs_dict + assert "POSTGRES_USER" in envs_dict + assert "POSTGRES_PASSWORD" in envs_dict + assert "POSTGRES_DB" in envs_dict + assert "STORAGE_ENDPOINT" in envs_dict + + assert "SIMCORE_USER_ID" in envs_dict + assert envs_dict["SIMCORE_USER_ID"] == f"{user_id}" + assert "SIMCORE_NODE_UUID" in envs_dict + assert envs_dict["SIMCORE_NODE_UUID"] == service_uuid + assert "SIMCORE_PROJECT_ID" in envs_dict + assert envs_dict["SIMCORE_PROJECT_ID"] == f"{project_id}" + assert "SIMCORE_NODE_BASEPATH" in envs_dict + assert envs_dict["SIMCORE_NODE_BASEPATH"] == service["service_basepath"] + assert "SIMCORE_HOST_NAME" in envs_dict + assert envs_dict["SIMCORE_HOST_NAME"] == docker_service.name + + assert MEM_RESOURCE_LIMIT_KEY in envs_dict + assert CPU_RESOURCE_LIMIT_KEY in envs_dict + + +async def test_interactive_service_published_port( + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + run_services, +): + running_dynamic_services = await run_services(number_comp=0, number_dyn=1) + assert len(running_dynamic_services) == 1 + + service = running_dynamic_services[0] + assert "published_port" in service + + service_port = service["published_port"] + # ports are not published anymore in production mode + assert not service_port + + client = docker.from_env() + service_uuid = service["service_uuid"] + list_of_services = client.services.list( + filters={"label": f"io.simcore.runtime.node-id={service_uuid}"} + ) + assert len(list_of_services) == 1 + + docker_service = list_of_services[0] + # no port open to the outside + assert not docker_service.attrs["Endpoint"]["Spec"] + # service is started with dnsrr (round-robin) mode + assert docker_service.attrs["Spec"]["EndpointSpec"]["Mode"] == "dnsrr" + + +async def test_interactive_service_in_correct_network( + configure_registry_access: EnvVarsDict, + with_docker_network: dict[str, Any], + 
configured_docker_network: EnvVarsDict, + run_services, +): + running_dynamic_services = await run_services( + number_comp=0, number_dyn=2, dependant=False + ) + assert len(running_dynamic_services) == 2 + for service in running_dynamic_services: + client = docker.from_env() + service_uuid = service["service_uuid"] + list_of_services = client.services.list( + filters={"label": f"io.simcore.runtime.node-id={service_uuid}"} + ) + assert list_of_services + assert len(list_of_services) == 1 + docker_service = list_of_services[0] + assert ( + docker_service.attrs["Spec"]["Networks"][0]["Target"] + == with_docker_network["Id"] + ) + + +async def test_dependent_services_have_common_network( + configure_registry_access: EnvVarsDict, + configured_docker_network: EnvVarsDict, + run_services, +): + running_dynamic_services = await run_services( + number_comp=0, number_dyn=2, dependant=True + ) + assert len(running_dynamic_services) == 2 + + for service in running_dynamic_services: + client = docker.from_env() + service_uuid = service["service_uuid"] + list_of_services = client.services.list( + filters={"label": f"io.simcore.runtime.node-id={service_uuid}"} + ) + # there is one dependency per service + assert len(list_of_services) == 2 + # check they have same network + assert ( + list_of_services[0].attrs["Spec"]["Networks"][0]["Target"] + == list_of_services[1].attrs["Spec"]["Networks"][0]["Target"] + ) + + +@dataclass +class FakeDockerService: + service_str: str + expected_key: str + expected_tag: str + + +@pytest.fixture +def registry_settings(app_settings: ApplicationSettings) -> RegistrySettings: + return app_settings.DIRECTOR_REGISTRY + + +@pytest.mark.parametrize( + "fake_service", + [ + FakeDockerService( + "/simcore/services/dynamic/some/sub/folder/my_service-key:123.456.3214", + "simcore/services/dynamic/some/sub/folder/my_service-key", + "123.456.3214", + ), + FakeDockerService( + "/simcore/services/dynamic/some/sub/folder/my_service-key:123.456.3214@sha256:2aef165ab4f30fbb109e88959271d8b57489790ea13a77d27c02d8adb8feb20f", + "simcore/services/dynamic/some/sub/folder/my_service-key", + "123.456.3214", + ), + ], +) +async def test_get_service_key_version_from_docker_service( + configure_registry_access: EnvVarsDict, + registry_settings: RegistrySettings, + fake_service: FakeDockerService, +): + docker_service_partial_inspect = { + "Spec": { + "TaskTemplate": { + "ContainerSpec": { + "Image": f"{registry_settings.resolved_registry_url}{fake_service.service_str}" + } + } + } + } + ( + service_key, + service_tag, + ) = await producer._get_service_key_version_from_docker_service( # noqa: SLF001 + docker_service_partial_inspect, registry_settings + ) + assert service_key == fake_service.expected_key + assert service_tag == fake_service.expected_tag + + +@pytest.mark.parametrize( + "fake_service_str", + [ + "postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc", + "/simcore/postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc", + "itisfoundation/postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc", + "/simcore/services/stuff/postgres:10.11", + ], +) +async def test_get_service_key_version_from_docker_service_except_invalid_keys( + configure_registry_access: EnvVarsDict, + registry_settings: RegistrySettings, + fake_service_str: str, +): + docker_service_partial_inspect = { + "Spec": { + "TaskTemplate": { + "ContainerSpec": { + "Image": f"{registry_settings.resolved_registry_url 
if fake_service_str.startswith('/') else ''}{fake_service_str}" + } + } + } + } + with pytest.raises(DirectorRuntimeError): + await producer._get_service_key_version_from_docker_service( # noqa: SLF001 + docker_service_partial_inspect, registry_settings + ) diff --git a/services/director/tests/unit/test_registry_proxy.py b/services/director/tests/unit/test_registry_proxy.py new file mode 100644 index 00000000000..c15ccd7df7f --- /dev/null +++ b/services/director/tests/unit/test_registry_proxy.py @@ -0,0 +1,301 @@ +# pylint: disable=W0613, W0621 +# pylint: disable=unused-variable + +import asyncio +import json +import time +from unittest import mock + +import pytest +from fastapi import FastAPI +from pytest_benchmark.plugin import BenchmarkFixture +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.docker_registry import RegistrySettings +from simcore_service_director import registry_proxy +from simcore_service_director.core.settings import ApplicationSettings + + +async def test_list_no_services_available( + configure_registry_access: EnvVarsDict, + app: FastAPI, +): + + computational_services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.COMPUTATIONAL + ) + assert not computational_services # it's empty + interactive_services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.DYNAMIC + ) + assert not interactive_services + all_services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.ALL + ) + assert not all_services + + +async def test_list_computational_services( + configure_registry_access: EnvVarsDict, + app: FastAPI, + push_services, +): + await push_services( + number_of_computational_services=6, number_of_interactive_services=3 + ) + + computational_services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.COMPUTATIONAL + ) + assert len(computational_services) == 6 + + +async def test_list_interactive_services( + configure_registry_access: EnvVarsDict, + app: FastAPI, + push_services, +): + await push_services( + number_of_computational_services=5, number_of_interactive_services=4 + ) + interactive_services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.DYNAMIC + ) + assert len(interactive_services) == 4 + + +async def test_list_of_image_tags( + configure_registry_access: EnvVarsDict, + app: FastAPI, + push_services, +): + images = await push_services( + number_of_computational_services=5, number_of_interactive_services=3 + ) + image_number = {} + for image in images: + service_description = image["service_description"] + key = service_description["key"] + if key not in image_number: + image_number[key] = 0 + image_number[key] = image_number[key] + 1 + + for key, number in image_number.items(): + list_of_image_tags = await registry_proxy.list_image_tags(app, key) + assert len(list_of_image_tags) == number + + +async def test_list_interactive_service_dependencies( + configure_registry_access: EnvVarsDict, + app: FastAPI, + push_services, +): + images = await push_services( + number_of_computational_services=2, + number_of_interactive_services=2, + inter_dependent_services=True, + ) + for image in images: + service_description = image["service_description"] + docker_labels = image["docker_labels"] + if "simcore.service.dependencies" in docker_labels: + docker_dependencies = json.loads( + 
docker_labels["simcore.service.dependencies"]
+            )
+            image_dependencies = (
+                await registry_proxy.list_interactive_service_dependencies(
+                    app,
+                    service_description["key"],
+                    service_description["version"],
+                )
+            )
+            assert isinstance(image_dependencies, list)
+            assert len(image_dependencies) == len(docker_dependencies)
+            assert image_dependencies[0]["key"] == docker_dependencies[0]["key"]
+            assert image_dependencies[0]["tag"] == docker_dependencies[0]["tag"]
+
+
+async def test_get_image_labels(
+    configure_registry_access: EnvVarsDict,
+    app: FastAPI,
+    push_services,
+):
+    images = await push_services(
+        number_of_computational_services=1, number_of_interactive_services=1
+    )
+    images_digests = set()
+    for image in images:
+        service_description = image["service_description"]
+        labels, image_manifest_digest = await registry_proxy.get_image_labels(
+            app, service_description["key"], service_description["version"]
+        )
+        assert "io.simcore.key" in labels
+        assert "io.simcore.version" in labels
+        assert "io.simcore.type" in labels
+        assert "io.simcore.name" in labels
+        assert "io.simcore.description" in labels
+        assert "io.simcore.authors" in labels
+        assert "io.simcore.contact" in labels
+        assert "io.simcore.inputs" in labels
+        assert "io.simcore.outputs" in labels
+        if service_description["type"] == "dynamic":
+            # dynamic services have this additional flag
+            assert "simcore.service.settings" in labels
+
+        assert image_manifest_digest == await registry_proxy.get_image_digest(
+            app, service_description["key"], service_description["version"]
+        )
+        assert image_manifest_digest is not None
+        assert image_manifest_digest not in images_digests
+        images_digests.add(image_manifest_digest)
+
+
+def test_get_service_first_name():
+    repo = "simcore/services/dynamic/myservice/modeler/my-sub-modeler"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/dynamic/myservice/modeler"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/dynamic/myservice"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/comp/myservice"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/comp/myservice/modeler"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/comp/myservice/modeler/blahblahblah"
+    assert registry_proxy.get_service_first_name(repo) == "myservice"
+    repo = "simcore/services/comp"
+    assert registry_proxy.get_service_first_name(repo) == "invalid service"
+
+    repo = "services/myservice/modeler/my-sub-modeler"
+    assert registry_proxy.get_service_first_name(repo) == "invalid service"
+
+
+def test_get_service_last_names():
+    repo = "simcore/services/dynamic/myservice/modeler/my-sub-modeler"
+    assert (
+        registry_proxy.get_service_last_names(repo)
+        == "myservice_modeler_my-sub-modeler"
+    )
+    repo = "simcore/services/dynamic/myservice/modeler"
+    assert registry_proxy.get_service_last_names(repo) == "myservice_modeler"
+    repo = "simcore/services/dynamic/myservice"
+    assert registry_proxy.get_service_last_names(repo) == "myservice"
+    repo = "simcore/services/dynamic"
+    assert registry_proxy.get_service_last_names(repo) == "invalid service"
+    repo = "simcore/services/comp/myservice/modeler"
+    assert registry_proxy.get_service_last_names(repo) == "myservice_modeler"
+    repo = "services/dynamic/modeler"
+    assert registry_proxy.get_service_last_names(repo) == "invalid service"
+
+
+async def
test_get_image_details( + configure_registry_access: EnvVarsDict, + app: FastAPI, + push_services, +): + images = await push_services( + number_of_computational_services=1, number_of_interactive_services=1 + ) + for image in images: + service_description = image["service_description"] + details = await registry_proxy.get_image_details( + app, service_description["key"], service_description["version"] + ) + + assert details.pop("image_digest").startswith("sha") + + assert details == service_description + + +async def test_list_services( + configure_registry_access: EnvVarsDict, + configure_number_concurrency_calls: EnvVarsDict, + app: FastAPI, + push_services, +): + await push_services( + number_of_computational_services=21, number_of_interactive_services=21 + ) + services = await registry_proxy.list_services(app, registry_proxy.ServiceType.ALL) + assert len(services) == 42 + + +@pytest.fixture +def configure_registry_caching( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, {"DIRECTOR_REGISTRY_CACHING": True} + ) + + +@pytest.fixture +def with_disabled_auto_caching(mocker: MockerFixture) -> mock.Mock: + return mocker.patch( + "simcore_service_director.registry_proxy._list_all_services_task", autospec=True + ) + + +async def test_registry_caching( + configure_registry_access: EnvVarsDict, + configure_registry_caching: EnvVarsDict, + with_disabled_auto_caching: mock.Mock, + app_settings: ApplicationSettings, + app: FastAPI, + push_services, +): + images = await push_services( + number_of_computational_services=201, number_of_interactive_services=201 + ) + assert app_settings.DIRECTOR_REGISTRY_CACHING is True + + start_time = time.perf_counter() + services = await registry_proxy.list_services(app, registry_proxy.ServiceType.ALL) + time_to_retrieve_without_cache = time.perf_counter() - start_time + assert len(services) == len(images) + start_time = time.perf_counter() + services = await registry_proxy.list_services(app, registry_proxy.ServiceType.ALL) + time_to_retrieve_with_cache = time.perf_counter() - start_time + assert len(services) == len(images) + assert time_to_retrieve_with_cache < time_to_retrieve_without_cache + print("time to retrieve services without cache: ", time_to_retrieve_without_cache) + print("time to retrieve services with cache: ", time_to_retrieve_with_cache) + + +@pytest.fixture +def configure_number_concurrency_calls( + app_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + return app_environment | setenvs_from_dict( + monkeypatch, + envs={ + "DIRECTOR_REGISTRY_CLIENT_MAX_CONCURRENT_CALLS": "50", + "DIRECTOR_REGISTRY_CLIENT_MAX_NUMBER_OF_RETRIEVED_OBJECTS": "50", + }, + ) + + +def test_list_services_performance( + skip_if_external_envfile_dict: None, + configure_external_registry_access: EnvVarsDict, + configure_number_concurrency_calls: EnvVarsDict, + registry_settings: RegistrySettings, + app: FastAPI, + benchmark: BenchmarkFixture, +): + async def _list_services(): + start_time = time.perf_counter() + services = await registry_proxy.list_services( + app, registry_proxy.ServiceType.ALL + ) + stop_time = time.perf_counter() + print( + f"\nTime to list services: {stop_time - start_time:.3}s, {len(services)} services in {registry_settings.resolved_registry_url}, rate: {(stop_time - start_time) / len(services or [1]):.3}s/service" + ) + + def run_async_test() -> None: + asyncio.get_event_loop().run_until_complete(_list_services()) + + 
benchmark.pedantic(run_async_test, rounds=5) diff --git a/services/docker-api-proxy/Dockerfile b/services/docker-api-proxy/Dockerfile new file mode 100644 index 00000000000..e179269ae89 --- /dev/null +++ b/services/docker-api-proxy/Dockerfile @@ -0,0 +1,45 @@ +FROM caddy:2.10.0-alpine AS base + +LABEL maintainer=GitHK + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN addgroup -g ${SC_USER_ID} ${SC_USER_NAME} && \ + adduser -u ${SC_USER_ID} -G ${SC_USER_NAME} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + +RUN apk add --no-cache \ + curl \ + socat \ + su-exec + +# Health check to ensure the proxy is running +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD curl --fail-with-body --user ${DOCKER_API_PROXY_USER}:${DOCKER_API_PROXY_PASSWORD} http://localhost:8888/version + +COPY --chown=scu:scu services/docker-api-proxy/docker services/docker-api-proxy/docker +RUN chmod +x services/docker-api-proxy/docker/*.sh && \ + mv services/docker-api-proxy/docker/Caddyfile /etc/caddy/Caddyfile + +ENTRYPOINT [ "/bin/sh", "services/docker-api-proxy/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/docker-api-proxy/docker/boot.sh"] + +FROM base AS development +ENV SC_BUILD_TARGET=development + +FROM base AS production +ENV SC_BUILD_TARGET=production diff --git a/services/docker-api-proxy/Makefile b/services/docker-api-proxy/Makefile new file mode 100644 index 00000000000..82ebf1a73f3 --- /dev/null +++ b/services/docker-api-proxy/Makefile @@ -0,0 +1,2 @@ +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile diff --git a/services/docker-api-proxy/docker/Caddyfile b/services/docker-api-proxy/docker/Caddyfile new file mode 100644 index 00000000000..dbd450e0f0c --- /dev/null +++ b/services/docker-api-proxy/docker/Caddyfile @@ -0,0 +1,11 @@ +:8888 { + handle { + basicauth { + {$DOCKER_API_PROXY_USER} {$DOCKER_API_PROXY_ENCRYPTED_PASSWORD} + } + + reverse_proxy http://localhost:8889 { + health_uri /version + } + } +} diff --git a/services/docker-api-proxy/docker/boot.sh b/services/docker-api-proxy/docker/boot.sh new file mode 100755 index 00000000000..fac4bddcf10 --- /dev/null +++ b/services/docker-api-proxy/docker/boot.sh @@ -0,0 +1,18 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." 
+echo "$INFO" "User :$(id "$(whoami)")" + +# +# RUNNING application +# +socat TCP-LISTEN:8889,fork,reuseaddr UNIX-CONNECT:/var/run/docker.sock & + +DOCKER_API_PROXY_ENCRYPTED_PASSWORD=$(caddy hash-password --plaintext "$DOCKER_API_PROXY_PASSWORD") \ + caddy run --adapter caddyfile --config /etc/caddy/Caddyfile diff --git a/services/docker-api-proxy/docker/entrypoint.sh b/services/docker-api-proxy/docker/entrypoint.sh new file mode 100755 index 00000000000..d22786d7253 --- /dev/null +++ b/services/docker-api-proxy/docker/entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." +echo "$INFO" "User :$(id "$(whoami)")" + +# Appends docker group +DOCKER_MOUNT=/var/run/docker.sock +echo "INFO: adding user to group..." +GROUPID=$(stat -c %g $DOCKER_MOUNT) # Alpine uses `-c` instead of `--format` +GROUPNAME=scdocker + +# Check if a group with the specified GID exists +if ! addgroup -g "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "WARNING: docker group with GID $GROUPID already exists, getting group name..." + # Get the group name based on GID + GROUPNAME=$(getent group | awk -F: "\$3 == $GROUPID {print \$1}") + echo "WARNING: docker group with GID $GROUPID has name $GROUPNAME" +fi + +# Add the user to the group +adduser "$SC_USER_NAME" $GROUPNAME + +exec su-exec "$SC_USER_NAME" "$@" diff --git a/services/docker-api-proxy/requirements/Makefile b/services/docker-api-proxy/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/services/docker-api-proxy/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt diff --git a/services/docker-api-proxy/requirements/_base.in b/services/docker-api-proxy/requirements/_base.in new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/docker-api-proxy/requirements/_base.txt b/services/docker-api-proxy/requirements/_base.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/docker-api-proxy/requirements/_test.in b/services/docker-api-proxy/requirements/_test.in new file mode 100644 index 00000000000..321d2e72461 --- /dev/null +++ b/services/docker-api-proxy/requirements/_test.in @@ -0,0 +1,22 @@ +--constraint ../../../requirements/constraints.txt + +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_base.in + +aiodocker +arrow +asgi_lifespan +docker +faker +fastapi +fastapi-lifespan-manager +flaky +pytest +pytest-asyncio +pytest-cov +pytest-mock +python-dotenv +PyYAML +tenacity diff --git a/services/docker-api-proxy/requirements/_test.txt b/services/docker-api-proxy/requirements/_test.txt new file mode 100644 index 00000000000..a16e59f627c --- /dev/null +++ b/services/docker-api-proxy/requirements/_test.txt @@ -0,0 +1,455 @@ +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_test.in +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.4.6 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 + # via + # fast-depends + # faststream + # starlette +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_test.in +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==25.1.0 + # via + # aiohttp + # jsonschema + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via typer +coverage==7.6.12 + # via pytest-cov +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +docker==7.1.0 + # via -r requirements/_test.in +email-validator==2.2.0 + # via pydantic +exceptiongroup==1.2.2 + # via aio-pika +faker==36.1.1 + # via -r requirements/_test.in +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/_test.in + # fastapi-lifespan-manager +fastapi-lifespan-manager==0.1.4 + # via -r requirements/_test.in +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +flaky==3.8.1 + # via -r requirements/_test.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.68.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +idna==3.10 + # via + # anyio + # email-validator + # requests + # yarl +importlib-metadata==8.5.0 + # via 
opentelemetry-api +iniconfig==2.0.0 + # via pytest +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiohttp + # yarl +opentelemetry-api==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # opentelemetry-instrumentation + # pytest +pamqp==3.3.0 + # via aiormq +pluggy==1.5.0 + # via pytest +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-mock +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via + # -r requirements/_test.in + # pydantic-settings +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r 
requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_test.in +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # 
via + # docker + # opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # typer +rpds-py==0.23.1 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via + # anyio + # asgi-lifespan +starlette==0.46.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_test.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # anyio + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # typer +tzdata==2025.1 + # via faker +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # docker + # requests +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.18.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/services/docker-api-proxy/requirements/_tools.in b/services/docker-api-proxy/requirements/_tools.in new file mode 100644 index 00000000000..140b6ed2e30 --- /dev/null +++ b/services/docker-api-proxy/requirements/_tools.in @@ -0,0 +1,6 @@ +--constraint ../../../requirements/constraints.txt + +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/services/docker-api-proxy/requirements/_tools.txt b/services/docker-api-proxy/requirements/_tools.txt new file mode 100644 index 00000000000..24be1a5cfb8 --- /dev/null +++ b/services/docker-api-proxy/requirements/_tools.txt @@ -0,0 +1,80 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_test.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c 
requirements/../../../requirements/constraints.txt + # -c requirements/_test.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/services/docker-api-proxy/requirements/ci.txt b/services/docker-api-proxy/requirements/ci.txt new file mode 100644 index 00000000000..419f091d4d0 --- /dev/null +++ b/services/docker-api-proxy/requirements/ci.txt @@ -0,0 +1,18 @@ +# Shortcut to install all packages for the continuous integration (CI) of 'services/docker-api-proxy' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt + +# installs this repo's packages +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library/ +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ diff --git a/services/docker-api-proxy/requirements/dev.txt b/services/docker-api-proxy/requirements/dev.txt new file mode 100644 index 00000000000..57a8239ab79 --- /dev/null +++ b/services/docker-api-proxy/requirements/dev.txt @@ -0,0 +1,19 @@ +# Shortcut to install all packages needed to develop 'services/docker-api-proxy' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs this repo's packages +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library +--editable ../../packages/settings-library + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt diff --git a/services/docker-api-proxy/tests/integration/conftest.py b/services/docker-api-proxy/tests/integration/conftest.py new file mode 100644 index 00000000000..10878c70d57 --- /dev/null +++ b/services/docker-api-proxy/tests/integration/conftest.py @@ -0,0 +1,77 @@ +# pylint:disable=unrecognized-options + +from collections.abc import AsyncIterator, Callable +from contextlib import AbstractAsyncContextManager, asynccontextmanager +from typing import Annotated + +import aiodocker +import pytest +from asgi_lifespan import LifespanManager as ASGILifespanManager +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from pydantic import Field +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.fastapi.docker import ( + create_remote_docker_client_input_state, + get_remote_docker_client, + remote_docker_client_lifespan, +) +from settings_library.application import BaseApplicationSettings +from settings_library.docker_api_proxy import DockerApiProxysettings + +pytest_plugins = [ + "pytest_simcore.docker_api_proxy", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.repository_paths", + "pytest_simcore.simcore_services", +] + + +def pytest_configure(config): + # Set asyncio_mode to "auto" + config.option.asyncio_mode = "auto" + + +class ApplicationSetting(BaseApplicationSettings): + DOCKER_API_PROXY: Annotated[ + DockerApiProxysettings, + 
Field(json_schema_extra={"auto_default_from_env": True}), + ] + + +async def _settings_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSetting = app.state.settings + + yield { + **create_remote_docker_client_input_state(settings.DOCKER_API_PROXY), + } + + +def _get_test_app() -> FastAPI: + settings = ApplicationSetting.create_from_envs() + + lifespan_manager = LifespanManager() + lifespan_manager.add(_settings_lifespan) + lifespan_manager.add(remote_docker_client_lifespan) + + app = FastAPI(lifespan=lifespan_manager) + app.state.settings = settings + + return app + + +@pytest.fixture +async def setup_docker_client( + monkeypatch: pytest.MonkeyPatch, +) -> Callable[[EnvVarsDict], AbstractAsyncContextManager[aiodocker.Docker]]: + @asynccontextmanager + async def _(env_vars: EnvVarsDict) -> AsyncIterator[aiodocker.Docker]: + setenvs_from_dict(monkeypatch, env_vars) + + app = _get_test_app() + + async with ASGILifespanManager(app, startup_timeout=30, shutdown_timeout=30): + yield get_remote_docker_client(app) + + return _ diff --git a/services/docker-api-proxy/tests/integration/test_docker_api_proxy.py b/services/docker-api-proxy/tests/integration/test_docker_api_proxy.py new file mode 100644 index 00000000000..4456a1d03d4 --- /dev/null +++ b/services/docker-api-proxy/tests/integration/test_docker_api_proxy.py @@ -0,0 +1,61 @@ +# pylint: disable=unused-argument + +import json +from collections.abc import Callable +from contextlib import AbstractAsyncContextManager + +import aiodocker +import pytest +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from servicelib.aiohttp import status +from settings_library.docker_api_proxy import DockerApiProxysettings + +pytest_simcore_core_services_selection = [ + "docker-api-proxy", +] + + +async def test_authenticated_docker_client( + docker_swarm: None, + docker_api_proxy_settings: DockerApiProxysettings, + setup_docker_client: Callable[ + [EnvVarsDict], AbstractAsyncContextManager[aiodocker.Docker] + ], +): + envs = { + "DOCKER_API_PROXY_HOST": "127.0.0.1", + "DOCKER_API_PROXY_PORT": "8014", + "DOCKER_API_PROXY_USER": docker_api_proxy_settings.DOCKER_API_PROXY_USER, + "DOCKER_API_PROXY_PASSWORD": docker_api_proxy_settings.DOCKER_API_PROXY_PASSWORD.get_secret_value(), + } + async with setup_docker_client(envs) as working_docker: + info = await working_docker.system.info() + print(json.dumps(info, indent=2)) + + +@pytest.mark.parametrize( + "user, password", + [ + ("wrong", "wrong"), + ("wrong", "admin"), + ], +) +async def test_unauthenticated_docker_client( + docker_swarm: None, + docker_api_proxy_settings: DockerApiProxysettings, + setup_docker_client: Callable[ + [EnvVarsDict], AbstractAsyncContextManager[aiodocker.Docker] + ], + user: str, + password: str, +): + envs = { + "DOCKER_API_PROXY_HOST": "127.0.0.1", + "DOCKER_API_PROXY_PORT": "8014", + "DOCKER_API_PROXY_USER": user, + "DOCKER_API_PROXY_PASSWORD": password, + } + async with setup_docker_client(envs) as working_docker: + with pytest.raises(aiodocker.exceptions.DockerError) as exc: + await working_docker.system.info() + assert exc.value.status == status.HTTP_401_UNAUTHORIZED diff --git a/services/docker-bake.hcl b/services/docker-bake.hcl deleted file mode 100644 index c11de1c6834..00000000000 --- a/services/docker-bake.hcl +++ /dev/null @@ -1,21 +0,0 @@ -variable "DOCKER_REGISTRY" { - default = "itisfoundation" -} - -variable "DASK_SIDECAR_VERSION" { - default = "latest" -} - -target "dask-sidecar" { - tags = 
["${DOCKER_REGISTRY}/dask-sidecar:latest","${DOCKER_REGISTRY}/dask-sidecar:${DASK_SIDECAR_VERSION}"] - output = ["type=registry"] -} - -variable "OSPARC_GATEWAY_SERVER_VERSION" { - default = "latest" -} - -target "osparc-gateway-server" { - tags = ["${DOCKER_REGISTRY}/osparc-gateway-server:latest","${DOCKER_REGISTRY}/osparc-gateway-server:${OSPARC_GATEWAY_SERVER_VERSION}"] - output = ["type=registry"] -} diff --git a/services/docker-compose-build.yml b/services/docker-compose-build.yml index de80865e0fb..717e40645fc 100644 --- a/services/docker-compose-build.yml +++ b/services/docker-compose-build.yml @@ -9,7 +9,6 @@ # # NOTE: the dask-scheduler uses the same image as the dask-sidecar. there is no need to build it twice! # -version: "3.8" services: service-integration: image: local/service-integration:${BUILD_TARGET:?build_target_required} @@ -42,9 +41,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${API_SERVER_API_VERSION}" autoscaling: @@ -60,9 +59,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${AUTOSCALING_API_VERSION}" catalog: @@ -78,11 +77,27 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${CATALOG_API_VERSION}" + clusters-keeper: + image: local/clusters-keeper:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/clusters-keeper/Dockerfile + cache_from: + - local/clusters-keeper:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/clusters-keeper:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/clusters-keeper:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/clusters-keeper:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + director: image: local/director:${BUILD_TARGET:?build_target_required} build: @@ -96,9 +111,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${DIRECTOR_API_VERSION}" dask-sidecar: @@ -115,9 +130,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: 
org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" director-v2: image: local/director-v2:${BUILD_TARGET:?build_target_required} @@ -132,9 +147,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${DIRECTOR_V2_API_VERSION}" migration: @@ -150,20 +165,38 @@ services: target: production labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + + notifications: + image: local/notifications:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/notifications/Dockerfile + cache_from: + - local/notifications:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/notifications:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/notifications:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/notifications:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.label-schema.schema-version: "1.0" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + io.osparc.api-version: "${NOTIFICATIONS_API_VERSION}" - osparc-gateway-server: - image: local/osparc-gateway-server:${BUILD_TARGET:?build_target_required} + resource-usage-tracker: + image: local/resource-usage-tracker:${BUILD_TARGET:?build_target_required} build: context: ../ - dockerfile: services/osparc-gateway-server/Dockerfile + dockerfile: services/resource-usage-tracker/Dockerfile cache_from: - - local/osparc-gateway-server:${BUILD_TARGET:?build_target_required} - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:master-github-latest - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:staging-github-latest - - ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:release-github-latest + - local/resource-usage-tracker:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/resource-usage-tracker:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/resource-usage-tracker:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/resource-usage-tracker:release-github-latest target: ${BUILD_TARGET:?build_target_required} labels: org.opencontainers.image.created: "${BUILD_DATE}" @@ -189,9 +222,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" webserver: image: 
local/webserver:${BUILD_TARGET:?build_target_required} @@ -210,9 +243,9 @@ services: - VCS_REF=${VCS_REF} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${WEBSERVER_API_VERSION}" dynamic-sidecar: @@ -228,9 +261,25 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + + efs-guardian: + image: local/efs-guardian:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/efs-guardian/Dockerfile + cache_from: + - local/efs-guardian:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/efs-guardian:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/efs-guardian:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/efs-guardian:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" invitations: image: local/invitations:${BUILD_TARGET:?build_target_required} @@ -245,11 +294,62 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${INVITATIONS_API_VERSION}" + + payments: + image: local/payments:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/payments/Dockerfile + cache_from: + - local/payments:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/payments:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/payments:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/payments:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + io.osparc.api-version: "${PAYMENTS_API_VERSION}" + + dynamic-scheduler: + image: local/dynamic-scheduler:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/dynamic-scheduler/Dockerfile + cache_from: + - local/dynamic-scheduler:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/dynamic-scheduler:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/dynamic-scheduler:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/dynamic-scheduler:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + io.osparc.api-version: "${DYNAMIC_SCHEDULER_API_VERSION}" + + 
docker-api-proxy: + image: local/docker-api-proxy:${BUILD_TARGET:?build_target_required} + build: + context: ../ + dockerfile: services/docker-api-proxy/Dockerfile + cache_from: + - local/docker-api-proxy:${BUILD_TARGET:?build_target_required} + - ${DOCKER_REGISTRY:-itisfoundation}/docker-api-proxy:master-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/docker-api-proxy:staging-github-latest + - ${DOCKER_REGISTRY:-itisfoundation}/docker-api-proxy:release-github-latest + target: ${BUILD_TARGET:?build_target_required} + labels: + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" + datcore-adapter: image: local/datcore-adapter:${BUILD_TARGET:?build_target_required} build: @@ -263,9 +363,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${DATCORE_ADAPTER_API_VERSION}" storage: @@ -281,9 +381,9 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${STORAGE_API_VERSION}" agent: @@ -305,7 +405,7 @@ services: target: ${BUILD_TARGET:?build_target_required} labels: org.label-schema.schema-version: "1.0" - org.label-schema.build-date: "${BUILD_DATE}" - org.label-schema.vcs-url: "${VCS_URL}" - org.label-schema.vcs-ref: "${VCS_REF}" + org.opencontainers.image.created: "${BUILD_DATE}" + org.opencontainers.image.source: "${VCS_URL}" + org.opencontainers.image.revision: "${VCS_REF}" io.osparc.api-version: "${AGENT_API_VERSION}" diff --git a/services/docker-compose-deploy.yml b/services/docker-compose-deploy.yml index 45a3d2e31b9..e6c21da36db 100644 --- a/services/docker-compose-deploy.yml +++ b/services/docker-compose-deploy.yml @@ -1,49 +1,45 @@ -version: "3.8" services: + agent: + image: ${DOCKER_REGISTRY:-itisfoundation}/agent:${DOCKER_IMAGE_TAG:-latest} api-server: image: ${DOCKER_REGISTRY:-itisfoundation}/api-server:${DOCKER_IMAGE_TAG:-latest} - autoscaling: image: ${DOCKER_REGISTRY:-itisfoundation}/autoscaling:${DOCKER_IMAGE_TAG:-latest} - catalog: image: ${DOCKER_REGISTRY:-itisfoundation}/catalog:${DOCKER_IMAGE_TAG:-latest} - - director: - image: ${DOCKER_REGISTRY:-itisfoundation}/director:${DOCKER_IMAGE_TAG:-latest} - - director-v2: - image: ${DOCKER_REGISTRY:-itisfoundation}/director-v2:${DOCKER_IMAGE_TAG:-latest} - - webserver: - image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} - + clusters-keeper: + image: ${DOCKER_REGISTRY:-itisfoundation}/clusters-keeper:${DOCKER_IMAGE_TAG:-latest} dask-sidecar: image: ${DOCKER_REGISTRY:-itisfoundation}/dask-sidecar:${DOCKER_IMAGE_TAG:-latest} - datcore-adapter: image: ${DOCKER_REGISTRY:-itisfoundation}/datcore-adapter:${DOCKER_IMAGE_TAG:-latest} - - storage: - image: ${DOCKER_REGISTRY:-itisfoundation}/storage:${DOCKER_IMAGE_TAG:-latest} - + director: + image: 
${DOCKER_REGISTRY:-itisfoundation}/director:${DOCKER_IMAGE_TAG:-latest} + director-v2: + image: ${DOCKER_REGISTRY:-itisfoundation}/director-v2:${DOCKER_IMAGE_TAG:-latest} + dynamic-sidecar: + image: ${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} + efs-guardian: + image: ${DOCKER_REGISTRY:-itisfoundation}/efs-guardian:${DOCKER_IMAGE_TAG:-latest} + invitations: + image: ${DOCKER_REGISTRY:-itisfoundation}/invitations:${DOCKER_IMAGE_TAG:-latest} migration: image: ${DOCKER_REGISTRY:-itisfoundation}/migration:${DOCKER_IMAGE_TAG:-latest} - - osparc-gateway-server: - image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest} - + notifications: + image: ${DOCKER_REGISTRY:-itisfoundation}/notifications:${DOCKER_IMAGE_TAG:-latest} + payments: + image: ${DOCKER_REGISTRY:-itisfoundation}/payments:${DOCKER_IMAGE_TAG:-latest} + dynamic-scheduler: + image: ${DOCKER_REGISTRY:-itisfoundation}/dynamic-scheduler:${DOCKER_IMAGE_TAG:-latest} + docker-api-proxy: + image: ${DOCKER_REGISTRY:-itisfoundation}/docker-api-proxy:${DOCKER_IMAGE_TAG:-latest} + resource-usage-tracker: + image: ${DOCKER_REGISTRY:-itisfoundation}/resource-usage-tracker:${DOCKER_IMAGE_TAG:-latest} service-integration: image: ${DOCKER_REGISTRY:-itisfoundation}/service-integration:${DOCKER_IMAGE_TAG:-latest} - - invitations: - image: ${DOCKER_REGISTRY:-itisfoundation}/invitations:${DOCKER_IMAGE_TAG:-latest} - static-webserver: image: ${DOCKER_REGISTRY:-itisfoundation}/static-webserver:${DOCKER_IMAGE_TAG:-latest} - - dynamic-sidecar: - image: ${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} - - agent: - image: ${DOCKER_REGISTRY:-itisfoundation}/agent:${DOCKER_IMAGE_TAG:-latest} + storage: + image: ${DOCKER_REGISTRY:-itisfoundation}/storage:${DOCKER_IMAGE_TAG:-latest} + webserver: + image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} diff --git a/services/docker-compose-dev-vendors.yml b/services/docker-compose-dev-vendors.yml new file mode 100644 index 00000000000..2c885c0ea95 --- /dev/null +++ b/services/docker-compose-dev-vendors.yml @@ -0,0 +1,35 @@ + +# NOTE: this stack is only for development and testing of vendor services. +# the actual code is deployed inside the ops repository.
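The manual service defined right below is driven purely by compose interpolation, so the new docker-compose-dev-vendors.yml stack only does something useful when the deployment environment supplies the VENDOR_DEV_* values. A minimal sketch of the variables it reads is shown here with purely illustrative values (the real ones live in the deployment's .env and in the ops repository):

    # hypothetical values, for illustration only
    VENDOR_DEV_MANUAL_IMAGE: itisfoundation/vendor-manual:latest   # assumed image name
    VENDOR_DEV_MANUAL_REPLICAS: 1
    VENDOR_DEV_MANUAL_SUBDOMAIN: manual        # prefix matched by the Traefik HostRegexp rule
    SWARM_STACK_NAME: simcore
    TRAEFIK_SIMCORE_ZONE: internal_simcore_stack
    WEBSERVER_HOST: webserver                  # forward-auth target, see the labels below
    WEBSERVER_PORT: 8080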
+ +services: + + manual: + image: ${VENDOR_DEV_MANUAL_IMAGE} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + deploy: + replicas: ${VENDOR_DEV_MANUAL_REPLICAS} + labels: + - io.simcore.zone=${TRAEFIK_SIMCORE_ZONE} + - traefik.enable=true + - traefik.swarm.network=${SWARM_STACK_NAME}_default + # auth: https://doc.traefik.io/traefik/middlewares/http/forwardauth + - traefik.http.middlewares.${SWARM_STACK_NAME}_manual-auth.forwardauth.address=http://${WEBSERVER_HOST}:${WEBSERVER_PORT}/v0/auth:check + - traefik.http.middlewares.${SWARM_STACK_NAME}_manual-auth.forwardauth.trustForwardHeader=true + - traefik.http.middlewares.${SWARM_STACK_NAME}_manual-auth.forwardauth.authResponseHeaders=Set-Cookie,osparc-sc2 + # routing + - traefik.http.services.${SWARM_STACK_NAME}_manual.loadbalancer.server.port=80 + - traefik.http.services.${SWARM_STACK_NAME}_manual.loadbalancer.healthcheck.path=/ + - traefik.http.services.${SWARM_STACK_NAME}_manual.loadbalancer.healthcheck.interval=2000ms + - traefik.http.services.${SWARM_STACK_NAME}_manual.loadbalancer.healthcheck.timeout=1000ms + - traefik.http.routers.${SWARM_STACK_NAME}_manual.entrypoints=http + - traefik.http.routers.${SWARM_STACK_NAME}_manual.rule=HostRegexp(`${VENDOR_DEV_MANUAL_SUBDOMAIN}\.(?P.+)`) + - traefik.http.routers.${SWARM_STACK_NAME}_manual.middlewares=${SWARM_STACK_NAME}_gzip@swarm, ${SWARM_STACK_NAME}_manual-auth + networks: + - simcore_default + +networks: + simcore_default: + name: ${SWARM_STACK_NAME}_default + external: true diff --git a/services/docker-compose-ops-ci.yml b/services/docker-compose-ops-ci.yml new file mode 100644 index 00000000000..ec2b2923ff7 --- /dev/null +++ b/services/docker-compose-ops-ci.yml @@ -0,0 +1,58 @@ +# Minimal docker-compose configuration to deploy a stack that contains a selection +# of the services deployed with ITISFoundation/osparc-ops +# +# These configurations avoid having to deploy the entire +# ITISFoundation/osparc-ops to operate osparc-simcore stack during development +# +# By default, the **ops** stack is automatically deployed in all the make up-* targets but +# can be disabled by passing the 'ops_disabled' flag +# +# $ make ops_disabled=1 up-devel +# $ make ops_disabled=1 up-prod +# $ make ops_disabled=1 up-version +# $ make ops_disabled=1 up-latest +# +# Nonetheless, notice that minio is a service used by the simcore stack. Therefore, disabling ops stack + # is meaningful ONLY when simcore stack is intended to run with the actual stacks from osparc-ops + # + # NOTE: this stack cannot be called tools because it collides with default network created in services/static-webserver/client/tools/docker-compose.yml + # IMPORTANT: This stack IS NOT used in the deployed version + +services: + minio: + image: minio/minio:RELEASE.2025-04-22T22-12-26Z + init: true + environment: + MINIO_ACCESS_KEY : ${S3_ACCESS_KEY:?access_key_required} + MINIO_SECRET_KEY : ${S3_SECRET_KEY:?secret_key_required} + ports: + - "9001:9000" + command: server /data + volumes: + - minio_data:/data + networks: + - simcore_default + - interactive_services_subnet + healthcheck: + test: + [ + "CMD", + "curl", + "--fail", + "http://localhost:9000/minio/health/live" + ] + interval: 5s + timeout: 20s + retries: 5 + +volumes: + minio_data: + name: ops_minio_data + +networks: + simcore_default: + name: ${SWARM_STACK_NAME:-simcore}_default + external: true + interactive_services_subnet: + name: ${SWARM_STACK_NAME:-simcore}_interactive_services_subnet + external: true diff --git a/services/docker-compose-ops-registry.yml b/services/docker-compose-ops-registry.yml index cf290881a6d..ffc4b5d206c 100644 --- a/services/docker-compose-ops-registry.yml +++ b/services/docker-compose-ops-registry.yml @@ -8,7 +8,7 @@ services: container_name: registry init: true environment: - - REGISTRY_STORAGE_DELETE_ENABLED=true + REGISTRY_STORAGE_DELETE_ENABLED : true ports: - 5000:5000 volumes: diff --git a/services/docker-compose-ops.yml b/services/docker-compose-ops.yml index 1055d1ad323..3db7af6aed0 100644 --- a/services/docker-compose-ops.yml +++ b/services/docker-compose-ops.yml @@ -17,21 +17,31 @@ # # NOTE: this stack cannot be called tools because it collides with default network created in services/static-webserver/client/tools/docker-compose.yml # IMPORTANT: This stack IS NOT used in the deployed version -version: "3.8" services: adminer: - image: adminer:4.8.0 + image: adminer:4.8.1 init: true environment: - - ADMINER_DEFAULT_SERVER=postgres - - ADMINER_DESIGN=nette - - ADMINER_PLUGINS=json-column + ADMINER_DEFAULT_SERVER : postgres + ADMINER_DESIGN : nette + ADMINER_PLUGINS : json-column ports: - "18080:8080" networks: - simcore_default - + jaeger: + image: jaegertracing/all-in-one:1.47 + networks: + - simcore_default + ports: + - "16686:16686" # Jaeger UI + - "14268:14268" # Jaeger HTTP Thrift + - "14250:14250" # Jaeger gRPC + - "43017:4317" # opentelemetry GRPC default port + - "43018:4318" # opentelemetry HTTP default port + environment: + COLLECTOR_OTLP_ENABLED: "true" portainer: image: portainer/portainer-ce init: true @@ -43,14 +53,16 @@ services: - portainer_data:/data minio: - image: minio/minio:RELEASE.2020-05-16T01-33-21Z + image: minio/minio:RELEASE.2025-04-22T22-12-26Z init: true environment: - - MINIO_ACCESS_KEY=${S3_ACCESS_KEY:?access_key_required} - - MINIO_SECRET_KEY=${S3_SECRET_KEY:?secret_key_required} + MINIO_ROOT_USER : ${S3_ACCESS_KEY:?access_key_required} + MINIO_ROOT_PASSWORD : ${S3_SECRET_KEY:?secret_key_required} + MINIO_VOLUMES : /data ports: - "9001:9000" - command: server /data + - "9090:9090" + command: server --console-address ":9090" volumes: - minio_data:/data networks: @@ -60,39 +72,51 @@ services: test: [ "CMD", - "curl", - "--fail", - "http://localhost:9000/minio/health/live" + "mc", + "ready", + "local" ] interval: 5s - timeout: 20s + timeout: 10s retries: 5 - filestash: - image: machines/filestash:3a01b70 - ports: - - "9002:8334"
- volumes: - - ${TMP_PATH_TO_FILESTASH_CONFIG}:/app/data/state/config/config.json - networks: - - simcore_default - - onlyoffice: - image: onlyoffice/documentserver:7.0.0.132 - networks: - - simcore_default - redis-commander: image: rediscommander/redis-commander:latest init: true environment: - - REDIS_HOSTS=resources:${REDIS_HOST}:${REDIS_PORT}:0,locks:${REDIS_HOST}:${REDIS_PORT}:1,validation_codes:${REDIS_HOST}:${REDIS_PORT}:2,scheduled_maintenance:${REDIS_HOST}:${REDIS_PORT}:3,user_notifications:${REDIS_HOST}:${REDIS_PORT}:4 + - >- + REDIS_HOSTS= + resources:${REDIS_HOST}:${REDIS_PORT}:0:${REDIS_PASSWORD}, + locks:${REDIS_HOST}:${REDIS_PORT}:1:${REDIS_PASSWORD}, + validation_codes:${REDIS_HOST}:${REDIS_PORT}:2:${REDIS_PASSWORD}, + scheduled_maintenance:${REDIS_HOST}:${REDIS_PORT}:3:${REDIS_PASSWORD}, + user_notifications:${REDIS_HOST}:${REDIS_PORT}:4:${REDIS_PASSWORD}, + announcements:${REDIS_HOST}:${REDIS_PORT}:5:${REDIS_PASSWORD}, + distributed_identifiers:${REDIS_HOST}:${REDIS_PORT}:6:${REDIS_PASSWORD}, + deferred_tasks:${REDIS_HOST}:${REDIS_PORT}:7:${REDIS_PASSWORD}, + dynamic_services:${REDIS_HOST}:${REDIS_PORT}:8:${REDIS_PASSWORD} + celery_tasks:${REDIS_HOST}:${REDIS_PORT}:9:${REDIS_PASSWORD} # If you add/remove a db, do not forget to update the --databases entry in the docker-compose.yml ports: - "18081:8081" networks: - simcore_default - + opentelemetry-collector: + image: otel/opentelemetry-collector-contrib:0.105.0 + volumes: + - ./opentelemetry-collector-config.yaml:/etc/otel/config.yaml + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + command: + - "--config=/etc/otel/config.yaml" + ports: + - "4318:4318" # OTLP HTTP receiver + networks: + - simcore_default + - interactive_services_subnet + environment: + TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE: ${TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE} + TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE: ${TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE} + TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT} volumes: minio_data: name: ops_minio_data @@ -101,8 +125,8 @@ volumes: networks: simcore_default: - name: ${SWARM_STACK_NAME:-simcore}_default + name: ${SWARM_STACK_NAME}_default external: true interactive_services_subnet: - name: ${SWARM_STACK_NAME:-simcore}_interactive_services_subnet + name: ${SWARM_STACK_NAME}_interactive_services_subnet external: true diff --git a/services/docker-compose.devel-frontend.yml b/services/docker-compose.devel-frontend.yml new file mode 100644 index 00000000000..bea76b28d7e --- /dev/null +++ b/services/docker-compose.devel-frontend.yml @@ -0,0 +1,9 @@ +# This gets used only after services/docker-compose.local.yml and overrides the definition of +# the static-webserver to be the only one running the dev image +services: + static-webserver: + image: ${DOCKER_REGISTRY:-itisfoundation}/static-webserver:development + volumes: + - ./static-webserver/client/source-output:/static-content + environment: + SERVER_LOG_LEVEL : info diff --git a/services/docker-compose.devel.yml b/services/docker-compose.devel.yml index 55b8a869963..2b994b99ffd 100644 --- a/services/docker-compose.devel.yml +++ b/services/docker-compose.devel.yml @@ -1,105 +1,184 @@ # Overrides docker-compose.yml config when deploying services in development mode, # i.e. bind-mounting the source code into the service containers -# # NOTES: # - port 3000 used for ptvsd # -version: "3.8" +x-common-environment: &common-environment + # Enforces *_DEBUG option in all services. ONLY allowed in devel-mode!
+ DEBUG : "true" + # Enforces app to boot debug mode (see docker/boot.sh). ONLY allowed in devel-mode! + SC_BOOT_MODE : debug + + services: api-server: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOG_LEVEL=DEBUG - - DEBUG=true + <<: *common-environment + API_SERVER_PROFILING : ${API_SERVER_PROFILING} + API_SERVER_LOGLEVEL: DEBUG volumes: - ./api-server:/devel/services/api-server - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv autoscaling: environment: - - SC_BOOT_MODE=debug-ptvsd - - AUTOSCALING_LOGLEVEL=DEBUG - - DEBUG=true + <<: *common-environment + AUTOSCALING_LOGLEVEL: DEBUG + volumes: - ./autoscaling:/devel/services/autoscaling - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv invitations: environment: - - SC_BOOT_MODE=debug-ptvsd - - INVITATIONS_LOGLEVEL=DEBUG - - DEBUG=true + <<: *common-environment + INVITATIONS_LOGLEVEL: DEBUG volumes: - ./invitations:/devel/services/invitations - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + + payments: + environment: + <<: *common-environment + PAYMENTS_LOGLEVEL: DEBUG + volumes: + - ./payments:/devel/services/payments + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + + dynamic-schdlr: + environment: + <<: *common-environment + DYNAMIC_SCHEDULER_PROFILING : ${DYNAMIC_SCHEDULER_PROFILING} + DYNAMIC_SCHEDULER_LOGLEVEL: DEBUG + volumes: + - ./dynamic-scheduler:/devel/services/dynamic-scheduler + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + deploy: + replicas: 1 catalog: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOG_LEVEL=DEBUG - - DEBUG=true - - DYNAMIC_SIDECAR_MOUNT_PATH_DEV=${PWD}/services/dynamic-sidecar + <<: *common-environment + CATALOG_PROFILING : ${CATALOG_PROFILING} + DYNAMIC_SIDECAR_MOUNT_PATH_DEV : ${PWD}/services/dynamic-sidecar + CATALOG_LOGLEVEL: DEBUG volumes: - ./catalog:/devel/services/catalog - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + + notifications: + environment: + <<: *common-environment + NOTIFICATIONS_PROFILING : ${NOTIFICATIONS_PROFILING} + NOTIFICATIONS_LOGLEVEL: DEBUG + volumes: + - ./notifications:/devel/services/notifications + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + + clusters-keeper: + environment: + <<: *common-environment + CLUSTERS_KEEPER_LOGLEVEL: DEBUG + volumes: + - ./clusters-keeper:/devel/services/clusters-keeper + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv datcore-adapter: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOG_LEVEL=DEBUG + <<: *common-environment + DATCORE_ADAPTER_LOGLEVEL: DEBUG volumes: - ./datcore-adapter:/devel/services/datcore-adapter - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv director: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOGLEVEL=debug + LOGLEVEL : debug + SC_BOOT_MODE : debug volumes: - ./director:/devel/services/director - ../packages:/devel/packages - - ../api:/devel/services/api + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv director-v2: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOG_LEVEL=debug - - DEBUG=true - - DYNAMIC_SIDECAR_MOUNT_PATH_DEV=${PWD}/services/dynamic-sidecar + <<: *common-environment + DIRECTOR_V2_PROFILING : ${DIRECTOR_V2_PROFILING} + DYNAMIC_SIDECAR_MOUNT_PATH_DEV : ${PWD}/services/dynamic-sidecar + DIRECTOR_V2_LOGLEVEL: DEBUG + volumes: - ./director-v2:/devel/services/director-v2 - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + + efs-guardian: + 
environment: + <<: *common-environment + EFS_GUARDIAN_LOGLEVEL: DEBUG + volumes: + - ./efs-guardian:/devel/services/efs-guardian + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + deploy: + replicas: 0 static-webserver: volumes: - ./static-webserver/client/source-output:/static-content environment: - - SERVER_LOG_LEVEL=info + SERVER_LOGLEVEL : info webserver: - volumes: - &webserver-volumes-dev + + volumes: &webserver_volumes_devel - ./web/server:/devel/services/web/server - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + environment: &webserver_environment_devel + <<: *common-environment + DEBUG: 1 # NOTE: gunicorn expects an int not a boolean + WEBSERVER_LOGLEVEL: DEBUG + WEBSERVER_PROFILING: ${WEBSERVER_PROFILING} + WEBSERVER_REMOTE_DEBUGGING_PORT: 3000 + WEBSERVER_FUNCTIONS: ${WEBSERVER_FUNCTIONS} + + + wb-api-server: + volumes: *webserver_volumes_devel environment: - &webserver-environment-dev - SC_BOOT_MODE: debug-ptvsd - WEBSERVER_LOGLEVEL: ${LOG_LEVEL:-DEBUG} + <<: *webserver_environment_devel + + wb-db-event-listener: + volumes: *webserver_volumes_devel + environment: + <<: *webserver_environment_devel wb-garbage-collector: - volumes: *webserver-volumes-dev + volumes: *webserver_volumes_devel environment: - <<: *webserver-environment-dev - WEBSERVER_RESOURCES_DELETION_TIMEOUT_SECONDS: 15 + <<: *webserver_environment_devel + RESOURCE_MANAGER_RESOURCE_TTL_S: 15 dask-sidecar: volumes: - &dev-dask-sidecar-volumes + &dask-sidecar_volumes_devel - ./dask-sidecar:/devel/services/dask-sidecar - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv - ${ETC_HOSTNAME:-/etc/hostname}:/home/scu/hostname:ro + environment: - SC_BOOT_MODE: debug-ptvsd + <<: *common-environment SIDECAR_LOGLEVEL: DEBUG ports: - "3000" @@ -107,25 +186,60 @@ services: endpoint_mode: vip dask-scheduler: - volumes: *dev-dask-sidecar-volumes + volumes: *dask-sidecar_volumes_devel + environment: + <<: *common-environment + SIDECAR_LOGLEVEL: DEBUG ports: - "3000" deploy: endpoint_mode: vip replicas: 1 + resource-usage-tracker: + environment: + <<: *common-environment + RESOURCE_USAGE_TRACKER_LOGLEVEL: DEBUG + volumes: + - ./resource-usage-tracker:/devel/services/resource-usage-tracker + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + storage: volumes: - ./storage:/devel/services/storage - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + environment: + <<: *common-environment + STORAGE_PROFILING : ${STORAGE_PROFILING} + STORAGE_LOGLEVEL: DEBUG + + sto-worker: + volumes: + - ./storage:/devel/services/storage + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv + environment: + <<: *common-environment + STORAGE_PROFILING : ${STORAGE_PROFILING} + STORAGE_LOGLEVEL: DEBUG + + sto-worker-cpu-bound: + volumes: + - ./storage:/devel/services/storage + - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv environment: - - SC_BOOT_MODE=debug-ptvsd - - STORAGE_LOGLEVEL=DEBUG + <<: *common-environment + STORAGE_PROFILING : ${STORAGE_PROFILING} + STORAGE_LOGLEVEL: DEBUG agent: environment: - - SC_BOOT_MODE=debug-ptvsd - - LOGLEVEL=DEBUG + <<: *common-environment + AGENT_LOGLEVEL: DEBUG volumes: - ./agent:/devel/services/agent - ../packages:/devel/packages + - ${HOST_UV_CACHE_DIR}:/home/scu/.cache/uv diff --git a/services/docker-compose.local.yml b/services/docker-compose.local.yml index 684db6249ee..5be8ddc4917 100644 --- a/services/docker-compose.local.yml +++ 
b/services/docker-compose.local.yml @@ -10,64 +10,149 @@ # - debug mode can be activated if SC_BOOT_MODE=debug-ptvsd (this is the default in devel). # - use vscode debugger "Python: Remote Attach *" config in ''.vscode-template/launch.json' # -version: "3.8" +x-common-environment: &common_environment + SWARM_STACK_NAME : ${SWARM_STACK_NAME:-simcore_local} services: api-server: + environment: + <<: *common_environment + API_SERVER_REMOTE_DEBUG_PORT : 3000 ports: - "8006:8000" - "3006:3000" autoscaling: + environment: + <<: *common_environment + AUTOSCALING_REMOTE_DEBUGGING_PORT : 3000 ports: - "8007:8000" - "3012:3000" + networks: + - default catalog: + environment: + <<: *common_environment + CATALOG_REMOTE_DEBUGGING_PORT : 3000 ports: - "8005:8000" - "3005:3000" + clusters-keeper: + environment: + <<: *common_environment + CLUSTERS_KEEPER_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8010:8000" + - "3015:3000" + director: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} - - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore_local} + <<: *common_environment + DIRECTOR_REMOTE_DEBUGGING_PORT : 3000 ports: - - "8080" + - "8000" - "3004:3000" - # TODO: disable all pdb-debug modes if not used !!! - #stdin_open: true - #tty: true director-v2: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} - - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore_local} + <<: *common_environment + DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS : "{}" + DIRECTOR_V2_REMOTE_DEBUGGING_PORT : 3000 ports: - "8000" - "3009:3000" datcore-adapter: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} + <<: *common_environment + DATCORE_ADAPTER_REMOTE_DEBUG_PORT : 3000 ports: - "8000" - "3010:3000" + efs-guardian: + environment: + <<: *common_environment + EFS_GUARDIAN_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8013:8000" + - "3020:3000" + invitations: + environment: + <<: *common_environment + INVITATIONS_REMOTE_DEBUGGING_PORT : 3000 ports: - "8008:8000" + - "3017:3000" + + notifications: + environment: + <<: *common_environment + NOTIFICATIONS_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8015:8000" + - "3023:3000" + + payments: + environment: + <<: *common_environment + PAYMENTS_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8011:8000" + - "3018:3000" + + dynamic-schdlr: + environment: + <<: *common_environment + DYNAMIC_SCHEDULER_REMOTE_DEBUGGING_PORT : 3000 + DYNAMIC_SCHEDULER_UI_MOUNT_PATH: / + ports: + - "8012:8000" + - "3016:3000" + deploy: + replicas: 2 + + docker-api-proxy: + ports: + - "8014:8888" + + resource-usage-tracker: + environment: + RESOURCE_USAGE_TRACKER_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8009:8000" + - "3014:3000" storage: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} + <<: *common_environment + STORAGE_REMOTE_DEBUGGING_PORT : 3000 ports: - "8080" - "3003:3000" - webserver: + sto-worker: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} - - REST_SWAGGER_API_DOC_ENABLED=1 + <<: *common_environment + STORAGE_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8080" + - "3021:3000" + + sto-worker-cpu-bound: + environment: + <<: *common_environment + STORAGE_REMOTE_DEBUGGING_PORT : 3000 + ports: + - "8080" + - "3022:3000" + webserver: + environment: &webserver_environment_local + <<: *common_environment + WEBSERVER_FUNCTIONS: ${WEBSERVER_FUNCTIONS} ports: - "8080" - "3001:3000" @@ -76,15 +161,27 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.sticky.cookie.secure=false - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.service=${SWARM_STACK_NAME}_webserver - 
traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.entrypoints=http - - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.rule=hostregexp(`{host:.+}`) && PathPrefix(`/dev/`) - - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.priority=3 - - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.middlewares=${SWARM_STACK_NAME}_gzip@docker, ${SWARM_STACK_NAME_NO_HYPHEN}_sslheader@docker, ${SWARM_STACK_NAME}_webserver_retry + - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.rule=PathPrefix(`/dev/`) + - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.priority=9 + - traefik.http.routers.${SWARM_STACK_NAME}_webserver_local.middlewares=${SWARM_STACK_NAME}_gzip@swarm, ${SWARM_STACK_NAME_NO_HYPHEN}_sslheader@swarm, ${SWARM_STACK_NAME}_webserver_retry + + wb-api-server: + environment: + <<: *webserver_environment_local + ports: + - "8080" + - "3019:3000" + + wb-db-event-listener: + environment: + <<: *webserver_environment_local + ports: + - "8080" + - "3013:3000" wb-garbage-collector: environment: - SC_BOOT_MODE: ${SC_BOOT_MODE:-default} - WEBSERVER_LOGLEVEL: ${LOG_LEVEL:-INFO} - REST_SWAGGER_API_DOC_ENABLED: 1 + <<: *webserver_environment_local ports: - "8080" - "3011:3000" @@ -94,16 +191,16 @@ services: - /var/run/docker.sock:/var/run/docker.sock - /var/lib/docker/volumes/:/var/lib/docker/volumes/ environment: - - LOGLEVEL=${LOG_LEVEL:-INFO} - - AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore_local} + <<: *common_environment + AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME : ${SWARM_STACK_NAME:-simcore_local} dask-sidecar: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} + <<: *common_environment dask-scheduler: environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} + <<: *common_environment ports: # dashboard UI - "8787:8787" @@ -144,7 +241,7 @@ services: rabbit: ports: - "5672" - - "15672" + - "15672:15672" - "15692" redis: @@ -168,7 +265,7 @@ services: - traefik.http.routers.${SWARM_STACK_NAME}_api_internal.service=api@internal - traefik.http.routers.${SWARM_STACK_NAME}_api_internal.rule=PathPrefix(`/dashboard`) || PathPrefix(`/api`) - traefik.http.routers.${SWARM_STACK_NAME}_api_internal.entrypoints=traefik_monitor - - traefik.http.routers.${SWARM_STACK_NAME}_api_internal.middlewares=${SWARM_STACK_NAME}_gzip@docker + - traefik.http.routers.${SWARM_STACK_NAME}_api_internal.middlewares=${SWARM_STACK_NAME}_gzip@swarm - traefik.http.services.${SWARM_STACK_NAME}_api_internal.loadbalancer.server.port=8080 whoami: @@ -182,4 +279,9 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_whoami.loadbalancer.server.port=80 - traefik.http.routers.${SWARM_STACK_NAME}_whoami.rule=PathPrefix(`/whoami`) - traefik.http.routers.${SWARM_STACK_NAME}_whoami.entrypoints=traefik_monitor - - traefik.http.routers.${SWARM_STACK_NAME}_whoami.middlewares=${SWARM_STACK_NAME}_gzip@docker + - traefik.http.routers.${SWARM_STACK_NAME}_whoami.middlewares=${SWARM_STACK_NAME}_gzip@swarm + +networks: + docker-api-network: + driver_opts: + {} # override 'encrypted' locally, some WSL versions have issues with encrypted networks SEE https://github.com/microsoft/WSL/issues/10029 diff --git a/services/docker-compose.yml b/services/docker-compose.yml index fd6e6718b72..0f26fa3dda7 100644 --- a/services/docker-compose.yml +++ b/services/docker-compose.yml @@ -1,14 +1,51 @@ -version: "3.8" +x-dask-tls-secrets: &dask_tls_secrets + - source: dask_tls_key + target: ${DASK_TLS_KEY} + # mode: 444 # not supported by docker stack compose as of 26.0.0 + - source: 
dask_tls_cert + target: ${DASK_TLS_CERT} + # mode: 444 # not supported by docker stack compose as of 26.0.0 + services: api-server: image: ${DOCKER_REGISTRY:-itisfoundation}/api-server:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - WEBSERVER_HOST=${WEBSERVER_HOST:-webserver} - - LOG_LEVEL=${LOG_LEVEL:-WARNING} - env_file: - - ../.env + API_SERVER_DEV_FEATURES_ENABLED: ${API_SERVER_DEV_FEATURES_ENABLED} + API_SERVER_LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + API_SERVER_LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + API_SERVER_LOGLEVEL: ${API_SERVER_LOGLEVEL} + API_SERVER_PROFILING: ${API_SERVER_PROFILING} + + CATALOG_HOST: ${CATALOG_HOST} + CATALOG_PORT: ${CATALOG_PORT} + + DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST} + DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT} + + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + STORAGE_HOST: ${STORAGE_HOST} + STORAGE_PORT: ${STORAGE_PORT} + + WEBSERVER_HOST: ${WB_API_WEBSERVER_HOST} + WEBSERVER_PORT: ${WB_API_WEBSERVER_PORT} + WEBSERVER_SESSION_SECRET_KEY: ${WEBSERVER_SESSION_SECRET_KEY} + API_SERVER_TRACING: ${API_SERVER_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + deploy: labels: - io.simcore.zone=${TRAEFIK_SIMCORE_ZONE} @@ -20,29 +57,85 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_api-server.loadbalancer.healthcheck.path=/ - traefik.http.services.${SWARM_STACK_NAME}_api-server.loadbalancer.healthcheck.interval=2000ms - traefik.http.services.${SWARM_STACK_NAME}_api-server.loadbalancer.healthcheck.timeout=1000ms - - traefik.http.routers.${SWARM_STACK_NAME}_api-server.rule=hostregexp(`{host:.+}`) && (Path(`/`, `/v0`) || PathPrefix(`/v0/`) || Path(`/api/v0/openapi.json`)) + - traefik.http.routers.${SWARM_STACK_NAME}_api-server.rule=(Path(`/`) || Path(`/v0`) || PathPrefix(`/v0/`) || Path(`/api/v0/openapi.json`)) - traefik.http.routers.${SWARM_STACK_NAME}_api-server.entrypoints=simcore_api - - traefik.http.routers.${SWARM_STACK_NAME}_api-server.priority=1 - - traefik.http.routers.${SWARM_STACK_NAME}_api-server.middlewares=${SWARM_STACK_NAME}_gzip@docker,ratelimit-${SWARM_STACK_NAME}_api-server + - traefik.http.routers.${SWARM_STACK_NAME}_api-server.priority=3 + - traefik.http.routers.${SWARM_STACK_NAME}_api-server.middlewares=${SWARM_STACK_NAME}_gzip@swarm,ratelimit-${SWARM_STACK_NAME}_api-server,inflightreq-${SWARM_STACK_NAME}_api-server networks: - default autoscaling: image: ${DOCKER_REGISTRY:-itisfoundation}/autoscaling:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" networks: - autoscaling_subnet environment: - - LOG_LEVEL=${LOG_LEVEL:-WARNING} - - REDIS_HOST=${REDIS_HOST} - - REDIS_PORT=${REDIS_PORT} + AUTOSCALING_LOGLEVEL: ${AUTOSCALING_LOGLEVEL} + AUTOSCALING_POLL_INTERVAL: ${AUTOSCALING_POLL_INTERVAL} + AUTOSCALING_DRAIN_NODES_WITH_LABELS: ${AUTOSCALING_DRAIN_NODES_WITH_LABELS} + AUTOSCALING_DOCKER_JOIN_DRAINED: ${AUTOSCALING_DOCKER_JOIN_DRAINED} + 
AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION: ${AUTOSCALING_WAIT_FOR_CLOUD_INIT_BEFORE_WARM_BUFFER_ACTIVATION} + + AUTOSCALING_DASK: ${AUTOSCALING_DASK} # comp autoscaling + DASK_MONITORING_URL: ${DASK_MONITORING_URL} + DASK_SCHEDULER_AUTH: ${DASK_SCHEDULER_AUTH} + + AUTOSCALING_EC2_ACCESS: ${AUTOSCALING_EC2_ACCESS} # used to enable/disable + AUTOSCALING_EC2_ACCESS_KEY_ID: ${AUTOSCALING_EC2_ACCESS_KEY_ID} + AUTOSCALING_EC2_SECRET_ACCESS_KEY: ${AUTOSCALING_EC2_SECRET_ACCESS_KEY} + AUTOSCALING_EC2_REGION_NAME: ${AUTOSCALING_EC2_REGION_NAME} + + AUTOSCALING_EC2_INSTANCES: ${AUTOSCALING_EC2_INSTANCES} # used to enable/disable + EC2_INSTANCES_ALLOWED_TYPES: ${EC2_INSTANCES_ALLOWED_TYPES} + EC2_INSTANCES_MACHINES_BUFFER: ${EC2_INSTANCES_MACHINES_BUFFER} + EC2_INSTANCES_MAX_INSTANCES: ${EC2_INSTANCES_MAX_INSTANCES} + EC2_INSTANCES_MAX_START_TIME: ${EC2_INSTANCES_MAX_START_TIME} + EC2_INSTANCES_NAME_PREFIX: ${EC2_INSTANCES_NAME_PREFIX} + EC2_INSTANCES_SECURITY_GROUP_IDS: ${EC2_INSTANCES_SECURITY_GROUP_IDS} + EC2_INSTANCES_SUBNET_ID: ${EC2_INSTANCES_SUBNET_ID} + EC2_INSTANCES_KEY_NAME: ${EC2_INSTANCES_KEY_NAME} + EC2_INSTANCES_TIME_BEFORE_DRAINING: ${EC2_INSTANCES_TIME_BEFORE_DRAINING} + EC2_INSTANCES_TIME_BEFORE_TERMINATION: ${EC2_INSTANCES_TIME_BEFORE_TERMINATION} + EC2_INSTANCES_CUSTOM_TAGS: ${EC2_INSTANCES_CUSTOM_TAGS} + EC2_INSTANCES_ATTACHED_IAM_PROFILE: ${EC2_INSTANCES_ATTACHED_IAM_PROFILE} + + AUTOSCALING_NODES_MONITORING: ${AUTOSCALING_NODES_MONITORING} # dyn autoscaling envvar + NODES_MONITORING_NODE_LABELS: ${NODES_MONITORING_NODE_LABELS} + NODES_MONITORING_SERVICE_LABELS: ${NODES_MONITORING_SERVICE_LABELS} + NODES_MONITORING_NEW_NODES_LABELS: ${NODES_MONITORING_NEW_NODES_LABELS} + + AUTOSCALING_SSM_ACCESS: ${AUTOSCALING_SSM_ACCESS} # used to enable/disable + SSM_ENDPOINT: ${SSM_ENDPOINT} + SSM_ACCESS_KEY_ID: ${SSM_ACCESS_KEY_ID} + SSM_SECRET_ACCESS_KEY: ${SSM_SECRET_ACCESS_KEY} + SSM_REGION_NAME: ${SSM_REGION_NAME} + + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + REDIS_HOST: ${REDIS_HOST} + REDIS_PASSWORD: ${REDIS_PASSWORD} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REGISTRY_USER: ${REGISTRY_USER} + REGISTRY_PW: ${REGISTRY_PW} + REGISTRY_URL: ${REGISTRY_URL} + REGISTRY_SSL: ${REGISTRY_SSL} + REGISTRY_AUTH: ${REGISTRY_AUTH} + AUTOSCALING_TRACING: ${AUTOSCALING_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} volumes: - "/var/run/docker.sock:/var/run/docker.sock" deploy: placement: constraints: - - node.platform.os == linux - node.role == manager resources: reservations: @@ -55,68 +148,148 @@ services: catalog: image: ${DOCKER_REGISTRY:-itisfoundation}/catalog:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "cat-{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - CATALOG_BACKGROUND_TASK_REST_TIME=${CATALOG_BACKGROUND_TASK_REST_TIME:-60} - - CATALOG_DEV_FEATURES_ENABLED=${CATALOG_DEV_FEATURES_ENABLED} - - CATALOG_SERVICES_DEFAULT_RESOURCES=${CATALOG_SERVICES_DEFAULT_RESOURCES} - - CATALOG_SERVICES_DEFAULT_SPECIFICATIONS=${CATALOG_SERVICES_DEFAULT_SPECIFICATIONS} - - DIRECTOR_HOST=${DIRECTOR_HOST:-director} - - 
DIRECTOR_PORT=${DIRECTOR_PORT:-8080} - - LOG_LEVEL=${LOG_LEVEL:-WARNING} - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_HOST=${POSTGRES_HOST} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_PORT=${POSTGRES_PORT} - - POSTGRES_USER=${POSTGRES_USER} - - TRACING_THRIFT_COMPACT_ENDPOINT=${TRACING_THRIFT_COMPACT_ENDPOINT} + CATALOG_BACKGROUND_TASK_REST_TIME: ${CATALOG_BACKGROUND_TASK_REST_TIME} + CATALOG_DEV_FEATURES_ENABLED: ${CATALOG_DEV_FEATURES_ENABLED} + CATALOG_LOGLEVEL: ${CATALOG_LOGLEVEL} + CATALOG_PROFILING: ${CATALOG_PROFILING} + CATALOG_SERVICES_DEFAULT_RESOURCES: ${CATALOG_SERVICES_DEFAULT_RESOURCES} + CATALOG_SERVICES_DEFAULT_SPECIFICATIONS: ${CATALOG_SERVICES_DEFAULT_SPECIFICATIONS} + DIRECTOR_DEFAULT_MAX_MEMORY: ${DIRECTOR_DEFAULT_MAX_MEMORY} + DIRECTOR_DEFAULT_MAX_NANO_CPUS: ${DIRECTOR_DEFAULT_MAX_NANO_CPUS} + DIRECTOR_HOST: ${DIRECTOR_HOST:-director} + DIRECTOR_PORT: ${DIRECTOR_PORT:-8080} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + CATALOG_TRACING: ${CATALOG_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} networks: - default + clusters-keeper: + image: ${DOCKER_REGISTRY:-itisfoundation}/clusters-keeper:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + networks: + - default + environment: + CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG: ${CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DOCKER_IMAGE_TAG} + CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ${CLUSTERS_KEEPER_COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH} + CLUSTERS_KEEPER_DASK_NTHREADS: ${CLUSTERS_KEEPER_DASK_NTHREADS} + CLUSTERS_KEEPER_DASK_WORKER_SATURATION: ${CLUSTERS_KEEPER_DASK_WORKER_SATURATION} + CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION: ${CLUSTERS_KEEPER_MAX_MISSED_HEARTBEATS_BEFORE_CLUSTER_TERMINATION} + CLUSTERS_KEEPER_TASK_INTERVAL: ${CLUSTERS_KEEPER_TASK_INTERVAL} + CLUSTERS_KEEPER_LOGLEVEL: ${CLUSTERS_KEEPER_LOGLEVEL} + CLUSTERS_KEEPER_EC2_ACCESS: ${CLUSTERS_KEEPER_EC2_ACCESS} + CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID: ${CLUSTERS_KEEPER_EC2_ACCESS_KEY_ID} + CLUSTERS_KEEPER_EC2_ENDPOINT: ${CLUSTERS_KEEPER_EC2_ENDPOINT} + CLUSTERS_KEEPER_EC2_REGION_NAME: ${CLUSTERS_KEEPER_EC2_REGION_NAME} + CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY: ${CLUSTERS_KEEPER_EC2_SECRET_ACCESS_KEY} + CLUSTERS_KEEPER_SSM_ACCESS: ${CLUSTERS_KEEPER_SSM_ACCESS} + CLUSTERS_KEEPER_SSM_ACCESS_KEY_ID: ${CLUSTERS_KEEPER_SSM_ACCESS_KEY_ID} + CLUSTERS_KEEPER_SSM_ENDPOINT: ${CLUSTERS_KEEPER_SSM_ENDPOINT} + CLUSTERS_KEEPER_SSM_REGION_NAME: ${CLUSTERS_KEEPER_SSM_REGION_NAME} + CLUSTERS_KEEPER_SSM_SECRET_ACCESS_KEY: ${CLUSTERS_KEEPER_SSM_SECRET_ACCESS_KEY} + CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX: ${CLUSTERS_KEEPER_EC2_INSTANCES_PREFIX} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES: ${CLUSTERS_KEEPER_PRIMARY_EC2_INSTANCES} + PRIMARY_EC2_INSTANCES_ALLOWED_TYPES: ${PRIMARY_EC2_INSTANCES_ALLOWED_TYPES} + PRIMARY_EC2_INSTANCES_KEY_NAME: 
${PRIMARY_EC2_INSTANCES_KEY_NAME} + PRIMARY_EC2_INSTANCES_MAX_INSTANCES: ${PRIMARY_EC2_INSTANCES_MAX_INSTANCES} + PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS: ${PRIMARY_EC2_INSTANCES_SECURITY_GROUP_IDS} + PRIMARY_EC2_INSTANCES_SUBNET_ID: ${PRIMARY_EC2_INSTANCES_SUBNET_ID} + PRIMARY_EC2_INSTANCES_CUSTOM_TAGS: ${PRIMARY_EC2_INSTANCES_CUSTOM_TAGS} + PRIMARY_EC2_INSTANCES_ATTACHED_IAM_PROFILE: ${PRIMARY_EC2_INSTANCES_ATTACHED_IAM_PROFILE} + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA: ${PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CA} + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT: ${PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_CERT} + PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY: ${PRIMARY_EC2_INSTANCES_SSM_TLS_DASK_KEY} + PRIMARY_EC2_INSTANCES_PROMETHEUS_USERNAME: ${PRIMARY_EC2_INSTANCES_PROMETHEUS_USERNAME} + PRIMARY_EC2_INSTANCES_PROMETHEUS_PASSWORD: ${PRIMARY_EC2_INSTANCES_PROMETHEUS_PASSWORD} + PRIMARY_EC2_INSTANCES_MAX_START_TIME: ${PRIMARY_EC2_INSTANCES_MAX_START_TIME} + PRIMARY_EC2_INSTANCES_DOCKER_DEFAULT_ADDRESS_POOL: ${PRIMARY_EC2_INSTANCES_DOCKER_DEFAULT_ADDRESS_POOL} + PRIMARY_EC2_INSTANCES_RABBIT: ${PRIMARY_EC2_INSTANCES_RABBIT} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES: ${CLUSTERS_KEEPER_WORKERS_EC2_INSTANCES} + WORKERS_EC2_INSTANCES_ALLOWED_TYPES: ${WORKERS_EC2_INSTANCES_ALLOWED_TYPES} + WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING: ${WORKERS_EC2_INSTANCES_TIME_BEFORE_DRAINING} + WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION: ${WORKERS_EC2_INSTANCES_TIME_BEFORE_TERMINATION} + WORKERS_EC2_INSTANCES_KEY_NAME: ${WORKERS_EC2_INSTANCES_KEY_NAME} + WORKERS_EC2_INSTANCES_MAX_INSTANCES: ${WORKERS_EC2_INSTANCES_MAX_INSTANCES} + WORKERS_EC2_INSTANCES_MAX_START_TIME: ${WORKERS_EC2_INSTANCES_MAX_START_TIME} + WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS: ${WORKERS_EC2_INSTANCES_SECURITY_GROUP_IDS} + WORKERS_EC2_INSTANCES_SUBNET_ID: ${WORKERS_EC2_INSTANCES_SUBNET_ID} + WORKERS_EC2_INSTANCES_CUSTOM_TAGS: ${WORKERS_EC2_INSTANCES_CUSTOM_TAGS} + CLUSTERS_KEEPER_TRACING: ${CLUSTERS_KEEPER_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + secrets: *dask_tls_secrets + director: image: ${DOCKER_REGISTRY:-itisfoundation}/director:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - DEFAULT_MAX_MEMORY=${DEFAULT_MAX_MEMORY:-0} - - DEFAULT_MAX_NANO_CPUS=${DEFAULT_MAX_NANO_CPUS:-0} - - DIRECTOR_REGISTRY_CACHING_TTL=${DIRECTOR_REGISTRY_CACHING_TTL} - - DIRECTOR_REGISTRY_CACHING=${DIRECTOR_REGISTRY_CACHING} - - DIRECTOR_SELF_SIGNED_SSL_FILENAME=${DIRECTOR_SELF_SIGNED_SSL_FILENAME} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_ID=${DIRECTOR_SELF_SIGNED_SSL_SECRET_ID} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME=${DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME} - - DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=${DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS} - - EXTRA_HOSTS_SUFFIX=${EXTRA_HOSTS_SUFFIX:-undefined} - - LOGLEVEL=${LOG_LEVEL:-WARNING} - - MONITORING_ENABLED=${MONITORING_ENABLED:-True} - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_ENDPOINT=${POSTGRES_ENDPOINT} - - POSTGRES_HOST=${POSTGRES_HOST} - 
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_PORT=${POSTGRES_PORT} - - POSTGRES_USER=${POSTGRES_USER} - - REGISTRY_AUTH=${REGISTRY_AUTH} - - REGISTRY_PATH=${REGISTRY_PATH} - - REGISTRY_PW=${REGISTRY_PW} - - REGISTRY_SSL=${REGISTRY_SSL} - - REGISTRY_URL=${REGISTRY_URL} - - REGISTRY_USER=${REGISTRY_USER} - - S3_ACCESS_KEY=${S3_ACCESS_KEY} - - S3_BUCKET_NAME=${S3_BUCKET_NAME} - - S3_ENDPOINT=${S3_ENDPOINT} - - S3_SECRET_KEY=${S3_SECRET_KEY} - - SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet - - STORAGE_ENDPOINT=${STORAGE_ENDPOINT} - - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore} - - TRACING_ENABLED=${TRACING_ENABLED:-True} - - TRACING_ZIPKIN_ENDPOINT=${TRACING_ZIPKIN_ENDPOINT:-http://jaeger:9411} - - TRAEFIK_SIMCORE_ZONE=${TRAEFIK_SIMCORE_ZONE:-internal_simcore_stack} + DIRECTOR_DEFAULT_MAX_MEMORY: ${DIRECTOR_DEFAULT_MAX_MEMORY} + DIRECTOR_DEFAULT_MAX_NANO_CPUS: ${DIRECTOR_DEFAULT_MAX_NANO_CPUS} + DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS: ${DIRECTOR_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS} + DIRECTOR_LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + DIRECTOR_LOGLEVEL: ${DIRECTOR_LOGLEVEL} + DIRECTOR_MONITORING_ENABLED: ${DIRECTOR_MONITORING_ENABLED} + DIRECTOR_PUBLISHED_HOST_NAME: ${DIRECTOR_PUBLISHED_HOST_NAME} + DIRECTOR_REGISTRY_CACHING_TTL: ${DIRECTOR_REGISTRY_CACHING_TTL} + DIRECTOR_REGISTRY_CACHING: ${DIRECTOR_REGISTRY_CACHING} + DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS: ${DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS} + DIRECTOR_TRACING: ${DIRECTOR_TRACING} + + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + REGISTRY_AUTH: ${REGISTRY_AUTH} + REGISTRY_PATH: ${REGISTRY_PATH} + REGISTRY_PW: ${REGISTRY_PW} + REGISTRY_SSL: ${REGISTRY_SSL} + REGISTRY_URL: ${REGISTRY_URL} + REGISTRY_USER: ${REGISTRY_USER} + + SIMCORE_SERVICES_NETWORK_NAME: interactive_services_subnet + STORAGE_ENDPOINT: ${STORAGE_ENDPOINT} + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + + TRAEFIK_SIMCORE_ZONE: ${TRAEFIK_SIMCORE_ZONE} volumes: - "/var/run/docker.sock:/var/run/docker.sock" deploy: placement: constraints: - - node.platform.os == linux - node.role == manager networks: - default @@ -125,69 +298,327 @@ services: director-v2: image: ${DOCKER_REGISTRY:-itisfoundation}/director-v2:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - CATALOG_HOST=${CATALOG_HOST:-catalog} - - CATALOG_PORT=${CATALOG_PORT:-8000} - - COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL=${COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL:-tcp://dask-scheduler:8786} - - DIRECTOR_HOST=${DIRECTOR_HOST:-director} - - DIRECTOR_PORT=${DIRECTOR_PORT:-8080} - - DIRECTOR_SELF_SIGNED_SSL_FILENAME=${DIRECTOR_SELF_SIGNED_SSL_FILENAME} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_ID=${DIRECTOR_SELF_SIGNED_SSL_SECRET_ID} - - DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME=${DIRECTOR_SELF_SIGNED_SSL_SECRET_NAME} - - DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS=${DIRECTOR_SERVICES_CUSTOM_CONSTRAINTS} - - DIRECTOR_V2_DEV_FEATURES_ENABLED=${DIRECTOR_V2_DEV_FEATURES_ENABLED} - - DYNAMIC_SIDECAR_IMAGE=${DOCKER_REGISTRY:-itisfoundation}/dynamic-sidecar:${DOCKER_IMAGE_TAG:-latest} - - EXTRA_HOSTS_SUFFIX=${EXTRA_HOSTS_SUFFIX:-undefined} - - 
LOG_LEVEL=${LOG_LEVEL:-WARNING} - - S3_ACCESS_KEY=${S3_ACCESS_KEY} - - S3_BUCKET_NAME=${S3_BUCKET_NAME} - - S3_ENDPOINT=${S3_ENDPOINT} - - S3_SECRET_KEY=${S3_SECRET_KEY} - - R_CLONE_PROVIDER=${R_CLONE_PROVIDER} - - MONITORING_ENABLED=${MONITORING_ENABLED:-True} - - SIMCORE_SERVICES_NETWORK_NAME=interactive_services_subnet - - TRACING_THRIFT_COMPACT_ENDPOINT=${TRACING_THRIFT_COMPACT_ENDPOINT} - env_file: - - ../.env + AWS_S3_CLI_S3: ${AWS_S3_CLI_S3} + + CATALOG_HOST: ${CATALOG_HOST} + CATALOG_PORT: ${CATALOG_PORT} + + COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE: ${COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_FILE_LINK_TYPE} + COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL: ${COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_URL} + COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH: ${COMPUTATIONAL_BACKEND_DEFAULT_CLUSTER_AUTH} + COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE: ${COMPUTATIONAL_BACKEND_DEFAULT_FILE_LINK_TYPE} + COMPUTATIONAL_BACKEND_ON_DEMAND_CLUSTERS_FILE_LINK_TYPE: ${COMPUTATIONAL_BACKEND_ON_DEMAND_CLUSTERS_FILE_LINK_TYPE} + + DIRECTOR_HOST: ${DIRECTOR_HOST} + DIRECTOR_PORT: ${DIRECTOR_PORT} + DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS: ${DIRECTOR_V2_GENERIC_RESOURCE_PLACEMENT_CONSTRAINTS_SUBSTITUTIONS} + + DIRECTOR_V2_DEV_FEATURES_ENABLED: ${DIRECTOR_V2_DEV_FEATURES_ENABLED} + DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED: ${DIRECTOR_V2_DYNAMIC_SCHEDULER_CLOSE_SERVICES_VIA_FRONTEND_WHEN_CREDITS_LIMIT_REACHED} + DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS: ${DIRECTOR_V2_SERVICES_CUSTOM_CONSTRAINTS} + DIRECTOR_V2_PROFILING: ${DIRECTOR_V2_PROFILING} + DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL: ${DIRECTOR_V2_DYNAMIC_SIDECAR_SLEEP_AFTER_CONTAINER_REMOVAL} + + DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED: ${DYNAMIC_SIDECAR_ENDPOINT_SPECS_MODE_DNSRR_ENABLED} + DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS: ${DYNAMIC_SIDECAR_ENABLE_VOLUME_LIMITS} + DYNAMIC_SIDECAR_IMAGE: ${DYNAMIC_SIDECAR_IMAGE} + DYNAMIC_SIDECAR_LOG_LEVEL: ${DYNAMIC_SIDECAR_LOG_LEVEL} + DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS: ${DYNAMIC_SIDECAR_PROMETHEUS_MONITORING_NETWORKS} + DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS: ${DYNAMIC_SIDECAR_PROMETHEUS_SERVICE_LABELS} + DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT: ${DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT} + + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + DIRECTOR_V2_LOGLEVEL: ${DIRECTOR_V2_LOGLEVEL} + MONITORING_ENABLED: ${MONITORING_ENABLED} + + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + R_CLONE_OPTION_BUFFER_SIZE: ${R_CLONE_OPTION_BUFFER_SIZE} + R_CLONE_OPTION_RETRIES: ${R_CLONE_OPTION_RETRIES} + R_CLONE_OPTION_TRANSFERS: ${R_CLONE_OPTION_TRANSFERS} + R_CLONE_PROVIDER: ${R_CLONE_PROVIDER} + + EFS_DNS_NAME: ${EFS_DNS_NAME} + EFS_MOUNTED_PATH: ${EFS_MOUNTED_PATH} + EFS_PROJECT_SPECIFIC_DATA_DIRECTORY: ${EFS_PROJECT_SPECIFIC_DATA_DIRECTORY} + + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + + REGISTRY_AUTH: ${REGISTRY_AUTH} + REGISTRY_PATH: ${REGISTRY_PATH} + REGISTRY_PW: ${REGISTRY_PW} + REGISTRY_SSL: ${REGISTRY_SSL} + REGISTRY_URL: ${REGISTRY_URL} + 
REGISTRY_USER: ${REGISTRY_USER} + DIRECTOR_V2_DOCKER_HUB_REGISTRY: ${DIRECTOR_V2_DOCKER_HUB_REGISTRY} + + RESOURCE_USAGE_TRACKER_HOST: ${RESOURCE_USAGE_TRACKER_HOST} + RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_EXTERNAL_PORT} + + S3_ACCESS_KEY: ${S3_ACCESS_KEY} + S3_BUCKET_NAME: ${S3_BUCKET_NAME} + S3_ENDPOINT: ${S3_ENDPOINT} + S3_REGION: ${S3_REGION} + S3_SECRET_KEY: ${S3_SECRET_KEY} + + STORAGE_HOST: ${STORAGE_HOST} + STORAGE_PORT: ${STORAGE_PORT} + DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH: ${DIRECTOR_V2_NODE_PORTS_STORAGE_AUTH} + + SIMCORE_SERVICES_NETWORK_NAME: ${SIMCORE_SERVICES_NETWORK_NAME} + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + TRAEFIK_SIMCORE_ZONE: ${TRAEFIK_SIMCORE_ZONE} + DIRECTOR_V2_TRACING: ${DIRECTOR_V2_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + + WEBSERVER_HOST: ${WEBSERVER_HOST} + WEBSERVER_PORT: ${WEBSERVER_PORT} volumes: - "/var/run/docker.sock:/var/run/docker.sock" deploy: placement: constraints: - - node.platform.os == linux - node.role == manager networks: - default - interactive_services_subnet - computational_services_subnet + secrets: *dask_tls_secrets + efs-guardian: + image: ${DOCKER_REGISTRY:-itisfoundation}/efs-guardian:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + networks: + - default + environment: + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + REDIS_HOST: ${REDIS_HOST} + REDIS_PASSWORD: ${REDIS_PASSWORD} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + SC_USER_ID: ${SC_USER_ID} + SC_USER_NAME: ${SC_USER_NAME} + EFS_USER_ID: ${EFS_USER_ID} + EFS_USER_NAME: ${EFS_USER_NAME} + EFS_GROUP_ID: ${EFS_GROUP_ID} + EFS_GROUP_NAME: ${EFS_GROUP_NAME} + EFS_DNS_NAME: ${EFS_DNS_NAME} + EFS_DEFAULT_USER_SERVICE_SIZE_BYTES: ${EFS_DEFAULT_USER_SERVICE_SIZE_BYTES} + EFS_MOUNTED_PATH: ${EFS_MOUNTED_PATH} + EFS_PROJECT_SPECIFIC_DATA_DIRECTORY: ${EFS_PROJECT_SPECIFIC_DATA_DIRECTORY} + EFS_GUARDIAN_TRACING: ${EFS_GUARDIAN_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} invitations: image: ${DOCKER_REGISTRY:-itisfoundation}/invitations:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "inv-{{.Node.Hostname}}-{{.Task.Slot}}" + networks: + - default + environment: + INVITATIONS_DEFAULT_PRODUCT: ${INVITATIONS_DEFAULT_PRODUCT} + INVITATIONS_LOGLEVEL: ${INVITATIONS_LOGLEVEL} + INVITATIONS_OSPARC_URL: ${INVITATIONS_OSPARC_URL} + INVITATIONS_PASSWORD: ${INVITATIONS_PASSWORD} + INVITATIONS_SECRET_KEY: ${INVITATIONS_SECRET_KEY} + INVITATIONS_SWAGGER_API_DOC_ENABLED: ${INVITATIONS_SWAGGER_API_DOC_ENABLED} + INVITATIONS_USERNAME: ${INVITATIONS_USERNAME} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + INVITATIONS_TRACING: ${INVITATIONS_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: 
${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + payments: + image: ${DOCKER_REGISTRY:-itisfoundation}/payments:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "pay-{{.Node.Hostname}}-{{.Task.Slot}}" networks: - default environment: - - INVITATIONS_LOGLEVEL=${LOG_LEVEL:-INFO} - - INVITATIONS_SECRET_KEY=${INVITATIONS_SECRET_KEY} - - INVITATIONS_OSPARC_URL=${INVITATIONS_OSPARC_URL} - - INVITATIONS_USERNAME=${INVITATIONS_USERNAME} - - INVITATIONS_PASSWORD=${INVITATIONS_PASSWORD} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES: ${PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES} + PAYMENTS_ACCESS_TOKEN_SECRET_KEY: ${PAYMENTS_ACCESS_TOKEN_SECRET_KEY} + PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT: ${PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT} + PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT: ${PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT} + PAYMENTS_AUTORECHARGE_ENABLED: ${PAYMENTS_AUTORECHARGE_ENABLED} + PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS: ${PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS} + PAYMENTS_BCC_EMAIL: ${PAYMENTS_BCC_EMAIL} + PAYMENTS_GATEWAY_API_SECRET: ${PAYMENTS_GATEWAY_API_SECRET} + PAYMENTS_GATEWAY_URL: ${PAYMENTS_GATEWAY_URL} + PAYMENTS_LOGLEVEL: ${PAYMENTS_LOGLEVEL} + PAYMENTS_PASSWORD: ${PAYMENTS_PASSWORD} + PAYMENTS_STRIPE_URL: ${PAYMENTS_STRIPE_URL} + PAYMENTS_STRIPE_API_SECRET: ${PAYMENTS_STRIPE_API_SECRET} + PAYMENTS_SWAGGER_API_DOC_ENABLED: ${PAYMENTS_SWAGGER_API_DOC_ENABLED} + PAYMENTS_USERNAME: ${PAYMENTS_USERNAME} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + RESOURCE_USAGE_TRACKER_HOST: ${RESOURCE_USAGE_TRACKER_HOST} + RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_EXTERNAL_PORT} + PAYMENTS_EMAIL: ${PAYMENTS_EMAIL} + SMTP_HOST: ${SMTP_HOST} + SMTP_PASSWORD: ${SMTP_PASSWORD} + SMTP_PORT: ${SMTP_PORT} + SMTP_PROTOCOL: ${SMTP_PROTOCOL} + SMTP_USERNAME: ${SMTP_USERNAME} + PAYMENTS_TRACING: ${PAYMENTS_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} - static-webserver: - image: ${DOCKER_REGISTRY:-itisfoundation}/static-webserver:${DOCKER_IMAGE_TAG:-latest} + resource-usage-tracker: + image: ${DOCKER_REGISTRY:-itisfoundation}/resource-usage-tracker:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + networks: + - default + environment: + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + PROMETHEUS_URL: ${RESOURCE_USAGE_TRACKER_PROMETHEUS_URL} + PROMETHEUS_USERNAME: ${RESOURCE_USAGE_TRACKER_PROMETHEUS_USERNAME} + PROMETHEUS_PASSWORD: ${RESOURCE_USAGE_TRACKER_PROMETHEUS_PASSWORD} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: 
${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + RESOURCE_USAGE_TRACKER_LOGLEVEL: ${RESOURCE_USAGE_TRACKER_LOGLEVEL} + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED: ${RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED} + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC: ${RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC} + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL: ${RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL} + RESOURCE_USAGE_TRACKER_S3: ${RESOURCE_USAGE_TRACKER_S3} + RESOURCE_USAGE_TRACKER_TRACING: ${RESOURCE_USAGE_TRACKER_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_PORT} + + dynamic-schdlr: + image: ${DOCKER_REGISTRY:-itisfoundation}/dynamic-scheduler:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + networks: + - default + - docker-api-network + environment: + CATALOG_HOST: ${CATALOG_HOST} + CATALOG_PORT: ${CATALOG_PORT} + DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST} + DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT} + + DOCKER_API_PROXY_HOST: ${DOCKER_API_PROXY_HOST} + DOCKER_API_PROXY_PASSWORD: ${DOCKER_API_PROXY_PASSWORD} + DOCKER_API_PROXY_PORT: ${DOCKER_API_PROXY_PORT} + DOCKER_API_PROXY_SECURE: ${DOCKER_API_PROXY_SECURE} + DOCKER_API_PROXY_USER: ${DOCKER_API_PROXY_USER} + + DYNAMIC_SCHEDULER_LOGLEVEL: ${DYNAMIC_SCHEDULER_LOGLEVEL} + DYNAMIC_SCHEDULER_PROFILING: ${DYNAMIC_SCHEDULER_PROFILING} + DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT: ${DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT} + DYNAMIC_SCHEDULER_TRACING: ${DYNAMIC_SCHEDULER_TRACING} + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET: ${DYNAMIC_SCHEDULER_UI_STORAGE_SECRET} + DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: ${DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER} + DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT: ${DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT} + + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + REDIS_HOST: ${REDIS_HOST} + REDIS_PASSWORD: ${REDIS_PASSWORD} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + docker-api-proxy: + image: ${DOCKER_REGISTRY:-itisfoundation}/docker-api-proxy:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" environment: - - SERVER_HOST=0.0.0.0 - - SERVER_PORT=8000 - - SERVER_LOG_LEVEL=error - - SERVER_ROOT=/static-content + DOCKER_API_PROXY_PASSWORD: ${DOCKER_API_PROXY_PASSWORD} + DOCKER_API_PROXY_USER : ${DOCKER_API_PROXY_USER} deploy: placement: constraints: - - node.platform.os == linux + - node.role == manager + mode: global + volumes: + - /var/run/docker.sock:/var/run/docker.sock + networks: + - docker-api-network + + static-webserver: + image: ${DOCKER_REGISTRY:-itisfoundation}/static-webserver:${DOCKER_IMAGE_TAG:-latest} + init: true + 
hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + environment: + SERVER_HOST: 0.0.0.0 + SERVER_PORT: 8000 + SERVER_LOG_LEVEL: error + SERVER_ROOT: /static-content + deploy: labels: - io.simcore.zone=${TRAEFIK_SIMCORE_ZONE} - traefik.http.middlewares.${SWARM_STACK_NAME}_gzip.compress=true @@ -197,16 +628,16 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_static_webserver.loadbalancer.healthcheck.interval=2000ms - traefik.http.services.${SWARM_STACK_NAME}_static_webserver.loadbalancer.healthcheck.timeout=1000ms - traefik.http.middlewares.${SWARM_STACK_NAME}_static_webserver_retry.retry.attempts=2 - - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.rule=hostregexp(`{host:.+}`) && (Path(`/osparc`,`/s4l`,`/s4llite`,`/tis`,`/transpiled`,`/resource`) || PathPrefix(`/osparc/`,`/s4l/`,`/s4llite/`,`/tis/`,`/transpiled/`,`/resource/`)) + - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.rule=(Path(`/osparc`) || Path(`/s4l`) || Path(`/s4llite`) || Path(`/s4lacad`) || Path(`/s4lengine`) || Path(`/s4ldesktop`) || Path(`/s4ldesktopacad`) || Path(`/tis`) || Path(`/tiplite`) || Path(`/transpiled`) || Path(`/resource`) || PathPrefix(`/osparc/`) || PathPrefix(`/s4l/`) || PathPrefix(`/s4llite/`) || PathPrefix(`/s4lacad/`) || PathPrefix(`/s4lengine/`) || PathPrefix(`/s4ldesktop/`) || PathPrefix(`/s4ldesktopacad/`) || PathPrefix(`/tis/`) || PathPrefix(`/tiplite/`) || PathPrefix(`/transpiled/`) || PathPrefix(`/resource/`)) - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.service=${SWARM_STACK_NAME}_static_webserver - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.entrypoints=http - - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.priority=2 - - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.middlewares=${SWARM_STACK_NAME}_gzip@docker,${SWARM_STACK_NAME}_static_webserver_retry + - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.priority=6 + - traefik.http.routers.${SWARM_STACK_NAME}_static_webserver.middlewares=${SWARM_STACK_NAME}_gzip@swarm,${SWARM_STACK_NAME}_static_webserver_retry # catchall for legacy services (this happens if a backend disappears and a frontend tries to reconnect, the right return value is a 503) - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.service=${SWARM_STACK_NAME}_legacy_services_catchall - - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.priority=1 + - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.priority=3 - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.entrypoints=http - - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.rule=hostregexp(`{host:.+}`) && (Path(`/x/{node_uuid:\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b}`) || PathPrefix(`/x/{node_uuid:\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b}/`)) + - traefik.http.routers.${SWARM_STACK_NAME}_legacy_services_catchall.rule=PathRegexp(`^/x/(?P\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b)[\/]?`) # this tricks traefik into a 502 (bad gateway) since the service does not exist on this port - traefik.http.services.${SWARM_STACK_NAME}_legacy_services_catchall.loadbalancer.server.port=0 # this tricks traefik into returning a 503 (service unavailable) since the healthcheck will always return false @@ -214,13 +645,13 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_legacy_services_catchall.loadbalancer.healthcheck.interval=500s - 
traefik.http.services.${SWARM_STACK_NAME}_legacy_services_catchall.loadbalancer.healthcheck.timeout=1ms # see [#2718](https://github.com/ITISFoundation/osparc-simcore/issues/2718) - # catchall for dy-sidecar powered-services (this happens if a backend disappears and a frontend tries to reconnect, the right return value is a 503) + # catchall for dynamic-sidecar powered-services (this happens if a backend disappears and a frontend tries to reconnect, the right return value is a 503) - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.service=${SWARM_STACK_NAME}_modern_services_catchall # the priority is a bit higher than webserver, the webserver is the fallback to everything and has prio 2 - - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.priority=3 + - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.priority=9 - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.entrypoints=http # in theory the pattern should be uuid.services.OSPARC_DOMAIN, but anything could go through.. so let's catch everything - - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.rule=hostregexp(`{node_uuid:.+}.services.{host:.+}`) + - traefik.http.routers.${SWARM_STACK_NAME}_modern_services_catchall.rule=HostRegexp(`(?P\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b)\.services\.(?P.+)`) # this tricks traefik into a 502 (bad gateway) since the service does not exist on this port - traefik.http.services.${SWARM_STACK_NAME}_modern_services_catchall.loadbalancer.server.port=0 # this tricks traefik into returning a 503 (service unavailable) since the healthcheck will always return false @@ -233,28 +664,199 @@ services: webserver: image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" - environment: - &webserver-environment - CATALOG_HOST: ${CATALOG_HOST:-catalog} - CATALOG_PORT: ${CATALOG_PORT:-8000} - DIAGNOSTICS_MAX_AVG_LATENCY: 10 - DIAGNOSTICS_MAX_TASK_DELAY: 30 - DIRECTOR_HOST: ${DIRECTOR_HOST:-director} - DIRECTOR_PORT: ${DIRECTOR_PORT:-8080} - DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST:-director-v2} - DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT:-8000} - STORAGE_HOST: ${STORAGE_HOST:-storage} - STORAGE_PORT: ${STORAGE_PORT:-8080} - SWARM_STACK_NAME: ${SWARM_STACK_NAME:-simcore} - WEBSERVER_GARBAGE_COLLECTOR: "null" - WEBSERVER_LOGLEVEL: ${LOG_LEVEL:-WARNING} - env_file: - - ../.env + hostname: "wb-{{.Node.Hostname}}-{{.Task.Slot}}" # the hostname is used in conjonction with other services and must be unique see https://github.com/ITISFoundation/osparc-simcore/pull/5931 + environment: &webserver_environment + AIODEBUG_SLOW_DURATION_SECS: ${AIODEBUG_SLOW_DURATION_SECS} + + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + + WEBSERVER_DEV_FEATURES_ENABLED: ${WEBSERVER_DEV_FEATURES_ENABLED} + + WEBSERVER_LOGLEVEL: ${WEBSERVER_LOGLEVEL} + WEBSERVER_PROFILING: ${WEBSERVER_PROFILING} + + WEBSERVER_LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + WEBSERVER_LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + + # WEBSERVER_SERVER_HOST + + WEBSERVER_HOST: ${WEBSERVER_HOST} + WEBSERVER_PORT: ${WEBSERVER_PORT} + + # WEBSERVER_SERVER_PORT + + WEBSERVER_FRONTEND: ${WEBSERVER_FRONTEND} + + # WEBSERVER_ACTIVITY + WEBSERVER_ACTIVITY: ${WEBSERVER_ACTIVITY} + PROMETHEUS_API_VERSION: ${WEBSERVER_PROMETHEUS_API_VERSION} # seems to be not used + PROMETHEUS_URL: ${WEBSERVER_PROMETHEUS_URL} + + WEBSERVER_CATALOG: ${WEBSERVER_CATALOG} + CATALOG_HOST: 
${CATALOG_HOST} + CATALOG_PORT: ${CATALOG_PORT} + + # WEBSERVER_CREDIT_COMPUTATION + WEBSERVER_CREDIT_COMPUTATION_ENABLED: ${WEBSERVER_CREDIT_COMPUTATION_ENABLED} + + # WEBSERVER_DB + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + # WEBSERVER_DIAGNOSTICS + WEBSERVER_DIAGNOSTICS: ${WEBSERVER_DIAGNOSTICS} + DIAGNOSTICS_HEALTHCHECK_ENABLED: ${DIAGNOSTICS_HEALTHCHECK_ENABLED} + DIAGNOSTICS_MAX_AVG_LATENCY: ${DIAGNOSTICS_MAX_AVG_LATENCY} + DIAGNOSTICS_MAX_TASK_DELAY: ${DIAGNOSTICS_MAX_TASK_DELAY} + DIAGNOSTICS_SLOW_DURATION_SECS: ${DIAGNOSTICS_SLOW_DURATION_SECS} + + # WEBSERVER_DIRECTOR_V2 + DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST} + DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT} + + # WEBSERVER_EMAIL + WEBSERVER_EMAIL: ${WEBSERVER_EMAIL} + SMTP_HOST: ${SMTP_HOST} + SMTP_PORT: ${SMTP_PORT} + SMTP_USERNAME: ${SMTP_USERNAME} + SMTP_PASSWORD: ${SMTP_PASSWORD} + SMTP_PROTOCOL: ${SMTP_PROTOCOL} + + WEBSERVER_EXPORTER: ${WEBSERVER_EXPORTER} + + # WEBSERVER_GARBAGE_COLLECTOR + WEBSERVER_GARBAGE_COLLECTOR: ${WEBSERVER_GARBAGE_COLLECTOR} + + # WEBSERVER_INVITATIONS + INVITATIONS_HOST: ${INVITATIONS_HOST} + INVITATIONS_LOGLEVEL: ${INVITATIONS_LOGLEVEL} + INVITATIONS_OSPARC_URL: ${INVITATIONS_OSPARC_URL} + INVITATIONS_PASSWORD: ${INVITATIONS_PASSWORD} + INVITATIONS_PORT: ${INVITATIONS_PORT} + INVITATIONS_SECRET_KEY: ${INVITATIONS_SECRET_KEY} + INVITATIONS_USERNAME: ${INVITATIONS_USERNAME} + + WEBSERVER_LICENSES: ${WEBSERVER_LICENSES} + LICENSES_ITIS_VIP_SYNCER_ENABLED : ${LICENSES_ITIS_VIP_SYNCER_ENABLED} + LICENSES_ITIS_VIP_SYNCER_PERIODICITY: ${LICENSES_ITIS_VIP_SYNCER_PERIODICITY} + LICENSES_ITIS_VIP_API_URL: ${LICENSES_ITIS_VIP_API_URL} + LICENSES_ITIS_VIP_CATEGORIES: ${LICENSES_ITIS_VIP_CATEGORIES} + LICENSES_SPEAG_PHANTOMS_API_URL: ${LICENSES_SPEAG_PHANTOMS_API_URL} + LICENSES_SPEAG_PHANTOMS_CATEGORIES: ${LICENSES_SPEAG_PHANTOMS_CATEGORIES} + + + WEBSERVER_LOGIN: ${WEBSERVER_LOGIN} + LOGIN_ACCOUNT_DELETION_RETENTION_DAYS: ${LOGIN_ACCOUNT_DELETION_RETENTION_DAYS} + LOGIN_REGISTRATION_CONFIRMATION_REQUIRED: ${LOGIN_REGISTRATION_CONFIRMATION_REQUIRED} + LOGIN_REGISTRATION_INVITATION_REQUIRED: ${LOGIN_REGISTRATION_INVITATION_REQUIRED} + LOGIN_2FA_REQUIRED: ${LOGIN_2FA_REQUIRED} + LOGIN_2FA_CODE_EXPIRATION_SEC: ${LOGIN_2FA_CODE_EXPIRATION_SEC} + TWILIO_ACCOUNT_SID: ${TWILIO_ACCOUNT_SID} + TWILIO_AUTH_TOKEN: ${TWILIO_AUTH_TOKEN} + TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT: ${TWILIO_COUNTRY_CODES_W_ALPHANUMERIC_SID_SUPPORT} + + WEBSERVER_PAYMENTS: ${WEBSERVER_PAYMENTS} + PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT: ${PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT} + PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT: ${PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT} + PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS: ${PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS} + PAYMENTS_FAKE_COMPLETION_DELAY_SEC: ${PAYMENTS_FAKE_COMPLETION_DELAY_SEC} + PAYMENTS_FAKE_COMPLETION: ${PAYMENTS_FAKE_COMPLETION} + PAYMENTS_FAKE_GATEWAY_URL: ${PAYMENTS_GATEWAY_URL} + PAYMENTS_HOST: ${PAYMENTS_HOST} + PAYMENTS_PASSWORD: ${PAYMENTS_PASSWORD} + PAYMENTS_PORT: ${PAYMENTS_PORT} + PAYMENTS_USERNAME: ${PAYMENTS_USERNAME} + + # WEBSERVER_REDIS + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + + # WEBSERVER_REST + REST_SWAGGER_API_DOC_ENABLED: ${REST_SWAGGER_API_DOC_ENABLED} + + # 
WEBSERVER_RESOURCE_MANAGER + RESOURCE_MANAGER_RESOURCE_TTL_S: ${RESOURCE_MANAGER_RESOURCE_TTL_S} + + # WEBSERVER_RESOURCE_USAGE_TRACKER + RESOURCE_USAGE_TRACKER_HOST: ${RESOURCE_USAGE_TRACKER_HOST} + RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_EXTERNAL_PORT} + + # WEBSERVER_SCICRUNCH + WEBSERVER_SCICRUNCH: ${WEBSERVER_SCICRUNCH} + SCICRUNCH_API_BASE_URL: ${SCICRUNCH_API_BASE_URL} + SCICRUNCH_API_KEY: ${SCICRUNCH_API_KEY} + + # WEBSERVER_SESSION + SESSION_SECRET_KEY: ${WEBSERVER_SESSION_SECRET_KEY} + SESSION_COOKIE_MAX_AGE: ${SESSION_COOKIE_MAX_AGE} + SESSION_COOKIE_SAMESITE: ${SESSION_COOKIE_SAMESITE} + SESSION_COOKIE_SECURE: ${SESSION_COOKIE_SECURE} + SESSION_COOKIE_HTTPONLY: ${SESSION_COOKIE_HTTPONLY} + + WEBSERVER_STATICWEB: ${WEBSERVER_STATICWEB} + + SIMCORE_VCS_RELEASE_TAG: ${SIMCORE_VCS_RELEASE_TAG} + + # WEBSERVER_STORAGE + STORAGE_ENDPOINT: ${STORAGE_ENDPOINT} + STORAGE_HOST: ${STORAGE_HOST} + STORAGE_PORT: ${STORAGE_PORT} + + # WEBSERVER_STUDIES_DISPATCHER + WEBSERVER_STUDIES_DISPATCHER: ${WEBSERVER_STUDIES_DISPATCHER} + STUDIES_ACCESS_ANONYMOUS_ALLOWED: ${STUDIES_ACCESS_ANONYMOUS_ALLOWED} + STUDIES_DEFAULT_SERVICE_THUMBNAIL: ${STUDIES_DEFAULT_SERVICE_THUMBNAIL} + + WEBSERVER_TRACING: ${WEBSERVER_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE: ${TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE: ${TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE} + + # WEBSERVER_PROJECTS + WEBSERVER_PROJECTS: ${WEBSERVER_PROJECTS} + PROJECTS_INACTIVITY_INTERVAL: ${PROJECTS_INACTIVITY_INTERVAL} + PROJECTS_MAX_COPY_SIZE_BYTES: ${PROJECTS_MAX_COPY_SIZE_BYTES} + PROJECTS_MAX_NUM_RUNNING_DYNAMIC_NODES: ${PROJECTS_MAX_NUM_RUNNING_DYNAMIC_NODES} + + + # WEBSERVER_RABBITMQ + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + # WEBSERVER_TRASH + TRASH_RETENTION_DAYS: ${TRASH_RETENTION_DAYS} + + + # ARBITRARY ENV VARS + + # see [https://docs.gunicorn.org/en/stable/settings.html#timeout], + # since we have the docker healthcheck already, this should be ok + GUNICORN_CMD_ARGS: ${WEBSERVER_GUNICORN_CMD_ARGS} + WEBSERVER_DB_LISTENER: ${WEBSERVER_DB_LISTENER} + WEBSERVER_ANNOUNCEMENTS: ${WEBSERVER_ANNOUNCEMENTS} + WEBSERVER_NOTIFICATIONS: ${WEBSERVER_NOTIFICATIONS} + WEBSERVER_CLUSTERS: ${WEBSERVER_CLUSTERS} + WEBSERVER_FUNCTIONS: 0 + WEBSERVER_GROUPS: ${WEBSERVER_GROUPS} + WEBSERVER_PRODUCTS: ${WEBSERVER_PRODUCTS} + WEBSERVER_PUBLICATIONS: ${WEBSERVER_PUBLICATIONS} + WEBSERVER_SOCKETIO: ${WEBSERVER_SOCKETIO} + WEBSERVER_TAGS: ${WEBSERVER_TAGS} + WEBSERVER_USERS: ${WEBSERVER_USERS} + WEBSERVER_FOLDERS: ${WEBSERVER_FOLDERS} + deploy: - placement: - constraints: - - node.platform.os == linux labels: - io.simcore.zone=${TRAEFIK_SIMCORE_ZONE} # gzip compression @@ -266,34 +868,215 @@ services: - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.healthcheck.path=/v0/ - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.healthcheck.interval=2000ms - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.healthcheck.timeout=1000ms + # NOTE: stickyness must remain until the long running tasks in the webserver are removed + # and also https://github.com/ITISFoundation/osparc-simcore/pull/4180 is resolved. 
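One way to sanity-check the sticky-session labels that follow after a deployment is to look for the load-balancer cookie that Traefik should attach to webserver responses; the host name below is illustrative and not part of this configuration:

  curl -sI "http://osparc.local/v0/" | grep -i '^set-cookie'   # a cookie from the load balancer indicates stickiness is active
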
- traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.sticky.cookie=true - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.sticky.cookie.samesite=lax - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.sticky.cookie.httponly=true - traefik.http.services.${SWARM_STACK_NAME}_webserver.loadbalancer.sticky.cookie.secure=true - traefik.http.middlewares.${SWARM_STACK_NAME}_webserver_retry.retry.attempts=2 - traefik.http.routers.${SWARM_STACK_NAME}_webserver.service=${SWARM_STACK_NAME}_webserver - - traefik.http.routers.${SWARM_STACK_NAME}_webserver.rule=hostregexp(`{host:.+}`) && (Path(`/`, `/v0`,`/socket.io/`,`/static-frontend-data.json`, `/study/{study_uuid:\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b}`, `/view`, `/#/view`, `/#/error`) || PathPrefix(`/v0/`)) + - traefik.http.routers.${SWARM_STACK_NAME}_webserver.rule=(Path(`/`) || Path(`/v0`) || Path(`/socket.io/`) || Path(`/static-frontend-data.json`) || PathRegexp(`^/study/(?P\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b)`) || Path(`/view`) || Path(`/#/view`) || Path(`/#/error`) || PathPrefix(`/v0/`)) - traefik.http.routers.${SWARM_STACK_NAME}_webserver.entrypoints=http - - traefik.http.routers.${SWARM_STACK_NAME}_webserver.priority=2 - - traefik.http.routers.${SWARM_STACK_NAME}_webserver.middlewares=${SWARM_STACK_NAME}_gzip@docker, ${SWARM_STACK_NAME_NO_HYPHEN}_sslheader@docker, ${SWARM_STACK_NAME}_webserver_retry - networks: + - traefik.http.routers.${SWARM_STACK_NAME}_webserver.priority=6 + - traefik.http.routers.${SWARM_STACK_NAME}_webserver.middlewares=${SWARM_STACK_NAME}_gzip@swarm, ${SWARM_STACK_NAME_NO_HYPHEN}_sslheader@swarm, ${SWARM_STACK_NAME}_webserver_retry + networks: &webserver_networks - default - interactive_services_subnet - wb-garbage-collector: + wb-api-server: image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "api-{{.Node.Hostname}}-{{.Task.Slot}}" # the hostname is used in conjonction with other services and must be unique see https://github.com/ITISFoundation/osparc-simcore/pull/5931 environment: - <<: *webserver-environment - WEBSERVER_GARBAGE_COLLECTOR: '{"GARBAGE_COLLECTOR_INTERVAL_S": 30}' - env_file: - - ../.env - - ../.env-wb-garbage-collector + <<: *webserver_environment + WEBSERVER_HOST: ${WB_API_WEBSERVER_HOST} + WEBSERVER_PORT: ${WB_API_WEBSERVER_PORT} + WEBSERVER_STATICWEB: "null" + WEBSERVER_FUNCTIONS: ${WEBSERVER_FUNCTIONS} + + + networks: *webserver_networks + + wb-db-event-listener: + image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "db-{{.Node.Hostname}}-{{.Task.Slot}}" # the hostname is used in conjonction with other services and must be unique see https://github.com/ITISFoundation/osparc-simcore/pull/5931 + environment: + WEBSERVER_LOGLEVEL: ${WB_DB_EL_LOGLEVEL} + + WEBSERVER_HOST: ${WEBSERVER_HOST} + WEBSERVER_PORT: ${WEBSERVER_PORT} + + # WEBSERVER_DB + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST} + DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT} + + REST_SWAGGER_API_DOC_ENABLED: ${WB_DB_EL_REST_SWAGGER_API_DOC_ENABLED} + + # WEBSERVER_RESOURCE_USAGE_TRACKER + RESOURCE_USAGE_TRACKER_HOST: ${RESOURCE_USAGE_TRACKER_HOST} + 
RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_EXTERNAL_PORT} + + GUNICORN_CMD_ARGS: ${WEBSERVER_GUNICORN_CMD_ARGS} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + SESSION_SECRET_KEY: ${WEBSERVER_SESSION_SECRET_KEY} + WEBSERVER_ACTIVITY: ${WB_DB_EL_ACTIVITY} + WEBSERVER_ANNOUNCEMENTS: ${WB_DB_EL_ANNOUNCEMENTS} + WEBSERVER_CATALOG: ${WB_DB_EL_CATALOG} + WEBSERVER_CLUSTERS: ${WB_DB_EL_CLUSTERS} + WEBSERVER_DB_LISTENER: ${WB_DB_EL_DB_LISTENER} + WEBSERVER_DIAGNOSTICS: ${WB_DB_EL_DIAGNOSTICS} + WEBSERVER_EMAIL: ${WB_DB_EL_EMAIL} + WEBSERVER_EXPORTER: ${WB_DB_EL_EXPORTER} + WEBSERVER_FOLDERS: ${WB_DB_EL_FOLDERS} + WEBSERVER_FRONTEND: ${WB_DB_EL_FRONTEND} + WEBSERVER_FUNCTIONS: 0 + WEBSERVER_GARBAGE_COLLECTOR: ${WB_DB_EL_GARBAGE_COLLECTOR} + WEBSERVER_GROUPS: ${WB_DB_EL_GROUPS} + WEBSERVER_INVITATIONS: ${WB_DB_EL_INVITATIONS} + WEBSERVER_LICENSES: null + WEBSERVER_LOGIN: ${WB_DB_EL_LOGIN} + WEBSERVER_PAYMENTS: ${WB_DB_EL_PAYMENTS} + WEBSERVER_NOTIFICATIONS: ${WB_DB_EL_NOTIFICATIONS} + WEBSERVER_PRODUCTS: ${WB_DB_EL_PRODUCTS} + WEBSERVER_PROJECTS: ${WB_DB_EL_PROJECTS} + WEBSERVER_PUBLICATIONS: ${WB_DB_EL_PUBLICATIONS} + WEBSERVER_SCICRUNCH: ${WB_DB_EL_SCICRUNCH} + WEBSERVER_SOCKETIO: ${WB_DB_EL_SOCKETIO} + WEBSERVER_STATICWEB: ${WB_DB_EL_STATICWEB} + WEBSERVER_STORAGE: ${WB_DB_EL_STORAGE} + WEBSERVER_STUDIES_DISPATCHER: ${WB_DB_EL_STUDIES_DISPATCHER} + WEBSERVER_TAGS: ${WB_DB_EL_TAGS} + WEBSERVER_TRACING: ${WB_DB_EL_TRACING} + WEBSERVER_USERS: ${WB_DB_EL_USERS} + WEBSERVER_WALLETS: ${WB_DB_EL_WALLETS} + + # WEBSERVER_RABBITMQ + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + # WEBSERVER_REDIS + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + + RESOURCE_MANAGER_RESOURCE_TTL_S: ${RESOURCE_MANAGER_RESOURCE_TTL_S} + deploy: - placement: - constraints: - - node.platform.os == linux + # NOTE: https://github.com/ITISFoundation/osparc-simcore/pull/4286 + # NOTE: this MUSTN'T change, or weird things might happen + # this will stay until all legacy dynamic services are gone. 
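Since wb-db-event-listener must keep exactly one replica (see the note above), a deployment can be sanity-checked with a query such as the following sketch, assuming the stack was deployed under the name stored in SWARM_STACK_NAME:

  docker service inspect "${SWARM_STACK_NAME}_wb-db-event-listener" \
    --format '{{.Spec.Mode.Replicated.Replicas}}'   # should print 1
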
+ replicas: 1 + networks: + - default + + wb-garbage-collector: + image: ${DOCKER_REGISTRY:-itisfoundation}/webserver:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "gc-{{.Node.Hostname}}-{{.Task.Slot}}" # the hostname is used in conjonction with other services and must be unique see https://github.com/ITISFoundation/osparc-simcore/pull/5931 + environment: + + # WEBSERVER_DIRECTOR_V2 + DIRECTOR_V2_HOST: ${DIRECTOR_V2_HOST} + DIRECTOR_V2_PORT: ${DIRECTOR_V2_PORT} + + GUNICORN_CMD_ARGS: ${WEBSERVER_GUNICORN_CMD_ARGS} + + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + + # WEBSERVER_DB + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + # WEBSERVER_RABBITMQ + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + # WEBSERVER_REDIS + REDIS_HOST: ${REDIS_HOST} + REDIS_PASSWORD: ${REDIS_PASSWORD} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + + # WEBSERVER_RESOURCE_MANAGER + RESOURCE_MANAGER_RESOURCE_TTL_S: ${WB_GC_RESOURCE_MANAGER_RESOURCE_TTL_S} + + # WEBSERVER_RESOURCE_USAGE_TRACKER + RESOURCE_USAGE_TRACKER_HOST: ${RESOURCE_USAGE_TRACKER_HOST} + RESOURCE_USAGE_TRACKER_PORT: ${RESOURCE_USAGE_TRACKER_EXTERNAL_PORT} + + REST_SWAGGER_API_DOC_ENABLED: ${WB_GC_REST_SWAGGER_API_DOC_ENABLED} + + # WEBSERVER_SESSION + SESSION_SECRET_KEY: ${WEBSERVER_SESSION_SECRET_KEY} + + # WEBSERVER_STORAGE + STORAGE_HOST: ${STORAGE_HOST} + STORAGE_PORT: ${STORAGE_PORT} + + SWARM_STACK_NAME: ${SWARM_STACK_NAME} + + # WEBSERVER_TRASH + TRASH_RETENTION_DAYS: ${TRASH_RETENTION_DAYS} + + WEBSERVER_ACTIVITY: ${WB_GC_ACTIVITY} + WEBSERVER_ANNOUNCEMENTS: ${WB_GC_ANNOUNCEMENTS} + WEBSERVER_CATALOG: ${WB_GC_CATALOG} + WEBSERVER_CLUSTERS: ${WB_GC_CLUSTERS} + WEBSERVER_DB_LISTENER: ${WB_GC_DB_LISTENER} + WEBSERVER_DIAGNOSTICS: ${WB_GC_DIAGNOSTICS} + WEBSERVER_EMAIL: ${WB_GC_EMAIL} + WEBSERVER_EXPORTER: ${WB_GC_EXPORTER} + WEBSERVER_FOLDERS: ${WB_GC_FOLDERS} + WEBSERVER_FRONTEND: ${WB_GC_FRONTEND} + WEBSERVER_FUNCTIONS: 0 + WEBSERVER_GARBAGE_COLLECTOR: ${WB_GC_GARBAGE_COLLECTOR} + WEBSERVER_GROUPS: ${WB_GC_GROUPS} + WEBSERVER_HOST: ${WEBSERVER_HOST} + WEBSERVER_INVITATIONS: ${WB_GC_INVITATIONS} + WEBSERVER_LICENSES: null + WEBSERVER_LOGIN: ${WB_GC_LOGIN} + WEBSERVER_LOGLEVEL: ${WB_GC_LOGLEVEL} + WEBSERVER_NOTIFICATIONS: ${WB_GC_NOTIFICATIONS} + WEBSERVER_PAYMENTS: ${WB_GC_PAYMENTS} + WEBSERVER_PORT: ${WEBSERVER_PORT} + WEBSERVER_PRODUCTS: ${WB_GC_PRODUCTS} + WEBSERVER_PROJECTS: ${WB_GC_PROJECTS} + WEBSERVER_PUBLICATIONS: ${WB_GC_PUBLICATIONS} + WEBSERVER_SCICRUNCH: ${WB_GC_SCICRUNCH} + WEBSERVER_SOCKETIO: ${WB_GC_SOCKETIO} + WEBSERVER_STATICWEB: ${WB_GC_STATICWEB} + WEBSERVER_STUDIES_DISPATCHER: ${WB_GC_STUDIES_DISPATCHER} + WEBSERVER_TAGS: ${WB_GC_TAGS} + WEBSERVER_TRACING: ${WB_GC_TRACING} + WEBSERVER_USERS: ${WB_GC_USERS} + WEBSERVER_WALLETS: ${WB_GC_WALLETS} + + networks: - default - interactive_services_subnet @@ -301,7 +1084,7 @@ services: agent: image: ${DOCKER_REGISTRY:-itisfoundation}/agent:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" deploy: mode: global resources: @@ -309,24 +1092,62 @@ services: cpus: "1.0" memory: 1024M - networks: - - default 
volumes: - /var/run/docker.sock:/var/run/docker.sock environment: - LOGLEVEL: ${LOG_LEVEL:-INFO} - AGENT_VOLUMES_CLEANUP_S3_SECURE: ${AGENT_VOLUMES_CLEANUP_S3_SECURE} + AGENT_LOGLEVEL: ${AGENT_LOGLEVEL} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} AGENT_VOLUMES_CLEANUP_S3_ENDPOINT: ${AGENT_VOLUMES_CLEANUP_S3_ENDPOINT} + AGENT_VOLUMES_CLEANUP_S3_REGION: ${AGENT_VOLUMES_CLEANUP_S3_REGION} AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY: ${AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY} AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY: ${AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY} AGENT_VOLUMES_CLEANUP_S3_BUCKET: ${AGENT_VOLUMES_CLEANUP_S3_BUCKET} AGENT_VOLUMES_CLEANUP_S3_PROVIDER: ${AGENT_VOLUMES_CLEANUP_S3_PROVIDER} - AGENT_VOLUMES_CLEANUP_S3_REGION: ${AGENT_VOLUMES_CLEANUP_S3_REGION:-us-east-1} + AGENT_DOCKER_NODE_ID: "{{.Node.ID}}" + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_USER: ${RABBIT_USER} + RABBIT_SECURE: ${RABBIT_SECURE} + + AGENT_TRACING: ${AGENT_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + + notifications: + image: ${DOCKER_REGISTRY:-itisfoundation}/notifications:${DOCKER_IMAGE_TAG:-latest} + init: true + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" + + environment: + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + + NOTIFICATIONS_LOGLEVEL: ${NOTIFICATIONS_LOGLEVEL} + NOTIFICATIONS_TRACING: ${NOTIFICATIONS_TRACING} + + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + dask-sidecar: image: ${DOCKER_REGISTRY:-itisfoundation}/dask-sidecar:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" deploy: mode: global endpoint_mode: dnsrr @@ -337,75 +1158,124 @@ services: volumes: - computational_shared_data:${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} - /var/run/docker.sock:/var/run/docker.sock - environment: - &sidecar-environment + environment: &sidecar-environment + DASK_TLS_CA_FILE: ${DASK_TLS_CA_FILE} + DASK_TLS_KEY: ${DASK_TLS_KEY} + DASK_TLS_CERT: ${DASK_TLS_CERT} DASK_SCHEDULER_HOST: ${DASK_SCHEDULER_HOST:-dask-scheduler} - SIDECAR_LOGLEVEL: ${LOG_LEVEL:-WARNING} + DASK_LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + DASK_LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + DASK_SIDECAR_LOGLEVEL: ${DASK_SIDECAR_LOGLEVEL} SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME: ${SWARM_STACK_NAME}_computational_shared_data SIDECAR_COMP_SERVICES_SHARED_FOLDER: ${SIDECAR_COMP_SERVICES_SHARED_FOLDER:-/home/scu/computational_shared_data} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} networks: - computational_services_subnet + secrets: *dask_tls_secrets dask-scheduler: image: 
${DOCKER_REGISTRY:-itisfoundation}/dask-sidecar:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: <<: *sidecar-environment DASK_START_AS_SCHEDULER: 1 networks: - computational_services_subnet + secrets: *dask_tls_secrets datcore-adapter: image: ${DOCKER_REGISTRY:-itisfoundation}/datcore-adapter:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" networks: - storage_subnet environment: - - TRACING_THRIFT_COMPACT_ENDPOINT=${TRACING_THRIFT_COMPACT_ENDPOINT} + DATCORE_ADAPTER_LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + DATCORE_ADAPTER_LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + DATCORE_ADAPTER_TRACING: ${DATCORE_ADAPTER_TRACING} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} storage: image: ${DOCKER_REGISTRY:-itisfoundation}/storage:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" - environment: - - BF_API_KEY=${BF_API_KEY} - - BF_API_SECRET=${BF_API_SECRET} - - DATCORE_ADAPTER_HOST=${DATCORE_ADAPTER_HOST:-datcore-adapter} - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_ENDPOINT=${POSTGRES_ENDPOINT} - - POSTGRES_HOST=${POSTGRES_HOST} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_PORT=${POSTGRES_PORT} - - POSTGRES_USER=${POSTGRES_USER} - - S3_ACCESS_KEY=${S3_ACCESS_KEY} - - S3_BUCKET_NAME=${S3_BUCKET_NAME} - - S3_ENDPOINT=${S3_ENDPOINT} - - S3_SECRET_KEY=${S3_SECRET_KEY} - - S3_SECURE=${S3_SECURE} - - STORAGE_LOGLEVEL=${LOG_LEVEL:-WARNING} - - STORAGE_MONITORING_ENABLED=1 - - TRACING_ZIPKIN_ENDPOINT=${TRACING_ZIPKIN_ENDPOINT:-http://jaeger:9411} - deploy: - placement: - constraints: - - node.platform.os == linux - networks: + hostname: "sto-{{.Node.Hostname}}-{{.Task.Slot}}" + environment: &storage_environment + DATCORE_ADAPTER_HOST: ${DATCORE_ADAPTER_HOST:-datcore-adapter} + LOG_FORMAT_LOCAL_DEV_ENABLED: ${LOG_FORMAT_LOCAL_DEV_ENABLED} + LOG_FILTER_MAPPING : ${LOG_FILTER_MAPPING} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} + RABBIT_HOST: ${RABBIT_HOST} + RABBIT_PASSWORD: ${RABBIT_PASSWORD} + RABBIT_PORT: ${RABBIT_PORT} + RABBIT_SECURE: ${RABBIT_SECURE} + RABBIT_USER: ${RABBIT_USER} + REDIS_HOST: ${REDIS_HOST} + REDIS_PORT: ${REDIS_PORT} + REDIS_SECURE: ${REDIS_SECURE} + REDIS_USER: ${REDIS_USER} + REDIS_PASSWORD: ${REDIS_PASSWORD} + S3_ACCESS_KEY: ${S3_ACCESS_KEY} + S3_BUCKET_NAME: ${S3_BUCKET_NAME} + S3_ENDPOINT: ${S3_ENDPOINT} + S3_REGION: ${S3_REGION} + S3_SECRET_KEY: ${S3_SECRET_KEY} + STORAGE_WORKER_MODE: "false" + STORAGE_LOGLEVEL: ${STORAGE_LOGLEVEL} + STORAGE_MONITORING_ENABLED: 1 + STORAGE_PROFILING: ${STORAGE_PROFILING} + STORAGE_PORT: ${STORAGE_PORT} + TRACING_OPENTELEMETRY_COLLECTOR_PORT: ${TRACING_OPENTELEMETRY_COLLECTOR_PORT} + TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT: ${TRACING_OPENTELEMETRY_COLLECTOR_ENDPOINT} + networks: &storage_networks - default - interactive_services_subnet - storage_subnet + sto-worker: + image: ${DOCKER_REGISTRY:-itisfoundation}/storage:${DOCKER_IMAGE_TAG:-master-github-latest} + init: true + hostname: "sto-worker-{{.Node.Hostname}}-{{.Task.Slot}}" + environment: + <<: 
*storage_environment + STORAGE_WORKER_MODE: "true" + CELERY_CONCURRENCY: 100 + networks: *storage_networks + + sto-worker-cpu-bound: + image: ${DOCKER_REGISTRY:-itisfoundation}/storage:${DOCKER_IMAGE_TAG:-master-github-latest} + init: true + hostname: "sto-worker-cpu-bound-{{.Node.Hostname}}-{{.Task.Slot}}" + environment: + <<: *storage_environment + STORAGE_WORKER_MODE: "true" + CELERY_CONCURRENCY: 1 + CELERY_QUEUES: "cpu_bound" + networks: *storage_networks + rabbit: - image: itisfoundation/rabbitmq:3.11.2-management + image: itisfoundation/rabbitmq:3.13.7-management init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - RABBITMQ_DEFAULT_USER=${RABBIT_USER} - - RABBITMQ_DEFAULT_PASS=${RABBIT_PASSWORD} + RABBITMQ_DEFAULT_USER: ${RABBIT_USER} + RABBITMQ_DEFAULT_PASS: ${RABBIT_PASSWORD} + volumes: + - rabbit_data:/var/lib/rabbitmq networks: - default + - computational_services_subnet - interactive_services_subnet - autoscaling_subnet healthcheck: @@ -419,25 +1289,25 @@ services: migration: image: ${DOCKER_REGISTRY:-itisfoundation}/migration:${DOCKER_IMAGE_TAG:-latest} init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_ENDPOINT=${POSTGRES_ENDPOINT} - - POSTGRES_HOST=${POSTGRES_HOST} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_PORT=${POSTGRES_PORT} - - POSTGRES_USER=${POSTGRES_USER} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_ENDPOINT: ${POSTGRES_ENDPOINT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_USER: ${POSTGRES_USER} networks: - default # actually needed for the postgres service only postgres: - image: "postgres:14.5-alpine@sha256:db802f226b620fc0b8adbeca7859eb203c8d3c9ce5d84870fadee05dea8f50ce" + image: "postgres:14.8-alpine@sha256:150dd39ccb7ae6c7ba6130c3582c39a30bb5d3d22cb08ad0ba37001e3f829abc" init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" environment: - - POSTGRES_DB=${POSTGRES_DB} - - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - - POSTGRES_USER=${POSTGRES_USER} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_USER: ${POSTGRES_USER} volumes: - postgres_data:/var/lib/postgresql/data - type: tmpfs @@ -448,15 +1318,7 @@ services: - default - interactive_services_subnet healthcheck: - test: - [ - "CMD", - "pg_isready", - "--username", - "${POSTGRES_USER}", - "--dbname", - "${POSTGRES_DB}" - ] + test: [ "CMD", "pg_isready", "--username", "${POSTGRES_USER}", "--dbname", "${POSTGRES_DB}" ] interval: 5s retries: 5 # NOTES: this is not yet compatible with portainer deployment but could work also for other containers @@ -469,25 +1331,12 @@ services: # - net.ipv4.tcp_keepalive_intvl=600 # - net.ipv4.tcp_keepalive_probes=9 # - net.ipv4.tcp_keepalive_time=600 - command: - [ - "postgres", - "-c", - "tcp_keepalives_idle=600", - "-c", - "tcp_keepalives_interval=600", - "-c", - "tcp_keepalives_count=5", - "-c", - "max_connections=413", - "-c", - "shared_buffers=256MB" - ] + command: [ "postgres", "-c", "tcp_keepalives_idle=600", "-c", "tcp_keepalives_interval=600", "-c", "tcp_keepalives_count=5", "-c", "max_connections=413", "-c", "shared_buffers=256MB" ] redis: image: "redis:6.2.6@sha256:4bed291aa5efb9f0d77b76ff7d4ab71eee410962965d052552db1fb80576431d" init: true - hostname: 
"{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" command: # redis server will write a backup every 60 seconds if at least 1 key was changed # also aof (append only) is also enabled such that we get full durability at the expense @@ -500,9 +1349,11 @@ services: "--loglevel", "verbose", "--databases", - "5", + "10", "--appendonly", - "yes" + "yes", + "--requirepass", + "${REDIS_PASSWORD}" ] networks: - default @@ -510,22 +1361,22 @@ services: volumes: - redis-data:/data healthcheck: - test: [ "CMD", "redis-cli", "ping" ] + test: [ "CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping" ] interval: 5s timeout: 30s retries: 50 traefik: - image: "traefik:v2.9.8@sha256:553239e27c4614d0477651415205b9b119f7a98f698e6562ef383c9d8ff3b6e6" + image: "traefik:v3.4.0@sha256:4cf907247939b5d20bf4eff73abd21cb413c339600dde76dbc94a874b2578a27" init: true - hostname: "{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}" + hostname: "{{.Node.Hostname}}-{{.Task.Slot}}" command: - "--api=true" - "--api.dashboard=true" - "--ping=true" - "--entryPoints.ping.address=:9082" - "--ping.entryPoint=ping" - - "--log.level=WARNING" + - "--log.level=WARN" # WARN, not WARNING - "--accesslog=false" - "--metrics.prometheus=true" - "--metrics.prometheus.addEntryPointsLabels=true" @@ -534,21 +1385,22 @@ services: - "--metrics.prometheus.entryPoint=metrics" - "--entryPoints.http.address=:80" - "--entryPoints.http.forwardedHeaders.insecure" + - "--entryPoints.http.transport.respondingTimeouts.readTimeout=21600s" #6h, for https://github.com/traefik/traefik/issues/10805 - "--entryPoints.simcore_api.address=:10081" - "--entryPoints.simcore_api.address=:10081" - "--entryPoints.simcore_api.forwardedHeaders.insecure" + - "--entryPoints.simcore_api.transport.respondingTimeouts.readTimeout=21600s" #6h, for https://github.com/traefik/traefik/issues/10805 - "--entryPoints.traefik_monitor.address=:8080" - "--entryPoints.traefik_monitor.forwardedHeaders.insecure" - - "--providers.docker.endpoint=unix:///var/run/docker.sock" - - "--providers.docker.network=${SWARM_STACK_NAME}_default" - - "--providers.docker.swarmMode=true" + - "--providers.swarm.endpoint=unix:///var/run/docker.sock" + - "--providers.swarm.network=${SWARM_STACK_NAME}_default" # https://github.com/traefik/traefik/issues/7886 - - "--providers.docker.swarmModeRefreshSeconds=1" - - "--providers.docker.exposedByDefault=false" - - "--providers.docker.constraints=Label(`io.simcore.zone`, `${TRAEFIK_SIMCORE_ZONE}`)" - - "--tracing=true" - - "--tracing.jaeger=true" - - "--tracing.jaeger.samplingServerURL=http://jaeger:5778/sampling" - - "--tracing.jaeger.localAgentHostPort=jaeger:6831" + - "--providers.swarm.refreshSeconds=1" + - "--providers.swarm.exposedByDefault=false" + - "--providers.swarm.constraints=Label(`io.simcore.zone`, `${TRAEFIK_SIMCORE_ZONE}`)" + - "--tracing" + - "--tracing.addinternals" + - "--tracing.otlp=true" + - "--tracing.otlp.http=true" volumes: # So that Traefik can listen to the Docker events - /var/run/docker.sock:/var/run/docker.sock @@ -565,16 +1417,14 @@ services: - traefik.http.middlewares.ratelimit-${SWARM_STACK_NAME}_api-server.ratelimit.burst=10 # X-Forwarded-For header extracts second IP from the right, count starts at one - traefik.http.middlewares.ratelimit-${SWARM_STACK_NAME}_api-server.ratelimit.sourcecriterion.ipstrategy.depth=2 + # middleware for limiting total inflight requests the api-server is handling + - 
traefik.http.middlewares.ensure-group-header-${SWARM_STACK_NAME}_api-server.headers.customrequestheaders.X-Inflight-Limit-Group=all + - traefik.http.middlewares.limit-reqs-${SWARM_STACK_NAME}_api-server.inflightreq.amount=${TRAEFIK_API_SERVER_INFLIGHTREQ_AMOUNT} + - traefik.http.middlewares.limit-reqs-${SWARM_STACK_NAME}_api-server.inflightreq.sourcecriterion.requestheadername=X-Inflight-Limit-Group + - traefik.http.middlewares.inflightreq-${SWARM_STACK_NAME}_api-server.chain.middlewares=ensure-group-header-${SWARM_STACK_NAME}_api-server,limit-reqs-${SWARM_STACK_NAME}_api-server networks: - default - interactive_services_subnet # for legacy dynamic services - #healthcheck: - # test: wget --quiet --tries=1 --spider http://localhost:9082/ping || exit 1 - # interval: 3s - # timeout: 1s - # retries: 3 - # start_period: 20s - volumes: postgres_data: @@ -583,6 +1433,8 @@ volumes: name: ${SWARM_STACK_NAME}_computational_shared_data redis-data: name: ${SWARM_STACK_NAME}_redis-data + rabbit_data: + name: ${SWARM_STACK_NAME}_rabbit_data networks: default: @@ -601,10 +1453,6 @@ networks: internal: false labels: com.simcore.description: "interactive services network" - ipam: - driver: default - config: - - subnet: "172.8.0.0/16" computational_services_subnet: name: ${SWARM_STACK_NAME}_computational_services_subnet driver: overlay @@ -612,3 +1460,20 @@ networks: internal: false labels: com.simcore.description: "computational services network" + docker-api-network: + name: ${SWARM_STACK_NAME}_docker-api-network + driver: overlay + attachable: true + internal: true + driver_opts: + encrypted: "true" + labels: + com.simcore.description: "used for internal access to the docker swarm api" + +secrets: + dask_tls_key: + file: ./dask-sidecar/.dask-certificates/dask-key.pem + name: ${SWARM_STACK_NAME}_dask_tls_key + dask_tls_cert: + file: ./dask-sidecar/.dask-certificates/dask-cert.pem + name: ${SWARM_STACK_NAME}_dask_tls_cert diff --git a/services/dynamic-scheduler/Dockerfile b/services/dynamic-scheduler/Dockerfile new file mode 100644 index 00000000000..f3bad615462 --- /dev/null +++ b/services/dynamic-scheduler/Dockerfile @@ -0,0 +1,185 @@ +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd sercices/dynamic-scheduler +# docker build -f Dockerfile -t dynamic_scheduler:prod --target production ../../ +# docker run dynamic_scheduler:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=pcrespov + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ + # verify that the binary works + && gosu nobody true + +# simcore-user 
uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/scu/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv. +ENV PATH="${VIRTUAL_ENV}/bin:$PATH" + +# -------------------------- Build stage ------------------- +# Installs build/package management tools and third party dependencies +# +# + /build WORKDIR +# +FROM base AS build + +ENV SC_BUILD_TARGET=build + +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools + +WORKDIR /build + +# install base 3rd party dependencies + + + +# --------------------------Prod-depends-only stage ------------------- +# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) +# +# + /build +# + services/dynamic-scheduler [scu:scu] WORKDIR +# +FROM build AS prod-only-deps + +ENV SC_BUILD_TARGET=prod-only-deps + +WORKDIR /build/services/dynamic-scheduler + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/dynamic-scheduler,target=/build/services/dynamic-scheduler,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + + +# --------------------------Production stage ------------------- +# Final cleanup up to reduce image size and startup setup +# Runs as scu (non-root user) +# +# + /home/scu $HOME = WORKDIR +# + services/dynamic-scheduler [scu:scu] +# +FROM base AS production + +ENV SC_BUILD_TARGET=production \ + SC_BOOT_MODE=production + +ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + +WORKDIR /home/scu + +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +# Copies booting scripts +COPY --chown=scu:scu services/dynamic-scheduler/docker services/dynamic-scheduler/docker +RUN chmod +x services/dynamic-scheduler/docker/*.sh + + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "services/dynamic-scheduler/docker/healthcheck.py", "http://localhost:8000/health"] + +ENTRYPOINT [ "/bin/sh", "services/dynamic-scheduler/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/dynamic-scheduler/docker/boot.sh"] + +EXPOSE 8000 + +# --------------------------Development stage ------------------- +# Source code accessible in host but 
runs in container +# Runs as myu with same gid/uid as host +# Placed at the end to speed-up the build if images targeting production +# +# + /devel WORKDIR +# + services (mounted volume) +# +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/dynamic-scheduler + +WORKDIR /devel + +RUN chown -R scu:scu "${VIRTUAL_ENV}" + +EXPOSE 8000 +EXPOSE 3000 + +ENTRYPOINT ["/bin/sh", "services/dynamic-scheduler/docker/entrypoint.sh"] +CMD ["/bin/sh", "services/dynamic-scheduler/docker/boot.sh"] diff --git a/services/dynamic-scheduler/Makefile b/services/dynamic-scheduler/Makefile new file mode 100644 index 00000000000..f46337a7667 --- /dev/null +++ b/services/dynamic-scheduler/Makefile @@ -0,0 +1,18 @@ +# +# DEVELOPMENT recipes for dynamic-scheduler +# +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile + + +.env-ignore: + $(APP_CLI_NAME) echo-dotenv > $@ + +.PHONY: openapi.json +openapi-specs: openapi.json +openapi.json: .env-ignore ## produces openapi.json + # generating openapi specs file (need to have the environment set for this) + @set -o allexport; \ + source $<; \ + set +o allexport; \ + python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ diff --git a/services/dynamic-scheduler/README.md b/services/dynamic-scheduler/README.md new file mode 100644 index 00000000000..2c20e3f37b8 --- /dev/null +++ b/services/dynamic-scheduler/README.md @@ -0,0 +1,3 @@ +# dynamic-scheduler + +Wil be used as an interface for running and handling the lifecycle of all dynamic services. diff --git a/services/dynamic-scheduler/VERSION b/services/dynamic-scheduler/VERSION new file mode 100644 index 00000000000..9084fa2f716 --- /dev/null +++ b/services/dynamic-scheduler/VERSION @@ -0,0 +1 @@ +1.1.0 diff --git a/services/dynamic-scheduler/docker/boot.sh b/services/dynamic-scheduler/docker/boot.sh new file mode 100755 index 00000000000..dae7ea09e1b --- /dev/null +++ b/services/dynamic-scheduler/docker/boot.sh @@ -0,0 +1,66 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" + +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "Environment :" + printenv | sed 's/=/: /' | sed 's/^/ /' | sort + echo "$INFO" "Python :" + python --version | sed 's/^/ /' + command -v python | sed 's/^/ /' + + cd services/dynamic-scheduler + uv pip --quiet sync requirements/dev.txt + cd - + echo "$INFO" "PIP :" + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + +# +# RUNNING application +# + +APP_LOG_LEVEL=${DYNAMIC_SCHEDULER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! 
-path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/dynamic-scheduler/src/simcore_service_dynamic_scheduler && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${DYNAMIC_SCHEDULER_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --reload \ + $reload_dir_packages + --reload-dir . \ + --log-level \"${SERVER_LOG_LEVEL}\" + " +else + exec uvicorn simcore_service_dynamic_scheduler.main:the_app \ + --host 0.0.0.0 \ + --log-level "${SERVER_LOG_LEVEL}" +fi diff --git a/services/dynamic-scheduler/docker/entrypoint.sh b/services/dynamic-scheduler/docker/entrypoint.sh new file mode 100755 index 00000000000..25153a6b2a2 --- /dev/null +++ b/services/dynamic-scheduler/docker/entrypoint.sh @@ -0,0 +1,70 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " +WARNING="WARNING: [$(basename "$0")] " +ERROR="ERROR: [$(basename "$0")] " + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gip and assigns to user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..." + adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." + adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi +fi + +echo "$INFO Starting $* ..." 
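The development flow this entrypoint expects is a bind-mount of the service sources at SC_DEVEL_MOUNT (/devel/services/dynamic-scheduler in the development stage of the Dockerfile above), so that the host's uid/gid can be read from the mounted folder and mapped onto the container user; a typical invocation from the repository root might look like the following, where the image tag is illustrative:

  docker run --rm -it \
    -v "$(pwd)/services/dynamic-scheduler:/devel/services/dynamic-scheduler" \
    dynamic-scheduler:development   # image tag is an assumption, not defined in this diff
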
+echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" +echo " local dir : $(ls -al)" + +exec gosu "$SC_USER_NAME" "$@" diff --git a/services/dynamic-scheduler/docker/healthcheck.py b/services/dynamic-scheduler/docker/healthcheck.py new file mode 100755 index 00000000000..93d880914c1 --- /dev/null +++ b/services/dynamic-scheduler/docker/healthcheck.py @@ -0,0 +1,38 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=20s \ + --start-interval=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, debugpy, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception), urlopen(sys.argv[1]) as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/dynamic-scheduler/openapi.json b/services/dynamic-scheduler/openapi.json new file mode 100644 index 00000000000..9f6867c6872 --- /dev/null +++ b/services/dynamic-scheduler/openapi.json @@ -0,0 +1,266 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "simcore-service-dynamic-scheduler web API", + "description": "Service that manages lifecycle of dynamic services", + "version": "1.1.0" + }, + "paths": { + "/health": { + "get": { + "summary": "Healthcheck", + "operationId": "healthcheck_health_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + }, + "/v1/meta": { + "get": { + "tags": [ + "meta" + ], + "summary": "Get Service Metadata", + "operationId": "get_service_metadata_v1_meta_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Meta" + } + } + } + } + } + } + }, + "/v1/ops/running-services": { + "get": { + "tags": [ + "ops" + ], + "summary": "Running Services", + "description": "returns all running dynamic services. 
Used by ops internall to determine\nwhen it is safe to shutdown the platform", + "operationId": "running_services_v1_ops_running_services_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/RunningDynamicServiceDetails" + }, + "type": "array", + "title": "Response Running Services V1 Ops Running Services Get" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "Meta": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + }, + "released": { + "anyOf": [ + { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Released", + "description": "Maps every route's path tag with a released version" + }, + "docs_url": { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri", + "title": "Docs Url" + } + }, + "type": "object", + "required": [ + "name", + "version", + "docs_url" + ], + "title": "Meta" + }, + "RunningDynamicServiceDetails": { + "properties": { + "service_key": { + "type": "string", + "pattern": "^simcore/services/dynamic/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key", + "description": "distinctive name for the node based on the docker registry path" + }, + "service_version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version", + "description": "semantic version number of the node" + }, + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + }, + "project_id": { + "type": "string", + "format": "uuid", + "title": "Project Id" + }, + "service_uuid": { + "type": "string", + "format": "uuid", + "title": "Service Uuid" + }, + "service_basepath": { + "anyOf": [ + { + "type": "string", + "format": "path" + }, + { + "type": "null" + } + ], + "title": "Service Basepath", + "description": "predefined path where the dynamic service should be served. If empty, the service shall use the root endpoint." 
+ }, + "boot_type": { + "$ref": "#/components/schemas/ServiceBootType", + "description": "Describes how the dynamic services was started (legacy=V0, modern=V2).Since legacy services do not have this label it defaults to V0.", + "default": "V0" + }, + "service_host": { + "type": "string", + "title": "Service Host", + "description": "the service swarm internal host name" + }, + "service_port": { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "title": "Service Port", + "description": "the service swarm internal port", + "maximum": 65535, + "minimum": 0 + }, + "published_port": { + "anyOf": [ + { + "type": "integer", + "exclusiveMaximum": true, + "exclusiveMinimum": true, + "maximum": 65535, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Published Port", + "description": "the service swarm published port if any", + "deprecated": true + }, + "entry_point": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Entry Point", + "description": "if empty the service entrypoint is on the root endpoint.", + "deprecated": true + }, + "service_state": { + "$ref": "#/components/schemas/ServiceState", + "description": "service current state" + }, + "service_message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Service Message", + "description": "additional information related to service state" + } + }, + "type": "object", + "required": [ + "service_key", + "service_version", + "user_id", + "project_id", + "service_uuid", + "service_host", + "service_port", + "service_state" + ], + "title": "RunningDynamicServiceDetails" + }, + "ServiceBootType": { + "type": "string", + "enum": [ + "V0", + "V2" + ], + "title": "ServiceBootType" + }, + "ServiceState": { + "type": "string", + "enum": [ + "failed", + "pending", + "pulling", + "starting", + "running", + "stopping", + "complete", + "idle" + ], + "title": "ServiceState" + } + } + } +} diff --git a/services/dynamic-scheduler/requirements/Makefile b/services/dynamic-scheduler/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/dynamic-scheduler/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/dynamic-scheduler/requirements/_base.in b/services/dynamic-scheduler/requirements/_base.in new file mode 100644 index 00000000000..93a56922f0b --- /dev/null +++ b/services/dynamic-scheduler/requirements/_base.in @@ -0,0 +1,22 @@ +# +# Specifies third-party dependencies for 'services/dynamic-scheduler/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + +nicegui +packaging +python-socketio +typer[all] +u-msgpack-python diff --git a/services/dynamic-scheduler/requirements/_base.txt b/services/dynamic-scheduler/requirements/_base.txt new file mode 100644 index 00000000000..9dc7a93fb0e --- /dev/null +++ b/services/dynamic-scheduler/requirements/_base.txt @@ -0,0 +1,647 @@ +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # nicegui +aiohappyeyeballs==2.5.0 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker + # nicegui + # python-socketio +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.2 + # via aiohttp +alembic==1.15.1 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.1.0 + # via + # aiohttp + # jsonschema + # referencing +bidict==0.23.1 + # via python-socketio +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # nicegui + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via + # rich-toolkit + # typer + # uvicorn +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +docutils==0.21.2 + # via nicegui +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + 
# via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager + # nicegui +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.69.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 + # via sqlalchemy +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # via + # httpcore + # uvicorn + # wsproto +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi + # nicegui +hyperframe==6.1.0 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +ifaddr==0.2.0 + # via nicegui +importlib-metadata==8.5.0 + # via opentelemetry-api +itsdangerous==2.2.0 + # via nicegui +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi + # nicegui +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markdown2==2.5.3 + # via nicegui +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiohttp + # yarl +nicegui==2.12.1 + # via -r requirements/_base.in +opentelemetry-api==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.51b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.51b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # 
opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # nicegui +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq 
+prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +pscript==0.7.7 + # via vbuild +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via + # nicegui + # rich +pyinstrument==5.0.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-engineio==4.11.2 + # via python-socketio +python-multipart==0.0.20 + # via + # fastapi + # nicegui +python-socketio==5.12.1 + # via + # -r requirements/_base.in + # nicegui +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via + # nicegui + # opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.23.1 + # via + 
# jsonschema + # referencing +shellingham==1.5.4 + # via typer +simple-websocket==1.1.0 + # via python-engineio +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +starlette==0.46.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # alembic + # anyio + # fastapi + # faststream + # nicegui + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +u-msgpack-python==2.8.0 + # via -r requirements/_base.in +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # nicegui + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli + # 
nicegui +uvloop==0.21.0 + # via uvicorn +vbuild==0.8.2 + # via nicegui +watchfiles==1.0.4 + # via + # nicegui + # uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +wsproto==1.2.0 + # via simple-websocket +yarl==1.18.3 + # via + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/services/dynamic-scheduler/requirements/_test.in b/services/dynamic-scheduler/requirements/_test.in new file mode 100644 index 00000000000..840e5093b13 --- /dev/null +++ b/services/dynamic-scheduler/requirements/_test.in @@ -0,0 +1,30 @@ +# +# Specifies dependencies required to run 'services/dynamic-scheduler/test' +# both for unit and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +asgi_lifespan +coverage +docker +faker +hypercorn +playwright +pytest +pytest-asyncio +pytest-cov +pytest-icdiff +pytest-mock +pytest-runner +pytest-sugar +python-dotenv +respx +sqlalchemy[mypy] +types-psycopg2 diff --git a/services/dynamic-scheduler/requirements/_test.txt b/services/dynamic-scheduler/requirements/_test.txt new file mode 100644 index 00000000000..07f543c069a --- /dev/null +++ b/services/dynamic-scheduler/requirements/_test.txt @@ -0,0 +1,153 @@ +anyio==4.8.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.1 + # via + # -c requirements/_base.txt + # requests +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +docker==7.1.0 + # via -r requirements/_test.in +faker==36.2.2 + # via -r requirements/_test.in +greenlet==3.1.1 + # via + # -c requirements/_base.txt + # playwright + # sqlalchemy +h11==0.14.0 + # via + # -c requirements/_base.txt + # httpcore + # hypercorn + # wsproto +h2==4.2.0 + # via + # -c requirements/_base.txt + # hypercorn +hpack==4.1.0 + # via + # -c requirements/_base.txt + # h2 +httpcore==1.0.7 + # via + # -c requirements/_base.txt + # httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # respx +hypercorn==0.17.3 + # via -r requirements/_test.in +hyperframe==6.1.0 + # via + # -c requirements/_base.txt + # h2 +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests +iniconfig==2.0.0 + # via pytest +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.1.0 + # via mypy +packaging==24.2 + # via + # -c requirements/_base.txt + # pytest + # pytest-sugar +playwright==1.50.0 + # via -r requirements/_test.in +pluggy==1.5.0 + # via pytest +pprintpp==0.4.0 + # via pytest-icdiff +priority==2.0.0 + # via hypercorn +pyee==12.1.1 + # via playwright +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-icdiff + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in 
+pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-icdiff==0.9 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker +respx==0.22.0 + # via -r requirements/_test.in +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +termcolor==2.5.0 + # via pytest-sugar +types-psycopg2==2.9.21.20250318 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # anyio + # mypy + # pyee + # sqlalchemy2-stubs +tzdata==2025.1 + # via faker +urllib3==2.3.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # docker + # requests +wsproto==1.2.0 + # via + # -c requirements/_base.txt + # hypercorn diff --git a/services/dynamic-scheduler/requirements/_tools.in b/services/dynamic-scheduler/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/services/dynamic-scheduler/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/services/dynamic-scheduler/requirements/_tools.txt b/services/dynamic-scheduler/requirements/_tools.txt new file mode 100644 index 00000000000..24f125f8a3c --- /dev/null +++ b/services/dynamic-scheduler/requirements/_tools.txt @@ -0,0 +1,85 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_base.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.1.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy 
+virtualenv==20.29.3
+    # via pre-commit
+wheel==0.45.1
+    # via pip-tools
diff --git a/services/dynamic-scheduler/requirements/ci.txt b/services/dynamic-scheduler/requirements/ci.txt
new file mode 100644
index 00000000000..6b762254f44
--- /dev/null
+++ b/services/dynamic-scheduler/requirements/ci.txt
@@ -0,0 +1,22 @@
+# Shortcut to install all packages for the continuous integration (CI) of 'services/dynamic-scheduler'
+#
+# - As prod.txt but w/ tests
+#
+# Usage:
+# pip install -r requirements/ci.txt
+#
+
+# installs base + tests requirements
+--requirement _base.txt
+--requirement _test.txt
+--requirement _tools.txt
+
+# installs this repo's packages
+simcore-common-library @ ../../packages/common-library
+simcore-models-library @ ../../packages/models-library
+simcore-postgres-database @ ../../packages/postgres-database
+pytest-simcore @ ../../packages/pytest-simcore
+simcore-service-library[fastapi] @ ../../packages/service-library
+simcore-settings-library @ ../../packages/settings-library
+# installs current package
+simcore-service-dynamic-scheduler @ .
diff --git a/services/dynamic-scheduler/requirements/constraints.txt b/services/dynamic-scheduler/requirements/constraints.txt
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/services/dynamic-scheduler/requirements/dev.txt b/services/dynamic-scheduler/requirements/dev.txt
new file mode 100644
index 00000000000..60cb7217e53
--- /dev/null
+++ b/services/dynamic-scheduler/requirements/dev.txt
@@ -0,0 +1,23 @@
+# Shortcut to install all packages needed to develop 'services/dynamic-scheduler'
+#
+# - As ci.txt but with current and repo packages in develop (edit) mode
+#
+# Usage:
+# pip install -r requirements/dev.txt
+#
+
+# installs base + tests + tools requirements
+--requirement _base.txt
+--requirement _test.txt
+--requirement _tools.txt
+
+# installs this repo's packages
+--editable ../../packages/common-library
+--editable ../../packages/models-library
+--editable ../../packages/postgres-database
+--editable ../../packages/pytest-simcore
+--editable ../../packages/service-library[fastapi]
+--editable ../../packages/settings-library
+
+# installs current package
+--editable .
diff --git a/services/dynamic-scheduler/requirements/prod.txt b/services/dynamic-scheduler/requirements/prod.txt
new file mode 100644
index 00000000000..6150ebc8780
--- /dev/null
+++ b/services/dynamic-scheduler/requirements/prod.txt
@@ -0,0 +1,20 @@
+# Shortcut to install 'services/dynamic-scheduler' for production
+#
+# - As ci.txt but w/o tests
+#
+# Usage:
+# pip install -r requirements/prod.txt
+#
+
+# installs base requirements
+--requirement _base.txt
+
+# installs this repo's packages
+simcore-models-library @ ../../packages/models-library
+simcore-common-library @ ../../packages/common-library/
+simcore-postgres-database @ ../../packages/postgres-database
+simcore-service-library[fastapi] @ ../../packages/service-library
+simcore-settings-library @ ../../packages/settings-library
+
+# installs current package
+simcore-service-dynamic-scheduler @ .
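Note on how these pinned files are consumed: the service's setup.py (further down in this diff) folds the pins from _base.txt and _test.txt into install_requires and the "test" extra via a small regex helper. A minimal sketch of that idea, assuming the pip-compile output format shown above (the helper name and the usage path here are illustrative, not the exact code):

    import re
    from pathlib import Path


    def read_pins(reqs_path: Path) -> set[str]:
        # keep requirement lines such as "fastapi==0.115.12"; lines starting with
        # "#" (comments) or "-" (e.g. --constraint / --requirement options) are skipped
        return set(
            re.findall(r"^[^#\n-][\w\[,\]]+[-~>=<.\w]*", reqs_path.read_text(), re.MULTILINE)
        )


    # e.g. install_requires = sorted(read_pins(Path("requirements/_base.txt")))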
diff --git a/services/dynamic-scheduler/setup.cfg b/services/dynamic-scheduler/setup.cfg new file mode 100644 index 00000000000..8eb83957564 --- /dev/null +++ b/services/dynamic-scheduler/setup.cfg @@ -0,0 +1,19 @@ +[bumpversion] +current_version = 1.1.0 +commit = True +message = services/dynamic-scheduler version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/services/dynamic-scheduler/setup.py b/services/dynamic-scheduler/setup.py new file mode 100755 index 00000000000..1dcc2af6f07 --- /dev/null +++ b/services/dynamic-scheduler/setup.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-dynamic-scheduler" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Andrei Neagu (GitHK)",) +DESCRIPTION = "Service that manages lifecycle of dynamic services" +README = (CURRENT_DIR / "README.md").read_text() + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-postgres-database", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-dynamic-scheduler = simcore_service_dynamic_scheduler.cli:main", + "simcore-service = simcore_service_dynamic_scheduler.cli:main", + ], + }, +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/_meta.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/_meta.py new file mode 100644 index 00000000000..4e33eee9226 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/_meta.py @@ -0,0 +1,35 @@ +""" Application's metadata + +""" +from typing import Final + +from models_library.basic_types import VersionStr +from packaging.version import Version +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore-service-dynamic-scheduler") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +APP_NAME = PROJECT_NAME 
+API_VTAG: Final[str] = info.api_prefix_path_tag +SUMMARY: Final[str] = info.get_summary() + + +# NOTE: https://patorjk.com/software/taag/#p=display&f=Standard&t=dynamic-scheduler +APP_STARTED_BANNER_MSG = r""" + _ _ _ _ _ + __| |_ _ _ __ __ _ _ __ ___ (_) ___ ___ ___| |__ ___ __| |_ _| | ___ _ __ + / _` | | | | '_ \ / _` | '_ ` _ \| |/ __|____/ __|/ __| '_ \ / _ \/ _` | | | | |/ _ \ '__| + | (_| | |_| | | | | (_| | | | | | | | (_|_____\__ \ (__| | | | __/ (_| | |_| | | __/ | + \__,_|\__, |_| |_|\__,_|_| |_| |_|_|\___| |___/\___|_| |_|\___|\__,_|\__,_|_|\___|_| + |___/ {} +""".format( + f"v{__version__}" +) + + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py new file mode 100644 index 00000000000..71c3aa2c862 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/__init__.py @@ -0,0 +1,3 @@ +from ._setup import initialize_frontend + +__all__: tuple[str, ...] = ("initialize_frontend",) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py new file mode 100644 index 00000000000..d56da5f43f4 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_setup.py @@ -0,0 +1,19 @@ +import nicegui +from fastapi import FastAPI + +from ...core.settings import ApplicationSettings +from ._utils import set_parent_app +from .routes import router + + +def initialize_frontend(app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + + nicegui.app.include_router(router) + + nicegui.ui.run_with( + app, + mount_path=settings.DYNAMIC_SCHEDULER_UI_MOUNT_PATH, + storage_secret=settings.DYNAMIC_SCHEDULER_UI_STORAGE_SECRET.get_secret_value(), + ) + set_parent_app(app) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py new file mode 100644 index 00000000000..6d3f61c31fc --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/_utils.py @@ -0,0 +1,19 @@ +import nicegui +from fastapi import FastAPI + +from ...core.settings import ApplicationSettings + + +def set_parent_app(parent_app: FastAPI) -> None: + nicegui.app.state.parent_app = parent_app + + +def get_parent_app(app: FastAPI) -> FastAPI: + parent_app: FastAPI = app.state.parent_app + return parent_app + + +def get_settings() -> ApplicationSettings: + parent_app = get_parent_app(nicegui.app) + settings: ApplicationSettings = parent_app.state.settings + return settings diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py new file mode 100644 index 00000000000..098f68217be --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/__init__.py @@ -0,0 +1,10 @@ +from nicegui import APIRouter + +from . 
import _index, _service + +router = APIRouter() + +router.include_router(_index.router) +router.include_router(_service.router) + +__all__: tuple[str, ...] = ("router",) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py new file mode 100644 index 00000000000..b6f7d5b1c91 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_index.py @@ -0,0 +1,168 @@ +import httpx +from common_library.json_serialization import json_dumps, json_loads +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from nicegui import APIRouter, app, ui +from nicegui.element import Element +from nicegui.elements.label import Label +from settings_library.utils_service import DEFAULT_FASTAPI_PORT + +from ....services.service_tracker import TrackedServiceModel, get_all_tracked_services +from ....services.service_tracker._models import SchedulerServiceState +from .._utils import get_parent_app, get_settings +from ._render_utils import base_page, get_iso_formatted_date + +router = APIRouter() + + +def _render_service_details(node_id: NodeID, service: TrackedServiceModel) -> None: + dict_to_render: dict[str, tuple[str, str]] = { + "NodeID": ("copy", f"{node_id}"), + "Display State": ("label", service.current_state), + "Last State Change": ( + "label", + get_iso_formatted_date(service.last_state_change), + ), + "UserID": ("copy", f"{service.user_id}"), + "ProjectID": ("copy", f"{service.project_id}"), + "User Requested": ("label", service.requested_state), + } + + if service.dynamic_service_start: + dict_to_render["Service"] = ( + "label", + f"{service.dynamic_service_start.key}:{service.dynamic_service_start.version}", + ) + dict_to_render["Product"] = ( + "label", + service.dynamic_service_start.product_name, + ) + service_status = ( + json_loads(service.service_status) if service.service_status else {} + ) + dict_to_render["Service State"] = ( + "label", + service_status.get( + "state" if "boot_type" in service_status else "service_state", "N/A" + ), + ) + + with ui.column().classes("gap-0"): + for key, (widget, value) in dict_to_render.items(): + with ui.row(align_items="baseline"): + ui.label(key).classes("font-bold") + match widget: + case "copy": + ui.label(value).classes("border bg-slate-200 px-1") + case "label": + ui.label(value) + case _: + ui.label(value) + + +def _render_buttons(node_id: NodeID, service: TrackedServiceModel) -> None: + + with ui.dialog() as confirm_dialog, ui.card(): + ui.markdown(f"Stop service **{node_id}**?") + ui.label("The service will be stopped and its data will be saved.") + with ui.row(): + + async def _stop_service() -> None: + confirm_dialog.close() + + url = f"http://localhost:{DEFAULT_FASTAPI_PORT}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}service/{node_id}:stop" + await httpx.AsyncClient(timeout=10).get(f"{url}") + + ui.notify( + f"Submitted stop request for {node_id}. Please give the service some time to stop!" 
+ ) + + ui.button("Stop Now", color="red", on_click=_stop_service) + ui.button("Cancel", on_click=confirm_dialog.close) + + with ui.button_group(): + ui.button( + "Details", + icon="source", + on_click=lambda: ui.navigate.to(f"/service/{node_id}:details"), + ).tooltip("Display more information about what the scheduler is tracking") + + if service.current_state != SchedulerServiceState.RUNNING: + return + + ui.button( + "Stop Service", + icon="stop", + color="orange", + on_click=confirm_dialog.open, + ).tooltip("Stops the service and saves the data") + + +def _render_card( + card_container: Element, node_id: NodeID, service: TrackedServiceModel +) -> None: + with card_container: # noqa: SIM117 + with ui.column().classes("border p-1"): + _render_service_details(node_id, service) + _render_buttons(node_id, service) + + +def _get_clean_hashable(model: TrackedServiceModel) -> dict: + """removes items which trigger frequent updates and are not interesting to the user""" + data = model.model_dump(mode="json") + data.pop("check_status_after") + data.pop("last_status_notification") + data.pop("service_status_task_uid") + return data + + +def _get_hash(items: list[tuple[NodeID, TrackedServiceModel]]) -> int: + return hash( + json_dumps([(f"{key}", _get_clean_hashable(model)) for key, model in items]) + ) + + +class CardUpdater: + def __init__( + self, parent_app: FastAPI, container: Element, services_count_label: Label + ) -> None: + self.parent_app = parent_app + self.container = container + self.services_count_label = services_count_label + self.last_hash: int = _get_hash([]) + + async def update(self) -> None: + tracked_services = await get_all_tracked_services(self.parent_app) + tracked_items: list[tuple[NodeID, TrackedServiceModel]] = sorted( + tracked_services.items(), reverse=True + ) + + current_hash = _get_hash(tracked_items) + + if self.last_hash != current_hash: + self.services_count_label.set_text(f"{len(tracked_services)}") + # Clear the current cards + self.container.clear() + for node_id, service in tracked_items: + _render_card(self.container, node_id, service) + + self.last_hash = current_hash + + +@router.page("/") +async def index(): + with base_page(): + with ui.row().classes("gap-0"): + ui.label("Total tracked services:") + ui.label("").classes("w-1") + with ui.label("0") as services_count_label: + pass + + card_container: Element = ui.row() + + updater = CardUpdater(get_parent_app(app), card_container, services_count_label) + + # render cards when page is loaded + await updater.update() + # update card at a set interval + ui.timer(1, updater.update) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py new file mode 100644 index 00000000000..c3a315be2d7 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_render_utils.py @@ -0,0 +1,23 @@ +from collections.abc import Iterator +from contextlib import contextmanager + +import arrow +from nicegui import ui + + +@contextmanager +def base_page(*, title: str | None = None) -> Iterator[None]: + display_title = ( + "Dynamic Scheduler" if title is None else f"Dynamic Scheduler - {title}" + ) + ui.page_title(display_title) + + with ui.header(elevated=True).classes("items-center"): + ui.button(icon="o_home", on_click=lambda: ui.navigate.to("/")) + ui.label(display_title) + + yield None + + +def 
get_iso_formatted_date(timestamp: float) -> str: + return arrow.get(timestamp).isoformat() diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py new file mode 100644 index 00000000000..ac073072a44 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/frontend/routes/_service.py @@ -0,0 +1,145 @@ +import httpx +from common_library.json_serialization import json_dumps, json_loads +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStop, +) +from models_library.projects_nodes_io import NodeID +from nicegui import APIRouter, app, ui +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.services import ( + stop_dynamic_service, +) +from settings_library.utils_service import DEFAULT_FASTAPI_PORT +from simcore_service_dynamic_scheduler.services.rabbitmq import get_rabbitmq_rpc_client + +from ....core.settings import ApplicationSettings +from ....services.service_tracker import get_tracked_service, remove_tracked_service +from .._utils import get_parent_app, get_settings +from ._render_utils import base_page + +router = APIRouter() + + +def _render_remove_from_tracking(node_id): + with ui.dialog() as confirm_dialog, ui.card(): + + async def remove_from_tracking(): + confirm_dialog.close() + + url = f"http://localhost:{DEFAULT_FASTAPI_PORT}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}service/{node_id}/tracker:remove" + await httpx.AsyncClient(timeout=10).get(f"{url}") + + ui.notify(f"Service {node_id} removed from tracking") + ui.navigate.to("/") + + ui.markdown(f"Remove the service **{node_id}** from the tracker?") + ui.label( + "This action will result in the removal of the service from the internal tracker. " + "This action should be used when you are facing issues and the service is not " + "automatically removed." + ) + ui.label( + "NOTE 1: the system normally cleans up services but it might take a few minutes. " + "Only use this option when you have observed enough time passing without any change." + ).classes("text-red-600") + ui.label( + "NOTE 2: This will break the frontend for the user! If the user has the service opened, " + "it will no longer receive any status updates." + ).classes("text-red-600") + + with ui.row(): + ui.button("Remove service", color="red", on_click=remove_from_tracking) + ui.button("Cancel", on_click=confirm_dialog.close) + + ui.button( + "Remove from tracking", + icon="remove_circle", + color="red", + on_click=confirm_dialog.open, + ).tooltip("Removes the service from the dynamic-scheduler's internal tracking") + + +def _render_danger_zone(node_id: NodeID) -> None: + ui.separator() + + ui.markdown("**Danger Zone, beware!**").classes("text-2xl text-red-700") + ui.label( + "Do not use these actions if you do not know what they are doing." + ).classes("text-red-700") + + ui.label( + "They are reserved as means of recovering the system from a failing state." + ).classes("text-red-700") + + _render_remove_from_tracking(node_id) + + +@router.page("/service/{node_id}:details") +async def service_details(node_id: NodeID): + with base_page(title=f"{node_id} details"): + service_model = await get_tracked_service(get_parent_app(app), node_id) + + if not service_model: + ui.markdown( + f"Sorry, could not find any details for **node_id={node_id}**. " + "Please make sure the **node_id** is correct. " + "Also make sure you have not provided a **product_id**." + ) + return + + scheduler_internals = service_model.model_dump(mode="json") + service_status = scheduler_internals.pop("service_status", "{}") + service_status = json_loads("{}" if service_status == "" else service_status) + dynamic_service_start = scheduler_internals.pop("dynamic_service_start") + + ui.markdown("**Service Status**") + ui.code(json_dumps(service_status, indent=2), language="json") + + ui.markdown("**Scheduler Internals**") + ui.code(json_dumps(scheduler_internals, indent=2), language="json") + + ui.markdown("**Start Parameters**") + ui.code(json_dumps(dynamic_service_start, indent=2), language="json") + + ui.markdown("**Raw serialized data (the one used to render the above)**") + ui.code(service_model.model_dump_json(indent=2), language="json") + + _render_danger_zone(node_id) + + +@router.page("/service/{node_id}:stop") +async def service_stop(node_id: NodeID): + parent_app = get_parent_app(app) + + service_model = await get_tracked_service(parent_app, node_id) + if not service_model: + ui.notify(f"Could not stop service {node_id}. Was not able to find it") + return + + settings: ApplicationSettings = parent_app.state.settings + + assert service_model.user_id # nosec + assert service_model.project_id # nosec + + await stop_dynamic_service( + get_rabbitmq_rpc_client(get_parent_app(app)), + dynamic_service_stop=DynamicServiceStop( + user_id=service_model.user_id, + project_id=service_model.project_id, + node_id=node_id, + simcore_user_agent="", + save_state=True, + ), + timeout_s=int(settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT.total_seconds()), + ) + + +@router.page("/service/{node_id}/tracker:remove") +async def remove_service_from_tracking(node_id: NodeID): + parent_app = get_parent_app(app) + + service_model = await get_tracked_service(parent_app, node_id) + if not service_model: + ui.notify(f"Could not remove service {node_id}. 
Was not abel to find it") + return + + await remove_tracked_service(parent_app, node_id) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_dependencies.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_dependencies.py new file mode 100644 index 00000000000..ce43766f5a3 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_dependencies.py @@ -0,0 +1,32 @@ +# mypy: disable-error-code=truthy-function +from fastapi import Request +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase +from simcore_service_dynamic_scheduler.services.redis import get_all_redis_clients + +from ...services.rabbitmq import get_rabbitmq_client, get_rabbitmq_rpc_server + +assert get_app # nosec +assert get_reverse_url_mapper # nosec + + +def get_rabbitmq_client_from_request(request: Request) -> RabbitMQClient: + return get_rabbitmq_client(request.app) + + +def get_rabbitmq_rpc_server_from_request(request: Request) -> RabbitMQRPCClient: + return get_rabbitmq_rpc_server(request.app) + + +def get_redis_clients_from_request( + request: Request, +) -> dict[RedisDatabase, RedisClientSDK]: + return get_all_redis_clients(request.app) + + +__all__: tuple[str, ...] = ( + "get_app", + "get_reverse_url_mapper", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py new file mode 100644 index 00000000000..e72f897ca74 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_health.py @@ -0,0 +1,53 @@ +from typing import Annotated + +import arrow +from fastapi import APIRouter, Depends, FastAPI +from fastapi.responses import PlainTextResponse +from models_library.errors import ( + DOCKER_API_PROXY_UNHEALTHY_MSG, + RABBITMQ_CLIENT_UNHEALTHY_MSG, + REDIS_CLIENT_UNHEALTHY_MSG, +) +from servicelib.fastapi.docker import is_docker_api_proxy_ready +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase + +from ._dependencies import ( + get_app, + get_rabbitmq_client_from_request, + get_rabbitmq_rpc_server_from_request, + get_redis_clients_from_request, +) + +router = APIRouter() + + +class HealthCheckError(RuntimeError): + """Failed a health check""" + + +@router.get("/health", response_class=PlainTextResponse) +async def healthcheck( + app: Annotated[FastAPI, Depends(get_app)], + rabbit_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client_from_request)], + rabbit_rpc_server: Annotated[ + RabbitMQRPCClient, Depends(get_rabbitmq_rpc_server_from_request) + ], + redis_client_sdks: Annotated[ + dict[RedisDatabase, RedisClientSDK], + Depends(get_redis_clients_from_request), + ], +): + if not await is_docker_api_proxy_ready(app, timeout=1): + raise HealthCheckError(DOCKER_API_PROXY_UNHEALTHY_MSG) + + if not rabbit_client.healthy or not rabbit_rpc_server.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + if not all( + 
redis_client_sdk.is_healthy for redis_client_sdk in redis_client_sdks.values() + ): + raise HealthCheckError(REDIS_CLIENT_UNHEALTHY_MSG) + + return f"{__name__}@{arrow.utcnow().isoformat()}" diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_meta.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_meta.py new file mode 100644 index 00000000000..1a4c865ba52 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_meta.py @@ -0,0 +1,21 @@ +from collections.abc import Callable +from typing import Annotated + +from fastapi import APIRouter, Depends + +from ..._meta import API_VERSION, PROJECT_NAME +from ...models.schemas.meta import Meta +from ._dependencies import get_reverse_url_mapper + +router = APIRouter() + + +@router.get("/meta", response_model=Meta) +async def get_service_metadata( + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], +): + return Meta( + name=PROJECT_NAME, + version=API_VERSION, + docs_url=url_for("swagger_ui_html"), + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_ops.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_ops.py new file mode 100644 index 00000000000..6af7b8f88ca --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/_ops.py @@ -0,0 +1,24 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, FastAPI +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, +) + +from ...services import scheduler_interface +from ._dependencies import ( + get_app, +) + +router = APIRouter() + + +@router.get("/ops/running-services") +async def running_services( + app: Annotated[FastAPI, Depends(get_app)], +) -> list[DynamicServiceGet]: + """returns all running dynamic services. Used internally by ops to determine + when it is safe to shut down the platform""" + return await scheduler_interface.list_tracked_dynamic_services( + app, user_id=None, project_id=None + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/routes.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/routes.py new file mode 100644 index 00000000000..51cabd88ecb --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rest/routes.py @@ -0,0 +1,20 @@ +from fastapi import APIRouter, FastAPI, HTTPException +from servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +from ..._meta import API_VTAG +from . 
import _health, _meta, _ops + + +def initialize_rest_api(app: FastAPI) -> None: + app.include_router(_health.router) + + api_router = APIRouter(prefix=f"/{API_VTAG}") + api_router.include_router(_meta.router, tags=["meta"]) + api_router.include_router(_ops.router, tags=["ops"]) + app.include_router(api_router) + + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py new file mode 100644 index 00000000000..b90ed821bfa --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/_services.py @@ -0,0 +1,92 @@ +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import NonNegativeInt +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.errors import ( + ServiceWaitingForManualInterventionError, + ServiceWasNotFoundError, +) + +from ...services import scheduler_interface + +router = RPCRouter() + + +@router.expose() +async def list_tracked_dynamic_services( + app: FastAPI, *, user_id: UserID | None = None, project_id: ProjectID | None = None +) -> list[DynamicServiceGet]: + return await scheduler_interface.list_tracked_dynamic_services( + app, user_id=user_id, project_id=project_id + ) + + +@router.expose() +async def get_service_status( + app: FastAPI, *, node_id: NodeID +) -> NodeGet | DynamicServiceGet | NodeGetIdle: + return await scheduler_interface.get_service_status(app, node_id=node_id) + + +@router.expose() +async def run_dynamic_service( + app: FastAPI, *, dynamic_service_start: DynamicServiceStart +) -> NodeGet | DynamicServiceGet: + return await scheduler_interface.run_dynamic_service( + app, dynamic_service_start=dynamic_service_start + ) + + +@router.expose( + reraise_if_error_type=( + ServiceWaitingForManualInterventionError, + ServiceWasNotFoundError, + ) +) +async def stop_dynamic_service( + app: FastAPI, *, dynamic_service_stop: DynamicServiceStop +) -> None: + return await scheduler_interface.stop_dynamic_service( + app, dynamic_service_stop=dynamic_service_stop + ) + + +@router.expose() +async def get_project_inactivity( + app: FastAPI, *, project_id: ProjectID, max_inactivity_seconds: NonNegativeInt +) -> GetProjectInactivityResponse: + return await scheduler_interface.get_project_inactivity( + app, project_id=project_id, max_inactivity_seconds=max_inactivity_seconds + ) + + +@router.expose() +async def restart_user_services(app: FastAPI, *, node_id: NodeID) -> None: + await scheduler_interface.restart_user_services(app, node_id=node_id) + + 
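+# NOTE (illustrative): every handler in this module is exposed over RabbitMQ RPC under
+# DYNAMIC_SCHEDULER_RPC_NAMESPACE (see api/rpc/routes.py) and simply delegates to
+# services.scheduler_interface. Callers are expected to use the helpers in
+# servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.services; e.g. the frontend in
+# this PR stops a service roughly like:
+#
+#     await stop_dynamic_service(
+#         get_rabbitmq_rpc_client(app),
+#         dynamic_service_stop=DynamicServiceStop(...),
+#         timeout_s=int(settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT.total_seconds()),
+#     )
+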
+@router.expose() +async def retrieve_inputs( + app: FastAPI, *, node_id: NodeID, port_keys: list[ServicePortKey] +) -> RetrieveDataOutEnveloped: + return await scheduler_interface.retrieve_inputs( + app, node_id=node_id, port_keys=port_keys + ) + + +@router.expose() +async def update_projects_networks(app: FastAPI, *, project_id: ProjectID) -> None: + await scheduler_interface.update_projects_networks(app, project_id=project_id) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/routes.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/routes.py new file mode 100644 index 00000000000..f313e03aac9 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/api/rpc/routes.py @@ -0,0 +1,21 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from models_library.api_schemas_dynamic_scheduler import DYNAMIC_SCHEDULER_RPC_NAMESPACE +from servicelib.rabbitmq import RPCRouter + +from ...services.rabbitmq import get_rabbitmq_rpc_server +from . import _services + +ROUTERS: list[RPCRouter] = [ + _services.router, +] + + +async def rpc_api_routes_lifespan(app: FastAPI) -> AsyncIterator[State]: + rpc_server = get_rabbitmq_rpc_server(app) + for router in ROUTERS: + await rpc_server.register_router(router, DYNAMIC_SCHEDULER_RPC_NAMESPACE, app) + + yield {} diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py new file mode 100644 index 00000000000..58a659c2b20 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/cli.py @@ -0,0 +1,98 @@ +import logging +import os + +import typer +from settings_library.docker_api_proxy import DockerApiProxysettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.utils_cli import ( + create_settings_command, + create_version_callback, + print_as_envfile, +) + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + +main = typer.Typer(name=PROJECT_NAME) + +main.command()( + create_settings_command(settings_cls=ApplicationSettings, logger=_logger) +) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def echo_dotenv(ctx: typer.Context, *, minimal: bool = True): + """Generates and displays a valid environment variables file (also known as dot-envfile) + + Usage: + $ simcore-service-dynamic-scheduler echo-dotenv > .env + $ cat .env + $ set -o allexport; source .env; set +o allexport + """ + assert ctx # nosec + + # NOTE: we normally DO NOT USE `os.environ` to capture env vars but this is a special case + # The idea here is to have a command that can generate a **valid** `.env` file that can be used + # to initialized the app. For that reason we fill required fields of the `ApplicationSettings` with + # "fake" but valid values (e.g. generating a password or adding tags as `replace-with-api-key). + # Nonetheless, if the caller of this CLI has already some **valid** env vars in the environment we want to use them ... + # and that is why we use `os.environ`. 
+ + settings = ApplicationSettings.create_from_envs( + DYNAMIC_SCHEDULER_RABBITMQ=os.environ.get( + "DYNAMIC_SCHEDULER_RABBITMQ", + RabbitSettings.create_from_envs( + RABBIT_HOST=os.environ.get("RABBIT_HOST", "replace-with-rabbit-host"), + RABBIT_SECURE=os.environ.get("RABBIT_SECURE", "0"), + RABBIT_USER=os.environ.get("RABBIT_USER", "replace-with-rabbit-user"), + RABBIT_PASSWORD=os.environ.get( + "RABBIT_PASSWORD", "replace-with-rabbit-password" + ), + ), + ), + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET=os.environ.get( + "DYNAMIC_SCHEDULER_UI_STORAGE_SECRET", + "replace-with-ui-storage-secret", + ), + DYNAMIC_SCHEDULER_POSTGRES=os.environ.get( + "DYNAMIC_SCHEDULER_POSTGRES", + PostgresSettings.create_from_envs( + POSTGRES_HOST=os.environ.get( + "POSTGRES_HOST", "replace-with-postgres-host" + ), + POSTGRES_USER=os.environ.get( + "POSTGRES_USER", "replace-with-postgres-user" + ), + POSTGRES_PASSWORD=os.environ.get( + "POSTGRES_PASSWORD", "replace-with-postgres-password" + ), + POSTGRES_DB=os.environ.get("POSTGRES_DB", "replace-with-postgres-db"), + ), + ), + DYNAMIC_SCHEDULER_DOCKER_API_PROXY=os.environ.get( + "DYNAMIC_SCHEDULER_DOCKER_API_PROXY", + DockerApiProxysettings.create_from_envs( + DOCKER_API_PROXY_HOST=os.environ.get( + "DOCKER_API_PROXY_HOST", "replace-with-proxy-host" + ), + DOCKER_API_PROXY_USER=os.environ.get( + "DOCKER_API_PROXY_USER", "replace-with-proxy-user" + ), + DOCKER_API_PROXY_PASSWORD=os.environ.get( + "DOCKER_API_PROXY_PASSWORD", "replace-with-proxy-password" + ), + ), + ), + ) + + print_as_envfile( + settings, + compact=False, + verbose=True, + show_secrets=True, + exclude_unset=minimal, + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py new file mode 100644 index 00000000000..9f59f29859e --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/application.py @@ -0,0 +1,49 @@ +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + initialize_prometheus_instrumentation, +) +from servicelib.fastapi.openapi import override_fastapi_openapi_method +from servicelib.fastapi.profiler import initialize_profiler +from servicelib.fastapi.tracing import initialize_fastapi_app_tracing + +from .._meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY +from ..api.frontend import initialize_frontend +from ..api.rest.routes import initialize_rest_api +from . 
import events +from .settings import ApplicationSettings + + +def create_app(settings: ApplicationSettings | None = None) -> FastAPI: + app_settings = settings or ApplicationSettings.create_from_envs() + + app = FastAPI( + title=f"{PROJECT_NAME} web API", + description=SUMMARY, + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url=( + "/doc" if app_settings.DYNAMIC_SCHEDULER_SWAGGER_API_DOC_ENABLED else None + ), + redoc_url=None, + lifespan=events.create_app_lifespan(settings=app_settings), + ) + override_fastapi_openapi_method(app) + + # STATE + app.state.settings = app_settings + assert app.state.settings.API_VERSION == API_VERSION # nosec + + initialize_rest_api(app) + + if app_settings.DYNAMIC_SCHEDULER_PROMETHEUS_INSTRUMENTATION_ENABLED: + initialize_prometheus_instrumentation(app) + + initialize_frontend(app) + + if app_settings.DYNAMIC_SCHEDULER_PROFILING: + initialize_profiler(app) + + if app_settings.DYNAMIC_SCHEDULER_TRACING: + initialize_fastapi_app_tracing(app) + + return app diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/errors.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/errors.py new file mode 100644 index 00000000000..0dd4b43e4bd --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/errors.py @@ -0,0 +1,5 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class BaseDynamicSchedulerError(OsparcErrorMixin, ValueError): + ... diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/events.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/events.py new file mode 100644 index 00000000000..492834a99e3 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/events.py @@ -0,0 +1,87 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from servicelib.fastapi.docker import ( + create_remote_docker_client_input_state, + remote_docker_client_lifespan, +) +from servicelib.fastapi.monitoring import ( + create_prometheus_instrumentationmain_input_state, + prometheus_instrumentation_lifespan, +) +from servicelib.fastapi.postgres_lifespan import ( + create_postgres_database_input_state, +) +from servicelib.fastapi.tracing import get_tracing_instrumentation_lifespan + +from .._meta import APP_FINISHED_BANNER_MSG, APP_NAME, APP_STARTED_BANNER_MSG +from ..api.rpc.routes import rpc_api_routes_lifespan +from ..repository.events import repository_lifespan_manager +from ..services.catalog import catalog_lifespan +from ..services.deferred_manager import deferred_manager_lifespan +from ..services.director_v0 import director_v0_lifespan +from ..services.director_v2 import director_v2_lifespan +from ..services.notifier import get_notifier_lifespans +from ..services.rabbitmq import rabbitmq_lifespan +from ..services.redis import redis_lifespan +from ..services.service_tracker import service_tracker_lifespan +from ..services.status_monitor import status_monitor_lifespan +from .settings import ApplicationSettings + + +async def _banner_lifespan(app: FastAPI) -> AsyncIterator[State]: + _ = app + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + yield {} + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + +async def _settings_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSettings = app.state.settings + + yield { + 
**create_postgres_database_input_state(settings.DYNAMIC_SCHEDULER_POSTGRES), + **create_prometheus_instrumentationmain_input_state( + enabled=settings.DYNAMIC_SCHEDULER_PROMETHEUS_INSTRUMENTATION_ENABLED + ), + **create_remote_docker_client_input_state( + settings.DYNAMIC_SCHEDULER_DOCKER_API_PROXY + ), + } + + +def create_app_lifespan(settings: ApplicationSettings) -> LifespanManager: + app_lifespan = LifespanManager() + app_lifespan.add(_settings_lifespan) + + if settings.DYNAMIC_SCHEDULER_TRACING: + app_lifespan.add( + get_tracing_instrumentation_lifespan( + tracing_settings=settings.DYNAMIC_SCHEDULER_TRACING, + service_name=APP_NAME, + ) + ) + + app_lifespan.include(repository_lifespan_manager) + app_lifespan.add(director_v2_lifespan) + app_lifespan.add(director_v0_lifespan) + app_lifespan.add(catalog_lifespan) + app_lifespan.add(rabbitmq_lifespan) + app_lifespan.add(rpc_api_routes_lifespan) + app_lifespan.add(redis_lifespan) + + for lifespan in get_notifier_lifespans(): + app_lifespan.add(lifespan) + + app_lifespan.add(service_tracker_lifespan) + app_lifespan.add(deferred_manager_lifespan) + app_lifespan.add(status_monitor_lifespan) + + app_lifespan.add(remote_docker_client_lifespan) + + app_lifespan.add(prometheus_instrumentation_lifespan) + + app_lifespan.add(_banner_lifespan) + + return app_lifespan diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py new file mode 100644 index 00000000000..f1ce9b13d33 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/core/settings.py @@ -0,0 +1,193 @@ +import datetime +from typing import Annotated + +from common_library.basic_types import DEFAULT_FACTORY +from pydantic import AliasChoices, Field, SecretStr, TypeAdapter, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.basic_types import LogLevel, VersionTag +from settings_library.catalog import CatalogSettings +from settings_library.director_v0 import DirectorV0Settings +from settings_library.director_v2 import DirectorV2Settings +from settings_library.docker_api_proxy import DockerApiProxysettings +from settings_library.http_client_request import ClientRequestSettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + +from .._meta import API_VERSION, API_VTAG, PROJECT_NAME + + +class _BaseApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + """Base settings of any osparc service's app""" + + # CODE STATICS --------------------------------------------------------- + API_VERSION: str = API_VERSION + APP_NAME: str = PROJECT_NAME + API_VTAG: VersionTag = TypeAdapter(VersionTag).validate_python(API_VTAG) + + # RUNTIME ----------------------------------------------------------- + + DYNAMIC_SCHEDULER_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "DYNAMIC_SCHEDULER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + + DYNAMIC_SCHEDULER_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "LOG_FORMAT_LOCAL_DEV_ENABLED", + "DYNAMIC_SCHEDULER_LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description=( 
+ "Enables local development log format. WARNING: make sure it " + "is disabled if you want to have structured logs!" + ), + ), + ] = False + + DYNAMIC_SCHEDULER_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "LOG_FILTER_MAPPING", + "DYNAMIC_SCHEDULER_LOG_FILTER_MAPPING", + ), + description=( + "is a dictionary that maps specific loggers " + "(such as 'uvicorn.access' or 'gunicorn.access') to a list " + "of log message patterns that should be filtered out." + ), + ), + ] = DEFAULT_FACTORY + + DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT: Annotated[ + datetime.timedelta, + Field( + validation_alias=AliasChoices( + "DYNAMIC_SIDECAR_API_SAVE_RESTORE_STATE_TIMEOUT", + "DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT", + ), + description=( + "Time to wait before timing out when stopping a dynamic service. " + "Since services require data to be stopped, this operation is timed out after 1 hour" + ), + ), + ] = datetime.timedelta(minutes=60) + + DYNAMIC_SCHEDULER_SERVICE_UPLOAD_DOWNLOAD_TIMEOUT: Annotated[ + datetime.timedelta, + Field( + description=( + "When dynamic services upload and download data from storage, " + "sometimes very big payloads are involved. In order to handle " + "such payloads it is required to have long timeouts which " + "allow the service to finish the operation." + ), + ), + ] = datetime.timedelta(minutes=60) + + DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: Annotated[ + bool, + Field( + description=( + "this is a way to switch between different dynamic schedulers for the new style services" + # NOTE: this option should be removed when the scheduling will be done via this service + ), + ), + ] = False + + @field_validator("DYNAMIC_SCHEDULER_LOGLEVEL", mode="before") + @classmethod + def _validate_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +class ApplicationSettings(_BaseApplicationSettings): + """Web app's environment variables + + These settings includes extra configuration for the http-API + """ + + DYNAMIC_SCHEDULER_UI_STORAGE_SECRET: SecretStr = Field( + ..., + description=( + "secret required to enabled browser-based storage for the UI. 
" + "Enables the full set of features to be used for NiceUI" + ), + ) + + DYNAMIC_SCHEDULER_UI_MOUNT_PATH: Annotated[ + str, Field(description="path on the URL where the dashboard is mounted") + ] = "/dynamic-scheduler/" + + DYNAMIC_SCHEDULER_RABBITMQ: RabbitSettings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for service/rabbitmq", + ) + + DYNAMIC_SCHEDULER_REDIS: RedisSettings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for service/redis", + ) + + DYNAMIC_SCHEDULER_SWAGGER_API_DOC_ENABLED: Annotated[ + bool, Field(description="If true, it displays swagger doc at /doc") + ] = True + + CLIENT_REQUEST: ClientRequestSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SCHEDULER_DIRECTOR_V0_SETTINGS: DirectorV0Settings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for director service", + ) + + DYNAMIC_SCHEDULER_DIRECTOR_V2_SETTINGS: DirectorV2Settings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for director-v2 service", + ) + + DYNAMIC_SCHEDULER_CATALOG_SETTINGS: CatalogSettings = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for catalog service", + ) + + DYNAMIC_SCHEDULER_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + DYNAMIC_SCHEDULER_PROFILING: bool = False + + DYNAMIC_SCHEDULER_TRACING: TracingSettings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ) + + DYNAMIC_SCHEDULER_DOCKER_API_PROXY: Annotated[ + DockerApiProxysettings, + Field(json_schema_extra={"auto_default_from_env": True}), + ] + + DYNAMIC_SCHEDULER_POSTGRES: Annotated[ + PostgresSettings, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for postgres service", + ), + ] + + @field_validator("DYNAMIC_SCHEDULER_UI_MOUNT_PATH", mode="before") + @classmethod + def _ensure_ends_with_slash(cls, v: str) -> str: + if not v.endswith("/"): + msg = f"Provided mount path: '{v}' must be '/' terminated" + raise ValueError(msg) + return v diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/main.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/main.py new file mode 100644 index 00000000000..ab726883237 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/main.py @@ -0,0 +1,22 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_dynamic_scheduler.core.application import create_app +from simcore_service_dynamic_scheduler.core.settings import ApplicationSettings + +_the_settings = ApplicationSettings.create_from_envs() +logging.basicConfig(level=_the_settings.DYNAMIC_SCHEDULER_LOGLEVEL.value) +logging.root.setLevel(_the_settings.DYNAMIC_SCHEDULER_LOGLEVEL.value) +config_all_loggers( + log_format_local_dev_enabled=_the_settings.DYNAMIC_SCHEDULER_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=_the_settings.DYNAMIC_SCHEDULER_LOG_FILTER_MAPPING, + tracing_settings=_the_settings.DYNAMIC_SCHEDULER_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(_the_settings) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/__init__.py new 
file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/__init__.py new file mode 100644 index 00000000000..8063e97ab6e --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/__init__.py @@ -0,0 +1 @@ +# NOTE: all models/schemas are not in models_library this rest API is NOT used by any of the services in the stack diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/errors.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/errors.py new file mode 100644 index 00000000000..d1584834ab2 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/errors.py @@ -0,0 +1,5 @@ +from models_library.api_schemas__common.errors import DefaultApiError + + +class ApiError(DefaultApiError): + ... diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/meta.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/meta.py new file mode 100644 index 00000000000..ad73c58ac70 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/models/schemas/meta.py @@ -0,0 +1,17 @@ +from models_library.api_schemas__common.meta import BaseMeta +from pydantic import ConfigDict, HttpUrl + + +class Meta(BaseMeta): + docs_url: HttpUrl + model_config = ConfigDict( + json_schema_extra={ + "examples": [ + { + "name": "simcore_service_dynamic_scheduler", + "version": "2.4.45", + "docs_url": "https://foo.io/doc", + } + ] + } + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/events.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/events.py new file mode 100644 index 00000000000..1a93d0aeb0a --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/events.py @@ -0,0 +1,35 @@ +import logging +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from servicelib.fastapi.postgres_lifespan import ( + PostgresLifespanState, + postgres_database_lifespan, +) + +from .project_networks import ProjectNetworksRepo + +_logger = logging.getLogger(__name__) + + +async def _database_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]: + app.state.engine = state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE] + + app.state.repositories = { + ProjectNetworksRepo.__name__: ProjectNetworksRepo(app.state.engine), + } + + yield {} + + +repository_lifespan_manager = LifespanManager() +repository_lifespan_manager.add(postgres_database_lifespan) +repository_lifespan_manager.add(_database_lifespan) + + +def get_project_networks_repo(app: FastAPI) -> ProjectNetworksRepo: + assert isinstance(app.state.repositories, dict) # nosec + repo = app.state.repositories.get(ProjectNetworksRepo.__name__) + assert isinstance(repo, ProjectNetworksRepo) # nosec + return repo diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/project_networks.py 
b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/project_networks.py new file mode 100644 index 00000000000..e6a58e6c69d --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/repository/project_networks.py @@ -0,0 +1,57 @@ +import sqlalchemy as sa +from common_library.errors_classes import OsparcErrorMixin +from models_library.projects import ProjectID +from models_library.projects_networks import NetworksWithAliases, ProjectsNetworks +from simcore_postgres_database.models.projects_networks import projects_networks +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.dialects.postgresql import insert as pg_insert +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + + +class BaseProjectNetworksError(OsparcErrorMixin, RuntimeError): + msg_template: str = "project networks unexpected error" + + +class ProjectNetworkNotFoundError(BaseProjectNetworksError): + msg_template: str = "no networks found for project {project_id}" + + +class ProjectNetworksRepo: + def __init__(self, engine: AsyncEngine): + self.engine = engine + + async def get_projects_networks( + self, connection: AsyncConnection | None = None, *, project_id: ProjectID + ) -> ProjectsNetworks: + async with pass_or_acquire_connection(self.engine, connection) as conn: + result = await conn.execute( + sa.select(projects_networks).where( + projects_networks.c.project_uuid == f"{project_id}" + ) + ) + row = result.first() + if not row: + raise ProjectNetworkNotFoundError(project_id=project_id) + return ProjectsNetworks.model_validate(row) + + async def upsert_projects_networks( + self, + connection: AsyncConnection | None = None, + *, + project_id: ProjectID, + networks_with_aliases: NetworksWithAliases, + ) -> None: + projects_networks_to_insert = ProjectsNetworks.model_validate( + {"project_uuid": project_id, "networks_with_aliases": networks_with_aliases} + ) + + async with transaction_context(self.engine, connection) as conn: + row_data = projects_networks_to_insert.model_dump(mode="json") + insert_stmt = pg_insert(projects_networks).values(**row_data) + upsert_snapshot = insert_stmt.on_conflict_do_update( + index_elements=[projects_networks.c.project_uuid], set_=row_data + ) + await conn.execute(upsert_snapshot) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/__init__.py new file mode 100644 index 00000000000..8cb49b7a1a0 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/__init__.py @@ -0,0 +1,7 @@ +from ._public_client import CatalogPublicClient +from ._setup import catalog_lifespan + +__all__: tuple[str, ...] 
= ( + "CatalogPublicClient", + "catalog_lifespan", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_public_client.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_public_client.py new file mode 100644 index 00000000000..fbe160b261a --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_public_client.py @@ -0,0 +1,34 @@ +from fastapi import FastAPI +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.fastapi.app_state import SingletonInAppStateMixin + +from ._thin_client import CatalogThinClient + + +class CatalogPublicClient(SingletonInAppStateMixin): + app_state_name: str = "catalog_public_client" + + def __init__(self, app: FastAPI) -> None: + self.app = app + + async def get_services_labels( + self, service_key: ServiceKey, service_version: ServiceVersion + ) -> SimcoreServiceLabels: + response = await CatalogThinClient.get_from_app_state( + self.app + ).get_services_labels(service_key, service_version) + return TypeAdapter(SimcoreServiceLabels).validate_python(response.json()) + + async def get_services_specifications( + self, user_id: UserID, service_key: ServiceKey, service_version: ServiceVersion + ) -> ServiceSpecifications: + response = await CatalogThinClient.get_from_app_state( + self.app + ).get_services_specifications(user_id, service_key, service_version) + return TypeAdapter(ServiceSpecifications).validate_python(response.json()) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_setup.py new file mode 100644 index 00000000000..40f52050cc9 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_setup.py @@ -0,0 +1,21 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State + +from ._public_client import CatalogPublicClient +from ._thin_client import CatalogThinClient + + +async def catalog_lifespan(app: FastAPI) -> AsyncIterator[State]: + thin_client = CatalogThinClient(app) + thin_client.set_to_app_state(app) + thin_client.attach_lifespan_to(app) + + public_client = CatalogPublicClient(app) + public_client.set_to_app_state(app) + + yield {} + + CatalogPublicClient.pop_from_app_state(app) + CatalogThinClient.pop_from_app_state(app) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_thin_client.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_thin_client.py new file mode 100644 index 00000000000..98cf8b7e0ae --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/catalog/_thin_client.py @@ -0,0 +1,57 @@ +import urllib.parse + +from fastapi import FastAPI, status +from httpx import Response +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import AttachLifespanMixin +from servicelib.fastapi.http_client_thin import ( + BaseThinClient, + 
expect_status, + retry_on_errors, +) +from yarl import URL + +from ...core.settings import ApplicationSettings + + +class CatalogThinClient(SingletonInAppStateMixin, BaseThinClient, AttachLifespanMixin): + app_state_name: str = "catalog_thin_client" + + def __init__(self, app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + assert settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT # nosec + + super().__init__( + total_retry_interval=int( + settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT + ), + extra_allowed_method_names={ + "attach_lifespan_to", + "get_from_app_state", + "pop_from_app_state", + "set_to_app_state", + }, + base_url=settings.DYNAMIC_SCHEDULER_CATALOG_SETTINGS.api_base_url, + tracing_settings=settings.DYNAMIC_SCHEDULER_TRACING, + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_services_labels( + self, service_key: ServiceKey, service_version: ServiceVersion + ) -> Response: + return await self.client.get( + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}/labels" + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_services_specifications( + self, user_id: UserID, service_key: ServiceKey, service_version: ServiceVersion + ) -> Response: + request_url = URL( + f"/services/{urllib.parse.quote(service_key, safe='')}/{service_version}/specifications", + ).with_query(user_id=user_id) + return await self.client.get(f"{request_url}") diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/deferred_manager.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/deferred_manager.py new file mode 100644 index 00000000000..630a4e12158 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/deferred_manager.py @@ -0,0 +1,23 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.deferred_tasks import DeferredManager +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisDatabase + +from .redis import get_redis_client + + +async def deferred_manager_lifespan(app: FastAPI) -> AsyncIterator[State]: + rabbit_settings: RabbitSettings = app.state.settings.DYNAMIC_SCHEDULER_RABBITMQ + + redis_client_sdk = get_redis_client(app, RedisDatabase.DEFERRED_TASKS) + app.state.deferred_manager = manager = DeferredManager( + rabbit_settings, redis_client_sdk, globals_context={"app": app} + ) + await manager.setup() + + yield {} + + await manager.shutdown() diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/__init__.py new file mode 100644 index 00000000000..7b5fe80ca95 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/__init__.py @@ -0,0 +1,7 @@ +from ._public_client import DirectorV0PublicClient +from ._setup import director_v0_lifespan + +__all__: tuple[str, ...] 
= ( + "DirectorV0PublicClient", + "director_v0_lifespan", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_public_client.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_public_client.py new file mode 100644 index 00000000000..533f2a96a68 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_public_client.py @@ -0,0 +1,59 @@ +import logging +from typing import Any, cast + +import httpx +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import TypeAdapter +from servicelib.fastapi.app_state import SingletonInAppStateMixin + +from ._thin_client import DirectorV0ThinClient + +logger = logging.getLogger(__name__) + + +def _unenvelope_or_raise_error(resp: httpx.Response) -> dict | list: + """ + Director responses are enveloped + If successful response, we un-envelop it and return data as a dict + If error, is detected raise an ValueError + """ + body = resp.json() + if "data" in body: + return body["data"] # type: ignore[no-any-return] + + msg = f"Unexpected, data was not returned: {body=}" + raise ValueError(msg) + + +class DirectorV0PublicClient(SingletonInAppStateMixin): + app_state_name: str = "director_v0_public_client" + + def __init__(self, app: FastAPI) -> None: + self.app = app + + async def get_running_service_details( + self, node_id: NodeID + ) -> RunningDynamicServiceDetails: + response = await DirectorV0ThinClient.get_from_app_state( + self.app + ).get_running_interactive_service_details(node_id) + return TypeAdapter(RunningDynamicServiceDetails).validate_python( + _unenvelope_or_raise_error(response) + ) + + async def get_running_services( + self, user_id: UserID | None = None, project_id: ProjectID | None = None + ) -> list[RunningDynamicServiceDetails]: + response = await DirectorV0ThinClient.get_from_app_state( + self.app + ).get_running_interactive_services(user_id=user_id, project_id=project_id) + return [ + RunningDynamicServiceDetails(**x) + for x in cast(list[dict[str, Any]], _unenvelope_or_raise_error(response)) + ] diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_setup.py new file mode 100644 index 00000000000..ccdc96d71c7 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_setup.py @@ -0,0 +1,22 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State + +from ._public_client import DirectorV0PublicClient +from ._thin_client import DirectorV0ThinClient + + +async def director_v0_lifespan(app: FastAPI) -> AsyncIterator[State]: + + thin_client = DirectorV0ThinClient(app) + thin_client.set_to_app_state(app) + thin_client.attach_lifespan_to(app) + + public_client = DirectorV0PublicClient(app) + public_client.set_to_app_state(app) + + yield {} + + DirectorV0PublicClient.pop_from_app_state(app) + DirectorV0ThinClient.pop_from_app_state(app) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_thin_client.py 
b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_thin_client.py new file mode 100644 index 00000000000..d6d2cd27221 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v0/_thin_client.py @@ -0,0 +1,57 @@ +from common_library.exclude import as_dict_exclude_none +from fastapi import FastAPI, status +from httpx import Response +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import AttachLifespanMixin +from servicelib.fastapi.http_client_thin import ( + BaseThinClient, + expect_status, + retry_on_errors, +) +from yarl import URL + +from ...core.settings import ApplicationSettings + + +class DirectorV0ThinClient( + SingletonInAppStateMixin, BaseThinClient, AttachLifespanMixin +): + app_state_name: str = "director_v0_thin_client" + + def __init__(self, app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + assert settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT # nosec + + super().__init__( + total_retry_interval=int( + settings.CLIENT_REQUEST.HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT + ), + extra_allowed_method_names={ + "attach_lifespan_to", + "get_from_app_state", + "pop_from_app_state", + "set_to_app_state", + }, + base_url=settings.DYNAMIC_SCHEDULER_DIRECTOR_V0_SETTINGS.endpoint, + tracing_settings=settings.DYNAMIC_SCHEDULER_TRACING, + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_running_interactive_service_details( + self, node_id: NodeID + ) -> Response: + return await self.client.get(f"/running_interactive_services/{node_id}") + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_running_interactive_services( + self, user_id: UserID | None, project_id: ProjectID | None + ) -> Response: + request_url = URL("/running_interactive_services").with_query( + as_dict_exclude_none(user_id=user_id, study_id=project_id) + ) + return await self.client.get(f"{request_url}") diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/__init__.py new file mode 100644 index 00000000000..424fae9ba1e --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/__init__.py @@ -0,0 +1,6 @@ +from ._public_client import DirectorV2Client, director_v2_lifespan + +__all__: tuple[str, ...] 
= ( + "DirectorV2Client", + "director_v2_lifespan", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_public_client.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_public_client.py new file mode 100644 index 00000000000..d833d3169bf --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_public_client.py @@ -0,0 +1,150 @@ +import datetime +from collections.abc import AsyncIterator +from typing import Any + +from fastapi import FastAPI, status +from fastapi_lifespan_manager import State +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import NonNegativeInt, TypeAdapter +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import AttachLifespanMixin +from servicelib.fastapi.http_client_thin import UnexpectedStatusError +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.errors import ( + ServiceWaitingForManualInterventionError, + ServiceWasNotFoundError, +) + +from ._thin_client import DirectorV2ThinClient + + +class DirectorV2Client(SingletonInAppStateMixin, AttachLifespanMixin): + app_state_name: str = "director_v2_client" + + def __init__(self, app: FastAPI) -> None: + self.thin_client = DirectorV2ThinClient(app) + + async def setup_client(self) -> None: + await self.thin_client.setup_client() + + async def teardown_client(self) -> None: + await self.thin_client.teardown_client() + + async def get_status( + self, node_id: NodeID + ) -> NodeGet | DynamicServiceGet | NodeGetIdle: + try: + response = await self.thin_client.get_status(node_id) + dict_response: dict[str, Any] = response.json() + + # in case of legacy version + # we need to transfer the correct format! 
+ if "data" in dict_response: + return TypeAdapter(NodeGet).validate_python(dict_response["data"]) + + return TypeAdapter(DynamicServiceGet).validate_python(dict_response) + except UnexpectedStatusError as e: + if ( + e.response.status_code # type: ignore[attr-defined] # pylint:disable=no-member + == status.HTTP_404_NOT_FOUND + ): + return NodeGetIdle.from_node_id(node_id) + raise + + async def run_dynamic_service( + self, dynamic_service_start: DynamicServiceStart + ) -> NodeGet | DynamicServiceGet: + response = await self.thin_client.post_dynamic_service(dynamic_service_start) + dict_response: dict[str, Any] = response.json() + + # legacy services + if "data" in dict_response: + return TypeAdapter(NodeGet).validate_python(dict_response["data"]) + + return TypeAdapter(DynamicServiceGet).validate_python(dict_response) + + async def stop_dynamic_service( + self, + *, + node_id: NodeID, + simcore_user_agent: str, + save_state: bool, + timeout: datetime.timedelta # noqa: ASYNC109 + ) -> None: + try: + await self.thin_client.delete_dynamic_service( + node_id=node_id, + simcore_user_agent=simcore_user_agent, + save_state=save_state, + timeout=timeout, + ) + except UnexpectedStatusError as e: + if ( + e.response.status_code # type: ignore[attr-defined] # pylint:disable=no-member + == status.HTTP_409_CONFLICT + ): + raise ServiceWaitingForManualInterventionError( + node_id=node_id + ) from None + if ( + e.response.status_code # type: ignore[attr-defined] # pylint:disable=no-member + == status.HTTP_404_NOT_FOUND + ): + raise ServiceWasNotFoundError(node_id=node_id) from None + + raise + + async def retrieve_inputs( + self, + *, + node_id: NodeID, + port_keys: list[ServicePortKey], + timeout: datetime.timedelta # noqa: ASYNC109 + ) -> RetrieveDataOutEnveloped: + response = await self.thin_client.dynamic_service_retrieve( + node_id=node_id, port_keys=port_keys, timeout=timeout + ) + dict_response: dict[str, Any] = response.json() + return TypeAdapter(RetrieveDataOutEnveloped).validate_python(dict_response) + + async def list_tracked_dynamic_services( + self, *, user_id: UserID | None = None, project_id: ProjectID | None = None + ) -> list[DynamicServiceGet]: + response = await self.thin_client.get_dynamic_services( + user_id=user_id, project_id=project_id + ) + return TypeAdapter(list[DynamicServiceGet]).validate_python(response.json()) + + async def get_project_inactivity( + self, *, project_id: ProjectID, max_inactivity_seconds: NonNegativeInt + ) -> GetProjectInactivityResponse: + response = await self.thin_client.get_projects_inactivity( + project_id=project_id, max_inactivity_seconds=max_inactivity_seconds + ) + return TypeAdapter(GetProjectInactivityResponse).validate_python( + response.json() + ) + + async def restart_user_services(self, *, node_id: NodeID) -> None: + await self.thin_client.post_restart(node_id=node_id) + + async def update_projects_networks(self, *, project_id: ProjectID) -> None: + await self.thin_client.patch_projects_networks(project_id=project_id) + + +async def director_v2_lifespan(app: FastAPI) -> AsyncIterator[State]: + public_client = DirectorV2Client(app) + public_client.set_to_app_state(app) + + yield {} diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_thin_client.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_thin_client.py new file mode 100644 index 00000000000..c3afea52818 --- /dev/null +++ 
b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/director_v2/_thin_client.py @@ -0,0 +1,162 @@ +import datetime +from typing import cast + +from common_library.exclude import as_dict_exclude_none +from common_library.json_serialization import json_dumps +from fastapi import FastAPI, status +from httpx import Response, Timeout +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_resources import ServiceResourcesDictHelpers +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import NonNegativeInt +from servicelib.common_headers import ( + X_DYNAMIC_SIDECAR_REQUEST_DNS, + X_DYNAMIC_SIDECAR_REQUEST_SCHEME, + X_SIMCORE_USER_AGENT, +) +from servicelib.fastapi.http_client_thin import ( + BaseThinClient, + expect_status, + retry_on_errors, +) +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.services import ( + DEFAULT_LEGACY_WB_TO_DV2_HTTP_REQUESTS_TIMEOUT_S, +) + +from ...core.settings import ApplicationSettings + + +class DirectorV2ThinClient(BaseThinClient): + def __init__(self, app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + super().__init__( + total_retry_interval=DEFAULT_LEGACY_WB_TO_DV2_HTTP_REQUESTS_TIMEOUT_S, + base_url=settings.DYNAMIC_SCHEDULER_DIRECTOR_V2_SETTINGS.api_base_url, + default_http_client_timeout=Timeout( + DEFAULT_LEGACY_WB_TO_DV2_HTTP_REQUESTS_TIMEOUT_S + ), + extra_allowed_method_names={"attach_lifespan_to"}, + tracing_settings=settings.DYNAMIC_SCHEDULER_TRACING, + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_status(self, node_id: NodeID) -> Response: + return await self.client.get( + f"/dynamic_services/{node_id}", follow_redirects=True + ) + + @retry_on_errors() + @expect_status(status.HTTP_201_CREATED) + async def post_dynamic_service( + self, dynamic_service_start: DynamicServiceStart + ) -> Response: + post_data = { + "product_name": dynamic_service_start.product_name, + "product_api_base_url": dynamic_service_start.product_api_base_url, + "can_save": dynamic_service_start.can_save, + "user_id": dynamic_service_start.user_id, + "project_id": dynamic_service_start.project_id, + "key": dynamic_service_start.key, + "version": dynamic_service_start.version, + "node_uuid": dynamic_service_start.node_uuid, + "basepath": f"/x/{dynamic_service_start.node_uuid}", + "service_resources": ServiceResourcesDictHelpers.create_jsonable( + dynamic_service_start.service_resources + ), + "wallet_info": dynamic_service_start.wallet_info, + "pricing_info": dynamic_service_start.pricing_info, + "hardware_info": dynamic_service_start.hardware_info, + } + + headers = { + X_DYNAMIC_SIDECAR_REQUEST_DNS: dynamic_service_start.request_dns, + X_DYNAMIC_SIDECAR_REQUEST_SCHEME: dynamic_service_start.request_scheme, + X_SIMCORE_USER_AGENT: dynamic_service_start.simcore_user_agent, + } + + return await self.client.post( + "/dynamic_services", + content=json_dumps(post_data), + headers=headers, + follow_redirects=True, + ) + + async def delete_dynamic_service( + self, + *, + node_id: NodeID, + simcore_user_agent: str, + save_state: bool, + timeout: datetime.timedelta, # noqa: ASYNC109 + ) -> Response: + @retry_on_errors(total_retry_timeout_overwrite=timeout.total_seconds()) + @expect_status(status.HTTP_204_NO_CONTENT) + async def _( + self, # NOTE: required by 
retry_on_errors + ) -> Response: + headers = {X_SIMCORE_USER_AGENT: simcore_user_agent} + + return cast( + Response, + await self.client.delete( + f"dynamic_services/{node_id}?can_save={f'{save_state}'.lower()}", + headers=headers, + timeout=timeout.total_seconds(), + follow_redirects=True, + ), + ) + + return await _(self) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def dynamic_service_retrieve( + self, + *, + node_id: NodeID, + port_keys: list[ServicePortKey], + timeout: datetime.timedelta, # noqa: ASYNC109 + ) -> Response: + post_data = {"port_keys": port_keys} + return await self.client.post( + f"/dynamic_services/{node_id}:retrieve", + content=json_dumps(post_data), + timeout=timeout.total_seconds(), + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_dynamic_services( + self, *, user_id: UserID | None = None, project_id: ProjectID | None = None + ) -> Response: + return await self.client.get( + "/dynamic_services", + params=as_dict_exclude_none(user_id=user_id, project_id=project_id), + ) + + @retry_on_errors() + @expect_status(status.HTTP_200_OK) + async def get_projects_inactivity( + self, *, project_id: ProjectID, max_inactivity_seconds: NonNegativeInt + ) -> Response: + return await self.client.get( + f"/dynamic_services/projects/{project_id}/inactivity", + params={"max_inactivity_seconds": max_inactivity_seconds}, + ) + + @expect_status(status.HTTP_204_NO_CONTENT) + async def post_restart(self, *, node_id: NodeID) -> Response: + return await self.client.post(f"/dynamic_services/{node_id}:restart") + + @retry_on_errors() + @expect_status(status.HTTP_204_NO_CONTENT) + async def patch_projects_networks(self, *, project_id: ProjectID) -> Response: + return await self.client.patch( + f"/dynamic_services/projects/{project_id}/-/networks" + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/__init__.py new file mode 100644 index 00000000000..e5e1609440b --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/__init__.py @@ -0,0 +1,7 @@ +from ._notifier import notify_service_status_change +from ._setup import get_notifier_lifespans + +__all__: tuple[str, ...] 
= ( + "get_notifier_lifespans", + "notify_service_status_change", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_notifier.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_notifier.py new file mode 100644 index 00000000000..c869a368ab2 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_notifier.py @@ -0,0 +1,55 @@ +import contextlib +from collections.abc import AsyncIterator + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from fastapi_lifespan_manager import State +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.socketio import ( + SOCKET_IO_SERVICE_STATUS_EVENT, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.users import UserID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.services_utils import get_status_as_dict + + +class Notifier(SingletonInAppStateMixin): + app_state_name: str = "notifier" + + def __init__(self, sio_manager: socketio.AsyncAioPikaManager): + self._sio_manager = sio_manager + + async def notify_service_status( + self, user_id: UserID, status: NodeGet | DynamicServiceGet | NodeGetIdle + ) -> None: + await self._sio_manager.emit( + SOCKET_IO_SERVICE_STATUS_EVENT, + data=jsonable_encoder(get_status_as_dict(status)), + room=SocketIORoomStr.from_user_id(user_id), + ) + + +async def notify_service_status_change( + app: FastAPI, user_id: UserID, status: NodeGet | DynamicServiceGet | NodeGetIdle +) -> None: + notifier: Notifier = Notifier.get_from_app_state(app) + await notifier.notify_service_status(user_id=user_id, status=status) + + +async def lifespan(app: FastAPI) -> AsyncIterator[State]: + + assert app.state.external_socketio # nosec + + notifier = Notifier( + sio_manager=app.state.external_socketio, + ) + notifier.set_to_app_state(app) + assert Notifier.get_from_app_state(app) == notifier # nosec + + yield {} + + with contextlib.suppress(AttributeError): + Notifier.pop_from_app_state(app) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_setup.py new file mode 100644 index 00000000000..d9f9fd81340 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_setup.py @@ -0,0 +1,10 @@ +from collections.abc import AsyncIterator, Callable + +from fastapi import FastAPI +from fastapi_lifespan_manager import State + +from . 
import _notifier, _socketio + + + def get_notifier_lifespans() -> list[Callable[[FastAPI], AsyncIterator[State]]]: + return [_socketio.lifespan, _notifier.lifespan] diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_socketio.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_socketio.py new file mode 100644 index 00000000000..f34f6b87f09 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/notifier/_socketio.py @@ -0,0 +1,29 @@ +import logging +from collections.abc import AsyncIterator + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.socketio_utils import cleanup_socketio_async_pubsub_manager + +from ...core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +async def lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSettings = app.state.settings + + assert app.state.rabbitmq_client # nosec + + # Connect to the socket.io message broker (RabbitMQ) as an external process in write-only mode + # SEE https://python-socketio.readthedocs.io/en/stable/server.html#emitting-from-external-processes + assert settings.DYNAMIC_SCHEDULER_RABBITMQ # nosec + app.state.external_socketio = socketio.AsyncAioPikaManager( + url=settings.DYNAMIC_SCHEDULER_RABBITMQ.dsn, logger=_logger, write_only=True + ) + + yield {} + + if external_socketio := getattr(app.state, "external_socketio"): # noqa: B009 + await cleanup_socketio_async_pubsub_manager(server_manager=external_socketio) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py new file mode 100644 index 00000000000..c4357bb9439 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/rabbitmq.py @@ -0,0 +1,53 @@ +from collections.abc import AsyncIterator +from typing import cast + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from models_library.rabbitmq_messages import RabbitMessageBase +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) +from settings_library.rabbit import RabbitSettings + + +async def rabbitmq_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: RabbitSettings = app.state.settings.DYNAMIC_SCHEDULER_RABBITMQ + + await wait_till_rabbitmq_responsive(settings.dsn) + + app.state.rabbitmq_client = RabbitMQClient( + client_name="dynamic_scheduler", settings=settings + ) + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="dynamic_scheduler_rpc_client", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="dynamic_scheduler_rpc_server", settings=settings + ) + + yield {} + + await app.state.rabbitmq_client.close() + await app.state.rabbitmq_rpc_client.close() + await app.state.rabbitmq_rpc_server.close() + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + assert app.state.rabbitmq_client # nosec + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_client + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, 
app.state.rabbitmq_rpc_server) + + +async def post_message(app: FastAPI, message: RabbitMessageBase) -> None: + await get_rabbitmq_client(app).publish(message.channel_name, message) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/redis.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/redis.py new file mode 100644 index 00000000000..a2c72b0bf83 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/redis.py @@ -0,0 +1,49 @@ +from collections.abc import AsyncIterator +from typing import Final + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.redis import RedisClientSDK, RedisClientsManager, RedisManagerDBConfig +from settings_library.redis import RedisDatabase, RedisSettings + +from .._meta import APP_NAME + +_DECODE_DBS: Final[set[RedisDatabase]] = { + RedisDatabase.LOCKS, +} + +_BINARY_DBS: Final[set[RedisDatabase]] = { + RedisDatabase.DEFERRED_TASKS, + RedisDatabase.DYNAMIC_SERVICES, +} + +_ALL_REDIS_DATABASES: Final[set[RedisDatabase]] = _DECODE_DBS | _BINARY_DBS + + +async def redis_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: RedisSettings = app.state.settings.DYNAMIC_SCHEDULER_REDIS + + app.state.redis_clients_manager = manager = RedisClientsManager( + {RedisManagerDBConfig(database=x, decode_responses=False) for x in _BINARY_DBS} + | { + RedisManagerDBConfig(database=x, decode_responses=True) for x in _DECODE_DBS + }, + settings, + client_name=APP_NAME, + ) + await manager.setup() + + yield {} + + await manager.shutdown() + + +def get_redis_client(app: FastAPI, database: RedisDatabase) -> RedisClientSDK: + manager: RedisClientsManager = app.state.redis_clients_manager + return manager.client(database) + + +def get_all_redis_clients( + app: FastAPI, +) -> dict[RedisDatabase, RedisClientSDK]: + return {d: get_redis_client(app, d) for d in _ALL_REDIS_DATABASES} diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py new file mode 100644 index 00000000000..ff279fb75c9 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/scheduler_interface.py @@ -0,0 +1,130 @@ +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import NonNegativeInt + +from ..core.settings import ApplicationSettings +from .director_v2 import DirectorV2Client +from .service_tracker import set_request_as_running, set_request_as_stopped + + +async def list_tracked_dynamic_services( + app: FastAPI, *, user_id: UserID | None = None, project_id: ProjectID | None = None +) -> list[DynamicServiceGet]: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + return await 
director_v2_client.list_tracked_dynamic_services( + user_id=user_id, project_id=project_id + ) + + +async def get_service_status( + app: FastAPI, *, node_id: NodeID +) -> NodeGet | DynamicServiceGet | NodeGetIdle: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + response: NodeGet | DynamicServiceGet | NodeGetIdle = ( + await director_v2_client.get_status(node_id) + ) + return response + + +async def run_dynamic_service( + app: FastAPI, *, dynamic_service_start: DynamicServiceStart +) -> NodeGet | DynamicServiceGet: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + response: NodeGet | DynamicServiceGet = ( + await director_v2_client.run_dynamic_service(dynamic_service_start) + ) + + await set_request_as_running(app, dynamic_service_start) + return response + + +async def stop_dynamic_service( + app: FastAPI, *, dynamic_service_stop: DynamicServiceStop +) -> None: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + await director_v2_client.stop_dynamic_service( + node_id=dynamic_service_stop.node_id, + simcore_user_agent=dynamic_service_stop.simcore_user_agent, + save_state=dynamic_service_stop.save_state, + timeout=settings.DYNAMIC_SCHEDULER_STOP_SERVICE_TIMEOUT, + ) + + await set_request_as_stopped(app, dynamic_service_stop) + + +async def get_project_inactivity( + app: FastAPI, *, project_id: ProjectID, max_inactivity_seconds: NonNegativeInt +) -> GetProjectInactivityResponse: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + response: GetProjectInactivityResponse = ( + await director_v2_client.get_project_inactivity( + project_id=project_id, max_inactivity_seconds=max_inactivity_seconds + ) + ) + return response + + +async def restart_user_services(app: FastAPI, *, node_id: NodeID) -> None: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + await director_v2_client.restart_user_services(node_id=node_id) + + +async def retrieve_inputs( + app: FastAPI, *, node_id: NodeID, port_keys: list[ServicePortKey] +) -> RetrieveDataOutEnveloped: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + return await director_v2_client.retrieve_inputs( + node_id=node_id, + port_keys=port_keys, + timeout=settings.DYNAMIC_SCHEDULER_SERVICE_UPLOAD_DOWNLOAD_TIMEOUT, + ) + + +async def update_projects_networks(app: FastAPI, *, project_id: ProjectID) -> None: + settings: ApplicationSettings = app.state.settings + if settings.DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER: + raise NotImplementedError + + director_v2_client = DirectorV2Client.get_from_app_state(app) + await director_v2_client.update_projects_networks(project_id=project_id) diff --git 
a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py new file mode 100644 index 00000000000..fee6fc069f3 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/__init__.py @@ -0,0 +1,33 @@ +from ._api import ( + NORMAL_RATE_POLL_INTERVAL, + get_all_tracked_services, + get_tracked_service, + get_user_id_for_service, + remove_tracked_service, + set_frontend_notified_for_service, + set_if_status_changed_for_service, + set_request_as_running, + set_request_as_stopped, + set_service_scheduled_to_run, + set_service_status_task_uid, + should_notify_frontend_for_service, +) +from ._models import TrackedServiceModel +from ._setup import service_tracker_lifespan + +__all__: tuple[str, ...] = ( + "get_all_tracked_services", + "get_tracked_service", + "get_user_id_for_service", + "service_tracker_lifespan", + "NORMAL_RATE_POLL_INTERVAL", + "remove_tracked_service", + "set_frontend_notified_for_service", + "set_if_status_changed_for_service", + "set_request_as_running", + "set_request_as_stopped", + "set_service_scheduled_to_run", + "set_service_status_task_uid", + "should_notify_frontend_for_service", + "TrackedServiceModel", +) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py new file mode 100644 index 00000000000..09e4c3b965f --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_api.py @@ -0,0 +1,248 @@ +import inspect +import logging +from datetime import timedelta +from typing import Final + +import arrow +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects_nodes_io import NodeID +from models_library.services_enums import ServiceState +from models_library.users import UserID +from servicelib.deferred_tasks import TaskUID + +from ._models import SchedulerServiceState, TrackedServiceModel, UserRequestedState +from ._setup import get_tracker + +_logger = logging.getLogger(__name__) + + +_LOW_RATE_POLL_INTERVAL: Final[timedelta] = timedelta(seconds=1) +NORMAL_RATE_POLL_INTERVAL: Final[timedelta] = timedelta(seconds=5) +_MAX_PERIOD_WITHOUT_SERVICE_STATUS_UPDATES: Final[timedelta] = timedelta(seconds=60) + + +async def set_request_as_running( + app: FastAPI, + dynamic_service_start: DynamicServiceStart, +) -> None: + """Stores intention to `start` request""" + await get_tracker(app).save( + dynamic_service_start.node_uuid, + TrackedServiceModel( + dynamic_service_start=dynamic_service_start, + requested_state=UserRequestedState.RUNNING, + project_id=dynamic_service_start.project_id, + user_id=dynamic_service_start.user_id, + ), + ) + + +async def set_request_as_stopped( + app: FastAPI, dynamic_service_stop: DynamicServiceStop +) -> None: + """Stores intention to `stop` request""" + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(dynamic_service_stop.node_id) + + if model is None: + model = TrackedServiceModel( + dynamic_service_start=None, + 
user_id=dynamic_service_stop.user_id, + project_id=dynamic_service_stop.project_id, + requested_state=UserRequestedState.STOPPED, + ) + + model.requested_state = UserRequestedState.STOPPED + await tracker.save(dynamic_service_stop.node_id, model) + + +def _get_service_state( + status: NodeGet | DynamicServiceGet | NodeGetIdle, +) -> ServiceState: + # Attributes where to find the state + # NodeGet -> service_state + # DynamicServiceGet -> state + # NodeGetIdle -> service_state + state_key = "state" if isinstance(status, DynamicServiceGet) else "service_state" + + state: ServiceState | str = getattr(status, state_key) + result: str = state.value if isinstance(state, ServiceState) else state + return ServiceState(result) + + +def _get_poll_interval(status: NodeGet | DynamicServiceGet | NodeGetIdle) -> timedelta: + if _get_service_state(status) != ServiceState.RUNNING: + return _LOW_RATE_POLL_INTERVAL + + return NORMAL_RATE_POLL_INTERVAL + + +def _get_current_scheduler_service_state( + requested_state: UserRequestedState, + status: NodeGet | DynamicServiceGet | NodeGetIdle, +) -> SchedulerServiceState: + """ + Computes the `SchedulerServiceState` used internally by the scheduler + to decide about a service's future. + """ + + if isinstance(status, NodeGetIdle): + return SchedulerServiceState.IDLE + + service_state: ServiceState = _get_service_state(status) + + if requested_state == UserRequestedState.RUNNING: + if service_state == ServiceState.RUNNING: + return SchedulerServiceState.RUNNING + + if ( + ServiceState.PENDING # type:ignore[operator] + <= service_state + <= ServiceState.STARTING + ): + return SchedulerServiceState.STARTING + + if service_state < ServiceState.PENDING or service_state > ServiceState.RUNNING: + return SchedulerServiceState.UNEXPECTED_OUTCOME + + if requested_state == UserRequestedState.STOPPED: + if service_state >= ServiceState.RUNNING: # type:ignore[operator] + return SchedulerServiceState.STOPPING + + if service_state < ServiceState.RUNNING: + return SchedulerServiceState.UNEXPECTED_OUTCOME + + msg = f"Could not determine current_state from: '{requested_state=}', '{status=}'" + raise TypeError(msg) + + +def _log_skipping_operation(node_id: NodeID) -> None: + # the caller is at index 1 (index 0 is the current function) + caller_name = inspect.stack()[1].function + + _logger.info( + "Could not find a %s entry for node_id %s: skipping %s", + TrackedServiceModel.__name__, + node_id, + caller_name, + ) + + +async def set_if_status_changed_for_service( + app: FastAPI, node_id: NodeID, status: NodeGet | DynamicServiceGet | NodeGetIdle +) -> bool: + """returns ``True`` if the tracker detected a status change""" + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(node_id) + if model is None: + _log_skipping_operation(node_id) + return False + + # set new polling interval in the future + model.set_check_status_after_to(_get_poll_interval(status)) + model.service_status_task_uid = None + model.scheduled_to_run = False + + # check if model changed + json_status = status.model_dump_json() + if model.service_status != json_status: + model.service_status = json_status + model.current_state = _get_current_scheduler_service_state( + model.requested_state, status + ) + await tracker.save(node_id, model) + return True + + return False + + +async def should_notify_frontend_for_service( + app: FastAPI, node_id: NodeID, *, status_changed: bool +) -> bool: + """ + Checks if it's time to notify the frontend. 
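+ A notification is due when the status changed or when more than + _MAX_PERIOD_WITHOUT_SERVICE_STATUS_UPDATES has elapsed since the last notification.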
+ The frontend will be notified at regular intervals and on changes + Avoids sending too many updates. + """ + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(node_id) + + if model is None: + return False + + # check if too much time has passed since the last time an update was sent + return ( + status_changed + or arrow.utcnow().timestamp() - model.last_status_notification + > _MAX_PERIOD_WITHOUT_SERVICE_STATUS_UPDATES.total_seconds() + ) + + +async def set_frontend_notified_for_service(app: FastAPI, node_id: NodeID) -> None: + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(node_id) + if model is None: + _log_skipping_operation(node_id) + return + + model.set_last_status_notification_to_now() + await tracker.save(node_id, model) + + +async def set_service_scheduled_to_run( + app: FastAPI, node_id: NodeID, delay_from_now: timedelta +) -> None: + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(node_id) + if model is None: + _log_skipping_operation(node_id) + return + + model.scheduled_to_run = True + model.set_check_status_after_to(delay_from_now) + await tracker.save(node_id, model) + + +async def set_service_status_task_uid( + app: FastAPI, node_id: NodeID, task_uid: TaskUID +) -> None: + tracker = get_tracker(app) + model: TrackedServiceModel | None = await tracker.load(node_id) + if model is None: + _log_skipping_operation(node_id) + return + + model.service_status_task_uid = task_uid + await tracker.save(node_id, model) + + +async def remove_tracked_service(app: FastAPI, node_id: NodeID) -> None: + """ + Removes the service from tracking (usually after stop completes) + # NOTE: does not raise if node_id is not found + """ + await get_tracker(app).delete(node_id) + + +async def get_tracked_service( + app: FastAPI, node_id: NodeID +) -> TrackedServiceModel | None: + """Returns information about the tracked service""" + return await get_tracker(app).load(node_id) + + +async def get_all_tracked_services(app: FastAPI) -> dict[NodeID, TrackedServiceModel]: + """Returns all tracked services""" + return await get_tracker(app).all() + + +async def get_user_id_for_service(app: FastAPI, node_id: NodeID) -> UserID | None: + """returns user_id for the service""" + model: TrackedServiceModel | None = await get_tracker(app).load(node_id) + return model.user_id if model else None diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_models.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_models.py new file mode 100644 index 00000000000..55a30cf2e8a --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_models.py @@ -0,0 +1,142 @@ +from datetime import timedelta +from decimal import Decimal +from enum import auto +from typing import Any, Callable, Final +from uuid import UUID + +import arrow +import umsgpack # type: ignore[import-untyped] +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.projects import ProjectID +from models_library.users import UserID +from models_library.utils.enums import StrAutoEnum +from pydantic import BaseModel, Field +from servicelib.deferred_tasks import TaskUID + +# `umsgpack.Ext`` extension types are part of the msgpack specification +# allows to define serialization and deserialization rules for custom types +# see 
https://github.com/msgpack/msgpack/blob/master/spec.md#extension-types + + _UUID_TYPE: Final[int] = 0x00 + _DECIMAL_TYPE: Final[int] = 0x01 + + _PACKB_EXTENSION_TYPES: Final[dict[type[Any], Callable[[Any], umsgpack.Ext]]] = { + # helpers to serialize an object to bytes + UUID: lambda obj: umsgpack.Ext(_UUID_TYPE, obj.bytes), + Decimal: lambda obj: umsgpack.Ext(_DECIMAL_TYPE, f"{obj}".encode()), +} + +_UNPACKB_EXTENSION_TYPES: Final[dict[int, Callable[[umsgpack.Ext], Any]]] = { + # helpers to deserialize an object from bytes + _UUID_TYPE: lambda ext: UUID(bytes=ext.data), + _DECIMAL_TYPE: lambda ext: Decimal(ext.data.decode()), +} + + +class UserRequestedState(StrAutoEnum): + RUNNING = auto() + STOPPED = auto() + + +class SchedulerServiceState(StrAutoEnum): + # service was started and is running as expected + RUNNING = auto() + # service is not present + IDLE = auto() + # something went wrong while starting/stopping service + UNEXPECTED_OUTCOME = auto() + + # service is being started + STARTING = auto() + # service is being stopped + STOPPING = auto() + + # service status has not been determined + UNKNOWN = auto() + + +class TrackedServiceModel(BaseModel): # pylint:disable=too-many-instance-attributes + + dynamic_service_start: DynamicServiceStart | None = Field( + description=( + "used to create the service at any given moment if the requested_state is RUNNING; " + "can be set to None only when stopping the service" + ) + ) + + user_id: UserID | None = Field( + description="required for propagating status changes to the frontend" + ) + project_id: ProjectID | None = Field( + description="required for propagating status changes to the frontend" + ) + + requested_state: UserRequestedState = Field( + description=("state of the service desired by the user: RUNNING or STOPPED") + ) + + current_state: SchedulerServiceState = Field( + default=SchedulerServiceState.UNKNOWN, + description="set after parsing the incoming state via the API calls", + ) + + def __setattr__(self, name, value): + if name == "current_state" and value != self.current_state: + self.last_state_change = arrow.utcnow().timestamp() + super().__setattr__(name, value) + + last_state_change: float = Field( + default_factory=lambda: arrow.utcnow().timestamp(), + description="keeps track of when the current_state was last updated", + ) + + ############################# + ### SERVICE STATUS UPDATE ### + ############################# + + scheduled_to_run: bool = Field( + default=False, + description="set when a job will be immediately scheduled", + ) + + service_status: str = Field( + default="", + description="stored mainly for debugging; this is used to compute ``current_state``", + ) + service_status_task_uid: TaskUID | None = Field( + default=None, + description="uid of the job currently fetching the status", + ) + + check_status_after: float = Field( + default_factory=lambda: arrow.utcnow().timestamp(), + description="used to determine when to poll the status again", + ) + + last_status_notification: float = Field( + default=0, + description="used to determine when the status was last notified", + ) + + def set_check_status_after_to(self, delay_from_now: timedelta) -> None: + self.check_status_after = (arrow.utcnow() + delay_from_now).timestamp() + + def set_last_status_notification_to_now(self) -> None: + self.last_status_notification = arrow.utcnow().timestamp() + + ##################### + ### SERIALIZATION ### + ##################### + + def to_bytes(self) -> bytes: + result: bytes = umsgpack.packb( + self.model_dump(), 
ext_handlers=_PACKB_EXTENSION_TYPES + ) + return result + + @classmethod + def from_bytes(cls, data: bytes) -> "TrackedServiceModel": + unpacked_data = umsgpack.unpackb(data, ext_handlers=_UNPACKB_EXTENSION_TYPES) + return cls(**unpacked_data) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_setup.py new file mode 100644 index 00000000000..b00a4cb2874 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_setup.py @@ -0,0 +1,20 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from settings_library.redis import RedisDatabase + +from ..redis import get_redis_client +from ._tracker import Tracker + + +async def service_tracker_lifespan(app: FastAPI) -> AsyncIterator[State]: + app.state.service_tracker = Tracker( + get_redis_client(app, RedisDatabase.DYNAMIC_SERVICES) + ) + yield {} + + +def get_tracker(app: FastAPI) -> Tracker: + tracker: Tracker = app.state.service_tracker + return tracker diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_tracker.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_tracker.py new file mode 100644 index 00000000000..489cee15310 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/service_tracker/_tracker.py @@ -0,0 +1,44 @@ +from dataclasses import dataclass +from typing import Final + +from models_library.projects_nodes_io import NodeID +from servicelib.redis import RedisClientSDK + +from ._models import TrackedServiceModel + +_KEY_PREFIX: Final[str] = "t::" + + +def _get_key(node_id: NodeID) -> str: + return f"{_KEY_PREFIX}{node_id}" + + +@dataclass +class Tracker: + redis_client_sdk: RedisClientSDK + + async def save(self, node_id: NodeID, model: TrackedServiceModel) -> None: + await self.redis_client_sdk.redis.set(_get_key(node_id), model.to_bytes()) + + async def load(self, node_id: NodeID) -> TrackedServiceModel | None: + model_as_bytes: bytes | None = await self.redis_client_sdk.redis.get( + _get_key(node_id) + ) + return ( + None + if model_as_bytes is None + else TrackedServiceModel.from_bytes(model_as_bytes) + ) + + async def delete(self, node_id: NodeID) -> None: + await self.redis_client_sdk.redis.delete(_get_key(node_id)) + + async def all(self) -> dict[NodeID, TrackedServiceModel]: + found_keys = await self.redis_client_sdk.redis.keys(f"{_KEY_PREFIX}*") + found_values = await self.redis_client_sdk.redis.mget(found_keys) + + return { + NodeID(k.decode().lstrip(_KEY_PREFIX)): TrackedServiceModel.from_bytes(v) + for k, v in zip(found_keys, found_values, strict=True) + if v is not None + } diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/__init__.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/__init__.py new file mode 100644 index 00000000000..c165d51a75b --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/__init__.py @@ -0,0 +1,3 @@ +from ._setup import status_monitor_lifespan + +__all__: tuple[str, ...] 
= ("status_monitor_lifespan",) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py new file mode 100644 index 00000000000..3f6efbfaecb --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_deferred_get_status.py @@ -0,0 +1,83 @@ +import logging +from datetime import timedelta + +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from servicelib.deferred_tasks import BaseDeferredHandler, TaskUID +from servicelib.deferred_tasks._base_deferred_handler import DeferredContext + +from .. import scheduler_interface, service_tracker +from ..notifier import notify_service_status_change + +_logger = logging.getLogger(__name__) + + +class DeferredGetStatus(BaseDeferredHandler[NodeGet | DynamicServiceGet | NodeGetIdle]): + @classmethod + async def get_timeout(cls, context: DeferredContext) -> timedelta: + assert context # nosec + return timedelta(seconds=5) + + @classmethod + async def start( # type:ignore[override] # pylint:disable=arguments-differ + cls, node_id: NodeID + ) -> DeferredContext: + _logger.debug("Getting service status for %s", node_id) + return {"node_id": node_id} + + @classmethod + async def on_created(cls, task_uid: TaskUID, context: DeferredContext) -> None: + """called after deferred was scheduled to run""" + app: FastAPI = context["app"] + node_id: NodeID = context["node_id"] + + await service_tracker.set_service_status_task_uid(app, node_id, task_uid) + + @classmethod + async def run( + cls, context: DeferredContext + ) -> NodeGet | DynamicServiceGet | NodeGetIdle: + app: FastAPI = context["app"] + node_id: NodeID = context["node_id"] + + service_status: NodeGet | RunningDynamicServiceDetails | NodeGetIdle = ( + await scheduler_interface.get_service_status(app, node_id=node_id) + ) + _logger.debug( + "Service status type=%s, %s", type(service_status), service_status + ) + return service_status + + @classmethod + async def on_result( + cls, result: NodeGet | DynamicServiceGet | NodeGetIdle, context: DeferredContext + ) -> None: + app: FastAPI = context["app"] + node_id: NodeID = context["node_id"] + + _logger.debug("Received status for service '%s': '%s'", node_id, result) + + status_changed: bool = await service_tracker.set_if_status_changed_for_service( + app, node_id, result + ) + if await service_tracker.should_notify_frontend_for_service( + app, node_id, status_changed=status_changed + ): + user_id: UserID | None = await service_tracker.get_user_id_for_service( + app, node_id + ) + if user_id: + await notify_service_status_change(app, user_id, result) + await service_tracker.set_frontend_notified_for_service(app, node_id) + else: + _logger.info( + "Did not find a user for '%s', skipping status delivery of: %s", + node_id, + result, + ) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py new file mode 100644 index 
00000000000..750b0dbdc63 --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_monitor.py @@ -0,0 +1,152 @@ +import asyncio +import logging +from datetime import timedelta +from functools import cached_property +from typing import Final + +import arrow +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from pydantic import NonNegativeFloat, NonNegativeInt +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task_utils import exclusive_periodic +from servicelib.utils import limited_gather +from settings_library.redis import RedisDatabase + +from .. import service_tracker +from ..redis import get_redis_client +from ..service_tracker import NORMAL_RATE_POLL_INTERVAL, TrackedServiceModel +from ..service_tracker._models import SchedulerServiceState, UserRequestedState +from ._deferred_get_status import DeferredGetStatus + +_logger = logging.getLogger(__name__) + +_INTERVAL_BETWEEN_CHECKS: Final[timedelta] = timedelta(seconds=1) +_MAX_CONCURRENCY: Final[NonNegativeInt] = 10 + +_REMOVE_AFTER_IDLE_FOR: Final[timedelta] = timedelta(minutes=5) + + +async def _start_get_status_deferred( + app: FastAPI, node_id: NodeID, *, next_check_delay: timedelta
) -> None: + await service_tracker.set_service_scheduled_to_run(app, node_id, next_check_delay) + await DeferredGetStatus.start(node_id=node_id) + + +def _can_be_removed(model: TrackedServiceModel) -> bool: + # requested **as** `STOPPED` + # service **reports** `IDLE` + if ( + model.current_state == SchedulerServiceState.IDLE + and model.requested_state == UserRequestedState.STOPPED + ): + return True + + # NOTE: currently the dynamic-scheduler does not automatically start a + # service whose requested_state is RUNNING; + # to avoid monitoring services which no longer exist, + # the service has to be removed. + + # requested as `RUNNING` + # service **reports** `IDLE` since `_REMOVE_AFTER_IDLE_FOR` + if ( # noqa: SIM103 + model.current_state == SchedulerServiceState.IDLE + and model.requested_state == UserRequestedState.RUNNING + and arrow.utcnow().timestamp() - model.last_state_change + > _REMOVE_AFTER_IDLE_FOR.total_seconds() + ): + return True + + return False + + +class Monitor: + def __init__(self, app: FastAPI, status_worker_interval: timedelta) -> None: + self.app = app + self.status_worker_interval = status_worker_interval + + @cached_property + def status_worker_interval_seconds(self) -> NonNegativeFloat: + return self.status_worker_interval.total_seconds() + + async def _worker_check_services_require_status_update(self) -> None: + """ + Check if any service requires its status to be polled. + Note that the interval at which the status is polled can vary. + This is a relatively low resource check. 
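+ Tracked services that became idle are removed from tracking; for the others, a new + DeferredGetStatus poll is scheduled once their check_status_after deadline has + passed and no status poll is already in flight.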
+ """ + + # NOTE: this worker runs on only once across all instances of the scheduler + + models: dict[ + NodeID, TrackedServiceModel + ] = await service_tracker.get_all_tracked_services(self.app) + + to_remove: list[NodeID] = [] + to_start: list[NodeID] = [] + + current_timestamp = arrow.utcnow().timestamp() + + for node_id, model in models.items(): + if _can_be_removed(model): + to_remove.append(node_id) + continue + + job_not_running = not ( + model.scheduled_to_run + and model.service_status_task_uid is not None + and await DeferredGetStatus.is_present(model.service_status_task_uid) + ) + wait_period_finished = current_timestamp > model.check_status_after + if job_not_running and wait_period_finished: + to_start.append(node_id) + else: + _logger.info( + "Skipping status check for %s, because: %s or %s", + node_id, + f"{job_not_running=}", + ( + f"{wait_period_finished=}" + if wait_period_finished + else f"can_start_in={model.check_status_after - current_timestamp}" + ), + ) + + _logger.debug("Removing tracked services: '%s'", to_remove) + await limited_gather( + *( + service_tracker.remove_tracked_service(self.app, node_id) + for node_id in to_remove + ), + limit=_MAX_CONCURRENCY, + ) + + _logger.debug("Poll status for tracked services: '%s'", to_start) + await limited_gather( + *( + _start_get_status_deferred( + self.app, node_id, next_check_delay=NORMAL_RATE_POLL_INTERVAL + ) + for node_id in to_start + ), + limit=_MAX_CONCURRENCY, + ) + + async def setup(self) -> None: + @exclusive_periodic( + get_redis_client(self.app, RedisDatabase.LOCKS), + task_interval=_INTERVAL_BETWEEN_CHECKS, + retry_after=_INTERVAL_BETWEEN_CHECKS, + ) + async def _periodic_check_services_require_status_update() -> None: + await self._worker_check_services_require_status_update() + + self.app.state.status_monitor_background_task = asyncio.create_task( + _periodic_check_services_require_status_update(), + name="periodic_service_status_update", + ) + + async def shutdown(self) -> None: + if getattr(self.app.state, "status_monitor_background_task", None): + await cancel_wait_task(self.app.state.status_monitor_background_task) diff --git a/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_setup.py b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_setup.py new file mode 100644 index 00000000000..e0fad2a09fd --- /dev/null +++ b/services/dynamic-scheduler/src/simcore_service_dynamic_scheduler/services/status_monitor/_setup.py @@ -0,0 +1,26 @@ +from collections.abc import AsyncIterator +from datetime import timedelta +from typing import Final + +from fastapi import FastAPI +from fastapi_lifespan_manager import State + +from ._monitor import Monitor + +_STATUS_WORKER_DEFAULT_INTERVAL: Final[timedelta] = timedelta(seconds=1) + + +async def status_monitor_lifespan(app: FastAPI) -> AsyncIterator[State]: + app.state.status_monitor = monitor = Monitor( + app, status_worker_interval=_STATUS_WORKER_DEFAULT_INTERVAL + ) + await monitor.setup() + + yield {} + + await monitor.shutdown() + + +def get_monitor(app: FastAPI) -> Monitor: + monitor: Monitor = app.state.status_monitor + return monitor diff --git a/services/dynamic-scheduler/tests/assets/legacy_tracked_service_model.bin b/services/dynamic-scheduler/tests/assets/legacy_tracked_service_model.bin new file mode 100644 index 00000000000..8c26d4e8ba5 Binary files /dev/null and b/services/dynamic-scheduler/tests/assets/legacy_tracked_service_model.bin differ diff --git 
a/services/dynamic-scheduler/tests/conftest.py b/services/dynamic-scheduler/tests/conftest.py new file mode 100644 index 00000000000..2cb7f135829 --- /dev/null +++ b/services/dynamic-scheduler/tests/conftest.py @@ -0,0 +1,167 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import string +from collections.abc import AsyncIterator +from pathlib import Path +from typing import Final + +import nicegui +import pytest +import simcore_service_dynamic_scheduler +import yaml +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.redis import RedisClientsManager, RedisManagerDBConfig +from servicelib.utils import logged_gather +from settings_library.redis import RedisDatabase, RedisSettings +from simcore_service_dynamic_scheduler.core.application import create_app + +pytest_plugins = [ + "pytest_simcore.cli_runner", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.postgres_service", + "pytest_simcore.rabbit_service", + "pytest_simcore.redis_service", + "pytest_simcore.repository_paths", +] + + +@pytest.fixture(scope="session") +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "dynamic-scheduler" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_dynamic_scheduler")) + return service_folder + + +@pytest.fixture(scope="session") +def installed_package_dir() -> Path: + dirpath = Path(simcore_service_dynamic_scheduler.__file__).resolve().parent + assert dirpath.exists() + return dirpath + + +@pytest.fixture +def docker_compose_service_dynamic_scheduler_env_vars( + services_docker_compose_file: Path, + env_devel_dict: EnvVarsDict, +) -> EnvVarsDict: + """env vars injected at the docker-compose""" + content = yaml.safe_load(services_docker_compose_file.read_text()) + environment = content["services"]["dynamic-schdlr"].get("environment", {}) + + envs: EnvVarsDict = {} + for name, value in environment.items(): + try: + envs[name] = string.Template(value).substitute(env_devel_dict) + except (KeyError, ValueError) as err: + pytest.fail( + f"{err}: {value} is not defined in .env-devel but used as RHS in docker-compose services['dynamic-schdlr'].environment[{name}]" + ) + return envs + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + docker_compose_service_dynamic_scheduler_env_vars: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_dynamic_scheduler_env_vars, + "DYNAMIC_SCHEDULER_TRACING": "null", + }, + ) + + +_EVENTS_MODULE: Final[str] = "simcore_service_dynamic_scheduler.core.events" + + +@pytest.fixture +def disable_rabbitmq_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.rabbitmq_lifespan") + mocker.patch(f"{_EVENTS_MODULE}.rpc_api_routes_lifespan") + + +@pytest.fixture +def disable_redis_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.redis_lifespan") + + +@pytest.fixture +def disable_service_tracker_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.service_tracker_lifespan") + + +@pytest.fixture +def 
disable_deferred_manager_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.deferred_manager_lifespan") + + +@pytest.fixture +def disable_notifier_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.get_notifier_lifespans") + + +@pytest.fixture +def disable_status_monitor_lifespan(mocker: MockerFixture) -> None: + mocker.patch(f"{_EVENTS_MODULE}.status_monitor_lifespan") + + +@pytest.fixture +def disable_postgres_lifespan( + mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch +) -> None: + setenvs_from_dict( + monkeypatch, + { + "POSTGRES_USER": "test_user", + "POSTGRES_PASSWORD": "test_password", + "POSTGRES_DB": "test_db", + "POSTGRES_HOST": "test_host", + }, + ) + + mocker.patch(f"{_EVENTS_MODULE}.repository_lifespan_manager") + + +MAX_TIME_FOR_APP_TO_STARTUP: Final[float] = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN: Final[float] = 10 + + +@pytest.fixture +async def app( + app_environment: EnvVarsDict, is_pdb_enabled: bool +) -> AsyncIterator[FastAPI]: + # forces rebuild of middleware stack on next test + nicegui.app.user_middleware.clear() + nicegui.app.middleware_stack = None + test_app = create_app() + async with LifespanManager( + test_app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + yield test_app + + +@pytest.fixture +async def remove_redis_data(redis_service: RedisSettings) -> None: + async with RedisClientsManager( + {RedisManagerDBConfig(database=x) for x in RedisDatabase}, + redis_service, + client_name="pytest", + ) as manager: + await logged_gather( + *[manager.client(d).redis.flushall() for d in RedisDatabase] + ) diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py b/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py new file mode 100644 index 00000000000..663091247d1 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/conftest.py @@ -0,0 +1,136 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import asyncio +import subprocess +from collections.abc import AsyncIterable +from contextlib import suppress +from typing import Final +from unittest.mock import AsyncMock + +import nicegui +import pytest +import sqlalchemy as sa +from fastapi import FastAPI, status +from httpx import AsyncClient +from hypercorn.asyncio import serve +from hypercorn.config import Config +from playwright.async_api import Page, async_playwright +from pytest_mock import MockerFixture +from pytest_simcore.helpers.postgres_tools import PostgresTestConfig +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.utils_service import DEFAULT_FASTAPI_PORT +from simcore_service_dynamic_scheduler.core.application import create_app +from simcore_service_dynamic_scheduler.core.settings import ApplicationSettings +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +_MODULE: Final["str"] = "simcore_service_dynamic_scheduler" + + +@pytest.fixture +def disable_status_monitor_background_task(mocker: MockerFixture) -> None: + mocker.patch( + f"{_MODULE}.services.status_monitor._monitor.Monitor._worker_check_services_require_status_update" + ) + + +@pytest.fixture +def mock_stop_dynamic_service(mocker: MockerFixture) -> AsyncMock: + async_mock = AsyncMock() + mocker.patch( + f"{_MODULE}.api.frontend.routes._service.stop_dynamic_service", 
async_mock + ) + return async_mock + + +@pytest.fixture +def mock_remove_tracked_service(mocker: MockerFixture) -> AsyncMock: + async_mock = AsyncMock() + mocker.patch( + f"{_MODULE}.api.frontend.routes._service.remove_tracked_service", async_mock + ) + return async_mock + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + postgres_db: sa.engine.Engine, + postgres_host_config: PostgresTestConfig, + disable_status_monitor_background_task: None, + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + remove_redis_data: None, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +def server_host_port() -> str: + return f"127.0.0.1:{DEFAULT_FASTAPI_PORT}" + + +@pytest.fixture +def not_initialized_app(app_environment: EnvVarsDict) -> FastAPI: + # forces rebuild of middleware stack on next test + nicegui.app.user_middleware.clear() + nicegui.app.middleware_stack = None + return create_app() + + +@pytest.fixture +async def app_runner( + not_initialized_app: FastAPI, server_host_port: str +) -> AsyncIterable[None]: + + shutdown_event = asyncio.Event() + + async def _wait_for_shutdown_event(): + await shutdown_event.wait() + + async def _run_server() -> None: + config = Config() + config.bind = [server_host_port] + + with suppress(asyncio.CancelledError): + await serve( + not_initialized_app, config, shutdown_trigger=_wait_for_shutdown_event + ) + + server_task = asyncio.create_task(_run_server()) + + settings: ApplicationSettings = not_initialized_app.state.settings + + home_page_url = ( + f"http://{server_host_port}{settings.DYNAMIC_SCHEDULER_UI_MOUNT_PATH}" + ) + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(2) + ): + with attempt: + async with AsyncClient(timeout=1) as client: + response = await client.get(f"{home_page_url}") + assert response.status_code == status.HTTP_200_OK + + yield + + shutdown_event.set() + await server_task + + +@pytest.fixture +def download_playwright_browser() -> None: + subprocess.run( # noqa: S603 + ["playwright", "install", "chromium"], check=True # noqa: S607 + ) + + +@pytest.fixture +async def async_page(download_playwright_browser: None) -> AsyncIterable[Page]: + async with async_playwright() as p: + browser = await p.chromium.launch() + page = await browser.new_page() + yield page + await browser.close() diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py b/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py new file mode 100644 index 00000000000..91c2058c869 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/helpers.py @@ -0,0 +1,104 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import sys +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from pathlib import Path +from typing import Final +from uuid import uuid4 + +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from playwright.async_api import Locator, Page +from pydantic import NonNegativeFloat, NonNegativeInt, TypeAdapter +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +_HERE: Final[Path] = ( + Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent +) +_DEFAULT_TIMEOUT: Final[NonNegativeFloat] = 10 + + +@asynccontextmanager +async def take_screenshot_on_error( + async_page: Page, +) -> AsyncIterator[None]: + try: + yield + # allows to also 
capture exceptions from `with pytest.raises(...)` + except BaseException: + path = _HERE / f"{uuid4()}.ignore.png" + await async_page.screenshot(path=path) + print(f"Please check: {path}") + + raise + + +async def _get_locator( + async_page: Page, + text: str, + instances: NonNegativeInt | None, + timeout: float, # noqa: ASYNC109 +) -> Locator: + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(timeout) + ): + with attempt: + locator = async_page.get_by_text(text) + count = await locator.count() + if instances is None: + assert count > 0, f"could not find text='{text}'" + else: + assert ( + count == instances + ), f"found {count} instances of text='{text}'. Expected {instances}" + return locator + + +async def assert_contains_text( + async_page: Page, + text: str, + instances: NonNegativeInt | None = None, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + await _get_locator(async_page, text, instances=instances, timeout=timeout) + + +async def click_on_text( + async_page: Page, + text: str, + instances: NonNegativeInt | None = None, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + locator = await _get_locator(async_page, text, instances=instances, timeout=timeout) + await locator.click() + + +async def assert_not_contains_text( + async_page: Page, + text: str, + timeout: float = _DEFAULT_TIMEOUT, # noqa: ASYNC109 +) -> None: + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(timeout) + ): + with attempt: + locator = async_page.get_by_text(text) + assert await locator.count() < 1, f"found text='{text}' in body" + + +def get_new_style_service_status(state: str) -> DynamicServiceGet: + return TypeAdapter(DynamicServiceGet).validate_python( + DynamicServiceGet.model_config["json_schema_extra"]["examples"][0] + | {"state": state} + ) + + +def get_legacy_service_status(state: str) -> NodeGet: + return TypeAdapter(NodeGet).validate_python( + NodeGet.model_config["json_schema_extra"]["examples"][0] + | {"service_state": state} + ) diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py new file mode 100644 index 00000000000..8ba68fbe632 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_index.py @@ -0,0 +1,134 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +from fastapi import FastAPI +from helpers import ( + assert_contains_text, + assert_not_contains_text, + click_on_text, + get_legacy_service_status, + get_new_style_service_status, + take_screenshot_on_error, +) +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from models_library.projects_nodes_io import NodeID +from playwright.async_api import Page +from simcore_service_dynamic_scheduler.api.frontend._utils import get_settings +from simcore_service_dynamic_scheduler.services.service_tracker import ( + set_if_status_changed_for_service, + set_request_as_running, 
set_request_as_stopped, +) +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", + "redis", +] + +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + + +async def test_index_with_elements( + app_runner: None, + async_page: Page, + server_host_port: str, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + get_dynamic_service_stop: Callable[[NodeID], DynamicServiceStop], +): + await async_page.goto( + f"{server_host_port}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}" + ) + + # 1. no content + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "0") + await assert_not_contains_text(async_page, "Details") + + # 2. add elements and check + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(uuid4()) + ) + await set_request_as_stopped(not_initialized_app, get_dynamic_service_stop(uuid4())) + + await assert_contains_text(async_page, "2") + await assert_contains_text(async_page, "Details", instances=2) + + +@pytest.mark.parametrize( + "service_status", + [ + get_new_style_service_status("running"), + get_legacy_service_status("running"), + ], +) +async def test_main_page( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + service_status: NodeGet | DynamicServiceGet, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + mock_stop_dynamic_service: AsyncMock, +): + await async_page.goto( + f"{server_host_port}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}" + ) + + # 1. no content + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "0") + await assert_not_contains_text(async_page, "Details") + + # 2. start a service shows content + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_if_status_changed_for_service( + not_initialized_app, node_id, service_status + ) + + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details") + + # 3. click on stop and then cancel + await click_on_text(async_page, "Stop Service") + await assert_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + await click_on_text(async_page, "Cancel") + + # 4. 
click on stop then confirm + + await assert_not_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + await click_on_text(async_page, "Stop Service") + await assert_contains_text( + async_page, "The service will be stopped and its data will be saved" + ) + + mock_stop_dynamic_service.assert_not_awaited() + await click_on_text(async_page, "Stop Now") + + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(3) + ): + with attempt: + mock_stop_dynamic_service.assert_awaited_once() diff --git a/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py new file mode 100644 index 00000000000..a4f0c3993d0 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_frontend/test_api_frontend_routes_service.py @@ -0,0 +1,130 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from unittest.mock import AsyncMock + +import pytest +from fastapi import FastAPI +from helpers import ( + assert_contains_text, + click_on_text, + get_legacy_service_status, + get_new_style_service_status, + take_screenshot_on_error, +) +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet +from models_library.projects_nodes_io import NodeID +from playwright.async_api import Page +from simcore_service_dynamic_scheduler.api.frontend._utils import get_settings +from simcore_service_dynamic_scheduler.services.service_tracker import ( + set_if_status_changed_for_service, + set_request_as_running, +) +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", + "redis", +] + +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + + +async def test_service_details_no_status_present( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], +): + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + + await async_page.goto( + f"{server_host_port}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}" + ) + + # 1. one service is tracked + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details", instances=1) + + # 2. 
open details page + await click_on_text(async_page, "Details") + # NOTE: if something is wrong with the page the button to remove from tracking + # will not be present + await assert_contains_text(async_page, "Remove from tracking", instances=1) + + +async def test_service_details_renders_friendly_404( + app_runner: None, async_page: Page, server_host_port: str, node_id: NodeID +): + # node was not started + url = f"http://{server_host_port}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}service/{node_id}:details" + await async_page.goto(f"{url}") + await assert_contains_text(async_page, "Sorry could not find any details for") + + +@pytest.mark.parametrize( + "service_status", + [ + get_new_style_service_status("running"), + get_legacy_service_status("running"), + ], +) +async def test_service_details( + app_runner: None, + async_page: Page, + server_host_port: str, + node_id: NodeID, + not_initialized_app: FastAPI, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + mock_remove_tracked_service: AsyncMock, + service_status: NodeGet | DynamicServiceGet, +): + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_request_as_running( + not_initialized_app, get_dynamic_service_start(node_id) + ) + await set_if_status_changed_for_service( + not_initialized_app, node_id, service_status + ) + + await async_page.goto( + f"{server_host_port}{get_settings().DYNAMIC_SCHEDULER_UI_MOUNT_PATH}" + ) + + # 1. one service is tracked + await assert_contains_text(async_page, "Total tracked services:") + await assert_contains_text(async_page, "1") + await assert_contains_text(async_page, "Details", instances=1) + + # 2. open details page + await click_on_text(async_page, "Details") + + # 3. click "Remove from tracking" -> cancel + await click_on_text(async_page, "Remove from tracking") + await click_on_text(async_page, "Cancel") + mock_remove_tracked_service.assert_not_awaited() + + # 4. 
click "Remove from tracking" -> confirm + await click_on_text(async_page, "Remove from tracking") + await click_on_text(async_page, "Remove service") + async with take_screenshot_on_error(async_page): + async for attempt in AsyncRetrying( + reraise=True, wait=wait_fixed(0.1), stop=stop_after_delay(3) + ): + with attempt: + mock_remove_tracked_service.assert_awaited_once() diff --git a/services/dynamic-scheduler/tests/unit/api_rest/conftest.py b/services/dynamic-scheduler/tests/unit/api_rest/conftest.py new file mode 100644 index 00000000000..eafc8a694e9 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_rest/conftest.py @@ -0,0 +1,39 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +from collections.abc import AsyncIterator + +import pytest +from fastapi import FastAPI +from httpx import AsyncClient +from httpx._transports.asgi import ASGITransport +from pytest_simcore.helpers.typing_env import EnvVarsDict + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_rabbitmq_lifespan: None, + disable_redis_lifespan: None, + disable_service_tracker_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +async def client( + app_environment: EnvVarsDict, app: FastAPI +) -> AsyncIterator[AsyncClient]: + # - Needed for app to trigger start/stop event handlers + # - Prefer this client instead of fastapi.testclient.TestClient + async with AsyncClient( + transport=ASGITransport(app=app), + base_url="http://payments.testserver.io", + headers={"Content-Type": "application/json"}, + ) as httpx_client: + # pylint:disable=protected-access + assert isinstance(httpx_client._transport, ASGITransport) # noqa: SLF001 + yield httpx_client diff --git a/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py new file mode 100644 index 00000000000..42bc7396c9c --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__health.py @@ -0,0 +1,86 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from datetime import datetime + +import pytest +from fastapi import status +from httpx import AsyncClient +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dynamic_scheduler.api.rest._health import HealthCheckError + + +class MockHealth: + def __init__(self, is_ok: bool) -> None: + self.healthy: bool = is_ok + self.is_healthy: bool = is_ok + + +@pytest.fixture +def mock_rabbitmq_clients( + mocker: MockerFixture, + rabbit_client_ok: bool, + rabbit_rpc_server_ok: bool, +) -> None: + base_path = "simcore_service_dynamic_scheduler.api.rest._dependencies" + + mocker.patch( + f"{base_path}.get_rabbitmq_client", return_value=MockHealth(rabbit_client_ok) + ) + mocker.patch( + f"{base_path}.get_rabbitmq_rpc_server", + return_value=MockHealth(rabbit_rpc_server_ok), + ) + + +@pytest.fixture +def mock_redis_client( + mocker: MockerFixture, + redis_client_ok: bool, +) -> None: + base_path = "simcore_service_dynamic_scheduler.api.rest._dependencies" + mocker.patch( + f"{base_path}.get_all_redis_clients", + return_value={0: MockHealth(redis_client_ok)}, + ) + + +@pytest.fixture +def mock_docker_api_proxy(mocker: 
MockerFixture, docker_api_proxy_ok: bool) -> None: + base_path = "simcore_service_dynamic_scheduler.api.rest._health" + mocker.patch( + f"{base_path}.is_docker_api_proxy_ready", return_value=docker_api_proxy_ok + ) + + +@pytest.fixture +def app_environment( + mock_docker_api_proxy: None, + mock_rabbitmq_clients: None, + mock_redis_client: None, + app_environment: EnvVarsDict, +) -> EnvVarsDict: + return app_environment + + +@pytest.mark.parametrize( + "rabbit_client_ok, rabbit_rpc_server_ok, redis_client_ok, docker_api_proxy_ok, is_ok", + [ + pytest.param(True, True, True, True, True, id="ok"), + pytest.param(False, True, True, True, False, id="rabbit_client_bad"), + pytest.param(True, False, True, True, False, id="rabbit_rpc_server_bad"), + pytest.param(True, True, False, True, False, id="redis_client_bad"), + pytest.param(True, True, True, False, False, id="docker_api_proxy_bad"), + ], +) +async def test_health(client: AsyncClient, is_ok: bool): + if is_ok: + response = await client.get("/health") + assert response.status_code == status.HTTP_200_OK + assert datetime.fromisoformat(response.text.split("@")[1]) + else: + with pytest.raises(HealthCheckError): + await client.get("/health") diff --git a/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__meta.py b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__meta.py new file mode 100644 index 00000000000..2fdb1de6afe --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__meta.py @@ -0,0 +1,12 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +from fastapi import status +from httpx import AsyncClient +from simcore_service_dynamic_scheduler._meta import API_VTAG +from simcore_service_dynamic_scheduler.models.schemas.meta import Meta + + +async def test_meta(client: AsyncClient): + response = await client.get(f"/{API_VTAG}/meta") + assert response.status_code == status.HTTP_200_OK + assert Meta.model_validate_json(response.text) diff --git a/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__ops.py b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__ops.py new file mode 100644 index 00000000000..985cc86a4a3 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_rest/test_api_rest__ops.py @@ -0,0 +1,47 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument +import json +from collections.abc import Iterator + +import pytest +import respx +from fastapi import status +from fastapi.encoders import jsonable_encoder +from httpx import AsyncClient +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, +) +from pydantic import TypeAdapter +from simcore_service_dynamic_scheduler._meta import API_VTAG + + +@pytest.fixture +def mock_director_v2_service( + running_services: list[DynamicServiceGet], +) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True!
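+ # NOTE: with assert_all_mocked=True any request that does not match a mocked + # route raises instead of reaching a real director-v2, keeping this test hermetic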
+ ) as mock: + mock.get("/dynamic_services").respond( + status.HTTP_200_OK, + text=json.dumps(jsonable_encoder(running_services)), + ) + + yield None + + +@pytest.mark.parametrize( + "running_services", + [ + DynamicServiceGet.model_json_schema()["examples"], + [], + ], +) +async def test_running_services(mock_director_v2_service: None, client: AsyncClient): + response = await client.get(f"/{API_VTAG}/ops/running-services") + assert response.status_code == status.HTTP_200_OK + assert isinstance( + TypeAdapter(list[DynamicServiceGet]).validate_python(response.json()), list + ) diff --git a/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py b/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py new file mode 100644 index 00000000000..c9b974e4454 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/api_rpc/test_api_rpc__services.py @@ -0,0 +1,604 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import json +from collections.abc import Awaitable, Callable, Iterator + +import pytest +import respx +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_directorv2.dynamic_services import ( + DynamicServiceGet, + GetProjectInactivityResponse, + RetrieveDataOutEnveloped, +) +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient, RPCServerError +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler import services +from servicelib.rabbitmq.rpc_interfaces.dynamic_scheduler.errors import ( + ServiceWaitingForManualInterventionError, + ServiceWasNotFoundError, +) +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings + +pytest_simcore_core_services_selection = [ + "redis", + "rabbit", +] + + +@pytest.fixture +def node_id_new_style(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def node_id_legacy(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def node_not_found(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def service_status_new_style() -> DynamicServiceGet: + return TypeAdapter(DynamicServiceGet).validate_python( + DynamicServiceGet.model_json_schema()["examples"][1] + ) + + +@pytest.fixture +def service_status_legacy() -> NodeGet: + return TypeAdapter(NodeGet).validate_python( + NodeGet.model_json_schema()["examples"][1] + ) + + +@pytest.fixture +def fake_director_v0_base_url() -> str: + return "http://fake-director-v0" + + +@pytest.fixture +def mock_director_v0_service_state( + fake_director_v0_base_url: str, + node_id_legacy: NodeID, + node_not_found: NodeID, + service_status_legacy: NodeGet, +) -> Iterator[None]: + with respx.mock( + base_url=fake_director_v0_base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as mock: + mock.get(f"/fake-status/{node_id_legacy}").respond( + status.HTTP_200_OK, + text=json.dumps( + jsonable_encoder({"data": service_status_legacy.model_dump()}) + ), + ) + + # service was not found response + mock.get(f"fake-status/{node_not_found}").respond(status.HTTP_404_NOT_FOUND) + + yield None + + +@pytest.fixture +def mock_director_v2_service_state( + node_id_new_style: NodeID, + node_id_legacy: NodeID, + node_not_found: NodeID, + service_status_new_style: DynamicServiceGet, + fake_director_v0_base_url: str, +) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.get("/dynamic_services").respond( + status.HTTP_200_OK, + text=json.dumps( + jsonable_encoder(DynamicServiceGet.model_json_schema()["examples"]) + ), + ) + + mock.get(f"/dynamic_services/{node_id_new_style}").respond( + status.HTTP_200_OK, text=service_status_new_style.model_dump_json() + ) + + # emulate redirect response to director-v0 + + # this will provide a reply + mock.get(f"/dynamic_services/{node_id_legacy}").respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={ + "Location": f"{fake_director_v0_base_url}/fake-status/{node_id_legacy}" + }, + ) + + # will result in not being found + mock.get(f"/dynamic_services/{node_not_found}").respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={ + "Location": f"{fake_director_v0_base_url}/fake-status/{node_not_found}" + }, + ) + + yield None + + +@pytest.fixture( + params=[ + False, + pytest.param( + True, + marks=pytest.mark.xfail( + reason="INTERNAL scheduler implementation is missing" + ), + ), + ] +) +def use_internal_scheduler(request: pytest.FixtureRequest) -> bool: + return request.param + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + use_internal_scheduler: bool, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + setenvs_from_dict( + monkeypatch, + { + "DYNAMIC_SCHEDULER_USE_INTERNAL_SCHEDULER": f"{use_internal_scheduler}", + }, + ) + return app_environment + + +@pytest.fixture +async def rpc_client( + disable_postgres_lifespan: None, + app_environment: EnvVarsDict, + mock_director_v2_service_state: None, + mock_director_v0_service_state: None, + app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") + + +async def test_list_tracked_dynamic_services(rpc_client: RabbitMQRPCClient): + results = await services.list_tracked_dynamic_services( + rpc_client, user_id=None, project_id=None + ) + assert len(results) == 2 + assert results == [ + TypeAdapter(DynamicServiceGet).validate_python(x) + for x in DynamicServiceGet.model_json_schema()["examples"] + ] + + +async def test_get_state( + rpc_client: RabbitMQRPCClient, + node_id_new_style: NodeID, + node_id_legacy: NodeID, + node_not_found: NodeID, + service_status_new_style: DynamicServiceGet, + service_status_legacy: NodeGet, +): + # status from director-v2 + + result = await services.get_service_status(rpc_client, node_id=node_id_new_style) + assert result == service_status_new_style + + # status from director-v0 + result = await services.get_service_status(rpc_client, node_id=node_id_legacy) + assert result == service_status_legacy + + # node not tracked any of the two directors + result = await services.get_service_status(rpc_client, node_id=node_not_found) + assert result == 
NodeGetIdle.from_node_id(node_not_found) + + +@pytest.fixture +def dynamic_service_start() -> DynamicServiceStart: + # one for legacy and one for new style? + return TypeAdapter(DynamicServiceStart).validate_python( + DynamicServiceStart.model_json_schema()["example"] + ) + + +@pytest.fixture +def mock_director_v0_service_run( + fake_director_v0_base_url: str, service_status_legacy: NodeGet +) -> Iterator[None]: + with respx.mock( + base_url=fake_director_v0_base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.post("/fake-service-run").respond( + status.HTTP_201_CREATED, + text=json.dumps( + jsonable_encoder({"data": service_status_legacy.model_dump()}) + ), + ) + + yield None + + +@pytest.fixture +def mock_director_v2_service_run( + is_legacy: bool, + service_status_new_style: DynamicServiceGet, + service_status_legacy: NodeGet, + fake_director_v0_base_url: str, +) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + request = mock.post("/dynamic_services") + if is_legacy: + request.respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={"Location": f"{fake_director_v0_base_url}/fake-service-run"}, + ) + else: + request.respond( + status.HTTP_201_CREATED, + text=service_status_new_style.model_dump_json(), + ) + yield None + + +@pytest.mark.parametrize("is_legacy", [True, False]) +async def test_run_dynamic_service( + mock_director_v0_service_run: None, + mock_director_v2_service_run: None, + rpc_client: RabbitMQRPCClient, + dynamic_service_start: DynamicServiceStart, + is_legacy: bool, +): + result = await services.run_dynamic_service( + rpc_client, dynamic_service_start=dynamic_service_start + ) + + if is_legacy: + assert isinstance(result, NodeGet) + else: + assert isinstance(result, DynamicServiceGet) + + +@pytest.fixture +def simcore_user_agent(faker: Faker) -> str: + return faker.pystr() + + +@pytest.fixture +def node_id(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def node_id_not_found(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def node_id_manual_intervention(faker: Faker) -> NodeID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def user_id() -> UserID: + return 42 + + +@pytest.fixture +def project_id(faker: Faker) -> ProjectID: + return faker.uuid4(cast_to=None) + + +@pytest.fixture +def mock_director_v0_service_stop( + fake_director_v0_base_url: str, + node_id: NodeID, + node_id_not_found: NodeID, + node_id_manual_intervention: NodeID, + save_state: bool, +) -> Iterator[None]: + can_save_str = f"{save_state}".lower() + with respx.mock( + base_url=fake_director_v0_base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as mock: + mock.delete(f"/fake-service-stop-ok/{node_id}?can_save={can_save_str}").respond( + status.HTTP_204_NO_CONTENT + ) + + mock.delete( + f"/fake-service-stop-not-found/{node_id_not_found}?can_save={can_save_str}" + ).respond(status.HTTP_404_NOT_FOUND) + + mock.delete( + f"/fake-service-stop-manual/{node_id_manual_intervention}?can_save={can_save_str}" + ).respond(status.HTTP_409_CONFLICT) + + yield None + + +@pytest.fixture +def mock_director_v2_service_stop( + node_id: NodeID, + node_id_not_found: NodeID, + node_id_manual_intervention: NodeID, + is_legacy: bool, + fake_director_v0_base_url: str, + save_state: bool, +) -> Iterator[None]: + can_save_str = f"{save_state}".lower() + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + request_ok = mock.delete(f"/dynamic_services/{node_id}?can_save={can_save_str}") + if is_legacy: + request_ok.respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={ + "Location": f"{fake_director_v0_base_url}/fake-service-stop-ok/{node_id}?can_save={can_save_str}" + }, + ) + else: + request_ok.respond(status.HTTP_204_NO_CONTENT) + + request_not_found = mock.delete( + f"/dynamic_services/{node_id_not_found}?can_save={can_save_str}" + ) + if is_legacy: + request_not_found.respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={ + "Location": f"{fake_director_v0_base_url}/fake-service-stop-not-found/{node_id_not_found}?can_save={can_save_str}" + }, + ) + else: + request_not_found.respond(status.HTTP_404_NOT_FOUND) + + request_manual_intervention = mock.delete( + f"/dynamic_services/{node_id_manual_intervention}?can_save={can_save_str}" + ) + if is_legacy: + request_manual_intervention.respond( + status.HTTP_307_TEMPORARY_REDIRECT, + headers={ + "Location": f"{fake_director_v0_base_url}/fake-service-stop-manual/{node_id_manual_intervention}?can_save={can_save_str}" + }, + ) + else: + request_manual_intervention.respond(status.HTTP_409_CONFLICT) + + yield None + + +@pytest.mark.parametrize("is_legacy", [True, False]) +@pytest.mark.parametrize("save_state", [True, False]) +async def test_stop_dynamic_service( + mock_director_v0_service_stop: None, + mock_director_v2_service_stop: None, + rpc_client: RabbitMQRPCClient, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + node_id_not_found: NodeID, + node_id_manual_intervention: NodeID, + simcore_user_agent: str, + save_state: bool, +): + def _get_rpc_stop(with_node_id: NodeID) -> DynamicServiceStop: + return DynamicServiceStop( + user_id=user_id, + project_id=project_id, + node_id=with_node_id, + simcore_user_agent=simcore_user_agent, + save_state=save_state, + ) + + # service was stopped + result = await services.stop_dynamic_service( + rpc_client, + dynamic_service_stop=_get_rpc_stop(node_id), + timeout_s=5, + ) + assert result is None + + # service was not found + with pytest.raises(ServiceWasNotFoundError): + await services.stop_dynamic_service( + rpc_client, + dynamic_service_stop=_get_rpc_stop(node_id_not_found), + timeout_s=5, + ) + + # service awaits for manual intervention + with pytest.raises(ServiceWaitingForManualInterventionError): + await services.stop_dynamic_service( + rpc_client, + dynamic_service_stop=_get_rpc_stop(node_id_manual_intervention), + timeout_s=5, + ) + + +@pytest.fixture +def mock_raise_generic_error( + mocker: MockerFixture, +) -> None: + module_base = ( + "simcore_service_dynamic_scheduler.services.director_v2._public_client" + ) + mocker.patch( + 
f"{module_base}.DirectorV2Client.stop_dynamic_service", + autospec=True, + side_effect=Exception("raised as expected"), + ) + + +@pytest.mark.parametrize("save_state", [True, False]) +async def test_stop_dynamic_service_serializes_generic_errors( + mock_raise_generic_error: None, + rpc_client: RabbitMQRPCClient, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + simcore_user_agent: str, + save_state: bool, +): + with pytest.raises( + RPCServerError, match="While running method 'stop_dynamic_service'" + ): + await services.stop_dynamic_service( + rpc_client, + dynamic_service_stop=DynamicServiceStop( + user_id=user_id, + project_id=project_id, + node_id=node_id, + simcore_user_agent=simcore_user_agent, + save_state=save_state, + ), + timeout_s=5, + ) + + +@pytest.fixture +def inactivity_response() -> GetProjectInactivityResponse: + return TypeAdapter(GetProjectInactivityResponse).validate_python( + GetProjectInactivityResponse.model_json_schema()["example"] + ) + + +@pytest.fixture +def mock_director_v2_get_project_inactivity( + project_id: ProjectID, inactivity_response: GetProjectInactivityResponse +) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.get(f"/dynamic_services/projects/{project_id}/inactivity").respond( + status.HTTP_200_OK, text=inactivity_response.model_dump_json() + ) + yield None + + +async def test_get_project_inactivity( + mock_director_v2_get_project_inactivity: None, + rpc_client: RabbitMQRPCClient, + project_id: ProjectID, + inactivity_response: GetProjectInactivityResponse, +): + result = await services.get_project_inactivity( + rpc_client, project_id=project_id, max_inactivity_seconds=5 + ) + assert result == inactivity_response + + +@pytest.fixture +def mock_director_v2_restart_user_services(node_id: NodeID) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.post(f"/dynamic_services/{node_id}:restart").respond( + status.HTTP_204_NO_CONTENT + ) + yield None + + +async def test_restart_user_services( + mock_director_v2_restart_user_services: None, + rpc_client: RabbitMQRPCClient, + node_id: NodeID, +): + await services.restart_user_services(rpc_client, node_id=node_id, timeout_s=5) + + +@pytest.fixture +def mock_director_v2_service_retrieve_inputs(node_id: NodeID) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as mock: + mock.post(f"/dynamic_services/{node_id}:retrieve").respond( + status.HTTP_200_OK, + text=TypeAdapter(RetrieveDataOutEnveloped) + .validate_python( + RetrieveDataOutEnveloped.model_json_schema()["examples"][0] + ) + .model_dump_json(), + ) + + yield None + + +async def test_retrieve_inputs( + mock_director_v2_service_retrieve_inputs: None, + rpc_client: RabbitMQRPCClient, + node_id: NodeID, +): + results = await services.retrieve_inputs( + rpc_client, node_id=node_id, port_keys=[], timeout_s=10 + ) + assert ( + results.model_dump(mode="python") + == RetrieveDataOutEnveloped.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def mock_director_v2_update_projects_networks(project_id: ProjectID) -> Iterator[None]: + with respx.mock( + base_url="http://director-v2:8000/v2", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! + ) as mock: + mock.patch(f"/dynamic_services/projects/{project_id}/-/networks").respond( + status.HTTP_204_NO_CONTENT + ) + yield None + + +async def test_update_projects_networks( + mock_director_v2_update_projects_networks: None, + rpc_client: RabbitMQRPCClient, + project_id: ProjectID, +): + await services.update_projects_networks(rpc_client, project_id=project_id) diff --git a/services/dynamic-scheduler/tests/unit/conftest.py b/services/dynamic-scheduler/tests/unit/conftest.py new file mode 100644 index 00000000000..dd59a127201 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/conftest.py @@ -0,0 +1,32 @@ +from collections.abc import Callable +from copy import deepcopy + +import pytest +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.projects_nodes_io import NodeID +from pydantic import TypeAdapter + + +@pytest.fixture +def get_dynamic_service_start() -> Callable[[NodeID], DynamicServiceStart]: + def _(node_id: NodeID) -> DynamicServiceStart: + dict_data = deepcopy(DynamicServiceStart.model_json_schema()["example"]) + dict_data["service_uuid"] = f"{node_id}" + return TypeAdapter(DynamicServiceStart).validate_python(dict_data) + + return _ + + +@pytest.fixture +def get_dynamic_service_stop() -> Callable[[NodeID], DynamicServiceStop]: + def _(node_id: NodeID) -> DynamicServiceStop: + dict_data = deepcopy( + DynamicServiceStop.model_config["json_schema_extra"]["example"] + ) + dict_data["node_id"] = f"{node_id}" + return TypeAdapter(DynamicServiceStop).validate_python(dict_data) + + return _ diff --git a/services/dynamic-scheduler/tests/unit/service_tracker/test__api.py b/services/dynamic-scheduler/tests/unit/service_tracker/test__api.py new file mode 100644 index 00000000000..5ce6c8c3d1c --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/service_tracker/test__api.py @@ -0,0 +1,344 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from datetime import timedelta +from typing import Any, Final, NamedTuple +from uuid import uuid4 + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects_nodes_io import NodeID +from models_library.services_enums import ServiceState +from pydantic import NonNegativeInt, 
TypeAdapter +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.deferred_tasks import TaskUID +from servicelib.utils import limited_gather +from settings_library.redis import RedisSettings +from simcore_service_dynamic_scheduler.services.service_tracker import ( + get_all_tracked_services, + get_tracked_service, + remove_tracked_service, + set_if_status_changed_for_service, + set_request_as_running, + set_request_as_stopped, + set_service_status_task_uid, +) +from simcore_service_dynamic_scheduler.services.service_tracker._api import ( + _LOW_RATE_POLL_INTERVAL, + NORMAL_RATE_POLL_INTERVAL, + _get_current_scheduler_service_state, + _get_poll_interval, +) +from simcore_service_dynamic_scheduler.services.service_tracker._models import ( + SchedulerServiceState, + UserRequestedState, +) + +pytest_simcore_core_services_selection = [ + "redis", +] +pytest_simcore_ops_services_selection = [ + # "redis-commander", +] + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_rabbitmq_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, + redis_service: RedisSettings, + remove_redis_data: None, +) -> EnvVarsDict: + return app_environment + + +async def test_services_tracer_set_as_running_set_as_stopped( + app: FastAPI, + node_id: NodeID, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + get_dynamic_service_stop: Callable[[NodeID], DynamicServiceStop], +): + async def _remove_service() -> None: + await remove_tracked_service(app, node_id) + assert await get_tracked_service(app, node_id) is None + assert await get_all_tracked_services(app) == {} + + async def _set_as_running() -> None: + await set_request_as_running(app, get_dynamic_service_start(node_id)) + tracked_model = await get_tracked_service(app, node_id) + assert tracked_model + assert tracked_model.requested_state == UserRequestedState.RUNNING + + async def _set_as_stopped() -> None: + await set_request_as_stopped(app, get_dynamic_service_stop(node_id)) + tracked_model = await get_tracked_service(app, node_id) + assert tracked_model + assert tracked_model.requested_state == UserRequestedState.STOPPED + + # request as running then as stopped + await _remove_service() + await _set_as_running() + await _set_as_stopped() + + # request as stopped then as running + await _remove_service() + await _set_as_stopped() + await _set_as_running() + + +@pytest.mark.parametrize("item_count", [100]) +async def test_services_tracer_workflow( + app: FastAPI, + node_id: NodeID, + item_count: NonNegativeInt, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + get_dynamic_service_stop: Callable[[NodeID], DynamicServiceStop], +): + # ensure more than one service can be tracked + await limited_gather( + *[ + set_request_as_stopped(app, get_dynamic_service_stop(uuid4())) + for _ in range(item_count) + ], + limit=100, + ) + assert len(await get_all_tracked_services(app)) == item_count + + +@pytest.mark.parametrize( + "status", + [ + *[ + NodeGet.model_validate(o) + for o in NodeGet.model_config["json_schema_extra"]["examples"] + ], + *[ + DynamicServiceGet.model_validate(o) + for o in DynamicServiceGet.model_config["json_schema_extra"]["examples"] + ], + NodeGetIdle.model_validate( + NodeGetIdle.model_config["json_schema_extra"]["example"] + ), + ], +) +async def test_set_if_status_changed( + app: FastAPI, + node_id: NodeID, + status: NodeGet | 
DynamicServiceGet | NodeGetIdle, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], +): + await set_request_as_running(app, get_dynamic_service_start(node_id)) + + assert await set_if_status_changed_for_service(app, node_id, status) is True + + assert await set_if_status_changed_for_service(app, node_id, status) is False + + model = await get_tracked_service(app, node_id) + assert model + + assert model.service_status == status.model_dump_json() + + +async def test_set_service_status_task_uid( + app: FastAPI, + node_id: NodeID, + faker: Faker, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], +): + await set_request_as_running(app, get_dynamic_service_start(node_id)) + + task_uid = TaskUID(faker.uuid4()) + await set_service_status_task_uid(app, node_id, task_uid) + + model = await get_tracked_service(app, node_id) + assert model + + assert model.service_status_task_uid == task_uid + + +@pytest.mark.parametrize( + "status, expected_poll_interval", + [ + ( + TypeAdapter(NodeGet).validate_python( + NodeGet.model_config["json_schema_extra"]["examples"][1] + ), + _LOW_RATE_POLL_INTERVAL, + ), + *[ + ( + TypeAdapter(DynamicServiceGet).validate_python(o), + NORMAL_RATE_POLL_INTERVAL, + ) + for o in DynamicServiceGet.model_config["json_schema_extra"]["examples"] + ], + ( + TypeAdapter(NodeGetIdle).validate_python( + NodeGetIdle.model_config["json_schema_extra"]["example"] + ), + _LOW_RATE_POLL_INTERVAL, + ), + ], +) +def test__get_poll_interval( + status: NodeGet | DynamicServiceGet | NodeGetIdle, expected_poll_interval: timedelta +): + assert _get_poll_interval(status) == expected_poll_interval + + +def _get_node_get_from(service_state: ServiceState) -> NodeGet: + dict_data = NodeGet.model_config["json_schema_extra"]["examples"][1] + assert "service_state" in dict_data + dict_data["service_state"] = service_state + return TypeAdapter(NodeGet).validate_python(dict_data) + + +def _get_dynamic_service_get_from( + service_state: ServiceState, +) -> DynamicServiceGet: + dict_data = DynamicServiceGet.model_config["json_schema_extra"]["examples"][1] + assert "service_state" in dict_data + dict_data["service_state"] = service_state + return TypeAdapter(DynamicServiceGet).validate_python(dict_data) + + +def _get_node_get_idle() -> NodeGetIdle: + return TypeAdapter(NodeGetIdle).validate_python( + NodeGetIdle.model_config["json_schema_extra"]["example"] + ) + + +def __get_flat_list(nested_list: list[list[Any]]) -> list[Any]: + return [item for sublist in nested_list for item in sublist] + + +class ServiceStatusToSchedulerState(NamedTuple): + requested: UserRequestedState + service_status: NodeGet | DynamicServiceGet | NodeGetIdle + expected: SchedulerServiceState + + +_EXPECTED_TEST_CASES: list[list[ServiceStatusToSchedulerState]] = [ + [ + # UserRequestedState.RUNNING + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.PENDING), + SchedulerServiceState.STARTING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.PULLING), + SchedulerServiceState.STARTING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.STARTING), + SchedulerServiceState.STARTING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.RUNNING), + SchedulerServiceState.RUNNING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.COMPLETE), + 
SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.FAILED), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + status_generator(ServiceState.STOPPING), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.RUNNING, + _get_node_get_idle(), + SchedulerServiceState.IDLE, + ), + # UserRequestedState.STOPPED + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.PENDING), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.PULLING), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.STARTING), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.RUNNING), + SchedulerServiceState.STOPPING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.COMPLETE), + SchedulerServiceState.STOPPING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.FAILED), + SchedulerServiceState.UNEXPECTED_OUTCOME, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + status_generator(ServiceState.STOPPING), + SchedulerServiceState.STOPPING, + ), + ServiceStatusToSchedulerState( + UserRequestedState.STOPPED, + _get_node_get_idle(), + SchedulerServiceState.IDLE, + ), + ] + for status_generator in ( + _get_node_get_from, + _get_dynamic_service_get_from, + ) +] +_FLAT_EXPECTED_TEST_CASES: list[ServiceStatusToSchedulerState] = __get_flat_list( + _EXPECTED_TEST_CASES +) +# ensure enum changes do not break above rules +_NODE_STATUS_FORMATS_COUNT: Final[int] = 2 +assert ( + len(_FLAT_EXPECTED_TEST_CASES) + == len(ServiceState) * len(UserRequestedState) * _NODE_STATUS_FORMATS_COUNT +) + + +@pytest.mark.parametrize("service_status_to_scheduler_state", _FLAT_EXPECTED_TEST_CASES) +def test__get_current_scheduler_service_state( + service_status_to_scheduler_state: ServiceStatusToSchedulerState, +): + assert ( + _get_current_scheduler_service_state( + service_status_to_scheduler_state.requested, + service_status_to_scheduler_state.service_status, + ) + == service_status_to_scheduler_state.expected + ) diff --git a/services/dynamic-scheduler/tests/unit/service_tracker/test__models.py b/services/dynamic-scheduler/tests/unit/service_tracker/test__models.py new file mode 100644 index 00000000000..92d3b701522 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/service_tracker/test__models.py @@ -0,0 +1,119 @@ +# pylint: disable=redefined-outer-name + +from copy import deepcopy +from datetime import timedelta +from pathlib import Path +from uuid import uuid4 + +import arrow +import pytest +from faker import Faker +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, +) +from models_library.projects import ProjectID +from pydantic import TypeAdapter +from servicelib.deferred_tasks import TaskUID +from simcore_service_dynamic_scheduler.services.service_tracker._models import ( + SchedulerServiceState, + TrackedServiceModel, + UserRequestedState, +) + + +@pytest.mark.parametrize("requested_state", UserRequestedState) +@pytest.mark.parametrize("current_state", 
SchedulerServiceState) +@pytest.mark.parametrize("check_status_after", [1, arrow.utcnow().timestamp()]) +@pytest.mark.parametrize("service_status_task_uid", [None, TaskUID("ok")]) +def test_serialization( + faker: Faker, + requested_state: UserRequestedState, + current_state: SchedulerServiceState, + check_status_after: float, + service_status_task_uid: TaskUID | None, +): + tracked_model = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=requested_state, + current_state=current_state, + service_status=faker.pystr(), + check_status_after=check_status_after, + service_status_task_uid=service_status_task_uid, + ) + + as_bytes = tracked_model.to_bytes() + assert as_bytes + assert TrackedServiceModel.from_bytes(as_bytes) == tracked_model + + +@pytest.mark.parametrize( + "dynamic_service_start", + [ + None, + TypeAdapter(DynamicServiceStart).validate_python( + DynamicServiceStart.model_json_schema()["example"] + ), + ], +) +@pytest.mark.parametrize("project_id", [None, uuid4()]) +async def test_set_check_status_after_to( + dynamic_service_start: DynamicServiceStart | None, project_id: ProjectID | None +): + model = TrackedServiceModel( + dynamic_service_start=dynamic_service_start, + user_id=None, + project_id=project_id, + requested_state=UserRequestedState.RUNNING, + ) + assert model.check_status_after < arrow.utcnow().timestamp() + + delay = timedelta(seconds=4) + + before = (arrow.utcnow() + delay).timestamp() + model.set_check_status_after_to(delay) + after = (arrow.utcnow() + delay).timestamp() + + assert model.check_status_after + assert before < model.check_status_after < after + + +async def test_legacy_format_compatibility(project_slug_dir: Path): + legacy_format_path = ( + project_slug_dir / "tests" / "assets" / "legacy_tracked_service_model.bin" + ) + assert legacy_format_path.exists() + + model_from_disk = TrackedServiceModel.from_bytes(legacy_format_path.read_bytes()) + + model = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=UserRequestedState.RUNNING, + # assume same dates are coming in + check_status_after=model_from_disk.check_status_after, + last_state_change=model_from_disk.last_state_change, + ) + + assert model_from_disk == model + + +def test_current_state_changes_updates_last_state_change(): + model = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=UserRequestedState.RUNNING, + ) + + last_changed = deepcopy(model.last_state_change) + model.current_state = SchedulerServiceState.IDLE + assert last_changed != model.last_state_change + + last_changed_2 = deepcopy(model.last_state_change) + model.current_state = SchedulerServiceState.IDLE + assert last_changed_2 == model.last_state_change + + assert last_changed != last_changed_2 diff --git a/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py b/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py new file mode 100644 index 00000000000..818a724c77d --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/service_tracker/test__tracker.py @@ -0,0 +1,105 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from uuid import uuid4 + +import pytest +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from pydantic import NonNegativeInt +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.utils import logged_gather 
+from settings_library.redis import RedisSettings +from simcore_service_dynamic_scheduler.services.service_tracker._models import ( + TrackedServiceModel, + UserRequestedState, +) +from simcore_service_dynamic_scheduler.services.service_tracker._setup import ( + get_tracker, +) +from simcore_service_dynamic_scheduler.services.service_tracker._tracker import Tracker + +pytest_simcore_core_services_selection = [ + "redis", +] + + +@pytest.fixture +def disable_monitor_task(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_scheduler.services.status_monitor._monitor.Monitor._worker_check_services_require_status_update", + autospec=True, + ) + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_monitor_task: None, + disable_rabbitmq_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + app_environment: EnvVarsDict, + redis_service: RedisSettings, + remove_redis_data: None, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +def tracker(app: FastAPI) -> Tracker: + return get_tracker(app) + + +async def test_tracker_workflow(tracker: Tracker): + node_id: NodeID = uuid4() + + # ensure does not already exist + result = await tracker.load(node_id) + assert result is None + + # node creation + model = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=UserRequestedState.RUNNING, + ) + await tracker.save(node_id, model) + + # check if exists + result = await tracker.load(node_id) + assert result == model + + # remove and check is missing + await tracker.delete(node_id) + result = await tracker.load(node_id) + assert result is None + + +@pytest.mark.parametrize("item_count", [100]) +async def test_tracker_listing(tracker: Tracker, item_count: NonNegativeInt) -> None: + assert await tracker.all() == {} + + model_to_insert = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=UserRequestedState.RUNNING, + ) + + data_to_insert = {uuid4(): model_to_insert for _ in range(item_count)} + + await logged_gather( + *[tracker.save(k, v) for k, v in data_to_insert.items()], max_concurrency=100 + ) + + response = await tracker.all() + for key in response: + assert isinstance(key, NodeID) + assert response == data_to_insert + + +async def test_remove_missing_key_does_not_raise_error(tracker: Tracker): + await tracker.delete(uuid4()) diff --git a/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py b/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py new file mode 100644 index 00000000000..4b59a9683ab --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/status_monitor/test_services_status_monitor__monitor.py @@ -0,0 +1,500 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-positional-arguments +# pylint:disable=unused-argument + +import itertools +import json +import re +from collections.abc import AsyncIterable, Callable +from copy import deepcopy +from datetime import timedelta +from typing import Any +from unittest.mock import AsyncMock +from uuid import uuid4 + +import pytest +import respx +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from httpx import Request, Response +from models_library.api_schemas_directorv2.dynamic_services import DynamicServiceGet +from models_library.api_schemas_dynamic_scheduler.dynamic_services import ( + DynamicServiceStart, + 
DynamicServiceStop, +) +from models_library.api_schemas_webserver.projects_nodes import NodeGet, NodeGetIdle +from models_library.projects_nodes_io import NodeID +from pydantic import NonNegativeInt, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from simcore_service_dynamic_scheduler.services.service_tracker import ( + get_all_tracked_services, + set_request_as_running, + set_request_as_stopped, +) +from simcore_service_dynamic_scheduler.services.service_tracker._models import ( + SchedulerServiceState, + TrackedServiceModel, + UserRequestedState, +) +from simcore_service_dynamic_scheduler.services.status_monitor import _monitor +from simcore_service_dynamic_scheduler.services.status_monitor._deferred_get_status import ( + DeferredGetStatus, +) +from simcore_service_dynamic_scheduler.services.status_monitor._monitor import ( + Monitor, + _can_be_removed, +) +from simcore_service_dynamic_scheduler.services.status_monitor._setup import get_monitor +from tenacity import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", + "redis", +] + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + app_environment: EnvVarsDict, + rabbit_service: RabbitSettings, + redis_service: RedisSettings, + remove_redis_data: None, +) -> EnvVarsDict: + return app_environment + + +_DEFAULT_NODE_ID: NodeID = uuid4() + + +def _add_to_dict(dict_data: dict, entries: list[tuple[str, Any]]) -> None: + for key, data in entries: + assert key in dict_data + dict_data[key] = data + + +def _get_node_get_with(state: str, node_id: NodeID = _DEFAULT_NODE_ID) -> NodeGet: + dict_data = deepcopy(NodeGet.model_config["json_schema_extra"]["examples"][1]) + _add_to_dict( + dict_data, + [ + ("service_state", state), + ("service_uuid", f"{node_id}"), + ], + ) + return TypeAdapter(NodeGet).validate_python(dict_data) + + +def _get_dynamic_service_get_legacy_with( + state: str, node_id: NodeID = _DEFAULT_NODE_ID +) -> DynamicServiceGet: + dict_data = deepcopy( + DynamicServiceGet.model_config["json_schema_extra"]["examples"][0] + ) + _add_to_dict( + dict_data, + [ + ("service_state", state), + ("service_uuid", f"{node_id}"), + ], + ) + return TypeAdapter(DynamicServiceGet).validate_python(dict_data) + + +def _get_dynamic_service_get_new_style_with( + state: str, node_id: NodeID = _DEFAULT_NODE_ID +) -> DynamicServiceGet: + dict_data = deepcopy( + DynamicServiceGet.model_config["json_schema_extra"]["examples"][1] + ) + _add_to_dict( + dict_data, + [ + ("service_state", state), + ("service_uuid", f"{node_id}"), + ], + ) + return TypeAdapter(DynamicServiceGet).validate_python(dict_data) + + +def _get_node_get_idle(node_id: NodeID = _DEFAULT_NODE_ID) -> NodeGetIdle: + dict_data = NodeGetIdle.model_config["json_schema_extra"]["example"] + _add_to_dict( + dict_data, + [ + ("service_uuid", f"{node_id}"), + ], + ) + return TypeAdapter(NodeGetIdle).validate_python(dict_data) + + +class _ResponseTimeline: + def __init__( + self, timeline: list[NodeGet | DynamicServiceGet | NodeGetIdle] + ) -> None: + self._timeline = timeline + + self._client_access_history: dict[NodeID, NonNegativeInt] = {} + + @property + def entries(self) -> list[NodeGet | DynamicServiceGet | NodeGetIdle]: + return self._timeline + + def 
__len__(self) -> int: + return len(self._timeline) + + def get_status(self, node_id: NodeID) -> NodeGet | DynamicServiceGet | NodeGetIdle: + if node_id not in self._client_access_history: + self._client_access_history[node_id] = 0 + + # always return node idle when timeline finished playing + if self._client_access_history[node_id] >= len(self._timeline): + return _get_node_get_idle() + + status = self._timeline[self._client_access_history[node_id]] + self._client_access_history[node_id] += 1 + return status + + +async def _assert_call_to( + deferred_status_spies: dict[str, AsyncMock], *, method: str, count: NonNegativeInt +) -> None: + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(1), + wait=wait_fixed(0.01), + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + call_count = deferred_status_spies[method].call_count + assert ( + call_count == count + ), f"Received calls {call_count} != {count} (expected) to '{method}'" + + +async def _assert_result( + deferred_status_spies: dict[str, AsyncMock], + *, + timeline: list[NodeGet | DynamicServiceGet | NodeGetIdle], +) -> None: + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(1), + wait=wait_fixed(0.01), + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + + assert deferred_status_spies["on_result"].call_count == len(timeline) + assert [ + x.args[0] for x in deferred_status_spies["on_result"].call_args_list + ] == timeline + + +async def _assert_notification_count( + mock: AsyncMock, expected_count: NonNegativeInt +) -> None: + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(1), + wait=wait_fixed(0.01), + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert mock.call_count == expected_count + + +@pytest.fixture +async def mock_director_v2_status( + app: FastAPI, response_timeline: _ResponseTimeline +) -> AsyncIterable[None]: + def _side_effect_node_status_response(request: Request) -> Response: + node_id = NodeID(f"{request.url}".split("/")[-1]) + + service_status = response_timeline.get_status(node_id) + + if isinstance(service_status, NodeGet): + return Response( + status.HTTP_200_OK, + text=json.dumps( + jsonable_encoder({"data": service_status.model_dump()}) + ), + ) + if isinstance(service_status, DynamicServiceGet): + return Response(status.HTTP_200_OK, text=service_status.model_dump_json()) + if isinstance(service_status, NodeGetIdle): + return Response(status.HTTP_404_NOT_FOUND) + + raise TypeError + + with respx.mock( + base_url=app.state.settings.DYNAMIC_SCHEDULER_DIRECTOR_V2_SETTINGS.api_base_url, + assert_all_called=False, + assert_all_mocked=True, + ) as mock: + mock.get(re.compile(r"/dynamic_services/([\w-]+)")).mock( + side_effect=_side_effect_node_status_response + ) + yield + + +@pytest.fixture +def monitor(mock_director_v2_status: None, app: FastAPI) -> Monitor: + return get_monitor(app) + + +@pytest.fixture +def deferred_status_spies(mocker: MockerFixture) -> dict[str, AsyncMock]: + results: dict[str, AsyncMock] = {} + for method_name in ( + "start", + "on_result", + "on_created", + "run", + "on_finished_with_error", + ): + mock_method = mocker.AsyncMock(wraps=getattr(DeferredGetStatus, method_name)) + mocker.patch.object(DeferredGetStatus, method_name, mock_method) + results[method_name] = mock_method + + return results + + +@pytest.fixture +def remove_tracked_spy(mocker: MockerFixture) -> AsyncMock: + mock_method = mocker.AsyncMock( + 
wraps=_monitor.service_tracker.remove_tracked_service + ) + return mocker.patch.object( + _monitor.service_tracker, + _monitor.service_tracker.remove_tracked_service.__name__, + mock_method, + ) + + +@pytest.fixture +def node_id() -> NodeID: + return _DEFAULT_NODE_ID + + +@pytest.fixture +def mocked_notify_frontend(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_dynamic_scheduler.services.status_monitor._deferred_get_status.notify_service_status_change" + ) + + +@pytest.fixture +def disable_status_monitor_background_task(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_scheduler.services.status_monitor._monitor.Monitor.setup" + ) + + +@pytest.mark.parametrize( + "user_requests_running, response_timeline, expected_notification_count, remove_tracked_count", + [ + pytest.param( + True, + _ResponseTimeline([_get_node_get_with("running")]), + 1, + 0, + id="requested_running_state_changes_1_no_task_removal", + ), + pytest.param( + True, + _ResponseTimeline( + [_get_dynamic_service_get_legacy_with("running") for _ in range(10)] + ), + 1, + 0, + id="requested_running_state_changes_1_for_multiple_same_state_no_task_removal", + ), + pytest.param( + True, + _ResponseTimeline([_get_node_get_idle()]), + 1, + 0, + id="requested_running_state_idle_no_removal", + ), + pytest.param( + False, + _ResponseTimeline([_get_node_get_idle()]), + 1, + 1, + id="requested_stopped_state_idle_is_removed", + ), + pytest.param( + True, + _ResponseTimeline( + [ + *[_get_node_get_idle() for _ in range(10)], + _get_dynamic_service_get_new_style_with("pending"), + _get_dynamic_service_get_new_style_with("pulling"), + *[ + _get_dynamic_service_get_new_style_with("starting") + for _ in range(10) + ], + _get_dynamic_service_get_new_style_with("running"), + _get_dynamic_service_get_new_style_with("stopping"), + _get_dynamic_service_get_new_style_with("complete"), + _get_node_get_idle(), + ] + ), + 8, + 0, + id="requested_running_state_changes_8_no_removal", + ), + pytest.param( + False, + _ResponseTimeline( + [ + _get_dynamic_service_get_new_style_with("pending"), + _get_dynamic_service_get_new_style_with("pulling"), + *[ + _get_dynamic_service_get_new_style_with("starting") + for _ in range(10) + ], + _get_dynamic_service_get_new_style_with("running"), + _get_dynamic_service_get_new_style_with("stopping"), + _get_dynamic_service_get_new_style_with("complete"), + _get_node_get_idle(), + ] + ), + 7, + 1, + id="requested_stopped_state_changes_7_is_removed", + ), + ], +) +async def test_expected_calls_to_notify_frontend( # pylint:disable=too-many-arguments + disable_status_monitor_background_task: None, + mocked_notify_frontend: AsyncMock, + deferred_status_spies: dict[str, AsyncMock], + remove_tracked_spy: AsyncMock, + app: FastAPI, + monitor: Monitor, + node_id: NodeID, + user_requests_running: bool, + response_timeline: _ResponseTimeline, + expected_notification_count: NonNegativeInt, + remove_tracked_count: NonNegativeInt, + get_dynamic_service_start: Callable[[NodeID], DynamicServiceStart], + get_dynamic_service_stop: Callable[[NodeID], DynamicServiceStop], +): + assert await get_all_tracked_services(app) == {} + + if user_requests_running: + await set_request_as_running(app, get_dynamic_service_start(node_id)) + else: + await set_request_as_stopped(app, get_dynamic_service_stop(node_id)) + + entries_in_timeline = len(response_timeline) + + for i in range(entries_in_timeline): + async for attempt in AsyncRetrying( + reraise=True, stop=stop_after_delay(10), 
wait=wait_fixed(0.1) + ): + with attempt: + # pylint:disable=protected-access + await monitor._worker_check_services_require_status_update() # noqa: SLF001 + for method in ("start", "on_created", "on_result"): + await _assert_call_to( + deferred_status_spies, method=method, count=i + 1 + ) + + await _assert_call_to( + deferred_status_spies, method="run", count=entries_in_timeline + ) + await _assert_call_to( + deferred_status_spies, method="on_finished_with_error", count=0 + ) + + await _assert_result(deferred_status_spies, timeline=response_timeline.entries) + + await _assert_notification_count( + mocked_notify_frontend, expected_notification_count + ) + + async for attempt in AsyncRetrying( + reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.1) + ): + with attempt: + # pylint:disable=protected-access + await monitor._worker_check_services_require_status_update() # noqa: SLF001 + assert remove_tracked_spy.call_count == remove_tracked_count + + +@pytest.fixture +def mock_tracker_remove_after_idle_for(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_scheduler.services.status_monitor._monitor._REMOVE_AFTER_IDLE_FOR", + timedelta(seconds=0.1), + ) + + +@pytest.mark.parametrize( + "requested_state, current_state, immediate_can_be_removed, can_be_removed", + [ + pytest.param( + UserRequestedState.RUNNING, + SchedulerServiceState.IDLE, + False, + True, + id="can_remove_after_an_interval", + ), + pytest.param( + UserRequestedState.STOPPED, + SchedulerServiceState.IDLE, + True, + True, + id="can_remove_no_interval", + ), + *[ + pytest.param( + requested_state, + service_state, + False, + False, + id=f"not_removed_{requested_state=}_{service_state=}", + ) + for requested_state, service_state in itertools.product( + set(UserRequestedState), + {x for x in SchedulerServiceState if x != SchedulerServiceState.IDLE}, + ) + ], + ], +) +async def test__can_be_removed( + mock_tracker_remove_after_idle_for: None, + requested_state: UserRequestedState, + current_state: SchedulerServiceState, + immediate_can_be_removed: bool, + can_be_removed: bool, +): + model = TrackedServiceModel( + dynamic_service_start=None, + user_id=None, + project_id=None, + requested_state=requested_state, + ) + + # This also triggers the setter and updates the last state change timer + model.current_state = current_state + + assert _can_be_removed(model) is immediate_can_be_removed + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(2), + reraise=True, + retry=retry_if_exception_type(AssertionError), + ): + with attempt: + assert _can_be_removed(model) is can_be_removed diff --git a/services/dynamic-scheduler/tests/unit/test__model_examples.py b/services/dynamic-scheduler/tests/unit/test__model_examples.py new file mode 100644 index 00000000000..98b04cc2996 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test__model_examples.py @@ -0,0 +1,21 @@ +from typing import Any + +import pytest +import simcore_service_dynamic_scheduler.models +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(simcore_service_dynamic_scheduler.models), +) +def test_api_server_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git 
a/services/dynamic-scheduler/tests/unit/test_cli.py b/services/dynamic-scheduler/tests/unit/test_cli.py new file mode 100644 index 00000000000..89163a7f5b6 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_cli.py @@ -0,0 +1,69 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import os +import traceback + +import pytest +from click.testing import Result +from pytest_simcore.helpers.monkeypatch_envs import load_dotenv, setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dynamic_scheduler._meta import API_VERSION +from simcore_service_dynamic_scheduler.cli import main as cli_main +from simcore_service_dynamic_scheduler.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def _format_cli_error(result: Result) -> str: + assert result.exception + tb_message = "\n".join(traceback.format_tb(result.exception.__traceback__)) + return f"Below exception was raised by the cli:\n{tb_message}" + + +def test_cli_help_and_version(cli_runner: CliRunner): + # simcore-service-dynamic-scheduler --help + result = cli_runner.invoke(cli_main, "--help") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + result = cli_runner.invoke(cli_main, "--version") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + assert result.stdout.strip() == API_VERSION + + +def test_echo_dotenv( + app_environment: EnvVarsDict, cli_runner: CliRunner, monkeypatch: pytest.MonkeyPatch +): + # simcore-service-dynamic-scheduler echo-dotenv + result = cli_runner.invoke(cli_main, "echo-dotenv") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + environs = load_dotenv(result.stdout) + + with monkeypatch.context() as patch: + setenvs_from_dict(patch, environs) + ApplicationSettings.create_from_envs() + + +def _get_default_environs(cli_runner: CliRunner) -> EnvVarsDict: + result = cli_runner.invoke(cli_main, "echo-dotenv") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + return load_dotenv(result.stdout) + + +def test_list_settings( + cli_runner: CliRunner, app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +): + with monkeypatch.context() as patch: + setenvs_from_dict(patch, _get_default_environs(cli_runner)) + + # simcore-service-dynamic-scheduler settings --show-secrets --as-json + result = cli_runner.invoke( + cli_main, ["settings", "--show-secrets", "--as-json"] + ) + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + print(result.output) + settings = ApplicationSettings(result.output) + assert ( + settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() + ) diff --git a/services/dynamic-scheduler/tests/unit/test_repository_postgres_networks.py b/services/dynamic-scheduler/tests/unit/test_repository_postgres_networks.py new file mode 100644 index 00000000000..e0374fb31dc --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_repository_postgres_networks.py @@ -0,0 +1,149 @@ +# pylint:disable=contextmanager-generator-missing-cleanup +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import AsyncIterator +from typing import Any + +import pytest +import sqlalchemy as sa +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_networks import NetworksWithAliases +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from 
pytest_simcore.helpers.postgres_tools import (
+    PostgresTestConfig,
+    insert_and_get_row_lifespan,
+)
+from pytest_simcore.helpers.typing_env import EnvVarsDict
+from simcore_postgres_database.models.projects import projects
+from simcore_postgres_database.models.users import users
+from simcore_service_dynamic_scheduler.repository.events import (
+    get_project_networks_repo,
+)
+from simcore_service_dynamic_scheduler.repository.project_networks import (
+    ProjectNetworkNotFoundError,
+    ProjectNetworksRepo,
+)
+from sqlalchemy.ext.asyncio import AsyncEngine
+
+pytest_simcore_core_services_selection = [
+    "postgres",
+]
+pytest_simcore_ops_services_selection = [
+    "adminer",
+]
+
+
+@pytest.fixture
+def app_environment(
+    app_environment: EnvVarsDict,
+    postgres_db: sa.engine.Engine,
+    postgres_host_config: PostgresTestConfig,
+    disable_rabbitmq_lifespan: None,
+    disable_redis_lifespan: None,
+    disable_service_tracker_lifespan: None,
+    disable_deferred_manager_lifespan: None,
+    disable_notifier_lifespan: None,
+    disable_status_monitor_lifespan: None,
+    monkeypatch: pytest.MonkeyPatch,
+) -> EnvVarsDict:
+    setenvs_from_dict(
+        monkeypatch,
+        {
+            "POSTGRES_CLIENT_NAME": "test_postgres_client",
+        },
+    )
+    return app_environment
+
+
+@pytest.fixture
+def engine(app: FastAPI) -> AsyncEngine:
+    assert isinstance(app.state.engine, AsyncEngine)
+    return app.state.engine
+
+
+@pytest.fixture
+def user_id() -> UserID:
+    return 1
+
+
+@pytest.fixture
+async def user_in_db(
+    engine: AsyncEngine,
+    user: dict[str, Any],
+    user_id: UserID,
+) -> AsyncIterator[dict[str, Any]]:
+    """
+    injects a user in db
+    """
+    assert user_id == user["id"]
+    async with insert_and_get_row_lifespan(
+        engine,
+        table=users,
+        values=user,
+        pk_col=users.c.id,
+        pk_value=user["id"],
+    ) as row:
+        yield row
+
+
+@pytest.fixture
+async def project_in_db(
+    engine: AsyncEngine,
+    project_id: ProjectID,
+    project_data: dict[str, Any],
+    user_in_db: UserID,
+) -> AsyncIterator[dict[str, Any]]:
+    assert f"{project_id}" == project_data["uuid"]
+    async with insert_and_get_row_lifespan(
+        engine,
+        table=projects,
+        values=project_data,
+        pk_col=projects.c.uuid,
+        pk_value=project_data["uuid"],
+    ) as row:
+        yield row
+
+
+@pytest.fixture()
+def project_networks_repo(app: FastAPI) -> ProjectNetworksRepo:
+    return get_project_networks_repo(app)
+
+
+@pytest.fixture
+def networks_with_aliases() -> NetworksWithAliases:
+    return TypeAdapter(NetworksWithAliases).validate_python(
+        NetworksWithAliases.model_json_schema()["examples"][0]
+    )
+
+
+async def test_no_project_networks_for_project(
+    project_networks_repo: ProjectNetworksRepo,
+    project_in_db: dict[str, Any],
+    project_id: ProjectID,
+):
+    with pytest.raises(ProjectNetworkNotFoundError):
+        await project_networks_repo.get_projects_networks(project_id=project_id)
+
+
+async def test_upsert_projects_networks(
+    project_networks_repo: ProjectNetworksRepo,
+    project_in_db: dict[str, Any],
+    project_id: ProjectID,
+    networks_with_aliases: NetworksWithAliases,
+):
+
+    # allows to test the upsert capabilities
+    for _ in range(2):
+        await project_networks_repo.upsert_projects_networks(
+            project_id=project_id, networks_with_aliases=networks_with_aliases
+        )
+
+    project_networks = await project_networks_repo.get_projects_networks(
+        project_id=project_id
+    )
+    assert project_networks.project_uuid == project_id
+    assert project_networks.networks_with_aliases == networks_with_aliases
diff --git a/services/dynamic-scheduler/tests/unit/test_services_catalog.py
b/services/dynamic-scheduler/tests/unit/test_services_catalog.py new file mode 100644 index 00000000000..c54222fdab4 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_services_catalog.py @@ -0,0 +1,119 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +import urllib.parse +from collections.abc import Iterator + +import pytest +import respx +from fastapi import FastAPI +from models_library.api_schemas_catalog.services_specifications import ( + ServiceSpecifications, +) +from models_library.service_settings_labels import SimcoreServiceLabels +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dynamic_scheduler.services.catalog import CatalogPublicClient + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_redis_lifespan: None, + disable_rabbitmq_lifespan: None, + disable_service_tracker_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +def simcore_service_labels() -> SimcoreServiceLabels: + return TypeAdapter(SimcoreServiceLabels).validate_python( + SimcoreServiceLabels.model_json_schema()["examples"][1] + ) + + +@pytest.fixture +def service_specifications() -> ServiceSpecifications: + return TypeAdapter(ServiceSpecifications).validate_python({}) + + +@pytest.fixture +def service_version() -> ServiceVersion: + return "1.0.0" + + +@pytest.fixture +def service_key() -> ServiceKey: + return "simcore/services/dynamic/test" + + +@pytest.fixture +def mock_catalog( + app: FastAPI, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + simcore_service_labels: SimcoreServiceLabels, + service_specifications: ServiceSpecifications, +) -> Iterator[None]: + with respx.mock( + base_url=app.state.settings.DYNAMIC_SCHEDULER_CATALOG_SETTINGS.api_base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as respx_mock: + respx_mock.get( + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}/labels", + name="service labels", + ).respond( + status_code=200, + json=simcore_service_labels.model_dump(mode="json"), + ) + + respx_mock.get( + f"/services/{urllib.parse.quote_plus(service_key)}/{service_version}/specifications?user_id={user_id}", + name="service specifications", + ).respond( + status_code=200, + json=service_specifications.model_dump(mode="json"), + ) + + yield + + +async def test_get_services_labels( + mock_catalog: None, + app: FastAPI, + service_key: ServiceKey, + service_version: ServiceVersion, + simcore_service_labels: SimcoreServiceLabels, +): + client = CatalogPublicClient.get_from_app_state(app) + result = await client.get_services_labels(service_key, service_version) + assert result.model_dump(mode="json") == simcore_service_labels.model_dump( + mode="json" + ) + + +async def test_get_services_specifications( + mock_catalog: None, + app: FastAPI, + user_id: UserID, + service_key: ServiceKey, + service_version: ServiceVersion, + service_specifications: ServiceSpecifications, +): + client = CatalogPublicClient.get_from_app_state(app) + result = await client.get_services_specifications( + user_id, service_key, service_version + ) + assert result.model_dump(mode="json") == service_specifications.model_dump( + mode="json" + ) diff --git a/services/dynamic-scheduler/tests/unit/test_services_director_v0.py b/services/dynamic-scheduler/tests/unit/test_services_director_v0.py new file mode 100644 index 00000000000..a24f2b7a5ed --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_services_director_v0.py @@ -0,0 +1,89 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +from collections.abc import Iterator + +import pytest +import respx +from fastapi import FastAPI +from models_library.api_schemas_directorv2.dynamic_services_service import ( + RunningDynamicServiceDetails, +) +from models_library.projects_nodes_io import NodeID +from pydantic import TypeAdapter +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_dynamic_scheduler.services.director_v0 import ( + DirectorV0PublicClient, +) + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_redis_lifespan: None, + disable_rabbitmq_lifespan: None, + disable_service_tracker_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, +) -> EnvVarsDict: + return app_environment + + +@pytest.fixture +def legacy_service_details() -> RunningDynamicServiceDetails: + return TypeAdapter(RunningDynamicServiceDetails).validate_python( + RunningDynamicServiceDetails.model_json_schema()["examples"][0] + ) + + +@pytest.fixture +def mock_director_v0( + node_id: NodeID, legacy_service_details: RunningDynamicServiceDetails +) -> Iterator[None]: + with respx.mock( + base_url="http://director:8000", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as respx_mock: + respx_mock.request( + method="GET", path=f"/v0/running_interactive_services/{node_id}" + ).respond( + status_code=200, + json={ + "data": legacy_service_details.model_dump(mode="json"), + "error": None, + }, + ) + + respx_mock.request( + method="GET", path="/v0/running_interactive_services" + ).respond( + status_code=200, + json={"data": [legacy_service_details.model_dump(mode="json")]}, + ) + + yield + + +async def test_get_running_service_details( + mock_director_v0: None, + app: FastAPI, + node_id: NodeID, + legacy_service_details: RunningDynamicServiceDetails, +): + client = DirectorV0PublicClient.get_from_app_state(app) + result = await client.get_running_service_details(node_id) + assert result == legacy_service_details + + +async def test_get_running_services( + mock_director_v0: None, + app: FastAPI, + legacy_service_details: RunningDynamicServiceDetails, +): + client = DirectorV0PublicClient.get_from_app_state(app) + result = await client.get_running_services() + assert result == [legacy_service_details] diff --git a/services/dynamic-scheduler/tests/unit/test_services_rabbitmq.py b/services/dynamic-scheduler/tests/unit/test_services_rabbitmq.py new file mode 100644 index 00000000000..bdc5fe73fa3 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_services_rabbitmq.py @@ -0,0 +1,46 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +import pytest +from fastapi import FastAPI +from models_library.rabbitmq_messages import RabbitMessageBase +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from simcore_service_dynamic_scheduler.services.rabbitmq import ( + get_rabbitmq_client, + get_rabbitmq_rpc_server, + post_message, +) + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_redis_lifespan: None, + disable_service_tracker_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, + rabbit_service: RabbitSettings, +) -> EnvVarsDict: + return app_environment + + +async def test_health(app: FastAPI): + assert get_rabbitmq_client(app) + assert get_rabbitmq_rpc_server(app) + + class TestMessage(RabbitMessageBase): + channel_name: str = "test" + + # pylint:disable=no-self-use + def routing_key(self) -> str | None: + return None + + await post_message(app, TestMessage()) diff --git a/services/dynamic-scheduler/tests/unit/test_services_redis.py b/services/dynamic-scheduler/tests/unit/test_services_redis.py new file mode 100644 index 00000000000..54a8ad29cc7 --- /dev/null +++ b/services/dynamic-scheduler/tests/unit/test_services_redis.py @@ -0,0 +1,32 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.redis import RedisSettings +from simcore_service_dynamic_scheduler.services.redis import get_all_redis_clients + +pytest_simcore_core_services_selection = [ + "redis", +] + + +@pytest.fixture +def app_environment( + disable_postgres_lifespan: None, + disable_rabbitmq_lifespan: None, + disable_deferred_manager_lifespan: None, + disable_notifier_lifespan: None, + disable_status_monitor_lifespan: None, + app_environment: EnvVarsDict, + redis_service: RedisSettings, +) -> EnvVarsDict: + return app_environment + + +async def 
test_health(app: FastAPI): + redis_clients = get_all_redis_clients(app) + for redis_client in redis_clients.values(): + assert await redis_client.ping() is True diff --git a/services/dynamic-sidecar/.env-devel b/services/dynamic-sidecar/.env-devel index 2144599c402..e1866cb022a 100644 --- a/services/dynamic-sidecar/.env-devel +++ b/services/dynamic-sidecar/.env-devel @@ -10,6 +10,7 @@ DYNAMIC_SIDECAR_SHARED_STORE_DIR="/tmp/shared-store" # service specific required vars DYNAMIC_SIDECAR_COMPOSE_NAMESPACE=dev-namespace +DY_SIDECAR_CALLBACKS_MAPPING='{"metrics": {"command": "fake-command", "timeout": 1, "service": "fake-test"}}' DY_SIDECAR_PATH_INPUTS=/tmp DY_SIDECAR_PATH_OUTPUTS=/tmp DY_SIDECAR_STATE_PATHS='["/tmp"]' @@ -17,17 +18,25 @@ DY_SIDECAR_STATE_EXCLUDE='["/tmp/exclude"]' DY_SIDECAR_USER_ID=1 DY_SIDECAR_PROJECT_ID=4539cfa0-8366-4c77-bf42-790684c7f564 DY_SIDECAR_NODE_ID=d286bc62-3b4d-416a-90a2-3aec949468c5 -DY_SIDECAR_RUN_ID=89563d90-ec72-11ec-8317-02420a0b0490 +DY_SIDECAR_RUN_ID=1689771013_f7c1bd87-4da5-4709-9471-3d60c8a70639 DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS=false # DOCKER REGISTRY -REGISTRY_AUTH=false -REGISTRY_USER=test -REGISTRY_PW=test -REGISTRY_SSL=false +DY_DEPLOYMENT_REGISTRY_SETTINGS='{"REGISTRY_AUTH":"false","REGISTRY_USER":"test","REGISTRY_PW":"test","REGISTRY_SSL":"false", "REGISTRY_URL": "fake.registry.com"}' -S3_ENDPOINT=MINIO +S3_ENDPOINT=http://111.111.111.111:12345 S3_ACCESS_KEY=mocked +S3_REGION=mocked S3_SECRET_KEY=mocked S3_BUCKET_NAME=mocked R_CLONE_PROVIDER=MINIO + +RABBIT_HOST=test +RABBIT_PASSWORD=test +RABBIT_SECURE=false +RABBIT_USER=test + +POSTGRES_DB=test +POSTGRES_HOST=test +POSTGRES_PASSWORD=test +POSTGRES_USER=test diff --git a/services/dynamic-sidecar/Dockerfile b/services/dynamic-sidecar/Dockerfile index fa40ad377d0..68148956da9 100644 --- a/services/dynamic-sidecar/Dockerfile +++ b/services/dynamic-sidecar/Dockerfile @@ -1,6 +1,18 @@ # syntax=docker/dockerfile:1 -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: # cd sercices/dynamic-sidecar @@ -11,26 +23,70 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer="Andrei Neagu " -COPY --chown=scu:scu scripts/install_rclone.bash . 
-RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +# NOTE: to list the latest version run `make` inside `scripts/apt-packages-versions` +ENV DOCKER_APT_VERSION="5:26.1.4-1~debian.12~bookworm" +ENV DOCKER_COMPOSE_APT_VERSION="2.27.1-1~debian.12~bookworm" + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN \ + --mount=type=cache,target=/var/cache/apt,mode=0755,sharing=private \ + --mount=type=cache,target=/var/lib/apt,mode=0755,sharing=private \ set -eux && \ apt-get update && \ apt-get install -y --no-install-recommends\ curl \ + gnupg \ + lsb-release \ + xz-utils \ + && mkdir -p /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + docker-ce-cli=${DOCKER_APT_VERSION} \ + docker-compose-plugin=${DOCKER_COMPOSE_APT_VERSION} \ gosu \ ca-certificates \ # required by python-magic libmagic1 \ - && ./install_rclone.bash \ && apt-get remove -y\ - curl \ - && apt-get autoclean -y\ - && apt-get autoremove -y\ - && rm -rf /var/lib/apt/lists/* \ + gnupg \ + lsb-release \ + && apt-get clean -y\ # verify that the binary works && gosu nobody true +# install RClone, we do it in a separate layer such that the cache is not locked forever, as this seems to take a long time +ARG TARGETARCH +ENV TARGETARCH=${TARGETARCH} +RUN \ + --mount=type=bind,source=scripts/install_rclone.bash,target=install_rclone.bash \ + ./install_rclone.bash +# install 7zip +ARG TARGETARCH +ENV TARGETARCH=${TARGETARCH} +RUN \ + --mount=type=bind,source=scripts/install_7zip.bash,target=install_7zip.bash \ + ./install_7zip.bash + +RUN AWS_CLI_VERSION="2.11.11" \ + && case "${TARGETARCH}" in \ + "amd64") ARCH="x86_64" ;; \ + "arm64") ARCH="aarch64" ;; \ + *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \ + esac \ + && curl "https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}-${AWS_CLI_VERSION}.zip" -o "awscliv2.zip" \ + && apt-get update && apt-get install -y unzip \ + && unzip awscliv2.zip \ + && ./aws/install \ + && apt-get remove --purge -y unzip \ + && rm awscliv2.zip \ + && rm -rf awscliv2 \ + && aws --version + # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ SC_USER_NAME=scu \ @@ -59,7 +115,7 @@ ENV DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR="/dy-volumes" # create direcotry to persist SharedStore data accessiable # between dynamic-sidecar reboots -ENV DYNAMIC_SIDECAR_SHARED_STORE_DIR="/shared-store" +ENV DYNAMIC_SIDECAR_SHARED_STORE_DIR="${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}/shared-store" RUN mkdir -p "${DYNAMIC_SIDECAR_SHARED_STORE_DIR}" && \ chown -R scu:scu "${DYNAMIC_SIDECAR_SHARED_STORE_DIR}" @@ -68,36 +124,34 @@ RUN mkdir -p "${DYNAMIC_SIDECAR_SHARED_STORE_DIR}" && \ # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - 
--mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ set -eux \ && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" \ +RUN uv venv "${VIRTUAL_ENV}" \ && mkdir -p "${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}" -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + +RUN --mount=type=cache,target=/root/.cache/uv \ + echo ${UV_CONCURRENT_INSTALLS} && \ + uv pip install --upgrade \ wheel \ setuptools + WORKDIR /build -# install base 3rd party dependencies -# NOTE: copies to /build to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/dynamic-sidecar/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt # copy utility devops scripts COPY --chown=scu:scu services/dynamic-sidecar/scripts/Makefile /home/scu @@ -109,19 +163,19 @@ COPY --chown=root:root services/dynamic-sidecar/scripts/Makefile /root # + /build # + services/dynamic-sidecar [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/dynamic-sidecar /build/services/dynamic-sidecar +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/dynamic-sidecar -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip --no-cache-dir install \ - --requirement requirements/prod.txt &&\ - pip --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/dynamic-sidecar,target=/build/services/dynamic-sidecar,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- # Final cleanup up to reduce image size and startup setup @@ -130,15 +184,19 @@ RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ # + /home/scu $HOME = WORKDIR # + services/dynamic-sidecar [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} COPY --chown=scu:scu --from=prod-only-deps ${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR} ${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR} @@ -147,18 +205,17 @@ COPY --chown=scu:scu --from=prod-only-deps ${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_D COPY --chown=scu:scu services/dynamic-sidecar/docker services/dynamic-sidecar/docker RUN chmod +x services/dynamic-sidecar/docker/*.sh -# disabled healthcheck as director-v2 is already taking care of it -# in oder to have similar performance a more aggressive healethcek -# would be required. 
-# removing the healthchek would not cause any issues at this point
-# NOTE: When adding a healthcheck
-# - remove UpdateHealth - no longer required
-# - remove WaitForSidecarAPI - no longer required
-# - After `get_dynamic_sidecar_placement` inside CreateSidecars call
-# (the sidecar's API will be up and running; guaranteed by the docker engine healthck).
-# Add the following line `scheduler_data.dynamic_sidecar.is_ready = True`
-# The healthcheck guarantees that the API is available
-HEALTHCHECK NONE
+# NOTE: the start period of 3 minutes is to allow the dynamic-sidecar
+# enough time to connect to the external dependencies. Sometimes the docker
+# networks take time to get created
+# https://docs.docker.com/reference/dockerfile/#healthcheck
+HEALTHCHECK \
+  --interval=10s \
+  --timeout=5s \
+  --start-period=180s \
+  --start-interval=1s \
+  --retries=5 \
+  CMD ["python3", "services/dynamic-sidecar/docker/healthcheck.py", "http://localhost:8000/health"]
 EXPOSE 8000
@@ -174,7 +231,7 @@ CMD ["/bin/sh", "services/dynamic-sidecar/docker/boot.sh"]
 # + /devel WORKDIR
 # + services (mounted volume)
 #
-FROM build as development
+FROM build AS development
 ENV SC_BUILD_TARGET=development \
   SC_BOOT_MODE=development
diff --git a/services/dynamic-sidecar/Makefile b/services/dynamic-sidecar/Makefile
index 7ecf6c298a6..19991ab56d2 100644
--- a/services/dynamic-sidecar/Makefile
+++ b/services/dynamic-sidecar/Makefile
@@ -1,15 +1,6 @@
 include ../../scripts/common.Makefile
 include ../../scripts/common-service.Makefile
-.DEFAULT_GOAL := help
-
-
-.env: .env-devel ## creates .env file from defaults in .env-devel
-	$(if $(wildcard $@), \
-	@echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\
-	@echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@)
-
-
 .PHONY: openapi-specs openapi.json
@@ -21,10 +12,6 @@ openapi.json: .env ## Creates OAS document openapi.json
 	set +o allexport; \
 	simcore-service-dynamic-sidecar openapi > $@
-	# validates OAS file: $@
-	@cd $(CURDIR); \
-	$(SCRIPTS_DIR)/openapi-generator-cli.bash validate --input-spec /local/$@
-
 .PHONY: run-github-action-locally
diff --git a/services/dynamic-sidecar/VERSION b/services/dynamic-sidecar/VERSION
index 524cb55242b..26aaba0e866 100644
--- a/services/dynamic-sidecar/VERSION
+++ b/services/dynamic-sidecar/VERSION
@@ -1 +1 @@
-1.1.1
+1.2.0
diff --git a/services/dynamic-sidecar/docker/boot.sh b/services/dynamic-sidecar/docker/boot.sh
index ac297ae60cc..152fc8a04c0 100755
--- a/services/dynamic-sidecar/docker/boot.sh
+++ b/services/dynamic-sidecar/docker/boot.sh
@@ -23,26 +23,37 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then
   python --version | sed 's/^/ /'
   command -v python | sed 's/^/ /'
-  cd services/dynamic-sidecar || exit 1
-  pip --quiet --no-cache-dir install -r requirements/dev.txt
-  cd - || exit 1
+  # NOTE: uv does not like this requirement file...
+ cd /devel/services/dynamic-sidecar + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" pip list | sed 's/^/ /' fi +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + # # RUNNING application # APP_LOG_LEVEL=${DYNAMIC_SIDECAR_LOG_LEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +DYNAMIC_SIDECAR_REMOTE_DEBUGGING_PORT=${DYNAMIC_SIDECAR_REMOTE_DEBUGGING_PORT:-3000} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/dynamic-sidecar/src/simcore_service_dynamic_sidecar && \ - uvicorn main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${DYNAMIC_SIDECAR_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages diff --git a/services/dynamic-sidecar/docker/entrypoint.sh b/services/dynamic-sidecar/docker/entrypoint.sh index e6d007a9246..73815d447dd 100755 --- a/services/dynamic-sidecar/docker/entrypoint.sh +++ b/services/dynamic-sidecar/docker/entrypoint.sh @@ -74,11 +74,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - # Appends docker group if socket is mounted DOCKER_MOUNT=/var/run/docker.sock if stat $DOCKER_MOUNT >/dev/null 2>&1; then @@ -98,12 +93,12 @@ fi # Change ownership of volumes mount directory # directories are empty at this point # each individual subdirectory is a unique volume -chown --verbose --recursive "$SC_USER_NAME":"$GROUPNAME" "${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}" +chown --recursive "$SC_USER_NAME":"$GROUPNAME" "${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}" # Allow owner and group to edit write and execute # files from all the subdirectories # When the service access files downloaded by the dynamic-sidecar # it uses group permissions -chmod --verbose --recursive 774 "${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}" +chmod --recursive 774 "${DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR}" echo "$INFO Starting $* ..." echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" diff --git a/services/dynamic-sidecar/docker/healthcheck.py b/services/dynamic-sidecar/docker/healthcheck.py new file mode 100755 index 00000000000..c8fedade9eb --- /dev/null +++ b/services/dynamic-sidecar/docker/healthcheck.py @@ -0,0 +1,27 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. 
debug, pdb-debug, debug-ptvsd, debugpy, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception), urlopen(sys.argv[1]) as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/dynamic-sidecar/openapi.json b/services/dynamic-sidecar/openapi.json index 8ed16f4a533..442005a03c1 100644 --- a/services/dynamic-sidecar/openapi.json +++ b/services/dynamic-sidecar/openapi.json @@ -1,9 +1,9 @@ { - "openapi": "3.0.2", + "openapi": "3.1.0", "info": { "title": "simcore-service-dynamic-sidecar", - "description": " Implements a sidecar service to manage user's dynamic/interactive services", - "version": "1.1.1" + "description": "Implements a sidecar service to manage user's dynamic/interactive services", + "version": "1.2.0" }, "servers": [ { @@ -45,6 +45,71 @@ } } }, + "/metrics": { + "get": { + "summary": "Metrics Endpoint", + "description": "Exposes metrics form the underlying user service.\n\nPossible responses:\n- HTTP 200 & empty body: user services did not start\n- HTTP 200 & prometheus metrics: was able to fetch data from user service\n- HTTP 500 & error message: something went wrong when fetching data from user service", + "operationId": "metrics_endpoint_metrics_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + }, + "500": { + "description": "error in recovering data from user service" + } + } + } + }, + "/v1/containers/compose-spec": { + "post": { + "tags": [ + "containers" + ], + "summary": "Store Compose Spec", + "description": "Validates and stores the docker compose spec for the user services.", + "operationId": "store_compose_spec_v1_containers_compose_spec_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ContainersComposeSpec" + } + } + }, + "required": true + }, + "responses": { + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "500": { + "description": "Errors in container" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, "/v1/containers": { "get": { "tags": [ @@ -55,16 +120,16 @@ "operationId": "containers_docker_inspect_v1_containers_get", "parameters": [ { - "description": "if True only show the status of the container", + "name": "only_status", + "in": "query", "required": false, "schema": { - "title": "Only Status", "type": "boolean", "description": "if True only show the status of the container", - "default": false + "default": false, + "title": "Only Status" }, - "name": "only_status", - "in": "query" + "description": "if True only show the status of the container" } ], "responses": { @@ -72,7 +137,10 @@ "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "type": "object", + "title": "Response Containers Docker Inspect V1 Containers Get" + } } } }, @@ -95,17 +163,17 @@ "tags": [ "containers" ], - "summary": "Starts the containers as defined in ContainerCreate by:\n- cleaning up resources from previous runs if any\n- pulling the needed images\n- starting the containers\n\nProgress may be obtained through URL\nProcess may be cancelled through URL", + "summary": 
"Starts the containers as defined in ContainerCreate by:\n- cleaning up resources from previous runs if any\n- starting the containers\n\nProgress may be obtained through URL\nProcess may be cancelled through URL", "operationId": "create_service_containers_task_v1_containers_post", "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ContainersCreate" } } - }, - "required": true + } }, "responses": { "202": { @@ -113,8 +181,8 @@ "content": { "application/json": { "schema": { - "title": "Response Create Service Containers Task V1 Containers Post", - "type": "string" + "type": "string", + "title": "Response Create Service Containers Task V1 Containers Post" } } } @@ -132,82 +200,28 @@ } } }, - "/v1/containers/{id}/logs": { + "/v1/containers/activity": { "get": { "tags": [ "containers" ], - "summary": "Get Container Logs", - "description": "Returns the logs of a given container if found", - "operationId": "get_container_logs_v1_containers__id__logs_get", - "parameters": [ - { - "required": true, - "schema": { - "title": "Id", - "type": "string" - }, - "name": "id", - "in": "path" - }, - { - "description": "Only return logs since this time, as a UNIX timestamp", - "required": false, - "schema": { - "title": "Timestamp", - "type": "integer", - "description": "Only return logs since this time, as a UNIX timestamp", - "default": 0 - }, - "name": "since", - "in": "query" - }, - { - "description": "Only return logs before this time, as a UNIX timestamp", - "required": false, - "schema": { - "title": "Timestamp", - "type": "integer", - "description": "Only return logs before this time, as a UNIX timestamp", - "default": 0 - }, - "name": "until", - "in": "query" - }, - { - "description": "Enabling this parameter will include timestamps in logs", - "required": false, - "schema": { - "title": "Display timestamps", - "type": "boolean", - "description": "Enabling this parameter will include timestamps in logs", - "default": false - }, - "name": "timestamps", - "in": "query" - } - ], + "summary": "Get Containers Activity", + "operationId": "get_containers_activity_v1_containers_activity_get", "responses": { "200": { "description": "Successful Response", - "content": { - "application/json": { - "schema": {} - } - } - }, - "404": { - "description": "Container does not exists" - }, - "500": { - "description": "Errors in container" - }, - "422": { - "description": "Validation Error", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "anyOf": [ + { + "$ref": "#/components/schemas/ActivityInfo" + }, + { + "type": "null" + } + ], + "title": "Response Get Containers Activity V1 Containers Activity Get" } } } @@ -225,15 +239,15 @@ "operationId": "get_containers_name_v1_containers_name_get", "parameters": [ { - "description": "JSON encoded dictionary. FastAPI does not allow for dict as type in query parameters", + "name": "filters", + "in": "query", "required": true, "schema": { - "title": "Filters", "type": "string", - "description": "JSON encoded dictionary. FastAPI does not allow for dict as type in query parameters" + "description": "JSON encoded dictionary. FastAPI does not allow for dict as type in query parameters", + "title": "Filters" }, - "name": "filters", - "in": "query" + "description": "JSON encoded dictionary. 
FastAPI does not allow for dict as type in query parameters" } ], "responses": { @@ -241,7 +255,17 @@ "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ], + "title": "Response Get Containers Name V1 Containers Name Get" + } } } }, @@ -264,13 +288,13 @@ "operationId": "inspect_container_v1_containers__id__get", "parameters": [ { + "name": "id", + "in": "path", "required": true, "schema": { - "title": "Id", - "type": "string" - }, - "name": "id", - "in": "path" + "type": "string", + "title": "Id" + } } ], "responses": { @@ -278,7 +302,10 @@ "description": "Successful Response", "content": { "application/json": { - "schema": {} + "schema": { + "type": "object", + "title": "Response Inspect Container V1 Containers Id Get" + } } } }, @@ -301,18 +328,18 @@ } } }, - "/v1/containers/directory-watcher": { + "/v1/containers/ports/io": { "patch": { "tags": [ "containers" ], - "summary": "Enable/disable directory-watcher event propagation", - "operationId": "toggle_directory_watcher_v1_containers_directory_watcher_patch", + "summary": "Enable/disable ports i/o", + "operationId": "toggle_ports_io_v1_containers_ports_io_patch", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/PatchDirectoryWatcherItem" + "$ref": "#/components/schemas/PatchPortsIOItem" } } }, @@ -378,24 +405,24 @@ "operationId": "attach_container_to_network_v1_containers__id__networks_attach_post", "parameters": [ { + "name": "id", + "in": "path", "required": true, "schema": { - "title": "Id", - "type": "string" - }, - "name": "id", - "in": "path" + "type": "string", + "title": "Id" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AttachContainerToNetworkItem" } } - }, - "required": true + } }, "responses": { "204": { @@ -423,24 +450,24 @@ "operationId": "detach_container_from_network_v1_containers__id__networks_detach_post", "parameters": [ { + "name": "id", + "in": "path", "required": true, "schema": { - "title": "Id", - "type": "string" - }, - "name": "id", - "in": "path" + "type": "string", + "title": "Id" + } } ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/DetachContainerFromNetworkItem" } } - }, - "required": true + } }, "responses": { "204": { @@ -459,6 +486,28 @@ } } }, + "/v1/containers/images:pull": { + "post": { + "tags": [ + "containers" + ], + "summary": "Pulls all the docker container images for the user services", + "operationId": "pull_user_servcices_docker_images_v1_containers_images_pull_post", + "responses": { + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string", + "title": "Response Pull User Servcices Docker Images V1 Containers Images Pull Post" + } + } + } + } + } + } + }, "/v1/containers:down": { "post": { "tags": [ @@ -472,8 +521,8 @@ "content": { "application/json": { "schema": { - "title": "Response Runs Docker Compose Down Task V1 Containers Down Post", - "type": "string" + "type": "string", + "title": "Response Runs Docker Compose Down Task V1 Containers Down Post" } } } @@ -494,8 +543,8 @@ "content": { "application/json": { "schema": { - "title": "Response State Restore Task V1 Containers State Restore Post", - "type": "string" + "type": "string", + "title": "Response State Restore Task V1 Containers State Restore Post" } } } @@ 
-516,8 +565,8 @@ "content": { "application/json": { "schema": { - "title": "Response State Save Task V1 Containers State Save Post", - "type": "string" + "type": "string", + "title": "Response State Save Task V1 Containers State Save Post" } } } @@ -536,11 +585,18 @@ "content": { "application/json": { "schema": { - "title": "Port Keys", - "type": "array", - "items": { - "type": "string" - } + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Port Keys" } } } @@ -551,8 +607,8 @@ "content": { "application/json": { "schema": { - "title": "Response Ports Inputs Pull Task V1 Containers Ports Inputs Pull Post", - "type": "string" + "type": "string", + "title": "Response Ports Inputs Pull Task V1 Containers Ports Inputs Pull Post" } } } @@ -581,11 +637,18 @@ "content": { "application/json": { "schema": { - "title": "Port Keys", - "type": "array", - "items": { - "type": "string" - } + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Port Keys" } } } @@ -596,8 +659,8 @@ "content": { "application/json": { "schema": { - "title": "Response Ports Outputs Pull Task V1 Containers Ports Outputs Pull Post", - "type": "string" + "type": "string", + "title": "Response Ports Outputs Pull Task V1 Containers Ports Outputs Pull Post" } } } @@ -628,8 +691,8 @@ "content": { "application/json": { "schema": { - "title": "Response Ports Outputs Push Task V1 Containers Ports Outputs Push Post", - "type": "string" + "type": "string", + "title": "Response Ports Outputs Push Task V1 Containers Ports Outputs Push Post" } } } @@ -650,195 +713,564 @@ "content": { "application/json": { "schema": { - "title": "Response Containers Restart Task V1 Containers Restart Post", - "type": "string" + "type": "string", + "title": "Response Containers Restart Task V1 Containers Restart Post" + } + } + } + } + } + } + }, + "/v1/volumes/{id}": { + "put": { + "tags": [ + "volumes" + ], + "summary": "Updates the state of the volume", + "operationId": "put_volume_state_v1_volumes__id__put", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/VolumeCategory" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PutVolumeItem" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" } } } } } } + }, + "/v1/disk/reserved:free": { + "post": { + "tags": [ + "disk" + ], + "summary": "Frees up reserved disk space", + "operationId": "free_reserved_disk_space_v1_disk_reserved_free_post", + "responses": { + "204": { + "description": "Successful Response" + } + } + } } }, "components": { "schemas": { - "ApplicationHealth": { - "title": "ApplicationHealth", + "ActivityInfo": { + "properties": { + "seconds_inactive": { + "type": "number", + "minimum": 0.0, + "title": "Seconds Inactive" + } + }, "type": "object", + "required": [ + "seconds_inactive" + ], + "title": "ActivityInfo" + }, + "ApplicationHealth": { "properties": { "is_healthy": { - "title": "Is Healthy", "type": "boolean", + "title": "Is Healthy", "description": "returns True if the service sis running correctly", "default": true }, "error_message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": 
"Error Message", - "type": "string", "description": "in case of error this gets set" } - } + }, + "type": "object", + "title": "ApplicationHealth" }, "AttachContainerToNetworkItem": { - "title": "AttachContainerToNetworkItem", - "required": [ - "network_id", - "network_aliases" - ], - "type": "object", "properties": { "network_id": { - "title": "Network Id", - "type": "string" + "type": "string", + "title": "Network Id" }, "network_aliases": { - "title": "Network Aliases", - "type": "array", "items": { "type": "string" - } + }, + "type": "array", + "title": "Network Aliases" } - } - }, - "ContainersCreate": { - "title": "ContainersCreate", + }, + "type": "object", "required": [ - "docker_compose_yaml" + "network_id", + "network_aliases" ], - "type": "object", + "title": "AttachContainerToNetworkItem" + }, + "BootMode": { + "type": "string", + "enum": [ + "CPU", + "GPU", + "MPI" + ], + "title": "BootMode" + }, + "ContainersComposeSpec": { "properties": { "docker_compose_yaml": { - "title": "Docker Compose Yaml", - "type": "string" + "type": "string", + "title": "Docker Compose Yaml" } - } - }, - "CreateDirsRequestItem": { - "title": "CreateDirsRequestItem", + }, + "type": "object", "required": [ - "outputs_labels" + "docker_compose_yaml" ], + "title": "ContainersComposeSpec" + }, + "ContainersCreate": { + "properties": { + "metrics_params": { + "$ref": "#/components/schemas/CreateServiceMetricsAdditionalParams" + } + }, "type": "object", + "required": [ + "metrics_params" + ], + "title": "ContainersCreate" + }, + "CreateDirsRequestItem": { "properties": { "outputs_labels": { - "title": "Outputs Labels", - "type": "object", "additionalProperties": { "$ref": "#/components/schemas/ServiceOutput" - } + }, + "type": "object", + "title": "Outputs Labels" } - } - }, - "DetachContainerFromNetworkItem": { - "title": "DetachContainerFromNetworkItem", + }, + "type": "object", "required": [ - "network_id" + "outputs_labels" ], + "title": "CreateDirsRequestItem" + }, + "CreateServiceMetricsAdditionalParams": { + "properties": { + "wallet_id": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Wallet Id" + }, + "wallet_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Wallet Name" + }, + "pricing_plan_id": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Pricing Plan Id" + }, + "pricing_unit_id": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Pricing Unit Id" + }, + "pricing_unit_cost_id": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Pricing Unit Cost Id" + }, + "product_name": { + "type": "string", + "title": "Product Name" + }, + "simcore_user_agent": { + "type": "string", + "title": "Simcore User Agent" + }, + "user_email": { + "type": "string", + "title": "User Email" + }, + "project_name": { + "type": "string", + "title": "Project Name" + }, + "node_name": { + "type": "string", + "title": "Node Name" + }, + "service_key": { + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + }, + "service_version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" + }, + "service_resources": { + "type": 
"object", + "title": "Service Resources" + }, + "service_additional_metadata": { + "type": "object", + "title": "Service Additional Metadata" + } + }, "type": "object", + "required": [ + "wallet_id", + "wallet_name", + "pricing_plan_id", + "pricing_unit_id", + "pricing_unit_cost_id", + "product_name", + "simcore_user_agent", + "user_email", + "project_name", + "node_name", + "service_key", + "service_version", + "service_resources", + "service_additional_metadata" + ], + "title": "CreateServiceMetricsAdditionalParams", + "example": { + "node_name": "the service of a lifetime _ *!", + "pricing_plan_id": 1, + "pricing_unit_detail_id": 1, + "pricing_unit_id": 1, + "product_name": "osparc", + "project_name": "_!New Study", + "service_additional_metadata": {}, + "service_key": "simcore/services/dynamic/test", + "service_resources": {}, + "service_version": "0.0.1", + "simcore_user_agent": "undefined", + "user_email": "test@test.com", + "wallet_id": 1, + "wallet_name": "a private wallet for me" + } + }, + "DetachContainerFromNetworkItem": { "properties": { "network_id": { - "title": "Network Id", - "type": "string" + "type": "string", + "title": "Network Id" } - } + }, + "type": "object", + "required": [ + "network_id" + ], + "title": "DetachContainerFromNetworkItem" }, "HTTPValidationError": { - "title": "HTTPValidationError", - "type": "object", "properties": { "detail": { - "title": "Detail", - "type": "array", "items": { "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "ImageResources": { + "properties": { + "image": { + "type": "string", + "pattern": "^(?:([a-z0-9-]+(?:\\.[a-z0-9-]+)+(?::\\d+)?|[a-z0-9-]+:\\d+)/)?((?:[a-z0-9][a-z0-9_.-]*/)*[a-z0-9-_]+[a-z0-9])(?::([\\w][\\w.-]{0,127}))?(\\@sha256:[a-fA-F0-9]{32,64})?$", + "title": "Image", + "description": "Used by the frontend to provide a context for the users.Services with a docker-compose spec will have multiple entries.Using the `image:version` instead of the docker-compose spec is more helpful for the end user." 
+ }, + "resources": { + "additionalProperties": { + "$ref": "#/components/schemas/ResourceValue" + }, + "type": "object", + "title": "Resources" + }, + "boot_modes": { + "items": { + "$ref": "#/components/schemas/BootMode" + }, + "type": "array", + "title": "Boot Modes", + "description": "describe how a service shall be booted, using CPU, MPI, openMP or GPU", + "default": [ + "CPU" + ] + } + }, + "type": "object", + "required": [ + "image", + "resources" + ], + "title": "ImageResources", + "example": { + "image": "simcore/service/dynamic/pretty-intense:1.0.0", + "resources": { + "AIRAM": { + "limit": 1, + "reservation": 1 + }, + "ANY_resource": { + "limit": "some_value", + "reservation": "some_value" + }, + "CPU": { + "limit": 4, + "reservation": 0.1 + }, + "RAM": { + "limit": 103079215104, + "reservation": 536870912 + }, + "VRAM": { + "limit": 1, + "reservation": 1 } } } }, - "PatchDirectoryWatcherItem": { - "title": "PatchDirectoryWatcherItem", + "PatchPortsIOItem": { + "properties": { + "enable_outputs": { + "type": "boolean", + "title": "Enable Outputs" + }, + "enable_inputs": { + "type": "boolean", + "title": "Enable Inputs" + } + }, + "type": "object", "required": [ - "is_enabled" + "enable_outputs", + "enable_inputs" ], - "type": "object", + "title": "PatchPortsIOItem" + }, + "PutVolumeItem": { "properties": { - "is_enabled": { - "title": "Is Enabled", - "type": "boolean" + "status": { + "$ref": "#/components/schemas/VolumeStatus" } - } - }, - "SelectBox": { - "title": "SelectBox", + }, + "type": "object", "required": [ - "structure" + "status" ], + "title": "PutVolumeItem" + }, + "ResourceValue": { + "properties": { + "limit": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "title": "Limit" + }, + "reservation": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "title": "Reservation" + } + }, "type": "object", + "required": [ + "limit", + "reservation" + ], + "title": "ResourceValue" + }, + "SelectBox": { "properties": { "structure": { - "title": "Structure", - "minItems": 1, - "type": "array", "items": { "$ref": "#/components/schemas/Structure" - } + }, + "type": "array", + "minItems": 1, + "title": "Structure" } }, - "additionalProperties": false - }, - "ServiceOutput": { - "title": "ServiceOutput", + "additionalProperties": false, + "type": "object", "required": [ - "label", - "description", - "type" + "structure" ], - "type": "object", + "title": "SelectBox" + }, + "ServiceOutput": { "properties": { "displayOrder": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], "title": "Displayorder", - "type": "number", "description": "DEPRECATED: new display order is taken from the item position. 
This will be removed.", "deprecated": true }, "label": { - "title": "Label", "type": "string", - "description": "short name for the property", - "example": "Age" + "title": "Label", + "description": "short name for the property" }, "description": { - "title": "Description", "type": "string", - "description": "description of the property", - "example": "Age in seconds since 1970" + "title": "Description", + "description": "description of the property" }, "type": { - "title": "Type", - "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", "type": "string", + "pattern": "^(number|integer|boolean|string|ref_contentSchema|data:([^/\\s,]+/[^/\\s,]+|\\[[^/\\s,]+/[^/\\s,]+(,[^/\\s]+/[^/,\\s]+)*\\]))$", + "title": "Type", "description": "data type expected on this input glob matching for data type is allowed" }, "contentSchema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], "title": "Contentschema", - "type": "object", "description": "jsonschema of this input/output. Required when type='ref_contentSchema'" }, "fileToKeyMap": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], "title": "Filetokeymap", - "type": "object", "description": "Place the data associated with the named keys in files" }, "unit": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], "title": "Unit", - "type": "string", - "description": "Units, when it refers to a physical quantity" + "description": "Units, when it refers to a physical quantity", + "deprecated": true }, "widget": { - "title": "Widget", - "allOf": [ + "anyOf": [ { "$ref": "#/components/schemas/Widget" + }, + { + "type": "null" } ], "description": "custom widget to use instead of the default one determined from the data-type", @@ -846,18 +1278,17 @@ } }, "additionalProperties": false, - "description": "Base class for service input/outputs" - }, - "Structure": { - "title": "Structure", + "type": "object", "required": [ - "key", - "label" + "label", + "description", + "type" ], - "type": "object", + "title": "ServiceOutput" + }, + "Structure": { "properties": { "key": { - "title": "Key", "anyOf": [ { "type": "string" @@ -868,44 +1299,42 @@ { "type": "number" } - ] + ], + "title": "Key" }, "label": { - "title": "Label", - "type": "string" + "type": "string", + "title": "Label" } }, - "additionalProperties": false - }, - "TextArea": { - "title": "TextArea", + "additionalProperties": false, + "type": "object", "required": [ - "minHeight" + "key", + "label" ], - "type": "object", + "title": "Structure" + }, + "TextArea": { "properties": { "minHeight": { - "title": "Minheight", - "exclusiveMinimum": true, "type": "integer", + "exclusiveMinimum": true, + "title": "Minheight", "description": "minimum Height of the textarea", "minimum": 0 } }, - "additionalProperties": false - }, - "ValidationError": { - "title": "ValidationError", + "additionalProperties": false, + "type": "object", "required": [ - "loc", - "msg", - "type" + "minHeight" ], - "type": "object", + "title": "TextArea" + }, + "ValidationError": { "properties": { "loc": { - "title": "Location", - "type": "array", "items": { "anyOf": [ { @@ -915,36 +1344,55 @@ "type": "integer" } ] - } + }, + "type": "array", + "title": "Location" }, "msg": { - "title": "Message", - "type": "string" + "type": "string", + "title": "Message" }, "type": { - "title": "Error Type", - "type": "string" + "type": "string", + "title": "Error Type" } - } - }, - "Widget": { - "title": 
"Widget", + }, + "type": "object", "required": [ - "type", - "details" + "loc", + "msg", + "type" ], - "type": "object", + "title": "ValidationError" + }, + "VolumeCategory": { + "type": "string", + "enum": [ + "OUTPUTS", + "INPUTS", + "STATES", + "SHARED_STORE" + ], + "title": "VolumeCategory", + "description": "These uniquely identify volumes which are mounted by\nthe dynamic-sidecar and user services.\n\nThis is primarily used to keep track of the status of\neach individual volume on the volumes.\n\nThe status is ingested by the agent and processed\nwhen the volume is removed." + }, + "VolumeStatus": { + "type": "string", + "enum": [ + "CONTENT_NEEDS_TO_BE_SAVED", + "CONTENT_WAS_SAVED", + "CONTENT_NO_SAVE_REQUIRED" + ], + "title": "VolumeStatus", + "description": "Used by the agent to figure out what to do with the data\npresent on the volume." + }, + "Widget": { "properties": { "type": { - "allOf": [ - { - "$ref": "#/components/schemas/WidgetType" - } - ], + "$ref": "#/components/schemas/WidgetType", "description": "type of the property" }, "details": { - "title": "Details", "anyOf": [ { "$ref": "#/components/schemas/TextArea" @@ -952,19 +1400,25 @@ { "$ref": "#/components/schemas/SelectBox" } - ] + ], + "title": "Details" } }, - "additionalProperties": false + "additionalProperties": false, + "type": "object", + "required": [ + "type", + "details" + ], + "title": "Widget" }, "WidgetType": { - "title": "WidgetType", + "type": "string", "enum": [ "TextArea", "SelectBox" ], - "type": "string", - "description": "An enumeration." + "title": "WidgetType" } } } diff --git a/services/dynamic-sidecar/requirements/_base.in b/services/dynamic-sidecar/requirements/_base.in index 52dee709022..7198cff683c 100644 --- a/services/dynamic-sidecar/requirements/_base.in +++ b/services/dynamic-sidecar/requirements/_base.in @@ -7,6 +7,7 @@ # NOTE: These input-requirements under packages are tested using latest updates # NOTE: Make sure these packages are added in setup.install_requires +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/postgres-database/requirements/_base.in # service-library[fastapi] @@ -28,12 +29,10 @@ aio-pika aiodocker aiofiles aioprocessing -docker-compose -fastapi -httpx psutil pydantic python-magic # file type identification library. 
See 'magic.from_file(...)' NOTE: requires `libmagic`` installed +python-socketio PyYAML -uvicorn +u-msgpack-python watchdog diff --git a/services/dynamic-sidecar/requirements/_base.txt b/services/dynamic-sidecar/requirements/_base.txt index c3123f3a403..ca212ee21be 100644 --- a/services/dynamic-sidecar/requirements/_base.txt +++ b/services/dynamic-sidecar/requirements/_base.txt @@ -1,277 +1,925 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.2.4 +aio-pika==9.5.5 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiocache==0.11.1 - # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in +aiocache==0.12.3 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in aiodebug==2.3.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -aiodocker==0.19.1 - # via -r requirements/_base.in -aiofiles==0.8.0 +aiodocker==0.24.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in -aiohttp==3.8.3 +aiofiles==24.1.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in + # -r requirements/_base.in +aiohappyeyeballs==2.5.0 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # aiodocker -aiopg==1.3.3 - # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in aioprocessing==2.0.1 # via -r requirements/_base.in -aiormq==6.4.2 +aiormq==6.8.1 # via aio-pika -aiosignal==1.2.0 +aiosignal==1.3.2 # via aiohttp -alembic==1.8.1 +alembic==1.15.1 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in -anyio==3.6.1 +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # httpcore + # fast-depends + # faststream + # httpx # starlette -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -asgiref==3.5.2 - # via uvicorn -async-timeout==4.0.2 + # watchfiles +arrow==1.3.0 # via - # aiohttp - # aiopg - # redis -attrs==21.4.0 + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.1.0 # via - # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/./constraints.txt # aiohttp # jsonschema -bcrypt==3.2.0 - # via paramiko -certifi==2022.12.7 - # via + # referencing +bidict==0.23.1 + # via python-socketio +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # 
-c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx # requests -cffi==1.15.0 - # via - # bcrypt - # cryptography - # pynacl -charset-normalizer==2.0.12 - # via - # aiohttp - # requests -click==8.1.3 +charset-normalizer==3.4.1 + # via requests +click==8.1.8 # via + # rich-toolkit # typer # uvicorn -cryptography==39.0.1 +deprecated==1.2.18 # via - # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt - # paramiko -distro==1.5.0 - # via docker-compose -dnspython==2.1.0 + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -docker==6.0.0 - # via docker-compose -docker-compose==1.29.1 - # via -r requirements/_base.in -dockerpty==0.4.1 - # via docker-compose -docopt==0.6.2 - # via docker-compose -email-validator==1.2.1 - # via pydantic -fastapi==0.85.0 +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r 
requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -frozenlist==1.3.0 +faststream==0.5.35 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +flexcache==0.3 + # via pint +flexparser==0.4 + # via pint +frozenlist==1.5.0 # via # aiohttp # aiosignal -greenlet==1.1.2 +googleapis-common-protos==1.69.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 # via sqlalchemy -h11==0.12.0 +grpcio==1.70.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 # via # httpcore # uvicorn -httpcore==0.15.0 + # wsproto +h2==4.2.0 # via httpx -httpx==0.23.0 - # via +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -idna==2.10 + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator + # httpx # requests - # rfc3986 # yarl -jaeger-client==4.8.0 - # via fastapi-contrib -jsonschema==3.2.0 +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jsonschema==4.23.0 # via # -r requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in - # docker-compose -mako==1.2.2 - # via + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # alembic -markupsafe==2.1.1 - # via mako -multidict==6.0.2 +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 # via # aiohttp # yarl -opentracing==2.4.0 +opentelemetry-api==1.30.0 # via - # fastapi-contrib - # jaeger-client -packaging==21.3 + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.30.0 # via - # -r requirements/../../../packages/simcore-sdk/requirements/_base.in - # docker -pamqp==3.2.1 - # via aiormq -paramiko==2.11.0 + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.30.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.51b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # 
opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.51b0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.51b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.51b0 + # via + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.51b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.51b0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.51b0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.51b0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.30.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.30.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.51b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.51b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt - # docker -pint==0.19.2 + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +pint==0.24.4 # via -r requirements/../../../packages/simcore-sdk/requirements/_base.in -psutil==5.9.1 - # via -r requirements/_base.in -psycopg2-binary==2.9.3 +platformdirs==4.3.6 + # via pint +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.0 # via - # aiopg - # sqlalchemy -pycparser==2.20 - # via cffi -pydantic==1.9.0 + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # -r requirements/_base.in + # fast-depends # fastapi -pyinstrument==4.1.1 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.2 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 # via # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -pynacl==1.4.0 - # via paramiko -pyparsing==3.0.9 - # via packaging -pyrsistent==0.18.1 - # via jsonschema -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via arrow -python-dotenv==0.20.0 - # via docker-compose -python-magic==0.4.25 - # via -r requirements/_base.in -pyyaml==5.4.1 +python-dotenv==1.0.1 # via + # pydantic-settings + # uvicorn +python-engineio==4.11.2 + # via python-socketio +python-magic==0.4.27 + # via -r requirements/_base.in +python-multipart==0.0.20 + # via fastapi +python-socketio==5.12.1 + # via -r requirements/_base.in +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/_base.in - # docker-compose -redis==4.4.0 - # via + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in -requests==2.27.1 - # via - # docker - # docker-compose -rfc3986==1.5.0 - # via httpx -six==1.16.0 - # via - # bcrypt - # dockerpty +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # paramiko - # pynacl - # python-dateutil - # thrift - # websocket-client -sniffio==1.2.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 # via - # anyio - # httpcore - # httpx -sqlalchemy==1.4.37 + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.23.1 # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +simple-websocket==1.1.0 + # via python-engineio +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/postgres-database/requirements/_base.in - # aiopg + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/_base.in # alembic -starlette==0.20.4 - # via fastapi -tenacity==8.0.1 +starlette==0.46.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in -texttable==1.6.3 - # via docker-compose -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client 
-tornado==6.1 - # via - # jaeger-client - # threadloop -tqdm==4.64.0 +toolz==1.0.0 # via # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/_base.in -typer==0.4.1 +typer==0.15.2 # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/_base.in -typing-extensions==4.3.0 + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug - # aiodocker + # alembic + # anyio + # fastapi + # faststream + # flexcache + # flexparser + # opentelemetry-sdk + # pint # pydantic - # starlette -urllib3==1.26.9 - # via + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +u-msgpack-python==2.8.0 + # via -r requirements/_base.in +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt - # docker + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/simcore-sdk/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # requests -uvicorn==0.17.0 +uvicorn==0.34.2 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -watchdog==2.1.5 + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchdog==6.0.0 # via -r requirements/_base.in -websocket-client==0.59.0 +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 # via - # docker - # docker-compose -yarl==1.7.2 + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +wsproto==1.2.0 + # via simple-websocket +yarl==1.18.3 # via # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/simcore-sdk/requirements/../../../packages/service-library/requirements/_base.in # aio-pika # aiohttp # aiormq - -# The following packages are considered to be 
unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/dynamic-sidecar/requirements/_test.in b/services/dynamic-sidecar/requirements/_test.in index feff0a4a36d..35f081991de 100644 --- a/services/dynamic-sidecar/requirements/_test.in +++ b/services/dynamic-sidecar/requirements/_test.in @@ -3,18 +3,22 @@ --constraint _base.txt +aioboto3 asgi_lifespan async-asgi-testclient # replacement for fastapi.testclient.TestClient [see b) below] +docker faker +flaky pytest pytest-asyncio pytest-cov pytest-mock -types-aiofiles # missing mypy stubs -types-pkg_resources # missing mypy stubs -types-PyYAML # missing mypy stubs - - +python-dotenv +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types_aiobotocore_s3 +types-aiofiles +types-psutil +types-PyYAML # NOTE: What test client to use for fastapi-based apps? # diff --git a/services/dynamic-sidecar/requirements/_test.txt b/services/dynamic-sidecar/requirements/_test.txt index 0016d510bc9..7cf2d4f763c 100644 --- a/services/dynamic-sidecar/requirements/_test.txt +++ b/services/dynamic-sidecar/requirements/_test.txt @@ -1,90 +1,172 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -asgi-lifespan==2.0.0 +aioboto3==14.3.0 + # via -r requirements/_test.in +aiobotocore==2.22.0 + # via aioboto3 +aiofiles==24.1.0 + # via + # -c requirements/_base.txt + # aioboto3 +aiohappyeyeballs==2.5.0 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aiobotocore +aioitertools==0.12.0 + # via aiobotocore +aiosignal==1.3.2 + # via + # -c requirements/_base.txt + # aiohttp +asgi-lifespan==2.1.0 # via -r requirements/_test.in async-asgi-testclient==1.4.11 # via -r requirements/_test.in -attrs==21.4.0 +attrs==25.1.0 # via # -c requirements/_base.txt - # pytest -certifi==2022.12.7 + # aiohttp +boto3==1.37.3 + # via aiobotocore +botocore==1.37.3 # via + # aiobotocore + # boto3 + # s3transfer +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # requests -charset-normalizer==2.0.12 +charset-normalizer==3.4.1 # via # -c requirements/_base.txt # requests -coverage==7.2.1 +coverage==7.6.12 # via pytest-cov -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 +docker==7.1.0 # via -r requirements/_test.in -idna==2.10 +faker==36.2.2 + # via -r requirements/_test.in +flaky==3.8.1 + # via -r requirements/_test.in +frozenlist==1.5.0 + # via + # -c requirements/_base.txt + # aiohttp + # aiosignal +greenlet==3.1.1 + # via + # -c requirements/_base.txt + # sqlalchemy +idna==3.10 # via # -c requirements/_base.txt # requests + # yarl iniconfig==2.0.0 # via pytest -multidict==6.0.2 +jmespath==1.0.1 + # via + # aiobotocore + # boto3 + # botocore +multidict==6.1.0 # via # -c requirements/_base.txt + # aiobotocore + # aiohttp # async-asgi-testclient -packaging==21.3 + # yarl +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +packaging==24.2 # via # -c requirements/_base.txt # pytest -pluggy==1.0.0 +pluggy==1.5.0 # via pytest -pyparsing==3.0.9 +propcache==0.3.0 # via # -c requirements/_base.txt - # packaging -pytest==7.2.1 + # aiohttp + # yarl +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio # 
pytest-cov # pytest-mock -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-mock==3.10.0 +pytest-mock==3.14.0 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 + # via + # -c requirements/_base.txt + # aiobotocore + # botocore +python-dotenv==1.0.1 # via # -c requirements/_base.txt - # faker -requests==2.27.1 + # -r requirements/_test.in +requests==2.32.3 # via # -c requirements/_base.txt # async-asgi-testclient -six==1.16.0 + # docker +s3transfer==0.11.3 + # via boto3 +six==1.17.0 # via # -c requirements/_base.txt # python-dateutil -sniffio==1.2.0 +sniffio==1.3.1 # via # -c requirements/_base.txt # asgi-lifespan -tomli==2.0.1 +sqlalchemy==1.4.54 # via - # coverage - # pytest -types-aiofiles==22.1.0.9 + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +types-aiobotocore-s3==2.21.1 + # via -r requirements/_test.in +types-aiofiles==24.1.0.20241221 # via -r requirements/_test.in -types-pkg-resources==0.1.3 +types-psutil==7.0.0.20250218 # via -r requirements/_test.in -types-pyyaml==6.0.12.8 +types-pyyaml==6.0.12.20241230 # via -r requirements/_test.in -urllib3==1.26.9 +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # mypy + # sqlalchemy2-stubs + # types-aiobotocore-s3 +tzdata==2025.1 + # via faker +urllib3==2.3.0 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt + # botocore + # docker # requests +wrapt==1.17.2 + # via + # -c requirements/_base.txt + # aiobotocore +yarl==1.18.3 + # via + # -c requirements/_base.txt + # aiohttp diff --git a/services/dynamic-sidecar/requirements/_tools.txt b/services/dynamic-sidecar/requirements/_tools.txt index eb16d8502b3..404d7858eca 100644 --- a/services/dynamic-sidecar/requirements/_tools.txt +++ b/services/dynamic-sidecar/requirements/_tools.txt @@ -1,93 +1,86 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.8 # via pylint -black==22.12.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.8 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==21.3 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt + # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 
+platformdirs==4.3.6 # via + # -c requirements/_base.txt # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.4 # via -r requirements/../../../requirements/devenv.txt -pyparsing==3.0.9 +pyproject-hooks==1.2.0 # via - # -c requirements/_test.txt - # packaging -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 + # build + # pip-tools +pyyaml==6.0.2 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # pre-commit -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.3.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.29.3 # via pre-commit -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/dynamic-sidecar/requirements/ci.txt b/services/dynamic-sidecar/requirements/ci.txt index dffd2697d65..827161faf6c 100644 --- a/services/dynamic-sidecar/requirements/ci.txt +++ b/services/dynamic-sidecar/requirements/ci.txt @@ -12,12 +12,13 @@ --requirement _tools.txt # installs this repo's packages -../../packages/models-library/ -../../packages/postgres-database/ -../../packages/pytest-simcore/ -../../packages/simcore-sdk -../../packages/service-library[fastapi] -../../packages/settings-library/ +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library/ +simcore-postgres-database @ ../../packages/postgres-database/ +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-sdk @ ../../packages/simcore-sdk +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-dynamic-sidecar @ . 
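The ci.txt hunk above (and the prod.txt hunk further below) switch the repo-local requirements from bare relative paths to named PEP 508 direct references, while dev.txt keeps --editable installs. As a minimal sketch of that convention only (the package name and path below are hypothetical, not taken from the diff), each entry pairs a distribution name with its local path:

    simcore-example-library @ ../../packages/example-library/

Declaring the name up front can help pip and pip-sync match the entry against an already-installed distribution by name, instead of having to build the path first to discover which project it provides.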
diff --git a/services/dynamic-sidecar/requirements/dev.txt b/services/dynamic-sidecar/requirements/dev.txt index 2d1c00661ed..ce064f44c52 100644 --- a/services/dynamic-sidecar/requirements/dev.txt +++ b/services/dynamic-sidecar/requirements/dev.txt @@ -12,6 +12,7 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library --editable ../../packages/models-library --editable ../../packages/postgres-database/ --editable ../../packages/pytest-simcore/ diff --git a/services/dynamic-sidecar/requirements/prod.txt b/services/dynamic-sidecar/requirements/prod.txt index a30978c96a0..11aba2a4b8c 100644 --- a/services/dynamic-sidecar/requirements/prod.txt +++ b/services/dynamic-sidecar/requirements/prod.txt @@ -10,11 +10,12 @@ --requirement _base.txt # installs this repo's packages -../../packages/models-library/ -../../packages/postgres-database/ -../../packages/simcore-sdk -../../packages/service-library[fastapi] -../../packages/settings-library/ +simcore-models-library @ ../../packages/models-library/ +simcore-common-library @ ../../packages/common-library/ +simcore-postgres-database @ ../../packages/postgres-database/ +simcore-sdk @ ../../packages/simcore-sdk +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library/ # installs current package -. +simcore-service-dynamic-sidecar @ . diff --git a/services/dynamic-sidecar/setup.cfg b/services/dynamic-sidecar/setup.cfg index 24947589554..e02fb54d462 100644 --- a/services/dynamic-sidecar/setup.cfg +++ b/services/dynamic-sidecar/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.1.1 +current_version = 1.2.0 commit = True message = services/dynamic-sidecar version: {current_version} β†’ {new_version} tag = False @@ -9,5 +9,10 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function markers = testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/services/dynamic-sidecar/setup.py b/services/dynamic-sidecar/setup.py index c4171130c44..ceb4a90a0f8 100644 --- a/services/dynamic-sidecar/setup.py +++ b/services/dynamic-sidecar/setup.py @@ -34,31 +34,32 @@ def read_reqs(reqs_path: Path) -> set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name="simcore-service-dynamic-sidecar", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author=", ".join( +SETUP = { + "name": "simcore-service-dynamic-sidecar", + "version": (CURRENT_DIR / "VERSION").read_text().strip(), + "author": ", ".join( ( "Andrei Neagu (GitHK)", "Sylvain Anderegg (sanderegg)", ) ), - description="Implements a sidecar service to manage user's dynamic/interactive services", - packages=find_packages(where="src"), - package_dir={ + "description": "Implements a sidecar service to manage user's dynamic/interactive services", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - python_requires="~=3.9", - PROD_REQUIREMENTS=PROD_REQUIREMENTS, - TEST_REQUIREMENTS=TEST_REQUIREMENTS, - setup_requires=["setuptools_scm"], - entry_points={ + "include_package_data": True, + "python_requires": ">=3.10", + "PROD_REQUIREMENTS": PROD_REQUIREMENTS, + "TEST_REQUIREMENTS": TEST_REQUIREMENTS, + "setup_requires": ["setuptools_scm"], + "entry_points": { "console_scripts": [ "simcore-service-dynamic-sidecar=simcore_service_dynamic_sidecar.cli:main", + 
"simcore-service=simcore_service_dynamic_sidecar.cli:main", ], }, -) +} if __name__ == "__main__": diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/_meta.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/_meta.py index 6ad0476a54a..2c9fb9d5c50 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/_meta.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/_meta.py @@ -1,32 +1,22 @@ """ Package Metadata """ -from contextlib import suppress -import pkg_resources +from importlib.metadata import distribution, version -_current_distribution = pkg_resources.get_distribution( - "simcore-service-dynamic-sidecar" -) +_current_distribution = distribution("simcore-service-dynamic-sidecar") -PROJECT_NAME: str = _current_distribution.project_name +PROJECT_NAME: str = _current_distribution.metadata["Name"] -API_VERSION: str = _current_distribution.version -MAJOR, MINOR, PATCH = _current_distribution.version.split(".") +API_VERSION: str = version("simcore-service-dynamic-sidecar") +MAJOR, MINOR, PATCH = API_VERSION.split(".") API_VTAG: str = f"v{MAJOR}" __version__ = _current_distribution.version def get_summary() -> str: - with suppress(Exception): - try: - metadata = _current_distribution.get_metadata_lines("METADATA") - except FileNotFoundError: - metadata = _current_distribution.get_metadata_lines("PKG-INFO") - - return next(x.split(":") for x in metadata if x.startswith("Summary:"))[-1] - return "" + return _current_distribution.metadata.get_all("Summary", [""])[-1] SUMMARY: str = get_summary() diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/__init__.py index 1a25517afd4..e69de29bb2d 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/__init__.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/__init__.py @@ -1,3 +0,0 @@ -from ._routing import main_router - -__all__: tuple[str, ...] 
= ("main_router",) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_dependencies.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_dependencies.py deleted file mode 100644 index 990876af205..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_dependencies.py +++ /dev/null @@ -1,54 +0,0 @@ -""" Free functions to inject dependencies in routes handlers -""" - -from asyncio import Lock - -from fastapi import Depends, FastAPI, Request -from fastapi.datastructures import State -from simcore_service_dynamic_sidecar.modules.outputs import ( - OutputsContext, - OutputsManager, -) - -from ..core.settings import ApplicationSettings -from ..models.schemas.application_health import ApplicationHealth -from ..models.shared_store import SharedStore -from ..modules.mounted_fs import MountedVolumes - - -def get_application(request: Request) -> FastAPI: - return request.app - - -def get_app_state(request: Request) -> State: - return request.app.state - - -def get_application_health( - app_state: State = Depends(get_app_state), -) -> ApplicationHealth: - return app_state.application_health # type: ignore - - -def get_settings(app_state: State = Depends(get_app_state)) -> ApplicationSettings: - return app_state.settings # type: ignore - - -def get_shared_store(app_state: State = Depends(get_app_state)) -> SharedStore: - return app_state.shared_store # type: ignore - - -def get_mounted_volumes(app_state: State = Depends(get_app_state)) -> MountedVolumes: - return app_state.mounted_volumes # type: ignore - - -def get_container_restart_lock(app_state: State = Depends(get_app_state)) -> Lock: - return app_state.container_restart_lock # type: ignore - - -def get_outputs_manager(app_state: State = Depends(get_app_state)) -> OutputsManager: - return app_state.outputs_manager # type: ignore - - -def get_outputs_context(app_state: State = Depends(get_app_state)) -> OutputsContext: - return app_state.outputs_context # type: ignore diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_routing.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_routing.py deleted file mode 100644 index b731b86a466..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/_routing.py +++ /dev/null @@ -1,29 +0,0 @@ -""" Module to collect, tag and prefix all routes under 'main_router' - -Setup and register all routes here form different modules -""" - -from fastapi import APIRouter - -from .._meta import API_VTAG -from . import containers, containers_extension, containers_long_running_tasks, health - -main_router = APIRouter() -main_router.include_router(health.router) -main_router.include_router( - containers.router, - tags=["containers"], - prefix=f"/{API_VTAG}", -) -main_router.include_router( - containers_extension.router, - tags=["containers"], - prefix=f"/{API_VTAG}", -) -main_router.include_router( - containers_long_running_tasks.router, - tags=["containers"], - prefix=f"/{API_VTAG}", -) - -__all__: tuple[str, ...] 
= ("main_router",) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers.py deleted file mode 100644 index 5737b685272..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers.py +++ /dev/null @@ -1,233 +0,0 @@ -# pylint: disable=too-many-arguments - -import json -import logging -from asyncio import Lock -from typing import Any, Optional, Union - -from fastapi import APIRouter, Depends, HTTPException -from fastapi import Path as PathParam -from fastapi import Query, Request, status -from servicelib.fastapi.requests_decorators import cancel_on_disconnect - -from ..core.docker_utils import docker_client -from ..core.validation import parse_compose_spec -from ..models.shared_store import SharedStore -from ._dependencies import get_container_restart_lock, get_shared_store - -logger = logging.getLogger(__name__) - - -def _raise_if_container_is_missing( - container_id: str, container_names: list[str] -) -> None: - if container_id not in container_names: - message = f"No container '{container_id}' was started. Started containers '{container_names}'" - logger.warning(message) - raise HTTPException(status.HTTP_404_NOT_FOUND, detail=message) - - -router = APIRouter() - - -@router.get( - "/containers", - responses={ - status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"} - }, -) -@cancel_on_disconnect -async def containers_docker_inspect( - request: Request, - only_status: bool = Query( - False, description="if True only show the status of the container" - ), - shared_store: SharedStore = Depends(get_shared_store), - container_restart_lock: Lock = Depends(get_container_restart_lock), -) -> dict[str, Any]: - """ - Returns entire docker inspect data, if only_state is True, - the status of the containers is returned - """ - assert request # nosec - - def _format_result(container_inspect: dict[str, Any]) -> dict[str, Any]: - if only_status: - container_state = container_inspect.get("State", {}) - - # pending is another fake state use to share more information with the frontend - return { - "Status": container_state.get("Status", "pending"), - "Error": container_state.get("Error", ""), - } - - return container_inspect - - async with container_restart_lock, docker_client() as docker: - container_names = shared_store.container_names - - results = {} - - for container in container_names: - container_instance = await docker.containers.get(container) - container_inspect = await container_instance.show() - results[container] = _format_result(container_inspect) - - return results - - -# Some of the operations and sub-resources on containers are implemented as long-running tasks. 
-# Handlers for these operations can be found in: -# -# POST /containers : SEE containers_long_running_tasks::create_service_containers_task -# POST /containers:down : SEE containers_long_running_tasks::runs_docker_compose_down_task -# POST /containers/state:restore : SEE containers_long_running_tasks::state_restore_task -# POST /containers/state:save : SEE containers_long_running_tasks::state_save_task -# POST /containers/ports/inputs:pull : SEE containers_long_running_tasks::ports_inputs_pull_task -# POST /containers/ports/outputs:pull : SEE containers_long_running_tasks::ports_outputs_pull_task -# POST /containers/ports/outputs:push : SEE containers_long_running_tasks::ports_outputs_push_task -# - - -@router.get( - "/containers/{id}/logs", - responses={ - status.HTTP_404_NOT_FOUND: { - "description": "Container does not exists", - }, - status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"}, - }, -) -@cancel_on_disconnect -async def get_container_logs( - request: Request, - container_id: str = PathParam(..., alias="id"), - since: int = Query( - 0, - title="Timestamp", - description="Only return logs since this time, as a UNIX timestamp", - ), - until: int = Query( - 0, - title="Timestamp", - description="Only return logs before this time, as a UNIX timestamp", - ), - timestamps: bool = Query( - False, - title="Display timestamps", - description="Enabling this parameter will include timestamps in logs", - ), - shared_store: SharedStore = Depends(get_shared_store), -) -> list[str]: - """Returns the logs of a given container if found""" - assert request # nosec - - _raise_if_container_is_missing(container_id, shared_store.container_names) - - async with docker_client() as docker: - container_instance = await docker.containers.get(container_id) - - args = dict(stdout=True, stderr=True, since=since, until=until) - if timestamps: - args["timestamps"] = True - - container_logs: list[str] = await container_instance.log(**args) - return container_logs - - -@router.get( - "/containers/name", - responses={ - status.HTTP_404_NOT_FOUND: { - "description": "No entrypoint container found or spec is not yet present" - }, - status.HTTP_422_UNPROCESSABLE_ENTITY: { - "description": "Filters could not be parsed" - }, - }, -) -@cancel_on_disconnect -async def get_containers_name( - request: Request, - filters: str = Query( - ..., - description=( - "JSON encoded dictionary. FastAPI does not " - "allow for dict as type in query parameters" - ), - ), - shared_store: SharedStore = Depends(get_shared_store), -) -> Union[str, dict[str, Any]]: - """ - Searches for the container's name given the network - on which the proxy communicates with it. 
- Supported filters: - network: matches against the exact network name - assigned to the container; `will include` - containers - exclude: matches if contained in the name of the - container; `will exclude` containers - """ - assert request # nosec - - filters_dict: dict[str, str] = json.loads(filters) - if not isinstance(filters_dict, dict): - raise HTTPException( - status.HTTP_422_UNPROCESSABLE_ENTITY, - detail=f"Provided filters, could not parsed {filters_dict}", - ) - network_name: Optional[str] = filters_dict.get("network", None) - exclude: Optional[str] = filters_dict.get("exclude", None) - - stored_compose_content = shared_store.compose_spec - if stored_compose_content is None: - raise HTTPException( - status.HTTP_404_NOT_FOUND, - detail="No spec for docker-compose down was found", - ) - - compose_spec = parse_compose_spec(stored_compose_content) - - container_name = None - - spec_services = compose_spec["services"] - for service in spec_services: - service_content = spec_services[service] - if network_name in service_content.get("networks", {}): - if exclude is not None and exclude in service_content["container_name"]: - # removing this container from results - continue - container_name = service_content["container_name"] - break - - if container_name is None: - raise HTTPException( - status.HTTP_404_NOT_FOUND, - detail=f"No container found for network={network_name}", - ) - - return f"{container_name}" - - -@router.get( - "/containers/{id}", - responses={ - status.HTTP_404_NOT_FOUND: {"description": "Container does not exist"}, - status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"}, - }, -) -@cancel_on_disconnect -async def inspect_container( - request: Request, - container_id: str = PathParam(..., alias="id"), - shared_store: SharedStore = Depends(get_shared_store), -) -> dict[str, Any]: - """Returns information about the container, like docker inspect command""" - assert request # nosec - - _raise_if_container_is_missing(container_id, shared_store.container_names) - - async with docker_client() as docker: - container_instance = await docker.containers.get(container_id) - inspect_result: dict[str, Any] = await container_instance.show() - return inspect_result diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_extension.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_extension.py deleted file mode 100644 index dd52ba1ad56..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_extension.py +++ /dev/null @@ -1,169 +0,0 @@ -import logging - -from aiodocker.networks import DockerNetwork -from fastapi import APIRouter, Depends, FastAPI -from fastapi import Path as PathParam -from fastapi import Request, Response, status -from models_library.services import ServiceOutput -from pydantic.main import BaseModel -from simcore_sdk.node_ports_v2.port_utils import is_file_type - -from ..core.docker_utils import docker_client -from ..modules.mounted_fs import MountedVolumes -from ..modules.outputs import ( - OutputsContext, - disable_outputs_watcher, - enable_outputs_watcher, -) -from ._dependencies import get_application, get_mounted_volumes, get_outputs_context - -logger = logging.getLogger(__name__) - - -class CreateDirsRequestItem(BaseModel): - outputs_labels: dict[str, ServiceOutput] - - -class PatchDirectoryWatcherItem(BaseModel): - is_enabled: bool - - -class _BaseNetworkItem(BaseModel): - network_id: str - - -class 
AttachContainerToNetworkItem(_BaseNetworkItem): - network_aliases: list[str] - - -class DetachContainerFromNetworkItem(_BaseNetworkItem): - pass - - -# -# HANDLERS ------------------ -# -router = APIRouter() - - -@router.patch( - "/containers/directory-watcher", - summary="Enable/disable directory-watcher event propagation", - response_class=Response, - status_code=status.HTTP_204_NO_CONTENT, -) -async def toggle_directory_watcher( - patch_directory_watcher_item: PatchDirectoryWatcherItem, - app: FastAPI = Depends(get_application), -) -> None: - if patch_directory_watcher_item.is_enabled: - enable_outputs_watcher(app) - else: - disable_outputs_watcher(app) - - -@router.post( - "/containers/ports/outputs/dirs", - summary=( - "Creates the output directories declared by the docker images's labels. " - "It is more convenient to pass the labels from director-v2, " - "since it already has all the machinery to call into director-v0 " - "to retrieve them." - ), - response_class=Response, - status_code=status.HTTP_204_NO_CONTENT, -) -async def create_output_dirs( - request_mode: CreateDirsRequestItem, - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), - outputs_context: OutputsContext = Depends(get_outputs_context), -) -> None: - outputs_path = mounted_volumes.disk_outputs_path - file_type_port_keys = [] - non_file_port_keys = [] - for port_key, service_output in request_mode.outputs_labels.items(): - logger.debug("Parsing output labels, detected: %s", f"{port_key=}") - if is_file_type(service_output.property_type): - dir_to_create = outputs_path / port_key - dir_to_create.mkdir(parents=True, exist_ok=True) - file_type_port_keys.append(port_key) - else: - non_file_port_keys.append(port_key) - - logger.debug("Setting: %s, %s", f"{file_type_port_keys=}", f"{non_file_port_keys=}") - await outputs_context.set_file_type_port_keys(file_type_port_keys) - outputs_context.non_file_type_port_keys = non_file_port_keys - - -@router.post( - "/containers/{id}/networks:attach", - summary="attach container to a network, if not already attached", - response_class=Response, - status_code=status.HTTP_204_NO_CONTENT, -) -async def attach_container_to_network( - request: Request, - item: AttachContainerToNetworkItem, - container_id: str = PathParam(..., alias="id"), -) -> None: - assert request # nosec - - async with docker_client() as docker: - container_instance = await docker.containers.get(container_id) - container_inspect = await container_instance.show() - - attached_network_ids: set[str] = { - x["NetworkID"] - for x in container_inspect["NetworkSettings"]["Networks"].values() - } - - if item.network_id in attached_network_ids: - logger.debug( - "Container %s already attached to network %s", - container_id, - item.network_id, - ) - return - - # NOTE: A docker network is only visible on a docker node when it is - # used by a container - network = DockerNetwork(docker=docker, id_=item.network_id) - await network.connect( - { - "Container": container_id, - "EndpointConfig": {"Aliases": item.network_aliases}, - } - ) - - -@router.post( - "/containers/{id}/networks:detach", - summary="detach container from a network, if not already detached", - response_class=Response, - status_code=status.HTTP_204_NO_CONTENT, -) -async def detach_container_from_network( - item: DetachContainerFromNetworkItem, - container_id: str = PathParam(..., alias="id"), -) -> None: - async with docker_client() as docker: - container_instance = await docker.containers.get(container_id) - container_inspect = await 
container_instance.show() - - attached_network_ids: set[str] = { - x["NetworkID"] - for x in container_inspect["NetworkSettings"]["Networks"].values() - } - - if item.network_id not in attached_network_ids: - logger.debug( - "Container %s already detached from network %s", - container_id, - item.network_id, - ) - return - - # NOTE: A docker network is only visible on a docker node when it is - # used by a container - network = DockerNetwork(docker=docker, id_=item.network_id) - await network.disconnect({"Container": container_id}) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_long_running_tasks.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_long_running_tasks.py deleted file mode 100644 index ddd82f0fd91..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/containers_long_running_tasks.py +++ /dev/null @@ -1,295 +0,0 @@ -import logging -from textwrap import dedent -from typing import Optional - -from fastapi import APIRouter, Depends, FastAPI, Request, status -from servicelib.fastapi.long_running_tasks.server import ( - TaskAlreadyRunningError, - TaskId, - TasksManager, - get_tasks_manager, - start_task, -) -from servicelib.fastapi.requests_decorators import cancel_on_disconnect - -from ..core.settings import ApplicationSettings -from ..models.schemas.application_health import ApplicationHealth -from ..models.schemas.containers import ContainersCreate -from ..models.shared_store import SharedStore -from ..modules.long_running_tasks import ( - task_containers_restart, - task_create_service_containers, - task_ports_inputs_pull, - task_ports_outputs_pull, - task_ports_outputs_push, - task_restore_state, - task_runs_docker_compose_down, - task_save_state, -) -from ..modules.mounted_fs import MountedVolumes -from ..modules.outputs import OutputsManager -from ._dependencies import ( - get_application, - get_application_health, - get_mounted_volumes, - get_outputs_manager, - get_settings, - get_shared_store, -) - -logger = logging.getLogger(__name__) -router = APIRouter() - - -@router.post( - "/containers", - summary=dedent( - """ - Starts the containers as defined in ContainerCreate by: - - cleaning up resources from previous runs if any - - pulling the needed images - - starting the containers - - Progress may be obtained through URL - Process may be cancelled through URL - """ - ).strip(), - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def create_service_containers_task( # pylint: disable=too-many-arguments - request: Request, - containers_create: ContainersCreate, - tasks_manager: TasksManager = Depends(get_tasks_manager), - settings: ApplicationSettings = Depends(get_settings), - shared_store: SharedStore = Depends(get_shared_store), - app: FastAPI = Depends(get_application), - application_health: ApplicationHealth = Depends(get_application_health), - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_create_service_containers, - unique=True, - settings=settings, - containers_create=containers_create, - shared_store=shared_store, - mounted_volumes=mounted_volumes, - app=app, - application_health=application_health, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers:down", - summary="Remove the previously started 
containers", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def runs_docker_compose_down_task( - request: Request, - tasks_manager: TasksManager = Depends(get_tasks_manager), - settings: ApplicationSettings = Depends(get_settings), - shared_store: SharedStore = Depends(get_shared_store), - app: FastAPI = Depends(get_application), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_runs_docker_compose_down, - unique=True, - app=app, - shared_store=shared_store, - settings=settings, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers/state:restore", - summary="Restores the state of the dynamic service", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def state_restore_task( - request: Request, - tasks_manager: TasksManager = Depends(get_tasks_manager), - settings: ApplicationSettings = Depends(get_settings), - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), - app: FastAPI = Depends(get_application), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_restore_state, - unique=True, - settings=settings, - mounted_volumes=mounted_volumes, - app=app, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers/state:save", - summary="Stores the state of the dynamic service", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def state_save_task( - request: Request, - tasks_manager: TasksManager = Depends(get_tasks_manager), - app: FastAPI = Depends(get_application), - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), - settings: ApplicationSettings = Depends(get_settings), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_save_state, - unique=True, - settings=settings, - mounted_volumes=mounted_volumes, - app=app, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers/ports/inputs:pull", - summary="Pull input ports data", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def ports_inputs_pull_task( - request: Request, - port_keys: Optional[list[str]] = None, - tasks_manager: TasksManager = Depends(get_tasks_manager), - app: FastAPI = Depends(get_application), - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_ports_inputs_pull, - unique=True, - port_keys=port_keys, - mounted_volumes=mounted_volumes, - app=app, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers/ports/outputs:pull", - summary="Pull output ports data", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def ports_outputs_pull_task( - request: Request, - port_keys: Optional[list[str]] = None, - tasks_manager: TasksManager = Depends(get_tasks_manager), - app: FastAPI = Depends(get_application), - mounted_volumes: MountedVolumes = Depends(get_mounted_volumes), -) -> TaskId: - assert request # nosec - - try: - task_id = 
start_task( - tasks_manager, - task=task_ports_outputs_pull, - unique=True, - port_keys=port_keys, - mounted_volumes=mounted_volumes, - app=app, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers/ports/outputs:push", - summary="Push output ports data", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def ports_outputs_push_task( - request: Request, - tasks_manager: TasksManager = Depends(get_tasks_manager), - outputs_manager: OutputsManager = Depends(get_outputs_manager), - app: FastAPI = Depends(get_application), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_ports_outputs_push, - unique=True, - outputs_manager=outputs_manager, - app=app, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member - - -@router.post( - "/containers:restart", - summary="Restarts previously started containers", - status_code=status.HTTP_202_ACCEPTED, - response_model=TaskId, -) -@cancel_on_disconnect -async def containers_restart_task( - request: Request, - tasks_manager: TasksManager = Depends(get_tasks_manager), - app: FastAPI = Depends(get_application), - settings: ApplicationSettings = Depends(get_settings), - shared_store: SharedStore = Depends(get_shared_store), -) -> TaskId: - assert request # nosec - - try: - task_id = start_task( - tasks_manager, - task=task_containers_restart, - unique=True, - app=app, - settings=settings, - shared_store=shared_store, - ) - return task_id - except TaskAlreadyRunningError as e: - return e.managed_task.task_id # pylint: disable=no-member diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/health.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/health.py deleted file mode 100644 index 6497afeba3b..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/health.py +++ /dev/null @@ -1,24 +0,0 @@ -from fastapi import APIRouter, Depends, HTTPException, status - -from ..models.schemas.application_health import ApplicationHealth -from ._dependencies import get_application_health - -router = APIRouter() - - -@router.get( - "/health", - response_model=ApplicationHealth, - responses={ - status.HTTP_503_SERVICE_UNAVAILABLE: {"description": "Service is unhealthy"} - }, -) -async def health_endpoint( - application_health: ApplicationHealth = Depends(get_application_health), -) -> ApplicationHealth: - if not application_health.is_healthy: - raise HTTPException( - status.HTTP_503_SERVICE_UNAVAILABLE, detail=application_health.dict() - ) - - return application_health diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/__init__.py new file mode 100644 index 00000000000..990722e1834 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/__init__.py @@ -0,0 +1,3 @@ +from ._routing import get_main_router + +__all__: tuple[str, ...] 
= ("get_main_router",) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_dependencies.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_dependencies.py new file mode 100644 index 00000000000..a3b3f808173 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_dependencies.py @@ -0,0 +1,93 @@ +""" Free functions to inject dependencies in routes handlers +""" + +from asyncio import Lock +from typing import Annotated, cast + +from fastapi import Depends, FastAPI, Request +from fastapi.datastructures import State +from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + +from ...core import rabbitmq +from ...core.settings import ApplicationSettings +from ...models.schemas.application_health import ApplicationHealth +from ...models.shared_store import SharedStore +from ...modules.inputs import InputsState +from ...modules.mounted_fs import MountedVolumes +from ...modules.outputs import OutputsContext, OutputsManager +from ...modules.prometheus_metrics import UserServicesMetrics + + +def get_application(request: Request) -> FastAPI: + return cast(FastAPI, request.app) + + +def get_app_state(request: Request) -> State: + return cast(State, request.app.state) + + +def get_application_health( + app_state: Annotated[State, Depends(get_app_state)] +) -> ApplicationHealth: + return cast(ApplicationHealth, app_state.application_health) + + +def get_settings( + app_state: Annotated[State, Depends(get_app_state)] +) -> ApplicationSettings: + return cast(ApplicationSettings, app_state.settings) + + +def get_shared_store( + app_state: Annotated[State, Depends(get_app_state)] +) -> SharedStore: + return cast(SharedStore, app_state.shared_store) + + +def get_mounted_volumes( + app_state: Annotated[State, Depends(get_app_state)] +) -> MountedVolumes: + return cast(MountedVolumes, app_state.mounted_volumes) + + +def get_container_restart_lock( + app_state: Annotated[State, Depends(get_app_state)] +) -> Lock: + return cast(Lock, app_state.container_restart_lock) + + +def get_outputs_manager( + app_state: Annotated[State, Depends(get_app_state)] +) -> OutputsManager: + return cast(OutputsManager, app_state.outputs_manager) + + +def get_outputs_context( + app_state: Annotated[State, Depends(get_app_state)] +) -> OutputsContext: + return cast(OutputsContext, app_state.outputs_context) + + +def get_inputs_state( + app_state: Annotated[State, Depends(get_app_state)] +) -> InputsState: + return cast(InputsState, app_state.inputs_state) + + +def get_user_services_metrics( + app_state: Annotated[State, Depends(get_app_state)] +) -> UserServicesMetrics: + return cast(UserServicesMetrics, app_state.user_service_metrics) + + +def get_rabbitmq_client( + app: Annotated[FastAPI, Depends(get_application)] +) -> RabbitMQClient: + return rabbitmq.get_rabbitmq_client(app) + + +def get_rabbitmq_rpc_server( + app: Annotated[FastAPI, Depends(get_application)] +) -> RabbitMQRPCClient: + return rabbitmq.get_rabbitmq_rpc_server(app) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_routing.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_routing.py new file mode 100644 index 00000000000..bff0bf16244 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/_routing.py @@ -0,0 +1,59 @@ +""" Module to collect, tag and prefix all routes under 'main_router' + +Setup and register all routes here form 
different modules +""" + +from fastapi import APIRouter, FastAPI + +from ..._meta import API_VTAG +from ...core.settings import ApplicationSettings +from . import ( + containers, + containers_extension, + containers_long_running_tasks, + disk, + health, + prometheus_metrics, + volumes, +) + + +def get_main_router(app: FastAPI) -> APIRouter: + settings: ApplicationSettings = app.state.settings + + main_router = APIRouter() + + main_router.include_router(health.router) + if settings.are_prometheus_metrics_enabled: + main_router.include_router(prometheus_metrics.router) + + main_router.include_router( + containers.router, + tags=["containers"], + prefix=f"/{API_VTAG}", + ) + main_router.include_router( + containers_extension.router, + tags=["containers"], + prefix=f"/{API_VTAG}", + ) + main_router.include_router( + containers_long_running_tasks.router, + tags=["containers"], + prefix=f"/{API_VTAG}", + ) + main_router.include_router( + volumes.router, + tags=["volumes"], + prefix=f"/{API_VTAG}", + ) + main_router.include_router( + disk.router, + tags=["disk"], + prefix=f"/{API_VTAG}", + ) + + return main_router + + +__all__: tuple[str, ...] = ("get_main_router",) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers.py new file mode 100644 index 00000000000..43dc75c75f5 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers.py @@ -0,0 +1,299 @@ +# pylint: disable=too-many-arguments + +import logging +from asyncio import Lock +from typing import Annotated, Any, Final + +from common_library.json_serialization import json_loads +from fastapi import APIRouter, Depends, HTTPException +from fastapi import Path as PathParam +from fastapi import Query, Request, status +from models_library.api_schemas_dynamic_sidecar.containers import ( + ActivityInfo, + ActivityInfoOrNone, +) +from pydantic import TypeAdapter, ValidationError +from servicelib.fastapi.requests_decorators import cancel_on_disconnect + +from ...core.docker_utils import docker_client +from ...core.errors import ( + ContainerExecCommandFailedError, + ContainerExecContainerNotFoundError, + ContainerExecTimeoutError, +) +from ...core.settings import ApplicationSettings +from ...core.validation import ( + ComposeSpecValidation, + get_and_validate_compose_spec, + parse_compose_spec, +) +from ...models.schemas.containers import ContainersComposeSpec +from ...models.shared_store import SharedStore +from ...modules.container_utils import run_command_in_container +from ...modules.mounted_fs import MountedVolumes +from ._dependencies import ( + get_container_restart_lock, + get_mounted_volumes, + get_settings, + get_shared_store, +) + +_INACTIVE_FOR_LONG_TIME: Final[int] = 2**63 - 1 + +_logger = logging.getLogger(__name__) + + +def _raise_if_container_is_missing( + container_id: str, container_names: list[str] +) -> None: + if container_id not in container_names: + message = f"No container '{container_id}' was started. 
Started containers '{container_names}'" + _logger.warning(message) + raise HTTPException(status.HTTP_404_NOT_FOUND, detail=message) + + +router = APIRouter() + + +@router.post( + "/containers/compose-spec", + status_code=status.HTTP_202_ACCEPTED, + responses={ + status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"} + }, +) +@cancel_on_disconnect +async def store_compose_spec( + request: Request, + settings: Annotated[ApplicationSettings, Depends(get_settings)], + containers_compose_spec: ContainersComposeSpec, + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], +): + """ + Validates and stores the docker compose spec for the user services. + """ + _ = request + + async with shared_store: + compose_spec_validation: ComposeSpecValidation = ( + await get_and_validate_compose_spec( + settings=settings, + compose_file_content=containers_compose_spec.docker_compose_yaml, + mounted_volumes=mounted_volumes, + ) + ) + shared_store.compose_spec = compose_spec_validation.compose_spec + shared_store.container_names = compose_spec_validation.current_container_names + shared_store.original_to_container_names = ( + compose_spec_validation.original_to_current_container_names + ) + + _logger.info("Validated compose-spec:\n%s", f"{shared_store.compose_spec}") + + assert shared_store.compose_spec # nosec + + +@router.get( + "/containers", + responses={ + status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"} + }, +) +@cancel_on_disconnect +async def containers_docker_inspect( + request: Request, + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + container_restart_lock: Annotated[Lock, Depends(get_container_restart_lock)], + only_status: bool = Query( # noqa: FBT001 + default=False, description="if True only show the status of the container" + ), +) -> dict[str, Any]: + """ + Returns entire docker inspect data, if only_state is True, + the status of the containers is returned + """ + _ = request + + def _format_result(container_inspect: dict[str, Any]) -> dict[str, Any]: + if only_status: + container_state = container_inspect.get("State", {}) + + # pending is another fake state use to share more information with the frontend + return { + "Status": container_state.get("Status", "pending"), + "Error": container_state.get("Error", ""), + } + + return container_inspect + + async with container_restart_lock, docker_client() as docker: + container_names = shared_store.container_names + + results = {} + + for container in container_names: + container_instance = await docker.containers.get(container) + container_inspect = await container_instance.show() + results[container] = _format_result(container_inspect) + + return results + + +@router.get( + "/containers/activity", +) +@cancel_on_disconnect +async def get_containers_activity( + request: Request, + settings: Annotated[ApplicationSettings, Depends(get_settings)], + shared_store: Annotated[SharedStore, Depends(get_shared_store)], +) -> ActivityInfoOrNone: + _ = request + inactivity_command = settings.DY_SIDECAR_CALLBACKS_MAPPING.inactivity + if inactivity_command is None: + return None + + container_name = inactivity_command.service + + try: + inactivity_response = await run_command_in_container( + shared_store.original_to_container_names[inactivity_command.service], + command=inactivity_command.command, + timeout=inactivity_command.timeout, + ) + except ( + ContainerExecContainerNotFoundError, + 
ContainerExecCommandFailedError, + ContainerExecTimeoutError, + ): + _logger.warning( + "Could not run inactivity command '%s' in container '%s'", + inactivity_command.command, + container_name, + exc_info=True, + ) + return ActivityInfo(seconds_inactive=_INACTIVE_FOR_LONG_TIME) + + try: + return TypeAdapter(ActivityInfo).validate_json(inactivity_response) + except ValidationError: + _logger.warning( + "Could not parse command result '%s' as '%s'", + inactivity_response, + ActivityInfo.__name__, + exc_info=True, + ) + + return ActivityInfo(seconds_inactive=_INACTIVE_FOR_LONG_TIME) + + +# Some of the operations and sub-resources on containers are implemented as long-running tasks. +# Handlers for these operations can be found in: +# +# POST /containers : SEE containers_long_running_tasks::create_service_containers_task +# POST /containers:down : SEE containers_long_running_tasks::runs_docker_compose_down_task +# POST /containers/state:restore : SEE containers_long_running_tasks::state_restore_task +# POST /containers/state:save : SEE containers_long_running_tasks::state_save_task +# POST /containers/ports/inputs:pull : SEE containers_long_running_tasks::ports_inputs_pull_task +# POST /containers/ports/outputs:pull : SEE containers_long_running_tasks::ports_outputs_pull_task +# POST /containers/ports/outputs:push : SEE containers_long_running_tasks::ports_outputs_push_task +# + + +@router.get( + "/containers/name", + responses={ + status.HTTP_404_NOT_FOUND: { + "description": "No entrypoint container found or spec is not yet present" + }, + status.HTTP_422_UNPROCESSABLE_ENTITY: { + "description": "Filters could not be parsed" + }, + }, +) +@cancel_on_disconnect +async def get_containers_name( + request: Request, + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + filters: str = Query( + ..., + description=( + "JSON encoded dictionary. FastAPI does not " + "allow for dict as type in query parameters" + ), + ), +) -> str | dict[str, Any]: + """ + Searches for the container's name given the network + on which the proxy communicates with it. 
+ Supported filters: + network: matches against the exact network name + assigned to the container; `will include` + containers + exclude: matches if contained in the name of the + container; `will exclude` containers + """ + _ = request + + filters_dict: dict[str, str] = json_loads(filters) + if not isinstance(filters_dict, dict): + raise HTTPException( + status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=f"Provided filters, could not parsed {filters_dict}", + ) + network_name: str | None = filters_dict.get("network", None) + exclude: str | None = filters_dict.get("exclude", None) + + stored_compose_content = shared_store.compose_spec + if stored_compose_content is None: + raise HTTPException( + status.HTTP_404_NOT_FOUND, + detail="No spec for docker compose down was found", + ) + + compose_spec = parse_compose_spec(stored_compose_content) + + container_name = None + + spec_services = compose_spec["services"] + for service in spec_services: + service_content = spec_services[service] + if network_name in service_content.get("networks", {}): + if exclude is not None and exclude in service_content["container_name"]: + # removing this container from results + continue + container_name = service_content["container_name"] + break + + if container_name is None: + raise HTTPException( + status.HTTP_404_NOT_FOUND, + detail=f"No container found for network={network_name}", + ) + + return f"{container_name}" + + +@router.get( + "/containers/{id}", + responses={ + status.HTTP_404_NOT_FOUND: {"description": "Container does not exist"}, + status.HTTP_500_INTERNAL_SERVER_ERROR: {"description": "Errors in container"}, + }, +) +@cancel_on_disconnect +async def inspect_container( + request: Request, + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + container_id: str = PathParam(..., alias="id"), +) -> dict[str, Any]: + """Returns information about the container, like docker inspect command""" + _ = request + + _raise_if_container_is_missing(container_id, shared_store.container_names) + + async with docker_client() as docker: + container_instance = await docker.containers.get(container_id) + inspect_result: dict[str, Any] = await container_instance.show() + return inspect_result diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_extension.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_extension.py new file mode 100644 index 00000000000..d5cf21b8723 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_extension.py @@ -0,0 +1,178 @@ +import logging +from typing import Annotated + +from aiodocker.networks import DockerNetwork +from fastapi import APIRouter, Depends, FastAPI +from fastapi import Path as PathParam +from fastapi import Request, Response, status +from models_library.services import ServiceOutput +from pydantic.main import BaseModel +from simcore_sdk.node_ports_v2.port_utils import is_file_type + +from ...core.docker_utils import docker_client +from ...modules.inputs import disable_inputs_pulling, enable_inputs_pulling +from ...modules.mounted_fs import MountedVolumes +from ...modules.outputs import ( + OutputsContext, + disable_event_propagation, + enable_event_propagation, +) +from ._dependencies import get_application, get_mounted_volumes, get_outputs_context + +_logger = logging.getLogger(__name__) + + +class CreateDirsRequestItem(BaseModel): + outputs_labels: dict[str, ServiceOutput] + + +class PatchPortsIOItem(BaseModel): + enable_outputs: bool + 
enable_inputs: bool + + +class _BaseNetworkItem(BaseModel): + network_id: str + + +class AttachContainerToNetworkItem(_BaseNetworkItem): + network_aliases: list[str] + + +class DetachContainerFromNetworkItem(_BaseNetworkItem): + pass + + +# +# HANDLERS ------------------ +# +router = APIRouter() + + +@router.patch( + "/containers/ports/io", + summary="Enable/disable ports i/o", + response_class=Response, + status_code=status.HTTP_204_NO_CONTENT, +) +async def toggle_ports_io( + patch_ports_io_item: PatchPortsIOItem, + app: Annotated[FastAPI, Depends(get_application)], +) -> None: + if patch_ports_io_item.enable_outputs: + await enable_event_propagation(app) + else: + await disable_event_propagation(app) + + if patch_ports_io_item.enable_inputs: + enable_inputs_pulling(app) + else: + disable_inputs_pulling(app) + + +@router.post( + "/containers/ports/outputs/dirs", + summary=( + "Creates the output directories declared by the docker images's labels. " + "It is more convenient to pass the labels from director-v2, " + "since it already has all the machinery to call into director-v0 " + "to retrieve them." + ), + response_class=Response, + status_code=status.HTTP_204_NO_CONTENT, +) +async def create_output_dirs( + request_mode: CreateDirsRequestItem, + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], + outputs_context: Annotated[OutputsContext, Depends(get_outputs_context)], +) -> None: + outputs_path = mounted_volumes.disk_outputs_path + file_type_port_keys = [] + non_file_port_keys = [] + for port_key, service_output in request_mode.outputs_labels.items(): + _logger.debug("Parsing output labels, detected: %s", f"{port_key=}") + if is_file_type(service_output.property_type): + dir_to_create = outputs_path / port_key + dir_to_create.mkdir(parents=True, exist_ok=True) + file_type_port_keys.append(port_key) + else: + non_file_port_keys.append(port_key) + + _logger.debug( + "Setting: %s, %s", f"{file_type_port_keys=}", f"{non_file_port_keys=}" + ) + await outputs_context.set_file_type_port_keys(file_type_port_keys) + outputs_context.non_file_type_port_keys = non_file_port_keys + + +@router.post( + "/containers/{id}/networks:attach", + summary="attach container to a network, if not already attached", + response_class=Response, + status_code=status.HTTP_204_NO_CONTENT, +) +async def attach_container_to_network( + request: Request, + item: AttachContainerToNetworkItem, + container_id: Annotated[str, PathParam(..., alias="id")], +) -> None: + assert request # nosec + + async with docker_client() as docker: + container_instance = await docker.containers.get(container_id) + container_inspect = await container_instance.show() + + attached_network_ids: set[str] = { + x["NetworkID"] + for x in container_inspect["NetworkSettings"]["Networks"].values() + } + + if item.network_id in attached_network_ids: + _logger.debug( + "Container %s already attached to network %s", + container_id, + item.network_id, + ) + return + + # NOTE: A docker network is only visible on a docker node when it is + # used by a container + network = DockerNetwork(docker=docker, id_=item.network_id) + await network.connect( + { + "Container": container_id, + "EndpointConfig": {"Aliases": item.network_aliases}, + } + ) + + +@router.post( + "/containers/{id}/networks:detach", + summary="detach container from a network, if not already detached", + response_class=Response, + status_code=status.HTTP_204_NO_CONTENT, +) +async def detach_container_from_network( + item: DetachContainerFromNetworkItem, + container_id: 
Annotated[str, PathParam(..., alias="id")], +) -> None: + async with docker_client() as docker: + container_instance = await docker.containers.get(container_id) + container_inspect = await container_instance.show() + + attached_network_ids: set[str] = set( + container_inspect["NetworkSettings"]["Networks"].keys() + ) + + if item.network_id not in attached_network_ids: + _logger.debug( + "Container %s already detached from network %s", + container_id, + item.network_id, + ) + return + + # NOTE: A docker network is only visible on a docker node when it is + # used by a container + network = DockerNetwork(docker=docker, id_=item.network_id) + await network.disconnect({"Container": container_id, "Force": True}) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_long_running_tasks.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_long_running_tasks.py new file mode 100644 index 00000000000..af857013a82 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/containers_long_running_tasks.py @@ -0,0 +1,318 @@ +from textwrap import dedent +from typing import Annotated, cast + +from fastapi import APIRouter, Depends, FastAPI, Request, status +from servicelib.fastapi.long_running_tasks.server import ( + TaskAlreadyRunningError, + TaskId, + TasksManager, + get_tasks_manager, + start_task, +) +from servicelib.fastapi.requests_decorators import cancel_on_disconnect + +from ...core.settings import ApplicationSettings +from ...models.schemas.application_health import ApplicationHealth +from ...models.schemas.containers import ContainersCreate +from ...models.shared_store import SharedStore +from ...modules.inputs import InputsState +from ...modules.long_running_tasks import ( + task_containers_restart, + task_create_service_containers, + task_ports_inputs_pull, + task_ports_outputs_pull, + task_ports_outputs_push, + task_pull_user_servcices_docker_images, + task_restore_state, + task_runs_docker_compose_down, + task_save_state, +) +from ...modules.mounted_fs import MountedVolumes +from ...modules.outputs import OutputsManager +from ._dependencies import ( + get_application, + get_application_health, + get_inputs_state, + get_mounted_volumes, + get_outputs_manager, + get_settings, + get_shared_store, +) + +router = APIRouter() + + +@router.post( + "/containers/images:pull", + summary="Pulls all the docker container images for the user services", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def pull_user_servcices_docker_images( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + app: Annotated[FastAPI, Depends(get_application)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_pull_user_servcices_docker_images, + unique=True, + app=app, + shared_store=shared_store, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers", + summary=dedent( + """ + Starts the containers as defined in ContainerCreate by: + - cleaning up resources from previous runs if any + - starting the containers + + Progress may be obtained through URL + Process may be cancelled through URL + """ + ).strip(), + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect 
+async def create_service_containers_task( # pylint: disable=too-many-arguments + request: Request, + containers_create: ContainersCreate, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + app: Annotated[FastAPI, Depends(get_application)], + application_health: Annotated[ApplicationHealth, Depends(get_application_health)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_create_service_containers, + unique=True, + settings=settings, + containers_create=containers_create, + shared_store=shared_store, + app=app, + application_health=application_health, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers:down", + summary="Remove the previously started containers", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def runs_docker_compose_down_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], + shared_store: Annotated[SharedStore, Depends(get_shared_store)], + app: Annotated[FastAPI, Depends(get_application)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_runs_docker_compose_down, + unique=True, + app=app, + shared_store=shared_store, + settings=settings, + mounted_volumes=mounted_volumes, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers/state:restore", + summary="Restores the state of the dynamic service", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def state_restore_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], + app: Annotated[FastAPI, Depends(get_application)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_restore_state, + unique=True, + settings=settings, + mounted_volumes=mounted_volumes, + app=app, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers/state:save", + summary="Stores the state of the dynamic service", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def state_save_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + app: Annotated[FastAPI, Depends(get_application)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_save_state, + unique=True, + settings=settings, + mounted_volumes=mounted_volumes, + app=app, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + 
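
Every long-running handler in this router repeats the same idiom: start the task with unique=True and, when TaskAlreadyRunningError is raised, return the id of the task already in flight so the client keeps tracking the same task. A condensed sketch of that shared pattern (the helper itself is illustrative and not part of this module; it only reuses names visible in the handlers above):

from typing import cast

from servicelib.fastapi.long_running_tasks.server import (
    TaskAlreadyRunningError,
    TaskId,
    TasksManager,
    start_task,
)


def _start_unique_task(tasks_manager: TasksManager, task, **task_kwargs) -> TaskId:
    # Mirrors the try/except block repeated in every handler above: unique=True
    # ensures a single instance of the task; if one is already running, its id
    # is returned instead of raising, so callers can keep polling the same task.
    try:
        return start_task(tasks_manager, task=task, unique=True, **task_kwargs)
    except TaskAlreadyRunningError as e:
        return cast(str, e.managed_task.task_id)  # type: ignore[attr-defined]
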
+@router.post( + "/containers/ports/inputs:pull", + summary="Pull input ports data", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def ports_inputs_pull_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + app: Annotated[FastAPI, Depends(get_application)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], + inputs_state: Annotated[InputsState, Depends(get_inputs_state)], + port_keys: list[str] | None = None, +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_ports_inputs_pull, + unique=True, + port_keys=port_keys, + mounted_volumes=mounted_volumes, + app=app, + settings=settings, + inputs_pulling_enabled=inputs_state.inputs_pulling_enabled, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers/ports/outputs:pull", + summary="Pull output ports data", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def ports_outputs_pull_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + app: Annotated[FastAPI, Depends(get_application)], + mounted_volumes: Annotated[MountedVolumes, Depends(get_mounted_volumes)], + port_keys: list[str] | None = None, +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_ports_outputs_pull, + unique=True, + port_keys=port_keys, + mounted_volumes=mounted_volumes, + app=app, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers/ports/outputs:push", + summary="Push output ports data", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def ports_outputs_push_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + outputs_manager: Annotated[OutputsManager, Depends(get_outputs_manager)], + app: Annotated[FastAPI, Depends(get_application)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_ports_outputs_push, + unique=True, + outputs_manager=outputs_manager, + app=app, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member + + +@router.post( + "/containers:restart", + summary="Restarts previously started containers", + status_code=status.HTTP_202_ACCEPTED, + response_model=TaskId, +) +@cancel_on_disconnect +async def containers_restart_task( + request: Request, + tasks_manager: Annotated[TasksManager, Depends(get_tasks_manager)], + app: Annotated[FastAPI, Depends(get_application)], + settings: Annotated[ApplicationSettings, Depends(get_settings)], + shared_store: Annotated[SharedStore, Depends(get_shared_store)], +) -> TaskId: + assert request # nosec + + try: + return start_task( + tasks_manager, + task=task_containers_restart, + unique=True, + app=app, + settings=settings, + shared_store=shared_store, + ) + except TaskAlreadyRunningError as e: + return cast(str, e.managed_task.task_id) # type: ignore[attr-defined] # pylint:disable=no-member diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/disk.py 
b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/disk.py new file mode 100644 index 00000000000..4ff0cc2dbca --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/disk.py @@ -0,0 +1,14 @@ +from fastapi import APIRouter, status + +from ...services import disk + +router = APIRouter() + + +@router.post( + "/disk/reserved:free", + summary="Frees up reserved disk space", + status_code=status.HTTP_204_NO_CONTENT, +) +async def free_reserved_disk_space() -> None: + disk.remove_reserved_disk_space() diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/health.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/health.py new file mode 100644 index 00000000000..848821715e9 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/health.py @@ -0,0 +1,40 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, HTTPException, status +from models_library.errors import RABBITMQ_CLIENT_UNHEALTHY_MSG +from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + +from ...models.schemas.application_health import ApplicationHealth +from ._dependencies import ( + get_application_health, + get_rabbitmq_client, + get_rabbitmq_rpc_server, +) + +router = APIRouter() + + +@router.get( + "/health", + response_model=ApplicationHealth, + responses={ + status.HTTP_503_SERVICE_UNAVAILABLE: {"description": "Service is unhealthy"} + }, +) +async def health_endpoint( + application_health: Annotated[ApplicationHealth, Depends(get_application_health)], + rabbitmq_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client)], + rabbitmq_rpc_server: Annotated[RabbitMQRPCClient, Depends(get_rabbitmq_rpc_server)], +) -> ApplicationHealth: + if not application_health.is_healthy: + raise HTTPException( + status.HTTP_503_SERVICE_UNAVAILABLE, detail=application_health.model_dump() + ) + + if not rabbitmq_client.healthy or not rabbitmq_rpc_server.healthy: + raise HTTPException( + status.HTTP_503_SERVICE_UNAVAILABLE, detail=RABBITMQ_CLIENT_UNHEALTHY_MSG + ) + + return application_health diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/prometheus_metrics.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/prometheus_metrics.py new file mode 100644 index 00000000000..298f2a84ac0 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/prometheus_metrics.py @@ -0,0 +1,34 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from fastapi.responses import PlainTextResponse + +from ...modules.prometheus_metrics import UserServicesMetrics +from ._dependencies import get_user_services_metrics + +router = APIRouter() + + +@router.get( + "/metrics", + response_class=PlainTextResponse, + responses={ + status.HTTP_500_INTERNAL_SERVER_ERROR: { + "description": "error in recovering data from user service" + } + }, +) +async def metrics_endpoint( + user_services_metrics: Annotated[ + UserServicesMetrics, Depends(get_user_services_metrics) + ], +): + """Exposes metrics form the underlying user service. 
+ + Possible responses: + - HTTP 200 & empty body: user services did not start + - HTTP 200 & prometheus metrics: was able to fetch data from user service + - HTTP 500 & error message: something went wrong when fetching data from user service + """ + metrics_response = user_services_metrics.get_metrics() + return PlainTextResponse(metrics_response.body, status_code=metrics_response.status) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/volumes.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/volumes.py new file mode 100644 index 00000000000..793fbc687e9 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rest/volumes.py @@ -0,0 +1,29 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, FastAPI +from fastapi import Path as PathParam +from fastapi import status +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from pydantic import BaseModel + +from ...services import volumes +from ._dependencies import get_application + +router = APIRouter() + + +class PutVolumeItem(BaseModel): + status: VolumeStatus + + +@router.put( + "/volumes/{id}", + summary="Updates the state of the volume", + status_code=status.HTTP_204_NO_CONTENT, +) +async def put_volume_state( + item: PutVolumeItem, + app: Annotated[FastAPI, Depends(get_application)], + volume_category: Annotated[VolumeCategory, PathParam(..., alias="id")], +) -> None: + await volumes.save_volume_state(app, status=item.status, category=volume_category) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk.py new file mode 100644 index 00000000000..f3bba913ac9 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk.py @@ -0,0 +1,11 @@ +from fastapi import FastAPI +from servicelib.rabbitmq import RPCRouter + +from ...services import disk + +router = RPCRouter() + + +@router.expose() +async def free_reserved_disk_space(_: FastAPI) -> None: + disk.remove_reserved_disk_space() diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk_usage.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk_usage.py new file mode 100644 index 00000000000..a0026e86dd9 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_disk_usage.py @@ -0,0 +1,26 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage +from pydantic import validate_call +from servicelib.rabbitmq import RPCRouter + +from ...modules.system_monitor import get_disk_usage_monitor + +_logger = logging.getLogger(__name__) + +router = RPCRouter() + + +@router.expose() +@validate_call(config={"arbitrary_types_allowed": True}) +async def update_disk_usage(app: FastAPI, *, usage: dict[str, DiskUsage]) -> None: + disk_usage_monitor = get_disk_usage_monitor(app) + + if disk_usage_monitor is None: + _logger.warning( + "Disk usage monitor not initialized, could not update disk usage" + ) + return + + disk_usage_monitor.set_disk_usage_for_path(usage) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_volumes.py 
b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_volumes.py new file mode 100644 index 00000000000..6f51d7ff629 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/_volumes.py @@ -0,0 +1,16 @@ +from fastapi import FastAPI +from models_library.sidecar_volumes import VolumeCategory, VolumeStatus +from pydantic import validate_call +from servicelib.rabbitmq import RPCRouter + +from ...services import volumes + +router = RPCRouter() + + +@router.expose() +@validate_call(config={"arbitrary_types_allowed": True}) +async def save_volume_state( + app: FastAPI, *, status: VolumeStatus, category: VolumeCategory +) -> None: + await volumes.save_volume_state(app, status=status, category=category) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/routes.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/routes.py new file mode 100644 index 00000000000..1b020c03c37 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/api/rpc/routes.py @@ -0,0 +1,27 @@ +from fastapi import FastAPI +from models_library.rabbitmq_basic_types import RPCNamespace +from servicelib.rabbitmq import RPCRouter + +from ...core.rabbitmq import get_rabbitmq_rpc_server +from ...core.settings import ApplicationSettings +from . import _disk, _disk_usage, _volumes + +ROUTERS: list[RPCRouter] = [ + _disk_usage.router, + _disk.router, + _volumes.router, +] + + +def setup_rpc_api_routes(app: FastAPI) -> None: + async def startup() -> None: + rpc_server = get_rabbitmq_rpc_server(app) + settings: ApplicationSettings = app.state.settings + + rpc_namespace = RPCNamespace.from_entries( + {"service": "dy-sidecar", "node_id": f"{settings.DY_SIDECAR_NODE_ID}"} + ) + for router in ROUTERS: + await rpc_server.register_router(router, rpc_namespace, app) + + app.add_event_handler("startup", startup) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/cli.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/cli.py index 76d663383f6..4bbf9e6016e 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/cli.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/cli.py @@ -1,10 +1,10 @@ import asyncio -import json import logging +from collections.abc import AsyncIterator from contextlib import asynccontextmanager -from typing import AsyncIterator import typer +from common_library.json_serialization import json_dumps from fastapi import FastAPI from servicelib.fastapi.long_running_tasks.server import TaskProgress from settings_library.utils_cli import create_settings_command @@ -17,7 +17,11 @@ from .modules.outputs import OutputsManager, setup_outputs log = logging.getLogger(__name__) -main = typer.Typer(name=PROJECT_NAME) +main = typer.Typer( + name=PROJECT_NAME, + pretty_exceptions_enable=False, + pretty_exceptions_show_locals=False, +) main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) @@ -27,7 +31,7 @@ def openapi(): """Prints OpenAPI specifications in json format""" app = create_base_app() - typer.secho(json.dumps(app.openapi(), indent=2)) + typer.secho(json_dumps(app.openapi(), indent=2)) @asynccontextmanager diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/application.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/application.py index 415d9b53d2b..b141e7ca236 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/application.py +++ 
b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/application.py @@ -1,33 +1,51 @@ import logging from asyncio import Lock -from typing import Optional +from typing import Any, ClassVar from fastapi import FastAPI -from models_library.basic_types import BootModeEnum from servicelib.async_utils import cancel_sequential_workers from servicelib.fastapi import long_running_tasks from servicelib.fastapi.openapi import ( get_common_oas_options, override_fastapi_openapi_method, ) +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) from servicelib.logging_utils import config_all_loggers from simcore_sdk.node_ports_common.exceptions import NodeNotFound from .._meta import API_VERSION, API_VTAG, PROJECT_NAME, SUMMARY, __version__ -from ..api import main_router +from ..api.rest import get_main_router +from ..api.rpc.routes import setup_rpc_api_routes from ..models.schemas.application_health import ApplicationHealth from ..models.shared_store import SharedStore, setup_shared_store from ..modules.attribute_monitor import setup_attribute_monitor +from ..modules.inputs import setup_inputs from ..modules.mounted_fs import MountedVolumes, setup_mounted_fs +from ..modules.notifications import setup_notifications from ..modules.outputs import setup_outputs +from ..modules.prometheus_metrics import setup_prometheus_metrics +from ..modules.resource_tracking import setup_resource_tracking +from ..modules.system_monitor import setup_system_monitor +from ..modules.user_services_preferences import setup_user_services_preferences from .docker_compose_utils import docker_compose_down from .docker_logs import setup_background_log_fetcher from .error_handlers import http_error_handler, node_not_found_error_handler from .errors import BaseDynamicSidecarError +from .external_dependencies import setup_check_dependencies from .rabbitmq import setup_rabbitmq -from .remote_debug import setup as remote_debug_setup +from .reserved_space import setup as setup_reserved_space from .settings import ApplicationSettings -from .utils import login_registry, volumes_fix_permissions +from .utils import volumes_fix_permissions + +_LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR +_NOISY_LOGGERS = ( + "aio_pika", + "aiormq", + "httpcore", +) logger = logging.getLogger(__name__) @@ -58,7 +76,7 @@ class AppState: of the different app.state fields during the app's lifespan """ - _STATES = { + _STATES: ClassVar[dict[str, Any]] = { "settings": ApplicationSettings, "mounted_volumes": MountedVolumes, "shared_store": SharedStore, @@ -72,9 +90,8 @@ def __init__(self, initialized_app: FastAPI): if not isinstance(getattr(initialized_app.state, name, None), type_) ] if errors: - raise ValueError( - f"These app states were not properly initialized: {errors}" - ) + msg = f"These app states were not properly initialized: {errors}" + raise ValueError(msg) self._app = initialized_app @@ -94,7 +111,7 @@ def _shared_store(self) -> SharedStore: return self._app.state.shared_store @property - def compose_spec(self) -> Optional[str]: + def compose_spec(self) -> str | None: return self._shared_store.compose_spec @@ -102,30 +119,44 @@ def setup_logger(settings: ApplicationSettings): # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 logging.basicConfig(level=settings.log_level) logging.root.setLevel(settings.log_level) - config_all_loggers() + config_all_loggers( + log_format_local_dev_enabled=settings.DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED, + 
logger_filter_mapping=settings.DY_SIDECAR_LOG_FILTER_MAPPING, + tracing_settings=settings.DYNAMIC_SIDECAR_TRACING, + ) def create_base_app() -> FastAPI: + # keep mostly quiet noisy loggers + quiet_level: int = max( + min(logging.root.level + _LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING + ) + for name in _NOISY_LOGGERS: + logging.getLogger(name).setLevel(quiet_level) + # settings settings = ApplicationSettings.create_from_envs() setup_logger(settings) - logger.debug(settings.json(indent=2)) + logger.debug(settings.model_dump_json(indent=2)) # minimal + assert settings.SC_BOOT_MODE # nosec app = FastAPI( debug=settings.SC_BOOT_MODE.is_devel_mode(), title=PROJECT_NAME, description=SUMMARY, version=API_VERSION, openapi_url=f"/api/{API_VTAG}/openapi.json", - **get_common_oas_options(settings.SC_BOOT_MODE.is_devel_mode()), + **get_common_oas_options(is_devel_mode=settings.SC_BOOT_MODE.is_devel_mode()), ) override_fastapi_openapi_method(app) app.state.settings = settings long_running_tasks.server.setup(app) - app.include_router(main_router) + app.include_router(get_main_router(app)) + + setup_reserved_space(app) return app @@ -141,25 +172,42 @@ def create_app(): # MODULES SETUP -------------- + setup_check_dependencies(app) + setup_shared_store(app) app.state.application_health = ApplicationHealth() + application_settings: ApplicationSettings = app.state.settings - if app.state.settings.SC_BOOT_MODE == BootModeEnum.DEBUG: - remote_debug_setup(app) + if application_settings.DYNAMIC_SIDECAR_TRACING: + setup_tracing(app, application_settings.DYNAMIC_SIDECAR_TRACING, PROJECT_NAME) - if app.state.settings.RABBIT_SETTINGS: - setup_rabbitmq(app) - setup_background_log_fetcher(app) + setup_rabbitmq(app) + setup_rpc_api_routes(app) + setup_background_log_fetcher(app) + setup_resource_tracking(app) + setup_notifications(app) - # also sets up mounted_volumes setup_mounted_fs(app) + setup_system_monitor(app) + setup_inputs(app) setup_outputs(app) setup_attribute_monitor(app) + setup_user_services_preferences(app) + + if application_settings.are_prometheus_metrics_enabled: + setup_prometheus_metrics(app) + + if application_settings.DYNAMIC_SIDECAR_TRACING: + initialize_fastapi_app_tracing(app) + # ERROR HANDLERS ------------ - app.add_exception_handler(NodeNotFound, node_not_found_error_handler) - app.add_exception_handler(BaseDynamicSidecarError, http_error_handler) + app.add_exception_handler( + NodeNotFound, + node_not_found_error_handler, # type: ignore[arg-type] + ) + app.add_exception_handler(BaseDynamicSidecarError, http_error_handler) # type: ignore[arg-type] # EVENTS --------------------- @@ -167,10 +215,9 @@ async def _on_startup() -> None: app.state.container_restart_lock = Lock() app_state = AppState(app) - await login_registry(app_state.settings.REGISTRY_SETTINGS) await volumes_fix_permissions(app_state.mounted_volumes) # STARTED - print(APP_STARTED_BANNER_MSG, flush=True) + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 async def _on_shutdown() -> None: app_state = AppState(app) @@ -188,7 +235,7 @@ async def _on_shutdown() -> None: await cancel_sequential_workers() # FINISHED - print(APP_FINISHED_BANNER_MSG, flush=True) + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 app.add_event_handler("startup", _on_startup) app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_compose_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_compose_utils.py index 
c56f5269dc7..9023c8e5f46 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_compose_utils.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_compose_utils.py @@ -5,24 +5,33 @@ run sequentially by this service """ + import logging -from copy import deepcopy -from typing import Optional +from typing import Final from fastapi import FastAPI +from models_library.progress_bar import ProgressReport from models_library.rabbitmq_messages import ProgressType from servicelib.async_utils import run_sequentially_in_context +from servicelib.fastapi.docker_utils import pull_images +from servicelib.logging_utils import LogLevelInt, LogMessageStr from settings_library.basic_types import LogLevel -from simcore_service_dynamic_sidecar.core.rabbitmq import ( - post_progress_message, - post_sidecar_log_message, -) -from .docker_utils import get_docker_service_images, pull_images +from .docker_utils import get_docker_service_images +from .rabbitmq import post_progress_message, post_sidecar_log_message from .settings import ApplicationSettings -from .utils import CommandResult, async_command, write_to_tmp_file +from .utils import CommandResult, async_command + +_logger = logging.getLogger(__name__) -logger = logging.getLogger(__name__) + +_DOCKER_COMPOSE_CLI_ENV: Final[dict[str, str]] = { + # NOTE: TIMEOUT adjusted because of: + # https://github.com/docker/compose/issues/3927 + # https://github.com/AzuraCast/AzuraCast/issues/3258 + "DOCKER_CLIENT_TIMEOUT": "120", + "COMPOSE_HTTP_TIMEOUT": "120", +} def _docker_compose_options_from_settings(settings: ApplicationSettings) -> str: @@ -32,7 +41,7 @@ def _docker_compose_options_from_settings(settings: ApplicationSettings) -> str: return " ".join(options) -def _increase_timeout(docker_command_timeout: Optional[int]) -> Optional[int]: +def _increase_timeout(docker_command_timeout: int | None) -> int | None: if docker_command_timeout is None: return None # NOTE: ensuring process has enough time to end @@ -40,36 +49,28 @@ def _increase_timeout(docker_command_timeout: Optional[int]) -> Optional[int]: @run_sequentially_in_context() -async def _write_file_and_spawn_process( - yaml_content: str, - *, - command: str, - process_termination_timeout: Optional[int], +async def _compose_cli_command( + yaml_content: str, *, command: str, process_termination_timeout: int | None ) -> CommandResult: - """The command which accepts {file_path} as an argument for string formatting + """ + This calls is intentionally verbose at DEBUG level + """ - ALL docker_compose run sequentially + env_vars = _DOCKER_COMPOSE_CLI_ENV - This calls is intentionally verbose at INFO level - """ - async with write_to_tmp_file(yaml_content) as file_path: - cmd = command.format(file_path=file_path) + _logger.debug("Runs '%s' with ENV=%s...\n%s", command, env_vars, yaml_content) - logger.debug("Runs %s ...\n%s", cmd, yaml_content) + result = await async_command( + command=command, + timeout=process_termination_timeout, + pipe_as_input=yaml_content, + env_vars=env_vars, + ) - result = await async_command( - command=cmd, - timeout=process_termination_timeout, - ) - debug_message = deepcopy(result._asdict()) - logger.debug( - "Finished executing docker-compose command '%s' finished_ok='%s' elapsed='%s'\n%s", - debug_message["command"], - debug_message["success"], - debug_message["elapsed"], - debug_message["message"], - ) - return result + _logger.debug( + "Finished executing docker compose command %s", result.as_log_message() + ) + return result 
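
The wrappers below all funnel through _compose_cli_command above: the compose spec YAML is piped to `docker compose --file -` via stdin instead of being written to a temporary file as the removed _write_file_and_spawn_process did. Roughly, the stdin idiom looks like this (a standalone asyncio sketch, not the project's async_command helper; the env merging shown here is an assumption):

import asyncio
import os

# Mirrors _DOCKER_COMPOSE_CLI_ENV above: client timeouts are raised because of
# the docker compose issues referenced in the NOTE.
_CLI_ENV = {"DOCKER_CLIENT_TIMEOUT": "120", "COMPOSE_HTTP_TIMEOUT": "120"}


async def compose_config_via_stdin(compose_spec_yaml: str) -> str:
    # The spec is written to the subprocess's stdin, which is what the
    # `--file -` flag expects, so no temporary file ever touches the disk.
    proc = await asyncio.create_subprocess_shell(
        "docker compose --file - config",
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
        env={**os.environ, **_CLI_ENV},
    )
    stdout, _ = await proc.communicate(input=compose_spec_yaml.encode())
    return stdout.decode()
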
async def docker_compose_config( @@ -84,13 +85,12 @@ async def docker_compose_config( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_convert/) [SEE compose-file](https://docs.docker.com/compose/compose-file/) """ - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, - command='export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose --file "{file_path}" config', + command="docker compose --file - config", process_termination_timeout=timeout, ) - return result # type: ignore + return result async def docker_compose_pull(app: FastAPI, compose_spec_yaml: str) -> None: @@ -100,18 +100,16 @@ async def docker_compose_pull(app: FastAPI, compose_spec_yaml: str) -> None: [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_pull/) """ app_settings: ApplicationSettings = app.state.settings - registry_settings = app_settings.REGISTRY_SETTINGS + registry_settings = app_settings.DY_DEPLOYMENT_REGISTRY_SETTINGS list_of_images = get_docker_service_images(compose_spec_yaml) - async def _progress_cb(current: int, total: int) -> None: + async def _progress_cb(report: ProgressReport) -> None: await post_progress_message( - app, - ProgressType.SERVICE_IMAGES_PULLING, - float(current / (total or 1)), + app, ProgressType.SERVICE_IMAGES_PULLING, report=report ) - async def _log_cb(msg: str) -> None: - await post_sidecar_log_message(app, msg) + async def _log_cb(msg: LogMessageStr, log_level: LogLevelInt) -> None: + await post_sidecar_log_message(app, msg, log_level=log_level) await pull_images(list_of_images, registry_settings, _progress_cb, _log_cb) @@ -124,15 +122,16 @@ async def docker_compose_create( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_up/) """ - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - # building is a security risk hence is disabled via "--no-build" parameter - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, - command=f'export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose {_docker_compose_options_from_settings(settings)} --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file "{{file_path}}" up' - " --no-build --no-start", + command=( + f"docker compose {_docker_compose_options_from_settings(settings)} " + f"--project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file - " + "up --no-start --no-build" # building is a security risk hence is disabled via "--no-build" parameter + ), process_termination_timeout=None, ) - return result # type: ignore + return result async def docker_compose_start( @@ -143,13 +142,16 @@ async def docker_compose_start( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_start/) """ - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, - command=f'export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose {_docker_compose_options_from_settings(settings)} --project-name 
{settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file "{{file_path}}" start', + command=( + f"docker compose {_docker_compose_options_from_settings(settings)} " + f"--project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file - " + "start" + ), process_termination_timeout=None, ) - return result # type: ignore + return result async def docker_compose_restart( @@ -161,16 +163,16 @@ async def docker_compose_restart( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_restart/) """ default_compose_restart_timeout = 10 - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, command=( - f'export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose {_docker_compose_options_from_settings(settings)} --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file "{{file_path}}" restart' - f" --timeout {default_compose_restart_timeout}" + f"docker compose {_docker_compose_options_from_settings(settings)} " + f"--project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file - " + f"restart --timeout {default_compose_restart_timeout}" ), process_termination_timeout=_increase_timeout(default_compose_restart_timeout), ) - return result # type: ignore + return result async def docker_compose_down( @@ -186,16 +188,16 @@ async def docker_compose_down( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_down/) """ default_compose_down_timeout = 10 - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, command=( - f'export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose {_docker_compose_options_from_settings(settings)} --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file "{{file_path}}" down' - f" --volumes --remove-orphans --timeout {default_compose_down_timeout}" + f"docker compose {_docker_compose_options_from_settings(settings)} " + f" --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file - " + f"down --volumes --remove-orphans --timeout {default_compose_down_timeout}" ), process_termination_timeout=_increase_timeout(default_compose_down_timeout), ) - return result # type: ignore + return result async def docker_compose_rm( @@ -209,13 +211,13 @@ async def docker_compose_rm( [SEE docker-compose](https://docs.docker.com/engine/reference/commandline/compose_rm) """ - # NOTE: TIMEOUT adjusted because of https://github.com/docker/compose/issues/3927, https://github.com/AzuraCast/AzuraCast/issues/3258 - result = await _write_file_and_spawn_process( + result: CommandResult = await _compose_cli_command( compose_spec_yaml, command=( - f'export DOCKER_CLIENT_TIMEOUT=120 && export COMPOSE_HTTP_TIMEOUT=120 && docker-compose {_docker_compose_options_from_settings(settings)} --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file "{{file_path}}" rm' - " --force -v" + f"docker compose {_docker_compose_options_from_settings(settings)}" + f" --project-name {settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE} --file - " + "rm --force -v" ), process_termination_timeout=None, ) - return result # type: ignore + return result diff --git 
a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_logs.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_logs.py index 1f23fc9b2a9..668ba0db91f 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_logs.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_logs.py @@ -1,17 +1,19 @@ """ - BackgroundLogFetcher: - Creates background task that - reads every line of a container's log and - posts it as a message to rabbit's log channel (logger) +BackgroundLogFetcher: + Creates background task that + reads every line of a container's log and + posts it as a message to rabbit's log channel (logger) """ - import logging from asyncio import CancelledError, Task, create_task +from collections.abc import AsyncGenerator, Callable, Coroutine from contextlib import suppress -from typing import Any, Callable, Coroutine, Optional, cast +from typing import Any, cast +from aiodocker import DockerError from fastapi import FastAPI +from servicelib.logging_utils import guess_message_log_level from ..core.rabbitmq import post_log_message from .docker_utils import docker_client @@ -34,8 +36,19 @@ async def _logs_fetcher_worker( image_name = container_inspect["Config"]["Image"].split("/")[-1] logger.debug("Streaming logs from %s, image %s", container_name, image_name) - async for line in container.log(stdout=True, stderr=True, follow=True): - await dispatch_log(image_name=image_name, message=line) + try: + async for line in cast( + AsyncGenerator[str, None], + container.log(stdout=True, stderr=True, follow=True), + ): + await dispatch_log(image_name=image_name, message=line) + except DockerError as e: + logger.warning( + "Cannot stream logs from %s, image %s, because: %s", + container_name, + image_name, + e, + ) class BackgroundLogFetcher: @@ -45,9 +58,11 @@ def __init__(self, app: FastAPI) -> None: self._log_processor_tasks: dict[str, Task[None]] = {} async def _dispatch_logs(self, image_name: str, message: str) -> None: - # sending the logs to the UI to facilitate the - # user debugging process - await post_log_message(self._app, f"[{image_name}] {message}") + await post_log_message( + self._app, + f"[{image_name}] {message}", + log_level=guess_message_log_level(message), + ) async def start_log_feching(self, container_name: str) -> None: self._log_processor_tasks[container_name] = create_task( @@ -62,7 +77,7 @@ async def start_log_feching(self, container_name: str) -> None: async def stop_log_fetching(self, container_name: str) -> None: logger.debug("Stopping logs fetching from container '%s'", container_name) - task: Optional[Task] = self._log_processor_tasks.pop(container_name, None) + task: Task | None = self._log_processor_tasks.pop(container_name, None) if task is None: logger.info( "No log_processor task found for container: %s ", container_name @@ -70,7 +85,9 @@ async def stop_log_fetching(self, container_name: str) -> None: return task.cancel() - with suppress(CancelledError): + + # NOTE: sometime the docker engine causes a TimeoutError after the task is cancelled + with suppress(CancelledError, TimeoutError): await task logger.debug("Logs fetching stopped for container '%s'", container_name) @@ -80,7 +97,7 @@ async def stop_fetcher(self) -> None: await self.stop_log_fetching(container_name) -def _get_background_log_fetcher(app: FastAPI) -> Optional[BackgroundLogFetcher]: +def _get_background_log_fetcher(app: FastAPI) -> BackgroundLogFetcher | None: if hasattr(app.state, 
"background_log_fetcher"): return cast(BackgroundLogFetcher, app.state.background_log_fetcher) return None diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_utils.py index 3f874e1d551..eee04ad6be3 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_utils.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/docker_utils.py @@ -1,20 +1,29 @@ -import asyncio import logging +from collections.abc import AsyncGenerator, Iterable from contextlib import asynccontextmanager -from enum import Enum -from typing import Any, AsyncGenerator, Awaitable, Callable, Final, Optional, TypedDict +from typing import Any import aiodocker import yaml +from aiodocker.containers import DockerContainer from aiodocker.utils import clean_filters -from models_library.basic_regex import DOCKER_GENERIC_TAG_KEY_RE -from models_library.services import RunID +from models_library.docker import DockerGenericTag +from models_library.generated_models.docker_rest_api import ContainerState +from models_library.generated_models.docker_rest_api import Status2 as ContainerStatus +from models_library.services import ServiceRunID from pydantic import PositiveInt -from settings_library.docker_registry import RegistrySettings +from servicelib.utils import logged_gather +from starlette import status as http_status from .errors import UnexpectedDockerError, VolumeNotFoundError -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + + +_ACCEPTED_CONTAINER_STATUSES: set[str] = { + ContainerStatus.created, + ContainerStatus.running, +} @asynccontextmanager @@ -23,219 +32,106 @@ async def docker_client() -> AsyncGenerator[aiodocker.Docker, None]: try: yield docker except aiodocker.exceptions.DockerError as error: - logger.debug("An unexpected Docker error occurred", exc_info=True) + _logger.debug("An unexpected Docker error occurred", exc_info=True) raise UnexpectedDockerError( - message=error.message, status=error.status + message=error.message, status_code=error.status ) from error finally: await docker.close() -async def get_volume_by_label(label: str, run_id: RunID) -> dict[str, Any]: +async def get_volume_by_label( + label: str, service_run_id: ServiceRunID +) -> dict[str, Any]: async with docker_client() as docker: - filters = {"label": [f"source={label}", f"run_id={run_id}"]} + filters = {"label": [f"source={label}", f"run_id={service_run_id}"]} params = {"filters": clean_filters(filters)} - data = await docker._query_json( # pylint: disable=protected-access + data = await docker._query_json( # pylint: disable=protected-access # noqa: SLF001 "volumes", method="GET", params=params ) volumes = data["Volumes"] - logger.debug( # pylint: disable=logging-fstring-interpolation - f"volumes query for {label=} {volumes=}" - ) + _logger.debug("volumes query for label=%s volumes=%s", label, volumes) if len(volumes) != 1: - raise VolumeNotFoundError(label, run_id, volumes) - volume_details = volumes[0] - return volume_details # type: ignore + raise VolumeNotFoundError( + volume_count=len(volumes), + source_label=label, + service_run_id=service_run_id, + volume_names=" ".join(v.get("Name", "UNKNOWN") for v in volumes), + status_code=http_status.HTTP_404_NOT_FOUND, + ) + volume_details: dict[str, Any] = volumes[0] + return volume_details -async def get_running_containers_count_from_names( - container_names: list[str], -) -> PositiveInt: - if 
len(container_names) == 0: - return 0 +async def _get_container( + docker: aiodocker.Docker, container_name: str +) -> DockerContainer | None: + try: + return await docker.containers.get(container_name) + except aiodocker.DockerError as e: + if e.status == http_status.HTTP_404_NOT_FOUND: + return None + raise - async with docker_client() as docker: - filters = clean_filters({"name": container_names}) - containers = await docker.containers.list(all=True, filters=filters) - return len(containers) +async def _get_containers_inspect_from_names( + container_names: list[str], +) -> dict[str, DockerContainer | None]: + # NOTE: returned objects have their associated Docker client session closed + if len(container_names) == 0: + return {} -def get_docker_service_images(compose_spec_yaml: str) -> set[str]: - docker_compose_spec = yaml.safe_load(compose_spec_yaml) - return { - service_data["image"] - for service_data in docker_compose_spec["services"].values() + containers_inspect: dict[str, DockerContainer | None] = { + x: None for x in container_names } - -ProgressCB = Callable[[int, int], Awaitable[None]] -LogCB = Callable[[str], Awaitable[None]] - - -async def pull_images( - images: set[str], - registry_settings: RegistrySettings, - progress_cb: ProgressCB, - log_cb: LogCB, -) -> None: - images_pulling_data: dict[str, dict[str, tuple[int, int]]] = {} async with docker_client() as docker: - await asyncio.gather( + docker_containers: list[DockerContainer | None] = await logged_gather( *( - _pull_image_with_progress( - docker, - registry_settings, - image, - images_pulling_data, - progress_cb, - log_cb, - ) - for image in images + _get_container(docker, container_name) + for container_name in container_names ) ) + for docker_container in docker_containers: + if docker_container is None: + continue + stripped_name = docker_container["Name"].lstrip("/") + if stripped_name in containers_inspect: + containers_inspect[stripped_name] = docker_container -# -# HELPERS -# -_DOWNLOAD_RATIO: Final[float] = 0.75 - -LayerId = str -_LayersInfoDict = dict[LayerId, tuple[int, int]] -ImageName = str -_ImagesInfoDict = dict[ImageName, _LayersInfoDict] + return containers_inspect -class _ProgressDetailDict(TypedDict, total=True): - current: int - total: int +async def get_container_states( + container_names: list[str], +) -> dict[str, ContainerState | None]: + """if a container is not found it's status is None""" + containers_inspect = await _get_containers_inspect_from_names(container_names) + return { + k: None if v is None else ContainerState(**v["State"]) + for k, v in containers_inspect.items() + } -class _DockerProgressDict(TypedDict, total=False): - status: str - progressDetail: _ProgressDetailDict - progress: str - id: str +def are_all_containers_in_expected_states( + states: Iterable[ContainerState | None], +) -> bool: + return all( + s is not None and s.status in _ACCEPTED_CONTAINER_STATUSES for s in states + ) -class _TargetPullStatus(str, Enum): - # They contain 'progressDetail' - DOWNLOADING = "Downloading" - DOWNLOAD_COMPLETE = "Download complete" - EXTRACTING = "Extracting" - PULL_COMPLETE = "Pull complete" +async def get_containers_count_from_names( + container_names: list[str], +) -> PositiveInt: + # this one could handle the error + return len(await _get_containers_inspect_from_names(container_names)) -def _parse_docker_pull_progress( - docker_pull_progress: _DockerProgressDict, image_pulling_data: _LayersInfoDict -) -> bool: - # Example of docker_pull_progress with status in _TargetPullStatus - # 
{'status': 'Pulling fs layer', 'progressDetail': {}, 'id': '6e3729cf69e0'} - # {'status': 'Downloading', 'progressDetail': {'current': 309633, 'total': 30428708}, 'progress': '[> ] 309.6kB/30.43MB', 'id': '6e3729cf69e0'} - # - # Examples of docker_pull_progress with status NOT in _TargetPullStatus - # {'status': 'Digest: sha256:27cb6e6ccef575a4698b66f5de06c7ecd61589132d5a91d098f7f3f9285415a9'} - # {'status': 'Status: Downloaded newer image for ubuntu:latest'} - - status: Optional[str] = docker_pull_progress.get("status") - - if status in list(_TargetPullStatus): - assert "id" in docker_pull_progress # nosec - assert "progressDetail" in docker_pull_progress # nosec - - layer_id: LayerId = docker_pull_progress["id"] - # inits (read/write order is not guaranteed) - image_pulling_data.setdefault(layer_id, (0, 0)) - - if status == _TargetPullStatus.DOWNLOADING: - # writes - image_pulling_data[layer_id] = ( - round( - _DOWNLOAD_RATIO * docker_pull_progress["progressDetail"]["current"] - ), - docker_pull_progress["progressDetail"]["total"], - ) - elif status == _TargetPullStatus.DOWNLOAD_COMPLETE: - # reads - _, layer_total_size = image_pulling_data[layer_id] - # writes - image_pulling_data[layer_id] = ( - round(_DOWNLOAD_RATIO * layer_total_size), - layer_total_size, - ) - elif status == _TargetPullStatus.EXTRACTING: - # reads - _, layer_total_size = image_pulling_data[layer_id] - - # writes - image_pulling_data[layer_id] = ( - round( - _DOWNLOAD_RATIO * layer_total_size - + (1 - _DOWNLOAD_RATIO) - * docker_pull_progress["progressDetail"]["current"] - ), - layer_total_size, - ) - elif status == _TargetPullStatus.PULL_COMPLETE: - # reads - _, layer_total_size = image_pulling_data[layer_id] - # writes - image_pulling_data[layer_id] = ( - layer_total_size, - layer_total_size, - ) - return True - - return False # no pull progress logged - - -def _compute_sizes(all_images: _ImagesInfoDict) -> tuple[int, int]: - total_current_size = total_total_size = 0 - for layer in all_images.values(): - for current_size, total_size in layer.values(): - total_current_size += current_size - total_total_size += total_size - return (total_current_size, total_total_size) - - -async def _pull_image_with_progress( - client: aiodocker.Docker, - registry_settings: RegistrySettings, - image_name: str, - all_image_pulling_data: dict[str, Any], - progress_cb: ProgressCB, - log_cb: LogCB, -) -> None: - # NOTE: if there is no registry_host, then there is no auth allowed, - # which is typical for dockerhub or local images - # NOTE: progress is such that downloading is taking 3/4 of the time, - # Extracting 1/4 - match = DOCKER_GENERIC_TAG_KEY_RE.match(image_name) - registry_host = "" - if match: - registry_host = match.group("registry_host") - else: - logger.error( - "%s does not match typical docker image pattern, please check! 
Image pulling will still be attempted but may fail.", - f"{image_name=}", - ) - - shorter_image_name: Final[str] = image_name.rsplit("/", maxsplit=1)[-1] - all_image_pulling_data[image_name] = {} - async for pull_progress in client.images.pull( - image_name, - stream=True, - auth={ - "username": registry_settings.REGISTRY_USER, - "password": registry_settings.REGISTRY_PW.get_secret_value(), - } - if registry_host - else None, - ): - if _parse_docker_pull_progress( - pull_progress, all_image_pulling_data[image_name] - ): - total_current, total_total = _compute_sizes(all_image_pulling_data) - await progress_cb(total_current, total_total) - - await log_cb(f"pulling {shorter_image_name}: {pull_progress}...") +def get_docker_service_images(compose_spec_yaml: str) -> set[DockerGenericTag]: + docker_compose_spec = yaml.safe_load(compose_spec_yaml) + return { + DockerGenericTag(service_data["image"]) + for service_data in docker_compose_spec["services"].values() + } diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/error_handlers.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/error_handlers.py index 8fbba2fb1c8..cbd3e4dbe52 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/error_handlers.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/error_handlers.py @@ -12,18 +12,19 @@ async def http_error_handler( ) -> JSONResponse: return JSONResponse( content=jsonable_encoder({"errors": [exception.message]}), - status_code=exception.status, + status_code=exception.status_code, # type:ignore[attr-defined] ) async def node_not_found_error_handler( _: Request, exception: NodeNotFound ) -> JSONResponse: - error_fields = dict( - code="dynamic_sidecar.nodeports.node_not_found", - message=f"{exception}", - node_uuid=f"{exception.node_uuid}", - ) + error_fields = { + "code": "dynamic_sidecar.nodeports.node_not_found", + "message": f"{exception}", + "node_uuid": f"{exception.node_uuid}", + } return JSONResponse( - content=jsonable_encoder(error_fields), status_code=status.HTTP_404_NOT_FOUND + content=jsonable_encoder(error_fields), + status_code=status.HTTP_404_NOT_FOUND, ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/errors.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/errors.py index 6d291ecdeb9..fc67b7072f8 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/errors.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/errors.py @@ -1,30 +1,31 @@ -from typing import Any +from common_library.errors_classes import OsparcErrorMixin -from models_library.services import RunID - -class BaseDynamicSidecarError(Exception): +class BaseDynamicSidecarError(OsparcErrorMixin, Exception): """Used as base for all exceptions""" - def __init__(self, nessage: str, status: int = 500) -> None: - self.message: str = nessage - self.status: int = status - super().__init__(nessage) - class VolumeNotFoundError(BaseDynamicSidecarError): - def __init__( - self, source_label: str, run_id: RunID, volumes: list[dict[str, Any]] - ) -> None: - super().__init__( - f"Expected 1 got {len(volumes)} volumes labels with {source_label=}, {run_id=!s}: " - f"Found {' '.join(v.get('Name', 'UNKNOWN') for v in volumes)}", - status=404, - ) + msg_template = ( + "Expected 1 got {volume_count} volumes labels with " + "source_label={source_label}, service_run_id={service_run_id}: Found {volume_names}" + ) class 
UnexpectedDockerError(BaseDynamicSidecarError): - def __init__(self, message: str, status: int) -> None: - super().__init__( - f"An unexpected Docker error occurred {status=}, {message=}", status=status - ) + msg_template = "An unexpected Docker error occurred status_code={status_code}, message={message}" + + +class ContainerExecContainerNotFoundError(BaseDynamicSidecarError): + msg_template = "Container '{container_name}' was not found" + + +class ContainerExecTimeoutError(BaseDynamicSidecarError): + msg_template = "Timed out after {timeout} while executing: '{command}'" + + +class ContainerExecCommandFailedError(BaseDynamicSidecarError): + msg_template = ( + "Command '{command}' exited with code '{exit_code}'" + "and output: '{command_result}'" + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/external_dependencies.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/external_dependencies.py new file mode 100644 index 00000000000..d10d1ce58c5 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/external_dependencies.py @@ -0,0 +1,37 @@ +from common_library.errors_classes import OsparcErrorMixin +from fastapi import FastAPI +from servicelib.utils import logged_gather + +from ..modules.database import wait_for_database_liveness +from .rabbitmq import wait_for_rabbitmq_liveness +from .registry import wait_for_registries_liveness +from .storage import wait_for_storage_liveness + + +class CouldNotReachExternalDependenciesError(OsparcErrorMixin, Exception): + msg_template: str = ( + "Could not start because the following external dependencies failed: {failed}" + ) + + +def setup_check_dependencies(app: FastAPI) -> None: + # NOTE: in most situations these checks would live + # inside each individual module's setup function + # The dynamic-sidecar is created and expected to + # start rapidly, for this reason they are run in + # parallel. 
+ async def on_startup() -> None: + liveliness_results = await logged_gather( + *[ + wait_for_database_liveness(app), + wait_for_rabbitmq_liveness(app), + wait_for_registries_liveness(app), + wait_for_storage_liveness(app), + ], + reraise=False, + ) + failed = [f"{x}" for x in liveliness_results if isinstance(x, Exception)] + if failed: + raise CouldNotReachExternalDependenciesError(failed=failed) + + app.add_event_handler("startup", on_startup) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/rabbitmq.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/rabbitmq.py index 119b457a310..88c77c84997 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/rabbitmq.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/rabbitmq.py @@ -3,58 +3,78 @@ from typing import cast from fastapi import FastAPI +from models_library.progress_bar import ProgressReport from models_library.rabbitmq_messages import ( + DynamicServiceRunningMessage, EventRabbitMessage, LoggerRabbitMessage, ProgressRabbitMessageNode, ProgressType, RabbitEventMessageType, RabbitMessageBase, + RabbitResourceTrackingMessages, ) -from pydantic import NonNegativeFloat -from servicelib.logging_utils import log_catch, log_context -from servicelib.rabbitmq import RabbitMQClient -from servicelib.rabbitmq_utils import wait_till_rabbitmq_responsive +from servicelib.logging_utils import LogLevelInt, LogMessageStr, log_catch, log_context +from servicelib.rabbitmq import RabbitMQClient, is_rabbitmq_responsive +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient +from settings_library.rabbit import RabbitSettings from ..core.settings import ApplicationSettings +from ..modules.service_liveness import wait_for_service_liveness -log = logging.getLogger(__file__) +_logger = logging.getLogger(__file__) async def _post_rabbit_message(app: FastAPI, message: RabbitMessageBase) -> None: - with log_catch(log, reraise=False): - await get_rabbitmq_client(app).publish(message.channel_name, message.json()) + with log_catch(_logger, reraise=False): + await get_rabbitmq_client(app).publish(message.channel_name, message) -async def post_log_message(app: FastAPI, logs: str) -> None: +async def post_resource_tracking_message( + app: FastAPI, message: RabbitResourceTrackingMessages +): + await _post_rabbit_message(app, message) + + +async def post_dynamic_service_running_message( + app: FastAPI, message: DynamicServiceRunningMessage +): + await _post_rabbit_message(app, message) + + +async def post_log_message( + app: FastAPI, log: LogMessageStr, *, log_level: LogLevelInt +) -> None: app_settings: ApplicationSettings = app.state.settings - message = LoggerRabbitMessage( + message = LoggerRabbitMessage.model_construct( node_id=app_settings.DY_SIDECAR_NODE_ID, user_id=app_settings.DY_SIDECAR_USER_ID, project_id=app_settings.DY_SIDECAR_PROJECT_ID, - messages=[logs], - log_level=logging.INFO, + messages=[log], + log_level=log_level, ) await _post_rabbit_message(app, message) async def post_progress_message( - app: FastAPI, progress_type: ProgressType, progress_value: NonNegativeFloat + app: FastAPI, progress_type: ProgressType, report: ProgressReport ) -> None: app_settings: ApplicationSettings = app.state.settings - message = ProgressRabbitMessageNode( + message = ProgressRabbitMessageNode.model_construct( node_id=app_settings.DY_SIDECAR_NODE_ID, user_id=app_settings.DY_SIDECAR_USER_ID, project_id=app_settings.DY_SIDECAR_PROJECT_ID, progress_type=progress_type, - 
progress=progress_value, + report=report, ) await _post_rabbit_message(app, message) -async def post_sidecar_log_message(app: FastAPI, logs: str) -> None: - await post_log_message(app, f"[sidecar] {logs}") +async def post_sidecar_log_message( + app: FastAPI, log: LogMessageStr, *, log_level: LogLevelInt +) -> None: + await post_log_message(app, f"[sidecar] {log}", log_level=log_level) async def post_event_reload_iframe(app: FastAPI) -> None: @@ -68,34 +88,61 @@ async def post_event_reload_iframe(app: FastAPI) -> None: await _post_rabbit_message(app, message) +async def wait_for_rabbitmq_liveness(app: FastAPI) -> None: + app_settings: ApplicationSettings = app.state.settings + rabbit_settings: RabbitSettings = app_settings.RABBIT_SETTINGS + + await wait_for_service_liveness( + is_rabbitmq_responsive, + service_name="RabbitMQ", + endpoint=rabbit_settings.dsn, + url=rabbit_settings.dsn, + ) + + +@lru_cache(maxsize=2) +def _is_rabbitmq_initialized(app: FastAPI, state_client_name: str) -> bool: + return hasattr(app.state, state_client_name) + + +def _raise_if_not_initialized(app: FastAPI, state_client_name: str) -> None: + if not _is_rabbitmq_initialized(app, state_client_name): + msg = "RabbitMQ client is not available. Please check the configuration." + raise RuntimeError(msg) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + _raise_if_not_initialized(app, "rabbitmq_client") + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + _raise_if_not_initialized(app, "rabbitmq_rpc_server") + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) + + def setup_rabbitmq(app: FastAPI) -> None: async def on_startup() -> None: app_settings: ApplicationSettings = app.state.settings assert app_settings.RABBIT_SETTINGS # nosec settings = app_settings.RABBIT_SETTINGS - await wait_till_rabbitmq_responsive(settings.dsn) - with log_context(log, logging.INFO, msg="Create RabbitMQClient"): + + with log_context(_logger, logging.INFO, msg="Create RabbitMQClient"): app.state.rabbitmq_client = RabbitMQClient( client_name=f"dynamic-sidecar_{app_settings.DY_SIDECAR_NODE_ID}", settings=settings, ) + with log_context(_logger, logging.INFO, msg="Create RabbitMQRPCClient"): + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name=f"dynamic-sidecar_rpc_{app_settings.DY_SIDECAR_NODE_ID}", + settings=settings, + ) async def on_shutdown() -> None: if app.state.rabbitmq_client: await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() app.add_event_handler("startup", on_startup) app.add_event_handler("shutdown", on_shutdown) - - -@lru_cache(maxsize=1) -def _is_rabbitmq_initialized(app: FastAPI) -> bool: - return hasattr(app.state, "rabbitmq_client") - - -def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: - if not _is_rabbitmq_initialized(app): - raise RuntimeError( - "RabbitMQ client is not available. Please check the configuration." 
- ) - return cast(RabbitMQClient, app.state.rabbitmq_client) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/registry.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/registry.py new file mode 100644 index 00000000000..71b50a3f548 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/registry.py @@ -0,0 +1,62 @@ +import logging +from pathlib import Path +from typing import Final + +from fastapi import FastAPI +from pydantic import NonNegativeInt +from settings_library.docker_registry import RegistrySettings + +from ..modules.service_liveness import wait_for_service_liveness +from .settings import ApplicationSettings +from .utils import CommandResult, async_command + +_logger = logging.getLogger(__name__) + +DOCKER_CONFIG_JSON_PATH: Final[Path] = Path.home() / ".docker" / "config.json" +DOCKER_LOGIN_TIMEOUT: Final[NonNegativeInt] = 5 + + +class _RegistryNotReachableError(Exception): + pass + + +def _get_login_url(registry_settings: RegistrySettings) -> str: + return registry_settings.resolved_registry_url + + +async def _login_registry(registry_settings: RegistrySettings) -> None: + command_result: CommandResult = await async_command( + ( + f"echo '{registry_settings.REGISTRY_PW.get_secret_value()}' | " + f"docker login {_get_login_url(registry_settings)} " + f"--username '{registry_settings.REGISTRY_USER}' " + "--password-stdin" + ), + timeout=DOCKER_LOGIN_TIMEOUT, + ) + if "Login Succeeded" not in command_result.message: + _logger.error("Response: %s", command_result) + error_message = f"Could not contact registry with the following credentials {registry_settings}" + raise _RegistryNotReachableError(error_message) + + _logger.debug("Logged into registry: %s", registry_settings) + + +async def wait_for_registries_liveness(app: FastAPI) -> None: + # NOTE: also logins to the registries when the health check is enforced + settings: ApplicationSettings = app.state.settings + + await wait_for_service_liveness( + _login_registry, + service_name="Internal Registry", + endpoint=_get_login_url(settings.DY_DEPLOYMENT_REGISTRY_SETTINGS), + registry_settings=settings.DY_DEPLOYMENT_REGISTRY_SETTINGS, + ) + + if settings.DY_DOCKER_HUB_REGISTRY_SETTINGS: + await wait_for_service_liveness( + _login_registry, + service_name="DockerHub Registry", + endpoint=_get_login_url(settings.DY_DOCKER_HUB_REGISTRY_SETTINGS), + registry_settings=settings.DY_DOCKER_HUB_REGISTRY_SETTINGS, + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/remote_debug.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/remote_debug.py deleted file mode 100644 index 27b87397f04..00000000000 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/remote_debug.py +++ /dev/null @@ -1,42 +0,0 @@ -""" Setup remote debugger with Python Tools for Visual Studio (PTVSD) - -""" -import logging - -from fastapi import FastAPI - -from .settings import ApplicationSettings - -logger = logging.getLogger(__name__) - - -def setup(app: FastAPI) -> None: - settings: ApplicationSettings = app.state.settings - - def on_startup() -> None: - try: - logger.debug("Enabling attach ptvsd ...") - # - # SEE https://github.com/microsoft/ptvsd#enabling-debugging - # - # pylint: disable=import-outside-toplevel - import ptvsd - - ptvsd.enable_attach( - address=( - settings.DYNAMIC_SIDECAR_HOST, - settings.DYNAMIC_SIDECAR_REMOTE_DEBUG_PORT, - ) - ) - - except ImportError as err: - raise ValueError( - "Cannot enable remote 
debugging. Please install ptvsd first" - ) from err - - logger.info( - "Remote debugging enabled: listening port %s", - settings.DYNAMIC_SIDECAR_REMOTE_DEBUG_PORT, - ) - - app.add_event_handler("startup", on_startup) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/reserved_space.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/reserved_space.py new file mode 100644 index 00000000000..e43946f5375 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/reserved_space.py @@ -0,0 +1,40 @@ +import os +from pathlib import Path +from typing import Final + +from fastapi import FastAPI +from pydantic import ByteSize, TypeAdapter + +from .settings import ApplicationSettings + +_RESERVED_DISK_SPACE_NAME: Final[Path] = Path( + "/tmp/reserved_disk_space" # nosec # noqa: S108 +) +_DEFAULT_CHUNK_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python("8k") + + +def _write_random_binary_file( + file_path: Path, total_size: ByteSize, *, chunk_size: ByteSize = _DEFAULT_CHUNK_SIZE +): + with Path.open(file_path, "wb") as file: + bytes_written: int = 0 + while bytes_written < total_size: + # Calculate the size of the current chunk + remaining_size = total_size - bytes_written + current_chunk_size = min(chunk_size, remaining_size) + + binary_data = os.urandom(current_chunk_size) + file.write(binary_data) + bytes_written += current_chunk_size + + +def remove_reserved_disk_space() -> None: + _RESERVED_DISK_SPACE_NAME.unlink(missing_ok=True) + + +def setup(app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + + _write_random_binary_file( + _RESERVED_DISK_SPACE_NAME, settings.DYNAMIC_SIDECAR_RESERVED_SPACE_SIZE + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/settings.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/settings.py index f538680353f..4187f08b02c 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/settings.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/settings.py @@ -1,28 +1,60 @@ import warnings +from datetime import timedelta from functools import lru_cache from pathlib import Path -from typing import Optional, cast +from typing import cast -from models_library.basic_types import BootModeEnum, PortInt +from common_library.pydantic_validators import validate_numeric_string_as_timedelta +from models_library.basic_types import PortInt +from models_library.callbacks_mapping import CallbacksMapping +from models_library.products import ProductName from models_library.projects import ProjectID -from models_library.projects_nodes import NodeID -from models_library.services import RunID +from models_library.projects_nodes_io import NodeID +from models_library.service_settings_labels import LegacyState +from models_library.services import DynamicServiceKey, ServiceRunID, ServiceVersion from models_library.users import UserID -from pydantic import Field, PositiveInt, validator -from settings_library.base import BaseCustomSettings +from pydantic import ( + AliasChoices, + ByteSize, + Field, + PositiveInt, + TypeAdapter, + field_validator, +) +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.aws_s3_cli import AwsS3CliSettings from settings_library.docker_registry import RegistrySettings +from settings_library.node_ports import StorageAuthSettings +from settings_library.postgres 
import PostgresSettings from settings_library.r_clone import RCloneSettings from settings_library.rabbit import RabbitSettings +from settings_library.resource_usage_tracker import ( + DEFAULT_RESOURCE_USAGE_HEARTBEAT_INTERVAL, +) +from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings -class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): +class ResourceTrackingSettings(BaseApplicationSettings): + RESOURCE_TRACKING_HEARTBEAT_INTERVAL: timedelta = Field( + default=DEFAULT_RESOURCE_USAGE_HEARTBEAT_INTERVAL, + description="each time the status of the service is propagated", + ) - SC_BOOT_MODE: BootModeEnum = Field( - ..., - description="boot mode helps determine if in development mode or normal operation", + _validate_resource_tracking_heartbeat_interval = ( + validate_numeric_string_as_timedelta("RESOURCE_TRACKING_HEARTBEAT_INTERVAL") + ) + + +class SystemMonitorSettings(BaseApplicationSettings): + DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE: bool = Field( + default=False, description="enables/disables disk usage monitoring" ) + +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR: Path = Field( ..., description="Base directory where dynamic-sidecar stores creates " @@ -40,14 +72,13 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): # LOGGING LOG_LEVEL: str = Field( - default="WARNING", env=["DYNAMIC_SIDECAR_LOG_LEVEL", "LOG_LEVEL", "LOGLEVEL"] + default="WARNING", + validation_alias=AliasChoices( + "DYNAMIC_SIDECAR_LOG_LEVEL", "LOG_LEVEL", "LOGLEVEL" + ), ) # SERVICE SERVER (see : https://www.uvicorn.org/settings/) - DYNAMIC_SIDECAR_HOST: str = Field( - default="0.0.0.0", # nosec - description="host where to bind the application on which to serve", - ) DYNAMIC_SIDECAR_PORT: PortInt = Field( default=8000, description="port where the server will be currently serving" ) @@ -72,15 +103,30 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): ), ) + DYNAMIC_SIDECAR_TELEMETRY_DISK_USAGE_MONITOR_INTERVAL: timedelta = Field( + default=timedelta(seconds=5), + description="time between checks for disk usage", + ) + DEBUG: bool = Field( default=False, description="If set to True the application will boot into debug mode", ) - DYNAMIC_SIDECAR_REMOTE_DEBUG_PORT: PortInt = Field( - default=3000, description="ptsvd remote debugger starting port" + DYNAMIC_SIDECAR_RESERVED_SPACE_SIZE: ByteSize = Field( + TypeAdapter(ByteSize).validate_python("10Mib"), + description=( + "Disk space reserved when the dy-sidecar is started. Can be freed at " + "any time via an API call. Main reason to free this disk space is " + "when the host's `/docker` partition has reached 0. Services will " + "behave unexpectedly until some disk space is freed. This will " + "allow manual intervention and cleanup."
+ ), ) + DY_SIDECAR_CALLBACKS_MAPPING: CallbacksMapping = Field( + ..., description="callbacks to use for this service" + ) DY_SIDECAR_PATH_INPUTS: Path = Field( ..., description="path where to expect the inputs folder" ) @@ -90,43 +136,94 @@ class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): DY_SIDECAR_STATE_PATHS: list[Path] = Field( ..., description="list of additional paths to be synced" ) + DY_SIDECAR_USER_PREFERENCES_PATH: Path | None = Field( + None, description="path where the user preferences should be saved" + ) DY_SIDECAR_STATE_EXCLUDE: set[str] = Field( ..., description="list of patterns to exclude files when saving states" ) + DY_SIDECAR_LEGACY_STATE: LegacyState | None = Field( + default=None, description="used to recover state when upgrading service" + ) + + DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field( + default=False, + validation_alias=AliasChoices( + "DY_SIDECAR_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ) + DY_SIDECAR_LOG_FILTER_MAPPING: dict[LoggerName, list[MessageSubstring]] = Field( + default_factory=dict, + validation_alias=AliasChoices( + "DY_SIDECAR_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ) DY_SIDECAR_USER_ID: UserID DY_SIDECAR_PROJECT_ID: ProjectID DY_SIDECAR_NODE_ID: NodeID - DY_SIDECAR_RUN_ID: RunID + DY_SIDECAR_RUN_ID: ServiceRunID DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS: bool - REGISTRY_SETTINGS: RegistrySettings = Field(auto_default_from_env=True) + DY_SIDECAR_SERVICE_KEY: DynamicServiceKey | None = None + DY_SIDECAR_SERVICE_VERSION: ServiceVersion | None = None + DY_SIDECAR_PRODUCT_NAME: ProductName | None = None + + NODE_PORTS_STORAGE_AUTH: StorageAuthSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + DY_SIDECAR_R_CLONE_SETTINGS: RCloneSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + DY_SIDECAR_AWS_S3_CLI_SETTINGS: AwsS3CliSettings | None = Field( + None, + description="AWS S3 settings are used for the AWS S3 CLI. 
If these settings are filled, the AWS S3 CLI is used instead of RClone.", + ) + POSTGRES_SETTINGS: PostgresSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + RABBIT_SETTINGS: RabbitSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DY_DEPLOYMENT_REGISTRY_SETTINGS: RegistrySettings = Field() + DY_DOCKER_HUB_REGISTRY_SETTINGS: RegistrySettings | None = Field(default=None) + + RESOURCE_TRACKING: ResourceTrackingSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + SYSTEM_MONITOR_SETTINGS: SystemMonitorSettings = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + DYNAMIC_SIDECAR_TRACING: TracingSettings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ) - RABBIT_SETTINGS: Optional[RabbitSettings] = Field(auto_default_from_env=True) - DY_SIDECAR_R_CLONE_SETTINGS: RCloneSettings = Field(auto_default_from_env=True) + @property + def are_prometheus_metrics_enabled(self) -> bool: + return ( # pylint: disable=no-member + self.DY_SIDECAR_CALLBACKS_MAPPING.metrics is not None + ) - @validator("LOG_LEVEL") + @field_validator("LOG_LEVEL", mode="before") @classmethod - def _check_log_level(cls, value): + def _check_log_level(cls, value: str) -> str: return cls.validate_log_level(value) - @property - def rclone_settings_for_nodeports(self) -> Optional[RCloneSettings]: - """ - If R_CLONE_ENABLED is False it returns None which indicates - nodeports to disable rclone and fallback to the previous storage mechanim. - """ - return ( - self.DY_SIDECAR_R_CLONE_SETTINGS - if self.DY_SIDECAR_R_CLONE_SETTINGS.R_CLONE_ENABLED - else None + _validate_dynamic_sidecar_telemetry_disk_usage_monitor_interval = ( + validate_numeric_string_as_timedelta( + "DYNAMIC_SIDECAR_TELEMETRY_DISK_USAGE_MONITOR_INTERVAL" ) + ) @lru_cache def get_settings() -> ApplicationSettings: """used outside the context of a request""" - warnings.warn( - "Use instead app.state.settings", - DeprecationWarning, - ) + warnings.warn("Use instead app.state.settings", DeprecationWarning, stacklevel=2) return cast(ApplicationSettings, ApplicationSettings.create_from_envs()) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/storage.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/storage.py new file mode 100644 index 00000000000..639ee0dd810 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/storage.py @@ -0,0 +1,75 @@ +import logging +from datetime import timedelta +from typing import Final, NamedTuple + +from fastapi import FastAPI, status +from httpx import AsyncClient +from pydantic import AnyUrl, TypeAdapter +from servicelib.logging_utils import log_context +from settings_library.node_ports import StorageAuthSettings + +from ..modules.service_liveness import wait_for_service_liveness +from .settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + +_LIVENESS_TIMEOUT: Final[timedelta] = timedelta(seconds=5) + + +class _AuthTuple(NamedTuple): + username: str + password: str + + +def _get_auth_or_none(storage_auth_settings: StorageAuthSettings) -> _AuthTuple | None: + if storage_auth_settings.auth_required: + assert storage_auth_settings.STORAGE_USERNAME # nosec + assert storage_auth_settings.STORAGE_PASSWORD # nosec + return _AuthTuple( + storage_auth_settings.STORAGE_USERNAME, + storage_auth_settings.STORAGE_PASSWORD.get_secret_value(), + ) + return None + + +def 
_get_url(storage_auth_settings: StorageAuthSettings) -> str: + url: AnyUrl = TypeAdapter(AnyUrl).validate_python( + f"{storage_auth_settings.api_base_url}/" + ) + return f"{url}" + + +async def _is_storage_responsive(storage_auth_settings: StorageAuthSettings) -> bool: + url = _get_url(storage_auth_settings) + auth = _get_auth_or_none(storage_auth_settings) + + with log_context( + _logger, + logging.DEBUG, + msg=f"checking storage connection at {url=} {auth=}", + ): + async with AsyncClient( + auth=auth, timeout=_LIVENESS_TIMEOUT.total_seconds() + ) as session: + result = await session.get(url) + if result.status_code == status.HTTP_200_OK: + _logger.debug("storage connection established") + return True + _logger.error("storage is not responding %s", result.text) + return False + + +async def wait_for_storage_liveness(app: FastAPI) -> None: + settings: ApplicationSettings = app.state.settings + storage_auth_settings = settings.NODE_PORTS_STORAGE_AUTH + + if storage_auth_settings is None: + msg = f"Wrong configuration, check {StorageAuthSettings.__name__} for details" + raise ValueError(msg) + + await wait_for_service_liveness( + _is_storage_responsive, + service_name="Storage", + endpoint=_get_url(storage_auth_settings), + storage_auth_settings=storage_auth_settings, + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/utils.py index 1384ee8afc7..3993ce37a56 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/utils.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/utils.py @@ -1,119 +1,36 @@ import asyncio -import base64 -import json import logging import os import signal -import tempfile import time from asyncio.subprocess import Process -from contextlib import asynccontextmanager -from pathlib import Path -from typing import AsyncIterator, NamedTuple, Optional +from typing import NamedTuple -import aiofiles -import httpx import psutil -import yaml -from aiofiles import os as aiofiles_os -from servicelib.error_codes import create_error_code -from settings_library.docker_registry import RegistrySettings -from starlette import status -from tenacity import retry -from tenacity.before_sleep import before_sleep_log -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed +from common_library.error_codes import create_error_code +from servicelib.logging_errors import create_troubleshotting_log_kwargs from ..modules.mounted_fs import MountedVolumes HIDDEN_FILE_NAME = ".hidden_do_not_remove" -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) class CommandResult(NamedTuple): success: bool message: str command: str - elapsed: Optional[float] - - -class _RegistryNotReachableException(Exception): - pass - - -@retry( - wait=wait_fixed(1), - stop=stop_after_delay(10), - before_sleep=before_sleep_log(logger, logging.INFO), - reraise=True, -) -async def _is_registry_reachable(registry_settings: RegistrySettings) -> None: - async with httpx.AsyncClient() as client: - params = {} - if registry_settings.REGISTRY_AUTH: - params["auth"] = ( - registry_settings.REGISTRY_USER, - registry_settings.REGISTRY_PW.get_secret_value(), - ) - - protocol = "https" if registry_settings.REGISTRY_SSL else "http" - url = f"{protocol}://{registry_settings.api_url}/" - - logging.info("Registry test url ='%s'", url) - response = await client.get(url, **params) - reachable = response.status_code == status.HTTP_200_OK and 
response.json() == {} - if not reachable: - logger.error("Response: %s", response) - error_message = ( - f"Could not reach registry {registry_settings.api_url} " - f"auth={registry_settings.REGISTRY_AUTH}" - ) - raise _RegistryNotReachableException(error_message) + elapsed: float | None - -async def login_registry(registry_settings: RegistrySettings) -> None: - """ - Creates ~/.docker/config.json and adds docker registry credentials - """ - await _is_registry_reachable(registry_settings) - - def create_docker_config_file(registry_settings: RegistrySettings) -> None: - user = registry_settings.REGISTRY_USER - password = registry_settings.REGISTRY_PW.get_secret_value() - docker_config = { - "auths": { - f"{registry_settings.resolved_registry_url}": { - "auth": base64.b64encode(f"{user}:{password}".encode()).decode( - "utf-8" - ) - } - } - } - conf_file = Path.home() / ".docker" / "config.json" - conf_file.parent.mkdir(exist_ok=True, parents=True) - conf_file.write_text(json.dumps(docker_config)) - - if registry_settings.REGISTRY_AUTH: - await asyncio.get_event_loop().run_in_executor( - None, create_docker_config_file, registry_settings + def as_log_message(self) -> str: + return ( + f"'{self.command}' finished_ok='{self.success}' " + f"elapsed='{self.elapsed}'\n{self.message}" ) -@asynccontextmanager -async def write_to_tmp_file(file_contents: str) -> AsyncIterator[Path]: - """Disposes of file on exit""" - file_path = Path(tempfile.mkdtemp()) / "file" - async with aiofiles.open(file_path, mode="w") as tmp_file: - await tmp_file.write(file_contents) - try: - yield file_path - finally: - await aiofiles_os.remove(file_path) - - def _close_transport(proc: Process): - # Closes transport (initialized during 'await proc.communicate(...)' ) and avoids error: # # Exception ignored in: @@ -125,27 +42,39 @@ def _close_transport(proc: Process): # SEE implementation of asyncio.subprocess.Process._read_stream(...) for fd in (1, 2): # pylint: disable=protected-access - if transport := getattr(proc, "_transport", None): + if transport := getattr(proc, "_transport", None): # noqa: SIM102 if t := transport.get_pipe_transport(fd): t.close() -async def async_command(command: str, timeout: Optional[float] = None) -> CommandResult: +async def async_command( + command: str, + timeout: float | None = None, # noqa: ASYNC109 + pipe_as_input: str | None = None, + env_vars: dict[str, str] | None = None, +) -> CommandResult: """ Does not raise Exception """ proc = await asyncio.create_subprocess_shell( command, + stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, - # NOTE that stdout/stderr together. Might want to separate them? + env=env_vars, ) + + if pipe_as_input: + assert proc.stdin # nosec + proc.stdin.write(pipe_as_input.encode()) + proc.stdin.close() + start = time.time() try: stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=timeout) - except asyncio.TimeoutError: + except TimeoutError: proc.terminate() _close_transport(proc) @@ -159,12 +88,12 @@ async def async_command(command: str, timeout: Optional[float] = None) -> Comman # # There is a chance that the launched process ignores SIGTERM # in that case, it would proc.wait() forever. This code will be - # used only to run docker-compose CLI which behaves well. Nonetheless, + # used only to run docker compose CLI which behaves well. Nonetheless, # we add here some asserts. 
assert await proc.wait() == -signal.SIGTERM # nosec assert not psutil.pid_exists(proc.pid) # nosec - logger.warning( + _logger.warning( "Process %s timed out after %ss", f"{command=!r}", f"{timeout=}", @@ -177,17 +106,22 @@ async def async_command(command: str, timeout: Optional[float] = None) -> Comman ) except Exception as err: # pylint: disable=broad-except + error_code = create_error_code(err) - logger.exception( - "Process with %s failed unexpectedly [%s]", - f"{command=!r}", - f"{error_code}", - extra={"error_code": error_code}, + user_error_msg = f"Unexpected error [{error_code}]" + _logger.exception( + **create_troubleshotting_log_kwargs( + user_error_msg, + error=err, + error_context={"command": command, "proc.returncode": proc.returncode}, + error_code=error_code, + tip="Process with command failed unexpectedly", + ) ) return CommandResult( success=False, - message=f"Unexpected error [{error_code}]", + message=user_error_msg, command=f"{command}", elapsed=time.time() - start, ) @@ -201,20 +135,11 @@ async def async_command(command: str, timeout: Optional[float] = None) -> Comman ) -def assemble_container_names(validated_compose_content: str) -> list[str]: - """returns the list of container names from a validated compose_spec""" - parsed_compose_spec = yaml.safe_load(validated_compose_content) - return [ - service_data["container_name"] - for service_data in parsed_compose_spec["services"].values() - ] - - async def volumes_fix_permissions(mounted_volumes: MountedVolumes) -> None: # NOTE: by creating a hidden file on all mounted volumes # the same permissions are ensured and avoids # issues when starting the services - for volume_path in mounted_volumes.all_disk_paths(): + for volume_path in mounted_volumes.all_disk_paths_iter(): hidden_file = volume_path / HIDDEN_FILE_NAME hidden_file.write_text( f"Directory must not be empty.\nCreated by {__file__}.\n" ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/validation.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/validation.py index 396f37c46dd..25bbb2fd012 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/validation.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/validation.py @@ -1,10 +1,11 @@ -import json import logging import os import re -from typing import Any, Generator +from collections.abc import Generator +from typing import Any, NamedTuple import yaml +from common_library.json_serialization import json_loads from servicelib.docker_constants import ( DEFAULT_USER_SERVICES_NETWORK_NAME, SUFFIX_EGRESS_PROXY_NAME, @@ -16,10 +17,10 @@ TEMPLATE_SEARCH_PATTERN = r"%%(.*?)%%" -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -class InvalidComposeSpec(Exception): +class InvalidComposeSpecError(Exception): """Exception used to signal incorrect docker-compose configuration file""" @@ -36,12 +37,10 @@ def _assemble_container_name( service_key, ] - container_name = "-".join([x for x in strings_to_use if len(x) > 0])[ + return "-".join([x for x in strings_to_use if len(x) > 0])[ : settings.DYNAMIC_SIDECAR_MAX_COMBINED_CONTAINER_NAME_LENGTH ].replace("_", "-") - return container_name - def _get_forwarded_env_vars(container_key: str) -> list[str]: """returns env vars targeted to each container in the compose spec""" @@ -49,12 +48,12 @@ def _get_forwarded_env_vars(container_key: str) -> list[str]: # some services expect it, using it as empty "SIMCORE_NODE_BASEPATH=", ] - for key in os.environ.keys(): + for 
key in os.environ: if key.startswith("FORWARD_ENV_"): new_entry_key = key.replace("FORWARD_ENV_", "") # parsing `VAR={"destination_containers": ["destination_container"], "env_var": "PAYLOAD"}` - new_entry_payload = json.loads(os.environ[key]) + new_entry_payload = json_loads(os.environ[key]) if container_key not in new_entry_payload["destination_containers"]: continue @@ -108,14 +107,20 @@ def _apply_templating_directives( def _merge_env_vars( - compose_spec_env_vars: list[str], settings_env_vars: list[str] + compose_spec_env_vars: list[str] | dict[str, str], + settings_env_vars: list[str] | dict[str, str], ) -> list[str]: def _gen_parts_env_vars( - env_vars: list[str], + env_vars: list[str] | dict[str, str], ) -> Generator[tuple[str, str], None, None]: - for env_var in env_vars: - key, value = env_var.split("=") - yield key, value + assert isinstance(env_vars, list | dict) # nosec + + if isinstance(env_vars, list): + for env_var in env_vars: + key, value = env_var.split("=") + yield key, value + else: + yield from env_vars.items() # pylint: disable=unnecessary-comprehension dict_spec_env_vars = {k: v for k, v in _gen_parts_env_vars(compose_spec_env_vars)} @@ -130,12 +135,12 @@ def _gen_parts_env_vars( def _connect_user_services( - parsed_compose_spec: dict[str, Any], allow_internet_access: bool + parsed_compose_spec: dict[str, Any], *, allow_internet_access: bool ) -> None: """ Put all containers in the compose spec in the same network. The network name must only be unique inside the user defined spec. - `docker-compose` will add some prefix to it. + `docker compose` will add some prefix to it. """ networks = parsed_compose_spec.get("networks", None) if networks is None: @@ -170,16 +175,21 @@ def parse_compose_spec(compose_file_content: str) -> Any: try: return yaml.safe_load(compose_file_content) except yaml.YAMLError as e: - raise InvalidComposeSpec( - f"{str(e)}\n{compose_file_content}\nProvided yaml is not valid!" - ) from e + msg = f"{e}\n{compose_file_content}\nProvided yaml is not valid!" + raise InvalidComposeSpecError(msg) from e -async def validate_compose_spec( +class ComposeSpecValidation(NamedTuple): + compose_spec: str + current_container_names: list[str] + original_to_current_container_names: dict[str, str] + + +async def get_and_validate_compose_spec( # pylint: disable=too-many-statements settings: ApplicationSettings, compose_file_content: str, mounted_volumes: MountedVolumes, -) -> str: +) -> ComposeSpecValidation: """ Validates what looks like a docker compose spec and injects additional data to mainly make sure: @@ -188,23 +198,29 @@ async def validate_compose_spec( - properly target environment variables forwarded via settings on the service - Finally runs docker-compose config to properly validate the result + Finally runs docker compose config to properly validate the result """ - logger.debug("validating compose spec:\n%s", f"{compose_file_content=}") + _logger.debug("validating compose spec:\n%s", f"{compose_file_content=}") parsed_compose_spec = parse_compose_spec(compose_file_content) if parsed_compose_spec is None or not isinstance(parsed_compose_spec, dict): - raise InvalidComposeSpec(f"{compose_file_content}\nProvided yaml is not valid!") + msg = f"{compose_file_content}\nProvided yaml is not valid!" + raise InvalidComposeSpecError(msg) if not {"version", "services"}.issubset(set(parsed_compose_spec.keys())): - raise InvalidComposeSpec(f"{compose_file_content}\nProvided yaml is not valid!") + msg = f"{compose_file_content}\nProvided yaml is not valid!"
+ raise InvalidComposeSpecError(msg) version = parsed_compose_spec["version"] if version.startswith("1"): - raise InvalidComposeSpec(f"Provided spec version '{version}' is not supported") + msg = f"Provided spec version '{version}' is not supported" + raise InvalidComposeSpecError(msg) spec_services_to_container_name: dict[str, str] = {} + current_container_names: list[str] = [] + original_to_current_container_names: dict[str, str] = {} + spec_services = parsed_compose_spec["services"] for index, service in enumerate(spec_services): service_content = spec_services[service] @@ -217,6 +233,9 @@ async def validate_compose_spec( service_content["container_name"] = container_name spec_services_to_container_name[service] = container_name + current_container_names.append(container_name) + original_to_current_container_names[service] = container_name + # inject forwarded environment variables environment_entries = service_content.get("environment", []) service_settings_env_vars = _get_forwarded_env_vars(service) @@ -246,6 +265,13 @@ async def validate_compose_spec( ): service_volumes.append(state_paths_docker_volume) + if settings.DY_SIDECAR_USER_PREFERENCES_PATH is not None and ( + user_preferences_volume := await mounted_volumes.get_user_preferences_path_volume( + settings.DY_SIDECAR_RUN_ID + ) + ): + service_volumes.append(user_preferences_volume) + service_content["volumes"] = service_volumes _connect_user_services( @@ -278,15 +304,20 @@ async def validate_compose_spec( spec_services_to_container_name=spec_services_to_container_name, ) - # validate against docker-compose config + # validate against docker compose config result = await docker_compose_config(compose_spec) if not result.success: - logger.warning( - "'docker-compose config' failed for:\n%s\n%s", + _logger.warning( + "'docker compose config' failed for:\n%s\n%s", f"{compose_spec}", result.message, ) - raise InvalidComposeSpec(f"Invalid compose-specs:\n{result.message}") + msg = f"Invalid compose-specs:\n{result.message}" + raise InvalidComposeSpecError(msg) - return compose_spec + return ComposeSpecValidation( + compose_spec=compose_spec, + current_container_names=current_container_names, + original_to_current_container_names=original_to_current_container_names, + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/schemas/containers.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/schemas/containers.py index 60d9606fefc..e374c924070 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/schemas/containers.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/schemas/containers.py @@ -1,5 +1,10 @@ +from models_library.services_creation import CreateServiceMetricsAdditionalParams from pydantic import BaseModel -class ContainersCreate(BaseModel): +class ContainersComposeSpec(BaseModel): docker_compose_yaml: str + + +class ContainersCreate(BaseModel): + metrics_params: CreateServiceMetricsAdditionalParams diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/shared_store.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/shared_store.py index b0c8d58f77b..32257d79d0c 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/shared_store.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/models/shared_store.py @@ -1,58 +1,114 @@ +from asyncio import Lock from pathlib import Path -from typing import Final, Optional +from typing import Final, 
TypeAlias import aiofiles from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.containers import DockerComposeYamlStr +from models_library.sidecar_volumes import VolumeCategory, VolumeState, VolumeStatus from pydantic import BaseModel, Field, PrivateAttr from ..core.settings import ApplicationSettings -ContainerNameStr = str +ContainerNameStr: TypeAlias = str STORE_FILE_NAME: Final[str] = "data.json" -class SharedStore(BaseModel): - _shared_store_dir: Path = PrivateAttr() +class _StoreMixin(BaseModel): + _shared_store_dir: Path | None = PrivateAttr() + _persist_lock: Lock = PrivateAttr(default_factory=Lock) - compose_spec: Optional[str] = Field( + async def __aenter__(self) -> None: + await self._persist_lock.acquire() + + async def __aexit__(self, *args) -> None: + await self._persist_to_disk() + self._persist_lock.release() + + async def _persist_to_disk(self) -> None: + assert self._shared_store_dir # nosec + async with aiofiles.open( + self._shared_store_dir / STORE_FILE_NAME, "w" + ) as data_file: + await data_file.write(self.model_dump_json()) + + def post_init(self, shared_store_dir: Path): + self._shared_store_dir = shared_store_dir + + +class SharedStore(_StoreMixin): + """ + When used as a context manager, it will persist the state to the disk upon exit. + + NOTE: when updating the contents of the shared store always use a context manager + to avoid concurrency issues. + + Example: + async with shared_store: + copied_list = deepcopy(shared_store.container_names) + copied_list.append("a_container_name") + shared_store.container_names = copied_list + """ + + compose_spec: DockerComposeYamlStr | None = Field( default=None, description="stores the stringified compose spec" ) container_names: list[ContainerNameStr] = Field( default_factory=list, description="stores the container names from the compose_spec", ) + original_to_container_names: dict[ContainerNameStr, ContainerNameStr] = Field( + default_factory=dict, + description="original container names from the compose_spec", + ) - # NOTE: setting up getter and setter does not work.
- def set_shared_store_dir(self, shared_store_dir: Path) -> None: - self._shared_store_dir = shared_store_dir + volume_states: dict[VolumeCategory, VolumeState] = Field( + default_factory=dict, description="persist the state of each volume" + ) + + def __eq__(self, other: object) -> bool: + return all( + getattr(self, n, None) == getattr(other, n, None) + for n in ( + "compose_spec", + "container_names", + "original_to_container_names", + "volume_states", + ) + ) - async def clear(self): - self.compose_spec = None - self.container_names = [] - await self.persist_to_disk() + async def _setup_initial_volume_states(self) -> None: + async with self: + for category, status in [ + (VolumeCategory.INPUTS, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + (VolumeCategory.SHARED_STORE, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + (VolumeCategory.OUTPUTS, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + (VolumeCategory.STATES, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + ]: + # pylint: disable=unsupported-assignment-operation + self.volume_states[category] = VolumeState(status=status) @classmethod - async def init_from_disk(cls, shared_store_dir: Path) -> "SharedStore": - data_file_path = shared_store_dir / STORE_FILE_NAME - if data_file_path.exists(): - # if the sidecar is started for a second time (usually the container dies) - # it will load the previous data which was stored - async with aiofiles.open(shared_store_dir / STORE_FILE_NAME) as data_file: - file_content = await data_file.read() - - obj = cls.parse_obj(file_content) - else: + async def init_from_disk( + cls, shared_store_dir: Path, *, store_file_name: "str" + ) -> "SharedStore": + data_file_path = shared_store_dir / store_file_name + + if not data_file_path.exists(): obj = cls() + obj.post_init(shared_store_dir) + await obj._setup_initial_volume_states() # noqa SLF001 + return obj - obj.set_shared_store_dir(shared_store_dir) - return obj + # if the sidecar is started for a second time (usually the container dies) + # it will load the previous data which was stored + async with aiofiles.open(shared_store_dir / store_file_name) as data_file: + file_content = await data_file.read() - async def persist_to_disk(self) -> None: - async with aiofiles.open( - self._shared_store_dir / STORE_FILE_NAME, "w" - ) as data_file: - await data_file.write(self.json()) + obj = cls.model_validate_json(file_content) + obj.post_init(shared_store_dir) + return obj def setup_shared_store(app: FastAPI) -> None: @@ -60,7 +116,12 @@ async def on_startup() -> None: settings: ApplicationSettings = app.state.settings app.state.shared_store = await SharedStore.init_from_disk( - settings.DYNAMIC_SIDECAR_SHARED_STORE_DIR + settings.DYNAMIC_SIDECAR_SHARED_STORE_DIR, store_file_name=STORE_FILE_NAME ) app.add_event_handler("startup", on_startup) + + +def get_shared_store(app: FastAPI) -> SharedStore: + shared_store: SharedStore = app.state.shared_store + return shared_store diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_logging_event_handler.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_logging_event_handler.py index aa39e4f2a96..3c80e2ed87f 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_logging_event_handler.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_logging_event_handler.py @@ -1,5 +1,3 @@ -# pylint:disable=no-member - import logging import stat from asyncio import CancelledError, 
Task, create_task, get_event_loop @@ -9,11 +7,11 @@ from pathlib import Path from queue import Empty from time import sleep as blocking_sleep -from typing import Final, Optional +from typing import Final -import aioprocessing -from aioprocessing.process import AioProcess -from aioprocessing.queues import AioQueue +import aioprocessing # type: ignore[import-untyped] +from aioprocessing.process import AioProcess # type: ignore[import-untyped] +from aioprocessing.queues import AioQueue # type: ignore[import-untyped] from pydantic import ByteSize, PositiveFloat from servicelib.logging_utils import log_context from watchdog.events import FileSystemEvent @@ -29,17 +27,22 @@ class _LoggingEventHandler(SafeFileSystemEventHandler): def event_handler(self, event: FileSystemEvent) -> None: # NOTE: runs in the created process - file_path = Path(event.src_path) - file_stat = file_path.stat() - logger.info( - "Attribute change to: '%s': permissions=%s uid=%s gid=%s size=%s\nFile stat: %s", - file_path, - stat.filemode(file_stat.st_mode), - file_stat.st_uid, - file_stat.st_gid, - ByteSize(file_stat.st_size).human_readable(), - file_stat, + file_path = Path( + event.src_path.decode() + if isinstance(event.src_path, bytes) + else event.src_path ) + with suppress(FileNotFoundError): + file_stat = file_path.stat() + logger.info( + "Attribute change to: '%s': permissions=%s uid=%s gid=%s size=%s\nFile stat: %s", + file_path, + stat.filemode(file_stat.st_mode), + file_stat.st_uid, + file_stat.st_gid, + ByteSize(file_stat.st_size).human_readable(), + file_stat, + ) class _LoggingEventHandlerProcess: @@ -49,7 +52,6 @@ def __init__( health_check_queue: AioQueue, heart_beat_interval_s: PositiveFloat, ) -> None: - self.path_to_observe: Path = path_to_observe self.health_check_queue: AioQueue = health_check_queue self.heart_beat_interval_s: PositiveFloat = heart_beat_interval_s @@ -58,8 +60,8 @@ def __init__( # the process itself and is used to stop the process. self._stop_queue: AioQueue = aioprocessing.AioQueue() - self._file_system_event_handler: Optional[_LoggingEventHandler] = None - self._process: Optional[AioProcess] = None + self._file_system_event_handler: _LoggingEventHandler | None = None + self._process: AioProcess | None = None def start_process(self) -> None: with log_context( @@ -70,7 +72,7 @@ def start_process(self) -> None: self._process = aioprocessing.AioProcess( target=self._process_worker, daemon=True ) - self._process.start() + self._process.start() # pylint:disable=no-member def _stop_process(self) -> None: with log_context( @@ -78,12 +80,12 @@ def _stop_process(self) -> None: logging.DEBUG, f"{_LoggingEventHandlerProcess.__name__} stop_process", ): - self._stop_queue.put(None) + self._stop_queue.put(None) # pylint:disable=no-member if self._process: # force stop the process - self._process.kill() - self._process.join() + self._process.kill() # pylint:disable=no-member + self._process.join() # pylint:disable=no-member self._process = None # cleanup whatever remains @@ -111,7 +113,7 @@ def _process_worker(self) -> None: ) observer.start() - while self._stop_queue.qsize() == 0: + while self._stop_queue.qsize() == 0: # pylint:disable=no-member # NOTE: watchdog handles events internally every 1 second. # While doing so it will block this thread briefly. # Health check delivery may be delayed. 
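# --- editor's illustrative sketch, not part of the patch ----------------------
# The hunk above covers the worker loop of _LoggingEventHandlerProcess: the
# watchdog observer stays alive until a stop token is put on the stop queue,
# while liveness is reported back through the health-check queue at
# heart_beat_interval_s. Reduced to the standard library (names hypothetical),
# the same stop-queue/heartbeat pattern looks roughly like:
#
#     import multiprocessing, time
#
#     def _worker(stop_q, health_q, heartbeat_s: float) -> None:
#         while stop_q.qsize() == 0:     # exit once the parent enqueues a stop token
#             health_q.put(None)         # heartbeat consumed by the parent
#             time.sleep(heartbeat_s)
#
#     # parent side (sketch):
#     #   proc = multiprocessing.Process(target=_worker, args=(stop_q, health_q, 1.0), daemon=True)
#     #   proc.start(); ...; stop_q.put(None); proc.join()
# -------------------------------------------------------------------------------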
@@ -157,7 +159,7 @@ def __init__( heart_beat_interval_s=heart_beat_interval_s, ) self._keep_running: bool = False - self._task_health_worker: Optional[Task] = None + self._task_health_worker: Task | None = None @property def heart_beat_interval_s(self) -> PositiveFloat: @@ -173,7 +175,7 @@ async def _health_worker(self) -> None: heart_beat_count = 0 while True: try: - self._health_check_queue.get_nowait() + self._health_check_queue.get_nowait() # pylint:disable=no-member heart_beat_count += 1 except Empty: break diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_watchdog_extensions.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_watchdog_extensions.py index 55da4bee573..b73d5f97a6b 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_watchdog_extensions.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/attribute_monitor/_watchdog_extensions.py @@ -5,7 +5,8 @@ from servicelib.logging_utils import log_catch from watchdog.events import FileSystemEvent, FileSystemEventHandler from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT, BaseObserver -from watchdog.observers.inotify import InotifyBuffer, InotifyEmitter +from watchdog.observers.inotify import InotifyEmitter +from watchdog.observers.inotify_buffer import InotifyBuffer from watchdog.observers.inotify_c import Inotify, InotifyConstants from watchdog.utils import BaseThread from watchdog.utils.delayed_queue import DelayedQueue @@ -14,12 +15,16 @@ class _ExtendedInotifyBuffer(InotifyBuffer): - def __init__(self, path, recursive=False): # pylint:disable=super-init-not-called + def __init__( + self, path: bytes, *, recursive: bool = False + ): # pylint:disable=super-init-not-called # below call to `BaseThread.__init__` is correct since we want to # overwrite the `InotifyBuffer.__init__` method BaseThread.__init__(self) # pylint:disable=non-parent-init-called self._queue = DelayedQueue(self.delay) - self._inotify = Inotify(path, recursive, InotifyConstants.IN_ATTRIB) + self._inotify = Inotify( # pylint:disable=too-many-function-args + path, recursive=recursive, event_mask=InotifyConstants.IN_ATTRIB + ) self.start() @@ -27,7 +32,7 @@ class _ExtendedInotifyEmitter(InotifyEmitter): def on_thread_start(self): path = os.fsencode(self.watch.path) # pylint:disable=attribute-defined-outside-init - self._inotify = _ExtendedInotifyBuffer(path, self.watch.is_recursive) + self._inotify = _ExtendedInotifyBuffer(path, recursive=self.watch.is_recursive) class ExtendedInotifyObserver(BaseObserver): diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py new file mode 100644 index 00000000000..64b91bf938e --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/container_utils.py @@ -0,0 +1,86 @@ +import asyncio +import logging +from collections.abc import Sequence +from typing import Any + +from aiodocker import Docker, DockerError +from aiodocker.execs import Exec +from aiodocker.stream import Stream +from pydantic import NonNegativeFloat +from starlette import status + +from ..core.errors import ( + ContainerExecCommandFailedError, + ContainerExecContainerNotFoundError, + ContainerExecTimeoutError, +) + +_logger = logging.getLogger(__name__) + + +async def _execute_command(container_name: str, command: str | 
Sequence[str]) -> str: + async with Docker() as docker: + container = await docker.containers.get(container_name) + + # Start the command inside the container + exec_instance: Exec = await container.exec( + cmd=command, stdout=True, stderr=True, tty=False + ) + + # Start the execution + stream: Stream = exec_instance.start(detach=False) + + command_result: str = "" + async with stream: + while stream_message := await stream.read_out(): + command_result += stream_message.data.decode() + + inspect_result: dict[str, Any] = await exec_instance.inspect() + exit_code: int | None = inspect_result.get("ExitCode", None) + if exit_code != 0: + raise ContainerExecCommandFailedError( + command=command, exit_code=exit_code, command_result=command_result + ) + + _logger.debug("Command result:\n$ '%s'\n%s", command, command_result) + return command_result + + +async def run_command_in_container( + container_name: str, + *, + command: str | Sequence[str], + timeout: NonNegativeFloat = 1.0, +): + """ + Runs `command` in target container and returns the command's output if + command's exit code is 0. + + Arguments: + container_name -- name of the container in which to run the command + command -- string or sequence of strings to run as command + + Keyword Arguments: + timeout -- max time for the command to return a result in (default: {1.0}) + + Raises: + ContainerExecTimeoutError: command execution did not finish in time + ContainerExecContainerNotFoundError: target container is not present + ContainerExecCommandFailedError: command finished with not 0 exit code + DockerError: propagates error from docker engine + + Returns: + stdout + stderr produced by the command is returned + """ + try: + return await asyncio.wait_for( + _execute_command(container_name, command), timeout + ) + except DockerError as e: + if e.status == status.HTTP_404_NOT_FOUND: + raise ContainerExecContainerNotFoundError( + container_name=container_name + ) from e + raise + except asyncio.TimeoutError as e: + raise ContainerExecTimeoutError(timeout=timeout, command=command) from e diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/database.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/database.py new file mode 100644 index 00000000000..a1ccfb9805c --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/database.py @@ -0,0 +1,26 @@ +from fastapi import FastAPI +from servicelib.db_asyncpg_utils import check_postgres_liveness, with_async_pg_engine +from settings_library.postgres import PostgresSettings + +from ..core.settings import ApplicationSettings +from .service_liveness import ( + wait_for_service_liveness, +) + + +async def wait_for_database_liveness(app: FastAPI) -> None: + """ + Checks if the postgres engine is alive and can be used. 
+ """ + + app_settings = app.state.settings + assert isinstance(app_settings, ApplicationSettings) # nosec + postgres_settings = app_settings.POSTGRES_SETTINGS + assert isinstance(postgres_settings, PostgresSettings) # nosec + async with with_async_pg_engine(postgres_settings) as engine: + await wait_for_service_liveness( + check_postgres_liveness, + engine, + service_name="Postgres", + endpoint=postgres_settings.dsn, + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/inputs.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/inputs.py new file mode 100644 index 00000000000..3898810930f --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/inputs.py @@ -0,0 +1,25 @@ +from fastapi import FastAPI +from pydantic import BaseModel, Field + + +class InputsState(BaseModel): + inputs_pulling_enabled: bool = Field( + default=False, description="can pull input ports" + ) + + +def enable_inputs_pulling(app: FastAPI) -> None: + inputs_state: InputsState = app.state.inputs_state + inputs_state.inputs_pulling_enabled = True + + +def disable_inputs_pulling(app: FastAPI) -> None: + inputs_state: InputsState = app.state.inputs_state + inputs_state.inputs_pulling_enabled = False + + +def setup_inputs(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.inputs_state = InputsState() + + app.add_event_handler("startup", on_startup) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks.py index 6601b7821b0..42412376d08 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks.py @@ -1,11 +1,21 @@ import functools import logging +from collections.abc import AsyncGenerator +from contextlib import asynccontextmanager from pathlib import Path -from typing import Final, Optional +from typing import Final from fastapi import FastAPI -from models_library.rabbitmq_messages import ProgressType -from servicelib.fastapi.long_running_tasks.server import TaskProgress +from models_library.api_schemas_long_running_tasks.base import ( + ProgressPercent, + TaskProgress, +) +from models_library.generated_models.docker_rest_api import ContainerState +from models_library.rabbitmq_messages import ProgressType, SimcorePlatformStatus +from models_library.service_settings_labels import LegacyState +from pydantic import PositiveInt +from servicelib.file_utils import log_directory_changes +from servicelib.logging_utils import log_context from servicelib.progress_bar import ProgressBarData from servicelib.utils import logged_gather from simcore_sdk.node_data import data_manager @@ -24,23 +34,33 @@ docker_compose_start, ) from ..core.docker_logs import start_log_fetching, stop_log_fetching -from ..core.docker_utils import get_running_containers_count_from_names +from ..core.docker_utils import ( + are_all_containers_in_expected_states, + get_container_states, + get_containers_count_from_names, +) from ..core.rabbitmq import ( post_event_reload_iframe, post_progress_message, post_sidecar_log_message, ) from ..core.settings import ApplicationSettings -from ..core.utils import CommandResult, assemble_container_names -from ..core.validation import parse_compose_spec, validate_compose_spec +from ..core.utils import CommandResult +from ..core.validation import 
parse_compose_spec from ..models.schemas.application_health import ApplicationHealth from ..models.schemas.containers import ContainersCreate from ..models.shared_store import SharedStore -from ..modules import nodeports +from ..modules import nodeports, user_services_preferences from ..modules.mounted_fs import MountedVolumes -from ..modules.outputs import OutputsManager, outputs_watcher_disabled +from ..modules.notifications._notifications_ports import PortNotifier +from ..modules.outputs import OutputsManager, event_propagation_disabled +from .long_running_tasks_utils import ( + ensure_read_permissions_on_user_service_data, + run_before_shutdown_actions, +) +from .resource_tracking import send_service_started, send_service_stopped -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) # TASKS @@ -54,8 +74,8 @@ def _raise_for_errors( command_result: CommandResult, docker_compose_command: str ) -> None: if not command_result.success: - logger.warning( - "docker-compose %s command finished with errors\n%s", + _logger.warning( + "docker compose %s command finished with errors\n%s", docker_compose_command, command_result.message, ) @@ -67,7 +87,7 @@ def _raise_for_errors( stop=stop_after_delay(5 * _MINUTE), retry=retry_if_result(lambda result: result.success is False), reraise=False, - before_sleep=before_sleep_log(logger, logging.WARNING, exc_info=True), + before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), ) async def _retry_docker_compose_start( compose_spec: str, settings: ApplicationSettings @@ -83,7 +103,7 @@ async def _retry_docker_compose_start( stop=stop_after_delay(5 * _MINUTE), retry=retry_if_result(lambda result: result.success is False), reraise=False, - before_sleep=before_sleep_log(logger, logging.WARNING, exc_info=True), + before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), ) async def _retry_docker_compose_down( compose_spec: str, settings: ApplicationSettings @@ -96,7 +116,7 @@ async def _retry_docker_compose_down( stop=stop_after_delay(5 * _MINUTE), retry=retry_if_result(lambda result: result is False), reraise=True, - before_sleep=before_sleep_log(logger, logging.WARNING, exc_info=True), + before_sleep=before_sleep_log(_logger, logging.WARNING, exc_info=True), ) async def _retry_docker_compose_create( compose_spec: str, settings: ApplicationSettings @@ -108,68 +128,110 @@ async def _retry_docker_compose_create( container_names = list(compose_spec_dict["services"].keys()) expected_num_containers = len(container_names) - actual_num_containers = await get_running_containers_count_from_names( - container_names - ) + actual_num_containers = await get_containers_count_from_names(container_names) return expected_num_containers == actual_num_containers +@asynccontextmanager +async def _reset_on_error( + shared_store: SharedStore, +) -> AsyncGenerator[None, None]: + try: + yield None + except Exception: + async with shared_store: + shared_store.compose_spec = None + shared_store.container_names = [] + raise + + +async def task_pull_user_servcices_docker_images( + progress: TaskProgress, shared_store: SharedStore, app: FastAPI +) -> None: + assert shared_store.compose_spec # nosec + + progress.update(message="started pulling user services", percent=ProgressPercent(0)) + + await docker_compose_pull(app, shared_store.compose_spec) + + progress.update( + message="finished pulling user services", percent=ProgressPercent(1) + ) + + async def task_create_service_containers( progress: TaskProgress, settings: 
ApplicationSettings, containers_create: ContainersCreate, shared_store: SharedStore, - mounted_volumes: MountedVolumes, app: FastAPI, application_health: ApplicationHealth, ) -> list[str]: - progress.update(message="validating service spec", percent=0) - - shared_store.compose_spec = await validate_compose_spec( - settings=settings, - compose_file_content=containers_create.docker_compose_yaml, - mounted_volumes=mounted_volumes, - ) - shared_store.container_names = assemble_container_names(shared_store.compose_spec) - await shared_store.persist_to_disk() - - logger.info("Validated compose-spec:\n%s", f"{shared_store.compose_spec}") + progress.update(message="validating service spec", percent=ProgressPercent(0)) assert shared_store.compose_spec # nosec - with outputs_watcher_disabled(app): + async with event_propagation_disabled(app), _reset_on_error( + shared_store + ), ProgressBarData( + num_steps=4, + progress_report_cb=functools.partial( + post_progress_message, + app, + ProgressType.SERVICE_CONTAINERS_STARTING, + ), + description="starting software", + ) as progress_bar: + with log_context(_logger, logging.INFO, "load user services preferences"): + if user_services_preferences.is_feature_enabled(app): + await user_services_preferences.load_user_services_preferences(app) + await progress_bar.update() + # removes previous pending containers progress.update(message="cleanup previous used resources") result = await docker_compose_rm(shared_store.compose_spec, settings) _raise_for_errors(result, "rm") + await progress_bar.update() - progress.update(message="pulling images", percent=0.01) - await post_sidecar_log_message(app, "pulling service images") - await post_progress_message(app, ProgressType.SERVICE_IMAGES_PULLING, 0) - await docker_compose_pull(app, shared_store.compose_spec) - await post_sidecar_log_message(app, "service images ready") - await post_progress_message(app, ProgressType.SERVICE_IMAGES_PULLING, 1) - - progress.update(message="creating and starting containers", percent=0.90) - await post_sidecar_log_message(app, "starting service containers") + progress.update( + message="creating and starting containers", percent=ProgressPercent(0.90) + ) + await post_sidecar_log_message( + app, "starting service containers", log_level=logging.INFO + ) await _retry_docker_compose_create(shared_store.compose_spec, settings) + await progress_bar.update() - progress.update(message="ensure containers are started", percent=0.95) - r = await _retry_docker_compose_start(shared_store.compose_spec, settings) + progress.update( + message="ensure containers are started", percent=ProgressPercent(0.95) + ) + compose_start_result = await _retry_docker_compose_start( + shared_store.compose_spec, settings + ) - message = f"Finished docker-compose start with output\n{r.message}" + await send_service_started(app, metrics_params=containers_create.metrics_params) - if r.success: - await post_sidecar_log_message(app, "service containers started") - logger.debug(message) + message = ( + f"Finished docker-compose start with output\n{compose_start_result.message}" + ) + + if compose_start_result.success: + await post_sidecar_log_message( + app, "user services started", log_level=logging.INFO + ) + _logger.debug(message) for container_name in shared_store.container_names: await start_log_fetching(app, container_name) else: application_health.is_healthy = False application_health.error_message = message - logger.error("Marked sidecar as unhealthy, see below for details\n:%s", message) - await 
post_sidecar_log_message(app, "could not start service containers") + _logger.error( + "Marked sidecar as unhealthy, see below for details\n:%s", message + ) + await post_sidecar_log_message( + app, "could not start user services", log_level=logging.ERROR + ) return shared_store.container_names @@ -179,83 +241,211 @@ async def task_runs_docker_compose_down( app: FastAPI, shared_store: SharedStore, settings: ApplicationSettings, + mounted_volumes: MountedVolumes, ) -> None: if shared_store.compose_spec is None: - logger.warning("No compose-spec was found") + _logger.warning("No compose-spec was found") return - progress.update(message="running docker-compose-down", percent=0.1) - result = await _retry_docker_compose_down(shared_store.compose_spec, settings) - _raise_for_errors(result, "down") + container_states: dict[str, ContainerState | None] = await get_container_states( + shared_store.container_names + ) + containers_were_ok = are_all_containers_in_expected_states( + container_states.values() + ) + + container_count_before_down: PositiveInt = await get_containers_count_from_names( + shared_store.container_names + ) - progress.update(message="stopping logs", percent=0.9) - for container_name in shared_store.container_names: - await stop_log_fetching(app, container_name) + async def _send_resource_tracking_stop(platform_status: SimcorePlatformStatus): + # NOTE: avoids sending a stop message without a start or any heartbeats, + # which makes no sense for the purpose of billing + if container_count_before_down > 0: + # if containers were not OK, we need to check their status + # only if oom killed we report as BAD + simcore_platform_status = platform_status + if not containers_were_ok: + any_container_oom_killed = any( + c.oom_killed is True + for c in container_states.values() + if c is not None + ) + # if it's not an OOM killer (the user killed it) we set it as bad + # since the platform failed the container + if any_container_oom_killed: + _logger.warning( + "Containers killed to to OOMKiller: %s", container_states + ) + else: + # NOTE: MD/ANE discussed: Initial thought was to use SimcorePlatformStatus to + # inform RUT that there was some problem on Simcore side and therefore we will + # not bill the user for running the service. This needs to be discussed + # therefore we will always consider it as OK for now. 
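+                        # (in short: with the current policy the stop event below is always sent +                        # with SimcorePlatformStatus.OK; an OOM-killed container is only logged +                        # as a warning above)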
+ # NOTE: https://github.com/ITISFoundation/osparc-simcore/issues/4952 + simcore_platform_status = SimcorePlatformStatus.OK + + await send_service_stopped(app, simcore_platform_status) + + try: + progress.update( + message="running docker-compose-down", percent=ProgressPercent(0.1) + ) + + await run_before_shutdown_actions( + shared_store, settings.DY_SIDECAR_CALLBACKS_MAPPING.before_shutdown + ) + + with log_context(_logger, logging.INFO, "save user services preferences"): + if user_services_preferences.is_feature_enabled(app): + await user_services_preferences.save_user_services_preferences(app) + + result = await _retry_docker_compose_down(shared_store.compose_spec, settings) + _raise_for_errors(result, "down") + + progress.update(message="stopping logs", percent=ProgressPercent(0.9)) + for container_name in shared_store.container_names: + await stop_log_fetching(app, container_name) + + progress.update( + message="removing pending resources", percent=ProgressPercent(0.95) + ) + result = await docker_compose_rm(shared_store.compose_spec, settings) + _raise_for_errors(result, "rm") + except Exception: + # NOTE: https://github.com/ITISFoundation/osparc-simcore/issues/4952 + await _send_resource_tracking_stop(SimcorePlatformStatus.OK) + raise + finally: + with log_context(_logger, logging.INFO, "ensure read permissions"): + await ensure_read_permissions_on_user_service_data(mounted_volumes) - progress.update(message="removing pending resources", percent=0.95) - result = await docker_compose_rm(shared_store.compose_spec, settings) - _raise_for_errors(result, "rm") + await _send_resource_tracking_stop(SimcorePlatformStatus.OK) # removing compose-file spec - await shared_store.clear() - progress.update(message="done", percent=0.99) + async with shared_store: + shared_store.compose_spec = None + shared_store.container_names = [] + progress.update(message="done", percent=ProgressPercent(0.99)) -async def task_restore_state( - progress: TaskProgress, +def _get_satate_folders_size(paths: list[Path]) -> int: + total_size: int = 0 + for path in paths: + for file in path.rglob("*"): + if file.is_file(): + total_size += file.stat().st_size + return total_size + + +def _get_legacy_state_with_dy_volumes_path( settings: ApplicationSettings, - mounted_volumes: MountedVolumes, +) -> LegacyState | None: + legacy_state = settings.DY_SIDECAR_LEGACY_STATE + if legacy_state is None: + return None + dy_volumes = settings.DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR + return LegacyState( + old_state_path=dy_volumes / legacy_state.old_state_path.relative_to("/"), + new_state_path=dy_volumes / legacy_state.new_state_path.relative_to("/"), + ) + + +async def _restore_state_folder( app: FastAPI, + *, + settings: ApplicationSettings, + progress_bar: ProgressBarData, + state_path: Path, ) -> None: - progress.update(message="checking files", percent=0.0) - # first check if there are files (no max concurrency here, these are just quick REST calls) - paths_exists: list[bool] = await logged_gather( - *( - data_manager.exists( - user_id=settings.DY_SIDECAR_USER_ID, - project_id=f"{settings.DY_SIDECAR_PROJECT_ID}", - node_uuid=f"{settings.DY_SIDECAR_NODE_ID}", - file_path=path, - ) - for path in mounted_volumes.disk_state_paths() + await data_manager.pull( + user_id=settings.DY_SIDECAR_USER_ID, + project_id=settings.DY_SIDECAR_PROJECT_ID, + node_uuid=settings.DY_SIDECAR_NODE_ID, + destination_path=Path(state_path), + io_log_redirect_cb=functools.partial( + post_sidecar_log_message, app, log_level=logging.INFO ), - reraise=True, + 
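+        # NOTE: the r_clone / aws_s3_cli settings presumably select the transfer backend, +        # while `legacy_state` remaps the old zip-based state location into the dy-volumes +        # mount (see the retro-compatibility note in `task_restore_state`)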
r_clone_settings=settings.DY_SIDECAR_R_CLONE_SETTINGS, + progress_bar=progress_bar, + aws_s3_cli_settings=settings.DY_SIDECAR_AWS_S3_CLI_SETTINGS, + legacy_state=_get_legacy_state_with_dy_volumes_path(settings), ) - effective_paths: list[Path] = [ - path - for path, exists in zip(mounted_volumes.disk_state_paths(), paths_exists) - if exists - ] - progress.update(message="Downloading state", percent=0.05) + +async def task_restore_state( + progress: TaskProgress, + settings: ApplicationSettings, + mounted_volumes: MountedVolumes, + app: FastAPI, +) -> int: + # NOTE: the legacy data format was a zip file + # this method will maintain retro compatibility. + # The legacy archive is always downloaded and decompressed + # if found. If the `task_save_state` is successful the legacy + # archive will be removed. + # When the legacy archive is detected it will have precedence + # over the new format. + # NOTE: this implies that the legacy format will always be decompressed + # until it is not removed. + + progress.update(message="Downloading state", percent=ProgressPercent(0.05)) + state_paths = list(mounted_volumes.disk_state_paths_iter()) await post_sidecar_log_message( app, - f"Downloading state files for {effective_paths}...", + f"Downloading state files for {state_paths}...", + log_level=logging.INFO, ) async with ProgressBarData( - steps=len(effective_paths), + num_steps=len(state_paths), progress_report_cb=functools.partial( - post_progress_message, app, ProgressType.SERVICE_STATE_PULLING + post_progress_message, + app, + ProgressType.SERVICE_STATE_PULLING, ), + description="pulling states", ) as root_progress: await logged_gather( *( - data_manager.pull( - user_id=settings.DY_SIDECAR_USER_ID, - project_id=str(settings.DY_SIDECAR_PROJECT_ID), - node_uuid=str(settings.DY_SIDECAR_NODE_ID), - file_or_folder=path, - io_log_redirect_cb=functools.partial(post_sidecar_log_message, app), - progress_bar=root_progress, + _restore_state_folder( + app, settings=settings, progress_bar=root_progress, state_path=path ) - for path in effective_paths + for path in mounted_volumes.disk_state_paths_iter() ), max_concurrency=CONCURRENCY_STATE_SAVE_RESTORE, reraise=True, # this should raise if there is an issue ) - await post_sidecar_log_message(app, "Finished state downloading") - progress.update(message="state restored", percent=0.99) + await post_sidecar_log_message( + app, "Finished state downloading", log_level=logging.INFO + ) + progress.update(message="state restored", percent=ProgressPercent(0.99)) + + return _get_satate_folders_size(state_paths) + + +async def _save_state_folder( + app: FastAPI, + *, + settings: ApplicationSettings, + progress_bar: ProgressBarData, + state_path: Path, + mounted_volumes: MountedVolumes, +) -> None: + await data_manager.push( + user_id=settings.DY_SIDECAR_USER_ID, + project_id=settings.DY_SIDECAR_PROJECT_ID, + node_uuid=settings.DY_SIDECAR_NODE_ID, + source_path=state_path, + r_clone_settings=settings.DY_SIDECAR_R_CLONE_SETTINGS, + exclude_patterns=mounted_volumes.state_exclude, + io_log_redirect_cb=functools.partial( + post_sidecar_log_message, app, log_level=logging.INFO + ), + progress_bar=progress_bar, + aws_s3_cli_settings=settings.DY_SIDECAR_AWS_S3_CLI_SETTINGS, + legacy_state=_get_legacy_state_with_dy_volumes_path(settings), + ) async def task_save_state( @@ -263,103 +453,149 @@ async def task_save_state( settings: ApplicationSettings, mounted_volumes: MountedVolumes, app: FastAPI, -) -> None: - progress.update(message="starting state save", percent=0.0) +) -> int: 
+ """ + Saves the states of the service. + If a legacy archive is detected, it will be removed after + saving the new format. + """ + progress.update(message="starting state save", percent=ProgressPercent(0.0)) + state_paths = list(mounted_volumes.disk_state_paths_iter()) async with ProgressBarData( - steps=len([mounted_volumes.disk_state_paths()]), + num_steps=len(state_paths), progress_report_cb=functools.partial( - post_progress_message, app, ProgressType.SERVICE_STATE_PUSHING + post_progress_message, + app, + ProgressType.SERVICE_STATE_PUSHING, ), + description="pushing state", ) as root_progress: await logged_gather( *[ - data_manager.push( - user_id=settings.DY_SIDECAR_USER_ID, - project_id=str(settings.DY_SIDECAR_PROJECT_ID), - node_uuid=str(settings.DY_SIDECAR_NODE_ID), - file_or_folder=state_path, - r_clone_settings=settings.rclone_settings_for_nodeports, - archive_exclude_patterns=mounted_volumes.state_exclude, - io_log_redirect_cb=functools.partial(post_sidecar_log_message, app), + _save_state_folder( + app, + settings=settings, progress_bar=root_progress, + state_path=state_path, + mounted_volumes=mounted_volumes, ) - for state_path in mounted_volumes.disk_state_paths() + for state_path in state_paths ], max_concurrency=CONCURRENCY_STATE_SAVE_RESTORE, ) - await post_sidecar_log_message(app, "Finished state saving") - progress.update(message="finished state saving", percent=0.99) + await post_sidecar_log_message(app, "Finished state saving", log_level=logging.INFO) + progress.update(message="finished state saving", percent=ProgressPercent(0.99)) + + return _get_satate_folders_size(state_paths) async def task_ports_inputs_pull( progress: TaskProgress, - port_keys: Optional[list[str]], + port_keys: list[str] | None, mounted_volumes: MountedVolumes, app: FastAPI, + settings: ApplicationSettings, + *, + inputs_pulling_enabled: bool, ) -> int: - progress.update(message="starting inputs pulling", percent=0.0) + if not inputs_pulling_enabled: + _logger.info("Received request to pull inputs but was ignored") + return 0 + + progress.update(message="starting inputs pulling", percent=ProgressPercent(0.0)) port_keys = [] if port_keys is None else port_keys - await post_sidecar_log_message(app, f"Pulling inputs for {port_keys}") - progress.update(message="pulling inputs", percent=0.1) + await post_sidecar_log_message( + app, f"Pulling inputs for {port_keys}", log_level=logging.INFO + ) + progress.update(message="pulling inputs", percent=ProgressPercent(0.1)) async with ProgressBarData( - steps=1, + num_steps=1, progress_report_cb=functools.partial( - post_progress_message, app, ProgressType.SERVICE_INPUTS_PULLING + post_progress_message, + app, + ProgressType.SERVICE_INPUTS_PULLING, ), + description="pulling inputs", ) as root_progress: - transferred_bytes = await nodeports.download_target_ports( - nodeports.PortTypeName.INPUTS, - mounted_volumes.disk_inputs_path, - port_keys=port_keys, - io_log_redirect_cb=functools.partial(post_sidecar_log_message, app), - progress_bar=root_progress, - ) - await post_sidecar_log_message(app, "Finished pulling inputs") - progress.update(message="finished inputs pulling", percent=0.99) + with log_directory_changes( + mounted_volumes.disk_inputs_path, _logger, logging.INFO + ): + transferred_bytes = await nodeports.download_target_ports( + nodeports.PortTypeName.INPUTS, + mounted_volumes.disk_inputs_path, + port_keys=port_keys, + io_log_redirect_cb=functools.partial( + post_sidecar_log_message, app, log_level=logging.INFO + ), + progress_bar=root_progress, + 
port_notifier=PortNotifier( + app, + settings.DY_SIDECAR_USER_ID, + settings.DY_SIDECAR_PROJECT_ID, + settings.DY_SIDECAR_NODE_ID, + ), + ) + await post_sidecar_log_message( + app, "Finished pulling inputs", log_level=logging.INFO + ) + progress.update(message="finished inputs pulling", percent=ProgressPercent(0.99)) return int(transferred_bytes) async def task_ports_outputs_pull( progress: TaskProgress, - port_keys: Optional[list[str]], + port_keys: list[str] | None, mounted_volumes: MountedVolumes, app: FastAPI, ) -> int: - progress.update(message="starting outputs pulling", percent=0.0) + progress.update(message="starting outputs pulling", percent=ProgressPercent(0.0)) port_keys = [] if port_keys is None else port_keys - await post_sidecar_log_message(app, f"Pulling output for {port_keys}") + await post_sidecar_log_message( + app, f"Pulling output for {port_keys}", log_level=logging.INFO + ) async with ProgressBarData( - steps=1, + num_steps=1, progress_report_cb=functools.partial( - post_progress_message, app, ProgressType.SERVICE_OUTPUTS_PULLING + post_progress_message, + app, + ProgressType.SERVICE_OUTPUTS_PULLING, ), + description="pulling outputs", ) as root_progress: transferred_bytes = await nodeports.download_target_ports( nodeports.PortTypeName.OUTPUTS, mounted_volumes.disk_outputs_path, port_keys=port_keys, - io_log_redirect_cb=functools.partial(post_sidecar_log_message, app), + io_log_redirect_cb=functools.partial( + post_sidecar_log_message, app, log_level=logging.INFO + ), progress_bar=root_progress, + port_notifier=None, ) - await post_sidecar_log_message(app, "Finished pulling outputs") - progress.update(message="finished outputs pulling", percent=0.99) + await post_sidecar_log_message( + app, "Finished pulling outputs", log_level=logging.INFO + ) + progress.update(message="finished outputs pulling", percent=ProgressPercent(0.99)) return int(transferred_bytes) async def task_ports_outputs_push( progress: TaskProgress, outputs_manager: OutputsManager, app: FastAPI ) -> None: - progress.update(message="starting outputs pushing", percent=0.0) + progress.update(message="starting outputs pushing", percent=ProgressPercent(0.0)) await post_sidecar_log_message( app, f"waiting for outputs {outputs_manager.outputs_context.file_type_port_keys} to be pushed", + log_level=logging.INFO, ) await outputs_manager.wait_for_all_uploads_to_finish() - await post_sidecar_log_message(app, "finished outputs pushing") - progress.update(message="finished outputs pushing", percent=0.99) + await post_sidecar_log_message( + app, "finished outputs pushing", log_level=logging.INFO + ) + progress.update(message="finished outputs pushing", percent=ProgressPercent(0.99)) async def task_containers_restart( @@ -374,27 +610,30 @@ async def task_containers_restart( # or some other state, the service will get shutdown, to prevent this # blocking status while containers are being restarted. 
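# the lock below is held for the whole restart sequence: stop log fetching, run docker compose restart, # resume log fetching and ask the frontend to reload the iframe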
async with app.state.container_restart_lock: - progress.update(message="starting containers restart", percent=0.0) + progress.update( + message="starting containers restart", percent=ProgressPercent(0.0) + ) if shared_store.compose_spec is None: - raise RuntimeError("No spec for docker-compose command was found") + msg = "No spec for docker compose command was found" + raise RuntimeError(msg) for container_name in shared_store.container_names: await stop_log_fetching(app, container_name) - progress.update(message="stopped log fetching", percent=0.1) + progress.update(message="stopped log fetching", percent=ProgressPercent(0.1)) result = await docker_compose_restart(shared_store.compose_spec, settings) _raise_for_errors(result, "restart") - progress.update(message="containers restarted", percent=0.8) + progress.update(message="containers restarted", percent=ProgressPercent(0.8)) for container_name in shared_store.container_names: await start_log_fetching(app, container_name) - progress.update(message="started log fetching", percent=0.9) + progress.update(message="started log fetching", percent=ProgressPercent(0.9)) await post_sidecar_log_message( - app, "Service was restarted please reload the UI" + app, "Service was restarted please reload the UI", log_level=logging.INFO ) await post_event_reload_iframe(app) - progress.update(message="started log fetching", percent=0.99) + progress.update(message="started log fetching", percent=ProgressPercent(0.99)) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks_utils.py new file mode 100644 index 00000000000..21d9adaebbb --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/long_running_tasks_utils.py @@ -0,0 +1,67 @@ +import logging +import os +from datetime import timedelta +from typing import Final + +from models_library.callbacks_mapping import UserServiceCommand +from servicelib.logging_utils import log_context + +from ..core.errors import ( + ContainerExecCommandFailedError, + ContainerExecContainerNotFoundError, + ContainerExecTimeoutError, +) +from ..models.shared_store import SharedStore +from ..modules.mounted_fs import MountedVolumes +from .container_utils import run_command_in_container + +_logger = logging.getLogger(__name__) + +_TIMEOUT_PERMISSION_CHANGES: Final[timedelta] = timedelta(minutes=5) + + +async def run_before_shutdown_actions( + shared_store: SharedStore, before_shutdown: list[UserServiceCommand] +) -> None: + for user_service_command in before_shutdown: + container_name = user_service_command.service + with log_context( + _logger, logging.INFO, f"running before_shutdown {user_service_command}" + ): + try: + await run_command_in_container( + shared_store.original_to_container_names[container_name], + command=user_service_command.command, + timeout=user_service_command.timeout, + ) + + except ( + ContainerExecContainerNotFoundError, + ContainerExecCommandFailedError, + ContainerExecTimeoutError, + ): + _logger.warning( + "Could not run before_shutdown command %s in container %s", + user_service_command.command, + container_name, + exc_info=True, + ) + + +async def ensure_read_permissions_on_user_service_data( + mounted_volumes: MountedVolumes, +) -> None: + # Makes sure sidecar has access to all files in the user services. + # The user could have removed read permissions form a file, which will cause an error. 
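+    # NOTE: `o+rX` grants read to everyone and execute/search only on directories (and on +    # files already marked executable), so the data stays readable for the sidecar without +    # making regular files executable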
+ + # NOTE: command runs inside self container since the user service container might not always be running + self_container = os.environ["HOSTNAME"] + for path_to_store in ( # apply changes to otuputs and all state folders + *mounted_volumes.disk_state_paths_iter(), + mounted_volumes.disk_outputs_path, + ): + await run_command_in_container( + self_container, + command=f"chmod -R o+rX '{path_to_store}'", + timeout=_TIMEOUT_PERMISSION_CHANGES.total_seconds(), + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/mounted_fs.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/mounted_fs.py index 79b2d1b6578..78ddbf41199 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/mounted_fs.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/mounted_fs.py @@ -1,11 +1,11 @@ import os +from collections.abc import AsyncGenerator, Generator, Iterator from functools import cached_property from pathlib import Path -from typing import AsyncGenerator, Generator, Iterator from fastapi import FastAPI -from models_library.projects_nodes import NodeID -from models_library.services import RunID +from models_library.projects_nodes_io import NodeID +from models_library.services import ServiceRunID from servicelib.docker_constants import PREFIX_DYNAMIC_SIDECAR_VOLUMES from ..core.docker_utils import get_volume_by_label @@ -36,19 +36,21 @@ class MountedVolumes: def __init__( self, - run_id: RunID, + service_run_id: ServiceRunID, node_id: NodeID, inputs_path: Path, outputs_path: Path, + user_preferences_path: Path | None, state_paths: list[Path], state_exclude: set[str], compose_namespace: str, dy_volumes: Path, ) -> None: - self.run_id: RunID = run_id + self.service_run_id: ServiceRunID = service_run_id self.node_id: NodeID = node_id self.inputs_path: Path = inputs_path self.outputs_path: Path = outputs_path + self.user_preferences_path = user_preferences_path self.state_paths: list[Path] = state_paths self.state_exclude: set[str] = state_exclude self.compose_namespace = compose_namespace @@ -60,21 +62,30 @@ def __init__( def volume_name_inputs(self) -> str: """Same name as the namespace, to easily track components""" return ( - f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}" + f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.service_run_id}_{self.node_id}" f"_{_name_from_full_path(self.inputs_path)[::-1]}" ) @cached_property def volume_name_outputs(self) -> str: return ( - f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}" + f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.service_run_id}_{self.node_id}" f"_{_name_from_full_path(self.outputs_path)[::-1]}" ) + @cached_property + def volume_user_preferences(self) -> str | None: + if self.user_preferences_path is None: + return None + return ( + f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.service_run_id}_{self.node_id}" + f"_{_name_from_full_path(self.user_preferences_path)[::-1]}" + ) + def volume_name_state_paths(self) -> Generator[str, None, None]: for state_path in self.state_paths: yield ( - f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.run_id}_{self.node_id}" + f"{PREFIX_DYNAMIC_SIDECAR_VOLUMES}_{self.service_run_id}_{self.node_id}" f"_{_name_from_full_path(state_path)[::-1]}" ) @@ -86,51 +97,64 @@ def disk_inputs_path(self) -> Path: def disk_outputs_path(self) -> Path: return _ensure_path(self._dy_volumes / self.outputs_path.relative_to("/")) - def disk_state_paths(self) -> Iterator[Path]: + def disk_state_paths_iter(self) -> Iterator[Path]: for 
state_path in self.state_paths: yield _ensure_path(self._dy_volumes / state_path.relative_to("/")) - def all_disk_paths(self) -> Iterator[Path]: + def all_disk_paths_iter(self) -> Iterator[Path]: # PC: keeps iterator to follow same style as disk_state_paths but IMO it is overreaching yield self.disk_inputs_path yield self.disk_outputs_path - yield from self.disk_state_paths() + yield from self.disk_state_paths_iter() def _ensure_directories(self) -> None: """ - Creates the directories on its file system, - these will be mounted elsewere. + Creates directories on its file system, these will be mounted by the user services. """ _ensure_path(self._dy_volumes) - self.disk_inputs_path # pylint:disable= pointless-statement - self.disk_outputs_path # pylint:disable= pointless-statement - set(self.disk_state_paths()) + for path in self.all_disk_paths_iter(): + _ensure_path(path) @staticmethod - async def _get_bind_path_from_label(label: str, run_id: RunID) -> Path: - volume_details = await get_volume_by_label(label=label, run_id=run_id) + async def _get_bind_path_from_label( + label: str, service_run_id: ServiceRunID + ) -> Path: + volume_details = await get_volume_by_label( + label=label, service_run_id=service_run_id + ) return Path(volume_details["Mountpoint"]) - async def get_inputs_docker_volume(self, run_id: RunID) -> str: + async def get_inputs_docker_volume(self, service_run_id: ServiceRunID) -> str: bind_path: Path = await self._get_bind_path_from_label( - self.volume_name_inputs, run_id + self.volume_name_inputs, service_run_id ) return f"{bind_path}:{self.inputs_path}" - async def get_outputs_docker_volume(self, run_id: RunID) -> str: + async def get_outputs_docker_volume(self, service_run_id: ServiceRunID) -> str: bind_path: Path = await self._get_bind_path_from_label( - self.volume_name_outputs, run_id + self.volume_name_outputs, service_run_id ) return f"{bind_path}:{self.outputs_path}" + async def get_user_preferences_path_volume( + self, service_run_id: ServiceRunID + ) -> str | None: + if self.volume_user_preferences is None: + return None + + bind_path: Path = await self._get_bind_path_from_label( + self.volume_user_preferences, service_run_id + ) + return f"{bind_path}:{self.user_preferences_path}" + async def iter_state_paths_to_docker_volumes( - self, run_id: RunID + self, service_run_id: ServiceRunID ) -> AsyncGenerator[str, None]: for volume_state_path, state_path in zip( - self.volume_name_state_paths(), self.state_paths + self.volume_name_state_paths(), self.state_paths, strict=True ): bind_path: Path = await self._get_bind_path_from_label( - volume_state_path, run_id + volume_state_path, service_run_id ) yield f"{bind_path}:{state_path}" @@ -139,10 +163,11 @@ def setup_mounted_fs(app: FastAPI) -> MountedVolumes: settings: ApplicationSettings = app.state.settings app.state.mounted_volumes = MountedVolumes( - run_id=settings.DY_SIDECAR_RUN_ID, + service_run_id=settings.DY_SIDECAR_RUN_ID, node_id=settings.DY_SIDECAR_NODE_ID, inputs_path=settings.DY_SIDECAR_PATH_INPUTS, outputs_path=settings.DY_SIDECAR_PATH_OUTPUTS, + user_preferences_path=settings.DY_SIDECAR_USER_PREFERENCES_PATH, state_paths=settings.DY_SIDECAR_STATE_PATHS, state_exclude=settings.DY_SIDECAR_STATE_EXCLUDE, compose_namespace=settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE, @@ -150,6 +175,3 @@ def setup_mounted_fs(app: FastAPI) -> MountedVolumes: ) return app.state.mounted_volumes - - -__all__: tuple[str, ...] 
= ("MountedVolumes",) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/nodeports.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/nodeports.py index 1c2c006ba31..db074ac7071 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/nodeports.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/nodeports.py @@ -1,37 +1,40 @@ import json import logging import os -import shutil import sys import time +from asyncio import CancelledError from collections import deque +from collections.abc import Coroutine from contextlib import AsyncExitStack from enum import Enum from pathlib import Path -from typing import Coroutine, Optional, cast +from typing import cast -import aiofiles.os import magic +from aiofiles.os import remove from aiofiles.tempfile import TemporaryDirectory as AioTemporaryDirectory +from common_library.json_serialization import json_loads from models_library.projects import ProjectIDStr from models_library.projects_nodes_io import NodeIDStr -from pydantic import ByteSize +from models_library.services_types import ServicePortKey +from pydantic import ByteSize, TypeAdapter from servicelib.archiving_utils import PrunableFolder, archive_dir, unarchive_dir from servicelib.async_utils import run_sequentially_in_context -from servicelib.file_utils import remove_directory +from servicelib.file_utils import remove_directory, shutil_move from servicelib.logging_utils import log_context from servicelib.progress_bar import ProgressBarData -from servicelib.utils import logged_gather +from servicelib.utils import limited_gather from simcore_sdk import node_ports_v2 from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB -from simcore_sdk.node_ports_v2 import Nodeports, Port +from simcore_sdk.node_ports_v2 import Port from simcore_sdk.node_ports_v2.links import ItemConcreteValue +from simcore_sdk.node_ports_v2.nodeports_v2 import Nodeports, OutputsCallbacks from simcore_sdk.node_ports_v2.port import SetKWargs from simcore_sdk.node_ports_v2.port_utils import is_file_type -from simcore_service_dynamic_sidecar.core.settings import ( - ApplicationSettings, - get_settings, -) + +from ..core.settings import ApplicationSettings, get_settings +from ..modules.notifications import PortNotifier class PortTypeName(str, Enum): @@ -42,12 +45,12 @@ class PortTypeName(str, Enum): _FILE_TYPE_PREFIX = "data:" _KEY_VALUE_FILE_NAME = "key_values.json" -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) # OUTPUTS section -def _get_size_of_value(value: Optional[ItemConcreteValue]) -> int: +def _get_size_of_value(value: tuple[ItemConcreteValue | None, SetKWargs | None]) -> int: if value is None: return 0 if isinstance(value, Path): @@ -68,52 +71,75 @@ def _get_size_of_value(value: Optional[ItemConcreteValue]) -> int: ) -# NOTE: outputs_manager guarantees that no parallel calls -# to this function occur -async def upload_outputs( +class OutputCallbacksWrapper(OutputsCallbacks): + def __init__(self, port_notifier: PortNotifier) -> None: + self.port_notifier = port_notifier + + async def aborted(self, key: ServicePortKey) -> None: + await self.port_notifier.send_output_port_upload_was_aborted(key) + + async def finished_succesfully(self, key: ServicePortKey) -> None: + await self.port_notifier.send_output_port_upload_finished_successfully(key) + + async def finished_with_error(self, key: ServicePortKey) -> None: + await 
self.port_notifier.send_output_port_upload_finished_with_error(key) + + +# NOTE: outputs_manager guarantees that no parallel calls to this function occur +async def upload_outputs( # pylint:disable=too-many-statements # noqa: PLR0915, C901 outputs_path: Path, port_keys: list[str], - io_log_redirect_cb: Optional[LogRedirectCB], + io_log_redirect_cb: LogRedirectCB | None, progress_bar: ProgressBarData, + port_notifier: PortNotifier, ) -> None: # pylint: disable=too-many-branches - logger.debug("uploading data to simcore...") + _logger.debug("uploading data to simcore...") start_time = time.perf_counter() settings: ApplicationSettings = get_settings() PORTS: Nodeports = await node_ports_v2.ports( user_id=settings.DY_SIDECAR_USER_ID, project_id=ProjectIDStr(settings.DY_SIDECAR_PROJECT_ID), - node_uuid=NodeIDStr(settings.DY_SIDECAR_NODE_ID), - r_clone_settings=settings.rclone_settings_for_nodeports, + node_uuid=TypeAdapter(NodeIDStr).validate_python( + f"{settings.DY_SIDECAR_NODE_ID}" + ), + r_clone_settings=None, io_log_redirect_cb=io_log_redirect_cb, + aws_s3_cli_settings=None, ) # let's gather the tasks ports_values: dict[ - str, tuple[Optional[ItemConcreteValue], Optional[SetKWargs]] + ServicePortKey, tuple[ItemConcreteValue | None, SetKWargs | None] ] = {} archiving_tasks: deque[Coroutine[None, None, None]] = deque() - ports_to_set = [ + ports_to_set: list[Port] = [ port_value for port_value in (await PORTS.outputs).values() if (not port_keys) or (port_value.key in port_keys) ] + await limited_gather( + *(port_notifier.send_output_port_upload_sarted(p.key) for p in ports_to_set), + limit=4, + ) + async with AsyncExitStack() as stack: sub_progress = await stack.enter_async_context( progress_bar.sub_progress( steps=sum( 2 if is_file_type(port.property_type) else 1 for port in ports_to_set - ) + ), + description="uploading outputs", ) ) for port in ports_to_set: if is_file_type(port.property_type): src_folder = outputs_path / port.key files_and_folders_list = list(src_folder.rglob("*")) - logger.debug("Discovered files to upload %s", files_and_folders_list) + _logger.debug("Discovered files to upload %s", files_and_folders_list) if not files_and_folders_list: ports_values[port.key] = (None, None) @@ -137,19 +163,39 @@ async def upload_outputs( # generic case let's create an archive # only the filtered out files will be zipped tmp_folder = Path( - await stack.enter_async_context(AioTemporaryDirectory()) + await stack.enter_async_context(AioTemporaryDirectory()) # type: ignore[arg-type] ) tmp_file = tmp_folder / f"{src_folder.stem}.zip" # when having multiple directories it is important to # run the compression in parallel to guarantee better performance + async def _archive_dir_notified( + dir_to_compress: Path, destination: Path, port_key: ServicePortKey + ) -> None: + # Errors and cancellation can also be triggered from archving as well + try: + await archive_dir( + dir_to_compress=dir_to_compress, + destination=destination, + compress=False, + progress_bar=sub_progress, + ) + except CancelledError: + await port_notifier.send_output_port_upload_was_aborted( + port_key + ) + raise + except Exception: + await port_notifier.send_output_port_upload_finished_with_error( + port_key + ) + raise + archiving_tasks.append( - archive_dir( + _archive_dir_notified( dir_to_compress=src_folder, destination=tmp_file, - compress=False, - store_relative_path=True, - progress_bar=sub_progress, + port_key=port.key, ) ) ports_values[port.key] = ( @@ -163,23 +209,27 @@ async def upload_outputs( else: data_file = 
outputs_path / _KEY_VALUE_FILE_NAME if data_file.exists(): - data = json.loads(data_file.read_text()) + data = json_loads(data_file.read_text()) if port.key in data and data[port.key] is not None: ports_values[port.key] = (data[port.key], None) else: - logger.debug("Port %s not found in %s", port.key, data) + _logger.debug("Port %s not found in %s", port.key, data) else: - logger.debug("No file %s to fetch port values from", data_file) + _logger.debug("No file %s to fetch port values from", data_file) if archiving_tasks: - await logged_gather(*archiving_tasks) + await limited_gather(*archiving_tasks, limit=4) - await PORTS.set_multiple(ports_values, progress_bar=sub_progress) + await PORTS.set_multiple( + ports_values, + progress_bar=sub_progress, + outputs_callbacks=OutputCallbacksWrapper(port_notifier), + ) elapsed_time = time.perf_counter() - start_time total_bytes = sum(_get_size_of_value(x) for x in ports_values.values()) - logger.info("Uploaded %s bytes in %s seconds", total_bytes, elapsed_time) - logger.debug(_CONTROL_TESTMARK_DY_SIDECAR_NODEPORT_UPLOADED_MESSAGE) + _logger.info("Uploaded %s bytes in %s seconds", total_bytes, elapsed_time) + _logger.debug(_CONTROL_TESTMARK_DY_SIDECAR_NODEPORT_UPLOADED_MESSAGE) # INPUTS section @@ -190,62 +240,72 @@ def _is_zip_file(file_path: Path) -> bool: return f"{mime_type}" == "application/zip" -_shutil_move = aiofiles.os.wrap(shutil.move) # type: ignore - - async def _get_data_from_port( port: Port, *, target_dir: Path, progress_bar: ProgressBarData -) -> tuple[Port, Optional[ItemConcreteValue], ByteSize]: +) -> tuple[Port, ItemConcreteValue | None, ByteSize]: async with progress_bar.sub_progress( - steps=2 if is_file_type(port.property_type) else 1 + steps=2 if is_file_type(port.property_type) else 1, + description="getting data", ) as sub_progress: - with log_context(logger, logging.DEBUG, f"getting {port.key=}"): + with log_context(_logger, logging.DEBUG, f"getting {port.key=}"): port_data = await port.get(sub_progress) if is_file_type(port.property_type): # if there are files, move them to the final destination - downloaded_file: Optional[Path] = cast(Optional[Path], port_data) + downloaded_file: Path | None = cast(Path | None, port_data) final_path: Path = target_dir / port.key if not downloaded_file or not downloaded_file.exists(): # the link may be empty # remove files all files from disk when disconnecting port - logger.debug("removing contents of dir %s", final_path) - await remove_directory( - final_path, only_children=True, ignore_errors=True - ) + with log_context( + _logger, logging.DEBUG, f"removing contents of dir '{final_path}'" + ): + await remove_directory( + final_path, only_children=True, ignore_errors=True + ) return port, None, ByteSize(0) transferred_bytes = downloaded_file.stat().st_size # in case of valid file, it is either uncompressed and/or moved to the final directory - with log_context(logger, logging.DEBUG, "creating directory"): + with log_context(_logger, logging.DEBUG, "creating directory"): final_path.mkdir(exist_ok=True, parents=True) port_data = f"{final_path}" - dest_folder = PrunableFolder(final_path) - if _is_zip_file(downloaded_file): - # unzip updated data to dest_path - logger.debug("unzipping %s", downloaded_file) - unarchived: set[Path] = await unarchive_dir( - archive_to_extract=downloaded_file, - destination_folder=final_path, - progress_bar=sub_progress, - ) + archive_files: set[Path] - dest_folder.prune(exclude=unarchived) + if _is_zip_file(downloaded_file): + with log_context( + _logger, + 
logging.DEBUG, + f"unzipping '{downloaded_file}' to {final_path}", + ): + archive_files = await unarchive_dir( + archive_to_extract=downloaded_file, + destination_folder=final_path, + progress_bar=sub_progress, + ) - logger.debug("all unzipped in %s", final_path) + with log_context( + _logger, logging.DEBUG, f"archive removal '{downloaded_file}'" + ): + await remove(downloaded_file) else: - logger.debug("moving %s", downloaded_file) - final_path = final_path / Path(downloaded_file).name - await _shutil_move(str(downloaded_file), final_path) + # move archive to directory as is + final_path = final_path / downloaded_file.name - # NOTE: after the download the current value of the port - # makes sure previously downloaded files are removed - dest_folder.prune(exclude={final_path}) + with log_context( + _logger, logging.DEBUG, f"moving {downloaded_file} to {final_path}" + ): + final_path.parent.mkdir(exist_ok=True, parents=True) + await shutil_move(downloaded_file, final_path) + + archive_files = {final_path} - logger.debug("all moved to %s", final_path) + # NOTE: after the port content changes, make sure old files + # which are no longer part of the port, are removed + PrunableFolder(final_path).prune(exclude=archive_files) else: transferred_bytes = sys.getsizeof(port_data) @@ -259,34 +319,64 @@ async def download_target_ports( port_keys: list[str], io_log_redirect_cb: LogRedirectCB, progress_bar: ProgressBarData, + port_notifier: PortNotifier | None, ) -> ByteSize: - logger.debug("retrieving data from simcore...") + _logger.debug("retrieving data from simcore...") start_time = time.perf_counter() settings: ApplicationSettings = get_settings() PORTS: Nodeports = await node_ports_v2.ports( user_id=settings.DY_SIDECAR_USER_ID, project_id=ProjectIDStr(settings.DY_SIDECAR_PROJECT_ID), - node_uuid=NodeIDStr(settings.DY_SIDECAR_NODE_ID), - r_clone_settings=settings.rclone_settings_for_nodeports, + node_uuid=TypeAdapter(NodeIDStr).validate_python( + f"{settings.DY_SIDECAR_NODE_ID}" + ), + r_clone_settings=None, io_log_redirect_cb=io_log_redirect_cb, + aws_s3_cli_settings=None, ) # let's gather all the data - ports_to_get = [ + ports_to_get: list[Port] = [ port_value for port_value in (await getattr(PORTS, port_type_name.value)).values() if (not port_keys) or (port_value.key in port_keys) ] - async with progress_bar.sub_progress(steps=len(ports_to_get)) as sub_progress: - results = await logged_gather( + + async def _get_date_from_port_notified( + port: Port, progress_bar: ProgressBarData + ) -> tuple[Port, ItemConcreteValue | None, ByteSize]: + assert port_notifier is not None + await port_notifier.send_input_port_download_started(port.key) + try: + result = await _get_data_from_port( + port, target_dir=target_dir, progress_bar=progress_bar + ) + await port_notifier.send_input_port_download_finished_succesfully(port.key) + return result + + except CancelledError: + await port_notifier.send_input_port_download_was_aborted(port.key) + raise + except Exception: + await port_notifier.send_input_port_download_finished_with_error(port.key) + raise + + async with progress_bar.sub_progress( + steps=len(ports_to_get), description="downloading" + ) as sub_progress: + results = await limited_gather( *[ - _get_data_from_port( - port, target_dir=target_dir, progress_bar=sub_progress + ( + _get_data_from_port( + port, target_dir=target_dir, progress_bar=sub_progress + ) + if port_type_name == PortTypeName.OUTPUTS + else _get_date_from_port_notified(port, progress_bar=sub_progress) ) for port in ports_to_get ], - 
max_concurrency=2, + limit=2, ) # parse results data = { @@ -301,13 +391,13 @@ async def download_target_ports( if data: data_file = target_dir / _KEY_VALUE_FILE_NAME if data_file.exists(): - current_data = json.loads(data_file.read_text()) + current_data = json_loads(data_file.read_text()) # merge data data = {**current_data, **data} data_file.write_text(json.dumps(data)) elapsed_time = time.perf_counter() - start_time - logger.info( + _logger.info( "Downloaded %s in %s seconds", total_transfered_bytes.human_readable(decimal=True), elapsed_time, diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/__init__.py new file mode 100644 index 00000000000..18254b1d23c --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/__init__.py @@ -0,0 +1,9 @@ +from ._notifications_ports import PortNotifier +from ._notifications_system_monitor import publish_disk_usage +from ._setup import setup_notifications + +__all__: tuple[str, ...] = ( + "PortNotifier", + "publish_disk_usage", + "setup_notifications", +) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_ports.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_ports.py new file mode 100644 index 00000000000..6a8c45e35da --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_ports.py @@ -0,0 +1,78 @@ +from dataclasses import dataclass + +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.ports import InputStatus, OutputStatus +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID + +from ._notifier import Notifier + + +@dataclass +class PortNotifier: + app: FastAPI + user_id: UserID + project_id: ProjectID + node_id: NodeID + + async def _send_output_port_status( + self, port_key: ServicePortKey, status: OutputStatus + ) -> None: + notifier: Notifier = Notifier.get_from_app_state(self.app) + await notifier.notify_output_port_status( + self.user_id, self.project_id, self.node_id, port_key, status + ) + + async def _send_input_port_status( + self, port_key: ServicePortKey, status: InputStatus + ) -> None: + notifier: Notifier = Notifier.get_from_app_state(self.app) + await notifier.notify_input_port_status( + self.user_id, self.project_id, self.node_id, port_key, status + ) + + async def send_output_port_upload_sarted(self, port_key: ServicePortKey) -> None: + await self._send_output_port_status(port_key, OutputStatus.UPLOAD_STARTED) + + async def send_output_port_upload_was_aborted( + self, port_key: ServicePortKey + ) -> None: + await self._send_output_port_status(port_key, OutputStatus.UPLOAD_WAS_ABORTED) + + async def send_output_port_upload_finished_successfully( + self, port_key: ServicePortKey + ) -> None: + await self._send_output_port_status( + port_key, OutputStatus.UPLOAD_FINISHED_SUCCESSFULLY + ) + + async def send_output_port_upload_finished_with_error( + self, port_key: ServicePortKey + ) -> None: + await self._send_output_port_status( + port_key, OutputStatus.UPLOAD_FINISHED_WITH_ERROR + ) + + async def send_input_port_download_started(self, port_key: ServicePortKey) -> None: + await 
self._send_input_port_status(port_key, InputStatus.DOWNLOAD_STARTED) + + async def send_input_port_download_was_aborted( + self, port_key: ServicePortKey + ) -> None: + await self._send_input_port_status(port_key, InputStatus.DOWNLOAD_WAS_ABORTED) + + async def send_input_port_download_finished_succesfully( + self, port_key: ServicePortKey + ) -> None: + await self._send_input_port_status( + port_key, InputStatus.DOWNLOAD_FINISHED_SUCCESSFULLY + ) + + async def send_input_port_download_finished_with_error( + self, port_key: ServicePortKey + ) -> None: + await self._send_input_port_status( + port_key, InputStatus.DOWNLOAD_FINISHED_WITH_ERROR + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_system_monitor.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_system_monitor.py new file mode 100644 index 00000000000..664b0e8285d --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifications_system_monitor.py @@ -0,0 +1,22 @@ +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import ( + DiskUsage, + MountPathCategory, +) +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID + +from ._notifier import Notifier + + +async def publish_disk_usage( + app: FastAPI, + *, + user_id: UserID, + node_id: NodeID, + usage: dict[MountPathCategory, DiskUsage] +) -> None: + notifier: Notifier = Notifier.get_from_app_state(app) + await notifier.notify_service_disk_usage( + user_id=user_id, node_id=node_id, usage=usage + ) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifier.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifier.py new file mode 100644 index 00000000000..04a82d44a04 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_notifier.py @@ -0,0 +1,106 @@ +import contextlib + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_dynamic_sidecar.ports import ( + InputPortSatus, + InputStatus, + OutputPortStatus, + OutputStatus, +) +from models_library.api_schemas_dynamic_sidecar.socketio import ( + SOCKET_IO_SERVICE_DISK_USAGE_EVENT, + SOCKET_IO_STATE_INPUT_PORTS_EVENT, + SOCKET_IO_STATE_OUTPUT_PORTS_EVENT, +) +from models_library.api_schemas_dynamic_sidecar.telemetry import ( + DiskUsage, + MountPathCategory, + ServiceDiskUsage, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from servicelib.fastapi.app_state import SingletonInAppStateMixin + + +class Notifier(SingletonInAppStateMixin): + app_state_name: str = "notifier" + + def __init__(self, sio_manager: socketio.AsyncAioPikaManager): + self._sio_manager = sio_manager + + async def notify_service_disk_usage( + self, + user_id: UserID, + node_id: NodeID, + usage: dict[MountPathCategory, DiskUsage], + ) -> None: + await self._sio_manager.emit( + SOCKET_IO_SERVICE_DISK_USAGE_EVENT, + data=jsonable_encoder(ServiceDiskUsage(node_id=node_id, usage=usage)), + room=SocketIORoomStr.from_user_id(user_id), + ) + + async def notify_output_port_status( 
+ self, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + port_key: ServicePortKey, + output_status: OutputStatus, + ) -> None: + await self._sio_manager.emit( + SOCKET_IO_STATE_OUTPUT_PORTS_EVENT, + data=jsonable_encoder( + OutputPortStatus( + project_id=project_id, + node_id=node_id, + port_key=port_key, + status=output_status, + ) + ), + room=SocketIORoomStr.from_user_id(user_id), + ) + + async def notify_input_port_status( + self, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + port_key: ServicePortKey, + input_status: InputStatus, + ) -> None: + await self._sio_manager.emit( + SOCKET_IO_STATE_INPUT_PORTS_EVENT, + data=jsonable_encoder( + InputPortSatus( + project_id=project_id, + node_id=node_id, + port_key=port_key, + status=input_status, + ) + ), + room=SocketIORoomStr.from_user_id(user_id), + ) + + +def setup_notifier(app: FastAPI): + async def _on_startup() -> None: + assert app.state.external_socketio # nosec + + notifier = Notifier( + sio_manager=app.state.external_socketio, + ) + notifier.set_to_app_state(app) + assert Notifier.get_from_app_state(app) == notifier # nosec + + async def _on_shutdown() -> None: + with contextlib.suppress(AttributeError): + Notifier.pop_from_app_state(app) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_setup.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_setup.py new file mode 100644 index 00000000000..6de0fae307f --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_setup.py @@ -0,0 +1,15 @@ +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import log_context + +from ..notifications._notifier import setup_notifier +from ..notifications._socketio import setup_socketio + +_logger = logging.getLogger(__name__) + + +def setup_notifications(app: FastAPI) -> None: + with log_context(_logger, logging.INFO, "setup notifications"): + setup_socketio(app) + setup_notifier(app) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_socketio.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_socketio.py new file mode 100644 index 00000000000..bdbe9808a8a --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/notifications/_socketio.py @@ -0,0 +1,32 @@ +import logging + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from servicelib.socketio_utils import cleanup_socketio_async_pubsub_manager + +from ...core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def setup_socketio(app: FastAPI): + settings: ApplicationSettings = app.state.settings + + async def _on_startup() -> None: + assert app.state.rabbitmq_client # nosec + + # Connect to the as an external process in write-only mode + # SEE https://python-socketio.readthedocs.io/en/stable/server.html#emitting-from-external-processes + assert settings.RABBIT_SETTINGS # nosec + app.state.external_socketio = socketio.AsyncAioPikaManager( + url=settings.RABBIT_SETTINGS.dsn, logger=_logger, write_only=True + ) + + async def _on_shutdown() -> None: + if external_socketio := getattr(app.state, "external_socketio"): # noqa: B009 + await cleanup_socketio_async_pubsub_manager( + server_manager=external_socketio + ) + + app.add_event_handler("startup", 
_on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/__init__.py index bc82628c30d..0311ff0462e 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/__init__.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/__init__.py @@ -3,9 +3,9 @@ from ._context import OutputsContext, setup_outputs_context from ._manager import OutputsManager, setup_outputs_manager from ._watcher import ( - disable_outputs_watcher, - enable_outputs_watcher, - outputs_watcher_disabled, + disable_event_propagation, + enable_event_propagation, + event_propagation_disabled, setup_outputs_watcher, ) @@ -17,9 +17,9 @@ def setup_outputs(app: FastAPI) -> None: __all__: tuple[str, ...] = ( - "disable_outputs_watcher", - "enable_outputs_watcher", - "outputs_watcher_disabled", + "disable_event_propagation", + "enable_event_propagation", + "event_propagation_disabled", "OutputsContext", "OutputsManager", "setup_outputs", diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_context.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_context.py index ef3a852ea2c..e05bb899cba 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_context.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_context.py @@ -1,8 +1,8 @@ from dataclasses import dataclass, field from pathlib import Path -import aioprocessing -from aioprocessing.queues import AioQueue +import aioprocessing # type: ignore[import-untyped] +from aioprocessing.queues import AioQueue # type: ignore[import-untyped] from fastapi import FastAPI from ..mounted_fs import MountedVolumes @@ -16,7 +16,7 @@ class OutputsContext: port_key_events_queue: AioQueue = field(default_factory=aioprocessing.AioQueue) # OutputsContext (generates) -> _EventHandlerProcess(receives) - file_type_port_keys_updates_queue: AioQueue = field( + file_system_event_handler_queue: AioQueue = field( default_factory=aioprocessing.AioQueue ) @@ -28,8 +28,19 @@ class OutputsContext: async def set_file_type_port_keys(self, file_type_port_keys: list[str]) -> None: self._file_type_port_keys = file_type_port_keys - await self.file_type_port_keys_updates_queue.coro_put( # pylint:disable=no-member - self._file_type_port_keys + await self.file_system_event_handler_queue.coro_put( # pylint:disable=no-member + { + "method_name": "handle_set_outputs_port_keys", + "kwargs": {"outputs_port_keys": self._file_type_port_keys}, + } + ) + + async def toggle_event_propagation(self, *, is_enabled: bool) -> None: + await self.file_system_event_handler_queue.coro_put( # pylint:disable=no-member + { + "method_name": "handle_toggle_event_propagation", + "kwargs": {"is_enabled": is_enabled}, + } ) @property diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_directory_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_directory_utils.py index 7cc13922244..21f07bf1523 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_directory_utils.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_directory_utils.py @@ -1,7 +1,7 @@ import os from pathlib import Path -from pydantic import ByteSize, 
parse_obj_as +from pydantic import ByteSize, TypeAdapter def get_directory_total_size(path: Path) -> ByteSize: @@ -10,7 +10,7 @@ def get_directory_total_size(path: Path) -> ByteSize: # until we do not hit 1 million it can be ignored # NOTE: file size has no impact on performance if not path.exists(): - return parse_obj_as(ByteSize, 0) + return TypeAdapter(ByteSize).validate_python(0) total = 0 for entry in os.scandir(path): @@ -18,4 +18,4 @@ def get_directory_total_size(path: Path) -> ByteSize: total += entry.stat().st_size elif entry.is_dir(): total += get_directory_total_size(Path(entry.path)) - return parse_obj_as(ByteSize, total) + return TypeAdapter(ByteSize).validate_python(total) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_filter.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_filter.py index c0d3175e407..919d777921e 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_filter.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_filter.py @@ -6,7 +6,7 @@ from concurrent.futures import ThreadPoolExecutor from contextlib import suppress from dataclasses import dataclass -from typing import Final, Optional +from typing import Final, TypeAlias from pydantic import ( ByteSize, @@ -14,7 +14,7 @@ NonNegativeInt, PositiveFloat, PositiveInt, - parse_obj_as, + TypeAdapter, ) from servicelib.logging_utils import log_context from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT @@ -22,18 +22,18 @@ from ._directory_utils import get_directory_total_size from ._manager import OutputsManager -PortEvent = Optional[str] +PortEvent: TypeAlias = str | None -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) -_1_MB: Final[PositiveInt] = parse_obj_as(ByteSize, "1mib") -_500_MB: Final[PositiveInt] = parse_obj_as(ByteSize, "500mib") +_1_MB: Final[PositiveInt] = TypeAdapter(ByteSize).validate_python("1mib") +_500_MB: Final[PositiveInt] = TypeAdapter(ByteSize).validate_python("500mib") class BaseDelayPolicy(ABC): def get_min_interval(self) -> NonNegativeFloat: # pylint:disable=no-self-use - return DEFAULT_OBSERVER_TIMEOUT # type: ignore + return DEFAULT_OBSERVER_TIMEOUT @abstractmethod def get_wait_interval(self, dir_size: NonNegativeInt) -> NonNegativeFloat: @@ -63,7 +63,7 @@ def get_wait_interval(self, dir_size: NonNegativeInt) -> NonNegativeFloat: @dataclass class TrackedEvent: last_detection: NonNegativeFloat - wait_interval: Optional[NonNegativeFloat] = None + wait_interval: NonNegativeFloat | None = None class EventFilter: @@ -76,13 +76,13 @@ def __init__( self.delay_policy = delay_policy self._incoming_events_queue: Queue[PortEvent] = Queue() - self._task_incoming_event_ingestion: Optional[Task] = None + self._task_incoming_event_ingestion: Task | None = None - self._task_event_emitter: Optional[Task] = None + self._task_event_emitter: Task | None = None self._keep_event_emitter_running: bool = True - self._upload_events_queue: Queue[Optional[str]] = Queue() - self._task_upload_events: Optional[Task] = None + self._upload_events_queue: Queue[str | None] = Queue() + self._task_upload_events: Task | None = None self._port_key_tracked_event: dict[str, TrackedEvent] = {} @@ -105,7 +105,6 @@ async def _worker_incoming_event_ingestion(self) -> None: def _worker_blocking_event_emitter(self) -> None: # NOSONAR repeat_interval = self.delay_policy.get_min_interval() * 0.49 while 
self._keep_event_emitter_running: - # can be iterated while modified for port_key in list(self._port_key_tracked_event.keys()): tracked_event = self._port_key_tracked_event.get(port_key, None) @@ -161,18 +160,18 @@ async def _worker_event_emitter(self) -> None: async def _worker_upload_events(self) -> None: """enqueues uploads for port `port_key`""" while True: - port_key: Optional[str] = await self._upload_events_queue.get() + port_key: str | None = await self._upload_events_queue.get() if port_key is None: break - logger.debug("Request upload for port_key %s", port_key) + _logger.debug("Request upload for port_key %s", port_key) await self.outputs_manager.port_key_content_changed(port_key) async def enqueue(self, port_key: str) -> None: await self._incoming_events_queue.put(port_key) async def start(self) -> None: - with log_context(logger, logging.INFO, f"{EventFilter.__name__} start"): + with log_context(_logger, logging.INFO, f"{EventFilter.__name__} start"): self._task_incoming_event_ingestion = create_task( self._worker_incoming_event_ingestion(), name=self._worker_incoming_event_ingestion.__name__, @@ -188,7 +187,7 @@ async def start(self) -> None: ) async def shutdown(self) -> None: - async def _cancel_task(task: Optional[Task]) -> None: + async def _cancel_task(task: Task | None) -> None: if task is None: return @@ -196,7 +195,7 @@ async def _cancel_task(task: Optional[Task]) -> None: with suppress(CancelledError): await task - with log_context(logger, logging.INFO, f"{EventFilter.__name__} shutdown"): + with log_context(_logger, logging.INFO, f"{EventFilter.__name__} shutdown"): await self._incoming_events_queue.put(None) await _cancel_task(self._task_incoming_event_ingestion) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_handler.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_handler.py index 2676532779a..a61ea375286 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_handler.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_event_handler.py @@ -1,5 +1,3 @@ -# pylint:disable=no-member - import logging from asyncio import CancelledError, Task, create_task, get_event_loop from asyncio import sleep as async_sleep @@ -9,11 +7,11 @@ from queue import Empty from threading import Thread from time import sleep as blocking_sleep -from typing import Final, Optional +from typing import Any, Final -import aioprocessing -from aioprocessing.process import AioProcess -from aioprocessing.queues import AioQueue +import aioprocessing # type: ignore [import-untyped] +from aioprocessing.process import AioProcess # type: ignore [import-untyped] +from aioprocessing.queues import AioQueue # type: ignore [import-untyped] from pydantic import PositiveFloat from servicelib.logging_utils import log_context from watchdog.events import FileSystemEvent @@ -24,7 +22,11 @@ _HEART_BEAT_MARK: Final = 1 -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) + + +def _get_first_entry_or_none(data: list[str]) -> str | None: + return next(iter(data), None) class _PortKeysEventHandler(SafeFileSystemEventHandler): @@ -33,32 +35,56 @@ class _PortKeysEventHandler(SafeFileSystemEventHandler): def __init__(self, outputs_path: Path, port_key_events_queue: AioQueue): super().__init__() + self._is_event_propagation_enabled: bool = False self.outputs_path: Path = outputs_path self.port_key_events_queue: AioQueue = 
port_key_events_queue self._outputs_port_keys: set[str] = set() - def set_outputs_port_keys(self, outputs_port_keys: set[str]) -> None: + def handle_set_outputs_port_keys(self, *, outputs_port_keys: set[str]) -> None: self._outputs_port_keys = outputs_port_keys + def handle_toggle_event_propagation(self, *, is_enabled: bool) -> None: + self._is_event_propagation_enabled = is_enabled + + def _get_relative_path_parents(self, path: bytes | str) -> list[str]: + try: + spath_relative_to_outputs = Path( + path.decode() if isinstance(path, bytes) else path + ).relative_to(self.outputs_path) + except ValueError: + return [] + return [f"{x}" for x in spath_relative_to_outputs.parents] + def event_handler(self, event: FileSystemEvent) -> None: + if not self._is_event_propagation_enabled: + return + # NOTE: ignoring all events which are not relative to modifying # the contents of the `port_key` folders from the outputs directory - path_relative_to_outputs = Path(event.src_path).relative_to(self.outputs_path) + # NOTE: the `port_key` will be present in either the src_path or the dest_path + # depending on the type of event + + src_relative_path_parents = self._get_relative_path_parents(event.src_path) + dst_relative_path_parents = self._get_relative_path_parents(event.dest_path) # discard event if not part of a subfolder - relative_path_parents = path_relative_to_outputs.parents - event_in_subdirs = len(relative_path_parents) > 0 + event_in_subdirs = ( + len(src_relative_path_parents) > 0 or len(dst_relative_path_parents) > 0 + ) if not event_in_subdirs: return # only accept events generated inside `port_key` subfolder - port_key_candidate = f"{relative_path_parents[0]}" - - if port_key_candidate in self._outputs_port_keys: - # messages in this queue (part of the process), - # will be consumed by the asyncio thread - self.port_key_events_queue.put(port_key_candidate) + src_port_key_candidate = _get_first_entry_or_none(src_relative_path_parents) + dst_port_key_candidate = _get_first_entry_or_none(dst_relative_path_parents) + + for port_key_candidate in (src_port_key_candidate, dst_port_key_candidate): + if port_key_candidate in self._outputs_port_keys: + # messages in this queue (part of the process), + # will be consumed by the asyncio thread + self.port_key_events_queue.put(port_key_candidate) + break class _EventHandlerProcess: @@ -78,32 +104,32 @@ def __init__( # the process itself and is used to stop the process. 
self._stop_queue: AioQueue = aioprocessing.AioQueue() - self._file_system_event_handler: Optional[_PortKeysEventHandler] = None - self._process: Optional[AioProcess] = None + self._file_system_event_handler: _PortKeysEventHandler | None = None + self._process: AioProcess | None = None def start_process(self) -> None: # NOTE: runs in asyncio thread with log_context( - logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} start_process" + _logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} start_process" ): self._process = aioprocessing.AioProcess( target=self._process_worker, daemon=True ) - self._process.start() + self._process.start() # pylint:disable=no-member def stop_process(self) -> None: # NOTE: runs in asyncio thread with log_context( - logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} stop_process" + _logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} stop_process" ): - self._stop_queue.put(None) + self._stop_queue.put(None) # pylint:disable=no-member if self._process: # force stop the process - self._process.kill() - self._process.join() + self._process.kill() # pylint:disable=no-member + self._process.join() # pylint:disable=no-member self._process = None # cleanup whatever remains @@ -113,31 +139,39 @@ def shutdown(self) -> None: # NOTE: runs in asyncio thread with log_context( - logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} shutdown" + _logger, logging.DEBUG, f"{_EventHandlerProcess.__name__} shutdown" ): self.stop_process() # signal queue observers to finish - self.outputs_context.port_key_events_queue.put(None) - self.health_check_queue.put(None) + self.outputs_context.port_key_events_queue.put( + None + ) # pylint:disable=no-member + self.health_check_queue.put(None) # pylint:disable=no-member def _thread_worker_update_outputs_port_keys(self) -> None: # NOTE: runs as a thread in the created process # Propagate `outputs_port_keys` changes to the `_PortKeysEventHandler`. while True: - outputs_port_keys: Optional[ - set[str] - ] = self.outputs_context.file_type_port_keys_updates_queue.get() - print("outputs_port_keys", outputs_port_keys) + message: dict[str, Any] | None = ( + self.outputs_context.file_system_event_handler_queue.get() # pylint:disable=no-member + ) + _logger.debug("received message %s", message) - if outputs_port_keys is None: + # no more messages quitting + if message is None: break - if self._file_system_event_handler is not None: - self._file_system_event_handler.set_outputs_port_keys( - set(outputs_port_keys) - ) + # do nothing + if self._file_system_event_handler is None: + continue + + # handle events + method_kwargs: dict[str, Any] = message["kwargs"] + method_name = message["method_name"] + method_to_call = getattr(self._file_system_event_handler, method_name) + method_to_call(**method_kwargs) def _process_worker(self) -> None: # NOTE: runs in the created process @@ -162,7 +196,7 @@ def _process_worker(self) -> None: ) observer.start() - while self._stop_queue.qsize() == 0: + while self._stop_queue.qsize() == 0: # pylint:disable=no-member # watchdog internally uses 1 sec interval to detect events # sleeping for less is useless. 
# If this value is bigger then the DEFAULT_OBSERVER_TIMEOUT @@ -172,11 +206,13 @@ def _process_worker(self) -> None: # time while handling inotify events # the health_check sending could be delayed - self.health_check_queue.put(_HEART_BEAT_MARK) + self.health_check_queue.put( # pylint:disable=no-member + _HEART_BEAT_MARK + ) blocking_sleep(self.heart_beat_interval_s) except Exception: # pylint: disable=broad-except - logger.exception("Unexpected error") + _logger.exception("Unexpected error") finally: if watch: observer.remove_handler_for_watch( @@ -185,10 +221,12 @@ def _process_worker(self) -> None: observer.stop() # stop created thread - self.outputs_context.file_type_port_keys_updates_queue.put(None) + self.outputs_context.file_system_event_handler_queue.put( # pylint:disable=no-member + None + ) thread_update_outputs_port_keys.join() - logger.warning("%s exited", _EventHandlerProcess.__name__) + _logger.warning("%s exited", _EventHandlerProcess.__name__) class EventHandlerObserver: @@ -219,7 +257,7 @@ def __init__( heart_beat_interval_s=heart_beat_interval_s, ) self._keep_running: bool = False - self._task_health_worker: Optional[Task] = None + self._task_health_worker: Task | None = None @property def wait_for_heart_beat_interval_s(self) -> PositiveFloat: @@ -235,13 +273,13 @@ async def _health_worker(self) -> None: heart_beat_count = 0 while True: try: - self._health_check_queue.get_nowait() + self._health_check_queue.get_nowait() # pylint:disable=no-member heart_beat_count += 1 except Empty: break if heart_beat_count == 0: - logger.warning( + _logger.warning( ( "WatcherProcess health is no longer responsive. " "%s will be uploaded when closing." @@ -266,7 +304,7 @@ def _stop_observer_process(self) -> None: async def start(self) -> None: with log_context( - logger, logging.INFO, f"{EventHandlerObserver.__name__} start" + _logger, logging.INFO, f"{EventHandlerObserver.__name__} start" ): self._keep_running = True self._task_health_worker = create_task( @@ -275,7 +313,9 @@ async def start(self) -> None: self._start_observer_process() async def stop(self) -> None: - with log_context(logger, logging.INFO, f"{EventHandlerObserver.__name__} stop"): + with log_context( + _logger, logging.INFO, f"{EventHandlerObserver.__name__} stop" + ): self._stop_observer_process() self._keep_running = False if self._task_health_worker is not None: diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_manager.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_manager.py index 4b5c4111266..f29f26358e2 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_manager.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_manager.py @@ -5,34 +5,34 @@ from contextlib import suppress from datetime import timedelta from functools import partial -from typing import Optional +from common_library.errors_classes import OsparcErrorMixin from fastapi import FastAPI from models_library.rabbitmq_messages import ProgressType from pydantic import PositiveFloat -from pydantic.errors import PydanticErrorMixin from servicelib import progress_bar -from servicelib.background_task import start_periodic_task, stop_periodic_task +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task from servicelib.logging_utils import log_catch, log_context from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB from ...core.rabbitmq import 
post_log_message, post_progress_message from ...core.settings import ApplicationSettings +from ...modules.notifications._notifications_ports import PortNotifier from ..nodeports import upload_outputs from ._context import OutputsContext -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) async def _cancel_task(task: Task, task_cancellation_timeout_s: PositiveFloat) -> None: task.cancel() - with suppress(CancelledError): - with log_catch(logger, reraise=False): - await wait((task,), timeout=task_cancellation_timeout_s) + with suppress(CancelledError), log_catch(_logger, reraise=False): + await wait((task,), timeout=task_cancellation_timeout_s) -class UploadPortsFailed(PydanticErrorMixin, RuntimeError): - code: str = "dynamic_sidecar.outputs_manager.failed_while_uploading" +class UploadPortsFailedError(OsparcErrorMixin, RuntimeError): + code: str = "dynamic_sidecar.outputs_manager.failed_while_uploading" # type: ignore[assignment] msg_template: str = "Failed while uploading: failures={failures}" @@ -100,14 +100,16 @@ class OutputsManager: # pylint: disable=too-many-instance-attributes def __init__( self, outputs_context: OutputsContext, - io_log_redirect_cb: Optional[LogRedirectCB], - progress_cb: Optional[progress_bar.AsyncReportCB], + port_notifier: PortNotifier, + io_log_redirect_cb: LogRedirectCB | None, + progress_cb: progress_bar.AsyncReportCB | None, *, upload_upon_api_request: bool = True, task_cancellation_timeout_s: PositiveFloat = 5, task_monitor_interval_s: PositiveFloat = 1.0, ): self.outputs_context = outputs_context + self.port_notifier = port_notifier self.io_log_redirect_cb = io_log_redirect_cb self.upload_upon_api_request = upload_upon_api_request self.task_cancellation_timeout_s = task_cancellation_timeout_s @@ -115,27 +117,32 @@ def __init__( self.task_progress_cb = progress_cb self._port_key_tracker = _PortKeyTracker() - self._task_uploading: Optional[Task] = None - self._task_scheduler_worker: Optional[Task] = None + self._task_uploading: Task | None = None + self._task_scheduler_worker: Task | None = None self._schedule_all_ports_for_upload: bool = False # keep track if a port was uploaded and there was an error, remove said error if - self._last_upload_error_tracker: dict[str, Optional[Exception]] = {} + self._last_upload_error_tracker: dict[str, Exception | None] = {} async def _uploading_task_start(self) -> None: port_keys = await self._port_key_tracker.get_uploading() assert len(port_keys) > 0 # nosec async def _upload_ports() -> None: - with log_context(logger, logging.INFO, f"Uploading port keys: {port_keys}"): + with log_context( + _logger, logging.INFO, f"Uploading port keys: {port_keys}" + ): async with progress_bar.ProgressBarData( - steps=1, progress_report_cb=self.task_progress_cb + num_steps=1, + progress_report_cb=self.task_progress_cb, + description="uploading ports", ) as root_progress: await upload_outputs( outputs_path=self.outputs_context.outputs_path, port_keys=port_keys, io_log_redirect_cb=self.io_log_redirect_cb, progress_bar=root_progress, + port_notifier=self.port_notifier, ) task_name = f"outputs_manager_port_keys-{'_'.join(port_keys)}" @@ -145,19 +152,11 @@ def _remove_downloads(future: Future) -> None: # pylint: disable=protected-access if future._exception is not None: formatted_traceback = ( - "\n" - + "".join( - # pylint:disable = unexpected-keyword-arg, no-value-for-parameter - traceback.format_exception( - etype=type(future._exception), - value=future._exception, - tb=future._exception.__traceback__, - ) - ) + 
"\n" + "".join(traceback.format_exception(future._exception)) if future._exception.__traceback__ else "" ) - logger.warning( + _logger.warning( "%s ended with exception: %s%s", task_name, future._exception, @@ -195,18 +194,18 @@ def set_all_ports_for_upload(self) -> None: self._schedule_all_ports_for_upload = True async def start(self) -> None: - self._task_scheduler_worker = await start_periodic_task( + self._task_scheduler_worker = create_periodic_task( self._scheduler_worker, interval=timedelta(seconds=self.task_monitor_interval_s), task_name="outputs_manager_scheduler_worker", ) async def shutdown(self) -> None: - with log_context(logger, logging.INFO, f"{OutputsManager.__name__} shutdown"): + with log_context(_logger, logging.INFO, f"{OutputsManager.__name__} shutdown"): await self._uploading_task_cancel() if self._task_scheduler_worker is not None: - await stop_periodic_task( - self._task_scheduler_worker, timeout=self.task_monitor_interval_s + await cancel_wait_task( + self._task_scheduler_worker, max_delay=self.task_monitor_interval_s ) async def port_key_content_changed(self, port_key: str) -> None: @@ -224,7 +223,7 @@ async def wait_for_all_uploads_to_finish(self) -> None: # always scheduling non file based ports for upload # there is no auto detection when these change for non_file_port_key in self.outputs_context.non_file_type_port_keys: - logger.info("Adding non file port key %s", non_file_port_key) + _logger.info("Adding non file port key %s", non_file_port_key) await self.port_key_content_changed(non_file_port_key) # NOTE: the file system watchdog was found unhealthy and to make @@ -234,7 +233,7 @@ async def wait_for_all_uploads_to_finish(self) -> None: # is missed. if self._schedule_all_ports_for_upload: self._schedule_all_ports_for_upload = False - logger.warning( + _logger.warning( "Scheduled %s for upload. The watchdog was rebooted. " "This is a safety measure to make sure no data is lost. ", self.outputs_context.outputs_path, @@ -242,10 +241,10 @@ async def wait_for_all_uploads_to_finish(self) -> None: for file_port_key in self.outputs_context.file_type_port_keys: await self.port_key_content_changed(file_port_key) - logger.info("Port status before waiting %s", f"{self._port_key_tracker}") + _logger.info("Port status before waiting %s", f"{self._port_key_tracker}") while not await self._port_key_tracker.no_tracked_ports(): await asyncio.sleep(self.task_monitor_interval_s) - logger.info("Port status after waiting %s", f"{self._port_key_tracker}") + _logger.info("Port status after waiting %s", f"{self._port_key_tracker}") # NOTE: checking if there were any errors during the last port upload, # for each port. If any error is detected this will raise. 
@@ -253,7 +252,7 @@ async def wait_for_all_uploads_to_finish(self) -> None: True for v in self._last_upload_error_tracker.values() if v is not None ) if any_failed_upload: - raise UploadPortsFailed(failures=self._last_upload_error_tracker) + raise UploadPortsFailedError(failures=self._last_upload_error_tracker) def setup_outputs_manager(app: FastAPI) -> None: @@ -263,10 +262,10 @@ async def on_startup() -> None: assert isinstance(app.state.settings, ApplicationSettings) # nosec settings: ApplicationSettings = app.state.settings - io_log_redirect_cb: Optional[LogRedirectCB] = None + io_log_redirect_cb: LogRedirectCB | None = None if settings.RABBIT_SETTINGS: - io_log_redirect_cb = partial(post_log_message, app) - logger.debug( + io_log_redirect_cb = partial(post_log_message, app, log_level=logging.INFO) + _logger.debug( "setting up outputs manager %s", "with redirection of logs..." if io_log_redirect_cb else "...", ) @@ -277,11 +276,17 @@ async def on_startup() -> None: progress_cb=partial( post_progress_message, app, ProgressType.SERVICE_OUTPUTS_PUSHING ), + port_notifier=PortNotifier( + app, + settings.DY_SIDECAR_USER_ID, + settings.DY_SIDECAR_PROJECT_ID, + settings.DY_SIDECAR_NODE_ID, + ), ) await outputs_manager.start() async def on_shutdown() -> None: - outputs_manager: Optional[OutputsManager] = app.state.outputs_manager + outputs_manager: OutputsManager | None = app.state.outputs_manager if outputs_manager is not None: await outputs_manager.shutdown() diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watchdog_extensions.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watchdog_extensions.py index 78c416b9bef..2b77249e355 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watchdog_extensions.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watchdog_extensions.py @@ -7,7 +7,8 @@ from servicelib.logging_utils import log_catch from watchdog.events import FileSystemEvent, FileSystemEventHandler from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT, BaseObserver -from watchdog.observers.inotify import InotifyBuffer, InotifyEmitter +from watchdog.observers.inotify import InotifyEmitter +from watchdog.observers.inotify_buffer import InotifyBuffer from watchdog.observers.inotify_c import Inotify, InotifyConstants from watchdog.utils import BaseThread from watchdog.utils.delayed_queue import DelayedQueue @@ -26,16 +27,20 @@ ], ) -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) class _ExtendedInotifyBuffer(InotifyBuffer): - def __init__(self, path, recursive=False): # pylint:disable=super-init-not-called + def __init__( + self, path: bytes, *, recursive: bool = False + ): # pylint:disable=super-init-not-called # below call to `BaseThread.__init__` is correct since we want to # overwrite the `InotifyBuffer.__init__` method BaseThread.__init__(self) # pylint:disable=non-parent-init-called self._queue = DelayedQueue(self.delay) - self._inotify = Inotify(path, recursive, _EVENTS_TO_WATCH) + self._inotify = Inotify( # pylint:disable=too-many-function-args + path, recursive=recursive, event_mask=_EVENTS_TO_WATCH + ) self.start() @@ -43,7 +48,7 @@ class _ExtendedInotifyEmitter(InotifyEmitter): def on_thread_start(self): path = os.fsencode(self.watch.path) # pylint:disable=attribute-defined-outside-init - self._inotify = _ExtendedInotifyBuffer(path, self.watch.is_recursive) + self._inotify = 
_ExtendedInotifyBuffer(path, recursive=self.watch.is_recursive) class ExtendedInotifyObserver(BaseObserver): @@ -84,5 +89,5 @@ def on_any_event(self, event: FileSystemEvent) -> None: # which is running in the context of the # ExtendedInotifyObserver will cause the # observer to stop working. - with log_catch(logger, reraise=False): + with log_catch(_logger, reraise=False): self.event_handler(event) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py index b451fd2aef9..da0cdea251c 100644 --- a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/outputs/_watcher.py @@ -1,7 +1,7 @@ import logging from asyncio import CancelledError, Task, create_task -from contextlib import contextmanager, suppress -from typing import Generator, Optional +from collections.abc import AsyncGenerator +from contextlib import asynccontextmanager, suppress from fastapi import FastAPI from servicelib.logging_utils import log_context @@ -12,7 +12,7 @@ from ._event_handler import EventHandlerObserver from ._manager import OutputsManager -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) class OutputsWatcher: @@ -22,8 +22,7 @@ def __init__( self.outputs_manager = outputs_manager self.outputs_context = outputs_context - self._allow_event_propagation: bool = True - self._task_events_worker: Optional[Task] = None + self._task_events_worker: Task | None = None self._event_filter = EventFilter(outputs_manager=outputs_manager) self._observer_monitor: EventHandlerObserver = EventHandlerObserver( outputs_context=self.outputs_context, @@ -33,23 +32,22 @@ def __init__( async def _worker_events(self) -> None: while True: - event: Optional[ - str - ] = await self.outputs_context.port_key_events_queue.coro_get() + event: str | None = ( + await self.outputs_context.port_key_events_queue.coro_get() + ) if event is None: break - if self._allow_event_propagation: - await self._event_filter.enqueue(event) + await self._event_filter.enqueue(event) - def enable_event_propagation(self) -> None: - self._allow_event_propagation = True + async def enable_event_propagation(self) -> None: + await self.outputs_context.toggle_event_propagation(is_enabled=True) - def disable_event_propagation(self) -> None: - self._allow_event_propagation = False + async def disable_event_propagation(self) -> None: + await self.outputs_context.toggle_event_propagation(is_enabled=False) async def start(self) -> None: - with log_context(logger, logging.INFO, f"{OutputsWatcher.__name__} start"): + with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} start"): self._task_events_worker = create_task( self._worker_events(), name="outputs_watcher_events_worker" ) @@ -59,7 +57,7 @@ async def start(self) -> None: async def shutdown(self) -> None: """cleans up spawned tasks which might be pending""" - with log_context(logger, logging.INFO, f"{OutputsWatcher.__name__} shutdown"): + with log_context(_logger, logging.INFO, f"{OutputsWatcher.__name__} shutdown"): await self._event_filter.shutdown() await self._observer_monitor.stop() @@ -80,11 +78,11 @@ async def on_startup() -> None: outputs_manager=outputs_manager, outputs_context=outputs_context, ) - app.state.outputs_watcher.disable_event_propagation() await app.state.outputs_watcher.start() + await disable_event_propagation(app) async 
def on_shutdown() -> None: - outputs_watcher: Optional[OutputsWatcher] = app.state.outputs_watcher + outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher if outputs_watcher is not None: await outputs_watcher.shutdown() @@ -92,20 +90,22 @@ async def on_shutdown() -> None: app.add_event_handler("shutdown", on_shutdown) -def disable_outputs_watcher(app: FastAPI) -> None: - if app.state.outputs_watcher is not None: - app.state.outputs_watcher.disable_event_propagation() +async def disable_event_propagation(app: FastAPI) -> None: + outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher + if outputs_watcher is not None: + await outputs_watcher.disable_event_propagation() -def enable_outputs_watcher(app: FastAPI) -> None: - if app.state.outputs_watcher is not None: - app.state.outputs_watcher.enable_event_propagation() +async def enable_event_propagation(app: FastAPI) -> None: + outputs_watcher: OutputsWatcher | None = app.state.outputs_watcher + if outputs_watcher is not None: + await outputs_watcher.enable_event_propagation() -@contextmanager -def outputs_watcher_disabled(app: FastAPI) -> Generator[None, None, None]: +@asynccontextmanager +async def event_propagation_disabled(app: FastAPI) -> AsyncGenerator[None, None]: try: - disable_outputs_watcher(app) + await disable_event_propagation(app) yield None finally: - enable_outputs_watcher(app) + await enable_event_propagation(app) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/prometheus_metrics.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/prometheus_metrics.py new file mode 100644 index 00000000000..eb7ad93ed9e --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/prometheus_metrics.py @@ -0,0 +1,172 @@ +import asyncio +import logging +from collections import deque +from collections.abc import Sequence +from datetime import datetime +from typing import Final + +import arrow +from fastapi import FastAPI, status +from models_library.callbacks_mapping import CallbacksMapping, UserServiceCommand +from pydantic import BaseModel, NonNegativeFloat, NonNegativeInt +from servicelib.async_utils import cancel_wait_task +from servicelib.logging_utils import log_context +from servicelib.sequences_utils import pairwise +from simcore_service_dynamic_sidecar.core.errors import ( + ContainerExecContainerNotFoundError, +) + +from ..models.shared_store import SharedStore +from .container_utils import run_command_in_container + +_logger = logging.getLogger(__name__) + +_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL: Final[NonNegativeFloat] = 60.0 +_MIN_ELEMENTS: Final[NonNegativeInt] = 2 +_MAX_PROMETHEUS_SAMPLES: Final[NonNegativeInt] = 5 +_TASK_CANCELLATION_TIMEOUT_S: Final[NonNegativeInt] = 2 + +_USER_SERVICES_NOT_STARTED: Final[str] = "User service(s) was/were not started" + + +def _get_user_services_scrape_interval( + last_prometheus_query_times: Sequence[datetime], +) -> NonNegativeFloat: + if len(last_prometheus_query_times) < _MIN_ELEMENTS: + return _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL + + time_pairs: list[tuple[datetime, datetime]] = list( + pairwise(last_prometheus_query_times) + ) + scrape_intervals: list[NonNegativeFloat] = [ + (t2 - t1).total_seconds() for t1, t2 in time_pairs + ] + average_prometheus_scrape_interval = sum(scrape_intervals) / len(scrape_intervals) + return min(average_prometheus_scrape_interval, _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL) + + +class MetricsResponse(BaseModel): + body: str + status: int + + @staticmethod + def 
__get_iso_timestamp() -> str: + return f"{arrow.now().datetime.isoformat()}" + + @classmethod + def from_reply(cls, metrics_fetch_result: str) -> "MetricsResponse": + body = f"{metrics_fetch_result}" + return cls(body=body, status=status.HTTP_200_OK) + + @classmethod + def from_error(cls, error: Exception) -> "MetricsResponse": + iso_timestamp = cls.__get_iso_timestamp() + return cls( + body=f"At {iso_timestamp} an unexpected error occurred: {error}", + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + @classmethod + def initial_response(cls) -> "MetricsResponse": + return cls(body="", status=status.HTTP_200_OK) + + +class UserServicesMetrics: + def __init__( + self, shared_store: SharedStore, metrics_command: UserServiceCommand + ) -> None: + self.shared_store: SharedStore = shared_store + self.metrics_command: UserServiceCommand = metrics_command + + self._last_prometheus_query_times: deque[datetime] = deque( + maxlen=_MAX_PROMETHEUS_SAMPLES + ) + self._metrics_recovery_task: asyncio.Task | None = None + + self._metrics_response: MetricsResponse = MetricsResponse.initial_response() + + def get_metrics(self) -> MetricsResponse: + self._last_prometheus_query_times.append(arrow.now().datetime) + return self._metrics_response + + async def _update_metrics(self): + container_name: str | None = self.shared_store.original_to_container_names.get( + self.metrics_command.service, None + ) + if container_name is None: + self._metrics_response = MetricsResponse.from_error( + RuntimeError(_USER_SERVICES_NOT_STARTED) + ) + return + + try: + metrics_fetch_result = await run_command_in_container( + container_name, + command=self.metrics_command.command, + timeout=self.metrics_command.timeout, + ) + self._metrics_response = MetricsResponse.from_reply(metrics_fetch_result) + except ContainerExecContainerNotFoundError as e: + _logger.debug( + "Container %s was not found; could not recover metrics", + container_name, + ) + self._metrics_response = MetricsResponse.from_error(e) + except Exception as e: # pylint: disable=broad-exception-caught + _logger.debug("Could not recover metrics", exc_info=True) + self._metrics_response = MetricsResponse.from_error(e) + + async def _task_metrics_recovery(self) -> None: + while True: + with log_context(_logger, logging.DEBUG, "prometheus metrics update"): + await self._update_metrics() + + # NOTE: will wait at most `_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL` before scraping + # the metrics again. + # If Prometheus is actively scraping this container, it will match its + scraping rate to provide up-to-date metrics. 
+ await asyncio.sleep( + _get_user_services_scrape_interval( + self._last_prometheus_query_times + ) + ) + + async def start(self) -> None: + with log_context(_logger, logging.INFO, "setup service metrics recovery"): + if self._metrics_recovery_task is None: + self._metrics_recovery_task = asyncio.create_task( + self._task_metrics_recovery() + ) + else: + _logger.info("metrics recovery was already started") + + async def stop(self) -> None: + with log_context(_logger, logging.INFO, "shutdown service metrics recovery"): + if self._metrics_recovery_task: + await cancel_wait_task( + self._metrics_recovery_task, max_delay=_TASK_CANCELLATION_TIMEOUT_S + ) + + +def setup_prometheus_metrics(app: FastAPI) -> None: + async def on_startup() -> None: + callbacks_mapping: CallbacksMapping = ( + app.state.settings.DY_SIDECAR_CALLBACKS_MAPPING + ) + assert callbacks_mapping.metrics # nosec + + with log_context( + _logger, logging.INFO, "enabling user services metrics scraping" + ): + shared_store: SharedStore = app.state.shared_store + app.state.user_service_metrics = user_service_metrics = UserServicesMetrics( + shared_store, callbacks_mapping.metrics + ) + await user_service_metrics.start() + + async def on_shutdown() -> None: + user_service_metrics: UserServicesMetrics = app.state.user_service_metrics + await user_service_metrics.stop() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/__init__.py new file mode 100644 index 00000000000..1c74097a4be --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/__init__.py @@ -0,0 +1,8 @@ +from ._core import send_service_started, send_service_stopped +from ._setup import setup_resource_tracking + +__all__: tuple[str, ...] 
= ( + "send_service_started", + "send_service_stopped", + "setup_resource_tracking", +) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_core.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_core.py new file mode 100644 index 00000000000..eecbfd2089e --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_core.py @@ -0,0 +1,146 @@ +import asyncio +import logging +from typing import Final + +from fastapi import FastAPI +from models_library.generated_models.docker_rest_api import ContainerState +from models_library.rabbitmq_messages import ( + DynamicServiceRunningMessage, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.services import ServiceType +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from pydantic import NonNegativeFloat +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.logging_utils import log_context + +from ...core.docker_utils import ( + are_all_containers_in_expected_states, + get_container_states, +) +from ...core.rabbitmq import ( + post_dynamic_service_running_message, + post_resource_tracking_message, +) +from ...core.settings import ApplicationSettings, ResourceTrackingSettings +from ...models.shared_store import SharedStore +from ._models import ResourceTrackingState + +_STOP_WORKER_TIMEOUT_S: Final[NonNegativeFloat] = 1.0 + +_logger = logging.getLogger(__name__) + + +def _get_settings(app: FastAPI) -> ApplicationSettings: + settings: ApplicationSettings = app.state.settings + return settings + + +async def _start_heart_beat_task(app: FastAPI) -> None: + settings: ApplicationSettings = _get_settings(app) + resource_tracking_settings: ResourceTrackingSettings = settings.RESOURCE_TRACKING + resource_tracking: ResourceTrackingState = app.state.resource_tracking + + if resource_tracking.heart_beat_task is not None: + msg = f"Unexpected task={resource_tracking.heart_beat_task} already running!" 
+ raise RuntimeError(msg) + + with log_context(_logger, logging.DEBUG, "starting heart beat task"): + resource_tracking.heart_beat_task = create_periodic_task( + _heart_beat_task, + app=app, + interval=resource_tracking_settings.RESOURCE_TRACKING_HEARTBEAT_INTERVAL, + task_name="resource_tracking_heart_beat", + wait_before_running=resource_tracking_settings.RESOURCE_TRACKING_HEARTBEAT_INTERVAL, + ) + + +async def stop_heart_beat_task(app: FastAPI) -> None: + resource_tracking: ResourceTrackingState = app.state.resource_tracking + if resource_tracking.heart_beat_task: + await cancel_wait_task( + resource_tracking.heart_beat_task, max_delay=_STOP_WORKER_TIMEOUT_S + ) + + +async def _heart_beat_task(app: FastAPI): + settings: ApplicationSettings = _get_settings(app) + shared_store: SharedStore = app.state.shared_store + + container_states: dict[str, ContainerState | None] = await get_container_states( + shared_store.container_names + ) + + if are_all_containers_in_expected_states(container_states.values()): + rut_message = RabbitResourceTrackingHeartbeatMessage( + service_run_id=settings.DY_SIDECAR_RUN_ID + ) + dyn_message = DynamicServiceRunningMessage( + project_id=settings.DY_SIDECAR_PROJECT_ID, + node_id=settings.DY_SIDECAR_NODE_ID, + user_id=settings.DY_SIDECAR_USER_ID, + product_name=settings.DY_SIDECAR_PRODUCT_NAME, + ) + await asyncio.gather( + *[ + post_resource_tracking_message(app, rut_message), + post_dynamic_service_running_message(app, dyn_message), + ] + ) + else: + _logger.info( + "heart beat message skipped: container_states=%s", container_states + ) + + +async def send_service_stopped( + app: FastAPI, simcore_platform_status: SimcorePlatformStatus +) -> None: + await stop_heart_beat_task(app) + + settings: ApplicationSettings = _get_settings(app) + message = RabbitResourceTrackingStoppedMessage( + service_run_id=settings.DY_SIDECAR_RUN_ID, + simcore_platform_status=simcore_platform_status, + ) + await post_resource_tracking_message(app, message) + + +async def send_service_started( + app: FastAPI, *, metrics_params: CreateServiceMetricsAdditionalParams +) -> None: + settings: ApplicationSettings = _get_settings(app) + + message = RabbitResourceTrackingStartedMessage( + service_run_id=settings.DY_SIDECAR_RUN_ID, + wallet_id=metrics_params.wallet_id, + wallet_name=metrics_params.wallet_name, + product_name=metrics_params.product_name, + simcore_user_agent=metrics_params.simcore_user_agent, + user_id=settings.DY_SIDECAR_USER_ID, + user_email=metrics_params.user_email, + project_id=settings.DY_SIDECAR_PROJECT_ID, + project_name=metrics_params.project_name, + node_id=settings.DY_SIDECAR_NODE_ID, + node_name=metrics_params.node_name, + parent_project_id=settings.DY_SIDECAR_PROJECT_ID, + root_parent_project_id=settings.DY_SIDECAR_PROJECT_ID, + root_parent_project_name=metrics_params.project_name, + parent_node_id=settings.DY_SIDECAR_NODE_ID, + root_parent_node_id=settings.DY_SIDECAR_NODE_ID, + service_key=metrics_params.service_key, + service_version=metrics_params.service_version, + service_type=ServiceType.DYNAMIC, + service_resources=metrics_params.service_resources, + service_additional_metadata=metrics_params.service_additional_metadata, + pricing_plan_id=metrics_params.pricing_plan_id, + pricing_unit_id=metrics_params.pricing_unit_id, + pricing_unit_cost_id=metrics_params.pricing_unit_cost_id, + ) + await post_resource_tracking_message(app, message) + + await _start_heart_beat_task(app) diff --git 
a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_models.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_models.py new file mode 100644 index 00000000000..f87fa415a74 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_models.py @@ -0,0 +1,8 @@ +from asyncio import Task + +from pydantic import BaseModel, ConfigDict + + +class ResourceTrackingState(BaseModel): + heart_beat_task: Task | None = None + model_config = ConfigDict(arbitrary_types_allowed=True) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_setup.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_setup.py new file mode 100644 index 00000000000..22469fad6d1 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/resource_tracking/_setup.py @@ -0,0 +1,15 @@ +from fastapi import FastAPI + +from ._core import stop_heart_beat_task +from ._models import ResourceTrackingState + + +def setup_resource_tracking(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.resource_tracking = ResourceTrackingState() + + async def on_shutdown() -> None: + await stop_heart_beat_task(app) + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/service_liveness.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/service_liveness.py new file mode 100644 index 00000000000..33c0083861f --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/service_liveness.py @@ -0,0 +1,113 @@ +import logging +import time +from collections.abc import Awaitable, Callable +from datetime import timedelta +from typing import Final + +from common_library.errors_classes import OsparcErrorMixin +from tenacity import AsyncRetrying, RetryCallState, TryAgain +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +_logger = logging.getLogger(__name__) + + +_DEFAULT_CHECK_INTERVAL: Final[timedelta] = timedelta(seconds=1) +_DEFAULT_TIMEOUT_INTERVAL: Final[timedelta] = timedelta(seconds=30) + + +class CouldNotReachServiceError(OsparcErrorMixin, Exception): + msg_template: str = "Could not contact service '{service_name}' at '{endpoint}'. Look above for details." 
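The file continues below with a tenacity-based retry loop (AsyncRetrying plus TryAgain) that keeps probing a dependency until the async handler reports it is alive, and only then returns. As a rough, hedged illustration of that pattern, here is a self-contained sketch; `_ping_postgres` is a hypothetical probe and the interval/timeout values are arbitrary, while the real entry point is wait_for_service_liveness defined further down in this file.

import asyncio

from tenacity import AsyncRetrying, TryAgain
from tenacity.stop import stop_after_delay
from tenacity.wait import wait_fixed


async def _ping_postgres() -> bool:
    # stand-in for a real connectivity check (e.g. opening a connection)
    return True


async def wait_until_ready() -> None:
    # retry every second, give up after 30 seconds, re-raise the last error on timeout
    async for attempt in AsyncRetrying(
        wait=wait_fixed(1),
        stop=stop_after_delay(30),
        reraise=True,
    ):
        with attempt:
            if await _ping_postgres() is False:
                raise TryAgain  # handler reported "not ready yet" -> retry


asyncio.run(wait_until_ready())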
+ + +def _before_sleep_log( + logger: logging.Logger, service_name: str, endpoint: str +) -> Callable[[RetryCallState], None]: + def log_it(retry_state: RetryCallState) -> None: + assert retry_state # nosec + assert retry_state.next_action # nosec + + logger.warning( + "Retrying (attempt %s) to contact '%s' at '%s' in %s seconds.", + retry_state.attempt_number, + service_name, + endpoint, + retry_state.next_action.sleep, + ) + + return log_it + + +async def _attempt_to_wait_for_handler( + async_handler: Callable[..., Awaitable], + *args, + service_name: str, + endpoint: str, + check_interval: timedelta, + timeout: timedelta, + **kwargs, +) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(check_interval), + stop=stop_after_delay(timeout.total_seconds()), + before_sleep=_before_sleep_log(_logger, service_name, endpoint), + reraise=True, + ): + with attempt: + if await async_handler(*args, **kwargs) is False: + raise TryAgain + + +async def wait_for_service_liveness( + async_handler: Callable[..., Awaitable], + *args, + service_name: str, + endpoint: str, + check_interval: timedelta | None = None, + timeout: timedelta | None = None, + **kwargs, +) -> None: + """waits for async_handler to return ``True`` or ``None`` instead of + raising errors or returning ``False`` + + Arguments: + async_handler -- handler to execute + service_name -- service reference for whom investigates the logs + endpoint -- endpoint address for whom investigates the logs (only used for logging) + + Keyword Arguments: + check_interval -- interval at which service check is ran (default: {_DEFAULT_CHECK_INTERVAL}) + timeout -- stops trying to contact service and raises ``CouldNotReachServiceError`` + (default: {_DEFAULT_TIMEOUT_INTERVAL}) + + Raises: + CouldNotReachServiceError: if it was not able to contact the service in time + """ + + if check_interval is None: + check_interval = _DEFAULT_CHECK_INTERVAL + if timeout is None: + timeout = _DEFAULT_TIMEOUT_INTERVAL + + try: + start = time.time() + await _attempt_to_wait_for_handler( + async_handler, + *args, + service_name=service_name, + endpoint=endpoint, + check_interval=check_interval, + timeout=timeout, + **kwargs, + ) + elapsed_ms = (time.time() - start) * 1000 + _logger.info( + "Service '%s' found at '%s' after %.2f ms", + service_name, + endpoint, + elapsed_ms, + ) + except Exception as e: + raise CouldNotReachServiceError( + service_name=service_name, endpoint=endpoint + ) from e diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/__init__.py new file mode 100644 index 00000000000..546243f4cdb --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/__init__.py @@ -0,0 +1,7 @@ +from ._disk_usage import get_disk_usage_monitor +from ._setup import setup_system_monitor + +__all__: tuple[str, ...] 
= ( + "get_disk_usage_monitor", + "setup_system_monitor", +) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_disk_usage.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_disk_usage.py new file mode 100644 index 00000000000..d2148842ef5 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_disk_usage.py @@ -0,0 +1,242 @@ +import asyncio +import logging +from dataclasses import dataclass, field +from datetime import timedelta +from functools import cached_property +from pathlib import Path +from typing import Final + +import psutil +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import ( + DiskUsage, + MountPathCategory, +) +from models_library.projects_nodes_io import NodeID +from models_library.users import UserID +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.logging_utils import log_context +from servicelib.utils import logged_gather + +from ...core.settings import ApplicationSettings +from ..mounted_fs import MountedVolumes +from ..notifications import publish_disk_usage + +_NODE_FILE_SYSTEM_PATH: Final[Path] = Path("/") + + +_logger = logging.getLogger(__name__) + + +_SUPPORTED_ITEMS: Final[set[str]] = { + MountPathCategory.HOST, + MountPathCategory.STATES_VOLUMES, +} + + +async def get_usage(path: Path) -> DiskUsage: + usage = await asyncio.get_event_loop().run_in_executor( + None, psutil.disk_usage, f"{path}" + ) + return DiskUsage.from_ps_util_disk_usage(usage) + + +def get_relative_path(path: Path, dy_volumes_mount_dir: Path) -> Path: + try: + return path.relative_to(dy_volumes_mount_dir) + except ValueError: + return path + + +def _get_normalized_folder_name(path: Path) -> str: + return f"{path}".replace("/", "_") + + +@dataclass +class DiskUsageMonitor: + app: FastAPI + user_id: UserID + node_id: NodeID + interval: timedelta + monitored_paths: dict[MountPathCategory, set[Path]] + + dy_volumes_mount_dir: Path + _monitor_task: asyncio.Task | None = None + + # tracked disk usage + _last_usage: dict[MountPathCategory, DiskUsage] = field(default_factory=dict) + _usage_overwrite: dict[str, DiskUsage] = field( + default_factory=dict, + metadata={ + "description": ( + "third party services can update the disk usage for certain paths " + "monitored by the dynamic-sidecar. This is the case for the efs-guardian." + ) + }, + ) + + @cached_property + def _monitored_paths_set(self) -> set[Path]: + if not self.monitored_paths: + return set() + return set.union(*self.monitored_paths.values()) + + @cached_property + def _normalized_monitored_paths(self) -> dict[MountPathCategory, set[str]]: + """ + Transforms Path -> str form `/tmp/.some_file/here` -> `_tmp_.some_file_here`. + This a one way transformation used to uniquely identify volume mounts inside + by the dynamic-sidecar. These are also used by the efs-guardian. 
+ """ + return { + k: { + _get_normalized_folder_name( + get_relative_path(p, self.dy_volumes_mount_dir) + ) + for p in paths + } + for k, paths in self.monitored_paths.items() + } + + async def _get_measured_disk_usage(self) -> list[DiskUsage]: + return await logged_gather( + *[get_usage(monitored_path) for monitored_path in self._monitored_paths_set] + ) + + def _get_local_disk_usage( + self, measured_disk_usage: list[DiskUsage] + ) -> dict[str, DiskUsage]: + return { + _get_normalized_folder_name( + get_relative_path(p, self.dy_volumes_mount_dir) + ): u + for p, u in zip(self._monitored_paths_set, measured_disk_usage, strict=True) + } + + def _replace_incoming_usage( + self, normalized_disk_usage: dict[str, DiskUsage] + ) -> None: + """overwrites local disk usage with incoming usage from egs-guardian""" + for key, overwrite_usage in self._usage_overwrite.items(): + normalized_disk_usage[key] = overwrite_usage # noqa: PERF403 + + @staticmethod + def _get_grouped_usage_to_folder_names( + local_disk_usage: dict[str, DiskUsage], + ) -> dict[DiskUsage, set[str]]: + """Groups all paths that have the same metrics together""" + usage_to_folder_names: dict[DiskUsage, set[str]] = {} + for folder_name, disk_usage in local_disk_usage.items(): + if disk_usage not in usage_to_folder_names: + usage_to_folder_names[disk_usage] = set() + + usage_to_folder_names[disk_usage].add(folder_name) + return usage_to_folder_names + + async def _publish_disk_usage(self, usage: dict[MountPathCategory, DiskUsage]): + await publish_disk_usage( + self.app, user_id=self.user_id, node_id=self.node_id, usage=usage + ) + + async def get_disk_usage(self) -> dict[MountPathCategory, DiskUsage]: + measured_disk_usage = await self._get_measured_disk_usage() + + local_disk_usage = self._get_local_disk_usage(measured_disk_usage) + + self._replace_incoming_usage(local_disk_usage) + + usage_to_folder_names = self._get_grouped_usage_to_folder_names( + local_disk_usage + ) + + # compute new version of DiskUsage for FE, only 1 label for each unique disk usage entry + usage: dict[MountPathCategory, DiskUsage] = {} + + normalized_paths = self._normalized_monitored_paths + + for disk_usage, folder_names in usage_to_folder_names.items(): + for category in [ + MountPathCategory.HOST, + MountPathCategory.STATES_VOLUMES, + MountPathCategory.INPUTS_VOLUMES, + MountPathCategory.OUTPUTS_VOLUMES, + ]: + if folder_names.intersection(normalized_paths[category]): + usage[category] = disk_usage + break + else: + msg = f"Could not assign {disk_usage=} for {folder_names=}" + raise RuntimeError(msg) + + return {k: v for k, v in usage.items() if k in _SUPPORTED_ITEMS} + + async def _monitor(self) -> None: + disk_usage = await self.get_disk_usage() + # notify only when usage changes + if self._last_usage != disk_usage: + await self._publish_disk_usage(disk_usage) + self._last_usage = disk_usage + + async def setup(self) -> None: + self._monitor_task = create_periodic_task( + self._monitor, interval=self.interval, task_name="monitor_disk_usage" + ) + + async def shutdown(self) -> None: + if self._monitor_task: + await cancel_wait_task(self._monitor_task) + + def set_disk_usage_for_path(self, overwrite_usage: dict[str, DiskUsage]) -> None: + """ + efs-guardian manages disk quotas since the underlying FS has no support for them. 
+ the dynamic-sidecar will use this information to provide correct quotas for the + volumes managed by the efs-guardian + """ + self._usage_overwrite = overwrite_usage + + +def _get_monitored_paths(app: FastAPI) -> dict[MountPathCategory, set[Path]]: + mounted_volumes: MountedVolumes = app.state.mounted_volumes + return { + MountPathCategory.HOST: {_NODE_FILE_SYSTEM_PATH}, + MountPathCategory.INPUTS_VOLUMES: {mounted_volumes.disk_inputs_path}, + MountPathCategory.OUTPUTS_VOLUMES: {mounted_volumes.disk_outputs_path}, + MountPathCategory.STATES_VOLUMES: set(mounted_volumes.disk_state_paths_iter()), + } + + +def create_disk_usage_monitor(app: FastAPI) -> DiskUsageMonitor: + settings: ApplicationSettings = app.state.settings + return DiskUsageMonitor( + app, + user_id=settings.DY_SIDECAR_USER_ID, + node_id=settings.DY_SIDECAR_NODE_ID, + interval=settings.DYNAMIC_SIDECAR_TELEMETRY_DISK_USAGE_MONITOR_INTERVAL, + monitored_paths=_get_monitored_paths(app), + dy_volumes_mount_dir=settings.DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR, + ) + + +def get_disk_usage_monitor(app: FastAPI) -> DiskUsageMonitor | None: + if hasattr(app.state, "disk_usage_monitor"): + disk_usage_monitor: DiskUsageMonitor = app.state.disk_usage_monitor + return disk_usage_monitor + return None + + +def setup_disk_usage(app: FastAPI) -> None: + async def on_startup() -> None: + with log_context(_logger, logging.INFO, "setup disk monitor"): + app.state.disk_usage_monitor = create_disk_usage_monitor(app) + await app.state.disk_usage_monitor.setup() + + async def on_shutdown() -> None: + with log_context(_logger, logging.INFO, "shutdown disk monitor"): + if disk_usage_monitor := getattr( # noqa: B009 + app.state, "disk_usage_monitor" + ): + await disk_usage_monitor.shutdown() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_setup.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_setup.py new file mode 100644 index 00000000000..95a6571c3d7 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/system_monitor/_setup.py @@ -0,0 +1,45 @@ +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import log_context + +from ...core.settings import SystemMonitorSettings +from ._disk_usage import ( + create_disk_usage_monitor, + get_disk_usage_monitor, + setup_disk_usage, +) + +_logger = logging.getLogger(__name__) + + +async def _display_current_disk_usage(app: FastAPI) -> None: + disk_usage_monitor = get_disk_usage_monitor(app) + if disk_usage_monitor is None: + disk_usage_monitor = create_disk_usage_monitor(app) + + disk_usage = await disk_usage_monitor.get_disk_usage() + for name, entry in disk_usage.items(): + _logger.info( + "Disk usage for '%s': total=%s, free=%s, used=%s, used_percent=%s", + name, + entry.total.human_readable(), + entry.free.human_readable(), + entry.used.human_readable(), + entry.used_percent, + ) + + +def setup_system_monitor(app: FastAPI) -> None: + with log_context(_logger, logging.INFO, "setup system monitor"): + settings: SystemMonitorSettings = app.state.settings.SYSTEM_MONITOR_SETTINGS + + if settings.DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE: + setup_disk_usage(app) + else: + _logger.warning("system monitor disabled") + + async def on_startup() -> None: + await _display_current_disk_usage(app) + + app.add_event_handler("startup", on_startup) diff --git 
a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/__init__.py new file mode 100644 index 00000000000..03c14a805c2 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/__init__.py @@ -0,0 +1,10 @@ +from ._manager import load_user_services_preferences, save_user_services_preferences +from ._setup import setup_user_services_preferences +from ._utils import is_feature_enabled + +__all__: tuple[str, ...] = ( + "is_feature_enabled", + "load_user_services_preferences", + "save_user_services_preferences", + "setup_user_services_preferences", +) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_db.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_db.py new file mode 100644 index 00000000000..3942e23b184 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_db.py @@ -0,0 +1,79 @@ +from pathlib import Path + +import umsgpack # type: ignore[import-untyped] +from models_library.products import ProductName +from models_library.services import ServiceKey, ServiceVersion +from models_library.user_preferences import PreferenceName +from models_library.users import UserID +from packaging.version import Version +from pydantic import TypeAdapter +from simcore_postgres_database.utils_user_preferences import ( + UserServicesUserPreferencesRepo, +) + +# NOTE: Using the same connection pattern to Postgres as the one inside nodeports. +# The same connection context manager is utilized here as well! 
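# Rough usage sketch (not part of this PR): how a caller is expected to wrap a user-service
# lifecycle with the public API exported above. It assumes a FastAPI app on which
# setup_user_services_preferences() already ran; `start_user_service` and
# `stop_user_service` are placeholders for whatever actually runs the service.
from collections.abc import Awaitable, Callable

from fastapi import FastAPI

from simcore_service_dynamic_sidecar.modules.user_services_preferences import (
    is_feature_enabled,
    load_user_services_preferences,
    save_user_services_preferences,
)


async def run_service_with_preferences(
    app: FastAPI,
    start_user_service: Callable[[], Awaitable[None]],
    stop_user_service: Callable[[], Awaitable[None]],
) -> None:
    if is_feature_enabled(app):
        # restore whatever the user saved in a previous session
        await load_user_services_preferences(app)

    await start_user_service()
    try:
        ...  # the user service runs here
    finally:
        await stop_user_service()
        if is_feature_enabled(app):
            # persist preferences; repeated save calls are skipped by the manager
            await save_user_services_preferences(app)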
+from simcore_sdk.node_ports_common.dbmanager import DBContextManager + +from ._packaging import dir_from_bytes, dir_to_bytes +from ._user_preference import get_model_class + + +def _get_db_preference_name( + preference_name: PreferenceName, service_version: ServiceVersion +) -> str: + version = Version(service_version) + return f"{preference_name}/{version.major}.{version.minor}" + + +async def save_preferences( + service_key: ServiceKey, + service_version: ServiceVersion, + user_preferences_path: Path, + user_id: UserID, + product_name: ProductName, +): + preference_class = get_model_class(service_key) + + dir_content: bytes = await dir_to_bytes(user_preferences_path) + preference = preference_class( + service_key=service_key, service_version=service_version, value=dir_content + ) + + async with DBContextManager() as engine, engine.begin() as conn: + await UserServicesUserPreferencesRepo.save( + conn, + user_id=user_id, + product_name=product_name, + preference_name=_get_db_preference_name( + preference_class.get_preference_name(), service_version + ), + payload=umsgpack.packb(preference.to_db()), + ) + + +async def load_preferences( + service_key: ServiceKey, + service_version: ServiceVersion, + user_preferences_path: Path, + user_id: UserID, + product_name: ProductName, +) -> None: + preference_class = get_model_class(service_key) + + async with DBContextManager() as engine, engine.connect() as conn: + payload = await UserServicesUserPreferencesRepo.load( + conn, + user_id=user_id, + product_name=product_name, + preference_name=_get_db_preference_name( + preference_class.get_preference_name(), service_version + ), + ) + if payload is None: + return + + preference = TypeAdapter(preference_class).validate_python( + umsgpack.unpackb(payload) + ) + await dir_from_bytes(preference.value, user_preferences_path) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_errors.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_errors.py new file mode 100644 index 00000000000..278ad52b04d --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_errors.py @@ -0,0 +1,13 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class BaseServicesPreferencesError(OsparcErrorMixin, Exception): + ... + + +class DestinationIsNotADirectoryError(BaseServicesPreferencesError): + msg_template = "Provided destination_to={destination_to} must be a directory" + + +class PreferencesAreTooBigError(BaseServicesPreferencesError): + msg_template = "Preferences amount to a size of size={size} bytes. Allowed limit={limit} bytes." diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_manager.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_manager.py new file mode 100644 index 00000000000..3c9ede49dbc --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_manager.py @@ -0,0 +1,60 @@ +import logging +from pathlib import Path + +from attr import dataclass +from fastapi import FastAPI +from models_library.products import ProductName +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID + +from . 
import _db + +_logger = logging.getLogger(__name__) + + +@dataclass +class UserServicesPreferencesManager: + user_preferences_path: Path + service_key: ServiceKey + service_version: ServiceVersion + user_id: UserID + product_name: ProductName + _preferences_already_saved: bool = False + + async def load_preferences(self) -> None: + await _db.load_preferences( + user_preferences_path=self.user_preferences_path, + service_key=self.service_key, + service_version=self.service_version, + user_id=self.user_id, + product_name=self.product_name, + ) + + async def save_preferences(self) -> None: + if self._preferences_already_saved: + _logger.warning("Preferences were already saved, will not save them again") + return + + await _db.save_preferences( + user_preferences_path=self.user_preferences_path, + service_key=self.service_key, + service_version=self.service_version, + user_id=self.user_id, + product_name=self.product_name, + ) + + self._preferences_already_saved = True + + +async def save_user_services_preferences(app: FastAPI) -> None: + user_services_preferences_manager: UserServicesPreferencesManager = ( + app.state.user_services_preferences_manager + ) + await user_services_preferences_manager.save_preferences() + + +async def load_user_services_preferences(app: FastAPI) -> None: + user_services_preferences_manager: UserServicesPreferencesManager = ( + app.state.user_services_preferences_manager + ) + await user_services_preferences_manager.load_preferences() diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_packaging.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_packaging.py new file mode 100644 index 00000000000..4c711e34f9a --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_packaging.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Final + +import aiofiles +from pydantic import ByteSize, TypeAdapter +from servicelib.archiving_utils import archive_dir, unarchive_dir +from servicelib.file_utils import remove_directory + +from ._errors import DestinationIsNotADirectoryError, PreferencesAreTooBigError + +_MAX_PREFERENCES_TOTAL_SIZE: Final[ByteSize] = TypeAdapter(ByteSize).validate_python( + "128kib" +) + + +async def dir_to_bytes(source: Path) -> bytes: + if not source.is_dir(): + raise DestinationIsNotADirectoryError(destination_to=source) + + async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: + archive_path = Path(tmp_dir) / "archive" + + await archive_dir(source, archive_path, compress=True) + + archive_size = archive_path.stat().st_size + if archive_size > _MAX_PREFERENCES_TOTAL_SIZE: + raise PreferencesAreTooBigError( + size=archive_size, limit=_MAX_PREFERENCES_TOTAL_SIZE + ) + + return archive_path.read_bytes() + + +async def dir_from_bytes(payload: bytes, destination: Path) -> None: + if not destination.is_dir(): + raise DestinationIsNotADirectoryError(destination_to=destination) + + await remove_directory(destination, only_children=True) + + async with aiofiles.tempfile.TemporaryDirectory() as tmp_dir: + archive_path = Path(tmp_dir) / "archive" + + archive_path.write_bytes(payload) + await unarchive_dir(archive_path, destination) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_setup.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_setup.py new file mode 100644 index 
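# Self-contained illustration (stdlib only) of the idea behind dir_to_bytes/dir_from_bytes
# above: serialize a whole directory to a bytes payload and refuse payloads above a hard
# cap. The PR itself uses servicelib.archiving_utils (archive_dir/unarchive_dir) and a
# pydantic ByteSize of "128kib" for the limit; this sketch swaps in zipfile purely to keep
# the example dependency-free, so names and error types here are illustrative.
import io
import zipfile
from pathlib import Path

_MAX_PAYLOAD_SIZE = 128 * 1024  # bytes, mirrors the "128kib" cap used above


class PayloadTooBigError(RuntimeError):
    ...


def directory_to_bytes(source: Path) -> bytes:
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", compression=zipfile.ZIP_DEFLATED) as archive:
        for file_path in source.rglob("*"):
            if file_path.is_file():
                archive.write(file_path, arcname=file_path.relative_to(source))
    payload = buffer.getvalue()
    if len(payload) > _MAX_PAYLOAD_SIZE:
        raise PayloadTooBigError(f"{len(payload)=} exceeds {_MAX_PAYLOAD_SIZE=}")
    return payload


def bytes_to_directory(payload: bytes, destination: Path) -> None:
    destination.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(io.BytesIO(payload)) as archive:
        archive.extractall(destination)


if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        src, dst = Path(tmp) / "src", Path(tmp) / "dst"
        src.mkdir()
        (src / "settings.json").write_text('{"theme": "dark"}')
        bytes_to_directory(directory_to_bytes(src), dst)
        assert (dst / "settings.json").read_text() == '{"theme": "dark"}'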
00000000000..83915fc151a --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_setup.py @@ -0,0 +1,41 @@ +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import log_context + +from ...core.settings import ApplicationSettings +from ._manager import UserServicesPreferencesManager +from ._utils import is_feature_enabled + +_logger = logging.getLogger(__name__) + + +def setup_user_services_preferences(app: FastAPI) -> None: + async def on_startup() -> None: + with log_context(_logger, logging.INFO, "setup user services preferences"): + if is_feature_enabled(app): + settings: ApplicationSettings = app.state.settings + assert settings.DY_SIDECAR_USER_PREFERENCES_PATH # nosec + assert settings.DY_SIDECAR_SERVICE_KEY # nosec + assert settings.DY_SIDECAR_SERVICE_VERSION # nosec + assert settings.DY_SIDECAR_PRODUCT_NAME # nosec + + user_preferences_path = ( + settings.DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR + / settings.DY_SIDECAR_USER_PREFERENCES_PATH.relative_to("/") + ) + user_preferences_path.mkdir(parents=True, exist_ok=True) + + app.state.user_services_preferences_manager = ( + UserServicesPreferencesManager( + user_preferences_path=user_preferences_path, + service_key=settings.DY_SIDECAR_SERVICE_KEY, + service_version=settings.DY_SIDECAR_SERVICE_VERSION, + user_id=settings.DY_SIDECAR_USER_ID, + product_name=settings.DY_SIDECAR_PRODUCT_NAME, + ) + ) + else: + _logger.warning("user service preferences not mounted") + + app.add_event_handler("startup", on_startup) diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_user_preference.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_user_preference.py new file mode 100644 index 00000000000..5f96e43a73e --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_user_preference.py @@ -0,0 +1,19 @@ +from models_library.services import ServiceKey +from models_library.user_preferences import UserServiceUserPreference +from models_library.utils.change_case import snake_to_upper_camel +from pydantic import create_model + + +def get_model_class(service_key: ServiceKey) -> type[UserServiceUserPreference]: + base_model_name = snake_to_upper_camel( + service_key.replace("/", "_").replace("-", "_") + ) + model_class_name = f"{base_model_name}UserServiceUserPreference" + + model_type: type[UserServiceUserPreference] = ( + UserServiceUserPreference.registered_user_preference_classes[model_class_name] + if model_class_name + in UserServiceUserPreference.registered_user_preference_classes + else create_model(model_class_name, __base__=UserServiceUserPreference) + ) + return model_type diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_utils.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_utils.py new file mode 100644 index 00000000000..03cc8103997 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/modules/user_services_preferences/_utils.py @@ -0,0 +1,28 @@ +import logging + +from fastapi import FastAPI + +from ...core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def is_feature_enabled(app: FastAPI) -> bool: + settings: ApplicationSettings = app.state.settings + is_enabled = ( + settings.DY_SIDECAR_SERVICE_KEY is not None + and 
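# Simplified, standalone sketch of what get_model_class above does: derive a class name
# from the service key and either reuse a registered preference class or create one on the
# fly with pydantic's create_model. `BasePreference`, the `_REGISTRY` dict and the tiny
# camel-case helper are stand-ins for UserServiceUserPreference, its
# registered_user_preference_classes mapping and snake_to_upper_camel from models_library;
# they are not the real implementations.
from pydantic import BaseModel, create_model


class BasePreference(BaseModel):
    service_key: str
    service_version: str
    value: bytes


_REGISTRY: dict[str, type[BasePreference]] = {}


def _snake_to_upper_camel(name: str) -> str:
    return "".join(part.capitalize() for part in name.split("_") if part)


def model_class_for(service_key: str) -> type[BasePreference]:
    base_name = _snake_to_upper_camel(service_key.replace("/", "_").replace("-", "_"))
    class_name = f"{base_name}UserServiceUserPreference"
    if class_name in _REGISTRY:
        return _REGISTRY[class_name]
    # not registered: fall back to a model created on the fly, as get_model_class does
    return create_model(class_name, __base__=BasePreference)


if __name__ == "__main__":
    cls = model_class_for("simcore/services/dynamic/test-service-34")
    print(cls.__name__)  # SimcoreServicesDynamicTestService34UserServiceUserPreference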
settings.DY_SIDECAR_SERVICE_VERSION is not None + and settings.DY_SIDECAR_USER_PREFERENCES_PATH is not None + and settings.DY_SIDECAR_PRODUCT_NAME is not None + and settings.POSTGRES_SETTINGS is not None + ) + if not is_enabled: + _logger.warning( + "user services preferences is manager is not enabled: %s, %s, %s, %s, %s", + f"{settings.DY_SIDECAR_SERVICE_KEY=}", + f"{settings.DY_SIDECAR_SERVICE_VERSION=}", + f"{settings.DY_SIDECAR_USER_PREFERENCES_PATH=}", + f"{settings.DY_SIDECAR_PRODUCT_NAME=}", + f"{settings.POSTGRES_SETTINGS=}", + ) + return is_enabled diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/__init__.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/disk.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/disk.py new file mode 100644 index 00000000000..316a86d6036 --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/disk.py @@ -0,0 +1,5 @@ +from ..core.reserved_space import remove_reserved_disk_space + +__all__: tuple[str, ...] = ("remove_reserved_disk_space",) + +# nopycln: file diff --git a/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/volumes.py b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/volumes.py new file mode 100644 index 00000000000..366276bbaed --- /dev/null +++ b/services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/services/volumes.py @@ -0,0 +1,12 @@ +from fastapi import FastAPI +from models_library.sidecar_volumes import VolumeCategory, VolumeState, VolumeStatus + +from ..models.shared_store import get_shared_store + + +async def save_volume_state( + app: FastAPI, *, status: VolumeStatus, category: VolumeCategory +) -> None: + shared_store = get_shared_store(app) + async with shared_store: + shared_store.volume_states[category] = VolumeState(status=status) diff --git a/services/dynamic-sidecar/tests/conftest.py b/services/dynamic-sidecar/tests/conftest.py index 0ae0fde09fe..b0cf6b67413 100644 --- a/services/dynamic-sidecar/tests/conftest.py +++ b/services/dynamic-sidecar/tests/conftest.py @@ -1,28 +1,36 @@ # pylint: disable=redefined-outer-name # pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument # pylint: disable=unused-variable +import json import logging import sys +from collections.abc import Iterable, Iterator from pathlib import Path -from typing import Iterable, Iterator +from unittest.mock import AsyncMock import pytest import simcore_service_dynamic_sidecar +from common_library.json_serialization import json_dumps from faker import Faker from models_library.projects import ProjectID from models_library.projects_nodes import NodeID -from models_library.services import RunID +from models_library.services import ServiceRunID +from models_library.services_creation import CreateServiceMetricsAdditionalParams from models_library.users import UserID -from pytest import LogCaptureFixture, MonkeyPatch -from pytest_simcore.helpers.utils_envs import ( +from pydantic import TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import ( EnvVarsDict, setenvs_from_dict, setenvs_from_envfile, ) -from servicelib.json_serialization import json_dumps +from simcore_service_dynamic_sidecar.core.reserved_space import ( + 
remove_reserved_disk_space, +) logger = logging.getLogger(__name__) @@ -30,12 +38,15 @@ "pytest_simcore.docker_compose", "pytest_simcore.docker_registry", "pytest_simcore.docker_swarm", - "pytest_simcore.monkeypatch_extra", # TODO: remove this dependency + "pytest_simcore.faker_users_data", + "pytest_simcore.minio_service", + "pytest_simcore.postgres_service", "pytest_simcore.pytest_global_environs", "pytest_simcore.rabbit_service", + "pytest_simcore.redis_service", "pytest_simcore.repository_paths", "pytest_simcore.simcore_service_library_fixtures", - "pytest_simcore.tmp_path_extra", + "pytest_simcore.socketio", ] CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent @@ -103,11 +114,6 @@ def state_exclude_dirs(container_base_dir: Path) -> list[Path]: return [container_base_dir / f"exclude_{i}" for i in range(4)] -@pytest.fixture -def user_id(faker: Faker) -> UserID: - return faker.pyint(min_value=1) - - @pytest.fixture def project_id(faker: Faker) -> ProjectID: return faker.uuid4(cast_to=None) @@ -119,13 +125,47 @@ def node_id(faker: Faker) -> NodeID: @pytest.fixture -def run_id(faker: Faker) -> RunID: - return faker.uuid4(cast_to=None) +def service_run_id() -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_dynamic() @pytest.fixture -def mock_environment( - monkeypatch: MonkeyPatch, +def ensure_shared_store_dir(shared_store_dir: Path) -> Iterator[Path]: + shared_store_dir.mkdir(parents=True, exist_ok=True) + assert shared_store_dir.exists() is True + + yield shared_store_dir + + # remove files and dir + for f in shared_store_dir.glob("*"): + f.unlink() + shared_store_dir.rmdir() + assert shared_store_dir.exists() is False + + +@pytest.fixture +def mock_storage_check(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.core.external_dependencies.wait_for_storage_liveness", + ) + + +@pytest.fixture +def mock_postgres_check(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.core.external_dependencies.wait_for_database_liveness", + ) + + +@pytest.fixture +def mock_rabbit_check(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.core.external_dependencies.wait_for_rabbitmq_liveness", + ) + + +@pytest.fixture +def base_mock_envs( dy_volumes: Path, shared_store_dir: Path, compose_namespace: str, @@ -133,81 +173,193 @@ def mock_environment( outputs_dir: Path, state_paths_dirs: list[Path], state_exclude_dirs: list[Path], + node_id: NodeID, + service_run_id: ServiceRunID, + ensure_shared_store_dir: None, +) -> EnvVarsDict: + return { + # envs in Dockerfile + "SC_BOOT_MODE": "production", + "SC_BUILD_TARGET": "production", + "DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR": f"{dy_volumes}", + "DYNAMIC_SIDECAR_SHARED_STORE_DIR": f"{shared_store_dir}", + # envs on container + "DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": compose_namespace, + "DY_SIDECAR_RUN_ID": service_run_id, + "DY_SIDECAR_NODE_ID": f"{node_id}", + "DY_SIDECAR_PATH_INPUTS": f"{inputs_dir}", + "DY_SIDECAR_PATH_OUTPUTS": f"{outputs_dir}", + "DY_SIDECAR_STATE_PATHS": json_dumps(state_paths_dirs), + "DY_SIDECAR_STATE_EXCLUDE": json_dumps(state_exclude_dirs), + "DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS": "false", + "DY_DEPLOYMENT_REGISTRY_SETTINGS": json.dumps( + { + "REGISTRY_AUTH": "false", + "REGISTRY_USER": "test", + "REGISTRY_PW": "test", + "REGISTRY_SSL": "false", + "REGISTRY_URL": "registry.pytest.com", + } + ), + "DYNAMIC_SIDECAR_TRACING": "null", + } + + +@pytest.fixture +def mock_environment( + 
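# Hedged sketch of the fixture-layering pattern used around here: a base dict of env vars
# (base_mock_envs) is merged with test-specific overrides and applied through pytest's
# MonkeyPatch. `_apply_envs` only approximates what pytest_simcore's setenvs_from_dict
# presumably does; it is not the real helper, and the variable values are placeholders.
import pytest


def _apply_envs(monkeypatch: pytest.MonkeyPatch, envs: dict[str, str]) -> dict[str, str]:
    for name, value in envs.items():
        monkeypatch.setenv(name, value)
    return envs


@pytest.fixture
def layered_environment(monkeypatch: pytest.MonkeyPatch) -> dict[str, str]:
    base = {"SC_BOOT_MODE": "production", "SC_BUILD_TARGET": "production"}
    overrides = {"R_CLONE_PROVIDER": "MINIO"}
    # later keys win, so test-specific overrides take precedence over the base set
    return _apply_envs(monkeypatch, {**base, **overrides})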
mock_storage_check: None, + mock_postgres_check: None, + mock_rabbit_check: None, + monkeypatch: pytest.MonkeyPatch, + base_mock_envs: EnvVarsDict, user_id: UserID, project_id: ProjectID, + state_paths_dirs: list[Path], + state_exclude_dirs: list[Path], node_id: NodeID, - run_id: RunID, + service_run_id: ServiceRunID, + inputs_dir: Path, + compose_namespace: str, + outputs_dir: Path, + dy_volumes: Path, + shared_store_dir: Path, + faker: Faker, ) -> EnvVarsDict: """Main test environment used to build the application Override if new configuration for the app is needed. """ - envs = {} - # envs in Dockerfile - envs["SC_BOOT_MODE"] = "production" - envs["SC_BUILD_TARGET"] = "production" - envs["DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR"] = f"{dy_volumes}" - envs["DYNAMIC_SIDECAR_SHARED_STORE_DIR"] = f"{shared_store_dir}" - - # envs on container - envs["DYNAMIC_SIDECAR_COMPOSE_NAMESPACE"] = compose_namespace - - envs["REGISTRY_AUTH"] = "false" - envs["REGISTRY_USER"] = "test" - envs["REGISTRY_PW"] = "test" - envs["REGISTRY_SSL"] = "false" - - envs["DY_SIDECAR_USER_ID"] = f"{user_id}" - envs["DY_SIDECAR_PROJECT_ID"] = f"{project_id}" - envs["DY_SIDECAR_RUN_ID"] = f"{run_id}" - envs["DY_SIDECAR_NODE_ID"] = f"{node_id}" - envs["DY_SIDECAR_PATH_INPUTS"] = f"{inputs_dir}" - envs["DY_SIDECAR_PATH_OUTPUTS"] = f"{outputs_dir}" - envs["DY_SIDECAR_STATE_PATHS"] = json_dumps(state_paths_dirs) - envs["DY_SIDECAR_STATE_EXCLUDE"] = json_dumps(state_exclude_dirs) - envs["DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS"] = "false" - - envs["S3_ENDPOINT"] = "endpoint" - envs["S3_ACCESS_KEY"] = "access_key" - envs["S3_SECRET_KEY"] = "secret_key" - envs["S3_BUCKET_NAME"] = "bucket_name" - envs["S3_SECURE"] = "false" - - envs["R_CLONE_PROVIDER"] = "MINIO" - - setenvs_from_dict(monkeypatch, envs) - - return envs + return setenvs_from_dict( + monkeypatch, + { + # envs in Dockerfile + "DYNAMIC_SIDECAR_DY_VOLUMES_MOUNT_DIR": f"{dy_volumes}", + "DYNAMIC_SIDECAR_SHARED_STORE_DIR": f"{shared_store_dir}", + "SC_BOOT_MODE": "production", + "SC_BUILD_TARGET": "production", + # envs on container + "DY_SIDECAR_CALLBACKS_MAPPING": "{}", + "DY_SIDECAR_NODE_ID": f"{node_id}", + "DY_SIDECAR_PATH_INPUTS": f"{inputs_dir}", + "DY_SIDECAR_PATH_OUTPUTS": f"{outputs_dir}", + "DY_SIDECAR_PROJECT_ID": f"{project_id}", + "DY_SIDECAR_RUN_ID": service_run_id, + "DY_SIDECAR_STATE_EXCLUDE": json_dumps(state_exclude_dirs), + "DY_SIDECAR_STATE_PATHS": json_dumps(state_paths_dirs), + "DY_SIDECAR_USER_ID": f"{user_id}", + "DY_SIDECAR_USER_SERVICES_HAVE_INTERNET_ACCESS": "false", + "DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": compose_namespace, + "POSTGRES_DB": "test", + "POSTGRES_HOST": "test", + "POSTGRES_PASSWORD": "test", + "POSTGRES_USER": "test", + "R_CLONE_PROVIDER": "MINIO", + "RABBIT_HOST": "test", + "RABBIT_PASSWORD": "test", + "RABBIT_SECURE": "false", + "RABBIT_USER": "test", + "S3_ACCESS_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + "S3_ENDPOINT": faker.url(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "DY_DEPLOYMENT_REGISTRY_SETTINGS": json.dumps( + { + "REGISTRY_AUTH": "false", + "REGISTRY_USER": "test", + "REGISTRY_PW": "test", + "REGISTRY_SSL": "false", + "REGISTRY_URL": "registry.pytest.com", + } + ), + }, + ) @pytest.fixture def mock_environment_with_envdevel( - monkeypatch: MonkeyPatch, project_slug_dir: Path + monkeypatch: pytest.MonkeyPatch, project_slug_dir: Path ) -> EnvVarsDict: """Alternative environment loaded fron .env-devel. 
.env-devel is used mainly to run CLI """ env_file = project_slug_dir / ".env-devel" - envs = setenvs_from_envfile(monkeypatch, env_file.read_text()) - return envs + return setenvs_from_envfile(monkeypatch, env_file.read_text()) + + +@pytest.fixture() +def caplog_info_debug( + caplog: pytest.LogCaptureFixture, +) -> Iterable[pytest.LogCaptureFixture]: + with caplog.at_level(logging.DEBUG): + yield caplog @pytest.fixture -def ensure_shared_store_dir(shared_store_dir: Path) -> Iterator[Path]: - shared_store_dir.mkdir(parents=True, exist_ok=True) - assert shared_store_dir.exists() is True +def mock_registry_service(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_dynamic_sidecar.core.registry._login_registry", + autospec=True, + ) - yield shared_store_dir - # remove files and dir - for f in shared_store_dir.glob("*"): - f.unlink() - shared_store_dir.rmdir() - assert shared_store_dir.exists() is False +@pytest.fixture +def mock_core_rabbitmq(mocker: MockerFixture) -> dict[str, AsyncMock]: + """mocks simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQClient member functions""" + return { + "wait_till_rabbitmq_responsive": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq.wait_for_rabbitmq_liveness", + return_value=None, + autospec=True, + ), + "post_rabbit_message": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq._post_rabbit_message", + return_value=None, + autospec=True, + ), + "close": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQClient.close", + return_value=None, + autospec=True, + ), + "rpc._rpc_initialize": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQRPCClient._rpc_initialize", + return_value=None, + autospec=True, + ), + "rpc.close": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQRPCClient.close", + return_value=None, + autospec=True, + ), + "rpc.register_router": mocker.patch( + "simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQRPCClient.register_router", + return_value=None, + autospec=True, + ), + } -@pytest.fixture() -def caplog_info_debug(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]: - with caplog.at_level(logging.DEBUG): - yield caplog +@pytest.fixture +def mock_stop_heart_beat_task(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_dynamic_sidecar.modules.resource_tracking._core.stop_heart_beat_task", + return_value=None, + ) + + +@pytest.fixture +def mock_metrics_params(faker: Faker) -> CreateServiceMetricsAdditionalParams: + return TypeAdapter(CreateServiceMetricsAdditionalParams).validate_python( + CreateServiceMetricsAdditionalParams.model_config["json_schema_extra"][ + "example" + ], + ) + + +@pytest.fixture +def cleanup_reserved_disk_space() -> Iterator[None]: + remove_reserved_disk_space() + yield + remove_reserved_disk_space() diff --git a/services/dynamic-sidecar/tests/integration/conftest.py b/services/dynamic-sidecar/tests/integration/conftest.py new file mode 100644 index 00000000000..8c7e5e79533 --- /dev/null +++ b/services/dynamic-sidecar/tests/integration/conftest.py @@ -0,0 +1,36 @@ +from collections.abc import Iterable + +import pytest +import sqlalchemy as sa +from models_library.users import UserID +from pytest_simcore.helpers.faker_factories import random_user +from simcore_postgres_database.models.users import users + +pytest_plugins = [ + "pytest_simcore.postgres_service", + "pytest_simcore.simcore_storage_service", + "pytest_simcore.rabbit_service", +] + + +@pytest.fixture +def 
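# The user_id (and later project_id) fixtures in these integration tests follow an
# insert-yield-cleanup pattern against the simcore postgres tables. Below is a generic,
# self-contained version of that pattern for reference only: the toy `items` table and the
# in-memory sqlite engine are illustrative stand-ins, not part of this PR.
from collections.abc import Iterable

import pytest
import sqlalchemy as sa

metadata = sa.MetaData()
items = sa.Table(
    "items",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String, nullable=False),
)


@pytest.fixture
def sqlite_db() -> Iterable[sa.engine.Engine]:
    engine = sa.create_engine("sqlite://")
    metadata.create_all(engine)
    yield engine
    engine.dispose()


@pytest.fixture
def item_id(sqlite_db: sa.engine.Engine) -> Iterable[int]:
    # inject a row before the test ...
    with sqlite_db.begin() as conn:
        result = conn.execute(items.insert().values(name="test"))
        new_id = result.inserted_primary_key[0]

    yield new_id

    # ... and remove it afterwards so tests stay independent
    with sqlite_db.begin() as conn:
        conn.execute(items.delete().where(items.c.id == new_id))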
user_id(postgres_db: sa.engine.Engine) -> Iterable[UserID]: + # inject user in db + + # NOTE: Ideally this (and next fixture) should be done via webserver API but at this point + # in time, the webserver service would bring more dependencies to other services + # which would turn this test too complex. + + # pylint: disable=no-value-for-parameter + stmt = users.insert().values(**random_user(name="test")).returning(users.c.id) + print(f"{stmt}") + with postgres_db.connect() as conn: + result = conn.execute(stmt) + row = result.first() + assert row + usr_id = row[users.c.id] + + yield usr_id + + with postgres_db.connect() as conn: + conn.execute(users.delete().where(users.c.id == usr_id)) diff --git a/services/dynamic-sidecar/tests/integration/test_modules_long_running_tasks.py b/services/dynamic-sidecar/tests/integration/test_modules_long_running_tasks.py new file mode 100644 index 00000000000..b7d45d90654 --- /dev/null +++ b/services/dynamic-sidecar/tests/integration/test_modules_long_running_tasks.py @@ -0,0 +1,494 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument + + +import filecmp +import os +import shutil +from collections.abc import AsyncIterable, Iterable +from pathlib import Path +from typing import Final, cast +from unittest.mock import AsyncMock + +import aioboto3 +import pytest +import sqlalchemy as sa +from aiobotocore.session import ClientCreatorContext +from async_asgi_testclient import TestClient +from botocore.client import Config +from botocore.exceptions import ClientError +from fastapi import FastAPI +from models_library.api_schemas_storage.storage_schemas import S3BucketName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID, SimcoreS3FileID +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.faker_factories import random_project +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import PostgresTestConfig +from pytest_simcore.helpers.storage import replace_storage_endpoint +from servicelib.fastapi.long_running_tasks.server import TaskProgress +from servicelib.utils import logged_gather +from settings_library.s3 import S3Settings +from simcore_postgres_database.models.projects import projects +from simcore_sdk.node_ports_common.constants import SIMCORE_LOCATION +from simcore_sdk.node_ports_common.filemanager import upload_path +from simcore_service_dynamic_sidecar.core.application import AppState, create_app +from simcore_service_dynamic_sidecar.core.utils import HIDDEN_FILE_NAME +from simcore_service_dynamic_sidecar.modules.long_running_tasks import ( + task_restore_state, + task_save_state, +) +from types_aiobotocore_s3 import S3Client +from yarl import URL + +pytest_simcore_core_services_selection = [ + "migration", + "postgres", + "rabbit", + "redis", + "storage", + "sto-worker", +] + +pytest_simcore_ops_services_selection = [ + "minio", + "adminer", +] + + +TO_REMOVE: set[Path] = {Path(HIDDEN_FILE_NAME)} + + +@pytest.fixture +def project_id(user_id: int, postgres_db: sa.engine.Engine) -> Iterable[ProjectID]: + # inject project for user in db. 
This will give user_id, the full project's ownership + + # pylint: disable=no-value-for-parameter + stmt = ( + projects.insert() + .values(**random_project(prj_owner=user_id)) + .returning(projects.c.uuid) + ) + print(f"{stmt}") + with postgres_db.connect() as conn: + result = conn.execute(stmt) + row = result.first() + assert row + prj_uuid = row[projects.c.uuid] + + yield prj_uuid + + with postgres_db.connect() as conn: + conn.execute(projects.delete().where(projects.c.uuid == prj_uuid)) + + +@pytest.fixture +def mock_environment( + mock_storage_check: None, + mock_rabbit_check: None, + rabbit_service, + postgres_host_config: PostgresTestConfig, + storage_endpoint: URL, + minio_s3_settings_envs: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + base_mock_envs: EnvVarsDict, + user_id: UserID, + project_id: ProjectID, +) -> EnvVarsDict: + assert storage_endpoint.host + + envs: EnvVarsDict = { + "STORAGE_HOST": storage_endpoint.host, + "STORAGE_PORT": f"{storage_endpoint.port}", + "DY_SIDECAR_USER_ID": f"{user_id}", + "DY_SIDECAR_PROJECT_ID": f"{project_id}", + "R_CLONE_PROVIDER": "MINIO", + "DY_SIDECAR_CALLBACKS_MAPPING": "{}", + **{k: f"{v}" for k, v in rabbit_service.dict().items()}, + **base_mock_envs, + } + + setenvs_from_dict(monkeypatch, envs) + return envs + + +@pytest.fixture +def app( + mock_environment: EnvVarsDict, + mock_registry_service: AsyncMock, + mock_core_rabbitmq: dict[str, AsyncMock], +) -> FastAPI: + """creates app with registry and rabbitMQ services mocked""" + return create_app() + + +@pytest.fixture +async def test_client(app: FastAPI) -> AsyncIterable[TestClient]: + async with TestClient(app) as client: + yield client + + +@pytest.fixture +def task_progress() -> TaskProgress: + return TaskProgress.create() + + +@pytest.fixture +def app_state(app: FastAPI) -> AppState: + return AppState(app) + + +@pytest.fixture +def state_paths_to_legacy_archives( + app_state: AppState, project_tests_dir: Path +) -> dict[Path, Path]: + LEGACY_STATE_ARCHIVES_DIR = project_tests_dir / "mocks" / "legacy_state_archives" + assert LEGACY_STATE_ARCHIVES_DIR.exists() + + results: dict[Path, Path] = {} + for state_path in app_state.mounted_volumes.disk_state_paths_iter(): + legacy_archive_name = f"{state_path.name}.zip" + legacy_archive_path = LEGACY_STATE_ARCHIVES_DIR / legacy_archive_name + assert legacy_archive_path.exists() + results[state_path] = legacy_archive_path + + return results + + +@pytest.fixture +async def simcore_storage_service(mocker: MockerFixture, app: FastAPI) -> None: + storage_host: Final[str] | None = os.environ.get("STORAGE_HOST") + storage_port: Final[str] | None = os.environ.get("STORAGE_PORT") + assert storage_host is not None + assert storage_port is not None + + # NOTE: Mock to ensure container IP agrees with host IP when testing + mocker.patch( + "simcore_sdk.node_ports_common._filemanager_utils._get_https_link_if_storage_secure", + replace_storage_endpoint(storage_host, int(storage_port)), + ) + + +@pytest.fixture +async def restore_legacy_state_archives( + test_client: TestClient, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + state_paths_to_legacy_archives: dict[Path, Path], +) -> None: + tasks = [] + for legacy_archive_zip in state_paths_to_legacy_archives.values(): + s3_path = f"{project_id}/{node_id}/{legacy_archive_zip.name}" + tasks.append( + upload_path( + user_id=user_id, + store_id=SIMCORE_LOCATION, + store_name=None, + s3_object=TypeAdapter(SimcoreS3FileID).validate_python(s3_path), + path_to_upload=legacy_archive_zip, + 
io_log_redirect_cb=None, + ) + ) + + await logged_gather(*tasks) + + +def _generate_content(root_path: Path, *, file_prefix: str, payload: str = "a") -> None: + # NOTE: this was also used to generate the content of the zips + # stored inside `mocks/legacy_state_archives` directory + # NOTE: changing this function's outcome requires the + # regeneration of the zip archives + assert root_path.exists() + + paths: set[Path] = { + root_path / "s" / "u" / "b" / "d" / "i" / "r" / "s", + root_path / "first-level", + root_path / "111", + root_path / "dir.txt", + root_path, + } + + file_names: set[str] = {f"{file_prefix}_file-{i}.txt" for i in range(10)} + + for path in paths: + for file_name in file_names: + path.mkdir(parents=True, exist_ok=True) + (path / file_name).write_text(payload) + + +@pytest.fixture +def expected_contents_paths(app_state: AppState, tmp_path: Path) -> dict[Path, Path]: + results: dict[Path, Path] = {} + + expected_contents_dir = tmp_path / "expected_contents" + + for k, state_path in enumerate(app_state.mounted_volumes.disk_state_paths_iter()): + expected_state_path_dir = expected_contents_dir / state_path.name + expected_state_path_dir.mkdir(parents=True, exist_ok=True) + _generate_content(expected_state_path_dir, file_prefix=f"{k}_") + results[state_path] = expected_state_path_dir + + return results + + +def _files_in_dir( + dir_path: Path, + *, + include_parent_dir_name: bool = False, + discard: set[Path] | None = None, +) -> set[Path]: + parent_dir_name = dir_path.name if include_parent_dir_name else "" + result = { + Path(parent_dir_name) / p.relative_to(dir_path) + for p in dir_path.rglob("*") + if p.is_file() + } + + if discard is None: + return result + + for entry in discard: + result.discard(Path(parent_dir_name) / entry) + return result + + +def _delete_files_in_dir(dir_path: Path) -> None: + for file in _files_in_dir(dir_path): + (dir_path / file).unlink() + + +def _assert_same_directory_content(dir1: Path, dir2: Path) -> None: + # NOTE: the HIDDEN_FILE_NAME is added automatically by the dy-sidecar + # when it initializes, this is added below just for the comparison + + files_in_dir1 = _files_in_dir(dir1, discard=TO_REMOVE) + files_in_dir2 = _files_in_dir(dir2, discard=TO_REMOVE) + + all_files_in_both_dirs = files_in_dir1 & files_in_dir2 + + # ensure files overlap + assert len(files_in_dir1) > 0, "Expected at least one file!" 
+ assert len(all_files_in_both_dirs) == len(files_in_dir1) + assert len(all_files_in_both_dirs) == len(files_in_dir2) + + for file in all_files_in_both_dirs: + f_in_dir1 = dir1 / file + f_in_dir2 = dir2 / file + + assert f_in_dir1.exists() + assert f_in_dir2.exists() + + assert filecmp.cmp(f_in_dir1, f_in_dir2, shallow=False) + + +@pytest.fixture +def s3_settings(app_state: AppState) -> S3Settings: + return app_state.settings.DY_SIDECAR_R_CLONE_SETTINGS.R_CLONE_S3 + + +@pytest.fixture +def bucket_name(app_state: AppState) -> S3BucketName: + return TypeAdapter(S3BucketName).validate_python( + app_state.settings.DY_SIDECAR_R_CLONE_SETTINGS.R_CLONE_S3.S3_BUCKET_NAME, + ) + + +@pytest.fixture +async def s3_client(s3_settings: S3Settings) -> AsyncIterable[S3Client]: + session = aioboto3.Session() + session_client = session.client( + "s3", + endpoint_url=f"{s3_settings.S3_ENDPOINT}", + aws_access_key_id=s3_settings.S3_ACCESS_KEY, + aws_secret_access_key=s3_settings.S3_SECRET_KEY, + region_name=s3_settings.S3_REGION, + config=Config(signature_version="s3v4"), + ) + assert isinstance(session_client, ClientCreatorContext) # nosec + async with session_client as client: + client = cast(S3Client, client) + yield client + + +async def _is_key_in_s3( + s3_client: S3Client, bucket_name: S3BucketName, key: str +) -> bool: + try: + await s3_client.head_object(Bucket=bucket_name, Key=key) + except ClientError: + return False + return True + + +async def _assert_keys_in_s3( + s3_client: S3Client, + bucket_name: S3BucketName, + keys: list[str], + *, + each_key_is_in_s3: bool, +) -> None: + keys_exist_in_s3 = await logged_gather( + *[ + _is_key_in_s3(s3_client=s3_client, bucket_name=bucket_name, key=key) + for key in keys + ] + ) + results: dict[str, bool] = dict(zip(keys, keys_exist_in_s3, strict=True)) + for key, key_exists in results.items(): + assert ( + key_exists is each_key_is_in_s3 + ), f"Unexpected result: {key_exists=} != {each_key_is_in_s3=} for '{key}'\nAll results: {results}" + + +def _get_expected_s3_objects( + project_id: ProjectID, node_id: NodeID, state_dirs: list[Path] +) -> list[str]: + result: set[Path] = set() + for state_path in state_dirs: + result |= _files_in_dir(state_path, include_parent_dir_name=True) + return [f"{project_id}/{node_id}/{x}" for x in result] + + +@pytest.mark.parametrize("repeat_count", [1, 2]) +async def test_legacy_state_open_and_clone( + simcore_storage_service: None, + restore_legacy_state_archives: None, + state_paths_to_legacy_archives: dict[Path, Path], + expected_contents_paths: dict[Path, Path], + app: FastAPI, + app_state: AppState, + task_progress: TaskProgress, + project_id: ProjectID, + node_id: NodeID, + s3_client: S3Client, + bucket_name: S3BucketName, + repeat_count: int, +): + # NOTE: this tests checks that the legacy state is migrated to the new style state + + # restore state from legacy archives + for _ in range(repeat_count): + await task_restore_state( + progress=task_progress, + settings=app_state.settings, + mounted_volumes=app_state.mounted_volumes, + app=app, + ) + + # check that legacy and generated folder content is the same + for state_dir_path, expected_content_dir_path in expected_contents_paths.items(): + _assert_same_directory_content(state_dir_path, expected_content_dir_path) + + # check that the file is still present in storage s3 + legacy_s3_keys: list[str] = [ + f"{project_id}/{node_id}/{legacy_archive_path.name}" + for legacy_archive_path in state_paths_to_legacy_archives.values() + ] + await _assert_keys_in_s3( + s3_client, 
bucket_name, keys=legacy_s3_keys, each_key_is_in_s3=True + ) + + for _ in range(repeat_count): + await task_save_state( + progress=task_progress, + settings=app_state.settings, + mounted_volumes=app_state.mounted_volumes, + app=app, + ) + + # check that all local files are present in s3 + expected_s3_objects = _get_expected_s3_objects( + project_id, node_id, list(expected_contents_paths.keys()) + ) + await _assert_keys_in_s3( + s3_client, bucket_name, keys=expected_s3_objects, each_key_is_in_s3=True + ) + + # the legacy archives should now be missing + await _assert_keys_in_s3( + s3_client, bucket_name, keys=legacy_s3_keys, each_key_is_in_s3=False + ) + + +@pytest.mark.parametrize("repeat_count", [1, 2]) +async def test_state_open_and_close( + simcore_storage_service: None, + test_client: TestClient, + state_paths_to_legacy_archives: dict[Path, Path], + expected_contents_paths: dict[Path, Path], + app: FastAPI, + app_state: AppState, + task_progress: TaskProgress, + project_id: ProjectID, + node_id: NodeID, + s3_client: S3Client, + bucket_name: S3BucketName, + repeat_count: int, +): + # NOTE: this is the new style of opening and closing the state using directories + + # restoring finds nothing inside + for _ in range(repeat_count): + await task_restore_state( + progress=task_progress, + settings=app_state.settings, + mounted_volumes=app_state.mounted_volumes, + app=app, + ) + + # check that no other objects are in s3 + expected_s3_objects = _get_expected_s3_objects( + project_id, node_id, list(expected_contents_paths.keys()) + ) + await _assert_keys_in_s3( + s3_client, bucket_name, keys=expected_s3_objects, each_key_is_in_s3=False + ) + + # check that no files are present in the local directories + for state_dir_path in expected_contents_paths: + assert len(_files_in_dir(state_dir_path, discard=TO_REMOVE)) == 0 + + # copy the content to be generated to the local folder + for state_dir_path, expected_content_dir_path in expected_contents_paths.items(): + shutil.copytree(expected_content_dir_path, state_dir_path, dirs_exist_ok=True) + _assert_same_directory_content(state_dir_path, expected_content_dir_path) + + # save them to S3 + for _ in range(repeat_count): + await task_save_state( + progress=task_progress, + settings=app_state.settings, + mounted_volumes=app_state.mounted_volumes, + app=app, + ) + + # check generated files are in S3 + expected_s3_objects = _get_expected_s3_objects( + project_id, node_id, list(expected_contents_paths.keys()) + ) + assert len(expected_s3_objects) > 0 + await _assert_keys_in_s3( + s3_client, bucket_name, keys=expected_s3_objects, each_key_is_in_s3=True + ) + + # remove and check no file is present any longer + for state_dir_path in expected_contents_paths: + _delete_files_in_dir(state_dir_path) + assert len(_files_in_dir(state_dir_path, discard=TO_REMOVE)) == 0 + + # restore them from S3 + for _ in range(repeat_count): + await task_restore_state( + progress=task_progress, + settings=app_state.settings, + mounted_volumes=app_state.mounted_volumes, + app=app, + ) + + # check files are the same as the ones previously generated + for state_dir_path, expected_content_dir_path in expected_contents_paths.items(): + _assert_same_directory_content(state_dir_path, expected_content_dir_path) diff --git a/services/dynamic-sidecar/tests/integration/test_modules_user_services_preferences.py b/services/dynamic-sidecar/tests/integration/test_modules_user_services_preferences.py new file mode 100644 index 00000000000..9be0bbdebbf --- /dev/null +++ 
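# Compact, standalone variant of the directory-comparison helpers used by the state tests
# above (_files_in_dir / _assert_same_directory_content): two trees match when they contain
# the same relative file paths with byte-identical contents. Purely illustrative; the tests
# keep their own helpers so they can also discard the sidecar's hidden marker file.
import filecmp
from pathlib import Path


def relative_files(root: Path) -> set[Path]:
    return {p.relative_to(root) for p in root.rglob("*") if p.is_file()}


def assert_trees_equal(dir1: Path, dir2: Path) -> None:
    files1, files2 = relative_files(dir1), relative_files(dir2)
    assert files1, "expected at least one file"
    assert files1 == files2, f"tree mismatch: {files1 ^ files2}"
    for rel_path in files1:
        # shallow=False forces a content comparison, not just os.stat metadata
        assert filecmp.cmp(dir1 / rel_path, dir2 / rel_path, shallow=False), rel_path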
b/services/dynamic-sidecar/tests/integration/test_modules_user_services_preferences.py @@ -0,0 +1,170 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=no-name-in-module + +from collections.abc import AsyncIterable +from pathlib import Path +from unittest.mock import AsyncMock + +import pytest +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.services import ServiceKey, ServiceVersion +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import PostgresTestConfig +from simcore_service_dynamic_sidecar.core.application import create_app +from simcore_service_dynamic_sidecar.modules.user_services_preferences import ( + load_user_services_preferences, + save_user_services_preferences, +) +from simcore_service_dynamic_sidecar.modules.user_services_preferences._manager import ( + UserServicesPreferencesManager, +) +from simcore_service_dynamic_sidecar.modules.user_services_preferences._utils import ( + is_feature_enabled, +) + +pytest_simcore_core_services_selection = [ + "migration", + "postgres", +] + +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def dy_sidecar_user_preferences_path(tmp_path: Path) -> Path: + return Path("/tmp/service-owner/defined/path") # noqa: S108 + + +@pytest.fixture +def service_key() -> ServiceKey: + return TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/test-service-34" + ) + + +@pytest.fixture +def service_version() -> ServiceVersion: + return TypeAdapter(ServiceVersion).validate_python("1.0.0") + + +@pytest.fixture +def product_name() -> ProductName: + return TypeAdapter(ProductName).validate_python("osparc") + + +@pytest.fixture +def mock_environment( # pylint:disable=too-many-arguments + mock_rabbit_check: None, + mock_storage_check: None, + postgres_host_config: PostgresTestConfig, + monkeypatch: pytest.MonkeyPatch, + base_mock_envs: EnvVarsDict, + user_id: UserID, + project_id: ProjectID, + dy_sidecar_user_preferences_path: Path, + service_key: ServiceKey, + service_version: ServiceVersion, + product_name: ProductName, + faker: Faker, +) -> EnvVarsDict: + envs: EnvVarsDict = { + "DY_SIDECAR_USER_ID": f"{user_id}", + "DY_SIDECAR_PROJECT_ID": f"{project_id}", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + "RABBIT_HOST": "test", + "RABBIT_PASSWORD": "test", + "RABBIT_SECURE": "0", + "RABBIT_USER": "test", + "R_CLONE_PROVIDER": "MINIO", + "DY_SIDECAR_CALLBACKS_MAPPING": "{}", + "DY_SIDECAR_SERVICE_KEY": service_key, + "DY_SIDECAR_SERVICE_VERSION": service_version, + "DY_SIDECAR_USER_PREFERENCES_PATH": f"{dy_sidecar_user_preferences_path}", + "DY_SIDECAR_PRODUCT_NAME": product_name, + **base_mock_envs, + } + + setenvs_from_dict(monkeypatch, envs) + return envs + + +@pytest.fixture +async def app( + mock_environment: EnvVarsDict, + mock_registry_service: AsyncMock, + mock_core_rabbitmq: dict[str, AsyncMock], +) -> AsyncIterable[FastAPI]: + app = create_app() + async with LifespanManager(app): + yield app + + +@pytest.fixture +def user_preferences_path(app: FastAPI) -> Path: + user_services_preferences_manager: UserServicesPreferencesManager = ( 
+ app.state.user_services_preferences_manager + ) + return user_services_preferences_manager.user_preferences_path + + +def _get_files_preferences_path(user_preferences_path: Path) -> set[Path]: + return {x for x in user_preferences_path.rglob("*") if x.is_file()} + + +def _make_files_in_preferences_path( + user_preferences_path: Path, file_count: int, subdir_count: int +) -> set[Path]: + file_names: set[Path] = set() + for s in range(subdir_count): + (user_preferences_path / f"subdir-{s}").mkdir(parents=True, exist_ok=True) + for f in range(file_count): + file_name = user_preferences_path / f"subdir-{s}" / f"f{f}.txt" + file_name.write_text(f"file content {f}") + file_names.add(file_name) + return file_names + + +def _remove_files_in_preferences_path(user_preferences_path: Path): + for f in user_preferences_path.rglob("*"): + if f.is_file(): + f.unlink() + + +async def test_user_service_workflow(app: FastAPI, user_preferences_path: Path): + assert is_feature_enabled(app) + + # restore nothing is downloaded + await load_user_services_preferences(app) + assert len(_get_files_preferences_path(user_preferences_path)) == 0 + + # mock user service creates some preferences + FILE_COUNT = 4 + SUBDIR_COUNT = 2 + file_names = _make_files_in_preferences_path( + user_preferences_path, FILE_COUNT, SUBDIR_COUNT + ) + assert _get_files_preferences_path(user_preferences_path) == file_names + + # save preferences to be recovered at later date + await save_user_services_preferences(app) + + # mock service was closed + _remove_files_in_preferences_path(user_preferences_path) + assert len(_get_files_preferences_path(user_preferences_path)) == 0 + + # reopen service which had saved preferences + await load_user_services_preferences(app) + assert _get_files_preferences_path(user_preferences_path) == file_names diff --git a/services/dynamic-sidecar/tests/mocks/internet_blocked_spec.yaml b/services/dynamic-sidecar/tests/mocks/internet_blocked_spec.yaml new file mode 100644 index 00000000000..83bc4592b05 --- /dev/null +++ b/services/dynamic-sidecar/tests/mocks/internet_blocked_spec.yaml @@ -0,0 +1,141 @@ +networks: + dy-sidecar_a019b83f-7cce-46bf-90cf-d02f7f0f089a: + driver: overlay + external: true + name: dy-sidecar_a019b83f-7cce-46bf-90cf-d02f7f0f089a + egress-0: + internal: true + production-simcore_interactive_services_subnet: + driver: overlay + external: true + name: production-simcore_interactive_services_subnet + with-internet: + internal: false +services: + egress-0: + command: "fake" + environment: + OSPARC_NODE_ID: a019b83f-7cce-46bf-90cf-d02f7f0f089a + OSPARC_STUDY_ID: ef60eaa6-3f52-11ef-9745-0242ac172e8c + image: envoyproxy/envoy:v1.25-latest + labels: + - io.simcore.runtime.cpu-limit=0 + - io.simcore.runtime.memory-limit=0 + - io.simcore.runtime.node-id=a019b83f-7cce-46bf-90cf-d02f7f0f089a + - io.simcore.runtime.product-name=s4llite + - io.simcore.runtime.project-id=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - io.simcore.runtime.simcore-user-agent=undefined + - io.simcore.runtime.swarm-stack-name=production-simcore + - io.simcore.runtime.user-id=47568 + networks: + egress-0: + aliases: + - ip-10-1-0-25.ec2.internal + with-internet: null + rt-web-lite: + cpus: 0.5 + depends_on: + - s4l-core + environment: + - DY_SIDECAR_PATH_INPUTS=/home/smu/work/inputs + - DY_SIDECAR_PATH_OUTPUTS=/home/smu/work/outputs + - DY_SIDECAR_STATE_PATHS=[""/home/smu/work/workspace""] + - SIMCORE_NANO_CPUS_LIMIT=500000000 + - SIMCORE_MEMORY_BYTES_LIMIT=1073741824 + - OSPARC_STUDY_ID=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - 
OSPARC_NODE_ID=a019b83f-7cce-46bf-90cf-d02f7f0f089a + image: simcore/services/dynamic/sim4life-lite:2.0.106 + init: true + labels: + - io.simcore.runtime.cpu-limit=0.5 + - io.simcore.runtime.memory-limit=1073741824 + - io.simcore.runtime.node-id=a019b83f-7cce-46bf-90cf-d02f7f0f089a + - io.simcore.runtime.product-name=s4llite + - io.simcore.runtime.project-id=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - io.simcore.runtime.simcore-user-agent=undefined + - io.simcore.runtime.swarm-stack-name=production-simcore + - io.simcore.runtime.user-id=47568 + mem_limit: 1073741824 + mem_reservation: 1073741824 + networks: + dy-sidecar_a019b83f-7cce-46bf-90cf-d02f7f0f089a: null + s4l-core: + cpus: 3.5 + depends_on: + - egress-0 + environment: + - DISPLAY= + - DY_SIDECAR_PATH_INPUTS=/home/smu/work/inputs + - DY_SIDECAR_PATH_OUTPUTS=/home/smu/work/outputs + - DY_SIDECAR_STATE_PATHS=[""/home/smu/work/workspace""] + - SIMCORE_NANO_CPUS_LIMIT=3500000000 + - SIMCORE_MEMORY_BYTES_LIMIT=17179869184 + - OSPARC_STUDY_ID=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - OSPARC_NODE_ID=a019b83f-7cce-46bf-90cf-d02f7f0f089a + image: simcore/services/dynamic/s4l-core-lite:2.0.106 + init: true + labels: + - io.simcore.runtime.cpu-limit=3.5 + - io.simcore.runtime.memory-limit=17179869184 + - io.simcore.runtime.node-id=a019b83f-7cce-46bf-90cf-d02f7f0f089a + - io.simcore.runtime.product-name=s4llite + - io.simcore.runtime.project-id=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - io.simcore.runtime.simcore-user-agent=undefined + - io.simcore.runtime.swarm-stack-name=production-simcore + - io.simcore.runtime.user-id=47568 + mem_limit: 17179869184 + mem_reservation: 17179869184 + networks: + egress-0: null + runtime: nvidia + volumes: + - /tmp/.X11-unix:/tmp/.X11-unix + s4l-core-stream: + cpus: 0.5 + environment: + - DY_SIDECAR_PATH_INPUTS=/home/smu/work/inputs + - DY_SIDECAR_PATH_OUTPUTS=/home/smu/work/outputs + - DY_SIDECAR_STATE_PATHS=[""/home/smu/work/workspace""] + - SIMCORE_NANO_CPUS_LIMIT=500000000 + - SIMCORE_MEMORY_BYTES_LIMIT=1073741824 + - OSPARC_STUDY_ID=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - OSPARC_NODE_ID=a019b83f-7cce-46bf-90cf-d02f7f0f089a + image: simcore/services/dynamic/s4l-core-stream:2.0.106 + init: true + labels: + - io.simcore.runtime.cpu-limit=0.5 + - io.simcore.runtime.memory-limit=1073741824 + - io.simcore.runtime.node-id=a019b83f-7cce-46bf-90cf-d02f7f0f089a + - io.simcore.runtime.product-name=s4llite + - io.simcore.runtime.project-id=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - io.simcore.runtime.simcore-user-agent=undefined + - io.simcore.runtime.swarm-stack-name=production-simcore + - io.simcore.runtime.user-id=47568 + mem_limit: 1073741824 + mem_reservation: 1073741824 + networks: + with-internet: null + sym-server: + cpus: 0.5 + environment: + - DY_SIDECAR_PATH_INPUTS=/home/smu/work/inputs + - DY_SIDECAR_PATH_OUTPUTS=/home/smu/work/outputs + - DY_SIDECAR_STATE_PATHS=[""/home/smu/work/workspace""] + - SIMCORE_NANO_CPUS_LIMIT=500000000 + - SIMCORE_MEMORY_BYTES_LIMIT=2147483648 + - OSPARC_STUDY_ID=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - OSPARC_NODE_ID=a019b83f-7cce-46bf-90cf-d02f7f0f089a + image: simcore/services/dynamic/sym-server-dy:2.0.106 + init: true + labels: + - io.simcore.runtime.cpu-limit=0.5 + - io.simcore.runtime.memory-limit=2147483648 + - io.simcore.runtime.node-id=a019b83f-7cce-46bf-90cf-d02f7f0f089a + - io.simcore.runtime.product-name=s4llite + - io.simcore.runtime.project-id=ef60eaa6-3f52-11ef-9745-0242ac172e8c + - io.simcore.runtime.simcore-user-agent=undefined + - 
io.simcore.runtime.swarm-stack-name=production-simcore + - io.simcore.runtime.user-id=47568 + mem_limit: 2147483648 + mem_reservation: 2147483648 +version: \'2.3\' diff --git a/services/dynamic-sidecar/tests/mocks/legacy_shared_store.json b/services/dynamic-sidecar/tests/mocks/legacy_shared_store.json new file mode 100644 index 00000000000..102d872cf0b --- /dev/null +++ b/services/dynamic-sidecar/tests/mocks/legacy_shared_store.json @@ -0,0 +1 @@ +{"compose_spec": null, "container_names": [], "volume_states": {"INPUTS": {"status": "CONTENT_NO_SAVE_REQUIRED", "last_changed": "2023-06-28T07:18:11.157717+00:00"}, "SHARED_STORE": {"status": "CONTENT_NO_SAVE_REQUIRED", "last_changed": "2023-06-28T07:18:11.157741+00:00"}, "OUTPUTS": {"status": "CONTENT_NEEDS_TO_BE_SAVED", "last_changed": "2023-06-28T07:18:11.778735+00:00"}, "STATES": {"status": "CONTENT_NEEDS_TO_BE_SAVED", "last_changed": "2023-06-28T07:18:11.776789+00:00"}}, "original_to_container_names": {}} diff --git a/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir0.zip b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir0.zip new file mode 100644 index 00000000000..03e3cf8a42e Binary files /dev/null and b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir0.zip differ diff --git a/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir1.zip b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir1.zip new file mode 100644 index 00000000000..0e1a830e83a Binary files /dev/null and b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir1.zip differ diff --git a/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir2.zip b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir2.zip new file mode 100644 index 00000000000..3edd244404a Binary files /dev/null and b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir2.zip differ diff --git a/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir3.zip b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir3.zip new file mode 100644 index 00000000000..80575d182f1 Binary files /dev/null and b/services/dynamic-sidecar/tests/mocks/legacy_state_archives/state_dir3.zip differ diff --git a/services/dynamic-sidecar/tests/unit/api/rest/test_disk.py b/services/dynamic-sidecar/tests/unit/api/rest/test_disk.py new file mode 100644 index 00000000000..3d6bda8d8f1 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rest/test_disk.py @@ -0,0 +1,17 @@ +# pylint:disable=unused-argument + +from async_asgi_testclient import TestClient +from fastapi import status +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.core.reserved_space import ( + _RESERVED_DISK_SPACE_NAME, +) + + +async def test_reserved_disk_space_freed( + cleanup_reserved_disk_space: None, test_client: TestClient +): + assert _RESERVED_DISK_SPACE_NAME.exists() + response = await test_client.post(f"/{API_VTAG}/disk/reserved:free") + assert response.status_code == status.HTTP_204_NO_CONTENT, response.text + assert not _RESERVED_DISK_SPACE_NAME.exists() diff --git a/services/dynamic-sidecar/tests/unit/api/rest/test_volumes.py b/services/dynamic-sidecar/tests/unit/api/rest/test_volumes.py new file mode 100644 index 00000000000..40eab12336a --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rest/test_volumes.py @@ -0,0 +1,59 @@ +# pylint: disable=unused-argument + +from pathlib import Path + +import pytest +from 
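# Illustrative stdlib sketch of the idea behind the disk/reserved:free endpoint exercised
# above: presumably the sidecar pre-allocates a file at startup so that deleting it later
# instantly frees some headroom when the disk fills up. The path and size below are
# invented for the example; the real constants live in core.reserved_space
# (_RESERVED_DISK_SPACE_NAME / remove_reserved_disk_space), which this sketch does not use.
from pathlib import Path

_RESERVED_FILE = Path("/tmp/.reserved_disk_space")  # noqa: S108 - illustration only
_RESERVED_BYTES = 10 * 1024 * 1024  # 10 MiB, arbitrary for the example


def reserve_disk_space() -> None:
    # write a fixed-size file so the space is actually taken on disk
    _RESERVED_FILE.write_bytes(b"\0" * _RESERVED_BYTES)


def free_reserved_disk_space() -> None:
    _RESERVED_FILE.unlink(missing_ok=True)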
async_asgi_testclient import TestClient +from fastapi import status +from models_library.sidecar_volumes import VolumeCategory, VolumeState, VolumeStatus +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.models.shared_store import SharedStore + + +@pytest.mark.parametrize( + "volume_category, initial_expected_status", + [ + (VolumeCategory.STATES, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + (VolumeCategory.OUTPUTS, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + (VolumeCategory.INPUTS, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + (VolumeCategory.SHARED_STORE, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + ], +) +async def test_volumes_state_saved_ok( + ensure_shared_store_dir: Path, + test_client: TestClient, + volume_category: VolumeCategory, + initial_expected_status: VolumeStatus, +): + shared_store: SharedStore = test_client.application.state.shared_store + + # check that initial status is as expected + assert shared_store.volume_states[volume_category] == VolumeState( + status=initial_expected_status + ) + + response = await test_client.put( + f"/{API_VTAG}/volumes/{volume_category}", + json={"status": VolumeStatus.CONTENT_WAS_SAVED}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT, response.text + + # check that content was saved + assert shared_store.volume_states[volume_category] == VolumeState( + status=VolumeStatus.CONTENT_WAS_SAVED + ) + + +@pytest.mark.parametrize("invalid_volume_category", ["outputs", "outputS"]) +async def test_volumes_state_saved_error( + ensure_shared_store_dir: Path, + test_client: TestClient, + invalid_volume_category: VolumeCategory, +): + response = await test_client.put( + f"/{API_VTAG}/volumes/{invalid_volume_category}", + json={"status": VolumeStatus.CONTENT_WAS_SAVED}, + ) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, response.text + json_response = response.json() + assert invalid_volume_category not in json_response["detail"][0]["ctx"]["expected"] diff --git a/services/dynamic-sidecar/tests/unit/api/rpc/conftest.py b/services/dynamic-sidecar/tests/unit/api/rpc/conftest.py new file mode 100644 index 00000000000..6097c7a6d43 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rpc/conftest.py @@ -0,0 +1,48 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import AsyncIterable, Awaitable, Callable +from unittest.mock import AsyncMock + +import pytest +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.rabbitmq import RabbitMQRPCClient +from settings_library.rabbit import RabbitSettings +from simcore_service_dynamic_sidecar.core.application import create_app + + +@pytest.fixture +def mock_environment( + monkeypatch: pytest.MonkeyPatch, + rabbit_service: RabbitSettings, + mock_environment: EnvVarsDict, + mock_registry_service: AsyncMock, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE": "true", + "RABBIT_HOST": rabbit_service.RABBIT_HOST, + "RABBIT_PASSWORD": rabbit_service.RABBIT_PASSWORD.get_secret_value(), + "RABBIT_PORT": f"{rabbit_service.RABBIT_PORT}", + "RABBIT_SECURE": f"{rabbit_service.RABBIT_SECURE}", + "RABBIT_USER": rabbit_service.RABBIT_USER, + }, + ) + + +@pytest.fixture +async def app(mock_environment: EnvVarsDict) -> AsyncIterable[FastAPI]: + app = create_app() + async with LifespanManager(app): + yield app + + +@pytest.fixture +async def 
rpc_client( + app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") diff --git a/services/dynamic-sidecar/tests/unit/api/rpc/test__disk.py b/services/dynamic-sidecar/tests/unit/api/rpc/test__disk.py new file mode 100644 index 00000000000..7ed592976e7 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rpc/test__disk.py @@ -0,0 +1,30 @@ +# pylint:disable=unused-argument + + +from fastapi import FastAPI +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.dynamic_sidecar import disk +from simcore_service_dynamic_sidecar.core.reserved_space import ( + _RESERVED_DISK_SPACE_NAME, +) +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +async def test_free_reserved_disk_space( + cleanup_reserved_disk_space: None, app: FastAPI, rpc_client: RabbitMQRPCClient +): + assert _RESERVED_DISK_SPACE_NAME.exists() + + settings: ApplicationSettings = app.state.settings + + result = await disk.free_reserved_disk_space( + rpc_client, + node_id=settings.DY_SIDECAR_NODE_ID, + ) + assert result is None + + assert not _RESERVED_DISK_SPACE_NAME.exists() diff --git a/services/dynamic-sidecar/tests/unit/api/rpc/test__disk_usage.py b/services/dynamic-sidecar/tests/unit/api/rpc/test__disk_usage.py new file mode 100644 index 00000000000..105d4a85ccc --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rpc/test__disk_usage.py @@ -0,0 +1,44 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage +from pydantic import ByteSize +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.dynamic_sidecar import disk_usage +from settings_library.redis import RedisSettings +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings +from simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage import ( + get_disk_usage_monitor, +) + +pytest_simcore_core_services_selection = [ + "redis", + "rabbit", +] + + +@pytest.fixture +def mock_environment( + redis_service: RedisSettings, mock_environment: EnvVarsDict +) -> EnvVarsDict: + return mock_environment + + +async def test_get_state(app: FastAPI, rpc_client: RabbitMQRPCClient): + usage = { + "some_path": DiskUsage( + total=ByteSize(0), used=ByteSize(0), free=ByteSize(0), used_percent=0 + ) + } + settings: ApplicationSettings = app.state.settings + + result = await disk_usage.update_disk_usage( + rpc_client, node_id=settings.DY_SIDECAR_NODE_ID, usage=usage + ) + assert result is None + + assert get_disk_usage_monitor(app)._usage_overwrite == usage # noqa: SLF001 diff --git a/services/dynamic-sidecar/tests/unit/api/rpc/test__volumes.py b/services/dynamic-sidecar/tests/unit/api/rpc/test__volumes.py new file mode 100644 index 00000000000..e19b50916c1 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/api/rpc/test__volumes.py @@ -0,0 +1,74 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from pathlib import Path + +import pytest +from fastapi import FastAPI +from models_library.sidecar_volumes import VolumeCategory, VolumeState, VolumeStatus +from servicelib.rabbitmq import 
RabbitMQRPCClient +from servicelib.rabbitmq._errors import RPCServerError +from servicelib.rabbitmq.rpc_interfaces.dynamic_sidecar import volumes +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings +from simcore_service_dynamic_sidecar.models.shared_store import SharedStore + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.mark.parametrize( + "volume_category, initial_expected_status", + [ + (VolumeCategory.STATES, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + (VolumeCategory.OUTPUTS, VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED), + (VolumeCategory.INPUTS, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + (VolumeCategory.SHARED_STORE, VolumeStatus.CONTENT_NO_SAVE_REQUIRED), + ], +) +async def test_volumes_state_saved_ok( + ensure_shared_store_dir: Path, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + volume_category: VolumeCategory, + initial_expected_status: VolumeStatus, +): + shared_store: SharedStore = app.state.shared_store + settings: ApplicationSettings = app.state.settings + + # check that initial status is as expected + assert shared_store.volume_states[volume_category] == VolumeState( + status=initial_expected_status + ) + + await volumes.save_volume_state( + rpc_client, + node_id=settings.DY_SIDECAR_NODE_ID, + status=VolumeStatus.CONTENT_WAS_SAVED, + category=volume_category, + ) + + # check that content was saved + assert shared_store.volume_states[volume_category] == VolumeState( + status=VolumeStatus.CONTENT_WAS_SAVED + ) + + +@pytest.mark.parametrize("invalid_volume_category", ["outputs", "outputS"]) +async def test_volumes_state_saved_error( + ensure_shared_store_dir: Path, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + invalid_volume_category: VolumeCategory, +): + + settings: ApplicationSettings = app.state.settings + + with pytest.raises(RPCServerError, match="ValidationError"): + await volumes.save_volume_state( + rpc_client, + node_id=settings.DY_SIDECAR_NODE_ID, + status=VolumeStatus.CONTENT_WAS_SAVED, + category=invalid_volume_category, + ) diff --git a/services/dynamic-sidecar/tests/unit/conftest.py b/services/dynamic-sidecar/tests/unit/conftest.py index 81cd0b6dd33..75b9d316c10 100644 --- a/services/dynamic-sidecar/tests/unit/conftest.py +++ b/services/dynamic-sidecar/tests/unit/conftest.py @@ -4,20 +4,24 @@ import asyncio import logging -from typing import AsyncIterable, AsyncIterator +from collections.abc import AsyncIterable, AsyncIterator from unittest.mock import AsyncMock import pytest from aiodocker.volumes import DockerVolume from async_asgi_testclient import TestClient from fastapi import FastAPI -from pytest_mock import MockerFixture -from pytest_simcore.helpers.typing_env import EnvVarsDict +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict from simcore_service_dynamic_sidecar.core.application import AppState, create_app from simcore_service_dynamic_sidecar.core.docker_compose_utils import ( docker_compose_down, ) from simcore_service_dynamic_sidecar.core.docker_utils import docker_client +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings +from simcore_service_dynamic_sidecar.modules.notifications._notifications_ports import ( + PortNotifier, +) from tenacity import retry from tenacity.after import after_log from tenacity.stop import stop_after_delay @@ -35,36 +39,6 @@ # -@pytest.fixture -def mock_registry_service(mocker: MockerFixture) -> AsyncMock: - return mocker.patch( - 
"simcore_service_dynamic_sidecar.core.utils._is_registry_reachable", - autospec=True, - ) - - -@pytest.fixture -def mock_core_rabbitmq(mocker: MockerFixture) -> dict[str, AsyncMock]: - """mocks simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQClient member functions""" - return { - "wait_till_rabbitmq_responsive": mocker.patch( - "simcore_service_dynamic_sidecar.core.rabbitmq.wait_till_rabbitmq_responsive", - return_value=None, - autospec=True, - ), - "post_log_message": mocker.patch( - "simcore_service_dynamic_sidecar.core.rabbitmq._post_rabbit_message", - return_value=None, - autospec=True, - ), - "close": mocker.patch( - "simcore_service_dynamic_sidecar.core.rabbitmq.RabbitMQClient.close", - return_value=None, - autospec=True, - ), - } - - @pytest.fixture def app( mock_environment: EnvVarsDict, @@ -72,8 +46,7 @@ def app( mock_core_rabbitmq: dict[str, AsyncMock], ) -> FastAPI: """creates app with registry and rabbitMQ services mocked""" - app = create_app() - return app + return create_app() @pytest.fixture @@ -99,7 +72,7 @@ async def test_client( @pytest.fixture async def ensure_external_volumes( app: FastAPI, -) -> AsyncIterator[tuple[DockerVolume]]: +) -> AsyncIterator[tuple[DockerVolume, ...]]: """ensures inputs and outputs volumes for the service are present Emulates creation of volumes by the directorv2 when it spawns the dynamic-sidecar service @@ -108,17 +81,17 @@ async def ensure_external_volumes( volume_labels_source = [ app_state.mounted_volumes.volume_name_inputs, app_state.mounted_volumes.volume_name_outputs, - ] + list(app_state.mounted_volumes.volume_name_state_paths()) + *list(app_state.mounted_volumes.volume_name_state_paths()), + ] async with docker_client() as docker: - volumes = await asyncio.gather( *[ docker.volumes.create( { "Labels": { "source": source, - "run_id": f"{app_state.settings.DY_SIDECAR_RUN_ID}", + "run_id": app_state.settings.DY_SIDECAR_RUN_ID, } } ) @@ -126,30 +99,6 @@ async def ensure_external_volumes( ] ) - # - # - # docker volume ls --format "{{.Name}} {{.Labels}}" | grep run_id | awk '{print $1}') - # - # - # Example - # { - # "CreatedAt": "2022-06-23T03:22:08+02:00", - # "Driver": "local", - # "Labels": { - # "run_id": "f7c1bd87-4da5-4709-9471-3d60c8a70639", - # "source": "dy-sidecar_e3e70682-c209-4cac-a29f-6fbed82c07cd_data_dir_2" - # }, - # "Mountpoint": "/var/lib/docker/volumes/22bfd79a50eb9097d45cc946736cb66f3670a2fadccb62a77ffbe5e1d88f0034/_data", - # "Name": "22bfd79a50eb9097d45cc946736cb66f3670a2fadccb62a77ffbe5e1d88f0034", - # "Options": null, - # "Scope": "local", - # "CreatedTime": 1655947328000, - # "Containers": {} - # } - # - # CLEAN: - # docker volume rm $(docker volume ls --format "{{.Name}} {{.Labels}}" | grep run_id | awk '{print $1}') - yield tuple(volumes) @retry( @@ -159,7 +108,7 @@ async def ensure_external_volumes( after=after_log(logger, logging.WARNING), ) async def _delete(volume): - # Ocasionally might raise because volumes are mount to closing containers + # Occasionally might raise because volumes are mounted to closing containers await volume.delete() deleted = await asyncio.gather( @@ -170,7 +119,6 @@ async def _delete(volume): @pytest.fixture async def cleanup_containers(app: FastAPI) -> AsyncIterator[None]: - app_state = AppState(app) yield @@ -181,3 +129,39 @@ async def cleanup_containers(app: FastAPI) -> AsyncIterator[None]: return await docker_compose_down(app_state.compose_spec, app_state.settings) + + +@pytest.fixture +def mock_rabbitmq_envs( + mock_core_rabbitmq: dict[str, AsyncMock], + monkeypatch: 
pytest.MonkeyPatch, + mock_environment: EnvVarsDict, +) -> EnvVarsDict: + setenvs_from_dict( + monkeypatch, + { + "RABBIT_HOST": "mocked_host", + "RABBIT_SECURE": "false", + "RABBIT_USER": "mocked_user", + "RABBIT_PASSWORD": "mocked_password", + }, + ) + return mock_environment + + +@pytest.fixture +def port_notifier(app: FastAPI) -> PortNotifier: + settings: ApplicationSettings = app.state.settings + return PortNotifier( + app, + settings.DY_SIDECAR_USER_ID, + settings.DY_SIDECAR_PROJECT_ID, + settings.DY_SIDECAR_NODE_ID, + ) + + +@pytest.fixture +def mock_ensure_read_permissions_on_user_service_data(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.long_running_tasks.ensure_read_permissions_on_user_service_data", + ) diff --git a/services/dynamic-sidecar/tests/unit/test__oas_spec.py b/services/dynamic-sidecar/tests/unit/test__oas_spec.py index a25734b30f2..b5fbc3328a4 100644 --- a/services/dynamic-sidecar/tests/unit/test__oas_spec.py +++ b/services/dynamic-sidecar/tests/unit/test__oas_spec.py @@ -7,7 +7,7 @@ import pytest from fastapi import FastAPI -from pytest_simcore.helpers.utils_envs import EnvVarsDict +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict @pytest.fixture diff --git a/services/dynamic-sidecar/tests/unit/test_api_containers.py b/services/dynamic-sidecar/tests/unit/test_api_containers.py deleted file mode 100644 index 4b788534ec7..00000000000 --- a/services/dynamic-sidecar/tests/unit/test_api_containers.py +++ /dev/null @@ -1,700 +0,0 @@ -# pylint: disable=protected-access -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=unused-variable - -import asyncio -import json -import random -from inspect import signature -from pathlib import Path -from typing import Any, AsyncIterable, Final, Iterator -from unittest.mock import AsyncMock, Mock -from uuid import uuid4 - -import aiodocker -import aiofiles -import pytest -import yaml -from aiodocker.volumes import DockerVolume -from aiofiles.os import mkdir -from async_asgi_testclient import TestClient -from faker import Faker -from fastapi import FastAPI, status -from models_library.services import ServiceOutput -from pytest import MonkeyPatch -from pytest_mock.plugin import MockerFixture -from servicelib.docker_constants import SUFFIX_EGRESS_PROXY_NAME -from servicelib.fastapi.long_running_tasks.client import TaskId -from simcore_service_dynamic_sidecar._meta import API_VTAG -from simcore_service_dynamic_sidecar.core.application import AppState -from simcore_service_dynamic_sidecar.core.docker_compose_utils import ( - docker_compose_create, -) -from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings -from simcore_service_dynamic_sidecar.core.utils import async_command -from simcore_service_dynamic_sidecar.core.validation import parse_compose_spec -from simcore_service_dynamic_sidecar.models.shared_store import SharedStore -from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext -from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager -from simcore_service_dynamic_sidecar.modules.outputs._watcher import OutputsWatcher -from tenacity._asyncio import AsyncRetrying -from tenacity.retry import retry_if_exception_type -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -WAIT_FOR_OUTPUTS_WATCHER: Final[float] = 0.1 -FAST_POLLING_INTERVAL: Final[float] = 0.1 - - -# UTILS - - -class FailTestError(RuntimeError): - pass - - 
-_TENACITY_RETRY_PARAMS: dict[str, Any] = dict( - reraise=True, - retry=retry_if_exception_type((FailTestError, AssertionError)), - stop=stop_after_delay(10), - wait=wait_fixed(0.01), -) - - -def _create_network_aliases(network_name: str) -> list[str]: - return [f"alias_{i}_{network_name}" for i in range(10)] - - -async def _assert_enable_outputs_watcher(test_client: TestClient) -> None: - response = await test_client.patch( - f"/{API_VTAG}/containers/directory-watcher", json=dict(is_enabled=True) - ) - assert response.status_code == status.HTTP_204_NO_CONTENT, response.text - assert response.text == "" - - -async def _assert_disable_outputs_watcher(test_client: TestClient) -> None: - response = await test_client.patch( - f"/{API_VTAG}/containers/directory-watcher", json=dict(is_enabled=False) - ) - assert response.status_code == status.HTTP_204_NO_CONTENT, response.text - assert response.text == "" - - -async def _start_containers(test_client: TestClient, compose_spec: str) -> list[str]: - # start containers - response = await test_client.post( - f"/{API_VTAG}/containers", json={"docker_compose_yaml": compose_spec} - ) - assert response.status_code == status.HTTP_202_ACCEPTED, response.text - task_id: TaskId = response.json() - - async for attempt in AsyncRetrying( - wait=wait_fixed(FAST_POLLING_INTERVAL), - stop=stop_after_delay(100 * FAST_POLLING_INTERVAL), - reraise=True, - ): - with attempt: - response = await test_client.get(f"/task/{task_id}") - assert response.status_code == status.HTTP_200_OK - task_status = response.json() - if not task_status["done"]: - raise RuntimeError(f"Waiting for task to complete, got: {task_status}") - - response = await test_client.get(f"/task/{task_id}/result") - assert response.status_code == status.HTTP_200_OK - result_response = response.json() - assert result_response["error"] is None - response_containers = result_response["result"] - - shared_store: SharedStore = test_client.application.state.shared_store - container_names = shared_store.container_names - assert response_containers == container_names - - return container_names - - -async def _docker_ps_a_container_names() -> list[str]: - # TODO: replace with aiodocker this is legacy by now - command = 'docker ps -a --format "{{.Names}}"' - success, stdout, *_ = await async_command(command=command, timeout=None) - - assert success is True, stdout - return stdout.split("\n") - - -async def _assert_compose_spec_pulled(compose_spec: str, settings: ApplicationSettings): - """ensures all containers inside compose_spec are pulled""" - - result = await docker_compose_create(compose_spec, settings) - - assert result.success is True, result.message - - dict_compose_spec = json.loads(compose_spec) - expected_services_count = len(dict_compose_spec["services"]) - - docker_ps_names = await _docker_ps_a_container_names() - started_containers = [ - x - for x in docker_ps_names - if x.startswith(settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE) - ] - assert len(started_containers) == expected_services_count - - -@pytest.fixture -def app(app: FastAPI) -> FastAPI: - app.state.shared_store = SharedStore() # emulate on_startup event - return app - - -@pytest.fixture -def test_client( - ensure_shared_store_dir: Path, - ensure_run_in_sequence_context_is_empty: None, - ensure_external_volumes: tuple[DockerVolume], - cleanup_containers, - test_client: TestClient, -) -> TestClient: - """creates external volumes and provides a client to dy-sidecar service""" - return test_client - - -@pytest.fixture -def 
dynamic_sidecar_network_name() -> str: - return "entrypoint_container_network" - - -@pytest.fixture -def compose_spec(dynamic_sidecar_network_name: str) -> str: - return json.dumps( - { - "version": "3", - "services": { - "first-box": { - "image": "busybox:latest", - "networks": { - dynamic_sidecar_network_name: None, - }, - "labels": {"io.osparc.test-label": "mark-entrypoint"}, - }, - "second-box": {"image": "busybox:latest"}, - "egress": { - "image": "busybox:latest", - "networks": { - dynamic_sidecar_network_name: None, - }, - }, - }, - "networks": {dynamic_sidecar_network_name: None}, - } - ) - - -@pytest.fixture -def compose_spec_single_service() -> str: - return json.dumps( - { - "version": "3", - "services": { - "solo-box": { - "image": "busybox:latest", - "labels": {"io.osparc.test-label": "mark-entrypoint"}, - }, - }, - } - ) - - -@pytest.fixture(params=["compose_spec", "compose_spec_single_service"]) -def selected_spec(request, compose_spec: str, compose_spec_single_service: str) -> str: - # check that fixture_name is present in this function's parameters - fixture_name = request.param - sig = signature(selected_spec) - assert fixture_name in sig.parameters, ( - f"Provided fixture name {fixture_name} was not found " - f"as a parameter in the signature {sig}" - ) - - # returns the parameter by name from the ones declared in the signature - result: str = locals()[fixture_name] - return result - - -@pytest.fixture -async def started_containers(test_client: TestClient, compose_spec: str) -> list[str]: - settings: ApplicationSettings = test_client.application.state.settings - await _assert_compose_spec_pulled(compose_spec, settings) - - return await _start_containers(test_client, compose_spec) - - -@pytest.fixture -def not_started_containers() -> list[str]: - return [f"missing-container-{i}" for i in range(5)] - - -@pytest.fixture -def mock_outputs_labels() -> dict[str, ServiceOutput]: - return { - "output_port_1": ServiceOutput.parse_obj( - ServiceOutput.Config.schema_extra["examples"][3] - ), - "output_port_2": ServiceOutput.parse_obj( - ServiceOutput.Config.schema_extra["examples"][3] - ), - } - - -@pytest.fixture -async def attachable_networks_and_ids(faker: Faker) -> AsyncIterable[dict[str, str]]: - # generate some network names - unique_id = faker.uuid4() - network_names = {f"test_network_{i}_{unique_id}": "" for i in range(10)} - - # create networks - async with aiodocker.Docker() as client: - for network_name in network_names: - network_config = { - "Name": network_name, - "Driver": "overlay", - "Attachable": True, - "Internal": True, - } - network = await client.networks.create(network_config) - network_names[network_name] = network.id - - yield network_names - - # remove networks - async with aiodocker.Docker() as client: - for network_id in network_names.values(): - network = await client.networks.get(network_id) - assert await network.delete() is True - - -@pytest.fixture -def mock_aiodocker_containers_get(mocker: MockerFixture) -> int: - """raises a DockerError with a random HTTP status which is also returned""" - mock_status_code = random.randint(1, 999) - - async def mock_get(*args: str, **kwargs: Any) -> None: - raise aiodocker.exceptions.DockerError( - status=mock_status_code, data=dict(message="aiodocker_mocked_error") - ) - - mocker.patch("aiodocker.containers.DockerContainers.get", side_effect=mock_get) - - return mock_status_code - - -@pytest.fixture -def mock_event_filter_enqueue( - app: FastAPI, monkeypatch: MonkeyPatch -) -> Iterator[AsyncMock]: - mock = 
AsyncMock(return_value=None) - outputs_watcher: OutputsWatcher = app.state.outputs_watcher - monkeypatch.setattr(outputs_watcher._event_filter, "enqueue", mock) - yield mock - - -@pytest.fixture -async def mocked_port_key_events_queue_coro_get( - app: FastAPI, mocker: MockerFixture -) -> Mock: - outputs_context: OutputsContext = app.state.outputs_context - - target = getattr(outputs_context.port_key_events_queue, "coro_get") - - mock_result_tracker = Mock() - - async def _wrapped_coroutine() -> Any: - # NOTE: coro_get returns a future, naming is unfortunate - # and can cause confusion, normally an async def function - # will return a coroutine not a future object. - future: asyncio.Future = target() - result = await future - mock_result_tracker(result) - - return result - - mocker.patch.object( - outputs_context.port_key_events_queue, - "coro_get", - side_effect=_wrapped_coroutine, - ) - - return mock_result_tracker - - -# TESTS - - -def test_ensure_api_vtag_is_v1(): - assert API_VTAG == "v1" - - -async def test_start_same_space_twice(compose_spec: str, test_client: TestClient): - settings = test_client.application.state.settings - - settings_1 = settings.copy( - update={"DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": "test_name_space_1"}, deep=True - ) - await _assert_compose_spec_pulled(compose_spec, settings_1) - - settings_2 = settings.copy( - update={"DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": "test_name_space_2"}, deep=True - ) - await _assert_compose_spec_pulled(compose_spec, settings_2) - - -async def test_containers_get( - test_client: TestClient, - started_containers: list[str], - ensure_external_volumes: None, -): - response = await test_client.get(f"/{API_VTAG}/containers") - assert response.status_code == status.HTTP_200_OK, response.text - - decoded_response = response.json() - assert set(decoded_response) == set(started_containers) - for entry in decoded_response.values(): - assert "Status" not in entry - assert "Error" not in entry - - -async def test_containers_get_status( - test_client: TestClient, - started_containers: list[str], - ensure_external_volumes: None, -): - response = await test_client.get( - f"/{API_VTAG}/containers", query_string=dict(only_status=True) - ) - assert response.status_code == status.HTTP_200_OK, response.text - - decoded_response = response.json() - assert set(decoded_response) == set(started_containers) - - def assert_keys_exist(result: dict[str, Any]) -> bool: - for entry in result.values(): - assert "Status" in entry - assert "Error" in entry - return True - - assert assert_keys_exist(decoded_response) is True - - -async def test_containers_docker_status_docker_error( - test_client: TestClient, - started_containers: list[str], - mock_aiodocker_containers_get: int, -): - response = await test_client.get(f"/{API_VTAG}/containers") - assert response.status_code == mock_aiodocker_containers_get, response.text - - -async def test_container_inspect_logs_remove( - test_client: TestClient, started_containers: list[str] -): - for container in started_containers: - # get container logs - # FIXME: slow call? 
- response = await test_client.get(f"/{API_VTAG}/containers/{container}/logs") - assert response.status_code == status.HTTP_200_OK, response.text - - # inspect container - response = await test_client.get(f"/{API_VTAG}/containers/{container}") - assert response.status_code == status.HTTP_200_OK, response.text - parsed_response = response.json() - assert parsed_response["Name"] == f"/{container}" - - -async def test_container_logs_with_timestamps( - test_client: TestClient, started_containers: list[str] -): - for container in started_containers: - print("getting logs of container", container, "...") - response = await test_client.get( - f"/{API_VTAG}/containers/{container}/logs", - query_string=dict(timestamps=True), - ) - assert response.status_code == status.HTTP_200_OK, response.text - assert response.json() == [] - - -async def test_container_missing_container( - test_client: TestClient, not_started_containers: list[str] -): - def _expected_error_string(container: str) -> dict[str, str]: - return dict( - detail=f"No container '{container}' was started. Started containers '[]'" - ) - - for container in not_started_containers: - # get container logs - response = await test_client.get(f"/{API_VTAG}/containers/{container}/logs") - assert response.status_code == status.HTTP_404_NOT_FOUND, response.text - assert response.json() == _expected_error_string(container) - - # inspect container - response = await test_client.get(f"/{API_VTAG}/containers/{container}") - assert response.status_code == status.HTTP_404_NOT_FOUND, response.text - assert response.json() == _expected_error_string(container) - - -async def test_container_docker_error( - test_client: TestClient, - started_containers: list[str], - mock_aiodocker_containers_get: int, -): - def _expected_error_string(status_code: int) -> dict[str, Any]: - return { - "errors": [ - f"An unexpected Docker error occurred status={status_code}, message='aiodocker_mocked_error'" - ] - } - - for container in started_containers: - # get container logs - response = await test_client.get(f"/{API_VTAG}/containers/{container}/logs") - assert response.status_code == mock_aiodocker_containers_get, response.text - assert response.json() == _expected_error_string(mock_aiodocker_containers_get) - - # inspect container - response = await test_client.get(f"/{API_VTAG}/containers/{container}") - assert response.status_code == mock_aiodocker_containers_get, response.text - assert response.json() == _expected_error_string(mock_aiodocker_containers_get) - - -@pytest.mark.flaky(max_runs=3) -async def test_outputs_watcher_disabling( - test_client: TestClient, - mocked_port_key_events_queue_coro_get: Mock, - mock_event_filter_enqueue: AsyncMock, -): - assert isinstance(test_client.application, FastAPI) - outputs_context: OutputsContext = test_client.application.state.outputs_context - outputs_manager: OutputsManager = test_client.application.state.outputs_manager - outputs_manager.task_monitor_interval_s = WAIT_FOR_OUTPUTS_WATCHER / 10 - WAIT_PORT_KEY_PROPAGATION = outputs_manager.task_monitor_interval_s * 10 - EXPECTED_EVENTS_PER_RANDOM_PORT_KEY = 3 - - async def _create_port_key_events() -> None: - random_subdir = f"{uuid4()}" - - await outputs_context.set_file_type_port_keys([random_subdir]) - await asyncio.sleep(WAIT_PORT_KEY_PROPAGATION) - - dir_name = outputs_context.outputs_path / random_subdir - await mkdir(dir_name) - async with aiofiles.open(dir_name / f"file_{uuid4()}", "w") as f: - await f.write("ok") - - async for attempt in 
AsyncRetrying(**_TENACITY_RETRY_PARAMS): - with attempt: - # check event was triggered - dir_event_set = [ - c.args[0] - for c in mocked_port_key_events_queue_coro_get.call_args_list - if c.args[0] == random_subdir - ] - # NOTE: this test can sometimes generate +/-(1 event) - # - when it creates +1 event βœ… using `>=` solves it - # - when it creates -1 event ❌ cannot deal with it from here - # Will cause downstream assertions to fail since in the - # event_filter_queue there will be unexpected items - # NOTE: will make entire test fail with a specific - # exception and rely on mark.flaky to retry it. - if len(dir_event_set) < EXPECTED_EVENTS_PER_RANDOM_PORT_KEY: - raise FailTestError( - f"Expected at least {EXPECTED_EVENTS_PER_RANDOM_PORT_KEY}" - f" events, found: {dir_event_set}" - ) - assert len(dir_event_set) >= EXPECTED_EVENTS_PER_RANDOM_PORT_KEY - - def _assert_events_generated(*, expected_events: int) -> None: - events_set = {x.args[0] for x in mock_event_filter_enqueue.call_args_list} - assert len(events_set) == expected_events - - # NOTE: for some reason the first event in the queue - # does not get delivered the AioQueue future handling coro_get hangs - await outputs_context.port_key_events_queue.coro_put("") - await asyncio.sleep(WAIT_PORT_KEY_PROPAGATION) - - # by default outputs-watcher it is disabled - - # expect no events to be generated - _assert_events_generated(expected_events=0) - await _create_port_key_events() - _assert_events_generated(expected_events=0) - - # after enabling new vents will be generated - await _assert_enable_outputs_watcher(test_client) - _assert_events_generated(expected_events=0) - await _create_port_key_events() - _assert_events_generated(expected_events=1) - - # disabling again, no longer generate events - await _assert_disable_outputs_watcher(test_client) - _assert_events_generated(expected_events=1) - await _create_port_key_events() - _assert_events_generated(expected_events=1) - - # enabling once more time, events are once again generated - await _assert_enable_outputs_watcher(test_client) - _assert_events_generated(expected_events=1) - await _create_port_key_events() - _assert_events_generated(expected_events=2) - - -async def test_container_create_outputs_dirs( - test_client: TestClient, - mock_outputs_labels: dict[str, ServiceOutput], - mock_event_filter_enqueue: AsyncMock, -): - assert isinstance(test_client.application, FastAPI) - mounted_volumes = AppState(test_client.application).mounted_volumes - - # by default outputs-watcher it is disabled - await _assert_enable_outputs_watcher(test_client) - await asyncio.sleep(WAIT_FOR_OUTPUTS_WATCHER) - - assert mock_event_filter_enqueue.call_count == 0 - - json_outputs_labels = { - k: v.dict(by_alias=True) for k, v in mock_outputs_labels.items() - } - response = await test_client.post( - f"/{API_VTAG}/containers/ports/outputs/dirs", - json={"outputs_labels": json_outputs_labels}, - ) - assert response.status_code == status.HTTP_204_NO_CONTENT, response.text - assert response.text == "" - - for dir_name in mock_outputs_labels.keys(): - assert (mounted_volumes.disk_outputs_path / dir_name).is_dir() - - await asyncio.sleep(WAIT_FOR_OUTPUTS_WATCHER) - EXPECT_EVENTS_WHEN_CREATING_OUTPUT_PORT_KEY_DIRS = 0 - assert ( - mock_event_filter_enqueue.call_count - == EXPECT_EVENTS_WHEN_CREATING_OUTPUT_PORT_KEY_DIRS - ) - - -def _get_entrypoint_container_name(test_client: TestClient) -> str: - parsed_spec = parse_compose_spec( - test_client.application.state.shared_store.compose_spec - ) - container_name = None 
- for service_name, service_details in parsed_spec["services"].items(): - if service_details.get("labels", None) is not None: - container_name = service_name - break - assert container_name is not None - return container_name - - -@pytest.mark.parametrize("include_exclude_filter_option", [True, False]) -async def test_containers_entrypoint_name_ok( - test_client: TestClient, - dynamic_sidecar_network_name: str, - started_containers: list[str], - include_exclude_filter_option: bool, -): - filters_dict = {"network": dynamic_sidecar_network_name} - if include_exclude_filter_option: - filters_dict["exclude"] = SUFFIX_EGRESS_PROXY_NAME - filters = json.dumps(filters_dict) - - response = await test_client.get(f"/{API_VTAG}/containers/name?filters={filters}") - assert response.status_code == status.HTTP_200_OK, response.text - container_name = response.json() - assert container_name == _get_entrypoint_container_name(test_client) - assert SUFFIX_EGRESS_PROXY_NAME not in container_name - - -@pytest.mark.parametrize("include_exclude_filter_option", [True, False]) -async def test_containers_entrypoint_name_containers_not_started( - test_client: TestClient, - dynamic_sidecar_network_name: str, - started_containers: list[str], - include_exclude_filter_option: bool, -): - entrypoint_container = _get_entrypoint_container_name(test_client) - - # remove the container from the spec - parsed_spec = parse_compose_spec( - test_client.application.state.shared_store.compose_spec - ) - del parsed_spec["services"][entrypoint_container] - test_client.application.state.shared_store.compose_spec = yaml.safe_dump( - parsed_spec - ) - - filters_dict = {"network": dynamic_sidecar_network_name} - if include_exclude_filter_option: - filters_dict["exclude"] = SUFFIX_EGRESS_PROXY_NAME - filters = json.dumps(filters_dict) - response = await test_client.get(f"/{API_VTAG}/containers/name?filters={filters}") - - if include_exclude_filter_option: - assert response.status_code == status.HTTP_404_NOT_FOUND, response.text - assert response.json() == { - "detail": "No container found for network=entrypoint_container_network" - } - else: - assert response.status_code == status.HTTP_200_OK, response.text - found_container = response.json() - assert found_container in started_containers - assert SUFFIX_EGRESS_PROXY_NAME in found_container - - -async def test_attach_detach_container_to_network( - docker_swarm: None, - test_client: TestClient, - selected_spec: str, - attachable_networks_and_ids: dict[str, str], -): - container_names = await _start_containers(test_client, selected_spec) - - async with aiodocker.Docker() as docker: - for container_name in container_names: - for network_name, network_id in attachable_networks_and_ids.items(): - network_aliases = _create_network_aliases(network_name) - - # attach network to containers - for _ in range(2): # calling 2 times in a row - response = await test_client.post( - f"/{API_VTAG}/containers/{container_name}/networks:attach", - json={ - "network_id": network_id, - "network_aliases": network_aliases, - }, - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - ), response.text - - container = await docker.containers.get(container_name) - container_inspect = await container.show() - networks = container_inspect["NetworkSettings"]["Networks"] - assert network_id in networks - assert set(networks[network_id]["Aliases"]) == set(network_aliases) - - # detach network from containers - for _ in range(2): # running twice in a row - response = await test_client.post( - 
f"/{API_VTAG}/containers/{container_name}/networks:detach", - json={"network_id": network_id}, - ) - assert ( - response.status_code == status.HTTP_204_NO_CONTENT - ), response.text - - container = await docker.containers.get(container_name) - container_inspect = await container.show() - networks = container_inspect["NetworkSettings"]["Networks"] - assert network_id in networks diff --git a/services/dynamic-sidecar/tests/unit/test_api_containers_long_running_tasks.py b/services/dynamic-sidecar/tests/unit/test_api_containers_long_running_tasks.py deleted file mode 100644 index bd3be258590..00000000000 --- a/services/dynamic-sidecar/tests/unit/test_api_containers_long_running_tasks.py +++ /dev/null @@ -1,632 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument -# pylint: disable=no-member - -import json -from collections import namedtuple -from contextlib import asynccontextmanager, contextmanager -from inspect import getmembers, isfunction -from pathlib import Path -from typing import ( - Any, - AsyncIterable, - AsyncIterator, - Awaitable, - Callable, - Final, - Iterator, - Optional, -) - -import aiodocker -import faker -import pytest -from aiodocker.containers import DockerContainer -from aiodocker.volumes import DockerVolume -from asgi_lifespan import LifespanManager -from fastapi import FastAPI -from fastapi.routing import APIRoute -from httpx import AsyncClient -from pydantic import AnyHttpUrl, parse_obj_as -from pytest import FixtureRequest, LogCaptureFixture -from pytest_mock.plugin import MockerFixture -from servicelib.fastapi.long_running_tasks.client import ( - Client, - TaskClientResultError, - TaskId, - periodic_task_result, -) -from servicelib.fastapi.long_running_tasks.client import setup as client_setup -from simcore_sdk.node_ports_common.exceptions import NodeNotFound -from simcore_service_dynamic_sidecar._meta import API_VTAG -from simcore_service_dynamic_sidecar.api import containers_long_running_tasks -from simcore_service_dynamic_sidecar.models.shared_store import SharedStore -from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext -from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager - -FAST_STATUS_POLL: Final[float] = 0.1 -CREATE_SERVICE_CONTAINERS_TIMEOUT: Final[float] = 60 -DEFAULT_COMMAND_TIMEOUT: Final[int] = 5 - -ContainerTimes = namedtuple("ContainerTimes", "created, started_at, finished_at") - - -# UTILS - - -def _print_routes(app: FastAPI) -> None: - endpoints = [] - for route in app.routes: - if isinstance(route, APIRoute): - endpoints.append(route.path) - - print("ROUTES\n", json.dumps(endpoints, indent=2)) - - -def _get_dynamic_sidecar_network_name() -> str: - return "entrypoint_container_network" - - -@contextmanager -def mock_tasks(mocker: MockerFixture) -> Iterator[None]: - async def _just_log_task(*args, **kwargs) -> None: - print(f"Called mocked function with {args}, {kwargs}") - - # searching by name since all start with _task - tasks_names = [ - x[0] - for x in getmembers(containers_long_running_tasks, isfunction) - if x[0].startswith("task") - ] - - for task_name in tasks_names: - mocker.patch.object( - containers_long_running_tasks, task_name, new=_just_log_task - ) - - yield None - - -@asynccontextmanager -async def auto_remove_task(client: Client, task_id: TaskId) -> AsyncIterator[None]: - """clenup pending tasks""" - try: - yield - finally: - await client.cancel_and_delete_task(task_id, timeout=10) - - -async def _get_container_timestamps( - container_names: 
list[str], -) -> dict[str, ContainerTimes]: - container_timestamps: dict[str, ContainerTimes] = {} - async with aiodocker.Docker() as client: - for container_name in container_names: - container: DockerContainer = await client.containers.get(container_name) - container_inspect: dict[str, Any] = await container.show() - container_timestamps[container_name] = ContainerTimes( - created=container_inspect["Created"], - started_at=container_inspect["State"]["StartedAt"], - finished_at=container_inspect["State"]["FinishedAt"], - ) - - return container_timestamps - - -@pytest.fixture -def dynamic_sidecar_network_name() -> str: - return _get_dynamic_sidecar_network_name() - - -@pytest.fixture( - params=[ - { - "version": "3", - "services": { - "first-box": { - "image": "busybox:latest", - "networks": { - _get_dynamic_sidecar_network_name(): None, - }, - }, - "second-box": {"image": "busybox:latest"}, - }, - "networks": {_get_dynamic_sidecar_network_name(): None}, - }, - { - "version": "3", - "services": { - "solo-box": {"image": "busybox:latest"}, - }, - }, - ] -) -def compose_spec(request: FixtureRequest) -> str: - spec_dict: dict[str, Any] = request.param # type: ignore - return json.dumps(spec_dict) - - -@pytest.fixture -def backend_url() -> AnyHttpUrl: - return parse_obj_as(AnyHttpUrl, "http://backgroud.testserver.io") - - -@pytest.fixture -async def app(app: FastAPI) -> AsyncIterable[FastAPI]: - # add the client setup to the same application - # this is only required for testing, in reality - # this will be in a different process - client_setup(app) - async with LifespanManager(app): - _print_routes(app) - yield app - - -@pytest.fixture -async def httpx_async_client( - app: FastAPI, - backend_url: AnyHttpUrl, - ensure_external_volumes: tuple[DockerVolume], - cleanup_containers: None, - ensure_shared_store_dir: Path, -) -> AsyncIterable[AsyncClient]: - # crete dir here - async with AsyncClient( - app=app, - base_url=backend_url, - headers={"Content-Type": "application/json"}, - ) as client: - yield client - - -@pytest.fixture -def client( - app: FastAPI, httpx_async_client: AsyncClient, backend_url: AnyHttpUrl -) -> Client: - return Client(app=app, async_client=httpx_async_client, base_url=backend_url) - - -@pytest.fixture -def shared_store(httpx_async_client: AsyncClient) -> SharedStore: - # pylint: disable=protected-access - return httpx_async_client._transport.app.state.shared_store - - -@pytest.fixture -def mock_data_manager(mocker: MockerFixture) -> None: - mocker.patch( - "simcore_service_dynamic_sidecar.modules.long_running_tasks.data_manager.push", - autospec=True, - return_value=None, - ) - mocker.patch( - "simcore_service_dynamic_sidecar.modules.long_running_tasks.data_manager.exists", - autospec=True, - return_value=True, - ) - mocker.patch( - "simcore_service_dynamic_sidecar.modules.long_running_tasks.data_manager.pull", - autospec=True, - return_value=None, - ) - - -@pytest.fixture() -def mock_nodeports(mocker: MockerFixture) -> None: - mocker.patch( - "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", - return_value=None, - ) - mocker.patch( - "simcore_service_dynamic_sidecar.modules.nodeports.download_target_ports", - return_value=42, - ) - - -@pytest.fixture( - params=[ - [], - None, - ["single_port"], - ["first_port", "second_port"], - ] -) -async def mock_port_keys( - request: FixtureRequest, client: Client -) -> Optional[list[str]]: - outputs_context: OutputsContext = client.app.state.outputs_context - if request.param is not None: - await 
outputs_context.set_file_type_port_keys(request.param) - return request.param - - -@pytest.fixture -def outputs_manager(client: Client) -> OutputsManager: - return client.app.state.outputs_manager - - -@pytest.fixture -def missing_node_uuid(faker: faker.Faker) -> str: - return faker.uuid4() - - -@pytest.fixture -def mock_node_missing(mocker: MockerFixture, missing_node_uuid: str) -> None: - async def _mocked(*args, **kwargs) -> None: - raise NodeNotFound(missing_node_uuid) - - mocker.patch( - "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", - side_effect=_mocked, - ) - - -async def _get_task_id_create_service_containers( - httpx_async_client: AsyncClient, compose_spec: str, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post( - f"/{API_VTAG}/containers", json={"docker_compose_yaml": compose_spec} - ) - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_docker_compose_down( - httpx_async_client: AsyncClient, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post(f"/{API_VTAG}/containers:down") - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_state_restore( - httpx_async_client: AsyncClient, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post(f"/{API_VTAG}/containers/state:restore") - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_state_save( - httpx_async_client: AsyncClient, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post(f"/{API_VTAG}/containers/state:save") - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_task_ports_inputs_pull( - httpx_async_client: AsyncClient, port_keys: Optional[list[str]], *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post( - f"/{API_VTAG}/containers/ports/inputs:pull", json=port_keys - ) - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_task_ports_outputs_pull( - httpx_async_client: AsyncClient, port_keys: Optional[list[str]], *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post( - f"/{API_VTAG}/containers/ports/outputs:pull", json=port_keys - ) - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_task_ports_outputs_push( - httpx_async_client: AsyncClient, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post( - f"/{API_VTAG}/containers/ports/outputs:push" - ) - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _get_task_id_task_containers_restart( - httpx_async_client: AsyncClient, command_timeout: int, *args, **kwargs -) -> TaskId: - response = await httpx_async_client.post( - f"/{API_VTAG}/containers:restart", - params=dict(command_timeout=command_timeout), - ) - task_id: TaskId = response.json() - assert isinstance(task_id, str) - return task_id - - -async def _debug_progress(message: str, percent: float, task_id: TaskId) -> None: - print(f"{task_id} {percent} {message}") - - -async def test_create_containers_task( - httpx_async_client: AsyncClient, - client: Client, - compose_spec: str, - shared_store: SharedStore, -) -> None: - last_progress_message: Optional[tuple[str, float]] = None - - async def create_progress(message: str, percent: float, _: TaskId) -> 
None: - nonlocal last_progress_message - last_progress_message = (message, percent) - print(message, percent) - - async with periodic_task_result( - client=client, - task_id=await _get_task_id_create_service_containers( - httpx_async_client, compose_spec - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=create_progress, - ) as result: - assert shared_store.container_names == result - - assert last_progress_message == ("finished", 1.0) - - -async def test_create_containers_task_invalid_yaml_spec( - httpx_async_client: AsyncClient, client: Client -): - with pytest.raises(TaskClientResultError) as exec_info: - async with periodic_task_result( - client=client, - task_id=await _get_task_id_create_service_containers( - httpx_async_client, "" - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ): - pass - assert "raise InvalidComposeSpec" in f"{exec_info.value}" - - -@pytest.mark.parametrize( - "get_task_id_callable", - [ - _get_task_id_create_service_containers, - _get_task_id_docker_compose_down, - _get_task_id_state_restore, - _get_task_id_state_save, - _get_task_id_task_ports_inputs_pull, - _get_task_id_task_ports_outputs_pull, - _get_task_id_task_ports_outputs_push, - _get_task_id_task_containers_restart, - ], -) -async def test_same_task_id_is_returned_if_task_exists( - httpx_async_client: AsyncClient, - client: Client, - mocker: MockerFixture, - get_task_id_callable: Callable[..., Awaitable], -) -> None: - def _get_awaitable() -> Awaitable: - return get_task_id_callable( - httpx_async_client=httpx_async_client, - compose_spec="", - port_keys=None, - command_timeout=0, - ) - - with mock_tasks(mocker): - task_id = await _get_awaitable() - async with auto_remove_task(client, task_id): - assert await _get_awaitable() == task_id - - # since the previous task was already removed it is again possible - # to create a task - new_task_id = await _get_awaitable() - assert new_task_id != task_id - async with auto_remove_task(client, task_id): - pass - - -async def test_containers_down_after_starting( - httpx_async_client: AsyncClient, - client: Client, - compose_spec: str, - shared_store: SharedStore, -): - # start containers - async with periodic_task_result( - client=client, - task_id=await _get_task_id_create_service_containers( - httpx_async_client, compose_spec - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert shared_store.container_names == result - - # put down containers - async with periodic_task_result( - client=client, - task_id=await _get_task_id_docker_compose_down(httpx_async_client), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - - -async def test_containers_down_missing_spec( - httpx_async_client: AsyncClient, - client: Client, - caplog_info_debug: LogCaptureFixture, -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_docker_compose_down(httpx_async_client), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - assert "No compose-spec was found" in caplog_info_debug.text - - -async def test_container_restore_state( - httpx_async_client: AsyncClient, client: Client, 
mock_data_manager: None -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_state_restore(httpx_async_client), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - - -async def test_container_save_state( - httpx_async_client: AsyncClient, client: Client, mock_data_manager: None -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_state_save(httpx_async_client), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - - -async def test_container_pull_input_ports( - httpx_async_client: AsyncClient, - client: Client, - mock_port_keys: Optional[list[str]], - mock_nodeports: None, -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_task_ports_inputs_pull( - httpx_async_client, mock_port_keys - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result == 42 - - -async def test_container_pull_output_ports( - httpx_async_client: AsyncClient, - client: Client, - mock_port_keys: Optional[list[str]], - mock_nodeports: None, -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_task_ports_outputs_pull( - httpx_async_client, mock_port_keys - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result == 42 - - -async def test_container_push_output_ports( - httpx_async_client: AsyncClient, - client: Client, - mock_port_keys: Optional[list[str]], - mock_nodeports: None, -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_task_ports_outputs_push( - httpx_async_client, mock_port_keys - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - - -async def test_container_push_output_ports_missing_node( - httpx_async_client: AsyncClient, - client: Client, - mock_port_keys: Optional[list[str]], - missing_node_uuid: str, - mock_node_missing: None, - outputs_manager: OutputsManager, -): - - for port_key in mock_port_keys if mock_port_keys else []: - await outputs_manager.port_key_content_changed(port_key) - - async def _test_code() -> None: - async with periodic_task_result( - client=client, - task_id=await _get_task_id_task_ports_outputs_push( - httpx_async_client, mock_port_keys - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ): - pass - - if not mock_port_keys: - await _test_code() - else: - with pytest.raises(TaskClientResultError) as exec_info: - await _test_code() - assert f"the node id {missing_node_uuid} was not found" in f"{exec_info.value}" - - -async def test_containers_restart( - httpx_async_client: AsyncClient, - client: Client, - compose_spec: str, - shared_store: SharedStore, -): - async with periodic_task_result( - client=client, - task_id=await _get_task_id_create_service_containers( - httpx_async_client, compose_spec - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as container_names: - assert shared_store.container_names == 
container_names - - assert container_names - - container_timestamps_before = await _get_container_timestamps(container_names) - - async with periodic_task_result( - client=client, - task_id=await _get_task_id_task_containers_restart( - httpx_async_client, DEFAULT_COMMAND_TIMEOUT - ), - task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, - status_poll_interval=FAST_STATUS_POLL, - progress_callback=_debug_progress, - ) as result: - assert result is None - - container_timestamps_after = await _get_container_timestamps(container_names) - - for container_name in container_names: - before: ContainerTimes = container_timestamps_before[container_name] - after: ContainerTimes = container_timestamps_after[container_name] - - assert before.created == after.created - assert before.started_at < after.started_at - assert before.finished_at < after.finished_at diff --git a/services/dynamic-sidecar/tests/unit/test_api_health.py b/services/dynamic-sidecar/tests/unit/test_api_health.py deleted file mode 100644 index d6b6c801d5f..00000000000 --- a/services/dynamic-sidecar/tests/unit/test_api_health.py +++ /dev/null @@ -1,28 +0,0 @@ -# pylint: disable=redefined-outer-name -# pylint: disable=unused-argument - -import pytest -from async_asgi_testclient import TestClient -from fastapi import status -from simcore_service_dynamic_sidecar.models.schemas.application_health import ( - ApplicationHealth, -) - - -@pytest.fixture -def test_client(test_client: TestClient) -> TestClient: - return test_client - - -async def test_is_healthy(test_client: TestClient) -> None: - test_client.application.state.application_health.is_healthy = True - response = await test_client.get("/health") - assert response.status_code == status.HTTP_200_OK, response - assert response.json() == ApplicationHealth(is_healthy=True).dict() - - -async def test_is_unhealthy(test_client: TestClient) -> None: - test_client.application.state.application_health.is_healthy = False - response = await test_client.get("/health") - assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE, response - assert response.json() == {"detail": ApplicationHealth(is_healthy=False).dict()} diff --git a/services/dynamic-sidecar/tests/unit/test_api_rest_containers.py b/services/dynamic-sidecar/tests/unit/test_api_rest_containers.py new file mode 100644 index 00000000000..182743dca57 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_api_rest_containers.py @@ -0,0 +1,776 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import asyncio +import json +from collections.abc import AsyncIterable, AsyncIterator +from inspect import signature +from pathlib import Path +from typing import Any, Final +from unittest.mock import AsyncMock, Mock +from uuid import uuid4 + +import aiodocker +import aiofiles +import pytest +import yaml +from aiodocker.volumes import DockerVolume +from aiofiles.os import mkdir +from async_asgi_testclient import TestClient +from faker import Faker +from fastapi import FastAPI, status +from models_library.api_schemas_dynamic_sidecar.containers import ActivityInfo +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from models_library.services_io import ServiceOutput +from pydantic import TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.docker_constants import SUFFIX_EGRESS_PROXY_NAME +from 
servicelib.fastapi.long_running_tasks.client import TaskId +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.api.rest.containers import _INACTIVE_FOR_LONG_TIME +from simcore_service_dynamic_sidecar.core.application import AppState +from simcore_service_dynamic_sidecar.core.docker_compose_utils import ( + docker_compose_create, +) +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings +from simcore_service_dynamic_sidecar.core.utils import async_command +from simcore_service_dynamic_sidecar.core.validation import parse_compose_spec +from simcore_service_dynamic_sidecar.models.shared_store import SharedStore +from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext +from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager +from simcore_service_dynamic_sidecar.modules.outputs._watcher import OutputsWatcher +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +WAIT_FOR_OUTPUTS_WATCHER: Final[float] = 0.1 +FAST_POLLING_INTERVAL: Final[float] = 0.1 + + +# UTILS + + +class FailTestError(RuntimeError): + pass + + +_TENACITY_RETRY_PARAMS: dict[str, Any] = { + "reraise": True, + "retry": retry_if_exception_type((FailTestError, AssertionError)), + "stop": stop_after_delay(10), + "wait": wait_fixed(0.01), +} + + +def _create_network_aliases(network_name: str) -> list[str]: + return [f"alias_{i}_{network_name}" for i in range(10)] + + +async def _assert_enable_output_ports(test_client: TestClient) -> None: + response = await test_client.patch( + f"/{API_VTAG}/containers/ports/io", + json={"enable_outputs": True, "enable_inputs": False}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT, response.text + assert response.text == "" + + +async def _assert_disable_output_ports(test_client: TestClient) -> None: + response = await test_client.patch( + f"/{API_VTAG}/containers/ports/io", + json={"enable_outputs": False, "enable_inputs": False}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT, response.text + assert response.text == "" + + +async def _start_containers( + test_client: TestClient, + compose_spec: str, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +) -> list[str]: + # start containers + response = await test_client.post( + f"/{API_VTAG}/containers/compose-spec", + json={"docker_compose_yaml": compose_spec}, + ) + assert response.status_code == status.HTTP_202_ACCEPTED, response.text + assert response.json() is None + + response = await test_client.post( + f"/{API_VTAG}/containers", + json={"metrics_params": mock_metrics_params.model_dump()}, + ) + assert response.status_code == status.HTTP_202_ACCEPTED, response.text + task_id: TaskId = response.json() + + async for attempt in AsyncRetrying( + wait=wait_fixed(FAST_POLLING_INTERVAL), + stop=stop_after_delay(100 * FAST_POLLING_INTERVAL), + reraise=True, + ): + with attempt: + response = await test_client.get(f"/task/{task_id}") + assert response.status_code == status.HTTP_200_OK + task_status = response.json() + if not task_status["done"]: + msg = f"Waiting for task to complete, got: {task_status}" + raise RuntimeError(msg) + + response = await test_client.get(f"/task/{task_id}/result") + assert response.status_code == status.HTTP_200_OK + result_response = response.json() + assert result_response["error"] is None + response_containers = 
result_response["result"] + + shared_store: SharedStore = test_client.application.state.shared_store + container_names = shared_store.container_names + assert response_containers == container_names + + return container_names + + +async def _docker_ps_a_container_names() -> list[str]: + command = 'docker ps -a --format "{{.Names}}"' + success, stdout, *_ = await async_command(command=command, timeout=None) + + assert success is True, stdout + return stdout.split("\n") + + +async def _assert_compose_spec_pulled(compose_spec: str, settings: ApplicationSettings): + """ensures all containers inside compose_spec are pulled""" + + result = await docker_compose_create(compose_spec, settings) + + assert result.success is True, result.message + + dict_compose_spec = json.loads(compose_spec) + expected_services_count = len(dict_compose_spec["services"]) + + docker_ps_names = await _docker_ps_a_container_names() + started_containers = [ + x + for x in docker_ps_names + if x.startswith(settings.DYNAMIC_SIDECAR_COMPOSE_NAMESPACE) + ] + assert len(started_containers) == expected_services_count + + +@pytest.fixture +def mock_environment( + mock_environment: EnvVarsDict, mock_rabbitmq_envs: EnvVarsDict +) -> EnvVarsDict: + return mock_rabbitmq_envs + + +@pytest.fixture +def app(app: FastAPI) -> FastAPI: + app.state.shared_store = SharedStore() # emulate on_startup event + return app + + +@pytest.fixture +async def test_client( + ensure_shared_store_dir: Path, + ensure_external_volumes: tuple[DockerVolume], + test_client: TestClient, + cleanup_containers: AsyncIterator[None], + ensure_run_in_sequence_context_is_empty: None, +) -> TestClient: + """creates external volumes and provides a client to dy-sidecar service""" + return test_client + + +@pytest.fixture +def dynamic_sidecar_network_name() -> str: + return "entrypoint_container_network" + + +@pytest.fixture +def compose_spec(dynamic_sidecar_network_name: str) -> str: + return json.dumps( + { + "version": "3", + "services": { + "first-box": { + "image": "busybox:latest", + "networks": { + dynamic_sidecar_network_name: None, + }, + "labels": {"io.osparc.test-label": "mark-entrypoint"}, + }, + "second-box": {"image": "busybox:latest"}, + "egress": { + "image": "busybox:latest", + "networks": { + dynamic_sidecar_network_name: None, + }, + }, + }, + "networks": {dynamic_sidecar_network_name: None}, + } + ) + + +@pytest.fixture +def compose_spec_single_service() -> str: + return json.dumps( + { + "version": "3", + "services": { + "solo-box": { + "image": "busybox:latest", + "labels": {"io.osparc.test-label": "mark-entrypoint"}, + }, + }, + } + ) + + +@pytest.fixture(params=["compose_spec", "compose_spec_single_service"]) +def selected_spec(request, compose_spec: str, compose_spec_single_service: str) -> str: + # check that fixture_name is present in this function's parameters + fixture_name = request.param + sig = signature(selected_spec) + assert fixture_name in sig.parameters, ( + f"Provided fixture name {fixture_name} was not found " + f"as a parameter in the signature {sig}" + ) + + # returns the parameter by name from the ones declared in the signature + result: str = locals()[fixture_name] + return result + + +@pytest.fixture +async def started_containers( + test_client: TestClient, + compose_spec: str, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +) -> list[str]: + settings: ApplicationSettings = test_client.application.state.settings + await _assert_compose_spec_pulled(compose_spec, settings) + + return await 
_start_containers(test_client, compose_spec, mock_metrics_params) + + +@pytest.fixture +def not_started_containers() -> list[str]: + return [f"missing-container-{i}" for i in range(5)] + + +@pytest.fixture +def mock_outputs_labels() -> dict[str, ServiceOutput]: + return { + "output_port_1": TypeAdapter(ServiceOutput).validate_python( + ServiceOutput.model_config["json_schema_extra"]["examples"][3] + ), + "output_port_2": TypeAdapter(ServiceOutput).validate_python( + ServiceOutput.model_config["json_schema_extra"]["examples"][3] + ), + } + + +@pytest.fixture +async def attachable_networks_and_ids(faker: Faker) -> AsyncIterable[dict[str, str]]: + # generate some network names + unique_id = faker.uuid4() + network_names = {f"test_network_{i}_{unique_id}": "" for i in range(10)} + + # create networks + async with aiodocker.Docker() as client: + for network_name in network_names: + network_config = { + "Name": network_name, + "Driver": "overlay", + "Attachable": True, + "Internal": True, + } + network = await client.networks.create(network_config) + network_names[network_name] = network.id + + yield network_names + + # remove networks + async with aiodocker.Docker() as client: + for network_id in network_names.values(): + network = await client.networks.get(network_id) + assert await network.delete() is True + + +@pytest.fixture +def mock_aiodocker_containers_get(mocker: MockerFixture, faker: Faker) -> int: + """raises a DockerError with a random HTTP status which is also returned""" + mock_status_code = faker.random_int(1, 999) + + async def mock_get(*args: str, **kwargs: Any) -> None: + raise aiodocker.exceptions.DockerError( + status=mock_status_code, data={"message": "aiodocker_mocked_error"} + ) + + mocker.patch("aiodocker.containers.DockerContainers.get", side_effect=mock_get) + + return mock_status_code + + +@pytest.fixture +def mock_event_filter_enqueue( + app: FastAPI, monkeypatch: pytest.MonkeyPatch +) -> AsyncMock: + mock = AsyncMock(return_value=None) + outputs_watcher: OutputsWatcher = app.state.outputs_watcher + monkeypatch.setattr(outputs_watcher._event_filter, "enqueue", mock) # noqa: SLF001 + return mock + + +@pytest.fixture +async def mocked_port_key_events_queue_coro_get( + app: FastAPI, mocker: MockerFixture +) -> Mock: + outputs_context: OutputsContext = app.state.outputs_context + + target = getattr(outputs_context.port_key_events_queue, "coro_get") # noqa: B009 + + mock_result_tracker = Mock() + + async def _wrapped_coroutine() -> Any: + # NOTE: coro_get returns a future, naming is unfortunate + # and can cause confusion, normally an async def function + # will return a coroutine not a future object. 
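+        # Forward the call to the original coro_get captured above and record
+        # every awaited result in mock_result_tracker so tests can assert on it.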
+ future: asyncio.Future = target() + result = await future + mock_result_tracker(result) + + return result + + mocker.patch.object( + outputs_context.port_key_events_queue, + "coro_get", + side_effect=_wrapped_coroutine, + ) + + return mock_result_tracker + + +# TESTS + + +def test_ensure_api_vtag_is_v1(): + assert API_VTAG == "v1" + + +async def test_start_same_space_twice(compose_spec: str, test_client: TestClient): + settings = test_client.application.state.settings + + settings_1 = settings.model_copy( + update={"DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": "test_name_space_1"}, deep=True + ) + await _assert_compose_spec_pulled(compose_spec, settings_1) + + settings_2 = settings.model_copy( + update={"DYNAMIC_SIDECAR_COMPOSE_NAMESPACE": "test_name_space_2"}, deep=True + ) + await _assert_compose_spec_pulled(compose_spec, settings_2) + + +async def test_containers_get( + test_client: TestClient, + started_containers: list[str], + ensure_external_volumes: None, +): + response = await test_client.get(f"/{API_VTAG}/containers") + assert response.status_code == status.HTTP_200_OK, response.text + + decoded_response = response.json() + assert set(decoded_response) == set(started_containers) + for entry in decoded_response.values(): + assert "Status" not in entry + assert "Error" not in entry + + +async def test_containers_get_status( + test_client: TestClient, + started_containers: list[str], + ensure_external_volumes: None, +): + response = await test_client.get( + f"/{API_VTAG}/containers", query_string={"only_status": True} + ) + assert response.status_code == status.HTTP_200_OK, response.text + + decoded_response = response.json() + assert set(decoded_response) == set(started_containers) + + def assert_keys_exist(result: dict[str, Any]) -> bool: + for entry in result.values(): + assert "Status" in entry + assert "Error" in entry + return True + + assert assert_keys_exist(decoded_response) is True + + +async def test_containers_docker_status_docker_error( + test_client: TestClient, + started_containers: list[str], + mock_aiodocker_containers_get: int, +): + response = await test_client.get(f"/{API_VTAG}/containers") + assert response.status_code == mock_aiodocker_containers_get, response.text + + +async def test_container_missing_container( + test_client: TestClient, not_started_containers: list[str] +): + def _expected_error_string(container: str) -> dict[str, str]: + return { + "detail": f"No container '{container}' was started. 
Started containers '[]'" + } + + for container in not_started_containers: + # inspect container + response = await test_client.get(f"/{API_VTAG}/containers/{container}") + assert response.status_code == status.HTTP_404_NOT_FOUND, response.text + assert response.json() == _expected_error_string(container) + + +async def test_container_docker_error( + test_client: TestClient, + started_containers: list[str], + mock_aiodocker_containers_get: int, +): + def _expected_error_string(status_code: int) -> dict[str, Any]: + return { + "errors": [ + f"An unexpected Docker error occurred status_code={status_code}, message=aiodocker_mocked_error" + ] + } + + for container in started_containers: + # inspect container + response = await test_client.get(f"/{API_VTAG}/containers/{container}") + assert response.status_code == mock_aiodocker_containers_get, response.text + assert response.json() == _expected_error_string(mock_aiodocker_containers_get) + + +async def test_outputs_watcher_disabling( + test_client: TestClient, + mocked_port_key_events_queue_coro_get: Mock, + mock_event_filter_enqueue: AsyncMock, +): + assert isinstance(test_client.application, FastAPI) + outputs_context: OutputsContext = test_client.application.state.outputs_context + outputs_manager: OutputsManager = test_client.application.state.outputs_manager + outputs_manager.task_monitor_interval_s = WAIT_FOR_OUTPUTS_WATCHER / 10 + + async def _create_port_key_events(is_propagation_enabled: bool) -> None: + random_subdir = f"{uuid4()}" + + await outputs_context.set_file_type_port_keys([random_subdir]) + + dir_name = outputs_context.outputs_path / random_subdir + await mkdir(dir_name) + async with aiofiles.open(dir_name / f"file_{uuid4()}", "w") as f: + await f.write("ok") + + EXPECTED_EVENTS_PER_RANDOM_PORT_KEY = 2 + + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + # check events were triggered after generation + events_in_dir: list[str] = [ + c.args[0] + for c in mocked_port_key_events_queue_coro_get.call_args_list + if c.args[0] == random_subdir + ] + + if is_propagation_enabled: + assert len(events_in_dir) >= EXPECTED_EVENTS_PER_RANDOM_PORT_KEY + else: + assert len(events_in_dir) == 0 + + def _assert_events_generated(*, expected_events: int) -> None: + events_set = {x.args[0] for x in mock_event_filter_enqueue.call_args_list} + assert len(events_set) == expected_events + + # by default outputs-watcher it is disabled + _assert_events_generated(expected_events=0) + await _create_port_key_events(is_propagation_enabled=False) + _assert_events_generated(expected_events=0) + + # after enabling new events will be generated + await _assert_enable_output_ports(test_client) + _assert_events_generated(expected_events=0) + await _create_port_key_events(is_propagation_enabled=True) + _assert_events_generated(expected_events=1) + + # disabling again, no longer generate events + await _assert_disable_output_ports(test_client) + _assert_events_generated(expected_events=1) + await _create_port_key_events(is_propagation_enabled=False) + _assert_events_generated(expected_events=1) + + # enabling once more time, events are once again generated + await _assert_enable_output_ports(test_client) + _assert_events_generated(expected_events=1) + for i in range(10): + await _create_port_key_events(is_propagation_enabled=True) + _assert_events_generated(expected_events=2 + i) + + +async def test_container_create_outputs_dirs( + test_client: TestClient, + mock_outputs_labels: dict[str, ServiceOutput], + mock_event_filter_enqueue: 
AsyncMock, +): + assert isinstance(test_client.application, FastAPI) + mounted_volumes = AppState(test_client.application).mounted_volumes + + # by default outputs-watcher it is disabled + await _assert_enable_output_ports(test_client) + await asyncio.sleep(WAIT_FOR_OUTPUTS_WATCHER) + + assert mock_event_filter_enqueue.call_count == 0 + + json_outputs_labels = { + k: v.model_dump(by_alias=True) for k, v in mock_outputs_labels.items() + } + response = await test_client.post( + f"/{API_VTAG}/containers/ports/outputs/dirs", + json={"outputs_labels": json_outputs_labels}, + ) + assert response.status_code == status.HTTP_204_NO_CONTENT, response.text + assert response.text == "" + + for dir_name in mock_outputs_labels: + assert (mounted_volumes.disk_outputs_path / dir_name).is_dir() + + await asyncio.sleep(WAIT_FOR_OUTPUTS_WATCHER) + EXPECT_EVENTS_WHEN_CREATING_OUTPUT_PORT_KEY_DIRS = 0 + assert ( + mock_event_filter_enqueue.call_count + == EXPECT_EVENTS_WHEN_CREATING_OUTPUT_PORT_KEY_DIRS + ) + + +def _get_entrypoint_container_name(test_client: TestClient) -> str: + parsed_spec = parse_compose_spec( + test_client.application.state.shared_store.compose_spec + ) + container_name = None + for service_name, service_details in parsed_spec["services"].items(): + if service_details.get("labels", None) is not None: + container_name = service_name + break + assert container_name is not None + return container_name + + +@pytest.mark.parametrize("include_exclude_filter_option", [True, False]) +async def test_containers_entrypoint_name_ok( + test_client: TestClient, + dynamic_sidecar_network_name: str, + started_containers: list[str], + include_exclude_filter_option: bool, +): + filters_dict = {"network": dynamic_sidecar_network_name} + if include_exclude_filter_option: + filters_dict["exclude"] = SUFFIX_EGRESS_PROXY_NAME + filters = json.dumps(filters_dict) + + response = await test_client.get(f"/{API_VTAG}/containers/name?filters={filters}") + assert response.status_code == status.HTTP_200_OK, response.text + container_name = response.json() + assert container_name == _get_entrypoint_container_name(test_client) + assert SUFFIX_EGRESS_PROXY_NAME not in container_name + + +@pytest.mark.parametrize("include_exclude_filter_option", [True, False]) +async def test_containers_entrypoint_name_containers_not_started( + test_client: TestClient, + dynamic_sidecar_network_name: str, + started_containers: list[str], + include_exclude_filter_option: bool, +): + entrypoint_container = _get_entrypoint_container_name(test_client) + + # remove the container from the spec + parsed_spec = parse_compose_spec( + test_client.application.state.shared_store.compose_spec + ) + del parsed_spec["services"][entrypoint_container] + test_client.application.state.shared_store.compose_spec = yaml.safe_dump( + parsed_spec + ) + + filters_dict = {"network": dynamic_sidecar_network_name} + if include_exclude_filter_option: + filters_dict["exclude"] = SUFFIX_EGRESS_PROXY_NAME + filters = json.dumps(filters_dict) + response = await test_client.get(f"/{API_VTAG}/containers/name?filters={filters}") + + if include_exclude_filter_option: + assert response.status_code == status.HTTP_404_NOT_FOUND, response.text + assert response.json() == { + "detail": "No container found for network=entrypoint_container_network" + } + else: + assert response.status_code == status.HTTP_200_OK, response.text + found_container = response.json() + assert found_container in started_containers + assert SUFFIX_EGRESS_PROXY_NAME in found_container + + +async def 
test_attach_detach_container_to_network( + docker_swarm: None, + test_client: TestClient, + selected_spec: str, + attachable_networks_and_ids: dict[str, str], + mock_metrics_params: CreateServiceMetricsAdditionalParams, +): + container_names = await _start_containers( + test_client, selected_spec, mock_metrics_params + ) + + async with aiodocker.Docker() as docker: + for container_name in container_names: + for network_name, network_id in attachable_networks_and_ids.items(): + network_aliases = _create_network_aliases(network_name) + + # attach network to containers + for _ in range(2): # calling 2 times in a row + response = await test_client.post( + f"/{API_VTAG}/containers/{container_name}/networks:attach", + json={ + "network_id": network_id, + "network_aliases": network_aliases, + }, + ) + assert ( + response.status_code == status.HTTP_204_NO_CONTENT + ), response.text + + container = await docker.containers.get(container_name) + container_inspect = await container.show() + networks = container_inspect["NetworkSettings"]["Networks"] + assert network_id in networks + assert set(network_aliases).issubset( + set(networks[network_id]["Aliases"]) + ) + + # detach network from containers + for _ in range(2): # running twice in a row + response = await test_client.post( + f"/{API_VTAG}/containers/{container_name}/networks:detach", + json={"network_id": network_id}, + ) + assert ( + response.status_code == status.HTTP_204_NO_CONTENT + ), response.text + + container = await docker.containers.get(container_name) + container_inspect = await container.show() + networks = container_inspect["NetworkSettings"]["Networks"] + assert network_id not in networks + + +@pytest.fixture +def define_inactivity_command( + mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None: + setenvs_from_dict( + monkeypatch, + { + "DY_SIDECAR_CALLBACKS_MAPPING": json.dumps( + { + "inactivity": { + "service": "mock_container_name", + "command": "", + "timeout": 4, + } + } + ) + }, + ) + + +@pytest.fixture +def mock_shared_store(app: FastAPI) -> None: + shared_store: SharedStore = app.state.shared_store + shared_store.original_to_container_names["mock_container_name"] = ( + "mock_container_name" + ) + + +async def test_containers_activity_command_failed( + define_inactivity_command: None, test_client: TestClient, mock_shared_store: None +): + response = await test_client.get(f"/{API_VTAG}/containers/activity") + assert response.status_code == 200, response.text + assert ( + response.json() + == ActivityInfo(seconds_inactive=_INACTIVE_FOR_LONG_TIME).model_dump() + ) + + +async def test_containers_activity_no_inactivity_defined( + test_client: TestClient, mock_shared_store: None +): + response = await test_client.get(f"/{API_VTAG}/containers/activity") + assert response.status_code == 200, response.text + assert response.json() is None + + +@pytest.fixture +def activity_response() -> ActivityInfo: + return ActivityInfo(seconds_inactive=10) + + +@pytest.fixture +def mock_inactive_since_command_response( + mocker: MockerFixture, + activity_response: ActivityInfo, +) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.api.rest.containers.run_command_in_container", + return_value=activity_response.model_dump_json(), + ) + + +async def test_containers_activity_inactive_since( + define_inactivity_command: None, + mock_inactive_since_command_response: None, + test_client: TestClient, + mock_shared_store: None, + activity_response: ActivityInfo, +): + response = await 
test_client.get(f"/{API_VTAG}/containers/activity") + assert response.status_code == 200, response.text + assert response.json() == activity_response.model_dump() + + +@pytest.fixture +def mock_inactive_response_wrong_format(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.api.rest.containers.run_command_in_container", + return_value="This is an unparsable json response {}", + ) + + +async def test_containers_activity_unexpected_response( + define_inactivity_command: None, + mock_inactive_response_wrong_format: None, + test_client: TestClient, + mock_shared_store: None, +): + response = await test_client.get(f"/{API_VTAG}/containers/activity") + assert response.status_code == 200, response.text + assert ( + response.json() + == ActivityInfo(seconds_inactive=_INACTIVE_FOR_LONG_TIME).model_dump() + ) diff --git a/services/dynamic-sidecar/tests/unit/test_api_rest_containers_long_running_tasks.py b/services/dynamic-sidecar/tests/unit/test_api_rest_containers_long_running_tasks.py new file mode 100644 index 00000000000..f65a624459d --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_api_rest_containers_long_running_tasks.py @@ -0,0 +1,732 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=no-member + +import json +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Iterator +from contextlib import asynccontextmanager, contextmanager +from inspect import getmembers, isfunction +from pathlib import Path +from typing import Any, Final, NamedTuple +from unittest.mock import AsyncMock + +import aiodocker +import faker +import pytest +from aiodocker.containers import DockerContainer +from aiodocker.volumes import DockerVolume +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from fastapi.routing import APIRoute +from httpx import ASGITransport, AsyncClient +from models_library.api_schemas_dynamic_sidecar.containers import DockerComposeYamlStr +from models_library.api_schemas_long_running_tasks.base import ( + ProgressMessage, + ProgressPercent, +) +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from pydantic import AnyHttpUrl, TypeAdapter +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from servicelib.fastapi.long_running_tasks.client import ( + Client, + TaskClientResultError, + TaskId, + periodic_task_result, +) +from servicelib.fastapi.long_running_tasks.client import setup as client_setup +from simcore_sdk.node_ports_common.exceptions import NodeNotFound +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.api.rest import containers_long_running_tasks +from simcore_service_dynamic_sidecar.core.validation import InvalidComposeSpecError +from simcore_service_dynamic_sidecar.models.schemas.containers import ( + ContainersComposeSpec, + ContainersCreate, +) +from simcore_service_dynamic_sidecar.models.shared_store import SharedStore +from simcore_service_dynamic_sidecar.modules.inputs import enable_inputs_pulling +from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext +from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager + +FAST_STATUS_POLL: Final[float] = 0.1 +CREATE_SERVICE_CONTAINERS_TIMEOUT: Final[float] = 60 +DEFAULT_COMMAND_TIMEOUT: Final[int] = 5 + + +class ContainerTimes(NamedTuple): + created: Any + started_at: Any + finished_at: Any + + +# UTILS + + 
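+# NOTE: the tests below exercise the long-running-task endpoints the way a real
+# caller would: each _get_task_id_* helper submits a request and returns the
+# TaskId, which is then polled via periodic_task_result with FAST_STATUS_POLL
+# as the poll interval and CREATE_SERVICE_CONTAINERS_TIMEOUT as the overall
+# task timeout.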
+def _print_routes(app: FastAPI) -> None: + endpoints = [route.path for route in app.routes if isinstance(route, APIRoute)] + print("ROUTES\n", json.dumps(endpoints, indent=2)) + + +def _get_dynamic_sidecar_network_name() -> str: + return "entrypoint_container_network" + + +@contextmanager +def mock_tasks(mocker: MockerFixture) -> Iterator[None]: + async def _just_log_task(*args, **kwargs) -> None: + print(f"Called mocked function with {args}, {kwargs}") + + # searching by name since all start with _task + tasks_names = [ + x[0] + for x in getmembers(containers_long_running_tasks, isfunction) + if x[0].startswith("task") + ] + + for task_name in tasks_names: + mocker.patch.object( + containers_long_running_tasks, task_name, new=_just_log_task + ) + + yield None + + +@asynccontextmanager +async def auto_remove_task(client: Client, task_id: TaskId) -> AsyncIterator[None]: + """clenup pending tasks""" + try: + yield + finally: + await client.cancel_and_delete_task(task_id, timeout=10) + + +async def _get_container_timestamps( + container_names: list[str], +) -> dict[str, ContainerTimes]: + container_timestamps: dict[str, ContainerTimes] = {} + async with aiodocker.Docker() as client: + for container_name in container_names: + container: DockerContainer = await client.containers.get(container_name) + container_inspect: dict[str, Any] = await container.show() + container_timestamps[container_name] = ContainerTimes( + created=container_inspect["Created"], + started_at=container_inspect["State"]["StartedAt"], + finished_at=container_inspect["State"]["FinishedAt"], + ) + + return container_timestamps + + +@pytest.fixture +def dynamic_sidecar_network_name() -> str: + return _get_dynamic_sidecar_network_name() + + +@pytest.fixture( + params=[ + { + "version": "3", + "services": { + "first-box": { + "image": "alpine:latest", + "networks": { + _get_dynamic_sidecar_network_name(): None, + }, + }, + "second-box": { + "image": "alpine:latest", + "command": ["sh", "-c", "sleep 100000"], + }, + }, + "networks": {_get_dynamic_sidecar_network_name(): None}, + }, + { + "version": "3", + "services": { + "solo-box": { + "image": "alpine:latest", + "command": ["sh", "-c", "sleep 100000"], + }, + }, + }, + ] +) +def compose_spec(request: pytest.FixtureRequest) -> DockerComposeYamlStr: + spec_dict: dict[str, Any] = request.param # type: ignore + return json.dumps(spec_dict) + + +@pytest.fixture +def backend_url() -> AnyHttpUrl: + return TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io") + + +@pytest.fixture +def mock_environment(mock_rabbitmq_envs: EnvVarsDict) -> EnvVarsDict: + return mock_rabbitmq_envs + + +@pytest.fixture +async def app(app: FastAPI) -> AsyncIterable[FastAPI]: + # add the client setup to the same application + # this is only required for testing, in reality + # this will be in a different process + client_setup(app) + async with LifespanManager(app): + _print_routes(app) + yield app + + +@pytest.fixture +async def httpx_async_client( + app: FastAPI, + backend_url: AnyHttpUrl, + ensure_external_volumes: tuple[DockerVolume], + cleanup_containers: None, + ensure_shared_store_dir: Path, +) -> AsyncIterable[AsyncClient]: + # crete dir here + async with AsyncClient( + transport=ASGITransport(app=app), + base_url=f"{backend_url}", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def client( + app: FastAPI, httpx_async_client: AsyncClient, backend_url: AnyHttpUrl +) -> Client: + return Client(app=app, 
async_client=httpx_async_client, base_url=f"{backend_url}") + + +@pytest.fixture +def shared_store(httpx_async_client: AsyncClient) -> SharedStore: + # pylint: disable=protected-access + return httpx_async_client._transport.app.state.shared_store # noqa: SLF001 + + +@pytest.fixture +def mock_data_manager(mocker: MockerFixture) -> None: + for function_name in ( + "_push_directory", + "_state_metadata_entry_exists", + "_pull_directory", + "_pull_legacy_archive", + ): + mocker.patch( + f"simcore_service_dynamic_sidecar.modules.long_running_tasks.data_manager.{function_name}", + autospec=True, + return_value=None, + ) + + +@pytest.fixture() +def mock_nodeports(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", + return_value=None, + ) + mocker.patch( + "simcore_service_dynamic_sidecar.modules.nodeports.download_target_ports", + return_value=42, + ) + + +@pytest.fixture( + params=[ + [], + None, + ["single_port"], + ["first_port", "second_port"], + ] +) +async def mock_port_keys( + request: pytest.FixtureRequest, client: Client +) -> list[str] | None: + outputs_context: OutputsContext = client.app.state.outputs_context + if request.param is not None: + await outputs_context.set_file_type_port_keys(request.param) + return request.param + + +@pytest.fixture +def outputs_manager(client: Client) -> OutputsManager: + return client.app.state.outputs_manager + + +@pytest.fixture +def missing_node_uuid(faker: faker.Faker) -> str: + return faker.uuid4() + + +@pytest.fixture +def mock_node_missing(mocker: MockerFixture, missing_node_uuid: str) -> None: + async def _mocked(*args, **kwargs) -> None: + raise NodeNotFound(missing_node_uuid) + + mocker.patch( + "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", + side_effect=_mocked, + ) + + +async def _get_task_id_pull_user_servcices_docker_images( + httpx_async_client: AsyncClient, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post(f"/{API_VTAG}/containers/images:pull") + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_create_service_containers( + httpx_async_client: AsyncClient, + compose_spec: DockerComposeYamlStr, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + *args, + **kwargs, +) -> TaskId: + containers_compose_spec = ContainersComposeSpec( + docker_compose_yaml=compose_spec, + ) + await httpx_async_client.post( + f"/{API_VTAG}/containers/compose-spec", + json=containers_compose_spec.model_dump(), + ) + containers_create = ContainersCreate(metrics_params=mock_metrics_params) + response = await httpx_async_client.post( + f"/{API_VTAG}/containers", json=containers_create.model_dump() + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_docker_compose_down( + httpx_async_client: AsyncClient, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post(f"/{API_VTAG}/containers:down") + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_state_restore( + httpx_async_client: AsyncClient, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post(f"/{API_VTAG}/containers/state:restore") + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_state_save( + httpx_async_client: AsyncClient, *args, **kwargs +) -> TaskId: + response = await 
httpx_async_client.post(f"/{API_VTAG}/containers/state:save") + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_task_ports_inputs_pull( + httpx_async_client: AsyncClient, port_keys: list[str] | None, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post( + f"/{API_VTAG}/containers/ports/inputs:pull", json=port_keys + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_task_ports_outputs_pull( + httpx_async_client: AsyncClient, port_keys: list[str] | None, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post( + f"/{API_VTAG}/containers/ports/outputs:pull", json=port_keys + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_task_ports_outputs_push( + httpx_async_client: AsyncClient, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post( + f"/{API_VTAG}/containers/ports/outputs:push" + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_task_containers_restart( + httpx_async_client: AsyncClient, command_timeout: int, *args, **kwargs +) -> TaskId: + response = await httpx_async_client.post( + f"/{API_VTAG}/containers:restart", + params={"command_timeout": command_timeout}, + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _debug_progress( + message: ProgressMessage, percent: ProgressPercent | None, task_id: TaskId +) -> None: + print(f"{task_id} {percent} {message}") + + +async def test_create_containers_task( + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + shared_store: SharedStore, +) -> None: + last_progress_message: tuple[str, float] | None = None + + async def create_progress(message: str, percent: float, _: TaskId) -> None: + nonlocal last_progress_message + last_progress_message = (message, percent) + print(message, percent) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=create_progress, + ) as result: + assert shared_store.container_names == result + + assert last_progress_message == ("finished", 1.0) + + +async def test_pull_user_servcices_docker_images( + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + shared_store: SharedStore, +) -> None: + last_progress_message: tuple[ProgressMessage, ProgressPercent] | None = None + + async def create_progress( + message: ProgressMessage, percent: ProgressPercent | None, _: TaskId + ) -> None: + nonlocal last_progress_message + assert percent is not None + last_progress_message = (message, percent) + print(message, percent) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=create_progress, + ) as result: + assert shared_store.container_names == result + + assert 
last_progress_message == ("finished", 1.0) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_pull_user_servcices_docker_images( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result is None + assert last_progress_message == ("finished", 1.0) + + +async def test_create_containers_task_invalid_yaml_spec( + httpx_async_client: AsyncClient, + client: Client, + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +): + with pytest.raises(InvalidComposeSpecError) as exec_info: + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, "", mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ): + pass + assert "Provided yaml is not valid" in f"{exec_info.value}" + + +@pytest.mark.parametrize( + "get_task_id_callable", + [ + _get_task_id_pull_user_servcices_docker_images, + _get_task_id_create_service_containers, + _get_task_id_docker_compose_down, + _get_task_id_state_restore, + _get_task_id_state_save, + _get_task_id_task_ports_inputs_pull, + _get_task_id_task_ports_outputs_pull, + _get_task_id_task_ports_outputs_push, + _get_task_id_task_containers_restart, + ], +) +async def test_same_task_id_is_returned_if_task_exists( + httpx_async_client: AsyncClient, + client: Client, + mocker: MockerFixture, + get_task_id_callable: Callable[..., Awaitable], + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + compose_spec: str, +) -> None: + def _get_awaitable() -> Awaitable: + return get_task_id_callable( + httpx_async_client=httpx_async_client, + compose_spec=compose_spec, + mock_metrics_params=mock_metrics_params, + port_keys=None, + command_timeout=0, + ) + + with mock_tasks(mocker): + task_id = await _get_awaitable() + async with auto_remove_task(client, task_id): + assert await _get_awaitable() == task_id + + # since the previous task was already removed it is again possible + # to create a task + new_task_id = await _get_awaitable() + assert new_task_id != task_id + async with auto_remove_task(client, task_id): + pass + + +async def test_containers_down_after_starting( + mock_ensure_read_permissions_on_user_service_data: None, + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + shared_store: SharedStore, + mock_core_rabbitmq: dict[str, AsyncMock], + mocker: MockerFixture, +): + # start containers + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert shared_store.container_names == result + + # put down containers + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result is None + + +async def test_containers_down_missing_spec( + 
httpx_async_client: AsyncClient, + client: Client, + caplog_info_debug: pytest.LogCaptureFixture, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result is None + assert "No compose-spec was found" in caplog_info_debug.text + + +async def test_container_restore_state( + httpx_async_client: AsyncClient, client: Client, mock_data_manager: None +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_state_restore(httpx_async_client), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert isinstance(result, int) + + +async def test_container_save_state( + httpx_async_client: AsyncClient, client: Client, mock_data_manager: None +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_state_save(httpx_async_client), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert isinstance(result, int) + + +@pytest.mark.parametrize("inputs_pulling_enabled", [True, False]) +async def test_container_pull_input_ports( + httpx_async_client: AsyncClient, + client: Client, + inputs_pulling_enabled: bool, + app: FastAPI, + mock_port_keys: list[str] | None, + mock_nodeports: None, +): + if inputs_pulling_enabled: + enable_inputs_pulling(app) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_task_ports_inputs_pull( + httpx_async_client, mock_port_keys + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result == (42 if inputs_pulling_enabled else 0) + + +async def test_container_pull_output_ports( + httpx_async_client: AsyncClient, + client: Client, + mock_port_keys: list[str] | None, + mock_nodeports: None, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_task_ports_outputs_pull( + httpx_async_client, mock_port_keys + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result == 42 + + +async def test_container_push_output_ports( + httpx_async_client: AsyncClient, + client: Client, + mock_port_keys: list[str] | None, + mock_nodeports: None, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_task_ports_outputs_push( + httpx_async_client, mock_port_keys + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result is None + + +async def test_container_push_output_ports_missing_node( + httpx_async_client: AsyncClient, + client: Client, + mock_port_keys: list[str] | None, + missing_node_uuid: str, + mock_node_missing: None, + outputs_manager: OutputsManager, +): + for port_key in mock_port_keys if mock_port_keys else []: + await outputs_manager.port_key_content_changed(port_key) + + async def _test_code() -> None: + async with periodic_task_result( + client=client, + task_id=await _get_task_id_task_ports_outputs_push( + httpx_async_client, mock_port_keys + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + 
status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ): + pass + + if not mock_port_keys: + await _test_code() + else: + with pytest.raises(TaskClientResultError) as exec_info: + await _test_code() + assert f"the node id {missing_node_uuid} was not found" in f"{exec_info.value}" + + +async def test_containers_restart( + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_stop_heart_beat_task: AsyncMock, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + shared_store: SharedStore, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as container_names: + assert shared_store.container_names == container_names + + assert container_names + + container_timestamps_before = await _get_container_timestamps(container_names) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_task_containers_restart( + httpx_async_client, DEFAULT_COMMAND_TIMEOUT + ), + task_timeout=CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=FAST_STATUS_POLL, + progress_callback=_debug_progress, + ) as result: + assert result is None + + container_timestamps_after = await _get_container_timestamps(container_names) + + for container_name in container_names: + before: ContainerTimes = container_timestamps_before[container_name] + after: ContainerTimes = container_timestamps_after[container_name] + + assert before.created == after.created + assert before.started_at < after.started_at + assert before.finished_at < after.finished_at diff --git a/services/dynamic-sidecar/tests/unit/test_api_rest_health.py b/services/dynamic-sidecar/tests/unit/test_api_rest_health.py new file mode 100644 index 00000000000..987ddbf1e63 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_api_rest_health.py @@ -0,0 +1,32 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from async_asgi_testclient import TestClient +from fastapi import status +from simcore_service_dynamic_sidecar.models.schemas.application_health import ( + ApplicationHealth, +) + + +async def test_is_healthy(test_client: TestClient) -> None: + test_client.application.state.application_health.is_healthy = True + response = await test_client.get("/health") + assert response.status_code == status.HTTP_200_OK, response + assert response.json() == ApplicationHealth(is_healthy=True).model_dump() + + +async def test_is_unhealthy(test_client: TestClient) -> None: + test_client.application.state.application_health.is_healthy = False + response = await test_client.get("/health") + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE, response + assert response.json() == { + "detail": ApplicationHealth(is_healthy=False).model_dump() + } + + +async def test_is_unhealthy_via_rabbitmq(test_client: TestClient) -> None: + # pylint: disable=protected-access + test_client.application.state.rabbitmq_client._healthy_state = False # noqa: SLF001 + response = await test_client.get("/health") + assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE, response + assert response.json() == {"detail": "RabbitMQ cannot be reached!"} diff --git a/services/dynamic-sidecar/tests/unit/test_api_rest_prometheus_metrics.py b/services/dynamic-sidecar/tests/unit/test_api_rest_prometheus_metrics.py new file mode 
100644 index 00000000000..7d4454b7e0a --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_api_rest_prometheus_metrics.py @@ -0,0 +1,172 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import json +from collections.abc import AsyncIterable +from typing import Final +from unittest.mock import AsyncMock + +import pytest +from aiodocker.volumes import DockerVolume +from asgi_lifespan import LifespanManager +from fastapi import FastAPI, status +from httpx import ASGITransport, AsyncClient +from models_library.api_schemas_dynamic_sidecar.containers import DockerComposeYamlStr +from models_library.callbacks_mapping import CallbacksMapping +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from pydantic import AnyHttpUrl, TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.fastapi.long_running_tasks.client import ( + Client, + TaskId, + periodic_task_result, +) +from servicelib.fastapi.long_running_tasks.client import setup as client_setup +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.models.schemas.containers import ( + ContainersComposeSpec, + ContainersCreate, +) +from simcore_service_dynamic_sidecar.modules.prometheus_metrics import ( + _USER_SERVICES_NOT_STARTED, + UserServicesMetrics, +) + +_FAST_STATUS_POLL: Final[float] = 0.1 +_CREATE_SERVICE_CONTAINERS_TIMEOUT: Final[float] = 60 + + +@pytest.fixture +async def enable_prometheus_metrics( + monkeypatch: pytest.MonkeyPatch, mock_environment: EnvVarsDict +) -> None: + setenvs_from_dict( + monkeypatch, + { + "DY_SIDECAR_CALLBACKS_MAPPING": json.dumps( + CallbacksMapping.model_config["json_schema_extra"]["examples"][2] + ) + }, + ) + + +@pytest.fixture +async def app(mock_rabbitmq_envs: EnvVarsDict, app: FastAPI) -> AsyncIterable[FastAPI]: + client_setup(app) + async with LifespanManager(app): + yield app + + +@pytest.fixture +def backend_url() -> AnyHttpUrl: + return TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io") + + +@pytest.fixture +async def httpx_async_client( + app: FastAPI, + backend_url: AnyHttpUrl, + ensure_external_volumes: tuple[DockerVolume], + cleanup_containers: None, +) -> AsyncIterable[AsyncClient]: + async with AsyncClient( + transport=ASGITransport(app=app), + base_url=f"{backend_url}", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def client( + app: FastAPI, httpx_async_client: AsyncClient, backend_url: AnyHttpUrl +) -> Client: + return Client(app=app, async_client=httpx_async_client, base_url=f"{backend_url}") + + +@pytest.fixture +def compose_spec() -> DockerComposeYamlStr: + return json.dumps( + { + "version": "3", + "services": { + "rt-web": { + "image": "alpine:latest", + "command": ["sh", "-c", "sleep 100000"], + } + }, + } + ) + + +async def _get_task_id_create_service_containers( + httpx_async_client: AsyncClient, + compose_spec: DockerComposeYamlStr, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +) -> TaskId: + ctontainers_compose_spec = ContainersComposeSpec( + docker_compose_yaml=compose_spec, + ) + await httpx_async_client.post( + f"/{API_VTAG}/containers/compose-spec", + json=ctontainers_compose_spec.model_dump(), + ) + containers_create = ContainersCreate(metrics_params=mock_metrics_params) + response = await httpx_async_client.post( + f"/{API_VTAG}/containers", json=containers_create.model_dump() 
+ ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def test_metrics_disabled( + mock_core_rabbitmq: dict[str, AsyncMock], httpx_async_client: AsyncClient +) -> None: + response = await httpx_async_client.get("/metrics") + assert response.status_code == status.HTTP_404_NOT_FOUND, response + + +async def test_metrics_enabled_no_containers_running( + enable_prometheus_metrics: None, + mock_core_rabbitmq: dict[str, AsyncMock], + httpx_async_client: AsyncClient, +) -> None: + response = await httpx_async_client.get("/metrics") + assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR, response + assert _USER_SERVICES_NOT_STARTED in response.text + + +async def test_metrics_enabled_containers_will_start( + enable_prometheus_metrics: None, + mock_core_rabbitmq: dict[str, AsyncMock], + app: FastAPI, + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +): + # no containers started + response = await httpx_async_client.get("/metrics") + assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR, response + assert _USER_SERVICES_NOT_STARTED in response.text + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert isinstance(result, list) + assert len(result) == 1 + + # check after containers started + # update manually + user_service_metrics: UserServicesMetrics = app.state.user_service_metrics + await user_service_metrics._update_metrics() # noqa: SLF001 + + response = await httpx_async_client.get("/metrics") + assert response.status_code == status.HTTP_200_OK, response diff --git a/services/dynamic-sidecar/tests/unit/test_api_rest_workflow_service_metrics.py b/services/dynamic-sidecar/tests/unit/test_api_rest_workflow_service_metrics.py new file mode 100644 index 00000000000..9c760ad06b4 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_api_rest_workflow_service_metrics.py @@ -0,0 +1,461 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=no-member + + +import asyncio +import json +from collections.abc import AsyncIterable, Callable +from pathlib import Path +from typing import Any, Final +from unittest.mock import AsyncMock + +import aiodocker +import pytest +from aiodocker.containers import DockerContainer +from aiodocker.utils import clean_filters +from aiodocker.volumes import DockerVolume +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from httpx import ASGITransport, AsyncClient +from models_library.api_schemas_dynamic_sidecar.containers import DockerComposeYamlStr +from models_library.generated_models.docker_rest_api import ContainerState +from models_library.generated_models.docker_rest_api import Status2 as ContainerStatus +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingMessages, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.services_creation import CreateServiceMetricsAdditionalParams +from pydantic import AnyHttpUrl, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from 
servicelib.fastapi.long_running_tasks.client import ( + Client, + TaskClientResultError, + TaskId, + periodic_task_result, +) +from servicelib.fastapi.long_running_tasks.client import setup as client_setup +from simcore_service_dynamic_sidecar._meta import API_VTAG +from simcore_service_dynamic_sidecar.core.docker_utils import get_container_states +from simcore_service_dynamic_sidecar.models.schemas.containers import ( + ContainersComposeSpec, + ContainersCreate, +) +from simcore_service_dynamic_sidecar.models.shared_store import SharedStore +from tenacity import AsyncRetrying, TryAgain +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +_FAST_STATUS_POLL: Final[float] = 0.1 +_CREATE_SERVICE_CONTAINERS_TIMEOUT: Final[float] = 60 +_BASE_HEART_BEAT_INTERVAL: Final[float] = 0.1 + + +@pytest.fixture(params=[1, 2]) +def container_names(request: pytest.FixtureRequest) -> list[str]: + return [f"service-{i}" for i in range(request.param)] + + +@pytest.fixture +def raw_compose_spec(container_names: list[str]) -> dict[str, Any]: + base_spec: dict[str, Any] = {"version": "3", "services": {}} + + for container_name in container_names: + base_spec["services"][container_name] = { + "image": "alpine:latest", + "command": ["sh", "-c", "sleep 100000"], + } + + return base_spec + + +@pytest.fixture +def compose_spec(raw_compose_spec: dict[str, Any]) -> DockerComposeYamlStr: + return json.dumps(raw_compose_spec) + + +@pytest.fixture +def backend_url() -> AnyHttpUrl: + return TypeAdapter(AnyHttpUrl).validate_python("http://backgroud.testserver.io") + + +@pytest.fixture +def mock_environment( + monkeypatch: pytest.MonkeyPatch, mock_rabbitmq_envs: EnvVarsDict +) -> EnvVarsDict: + setenvs_from_dict( + monkeypatch, + {"RESOURCE_TRACKING_HEARTBEAT_INTERVAL": f"{_BASE_HEART_BEAT_INTERVAL}"}, + ) + return mock_rabbitmq_envs + + +@pytest.fixture +async def app(app: FastAPI) -> AsyncIterable[FastAPI]: + # add the client setup to the same application + # this is only required for testing, in reality + # this will be in a different process + client_setup(app) + async with LifespanManager(app): + yield app + + +@pytest.fixture +async def httpx_async_client( + mock_ensure_read_permissions_on_user_service_data: None, + app: FastAPI, + backend_url: AnyHttpUrl, + ensure_external_volumes: tuple[DockerVolume], + cleanup_containers: None, + ensure_shared_store_dir: Path, +) -> AsyncIterable[AsyncClient]: + # crete dir here + async with AsyncClient( + transport=ASGITransport(app=app), + base_url=f"{backend_url}", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def client( + app: FastAPI, httpx_async_client: AsyncClient, backend_url: AnyHttpUrl +) -> Client: + return Client(app=app, async_client=httpx_async_client, base_url=f"{backend_url}") + + +@pytest.fixture +def mock_user_services_fail_to_start(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.long_running_tasks._retry_docker_compose_create", + side_effect=RuntimeError(""), + ) + + +@pytest.fixture +def mock_user_services_fail_to_stop(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.long_running_tasks._retry_docker_compose_down", + side_effect=RuntimeError(""), + ) + + +async def _get_task_id_create_service_containers( + httpx_async_client: AsyncClient, + compose_spec: DockerComposeYamlStr, + mock_metrics_params: CreateServiceMetricsAdditionalParams, +) -> TaskId: + containers_compose_spec = 
ContainersComposeSpec( + docker_compose_yaml=compose_spec, + ) + await httpx_async_client.post( + f"/{API_VTAG}/containers/compose-spec", + json=containers_compose_spec.model_dump(), + ) + containers_create = ContainersCreate(metrics_params=mock_metrics_params) + response = await httpx_async_client.post( + f"/{API_VTAG}/containers", json=containers_create.model_dump() + ) + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +async def _get_task_id_docker_compose_down(httpx_async_client: AsyncClient) -> TaskId: + response = await httpx_async_client.post(f"/{API_VTAG}/containers:down") + task_id: TaskId = response.json() + assert isinstance(task_id, str) + return task_id + + +def _get_resource_tracking_messages( + mock_core_rabbitmq: dict[str, AsyncMock], +) -> list[RabbitResourceTrackingMessages]: + return [ + x[0][1] + for x in mock_core_rabbitmq["post_rabbit_message"].call_args_list + if isinstance(x[0][1], RabbitResourceTrackingMessages) + ] + + +async def _wait_for_containers_to_be_running(app: FastAPI) -> None: + shared_store: SharedStore = app.state.shared_store + async for attempt in AsyncRetrying(wait=wait_fixed(0.1), stop=stop_after_delay(4)): + with attempt: + containers_statuses = await get_container_states( + shared_store.container_names + ) + + running_container_statuses = [ + x + for x in containers_statuses.values() + if x is not None and x.status == ContainerStatus.running + ] + + if len(running_container_statuses) != len(shared_store.container_names): + raise TryAgain + + +async def test_service_starts_and_closes_as_expected( + mock_core_rabbitmq: dict[str, AsyncMock], + app: FastAPI, + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + container_names: list[str], + mock_metrics_params: CreateServiceMetricsAdditionalParams, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert isinstance(result, list) + assert len(result) == len(container_names) + + await _wait_for_containers_to_be_running(app) + + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert result is None + + # NOTE: task was not properly cancelled and events were still + # generated. This is here to catch regressions. 
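+    # sleep long enough for any such stray events to be emitted before the checks below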
+ await asyncio.sleep(_BASE_HEART_BEAT_INTERVAL * 10) + + # Ensure messages arrive in the expected order + resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq) + assert len(resource_tracking_messages) >= 3 + + start_message = resource_tracking_messages[0] + heart_beat_messages = resource_tracking_messages[1:-1] + assert len(heart_beat_messages) > 0 + stop_message = resource_tracking_messages[-1] + + assert isinstance(start_message, RabbitResourceTrackingStartedMessage) + for heart_beat_message in heart_beat_messages: + assert isinstance(heart_beat_message, RabbitResourceTrackingHeartbeatMessage) + assert isinstance(stop_message, RabbitResourceTrackingStoppedMessage) + assert stop_message.simcore_platform_status == SimcorePlatformStatus.OK + + +@pytest.mark.parametrize("with_compose_down", [True, False]) +async def test_user_services_fail_to_start( + mock_core_rabbitmq: dict[str, AsyncMock], + app: FastAPI, + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + mock_metrics_params: CreateServiceMetricsAdditionalParams, + with_compose_down: bool, + mock_user_services_fail_to_start: None, +): + with pytest.raises(TaskClientResultError): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ): + ... + shared_store: SharedStore = app.state.shared_store + assert len(shared_store.container_names) == 0 + + if with_compose_down: + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert result is None + + # no messages were sent + resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq) + assert len(resource_tracking_messages) == 0 + + +async def test_user_services_fail_to_stop_or_save_data( + mock_core_rabbitmq: dict[str, AsyncMock], + app: FastAPI, + httpx_async_client: AsyncClient, + client: Client, + compose_spec: str, + container_names: list[str], + mock_metrics_params: CreateServiceMetricsAdditionalParams, + mock_user_services_fail_to_stop: None, +): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_create_service_containers( + httpx_async_client, compose_spec, mock_metrics_params + ), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert isinstance(result, list) + assert len(result) == len(container_names) + + await _wait_for_containers_to_be_running(app) + + # let a few heartbeats pass + await asyncio.sleep(_BASE_HEART_BEAT_INTERVAL * 2) + + # in case of manual intervention multiple stops will be sent + _EXPECTED_STOP_MESSAGES = 4 + for _ in range(_EXPECTED_STOP_MESSAGES): + with pytest.raises(TaskClientResultError): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ): + ... 
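+    # each failed compose-down attempt above is expected to emit its own stop message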
+
+    # Ensure messages arrive in the expected order
+    resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq)
+    assert len(resource_tracking_messages) >= 3
+
+    start_message = resource_tracking_messages[0]
+    heart_beat_messages = resource_tracking_messages[1:-_EXPECTED_STOP_MESSAGES]
+    assert len(heart_beat_messages) > 0
+    stop_messages = resource_tracking_messages[-_EXPECTED_STOP_MESSAGES:]
+    # NOTE: this is a situation where multiple stop events are sent out
+    # since the stopping fails and the operation is repeated
+    assert len(stop_messages) == _EXPECTED_STOP_MESSAGES
+
+    assert isinstance(start_message, RabbitResourceTrackingStartedMessage)
+    for heart_beat_message in heart_beat_messages:
+        assert isinstance(heart_beat_message, RabbitResourceTrackingHeartbeatMessage)
+    for stop_message in stop_messages:
+        assert isinstance(stop_message, RabbitResourceTrackingStoppedMessage)
+        assert stop_message.simcore_platform_status == SimcorePlatformStatus.OK
+
+
+async def _simulate_container_crash(container_names: list[str]) -> None:
+    async with aiodocker.Docker() as docker:
+        filters = clean_filters({"name": container_names})
+        containers: list[DockerContainer] = await docker.containers.list(
+            all=True, filters=filters
+        )
+        for container in containers:
+            await container.kill()
+
+
+@pytest.fixture
+def mock_one_container_oom_killed(mocker: MockerFixture) -> Callable[[], None]:
+    def _mock() -> None:
+        async def _mocked_get_container_states(
+            container_names: list[str],
+        ) -> dict[str, ContainerState | None]:
+            results = await get_container_states(container_names)
+            for result in results.values():
+                if result:
+                    result.oom_killed = True
+                    result.status = ContainerStatus.exited
+                    break
+            return results
+
+        mocker.patch(
+            "simcore_service_dynamic_sidecar.modules.long_running_tasks.get_container_states",
+            side_effect=_mocked_get_container_states,
+        )
+        mocker.patch(
+            "simcore_service_dynamic_sidecar.modules.resource_tracking._core.get_container_states",
+            side_effect=_mocked_get_container_states,
+        )
+
+    return _mock
+
+
+@pytest.mark.parametrize("expected_platform_state", SimcorePlatformStatus)
+async def test_user_services_crash_when_running(
+    mock_core_rabbitmq: dict[str, AsyncMock],
+    app: FastAPI,
+    httpx_async_client: AsyncClient,
+    client: Client,
+    compose_spec: str,
+    container_names: list[str],
+    mock_metrics_params: CreateServiceMetricsAdditionalParams,
+    mock_one_container_oom_killed: Callable[[], None],
+    expected_platform_state: SimcorePlatformStatus,
+):
+    async with periodic_task_result(
+        client=client,
+        task_id=await _get_task_id_create_service_containers(
+            httpx_async_client, compose_spec, mock_metrics_params
+        ),
+        task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT,
+        status_poll_interval=_FAST_STATUS_POLL,
+    ) as result:
+        assert isinstance(result, list)
+        assert len(result) == len(container_names)
+
+    await _wait_for_containers_to_be_running(app)
+
+    # let a few heartbeats pass
+    await asyncio.sleep(_BASE_HEART_BEAT_INTERVAL * 2)
+
+    # crash the user services
+    if expected_platform_state == SimcorePlatformStatus.OK:
+        # OOM kill: the user service is at fault, so the platform status remains OK
+        mock_one_container_oom_killed()
+    else:
+        # any other crash is attributed to the platform and reported as BAD
+        await _simulate_container_crash(container_names)
+
+    # check only start and heartbeats are present
+    resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq)
+    assert len(resource_tracking_messages) >= 2
+
+    start_message = resource_tracking_messages[0]
+    heart_beat_messages = 
resource_tracking_messages[1:] + + assert isinstance(start_message, RabbitResourceTrackingStartedMessage) + for heart_beat_message in heart_beat_messages: + assert isinstance(heart_beat_message, RabbitResourceTrackingHeartbeatMessage) + + # reset mock + await asyncio.sleep(_BASE_HEART_BEAT_INTERVAL * 2) + mock_core_rabbitmq["post_rabbit_message"].reset_mock() + + # wait a bit more and check no further heartbeats are sent + await asyncio.sleep(_BASE_HEART_BEAT_INTERVAL * 2) + new_resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq) + assert len(new_resource_tracking_messages) == 0 + + # sending stop events, and since there was an issue multiple stops + # will be sent due to manual intervention + _EXPECTED_STOP_MESSAGES = 4 + for _ in range(_EXPECTED_STOP_MESSAGES): + async with periodic_task_result( + client=client, + task_id=await _get_task_id_docker_compose_down(httpx_async_client), + task_timeout=_CREATE_SERVICE_CONTAINERS_TIMEOUT, + status_poll_interval=_FAST_STATUS_POLL, + ) as result: + assert result is None + + resource_tracking_messages = _get_resource_tracking_messages(mock_core_rabbitmq) + # NOTE: only 1 stop event arrives here since the stopping of the containers + # was successful + assert len(resource_tracking_messages) == 1 + + for stop_message in resource_tracking_messages: + assert isinstance(stop_message, RabbitResourceTrackingStoppedMessage) + assert stop_message.simcore_platform_status == SimcorePlatformStatus.OK diff --git a/services/dynamic-sidecar/tests/unit/test_core_docker_compose_utils.py b/services/dynamic-sidecar/tests/unit/test_core_docker_compose_utils.py index 2dfc37217a6..115955a2d89 100644 --- a/services/dynamic-sidecar/tests/unit/test_core_docker_compose_utils.py +++ b/services/dynamic-sidecar/tests/unit/test_core_docker_compose_utils.py @@ -25,7 +25,6 @@ SLEEP_TIME_S = 60 COMPOSE_SPEC_SAMPLE = { - "version": "3.8", "services": { "my-test-container": { "environment": [ @@ -46,7 +45,7 @@ def compose_spec_yaml(faker: Faker) -> str: return yaml.safe_dump(COMPOSE_SPEC_SAMPLE, indent=1) -@pytest.mark.parametrize("with_restart", (True, False)) +@pytest.mark.parametrize("with_restart", [True, False]) async def test_docker_compose_workflow( compose_spec_yaml: str, mock_environment: EnvVarsDict, @@ -57,7 +56,8 @@ async def test_docker_compose_workflow( settings = ApplicationSettings.create_from_envs() def _print_result(r: CommandResult): - assert r.elapsed and r.elapsed > 0 + assert r.elapsed + assert r.elapsed > 0 print(f"{r.command:*^100}", "\nELAPSED:", r.elapsed) compose_spec: dict[str, Any] = yaml.safe_load(compose_spec_yaml) @@ -134,7 +134,8 @@ async def test_burst_calls_to_docker_compose_config( success = [r for r in results if r.success] failed = [r for r in results if not r.success] - assert len(success) == CALLS_COUNT and not failed + assert len(success) == CALLS_COUNT + assert not failed async def test_docker_start_fails_if_containers_are_not_present( @@ -145,7 +146,8 @@ async def test_docker_start_fails_if_containers_are_not_present( settings = ApplicationSettings.create_from_envs() def _print_result(r: CommandResult): - assert r.elapsed and r.elapsed > 0 + assert r.elapsed + assert r.elapsed > 0 print(f"{r.command:*^100}", "\nELAPSED:", r.elapsed) compose_spec: dict[str, Any] = yaml.safe_load(compose_spec_yaml) diff --git a/services/dynamic-sidecar/tests/unit/test_core_docker_logs.py b/services/dynamic-sidecar/tests/unit/test_core_docker_logs.py index 0fdfab591a7..e7d780db9dd 100644 --- 
a/services/dynamic-sidecar/tests/unit/test_core_docker_logs.py +++ b/services/dynamic-sidecar/tests/unit/test_core_docker_logs.py @@ -2,14 +2,13 @@ # pylint: disable=unused-argument import asyncio -from typing import AsyncIterable +from collections.abc import AsyncIterable from unittest.mock import AsyncMock import aiodocker import pytest from async_asgi_testclient import TestClient from fastapi import FastAPI -from pytest import MonkeyPatch from simcore_service_dynamic_sidecar.core.docker_logs import ( _get_background_log_fetcher, start_log_fetching, @@ -19,11 +18,12 @@ @pytest.fixture def mock_environment( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, mock_environment: None, ) -> None: monkeypatch.setenv("DYNAMIC_SIDECAR_COMPOSE_NAMESPACE", "test-space") monkeypatch.setenv("RABBIT_HOST", "mocked_host") + monkeypatch.setenv("RABBIT_SECURE", "false") monkeypatch.setenv("RABBIT_USER", "mocked_user") monkeypatch.setenv("RABBIT_PASSWORD", "mocked_password") @@ -55,6 +55,6 @@ async def test_background_log_fetcher( await start_log_fetching(app=app, container_name=container_name) # wait for background log fetcher await asyncio.sleep(1) - assert mock_core_rabbitmq["post_log_message"].call_count == 1 + assert mock_core_rabbitmq["post_rabbit_message"].call_count == 1 await stop_log_fetching(app=app, container_name=container_name) diff --git a/services/dynamic-sidecar/tests/unit/test_core_docker_utils.py b/services/dynamic-sidecar/tests/unit/test_core_docker_utils.py index 838a7b3a94d..e39c908dbc9 100644 --- a/services/dynamic-sidecar/tests/unit/test_core_docker_utils.py +++ b/services/dynamic-sidecar/tests/unit/test_core_docker_utils.py @@ -1,45 +1,46 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument # pylint: disable=unused-variable -from typing import AsyncIterable, AsyncIterator +from collections.abc import AsyncIterable, AsyncIterator +from contextlib import suppress import aiodocker import pytest import yaml +from aiodocker.containers import DockerContainer from faker import Faker -from models_library.services import RunID +from models_library.generated_models.docker_rest_api import ContainerState +from models_library.services import ServiceRunID from pydantic import PositiveInt -from pytest import FixtureRequest -from settings_library.docker_registry import RegistrySettings from simcore_service_dynamic_sidecar.core.docker_utils import ( + _get_containers_inspect_from_names, + get_container_states, + get_containers_count_from_names, get_docker_service_images, - get_running_containers_count_from_names, get_volume_by_label, - pull_images, ) from simcore_service_dynamic_sidecar.core.errors import VolumeNotFoundError -@pytest.fixture(scope="session") +@pytest.fixture def volume_name() -> str: return "test_source_name" @pytest.fixture -def run_id(faker: Faker) -> RunID: - return faker.uuid4(cast_to=None) +def service_run_id() -> ServiceRunID: + return ServiceRunID.get_resource_tracking_run_id_for_dynamic() @pytest.fixture -async def volume_with_label(volume_name: str, run_id: RunID) -> AsyncIterable[None]: +async def volume_with_label( + volume_name: str, service_run_id: ServiceRunID +) -> AsyncIterable[None]: async with aiodocker.Docker() as docker_client: volume = await docker_client.volumes.create( { "Name": "test_volume_name_1", - "Labels": { - "source": volume_name, - "run_id": f"{run_id}", - }, + "Labels": {"source": volume_name, "run_id": service_run_id}, } ) @@ -49,7 +50,7 @@ async def volume_with_label(volume_name: str, run_id: RunID) -> 
AsyncIterable[No @pytest.fixture(params=[0, 1, 2, 3]) -def container_count(request: FixtureRequest) -> PositiveInt: +def container_count(request: pytest.FixtureRequest) -> PositiveInt: return request.param @@ -63,8 +64,8 @@ async def started_services(container_names: list[str]) -> AsyncIterator[None]: async with aiodocker.Docker() as docker_client: started_containers = [] for container_name in container_names: - container = await docker_client.containers.create( - config={"Image": "busybox:latest"}, + container = await docker_client.containers.run( + config={"Image": "alpine:latest", "Cmd": ["sh", "-c", "sleep 10000"]}, name=container_name, ) started_containers.append(container) @@ -72,34 +73,68 @@ async def started_services(container_names: list[str]) -> AsyncIterator[None]: yield for container in started_containers: - await container.stop() + with suppress(aiodocker.DockerError): + await container.kill() await container.delete() async def test_volume_with_label( - volume_with_label: None, volume_name: str, run_id: RunID + volume_with_label: None, volume_name: str, service_run_id: ServiceRunID ) -> None: - assert await get_volume_by_label(volume_name, run_id) + assert await get_volume_by_label(volume_name, service_run_id) -async def test_volume_label_missing(run_id: RunID) -> None: +async def test_volume_label_missing(service_run_id: ServiceRunID) -> None: with pytest.raises(VolumeNotFoundError) as exc_info: - await get_volume_by_label("not_exist", run_id) + await get_volume_by_label("not_exist", service_run_id) error_msg = f"{exc_info.value}" - assert f"{run_id}" in error_msg + assert service_run_id in error_msg assert "not_exist" in error_msg +async def test__get_containers_inspect_from_names( + started_services: None, container_names: list[str], faker: Faker +): + MISSING_CONTAINER_NAME = f"missing-container-{faker.uuid4()}" + container_details: dict[str, DockerContainer | None] = ( + await _get_containers_inspect_from_names( + [*container_names, MISSING_CONTAINER_NAME] + ) + ) + # containers which do not exist always return None + assert MISSING_CONTAINER_NAME in container_details + assert container_details.pop(MISSING_CONTAINER_NAME) is None + + assert set(container_details.keys()) == set(container_names) + for docker_container in container_details.values(): + assert docker_container is not None + + +async def test_get_container_statuses( + started_services: None, container_names: list[str], faker: Faker +): + MISSING_CONTAINER_NAME = f"missing-container-{faker.uuid4()}" + container_states: dict[str, ContainerState | None] = await get_container_states( + [*container_names, MISSING_CONTAINER_NAME] + ) + # containers which do not exist always have a None status + assert MISSING_CONTAINER_NAME in container_states + assert container_states.pop(MISSING_CONTAINER_NAME) is None + + assert set(container_states.keys()) == set(container_names) + for docker_status in container_states.values(): + assert docker_status is not None + + async def test_get_running_containers_count_from_names( started_services: None, container_names: list[str], container_count: PositiveInt ): - found_containers = await get_running_containers_count_from_names(container_names) + found_containers = await get_containers_count_from_names(container_names) assert found_containers == container_count COMPOSE_SPEC_SAMPLE = { - "version": "3.8", "services": { "my-test-container": { "environment": [ @@ -131,64 +166,3 @@ def test_get_docker_service_images(compose_spec_yaml: str): "nginx:latest", 
"simcore/services/dynamic/jupyter-math:2.1.3", } - - -@pytest.mark.skip( - reason="Only for manual testing." - "Avoid this test in CI since it consumes disk and time" -) -async def test_issue_3793_pulling_images_raises_error(): - """ - Reproduces (sometimes) https://github.com/ITISFoundation/osparc-simcore/issues/3793 - """ - - async def _print_progress(*args, **kwargs): - print("progress -> ", args, kwargs) - - async def _print_log(*args, **kwargs): - print("log -> ", args, kwargs) - - for n in range(2): - await pull_images( - images={ - "ubuntu:latest", - "ubuntu:18.04", - "ubuntu:22.04", - "ubuntu:22.10", - "ubuntu:23.04", - "ubuntu:14.04", - "itisfoundation/mattward-viewer:latest", # 6.1 GB - }, - registry_settings=RegistrySettings( - REGISTRY_AUTH=False, - REGISTRY_USER="", - REGISTRY_PW="", - REGISTRY_SSL=False, - ), - progress_cb=_print_progress, - log_cb=_print_log, - ) - - -@pytest.mark.parametrize("repeat", ["first-pull", "repeat-pull"]) -async def test_pull_image(repeat: str): - async def _print_progress(current: int, total: int): - print("progress ->", f"{current=}", f"{total=}") - - async def _print_log(msg): - assert "alpine" in msg - print("log -> ", msg) - - await pull_images( - images={ - "alpine:latest", - }, - registry_settings=RegistrySettings( - REGISTRY_AUTH=False, - REGISTRY_USER="", - REGISTRY_PW="", - REGISTRY_SSL=False, - ), - progress_cb=_print_progress, - log_cb=_print_log, - ) diff --git a/services/dynamic-sidecar/tests/unit/test_core_errors.py b/services/dynamic-sidecar/tests/unit/test_core_errors.py new file mode 100644 index 00000000000..6b27efa3fcd --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_core_errors.py @@ -0,0 +1,42 @@ +# pylint:disable=broad-exception-caught +# pylint:disable=no-member + +from simcore_service_dynamic_sidecar.core.errors import ( + UnexpectedDockerError, + VolumeNotFoundError, +) +from starlette import status + + +def test_legacy_interface_unexpected_docker_error(): + message = "some_message" + status_code = 42 + try: + raise UnexpectedDockerError( # noqa: TRY301 + message=message, status_code=status_code + ) + except Exception as e: + print(e) + assert e.status_code == status_code # noqa: PT017 + assert message in e.message # noqa: PT017 + + +def test_legacy_interface_volume_not_found_error(): + try: + volumes = [{}, {"Name": "a_volume"}] + volume_names = " ".join(v.get("Name", "UNKNOWN") for v in volumes) + + raise VolumeNotFoundError( # noqa: TRY301 + volume_count=len(volumes), + source_label="some", + service_run_id="service_run_id", + volume_names=volume_names, + status_code=status.HTTP_404_NOT_FOUND, + ) + except Exception as e: + print(e) + assert ( # noqa: PT017 + e.message + == "Expected 1 got 2 volumes labels with source_label=some, service_run_id=service_run_id: Found UNKNOWN a_volume" + ) + assert e.status_code == status.HTTP_404_NOT_FOUND # noqa: PT017 diff --git a/services/dynamic-sidecar/tests/unit/test_core_external_dependencies.py b/services/dynamic-sidecar/tests/unit/test_core_external_dependencies.py new file mode 100644 index 00000000000..e741b11189c --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_core_external_dependencies.py @@ -0,0 +1,89 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from datetime import timedelta +from typing import Final + +import pytest +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from models_library.projects import ProjectID +from pydantic import NonNegativeFloat +from 
pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from simcore_service_dynamic_sidecar.core.application import create_app +from simcore_service_dynamic_sidecar.core.external_dependencies import ( + CouldNotReachExternalDependenciesError, +) + +_LONG_STARTUP_SHUTDOWN_TIMEOUT: Final[NonNegativeFloat] = 60 + + +@pytest.fixture +def mock_liveness_timeout(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.service_liveness._DEFAULT_TIMEOUT_INTERVAL", + new=timedelta(seconds=0.1), + ) + + +@pytest.fixture +def mock_environment( + mock_liveness_timeout: None, + base_mock_envs: EnvVarsDict, + project_id: ProjectID, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "DY_SIDECAR_CALLBACKS_MAPPING": "{}", + "DY_SIDECAR_PROJECT_ID": f"{project_id}", + "DY_SIDECAR_USER_ID": f"{2}", + "DYNAMIC_SIDECAR_LOG_LEVEL": "DEBUG", + "R_CLONE_PROVIDER": "MINIO", + "RABBIT_HOST": "test", + "RABBIT_PASSWORD": "test", + "RABBIT_SECURE": "0", + "RABBIT_USER": "test", + "STORAGE_USERNAME": "test", + "STORAGE_PASSWORD": "test", + "S3_ENDPOINT": faker.url(), + "S3_ACCESS_KEY": faker.pystr(), + "S3_REGION": faker.pystr(), + "S3_SECRET_KEY": faker.pystr(), + "S3_BUCKET_NAME": faker.pystr(), + "POSTGRES_HOST": "test", + "POSTGRES_USER": "test", + "POSTGRES_PASSWORD": "test", + "POSTGRES_DB": "test", + "REGISTRY_AUTH": f"{faker.pybool()}", + "REGISTRY_USER": faker.user_name(), + "REGISTRY_PW": faker.password(), + "REGISTRY_SSL": f"{faker.pybool()}", + "REGISTRY_URL": faker.url(), + **base_mock_envs, + }, + ) + + +@pytest.fixture +async def app(mock_environment: EnvVarsDict) -> FastAPI: + return create_app() + + +async def test_external_dependencies_are_not_reachable(app: FastAPI): + with pytest.raises(CouldNotReachExternalDependenciesError) as exe_info: + async with LifespanManager( + app, + startup_timeout=_LONG_STARTUP_SHUTDOWN_TIMEOUT, + shutdown_timeout=_LONG_STARTUP_SHUTDOWN_TIMEOUT, + ): + ... 
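+    # startup must abort: none of the external services can be reached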
+ failed = exe_info.value.failed + assert len(failed) == 4 + + for entry in ["Postgres", "RabbitMQ", "Internal Registry", "Storage"]: + assert any(f"Could not contact service '{entry}'" in err for err in failed) diff --git a/services/dynamic-sidecar/tests/unit/test_core_registry.py b/services/dynamic-sidecar/tests/unit/test_core_registry.py new file mode 100644 index 00000000000..32e71fe2b17 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_core_registry.py @@ -0,0 +1,105 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import json +from collections.abc import Iterable + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.docker_registry import RegistrySettings +from simcore_service_dynamic_sidecar.core.registry import ( + DOCKER_CONFIG_JSON_PATH, + _login_registry, +) + + +def _get_registry_config( + *, + url: str = "localhost:1111", + auth: str = "true", + user: str = "user", + password: str = "password", # noqa: S107 + ssl: str = "false", +) -> str: + return json.dumps( + { + "REGISTRY_URL": url, + "REGISTRY_AUTH": auth, + "REGISTRY_USER": user, + "REGISTRY_PW": password, + "REGISTRY_SSL": ssl, + } + ) + + +@pytest.fixture +def backup_docker_config_file() -> Iterable[None]: + backup_path = ( + DOCKER_CONFIG_JSON_PATH.parent / f"{DOCKER_CONFIG_JSON_PATH.name}.backup" + ) + + if not backup_path.exists() and DOCKER_CONFIG_JSON_PATH.exists(): + backup_path.write_text(DOCKER_CONFIG_JSON_PATH.read_text()) + + if DOCKER_CONFIG_JSON_PATH.exists(): + DOCKER_CONFIG_JSON_PATH.unlink() + + yield + + if backup_path.exists(): + DOCKER_CONFIG_JSON_PATH.write_text(backup_path.read_text()) + backup_path.unlink() + + +@pytest.fixture +def unset_registry_envs( + mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None: + for env in ( + "REGISTRY_AUTH", + "REGISTRY_PATH", + "REGISTRY_URL", + "REGISTRY_USER", + "REGISTRY_PW", + "REGISTRY_SSL", + ): + monkeypatch.delenv(env, raising=False) + + +@pytest.fixture +def mock_registry_settings_with_auth( + unset_registry_envs: None, + backup_docker_config_file: None, + monkeypatch: pytest.MonkeyPatch, + docker_registry: str, +) -> None: + monkeypatch.setenv( + "DY_DEPLOYMENT_REGISTRY_SETTINGS", + _get_registry_config( + url=docker_registry, + user="testuser", + password="testpassword", # noqa: S106 + ), + ) + + +async def test__login_registry( + mock_registry_settings_with_auth: None, + app: FastAPI, + docker_registry: str, +) -> None: + registry_settings: RegistrySettings = ( + app.state.settings.DY_DEPLOYMENT_REGISTRY_SETTINGS + ) + assert registry_settings.REGISTRY_URL == docker_registry # noqa: SIM300 + assert registry_settings.REGISTRY_AUTH is True + assert registry_settings.REGISTRY_USER == "testuser" + assert registry_settings.REGISTRY_PW.get_secret_value() == "testpassword" + assert registry_settings.REGISTRY_SSL is False + + await _login_registry(registry_settings) + + config_json = json.loads(DOCKER_CONFIG_JSON_PATH.read_text()) + assert len(config_json["auths"]) == 1 + assert registry_settings.REGISTRY_URL in config_json["auths"] diff --git a/services/dynamic-sidecar/tests/unit/test_core_reserved_space.py b/services/dynamic-sidecar/tests/unit/test_core_reserved_space.py new file mode 100644 index 00000000000..c78b800ce5a --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_core_reserved_space.py @@ -0,0 +1,26 @@ +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + + +from pydantic import 
ByteSize, TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_dynamic_sidecar.core.application import create_base_app +from simcore_service_dynamic_sidecar.core.reserved_space import ( + _RESERVED_DISK_SPACE_NAME, + remove_reserved_disk_space, +) + + +def test_reserved_disk_space_workflow( + cleanup_reserved_disk_space: None, mock_environment: EnvVarsDict +): + assert not _RESERVED_DISK_SPACE_NAME.exists() + create_base_app() + + assert _RESERVED_DISK_SPACE_NAME.exists() + assert _RESERVED_DISK_SPACE_NAME.stat().st_size == TypeAdapter( + ByteSize + ).validate_python("10MiB") + + remove_reserved_disk_space() + assert not _RESERVED_DISK_SPACE_NAME.exists() diff --git a/services/dynamic-sidecar/tests/unit/test_core_settings.py b/services/dynamic-sidecar/tests/unit/test_core_settings.py index 8ec31f018e3..9e581d90999 100644 --- a/services/dynamic-sidecar/tests/unit/test_core_settings.py +++ b/services/dynamic-sidecar/tests/unit/test_core_settings.py @@ -1,13 +1,93 @@ # pylint: disable=unused-argument # pylint: disable=redefined-outer-name - +import pytest +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from settings_library.utils_service import DEFAULT_AIOHTTP_PORT from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings -def test_settings_with_mock_environment(mock_environment): +def test_settings_with_mock_environment(mock_environment: EnvVarsDict): assert ApplicationSettings.create_from_envs() -def test_settings_with_envdevel_file(mock_environment_with_envdevel): +def test_settings_with_envdevel_file(mock_environment_with_envdevel: EnvVarsDict): assert ApplicationSettings.create_from_envs() + + +@pytest.fixture +def mock_postgres_data(monkeypatch: pytest.MonkeyPatch) -> None: + setenvs_from_dict( + monkeypatch, + { + "POSTGRES_HOST": "test", + "POSTGRES_USER": "test", + "POSTGRES_PASSWORD": "test", + "POSTGRES_DB": "test", + }, + ) + + +@pytest.mark.parametrize( + "envs", + [ + { + "STORAGE_USERNAME": "user", + "STORAGE_PASSWORD": "passwd", + "STORAGE_HOST": "host", + "STORAGE_PORT": "42", + "STORAGE_SECURE": "1", + }, + { + "NODE_PORTS_STORAGE_AUTH": ( + "{" + '"STORAGE_USERNAME": "user", ' + '"STORAGE_PASSWORD": "passwd", ' + '"STORAGE_HOST": "host", ' + '"STORAGE_PORT": "42", ' + '"STORAGE_SECURE": "1"' + "}" + ) + }, + ], +) +def test_settings_with_node_ports_storage_auth( + mock_environment: EnvVarsDict, + mock_postgres_data: None, + monkeypatch: pytest.MonkeyPatch, + envs: dict[str, str], +): + setenvs_from_dict(monkeypatch, envs) + + settings = ApplicationSettings.create_from_envs() + assert settings.NODE_PORTS_STORAGE_AUTH + # pylint:disable=no-member + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_SECURE is True + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_HOST == "host" + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_PORT == 42 + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_USERNAME == "user" + assert settings.NODE_PORTS_STORAGE_AUTH.auth_required is True + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_PASSWORD + + # enforce avoiding credentials leaks + assert ( + settings.NODE_PORTS_STORAGE_AUTH.STORAGE_PASSWORD.get_secret_value() == "passwd" + ) + assert "passwd" not in settings.NODE_PORTS_STORAGE_AUTH.model_dump_json() + + +@pytest.mark.parametrize("envs", [{}]) +def test_settings_with_node_ports_storage_auth_as_missing( + mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, envs: dict[str, str] +): + setenvs_from_dict(monkeypatch, envs) + + settings 
= ApplicationSettings.create_from_envs() + assert settings.NODE_PORTS_STORAGE_AUTH is not None + # pylint:disable=no-member + assert settings.NODE_PORTS_STORAGE_AUTH.auth_required is False + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_USERNAME is None + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_PASSWORD is None + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_SECURE is False + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_HOST == "storage" + assert settings.NODE_PORTS_STORAGE_AUTH.STORAGE_PORT == DEFAULT_AIOHTTP_PORT diff --git a/services/dynamic-sidecar/tests/unit/test_core_stroage.py b/services/dynamic-sidecar/tests/unit/test_core_stroage.py new file mode 100644 index 00000000000..0fdf000f7c0 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_core_stroage.py @@ -0,0 +1,140 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import multiprocessing +from collections.abc import AsyncIterable +from datetime import timedelta +from typing import Annotated, Final +from unittest.mock import Mock + +import pytest +import requests +import uvicorn +from fastapi import Depends, FastAPI, HTTPException, status +from fastapi.security import HTTPBasic, HTTPBasicCredentials +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from settings_library.node_ports import StorageAuthSettings +from simcore_service_dynamic_sidecar.core.storage import ( + _get_url, + wait_for_storage_liveness, +) +from tenacity import AsyncRetrying, stop_after_delay, wait_fixed + + +@pytest.fixture +def mock_storage_app(username: str | None, password: str | None) -> FastAPI: + app = FastAPI() + security = HTTPBasic() + + @app.get("/") + def health(): + return "ok" + + def _authenticate_user( + credentials: Annotated[HTTPBasicCredentials, Depends(security)], + ): + if credentials.username != username or credentials.password != password: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Incorrect username or password", + headers={"WWW-Authenticate": "Basic"}, + ) + return {"username": username} + + if username and password: + + @app.get("/v0/") + async def protected_route(user: Annotated[dict, Depends(_authenticate_user)]): + return {"message": f"Welcome, {user['username']}!"} + + else: + + @app.get("/v0/") + async def unprotected_route(): + return {"message": "Welcome, no auth!"} + + return app + + +@pytest.fixture +def storage_auth_settings( + username: str | None, password: str | None +) -> StorageAuthSettings: + return TypeAdapter(StorageAuthSettings).validate_python( + { + "STORAGE_HOST": "localhost", + "STORAGE_PORT": 44332, + "STORAGE_USERNAME": username, + "STORAGE_PASSWORD": password, + } + ) + + +def _get_base_url(storage_auth_settings: StorageAuthSettings) -> str: + return f"http://{storage_auth_settings.STORAGE_HOST}:{storage_auth_settings.STORAGE_PORT}" + + +@pytest.fixture +async def mock_storage_server( + mock_storage_app: None, storage_auth_settings: StorageAuthSettings +) -> AsyncIterable[None]: + def _run_server(app): + uvicorn.run( + app, + host=storage_auth_settings.STORAGE_HOST, + port=storage_auth_settings.STORAGE_PORT, + ) + + process = multiprocessing.Process(target=_run_server, args=(mock_storage_app,)) + process.start() + + base_url = _get_base_url(storage_auth_settings) + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(1), + reraise=True, + ): + with attempt: + response = requests.get(f"{base_url}/", timeout=1) + assert response.status_code == status.HTTP_200_OK + + 
yield None + + process.kill() + + +@pytest.fixture +def mock_liveness_timeout(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.service_liveness._DEFAULT_TIMEOUT_INTERVAL", + new=timedelta(seconds=2), + ) + + +@pytest.fixture +def mock_dynamic_sidecar_app( + mock_liveness_timeout: None, storage_auth_settings: StorageAuthSettings +) -> Mock: + mock = Mock() + mock.state.settings.NODE_PORTS_STORAGE_AUTH = storage_auth_settings + return mock + + +_USERNAME_PASSWORD_TEST_CASES: Final[list] = [ + pytest.param("user", "password", id="authenticated"), + pytest.param(None, None, id="no-auth"), +] + + +@pytest.mark.parametrize("username, password", _USERNAME_PASSWORD_TEST_CASES) +async def test_wait_for_storage_liveness( + mock_storage_server: None, mock_dynamic_sidecar_app: Mock +): + await wait_for_storage_liveness(mock_dynamic_sidecar_app) + + +@pytest.mark.parametrize("username, password", _USERNAME_PASSWORD_TEST_CASES) +def test__get_url(storage_auth_settings: StorageAuthSettings): + assert _get_url(storage_auth_settings) == "http://localhost:44332/v0/" diff --git a/services/dynamic-sidecar/tests/unit/test_core_utils.py b/services/dynamic-sidecar/tests/unit/test_core_utils.py index f9034ca8052..76913bc9347 100644 --- a/services/dynamic-sidecar/tests/unit/test_core_utils.py +++ b/services/dynamic-sidecar/tests/unit/test_core_utils.py @@ -7,31 +7,7 @@ from pathlib import Path import pytest -from pytest import MonkeyPatch -from settings_library.docker_registry import RegistrySettings -from simcore_service_dynamic_sidecar.core.utils import ( - CommandResult, - _is_registry_reachable, - async_command, -) - - -@pytest.fixture -def registry_with_auth( - monkeypatch: MonkeyPatch, docker_registry: str -) -> RegistrySettings: - monkeypatch.setenv("REGISTRY_URL", docker_registry) - monkeypatch.setenv("REGISTRY_AUTH", "false") - monkeypatch.setenv("REGISTRY_AUTH", "true") - monkeypatch.setenv("REGISTRY_USER", "testuser") - monkeypatch.setenv("REGISTRY_PW", "testpassword") - monkeypatch.setenv("REGISTRY_SSL", "false") - return RegistrySettings.create_from_envs() - - -async def test_is_registry_reachable(registry_with_auth: RegistrySettings) -> None: - await _is_registry_reachable(registry_with_auth) - +from simcore_service_dynamic_sidecar.core.utils import CommandResult, async_command CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent @@ -41,7 +17,6 @@ def cmd(tmp_path: Path, sleep: int): docker_compose = tmp_path / "docker_compose.yml" docker_compose.write_text( f"""\ -version: "3.8" services: my-container: environment: @@ -55,7 +30,7 @@ def cmd(tmp_path: Path, sleep: int): ) print("docker-compose from cmd fixture:\n", docker_compose.read_text(), "\n") - return f"docker-compose -f {docker_compose} up" + return f"docker compose -f {docker_compose} up" @pytest.mark.parametrize( diff --git a/services/dynamic-sidecar/tests/unit/test_core_validation.py b/services/dynamic-sidecar/tests/unit/test_core_validation.py index 893b5abdebb..70f99addc84 100644 --- a/services/dynamic-sidecar/tests/unit/test_core_validation.py +++ b/services/dynamic-sidecar/tests/unit/test_core_validation.py @@ -2,14 +2,20 @@ # pylint: disable=unused-argument from inspect import signature -from typing import Union +from pathlib import Path import pytest +from fastapi import FastAPI +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from pytest_mock import MockerFixture from servicelib.docker_constants 
import DEFAULT_USER_SERVICES_NETWORK_NAME from simcore_service_dynamic_sidecar.core.validation import ( _connect_user_services, + get_and_validate_compose_spec, parse_compose_spec, ) +from simcore_service_dynamic_sidecar.modules.mounted_fs import MountedVolumes @pytest.fixture @@ -18,8 +24,8 @@ def incoming_iseg_compose_file_content() -> str: networks: dy-sidecar_6f54ecb4-cac2-424a-8b72-ee9366026ff8: driver: overlay - external: - name: dy-sidecar_6f54ecb4-cac2-424a-8b72-ee9366026ff8 + name: dy-sidecar_6f54ecb4-cac2-424a-8b72-ee9366026ff8 + external: true services: iseg-app: image: registry.osparc.org/simcore/services/dynamic/iseg-app:1.0.7 @@ -111,14 +117,16 @@ def incoming_compose_file( ) @pytest.mark.parametrize("allow_internet_access", [True, False]) def test_inject_backend_networking( - networks: Union[None, dict], incoming_compose_file: str, allow_internet_access: bool + networks: None | dict, incoming_compose_file: str, allow_internet_access: bool ): """ NOTE: this goes with issue [https://github.com/ITISFoundation/osparc-simcore/issues/3261] """ parsed_compose_spec = parse_compose_spec(incoming_compose_file) parsed_compose_spec["networks"] = networks - _connect_user_services(parsed_compose_spec, allow_internet_access) + _connect_user_services( + parsed_compose_spec, allow_internet_access=allow_internet_access + ) assert DEFAULT_USER_SERVICES_NETWORK_NAME in parsed_compose_spec["networks"] assert ( DEFAULT_USER_SERVICES_NETWORK_NAME @@ -128,3 +136,44 @@ def test_inject_backend_networking( DEFAULT_USER_SERVICES_NETWORK_NAME in parsed_compose_spec["services"]["iseg-web"]["networks"] ) + + +@pytest.fixture +def mock_get_volume_by_label(mocker: MockerFixture) -> None: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.mounted_fs.get_volume_by_label", + autospec=True, + return_value={"Mountpoint": "/fake/mount"}, + ) + + +@pytest.fixture +def no_internet_spec(project_tests_dir: Path) -> str: + no_intenret_file = project_tests_dir / "mocks" / "internet_blocked_spec.yaml" + return no_intenret_file.read_text() + + +@pytest.fixture +def fake_mounted_volumes() -> MountedVolumes: + return MountedVolumes( + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_dynamic(), + node_id=NodeID("a019b83f-7cce-46bf-90cf-d02f7f0f089a"), + inputs_path=Path("/"), + outputs_path=Path("/"), + user_preferences_path=None, + state_paths=[], + state_exclude=set(), + compose_namespace="", + dy_volumes=Path("/"), + ) + + +async def test_regression_validate_compose_spec( + mock_get_volume_by_label: None, + app: FastAPI, + no_internet_spec: str, + fake_mounted_volumes: MountedVolumes, +): + await get_and_validate_compose_spec( + app.state.settings, no_internet_spec, fake_mounted_volumes + ) diff --git a/services/dynamic-sidecar/tests/unit/test_models_shared_store.py b/services/dynamic-sidecar/tests/unit/test_models_shared_store.py index e17b0e9d9aa..2c2b474a029 100644 --- a/services/dynamic-sidecar/tests/unit/test_models_shared_store.py +++ b/services/dynamic-sidecar/tests/unit/test_models_shared_store.py @@ -1,19 +1,22 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument -from dataclasses import dataclass +import json +from copy import deepcopy from pathlib import Path -from typing import Optional +from typing import Any +import arrow import pytest from async_asgi_testclient import TestClient -from faker import Faker from fastapi import FastAPI +from models_library.sidecar_volumes import VolumeCategory, VolumeState, VolumeStatus +from pydantic import TypeAdapter from 
pytest_mock.plugin import MockerFixture +from servicelib.utils import logged_gather from simcore_service_dynamic_sidecar.core import application from simcore_service_dynamic_sidecar.models.shared_store import ( STORE_FILE_NAME, - ContainerNameStr, SharedStore, ) @@ -32,52 +35,118 @@ def shared_store(trigger_setup_shutdown_events: None, app: FastAPI) -> SharedSto return app.state.shared_store -@pytest.fixture -def fake_compose_spec(faker: Faker) -> str: - return f"just_a_string_pretending_to_be_a_compose_spec_{faker.uuid4()}" - - # mock docker_compose_down in application @pytest.fixture def mock_docker_compose(mocker: MockerFixture) -> None: mocker.patch.object(application, "docker_compose_down") -@dataclass -class UpdateFields: - compose_spec: Optional[str] - container_names: list[ContainerNameStr] - - @pytest.mark.parametrize( "update_fields", [ - UpdateFields(compose_spec=None, container_names=[]), - UpdateFields(compose_spec="some_random_fake_spec", container_names=[]), - UpdateFields( - compose_spec="some_random_fake_spec", container_names=["a_continaer"] - ), - UpdateFields( - compose_spec="some_random_fake_spec", container_names=["a_ctnr", "b_cont"] - ), + {"compose_spec": None, "container_names": []}, + {"compose_spec": "some_random_fake_spec", "container_names": []}, + {"compose_spec": "some_random_fake_spec", "container_names": ["a_continaer"]}, + { + "compose_spec": "some_random_fake_spec", + "container_names": ["a_ctnr", "b_cont"], + }, + {"volume_states": {}}, + { + "volume_states": { + VolumeCategory.OUTPUTS: TypeAdapter(VolumeState).validate_python( + {"status": VolumeStatus.CONTENT_NO_SAVE_REQUIRED} + ), + VolumeCategory.INPUTS: TypeAdapter(VolumeState).validate_python( + {"status": VolumeStatus.CONTENT_NEEDS_TO_BE_SAVED} + ), + VolumeCategory.STATES: TypeAdapter(VolumeState).validate_python( + {"status": VolumeStatus.CONTENT_WAS_SAVED} + ), + VolumeCategory.SHARED_STORE: TypeAdapter(VolumeState).validate_python( + {"status": VolumeStatus.CONTENT_NO_SAVE_REQUIRED} + ), + } + }, ], ) async def test_shared_store_updates( mock_docker_compose: None, shared_store: SharedStore, - fake_compose_spec: str, ensure_shared_store_dir: Path, - update_fields: UpdateFields, + update_fields: dict[str, Any], ): # check no file is present on the disk from where the data was created store_file_path = ensure_shared_store_dir / STORE_FILE_NAME + # file already exists since it was initialized, removing it for this test + store_file_path.unlink() assert store_file_path.exists() is False # change some data and trigger a persist - shared_store.compose_spec = update_fields.compose_spec - shared_store.container_names = update_fields.container_names - await shared_store.persist_to_disk() + async with shared_store: + for attr_name, attr_value in update_fields.items(): + setattr(shared_store, attr_name, attr_value) # check the contes of the file should be the same as the shared_store's assert store_file_path.exists() is True - assert shared_store == SharedStore.parse_raw(store_file_path.read_text()) + + def _normalize_datetimes(shared_store: SharedStore) -> None: + for state in shared_store.volume_states.values(): + state.last_changed = arrow.get(state.last_changed.isoformat()).datetime + + shared_store_from_file = SharedStore.model_validate_json( + store_file_path.read_text() + ) + _normalize_datetimes(shared_store) + _normalize_datetimes(shared_store_from_file) + + assert shared_store == shared_store_from_file + + +async def test_no_concurrency_with_parallel_writes( + mock_docker_compose: None, 
shared_store: SharedStore, ensure_shared_store_dir: Path +): + PARALLEL_CHANGES: int = 1000 + + async def replace_list_in_shared_store(item: str): + async with shared_store: + new_list = deepcopy(shared_store.container_names) + new_list.append(item) + shared_store.container_names = new_list + + await logged_gather( + *(replace_list_in_shared_store(f"{x}") for x in range(PARALLEL_CHANGES)) + ) + assert len(shared_store.container_names) == PARALLEL_CHANGES + + +async def test_init_from_disk_with_legacy_data_format(project_tests_dir: Path): + MOCKS_DIR = project_tests_dir / "mocks" + LEGACY_SHARED_STORE = "legacy_shared_store.json" + + # ensure stored legacy format is parsable + disk_shared_store = await SharedStore.init_from_disk( + MOCKS_DIR, store_file_name=LEGACY_SHARED_STORE + ) + # if file is missing it correctly loaded the storage_file + assert (MOCKS_DIR / STORE_FILE_NAME).exists() is False + + def _normalize_datetimes(data: dict[str, Any]) -> None: + for state in data["volume_states"].values(): + state["last_changed"] = arrow.get( + state["last_changed"] + ).datetime.isoformat() + + # ensure objects are compatible + parsed_legacy_format = json.loads(disk_shared_store.model_dump_json()) + load_raw_from_disk = json.loads((MOCKS_DIR / LEGACY_SHARED_STORE).read_text()) + + _normalize_datetimes(parsed_legacy_format) + _normalize_datetimes(load_raw_from_disk) + + assert parsed_legacy_format == load_raw_from_disk + + +async def test_init_from_disk_no_file_present(tmp_path: Path): + await SharedStore.init_from_disk(tmp_path, store_file_name=STORE_FILE_NAME) + assert (tmp_path / STORE_FILE_NAME).exists() is True diff --git a/services/dynamic-sidecar/tests/unit/test_modules_attribute_monitor.py b/services/dynamic-sidecar/tests/unit/test_modules_attribute_monitor.py index 46e3ad1b935..5266730830a 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_attribute_monitor.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_attribute_monitor.py @@ -1,5 +1,6 @@ # pylint: disable=redefined-outer-name # pylint: disable=unused-argument +# pylint: disable=protected-access import asyncio @@ -8,9 +9,11 @@ import socket import threading from collections import deque +from collections.abc import AsyncIterator, Iterator +from logging.handlers import DEFAULT_UDP_LOGGING_PORT, DatagramHandler from pathlib import Path from typing import Final -from unittest.mock import AsyncMock +from unittest.mock import AsyncMock, Mock import pytest from asgi_lifespan import LifespanManager @@ -28,7 +31,7 @@ # redirecting via UDP, below is a slight change from # https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393 -DATAGRAM_PORT: Final[PortInt] = logging.handlers.DEFAULT_UDP_LOGGING_PORT +DATAGRAM_PORT: Final[PortInt] = PortInt(DEFAULT_UDP_LOGGING_PORT) ENSURE_LOGS_DELIVERED: Final[float] = 0.1 @@ -41,22 +44,22 @@ def fake_dy_volumes_mount_dir(tmp_path: Path) -> Path: @pytest.fixture def patch_logging(mocker: MockerFixture) -> None: logger = logging.getLogger(_logging_event_handler.__name__) - datagram_handler = logging.handlers.DatagramHandler("127.0.0.1", DATAGRAM_PORT) + datagram_handler = DatagramHandler("127.0.0.1", DATAGRAM_PORT) datagram_handler.setLevel(logging.NOTSET) logger.addHandler(datagram_handler) - logger.isEnabledFor = lambda _: True + logger.isEnabledFor = lambda _level: True mocker.patch.object(_logging_event_handler, "logger", logger) class LogRecordKeeper: - def __init__(self): + def __init__(self) -> None: self._records = deque() def appendleft(self, x) -> None: 
self._records.appendleft(x) - def has_log_within(self, **expected_logrec_fields) -> None: + def has_log_within(self, **expected_logrec_fields) -> bool: for rec in self._records: if all(str(v) in str(rec[k]) for k, v in expected_logrec_fields.items()): return True @@ -70,10 +73,10 @@ def __repr__(self) -> str: @pytest.fixture -def log_receiver() -> LogRecordKeeper: +def log_receiver() -> Iterator[LogRecordKeeper]: log_record_keeper = LogRecordKeeper() - def listener(): + def listener() -> None: receive_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) receive_socket.bind(("127.0.0.1", DATAGRAM_PORT)) while True: @@ -107,7 +110,7 @@ def fake_app(fake_dy_volumes_mount_dir: Path, patch_logging: None) -> FastAPI: @pytest.fixture async def logging_event_handler_observer( fake_app: FastAPI, -) -> None: +) -> AsyncIterator[None]: setup_attribute_monitor(fake_app) async with LifespanManager(fake_app): assert fake_app.state.attribute_monitor @@ -122,6 +125,7 @@ async def logging_event_handler_observer( ], ) async def test_chown_triggers_event( + mock_ensure_read_permissions_on_user_service_data: None, logging_event_handler_observer: None, fake_dy_volumes_mount_dir: Path, command_template: str, @@ -148,3 +152,26 @@ async def test_chown_triggers_event( assert log_receiver.has_log_within( msg=f"Attribute change to: '{file_path}'", levelname="INFO" ) + + +@pytest.mark.parametrize("file_is_present", [True, False]) +async def test_regression_logging_event_handler_file_does_not_exist( + faker: Faker, + tmp_path: Path, + caplog: pytest.LogCaptureFixture, + file_is_present: bool, +): + caplog.clear() + mocked_event = Mock() + file_path = tmp_path / f"missing-path{faker.uuid4()}" + if file_is_present: + file_path.touch() + assert file_path.exists() is True + else: + assert file_path.exists() is False + + mocked_event.src_path = file_path + _logging_event_handler._LoggingEventHandler().event_handler( # noqa: SLF001 + mocked_event + ) + assert (f"Attribute change to: '{file_path}'" in caplog.text) is file_is_present diff --git a/services/dynamic-sidecar/tests/unit/test_modules_container_utils.py b/services/dynamic-sidecar/tests/unit/test_modules_container_utils.py new file mode 100644 index 00000000000..a8b84f7235c --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_container_utils.py @@ -0,0 +1,55 @@ +# pylint: disable=redefined-outer-name + +import contextlib +from collections.abc import AsyncIterable + +import aiodocker +import pytest +from simcore_service_dynamic_sidecar.modules.container_utils import ( + ContainerExecCommandFailedError, + ContainerExecContainerNotFoundError, + ContainerExecTimeoutError, + run_command_in_container, +) + + +@pytest.fixture +async def running_container_name() -> AsyncIterable[str]: + async with aiodocker.Docker() as client: + container = await client.containers.run( + config={ + "Image": "alpine:latest", + "Cmd": ["/bin/ash", "-c", "sleep 10000"], + } + ) + container_inspect = await container.show() + + yield container_inspect["Name"][1:] + + with contextlib.suppress(aiodocker.DockerError): + await container.kill() + await container.delete() + + +async def test_run_command_in_container_container_not_found(): + with pytest.raises(ContainerExecContainerNotFoundError): + await run_command_in_container("missing_container", command="") + + +async def test_run_command_in_container_command_timed_out(running_container_name: str): + with pytest.raises(ContainerExecTimeoutError): + await run_command_in_container( + running_container_name, command="sleep 10", 
timeout=0.1 + ) + + +async def test_run_command_in_container_none_zero_exit_code( + running_container_name: str, +): + with pytest.raises(ContainerExecCommandFailedError): + await run_command_in_container(running_container_name, command="exit 1") + + +async def test_run_command_in_container(running_container_name: str): + result = await run_command_in_container(running_container_name, command="ls -lah") + assert len(result) > 0 diff --git a/services/dynamic-sidecar/tests/unit/test_modules_mounted_fs.py b/services/dynamic-sidecar/tests/unit/test_modules_mounted_fs.py index 34e31ebeddc..ac3114fc51b 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_mounted_fs.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_mounted_fs.py @@ -9,7 +9,7 @@ from aiodocker.volumes import DockerVolume from fastapi import FastAPI from models_library.projects_nodes_io import NodeID -from models_library.services import RunID +from models_library.services import ServiceRunID from simcore_service_dynamic_sidecar.core.application import AppState from simcore_service_dynamic_sidecar.models.shared_store import SharedStore from simcore_service_dynamic_sidecar.modules.mounted_fs import ( @@ -56,7 +56,7 @@ async def test_expected_paths_and_volumes( inputs_dir: Path, outputs_dir: Path, state_paths_dirs: list[Path], - run_id: RunID, + service_run_id: ServiceRunID, node_id: NodeID, ): assert ( @@ -65,11 +65,11 @@ async def test_expected_paths_and_volumes( { x async for x in mounted_volumes.iter_state_paths_to_docker_volumes( - run_id + service_run_id ) } ) - == len(set(mounted_volumes.disk_state_paths())) + == len(set(mounted_volumes.disk_state_paths_iter())) ) # check location on disk @@ -82,22 +82,23 @@ async def test_expected_paths_and_volumes( == mounted_volumes._dy_volumes / inputs_dir.relative_to("/") ) - assert set(mounted_volumes.disk_state_paths()) == { + assert set(mounted_volumes.disk_state_paths_iter()) == { mounted_volumes._dy_volumes / x.relative_to("/") for x in state_paths_dirs } # check volume mount point assert ( mounted_volumes.volume_name_outputs - == f"dyv_{run_id}_{node_id}_{_replace_slashes(outputs_dir)[::-1]}" + == f"dyv_{service_run_id}_{node_id}_{_replace_slashes(outputs_dir)[::-1]}" ) assert ( mounted_volumes.volume_name_inputs - == f"dyv_{run_id}_{node_id}_{_replace_slashes(inputs_dir)[::-1]}" + == f"dyv_{service_run_id}_{node_id}_{_replace_slashes(inputs_dir)[::-1]}" ) assert set(mounted_volumes.volume_name_state_paths()) == { - f"dyv_{run_id}_{node_id}_{_replace_slashes(x)[::-1]}" for x in state_paths_dirs + f"dyv_{service_run_id}_{node_id}_{_replace_slashes(x)[::-1]}" + for x in state_paths_dirs } def _get_container_mount(mount_path: str) -> str: @@ -105,15 +106,21 @@ def _get_container_mount(mount_path: str) -> str: # check docker_volume assert ( - _get_container_mount(await mounted_volumes.get_inputs_docker_volume(run_id)) + _get_container_mount( + await mounted_volumes.get_inputs_docker_volume(service_run_id) + ) == f"{mounted_volumes.inputs_path}" ) assert ( - _get_container_mount(await mounted_volumes.get_outputs_docker_volume(run_id)) + _get_container_mount( + await mounted_volumes.get_outputs_docker_volume(service_run_id) + ) == f"{mounted_volumes.outputs_path}" ) assert { _get_container_mount(x) - async for x in mounted_volumes.iter_state_paths_to_docker_volumes(run_id) + async for x in mounted_volumes.iter_state_paths_to_docker_volumes( + service_run_id + ) } == {f"{state_path}" for state_path in state_paths_dirs} diff --git 
a/services/dynamic-sidecar/tests/unit/test_modules_notifier.py b/services/dynamic-sidecar/tests/unit/test_modules_notifier.py new file mode 100644 index 00000000000..4b14bc69f95 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_notifier.py @@ -0,0 +1,401 @@ +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +from collections.abc import AsyncIterable, Callable +from contextlib import AsyncExitStack, _AsyncGeneratorContextManager +from pathlib import Path +from typing import Final +from unittest.mock import AsyncMock + +import pytest +import socketio +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_dynamic_sidecar.ports import ( + InputPortSatus, + InputStatus, + OutputPortStatus, + OutputStatus, +) +from models_library.api_schemas_dynamic_sidecar.socketio import ( + SOCKET_IO_SERVICE_DISK_USAGE_EVENT, + SOCKET_IO_STATE_INPUT_PORTS_EVENT, + SOCKET_IO_STATE_OUTPUT_PORTS_EVENT, +) +from models_library.api_schemas_dynamic_sidecar.telemetry import ( + DiskUsage, + MountPathCategory, + ServiceDiskUsage, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServicePortKey +from models_library.users import UserID +from pydantic import ByteSize, NonNegativeInt, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from servicelib.utils import logged_gather +from settings_library.rabbit import RabbitSettings +from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings +from simcore_service_dynamic_sidecar.modules.notifications import ( + PortNotifier, + publish_disk_usage, +) +from simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage import ( + DiskUsageMonitor, +) +from socketio import AsyncServer +from tenacity import AsyncRetrying +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + +_NUMBER_OF_CLIENTS: Final[NonNegativeInt] = 10 + + +@pytest.fixture +def mock_environment( + monkeypatch: pytest.MonkeyPatch, + rabbit_service: RabbitSettings, + mock_environment: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "DY_SIDECAR_SYSTEM_MONITOR_TELEMETRY_ENABLE": "true", + "RABBIT_HOST": rabbit_service.RABBIT_HOST, + "RABBIT_PASSWORD": rabbit_service.RABBIT_PASSWORD.get_secret_value(), + "RABBIT_PORT": f"{rabbit_service.RABBIT_PORT}", + "RABBIT_SECURE": f"{rabbit_service.RABBIT_SECURE}", + "RABBIT_USER": rabbit_service.RABBIT_USER, + }, + ) + + +@pytest.fixture +async def app( + app: FastAPI, + mock_environment: EnvVarsDict, + mock_registry_service: AsyncMock, + mock_storage_check: None, + mocker: MockerFixture, +) -> AsyncIterable[FastAPI]: + mocker.patch( + "simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage._get_monitored_paths", + return_value={}, + ) + + async with LifespanManager(app): + yield app + + +@pytest.fixture +async def disk_usage_monitor(app: FastAPI) -> DiskUsageMonitor: + return app.state.disk_usage_monitor + + +@pytest.fixture +async def socketio_server( + app: FastAPI, + socketio_server_factory: Callable[ + [RabbitSettings], _AsyncGeneratorContextManager[AsyncServer] + ], +) -> AsyncIterable[AsyncServer]: + # Same configuration as 
simcore_service_webserver/socketio/server.py + settings: ApplicationSettings = app.state.settings + assert settings.RABBIT_SETTINGS + + async with socketio_server_factory(settings.RABBIT_SETTINGS) as server: + yield server + + +@pytest.fixture +def room_name(user_id: UserID) -> SocketIORoomStr: + return SocketIORoomStr.from_user_id(user_id) + + +async def _assert_call_count(mock: AsyncMock, *, call_count: int) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), stop=stop_after_delay(5), reraise=True + ): + with attempt: + assert mock.call_count == call_count + + +def _get_mocked_disk_usage(byte_size_str: str) -> DiskUsage: + return DiskUsage( + total=TypeAdapter(ByteSize).validate_python(byte_size_str), + used=ByteSize(0), + free=TypeAdapter(ByteSize).validate_python(byte_size_str), + used_percent=0, + ) + + +def _get_on_service_disk_usage_spy( + socketio_client: socketio.AsyncClient, +) -> AsyncMock: + # emulates front-end receiving message + + async def on_service_status(data): + assert TypeAdapter(ServiceDiskUsage).validate_python(data) is not None + + on_event_spy = AsyncMock(wraps=on_service_status) + socketio_client.on(SOCKET_IO_SERVICE_DISK_USAGE_EVENT, on_event_spy) + + return on_event_spy + + +@pytest.mark.parametrize( + "usage", + [ + pytest.param({}, id="empty"), + pytest.param( + {MountPathCategory.HOST: _get_mocked_disk_usage("1kb")}, id="one_entry" + ), + pytest.param( + { + MountPathCategory.HOST: _get_mocked_disk_usage("1kb"), + MountPathCategory.STATES_VOLUMES: _get_mocked_disk_usage("2kb"), + }, + id="two_entries", + ), + ], +) +async def test_notifier_publish_disk_usage( + disk_usage_monitor: DiskUsageMonitor, + socketio_server_events: dict[str, AsyncMock], + app: FastAPI, + user_id: UserID, + usage: dict[Path, DiskUsage], + node_id: NodeID, + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], +): + # web server spy events + server_connect = socketio_server_events["connect"] + server_disconnect = socketio_server_events["disconnect"] + server_on_check = socketio_server_events["on_check"] + + async with AsyncExitStack() as socketio_frontend_clients: + frontend_clients: list[socketio.AsyncClient] = await logged_gather( + *[ + socketio_frontend_clients.enter_async_context(socketio_client_factory()) + for _ in range(_NUMBER_OF_CLIENTS) + ] + ) + await _assert_call_count(server_connect, call_count=_NUMBER_OF_CLIENTS) + + # client emits and check it was received + await logged_gather( + *[ + frontend_client.emit("check", data="an_event") + for frontend_client in frontend_clients + ] + ) + await _assert_call_count(server_on_check, call_count=_NUMBER_OF_CLIENTS) + + # attach spy to client + on_service_disk_usage_events: list[AsyncMock] = [ + _get_on_service_disk_usage_spy(c) for c in frontend_clients + ] + + # server publishes a message + await publish_disk_usage(app, user_id=user_id, node_id=node_id, usage=usage) + + # check that all clients received it + for on_service_disk_usage_event in on_service_disk_usage_events: + await _assert_call_count(on_service_disk_usage_event, call_count=1) + on_service_disk_usage_event.assert_awaited_once_with( + jsonable_encoder(ServiceDiskUsage(node_id=node_id, usage=usage)) + ) + + await _assert_call_count(server_disconnect, call_count=_NUMBER_OF_CLIENTS * 2) + + +@pytest.fixture +def port_key() -> ServicePortKey: + return TypeAdapter(ServicePortKey).validate_python("test_port") + + +def _get_on_input_port_spy( + socketio_client: socketio.AsyncClient, +) -> AsyncMock: + # 
emulates front-end receiving message + + async def on_service_status(data): + assert TypeAdapter(ServiceDiskUsage).validate_python(data) is not None + + on_event_spy = AsyncMock(wraps=on_service_status) + socketio_client.on(SOCKET_IO_STATE_INPUT_PORTS_EVENT, on_event_spy) + + return on_event_spy + + +@pytest.mark.parametrize("input_status", InputStatus) +async def test_notifier_send_input_port_status( + socketio_server_events: dict[str, AsyncMock], + app: FastAPI, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + port_key: ServicePortKey, + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], + input_status: InputStatus, +): + # web server spy events + server_connect = socketio_server_events["connect"] + server_disconnect = socketio_server_events["disconnect"] + server_on_check = socketio_server_events["on_check"] + + async with AsyncExitStack() as socketio_frontend_clients: + frontend_clients: list[socketio.AsyncClient] = await logged_gather( + *[ + socketio_frontend_clients.enter_async_context(socketio_client_factory()) + for _ in range(_NUMBER_OF_CLIENTS) + ] + ) + await _assert_call_count(server_connect, call_count=_NUMBER_OF_CLIENTS) + + # client emits and check it was received + await logged_gather( + *[ + frontend_client.emit("check", data="an_event") + for frontend_client in frontend_clients + ] + ) + await _assert_call_count(server_on_check, call_count=_NUMBER_OF_CLIENTS) + + # attach spy to client + on_input_port_events: list[AsyncMock] = [ + _get_on_input_port_spy(c) for c in frontend_clients + ] + + port_notifier = PortNotifier(app, user_id, project_id, node_id) + + # server publishes a message + match input_status: + case InputStatus.DOWNLOAD_STARTED: + await port_notifier.send_input_port_download_started(port_key) + case InputStatus.DOWNLOAD_WAS_ABORTED: + await port_notifier.send_input_port_download_was_aborted(port_key) + case InputStatus.DOWNLOAD_FINISHED_SUCCESSFULLY: + await port_notifier.send_input_port_download_finished_succesfully( + port_key + ) + case InputStatus.DOWNLOAD_FINISHED_WITH_ERROR: + await port_notifier.send_input_port_download_finished_with_error( + port_key + ) + + # check that all clients received it + for on_input_port_event in on_input_port_events: + await _assert_call_count(on_input_port_event, call_count=1) + on_input_port_event.assert_awaited_once_with( + jsonable_encoder( + InputPortSatus( + project_id=project_id, + node_id=node_id, + port_key=port_key, + status=input_status, + ) + ) + ) + + await _assert_call_count(server_disconnect, call_count=_NUMBER_OF_CLIENTS * 2) + + +def _get_on_output_port_spy( + socketio_client: socketio.AsyncClient, +) -> AsyncMock: + # emulates front-end receiving message + + async def on_service_status(data): + assert TypeAdapter(ServiceDiskUsage).validate_python(data) is not None + + on_event_spy = AsyncMock(wraps=on_service_status) + socketio_client.on(SOCKET_IO_STATE_OUTPUT_PORTS_EVENT, on_event_spy) + + return on_event_spy + + +@pytest.mark.parametrize("output_status", OutputStatus) +async def test_notifier_send_output_port_status( + socketio_server_events: dict[str, AsyncMock], + app: FastAPI, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + port_key: ServicePortKey, + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], + output_status: OutputStatus, +): + # web server spy events + server_connect = socketio_server_events["connect"] + server_disconnect = socketio_server_events["disconnect"] + 
server_on_check = socketio_server_events["on_check"] + + async with AsyncExitStack() as socketio_frontend_clients: + frontend_clients: list[socketio.AsyncClient] = await logged_gather( + *[ + socketio_frontend_clients.enter_async_context(socketio_client_factory()) + for _ in range(_NUMBER_OF_CLIENTS) + ] + ) + await _assert_call_count(server_connect, call_count=_NUMBER_OF_CLIENTS) + + # client emits and check it was received + await logged_gather( + *[ + frontend_client.emit("check", data="an_event") + for frontend_client in frontend_clients + ] + ) + await _assert_call_count(server_on_check, call_count=_NUMBER_OF_CLIENTS) + + # attach spy to client + on_output_port_events: list[AsyncMock] = [ + _get_on_output_port_spy(c) for c in frontend_clients + ] + + port_notifier = PortNotifier(app, user_id, project_id, node_id) + + # server publishes a message + match output_status: + case OutputStatus.UPLOAD_STARTED: + await port_notifier.send_output_port_upload_sarted(port_key) + case OutputStatus.UPLOAD_WAS_ABORTED: + await port_notifier.send_output_port_upload_was_aborted(port_key) + case OutputStatus.UPLOAD_FINISHED_SUCCESSFULLY: + await port_notifier.send_output_port_upload_finished_successfully( + port_key + ) + case OutputStatus.UPLOAD_FINISHED_WITH_ERROR: + await port_notifier.send_output_port_upload_finished_with_error( + port_key + ) + + # check that all clients received it + for on_output_port_event in on_output_port_events: + await _assert_call_count(on_output_port_event, call_count=1) + on_output_port_event.assert_awaited_once_with( + jsonable_encoder( + OutputPortStatus( + project_id=project_id, + node_id=node_id, + port_key=port_key, + status=output_status, + ) + ) + ) + + await _assert_call_count(server_disconnect, call_count=_NUMBER_OF_CLIENTS * 2) diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_directory_utils.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_directory_utils.py index dcd1b5b8cc5..a2ab9b9ce9f 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_directory_utils.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_directory_utils.py @@ -6,7 +6,6 @@ from uuid import uuid4 import pytest -from _pytest.fixtures import FixtureRequest from pydantic import NonNegativeInt, PositiveInt from simcore_service_dynamic_sidecar.modules.outputs._directory_utils import ( get_directory_total_size, @@ -25,12 +24,12 @@ def _create_files(path: Path, files: NonNegativeInt) -> PositiveInt: @pytest.fixture(params=[10, 100, 1000]) -def files_per_directory(request: FixtureRequest) -> NonNegativeInt: +def files_per_directory(request: pytest.FixtureRequest) -> NonNegativeInt: return request.param @pytest.fixture(params=[10, 100]) -def subdirs_per_directory(request: FixtureRequest) -> NonNegativeInt: +def subdirs_per_directory(request: pytest.FixtureRequest) -> NonNegativeInt: return request.param diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_filter.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_filter.py index ff0284cd83a..16b57b218c9 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_filter.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_filter.py @@ -2,13 +2,16 @@ # pylint:disable=unused-argument import asyncio +from collections.abc import AsyncIterator from pathlib import Path -from typing import AsyncIterator, Iterator from unittest.mock import AsyncMock import pytest -from pydantic import ByteSize, NonNegativeFloat, 
NonNegativeInt, parse_obj_as +from pydantic import ByteSize, NonNegativeFloat, NonNegativeInt, TypeAdapter from pytest_mock.plugin import MockerFixture +from simcore_service_dynamic_sidecar.modules.notifications._notifications_ports import ( + PortNotifier, +) from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext from simcore_service_dynamic_sidecar.modules.outputs._event_filter import ( BaseDelayPolicy, @@ -16,17 +19,17 @@ EventFilter, ) from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -_TENACITY_RETRY_PARAMS = dict( - reraise=True, - retry=retry_if_exception_type(AssertionError), - stop=stop_after_delay(10), - wait=wait_fixed(0.01), -) +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "stop": stop_after_delay(10), + "wait": wait_fixed(0.01), +} # FIXTURES @@ -56,10 +59,13 @@ async def outputs_context(outputs_path: Path, port_keys: list[str]) -> OutputsCo @pytest.fixture async def outputs_manager( - outputs_context: OutputsContext, + outputs_context: OutputsContext, port_notifier: PortNotifier ) -> AsyncIterator[OutputsManager]: outputs_manager = OutputsManager( - outputs_context=outputs_context, io_log_redirect_cb=None, progress_cb=None + outputs_context=outputs_context, + port_notifier=port_notifier, + io_log_redirect_cb=None, + progress_cb=None, ) await outputs_manager.start() yield outputs_manager @@ -69,11 +75,11 @@ async def outputs_manager( @pytest.fixture def mocked_port_key_content_changed( mocker: MockerFixture, outputs_manager: OutputsManager -) -> Iterator[AsyncMock]: +) -> AsyncMock: async def _mock_upload_outputs(*args, **kwargs) -> None: pass - yield mocker.patch.object( + return mocker.patch.object( outputs_manager, "port_key_content_changed", side_effect=_mock_upload_outputs ) @@ -95,8 +101,8 @@ def get_wait_interval(self, dir_size: NonNegativeInt) -> NonNegativeFloat: @pytest.fixture -def mock_get_directory_total_size(mocker: MockerFixture) -> Iterator[AsyncMock]: - yield mocker.patch( +def mock_get_directory_total_size(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( "simcore_service_dynamic_sidecar.modules.outputs._event_filter.get_directory_total_size", return_value=1, ) @@ -121,10 +127,6 @@ async def _wait_for_event_to_trigger(event_filter: EventFilter) -> None: await asyncio.sleep(event_filter.delay_policy.get_min_interval() * 5) -async def _wait_for_event_to_trigger_big_directory(event_filter: EventFilter) -> None: - await asyncio.sleep(event_filter.delay_policy.get_wait_interval(1) * 2) - - # TESTS @@ -164,8 +166,9 @@ async def test_always_trigger_after_delay( # event trigger after correct interval delay correctly for expected_call_count in range(1, 10): await event_filter.enqueue(port_key_1) - await _wait_for_event_to_trigger_big_directory(event_filter) - assert mocked_port_key_content_changed.call_count == expected_call_count + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + assert mocked_port_key_content_changed.call_count == expected_call_count async def test_minimum_amount_of_get_directory_total_size_calls( @@ -184,7 +187,6 @@ async def test_minimum_amount_of_get_directory_total_size_calls( assert mocked_port_key_content_changed.call_count == 0 # event finished processing and was 
dispatched - await _wait_for_event_to_trigger_big_directory(event_filter) async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): with attempt: assert mock_get_directory_total_size.call_count == 2 @@ -215,17 +217,18 @@ async def test_minimum_amount_of_get_directory_total_size_calls_with_continuous_ assert mocked_port_key_content_changed.call_count == 0 # event finished processing and was dispatched - await _wait_for_event_to_trigger_big_directory(event_filter) - assert mock_get_directory_total_size.call_count == 2 - assert mocked_port_key_content_changed.call_count == 1 + async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + with attempt: + assert mock_get_directory_total_size.call_count == 2 + assert mocked_port_key_content_changed.call_count == 1 def test_default_delay_policy(): wait_policy = DefaultDelayPolicy() # below items are defined by the default policy - LOWER_BOUND = parse_obj_as(ByteSize, "1mib") - UPPER_BOUND = parse_obj_as(ByteSize, "500mib") + LOWER_BOUND = TypeAdapter(ByteSize).validate_python("1mib") + UPPER_BOUND = TypeAdapter(ByteSize).validate_python("500mib") assert wait_policy.get_min_interval() == 1.0 @@ -237,4 +240,7 @@ def test_default_delay_policy(): assert wait_policy.get_wait_interval(UPPER_BOUND - 1) < 10.0 assert wait_policy.get_wait_interval(UPPER_BOUND) == 10.0 assert wait_policy.get_wait_interval(UPPER_BOUND + 1) == 10.0 - assert wait_policy.get_wait_interval(parse_obj_as(ByteSize, "1Tib")) == 10.0 + assert ( + wait_policy.get_wait_interval(TypeAdapter(ByteSize).validate_python("1Tib")) + == 10.0 + ) diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_handler.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_handler.py index 1c22150ca88..49d38d946ea 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_handler.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_event_handler.py @@ -2,20 +2,32 @@ # pylint: disable=protected-access import asyncio +from collections.abc import AsyncIterable from pathlib import Path -from typing import AsyncIterable -from unittest.mock import AsyncMock +from typing import Any, Final +from unittest.mock import Mock import aioprocessing import pytest from aioprocessing.queues import AioQueue from pydantic import PositiveFloat +from simcore_service_dynamic_sidecar.modules.notifications._notifications_ports import ( + PortNotifier, +) from simcore_service_dynamic_sidecar.modules.outputs._context import OutputsContext from simcore_service_dynamic_sidecar.modules.outputs._event_handler import ( EventHandlerObserver, _EventHandlerProcess, + _PortKeysEventHandler, ) from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager +from watchdog.events import ( + DirModifiedEvent, + FileClosedEvent, + FileCreatedEvent, + FileMovedEvent, + FileSystemEvent, +) @pytest.fixture @@ -39,14 +51,17 @@ async def outputs_context( @pytest.fixture async def outputs_manager( - outputs_context: OutputsContext, + outputs_context: OutputsContext, port_notifier: PortNotifier ) -> AsyncIterable[OutputsManager]: outputs_manager = OutputsManager( - outputs_context, io_log_redirect_cb=None, progress_cb=None + outputs_context, + port_notifier=port_notifier, + io_log_redirect_cb=None, + progress_cb=None, ) await outputs_manager.start() - outputs_manager.set_all_ports_for_upload = AsyncMock() + outputs_manager.set_all_ports_for_upload = Mock() yield outputs_manager await outputs_manager.shutdown() @@ -104,7 +119,6 @@ async def 
test_event_handler_observer_health_degraded( outputs_manager: OutputsManager, heart_beat_interval_s: PositiveFloat, ): - observer_monitor = EventHandlerObserver( outputs_context=outputs_context, outputs_manager=outputs_manager, @@ -119,3 +133,111 @@ async def test_event_handler_observer_health_degraded( await asyncio.sleep(observer_monitor.wait_for_heart_beat_interval_s * 3) await observer_monitor.stop() assert outputs_manager.set_all_ports_for_upload.call_count >= 1 + + +_STATE_PATH: Final[Path] = Path("/some/random/fake/path/for/state/") + + +@pytest.fixture +def mock_state_path() -> Path: + return _STATE_PATH + + +class _MockAioQueue: + def __init__(self) -> None: + self.items: list[Any] = [] + + def put(self, item: Any) -> None: + self.items.append(item) + + def get(self) -> Any | None: + try: + return self.items.pop() + except IndexError: + return None + + +@pytest.mark.parametrize( + "event, expected_port_key", + [ + pytest.param( + FileCreatedEvent(src_path=f"{_STATE_PATH}/untitled.txt", dest_path=""), + None, + id="file_create_outside", + ), + pytest.param( + FileCreatedEvent( + src_path=f"{_STATE_PATH}/output_1/untitled1.txt", + dest_path="", + ), + "output_1", + id="file_create_inside_monitored_port", + ), + pytest.param( + FileCreatedEvent( + src_path=f"{_STATE_PATH}/output_9/untitled1.txt", + dest_path="", + ), + None, + id="file_create_inside_not_monitored_port", + ), + pytest.param( + FileMovedEvent( + src_path=f"{_STATE_PATH}/untitled.txt", + dest_path=f"{_STATE_PATH}/asdsadsasad.txt", + ), + None, + id="move_outside_any_port", + ), + pytest.param( + FileMovedEvent( + src_path=f"{_STATE_PATH}/asdsadsasad.txt", + dest_path=f"{_STATE_PATH}/output_1/asdsadsasad.txt", + ), + "output_1", + id="move_to_monitored_port", + ), + pytest.param( + FileMovedEvent( + src_path=f"{_STATE_PATH}/asdsadsasad.txt", + dest_path=f"{_STATE_PATH}/output_9/asdsadsasad.txt", + ), + None, + id="move_outside_monitored_port", + ), + pytest.param( + DirModifiedEvent(src_path=f"{_STATE_PATH}/output_1", dest_path=""), + None, + id="modified_port_dir_does_nothing", + ), + pytest.param( + DirModifiedEvent(src_path=f"{_STATE_PATH}", dest_path=""), + None, + id="modified_outer_dir_does_nothing", + ), + pytest.param( + FileClosedEvent(src_path=f"{_STATE_PATH}/untitled.txt", dest_path=""), + None, + id="close_file_outside_does_nothing", + ), + pytest.param( + FileClosedEvent( + src_path=f"{_STATE_PATH}/output_1/asdsadsasad.txt", dest_path="" + ), + "output_1", + id="close_file_inside_triggers_event", + ), + ], +) +def test_port_keys_event_handler_triggers_for_events( + mock_state_path: Path, event: FileSystemEvent, expected_port_key: str | None +) -> None: + + queue = _MockAioQueue() + + event_handler = _PortKeysEventHandler(mock_state_path, queue) + event_handler.handle_set_outputs_port_keys(outputs_port_keys={"output_1"}) + event_handler.handle_toggle_event_propagation(is_enabled=True) + + event_handler.event_handler(event) + assert queue.get() == expected_port_key diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_manager.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_manager.py index 83c70d52b7d..fe106cd55c8 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_manager.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_manager.py @@ -4,31 +4,34 @@ import asyncio import inspect +from collections.abc import AsyncIterator, Iterator from dataclasses import dataclass from inspect import FullArgSpec from pathlib import Path -from typing import 
AsyncIterator, Iterator from unittest.mock import AsyncMock import pytest from async_asgi_testclient import TestClient from faker import Faker from fastapi import FastAPI +from models_library.services import ServiceRunID from pydantic import PositiveFloat -from pytest import FixtureRequest, MonkeyPatch from pytest_mock.plugin import MockerFixture -from pytest_simcore.helpers.utils_envs import EnvVarsDict +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict from simcore_sdk.node_ports_common.exceptions import S3TransferError from simcore_sdk.node_ports_common.file_io_utils import LogRedirectCB from simcore_service_dynamic_sidecar.core.settings import ApplicationSettings from simcore_service_dynamic_sidecar.modules.mounted_fs import MountedVolumes +from simcore_service_dynamic_sidecar.modules.notifications._notifications_ports import ( + PortNotifier, +) from simcore_service_dynamic_sidecar.modules.outputs._context import ( OutputsContext, setup_outputs_context, ) from simcore_service_dynamic_sidecar.modules.outputs._manager import ( OutputsManager, - UploadPortsFailed, + UploadPortsFailedError, _PortKeyTracker, setup_outputs_manager, ) @@ -54,7 +57,7 @@ def stop_raising_errors(self) -> None: @pytest.fixture(params=[0.01]) -def upload_duration(request: FixtureRequest) -> PositiveFloat: +def upload_duration(request: pytest.FixtureRequest) -> PositiveFloat: return request.param @@ -70,7 +73,7 @@ def mock_upload_outputs( async def _mock_upload_outputs(*args, **kwargs) -> None: await asyncio.sleep(upload_duration) - yield mocker.patch( + return mocker.patch( "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", side_effect=_mock_upload_outputs, ) @@ -82,7 +85,7 @@ async def _mock_upload_outputs(*args, **kwargs) -> None: _MockError(error_class=S3TransferError, message="mocked_s3transfererror"), ] ) -def mock_error(request: FixtureRequest) -> _MockError: +def mock_error(request: pytest.FixtureRequest) -> _MockError: return request.param @@ -107,7 +110,7 @@ async def _mock_upload_outputs(*args, **kwargs) -> None: @pytest.fixture(params=[1, 4, 10]) -def port_keys(request: FixtureRequest) -> list[str]: +def port_keys(request: pytest.FixtureRequest) -> list[str]: return [f"port_{i}" for i in range(request.param)] @@ -165,10 +168,11 @@ async def outputs_context( @pytest.fixture async def outputs_manager( - outputs_context: OutputsContext, + outputs_context: OutputsContext, port_notifier: PortNotifier ) -> AsyncIterator[OutputsManager]: outputs_manager = OutputsManager( outputs_context=outputs_context, + port_notifier=port_notifier, io_log_redirect_cb=None, task_monitor_interval_s=0.01, progress_cb=None, @@ -230,7 +234,7 @@ async def test_recovers_after_raising_error( assert await outputs_manager._port_key_tracker.no_tracked_ports() is False await asyncio.sleep(outputs_manager.task_monitor_interval_s * 10) - with pytest.raises(UploadPortsFailed) as exec_info: + with pytest.raises(UploadPortsFailedError) as exec_info: await outputs_manager.wait_for_all_uploads_to_finish() assert set(exec_info.value.failures.keys()) == set(port_keys) | set( @@ -272,7 +276,6 @@ async def test_port_key_tracker_add_pending( async def test_port_key_tracker_are_pending_ports_uploading( port_key_tracker_with_ports: _PortKeyTracker, port_keys: list[str] ): - await port_key_tracker_with_ports.move_all_ports_to_uploading() assert await port_key_tracker_with_ports.are_pending_ports_uploading() is False @@ -353,9 +356,9 @@ async def test_port_key_tracker_workflow( async def 
test_regression_io_log_redirect_cb( - mock_environment: EnvVarsDict, monkeypatch: MonkeyPatch, faker: Faker + mock_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch, faker: Faker ): - for mock_empty_str in { + for mock_empty_str in ( "RABBIT_HOST", "RABBIT_USER", "RABBIT_PASSWORD", @@ -363,14 +366,16 @@ async def test_regression_io_log_redirect_cb( "POSTGRES_USER", "POSTGRES_PASSWORD", "POSTGRES_DB", - }: + ): monkeypatch.setenv(mock_empty_str, "") + monkeypatch.setenv("RABBIT_SECURE", "false") mounted_volumes = MountedVolumes( - run_id=faker.uuid4(cast_to=None), + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_dynamic(), node_id=faker.uuid4(cast_to=None), inputs_path=Path("/"), outputs_path=Path("/"), + user_preferences_path=None, state_paths=[], state_exclude=set(), compose_namespace="", @@ -389,7 +394,6 @@ async def test_regression_io_log_redirect_cb( setup_outputs_manager(app) async with TestClient(app): # runs setup handlers - outputs_manager: OutputsManager = app.state.outputs_manager assert outputs_manager.io_log_redirect_cb is not None @@ -397,22 +401,23 @@ async def test_regression_io_log_redirect_cb( assert inspect.getfullargspec( outputs_manager.io_log_redirect_cb.func ) == FullArgSpec( - args=["app", "logs"], + args=["app", "log"], varargs=None, varkw=None, defaults=None, - kwonlyargs=[], + kwonlyargs=["log_level"], kwonlydefaults=None, annotations={ "return": None, "app": FastAPI, - "logs": str, + "log": str, + "log_level": int, }, ) # ensure logger used in nodeports deos not change assert inspect.getfullargspec(LogRedirectCB.__call__) == FullArgSpec( - args=["self", "logs"], + args=["self", "log"], varargs=None, varkw=None, defaults=None, @@ -420,6 +425,6 @@ async def test_regression_io_log_redirect_cb( kwonlydefaults=None, annotations={ "return": None, - "logs": str, + "log": str, }, ) diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_watchdog_extensions.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_watchdog_extensions.py index 27682a53699..7456d5d77e8 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_watchdog_extensions.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_watchdog_extensions.py @@ -2,7 +2,7 @@ import asyncio from pathlib import Path -from unittest.mock import AsyncMock +from unittest.mock import Mock from uuid import uuid4 import pytest @@ -34,7 +34,7 @@ async def test_regression_watchdog_blocks_on_handler_error( path_to_observe: Path, fail_once: bool ): raised_error = False - event_handler = AsyncMock() + event_handler = Mock() class MockedEventHandler(FileSystemEventHandler): def on_any_event(self, event: FileSystemEvent) -> None: @@ -43,7 +43,8 @@ def on_any_event(self, event: FileSystemEvent) -> None: nonlocal raised_error if not raised_error and fail_once: raised_error = True - raise RuntimeError("raised as expected") + msg = "raised as expected" + raise RuntimeError(msg) observer = ExtendedInotifyObserver() observer.schedule( @@ -75,7 +76,8 @@ async def test_safe_file_system_event_handler( class MockedEventHandler(SafeFileSystemEventHandler): def event_handler(self, _: FileSystemEvent) -> None: if user_code_raises_error: - raise RuntimeError("error was raised") + msg = "error was raised" + raise RuntimeError(msg) mocked_handler = MockedEventHandler() mocked_handler.on_any_event(mocked_file_system_event) diff --git a/services/dynamic-sidecar/tests/unit/test_modules_outputs_watcher.py b/services/dynamic-sidecar/tests/unit/test_modules_outputs_watcher.py index 
61ce2893788..eeea009cc32 100644 --- a/services/dynamic-sidecar/tests/unit/test_modules_outputs_watcher.py +++ b/services/dynamic-sidecar/tests/unit/test_modules_outputs_watcher.py @@ -3,29 +3,32 @@ # pylint: disable=unused-argument import asyncio -from collections import deque +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Iterator from dataclasses import dataclass from pathlib import Path from random import randbytes, shuffle from shutil import move, rmtree from threading import Thread -from typing import AsyncIterable, AsyncIterator, Awaitable, Final, Iterator, Optional +from typing import Final from unittest.mock import AsyncMock import aiofiles import pytest from aiofiles import os from faker import Faker +from models_library.services import ServiceRunID from pydantic import ( ByteSize, NonNegativeFloat, NonNegativeInt, PositiveFloat, - parse_obj_as, + TypeAdapter, ) -from pytest import FixtureRequest from pytest_mock import MockerFixture from simcore_service_dynamic_sidecar.modules.mounted_fs import MountedVolumes +from simcore_service_dynamic_sidecar.modules.notifications._notifications_ports import ( + PortNotifier, +) from simcore_service_dynamic_sidecar.modules.outputs import ( _watcher as outputs_watcher_core, ) @@ -38,17 +41,16 @@ ) from simcore_service_dynamic_sidecar.modules.outputs._manager import OutputsManager from simcore_service_dynamic_sidecar.modules.outputs._watcher import OutputsWatcher -from tenacity._asyncio import AsyncRetrying +from tenacity.asyncio import AsyncRetrying from tenacity.retry import retry_if_exception_type from tenacity.stop import stop_after_delay from tenacity.wait import wait_fixed -_TENACITY_RETRY_PARAMS = dict( - reraise=True, - retry=retry_if_exception_type(AssertionError), - stop=stop_after_delay(10), - wait=wait_fixed(0.01), -) +_TENACITY_RETRY_PARAMS = { + "reraise": True, + "retry": retry_if_exception_type(AssertionError), + "wait": wait_fixed(0.01), +} TICK_INTERVAL: Final[PositiveFloat] = 0.001 WAIT_INTERVAL: Final[PositiveFloat] = TICK_INTERVAL * 10 @@ -61,10 +63,11 @@ @pytest.fixture def mounted_volumes(faker: Faker, tmp_path: Path) -> Iterator[MountedVolumes]: mounted_volumes = MountedVolumes( - run_id=faker.uuid4(cast_to=None), + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_dynamic(), node_id=faker.uuid4(cast_to=None), inputs_path=tmp_path / "inputs", outputs_path=tmp_path / "outputs", + user_preferences_path=None, state_paths=[], state_exclude=set(), compose_namespace="", @@ -90,10 +93,11 @@ async def outputs_context( @pytest.fixture async def outputs_manager( - outputs_context: OutputsContext, + outputs_context: OutputsContext, port_notifier: PortNotifier ) -> AsyncIterable[OutputsManager]: outputs_manager = OutputsManager( outputs_context=outputs_context, + port_notifier=port_notifier, io_log_redirect_cb=None, task_monitor_interval_s=TICK_INTERVAL, progress_cb=None, @@ -122,11 +126,11 @@ async def outputs_watcher( def mock_event_filter_upload_trigger( mocker: MockerFixture, outputs_watcher: OutputsWatcher, -) -> Iterator[AsyncMock]: +) -> AsyncMock: mock_enqueue = AsyncMock(return_value=None) mocker.patch.object( - outputs_watcher._event_filter.outputs_manager, + outputs_watcher._event_filter.outputs_manager, # noqa: SLF001 "port_key_content_changed", mock_enqueue, ) @@ -135,12 +139,12 @@ class FastDelayPolicy(BaseDelayPolicy): def get_min_interval(self) -> NonNegativeFloat: return WAIT_INTERVAL - def get_wait_interval(self, dir_size: NonNegativeInt) -> NonNegativeFloat: + def 
get_wait_interval(self, _: NonNegativeInt) -> NonNegativeFloat: return WAIT_INTERVAL - outputs_watcher._event_filter.delay_policy = FastDelayPolicy() + outputs_watcher._event_filter.delay_policy = FastDelayPolicy() # noqa: SLF001 - yield mock_enqueue + return mock_enqueue @pytest.fixture @@ -148,14 +152,14 @@ def mock_long_running_upload_outputs(mocker: MockerFixture) -> Iterator[AsyncMoc async def mock_upload_outputs(*args, **kwargs) -> None: await asyncio.sleep(UPLOAD_DURATION) - yield mocker.patch( + return mocker.patch( "simcore_service_dynamic_sidecar.modules.outputs._manager.upload_outputs", - sire_effect=mock_upload_outputs, + side_effect=mock_upload_outputs, ) @pytest.fixture(params=[1, 2, 4]) -def files_per_port_key(request: FixtureRequest) -> NonNegativeInt: +def files_per_port_key(request: pytest.FixtureRequest) -> NonNegativeInt: return request.param @@ -168,31 +172,31 @@ class FileGenerationInfo: @pytest.fixture( params=[ FileGenerationInfo( - size=parse_obj_as(ByteSize, "100b"), - chunk_size=parse_obj_as(ByteSize, "1b"), + size=TypeAdapter(ByteSize).validate_python("100b"), + chunk_size=TypeAdapter(ByteSize).validate_python("1b"), ), FileGenerationInfo( - size=parse_obj_as(ByteSize, "100kib"), - chunk_size=parse_obj_as(ByteSize, "1kib"), + size=TypeAdapter(ByteSize).validate_python("100kib"), + chunk_size=TypeAdapter(ByteSize).validate_python("1kib"), ), FileGenerationInfo( - size=parse_obj_as(ByteSize, "100mib"), - chunk_size=parse_obj_as(ByteSize, "1mib"), + size=TypeAdapter(ByteSize).validate_python("100mib"), + chunk_size=TypeAdapter(ByteSize).validate_python("1mib"), ), FileGenerationInfo( - size=parse_obj_as(ByteSize, "100mib"), - chunk_size=parse_obj_as(ByteSize, "10mib"), + size=TypeAdapter(ByteSize).validate_python("100mib"), + chunk_size=TypeAdapter(ByteSize).validate_python("10mib"), ), ] ) -def file_generation_info(request: FixtureRequest) -> FileGenerationInfo: +def file_generation_info(request: pytest.FixtureRequest) -> FileGenerationInfo: return request.param # UTILS -async def random_events_in_path( +async def random_events_in_path( # noqa: C901 *, port_key_path: Path, files_per_port_key: NonNegativeInt, @@ -235,21 +239,23 @@ async def _remove_file(file_path: Path) -> None: await os.remove(file_path) assert file_path.exists() is False - event_awaitables: deque[Awaitable] = deque() - - for i in range(empty_files): - event_awaitables.append(_empty_file(port_key_path / f"empty_file_{i}")) - for i in range(moved_files): - event_awaitables.append(_move_existing_file(port_key_path / f"moved_file_{i}")) - for i in range(removed_files): - event_awaitables.append(_remove_file(port_key_path / f"removed_file_{i}")) - - for i in range(files_per_port_key): - event_awaitables.append( + event_awaitables: list[Awaitable] = [ + *(_empty_file(port_key_path / f"empty_file_{i}") for i in range(empty_files)), + *( + _move_existing_file(port_key_path / f"moved_file_{i}") + for i in range(moved_files) + ), + *( + _remove_file(port_key_path / f"removed_file_{i}") + for i in range(removed_files) + ), + *( _random_file( port_key_path / f"big_file{i}", size=size, chunk_size=chunk_size ) - ) + for i in range(files_per_port_key) + ), + ] shuffle(event_awaitables) # NOTE: wait for events to be generated events in sequence @@ -258,9 +264,7 @@ async def _remove_file(file_path: Path) -> None: await awaitable -async def _generate_event_burst( - tmp_path: Path, subfolder: Optional[str] = None -) -> None: +async def _generate_event_burst(tmp_path: Path, subfolder: str | None = None) -> None: def 
_worker(): full_dir_path = tmp_path if subfolder is None else tmp_path / subfolder full_dir_path.mkdir(parents=True, exist_ok=True) @@ -288,11 +292,14 @@ async def _wait_for_events_to_trigger() -> None: await asyncio.sleep(event_wait_interval) +@pytest.mark.flaky(max_runs=3) async def test_run_observer( mock_event_filter_upload_trigger: AsyncMock, outputs_watcher: OutputsWatcher, port_keys: list[str], ) -> None: + await outputs_watcher.enable_event_propagation() + # generates the first event chain await _generate_event_burst( outputs_watcher.outputs_context.outputs_path, port_keys[0] @@ -315,6 +322,7 @@ async def test_does_not_trigger_on_attribute_change( outputs_watcher: OutputsWatcher, ): await _wait_for_events_to_trigger() + await outputs_watcher.enable_event_propagation() # crate a file in the directory mounted_volumes.disk_outputs_path.mkdir(parents=True, exist_ok=True) @@ -342,8 +350,10 @@ async def test_port_key_sequential_event_generation( file_generation_info: FileGenerationInfo, port_keys: list[str], ): + await outputs_watcher.enable_event_propagation() + # writing ports sequentially - wait_interval_for_port: deque[float] = deque() + wait_interval_for_port: list[float] = [] for port_key in port_keys: port_dir = mounted_volumes.disk_outputs_path / port_key port_dir.mkdir(parents=True, exist_ok=True) @@ -354,20 +364,26 @@ async def test_port_key_sequential_event_generation( chunk_size=file_generation_info.chunk_size, ) wait_interval_for_port.append( - outputs_watcher._event_filter.delay_policy.get_wait_interval( + outputs_watcher._event_filter.delay_policy.get_wait_interval( # noqa: SLF001 get_directory_total_size(port_dir) ) ) - # Waiting for events o finish propagation and changes to be uploaded + # Waiting for events to finish propagation and changes to be uploaded MARGIN_FOR_ALL_EVENT_PROCESSORS_TO_TRIGGER = 1 - sleep_for = max(wait_interval_for_port) + MARGIN_FOR_ALL_EVENT_PROCESSORS_TO_TRIGGER - print(f"Waiting {sleep_for} seconds for events to be processed") - await asyncio.sleep(sleep_for) - - assert mock_long_running_upload_outputs.call_count > 0 + sleep_for = max( + max(wait_interval_for_port) + MARGIN_FOR_ALL_EVENT_PROCESSORS_TO_TRIGGER, 3 + ) + print(f"max {sleep_for=} interval") + async for attempt in AsyncRetrying( + **_TENACITY_RETRY_PARAMS, stop=stop_after_delay(sleep_for) + ): + with attempt: + assert mock_long_running_upload_outputs.call_count > 0 - async for attempt in AsyncRetrying(**_TENACITY_RETRY_PARAMS): + async for attempt in AsyncRetrying( + **_TENACITY_RETRY_PARAMS, stop=stop_after_delay(10) + ): with attempt: uploaded_port_keys: set[str] = set() for call_args in mock_long_running_upload_outputs.call_args_list: diff --git a/services/dynamic-sidecar/tests/unit/test_modules_prometheus_metrics.py b/services/dynamic-sidecar/tests/unit/test_modules_prometheus_metrics.py new file mode 100644 index 00000000000..91b91d7a947 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_prometheus_metrics.py @@ -0,0 +1,47 @@ +from collections.abc import Sequence +from datetime import datetime, timedelta +from typing import Final + +import arrow +import pytest +from pydantic import NonNegativeFloat +from simcore_service_dynamic_sidecar.modules.prometheus_metrics import ( + _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, + _MAX_PROMETHEUS_SAMPLES, + _get_user_services_scrape_interval, +) + +_DT_REF: Final[datetime] = arrow.utcnow().datetime + + +@pytest.mark.parametrize( + "input_query_times, expected", + [ + pytest.param( + [], _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, 
id="no_prometheus_queries" + ), + pytest.param( + [_DT_REF], + _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, + id="too_few_prometheus_queries", + ), + ([_DT_REF, _DT_REF + timedelta(seconds=5)], 5), + pytest.param( + [_DT_REF, _DT_REF + timedelta(seconds=1000)], + _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, + id="prometheus_queries_too_far_apart", + ), + pytest.param( + [ + _DT_REF + timedelta(seconds=i * 3) + for i in range(_MAX_PROMETHEUS_SAMPLES) + ], + 3, + id="average_over_prometheus_queries", + ), + ], +) +def test_get_user_services_scrape_interval( + input_query_times: Sequence[datetime], expected: NonNegativeFloat +): + assert _get_user_services_scrape_interval(input_query_times) == expected diff --git a/services/dynamic-sidecar/tests/unit/test_modules_system_monitor__disk_usage.py b/services/dynamic-sidecar/tests/unit/test_modules_system_monitor__disk_usage.py new file mode 100644 index 00000000000..5cac0f59934 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_system_monitor__disk_usage.py @@ -0,0 +1,251 @@ +# pylint:disable=protected-access +# pylint:disable=redefined-outer-name +# pylint:disable=unused-argument + +from collections.abc import Callable +from datetime import timedelta +from pathlib import Path +from unittest.mock import AsyncMock, Mock + +import pytest +from common_library.json_serialization import json_dumps +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import ( + DiskUsage, + MountPathCategory, +) +from models_library.projects_nodes_io import NodeID +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from psutil._common import sdiskusage +from pydantic import ByteSize, TypeAdapter +from pytest_mock import MockerFixture +from simcore_service_dynamic_sidecar.modules.mounted_fs import MountedVolumes +from simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage import ( + DiskUsageMonitor, + _get_monitored_paths, + get_usage, +) + + +@pytest.fixture +def dy_volumes(tmp_path: Path) -> Path: + return tmp_path + + +@pytest.fixture +def get_monitored_paths( + dy_volumes: Path, node_id: NodeID +) -> Callable[[Path, Path, list[Path]], dict[MountPathCategory, set[Path]]]: + def _( + inputs: Path, outputs: Path, states: list[Path] + ) -> dict[MountPathCategory, set[Path]]: + mounted_volumes = MountedVolumes( + service_run_id=ServiceRunID.get_resource_tracking_run_id_for_dynamic(), + node_id=node_id, + inputs_path=dy_volumes / inputs, + outputs_path=dy_volumes / outputs, + user_preferences_path=None, + state_paths=[dy_volumes / x for x in states], + state_exclude=set(), + compose_namespace="", + dy_volumes=dy_volumes, + ) + app = Mock() + app.state = Mock() + app.state.mounted_volumes = mounted_volumes + return _get_monitored_paths(app) + + return _ + + +@pytest.fixture +def mock_disk_usage(mocker: MockerFixture) -> Callable[[dict[str, ByteSize]], None]: + base_module = "simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage" + + def _(free: dict[str, ByteSize]) -> None: + def _disk_usage(path: str) -> sdiskusage: + return sdiskusage(total=0, used=0, free=free[path], percent=0) + + mocker.patch(f"{base_module}.psutil.disk_usage", side_effect=_disk_usage) + + return _ + + +@pytest.fixture +def publish_disk_usage_spy(mocker: MockerFixture) -> Mock: + mock = Mock() + + def __publish_disk_usage( + app: FastAPI, + *, + user_id: UserID, + node_id: NodeID, + usage: dict[Path, DiskUsage], + ) -> None: + mock(usage) + + mocker.patch( + 
"simcore_service_dynamic_sidecar.modules.system_monitor._disk_usage.publish_disk_usage", + side_effect=__publish_disk_usage, + ) + return mock + + +def _get_entry(mock: Mock, *, index: int) -> dict[Path, DiskUsage]: + return mock.call_args_list[index].args[0] + + +def _get_byte_size(byte_size_as_str: str) -> ByteSize: + return TypeAdapter(ByteSize).validate_python(byte_size_as_str) + + +def _get_mocked_disk_usage(byte_size_as_str: str) -> DiskUsage: + bytes_size = _get_byte_size(byte_size_as_str) + return DiskUsage( + total=bytes_size, used=ByteSize(0), free=bytes_size, used_percent=0 + ) + + +async def _assert_monitor_triggers( + disk_usage_monitor: DiskUsageMonitor, + publish_disk_usage_spy: Mock, + *, + expected_events: int, + monitor_call_count: int = 10, +) -> None: + for _ in range(monitor_call_count): + # regardless of how many times it's called only generates 1 publish event + await disk_usage_monitor._monitor() # pylint:disable=protected-access # noqa: SLF001 + assert len(publish_disk_usage_spy.call_args_list) == expected_events + + +async def test_disk_usage_monitor( + mock_disk_usage: Callable[[dict[str, ByteSize]], None], + get_monitored_paths: Callable[ + [Path, Path, list[Path]], dict[MountPathCategory, set[Path]] + ], + dy_volumes: Path, + publish_disk_usage_spy: Mock, + node_id: NodeID, +) -> None: + disk_usage_monitor = DiskUsageMonitor( + app=AsyncMock(), + user_id=1, + node_id=node_id, + interval=timedelta(seconds=5), + monitored_paths=get_monitored_paths( + Path("/inputs"), Path("/outputs"), [Path("/workspace")] + ), + dy_volumes_mount_dir=dy_volumes, + ) + + assert len(publish_disk_usage_spy.call_args_list) == 0 + + for i in range(1, 3): + mock_disk_usage( + { + f"{p}": _get_byte_size(f"{i*2}kb") + for p in disk_usage_monitor._monitored_paths_set # noqa: SLF001 + }, + ) + + await _assert_monitor_triggers( + disk_usage_monitor, publish_disk_usage_spy, expected_events=1 + ) + + assert _get_entry(publish_disk_usage_spy, index=0) == { + MountPathCategory.HOST: _get_mocked_disk_usage(f"{i*2}kb"), + } + + # reset mock to test again + publish_disk_usage_spy.reset_mock() + + +def _random_tmp_file(tmp_path: Path, faker: Faker) -> None: + some_path: Path = tmp_path / f"{faker.uuid4()}" + some_path.write_text("some text here") + + +async def test_get_usage(tmp_path: Path, faker: Faker): + usage_before = await get_usage(Path("/")) + _random_tmp_file(tmp_path, faker) + usage_after = await get_usage(Path("/")) + + assert usage_after.free < usage_before.free + + +async def test_disk_usage_monitor_new_frontend_format( + mock_disk_usage: Callable[[dict[str, ByteSize]], None], + get_monitored_paths: Callable[ + [Path, Path, list[Path]], dict[MountPathCategory, set[Path]] + ], + publish_disk_usage_spy: Mock, + node_id: NodeID, + dy_volumes: Path, +) -> None: + disk_usage_monitor = DiskUsageMonitor( + app=AsyncMock(), + user_id=1, + node_id=node_id, + interval=timedelta(seconds=5), + monitored_paths=get_monitored_paths( + Path("/home/user/inputs"), + Path("/home/user/outputs"), + [Path("/home/user/workspace"), Path("/.data/assets")], + ), + dy_volumes_mount_dir=dy_volumes, + ) + + mock_disk_usage( + { + f"{p}": ByteSize(1294390525952) + for p in disk_usage_monitor._monitored_paths_set # noqa: SLF001 + }, + ) + + async def _wait_for_usage() -> dict[str, DiskUsage]: + publish_disk_usage_spy.reset_mock() + await disk_usage_monitor._monitor() # noqa: SLF001 + publish_disk_usage_spy.assert_called() + return publish_disk_usage_spy.call_args_list[0][0][0] + + # normally only 1 entry is found + 
frontend_usage = await _wait_for_usage() + print(json_dumps(frontend_usage, indent=2)) + assert len(frontend_usage) == 1 + assert MountPathCategory.HOST in frontend_usage + assert frontend_usage[MountPathCategory.HOST] == _get_mocked_disk_usage( + "1294390525952" + ) + + # emulate EFS sending metrics, 2 entries are found + + disk_usage_monitor.set_disk_usage_for_path( + { + ".data_assets": _get_mocked_disk_usage("1GB"), + "home_user_workspace": _get_mocked_disk_usage("1GB"), + } + ) + + frontend_usage = await _wait_for_usage() + print(json_dumps(frontend_usage, indent=2)) + assert len(frontend_usage) == 2 + assert MountPathCategory.HOST in frontend_usage + assert MountPathCategory.STATES_VOLUMES in frontend_usage + assert frontend_usage[MountPathCategory.HOST] == _get_mocked_disk_usage( + "1294390525952" + ) + assert frontend_usage[MountPathCategory.STATES_VOLUMES] == _get_mocked_disk_usage( + "1GB" + ) + + # emulate data could not be mapped + disk_usage_monitor.set_disk_usage_for_path( + { + "missing_path": _get_mocked_disk_usage("2GB"), + } + ) + with pytest.raises(RuntimeError): + frontend_usage = await _wait_for_usage() diff --git a/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_packaging.py b/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_packaging.py new file mode 100644 index 00000000000..fb181d6512b --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_packaging.py @@ -0,0 +1,87 @@ +# pylint: disable=redefined-outer-name + +import filecmp +from pathlib import Path + +import pytest +from simcore_service_dynamic_sidecar.modules.user_services_preferences._errors import ( + DestinationIsNotADirectoryError, + PreferencesAreTooBigError, +) +from simcore_service_dynamic_sidecar.modules.user_services_preferences._packaging import ( + dir_from_bytes, + dir_to_bytes, +) + + +def _make_dir(tmp_path: Path, dir_name: str) -> Path: + target_path = tmp_path / dir_name + target_path.mkdir(parents=True, exist_ok=True) + assert target_path.is_dir() + assert len(list(target_path.rglob("*"))) == 0 + return target_path + + +def _get_relative_file_paths(path: Path) -> set[Path]: + return {x.relative_to(path) for x in path.rglob("*") if x.is_file()} + + +def assert_same_files(dir1: Path, dir2: Path): + assert dir1.is_dir() + assert dir2.is_dir() + + files_in_dir1 = _get_relative_file_paths(dir1) + files_in_dir2 = _get_relative_file_paths(dir2) + + assert files_in_dir1 == files_in_dir2 + + for file_name in files_in_dir1: + assert filecmp.cmp(dir1 / file_name, dir2 / file_name) + + +@pytest.fixture +def source_path(tmp_path: Path) -> Path: + return _make_dir(tmp_path, "soruce_path") + + +@pytest.fixture +def from_bytes_path(tmp_path: Path) -> Path: + return _make_dir(tmp_path, "from_bytes_path") + + +def add_files_in_dir(path: Path, file_count: int, subdirs_count: int) -> None: + assert subdirs_count > 0 + path.mkdir(parents=True, exist_ok=True) + for s in range(subdirs_count): + (path / f"subdir{s}").mkdir(parents=True, exist_ok=True) + for f in range(file_count): + (path / f"subdir{s}" / f"f{f}.txt").write_text(f"{f} and some text") + + +async def test_workflow(source_path: Path, from_bytes_path: Path): + add_files_in_dir(source_path, file_count=10, subdirs_count=1) + + payload = await dir_to_bytes(source_path) + assert len(payload) > 0 + + await dir_from_bytes(payload, from_bytes_path) + + assert_same_files(source_path, from_bytes_path) + + +async def test_dir_to_bytes_too_big(source_path: Path): + 
add_files_in_dir(source_path, file_count=500, subdirs_count=10) + + with pytest.raises(PreferencesAreTooBigError): + await dir_to_bytes(source_path) + + +async def test_destination_is_not_a_directory(tmp_path: Path): + a_file = tmp_path / "a_file" + a_file.write_text("a") + + with pytest.raises(DestinationIsNotADirectoryError): + await dir_to_bytes(a_file) + + with pytest.raises(DestinationIsNotADirectoryError): + await dir_from_bytes(b"", a_file) diff --git a/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_user_preference.py b/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_user_preference.py new file mode 100644 index 00000000000..9496bfceb78 --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_modules_user_services_preferences_user_preference.py @@ -0,0 +1,21 @@ +# pylint: disable=no-name-in-module +# pylint: disable=redefined-outer-name + +import pytest +from models_library.services import ServiceKey +from pydantic import TypeAdapter +from simcore_service_dynamic_sidecar.modules.user_services_preferences._user_preference import ( + get_model_class, +) + + +@pytest.fixture +def service_key() -> ServiceKey: + return TypeAdapter(ServiceKey).validate_python( + "simcore/services/dynamic/test-service-34" + ) + + +def test_get_model_class_only_defined_once(service_key: ServiceKey): + for _ in range(10): + get_model_class(service_key) diff --git a/services/dynamic-sidecar/tests/unit/test_service_liveness.py b/services/dynamic-sidecar/tests/unit/test_service_liveness.py new file mode 100644 index 00000000000..5be601ebbac --- /dev/null +++ b/services/dynamic-sidecar/tests/unit/test_service_liveness.py @@ -0,0 +1,57 @@ +# pylint:disable=redefined-outer-name + +from datetime import timedelta + +import pytest +from simcore_service_dynamic_sidecar.modules.service_liveness import ( + CouldNotReachServiceError, + wait_for_service_liveness, +) + + +@pytest.fixture +def check_interval() -> timedelta: + return timedelta(seconds=0.1) + + +@pytest.fixture +def timeout() -> timedelta: + return timedelta(seconds=1) + + +@pytest.mark.parametrize("handler_return", [None, True]) +async def test_wait_for_service_liveness_ok( + check_interval: timedelta, timeout: timedelta, handler_return: bool | None +): + async def _ok_handler() -> bool | None: + return handler_return + + await wait_for_service_liveness( + _ok_handler, + service_name="test_service", + endpoint="http://fake.endpoint_string", + check_interval=check_interval, + timeout=timeout, + ) + + +@pytest.mark.parametrize("handler_return", [Exception("Ohh no, I failed!"), False]) +async def test_wait_for_service_liveness_fails( + check_interval: timedelta, + timeout: timedelta, + handler_return: bool | type[Exception], +): + async def _failing_handler() -> bool: + if isinstance(handler_return, bool): + return handler_return + raise handler_return + + with pytest.raises(CouldNotReachServiceError) as exc_info: + await wait_for_service_liveness( + _failing_handler, + service_name="test_service", + endpoint="http://fake.endpoint_string", + check_interval=check_interval, + timeout=timeout, + ) + assert "Could not contact service" in f"{exc_info.value}" diff --git a/services/efs-guardian/Dockerfile b/services/efs-guardian/Dockerfile new file mode 100644 index 00000000000..af76f74dd70 --- /dev/null +++ b/services/efs-guardian/Dockerfile @@ -0,0 +1,218 @@ +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM 
ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd sercices/efs-guardian +# docker build -f Dockerfile -t efs-guardian:prod --target production ../../ +# docker run efs-guardian:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=sanderegg + +# NOTE: to list the latest version run `make` inside `scripts/apt-packages-versions` +ENV DOCKER_APT_VERSION="5:26.1.4-1~debian.12~bookworm" + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + gosu \ + ca-certificates \ + curl \ + gnupg \ + lsb-release \ + && mkdir -p /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + # only the cli is needed and we remove the unnecessary stuff again + docker-ce-cli=${DOCKER_APT_VERSION} \ + && apt-get remove -y\ + gnupg \ + curl \ + lsb-release \ + && apt-get clean -y\ + # verify that the binary works + && gosu nobody true + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +# simcore-efs-user uid=8006(efs) gid=8006(efs) groups=8006(efs) +# Currently all simcore services run as user 8004. The Guardian needs to run as a different user 8006. +# The Guardian has access to the root directory of the EFS distributed file system +# and can change the file permissions of user 8004 if needed. +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + EFS_USER_ID=8006 \ + EFS_USER_NAME=efs \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid 8004 \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/scu \ + scu + +RUN adduser \ + --uid 8006 \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/efs \ + efs + +# we create efs-group which is then used in efs guardian when he is creating a directory for user services. +RUN groupadd -g 8106 efs-group +RUN usermod -a -G efs-group efs +RUN usermod -a -G efs-group scu + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/efs/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv. 
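+# NOTE: prepending ${VIRTUAL_ENV}/bin to PATH below means `python` and `pip` resolve to the
+# virtualenv created in the build stage (`uv venv "${VIRTUAL_ENV}"`), which prod-only-deps
+# populates and the production stage then copies in unchanged.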
+ENV PATH="${VIRTUAL_ENV}/bin:$PATH"
+
+EXPOSE 8000
+EXPOSE 3000
+
+# -------------------------- Build stage -------------------
+# Installs build/package management tools and third party dependencies
+#
+# + /build WORKDIR
+#
+FROM base AS build
+
+ENV SC_BUILD_TARGET=build
+
+RUN --mount=type=cache,target=/var/cache/apt,sharing=private \
+  set -eux \
+  && apt-get update \
+  && apt-get install -y --no-install-recommends \
+  build-essential
+
+# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv
+COPY --from=uv_build /uv /uvx /bin/
+
+# NOTE: python virtualenv is used here such that installed
+# packages may be moved to production image easily by copying the venv
+RUN uv venv "${VIRTUAL_ENV}"
+
+
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+  uv pip install --upgrade \
+  wheel \
+  setuptools
+
+WORKDIR /build
+
+# install base 3rd party dependencies
+
+
+
+# --------------------------Prod-depends-only stage -------------------
+# This stage is for production-only dependencies that get partially wiped out afterwards (final docker image concerns)
+#
+# + /build
+#   + services/efs-guardian [scu:scu] WORKDIR
+#
+FROM build AS prod-only-deps
+
+ENV SC_BUILD_TARGET=prod-only-deps
+
+WORKDIR /build/services/efs-guardian
+
+RUN \
+  --mount=type=bind,source=packages,target=/build/packages,rw \
+  --mount=type=bind,source=services/efs-guardian,target=/build/services/efs-guardian,rw \
+  --mount=type=cache,target=/root/.cache/uv \
+  uv pip sync \
+  requirements/prod.txt \
+  && uv pip list
+
+
+# --------------------------Production stage -------------------
+# Final cleanup to reduce image size and startup setup
+# Runs as scu (non-root user)
+#
+# + /home/efs $HOME = WORKDIR
+#   + services/efs-guardian [scu:scu]
+#
+FROM base AS production
+
+ENV SC_BUILD_TARGET=production \
+  SC_BOOT_MODE=production
+
+ENV PYTHONOPTIMIZE=TRUE
+# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
+ENV UV_COMPILE_BYTECODE=1
+
+WORKDIR /home/efs
+
+# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps
+COPY --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+
+# Copies booting scripts
+COPY services/efs-guardian/docker services/efs-guardian/docker
+RUN chmod +x services/efs-guardian/docker/*.sh
+
+
+# https://docs.docker.com/reference/dockerfile/#healthcheck
+HEALTHCHECK \
+  --interval=10s \
+  --timeout=5s \
+  --start-period=20s \
+  --start-interval=1s \
+  --retries=5 \
+  CMD ["python3", "services/efs-guardian/docker/healthcheck.py", "http://localhost:8000/"]
+
+ENTRYPOINT [ "/bin/sh", "services/efs-guardian/docker/entrypoint.sh" ]
+CMD ["/bin/sh", "services/efs-guardian/docker/boot.sh"]
+
+
+# --------------------------Development stage -------------------
+# Source code accessible on the host but runs in the container
+# Runs as the host user (same uid/gid as the host)
+# Placed at the end to speed up the build of images targeting production
+#
+# + /devel WORKDIR
+#   + services (mounted volume)
+#
+FROM build AS development
+
+ENV SC_BUILD_TARGET=development \
+  SC_DEVEL_MOUNT=/devel/services/efs-guardian
+
+WORKDIR /devel
+
+RUN chown -R root:root "${VIRTUAL_ENV}"
+
+ENTRYPOINT ["/bin/sh", "services/efs-guardian/docker/entrypoint.sh"]
+CMD ["/bin/sh", "services/efs-guardian/docker/boot.sh"]
diff --git a/services/efs-guardian/Makefile b/services/efs-guardian/Makefile
new file mode 100644
index 00000000000..af13c225526
--- /dev/null
+++ b/services/efs-guardian/Makefile
@@ -0,0 +1,5 @@
+#
+# DEVELOPMENT recipes for efs-guardian service
+#
+include
../../scripts/common.Makefile
+include ../../scripts/common-service.Makefile
diff --git a/services/efs-guardian/README.md b/services/efs-guardian/README.md
new file mode 100644
index 00000000000..503bdb93b1b
--- /dev/null
+++ b/services/efs-guardian/README.md
@@ -0,0 +1,4 @@
+# efs-guardian
+
+
+Service to monitor and manage the elastic file system (EFS)
diff --git a/services/efs-guardian/VERSION b/services/efs-guardian/VERSION
new file mode 100644
index 00000000000..3eefcb9dd5b
--- /dev/null
+++ b/services/efs-guardian/VERSION
@@ -0,0 +1 @@
+1.0.0
diff --git a/services/efs-guardian/docker/boot.sh b/services/efs-guardian/docker/boot.sh
new file mode 100755
index 00000000000..862a3456b26
--- /dev/null
+++ b/services/efs-guardian/docker/boot.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+set -o errexit
+set -o nounset
+
+IFS=$(printf '\n\t')
+
+INFO="INFO: [$(basename "$0")] "
+
+echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..."
+echo "$INFO" "User :$(id "$(whoami)")"
+echo "$INFO" "Workdir : $(pwd)"
+
+#
+# DEVELOPMENT MODE
+#
+# - prints environ info
+# - installs requirements in mounted volume
+#
+if [ "${SC_BUILD_TARGET}" = "development" ]; then
+  echo "$INFO" "Environment :"
+  printenv | sed 's/=/: /' | sed 's/^/ /' | sort
+  echo "$INFO" "Python :"
+  python --version | sed 's/^/ /'
+  command -v python | sed 's/^/ /'
+
+  cd services/efs-guardian
+  uv pip --quiet sync requirements/dev.txt
+  cd -
+  echo "$INFO" "PIP :"
+  uv pip list
+fi
+
+if [ "${SC_BOOT_MODE}" = "debug" ]; then
+  # NOTE: production does NOT pre-install debugpy
+  if command -v uv >/dev/null 2>&1; then
+    uv pip install debugpy
+  else
+    pip install debugpy
+  fi
+fi
+
+#
+# RUNNING application
+#
+
+APP_LOG_LEVEL=${EFS_GUARDIAN_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}}
+SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]')
+echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL"
+
+if [ "${SC_BOOT_MODE}" = "debug" ]; then
+  # each echoed '--reload-dir <dir>' below ends with a trailing backslash, so the
+  # expanded list chains into the '--reload-dir .' line of the command that follows
+  reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;)
+
+  exec sh -c "
+    cd services/efs-guardian/src/simcore_service_efs_guardian && \
+    python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${EFS_GUARDIAN_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \
+    --host 0.0.0.0 \
+    --reload \
+    $reload_dir_packages
+    --reload-dir . \
+    --log-level \"${SERVER_LOG_LEVEL}\"
+  "
+else
+  exec uvicorn simcore_service_efs_guardian.main:the_app \
+    --host 0.0.0.0 \
+    --log-level "${SERVER_LOG_LEVEL}"
+fi
diff --git a/services/efs-guardian/docker/entrypoint.sh b/services/efs-guardian/docker/entrypoint.sh
new file mode 100755
index 00000000000..d8ddf1c826a
--- /dev/null
+++ b/services/efs-guardian/docker/entrypoint.sh
@@ -0,0 +1,94 @@
+#!/bin/sh
+#
+# - Executes *inside* of the container upon start as --user [default root]
+# - Notice that the container *starts* as --user [default root] but
+#   *runs* as non-root user [scu]
+#
+set -o errexit
+set -o nounset
+
+IFS=$(printf '\n\t')
+
+INFO="INFO: [$(basename "$0")] "
+WARNING="WARNING: [$(basename "$0")] "
+ERROR="ERROR: [$(basename "$0")] "
+
+# Read self-signed SSL certificates (if applicable)
+#
+# In case efs-guardian must access a docker registry in a secure way using
+# non-standard certificates (e.g. self-signed certificates), this call is needed.
+# It needs to be executed as root. It is also required for access to, e.g., a secured rabbitmq.
+update-ca-certificates
+
+echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..."
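+# Startup diagnostics: the echoes below print the effective user, workdir and
+# python/pip locations to make permission or virtualenv issues easier to spot in the logs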
+echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gip and assigns to user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $EFS_USER_NAME to root..." + adduser "$EFS_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $EFS_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$EFS_USER_NAME" + CONT_GROUPNAME=grp$EFS_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $EFS_USER_NAME to group $CONT_GROUPNAME..." + adduser "$EFS_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $EFS_USER_NAME:$EFS_USER_NAME ($EFS_USER_ID:$EFS_USER_ID) to $EFS_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$EFS_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $EFS_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$EFS_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $EFS_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$EFS_USER_ID" -exec chown --no-dereference "$EFS_USER_NAME" {} \; + fi +fi + + +# Appends docker group if socket is mounted +DOCKER_MOUNT=/var/run/docker.sock +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker + + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." + # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$EFS_USER_NAME" "$GROUPNAME" +fi + +echo "$INFO Starting $* ..." 
+echo " $(whoami) rights : $(id $whoami))" +echo " local dir : $(ls -al)" + +exec "$@" diff --git a/services/efs-guardian/docker/healthcheck.py b/services/efs-guardian/docker/healthcheck.py new file mode 100755 index 00000000000..cb51ed2399e --- /dev/null +++ b/services/efs-guardian/docker/healthcheck.py @@ -0,0 +1,42 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=20s \ + --start-interval=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + +# Adds a base-path if defined in environ +SIMCORE_NODE_BASEPATH = os.environ.get("SIMCORE_NODE_BASEPATH", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception): + with urlopen(f"{sys.argv[1]}{SIMCORE_NODE_BASEPATH}") as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/efs-guardian/requirements/Makefile b/services/efs-guardian/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/efs-guardian/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/efs-guardian/requirements/_base.in b/services/efs-guardian/requirements/_base.in new file mode 100644 index 00000000000..27f56ac4e04 --- /dev/null +++ b/services/efs-guardian/requirements/_base.in @@ -0,0 +1,19 @@ +# +# Specifies third-party dependencies for 'services/efs-guardian/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/aws-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + +packaging diff --git a/services/efs-guardian/requirements/_base.txt b/services/efs-guardian/requirements/_base.txt new file mode 100644 index 00000000000..3e5ca734e96 --- /dev/null +++ b/services/efs-guardian/requirements/_base.txt @@ -0,0 +1,899 @@ +aio-pika==9.4.3 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aioboto3==14.3.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +aiobotocore==2.22.0 + # via aioboto3 +aiocache==0.12.3 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.23.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aioboto3 +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiobotocore + # aiodocker +aioitertools==0.12.0 + # via aiobotocore +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.1 + # via aiohttp +alembic==1.13.3 + # via -r 
requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +anyio==4.6.2.post1 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +async-timeout==4.0.3 + # via asyncpg +asyncpg==0.29.0 + # via sqlalchemy +attrs==24.2.0 + # via + # aiohttp + # jsonschema + # referencing +boto3==1.37.3 + # via aiobotocore +botocore==1.37.3 + # via + # aiobotocore + # boto3 + # s3transfer +botocore-stubs==1.35.43 + # via types-aiobotocore +certifi==2024.8.30 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.0 + # via requests +click==8.1.7 + # via + # rich-toolkit + # typer + # uvicorn +deprecated==1.2.14 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via + # fastapi + # pydantic +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.31 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.65.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 + # via sqlalchemy +grpcio==1.67.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # via + # httpcore + # uvicorn +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.6 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.27.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +importlib-metadata==8.4.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jmespath==1.0.1 + # via + # aiobotocore + # boto3 + # botocore +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2023.7.1 + # via jsonschema +mako==1.3.5 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiobotocore + # aiohttp + # yarl +opentelemetry-api==1.27.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-propagator-aws-xray + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.27.0 + # via + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.27.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.27.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.27.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.48b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.48b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.48b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.48b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-botocore==0.48b0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.48b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.48b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.48b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.48b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.48b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-propagator-aws-xray==1.0.1 + # via opentelemetry-instrumentation-botocore +opentelemetry-proto==1.27.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.27.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.48b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.48b0 + # via + # opentelemetry-instrumentation-asgi + # 
opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.7 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.1 + # via -r requirements/_base.in +pamqp==3.3.0 + # via aiormq +prometheus-client==0.21.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==4.25.5 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==6.1.0 + # via + # -r 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.9.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.18.0 + # via rich +pyinstrument==5.0.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via + # aiobotocore + # arrow + # botocore +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.29.3 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.2 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.20.0 + # via + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +setuptools==75.2.0 + # via opentelemetry-instrumentation +sh==2.1.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +shellingham==1.5.4 + # via typer +six==1.16.0 + # via python-dateutil +sniffio==1.3.1 + # via + # anyio + # httpx +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +starlette==0.41.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.66.5 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/_base.in +typer==0.12.5 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-aiobotocore==2.19.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +types-aiobotocore-ec2==2.19.0 + # via types-aiobotocore +types-aiobotocore-s3==2.19.0 + # via types-aiobotocore +types-aiobotocore-ssm==2.19.0 + # via types-aiobotocore +types-awscrt==0.22.0 + # via botocore-stubs +types-python-dateutil==2.9.0.20241003 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # alembic + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # rich-toolkit + # typer + # types-aiobotocore + # types-aiobotocore-ec2 + # types-aiobotocore-s3 + # types-aiobotocore-ssm +urllib3==2.2.3 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # botocore + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.16.0 + # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.20.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.20.2 + # via importlib-metadata diff --git a/services/efs-guardian/requirements/_test.in new file mode 100644 index 00000000000..3d7f73b1cd8 --- /dev/null +++ b/services/efs-guardian/requirements/_test.in @@ -0,0 +1,32 @@ +# +# Specifies dependencies required to run 'services/efs-guardian/test' +# both for unit and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. 
+# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +aiodocker +asgi-lifespan +coverage +debugpy +deepdiff +docker +faker +fakeredis[lua] +httpx +moto[server] +parse +psutil +pytest +pytest-asyncio +pytest-cov +pytest-mock +pytest-runner +python-dotenv +respx diff --git a/services/efs-guardian/requirements/_test.txt b/services/efs-guardian/requirements/_test.txt new file mode 100644 index 00000000000..0287acf23e6 --- /dev/null +++ b/services/efs-guardian/requirements/_test.txt @@ -0,0 +1,343 @@ +aiodocker==0.23.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +aiohappyeyeballs==2.4.3 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aiodocker +aiosignal==1.3.1 + # via + # -c requirements/_base.txt + # aiohttp +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.6.2.post1 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==24.2.0 + # via + # -c requirements/_base.txt + # aiohttp + # jsonschema + # referencing +aws-sam-translator==1.95.0 + # via cfn-lint +aws-xray-sdk==2.14.0 + # via moto +blinker==1.9.0 + # via flask +boto3==1.37.3 + # via + # -c requirements/_base.txt + # aws-sam-translator + # moto +botocore==1.37.3 + # via + # -c requirements/_base.txt + # aws-xray-sdk + # boto3 + # moto + # s3transfer +certifi==2024.8.30 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +cffi==1.17.1 + # via cryptography +cfn-lint==1.27.0 + # via moto +charset-normalizer==3.4.0 + # via + # -c requirements/_base.txt + # requests +click==8.1.7 + # via + # -c requirements/_base.txt + # flask +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +cryptography==44.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # joserfc + # moto +debugpy==1.8.12 + # via -r requirements/_test.in +deepdiff==8.2.0 + # via -r requirements/_test.in +docker==7.1.0 + # via + # -r requirements/_test.in + # moto +faker==36.1.1 + # via -r requirements/_test.in +fakeredis==2.27.0 + # via -r requirements/_test.in +flask==3.1.0 + # via + # flask-cors + # moto +flask-cors==5.0.1 + # via moto +frozenlist==1.4.1 + # via + # -c requirements/_base.txt + # aiohttp + # aiosignal +graphql-core==3.2.6 + # via moto +h11==0.14.0 + # via + # -c requirements/_base.txt + # httpcore +httpcore==1.0.6 + # via + # -c requirements/_base.txt + # httpx +httpx==0.27.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in + # respx +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests + # yarl +iniconfig==2.0.0 + # via pytest +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # flask + # moto +jmespath==1.0.1 + # via + # -c requirements/_base.txt + # boto3 + # botocore +joserfc==1.0.4 + # via moto +jsonpatch==1.33 + # via cfn-lint +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 + # via jsonpatch +jsonschema==4.23.0 + # via + # -c requirements/_base.txt + # aws-sam-translator + # openapi-schema-validator + # openapi-spec-validator +jsonschema-path==0.3.4 + # via openapi-spec-validator 
+jsonschema-specifications==2023.7.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +lupa==2.4 + # via fakeredis +markupsafe==3.0.2 + # via + # -c requirements/_base.txt + # jinja2 + # werkzeug +moto==5.1.4 + # via -r requirements/_test.in +mpmath==1.3.0 + # via sympy +multidict==6.1.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +networkx==3.4.2 + # via cfn-lint +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 + # via moto +orderly-set==5.3.0 + # via deepdiff +packaging==24.1 + # via + # -c requirements/_base.txt + # pytest +parse==1.20.2 + # via -r requirements/_test.in +pathable==0.4.4 + # via jsonschema-path +pluggy==1.5.0 + # via pytest +ply==3.11 + # via jsonpath-ng +propcache==0.3.1 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +psutil==6.1.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 + # via cffi +pydantic==2.10.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aws-sam-translator +pydantic-core==2.27.1 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.2.1 + # via moto +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-mock +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 + # via + # -c requirements/_base.txt + # botocore + # moto +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # cfn-lint + # jsonschema-path + # moto + # responses +redis==5.2.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # fakeredis +referencing==0.29.3 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2024.11.6 + # via cfn-lint +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker + # jsonschema-path + # moto + # responses +responses==0.25.6 + # via moto +respx==0.22.0 + # via -r requirements/_test.in +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.20.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 + # via + # -c requirements/_base.txt + # boto3 +setuptools==75.2.0 + # via + # -c requirements/_base.txt + # moto +six==1.16.0 + # via + # -c requirements/_base.txt + # python-dateutil + # rfc3339-validator +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan + # httpx +sortedcontainers==2.4.0 + # via fakeredis +sympy==1.13.3 + # via cfn-lint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # aws-sam-translator + # cfn-lint + # pydantic + # pydantic-core +tzdata==2025.1 + # via faker +urllib3==2.2.3 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore + # docker + # requests + # responses +werkzeug==3.1.3 + # via + # flask + # flask-cors + # moto +wrapt==1.16.0 + # via + # -c requirements/_base.txt + # aws-xray-sdk +xmltodict==0.14.2 + # via moto +yarl==1.20.0 + # via + # 
-c requirements/_base.txt + # aiohttp diff --git a/services/efs-guardian/requirements/_tools.in b/services/efs-guardian/requirements/_tools.in new file mode 100644 index 00000000000..52a9a39d162 --- /dev/null +++ b/services/efs-guardian/requirements/_tools.in @@ -0,0 +1,7 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt + +watchdog[watchmedo] diff --git a/services/efs-guardian/requirements/_tools.txt b/services/efs-guardian/requirements/_tools.txt new file mode 100644 index 00000000000..bd233d72641 --- /dev/null +++ b/services/efs-guardian/requirements/_tools.txt @@ -0,0 +1,90 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.7 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.1 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_test.txt + # pre-commit + # watchdog +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.2.0 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +watchdog==6.0.0 + # via -r requirements/_tools.in +wheel==0.45.1 + # via pip-tools diff --git a/services/efs-guardian/requirements/ci.txt b/services/efs-guardian/requirements/ci.txt new file mode 100644 index 00000000000..163b032f346 --- /dev/null +++ b/services/efs-guardian/requirements/ci.txt @@ -0,0 +1,24 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'services/efs-guardian' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-postgres-database @ ../../packages/postgres-database + +# installs current 
package +simcore-service-efs-guardian @ . diff --git a/services/efs-guardian/requirements/constraints.txt b/services/efs-guardian/requirements/constraints.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/requirements/dev.txt b/services/efs-guardian/requirements/dev.txt new file mode 100644 index 00000000000..35e2f508112 --- /dev/null +++ b/services/efs-guardian/requirements/dev.txt @@ -0,0 +1,24 @@ +# Shortcut to install all packages needed to develop 'services/efs-guardian' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/aws-library +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library +--editable ../../packages/postgres-database + +# installs current package +--editable . diff --git a/services/efs-guardian/requirements/prod.txt b/services/efs-guardian/requirements/prod.txt new file mode 100644 index 00000000000..efcaf5fb9d5 --- /dev/null +++ b/services/efs-guardian/requirements/prod.txt @@ -0,0 +1,20 @@ +# Shortcut to install 'services/efs-guardian' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-postgres-database @ ../../packages/postgres-database +# installs current package +simcore-service-efs-guardian @ . 
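The ci.txt, dev.txt and prod.txt shortcuts above install the frozen _base.txt/_test.txt/_tools.txt sets directly with pip, while setup.py (introduced in the next file of this diff) re-reads _base.txt and _test.txt to build install_requires and the test extra using a small regex. Below is a minimal, self-contained sketch of that parsing step; the SAMPLE text and the read_reqs_from_text name are hypothetical stand-ins for illustration, not content taken from the real requirement files.

# Sketch (not part of the diff): reduce a frozen requirements file to bare
# specifiers, mirroring read_reqs() in services/efs-guardian/setup.py below.
import re

SAMPLE = """\
# comment lines and includes are skipped
-r ../../packages/service-library/requirements/_base.in
-c ../../requirements/constraints.txt
fastapi==0.115.0
fakeredis[lua]
pydantic-settings==2.6.1
"""

def read_reqs_from_text(text: str) -> set[str]:
    # Same pattern as setup.py: keep only lines that do not start with
    # '#' or '-', i.e. concrete requirement specifiers.
    return set(re.findall(r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", text, re.MULTILINE))

if __name__ == "__main__":
    print(sorted(read_reqs_from_text(SAMPLE)))
    # expected: ['fakeredis[lua]', 'fastapi==0.115.0', 'pydantic-settings==2.6.1']

Any line beginning with '#' or '-' (including --requirement, --constraint, -r and -c includes) is discarded, so only concrete specifiers such as fastapi==0.115.0 end up in the package metadata.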
diff --git a/services/efs-guardian/setup.cfg b/services/efs-guardian/setup.cfg new file mode 100644 index 00000000000..a9078103b94 --- /dev/null +++ b/services/efs-guardian/setup.cfg @@ -0,0 +1,18 @@ +[bumpversion] +current_version = 1.0.0 +commit = True +message = services/efs-guardian version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/services/efs-guardian/setup.py b/services/efs-guardian/setup.py new file mode 100755 index 00000000000..d1ea836ecb1 --- /dev/null +++ b/services/efs-guardian/setup.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-efs-guardian" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Matus Drobuliak (drobuliakmatus66)",) +DESCRIPTION = "Service to monitor and manage elastic file system" +README = (CURRENT_DIR / "README.md").read_text() + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-aws-library", + "simcore-models-library", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "package_data": {"": ["data/*.yml"]}, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-efs-guardian = simcore_service_efs_guardian.cli:main", + "simcore-service = simcore_service_efs_guardian.cli:main", + ], + }, +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/__init__.py new file mode 100644 index 00000000000..f513c971cca --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/__init__.py @@ -0,0 +1,3 @@ +from ._meta import __version__ + +assert __version__ # nosec diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/_meta.py b/services/efs-guardian/src/simcore_service_efs_guardian/_meta.py new file mode 100644 index 00000000000..1d0965003fb --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/_meta.py @@ -0,0 +1,69 @@ +""" Application's metadata + +""" + +from importlib.metadata import distribution, version +from importlib.resources import files +from pathlib import Path +from typing import Final + +from models_library.basic_types import VersionStr, VersionTag +from packaging.version import Version +from pydantic import TypeAdapter + +_current_distribution = distribution("simcore-service-efs-guardian") 
+__version__: str = version("simcore-service-efs-guardian") + + +APP_NAME: Final[str] = _current_distribution.metadata["Name"] +API_VERSION: Final[VersionStr] = TypeAdapter(VersionStr).validate_python(__version__) +VERSION: Final[Version] = Version(__version__) +API_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + f"v{VERSION.major}" +) +RPC_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + f"v{VERSION.major}" +) + + +def get_summary() -> str: + return _current_distribution.metadata.get_all("Summary", [""])[-1] + + +SUMMARY: Final[str] = get_summary() +PACKAGE_DATA_FOLDER: Final[Path] = Path(f'{files(APP_NAME.replace("-", "_")) / "data"}') + +# https://patorjk.com/software/taag/#p=display&f=ANSI%20Shadow&t=Elastic%20file%0Asystem%20guardian +APP_STARTED_BANNER_MSG = r""" +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•—β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— +β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β•β•β•β•šβ•β•β–ˆβ–ˆβ•”β•β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β•β•β• β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β•β•β• +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— +β–ˆβ–ˆβ•”β•β•β• β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘β•šβ•β•β•β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β• β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β• +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β•šβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— +β•šβ•β•β•β•β•β•β•β•šβ•β•β•β•β•β•β•β•šβ•β• β•šβ•β•β•šβ•β•β•β•β•β•β• β•šβ•β• β•šβ•β• β•šβ•β•β•β•β•β• β•šβ•β• β•šβ•β•β•šβ•β•β•β•β•β•β•β•šβ•β•β•β•β•β•β• + +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•— β–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•— β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•— +β–ˆβ–ˆβ•”β•β•β•β•β•β•šβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•”β•β•β•β•β•β•šβ•β•β–ˆβ–ˆβ•”β•β•β•β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β•β•β• β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β•šβ–ˆβ–ˆβ–ˆβ–ˆβ•”β• β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•”β–ˆβ–ˆβ–ˆβ–ˆβ•”β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ +β•šβ•β•β•β•β–ˆβ–ˆβ•‘ β•šβ–ˆβ–ˆβ•”β• β•šβ•β•β•β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β• β–ˆβ–ˆβ•‘β•šβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘β•šβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β•šβ•β• β–ˆβ–ˆβ•‘ β•šβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β•šβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β•šβ–ˆβ–ˆβ–ˆβ–ˆβ•‘ +β•šβ•β•β•β•β•β•β• β•šβ•β• 
β•šβ•β•β•β•β•β•β• β•šβ•β• β•šβ•β•β•β•β•β•β•β•šβ•β• β•šβ•β• β•šβ•β•β•β•β•β• β•šβ•β•β•β•β•β• β•šβ•β• β•šβ•β•β•šβ•β• β•šβ•β•β•šβ•β•β•β•β•β• β•šβ•β•β•šβ•β• β•šβ•β•β•šβ•β• β•šβ•β•β•β• + πŸ›‘οΈ Welcome to EFS-Guardian App πŸ›‘οΈ + Your Elastic File System Manager & Monitor + {} +""".format( + f"v{__version__}" +) + +APP_STARTED_DISABLED_BANNER_MSG = r""" +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— +β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•— +β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ +β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘β•šβ•β•β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β•β• β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ +β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β• +β•šβ•β•β•β•β•β• β•šβ•β•β•šβ•β•β•β•β•β•β•β•šβ•β• β•šβ•β•β•šβ•β•β•β•β•β• β•šβ•β•β•β•β•β•β•β•šβ•β•β•β•β•β•β•β•šβ•β•β•β•β•β• +""" + +APP_FINISHED_BANNER_MSG = "{:=^100}".format( + f"πŸŽ‰ App {APP_NAME}=={__version__} shutdown completed πŸŽ‰" +) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/health.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/health.py new file mode 100644 index 00000000000..2c6f160a9e8 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/health.py @@ -0,0 +1,18 @@ +""" +All entrypoints used for operations + +for instance: service health-check (w/ different variants), diagnostics, debugging, status, etc +""" + +import datetime + +from fastapi import APIRouter +from fastapi.responses import PlainTextResponse + +router = APIRouter() + + +@router.get("/", include_in_schema=True, response_class=PlainTextResponse) +async def health_check(): + # NOTE: sync url in docker/healthcheck.py with this entrypoint! + return f"{__name__}.health_check@{datetime.datetime.now(datetime.timezone.utc).isoformat()}" diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/routes.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/routes.py new file mode 100644 index 00000000000..af7eef7aa26 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/api/rest/routes.py @@ -0,0 +1,17 @@ +from fastapi import APIRouter, FastAPI + +from ..._meta import API_VTAG +from . 
import health + + +def setup_api_routes(app: FastAPI): + """ + Composes resources/sub-resources routers + """ + router = APIRouter() + + # include operations in / + app.include_router(health.router, tags=["operations"]) + + # include the rest under /vX + app.include_router(router, prefix=f"/{API_VTAG}") diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/_efs_guardian.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/_efs_guardian.py new file mode 100644 index 00000000000..9fe0978471d --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/_efs_guardian.py @@ -0,0 +1,23 @@ +from pathlib import Path + +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from servicelib.rabbitmq import RPCRouter + +from ...services.efs_manager_setup import get_efs_manager + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=()) +async def create_project_specific_data_dir( + app: FastAPI, *, project_id: ProjectID, node_id: NodeID, storage_directory_name: str +) -> Path: + _efs_manager = get_efs_manager(app) + + return await _efs_manager.create_project_specific_data_dir( + project_id=project_id, + node_id=node_id, + storage_directory_name=storage_directory_name, + ) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/routes.py b/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/routes.py new file mode 100644 index 00000000000..9a1f349fa29 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/api/rpc/routes.py @@ -0,0 +1,33 @@ +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from models_library.api_schemas_efs_guardian import EFS_GUARDIAN_RPC_NAMESPACE +from servicelib.rabbitmq import RPCRouter + +from ...services.modules.rabbitmq import get_rabbitmq_rpc_server +from . import _efs_guardian + +ROUTERS: list[RPCRouter] = [ + _efs_guardian.router, +] + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _start() -> None: + rpc_server = get_rabbitmq_rpc_server(app) + for router in ROUTERS: + await rpc_server.register_router(router, EFS_GUARDIAN_RPC_NAMESPACE, app) + + return _start + + +def on_app_shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + assert app # nosec + + return _stop + + +def setup_rpc_routes(app: FastAPI) -> None: + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/cli.py b/services/efs-guardian/src/simcore_service_efs_guardian/cli.py new file mode 100644 index 00000000000..77d18015ec0 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/cli.py @@ -0,0 +1,24 @@ +import logging + +import typer +from settings_library.utils_cli import create_settings_command + +from ._meta import APP_NAME +from .core.settings import ApplicationSettings + +log = logging.getLogger(__name__) + +# NOTE: 'main' variable is referred in the setup's entrypoint! 
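# (cross-reference) i.e. the console_scripts declared in services/efs-guardian/setup.py:
#   "simcore-service-efs-guardian = simcore_service_efs_guardian.cli:main"
#   "simcore-service = simcore_service_efs_guardian.cli:main"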
+main = typer.Typer(name=APP_NAME) + +main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) + + +@main.command() +def run(): + """Runs application""" + typer.secho("Sorry, this entrypoint is intentionally disabled. Use instead") + typer.secho( + "$ uvicorn simcore_service_efs_guardian.main:the_app", + fg=typer.colors.BLUE, + ) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/core/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/core/application.py b/services/efs-guardian/src/simcore_service_efs_guardian/core/application.py new file mode 100644 index 00000000000..7c68ba3f0e4 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/core/application.py @@ -0,0 +1,85 @@ +import logging + +from fastapi import FastAPI +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) + +from .._meta import ( + API_VERSION, + API_VTAG, + APP_FINISHED_BANNER_MSG, + APP_NAME, + APP_STARTED_BANNER_MSG, + APP_STARTED_DISABLED_BANNER_MSG, +) +from ..api.rest.routes import setup_api_routes +from ..api.rpc.routes import setup_rpc_routes +from ..services.background_tasks_setup import setup as setup_background_tasks +from ..services.efs_manager_setup import setup as setup_efs_manager +from ..services.fire_and_forget_setup import setup as setup_fire_and_forget +from ..services.modules.db import setup as setup_db +from ..services.modules.rabbitmq import setup as setup_rabbitmq +from ..services.modules.redis import setup as setup_redis +from ..services.process_messages_setup import setup as setup_process_messages +from .settings import ApplicationSettings + +logger = logging.getLogger(__name__) + + +def create_app(settings: ApplicationSettings | None = None) -> FastAPI: + app_settings = settings or ApplicationSettings.create_from_envs() + + logger.info("app settings: %s", app_settings.model_dump_json(indent=1)) + + app = FastAPI( + debug=app_settings.EFS_GUARDIAN_DEBUG, + title=APP_NAME, + description="Service to monitor and manage elastic file system", + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url="/dev/doc", + redoc_url=None, # default disabled + ) + # STATE + app.state.settings = app_settings + assert app.state.settings.API_VERSION == API_VERSION # nosec + if app.state.settings.EFS_GUARDIAN_TRACING: + setup_tracing(app, app.state.settings.EFS_GUARDIAN_TRACING, APP_NAME) + + # PLUGINS SETUP + setup_rabbitmq(app) + setup_redis(app) + setup_db(app) + + setup_api_routes(app) + setup_rpc_routes(app) # requires Rabbit + + setup_efs_manager(app) + setup_background_tasks(app) # requires Redis, DB + setup_process_messages(app) # requires Rabbit + + setup_fire_and_forget(app) + + if app.state.settings.EFS_GUARDIAN_TRACING: + initialize_fastapi_app_tracing(app) + + # EVENTS + async def _on_startup() -> None: + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + if any( + s is None + for s in [ + app_settings.EFS_GUARDIAN_AWS_EFS_SETTINGS, + ] + ): + print(APP_STARTED_DISABLED_BANNER_MSG, flush=True) # noqa: T201 + + async def _on_shutdown() -> None: + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + return app diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/core/settings.py 
b/services/efs-guardian/src/simcore_service_efs_guardian/core/settings.py new file mode 100644 index 00000000000..ab5377a82d3 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/core/settings.py @@ -0,0 +1,129 @@ +import datetime +from functools import cached_property +from typing import Annotated, Final, cast + +from common_library.basic_types import DEFAULT_FACTORY +from fastapi import FastAPI +from models_library.basic_types import LogLevel, VersionTag +from pydantic import AliasChoices, ByteSize, Field, TypeAdapter, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.efs import AwsEfsSettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + +from .._meta import API_VERSION, API_VTAG, APP_NAME + +EFS_GUARDIAN_ENV_PREFIX: Final[str] = "EFS_GUARDIAN_" + + +class ApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + # CODE STATICS --------------------------------------------------------- + API_VERSION: str = API_VERSION + APP_NAME: str = APP_NAME + API_VTAG: VersionTag = API_VTAG + + EFS_USER_ID: Annotated[ + int, Field(description="Linux user ID that the Guardian service will run with") + ] + EFS_USER_NAME: Annotated[ + str, + Field(description="Linux user name that the Guardian service will run with"), + ] + EFS_GROUP_ID: Annotated[ + int, + Field( + description="Linux group ID that the EFS and Simcore linux users are part of" + ), + ] + EFS_GROUP_NAME: Annotated[ + str, + Field( + description="Linux group name that the EFS and Simcore linux users are part of" + ), + ] + EFS_DEFAULT_USER_SERVICE_SIZE_BYTES: ByteSize = TypeAdapter( + ByteSize + ).validate_python("500GiB") + + EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA: Annotated[ + datetime.timedelta, + Field( + description="For how long must a project remain unused before we remove its data from the EFS. (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)", + ), + ] = datetime.timedelta(days=10) + + # RUNTIME ----------------------------------------------------------- + EFS_GUARDIAN_DEBUG: Annotated[ + bool, + Field( + description="Debug mode", + validation_alias=AliasChoices("EFS_GUARDIAN_DEBUG", "DEBUG"), + ), + ] = False + + EFS_GUARDIAN_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "EFS_GUARDIAN_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + + EFS_GUARDIAN_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "EFS_GUARDIAN_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. 
WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + EFS_GUARDIAN_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "EFS_GUARDIAN_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + EFS_GUARDIAN_AWS_EFS_SETTINGS: Annotated[ + AwsEfsSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + EFS_GUARDIAN_POSTGRES: Annotated[ + PostgresSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + EFS_GUARDIAN_RABBITMQ: Annotated[ + RabbitSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + EFS_GUARDIAN_REDIS: Annotated[ + RedisSettings, Field(json_schema_extra={"auto_default_from_env": True}) + ] + EFS_GUARDIAN_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + @cached_property + def LOG_LEVEL(self) -> LogLevel: # noqa: N802 + return self.EFS_GUARDIAN_LOGLEVEL + + @field_validator("EFS_GUARDIAN_LOGLEVEL", mode="before") + @classmethod + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +def get_application_settings(app: FastAPI) -> ApplicationSettings: + return cast(ApplicationSettings, app.state.settings) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/__init__.py new file mode 100644 index 00000000000..b6036dda040 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/__init__.py @@ -0,0 +1,5 @@ +from . import handlers + +setup_exception_handlers = handlers.setup + +__all__: tuple[str, ...] = ("setup_exception_handlers",) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/_base.py b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/_base.py new file mode 100644 index 00000000000..9c8c45d0933 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/_base.py @@ -0,0 +1,5 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class EfsGuardianBaseError(OsparcErrorMixin, Exception): + """EFS guardian base error class.""" diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/custom_errors.py b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/custom_errors.py new file mode 100644 index 00000000000..ca702657f53 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/custom_errors.py @@ -0,0 +1,9 @@ +from ._base import EfsGuardianBaseError + + +class CustomBaseError(EfsGuardianBaseError): + pass + + +class ApplicationSetupError(CustomBaseError): + pass diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/handlers/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/handlers/__init__.py new file mode 100644 index 00000000000..f9a5aefe592 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/exceptions/handlers/__init__.py @@ -0,0 +1,7 @@ +# pylint: disable=unused-argument + +from fastapi import FastAPI + + +def setup(app: FastAPI, *, is_debug: bool = False): + ... 
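The application settings above are resolved entirely from the environment. As a rough, illustrative sketch (not part of this changeset), the snippet below shows how ApplicationSettings.create_from_envs() would pick up the EFS-specific variables; the values mirror the ones used in the test conftest further down, and the remaining required variables (EFS_DNS_NAME, EFS_MOUNTED_PATH, Postgres/RabbitMQ/Redis, ...) are assumed to be exported by the surrounding stack:

    # illustrative sketch only -- env values mirror tests/conftest.py; other required
    # variables are assumed to already be set in the environment
    import os

    from simcore_service_efs_guardian.core.settings import ApplicationSettings

    os.environ.update(
        {
            "EFS_USER_ID": "8006",
            "EFS_USER_NAME": "efs",
            "EFS_GROUP_ID": "8106",
            "EFS_GROUP_NAME": "efs-group",
            # pydantic parses "HH:MM:SS" strings into a datetime.timedelta
            "EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA": "01:00:00",
        }
    )

    settings = ApplicationSettings.create_from_envs()
    print(settings.EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA)  # e.g. 1:00:00 (one hour)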
diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/main.py b/services/efs-guardian/src/simcore_service_efs_guardian/main.py new file mode 100644 index 00000000000..711d32d83ee --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/main.py @@ -0,0 +1,22 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_efs_guardian.core.application import create_app +from simcore_service_efs_guardian.core.settings import ApplicationSettings + +the_settings = ApplicationSettings.create_from_envs() +logging.basicConfig(level=the_settings.log_level) +logging.root.setLevel(the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=the_settings.EFS_GUARDIAN_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=the_settings.EFS_GUARDIAN_LOG_FILTER_MAPPING, + tracing_settings=the_settings.EFS_GUARDIAN_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(the_settings) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks.py new file mode 100644 index 00000000000..75ed9f66fc3 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks.py @@ -0,0 +1,76 @@ +import logging +from datetime import UTC, datetime + +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_state import ProjectStatus +from servicelib.logging_utils import log_context +from servicelib.redis import with_project_locked +from simcore_postgres_database.utils_projects import ( + DBProjectNotFoundError, + ProjectsRepo, +) + +from ..core.settings import ApplicationSettings +from .efs_manager import EfsManager +from .modules.redis import get_redis_lock_client + +_logger = logging.getLogger(__name__) + + +async def _lock_project_and_remove_data(app: FastAPI, project_id: ProjectID) -> None: + efs_manager: EfsManager = app.state.efs_manager + + @with_project_locked( + get_redis_lock_client(app), + project_uuid=project_id, + status=ProjectStatus.MAINTAINING, + owner=None, + notification_cb=None, + ) + async def _remove(): + await efs_manager.remove_project_efs_data(project_id) + + await _remove() + + +async def removal_policy_task(app: FastAPI) -> None: + _logger.info("Removal policy task started") + + app_settings: ApplicationSettings = app.state.settings + assert app_settings # nosec + efs_manager: EfsManager = app.state.efs_manager + + base_start_timestamp = datetime.now(tz=UTC) + + efs_project_ids: list[ + ProjectID + ] = await efs_manager.list_projects_across_whole_efs() + _logger.info( + "Number of projects that are currently in the EFS file system: %s", + len(efs_project_ids), + ) + + projects_repo = ProjectsRepo(app.state.engine) + for project_id in efs_project_ids: + try: + _project_last_change_date = ( + await projects_repo.get_project_last_change_date(project_id) + ) + except DBProjectNotFoundError: + _logger.info( + "Project %s not found. 
Removing EFS data for project {project_id} started", + project_id, + ) + await efs_manager.remove_project_efs_data(project_id) + if ( + _project_last_change_date + < base_start_timestamp + - app_settings.EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA + ): + with log_context( + _logger, + logging.INFO, + msg=f"Removing data for project {project_id} started, project last change date {_project_last_change_date}, efs removal policy task age limit timedelta {app_settings.EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA}", + ): + await _lock_project_and_remove_data(app, project_id) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks_setup.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks_setup.py new file mode 100644 index 00000000000..e1480f84b20 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/background_tasks_setup.py @@ -0,0 +1,62 @@ +import asyncio +import logging +from collections.abc import Awaitable, Callable +from datetime import timedelta + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task_utils import exclusive_periodic +from servicelib.logging_utils import log_catch, log_context + +from .background_tasks import removal_policy_task +from .modules.redis import get_redis_lock_client + +_logger = logging.getLogger(__name__) + + +def _on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with ( + log_context(_logger, logging.INFO, msg="Efs Guardian background task "), + log_catch(_logger, reraise=False), + ): + app.state.efs_guardian_removal_policy_background_task = None + + _logger.info("starting efs guardian removal policy task") + + @exclusive_periodic( + get_redis_lock_client(app), + task_interval=timedelta(hours=1), + retry_after=timedelta(minutes=5), + ) + async def _periodic_removal_policy_task() -> None: + await removal_policy_task(app) + + app.state.efs_guardian_removal_policy_background_task = asyncio.create_task( + _periodic_removal_policy_task(), + name=_periodic_removal_policy_task.__name__, + ) + + return _startup + + +def _on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + with ( + log_context(_logger, logging.INFO, msg="Efs Guardian shutdown.."), + log_catch(_logger, reraise=False), + ): + assert _app # nosec + if _app.state.efs_guardian_removal_policy_background_task: + await cancel_wait_task( + _app.state.efs_guardian_removal_policy_background_task + ) + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", _on_app_startup(app)) + app.add_event_handler("shutdown", _on_app_shutdown(app)) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager.py new file mode 100644 index 00000000000..88016b1617b --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager.py @@ -0,0 +1,169 @@ +import logging +import os +from dataclasses import dataclass +from pathlib import Path + +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from pydantic import ByteSize, TypeAdapter, ValidationError +from servicelib.file_utils import remove_directory + +from ..core.settings import ApplicationSettings, get_application_settings +from . 
import efs_manager_utils + +_logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class EfsManager: + app: FastAPI + + _efs_mounted_path: Path + _project_specific_data_base_directory: str + _settings: ApplicationSettings + + @classmethod + async def create( + cls, + app: FastAPI, + efs_mounted_path: Path, + project_specific_data_base_directory: str, + ): + settings = get_application_settings(app) + return cls( + app, efs_mounted_path, project_specific_data_base_directory, settings + ) + + async def initialize_directories(self): + _dir_path = self._efs_mounted_path / self._project_specific_data_base_directory + Path.mkdir(_dir_path, parents=True, exist_ok=True) + + async def create_project_specific_data_dir( + self, project_id: ProjectID, node_id: NodeID, storage_directory_name: str + ) -> Path: + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + / f"{node_id}" + / f"{storage_directory_name}" + ) + # Ensure the directory exists with the right parents + Path.mkdir(_dir_path, parents=True, exist_ok=True) + # Change the owner to user id 8006(efs) and group id 8106(efs-group) + os.chown(_dir_path, self._settings.EFS_USER_ID, self._settings.EFS_GROUP_ID) + # Set directory permissions to allow group write access + Path.chmod( + _dir_path, 0o770 + ) # This gives rwx permissions to user and group, and nothing to others + return _dir_path + + async def check_project_node_data_directory_exits( + self, project_id: ProjectID, node_id: NodeID + ) -> bool: + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + / f"{node_id}" + ) + + return _dir_path.exists() + + async def get_project_node_data_size( + self, project_id: ProjectID, node_id: NodeID + ) -> ByteSize: + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + / f"{node_id}" + ) + + return await efs_manager_utils.get_size_bash_async(_dir_path) + + async def list_project_node_state_names( + self, project_id: ProjectID, node_id: NodeID + ) -> list[str]: + """ + These are currently state volumes that are mounted via docker volume to dynamic sidecar and user services + (ex. ".data_assets" and "home_user_workspace") + """ + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + / f"{node_id}" + ) + + project_node_states = [] + for child in _dir_path.iterdir(): + if child.is_dir(): + project_node_states.append(child.name) + else: + _logger.error( + "This is not a directory. This should not happen! %s", + _dir_path / child.name, + ) + return project_node_states + + async def remove_project_node_data_write_permissions( + self, project_id: ProjectID, node_id: NodeID + ) -> None: + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + / f"{node_id}" + ) + + await efs_manager_utils.remove_write_permissions_bash_async(_dir_path) + + async def list_projects_across_whole_efs(self) -> list[ProjectID]: + _dir_path = self._efs_mounted_path / self._project_specific_data_base_directory + + # Filter and list only directories (which should be Project UUIDs) + project_uuids = [] + for child in _dir_path.iterdir(): + if child.is_dir(): + try: + _project_id = TypeAdapter(ProjectID).validate_python(child.name) + project_uuids.append(_project_id) + except ValidationError: + _logger.error( + "This is not a project ID. This should not happen! 
%s", + _dir_path / child.name, + ) + else: + _logger.error( + "This is not a directory. This should not happen! %s", + _dir_path / child.name, + ) + + return project_uuids + + async def remove_project_efs_data(self, project_id: ProjectID) -> None: + _dir_path = ( + self._efs_mounted_path + / self._project_specific_data_base_directory + / f"{project_id}" + ) + + if Path.exists(_dir_path): + # Remove the directory and all its contents + try: + await remove_directory(_dir_path) + _logger.info("%s has been deleted.", _dir_path) + except FileNotFoundError: + _logger.exception("Directory %s does not exist.", _dir_path) + except PermissionError: + _logger.exception( + "Permission denied when trying to delete %s.", _dir_path + ) + except NotADirectoryError: + _logger.exception("%s is not a directory.", _dir_path) + except OSError: + _logger.exception("Issue with path: %s", _dir_path) + else: + _logger.error("%s does not exist.", _dir_path) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_setup.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_setup.py new file mode 100644 index 00000000000..e418d27cc59 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_setup.py @@ -0,0 +1,54 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from settings_library.efs import AwsEfsSettings +from tenacity import ( + AsyncRetrying, + before_sleep_log, + stop_after_delay, + wait_random_exponential, +) + +from ..exceptions.custom_errors import ApplicationSetupError +from .efs_manager import EfsManager + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + aws_efs_settings: AwsEfsSettings = ( + app.state.settings.EFS_GUARDIAN_AWS_EFS_SETTINGS + ) + + app.state.efs_manager = None + app.state.efs_manager = efs_manager = await EfsManager.create( + app, + aws_efs_settings.EFS_MOUNTED_PATH, + aws_efs_settings.EFS_PROJECT_SPECIFIC_DATA_DIRECTORY, + ) + + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(120), + wait=wait_random_exponential(max=30), + before_sleep=before_sleep_log(_logger, logging.WARNING), + ): + with attempt: + await efs_manager.initialize_directories() + + async def on_shutdown() -> None: + if app.state.efs_manager: + ... + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_efs_manager(app: FastAPI) -> EfsManager: + if not app.state.efs_manager: + raise ApplicationSetupError( + msg="Efs Manager is not available. Please check the configuration." 
+ ) + return cast(EfsManager, app.state.efs_manager) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_utils.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_utils.py new file mode 100644 index 00000000000..9418fa733db --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/efs_manager_utils.py @@ -0,0 +1,46 @@ +import asyncio +import logging + +from pydantic import ByteSize + +_logger = logging.getLogger(__name__) + + +async def get_size_bash_async(path) -> ByteSize: + # Create the subprocess + command = ["du", "-sb", path] + process = await asyncio.create_subprocess_exec( + *command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + # Wait for the subprocess to complete + stdout, stderr = await process.communicate() + + if process.returncode == 0: + # Parse the output + size = ByteSize(stdout.decode().split()[0]) + return size + msg = f"Command {' '.join(command)} failed with error code {process.returncode}: {stderr.decode()}" + _logger.error(msg) + raise RuntimeError(msg) + + +async def remove_write_permissions_bash_async(path) -> None: + # Create the subprocess + command = ["chmod", "-R", "a-w", path] + process = await asyncio.create_subprocess_exec( + *command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + + # Wait for the subprocess to complete + _, stderr = await process.communicate() + + if process.returncode == 0: + return + msg = f"Command {' '.join(command)} failed with error code {process.returncode}: {stderr.decode()}" + _logger.error(msg) + raise RuntimeError(msg) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/fire_and_forget_setup.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/fire_and_forget_setup.py new file mode 100644 index 00000000000..a38411f56a1 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/fire_and_forget_setup.py @@ -0,0 +1,38 @@ +import logging +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.logging_utils import log_catch, log_context + +_logger = logging.getLogger(__name__) + + +def _on_app_startup(_app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with log_context( + _logger, logging.INFO, msg="Efs Guardian setup fire and forget tasks.." + ), log_catch(_logger, reraise=False): + _app.state.efs_guardian_fire_and_forget_tasks = set() + + return _startup + + +def _on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + with log_context( + _logger, logging.INFO, msg="Efs Guardian fire and forget tasks shutdown.." 
+ ), log_catch(_logger, reraise=False): + assert _app # nosec + if _app.state.efs_guardian_fire_and_forget_tasks: + for task in _app.state.efs_guardian_fire_and_forget_tasks: + await cancel_wait_task(task) + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", _on_app_startup(app)) + app.add_event_handler("shutdown", _on_app_shutdown(app)) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/__init__.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/db.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/db.py new file mode 100644 index 00000000000..f5d5970216e --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/db.py @@ -0,0 +1,17 @@ +from fastapi import FastAPI +from servicelib.fastapi.db_asyncpg_engine import close_db_connection, connect_to_db + + +def setup(app: FastAPI): + async def on_startup() -> None: + await connect_to_db(app, app.state.settings.EFS_GUARDIAN_POSTGRES) + + async def on_shutdown() -> None: + await close_db_connection(app) + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_database_engine(app: FastAPI): + return app.state.engine diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/rabbitmq.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/rabbitmq.py new file mode 100644 index 00000000000..f94c5dbf418 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/rabbitmq.py @@ -0,0 +1,66 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) +from settings_library.rabbit import RabbitSettings + +from ...exceptions.custom_errors import ApplicationSetupError + +logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.rabbitmq_client = None + settings: RabbitSettings | None = app.state.settings.EFS_GUARDIAN_RABBITMQ + if not settings: + raise ApplicationSetupError( + msg="Rabbit MQ client is de-activated in the settings" + ) + await wait_till_rabbitmq_responsive(settings.dsn) + app.state.rabbitmq_client = RabbitMQClient( + client_name="efs-guardian", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="efs_guardian_rpc_server", settings=settings + ) + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="efs_guardian_rpc_client", settings=settings + ) + + async def on_shutdown() -> None: + if app.state.rabbitmq_client: + await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + if app.state.rabbitmq_rpc_client: + await app.state.rabbitmq_rpc_client.close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + if not app.state.rabbitmq_client: + raise ApplicationSetupError( + msg="RabbitMQ client is not available. Please check the configuration." 
+ ) + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) + + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_client # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + +__all__ = ("RabbitMQClient",) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/redis.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/redis.py new file mode 100644 index 00000000000..78d1462378a --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/modules/redis.py @@ -0,0 +1,32 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase, RedisSettings + +from ..._meta import APP_NAME + +logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + app.state.redis_lock_client_sdk = None + settings: RedisSettings = app.state.settings.EFS_GUARDIAN_REDIS + redis_locks_dsn = settings.build_redis_dsn(RedisDatabase.LOCKS) + app.state.redis_lock_client_sdk = RedisClientSDK( + redis_locks_dsn, client_name=APP_NAME + ) + + async def on_shutdown() -> None: + redis_lock_client_sdk: None | RedisClientSDK = app.state.redis_lock_client_sdk + if redis_lock_client_sdk: + await redis_lock_client_sdk.shutdown() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_redis_lock_client(app: FastAPI) -> RedisClientSDK: + return cast(RedisClientSDK, app.state.redis_lock_client_sdk) diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages.py new file mode 100644 index 00000000000..1b84c02df1d --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages.py @@ -0,0 +1,91 @@ +import datetime +import logging + +from fastapi import FastAPI +from models_library.api_schemas_dynamic_sidecar.telemetry import DiskUsage +from models_library.rabbitmq_messages import DynamicServiceRunningMessage +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.dynamic_sidecar.disk_usage import ( + update_disk_usage, +) +from servicelib.redis import exclusive +from servicelib.utils import fire_and_forget_task + +from ..core.settings import get_application_settings +from ..services.efs_manager import EfsManager +from ..services.modules.rabbitmq import get_rabbitmq_rpc_client +from ..services.modules.redis import get_redis_lock_client + +_logger = logging.getLogger(__name__) + + +async def process_dynamic_service_running_message(app: FastAPI, data: bytes) -> bool: + assert app # nosec + rabbit_message: DynamicServiceRunningMessage = ( + DynamicServiceRunningMessage.model_validate_json(data) + ) + _logger.debug( + "Process dynamic service running msg, project ID: %s node ID: %s, current user: %s", + rabbit_message.project_id, + rabbit_message.node_id, + rabbit_message.user_id, + ) + + settings = get_application_settings(app) + efs_manager: EfsManager = app.state.efs_manager + + dir_exists = await efs_manager.check_project_node_data_directory_exits( + 
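        # checks whether <EFS_MOUNTED_PATH>/<EFS_PROJECT_SPECIFIC_DATA_DIRECTORY>/<project_id>/<node_id>
        # exists on the mounted file system; if it does not, the handler returns True below
        # without reporting any disk usage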
rabbit_message.project_id, node_id=rabbit_message.node_id + ) + if dir_exists is False: + _logger.debug( + "Directory doesn't exists in EFS, project ID: %s node ID: %s, current user: %s", + rabbit_message.project_id, + rabbit_message.node_id, + rabbit_message.user_id, + ) + return True + + size = await efs_manager.get_project_node_data_size( + rabbit_message.project_id, node_id=rabbit_message.node_id + ) + _logger.debug( + "Current directory size: %s, project ID: %s node ID: %s, current user: %s", + size, + rabbit_message.project_id, + rabbit_message.node_id, + rabbit_message.user_id, + ) + + project_node_state_names = await efs_manager.list_project_node_state_names( + rabbit_message.project_id, node_id=rabbit_message.node_id + ) + rpc_client: RabbitMQRPCClient = get_rabbitmq_rpc_client(app) + _used = min(size, settings.EFS_DEFAULT_USER_SERVICE_SIZE_BYTES) + usage: dict[str, DiskUsage] = {} + for name in project_node_state_names: + usage[name] = DiskUsage.from_efs_guardian( + used=_used, total=settings.EFS_DEFAULT_USER_SERVICE_SIZE_BYTES + ) + + fire_and_forget_task( + update_disk_usage(rpc_client, node_id=rabbit_message.node_id, usage=usage), + task_suffix_name=f"update_disk_usage_efs_user_id{rabbit_message.user_id}_node_id{rabbit_message.node_id}", + fire_and_forget_tasks_collection=app.state.efs_guardian_fire_and_forget_tasks, + ) + + if size > settings.EFS_DEFAULT_USER_SERVICE_SIZE_BYTES: + msg = f"Removing write permissions inside of EFS starts for project ID: {rabbit_message.project_id}, node ID: {rabbit_message.node_id}, current user: {rabbit_message.user_id}, size: {size}, upper limit: {settings.EFS_DEFAULT_USER_SERVICE_SIZE_BYTES}" + with log_context(_logger, logging.WARNING, msg=msg): + redis = get_redis_lock_client(app) + await exclusive( + redis, + lock_key=f"efs_remove_write_permissions-{rabbit_message.project_id=}-{rabbit_message.node_id=}", + blocking=True, + blocking_timeout=datetime.timedelta(seconds=10), + )(efs_manager.remove_project_node_data_write_permissions)( + project_id=rabbit_message.project_id, node_id=rabbit_message.node_id + ) + + return True diff --git a/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages_setup.py b/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages_setup.py new file mode 100644 index 00000000000..2afd34d2f42 --- /dev/null +++ b/services/efs-guardian/src/simcore_service_efs_guardian/services/process_messages_setup.py @@ -0,0 +1,66 @@ +import functools +import logging +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from models_library.rabbitmq_messages import DynamicServiceRunningMessage +from servicelib.logging_utils import log_catch, log_context +from servicelib.rabbitmq import RabbitMQClient +from settings_library.rabbit import RabbitSettings + +from ..core.settings import ApplicationSettings +from .modules.rabbitmq import get_rabbitmq_client +from .process_messages import process_dynamic_service_running_message + +_logger = logging.getLogger(__name__) + + +_SEC = 1000 # in ms +_MIN = 60 * _SEC # in ms +_HOUR = 60 * _MIN # in ms + +_EFS_MESSAGE_TTL_IN_MS = 2 * _HOUR + + +async def _subscribe_to_rabbitmq(app) -> str: + with log_context(_logger, logging.INFO, msg="Subscribing to rabbitmq channel"): + rabbit_client: RabbitMQClient = get_rabbitmq_client(app) + subscribed_queue, _ = await rabbit_client.subscribe( + DynamicServiceRunningMessage.get_channel_name(), + message_handler=functools.partial( + process_dynamic_service_running_message, app + ), + 
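            # NOTE (assumption): a non-exclusive queue is presumably shared by all efs-guardian
            # replicas (competing consumers); message_ttl below expires messages after ~2 hours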
exclusive_queue=False, + message_ttl=_EFS_MESSAGE_TTL_IN_MS, + ) + return subscribed_queue + + +def _on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with log_context( + _logger, logging.INFO, msg="setup efs guardian process messages" + ), log_catch(_logger, reraise=False): + app_settings: ApplicationSettings = app.state.settings + app.state.efs_guardian_rabbitmq_consumer = None + settings: RabbitSettings | None = app_settings.EFS_GUARDIAN_RABBITMQ + if not settings: + _logger.warning("RabbitMQ client is de-activated in the settings") + return + app.state.efs_guardian_rabbitmq_consumer = await _subscribe_to_rabbitmq(app) + + return _startup + + +def _on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + assert _app # nosec + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", _on_app_startup(app)) + app.add_event_handler("shutdown", _on_app_shutdown(app)) diff --git a/services/efs-guardian/tests/conftest.py b/services/efs-guardian/tests/conftest.py new file mode 100644 index 00000000000..96585f4c87b --- /dev/null +++ b/services/efs-guardian/tests/conftest.py @@ -0,0 +1,83 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from pathlib import Path + +import pytest +import simcore_service_efs_guardian +from faker import Faker +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict + +pytest_plugins = [ + "pytest_simcore.cli_runner", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_registry", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.faker_products_data", + "pytest_simcore.faker_projects_data", + "pytest_simcore.pydantic_models", + "pytest_simcore.pytest_global_environs", + "pytest_simcore.rabbit_service", + "pytest_simcore.redis_service", + "pytest_simcore.postgres_service", + "pytest_simcore.repository_paths", + "pytest_simcore.aws_s3_service", + "pytest_simcore.aws_server", +] + + +@pytest.fixture(scope="session") +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "payments" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_payments")) + return service_folder + + +@pytest.fixture(scope="session") +def installed_package_dir() -> Path: + dirpath = Path(simcore_service_efs_guardian.__file__).resolve().parent + assert dirpath.exists() + return dirpath + + +@pytest.fixture(scope="session") +def env_devel_dict( + env_devel_dict: EnvVarsDict, external_envfile_dict: EnvVarsDict +) -> EnvVarsDict: + if external_envfile_dict: + return external_envfile_dict + return env_devel_dict + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + docker_compose_service_environment_dict: EnvVarsDict, + faker: Faker, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_environment_dict, + "EFS_DNS_NAME": "fs-xxx.efs.us-east-1.amazonaws.com", + "EFS_MOUNTED_PATH": "/tmp/efs", + "EFS_PROJECT_SPECIFIC_DATA_DIRECTORY": "project-specific-data", + "EFS_GUARDIAN_TRACING": "null", + "SC_USER_ID": "8004", + "SC_USER_NAME": "scu", + 
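            # 8006/8106 are the "efs"/"efs-group" ids that EfsManager.create_project_specific_data_dir
            # applies via os.chown (cf. the chown comment in services/efs_manager.py)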
"EFS_USER_ID": "8006", + "EFS_USER_NAME": "efs", + "EFS_GROUP_ID": "8106", + "EFS_GROUP_NAME": "efs-group", + }, + ) diff --git a/services/efs-guardian/tests/integration/.gitkeep b/services/efs-guardian/tests/integration/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/efs-guardian/tests/unit/conftest.py b/services/efs-guardian/tests/unit/conftest.py new file mode 100644 index 00000000000..d598fe06ebd --- /dev/null +++ b/services/efs-guardian/tests/unit/conftest.py @@ -0,0 +1,159 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import os +import shutil +import stat +from collections.abc import AsyncIterator, Awaitable, Callable +from pathlib import Path +from unittest.mock import Mock + +import httpx +import pytest +import sqlalchemy as sa +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from settings_library.efs import AwsEfsSettings +from simcore_service_efs_guardian.core.application import create_app + +# +# rabbit-MQ +# + + +@pytest.fixture +def disable_rabbitmq_and_rpc_setup(mocker: MockerFixture) -> Callable: + def _(): + # The following services are affected if rabbitmq is not in place + mocker.patch("simcore_service_efs_guardian.core.application.setup_rabbitmq") + mocker.patch("simcore_service_efs_guardian.core.application.setup_rpc_routes") + mocker.patch( + "simcore_service_efs_guardian.core.application.setup_process_messages" + ) + + return _ + + +@pytest.fixture +def with_disabled_rabbitmq_and_rpc(disable_rabbitmq_and_rpc_setup: Callable): + disable_rabbitmq_and_rpc_setup() + + +@pytest.fixture +async def rpc_client( + faker: Faker, rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]] +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client(f"director-v2-client-{faker.word()}") + + +# +# postgres +# + + +@pytest.fixture +def disable_postgres_setup(mocker: MockerFixture) -> Callable: + def _setup(app: FastAPI): + app.state.engine = ( + Mock() + ) # NOTE: avoids error in api._dependencies::get_db_engine + + def _(): + # The following services are affected if postgres is not in place + mocker.patch( + "simcore_service_efs_guardian.core.application.setup_db", + spec=True, + side_effect=_setup, + ) + + return _ + + +@pytest.fixture +def with_disabled_postgres(disable_postgres_setup: Callable): + disable_postgres_setup() + + +@pytest.fixture +def wait_for_postgres_ready_and_db_migrated(postgres_db: sa.engine.Engine) -> None: + """ + Typical use-case is to include it in + + @pytest.fixture + def app_environment( + ... 
+ postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + ) + """ + assert postgres_db + + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 + + +@pytest.fixture +async def app( + app_environment: EnvVarsDict, is_pdb_enabled: bool +) -> AsyncIterator[FastAPI]: + the_test_app = create_app() + async with LifespanManager( + the_test_app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + yield the_test_app + + +@pytest.fixture +async def client(app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: + # - Needed for app to trigger start/stop event handlers + # - Prefer this client instead of fastapi.testclient.TestClient + async with httpx.AsyncClient( + transport=httpx.ASGITransport(app=app), + base_url=f"http://{app.title}.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def with_disabled_background_tasks(mocker: MockerFixture) -> None: + mocker.patch("simcore_service_efs_guardian.core.application.setup_background_tasks") + + +@pytest.fixture +def with_disabled_redis_and_background_tasks(mocker: MockerFixture) -> None: + mocker.patch("simcore_service_efs_guardian.core.application.setup_redis") + mocker.patch("simcore_service_efs_guardian.core.application.setup_background_tasks") + + +# +# Others +# + + +@pytest.fixture +async def efs_cleanup(app: FastAPI) -> AsyncIterator[None]: + yield + + aws_efs_settings: AwsEfsSettings = app.state.settings.EFS_GUARDIAN_AWS_EFS_SETTINGS + _dir_path = Path(aws_efs_settings.EFS_MOUNTED_PATH) + if _dir_path.exists(): + for root, dirs, files in os.walk(_dir_path): + for name in dirs + files: + file_path = Path(root, name) + # Get the current permissions of the file or directory + current_permissions = Path.stat(file_path).st_mode + # Add write permission for the owner (user) + Path.chmod(file_path, current_permissions | stat.S_IWUSR) + + shutil.rmtree(_dir_path) diff --git a/services/efs-guardian/tests/unit/test_api_health.py b/services/efs-guardian/tests/unit/test_api_health.py new file mode 100644 index 00000000000..a07b8e79e1f --- /dev/null +++ b/services/efs-guardian/tests/unit/test_api_health.py @@ -0,0 +1,37 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +import httpx +import pytest +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from starlette import status + +pytest_simcore_core_services_selection = [] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + with_disabled_redis_and_background_tasks: None, + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, +): + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + }, + ) + + +async def test_healthcheck( + client: httpx.AsyncClient, +): + response = await client.get("/") + response.raise_for_status() + assert response.status_code == status.HTTP_200_OK + assert "simcore_service_efs_guardian" in response.text diff --git a/services/efs-guardian/tests/unit/test_cli.py b/services/efs-guardian/tests/unit/test_cli.py new file mode 100644 index 00000000000..6819ed50a41 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_cli.py @@ -0,0 +1,21 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument 
+# pylint:disable=redefined-outer-name + + +from simcore_service_efs_guardian.cli import main +from typer.testing import CliRunner + +runner = CliRunner() + + +def test_settings(app_environment): + result = runner.invoke(main, ["settings"]) + assert result.exit_code == 0 + assert "APP_NAME=simcore-service-efs-guardian" in result.stdout + + +def test_run(): + result = runner.invoke(main, ["run"]) + assert result.exit_code == 0 + assert "disabled" in result.stdout diff --git a/services/efs-guardian/tests/unit/test_core_settings.py b/services/efs-guardian/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..0d72653a8e4 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_core_settings.py @@ -0,0 +1,22 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from simcore_service_efs_guardian.core.settings import ApplicationSettings + + +def test_settings(app_environment: EnvVarsDict): + """ + We validate actual envfiles (e.g. repo.config files) by passing them via the CLI + + $ ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + $ pytest --external-envfile=.secrets --pdb tests/unit/test_core_settings.py + + """ + settings = ApplicationSettings() # type: ignore + assert settings + + assert settings == ApplicationSettings.create_from_envs() + assert settings.EFS_GUARDIAN_POSTGRES diff --git a/services/efs-guardian/tests/unit/test_efs_guardian_rpc.py b/services/efs-guardian/tests/unit/test_efs_guardian_rpc.py new file mode 100644 index 00000000000..e5d2dfab27a --- /dev/null +++ b/services/efs-guardian/tests/unit/test_efs_guardian_rpc.py @@ -0,0 +1,74 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pathlib import Path +from unittest.mock import patch + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.projects import ProjectID +from models_library.projects_nodes import NodeID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.efs_guardian import efs_manager +from simcore_service_efs_guardian.core.settings import AwsEfsSettings + +pytest_simcore_core_services_selection = ["rabbit"] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + with_disabled_redis_and_background_tasks: None, + with_disabled_postgres: None, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + }, + ) + + +async def test_rpc_create_project_specific_data_dir( + rpc_client: RabbitMQRPCClient, + faker: Faker, + app: FastAPI, + project_id: ProjectID, + node_id: NodeID, + efs_cleanup: None, +): + aws_efs_settings: AwsEfsSettings = app.state.settings.EFS_GUARDIAN_AWS_EFS_SETTINGS + + _storage_directory_name = faker.word() + + with patch( + "simcore_service_efs_guardian.services.efs_manager.os.chown" + ) as mocked_chown: + result = await efs_manager.create_project_specific_data_dir( + rpc_client, + project_id=project_id, + node_id=node_id, + 
storage_directory_name=_storage_directory_name, + ) + mocked_chown.assert_called_once() + + assert isinstance(result, Path) + _expected_path = ( + aws_efs_settings.EFS_MOUNTED_PATH + / aws_efs_settings.EFS_PROJECT_SPECIFIC_DATA_DIRECTORY + / f"{project_id}" + / f"{node_id}" + / _storage_directory_name + ) + assert _expected_path == result + assert _expected_path.exists diff --git a/services/efs-guardian/tests/unit/test_efs_manager.py b/services/efs-guardian/tests/unit/test_efs_manager.py new file mode 100644 index 00000000000..35c2535de94 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_efs_manager.py @@ -0,0 +1,155 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import stat +from pathlib import Path +from unittest.mock import patch + +import pytest +from faker import Faker +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_efs_guardian.core.settings import AwsEfsSettings +from simcore_service_efs_guardian.services.efs_manager import ( + EfsManager, + NodeID, + ProjectID, +) + +pytest_simcore_core_services_selection = ["rabbit"] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, + with_disabled_redis_and_background_tasks: None, + with_disabled_postgres: None, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + }, + ) + + +def assert_permissions( + file_path: Path, + expected_readable: bool, + expected_writable: bool, + expected_executable: bool, +): + file_stat = Path.stat(file_path) + file_permissions = file_stat.st_mode + is_readable = bool(file_permissions & stat.S_IRUSR) + is_writable = bool(file_permissions & stat.S_IWUSR) + is_executable = bool(file_permissions & stat.S_IXUSR) + + assert ( + is_readable == expected_readable + ), f"Expected readable={expected_readable}, but got readable={is_readable} for {file_path}" + assert ( + is_writable == expected_writable + ), f"Expected writable={expected_writable}, but got writable={is_writable} for {file_path}" + assert ( + is_executable == expected_executable + ), f"Expected executable={expected_executable}, but got executable={is_executable} for {file_path}" + + +async def test_remove_write_access_rights( + faker: Faker, + app: FastAPI, + efs_cleanup: None, + project_id: ProjectID, + node_id: NodeID, +): + aws_efs_settings: AwsEfsSettings = app.state.settings.EFS_GUARDIAN_AWS_EFS_SETTINGS + + _storage_directory_name = faker.word() + _dir_path = ( + aws_efs_settings.EFS_MOUNTED_PATH + / aws_efs_settings.EFS_PROJECT_SPECIFIC_DATA_DIRECTORY + / f"{project_id}" + / f"{node_id}" + / f"{_storage_directory_name}" + ) + + efs_manager: EfsManager = app.state.efs_manager + + assert ( + await efs_manager.check_project_node_data_directory_exits( + project_id=project_id, node_id=node_id + ) + is False + ) + + with pytest.raises(FileNotFoundError): + await efs_manager.list_project_node_state_names( + project_id=project_id, node_id=node_id + ) + + with patch( + "simcore_service_efs_guardian.services.efs_manager.os.chown" + ) as mocked_chown: + await efs_manager.create_project_specific_data_dir( + project_id=project_id, + node_id=node_id, + 
storage_directory_name=_storage_directory_name, + ) + assert mocked_chown.called + + assert ( + await efs_manager.check_project_node_data_directory_exits( + project_id=project_id, node_id=node_id + ) + is True + ) + + project_node_state_names = await efs_manager.list_project_node_state_names( + project_id=project_id, node_id=node_id + ) + assert project_node_state_names == [_storage_directory_name] + + size_before = await efs_manager.get_project_node_data_size( + project_id=project_id, node_id=node_id + ) + + file_paths = [] + for i in range(3): # Let's create 3 small files for testing + file_path = Path(_dir_path, f"test_file_{i}.txt") + file_path.write_text(f"This is file {i}") + file_paths.append(file_path) + + size_after = await efs_manager.get_project_node_data_size( + project_id=project_id, node_id=node_id + ) + assert size_after > size_before + + # Now we will check removal of write permissions + for file_path in file_paths: + assert_permissions( + file_path, + expected_readable=True, + expected_writable=True, + expected_executable=False, + ) + + await efs_manager.remove_project_node_data_write_permissions( + project_id=project_id, node_id=node_id + ) + + for file_path in file_paths: + assert_permissions( + file_path, + expected_readable=True, + expected_writable=False, + expected_executable=False, + ) diff --git a/services/efs-guardian/tests/unit/test_efs_removal_policy_task.py b/services/efs-guardian/tests/unit/test_efs_removal_policy_task.py new file mode 100644 index 00000000000..4000fab0c88 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_efs_removal_policy_task.py @@ -0,0 +1,193 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock, patch + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import insert_and_get_row_lifespan +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_postgres_database.models.projects import projects +from simcore_postgres_database.models.users import users +from simcore_postgres_database.utils_repos import transaction_context +from simcore_service_efs_guardian.core.settings import ( + ApplicationSettings, + AwsEfsSettings, +) +from simcore_service_efs_guardian.services.background_tasks import removal_policy_task +from simcore_service_efs_guardian.services.efs_manager import ( + EfsManager, + NodeID, + ProjectID, +) + +pytest_simcore_core_services_selection = ["postgres", "redis"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + wait_for_postgres_ready_and_db_migrated: None, + with_disabled_redis_and_background_tasks: None, +): + # set environs + monkeypatch.delenv("EFS_GUARDIAN_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "efs-guardian-service-pg-client", + "EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA": "01:00:00", + }, + ) + + +@pytest.fixture 
+async def user_in_db( + app: FastAPI, + user: dict[str, Any], + user_id: UserID, +) -> AsyncIterator[dict[str, Any]]: + """ + injects a user in db + """ + assert user_id == user["id"] + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + app.state.engine, + table=users, + values=user, + pk_col=users.c.id, + pk_value=user["id"], + ) as row: + yield row + + +@pytest.fixture +async def project_in_db( + app: FastAPI, + user_in_db: dict, + project_data: dict[str, Any], + project_id: ProjectID, +) -> AsyncIterator[dict[str, Any]]: + """ + injects a project in db + """ + assert f"{project_id}" == project_data["uuid"] + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + app.state.engine, + table=projects, + values=project_data, + pk_col=projects.c.uuid, + pk_value=project_data["uuid"], + ) as row: + yield row + + +# Create a mock object manually +mock_with_locked_project = MagicMock() + + +# The stand-in decorator to replace the original one and record the function call +def mock_decorator(*args, **kwargs): + def _decorator(func): + def wrapper(*args, **kwargs): + mock_with_locked_project(*args, **kwargs) # Log the call + return func(*args, **kwargs) + + return wrapper + + return _decorator + + +@patch("simcore_service_efs_guardian.services.background_tasks.get_redis_lock_client") +@patch( + "simcore_service_efs_guardian.services.background_tasks.with_project_locked", + new=mock_decorator, +) +async def test_efs_removal_policy_task( + mock_get_redis_lock_client: MagicMock, + faker: Faker, + app: FastAPI, + efs_cleanup: None, + project_id: ProjectID, + node_id: NodeID, + project_in_db: dict, +): + # 1. Nothing should happen + await removal_policy_task(app) + assert not mock_with_locked_project.called + + # 2. Lets create some project with data + aws_efs_settings: AwsEfsSettings = app.state.settings.EFS_GUARDIAN_AWS_EFS_SETTINGS + _storage_directory_name = faker.word() + _dir_path = ( + aws_efs_settings.EFS_MOUNTED_PATH + / aws_efs_settings.EFS_PROJECT_SPECIFIC_DATA_DIRECTORY + / f"{project_id}" + / f"{node_id}" + / f"{_storage_directory_name}" + ) + efs_manager: EfsManager = app.state.efs_manager + + with patch( + "simcore_service_efs_guardian.services.efs_manager.os.chown" + ) as mocked_chown: + await efs_manager.create_project_specific_data_dir( + project_id=project_id, + node_id=node_id, + storage_directory_name=_storage_directory_name, + ) + assert mocked_chown.called + + file_paths = [] + for i in range(3): # Let's create 3 small files for testing + file_path = Path(_dir_path, f"test_file_{i}.txt") + file_path.write_text(f"This is file {i}") + file_paths.append(file_path) + + # 3. Nothing should happen + await removal_policy_task(app) + assert not mock_with_locked_project.called + + # 4. We will artifically change the project last change date + app_settings: ApplicationSettings = app.state.settings + _current_timestamp = datetime.now() + _old_timestamp = ( + _current_timestamp + - app_settings.EFS_REMOVAL_POLICY_TASK_AGE_LIMIT_TIMEDELTA + - timedelta(days=1) + ) + async with transaction_context(app.state.engine) as conn: + result = await conn.execute( + projects.update() + .values(last_change_date=_old_timestamp) + .where(projects.c.uuid == f"{project_id}") + ) + result_row_count: int = result.rowcount + assert result_row_count == 1 # nosec + + # 5. 
Now removal policy should remove those data + await removal_policy_task(app) + mock_with_locked_project.assert_called_once() + mock_get_redis_lock_client.assert_called_once() + projects_list = await efs_manager.list_projects_across_whole_efs() + assert projects_list == [] diff --git a/services/efs-guardian/tests/unit/test_main.py b/services/efs-guardian/tests/unit/test_main.py new file mode 100644 index 00000000000..bbdb41096c8 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_main.py @@ -0,0 +1,12 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict + + +def test_main_app(app_environment: EnvVarsDict): + from simcore_service_efs_guardian.main import the_app, the_settings + + assert the_app.state.settings == the_settings diff --git a/services/efs-guardian/tests/unit/test_process_messages.py b/services/efs-guardian/tests/unit/test_process_messages.py new file mode 100644 index 00000000000..32b439777f0 --- /dev/null +++ b/services/efs-guardian/tests/unit/test_process_messages.py @@ -0,0 +1,111 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from unittest.mock import AsyncMock, patch + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.products import ProductName +from models_library.rabbitmq_messages import DynamicServiceRunningMessage +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_efs_guardian.services.efs_manager import NodeID, ProjectID +from simcore_service_efs_guardian.services.process_messages import ( + process_dynamic_service_running_message, +) + +pytest_simcore_core_services_selection = ["rabbit"] +pytest_simcore_ops_services_selection = [] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, + with_disabled_redis_and_background_tasks: None, + with_disabled_postgres: None, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + "EFS_DEFAULT_USER_SERVICE_SIZE_BYTES": "10000", + }, + ) + + +@patch("simcore_service_efs_guardian.services.process_messages.update_disk_usage") +async def test_process_msg( + mock_update_disk_usage, + faker: Faker, + app: FastAPI, + efs_cleanup: None, + project_id: ProjectID, + node_id: NodeID, + user_id: UserID, + product_name: ProductName, +): + # Create mock data for the message + model_instance = DynamicServiceRunningMessage( + project_id=project_id, + node_id=node_id, + user_id=user_id, + product_name=product_name, + ) + json_str = model_instance.json() + model_bytes = json_str.encode("utf-8") + + _expected_project_node_states = [".data_assets", "home_user_workspace"] + # Mock efs_manager and its methods + mock_efs_manager = AsyncMock() + app.state.efs_manager = mock_efs_manager + mock_efs_manager.check_project_node_data_directory_exits.return_value = True + mock_efs_manager.get_project_node_data_size.return_value = 4000 + mock_efs_manager.list_project_node_state_names.return_value = ( + _expected_project_node_states + ) + + result = await process_dynamic_service_running_message(app, data=model_bytes) + + # Check the actual arguments 
passed to notify_service_efs_disk_usage + _, kwargs = mock_update_disk_usage.call_args + assert kwargs["usage"] + assert len(kwargs["usage"]) == 2 + for key, value in kwargs["usage"].items(): + assert key in _expected_project_node_states + assert value.used == 4000 + assert value.free == 6000 + assert value.total == 10000 + assert value.used_percent == 40.0 + + assert result is True + + +async def test_process_msg__dir_not_exists( + app: FastAPI, + efs_cleanup: None, + project_id: ProjectID, + node_id: NodeID, + user_id: UserID, + product_name: ProductName, +): + # Create mock data for the message + model_instance = DynamicServiceRunningMessage( + project_id=project_id, + node_id=node_id, + user_id=user_id, + product_name=product_name, + ) + json_str = model_instance.json() + model_bytes = json_str.encode("utf-8") + + result = await process_dynamic_service_running_message(app, data=model_bytes) + assert result is True diff --git a/services/invitations/Dockerfile b/services/invitations/Dockerfile index 4351c4c6ec1..49a03bd61f5 100644 --- a/services/invitations/Dockerfile +++ b/services/invitations/Dockerfile @@ -1,5 +1,18 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base # # USAGE: @@ -11,12 +24,18 @@ FROM python:${PYTHON_VERSION}-slim-buster as base LABEL maintainer=pcrespov -RUN set -eux; \ - apt-get update; \ - apt-get install -y --no-install-recommends gosu; \ - rm -rf /var/lib/apt/lists/*; \ +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ # verify that the binary works - gosu nobody true + && gosu nobody true # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ @@ -51,32 +70,35 @@ EXPOSE 8000 # # + /build WORKDIR # -FROM base as build +FROM base AS build ENV SC_BUILD_TARGET=build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ - build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + build-essential +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed # packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" + -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools WORKDIR /build # install base 3rd party dependencies -# NOTE: copies to /build 
to avoid overwriting later which would invalidate this layer -COPY --chown=scu:scu services/invitations/requirements/_base.txt . -RUN pip --no-cache-dir install -r _base.txt + + # --------------------------Prod-depends-only stage ------------------- @@ -85,17 +107,19 @@ RUN pip --no-cache-dir install -r _base.txt # + /build # + services/invitations [scu:scu] WORKDIR # -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET prod-only-deps +FROM build AS prod-only-deps -COPY --chown=scu:scu packages /build/packages -COPY --chown=scu:scu services/invitations /build/services/invitations +ENV SC_BUILD_TARGET=prod-only-deps WORKDIR /build/services/invitations -RUN pip3 --no-cache-dir install -r requirements/prod.txt \ - && pip3 --no-cache-dir list -v +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/invitations,target=/build/services/invitations,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list # --------------------------Production stage ------------------- @@ -105,15 +129,19 @@ RUN pip3 --no-cache-dir install -r requirements/prod.txt \ # + /home/scu $HOME = WORKDIR # + services/invitations [scu:scu] # -FROM base as production +FROM base AS production ENV SC_BUILD_TARGET=production \ SC_BOOT_MODE=production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 WORKDIR /home/scu +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # Starting from clean base image, copies pre-installed virtualenv from prod-only-deps COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} @@ -122,10 +150,13 @@ COPY --chown=scu:scu services/invitations/docker services/invitations/docker RUN chmod +x services/invitations/docker/*.sh -HEALTHCHECK --interval=30s \ - --timeout=20s \ - --start-period=30s \ - --retries=3 \ +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD ["python3", "services/invitations/docker/healthcheck.py", "http://localhost:8000/"] ENTRYPOINT [ "/bin/sh", "services/invitations/docker/entrypoint.sh" ] @@ -140,7 +171,7 @@ CMD ["/bin/sh", "services/invitations/docker/boot.sh"] # + /devel WORKDIR # + services (mounted volume) # -FROM build as development +FROM build AS development ENV SC_BUILD_TARGET=development \ SC_DEVEL_MOUNT=/devel/services/invitations diff --git a/services/invitations/Makefile b/services/invitations/Makefile index f727d80af69..f7a9b88fe72 100644 --- a/services/invitations/Makefile +++ b/services/invitations/Makefile @@ -6,18 +6,18 @@ include ../../scripts/common-service.Makefile -.env: - $(APP_CLI_NAME) generate-dotenv --auto-password > $@ +.env-ignore: + $(APP_CLI_NAME) echo-dotenv --auto-password > $@ .PHONY: openapi.json openapi-specs: openapi.json -openapi.json: .env ## produces openapi.json +openapi.json: .env-ignore ## produces openapi.json # generating openapi specs file (need to have the environment set for this) @set -o allexport; \ - source .env; \ + source $<; \ set +o allexport; \ - python3 -c "import json; from $(APP_PACKAGE_NAME).web_main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ + python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ # diff --git a/services/invitations/README.md b/services/invitations/README.md index 
b89874e487b..afbe80107a0 100644 --- a/services/invitations/README.md +++ b/services/invitations/README.md @@ -25,7 +25,7 @@ simcore-service-invitations --help Create ``.env`` file ``` -simcore-service-invitations generate-dotenv --auto-password > .env +simcore-service-invitations echo-dotenv --auto-password > .env set -o allexport; source .env; set +o allexport ``` and modify the ``.env`` if needed @@ -68,7 +68,7 @@ docker run -it itisfoundation/invitations:release-github-latest simcore-service- Create ``.env`` file ``` -docker run -it itisfoundation/invitations:release-github-latest simcore-service-invitations generate-dotenv --auto-password > .env +docker run -it itisfoundation/invitations:release-github-latest simcore-service-invitations echo-dotenv --auto-password > .env ``` and modify the ``.env`` if needed diff --git a/services/invitations/VERSION b/services/invitations/VERSION index 7dea76edb3d..26aaba0e866 100644 --- a/services/invitations/VERSION +++ b/services/invitations/VERSION @@ -1 +1 @@ -1.0.1 +1.2.0 diff --git a/services/invitations/docker/boot.sh b/services/invitations/docker/boot.sh index 5041e2ea4cf..0616dc4c2b7 100755 --- a/services/invitations/docker/boot.sh +++ b/services/invitations/docker/boot.sh @@ -23,27 +23,36 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then python --version | sed 's/^/ /' command -v python | sed 's/^/ /' - cd services/invitations || exit 1 - pip --quiet --no-cache-dir install -r requirements/dev.txt - cd - || exit 1 + cd services/invitations + uv pip --quiet sync requirements/dev.txt + cd - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi fi # # RUNNING application # -APP_LOG_LEVEL=${API_SERVER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +APP_LOG_LEVEL=${INVITATIONS_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then +if [ "${SC_BOOT_MODE}" = "debug" ]; then reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) exec sh -c " cd services/invitations/src/simcore_service_invitations && \ - uvicorn web_main:the_app \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${INVITATIONS_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ --host 0.0.0.0 \ --reload \ $reload_dir_packages @@ -51,7 +60,7 @@ if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then --log-level \"${SERVER_LOG_LEVEL}\" " else - exec uvicorn simcore_service_invitations.web_main:the_app \ + exec uvicorn simcore_service_invitations.main:the_app \ --host 0.0.0.0 \ --log-level "${SERVER_LOG_LEVEL}" fi diff --git a/services/invitations/docker/entrypoint.sh b/services/invitations/docker/entrypoint.sh index 7629ec24596..25153a6b2a2 100755 --- a/services/invitations/docker/entrypoint.sh +++ b/services/invitations/docker/entrypoint.sh @@ -63,11 +63,6 @@ if [ "${SC_BUILD_TARGET}" = "development" ]; then fi fi -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - echo "$INFO Starting $* ..." 
echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" echo " local dir : $(ls -al)" diff --git a/services/invitations/docker/healthcheck.py b/services/invitations/docker/healthcheck.py index 10e58d00e21..f5dc8762ae3 100755 --- a/services/invitations/docker/healthcheck.py +++ b/services/invitations/docker/healthcheck.py @@ -6,9 +6,10 @@ COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py HEALTHCHECK --interval=30s \ --timeout=30s \ - --start-period=1s \ + --start-period=20s \ + --start-interval=1s \ --retries=3 \ - CMD python3 docker/healthcheck.py http://localhost:8000/ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ ``` Q&A: @@ -21,7 +22,7 @@ from contextlib import suppress from urllib.request import urlopen -# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, etc) +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, debugpy, etc) SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") # Adds a base-path if defined in environ diff --git a/services/invitations/openapi.json b/services/invitations/openapi.json index dc8fc9d317e..508191b0419 100644 --- a/services/invitations/openapi.json +++ b/services/invitations/openapi.json @@ -1,9 +1,9 @@ { - "openapi": "3.0.2", + "openapi": "3.1.0", "info": { "title": "simcore-service-invitations web API", - "description": " Service that manages creation and validation of registration invitations", - "version": "1.0.1" + "description": "Service that manages creation and validation of registration invitations", + "version": "1.2.0" }, "paths": { "/": { @@ -57,7 +57,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_ApiInvitationInputs" + "$ref": "#/components/schemas/ApiInvitationInputs" } } }, @@ -69,7 +69,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_InvitationContentAndLink" + "$ref": "#/components/schemas/ApiInvitationContentAndLink" } } } @@ -104,7 +104,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_EncryptedInvitation" + "$ref": "#/components/schemas/ApiEncryptedInvitation" } } }, @@ -116,7 +116,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_ApiInvitationContent" + "$ref": "#/components/schemas/ApiInvitationContent" } } } @@ -142,228 +142,314 @@ }, "components": { "schemas": { - "HTTPValidationError": { - "title": "HTTPValidationError", - "type": "object", + "ApiEncryptedInvitation": { "properties": { - "detail": { - "title": "Detail", - "type": "array", - "items": { - "$ref": "#/components/schemas/ValidationError" - } + "invitation_url": { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri", + "title": "Invitation Url", + "description": "Invitation link" } - } - }, - "ValidationError": { - "title": "ValidationError", - "required": [ - "loc", - "msg", - "type" - ], + }, "type": "object", - "properties": { - "loc": { - "title": "Location", - "type": "array", - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "integer" - } - ] - } - }, - "msg": { - "title": "Message", - "type": "string" - }, - "type": { - "title": "Error Type", - "type": "string" - } - } - }, - "_ApiInvitationContent": { - "title": "_ApiInvitationContent", "required": [ - "issuer", - "guest", - "created" + "invitation_url" ], - "type": "object", + "title": "ApiEncryptedInvitation" + }, + "ApiInvitationContent": { "properties": { "issuer": { - "title": "Issuer", - "maxLength": 30, - "minLength": 1, "type": "string", - "description": 
"Identifies who issued the invitation. E.g. an email, a service name etc" + "maxLength": 40, + "minLength": 1, + "title": "Issuer", + "description": "Identifies who issued the invitation. E.g. an email, a service name etc. NOTE: it will be trimmed if exceeds maximum" }, "guest": { - "title": "Guest", "type": "string", - "description": "Invitee's email. Note that the registration can ONLY be used with this email", - "format": "email" + "format": "email", + "title": "Guest", + "description": "Invitee's email. Note that the registration can ONLY be used with this email" }, "trial_account_days": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Trial Account Days", - "exclusiveMinimum": true, - "type": "integer", - "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires", - "minimum": 0 + "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires" + }, + "extra_credits_in_usd": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Extra Credits In Usd", + "description": "If set, the account's primary wallet will add extra credits corresponding to this ammount in USD" + }, + "product": { + "type": "string", + "title": "Product", + "description": "This invitations can only be used for this product." }, "created": { - "title": "Created", "type": "string", - "description": "Timestamp for creation", - "format": "date-time" + "format": "date-time", + "title": "Created", + "description": "Timestamp for creation" } }, - "description": "Data in an invitation", + "type": "object", + "required": [ + "issuer", + "guest", + "product", + "created" + ], + "title": "ApiInvitationContent", "example": { - "issuer": "issuerid", + "created": "2023-01-11 13:11:47.293595", "guest": "invitedguest@company.com", - "trial_account_days": 2, - "created": "2023-01-11 13:11:47.293595" + "issuer": "issuerid", + "product": "osparc", + "trial_account_days": 2 } }, - "_ApiInvitationInputs": { - "title": "_ApiInvitationInputs", - "required": [ - "issuer", - "guest" - ], - "type": "object", + "ApiInvitationContentAndLink": { "properties": { "issuer": { - "title": "Issuer", - "maxLength": 30, - "minLength": 1, "type": "string", - "description": "Identifies who issued the invitation. E.g. an email, a service name etc" + "maxLength": 40, + "minLength": 1, + "title": "Issuer", + "description": "Identifies who issued the invitation. E.g. an email, a service name etc. NOTE: it will be trimmed if exceeds maximum" }, "guest": { - "title": "Guest", "type": "string", - "description": "Invitee's email. Note that the registration can ONLY be used with this email", - "format": "email" + "format": "email", + "title": "Guest", + "description": "Invitee's email. 
Note that the registration can ONLY be used with this email" }, "trial_account_days": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Trial Account Days", - "exclusiveMinimum": true, - "type": "integer", - "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires", - "minimum": 0 - } - }, - "description": "Input data necessary to create an invitation", - "example": { - "issuer": "issuerid", - "guest": "invitedguest@company.com", - "trial_account_days": 2 - } - }, - "_EncryptedInvitation": { - "title": "_EncryptedInvitation", - "required": [ - "invitation_url" - ], - "type": "object", - "properties": { + "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires" + }, + "extra_credits_in_usd": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Extra Credits In Usd", + "description": "If set, the account's primary wallet will add extra credits corresponding to this ammount in USD" + }, + "product": { + "type": "string", + "title": "Product", + "description": "This invitations can only be used for this product." + }, + "created": { + "type": "string", + "format": "date-time", + "title": "Created", + "description": "Timestamp for creation" + }, "invitation_url": { - "title": "Invitation Url", + "type": "string", "maxLength": 2083, "minLength": 1, - "type": "string", - "description": "Invitation link", - "format": "uri" + "format": "uri", + "title": "Invitation Url", + "description": "Invitation link" } - } - }, - "_InvitationContentAndLink": { - "title": "_InvitationContentAndLink", + }, + "type": "object", "required": [ "issuer", "guest", + "product", "created", "invitation_url" ], - "type": "object", + "title": "ApiInvitationContentAndLink", + "example": { + "created": "2023-01-11 13:11:47.293595", + "guest": "invitedguest@company.com", + "invitation_url": "https://foo.com/#/registration?invitation=1234", + "issuer": "issuerid", + "product": "osparc", + "trial_account_days": 2 + } + }, + "ApiInvitationInputs": { "properties": { "issuer": { - "title": "Issuer", - "maxLength": 30, - "minLength": 1, "type": "string", - "description": "Identifies who issued the invitation. E.g. an email, a service name etc" + "maxLength": 40, + "minLength": 1, + "title": "Issuer", + "description": "Identifies who issued the invitation. E.g. an email, a service name etc. NOTE: it will be trimmed if exceeds maximum" }, "guest": { - "title": "Guest", "type": "string", - "description": "Invitee's email. Note that the registration can ONLY be used with this email", - "format": "email" + "format": "email", + "title": "Guest", + "description": "Invitee's email. 
Note that the registration can ONLY be used with this email" }, "trial_account_days": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], "title": "Trial Account Days", - "exclusiveMinimum": true, - "type": "integer", - "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires", - "minimum": 0 + "description": "If set, this invitation will activate a trial account.Sets the number of days from creation until the account expires" }, - "created": { - "title": "Created", - "type": "string", - "description": "Timestamp for creation", - "format": "date-time" + "extra_credits_in_usd": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": true, + "minimum": 0 + }, + { + "type": "null" + } + ], + "title": "Extra Credits In Usd", + "description": "If set, the account's primary wallet will add extra credits corresponding to this ammount in USD" }, - "invitation_url": { - "title": "Invitation Url", - "maxLength": 2083, - "minLength": 1, - "type": "string", - "description": "Invitation link", - "format": "uri" + "product": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Product", + "description": "If None, it will use INVITATIONS_DEFAULT_PRODUCT" } }, - "description": "Data in an invitation", + "type": "object", + "required": [ + "issuer", + "guest" + ], + "title": "ApiInvitationInputs", "example": { - "issuer": "issuerid", "guest": "invitedguest@company.com", - "trial_account_days": 2, - "created": "2023-01-11 12:11:47.293595", - "invitation_url": "https://foo.com/#/registration?invitation=1234" + "issuer": "issuerid", + "trial_account_days": 2 } }, - "_Meta": { - "title": "_Meta", + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", "required": [ - "name", - "version", - "docs_url" + "loc", + "msg", + "type" ], - "type": "object", + "title": "ValidationError" + }, + "_Meta": { "properties": { "name": { - "title": "Name", - "type": "string" + "type": "string", + "title": "Name" }, "version": { - "title": "Version", - "type": "string" + "type": "string", + "title": "Version" }, "docs_url": { - "title": "Docs Url", + "type": "string", "maxLength": 2083, "minLength": 1, - "type": "string", - "format": "uri" + "format": "uri", + "title": "Docs Url" } - } + }, + "type": "object", + "required": [ + "name", + "version", + "docs_url" + ], + "title": "_Meta" } }, "securitySchemes": { diff --git a/services/invitations/requirements/_base.in b/services/invitations/requirements/_base.in index 7c172897a84..f67a4ccf1d8 100644 --- a/services/invitations/requirements/_base.in +++ b/services/invitations/requirements/_base.in @@ -6,6 +6,7 @@ --constraint ./constraints.txt # intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in --requirement ../../../packages/models-library/requirements/_base.in --requirement ../../../packages/settings-library/requirements/_base.in # 
service-library[fastapi] @@ -13,8 +14,6 @@ --requirement ../../../packages/service-library/requirements/_fastapi.in -cryptography~=39.0.0 # security -fastapi +cryptography packaging typer[all] -uvicorn[standard] diff --git a/services/invitations/requirements/_base.txt b/services/invitations/requirements/_base.txt index a0d01677d4e..1a3c71b6e49 100644 --- a/services/invitations/requirements/_base.txt +++ b/services/invitations/requirements/_base.txt @@ -1,180 +1,537 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aio-pika==8.3.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aiofiles==22.1.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -aiormq==6.6.4 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 # via aio-pika -anyio==3.6.2 +aiosignal==1.3.2 + # via aiohttp +annotated-types==0.7.0 + # via pydantic +anyio==4.8.0 # via - # 
httpcore + # fast-depends + # faststream + # httpx # starlette # watchfiles -arrow==1.2.3 - # via -r requirements/../../../packages/service-library/requirements/_base.in -async-timeout==4.0.2 - # via redis -attrs==21.4.0 +arrow==1.3.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +attrs==25.2.0 + # via + # aiohttp # jsonschema -certifi==2022.12.7 + # referencing +certifi==2025.1.31 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # httpcore # httpx -cffi==1.15.1 + # requests +cffi==1.17.1 # via cryptography -click==8.1.3 +charset-normalizer==3.4.1 + # via requests +click==8.1.8 # via + # rich-toolkit # typer # uvicorn -colorama==0.4.6 - # via typer -commonmark==0.9.1 - # via rich -cryptography==39.0.1 - # via -r requirements/_base.in -dnspython==2.2.1 +cryptography==44.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 # via email-validator -email-validator==1.3.0 - # via pydantic -fastapi==0.89.1 +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 # via # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in - # fastapi-contrib -fastapi-contrib==0.2.11 + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.35 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.69.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc h11==0.14.0 # via # httpcore # uvicorn -httpcore==0.16.3 +h2==4.2.0 # via httpx -httptools==0.5.0 +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 # via uvicorn -httpx==0.23.3 - # via -r requirements/../../../packages/service-library/requirements/_fastapi.in -idna==3.4 +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 # via # anyio # email-validator - # rfc3986 + # httpx + # requests + # yarl +importlib-metadata==8.6.1 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiohttp # yarl -jaeger-client==4.8.0 - # via fastapi-contrib -jsonschema==3.2.0 +opentelemetry-api==1.31.0 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # 
opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.31.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.31.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.31.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.31.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.52b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.52b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-fastapi==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.52b0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.31.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.31.0 # via - # -c requirements/../../../packages/service-library/requirements/././constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.52b0 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.52b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.15 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in -multidict==6.0.4 - # via yarl -opentracing==2.4.0 - # via - # fastapi-contrib - # jaeger-client -packaging==23.0 - # via -r requirements/_base.in -pamqp==3.2.1 + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 # via aiormq -pycparser==2.21 +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.0 + # via + # aiohttp + # yarl +protobuf==5.29.3 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +pycparser==2.22 # via cffi -pydantic==1.10.2 +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.6 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends # fastapi -pygments==2.14.0 - # via rich -pyinstrument==4.4.0 + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.2 + # via pydantic +pydantic-extra-types==2.10.3 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -pyrsistent==0.19.3 - # via jsonschema -python-dateutil==2.8.2 + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # 
-c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 # via arrow -python-dotenv==0.21.0 - # via uvicorn -pyyaml==5.4.1 +python-dotenv==1.0.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in # uvicorn -redis==4.4.1 +redis==5.2.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # -r requirements/../../../packages/service-library/requirements/_base.in -rfc3986==1.5.0 - # via httpx -rich==12.6.0 - # via typer -shellingham==1.5.0.post1 - # via typer -six==1.16.0 +referencing==0.35.1 # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt # jsonschema - # python-dateutil - # thrift -sniffio==1.3.0 + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 # via - # anyio - # httpcore - # httpx -starlette==0.22.0 - # via fastapi -tenacity==8.1.0 - # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -threadloop==1.0.2 - # via jaeger-client -thrift==0.16.0 - # via jaeger-client -tornado==6.2 + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.23.1 # via - # jaeger-client - # threadloop -tqdm==4.64.1 + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +starlette==0.46.1 # via - # -c requirements/../../../packages/service-library/requirements/./_base.in - # -r requirements/../../../packages/service-library/requirements/_base.in -typer==0.7.0 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/../../../packages/settings-library/requirements/_base.in # -r requirements/_base.in -typing-extensions==4.4.0 + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.12.2 # via # aiodebug + # anyio + # fastapi + # faststream + # opentelemetry-sdk # pydantic - # starlette -uvicorn==0.20.0 + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +urllib3==2.3.0 # via - # -r requirements/../../../packages/service-library/requirements/_fastapi.in - # -r requirements/_base.in -uvloop==0.17.0 + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 # via uvicorn -watchfiles==0.18.1 +watchfiles==1.0.4 # via uvicorn -websockets==10.4 +websockets==15.0.1 # via uvicorn -yarl==1.8.2 +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +yarl==1.18.3 # via + # -r requirements/../../../packages/service-library/requirements/_base.in # aio-pika + # aiohttp # aiormq - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +zipp==3.21.0 + # via importlib-metadata diff --git a/services/invitations/requirements/_test.in b/services/invitations/requirements/_test.in 
index 06247025522..040fb5659da 100644 --- a/services/invitations/requirements/_test.in +++ b/services/invitations/requirements/_test.in @@ -10,11 +10,11 @@ # --constraint _base.txt -codecov + coverage -coveralls faker httpx +hypothesis pytest pytest-asyncio pytest-cov diff --git a/services/invitations/requirements/_test.txt b/services/invitations/requirements/_test.txt index be7a2c1948b..0b1bfff0ba6 100644 --- a/services/invitations/requirements/_test.txt +++ b/services/invitations/requirements/_test.txt @@ -1,111 +1,81 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -anyio==3.6.2 +anyio==4.8.0 # via # -c requirements/_base.txt - # httpcore -attrs==21.4.0 + # httpx +attrs==25.2.0 # via # -c requirements/_base.txt - # pytest -certifi==2022.12.7 + # hypothesis +certifi==2025.1.31 # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # httpcore # httpx - # requests -charset-normalizer==3.0.1 - # via requests -codecov==2.1.12 - # via -r requirements/_test.in -coverage==6.5.0 +coverage==7.6.12 # via # -r requirements/_test.in - # codecov - # coveralls # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.4.0 +faker==37.0.0 # via -r requirements/_test.in h11==0.14.0 # via # -c requirements/_base.txt # httpcore -httpcore==0.16.3 +httpcore==1.0.7 # via # -c requirements/_base.txt # httpx -httpx==0.23.3 +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +hypothesis==6.129.0 # via -r requirements/_test.in -idna==3.4 +idna==3.10 # via # -c requirements/_base.txt # anyio - # requests - # rfc3986 + # httpx iniconfig==2.0.0 # via pytest -packaging==23.0 +packaging==24.2 # via # -c requirements/_base.txt # pytest # pytest-sugar -pluggy==1.0.0 +pluggy==1.5.0 # via pytest -pytest==7.2.1 +pytest==8.3.5 # via # -r requirements/_test.in # pytest-asyncio # pytest-cov # pytest-sugar -pytest-asyncio==0.20.3 +pytest-asyncio==0.26.0 # via -r requirements/_test.in -pytest-cov==4.0.0 +pytest-cov==6.0.0 # via -r requirements/_test.in -pytest-runner==6.0.0 +pytest-runner==6.0.1 # via -r requirements/_test.in -pytest-sugar==0.9.6 +pytest-sugar==1.0.0 # via -r requirements/_test.in -python-dateutil==2.8.2 +python-dotenv==1.0.1 # via # -c requirements/_base.txt - # faker -python-dotenv==0.21.0 - # via -r requirements/_test.in -requests==2.28.2 - # via - # codecov - # coveralls -rfc3986==1.5.0 - # via - # -c requirements/_base.txt - # httpx -six==1.16.0 - # via - # -c requirements/_base.txt - # python-dateutil -sniffio==1.3.0 + # -r requirements/_test.in +sniffio==1.3.1 # via # -c requirements/_base.txt # anyio - # httpcore - # httpx -termcolor==2.2.0 +sortedcontainers==2.4.0 + # via hypothesis +termcolor==2.5.0 # via pytest-sugar -tomli==2.0.1 - # via - # coverage - # pytest -urllib3==1.26.14 +typing-extensions==4.12.2 # via - # -c requirements/../../../requirements/constraints.txt - # requests + # -c requirements/_base.txt + # anyio +tzdata==2025.1 + # via faker diff --git a/services/invitations/requirements/_tools.txt b/services/invitations/requirements/_tools.txt index d9c99ce78d0..a2cf1e41924 100644 --- a/services/invitations/requirements/_tools.txt +++ b/services/invitations/requirements/_tools.txt @@ -1,93 +1,85 @@ -# -# This file is 
autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.14.2 +astroid==3.3.9 # via pylint -black==23.1.0 +black==25.1.0 # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 +build==1.2.2.post1 # via pip-tools bump2version==1.0.1 # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -click==8.1.3 +click==8.1.8 # via # -c requirements/_base.txt # black # pip-tools -dill==0.3.6 +dill==0.3.9 # via pylint -distlib==0.3.6 +distlib==0.3.9 # via virtualenv -filelock==3.9.0 +filelock==3.17.0 # via virtualenv -identify==2.5.18 +identify==2.6.9 # via pre-commit -isort==5.12.0 +isort==6.0.1 # via # -r requirements/../../../requirements/devenv.txt # pylint -lazy-object-proxy==1.9.0 - # via astroid mccabe==0.7.0 # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 + # via + # black + # mypy +nodeenv==1.9.1 # via pre-commit -packaging==23.0 +packaging==24.2 # via + # -c requirements/_base.txt # -c requirements/_test.txt # black # build -pathspec==0.11.0 +pathspec==0.12.1 # via black -pip-tools==6.12.2 +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.0.0 +platformdirs==4.3.6 # via # black # pylint # virtualenv -pre-commit==3.1.1 +pre-commit==4.1.0 # via -r requirements/../../../requirements/devenv.txt -pylint==2.16.2 +pylint==3.3.5 # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 +pyproject-hooks==1.2.0 # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt # -c requirements/_base.txt # pre-commit # watchdog -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 +ruff==0.9.10 + # via -r requirements/../../../requirements/devenv.txt +setuptools==76.0.0 + # via pip-tools +tomlkit==0.13.2 # via pylint -typing-extensions==4.4.0 +typing-extensions==4.12.2 # via # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 + # -c requirements/_test.txt + # mypy +virtualenv==20.29.3 # via pre-commit -watchdog==2.3.1 +watchdog==6.0.0 # via -r requirements/_tools.in -wheel==0.38.4 +wheel==0.45.1 # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/invitations/requirements/ci.txt b/services/invitations/requirements/ci.txt index 38a4d0441ba..bae11460376 100644 --- a/services/invitations/requirements/ci.txt +++ b/services/invitations/requirements/ci.txt @@ -9,12 +9,14 @@ # installs base + tests requirements --requirement _base.txt --requirement _test.txt +--requirement _tools.txt # installs this repo's packages -../../packages/pytest-simcore -../../packages/service-library[fastapi] -../../packages/settings-library -../../packages/models-library +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library # installs current package -. +simcore-service-invitations @ . 
diff --git a/services/invitations/requirements/dev.txt b/services/invitations/requirements/dev.txt index a45c4db8918..1de98a1f08a 100644 --- a/services/invitations/requirements/dev.txt +++ b/services/invitations/requirements/dev.txt @@ -12,10 +12,12 @@ --requirement _tools.txt # installs this repo's packages +--editable ../../packages/common-library +--editable ../../packages/models-library --editable ../../packages/pytest-simcore --editable ../../packages/service-library[fastapi] --editable ../../packages/settings-library ---editable ../../packages/models-library + # installs current package --editable . diff --git a/services/invitations/requirements/prod.txt b/services/invitations/requirements/prod.txt index 5ff788ade8d..9956e844cd3 100644 --- a/services/invitations/requirements/prod.txt +++ b/services/invitations/requirements/prod.txt @@ -10,8 +10,9 @@ --requirement _base.txt # installs this repo's packages -../../packages/service-library[fastapi] -../../packages/settings-library -../../packages/models-library +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ # installs current package -. +simcore-service-invitations @ . diff --git a/services/invitations/setup.cfg b/services/invitations/setup.cfg index 2a74916e024..1d4afc35977 100644 --- a/services/invitations/setup.cfg +++ b/services/invitations/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.0.1 +current_version = 1.2.0 commit = True message = services/invitations version: {current_version} β†’ {new_version} tag = False @@ -9,5 +9,10 @@ commit_args = --no-verify [tool:pytest] asyncio_mode = auto +asyncio_default_fixture_loop_scope = function markers = testit: "marks test to run during development" + +[mypy] +plugins = + pydantic.mypy diff --git a/services/invitations/setup.py b/services/invitations/setup.py index a30deabc332..5dbebc43999 100755 --- a/services/invitations/setup.py +++ b/services/invitations/setup.py @@ -37,29 +37,30 @@ def read_reqs(reqs_path: Path) -> set[str]: TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) -SETUP = dict( - name=NAME, - version=VERSION, - author=AUTHORS, - description=DESCRIPTION, - long_description=README, - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { "": "src", }, - include_package_data=True, - install_requires=PROD_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { "console_scripts": [ - "simcore-service-invitations = simcore_service_invitations.cli:app", + "simcore-service-invitations = simcore_service_invitations.cli:main", + "simcore-service = simcore_service_invitations.cli:main", ], }, -) +} if __name__ == "__main__": setup(**SETUP) diff --git a/services/invitations/src/simcore_service_invitations/__init__.py 
b/services/invitations/src/simcore_service_invitations/__init__.py index 94fc632e7af..e69de29bb2d 100644 --- a/services/invitations/src/simcore_service_invitations/__init__.py +++ b/services/invitations/src/simcore_service_invitations/__init__.py @@ -1 +0,0 @@ -from ._meta import __version__ diff --git a/services/invitations/src/simcore_service_invitations/_meta.py b/services/invitations/src/simcore_service_invitations/_meta.py index 863c69e474b..c7b955a4db6 100644 --- a/services/invitations/src/simcore_service_invitations/_meta.py +++ b/services/invitations/src/simcore_service_invitations/_meta.py @@ -1,19 +1,23 @@ """ Application's metadata """ + from typing import Final +from models_library.basic_types import VersionStr from packaging.version import Version from servicelib.utils_meta import PackageInfo +from settings_library.basic_types import VersionTag info: Final = PackageInfo(package_name="simcore-service-invitations") -__version__: Final[str] = info.__version__ +__version__: Final[VersionStr] = info.__version__ PROJECT_NAME: Final[str] = info.project_name VERSION: Final[Version] = info.version -API_VERSION: Final[str] = info.__version__ -API_VTAG: Final[str] = info.api_prefix_path_tag +API_VERSION: Final[VersionStr] = info.__version__ +APP_NAME = PROJECT_NAME +API_VTAG: Final[VersionTag] = VersionTag(info.api_prefix_path_tag) SUMMARY: Final[str] = info.get_summary() diff --git a/services/invitations/src/simcore_service_invitations/api/_dependencies.py b/services/invitations/src/simcore_service_invitations/api/_dependencies.py index 88c46cb4299..0875a5e1d8d 100644 --- a/services/invitations/src/simcore_service_invitations/api/_dependencies.py +++ b/services/invitations/src/simcore_service_invitations/api/_dependencies.py @@ -1,10 +1,10 @@ +# mypy: disable-error-code=truthy-function import logging import secrets -from typing import Any, Callable, Optional from fastapi import Depends, HTTPException, Request, status from fastapi.security import HTTPBasic, HTTPBasicCredentials -from starlette.datastructures import URL +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper from ..core.settings import ApplicationSettings @@ -16,14 +16,6 @@ # -def get_reverse_url_mapper(request: Request) -> Callable: - def _reverse_url_mapper(name: str, **path_params: Any) -> str: - url: URL = request.url_for(name, **path_params) - return f"{url}" - - return _reverse_url_mapper - - def get_settings(request: Request) -> ApplicationSettings: app_settings: ApplicationSettings = request.app.state.settings assert app_settings # nosec @@ -34,7 +26,7 @@ def get_settings(request: Request) -> ApplicationSettings: def get_validated_credentials( - credentials: Optional[HTTPBasicCredentials] = Depends(_get_basic_credentials), + credentials: HTTPBasicCredentials | None = Depends(_get_basic_credentials), settings: ApplicationSettings = Depends(get_settings), ) -> HTTPBasicCredentials: def _is_valid(current: str, expected: str) -> bool: @@ -57,3 +49,13 @@ def _is_valid(current: str, expected: str) -> bool: ) return credentials + + +assert get_reverse_url_mapper # nosec +assert get_app # nosec + + +__all__: tuple[str, ...] 
= ( + "get_reverse_url_mapper", + "get_app", +) diff --git a/services/invitations/src/simcore_service_invitations/api/_health.py b/services/invitations/src/simcore_service_invitations/api/_health.py index 7d8b180c215..0d2ebada79d 100644 --- a/services/invitations/src/simcore_service_invitations/api/_health.py +++ b/services/invitations/src/simcore_service_invitations/api/_health.py @@ -1,18 +1,11 @@ -import logging -from datetime import datetime +from datetime import datetime, timezone from fastapi import APIRouter from fastapi.responses import PlainTextResponse -logger = logging.getLogger(__name__) - - -# -# ROUTE HANDLERS -# router = APIRouter() @router.get("/", response_class=PlainTextResponse) async def healthcheck(): - return f"{__name__}@{datetime.utcnow().isoformat()}" + return f"{__name__}@{datetime.now(tz=timezone.utc).isoformat()}" diff --git a/services/invitations/src/simcore_service_invitations/api/_invitations.py b/services/invitations/src/simcore_service_invitations/api/_invitations.py index f069513cf5f..36737653902 100644 --- a/services/invitations/src/simcore_service_invitations/api/_invitations.py +++ b/services/invitations/src/simcore_service_invitations/api/_invitations.py @@ -1,125 +1,80 @@ import logging -from datetime import datetime -from typing import Any, Optional +from typing import Annotated -from fastapi import APIRouter, Depends, HTTPException, status +from fastapi import APIRouter, Depends from fastapi.security import HTTPBasicCredentials -from pydantic import BaseModel, Field, HttpUrl +from models_library.api_schemas_invitations.invitations import ( + ApiEncryptedInvitation, + ApiInvitationContent, + ApiInvitationContentAndLink, + ApiInvitationInputs, +) +from models_library.invitations import InvitationContent from ..core.settings import ApplicationSettings -from ..invitations import ( - InvalidInvitationCode, - InvitationContent, - InvitationInputs, - create_invitation_link, - extract_invitation_code_from, +from ..services.invitations import ( + create_invitation_link_and_content, + extract_invitation_code_from_query, extract_invitation_content, ) from ._dependencies import get_settings, get_validated_credentials -logger = logging.getLogger(__name__) +_logger = logging.getLogger(__name__) INVALID_INVITATION_URL_MSG = "Invalid invitation link" -# -# API SCHEMA MODELS -# - - -_INPUTS_EXAMPLE: dict[str, Any] = { - "issuer": "issuerid", - "guest": "invitedguest@company.com", - "trial_account_days": 2, -} - - -class _ApiInvitationInputs(InvitationInputs): - class Config: - schema_extra = {"example": _INPUTS_EXAMPLE} - - -class _ApiInvitationContent(InvitationContent): - class Config: - schema_extra = { - "example": { - **_INPUTS_EXAMPLE, - "created": "2023-01-11 13:11:47.293595", - } - } - -class _InvitationContentAndLink(_ApiInvitationContent): - invitation_url: HttpUrl = Field(..., description="Invitation link") - - class Config: - schema_extra = { - "example": { - **_INPUTS_EXAMPLE, - "created": "2023-01-11 12:11:47.293595", - "invitation_url": "https://foo.com/#/registration?invitation=1234", - } - } - - -class _EncryptedInvitation(BaseModel): - invitation_url: HttpUrl = Field(..., description="Invitation link") - - -# -# ROUTE HANDLERS -# router = APIRouter() @router.post( "/invitations", - response_model=_InvitationContentAndLink, + response_model=ApiInvitationContentAndLink, response_model_by_alias=False, ) async def create_invitation( - invitation_inputs: _ApiInvitationInputs, - settings: ApplicationSettings = Depends(get_settings), - _credentials: 
Optional[HTTPBasicCredentials] = Depends(get_validated_credentials), + invitation_inputs: ApiInvitationInputs, + settings: Annotated[ApplicationSettings, Depends(get_settings)], + _credentials: Annotated[ + HTTPBasicCredentials | None, Depends(get_validated_credentials) + ], ): """Generates a new invitation code and returns its content and an invitation link""" - invitation_link = create_invitation_link( + invitation_link, invitation_content = create_invitation_link_and_content( invitation_inputs, secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), base_url=settings.INVITATIONS_OSPARC_URL, + default_product=settings.INVITATIONS_DEFAULT_PRODUCT, ) - invitation = _InvitationContentAndLink( + invitation = ApiInvitationContentAndLink( + **invitation_content.model_dump(), invitation_url=invitation_link, - created=datetime.utcnow(), - **invitation_inputs.dict(), ) - logger.info("New invitation: %s", f"{invitation.json(indent=1)}") + _logger.info("New invitation: %s", f"{invitation.model_dump_json(indent=1)}") return invitation @router.post( "/invitations:extract", - response_model=_ApiInvitationContent, + response_model=ApiInvitationContent, response_model_by_alias=False, ) async def extracts_invitation_from_code( - encrypted: _EncryptedInvitation, - settings: ApplicationSettings = Depends(get_settings), - _credentials: Optional[HTTPBasicCredentials] = Depends(get_validated_credentials), + encrypted: ApiEncryptedInvitation, + settings: Annotated[ApplicationSettings, Depends(get_settings)], + _credentials: Annotated[ + HTTPBasicCredentials | None, Depends(get_validated_credentials) + ], ): """Decrypts the invitation code and returns its content""" - try: - invitation = extract_invitation_content( - invitation_code=extract_invitation_code_from(encrypted.invitation_url), - secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), - ) - except InvalidInvitationCode as err: - raise HTTPException( - status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, - detail=INVALID_INVITATION_URL_MSG, - ) from err + invitation: InvitationContent = extract_invitation_content( + invitation_code=extract_invitation_code_from_query(encrypted.invitation_url), + secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), + default_product=settings.INVITATIONS_DEFAULT_PRODUCT, + ) return invitation diff --git a/services/invitations/src/simcore_service_invitations/api/_meta.py b/services/invitations/src/simcore_service_invitations/api/_meta.py index 8d094dbd7be..621f3b4445e 100644 --- a/services/invitations/src/simcore_service_invitations/api/_meta.py +++ b/services/invitations/src/simcore_service_invitations/api/_meta.py @@ -1,5 +1,5 @@ import logging -from typing import Callable +from collections.abc import Callable from fastapi import APIRouter, Depends from pydantic import BaseModel, HttpUrl diff --git a/services/invitations/src/simcore_service_invitations/cli.py b/services/invitations/src/simcore_service_invitations/cli.py index 62c3ba0ca6e..67838b04615 100644 --- a/services/invitations/src/simcore_service_invitations/cli.py +++ b/services/invitations/src/simcore_service_invitations/cli.py @@ -1,56 +1,38 @@ import getpass import logging -from typing import Optional -import rich import typer from cryptography.fernet import Fernet from models_library.emails import LowerCaseEmailStr -from pydantic import HttpUrl, SecretStr, ValidationError, parse_obj_as +from models_library.invitations import InvitationContent, InvitationInputs +from pydantic import EmailStr, HttpUrl, 
TypeAdapter, ValidationError from rich.console import Console from servicelib.utils_secrets import generate_password -from settings_library.utils_cli import create_settings_command +from settings_library.utils_cli import ( + create_settings_command, + create_version_callback, + print_as_envfile, +) from . import web_server from ._meta import PROJECT_NAME, __version__ from .core.settings import ApplicationSettings, MinimalApplicationSettings -from .invitations import ( - InvalidInvitationCode, - InvitationContent, - InvitationInputs, - create_invitation_link, - extract_invitation_code_from, +from .services.invitations import ( + InvalidInvitationCodeError, + create_invitation_link_and_content, + extract_invitation_code_from_query, extract_invitation_content, ) -# SEE setup entrypoint 'simcore_service_invitations.cli:app' -app = typer.Typer(name=PROJECT_NAME) -log = logging.getLogger(__name__) - -err_console = Console(stderr=True) - - -def _version_callback(value: bool): - if value: - rich.print(__version__) - raise typer.Exit() - +_logger = logging.getLogger(__name__) +_err_console = Console(stderr=True) -@app.callback() -def main( - ctx: typer.Context, - version: Optional[bool] = ( - typer.Option( - None, - "--version", - callback=_version_callback, - is_eager=True, - ) - ), -): - """o2s2parc invitation maker""" - assert ctx # nosec - assert version or not version # nosec +# SEE setup entrypoint 'simcore_service_invitations.cli:main' +main = typer.Typer(name=PROJECT_NAME) +main.command()( + create_settings_command(settings_cls=ApplicationSettings, logger=_logger) +) +main.callback()(create_version_callback(__version__)) # @@ -58,7 +40,7 @@ def main( # -@app.command() +@main.command() def generate_key( ctx: typer.Context, ): @@ -68,19 +50,19 @@ def generate_key( export INVITATIONS_SECRET_KEY=$(invitations-maker generate-key) """ assert ctx # nosec - print(Fernet.generate_key().decode()) + print(Fernet.generate_key().decode()) # noqa: T201 -@app.command() -def generate_dotenv(ctx: typer.Context, auto_password: bool = False): - """Generates an example of environment variables file (or dot-envfile) +@main.command() +def echo_dotenv( + ctx: typer.Context, *, auto_password: bool = False, minimal: bool = True +): + """Echos an example of environment variables file (or dot-envfile) Usage sample: - $ invitations-maker generate-dotenv > .env - + $ invitations-maker echo-dotenv > .env $ cat .env - $ set -o allexport; source .env; set +o allexport """ assert ctx # nosec @@ -93,34 +75,40 @@ def generate_dotenv(ctx: typer.Context, auto_password: bool = False): ) or generate_password(length=32) settings = ApplicationSettings.create_from_envs( - INVITATIONS_OSPARC_URL="http://127.0.0.1:8000", + INVITATIONS_OSPARC_URL="http://127.0.0.1:8000", # NOSONAR + INVITATIONS_DEFAULT_PRODUCT="s4llite", INVITATIONS_SECRET_KEY=Fernet.generate_key().decode(), INVITATIONS_USERNAME=username, INVITATIONS_PASSWORD=password, ) - for name, value in settings.dict().items(): - if name.startswith("INVITATIONS_"): - value = ( - f"{value.get_secret_value()}" if isinstance(value, SecretStr) else value - ) - print(f"{name}={'null' if value is None else value}") + print_as_envfile( + settings, + compact=False, + verbose=True, + show_secrets=True, + exclude_unset=minimal, + ) -@app.command() +@main.command() def invite( ctx: typer.Context, email: str = typer.Argument( ..., - callback=lambda v: parse_obj_as(LowerCaseEmailStr, v), + callback=lambda v: TypeAdapter(LowerCaseEmailStr).validate_python(v), help="Custom invitation for a 
given guest", ), issuer: str = typer.Option( - ..., help=InvitationInputs.__fields__["issuer"].field_info.description + ..., help=InvitationInputs.model_fields["issuer"].description + ), + trial_account_days: int = typer.Option( + None, + help=InvitationInputs.model_fields["trial_account_days"].description, ), - trial_account_days: Optional[int] = typer.Option( + product: str = typer.Option( None, - help=InvitationInputs.__fields__["trial_account_days"].field_info.description, + help=InvitationInputs.model_fields["product"].description, ), ): """Creates an invitation link for user with 'email' and issued by 'issuer'""" @@ -129,19 +117,22 @@ def invite( invitation_data = InvitationInputs( issuer=issuer, - guest=email, + guest=TypeAdapter(EmailStr).validate_python(email), trial_account_days=trial_account_days, + extra_credits_in_usd=None, + product=product, ) - invitation_link = create_invitation_link( + invitation_link, _ = create_invitation_link_and_content( invitation_data=invitation_data, - secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), + secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), # pylint:disable=no-member base_url=settings.INVITATIONS_OSPARC_URL, + default_product=settings.INVITATIONS_DEFAULT_PRODUCT, ) - print(invitation_link) + print(invitation_link) # noqa: T201 -@app.command() +@main.command() def extract(ctx: typer.Context, invitation_url: str): """Validates code and extracts invitation's content""" @@ -150,23 +141,24 @@ def extract(ctx: typer.Context, invitation_url: str): try: invitation: InvitationContent = extract_invitation_content( - invitation_code=extract_invitation_code_from( - parse_obj_as(HttpUrl, invitation_url) + invitation_code=extract_invitation_code_from_query( + TypeAdapter(HttpUrl).validate_python(invitation_url) ), - secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), + secret_key=settings.INVITATIONS_SECRET_KEY.get_secret_value().encode(), # pylint:disable=no-member + default_product=settings.INVITATIONS_DEFAULT_PRODUCT, ) + assert invitation.product is not None # nosec - rich.print(invitation.json(indent=1)) - except (InvalidInvitationCode, ValidationError): - err_console.print("[bold red]Invalid code[/bold red]") - + print(invitation.model_dump_json(indent=1)) # noqa: T201 -app.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) + except (InvalidInvitationCodeError, ValidationError): + _err_console.print("[bold red]Invalid code[/bold red]") -@app.command() +@main.command() def serve( ctx: typer.Context, + *, reload: bool = False, ): """Starts server with http API""" diff --git a/services/invitations/src/simcore_service_invitations/core/application.py b/services/invitations/src/simcore_service_invitations/core/application.py index f2089493914..fcfea7234dd 100644 --- a/services/invitations/src/simcore_service_invitations/core/application.py +++ b/services/invitations/src/simcore_service_invitations/core/application.py @@ -1,21 +1,28 @@ -from typing import Optional - from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) from servicelib.fastapi.openapi import override_fastapi_openapi_method +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) from .._meta import ( API_VERSION, API_VTAG, APP_FINISHED_BANNER_MSG, + APP_NAME, APP_STARTED_BANNER_MSG, PROJECT_NAME, SUMMARY, ) from ..api.routes import setup_api_routes +from . 
import exceptions_handlers from .settings import ApplicationSettings -def create_app(settings: Optional[ApplicationSettings] = None) -> FastAPI: +def create_app(settings: ApplicationSettings | None = None) -> FastAPI: app = FastAPI( title=f"{PROJECT_NAME} web API", @@ -28,14 +35,23 @@ def create_app(settings: Optional[ApplicationSettings] = None) -> FastAPI: override_fastapi_openapi_method(app) # STATE - app.state.settings = settings or ApplicationSettings() + app.state.settings = settings or ApplicationSettings() # type: ignore[call-arg] assert app.state.settings.API_VERSION == API_VERSION # nosec + if app.state.settings.INVITATIONS_TRACING: + setup_tracing(app, app.state.settings.INVITATIONS_TRACING, APP_NAME) + # PLUGINS SETUP setup_api_routes(app) + if app.state.settings.INVITATIONS_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) + + if app.state.settings.INVITATIONS_TRACING: + initialize_fastapi_app_tracing(app) + # ERROR HANDLERS - # ... add here ... + exceptions_handlers.setup(app) # EVENTS async def _on_startup() -> None: diff --git a/services/invitations/src/simcore_service_invitations/core/exceptions_handlers.py b/services/invitations/src/simcore_service_invitations/core/exceptions_handlers.py new file mode 100644 index 00000000000..47c72be56a8 --- /dev/null +++ b/services/invitations/src/simcore_service_invitations/core/exceptions_handlers.py @@ -0,0 +1,40 @@ +import logging + +from fastapi import FastAPI, Request, status +from fastapi.responses import JSONResponse +from servicelib.logging_errors import create_troubleshotting_log_kwargs + +from ..services.invitations import InvalidInvitationCodeError + +_logger = logging.getLogger(__name__) + +INVALID_INVITATION_URL_MSG = "Invalid invitation link" + + +def handle_invalid_invitation_code_error(request: Request, exception: Exception): + assert isinstance(exception, InvalidInvitationCodeError) # nosec + user_msg = INVALID_INVITATION_URL_MSG + + _logger.warning( + **create_troubleshotting_log_kwargs( + user_msg, + error=exception, + error_context={ + "request.method": f"{request.method}", + "request.url": f"{request.url}", + "request.body": getattr(request, "_json", None), + }, + tip="An invitation link could not be extracted", + ) + ) + + return JSONResponse( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + content={"detail": user_msg}, + ) + + +def setup(app: FastAPI): + app.add_exception_handler( + InvalidInvitationCodeError, handle_invalid_invitation_code_error + ) diff --git a/services/invitations/src/simcore_service_invitations/core/settings.py b/services/invitations/src/simcore_service_invitations/core/settings.py index 722e34f162b..2df10527929 100644 --- a/services/invitations/src/simcore_service_invitations/core/settings.py +++ b/services/invitations/src/simcore_service_invitations/core/settings.py @@ -1,15 +1,19 @@ from functools import cached_property -from typing import Optional, cast - -from pydantic import Field, HttpUrl, PositiveInt, SecretStr, validator -from settings_library.base import BaseCustomSettings -from settings_library.basic_types import BuildTargetEnum, LogLevel, VersionTag +from typing import Annotated + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.products import ProductName +from pydantic import AliasChoices, Field, HttpUrl, SecretStr, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.basic_types import 
LogLevel, VersionTag +from settings_library.tracing import TracingSettings from settings_library.utils_logging import MixinLoggingSettings from .._meta import API_VERSION, API_VTAG, PROJECT_NAME -class _BaseApplicationSettings(BaseCustomSettings, MixinLoggingSettings): +class _BaseApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): """Base settings of any osparc service's app""" # CODE STATICS --------------------------------------------------------- @@ -17,39 +21,47 @@ class _BaseApplicationSettings(BaseCustomSettings, MixinLoggingSettings): APP_NAME: str = PROJECT_NAME API_VTAG: VersionTag = API_VTAG - # IMAGE BUILDTIME ------------------------------------------------------ - # @Makefile - SC_BUILD_DATE: Optional[str] = None - SC_BUILD_TARGET: Optional[BuildTargetEnum] = None - SC_VCS_REF: Optional[str] = None - SC_VCS_URL: Optional[str] = None - - # @Dockerfile - SC_BOOT_TARGET: Optional[BuildTargetEnum] = None - SC_HEALTHCHECK_TIMEOUT: Optional[PositiveInt] = Field( - default=None, - description="If a single run of the check takes longer than timeout seconds " - "then the check is considered to have failed." - "It takes retries consecutive failures of the health check for the container to be considered unhealthy.", - ) - SC_USER_ID: Optional[int] = None - SC_USER_NAME: Optional[str] = None - # RUNTIME ----------------------------------------------------------- - INVITATIONS_LOGLEVEL: LogLevel = Field( - default=LogLevel.INFO, env=["INVITATIONS_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"] - ) + INVITATIONS_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "INVITATIONS_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ), + ] = LogLevel.INFO + + INVITATIONS_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "INVITATIONS_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + + INVITATIONS_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "INVITATIONS_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY @cached_property def LOG_LEVEL(self): return self.INVITATIONS_LOGLEVEL - @validator("INVITATIONS_LOGLEVEL") + @field_validator("INVITATIONS_LOGLEVEL", mode="before") @classmethod - def valid_log_level(cls, value: str) -> str: - # NOTE: mypy is not happy without the cast - return cast(str, cls.validate_log_level(value)) + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) class MinimalApplicationSettings(_BaseApplicationSettings): @@ -59,14 +71,27 @@ class MinimalApplicationSettings(_BaseApplicationSettings): are not related to the web server. 
""" - INVITATIONS_SECRET_KEY: SecretStr = Field( - ..., - description="Secret key to generate invitations" - 'TIP: python3 -c "from cryptography.fernet import *; print(Fernet.generate_key())"', - min_length=44, - ) - - INVITATIONS_OSPARC_URL: HttpUrl = Field(..., description="Target platform") + INVITATIONS_SWAGGER_API_DOC_ENABLED: Annotated[ + bool, Field(description="If true, it displays swagger doc at /doc") + ] = True + + INVITATIONS_SECRET_KEY: Annotated[ + SecretStr, + Field( + description="Secret key to generate invitations. " + "TIP: simcore-service-invitations generate-key", + min_length=44, + ), + ] + + INVITATIONS_OSPARC_URL: Annotated[HttpUrl, Field(description="Target platform")] + INVITATIONS_DEFAULT_PRODUCT: Annotated[ + ProductName, + Field( + description="Default product if not specified in the request. " + "WARNING: this product must be defined in INVITATIONS_OSPARC_URL", + ), + ] class ApplicationSettings(MinimalApplicationSettings): @@ -75,13 +100,25 @@ class ApplicationSettings(MinimalApplicationSettings): These settings includes extra configuration for the http-API """ - INVITATIONS_USERNAME: str = Field( - ..., - description="Username for HTTP Basic Auth. Required if started as a web app.", - min_length=3, - ) - INVITATIONS_PASSWORD: SecretStr = Field( - ..., - description="Password for HTTP Basic Auth. Required if started as a web app.", - min_length=10, - ) + INVITATIONS_USERNAME: Annotated[ + str, + Field( + description="Username for HTTP Basic Auth. Required if started as a web app.", + min_length=3, + ), + ] + INVITATIONS_PASSWORD: Annotated[ + SecretStr, + Field( + description="Password for HTTP Basic Auth. Required if started as a web app.", + min_length=10, + ), + ] + INVITATIONS_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + INVITATIONS_TRACING: Annotated[ + TracingSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ), + ] diff --git a/services/invitations/src/simcore_service_invitations/invitations.py b/services/invitations/src/simcore_service_invitations/invitations.py deleted file mode 100644 index 45d99bda5f2..00000000000 --- a/services/invitations/src/simcore_service_invitations/invitations.py +++ /dev/null @@ -1,212 +0,0 @@ -import base64 -import binascii -import logging -from datetime import datetime -from typing import Optional, cast -from urllib import parse - -from cryptography.fernet import Fernet, InvalidToken -from models_library.emails import LowerCaseEmailStr -from pydantic import ( - BaseModel, - Field, - HttpUrl, - PositiveInt, - ValidationError, - parse_obj_as, -) -from starlette.datastructures import URL - -logger = logging.getLogger(__name__) - -# -# Errors -# - - -class InvalidInvitationCode(Exception): - ... - - -# -# Models -# - - -class InvitationInputs(BaseModel): - """Input data necessary to create an invitation""" - - issuer: str = Field( - ..., - description="Identifies who issued the invitation. E.g. an email, a service name etc", - min_length=1, - max_length=30, - ) - guest: LowerCaseEmailStr = Field( - ..., - description="Invitee's email. Note that the registration can ONLY be used with this email", - ) - trial_account_days: Optional[PositiveInt] = Field( - None, - description="If set, this invitation will activate a trial account." 
- "Sets the number of days from creation until the account expires", - ) - - -class InvitationContent(InvitationInputs): - """Data in an invitation""" - - # avoid using default to mark exactly the time - created: datetime = Field(..., description="Timestamp for creation") - - def as_invitation_inputs(self) -> InvitationInputs: - return self.copy(exclude={"created"}) - - -class _ContentWithShortNames(InvitationContent): - """Helper model to serialize/deserialize to json using shorter field names""" - - @classmethod - def serialize(cls, model_data: InvitationContent) -> str: - """Exports to json using *short* aliases and values in order to produce shorter codes""" - model_w_short_aliases = cls.construct(**model_data.dict(exclude_unset=True)) - return model_w_short_aliases.json(exclude_unset=True, by_alias=True) - - @classmethod - def deserialize(cls, raw_data: str) -> InvitationContent: - """Parses a json string and returns InvitationContent model""" - model_w_short_aliases = cls.parse_raw(raw_data) - return InvitationContent.construct( - **model_w_short_aliases.dict(exclude_unset=True) - ) - - class Config: - allow_population_by_field_name = True # NOTE: can parse using field names - allow_mutation = False - anystr_strip_whitespace = True - # NOTE: Can export with alias: short aliases to minimize the size of serialization artifact - fields = { - "issuer": { - "alias": "i", - }, - "guest": { - "alias": "g", - }, - "trial_account_days": { - "alias": "t", - }, - "created": { - "alias": "c", - }, - } - - -# -# Utils -# - - -def _build_link( - base_url: str, - code_url_safe: str, -) -> HttpUrl: - r = URL("/registration").include_query_params(invitation=code_url_safe) - - # Adds query to fragment - base_url = f"{base_url.rstrip('/')}/" - url = URL(base_url).replace(fragment=f"{r}") - return cast(HttpUrl, parse_obj_as(HttpUrl, f"{url}")) - - -def extract_invitation_code_from(invitation_url: HttpUrl) -> str: - """Parses url and extracts invitation""" - try: - query_params = dict(parse.parse_qsl(URL(invitation_url.fragment).query)) - invitation_code: str = query_params["invitation"] - return invitation_code - except KeyError as err: - logger.debug("Invalid invitation: %s", err) - raise InvalidInvitationCode from err - - -def _fernet_encrypt_as_urlsafe_code( - data: bytes, - secret_key: bytes, -) -> bytes: - fernet = Fernet(secret_key) - code: bytes = fernet.encrypt(data) - return base64.urlsafe_b64encode(code) - - -def _create_invitation_code( - invitation_data: InvitationInputs, secret_key: bytes -) -> bytes: - """Produces url-safe invitation code in bytes""" - - # builds content - content = InvitationContent( - **invitation_data.dict(), - created=datetime.utcnow(), - ) - - content_jsonstr: str = _ContentWithShortNames.serialize(content) - assert "\n" not in content_jsonstr # nosec - - # encrypts contents - return _fernet_encrypt_as_urlsafe_code( - data=content_jsonstr.encode(), - secret_key=secret_key, - ) - - -# -# API -# - - -def create_invitation_link( - invitation_data: InvitationInputs, secret_key: bytes, base_url: HttpUrl -) -> HttpUrl: - - invitation_code = _create_invitation_code( - invitation_data=invitation_data, secret_key=secret_key - ) - # Adds message as the invitation in query - url = _build_link( - base_url=base_url, - code_url_safe=invitation_code.decode(), - ) - return url - - -def decrypt_invitation(invitation_code: str, secret_key: bytes) -> InvitationContent: - """ - - WARNING: invitation_code should not be taken directly from the url fragment without 'parse_invitation_code' - 
- raises cryptography.fernet.InvalidToken if code has a different secret_key (see test_invalid_invitation_secret) - raises pydantic.ValidationError if sent invalid data (see test_invalid_invitation_data) - raises binascii.Error if code is not fernet (binascii.Error)) - """ - # decode urlsafe (symmetric from base64.urlsafe_b64encode(encrypted)) - code: bytes = base64.urlsafe_b64decode(invitation_code) - - fernet = Fernet(secret_key) - decryted: bytes = fernet.decrypt(token=code) - - # parses serialized invitation - content = _ContentWithShortNames.deserialize(raw_data=decryted.decode()) - return content - - -def extract_invitation_content( - invitation_code: str, secret_key: bytes -) -> InvitationContent: - """As decrypt_invitation but raises InvalidInvitationCode if fails""" - try: - return decrypt_invitation( - invitation_code=invitation_code, secret_key=secret_key - ) - except (InvalidToken, ValidationError, binascii.Error) as err: - logger.debug("Invalid code: %s", err) - raise InvalidInvitationCode from err diff --git a/services/invitations/src/simcore_service_invitations/main.py b/services/invitations/src/simcore_service_invitations/main.py new file mode 100644 index 00000000000..4a21e994b31 --- /dev/null +++ b/services/invitations/src/simcore_service_invitations/main.py @@ -0,0 +1,24 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_invitations.core.application import create_app +from simcore_service_invitations.core.settings import ApplicationSettings + +the_settings = ApplicationSettings.create_from_envs() + +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=the_settings.log_level) # NOSONAR +logging.root.setLevel(the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=the_settings.INVITATIONS_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=the_settings.INVITATIONS_LOG_FILTER_MAPPING, + tracing_settings=the_settings.INVITATIONS_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(the_settings) diff --git a/services/invitations/src/simcore_service_invitations/services/__init__.py b/services/invitations/src/simcore_service_invitations/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/invitations/src/simcore_service_invitations/services/invitations.py b/services/invitations/src/simcore_service_invitations/services/invitations.py new file mode 100644 index 00000000000..6200dcf6408 --- /dev/null +++ b/services/invitations/src/simcore_service_invitations/services/invitations.py @@ -0,0 +1,173 @@ +import base64 +import binascii +import logging +from urllib import parse + +from cryptography.fernet import Fernet, InvalidToken +from models_library.invitations import InvitationContent, InvitationInputs +from models_library.products import ProductName +from pydantic import ConfigDict, HttpUrl, TypeAdapter, ValidationError +from starlette.datastructures import URL + +_logger = logging.getLogger(__name__) + + +def _to_initial(v: str): + return v[0] + + +class InvalidInvitationCodeError(Exception): + ... 
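# NOTE: minimal, standalone sketch (not part of the service; a toy `Payload` model stands in
# for the real InvitationContent, and only pydantic v2 is assumed) of the short-alias trick
# used by _ContentWithShortNames below: an alias_generator shrinks every field name to its
# initial so the serialized JSON stays as small as possible before it gets encrypted.
import json

from pydantic import BaseModel, ConfigDict


def _initial(field_name: str) -> str:
    # "issuer" -> "i", "guest" -> "g", ...
    return field_name[0]


class Payload(BaseModel):
    model_config = ConfigDict(alias_generator=_initial, populate_by_name=True)

    issuer: str
    guest: str


short_json = Payload(issuer="issuerid", guest="invitedguest@company.com").model_dump_json(
    by_alias=True
)
assert json.loads(short_json) == {"i": "issuerid", "g": "invitedguest@company.com"}
assert Payload.model_validate_json(short_json).guest == "invitedguest@company.com"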
+ + +class _ContentWithShortNames(InvitationContent): + """Helper model to serialize/deserialize to json using shorter field names""" + + @classmethod + def serialize(cls, model_obj: InvitationContent) -> str: + """Exports to json using *short* aliases and values in order to produce shorter codes""" + model_w_short_aliases_json: str = cls.model_construct( + **model_obj.model_dump(exclude_unset=True) + ).model_dump_json(exclude_unset=True, by_alias=True) + # NOTE: json arguments try to minimize the amount of data + # serialized. The CONS is that it relies on models in the code + # that might change over time. This might lead to some datasets in codes + # that fail in deserialization + return model_w_short_aliases_json + + @classmethod + def deserialize(cls, raw_json: str) -> InvitationContent: + """Parses a json string and returns InvitationContent model""" + model_w_short_aliases = cls.model_validate_json(raw_json) + return InvitationContent.model_construct( + **model_w_short_aliases.model_dump(exclude_unset=True) + ) + + model_config = ConfigDict( + # NOTE: Can export with alias: short aliases to minimize the size of serialization artifact + alias_generator=_to_initial, + populate_by_name=True, # NOTE: can parse using field names + frozen=True, + str_strip_whitespace=True, + ) + + +# +# Utils +# + + +def _build_link( + base_url: str, + code_url_safe: str, +) -> HttpUrl: + r = URL("/registration").include_query_params(invitation=code_url_safe) + + # Adds query to fragment + url = URL(f"{base_url.rstrip('/')}/").replace(fragment=f"{r}") + return TypeAdapter(HttpUrl).validate_python(f"{url}") + + +def _fernet_encrypt_as_urlsafe_code( + data: bytes, + secret_key: bytes, +) -> bytes: + fernet = Fernet(secret_key) + code: bytes = fernet.encrypt(data) + return base64.urlsafe_b64encode(code) + + +def _create_invitation_code( + content: InvitationContent, + secret_key: bytes, +) -> bytes: + """Produces url-safe invitation code in bytes""" + # shorten names + content_jsonstr: str = _ContentWithShortNames.serialize(content) + assert "\n" not in content_jsonstr # nosec + + # encrypts contents + return _fernet_encrypt_as_urlsafe_code( + data=content_jsonstr.encode(), + secret_key=secret_key, + ) + + +# +# API +# + + +def create_invitation_link_and_content( + invitation_data: InvitationInputs, + secret_key: bytes, + base_url: HttpUrl, + default_product: ProductName, +) -> tuple[HttpUrl, InvitationContent]: + content = InvitationContent.create_from_inputs(invitation_data, default_product) + code = _create_invitation_code(content, secret_key) + # Adds message as the invitation in query + link = _build_link( + base_url=f"{base_url}", + code_url_safe=code.decode(), + ) + return link, content + + +def extract_invitation_code_from_query(invitation_url: HttpUrl) -> str: + """Parses url and extracts invitation code from url's query""" + if not invitation_url.fragment: + msg = "Invalid link format: fragment missing" + raise InvalidInvitationCodeError(msg) + + try: + query_params = dict(parse.parse_qsl(URL(invitation_url.fragment).query)) + invitation_code: str = query_params["invitation"] + return invitation_code + except KeyError as err: + msg = "Invalid link format: fragment misses `invitation` link" + raise InvalidInvitationCodeError(msg) from err + + +def decrypt_invitation( + invitation_code: str, secret_key: bytes, default_product: ProductName +) -> InvitationContent: + """ + + WARNING: invitation_code should not be taken directly from the url fragment without 'parse_invitation_code' + + raises 
cryptography.fernet.InvalidToken if code has a different secret_key (see test_invalid_invitation_secret) + raises pydantic.ValidationError if sent invalid data (see test_invalid_invitation_data) + raises binascii.Error if code is not fernet (binascii.Error)) + """ + # decode urlsafe (symmetric from base64.urlsafe_b64encode(encrypted)) + code: bytes = base64.urlsafe_b64decode(invitation_code) + + fernet = Fernet(secret_key) + decryted: bytes = fernet.decrypt(token=code) + + # parses serialized invitation + content = _ContentWithShortNames.deserialize(raw_json=decryted.decode()) + if content.product is None: + content.product = default_product + return content + + +def extract_invitation_content( + invitation_code: str, secret_key: bytes, default_product: ProductName +) -> InvitationContent: + """As decrypt_invitation but raises InvalidInvitationCode if fails""" + try: + content = decrypt_invitation( + invitation_code=invitation_code, + secret_key=secret_key, + default_product=default_product, + ) + assert content.product is not None # nosec + return content + + except (InvalidToken, ValidationError, binascii.Error) as err: + msg = ( + "Failed while decripting. TIP: secret key at encryption might be different" + ) + raise InvalidInvitationCodeError(msg) from err diff --git a/services/invitations/src/simcore_service_invitations/web_main.py b/services/invitations/src/simcore_service_invitations/web_main.py deleted file mode 100644 index 1b6a9924799..00000000000 --- a/services/invitations/src/simcore_service_invitations/web_main.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Main application to be deployed by uvicorn (or equivalent) server - -""" -import logging - -from fastapi import FastAPI -from simcore_service_invitations.core.application import create_app -from simcore_service_invitations.core.settings import ApplicationSettings - -the_settings = ApplicationSettings.create_from_envs() - -# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 -logging.basicConfig(level=the_settings.log_level) # NOSONAR -logging.root.setLevel(the_settings.log_level) - -# SINGLETON FastAPI app -the_app: FastAPI = create_app(the_settings) diff --git a/services/invitations/src/simcore_service_invitations/web_server.py b/services/invitations/src/simcore_service_invitations/web_server.py index 47366934206..92015153841 100644 --- a/services/invitations/src/simcore_service_invitations/web_server.py +++ b/services/invitations/src/simcore_service_invitations/web_server.py @@ -4,7 +4,7 @@ def start( - log_level: Literal["info", "debug", "warning", "error"], reload: bool = False + log_level: Literal["info", "debug", "warning", "error"], *, reload: bool = False ): uvicorn.run( "simcore_service_invitations.web_main:the_app", diff --git a/services/invitations/tests/unit/api/conftest.py b/services/invitations/tests/unit/api/conftest.py index 41a447329a6..c558ac496ad 100644 --- a/services/invitations/tests/unit/api/conftest.py +++ b/services/invitations/tests/unit/api/conftest.py @@ -4,12 +4,11 @@ # pylint: disable=too-many-arguments import json -from typing import Iterator, Optional +from collections.abc import Iterator import httpx import pytest from fastapi.testclient import TestClient -from pytest import FixtureRequest from pytest_simcore.helpers.typing_env import EnvVarsDict from simcore_service_invitations.core.application import create_app @@ -19,15 +18,15 @@ def client(app_environment: EnvVarsDict) -> Iterator[TestClient]: print(f"app_environment={json.dumps(app_environment)}") app = create_app() - 
print("settings:\n", app.state.settings.json(indent=1)) + print("settings:\n", app.state.settings.model_dump_json(indent=1)) with TestClient(app, base_url="http://testserver.test") as client: yield client @pytest.fixture(params=["username", "password", "both", None]) def invalid_basic_auth( - request: FixtureRequest, fake_user_name: str, fake_password: str -) -> Optional[httpx.BasicAuth]: + request: pytest.FixtureRequest, fake_user_name: str, fake_password: str +) -> httpx.BasicAuth | None: invalid_case = request.param if invalid_case is None: diff --git a/services/invitations/tests/unit/api/test_api_dependencies.py b/services/invitations/tests/unit/api/test_api_dependencies.py index ed23f793bdc..38e94a52d74 100644 --- a/services/invitations/tests/unit/api/test_api_dependencies.py +++ b/services/invitations/tests/unit/api/test_api_dependencies.py @@ -3,23 +3,22 @@ # pylint: disable=unused-variable # pylint: disable=too-many-arguments -from typing import Optional import httpx from fastapi import status from fastapi.testclient import TestClient from simcore_service_invitations._meta import API_VTAG -from simcore_service_invitations.invitations import InvitationInputs +from simcore_service_invitations.services.invitations import InvitationInputs def test_invalid_http_basic_auth( client: TestClient, - invalid_basic_auth: Optional[httpx.BasicAuth], + invalid_basic_auth: httpx.BasicAuth | None, invitation_data: InvitationInputs, ): response = client.post( f"/{API_VTAG}/invitations", - json=invitation_data.dict(), + json=invitation_data.model_dump(), auth=invalid_basic_auth, ) assert response.status_code == status.HTTP_401_UNAUTHORIZED, f"{response.json()=}" diff --git a/services/invitations/tests/unit/api/test_api_invitations.py b/services/invitations/tests/unit/api/test_api_invitations.py index 49c128259ad..84f97fb45fe 100644 --- a/services/invitations/tests/unit/api/test_api_invitations.py +++ b/services/invitations/tests/unit/api/test_api_invitations.py @@ -7,38 +7,47 @@ import httpx from fastapi import status from fastapi.testclient import TestClient -from simcore_service_invitations._meta import API_VTAG -from simcore_service_invitations.api._invitations import ( - INVALID_INVITATION_URL_MSG, - _InvitationContentAndLink, +from hypothesis import HealthCheck, given, settings +from hypothesis import strategies as st +from models_library.api_schemas_invitations.invitations import ( + ApiInvitationContentAndLink, ) -from simcore_service_invitations.invitations import ( +from models_library.products import ProductName +from simcore_service_invitations._meta import API_VTAG +from simcore_service_invitations.api._invitations import INVALID_INVITATION_URL_MSG +from simcore_service_invitations.services.invitations import ( InvitationContent, InvitationInputs, - create_invitation_link, + create_invitation_link_and_content, ) +@settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) +@given(invitation_input=st.builds(InvitationInputs, guest=st.emails())) def test_create_invitation( + invitation_input: InvitationInputs, client: TestClient, basic_auth: httpx.BasicAuth, - invitation_data: InvitationInputs, ): response = client.post( f"/{API_VTAG}/invitations", - json={ - "issuer": invitation_data.issuer, - "guest": invitation_data.guest, - "trial_account_days": invitation_data.trial_account_days, - }, + json=invitation_input.model_dump(exclude_none=True), auth=basic_auth, ) assert response.status_code == status.HTTP_200_OK, f"{response.json()=}" - invitation = 
_InvitationContentAndLink(**response.json()) - assert invitation.issuer == invitation_data.issuer - assert invitation.guest == invitation_data.guest - assert invitation.trial_account_days == invitation_data.trial_account_days + invitation = ApiInvitationContentAndLink(**response.json()) + assert invitation.issuer == invitation_input.issuer + assert invitation.guest == invitation_input.guest + assert invitation.trial_account_days == invitation_input.trial_account_days + + # checks issue with `//` reported in https://github.com/ITISFoundation/osparc-simcore/issues/7055 + assert invitation.invitation_url + assert invitation.invitation_url.path == "/" + + assert invitation.product + if invitation_input.product: + assert invitation.product == invitation_input.product def test_check_invitation( @@ -59,18 +68,20 @@ def test_check_invitation( # up ot here, identifcal to above. # Let's use invitation link - invitation_url = _InvitationContentAndLink.parse_obj(response.json()).invitation_url + invitation_url = ApiInvitationContentAndLink.model_validate( + response.json() + ).invitation_url # check invitation_url response = client.post( f"/{API_VTAG}/invitations:extract", - json={"invitation_url": invitation_url}, + json={"invitation_url": f"{invitation_url}"}, auth=basic_auth, ) assert response.status_code == 200, f"{response.json()=}" # decrypted invitation should be identical to request above - invitation = InvitationContent.parse_obj(response.json()) + invitation = InvitationContent.model_validate(response.json()) assert invitation.issuer == invitation_data.issuer assert invitation.guest == invitation_data.guest assert invitation.trial_account_days == invitation_data.trial_account_days @@ -81,23 +92,25 @@ def test_check_valid_invitation( basic_auth: httpx.BasicAuth, invitation_data: InvitationInputs, secret_key: str, + default_product: ProductName, ): - invitation_url = create_invitation_link( + invitation_url, _ = create_invitation_link_and_content( invitation_data=invitation_data, secret_key=secret_key.encode(), base_url=f"{client.base_url}", + default_product=default_product, ) # check invitation_url response = client.post( f"/{API_VTAG}/invitations:extract", - json={"invitation_url": invitation_url}, + json={"invitation_url": f"{invitation_url}"}, auth=basic_auth, ) assert response.status_code == 200, f"{response.json()=}" # decrypted invitation should be identical to request above - invitation = InvitationContent.parse_obj(response.json()) + invitation = InvitationContent.model_validate(response.json()) assert invitation.issuer == invitation_data.issuer assert invitation.guest == invitation_data.guest @@ -109,24 +122,26 @@ def test_check_invalid_invitation_with_different_secret( basic_auth: httpx.BasicAuth, invitation_data: InvitationInputs, another_secret_key: str, + default_product: ProductName, ): - invitation_url = create_invitation_link( + invitation_url, _ = create_invitation_link_and_content( invitation_data=invitation_data, secret_key=another_secret_key, # <-- NOTE: DIFFERENT secret base_url=f"{client.base_url}", + default_product=default_product, ) # check invitation_url response = client.post( f"/{API_VTAG}/invitations:extract", - json={"invitation_url": invitation_url}, + json={"invitation_url": f"{invitation_url}"}, auth=basic_auth, ) assert ( response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY ), f"{response.json()=}" - assert INVALID_INVITATION_URL_MSG == response.json()["detail"] + assert response.json()["detail"] == INVALID_INVITATION_URL_MSG def 
test_check_invalid_invitation_with_wrong_fragment( @@ -145,7 +160,7 @@ def test_check_invalid_invitation_with_wrong_fragment( response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY ), f"{response.json()=}" - assert INVALID_INVITATION_URL_MSG == response.json()["detail"] + assert response.json()["detail"] == INVALID_INVITATION_URL_MSG def test_check_invalid_invitation_with_wrong_code( @@ -153,14 +168,16 @@ def test_check_invalid_invitation_with_wrong_code( basic_auth: httpx.BasicAuth, invitation_data: InvitationInputs, another_secret_key: str, + default_product: ProductName, ): - invitation_url = create_invitation_link( + invitation_url, _ = create_invitation_link_and_content( invitation_data=invitation_data, secret_key=another_secret_key, # <-- NOTE: DIFFERENT secret base_url=f"{client.base_url}", + default_product=default_product, ) - invitation_url_with_invalid_code = invitation_url[:-3] + invitation_url_with_invalid_code = f"{invitation_url}"[:-3] # check invitation_url response = client.post( @@ -172,4 +189,4 @@ def test_check_invalid_invitation_with_wrong_code( response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY ), f"{response.json()=}" - assert INVALID_INVITATION_URL_MSG == response.json()["detail"] + assert response.json()["detail"] == INVALID_INVITATION_URL_MSG diff --git a/services/invitations/tests/unit/api/test_api_meta.py b/services/invitations/tests/unit/api/test_api_meta.py index cee4afd13c9..4fe4f39b22c 100644 --- a/services/invitations/tests/unit/api/test_api_meta.py +++ b/services/invitations/tests/unit/api/test_api_meta.py @@ -19,7 +19,7 @@ def test_healthcheck(client: TestClient): def test_meta(client: TestClient): response = client.get(f"/{API_VTAG}/meta") assert response.status_code == status.HTTP_200_OK - meta = _Meta.parse_obj(response.json()) + meta = _Meta.model_validate(response.json()) - response = client.get(meta.docs_url) + response = client.get(f"{meta.docs_url}") assert response.status_code == status.HTTP_200_OK diff --git a/services/invitations/tests/unit/conftest.py b/services/invitations/tests/unit/conftest.py index 8902df77e40..1bed4825448 100644 --- a/services/invitations/tests/unit/conftest.py +++ b/services/invitations/tests/unit/conftest.py @@ -9,10 +9,10 @@ import simcore_service_invitations from cryptography.fernet import Fernet from faker import Faker -from pytest import FixtureRequest, MonkeyPatch +from models_library.products import ProductName +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict -from simcore_service_invitations.invitations import InvitationInputs +from simcore_service_invitations.services.invitations import InvitationInputs pytest_plugins = [ "pytest_simcore.cli_runner", @@ -61,34 +61,53 @@ def fake_password(faker: Faker) -> str: @pytest.fixture def app_environment( - monkeypatch: MonkeyPatch, + monkeypatch: pytest.MonkeyPatch, secret_key: str, fake_user_name: str, fake_password: str, + default_product: ProductName, ) -> EnvVarsDict: - - envs = setenvs_from_dict( + return setenvs_from_dict( monkeypatch, { "INVITATIONS_SECRET_KEY": secret_key, "INVITATIONS_OSPARC_URL": "https://myosparc.org", + "INVITATIONS_DEFAULT_PRODUCT": default_product, "INVITATIONS_USERNAME": fake_user_name, "INVITATIONS_PASSWORD": fake_password, + "INVITATIONS_TRACING": "null", }, ) - return envs - @pytest.fixture(params=[True, False]) -def is_trial_account(request: FixtureRequest) -> bool: +def 
is_trial_account(request: pytest.FixtureRequest) -> bool: return request.param @pytest.fixture -def invitation_data(is_trial_account: bool, faker: Faker) -> InvitationInputs: - return InvitationInputs( - issuer="LicenseRequestID=123456789", - guest=faker.email(), - trial_account_days=faker.pyint(min_value=1) if is_trial_account else None, - ) +def default_product() -> ProductName: + return "s4llite" + + +@pytest.fixture(params=[None, "osparc", "s4llite", "s4laca"]) +def product(request: pytest.FixtureRequest) -> ProductName | None: + # NOTE: INVITATIONS_DEFAULT_PRODUCT includes the default product field is not defined + return request.param + + +@pytest.fixture +def invitation_data( + is_trial_account: bool, faker: Faker, product: ProductName | None +) -> InvitationInputs: + # first version + kwargs = { + "issuer": "LicenseRequestID=123456789", + "guest": faker.email(), + "trial_account_days": faker.pyint(min_value=1) if is_trial_account else None, + } + # next version, can include product + if product: + kwargs["product"] = product + + return InvitationInputs.model_validate(kwargs) diff --git a/services/invitations/tests/unit/test__model_examples.py b/services/invitations/tests/unit/test__model_examples.py index 75c090e8110..b295c457ea8 100644 --- a/services/invitations/tests/unit/test__model_examples.py +++ b/services/invitations/tests/unit/test__model_examples.py @@ -5,25 +5,28 @@ # pylint: disable=unused-variable import itertools -import json from typing import Any import pytest import simcore_service_invitations import simcore_service_invitations.api._invitations from pydantic import BaseModel -from pytest_simcore.pydantic_models import iter_model_examples_in_module +from pytest_simcore.pydantic_models import ( + assert_validation_model, + iter_model_examples_in_module, +) @pytest.mark.parametrize( "model_cls, example_name, example_data", itertools.chain( iter_model_examples_in_module(simcore_service_invitations.api._invitations), - iter_model_examples_in_module(simcore_service_invitations.invitations), + iter_model_examples_in_module(simcore_service_invitations.services.invitations), ), ) def test_model_examples( - model_cls: type[BaseModel], example_name: int, example_data: Any + model_cls: type[BaseModel], example_name: str, example_data: Any ): - print(example_name, ":", json.dumps(example_data)) - assert model_cls.parse_obj(example_data) + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/services/invitations/tests/unit/test__symmetric_encryption.py b/services/invitations/tests/unit/test__symmetric_encryption.py index f3475a4dede..0b74a6ae37e 100644 --- a/services/invitations/tests/unit/test__symmetric_encryption.py +++ b/services/invitations/tests/unit/test__symmetric_encryption.py @@ -1,10 +1,16 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + import base64 import json import os from urllib.parse import parse_qsl, urlparse +import pytest from cryptography.fernet import Fernet, InvalidToken -from pytest import MonkeyPatch +from faker import Faker from starlette.datastructures import URL @@ -44,18 +50,45 @@ def consume(url): raise except InvalidToken as err: - # TODO: cannot decode print("Invalid Key", err) raise -def test_encrypt_and_decrypt(monkeypatch: MonkeyPatch): +@pytest.fixture( + params=[ + "en_US", # English (United States) + "fr_FR", # French (France) + "de_DE", # German (Germany) + "ru_RU", # Russian + 
"ja_JP", # Japanese + "zh_CN", # Chinese (Simplified) + "ko_KR", # Korean + "ar_EG", # Arabic (Egypt) + "he_IL", # Hebrew (Israel) + "hi_IN", # Hindi (India) + "th_TH", # Thai (Thailand) + "vi_VN", # Vietnamese (Vietnam) + "ta_IN", # Tamil (India) + ] +) +def fake_email(request): + locale = request.param + faker = Faker(locale) + # Use a localized name for the username part of the email + name = faker.name().replace(" ", "").replace(".", "").lower() + # Construct the email address + return f"{name}@example.{locale.split('_')[-1].lower()}" + + +def test_encrypt_and_decrypt(monkeypatch: pytest.MonkeyPatch, fake_email: str): secret_key = Fernet.generate_key() monkeypatch.setenv("SECRET_KEY", secret_key.decode()) # invitation generator app - invitation_url = produce(guest_email="guest@gmail.com") + invitation_url = produce(guest_email=fake_email) + assert invitation_url.fragment # osparc side invitation_data = consume(invitation_url) print(json.dumps(invitation_data, indent=1)) + assert invitation_data["guest"] == fake_email diff --git a/services/invitations/tests/unit/test_cli.py b/services/invitations/tests/unit/test_cli.py index d8edc74c775..0c4bf15c7a8 100644 --- a/services/invitations/tests/unit/test_cli.py +++ b/services/invitations/tests/unit/test_cli.py @@ -5,39 +5,50 @@ import os +import pytest from faker import Faker -from pytest import MonkeyPatch +from models_library.products import ProductName +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import load_dotenv, setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import load_dotenv, setenvs_from_dict from simcore_service_invitations._meta import API_VERSION -from simcore_service_invitations.cli import app +from simcore_service_invitations.cli import main from simcore_service_invitations.core.settings import ApplicationSettings -from simcore_service_invitations.invitations import InvitationInputs +from simcore_service_invitations.services.invitations import InvitationInputs from typer.testing import CliRunner def test_cli_help_and_version(cli_runner: CliRunner): # invitations-maker --help - result = cli_runner.invoke(app, "--help") + result = cli_runner.invoke(main, "--help") assert result.exit_code == os.EX_OK, result.output - result = cli_runner.invoke(app, "--version") + result = cli_runner.invoke(main, "--version") assert result.exit_code == os.EX_OK, result.output assert result.stdout.strip() == API_VERSION def test_invite_user_and_check_invitation( - cli_runner: CliRunner, faker: Faker, invitation_data: InvitationInputs + cli_runner: CliRunner, + faker: Faker, + invitation_data: InvitationInputs, + default_product: ProductName, ): # invitations-maker generate-key - result = cli_runner.invoke(app, "generate-key") + result = cli_runner.invoke(main, "generate-key") assert result.exit_code == os.EX_OK, result.output # export INVITATIONS_SECRET_KEY=$(invitations-maker generate-key) - environs = dict( - INVITATIONS_SECRET_KEY=result.stdout.strip(), - INVITATIONS_OSPARC_URL=faker.url(), - ) + environs = { + "INVITATIONS_SECRET_KEY": result.stdout.strip(), + "INVITATIONS_OSPARC_URL": faker.url(), + "INVITATIONS_DEFAULT_PRODUCT": default_product, + } + + expected = { + **invitation_data.model_dump(exclude={"product"}), + "product": environs["INVITATIONS_DEFAULT_PRODUCT"], + } # invitations-maker invite guest@email.com --issuer=me --trial-account-days=3 trial_account = "" @@ -45,43 +56,46 @@ def test_invite_user_and_check_invitation( 
trial_account = f"--trial-account-days={invitation_data.trial_account_days}" result = cli_runner.invoke( - app, + main, f"invite {invitation_data.guest} --issuer={invitation_data.issuer} {trial_account}", env=environs, ) assert result.exit_code == os.EX_OK, result.output - invitation_url = result.stdout - print(invitation_url) + # NOTE: for some reason, when running from CLI the outputs get folded! + invitation_url = result.stdout.replace("\n", "") # invitations-maker extrac https://foo#invitation=123 result = cli_runner.invoke( - app, - f"extract {invitation_url}", + main, + f'extract "{invitation_url}"', env=environs, ) assert result.exit_code == os.EX_OK, result.output - assert invitation_data == InvitationInputs.parse_raw(result.stdout) + assert ( + expected + == TypeAdapter(InvitationInputs).validate_json(result.stdout).model_dump() + ) -def test_generate_dotenv(cli_runner: CliRunner, monkeypatch: MonkeyPatch): - # invitations-maker --generate-dotenv - result = cli_runner.invoke(app, "generate-dotenv --auto-password") +def test_echo_dotenv(cli_runner: CliRunner, monkeypatch: pytest.MonkeyPatch): + # invitations-maker --echo-dotenv + result = cli_runner.invoke(main, "echo-dotenv --auto-password") assert result.exit_code == os.EX_OK, result.output environs = load_dotenv(result.stdout) envs = setenvs_from_dict(monkeypatch, environs) - settings_from_obj = ApplicationSettings.parse_obj(envs) - settings_from_envs = ApplicationSettings() + settings_from_obj = ApplicationSettings.model_validate(envs) + settings_from_envs = ApplicationSettings.create_from_envs() assert settings_from_envs == settings_from_obj def test_list_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): - result = cli_runner.invoke(app, ["settings", "--show-secrets", "--as-json"]) + result = cli_runner.invoke(main, ["settings", "--show-secrets", "--as-json"]) assert result.exit_code == os.EX_OK, result.output print(result.output) - settings = ApplicationSettings.parse_raw(result.output) + settings = ApplicationSettings.model_validate_json(result.output) assert settings == ApplicationSettings.create_from_envs() diff --git a/services/invitations/tests/unit/test_core_settings.py b/services/invitations/tests/unit/test_core_settings.py index 1aa4a921818..7c68e809eda 100644 --- a/services/invitations/tests/unit/test_core_settings.py +++ b/services/invitations/tests/unit/test_core_settings.py @@ -4,28 +4,31 @@ # pylint: disable=too-many-arguments -from pytest import MonkeyPatch +import pytest +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict from pytest_simcore.helpers.typing_env import EnvVarsDict -from pytest_simcore.helpers.utils_envs import setenvs_from_dict from simcore_service_invitations.core.settings import ( ApplicationSettings, MinimalApplicationSettings, ) -def test_valid_cli_application_settings(monkeypatch: MonkeyPatch, secret_key: str): +def test_valid_cli_application_settings( + monkeypatch: pytest.MonkeyPatch, secret_key: str +): setenvs_from_dict( monkeypatch, { "INVITATIONS_SECRET_KEY": secret_key, "INVITATIONS_OSPARC_URL": "https://myosparc.org", + "INVITATIONS_DEFAULT_PRODUCT": "s4llite", }, ) - settings = MinimalApplicationSettings() + settings = MinimalApplicationSettings.create_from_envs() assert settings def test_valid_web_application_settings(app_environment: EnvVarsDict): - settings = ApplicationSettings() + settings = ApplicationSettings.create_from_envs() assert settings diff --git a/services/invitations/tests/unit/test_invitations.py 
b/services/invitations/tests/unit/test_invitations.py index 2eb59358429..770dba67bb9 100644 --- a/services/invitations/tests/unit/test_invitations.py +++ b/services/invitations/tests/unit/test_invitations.py @@ -4,21 +4,22 @@ # pylint: disable=too-many-arguments import binascii -from datetime import datetime +from collections import Counter +from datetime import UTC, datetime from urllib import parse import cryptography.fernet import pytest from faker import Faker +from models_library.invitations import InvitationContent, InvitationInputs +from models_library.products import ProductName from pydantic import BaseModel, ValidationError -from simcore_service_invitations.invitations import ( - InvalidInvitationCode, - InvitationContent, - InvitationInputs, +from simcore_service_invitations.services.invitations import ( + InvalidInvitationCodeError, _ContentWithShortNames, _create_invitation_code, _fernet_encrypt_as_urlsafe_code, - create_invitation_link, + create_invitation_link_and_content, decrypt_invitation, extract_invitation_content, ) @@ -28,7 +29,7 @@ def test_all_invitation_fields_have_short_and_unique_aliases(): # all have short alias all_alias = [] - for field in _ContentWithShortNames.__fields__.values(): + for field in _ContentWithShortNames.model_fields.values(): assert field.alias assert field.alias not in all_alias all_alias.append(field.alias) @@ -38,8 +39,8 @@ def test_import_and_export_invitation_alias_by_alias( invitation_data: InvitationInputs, ): expected_content = InvitationContent( - **invitation_data.dict(), - created=datetime.utcnow(), + **invitation_data.model_dump(), + created=datetime.now(tz=UTC), ) raw_data = _ContentWithShortNames.serialize(expected_content) @@ -51,35 +52,43 @@ def test_export_by_alias_produces_smaller_strings( invitation_data: InvitationInputs, ): content = InvitationContent( - **invitation_data.dict(), - created=datetime.utcnow(), + **invitation_data.model_dump(), + created=datetime.now(tz=UTC), ) raw_data = _ContentWithShortNames.serialize(content) # export by alias produces smaller strings - assert len(raw_data) < len(content.json()) + assert len(raw_data) < len(content.model_dump_json()) def test_create_and_decrypt_invitation( - invitation_data: InvitationInputs, faker: Faker, secret_key: str + invitation_data: InvitationInputs, + faker: Faker, + secret_key: str, + default_product: ProductName, ): - - invitation_link = create_invitation_link( - invitation_data, secret_key=secret_key.encode(), base_url=faker.url() + invitation_link, _ = create_invitation_link_and_content( + invitation_data, + secret_key=secret_key.encode(), + base_url=faker.url(), + default_product=default_product, ) - - print(invitation_link) - - query_params = dict(parse.parse_qsl(URL(invitation_link.fragment).query)) + assert URL(f"{invitation_link}").fragment + query_params = dict(parse.parse_qsl(URL(URL(f"{invitation_link}").fragment).query)) # will raise TokenError or ValidationError invitation = decrypt_invitation( invitation_code=query_params["invitation"], secret_key=secret_key.encode(), + default_product=default_product, ) assert isinstance(invitation, InvitationContent) - assert invitation.dict(exclude={"created"}) == invitation_data.dict() + assert invitation.product is not None + + expected = invitation_data.model_dump(exclude_none=True) + expected.setdefault("product", default_product) + assert invitation.model_dump(exclude={"created"}, exclude_none=True) == expected # @@ -88,26 +97,36 @@ def test_create_and_decrypt_invitation( @pytest.fixture -def 
invitation_code(invitation_data: InvitationInputs, secret_key: str) -> str: - return _create_invitation_code( - invitation_data, secret_key=secret_key.encode() - ).decode() +def invitation_code( + invitation_data: InvitationInputs, secret_key: str, default_product: ProductName +) -> str: + content = InvitationContent.create_from_inputs(invitation_data, default_product) + code = _create_invitation_code(content, secret_key.encode()) + return code.decode() def test_valid_invitation_code( secret_key: str, invitation_code: str, invitation_data: InvitationInputs, + default_product: ProductName, ): invitation = decrypt_invitation( invitation_code=invitation_code, secret_key=secret_key.encode(), + default_product=default_product, ) - assert invitation.dict(exclude={"created"}) == invitation_data.dict() + expected = invitation_data.model_dump(exclude_none=True) + expected.setdefault("product", default_product) + assert invitation.model_dump(exclude={"created"}, exclude_none=True) == expected -def test_invalid_invitation_encoding(secret_key: str, invitation_code: str): +def test_invalid_invitation_encoding( + secret_key: str, + invitation_code: str, + default_product: ProductName, +): my_invitation_code = invitation_code[:-1] # strip last (wrong code!) my_secret_key = secret_key.encode() @@ -115,18 +134,24 @@ def test_invalid_invitation_encoding(secret_key: str, invitation_code: str): decrypt_invitation( invitation_code=my_invitation_code, secret_key=my_secret_key, + default_product=default_product, ) assert f"{error_info.value}" == "Incorrect padding" - with pytest.raises(InvalidInvitationCode): + with pytest.raises(InvalidInvitationCodeError): extract_invitation_content( invitation_code=my_invitation_code, secret_key=my_secret_key, + default_product=default_product, ) -def test_invalid_invitation_secret(another_secret_key: str, invitation_code: str): +def test_invalid_invitation_secret( + another_secret_key: str, + invitation_code: str, + default_product: ProductName, +): my_invitation_code = invitation_code my_secret_key = another_secret_key.encode() @@ -134,33 +159,47 @@ def test_invalid_invitation_secret(another_secret_key: str, invitation_code: str decrypt_invitation( invitation_code=my_invitation_code, secret_key=my_secret_key, + default_product=default_product, ) - with pytest.raises(InvalidInvitationCode): + with pytest.raises(InvalidInvitationCodeError): extract_invitation_content( invitation_code=my_invitation_code, secret_key=my_secret_key, + default_product=default_product, ) -def test_invalid_invitation_data(secret_key: str): +def test_invalid_invitation_data(secret_key: str, default_product: ProductName): # encrypts contents - class OtherData(BaseModel): + class OtherModel(BaseModel): foo: int = 123 - my_secret_key = secret_key.encode() - my_invitation_code = _fernet_encrypt_as_urlsafe_code( - data=OtherData().json().encode(), secret_key=my_secret_key + secret = secret_key.encode() + other_code = _fernet_encrypt_as_urlsafe_code( + data=OtherModel().model_dump_json().encode(), secret_key=secret ) with pytest.raises(ValidationError): decrypt_invitation( - invitation_code=my_invitation_code, - secret_key=my_secret_key, + invitation_code=other_code.decode(), + secret_key=secret, + default_product=default_product, ) - with pytest.raises(InvalidInvitationCode): + with pytest.raises(InvalidInvitationCodeError): extract_invitation_content( - invitation_code=my_invitation_code, - secret_key=my_secret_key, + invitation_code=other_code.decode(), + secret_key=secret, + 
default_product=default_product, ) + + +def test_aliases_uniqueness(): + assert not [ + item + for item, count in Counter( + [field.alias for field in _ContentWithShortNames.model_fields.values()] + ).items() + if count > 1 + ] # nosec diff --git a/services/migration/Dockerfile b/services/migration/Dockerfile index 542b19a4833..77d16f38c6c 100644 --- a/services/migration/Dockerfile +++ b/services/migration/Dockerfile @@ -1,8 +1,24 @@ -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-buster as base +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base LABEL maintainer=sanderegg +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache # simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) ENV SC_USER_ID=8004 \ SC_USER_NAME=scu \ @@ -29,39 +45,45 @@ ENV PATH="${VIRTUAL_ENV}/bin:$PATH" # -------------------------------------------- -FROM base as build +FROM base AS build -RUN apt-get update \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ && apt-get install -y --no-install-recommends \ build-essential \ - git \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* + git +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ # NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" +RUN uv venv "${VIRTUAL_ENV}" -RUN pip --no-cache-dir install --upgrade \ - pip~=23.0 \ + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ wheel \ setuptools -WORKDIR /build +WORKDIR /build/packages/postgres-database # install only base 3rd party dependencies -COPY --chown=scu:scu packages/postgres-database/ . 
-RUN pip --no-cache-dir --quiet \ - install \ - -r requirements/prod.txt \ - && pip freeze - +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install \ + --requirement requirements/prod.txt \ + && uv pip list # -------------------------------------------- -FROM base as production +FROM base AS production ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 # testing defaults ENV POSTGRES_USER=scu \ @@ -76,15 +98,19 @@ WORKDIR /home/scu USER ${SC_USER_NAME} +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu # bring installed package without build tools -COPY --from=build ${VIRTUAL_ENV} ${VIRTUAL_ENV} +COPY --chown=scu:scu --from=build ${VIRTUAL_ENV} ${VIRTUAL_ENV} COPY --chown=scu:scu services/migration/docker services/migration/docker +# https://docs.docker.com/reference/dockerfile/#healthcheck HEALTHCHECK \ --interval=10s \ - --timeout=10s \ - --start-period=1ms \ - --retries=3 \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ CMD [ "/bin/sh", "/home/scu/services/migration/docker/healthcheck.sh"] ENTRYPOINT [ "/bin/sh", "services/migration/docker/entrypoint.sh" ] diff --git a/services/migration/Makefile b/services/migration/Makefile new file mode 100644 index 00000000000..c1441ef7fac --- /dev/null +++ b/services/migration/Makefile @@ -0,0 +1,11 @@ +# +# DEVELOPMENT recipes for migration +# + + +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile + +.PHONY: requirements reqs +requirements reqs: ## (or reqs) compiles pip requirements (.in -> .txt) + @$(MAKE_C) requirements reqs diff --git a/services/migration/docker/entrypoint.sh b/services/migration/docker/entrypoint.sh index 2d1e2aaecdf..3b639e936ed 100755 --- a/services/migration/docker/entrypoint.sh +++ b/services/migration/docker/entrypoint.sh @@ -29,5 +29,5 @@ sc-pg upgrade-and-close echo "DONE" >"${SC_DONE_MARK_FILE}" echo "$INFO Migration Done. Wait forever ..." -# TODO: perhaps we should simply stop??? +echo "$INFO local dir after update: $(ls -al)" exec tail -f /dev/null diff --git a/services/migration/requirements/Makefile b/services/migration/requirements/Makefile new file mode 100644 index 00000000000..3f25442b790 --- /dev/null +++ b/services/migration/requirements/Makefile @@ -0,0 +1,6 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. _migration.txt: _base.txt diff --git a/services/migration/requirements/_base.in b/services/migration/requirements/_base.in new file mode 100644 index 00000000000..69165ed97fa --- /dev/null +++ b/services/migration/requirements/_base.in @@ -0,0 +1,7 @@ +# +# Specifies third-party dependencies for 'services/api-server/src' +# +# NOTE: ALL version constraints MUST be commented +# intra-repo constraints +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt diff --git a/services/migration/requirements/_base.txt b/services/migration/requirements/_base.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/migration/requirements/_test.in b/services/migration/requirements/_test.in new file mode 100644 index 00000000000..19494947b64 --- /dev/null +++ b/services/migration/requirements/_test.in @@ -0,0 +1,25 @@ +# +# Specifies dependencies required to run 'services/api-server/test' +# both for unit and integration tests!! 
+# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +docker +jsonschema +pytest +pytest-asyncio +pytest-cov +pytest-docker +pytest-mock +pytest-runner +python-dotenv +pyyaml +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +tenacity diff --git a/services/migration/requirements/_test.txt b/services/migration/requirements/_test.txt new file mode 100644 index 00000000000..f807b504dae --- /dev/null +++ b/services/migration/requirements/_test.txt @@ -0,0 +1,84 @@ +attrs==25.1.0 + # via + # jsonschema + # pytest-docker + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # requests +charset-normalizer==3.4.1 + # via requests +coverage==7.6.12 + # via pytest-cov +docker==7.1.0 + # via -r requirements/_test.in +greenlet==3.1.1 + # via sqlalchemy +idna==3.10 + # via requests +iniconfig==2.0.0 + # via pytest +jsonschema==4.23.0 + # via -r requirements/_test.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +packaging==24.2 + # via pytest +pluggy==1.5.0 + # via pytest +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-docker + # pytest-mock +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-docker==3.2.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +python-dotenv==1.0.1 + # via -r requirements/_test.in +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +referencing==0.35.1 + # via + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via docker +rpds-py==0.23.1 + # via + # jsonschema + # referencing +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +tenacity==9.0.0 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # mypy + # sqlalchemy2-stubs +urllib3==2.3.0 + # via + # -c requirements/../../../requirements/constraints.txt + # docker + # requests diff --git a/services/migration/requirements/_tools.in b/services/migration/requirements/_tools.in new file mode 100644 index 00000000000..e095b0fe2fc --- /dev/null +++ b/services/migration/requirements/_tools.in @@ -0,0 +1,9 @@ +--constraint ../../../requirements/constraints.txt + +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt + +# basic dev tools +watchdog[watchmedo] diff --git a/services/migration/requirements/_tools.txt b/services/migration/requirements/_tools.txt new file mode 100644 index 00000000000..19e0de20677 --- /dev/null +++ b/services/migration/requirements/_tools.txt @@ -0,0 +1,85 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + 
# via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_test.txt + # pre-commit + # watchdog +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +watchdog==6.0.0 + # via -r requirements/_tools.in +wheel==0.45.1 + # via pip-tools diff --git a/services/migration/requirements/ci.txt b/services/migration/requirements/ci.txt new file mode 100644 index 00000000000..f27407610d3 --- /dev/null +++ b/services/migration/requirements/ci.txt @@ -0,0 +1,16 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'services/director-v2' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +pytest-simcore @ ../../packages/pytest-simcore/ diff --git a/services/migration/requirements/constraints.txt b/services/migration/requirements/constraints.txt new file mode 100644 index 00000000000..af197ee046c --- /dev/null +++ b/services/migration/requirements/constraints.txt @@ -0,0 +1 @@ +# Add here ONLY this package's constraints diff --git a/services/migration/requirements/dev.txt b/services/migration/requirements/dev.txt new file mode 100644 index 00000000000..035e602bb07 --- /dev/null +++ b/services/migration/requirements/dev.txt @@ -0,0 +1,15 @@ +# Shortcut to install all packages needed to develop 'services/director-v2' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/pytest-simcore/ diff --git a/services/migration/requirements/prod.txt b/services/migration/requirements/prod.txt new file mode 100644 index 00000000000..5d9190e482e --- /dev/null +++ b/services/migration/requirements/prod.txt @@ -0,0 +1,10 @@ +# Shortcut to install 'services/director-v2' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt diff --git a/services/migration/tests/conftest.py b/services/migration/tests/conftest.py new file mode 100644 index 00000000000..2ad21378f67 --- /dev/null +++ b/services/migration/tests/conftest.py @@ -0,0 
+1,6 @@ +pytest_plugins = [ + "pytest_simcore.docker_compose", + "pytest_simcore.docker_registry", + "pytest_simcore.docker_swarm", + "pytest_simcore.repository_paths", +] diff --git a/services/migration/tests/integration/test_migration_service.py b/services/migration/tests/integration/test_migration_service.py new file mode 100644 index 00000000000..f32746c1d19 --- /dev/null +++ b/services/migration/tests/integration/test_migration_service.py @@ -0,0 +1,12 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + +pytest_simcore_core_services_selection = ["postgres", "migration"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +def test_migration_service_runs_correctly(docker_stack: dict): + ... diff --git a/services/migration/tests/unit/.gitkeep b/services/migration/tests/unit/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/Dockerfile b/services/notifications/Dockerfile new file mode 100644 index 00000000000..eaff172c13e --- /dev/null +++ b/services/notifications/Dockerfile @@ -0,0 +1,184 @@ +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd services/notifications +# docker build -f Dockerfile -t notifications:prod --target production ../../ +# docker run notifications:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=GitHK + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + # verify that the binary works + && gosu nobody true + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/scu/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv. 
+ENV PATH="${VIRTUAL_ENV}/bin:$PATH" + +# -------------------------- Build stage ------------------- +# Installs build/package management tools and third party dependencies +# +# + /build WORKDIR +# +FROM base AS build + +ENV SC_BUILD_TARGET=build + +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools + +WORKDIR /build + +# install base 3rd party dependencies + + + +# --------------------------Prod-depends-only stage ------------------- +# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) +# +# + /build +# + services/notifications [scu:scu] WORKDIR +# +FROM build AS prod-only-deps + +ENV SC_BUILD_TARGET=prod-only-deps + +WORKDIR /build/services/notifications + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/notifications,target=/build/services/notifications,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + + +# --------------------------Production stage ------------------- +# Final cleanup up to reduce image size and startup setup +# Runs as scu (non-root user) +# +# + /home/scu $HOME = WORKDIR +# + services/notifications [scu:scu] +# +FROM base AS production + +ENV SC_BUILD_TARGET=production \ + SC_BOOT_MODE=production + +ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + +WORKDIR /home/scu + +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +# Copies booting scripts +COPY --chown=scu:scu services/notifications/docker services/notifications/docker +RUN chmod +x services/notifications/docker/*.sh + + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "services/notifications/docker/healthcheck.py", "http://localhost:8000/"] + +ENTRYPOINT [ "/bin/sh", "services/notifications/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/notifications/docker/boot.sh"] + +EXPOSE 8000 + +# --------------------------Development stage ------------------- +# Source code accessible in host but runs in container +# Runs as myu with same gid/uid as host +# Placed at the end to speed-up the build if images targeting production +# +# + /devel WORKDIR +# + services (mounted volume) +# +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/notifications + +WORKDIR /devel + +RUN chown -R scu:scu "${VIRTUAL_ENV}" + +EXPOSE 8000 +EXPOSE 3000 + +ENTRYPOINT ["/bin/sh", "services/notifications/docker/entrypoint.sh"] +CMD ["/bin/sh", "services/notifications/docker/boot.sh"] diff --git a/services/notifications/Makefile b/services/notifications/Makefile new file mode 100644 index 
00000000000..bc14e6354c1 --- /dev/null +++ b/services/notifications/Makefile @@ -0,0 +1,18 @@ +# +# DEVELOPMENT recipes for notifications +# +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile + + +.env-ignore: + $(APP_CLI_NAME) echo-dotenv > $@ + +.PHONY: openapi.json +openapi-specs: openapi.json +openapi.json: .env-ignore ## produces openapi.json + # generating openapi specs file (need to have the environment set for this) + @set -o allexport; \ + source $<; \ + set +o allexport; \ + python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ diff --git a/services/osparc-gateway-server/VERSION b/services/notifications/VERSION similarity index 100% rename from services/osparc-gateway-server/VERSION rename to services/notifications/VERSION diff --git a/services/notifications/docker/boot.sh b/services/notifications/docker/boot.sh new file mode 100755 index 00000000000..8d079d9bc1b --- /dev/null +++ b/services/notifications/docker/boot.sh @@ -0,0 +1,69 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" + +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "Environment :" + printenv | sed 's/=/: /' | sed 's/^/ /' | sort + echo "$INFO" "Python :" + python --version | sed 's/^/ /' + command -v python | sed 's/^/ /' + + cd services/notifications + uv pip --quiet sync requirements/dev.txt + cd - + echo "$INFO" "PIP :" + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-installs debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + +# +# RUNNING application +# + +APP_LOG_LEVEL=${LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/notifications/src/simcore_service_notifications && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${NOTIFICATIONS_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --port 8000 \ + --reload \ + $reload_dir_packages + --reload-dir . \ + --log-level \"${SERVER_LOG_LEVEL}\" + " +else + exec uvicorn simcore_service_notifications.main:the_app \ + --host 0.0.0.0 \ + --port 8000 \ + --log-level "${SERVER_LOG_LEVEL}" \ + --no-access-log +fi diff --git a/services/notifications/docker/entrypoint.sh b/services/notifications/docker/entrypoint.sh new file mode 100755 index 00000000000..1568d6affdc --- /dev/null +++ b/services/notifications/docker/entrypoint.sh @@ -0,0 +1,71 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " +WARNING="WARNING: [$(basename "$0")] " +ERROR="ERROR: [$(basename "$0")] " + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." 
+echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gip and assigns to user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..." + adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." + adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi +fi + + +echo "$INFO Starting $* ..." +echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" +echo " local dir : $(ls -al)" + +exec gosu "$SC_USER_NAME" "$@" diff --git a/services/notifications/docker/healthcheck.py b/services/notifications/docker/healthcheck.py new file mode 100755 index 00000000000..9e3f3274a29 --- /dev/null +++ b/services/notifications/docker/healthcheck.py @@ -0,0 +1,40 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8000/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" +import os +import sys +from urllib.request import urlopen + +SUCCESS, UNHEALTHY = 0, 1 + +# Disabled if boots with debugger (e.g. 
debug, pdb-debug, debug-ptvsd, debugpy, etc) +ok = "debug" in os.environ.get("SC_BOOT_MODE", "").lower() + +# Queries host +# pylint: disable=consider-using-with +ok = ( + ok + or urlopen( + "{host}{baseurl}".format( + host=sys.argv[1], baseurl=os.environ.get("SIMCORE_NODE_BASEPATH", "") + ) # adds a base-path if defined in environ + ).getcode() + == 200 +) + + +sys.exit(SUCCESS if ok else UNHEALTHY) diff --git a/services/notifications/openapi.json b/services/notifications/openapi.json new file mode 100644 index 00000000000..501d6014b46 --- /dev/null +++ b/services/notifications/openapi.json @@ -0,0 +1,54 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "simcore-service-notifications", + "description": "Service used for sending notifications to users via different channels", + "version": "0.0.1" + }, + "servers": [ + { + "url": "/", + "description": "Default server: requests directed to serving url" + } + ], + "paths": { + "/": { + "get": { + "summary": "Check Service Health", + "operationId": "check_service_health__get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthCheckGet" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "HealthCheckGet": { + "properties": { + "timestamp": { + "type": "string", + "title": "Timestamp" + } + }, + "type": "object", + "required": [ + "timestamp" + ], + "title": "HealthCheckGet", + "example": { + "timestamp": "simcore_service_directorv2.api.routes.health@2023-07-03T12:59:12.024551+00:00" + } + } + } + } +} diff --git a/services/notifications/requirements/Makefile b/services/notifications/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/notifications/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/notifications/requirements/_base.in b/services/notifications/requirements/_base.in new file mode 100644 index 00000000000..77bb3fd4051 --- /dev/null +++ b/services/notifications/requirements/_base.in @@ -0,0 +1,19 @@ +# +# Specifies third-party dependencies for 'services/notifications/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + +packaging +pydantic diff --git a/services/notifications/requirements/_base.txt b/services/notifications/requirements/_base.txt new file mode 100644 index 00000000000..bb08727f0d8 --- /dev/null +++ b/services/notifications/requirements/_base.txt @@ -0,0 +1,601 @@ +aio-pika==9.5.5 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.2 + # via aiohttp +alembic==1.15.1 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +anyio==4.9.0 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==25.3.0 + # via + # aiohttp + # jsonschema + # referencing +certifi==2025.1.31 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.1 + # via requests +click==8.1.8 + # via + # rich-toolkit + # typer + # uvicorn +deprecated==1.2.18 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r 
requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.37 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.69.2 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 + # via sqlalchemy +grpcio==1.71.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # via + # httpcore + # uvicorn +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.28.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +importlib-metadata==8.6.1 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jsonschema==4.23.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.2.0 + # via + # aiohttp + # yarl +opentelemetry-api==1.31.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.31.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.31.1 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.31.1 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.31.1 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.52b1 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.52b1 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.52b1 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.52b1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.31.1 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.31.1 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.52b1 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.52b1 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # 
opentelemetry-instrumentation-requests +orjson==3.10.16 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +prometheus-client==0.21.1 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==5.29.4 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==7.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy 
+pycryptodome==3.22.0 + # via stream-zip +pydantic==2.11.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.33.0 + # via 
pydantic +pydantic-extra-types==2.10.3 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.7.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.19.1 + # via rich +pyinstrument==5.0.1 + # via -r 
requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.1.0 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.24.0 + # via + # jsonschema + # referencing +shellingham==1.5.4 + # via typer +six==1.17.0 + # via python-dateutil +sniffio==1.3.1 + # via anyio +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +starlette==0.46.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.15.2 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241206 + # via arrow +typing-extensions==4.13.0 + # via + # aiodebug + # alembic + # anyio + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer + # typing-inspection +typing-inspection==0.4.0 + # via pydantic +urllib3==2.3.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.5 + # via uvicorn +websockets==15.0.1 + # via uvicorn +wrapt==1.17.2 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis 
+yarl==1.18.3 + # via + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/services/notifications/requirements/_test.in b/services/notifications/requirements/_test.in new file mode 100644 index 00000000000..0269b0db420 --- /dev/null +++ b/services/notifications/requirements/_test.in @@ -0,0 +1,24 @@ +# +# Specifies dependencies required to run 'services/notifications/test' +# both for unit and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +asgi-lifespan +coverage +docker +faker +httpx +pytest +pytest-asyncio +pytest-cov +pytest-mock +pytest-runner +python-dotenv diff --git a/services/notifications/requirements/_test.txt b/services/notifications/requirements/_test.txt new file mode 100644 index 00000000000..483fca1f9a3 --- /dev/null +++ b/services/notifications/requirements/_test.txt @@ -0,0 +1,91 @@ +anyio==4.9.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +certifi==2025.1.31 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.1 + # via + # -c requirements/_base.txt + # requests +coverage==7.7.1 + # via + # -r requirements/_test.in + # pytest-cov +docker==7.1.0 + # via -r requirements/_test.in +faker==37.1.0 + # via -r requirements/_test.in +h11==0.14.0 + # via + # -c requirements/_base.txt + # httpcore +httpcore==1.0.7 + # via + # -c requirements/_base.txt + # httpx +httpx==0.28.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests +iniconfig==2.1.0 + # via pytest +packaging==24.2 + # via + # -c requirements/_base.txt + # pytest +pluggy==1.5.0 + # via pytest +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-mock +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +python-dotenv==1.1.0 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan +typing-extensions==4.13.0 + # via + # -c requirements/_base.txt + # anyio +tzdata==2025.2 + # via faker +urllib3==2.3.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # docker + # requests diff --git a/services/notifications/requirements/_tools.in b/services/notifications/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/services/notifications/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/services/notifications/requirements/_tools.txt b/services/notifications/requirements/_tools.txt new file mode 100644 
index 00000000000..4deff3bbf27 --- /dev/null +++ b/services/notifications/requirements/_tools.txt @@ -0,0 +1,82 @@ +astroid==3.3.9 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.8 + # via + # -c requirements/_base.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.18.0 + # via virtualenv +identify==2.6.9 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.7 + # via + # black + # pylint + # virtualenv +pre-commit==4.2.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.6 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # pre-commit +ruff==0.11.2 + # via -r requirements/../../../requirements/devenv.txt +setuptools==78.1.0 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.13.0 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.3 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/services/notifications/requirements/ci.txt b/services/notifications/requirements/ci.txt new file mode 100644 index 00000000000..21975753559 --- /dev/null +++ b/services/notifications/requirements/ci.txt @@ -0,0 +1,23 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'services/notifications' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library/ +simcore-postgres-database @ ../../packages/postgres-database/ +pytest-simcore @ ../../packages/pytest-simcore/ +simcore-service-library[fastapi] @ ../../packages/service-library/ +simcore-settings-library @ ../../packages/settings-library/ + +# installs current package +simcore-service-notifications @ . 
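The ci.txt shortcut above (like dev.txt and prod.txt further below) is only a thin entry point: it chains into the pip-compiled layers (_base.txt, _test.txt, _tools.txt), which in turn pull in the repo-wide constraints through `--requirement`/`--constraint` includes. The following sketch is not part of this PR; it only illustrates how that include chain can be walked to see which files a given shortcut ultimately resolves to, and the path used under `__main__` (running from the repository root) is an assumption for demonstration.

```python
# Illustrative sketch only (not part of this PR): walk the --requirement/--constraint
# include chain of a requirements shortcut such as ci.txt and list every file it pulls in.
from pathlib import Path


def iter_included_files(req_file: Path, _seen: set[Path] | None = None):
    """Yield req_file plus every file reachable via -r/--requirement or -c/--constraint."""
    seen = _seen if _seen is not None else set()
    req_file = req_file.resolve()
    if req_file in seen:
        return
    seen.add(req_file)
    yield req_file
    for raw_line in req_file.read_text().splitlines():
        line = raw_line.strip()
        # check long option names before their short forms so "-r" does not shadow "--requirement"
        for option in ("--requirement", "--constraint", "-r", "-c"):
            if line.startswith(option):
                target = line[len(option):].strip()
                if target and not target.startswith("#"):
                    # pip resolves relative includes against the including file's directory
                    yield from iter_included_files(req_file.parent / target, seen)
                break


if __name__ == "__main__":
    # assumed invocation point: repository root
    for included in iter_included_files(Path("services/notifications/requirements/ci.txt")):
        print(included)
```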
diff --git a/services/notifications/requirements/constraints.txt b/services/notifications/requirements/constraints.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/requirements/dev.txt b/services/notifications/requirements/dev.txt new file mode 100644 index 00000000000..4e73fc7a83a --- /dev/null +++ b/services/notifications/requirements/dev.txt @@ -0,0 +1,23 @@ +# Shortcut to install all packages needed to develop 'services/notifications' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/postgres-database +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library + +# installs current package +--editable . diff --git a/services/notifications/requirements/prod.txt b/services/notifications/requirements/prod.txt new file mode 100644 index 00000000000..f203156b59c --- /dev/null +++ b/services/notifications/requirements/prod.txt @@ -0,0 +1,20 @@ +# Shortcut to install 'services/notifications' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt + +# installs this repo's packages +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library/ +simcore-postgres-database @ ../../packages/postgres-database/ +simcore-service-library[fastapi] @ ../../packages/service-library/ +simcore-settings-library @ ../../packages/settings-library/ + +# installs current package +simcore-service-notifications @ . 
diff --git a/services/notifications/setup.cfg b/services/notifications/setup.cfg new file mode 100644 index 00000000000..fd462156b58 --- /dev/null +++ b/services/notifications/setup.cfg @@ -0,0 +1,16 @@ +[bumpversion] +current_version = 1 +commit = True +message = services/notifications version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function + +[mypy] +plugins = + pydantic.mypy diff --git a/services/notifications/setup.py b/services/notifications/setup.py new file mode 100755 index 00000000000..5ee97d7cddd --- /dev/null +++ b/services/notifications/setup.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-notifications" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Andrei Neagu (GitHK)",) +DESCRIPTION = "Service used for sending notifications to users via different channels" + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-models-library", + "simcore-service-library", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-notifications = simcore_service_notifications.cli:main", + "simcore-service = simcore_service_notifications.cli:main", + ], + }, +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/notifications/src/simcore_service_notifications/__init__.py b/services/notifications/src/simcore_service_notifications/__init__.py new file mode 100644 index 00000000000..94fc632e7af --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/__init__.py @@ -0,0 +1 @@ +from ._meta import __version__ diff --git a/services/notifications/src/simcore_service_notifications/_meta.py b/services/notifications/src/simcore_service_notifications/_meta.py new file mode 100644 index 00000000000..1f054b45d73 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/_meta.py @@ -0,0 +1,36 @@ +"""Application's metadata""" + +from importlib.metadata import distribution, version +from typing import Final + +from packaging.version import Version + +_current_distribution = distribution("simcore-service-notifications") +__version__: str = version("simcore-service-notifications") + + +APP_NAME: Final[str] = _current_distribution.metadata["Name"] +VERSION: Final[Version] = Version(__version__) +API_VTAG: str = f"v{VERSION.major}" + + +def get_summary() -> str: + return _current_distribution.metadata.get_all("Summary", [""])[-1] + + +SUMMARY: Final[str] = get_summary() + + +APP_STARTED_BANNER_MSG = 
rf""" + _______ _ ___ _ _ +(_______) _ (_) / __(_) _ (_) + _ _ ___ _| |_ _ _| |__ _ ____ _____ _| |_ _ ___ ____ ___ +| | | |/ _ (_ _| (_ __| |/ ___(____ (_ _| |/ _ \| _ \ /___) +| | | | |_| || |_| | | | | ( (___/ ___ | | |_| | |_| | | | |___ | +|_| |_|\___/ \__|_| |_| |_|\____\_____| \__|_|\___/|_| |_(___/ + {API_VTAG}""" + + +APP_FINISHED_BANNER_MSG = "{:=^100}".format( + f"πŸŽ‰ App {APP_NAME}=={VERSION} shutdown completed πŸŽ‰" +) diff --git a/services/notifications/src/simcore_service_notifications/api/__init__.py b/services/notifications/src/simcore_service_notifications/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/src/simcore_service_notifications/api/rest/__init__.py b/services/notifications/src/simcore_service_notifications/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/src/simcore_service_notifications/api/rest/_health.py b/services/notifications/src/simcore_service_notifications/api/rest/_health.py new file mode 100644 index 00000000000..5f38f21d5e0 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/api/rest/_health.py @@ -0,0 +1,33 @@ +from typing import Annotated + +import arrow +from fastapi import APIRouter, Depends +from models_library.api_schemas__common.health import HealthCheckGet +from models_library.errors import ( + POSRGRES_DATABASE_UNHEALTHY_MSG, + RABBITMQ_CLIENT_UNHEALTHY_MSG, +) +from servicelib.rabbitmq import RabbitMQClient + +from ...clients.postgres import PostgresLiveness +from .dependencies import get_postgres_liveness, get_rabbitmq_client + +router = APIRouter() + + +class HealthCheckError(RuntimeError): + """Failed a health check""" + + +@router.get("/", response_model=HealthCheckGet) +async def check_service_health( + rabbitmq_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client)], + postgres_liveness: Annotated[PostgresLiveness, Depends(get_postgres_liveness)], +): + if not rabbitmq_client.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + if not postgres_liveness.is_responsive: + raise HealthCheckError(POSRGRES_DATABASE_UNHEALTHY_MSG) + + return HealthCheckGet(timestamp=f"{__name__}@{arrow.utcnow().datetime.isoformat()}") diff --git a/services/notifications/src/simcore_service_notifications/api/rest/dependencies.py b/services/notifications/src/simcore_service_notifications/api/rest/dependencies.py new file mode 100644 index 00000000000..962154ea9f7 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/api/rest/dependencies.py @@ -0,0 +1,26 @@ +"""Free functions to inject dependencies in routes handlers""" + +from typing import Annotated, cast + +from fastapi import Depends, FastAPI, Request +from servicelib.rabbitmq._client_rpc import RabbitMQRPCClient + +from ...clients.postgres import PostgresLiveness +from ...clients.postgres import get_postgres_liveness as get_postgress_db_liveness + + +def get_application(request: Request) -> FastAPI: + return cast(FastAPI, request.app) + + +def get_rabbitmq_client( + app: Annotated[FastAPI, Depends(get_application)], +) -> RabbitMQRPCClient: + assert isinstance(app.state.rabbitmq_rpc_server, RabbitMQRPCClient) # nosec + return app.state.rabbitmq_rpc_server + + +def get_postgres_liveness( + app: Annotated[FastAPI, Depends(get_application)], +) -> PostgresLiveness: + return get_postgress_db_liveness(app) diff --git a/services/notifications/src/simcore_service_notifications/api/rest/routing.py 
b/services/notifications/src/simcore_service_notifications/api/rest/routing.py new file mode 100644 index 00000000000..5fae78c42ce --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/api/rest/routing.py @@ -0,0 +1,14 @@ +from fastapi import FastAPI, HTTPException +from servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +from ._health import router as health_router + + +def initialize_rest_api(app: FastAPI) -> None: + app.include_router(health_router) + + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) diff --git a/services/notifications/src/simcore_service_notifications/api/rpc/__init__.py b/services/notifications/src/simcore_service_notifications/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/src/simcore_service_notifications/api/rpc/routing.py b/services/notifications/src/simcore_service_notifications/api/rpc/routing.py new file mode 100644 index 00000000000..c43bcdb7c05 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/api/rpc/routing.py @@ -0,0 +1,23 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from models_library.api_schemas_notifications import NOTIFICATIONS_RPC_NAMESPACE +from servicelib.rabbitmq import RPCRouter + +from ...clients.rabbitmq import get_rabbitmq_rpc_server + +ROUTERS: list[RPCRouter] = [ + # import and use all routers here +] + + +async def rpc_api_routes_lifespan(app: FastAPI) -> AsyncIterator[State]: + rpc_server = get_rabbitmq_rpc_server(app) + + for router in ROUTERS: + await rpc_server.register_router( + router, NOTIFICATIONS_RPC_NAMESPACE, app + ) # pragma: no cover + + yield {} diff --git a/services/notifications/src/simcore_service_notifications/cli.py b/services/notifications/src/simcore_service_notifications/cli.py new file mode 100644 index 00000000000..13bee086290 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/cli.py @@ -0,0 +1,82 @@ +import logging +import os + +import typer +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.utils_cli import ( + create_settings_command, + create_version_callback, + print_as_envfile, ) + +from ._meta import APP_NAME, __version__ +from .core.settings import ApplicationSettings + +log = logging.getLogger(__name__) + +main = typer.Typer( + name=APP_NAME, + pretty_exceptions_enable=False, + pretty_exceptions_show_locals=False, +) + +main.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def echo_dotenv(ctx: typer.Context, *, minimal: bool = True) -> None: + """Generates and displays a valid environment variables file (also known as dot-envfile) + + Usage: + $ simcore-service echo-dotenv > .env + $ cat .env + $ set -o allexport; source .env; set +o allexport + """ + assert ctx # nosec + + # NOTE: we normally DO NOT USE `os.environ` to capture env vars but this is a special case + # The idea here is to have a command that can generate a **valid** `.env` file that can be used + # to initialize the app. For that reason we fill required fields of the `ApplicationSettings` with + # "fake" but valid values (e.g. generating a password or adding tags such as `replace-with-api-key`). 
+ # Nonetheless, if the caller of this CLI has already some **valid** env vars in the environment we want to use them ... + # and that is why we use `os.environ`. + + settings = ApplicationSettings.create_from_envs( + SC_BOOT_MODE="default", + NOTIFICATIONS_POSTGRES=os.environ.get( + "NOTIFICATIONS_POSTGRES", + PostgresSettings.create_from_envs( + POSTGRES_HOST=os.environ.get( + "POSTGRES_HOST", "replace-with-postgres-host" + ), + POSTGRES_USER=os.environ.get( + "POSTGRES_USER", "replace-with-postgres-user" + ), + POSTGRES_DB=os.environ.get("POSTGRES_DB", "replace-with-postgres-db"), + POSTGRES_PASSWORD=os.environ.get( + "POSTGRES_PASSWORD", "replace-with-postgres-password" + ), + ), + ), + NOTIFICATIONS_RABBITMQ=os.environ.get( + "NOTIFICATIONS_RABBITMQ", + RabbitSettings.create_from_envs( + RABBIT_HOST=os.environ.get("RABBIT_HOST", "replace-with-rabbit-host"), + RABBIT_SECURE=os.environ.get("RABBIT_SECURE", "True"), + RABBIT_USER=os.environ.get("RABBIT_USER", "replace-with-rabbit-user"), + RABBIT_PASSWORD=os.environ.get( + "RABBIT_PASSWORD", "replace-with-rabbit-password" + ), + ), + ), + ) + + print_as_envfile( + settings, + compact=False, + verbose=True, + show_secrets=True, + exclude_unset=minimal, + ) diff --git a/services/notifications/src/simcore_service_notifications/clients/__init__.py b/services/notifications/src/simcore_service_notifications/clients/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/src/simcore_service_notifications/clients/postgres/__init__.py b/services/notifications/src/simcore_service_notifications/clients/postgres/__init__.py new file mode 100644 index 00000000000..e0883fba1ce --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/clients/postgres/__init__.py @@ -0,0 +1,33 @@ +import logging +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.fastapi.postgres_lifespan import PostgresLifespanState +from servicelib.logging_utils import log_context + +from ._liveness import PostgresLiveness + +_logger = logging.getLogger(__name__) + + +async def postgres_lifespan(app: FastAPI, state: State) -> AsyncIterator[State]: + app.state.engine = state[PostgresLifespanState.POSTGRES_ASYNC_ENGINE] + + app.state.postgres_liveness = PostgresLiveness(app) + + with log_context(_logger, logging.INFO, msg="setup postgres health"): + await app.state.postgres_liveness.setup() + + yield {} + + with log_context(_logger, logging.INFO, msg="teardown postgres health"): + await app.state.postgres_liveness.teardown() + + +def get_postgres_liveness(app: FastAPI) -> PostgresLiveness: + assert isinstance(app.state.postgres_liveness, PostgresLiveness) # nosec + return app.state.postgres_liveness + + +__all__: tuple[str, ...] 
= ("PostgresLiveness",) diff --git a/services/notifications/src/simcore_service_notifications/clients/postgres/_liveness.py b/services/notifications/src/simcore_service_notifications/clients/postgres/_liveness.py new file mode 100644 index 00000000000..57bc7a40076 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/clients/postgres/_liveness.py @@ -0,0 +1,43 @@ +import logging +from asyncio import Task +from datetime import timedelta +from typing import Final + +from fastapi import FastAPI +from models_library.healthchecks import IsResponsive, LivenessResult +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task import create_periodic_task +from servicelib.db_asyncpg_utils import check_postgres_liveness +from servicelib.fastapi.db_asyncpg_engine import get_engine +from servicelib.logging_utils import log_catch + +_logger = logging.getLogger(__name__) + +_LIVENESS_CHECK_INTERVAL: Final[timedelta] = timedelta(seconds=10) + + +class PostgresLiveness: + def __init__(self, app: FastAPI) -> None: + self.app = app + + self._liveness_result: LivenessResult = IsResponsive(elapsed=timedelta(0)) + self._task: Task | None = None + + async def _check_task(self) -> None: + self._liveness_result = await check_postgres_liveness(get_engine(self.app)) + + @property + def is_responsive(self) -> bool: + return isinstance(self._liveness_result, IsResponsive) + + async def setup(self) -> None: + self._task = create_periodic_task( + self._check_task, + interval=_LIVENESS_CHECK_INTERVAL, + task_name="postgres_liveness_check", + ) + + async def teardown(self) -> None: + if self._task is not None: + with log_catch(_logger, reraise=False): + await cancel_wait_task(self._task, max_delay=5) diff --git a/services/notifications/src/simcore_service_notifications/clients/rabbitmq.py b/services/notifications/src/simcore_service_notifications/clients/rabbitmq.py new file mode 100644 index 00000000000..3c205c40162 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/clients/rabbitmq.py @@ -0,0 +1,30 @@ +from collections.abc import AsyncIterator +from typing import cast + +from fastapi import FastAPI +from fastapi_lifespan_manager import State +from servicelib.rabbitmq import RabbitMQRPCClient, wait_till_rabbitmq_responsive +from settings_library.rabbit import RabbitSettings + +from ..core.settings import ApplicationSettings + + +async def rabbitmq_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSettings = app.state.settings + rabbit_settings: RabbitSettings = settings.NOTIFICATIONS_RABBITMQ + app.state.rabbitmq_rpc_server = None + + await wait_till_rabbitmq_responsive(rabbit_settings.dsn) + + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="notifications_rpc_server", settings=rabbit_settings + ) + + yield {} + + await app.state.rabbitmq_rpc_server.close() + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) diff --git a/services/notifications/src/simcore_service_notifications/core/__init__.py b/services/notifications/src/simcore_service_notifications/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/src/simcore_service_notifications/core/application.py b/services/notifications/src/simcore_service_notifications/core/application.py new file mode 100644 index 00000000000..5f3245d9d52 --- /dev/null +++ 
b/services/notifications/src/simcore_service_notifications/core/application.py @@ -0,0 +1,66 @@ +import logging + +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + initialize_prometheus_instrumentation, +) +from servicelib.fastapi.openapi import ( + get_common_oas_options, + override_fastapi_openapi_method, +) +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) +from servicelib.logging_utils import config_all_loggers + +from .._meta import API_VTAG, APP_NAME, SUMMARY, VERSION +from ..api.rest.routing import initialize_rest_api +from . import events +from .settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def _initialise_logger(settings: ApplicationSettings): + # SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 + logging.basicConfig(level=settings.LOG_LEVEL.value) # NOSONAR + logging.root.setLevel(settings.LOG_LEVEL.value) + config_all_loggers( + log_format_local_dev_enabled=settings.NOTIFICATIONS_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=settings.NOTIFICATIONS_VOLUMES_LOG_FILTER_MAPPING, + tracing_settings=settings.NOTIFICATIONS_TRACING, + ) + + +def create_app() -> FastAPI: + settings = ApplicationSettings.create_from_envs() + _logger.debug(settings.model_dump_json(indent=2)) + + _initialise_logger(settings) + + assert settings.SC_BOOT_MODE # nosec + app = FastAPI( + debug=settings.SC_BOOT_MODE.is_devel_mode(), + title=APP_NAME, + description=SUMMARY, + version=f"{VERSION}", + openapi_url=f"/api/{API_VTAG}/openapi.json", + lifespan=events.create_app_lifespan(), + **get_common_oas_options(is_devel_mode=settings.SC_BOOT_MODE.is_devel_mode()), + ) + override_fastapi_openapi_method(app) + app.state.settings = settings + + if settings.NOTIFICATIONS_TRACING: + setup_tracing(app, settings.NOTIFICATIONS_TRACING, APP_NAME) # pragma: no cover + + initialize_rest_api(app) + + if settings.NOTIFICATIONS_PROMETHEUS_INSTRUMENTATION_ENABLED: + initialize_prometheus_instrumentation(app) + + if settings.NOTIFICATIONS_TRACING: + initialize_fastapi_app_tracing(app) + + return app diff --git a/services/notifications/src/simcore_service_notifications/core/events.py b/services/notifications/src/simcore_service_notifications/core/events.py new file mode 100644 index 00000000000..879582575c0 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/core/events.py @@ -0,0 +1,58 @@ +from collections.abc import AsyncIterator + +from fastapi import FastAPI +from fastapi_lifespan_manager import LifespanManager, State +from servicelib.fastapi.monitoring import ( + create_prometheus_instrumentationmain_input_state, + prometheus_instrumentation_lifespan, +) +from servicelib.fastapi.postgres_lifespan import ( + create_postgres_database_input_state, + postgres_database_lifespan, +) + +from .._meta import APP_FINISHED_BANNER_MSG, APP_STARTED_BANNER_MSG +from ..api.rpc.routing import rpc_api_routes_lifespan +from ..clients.postgres import postgres_lifespan +from ..clients.rabbitmq import rabbitmq_lifespan +from .settings import ApplicationSettings + + +async def _banner_lifespan(app: FastAPI) -> AsyncIterator[State]: + _ = app + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + yield {} + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + +async def _settings_lifespan(app: FastAPI) -> AsyncIterator[State]: + settings: ApplicationSettings = app.state.settings + yield { + **create_postgres_database_input_state(settings.NOTIFICATIONS_POSTGRES), + 
**create_prometheus_instrumentationmain_input_state( + enabled=settings.NOTIFICATIONS_PROMETHEUS_INSTRUMENTATION_ENABLED + ), + } + + +def create_app_lifespan(): + # WARNING: order matters + app_lifespan = LifespanManager() + app_lifespan.add(_settings_lifespan) + + # - postgres + app_lifespan.add(postgres_database_lifespan) + app_lifespan.add(postgres_lifespan) + + # - rabbitmq + app_lifespan.add(rabbitmq_lifespan) + + # - rpc api routes + app_lifespan.add(rpc_api_routes_lifespan) + + # - prometheus instrumentation + app_lifespan.add(prometheus_instrumentation_lifespan) + + app_lifespan.add(_banner_lifespan) + + return app_lifespan diff --git a/services/notifications/src/simcore_service_notifications/core/settings.py b/services/notifications/src/simcore_service_notifications/core/settings.py new file mode 100644 index 00000000000..6f7e13a546e --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/core/settings.py @@ -0,0 +1,81 @@ +from typing import Annotated + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import BootModeEnum, LogLevel +from pydantic import AliasChoices, Field, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.base import BaseCustomSettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + + +class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings): + LOG_LEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices( + "NOTIFICATIONS_LOGLEVEL", + "LOG_LEVEL", + "LOGLEVEL", + ), + ), + ] = LogLevel.WARNING + + SC_BOOT_MODE: BootModeEnum | None + + NOTIFICATIONS_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "NOTIFICATIONS_VOLUMES_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description=( + "Enables local development log format. WARNING: make sure it is " + "disabled if you want to have structured logs!" 
+ ), + ), + ] = False + + NOTIFICATIONS_VOLUMES_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "NOTIFICATIONS_VOLUMES_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + NOTIFICATIONS_RABBITMQ: Annotated[ + RabbitSettings, + Field( + description="settings for service/rabbitmq", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + NOTIFICATIONS_POSTGRES: Annotated[ + PostgresSettings, + Field( + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + NOTIFICATIONS_TRACING: Annotated[ + TracingSettings | None, + Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ), + ] + + NOTIFICATIONS_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + @field_validator("LOG_LEVEL") + @classmethod + def valid_log_level(cls, value) -> LogLevel: + return LogLevel(cls.validate_log_level(value)) diff --git a/services/notifications/src/simcore_service_notifications/main.py b/services/notifications/src/simcore_service_notifications/main.py new file mode 100644 index 00000000000..8b2e0ed3196 --- /dev/null +++ b/services/notifications/src/simcore_service_notifications/main.py @@ -0,0 +1,3 @@ +from simcore_service_notifications.core.application import create_app + +the_app = create_app() diff --git a/services/notifications/src/simcore_service_notifications/services/__init__.py b/services/notifications/src/simcore_service_notifications/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/notifications/tests/conftest.py b/services/notifications/tests/conftest.py new file mode 100644 index 00000000000..a310c11b5d5 --- /dev/null +++ b/services/notifications/tests/conftest.py @@ -0,0 +1,26 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + + +import pytest +from models_library.basic_types import BootModeEnum +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict + +pytest_plugins = [ + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.postgres_service", + "pytest_simcore.rabbit_service", + "pytest_simcore.repository_paths", +] + + +@pytest.fixture +def mock_environment(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + "LOGLEVEL": "DEBUG", + "SC_BOOT_MODE": BootModeEnum.DEBUG, + }, + ) diff --git a/services/notifications/tests/unit/conftest.py b/services/notifications/tests/unit/conftest.py new file mode 100644 index 00000000000..e1f57c7c5c7 --- /dev/null +++ b/services/notifications/tests/unit/conftest.py @@ -0,0 +1,48 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +from collections.abc import AsyncIterator + +import pytest +import sqlalchemy as sa +from asgi_lifespan import LifespanManager +from fastapi import FastAPI +from fastapi.testclient import TestClient +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from settings_library.rabbit import RabbitSettings +from simcore_service_notifications.core.application import create_app + + +@pytest.fixture +def service_env( + monkeypatch: pytest.MonkeyPatch, + mock_environment: EnvVarsDict, + rabbit_service: RabbitSettings, + postgres_db: sa.engine.Engine, + 
postgres_env_vars_dict: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **mock_environment, + "RABBIT_HOST": rabbit_service.RABBIT_HOST, + "RABBIT_PASSWORD": rabbit_service.RABBIT_PASSWORD.get_secret_value(), + "RABBIT_PORT": f"{rabbit_service.RABBIT_PORT}", + "RABBIT_SECURE": f"{rabbit_service.RABBIT_SECURE}", + "RABBIT_USER": rabbit_service.RABBIT_USER, + **postgres_env_vars_dict, + }, + ) + + +@pytest.fixture +async def initialized_app(service_env: EnvVarsDict) -> AsyncIterator[FastAPI]: + app: FastAPI = create_app() + + async with LifespanManager(app, startup_timeout=30, shutdown_timeout=30): + yield app + + +@pytest.fixture +def test_client(initialized_app: FastAPI) -> TestClient: + return TestClient(initialized_app) diff --git a/services/notifications/tests/unit/test_api_rest__health.py b/services/notifications/tests/unit/test_api_rest__health.py new file mode 100644 index 00000000000..ba418fe7bc3 --- /dev/null +++ b/services/notifications/tests/unit/test_api_rest__health.py @@ -0,0 +1,56 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + + +import pytest +from fastapi import status +from fastapi.testclient import TestClient +from models_library.api_schemas__common.health import HealthCheckGet +from models_library.errors import ( + POSRGRES_DATABASE_UNHEALTHY_MSG, + RABBITMQ_CLIENT_UNHEALTHY_MSG, +) +from models_library.healthchecks import IsNonResponsive +from pytest_mock import MockerFixture +from simcore_service_notifications.api.rest._health import HealthCheckError + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] + + +def test_health_ok(test_client: TestClient): + response = test_client.get("/") + assert response.status_code == status.HTTP_200_OK + assert HealthCheckGet.model_validate(response.json()) + + +@pytest.fixture +def mock_postgres_liveness(mocker: MockerFixture, test_client: TestClient) -> None: + mocker.patch.object( + test_client.app.state.postgres_liveness, + "_liveness_result", + new=IsNonResponsive(reason="fake"), + ) + + +def test_health_postgres_unhealthy( + mock_postgres_liveness: None, test_client: TestClient +): + with pytest.raises(HealthCheckError) as exc: + test_client.get("/") + assert POSRGRES_DATABASE_UNHEALTHY_MSG in f"{exc.value}" + + +@pytest.fixture +def mock_rabbit_healthy(mocker: MockerFixture, test_client: TestClient) -> None: + mocker.patch.object( + test_client.app.state.rabbitmq_rpc_server, "_healthy_state", new=False + ) + + +def test_health_rabbit_unhealthy(mock_rabbit_healthy: None, test_client: TestClient): + with pytest.raises(HealthCheckError) as exc: + test_client.get("/") + assert RABBITMQ_CLIENT_UNHEALTHY_MSG in f"{exc.value}" diff --git a/services/notifications/tests/unit/test_cli.py b/services/notifications/tests/unit/test_cli.py new file mode 100644 index 00000000000..bcfc7925b61 --- /dev/null +++ b/services/notifications/tests/unit/test_cli.py @@ -0,0 +1,57 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument + +import traceback + +import pytest +from click.testing import Result +from pytest_simcore.helpers.monkeypatch_envs import ( + EnvVarsDict, + load_dotenv, + setenvs_from_dict, +) +from simcore_service_notifications.cli import main +from simcore_service_notifications.core.settings import ApplicationSettings +from typer.testing import CliRunner + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] + + +@pytest.fixture +def cli_runner(service_env: EnvVarsDict) -> CliRunner: + return CliRunner() + 
+ +def _format_cli_error(result: Result) -> str: + assert result.exception + tb_message = "\n".join(traceback.format_tb(result.exception.__traceback__)) + return f"Below exception was raised by the cli:\n{tb_message}" + + +async def test_process_cli_options(cli_runner: CliRunner): + result = cli_runner.invoke(main, ["--help"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + result = cli_runner.invoke(main, ["settings"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + result = cli_runner.invoke(main, ["--version"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + +async def test_echo_dotenv(cli_runner: CliRunner, monkeypatch: pytest.MonkeyPatch): + result = cli_runner.invoke(main, ["echo-dotenv"]) + print(result.stdout) + assert result.exit_code == 0, _format_cli_error(result) + + environs = load_dotenv(result.stdout) + + with monkeypatch.context() as patch: + setenvs_from_dict(patch, environs) + assert ApplicationSettings.create_from_envs() diff --git a/services/opentelemetry-collector-config.yaml b/services/opentelemetry-collector-config.yaml new file mode 100644 index 00000000000..7386666956e --- /dev/null +++ b/services/opentelemetry-collector-config.yaml @@ -0,0 +1,28 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 # Default endpoint for OTLP over gRPC + http: + endpoint: 0.0.0.0:4318 # Default endpoint for OTLP over HTTP +exporters: + otlphttp: + endpoint: ${TRACING_OPENTELEMETRY_COLLECTOR_EXPORTER_ENDPOINT} # Adjust to your Jaeger endpoint +service: + pipelines: + traces: + receivers: [otlp] + exporters: [otlphttp] + processors: [batch,probabilistic_sampler,filter/drop_healthcheck] +processors: + batch: + timeout: 5s + send_batch_size: ${TRACING_OPENTELEMETRY_COLLECTOR_BATCH_SIZE} + probabilistic_sampler: + sampling_percentage: ${TRACING_OPENTELEMETRY_COLLECTOR_SAMPLING_PERCENTAGE} + filter/drop_healthcheck: + error_mode: ignore + traces: + span: + - attributes["http.route"] == "healthcheck_readiness_probe" + - attributes["db.statement"] == "PING" and attributes["db.system"] == "redis" diff --git a/services/osparc-gateway-server/.env-devel b/services/osparc-gateway-server/.env-devel deleted file mode 100644 index 62b5b6c2795..00000000000 --- a/services/osparc-gateway-server/.env-devel +++ /dev/null @@ -1,2 +0,0 @@ -COMPUTATIONAL_SIDECAR_IMAGE=itisfoundation/dask-sidecar:master-github-latest -COMPUTATIONAL_SIDECAR_LOG_LEVEL=INFO diff --git a/services/osparc-gateway-server/.gitignore b/services/osparc-gateway-server/.gitignore deleted file mode 100644 index 4d7a877c063..00000000000 --- a/services/osparc-gateway-server/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.osparc-dask* diff --git a/services/osparc-gateway-server/Dockerfile b/services/osparc-gateway-server/Dockerfile deleted file mode 100644 index 990eda6989e..00000000000 --- a/services/osparc-gateway-server/Dockerfile +++ /dev/null @@ -1,170 +0,0 @@ -# syntax=docker/dockerfile:1 -ARG PYTHON_VERSION="3.9.12" -FROM python:${PYTHON_VERSION}-slim-bullseye as base -ARG TARGETPLATFORM -ARG BUILDPLATFORM -RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM" - -LABEL maintainer=mguidon,sanderegg - -# libffi-dev is needed for ARM architectures -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ - set -eux \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - gosu 
\ - libffi-dev \ - libffi7 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - # verify that the binary works - && gosu nobody true - - -# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) -ENV SC_USER_ID=8004 \ - SC_USER_NAME=scu \ - SC_BUILD_TARGET=base \ - SC_BOOT_MODE=default - -RUN adduser \ - --uid ${SC_USER_ID} \ - --disabled-password \ - --gecos "" \ - --shell /bin/sh \ - --home /home/${SC_USER_NAME} \ - ${SC_USER_NAME} - - -ENV LANG=C.UTF-8 \ - PYTHONDONTWRITEBYTECODE=1 \ - VIRTUAL_ENV=/home/scu/.venv - -ENV PATH="${VIRTUAL_ENV}/bin:$PATH" - -# for ARM architecture this helps a lot VS building packages -ENV PIP_EXTRA_INDEX_URL=https://www.piwheels.org/simple - - -EXPOSE 8000 - - -# -------------------------- Build stage ------------------- -# Installs build/package management tools and third party dependencies -# -# + /build WORKDIR -# -FROM base as build - -ENV SC_BUILD_TARGET=build - -RUN rm -f /etc/apt/apt.conf.d/docker-clean -RUN --mount=type=cache,id=basecache,target=/var/cache/apt,mode=0755,sharing=locked \ - --mount=type=cache,id=baseapt,target=/var/lib/apt,mode=0755,sharing=locked \ - set -eux \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - golang-go \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -# NOTE: python virtualenv is used here such that installed packages may be moved to production image easily by copying the venv -RUN python -m venv "${VIRTUAL_ENV}" -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install --upgrade \ - pip~=23.0 \ - wheel \ - setuptools - -WORKDIR /build - -# install base 3rd party dependencies (NOTE: this speeds up devel mode) -COPY --chown=scu:scu services/osparc-gateway-server/requirements/_base.txt . -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install \ - --requirement _base.txt - -# in ARM64 mode there is a catch: the piwheels package does not contain the dask-gateway-proxy executable in 64-bit -RUN dpkgArch="$(dpkg --print-architecture)";\ - case "$dpkgArch" in \ - arm64) git clone --depth 1 --branch 0.9.0 https://github.com/dask/dask-gateway.git \ - && cd dask-gateway/osparc-gateway-server \ - && pip install .\ - ;; \ - esac; - -# --------------------------Prod-depends-only stage ------------------- -# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) -# -# + /build -# + services/osparc-gateway-server [scu:scu] WORKDIR -# -FROM build as prod-only-deps - -ENV SC_BUILD_TARGET=prod-only-deps - -COPY --chown=scu:scu services/osparc-gateway-server/ /build/services/osparc-gateway-server - -WORKDIR /build/services/osparc-gateway-server - -RUN --mount=type=cache,mode=0777,target=/root/.cache/pip \ - pip install \ - --requirement requirements/prod.txt - -# --------------------------Production stage ------------------- -# Final cleanup up to reduce image size and startup setup -# Runs as scu (non-root user) -# -# + /home/scu $HOME = WORKDIR -# + services/osparc-gateway-server [scu:scu] -# -FROM base as production - -ENV SC_BUILD_TARGET=production \ - SC_BOOT_MODE=production - -ENV PYTHONOPTIMIZE=TRUE - -WORKDIR /home/scu - -# bring installed package without build tools -COPY --from=prod-only-deps --chown=scu:scu ${VIRTUAL_ENV} ${VIRTUAL_ENV} -# copy docker entrypoint and boot scripts -COPY --chown=scu:scu services/osparc-gateway-server/docker services/osparc-gateway-server/docker - - -# TODO: Create healthcheck -# HEALTHCHECK \ -# --interval=60s \ -# 
--timeout=60s \ -# --start-period=10s \ -# --retries=3 \ -# CMD ["curl", "-Lf", "http://127.0.0.1:8787/health"] - -ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ] -CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"] - - -# --------------------------Development stage ------------------- -# Source code accessible in host but runs in container -# Runs as scu with same gid/uid as host -# Placed at the end to speed-up the build if images targeting production -# -# + /devel WORKDIR -# + services (mounted volume) -# -FROM build as development - -ENV SC_BUILD_TARGET=development - -WORKDIR /devel -RUN chown -R scu:scu "${VIRTUAL_ENV}" - -# NOTE: devel mode does NOT have HEALTHCHECK - -ENTRYPOINT [ "/bin/sh", "services/osparc-gateway-server/docker/entrypoint.sh" ] -CMD ["/bin/sh", "services/osparc-gateway-server/docker/boot.sh"] diff --git a/services/osparc-gateway-server/Makefile b/services/osparc-gateway-server/Makefile deleted file mode 100644 index 92d7d7712b8..00000000000 --- a/services/osparc-gateway-server/Makefile +++ /dev/null @@ -1,155 +0,0 @@ -# -# Targets for DEVELOPMENT for osparc gateway service service -# -include ../../scripts/common.Makefile -include ../../scripts/common-service.Makefile - - - -APP_PACKAGE_NAME=osparc_gateway_server -SERVICE_NAME=osparc-gateway-server -DASK_SIDECAR_NAME=dask-sidecar - -.env: .env-devel ## creates .env file from defaults in .env-devel - $(if $(wildcard $@), \ - @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\ - @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@) - - - -## -## INFOS -## -.PHONY: info-images info-swarm -define show-meta - $(foreach iid,$(shell docker images "*/$(1):*" --quiet | sort | uniq),\ - docker image inspect $(iid) | jq '.[0] | {tags:.RepoTags, labels:.Config.Labels, arch:.Architecture}';) -endef - -info-images: ## lists tags and labels of built images. 
To display one: 'make target=webserver info-images' - @echo "## $(SERVICE_NAME) images:";\ - docker images */$(SERVICE_NAME):*;\ - $(call show-meta,$(SERVICE_NAME)) - @echo "## $(DASK_SIDECAR_NAME) images:";\ - docker images */$(DASK_SIDECAR_NAME):*;\ - $(call show-meta,$(DASK_SIDECAR_NAME)) - - -info-swarm: ## displays info about stacks and networks -ifneq ($(SWARM_HOSTS), ) - # Stacks in swarm - @docker stack ls - # Containers (tasks) running in '$(SWARM_STACK_NAME)' stack - -@docker stack ps $(SWARM_STACK_NAME) - # Services in '$(SWARM_STACK_NAME)' stack - -@docker stack services $(SWARM_STACK_NAME) - # Networks - @docker network ls -endif - -## -## Running Osparc Dask Gateway -## -SWARM_HOSTS = $(shell docker node ls --format="{{.Hostname}}" 2>$(if $(IS_WIN),NUL,/dev/null)) - -PHONY: .init-swarm -.init-swarm: - # Ensures swarm is initialized - $(if $(SWARM_HOSTS),,docker swarm init --advertise-addr=$(get_my_ip)) - -.PHONY: config -export OSPARC_GATEWAY_CONFIG_FILE_HOST = .osparc-dask-gateway-config.py -export SWARM_STACK_NAME ?= dask-gateway -docker-compose-config-cmd=../../scripts/docker/docker-compose-config.bash -docker-compose-configs = $(wildcard services/docker-compose*.yml) - -$(OSPARC_GATEWAY_CONFIG_FILE_HOST): $(CURDIR)/config/default_config.py ## creates config file from defaults in /config/default_config.py - $(if $(wildcard $@), \ - @echo "WARNING ##### $< is newer than $@ ####"; diff -uN $@ $<; false;,\ - @echo "WARNING ##### $@ does not exist, cloning $< as $@ ############"; cp $< $@) -config: $(OSPARC_GATEWAY_CONFIG_FILE_HOST) ## create default configuration file - -.stack-$(SWARM_STACK_NAME)-development.yml: .env $(docker-compose-configs) - # Creating config for stack with 'local/{service}:development' to $@ - @export DOCKER_REGISTRY=local && \ - export DOCKER_IMAGE_TAG=development && \ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml \ - docker-compose.devel.yml > $@ - - -.stack-$(SWARM_STACK_NAME)-production.yml: .env $(docker-compose-configs) - # Creating config for stack with 'local/{service}:production' to $@ - @export DOCKER_REGISTRY=local && \ - export DOCKER_IMAGE_TAG=production && \ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml > $@ - -.stack-$(SWARM_STACK_NAME)-version.yml: .env $(docker-compose-configs) - # Creating config for stack with '$(DOCKER_REGISTRY)/{service}:${DOCKER_IMAGE_TAG}' to $@ - $(docker-compose-config-cmd) \ - docker-compose.yml \ - docker-compose.local.yml > $@ - - -.PHONY: up-devel up-prod up-version up-latest - -define _show_endpoints -# The following endpoints are available -set -o allexport; \ -source $(CURDIR)/.env; \ -set +o allexport; \ -separator=------------------------------------------------------------------------------------;\ -separator=$${separator}$${separator}$${separator};\ -rows="%-22s | %40s | %12s | %12s\n";\ -TableWidth=100;\ -printf "%22s | %40s | %12s | %12s\n" Name Endpoint User Password;\ -printf "%.$${TableWidth}s\n" "$$separator";\ -printf "$$rows" Dask-Gateway 'http://$(get_my_ip):8000' whatever $(filter-out %.password =,$(shell cat $(OSPARC_GATEWAY_CONFIG_FILE_HOST) | grep c.Authenticator.password)); -endef - -show-endpoints: - @$(_show_endpoints) - - -up-devel: .stack-$(SWARM_STACK_NAME)-development.yml .init-swarm config ## Deploys local development stack and ops stack (pass 'make ops_disabled=1 up-...' 
to disable) - # Deploy stack $(SWARM_STACK_NAME) [back-end] - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) - @$(_show_endpoints) - -up-prod: .stack-$(SWARM_STACK_NAME)-production.yml .init-swarm config ## Deploys local production stack and ops stack (pass 'make ops_disabled=1 up-...' to disable) -ifeq ($(target),) - # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) -else - # deploys ONLY $(target) service - @docker compose --file $< up --detach $(target) -endif - @$(_show_endpoints) - -up up-version: .stack-$(SWARM_STACK_NAME)-version.yml .init-swarm config ## Deploys versioned stack '$(DOCKER_REGISTRY)/{service}:$(DOCKER_IMAGE_TAG)' and ops stack (pass 'make ops_disabled=1 up-...' to disable) - # Deploy stack $(SWARM_STACK_NAME) - @docker stack deploy --with-registry-auth -c $< $(SWARM_STACK_NAME) - @$(_show_endpoints) - -up-latest: - @export DOCKER_IMAGE_TAG=latest && \ - $(MAKE) up-version - -.PHONY: down -down: ## Stops and removes stack - # Removing stacks in reverse order to creation - -@docker stack rm $(SWARM_STACK_NAME) - -@docker stack rm $(SWARM_STACK_NAME)-ops - # Removing generated docker compose configurations, i.e. .stack-* - -@rm $(wildcard .stack-*) - -@rm $(wildcard $(OSPARC_GATEWAY_CONFIG_FILE_HOST)) - -## -## system tests -## -test-system: ## Runs system tests (needs local docker images of osparc-gateway-server and dask-sidecar) - $(MAKE_C) tests/system install-ci - $(MAKE_C) tests/system tests diff --git a/services/osparc-gateway-server/README.md b/services/osparc-gateway-server/README.md deleted file mode 100644 index 1f536df68e4..00000000000 --- a/services/osparc-gateway-server/README.md +++ /dev/null @@ -1 +0,0 @@ -# osparc backend for dask gateway server diff --git a/services/osparc-gateway-server/config/default_config.py b/services/osparc-gateway-server/config/default_config.py deleted file mode 100644 index 4cd75de4d73..00000000000 --- a/services/osparc-gateway-server/config/default_config.py +++ /dev/null @@ -1,12 +0,0 @@ -# pylint: disable=undefined-variable - -# NOTE: this configuration is used by the dask-gateway-server -# it follows [traitlets](https://traitlets.readthedocs.io/en/stable/config.html) configuration files - -# defines the backend to use with the gateway -c.DaskGateway.backend_class = "osparc_gateway_server.backend.osparc.OsparcBackend" # type: ignore -# defines the password for 'simple' authentication -c.Authenticator.password = "asdf" # type: ignore -# defines log levels -c.DaskGateway.log_level = "WARN" # type: ignore -c.Proxy.log_level = "WARN" # type: ignore diff --git a/services/osparc-gateway-server/docker-compose.devel.yml b/services/osparc-gateway-server/docker-compose.devel.yml deleted file mode 100644 index f8ac92aa6bc..00000000000 --- a/services/osparc-gateway-server/docker-compose.devel.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - environment: - - SC_BOOT_MODE=debug-ptvsd - - LOG_LEVEL=debug - - DEBUG=true - volumes: - - ./:/devel/services/osparc-gateway-server - - ../../packages:/devel/packages diff --git a/services/osparc-gateway-server/docker-compose.local.yml b/services/osparc-gateway-server/docker-compose.local.yml deleted file mode 100644 index f5411229c25..00000000000 --- a/services/osparc-gateway-server/docker-compose.local.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - environment: - - SC_BOOT_MODE=${SC_BOOT_MODE:-default} - ports: - - "3100:3000" # ptvsd port 
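Context on the debug wiring being removed here: docker-compose.devel.yml above boots the gateway with SC_BOOT_MODE=debug-ptvsd, docker-compose.local.yml publishes host port 3100 onto container port 3000 for the debugger, and the deleted entrypoint.sh further down pre-installs ptvsd in that mode. A minimal, illustrative sketch of the attach pattern such a setup relies on (assuming ptvsd 4.x's public API; this is not a copy of boot.sh, which instead wraps the server with watchmedo auto-restart):

    import ptvsd  # legacy remote-debugging package referenced by the deleted entrypoint.sh

    # listen inside the container; the local compose override maps host 3100 -> container 3000
    ptvsd.enable_attach(address=("0.0.0.0", 3000))

    # optionally block start-up until an IDE attaches
    ptvsd.wait_for_attach()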
diff --git a/services/osparc-gateway-server/docker-compose.yml b/services/osparc-gateway-server/docker-compose.yml deleted file mode 100644 index acdfe4179db..00000000000 --- a/services/osparc-gateway-server/docker-compose.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.9' -services: - osparc-gateway-server: - image: ${DOCKER_REGISTRY:-itisfoundation}/osparc-gateway-server:${DOCKER_IMAGE_TAG:-latest} - ports: - - "8000:8000" - volumes: - - /var/run/docker.sock:/var/run/docker.sock - networks: - - dask_net - configs: - - source: gateway_config - target: ${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py} - environment: - - GATEWAY_WORKERS_NETWORK=${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net - - GATEWAY_SERVER_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_osparc-gateway-server - - COMPUTATIONAL_SIDECAR_VOLUME_NAME=${SWARM_STACK_NAME:?swarm_stack_name_required}_sidecar_data - - COMPUTATIONAL_SIDECAR_IMAGE=${COMPUTATIONAL_SIDECAR_IMAGE:-local/dask-sidecar:production} - - COMPUTATIONAL_SIDECAR_LOG_LEVEL=${COMPUTATIONAL_SIDECAR_LOG_LEVEL:-WARNING} - - COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS=${COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS:-2} - - COMPUTATION_SIDECAR_NON_USABLE_RAM=${COMPUTATION_SIDECAR_NON_USABLE_RAM:-0} - - GATEWAY_SERVER_ONE_WORKER_PER_NODE=${GATEWAY_SERVER_ONE_WORKER_PER_NODE-True} - - GATEWAY_SERVER_CONFIG_FILE_CONTAINER=${GATEWAY_SERVER_CONFIG_FILE_CONTAINER:-/etc/dask/dask_config.py} - deploy: - placement: - constraints: - - node.role == manager -networks: - dask_net: - name: ${SWARM_STACK_NAME:?swarm_stack_name_required}_dask_net - -volumes: - sidecar_data: null - -configs: - gateway_config: - file: ./${OSPARC_GATEWAY_CONFIG_FILE_HOST:?gateway_config_required} diff --git a/services/osparc-gateway-server/docker/boot.sh b/services/osparc-gateway-server/docker/boot.sh deleted file mode 100755 index 4bec0198115..00000000000 --- a/services/osparc-gateway-server/docker/boot.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh -set -o errexit -set -o nounset - -IFS=$(printf '\n\t') - -INFO="INFO: [$(basename "$0")] " - -# BOOTING application --------------------------------------------- -echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." 
-echo " User :$(id "$(whoami)")" -echo " Workdir :$(pwd)" -echo " env :$(env)" - -if [ "${SC_BUILD_TARGET}" = "development" ]; then - echo "$INFO" "Environment :" - printenv | sed 's/=/: /' | sed 's/^/ /' | sort - echo "$INFO" "Python :" - python --version | sed 's/^/ /' - command -v python | sed 's/^/ /' - cd services/osparc-gateway-server || exit 1 - pip install --no-cache-dir -r requirements/dev.txt - cd - || exit 1 - echo "$INFO" "PIP :" - pip list | sed 's/^/ /' -fi - -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then - exec watchmedo auto-restart \ - --recursive \ - --pattern="*.py;*/src/*" \ - --ignore-patterns="*test*;pytest_simcore/*;setup.py;*ignore*" \ - --ignore-directories -- \ - osparc-gateway-server \ - --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}" \ - --debug -else - exec osparc-gateway-server \ - --config "${GATEWAY_SERVER_CONFIG_FILE_CONTAINER}" -fi diff --git a/services/osparc-gateway-server/docker/entrypoint.sh b/services/osparc-gateway-server/docker/entrypoint.sh deleted file mode 100755 index c70f53f7408..00000000000 --- a/services/osparc-gateway-server/docker/entrypoint.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/sh -set -o errexit -set -o nounset - -IFS=$(printf '\n\t') - -INFO="INFO: [$(basename "$0")] " -WARNING="WARNING: [$(basename "$0")] " -ERROR="ERROR: [$(basename "$0")] " - -# This entrypoint script: -# -# - Executes *inside* of the container upon start as --user [default root] -# - Notice that the container *starts* as --user [default root] but -# *runs* as non-root user [scu] -# -echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." -echo User :"$(id "$(whoami)")" -echo Workdir :"$(pwd)" -echo scuUser :"$(id scu)" - - -if [ "${SC_BUILD_TARGET}" = "development" ] -then - echo "$INFO" "development mode detected..." - # NOTE: expects docker run ... -v $(pwd):/devel/services/osparc-gateway-server - DEVEL_MOUNT=${DEVEL_MOUNT:="/devel/services/osparc-gateway-server"} - - stat $DEVEL_MOUNT > /dev/null 2>&1 || \ - (echo "$ERROR" "You must mount '$DEVEL_MOUNT' to deduce user and group ids" && exit 1) - - echo "setting correct user id/group id..." - HOST_USERID=$(stat --format=%u "${DEVEL_MOUNT}") - HOST_GROUPID=$(stat --format=%g "${DEVEL_MOUNT}") - CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) - if [ "$HOST_USERID" -eq 0 ] - then - echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..." - adduser "$SC_USER_NAME" root - else - echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." - # take host's credentials in $SC_USER_NAME - if [ -z "$CONT_GROUPNAME" ] - then - echo "Creating new group my$SC_USER_NAME" - CONT_GROUPNAME=my$SC_USER_NAME - addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" - else - echo "group already exists" - fi - echo "adding $SC_USER_NAME to group $CONT_GROUPNAME..." 
- adduser "$SC_USER_NAME" "$CONT_GROUPNAME" - - echo "changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" - usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" - - echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; - # change user property of files already around - echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" - find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; - fi -fi - - -if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ] -then - # NOTE: production does NOT pre-installs ptvsd - pip install --no-cache-dir ptvsd -fi - -DOCKER_MOUNT=/var/run/docker.sock -if stat $DOCKER_MOUNT >/dev/null 2>&1; then - echo "$INFO detected docker socket is mounted, adding user to group..." - GROUPID=$(stat --format=%g $DOCKER_MOUNT) - GROUPNAME=scdocker - - if ! addgroup --gid "$GROUPID" $GROUPNAME > /dev/null 2>&1 - then - echo "$WARNING docker group with $GROUPID already exists, getting group name..." - # if group already exists in container, then reuse name - GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) - echo "$WARNING docker group with $GROUPID has name $GROUPNAME" - fi - adduser "$SC_USER_NAME" "$GROUPNAME" -fi - -echo "$INFO Starting osparc-gateway-server ..." -echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" -echo " local dir : $(ls -al)" - -exec gosu "$SC_USER_NAME" "$@" diff --git a/services/osparc-gateway-server/requirements/Makefile b/services/osparc-gateway-server/requirements/Makefile deleted file mode 100644 index 1118bbf105e..00000000000 --- a/services/osparc-gateway-server/requirements/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Targets to pip-compile requirements -# -include ../../../requirements/base.Makefile - -# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt - -_test.txt: _base.txt diff --git a/services/osparc-gateway-server/requirements/_base.in b/services/osparc-gateway-server/requirements/_base.in deleted file mode 100644 index 156e38d9de7..00000000000 --- a/services/osparc-gateway-server/requirements/_base.in +++ /dev/null @@ -1,9 +0,0 @@ -# Specifies third-party dependencies for the 'osparc-gateway-server' -# -# ---constraint ../../../requirements/constraints.txt ---constraint constraints.txt - -aiodocker -dask-gateway-server[local] -pydantic[email,dotenv] diff --git a/services/osparc-gateway-server/requirements/_base.txt b/services/osparc-gateway-server/requirements/_base.txt deleted file mode 100644 index 3b38780862b..00000000000 --- a/services/osparc-gateway-server/requirements/_base.txt +++ /dev/null @@ -1,67 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_base.txt --resolver=backtracking --strip-extras requirements/_base.in -# -aiodocker==0.21.0 - # via -r requirements/_base.in -aiohttp==3.8.4 - # via - # -c requirements/../../../requirements/constraints.txt - # aiodocker - # dask-gateway-server -aiosignal==1.3.1 - # via aiohttp -async-timeout==4.0.2 - # via aiohttp -attrs==22.2.0 - # via aiohttp -cffi==1.15.1 - # via cryptography -charset-normalizer==3.1.0 - # via aiohttp -colorlog==6.7.0 - # via dask-gateway-server -cryptography==39.0.2 - # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server -dask-gateway-server==2023.1.1 - # via -r requirements/_base.in -dnspython==2.3.0 - # via email-validator -email-validator==1.3.1 - # via pydantic -frozenlist==1.3.3 - # via - # aiohttp - # aiosignal -greenlet==2.0.2 - # via sqlalchemy -idna==3.4 - # via - # email-validator - # yarl -multidict==6.0.4 - # via - # aiohttp - # yarl -pycparser==2.21 - # via cffi -pydantic==1.10.2 - # via -r requirements/_base.in -python-dotenv==1.0.0 - # via pydantic -sqlalchemy==1.4.46 - # via - # -c requirements/../../../requirements/constraints.txt - # dask-gateway-server -traitlets==5.9.0 - # via dask-gateway-server -typing-extensions==4.5.0 - # via - # aiodocker - # pydantic -yarl==1.8.2 - # via aiohttp diff --git a/services/osparc-gateway-server/requirements/_test.in b/services/osparc-gateway-server/requirements/_test.in deleted file mode 100644 index 00b5314e3ab..00000000000 --- a/services/osparc-gateway-server/requirements/_test.in +++ /dev/null @@ -1,25 +0,0 @@ -# -# Specifies dependencies required to run 'osparc-gateway-server' -# ---constraint ../../../requirements/constraints.txt - -# Adds base AS CONSTRAINT specs, not requirement. 
-# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt -# ---constraint _base.txt ---constraint ../../dask-sidecar/requirements/_dask-distributed.txt - -coverage -coveralls -dask-gateway -debugpy -docker -faker -pytest -pytest-asyncio -pytest-cov -pytest-icdiff -pytest-instafail -pytest-mock -pytest-sugar -tenacity diff --git a/services/osparc-gateway-server/requirements/_test.txt b/services/osparc-gateway-server/requirements/_test.txt deleted file mode 100644 index 510263aa55e..00000000000 --- a/services/osparc-gateway-server/requirements/_test.txt +++ /dev/null @@ -1,211 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiohttp==3.8.4 - # via - # -c requirements/_base.txt - # dask-gateway -aiosignal==1.3.1 - # via - # -c requirements/_base.txt - # aiohttp -async-timeout==4.0.2 - # via - # -c requirements/_base.txt - # aiohttp -attrs==22.2.0 - # via - # -c requirements/_base.txt - # aiohttp - # pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.1.0 - # via - # -c requirements/_base.txt - # aiohttp - # requests -click==8.1.3 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -cloudpickle==2.2.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed -coverage==6.5.0 - # via - # -r requirements/_test.in - # coveralls - # pytest-cov -coveralls==3.3.1 - # via -r requirements/_test.in -dask==2023.3.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -dask-gateway==2023.1.1 - # via -r requirements/_test.in -debugpy==1.6.6 - # via -r requirements/_test.in -distributed==2023.3.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway -docker==6.0.1 - # via -r requirements/_test.in -docopt==0.6.2 - # via coveralls -exceptiongroup==1.1.0 - # via pytest -faker==17.6.0 - # via -r requirements/_test.in -frozenlist==1.3.3 - # via - # -c requirements/_base.txt - # aiohttp - # aiosignal -fsspec==2023.3.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask -heapdict==1.0.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # zict -icdiff==2.0.6 - # via pytest-icdiff -idna==3.4 - # via - # -c requirements/_base.txt - # requests - # yarl -iniconfig==2.0.0 - # via pytest -jinja2==3.1.2 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -locket==1.0.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # partd -markupsafe==2.1.2 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # jinja2 -msgpack==1.0.5 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -multidict==6.0.4 - # via - # -c requirements/_base.txt - # aiohttp - # yarl -packaging==23.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # docker - # pytest - # pytest-sugar -partd==1.3.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask -pluggy==1.0.0 - # via pytest -pprintpp==0.4.0 - # via pytest-icdiff -psutil==5.9.4 - # via - # -c 
requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -pytest==7.2.2 - # via - # -r requirements/_test.in - # pytest-asyncio - # pytest-cov - # pytest-icdiff - # pytest-instafail - # pytest-mock - # pytest-sugar -pytest-asyncio==0.20.3 - # via -r requirements/_test.in -pytest-cov==4.0.0 - # via -r requirements/_test.in -pytest-icdiff==0.6 - # via -r requirements/_test.in -pytest-instafail==0.4.2 - # via -r requirements/_test.in -pytest-mock==3.10.0 - # via -r requirements/_test.in -pytest-sugar==0.9.6 - # via -r requirements/_test.in -python-dateutil==2.8.2 - # via faker -pyyaml==5.4.1 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -requests==2.28.2 - # via - # coveralls - # docker -six==1.16.0 - # via python-dateutil -sortedcontainers==2.4.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tblib==1.7.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tenacity==8.2.2 - # via -r requirements/_test.in -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 - # via - # coverage - # pytest -toolz==0.12.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # partd -tornado==6.2 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -urllib3==1.26.14 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # docker - # requests -websocket-client==1.5.1 - # via docker -yarl==1.8.2 - # via - # -c requirements/_base.txt - # aiohttp -zict==2.2.0 - # via - # -c requirements/../../dask-sidecar/requirements/_dask-distributed.txt - # distributed diff --git a/services/osparc-gateway-server/requirements/_tools.in b/services/osparc-gateway-server/requirements/_tools.in deleted file mode 100644 index f91a82de59b..00000000000 --- a/services/osparc-gateway-server/requirements/_tools.in +++ /dev/null @@ -1,8 +0,0 @@ ---constraint ../../../requirements/constraints.txt ---constraint _base.txt ---constraint _test.txt - ---requirement ../../../requirements/devenv.txt - -# basic dev tools -watchdog[watchmedo] diff --git a/services/osparc-gateway-server/requirements/_tools.txt b/services/osparc-gateway-server/requirements/_tools.txt deleted file mode 100644 index 05e1ee85445..00000000000 --- a/services/osparc-gateway-server/requirements/_tools.txt +++ /dev/null @@ -1,93 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.15.0 - # via pylint -black==23.1.0 - # via -r requirements/../../../requirements/devenv.txt -build==0.10.0 - # via pip-tools -bump2version==1.0.1 - # via -r requirements/../../../requirements/devenv.txt -cfgv==3.3.1 - # via pre-commit -click==8.1.3 - # via - # -c requirements/_test.txt - # black - # pip-tools -dill==0.3.6 - # via pylint -distlib==0.3.6 - # via virtualenv -filelock==3.9.0 - # via virtualenv -identify==2.5.19 - # via pre-commit -isort==5.12.0 - # via - # -r requirements/../../../requirements/devenv.txt - # pylint -lazy-object-proxy==1.9.0 - # via astroid -mccabe==0.7.0 - # via pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 - # via pre-commit -packaging==23.0 - # via - # -c requirements/_test.txt - # black - # build -pathspec==0.11.0 - # 
via black -pip-tools==6.12.3 - # via -r requirements/../../../requirements/devenv.txt -platformdirs==3.1.0 - # via - # black - # pylint - # virtualenv -pre-commit==3.1.1 - # via -r requirements/../../../requirements/devenv.txt -pylint==2.17.0 - # via -r requirements/../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 - # via - # -c requirements/_test.txt - # pre-commit - # watchdog -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 - # via pylint -typing-extensions==4.5.0 - # via - # -c requirements/_base.txt - # astroid - # black - # pylint -virtualenv==20.20.0 - # via pre-commit -watchdog==2.3.1 - # via -r requirements/_tools.in -wheel==0.38.4 - # via pip-tools -wrapt==1.15.0 - # via astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/osparc-gateway-server/requirements/ci.txt b/services/osparc-gateway-server/requirements/ci.txt deleted file mode 100644 index 62a403e8be1..00000000000 --- a/services/osparc-gateway-server/requirements/ci.txt +++ /dev/null @@ -1,18 +0,0 @@ -# Shortcut to install all packages for the contigous integration (CI) of 'services/director-v2' -# -# - As ci.txt but w/ tests -# -# Usage: -# pip install -r requirements/ci.txt -# - - -# installs base + tests requirements ---requirement _base.txt ---requirement _test.txt - -# installs this repo's packages -../../packages/pytest-simcore/ - -# installs current package -. diff --git a/services/osparc-gateway-server/requirements/dev.txt b/services/osparc-gateway-server/requirements/dev.txt deleted file mode 100644 index f2182d2b170..00000000000 --- a/services/osparc-gateway-server/requirements/dev.txt +++ /dev/null @@ -1,18 +0,0 @@ -# Shortcut to install all packages needed to develop 'services/director-v2' -# -# - As ci.txt but with current and repo packages in develop (edit) mode -# -# Usage: -# pip install -r requirements/dev.txt -# - -# installs base + tests + tools requirements ---requirement _base.txt ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages ---editable ../../packages/pytest-simcore/ - -# installs current package ---editable . diff --git a/services/osparc-gateway-server/requirements/prod.txt b/services/osparc-gateway-server/requirements/prod.txt deleted file mode 100644 index fb384898a6a..00000000000 --- a/services/osparc-gateway-server/requirements/prod.txt +++ /dev/null @@ -1,15 +0,0 @@ -# Shortcut to install 'services/director-v2' for production -# -# - As ci.txt but w/o tests -# -# Usage: -# pip install -r requirements/prod.txt -# - -# installs base requirements ---requirement _base.txt - -# installs this repo's packages - -# installs current package -. diff --git a/services/osparc-gateway-server/setup.cfg b/services/osparc-gateway-server/setup.cfg deleted file mode 100644 index b38fcf7e31e..00000000000 --- a/services/osparc-gateway-server/setup.cfg +++ /dev/null @@ -1,11 +0,0 @@ -[bumpversion] -current_version = 0.0.1 -commit = True -message = services/osparc-gateway-server version: {current_version} β†’ {new_version} -tag = False -commit_args = --no-verify - -[bumpversion:file:VERSION] - -[tool:pytest] -asyncio_mode = auto diff --git a/services/osparc-gateway-server/setup.py b/services/osparc-gateway-server/setup.py deleted file mode 100755 index b517cccd943..00000000000 --- a/services/osparc-gateway-server/setup.py +++ /dev/null @@ -1,58 +0,0 @@ -#! 
/bin/python -import re -import sys -from pathlib import Path - -from setuptools import find_packages, setup - - -def read_reqs(reqs_path: Path) -> set[str]: - return { - r - for r in re.findall( - r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", - reqs_path.read_text(), - re.MULTILINE, - ) - if isinstance(r, str) - } - - -CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent - -INSTALL_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_base.txt")) -TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) - - -SETUP = dict( - name="osparc-gateway-server", - version=(CURRENT_DIR / "VERSION").read_text().strip(), - author="Manuel Guidon (mguidon), Sylvain Anderegg (sanderegg)", - description="Osparc backend for dask-gateway-server", - classifiers=[ - "Development Status :: 1 - Planning", - "License :: OSI Approved :: MIT License", - "Natural Language :: English", - "Programming Language :: Python :: 3.9", - ], - long_description=(CURRENT_DIR / "README.md").read_text(), - license="MIT license", - python_requires="~=3.9", - packages=find_packages(where="src"), - package_dir={ - "": "src", - }, - install_requires=INSTALL_REQUIREMENTS, - test_suite="tests", - tests_require=TEST_REQUIREMENTS, - extras_require={"test": TEST_REQUIREMENTS}, - entry_points={ - "console_scripts": [ - "osparc-gateway-server=osparc_gateway_server.app:start", - ] - }, -) - - -if __name__ == "__main__": - setup(**SETUP) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py b/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py deleted file mode 100644 index 0d83e8059db..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/__init__.py +++ /dev/null @@ -1 +0,0 @@ -package_name = __name__ diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/app.py b/services/osparc-gateway-server/src/osparc_gateway_server/app.py deleted file mode 100644 index 76c5c003310..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import sys - -from dask_gateway_server.app import main - - -def start() -> None: - sys.exit(main()) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py deleted file mode 100644 index 6a42e519f6e..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/errors.py +++ /dev/null @@ -1,14 +0,0 @@ -class OSparcGatewayServerException(Exception): - """Exception raised when there is an exception in oSparc gateway server""" - - -class NoServiceTasksError(OSparcGatewayServerException): - """Exception raised when there is no tasks attached to service""" - - -class TaskNotAssignedError(OSparcGatewayServerException): - """Exception raised when a task is not assigned to a host""" - - -class NoHostFoundError(OSparcGatewayServerException): - """Exception raised when there is no host found""" diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py deleted file mode 100644 index 996b0c010b1..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/models.py +++ /dev/null @@ -1,42 +0,0 @@ -from ipaddress import IPv4Address -from typing import Any, Union - -from pydantic import BaseModel, ByteSize, Field, PositiveFloat, parse_obj_as - -Hostname = str -ResourceName = str 
-ResourceType = Union[int, float] - - -class NodeResources(BaseModel): - memory: ByteSize - cpus: PositiveFloat - others: dict[ResourceName, ResourceType] = Field(default_factory=dict) - - -class NodeInformation(BaseModel): - docker_node_id: str - ip: IPv4Address - resources: NodeResources - - -ClusterInformation = dict[Hostname, NodeInformation] - - -def cluster_information_from_docker_nodes( - nodes_list: list[dict[str, Any]] -) -> ClusterInformation: - return parse_obj_as( - ClusterInformation, - { - node["Description"]["Hostname"]: { - "docker_node_id": node["ID"], - "ip": node["Status"]["Addr"], - "resources": { - "memory": node["Description"]["Resources"]["MemoryBytes"], - "cpus": node["Description"]["Resources"]["NanoCPUs"] / 1e9, - }, - } - for node in nodes_list - }, - ) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py deleted file mode 100644 index eccd94b1f07..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/osparc.py +++ /dev/null @@ -1,335 +0,0 @@ -import asyncio -from importlib.metadata import version -from typing import Any, AsyncGenerator, Union - -import osparc_gateway_server -from aiodocker import Docker -from aiodocker.exceptions import DockerContainerError -from dask_gateway_server.backends.base import PublicException -from dask_gateway_server.backends.db_base import ( - Cluster, - DBBackendBase, - JobStatus, - Worker, - chain, - islice, - timestamp, -) -from osparc_gateway_server.remote_debug import setup_remote_debugging - -from .errors import NoHostFoundError, NoServiceTasksError, TaskNotAssignedError -from .settings import AppSettings, BootModeEnum -from .utils import ( - OSPARC_SCHEDULER_API_PORT, - DockerSecret, - create_docker_secrets_from_tls_certs_for_cluster, - delete_secrets, - get_cluster_information, - get_next_empty_node_hostname, - get_osparc_scheduler_cmd_modifications, - is_service_task_running, - modify_cmd_argument, - start_service, - stop_service, -) - -# -# https://patorjk.com/software/taag/#p=display&v=0&f=Avatar&t=osparc-gateway-server -# -WELCOME_MSG = r""" - ____ ____ ____ ____ ____ ____ ____ ____ ____ _ __ _____ ____ _____ _____ _ ____ ___ _ ____ _____ ____ _ _____ ____ -/ _ \/ ___\/ __\/ _ \/ __\/ _\ / _ \/ _ \/ ___\/ |/ / / __// _ \/__ __\/ __// \ /|/ _ \\ \// / ___\/ __// __\/ \ |\/ __// __\ -| / \|| \| \/|| / \|| \/|| / _____ | | \|| / \|| \| /_____ | | _| / \| / \ | \ | | ||| / \| \ /_____ | \| \ | \/|| | //| \ | \/| -| \_/|\___ || __/| |-||| /| \_\____\| |_/|| |-||\___ || \\____\| |_//| |-|| | | | /_ | |/\||| |-|| / / \____\\___ || /_ | /| \// | /_ | / -\____/\____/\_/ \_/ \|\_/\_\\____/ \____/\_/ \|\____/\_|\_\ \____\\_/ \| \_/ \____\\_/ \|\_/ \|/_/ \____/\____\\_/\_\\__/ \____\\_/\_\ {} - - -""".format( - version(osparc_gateway_server.package_name) -) - - -class OsparcBackend(DBBackendBase): - """A cluster backend that launches osparc workers. 
- - Scheduler are spawned as services in a docker swarm - Workers are spawned as services in a docker swarm - """ - - settings: AppSettings - docker_client: Docker - cluster_secrets: list[DockerSecret] = [] - - async def do_setup(self) -> None: - self.settings = AppSettings() - self.log.info( - "osparc-gateway-server application settings:\n%s", - self.settings.json(indent=2), - ) - - if self.settings.SC_BOOT_MODE in [BootModeEnum.DEBUG]: - setup_remote_debugging(logger=self.log) - - # pylint: disable=attribute-defined-outside-init - self.cluster_start_timeout = self.settings.GATEWAY_CLUSTER_START_TIMEOUT - self.worker_start_timeout = self.settings.GATEWAY_WORKER_START_TIMEOUT - self.docker_client = Docker() - - print(WELCOME_MSG, flush=True) - - async def do_cleanup(self) -> None: - await self.docker_client.close() - self.log.info("osparc-gateway-server closed.") - - async def do_start_cluster( - self, cluster: Cluster - ) -> AsyncGenerator[dict[str, Any], None]: - self.log.debug(f"starting {cluster=}") - self.cluster_secrets.extend( - await create_docker_secrets_from_tls_certs_for_cluster( - self.docker_client, self, cluster - ) - ) - self.log.debug("created '%s' for TLS certification", f"{self.cluster_secrets=}") - - # now we need a scheduler (get these auto-generated entries from dask-gateway base class) - scheduler_env = self.get_scheduler_env(cluster) - scheduler_cmd = self.get_scheduler_command(cluster) - # we need a few modifications for running in docker swarm - scheduler_service_name = f"cluster_{cluster.id}_scheduler" - modifications = get_osparc_scheduler_cmd_modifications(scheduler_service_name) - for key, value in modifications.items(): - scheduler_cmd = modify_cmd_argument(scheduler_cmd, key, value) - # start the scheduler - # asyncio.create_task(_background_task(self, cluster)) - async for dask_scheduler_start_result in start_service( - docker_client=self.docker_client, - settings=self.settings, - logger=self.log, - service_name=scheduler_service_name, - base_env=scheduler_env, - cluster_secrets=[ - c for c in self.cluster_secrets if c.cluster.name == cluster.name - ], - cmd=scheduler_cmd, - labels={"cluster_id": f"{cluster.id}", "type": "scheduler"}, - gateway_api_url=self.api_url, - ): - yield dask_scheduler_start_result - - async def do_stop_cluster(self, cluster: Cluster) -> None: - self.log.debug("--> stopping %s", f"{cluster=}") - dask_scheduler_service_id = cluster.state.get("service_id") - await stop_service(self.docker_client, dask_scheduler_service_id, self.log) - await delete_secrets(self.docker_client, cluster) - self.log.debug("<--%s stopped", f"{cluster=}") - - async def do_check_clusters(self, clusters: list[Cluster]) -> list[bool]: - self.log.debug("--> checking statuses of : %s", f"{clusters=}") - ok: list[bool] = await asyncio.gather( - *[self._check_service_status(c) for c in clusters], return_exceptions=True - ) - self.log.debug("<-- clusters status returned: %s", f"{ok=}") - return ok - - async def do_start_worker( - self, worker: Worker - ) -> AsyncGenerator[dict[str, Any], None]: - self.log.debug("--> starting %s", f"{worker=}") - node_hostname = None - try: - node_hostname = await get_next_empty_node_hostname( - self.docker_client, worker.cluster - ) - except (NoServiceTasksError, TaskNotAssignedError) as exc: - # this is a real error - raise PublicException(f"{exc}") from exc - except NoHostFoundError as exc: - # this should not happen since calling do_start_worker is done - # from the on_cluster_heartbeat that checks if we already reached max worker 
- # What may happen is that a docker node was removed in between and that is an error we can report. - raise PublicException( - "Unexpected error while creating a new worker, there is no available host! Was a docker node removed?" - ) from exc - assert node_hostname is not None # nosec - worker_env = self.get_worker_env(worker.cluster) - dask_scheduler_url = f"tls://cluster_{worker.cluster.id}_scheduler:{OSPARC_SCHEDULER_API_PORT}" # worker.cluster.scheduler_address - # NOTE: the name must be set so that the scheduler knows which worker to wait for - worker_env.update( - { - "DASK_SCHEDULER_URL": dask_scheduler_url, - "DASK_WORKER_NAME": worker.name, - } - ) - async for dask_sidecar_start_result in start_service( - docker_client=self.docker_client, - settings=self.settings, - logger=self.log, - service_name=f"cluster_{worker.cluster.id}_sidecar_{worker.id}", - base_env=worker_env, - cluster_secrets=[ - c for c in self.cluster_secrets if c.cluster.name == worker.cluster.name - ], - cmd=None, - labels={ - "cluster_id": f"{worker.cluster.id}", - "worker_id": f"{worker.id}", - "type": "worker", - }, - gateway_api_url=self.api_url, - placement={"Constraints": [f"node.hostname=={node_hostname}"]}, - ): - yield dask_sidecar_start_result - - async def do_stop_worker(self, worker: Worker) -> None: - self.log.debug("--> Stopping %s", f"{worker=}") - if service_id := worker.state.get("service_id"): - await stop_service(self.docker_client, service_id, self.log) - self.log.debug("<-- %s stopped", f"{worker=}") - else: - self.log.error( - "Worker %s does not have a service id! That is not expected!", - f"{worker=}", - ) - - async def _check_service_status( - self, cluster_service: Union[Worker, Cluster] - ) -> bool: - self.log.debug("--> checking status: %s", f"{cluster_service=}") - if service_id := cluster_service.state.get("service_id"): - self.log.debug("--> checking service '%s' status", f"{service_id}") - try: - service = await self.docker_client.services.inspect(service_id) - if service: - service_name = service["Spec"]["Name"] - return await is_service_task_running( - self.docker_client, service_name, self.log - ) - - except DockerContainerError: - self.log.exception("Error while checking %s", f"{service_id=}") - self.log.warning( - "%s does not have a service id! That is not expected!", - f"{cluster_service=}", - ) - return False - - async def do_check_workers(self, workers: list[Worker]) -> list[bool]: - self.log.debug("--> checking statuses: %s", f"{workers=}") - ok = await asyncio.gather( - *[self._check_service_status(w) for w in workers], return_exceptions=True - ) - self.log.debug("<-- worker status returned: %s", f"{ok=}") - return ok - - async def on_cluster_heartbeat(self, cluster_name, msg) -> None: - # pylint: disable=no-else-continue, unused-variable, too-many-branches - # pylint: disable=too-many-statements - - # HACK: we override the base class heartbeat in order to - # dynamically allow for more or less workers depending on the - # available docker nodes!!! 
- cluster = self.db.get_cluster(cluster_name) - if cluster is None or cluster.target > JobStatus.RUNNING: - return - - cluster.last_heartbeat = timestamp() - - if cluster.status == JobStatus.RUNNING: - cluster_update = {} - else: - cluster_update = { - "api_address": msg["api_address"], - "scheduler_address": msg["scheduler_address"], - "dashboard_address": msg["dashboard_address"], - } - - count = msg["count"] - active_workers = set(msg["active_workers"]) - closing_workers = set(msg["closing_workers"]) - closed_workers = set(msg["closed_workers"]) - - self.log.info( - "Cluster %s heartbeat [count: %d, n_active: %d, n_closing: %d, n_closed: %d]", - cluster_name, - count, - len(active_workers), - len(closing_workers), - len(closed_workers), - ) - - # THIS IS THE HACK!!! - # original code in dask_gateway_server.backend.db_base - max_workers = cluster.config.get("cluster_max_workers") - if self.settings.GATEWAY_SERVER_ONE_WORKER_PER_NODE: - # cluster_max_workers = len(await get_cluster_information(self.docker_client)) - # if max_workers != cluster_max_workers: - # unfrozen_cluster_config = {k: v for k, v in cluster.config.items()} - # unfrozen_cluster_config["cluster_max_workers"] = cluster_max_workers - # cluster_update["config"] = unfrozen_cluster_config - max_workers = len(await get_cluster_information(self.docker_client)) - if max_workers is not None and count > max_workers: - # This shouldn't happen under normal operation, but could if the - # user does something malicious (or there's a bug). - self.log.info( - "Cluster %s heartbeat requested %d workers, exceeding limit of %s.", - cluster_name, - count, - max_workers, - ) - count = max_workers - - if count != cluster.count: - cluster_update["count"] = count - - created_workers = [] - submitted_workers = [] - target_updates = [] - newly_running = [] - close_expected = [] - for worker in cluster.workers.values(): - if worker.status >= JobStatus.STOPPED: - continue - elif worker.name in closing_workers: - if worker.status < JobStatus.RUNNING: - newly_running.append(worker) - close_expected.append(worker) - elif worker.name in active_workers: - if worker.status < JobStatus.RUNNING: - newly_running.append(worker) - elif worker.name in closed_workers: - target = ( - JobStatus.STOPPED if worker.close_expected else JobStatus.FAILED - ) - target_updates.append((worker, {"target": target})) - else: - if worker.status == JobStatus.SUBMITTED: - submitted_workers.append(worker) - else: - assert worker.status == JobStatus.CREATED - created_workers.append(worker) - - n_pending = len(created_workers) + len(submitted_workers) - n_to_stop = len(active_workers) + n_pending - count - if n_to_stop > 0: - for w in islice(chain(created_workers, submitted_workers), n_to_stop): - target_updates.append((w, {"target": JobStatus.STOPPED})) - - if cluster_update: - self.db.update_cluster(cluster, **cluster_update) - self.queue.put(cluster) - - self.db.update_workers(target_updates) - for w, u in target_updates: - self.queue.put(w) - - if newly_running: - # At least one worker successfully started, reset failure count - cluster.worker_start_failure_count = 0 - self.db.update_workers( - [(w, {"status": JobStatus.RUNNING}) for w in newly_running] - ) - for w in newly_running: - self.log.info("Worker %s is running", w.name) - - self.db.update_workers([(w, {"close_expected": True}) for w in close_expected]) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py 
b/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py deleted file mode 100644 index d639b55e3db..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/settings.py +++ /dev/null @@ -1,75 +0,0 @@ -from enum import Enum -from typing import Optional - -from pydantic import BaseSettings, Field, NonNegativeInt, PositiveInt - - -class BootModeEnum(str, Enum): - """ - Values taken by SC_BOOT_MODE environment variable - set in Dockerfile and used during docker/boot.sh - """ - - DEFAULT = "default" - LOCAL = "local-development" - DEBUG = "debug-ptvsd" - PRODUCTION = "production" - DEVELOPMENT = "development" - - -class AppSettings(BaseSettings): - COMPUTATIONAL_SIDECAR_IMAGE: str = Field( - ..., description="The computational sidecar image in use" - ) - COMPUTATIONAL_SIDECAR_LOG_LEVEL: Optional[str] = Field( - "WARNING", - description="The computational sidecar log level", - env=[ - "COMPUTATIONAL_SIDECAR_LOG_LEVEL", - "LOG_LEVEL", - "LOGLEVEL", - "SIDECAR_LOG_LEVEL", - "SIDECAR_LOGLEVEL", - ], - ) - COMPUTATIONAL_SIDECAR_VOLUME_NAME: str = Field( - ..., description="Named volume for the computational sidecars" - ) - - COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS: NonNegativeInt = Field( - 2, description="Number of CPUS the sidecar should not advertise/use" - ) - - COMPUTATION_SIDECAR_NON_USABLE_RAM: NonNegativeInt = Field( - 0, description="Amount of RAM in bytes, the sidecar should not advertise/use" - ) - - COMPUTATION_SIDECAR_DASK_NTHREADS: Optional[PositiveInt] = Field( - default=None, - description="Allows to override the default number of threads used by the dask-sidecars", - ) - - GATEWAY_WORKERS_NETWORK: str = Field( - ..., - description="The docker network where the gateway workers shall be able to access the gateway", - ) - GATEWAY_SERVER_NAME: str = Field( - ..., - description="The hostname of the gateway server in the GATEWAY_WORKERS_NETWORK network", - ) - - SC_BOOT_MODE: Optional[BootModeEnum] - - GATEWAY_SERVER_ONE_WORKER_PER_NODE: bool = Field( - default=True, - description="Only one dask-worker is allowed per node (default). 
If disabled, then scaling must be done manually.", - ) - - GATEWAY_CLUSTER_START_TIMEOUT: float = Field( - default=120.0, - description="Allowed timeout to define a starting cluster as failed", - ) - GATEWAY_WORKER_START_TIMEOUT: float = Field( - default=120.0, - description="Allowed timeout to define a starting worker as failed", - ) diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py b/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py deleted file mode 100644 index 9ff984c22b0..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/backend/utils.py +++ /dev/null @@ -1,383 +0,0 @@ -import asyncio -import json -import logging -from collections import deque -from copy import deepcopy -from pathlib import Path -from typing import Any, AsyncGenerator, Final, NamedTuple, Optional - -import aiodocker -from aiodocker import Docker -from dask_gateway_server.backends.db_base import Cluster, DBBackendBase -from yarl import URL - -from .errors import NoHostFoundError, NoServiceTasksError, TaskNotAssignedError -from .models import ClusterInformation, Hostname, cluster_information_from_docker_nodes -from .settings import AppSettings - -_SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR = "/home/scu/shared_computational_data" -_DASK_KEY_CERT_PATH_IN_SIDECAR = Path("/home/scu/dask-credentials") - - -class DockerSecret(NamedTuple): - secret_id: str - secret_name: str - secret_file_name: str - cluster: Cluster - - -async def is_service_task_running( - docker_client: Docker, service_name: str, logger: logging.Logger -) -> bool: - tasks = await docker_client.tasks.list(filters={"service": service_name}) - tasks_current_state = [task["Status"]["State"] for task in tasks] - logger.info( - "%s current service task states are %s", service_name, f"{tasks_current_state=}" - ) - num_running = sum(current == "running" for current in tasks_current_state) - return bool(num_running == 1) - - -async def get_network_id( - docker_client: Docker, network_name: str, logger: logging.Logger -) -> str: - # try to find the network name (usually named STACKNAME_default) - logger.debug("--> finding network id for '%s'", f"{network_name=}") - networks = [ - x - for x in (await docker_client.networks.list()) - if "swarm" in x["Scope"] and network_name == x["Name"] - ] - logger.debug(f"found the following: {networks=}") - if not networks: - raise ValueError(f"network {network_name} not found") - if len(networks) > 1: - # NOTE: this is impossible at the moment. 
test_utils::test_get_network_id proves it - raise ValueError( - f"network {network_name} is ambiguous, too many network founds: {networks=}" - ) - logger.debug("found '%s'", f"{networks[0]=}") - assert "Id" in networks[0] # nosec - assert isinstance(networks[0]["Id"], str) # nosec - return networks[0]["Id"] - - -def create_service_config( - settings: AppSettings, - service_env: dict[str, Any], - service_name: str, - network_id: str, - service_secrets: list[DockerSecret], - cmd: Optional[list[str]], - labels: dict[str, str], - placement: Optional[dict[str, Any]], - **service_kwargs, -) -> dict[str, Any]: - env = deepcopy(service_env) - env.pop("PATH", None) - # create the secrets array containing the TLS cert/key pair - container_secrets = [] - for s in service_secrets: - container_secrets.append( - { - "SecretName": s.secret_name, - "SecretID": s.secret_id, - "File": { - "Name": f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}", - "UID": "0", - "GID": "0", - "Mode": 0x777, - }, - } - ) - env_updates = {} - for env_name, env_value in env.items(): - if env_value == s.secret_file_name: - env_updates[ - env_name - ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}" - env.update(env_updates) - mounts = [ - # docker socket needed to use the docker api - { - "Source": "/var/run/docker.sock", - "Target": "/var/run/docker.sock", - "Type": "bind", - "ReadOnly": True, - }, - # the sidecar data data is stored in a volume - { - "Source": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME, - "Target": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR, - "Type": "volume", - "ReadOnly": False, - }, - ] - - task_template: dict[str, Any] = { - "ContainerSpec": { - "Env": env, - "Image": settings.COMPUTATIONAL_SIDECAR_IMAGE, - "Init": True, - "Mounts": mounts, - "Secrets": container_secrets, - "Hostname": service_name, - }, - "RestartPolicy": {"Condition": "on-failure"}, - } - - if cmd: - task_template["ContainerSpec"]["Command"] = cmd - if placement: - task_template["Placement"] = placement - - return { - "name": service_name, - "labels": labels, - "task_template": task_template, - "networks": [network_id], - **service_kwargs, - } - - -async def create_or_update_secret( - docker_client: aiodocker.Docker, - target_file_name: str, - cluster: Cluster, - *, - file_path: Optional[Path] = None, - secret_data: Optional[str] = None, -) -> DockerSecret: - if file_path is None and secret_data is None: - raise ValueError( - f"Both {file_path=} and {secret_data=} are empty, that is not allowed" - ) - data = secret_data - if not data and file_path: - data = file_path.read_text() - - docker_secret_name = f"{Path( target_file_name).name}_{cluster.id}" - - secrets = await docker_client.secrets.list(filters={"name": docker_secret_name}) - if secrets: - # we must first delete it as only labels may be updated - secret = secrets[0] - await docker_client.secrets.delete(secret["ID"]) - assert data # nosec - secret = await docker_client.secrets.create( - name=docker_secret_name, - data=data, - labels={"cluster_id": f"{cluster.id}", "cluster_name": f"{cluster.name}"}, - ) - return DockerSecret( - secret_id=secret["ID"], - secret_name=docker_secret_name, - secret_file_name=target_file_name, - cluster=cluster, - ) - - -async def delete_secrets(docker_client: aiodocker.Docker, cluster: Cluster) -> None: - secrets = await docker_client.secrets.list( - filters={"label": f"cluster_id={cluster.id}"} - ) - await asyncio.gather(*[docker_client.secrets.delete(s["ID"]) for s in secrets]) - - -async def start_service( - 
docker_client: aiodocker.Docker, - settings: AppSettings, - logger: logging.Logger, - service_name: str, - base_env: dict[str, str], - cluster_secrets: list[DockerSecret], - cmd: Optional[list[str]], - labels: dict[str, str], - gateway_api_url: str, - placement: Optional[dict[str, Any]] = None, - **service_kwargs, -) -> AsyncGenerator[dict[str, Any], None]: - service_parameters = {} - try: - assert settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL # nosec - env = deepcopy(base_env) - env.update( - { - # NOTE: the hostname of the gateway API must be - # modified so that the scheduler/sidecar can - # send heartbeats to the gateway - "DASK_GATEWAY_API_URL": f"{URL(gateway_api_url).with_host(settings.GATEWAY_SERVER_NAME)}", - "SIDECAR_COMP_SERVICES_SHARED_FOLDER": _SHARED_COMPUTATIONAL_FOLDER_IN_SIDECAR, - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": settings.COMPUTATIONAL_SIDECAR_VOLUME_NAME, - "LOG_LEVEL": settings.COMPUTATIONAL_SIDECAR_LOG_LEVEL, - "DASK_SIDECAR_NUM_NON_USABLE_CPUS": f"{settings.COMPUTATION_SIDECAR_NUM_NON_USABLE_CPUS}", - "DASK_SIDECAR_NON_USABLE_RAM": f"{settings.COMPUTATION_SIDECAR_NON_USABLE_RAM}", - } - ) - if settings.COMPUTATION_SIDECAR_DASK_NTHREADS: - env["DASK_NTHREADS"] = f"{settings.COMPUTATION_SIDECAR_DASK_NTHREADS}" - - # find service parameters - network_id = await get_network_id( - docker_client, settings.GATEWAY_WORKERS_NETWORK, logger - ) - service_parameters = create_service_config( - settings, - env, - service_name, - network_id, - cluster_secrets, - cmd, - labels=labels, - placement=placement, - **service_kwargs, - ) - - # start service - logger.info("Starting service %s", service_name) - logger.debug("Using parameters %s", json.dumps(service_parameters, indent=2)) - service = await docker_client.services.create(**service_parameters) - logger.info("Service %s started: %s", service_name, f"{service=}") - yield {"service_id": service["ID"]} - - # get the full info from docker - service = await docker_client.services.inspect(service["ID"]) - logger.debug( - "Service '%s' inspection: %s", - service_name, - f"{json.dumps(service, indent=2)}", - ) - - # wait until the service is started - logger.info( - "---> Service started, waiting for service %s to run...", - service_name, - ) - while not await is_service_task_running( - docker_client, service["Spec"]["Name"], logger - ): - yield {"service_id": service["ID"]} - await asyncio.sleep(1) - - # we are done, the service is started - logger.info( - "---> Service %s is started, and has ID %s", - service["Spec"]["Name"], - service["ID"], - ) - yield {"service_id": service["ID"]} - - except (aiodocker.DockerContainerError, aiodocker.DockerError): - logger.exception( - "Unexpected Error while running container with parameters %s", - json.dumps(service_parameters, indent=2), - ) - raise - except asyncio.CancelledError: - logger.warn("Service creation was cancelled") - raise - - -async def stop_service( - docker_client: aiodocker.Docker, service_id: str, logger: logging.Logger -) -> None: - logger.info("Stopping service %s", f"{service_id}") - try: - await docker_client.services.delete(service_id) - logger.info("service %s stopped", f"{service_id=}") - - except aiodocker.DockerContainerError: - logger.exception("Error while stopping service with id %s", f"{service_id=}") - - -async def create_docker_secrets_from_tls_certs_for_cluster( - docker_client: Docker, backend: DBBackendBase, cluster: Cluster -) -> list[DockerSecret]: - tls_cert_path, tls_key_path = backend.get_tls_paths(cluster) - return [ - await 
create_or_update_secret( - docker_client, - f"{tls_cert_path}", - cluster, - secret_data=cluster.tls_cert.decode(), - ), - await create_or_update_secret( - docker_client, - f"{tls_key_path}", - cluster, - secret_data=cluster.tls_key.decode(), - ), - ] - - -OSPARC_SCHEDULER_API_PORT: Final[int] = 8786 -OSPARC_SCHEDULER_DASHBOARD_PORT: Final[int] = 8787 - - -def get_osparc_scheduler_cmd_modifications( - scheduler_service_name: str, -) -> dict[str, str]: - # NOTE: the healthcheck of itisfoundation/dask-sidecar expects the dashboard - # to be on port 8787 - # (see https://github.com/ITISFoundation/osparc-simcore/blob/f3d98dccdae665d23701b0db4ee917364a0fbd99/services/dask-sidecar/Dockerfile) - return { - "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}", - "--port": f"{OSPARC_SCHEDULER_API_PORT}", - "--host": scheduler_service_name, - } - - -def modify_cmd_argument( - cmd: list[str], argument_name: str, argument_value: str -) -> list[str]: - modified_cmd = deepcopy(cmd) - try: - dashboard_address_arg_index = modified_cmd.index(argument_name) - modified_cmd[dashboard_address_arg_index + 1] = argument_value - except ValueError: - modified_cmd.extend([argument_name, argument_value]) - return modified_cmd - - -async def get_cluster_information(docker_client: Docker) -> ClusterInformation: - cluster_information = cluster_information_from_docker_nodes( - await docker_client.nodes.list() - ) - - return cluster_information - - -async def get_next_empty_node_hostname( - docker_client: Docker, cluster: Cluster -) -> Hostname: - current_count = getattr(get_next_empty_node_hostname, "counter", -1) + 1 - setattr(get_next_empty_node_hostname, "counter", current_count) - - cluster_nodes = deque(await docker_client.nodes.list()) - current_worker_services = await docker_client.services.list( - filters={"label": [f"cluster_id={cluster.id}", "type=worker"]} - ) - used_docker_node_ids = set() - - for service in current_worker_services: - service_tasks = await docker_client.tasks.list( - filters={"service": service["ID"]} - ) - if not service_tasks: - raise NoServiceTasksError(f"service {service} has no tasks attached") - for task in service_tasks: - if task["Status"]["State"] in ("new", "pending"): - raise TaskNotAssignedError(f"task {task} is not assigned to a host yet") - if task["Status"]["State"] in ( - "assigned", - "preparing", - "starting", - "running", - ): - used_docker_node_ids.add(task["NodeID"]) - cluster_nodes.rotate(current_count) - for node in cluster_nodes: - if node["ID"] in used_docker_node_ids: - continue - return f"{node['Description']['Hostname']}" - raise NoHostFoundError("Could not find any empty host") diff --git a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py b/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py deleted file mode 100644 index f66f4b694dd..00000000000 --- a/services/osparc-gateway-server/src/osparc_gateway_server/remote_debug.py +++ /dev/null @@ -1,23 +0,0 @@ -""" Setup remote debugger with debugpy - a debugger for Python - https://github.com/microsoft/debugpy - -""" -import logging - - -def setup_remote_debugging(logger: logging.Logger) -> None: - try: - logger.debug("Attaching debugpy ...") - - import debugpy - - REMOTE_DEBUGGING_PORT = 3000 - debugpy.listen(("0.0.0.0", REMOTE_DEBUGGING_PORT)) # nosec - # debugpy.wait_for_client() - - except ImportError as err: - raise RuntimeError( - "Cannot enable remote debugging. 
Please install debugpy first" - ) from err - - logger.info("Remote debugging enabled: listening port %s", REMOTE_DEBUGGING_PORT) diff --git a/services/osparc-gateway-server/tests/conftest.py b/services/osparc-gateway-server/tests/conftest.py deleted file mode 100644 index 695f55dcf04..00000000000 --- a/services/osparc-gateway-server/tests/conftest.py +++ /dev/null @@ -1,64 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import asyncio -from pathlib import Path -from typing import Any, AsyncIterator, Awaitable, Callable - -import aiodocker -import pytest -from faker import Faker -from tenacity._asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_plugins = ["pytest_simcore.repository_paths", "pytest_simcore.docker_swarm"] - - -@pytest.fixture(scope="session") -def package_dir(osparc_simcore_services_dir: Path): - package_folder = osparc_simcore_services_dir / "osparc-gateway-server" - assert package_folder.exists() - return package_folder - - -@pytest.fixture -async def async_docker_client() -> AsyncIterator[aiodocker.Docker]: - async with aiodocker.Docker() as docker_client: - yield docker_client - - -@pytest.fixture -async def docker_network( - async_docker_client: aiodocker.Docker, faker: Faker -) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]: - networks = [] - - async def _network_creator(**network_config_kwargs) -> dict[str, Any]: - network = await async_docker_client.networks.create( - config={"Name": faker.uuid4(), "Driver": "overlay"} | network_config_kwargs - ) - assert network - print(f"--> created network {network=}") - networks.append(network) - return await network.show() - - yield _network_creator - - # wait until all networks are really gone - async def _wait_for_network_deletion(network: aiodocker.docker.DockerNetwork): - network_name = (await network.show())["Name"] - await network.delete() - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - print(f"<-- waiting for network '{network_name}' deletion...") - list_of_network_names = [ - n["Name"] for n in await async_docker_client.networks.list() - ] - assert network_name not in list_of_network_names - print(f"<-- network '{network_name}' deleted") - - print(f"<-- removing all networks {networks=}") - await asyncio.gather(*[_wait_for_network_deletion(network) for network in networks]) diff --git a/services/osparc-gateway-server/tests/integration/_dask_helpers.py b/services/osparc-gateway-server/tests/integration/_dask_helpers.py deleted file mode 100644 index e81d0332787..00000000000 --- a/services/osparc-gateway-server/tests/integration/_dask_helpers.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import NamedTuple - -from dask_gateway_server.app import DaskGateway - - -class DaskGatewayServer(NamedTuple): - address: str - proxy_address: str - password: str - server: DaskGateway diff --git a/services/osparc-gateway-server/tests/integration/conftest.py b/services/osparc-gateway-server/tests/integration/conftest.py deleted file mode 100644 index 1e1161670ad..00000000000 --- a/services/osparc-gateway-server/tests/integration/conftest.py +++ /dev/null @@ -1,139 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - - -import asyncio -import json -from typing import Any, AsyncIterator, Awaitable, Callable, Union - -import aiodocker -import dask_gateway -import pytest -import traitlets -import traitlets.config -from 
_dask_helpers import DaskGatewayServer -from dask_gateway_server.app import DaskGateway -from faker import Faker -from osparc_gateway_server.backend.osparc import OsparcBackend -from osparc_gateway_server.backend.utils import ( - OSPARC_SCHEDULER_API_PORT, - OSPARC_SCHEDULER_DASHBOARD_PORT, -) -from pytest_simcore.helpers.utils_docker import get_localhost_ip -from tenacity._asyncio import AsyncRetrying -from tenacity.wait import wait_fixed - - -@pytest.fixture -async def docker_volume( - async_docker_client: aiodocker.Docker, -) -> AsyncIterator[Callable[[str], Awaitable[dict[str, Any]]]]: - volumes = [] - - async def _volume_creator(name: str) -> dict[str, Any]: - volume = await async_docker_client.volumes.create(config={"Name": name}) - assert volume - print(f"--> created {volume=}") - volumes.append(volume) - return await volume.show() - - yield _volume_creator - - # cleanup - async def _wait_for_volume_deletion(volume: aiodocker.docker.DockerVolume): - inspected_volume = await volume.show() - async for attempt in AsyncRetrying(reraise=True, wait=wait_fixed(1)): - with attempt: - print(f"<-- deleting volume '{inspected_volume['Name']}'...") - await volume.delete() - print(f"<-- volume '{inspected_volume['Name']}' deleted") - - await asyncio.gather(*[_wait_for_volume_deletion(v) for v in volumes]) - - -@pytest.fixture -def gateway_password(faker: Faker) -> str: - return faker.password() - - -def _convert_to_dict(c: Union[traitlets.config.Config, dict]) -> dict[str, Any]: - converted_dict = {} - for x, y in c.items(): - if isinstance(y, (dict, traitlets.config.Config)): - converted_dict[x] = _convert_to_dict(y) - else: - converted_dict[x] = f"{y}" - return converted_dict - - -@pytest.fixture -def mock_scheduler_cmd_modifications(mocker): - """This mock is necessary since: - If the osparc-gateway-server is running in the host then: - - dask-scheduler must start with "" for --host, so the dask-scheduler defines its IP as being in docker_gw_bridge (172.18.0.X), accessible from the host - When the osparc-gateway-server is running as a docker container, then the --host must be set - as "cluster_X_scheduler" since this is the hostname of the container and resolves into the dask-gateway network - """ - mocker.patch( - "osparc_gateway_server.backend.osparc.get_osparc_scheduler_cmd_modifications", - autospec=True, - return_value={ - "--dashboard-address": f":{OSPARC_SCHEDULER_DASHBOARD_PORT}", - "--port": f"{OSPARC_SCHEDULER_API_PORT}", - }, - ) - - -@pytest.fixture -async def local_dask_gateway_server( - mock_scheduler_cmd_modifications, - minimal_config: None, - gateway_password: str, -) -> AsyncIterator[DaskGatewayServer]: - """this code is more or less copy/pasted from dask-gateway repo""" - c = traitlets.config.Config() - c.DaskGateway.backend_class = OsparcBackend # type: ignore - c.DaskGateway.address = "127.0.0.1:0" # type: ignore - c.DaskGateway.log_level = "DEBUG" # type: ignore - c.Proxy.address = f"{get_localhost_ip()}:0" # type: ignore - c.DaskGateway.authenticator_class = "dask_gateway_server.auth.SimpleAuthenticator" # type: ignore - c.SimpleAuthenticator.password = gateway_password # type: ignore - print(f"--> local dask gateway config: {json.dumps(_convert_to_dict(c), indent=2)}") - dask_gateway_server = DaskGateway(config=c) - dask_gateway_server.initialize([]) # that is a shitty one! 
- print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - print("--> local dask gateway server setup completed") - yield DaskGatewayServer( - f"http://{dask_gateway_server.backend.proxy.address}", - f"gateway://{dask_gateway_server.backend.proxy.tcp_address}", - c.SimpleAuthenticator.password, # type: ignore - dask_gateway_server, - ) - print("<-- local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") - - -@pytest.fixture -async def gateway_client( - local_dask_gateway_server: DaskGatewayServer, -) -> AsyncIterator[dask_gateway.Gateway]: - async with dask_gateway.Gateway( - local_dask_gateway_server.address, - local_dask_gateway_server.proxy_address, - asynchronous=True, - auth=dask_gateway.BasicAuth( - username="pytest_user", password=local_dask_gateway_server.password - ), - ) as gateway: - assert gateway - print(f"--> {gateway} created") - cluster_options = await gateway.cluster_options() - gateway_versions = await gateway.get_versions() - clusters_list = await gateway.list_clusters() - print(f"--> {gateway_versions}, {cluster_options}, {clusters_list}") - for option in cluster_options.items(): - print(f"--> {option}") - yield gateway diff --git a/services/osparc-gateway-server/tests/integration/test_clusters.py b/services/osparc-gateway-server/tests/integration/test_clusters.py deleted file mode 100644 index 1ba132b9ef9..00000000000 --- a/services/osparc-gateway-server/tests/integration/test_clusters.py +++ /dev/null @@ -1,258 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import asyncio -from typing import Any, Awaitable, Callable - -import pytest -from _dask_helpers import DaskGatewayServer -from _pytest.fixtures import FixtureRequest -from _pytest.monkeypatch import MonkeyPatch -from aiodocker import Docker -from dask_gateway import Gateway -from faker import Faker -from pytest_simcore.helpers.utils_docker import get_localhost_ip -from tenacity._asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - - -@pytest.fixture( - params=[ - "local/dask-sidecar:production", - ] -) -def minimal_config( - docker_swarm, - monkeypatch: MonkeyPatch, - faker: Faker, - request: FixtureRequest, -): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr()) - monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip()) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr()) - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_IMAGE", - request.param, # type: ignore - ) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG") - monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False") - - -@pytest.fixture -async def gateway_worker_network( - local_dask_gateway_server: DaskGatewayServer, - docker_network: Callable[..., Awaitable[dict[str, Any]]], -) -> dict[str, Any]: - network = await docker_network( - **{ - "Name": local_dask_gateway_server.server.backend.settings.GATEWAY_WORKERS_NETWORK - } - ) - return network - - -async def assert_services_stability(docker_client: Docker, service_name: str): - list_services = await docker_client.services.list(filters={"name": service_name}) - assert ( - len(list_services) == 1 - ), f"{service_name} is missing from the expected services in {list_services}" - _SECONDS_STABLE = 10 - print(f"--> {service_name} is up, now checking if it is running...") - async for attempt 
in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - tasks_list = await docker_client.tasks.list( - filters={"service": service_name} - ) - tasks_current_state = [t["Status"]["State"] for t in tasks_list] - print(f"--> {service_name} service task states are {tasks_current_state=}") - num_running = sum(current == "running" for current in tasks_current_state) - assert num_running == 1 - print(f"--> {service_name} is running now") - print( - f"--> {service_name} is running, now checking if it is stable during {_SECONDS_STABLE}s..." - ) - - async def _check_stability(service: dict[str, Any]): - inspected_service = await docker_client.services.inspect(service["ID"]) - # we ensure the service remains stable for _SECONDS_STABLE seconds (e.g. only one task runs) - - print( - f"--> checking {_SECONDS_STABLE} seconds for stability of service {inspected_service['Spec']['Name']=}" - ) - for n in range(_SECONDS_STABLE): - service_tasks = await docker_client.tasks.list( - filters={"service": inspected_service["Spec"]["Name"]} - ) - assert ( - len(service_tasks) == 1 - ), f"The service is not stable it shows {service_tasks}" - print(f"the {service_name=} is stable after {n} seconds...") - await asyncio.sleep(1) - print(f"{service_name=} stable!!") - - await asyncio.gather(*[_check_stability(s) for s in list_services]) - - -async def _wait_for_cluster_services_and_secrets( - async_docker_client: Docker, - num_services: int, - num_secrets: int, - timeout_s: int = 10, -) -> list[dict[str, Any]]: - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(timeout_s) - ): - with attempt: - list_services = await async_docker_client.services.list() - print( - f"--> list of services after {attempt.retry_state.attempt_number}s: {list_services=}, expected {num_services=}" - ) - assert len(list_services) == num_services - # as the secrets - list_secrets = await async_docker_client.secrets.list() - print( - f"--> list of secrets after {attempt.retry_state.attempt_number}s: {list_secrets=}, expected {num_secrets}" - ) - assert len(list_secrets) == num_secrets - return list_services - # needed for pylint - raise AssertionError("Invalid call to _wait_for_cluster_services_and_secrets") - - -async def test_clusters_start_stop( - minimal_config, - gateway_worker_network, - gateway_client: Gateway, - async_docker_client: Docker, -): - """Each cluster is made of 1 scheduler + X number of sidecars (with 0<=X dict[str, Any]: - volume = await docker_volume(faker.pystr()) - return volume - - -@pytest.fixture -def computational_sidecar_mounted_folder() -> str: - return "/comp_shared_folder" - - -@pytest.fixture -def sidecar_envs( - computational_sidecar_mounted_folder: str, - sidecar_computational_shared_volume: dict[str, Any], -) -> dict[str, str]: - envs = { - "SIDECAR_COMP_SERVICES_SHARED_FOLDER": f"{computational_sidecar_mounted_folder}", - "SIDECAR_COMP_SERVICES_SHARED_VOLUME_NAME": f"{sidecar_computational_shared_volume['Name']}", - } - return envs - - -@pytest.fixture -def sidecar_mounts( - sidecar_computational_shared_volume: dict[str, Any], - computational_sidecar_mounted_folder: str, -) -> list[dict[str, Any]]: - mounts = [ # docker socket needed to use the docker api - { - "Source": "/var/run/docker.sock", - "Target": "/var/run/docker.sock", - "Type": "bind", - "ReadOnly": True, - }, - # the sidecar computational data must be mounted - { - "Source": sidecar_computational_shared_volume["Name"], - "Target": 
computational_sidecar_mounted_folder, - "Type": "volume", - "ReadOnly": False, - }, - ] - return mounts - - -@pytest.fixture -async def create_docker_service( - async_docker_client: aiodocker.Docker, -) -> AsyncIterator[Callable[..., Awaitable[dict[str, Any]]]]: - services = [] - - async def service_creator(**service_kwargs) -> dict[str, Any]: - service = await async_docker_client.services.create(**service_kwargs) - assert service - assert "ID" in service - services.append(service["ID"]) - return await async_docker_client.services.inspect(service["ID"]) - - yield service_creator - # cleanup - await asyncio.gather(*[async_docker_client.services.delete(s) for s in services]) - - -async def _wait_for_service_to_be_ready( - docker_client: aiodocker.Docker, service_name: str -): - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - tasks_list = await docker_client.tasks.list( - filters={"service": service_name} - ) - tasks_current_state = [t["Status"]["State"] for t in tasks_list] - print(f"--> {service_name} service task states are {tasks_current_state=}") - num_running = sum(current == "running" for current in tasks_current_state) - assert num_running == 1 - print(f"--> {service_name} is running now") - - -@pytest.mark.parametrize( - "image_name", - [ - "local/dask-sidecar:production", - ], -) -async def test_computational_sidecar_properly_start_stop( - docker_swarm: None, - sidecar_computational_shared_volume: dict[str, Any], - async_docker_client: aiodocker.Docker, - image_name: str, - sidecar_envs: dict[str, str], - sidecar_mounts: list[dict[str, Any]], - create_docker_service: Callable[..., Awaitable[dict[str, Any]]], -): - scheduler_service = await create_docker_service( - task_template={ - "ContainerSpec": { - "Image": image_name, - "Env": sidecar_envs | {"DASK_START_AS_SCHEDULER": "1"}, - "Init": True, - "Mounts": sidecar_mounts, - } - }, - endpoint_spec={"Ports": [{"PublishedPort": 8786, "TargetPort": 8786}]}, - name="pytest_dask_scheduler", - ) - await _wait_for_service_to_be_ready( - async_docker_client, scheduler_service["Spec"]["Name"] - ) - sidecar_service = await create_docker_service( - task_template={ - "ContainerSpec": { - "Image": image_name, - "Env": sidecar_envs - | {"DASK_SCHEDULER_URL": f"tcp://{get_localhost_ip()}:8786"}, - "Init": True, - "Mounts": sidecar_mounts, - } - }, - name="pytest_dask_sidecar", - ) - await _wait_for_service_to_be_ready( - async_docker_client, sidecar_service["Spec"]["Name"] - ) diff --git a/services/osparc-gateway-server/tests/integration/test_gateway.py b/services/osparc-gateway-server/tests/integration/test_gateway.py deleted file mode 100644 index 205772a0cee..00000000000 --- a/services/osparc-gateway-server/tests/integration/test_gateway.py +++ /dev/null @@ -1,57 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import pytest -import traitlets -import traitlets.config -from _pytest.fixtures import FixtureRequest -from _pytest.monkeypatch import MonkeyPatch -from dask_gateway_server.app import DaskGateway -from faker import Faker -from osparc_gateway_server.backend.osparc import OsparcBackend -from pytest_simcore.helpers.utils_docker import get_localhost_ip - - -@pytest.fixture( - params=[ - "local/dask-sidecar:production", - ] -) -def minimal_config( - docker_swarm, - monkeypatch: MonkeyPatch, - faker: Faker, - request: FixtureRequest, -): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", faker.pystr()) - 
monkeypatch.setenv("GATEWAY_SERVER_NAME", get_localhost_ip()) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_VOLUME_NAME", faker.pystr()) - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_IMAGE", - request.param, # type: ignore - ) - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_LOG_LEVEL", "DEBUG") - - -async def test_gateway_configuration_through_env_variables( - minimal_config, monkeypatch, faker: Faker -): - cluster_start_timeout = faker.pyfloat() - monkeypatch.setenv("GATEWAY_CLUSTER_START_TIMEOUT", f"{cluster_start_timeout}") - worker_start_timeout = faker.pyfloat() - monkeypatch.setenv("GATEWAY_WORKER_START_TIMEOUT", f"{worker_start_timeout}") - c = traitlets.config.Config() - c.DaskGateway.backend_class = OsparcBackend # type: ignore - dask_gateway_server = DaskGateway(config=c) - dask_gateway_server.initialize([]) # that is a shitty one! - print("--> local dask gateway server initialized") - await dask_gateway_server.setup() - await dask_gateway_server.backend.proxy._proxy_contacted # pylint: disable=protected-access - print("--> local dask gateway server setup completed") - - assert dask_gateway_server.backend.cluster_start_timeout == cluster_start_timeout - assert dask_gateway_server.backend.worker_start_timeout == worker_start_timeout - - print("<-- local dask gateway server switching off...") - await dask_gateway_server.cleanup() - print("...done") diff --git a/services/osparc-gateway-server/tests/system/Makefile b/services/osparc-gateway-server/tests/system/Makefile deleted file mode 100644 index c9bd237e523..00000000000 --- a/services/osparc-gateway-server/tests/system/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# -# Targets for DEVELOPMENT for system tests -# -include ../../../../scripts/common.Makefile - - -.PHONY: requirements -requirements: ## compiles pip requirements (.in -> .txt) - @$(MAKE_C) requirements reqs - - -.PHONY: install install-dev install-prod install-ci - -install: install-ci - -install-dev install-prod install-ci: _check_venv_active ## install requirements in dev/prod/ci mode - # installing in $(subst install-,,$@) mode - pip-sync requirements/$(subst install-,,$@).txt - - -.PHONY: tests -tests: _check_venv_active ## runs all tests [CI] - # running system tests - pytest \ - --asyncio-mode=auto \ - --color=yes \ - --durations=10 \ - -vv \ - $(CURDIR) - -.PHONY: test-dev -tests-dev: _check_venv_active ## runs all tests [DEV] - # running system tests - @pytest \ - --asyncio-mode=auto \ - --color=yes \ - --durations=10 \ - --exitfirst \ - --failed-first \ - --keep-docker-up \ - --pdb \ - -vv \ - $(CURDIR) diff --git a/services/osparc-gateway-server/tests/system/requirements/Makefile b/services/osparc-gateway-server/tests/system/requirements/Makefile deleted file mode 100644 index c447724e305..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# -# Targets to pip-compile requirements -# -include ../../../../../requirements/base.Makefile - -# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt diff --git a/services/osparc-gateway-server/tests/system/requirements/_base.txt b/services/osparc-gateway-server/tests/system/requirements/_base.txt deleted file mode 100644 index 0eb14367cec..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_base.txt +++ /dev/null @@ -1,6 +0,0 @@ -# NOTE: -# This file file is just here as placeholder -# to fulfill dependencies of _tools.txt target in requirements/base.Makefile -# -# This is a pure-tests project and all dependencies are added in _test.in -# diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.in b/services/osparc-gateway-server/tests/system/requirements/_test.in deleted file mode 100644 index bafd14bb4ac..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_test.in +++ /dev/null @@ -1,22 +0,0 @@ ---constraint ../../../../../requirements/constraints.txt ---constraint ../../../../dask-sidecar/requirements/_dask-distributed.txt - - - -aiodocker -codecov -dask-gateway -docker -faker -lz4 -numpy -pylint -pytest -pytest-asyncio -pytest-cov -pytest-icdiff -pytest-instafail -pytest-mock -pytest-runner -pytest-sugar -tenacity diff --git a/services/osparc-gateway-server/tests/system/requirements/_test.txt b/services/osparc-gateway-server/tests/system/requirements/_test.txt deleted file mode 100644 index fd2dc17af56..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_test.txt +++ /dev/null @@ -1,228 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_test.txt --resolver=backtracking --strip-extras requirements/_test.in -# -aiodocker==0.21.0 - # via -r requirements/_test.in -aiohttp==3.8.4 - # via - # -c requirements/../../../../../requirements/constraints.txt - # aiodocker - # dask-gateway -aiosignal==1.3.1 - # via aiohttp -astroid==2.15.0 - # via pylint -async-timeout==4.0.2 - # via aiohttp -attrs==22.2.0 - # via - # aiohttp - # pytest -certifi==2022.12.7 - # via requests -charset-normalizer==3.1.0 - # via - # aiohttp - # requests -click==8.1.3 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -cloudpickle==2.2.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed -codecov==2.1.12 - # via -r requirements/_test.in -coverage==7.2.1 - # via - # codecov - # pytest-cov -dask==2023.3.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -dask-gateway==2023.1.1 - # via -r requirements/_test.in -dill==0.3.6 - # via pylint -distributed==2023.3.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway -docker==6.0.1 - # via -r requirements/_test.in -exceptiongroup==1.1.0 - # via pytest -faker==17.6.0 - # via -r requirements/_test.in -frozenlist==1.3.3 - # via - # aiohttp - # aiosignal -fsspec==2023.3.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask -heapdict==1.0.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # zict -icdiff==2.0.6 - # via pytest-icdiff -idna==3.4 - # via - # requests - # yarl -iniconfig==2.0.0 - # via pytest -isort==5.12.0 - # via pylint -jinja2==3.1.2 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -lazy-object-proxy==1.9.0 - 
# via astroid -locket==1.0.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # partd -lz4==4.3.2 - # via -r requirements/_test.in -markupsafe==2.1.2 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # jinja2 -mccabe==0.7.0 - # via pylint -msgpack==1.0.5 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -multidict==6.0.4 - # via - # aiohttp - # yarl -numpy==1.24.2 - # via -r requirements/_test.in -packaging==23.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # docker - # pytest - # pytest-sugar -partd==1.3.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask -platformdirs==3.1.0 - # via pylint -pluggy==1.0.0 - # via pytest -pprintpp==0.4.0 - # via pytest-icdiff -psutil==5.9.4 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -pylint==2.17.0 - # via -r requirements/_test.in -pytest==7.2.2 - # via - # -r requirements/_test.in - # pytest-asyncio - # pytest-cov - # pytest-icdiff - # pytest-instafail - # pytest-mock - # pytest-sugar -pytest-asyncio==0.20.3 - # via -r requirements/_test.in -pytest-cov==4.0.0 - # via -r requirements/_test.in -pytest-icdiff==0.6 - # via -r requirements/_test.in -pytest-instafail==0.4.2 - # via -r requirements/_test.in -pytest-mock==3.10.0 - # via -r requirements/_test.in -pytest-runner==6.0.0 - # via -r requirements/_test.in -pytest-sugar==0.9.6 - # via -r requirements/_test.in -python-dateutil==2.8.2 - # via faker -pyyaml==5.4.1 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # dask-gateway - # distributed -requests==2.28.2 - # via - # codecov - # docker -six==1.16.0 - # via python-dateutil -sortedcontainers==2.4.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tblib==1.7.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed -tenacity==8.2.2 - # via -r requirements/_test.in -termcolor==2.2.0 - # via pytest-sugar -tomli==2.0.1 - # via - # coverage - # pylint - # pytest -tomlkit==0.11.6 - # via pylint -toolz==0.12.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask - # distributed - # partd -tornado==6.2 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # dask-gateway - # distributed -typing-extensions==4.5.0 - # via - # aiodocker - # astroid - # pylint -urllib3==1.26.14 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed - # docker - # requests -websocket-client==1.5.1 - # via docker -wrapt==1.15.0 - # via astroid -yarl==1.8.2 - # via aiohttp -zict==2.2.0 - # via - # -c requirements/../../../../dask-sidecar/requirements/_dask-distributed.txt - # distributed diff --git a/services/osparc-gateway-server/tests/system/requirements/_tools.in b/services/osparc-gateway-server/tests/system/requirements/_tools.in deleted file mode 100644 index b0503840a27..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_tools.in +++ /dev/null @@ -1,4 +0,0 @@ ---constraint ../../../../../requirements/constraints.txt ---constraint _test.txt - ---requirement ../../../../../requirements/devenv.txt diff --git 
a/services/osparc-gateway-server/tests/system/requirements/_tools.txt b/services/osparc-gateway-server/tests/system/requirements/_tools.txt deleted file mode 100644 index 0f6b1f8d04e..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/_tools.txt +++ /dev/null @@ -1,103 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --output-file=requirements/_tools.txt --resolver=backtracking --strip-extras requirements/_tools.in -# -astroid==2.15.0 - # via - # -c requirements/_test.txt - # pylint -black==23.1.0 - # via -r requirements/../../../../../requirements/devenv.txt -build==0.10.0 - # via pip-tools -bump2version==1.0.1 - # via -r requirements/../../../../../requirements/devenv.txt -cfgv==3.3.1 - # via pre-commit -click==8.1.3 - # via - # -c requirements/_test.txt - # black - # pip-tools -dill==0.3.6 - # via - # -c requirements/_test.txt - # pylint -distlib==0.3.6 - # via virtualenv -filelock==3.9.0 - # via virtualenv -identify==2.5.19 - # via pre-commit -isort==5.12.0 - # via - # -r requirements/../../../../../requirements/devenv.txt - # pylint -lazy-object-proxy==1.9.0 - # via - # -c requirements/_test.txt - # astroid -mccabe==0.7.0 - # via - # -c requirements/_test.txt - # pylint -mypy-extensions==1.0.0 - # via black -nodeenv==1.7.0 - # via pre-commit -packaging==23.0 - # via - # -c requirements/_test.txt - # black - # build -pathspec==0.11.0 - # via black -pip-tools==6.12.3 - # via -r requirements/../../../../../requirements/devenv.txt -platformdirs==3.1.0 - # via - # -c requirements/_test.txt - # black - # pylint - # virtualenv -pre-commit==3.1.1 - # via -r requirements/../../../../../requirements/devenv.txt -pylint==2.17.0 - # via -r requirements/../../../../../requirements/devenv.txt -pyproject-hooks==1.0.0 - # via build -pyyaml==5.4.1 - # via - # -c requirements/_test.txt - # pre-commit -tomli==2.0.1 - # via - # -c requirements/_test.txt - # black - # build - # pylint - # pyproject-hooks -tomlkit==0.11.6 - # via - # -c requirements/_test.txt - # pylint -typing-extensions==4.5.0 - # via - # -c requirements/_test.txt - # astroid - # black - # pylint -virtualenv==20.20.0 - # via pre-commit -wheel==0.38.4 - # via pip-tools -wrapt==1.15.0 - # via - # -c requirements/_test.txt - # astroid - -# The following packages are considered to be unsafe in a requirements file: -# pip -# setuptools diff --git a/services/osparc-gateway-server/tests/system/requirements/ci.txt b/services/osparc-gateway-server/tests/system/requirements/ci.txt deleted file mode 100644 index 0c449f919a7..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/ci.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Shortcut to install all packages for the contigous integration (CI) of 'services/web/server' -# -# - As ci.txt but w/ tests -# -# Usage: -# pip install -r requirements/ci.txt -# - -# installs base + tests requirements ---requirement _test.txt - -# installs this repo's packages -../../../../packages/pytest-simcore/ diff --git a/services/osparc-gateway-server/tests/system/requirements/dev.txt b/services/osparc-gateway-server/tests/system/requirements/dev.txt deleted file mode 100644 index 436b5550342..00000000000 --- a/services/osparc-gateway-server/tests/system/requirements/dev.txt +++ /dev/null @@ -1,15 +0,0 @@ -# Shortcut to install all packages needed to develop 'services/web/server' -# -# - As ci.txt but with current and repo packages in develop (edit) mode -# -# Usage: -# pip install -r requirements/dev.txt -# - - -# 
installs base + tests requirements ---requirement _test.txt ---requirement _tools.txt - -# installs this repo's packages ---editable ../../../../packages/pytest-simcore/ diff --git a/services/osparc-gateway-server/tests/system/test_deploy.py b/services/osparc-gateway-server/tests/system/test_deploy.py deleted file mode 100644 index 4dd4e114ec3..00000000000 --- a/services/osparc-gateway-server/tests/system/test_deploy.py +++ /dev/null @@ -1,160 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - - -import asyncio -import json -from copy import deepcopy -from pathlib import Path -from typing import AsyncIterator - -import aiohttp -import dask_gateway -import pytest -from faker import Faker -from pytest_simcore.helpers.utils_docker import get_localhost_ip -from tenacity._asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - -pytest_plugins = ["pytest_simcore.repository_paths", "pytest_simcore.docker_swarm"] - - -@pytest.fixture -async def aiohttp_client() -> AsyncIterator[aiohttp.ClientSession]: - async with aiohttp.ClientSession() as session: - yield session - - -@pytest.fixture -def minimal_config(monkeypatch): - monkeypatch.setenv("SC_BOOT_MODE", "production") - monkeypatch.setenv("GATEWAY_SERVER_ONE_WORKER_PER_NODE", "False") - - -@pytest.fixture(scope="session") -def dask_gateway_entrypoint() -> str: - return f"http://{get_localhost_ip()}:8000" - - -@pytest.fixture(scope="session") -def dask_gateway_password() -> str: - return "asdf" - - -@pytest.fixture -async def dask_gateway_stack_deployed_services( - minimal_config, - package_dir: Path, - docker_swarm, - aiohttp_client: aiohttp.ClientSession, - dask_gateway_entrypoint: str, -): - print("--> Deploying osparc-dask-gateway stack...") - process = await asyncio.create_subprocess_exec( - "make", - "up-prod", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - cwd=package_dir, - ) - stdout, stderr = await process.communicate() - assert ( - process.returncode == 0 - ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\nstderr:{stderr.decode()}" - print(f"{stdout}") - print("--> osparc-dask-gateway stack deployed.") - healtcheck_endpoint = f"{dask_gateway_entrypoint}/api/health" - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - print( - f"--> Connecting to {healtcheck_endpoint}, " - f"attempt {attempt.retry_state.attempt_number}...", - ) - response = await aiohttp_client.get(healtcheck_endpoint) - response.raise_for_status() - print( - f"--> Connection to gateway server succeeded." 
- f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]", - ) - - yield - print("<-- Stopping osparc-dask-gateway stack...") - process = await asyncio.create_subprocess_exec( - "make", - "down", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - cwd=package_dir, - ) - stdout, stderr = await process.communicate() - assert ( - process.returncode == 0 - ), f"Unexpected error while deploying stack:\nstdout:{stdout.decode()}\n\n{stderr.decode()}" - print(f"{stdout}") - print("<-- osparc-dask-gateway stack stopped.") - - -async def test_deployment( - dask_gateway_stack_deployed_services, - dask_gateway_entrypoint: str, - faker: Faker, - dask_gateway_password: str, -): - gateway = dask_gateway.Gateway( - address=dask_gateway_entrypoint, - auth=dask_gateway.BasicAuth(faker.pystr(), dask_gateway_password), - ) - - with gateway.new_cluster() as cluster: - _NUM_WORKERS = 2 - cluster.scale( - _NUM_WORKERS - ) # when returning we are in the process of creating the workers - - # now wait until we get the workers - workers = None - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - print( - f"--> Waiting to have {_NUM_WORKERS} running," - f" attempt {attempt.retry_state.attempt_number}...", - ) - assert "workers" in cluster.scheduler_info - assert len(cluster.scheduler_info["workers"]) == _NUM_WORKERS - workers = deepcopy(cluster.scheduler_info["workers"]) - print( - f"!-- {_NUM_WORKERS} are running," - f" [{json.dumps(attempt.retry_state.retry_object.statistics)}]", - ) - - # now check all this is stable - _SECONDS_STABLE = 6 - for n in range(_SECONDS_STABLE): - # NOTE: the scheduler_info gets auto-udpated by the dask-gateway internals - assert workers == cluster.scheduler_info["workers"] - await asyncio.sleep(1) - print(f"!-- {_NUM_WORKERS} stable for {n} seconds") - - # send some work - def square(x): - return x**2 - - def neg(x): - return -x - - with cluster.get_client() as client: - square_of_2 = client.submit(square, 2) - assert square_of_2.result(timeout=10) == 4 - assert not square_of_2.exception(timeout=10) - - # now send some more stuff just for the fun - A = client.map(square, range(10)) - B = client.map(neg, A) - - total = client.submit(sum, B) - print("computation completed", total.result(timeout=120)) diff --git a/services/osparc-gateway-server/tests/unit/test_settings.py b/services/osparc-gateway-server/tests/unit/test_settings.py deleted file mode 100644 index 546e278682d..00000000000 --- a/services/osparc-gateway-server/tests/unit/test_settings.py +++ /dev/null @@ -1,21 +0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import pytest -from _pytest.monkeypatch import MonkeyPatch -from osparc_gateway_server.backend.settings import AppSettings - - -@pytest.fixture -def minimal_config(monkeypatch: MonkeyPatch): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork") - monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver") - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest") - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name" - ) - - -def test_app_settings(minimal_config): - settings = AppSettings() - assert settings diff --git a/services/osparc-gateway-server/tests/unit/test_utils.py b/services/osparc-gateway-server/tests/unit/test_utils.py deleted file mode 100644 index c8f4b2e77c8..00000000000 --- a/services/osparc-gateway-server/tests/unit/test_utils.py +++ /dev/null @@ -1,425 
+0,0 @@ -# pylint: disable=unused-argument -# pylint: disable=unused-variable -# pylint: disable=redefined-outer-name - -import asyncio -import socket -from copy import deepcopy -from pathlib import Path -from typing import Any, AsyncIterator, Awaitable, Callable -from unittest import mock - -import aiodocker -import pytest -from dask_gateway_server.backends.db_base import Cluster, JobStatus -from faker import Faker -from osparc_gateway_server.backend.errors import NoHostFoundError -from osparc_gateway_server.backend.settings import AppSettings -from osparc_gateway_server.backend.utils import ( - _DASK_KEY_CERT_PATH_IN_SIDECAR, - DockerSecret, - create_or_update_secret, - create_service_config, - delete_secrets, - get_cluster_information, - get_network_id, - get_next_empty_node_hostname, - is_service_task_running, -) -from pytest_mock.plugin import MockerFixture -from tenacity._asyncio import AsyncRetrying -from tenacity.stop import stop_after_delay -from tenacity.wait import wait_fixed - - -@pytest.fixture -def minimal_config(monkeypatch): - monkeypatch.setenv("GATEWAY_WORKERS_NETWORK", "atestnetwork") - monkeypatch.setenv("GATEWAY_SERVER_NAME", "atestserver") - monkeypatch.setenv("COMPUTATIONAL_SIDECAR_IMAGE", "test/localpytest:latest") - monkeypatch.setenv( - "COMPUTATIONAL_SIDECAR_VOLUME_NAME", "sidecar_computational_volume_name" - ) - - -@pytest.fixture() -async def create_docker_service( - docker_swarm, async_docker_client: aiodocker.Docker, faker: Faker -) -> AsyncIterator[Callable[[dict[str, str]], Awaitable[dict[str, Any]]]]: - created_services = [] - - async def _creator(labels: dict[str, str]) -> dict[str, Any]: - service = await async_docker_client.services.create( - task_template={ - "ContainerSpec": { - "Image": "busybox:latest", - "Command": ["sleep", "10000"], - } - }, - name=faker.pystr(), - labels=labels, - ) - assert service - created_services.append(service) - print(f"--> created docker service {service}") - inspected_service = await async_docker_client.services.inspect(service["ID"]) - print(f"--> service inspected returned {inspected_service}") - return inspected_service - - yield _creator - - await asyncio.gather( - *[async_docker_client.services.delete(s["ID"]) for s in created_services] - ) - - -@pytest.fixture -def create_running_service( - async_docker_client: aiodocker.Docker, - create_docker_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]], -) -> Callable[[dict[str, str]], Awaitable[dict[str, Any]]]: - async def _creator(labels: dict[str, str]) -> dict[str, Any]: - service = await create_docker_service(labels) - async for attempt in AsyncRetrying( - reraise=True, wait=wait_fixed(1), stop=stop_after_delay(60) - ): - with attempt: - tasks = await async_docker_client.tasks.list( - filters={"service": f"{service['Spec']['Name']}"} - ) - task_states = [task["Status"]["State"] for task in tasks] - num_running = sum(current == "running" for current in task_states) - print(f"--> service task states {task_states=}") - assert num_running == 1 - print(f"--> service {service['Spec']['Name']} is running now") - return service - raise AssertionError(f"service {service=} could not start") - - return _creator - - -@pytest.fixture -def mocked_logger(mocker: MockerFixture) -> mock.MagicMock: - return mocker.MagicMock() - - -async def test_is_task_running( - docker_swarm, - minimal_config, - async_docker_client: aiodocker.Docker, - create_running_service: Callable[[dict[str, str]], Awaitable[dict[str, Any]]], - mocked_logger: mock.MagicMock, -): - service = await 
create_running_service({}) - # this service exists and run - assert ( - await is_service_task_running( - async_docker_client, service["Spec"]["Name"], mocked_logger - ) - == True - ) - - # check unknown service raises error - with pytest.raises(aiodocker.DockerError): - await is_service_task_running( - async_docker_client, "unknown_service", mocked_logger - ) - - -async def test_get_network_id( - docker_swarm, - async_docker_client: aiodocker.Docker, - docker_network: Callable[..., Awaitable[dict[str, Any]]], - mocked_logger: mock.MagicMock, -): - # wrong name shall raise - with pytest.raises(ValueError): - await get_network_id(async_docker_client, "a_fake_network_name", mocked_logger) - # create 1 bridge network, shall raise when looking for it - bridge_network = await docker_network(**{"Driver": "bridge"}) - with pytest.raises(ValueError): - await get_network_id(async_docker_client, bridge_network["Name"], mocked_logger) - # create 1 overlay network - overlay_network = await docker_network() - network_id = await get_network_id( - async_docker_client, overlay_network["Name"], mocked_logger - ) - assert network_id == overlay_network["Id"] - - # create a second overlay network with the same name, shall raise on creation, so not possible - with pytest.raises(aiodocker.exceptions.DockerError): - await docker_network(**{"Name": overlay_network["Name"]}) - assert ( - True - ), "If it is possible to have 2 networks with the same name, this must be handled" - - -@pytest.fixture -async def fake_cluster(faker: Faker) -> Cluster: - return Cluster(id=faker.uuid4(), name=faker.pystr(), status=JobStatus.CREATED) - - -@pytest.fixture -async def docker_secret_cleaner( - async_docker_client: aiodocker.Docker, fake_cluster: Cluster -) -> AsyncIterator: - yield - await delete_secrets(async_docker_client, fake_cluster) - - -async def test_create_service_config( - docker_swarm, - async_docker_client: aiodocker.Docker, - minimal_config: None, - faker: Faker, - fake_cluster: Cluster, - docker_secret_cleaner, -): - # let's create some fake service config - settings = AppSettings() # type: ignore - service_env = faker.pydict() - service_name = faker.name() - network_id = faker.uuid4() - cmd = faker.pystr() - fake_labels = faker.pydict() - fake_placement = {"Constraints": [f"node.hostname=={faker.hostname()}"]} - - # create a second one - secrets = [ - await create_or_update_secret( - async_docker_client, - faker.file_path(), - fake_cluster, - secret_data=faker.text(), - ) - for n in range(3) - ] - - assert len(await async_docker_client.secrets.list()) == 3 - - # we shall have some env that tells the service where the secret is located - expected_service_env = deepcopy(service_env) - for s in secrets: - fake_env_key = faker.pystr() - service_env[fake_env_key] = s.secret_file_name - expected_service_env[ - fake_env_key - ] = f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(s.secret_file_name).name}" - - service_parameters = create_service_config( - settings=settings, - service_env=service_env, - service_name=service_name, - network_id=network_id, - service_secrets=secrets, - cmd=cmd, - labels=fake_labels, - placement=fake_placement, - ) - assert service_parameters - assert service_parameters["name"] == service_name - assert network_id in service_parameters["networks"] - - for env_key, env_value in expected_service_env.items(): - assert env_key in service_parameters["task_template"]["ContainerSpec"]["Env"] - assert ( - service_parameters["task_template"]["ContainerSpec"]["Env"][env_key] - == env_value - ) - assert 
service_parameters["task_template"]["ContainerSpec"]["Command"] == cmd - assert service_parameters["labels"] == fake_labels - assert len(service_parameters["task_template"]["ContainerSpec"]["Secrets"]) == 3 - for service_secret, original_secret in zip( - service_parameters["task_template"]["ContainerSpec"]["Secrets"], secrets - ): - assert service_secret["SecretName"] == original_secret.secret_name - assert service_secret["SecretID"] == original_secret.secret_id - assert ( - service_secret["File"]["Name"] - == f"{_DASK_KEY_CERT_PATH_IN_SIDECAR / Path(original_secret.secret_file_name).name}" - ) - assert service_parameters["task_template"]["Placement"] == fake_placement - - -@pytest.fixture -def fake_secret_file(tmp_path) -> Path: - fake_secret_file = Path(tmp_path / "fake_file") - fake_secret_file.write_text("Hello I am a secret file") - assert fake_secret_file.exists() - return fake_secret_file - - -async def test_create_or_update_docker_secrets_with_invalid_call_raises( - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - faker: Faker, - docker_secret_cleaner, -): - with pytest.raises(ValueError): - await create_or_update_secret( - async_docker_client, - faker.file_path(), - fake_cluster, - ) - - -async def test_create_or_update_docker_secrets( - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_secret_file: Path, - fake_cluster: Cluster, - faker: Faker, - docker_secret_cleaner, -): - list_of_secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(list_of_secrets) == 0 - file_original_size = fake_secret_file.stat().st_size - # check secret creation - secret_target_file_name = faker.file_path() - created_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name, - fake_cluster, - file_path=fake_secret_file, - ) - list_of_secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(list_of_secrets) == 1 - secret = list_of_secrets[0] - assert created_secret.secret_id == secret["ID"] - inspected_secret = await async_docker_client.secrets.inspect(secret["ID"]) - - assert created_secret.secret_name == inspected_secret["Spec"]["Name"] - assert "cluster_id" in inspected_secret["Spec"]["Labels"] - assert inspected_secret["Spec"]["Labels"]["cluster_id"] == fake_cluster.id - assert "cluster_name" in inspected_secret["Spec"]["Labels"] - assert inspected_secret["Spec"]["Labels"]["cluster_name"] == fake_cluster.name - - # check update of secret - fake_secret_file.write_text("some additional stuff in the file") - assert fake_secret_file.stat().st_size != file_original_size - - updated_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name, - fake_cluster, - file_path=fake_secret_file, - ) - assert updated_secret.secret_id != created_secret.secret_id - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 1 - updated_secret = secrets[0] - assert updated_secret != created_secret - - # create a second one - secret_target_file_name2 = faker.file_path() - created_secret: DockerSecret = await create_or_update_secret( - async_docker_client, - secret_target_file_name2, - fake_cluster, - secret_data=faker.text(), - ) - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 2 - - # test deletion - await 
delete_secrets(async_docker_client, fake_cluster) - secrets = await async_docker_client.secrets.list( - filters={"label": f"cluster_id={fake_cluster.id}"} - ) - assert len(secrets) == 0 - - -async def test_get_cluster_information( - docker_swarm, - async_docker_client: aiodocker.Docker, -): - cluster_information = await get_cluster_information(async_docker_client) - assert cluster_information - - # in testing we do have 1 machine, that is... this very host - assert len(cluster_information) == 1 - assert socket.gethostname() in cluster_information - - -@pytest.fixture() -def fake_docker_nodes(faker: Faker) -> list[dict[str, Any]]: - return [ - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - {"ID": f"{faker.uuid4()}", "Description": {"Hostname": f"{faker.hostname()}"}}, - ] - - -@pytest.fixture() -def mocked_docker_nodes(mocker: MockerFixture, fake_docker_nodes): - mocked_aiodocker_nodes = mocker.patch( - "osparc_gateway_server.backend.utils.aiodocker.nodes.DockerSwarmNodes.list", - autospec=True, - return_value=fake_docker_nodes, - ) - - -async def test_get_empty_node_hostname_rotates_host_names( - fake_docker_nodes: list[dict[str, Any]], - mocked_docker_nodes, - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, -): - available_hostnames = [ - node["Description"]["Hostname"] for node in fake_docker_nodes - ] - num_nodes = len(fake_docker_nodes) - for n in range(num_nodes): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert hostname in available_hostnames - available_hostnames.pop(available_hostnames.index(hostname)) - # let's do it a second time, since it should again go over all the hosts - available_hostnames = [ - node["Description"]["Hostname"] for node in fake_docker_nodes - ] - for n in range(num_nodes): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert hostname in available_hostnames - available_hostnames.pop(available_hostnames.index(hostname)) - - -async def test_get_empty_node_hostname_correctly_checks_services_labels( - docker_swarm, - async_docker_client: aiodocker.Docker, - fake_cluster: Cluster, - create_running_service, -): - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert socket.gethostname() == hostname - - # only services with the required labels shall be used to find if a service is already on a machine - invalid_labels = [ - # no labels - {}, - # only one of the required label - { - "cluster_id": fake_cluster.id, - }, - # only one of the required label - {"type": "worker"}, - ] - await asyncio.gather(*[create_running_service(labels=l) for l in invalid_labels]) - # these services have not the correct labels, so the host is still available - hostname = await get_next_empty_node_hostname(async_docker_client, fake_cluster) - assert socket.gethostname() == hostname - - # now create a service with the required labels - required_labels = {"cluster_id": fake_cluster.id, "type": "worker"} - await create_running_service(labels=required_labels) - with pytest.raises(NoHostFoundError): - await get_next_empty_node_hostname(async_docker_client, fake_cluster) diff --git a/services/payments/Dockerfile b/services/payments/Dockerfile new file mode 100644 index 00000000000..ed5c7d6950e --- /dev/null +++ b/services/payments/Dockerfile @@ -0,0 +1,184 @@ +# syntax=docker/dockerfile:1 + +# Define arguments in the global scope 
+ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd services/payments +# docker build -f Dockerfile -t payments:prod --target production ../../ +# docker run payments:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=pcrespov + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ + # verify that the binary works + && gosu nobody true + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/scu/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv.
+ENV PATH="${VIRTUAL_ENV}/bin:$PATH" + +EXPOSE 8000 + +# -------------------------- Build stage ------------------- +# Installs build/package management tools and third party dependencies +# +# + /build WORKDIR +# +FROM base AS build + +ENV SC_BUILD_TARGET=build + +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools + +WORKDIR /build + +# install base 3rd party dependencies + + + + +# --------------------------Prod-depends-only stage ------------------- +# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) +# +# + /build +# + services/payments [scu:scu] WORKDIR +# +FROM build AS prod-only-deps + +ENV SC_BUILD_TARGET=prod-only-deps + +WORKDIR /build/services/payments + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/payments,target=/build/services/payments,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + + +# --------------------------Production stage ------------------- +# Final clean-up to reduce image size and startup setup +# Runs as scu (non-root user) +# +# + /home/scu $HOME = WORKDIR +# + services/payments [scu:scu] +# +FROM base AS production + +ENV SC_BUILD_TARGET=production \ + SC_BOOT_MODE=production + +ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + +WORKDIR /home/scu + +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +# Copies booting scripts +COPY --chown=scu:scu services/payments/docker services/payments/docker +RUN chmod +x services/payments/docker/*.sh + + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "services/payments/docker/healthcheck.py", "http://localhost:8000/"] + +ENTRYPOINT [ "/bin/sh", "services/payments/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/payments/docker/boot.sh"] + + +# --------------------------Development stage ------------------- +# Source code accessible in host but runs in container +# Runs as scu with the same gid/uid as the host user +# Placed at the end to speed up builds that target the production image +# +# + /devel WORKDIR +# + services (mounted volume) +# +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/payments + +WORKDIR /devel + +RUN chown -R scu:scu "${VIRTUAL_ENV}" + +ENTRYPOINT ["/bin/sh", "services/payments/docker/entrypoint.sh"] +CMD ["/bin/sh", "services/payments/docker/boot.sh"] diff --git a/services/payments/Makefile b/services/payments/Makefile new file mode 100644 index 00000000000..cf361c3c10e --- /dev/null +++ b/services/payments/Makefile @@ -0,0 +1,41 @@ +# +# DEVELOPMENT recipes for
payments +# +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile + + +.env-ignore: + $(APP_CLI_NAME) echo-dotenv --auto-password > $@ + +.PHONY: openapi.json +openapi-specs: openapi.json +openapi.json: .env-ignore ## produces openapi.json + # generating openapi specs file (need to have the environment set for this) + @set -o allexport; \ + source $<; \ + set +o allexport; \ + python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ + + +# NOTE: Create using `ln -s path/to/osparc-config/repo.config .env-secret` +external ?= .env-secret + +test-dev-unit-external: ## runs test-dev against external service defined in $(external) envfile + # Running tests using external environ '$(external)' + $(MAKE) test-dev-unit pytest-parameters="--external-envfile=$(external) -m can_run_against_external" + +test-ci-unit-external: ## runs test-ci against external service defined in $(external) envfile + # Running tests using external environ '$(external)' + $(MAKE) test-ci-unit pytest-parameters="--external-envfile=$(external) -m can_run_against_external" + + +test-repo-config: ## runs validation against `repo.config` files. e.g. `make test-repo-config SEARCH_ROOT=/path/to/osparc-config/deployments` + @if [ -z "$(SEARCH_ROOT)" ]; then \ + echo "Error: SEARCH_ROOT is not set. Please set SEARCH_ROOT to the directory with repo.config files"; \ + exit 1; \ + fi + @for file in $$(find $(SEARCH_ROOT) -type f -name 'repo.config'); do \ + echo "Validating settings for $$file"; \ + pytest --external-envfile="$$file" --pdb tests/unit/test_core_settings.py; \ + done diff --git a/services/payments/README.md b/services/payments/README.md new file mode 100644 index 00000000000..0dec186398d --- /dev/null +++ b/services/payments/README.md @@ -0,0 +1,11 @@ +# payments service + +The payments service acts as an intermediary between osparc and a `payments-gateway` connected to an external payment system (e.g. stripe, ...). The +`payments-gateway` thus provides a common interface to the final payment system and keeps osparc independent of that choice. The communication +is implemented over http in both directions: this service drives the `payments-gateway` through an API with these specifications [gateway/openapi.json](gateway/openapi.json), +and the gateway is configured to acknowledge back to this service (i.e. via web-hook) on the API with the following specs [openapi.json](openapi.json) (a minimal sketch of this acknowledgement flow is given after the diagram file below). + +Here is a diagram of how this service interacts with the rest of the internal and external services +![payments service diagram](doc/payments.drawio.svg) + +- Further details on the use case and requirements in https://github.com/ITISFoundation/osparc-simcore/issues/4657 diff --git a/services/payments/VERSION b/services/payments/VERSION new file mode 100644 index 00000000000..88c5fb891dc --- /dev/null +++ b/services/payments/VERSION @@ -0,0 +1 @@ +1.4.0 diff --git a/services/payments/doc/payments.drawio.svg b/services/payments/doc/payments.drawio.svg new file mode 100644 index 00000000000..118bbf00c1f --- /dev/null +++ b/services/payments/doc/payments.drawio.svg @@ -0,0 +1,382 @@
+[drawio-exported SVG markup (382 lines) omitted here; only its text labels survived extraction. Recoverable labels: boxes for webserver, Payments (with api/rest, api/rpc and db layers), Payments Gateway (payments_gateway), postgres (payments_transactions, payments_methods), rabbitmq and resource_usage_tracker (RUT); connection labels http, rpc, socketio and aiopg.sa; embedded fallback text "Text is not SVG - cannot display".]
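The README above describes a two-way http exchange: the payments service drives the `payments-gateway` through [gateway/openapi.json](gateway/openapi.json), and the gateway acknowledges back (web-hook style) through the service API specified in `services/payments/openapi.json` further below. The snippet here is a minimal, hypothetical sketch of that acknowledgement leg only, not code from this PR: it assumes `httpx` is installed, that the service listens on `http://localhost:8000` (the port exposed in the Dockerfile), that `PAYMENTS_USERNAME`/`PAYMENTS_PASSWORD` hold valid credentials, and that a payment id is already known.

```python
# Hypothetical sketch of the gateway-to-payments acknowledgement (web-hook) call,
# following the /v1/token and /v1/payments/{payment_id}:ack routes in openapi.json.
# Assumptions: httpx installed, service at http://localhost:8000, credentials in env vars.
import os

import httpx

BASE_URL = "http://localhost:8000"  # assumption: local deployment


def acknowledge_payment(payment_id: str) -> None:
    with httpx.Client(base_url=BASE_URL) as client:
        # OAuth2 password flow: form-encoded credentials -> bearer token
        resp = client.post(
            "/v1/token",
            data={
                "grant_type": "password",
                "username": os.environ["PAYMENTS_USERNAME"],
                "password": os.environ["PAYMENTS_PASSWORD"],
            },
        )
        resp.raise_for_status()
        token = resp.json()["access_token"]

        # AckPayment body; invoice_url is required when success=true (see schema description)
        ack = {
            "success": True,
            "provider_payment_id": "pi_123ABC",  # example value taken from the spec's example
            "invoice_url": "https://invoices.com/id=12345",
        }
        resp = client.post(
            f"/v1/payments/{payment_id}:ack",
            json=ack,
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()


if __name__ == "__main__":
    acknowledge_payment("12345")  # hypothetical payment id
```

The same token endpoint and bearer header apply to `/v1/payments-methods/{payment_method_id}:ack`; only the request schema (`AckPaymentMethod`) differs.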
diff --git a/services/payments/docker/boot.sh b/services/payments/docker/boot.sh new file mode 100755 index 00000000000..1cc69d83665 --- /dev/null +++ b/services/payments/docker/boot.sh @@ -0,0 +1,66 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" + +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "Environment :" + printenv | sed 's/=/: /' | sed 's/^/ /' | sort + echo "$INFO" "Python :" + python --version | sed 's/^/ /' + command -v python | sed 's/^/ /' + + cd services/payments + uv pip --quiet sync requirements/dev.txt + cd - + echo "$INFO" "PIP :" + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-install debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + +# +# RUNNING application +# + +APP_LOG_LEVEL=${PAYMENTS_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/payments/src/simcore_service_payments && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${PAYMENTS_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --reload \ + $reload_dir_packages + --reload-dir . \ + --log-level \"${SERVER_LOG_LEVEL}\" + " +else + exec uvicorn simcore_service_payments.main:the_app \ + --host 0.0.0.0 \ + --log-level "${SERVER_LOG_LEVEL}" +fi diff --git a/services/payments/docker/entrypoint.sh b/services/payments/docker/entrypoint.sh new file mode 100755 index 00000000000..25153a6b2a2 --- /dev/null +++ b/services/payments/docker/entrypoint.sh @@ -0,0 +1,70 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " +WARNING="WARNING: [$(basename "$0")] " +ERROR="ERROR: [$(basename "$0")] " + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gid and assigns them to the user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..."
+ adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." + adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi +fi + +echo "$INFO Starting $* ..." +echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" +echo " local dir : $(ls -al)" + +exec gosu "$SC_USER_NAME" "$@" diff --git a/services/payments/docker/healthcheck.py b/services/payments/docker/healthcheck.py new file mode 100755 index 00000000000..1c036a49607 --- /dev/null +++ b/services/payments/docker/healthcheck.py @@ -0,0 +1,41 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=20s \ + --start-interval=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. debug, pdb-debug, debug-ptvsd, debugpy, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + +# Adds a base-path if defined in environ +SIMCORE_NODE_BASEPATH = os.environ.get("SIMCORE_NODE_BASEPATH", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception), urlopen(f"{sys.argv[1]}{SIMCORE_NODE_BASEPATH}") as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/payments/gateway/.gitignore b/services/payments/gateway/.gitignore new file mode 100644 index 00000000000..179980657be --- /dev/null +++ b/services/payments/gateway/.gitignore @@ -0,0 +1 @@ +openapi.json diff --git a/services/payments/gateway/Makefile b/services/payments/gateway/Makefile new file mode 100644 index 00000000000..75154a5c0c8 --- /dev/null +++ b/services/payments/gateway/Makefile @@ -0,0 +1,20 @@ +include ../../../scripts/common.Makefile + + + +.PHONY: openapi.json +openapi.json: ## creates OAS + example_payment_gateway.py openapi > $@ + +.PHONY: up-local +up-local: ## starts payments-gateway sample for local testing + ## Docs in http://127.0.0.1:32769/docs ... 
+ source $(DOT_ENV_FILE) && export PAYMENTS_USERNAME PAYMENTS_PASSWORD && \ + docker run -it \ + -p 32769:8000 \ + --env "PAYMENTS_SERVICE_API_BASE_URL=http://127.0.0.1:8011" \ + --env "PAYMENTS_USERNAME=$$PAYMENTS_USERNAME" \ + --env "PAYMENTS_PASSWORD=$$PAYMENTS_PASSWORD" \ + --name "example_payment_gateway" \ + local/payments:production \ + example_payment_gateway.py run diff --git a/services/payments/openapi.json b/services/payments/openapi.json new file mode 100644 index 00000000000..3a0aaf09fb7 --- /dev/null +++ b/services/payments/openapi.json @@ -0,0 +1,570 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "simcore-service-payments web API", + "description": "Service that manages creation and validation of registration payments", + "version": "1.4.0" + }, + "paths": { + "/": { + "get": { + "summary": "Healthcheck", + "operationId": "healthcheck__get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + }, + "/v1/token": { + "post": { + "tags": [ + "auth" + ], + "summary": "Login To Create Access Token", + "operationId": "login_to_create_access_token", + "requestBody": { + "content": { + "application/x-www-form-urlencoded": { + "schema": { + "$ref": "#/components/schemas/Body_login_to_create_access_token" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Token" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/meta": { + "get": { + "tags": [ + "meta" + ], + "summary": "Get Service Metadata", + "operationId": "get_service_metadata_v1_meta_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Meta" + } + } + } + } + }, + "security": [ + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/payments/{payment_id}:ack": { + "post": { + "tags": [ + "acks" + ], + "summary": "Acknowledge Payment", + "description": "completes (ie. ack) request initated by `/init` on the payments-gateway API", + "operationId": "acknowledge_payment_v1_payments__payment_id__ack_post", + "security": [ + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "payment_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 1, + "maxLength": 100, + "title": "Payment Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AckPayment" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/payments-methods/{payment_method_id}:ack": { + "post": { + "tags": [ + "acks" + ], + "summary": "Acknowledge Payment Method", + "description": "completes (ie. 
ack) request initated by `/payments-methods:init` on the payments-gateway API", + "operationId": "acknowledge_payment_method_v1_payments_methods__payment_method_id__ack_post", + "security": [ + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "payment_method_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "minLength": 1, + "maxLength": 100, + "title": "Payment Method Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AckPaymentMethod" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "AckPayment": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message" + }, + "provider_payment_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 100, + "minLength": 1 + }, + { + "type": "null" + } + ], + "title": "Provider Payment Id", + "description": "Payment ID from the provider (e.g. stripe payment ID)" + }, + "invoice_url": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Invoice Url", + "description": "Link to invoice is required when success=true" + }, + "invoice_pdf": { + "anyOf": [ + { + "type": "string", + "maxLength": 2083, + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Invoice Pdf", + "description": "Link to invoice PDF" + }, + "stripe_invoice_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 100, + "minLength": 1 + }, + { + "type": "null" + } + ], + "title": "Stripe Invoice Id", + "description": "Stripe invoice ID" + }, + "stripe_customer_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 100, + "minLength": 1 + }, + { + "type": "null" + } + ], + "title": "Stripe Customer Id", + "description": "Stripe customer ID" + }, + "saved": { + "anyOf": [ + { + "$ref": "#/components/schemas/SavedPaymentMethod" + }, + { + "type": "null" + } + ], + "description": "Gets the payment-method if user opted to save it during payment.If used did not opt to save of payment-method was already saved, then it defaults to None" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "AckPayment", + "example": { + "invoice_url": "https://invoices.com/id=12345", + "provider_payment_id": "pi_123ABC", + "saved": { + "payment_method_id": "3FA85F64-5717-4562-B3FC-2C963F66AFA6", + "success": true + }, + "success": true + } + }, + "AckPaymentMethod": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "AckPaymentMethod" + }, + "Body_login_to_create_access_token": { + "properties": { + "grant_type": { + "anyOf": [ + { + "type": "string", + "pattern": "^password$" + }, + { + "type": "null" + } + ], + "title": "Grant Type" + }, + "username": { + "type": "string", + "title": "Username" + }, + "password": { + "type": "string", + "title": "Password" + }, + "scope": { + 
"type": "string", + "title": "Scope", + "default": "" + }, + "client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Client Id" + }, + "client_secret": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Client Secret" + } + }, + "type": "object", + "required": [ + "username", + "password" + ], + "title": "Body_login_to_create_access_token" + }, + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "Meta": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Version" + }, + "released": { + "anyOf": [ + { + "additionalProperties": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Released", + "description": "Maps every route's path tag with a released version" + }, + "docs_url": { + "type": "string", + "title": "Docs Url" + } + }, + "type": "object", + "required": [ + "name", + "version", + "docs_url" + ], + "title": "Meta", + "example": { + "docs_url": "https://foo.io/doc", + "name": "simcore_service_payments", + "version": "2.4.45" + } + }, + "SavedPaymentMethod": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message" + }, + "payment_method_id": { + "anyOf": [ + { + "type": "string", + "maxLength": 100, + "minLength": 1 + }, + { + "type": "null" + } + ], + "title": "Payment Method Id" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "SavedPaymentMethod" + }, + "Token": { + "properties": { + "access_token": { + "type": "string", + "title": "Access Token" + }, + "token_type": { + "type": "string", + "const": "bearer", + "title": "Token Type" + } + }, + "type": "object", + "required": [ + "access_token", + "token_type" + ], + "title": "Token" + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", + "required": [ + "loc", + "msg", + "type" + ], + "title": "ValidationError" + } + }, + "securitySchemes": { + "OAuth2PasswordBearer": { + "type": "oauth2", + "flows": { + "password": { + "scopes": {}, + "tokenUrl": "/v1/token" + } + } + } + } + } +} diff --git a/services/payments/requirements/Makefile b/services/payments/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/payments/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/payments/requirements/_base.in b/services/payments/requirements/_base.in new file mode 100644 index 00000000000..cb2613eceaa --- /dev/null +++ b/services/payments/requirements/_base.in @@ -0,0 +1,25 @@ +# +# Specifies third-party dependencies for 'services/invitations/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# intra-repo required dependencies +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + +aiosmtplib # notifier +cryptography +Jinja2 # notifier +packaging +python-jose +python-multipart +python-socketio # notifier +typer[all] diff --git a/services/payments/requirements/_base.txt b/services/payments/requirements/_base.txt new file mode 100644 index 00000000000..c28b8f37f40 --- /dev/null +++ b/services/payments/requirements/_base.txt @@ -0,0 +1,663 @@ +aio-pika==9.5.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiocache==0.12.3 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodebug==2.3.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.24.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==24.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # aiodocker +aiormq==6.8.1 + # via aio-pika +aiosignal==1.3.1 + # via aiohttp +aiosmtplib==3.0.2 + # via -r requirements/_base.in +alembic==1.14.0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +anyio==4.6.2.post1 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +asyncpg==0.30.0 + # via sqlalchemy +attrs==24.2.0 + # via + # aiohttp + # jsonschema + # referencing +bidict==0.23.1 + # via python-socketio +certifi==2024.8.30 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +cffi==1.17.1 + # via cryptography +charset-normalizer==3.4.0 + # via requests +click==8.1.7 + # via + # rich-toolkit + # typer + # uvicorn +cryptography==44.0.0 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt 
+ # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.7.0 + # via email-validator +ecdsa==0.19.0 + # via python-jose +email-validator==2.2.0 + # via + # fastapi + # pydantic +exceptiongroup==1.2.2 + # via aio-pika +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.31 + # via -r requirements/../../../packages/service-library/requirements/_base.in +frozenlist==1.5.0 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.66.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.1.1 + # via sqlalchemy +grpcio==1.68.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # via + # httpcore + # uvicorn + # wsproto +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httpcore==1.0.7 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.27.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.10 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +importlib-metadata==8.5.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/_base.in + # fastapi +jsonschema==4.23.0 + # via + # -r 
requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2024.10.1 + # via jsonschema +mako==1.3.6 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +mdurl==0.1.2 + # via markdown-it-py +multidict==6.1.0 + # via + # aiohttp + # yarl +opentelemetry-api==1.28.2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.28.2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.28.2 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.28.2 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.28.2 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.49b2 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # 
opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.49b2 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.49b2 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.49b2 + # via -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-proto==1.28.2 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.28.2 + # via + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.49b2 + # via + # opentelemetry-instrumentation + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.49b2 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.12 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.2 + # via + # -r requirements/_base.in + # opentelemetry-instrumentation +pamqp==3.3.0 + # via aiormq +prometheus-client==0.21.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.2.0 + # via + # aiohttp + # yarl +protobuf==5.29.0 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==6.1.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.10 + # via sqlalchemy +pyasn1==0.6.1 + # via + # python-jose + # rsa +pycparser==2.22 + # via cffi +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.10.0 + # via + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 + # via + # -c 
requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.18.0 + # via rich +pyinstrument==5.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +python-dateutil==2.9.0.post0 + # via arrow +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-engineio==4.10.1 + # via python-socketio +python-jose==3.3.0 + # via -r requirements/_base.in +python-multipart==0.0.20 + # via + # -r requirements/_base.in + # fastapi +python-socketio==5.11.4 + # via -r requirements/_base.in +pyyaml==6.0.2 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.35.1 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +requests==2.32.3 + # via opentelemetry-exporter-otlp-proto-http +rich==13.9.4 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.21.0 + # via + # jsonschema + # referencing +rsa==4.9 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # python-jose +shellingham==1.5.4 + # via typer +simple-websocket==1.1.0 + # via python-engineio +six==1.16.0 + # via + # ecdsa + # python-dateutil +sniffio==1.3.1 + # via + # anyio + # httpx +sqlalchemy==1.4.54 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +starlette==0.41.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +stream-zip==0.0.83 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==9.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +toolz==1.0.0 + # via -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.67.1 + # via -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.13.1 + # via + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fastapi-cli +types-python-dateutil==2.9.0.20241003 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # alembic + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # pydantic-extra-types + # rich-toolkit + # typer +urllib3==2.2.3 + # via + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==1.0.0 + # via uvicorn +websockets==14.1 + # via uvicorn +wrapt==1.17.0 + # via + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis +wsproto==1.2.0 + # via simple-websocket +yarl==1.18.0 + # via + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.21.0 + # via importlib-metadata diff --git a/services/payments/requirements/_test.in b/services/payments/requirements/_test.in new file mode 100644 index 00000000000..73cb834f2b4 --- /dev/null +++ b/services/payments/requirements/_test.in @@ -0,0 +1,32 @@ +# +# Specifies dependencies required to run 'services/api-server/test' +# both for unit and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _text.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + + +asgi_lifespan +coverage +docker +faker +jsonref +pytest +pytest-asyncio +pytest-cov +pytest-icdiff +pytest-mock +pytest-runner +pytest-sugar +python-dotenv +python-socketio[asyncio_client] +respx +sqlalchemy[mypy] +types-aiofiles +types-PyYAML +types-python-jose diff --git a/services/payments/requirements/_test.txt b/services/payments/requirements/_test.txt new file mode 100644 index 00000000000..7c801660a10 --- /dev/null +++ b/services/payments/requirements/_test.txt @@ -0,0 +1,191 @@ +aiohappyeyeballs==2.4.3 + # via + # -c requirements/_base.txt + # aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # python-socketio +aiosignal==1.3.1 + # via + # -c requirements/_base.txt + # aiohttp +anyio==4.6.2.post1 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==24.2.0 + # via + # -c requirements/_base.txt + # aiohttp +bidict==0.23.1 + # via + # -c requirements/_base.txt + # python-socketio +certifi==2024.8.30 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +charset-normalizer==3.4.0 + # via + # -c requirements/_base.txt + # requests +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +docker==7.1.0 + # via -r requirements/_test.in +faker==36.1.1 + # via -r requirements/_test.in +frozenlist==1.5.0 + # via + # -c requirements/_base.txt + # aiohttp + # aiosignal +greenlet==3.1.1 + # via + # -c requirements/_base.txt + # sqlalchemy +h11==0.14.0 + # via + # -c requirements/_base.txt + # httpcore + # wsproto +httpcore==1.0.7 + # via + # -c requirements/_base.txt + # httpx 
+httpx==0.27.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # respx +icdiff==2.0.7 + # via pytest-icdiff +idna==3.10 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests + # yarl +iniconfig==2.0.0 + # via pytest +jsonref==1.1.0 + # via -r requirements/_test.in +multidict==6.1.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +packaging==24.2 + # via + # -c requirements/_base.txt + # pytest + # pytest-sugar +pluggy==1.5.0 + # via pytest +pprintpp==0.4.0 + # via pytest-icdiff +propcache==0.2.0 + # via + # -c requirements/_base.txt + # aiohttp + # yarl +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-icdiff + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-icdiff==0.9 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +python-engineio==4.10.1 + # via + # -c requirements/_base.txt + # python-socketio +python-socketio==5.11.4 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +requests==2.32.3 + # via + # -c requirements/_base.txt + # docker +respx==0.22.0 + # via -r requirements/_test.in +simple-websocket==1.1.0 + # via + # -c requirements/_base.txt + # python-engineio +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan + # httpx +sqlalchemy==1.4.54 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +termcolor==2.5.0 + # via pytest-sugar +types-aiofiles==24.1.0.20241221 + # via -r requirements/_test.in +types-pyasn1==0.6.0.20250208 + # via types-python-jose +types-python-jose==3.4.0.20250224 + # via -r requirements/_test.in +types-pyyaml==6.0.12.20241230 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # mypy + # sqlalchemy2-stubs +tzdata==2025.1 + # via faker +urllib3==2.2.3 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # docker + # requests +wsproto==1.2.0 + # via + # -c requirements/_base.txt + # simple-websocket +yarl==1.18.0 + # via + # -c requirements/_base.txt + # aiohttp diff --git a/services/payments/requirements/_tools.in b/services/payments/requirements/_tools.in new file mode 100644 index 00000000000..1def82c12a3 --- /dev/null +++ b/services/payments/requirements/_tools.in @@ -0,0 +1,5 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt diff --git a/services/payments/requirements/_tools.txt b/services/payments/requirements/_tools.txt new file mode 100644 index 00000000000..c49f6c3693d --- /dev/null +++ b/services/payments/requirements/_tools.txt @@ -0,0 +1,85 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.7 + # via + # -c requirements/_base.txt + # black + # pip-tools 
+dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # pre-commit +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==75.8.2 + # via pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +wheel==0.45.1 + # via pip-tools diff --git a/services/payments/requirements/ci.txt b/services/payments/requirements/ci.txt new file mode 100644 index 00000000000..562c7eb6d84 --- /dev/null +++ b/services/payments/requirements/ci.txt @@ -0,0 +1,23 @@ +# Shortcut to install all packages for the contigous integration (CI) of 'services/invitations' +# +# - As ci.txt but w/ tests +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +simcore-postgres-database @ ../../packages/postgres-database +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library + +# installs current package +simcore-service-payments @ . diff --git a/services/payments/requirements/constraints.txt b/services/payments/requirements/constraints.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/requirements/dev.txt b/services/payments/requirements/dev.txt new file mode 100644 index 00000000000..80aeaf26dbe --- /dev/null +++ b/services/payments/requirements/dev.txt @@ -0,0 +1,23 @@ +# Shortcut to install all packages needed to develop 'services/invitations' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/postgres-database +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library + +# installs current package +--editable . 
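The Body_login_to_create_access_token and Token schemas together with the OAuth2PasswordBearer scheme earlier in this diff describe a password-grant login endpoint at /v1/token, and the python-jose / python-multipart pins above back it; the fake gateway script further below re-implements the client side of that flow in its PaymentsAuth helper. A minimal standalone sketch of the same flow (not the service's own client code; base URL, credentials and the /v1/meta route are placeholders/assumptions) could look like:

# Sketch only: log in against the payments service and reuse the bearer token.
# BASE_URL, USERNAME and PASSWORD are placeholders; /v1/meta is an assumed route.
import httpx

BASE_URL = "http://localhost:8000"
USERNAME = "admin"
PASSWORD = "adminadmin"

def login() -> dict[str, str]:
    # /v1/token expects form-encoded credentials (see Body_login_to_create_access_token)
    response = httpx.post(
        f"{BASE_URL}/v1/token",
        data={"username": USERNAME, "password": PASSWORD},
    )
    response.raise_for_status()
    token = response.json()
    assert token["token_type"] == "bearer"
    return {"Authorization": f"Bearer {token['access_token']}"}

if __name__ == "__main__":
    auth_headers = login()
    print(httpx.get(f"{BASE_URL}/v1/meta", headers=auth_headers).json())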
diff --git a/services/payments/requirements/prod.txt b/services/payments/requirements/prod.txt new file mode 100644 index 00000000000..0da328d59e4 --- /dev/null +++ b/services/payments/requirements/prod.txt @@ -0,0 +1,20 @@ +# Shortcut to install 'services/invitations' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt + +# installs this repo's packages +simcore-models-library @ ../../packages/models-library +simcore-common-library @ ../../packages/common-library/ +simcore-postgres-database @ ../../packages/postgres-database +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library + +# installs current package +simcore-service-payments @ . diff --git a/services/payments/scripts/example_payment_gateway.py b/services/payments/scripts/example_payment_gateway.py new file mode 100755 index 00000000000..f3e3b64627b --- /dev/null +++ b/services/payments/scripts/example_payment_gateway.py @@ -0,0 +1,473 @@ +#!/usr/bin/env python + +# pylint: disable=protected-access +# pylint: disable=redefined-builtin +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +""" This is a simple example of a payments-gateway service + + - Mainly used to create the openapi specs (SEE `openapi.json`) that the payments service expects + - Also used as a fake payment-gateway for manual exploratory testing +""" + + +import argparse +import datetime +import json +import logging +import types +from dataclasses import dataclass +from typing import Annotated, Any, cast +from uuid import uuid4 + +import httpx +import uvicorn +from fastapi import ( + APIRouter, + Depends, + FastAPI, + Form, + Header, + HTTPException, + Request, + status, +) +from fastapi.encoders import jsonable_encoder +from fastapi.responses import HTMLResponse +from fastapi.routing import APIRoute +from pydantic import HttpUrl, SecretStr +from servicelib.fastapi.openapi import override_fastapi_openapi_method +from settings_library.base import BaseCustomSettings +from simcore_service_payments.models.payments_gateway import ( + BatchGetPaymentMethods, + ErrorModel, + GetPaymentMethod, + InitPayment, + InitPaymentMethod, + PaymentCancelled, + PaymentID, + PaymentInitiated, + PaymentMethodID, + PaymentMethodInitiated, + PaymentMethodsBatch, +) +from simcore_service_payments.models.schemas.acknowledgements import ( + AckPayment, + AckPaymentMethod, + AckPaymentWithPaymentMethod, +) +from simcore_service_payments.models.schemas.auth import Token + +logging.basicConfig(level=logging.INFO) + + +# NOTE: please change every time there is a change in the specs +PAYMENTS_GATEWAY_SPECS_VERSION = "0.3.0" + + +class Settings(BaseCustomSettings): + PAYMENTS_SERVICE_API_BASE_URL: HttpUrl = "http://replace-with-ack-service.io" + PAYMENTS_USERNAME: str = "replace-with_username" + PAYMENTS_PASSWORD: SecretStr = "replace-with-password" + + +def _set_operation_id_as_handler_function_name(router: APIRouter): + for route in router.routes: + if isinstance(route, APIRoute): + assert isinstance(route.endpoint, types.FunctionType) # nosec + route.operation_id = route.endpoint.__name__ + + +ERROR_RESPONSES: dict[str, Any] = {"4XX": {"model": ErrorModel}} +ERROR_HTML_RESPONSES: dict[str, Any] = { + "4XX": {"content": {"text/html": {"schema": {"type": "string"}}}} +} + +FORM_HTML = """ + + + + Credit Card Payment + + +

+    <h1>Enter Credit Card Information</h1>
+    <form action="{0}" method="POST">
+        <label for="cardNumber">Card Number:</label>
+        <input type="text" id="cardNumber" name="cardNumber" required>
+        <br><br>
+        <label for="cardHolder">Card Holder:</label>
+        <input type="text" id="cardHolder" name="cardHolder" required>
+        <br><br>
+        <label for="cvc">CVC:</label>
+        <input type="text" id="cvc" name="cvc" required>
+        <br><br>
+        <label for="expirationDate">Expiration Date:</label>
+        <input type="text" id="expirationDate" name="expirationDate" required>
+        <br><br>
+        <input type="submit" value="{1}">
+    </form>
+</body>
+</html>
+"""
+
+ERROR_HTTP = """
+<html>
+<head>
+    <title>Error {0}</title>
+</head>
+<body>
+    <h1>{0}</h1>
+ + +""" + + +@dataclass +class PaymentForm: + card_number: Annotated[str, Form(alias="cardNumber")] + card_holder: Annotated[str, Form(alias="cardHolder")] + cvc: Annotated[str, Form()] + expiration_date: Annotated[str, Form(alias="expirationDate")] + + +class PaymentsAuth(httpx.Auth): + def __init__(self, username, password): + self.form_data = {"username": username, "password": password} + self.token = Token(access_token="Undefined", token_type="bearer") + + def build_request_access_token(self): + return httpx.Request( + "POST", + "/v1/token", + data=self.form_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + + def update_tokens(self, response): + assert response.status_code == status.HTTP_200_OK # nosec + token = Token(**response.json()) + assert token.token_type == "bearer" # nosec + self.token = token + + def auth_flow(self, request): + response = yield request + if response.status_code == status.HTTP_401_UNAUTHORIZED: + tokens_response = yield self.build_request_access_token() + self.update_tokens(tokens_response) + + request.headers["Authorization"] = f"Bearer {self.token.access_token}" + yield request + + +async def ack_payment(id_: PaymentID, acked: AckPayment, settings: Settings): + async with httpx.AsyncClient() as client: + await client.post( + f"{settings.PAYMENTS_SERVICE_API_BASE_URL}/v1/payments/{id_}:ack", + json=acked.model_dump(), + auth=PaymentsAuth( + username=settings.PAYMENTS_USERNAME, + password=settings.PAYMENTS_PASSWORD.get_secret_value(), + ), + ) + + +async def ack_payment_method( + id_: PaymentMethodID, acked: AckPaymentMethod, settings: Settings +): + async with httpx.AsyncClient() as client: + await client.post( + f"{settings.PAYMENTS_SERVICE_API_BASE_URL}/v1/payments-methods/{id_}:ack", + json=acked.model_dump(), + auth=PaymentsAuth( + username=settings.PAYMENTS_USERNAME, + password=settings.PAYMENTS_PASSWORD.get_secret_value(), + ), + ) + + +# +# Dependencies +# + + +def get_settings(request: Request) -> Settings: + return cast(Settings, request.app.state.settings) + + +def auth_session(x_init_api_secret: Annotated[str | None, Header()] = None) -> int: + if x_init_api_secret is None: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail="api secret missing" + ) + return 1 + + +# +# Router factories +# + + +def create_payment_router(): + router = APIRouter( + tags=[ + "payment", + ], + ) + + # payment + @router.post( + "/init", + response_model=PaymentInitiated, + responses=ERROR_RESPONSES, + ) + async def init_payment( + payment: InitPayment, + auth: Annotated[int, Depends(auth_session)], + ): + assert payment # nosec + assert auth # nosec + + id_ = f"{uuid4()}" + return PaymentInitiated(payment_id=id_) + + @router.get( + "/pay", + response_class=HTMLResponse, + responses=ERROR_HTML_RESPONSES, + ) + async def get_payment_form( + id: PaymentID, + ): + assert id # nosec + + return FORM_HTML.format(f"/pay?id={id}", "Submit Payment") + + @router.post( + "/pay", + response_class=HTMLResponse, + responses=ERROR_RESPONSES, + include_in_schema=False, + ) + async def pay( + id: PaymentID, + payment_form: Annotated[PaymentForm, Depends()], + settings: Annotated[Settings, Depends(get_settings)], + ): + """WARNING: this is only for faking pay. 
DO NOT EXPOSE TO openapi.json""" + acked = AckPayment( + success=True, + message=f"Fake Payment {id}", + invoice_url="https://fakeimg.pl/300/", + saved=None, + ) + await ack_payment(id_=id, acked=acked, settings=settings) + + @router.post( + "/cancel", + response_model=PaymentCancelled, + responses=ERROR_RESPONSES, + ) + async def cancel_payment( + payment: PaymentInitiated, + auth: Annotated[int, Depends(auth_session)], + ): + assert payment # nosec + assert auth # nosec + + return PaymentCancelled(message=f"CANCELLED {payment.payment_id}") + + return router + + +def create_payment_method_router(): + router = APIRouter( + prefix="/payment-methods", + tags=[ + "payment-method", + ], + ) + + # payment-methods + @router.post( + ":init", + response_model=PaymentMethodInitiated, + responses=ERROR_RESPONSES, + ) + async def init_payment_method( + payment_method: InitPaymentMethod, + auth: Annotated[int, Depends(auth_session)], + ): + assert payment_method # nosec + assert auth # nosec + + id_ = f"{uuid4()}" + return PaymentMethodInitiated(payment_method_id=id_) + + @router.get( + "/form", + response_class=HTMLResponse, + responses=ERROR_HTML_RESPONSES, + ) + async def get_form_payment_method( + id: PaymentMethodID, + ): + return FORM_HTML.format(f"/save?id={id}", "Save Payment") + + @router.post( + "/save", + response_class=HTMLResponse, + responses=ERROR_RESPONSES, + include_in_schema=False, + ) + async def save( + id: PaymentMethodID, + payment_form: Annotated[PaymentForm, Depends()], + settings: Annotated[Settings, Depends(get_settings)], + ): + """WARNING: this is only for faking save. DO NOT EXPOSE TO openapi.json""" + card_number_masked = f"**** **** **** {payment_form.card_number[-4:]}" + acked = AckPaymentMethod( + success=True, + message=f"Fake Payment-method saved {card_number_masked}", + ) + await ack_payment_method(id_=id, acked=acked, settings=settings) + + # CRUD payment-methods + @router.post( + ":batchGet", + response_model=PaymentMethodsBatch, + responses=ERROR_RESPONSES, + ) + async def batch_get_payment_methods( + batch: BatchGetPaymentMethods, + auth: Annotated[int, Depends(auth_session)], + ): + assert auth # nosec + assert batch # nosec + return PaymentMethodsBatch( + items=[ + GetPaymentMethod( + id=id_, created=datetime.datetime.now(tz=datetime.timezone.utc) + ) + for id_ in batch.payment_methods_ids + ] + ) + + @router.get( + "/{id}", + response_model=GetPaymentMethod, + responses={ + status.HTTP_404_NOT_FOUND: { + "model": ErrorModel, + "description": "Payment method not found: It was not added or incomplete (i.e. 
create flow failed or canceled)", + }, + **ERROR_RESPONSES, + }, + ) + async def get_payment_method( + id: PaymentMethodID, + auth: Annotated[int, Depends(auth_session)], + ): + assert id # nosec + assert auth # nosec + + return GetPaymentMethod( + id=id, created=datetime.datetime.now(tz=datetime.timezone.utc) + ) + + @router.delete( + "/{id}", + status_code=status.HTTP_204_NO_CONTENT, + responses=ERROR_RESPONSES, + ) + async def delete_payment_method( + id: PaymentMethodID, + auth: Annotated[int, Depends(auth_session)], + ): + assert id # nosec + assert auth # nosec + + @router.post( + "/{id}:pay", + response_model=AckPaymentWithPaymentMethod, + responses=ERROR_RESPONSES, + ) + async def pay_with_payment_method( + id: PaymentMethodID, + payment: InitPayment, + auth: Annotated[int, Depends(auth_session)], + ): + assert id # nosec + assert payment # nosec + assert auth # nosec + + return AckPaymentWithPaymentMethod( # nosec + success=True, + invoice_url="https://fakeimg.pl/300/", + payment_id=f"{uuid4()}", + message=f"Payed with payment-method {id}", + ) + + return router # nosec + + +def create_app(): + app = FastAPI( + title="osparc-compliant payment-gateway", + version=PAYMENTS_GATEWAY_SPECS_VERSION, + debug=True, + ) + app.openapi_version = "3.0.0" # NOTE: small hack to allow current version of `42Crunch.vscode-openapi` to work with openapi + override_fastapi_openapi_method(app) + + app.state.settings = Settings.create_from_envs() + logging.info(app.state.settings.model_dump_json(indent=2)) + + for factory in ( + create_payment_router, + create_payment_method_router, + ): + router = factory() + _set_operation_id_as_handler_function_name(router) + app.include_router(router) + + return app + + +# +# CLI +# + +the_app = create_app() + + +def run_command(args): + uvicorn.run(the_app, port=8000, host="0.0.0.0") # nosec # NOSONAR + + +def openapi_command(args): + print(json.dumps(jsonable_encoder(the_app.openapi()), indent=1)) + + +if __name__ == "__main__": + # CLI: run or create schema + parser = argparse.ArgumentParser(description="fake payment-gateway") + subparsers = parser.add_subparsers() + + run_parser = subparsers.add_parser("run", help="Run the app") + run_parser.set_defaults(func=run_command) + + openapi_parser = subparsers.add_parser("openapi", help="Prints openapi specs") + openapi_parser.set_defaults(func=openapi_command) + + args = parser.parse_args() + if hasattr(args, "func"): + args.func(args) diff --git a/services/payments/setup.cfg b/services/payments/setup.cfg new file mode 100644 index 00000000000..d651872f245 --- /dev/null +++ b/services/payments/setup.cfg @@ -0,0 +1,23 @@ +[bumpversion] +current_version = 1.4.0 +commit = True +message = services/payments version: {current_version} β†’ {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] +[bumpversion:file:openapi.json] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" + acceptance_test: "marks tests as 'acceptance tests' i.e. does the system do what the user expects? Typically those are workflows." 
+ can_run_against_external: "marks tests that *can* be run against an external configuration passed by --external-envfile" + + +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.asyncio diff --git a/services/payments/setup.py b/services/payments/setup.py new file mode 100755 index 00000000000..25def4ccac8 --- /dev/null +++ b/services/payments/setup.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-payments" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Pedro Crespo-Valero (pcrespov)",) +DESCRIPTION = "Service that manages creation and validation of registration payments" +README = (CURRENT_DIR / "README.md").read_text() + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-postgres-database", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-payments = simcore_service_payments.cli:main", + "simcore-service = simcore_service_payments.cli:main", + ], + }, + "scripts": ["scripts/example_payment_gateway.py"], +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/payments/src/simcore_service_payments/__init__.py b/services/payments/src/simcore_service_payments/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/_constants.py b/services/payments/src/simcore_service_payments/_constants.py new file mode 100644 index 00000000000..d2ac1f2b03c --- /dev/null +++ b/services/payments/src/simcore_service_payments/_constants.py @@ -0,0 +1,6 @@ +from typing import Final + +ACKED: Final[str] = "Acknowledged" +PAG: Final[str] = "Payments Gateway service" +PGDB: Final[str] = "Postgres service" +RUT: Final[str] = "Resource Usage Tracker service" diff --git a/services/payments/src/simcore_service_payments/_meta.py b/services/payments/src/simcore_service_payments/_meta.py new file mode 100644 index 00000000000..f011e70ea6e --- /dev/null +++ b/services/payments/src/simcore_service_payments/_meta.py @@ -0,0 +1,33 @@ +""" Application's metadata + +""" +from typing import Final + +from models_library.basic_types import VersionStr +from packaging.version import Version +from servicelib.utils_meta import PackageInfo + +info: Final = PackageInfo(package_name="simcore-service-payments") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +APP_NAME: Final[str] = PROJECT_NAME +API_VTAG: Final[str] = 
info.api_prefix_path_tag +SUMMARY: Final[str] = info.get_summary() + + +# NOTE: https://patorjk.com/software/taag/#p=testall&f=Standard&t=Payments +APP_STARTED_BANNER_MSG = r""" + ____ __ _ _ _ _ ____ __ _ ____ ___ +( _ \ / _\ ( \/ )( \/ )( __)( ( \(_ _)/ ___) + ) __// \ ) / / \/ \ ) _) / / )( \___ \ +(__) \_/\_/(__/ \_)(_/(____)\_)__) (__) (____/ {} +""".format( + f"v{__version__}" +) + + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/payments/src/simcore_service_payments/api/__init__.py b/services/payments/src/simcore_service_payments/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/api/rest/__init__.py b/services/payments/src/simcore_service_payments/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py b/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py new file mode 100644 index 00000000000..ca0d74c8e3e --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/_acknowledgements.py @@ -0,0 +1,147 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, status +from models_library.api_schemas_payments.errors import ( + PaymentMethodNotFoundError, + PaymentNotFoundError, +) +from servicelib.logging_errors import create_troubleshotting_log_kwargs +from servicelib.logging_utils import log_context + +from ..._constants import ACKED, PGDB +from ...db.payments_methods_repo import PaymentsMethodsRepo +from ...db.payments_transactions_repo import PaymentsTransactionsRepo +from ...models.auth import SessionData +from ...models.schemas.acknowledgements import ( + AckPayment, + AckPaymentMethod, + PaymentID, + PaymentMethodID, +) +from ...services import payments, payments_methods +from ...services.notifier import NotifierService +from ...services.resource_usage_tracker import ResourceUsageTrackerApi +from ._dependencies import ( + create_repository, + get_current_session, + get_from_app_state, + get_rut_api, +) + +_logger = logging.getLogger(__name__) + + +router = APIRouter() + + +@router.post("/payments/{payment_id}:ack") +async def acknowledge_payment( + payment_id: PaymentID, + ack: AckPayment, + _session: Annotated[SessionData, Depends(get_current_session)], + repo_pay: Annotated[ + PaymentsTransactionsRepo, Depends(create_repository(PaymentsTransactionsRepo)) + ], + repo_methods: Annotated[ + PaymentsMethodsRepo, Depends(create_repository(PaymentsMethodsRepo)) + ], + rut_api: Annotated[ResourceUsageTrackerApi, Depends(get_rut_api)], + notifier: Annotated[NotifierService, Depends(get_from_app_state(NotifierService))], + background_tasks: BackgroundTasks, +): + """completes (ie. 
ack) request initated by `/init` on the payments-gateway API""" + + with log_context( + _logger, + logging.INFO, + "%s: Update %s transaction %s in db", + PGDB, + ACKED, + f"{payment_id=}", + ): + try: + transaction = await payments.acknowledge_one_time_payment( + repo_pay, payment_id=payment_id, ack=ack + ) + except PaymentNotFoundError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err + + assert f"{payment_id}" == f"{transaction.payment_id}" # nosec + background_tasks.add_task( + payments.on_payment_completed, + transaction, + rut_api, + notifier=notifier, + ) + + if ack.saved: + if ack.saved.payment_method_id is None or not ack.saved.success: + _logger.error( + **create_troubleshotting_log_kwargs( + f"Got ack that {payment_id=} was completed but failed to save the payment-method used for the payment as requested.", + error=RuntimeError("Failed to save payment-method after payment"), + error_context={ + "ack": ack, + "user_id": transaction.user_id, + "payment_id": payment_id, + "transaction": transaction, + }, + tip="This issue is not critical. Since the payment-method could not be saved, " + "the user cannot use it in following payments and will have to re-introduce it manually" + "SEE https://github.com/ITISFoundation/osparc-simcore/issues/6902", + ) + ) + else: + inserted = await payments_methods.insert_payment_method( + repo=repo_methods, + payment_method_id=ack.saved.payment_method_id, + user_id=transaction.user_id, + wallet_id=transaction.wallet_id, + ack=ack.saved, + ) + + background_tasks.add_task( + payments_methods.on_payment_method_completed, + payment_method=inserted, + notifier=notifier, + ) + + +@router.post("/payments-methods/{payment_method_id}:ack") +async def acknowledge_payment_method( + payment_method_id: PaymentMethodID, + ack: AckPaymentMethod, + _session: Annotated[SessionData, Depends(get_current_session)], + repo: Annotated[ + PaymentsMethodsRepo, Depends(create_repository(PaymentsMethodsRepo)) + ], + notifier: Annotated[NotifierService, Depends(get_from_app_state(NotifierService))], + background_tasks: BackgroundTasks, +): + """completes (ie. 
ack) request initated by `/payments-methods:init` on the payments-gateway API""" + with log_context( + _logger, + logging.INFO, + "%s: Update %s payment-method %s in db", + PGDB, + ACKED, + f"{payment_method_id=}", + ): + try: + acked = await payments_methods.acknowledge_creation_of_payment_method( + repo=repo, payment_method_id=payment_method_id, ack=ack + ) + except PaymentMethodNotFoundError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail=f"{err}" + ) from err + + assert f"{payment_method_id}" == f"{acked.payment_method_id}" # nosec + background_tasks.add_task( + payments_methods.on_payment_method_completed, + payment_method=acked, + notifier=notifier, + ) diff --git a/services/payments/src/simcore_service_payments/api/rest/_auth.py b/services/payments/src/simcore_service_payments/api/rest/_auth.py new file mode 100644 index 00000000000..5a91fd2fbb4 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/_auth.py @@ -0,0 +1,41 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, HTTPException, status +from fastapi.security import OAuth2PasswordRequestForm + +from ...core.settings import ApplicationSettings +from ...models.schemas.auth import Token +from ...services.auth import authenticate_user, encode_access_token +from ._dependencies import get_settings + +_logger = logging.getLogger(__name__) + + +router = APIRouter() + + +@router.post( + "/token", response_model=Token, operation_id="login_to_create_access_token" +) +async def login_to_create_access_token( + form_data: Annotated[OAuth2PasswordRequestForm, Depends()], + settings: Annotated[ApplicationSettings, Depends(get_settings)], +): + # + # OAuth2PasswordRequestForm: OAuth2 specifies that when using the "password flow" + # the client must send a username and password fields as form data + # + if not authenticate_user(form_data.username, form_data.password, settings): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Incorrect username or password", + headers={"WWW-Authenticate": "Bearer"}, + ) + + return { + "access_token": encode_access_token( + username=form_data.username, settings=settings + ), + "token_type": "bearer", + } diff --git a/services/payments/src/simcore_service_payments/api/rest/_dependencies.py b/services/payments/src/simcore_service_payments/api/rest/_dependencies.py new file mode 100644 index 00000000000..cc0ead80894 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/_dependencies.py @@ -0,0 +1,88 @@ +# mypy: disable-error-code=truthy-function +import logging +from collections.abc import AsyncGenerator, Callable +from typing import Annotated + +from fastapi import Depends, FastAPI, Request +from fastapi.security import OAuth2PasswordBearer +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..._meta import API_VTAG +from ...core.settings import ApplicationSettings +from ...db.base import BaseRepository +from ...models.auth import SessionData +from ...services.auth import get_session_data +from ...services.postgres import get_engine +from ...services.resource_usage_tracker import ResourceUsageTrackerApi + +_logger = logging.getLogger(__name__) + + +# +# core dependencies +# + + +def get_settings(request: Request) -> ApplicationSettings: + app_settings: ApplicationSettings = request.app.state.settings + assert app_settings # 
nosec + return app_settings + + +assert get_reverse_url_mapper # nosec +assert get_app # nosec + +# +# services dependencies +# + + +def get_rut_api(request: Request) -> ResourceUsageTrackerApi: + return ResourceUsageTrackerApi.get_from_app_state(request.app) + + +def get_from_app_state( + app_state_mixin_subclass: type[SingletonInAppStateMixin], +) -> Callable: + """Generic getter of app.state objects""" + + def _(app: Annotated[FastAPI, Depends(get_app)]): + return app_state_mixin_subclass.get_from_app_state(app) + + return _ + + +def get_db_engine(request: Request) -> AsyncEngine: + engine: AsyncEngine = get_engine(request.app) + assert engine # nosec + return engine + + +def create_repository(repo_cls: type[BaseRepository]) -> Callable: + """Generic object factory of BaseRepository instances""" + + async def _( + engine: Annotated[AsyncEngine, Depends(get_db_engine)], + ) -> AsyncGenerator[BaseRepository, None]: + yield repo_cls(db_engine=engine) + + return _ + + +# Implements `password` flow defined in OAuth2 +_oauth2_scheme = OAuth2PasswordBearer(tokenUrl=f"/{API_VTAG}/token") + + +async def get_current_session( + settings: Annotated[ApplicationSettings, Depends(get_settings)], + token: Annotated[str, Depends(_oauth2_scheme)], +) -> SessionData: + return get_session_data(token, settings) + + +__all__: tuple[str, ...] = ( + "get_app", + "get_reverse_url_mapper", +) diff --git a/services/payments/src/simcore_service_payments/api/rest/_health.py b/services/payments/src/simcore_service_payments/api/rest/_health.py new file mode 100644 index 00000000000..948317cf883 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/_health.py @@ -0,0 +1,32 @@ +import logging +from typing import Annotated + +import arrow +from fastapi import APIRouter, Depends +from fastapi.responses import PlainTextResponse +from models_library.errors import RABBITMQ_CLIENT_UNHEALTHY_MSG +from servicelib.rabbitmq import RabbitMQClient + +from ...services.rabbitmq import get_rabbitmq_client_from_request + +_logger = logging.getLogger(__name__) + + +class HealthCheckError(RuntimeError): + """Failed a health check""" + + +router = APIRouter() + + +@router.get("/", response_class=PlainTextResponse) +async def healthcheck( + rabbitmq_client: Annotated[ + RabbitMQClient, Depends(get_rabbitmq_client_from_request) + ] +) -> str: + _logger.info("Checking rabbit health check %s", rabbitmq_client.healthy) + if not rabbitmq_client.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + return f"{__name__}@{arrow.utcnow().isoformat()}" diff --git a/services/payments/src/simcore_service_payments/api/rest/_meta.py b/services/payments/src/simcore_service_payments/api/rest/_meta.py new file mode 100644 index 00000000000..dc09e0cf09f --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/_meta.py @@ -0,0 +1,28 @@ +import logging +from collections.abc import Callable +from typing import Annotated + +from fastapi import APIRouter, Depends + +from ..._meta import API_VERSION, PROJECT_NAME +from ...models.auth import SessionData +from ...models.schemas.meta import Meta +from ._dependencies import get_current_session, get_reverse_url_mapper + +_logger = logging.getLogger(__name__) + + +router = APIRouter() + + +@router.get("/meta", response_model=Meta) +async def get_service_metadata( + url_for: Annotated[Callable, Depends(get_reverse_url_mapper)], + session: Annotated[SessionData, Depends(get_current_session)], +): + assert session.username is not None # nosec + return Meta( + 
name=PROJECT_NAME, + version=API_VERSION, + docs_url=url_for("swagger_ui_html"), + ) diff --git a/services/payments/src/simcore_service_payments/api/rest/routes.py b/services/payments/src/simcore_service_payments/api/rest/routes.py new file mode 100644 index 00000000000..2ddb650ce05 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rest/routes.py @@ -0,0 +1,21 @@ +from fastapi import APIRouter, FastAPI, HTTPException +from servicelib.fastapi.exceptions_utils import ( + handle_errors_as_500, + http_exception_as_json_response, +) + +from ..._meta import API_VTAG +from . import _acknowledgements, _auth, _health, _meta + + +def setup_rest_api(app: FastAPI): + app.include_router(_health.router) + + api_router = APIRouter(prefix=f"/{API_VTAG}") + api_router.include_router(_auth.router, tags=["auth"]) + api_router.include_router(_meta.router, tags=["meta"]) + api_router.include_router(_acknowledgements.router, tags=["acks"]) + app.include_router(api_router) + + app.add_exception_handler(Exception, handle_errors_as_500) + app.add_exception_handler(HTTPException, http_exception_as_json_response) diff --git a/services/payments/src/simcore_service_payments/api/rpc/__init__.py b/services/payments/src/simcore_service_payments/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/api/rpc/_payments.py b/services/payments/src/simcore_service_payments/api/rpc/_payments.py new file mode 100644 index 00000000000..fe6e4db28dc --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rpc/_payments.py @@ -0,0 +1,132 @@ +import logging +from decimal import Decimal + +from fastapi import FastAPI +from models_library.api_schemas_payments.errors import ( + PaymentsError, + PaymentServiceUnavailableError, +) +from models_library.api_schemas_webserver.wallets import ( + PaymentID, + PaymentTransaction, + WalletPaymentInitiated, +) +from models_library.payments import UserInvoiceAddress +from models_library.products import ProductName, StripePriceID, StripeTaxRateID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, HttpUrl +from servicelib.logging_utils import get_log_record_extra, log_context +from servicelib.rabbitmq import RPCRouter + +from ...db.payments_transactions_repo import PaymentsTransactionsRepo +from ...services import payments +from ...services.payments_gateway import PaymentsGatewayApi +from ...services.stripe import StripeApi + +_logger = logging.getLogger(__name__) + + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=(PaymentsError, PaymentServiceUnavailableError)) +async def init_payment( # pylint: disable=too-many-arguments + app: FastAPI, + *, + amount_dollars: Decimal, + target_credits: Decimal, + product_name: str, + wallet_id: WalletID, + wallet_name: str, + user_id: UserID, + user_name: str, + user_email: EmailStr, + user_address: UserInvoiceAddress, + stripe_price_id: StripePriceID, + stripe_tax_rate_id: StripeTaxRateID, + comment: str | None = None, +) -> WalletPaymentInitiated: + with log_context( + _logger, + logging.INFO, + "Init payment to %s", + f"{wallet_id=}", + extra=get_log_record_extra(user_id=user_id), + ): + return await payments.init_one_time_payment( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsTransactionsRepo(db_engine=app.state.engine), + amount_dollars=amount_dollars, + target_credits=target_credits, + product_name=product_name, + wallet_id=wallet_id, + 
wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + user_address=user_address, + stripe_price_id=stripe_price_id, + stripe_tax_rate_id=stripe_tax_rate_id, + comment=comment, + ) + + +@router.expose(reraise_if_error_type=(PaymentsError, PaymentServiceUnavailableError)) +async def cancel_payment( + app: FastAPI, + *, + payment_id: PaymentID, + user_id: UserID, + wallet_id: WalletID, +) -> None: + + with log_context( + _logger, + logging.INFO, + "Cancel payment in %s", + f"{wallet_id=}", + extra=get_log_record_extra(user_id=user_id), + ): + await payments.cancel_one_time_payment( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsTransactionsRepo(db_engine=app.state.engine), + payment_id=payment_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + +@router.expose(reraise_if_error_type=(PaymentsError, PaymentServiceUnavailableError)) +async def get_payments_page( + app: FastAPI, + *, + user_id: UserID, + product_name: ProductName, + limit: int | None = None, + offset: int | None = None, +) -> tuple[int, list[PaymentTransaction]]: + return await payments.get_payments_page( + repo=PaymentsTransactionsRepo(db_engine=app.state.engine), + user_id=user_id, + product_name=product_name, + limit=limit, + offset=offset, + ) + + +@router.expose(reraise_if_error_type=(PaymentsError, PaymentServiceUnavailableError)) +async def get_payment_invoice_url( + app: FastAPI, + *, + user_id: UserID, + wallet_id: WalletID, + payment_id: PaymentID, +) -> HttpUrl: + return await payments.get_payment_invoice_url( + repo=PaymentsTransactionsRepo(db_engine=app.state.engine), + stripe_api=StripeApi.get_from_app_state(app), + user_id=user_id, + wallet_id=wallet_id, + payment_id=payment_id, + ) diff --git a/services/payments/src/simcore_service_payments/api/rpc/_payments_methods.py b/services/payments/src/simcore_service_payments/api/rpc/_payments_methods.py new file mode 100644 index 00000000000..360dcf962c0 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rpc/_payments_methods.py @@ -0,0 +1,199 @@ +import logging +from decimal import Decimal + +from fastapi import FastAPI +from models_library.api_schemas_payments.errors import ( + PaymentsError, + PaymentServiceUnavailableError, + PaymentsMethodsError, +) +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodGet, + PaymentMethodID, + PaymentMethodInitiated, +) +from models_library.basic_types import IDStr +from models_library.payments import UserInvoiceAddress +from models_library.products import StripePriceID, StripeTaxRateID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr +from servicelib.logging_utils import get_log_record_extra, log_context +from servicelib.rabbitmq import RPCRouter + +from ...db.payments_methods_repo import PaymentsMethodsRepo +from ...db.payments_transactions_repo import PaymentsTransactionsRepo +from ...services import payments, payments_methods +from ...services.notifier import NotifierService +from ...services.payments_gateway import PaymentsGatewayApi +from ...services.resource_usage_tracker import ResourceUsageTrackerApi + +_logger = logging.getLogger(__name__) + + +router = RPCRouter() + + +@router.expose( + reraise_if_error_type=(PaymentsMethodsError, PaymentServiceUnavailableError) +) +async def init_creation_of_payment_method( + app: FastAPI, + *, + wallet_id: WalletID, + wallet_name: IDStr, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, +) -> 
PaymentMethodInitiated: + with log_context( + _logger, + logging.INFO, + "Init creation of payment-method to %s", + f"{wallet_id=}", + extra=get_log_record_extra(user_id=user_id), + ): + return await payments_methods.init_creation_of_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsMethodsRepo(db_engine=app.state.engine), + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + ) + + +@router.expose( + reraise_if_error_type=(PaymentsMethodsError, PaymentServiceUnavailableError) +) +async def cancel_creation_of_payment_method( + app: FastAPI, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +) -> None: + with log_context( + _logger, + logging.INFO, + "Cancel creation of payment-method in %s", + f"{wallet_id=}", + extra=get_log_record_extra(user_id=user_id), + ): + await payments_methods.cancel_creation_of_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsMethodsRepo(db_engine=app.state.engine), + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + +@router.expose( + reraise_if_error_type=(PaymentsMethodsError, PaymentServiceUnavailableError) +) +async def list_payment_methods( + app: FastAPI, + *, + user_id: UserID, + wallet_id: WalletID, +) -> list[PaymentMethodGet]: + return await payments_methods.list_payment_methods( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsMethodsRepo(db_engine=app.state.engine), + user_id=user_id, + wallet_id=wallet_id, + ) + + +@router.expose( + reraise_if_error_type=(PaymentsMethodsError, PaymentServiceUnavailableError) +) +async def get_payment_method( + app: FastAPI, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +) -> PaymentMethodGet: + return await payments_methods.get_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsMethodsRepo(db_engine=app.state.engine), + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + +@router.expose( + reraise_if_error_type=(PaymentsMethodsError, PaymentServiceUnavailableError) +) +async def delete_payment_method( + app: FastAPI, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +) -> None: + await payments_methods.delete_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + repo=PaymentsMethodsRepo(db_engine=app.state.engine), + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + +@router.expose( + reraise_if_error_type=( + PaymentsMethodsError, + PaymentsError, + PaymentServiceUnavailableError, + ) +) +async def pay_with_payment_method( # noqa: PLR0913 # pylint: disable=too-many-arguments + app: FastAPI, + *, + payment_method_id: PaymentMethodID, + amount_dollars: Decimal, + target_credits: Decimal, + product_name: str, + wallet_id: WalletID, + wallet_name: str, + user_id: UserID, + user_name: str, + user_email: EmailStr, + user_address: UserInvoiceAddress, + stripe_price_id: StripePriceID, + stripe_tax_rate_id: StripeTaxRateID, + comment: str | None = None, +): + with log_context( + _logger, + logging.INFO, + "Pay w/ %s to %s", + f"{payment_method_id=}", + f"{wallet_id=}", + extra=get_log_record_extra(user_id=user_id), + ): + return await payments.pay_with_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + rut=ResourceUsageTrackerApi.get_from_app_state(app), + 
repo_transactions=PaymentsTransactionsRepo(db_engine=app.state.engine), + repo_methods=PaymentsMethodsRepo(db_engine=app.state.engine), + notifier=NotifierService.get_from_app_state(app), + payment_method_id=payment_method_id, + amount_dollars=amount_dollars, + target_credits=target_credits, + product_name=product_name, + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_address=user_address, + user_email=user_email, + stripe_price_id=stripe_price_id, + stripe_tax_rate_id=stripe_tax_rate_id, + comment=comment, + ) diff --git a/services/payments/src/simcore_service_payments/api/rpc/routes.py b/services/payments/src/simcore_service_payments/api/rpc/routes.py new file mode 100644 index 00000000000..ee7a8cb5e16 --- /dev/null +++ b/services/payments/src/simcore_service_payments/api/rpc/routes.py @@ -0,0 +1,21 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_payments import PAYMENTS_RPC_NAMESPACE + +from ...services.rabbitmq import get_rabbitmq_rpc_server +from . import _payments, _payments_methods + +_logger = logging.getLogger(__name__) + + +def setup_rpc_api_routes(app: FastAPI) -> None: + async def _on_startup() -> None: + rpc_server = get_rabbitmq_rpc_server(app) + for router in ( + _payments.router, + _payments_methods.router, + ): + await rpc_server.register_router(router, PAYMENTS_RPC_NAMESPACE, app) + + app.add_event_handler("startup", _on_startup) diff --git a/services/payments/src/simcore_service_payments/cli.py b/services/payments/src/simcore_service_payments/cli.py new file mode 100644 index 00000000000..64c67d00e8f --- /dev/null +++ b/services/payments/src/simcore_service_payments/cli.py @@ -0,0 +1,103 @@ +import getpass +import logging +import os + +import typer +from servicelib.utils_secrets import generate_password, generate_token_secret_key +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.utils_cli import ( + create_settings_command, + create_version_callback, + print_as_envfile, +) + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + +main = typer.Typer(name=PROJECT_NAME) + +main.command()( + create_settings_command(settings_cls=ApplicationSettings, logger=_logger) +) +main.callback()(create_version_callback(__version__)) + + +@main.command() +def echo_dotenv( + ctx: typer.Context, *, auto_password: bool = True, minimal: bool = True +): + """Generates and displays a valid environment variables file (also known as dot-envfile) + + Usage: + $ simcore-service-payments echo-dotenv > .env + $ cat .env + $ set -o allexport; source .env; set +o allexport + """ + assert ctx # nosec + + username = getpass.getuser() + password: str = ( + getpass.getpass(prompt="Password [Press Enter to auto-generate]: ") + if not auto_password + else None + ) or generate_password(length=32) + + # NOTE: we normally DO NOT USE `os.environ` to capture env vars but this is a special case + # The idea here is to have a command that can generate a **valid** `.env` file that can be used + # to initialized the app. For that reason we fill required fields of the `ApplicationSettings` with + # "fake" but valid values (e.g. generating a password or adding tags as `replace-with-api-key). + # Nonetheless, if the caller of this CLI has already some **valid** env vars in the environment we want to use them ... + # and that is why we use `os.environ`. 
+ + settings = ApplicationSettings.create_from_envs( + PAYMENTS_ACCESS_TOKEN_SECRET_KEY=generate_token_secret_key(32), + PAYMENTS_USERNAME=username, + PAYMENTS_PASSWORD=password, + PAYMENTS_GATEWAY_URL="http://127.0.0.1:8000", # NOSONAR + PAYMENTS_GATEWAY_API_SECRET=os.environ.get( + "PAYMENTS_GATEWAY_API_SECRET", "replace-with-api-secret" + ), + PAYMENTS_RABBITMQ=os.environ.get( + "PAYMENTS_RABBITMQ", + RabbitSettings.create_from_envs( + RABBIT_HOST=os.environ.get("RABBIT_HOST", "replace-with-rabbit-host"), + RABBIT_SECURE=os.environ.get("RABBIT_SECURE", "0"), + RABBIT_USER=os.environ.get("RABBIT_USER", "replace-with-rabbit-user"), + RABBIT_PASSWORD=os.environ.get( + "RABBIT_PASSWORD", "replace-with-rabbit-user" + ), + ), + ), + PAYMENTS_POSTGRES=os.environ.get( + "PAYMENTS_POSTGRES", + PostgresSettings.create_from_envs( + POSTGRES_HOST=os.environ.get( + "POSTGRES_HOST", "replace-with-postgres-host" + ), + POSTGRES_USER=os.environ.get( + "POSTGRES_USER", "replace-with-postgres-user" + ), + POSTGRES_DB=os.environ.get("POSTGRES_DB", "replace-with-postgres-db"), + POSTGRES_PASSWORD=os.environ.get( + "POSTGRES_PASSWORD", "replace-with-postgres-password" + ), + ), + ), + PAYMENTS_STRIPE_URL=os.environ.get( + "PAYMENTS_STRIPE_URL", "https://api.stripe.com" + ), + PAYMENTS_STRIPE_API_SECRET=os.environ.get( + "PAYMENTS_STRIPE_API_SECRET", "replace-with-api-secret" + ), + ) + + print_as_envfile( + settings, + compact=False, + verbose=True, + show_secrets=True, + exclude_unset=minimal, + ) diff --git a/services/payments/src/simcore_service_payments/core/__init__.py b/services/payments/src/simcore_service_payments/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/core/application.py b/services/payments/src/simcore_service_payments/core/application.py new file mode 100644 index 00000000000..94c6d2ee7e7 --- /dev/null +++ b/services/payments/src/simcore_service_payments/core/application.py @@ -0,0 +1,96 @@ +from fastapi import FastAPI +from servicelib.fastapi.monitoring import ( + setup_prometheus_instrumentation, +) +from servicelib.fastapi.openapi import override_fastapi_openapi_method +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) + +from .._meta import ( + API_VERSION, + API_VTAG, + APP_FINISHED_BANNER_MSG, + APP_NAME, + APP_STARTED_BANNER_MSG, + PROJECT_NAME, + SUMMARY, +) +from ..api.rest.routes import setup_rest_api +from ..api.rpc.routes import setup_rpc_api_routes +from ..services.auto_recharge_listener import setup_auto_recharge_listener +from ..services.notifier import setup_notifier +from ..services.payments_gateway import setup_payments_gateway +from ..services.postgres import setup_postgres +from ..services.rabbitmq import setup_rabbitmq +from ..services.resource_usage_tracker import setup_resource_usage_tracker +from ..services.socketio import setup_socketio +from ..services.stripe import setup_stripe +from .settings import ApplicationSettings + + +def create_app(settings: ApplicationSettings | None = None) -> FastAPI: + + app_settings = settings or ApplicationSettings.create_from_envs() + + app = FastAPI( + title=f"{PROJECT_NAME} web API", + description=SUMMARY, + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url="/doc" if app_settings.PAYMENTS_SWAGGER_API_DOC_ENABLED else None, + redoc_url=None, # default disabled, see below + ) + override_fastapi_openapi_method(app) + + # STATE + app.state.settings = app_settings + assert 
app.state.settings.API_VERSION == API_VERSION # nosec + + # PLUGINS SETUP + if app.state.settings.PAYMENTS_TRACING: + setup_tracing(app, app.state.settings.PAYMENTS_TRACING, APP_NAME) + + # API w/ postgres db + setup_postgres(app) + + # APIs w/ webserver + setup_rabbitmq(app) + setup_rpc_api_routes(app) + + # APIs w/ payments-gateway + setup_payments_gateway(app) + setup_rest_api(app) + + # APIs w/ RUT + setup_resource_usage_tracker(app) + + # APIs w/ Stripe + setup_stripe(app) + + # Listening to Rabbitmq + setup_auto_recharge_listener(app) + setup_socketio(app) + setup_notifier(app) + + if app.state.settings.PAYMENTS_PROMETHEUS_INSTRUMENTATION_ENABLED: + setup_prometheus_instrumentation(app) + + if app.state.settings.PAYMENTS_TRACING: + initialize_fastapi_app_tracing(app) + + # ERROR HANDLERS + # ... add here ... + + # EVENTS + async def _on_startup() -> None: + print(APP_STARTED_BANNER_MSG, flush=True) # noqa: T201 + + async def _on_shutdown() -> None: + print(APP_FINISHED_BANNER_MSG, flush=True) # noqa: T201 + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + return app diff --git a/services/payments/src/simcore_service_payments/core/errors.py b/services/payments/src/simcore_service_payments/core/errors.py new file mode 100644 index 00000000000..8b5113891fc --- /dev/null +++ b/services/payments/src/simcore_service_payments/core/errors.py @@ -0,0 +1,30 @@ +from common_library.errors_classes import OsparcErrorMixin + + +class _BaseAppError(OsparcErrorMixin, ValueError): + @classmethod + def get_full_class_name(cls) -> str: + # Can be used as unique code identifier + return f"{cls.__module__}.{cls.__name__}" + + +# +# gateway errors +# + + +class PaymentsGatewayError(_BaseAppError): + ... + + +class PaymentsGatewayNotReadyError(PaymentsGatewayError): + msg_template = "Payments-Gateway is unresponsive: {checks}" + + +# +# stripe errors +# + + +class StripeRuntimeError(_BaseAppError): + msg_template = "Stripe unexpected error" diff --git a/services/payments/src/simcore_service_payments/core/settings.py b/services/payments/src/simcore_service_payments/core/settings.py new file mode 100644 index 00000000000..5d9c69d861b --- /dev/null +++ b/services/payments/src/simcore_service_payments/core/settings.py @@ -0,0 +1,202 @@ +from decimal import Decimal +from functools import cached_property +from typing import Annotated + +from common_library.basic_types import DEFAULT_FACTORY +from models_library.basic_types import NonNegativeDecimal +from pydantic import ( + AliasChoices, + EmailStr, + Field, + HttpUrl, + PositiveFloat, + SecretStr, + TypeAdapter, + field_validator, +) +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.application import BaseApplicationSettings +from settings_library.basic_types import LogLevel, VersionTag +from settings_library.email import SMTPSettings +from settings_library.postgres import PostgresSettings +from settings_library.rabbit import RabbitSettings +from settings_library.resource_usage_tracker import ResourceUsageTrackerSettings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + +from .._meta import API_VERSION, API_VTAG, PROJECT_NAME + + +class _BaseApplicationSettings(BaseApplicationSettings, MixinLoggingSettings): + """Base settings of any osparc service's app""" + + # CODE STATICS --------------------------------------------------------- + API_VERSION: str = API_VERSION + APP_NAME: str = 
PROJECT_NAME + API_VTAG: VersionTag = TypeAdapter(VersionTag).validate_python(API_VTAG) + + # RUNTIME ----------------------------------------------------------- + + PAYMENTS_LOGLEVEL: Annotated[ + LogLevel, + Field( + validation_alias=AliasChoices("PAYMENTS_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"), + ), + ] = LogLevel.INFO + + PAYMENTS_LOG_FORMAT_LOCAL_DEV_ENABLED: Annotated[ + bool, + Field( + validation_alias=AliasChoices( + "LOG_FORMAT_LOCAL_DEV_ENABLED", "PAYMENTS_LOG_FORMAT_LOCAL_DEV_ENABLED" + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ), + ] = False + + PAYMENTS_LOG_FILTER_MAPPING: Annotated[ + dict[LoggerName, list[MessageSubstring]], + Field( + default_factory=dict, + validation_alias=AliasChoices( + "LOG_FILTER_MAPPING", "PAYMENTS_LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ), + ] = DEFAULT_FACTORY + + @cached_property + def LOG_LEVEL(self): # noqa: N802 + return self.PAYMENTS_LOGLEVEL + + @field_validator("PAYMENTS_LOGLEVEL", mode="before") + @classmethod + def _valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +class ApplicationSettings(_BaseApplicationSettings): + """Web app's environment variables + + These settings includes extra configuration for the http-API + """ + + PAYMENTS_GATEWAY_URL: Annotated[ + HttpUrl, Field(description="Base url to the payment gateway") + ] + + PAYMENTS_GATEWAY_API_SECRET: Annotated[ + SecretStr, Field(description="Credentials for payments-gateway api") + ] + + PAYMENTS_USERNAME: Annotated[ + str, + Field( + description="Username for Auth. Required if started as a web app.", + min_length=3, + ), + ] + PAYMENTS_PASSWORD: Annotated[ + SecretStr, + Field( + description="Password for Auth. Required if started as a web app.", + min_length=10, + ), + ] + + PAYMENTS_ACCESS_TOKEN_SECRET_KEY: Annotated[ + SecretStr, + Field( + description="To generate a random password with openssl in hex format with 32 bytes, run `openssl rand -hex 32`", + min_length=30, + ), + ] + PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES: PositiveFloat = 30.0 + + PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS: Annotated[ + NonNegativeDecimal, + Field( + description="Minimum balance in credits to top-up for auto-recharge", + ), + ] = Decimal(100) + + PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT: Annotated[ + NonNegativeDecimal, + Field( + description="Default value in USD on the amount to top-up for auto-recharge (`top_up_amount_in_usd`)", + ), + ] = Decimal(100) + + PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT: Annotated[ + NonNegativeDecimal | None, + Field( + description="Default value in USD for the montly limit for auto-recharge (`monthly_limit_in_usd`)", + ), + ] = Decimal(10_000) + + PAYMENTS_AUTORECHARGE_ENABLED: Annotated[ + bool, + Field( + description="Based on this variable is the auto recharge functionality in Payment service enabled", + ), + ] = False + + PAYMENTS_BCC_EMAIL: Annotated[ + EmailStr | None, + Field( + description="Special email for finance department. 
Currently used to BCC invoices.", + ), + ] = None + + PAYMENTS_RABBITMQ: Annotated[ + RabbitSettings, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for service/rabbitmq", + ), + ] + + PAYMENTS_TRACING: Annotated[ + TracingSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for opentelemetry tracing", + ), + ] + + PAYMENTS_POSTGRES: Annotated[ + PostgresSettings, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for postgres service", + ), + ] + + PAYMENTS_STRIPE_URL: Annotated[ + HttpUrl, Field(description="Base url to the payment Stripe") + ] + PAYMENTS_STRIPE_API_SECRET: Annotated[ + SecretStr, Field(description="Credentials for Stripe api") + ] + + PAYMENTS_SWAGGER_API_DOC_ENABLED: Annotated[ + bool, Field(description="If true, it displays swagger doc at /doc") + ] = True + + PAYMENTS_RESOURCE_USAGE_TRACKER: Annotated[ + ResourceUsageTrackerSettings, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="settings for RUT service", + ), + ] + + PAYMENTS_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + + PAYMENTS_EMAIL: Annotated[ + SMTPSettings | None, + Field( + json_schema_extra={"auto_default_from_env": True}, + description="optional email (see notifier_email service)", + ), + ] diff --git a/services/payments/src/simcore_service_payments/db/__init__.py b/services/payments/src/simcore_service_payments/db/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/db/auto_recharge_repo.py b/services/payments/src/simcore_service_payments/db/auto_recharge_repo.py new file mode 100644 index 00000000000..aa98896cf13 --- /dev/null +++ b/services/payments/src/simcore_service_payments/db/auto_recharge_repo.py @@ -0,0 +1,74 @@ +from typing import TypeAlias + +from models_library.api_schemas_payments.errors import InvalidPaymentMethodError +from models_library.api_schemas_webserver.wallets import PaymentMethodID +from models_library.basic_types import NonNegativeDecimal +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict, PositiveInt +from simcore_postgres_database.utils_payments_autorecharge import AutoRechargeStmts + +from .base import BaseRepository + +AutoRechargeID: TypeAlias = PositiveInt + + +class PaymentsAutorechargeDB(BaseModel): + wallet_id: WalletID + enabled: bool + primary_payment_method_id: PaymentMethodID + top_up_amount_in_usd: NonNegativeDecimal + monthly_limit_in_usd: NonNegativeDecimal | None + model_config = ConfigDict(from_attributes=True) + + +class AutoRechargeRepo(BaseRepository): + async def get_wallet_autorecharge( + self, + wallet_id: WalletID, + ) -> PaymentsAutorechargeDB | None: + """Annotates init-payment transaction + Raises: + PaymentAlreadyExistsError + """ + + async with self.db_engine.begin() as conn: + stmt = AutoRechargeStmts.get_wallet_autorecharge(wallet_id) + result = await conn.execute(stmt) + row = result.first() + return PaymentsAutorechargeDB.model_validate(row) if row else None + + async def replace_wallet_autorecharge( + self, + user_id: UserID, + wallet_id: WalletID, + new: PaymentsAutorechargeDB, + ) -> PaymentsAutorechargeDB: + """ + Raises: + InvalidPaymentMethodError: if `new` includes some invalid 'primary_payment_method_id' + + """ + async with self.db_engine.begin() as conn: + stmt = AutoRechargeStmts.is_valid_payment_method( + user_id=user_id, + 
wallet_id=new.wallet_id, + payment_method_id=new.primary_payment_method_id, + ) + + if await conn.scalar(stmt) != new.primary_payment_method_id: + raise InvalidPaymentMethodError( + payment_method_id=new.primary_payment_method_id + ) + + stmt = AutoRechargeStmts.upsert_wallet_autorecharge( + wallet_id=wallet_id, + enabled=new.enabled, + primary_payment_method_id=new.primary_payment_method_id, + top_up_amount_in_usd=new.top_up_amount_in_usd, + monthly_limit_in_usd=new.monthly_limit_in_usd, + ) + result = await conn.execute(stmt) + row = result.first() + assert row # nosec + return PaymentsAutorechargeDB.model_validate(row) diff --git a/services/payments/src/simcore_service_payments/db/base.py b/services/payments/src/simcore_service_payments/db/base.py new file mode 100644 index 00000000000..f197b26721f --- /dev/null +++ b/services/payments/src/simcore_service_payments/db/base.py @@ -0,0 +1,11 @@ +from sqlalchemy.ext.asyncio import AsyncEngine + + +class BaseRepository: + """ + Repositories are pulled at every request + """ + + def __init__(self, db_engine: AsyncEngine): + assert db_engine is not None # nosec + self.db_engine = db_engine diff --git a/services/payments/src/simcore_service_payments/db/payment_users_repo.py b/services/payments/src/simcore_service_payments/db/payment_users_repo.py new file mode 100644 index 00000000000..ec643ee8bca --- /dev/null +++ b/services/payments/src/simcore_service_payments/db/payment_users_repo.py @@ -0,0 +1,66 @@ +import sqlalchemy as sa +from models_library.api_schemas_webserver.wallets import PaymentID +from models_library.groups import GroupID +from models_library.users import UserID +from simcore_postgres_database.models.payments_transactions import payments_transactions +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.users import users + +from .base import BaseRepository + + +class PaymentsUsersRepo(BaseRepository): + # NOTE: + # Currently linked to `users` but expected to be linked to `payments_users` + # when databases are separated. The latter will be a subset copy of the former. 
+ # + + async def _get(self, query): + async with self.db_engine.begin() as conn: + result = await conn.execute(query) + return result.first() + + async def get_primary_group_id(self, user_id: UserID) -> GroupID: + if row := await self._get( + sa.select( + users.c.primary_gid, + ).where(users.c.id == user_id) + ): + return GroupID(row.primary_gid) + + msg = f"{user_id=} not found" + raise ValueError(msg) + + async def get_notification_data(self, user_id: UserID, payment_id: PaymentID): + """Retrives data that will be injected in a notification for the user on this payment""" + if row := await self._get( + sa.select( + payments_transactions.c.payment_id, + users.c.first_name, + users.c.last_name, + users.c.email, + products.c.name.label("product_name"), + products.c.display_name, + products.c.vendor, + products.c.support_email, + ) + .select_from( + sa.join( + sa.join( + payments_transactions, + users, + payments_transactions.c.user_id == users.c.id, + ), + products, + payments_transactions.c.product_name == products.c.name, + ) + ) + .where( + (payments_transactions.c.payment_id == payment_id) + & (payments_transactions.c.user_id == user_id) + ) + ): + return row + + msg = f"{payment_id=} for {user_id=} was not found" + raise ValueError(msg) diff --git a/services/payments/src/simcore_service_payments/db/payments_methods_repo.py b/services/payments/src/simcore_service_payments/db/payments_methods_repo.py new file mode 100644 index 00000000000..cea7b8e6158 --- /dev/null +++ b/services/payments/src/simcore_service_payments/db/payments_methods_repo.py @@ -0,0 +1,194 @@ +import datetime + +import simcore_postgres_database.aiopg_errors as db_errors +import sqlalchemy as sa +from arrow import utcnow +from models_library.api_schemas_payments.errors import ( + PaymentMethodAlreadyAckedError, + PaymentMethodNotFoundError, + PaymentMethodUniqueViolationError, +) +from models_library.api_schemas_webserver.wallets import PaymentMethodID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from simcore_postgres_database.models.payments_methods import ( + InitPromptAckFlowState, + payments_methods, +) + +from ..models.db import PaymentsMethodsDB +from .base import BaseRepository + + +class PaymentsMethodsRepo(BaseRepository): + async def insert_init_payment_method( + self, + payment_method_id: PaymentMethodID, + *, + user_id: UserID, + wallet_id: WalletID, + initiated_at: datetime.datetime, + ) -> PaymentMethodID: + try: + async with self.db_engine.begin() as conn: + await conn.execute( + payments_methods.insert().values( + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + initiated_at=initiated_at, + ) + ) + return payment_method_id + + except db_errors.UniqueViolation as err: + raise PaymentMethodUniqueViolationError( + payment_method_id=payment_method_id + ) from err + + async def update_ack_payment_method( + self, + payment_method_id: PaymentMethodID, + *, + completion_state: InitPromptAckFlowState, + state_message: str | None, + ) -> PaymentsMethodsDB: + if completion_state == InitPromptAckFlowState.PENDING: + msg = f"{completion_state} is not a completion state" + raise ValueError(msg) + + optional = {} + if state_message: + optional["state_message"] = state_message + + async with self.db_engine.begin() as conn: + row = ( + await conn.execute( + sa.select( + payments_methods.c.initiated_at, + payments_methods.c.completed_at, + ) + .where(payments_methods.c.payment_method_id == payment_method_id) + 
.with_for_update() + ) + ).fetchone() + + if row is None: + raise PaymentMethodNotFoundError(payment_method_id=payment_method_id) + + if row.completed_at is not None: + raise PaymentMethodAlreadyAckedError( + payment_method_id=payment_method_id + ) + + result = await conn.execute( + payments_methods.update() + .values(completed_at=sa.func.now(), state=completion_state, **optional) + .where(payments_methods.c.payment_method_id == payment_method_id) + .returning(sa.literal_column("*")) + ) + row = result.first() + assert row, "execute above should have caught this" # nosec + + return PaymentsMethodsDB.model_validate(row) + + async def insert_payment_method( + self, + payment_method_id: PaymentMethodID, + *, + user_id: UserID, + wallet_id: WalletID, + completion_state: InitPromptAckFlowState, + state_message: str | None, + ) -> PaymentsMethodsDB: + await self.insert_init_payment_method( + payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + initiated_at=utcnow().datetime, + ) + return await self.update_ack_payment_method( + payment_method_id, + completion_state=completion_state, + state_message=state_message, + ) + + async def list_user_payment_methods( + self, + *, + user_id: UserID, + wallet_id: WalletID, + ) -> list[PaymentsMethodsDB]: + # NOTE: we do not expect many payment methods, so no pagination is neede here + async with self.db_engine.begin() as conn: + result = await conn.execute( + payments_methods.select() + .where( + (payments_methods.c.user_id == user_id) + & (payments_methods.c.wallet_id == wallet_id) + & (payments_methods.c.state == InitPromptAckFlowState.SUCCESS) + ) + .order_by(payments_methods.c.created.desc()) + ) # newest first + rows = result.fetchall() or [] + return TypeAdapter(list[PaymentsMethodsDB]).validate_python(rows) + + async def get_payment_method_by_id( + self, + payment_method_id: PaymentMethodID, + ) -> PaymentsMethodsDB: + async with self.db_engine.begin() as conn: + result = await conn.execute( + payments_methods.select().where( + (payments_methods.c.payment_method_id == payment_method_id) + & (payments_methods.c.state == InitPromptAckFlowState.SUCCESS) + ) + ) + row = result.first() + if row is None: + raise PaymentMethodNotFoundError(payment_method_id=payment_method_id) + + return PaymentsMethodsDB.model_validate(row) + + async def get_payment_method( + self, + payment_method_id: PaymentMethodID, + *, + user_id: UserID, + wallet_id: WalletID, + ) -> PaymentsMethodsDB: + async with self.db_engine.begin() as conn: + result = await conn.execute( + payments_methods.select().where( + (payments_methods.c.user_id == user_id) + & (payments_methods.c.wallet_id == wallet_id) + & (payments_methods.c.payment_method_id == payment_method_id) + & (payments_methods.c.state == InitPromptAckFlowState.SUCCESS) + ) + ) + row = result.first() + if row is None: + raise PaymentMethodNotFoundError(payment_method_id=payment_method_id) + + return PaymentsMethodsDB.model_validate(row) + + async def delete_payment_method( + self, + payment_method_id: PaymentMethodID, + *, + user_id: UserID, + wallet_id: WalletID, + ) -> PaymentsMethodsDB | None: + async with self.db_engine.begin() as conn: + result = await conn.execute( + payments_methods.delete() + .where( + (payments_methods.c.user_id == user_id) + & (payments_methods.c.wallet_id == wallet_id) + & (payments_methods.c.payment_method_id == payment_method_id) + ) + .returning(sa.literal_column("*")) + ) + row = result.first() + return row if row is None else PaymentsMethodsDB.model_validate(row) diff --git 
a/services/payments/src/simcore_service_payments/db/payments_transactions_repo.py b/services/payments/src/simcore_service_payments/db/payments_transactions_repo.py new file mode 100644 index 00000000000..d7f6b893668 --- /dev/null +++ b/services/payments/src/simcore_service_payments/db/payments_transactions_repo.py @@ -0,0 +1,233 @@ +from datetime import datetime, timezone +from decimal import Decimal + +import sqlalchemy as sa +from models_library.api_schemas_payments.errors import ( + PaymentAlreadyAckedError, + PaymentAlreadyExistsError, + PaymentNotFoundError, +) +from models_library.api_schemas_webserver.wallets import PaymentID +from models_library.payments import StripeInvoiceID +from models_library.products import ProductName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import HttpUrl, PositiveInt, TypeAdapter +from simcore_postgres_database import aiopg_errors as pg_errors +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, + payments_transactions, +) + +from ..models.db import PaymentsTransactionsDB +from .base import BaseRepository + + +class PaymentsTransactionsRepo(BaseRepository): + async def insert_init_payment_transaction( + self, + payment_id: PaymentID, + *, + price_dollars: Decimal, + osparc_credits: Decimal, + product_name: str, + user_id: UserID, + user_email: str, + wallet_id: WalletID, + comment: str | None, + initiated_at: datetime, + ) -> PaymentID: + """Annotates init-payment transaction + + Raises: + PaymentAlreadyExistsError + """ + try: + async with self.db_engine.begin() as conn: + await conn.execute( + payments_transactions.insert().values( + payment_id=f"{payment_id}", + price_dollars=price_dollars, + osparc_credits=osparc_credits, + product_name=product_name, + user_id=user_id, + user_email=user_email, + wallet_id=wallet_id, + comment=comment, + initiated_at=initiated_at, + ) + ) + return payment_id + except pg_errors.UniqueViolation as exc: + raise PaymentAlreadyExistsError(payment_id=f"{payment_id}") from exc + + async def update_ack_payment_transaction( + self, + payment_id: PaymentID, + completion_state: PaymentTransactionState, + state_message: str | None, + invoice_url: HttpUrl | None, + stripe_invoice_id: StripeInvoiceID | None, + invoice_pdf_url: HttpUrl | None, + ) -> PaymentsTransactionsDB: + """ + - ACKs payment by updating state with SUCCESS, CANCEL, etc + + Raises: + ValueError + PaymentNotFoundError + PaymentAlreadyAckedError + + """ + + if completion_state == PaymentTransactionState.PENDING: + msg = f"cannot update state with {completion_state=} since it is already initiated" + raise ValueError(msg) + + optional = {} + if state_message: + optional["state_message"] = state_message + + async with self.db_engine.begin() as connection: + row = ( + await connection.execute( + sa.select( + payments_transactions.c.initiated_at, + payments_transactions.c.completed_at, + ) + .where(payments_transactions.c.payment_id == f"{payment_id}") + .with_for_update() + ) + ).fetchone() + + if row is None: + raise PaymentNotFoundError(payment_id=f"{payment_id}") + + if row.completed_at is not None: + assert row.initiated_at < row.completed_at # nosec + raise PaymentAlreadyAckedError(payment_id=f"{payment_id}") + + assert row.initiated_at # nosec + + result = await connection.execute( + payments_transactions.update() + .values( + completed_at=sa.func.now(), + state=completion_state, + invoice_url=f"{invoice_url}" if invoice_url else None, + 
stripe_invoice_id=stripe_invoice_id, + invoice_pdf_url=f"{invoice_pdf_url}" if invoice_pdf_url else None, + **optional, + ) + .where(payments_transactions.c.payment_id == f"{payment_id}") + .returning(sa.literal_column("*")) + ) + row = result.first() + assert row, "execute above should have caught this" # nosec + + return PaymentsTransactionsDB.model_validate(row) + + async def list_user_payment_transactions( + self, + user_id: UserID, + product_name: ProductName, + *, + offset: PositiveInt | None = None, + limit: PositiveInt | None = None, + ) -> tuple[int, list[PaymentsTransactionsDB]]: + """List payments done by a give user (any wallet) + + Sorted by newest-first + """ + async with self.db_engine.begin() as connection: + result = await connection.execute( + sa.select(sa.func.count()) + .select_from(payments_transactions) + .where( + (payments_transactions.c.user_id == user_id) + & (payments_transactions.c.product_name == product_name) + ) + ) + total_number_of_items = result.scalar() + assert total_number_of_items is not None # nosec + + # NOTE: what if between these two calls there are new rows? can we get this in an atomic call?Γ₯ + stmt = ( + payments_transactions.select() + .where( + (payments_transactions.c.user_id == user_id) + & (payments_transactions.c.product_name == product_name) + ) + .order_by(payments_transactions.c.created.desc()) + ) # newest first + + if offset is not None: + # psycopg2.errors.InvalidRowCountInResultOffsetClause: OFFSET must not be negative + stmt = stmt.offset(offset) + + if limit is not None: + # InvalidRowCountInLimitClause: LIMIT must not be negative + stmt = stmt.limit(limit) + + result = await connection.execute(stmt) + rows = result.fetchall() + return ( + total_number_of_items, + TypeAdapter(list[PaymentsTransactionsDB]).validate_python(rows), + ) + + async def get_payment_transaction( + self, payment_id: PaymentID, user_id: UserID, wallet_id: WalletID + ) -> PaymentsTransactionsDB | None: + # NOTE: user access and rights are expected to be checked at this point + # nonetheless, `user_id` and `wallet_id` are added here for caution + async with self.db_engine.begin() as connection: + result = await connection.execute( + payments_transactions.select().where( + (payments_transactions.c.payment_id == f"{payment_id}") + & (payments_transactions.c.user_id == user_id) + & (payments_transactions.c.wallet_id == wallet_id) + ) + ) + row = result.fetchone() + return PaymentsTransactionsDB.model_validate(row) if row else None + + async def sum_current_month_dollars(self, *, wallet_id: WalletID) -> Decimal: + _current_timestamp = datetime.now(tz=timezone.utc) + _current_month_start_timestamp = _current_timestamp.replace( + day=1, hour=0, minute=0, second=0, microsecond=0 + ) + + async with self.db_engine.begin() as conn: + sum_stmt = sa.select( + sa.func.sum(payments_transactions.c.price_dollars) + ).where( + (payments_transactions.c.wallet_id == wallet_id) + & ( + payments_transactions.c.state.in_( + [ + PaymentTransactionState.SUCCESS, + ] + ) + ) + & ( + payments_transactions.c.completed_at + >= _current_month_start_timestamp + ) + ) + result = await conn.execute(sum_stmt) + row = result.first() + return Decimal(0) if row is None or row[0] is None else Decimal(row[0]) + + async def get_last_payment_transaction_for_wallet( + self, *, wallet_id: WalletID + ) -> PaymentsTransactionsDB | None: + async with self.db_engine.begin() as connection: + result = await connection.execute( + payments_transactions.select() + .where(payments_transactions.c.wallet_id == 
wallet_id) + .order_by(payments_transactions.c.initiated_at.desc()) + .limit(1) + ) + row = result.fetchone() + return PaymentsTransactionsDB.model_validate(row) if row else None diff --git a/services/payments/src/simcore_service_payments/main.py b/services/payments/src/simcore_service_payments/main.py new file mode 100644 index 00000000000..53e19bd22a1 --- /dev/null +++ b/services/payments/src/simcore_service_payments/main.py @@ -0,0 +1,24 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_payments.core.application import create_app +from simcore_service_payments.core.settings import ApplicationSettings + +_the_settings = ApplicationSettings.create_from_envs() + +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=_the_settings.log_level) # NOSONAR +logging.root.setLevel(_the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=_the_settings.PAYMENTS_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=_the_settings.PAYMENTS_LOG_FILTER_MAPPING, + tracing_settings=_the_settings.PAYMENTS_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(_the_settings) diff --git a/services/payments/src/simcore_service_payments/models/__init__.py b/services/payments/src/simcore_service_payments/models/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/models/auth.py b/services/payments/src/simcore_service_payments/models/auth.py new file mode 100644 index 00000000000..a187e5a9215 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/auth.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class SessionData(BaseModel): + username: str | None = None diff --git a/services/payments/src/simcore_service_payments/models/db.py b/services/payments/src/simcore_service_payments/models/db.py new file mode 100644 index 00000000000..c858bbc2435 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/db.py @@ -0,0 +1,107 @@ +import datetime +from decimal import Decimal +from typing import Any + +from models_library.api_schemas_webserver.wallets import PaymentID, PaymentMethodID +from models_library.emails import LowerCaseEmailStr +from models_library.payments import StripeInvoiceID +from models_library.products import ProductName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict, HttpUrl +from simcore_postgres_database.models.payments_methods import InitPromptAckFlowState +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, +) + +_EXAMPLE_AFTER_INIT: dict[str, Any] = { + "payment_id": "12345", + "price_dollars": 10.99, + "osparc_credits": 5.0, + "product_name": "osparc-plus", + "user_id": 123, + "user_email": "user@example.com", + "wallet_id": 123, + "comment": "This is a test comment.", + "invoice_url": None, + "stripe_invoice_id": None, + "invoice_pdf_url": None, + "initiated_at": "2023-09-27T10:00:00", + "state": PaymentTransactionState.PENDING, + "completed_at": None, + "state_message": None, +} + + +class PaymentsTransactionsDB(BaseModel): + payment_id: PaymentID + price_dollars: Decimal # accepts negatives + osparc_credits: Decimal # accepts negatives + product_name: ProductName + user_id: UserID + user_email: LowerCaseEmailStr + wallet_id: 
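# Illustrative sketch of the queries the auto-recharge safety checks rely on;
# the `engine` and `wallet_id` arguments are assumptions made only for this example.
from datetime import UTC, datetime, timedelta


async def _example_wallet_spending_checks(engine, wallet_id):
    repo = PaymentsTransactionsRepo(db_engine=engine)

    # cumulative dollars of SUCCESSful transactions since the start of the current month
    spent_this_month = await repo.sum_current_month_dollars(wallet_id=wallet_id)

    # newest transaction of the wallet, used to skip top-ups initiated very recently
    last = await repo.get_last_payment_transaction_for_wallet(wallet_id=wallet_id)
    recently_topped_up = bool(
        last and last.initiated_at > datetime.now(tz=UTC) - timedelta(minutes=5)
    )
    return spent_this_month, recently_topped_up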
WalletID + comment: str | None + invoice_url: HttpUrl | None + stripe_invoice_id: StripeInvoiceID | None + invoice_pdf_url: HttpUrl | None + initiated_at: datetime.datetime + completed_at: datetime.datetime | None + state: PaymentTransactionState + state_message: str | None + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ + _EXAMPLE_AFTER_INIT, + # successful completion + { + **_EXAMPLE_AFTER_INIT, + "invoice_url": "https://my-fake-pdf-link.com", + "stripe_invoice_id": "12345", + "invoice_pdf_url": "https://my-fake-pdf-link.com", + "completed_at": "2023-09-27T10:00:10", + "state": "SUCCESS", + "state_message": "Payment completed successfully", + }, + ] + }, + ) + + +_EXAMPLE_AFTER_INIT_PAYMENT_METHOD = { + "payment_method_id": "12345", + "user_id": _EXAMPLE_AFTER_INIT["user_id"], + "user_email": _EXAMPLE_AFTER_INIT["user_email"], + "wallet_id": _EXAMPLE_AFTER_INIT["wallet_id"], + "initiated_at": _EXAMPLE_AFTER_INIT["initiated_at"], + "state": InitPromptAckFlowState.PENDING, + "completed_at": None, + "state_message": None, +} + + +class PaymentsMethodsDB(BaseModel): + payment_method_id: PaymentMethodID + user_id: UserID + wallet_id: WalletID + # State in Flow + initiated_at: datetime.datetime + completed_at: datetime.datetime | None + state: InitPromptAckFlowState + state_message: str | None + model_config = ConfigDict( + from_attributes=True, + json_schema_extra={ + "examples": [ + _EXAMPLE_AFTER_INIT_PAYMENT_METHOD, + # successful completion + { + **_EXAMPLE_AFTER_INIT_PAYMENT_METHOD, + "completed_at": "2023-09-27T10:00:15", + "state": "SUCCESS", + "state_message": "Payment method completed successfully", + }, + ] + }, + ) diff --git a/services/payments/src/simcore_service_payments/models/db_to_api.py b/services/payments/src/simcore_service_payments/models/db_to_api.py new file mode 100644 index 00000000000..c6c79195383 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/db_to_api.py @@ -0,0 +1,41 @@ +from typing import Any + +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodTransaction, + PaymentTransaction, +) + +from .db import PaymentsMethodsDB, PaymentsTransactionsDB + + +def to_payments_api_model(transaction: PaymentsTransactionsDB) -> PaymentTransaction: + data: dict[str, Any] = { + "payment_id": transaction.payment_id, + "price_dollars": transaction.price_dollars, + "osparc_credits": transaction.osparc_credits, + "wallet_id": transaction.wallet_id, + "created_at": transaction.initiated_at, + "state": f"{transaction.state}", + "completed_at": transaction.completed_at, + } + + if transaction.comment: + data["comment"] = transaction.comment + + if transaction.state_message: + data["state_message"] = transaction.state_message + + if transaction.invoice_url: + data["invoice_url"] = transaction.invoice_url + + return PaymentTransaction(**data) + + +def to_payment_method_api_model( + payment_method: PaymentsMethodsDB, +) -> PaymentMethodTransaction: + return PaymentMethodTransaction( + wallet_id=payment_method.wallet_id, + payment_method_id=payment_method.payment_method_id, + state=payment_method.state.value, + ) diff --git a/services/payments/src/simcore_service_payments/models/payments_gateway.py b/services/payments/src/simcore_service_payments/models/payments_gateway.py new file mode 100644 index 00000000000..dc1b3525633 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/payments_gateway.py @@ -0,0 +1,83 @@ +from datetime import datetime +from enum import Enum +from pathlib 
import Path +from typing import Literal + +from models_library.api_schemas_webserver.wallets import PaymentID, PaymentMethodID +from models_library.basic_types import AmountDecimal, IDStr +from models_library.payments import UserInvoiceAddress +from models_library.products import StripePriceID, StripeTaxRateID +from pydantic import BaseModel, ConfigDict, EmailStr, Field + +COUNTRIES_WITH_VAT = ["CH", "LI"] + + +class ErrorModel(BaseModel): + message: str + exception: str | None = None + file: Path | str | None = None + line: int | None = None + trace: list | None = None + + +class StripeTaxExempt(str, Enum): + exempt = "exempt" + none = "none" # <-- if customer is from CH or LI + reverse = "reverse" # <-- if customer is outside of CH or LI + + +class InitPayment(BaseModel): + amount_dollars: AmountDecimal + # metadata to store for billing or reference + credits_: AmountDecimal = Field( + ..., + alias="credits", + json_schema_extra={"describe": "This is equal to `quantity` field in Stripe"}, + ) + user_name: IDStr + user_email: EmailStr + user_address: UserInvoiceAddress + wallet_name: IDStr + stripe_price_id: StripePriceID + stripe_tax_rate_id: StripeTaxRateID + stripe_tax_exempt_value: StripeTaxExempt + model_config = ConfigDict(extra="forbid") + + +class PaymentInitiated(BaseModel): + payment_id: PaymentID + + +class PaymentCancelled(BaseModel): + message: str | None = None + + +class InitPaymentMethod(BaseModel): + method: Literal["CC"] = "CC" + # metadata to store for billing or reference + user_name: IDStr + user_email: EmailStr + wallet_name: IDStr + model_config = ConfigDict(extra="forbid") + + +class PaymentMethodInitiated(BaseModel): + payment_method_id: PaymentMethodID + + +class GetPaymentMethod(BaseModel): + id: PaymentMethodID + card_holder_name: str | None = None + card_number_masked: str | None = None + card_type: str | None = None + expiration_month: int | None = None + expiration_year: int | None = None + created: datetime + + +class BatchGetPaymentMethods(BaseModel): + payment_methods_ids: list[PaymentMethodID] + + +class PaymentMethodsBatch(BaseModel): + items: list[GetPaymentMethod] diff --git a/services/payments/src/simcore_service_payments/models/schemas/__init__.py b/services/payments/src/simcore_service_payments/models/schemas/__init__.py new file mode 100644 index 00000000000..8063e97ab6e --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/schemas/__init__.py @@ -0,0 +1 @@ +# NOTE: all models/schemas are not in models_library this rest API is NOT used by any of the services in the stack diff --git a/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py b/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py new file mode 100644 index 00000000000..a9cb86f333a --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/schemas/acknowledgements.py @@ -0,0 +1,137 @@ +# mypy: disable-error-code=truthy-function +from typing import Annotated, Any + +from models_library.api_schemas_webserver.wallets import PaymentID, PaymentMethodID +from models_library.basic_types import IDStr +from pydantic import BaseModel, ConfigDict, Field, HttpUrl, field_validator +from pydantic_core.core_schema import ValidationInfo + + +class _BaseAck(BaseModel): + success: bool + message: str | None = Field(default=None) + + +class _BaseAckPayment(_BaseAck): + provider_payment_id: IDStr | None = Field( + default=None, + description="Payment ID from the provider (e.g. 
stripe payment ID)", + ) + + invoice_url: HttpUrl | None = Field( + default=None, description="Link to invoice is required when success=true" + ) + # NOTE: Why invoice_pdf, stripe_invoice_id and stripe_customer_id nullable? Currently, we are dependent on a third party that is making + # some changes for us. Adding these fields has a slightly lower priority. If they do not manage it, it is still okay for us. + invoice_pdf: HttpUrl | None = Field(default=None, description="Link to invoice PDF") + stripe_invoice_id: IDStr | None = Field( + default=None, description="Stripe invoice ID" + ) + stripe_customer_id: IDStr | None = Field( + default=None, description="Stripe customer ID" + ) + + +# +# ACK payment-methods +# + + +class AckPaymentMethod(_BaseAck): + ... + + +class SavedPaymentMethod(AckPaymentMethod): + payment_method_id: PaymentMethodID | None = None + + +# +# ACK payments +# + +_ONE_TIME_SUCCESS: dict[str, Any] = { + "success": True, + "provider_payment_id": "pi_123ABC", + "invoice_url": "https://invoices.com/id=12345", +} +_EXAMPLES: list[dict[str, Any]] = [ + # 0. one-time-payment successful + _ONE_TIME_SUCCESS, + # 1. one-time-payment and payment-method-saved successful + { + **_ONE_TIME_SUCCESS, + "saved": { + "success": True, + "payment_method_id": "3FA85F64-5717-4562-B3FC-2C963F66AFA6", + }, + }, + # 2. one-time-payment successful but payment-method-saved failed + { + **_ONE_TIME_SUCCESS, + "saved": { + "success": False, + "message": "Not allowed", + }, + }, + # 3. one-time-payment failure + { + "success": False, + "message": "No more credit", + }, +] + + +class AckPayment(_BaseAckPayment): + + saved: Annotated[ + SavedPaymentMethod | None, + Field( + description="Gets the payment-method if user opted to save it during payment." + "If used did not opt to save of payment-method was already saved, then it defaults to None", + ), + ] = None + + model_config = ConfigDict( + json_schema_extra={ + "example": _EXAMPLES[1].copy(), # shown in openapi.json + "examples": _EXAMPLES, # type:ignore[dict-item] + } + ) + + @field_validator("invoice_url") + @classmethod + def _success_requires_invoice(cls, v, info: ValidationInfo): + success = info.data.get("success") + if success and not v: + msg = "Invoice required on successful payments" + raise ValueError(msg) + return v + + +class AckPaymentWithPaymentMethod(_BaseAckPayment): + # NOTE: This model is equivalent to `AckPayment`, nonetheless + # I decided to separate it for clarity in the OAS since in payments + # w/ payment-method the field `saved` will never be provided, + + payment_id: Annotated[ + PaymentID | None, Field(description="Payment ID from the gateway") + ] = None + + model_config = ConfigDict( + json_schema_extra={ + "example": { + **_ONE_TIME_SUCCESS, + "payment_id": "D19EE68B-B007-4B61-A8BC-32B7115FB244", + }, # shown in openapi.json + } + ) + + +assert PaymentID # nosec +assert PaymentMethodID # nosec + + +__all__: tuple[str, ...] 
= ( + "PaymentID", + "PaymentMethodID", +) diff --git a/services/payments/src/simcore_service_payments/models/schemas/auth.py b/services/payments/src/simcore_service_payments/models/schemas/auth.py new file mode 100644 index 00000000000..365b1ffe9a4 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/schemas/auth.py @@ -0,0 +1,8 @@ +from typing import Literal + +from pydantic import BaseModel + + +class Token(BaseModel): + access_token: str + token_type: Literal["bearer"] diff --git a/services/payments/src/simcore_service_payments/models/schemas/errors.py b/services/payments/src/simcore_service_payments/models/schemas/errors.py new file mode 100644 index 00000000000..1898f724cc7 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/schemas/errors.py @@ -0,0 +1,6 @@ +# mypy: disable-error-code=truthy-function +from models_library.api_schemas__common.errors import DefaultApiError + +assert DefaultApiError # nosec + +__all__: tuple[str, ...] = ("DefaultApiError",) diff --git a/services/payments/src/simcore_service_payments/models/schemas/meta.py b/services/payments/src/simcore_service_payments/models/schemas/meta.py new file mode 100644 index 00000000000..cf5e7c649a2 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/schemas/meta.py @@ -0,0 +1,15 @@ +from models_library.api_schemas__common.meta import BaseMeta +from pydantic import ConfigDict, HttpUrl + + +class Meta(BaseMeta): + docs_url: HttpUrl + model_config = ConfigDict( + json_schema_extra={ + "example": { + "name": "simcore_service_payments", + "version": "2.4.45", + "docs_url": "https://foo.io/doc", + } + } + ) diff --git a/services/payments/src/simcore_service_payments/models/stripe.py b/services/payments/src/simcore_service_payments/models/stripe.py new file mode 100644 index 00000000000..9b91365df58 --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/stripe.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel, HttpUrl + + +class InvoiceData(BaseModel): + hosted_invoice_url: HttpUrl diff --git a/services/payments/src/simcore_service_payments/models/utils.py b/services/payments/src/simcore_service_payments/models/utils.py new file mode 100644 index 00000000000..af5cbfe537e --- /dev/null +++ b/services/payments/src/simcore_service_payments/models/utils.py @@ -0,0 +1,20 @@ +from models_library.api_schemas_webserver.wallets import PaymentMethodGet + +from .db import PaymentsMethodsDB +from .payments_gateway import GetPaymentMethod + + +def merge_models(got: GetPaymentMethod, acked: PaymentsMethodsDB) -> PaymentMethodGet: + assert acked.completed_at # nosec + + return PaymentMethodGet( + idr=acked.payment_method_id, + wallet_id=acked.wallet_id, + card_holder_name=got.card_holder_name, + card_number_masked=got.card_number_masked, + card_type=got.card_type, + expiration_month=got.expiration_month, + expiration_year=got.expiration_year, + created=acked.completed_at, + auto_recharge=False, # this will be fileld in the web/server + ) diff --git a/services/payments/src/simcore_service_payments/services/__init__.py b/services/payments/src/simcore_service_payments/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/payments/src/simcore_service_payments/services/auth.py b/services/payments/src/simcore_service_payments/services/auth.py new file mode 100644 index 00000000000..604c3eb4a8f --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/auth.py @@ -0,0 +1,95 @@ +""" +Implements OAuth2 with 
Password and Bearer (w/ JWT tokens) + +""" + +from datetime import timedelta +from typing import TypedDict + +import arrow +from fastapi import HTTPException, status +from jose import JWTError, jwt +from servicelib.utils_secrets import are_secrets_equal + +from ..core.settings import ApplicationSettings +from ..models.auth import SessionData + + +def authenticate_user(username: str, password: str, settings: ApplicationSettings): + return are_secrets_equal( + username + password, + expected=settings.PAYMENTS_USERNAME + + settings.PAYMENTS_PASSWORD.get_secret_value(), + ) + + +# +# JSON Web Tokens (https://jwt.io/introduction/) +# + + +_ALGORITHM = "HS256" + + +def encode_access_token(username: str, settings: ApplicationSettings) -> str: + expire = arrow.utcnow().datetime + timedelta( + minutes=settings.PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES + ) + # SEE https://jwt.io/introduction/ + payload_claims = { + # Registered claims + "sub": username, # subject: https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + "exp": expire, # expiration date: https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + } + json_web_token: str = jwt.encode( + payload_claims, + key=settings.PAYMENTS_ACCESS_TOKEN_SECRET_KEY.get_secret_value(), + algorithm=_ALGORITHM, + ) + return json_web_token + + +def decode_access_token(token: str, settings: ApplicationSettings) -> str | None: + """ + Raises: + JWTError: If the signature is invalid in any way. + - ExpiredSignatureError(JWTError): If the signature has expired. + - JWTClaimsError(JWTError): If any claim is invalid in any way. + """ + claims = jwt.decode( + token, + settings.PAYMENTS_ACCESS_TOKEN_SECRET_KEY.get_secret_value(), + algorithms=[_ALGORITHM], + ) + username: str | None = claims.get("sub", None) + return username + + +class _HTTPExceptionKwargs(TypedDict): + status_code: int + detail: str + headers: dict + + +_credencial_401_unauthorized_exception_kwargs: _HTTPExceptionKwargs = { + "status_code": status.HTTP_401_UNAUTHORIZED, + "detail": "Invalid authentication credentials", + "headers": {"WWW-Authenticate": "Bearer"}, +} + + +def get_session_data(token: str, settings: ApplicationSettings) -> SessionData: + """ + Raises: + HTTPException: 401 + """ + + try: + username = decode_access_token(token, settings) + except JWTError as err: + raise HTTPException(**_credencial_401_unauthorized_exception_kwargs) from err + + if username is None: + raise HTTPException(**_credencial_401_unauthorized_exception_kwargs) + + return SessionData(username=username) diff --git a/services/payments/src/simcore_service_payments/services/auto_recharge.py b/services/payments/src/simcore_service_payments/services/auto_recharge.py new file mode 100644 index 00000000000..aa72715816d --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/auto_recharge.py @@ -0,0 +1,122 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_webserver.wallets import ( + GetWalletAutoRecharge, + ReplaceWalletAutoRecharge, +) +from models_library.basic_types import NonNegativeDecimal +from models_library.users import UserID +from models_library.wallets import WalletID + +from ..core.settings import ApplicationSettings +from ..db.auto_recharge_repo import AutoRechargeRepo, PaymentsAutorechargeDB +from ..db.payments_methods_repo import PaymentsMethodsRepo + +_logger = logging.getLogger(__name__) + + +def _from_db_to_api_model( + db_model: PaymentsAutorechargeDB, min_balance_in_credits: NonNegativeDecimal +) -> GetWalletAutoRecharge: + return 
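# A small round-trip sketch for the JWT helpers above; the credentials are
# placeholders and must match PAYMENTS_USERNAME / PAYMENTS_PASSWORD in practice.
def _example_token_roundtrip(settings: ApplicationSettings) -> SessionData:
    if not authenticate_user("example-user", "example-password", settings):
        msg = "invalid credentials"
        raise ValueError(msg)

    # signed HS256 token with sub=username and exp=now + PAYMENTS_ACCESS_TOKEN_EXPIRE_MINUTES
    token = encode_access_token("example-user", settings)

    # raises HTTPException(401) if the signature is invalid or expired
    return get_session_data(token, settings)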
GetWalletAutoRecharge( + enabled=db_model.enabled, + payment_method_id=db_model.primary_payment_method_id, + min_balance_in_credits=min_balance_in_credits, + top_up_amount_in_usd=db_model.top_up_amount_in_usd, + monthly_limit_in_usd=db_model.monthly_limit_in_usd, + ) + + +def _from_api_to_db_model( + wallet_id: WalletID, api_model: ReplaceWalletAutoRecharge +) -> PaymentsAutorechargeDB: + return PaymentsAutorechargeDB( + wallet_id=wallet_id, + enabled=api_model.enabled, + primary_payment_method_id=api_model.payment_method_id, + top_up_amount_in_usd=api_model.top_up_amount_in_usd, + monthly_limit_in_usd=api_model.monthly_limit_in_usd, + ) + + +# +# payment-autorecharge api +# + +_NEWEST = 0 + + +async def get_wallet_auto_recharge( + settings: ApplicationSettings, + auto_recharge_repo: AutoRechargeRepo, + *, + wallet_id: WalletID, +) -> GetWalletAutoRecharge | None: + payments_autorecharge_db: PaymentsAutorechargeDB | None = ( + await auto_recharge_repo.get_wallet_autorecharge(wallet_id=wallet_id) + ) + if payments_autorecharge_db: + return GetWalletAutoRecharge( + enabled=payments_autorecharge_db.enabled, + payment_method_id=payments_autorecharge_db.primary_payment_method_id, + min_balance_in_credits=settings.PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS, + top_up_amount_in_usd=payments_autorecharge_db.top_up_amount_in_usd, + monthly_limit_in_usd=payments_autorecharge_db.monthly_limit_in_usd, + ) + return None + + +async def get_user_wallet_payment_autorecharge_with_default( + app: FastAPI, + auto_recharge_repo: AutoRechargeRepo, + payments_method_repo: PaymentsMethodsRepo, + *, + user_id: UserID, + wallet_id: WalletID, +) -> GetWalletAutoRecharge: + settings: ApplicationSettings = app.state.settings + + wallet_autorecharge = await get_wallet_auto_recharge( + settings, + auto_recharge_repo, + wallet_id=wallet_id, + ) + if not wallet_autorecharge: + payment_method_id = None + wallet_payment_methods = await payments_method_repo.list_user_payment_methods( + user_id=user_id, + wallet_id=wallet_id, + ) + if wallet_payment_methods: + payment_method_id = wallet_payment_methods[_NEWEST].payment_method_id + + return GetWalletAutoRecharge( + enabled=False, + payment_method_id=payment_method_id, + min_balance_in_credits=settings.PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS, + top_up_amount_in_usd=settings.PAYMENTS_AUTORECHARGE_DEFAULT_TOP_UP_AMOUNT, + monthly_limit_in_usd=settings.PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT, + ) + return wallet_autorecharge + + +async def replace_wallet_payment_autorecharge( + app: FastAPI, + repo: AutoRechargeRepo, + *, + user_id: UserID, + wallet_id: WalletID, + new: ReplaceWalletAutoRecharge, +) -> GetWalletAutoRecharge: + settings: ApplicationSettings = app.state.settings + got: PaymentsAutorechargeDB = await repo.replace_wallet_autorecharge( + user_id=user_id, + wallet_id=wallet_id, + new=_from_api_to_db_model(wallet_id, new), + ) + + return _from_db_to_api_model( + got, + min_balance_in_credits=settings.PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS, + ) diff --git a/services/payments/src/simcore_service_payments/services/auto_recharge_listener.py b/services/payments/src/simcore_service_payments/services/auto_recharge_listener.py new file mode 100644 index 00000000000..26e30bbe4db --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/auto_recharge_listener.py @@ -0,0 +1,47 @@ +import functools +import logging + +from fastapi import FastAPI +from models_library.rabbitmq_messages import WalletCreditsMessage +from servicelib.logging_utils import 
log_context +from servicelib.rabbitmq import ConsumerTag, QueueName + +from .auto_recharge_process_message import process_message +from .rabbitmq import get_rabbitmq_client + +_logger = logging.getLogger(__name__) + + +async def _subscribe_to_rabbitmq(app) -> tuple[QueueName, ConsumerTag]: + with log_context(_logger, logging.INFO, msg="Subscribing to rabbitmq channel"): + rabbit_client = get_rabbitmq_client(app) + return await rabbit_client.subscribe( + WalletCreditsMessage.get_channel_name(), + message_handler=functools.partial(process_message, app), + exclusive_queue=False, + topics=["#"], + ) + + +async def _unsubscribe_consumer( + app, queue_name: QueueName, consumer_tag: ConsumerTag +) -> None: + with log_context(_logger, logging.INFO, msg="Unsubscribing from rabbitmq queue"): + rabbit_client = get_rabbitmq_client(app) + await rabbit_client.unsubscribe_consumer(queue_name, consumer_tag) + + +def setup_auto_recharge_listener(app: FastAPI): + async def _on_startup() -> None: + app.state.auto_recharge_rabbitmq_consumer = await _subscribe_to_rabbitmq(app) + + async def _on_shutdown() -> None: + assert app.state.auto_recharge_rabbitmq_consumer # nosec + assert isinstance(app.state.auto_recharge_rabbitmq_consumer, tuple) # nosec + if app.state.rabbitmq_client: + # NOTE: We want to have persistent queue, therefore we will unsubscribe only consumer + await _unsubscribe_consumer(app, *app.state.auto_recharge_rabbitmq_consumer) + app.state.auto_recharge_rabbitmq_consumer = None + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/payments/src/simcore_service_payments/services/auto_recharge_process_message.py b/services/payments/src/simcore_service_payments/services/auto_recharge_process_message.py new file mode 100644 index 00000000000..e87e77e5c44 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/auto_recharge_process_message.py @@ -0,0 +1,195 @@ +import logging +from datetime import UTC, datetime, timedelta +from decimal import Decimal +from typing import cast + +from fastapi import FastAPI +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.api_schemas_webserver.wallets import ( + GetWalletAutoRecharge, + PaymentMethodID, +) +from models_library.basic_types import NonNegativeDecimal +from models_library.payments import InvoiceDataGet +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.rabbitmq_messages import WalletCreditsMessage +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from simcore_service_payments.db.auto_recharge_repo import AutoRechargeRepo +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.models.db import PaymentsMethodsDB +from simcore_service_payments.services.resource_usage_tracker import ( + ResourceUsageTrackerApi, +) + +from ..core.settings import ApplicationSettings +from .auto_recharge import get_wallet_auto_recharge +from .notifier import NotifierService +from .payments import pay_with_payment_method +from .payments_gateway import PaymentsGatewayApi +from .rabbitmq import get_rabbitmq_rpc_client + +_logger = logging.getLogger(__name__) + + +async def process_message(app: FastAPI, data: bytes) -> bool: + rabbit_message = TypeAdapter(WalletCreditsMessage).validate_json(data) + _logger.debug("Process msg: %s", 
rabbit_message) + + settings: ApplicationSettings = app.state.settings + + # Step 1: Check if wallet credits are above the threshold + if await _check_wallet_credits_above_threshold( + settings.PAYMENTS_AUTORECHARGE_MIN_BALANCE_IN_CREDITS, rabbit_message.credits + ): + return True # We do not auto recharge + + # Step 2: Check auto-recharge conditions + _auto_recharge_repo: AutoRechargeRepo = AutoRechargeRepo(db_engine=app.state.engine) + wallet_auto_recharge: GetWalletAutoRecharge | None = await get_wallet_auto_recharge( + settings, _auto_recharge_repo, wallet_id=rabbit_message.wallet_id + ) + if await _check_autorecharge_conditions_not_met(wallet_auto_recharge): + return True # We do not auto recharge + assert wallet_auto_recharge is not None # nosec + assert wallet_auto_recharge.payment_method_id is not None # nosec + + # Step 3: Check spending limits + _payments_transactions_repo = PaymentsTransactionsRepo(db_engine=app.state.engine) + if await _exceeds_monthly_limit( + _payments_transactions_repo, rabbit_message.wallet_id, wallet_auto_recharge + ): + return True # We do not auto recharge + + # Step 4: Check last top-up time + if await _was_wallet_topped_up_recently( + _payments_transactions_repo, rabbit_message.wallet_id + ): + return True # We do not auto recharge + + # Step 5: Check if timestamp when message was created is not too old + if await _is_message_too_old(rabbit_message.created_at): + return True # We do not auto recharge + + # Step 6: Get Payment method + _payments_repo = PaymentsMethodsRepo(db_engine=app.state.engine) + payment_method_db = await _payments_repo.get_payment_method_by_id( + payment_method_id=wallet_auto_recharge.payment_method_id + ) + + # Step 7: Perform auto-recharge + if settings.PAYMENTS_AUTORECHARGE_ENABLED: + await _perform_auto_recharge( + app, rabbit_message, payment_method_db, wallet_auto_recharge + ) + return True + + +async def _check_wallet_credits_above_threshold( + threshold_in_credits: NonNegativeDecimal, _credits: Decimal +) -> bool: + return bool(_credits > threshold_in_credits) + + +async def _check_autorecharge_conditions_not_met( + wallet_auto_recharge: GetWalletAutoRecharge | None, +) -> bool: + return ( + wallet_auto_recharge is None + or wallet_auto_recharge.enabled is False + or wallet_auto_recharge.payment_method_id is None + ) + + +async def _exceeds_monthly_limit( + payments_transactions_repo: PaymentsTransactionsRepo, + wallet_id: WalletID, + wallet_auto_recharge: GetWalletAutoRecharge, +): + cumulative_current_month_spending = ( + await payments_transactions_repo.sum_current_month_dollars(wallet_id=wallet_id) + ) + return ( + wallet_auto_recharge.monthly_limit_in_usd is not None + and cumulative_current_month_spending + + wallet_auto_recharge.top_up_amount_in_usd + > wallet_auto_recharge.monthly_limit_in_usd + ) + + +async def _was_wallet_topped_up_recently( + payments_transactions_repo: PaymentsTransactionsRepo, wallet_id: WalletID +): + """ + As safety, we check if the last transaction was initiated within the last 5 minutes + in that case we do not auto recharge + """ + last_wallet_transaction = ( + await payments_transactions_repo.get_last_payment_transaction_for_wallet( + wallet_id=wallet_id + ) + ) + + current_timestamp = datetime.now(tz=UTC) + current_timestamp_minus_5_minutes = current_timestamp - timedelta(minutes=5) + + return ( + last_wallet_transaction + and last_wallet_transaction.initiated_at > current_timestamp_minus_5_minutes + ) + + +async def _is_message_too_old( + message_timestamp: datetime, +): + """ + As 
safety, we check if the message was created within the last 5 minutes + if not we do not auto recharge + """ + current_timestamp = datetime.now(tz=UTC) + current_timestamp_minus_5_minutes = current_timestamp - timedelta(minutes=5) + + return message_timestamp < current_timestamp_minus_5_minutes + + +async def _perform_auto_recharge( + app: FastAPI, + rabbit_message: WalletCreditsMessage, + payment_method_db: PaymentsMethodsDB, + wallet_auto_recharge: GetWalletAutoRecharge, +): + rabbitmq_rpc_client = get_rabbitmq_rpc_client(app) + + result = await rabbitmq_rpc_client.request( + WEBSERVER_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_invoice_data"), + user_id=payment_method_db.user_id, + dollar_amount=wallet_auto_recharge.top_up_amount_in_usd, + product_name=rabbit_message.product_name, + ) + invoice_data_get = TypeAdapter(InvoiceDataGet).validate_python(result) + + await pay_with_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + rut=ResourceUsageTrackerApi.get_from_app_state(app), + repo_transactions=PaymentsTransactionsRepo(db_engine=app.state.engine), + repo_methods=PaymentsMethodsRepo(db_engine=app.state.engine), + notifier=NotifierService.get_from_app_state(app), + # + payment_method_id=cast(PaymentMethodID, wallet_auto_recharge.payment_method_id), + amount_dollars=wallet_auto_recharge.top_up_amount_in_usd, + target_credits=invoice_data_get.credit_amount, + product_name=rabbit_message.product_name, + wallet_id=rabbit_message.wallet_id, + wallet_name=f"id={rabbit_message.wallet_id}", + user_id=payment_method_db.user_id, + user_name=invoice_data_get.user_display_name, + user_email=invoice_data_get.user_email, + user_address=invoice_data_get.user_invoice_address, + stripe_price_id=invoice_data_get.stripe_price_id, + stripe_tax_rate_id=invoice_data_get.stripe_tax_rate_id, + comment="Payment generated by auto recharge", + ) diff --git a/services/payments/src/simcore_service_payments/services/healthchecks.py b/services/payments/src/simcore_service_payments/services/healthchecks.py new file mode 100644 index 00000000000..be6344c00ef --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/healthchecks.py @@ -0,0 +1,27 @@ +import asyncio +import logging + +from models_library.healthchecks import LivenessResult +from servicelib.db_asyncpg_utils import check_postgres_liveness +from sqlalchemy.ext.asyncio import AsyncEngine + +from .payments_gateway import PaymentsGatewayApi +from .resource_usage_tracker import ResourceUsageTrackerApi + +_logger = logging.getLogger(__name__) + + +async def create_health_report( + gateway: PaymentsGatewayApi, + rut: ResourceUsageTrackerApi, + engine: AsyncEngine, +) -> dict[str, LivenessResult]: + gateway_liveness, rut_liveness, db_liveness = await asyncio.gather( + gateway.check_liveness(), rut.check_liveness(), check_postgres_liveness(engine) + ) + + return { + "payments_gateway": gateway_liveness, + "resource_usage_tracker": rut_liveness, + "postgres": db_liveness, + } diff --git a/services/payments/src/simcore_service_payments/services/notifier.py b/services/payments/src/simcore_service_payments/services/notifier.py new file mode 100644 index 00000000000..29408ec15c0 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/notifier.py @@ -0,0 +1,107 @@ +import contextlib +import logging + +from fastapi import FastAPI +from models_library.api_schemas_webserver.wallets import PaymentMethodTransaction +from models_library.users import UserID +from servicelib.fastapi.app_state import 
SingletonInAppStateMixin +from servicelib.utils import fire_and_forget_task + +from ..core.settings import ApplicationSettings +from ..db.payment_users_repo import PaymentsUsersRepo +from ..models.db import PaymentsTransactionsDB +from .notifier_abc import NotificationProvider +from .notifier_email import EmailProvider +from .notifier_ws import WebSocketProvider +from .postgres import get_engine + +_logger = logging.getLogger(__name__) + + +class NotifierService(SingletonInAppStateMixin): + app_state_name: str = "notifier" + + def __init__(self, *providers): + self.providers: list[NotificationProvider] = list(providers) + self._background_tasks = set() + + def _run_in_background(self, coro, suffix): + fire_and_forget_task( + coro, + task_suffix_name=suffix, + fire_and_forget_tasks_collection=self._background_tasks, + ) + + async def notify_payment_completed( + self, + user_id: UserID, + payment: PaymentsTransactionsDB, + *, + exclude: set | None = None, + ): + if payment.completed_at is None: + msg = "Cannot notify incomplete payment" + raise ValueError(msg) + + exclude = exclude or set() + providers = [p for p in self.providers if p.get_name() not in exclude] + + for provider in providers: + self._run_in_background( + provider.notify_payment_completed( + user_id=user_id, + payment=payment, + ), + f"{provider.get_name()}_u_{user_id}_p_{payment.payment_id}", + ) + + async def notify_payment_method_acked( + self, + user_id: UserID, + payment_method: PaymentMethodTransaction, + ): + if payment_method.state == "PENDING": + msg = "Cannot notify unAcked payment-method" + raise ValueError(msg) + + for provider in self.providers: + self._run_in_background( + provider.notify_payment_method_acked( + user_id=user_id, payment_method=payment_method + ), + f"{provider.get_name()}_u_{user_id}_pm_{payment_method.payment_method_id}", + ) + + +def setup_notifier(app: FastAPI): + app_settings: ApplicationSettings = app.state.settings + + async def _on_startup() -> None: + assert app.state.external_socketio # nosec + + providers: list[NotificationProvider] = [ + WebSocketProvider( + sio_manager=app.state.external_socketio, + users_repo=PaymentsUsersRepo(get_engine(app)), + ), + ] + + if email_settings := app_settings.PAYMENTS_EMAIL: + providers.append( + EmailProvider( + email_settings, + users_repo=PaymentsUsersRepo(get_engine(app)), + bcc_email=app_settings.PAYMENTS_BCC_EMAIL, + ) + ) + + notifier = NotifierService(*providers) + notifier.set_to_app_state(app) + assert NotifierService.get_from_app_state(app) == notifier # nosec + + async def _on_shutdown() -> None: + with contextlib.suppress(AttributeError): + NotifierService.pop_from_app_state(app) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/payments/src/simcore_service_payments/services/notifier_abc.py b/services/payments/src/simcore_service_payments/services/notifier_abc.py new file mode 100644 index 00000000000..cef504bc4e0 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/notifier_abc.py @@ -0,0 +1,31 @@ +import logging +from abc import ABC, abstractmethod + +from models_library.api_schemas_webserver.wallets import PaymentMethodTransaction +from models_library.users import UserID + +from ..models.db import PaymentsTransactionsDB + +_logger = logging.getLogger(__name__) + + +class NotificationProvider(ABC): + @abstractmethod + async def notify_payment_completed( + self, + user_id: UserID, + payment: PaymentsTransactionsDB, + ): + ... 
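# Example-only provider sketch showing the NotificationProvider contract that
# NotifierService fans out to as fire-and-forget background tasks; the class
# and identifiers below are illustrative assumptions.
class _LogOnlyProvider(NotificationProvider):
    async def notify_payment_completed(self, user_id, payment):
        _logger.info("payment %s completed for user %s", payment.payment_id, user_id)

    async def notify_payment_method_acked(self, user_id, payment_method):
        _logger.info(
            "payment-method %s acked for user %s",
            payment_method.payment_method_id,
            user_id,
        )


# e.g. NotifierService(_LogOnlyProvider(), ...); a provider can be skipped per call
# by name: notifier.notify_payment_completed(..., exclude={"EmailProvider"})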
+ + @abstractmethod + async def notify_payment_method_acked( + self, + user_id: UserID, + payment_method: PaymentMethodTransaction, + ): + ... + + @classmethod + def get_name(cls): + return cls.__name__ diff --git a/services/payments/src/simcore_service_payments/services/notifier_email.py b/services/payments/src/simcore_service_payments/services/notifier_email.py new file mode 100644 index 00000000000..29a423837df --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/notifier_email.py @@ -0,0 +1,356 @@ +import logging +import mimetypes +import re +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from email.headerregistry import Address +from email.message import EmailMessage +from typing import Final + +import httpx +from aiosmtplib import SMTP +from attr import dataclass +from jinja2 import DictLoader, Environment, select_autoescape +from models_library.api_schemas_webserver.wallets import PaymentMethodTransaction +from models_library.products import ProductName +from models_library.users import UserID +from pydantic import EmailStr +from servicelib.logging_errors import create_troubleshotting_log_kwargs +from settings_library.email import EmailProtocol, SMTPSettings +from tenacity import ( + retry, + retry_if_exception_type, + retry_if_result, + stop_after_attempt, + wait_exponential, +) + +from ..db.payment_users_repo import PaymentsUsersRepo +from ..models.db import PaymentsTransactionsDB +from .notifier_abc import NotificationProvider + +_logger = logging.getLogger(__name__) + + +_BASE_HTML: Final[ + str +] = """ + + + + + +{% block title %}{% endblock %} + + + + {% block content %} + {% endblock %} + + +""" + + +_NOTIFY_PAYMENTS_HTML = """ +{% extends 'base.html' %} + +{% block title %}Payment Confirmation{% endblock %} + +{% block content %} +

<p>Dear {{ user.first_name }},</p>
+<p>We are delighted to confirm the successful processing of your payment of {{ payment.price_dollars }} USD for the purchase of {{ payment.osparc_credits }} credits.
+The credits have been added to your {{ product.display_name }} account, and you are all set to utilize them.</p>
+<p>For more details you can view or download your <a href="{{ payment.invoice_url }}">receipt</a>.</p>
+<p>Please don't hesitate to contact us at {{ product.support_email }} if you need further help.</p>
+<p>Best Regards,</p>
+<p>The {{ product.display_name }} Team</p>

+{% endblock %} +""" + +_NOTIFY_PAYMENTS_TXT = """ +Dear {{ user.first_name }}, + +We are delighted to confirm the successful processing of your payment of {{ payment.price_dollars }} USD for the purchase of {{ payment.osparc_credits }} credits. The credits have been added to your {{ product.display_name }} account, and you are all set to utilize them. + +For more details you can view or download your receipt: {{ payment.invoice_url }}. + +Please don't hesitate to contact us at {{ product.support_email }} if you need further help. + +Best Regards, +The {{ product.display_name }} Team +""" + + +_NOTIFY_PAYMENTS_SUBJECT = "Your Payment {{ payment.price_dollars }} USD for {{ payment.osparc_credits }} Credits Was Successful" + + +_PRODUCT_NOTIFICATIONS_TEMPLATES = { + "base.html": _BASE_HTML, + "notify_payments.html": _NOTIFY_PAYMENTS_HTML, + "notify_payments.txt": _NOTIFY_PAYMENTS_TXT, + "notify_payments-subject.txt": _NOTIFY_PAYMENTS_SUBJECT, +} + + +@dataclass +class _UserData: + first_name: str + last_name: str + email: str + + +@dataclass +class _ProductData: + product_name: ProductName + display_name: str + vendor_display_inline: str + support_email: str + bcc_email: EmailStr | None = None + + +@dataclass +class _PaymentData: + price_dollars: str + osparc_credits: str + invoice_url: str + invoice_pdf_url: str + + +def retry_if_status_code(response): + return response.status_code in ( + 429, + 500, + 502, + 503, + 504, + ) # Retry for these common transient errors + + +exception_retry_condition = retry_if_exception_type( + (httpx.ConnectError, httpx.ReadTimeout) +) +result_retry_condition = retry_if_result(retry_if_status_code) + + +@retry( + retry=exception_retry_condition | result_retry_condition, + wait=wait_exponential(multiplier=1, min=4, max=10), + stop=stop_after_attempt(5), + reraise=True, +) +async def _get_invoice_pdf(invoice_pdf: str) -> httpx.Response: + async with httpx.AsyncClient(follow_redirects=True) as client: + _response = await client.get(invoice_pdf) + _response.raise_for_status() + return _response + + +_INVOICE_FILE_NAME_PATTERN: Final = re.compile(r'filename="(?P[^"]+)"') + + +def _extract_file_name(response: httpx.Response) -> str: + match = _INVOICE_FILE_NAME_PATTERN.search(response.headers["content-disposition"]) + if not match: + error_msg = f"Cannot file pdf invoice {response.request.url}" + raise RuntimeError(error_msg) + + file_name: str = match.group("filename") + return file_name + + +def _guess_file_type(filename: str) -> tuple[str, str]: + mimetype, _encoding = mimetypes.guess_type(filename) + if mimetype: + maintype, subtype = mimetype.split("/", maxsplit=1) + else: + maintype, subtype = "application", "octet-stream" + return maintype, subtype + + +async def _create_user_email( + env: Environment, + user: _UserData, + payment: _PaymentData, + product: _ProductData, +) -> EmailMessage: + # data to interpolate template + data = { + "user": user, + "product": product, + "payment": payment, + } + + email_msg = EmailMessage() + + email_msg["From"] = Address( + display_name=f"{product.display_name} support", + addr_spec=product.support_email, + ) + email_msg["To"] = Address( + display_name=f"{user.first_name} {user.last_name}", + addr_spec=user.email, + ) + email_msg["Subject"] = env.get_template("notify_payments-subject.txt").render(data) + + if product.bcc_email: + email_msg["Bcc"] = product.bcc_email + + # Body + text_template = env.get_template("notify_payments.txt") + email_msg.set_content(text_template.render(data)) + + html_template = 
env.get_template("notify_payments.html") + email_msg.add_alternative(html_template.render(data), subtype="html") + + if payment.invoice_pdf_url: + try: + # Invoice attachment (It is important that attachment is added after body) + pdf_response = await _get_invoice_pdf(payment.invoice_pdf_url) + + # file + file_name = _extract_file_name(pdf_response) + main_type, sub_type = _guess_file_type(file_name) + + pdf_data = pdf_response.content + + email_msg.add_attachment( + pdf_data, + filename=file_name, + maintype=main_type, + subtype=sub_type, + ) + + except Exception as exc: # pylint: disable=broad-exception-caught + _logger.exception( + **create_troubleshotting_log_kwargs( + "Cannot attach invoice to payment. Email sent w/o attached pdf invoice", + error=exc, + error_context={ + "user": user, + "payment": payment, + "product": product, + }, + tip=f"Check downloading: `wget -v {payment.invoice_pdf_url}`", + ) + ) + + return email_msg + + +@asynccontextmanager +async def _create_email_session( + settings: SMTPSettings, +) -> AsyncIterator[SMTP]: + async with SMTP( + hostname=settings.SMTP_HOST, + port=settings.SMTP_PORT, + # FROM https://aiosmtplib.readthedocs.io/en/stable/usage.html#starttls-connections + # By default, if the server advertises STARTTLS support, aiosmtplib will upgrade the connection automatically. + # Setting use_tls=True for STARTTLS servers will typically result in a connection error + # To opt out of STARTTLS on connect, pass start_tls=False. + # NOTE: for that reason TLS and STARTLS are mutally exclusive + use_tls=settings.SMTP_PROTOCOL == EmailProtocol.TLS, + start_tls=settings.SMTP_PROTOCOL == EmailProtocol.STARTTLS, + ) as smtp: + if settings.has_credentials: + assert settings.SMTP_USERNAME # nosec + assert settings.SMTP_PASSWORD # nosec + await smtp.login( + settings.SMTP_USERNAME, + settings.SMTP_PASSWORD.get_secret_value(), + ) + + yield smtp + + +class EmailProvider(NotificationProvider): + def __init__( + self, + settings: SMTPSettings, + users_repo: PaymentsUsersRepo, + bcc_email: EmailStr | None = None, + ): + self._users_repo = users_repo + self._settings = settings + self._bcc_email = bcc_email + self._jinja_env = Environment( + loader=DictLoader(_PRODUCT_NOTIFICATIONS_TEMPLATES), + autoescape=select_autoescape(["html", "xml"]), + ) + + async def _create_successful_payments_message( + self, + user_id: UserID, + payment: PaymentsTransactionsDB, + ) -> EmailMessage: + data = await self._users_repo.get_notification_data(user_id, payment.payment_id) + data_vendor = data.vendor or {} + + # email for successful payment + msg: EmailMessage = await _create_user_email( + self._jinja_env, + user=_UserData( + first_name=data.first_name, + last_name=data.last_name, + email=data.email, + ), + payment=_PaymentData( + price_dollars=f"{payment.price_dollars:.2f}", + osparc_credits=f"{payment.osparc_credits:.2f}", + invoice_url=f"{payment.invoice_url}", + invoice_pdf_url=f"{payment.invoice_pdf_url}", + ), + product=_ProductData( + product_name=data.product_name, + display_name=data.display_name, + vendor_display_inline=f"{data_vendor.get('name', '')}. 
{data_vendor.get('address', '')}", + support_email=data.support_email, + bcc_email=self._bcc_email, + ), + ) + + return msg + + async def notify_payment_completed( + self, + user_id: UserID, + payment: PaymentsTransactionsDB, + ): + # NOTE: we only have an email for successful payments + if payment.state == "SUCCESS": + msg = await self._create_successful_payments_message(user_id, payment) + + async with _create_email_session(self._settings) as smtp: + await smtp.send_message(msg) + else: + _logger.debug( + "No email sent when %s did a non-SUCCESS %s", + f"{user_id=}", + f"{payment=}", + ) + + async def notify_payment_method_acked( + self, + user_id: UserID, + payment_method: PaymentMethodTransaction, + ): + assert user_id # nosec + assert payment_method # nosec + _logger.debug("No email sent when payment method is acked") diff --git a/services/payments/src/simcore_service_payments/services/notifier_ws.py b/services/payments/src/simcore_service_payments/services/notifier_ws.py new file mode 100644 index 00000000000..bff540340f4 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/notifier_ws.py @@ -0,0 +1,59 @@ +import logging + +import socketio # type: ignore[import-untyped] +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_payments.socketio import ( + SOCKET_IO_PAYMENT_COMPLETED_EVENT, + SOCKET_IO_PAYMENT_METHOD_ACKED_EVENT, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.api_schemas_webserver.wallets import PaymentMethodTransaction +from models_library.users import UserID + +from ..db.payment_users_repo import PaymentsUsersRepo +from ..models.db import PaymentsTransactionsDB +from .notifier_abc import NotificationProvider + +_logger = logging.getLogger(__name__) + + +class WebSocketProvider(NotificationProvider): + def __init__( + self, sio_manager: socketio.AsyncAioPikaManager, users_repo: PaymentsUsersRepo + ): + self._sio_manager = sio_manager + self._users_repo = users_repo + + async def notify_payment_completed( + self, + user_id: UserID, + payment: PaymentsTransactionsDB, + ): + if payment.completed_at is None: + msg = "Incomplete payment" + raise ValueError(msg) + + user_primary_group_id = await self._users_repo.get_primary_group_id(user_id) + + # NOTE: We assume that the user has been added to all + # rooms associated to his groups + assert payment.completed_at is not None # nosec + + return await self._sio_manager.emit( + SOCKET_IO_PAYMENT_COMPLETED_EVENT, + data=jsonable_encoder(payment, by_alias=True), + room=SocketIORoomStr.from_group_id(user_primary_group_id), + ) + + async def notify_payment_method_acked( + self, + user_id: UserID, + payment_method: PaymentMethodTransaction, + ): + user_primary_group_id = await self._users_repo.get_primary_group_id(user_id) + + return await self._sio_manager.emit( + SOCKET_IO_PAYMENT_METHOD_ACKED_EVENT, + data=jsonable_encoder(payment_method, by_alias=True), + room=SocketIORoomStr.from_group_id(user_primary_group_id), + ) diff --git a/services/payments/src/simcore_service_payments/services/payments.py b/services/payments/src/simcore_service_payments/services/payments.py new file mode 100644 index 00000000000..6e73bb66089 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/payments.py @@ -0,0 +1,352 @@ +""" Functions here support two types of payments worklows: +- One-time payment +- Payment w/ payment-method + +""" +# pylint: disable=too-many-arguments + +import logging +import uuid +from decimal import Decimal 
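# Sketch of how the helpers defined below fit together for a one-time payment;
# the concrete values, the `ack` payload and the injected dependencies are
# placeholders for illustration only.
async def _example_one_time_payment_flow(gateway, rut, repo, notifier, ack):
    # 1) init: creates a PENDING transaction and returns the gateway's payment-form URL
    initiated = await init_one_time_payment(
        gateway,
        repo,
        amount_dollars=Decimal("25.0"),
        target_credits=Decimal("100.0"),
        product_name="osparc",
        wallet_id=1,
        wallet_name="my-wallet",
        user_id=1,
        user_name="jsmith",
        user_email="jsmith@example.com",
        user_address=UserInvoiceAddress(country="CH"),
        stripe_price_id="price_123",
        stripe_tax_rate_id="txr_123",
    )

    # 2) once the gateway ACKs the payment, the transaction is completed exactly once
    transaction = await acknowledge_one_time_payment(
        repo, payment_id=initiated.payment_id, ack=ack
    )

    # 3) on SUCCESS, credits are registered in the resource-usage-tracker and the
    #    user gets notified (websocket and, if configured, email)
    await on_payment_completed(transaction, rut, notifier)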
+from typing import cast + +import arrow +from models_library.api_schemas_payments.errors import ( + PaymentAlreadyAckedError, + PaymentAlreadyExistsError, + PaymentNotFoundError, +) +from models_library.api_schemas_webserver.wallets import ( + PaymentID, + PaymentMethodID, + PaymentTransaction, + WalletPaymentInitiated, +) +from models_library.basic_types import AmountDecimal, IDStr +from models_library.payments import UserInvoiceAddress +from models_library.products import ProductName, StripePriceID, StripeTaxRateID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, HttpUrl, PositiveInt +from servicelib.logging_utils import log_context +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, +) +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from tenacity import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_attempt + +from .._constants import RUT +from ..db.payments_transactions_repo import PaymentsTransactionsRepo +from ..models.db import PaymentsTransactionsDB +from ..models.db_to_api import to_payments_api_model +from ..models.payments_gateway import ( + COUNTRIES_WITH_VAT, + InitPayment, + PaymentInitiated, + StripeTaxExempt, +) +from ..models.schemas.acknowledgements import AckPayment, AckPaymentWithPaymentMethod +from ..models.stripe import InvoiceData +from ..services.resource_usage_tracker import ResourceUsageTrackerApi +from ..services.stripe import StripeApi +from .notifier import NotifierService +from .notifier_ws import WebSocketProvider +from .payments_gateway import PaymentsGatewayApi + +_logger = logging.getLogger() + + +async def init_one_time_payment( + gateway: PaymentsGatewayApi, + repo: PaymentsTransactionsRepo, + *, + amount_dollars: Decimal, + target_credits: Decimal, + product_name: str, + wallet_id: WalletID, + wallet_name: str, + user_id: UserID, + user_name: str, + user_email: EmailStr, + user_address: UserInvoiceAddress, + stripe_price_id: StripePriceID, + stripe_tax_rate_id: StripeTaxRateID, + comment: str | None = None, +) -> WalletPaymentInitiated: + initiated_at = arrow.utcnow().datetime + + init: PaymentInitiated = await gateway.init_payment( + payment=InitPayment( + amount_dollars=AmountDecimal(amount_dollars), + credits=AmountDecimal(target_credits), + user_name=IDStr(user_name), + user_email=user_email, + user_address=user_address, + wallet_name=IDStr(wallet_name), + stripe_price_id=stripe_price_id, + stripe_tax_rate_id=stripe_tax_rate_id, + stripe_tax_exempt_value=( + StripeTaxExempt.none + if user_address.country in COUNTRIES_WITH_VAT + else StripeTaxExempt.reverse + ), + ), + ) + + submission_link = gateway.get_form_payment_url(init.payment_id) + + payment_id = await repo.insert_init_payment_transaction( + payment_id=init.payment_id, + price_dollars=amount_dollars, + osparc_credits=target_credits, + product_name=product_name, + user_id=user_id, + user_email=user_email, + wallet_id=wallet_id, + comment=comment, + initiated_at=initiated_at, + ) + + assert payment_id == init.payment_id # nosec + + return WalletPaymentInitiated( + payment_id=PaymentID(f"{payment_id}"), + payment_form_url=cast(HttpUrl, f"{submission_link}"), + ) + + +async def cancel_one_time_payment( + gateway: PaymentsGatewayApi, + repo: PaymentsTransactionsRepo, + *, + payment_id: PaymentID, + user_id: UserID, + wallet_id: WalletID, +) -> None: + payment = await 
repo.get_payment_transaction( + payment_id=payment_id, user_id=user_id, wallet_id=wallet_id + ) + + if payment is None: + raise PaymentNotFoundError(payment_id=payment_id) + + if payment.state.is_completed(): + if payment.state == PaymentTransactionState.CANCELED: + # Avoids error if multiple cancel calls + return + raise PaymentAlreadyAckedError(payment_id=payment_id) + + payment_cancelled = await gateway.cancel_payment( + PaymentInitiated(payment_id=payment_id) + ) + + await repo.update_ack_payment_transaction( + payment_id=payment_id, + completion_state=PaymentTransactionState.CANCELED, + state_message=payment_cancelled.message, + invoice_url=None, + stripe_invoice_id=None, + invoice_pdf_url=None, + ) + + +async def acknowledge_one_time_payment( + repo_transactions: PaymentsTransactionsRepo, + *, + payment_id: PaymentID, + ack: AckPayment, +) -> PaymentsTransactionsDB: + return await repo_transactions.update_ack_payment_transaction( + payment_id=payment_id, + completion_state=( + PaymentTransactionState.SUCCESS + if ack.success + else PaymentTransactionState.FAILED + ), + state_message=ack.message, + invoice_url=ack.invoice_url, + stripe_invoice_id=ack.stripe_invoice_id, + invoice_pdf_url=ack.invoice_pdf, + ) + + +async def on_payment_completed( + transaction: PaymentsTransactionsDB, + rut_api: ResourceUsageTrackerApi, + notifier: NotifierService, + exclude: set | None = None, +): + assert transaction.completed_at is not None # nosec + assert transaction.initiated_at < transaction.completed_at # nosec + + if transaction.state == PaymentTransactionState.SUCCESS: + with log_context( + _logger, + logging.INFO, + "%s: Top-up %s credits for %s", + RUT, + f"{transaction.osparc_credits}", + f"{transaction.payment_id=}", + ): + assert transaction.state == PaymentTransactionState.SUCCESS # nosec + credit_transaction_id = await rut_api.create_credit_transaction( + product_name=transaction.product_name, + wallet_id=transaction.wallet_id, + wallet_name=f"id={transaction.wallet_id}", + user_id=transaction.user_id, + user_email=transaction.user_email, + osparc_credits=transaction.osparc_credits, + payment_transaction_id=transaction.payment_id, + created_at=transaction.completed_at, + ) + + _logger.debug( + "%s: Response to %s was %s", + RUT, + f"{transaction.payment_id=}", + f"{credit_transaction_id=}", + ) + + await notifier.notify_payment_completed( + user_id=transaction.user_id, + payment=transaction, + exclude=exclude, + ) + + +async def pay_with_payment_method( # noqa: PLR0913 + gateway: PaymentsGatewayApi, + rut: ResourceUsageTrackerApi, + repo_transactions: PaymentsTransactionsRepo, + repo_methods: PaymentsMethodsRepo, + notifier: NotifierService, + *, + payment_method_id: PaymentMethodID, + amount_dollars: Decimal, + target_credits: Decimal, + product_name: str, + wallet_id: WalletID, + wallet_name: str, + user_id: UserID, + user_name: str, + user_email: EmailStr, + user_address: UserInvoiceAddress, + stripe_price_id: StripePriceID, + stripe_tax_rate_id: StripeTaxRateID, + comment: str | None = None, +) -> PaymentTransaction: + initiated_at = arrow.utcnow().datetime + + acked = await repo_methods.get_payment_method( + payment_method_id, user_id=user_id, wallet_id=wallet_id + ) + + ack: AckPaymentWithPaymentMethod = await gateway.pay_with_payment_method( + acked.payment_method_id, + payment=InitPayment( + amount_dollars=AmountDecimal(amount_dollars), + credits=AmountDecimal(target_credits), + user_name=IDStr(user_name), + user_email=user_email, + user_address=user_address, + 
wallet_name=IDStr(wallet_name), + stripe_price_id=stripe_price_id, + stripe_tax_rate_id=stripe_tax_rate_id, + stripe_tax_exempt_value=( + StripeTaxExempt.none + if user_address.country in COUNTRIES_WITH_VAT + else StripeTaxExempt.reverse + ), + ), + ) + + payment_id = ack.payment_id + + async for attempt in AsyncRetrying( + stop=stop_after_attempt(3), + retry=retry_if_exception_type(PaymentAlreadyExistsError), + reraise=True, + ): + with attempt: + payment_id = await repo_transactions.insert_init_payment_transaction( + ack.payment_id or PaymentID(f"{uuid.uuid4()}"), + price_dollars=amount_dollars, + osparc_credits=target_credits, + product_name=product_name, + user_id=user_id, + user_email=user_email, + wallet_id=wallet_id, + comment=comment, + initiated_at=initiated_at, + ) + + assert payment_id is not None # nosec + + transaction = await repo_transactions.update_ack_payment_transaction( + payment_id=payment_id, + completion_state=( + PaymentTransactionState.SUCCESS + if ack.success + else PaymentTransactionState.FAILED + ), + state_message=ack.message, + invoice_url=ack.invoice_url, + stripe_invoice_id=ack.stripe_invoice_id, + invoice_pdf_url=ack.invoice_pdf, + ) + + # NOTE: notifications here are done as background-task after responding `POST /wallets/{wallet_id}/payments-methods/{payment_method_id}:pay` + await on_payment_completed( + transaction, + rut, + notifier=notifier, + exclude={WebSocketProvider.get_name()}, + ) + + return to_payments_api_model(transaction) + + +async def get_payments_page( + repo: PaymentsTransactionsRepo, + *, + user_id: UserID, + product_name: ProductName, + limit: PositiveInt | None = None, + offset: PositiveInt | None = None, +) -> tuple[int, list[PaymentTransaction]]: + """All payments associated to a user (i.e. including all the owned wallets)""" + + total_number_of_items, page = await repo.list_user_payment_transactions( + user_id=user_id, product_name=product_name, offset=offset, limit=limit + ) + + return total_number_of_items, [to_payments_api_model(t) for t in page] + + +async def get_payment_invoice_url( + repo: PaymentsTransactionsRepo, + stripe_api: StripeApi, + *, + user_id: UserID, + wallet_id: WalletID, + payment_id: PaymentID, +) -> HttpUrl: + """Get invoice data from Stripe. As invoice url expires after some time, Stripe always generates a new + invoice url with 10 day validity.""" + + payment: PaymentsTransactionsDB | None = await repo.get_payment_transaction( + payment_id=payment_id, user_id=user_id, wallet_id=wallet_id + ) + if payment is None or payment.invoice_url is None: + raise PaymentNotFoundError(payment_id=payment_id) + + # NOTE: A new method for generating invoices directly from Stripe has been introduced (https://github.com/ITISFoundation/osparc-simcore/pull/5537). + # In order to maintain backward compatibility with older invoices, the old invoice URL is provided in such cases. 
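+    # Illustrative (assumed) examples of the branching below:
+    #   payment.stripe_invoice_id set (e.g. "in_...")  -> a fresh hosted_invoice_url is fetched from Stripe
+    #   payment.stripe_invoice_id is None (legacy row) -> the stored payment.invoice_url is returned as-is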
+ if payment.stripe_invoice_id: + invoice_data: InvoiceData = await stripe_api.get_invoice( + payment.stripe_invoice_id + ) + return invoice_data.hosted_invoice_url + return payment.invoice_url diff --git a/services/payments/src/simcore_service_payments/services/payments_gateway.py b/services/payments/src/simcore_service_payments/services/payments_gateway.py new file mode 100644 index 00000000000..812ab087074 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/payments_gateway.py @@ -0,0 +1,221 @@ +""" Interface to communicate with the payment's gateway + +- httpx client with base_url to PAYMENTS_GATEWAY_URL +- Fake gateway service in services/payments/scripts/example_payment_gateway.py + +""" + +import contextlib +import functools +import logging +from collections.abc import Callable +from contextlib import suppress + +import httpx +from common_library.errors_classes import OsparcErrorMixin +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from httpx import URL, HTTPStatusError +from models_library.api_schemas_webserver.wallets import PaymentID, PaymentMethodID +from pydantic import TypeAdapter, ValidationError +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import ( + AttachLifespanMixin, + BaseHTTPApi, + HealthMixinMixin, +) +from servicelib.fastapi.httpx_utils import to_curl_command +from servicelib.fastapi.tracing import setup_httpx_client_tracing + +from ..core.settings import ApplicationSettings +from ..models.payments_gateway import ( + BatchGetPaymentMethods, + ErrorModel, + GetPaymentMethod, + InitPayment, + InitPaymentMethod, + PaymentCancelled, + PaymentInitiated, + PaymentMethodInitiated, + PaymentMethodsBatch, +) +from ..models.schemas.acknowledgements import AckPaymentWithPaymentMethod + +_logger = logging.getLogger(__name__) + + +def _parse_raw_as_or_none(cls: type, text: str | None): + if text: + with suppress(ValidationError): + return TypeAdapter(cls).validate_python(text) + return None + + +class PaymentsGatewayError(OsparcErrorMixin, ValueError): + msg_template = "{operation_id} error {status_code}: {reason}" + + @classmethod + def from_http_status_error( + cls, err: HTTPStatusError, operation_id: str + ) -> "PaymentsGatewayError": + return cls( + operation_id=f"PaymentsGatewayApi.{operation_id}", + reason=f"{err}", + status_code=err.response.status_code, + # extra context for details + http_status_error=err, + model=_parse_raw_as_or_none(ErrorModel, err.response.text), + ) + + def get_detailed_message(self) -> str: + err_json = "null" + if model := getattr(self, "model", None): + err_json = model.model_dump_json(indent=1) + + curl_cmd = "null" + if http_status_error := getattr(self, "http_status_error", None): + curl_cmd = to_curl_command(http_status_error.request) + + return f"{self}\nREQ: '{curl_cmd}'\nRESP: {err_json}" + + +@contextlib.contextmanager +def _raise_as_payments_gateway_error(operation_id: str): + try: + yield + + except HTTPStatusError as err: + error = PaymentsGatewayError.from_http_status_error( + err, operation_id=operation_id + ) + _logger.warning(error.get_detailed_message()) + raise error from err + + +def _handle_status_errors(coro: Callable): + @functools.wraps(coro) + async def _wrapper(self, *args, **kwargs): + with _raise_as_payments_gateway_error(operation_id=coro.__name__): + return await coro(self, *args, **kwargs) + + return _wrapper + + +class _GatewayApiAuth(httpx.Auth): + def __init__(self, secret): + self.token = secret + + def 
auth_flow(self, request): + request.headers["X-Init-Api-Secret"] = self.token + yield request + + +class PaymentsGatewayApi( + BaseHTTPApi, AttachLifespanMixin, HealthMixinMixin, SingletonInAppStateMixin +): + app_state_name: str = "payment_gateway_api" + + # + # api: one-time-payment workflow + # + + @_handle_status_errors + async def init_payment(self, payment: InitPayment) -> PaymentInitiated: + response = await self.client.post( + "/init", + json=jsonable_encoder(payment.model_dump(exclude_none=True, by_alias=True)), + ) + response.raise_for_status() + return PaymentInitiated.model_validate(response.json()) + + def get_form_payment_url(self, id_: PaymentID) -> URL: + return self.client.base_url.copy_with(path="/pay", params={"id": f"{id_}"}) + + @_handle_status_errors + async def cancel_payment( + self, payment_initiated: PaymentInitiated + ) -> PaymentCancelled: + response = await self.client.post( + "/cancel", + json=jsonable_encoder(payment_initiated), + ) + response.raise_for_status() + return PaymentCancelled.model_validate(response.json()) + + # + # api: payment method workflows + # + + @_handle_status_errors + async def init_payment_method( + self, + payment_method: InitPaymentMethod, + ) -> PaymentMethodInitiated: + response = await self.client.post( + "/payment-methods:init", + json=jsonable_encoder(payment_method), + ) + response.raise_for_status() + return PaymentMethodInitiated.model_validate(response.json()) + + def get_form_payment_method_url(self, id_: PaymentMethodID) -> URL: + return self.client.base_url.copy_with( + path="/payment-methods/form", params={"id": f"{id_}"} + ) + + # CRUD + + @_handle_status_errors + async def get_many_payment_methods( + self, ids_: list[PaymentMethodID] + ) -> list[GetPaymentMethod]: + if not ids_: + return [] + response = await self.client.post( + "/payment-methods:batchGet", + json=jsonable_encoder(BatchGetPaymentMethods(payment_methods_ids=ids_)), + ) + response.raise_for_status() + return PaymentMethodsBatch.model_validate(response.json()).items + + @_handle_status_errors + async def get_payment_method(self, id_: PaymentMethodID) -> GetPaymentMethod: + response = await self.client.get(f"/payment-methods/{id_}") + response.raise_for_status() + return GetPaymentMethod.model_validate(response.json()) + + @_handle_status_errors + async def delete_payment_method(self, id_: PaymentMethodID) -> None: + response = await self.client.delete(f"/payment-methods/{id_}") + response.raise_for_status() + + @_handle_status_errors + async def pay_with_payment_method( + self, + id_: PaymentMethodID, + payment: InitPayment, + ) -> AckPaymentWithPaymentMethod: + response = await self.client.post( + f"/payment-methods/{id_}:pay", + json=jsonable_encoder(payment.model_dump(exclude_none=True, by_alias=True)), + ) + response.raise_for_status() + return AckPaymentWithPaymentMethod.model_validate(response.json()) + + +def setup_payments_gateway(app: FastAPI): + assert app.state # nosec + settings: ApplicationSettings = app.state.settings + + # create + api = PaymentsGatewayApi.from_client_kwargs( + base_url=f"{settings.PAYMENTS_GATEWAY_URL}", + headers={"accept": "application/json"}, + auth=_GatewayApiAuth( + secret=settings.PAYMENTS_GATEWAY_API_SECRET.get_secret_value() + ), + ) + if settings.PAYMENTS_TRACING: + setup_httpx_client_tracing(api.client) + api.attach_lifespan_to(app) + api.set_to_app_state(app) diff --git a/services/payments/src/simcore_service_payments/services/payments_methods.py 
b/services/payments/src/simcore_service_payments/services/payments_methods.py new file mode 100644 index 00000000000..f20189d7859 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/payments_methods.py @@ -0,0 +1,215 @@ +""" This service just keeps a reference of the payment method `payment_method_id` +and all the details are stored externaly. + +The creation of this resource can use any of these two workflows: +1. init-prompt-ack workflow + - init_creation_of_payment_method + - cancel_creation_of_payment_method (optional) + - acknowledge_creation_of_payment_method +2. direct creation + - create_payment_method + +This resource can also be read or deleted using +- list_payments_methods +- get_payment_method +- delete_payment_method + +""" +import logging +from typing import cast + +import arrow +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodGet, + PaymentMethodID, + PaymentMethodInitiated, +) +from models_library.basic_types import IDStr +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, HttpUrl +from simcore_postgres_database.models.payments_methods import InitPromptAckFlowState + +from ..db.payments_methods_repo import PaymentsMethodsRepo +from ..models.db import PaymentsMethodsDB +from ..models.db_to_api import to_payment_method_api_model +from ..models.payments_gateway import GetPaymentMethod, InitPaymentMethod +from ..models.schemas.acknowledgements import AckPaymentMethod +from ..models.utils import merge_models +from .notifier import NotifierService +from .payments_gateway import PaymentsGatewayApi + +_logger = logging.getLogger(__name__) + + +async def init_creation_of_payment_method( + gateway: PaymentsGatewayApi, + repo: PaymentsMethodsRepo, + *, + wallet_id: WalletID, + wallet_name: IDStr, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, +) -> PaymentMethodInitiated: + initiated_at = arrow.utcnow().datetime + + init = await gateway.init_payment_method( + InitPaymentMethod( + user_name=user_name, + user_email=user_email, + wallet_name=wallet_name, + ) + ) + form_link = gateway.get_form_payment_method_url(init.payment_method_id) + + payment_method_id = await repo.insert_init_payment_method( + init.payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + initiated_at=initiated_at, + ) + assert payment_method_id == init.payment_method_id # nosec + + return PaymentMethodInitiated( + wallet_id=wallet_id, + payment_method_id=payment_method_id, + payment_method_form_url=cast(HttpUrl, f"{form_link}"), + ) + + +async def cancel_creation_of_payment_method( + gateway: PaymentsGatewayApi, + repo: PaymentsMethodsRepo, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +): + # Prevents card from being used + await repo.update_ack_payment_method( + payment_method_id, + completion_state=InitPromptAckFlowState.CANCELED, + state_message="User cancelled", + ) + + # gateway delete + await gateway.delete_payment_method(payment_method_id) + + # delete payment-method in db + await repo.delete_payment_method( + payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + +async def acknowledge_creation_of_payment_method( + repo: PaymentsMethodsRepo, + *, + payment_method_id: PaymentMethodID, + ack: AckPaymentMethod, +) -> PaymentsMethodsDB: + return await repo.update_ack_payment_method( + payment_method_id=payment_method_id, + completion_state=( + InitPromptAckFlowState.SUCCESS + if ack.success + else 
InitPromptAckFlowState.FAILED + ), + state_message=ack.message, + ) + + +async def on_payment_method_completed( + payment_method: PaymentsMethodsDB, notifier: NotifierService +): + assert payment_method.completed_at is not None # nosec + assert payment_method.initiated_at < payment_method.completed_at # nosec + + if payment_method.state == InitPromptAckFlowState.SUCCESS: + await notifier.notify_payment_method_acked( + user_id=payment_method.user_id, + payment_method=to_payment_method_api_model(payment_method), + ) + + +async def insert_payment_method( + repo: PaymentsMethodsRepo, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, + ack: AckPaymentMethod, +) -> PaymentsMethodsDB: + """Direct creation of payment-method. + NOTE: that this does NOT communicates with the gateway. + Used e.g. when gateway saved payment method after one-time payment + """ + return await repo.insert_payment_method( + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + completion_state=InitPromptAckFlowState.SUCCESS + if ack.success + else InitPromptAckFlowState.FAILED, + state_message=ack.message, + ) + + +async def list_payment_methods( + gateway: PaymentsGatewayApi, + repo: PaymentsMethodsRepo, + *, + user_id: UserID, + wallet_id: WalletID, +) -> list[PaymentMethodGet]: + acked_many = await repo.list_user_payment_methods( + user_id=user_id, wallet_id=wallet_id + ) + assert not any(acked.completed_at is None for acked in acked_many) # nosec + + got_many: list[GetPaymentMethod] = await gateway.get_many_payment_methods( + [acked.payment_method_id for acked in acked_many] + ) + + return [ + merge_models(got, acked) + for acked, got in zip(acked_many, got_many, strict=True) + ] + + +async def get_payment_method( + gateway: PaymentsGatewayApi, + repo: PaymentsMethodsRepo, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +) -> PaymentMethodGet: + acked = await repo.get_payment_method( + payment_method_id, user_id=user_id, wallet_id=wallet_id + ) + assert acked.state == InitPromptAckFlowState.SUCCESS # nosec + + got: GetPaymentMethod = await gateway.get_payment_method(acked.payment_method_id) + return merge_models(got, acked) + + +async def delete_payment_method( + gateway: PaymentsGatewayApi, + repo: PaymentsMethodsRepo, + *, + payment_method_id: PaymentMethodID, + user_id: UserID, + wallet_id: WalletID, +): + acked = await repo.get_payment_method( + payment_method_id, user_id=user_id, wallet_id=wallet_id + ) + + await gateway.delete_payment_method(acked.payment_method_id) + + await repo.delete_payment_method( + acked.payment_method_id, user_id=acked.user_id, wallet_id=acked.wallet_id + ) diff --git a/services/payments/src/simcore_service_payments/services/postgres.py b/services/payments/src/simcore_service_payments/services/postgres.py new file mode 100644 index 00000000000..fd84fba45ce --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/postgres.py @@ -0,0 +1,28 @@ +from fastapi import FastAPI +from servicelib.fastapi.db_asyncpg_engine import close_db_connection, connect_to_db +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..core.settings import ApplicationSettings + + +def get_engine(app: FastAPI) -> AsyncEngine: + assert app.state.engine # nosec + engine: AsyncEngine = app.state.engine + return engine + + +def setup_postgres(app: FastAPI): + app.state.engine = None + + async def _on_startup() -> None: + settings: ApplicationSettings = app.state.settings + await connect_to_db(app, 
settings.PAYMENTS_POSTGRES) + assert app.state.engine # nosec + assert isinstance(app.state.engine, AsyncEngine) # nosec + + async def _on_shutdown() -> None: + assert app.state.engine # nosec + await close_db_connection(app) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git a/services/payments/src/simcore_service_payments/services/rabbitmq.py b/services/payments/src/simcore_service_payments/services/rabbitmq.py new file mode 100644 index 00000000000..70d1fdc5c92 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/rabbitmq.py @@ -0,0 +1,75 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from fastapi.requests import Request +from models_library.rabbitmq_messages import RabbitMessageBase +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) +from settings_library.rabbit import RabbitSettings + +_logger = logging.getLogger(__name__) + + +def get_rabbitmq_settings(app: FastAPI) -> RabbitSettings: + settings: RabbitSettings = app.state.settings.PAYMENTS_RABBITMQ + return settings + + +def setup_rabbitmq(app: FastAPI) -> None: + settings: RabbitSettings = get_rabbitmq_settings(app) + app.state.rabbitmq_client = None + app.state.rabbitmq_rpc_server = None + + async def _on_startup() -> None: + await wait_till_rabbitmq_responsive(settings.dsn) + + app.state.rabbitmq_client = RabbitMQClient( + client_name="payments", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="payments_rpc_server", settings=settings + ) + app.state.rabbitmq_rpc_client = await RabbitMQRPCClient.create( + client_name="payments_rpc_client", settings=settings + ) + + async def _on_shutdown() -> None: + if app.state.rabbitmq_client: + await app.state.rabbitmq_client.close() + app.state.rabbitmq_client = None + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + app.state.rabbitmq_rpc_server = None + if app.state.rabbitmq_rpc_client: + await app.state.rabbitmq_rpc_client.close() + app.state.rabbitmq_rpc_client = None + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + assert app.state.rabbitmq_client # nosec + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_client_from_request(request: Request): + return get_rabbitmq_client(request.app) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) + + +def get_rabbitmq_rpc_client(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_client # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_client) + + +async def post_message(app: FastAPI, message: RabbitMessageBase) -> None: + await get_rabbitmq_client(app).publish(message.channel_name, message) diff --git a/services/payments/src/simcore_service_payments/services/resource_usage_tracker.py b/services/payments/src/simcore_service_payments/services/resource_usage_tracker.py new file mode 100644 index 00000000000..6ae5d424fdf --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/resource_usage_tracker.py @@ -0,0 +1,80 @@ +""" Interface to communicate with the Resource Usage Tracker (RUT) + +- httpx client with base_url to PAYMENTS_RESOURCE_USAGE_TRACKER + +""" + + +import logging +from 
datetime import datetime +from decimal import Decimal + +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreateBody, + CreditTransactionCreated, +) +from models_library.products import ProductName +from models_library.resource_tracker import CreditTransactionId +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import ( + AttachLifespanMixin, + BaseHTTPApi, + HealthMixinMixin, +) +from servicelib.fastapi.tracing import setup_httpx_client_tracing + +from ..core.settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +class ResourceUsageTrackerApi( + BaseHTTPApi, AttachLifespanMixin, HealthMixinMixin, SingletonInAppStateMixin +): + app_state_name: str = "source_usage_tracker_api" + + async def create_credit_transaction( + self, + product_name: ProductName, + wallet_id: WalletID, + wallet_name: str, + user_id: UserID, + user_email: str, + osparc_credits: Decimal, + payment_transaction_id: str, + created_at: datetime, + ) -> CreditTransactionId: + """Adds credits to wallet""" + response = await self.client.post( + "/v1/credit-transactions", + json=jsonable_encoder( + CreditTransactionCreateBody( + product_name=product_name, + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_email=user_email, + osparc_credits=osparc_credits, + payment_transaction_id=payment_transaction_id, + created_at=created_at, + ) + ), + ) + credit_transaction = CreditTransactionCreated.model_validate_json(response.text) + return credit_transaction.credit_transaction_id + + +def setup_resource_usage_tracker(app: FastAPI): + assert app.state # nosec + settings: ApplicationSettings = app.state.settings + api = ResourceUsageTrackerApi.from_client_kwargs( + base_url=settings.PAYMENTS_RESOURCE_USAGE_TRACKER.base_url, + ) + if settings.PAYMENTS_TRACING: + setup_httpx_client_tracing(api.client) + api.set_to_app_state(app) + api.attach_lifespan_to(app) diff --git a/services/payments/src/simcore_service_payments/services/socketio.py b/services/payments/src/simcore_service_payments/services/socketio.py new file mode 100644 index 00000000000..bfa4d98c5d6 --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/socketio.py @@ -0,0 +1,33 @@ +import logging + +import socketio # type: ignore[import-untyped] +from fastapi import FastAPI +from servicelib.socketio_utils import cleanup_socketio_async_pubsub_manager +from settings_library.rabbit import RabbitSettings + +from .rabbitmq import get_rabbitmq_settings + +_logger = logging.getLogger(__name__) + + +def setup_socketio(app: FastAPI): + settings: RabbitSettings = get_rabbitmq_settings(app) + + async def _on_startup() -> None: + assert app.state.rabbitmq_client # nosec + + # Connect to the as an external process in write-only mode + # SEE https://python-socketio.readthedocs.io/en/stable/server.html#emitting-from-external-processes + app.state.external_socketio = socketio.AsyncAioPikaManager( + url=settings.dsn, logger=_logger, write_only=True + ) + + async def _on_shutdown() -> None: + + if app.state.external_socketio: + await cleanup_socketio_async_pubsub_manager( + server_manager=app.state.external_socketio + ) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) diff --git 
a/services/payments/src/simcore_service_payments/services/stripe.py b/services/payments/src/simcore_service_payments/services/stripe.py new file mode 100644 index 00000000000..349de908d7e --- /dev/null +++ b/services/payments/src/simcore_service_payments/services/stripe.py @@ -0,0 +1,99 @@ +""" Interface to communicate with the Resource Usage Tracker (RUT) + +- httpx client with base_url to PAYMENTS_RESOURCE_USAGE_TRACKER + +""" + +import contextlib +import functools +import logging +from collections.abc import Callable + +import httpx +from fastapi import FastAPI +from httpx import HTTPStatusError +from models_library.payments import StripeInvoiceID +from servicelib.fastapi.app_state import SingletonInAppStateMixin +from servicelib.fastapi.http_client import ( + AttachLifespanMixin, + BaseHTTPApi, + HealthMixinMixin, +) +from servicelib.fastapi.tracing import setup_httpx_client_tracing + +from ..core.errors import StripeRuntimeError +from ..core.settings import ApplicationSettings +from ..models.stripe import InvoiceData + +_logger = logging.getLogger(__name__) + + +@contextlib.contextmanager +def _raise_as_stripe_error(): + """https://docs.stripe.com/api/errors""" + try: + yield + + except HTTPStatusError as err: + raise StripeRuntimeError from err + + +def _handle_status_errors(coro: Callable): + @functools.wraps(coro) + async def _wrapper(self, *args, **kwargs): + with _raise_as_stripe_error(): + return await coro(self, *args, **kwargs) + + return _wrapper + + +class _StripeBearerAuth(httpx.Auth): + def __init__(self, token): + self._token = token + + def auth_flow(self, request): + request.headers["Authorization"] = f"Bearer {self._token}" + yield request + + +class StripeApi( + BaseHTTPApi, AttachLifespanMixin, HealthMixinMixin, SingletonInAppStateMixin +): + """https://docs.stripe.com/api""" + + app_state_name: str = "stripe_api" + + async def is_healthy( + self, + ) -> bool: + try: + response = await self.client.get("/v1/products") + response.raise_for_status() + return True + except httpx.HTTPError: + return False + + @_handle_status_errors + async def get_invoice( + self, + stripe_invoice_id: StripeInvoiceID, + ) -> InvoiceData: + + response = await self.client.get(f"/v1/invoices/{stripe_invoice_id}") + response.raise_for_status() + + return InvoiceData.model_validate_json(response.text) + + +def setup_stripe(app: FastAPI): + assert app.state # nosec + settings: ApplicationSettings = app.state.settings + api = StripeApi.from_client_kwargs( + base_url=f"{settings.PAYMENTS_STRIPE_URL}", + auth=_StripeBearerAuth(settings.PAYMENTS_STRIPE_API_SECRET.get_secret_value()), + ) + if settings.PAYMENTS_TRACING: + setup_httpx_client_tracing(api.client) + + api.set_to_app_state(app) + api.attach_lifespan_to(app) diff --git a/services/payments/tests/conftest.py b/services/payments/tests/conftest.py new file mode 100644 index 00000000000..39608fe4e70 --- /dev/null +++ b/services/payments/tests/conftest.py @@ -0,0 +1,93 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from pathlib import Path + +import pytest +import simcore_service_payments +from faker import Faker +from models_library.groups import GroupID +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.utils_secrets import generate_token_secret_key + +pytest_plugins = [ + 
"pytest_simcore.cli_runner", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_payments_data", + "pytest_simcore.faker_products_data", + "pytest_simcore.faker_users_data", + "pytest_simcore.httpbin_service", + "pytest_simcore.postgres_service", + "pytest_simcore.socketio", + "pytest_simcore.rabbit_service", + "pytest_simcore.repository_paths", +] + + +@pytest.fixture(scope="session") +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "payments" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_payments")) + return service_folder + + +@pytest.fixture(scope="session") +def installed_package_dir() -> Path: + dirpath = Path(simcore_service_payments.__file__).resolve().parent + assert dirpath.exists() + return dirpath + + +@pytest.fixture +def secret_key() -> str: + return generate_token_secret_key(32) + + +@pytest.fixture(scope="session") +def external_envfile_dict(external_envfile_dict: EnvVarsDict) -> EnvVarsDict: + if external_envfile_dict: + assert "PAYMENTS_GATEWAY_API_SECRET" in external_envfile_dict + assert "PAYMENTS_GATEWAY_URL" in external_envfile_dict + return external_envfile_dict + + +@pytest.fixture(scope="session") +def env_devel_dict( + env_devel_dict: EnvVarsDict, external_envfile_dict: EnvVarsDict +) -> EnvVarsDict: + if external_envfile_dict: + return external_envfile_dict + return env_devel_dict + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + docker_compose_service_environment_dict: EnvVarsDict, + secret_key: str, + faker: Faker, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_environment_dict, + "PAYMENTS_ACCESS_TOKEN_SECRET_KEY": secret_key, + "PAYMENTS_USERNAME": faker.user_name(), + "PAYMENTS_PASSWORD": faker.password(), + "PAYMENTS_TRACING": "null", + }, + ) + + +@pytest.fixture +def user_primary_group_id(faker: Faker) -> GroupID: + return TypeAdapter(GroupID).validate_python(faker.pyint()) diff --git a/services/payments/tests/unit/api/conftest.py b/services/payments/tests/unit/api/conftest.py new file mode 100644 index 00000000000..2df16d387e2 --- /dev/null +++ b/services/payments/tests/unit/api/conftest.py @@ -0,0 +1,52 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator + +import httpx +import pytest +from fastapi import FastAPI, status +from httpx._transports.asgi import ASGITransport +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.models.schemas.auth import Token + + +@pytest.fixture +async def client(app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: + # - Needed for app to trigger start/stop event handlers + # - Prefer this client instead of fastapi.testclient.TestClient + async with httpx.AsyncClient( + transport=ASGITransport(app=app), + base_url="http://payments.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + assert isinstance(client._transport, ASGITransport) + yield client + + +@pytest.fixture +async def auth_headers(client: httpx.AsyncClient, app: FastAPI) -> dict[str, str]: + # get access token + settings: ApplicationSettings = app.state.settings + assert settings + + form_data = { + 
"username": settings.PAYMENTS_USERNAME, + "password": settings.PAYMENTS_PASSWORD.get_secret_value(), + } + + response = await client.post( + "/v1/token", + data=form_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + assert response.status_code == status.HTTP_200_OK, response.text + + token = Token(**response.json()) + assert token.token_type == "bearer" # noqa: S105 + + return {"Authorization": f"Bearer {token.access_token}"} diff --git a/services/payments/tests/unit/api/test__oas_spec.py b/services/payments/tests/unit/api/test__oas_spec.py new file mode 100644 index 00000000000..adf84fa8d4f --- /dev/null +++ b/services/payments/tests/unit/api/test__oas_spec.py @@ -0,0 +1,25 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import json +from pathlib import Path + +from fastapi import FastAPI + + +def test_openapi_json_is_in_sync_with_app_oas( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + app: FastAPI, + project_slug_dir: Path, +): + """ + If this test fails, just 'make openapi.json' + """ + spec_from_app = app.openapi() + open_api_json_file = project_slug_dir / "openapi.json" + stored_openapi_json_file = json.loads(open_api_json_file.read_text()) + assert ( + spec_from_app == stored_openapi_json_file + ), "rerun `make openapi.json` and check differences" diff --git a/services/payments/tests/unit/api/test__one_time_payment_workflows.py b/services/payments/tests/unit/api/test__one_time_payment_workflows.py new file mode 100644 index 00000000000..126116a5dc2 --- /dev/null +++ b/services/payments/tests/unit/api/test__one_time_payment_workflows.py @@ -0,0 +1,138 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_webserver.wallets import WalletPaymentInitiated +from models_library.basic_types import IDStr +from models_library.payments import UserInvoiceAddress +from models_library.products import StripePriceID, StripeTaxRateID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S +from simcore_service_payments.api.rpc.routes import PAYMENTS_RPC_NAMESPACE +from simcore_service_payments.models.schemas.acknowledgements import AckPayment + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + 
monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.mark.acceptance_test( + "https://github.com/ITISFoundation/osparc-simcore/pull/4715" +) +async def test_successful_one_time_payment_workflow( + is_pdb_enabled: bool, + app: FastAPI, + client: httpx.AsyncClient, + faker: Faker, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + wallet_id: WalletID, + wallet_name: IDStr, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + product_price_stripe_price_id: StripePriceID, + product_price_stripe_tax_rate_id: StripeTaxRateID, + auth_headers: dict[str, str], + payments_clean_db: None, + mocker: MockerFixture, +): + if mock_payments_gateway_service_or_none is None: + pytest.skip("cannot run thist test against external because we ACK here") + + mock_on_payment_completed = mocker.patch( + "simcore_service_payments.api.rest._acknowledgements.payments.on_payment_completed", + autospec=True, + ) + + # ACK via api/rest + inited = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_payment"), + amount_dollars=1000, + target_credits=10000, + product_name="osparc", + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + user_address=UserInvoiceAddress(country="CH"), + stripe_price_id=product_price_stripe_price_id, + stripe_tax_rate_id=product_price_stripe_tax_rate_id, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert isinstance(inited, WalletPaymentInitiated) + assert mock_payments_gateway_service_or_none.routes["init_payment"].called + + # ACK + response = await client.post( + f"/v1/payments/{inited.payment_id}:ack", + json=jsonable_encoder( + AckPayment(success=True, invoice_url=faker.url()).model_dump() + ), + headers=auth_headers, + ) + + assert response.status_code == status.HTTP_200_OK + assert mock_on_payment_completed.called + + # LIST payments via api/rest + got = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_payments_page"), + user_id=user_id, + product_name="osparc", + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + total_number_of_items, transactions = got + assert total_number_of_items == 1 + assert len(transactions) == 1 + + assert transactions[0].state == "SUCCESS" + assert transactions[0].payment_id == inited.payment_id diff --git a/services/payments/tests/unit/api/test__payment_method_workflows.py b/services/payments/tests/unit/api/test__payment_method_workflows.py new file mode 100644 index 00000000000..697ddfd08f3 --- /dev/null +++ b/services/payments/tests/unit/api/test__payment_method_workflows.py @@ -0,0 +1,127 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodGet, + PaymentMethodInitiated, +) +from models_library.basic_types import IDStr +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, 
TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S +from simcore_service_payments.api.rpc.routes import PAYMENTS_RPC_NAMESPACE +from simcore_service_payments.models.schemas.acknowledgements import AckPayment + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.mark.acceptance_test( + "https://github.com/ITISFoundation/osparc-simcore/pull/4715" +) +async def test_successful_create_payment_method_workflow( + is_pdb_enabled: bool, + app: FastAPI, + client: httpx.AsyncClient, + faker: Faker, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + wallet_id: WalletID, + wallet_name: IDStr, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + auth_headers: dict[str, str], + payments_clean_db: None, + mocker: MockerFixture, +): + if mock_payments_gateway_service_or_none is None: + pytest.skip("cannot run thist test against external because we ACK here") + + mock_on_payment_method_completed = mocker.patch( + "simcore_service_payments.api.rest._acknowledgements.payments_methods.on_payment_method_completed", + autospec=True, + ) + + # INIT via api/rpc + inited = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_creation_of_payment_method"), + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert isinstance(inited, PaymentMethodInitiated) + assert mock_payments_gateway_service_or_none.routes["init_payment_method"].called + + # ACK via api/rest + response = await client.post( + f"/v1/payments-methods/{inited.payment_method_id}:ack", + json=jsonable_encoder( + AckPayment(success=True, invoice_url=faker.url()).model_dump() + ), + headers=auth_headers, + ) + + assert response.status_code == status.HTTP_200_OK + assert mock_on_payment_method_completed.called + + # GET via api/rpc + got = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_payment_method"), + payment_method_id=inited.payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ) + + assert isinstance(got, PaymentMethodGet) + assert got.idr == inited.payment_method_id diff --git a/services/payments/tests/unit/api/test_rest_acknowledgements.py b/services/payments/tests/unit/api/test_rest_acknowledgements.py new file mode 100644 index 00000000000..9c1e226c964 --- /dev/null +++ b/services/payments/tests/unit/api/test_rest_acknowledgements.py @@ -0,0 +1,148 @@ +# pylint: 
disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_payments.errors import ( + PaymentMethodNotFoundError, + PaymentNotFoundError, +) +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_payments.models.schemas.acknowledgements import ( + AckPayment, + AckPaymentMethod, +) +from simcore_service_payments.models.schemas.errors import DefaultApiError + +pytest_simcore_core_services_selection = [ + "postgres", +] + +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, + external_envfile_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + monkeypatch: pytest.MonkeyPatch, +) -> EnvVarsDict: + # set environs + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + **external_envfile_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.fixture +def app( + app: FastAPI, + mocker: MockerFixture, +) -> FastAPI: + app.state.notifier = mocker.MagicMock() + return app + + +@pytest.fixture +async def client( + client: httpx.AsyncClient, external_envfile_dict: EnvVarsDict +) -> AsyncIterator[httpx.AsyncClient]: + # EITHER tests against external payments API + if external_base_url := external_envfile_dict.get("PAYMENTS_SERVICE_API_BASE_URL"): + # If there are external secrets, build a new client and point to `external_base_url` + print( + "🚨 EXTERNAL: tests running against external payment API at", + external_base_url, + ) + async with httpx.AsyncClient( + base_url=external_base_url, + headers={"Content-Type": "application/json"}, + ) as new_client: + yield new_client + # OR tests against app + else: + yield client + + +async def test_payments_api_authentication( + with_disabled_rabbitmq_and_rpc: None, + client: httpx.AsyncClient, + faker: Faker, + auth_headers: dict[str, str], +): + payments_id = faker.uuid4() + payment_ack = jsonable_encoder( + AckPayment(success=True, invoice_url=faker.url()).model_dump() + ) + + # w/o header + response = await client.post( + f"/v1/payments/{payments_id}:ack", + json=payment_ack, + ) + assert response.status_code == status.HTTP_401_UNAUTHORIZED, response.json() + + # w/ header + response = await client.post( + f"/v1/payments/{payments_id}:ack", json=payment_ack, headers=auth_headers + ) + + assert response.status_code == status.HTTP_404_NOT_FOUND, response.json() + error = DefaultApiError.model_validate(response.json()) + assert PaymentNotFoundError.msg_template.format(payment_id=payments_id) == str( + error.detail + ) + + +async def test_payments_methods_api_authentication( + with_disabled_rabbitmq_and_rpc: None, + client: httpx.AsyncClient, + faker: Faker, + auth_headers: dict[str, str], +): + payment_method_id = faker.uuid4() + payment_method_ack = AckPaymentMethod( + success=True, message=faker.word() + ).model_dump() + + # w/o header + response = await client.post( + f"/v1/payments-methods/{payment_method_id}:ack", + 
json=payment_method_ack, + ) + assert response.status_code == status.HTTP_401_UNAUTHORIZED, response.json() + + # same but w/ header + response = await client.post( + response.request.url.path, + content=response.request.content, + headers=auth_headers, + ) + + assert response.status_code == status.HTTP_404_NOT_FOUND, response.json() + error = DefaultApiError.model_validate(response.json()) + assert PaymentMethodNotFoundError.msg_template.format( + payment_method_id=payment_method_id + ) == str(error.detail) diff --git a/services/payments/tests/unit/api/test_rest_auth.py b/services/payments/tests/unit/api/test_rest_auth.py new file mode 100644 index 00000000000..2139f99d233 --- /dev/null +++ b/services/payments/tests/unit/api/test_rest_auth.py @@ -0,0 +1,69 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from pydantic import HttpUrl +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.models.schemas.auth import Token + + +async def test_bearer_token(httpbin_base_url: HttpUrl, faker: Faker): + bearer_token = faker.word() + headers = {"Authorization": f"Bearer {bearer_token}"} + + async with httpx.AsyncClient( + base_url=f"{httpbin_base_url}", headers=headers + ) as client: + response = await client.get("/bearer") + assert response.json() == {"authenticated": True, "token": bearer_token} + + +@pytest.mark.parametrize("valid_credentials", [True, False]) +async def test_login_to_create_access_token( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + client: httpx.AsyncClient, + app: FastAPI, + faker: Faker, + valid_credentials: bool, +): + # SEE fixture in conftest.py:auth_headers + # + # At some point might want to use httpx plugins as: + # - https://docs.authlib.org/en/latest/client/httpx.html + # OR implement an auth_flow interface + # - https://www.python-httpx.org/advanced/#customizing-authentication + # + # + settings: ApplicationSettings = app.state.settings + assert settings + + form_data = { + "username": settings.PAYMENTS_USERNAME, + "password": settings.PAYMENTS_PASSWORD.get_secret_value(), + } + + if not valid_credentials: + form_data["username"] = faker.user_name() + form_data["password"] = faker.password() + + response = await client.post( + "/v1/token", + data=form_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + + if valid_credentials: + token = Token(**response.json()) + assert response.status_code == status.HTTP_200_OK + assert token.token_type == "bearer" + else: + assert response.status_code == status.HTTP_401_UNAUTHORIZED + error = response.json() + assert "password" in error["detail"] diff --git a/services/payments/tests/unit/api/test_rest_dependencies.py b/services/payments/tests/unit/api/test_rest_dependencies.py new file mode 100644 index 00000000000..5baa08eae4a --- /dev/null +++ b/services/payments/tests/unit/api/test_rest_dependencies.py @@ -0,0 +1,18 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from fastapi import FastAPI +from simcore_service_payments.api.rest._dependencies import _oauth2_scheme + + +def test_oauth_scheme( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: 
None, + app: FastAPI, +): + expected_token_url = app.router.url_path_for("login_to_create_access_token") + assert _oauth2_scheme.model.flows.password.tokenUrl == expected_token_url diff --git a/services/payments/tests/unit/api/test_rest_meta.py b/services/payments/tests/unit/api/test_rest_meta.py new file mode 100644 index 00000000000..3a6acf2b020 --- /dev/null +++ b/services/payments/tests/unit/api/test_rest_meta.py @@ -0,0 +1,66 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import httpx +import pytest +import simcore_service_payments.api.rest._health as health_module +from fastapi import status +from pytest_mock import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from simcore_service_payments._meta import API_VTAG +from simcore_service_payments.api.rest._health import HealthCheckError +from simcore_service_payments.models.schemas.meta import Meta + + +async def test_healthcheck( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + client: httpx.AsyncClient, + mocker: MockerFixture, +): + rabbitmq_mock = mocker.Mock(spec=RabbitMQClient) + rabbitmq_mock.healthy = True + mocker.patch( + "simcore_service_payments.services.rabbitmq.get_rabbitmq_client", + return_value=rabbitmq_mock, + ) + + response = await client.get("/") + assert response.status_code == status.HTTP_200_OK + assert response.text.startswith( + f"{health_module.__name__}@" + ), f"got {response.text!r}" + + +async def test_healthcheck__unhealthy( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + client: httpx.AsyncClient, + mocker: MockerFixture, +): + rabbitmq_mock = mocker.Mock(spec=RabbitMQClient) + rabbitmq_mock.healthy = False + mocker.patch( + "simcore_service_payments.services.rabbitmq.get_rabbitmq_client", + return_value=rabbitmq_mock, + ) + + with pytest.raises(HealthCheckError): + await client.get("/") + + +async def test_meta( + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + client: httpx.AsyncClient, + auth_headers: dict[str, str], +): + response = await client.get(f"/{API_VTAG}/meta", headers=auth_headers) + assert response.status_code == status.HTTP_200_OK + meta = Meta.model_validate(response.json()) + + response = await client.get(f"{meta.docs_url}") + assert response.status_code == status.HTTP_200_OK diff --git a/services/payments/tests/unit/conftest.py b/services/payments/tests/unit/conftest.py new file mode 100644 index 00000000000..de408dadf3d --- /dev/null +++ b/services/payments/tests/unit/conftest.py @@ -0,0 +1,563 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Iterator +from pathlib import Path +from typing import Any, NamedTuple +from unittest.mock import Mock + +import httpx +import jsonref +import pytest +import respx +import sqlalchemy as sa +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI, status +from fastapi.encoders import jsonable_encoder +from models_library.api_schemas_webserver.wallets import PaymentMethodID +from models_library.payments import StripeInvoiceID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from 
pytest_simcore.helpers.faker_factories import random_payment_method_view +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient +from simcore_postgres_database.models.payments_transactions import payments_transactions +from simcore_service_payments.core.application import create_app +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from simcore_service_payments.models.db import PaymentsMethodsDB +from simcore_service_payments.models.payments_gateway import ( + BatchGetPaymentMethods, + GetPaymentMethod, + InitPayment, + InitPaymentMethod, + PaymentInitiated, + PaymentMethodInitiated, + PaymentMethodsBatch, +) +from simcore_service_payments.models.schemas.acknowledgements import ( + AckPaymentMethod, + AckPaymentWithPaymentMethod, +) +from simcore_service_payments.services import payments_methods +from toolz.dicttoolz import get_in + +# +# rabbit-MQ +# + + +@pytest.fixture +def disable_rabbitmq_and_rpc_setup(mocker: MockerFixture) -> Callable: + def _(): + # The following services are affected if rabbitmq is not in place + mocker.patch("simcore_service_payments.core.application.setup_notifier") + mocker.patch("simcore_service_payments.core.application.setup_socketio") + mocker.patch("simcore_service_payments.core.application.setup_rabbitmq") + mocker.patch("simcore_service_payments.core.application.setup_rpc_api_routes") + mocker.patch( + "simcore_service_payments.core.application.setup_auto_recharge_listener" + ) + + return _ + + +@pytest.fixture +def with_disabled_rabbitmq_and_rpc(disable_rabbitmq_and_rpc_setup: Callable): + disable_rabbitmq_and_rpc_setup() + + +@pytest.fixture +async def rpc_client( + faker: Faker, rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]] +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client(f"web-server-client-{faker.word()}") + + +# +# postgres +# + + +@pytest.fixture +def disable_postgres_setup(mocker: MockerFixture) -> Callable: + def _setup(app: FastAPI): + app.state.engine = ( + Mock() + ) # NOTE: avoids error in api._dependencies::get_db_engine + + def _(): + # The following services are affected if postgres is not in place + mocker.patch( + "simcore_service_payments.core.application.setup_postgres", + spec=True, + side_effect=_setup, + ) + + return _ + + +@pytest.fixture +def with_disabled_postgres(disable_postgres_setup: Callable): + disable_postgres_setup() + + +@pytest.fixture +def wait_for_postgres_ready_and_db_migrated(postgres_db: sa.engine.Engine) -> None: + """ + Typical use-case is to include it in + + @pytest.fixture + def app_environment( + ... 
+ postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + ) + """ + assert postgres_db + + +@pytest.fixture +def payments_clean_db(postgres_db: sa.engine.Engine) -> Iterator[None]: + with postgres_db.connect() as con: + yield + con.execute(payments_transactions.delete()) + + +@pytest.fixture +async def create_fake_payment_method_in_db( + app: FastAPI, +) -> AsyncIterable[ + Callable[[PaymentMethodID, WalletID, UserID], Awaitable[PaymentsMethodsDB]] +]: + _repo = PaymentsMethodsRepo(app.state.engine) + _created = [] + + async def _( + payment_method_id: PaymentMethodID, + wallet_id: WalletID, + user_id: UserID, + ) -> PaymentsMethodsDB: + acked = await payments_methods.insert_payment_method( + repo=_repo, + payment_method_id=payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + ack=AckPaymentMethod( + success=True, + message=f"Created with {create_fake_payment_method_in_db.__name__}", + ), + ) + _created.append(acked) + return acked + + yield _ + + for acked in _created: + await _repo.delete_payment_method( + acked.payment_method_id, user_id=acked.user_id, wallet_id=acked.wallet_id + ) + + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 + + +@pytest.fixture +async def app( + app_environment: EnvVarsDict, is_pdb_enabled: bool +) -> AsyncIterator[FastAPI]: + the_test_app = create_app() + async with LifespanManager( + the_test_app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + yield the_test_app + + +# +# mock payments-gateway-service API +# + + +@pytest.fixture +def mock_payments_gateway_service_api_base(app: FastAPI) -> Iterator[MockRouter]: + """ + If external_envfile_dict is present, then this mock is not really used + and instead the test runs against some real services + """ + settings: ApplicationSettings = app.state.settings + + with respx.mock( + base_url=f"{settings.PAYMENTS_GATEWAY_URL}", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as respx_mock: + yield respx_mock + + +@pytest.fixture +def mock_payments_routes(faker: Faker) -> Callable: + def _mock(mock_router: MockRouter): + def _init_200(request: httpx.Request): + assert InitPayment.model_validate_json(request.content) is not None + assert "*" not in request.headers["X-Init-Api-Secret"] + + return httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder(PaymentInitiated(payment_id=faker.uuid4())), + ) + + def _cancel_200(request: httpx.Request): + assert PaymentInitiated.model_validate_json(request.content) is not None + assert "*" not in request.headers["X-Init-Api-Secret"] + + # responds with an empty body, although it can also contain a message + return httpx.Response(status.HTTP_200_OK, json={}) + + mock_router.post( + path="/init", + name="init_payment", + ).mock(side_effect=_init_200) + + mock_router.post( + path="/cancel", + name="cancel_payment", + ).mock(side_effect=_cancel_200) + + return _mock + + +@pytest.fixture +def no_funds_payment_method_id(faker: Faker) -> PaymentMethodID: + """Fake Payments-Gateway will decline payments with this payment-method id due to insufficient funds + + USE create_fake_payment_method_in_db to inject this payment-method in the DB + Emulates https://stripe.com/docs/testing#declined-payments + """ + return TypeAdapter(PaymentMethodID).validate_python("no_funds_payment_method_id") + + +@pytest.fixture +def mock_payments_methods_routes( + faker: Faker, no_funds_payment_method_id: PaymentMethodID +) -> Iterator[Callable]: + class PaymentMethodInfoTuple(NamedTuple): + init: InitPaymentMethod + get: GetPaymentMethod + + _payment_methods: dict[str, PaymentMethodInfoTuple] = {} + + def _mock(mock_router: MockRouter): + def _init(request: httpx.Request): + assert "*" not in request.headers["X-Init-Api-Secret"] + + pm_id = faker.uuid4() + _payment_methods[pm_id] = PaymentMethodInfoTuple( + init=InitPaymentMethod.model_validate_json(request.content), + get=GetPaymentMethod(**random_payment_method_view(id=pm_id)), + ) + + return httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder(PaymentMethodInitiated(payment_method_id=pm_id)), + ) + + def _get(request: httpx.Request, pm_id: PaymentMethodID): + assert "*" not in request.headers["X-Init-Api-Secret"] + + try: + _, payment_method = _payment_methods[pm_id] + return httpx.Response( + status.HTTP_200_OK, json=jsonable_encoder(payment_method) + ) + except KeyError: + return httpx.Response(status.HTTP_404_NOT_FOUND) + + def _del(request: httpx.Request, pm_id: PaymentMethodID): + assert "*" not in request.headers["X-Init-Api-Secret"] + + try: + _payment_methods.pop(pm_id) + return httpx.Response(status.HTTP_204_NO_CONTENT) + except KeyError: + return httpx.Response(status.HTTP_404_NOT_FOUND) + + def _batch_get(request: httpx.Request): + assert "*" not in request.headers["X-Init-Api-Secret"] + batch = BatchGetPaymentMethods.model_validate_json(request.content) + + try: + items = [_payment_methods[pm].get for pm in batch.payment_methods_ids] + except KeyError: + return httpx.Response(status.HTTP_404_NOT_FOUND) + + return httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder(PaymentMethodsBatch(items=items)), + ) + + def _pay(request: httpx.Request, pm_id: PaymentMethodID): + assert "*" not in request.headers["X-Init-Api-Secret"] + assert InitPayment.model_validate_json(request.content) is not None + + # checks + _get(request, pm_id) + + payment_id = faker.uuid4() + + if pm_id == no_funds_payment_method_id: + # SEE https://stripe.com/docs/testing#declined-payments + return
httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder( + AckPaymentWithPaymentMethod( + success=False, + message=f"Insufficient Funds '{pm_id}'", + invoice_url=None, + payment_id=payment_id, + ) + ), + ) + + return httpx.Response( + status.HTTP_200_OK, + json=jsonable_encoder( + AckPaymentWithPaymentMethod( + success=True, + message=f"Payment '{payment_id}' with payment-method '{pm_id}'", + invoice_url=faker.url(), + provider_payment_id="pi_123456ABCDEFG123456ABCDE", + payment_id=payment_id, + invoice_pdf_url="https://invoice.com", + stripe_invoice_id="stripe-invoice-id", + stripe_customer_id="stripe-customer-id", + ) + ), + ) + + # ------ + + mock_router.post( + path="/payment-methods:init", + name="init_payment_method", + ).mock(side_effect=_init) + + mock_router.post( + path="/payment-methods:batchGet", + name="batch_get_payment_methods", + ).mock(side_effect=_batch_get) + + mock_router.get( + path__regex=r"/payment-methods/(?P<pm_id>[\w-]+)$", + name="get_payment_method", + ).mock(side_effect=_get) + + mock_router.delete( + path__regex=r"/payment-methods/(?P<pm_id>[\w-]+)$", + name="delete_payment_method", + ).mock(side_effect=_del) + + mock_router.post( + path__regex=r"/payment-methods/(?P<pm_id>[\w-]+):pay$", + name="pay_with_payment_method", + ).mock(side_effect=_pay) + + yield _mock + + _payment_methods.clear() + + +@pytest.fixture +def mock_payments_gateway_service_or_none( + mock_payments_gateway_service_api_base: MockRouter, + mock_payments_routes: Callable, + mock_payments_methods_routes: Callable, + external_envfile_dict: EnvVarsDict, +) -> MockRouter | None: + # EITHER tests against external payments-gateway + if payments_gateway_url := external_envfile_dict.get("PAYMENTS_GATEWAY_URL"): + print("🚨 EXTERNAL: these tests are running against", f"{payments_gateway_url=}") + mock_payments_gateway_service_api_base.stop() + return None + + # OR tests against mock payments-gateway + mock_payments_routes(mock_payments_gateway_service_api_base) + mock_payments_methods_routes(mock_payments_gateway_service_api_base) + return mock_payments_gateway_service_api_base + + +# +# mock Stripe API +# + + +@pytest.fixture +def mock_payments_stripe_api_base(app: FastAPI) -> Iterator[MockRouter]: + """ + If external_envfile_dict is present, then this mock is not really used + and instead the test runs against some real services + """ + settings: ApplicationSettings = app.state.settings + + with respx.mock( + base_url=f"{settings.PAYMENTS_STRIPE_URL}", + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True!
In the future https://github.com/stripe/stripe-mock might be used""" + + def _mock(mock_router: MockRouter): + def _list_products(request: httpx.Request): + assert "Bearer " in request.headers["authorization"] + + return httpx.Response( + status.HTTP_200_OK, json={"object": "list", "data": []} + ) + + def _get_invoice(request: httpx.Request): + assert "Bearer " in request.headers["authorization"] + + return httpx.Response( + status.HTTP_200_OK, + json={"hosted_invoice_url": "https://fake-invoice.com/?id=12345"}, + ) + + mock_router.get( + path="/v1/products", + name="list_products", + ).mock(side_effect=_list_products) + + mock_router.get( + path__regex=r"(^/v1/invoices/.*)$", + name="get_invoice", + ).mock(side_effect=_get_invoice) + + return _mock + + +@pytest.fixture(scope="session") +def external_stripe_environment( + request: pytest.FixtureRequest, + external_envfile_dict: EnvVarsDict, +) -> EnvVarsDict: + """ + If a file under test folder prefixed with `.env-secret` is present, + then this fixture captures it. + + This technique allows reusing the same tests to check against + external development/production servers + """ + if external_envfile_dict: + assert "PAYMENTS_STRIPE_API_SECRET" in external_envfile_dict + assert "PAYMENTS_STRIPE_URL" in external_envfile_dict + return external_envfile_dict + return {} + + +@pytest.fixture(scope="session") +def external_invoice_id(request: pytest.FixtureRequest) -> str | None: + stripe_invoice_id_or_none = request.config.getoption( + "--external-stripe-invoice-id", default=None + ) + return f"{stripe_invoice_id_or_none}" if stripe_invoice_id_or_none else None + + +@pytest.fixture +def stripe_invoice_id(external_invoice_id: StripeInvoiceID | None) -> StripeInvoiceID: + if external_invoice_id: + print( + f"πŸ“§ EXTERNAL `stripe_invoice_id` detected. Setting stripe_invoice_id={external_invoice_id}" + ) + return StripeInvoiceID(external_invoice_id) + return StripeInvoiceID("in_mYf5CIF3AU6h126Xj47jIPlB") + + +@pytest.fixture +def mock_stripe_or_none( + mock_payments_stripe_api_base: MockRouter, + mock_payments_stripe_routes: Callable, + external_stripe_environment: EnvVarsDict, +) -> MockRouter | None: + # EITHER tests against external Stripe + if payments_stripe_url := external_stripe_environment.get("PAYMENTS_STRIPE_URL"): + print("🚨 EXTERNAL: these tests are running against", f"{payments_stripe_url=}") + mock_payments_stripe_api_base.stop() + return None + + # OR tests against mock Stripe + mock_payments_stripe_routes(mock_payments_stripe_api_base) + return mock_payments_stripe_api_base + + +# +# mock resource-usage-tracker API +# + + +@pytest.fixture +def rut_service_openapi_specs( + osparc_simcore_services_dir: Path, +) -> dict[str, Any]: + openapi_path = ( + osparc_simcore_services_dir / "resource-usage-tracker" / "openapi.json" + ) + return jsonref.loads(openapi_path.read_text()) + + +@pytest.fixture +def mock_resource_usage_tracker_service_api_base( + app: FastAPI, rut_service_openapi_specs: dict[str, Any] +) -> Iterator[MockRouter]: + settings: ApplicationSettings = app.state.settings + with respx.mock( + base_url=settings.PAYMENTS_RESOURCE_USAGE_TRACKER.base_url, + assert_all_called=False, + assert_all_mocked=True, # IMPORTANT: KEEP always True! 
+ ) as respx_mock: + assert "healthcheck" in get_in( + ["paths", "/", "get", "operationId"], + rut_service_openapi_specs, + no_default=True, + ) # type: ignore + respx_mock.get( + path="/", + name="healthcheck", + ).respond(status.HTTP_200_OK) + + yield respx_mock + + +@pytest.fixture +def mock_resoruce_usage_tracker_service_api( + faker: Faker, + mock_resource_usage_tracker_service_api_base: MockRouter, + rut_service_openapi_specs: dict[str, Any], +) -> MockRouter: + # check it exists + get_in( + ["paths", "/v1/credit-transactions", "post", "operationId"], + rut_service_openapi_specs, + no_default=True, + ) + + # fake successful response + mock_resource_usage_tracker_service_api_base.post( + "/v1/credit-transactions" + ).respond(json={"credit_transaction_id": faker.pyint()}) + + return mock_resource_usage_tracker_service_api_base diff --git a/services/payments/tests/unit/test__model_examples.py b/services/payments/tests/unit/test__model_examples.py new file mode 100644 index 00000000000..c52525a65db --- /dev/null +++ b/services/payments/tests/unit/test__model_examples.py @@ -0,0 +1,27 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from typing import Any + +import pytest +import simcore_service_payments.models +from pydantic import BaseModel +from pytest_simcore.pydantic_models import ( + assert_validation_model, + walk_model_examples_in_package, +) + + +@pytest.mark.parametrize( + "model_cls, example_name, example_data", + walk_model_examples_in_package(simcore_service_payments.models), +) +def test_api_server_model_examples( + model_cls: type[BaseModel], example_name: str, example_data: Any +): + assert_validation_model( + model_cls, example_name=example_name, example_data=example_data + ) diff --git a/services/payments/tests/unit/test_cli.py b/services/payments/tests/unit/test_cli.py new file mode 100644 index 00000000000..1fb1db4eded --- /dev/null +++ b/services/payments/tests/unit/test_cli.py @@ -0,0 +1,60 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +import os +import traceback + +import pytest +from click.testing import Result +from pytest_simcore.helpers.monkeypatch_envs import load_dotenv, setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_payments._meta import API_VERSION +from simcore_service_payments.cli import main as cli_main +from simcore_service_payments.core.settings import ApplicationSettings +from typer.testing import CliRunner + + +def _format_cli_error(result: Result) -> str: + assert result.exception + tb_message = "\n".join(traceback.format_tb(result.exception.__traceback__)) + return f"Below exception was raised by the cli:\n{tb_message}" + + +def test_cli_help_and_version(cli_runner: CliRunner): + # simcore-service-payments --help + result = cli_runner.invoke(cli_main, "--help") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + result = cli_runner.invoke(cli_main, "--version") + assert result.exit_code == os.EX_OK, result.output + assert result.stdout.strip() == API_VERSION + + +def test_echo_dotenv(cli_runner: CliRunner, monkeypatch: pytest.MonkeyPatch): + # simcore-service-payments echo-dotenv --auto-password + result = cli_runner.invoke(cli_main, "echo-dotenv --auto-password") + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + 
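+ # NOTE (descriptive comment, added): `echo-dotenv --auto-password` writes the service
+ # configuration as dotenv lines to stdout, e.g. (illustrative values only, not the actual CLI output):
+ #   PAYMENTS_USERNAME=admin
+ #   PAYMENTS_PASSWORD=<auto-generated-secret>
+ # load_dotenv() below parses that output into a dict so it can be re-applied with
+ # monkeypatch and validated against ApplicationSettings.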
environs = load_dotenv(result.stdout) + + with monkeypatch.context() as patch: + setenvs_from_dict(patch, environs) + ApplicationSettings.create_from_envs() + + +def test_list_settings(cli_runner: CliRunner, app_environment: EnvVarsDict): + # simcore-service-payments settings --show-secrets --as-json + result = cli_runner.invoke(cli_main, ["settings", "--show-secrets", "--as-json"]) + assert result.exit_code == os.EX_OK, _format_cli_error(result) + + print(result.output) + settings = ApplicationSettings.model_validate_json(result.output) + assert settings.model_dump() == ApplicationSettings.create_from_envs().model_dump() + + +def test_main(app_environment: EnvVarsDict): + from simcore_service_payments.main import the_app + + assert the_app diff --git a/services/payments/tests/unit/test_core_settings.py b/services/payments/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..a1d84644d62 --- /dev/null +++ b/services/payments/tests/unit/test_core_settings.py @@ -0,0 +1,24 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_payments.core.settings import ApplicationSettings + + +def test_valid_web_application_settings(app_environment: EnvVarsDict): + """ + We validate actual envfiles (e.g. repo.config files) by passing them via the CLI + + $ ln -s /path/to/osparc-config/deployments/mydeploy.com/repo.config .secrets + $ pytest --external-envfile=.secrets --pdb tests/unit/test_core_settings.py + + """ + settings = ApplicationSettings() # type: ignore + assert settings + + assert settings == ApplicationSettings.create_from_envs() + + assert app_environment["PAYMENTS_LOGLEVEL"] == settings.LOG_LEVEL diff --git a/services/payments/tests/unit/test_db_payments_methods_repo.py b/services/payments/tests/unit/test_db_payments_methods_repo.py new file mode 100644 index 00000000000..47595bb5557 --- /dev/null +++ b/services/payments/tests/unit/test_db_payments_methods_repo.py @@ -0,0 +1,97 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from simcore_service_payments.models.db import InitPromptAckFlowState, PaymentsMethodsDB + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +async def test_create_payments_method_annotations_workflow(app: FastAPI): + + fake = PaymentsMethodsDB( + **PaymentsMethodsDB.model_config["json_schema_extra"]["examples"][1] + ) + + repo = PaymentsMethodsRepo(app.state.engine) + + # annotate init + payment_method_id = await repo.insert_init_payment_method( +
fake.payment_method_id, + user_id=fake.user_id, + wallet_id=fake.wallet_id, + initiated_at=fake.initiated_at, + ) + + assert payment_method_id == fake.payment_method_id + + # annotate ack + acked = await repo.update_ack_payment_method( + fake.payment_method_id, + completion_state=InitPromptAckFlowState.SUCCESS, + state_message="DONE", + ) + + # list + listed = await repo.list_user_payment_methods( + user_id=fake.user_id, + wallet_id=fake.wallet_id, + ) + assert len(listed) == 1 + assert listed[0] == acked + + # get + got = await repo.get_payment_method( + payment_method_id, + user_id=fake.user_id, + wallet_id=fake.wallet_id, + ) + assert got == acked + + # delete + deleted = await repo.delete_payment_method( + payment_method_id, + user_id=fake.user_id, + wallet_id=fake.wallet_id, + ) + assert deleted == got + + listed = await repo.list_user_payment_methods( + user_id=fake.user_id, + wallet_id=fake.wallet_id, + ) + assert not listed diff --git a/services/payments/tests/unit/test_db_payments_transactions_repo.py b/services/payments/tests/unit/test_db_payments_transactions_repo.py new file mode 100644 index 00000000000..d4e728d14c5 --- /dev/null +++ b/services/payments/tests/unit/test_db_payments_transactions_repo.py @@ -0,0 +1,89 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.models.db import ( + PaymentsTransactionsDB, + PaymentTransactionState, +) + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +async def test_one_time_payment_annotations_workflow(app: FastAPI): + + fake = PaymentsTransactionsDB( + **PaymentsTransactionsDB.model_config["json_schema_extra"]["examples"][1] + ) + + repo = PaymentsTransactionsRepo(app.state.engine) + + # annotate init + payment_id = await repo.insert_init_payment_transaction( + payment_id=fake.payment_id, + price_dollars=fake.price_dollars, + product_name=fake.product_name, + user_id=fake.user_id, + user_email=fake.user_email, + wallet_id=fake.wallet_id, + comment=fake.comment, + osparc_credits=fake.osparc_credits, + initiated_at=fake.initiated_at, + ) + + # annotate ack + assert fake.invoice_url is not None + transaction_acked = await repo.update_ack_payment_transaction( + payment_id=fake.payment_id, + completion_state=PaymentTransactionState.SUCCESS, + invoice_url=fake.invoice_url, + invoice_pdf_url=fake.invoice_pdf_url, + stripe_invoice_id=fake.stripe_invoice_id, + state_message="DONE", + ) + + assert transaction_acked.payment_id == payment_id + + # list + total_number_of_items, user_payments = await repo.list_user_payment_transactions( + user_id=fake.user_id, 
product_name=fake.product_name + ) + assert total_number_of_items == 1 + assert len(user_payments) == total_number_of_items + assert user_payments[0] == transaction_acked diff --git a/services/payments/tests/unit/test_db_payments_users_repo.py b/services/payments/tests/unit/test_db_payments_users_repo.py new file mode 100644 index 00000000000..4cff0108033 --- /dev/null +++ b/services/payments/tests/unit/test_db_payments_users_repo.py @@ -0,0 +1,142 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import AsyncIterator +from typing import Any + +import pytest +from fastapi import FastAPI +from models_library.groups import GroupID +from models_library.users import UserID +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.postgres_tools import insert_and_get_row_lifespan +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_postgres_database.models.payments_transactions import payments_transactions +from simcore_postgres_database.models.products import products +from simcore_postgres_database.models.users import users +from simcore_service_payments.db.payment_users_repo import PaymentsUsersRepo +from simcore_service_payments.services.postgres import get_engine + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + postgres_env_vars_dict: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.fixture +async def user( + app: FastAPI, + user: dict[str, Any], + user_id: UserID, +) -> AsyncIterator[dict[str, Any]]: + """ + injects a user in db + """ + assert user_id == user["id"] + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + get_engine(app), + table=users, + values=user, + pk_col=users.c.id, + pk_value=user["id"], + ) as row: + yield row + + +@pytest.fixture +def user_primary_group_id(user: dict[str, Any]) -> GroupID: + # Overrides `user_primary_group_id` since new user triggers an automatic creation of a primary group + return user["primary_gid"] + + +@pytest.fixture +async def product( + app: FastAPI, product: dict[str, Any] +) -> AsyncIterator[dict[str, Any]]: + """ + injects product in db + """ + # NOTE: this fixture ignores products' group-id but it is fine for this test context + assert product["group_id"] is None + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + get_engine(app), + table=products, + values=product, + pk_col=products.c.name, + pk_value=product["name"], + ) as row: + yield row + + +@pytest.fixture +async def successful_transaction( + app: FastAPI, successful_transaction: dict[str, Any] +) -> AsyncIterator[dict[str, Any]]: + """ + injects transaction in db + """ + async with insert_and_get_row_lifespan( # pylint:disable=contextmanager-generator-missing-cleanup + get_engine(app), + table=payments_transactions, + values=successful_transaction, + 
pk_col=payments_transactions.c.payment_id, + pk_value=successful_transaction["payment_id"], + ) as row: + yield row + + +async def test_payments_user_repo( + app: FastAPI, user_id: UserID, user_primary_group_id: GroupID +): + repo = PaymentsUsersRepo(get_engine(app)) + assert await repo.get_primary_group_id(user_id) == user_primary_group_id + + +async def test_get_notification_data( + app: FastAPI, + user: dict[str, Any], + product: dict[str, Any], + successful_transaction: dict[str, Any], +): + repo = PaymentsUsersRepo(get_engine(app)) + + # check once + data = await repo.get_notification_data( + user_id=user["id"], payment_id=successful_transaction["payment_id"] + ) + + assert data.payment_id == successful_transaction["payment_id"] + assert data.first_name == user["first_name"] + assert data.last_name == user["last_name"] + assert data.email == user["email"] + assert data.product_name == product["name"] + assert data.display_name == product["display_name"] + assert data.vendor == product["vendor"] + assert data.support_email == product["support_email"] diff --git a/services/payments/tests/unit/test_rpc_payments.py b/services/payments/tests/unit/test_rpc_payments.py new file mode 100644 index 00000000000..b755acf7d08 --- /dev/null +++ b/services/payments/tests/unit/test_rpc_payments.py @@ -0,0 +1,155 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from typing import Any + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_payments.errors import PaymentNotFoundError +from models_library.api_schemas_webserver.wallets import WalletPaymentInitiated +from models_library.payments import UserInvoiceAddress +from models_library.rabbitmq_basic_types import RPCMethodName +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient, RPCServerError +from servicelib.rabbitmq._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S +from simcore_service_payments.api.rpc.routes import PAYMENTS_RPC_NAMESPACE + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + external_envfile_dict: EnvVarsDict, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + **postgres_env_vars_dict, + **external_envfile_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.fixture +def init_payment_kwargs(faker: Faker) -> dict[str, Any]: + return { + "amount_dollars": 999999.99609375, # SEE https://github.com/ITISFoundation/appmotion-exchange/issues/2 + "target_credits": 100, + "product_name": "osparc", + "wallet_id": 1, + "wallet_name": "wallet-name", + "user_id": 1, + "user_name": "user", + "user_email": "user@email.com", + "user_address": UserInvoiceAddress(country="CH"), + "stripe_price_id": 
"stripe-price-id", + "stripe_tax_rate_id": "stripe-tax-rate-id", + } + + +async def test_rpc_init_payment_fail( + app: FastAPI, + rpc_client: RabbitMQRPCClient, + init_payment_kwargs: dict[str, Any], + payments_clean_db: None, +): + assert app + + with pytest.raises(RPCServerError) as exc_info: + await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_payment"), + **init_payment_kwargs, + ) + + error = exc_info.value + assert isinstance(error, RPCServerError) + assert error.exc_type == "httpx.ConnectError" + assert error.method_name == "init_payment" + assert error.exc_message + assert error.traceback + + +async def test_webserver_one_time_payment_workflow( + is_pdb_enabled: bool, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + init_payment_kwargs: dict[str, Any], + payments_clean_db: None, +): + assert app + + result = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_payment"), + **init_payment_kwargs, + ) + + assert isinstance(result, WalletPaymentInitiated) + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes["init_payment"].called + + result = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("cancel_payment"), + payment_id=result.payment_id, + user_id=init_payment_kwargs["user_id"], + wallet_id=init_payment_kwargs["wallet_id"], + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert result is None + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes["cancel_payment"].called + + +async def test_cancel_invalid_payment_id( + is_pdb_enabled: bool, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + init_payment_kwargs: dict[str, Any], + faker: Faker, + payments_clean_db: None, +): + invalid_payment_id = faker.uuid4() + + with pytest.raises(PaymentNotFoundError) as exc_info: + await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("cancel_payment"), + payment_id=invalid_payment_id, + user_id=init_payment_kwargs["user_id"], + wallet_id=init_payment_kwargs["wallet_id"], + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + error = exc_info.value + assert isinstance(error, PaymentNotFoundError) diff --git a/services/payments/tests/unit/test_rpc_payments_methods.py b/services/payments/tests/unit/test_rpc_payments_methods.py new file mode 100644 index 00000000000..e3a6d377e27 --- /dev/null +++ b/services/payments/tests/unit/test_rpc_payments_methods.py @@ -0,0 +1,259 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import pytest +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_webserver.wallets import ( + PaymentMethodInitiated, + PaymentTransaction, +) +from models_library.basic_types import IDStr +from models_library.payments import UserInvoiceAddress +from models_library.products import ProductName, StripePriceID, StripeTaxRateID +from models_library.rabbitmq_basic_types import RPCMethodName +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr, 
TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq._constants import RPC_REQUEST_DEFAULT_TIMEOUT_S +from simcore_service_payments.api.rpc.routes import PAYMENTS_RPC_NAMESPACE +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.models.db import ( + InitPromptAckFlowState, + PaymentTransactionState, +) +from simcore_service_payments.models.schemas.acknowledgements import AckPaymentMethod +from simcore_service_payments.services.payments_methods import insert_payment_method + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + external_envfile_dict: EnvVarsDict, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + **postgres_env_vars_dict, + **external_envfile_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +async def test_webserver_init_and_cancel_payment_method_workflow( + is_pdb_enabled: bool, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + wallet_name: IDStr, + wallet_id: WalletID, + payments_clean_db: None, +): + assert app + + initiated = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_creation_of_payment_method"), + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + ) + + assert isinstance(initiated, PaymentMethodInitiated) + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes[ + "init_payment_method" + ].called + + cancelled = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("cancel_creation_of_payment_method"), + payment_method_id=initiated.payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + assert cancelled is None + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes[ + "delete_payment_method" + ].called + + +async def test_webserver_crud_payment_method_workflow( + is_pdb_enabled: bool, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + mock_payments_gateway_service_or_none: MockRouter | None, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + wallet_name: IDStr, + wallet_id: WalletID, + payments_clean_db: None, +): + assert app + + inited = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("init_creation_of_payment_method"), + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + ) + + 
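+ # NOTE (descriptive comment, added): creating a payment-method is a two-step flow: the RPC
+ # call above only *initiates* it at the (mocked) payments-gateway, which would normally
+ # acknowledge once the user completes the payment form. Here that ACK is faked further below
+ # via PaymentsMethodsRepo.update_ack_payment_method so that the list/get/delete steps can run
+ # without a real gateway callback.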
assert isinstance(inited, PaymentMethodInitiated) + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes[ + "init_payment_method" + ].called + + # Faking ACK---- + repo = PaymentsMethodsRepo(app.state.engine) + await repo.update_ack_payment_method( + inited.payment_method_id, + completion_state=InitPromptAckFlowState.SUCCESS, + state_message="FAKED ACK", + ) + # ----- + + listed = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("list_payment_methods"), + user_id=user_id, + wallet_id=wallet_id, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + assert len(listed) == 1 + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes[ + "batch_get_payment_methods" + ].called + + got = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("get_payment_method"), + payment_method_id=inited.payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + assert got == listed[0] + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes["get_payment_method"].called + + await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("delete_payment_method"), + payment_method_id=inited.payment_method_id, + user_id=user_id, + wallet_id=wallet_id, + timeout_s=None if is_pdb_enabled else RPC_REQUEST_DEFAULT_TIMEOUT_S, + ) + + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes[ + "delete_payment_method" + ].called + + +async def test_webserver_pay_with_payment_method_workflow( + is_pdb_enabled: bool, + app: FastAPI, + rpc_client: RabbitMQRPCClient, + mock_resoruce_usage_tracker_service_api: None, + mock_payments_gateway_service_or_none: MockRouter | None, + faker: Faker, + product_name: ProductName, + product_price_stripe_price_id: StripePriceID, + product_price_stripe_tax_rate_id: StripeTaxRateID, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + wallet_name: IDStr, + wallet_id: WalletID, + payments_clean_db: None, +): + assert app + + # faking Payment method + created = await insert_payment_method( + repo=PaymentsMethodsRepo(app.state.engine), + payment_method_id=faker.uuid4(), + user_id=user_id, + wallet_id=wallet_id, + ack=AckPaymentMethod(success=True, message="Faked ACK"), + ) + + transaction = await rpc_client.request( + PAYMENTS_RPC_NAMESPACE, + TypeAdapter(RPCMethodName).validate_python("pay_with_payment_method"), + payment_method_id=created.payment_method_id, + amount_dollars=faker.pyint(), + target_credits=faker.pyint(), + product_name=product_name, + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + user_address=UserInvoiceAddress(country="CH"), + stripe_price_id=product_price_stripe_price_id, + stripe_tax_rate_id=product_price_stripe_tax_rate_id, + comment="Payment with stored credit-card", + ) + + assert isinstance(transaction, PaymentTransaction) + assert transaction.payment_id + assert transaction.state == "SUCCESS" + + payment = await PaymentsTransactionsRepo(app.state.engine).get_payment_transaction( + transaction.payment_id, user_id=user_id, wallet_id=wallet_id + ) + assert payment is not None + assert payment.payment_id == transaction.payment_id + assert payment.state == PaymentTransactionState.SUCCESS + assert 
payment.comment == "Payment with stored credit-card" diff --git a/services/payments/tests/unit/test_services_auto_recharge_listener.py b/services/payments/tests/unit/test_services_auto_recharge_listener.py new file mode 100644 index 00000000000..e4b22b12d95 --- /dev/null +++ b/services/payments/tests/unit/test_services_auto_recharge_listener.py @@ -0,0 +1,429 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import Awaitable, Callable, Iterator +from datetime import UTC, datetime, timedelta +from decimal import Decimal +from unittest import mock + +import pytest +import sqlalchemy as sa +from faker import Faker +from fastapi import FastAPI +from models_library.api_schemas_webserver import WEBSERVER_RPC_NAMESPACE +from models_library.api_schemas_webserver.wallets import ( + GetWalletAutoRecharge, + PaymentMethodID, +) +from models_library.basic_types import NonNegativeDecimal +from models_library.payments import InvoiceDataGet +from models_library.products import ProductName +from models_library.rabbitmq_messages import WalletCreditsMessage +from models_library.users import UserID +from pytest_mock.plugin import MockerFixture +from pytest_simcore.helpers.faker_factories import ( + random_payment_autorecharge, + random_payment_method, + random_payment_transaction, +) +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient, RPCRouter +from simcore_postgres_database.models.payments_autorecharge import payments_autorecharge +from simcore_postgres_database.models.payments_methods import payments_methods +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, + payments_transactions, +) +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.models.db import PaymentsTransactionsDB +from simcore_service_payments.models.schemas.acknowledgements import ( + AckPaymentWithPaymentMethod, +) +from simcore_service_payments.services.auto_recharge_process_message import ( + _check_autorecharge_conditions_not_met, + _check_wallet_credits_above_threshold, + _exceeds_monthly_limit, + _is_message_too_old, + _was_wallet_topped_up_recently, +) +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, + external_envfile_dict: EnvVarsDict, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + **postgres_env_vars_dict, + **external_envfile_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + 
"PAYMENTS_AUTORECHARGE_ENABLED": "1", + }, + ) + + +@pytest.fixture +async def mocked_message_parser(mocker: MockerFixture) -> mock.AsyncMock: + return mocker.patch( + "simcore_service_payments.services.auto_recharge_listener.process_message" + ) + + +async def test_process_message__called( + mocked_message_parser: mock.AsyncMock, + app: FastAPI, + create_rabbitmq_client: Callable[[str], RabbitMQClient], +): + publisher = create_rabbitmq_client("publisher") + msg = WalletCreditsMessage(wallet_id=1, credits=Decimal("80.5"), product_name="s4l") + await publisher.publish(WalletCreditsMessage.get_channel_name(), msg) + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + mocked_message_parser.assert_called_once() + + +@pytest.fixture() +def populate_test_db( + postgres_db: sa.engine.Engine, faker: Faker, wallet_id: int +) -> Iterator[None]: + with postgres_db.connect() as con: + _primary_payment_method_id = faker.uuid4() + _completed_at = datetime.now(tz=UTC) + timedelta(minutes=1) + + con.execute( + payments_methods.insert().values( + **random_payment_method( + payment_method_id=_primary_payment_method_id, + wallet_id=wallet_id, + state="SUCCESS", + completed_at=_completed_at, + ) + ) + ) + con.execute( + payments_autorecharge.insert().values( + **random_payment_autorecharge( + primary_payment_method_id=_primary_payment_method_id, + wallet_id=wallet_id, + ) + ) + ) + + yield + + con.execute(payments_methods.delete()) + con.execute(payments_autorecharge.delete()) + + +@pytest.fixture() +def wallet_id(faker: Faker): + return faker.pyint() + + +@pytest.fixture +async def mocked_pay_with_payment_method(mocker: MockerFixture) -> mock.AsyncMock: + return mocker.patch( + "simcore_service_payments.services.payments.PaymentsGatewayApi.pay_with_payment_method", + return_value=AckPaymentWithPaymentMethod.model_construct( + **AckPaymentWithPaymentMethod.model_config["json_schema_extra"]["example"] + ), + ) + + +@pytest.fixture +async def mock_rpc_client( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], + mocker: MockerFixture, +) -> RabbitMQRPCClient: + rpc_client = await rabbitmq_rpc_client("client") + + # mock returned client + mocker.patch( + "simcore_service_payments.services.auto_recharge_process_message.get_rabbitmq_rpc_client", + return_value=rpc_client, + ) + + return rpc_client + + +@pytest.fixture +async def mock_rpc_server( + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], + mocker: MockerFixture, +) -> RabbitMQRPCClient: + rpc_server = await rabbitmq_rpc_client("mock_server") + + router = RPCRouter() + + # mocks the interface defined in the webserver + + @router.expose() + async def get_invoice_data( + user_id: UserID, + dollar_amount: Decimal, + product_name: ProductName, + ) -> InvoiceDataGet: + return InvoiceDataGet.model_validate( + InvoiceDataGet.model_config["json_schema_extra"]["examples"][0] + ) + + await rpc_server.register_router(router, namespace=WEBSERVER_RPC_NAMESPACE) + + return rpc_server + + +async def _assert_payments_transactions_db_row(postgres_db) -> PaymentsTransactionsDB: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.2), + stop=stop_after_delay(10), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt, postgres_db.connect() as con: + result = con.execute(sa.select(payments_transactions)) + row = result.first() + assert row + return 
PaymentsTransactionsDB.model_validate(row) + + +async def test_process_message__whole_autorecharge_flow_success( + app: FastAPI, + create_rabbitmq_client: Callable[[str], RabbitMQClient], + wallet_id: int, + populate_test_db: None, + mocked_pay_with_payment_method: mock.AsyncMock, + mock_rpc_server: RabbitMQRPCClient, + mock_rpc_client: RabbitMQRPCClient, + mock_resoruce_usage_tracker_service_api: MockRouter, + postgres_db: sa.engine.Engine, +): + publisher = create_rabbitmq_client("publisher") + msg = WalletCreditsMessage( + wallet_id=wallet_id, credits=Decimal("80.5"), product_name="s4l" + ) + await publisher.publish(WalletCreditsMessage.get_channel_name(), msg) + + row = await _assert_payments_transactions_db_row(postgres_db) + assert row.wallet_id == wallet_id + assert row.state == PaymentTransactionState.SUCCESS + assert row.comment == "Payment generated by auto recharge" + assert len(mock_resoruce_usage_tracker_service_api.calls) == 1 + + +@pytest.mark.parametrize( + "_credits,expected", [(Decimal("10001"), True), (Decimal("9999"), False)] +) +async def test_check_wallet_credits_above_threshold( + app: FastAPI, _credits: Decimal, expected: bool +): + settings: ApplicationSettings = app.state.settings + assert settings.PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT + + assert expected == await _check_wallet_credits_above_threshold( + settings.PAYMENTS_AUTORECHARGE_DEFAULT_MONTHLY_LIMIT, _credits + ) + + +@pytest.mark.parametrize( + "get_wallet_auto_recharge,expected", + [ + ( + GetWalletAutoRecharge( + enabled=True, + payment_method_id=PaymentMethodID("123"), + min_balance_in_credits=NonNegativeDecimal(10), + top_up_amount_in_usd=NonNegativeDecimal(10), + monthly_limit_in_usd=NonNegativeDecimal(10), + ), + False, + ), + ( + GetWalletAutoRecharge( + enabled=False, + payment_method_id=PaymentMethodID("123"), + min_balance_in_credits=NonNegativeDecimal(10), + top_up_amount_in_usd=NonNegativeDecimal(10), + monthly_limit_in_usd=NonNegativeDecimal(10), + ), + True, + ), + ( + GetWalletAutoRecharge( + enabled=True, + payment_method_id=None, + min_balance_in_credits=NonNegativeDecimal(10), + top_up_amount_in_usd=NonNegativeDecimal(10), + monthly_limit_in_usd=NonNegativeDecimal(10), + ), + True, + ), + (None, True), + ], +) +async def test_check_autorecharge_conditions_not_met( + app: FastAPI, get_wallet_auto_recharge: GetWalletAutoRecharge, expected: bool +): + assert expected == await _check_autorecharge_conditions_not_met( + get_wallet_auto_recharge + ) + + +@pytest.fixture() +def populate_payment_transaction_db( + postgres_db: sa.engine.Engine, wallet_id: int +) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute( + payments_transactions.insert().values( + **random_payment_transaction( + price_dollars=Decimal(9500), + wallet_id=wallet_id, + state=PaymentTransactionState.SUCCESS, + completed_at=datetime.now(tz=UTC), + initiated_at=datetime.now(tz=UTC) - timedelta(seconds=10), + ) + ) + ) + + yield + + con.execute(payments_transactions.delete()) + + +@pytest.mark.parametrize( + "get_wallet_auto_recharge,expected", + [ + ( + GetWalletAutoRecharge( + enabled=True, + payment_method_id=PaymentMethodID("123"), + min_balance_in_credits=NonNegativeDecimal(10), + top_up_amount_in_usd=NonNegativeDecimal(300), + monthly_limit_in_usd=NonNegativeDecimal(10000), + ), + False, + ), + ( + GetWalletAutoRecharge( + enabled=False, + payment_method_id=PaymentMethodID("123"), + min_balance_in_credits=NonNegativeDecimal(10), + top_up_amount_in_usd=NonNegativeDecimal(1000), + 
monthly_limit_in_usd=NonNegativeDecimal(10000), + ), + True, + ), + ], +) +async def test_exceeds_monthly_limit( + app: FastAPI, + wallet_id: int, + populate_payment_transaction_db: None, + get_wallet_auto_recharge: GetWalletAutoRecharge, + expected: bool, +): + _payments_transactions_repo = PaymentsTransactionsRepo(db_engine=app.state.engine) + + assert expected == await _exceeds_monthly_limit( + _payments_transactions_repo, wallet_id, get_wallet_auto_recharge + ) + + +async def test_was_wallet_topped_up_recently_true( + app: FastAPI, + wallet_id: int, + populate_payment_transaction_db: None, +): + _payments_transactions_repo = PaymentsTransactionsRepo(db_engine=app.state.engine) + + assert ( + await _was_wallet_topped_up_recently(_payments_transactions_repo, wallet_id) + is True + ) + + +@pytest.fixture() +def populate_payment_transaction_db_with_older_trans( + postgres_db: sa.engine.Engine, wallet_id: int +) -> Iterator[None]: + with postgres_db.connect() as con: + current_timestamp = datetime.now(tz=UTC) + current_timestamp_minus_10_minutes = current_timestamp - timedelta(minutes=10) + + con.execute( + payments_transactions.insert().values( + **random_payment_transaction( + price_dollars=Decimal(9500), + wallet_id=wallet_id, + state=PaymentTransactionState.SUCCESS, + initiated_at=current_timestamp_minus_10_minutes, + ) + ) + ) + + yield + + con.execute(payments_transactions.delete()) + + +async def test_was_wallet_topped_up_recently_false( + app: FastAPI, + wallet_id: int, + populate_payment_transaction_db_with_older_trans: None, +): + _payments_transactions_repo = PaymentsTransactionsRepo(db_engine=app.state.engine) + + assert ( + await _was_wallet_topped_up_recently(_payments_transactions_repo, wallet_id) + is False + ) + + +async def test__is_message_too_old_true(): + _dummy_message_timestamp = datetime.now(tz=UTC) - timedelta(minutes=10) + + assert await _is_message_too_old(_dummy_message_timestamp) is True + + +async def test__is_message_too_old_false(): + _dummy_message_timestamp = datetime.now(tz=UTC) - timedelta(minutes=3) + + assert await _is_message_too_old(_dummy_message_timestamp) is False diff --git a/services/payments/tests/unit/test_services_notifier.py b/services/payments/tests/unit/test_services_notifier.py new file mode 100644 index 00000000000..faeed872a5c --- /dev/null +++ b/services/payments/tests/unit/test_services_notifier.py @@ -0,0 +1,174 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from collections.abc import AsyncIterable, Callable +from contextlib import _AsyncGeneratorContextManager +from typing import Awaitable +from unittest.mock import AsyncMock + +import arrow +import pytest +import socketio +from fastapi import FastAPI +from models_library.api_schemas_payments.socketio import ( + SOCKET_IO_PAYMENT_COMPLETED_EVENT, +) +from models_library.api_schemas_webserver.socketio import SocketIORoomStr +from models_library.api_schemas_webserver.wallets import PaymentTransaction +from models_library.groups import GroupID +from models_library.users import UserID +from pydantic import TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.faker_factories import random_payment_transaction +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from simcore_service_payments.models.db import 
PaymentsTransactionsDB +from simcore_service_payments.models.db_to_api import to_payments_api_model +from simcore_service_payments.services.notifier import NotifierService +from simcore_service_payments.services.rabbitmq import get_rabbitmq_settings +from socketio import AsyncServer +from tenacity import AsyncRetrying, stop_after_delay +from tenacity.wait import wait_fixed + +pytest_simcore_core_services_selection = [ + "rabbit", +] + + +@pytest.fixture +def mock_db_payments_users_repo(mocker: MockerFixture, user_primary_group_id: GroupID): + mocker.patch( + "simcore_service_payments.db.payment_users_repo.PaymentsUsersRepo.get_primary_group_id", + return_value=user_primary_group_id, + ) + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + rabbit_env_vars_dict: EnvVarsDict, # rabbitMQ settings from 'rabbit' service + # db layer is mocked + with_disabled_postgres: None, + mock_db_payments_users_repo: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_EMAIL", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **rabbit_env_vars_dict, + }, + ) + + +@pytest.fixture +async def socketio_server( + app: FastAPI, + socketio_server_factory: Callable[ + [RabbitSettings], _AsyncGeneratorContextManager[AsyncServer] + ], +) -> AsyncIterable[AsyncServer]: + async with socketio_server_factory(get_rabbitmq_settings(app)) as server: + yield server + + +@pytest.fixture +def room_name(user_primary_group_id: GroupID) -> SocketIORoomStr: + return SocketIORoomStr.from_group_id(user_primary_group_id) + + +@pytest.fixture +async def socketio_client( + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], +) -> AsyncIterable[socketio.AsyncClient]: + async with socketio_client_factory() as client: + yield client + + +@pytest.fixture +async def socketio_client_events( + socketio_client: socketio.AsyncClient, +) -> dict[str, AsyncMock]: + # emulates front-end receiving message + + async def on_payment(data): + assert TypeAdapter(PaymentTransaction).validate_python(data) is not None + + on_event_spy = AsyncMock(wraps=on_payment) + socketio_client.on(SOCKET_IO_PAYMENT_COMPLETED_EVENT, on_event_spy) + + return {on_payment.__name__: on_event_spy} + + +@pytest.fixture +async def notify_payment( + app: FastAPI, user_id: UserID +) -> Callable[[], Awaitable[None]]: + async def _() -> None: + transaction = PaymentsTransactionsDB( + **random_payment_transaction( + user_id=user_id, completed_at=arrow.utcnow().datetime + ) + ) + notifier: NotifierService = NotifierService.get_from_app_state(app) + await notifier.notify_payment_completed( + user_id=transaction.user_id, payment=to_payments_api_model(transaction) + ) + + return _ + + +async def _assert_called_once(mock: AsyncMock) -> None: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), stop=stop_after_delay(5), reraise=True + ): + with attempt: + assert mock.call_count == 1 + + +async def test_emit_message_as_external_process_to_frontend_client( + socketio_server_events: dict[str, AsyncMock], + socketio_client: socketio.AsyncClient, + socketio_client_events: dict[str, AsyncMock], + notify_payment: Callable[[], Awaitable[None]], + socketio_client_factory: Callable[ + [], _AsyncGeneratorContextManager[socketio.AsyncClient] + ], +): + """ + front-end -> socketio client (many different clients) + webserver -> socketio server (one/more replicas) + payments -> Sends messages 
to clients from external processes (one/more replicas) + """ + + # web server spy events + server_connect = socketio_server_events["connect"] + server_disconnect = socketio_server_events["disconnect"] + server_on_check = socketio_server_events["on_check"] + + # client spy events + client_on_payment = socketio_client_events["on_payment"] + + # checks + assert server_connect.called + assert not server_disconnect.called + + # client emits + await socketio_client.emit("check", data="hoi") + + await _assert_called_once(server_on_check) + + # payment server emits + await notify_payment() + + await _assert_called_once(client_on_payment) diff --git a/services/payments/tests/unit/test_services_notifier_email.py b/services/payments/tests/unit/test_services_notifier_email.py new file mode 100644 index 00000000000..aba8c2c3c59 --- /dev/null +++ b/services/payments/tests/unit/test_services_notifier_email.py @@ -0,0 +1,200 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from types import SimpleNamespace +from typing import Any +from unittest.mock import AsyncMock, MagicMock + +import httpx +import pytest +import respx +from faker import Faker +from fastapi import status +from jinja2 import DictLoader, Environment, select_autoescape +from models_library.products import ProductName +from models_library.users import UserID +from pydantic import EmailStr +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.email import SMTPSettings +from simcore_postgres_database.models.products import Vendor +from simcore_service_payments.db.payment_users_repo import PaymentsUsersRepo +from simcore_service_payments.models.db import PaymentsTransactionsDB +from simcore_service_payments.services.notifier_email import ( + _PRODUCT_NOTIFICATIONS_TEMPLATES, + EmailProvider, + _create_email_session, + _create_user_email, + _PaymentData, + _ProductData, + _UserData, +) + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + external_envfile_dict: EnvVarsDict, + docker_compose_service_environment_dict: EnvVarsDict, +) -> EnvVarsDict: + return setenvs_from_dict( + monkeypatch, + { + **docker_compose_service_environment_dict, + **external_envfile_dict, + }, + ) + + +@pytest.fixture +def smtp_mock_or_none( + mocker: MockerFixture, + is_external_user_email: bool, + user_email: EmailStr, +) -> MagicMock | None: + if not is_external_user_email: + return mocker.patch("simcore_service_payments.services.notifier_email.SMTP") + print("🚨 Emails might be sent to", f"{user_email=}") + return None + + +@pytest.fixture(params=["ok", "ko"]) +def mocked_get_invoice_pdf_response( + request: pytest.FixtureRequest, + respx_mock: respx.MockRouter, + transaction: PaymentsTransactionsDB, +) -> respx.MockRouter: + if request.param == "ok": + file_name = "test-attachment.pdf" + file_content = b"%PDF-1.4 ... (file content here) ..." 
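The fan-out sketched in the docstring of `test_emit_message_as_external_process_to_frontend_client` above (payments service emitting to front-end socket.io clients through the web server replicas) relies on python-socketio's external-process emitters over a shared message queue. A minimal sketch of that mechanism, under stated assumptions: this is not the service's actual `NotifierService` implementation, the RabbitMQ DSN, event name, room format and payload below are illustrative only (the real test imports `SOCKET_IO_PAYMENT_COMPLETED_EVENT` and builds the room with `SocketIORoomStr.from_group_id`).

```python
# Hedged sketch: how a process that does NOT host the socket.io server can still
# emit events to connected clients, using python-socketio's write-only pub/sub manager.
import asyncio

import socketio


async def emit_payment_completed(rabbitmq_dsn: str, room: str, payload: dict) -> None:
    # write_only=True: this manager only publishes to the queue; it never accepts
    # client connections itself. The socket.io server replicas subscribed to the same
    # queue forward the event to every client joined to `room`.
    external_sio = socketio.AsyncAioPikaManager(rabbitmq_dsn, write_only=True)
    await external_sio.emit(
        "walletPaymentCompleted",  # hypothetical event name (illustrative)
        data=payload,
        room=room,
    )


if __name__ == "__main__":
    asyncio.run(
        emit_payment_completed(
            "amqp://guest:guest@localhost:5672//",  # illustrative DSN
            room="group:123",  # illustrative room id
            payload={"paymentId": "pt_123", "completedStatus": "SUCCESS"},
        )
    )
```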
+ + response = httpx.Response( + status.HTTP_200_OK, + content=file_content, + headers={ + "Content-Type": "application/pdf", + "Content-Disposition": f'attachment; filename="{file_name}"', + }, + ) + else: + assert request.param == "ko" + response = httpx.Response( + status.HTTP_404_NOT_FOUND, + text=f"{request.fixturename} is set to '{request.param}'", + ) + + respx_mock.get(f"{transaction.invoice_pdf_url}").mock(return_value=response) + + return respx_mock + + +@pytest.fixture +def transaction( + faker: Faker, successful_transaction: dict[str, Any] +) -> PaymentsTransactionsDB: + kwargs = { + k: successful_transaction[k] + for k in PaymentsTransactionsDB.model_fields.keys() + if k in successful_transaction + } + return PaymentsTransactionsDB(**kwargs) + + +async def test_send_email_workflow( + app_environment: EnvVarsDict, + faker: Faker, + transaction: PaymentsTransactionsDB, + user_email: EmailStr, + product_name: ProductName, + product: dict[str, Any], + smtp_mock_or_none: MagicMock | None, + mocked_get_invoice_pdf_response: respx.MockRouter, +): + """ + Example of usage with external email and envfile + + > pytest --faker-user-email=me@email.me --external-envfile=.myenv -k test_send_email_workflow --pdb tests/unit + """ + + settings = SMTPSettings.create_from_envs() + env = Environment( + loader=DictLoader(_PRODUCT_NOTIFICATIONS_TEMPLATES), + autoescape=select_autoescape(["html", "xml"]), + ) + + user_data = _UserData( + first_name=faker.first_name(), + last_name=faker.last_name(), + email=user_email, + ) + + vendor: Vendor = product["vendor"] + + product_data = _ProductData( # type: ignore + product_name=product_name, + display_name=product["display_name"], + vendor_display_inline=f"{vendor.get('name','')}, {vendor.get('address','')}", + support_email=product["support_email"], + ) + + payment_data = _PaymentData( + price_dollars=f"{transaction.price_dollars:.2f}", + osparc_credits=f"{transaction.osparc_credits:.2f}", + invoice_url=transaction.invoice_url, + invoice_pdf_url=transaction.invoice_pdf_url, + ) + + msg = await _create_user_email(env, user_data, payment_data, product_data) + + async with _create_email_session(settings) as smtp: + await smtp.send_message(msg) + + if smtp_mock_or_none: + assert smtp_mock_or_none.called + assert isinstance(smtp, AsyncMock) + assert smtp.login.called + assert smtp.send_message.called + + +async def test_email_provider( + app_environment: EnvVarsDict, + mocker: MockerFixture, + user_id: UserID, + user_first_name: str, + user_last_name: str, + user_email: EmailStr, + product_name: ProductName, + product: dict[str, Any], + transaction: PaymentsTransactionsDB, + smtp_mock_or_none: MagicMock | None, + mocked_get_invoice_pdf_response: respx.MockRouter, +): + settings = SMTPSettings.create_from_envs() + + # mock access to db + users_repo = PaymentsUsersRepo(MagicMock()) + get_notification_data_mock = mocker.patch.object( + users_repo, + "get_notification_data", + return_value=SimpleNamespace( + payment_id=transaction.payment_id, + first_name=user_first_name, + last_name=user_last_name, + email=user_email, + product_name=product_name, + display_name=product["display_name"], + vendor=product["vendor"], + support_email=product["support_email"], + ), + ) + + provider = EmailProvider(settings, users_repo) + + await provider.notify_payment_completed(user_id=user_id, payment=transaction) + assert get_notification_data_mock.called + + if smtp_mock_or_none: + assert smtp_mock_or_none.called diff --git a/services/payments/tests/unit/test_services_payments.py 
b/services/payments/tests/unit/test_services_payments.py new file mode 100644 index 00000000000..4cb484aafbb --- /dev/null +++ b/services/payments/tests/unit/test_services_payments.py @@ -0,0 +1,166 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +import asyncio +from collections.abc import Awaitable, Callable +from unittest.mock import MagicMock + +import pytest +from fastapi import FastAPI +from models_library.api_schemas_webserver.wallets import PaymentMethodID +from models_library.basic_types import IDStr +from models_library.payments import UserInvoiceAddress +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import EmailStr +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from simcore_service_payments.db.payments_methods_repo import PaymentsMethodsRepo +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.models.db import PaymentsMethodsDB +from simcore_service_payments.models.db_to_api import to_payments_api_model +from simcore_service_payments.services import payments +from simcore_service_payments.services.notifier import NotifierService +from simcore_service_payments.services.notifier_email import EmailProvider +from simcore_service_payments.services.notifier_ws import WebSocketProvider +from simcore_service_payments.services.payments_gateway import PaymentsGatewayApi +from simcore_service_payments.services.resource_usage_tracker import ( + ResourceUsageTrackerApi, +) + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.fixture +def mock_email_provider(mocker: MockerFixture) -> MagicMock: + mock = mocker.MagicMock(EmailProvider) + mock.get_name.return_value = EmailProvider.get_name() + return mock + + +@pytest.fixture +def mock_ws_provider(mocker: MockerFixture) -> MagicMock: + mock = mocker.MagicMock(WebSocketProvider) + mock.get_name.return_value = WebSocketProvider.get_name() + return mock + + +async def test_fails_to_pay_with_payment_method_without_funds( + app: FastAPI, + create_fake_payment_method_in_db: Callable[ + [PaymentMethodID, WalletID, UserID], Awaitable[PaymentsMethodsDB] + ], + no_funds_payment_method_id: PaymentMethodID, + mock_payments_gateway_service_or_none: MockRouter | None, + wallet_id: WalletID, + wallet_name: IDStr, + user_id: UserID, + user_name: IDStr, + user_email: EmailStr, + payments_clean_db: None, + mocker: MockerFixture, + mock_email_provider: MagicMock, + mock_ws_provider: MagicMock, +): + if mock_payments_gateway_service_or_none is None: + pytest.skip( + "cannot run thist test 
against external because it setup a payment method" + ) + + payment_method_without_funds = await create_fake_payment_method_in_db( + payment_method_id=no_funds_payment_method_id, + wallet_id=wallet_id, + user_id=user_id, + ) + + rut = ResourceUsageTrackerApi.get_from_app_state(app) + rut_create_credit_transaction = mocker.spy(rut, "create_credit_transaction") + + # Mocker providers + notifier = NotifierService(mock_email_provider, mock_ws_provider) + + payment = await payments.pay_with_payment_method( + gateway=PaymentsGatewayApi.get_from_app_state(app), + rut=rut, + repo_transactions=PaymentsTransactionsRepo(db_engine=app.state.engine), + repo_methods=PaymentsMethodsRepo(db_engine=app.state.engine), + notifier=notifier, + # + payment_method_id=payment_method_without_funds.payment_method_id, + amount_dollars=100, + target_credits=100, + product_name="my_product", + wallet_id=wallet_id, + wallet_name=wallet_name, + user_id=user_id, + user_name=user_name, + user_email=user_email, + user_address=UserInvoiceAddress(country="CH"), + stripe_price_id="stripe-id", + stripe_tax_rate_id="stripe-id", + comment="test_failure_in_pay_with_payment_method", + ) + + # should not add credits + assert not rut_create_credit_transaction.called + + # check resulting payment + assert payment.completed_at is not None + assert payment.created_at < payment.completed_at + assert payment.state == "FAILED" + assert payment.state_message, "expected reason of failure" + + # check notifications triggered as background tasks + await asyncio.sleep(0.1) + assert len(notifier._background_tasks) == 0 # noqa: SLF001 + + assert mock_email_provider.notify_payment_completed.called + assert ( + mock_email_provider.notify_payment_completed.call_args.kwargs["user_id"] + == user_id + ) + assert ( + to_payments_api_model( + mock_email_provider.notify_payment_completed.call_args.kwargs["payment"] + ) + == payment + ) + + # Websockets notification should be in the exclude list + assert not mock_ws_provider.notify_payment_completed.called diff --git a/services/payments/tests/unit/test_services_payments__get_invoice.py b/services/payments/tests/unit/test_services_payments__get_invoice.py new file mode 100644 index 00000000000..ad8b38068d9 --- /dev/null +++ b/services/payments/tests/unit/test_services_payments__get_invoice.py @@ -0,0 +1,121 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Iterator +from datetime import datetime, timedelta, timezone +from decimal import Decimal +from typing import cast + +import pytest +import sqlalchemy as sa +from fastapi import FastAPI +from models_library.api_schemas_webserver.wallets import PaymentID +from models_library.payments import StripeInvoiceID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import HttpUrl +from pytest_simcore.helpers.faker_factories import random_payment_transaction +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from respx import MockRouter +from simcore_postgres_database.models.payments_transactions import ( + PaymentTransactionState, + payments_transactions, +) +from simcore_service_payments.db.payments_transactions_repo import ( + PaymentsTransactionsRepo, +) +from simcore_service_payments.services import payments +from simcore_service_payments.services.stripe import 
StripeApi + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + postgres_env_vars_dict: EnvVarsDict, + wait_for_postgres_ready_and_db_migrated: None, +): + # set environs + monkeypatch.delenv("PAYMENTS_RABBITMQ", raising=False) + monkeypatch.delenv("PAYMENTS_POSTGRES", raising=False) + + return setenvs_from_dict( + monkeypatch, + { + **app_environment, + **postgres_env_vars_dict, + "POSTGRES_CLIENT_NAME": "payments-service-pg-client", + }, + ) + + +@pytest.fixture() +def populate_payment_transaction_db( + postgres_db: sa.engine.Engine, + wallet_id: int, + user_id: UserID, + invoice_url: HttpUrl, + stripe_invoice_id: StripeInvoiceID | None, +) -> Iterator[PaymentID]: + with postgres_db.connect() as con: + result = con.execute( + payments_transactions.insert() + .values( + **random_payment_transaction( + price_dollars=Decimal(9500), + wallet_id=wallet_id, + user_id=user_id, + state=PaymentTransactionState.SUCCESS, + completed_at=datetime.now(tz=timezone.utc), + initiated_at=datetime.now(tz=timezone.utc) - timedelta(seconds=10), + invoice_url=invoice_url, + stripe_invoice_id=stripe_invoice_id, + ) + ) + .returning(payments_transactions.c.payment_id) + ) + row = result.first() + + yield cast(PaymentID, row[0]) + + con.execute(payments_transactions.delete()) + + +@pytest.mark.parametrize( + "invoice_url,stripe_invoice_id", + [ + ("https://my-fake-pdf-link.com", None), + ("https://my-fake-pdf-link.com", "in_12345"), + ], + indirect=True, +) +async def test_get_payment_invoice_url( + app: FastAPI, + populate_payment_transaction_db: PaymentID, + mock_stripe_or_none: MockRouter | None, + wallet_id: WalletID, + user_id: UserID, +): + invoice_url = await payments.get_payment_invoice_url( + repo=PaymentsTransactionsRepo(db_engine=app.state.engine), + stripe_api=StripeApi.get_from_app_state(app), + # + user_id=user_id, + wallet_id=wallet_id, + payment_id=populate_payment_transaction_db, + ) + assert invoice_url + assert isinstance(invoice_url, HttpUrl) diff --git a/services/payments/tests/unit/test_services_payments_gateway.py b/services/payments/tests/unit/test_services_payments_gateway.py new file mode 100644 index 00000000000..2fa8ebd5d03 --- /dev/null +++ b/services/payments/tests/unit/test_services_payments_gateway.py @@ -0,0 +1,253 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import httpx +import pytest +from faker import Faker +from fastapi import FastAPI, status +from httpx import URL as HttpxURL +from httpx import ASGITransport +from models_library.payments import UserInvoiceAddress +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from respx import MockRouter +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.models.payments_gateway import ( + InitPayment, + InitPaymentMethod, + StripeTaxExempt, +) +from simcore_service_payments.services.payments_gateway import ( + PaymentsGatewayApi, + PaymentsGatewayError, + _raise_as_payments_gateway_error, + setup_payments_gateway, +) +from yarl import URL + + +async def test_setup_payment_gateway_api(app_environment: EnvVarsDict): + new_app = FastAPI() + new_app.state.settings = ApplicationSettings.create_from_envs() + with 
pytest.raises(AttributeError): + PaymentsGatewayApi.get_from_app_state(new_app) + + setup_payments_gateway(new_app) + payment_gateway_api = PaymentsGatewayApi.get_from_app_state(new_app) + + assert payment_gateway_api is not None + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + external_envfile_dict: EnvVarsDict, +): + # set environs + return setenvs_from_dict( + monkeypatch, + {**app_environment, **external_envfile_dict}, + ) + + +async def test_payment_gateway_responsiveness( + app: FastAPI, + mock_payments_gateway_service_api_base: MockRouter, +): + # NOTE: should be standard practice + payment_gateway_api = PaymentsGatewayApi.get_from_app_state(app) + assert payment_gateway_api + + mock_payments_gateway_service_api_base.get( + path="/", + name="healthcheck", + ).respond(status.HTTP_503_SERVICE_UNAVAILABLE) + + assert await payment_gateway_api.ping() + assert not await payment_gateway_api.is_healthy() + + mock_payments_gateway_service_api_base.get( + path="/", + name="healthcheck", + ).respond(status.HTTP_200_OK) + + assert await payment_gateway_api.ping() + assert await payment_gateway_api.is_healthy() + + +@pytest.fixture( + params=[ + 10, + 999999.99609375, # SEE https://github.com/ITISFoundation/appmotion-exchange/issues/2 + ], +) +def amount_dollars(request: pytest.FixtureRequest) -> float: + return request.param + + +@pytest.mark.acceptance_test( + "https://github.com/ITISFoundation/osparc-simcore/pull/4715" +) +async def test_one_time_payment_workflow( + app: FastAPI, + faker: Faker, + mock_payments_gateway_service_or_none: MockRouter | None, + amount_dollars: float, +): + payment_gateway_api = PaymentsGatewayApi.get_from_app_state(app) + assert payment_gateway_api + + # init + payment_initiated = await payment_gateway_api.init_payment( + payment=InitPayment( + amount_dollars=amount_dollars, + credits=faker.pydecimal(positive=True, right_digits=2, left_digits=4), # type: ignore + user_name=faker.user_name(), + user_email=faker.email(), + user_address=UserInvoiceAddress(country="CH"), + wallet_name=faker.word(), + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + stripe_tax_exempt_value=StripeTaxExempt.none, + ), + ) + + # form url + submission_link = payment_gateway_api.get_form_payment_url( + payment_initiated.payment_id + ) + + app_settings: ApplicationSettings = app.state.settings + assert isinstance(submission_link, HttpxURL) + assert ( + URL(f"{submission_link}").host + == URL(f"{app_settings.PAYMENTS_GATEWAY_URL}").host + ) + + # cancel + payment_canceled = await payment_gateway_api.cancel_payment(payment_initiated) + assert payment_canceled is not None + + # check mock + if mock_payments_gateway_service_or_none: + assert mock_payments_gateway_service_or_none.routes["init_payment"].called + assert mock_payments_gateway_service_or_none.routes["cancel_payment"].called + + +@pytest.mark.can_run_against_external() +async def test_payment_methods_workflow( + app: FastAPI, + faker: Faker, + mock_payments_gateway_service_or_none: MockRouter | None, + amount_dollars: float, +): + payments_gateway_api: PaymentsGatewayApi = PaymentsGatewayApi.get_from_app_state( + app + ) + assert payments_gateway_api + + # init payment-method + initiated = await payments_gateway_api.init_payment_method( + InitPaymentMethod( + user_name=faker.user_name(), + user_email=faker.email(), + wallet_name=faker.word(), + ) + ) + + # from url + form_link = 
payments_gateway_api.get_form_payment_method_url( + initiated.payment_method_id + ) + + app_settings: ApplicationSettings = app.state.settings + assert isinstance(form_link, HttpxURL) + assert URL(f"{form_link}").host == URL(f"{app_settings.PAYMENTS_GATEWAY_URL}").host + + # CRUD + payment_method_id = initiated.payment_method_id + + # get payment-method + got_payment_method = await payments_gateway_api.get_payment_method( + payment_method_id + ) + assert got_payment_method.id == payment_method_id + print(got_payment_method.model_dump_json(indent=2)) + + # list payment-methods + items = await payments_gateway_api.get_many_payment_methods([payment_method_id]) + + assert items + assert len(items) == 1 + assert items[0] == got_payment_method + + payment_with_payment_method = await payments_gateway_api.pay_with_payment_method( + id_=payment_method_id, + payment=InitPayment( + amount_dollars=amount_dollars, + credits=faker.pydecimal(positive=True, right_digits=2, left_digits=4), # type: ignore + user_name=faker.user_name(), + user_email=faker.email(), + user_address=UserInvoiceAddress(country="CH"), + wallet_name=faker.word(), + stripe_price_id=faker.word(), + stripe_tax_rate_id=faker.word(), + stripe_tax_exempt_value=StripeTaxExempt.none, + ), + ) + assert payment_with_payment_method.success + + # delete payment-method + await payments_gateway_api.delete_payment_method(payment_method_id) + + with pytest.raises(PaymentsGatewayError) as err_info: + await payments_gateway_api.get_payment_method(payment_method_id) + + assert str(err_info.value) + assert err_info.value.operation_id == "PaymentsGatewayApi.get_payment_method" + + http_status_error = err_info.value.http_status_error + assert http_status_error.response.status_code == status.HTTP_404_NOT_FOUND + + if mock_payments_gateway_service_or_none: + # all defined payment-methods + for route in mock_payments_gateway_service_or_none.routes: + if route.name and "payment_method" in route.name: + assert route.called + + +async def test_payments_gateway_error_exception(): + async def _go(): + with _raise_as_payments_gateway_error(operation_id="foo"): + async with httpx.AsyncClient( + transport=ASGITransport(app=FastAPI()), + base_url="http://payments.testserver.io", + ) as client: + response = await client.post("/foo", params={"x": "3"}, json={"y": 12}) + response.raise_for_status() + + with pytest.raises(PaymentsGatewayError) as err_info: + await _go() + err = err_info.value + assert isinstance(err, PaymentsGatewayError) + + assert "curl -X POST" in err.get_detailed_message() + + +async def test_payments_gateway_get_batch_with_no_items( + app: FastAPI, + mock_payments_gateway_service_or_none: MockRouter | None, +): + payments_gateway_api: PaymentsGatewayApi = PaymentsGatewayApi.get_from_app_state( + app + ) + assert payments_gateway_api + + # tests issue found in https://github.com/ITISFoundation/appmotion-exchange/issues/16 + empty_list = [] + assert not await payments_gateway_api.get_many_payment_methods(empty_list) diff --git a/services/payments/tests/unit/test_services_resource_usage_tracker.py b/services/payments/tests/unit/test_services_resource_usage_tracker.py new file mode 100644 index 00000000000..1010f3e3b00 --- /dev/null +++ b/services/payments/tests/unit/test_services_resource_usage_tracker.py @@ -0,0 +1,79 @@ +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from datetime import datetime, timezone + 
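The gateway tests above exercise `_raise_as_payments_gateway_error`, which converts low-level `httpx` failures into a domain error that carries the operation id, the underlying `HTTPStatusError`, and a detailed (curl-like) description. A simplified sketch of that translation pattern, assuming nothing about the service's real implementation beyond what the assertions above show; the names `GatewayError` and `raise_as_gateway_error` are illustrative:

```python
# Hedged sketch of the error-translation pattern the tests above rely on.
from collections.abc import Iterator
from contextlib import contextmanager

import httpx


class GatewayError(Exception):
    """Domain error wrapping an httpx.HTTPStatusError raised by the gateway client."""

    def __init__(self, operation_id: str, http_status_error: httpx.HTTPStatusError):
        super().__init__(f"{operation_id} failed: {http_status_error}")
        self.operation_id = operation_id
        self.http_status_error = http_status_error

    def get_detailed_message(self) -> str:
        # the real implementation includes a copy-pasteable curl command;
        # here we only reconstruct method and URL of the failed request
        request = self.http_status_error.request
        return f"{self} [{request.method} {request.url}]"


@contextmanager
def raise_as_gateway_error(operation_id: str) -> Iterator[None]:
    try:
        yield
    except httpx.HTTPStatusError as err:
        raise GatewayError(operation_id=operation_id, http_status_error=err) from err
```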
+import pytest +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict +from respx import MockRouter +from simcore_service_payments.core.application import create_app +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.services.resource_usage_tracker import ( + ResourceUsageTrackerApi, + setup_resource_usage_tracker, +) + +MAX_TIME_FOR_APP_TO_STARTUP = 10 +MAX_TIME_FOR_APP_TO_SHUTDOWN = 10 + + +async def test_setup_rut_api(app_environment: EnvVarsDict, is_pdb_enabled: bool): + new_app = FastAPI() + new_app.state.settings = ApplicationSettings.create_from_envs() + with pytest.raises(AttributeError): + ResourceUsageTrackerApi.get_from_app_state(new_app) + + setup_resource_usage_tracker(new_app) + rut_api = ResourceUsageTrackerApi.get_from_app_state(new_app) + + assert rut_api is not None + assert rut_api.client + + async with LifespanManager( + new_app, + startup_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_STARTUP, + shutdown_timeout=None if is_pdb_enabled else MAX_TIME_FOR_APP_TO_SHUTDOWN, + ): + # start event called + assert not rut_api.client.is_closed + + # shutdown event + assert rut_api.client.is_closed + + +@pytest.fixture +def app( + app_environment: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, +): + # NOTE: overrides services/payments/tests/unit/conftest.py:app fixture + return create_app() + + +async def test_add_credits_to_wallet( + app: FastAPI, faker: Faker, mock_resoruce_usage_tracker_service_api: MockRouter +): + # test + rut_api = ResourceUsageTrackerApi.get_from_app_state(app) + + assert ( + await rut_api.create_credit_transaction( + product_name=faker.word(), + wallet_id=faker.pyint(), + wallet_name=faker.word(), + user_id=faker.pyint(), + user_email=faker.email(), + osparc_credits=100, + payment_transaction_id=faker.pystr(), + created_at=datetime.now(tz=timezone.utc), + ) + > 0 + ) diff --git a/services/payments/tests/unit/test_services_stripe.py b/services/payments/tests/unit/test_services_stripe.py new file mode 100644 index 00000000000..fec78997f53 --- /dev/null +++ b/services/payments/tests/unit/test_services_stripe.py @@ -0,0 +1,93 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +import pytest +from fastapi import FastAPI, status +from models_library.payments import StripeInvoiceID +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict +from respx import MockRouter +from simcore_service_payments.core.settings import ApplicationSettings +from simcore_service_payments.services.stripe import StripeApi, setup_stripe + + +async def test_setup_stripe_api(app_environment: EnvVarsDict): + new_app = FastAPI() + new_app.state.settings = ApplicationSettings.create_from_envs() + with pytest.raises(AttributeError): + StripeApi.get_from_app_state(new_app) + + setup_stripe(new_app) + stripe_api = StripeApi.get_from_app_state(new_app) + + assert stripe_api is not None + + +@pytest.fixture +def app_environment( + monkeypatch: pytest.MonkeyPatch, + app_environment: EnvVarsDict, + with_disabled_rabbitmq_and_rpc: None, + with_disabled_postgres: None, + external_stripe_environment: EnvVarsDict, +): + # set environs + return setenvs_from_dict( + monkeypatch, + {**app_environment, **external_stripe_environment}, + ) + + +async def 
test_stripe_responsiveness( + app: FastAPI, + mock_payments_stripe_api_base: MockRouter, +): + stripe_api: StripeApi = StripeApi.get_from_app_state(app) + assert stripe_api + + mock_payments_stripe_api_base.get( + path="/", + name="ping healthcheck", + ).respond(status.HTTP_503_SERVICE_UNAVAILABLE) + mock_payments_stripe_api_base.get( + path="/v1/products", + name="healthy healthcheck", + ).respond(status.HTTP_503_SERVICE_UNAVAILABLE) + + assert await stripe_api.ping() + assert not await stripe_api.is_healthy() + + mock_payments_stripe_api_base.get( + path="/", + name="ping healthcheck", + ).respond(status.HTTP_200_OK) + mock_payments_stripe_api_base.get( + path="/v1/products", + name="healthy healthcheck", + ).respond(status.HTTP_200_OK) + + assert await stripe_api.ping() + assert await stripe_api.is_healthy() + + +async def test_get_invoice( + app: FastAPI, + mock_stripe_or_none: MockRouter | None, + stripe_invoice_id: StripeInvoiceID, +): + stripe_api: StripeApi = StripeApi.get_from_app_state(app) + assert stripe_api + + assert await stripe_api.is_healthy() + + _invoice = await stripe_api.get_invoice( + stripe_invoice_id=StripeInvoiceID(stripe_invoice_id) + ) + assert _invoice + assert _invoice.hosted_invoice_url + + if mock_stripe_or_none: + assert mock_stripe_or_none.routes["list_products"].called + assert mock_stripe_or_none.routes["get_invoice"].called diff --git a/services/postgres/Makefile b/services/postgres/Makefile new file mode 100644 index 00000000000..f962ffec66c --- /dev/null +++ b/services/postgres/Makefile @@ -0,0 +1,14 @@ +include ../../scripts/common.Makefile + + +ifneq (,$(wildcard $(DOT_ENV_FILE))) + include $(DOT_ENV_FILE) + export $(shell sed 's/=.*//' $(DOT_ENV_FILE)) +endif + + + +scripts/%.sql: scripts/%.sql.template + @echo "Generating SQL script from '$<'..." 
+ @envsubst < $< > $@ + @echo "SQL script generated as '$@'" diff --git a/services/postgres/scripts/.gitignore b/services/postgres/scripts/.gitignore new file mode 100644 index 00000000000..9072771094f --- /dev/null +++ b/services/postgres/scripts/.gitignore @@ -0,0 +1,3 @@ +* +!.gitignore +!*.template.* diff --git a/services/postgres/scripts/create-readonly-user.sql.template b/services/postgres/scripts/create-readonly-user.sql.template new file mode 100644 index 00000000000..28b14f53d4f --- /dev/null +++ b/services/postgres/scripts/create-readonly-user.sql.template @@ -0,0 +1,22 @@ +-- SQL script to create a read-only user and grant privileges + + +--Create the read-only user with a password +CREATE USER ${POSTGRES_READONLY_USER} WITH PASSWORD '${POSTGRES_READONLY_PASSWORD}'; + +--Grant CONNECT privilege to the database (e.g., 'foo' is the database name) +GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${POSTGRES_READONLY_USER}; + +--Grant USAGE privilege on the **public** schema +GRANT USAGE ON SCHEMA public TO ${POSTGRES_READONLY_USER}; + +--Grant SELECT privilege on all existing tables and sequencies in the **public** schema +GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${POSTGRES_READONLY_USER}; +GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO ${POSTGRES_READONLY_USER}; + +--Ensure that future tables created in the public schema and sequencies will have SELECT privilege for the read-only user +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${POSTGRES_READONLY_USER}; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO ${POSTGRES_READONLY_USER}; + +-- Listing all users +SELECT * FROM pg_roles; diff --git a/services/postgres/scripts/remove-readonly-user.sql.template b/services/postgres/scripts/remove-readonly-user.sql.template new file mode 100644 index 00000000000..5a1435ed978 --- /dev/null +++ b/services/postgres/scripts/remove-readonly-user.sql.template @@ -0,0 +1,16 @@ +-- Revoke all privileges the user has on the public schema +REVOKE ALL PRIVILEGES ON SCHEMA public FROM ${POSTGRES_READONLY_USER}; + +-- Revoke all privileges the user has on tables and sequences in the public schema +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM ${POSTGRES_READONLY_USER}; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM ${POSTGRES_READONLY_USER}; + +-- Revoke any future privileges set via ALTER DEFAULT PRIVILEGES +ALTER DEFAULT PRIVILEGES IN SCHEMA public REVOKE ALL ON TABLES FROM ${POSTGRES_READONLY_USER}; +ALTER DEFAULT PRIVILEGES IN SCHEMA public REVOKE ALL ON SEQUENCES FROM ${POSTGRES_READONLY_USER}; + +-- Drop the user +DROP USER ${POSTGRES_READONLY_USER}; + +-- Listing all users +SELECT * FROM pg_roles; diff --git a/services/resource-usage-tracker/.env-devel b/services/resource-usage-tracker/.env-devel new file mode 100644 index 00000000000..e04ff6aeca0 --- /dev/null +++ b/services/resource-usage-tracker/.env-devel @@ -0,0 +1,12 @@ +RESOURCE_USAGE_TRACKER_DEV_FEATURES_ENABLED=1 + +LOG_LEVEL=DEBUG + +POSTGRES_USER=test +POSTGRES_PASSWORD=test +POSTGRES_DB=test +POSTGRES_HOST=localhost + +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED=1 +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC=300 +RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL=6 diff --git a/services/resource-usage-tracker/Dockerfile b/services/resource-usage-tracker/Dockerfile new file mode 100644 index 00000000000..fe26edf9dca --- /dev/null +++ b/services/resource-usage-tracker/Dockerfile @@ -0,0 +1,183 @@ +# syntax=docker/dockerfile:1 + +# Define 
arguments in the global scope +ARG PYTHON_VERSION="3.10.14" +ARG UV_VERSION="0.6" +FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_build + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-arm64 +# These environment variables are necessary because of https://github.com/astral-sh/uv/issues/6105 +# and until https://gitlab.com/qemu-project/qemu/-/issues/2846 gets fixed +ENV UV_CONCURRENT_INSTALLS=1 + +FROM python:${PYTHON_VERSION}-slim-bookworm AS base-amd64 + +FROM base-${TARGETARCH} AS base + +# +# USAGE: +# cd services/resource-usage-tracker +# docker build -f Dockerfile -t resource-usage-tracker:prod --target production ../../ +# docker run resource-usage-tracker:prod +# +# REQUIRED: context expected at ``osparc-simcore/`` folder because we need access to osparc-simcore/packages + +LABEL maintainer=sanderegg + +# for docker apt caching to work this needs to be added: [https://vsupalov.com/buildkit-cache-mount-dockerfile/] +RUN rm -f /etc/apt/apt.conf.d/docker-clean && \ + echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gosu \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* \ + # verify that the binary works + && gosu nobody true + +# simcore-user uid=8004(scu) gid=8004(scu) groups=8004(scu) +ENV SC_USER_ID=8004 \ + SC_USER_NAME=scu \ + SC_BUILD_TARGET=base \ + SC_BOOT_MODE=default + +RUN adduser \ + --uid ${SC_USER_ID} \ + --disabled-password \ + --gecos "" \ + --shell /bin/sh \ + --home /home/${SC_USER_NAME} \ + ${SC_USER_NAME} + + +# Sets utf-8 encoding for Python et al +ENV LANG=C.UTF-8 + +# Turns off writing .pyc files; superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 \ + VIRTUAL_ENV=/home/scu/.venv + +# Ensures that the python and pip executables used in the image will be +# those from our virtualenv. 
+ENV PATH="${VIRTUAL_ENV}/bin:$PATH" + +EXPOSE 8000 +EXPOSE 3000 + +# -------------------------- Build stage ------------------- +# Installs build/package management tools and third party dependencies +# +# + /build WORKDIR +# +FROM base AS build + +ENV SC_BUILD_TARGET=build + +RUN --mount=type=cache,target=/var/cache/apt,sharing=private \ + set -eux \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential + +# install UV https://docs.astral.sh/uv/guides/integration/docker/#installing-uv +COPY --from=uv_build /uv /uvx /bin/ + +# NOTE: python virtualenv is used here such that installed +# packages may be moved to production image easily by copying the venv +RUN uv venv "${VIRTUAL_ENV}" + + + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --upgrade \ + wheel \ + setuptools + +WORKDIR /build + +# install base 3rd party dependencies + + + +# --------------------------Prod-depends-only stage ------------------- +# This stage is for production only dependencies that get partially wiped out afterwards (final docker image concerns) +# +# + /build +# + services/resource-usage-tracker [scu:scu] WORKDIR +# +FROM build AS prod-only-deps + +ENV SC_BUILD_TARGET=prod-only-deps + +WORKDIR /build/services/resource-usage-tracker + +RUN \ + --mount=type=bind,source=packages,target=/build/packages,rw \ + --mount=type=bind,source=services/resource-usage-tracker,target=/build/services/resource-usage-tracker,rw \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip sync \ + requirements/prod.txt \ + && uv pip list + + +# --------------------------Production stage ------------------- +# Final cleanup up to reduce image size and startup setup +# Runs as scu (non-root user) +# +# + /home/scu $HOME = WORKDIR +# + services/resource-usage-tracker [scu:scu] +# +FROM base AS production + +ENV SC_BUILD_TARGET=production \ + SC_BOOT_MODE=production + +ENV PYTHONOPTIMIZE=TRUE +# https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode +ENV UV_COMPILE_BYTECODE=1 + +WORKDIR /home/scu + +# ensure home folder is read/writable for user scu +RUN chown -R scu /home/scu +# Starting from clean base image, copies pre-installed virtualenv from prod-only-deps +COPY --chown=scu:scu --from=prod-only-deps ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +# Copies booting scripts +COPY --chown=scu:scu services/resource-usage-tracker/docker services/resource-usage-tracker/docker +RUN chmod +x services/resource-usage-tracker/docker/*.sh + +# https://docs.docker.com/reference/dockerfile/#healthcheck +HEALTHCHECK \ + --interval=10s \ + --timeout=5s \ + --start-period=20s \ + --start-interval=1s \ + --retries=5 \ + CMD ["python3", "services/resource-usage-tracker/docker/healthcheck.py", "http://localhost:8000/"] + +ENTRYPOINT [ "/bin/sh", "services/resource-usage-tracker/docker/entrypoint.sh" ] +CMD ["/bin/sh", "services/resource-usage-tracker/docker/boot.sh"] + + +# --------------------------Development stage ------------------- +# Source code accessible in host but runs in container +# Runs as myu with same gid/uid as host +# Placed at the end to speed-up the build if images targeting production +# +# + /devel WORKDIR +# + services (mounted volume) +# +FROM build AS development + +ENV SC_BUILD_TARGET=development \ + SC_DEVEL_MOUNT=/devel/services/resource-usage-tracker + +WORKDIR /devel + +RUN chown -R scu:scu "${VIRTUAL_ENV}" + +ENTRYPOINT ["/bin/sh", "services/resource-usage-tracker/docker/entrypoint.sh"] +CMD ["/bin/sh", "services/resource-usage-tracker/docker/boot.sh"] diff --git 
a/services/resource-usage-tracker/Makefile b/services/resource-usage-tracker/Makefile new file mode 100644 index 00000000000..d6d8745bc13 --- /dev/null +++ b/services/resource-usage-tracker/Makefile @@ -0,0 +1,13 @@ +# +# DEVELOPMENT recipes for resource-usage-tracker +# +include ../../scripts/common.Makefile +include ../../scripts/common-service.Makefile + + + +.PHONY: openapi.json +openapi-specs: openapi.json +openapi.json: ## produces openapi.json + # generating openapi specs file (need to have the environment set for this) + @python3 -c "import json; from $(APP_PACKAGE_NAME).main import *; print( json.dumps(the_app.openapi(), indent=2) )" > $@ diff --git a/services/resource-usage-tracker/README.md b/services/resource-usage-tracker/README.md new file mode 100644 index 00000000000..50efdf6221e --- /dev/null +++ b/services/resource-usage-tracker/README.md @@ -0,0 +1,13 @@ +# resource usage tracker + + +Service that collects and stores the computational resource usage of osparc-simcore. It also computes the osparc credits consumed. + + +## Credit computation (Collections) +- **PricingPlan**: + Describes the overall billing plan/package. A pricing plan can be connected to one or more services; a dedicated pricing plan may also be defined for billing storage costs. +- **PricingUnit**: + Specifies the units/tiers within a pricing plan, each denoting a different option (resources and cost) for running services or billing storage. For example, a pricing plan might offer three resource-based tiers: SMALL, MEDIUM, and LARGE. +- **PricingUnitCreditCost**: + Defines the credit cost for each unit, which can change over time, allowing for pricing flexibility. diff --git a/services/resource-usage-tracker/VERSION b/services/resource-usage-tracker/VERSION new file mode 100644 index 00000000000..3eefcb9dd5b --- /dev/null +++ b/services/resource-usage-tracker/VERSION @@ -0,0 +1 @@ +1.0.0 diff --git a/services/resource-usage-tracker/docker/boot.sh b/services/resource-usage-tracker/docker/boot.sh new file mode 100755 index 00000000000..fe90bb17050 --- /dev/null +++ b/services/resource-usage-tracker/docker/boot.sh @@ -0,0 +1,66 @@ +#!/bin/sh +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " + +echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" + +# +# DEVELOPMENT MODE +# +# - prints environ info +# - installs requirements in mounted volume +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "Environment :" + printenv | sed 's/=/: /' | sed 's/^/ /' | sort + echo "$INFO" "Python :" + python --version | sed 's/^/ /' + command -v python | sed 's/^/ /' + + cd services/resource-usage-tracker + uv pip --quiet sync requirements/dev.txt + cd - + echo "$INFO" "PIP :" + uv pip list +fi + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + # NOTE: production does NOT pre-install debugpy + if command -v uv >/dev/null 2>&1; then + uv pip install debugpy + else + pip install debugpy + fi +fi + +# +# RUNNING application +# + +APP_LOG_LEVEL=${RESOURCE_USAGE_TRACKER_LOGLEVEL:-${LOG_LEVEL:-${LOGLEVEL:-INFO}}} +SERVER_LOG_LEVEL=$(echo "${APP_LOG_LEVEL}" | tr '[:upper:]' '[:lower:]') +echo "$INFO" "Log-level app/server: $APP_LOG_LEVEL/$SERVER_LOG_LEVEL" + +if [ "${SC_BOOT_MODE}" = "debug" ]; then + reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! 
-path "*.*" -exec echo '--reload-dir {} \' \;) + + exec sh -c " + cd services/resource-usage-tracker/src/simcore_service_resource_usage_tracker && \ + python -Xfrozen_modules=off -m debugpy --listen 0.0.0.0:${RESOURCE_USAGE_TRACKER_REMOTE_DEBUGGING_PORT} -m uvicorn main:the_app \ + --host 0.0.0.0 \ + --reload \ + $reload_dir_packages + --reload-dir . \ + --log-level \"${SERVER_LOG_LEVEL}\" + " +else + exec uvicorn simcore_service_resource_usage_tracker.main:the_app \ + --host 0.0.0.0 \ + --log-level "${SERVER_LOG_LEVEL}" +fi diff --git a/services/resource-usage-tracker/docker/entrypoint.sh b/services/resource-usage-tracker/docker/entrypoint.sh new file mode 100755 index 00000000000..e89ad5408a3 --- /dev/null +++ b/services/resource-usage-tracker/docker/entrypoint.sh @@ -0,0 +1,86 @@ +#!/bin/sh +# +# - Executes *inside* of the container upon start as --user [default root] +# - Notice that the container *starts* as --user [default root] but +# *runs* as non-root user [scu] +# +set -o errexit +set -o nounset + +IFS=$(printf '\n\t') + +INFO="INFO: [$(basename "$0")] " +WARNING="WARNING: [$(basename "$0")] " +ERROR="ERROR: [$(basename "$0")] " + +echo "$INFO" "Entrypoint for stage ${SC_BUILD_TARGET} ..." +echo "$INFO" "User :$(id "$(whoami)")" +echo "$INFO" "Workdir : $(pwd)" +echo "$INFO" "User : $(id scu)" +echo "$INFO" "python : $(command -v python)" +echo "$INFO" "pip : $(command -v pip)" + +# +# DEVELOPMENT MODE +# - expects docker run ... -v $(pwd):$SC_DEVEL_MOUNT +# - mounts source folders +# - deduces host's uid/gip and assigns to user within docker +# +if [ "${SC_BUILD_TARGET}" = "development" ]; then + echo "$INFO" "development mode detected..." + stat "${SC_DEVEL_MOUNT}" >/dev/null 2>&1 || + (echo "$ERROR" "You must mount '$SC_DEVEL_MOUNT' to deduce user and group ids" && exit 1) + + echo "$INFO" "setting correct user id/group id..." + HOST_USERID=$(stat --format=%u "${SC_DEVEL_MOUNT}") + HOST_GROUPID=$(stat --format=%g "${SC_DEVEL_MOUNT}") + CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut --delimiter=: --fields=1) + if [ "$HOST_USERID" -eq 0 ]; then + echo "$WARNING" "Folder mounted owned by root user... adding $SC_USER_NAME to root..." + adduser "$SC_USER_NAME" root + else + echo "$INFO" "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..." + # take host's credentials in $SC_USER_NAME + if [ -z "$CONT_GROUPNAME" ]; then + echo "$WARNING" "Creating new group grp$SC_USER_NAME" + CONT_GROUPNAME=grp$SC_USER_NAME + addgroup --gid "$HOST_GROUPID" "$CONT_GROUPNAME" + else + echo "$INFO" "group already exists" + fi + echo "$INFO" "Adding $SC_USER_NAME to group $CONT_GROUPNAME..." 
+ adduser "$SC_USER_NAME" "$CONT_GROUPNAME" + + echo "$WARNING" "Changing ownership [this could take some time]" + echo "$INFO" "Changing $SC_USER_NAME:$SC_USER_NAME ($SC_USER_ID:$SC_USER_ID) to $SC_USER_NAME:$CONT_GROUPNAME ($HOST_USERID:$HOST_GROUPID)" + usermod --uid "$HOST_USERID" --gid "$HOST_GROUPID" "$SC_USER_NAME" + + echo "$INFO" "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -group "$SC_USER_ID" -exec chgrp --no-dereference "$CONT_GROUPNAME" {} \; + # change user property of files already around + echo "$INFO" "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME" + find / -path /proc -prune -o -user "$SC_USER_ID" -exec chown --no-dereference "$SC_USER_NAME" {} \; + fi +fi + +# Appends docker group if socket is mounted +DOCKER_MOUNT=/var/run/docker.sock +if stat $DOCKER_MOUNT >/dev/null 2>&1; then + echo "$INFO detected docker socket is mounted, adding user to group..." + GROUPID=$(stat --format=%g $DOCKER_MOUNT) + GROUPNAME=scdocker + + if ! addgroup --gid "$GROUPID" $GROUPNAME >/dev/null 2>&1; then + echo "$WARNING docker group with $GROUPID already exists, getting group name..." + # if group already exists in container, then reuse name + GROUPNAME=$(getent group "${GROUPID}" | cut --delimiter=: --fields=1) + echo "$WARNING docker group with $GROUPID has name $GROUPNAME" + fi + adduser "$SC_USER_NAME" "$GROUPNAME" +fi + +echo "$INFO Starting $* ..." +echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")" +echo " local dir : $(ls -al)" + +exec gosu "$SC_USER_NAME" "$@" diff --git a/services/resource-usage-tracker/docker/healthcheck.py b/services/resource-usage-tracker/docker/healthcheck.py new file mode 100755 index 00000000000..1c036a49607 --- /dev/null +++ b/services/resource-usage-tracker/docker/healthcheck.py @@ -0,0 +1,41 @@ +#!/bin/python +""" Healthcheck script to run inside docker + +Example of usage in a Dockerfile +``` + COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py + HEALTHCHECK --interval=30s \ + --timeout=30s \ + --start-period=20s \ + --start-interval=1s \ + --retries=3 \ + CMD python3 docker/healthcheck.py http://localhost:8080/v0/ +``` + +Q&A: + 1. why not to use curl instead of a python script? + - SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/ +""" + +import os +import sys +from contextlib import suppress +from urllib.request import urlopen + +# Disabled if boots with debugger (e.g. 
debug, pdb-debug, debug-ptvsd, debugpy, etc) +SC_BOOT_MODE = os.environ.get("SC_BOOT_MODE", "") + +# Adds a base-path if defined in environ +SIMCORE_NODE_BASEPATH = os.environ.get("SIMCORE_NODE_BASEPATH", "") + + +def is_service_healthy() -> bool: + if "debug" in SC_BOOT_MODE.lower(): + return True + + with suppress(Exception), urlopen(f"{sys.argv[1]}{SIMCORE_NODE_BASEPATH}") as f: + return f.getcode() == 200 + return False + + +sys.exit(os.EX_OK if is_service_healthy() else os.EX_UNAVAILABLE) diff --git a/services/resource-usage-tracker/openapi.json b/services/resource-usage-tracker/openapi.json new file mode 100644 index 00000000000..cef757856bf --- /dev/null +++ b/services/resource-usage-tracker/openapi.json @@ -0,0 +1,621 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "simcore-service-resource-usage-tracker web API", + "description": "Service that collects and stores computational resources usage used in osparc-simcore", + "version": "1.0.0" + }, + "paths": { + "/": { + "get": { + "summary": "Healthcheck", + "operationId": "healthcheck__get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + }, + "/v1/meta": { + "get": { + "tags": [ + "meta" + ], + "summary": "Get Service Metadata", + "operationId": "get_service_metadata_v1_meta_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_Meta" + } + } + } + } + } + } + }, + "/v1/credit-transactions/credits:sum": { + "post": { + "tags": [ + "credit-transactions" + ], + "summary": "Sum total available credits in the wallet", + "operationId": "get_credit_transactions_sum_v1_credit_transactions_credits_sum_post", + "parameters": [ + { + "name": "product_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Product Name" + } + }, + { + "name": "wallet_id", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WalletTotalCredits" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/credit-transactions": { + "post": { + "tags": [ + "credit-transactions" + ], + "summary": "Top up credits for specific wallet", + "operationId": "create_credit_transaction_v1_credit_transactions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreditTransactionCreateBody" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreditTransactionCreated" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/services/{service_key}/{service_version}/pricing-plan": { + "get": { + "tags": [ + "pricing-plans" + ], + "summary": "Get Service Default Pricing Plan", + "description": "Returns a default pricing plan with pricing details for a specified service", + "operationId": 
"get_service_default_pricing_plan", + "parameters": [ + { + "name": "service_key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^simcore/services/((comp|dynamic|frontend))/([a-z0-9][a-z0-9_.-]*/)*([a-z0-9-_]+[a-z0-9])$", + "title": "Service Key" + } + }, + { + "name": "service_version", + "in": "path", + "required": true, + "schema": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)(\\.(0|[1-9]\\d*)){2}(-(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*)(\\.(0|[1-9]\\d*|\\d*[-a-zA-Z][-\\da-zA-Z]*))*)?(\\+[-\\da-zA-Z]+(\\.[-\\da-zA-Z-]+)*)?$", + "title": "Service Version" + } + }, + { + "name": "product_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Product Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RutPricingPlanGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}": { + "get": { + "tags": [ + "pricing-plans" + ], + "summary": "Get Pricing Plan Unit", + "description": "Returns a list of service pricing plans with pricing details for a specified service", + "operationId": "list_service_pricing_plans", + "parameters": [ + { + "name": "pricing_plan_id", + "in": "path", + "required": true, + "schema": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Plan Id", + "minimum": 0 + } + }, + { + "name": "pricing_unit_id", + "in": "path", + "required": true, + "schema": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Unit Id", + "minimum": 0 + } + }, + { + "name": "product_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Product Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RutPricingUnitGet" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "CreditTransactionCreateBody": { + "properties": { + "product_name": { + "type": "string", + "title": "Product Name" + }, + "wallet_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 + }, + "wallet_name": { + "type": "string", + "title": "Wallet Name" + }, + "user_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "User Id", + "minimum": 0 + }, + "user_email": { + "type": "string", + "title": "User Email" + }, + "osparc_credits": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "title": "Osparc Credits" + }, + "payment_transaction_id": { + "type": "string", + "title": "Payment Transaction Id" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + } + }, + "type": "object", + "required": [ + "product_name", + "wallet_id", + "wallet_name", + "user_id", + "user_email", + "osparc_credits", + "payment_transaction_id", + "created_at" + ], + "title": "CreditTransactionCreateBody" + }, + "CreditTransactionCreated": { + "properties": { + "credit_transaction_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Credit 
Transaction Id", + "minimum": 0 + } + }, + "type": "object", + "required": [ + "credit_transaction_id" + ], + "title": "CreditTransactionCreated", + "description": "Response Create Credit Transaction V1 Credit Transactions Post" + }, + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "HardwareInfo": { + "properties": { + "aws_ec2_instances": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Aws Ec2 Instances" + } + }, + "type": "object", + "required": [ + "aws_ec2_instances" + ], + "title": "HardwareInfo" + }, + "PricingPlanClassification": { + "type": "string", + "enum": [ + "TIER", + "LICENSE" + ], + "title": "PricingPlanClassification" + }, + "RutPricingPlanGet": { + "properties": { + "pricing_plan_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Plan Id", + "minimum": 0 + }, + "display_name": { + "type": "string", + "title": "Display Name" + }, + "description": { + "type": "string", + "title": "Description" + }, + "classification": { + "$ref": "#/components/schemas/PricingPlanClassification" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "pricing_plan_key": { + "type": "string", + "title": "Pricing Plan Key" + }, + "pricing_units": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/RutPricingUnitGet" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Pricing Units" + }, + "is_active": { + "type": "boolean", + "title": "Is Active" + } + }, + "type": "object", + "required": [ + "pricing_plan_id", + "display_name", + "description", + "classification", + "created_at", + "pricing_plan_key", + "pricing_units", + "is_active" + ], + "title": "RutPricingPlanGet" + }, + "RutPricingUnitGet": { + "properties": { + "pricing_unit_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Pricing Unit Id", + "minimum": 0 + }, + "unit_name": { + "type": "string", + "title": "Unit Name" + }, + "unit_extra_info": { + "anyOf": [ + { + "$ref": "#/components/schemas/UnitExtraInfoTier" + }, + { + "$ref": "#/components/schemas/UnitExtraInfoLicense" + } + ], + "title": "Unit Extra Info" + }, + "current_cost_per_unit": { + "type": "string", + "title": "Current Cost Per Unit" + }, + "current_cost_per_unit_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Current Cost Per Unit Id", + "minimum": 0 + }, + "default": { + "type": "boolean", + "title": "Default" + }, + "specific_info": { + "$ref": "#/components/schemas/HardwareInfo" + } + }, + "type": "object", + "required": [ + "pricing_unit_id", + "unit_name", + "unit_extra_info", + "current_cost_per_unit", + "current_cost_per_unit_id", + "default", + "specific_info" + ], + "title": "RutPricingUnitGet" + }, + "UnitExtraInfoLicense": { + "properties": { + "num_of_seats": { + "type": "integer", + "minimum": 0, + "title": "Num Of Seats" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "num_of_seats" + ], + "title": "UnitExtraInfoLicense", + "description": "Custom information that is propagated to the frontend. Defined fields are mandatory." 
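To make the pricing collections concrete (see the resource-usage-tracker README earlier in this diff: PricingPlan, PricingUnit, PricingUnitCreditCost), here is a hypothetical payload matching the `RutPricingPlanGet` / `RutPricingUnitGet` schemas defined above, as returned by `GET /v1/services/{service_key}/{service_version}/pricing-plan`. All identifiers, costs and EC2 instance types are invented for illustration:

```python
# Hypothetical example of a TIER pricing plan (values invented, shape follows the
# RutPricingPlanGet / RutPricingUnitGet / UnitExtraInfoTier / HardwareInfo schemas above)
example_pricing_plan = {
    "pricing_plan_id": 1,
    "display_name": "Sleepers Tiers",
    "description": "Tiered pricing for the sleeper service",
    "classification": "TIER",
    "created_at": "2024-01-01T00:00:00Z",
    "pricing_plan_key": "sleepers-tiers",
    "is_active": True,
    "pricing_units": [
        {
            "pricing_unit_id": 10,
            "unit_name": "SMALL",
            # UnitExtraInfoTier: CPU/RAM/VRAM are mandatory, extra keys are allowed
            "unit_extra_info": {"CPU": 1, "RAM": 2, "VRAM": 0},
            # PricingUnitCreditCost: the currently valid credit cost for this unit
            "current_cost_per_unit": "5.00",
            "current_cost_per_unit_id": 100,
            "default": True,
            # HardwareInfo
            "specific_info": {"aws_ec2_instances": ["t3.medium"]},
        }
    ],
}
```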
+ }, + "UnitExtraInfoTier": { + "properties": { + "CPU": { + "type": "integer", + "minimum": 0, + "title": "Cpu" + }, + "RAM": { + "type": "integer", + "minimum": 0, + "title": "Ram" + }, + "VRAM": { + "type": "integer", + "minimum": 0, + "title": "Vram" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "CPU", + "RAM", + "VRAM" + ], + "title": "UnitExtraInfoTier", + "description": "Custom information that is propagated to the frontend. Defined fields are mandatory." + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", + "required": [ + "loc", + "msg", + "type" + ], + "title": "ValidationError" + }, + "WalletTotalCredits": { + "properties": { + "wallet_id": { + "type": "integer", + "exclusiveMinimum": true, + "title": "Wallet Id", + "minimum": 0 + }, + "available_osparc_credits": { + "type": "number", + "title": "Available Osparc Credits" + } + }, + "type": "object", + "required": [ + "wallet_id", + "available_osparc_credits" + ], + "title": "WalletTotalCredits" + }, + "_Meta": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "version": { + "type": "string", + "title": "Version" + }, + "docs_url": { + "type": "string", + "title": "Docs Url" + } + }, + "type": "object", + "required": [ + "name", + "version", + "docs_url" + ], + "title": "_Meta" + } + } + } +} diff --git a/services/resource-usage-tracker/requirements/Makefile b/services/resource-usage-tracker/requirements/Makefile new file mode 100644 index 00000000000..e1319af9d7f --- /dev/null +++ b/services/resource-usage-tracker/requirements/Makefile @@ -0,0 +1,10 @@ +# +# Targets to pip-compile requirements +# +include ../../../requirements/base.Makefile + +# Add here any extra explicit dependency: e.g. 
_migration.txt: _base.txt + +_base.in: constraints.txt +_test.in: constraints.txt +_tools.in: constraints.txt diff --git a/services/resource-usage-tracker/requirements/_base.in b/services/resource-usage-tracker/requirements/_base.in new file mode 100644 index 00000000000..77cf7864ca6 --- /dev/null +++ b/services/resource-usage-tracker/requirements/_base.in @@ -0,0 +1,23 @@ +# +# Specifies third-party dependencies for 'services/resource-usage-tracker/src' +# +# NOTE: ALL version constraints MUST be commented +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# intra-repo required dependencies +--requirement ../../../packages/aws-library/requirements/_base.in +--requirement ../../../packages/common-library/requirements/_base.in +--requirement ../../../packages/models-library/requirements/_base.in +--requirement ../../../packages/settings-library/requirements/_base.in +--requirement ../../../packages/postgres-database/requirements/_base.in +# service-library[fastapi] +--requirement ../../../packages/service-library/requirements/_base.in +--requirement ../../../packages/service-library/requirements/_fastapi.in + + +aiocache +packaging +prometheus_api_client +shortuuid +typer[all] diff --git a/services/resource-usage-tracker/requirements/_base.txt b/services/resource-usage-tracker/requirements/_base.txt new file mode 100644 index 00000000000..9a87a75c05c --- /dev/null +++ b/services/resource-usage-tracker/requirements/_base.txt @@ -0,0 +1,950 @@ +aio-pika==9.4.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aioboto3==14.3.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +aiobotocore==2.22.0 + # via aioboto3 +aiocache==0.12.2 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/_base.in +aiodebug==2.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiodocker==0.21.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +aiofiles==23.2.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aioboto3 +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.11.18 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # 
aiobotocore + # aiodocker +aioitertools==0.11.0 + # via aiobotocore +aiormq==6.8.0 + # via aio-pika +aiosignal==1.3.1 + # via aiohttp +alembic==1.13.1 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +annotated-types==0.7.0 + # via pydantic +anyio==4.3.0 + # via + # fast-depends + # faststream + # httpx + # starlette + # watchfiles +arrow==1.3.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +asgiref==3.8.1 + # via opentelemetry-instrumentation-asgi +async-timeout==4.0.3 + # via asyncpg +asyncpg==0.29.0 + # via sqlalchemy +attrs==23.2.0 + # via + # aiohttp + # jsonschema + # referencing +boto3==1.37.3 + # via aiobotocore +botocore==1.37.3 + # via + # aiobotocore + # boto3 + # s3transfer +botocore-stubs==1.34.69 + # via types-aiobotocore +certifi==2024.2.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # httpcore + # httpx + # requests +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # rich-toolkit + # typer + # uvicorn +contourpy==1.2.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +dateparser==1.2.0 + # via prometheus-api-client +deprecated==1.2.14 + # via + # opentelemetry-api + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http + # opentelemetry-semantic-conventions +dnspython==2.6.1 + # via email-validator +email-validator==2.1.1 + # via + # fastapi + # pydantic +fast-depends==2.4.12 + # via faststream +fastapi==0.115.12 + # via + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi-lifespan-manager +fastapi-cli==0.0.7 + # via fastapi +fastapi-lifespan-manager==0.1.4 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +faststream==0.5.31 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +fonttools==4.50.0 + # via matplotlib +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +googleapis-common-protos==1.65.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +greenlet==3.0.3 + # via sqlalchemy +grpcio==1.66.0 + # via opentelemetry-exporter-otlp-proto-grpc +h11==0.14.0 + # 
via + # httpcore + # uvicorn +h2==4.2.0 + # via httpx +hpack==4.1.0 + # via h2 +httmock==1.4.0 + # via prometheus-api-client +httpcore==1.0.4 + # via httpx +httptools==0.6.4 + # via uvicorn +httpx==0.27.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/service-library/requirements/_fastapi.in + # fastapi +hyperframe==6.1.0 + # via h2 +idna==3.6 + # via + # anyio + # email-validator + # httpx + # requests + # yarl +importlib-metadata==8.0.0 + # via opentelemetry-api +jinja2==3.1.6 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi +jmespath==1.0.1 + # via + # aiobotocore + # boto3 + # botocore +jsonschema==4.21.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in +jsonschema-specifications==2023.7.1 + # via jsonschema +kiwisolver==1.4.5 + # via matplotlib +mako==1.3.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # alembic +markdown-it-py==3.0.0 + # via rich +markupsafe==3.0.2 + # via + # jinja2 + # mako +matplotlib==3.8.3 + # via prometheus-api-client +mdurl==0.1.2 + # via markdown-it-py +multidict==6.0.5 + # via + # aiobotocore + # aiohttp + # yarl +numpy==1.26.4 + # via + # contourpy + # matplotlib + # pandas + # prometheus-api-client +opentelemetry-api==1.26.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # 
opentelemetry-exporter-otlp-proto-http + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-propagator-aws-xray + # opentelemetry-sdk + # opentelemetry-semantic-conventions +opentelemetry-exporter-otlp==1.26.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-exporter-otlp-proto-common==1.26.0 + # via + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-exporter-otlp-proto-grpc==1.26.0 + # via opentelemetry-exporter-otlp +opentelemetry-exporter-otlp-proto-http==1.26.0 + # via opentelemetry-exporter-otlp +opentelemetry-instrumentation==0.47b0 + # via + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-logging + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests +opentelemetry-instrumentation-aio-pika==0.47b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-asgi==0.47b0 + # via opentelemetry-instrumentation-fastapi +opentelemetry-instrumentation-asyncpg==0.47b0 + # via -r requirements/../../../packages/postgres-database/requirements/_base.in +opentelemetry-instrumentation-botocore==0.47b0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +opentelemetry-instrumentation-fastapi==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-httpx==0.47b0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +opentelemetry-instrumentation-logging==0.47b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-redis==0.47b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-instrumentation-requests==0.47b0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +opentelemetry-propagator-aws-xray==1.0.1 + # via opentelemetry-instrumentation-botocore +opentelemetry-proto==1.26.0 + # via + # opentelemetry-exporter-otlp-proto-common + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-sdk==1.26.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/_base.in + # opentelemetry-exporter-otlp-proto-grpc + # opentelemetry-exporter-otlp-proto-http +opentelemetry-semantic-conventions==0.47b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-asyncpg + # opentelemetry-instrumentation-botocore + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-redis + # opentelemetry-instrumentation-requests + # opentelemetry-sdk +opentelemetry-util-http==0.47b0 + # via + # opentelemetry-instrumentation-asgi + # opentelemetry-instrumentation-fastapi + # opentelemetry-instrumentation-httpx + # opentelemetry-instrumentation-requests +orjson==3.10.0 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +packaging==24.0 + # via + # -r requirements/_base.in + # matplotlib +pamqp==3.3.0 + # via aiormq +pandas==2.2.1 + # via prometheus-api-client +pillow==10.2.0 + # via matplotlib +prometheus-api-client==0.5.5 + # via -r requirements/_base.in +prometheus-client==0.20.0 + # via -r requirements/../../../packages/service-library/requirements/_fastapi.in +propcache==0.3.1 + # via + # aiohttp + # yarl +protobuf==4.25.4 + # via + # googleapis-common-protos + # opentelemetry-proto +psutil==6.0.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +psycopg2-binary==2.9.9 + # via sqlalchemy +pycryptodome==3.21.0 + # via stream-zip +pydantic==2.10.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # fast-depends + # fastapi + # pydantic-extra-types + # pydantic-settings +pydantic-core==2.27.1 + # via pydantic +pydantic-extra-types==2.9.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/_base.in + # -r 
requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/_base.in +pydantic-settings==2.6.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt 
+ # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in +pygments==2.17.2 + # via rich +pyinstrument==4.6.2 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +pyparsing==3.1.2 + # via matplotlib +python-dateutil==2.9.0.post0 + # via + # aiobotocore + # arrow + # botocore + # dateparser + # matplotlib + # pandas +python-dotenv==1.0.1 + # via + # pydantic-settings + # uvicorn +python-multipart==0.0.20 + # via fastapi +pytz==2024.1 + # via + # dateparser + # pandas +pyyaml==6.0.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # uvicorn +redis==5.2.1 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +referencing==0.29.3 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + 
# -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # jsonschema + # jsonschema-specifications +regex==2023.12.25 + # via dateparser +requests==2.32.2 + # via + # httmock + # opentelemetry-exporter-otlp-proto-http + # prometheus-api-client +rich==13.7.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # rich-toolkit + # typer +rich-toolkit==0.14.7 + # via fastapi-cli +rpds-py==0.18.0 + # via + # jsonschema + # referencing +s3transfer==0.11.3 + # via boto3 +setuptools==74.0.0 + # via opentelemetry-instrumentation +sh==2.0.6 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +shellingham==1.5.4 + # via typer +shortuuid==1.0.13 + # via -r requirements/_base.in +six==1.16.0 + # via python-dateutil +sniffio==1.3.1 + # via + # anyio + # httpx +sqlalchemy==1.4.52 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # alembic +starlette==0.41.2 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # fastapi 
+stream-zip==0.0.83 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +tenacity==8.5.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +toolz==0.12.1 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +tqdm==4.66.2 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in +typer==0.12.3 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/../../../packages/settings-library/requirements/_base.in + # -r requirements/_base.in + # fastapi-cli +types-aiobotocore==2.19.0 + # via -r requirements/../../../packages/aws-library/requirements/_base.in +types-aiobotocore-ec2==2.19.0 + # via types-aiobotocore +types-aiobotocore-s3==2.19.0.post1 + # via types-aiobotocore +types-aiobotocore-ssm==2.19.0 + # via types-aiobotocore +types-awscrt==0.20.5 + # via botocore-stubs +types-python-dateutil==2.9.0.20240316 + # via arrow +typing-extensions==4.12.2 + # via + # aiodebug + # aiodocker + # alembic + # fastapi + # faststream + # opentelemetry-sdk + # pydantic + # pydantic-core + # rich-toolkit + # typer + # types-aiobotocore + # types-aiobotocore-ec2 + # types-aiobotocore-s3 + # types-aiobotocore-ssm +tzdata==2024.1 + # via pandas +tzlocal==5.2 + # via dateparser +urllib3==2.2.3 + # via + # -c requirements/../../../packages/aws-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c 
requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/aws-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/postgres-database/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/models-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/service-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../packages/common-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../packages/settings-library/requirements/../../../requirements/constraints.txt + # -c requirements/../../../requirements/constraints.txt + # botocore + # requests +uvicorn==0.34.2 + # via + # fastapi + # fastapi-cli +uvloop==0.21.0 + # via uvicorn +watchfiles==0.21.0 + # via uvicorn +websockets==12.0 + # via uvicorn +wrapt==1.16.0 + # via + # aiobotocore + # deprecated + # opentelemetry-instrumentation + # opentelemetry-instrumentation-aio-pika + # opentelemetry-instrumentation-redis +yarl==1.20.0 + # via + # -r requirements/../../../packages/aws-library/requirements/../../../packages/service-library/requirements/_base.in + # -r requirements/../../../packages/postgres-database/requirements/_base.in + # -r requirements/../../../packages/service-library/requirements/_base.in + # aio-pika + # aiohttp + # aiormq +zipp==3.20.1 + # via importlib-metadata diff --git a/services/resource-usage-tracker/requirements/_test.in b/services/resource-usage-tracker/requirements/_test.in new file mode 100644 index 
00000000000..fc35c736ef8 --- /dev/null +++ b/services/resource-usage-tracker/requirements/_test.in @@ -0,0 +1,31 @@ +# +# Specifies dependencies required to run 'services/resource-usage-tracker/test' +# both for unit and integration tests!! +# +--constraint ../../../requirements/constraints.txt +--constraint ./constraints.txt + +# Adds base AS CONSTRAINT specs, not requirement. +# - Resulting _test.txt is a frozen list of EXTRA packages for testing, besides _base.txt +# +--constraint _base.txt + +alembic # migration due to pytest_simcore.postgres_service +asgi-lifespan +coverage +docker +faker +httpx +fakeredis[lua] +moto[server] +pytest +pytest-asyncio +pytest-cov +pytest-mock +pytest-runner +pytest-sugar +python-dotenv +requests-mock +respx +sqlalchemy[mypy] # adds Mypy / Pep-484 Support for ORM Mappings SEE https://docs.sqlalchemy.org/en/20/orm/extensions/mypy.html +types-requests diff --git a/services/resource-usage-tracker/requirements/_test.txt b/services/resource-usage-tracker/requirements/_test.txt new file mode 100644 index 00000000000..9c229a95f2d --- /dev/null +++ b/services/resource-usage-tracker/requirements/_test.txt @@ -0,0 +1,340 @@ +alembic==1.13.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +annotated-types==0.7.0 + # via + # -c requirements/_base.txt + # pydantic +antlr4-python3-runtime==4.13.2 + # via moto +anyio==4.3.0 + # via + # -c requirements/_base.txt + # httpx +asgi-lifespan==2.1.0 + # via -r requirements/_test.in +attrs==23.2.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +aws-sam-translator==1.95.0 + # via cfn-lint +aws-xray-sdk==2.14.0 + # via moto +blinker==1.9.0 + # via flask +boto3==1.37.3 + # via + # -c requirements/_base.txt + # aws-sam-translator + # moto +botocore==1.37.3 + # via + # -c requirements/_base.txt + # aws-xray-sdk + # boto3 + # moto + # s3transfer +certifi==2024.2.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # httpcore + # httpx + # requests +cffi==1.17.1 + # via cryptography +cfn-lint==1.27.0 + # via moto +charset-normalizer==3.3.2 + # via + # -c requirements/_base.txt + # requests +click==8.1.7 + # via + # -c requirements/_base.txt + # flask +coverage==7.6.12 + # via + # -r requirements/_test.in + # pytest-cov +cryptography==44.0.2 + # via + # -c requirements/../../../requirements/constraints.txt + # joserfc + # moto +docker==7.1.0 + # via + # -r requirements/_test.in + # moto +faker==36.1.1 + # via -r requirements/_test.in +fakeredis==2.27.0 + # via -r requirements/_test.in +flask==3.1.0 + # via + # flask-cors + # moto +flask-cors==5.0.1 + # via moto +graphql-core==3.2.6 + # via moto +greenlet==3.0.3 + # via + # -c requirements/_base.txt + # sqlalchemy +h11==0.14.0 + # via + # -c requirements/_base.txt + # httpcore +httpcore==1.0.4 + # via + # -c requirements/_base.txt + # httpx +httpx==0.27.0 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in + # respx +idna==3.6 + # via + # -c requirements/_base.txt + # anyio + # httpx + # requests +iniconfig==2.0.0 + # via pytest +itsdangerous==2.2.0 + # via flask +jinja2==3.1.6 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # flask + # moto +jmespath==1.0.1 + # via + # -c requirements/_base.txt + # boto3 + # botocore +joserfc==1.0.4 + # via moto +jsonpatch==1.33 + # via cfn-lint +jsonpath-ng==1.7.0 + # via moto +jsonpointer==3.0.0 + # via jsonpatch +jsonschema==4.21.1 + # via + # -c 
requirements/_base.txt + # aws-sam-translator + # openapi-schema-validator + # openapi-spec-validator +jsonschema-path==0.3.4 + # via openapi-spec-validator +jsonschema-specifications==2023.7.1 + # via + # -c requirements/_base.txt + # jsonschema + # openapi-schema-validator +lazy-object-proxy==1.10.0 + # via openapi-spec-validator +lupa==2.4 + # via fakeredis +mako==1.3.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # alembic +markupsafe==3.0.2 + # via + # -c requirements/_base.txt + # jinja2 + # mako + # werkzeug +moto==5.1.4 + # via -r requirements/_test.in +mpmath==1.3.0 + # via sympy +mypy==1.15.0 + # via sqlalchemy +mypy-extensions==1.0.0 + # via mypy +networkx==3.4.2 + # via cfn-lint +openapi-schema-validator==0.6.3 + # via openapi-spec-validator +openapi-spec-validator==0.7.1 + # via moto +packaging==24.0 + # via + # -c requirements/_base.txt + # pytest + # pytest-sugar +pathable==0.4.4 + # via jsonschema-path +pluggy==1.5.0 + # via pytest +ply==3.11 + # via jsonpath-ng +py-partiql-parser==0.6.1 + # via moto +pycparser==2.22 + # via cffi +pydantic==2.10.2 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # aws-sam-translator +pydantic-core==2.27.1 + # via + # -c requirements/_base.txt + # pydantic +pyparsing==3.1.2 + # via + # -c requirements/_base.txt + # moto +pytest==8.3.5 + # via + # -r requirements/_test.in + # pytest-asyncio + # pytest-cov + # pytest-mock + # pytest-sugar +pytest-asyncio==0.26.0 + # via -r requirements/_test.in +pytest-cov==6.0.0 + # via -r requirements/_test.in +pytest-mock==3.14.0 + # via -r requirements/_test.in +pytest-runner==6.0.1 + # via -r requirements/_test.in +pytest-sugar==1.0.0 + # via -r requirements/_test.in +python-dateutil==2.9.0.post0 + # via + # -c requirements/_base.txt + # botocore + # moto +python-dotenv==1.0.1 + # via + # -c requirements/_base.txt + # -r requirements/_test.in +pyyaml==6.0.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # cfn-lint + # jsonschema-path + # moto + # responses +redis==5.2.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # fakeredis +referencing==0.29.3 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # jsonschema + # jsonschema-path + # jsonschema-specifications +regex==2023.12.25 + # via + # -c requirements/_base.txt + # cfn-lint +requests==2.32.2 + # via + # -c requirements/_base.txt + # docker + # jsonschema-path + # moto + # requests-mock + # responses +requests-mock==1.12.1 + # via -r requirements/_test.in +responses==0.25.6 + # via moto +respx==0.22.0 + # via -r requirements/_test.in +rfc3339-validator==0.1.4 + # via openapi-schema-validator +rpds-py==0.18.0 + # via + # -c requirements/_base.txt + # jsonschema + # referencing +s3transfer==0.11.3 + # via + # -c requirements/_base.txt + # boto3 +setuptools==74.0.0 + # via + # -c requirements/_base.txt + # moto +six==1.16.0 + # via + # -c requirements/_base.txt + # python-dateutil + # rfc3339-validator +sniffio==1.3.1 + # via + # -c requirements/_base.txt + # anyio + # asgi-lifespan + # httpx +sortedcontainers==2.4.0 + # via fakeredis +sqlalchemy==1.4.52 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -r requirements/_test.in + # alembic +sqlalchemy2-stubs==0.0.2a38 + # via sqlalchemy +sympy==1.13.3 + # via cfn-lint +termcolor==2.5.0 + # via pytest-sugar 
+types-requests==2.32.0.20250301 + # via -r requirements/_test.in +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # alembic + # aws-sam-translator + # cfn-lint + # mypy + # pydantic + # pydantic-core + # sqlalchemy2-stubs +tzdata==2024.1 + # via + # -c requirements/_base.txt + # faker +urllib3==2.2.3 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # botocore + # docker + # requests + # responses + # types-requests +werkzeug==3.1.3 + # via + # flask + # flask-cors + # moto +wrapt==1.16.0 + # via + # -c requirements/_base.txt + # aws-xray-sdk +xmltodict==0.14.2 + # via moto diff --git a/services/resource-usage-tracker/requirements/_tools.in b/services/resource-usage-tracker/requirements/_tools.in new file mode 100644 index 00000000000..52a9a39d162 --- /dev/null +++ b/services/resource-usage-tracker/requirements/_tools.in @@ -0,0 +1,7 @@ +--constraint ../../../requirements/constraints.txt +--constraint _base.txt +--constraint _test.txt + +--requirement ../../../requirements/devenv.txt + +watchdog[watchmedo] diff --git a/services/resource-usage-tracker/requirements/_tools.txt b/services/resource-usage-tracker/requirements/_tools.txt new file mode 100644 index 00000000000..4ae88566afd --- /dev/null +++ b/services/resource-usage-tracker/requirements/_tools.txt @@ -0,0 +1,93 @@ +astroid==3.3.8 + # via pylint +black==25.1.0 + # via -r requirements/../../../requirements/devenv.txt +build==1.2.2.post1 + # via pip-tools +bump2version==1.0.1 + # via -r requirements/../../../requirements/devenv.txt +cfgv==3.4.0 + # via pre-commit +click==8.1.7 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # pip-tools +dill==0.3.9 + # via pylint +distlib==0.3.9 + # via virtualenv +filelock==3.17.0 + # via virtualenv +identify==2.6.8 + # via pre-commit +isort==6.0.1 + # via + # -r requirements/../../../requirements/devenv.txt + # pylint +mccabe==0.7.0 + # via pylint +mypy==1.15.0 + # via + # -c requirements/_test.txt + # -r requirements/../../../requirements/devenv.txt +mypy-extensions==1.0.0 + # via + # -c requirements/_test.txt + # black + # mypy +nodeenv==1.9.1 + # via pre-commit +packaging==24.0 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # black + # build +pathspec==0.12.1 + # via black +pip==25.0.1 + # via pip-tools +pip-tools==7.4.1 + # via -r requirements/../../../requirements/devenv.txt +platformdirs==4.3.6 + # via + # black + # pylint + # virtualenv +pre-commit==4.1.0 + # via -r requirements/../../../requirements/devenv.txt +pylint==3.3.4 + # via -r requirements/../../../requirements/devenv.txt +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +pyyaml==6.0.1 + # via + # -c requirements/../../../requirements/constraints.txt + # -c requirements/_base.txt + # -c requirements/_test.txt + # pre-commit + # watchdog +ruff==0.9.9 + # via -r requirements/../../../requirements/devenv.txt +setuptools==74.0.0 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # pip-tools +tomlkit==0.13.2 + # via pylint +typing-extensions==4.12.2 + # via + # -c requirements/_base.txt + # -c requirements/_test.txt + # mypy +virtualenv==20.29.2 + # via pre-commit +watchdog==6.0.0 + # via -r requirements/_tools.in +wheel==0.45.1 + # via pip-tools diff --git a/services/resource-usage-tracker/requirements/ci.txt b/services/resource-usage-tracker/requirements/ci.txt new file mode 100644 index 00000000000..697ade6fa5e --- /dev/null +++ b/services/resource-usage-tracker/requirements/ci.txt @@ -0,0 
+1,24 @@ +# Shortcut to install all packages for the continuous integration (CI) of 'services/resource-usage-tracker' +# +# - As prod.txt but w/ tests and tools +# +# Usage: +# pip install -r requirements/ci.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library +simcore-models-library @ ../../packages/models-library +pytest-simcore @ ../../packages/pytest-simcore +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-postgres-database @ ../../packages/postgres-database + +# installs current package +simcore-service-resource-usage-tracker @ . diff --git a/services/resource-usage-tracker/requirements/constraints.txt b/services/resource-usage-tracker/requirements/constraints.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/requirements/dev.txt b/services/resource-usage-tracker/requirements/dev.txt new file mode 100644 index 00000000000..253940c1800 --- /dev/null +++ b/services/resource-usage-tracker/requirements/dev.txt @@ -0,0 +1,24 @@ +# Shortcut to install all packages needed to develop 'services/resource-usage-tracker' +# +# - As ci.txt but with current and repo packages in develop (edit) mode +# +# Usage: +# pip install -r requirements/dev.txt +# + +# installs base + tests + tools requirements +--requirement _base.txt +--requirement _test.txt +--requirement _tools.txt + +# installs this repo's packages +--editable ../../packages/aws-library +--editable ../../packages/common-library +--editable ../../packages/models-library +--editable ../../packages/pytest-simcore +--editable ../../packages/service-library[fastapi] +--editable ../../packages/settings-library +--editable ../../packages/postgres-database + +# installs current package +--editable . diff --git a/services/resource-usage-tracker/requirements/prod.txt b/services/resource-usage-tracker/requirements/prod.txt new file mode 100644 index 00000000000..b4ea10941d6 --- /dev/null +++ b/services/resource-usage-tracker/requirements/prod.txt @@ -0,0 +1,20 @@ +# Shortcut to install 'services/resource-usage-tracker' for production +# +# - As ci.txt but w/o tests +# +# Usage: +# pip install -r requirements/prod.txt +# + +# installs base requirements +--requirement _base.txt + +# installs this repo's packages +simcore-aws-library @ ../../packages/aws-library +simcore-common-library @ ../../packages/common-library/ +simcore-models-library @ ../../packages/models-library +simcore-service-library[fastapi] @ ../../packages/service-library +simcore-settings-library @ ../../packages/settings-library +simcore-postgres-database @ ../../packages/postgres-database +# installs current package +simcore-service-resource-usage-tracker @ . 
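
As a minimal sketch of the layering used by the requirements files above (where _base.txt is passed to _test.in only as a constraint, so _test.txt becomes a frozen list of extra test packages whose versions must not drift from _base.txt), the following Python snippet checks that invariant. It is illustrative only: the helper names read_pins and find_conflicts and the hard-coded service path are assumptions for the example, not something the repository provides.

# Illustrative sketch, not part of the diff: verify that any package pinned in both
# _base.txt and _test.txt resolves to the same version, which is what the
# "--constraint _base.txt" line in _test.in is meant to guarantee.
import re
from pathlib import Path


def read_pins(path: Path) -> dict[str, str]:
    """Return {package: version} for every 'name==version' line of a pip-compile output."""
    pins: dict[str, str] = {}
    for raw_line in path.read_text().splitlines():
        match = re.match(r"^([A-Za-z0-9._-]+)==([^\s#]+)", raw_line.strip())
        if match:
            pins[match.group(1).lower()] = match.group(2)
    return pins


def find_conflicts(requirements_dir: Path) -> list[str]:
    """List packages whose pins differ between _base.txt (the constraint) and _test.txt."""
    base = read_pins(requirements_dir / "_base.txt")
    test = read_pins(requirements_dir / "_test.txt")
    return [
        f"{name}: _base.txt pins {base[name]}, _test.txt pins {version}"
        for name, version in sorted(test.items())
        if name in base and base[name] != version
    ]


if __name__ == "__main__":
    conflicts = find_conflicts(Path("services/resource-usage-tracker/requirements"))
    print("\n".join(conflicts) or "OK: _test.txt is consistent with _base.txt")

In the committed workflow this consistency comes from pip-compile itself, via the --constraint _base.txt line in _test.in; the sketch is only an after-the-fact sanity check and does not modify any files.
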
diff --git a/services/resource-usage-tracker/setup.cfg b/services/resource-usage-tracker/setup.cfg new file mode 100644 index 00000000000..1dbddd94448 --- /dev/null +++ b/services/resource-usage-tracker/setup.cfg @@ -0,0 +1,18 @@ +[bumpversion] +current_version = 1.0.0 +commit = True +message = services/resource-usage-tracker version: {current_version} → {new_version} +tag = False +commit_args = --no-verify + +[bumpversion:file:VERSION] + +[tool:pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +markers = + testit: "marks test to run during development" +[mypy] +plugins = + pydantic.mypy + sqlalchemy.ext.mypy.plugin diff --git a/services/resource-usage-tracker/setup.py b/services/resource-usage-tracker/setup.py new file mode 100755 index 00000000000..9140fbbd1ef --- /dev/null +++ b/services/resource-usage-tracker/setup.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +import re +import sys +from pathlib import Path + +from setuptools import find_packages, setup + + +def read_reqs(reqs_path: Path) -> set[str]: + return { + r + for r in re.findall( + r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)", + reqs_path.read_text(), + re.MULTILINE, + ) + if isinstance(r, str) + } + + +CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent + +NAME = "simcore-service-resource-usage-tracker" +VERSION = (CURRENT_DIR / "VERSION").read_text().strip() +AUTHORS = ("Dustin Kaiser (mrnicegyu11)", "Sylvain Anderegg (sanderegg)") +DESCRIPTION = "Service that collects and stores computational resources usage used in osparc-simcore" +README = (CURRENT_DIR / "README.md").read_text() + +PROD_REQUIREMENTS = tuple( + read_reqs(CURRENT_DIR / "requirements" / "_base.txt") + | { + "simcore-models-library", + "simcore-service-library[fastapi]", + "simcore-settings-library", + } +) + +TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt")) + +SETUP = { + "name": NAME, + "version": VERSION, + "author": AUTHORS, + "description": DESCRIPTION, + "long_description": README, + "license": "MIT license", + "python_requires": ">=3.10", + "packages": find_packages(where="src"), + "package_dir": { + "": "src", + }, + "include_package_data": True, + "install_requires": PROD_REQUIREMENTS, + "test_suite": "tests", + "tests_require": TEST_REQUIREMENTS, + "extras_require": {"test": TEST_REQUIREMENTS}, + "entry_points": { + "console_scripts": [ + "simcore-service-resource-usage-tracker = simcore_service_resource_usage_tracker.cli:app", + "simcore-service = simcore_service_resource_usage_tracker.cli:app", + ], + }, +} + +if __name__ == "__main__": + setup(**SETUP) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/_meta.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/_meta.py new file mode 100644 index 00000000000..d433237ea2a --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/_meta.py @@ -0,0 +1,39 @@ +""" Application's metadata + +""" + +from typing import Final + +from models_library.basic_types import VersionStr +from packaging.version import Version +from pydantic import TypeAdapter +from servicelib.utils_meta import PackageInfo +from settings_library.basic_types import VersionTag + +info: Final = 
PackageInfo(package_name="simcore-service-resource-usage-tracker") +__version__: Final[VersionStr] = info.__version__ + + +PROJECT_NAME: Final[str] = info.project_name +VERSION: Final[Version] = info.version +API_VERSION: Final[VersionStr] = info.__version__ +API_VTAG: Final[VersionTag] = TypeAdapter(VersionTag).validate_python( + info.api_prefix_path_tag +) +SUMMARY: Final[str] = info.get_summary() +APP_NAME: Final[str] = PROJECT_NAME + +# NOTE: https://texteditor.com/ascii-frames/ +APP_STARTED_BANNER_MSG = r""" +d8888b. d88888b .d8888. .d88b. db db d8888b. .o88b. d88888b db db .d8888. .d8b. d888b d88888b d888888b d8888b. .d8b. .o88b. db dD d88888b d8888b. +88 `8D 88' 88' YP .8P Y8. 88 88 88 `8D d8P Y8 88' 88 88 88' YP d8' `8b 88' Y8b 88' `~~88~~' 88 `8D d8' `8b d8P Y8 88 ,8P' 88' 88 `8D +88oobY' 88ooooo `8bo. 88 88 88 88 88oobY' 8P 88ooooo 88 88 `8bo. 88ooo88 88 88ooooo 88 88oobY' 88ooo88 8P 88,8P 88ooooo 88oobY' +88`8b 88~~~~~ `Y8b. 88 88 88 88 88`8b 8b 88~~~~~ C8888D 88 88 `Y8b. 88~~~88 88 ooo 88~~~~~ C8888D 88 88`8b 88~~~88 8b 88`8b 88~~~~~ 88`8b +88 `88. 88. db 8D `8b d8' 88b d88 88 `88. Y8b d8 88. 88b d88 db 8D 88 88 88. ~8~ 88. 88 88 `88. 88 88 Y8b d8 88 `88. 88. 88 `88. +88 YD Y88888P `8888Y' `Y88P' ~Y8888P' 88 YD `Y88P' Y88888P ~Y8888P' `8888Y' YP YP Y888P Y88888P YP 88 YD YP YP `Y88P' YP YD Y88888P 88 YD {} +""".format( + f"v{__version__}" +) + + +APP_FINISHED_BANNER_MSG = info.get_finished_banner() diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_health.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_health.py new file mode 100644 index 00000000000..720d51b3ad5 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_health.py @@ -0,0 +1,36 @@ +import datetime +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends +from fastapi.responses import PlainTextResponse +from models_library.errors import RABBITMQ_CLIENT_UNHEALTHY_MSG +from servicelib.rabbitmq import RabbitMQClient + +from ...services.modules.rabbitmq import get_rabbitmq_client_from_request + +logger = logging.getLogger(__name__) + + +# +# ROUTE HANDLERS +# +router = APIRouter() + + +class HealthCheckError(RuntimeError): + """Failed a health check""" + + +@router.get("/", response_class=PlainTextResponse) +async def healthcheck( + rabbitmq_client: Annotated[ + RabbitMQClient, Depends(get_rabbitmq_client_from_request) + ], +) -> str: + if not rabbitmq_client.healthy: + raise HealthCheckError(RABBITMQ_CLIENT_UNHEALTHY_MSG) + + # NOTE: check also redis?/postgres connections + + return f"{__name__}@{datetime.datetime.now(datetime.timezone.utc).isoformat()}" diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_meta.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_meta.py new file mode 100644 index 00000000000..5ffa18d202a --- /dev/null +++ 
b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_meta.py @@ -0,0 +1,38 @@ +import logging +from collections.abc import Callable + +from fastapi import APIRouter, Depends +from pydantic import BaseModel, HttpUrl + +from ..._meta import API_VERSION, PROJECT_NAME +from .dependencies import get_reverse_url_mapper + +logger = logging.getLogger(__name__) + + +# +# API SCHEMA MODELS +# + + +class _Meta(BaseModel): + name: str + version: str + docs_url: HttpUrl + + +# +# ROUTE HANDLERS +# +router = APIRouter() + + +@router.get("/meta", response_model=_Meta) +async def get_service_metadata( + url_for: Callable = Depends(get_reverse_url_mapper), +) -> _Meta: + return _Meta( + name=PROJECT_NAME, + version=API_VERSION, + docs_url=url_for("swagger_ui_html"), + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_resource_tracker.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_resource_tracker.py new file mode 100644 index 00000000000..a7397244928 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/_resource_tracker.py @@ -0,0 +1,93 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Depends, status +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreated, + WalletTotalCredits, +) +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingPlanGet, + RutPricingUnitGet, +) +from models_library.resource_tracker import CreditTransactionId + +from ...services import credit_transactions, pricing_plans, pricing_units + +_logger = logging.getLogger(__name__) + + +router = APIRouter() + + +###################### +# CREDIT TRANSACTIONS +###################### + + +@router.post( + "/credit-transactions/credits:sum", + response_model=WalletTotalCredits, + summary="Sum total available credits in the wallet", + tags=["credit-transactions"], +) +async def get_credit_transactions_sum( + wallet_total_credits: Annotated[ + WalletTotalCredits, + Depends(credit_transactions.sum_wallet_credits), + ], +): + return wallet_total_credits + + +@router.post( + "/credit-transactions", + response_model=CreditTransactionCreated, + summary="Top up credits for specific wallet", + status_code=status.HTTP_201_CREATED, + tags=["credit-transactions"], +) +async def create_credit_transaction( + transaction_id: Annotated[ + CreditTransactionId, + Depends(credit_transactions.create_credit_transaction), + ], +): + return {"credit_transaction_id": transaction_id} + + +################ +# PRICING PLANS +################ + + +@router.get( + "/services/{service_key:path}/{service_version}/pricing-plan", + response_model=RutPricingPlanGet, + operation_id="get_service_default_pricing_plan", + description="Returns a default pricing plan with pricing details for a specified service", + tags=["pricing-plans"], +) +async def get_service_default_pricing_plan( + service_pricing_plans: Annotated[ + RutPricingPlanGet, + Depends(pricing_plans.get_service_default_pricing_plan), + ], +): + return service_pricing_plans + + +@router.get( + "/pricing-plans/{pricing_plan_id}/pricing-units/{pricing_unit_id}", + response_model=RutPricingUnitGet, + operation_id="list_service_pricing_plans", + description="Returns a list of service pricing plans with pricing details for a specified service", + tags=["pricing-plans"], +) +async def get_pricing_plan_unit( + pricing_unit: 
Annotated[ + RutPricingUnitGet, + Depends(pricing_units.get_pricing_unit), + ] +): + return pricing_unit diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py new file mode 100644 index 00000000000..dacf0ff08b5 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/dependencies.py @@ -0,0 +1,27 @@ +# mypy: disable-error-code=truthy-function +# +# DEPENDENCIES +# + +import logging + +from fastapi.requests import Request +from servicelib.fastapi.dependencies import get_app, get_reverse_url_mapper +from sqlalchemy.ext.asyncio import AsyncEngine + +logger = logging.getLogger(__name__) + + +def get_resource_tracker_db_engine(request: Request) -> AsyncEngine: + engine: AsyncEngine = request.app.state.engine + assert engine # nosec + return engine + + +assert get_reverse_url_mapper # nosec +assert get_app # nosec + +__all__: tuple[str, ...] = ( + "get_app", + "get_reverse_url_mapper", +) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/routes.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/routes.py new file mode 100644 index 00000000000..bf9cd5aae9b --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rest/routes.py @@ -0,0 +1,26 @@ +import logging + +from fastapi import APIRouter, FastAPI +from servicelib.logging_utils import log_context + +from ..._meta import API_VTAG +from . import _health, _meta, _resource_tracker + +_logger = logging.getLogger(__name__) + + +def setup_api_routes(app: FastAPI): + """ + Composes resources/sub-resources routers + """ + with log_context( + _logger, + logging.INFO, + msg="RUT setup_api_routes", + ): + app.include_router(_health.router) + + api_router = APIRouter(prefix=f"/{API_VTAG}") + api_router.include_router(_meta.router, tags=["meta"]) + api_router.include_router(_resource_tracker.router) + app.include_router(api_router) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_credit_transactions.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_credit_transactions.py new file mode 100644 index 00000000000..a2b15d5e620 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_credit_transactions.py @@ -0,0 +1,83 @@ +from decimal import Decimal + +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreateBody, + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import CreditTransactionStatus +from models_library.services_types import ServiceRunID +from models_library.wallets import WalletID +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CreditTransactionNotFoundError, + WalletTransactionError, +) + +from ...services import credit_transactions, service_runs + +router = RPCRouter() + + 
+@router.expose(reraise_if_error_type=()) +async def get_wallet_total_credits( + app: FastAPI, + *, + product_name: ProductName, + wallet_id: WalletID, +) -> WalletTotalCredits: + return await credit_transactions.sum_wallet_credits( + db_engine=app.state.engine, + product_name=product_name, + wallet_id=wallet_id, + ) + + +@router.expose(reraise_if_error_type=(CreditTransactionNotFoundError,)) +async def get_transaction_current_credits_by_service_run_id( + app: FastAPI, + *, + service_run_id: ServiceRunID, +) -> Decimal: + return await credit_transactions.get_transaction_current_credits_by_service_run_id( + db_engine=app.state.engine, + service_run_id=service_run_id, + ) + + +@router.expose(reraise_if_error_type=()) +async def get_project_wallet_total_credits( + app: FastAPI, + *, + product_name: ProductName, + wallet_id: WalletID, + project_id: ProjectID, + transaction_status: CreditTransactionStatus | None = None, +) -> WalletTotalCredits: + return await service_runs.sum_project_wallet_total_credits( + db_engine=app.state.engine, + product_name=product_name, + wallet_id=wallet_id, + project_id=project_id, + transaction_status=transaction_status, + ) + + +@router.expose(reraise_if_error_type=(WalletTransactionError,)) +async def pay_project_debt( + app: FastAPI, + *, + project_id: ProjectID, + current_wallet_transaction: CreditTransactionCreateBody, + new_wallet_transaction: CreditTransactionCreateBody, +) -> None: + await credit_transactions.pay_project_debt( + db_engine=app.state.engine, + rabbitmq_client=app.state.rabbitmq_client, + rut_fire_and_forget_tasks=app.state.rut_fire_and_forget_tasks, + project_id=project_id, + current_wallet_transaction=current_wallet_transaction, + new_wallet_transaction=new_wallet_transaction, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_checkouts.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_checkouts.py new file mode 100644 index 00000000000..859b501d4bd --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_checkouts.py @@ -0,0 +1,99 @@ +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker.licensed_items_checkouts import ( + LicensedItemCheckoutGet, + LicensedItemsCheckoutsPage, +) +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.rest_ordering import OrderBy +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + LICENSES_ERRORS, + LicensedItemCheckoutNotFoundError, +) + +from ...services import licensed_items_checkouts + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=(LicensedItemCheckoutNotFoundError,)) +async def get_licensed_item_checkout( + app: FastAPI, + *, + product_name: ProductName, + licensed_item_checkout_id: LicensedItemCheckoutID, +) -> LicensedItemCheckoutGet: + return await licensed_items_checkouts.get_licensed_item_checkout( + db_engine=app.state.engine, + product_name=product_name, + licensed_item_checkout_id=licensed_item_checkout_id, + ) + + 
+@router.expose(reraise_if_error_type=LICENSES_ERRORS) +async def get_licensed_items_checkouts_page( + app: FastAPI, + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: int = 0, + limit: int = 20, + order_by: OrderBy, +) -> LicensedItemsCheckoutsPage: + return await licensed_items_checkouts.list_licensed_items_checkouts( + db_engine=app.state.engine, + product_name=product_name, + filter_wallet_id=filter_wallet_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + + +@router.expose(reraise_if_error_type=LICENSES_ERRORS) +async def checkout_licensed_item( + app: FastAPI, + *, + licensed_item_id: LicensedItemID, + key: LicensedItemKey, + version: LicensedItemVersion, + wallet_id: WalletID, + product_name: ProductName, + num_of_seats: int, + service_run_id: ServiceRunID, + user_id: UserID, + user_email: str, +) -> LicensedItemCheckoutGet: + return await licensed_items_checkouts.checkout_licensed_item( + db_engine=app.state.engine, + licensed_item_id=licensed_item_id, + key=key, + version=version, + wallet_id=wallet_id, + product_name=product_name, + num_of_seats=num_of_seats, + service_run_id=service_run_id, + user_id=user_id, + user_email=user_email, + ) + + +@router.expose(reraise_if_error_type=LICENSES_ERRORS) +async def release_licensed_item( + app: FastAPI, + *, + licensed_item_checkout_id: LicensedItemCheckoutID, + product_name: ProductName, +) -> LicensedItemCheckoutGet: + return await licensed_items_checkouts.release_licensed_item( + db_engine=app.state.engine, + licensed_item_checkout_id=licensed_item_checkout_id, + product_name=product_name, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_purchases.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_purchases.py new file mode 100644 index 00000000000..e8f71dfb97d --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_licensed_items_purchases.py @@ -0,0 +1,61 @@ +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker.licensed_items_purchases import ( + LicensedItemPurchaseGet, + LicensedItemsPurchasesPage, +) +from models_library.basic_types import IDStr +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, + LicensedItemsPurchasesCreate, +) +from models_library.rest_ordering import OrderBy +from models_library.wallets import WalletID +from servicelib.rabbitmq import RPCRouter + +from ...services import licensed_items_purchases + +router = RPCRouter() + + +@router.expose(reraise_if_error_type=()) +async def get_licensed_items_purchases_page( + app: FastAPI, + *, + product_name: ProductName, + wallet_id: WalletID, + offset: int = 0, + limit: int = 20, + order_by: OrderBy = OrderBy(field=IDStr("purchased_at")), +) -> LicensedItemsPurchasesPage: + return await licensed_items_purchases.list_licensed_items_purchases( + db_engine=app.state.engine, + product_name=product_name, + offset=offset, + limit=limit, + filter_wallet_id=wallet_id, + order_by=order_by, + ) + + +@router.expose(reraise_if_error_type=()) +async def get_licensed_item_purchase( + app: FastAPI, + *, + product_name: ProductName, + licensed_item_purchase_id: LicensedItemPurchaseID, +) -> LicensedItemPurchaseGet: + return await licensed_items_purchases.get_licensed_item_purchase( + db_engine=app.state.engine, + product_name=product_name, + 
licensed_item_purchase_id=licensed_item_purchase_id, + ) + + +@router.expose(reraise_if_error_type=()) +async def create_licensed_item_purchase( + app: FastAPI, *, data: LicensedItemsPurchasesCreate +) -> LicensedItemPurchaseGet: + return await licensed_items_purchases.create_licensed_item_purchase( + rabbitmq_client=app.state.rabbitmq_client, db_engine=app.state.engine, data=data + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_pricing_plans.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_pricing_plans.py new file mode 100644 index 00000000000..f5499437c0a --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_pricing_plans.py @@ -0,0 +1,172 @@ +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + PricingPlanToServiceGet, + RutPricingPlanGet, + RutPricingPlanPage, + RutPricingUnitGet, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanCreate, + PricingPlanId, + PricingPlanUpdate, + PricingUnitId, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, +) +from models_library.services import ServiceKey, ServiceVersion +from servicelib.rabbitmq import RPCRouter +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + PricingUnitDuplicationError, +) + +from ...services import pricing_plans, pricing_units + +router = RPCRouter() + + +## Pricing plans + + +@router.expose(reraise_if_error_type=()) +async def get_pricing_plan( + app: FastAPI, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> RutPricingPlanGet: + return await pricing_plans.get_pricing_plan( + product_name=product_name, + pricing_plan_id=pricing_plan_id, + db_engine=app.state.engine, + ) + + +@router.expose(reraise_if_error_type=()) +async def list_pricing_plans_without_pricing_units( + app: FastAPI, + *, + product_name: ProductName, + exclude_inactive: bool, + # pagination + offset: int, + limit: int, +) -> RutPricingPlanPage: + return await pricing_plans.list_pricing_plans_without_pricing_units( + db_engine=app.state.engine, + product_name=product_name, + exclude_inactive=exclude_inactive, + offset=offset, + limit=limit, + ) + + +@router.expose(reraise_if_error_type=()) +async def create_pricing_plan( + app: FastAPI, + *, + data: PricingPlanCreate, +) -> RutPricingPlanGet: + return await pricing_plans.create_pricing_plan( + data=data, + db_engine=app.state.engine, + ) + + +@router.expose(reraise_if_error_type=()) +async def update_pricing_plan( + app: FastAPI, + *, + product_name: ProductName, + data: PricingPlanUpdate, +) -> RutPricingPlanGet: + return await pricing_plans.update_pricing_plan( + product_name=product_name, + data=data, + db_engine=app.state.engine, + ) + + +## Pricing units + + +@router.expose(reraise_if_error_type=()) +async def get_pricing_unit( + app: FastAPI, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, +) -> RutPricingUnitGet: + return await pricing_units.get_pricing_unit( + product_name=product_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + db_engine=app.state.engine, + ) + + +@router.expose(reraise_if_error_type=(PricingUnitDuplicationError,)) +async def create_pricing_unit( + app: FastAPI, + *, + product_name: ProductName, + data: PricingUnitWithCostCreate, +) -> RutPricingUnitGet: + return await 
pricing_units.create_pricing_unit( + product_name=product_name, + data=data, + db_engine=app.state.engine, + ) + + +@router.expose(reraise_if_error_type=()) +async def update_pricing_unit( + app: FastAPI, + *, + product_name: ProductName, + data: PricingUnitWithCostUpdate, +) -> RutPricingUnitGet: + return await pricing_units.update_pricing_unit( + product_name=product_name, + data=data, + db_engine=app.state.engine, + ) + + +## Pricing plan to service + + +@router.expose(reraise_if_error_type=()) +async def list_connected_services_to_pricing_plan_by_pricing_plan( + app: FastAPI, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> list[PricingPlanToServiceGet]: + output: list[ + PricingPlanToServiceGet + ] = await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( + product_name=product_name, + pricing_plan_id=pricing_plan_id, + db_engine=app.state.engine, + ) + return output + + +@router.expose(reraise_if_error_type=()) +async def connect_service_to_pricing_plan( + app: FastAPI, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> PricingPlanToServiceGet: + return await pricing_plans.connect_service_to_pricing_plan( + product_name=product_name, + pricing_plan_id=pricing_plan_id, + service_key=service_key, + service_version=service_version, + db_engine=app.state.engine, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_service_runs.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_service_runs.py new file mode 100644 index 00000000000..db9b155f37d --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/_service_runs.py @@ -0,0 +1,115 @@ +# pylint: disable=too-many-arguments +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + OsparcCreditsAggregatedUsagesPage, + ServiceRunPage, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import ( + CreditTransactionStatus, + ServiceResourceUsagesFilters, + ServicesAggregatedUsagesTimePeriod, + ServicesAggregatedUsagesType, +) +from models_library.rest_ordering import OrderBy +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import AnyUrl +from servicelib.rabbitmq import RPCRouter + +from ...core.settings import ApplicationSettings +from ...services import service_runs +from ...services.modules.s3 import get_s3_client + +router = RPCRouter() + + +## Service runs + + +@router.expose(reraise_if_error_type=()) +async def get_service_run_page( + app: FastAPI, + *, + user_id: UserID, + product_name: ProductName, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + filters: ServiceResourceUsagesFilters | None = None, + transaction_status: CreditTransactionStatus | None = None, + project_id: ProjectID | None = None, + # pagination + offset: int = 0, + limit: int = 20, + # ordering + order_by: OrderBy | None = None, +) -> ServiceRunPage: + return await service_runs.list_service_runs( + db_engine=app.state.engine, + user_id=user_id, + product_name=product_name, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + filters=filters, + transaction_status=transaction_status, + project_id=project_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + + 
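Callers steer this listing through the offset/limit pair and the optional OrderBy model. A minimal sketch of building a "newest runs first" ordering to pass as order_by= (the field= usage mirrors the purchases router above; the OrderDirection import and member name are assumptions, since only the default ordering appears in this diff):

    from models_library.basic_types import IDStr
    from models_library.rest_ordering import OrderBy, OrderDirection

    # newest service runs first, e.g. combined with offset=20, limit=20 for the second page
    most_recent_first = OrderBy(field=IDStr("started_at"), direction=OrderDirection.DESC)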
+@router.expose(reraise_if_error_type=()) +async def export_service_runs( + app: FastAPI, + *, + user_id: UserID, + product_name: ProductName, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + order_by: OrderBy | None = None, + filters: ServiceResourceUsagesFilters | None = None, +) -> AnyUrl: + app_settings: ApplicationSettings = app.state.settings + s3_settings = app_settings.RESOURCE_USAGE_TRACKER_S3 + assert s3_settings # nosec + + return await service_runs.export_service_runs( + s3_client=get_s3_client(app), + bucket_name=f"{s3_settings.S3_BUCKET_NAME}", + s3_region=s3_settings.S3_REGION, + user_id=user_id, + product_name=product_name, + db_engine=app.state.engine, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + order_by=order_by, + filters=filters, + ) + + +@router.expose(reraise_if_error_type=()) +async def get_osparc_credits_aggregated_usages_page( + app: FastAPI, + *, + user_id: UserID, + product_name: ProductName, + aggregated_by: ServicesAggregatedUsagesType, + time_period: ServicesAggregatedUsagesTimePeriod, + limit: int = 20, + offset: int = 0, + wallet_id: WalletID, + access_all_wallet_usage: bool = False, +) -> OsparcCreditsAggregatedUsagesPage: + return await service_runs.get_osparc_credits_aggregated_usages_page( + user_id=user_id, + product_name=product_name, + db_engine=app.state.engine, + aggregated_by=aggregated_by, + time_period=time_period, + limit=limit, + offset=offset, + wallet_id=wallet_id, + access_all_wallet_usage=access_all_wallet_usage, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/routes.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/routes.py new file mode 100644 index 00000000000..42767b19525 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/api/rpc/routes.py @@ -0,0 +1,44 @@ +import logging + +from fastapi import FastAPI +from models_library.api_schemas_resource_usage_tracker import ( + RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, +) +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import RPCRouter + +from ...services.modules.rabbitmq import get_rabbitmq_rpc_server +from . 
import ( + _credit_transactions, + _licensed_items_checkouts, + _licensed_items_purchases, + _pricing_plans, + _service_runs, +) + +_logger = logging.getLogger(__name__) + + +ROUTERS: list[RPCRouter] = [ + _credit_transactions.router, + _service_runs.router, + _pricing_plans.router, + _licensed_items_purchases.router, + _licensed_items_checkouts.router, +] + + +def setup_rpc_api_routes(app: FastAPI) -> None: + async def startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT startup RPC API Routes", + ): + rpc_server = get_rabbitmq_rpc_server(app) + for router in ROUTERS: + await rpc_server.register_router( + router, RESOURCE_USAGE_TRACKER_RPC_NAMESPACE, app + ) + + app.add_event_handler("startup", startup) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/cli.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/cli.py new file mode 100644 index 00000000000..b842ddf358d --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/cli.py @@ -0,0 +1,46 @@ +import logging + +import rich +import typer +from rich.console import Console +from settings_library.utils_cli import create_settings_command + +from ._meta import PROJECT_NAME, __version__ +from .core.settings import ApplicationSettings, MinimalApplicationSettings + +# SEE setup entrypoint 'simcore_service_invitations.cli:app' +app = typer.Typer(name=PROJECT_NAME) +log = logging.getLogger(__name__) + +err_console = Console(stderr=True) + + +def _version_callback(value: bool) -> None: + if value: + rich.print(__version__) + raise typer.Exit + + +@app.callback() +def main(ctx: typer.Context) -> None: + """o2s2parc resource usage tracker""" + assert ctx # nosec + assert True # nosec + + +# +# COMMANDS +# + + +app.command()(create_settings_command(settings_cls=ApplicationSettings, logger=log)) + + +@app.command() +def evaluate(ctx: typer.Context) -> None: + """Evaluates resources and does blahblahb TBD @mrnicegyu11""" + assert ctx # nosec + settings = MinimalApplicationSettings.create_from_envs() + err_console.print( + f"[yellow]running with configuration:\n{settings.model_dump_json(warnings='none')}[/yellow]" + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/application.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/application.py new file mode 100644 index 00000000000..2aacbfb4990 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/application.py @@ -0,0 +1,94 @@ +import logging + +from fastapi import FastAPI +from servicelib.fastapi.openapi import override_fastapi_openapi_method +from servicelib.fastapi.tracing import ( + initialize_fastapi_app_tracing, + setup_tracing, +) + +from .._meta import ( + API_VERSION, + API_VTAG, + APP_FINISHED_BANNER_MSG, + APP_STARTED_BANNER_MSG, + PROJECT_NAME, + SUMMARY, +) +from ..api.rest.routes import setup_api_routes +from ..api.rpc.routes import setup_rpc_api_routes +from ..exceptions.handlers import setup_exception_handlers +from ..services.background_task_periodic_heartbeat_check_setup import ( + setup as setup_background_task_periodic_heartbeat_check, +) +from ..services.fire_and_forget_setup import setup 
as fire_and_forget_setup +from ..services.modules.db import setup as setup_db +from ..services.modules.rabbitmq import setup as setup_rabbitmq +from ..services.modules.redis import setup as setup_redis +from ..services.modules.s3 import setup as setup_s3 +from ..services.process_message_running_service_setup import ( + setup as setup_process_message_running_service, +) +from .settings import ApplicationSettings + +_logger = logging.getLogger(__name__) + + +def create_app(settings: ApplicationSettings) -> FastAPI: + _logger.info("app settings: %s", settings.model_dump_json(indent=1)) + + app = FastAPI( + debug=settings.RESOURCE_USAGE_TRACKER_DEBUG, + title=f"{PROJECT_NAME} web API", + description=SUMMARY, + version=API_VERSION, + openapi_url=f"/api/{API_VTAG}/openapi.json", + docs_url="/dev/doc", + redoc_url=None, # default disabled, see below + ) + override_fastapi_openapi_method(app) + + # STATE + app.state.settings = settings + assert app.state.settings.API_VERSION == API_VERSION # nosec + + # PLUGINS SETUP + if app.state.settings.RESOURCE_USAGE_TRACKER_TRACING: + setup_tracing( + app, + app.state.settings.RESOURCE_USAGE_TRACKER_TRACING, + app.state.settings.APP_NAME, + ) + setup_api_routes(app) + fire_and_forget_setup(app) + + if settings.RESOURCE_USAGE_TRACKER_POSTGRES: + setup_db(app) + setup_redis(app) + setup_rabbitmq(app) + if settings.RESOURCE_USAGE_TRACKER_S3: + # Needed for CSV export functionality + setup_s3(app) + + setup_rpc_api_routes(app) # Requires Rabbit, S3 + setup_background_task_periodic_heartbeat_check(app) # Requires Redis, DB + + setup_process_message_running_service(app) # Requires Rabbit + + if app.state.settings.RESOURCE_USAGE_TRACKER_TRACING: + initialize_fastapi_app_tracing(app) + + # ERROR HANDLERS + setup_exception_handlers(app) + + # EVENTS + async def _on_startup() -> None: + print(APP_STARTED_BANNER_MSG, flush=True) + + async def _on_shutdown() -> None: + print(APP_FINISHED_BANNER_MSG, flush=True) + + app.add_event_handler("startup", _on_startup) + app.add_event_handler("shutdown", _on_shutdown) + + return app diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/settings.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/settings.py new file mode 100644 index 00000000000..3a534b692dc --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/core/settings.py @@ -0,0 +1,145 @@ +import datetime +from functools import cached_property + +from models_library.basic_types import BootModeEnum +from pydantic import AliasChoices, Field, PositiveInt, field_validator +from servicelib.logging_utils_filtering import LoggerName, MessageSubstring +from settings_library.base import BaseCustomSettings +from settings_library.basic_types import BuildTargetEnum, LogLevel, VersionTag +from settings_library.postgres import PostgresSettings +from settings_library.prometheus import PrometheusSettings +from settings_library.rabbit import RabbitSettings +from settings_library.redis import RedisSettings +from settings_library.s3 import S3Settings +from settings_library.tracing import TracingSettings +from settings_library.utils_logging import MixinLoggingSettings + +from .._meta import API_VERSION, API_VTAG, PROJECT_NAME + + +class _BaseApplicationSettings(BaseCustomSettings, MixinLoggingSettings): + # CODE STATICS --------------------------------------------------------- + API_VERSION: str = API_VERSION + APP_NAME: str = PROJECT_NAME + API_VTAG: VersionTag = API_VTAG + + # 
IMAGE BUILDTIME ------------------------------------------------------ + # @Makefile + SC_BUILD_DATE: str | None = None + SC_BUILD_TARGET: BuildTargetEnum | None = None + SC_VCS_REF: str | None = None + SC_VCS_URL: str | None = None + + # @Dockerfile + SC_BOOT_MODE: BootModeEnum | None = None + SC_BOOT_TARGET: BuildTargetEnum | None = None + SC_HEALTHCHECK_TIMEOUT: PositiveInt | None = Field( + default=None, + description="If a single run of the check takes longer than timeout seconds " + "then the check is considered to have failed." + "It takes retries consecutive failures of the health check for the container to be considered unhealthy.", + ) + SC_USER_ID: int | None = None + SC_USER_NAME: str | None = None + + # RUNTIME ----------------------------------------------------------- + RESOURCE_USAGE_TRACKER_DEBUG: bool = Field( + default=False, + description="Debug mode", + validation_alias=AliasChoices( + "RESOURCE_USAGE_TRACKER_DEBUG", + "DEBUG", + ), + ) + RESOURCE_USAGE_TRACKER_LOGLEVEL: LogLevel = Field( + default=LogLevel.INFO, + validation_alias=AliasChoices( + "RESOURCE_USAGE_TRACKER_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL" + ), + ) + RESOURCE_USAGE_TRACKER_LOG_FORMAT_LOCAL_DEV_ENABLED: bool = Field( + default=False, + validation_alias=AliasChoices( + "RESOURCE_USAGE_TRACKER_LOG_FORMAT_LOCAL_DEV_ENABLED", + "LOG_FORMAT_LOCAL_DEV_ENABLED", + ), + description="Enables local development log format. WARNING: make sure it is disabled if you want to have structured logs!", + ) + RESOURCE_USAGE_TRACKER_LOG_FILTER_MAPPING: dict[ + LoggerName, list[MessageSubstring] + ] = Field( + default_factory=dict, + validation_alias=AliasChoices( + "RESOURCE_USAGE_TRACKER_LOG_FILTER_MAPPING", "LOG_FILTER_MAPPING" + ), + description="is a dictionary that maps specific loggers (such as 'uvicorn.access' or 'gunicorn.access') to a list of log message patterns that should be filtered out.", + ) + + @cached_property + def LOG_LEVEL(self) -> LogLevel: # noqa: N802 + return self.RESOURCE_USAGE_TRACKER_LOGLEVEL + + @field_validator("RESOURCE_USAGE_TRACKER_LOGLEVEL", mode="before") + @classmethod + def valid_log_level(cls, value: str) -> str: + return cls.validate_log_level(value) + + +class MinimalApplicationSettings(_BaseApplicationSettings): + """Extends base settings with the settings needed to connect with prometheus/DB + + Separated for convenience to run some commands of the CLI that + are not related to the web server. + """ + + RESOURCE_USAGE_TRACKER_PROMETHEUS: PrometheusSettings | None = Field( + json_schema_extra={"auto_default_from_env": True} + ) + + RESOURCE_USAGE_TRACKER_POSTGRES: PostgresSettings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + ) + + RESOURCE_USAGE_TRACKER_REDIS: RedisSettings = Field( + json_schema_extra={"auto_default_from_env": True}, + ) + RESOURCE_USAGE_TRACKER_RABBITMQ: RabbitSettings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + ) + + +class ApplicationSettings(MinimalApplicationSettings): + """Web app's environment variables + + These settings includes extra configuration for the http-API + """ + + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED: bool = Field( + default=True, + description="Possibility to disable RUT background task for checking heartbeats.", + ) + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC: datetime.timedelta = Field( + default=datetime.timedelta(minutes=5), + description="Interval to check heartbeat of running services. 
(defaults to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formatting)", + ) + RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL: int = Field( + default=6, + description="Heartbeat counter limit after which RUT considers the service unhealthy.", + ) + RESOURCE_USAGE_TRACKER_PROMETHEUS_INSTRUMENTATION_ENABLED: bool = True + RESOURCE_USAGE_TRACKER_S3: S3Settings | None = Field( + json_schema_extra={"auto_default_from_env": True}, + ) + RESOURCE_USAGE_TRACKER_TRACING: TracingSettings | None = Field( + description="settings for opentelemetry tracing", + json_schema_extra={"auto_default_from_env": True}, + ) + + @field_validator( + "RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC", mode="before" + ) + @classmethod + def _validate_interval(cls, v): + if isinstance(v, str) and v.isnumeric(): + return int(v) + return v diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/errors.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/errors.py new file mode 100644 index 00000000000..55fde04b0f6 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/errors.py @@ -0,0 +1,77 @@ +from common_library.errors_classes import OsparcErrorMixin +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, +) + + +class ResourceUsageTrackerBaseError(OsparcErrorMixin, Exception): + msg_template = "Resource Usage Tracker Service Error" + + +class ConfigurationError(ResourceUsageTrackerBaseError): + ... + + +### NotCreatedDBError + + +class NotCreatedDBError(ResourceUsageTrackerBaseError): + msg_template = "Data was not inserted into the DB. Data: {data}" + + +class ServiceRunNotCreatedDBError(NotCreatedDBError): + ... + + +class CreditTransactionNotCreatedDBError(NotCreatedDBError): + ... + + +class PricingPlanNotCreatedDBError(NotCreatedDBError): + ... + + +class PricingUnitNotCreatedDBError(NotCreatedDBError): + ... + + +class PricingUnitCostNotCreatedDBError(NotCreatedDBError): + ... + + +class PricingPlanToServiceNotCreatedDBError(NotCreatedDBError): + ... + + +### DoesNotExistsDBError + + +class PricingPlanDoesNotExistsDBError(ResourceUsageTrackerBaseError): + msg_template = "Pricing plan {pricing_plan_id} does not exist" + + +class PricingPlanAndPricingUnitCombinationDoesNotExistsDBError( + ResourceUsageTrackerBaseError +): + msg_template = "Pricing plan {pricing_plan_id} and pricing unit {pricing_unit_id} do not exist in product {product_name}" + + +class PricingUnitCostDoesNotExistsDBError(ResourceUsageTrackerBaseError): + msg_template = "Pricing unit cost id {pricing_unit_cost_id} does not exist" + + +### NotFoundError + + +class RutNotFoundError(ResourceUsageTrackerBaseError): + ...
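For context, these OsparcErrorMixin-based errors are raised with the template placeholders as keyword arguments; assuming the mixin interpolates msg_template with those arguments when the exception is rendered (its usual behaviour elsewhere in this repo), a typical failure reads roughly like this:

    from simcore_service_resource_usage_tracker.exceptions.errors import (
        PricingPlanDoesNotExistsDBError,
    )

    try:
        raise PricingPlanDoesNotExistsDBError(pricing_plan_id=42)
    except PricingPlanDoesNotExistsDBError as err:
        # expected to print the filled-in template, e.g. "Pricing plan 42 does not exist"
        print(err)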
+ + +class PricingPlanNotFoundForServiceError(RutNotFoundError): + msg_template = ( + "Pricing plan not found for service key {service_key} version {service_version}" + ) + + +class LicensedItemPurchaseNotFoundError(RutNotFoundError): + licensed_item_purchase_id: LicensedItemPurchaseID diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/__init__.py new file mode 100644 index 00000000000..8be40a236cf --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/__init__.py @@ -0,0 +1,20 @@ +from fastapi import FastAPI, HTTPException, status + +from ..errors import RutNotFoundError +from ._http_error import ( + http404_error_handler, + http_error_handler, + make_http_error_handler_for_exception, +) + + +def setup_exception_handlers(app: FastAPI) -> None: + app.add_exception_handler(HTTPException, http_error_handler) + app.add_exception_handler(RutNotFoundError, http404_error_handler) + + app.add_exception_handler( + Exception, + make_http_error_handler_for_exception( + status.HTTP_500_INTERNAL_SERVER_ERROR, Exception + ), + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/_http_error.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/_http_error.py new file mode 100644 index 00000000000..3ab692a70dc --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/exceptions/handlers/_http_error.py @@ -0,0 +1,63 @@ +import logging +from collections.abc import Callable +from typing import Awaitable + +from fastapi import HTTPException, status +from fastapi.encoders import jsonable_encoder +from servicelib.logging_errors import create_troubleshotting_log_kwargs +from servicelib.status_codes_utils import is_5xx_server_error +from starlette.requests import Request +from starlette.responses import JSONResponse + +from ...exceptions.errors import RutNotFoundError + +_logger = logging.getLogger(__name__) + + +async def http_error_handler(request: Request, exc: Exception) -> JSONResponse: + assert isinstance(exc, HTTPException) # nosec + + if is_5xx_server_error(exc.status_code): + _logger.exception( + **create_troubleshotting_log_kwargs( + "Unexpected error happened in the Resource Usage Tracker. 
Please contact support.", + error=exc, + error_context={ + "request": request, + "request.method": f"{request.method}", + }, + ) + ) + return JSONResponse( + content=jsonable_encoder({"errors": [exc.detail]}), status_code=exc.status_code + ) + + +def http404_error_handler( + _: Request, # pylint: disable=unused-argument + exc: Exception, +) -> JSONResponse: + assert isinstance(exc, RutNotFoundError) # nose + return JSONResponse( + status_code=status.HTTP_404_NOT_FOUND, + content={"message": f"{exc.msg_template}"}, + ) + + +def make_http_error_handler_for_exception( + status_code: int, exception_cls: type[BaseException] +) -> Callable[[Request, Exception], Awaitable[JSONResponse]]: + """ + Produces a handler for BaseException-type exceptions which converts them + into an error JSON response with a given status code + + SEE https://docs.python.org/3/library/exceptions.html#concrete-exceptions + """ + + async def _http_error_handler(_: Request, exc: Exception) -> JSONResponse: + assert isinstance(exc, exception_cls) # nosec + return JSONResponse( + content=jsonable_encoder({"errors": [str(exc)]}), status_code=status_code + ) + + return _http_error_handler diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/main.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/main.py new file mode 100644 index 00000000000..079ba5cdf79 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/main.py @@ -0,0 +1,24 @@ +"""Main application to be deployed by uvicorn (or equivalent) server + +""" + +import logging + +from fastapi import FastAPI +from servicelib.logging_utils import config_all_loggers +from simcore_service_resource_usage_tracker.core.application import create_app +from simcore_service_resource_usage_tracker.core.settings import ApplicationSettings + +the_settings = ApplicationSettings.create_from_envs() + +# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3148 +logging.basicConfig(level=the_settings.log_level) # NOSONAR +logging.root.setLevel(the_settings.log_level) +config_all_loggers( + log_format_local_dev_enabled=the_settings.RESOURCE_USAGE_TRACKER_LOG_FORMAT_LOCAL_DEV_ENABLED, + logger_filter_mapping=the_settings.RESOURCE_USAGE_TRACKER_LOG_FILTER_MAPPING, + tracing_settings=the_settings.RESOURCE_USAGE_TRACKER_TRACING, +) + +# SINGLETON FastAPI app +the_app: FastAPI = create_app(the_settings) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/credit_transactions.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/credit_transactions.py new file mode 100644 index 00000000000..564b12ce1d4 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/credit_transactions.py @@ -0,0 +1,76 @@ +from datetime import datetime +from decimal import Decimal + +from models_library.products import ProductName +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionId, + CreditTransactionStatus, + PricingPlanId, + PricingUnitCostId, + PricingUnitId, +) +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, +) +from models_library.services_types import 
ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict + + +class CreditTransactionCreate(BaseModel): + product_name: ProductName + wallet_id: WalletID + wallet_name: str + pricing_plan_id: PricingPlanId | None + pricing_unit_id: PricingUnitId | None + pricing_unit_cost_id: PricingUnitCostId | None + user_id: UserID + user_email: str + osparc_credits: Decimal + transaction_status: CreditTransactionStatus + transaction_classification: CreditClassification + service_run_id: ServiceRunID | None + payment_transaction_id: str | None + created_at: datetime + last_heartbeat_at: datetime + licensed_item_purchase_id: LicensedItemPurchaseID | None + + +class CreditTransactionCreditsUpdate(BaseModel): + service_run_id: ServiceRunID + osparc_credits: Decimal + last_heartbeat_at: datetime + + +class CreditTransactionCreditsAndStatusUpdate(BaseModel): + service_run_id: ServiceRunID + osparc_credits: Decimal + transaction_status: CreditTransactionStatus + + +class CreditTransactionStatusUpdate(BaseModel): + service_run_id: ServiceRunID + transaction_status: CreditTransactionStatus + + +class CreditTransactionDB(BaseModel): + transaction_id: CreditTransactionId + product_name: ProductName + wallet_id: WalletID + wallet_name: str + pricing_plan_id: PricingPlanId | None + pricing_unit_id: PricingUnitId | None + pricing_unit_cost_id: PricingUnitCostId | None + user_id: UserID + user_email: str + osparc_credits: Decimal + transaction_status: CreditTransactionStatus + transaction_classification: CreditClassification + service_run_id: ServiceRunID | None + payment_transaction_id: str | None + created: datetime + last_heartbeat_at: datetime + modified: datetime + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_checkouts.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_checkouts.py new file mode 100644 index 00000000000..8dd1ff5e929 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_checkouts.py @@ -0,0 +1,50 @@ +from datetime import datetime + +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict + + +class LicensedItemCheckoutDB(BaseModel): + licensed_item_checkout_id: LicensedItemCheckoutID + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + user_id: UserID + user_email: str + product_name: ProductName + service_run_id: ServiceRunID + started_at: datetime + stopped_at: datetime | None + num_of_seats: int + modified: datetime + + model_config = ConfigDict(from_attributes=True) + + +class CreateLicensedItemCheckoutDB(BaseModel): + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + user_id: UserID + user_email: str + product_name: ProductName + service_run_id: ServiceRunID + started_at: datetime + num_of_seats: int + + model_config = ConfigDict(from_attributes=True) + + +class UpdateLicensedItemCheckoutDB(BaseModel): + 
stopped_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_purchases.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_purchases.py new file mode 100644 index 00000000000..dd23b87e4e8 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/licensed_items_purchases.py @@ -0,0 +1,53 @@ +from datetime import datetime +from decimal import Decimal + +from models_library.emails import LowerCaseEmailStr +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker import PricingUnitCostId +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, +) +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict + + +class LicensedItemsPurchasesDB(BaseModel): + licensed_item_purchase_id: LicensedItemPurchaseID + product_name: ProductName + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + wallet_name: str + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + start_at: datetime + expire_at: datetime + num_of_seats: int + purchased_by_user: UserID + user_email: LowerCaseEmailStr + purchased_at: datetime + modified: datetime + + model_config = ConfigDict(from_attributes=True) + + +class CreateLicensedItemsPurchasesDB(BaseModel): + product_name: ProductName + licensed_item_id: LicensedItemID + key: LicensedItemKey + version: LicensedItemVersion + wallet_id: WalletID + wallet_name: str + pricing_unit_cost_id: PricingUnitCostId + pricing_unit_cost: Decimal + start_at: datetime + expire_at: datetime + num_of_seats: int + purchased_by_user: UserID + user_email: LowerCaseEmailStr + purchased_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_plans.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_plans.py new file mode 100644 index 00000000000..7f27ef1096c --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_plans.py @@ -0,0 +1,31 @@ +from datetime import datetime + +from models_library.resource_tracker import PricingPlanClassification, PricingPlanId +from models_library.services import ServiceKey, ServiceVersion +from pydantic import BaseModel, ConfigDict + +## DB Models + + +class PricingPlansDB(BaseModel): + pricing_plan_id: PricingPlanId + display_name: str + description: str + classification: PricingPlanClassification + is_active: bool + created: datetime + pricing_plan_key: str + model_config = ConfigDict(from_attributes=True) + + +class PricingPlansWithServiceDefaultPlanDB(PricingPlansDB): + service_default_plan: bool + model_config = ConfigDict(from_attributes=True) + + +class PricingPlanToServiceDB(BaseModel): + pricing_plan_id: PricingPlanId + service_key: ServiceKey + service_version: ServiceVersion + created: datetime + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_unit_costs.py 
b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_unit_costs.py new file mode 100644 index 00000000000..200419fbdca --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_unit_costs.py @@ -0,0 +1,24 @@ +from datetime import datetime +from decimal import Decimal + +from models_library.resource_tracker import ( + PricingPlanId, + PricingUnitCostId, + PricingUnitId, +) +from pydantic import BaseModel, ConfigDict + + +class PricingUnitCostsDB(BaseModel): + pricing_unit_cost_id: PricingUnitCostId + pricing_plan_id: PricingPlanId + pricing_plan_key: str + pricing_unit_id: PricingUnitId + pricing_unit_name: str + cost_per_unit: Decimal + valid_from: datetime + valid_to: datetime | None + created: datetime + comment: str | None + modified: datetime + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_units.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_units.py new file mode 100644 index 00000000000..3cb59c161d4 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/pricing_units.py @@ -0,0 +1,34 @@ +from datetime import datetime +from decimal import Decimal +from typing import Any + +from models_library.resource_tracker import ( + HardwareInfo, + PricingPlanId, + PricingUnitCostId, + PricingUnitId, + UnitExtraInfoLicense, + UnitExtraInfoTier, +) +from pydantic import BaseModel, ConfigDict, field_validator + + +class PricingUnitsDB(BaseModel): + pricing_unit_id: PricingUnitId + pricing_plan_id: PricingPlanId + unit_name: str + unit_extra_info: UnitExtraInfoTier | UnitExtraInfoLicense + default: bool + specific_info: HardwareInfo + created: datetime + modified: datetime + current_cost_per_unit: Decimal + current_cost_per_unit_id: PricingUnitCostId + model_config = ConfigDict(from_attributes=True) + + @field_validator("specific_info", mode="before") + @classmethod + def default_hardware_info_when_empty(cls, v) -> HardwareInfo | Any: + if not v: + return HardwareInfo(aws_ec2_instances=[]) + return v diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/service_runs.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/service_runs.py new file mode 100644 index 00000000000..638a0bcb918 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/models/service_runs.py @@ -0,0 +1,120 @@ +from datetime import datetime +from decimal import Decimal + +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.resource_tracker import ( + CreditTransactionStatus, + PricingPlanId, + PricingUnitCostId, + PricingUnitId, + ResourceTrackerServiceType, + ServiceRunStatus, +) +from models_library.services import ServiceKey, ServiceVersion +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import BaseModel, ConfigDict, NonNegativeInt + + +class ServiceRunCreate(BaseModel): + product_name: ProductName + service_run_id: ServiceRunID + wallet_id: WalletID | None + wallet_name: str | None + pricing_plan_id: PricingPlanId | None + pricing_unit_id: PricingUnitId | None + pricing_unit_cost_id: PricingUnitCostId | None 
+ pricing_unit_cost: Decimal | None + simcore_user_agent: str + user_id: UserID + user_email: str + project_id: ProjectID + project_name: str + node_id: NodeID + node_name: str + parent_project_id: ProjectID + root_parent_project_id: ProjectID + root_parent_project_name: str + parent_node_id: NodeID + root_parent_node_id: NodeID + service_key: ServiceKey + service_version: ServiceVersion + service_type: ResourceTrackerServiceType + service_resources: dict + service_additional_metadata: dict + started_at: datetime + service_run_status: ServiceRunStatus + last_heartbeat_at: datetime + + +class ServiceRunLastHeartbeatUpdate(BaseModel): + service_run_id: ServiceRunID + last_heartbeat_at: datetime + + +class ServiceRunStoppedAtUpdate(BaseModel): + service_run_id: ServiceRunID + stopped_at: datetime + service_run_status: ServiceRunStatus + service_run_status_msg: str | None + + +class ServiceRunDB(BaseModel): + product_name: ProductName + service_run_id: ServiceRunID + wallet_id: WalletID | None + wallet_name: str | None + pricing_plan_id: PricingPlanId | None + pricing_unit_id: PricingUnitId | None + pricing_unit_cost_id: PricingUnitCostId | None + pricing_unit_cost: Decimal | None + user_id: UserID + user_email: str + project_id: ProjectID + project_name: str + node_id: NodeID + node_name: str + parent_project_id: ProjectID + root_parent_project_id: ProjectID + root_parent_project_name: str + parent_node_id: NodeID + root_parent_node_id: NodeID + service_key: ServiceKey + service_version: ServiceVersion + service_type: ResourceTrackerServiceType + service_resources: dict + started_at: datetime + stopped_at: datetime | None + service_run_status: ServiceRunStatus + modified: datetime + last_heartbeat_at: datetime + service_run_status_msg: str | None + missed_heartbeat_counter: NonNegativeInt + model_config = ConfigDict(from_attributes=True) + + +class ServiceRunWithCreditsDB(ServiceRunDB): + osparc_credits: Decimal | None = None + transaction_status: CreditTransactionStatus | None + project_tags: list[str] + + model_config = ConfigDict(from_attributes=True) + + +class OsparcCreditsAggregatedByServiceKeyDB(BaseModel): + osparc_credits: Decimal + service_key: ServiceKey + running_time_in_hours: Decimal + + model_config = ConfigDict(from_attributes=True) + + +class ServiceRunForCheckDB(BaseModel): + service_run_id: ServiceRunID + last_heartbeat_at: datetime + missed_heartbeat_counter: NonNegativeInt + modified: datetime + model_config = ConfigDict(from_attributes=True) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py new file mode 100644 index 00000000000..4f61fda9879 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check.py @@ -0,0 +1,192 @@ +import asyncio +import logging +from datetime import UTC, datetime, timedelta + +from fastapi import FastAPI +from models_library.resource_tracker import ( + CreditTransactionStatus, + ResourceTrackerServiceType, + ServiceRunStatus, +) +from models_library.services_types import ServiceRunID +from 
pydantic import NonNegativeInt, PositiveInt +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..core.settings import ApplicationSettings +from ..models.credit_transactions import CreditTransactionCreditsAndStatusUpdate +from ..models.service_runs import ServiceRunStoppedAtUpdate +from .modules.db import ( + credit_transactions_db, + licensed_items_checkouts_db, + service_runs_db, +) +from .utils import compute_service_run_credit_costs, make_negative + +_logger = logging.getLogger(__name__) + +_BATCH_SIZE = 20 + + +async def _check_service_heartbeat( + db_engine: AsyncEngine, + base_start_timestamp: datetime, + resource_usage_tracker_missed_heartbeat_interval: timedelta, + resource_usage_tracker_missed_heartbeat_counter_fail: NonNegativeInt, + service_run_id: ServiceRunID, + last_heartbeat_at: datetime, + missed_heartbeat_counter: NonNegativeInt, + modified_at: datetime, +): + # Check for missed heartbeats + if ( + # Checks that in last 5 minutes we didn't get any heartbeat (ex. last heartbeat < current time - 5 minutes). + last_heartbeat_at + < base_start_timestamp - resource_usage_tracker_missed_heartbeat_interval + ) and ( # Checks that last modified timestamp is older than some reasonable small threshold (this is here to prevent situation + # when RUT is restarting and in the beginning starts the `check_of_running_services_task`. If the task was already running in + # last 2 minutes it will not allow it to compute. ) + modified_at + < base_start_timestamp - timedelta(minutes=2) + ): + missed_heartbeat_counter += 1 + if ( + missed_heartbeat_counter + > resource_usage_tracker_missed_heartbeat_counter_fail + ): + # Handle unhealthy service + _logger.error( + "Service run id: %s is considered unhealthy and not billed. Counter %s", + service_run_id, + missed_heartbeat_counter, + ) + await _close_unhealthy_service( + db_engine, service_run_id, base_start_timestamp + ) + else: + _logger.warning( + "Service run id: %s missed heartbeat. Counter %s", + service_run_id, + missed_heartbeat_counter, + ) + await service_runs_db.update_service_missed_heartbeat_counter( + db_engine, + service_run_id=service_run_id, + last_heartbeat_at=last_heartbeat_at, + missed_heartbeat_counter=missed_heartbeat_counter, + ) + + +async def _close_unhealthy_service( + db_engine: AsyncEngine, + service_run_id: ServiceRunID, + base_start_timestamp: datetime, +): + + # 1. Close the service_run + update_service_run_stopped_at = ServiceRunStoppedAtUpdate( + service_run_id=service_run_id, + stopped_at=base_start_timestamp, + service_run_status=ServiceRunStatus.ERROR, + service_run_status_msg="Service missed more heartbeats. It's considered unhealthy.", + ) + running_service = await service_runs_db.update_service_run_stopped_at( + db_engine, data=update_service_run_stopped_at + ) + + if running_service is None: + _logger.error( + "Service run id: %s Nothing to update. This should not happen; investigate.", + service_run_id, + ) + return + + # 2. Close the billing transaction (as not billed) + if running_service.wallet_id and running_service.pricing_unit_cost is not None: + computed_credits = await compute_service_run_credit_costs( + running_service.started_at, + running_service.last_heartbeat_at, + running_service.pricing_unit_cost, + ) + # NOTE: I have decided that in the case of an error on our side, we will + # close the Dynamic service as BILLED -> since the user was effectively using it until + # the issue occurred. 
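    # For illustration of the decision below: a *dynamic* service whose heartbeats stop
    # after running from 10:00 to 12:00, with a computed usage of 100 credits for that
    # window, is closed as BILLED with osparc_credits = -100; a *computational* service
    # in the same situation is closed as NOT_BILLED, i.e. the usage is recorded but is
    # not meant to be charged to the wallet.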
+ # NOTE: Update Jan 2025 - With the introduction of the IN_DEBT state, + # when closing the transaction for the dynamic service as BILLED, it is possible + # that the wallet may show a negative balance during this period, which would typically + # be considered as IN_DEBT. However, I have decided to still close it as BILLED. + # This ensures that the user does not have to explicitly pay the DEBT, as the closure + # was caused by an issue on our side. + _transaction_status = ( + CreditTransactionStatus.NOT_BILLED + if running_service.service_type + == ResourceTrackerServiceType.COMPUTATIONAL_SERVICE + else CreditTransactionStatus.BILLED + ) + update_credit_transaction = CreditTransactionCreditsAndStatusUpdate( + service_run_id=service_run_id, + osparc_credits=make_negative(computed_credits), + transaction_status=_transaction_status, + ) + await credit_transactions_db.update_credit_transaction_credits_and_status( + db_engine, data=update_credit_transaction + ) + + # 3. If the credit transaction status is considered "NOT_BILLED", this might return + # the wallet to positive numbers. If, in the meantime, some transactions were marked as DEBT, + # we need to update them back to the BILLED state. + if _transaction_status == CreditTransactionStatus.NOT_BILLED: + wallet_total_credits = await credit_transactions_db.sum_wallet_credits( + db_engine, + product_name=running_service.product_name, + wallet_id=running_service.wallet_id, + ) + if wallet_total_credits.available_osparc_credits >= 0: + await credit_transactions_db.batch_update_credit_transaction_status_for_in_debt_transactions( + db_engine, + project_id=None, + wallet_id=running_service.wallet_id, + transaction_status=CreditTransactionStatus.BILLED, + ) + + # 4. Release license seats in case some were checked out but not properly released. 
+ await licensed_items_checkouts_db.force_release_license_seats_by_run_id( + db_engine, service_run_id=service_run_id + ) + + +async def check_running_services(app: FastAPI) -> None: + _logger.info("Periodic check started") + + # This check runs across all products + app_settings: ApplicationSettings = app.state.settings + _db_engine = app.state.engine + + base_start_timestamp = datetime.now(tz=UTC) + + # Get all current running services (across all products) + total_count: PositiveInt = await service_runs_db.total_service_runs_with_running_status_across_all_products( + _db_engine + ) + + for offset in range(0, total_count, _BATCH_SIZE): + batch_check_services = await service_runs_db.list_service_runs_with_running_status_across_all_products( + _db_engine, + offset=offset, + limit=_BATCH_SIZE, + ) + + await asyncio.gather( + *( + _check_service_heartbeat( + db_engine=_db_engine, + base_start_timestamp=base_start_timestamp, + resource_usage_tracker_missed_heartbeat_interval=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC, + resource_usage_tracker_missed_heartbeat_counter_fail=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL, + service_run_id=check_service.service_run_id, + last_heartbeat_at=check_service.last_heartbeat_at, + missed_heartbeat_counter=check_service.missed_heartbeat_counter, + modified_at=check_service.modified, + ) + for check_service in batch_check_services + ) + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check_setup.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check_setup.py new file mode 100644 index 00000000000..abaefe1e9b7 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/background_task_periodic_heartbeat_check_setup.py @@ -0,0 +1,81 @@ +import asyncio +import logging +from collections.abc import Awaitable, Callable +from typing import TypedDict + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.background_task_utils import exclusive_periodic +from servicelib.logging_utils import log_catch, log_context + +from ..core.settings import ApplicationSettings +from .background_task_periodic_heartbeat_check import check_running_services +from .modules.redis import get_redis_lock_client + +_logger = logging.getLogger(__name__) + + +_TASK_NAME_PERIODICALY_CHECK_RUNNING_SERVICES = "periodic_check_of_running_services" + + +class RutBackgroundTask(TypedDict): + name: str + task_func: Callable + + +def _on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with ( + log_context( + _logger, + logging.INFO, + msg="RUT background task Periodic check of running services startup..", + ), + log_catch(_logger, reraise=False), + ): + app_settings: ApplicationSettings = app.state.settings + + app.state.rut_background_task__periodic_check_of_running_services = None + + @exclusive_periodic( + get_redis_lock_client(app), + task_interval=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC, + retry_after=app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_INTERVAL_SEC, + ) + async def _periodic_check_running_services() -> None: + await check_running_services(app) + + app.state.rut_background_task__periodic_check_of_running_services = ( + asyncio.create_task( + _periodic_check_running_services(), + 
name=_TASK_NAME_PERIODICALY_CHECK_RUNNING_SERVICES, + ) + ) + + return _startup + + +def _on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + with ( + log_context( + _logger, + logging.INFO, + msg="RUT background tasks Periodic check of running services shutdown..", + ), + log_catch(_logger, reraise=False), + ): + assert _app # nosec + if _app.state.rut_background_task__periodic_check_of_running_services: + await cancel_wait_task( + _app.state.rut_background_task__periodic_check_of_running_services + ) + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", _on_app_startup(app)) + app.add_event_handler("shutdown", _on_app_shutdown(app)) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py new file mode 100644 index 00000000000..d2108df0a2b --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/credit_transactions.py @@ -0,0 +1,231 @@ +from decimal import Decimal +from typing import Annotated + +from fastapi import Depends +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreateBody, + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionId, + CreditTransactionStatus, +) +from models_library.services_types import ServiceRunID +from models_library.wallets import WalletID +from servicelib.rabbitmq import RabbitMQClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + WalletTransactionError, +) +from servicelib.utils import fire_and_forget_task +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..api.rest.dependencies import get_resource_tracker_db_engine +from ..models.credit_transactions import CreditTransactionCreate +from ..services.modules.db import service_runs_db +from .modules.db import credit_transactions_db +from .modules.rabbitmq import get_rabbitmq_client_from_request +from .utils import sum_credit_transactions_and_publish_to_rabbitmq + + +async def create_credit_transaction( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + rabbitmq_client: Annotated[ + RabbitMQClient, Depends(get_rabbitmq_client_from_request) + ], + credit_transaction_create_body: CreditTransactionCreateBody, +) -> CreditTransactionId: + transaction_create = CreditTransactionCreate( + product_name=credit_transaction_create_body.product_name, + wallet_id=credit_transaction_create_body.wallet_id, + wallet_name=credit_transaction_create_body.wallet_name, + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + user_id=credit_transaction_create_body.user_id, + user_email=credit_transaction_create_body.user_email, + osparc_credits=credit_transaction_create_body.osparc_credits, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.ADD_WALLET_TOP_UP, + service_run_id=None, + payment_transaction_id=credit_transaction_create_body.payment_transaction_id, + licensed_item_purchase_id=None, + created_at=credit_transaction_create_body.created_at, + last_heartbeat_at=credit_transaction_create_body.created_at, + ) 
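    # Worked example for the transaction block below: suppose the top-up built above is
    # worth 50 credits and the wallet currently shows -30 available credits because a
    # previous run was closed as IN_DEBT; once the top-up is inserted, the re-computed
    # balance is +20, i.e. >= 0, so the wallet's IN_DEBT transactions are flipped back
    # to BILLED within the same database transaction.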
+ async with transaction_context(db_engine) as conn: + transaction_id = await credit_transactions_db.create_credit_transaction( + db_engine, connection=conn, data=transaction_create + ) + + wallet_total_credits = await sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + connection=conn, + rabbitmq_client=rabbitmq_client, + product_name=credit_transaction_create_body.product_name, + wallet_id=credit_transaction_create_body.wallet_id, + ) + if wallet_total_credits.available_osparc_credits >= 0: + # Change status from `IN_DEBT` to `BILLED` + await credit_transactions_db.batch_update_credit_transaction_status_for_in_debt_transactions( + db_engine, + connection=conn, + project_id=None, + wallet_id=credit_transaction_create_body.wallet_id, + transaction_status=CreditTransactionStatus.BILLED, + ) + + return transaction_id + + +async def sum_wallet_credits( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + product_name: ProductName, + wallet_id: WalletID, +) -> WalletTotalCredits: + return await credit_transactions_db.sum_wallet_credits( + db_engine, + product_name=product_name, + wallet_id=wallet_id, + ) + + +async def pay_project_debt( + db_engine: AsyncEngine, + rabbitmq_client: RabbitMQClient, + rut_fire_and_forget_tasks: set, + project_id: ProjectID, + current_wallet_transaction: CreditTransactionCreateBody, + new_wallet_transaction: CreditTransactionCreateBody, +): + # NOTE: `current_wallet_transaction` is the Wallet in DEBT + + total_project_debt_amount = await service_runs_db.sum_project_wallet_total_credits( + db_engine, + product_name=current_wallet_transaction.product_name, + wallet_id=current_wallet_transaction.wallet_id, + project_id=project_id, + transaction_status=CreditTransactionStatus.IN_DEBT, + ) + + if ( + total_project_debt_amount.available_osparc_credits + != new_wallet_transaction.osparc_credits + ): + msg = f"Project DEBT of {total_project_debt_amount.available_osparc_credits} does not equal to payment: new_wallet {new_wallet_transaction.wallet_id} credits {new_wallet_transaction.osparc_credits}, current wallet {current_wallet_transaction.wallet_id} credits {current_wallet_transaction.osparc_credits}" + raise WalletTransactionError(msg=msg) + if ( + -total_project_debt_amount.available_osparc_credits + != current_wallet_transaction.osparc_credits + ): + msg = f"Project DEBT of {total_project_debt_amount.available_osparc_credits} does not equal to payment: new_wallet {new_wallet_transaction.wallet_id} credits {new_wallet_transaction.osparc_credits}, current wallet {current_wallet_transaction.wallet_id} credits {current_wallet_transaction.osparc_credits}" + raise WalletTransactionError(msg=msg) + if current_wallet_transaction.product_name != new_wallet_transaction.product_name: + msg = f"Currently we do not support credit exchange between different products. New wallet {new_wallet_transaction.wallet_id}, current wallet {current_wallet_transaction.wallet_id}" + raise WalletTransactionError(msg=msg) + + # Does the new wallet has enough credits to pay the debt? 
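Before the affordability check below, here is a minimal numeric sketch of the three invariants validated by pay_project_debt; all figures are invented for illustration:

```python
from decimal import Decimal

# Invented example: the project ran 25 credits into debt on the current wallet.
project_debt = Decimal("-25")            # sum of the project's IN_DEBT transactions
new_wallet_credits = Decimal("-25")      # amount charged to the paying ("new") wallet
current_wallet_credits = Decimal("25")   # amount credited to the indebted ("current") wallet
new_wallet_balance = Decimal("40")       # assumed total credits of the paying wallet

assert project_debt == new_wallet_credits        # the debit matches the debt exactly
assert -project_debt == current_wallet_credits   # the credit mirrors the debt exactly
assert new_wallet_balance + project_debt >= 0    # the paying wallet can afford it
```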
+ new_wallet_total_credit_amount = await credit_transactions_db.sum_wallet_credits( + db_engine, + product_name=new_wallet_transaction.product_name, + wallet_id=new_wallet_transaction.wallet_id, + ) + if ( + new_wallet_total_credit_amount.available_osparc_credits + + total_project_debt_amount.available_osparc_credits + < 0 + ): + msg = f"New wallet {new_wallet_transaction.wallet_id} doesn't have enough credits {new_wallet_total_credit_amount.available_osparc_credits} to pay the debt {total_project_debt_amount.available_osparc_credits} of current wallet {current_wallet_transaction.wallet_id}" + raise WalletTransactionError(msg=msg) + + new_wallet_transaction_create = CreditTransactionCreate( + product_name=new_wallet_transaction.product_name, + wallet_id=new_wallet_transaction.wallet_id, + wallet_name=new_wallet_transaction.wallet_name, + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + user_id=new_wallet_transaction.user_id, + user_email=new_wallet_transaction.user_email, + osparc_credits=new_wallet_transaction.osparc_credits, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.DEDUCT_WALLET_EXCHANGE, + service_run_id=None, + payment_transaction_id=new_wallet_transaction.payment_transaction_id, + licensed_item_purchase_id=None, + created_at=new_wallet_transaction.created_at, + last_heartbeat_at=new_wallet_transaction.created_at, + ) + + current_wallet_transaction_create = CreditTransactionCreate( + product_name=current_wallet_transaction.product_name, + wallet_id=current_wallet_transaction.wallet_id, + wallet_name=current_wallet_transaction.wallet_name, + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + user_id=current_wallet_transaction.user_id, + user_email=current_wallet_transaction.user_email, + osparc_credits=current_wallet_transaction.osparc_credits, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.ADD_WALLET_EXCHANGE, + service_run_id=None, + payment_transaction_id=current_wallet_transaction.payment_transaction_id, + licensed_item_purchase_id=None, + created_at=current_wallet_transaction.created_at, + last_heartbeat_at=current_wallet_transaction.created_at, + ) + + async with transaction_context(db_engine) as conn: + await credit_transactions_db.create_credit_transaction( + db_engine, connection=conn, data=new_wallet_transaction_create + ) + await credit_transactions_db.create_credit_transaction( + db_engine, connection=conn, data=current_wallet_transaction_create + ) + # Change status from `IN_DEBT` to `BILLED` + await credit_transactions_db.batch_update_credit_transaction_status_for_in_debt_transactions( + db_engine, + connection=conn, + project_id=project_id, + wallet_id=current_wallet_transaction_create.wallet_id, + transaction_status=CreditTransactionStatus.BILLED, + ) + + fire_and_forget_task( + sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=new_wallet_transaction_create.product_name, + wallet_id=new_wallet_transaction_create.wallet_id, # <-- New wallet + ), + task_suffix_name=f"sum_and_publish_credits_wallet_id{new_wallet_transaction_create.wallet_id}", + fire_and_forget_tasks_collection=rut_fire_and_forget_tasks, + ) + fire_and_forget_task( + sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=current_wallet_transaction_create.product_name, + 
wallet_id=current_wallet_transaction_create.wallet_id, # <-- Current wallet + ), + task_suffix_name=f"sum_and_publish_credits_wallet_id{current_wallet_transaction_create.wallet_id}", + fire_and_forget_tasks_collection=rut_fire_and_forget_tasks, + ) + + +async def get_transaction_current_credits_by_service_run_id( + db_engine: AsyncEngine, + *, + service_run_id: ServiceRunID, +) -> Decimal: + return ( + await credit_transactions_db.get_transaction_current_credits_by_service_run_id( + db_engine, + service_run_id=service_run_id, + ) + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/fire_and_forget_setup.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/fire_and_forget_setup.py new file mode 100644 index 00000000000..2523a069974 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/fire_and_forget_setup.py @@ -0,0 +1,42 @@ +import logging +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from servicelib.async_utils import cancel_wait_task +from servicelib.logging_utils import log_catch, log_context + +_logger = logging.getLogger(__name__) + + +def _on_app_startup(_app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="Resource Usage Tracker setup fire and forget tasks..", + ), log_catch(_logger, reraise=False): + _app.state.rut_fire_and_forget_tasks = set() + + return _startup + + +def _on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + with log_context( + _logger, + logging.INFO, + msg="Resource Usage Tracker fire and forget tasks shutdown..", + ), log_catch(_logger, reraise=False): + assert _app # nosec + if _app.state.rut_fire_and_forget_tasks: + for task in _app.state.rut_fire_and_forget_tasks: + await cancel_wait_task(task) + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", _on_app_startup(app)) + app.add_event_handler("shutdown", _on_app_shutdown(app)) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_checkouts.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_checkouts.py new file mode 100644 index 00000000000..549118884a9 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_checkouts.py @@ -0,0 +1,224 @@ +from datetime import UTC, datetime +from typing import Annotated + +from fastapi import Depends +from models_library.api_schemas_resource_usage_tracker.licensed_items_checkouts import ( + LicensedItemCheckoutGet, + LicensedItemsCheckoutsPage, +) +from models_library.licenses import LicensedItemID, LicensedItemKey, LicensedItemVersion +from models_library.products import ProductName +from models_library.resource_tracker import ServiceRunStatus +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.rest_ordering import OrderBy +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CanNotCheckoutNotEnoughAvailableSeatsError, + CanNotCheckoutServiceIsNotRunningError, + NotEnoughAvailableSeatsError, +) +from sqlalchemy.ext.asyncio import 
AsyncEngine + +from ..api.rest.dependencies import get_resource_tracker_db_engine +from ..models.licensed_items_checkouts import ( + CreateLicensedItemCheckoutDB, + LicensedItemCheckoutDB, +) +from .modules.db import ( + licensed_items_checkouts_db, + licensed_items_purchases_db, + service_runs_db, +) + + +async def list_licensed_items_checkouts( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: int, + limit: int, + order_by: OrderBy, +) -> LicensedItemsCheckoutsPage: + total, licensed_items_checkouts_list_db = await licensed_items_checkouts_db.list_( + db_engine, + product_name=product_name, + filter_wallet_id=filter_wallet_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + return LicensedItemsCheckoutsPage( + total=total, + items=[ + LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_db.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_db.licensed_item_id, + key=licensed_item_checkout_db.key, + version=licensed_item_checkout_db.version, + wallet_id=licensed_item_checkout_db.wallet_id, + user_id=licensed_item_checkout_db.user_id, + user_email=licensed_item_checkout_db.user_email, + product_name=licensed_item_checkout_db.product_name, + service_run_id=licensed_item_checkout_db.service_run_id, + started_at=licensed_item_checkout_db.started_at, + stopped_at=licensed_item_checkout_db.stopped_at, + num_of_seats=licensed_item_checkout_db.num_of_seats, + ) + for licensed_item_checkout_db in licensed_items_checkouts_list_db + ], + ) + + +async def get_licensed_item_checkout( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + product_name: ProductName, + licensed_item_checkout_id: LicensedItemCheckoutID, +) -> LicensedItemCheckoutGet: + licensed_item_checkout_db: LicensedItemCheckoutDB = ( + await licensed_items_checkouts_db.get( + db_engine, + product_name=product_name, + licensed_item_checkout_id=licensed_item_checkout_id, + ) + ) + + return LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_db.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_db.licensed_item_id, + key=licensed_item_checkout_db.key, + version=licensed_item_checkout_db.version, + wallet_id=licensed_item_checkout_db.wallet_id, + user_id=licensed_item_checkout_db.user_id, + user_email=licensed_item_checkout_db.user_email, + product_name=licensed_item_checkout_db.product_name, + service_run_id=licensed_item_checkout_db.service_run_id, + started_at=licensed_item_checkout_db.started_at, + stopped_at=licensed_item_checkout_db.stopped_at, + num_of_seats=licensed_item_checkout_db.num_of_seats, + ) + + +async def checkout_licensed_item( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + licensed_item_id: LicensedItemID, + key: LicensedItemKey, + version: LicensedItemVersion, + wallet_id: WalletID, + product_name: ProductName, + num_of_seats: int, + service_run_id: ServiceRunID, + user_id: UserID, + user_email: str, +) -> LicensedItemCheckoutGet: + + _active_purchased_seats: int = await licensed_items_purchases_db.get_active_purchased_seats_for_key_version_wallet( + db_engine, + key=key, + version=version, + wallet_id=wallet_id, + product_name=product_name, + ) + + _currently_used_seats = await licensed_items_checkouts_db.get_currently_used_seats_for_key_version_wallet( + db_engine, + key=key, + version=version, + wallet_id=wallet_id, + product_name=product_name, + ) + + 
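The seat-availability check that follows is easiest to read with concrete numbers; this is only an illustrative sketch, not part of the service code:

```python
# Invented numbers for illustration.
active_purchased_seats = 5   # non-expired purchased seats for this key/version and wallet
currently_used_seats = 3     # checkouts not yet released (stopped_at is NULL)
requested_seats = 2

available_seats = active_purchased_seats - currently_used_seats  # 2
assert available_seats > 0                      # else NotEnoughAvailableSeatsError
assert available_seats - requested_seats >= 0   # else CanNotCheckoutNotEnoughAvailableSeatsError
```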
available_seats = _active_purchased_seats - _currently_used_seats + if available_seats <= 0: + raise NotEnoughAvailableSeatsError( + license_item_id=licensed_item_id, available_num_of_seats=available_seats + ) + + if available_seats - num_of_seats < 0: + raise CanNotCheckoutNotEnoughAvailableSeatsError( + license_item_id=licensed_item_id, + available_num_of_seats=available_seats, + num_of_seats=num_of_seats, + ) + + # Check if the service run ID is currently running + service_run = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=service_run_id + ) + if ( + service_run is None + or service_run.service_run_status != ServiceRunStatus.RUNNING + ): + raise CanNotCheckoutServiceIsNotRunningError( + license_item_id=licensed_item_id, service_run=service_run + ) + + _create_item_checkout = CreateLicensedItemCheckoutDB( + licensed_item_id=licensed_item_id, + key=key, + version=version, + wallet_id=wallet_id, + user_id=user_id, + user_email=user_email, + product_name=product_name, + service_run_id=service_run_id, + started_at=datetime.now(tz=UTC), + num_of_seats=num_of_seats, + ) + licensed_item_checkout_db = await licensed_items_checkouts_db.create( + db_engine, data=_create_item_checkout + ) + + # Return checkout ID + return LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_db.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_db.licensed_item_id, + key=licensed_item_checkout_db.key, + version=licensed_item_checkout_db.version, + wallet_id=licensed_item_checkout_db.wallet_id, + user_id=licensed_item_checkout_db.user_id, + user_email=licensed_item_checkout_db.user_email, + product_name=licensed_item_checkout_db.product_name, + service_run_id=licensed_item_checkout_db.service_run_id, + started_at=licensed_item_checkout_db.started_at, + stopped_at=licensed_item_checkout_db.stopped_at, + num_of_seats=licensed_item_checkout_db.num_of_seats, + ) + + +async def release_licensed_item( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + licensed_item_checkout_id: LicensedItemCheckoutID, + product_name: ProductName, +) -> LicensedItemCheckoutGet: + + licensed_item_checkout_db: LicensedItemCheckoutDB = ( + await licensed_items_checkouts_db.update( + db_engine, + licensed_item_checkout_id=licensed_item_checkout_id, + product_name=product_name, + stopped_at=datetime.now(tz=UTC), + ) + ) + + return LicensedItemCheckoutGet( + licensed_item_checkout_id=licensed_item_checkout_db.licensed_item_checkout_id, + licensed_item_id=licensed_item_checkout_db.licensed_item_id, + key=licensed_item_checkout_db.key, + version=licensed_item_checkout_db.version, + wallet_id=licensed_item_checkout_db.wallet_id, + user_id=licensed_item_checkout_db.user_id, + user_email=licensed_item_checkout_db.user_email, + product_name=licensed_item_checkout_db.product_name, + service_run_id=licensed_item_checkout_db.service_run_id, + started_at=licensed_item_checkout_db.started_at, + stopped_at=licensed_item_checkout_db.stopped_at, + num_of_seats=licensed_item_checkout_db.num_of_seats, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_purchases.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_purchases.py new file mode 100644 index 00000000000..0e5c7abef81 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/licensed_items_purchases.py @@ -0,0 +1,189 @@ +from typing import 
Annotated + +from fastapi import Depends +from models_library.api_schemas_resource_usage_tracker.licensed_items_purchases import ( + LicensedItemPurchaseGet, + LicensedItemsPurchasesPage, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionStatus, +) +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, + LicensedItemsPurchasesCreate, +) +from models_library.rest_ordering import OrderBy +from models_library.wallets import WalletID +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..api.rest.dependencies import get_resource_tracker_db_engine +from ..models.credit_transactions import CreditTransactionCreate +from ..models.licensed_items_purchases import ( + CreateLicensedItemsPurchasesDB, + LicensedItemsPurchasesDB, +) +from .modules.db import credit_transactions_db, licensed_items_purchases_db +from .modules.rabbitmq import RabbitMQClient, get_rabbitmq_client +from .utils import make_negative, sum_credit_transactions_and_publish_to_rabbitmq + + +async def list_licensed_items_purchases( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: int = 0, + limit: int = 20, + order_by: OrderBy, +) -> LicensedItemsPurchasesPage: + total, licensed_items_purchases_list_db = await licensed_items_purchases_db.list_( + db_engine, + product_name=product_name, + filter_wallet_id=filter_wallet_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + return LicensedItemsPurchasesPage( + total=total, + items=[ + LicensedItemPurchaseGet( + licensed_item_purchase_id=licensed_item_purchase_db.licensed_item_purchase_id, + product_name=licensed_item_purchase_db.product_name, + licensed_item_id=licensed_item_purchase_db.licensed_item_id, + key=licensed_item_purchase_db.key, + version=licensed_item_purchase_db.version, + wallet_id=licensed_item_purchase_db.wallet_id, + wallet_name=licensed_item_purchase_db.wallet_name, + pricing_unit_cost_id=licensed_item_purchase_db.pricing_unit_cost_id, + pricing_unit_cost=licensed_item_purchase_db.pricing_unit_cost, + start_at=licensed_item_purchase_db.start_at, + expire_at=licensed_item_purchase_db.expire_at, + num_of_seats=licensed_item_purchase_db.num_of_seats, + purchased_by_user=licensed_item_purchase_db.purchased_by_user, + user_email=licensed_item_purchase_db.user_email, + purchased_at=licensed_item_purchase_db.purchased_at, + modified=licensed_item_purchase_db.modified, + ) + for licensed_item_purchase_db in licensed_items_purchases_list_db + ], + ) + + +async def get_licensed_item_purchase( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + product_name: ProductName, + licensed_item_purchase_id: LicensedItemPurchaseID, +) -> LicensedItemPurchaseGet: + licensed_item_purchase_db: LicensedItemsPurchasesDB = ( + await licensed_items_purchases_db.get( + db_engine, + product_name=product_name, + licensed_item_purchase_id=licensed_item_purchase_id, + ) + ) + + return LicensedItemPurchaseGet( + licensed_item_purchase_id=licensed_item_purchase_db.licensed_item_purchase_id, + product_name=licensed_item_purchase_db.product_name, + licensed_item_id=licensed_item_purchase_db.licensed_item_id, + key=licensed_item_purchase_db.key, + version=licensed_item_purchase_db.version, + wallet_id=licensed_item_purchase_db.wallet_id, + 
wallet_name=licensed_item_purchase_db.wallet_name, + pricing_unit_cost_id=licensed_item_purchase_db.pricing_unit_cost_id, + pricing_unit_cost=licensed_item_purchase_db.pricing_unit_cost, + start_at=licensed_item_purchase_db.start_at, + expire_at=licensed_item_purchase_db.expire_at, + num_of_seats=licensed_item_purchase_db.num_of_seats, + purchased_by_user=licensed_item_purchase_db.purchased_by_user, + user_email=licensed_item_purchase_db.user_email, + purchased_at=licensed_item_purchase_db.purchased_at, + modified=licensed_item_purchase_db.modified, + ) + + +async def create_licensed_item_purchase( + rabbitmq_client: Annotated[RabbitMQClient, Depends(get_rabbitmq_client)], + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + *, + data: LicensedItemsPurchasesCreate, +) -> LicensedItemPurchaseGet: + + async with transaction_context(db_engine) as conn: + item_purchase_create = CreateLicensedItemsPurchasesDB( + product_name=data.product_name, + licensed_item_id=data.licensed_item_id, + key=data.key, + version=data.version, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_unit_cost_id=data.pricing_unit_cost_id, + pricing_unit_cost=data.pricing_unit_cost, + start_at=data.start_at, + expire_at=data.expire_at, + num_of_seats=data.num_of_seats, + purchased_by_user=data.purchased_by_user, + user_email=data.user_email, + purchased_at=data.purchased_at, + ) + + licensed_item_purchase_db: LicensedItemsPurchasesDB = ( + await licensed_items_purchases_db.create( + db_engine, connection=conn, data=item_purchase_create + ) + ) + + # Deduct credits from credit_transactions table + transaction_create = CreditTransactionCreate( + product_name=data.product_name, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_cost_id=data.pricing_unit_cost_id, + user_id=data.purchased_by_user, + user_email=data.user_email, + osparc_credits=make_negative(data.pricing_unit_cost), + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.DEDUCT_LICENSE_PURCHASE, + service_run_id=None, + payment_transaction_id=None, + licensed_item_purchase_id=licensed_item_purchase_db.licensed_item_purchase_id, + created_at=data.start_at, + last_heartbeat_at=data.start_at, + ) + await credit_transactions_db.create_credit_transaction( + db_engine, connection=conn, data=transaction_create + ) + + # Publish wallet total credits to RabbitMQ + await sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=data.product_name, + wallet_id=data.wallet_id, + ) + + return LicensedItemPurchaseGet( + licensed_item_purchase_id=licensed_item_purchase_db.licensed_item_purchase_id, + product_name=licensed_item_purchase_db.product_name, + licensed_item_id=licensed_item_purchase_db.licensed_item_id, + key=licensed_item_purchase_db.key, + version=licensed_item_purchase_db.version, + wallet_id=licensed_item_purchase_db.wallet_id, + wallet_name=licensed_item_purchase_db.wallet_name, + pricing_unit_cost_id=licensed_item_purchase_db.pricing_unit_cost_id, + pricing_unit_cost=licensed_item_purchase_db.pricing_unit_cost, + start_at=licensed_item_purchase_db.start_at, + expire_at=licensed_item_purchase_db.expire_at, + num_of_seats=licensed_item_purchase_db.num_of_seats, + purchased_by_user=licensed_item_purchase_db.purchased_by_user, + user_email=licensed_item_purchase_db.user_email, + 
purchased_at=licensed_item_purchase_db.purchased_at, + modified=licensed_item_purchase_db.modified, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/__init__.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/__init__.py new file mode 100644 index 00000000000..1ccd94f436e --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/__init__.py @@ -0,0 +1,28 @@ +import logging + +from fastapi import FastAPI +from servicelib.fastapi.db_asyncpg_engine import close_db_connection, connect_to_db +from servicelib.logging_utils import log_context + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI): + async def on_startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT startup DB", + ): + await connect_to_db(app, app.state.settings.RESOURCE_USAGE_TRACKER_POSTGRES) + + async def on_shutdown() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT shutdown DB", + ): + await close_db_connection(app) + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py new file mode 100644 index 00000000000..b9b5f3569c5 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/credit_transactions_db.py @@ -0,0 +1,233 @@ +import logging +from decimal import Decimal +from typing import cast + +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import CreditTransactionId, CreditTransactionStatus +from models_library.services_types import ServiceRunID +from models_library.wallets import WalletID +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + CreditTransactionNotFoundError, +) +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.engine import CursorResult +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import CreditTransactionNotCreatedDBError +from ....models.credit_transactions import ( + CreditTransactionCreate, + CreditTransactionCreditsAndStatusUpdate, + CreditTransactionCreditsUpdate, +) + +_logger = logging.getLogger(__name__) + + +async def create_credit_transaction( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreate, +) -> CreditTransactionId: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + 
resource_tracker_credit_transactions.insert() + .values( + product_name=data.product_name, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_cost_id=data.pricing_unit_cost_id, + user_id=data.user_id, + user_email=data.user_email, + osparc_credits=data.osparc_credits, + transaction_status=data.transaction_status, + transaction_classification=data.transaction_classification, + service_run_id=data.service_run_id, + payment_transaction_id=data.payment_transaction_id, + licensed_item_purchase_id=data.licensed_item_purchase_id, + created=data.created_at, + last_heartbeat_at=data.last_heartbeat_at, + modified=sa.func.now(), + ) + .returning(resource_tracker_credit_transactions.c.transaction_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise CreditTransactionNotCreatedDBError(data=data) + return cast(CreditTransactionId, row[0]) + + +async def update_credit_transaction_credits( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreditsUpdate, +) -> CreditTransactionId | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_credit_transactions.update() + .values( + modified=sa.func.now(), + osparc_credits=data.osparc_credits, + last_heartbeat_at=data.last_heartbeat_at, + ) + .where( + ( + resource_tracker_credit_transactions.c.service_run_id + == data.service_run_id + ) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.PENDING + ) + & ( + resource_tracker_credit_transactions.c.last_heartbeat_at + <= data.last_heartbeat_at + ) + ) + .returning(resource_tracker_credit_transactions.c.service_run_id) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return cast(CreditTransactionId | None, row[0]) + + +async def update_credit_transaction_credits_and_status( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreditTransactionCreditsAndStatusUpdate, +) -> CreditTransactionId | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_credit_transactions.update() + .values( + modified=sa.func.now(), + osparc_credits=data.osparc_credits, + transaction_status=data.transaction_status, + ) + .where( + ( + resource_tracker_credit_transactions.c.service_run_id + == data.service_run_id + ) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.PENDING + ) + ) + .returning(resource_tracker_credit_transactions.c.service_run_id) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return cast(CreditTransactionId | None, row[0]) + + +async def batch_update_credit_transaction_status_for_in_debt_transactions( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + project_id: ProjectID | None = None, + wallet_id: WalletID, + transaction_status: CreditTransactionStatus, +) -> None: + update_stmt = ( + resource_tracker_credit_transactions.update() + .values( + modified=sa.func.now(), + transaction_status=transaction_status, + ) + .where( + (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + & ( + resource_tracker_credit_transactions.c.transaction_status + == CreditTransactionStatus.IN_DEBT + ) + ) + ) + + if project_id: + update_stmt = update_stmt.where( + 
resource_tracker_service_runs.c.project_id == f"{project_id}" + ) + async with transaction_context(engine, connection) as conn: + result = await conn.execute(update_stmt) + # NOTE: see https://docs.sqlalchemy.org/en/20/tutorial/data_update.html#getting-affected-row-count-from-update-delete + assert isinstance(result, CursorResult) # nosec + if result.rowcount: + _logger.info( + "Wallet %s and project %s transactions in DEBT were changed to BILLED. Num. of transaction %s", + wallet_id, + project_id, + result.rowcount, + ) + + +async def sum_wallet_credits( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + wallet_id: WalletID, +) -> WalletTotalCredits: + async with transaction_context(engine, connection) as conn: + sum_stmt = sa.select( + sa.func.sum(resource_tracker_credit_transactions.c.osparc_credits) + ).where( + (resource_tracker_credit_transactions.c.product_name == product_name) + & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + & ( + resource_tracker_credit_transactions.c.transaction_status.in_( + [ + CreditTransactionStatus.BILLED, + CreditTransactionStatus.PENDING, + CreditTransactionStatus.IN_DEBT, + ] + ) + ) + ) + + result = await conn.execute(sum_stmt) + row = result.first() + if row is None or row[0] is None: + return WalletTotalCredits( + wallet_id=wallet_id, available_osparc_credits=Decimal(0) + ) + return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0]) + + +async def get_transaction_current_credits_by_service_run_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunID, +) -> Decimal: + async with transaction_context(engine, connection) as conn: + select_stmt = sa.select( + resource_tracker_credit_transactions.c.osparc_credits + ).where( + resource_tracker_credit_transactions.c.service_run_id == f"{service_run_id}" + ) + result = await conn.execute(select_stmt) + row = result.one_or_none() + if row is None: + raise CreditTransactionNotFoundError(service_run_id=service_run_id) + return Decimal(row[0]) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_checkouts_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_checkouts_db.py new file mode 100644 index 00000000000..96d98359cb6 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_checkouts_db.py @@ -0,0 +1,274 @@ +import logging +from datetime import datetime +from typing import cast + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_checkouts import ( + LicensedItemCheckoutID, +) +from models_library.rest_ordering import OrderBy, OrderDirection +from models_library.services_types import ServiceRunID +from models_library.wallets import WalletID +from pydantic import NonNegativeInt +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + LicensedItemCheckoutNotFoundError, +) +from simcore_postgres_database.models.resource_tracker_licensed_items_checkouts import ( + resource_tracker_licensed_items_checkouts, +) +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....models.licensed_items_checkouts import ( + CreateLicensedItemCheckoutDB, + 
LicensedItemCheckoutDB, +) +from . import utils as db_utils + +_logger = logging.getLogger(__name__) + + +_SELECTION_ARGS = ( + resource_tracker_licensed_items_checkouts.c.licensed_item_checkout_id, + resource_tracker_licensed_items_checkouts.c.licensed_item_id, + resource_tracker_licensed_items_checkouts.c.key, + resource_tracker_licensed_items_checkouts.c.version, + resource_tracker_licensed_items_checkouts.c.wallet_id, + resource_tracker_licensed_items_checkouts.c.user_id, + resource_tracker_licensed_items_checkouts.c.user_email, + resource_tracker_licensed_items_checkouts.c.product_name, + resource_tracker_licensed_items_checkouts.c.service_run_id, + resource_tracker_licensed_items_checkouts.c.started_at, + resource_tracker_licensed_items_checkouts.c.stopped_at, + resource_tracker_licensed_items_checkouts.c.num_of_seats, + resource_tracker_licensed_items_checkouts.c.modified, +) + +assert set(LicensedItemCheckoutDB.model_fields) == { + c.name for c in _SELECTION_ARGS +} # nosec + + +async def create( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreateLicensedItemCheckoutDB, +) -> LicensedItemCheckoutDB: + async with transaction_context(engine, connection) as conn: + result = await conn.execute( + resource_tracker_licensed_items_checkouts.insert() + .values( + licensed_item_id=data.licensed_item_id, + key=data.key, + version=data.version, + wallet_id=data.wallet_id, + user_id=data.user_id, + user_email=data.user_email, + product_name=data.product_name, + service_run_id=data.service_run_id, + started_at=data.started_at, + stopped_at=None, + num_of_seats=data.num_of_seats, + modified=sa.func.now(), + ) + .returning(*_SELECTION_ARGS) + ) + row = result.first() + return LicensedItemCheckoutDB.model_validate(row) + + +async def list_( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: NonNegativeInt, + limit: NonNegativeInt, + order_by: OrderBy, +) -> tuple[int, list[LicensedItemCheckoutDB]]: + base_query = ( + sa.select(*_SELECTION_ARGS) + .select_from(resource_tracker_licensed_items_checkouts) + .where( + (resource_tracker_licensed_items_checkouts.c.product_name == product_name) + & ( + resource_tracker_licensed_items_checkouts.c.wallet_id + == filter_wallet_id + ) + ) + ) + + # Select total count from base_query + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + + # Ordering and pagination + if order_by.direction == OrderDirection.ASC: + list_query = base_query.order_by( + sa.asc( + getattr(resource_tracker_licensed_items_checkouts.c, order_by.field) + ), + resource_tracker_licensed_items_checkouts.c.licensed_item_checkout_id, + ) + else: + list_query = base_query.order_by( + sa.desc( + getattr( + resource_tracker_licensed_items_checkouts.c, + order_by.field, + resource_tracker_licensed_items_checkouts.c.licensed_item_checkout_id, + ) + ) + ) + list_query = list_query.offset(offset).limit(limit) + + async with pass_or_acquire_connection(engine, connection) as conn: + total_count = await conn.scalar(count_query) + if total_count is None: + total_count = 0 + + result = await conn.stream(list_query) + items: list[LicensedItemCheckoutDB] = [ + LicensedItemCheckoutDB.model_validate(row) async for row in result + ] + + return cast(int, total_count), items + + +async def get( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + licensed_item_checkout_id: LicensedItemCheckoutID, + product_name: 
ProductName, +) -> LicensedItemCheckoutDB: + base_query = ( + sa.select(*_SELECTION_ARGS) + .select_from(resource_tracker_licensed_items_checkouts) + .where( + ( + resource_tracker_licensed_items_checkouts.c.licensed_item_checkout_id + == licensed_item_checkout_id + ) + & (resource_tracker_licensed_items_checkouts.c.product_name == product_name) + ) + ) + + async with pass_or_acquire_connection(engine, connection) as conn: + result = await conn.stream(base_query) + row = await result.first() + if row is None: + raise LicensedItemCheckoutNotFoundError( + licensed_item_checkout_id=licensed_item_checkout_id + ) + return LicensedItemCheckoutDB.model_validate(row) + + +async def update( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + licensed_item_checkout_id: LicensedItemCheckoutID, + product_name: ProductName, + stopped_at: datetime, +) -> LicensedItemCheckoutDB: + update_stmt = ( + resource_tracker_licensed_items_checkouts.update() + .values( + modified=sa.func.now(), + stopped_at=stopped_at, + ) + .where( + ( + resource_tracker_licensed_items_checkouts.c.licensed_item_checkout_id + == licensed_item_checkout_id + ) + & (resource_tracker_licensed_items_checkouts.c.product_name == product_name) + & (resource_tracker_licensed_items_checkouts.c.stopped_at.is_(None)) + ) + .returning(sa.literal_column("*")) + ) + + async with transaction_context(engine, connection) as conn: + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + raise LicensedItemCheckoutNotFoundError( + licensed_item_checkout_id=licensed_item_checkout_id + ) + return LicensedItemCheckoutDB.model_validate(row) + + +async def get_currently_used_seats_for_key_version_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + key: str, + version: str, + wallet_id: WalletID, + product_name: ProductName, +) -> int: + + sum_stmt = sa.select( + sa.func.sum(resource_tracker_licensed_items_checkouts.c.num_of_seats) + ).where( + (resource_tracker_licensed_items_checkouts.c.wallet_id == wallet_id) + & (resource_tracker_licensed_items_checkouts.c.key == key) + # If purchased version >= requested version, it covers that version + & ( + db_utils.version(resource_tracker_licensed_items_checkouts.c.version) + >= db_utils.version(version) + ) + & (resource_tracker_licensed_items_checkouts.c.product_name == product_name) + & (resource_tracker_licensed_items_checkouts.c.stopped_at.is_(None)) + ) + + async with pass_or_acquire_connection(engine, connection) as conn: + total_sum = await conn.scalar(sum_stmt) + if total_sum is None: + return 0 + return cast(int, total_sum) + + +async def force_release_license_seats_by_run_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunID, +) -> None: + """ + Purpose: This function is utilized by a periodic heartbeat check task that monitors whether running services are + sending heartbeat signals. If heartbeat signals are not received within a specified timeframe and a service is + deemed unhealthy, this function ensures the proper release of any licensed seats that were not correctly released by + the unhealthy service. + Currently, this functionality is primarily used to handle the release of a single seat allocated to the VIP model. 
+ """ + update_stmt = ( + resource_tracker_licensed_items_checkouts.update() + .values( + modified=sa.func.now(), + stopped_at=sa.func.now(), + ) + .where( + ( + resource_tracker_licensed_items_checkouts.c.service_run_id + == service_run_id + ) + & (resource_tracker_licensed_items_checkouts.c.stopped_at.is_(None)) + ) + .returning(sa.literal_column("*")) + ) + + async with transaction_context(engine, connection) as conn: + result = await conn.execute(update_stmt) + released_seats = result.fetchall() + if released_seats: + _logger.error( + "Force release of %s seats: %s", len(released_seats), released_seats + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_purchases_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_purchases_db.py new file mode 100644 index 00000000000..36cffd230a0 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/licensed_items_purchases_db.py @@ -0,0 +1,202 @@ +from datetime import UTC, datetime +from typing import cast + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemPurchaseID, +) +from models_library.rest_ordering import OrderBy, OrderDirection +from models_library.wallets import WalletID +from pydantic import NonNegativeInt +from simcore_postgres_database.models.resource_tracker_licensed_items_purchases import ( + resource_tracker_licensed_items_purchases, +) +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import LicensedItemPurchaseNotFoundError +from ....models.licensed_items_purchases import ( + CreateLicensedItemsPurchasesDB, + LicensedItemsPurchasesDB, +) +from . 
import utils as db_utils + +_SELECTION_ARGS = ( + resource_tracker_licensed_items_purchases.c.licensed_item_purchase_id, + resource_tracker_licensed_items_purchases.c.product_name, + resource_tracker_licensed_items_purchases.c.licensed_item_id, + resource_tracker_licensed_items_purchases.c.key, + resource_tracker_licensed_items_purchases.c.version, + resource_tracker_licensed_items_purchases.c.wallet_id, + resource_tracker_licensed_items_purchases.c.wallet_name, + resource_tracker_licensed_items_purchases.c.pricing_unit_cost_id, + resource_tracker_licensed_items_purchases.c.pricing_unit_cost, + resource_tracker_licensed_items_purchases.c.start_at, + resource_tracker_licensed_items_purchases.c.expire_at, + resource_tracker_licensed_items_purchases.c.num_of_seats, + resource_tracker_licensed_items_purchases.c.purchased_by_user, + resource_tracker_licensed_items_purchases.c.user_email, + resource_tracker_licensed_items_purchases.c.purchased_at, + resource_tracker_licensed_items_purchases.c.modified, +) + +assert set(LicensedItemsPurchasesDB.model_fields) == { + c.name for c in _SELECTION_ARGS +} # nosec + + +async def create( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: CreateLicensedItemsPurchasesDB, +) -> LicensedItemsPurchasesDB: + async with transaction_context(engine, connection) as conn: + result = await conn.execute( + resource_tracker_licensed_items_purchases.insert() + .values( + product_name=data.product_name, + licensed_item_id=data.licensed_item_id, + key=data.key, + version=data.version, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_unit_cost_id=data.pricing_unit_cost_id, + pricing_unit_cost=data.pricing_unit_cost, + start_at=data.start_at, + expire_at=data.expire_at, + num_of_seats=data.num_of_seats, + purchased_by_user=data.purchased_by_user, + user_email=data.user_email, + purchased_at=data.purchased_at, + modified=sa.func.now(), + ) + .returning(*_SELECTION_ARGS) + ) + row = result.first() + return LicensedItemsPurchasesDB.model_validate(row) + + +async def list_( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + filter_wallet_id: WalletID, + offset: NonNegativeInt, + limit: NonNegativeInt, + order_by: OrderBy, +) -> tuple[int, list[LicensedItemsPurchasesDB]]: + base_query = ( + sa.select(*_SELECTION_ARGS) + .select_from(resource_tracker_licensed_items_purchases) + .where( + (resource_tracker_licensed_items_purchases.c.product_name == product_name) + & ( + resource_tracker_licensed_items_purchases.c.wallet_id + == filter_wallet_id + ) + ) + ) + + # Select total count from base_query + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + + # Ordering and pagination + if order_by.direction == OrderDirection.ASC: + list_query = base_query.order_by( + sa.asc( + getattr(resource_tracker_licensed_items_purchases.c, order_by.field) + ), + resource_tracker_licensed_items_purchases.c.licensed_item_purchase_id, + ) + else: + list_query = base_query.order_by( + sa.desc( + getattr(resource_tracker_licensed_items_purchases.c, order_by.field) + ), + resource_tracker_licensed_items_purchases.c.licensed_item_purchase_id, + ) + list_query = list_query.offset(offset).limit(limit) + + async with pass_or_acquire_connection(engine, connection) as conn: + total_count = await conn.scalar(count_query) + if total_count is None: + total_count = 0 + + result = await conn.stream(list_query) + items: list[LicensedItemsPurchasesDB] = [ + 
LicensedItemsPurchasesDB.model_validate(row) async for row in result + ] + + return cast(int, total_count), items + + +async def get( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + licensed_item_purchase_id: LicensedItemPurchaseID, + product_name: ProductName, +) -> LicensedItemsPurchasesDB: + base_query = ( + sa.select(*_SELECTION_ARGS) + .select_from(resource_tracker_licensed_items_purchases) + .where( + ( + resource_tracker_licensed_items_purchases.c.licensed_item_purchase_id + == licensed_item_purchase_id + ) + & (resource_tracker_licensed_items_purchases.c.product_name == product_name) + ) + ) + + async with pass_or_acquire_connection(engine, connection) as conn: + result = await conn.stream(base_query) + row = await result.first() + if row is None: + raise LicensedItemPurchaseNotFoundError( + licensed_item_purchase_id=licensed_item_purchase_id + ) + return LicensedItemsPurchasesDB.model_validate(row) + + +async def get_active_purchased_seats_for_key_version_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + key: str, + version: str, + wallet_id: WalletID, + product_name: ProductName, +) -> int: + """ + Exclude expired seats + """ + _current_time = datetime.now(tz=UTC) + + sum_stmt = sa.select( + sa.func.sum(resource_tracker_licensed_items_purchases.c.num_of_seats) + ).where( + (resource_tracker_licensed_items_purchases.c.wallet_id == wallet_id) + & (resource_tracker_licensed_items_purchases.c.key == key) + # If purchased version >= requested version, it covers that version + & ( + db_utils.version(resource_tracker_licensed_items_purchases.c.version) + >= db_utils.version(version) + ) + & (resource_tracker_licensed_items_purchases.c.product_name == product_name) + & (resource_tracker_licensed_items_purchases.c.start_at <= _current_time) + & (resource_tracker_licensed_items_purchases.c.expire_at >= _current_time) + ) + + async with pass_or_acquire_connection(engine, connection) as conn: + total_sum = await conn.scalar(sum_stmt) + if total_sum is None: + return 0 + return cast(int, total_sum) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py new file mode 100644 index 00000000000..397ba7e94e1 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/pricing_plans_db.py @@ -0,0 +1,701 @@ +import logging +from typing import cast + +import sqlalchemy as sa +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanCreate, + PricingPlanId, + PricingPlanUpdate, + PricingUnitCostId, + PricingUnitId, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, +) +from models_library.services import ServiceKey, ServiceVersion +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + PricingUnitDuplicationError, +) +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from 
simcore_postgres_database.utils_repos import transaction_context +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER +from sqlalchemy.exc import IntegrityError as SqlAlchemyIntegrityError +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import ( + PricingPlanAndPricingUnitCombinationDoesNotExistsDBError, + PricingPlanDoesNotExistsDBError, + PricingPlanNotCreatedDBError, + PricingPlanToServiceNotCreatedDBError, + PricingUnitCostDoesNotExistsDBError, + PricingUnitCostNotCreatedDBError, + PricingUnitNotCreatedDBError, +) +from ....models.pricing_plans import ( + PricingPlansDB, + PricingPlansWithServiceDefaultPlanDB, + PricingPlanToServiceDB, +) +from ....models.pricing_unit_costs import PricingUnitCostsDB +from ....models.pricing_units import PricingUnitsDB + +_logger = logging.getLogger(__name__) + + +################################# +# Pricing plans +################################# + + +async def list_active_service_pricing_plans_by_product_and_service( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> list[PricingPlansWithServiceDefaultPlanDB]: + # NOTE: consilidate with utils_services_environmnets.py + def _version(column_or_value): + # converts version value string to array[integer] that can be compared + return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) + + async with transaction_context(engine, connection) as conn: + # Firstly find the correct service version + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + ( + _version(resource_tracker_pricing_plan_to_service.c.service_version) + <= _version(service_version) + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.is_active.is_(True)) + ) + .order_by( + _version( + resource_tracker_pricing_plan_to_service.c.service_version + ).desc() + ) + .limit(1) + ) + result = await conn.execute(query) + row = result.first() + if row is None: + return [] + latest_service_key, latest_service_version = row + # Now choose all pricing plans connected to this service + query = ( + sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + resource_tracker_pricing_plan_to_service.c.service_default_plan, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + ( + _version(resource_tracker_pricing_plan_to_service.c.service_version) + == _version(latest_service_version) + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == latest_service_key + ) + & 
(resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.is_active.is_(True)) + ) + .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()) + ) + result = await conn.execute(query) + + return [ + PricingPlansWithServiceDefaultPlanDB.model_validate(row) + for row in result.fetchall() + ] + + +async def get_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> PricingPlansDB: + async with transaction_context(engine, connection) as conn: + select_stmt = sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ).where( + (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + result = await conn.execute(select_stmt) + row = result.first() + if row is None: + raise PricingPlanDoesNotExistsDBError(pricing_plan_id=pricing_plan_id) + return PricingPlansDB.model_validate(row) + + +async def list_pricing_plans_by_product( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + exclude_inactive: bool, + # pagination + offset: int, + limit: int, +) -> tuple[int, list[PricingPlansDB]]: + async with transaction_context(engine, connection) as conn: + base_query = sa.select( + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ).where(resource_tracker_pricing_plans.c.product_name == product_name) + + if exclude_inactive is True: + base_query = base_query.where( + resource_tracker_pricing_plans.c.is_active.is_(True) + ) + + # Select total count from base_query + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + + # Default ordering + list_query = base_query.order_by( + resource_tracker_pricing_plans.c.created.asc(), + resource_tracker_pricing_plans.c.pricing_plan_id, + ) + + total_count = await conn.scalar(count_query) + if total_count is None: + total_count = 0 + + result = await conn.execute(list_query.offset(offset).limit(limit)) + + items = [PricingPlansDB.model_validate(row) for row in result.fetchall()] + return cast(int, total_count), items + + +async def create_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingPlanCreate, +) -> PricingPlansDB: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + resource_tracker_pricing_plans.insert() + .values( + product_name=data.product_name, + display_name=data.display_name, + description=data.description, + classification=data.classification, + is_active=True, + created=sa.func.now(), + modified=sa.func.now(), + pricing_plan_key=data.pricing_plan_key, + ) + .returning( + *[ + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + 
resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ] + ) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingPlanNotCreatedDBError(data=data) + return PricingPlansDB.model_validate(row) + + +async def update_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + data: PricingPlanUpdate, +) -> PricingPlansDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_pricing_plans.update() + .values( + display_name=data.display_name, + description=data.description, + is_active=data.is_active, + modified=sa.func.now(), + ) + .where( + ( + resource_tracker_pricing_plans.c.pricing_plan_id + == data.pricing_plan_id + ) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + .returning( + *[ + resource_tracker_pricing_plans.c.pricing_plan_id, + resource_tracker_pricing_plans.c.display_name, + resource_tracker_pricing_plans.c.description, + resource_tracker_pricing_plans.c.classification, + resource_tracker_pricing_plans.c.is_active, + resource_tracker_pricing_plans.c.created, + resource_tracker_pricing_plans.c.pricing_plan_key, + ] + ) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return PricingPlansDB.model_validate(row) + + +################################# +# Pricing plan to service +################################# + + +async def list_connected_services_to_pricing_plan_by_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, +) -> list[PricingPlanToServiceDB]: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + resource_tracker_pricing_plan_to_service.c.created, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + ) + .order_by(resource_tracker_pricing_plan_to_service.c.pricing_plan_id.desc()) + ) + result = await conn.execute(query) + + return [PricingPlanToServiceDB.model_validate(row) for row in result.fetchall()] + + +async def upsert_service_to_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + service_key: ServiceKey, + service_version: ServiceVersion, +) -> PricingPlanToServiceDB: + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + resource_tracker_pricing_plan_to_service.c.created, + ) + .select_from( + resource_tracker_pricing_plan_to_service.join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plan_to_service.c.pricing_plan_id + == 
resource_tracker_pricing_plans.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_plans.c.product_name == product_name) + & (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_version + == service_version + ) + ) + ) + result = await conn.execute(query) + row = result.first() + + if row is not None: + delete_stmt = resource_tracker_pricing_plan_to_service.delete().where( + (resource_tracker_pricing_plans.c.pricing_plan_id == pricing_plan_id) + & ( + resource_tracker_pricing_plan_to_service.c.service_key + == service_key + ) + & ( + resource_tracker_pricing_plan_to_service.c.service_version + == service_version + ) + ) + await conn.execute(delete_stmt) + + insert_stmt = ( + resource_tracker_pricing_plan_to_service.insert() + .values( + pricing_plan_id=pricing_plan_id, + service_key=service_key, + service_version=service_version, + created=sa.func.now(), + modified=sa.func.now(), + service_default_plan=True, + ) + .returning( + *[ + resource_tracker_pricing_plan_to_service.c.pricing_plan_id, + resource_tracker_pricing_plan_to_service.c.service_key, + resource_tracker_pricing_plan_to_service.c.service_version, + resource_tracker_pricing_plan_to_service.c.created, + ] + ) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingPlanToServiceNotCreatedDBError( + data=f"pricing_plan_id {pricing_plan_id}, service_key {service_key}, service_version {service_version}" + ) + return PricingPlanToServiceDB.model_validate(row) + + +################################# +# Pricing units +################################# + + +def _pricing_units_select_stmt(): + return sa.select( + resource_tracker_pricing_units.c.pricing_unit_id, + resource_tracker_pricing_units.c.pricing_plan_id, + resource_tracker_pricing_units.c.unit_name, + resource_tracker_pricing_units.c.unit_extra_info, + resource_tracker_pricing_units.c.default, + resource_tracker_pricing_units.c.specific_info, + resource_tracker_pricing_units.c.created, + resource_tracker_pricing_units.c.modified, + resource_tracker_pricing_unit_costs.c.cost_per_unit.label( + "current_cost_per_unit" + ), + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id.label( + "current_cost_per_unit_id" + ), + ) + + +async def list_pricing_units_by_pricing_plan( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + pricing_plan_id: PricingPlanId, +) -> list[PricingUnitsDB]: + async with transaction_context(engine, connection) as conn: + query = ( + _pricing_units_select_stmt() + .select_from( + resource_tracker_pricing_units.join( + resource_tracker_pricing_unit_costs, + ( + ( + resource_tracker_pricing_units.c.pricing_plan_id + == resource_tracker_pricing_unit_costs.c.pricing_plan_id + ) + & ( + resource_tracker_pricing_units.c.pricing_unit_id + == resource_tracker_pricing_unit_costs.c.pricing_unit_id + ) + ), + ) + ) + .where( + (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) + ) + .order_by(resource_tracker_pricing_unit_costs.c.cost_per_unit.asc()) + ) + result = await conn.execute(query) + + return [PricingUnitsDB.model_validate(row) for row in result.fetchall()] + + +async def get_valid_pricing_unit( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + pricing_plan_id: PricingPlanId, + 
pricing_unit_id: PricingUnitId, +) -> PricingUnitsDB: + async with transaction_context(engine, connection) as conn: + query = ( + _pricing_units_select_stmt() + .select_from( + resource_tracker_pricing_units.join( + resource_tracker_pricing_unit_costs, + ( + ( + resource_tracker_pricing_units.c.pricing_plan_id + == resource_tracker_pricing_unit_costs.c.pricing_plan_id + ) + & ( + resource_tracker_pricing_units.c.pricing_unit_id + == resource_tracker_pricing_unit_costs.c.pricing_unit_id + ) + ), + ).join( + resource_tracker_pricing_plans, + ( + resource_tracker_pricing_plans.c.pricing_plan_id + == resource_tracker_pricing_units.c.pricing_plan_id + ), + ) + ) + .where( + (resource_tracker_pricing_units.c.pricing_plan_id == pricing_plan_id) + & (resource_tracker_pricing_units.c.pricing_unit_id == pricing_unit_id) + & (resource_tracker_pricing_unit_costs.c.valid_to.is_(None)) + & (resource_tracker_pricing_plans.c.product_name == product_name) + ) + ) + result = await conn.execute(query) + + row = result.first() + if row is None: + raise PricingPlanAndPricingUnitCombinationDoesNotExistsDBError( + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + product_name=product_name, + ) + return PricingUnitsDB.model_validate(row) + + +async def create_pricing_unit_with_cost( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingUnitWithCostCreate, + pricing_plan_key: str, +) -> tuple[PricingUnitId, PricingUnitCostId]: + async with transaction_context(engine, connection) as conn: + # pricing units table + insert_stmt = ( + resource_tracker_pricing_units.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + unit_name=data.unit_name, + unit_extra_info=data.unit_extra_info.model_dump(), + default=data.default, + specific_info=data.specific_info.model_dump(), + created=sa.func.now(), + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_units.c.pricing_unit_id) + ) + try: + result = await conn.execute(insert_stmt) + except SqlAlchemyIntegrityError as exc: + raise PricingUnitDuplicationError from exc + row = result.first() + if row is None: + raise PricingUnitNotCreatedDBError(data=data) + _pricing_unit_id = row[0] + + # pricing unit cost table + insert_stmt = ( + resource_tracker_pricing_unit_costs.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + pricing_plan_key=pricing_plan_key, + pricing_unit_id=_pricing_unit_id, + pricing_unit_name=data.unit_name, + cost_per_unit=data.cost_per_unit, + valid_from=sa.func.now(), + valid_to=None, + created=sa.func.now(), + comment=data.comment, + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingUnitCostNotCreatedDBError(data=data) + _pricing_unit_cost_id = row[0] + + return (_pricing_unit_id, _pricing_unit_cost_id) + + +async def update_pricing_unit_with_cost( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: PricingUnitWithCostUpdate, + pricing_plan_key: str, +) -> None: + async with transaction_context(engine, connection) as conn: + # pricing units table + update_stmt = ( + resource_tracker_pricing_units.update() + .values( + unit_name=data.unit_name, + unit_extra_info=data.unit_extra_info.model_dump(), + default=data.default, + specific_info=data.specific_info.model_dump(), + modified=sa.func.now(), + ) + .where( + resource_tracker_pricing_units.c.pricing_unit_id == data.pricing_unit_id + ) + 
.returning(resource_tracker_pricing_units.c.pricing_unit_id) + ) + await conn.execute(update_stmt) + + # If price change, then we update pricing unit cost table + if data.pricing_unit_cost_update: + # Firstly we close previous price + update_stmt = ( + resource_tracker_pricing_unit_costs.update() + .values( + valid_to=sa.func.now(), # <-- Closing previous price + modified=sa.func.now(), + ) + .where( + resource_tracker_pricing_unit_costs.c.pricing_unit_id + == data.pricing_unit_id + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_id) + ) + result = await conn.execute(update_stmt) + + # Then we create a new price + insert_stmt = ( + resource_tracker_pricing_unit_costs.insert() + .values( + pricing_plan_id=data.pricing_plan_id, + pricing_plan_key=pricing_plan_key, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_name=data.unit_name, + cost_per_unit=data.pricing_unit_cost_update.cost_per_unit, + valid_from=sa.func.now(), + valid_to=None, # <-- New price is valid + created=sa.func.now(), + comment=data.pricing_unit_cost_update.comment, + modified=sa.func.now(), + ) + .returning(resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise PricingUnitCostNotCreatedDBError(data=data) + + +################################# +# Pricing unit-costs +################################# + + +async def get_pricing_unit_cost_by_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + pricing_unit_cost_id: PricingUnitCostId, +) -> PricingUnitCostsDB: + async with transaction_context(engine, connection) as conn: + query = sa.select( + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id, + resource_tracker_pricing_unit_costs.c.pricing_plan_id, + resource_tracker_pricing_unit_costs.c.pricing_plan_key, + resource_tracker_pricing_unit_costs.c.pricing_unit_id, + resource_tracker_pricing_unit_costs.c.pricing_unit_name, + resource_tracker_pricing_unit_costs.c.cost_per_unit, + resource_tracker_pricing_unit_costs.c.valid_from, + resource_tracker_pricing_unit_costs.c.valid_to, + resource_tracker_pricing_unit_costs.c.created, + resource_tracker_pricing_unit_costs.c.comment, + resource_tracker_pricing_unit_costs.c.modified, + ).where( + resource_tracker_pricing_unit_costs.c.pricing_unit_cost_id + == pricing_unit_cost_id + ) + result = await conn.execute(query) + + row = result.first() + if row is None: + raise PricingUnitCostDoesNotExistsDBError( + pricing_unit_cost_id=pricing_unit_cost_id + ) + return PricingUnitCostsDB.model_validate(row) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py new file mode 100644 index 00000000000..ed19570b523 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/service_runs_db.py @@ -0,0 +1,727 @@ +import logging +from datetime import datetime + +# pylint: disable=too-many-arguments +from decimal import Decimal +from typing import cast + +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.api_schemas_storage.storage_schemas import S3BucketName +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import ( + 
CreditClassification, + CreditTransactionStatus, + ServiceRunStatus, +) +from models_library.rest_ordering import OrderBy, OrderDirection +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import PositiveInt +from simcore_postgres_database.models.projects_tags import projects_tags +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_postgres_database.models.tags import tags +from simcore_postgres_database.utils_repos import ( + pass_or_acquire_connection, + transaction_context, +) +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from ....exceptions.errors import ServiceRunNotCreatedDBError +from ....models.service_runs import ( + OsparcCreditsAggregatedByServiceKeyDB, + ServiceRunCreate, + ServiceRunDB, + ServiceRunForCheckDB, + ServiceRunLastHeartbeatUpdate, + ServiceRunStoppedAtUpdate, + ServiceRunWithCreditsDB, +) + +_logger = logging.getLogger(__name__) + + +async def create_service_run( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunCreate, +) -> ServiceRunID: + async with transaction_context(engine, connection) as conn: + insert_stmt = ( + resource_tracker_service_runs.insert() + .values( + product_name=data.product_name, + service_run_id=data.service_run_id, + wallet_id=data.wallet_id, + wallet_name=data.wallet_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + pricing_unit_cost_id=data.pricing_unit_cost_id, + pricing_unit_cost=data.pricing_unit_cost, + simcore_user_agent=data.simcore_user_agent, + user_id=data.user_id, + user_email=data.user_email, + project_id=f"{data.project_id}", + project_name=data.project_name, + node_id=f"{data.node_id}", + node_name=data.node_name, + parent_project_id=f"{data.parent_project_id}", + root_parent_project_id=f"{data.root_parent_project_id}", + root_parent_project_name=data.root_parent_project_name, + parent_node_id=f"{data.parent_node_id}", + root_parent_node_id=f"{data.root_parent_node_id}", + service_key=data.service_key, + service_version=data.service_version, + service_type=data.service_type, + service_resources=data.service_resources, + service_additional_metadata=data.service_additional_metadata, + started_at=data.started_at, + stopped_at=None, + service_run_status=ServiceRunStatus.RUNNING, + modified=sa.func.now(), + last_heartbeat_at=data.last_heartbeat_at, + ) + .returning(resource_tracker_service_runs.c.service_run_id) + ) + result = await conn.execute(insert_stmt) + row = result.first() + if row is None: + raise ServiceRunNotCreatedDBError(data=data) + return cast(ServiceRunID, row[0]) + + +async def update_service_run_last_heartbeat( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunLastHeartbeatUpdate, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + last_heartbeat_at=data.last_heartbeat_at, + missed_heartbeat_counter=0, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == data.service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + & ( + resource_tracker_service_runs.c.last_heartbeat_at 
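+                    # only accept heartbeats that are not older than the one already stored, i.e. out-of-order heartbeat messages are ignored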
+ <= data.last_heartbeat_at + ) + ) + .returning(sa.literal_column("*")) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +async def update_service_run_stopped_at( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + data: ServiceRunStoppedAtUpdate, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + stopped_at=data.stopped_at, + service_run_status=data.service_run_status, + service_run_status_msg=data.service_run_status_msg, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == data.service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + ) + .returning(sa.literal_column("*")) + ) + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +async def get_service_run_by_id( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunID, +) -> ServiceRunDB | None: + async with pass_or_acquire_connection(engine, connection) as conn: + stmt = sa.select(resource_tracker_service_runs).where( + resource_tracker_service_runs.c.service_run_id == service_run_id + ) + result = await conn.execute(stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) + + +_project_tags_subquery = ( + sa.select( + projects_tags.c.project_uuid_for_rut, + sa.func.array_agg(tags.c.name).label("project_tags"), + ) + .select_from(projects_tags.join(tags, projects_tags.c.tag_id == tags.c.id)) + .group_by(projects_tags.c.project_uuid_for_rut) +).subquery("project_tags_subquery") + + +async def list_service_runs_by_product_and_user_and_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + user_id: UserID | None, + wallet_id: WalletID | None, + # attribute filtering + service_run_status: ServiceRunStatus | None = None, + started_from: datetime | None = None, + started_until: datetime | None = None, + transaction_status: CreditTransactionStatus | None = None, + project_id: ProjectID | None = None, + # pagination + offset: int, + limit: int, + # ordering + order_by: OrderBy | None = None, +) -> tuple[int, list[ServiceRunWithCreditsDB]]: + async with pass_or_acquire_connection(engine, connection) as conn: + base_query = ( + sa.select( + resource_tracker_service_runs.c.product_name, + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.wallet_id, + resource_tracker_service_runs.c.wallet_name, + resource_tracker_service_runs.c.pricing_plan_id, + resource_tracker_service_runs.c.pricing_unit_id, + resource_tracker_service_runs.c.pricing_unit_cost_id, + resource_tracker_service_runs.c.pricing_unit_cost, + resource_tracker_service_runs.c.user_id, + resource_tracker_service_runs.c.user_email, + resource_tracker_service_runs.c.project_id, + resource_tracker_service_runs.c.project_name, + resource_tracker_service_runs.c.node_id, + resource_tracker_service_runs.c.node_name, + resource_tracker_service_runs.c.parent_project_id, + resource_tracker_service_runs.c.root_parent_project_id, + resource_tracker_service_runs.c.root_parent_project_name, + resource_tracker_service_runs.c.parent_node_id, + resource_tracker_service_runs.c.root_parent_node_id, + 
resource_tracker_service_runs.c.service_key, + resource_tracker_service_runs.c.service_version, + resource_tracker_service_runs.c.service_type, + resource_tracker_service_runs.c.service_resources, + resource_tracker_service_runs.c.started_at, + resource_tracker_service_runs.c.stopped_at, + resource_tracker_service_runs.c.service_run_status, + resource_tracker_service_runs.c.modified, + resource_tracker_service_runs.c.last_heartbeat_at, + resource_tracker_service_runs.c.service_run_status_msg, + resource_tracker_service_runs.c.missed_heartbeat_counter, + resource_tracker_credit_transactions.c.osparc_credits, + resource_tracker_credit_transactions.c.transaction_status, + sa.func.coalesce( + _project_tags_subquery.c.project_tags, + sa.cast(sa.text("'{}'"), sa.ARRAY(sa.String)), + ).label("project_tags"), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + ( + resource_tracker_service_runs.c.product_name + == resource_tracker_credit_transactions.c.product_name + ) + & ( + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id + ), + isouter=True, + ).join( + _project_tags_subquery, + resource_tracker_service_runs.c.root_parent_project_id + == _project_tags_subquery.c.project_uuid_for_rut, + isouter=True, + ) + ) + .where(resource_tracker_service_runs.c.product_name == product_name) + ) + + if user_id: + base_query = base_query.where( + resource_tracker_service_runs.c.user_id == user_id + ) + if wallet_id: + base_query = base_query.where( + resource_tracker_service_runs.c.wallet_id == wallet_id + ) + if service_run_status: + base_query = base_query.where( + resource_tracker_service_runs.c.service_run_status == service_run_status + ) + if started_from: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + if project_id: + base_query = base_query.where( + resource_tracker_service_runs.c.project_id == f"{project_id}" + ) + if transaction_status: + base_query = base_query.where( + resource_tracker_credit_transactions.c.transaction_status + == transaction_status + ) + + # Select total count from base_query + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + + if order_by: + if order_by.direction == OrderDirection.ASC: + list_query = base_query.order_by(sa.asc(order_by.field)) + else: + list_query = base_query.order_by(sa.desc(order_by.field)) + else: + # Default ordering + list_query = base_query.order_by( + resource_tracker_service_runs.c.started_at.desc() + ) + + total_count = await conn.scalar(count_query) + if total_count is None: + total_count = 0 + + result = await conn.stream(list_query.offset(offset).limit(limit)) + items: list[ServiceRunWithCreditsDB] = [ + ServiceRunWithCreditsDB.model_validate(row) async for row in result + ] + + return cast(int, total_count), items + + +async def get_osparc_credits_aggregated_by_service( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + user_id: UserID | None, + wallet_id: WalletID, + offset: int, + limit: int, + started_from: datetime | None = None, + started_until: datetime | None = None, +) -> tuple[int, list[OsparcCreditsAggregatedByServiceKeyDB]]: + async with pass_or_acquire_connection(engine, connection) as conn: + base_query = ( + 
sa.select( + resource_tracker_service_runs.c.service_key, + sa.func.SUM( + resource_tracker_credit_transactions.c.osparc_credits + ).label("osparc_credits"), + sa.func.SUM( + sa.func.round( + ( + sa.func.extract( + "epoch", + resource_tracker_service_runs.c.stopped_at, + ) + - sa.func.extract( + "epoch", + resource_tracker_service_runs.c.started_at, + ) + ) + / 3600, + 2, + ) + ).label("running_time_in_hours"), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + ( + resource_tracker_service_runs.c.product_name + == resource_tracker_credit_transactions.c.product_name + ) + & ( + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id + ), + isouter=True, + ) + ) + .where( + (resource_tracker_service_runs.c.product_name == product_name) + & ( + resource_tracker_credit_transactions.c.transaction_status.in_( + [ + CreditTransactionStatus.BILLED, + CreditTransactionStatus.IN_DEBT, + ] + ) + ) + & ( + resource_tracker_credit_transactions.c.transaction_classification + == CreditClassification.DEDUCT_SERVICE_RUN + ) + & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + ) + .group_by(resource_tracker_service_runs.c.service_key) + ) + + if user_id: + base_query = base_query.where( + resource_tracker_service_runs.c.user_id == user_id + ) + if started_from: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + base_query = base_query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + + subquery = base_query.subquery() + count_query = sa.select(sa.func.count()).select_from(subquery) + count_result = await conn.scalar(count_query) + if count_result is None: + count_result = 0 + + # Default ordering and pagination + list_query = ( + base_query.order_by(resource_tracker_service_runs.c.service_key.asc()) + .offset(offset) + .limit(limit) + ) + list_result = await conn.execute(list_query) + + return ( + cast(int, count_result), + [ + OsparcCreditsAggregatedByServiceKeyDB.model_validate(row) + for row in list_result.fetchall() + ], + ) + + +async def sum_project_wallet_total_credits( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + wallet_id: WalletID, + project_id: ProjectID, + transaction_status: CreditTransactionStatus | None = None, +) -> WalletTotalCredits: + async with pass_or_acquire_connection(engine, connection) as conn: + sum_stmt = ( + sa.select( + sa.func.SUM(resource_tracker_credit_transactions.c.osparc_credits), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + ( + resource_tracker_service_runs.c.product_name + == resource_tracker_credit_transactions.c.product_name + ) + & ( + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id + ), + isouter=True, + ) + ) + .where( + (resource_tracker_service_runs.c.product_name == product_name) + & (resource_tracker_service_runs.c.project_id == f"{project_id}") + & ( + resource_tracker_credit_transactions.c.transaction_classification + == CreditClassification.DEDUCT_SERVICE_RUN + ) + & (resource_tracker_credit_transactions.c.wallet_id == wallet_id) + ) + ) + + if transaction_status: + sum_stmt = sum_stmt.where( + resource_tracker_credit_transactions.c.transaction_status + == transaction_status + ) + else: + sum_stmt = sum_stmt.where( + 
resource_tracker_credit_transactions.c.transaction_status.in_( + [ + CreditTransactionStatus.BILLED, + CreditTransactionStatus.PENDING, + CreditTransactionStatus.IN_DEBT, + ] + ) + ) + + result = await conn.execute(sum_stmt) + row = result.first() + if row is None or row[0] is None: + return WalletTotalCredits( + wallet_id=wallet_id, available_osparc_credits=Decimal(0) + ) + return WalletTotalCredits(wallet_id=wallet_id, available_osparc_credits=row[0]) + + +async def export_service_runs_table_to_s3( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: ProductName, + s3_bucket_name: S3BucketName, + s3_key: str, + s3_region: str, + user_id: UserID | None, + wallet_id: WalletID | None, + started_from: datetime | None = None, + started_until: datetime | None = None, + order_by: OrderBy | None = None, +): + async with transaction_context(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_service_runs.c.product_name, + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.wallet_name, + resource_tracker_service_runs.c.user_email, + resource_tracker_service_runs.c.root_parent_project_name.label( + "project_name" + ), + resource_tracker_service_runs.c.node_name, + resource_tracker_service_runs.c.service_key, + resource_tracker_service_runs.c.service_version, + resource_tracker_service_runs.c.service_type, + resource_tracker_service_runs.c.started_at, + resource_tracker_service_runs.c.stopped_at, + resource_tracker_credit_transactions.c.osparc_credits, + resource_tracker_credit_transactions.c.transaction_status, + _project_tags_subquery.c.project_tags.label("project_tags"), + ) + .select_from( + resource_tracker_service_runs.join( + resource_tracker_credit_transactions, + resource_tracker_service_runs.c.service_run_id + == resource_tracker_credit_transactions.c.service_run_id, + isouter=True, + ).join( + _project_tags_subquery, + resource_tracker_service_runs.c.root_parent_project_id + == _project_tags_subquery.c.project_uuid_for_rut, + isouter=True, + ) + ) + .where(resource_tracker_service_runs.c.product_name == product_name) + ) + + if user_id: + query = query.where(resource_tracker_service_runs.c.user_id == user_id) + if wallet_id: + query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id) + if started_from: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + + if order_by: + if order_by.direction == OrderDirection.ASC: + query = query.order_by(sa.asc(order_by.field)) + else: + query = query.order_by(sa.desc(order_by.field)) + else: + # Default ordering + query = query.order_by(resource_tracker_service_runs.c.started_at.desc()) + + compiled_query = ( + str(query.compile(compile_kwargs={"literal_binds": True})) + .replace("\n", "") + .replace("'", "''") + ) + + result = await conn.execute( + sa.DDL( + f""" + SELECT * from aws_s3.query_export_to_s3('{compiled_query}', + aws_commons.create_s3_uri('{s3_bucket_name}', '{s3_key}', '{s3_region}'), 'format csv, HEADER true'); + """ # noqa: S608 + ) + ) + row = result.first() + assert row + _logger.info( + "Rows uploaded %s, Files uploaded %s, Bytes uploaded %s", + row[0], + row[1], + row[2], + ) + + +async def total_service_runs_by_product_and_user_and_wallet( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + product_name: 
ProductName, + user_id: UserID | None, + wallet_id: WalletID | None, + service_run_status: ServiceRunStatus | None = None, + started_from: datetime | None = None, + started_until: datetime | None = None, +) -> PositiveInt: + async with pass_or_acquire_connection(engine, connection) as conn: + query = ( + sa.select(sa.func.count()) + .select_from(resource_tracker_service_runs) + .where(resource_tracker_service_runs.c.product_name == product_name) + ) + + if user_id: + query = query.where(resource_tracker_service_runs.c.user_id == user_id) + if wallet_id: + query = query.where(resource_tracker_service_runs.c.wallet_id == wallet_id) + if started_from: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + >= started_from.date() + ) + if started_until: + query = query.where( + sa.func.DATE(resource_tracker_service_runs.c.started_at) + <= started_until.date() + ) + if service_run_status: + query = query.where( + resource_tracker_service_runs.c.service_run_status == service_run_status + ) + + result = await conn.execute(query) + row = result.first() + return cast(PositiveInt, row[0]) if row else 0 + + +### For Background check purpose: + + +async def list_service_runs_with_running_status_across_all_products( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + offset: int, + limit: int, +) -> list[ServiceRunForCheckDB]: + async with pass_or_acquire_connection(engine, connection) as conn: + query = ( + sa.select( + resource_tracker_service_runs.c.service_run_id, + resource_tracker_service_runs.c.last_heartbeat_at, + resource_tracker_service_runs.c.missed_heartbeat_counter, + resource_tracker_service_runs.c.modified, + ) + .where( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + .order_by(resource_tracker_service_runs.c.started_at.desc()) # NOTE: + .offset(offset) + .limit(limit) + ) + result = await conn.execute(query) + + return [ServiceRunForCheckDB.model_validate(row) for row in result.fetchall()] + + +async def total_service_runs_with_running_status_across_all_products( + engine: AsyncEngine, connection: AsyncConnection | None = None +) -> PositiveInt: + async with pass_or_acquire_connection(engine, connection) as conn: + query = ( + sa.select(sa.func.count()) + .select_from(resource_tracker_service_runs) + .where( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + ) + result = await conn.execute(query) + row = result.first() + return cast(PositiveInt, row[0]) if row else 0 + + +async def update_service_missed_heartbeat_counter( + engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + service_run_id: ServiceRunID, + last_heartbeat_at: datetime, + missed_heartbeat_counter: int, +) -> ServiceRunDB | None: + async with transaction_context(engine, connection) as conn: + update_stmt = ( + resource_tracker_service_runs.update() + .values( + modified=sa.func.now(), + missed_heartbeat_counter=missed_heartbeat_counter, + ) + .where( + (resource_tracker_service_runs.c.service_run_id == service_run_id) + & ( + resource_tracker_service_runs.c.service_run_status + == ServiceRunStatus.RUNNING + ) + & ( + resource_tracker_service_runs.c.last_heartbeat_at + == last_heartbeat_at + ) + ) + .returning(sa.literal_column("*")) + ) + + result = await conn.execute(update_stmt) + row = result.first() + if row is None: + return None + return ServiceRunDB.model_validate(row) diff --git 
a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/utils.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/utils.py new file mode 100644 index 00000000000..aa2c5d79926 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/db/utils.py @@ -0,0 +1,7 @@ +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import ARRAY, INTEGER + + +def version(column_or_value): + # converts version value string to array[integer] that can be compared + return sa.func.string_to_array(column_or_value, ".").cast(ARRAY(INTEGER)) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/rabbitmq.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/rabbitmq.py new file mode 100644 index 00000000000..1c827fcf060 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/rabbitmq.py @@ -0,0 +1,74 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from fastapi.requests import Request +from servicelib.logging_utils import log_context +from servicelib.rabbitmq import ( + RabbitMQClient, + RabbitMQRPCClient, + wait_till_rabbitmq_responsive, +) +from settings_library.rabbit import RabbitSettings + +from ...exceptions.errors import ConfigurationError + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT startup Rabbitmq", + ): + app.state.rabbitmq_client = None + settings: RabbitSettings | None = ( + app.state.settings.RESOURCE_USAGE_TRACKER_RABBITMQ + ) + if not settings: + raise ConfigurationError( + msg="Rabbit MQ client is de-activated in the settings" + ) + await wait_till_rabbitmq_responsive(settings.dsn) + app.state.rabbitmq_client = RabbitMQClient( + client_name="resource-usage-tracker", settings=settings + ) + app.state.rabbitmq_rpc_server = await RabbitMQRPCClient.create( + client_name="resource_usage_tracker_rpc_server", settings=settings + ) + + async def on_shutdown() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT shutdown Rabbitmq", + ): + if app.state.rabbitmq_client: + await app.state.rabbitmq_client.close() + if app.state.rabbitmq_rpc_server: + await app.state.rabbitmq_rpc_server.close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_rabbitmq_client_from_request(request: Request): + return get_rabbitmq_client(request.app) + + +def get_rabbitmq_client(app: FastAPI) -> RabbitMQClient: + if not app.state.rabbitmq_client: + raise ConfigurationError( + msg="RabbitMQ client is not available. Please check the configuration." 
+ ) + return cast(RabbitMQClient, app.state.rabbitmq_client) + + +def get_rabbitmq_rpc_server(app: FastAPI) -> RabbitMQRPCClient: + assert app.state.rabbitmq_rpc_server # nosec + return cast(RabbitMQRPCClient, app.state.rabbitmq_rpc_server) + + +__all__ = ("RabbitMQClient",) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/redis.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/redis.py new file mode 100644 index 00000000000..e2790b2a4e9 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/redis.py @@ -0,0 +1,43 @@ +import logging +from typing import cast + +from fastapi import FastAPI +from servicelib.logging_utils import log_context +from servicelib.redis import RedisClientSDK +from settings_library.redis import RedisDatabase, RedisSettings + +from ..._meta import APP_NAME + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT startup Redis", + ): + app.state.redis_client_sdk = None + settings: RedisSettings = app.state.settings.RESOURCE_USAGE_TRACKER_REDIS + redis_locks_dsn = settings.build_redis_dsn(RedisDatabase.LOCKS) + app.state.redis_client_sdk = RedisClientSDK( + redis_locks_dsn, client_name=APP_NAME + ) + + async def on_shutdown() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT shutdown Redis", + ): + redis_client_sdk: None | RedisClientSDK = app.state.redis_client_sdk + if redis_client_sdk: + await redis_client_sdk.shutdown() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_redis_lock_client(app: FastAPI) -> RedisClientSDK: + return cast(RedisClientSDK, app.state.redis_client_sdk) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/s3.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/s3.py new file mode 100644 index 00000000000..b83ce3f49db --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/modules/s3.py @@ -0,0 +1,71 @@ +import logging +from typing import cast + +from aws_library.s3 import S3NotConnectedError, SimcoreS3API +from fastapi import FastAPI +from models_library.api_schemas_storage.storage_schemas import S3BucketName +from pydantic import TypeAdapter +from servicelib.logging_utils import log_context +from settings_library.s3 import S3Settings +from tenacity import ( + AsyncRetrying, + before_sleep_log, + stop_after_delay, + wait_random_exponential, +) + +from ...exceptions.errors import ConfigurationError + +_logger = logging.getLogger(__name__) + + +def setup(app: FastAPI) -> None: + async def on_startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT startup S3", + ): + app.state.s3_client = None + settings: S3Settings | None = app.state.settings.RESOURCE_USAGE_TRACKER_S3 + + if not settings: + _logger.warning("S3 client is de-activated in the settings") + return + + app.state.s3_client = client = await SimcoreS3API.create(settings) + + async for attempt in AsyncRetrying( + reraise=True, + stop=stop_after_delay(120), + wait=wait_random_exponential(max=30), + before_sleep=before_sleep_log(_logger, logging.WARNING), + ): + with attempt: + connected = await client.http_check_bucket_connected( + bucket=TypeAdapter(S3BucketName).validate_python( + 
settings.S3_BUCKET_NAME + ) + ) + if not connected: + raise S3NotConnectedError # pragma: no cover + + async def on_shutdown() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT shutdown S3", + ): + if app.state.s3_client: + await cast(SimcoreS3API, app.state.s3_client).close() + + app.add_event_handler("startup", on_startup) + app.add_event_handler("shutdown", on_shutdown) + + +def get_s3_client(app: FastAPI) -> SimcoreS3API: + if not app.state.s3_client: + raise ConfigurationError( + msg="S3 client is not available. Please check the configuration." + ) + return cast(SimcoreS3API, app.state.s3_client) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py new file mode 100644 index 00000000000..4ef13f1de09 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_plans.py @@ -0,0 +1,198 @@ +from typing import Annotated + +from fastapi import Depends +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + PricingPlanToServiceGet, + RutPricingPlanGet, + RutPricingPlanPage, + RutPricingUnitGet, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanCreate, + PricingPlanId, + PricingPlanUpdate, +) +from models_library.services import ServiceKey, ServiceVersion +from pydantic import TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..api.rest.dependencies import get_resource_tracker_db_engine +from ..exceptions.errors import PricingPlanNotFoundForServiceError +from ..models.pricing_plans import PricingPlansDB, PricingPlanToServiceDB +from ..models.pricing_units import PricingUnitsDB +from .modules.db import pricing_plans_db + + +async def _create_pricing_plan_get( + pricing_plan_db: PricingPlansDB, pricing_plan_unit_db: list[PricingUnitsDB] +) -> RutPricingPlanGet: + return RutPricingPlanGet( + pricing_plan_id=pricing_plan_db.pricing_plan_id, + display_name=pricing_plan_db.display_name, + description=pricing_plan_db.description, + classification=pricing_plan_db.classification, + created_at=pricing_plan_db.created, + pricing_plan_key=pricing_plan_db.pricing_plan_key, + pricing_units=[ + RutPricingUnitGet( + pricing_unit_id=unit.pricing_unit_id, + unit_name=unit.unit_name, + unit_extra_info=unit.unit_extra_info, + current_cost_per_unit=unit.current_cost_per_unit, + current_cost_per_unit_id=unit.current_cost_per_unit_id, + default=unit.default, + specific_info=unit.specific_info, + ) + for unit in pricing_plan_unit_db + ], + is_active=pricing_plan_db.is_active, + ) + + +async def get_service_default_pricing_plan( + product_name: ProductName, + service_key: ServiceKey, + service_version: ServiceVersion, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingPlanGet: + active_service_pricing_plans = ( + await pricing_plans_db.list_active_service_pricing_plans_by_product_and_service( + db_engine, + product_name=product_name, + service_key=service_key, + service_version=service_version, + ) + ) + + default_pricing_plan = None + for active_service_pricing_plan in active_service_pricing_plans: + if active_service_pricing_plan.service_default_plan is True: + default_pricing_plan = active_service_pricing_plan + break + + if default_pricing_plan is None: + raise PricingPlanNotFoundForServiceError( + service_key=service_key, 
service_version=service_version + ) + + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=default_pricing_plan.pricing_plan_id + ) + + return await _create_pricing_plan_get(default_pricing_plan, pricing_plan_unit_db) + + +async def list_connected_services_to_pricing_plan_by_pricing_plan( + product_name: ProductName, + pricing_plan_id: PricingPlanId, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +): + output_list: list[ + PricingPlanToServiceDB + ] = await pricing_plans_db.list_connected_services_to_pricing_plan_by_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id + ) + return [ + TypeAdapter(PricingPlanToServiceGet).validate_python(item.model_dump()) + for item in output_list + ] + + +async def connect_service_to_pricing_plan( + product_name: ProductName, + pricing_plan_id: PricingPlanId, + service_key: ServiceKey, + service_version: ServiceVersion, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> PricingPlanToServiceGet: + output: PricingPlanToServiceDB = ( + await pricing_plans_db.upsert_service_to_pricing_plan( + db_engine, + product_name=product_name, + pricing_plan_id=pricing_plan_id, + service_key=service_key, + service_version=service_version, + ) + ) + return TypeAdapter(PricingPlanToServiceGet).validate_python(output.model_dump()) + + +async def list_pricing_plans_without_pricing_units( + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], + product_name: ProductName, + exclude_inactive: bool, + # pagination + offset: int, + limit: int, +) -> RutPricingPlanPage: + total, pricing_plans_list_db = await pricing_plans_db.list_pricing_plans_by_product( + db_engine, + product_name=product_name, + exclude_inactive=exclude_inactive, + offset=offset, + limit=limit, + ) + return RutPricingPlanPage( + items=[ + RutPricingPlanGet( + pricing_plan_id=pricing_plan_db.pricing_plan_id, + display_name=pricing_plan_db.display_name, + description=pricing_plan_db.description, + classification=pricing_plan_db.classification, + created_at=pricing_plan_db.created, + pricing_plan_key=pricing_plan_db.pricing_plan_key, + pricing_units=None, + is_active=pricing_plan_db.is_active, + ) + for pricing_plan_db in pricing_plans_list_db + ], + total=total, + ) + + +async def get_pricing_plan( + product_name: ProductName, + pricing_plan_id: PricingPlanId, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingPlanGet: + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=pricing_plan_id + ) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id + ) + return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) + + +async def create_pricing_plan( + data: PricingPlanCreate, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingPlanGet: + pricing_plan_db = await pricing_plans_db.create_pricing_plan(db_engine, data=data) + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id + ) + return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) + + +async def update_pricing_plan( + product_name: ProductName, + data: PricingPlanUpdate, + db_engine: Annotated[AsyncEngine, 
Depends(get_resource_tracker_db_engine)], +) -> RutPricingPlanGet: + # Check whether pricing plan exists + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id + ) + # Update pricing plan + pricing_plan_updated_db = await pricing_plans_db.update_pricing_plan( + db_engine, product_name=product_name, data=data + ) + if pricing_plan_updated_db: + pricing_plan_db = pricing_plan_updated_db + + pricing_plan_unit_db = await pricing_plans_db.list_pricing_units_by_pricing_plan( + db_engine, pricing_plan_id=pricing_plan_db.pricing_plan_id + ) + return await _create_pricing_plan_get(pricing_plan_db, pricing_plan_unit_db) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py new file mode 100644 index 00000000000..e0867fad494 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/pricing_units.py @@ -0,0 +1,111 @@ +from typing import Annotated + +from fastapi import Depends +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + RutPricingUnitGet, +) +from models_library.products import ProductName +from models_library.resource_tracker import ( + PricingPlanId, + PricingUnitId, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, +) +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..api.rest.dependencies import get_resource_tracker_db_engine +from .modules.db import pricing_plans_db + + +async def get_pricing_unit( + product_name: ProductName, + pricing_plan_id: PricingPlanId, + pricing_unit_id: PricingUnitId, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingUnitGet: + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=pricing_plan_id, + pricing_unit_id=pricing_unit_id, + ) + + return RutPricingUnitGet( + pricing_unit_id=pricing_unit.pricing_unit_id, + unit_name=pricing_unit.unit_name, + unit_extra_info=pricing_unit.unit_extra_info, + current_cost_per_unit=pricing_unit.current_cost_per_unit, + current_cost_per_unit_id=pricing_unit.current_cost_per_unit_id, + default=pricing_unit.default, + specific_info=pricing_unit.specific_info, + ) + + +async def create_pricing_unit( + product_name: ProductName, + data: PricingUnitWithCostCreate, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingUnitGet: + # Check whether pricing plan exists + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id + ) + # Create new pricing unit + pricing_unit_id, _ = await pricing_plans_db.create_pricing_unit_with_cost( + db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key + ) + + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=pricing_unit_id, + ) + return RutPricingUnitGet( + pricing_unit_id=pricing_unit.pricing_unit_id, + unit_name=pricing_unit.unit_name, + unit_extra_info=pricing_unit.unit_extra_info, + current_cost_per_unit=pricing_unit.current_cost_per_unit, + current_cost_per_unit_id=pricing_unit.current_cost_per_unit_id, + default=pricing_unit.default, + specific_info=pricing_unit.specific_info, + ) + + +async def 
update_pricing_unit( + product_name: ProductName, + data: PricingUnitWithCostUpdate, + db_engine: Annotated[AsyncEngine, Depends(get_resource_tracker_db_engine)], +) -> RutPricingUnitGet: + # Check whether pricing unit exists + await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + ) + # Get pricing plan + pricing_plan_db = await pricing_plans_db.get_pricing_plan( + db_engine, product_name=product_name, pricing_plan_id=data.pricing_plan_id + ) + + # Update pricing unit and cost + await pricing_plans_db.update_pricing_unit_with_cost( + db_engine, data=data, pricing_plan_key=pricing_plan_db.pricing_plan_key + ) + + pricing_unit = await pricing_plans_db.get_valid_pricing_unit( + db_engine, + product_name=product_name, + pricing_plan_id=data.pricing_plan_id, + pricing_unit_id=data.pricing_unit_id, + ) + return RutPricingUnitGet( + pricing_unit_id=pricing_unit.pricing_unit_id, + unit_name=pricing_unit.unit_name, + unit_extra_info=pricing_unit.unit_extra_info, + current_cost_per_unit=pricing_unit.current_cost_per_unit, + current_cost_per_unit_id=pricing_unit.current_cost_per_unit_id, + default=pricing_unit.default, + specific_info=pricing_unit.specific_info, + ) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py new file mode 100644 index 00000000000..9eafc209ee7 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service.py @@ -0,0 +1,335 @@ +import logging +from collections.abc import Awaitable, Callable +from decimal import Decimal + +from fastapi import FastAPI +from fastapi.encoders import jsonable_encoder +from models_library.rabbitmq_messages import ( + CreditsLimit, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingMessages, + RabbitResourceTrackingMessageType, + RabbitResourceTrackingStartedMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionStatus, + ResourceTrackerServiceType, + ServiceRunStatus, +) +from models_library.services import ServiceType +from pydantic import TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine + +from ..models.credit_transactions import ( + CreditTransactionCreate, + CreditTransactionCreditsAndStatusUpdate, + CreditTransactionCreditsUpdate, +) +from ..models.service_runs import ( + ServiceRunCreate, + ServiceRunLastHeartbeatUpdate, + ServiceRunStoppedAtUpdate, +) +from .modules.db import ( + credit_transactions_db, + licensed_items_checkouts_db, + pricing_plans_db, + service_runs_db, +) +from .modules.rabbitmq import RabbitMQClient, get_rabbitmq_client +from .utils import ( + compute_service_run_credit_costs, + make_negative, + publish_to_rabbitmq_wallet_credits_limit_reached, + sum_credit_transactions_and_publish_to_rabbitmq, +) + +_logger = logging.getLogger(__name__) + + +async def process_message(app: FastAPI, data: bytes) -> bool: + rabbit_message: RabbitResourceTrackingMessages = TypeAdapter( + RabbitResourceTrackingMessages + ).validate_json(data) + _logger.info( + "Process %s msg service_run_id: %s", + rabbit_message.message_type, + rabbit_message.service_run_id, + ) + _db_engine = app.state.engine + rabbitmq_client = 
get_rabbitmq_client(app) + + await RABBIT_MSG_TYPE_TO_PROCESS_HANDLER[rabbit_message.message_type]( + _db_engine, rabbit_message, rabbitmq_client + ) + return True + + +async def _process_start_event( + db_engine: AsyncEngine, + msg: RabbitResourceTrackingStartedMessage, + rabbitmq_client: RabbitMQClient, +): + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id + ) + if service_run_db: + # NOTE: After we find out why sometimes RUT recieves multiple start events and fix it, we can change it to log level `error` + _logger.warning( + "On process start event the service run id %s already exists in DB, INVESTIGATE! Current msg created_at: %s, already stored msg created_at: %s", + msg.service_run_id, + msg.created_at, + service_run_db.started_at, + ) + return + + # Prepare `service run` record (if billable `credit transaction`) in the DB + service_type = ( + ResourceTrackerServiceType.COMPUTATIONAL_SERVICE + if msg.service_type == ServiceType.COMPUTATIONAL + else ResourceTrackerServiceType.DYNAMIC_SERVICE + ) + pricing_unit_cost = None + if msg.pricing_unit_cost_id: + pricing_unit_cost_db = await pricing_plans_db.get_pricing_unit_cost_by_id( + db_engine, pricing_unit_cost_id=msg.pricing_unit_cost_id + ) + pricing_unit_cost = pricing_unit_cost_db.cost_per_unit + + create_service_run = ServiceRunCreate( + product_name=msg.product_name, + service_run_id=msg.service_run_id, + wallet_id=msg.wallet_id, + wallet_name=msg.wallet_name, + pricing_plan_id=msg.pricing_plan_id, + pricing_unit_id=msg.pricing_unit_id, + pricing_unit_cost_id=msg.pricing_unit_cost_id, + pricing_unit_cost=pricing_unit_cost, + simcore_user_agent=msg.simcore_user_agent, + user_id=msg.user_id, + user_email=msg.user_email, + project_id=msg.project_id, + project_name=msg.project_name, + node_id=msg.node_id, + node_name=msg.node_name, + parent_project_id=msg.parent_project_id, + root_parent_project_id=msg.root_parent_project_id, + root_parent_project_name=msg.root_parent_project_name, + parent_node_id=msg.parent_node_id, + root_parent_node_id=msg.root_parent_node_id, + service_key=msg.service_key, + service_version=msg.service_version, + service_type=service_type, + service_resources=jsonable_encoder(msg.service_resources), + service_additional_metadata={}, + started_at=msg.created_at, + service_run_status=ServiceRunStatus.RUNNING, + last_heartbeat_at=msg.created_at, + ) + service_run_id = await service_runs_db.create_service_run( + db_engine, data=create_service_run + ) + + if msg.wallet_id and msg.wallet_name: + transaction_create = CreditTransactionCreate( + product_name=msg.product_name, + wallet_id=msg.wallet_id, + wallet_name=msg.wallet_name, + pricing_plan_id=msg.pricing_plan_id, + pricing_unit_id=msg.pricing_unit_id, + pricing_unit_cost_id=msg.pricing_unit_cost_id, + user_id=msg.user_id, + user_email=msg.user_email, + osparc_credits=Decimal("0.0"), + transaction_status=CreditTransactionStatus.PENDING, + transaction_classification=CreditClassification.DEDUCT_SERVICE_RUN, + service_run_id=service_run_id, + payment_transaction_id=None, + licensed_item_purchase_id=None, + created_at=msg.created_at, + last_heartbeat_at=msg.created_at, + ) + await credit_transactions_db.create_credit_transaction( + db_engine, data=transaction_create + ) + + # Publish wallet total credits to RabbitMQ + await sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=msg.product_name, + wallet_id=msg.wallet_id, + ) + + +async def 
_process_heartbeat_event( + db_engine: AsyncEngine, + msg: RabbitResourceTrackingHeartbeatMessage, + rabbitmq_client: RabbitMQClient, +): + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id + ) + if not service_run_db: + _logger.error( + "Received process heartbeat event for service_run_id: %s, but we do not have the started record in the DB, INVESTIGATE!", + msg.service_run_id, + ) + return + if service_run_db.service_run_status in { + ServiceRunStatus.SUCCESS, + ServiceRunStatus.ERROR, + }: + _logger.error( + "Received process heartbeat event for service_run_id: %s, but it was already closed, INVESTIGATE!", + msg.service_run_id, + ) + return + + # Update `service run` record (if billable `credit transaction`) in the DB + update_service_run_last_heartbeat = ServiceRunLastHeartbeatUpdate( + service_run_id=msg.service_run_id, last_heartbeat_at=msg.created_at + ) + running_service = await service_runs_db.update_service_run_last_heartbeat( + db_engine, data=update_service_run_last_heartbeat + ) + if running_service is None: + _logger.info("Nothing to update: %s", msg) + return + + if running_service.wallet_id and running_service.pricing_unit_cost is not None: + # Compute currently used credits + computed_credits = await compute_service_run_credit_costs( + running_service.started_at, + msg.created_at, + running_service.pricing_unit_cost, + ) + # Update credits in the transaction table + update_credit_transaction = CreditTransactionCreditsUpdate( + service_run_id=msg.service_run_id, + osparc_credits=make_negative(computed_credits), + last_heartbeat_at=msg.created_at, + ) + await credit_transactions_db.update_credit_transaction_credits( + db_engine, data=update_credit_transaction + ) + # Publish wallet total credits to RabbitMQ + wallet_total_credits = await sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=running_service.product_name, + wallet_id=running_service.wallet_id, + ) + if wallet_total_credits.available_osparc_credits < CreditsLimit.OUT_OF_CREDITS: + await publish_to_rabbitmq_wallet_credits_limit_reached( + db_engine, + rabbitmq_client, + product_name=running_service.product_name, + wallet_id=running_service.wallet_id, + credits_=wallet_total_credits.available_osparc_credits, + credits_limit=CreditsLimit.OUT_OF_CREDITS, + ) + + +async def _process_stop_event( + db_engine: AsyncEngine, + msg: RabbitResourceTrackingStoppedMessage, + rabbitmq_client: RabbitMQClient, +): + service_run_db = await service_runs_db.get_service_run_by_id( + db_engine, service_run_id=msg.service_run_id + ) + if not service_run_db: + # NOTE: ANE/MD discussed. When the RUT receives a stop event without having received any start or heartbeat event before, it probably means that + # we failed to start the container. 
https://github.com/ITISFoundation/osparc-simcore/issues/5169 + _logger.warning( + "Received stop event for service_run_id: %s, but we do not have any record in the DB, therefore the service probably didn't start correctly.", + msg.service_run_id, + ) + return + if service_run_db.service_run_status in { + ServiceRunStatus.SUCCESS, + ServiceRunStatus.ERROR, + }: + _logger.error( + "Received stop event for service_run_id: %s, but it was already closed, INVESTIGATE!", + msg.service_run_id, + ) + return + + # Update `service run` record (if billable `credit transaction`) in the DB + _run_status, _run_status_msg = ServiceRunStatus.SUCCESS, None + if msg.simcore_platform_status is SimcorePlatformStatus.BAD: + _run_status, _run_status_msg = ( + ServiceRunStatus.ERROR, + "Director-v2 or Sidecar considers service as unhealthy", + ) + update_service_run_stopped_at = ServiceRunStoppedAtUpdate( + service_run_id=msg.service_run_id, + stopped_at=msg.created_at, + service_run_status=_run_status, + service_run_status_msg=_run_status_msg, + ) + + running_service = await service_runs_db.update_service_run_stopped_at( + db_engine, data=update_service_run_stopped_at + ) + await licensed_items_checkouts_db.force_release_license_seats_by_run_id( + db_engine, service_run_id=msg.service_run_id + ) + + if running_service is None: + _logger.error( + "Nothing to update. This should not happen, investigate. service_run_id: %s", + msg.service_run_id, + ) + return + + if running_service.wallet_id and running_service.pricing_unit_cost is not None: + # Compute currently used credits + computed_credits = await compute_service_run_credit_costs( + running_service.started_at, + msg.created_at, + running_service.pricing_unit_cost, + ) + + wallet_total_credits = await credit_transactions_db.sum_wallet_credits( + db_engine, + product_name=running_service.product_name, + wallet_id=running_service.wallet_id, + ) + _transaction_status = ( + CreditTransactionStatus.BILLED + if wallet_total_credits.available_osparc_credits - computed_credits >= 0 + else CreditTransactionStatus.IN_DEBT + ) + # Adjust the status if the platform status is not OK + if msg.simcore_platform_status != SimcorePlatformStatus.OK: + _transaction_status = CreditTransactionStatus.NOT_BILLED + + # Update credits in the transaction table and close the transaction + update_credit_transaction = CreditTransactionCreditsAndStatusUpdate( + service_run_id=msg.service_run_id, + osparc_credits=make_negative(computed_credits), + transaction_status=_transaction_status, + ) + await credit_transactions_db.update_credit_transaction_credits_and_status( + db_engine, data=update_credit_transaction + ) + # Publish wallet total credits to RabbitMQ + await sum_credit_transactions_and_publish_to_rabbitmq( + db_engine, + rabbitmq_client=rabbitmq_client, + product_name=running_service.product_name, + wallet_id=running_service.wallet_id, + ) + + +RABBIT_MSG_TYPE_TO_PROCESS_HANDLER: dict[str, Callable[..., Awaitable[None]]] = { + RabbitResourceTrackingMessageType.TRACKING_STARTED: _process_start_event, + RabbitResourceTrackingMessageType.TRACKING_HEARTBEAT: _process_heartbeat_event, + RabbitResourceTrackingMessageType.TRACKING_STOPPED: _process_stop_event, +} diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service_setup.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service_setup.py new file mode 100644 index 00000000000..cb4bc919503 --- /dev/null +++ 
b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/process_message_running_service_setup.py @@ -0,0 +1,66 @@ +import functools +import logging +from collections.abc import Awaitable, Callable + +from fastapi import FastAPI +from models_library.rabbitmq_messages import RabbitResourceTrackingBaseMessage +from servicelib.logging_utils import log_catch, log_context +from servicelib.rabbitmq import RabbitMQClient +from settings_library.rabbit import RabbitSettings + +from ..core.settings import ApplicationSettings +from .modules.rabbitmq import get_rabbitmq_client +from .process_message_running_service import process_message + +_logger = logging.getLogger(__name__) + +_RUT_MESSAGE_TTL_IN_MS = 2 * 60 * 60 * 1000 # 2 hours + + +async def _subscribe_to_rabbitmq(app) -> str: + with log_context(_logger, logging.INFO, msg="Subscribing to rabbitmq channel"): + rabbit_client: RabbitMQClient = get_rabbitmq_client(app) + subscribed_queue, _ = await rabbit_client.subscribe( + RabbitResourceTrackingBaseMessage.get_channel_name(), + message_handler=functools.partial(process_message, app), + exclusive_queue=False, + message_ttl=_RUT_MESSAGE_TTL_IN_MS, + ) + return subscribed_queue + + +def on_app_startup(app: FastAPI) -> Callable[[], Awaitable[None]]: + async def _startup() -> None: + with log_context( + _logger, + logging.INFO, + msg="RUT setup process_message_running_service module.", + ), log_catch(_logger, reraise=False): + app_settings: ApplicationSettings = app.state.settings + app.state.resource_tracker_rabbitmq_consumer = None + settings: RabbitSettings | None = ( + app_settings.RESOURCE_USAGE_TRACKER_RABBITMQ + ) + if not settings: + _logger.warning("RabbitMQ client is de-activated in the settings") + return + app.state.resource_tracker_rabbitmq_consumer = await _subscribe_to_rabbitmq( + app + ) + + return _startup + + +def on_app_shutdown( + _app: FastAPI, +) -> Callable[[], Awaitable[None]]: + async def _stop() -> None: + # NOTE: We want to have persistent queue, therefore we will not unsubscribe + assert _app # nosec + + return _stop + + +def setup(app: FastAPI) -> None: + app.add_event_handler("startup", on_app_startup(app)) + app.add_event_handler("shutdown", on_app_shutdown(app)) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py new file mode 100644 index 00000000000..9a9a1398712 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/service_runs.py @@ -0,0 +1,245 @@ +# pylint: disable=too-many-arguments +from datetime import UTC, datetime, timedelta + +import shortuuid +from aws_library.s3 import SimcoreS3API +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + OsparcCreditsAggregatedByServiceGet, + OsparcCreditsAggregatedUsagesPage, + ServiceRunGet, + ServiceRunPage, +) +from models_library.api_schemas_storage.storage_schemas import S3BucketName +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import ( + CreditTransactionStatus, + ServiceResourceUsagesFilters, + ServicesAggregatedUsagesTimePeriod, + ServicesAggregatedUsagesType, +) +from models_library.rest_ordering import OrderBy +from models_library.users import UserID +from 
models_library.wallets import WalletID +from pydantic import AnyUrl, TypeAdapter +from sqlalchemy.ext.asyncio import AsyncEngine + +from .modules.db import service_runs_db + +_PRESIGNED_LINK_EXPIRATION_SEC = 7200 + + +async def list_service_runs( + db_engine: AsyncEngine, + *, + user_id: UserID, + product_name: ProductName, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + filters: ServiceResourceUsagesFilters | None = None, + transaction_status: CreditTransactionStatus | None = None, + project_id: ProjectID | None = None, + offset: int = 0, + limit: int = 20, + order_by: OrderBy | None = None, +) -> ServiceRunPage: + started_from = None + started_until = None + if filters: + started_from = filters.started_at.from_ + started_until = filters.started_at.until + + # Situation when we want to see all usage of a specific user (ex. for Non billable product) + if wallet_id is None and access_all_wallet_usage is False: + ( + total_service_runs, + service_runs_db_model, + ) = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=user_id, + wallet_id=None, + started_from=started_from, + started_until=started_until, + transaction_status=transaction_status, + project_id=project_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + # Situation when accountant user can see all users usage of the wallet + elif wallet_id and access_all_wallet_usage is True: + ( + total_service_runs, + service_runs_db_model, + ) = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=None, + wallet_id=wallet_id, + started_from=started_from, + started_until=started_until, + transaction_status=transaction_status, + project_id=project_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + # Situation when regular user can see only his usage of the wallet + elif wallet_id and access_all_wallet_usage is False: + ( + total_service_runs, + service_runs_db_model, + ) = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=user_id, + wallet_id=wallet_id, + started_from=started_from, + started_until=started_until, + transaction_status=transaction_status, + project_id=project_id, + offset=offset, + limit=limit, + order_by=order_by, + ) + else: + msg = "wallet_id and access_all_wallet_usage parameters must be specified together" + raise ValueError(msg) + + service_runs_api_model: list[ServiceRunGet] = [] + for service in service_runs_db_model: + service_runs_api_model.append( + ServiceRunGet.model_construct( + service_run_id=service.service_run_id, + wallet_id=service.wallet_id, + wallet_name=service.wallet_name, + user_id=service.user_id, + user_email=service.user_email, + project_id=service.project_id, + project_name=service.project_name, + project_tags=service.project_tags, + root_parent_project_id=service.root_parent_project_id, + root_parent_project_name=service.root_parent_project_name, + node_id=service.node_id, + node_name=service.node_name, + service_key=service.service_key, + service_version=service.service_version, + service_type=service.service_type, + started_at=service.started_at, + stopped_at=service.stopped_at, + service_run_status=service.service_run_status, + credit_cost=service.osparc_credits, + transaction_status=service.transaction_status, + ) + ) + + return ServiceRunPage(service_runs_api_model, total_service_runs) + + +async def export_service_runs( + 
s3_client: SimcoreS3API, + *, + bucket_name: str, + s3_region: str, + user_id: UserID, + product_name: ProductName, + db_engine: AsyncEngine, + wallet_id: WalletID | None = None, + access_all_wallet_usage: bool = False, + order_by: OrderBy | None = None, + filters: ServiceResourceUsagesFilters | None = None, +) -> AnyUrl: + started_from = filters.started_at.from_ if filters else None + started_until = filters.started_at.until if filters else None + + # Create S3 key name + s3_bucket_name = TypeAdapter(S3BucketName).validate_python(bucket_name) + # NOTE: su stands for "service usage" + file_name = f"su_{shortuuid.uuid()}.csv" + s3_object_key = ( + f"resource-usage-tracker-service-runs/{datetime.now(tz=UTC).date()}/{file_name}" + ) + + # Export CSV to S3 + await service_runs_db.export_service_runs_table_to_s3( + db_engine, + product_name=product_name, + s3_bucket_name=s3_bucket_name, + s3_key=s3_object_key, + s3_region=s3_region, + user_id=user_id if access_all_wallet_usage is False else None, + wallet_id=wallet_id, + started_from=started_from, + started_until=started_until, + order_by=order_by, + ) + + # Create presigned S3 link + return await s3_client.create_single_presigned_download_link( + bucket=s3_bucket_name, + object_key=s3_object_key, + expiration_secs=_PRESIGNED_LINK_EXPIRATION_SEC, + ) + + +async def sum_project_wallet_total_credits( + db_engine: AsyncEngine, + *, + product_name: ProductName, + wallet_id: WalletID, + project_id: ProjectID, + transaction_status: CreditTransactionStatus | None = None, +) -> WalletTotalCredits: + return await service_runs_db.sum_project_wallet_total_credits( + db_engine, + product_name=product_name, + wallet_id=wallet_id, + project_id=project_id, + transaction_status=transaction_status, + ) + + +async def get_osparc_credits_aggregated_usages_page( + user_id: UserID, + product_name: ProductName, + db_engine: AsyncEngine, + aggregated_by: ServicesAggregatedUsagesType, + time_period: ServicesAggregatedUsagesTimePeriod, + wallet_id: WalletID, + access_all_wallet_usage: bool = False, + limit: int = 20, + offset: int = 0, +) -> OsparcCreditsAggregatedUsagesPage: + current_datetime = datetime.now(tz=UTC) + started_from = current_datetime - timedelta(days=time_period.value) + + assert aggregated_by == ServicesAggregatedUsagesType.services # nosec + + ( + count_output_list_db, + output_list_db, + ) = await service_runs_db.get_osparc_credits_aggregated_by_service( + db_engine, + product_name=product_name, + user_id=user_id if access_all_wallet_usage is False else None, + wallet_id=wallet_id, + offset=offset, + limit=limit, + started_from=started_from, + started_until=None, + ) + output_api_model: list[OsparcCreditsAggregatedByServiceGet] = [] + for item in output_list_db: + output_api_model.append( + OsparcCreditsAggregatedByServiceGet.model_construct( + osparc_credits=item.osparc_credits, + service_key=item.service_key, + running_time_in_hours=item.running_time_in_hours, + ) + ) + + return OsparcCreditsAggregatedUsagesPage(output_api_model, count_output_list_db) diff --git a/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py new file mode 100644 index 00000000000..ae831a8d615 --- /dev/null +++ b/services/resource-usage-tracker/src/simcore_service_resource_usage_tracker/services/utils.py @@ -0,0 +1,140 @@ +import asyncio +import logging +from datetime import UTC, datetime +from decimal import Decimal + +from 
models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.projects_nodes_io import NodeID +from models_library.rabbitmq_messages import ( + CreditsLimit, + WalletCreditsLimitReachedMessage, + WalletCreditsMessage, +) +from models_library.resource_tracker import ServiceRunStatus +from models_library.services_types import ServiceRunID +from models_library.users import UserID +from models_library.wallets import WalletID +from pydantic import PositiveInt +from servicelib.rabbitmq import RabbitMQClient +from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine + +from .modules.db import credit_transactions_db, service_runs_db + +_logger = logging.getLogger(__name__) + + +def make_negative(n): + return -abs(n) + + +async def sum_credit_transactions_and_publish_to_rabbitmq( + db_engine: AsyncEngine, + connection: AsyncConnection | None = None, + *, + rabbitmq_client: RabbitMQClient, + product_name: ProductName, + wallet_id: WalletID, +) -> WalletTotalCredits: + wallet_total_credits = await credit_transactions_db.sum_wallet_credits( + db_engine, + connection=connection, + product_name=product_name, + wallet_id=wallet_id, + ) + publish_message = WalletCreditsMessage.model_construct( + wallet_id=wallet_id, + created_at=datetime.now(tz=UTC), + credits=wallet_total_credits.available_osparc_credits, + product_name=product_name, + ) + await rabbitmq_client.publish(publish_message.channel_name, publish_message) + return wallet_total_credits + + +_BATCH_SIZE = 20 + + +async def _publish_to_rabbitmq_wallet_credits_limit_reached( + rabbitmq_client: RabbitMQClient, + service_run_id: ServiceRunID, + user_id: UserID, + project_id: ProjectID, + node_id: NodeID, + wallet_id: WalletID, + credits_: Decimal, + credits_limit: CreditsLimit, +): + publish_message = WalletCreditsLimitReachedMessage( + service_run_id=service_run_id, + user_id=user_id, + project_id=project_id, + node_id=node_id, + wallet_id=wallet_id, + credits=credits_, + credits_limit=credits_limit, + ) + await rabbitmq_client.publish(publish_message.channel_name, publish_message) + + +async def publish_to_rabbitmq_wallet_credits_limit_reached( + db_engine: AsyncEngine, + rabbitmq_client: RabbitMQClient, + product_name: ProductName, + wallet_id: WalletID, + credits_: Decimal, + credits_limit: CreditsLimit, +): + # Get all current running services for that wallet + total_count: PositiveInt = ( + await service_runs_db.total_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=None, + wallet_id=wallet_id, + service_run_status=ServiceRunStatus.RUNNING, + ) + ) + + for offset in range(0, total_count, _BATCH_SIZE): + ( + _, + batch_services, + ) = await service_runs_db.list_service_runs_by_product_and_user_and_wallet( + db_engine, + product_name=product_name, + user_id=None, + wallet_id=wallet_id, + offset=offset, + limit=_BATCH_SIZE, + service_run_status=ServiceRunStatus.RUNNING, + ) + + await asyncio.gather( + *( + _publish_to_rabbitmq_wallet_credits_limit_reached( + rabbitmq_client=rabbitmq_client, + service_run_id=service.service_run_id, + user_id=service.user_id, + project_id=service.project_id, + node_id=service.node_id, + wallet_id=wallet_id, + credits_=credits_, + credits_limit=credits_limit, + ) + for service in batch_services + ) + ) + + +async def compute_service_run_credit_costs( + start: datetime, stop: datetime, cost_per_unit: Decimal 
+) -> Decimal: + if start <= stop: + time_delta = stop - start + return round(Decimal(time_delta.total_seconds() / 3600) * cost_per_unit, 2) + msg = f"Stop {stop} is earlier than start {start}; this should not happen. Investigate." + raise ValueError(msg) diff --git a/services/resource-usage-tracker/tests/__init__.py b/services/resource-usage-tracker/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/catalog/tests/integration/test_none.py b/services/resource-usage-tracker/tests/integration/test_none.py similarity index 100% rename from services/catalog/tests/integration/test_none.py rename to services/resource-usage-tracker/tests/integration/test_none.py diff --git a/services/resource-usage-tracker/tests/unit/api_rest/test_api_meta.py b/services/resource-usage-tracker/tests/unit/api_rest/test_api_meta.py new file mode 100644 index 00000000000..c92bd5b959b --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/api_rest/test_api_meta.py @@ -0,0 +1,74 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from unittest import mock + +import pytest +from fastapi import status +from fastapi.testclient import TestClient +from pytest_mock import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from simcore_service_resource_usage_tracker._meta import API_VTAG +from simcore_service_resource_usage_tracker.api.rest._health import HealthCheckError +from simcore_service_resource_usage_tracker.api.rest._meta import _Meta + + +def test_healthcheck( + disabled_database: None, + disabled_prometheus: None, + disabled_rabbitmq: None, + mocked_redis_server: None, + mocked_setup_rabbitmq: mock.MagicMock, + client: TestClient, + mocker: MockerFixture, +): + rabbitmq_mock = mocker.Mock(spec=RabbitMQClient) + rabbitmq_mock.healthy = True + mocker.patch( + "simcore_service_resource_usage_tracker.services.modules.rabbitmq.get_rabbitmq_client", + return_value=rabbitmq_mock, + ) + + response = client.get("/") + assert response.status_code == status.HTTP_200_OK + assert response.text.startswith( + "simcore_service_resource_usage_tracker.api.rest._health@" + ) + + +def test_healthcheck__unhealthy( + disabled_database: None, + disabled_prometheus: None, + disabled_rabbitmq: None, + mocked_redis_server: None, + mocked_setup_rabbitmq: mock.MagicMock, + client: TestClient, + mocker: MockerFixture, +): + rabbitmq_mock = mocker.Mock(spec=RabbitMQClient) + rabbitmq_mock.healthy = False + mocker.patch( + "simcore_service_resource_usage_tracker.services.modules.rabbitmq.get_rabbitmq_client", + return_value=rabbitmq_mock, + ) + + with pytest.raises(HealthCheckError): + client.get("/") + + +def test_meta( + disabled_database: None, + disabled_prometheus: None, + disabled_rabbitmq: None, + mocked_redis_server: None, + mocked_setup_rabbitmq: mock.MagicMock, + client: TestClient, +): + response = client.get(f"/{API_VTAG}/meta") + assert response.status_code == status.HTTP_200_OK + meta = _Meta.model_validate(response.json()) + + response = client.get(f"{meta.docs_url}") + assert response.status_code == status.HTTP_200_OK diff --git a/services/resource-usage-tracker/tests/unit/conftest.py b/services/resource-usage-tracker/tests/unit/conftest.py new file mode 100644 index 00000000000..fa1b857904c --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/conftest.py @@ -0,0 +1,235 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: 
disable=unused-variable +# pylint: disable=too-many-arguments + +import json +import re +from collections.abc import AsyncIterator, Callable, Iterator +from pathlib import Path +from random import choice +from typing import Any +from unittest import mock + +import httpx +import pytest +import requests_mock +from asgi_lifespan import LifespanManager +from faker import Faker +from fakeredis.aioredis import FakeRedis +from fastapi import FastAPI +from fastapi.testclient import TestClient +from pytest_mock import MockerFixture +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from settings_library.rabbit import RabbitSettings +from simcore_service_resource_usage_tracker.core.application import create_app +from simcore_service_resource_usage_tracker.core.settings import ApplicationSettings + +pytest_plugins = [ + "pytest_simcore.cli_runner", + "pytest_simcore.docker_compose", + "pytest_simcore.docker_registry", + "pytest_simcore.docker_swarm", + "pytest_simcore.environment_configs", + "pytest_simcore.faker_projects_data", + "pytest_simcore.faker_products_data", + "pytest_simcore.postgres_service", + "pytest_simcore.pydantic_models", + "pytest_simcore.pytest_global_environs", + "pytest_simcore.rabbit_service", + "pytest_simcore.repository_paths", + "pytest_simcore.aws_s3_service", + "pytest_simcore.aws_server", +] + + +@pytest.fixture(scope="session") +def project_slug_dir(osparc_simcore_root_dir: Path) -> Path: + # fixtures in pytest_simcore.environs + service_folder = osparc_simcore_root_dir / "services" / "resource-usage-tracker" + assert service_folder.exists() + assert any(service_folder.glob("src/simcore_service_resource_usage_tracker")) + return service_folder + + +@pytest.fixture +def app_environment( + mock_env_devel_environment: EnvVarsDict, + monkeypatch: pytest.MonkeyPatch, + faker: Faker, +) -> EnvVarsDict: + envs = setenvs_from_dict( + monkeypatch, + { + "POSTGRES_HOST": faker.domain_name(), + "POSTGRES_USER": faker.user_name(), + "POSTGRES_PASSWORD": faker.password(special_chars=False), + "POSTGRES_DB": faker.pystr(), + "PROMETHEUS_URL": f"{choice(['http', 'https'])}://{faker.domain_name()}", + "PROMETHEUS_USERNAME": faker.user_name(), + "PROMETHEUS_PASSWORD": faker.password(special_chars=False), + "RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED": "0", + "RESOURCE_USAGE_TRACKER_TRACING": "null", + }, + ) + + return mock_env_devel_environment | envs + + +@pytest.fixture +def disabled_prometheus( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None: + monkeypatch.delenv("PROMETHEUS_URL") + monkeypatch.delenv("PROMETHEUS_USERNAME") + monkeypatch.delenv("PROMETHEUS_PASSWORD") + + +@pytest.fixture +def disabled_database( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> None: + monkeypatch.delenv("POSTGRES_HOST") + monkeypatch.delenv("POSTGRES_USER") + monkeypatch.delenv("POSTGRES_PASSWORD") + monkeypatch.delenv("POSTGRES_DB") + + +@pytest.fixture +def disabled_rabbitmq(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("RABBIT_HOST") + monkeypatch.delenv("RABBIT_USER") + monkeypatch.delenv("RABBIT_SECURE") + monkeypatch.delenv("RABBIT_PASSWORD") + + +@pytest.fixture +def enabled_rabbitmq( + app_environment: EnvVarsDict, rabbit_service: RabbitSettings +) -> RabbitSettings: + return rabbit_service + + +@pytest.fixture +def app_settings( + app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch +) -> ApplicationSettings: 
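+ # NOTE: settings are built from the env vars prepared by the `app_environment` fixture; `monkeypatch` is kept in the signature (presumably) so that overriding fixtures can still adjust the environment before construction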
+ return ApplicationSettings.create_from_envs() + + +@pytest.fixture +async def initialized_app(app_settings: ApplicationSettings) -> AsyncIterator[FastAPI]: + app = create_app(app_settings) + async with LifespanManager(app): + yield app + + +@pytest.fixture +def client(app_settings: ApplicationSettings) -> Iterator[TestClient]: + app = create_app(app_settings) + with TestClient(app, base_url="http://testserver.test") as client: + yield client + + +@pytest.fixture +async def async_client(initialized_app: FastAPI) -> AsyncIterator[httpx.AsyncClient]: + async with httpx.AsyncClient( + transport=httpx.ASGITransport(app=initialized_app), + base_url=f"http://{initialized_app.title}.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def mocked_prometheus( + requests_mock: requests_mock.Mocker, app_settings: ApplicationSettings +) -> requests_mock.Mocker: + assert app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS + requests_mock.get(f"{app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS.api_url}/") + return requests_mock + + +@pytest.fixture +def get_metric_response(faker: Faker) -> Callable[..., dict[str, Any]]: + def _get_metric(request, context) -> dict[str, Any]: + return { + "data": { + "result": [ + { + "metric": { + "id": "cpu", + "container_label_uuid": faker.uuid4(), + "container_label_simcore_service_settings": json.dumps( + [ + { + "name": "Resources", + "type": "Resources", + "resources": faker.pystr(), + "value": { + "Limits": { + "NanoCPUs": faker.pyint(min_value=1000) + } + }, + } + ] + ), + }, + "value": faker.pylist(allowed_types=(int,)), + } + ] + } + } + + return _get_metric + + +@pytest.fixture +def mocked_prometheus_with_query( + mocked_prometheus: requests_mock.Mocker, + app_settings: ApplicationSettings, + faker: Faker, + get_metric_response, +) -> requests_mock.Mocker: + """overrides with needed calls here""" + assert app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS + pattern = re.compile( + rf"^{re.escape(app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS.api_url)}/api/v1/query\?.*$" + ) + mocked_prometheus.get(pattern, json=get_metric_response) + return mocked_prometheus + + +@pytest.fixture +def disabled_tracker_background_task(mocker: MockerFixture) -> dict[str, mock.Mock]: + mocked_start = mocker.patch( + "simcore_service_resource_usage_tracker.modules.prometheus_containers.plugin.create_periodic_task", + autospec=True, + ) + + mocked_stop = mocker.patch( + "simcore_service_resource_usage_tracker.modules.prometheus_containers.plugin.cancel_wait_task", + autospec=True, + ) + return {"start_task": mocked_start, "stop_task": mocked_stop} + + +@pytest.fixture +async def mocked_redis_server(mocker: MockerFixture) -> None: + mock_redis = FakeRedis() + mocker.patch("redis.asyncio.from_url", return_value=mock_redis) + + +@pytest.fixture +def mocked_setup_rabbitmq(mocker: MockerFixture): + return ( + mocker.patch( + "simcore_service_resource_usage_tracker.core.application.setup_rabbitmq", + autospec=True, + ), + mocker.patch( + "simcore_service_resource_usage_tracker.core.application.setup_rpc_api_routes", + autospec=True, + ), + ) diff --git a/services/resource-usage-tracker/tests/unit/modules/test_rabbitmq.py b/services/resource-usage-tracker/tests/unit/modules/test_rabbitmq.py new file mode 100644 index 00000000000..f69d8989b24 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/modules/test_rabbitmq.py @@ -0,0 +1,29 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# 
pylint:disable=redefined-outer-name + + +from fastapi import FastAPI +from settings_library.rabbit import RabbitSettings +from simcore_service_resource_usage_tracker.services.modules.rabbitmq import ( + get_rabbitmq_client, +) + +# Selection of core and tool services started in this swarm fixture (integration) +pytest_simcore_core_services_selection = [ + "rabbit", +] + +pytest_simcore_ops_services_selection = [] + + +def test_rabbitmq_initializes( + disabled_prometheus: None, + disabled_database: None, + enabled_rabbitmq: RabbitSettings, + mocked_redis_server: None, + initialized_app: FastAPI, +): + assert hasattr(initialized_app.state, "rabbitmq_client") + assert initialized_app.state.rabbitmq_client is not None + assert get_rabbitmq_client(initialized_app) == initialized_app.state.rabbitmq_client diff --git a/services/resource-usage-tracker/tests/unit/modules/test_redis.py b/services/resource-usage-tracker/tests/unit/modules/test_redis.py new file mode 100644 index 00000000000..e518c589c79 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/modules/test_redis.py @@ -0,0 +1,23 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from unittest import mock + +from fastapi import FastAPI +from simcore_service_resource_usage_tracker.services.modules.redis import ( + get_redis_lock_client, +) + + +async def test_redis_raises_if_missing( + disabled_prometheus: None, + disabled_database: None, + disabled_rabbitmq: None, + mocked_setup_rabbitmq: mock.Mock, + mocked_redis_server: None, + initialized_app: FastAPI, +): + client = get_redis_lock_client(initialized_app) + assert await client.ping() is True diff --git a/services/resource-usage-tracker/tests/unit/test_computation_of_credits.py b/services/resource-usage-tracker/tests/unit/test_computation_of_credits.py new file mode 100644 index 00000000000..60ee5beedac --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/test_computation_of_credits.py @@ -0,0 +1,63 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + +from datetime import datetime, timedelta, timezone +from decimal import Decimal + +import pytest +from simcore_service_resource_usage_tracker.services.utils import ( + compute_service_run_credit_costs, +) + + +@pytest.mark.parametrize( + "stop,start,cost_per_unit,expected_cost", + [ + ( + datetime.now(tz=timezone.utc), + datetime.now(tz=timezone.utc) - timedelta(days=1), + Decimal("25"), + Decimal("600"), + ), + ( + datetime.now(tz=timezone.utc), + datetime.now(tz=timezone.utc) - timedelta(days=2.5), + Decimal("40"), + Decimal("2400"), + ), + ( + datetime.now(tz=timezone.utc), + datetime.now(tz=timezone.utc) - timedelta(days=25), + Decimal("12"), + Decimal("7200"), + ), + ( + datetime.now(tz=timezone.utc), + datetime.now(tz=timezone.utc) - timedelta(days=45), + Decimal("13.5"), + Decimal("14580"), + ), + ( + datetime.now(tz=timezone.utc), + datetime.now(tz=timezone.utc) - timedelta(minutes=37), + Decimal("25"), + round(Decimal("15.42"), 2), + ), + ], +) +async def test_credit_computation(stop, start, cost_per_unit, expected_cost): + computed_credits = await compute_service_run_credit_costs( + start, stop, cost_per_unit + ) + assert computed_credits == expected_cost + + +async def test_invalid_dates_in_credit_computation(): + start = datetime.now(tz=timezone.utc) + stop = datetime.now(tz=timezone.utc) - timedelta(minutes=3) + cost_per_unit = Decimal("25") + 
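+ # compute_service_run_credit_costs charges (stop - start) in hours times cost_per_unit, rounded to 2 decimals (e.g. 37 min at 25 credits/h ~= 15.42, as in the parametrized case above); with stop earlier than start it is expected to raise ValueError instead of returning a negative cost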
+ with pytest.raises(ValueError): + await compute_service_run_credit_costs(start, stop, cost_per_unit) diff --git a/services/resource-usage-tracker/tests/unit/test_core_settings.py b/services/resource-usage-tracker/tests/unit/test_core_settings.py new file mode 100644 index 00000000000..d338859a550 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/test_core_settings.py @@ -0,0 +1,31 @@ +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable +# pylint: disable=too-many-arguments + + +from pytest_simcore.helpers.typing_env import EnvVarsDict +from simcore_service_resource_usage_tracker.core.settings import ( + ApplicationSettings, + MinimalApplicationSettings, +) + + +def test_valid_cli_application_settings(app_environment: EnvVarsDict): + settings = MinimalApplicationSettings.create_from_envs() + assert settings + assert settings.RESOURCE_USAGE_TRACKER_PROMETHEUS + assert settings.RESOURCE_USAGE_TRACKER_POSTGRES + assert settings.RESOURCE_USAGE_TRACKER_REDIS + assert settings.RESOURCE_USAGE_TRACKER_RABBITMQ + assert settings.LOG_LEVEL + + +def test_valid_web_application_settings(app_environment: EnvVarsDict): + settings = ApplicationSettings.create_from_envs() + assert settings + assert settings.RESOURCE_USAGE_TRACKER_PROMETHEUS + assert settings.RESOURCE_USAGE_TRACKER_POSTGRES + assert settings.RESOURCE_USAGE_TRACKER_REDIS + assert settings.RESOURCE_USAGE_TRACKER_RABBITMQ + assert settings.LOG_LEVEL diff --git a/services/resource-usage-tracker/tests/unit/test_enums.py b/services/resource-usage-tracker/tests/unit/test_enums.py new file mode 100644 index 00000000000..5910692e68d --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/test_enums.py @@ -0,0 +1,25 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from models_library import resource_tracker +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + CreditTransactionClassification, + CreditTransactionStatus, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + ResourceTrackerServiceRunStatus, +) + + +def test_postgres_and_models_library_enums_are_in_sync(): + assert list(resource_tracker.CreditTransactionStatus) == list( + CreditTransactionStatus + ) + assert list(resource_tracker.CreditClassification) == list( + CreditTransactionClassification + ) + assert list(resource_tracker.ServiceRunStatus) == list( + ResourceTrackerServiceRunStatus + ) diff --git a/services/resource-usage-tracker/tests/unit/test_main.py b/services/resource-usage-tracker/tests/unit/test_main.py new file mode 100644 index 00000000000..6d9addd8ee2 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/test_main.py @@ -0,0 +1,12 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name + + +from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict + + +def test_main_app(app_environment: EnvVarsDict): + from simcore_service_resource_usage_tracker.main import the_app, the_settings + + assert the_app.state.settings == the_settings diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/__init__.py b/services/resource-usage-tracker/tests/unit/with_dbs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/conftest.py b/services/resource-usage-tracker/tests/unit/with_dbs/conftest.py new file mode 100644 index 
00000000000..8bf5e5ce5ba --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/conftest.py @@ -0,0 +1,287 @@ +# pylint: disable=not-context-manager +# pylint: disable=redefined-outer-name +# pylint: disable=unused-argument +# pylint: disable=unused-variable + +from collections.abc import AsyncIterable, Callable +from datetime import datetime, timezone +from random import choice +from typing import Any, Awaitable + +import httpx +import pytest +import sqlalchemy as sa +from asgi_lifespan import LifespanManager +from faker import Faker +from fastapi import FastAPI +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingMessageType, + RabbitResourceTrackingStartedMessage, +) +from models_library.resource_tracker import CreditTransactionStatus +from pydantic import TypeAdapter +from pytest_simcore.helpers.monkeypatch_envs import setenvs_from_dict +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from settings_library.rabbit import RabbitSettings +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + CreditTransactionClassification, + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_service_resource_usage_tracker.core.application import create_app +from simcore_service_resource_usage_tracker.core.settings import ApplicationSettings +from simcore_service_resource_usage_tracker.models.credit_transactions import ( + CreditTransactionDB, +) +from simcore_service_resource_usage_tracker.models.service_runs import ServiceRunDB +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait import wait_fixed + + +@pytest.fixture() +def mock_env(monkeypatch: pytest.MonkeyPatch) -> EnvVarsDict: + """This is the base mock envs used to configure the app. 
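+ It forces production boot mode and disables the missed-heartbeat background check and tracing (see env_vars below).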
+ + Do override/extend this fixture to change configurations + """ + env_vars: EnvVarsDict = { + "SC_BOOT_MODE": "production", + "POSTGRES_CLIENT_NAME": "postgres_test_client", + "RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_CHECK_ENABLED": "0", + "RESOURCE_USAGE_TRACKER_TRACING": "null", + } + setenvs_from_dict(monkeypatch, env_vars) + return env_vars + + +@pytest.fixture() +async def initialized_app( + mock_env: EnvVarsDict, + postgres_db: sa.engine.Engine, + postgres_host_config: dict[str, str], +) -> AsyncIterable[FastAPI]: + settings = ApplicationSettings.create_from_envs() + app = create_app(settings) + async with LifespanManager(app): + yield app + + +@pytest.fixture() +async def async_client(initialized_app: FastAPI) -> AsyncIterable[httpx.AsyncClient]: + async with httpx.AsyncClient( + transport=httpx.ASGITransport(app=initialized_app), + base_url="http://resource-usage-tracker.testserver.io", + headers={"Content-Type": "application/json"}, + ) as client: + yield client + + +@pytest.fixture +def random_resource_tracker_service_run(faker: Faker) -> Callable[..., dict[str, Any]]: + def _creator(**overrides) -> dict[str, Any]: + data = { + "product_name": "osparc", + "service_run_id": faker.uuid4(), + "wallet_id": faker.pyint(), + "wallet_name": faker.word(), + "pricing_plan_id": faker.pyint(), + "pricing_unit_id": faker.pyint(), + "pricing_unit_cost_id": faker.pyint(), + "simcore_user_agent": faker.word(), + "user_id": faker.pyint(), + "user_email": faker.email(), + "project_id": faker.uuid4(), + "project_name": faker.word(), + "node_id": faker.uuid4(), + "node_name": faker.word(), + "parent_project_id": faker.uuid4(), + "root_parent_project_id": faker.uuid4(), + "root_parent_project_name": faker.pystr(), + "parent_node_id": faker.uuid4(), + "root_parent_node_id": faker.uuid4(), + "service_key": "simcore/services/dynamic/jupyter-smash", + "service_version": "3.0.7", + "service_type": "DYNAMIC_SERVICE", + "service_resources": {}, + "service_additional_metadata": {}, + "started_at": datetime.now(tz=timezone.utc), + "stopped_at": None, + "service_run_status": "RUNNING", + "modified": datetime.now(tz=timezone.utc), + "last_heartbeat_at": datetime.now(tz=timezone.utc), + "pricing_unit_cost": abs(faker.pyfloat()), + } + data.update(overrides) + return data + + return _creator + + +@pytest.fixture +def random_resource_tracker_credit_transactions( + faker: Faker, +) -> Callable[..., dict[str, Any]]: + def _creator(**overrides) -> dict[str, Any]: + data = { + "product_name": "osparc", + "wallet_id": faker.pyint(), + "wallet_name": faker.word(), + "pricing_plan_id": faker.pyint(), + "pricing_unit_id": faker.pyint(), + "pricing_unit_cost_id": faker.pyint(), + "user_id": faker.pyint(), + "user_email": faker.email(), + "osparc_credits": -abs(faker.pyfloat()), + "transaction_status": choice( + [member.value for member in CreditTransactionStatus] + ), + "transaction_classification": CreditTransactionClassification.DEDUCT_SERVICE_RUN.value, + "service_run_id": faker.uuid4(), + "payment_transaction_id": faker.uuid4(), + "created": datetime.now(tz=timezone.utc), + "last_heartbeat_at": datetime.now(tz=timezone.utc), + "modified": datetime.now(tz=timezone.utc), + } + data.update(overrides) + return data + + return _creator + + +@pytest.fixture() +def resource_tracker_service_run_db(postgres_db: sa.engine.Engine): + with postgres_db.connect() as con: + # removes all service runs before continuing + con.execute(resource_tracker_service_runs.delete()) + yield + 
con.execute(resource_tracker_service_runs.delete()) + + +async def assert_service_runs_db_row( + postgres_db, service_run_id: str, status: str | None = None +) -> ServiceRunDB: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.2), + stop=stop_after_delay(10), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt, postgres_db.connect() as con: + result = con.execute( + sa.select(resource_tracker_service_runs).where( + resource_tracker_service_runs.c.service_run_id == service_run_id + ) + ) + row = result.first() + assert row + service_run_db = ServiceRunDB.model_validate(row) + if status: + assert service_run_db.service_run_status == status + return service_run_db + raise ValueError + + +async def assert_credit_transactions_db_row( + postgres_db, service_run_id: str, modified_at: datetime | None = None +) -> CreditTransactionDB: + async for attempt in AsyncRetrying( + wait=wait_fixed(0.2), + stop=stop_after_delay(10), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt, postgres_db.connect() as con: + result = con.execute( + sa.select(resource_tracker_credit_transactions).where( + resource_tracker_credit_transactions.c.service_run_id + == service_run_id + ) + ) + row = result.first() + assert row + credit_transaction_db = CreditTransactionDB.model_validate(row) + if modified_at: + assert credit_transaction_db.modified > modified_at + return credit_transaction_db + raise ValueError + + +@pytest.fixture +def random_rabbit_message_heartbeat( + faker: Faker, +) -> Callable[..., RabbitResourceTrackingHeartbeatMessage]: + def _creator(**kwargs: dict[str, Any]) -> RabbitResourceTrackingHeartbeatMessage: + msg_config = {"service_run_id": faker.uuid4(), **kwargs} + + return TypeAdapter(RabbitResourceTrackingHeartbeatMessage).validate_python( + msg_config + ) + + return _creator + + +@pytest.fixture +def random_rabbit_message_start( + faker: Faker, +) -> Callable[..., RabbitResourceTrackingStartedMessage]: + def _creator(**kwargs: dict[str, Any]) -> RabbitResourceTrackingStartedMessage: + msg_config = { + "channel_name": "io.simcore.service.tracking", + "service_run_id": faker.uuid4(), + "created_at": datetime.now(timezone.utc), + "message_type": RabbitResourceTrackingMessageType.TRACKING_STARTED, + "wallet_id": faker.pyint(), + "wallet_name": faker.pystr(), + "pricing_plan_id": faker.pyint(), + "pricing_unit_id": faker.pyint(), + "pricing_unit_cost_id": faker.pyint(), + "product_name": "osparc", + "simcore_user_agent": faker.pystr(), + "user_id": faker.pyint(), + "user_email": faker.email(), + "project_id": faker.uuid4(), + "project_name": faker.pystr(), + "node_id": faker.uuid4(), + "node_name": faker.pystr(), + "parent_project_id": faker.uuid4(), + "root_parent_project_id": faker.uuid4(), + "root_parent_project_name": faker.pystr(), + "parent_node_id": faker.uuid4(), + "root_parent_node_id": faker.uuid4(), + "service_key": "simcore/services/comp/itis/sleeper", + "service_version": "2.1.6", + "service_type": "computational", + "service_resources": { + "container": { + "image": "simcore/services/comp/itis/sleeper:2.1.6", + "resources": { + "CPU": {"limit": 0.1, "reservation": 0.1}, + "RAM": {"limit": 134217728, "reservation": 134217728}, + }, + "boot_modes": ["CPU"], + } + }, + "service_additional_metadata": {}, + **kwargs, + } + + return TypeAdapter(RabbitResourceTrackingStartedMessage).validate_python( + msg_config + ) + + return _creator + + +@pytest.fixture +async def rpc_client( + rabbit_service: RabbitSettings, + 
initialized_app: FastAPI, + rabbitmq_rpc_client: Callable[[str], Awaitable[RabbitMQRPCClient]], +) -> RabbitMQRPCClient: + return await rabbitmq_rpc_client("client") diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_credit_transactions.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_credit_transactions.py new file mode 100644 index 00000000000..baf61f3b7fc --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_credit_transactions.py @@ -0,0 +1,522 @@ +from collections.abc import Iterator +from datetime import UTC, datetime, timedelta +from decimal import Decimal +from typing import Callable + +import httpx +import pytest +import sqlalchemy as sa +from faker import Faker +from models_library.api_schemas_resource_usage_tracker.credit_transactions import ( + CreditTransactionCreateBody, + WalletTotalCredits, +) +from models_library.products import ProductName +from models_library.projects import ProjectID +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionStatus, + ServiceRunStatus, +) +from servicelib.rabbitmq import RabbitMQClient, RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import ( + credit_transactions, + service_runs, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + WalletTransactionError, +) +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_service_resource_usage_tracker.services.service_runs import ServiceRunPage +from starlette import status +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture() +def resource_tracker_credit_transactions_db( + postgres_db: sa.engine.Engine, +) -> Iterator[None]: + with postgres_db.connect() as con: + + yield + + con.execute(resource_tracker_credit_transactions.delete()) + + +_WALLET_ID = 1 + + +async def test_credit_transactions_workflow( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + async_client: httpx.AsyncClient, + resource_tracker_credit_transactions_db: None, + rpc_client: RabbitMQRPCClient, + faker: Faker, +): + url = URL("/v1/credit-transactions") + + response = await async_client.post( + url=f"{url}", + json={ + "product_name": "osparc", + "wallet_id": _WALLET_ID, + "wallet_name": "string", + "user_id": 1, + "user_email": "string", + "osparc_credits": 1234.54, + "payment_transaction_id": "string", + "created_at": "2023-08-31T13:04:23.941Z", + }, + ) + assert response.status_code == status.HTTP_201_CREATED + data = response.json() + assert data["credit_transaction_id"] == 1 + + response = await async_client.post( + url=f"{url}", + json={ + "product_name": "osparc", + "wallet_id": _WALLET_ID, + "wallet_name": "string", + "user_id": 1, + "user_email": "string", + "osparc_credits": 105.5, + "payment_transaction_id": "string", + "created_at": "2023-08-31T13:04:23.941Z", + }, + ) + assert response.status_code == status.HTTP_201_CREATED + data = response.json() + assert data["credit_transaction_id"] == 2 + + response = await async_client.post( + url=f"{url}", + json={ + "product_name": "osparc", + "wallet_id": 2, + "wallet_name": "string", + "user_id": 1, + "user_email": 
"string", + "osparc_credits": 10.85, + "payment_transaction_id": "string", + "created_at": "2023-08-31T13:04:23.941Z", + }, + ) + assert response.status_code == status.HTTP_201_CREATED + data = response.json() + assert data["credit_transaction_id"] == 3 + + url = URL("/v1/credit-transactions/credits:sum") + response = await async_client.post( + f'{url.with_query({"product_name": "osparc", "wallet_id": 1})}' + ) + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data["wallet_id"] == _WALLET_ID + _expected_credits = Decimal("1340.04") + assert data["available_osparc_credits"] == float(_expected_credits) + + output = await credit_transactions.get_wallet_total_credits( + rpc_client, + product_name="osparc", + wallet_id=_WALLET_ID, + ) + assert output.available_osparc_credits == _expected_credits + + +_USER_ID_1 = 1 +_USER_ID_2 = 2 +_SERVICE_RUN_ID_1 = "1" +_SERVICE_RUN_ID_2 = "2" +_SERVICE_RUN_ID_3 = "3" +_SERVICE_RUN_ID_4 = "4" +_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS = 2 +_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS = 3 + + +@pytest.fixture() +def resource_tracker_setup_db( + postgres_db: sa.engine.Engine, + random_resource_tracker_service_run, + random_resource_tracker_credit_transactions, + project_id: ProjectID, + product_name: ProductName, + faker: Faker, +) -> Iterator[None]: + with postgres_db.connect() as con: + # Service run table + result = con.execute( + resource_tracker_service_runs.insert() + .values( + [ + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_1, + product_name=product_name, + started_at=datetime.now(tz=UTC) - timedelta(hours=1), + stopped_at=datetime.now(tz=UTC), + project_id=project_id, + service_run_status=ServiceRunStatus.SUCCESS, + wallet_id=_WALLET_ID, + ), + random_resource_tracker_service_run( + user_id=_USER_ID_2, # <-- different user + service_run_id=_SERVICE_RUN_ID_2, + product_name=product_name, + started_at=datetime.now(tz=UTC) - timedelta(hours=1), + stopped_at=None, + project_id=project_id, + service_run_status=ServiceRunStatus.RUNNING, # <-- Runnin status + wallet_id=_WALLET_ID, + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_3, + product_name=product_name, + started_at=datetime.now(tz=UTC) - timedelta(hours=1), + stopped_at=datetime.now(tz=UTC), + project_id=project_id, + service_run_status=ServiceRunStatus.SUCCESS, + wallet_id=_WALLET_ID, + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_4, + product_name=product_name, + started_at=datetime.now(tz=UTC) - timedelta(hours=1), + stopped_at=datetime.now(tz=UTC), + project_id=faker.uuid4(), # <-- different project + service_run_status=ServiceRunStatus.SUCCESS, + wallet_id=_WALLET_ID, + ), + ] + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + + # Transaction table + result = con.execute( + resource_tracker_credit_transactions.insert() + .values( + [ + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_1, + product_name=product_name, + payment_transaction_id=None, + osparc_credits=-50, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.DEDUCT_SERVICE_RUN, + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_2, # <-- different user + service_run_id=_SERVICE_RUN_ID_2, + product_name=product_name, + payment_transaction_id=None, + osparc_credits=-70, + 
transaction_status=CreditTransactionStatus.PENDING, # <-- Pending status + transaction_classification=CreditClassification.DEDUCT_SERVICE_RUN, + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + osparc_credits=-100, + service_run_id=_SERVICE_RUN_ID_3, + product_name=product_name, + payment_transaction_id=None, + transaction_status=CreditTransactionStatus.IN_DEBT, # <-- IN DEBT + transaction_classification=CreditClassification.DEDUCT_SERVICE_RUN, + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + osparc_credits=-90, + service_run_id=_SERVICE_RUN_ID_4, + product_name=product_name, + payment_transaction_id=None, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.DEDUCT_SERVICE_RUN, + wallet_id=_WALLET_ID, + ), + # We will add 2 more wallets for the pay-project-debt tests + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + osparc_credits=50, # <-- Not enough credits to pay the DEBT of -100 + service_run_id=None, + product_name=product_name, + payment_transaction_id="INVITATION", + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.ADD_WALLET_TOP_UP, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + osparc_credits=500, # <-- Enough credits to pay the DEBT of -100 + service_run_id=None, + product_name=product_name, + transaction_status=CreditTransactionStatus.BILLED, + transaction_classification=CreditClassification.ADD_WALLET_TOP_UP, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS, + ), + ] + ) + .returning(resource_tracker_credit_transactions) + ) + row = result.first() + assert row + + yield + + con.execute(resource_tracker_credit_transactions.delete()) + con.execute(resource_tracker_service_runs.delete()) + + +async def test_get_project_wallet_total_credits( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + project_id: ProjectID, + product_name: ProductName, +): + output = await credit_transactions.get_project_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + project_id=project_id, + ) + assert isinstance(output, WalletTotalCredits) + assert output.available_osparc_credits == -220 + + output = await credit_transactions.get_project_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + project_id=project_id, + transaction_status=CreditTransactionStatus.IN_DEBT, + ) + assert isinstance(output, WalletTotalCredits) + assert output.available_osparc_credits == -100 + + +async def test_pay_project_debt( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + project_id: ProjectID, + product_name: ProductName, + faker: Faker, +): + total_wallet_credits_for_wallet_in_debt_in_beginning = ( + await credit_transactions.get_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + ) + ) + + output = await credit_transactions.get_project_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + project_id=project_id, + transaction_status=CreditTransactionStatus.IN_DEBT, + ) + assert isinstance(output, WalletTotalCredits) + assert output.available_osparc_credits == -100 + _project_debt_amount = output.available_osparc_credits + + # We test the situation where the new and current wallet 
transaction amount are not setup properly by the client (ex. webserver) + new_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS, + wallet_name="new wallet", + user_id=_USER_ID_1, + user_email=faker.email(), + osparc_credits=_project_debt_amount - 50, # <-- Negative number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID} to wallet {_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS}", + created_at=datetime.now(UTC), + ) + current_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID, + wallet_name="current wallet", + user_id=_USER_ID_1, + user_email=faker.email(), + osparc_credits=-_project_debt_amount, # <-- Positive number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS} to wallet {_WALLET_ID}", + created_at=datetime.now(UTC), + ) + + with pytest.raises(WalletTransactionError): + await credit_transactions.pay_project_debt( + rpc_client, + project_id=project_id, + current_wallet_transaction=current_wallet_transaction, + new_wallet_transaction=new_wallet_transaction, + ) + + # We test situation when the new wallet doesn't have enough credits to pay the debt + new_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS, + wallet_name="new wallet", + user_id=_USER_ID_1, + user_email="test@test.com", + osparc_credits=_project_debt_amount, # <-- Negative number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID} to wallet {_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS}", + created_at=datetime.now(UTC), + ) + current_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID, + wallet_name="current wallet", + user_id=_USER_ID_1, + user_email="test@test.com", + osparc_credits=-_project_debt_amount, # <-- Positive number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID_FOR_PAYING_DEBT__NOT_ENOUGH_CREDITS} to wallet {_WALLET_ID}", + created_at=datetime.now(UTC), + ) + + with pytest.raises(WalletTransactionError): + await credit_transactions.pay_project_debt( + rpc_client, + project_id=project_id, + current_wallet_transaction=current_wallet_transaction, + new_wallet_transaction=new_wallet_transaction, + ) + + # We test the proper situation, when new wallet pays the debt of the project + new_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS, + wallet_name="new wallet", + user_id=_USER_ID_1, + user_email="test@test.com", + osparc_credits=_project_debt_amount, # <-- Negative number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID} to wallet {_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS}", + created_at=datetime.now(UTC), + ) + current_wallet_transaction = CreditTransactionCreateBody( + product_name=product_name, + wallet_id=_WALLET_ID, + wallet_name="current wallet", + user_id=_USER_ID_1, + user_email="test@test.com", + osparc_credits=-_project_debt_amount, # <-- Positive number + payment_transaction_id=f"Payment transaction from wallet {_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS} to wallet {_WALLET_ID}", + created_at=datetime.now(UTC), + ) + + await credit_transactions.pay_project_debt( + rpc_client, + project_id=project_id, + current_wallet_transaction=current_wallet_transaction, + 
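# A rough sketch of the expected bookkeeping, assuming the fixture above seeded the paying wallet with +500 credits: + # settling the -100 project debt should leave _WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS at 500 - 100 = 400 credits, + # add +100 back to the original wallet, and leave no IN_DEBT transactions for the project (all asserted below). +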
new_wallet_transaction=new_wallet_transaction, + ) + + # We additionally check that the project is no longer in DEBT + output = await credit_transactions.get_project_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + project_id=project_id, + transaction_status=CreditTransactionStatus.IN_DEBT, + ) + assert isinstance(output, WalletTotalCredits) + assert output.available_osparc_credits == 0 + + # We check whether the credits were deducted from the new wallet + output = await credit_transactions.get_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID_FOR_PAYING_DEBT__ENOUGH_CREDITS, + ) + assert isinstance(output, WalletTotalCredits) + assert ( + output.available_osparc_credits + == 400 # <-- 100 was deducted from the new wallet + ) + + # We check whether the credits were added back to the original wallet + output = await credit_transactions.get_wallet_total_credits( + rpc_client, + product_name=product_name, + wallet_id=_WALLET_ID, + ) + assert isinstance(output, WalletTotalCredits) + assert ( + output.available_osparc_credits + == total_wallet_credits_for_wallet_in_debt_in_beginning.available_osparc_credits + + 100 # <-- 100 was added to the original wallet + ) + + +async def test_list_service_runs_with_transaction_status_filter( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + project_id: ProjectID, + product_name: ProductName, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID_1, + product_name=product_name, + wallet_id=_WALLET_ID, + access_all_wallet_usage=True, + project_id=project_id, + transaction_status=CreditTransactionStatus.PENDING, + offset=0, + limit=1, + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 1 + assert result.total == 1 + + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID_1, + product_name=product_name, + wallet_id=_WALLET_ID, + access_all_wallet_usage=True, + project_id=project_id, + transaction_status=CreditTransactionStatus.IN_DEBT, + offset=0, + limit=1, + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 1 + assert result.total == 1 + + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID_1, + product_name=product_name, + wallet_id=_WALLET_ID, + access_all_wallet_usage=True, + project_id=project_id, + transaction_status=CreditTransactionStatus.BILLED, + offset=0, + limit=1, + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 1 + assert result.total == 1 diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_checkouts.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_checkouts.py new file mode 100644 index 00000000000..95c15a38652 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_checkouts.py @@ -0,0 +1,281 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments + + +from datetime import UTC, datetime, timedelta +from decimal import Decimal +from typing import Generator + +import pytest +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.licensed_items_checkouts import ( + LicensedItemCheckoutGet, + LicensedItemsCheckoutsPage, +) +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemsPurchasesCreate,
+) +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import ( + licensed_items_checkouts, + licensed_items_purchases, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + NotEnoughAvailableSeatsError, +) +from simcore_postgres_database.models.resource_tracker_licensed_items_checkouts import ( + resource_tracker_licensed_items_checkouts, +) +from simcore_postgres_database.models.resource_tracker_licensed_items_purchases import ( + resource_tracker_licensed_items_purchases, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_USER_ID_1 = 1 +_WALLET_ID = 6 + + +@pytest.fixture() +def resource_tracker_service_run_id( + postgres_db: sa.engine.Engine, random_resource_tracker_service_run +) -> Generator[str, None, None]: + with postgres_db.connect() as con: + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID_1, wallet_id=_WALLET_ID + ) + ) + .returning(resource_tracker_service_runs.c.service_run_id) + ) + row = result.first() + assert row + + yield row[0] + + con.execute(resource_tracker_licensed_items_checkouts.delete()) + con.execute(resource_tracker_licensed_items_purchases.delete()) + con.execute(resource_tracker_service_runs.delete()) + + +async def test_rpc_licensed_items_checkouts_workflow( + mocked_redis_server: None, + resource_tracker_service_run_id: str, + rpc_client: RabbitMQRPCClient, +): + # List licensed items checkouts + output = await licensed_items_checkouts.get_licensed_items_checkouts_page( + rpc_client, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + ) + assert output.total == 0 + assert output.items == [] + + # Purchase license item + _create_data = LicensedItemsPurchasesCreate( + product_name="osparc", + licensed_item_id="beb16d18-d57d-44aa-a638-9727fa4a72ef", + key="Duke", + version="1.0.0", + wallet_id=_WALLET_ID, + wallet_name="My Wallet", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + pricing_unit_cost=Decimal(10), + start_at=datetime.now(tz=UTC), + expire_at=datetime.now(tz=UTC) + timedelta(days=1), + num_of_seats=5, + purchased_by_user=_USER_ID_1, + user_email="test@test.com", + purchased_at=datetime.now(tz=UTC), + ) + created_item = await licensed_items_purchases.create_licensed_item_purchase( + rpc_client, data=_create_data + ) + + # Checkout with num of seats + checkout = await licensed_items_checkouts.checkout_licensed_item( + rpc_client, + licensed_item_id=created_item.licensed_item_id, + key=created_item.key, + version=created_item.version, + wallet_id=_WALLET_ID, + product_name="osparc", + num_of_seats=3, + service_run_id=resource_tracker_service_run_id, + user_id=_USER_ID_1, + user_email="test@test.com", + ) + + # List licensed items checkouts + output = await licensed_items_checkouts.get_licensed_items_checkouts_page( + rpc_client, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + ) + assert output.total == 1 + assert isinstance(output, LicensedItemsCheckoutsPage) + + # Get licensed items checkouts + output = await licensed_items_checkouts.get_licensed_item_checkout( + rpc_client, + product_name="osparc", + licensed_item_checkout_id=output.items[0].licensed_item_checkout_id, + ) + assert isinstance(output, LicensedItemCheckoutGet) + + # 
Release num of seats + license_item_checkout = await licensed_items_checkouts.release_licensed_item( + rpc_client, + licensed_item_checkout_id=checkout.licensed_item_checkout_id, + product_name="osparc", + ) + assert license_item_checkout + assert isinstance(license_item_checkout.stopped_at, datetime) + + +async def test_rpc_licensed_items_checkouts_can_checkout_older_version( + mocked_redis_server: None, + resource_tracker_service_run_id: str, + rpc_client: RabbitMQRPCClient, +): + # List licensed items checkouts + output = await licensed_items_checkouts.get_licensed_items_checkouts_page( + rpc_client, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + ) + assert output.total == 0 + assert output.items == [] + + # Purchase license item + _create_data = LicensedItemsPurchasesCreate( + product_name="osparc", + licensed_item_id="beb16d18-d57d-44aa-a638-9727fa4a72ef", + key="Duke", + version="2.0.0", + wallet_id=_WALLET_ID, + wallet_name="My Wallet", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + pricing_unit_cost=Decimal(10), + start_at=datetime.now(tz=UTC), + expire_at=datetime.now(tz=UTC) + timedelta(days=1), + num_of_seats=5, + purchased_by_user=_USER_ID_1, + user_email="test@test.com", + purchased_at=datetime.now(tz=UTC), + ) + created_item = await licensed_items_purchases.create_licensed_item_purchase( + rpc_client, data=_create_data + ) + + # Checkout with num of seats + checkout = await licensed_items_checkouts.checkout_licensed_item( + rpc_client, + licensed_item_id=created_item.licensed_item_id, + key="Duke", + version="1.0.0", # <-- Older version + wallet_id=_WALLET_ID, + product_name="osparc", + num_of_seats=3, + service_run_id=resource_tracker_service_run_id, + user_id=_USER_ID_1, + user_email="test@test.com", + ) + + # List licensed items checkouts + output = await licensed_items_checkouts.get_licensed_items_checkouts_page( + rpc_client, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + ) + assert output.total == 1 + assert isinstance(output, LicensedItemsCheckoutsPage) + + # Get licensed items checkouts + output = await licensed_items_checkouts.get_licensed_item_checkout( + rpc_client, + product_name="osparc", + licensed_item_checkout_id=output.items[0].licensed_item_checkout_id, + ) + assert isinstance(output, LicensedItemCheckoutGet) + + # Release num of seats + license_item_checkout = await licensed_items_checkouts.release_licensed_item( + rpc_client, + licensed_item_checkout_id=checkout.licensed_item_checkout_id, + product_name="osparc", + ) + assert license_item_checkout + assert isinstance(license_item_checkout.stopped_at, datetime) + + +async def test_rpc_licensed_items_checkouts_can_not_checkout_newer_version( + mocked_redis_server: None, + resource_tracker_service_run_id: str, + rpc_client: RabbitMQRPCClient, +): + # List licensed items checkouts + output = await licensed_items_checkouts.get_licensed_items_checkouts_page( + rpc_client, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + ) + assert output.total == 0 + assert output.items == [] + + # Purchase license item + _create_data = LicensedItemsPurchasesCreate( + product_name="osparc", + licensed_item_id="beb16d18-d57d-44aa-a638-9727fa4a72ef", + key="Duke", + version="2.0.0", # <-- Older version + wallet_id=_WALLET_ID, + wallet_name="My Wallet", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + pricing_unit_cost=Decimal(10), + start_at=datetime.now(tz=UTC), + expire_at=datetime.now(tz=UTC) + timedelta(days=1), + num_of_seats=5, + 
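# NOTE: taken together, these two tests suggest a purchase covers its own and older versions only: the 2.0.0 + # purchase above allows checking out version 1.0.0, while requesting the newer 3.0.0 below is expected to + # fail with NotEnoughAvailableSeatsError. +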
purchased_by_user=_USER_ID_1, + user_email="test@test.com", + purchased_at=datetime.now(tz=UTC), + ) + created_item = await licensed_items_purchases.create_licensed_item_purchase( + rpc_client, data=_create_data + ) + + # Checkout with num of seats + with pytest.raises(NotEnoughAvailableSeatsError): + await licensed_items_checkouts.checkout_licensed_item( + rpc_client, + licensed_item_id=created_item.licensed_item_id, + key="Duke", + version="3.0.0", # <-- Newer version + wallet_id=_WALLET_ID, + product_name="osparc", + num_of_seats=3, + service_run_id=resource_tracker_service_run_id, + user_id=_USER_ID_1, + user_email="test@test.com", + ) diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_purchases.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_purchases.py new file mode 100644 index 00000000000..bead8f804b3 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_licensed_items_purchases.py @@ -0,0 +1,80 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments + +from datetime import UTC, datetime +from decimal import Decimal + +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.licensed_items_purchases import ( + LicensedItemPurchaseGet, + LicensedItemsPurchasesPage, +) +from models_library.resource_tracker_licensed_items_purchases import ( + LicensedItemsPurchasesCreate, +) +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import ( + licensed_items_purchases, +) + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_rpc_licensed_items_purchases_workflow( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + rpc_client: RabbitMQRPCClient, +): + result = await licensed_items_purchases.get_licensed_items_purchases_page( + rpc_client, product_name="osparc", wallet_id=1 + ) + assert isinstance(result, LicensedItemsPurchasesPage) # nosec + assert result.items == [] + assert result.total == 0 + + _create_data = LicensedItemsPurchasesCreate( + product_name="osparc", + licensed_item_id="beb16d18-d57d-44aa-a638-9727fa4a72ef", + key="Duke", + version="1.0.0", + wallet_id=1, + wallet_name="My Wallet", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + pricing_unit_cost=Decimal(10), + start_at=datetime.now(tz=UTC), + expire_at=datetime.now(tz=UTC), + num_of_seats=1, + purchased_by_user=1, + user_email="test@test.com", + purchased_at=datetime.now(tz=UTC), + ) + + created_item = await licensed_items_purchases.create_licensed_item_purchase( + rpc_client, data=_create_data + ) + assert isinstance(created_item, LicensedItemPurchaseGet) # nosec + + result = await licensed_items_purchases.get_licensed_item_purchase( + rpc_client, + product_name="osparc", + licensed_item_purchase_id=created_item.licensed_item_purchase_id, + ) + assert isinstance(result, LicensedItemPurchaseGet) # nosec + assert result.licensed_item_purchase_id == created_item.licensed_item_purchase_id + + result = await licensed_items_purchases.get_licensed_items_purchases_page( + rpc_client, product_name="osparc", wallet_id=_create_data.wallet_id + ) + assert isinstance(result, LicensedItemsPurchasesPage) # nosec + assert len(result.items) == 1 + assert result.total == 1 diff --git 
a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans.py new file mode 100644 index 00000000000..f26046cf738 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans.py @@ -0,0 +1,284 @@ +# pylint: disable=no-value-for-parameter +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=too-many-arguments +# pylint: disable=unused-argument +# pylint: disable=unused-variable + + +from collections.abc import Iterator +from datetime import datetime, timezone +from decimal import Decimal +from unittest import mock + +import httpx +import pytest +import sqlalchemy as sa +from models_library.resource_tracker import UnitExtraInfoTier +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.models.services import services_meta_data +from starlette import status +from yarl import URL + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + +_SERVICE_KEY = "simcore/services/comp/itis/isolve" +_SERVICE_VERSION = "1.0.16" +_PRICING_PLAN_ID = 1 + +_PRICING_UNIT_ID = 2 + +_SERVICE_KEY_2 = "simcore/services/comp/itis/sleeper" +_SERVICE_VERSION_2 = "2.10.1" +_PRICING_PLAN_ID_2 = 2 + + +@pytest.fixture() +def resource_tracker_pricing_tables_db(postgres_db: sa.engine.Engine) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute( + resource_tracker_pricing_plans.insert().values( + product_name="osparc", + display_name="ISolve Thermal", + description="", + classification="TIER", + is_active=True, + pricing_plan_key="isolve-thermal", + ) + ) + con.execute( + resource_tracker_pricing_plans.insert().values( + product_name="osparc", + display_name="Sleeper", + description="", + classification="TIER", + is_active=True, + pricing_plan_key="sleeper", + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + unit_name="S", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ), + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + pricing_plan_key="isolve-thermal", + pricing_unit_id=1, + pricing_unit_name="S", + cost_per_unit=Decimal("5"), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + unit_name="M", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ), + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + 
pricing_plan_id=_PRICING_PLAN_ID, + pricing_plan_key="isolve-thermal", + pricing_unit_id=2, + pricing_unit_name="M", + cost_per_unit=Decimal("15.6"), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + unit_name="L", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ), + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + pricing_plan_key="isolve-thermal", + pricing_unit_id=3, + pricing_unit_name="L", + cost_per_unit=Decimal("17.7"), + valid_from=datetime.now(tz=timezone.utc), + valid_to=datetime.now(tz=timezone.utc), + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + pricing_plan_key="isolve-thermal", + pricing_unit_id=3, + pricing_unit_name="L", + cost_per_unit=Decimal("28.9"), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=_PRICING_PLAN_ID_2, + unit_name="XXL", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ), + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=_PRICING_PLAN_ID_2, + pricing_plan_key="sleeper", + pricing_unit_id=4, + pricing_unit_name="XXL", + cost_per_unit=Decimal("68"), + valid_from=datetime.now(tz=timezone.utc), + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + + con.execute( + services_meta_data.insert().values( + key=_SERVICE_KEY, + version=_SERVICE_VERSION, + name="name", + description="description", + ) + ) + con.execute( + resource_tracker_pricing_plan_to_service.insert().values( + pricing_plan_id=_PRICING_PLAN_ID, + service_key=_SERVICE_KEY, + service_version=_SERVICE_VERSION, + service_default_plan=True, + ) + ) + + con.execute( + services_meta_data.insert().values( + key=_SERVICE_KEY_2, + version=_SERVICE_VERSION_2, + name="name", + description="description", + ) + ) + con.execute( + resource_tracker_pricing_plan_to_service.insert().values( + pricing_plan_id=_PRICING_PLAN_ID_2, + service_key=_SERVICE_KEY_2, + service_version=_SERVICE_VERSION_2, + service_default_plan=True, + ) + ) + + yield + + con.execute(resource_tracker_pricing_plan_to_service.delete()) + con.execute(resource_tracker_pricing_units.delete()) + con.execute(resource_tracker_pricing_plans.delete()) + con.execute(resource_tracker_pricing_unit_costs.delete()) + con.execute(services_meta_data.delete()) + + +async def test_get_default_pricing_plan_for_service( + mocked_redis_server: None, + mocked_setup_rabbitmq: mock.Mock, + postgres_db: sa.engine.Engine, + resource_tracker_pricing_tables_db: None, + async_client: httpx.AsyncClient, +): + url = URL(f"/v1/services/{_SERVICE_KEY}/{_SERVICE_VERSION}/pricing-plan") + response = await 
async_client.get(f'{url.with_query({"product_name": "osparc"})}') + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + assert len(data["pricing_units"]) == 3 + assert data["pricing_units"][0]["unit_name"] == "S" + assert data["pricing_units"][1]["unit_name"] == "M" + assert data["pricing_units"][2]["unit_name"] == "L" + + url = URL(f"/v1/pricing-plans/{_PRICING_PLAN_ID}/pricing-units/{_PRICING_UNIT_ID}") + response = await async_client.get(f'{url.with_query({"product_name": "osparc"})}') + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + assert data["pricing_unit_id"] == _PRICING_UNIT_ID + + url = URL(f"/v1/services/{_SERVICE_KEY_2}/{_SERVICE_VERSION_2}/pricing-plan") + response = await async_client.get(f'{url.with_query({"product_name": "osparc"})}') + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + assert len(data["pricing_units"]) == 1 + assert data["pricing_units"][0]["unit_name"] == "XXL" + + bigger_version = "3.10.5" + url = URL(f"/v1/services/{_SERVICE_KEY_2}/{bigger_version}/pricing-plan") + response = await async_client.get(f'{url.with_query({"product_name": "osparc"})}') + assert response.status_code == status.HTTP_200_OK + data = response.json() + assert data + assert len(data["pricing_units"]) == 1 + assert data["pricing_units"][0]["unit_name"] == "XXL" + + smaller_version = "1.0.0" + url = URL(f"/v1/services/{_SERVICE_KEY_2}/{smaller_version}/pricing-plan") + response = await async_client.get(f'{url.with_query({"product_name": "osparc"})}') + assert response.status_code == status.HTTP_404_NOT_FOUND diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans_rpc.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans_rpc.py new file mode 100644 index 00000000000..59087705de0 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_pricing_plans_rpc.py @@ -0,0 +1,572 @@ +from collections.abc import Iterator +from decimal import Decimal + +import pytest +import sqlalchemy as sa +from faker import Faker +from models_library.api_schemas_resource_usage_tracker.pricing_plans import ( + PricingPlanToServiceGet, + RutPricingPlanGet, + RutPricingPlanPage, + RutPricingUnitGet, +) +from models_library.resource_tracker import ( + PricingPlanClassification, + PricingPlanCreate, + PricingPlanUpdate, + PricingUnitCostUpdate, + PricingUnitWithCostCreate, + PricingUnitWithCostUpdate, + SpecificInfo, + UnitExtraInfoLicense, + UnitExtraInfoTier, +) +from models_library.services import ServiceKey, ServiceVersion +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import ( + pricing_plans, + pricing_units, +) +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker.errors import ( + PricingUnitDuplicationError, +) +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.models.services import services_meta_data + +pytest_simcore_core_services_selection = ["postgres",
"rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_SERVICE_KEY = "simcore/services/comp/itis/sleeper" +_SERVICE_VERSION_1 = "2.0.2" +_SERVICE_VERSION_2 = "3.0.0" + +_SERVICE_KEY_3 = "simcore/services/comp/itis/different-service" +_SERVICE_VERSION_3 = "1.0.1" + + +@pytest.fixture() +def resource_tracker_setup_db( + postgres_db: sa.engine.Engine, +) -> Iterator[None]: + with postgres_db.connect() as con: + + con.execute( + services_meta_data.insert().values( + key=_SERVICE_KEY, + version=_SERVICE_VERSION_1, + name="name", + description="description", + ) + ) + con.execute( + services_meta_data.insert().values( + key=_SERVICE_KEY, + version=_SERVICE_VERSION_2, + name="name", + description="description", + ) + ) + con.execute( + services_meta_data.insert().values( + key=_SERVICE_KEY_3, + version=_SERVICE_VERSION_3, + name="name", + description="description", + ) + ) + + yield + + con.execute(resource_tracker_pricing_unit_costs.delete()) + con.execute(resource_tracker_pricing_units.delete()) + con.execute(resource_tracker_pricing_plan_to_service.delete()) + con.execute(resource_tracker_pricing_plans.delete()) + con.execute(services_meta_data.delete()) + + +async def test_rpc_pricing_plans_workflow( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + faker: Faker, +): + _display_name = faker.word() + result = await pricing_plans.create_pricing_plan( + rpc_client, + data=PricingPlanCreate( + product_name="osparc", + display_name=_display_name, + description=faker.sentence(), + classification=PricingPlanClassification.TIER, + pricing_plan_key=faker.word(), + ), + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units == [] + assert result.display_name == _display_name + _pricing_plan_id = result.pricing_plan_id + + _update_display_name = "display name updated" + _update_description = "description name updated" + result = await pricing_plans.update_pricing_plan( + rpc_client, + product_name="osparc", + data=PricingPlanUpdate( + pricing_plan_id=_pricing_plan_id, + display_name=_update_display_name, + description=_update_description, + is_active=True, + ), + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units == [] + assert result.display_name == _update_display_name + assert result.description == _update_description + + result = await pricing_plans.get_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units == [] + assert result.display_name == _update_display_name + assert result.description == _update_description + assert result.is_active is True + + result = await pricing_plans.list_pricing_plans_without_pricing_units( + rpc_client, + product_name="osparc", + ) + assert isinstance(result, RutPricingPlanPage) + assert result.total == 1 + assert len(result.items) == 1 + assert isinstance(result.items[0], RutPricingPlanGet) + assert result.items[0].pricing_units is None + + # Now I will deactivate the pricing plan + result = await pricing_plans.update_pricing_plan( + rpc_client, + product_name="osparc", + data=PricingPlanUpdate( + pricing_plan_id=_pricing_plan_id, + display_name=faker.word(), + description=faker.sentence(), + is_active=False, # <-- deactivate + ), + ) + assert isinstance(result, RutPricingPlanGet) + assert result.is_active is False + + +async def test_rpc_pricing_plans_with_units_workflow( + mocked_redis_server: None, + resource_tracker_setup_db: 
None, + rpc_client: RabbitMQRPCClient, + faker: Faker, +): + _display_name = faker.word() + result = await pricing_plans.create_pricing_plan( + rpc_client, + data=PricingPlanCreate( + product_name="osparc", + display_name=_display_name, + description=faker.sentence(), + classification=PricingPlanClassification.TIER, + pricing_plan_key=faker.word(), + ), + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units == [] + assert result.display_name == _display_name + _pricing_plan_id = result.pricing_plan_id + + result = await pricing_units.create_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostCreate( + pricing_plan_id=_pricing_plan_id, + unit_name="SMALL", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + cost_per_unit=Decimal(10), + comment=faker.sentence(), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result + _first_pricing_unit_id = result.pricing_unit_id + _current_cost_per_unit_id = result.current_cost_per_unit_id + + with pytest.raises(PricingUnitDuplicationError): + await pricing_units.create_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostCreate( + pricing_plan_id=_pricing_plan_id, + unit_name="SMALL", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + cost_per_unit=Decimal(10), + comment=faker.sentence(), + ), + ) + + # Get pricing plan + result = await pricing_plans.get_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units + assert len(result.pricing_units) == 1 + assert result.pricing_units[0].pricing_unit_id == _first_pricing_unit_id + + # Update only the pricing unit info, without a COST update + _unit_name = "VERY SMALL" + result = await pricing_units.update_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostUpdate( + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + unit_name=_unit_name, + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + pricing_unit_cost_update=None, + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result.unit_name == _unit_name + assert result.current_cost_per_unit == Decimal(10) + assert result.current_cost_per_unit_id == _current_cost_per_unit_id + + # Update pricing unit with COST update!
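+ # As the assertions below suggest, a cost update appears to insert a new pricing-unit-cost row, so + # current_cost_per_unit_id changes here, whereas the info-only update above kept the same cost id.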
+ result = await pricing_units.update_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostUpdate( + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + unit_name="MEDIUM", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + pricing_unit_cost_update=PricingUnitCostUpdate( + cost_per_unit=Decimal(15), + comment="Comment update", + ), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result.unit_name == "MEDIUM" + assert result.current_cost_per_unit == Decimal(15) + assert result.current_cost_per_unit_id != _current_cost_per_unit_id + + # Test get pricing unit + result = await pricing_units.get_pricing_unit( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + ) + assert isinstance(result, RutPricingUnitGet) + assert result.current_cost_per_unit == Decimal(15) + + # Create one more unit + result = await pricing_units.create_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostCreate( + pricing_plan_id=_pricing_plan_id, + unit_name="LARGE", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info=SpecificInfo(aws_ec2_instances=[]), + cost_per_unit=Decimal(20), + comment=faker.sentence(), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result + _second_pricing_unit_id = result.pricing_unit_id + + # Get pricing plan with units + result = await pricing_plans.get_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units + assert len(result.pricing_units) == 2 + assert result.pricing_units[0].pricing_unit_id == _first_pricing_unit_id + assert result.pricing_units[1].pricing_unit_id == _second_pricing_unit_id + + +async def test_rpc_pricing_plans_to_service_workflow( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + faker: Faker, +): + result = await pricing_plans.create_pricing_plan( + rpc_client, + data=PricingPlanCreate( + product_name="osparc", + display_name=faker.word(), + description=faker.sentence(), + classification=PricingPlanClassification.TIER, + pricing_plan_key=faker.word(), + ), + ) + assert isinstance(result, RutPricingPlanGet) + _pricing_plan_id = result.pricing_plan_id + + result = ( + await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + ) + assert isinstance(result, list) + assert result == [] + + _first_service_version = ServiceVersion(_SERVICE_VERSION_1) + result = await pricing_plans.connect_service_to_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + service_key=ServiceKey(_SERVICE_KEY), + service_version=_first_service_version, + ) + assert isinstance(result, PricingPlanToServiceGet) + assert result.pricing_plan_id == _pricing_plan_id + assert result.service_version == _first_service_version + + result = ( + await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + ) + assert isinstance(result, list) + assert len(result) == 1 + + # Connect different version + _second_service_version = ServiceVersion(_SERVICE_VERSION_2) + result 
= await pricing_plans.connect_service_to_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + service_key=ServiceKey(_SERVICE_KEY), + service_version=_second_service_version, + ) + assert isinstance(result, PricingPlanToServiceGet) + assert result.pricing_plan_id == _pricing_plan_id + assert result.service_version == _second_service_version + + result = ( + await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + ) + assert isinstance(result, list) + assert len(result) == 2 + + # Connect different service + _different_service_key = ServiceKey(_SERVICE_KEY_3) + result = await pricing_plans.connect_service_to_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + service_key=_different_service_key, + service_version=ServiceVersion(_SERVICE_VERSION_3), + ) + assert isinstance(result, PricingPlanToServiceGet) + assert result.pricing_plan_id == _pricing_plan_id + assert result.service_key == _different_service_key + + result = ( + await pricing_plans.list_connected_services_to_pricing_plan_by_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + ) + assert isinstance(result, list) + assert len(result) == 3 + + +async def test_rpc_pricing_plans_with_units_workflow__for_licenses( + mocked_redis_server: None, + resource_tracker_setup_db: None, + rpc_client: RabbitMQRPCClient, + faker: Faker, +): + _display_name = faker.word() + result = await pricing_plans.create_pricing_plan( + rpc_client, + data=PricingPlanCreate( + product_name="osparc", + display_name=_display_name, + description=faker.sentence(), + classification=PricingPlanClassification.LICENSE, + pricing_plan_key=faker.word(), + ), + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units == [] + assert result.display_name == _display_name + _pricing_plan_id = result.pricing_plan_id + + result = await pricing_units.create_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostCreate( + pricing_plan_id=_pricing_plan_id, + unit_name="VIP MODEL", + unit_extra_info=UnitExtraInfoLicense.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + cost_per_unit=Decimal(10), + comment=faker.sentence(), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result + _first_pricing_unit_id = result.pricing_unit_id + _current_cost_per_unit_id = result.current_cost_per_unit_id + + # Get pricing plan + result = await pricing_plans.get_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units + assert len(result.pricing_units) == 1 + assert result.pricing_units[0].pricing_unit_id == _first_pricing_unit_id + + # Update only the pricing unit info, without a COST update + _unit_name = "1 seat" + result = await pricing_units.update_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostUpdate( + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + unit_name=_unit_name, + unit_extra_info=UnitExtraInfoLicense.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + pricing_unit_cost_update=None, + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result.unit_name == _unit_name + assert 
result.current_cost_per_unit == Decimal(10) + assert result.current_cost_per_unit_id == _current_cost_per_unit_id + + # Update pricing unit with COST update! + result = await pricing_units.update_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostUpdate( + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + unit_name=_unit_name, + unit_extra_info=UnitExtraInfoLicense.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info=SpecificInfo(aws_ec2_instances=[]), + pricing_unit_cost_update=PricingUnitCostUpdate( + cost_per_unit=Decimal(15), + comment="Comment update", + ), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result.unit_name == _unit_name + assert result.current_cost_per_unit == Decimal(15) + assert result.current_cost_per_unit_id != _current_cost_per_unit_id + + # Test get pricing unit + result = await pricing_units.get_pricing_unit( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + pricing_unit_id=_first_pricing_unit_id, + ) + assert isinstance(result, RutPricingUnitGet) + assert result.current_cost_per_unit == Decimal(15) + + # Create one more unit + result = await pricing_units.create_pricing_unit( + rpc_client, + product_name="osparc", + data=PricingUnitWithCostCreate( + pricing_plan_id=_pricing_plan_id, + unit_name="5 seats", + unit_extra_info=UnitExtraInfoLicense.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info=SpecificInfo(aws_ec2_instances=[]), + cost_per_unit=Decimal(20), + comment=faker.sentence(), + ), + ) + assert isinstance(result, RutPricingUnitGet) + assert result + _second_pricing_unit_id = result.pricing_unit_id + + # Get pricing plan with units + result = await pricing_plans.get_pricing_plan( + rpc_client, + product_name="osparc", + pricing_plan_id=_pricing_plan_id, + ) + assert isinstance(result, RutPricingPlanGet) + assert result.pricing_units + assert len(result.pricing_units) == 2 + assert result.pricing_units[0].pricing_unit_id == _first_pricing_unit_id + assert result.pricing_units[1].pricing_unit_id == _second_pricing_unit_id diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__export.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__export.py new file mode 100644 index 00000000000..44a6ce56016 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__export.py @@ -0,0 +1,80 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments + +import os +from unittest.mock import AsyncMock, Mock + +import pytest +import sqlalchemy as sa +from moto.server import ThreadedMotoServer +from pydantic import AnyUrl, TypeAdapter +from pytest_mock import MockerFixture +from pytest_simcore.helpers.typing_env import EnvVarsDict +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import service_runs +from settings_library.s3 import S3Settings +from types_aiobotocore_s3 import S3Client + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + +_USER_ID = 1 + + +@pytest.fixture +async def mocked_export(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_resource_usage_tracker.services.service_runs.service_runs_db.export_service_runs_table_to_s3", + 
autospec=True, + ) + + +@pytest.fixture +async def mocked_presigned_link(mocker: MockerFixture) -> AsyncMock: + return mocker.patch( + "simcore_service_resource_usage_tracker.services.service_runs.SimcoreS3API.create_single_presigned_download_link", + return_value=TypeAdapter(AnyUrl).validate_python("https://www.testing.com/"), + ) + + +@pytest.fixture +async def enable_resource_usage_tracker_s3( + mock_env: EnvVarsDict, + mocked_aws_server: ThreadedMotoServer, + mocked_s3_server_envs: EnvVarsDict, + mocked_s3_server_settings: S3Settings, + s3_client: S3Client, + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Create bucket + await s3_client.create_bucket(Bucket=mocked_s3_server_settings.S3_BUCKET_NAME) + + # Remove the environment variable + if "RESOURCE_USAGE_TRACKER_S3" in os.environ: + monkeypatch.delenv("RESOURCE_USAGE_TRACKER_S3") + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_which_was_billed( + enable_resource_usage_tracker_s3: None, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + rpc_client: RabbitMQRPCClient, + mocked_export: Mock, + mocked_presigned_link: Mock, +): + download_url = await service_runs.export_service_runs( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + ) + assert isinstance(download_url, AnyUrl) # nosec + assert mocked_export.called + assert mocked_presigned_link.called diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_aggregated_usages.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_aggregated_usages.py new file mode 100644 index 00000000000..eea94827a44 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_aggregated_usages.py @@ -0,0 +1,238 @@ +from collections.abc import Iterator +from datetime import datetime, timedelta, timezone + +import pytest +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + OsparcCreditsAggregatedUsagesPage, +) +from models_library.resource_tracker import ( + ServicesAggregatedUsagesTimePeriod, + ServicesAggregatedUsagesType, +) +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import service_runs +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) + +pytest_simcore_core_services_selection = ["postgres", "rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_USER_ID_1 = 1 +_USER_ID_2 = 2 +_SERVICE_RUN_ID_1 = "1" +_SERVICE_RUN_ID_2 = "2" +_SERVICE_RUN_ID_3 = "3" +_SERVICE_RUN_ID_4 = "4" +_SERVICE_RUN_ID_5 = "5" +_SERVICE_RUN_ID_6 = "6" +_WALLET_ID = 6 + + +@pytest.fixture() +def resource_tracker_setup_db( + postgres_db: sa.engine.Engine, + random_resource_tracker_service_run, + random_resource_tracker_credit_transactions, +) -> Iterator[None]: + with postgres_db.connect() as con: + # Service run table + result = con.execute( + resource_tracker_service_runs.insert() + .values( + [ + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_1, + started_at=datetime.now(tz=timezone.utc) - timedelta(hours=1), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/jupyter-smash", + ), + random_resource_tracker_service_run( + user_id=_USER_ID_2, + 
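# NOTE: a rough reading of this seeded data, assuming aggregation is grouped by service key: for user 1 only + # jupyter-smash runs fall within the last day/week (one aggregated item), while the sim4life run started + # 10 days ago should only appear in the ONE_MONTH period (two items), as asserted in the test below. +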
service_run_id=_SERVICE_RUN_ID_2, + started_at=datetime.now(tz=timezone.utc) - timedelta(hours=1), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/jupyter-smash", + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_3, + started_at=datetime.now(tz=timezone.utc) - timedelta(hours=1), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/jupyter-smash", + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_4, + started_at=datetime.now(tz=timezone.utc) - timedelta(hours=1), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/jupyter-smash", + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_5, + started_at=datetime.now(tz=timezone.utc) - timedelta(days=3), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/jupyter-smash", + ), + random_resource_tracker_service_run( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_6, + started_at=datetime.now(tz=timezone.utc) - timedelta(days=10), + stopped_at=datetime.now(tz=timezone.utc), + service_run_status="SUCCESS", + service_key="simcore/services/dynamic/sim4life", + ), + ] + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + + # Transaction table + result = con.execute( + resource_tracker_credit_transactions.insert() + .values( + [ + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_1, + product_name="osparc", + transaction_status="BILLED", + transaction_classification="DEDUCT_SERVICE_RUN", + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_2, + service_run_id=_SERVICE_RUN_ID_2, + product_name="osparc", + transaction_status="BILLED", + transaction_classification="DEDUCT_SERVICE_RUN", + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_4, + product_name="osparc", + transaction_status="BILLED", + transaction_classification="DEDUCT_SERVICE_RUN", + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_5, + product_name="osparc", + transaction_status="BILLED", + transaction_classification="DEDUCT_SERVICE_RUN", + wallet_id=_WALLET_ID, + ), + random_resource_tracker_credit_transactions( + user_id=_USER_ID_1, + service_run_id=_SERVICE_RUN_ID_6, + product_name="osparc", + transaction_status="BILLED", + transaction_classification="DEDUCT_SERVICE_RUN", + wallet_id=_WALLET_ID, + ), + ] + ) + .returning(resource_tracker_credit_transactions) + ) + row = result.first() + assert row + + yield + + con.execute(resource_tracker_credit_transactions.delete()) + con.execute(resource_tracker_service_runs.delete()) + + +async def test_rpc_get_osparc_credits_aggregated_usages_page( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + rpc_client: RabbitMQRPCClient, + resource_tracker_setup_db: dict, +): + result = await service_runs.get_osparc_credits_aggregated_usages_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + aggregated_by=ServicesAggregatedUsagesType.services, + time_period=ServicesAggregatedUsagesTimePeriod.ONE_DAY, + wallet_id=123, # <-- testing non existing 
wallet + access_all_wallet_usage=False, + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) + assert len(result.items) == 0 + assert result.total == 0 + + result = await service_runs.get_osparc_credits_aggregated_usages_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + aggregated_by=ServicesAggregatedUsagesType.services, + time_period=ServicesAggregatedUsagesTimePeriod.ONE_DAY, # <-- testing + wallet_id=_WALLET_ID, + access_all_wallet_usage=False, + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) + assert len(result.items) == 1 + assert result.total == 1 + first_osparc_credits = result.items[0].osparc_credits + + result = await service_runs.get_osparc_credits_aggregated_usages_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + aggregated_by=ServicesAggregatedUsagesType.services, + time_period=ServicesAggregatedUsagesTimePeriod.ONE_DAY, + wallet_id=_WALLET_ID, + access_all_wallet_usage=True, # <-- testing + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) + assert len(result.items) == 1 + assert result.total == 1 + second_osparc_credits = result.items[0].osparc_credits + assert second_osparc_credits < first_osparc_credits + + result = await service_runs.get_osparc_credits_aggregated_usages_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + aggregated_by=ServicesAggregatedUsagesType.services, + time_period=ServicesAggregatedUsagesTimePeriod.ONE_WEEK, # <-- testing + wallet_id=_WALLET_ID, + access_all_wallet_usage=False, + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) + assert len(result.items) == 1 + assert result.total == 1 + third_osparc_credits = result.items[0].osparc_credits + assert third_osparc_credits < first_osparc_credits + + result = await service_runs.get_osparc_credits_aggregated_usages_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + aggregated_by=ServicesAggregatedUsagesType.services, + time_period=ServicesAggregatedUsagesTimePeriod.ONE_MONTH, # <-- testing + wallet_id=_WALLET_ID, + access_all_wallet_usage=False, + ) + assert isinstance(result, OsparcCreditsAggregatedUsagesPage) + assert len(result.items) == 2 + assert result.total == 2 diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_billable.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_billable.py new file mode 100644 index 00000000000..16a95eeb8de --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_billable.py @@ -0,0 +1,212 @@ +from collections.abc import Iterator +from datetime import datetime, timedelta, timezone +from decimal import Decimal + +import pytest +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + ServiceRunPage, +) +from models_library.resource_tracker import ( + CreditTransactionStatus, + ServiceResourceUsagesFilters, + StartedAt, +) +from models_library.rest_ordering import OrderBy, OrderDirection +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq._errors import RPCServerError +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import ( + credit_transactions, + service_runs, +) +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) + 
+pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + +_USER_ID = 1 +_SERVICE_RUN_ID_1 = "12345" +_SERVICE_RUN_ID_2 = "54321" + + +@pytest.fixture() +def resource_tracker_setup_db( + postgres_db: sa.engine.Engine, + random_resource_tracker_service_run, + random_resource_tracker_credit_transactions, +) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute(resource_tracker_service_runs.delete()) + con.execute(resource_tracker_credit_transactions.delete()) + # Service run table + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID, + service_run_id=_SERVICE_RUN_ID_1, + product_name="osparc", + started_at=datetime.now(tz=timezone.utc), + ) + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID, + service_run_id=_SERVICE_RUN_ID_2, + product_name="osparc", + started_at=datetime.now(tz=timezone.utc) - timedelta(days=1), + ) + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + + # Transaction table + result = con.execute( + resource_tracker_credit_transactions.insert() + .values( + **random_resource_tracker_credit_transactions( + user_id=_USER_ID, + service_run_id=_SERVICE_RUN_ID_1, + product_name="osparc", + ) + ) + .returning(resource_tracker_credit_transactions) + ) + row = result.first() + assert row + + yield + + con.execute(resource_tracker_credit_transactions.delete()) + con.execute(resource_tracker_service_runs.delete()) + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_which_was_billed( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_setup_db: dict, + rpc_client: RabbitMQRPCClient, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + ) + assert isinstance(result, ServiceRunPage) + + assert len(result.items) == 2 + assert result.total == 2 + _get_credit_cost = result.items[0].credit_cost + assert _get_credit_cost + assert _get_credit_cost < 0 + assert result.items[0].transaction_status in list(CreditTransactionStatus) + _get_service_run_id = result.items[0].service_run_id + + result = ( + await credit_transactions.get_transaction_current_credits_by_service_run_id( + rpc_client, + service_run_id=_get_service_run_id, + ) + ) + assert isinstance(result, Decimal) + assert result == _get_credit_cost + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_with_filtered_by__started_at( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_setup_db: dict, + rpc_client: RabbitMQRPCClient, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + filters=ServiceResourceUsagesFilters( + started_at=StartedAt( + from_=datetime.now(timezone.utc) + timedelta(days=1), + until=datetime.now(timezone.utc) + timedelta(days=1), + ) + ), + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 0 + assert result.total == 0 + + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + filters=ServiceResourceUsagesFilters( + started_at=StartedAt( + from_=datetime.now(timezone.utc), + until=datetime.now(timezone.utc), + ) + ), + ) + assert isinstance(result, 
ServiceRunPage) + assert len(result.items) == 1 + assert result.total == 1 + + +@pytest.mark.parametrize( + "direction,service_run_id", + [(OrderDirection.DESC, _SERVICE_RUN_ID_1), (OrderDirection.ASC, _SERVICE_RUN_ID_2)], +) +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_with_order_by__started_at( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_setup_db: dict, + rpc_client: RabbitMQRPCClient, + direction: OrderDirection, + service_run_id: str, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + order_by=OrderBy(field="started_at", direction=direction), + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 2 + assert result.total == 2 + + assert result.items[0].service_run_id == service_run_id + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_raising_custom_error( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_setup_db: dict, + rpc_client: RabbitMQRPCClient, +): + with pytest.raises(RPCServerError) as e: + await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + access_all_wallet_usage=True, + ) + assert e diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_with_wallet.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_with_wallet.py new file mode 100644 index 00000000000..2c43f986e4f --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_with_wallet.py @@ -0,0 +1,93 @@ +from collections.abc import Iterator + +import pytest +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + ServiceRunPage, +) +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import service_runs +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) + +pytest_simcore_core_services_selection = ["postgres", "rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_TOTAL_GENERATED_RESOURCE_TRACKER_SERVICE_RUNS_ROWS = 10 +_USER_ID_1 = 1 +_USER_ID_2 = 2 +_WALLET_ID = 6 + + +@pytest.fixture() +def resource_tracker_service_run_db( + postgres_db: sa.engine.Engine, random_resource_tracker_service_run +) -> Iterator[list]: + with postgres_db.connect() as con: + con.execute(resource_tracker_service_runs.delete()) + created_services = [] + for _ in range(_TOTAL_GENERATED_RESOURCE_TRACKER_SERVICE_RUNS_ROWS): + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID_1, wallet_id=_WALLET_ID + ) + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + created_services.append(row) + + for _ in range(_TOTAL_GENERATED_RESOURCE_TRACKER_SERVICE_RUNS_ROWS): + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID_2, wallet_id=_WALLET_ID + ) + ) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + created_services.append(row) + + yield created_services + + con.execute(resource_tracker_service_runs.delete()) + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_with_wallet( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_service_run_db: dict, + 
rpc_client: RabbitMQRPCClient, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + wallet_id=_WALLET_ID, + access_all_wallet_usage=False, + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 10 + assert result.total == 10 + + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID_1, + product_name="osparc", + wallet_id=_WALLET_ID, + access_all_wallet_usage=True, + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 20 + assert result.total == 20 diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_without_wallet.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_without_wallet.py new file mode 100644 index 00000000000..0f8cfb6b911 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_api_service_runs__list_without_wallet.py @@ -0,0 +1,76 @@ +from collections.abc import Iterator + +import pytest +import sqlalchemy as sa +from models_library.api_schemas_resource_usage_tracker.service_runs import ( + ServiceRunPage, +) +from servicelib.rabbitmq import RabbitMQRPCClient +from servicelib.rabbitmq.rpc_interfaces.resource_usage_tracker import service_runs +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) + +pytest_simcore_core_services_selection = ["postgres", "rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_TOTAL_GENERATED_RESOURCE_TRACKER_SERVICE_RUNS_ROWS = 30 +_USER_ID = 1 + + +@pytest.fixture() +def resource_tracker_service_run_db( + postgres_db: sa.engine.Engine, random_resource_tracker_service_run +) -> Iterator[list]: + with postgres_db.connect() as con: + # removes all projects before continuing + con.execute(resource_tracker_service_runs.delete()) + created_services = [] + for _ in range(_TOTAL_GENERATED_RESOURCE_TRACKER_SERVICE_RUNS_ROWS): + result = con.execute( + resource_tracker_service_runs.insert() + .values(**random_resource_tracker_service_run(user_id=_USER_ID)) + .returning(resource_tracker_service_runs) + ) + row = result.first() + assert row + created_services.append(row) + yield created_services + + con.execute(resource_tracker_service_runs.delete()) + + +@pytest.mark.rpc_test() +async def test_rpc_list_service_runs_with_wallet( + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_service_run_db: dict, + rpc_client: RabbitMQRPCClient, +): + result = await service_runs.get_service_run_page( + rpc_client, + user_id=_USER_ID, + product_name="osparc", + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 20 + assert result.total == 30 + + result = await service_runs.get_service_run_page( + rpc_client, user_id=_USER_ID, product_name="osparc", offset=5, limit=10 + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 10 + assert result.total == 30 + + result = await service_runs.get_service_run_page( + rpc_client, + user_id=12345, + product_name="non-existing", + ) + assert isinstance(result, ServiceRunPage) + assert len(result.items) == 0 + assert result.total == 0 diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py new file mode 100644 index 00000000000..42249956a8e --- /dev/null +++ 
b/services/resource-usage-tracker/tests/unit/with_dbs/test_background_task_periodic_heartbeat_check.py @@ -0,0 +1,198 @@ +from collections.abc import Callable, Iterator +from datetime import UTC, datetime, timedelta + +import pytest +import sqlalchemy as sa +from models_library.resource_tracker import ( + CreditTransactionStatus, + ResourceTrackerServiceType, + ServiceRunStatus, +) +from servicelib.rabbitmq import RabbitMQClient +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_service_resource_usage_tracker.core.settings import ApplicationSettings +from simcore_service_resource_usage_tracker.models.credit_transactions import ( + CreditTransactionDB, +) +from simcore_service_resource_usage_tracker.models.service_runs import ServiceRunDB +from simcore_service_resource_usage_tracker.services.background_task_periodic_heartbeat_check import ( + check_running_services, +) + +pytest_simcore_core_services_selection = ["postgres", "rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + +_SERVICE_RUN_ID_OSPARC_10_MIN_OLD = "1" +_SERVICE_RUN_ID_S4L_10_MIN_OLD = "2" +_SERVICE_RUN_ID_OSPARC_NOW = "3" + +_LAST_HEARTBEAT_10_MIN_OLD = datetime.now(tz=UTC) - timedelta(minutes=10) +_LAST_HEARTBEAT_NOW = datetime.now(tz=UTC) + + +@pytest.fixture() +def resource_tracker_setup_db( + postgres_db: sa.engine.Engine, + random_resource_tracker_service_run, + random_resource_tracker_credit_transactions, +) -> Iterator[None]: + with postgres_db.connect() as con: + # Populate service runs table + con.execute( + resource_tracker_service_runs.insert().values( + **random_resource_tracker_service_run( + service_run_id=_SERVICE_RUN_ID_OSPARC_10_MIN_OLD, + service_type=ResourceTrackerServiceType.COMPUTATIONAL_SERVICE, + product_name="osparc", + last_heartbeat_at=_LAST_HEARTBEAT_10_MIN_OLD, + modified=_LAST_HEARTBEAT_10_MIN_OLD, + started_at=_LAST_HEARTBEAT_10_MIN_OLD - timedelta(minutes=1), + ) + ) + ) + con.execute( + resource_tracker_service_runs.insert().values( + **random_resource_tracker_service_run( + service_run_id=_SERVICE_RUN_ID_S4L_10_MIN_OLD, + service_type=ResourceTrackerServiceType.DYNAMIC_SERVICE, + product_name="s4l", + last_heartbeat_at=_LAST_HEARTBEAT_10_MIN_OLD, + modified=_LAST_HEARTBEAT_10_MIN_OLD, + started_at=_LAST_HEARTBEAT_10_MIN_OLD - timedelta(minutes=1), + ) + ) + ) + con.execute( + resource_tracker_service_runs.insert().values( + **random_resource_tracker_service_run( + service_run_id=_SERVICE_RUN_ID_OSPARC_NOW, + product_name="osparc", + modified=_LAST_HEARTBEAT_NOW, + last_heartbeat_at=_LAST_HEARTBEAT_NOW, + ) + ) + ) + # Populate credit transactions table + con.execute( + resource_tracker_credit_transactions.insert().values( + **random_resource_tracker_credit_transactions( + service_run_id=_SERVICE_RUN_ID_OSPARC_10_MIN_OLD, + product_name="osparc", + modified=_LAST_HEARTBEAT_10_MIN_OLD, + last_heartbeat_at=_LAST_HEARTBEAT_10_MIN_OLD, + transaction_status="PENDING", + ) + ) + ) + con.execute( + resource_tracker_credit_transactions.insert().values( + **random_resource_tracker_credit_transactions( + service_run_id=_SERVICE_RUN_ID_S4L_10_MIN_OLD, + product_name="s4l", + modified=_LAST_HEARTBEAT_10_MIN_OLD, + last_heartbeat_at=_LAST_HEARTBEAT_10_MIN_OLD, + transaction_status="PENDING", + ) + ) + ) + con.execute( + resource_tracker_credit_transactions.insert().values( + 
**random_resource_tracker_credit_transactions(
+                    service_run_id=_SERVICE_RUN_ID_OSPARC_NOW,
+                    product_name="osparc",
+                    modified=_LAST_HEARTBEAT_NOW,
+                    last_heartbeat_at=_LAST_HEARTBEAT_NOW,
+                    transaction_status="PENDING",
+                )
+            )
+        )
+
+        yield
+
+        con.execute(resource_tracker_credit_transactions.delete())
+        con.execute(resource_tracker_service_runs.delete())
+
+
+async def test_process_event_functions(
+    create_rabbitmq_client: Callable[[str], RabbitMQClient],
+    mocked_redis_server: None,
+    postgres_db: sa.engine.Engine,
+    resource_tracker_setup_db,
+    initialized_app,
+):
+    engine = initialized_app.state.engine
+    app_settings: ApplicationSettings = initialized_app.state.settings
+
+    for _ in range(app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL):
+        await check_running_services(initialized_app)
+        # NOTE: The check requires the modified field to be older than a given
+        # threshold, so we artificially age this field in this test
+        with postgres_db.connect() as con:
+            fake_old_modified_at = datetime.now(tz=UTC) - timedelta(minutes=5)
+            update_stmt = resource_tracker_service_runs.update().values(
+                modified=fake_old_modified_at
+            )
+            con.execute(update_stmt)
+
+    # Check that the max acceptable number of missed heartbeats is reached before considering the services unhealthy
+    with postgres_db.connect() as con:
+        result = con.execute(sa.select(resource_tracker_service_runs))
+        service_run_db = [ServiceRunDB.model_validate(row) for row in result]
+        for service_run in service_run_db:
+            if service_run.service_run_id in (
+                _SERVICE_RUN_ID_OSPARC_10_MIN_OLD,
+                _SERVICE_RUN_ID_S4L_10_MIN_OLD,
+            ):
+                assert (
+                    service_run.missed_heartbeat_counter
+                    == app_settings.RESOURCE_USAGE_TRACKER_MISSED_HEARTBEAT_COUNTER_FAIL
+                )
+                assert service_run.service_run_status == ServiceRunStatus.RUNNING
+            else:
+                assert service_run.missed_heartbeat_counter == 0
+                assert service_run.service_run_status == ServiceRunStatus.RUNNING
+
+    # Calling the function one more time should mark some of the running services as unhealthy
+    await check_running_services(initialized_app)
+
+    with postgres_db.connect() as con:
+        result = con.execute(sa.select(resource_tracker_service_runs))
+        service_run_db = [ServiceRunDB.model_validate(row) for row in result]
+        for service_run in service_run_db:
+            if service_run.service_run_id in (
+                _SERVICE_RUN_ID_OSPARC_10_MIN_OLD,
+                _SERVICE_RUN_ID_S4L_10_MIN_OLD,
+            ):
+                assert service_run.service_run_status == ServiceRunStatus.ERROR
+                assert service_run.service_run_status_msg is not None
+            else:
+                assert service_run.missed_heartbeat_counter == 0
+                assert service_run.service_run_status == ServiceRunStatus.RUNNING
+
+    with postgres_db.connect() as con:
+        result = con.execute(sa.select(resource_tracker_credit_transactions))
+        credit_transaction_db = [
+            CreditTransactionDB.model_validate(row) for row in result
+        ]
+        for transaction in credit_transaction_db:
+            if transaction.service_run_id in (
+                _SERVICE_RUN_ID_OSPARC_10_MIN_OLD,
+                _SERVICE_RUN_ID_S4L_10_MIN_OLD,
+            ):
+                if transaction.service_run_id == _SERVICE_RUN_ID_OSPARC_10_MIN_OLD:
+                    # Computational service is not billed
+                    assert (
+                        transaction.transaction_status == CreditTransactionStatus.NOT_BILLED
+                    )
+                else:
+                    # Dynamic service is billed
+                    assert transaction.transaction_status == CreditTransactionStatus.BILLED
+            else:
+                assert transaction.transaction_status == CreditTransactionStatus.PENDING
diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_licensed_items_checkouts_db.py
b/services/resource-usage-tracker/tests/unit/with_dbs/test_licensed_items_checkouts_db.py new file mode 100644 index 00000000000..11a7015e490 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_licensed_items_checkouts_db.py @@ -0,0 +1,141 @@ +# pylint:disable=unused-variable +# pylint:disable=unused-argument +# pylint:disable=redefined-outer-name +# pylint:disable=too-many-arguments + + +from datetime import UTC, datetime +from typing import Generator +from unittest import mock + +import pytest +import sqlalchemy as sa +from models_library.basic_types import IDStr +from models_library.rest_ordering import OrderBy +from simcore_postgres_database.models.resource_tracker_licensed_items_checkouts import ( + resource_tracker_licensed_items_checkouts, +) +from simcore_postgres_database.models.resource_tracker_service_runs import ( + resource_tracker_service_runs, +) +from simcore_service_resource_usage_tracker.models.licensed_items_checkouts import ( + CreateLicensedItemCheckoutDB, +) +from simcore_service_resource_usage_tracker.services.modules.db import ( + licensed_items_checkouts_db, +) + +pytest_simcore_core_services_selection = [ + "postgres", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +_USER_ID_1 = 1 +_WALLET_ID = 6 + + +@pytest.fixture() +def resource_tracker_service_run_id( + postgres_db: sa.engine.Engine, random_resource_tracker_service_run +) -> Generator[str, None, None]: + with postgres_db.connect() as con: + result = con.execute( + resource_tracker_service_runs.insert() + .values( + **random_resource_tracker_service_run( + user_id=_USER_ID_1, wallet_id=_WALLET_ID + ) + ) + .returning(resource_tracker_service_runs.c.service_run_id) + ) + row = result.first() + assert row + + yield row[0] + + con.execute(resource_tracker_licensed_items_checkouts.delete()) + con.execute(resource_tracker_service_runs.delete()) + + +async def test_licensed_items_checkouts_db__force_release_license_seats_by_run_id( + mocked_redis_server: None, + mocked_setup_rabbitmq: mock.Mock, + resource_tracker_service_run_id, + initialized_app, +): + engine = initialized_app.state.engine + + # SETUP + _create_license_item_checkout_db_1 = CreateLicensedItemCheckoutDB( + licensed_item_id="beb16d18-d57d-44aa-a638-9727fa4a72ef", + key="Duke", + version="1.0.0", + wallet_id=_WALLET_ID, + user_id=_USER_ID_1, + user_email="test@test.com", + product_name="osparc", + service_run_id=resource_tracker_service_run_id, + started_at=datetime.now(tz=UTC), + num_of_seats=1, + ) + await licensed_items_checkouts_db.create( + engine, data=_create_license_item_checkout_db_1 + ) + + _create_license_item_checkout_db_2 = _create_license_item_checkout_db_1.model_dump() + _create_license_item_checkout_db_2[ + "licensed_item_id" + ] = "b1b96583-333f-44d6-b1e0-5c0a8af555bf" + await licensed_items_checkouts_db.create( + engine, + data=CreateLicensedItemCheckoutDB.model_construct( + **_create_license_item_checkout_db_2 + ), + ) + + _create_license_item_checkout_db_3 = _create_license_item_checkout_db_1.model_dump() + _create_license_item_checkout_db_3[ + "licensed_item_id" + ] = "38a5ce59-876f-482a-ace1-d3b2636feac6" + checkout = await licensed_items_checkouts_db.create( + engine, + data=CreateLicensedItemCheckoutDB.model_construct( + **_create_license_item_checkout_db_3 + ), + ) + + _helper_time = datetime.now(UTC) + await licensed_items_checkouts_db.update( + engine, + licensed_item_checkout_id=checkout.licensed_item_checkout_id, + product_name="osparc", + stopped_at=_helper_time, + ) + + # TEST 
FORCE RELEASE LICENSE SEATS + await licensed_items_checkouts_db.force_release_license_seats_by_run_id( + engine, service_run_id=resource_tracker_service_run_id + ) + + # ASSERT + total, items = await licensed_items_checkouts_db.list_( + engine, + product_name="osparc", + filter_wallet_id=_WALLET_ID, + offset=0, + limit=5, + order_by=OrderBy(field=IDStr("started_at")), + ) + assert total == 3 + assert len(items) == 3 + + _helper_count = 0 + for item in items: + assert isinstance(item.stopped_at, datetime) + if item.stopped_at > _helper_time: + _helper_count += 1 + + assert _helper_count == 2 diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py new file mode 100644 index 00000000000..57eb9735e68 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message.py @@ -0,0 +1,66 @@ +from collections.abc import Callable +from datetime import datetime, timezone + +import sqlalchemy as sa +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from servicelib.rabbitmq import RabbitMQClient +from simcore_service_resource_usage_tracker.services.process_message_running_service import ( + _process_heartbeat_event, + _process_start_event, + _process_stop_event, +) + +from .conftest import assert_service_runs_db_row + +pytest_simcore_core_services_selection = ["postgres", "rabbit"] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_process_event_functions( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_rabbit_message_start, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_service_run_db, + initialized_app, +): + engine = initialized_app.state.engine + publisher = create_rabbitmq_client("publisher") + + msg = random_rabbit_message_start( + wallet_id=None, + wallet_name=None, + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + ) + await _process_start_event(engine, msg, publisher) + output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) + assert output.stopped_at is None + assert output.service_run_status == "RUNNING" + first_occurence_of_last_heartbeat_at = output.last_heartbeat_at + + heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( + service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) + ) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) + output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) + assert output.stopped_at is None + assert output.service_run_status == "RUNNING" + assert first_occurence_of_last_heartbeat_at < output.last_heartbeat_at + + stopped_msg = RabbitResourceTrackingStoppedMessage( + service_run_id=msg.service_run_id, + created_at=datetime.now(tz=timezone.utc), + simcore_platform_status=SimcorePlatformStatus.OK, + ) + await _process_stop_event(engine, stopped_msg, publisher) + output = await assert_service_runs_db_row(postgres_db, msg.service_run_id) + assert output.stopped_at is not None + assert output.service_run_status == "SUCCESS" diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening.py new file mode 100644 index 00000000000..aa5bee5ffd9 --- 
/dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening.py @@ -0,0 +1,60 @@ +from collections.abc import Callable +from datetime import datetime, timezone + +# NOTE: This test fails when running locally and you are connected through VPN: Temporary failure in name resolution [Errno -3] +import sqlalchemy as sa +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingBaseMessage, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from servicelib.rabbitmq import RabbitMQClient + +from .conftest import assert_service_runs_db_row + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +async def test_process_events_via_rabbit( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_rabbit_message_start, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + initialized_app, + resource_tracker_service_run_db, +): + publisher = create_rabbitmq_client("publisher") + msg = random_rabbit_message_start( + wallet_id=None, + wallet_name=None, + pricing_plan_id=None, + pricing_unit_id=None, + pricing_unit_cost_id=None, + ) + await publisher.publish(RabbitResourceTrackingBaseMessage.get_channel_name(), msg) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "RUNNING") + + heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( + service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) + ) + await publisher.publish( + RabbitResourceTrackingBaseMessage.get_channel_name(), heartbeat_msg + ) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "RUNNING") + + stopped_msg = RabbitResourceTrackingStoppedMessage( + service_run_id=msg.service_run_id, + created_at=datetime.now(tz=timezone.utc), + simcore_platform_status=SimcorePlatformStatus.OK, + ) + await publisher.publish( + RabbitResourceTrackingBaseMessage.get_channel_name(), stopped_msg + ) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "SUCCESS") diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening_with_billing.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening_with_billing.py new file mode 100644 index 00000000000..04f6c19d02e --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_triggered_by_listening_with_billing.py @@ -0,0 +1,203 @@ +import asyncio +from datetime import datetime, timezone +from decimal import Decimal +from typing import Callable, Iterator + +import pytest +import sqlalchemy as sa +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingBaseMessage, + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, +) +from models_library.resource_tracker import UnitExtraInfoTier +from servicelib.rabbitmq import RabbitMQClient +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + 
resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.models.services import services_meta_data + +from .conftest import assert_service_runs_db_row + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture() +def resource_tracker_pricing_tables_db(postgres_db: sa.engine.Engine) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute( + resource_tracker_pricing_plans.insert().values( + product_name="osparc", + display_name="ISolve Thermal", + description="", + classification="TIER", + is_active=True, + pricing_plan_key="isolve-thermal", + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="S", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=1, + pricing_unit_name="S", + cost_per_unit=Decimal(500), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="M", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=2, + pricing_unit_name="M", + cost_per_unit=Decimal(1000), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="L", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=3, + pricing_unit_name="L", + cost_per_unit=Decimal(1500), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + services_meta_data.insert().values( + key="simcore/services/comp/itis/sleeper", + version="1.0.16", + name="name", + description="description", + ) + ) + con.execute( + resource_tracker_pricing_plan_to_service.insert().values( + pricing_plan_id=1, + service_key="simcore/services/comp/itis/sleeper", + service_version="1.0.16", + service_default_plan=True, + ) + ) + + yield + + con.execute(resource_tracker_pricing_plan_to_service.delete()) + con.execute(resource_tracker_pricing_units.delete()) + con.execute(resource_tracker_pricing_plans.delete()) + con.execute(resource_tracker_pricing_unit_costs.delete()) + 
con.execute(resource_tracker_credit_transactions.delete()) + con.execute(services_meta_data.delete()) + + +@pytest.mark.flaky(max_runs=3) +async def test_process_events_via_rabbit( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_rabbit_message_start, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + initialized_app, + resource_tracker_service_run_db, + resource_tracker_pricing_tables_db, +): + publisher = create_rabbitmq_client("publisher") + msg = random_rabbit_message_start( + wallet_id=1, + wallet_name="what ever", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + ) + await publisher.publish(RabbitResourceTrackingBaseMessage.get_channel_name(), msg) + await asyncio.sleep(3) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "RUNNING") + + heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( + service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) + ) + await publisher.publish( + RabbitResourceTrackingBaseMessage.get_channel_name(), heartbeat_msg + ) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "RUNNING") + + stopped_msg = RabbitResourceTrackingStoppedMessage( + service_run_id=msg.service_run_id, + created_at=datetime.now(tz=timezone.utc), + simcore_platform_status=SimcorePlatformStatus.OK, + ) + await publisher.publish( + RabbitResourceTrackingBaseMessage.get_channel_name(), stopped_msg + ) + await assert_service_runs_db_row(postgres_db, msg.service_run_id, "SUCCESS") diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py new file mode 100644 index 00000000000..bdffb9cec4e --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing.py @@ -0,0 +1,259 @@ +import asyncio +from collections.abc import Callable, Iterator +from datetime import datetime, timezone +from decimal import Decimal +from unittest import mock + +import pytest +import sqlalchemy as sa +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, + WalletCreditsLimitReachedMessage, +) +from models_library.resource_tracker import ( + CreditClassification, + CreditTransactionStatus, + UnitExtraInfoTier, +) +from pytest_mock.plugin import MockerFixture +from servicelib.rabbitmq import RabbitMQClient +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.models.services import services_meta_data +from simcore_service_resource_usage_tracker.services.process_message_running_service import ( + _process_heartbeat_event, + _process_start_event, + _process_stop_event, +) +from tenacity.asyncio import AsyncRetrying +from tenacity.retry import retry_if_exception_type +from tenacity.stop import stop_after_delay +from tenacity.wait 
import wait_fixed + +from .conftest import assert_credit_transactions_db_row + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture() +def resource_tracker_pricing_tables_db(postgres_db: sa.engine.Engine) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute( + resource_tracker_pricing_plans.insert().values( + product_name="osparc", + display_name="ISolve Thermal", + description="", + classification="TIER", + is_active=True, + pricing_plan_key="isolve-thermal", + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="S", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=1, + pricing_unit_name="S", + cost_per_unit=Decimal(500), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="M", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=True, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=2, + pricing_unit_name="M", + cost_per_unit=Decimal(1000), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="L", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=3, + pricing_unit_name="L", + cost_per_unit=Decimal(1500), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + services_meta_data.insert().values( + key="simcore/services/comp/itis/sleeper", + version="1.0.16", + name="name", + description="description", + ) + ) + con.execute( + resource_tracker_pricing_plan_to_service.insert().values( + pricing_plan_id=1, + service_key="simcore/services/comp/itis/sleeper", + service_version="1.0.16", + service_default_plan=True, + ) + ) + + yield + + con.execute(resource_tracker_pricing_plan_to_service.delete()) + con.execute(resource_tracker_pricing_units.delete()) + con.execute(resource_tracker_pricing_plans.delete()) + con.execute(resource_tracker_pricing_unit_costs.delete()) + con.execute(resource_tracker_credit_transactions.delete()) + con.execute(services_meta_data.delete()) + + +@pytest.fixture +async def mocked_message_parser(mocker: MockerFixture) -> mock.AsyncMock: + return mocker.AsyncMock(return_value=True) + + +async 
def test_process_event_functions( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_rabbit_message_start, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_service_run_db, + resource_tracker_pricing_tables_db, + initialized_app, + mocked_message_parser, +): + engine = initialized_app.state.engine + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + await consumer.subscribe( + WalletCreditsLimitReachedMessage.get_channel_name(), + mocked_message_parser, + topics=["#"], + ) + + msg = random_rabbit_message_start( + wallet_id=1, + wallet_name="test", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + ) + + await _process_start_event(engine, msg, publisher) + output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id) + assert output.osparc_credits == 0.0 + assert output.transaction_status == CreditTransactionStatus.PENDING.value + assert ( + output.transaction_classification + == CreditClassification.DEDUCT_SERVICE_RUN.value + ) + first_occurence_of_last_heartbeat_at = output.last_heartbeat_at + modified_at = output.modified + + await asyncio.sleep(0) + heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( + service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) + ) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) + output = await assert_credit_transactions_db_row( + postgres_db, msg.service_run_id, modified_at + ) + first_credits_used = output.osparc_credits + assert first_credits_used < 0.0 + assert output.transaction_status == CreditTransactionStatus.PENDING.value + assert first_occurence_of_last_heartbeat_at < output.last_heartbeat_at + modified_at = output.modified + + await asyncio.sleep( + 2 + ) # NOTE: Computation of credits depends on time ((stop-start)*cost_per_unit) + stopped_msg = RabbitResourceTrackingStoppedMessage( + service_run_id=msg.service_run_id, + created_at=datetime.now(tz=timezone.utc), + simcore_platform_status=SimcorePlatformStatus.OK, + ) + await _process_stop_event(engine, stopped_msg, publisher) + output = await assert_credit_transactions_db_row( + postgres_db, msg.service_run_id, modified_at + ) + assert output.osparc_credits < first_credits_used + assert output.transaction_status == CreditTransactionStatus.IN_DEBT.value + + async for attempt in AsyncRetrying( + wait=wait_fixed(0.1), + stop=stop_after_delay(5), + retry=retry_if_exception_type(AssertionError), + reraise=True, + ): + with attempt: + mocked_message_parser.assert_called_once() diff --git a/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py new file mode 100644 index 00000000000..6c14cc32e13 --- /dev/null +++ b/services/resource-usage-tracker/tests/unit/with_dbs/test_process_rabbitmq_message_with_billing_cost_0.py @@ -0,0 +1,180 @@ +import asyncio +from collections.abc import Callable, Iterator +from datetime import datetime, timezone +from decimal import Decimal +from unittest import mock + +import pytest +import sqlalchemy as sa +from models_library.rabbitmq_messages import ( + RabbitResourceTrackingHeartbeatMessage, + RabbitResourceTrackingStoppedMessage, + SimcorePlatformStatus, + WalletCreditsLimitReachedMessage, +) +from models_library.resource_tracker import UnitExtraInfoTier +from pytest_mock.plugin import MockerFixture +from servicelib.rabbitmq import 
RabbitMQClient +from simcore_postgres_database.models.resource_tracker_credit_transactions import ( + resource_tracker_credit_transactions, +) +from simcore_postgres_database.models.resource_tracker_pricing_plan_to_service import ( + resource_tracker_pricing_plan_to_service, +) +from simcore_postgres_database.models.resource_tracker_pricing_plans import ( + resource_tracker_pricing_plans, +) +from simcore_postgres_database.models.resource_tracker_pricing_unit_costs import ( + resource_tracker_pricing_unit_costs, +) +from simcore_postgres_database.models.resource_tracker_pricing_units import ( + resource_tracker_pricing_units, +) +from simcore_postgres_database.models.services import services_meta_data +from simcore_service_resource_usage_tracker.services.process_message_running_service import ( + _process_heartbeat_event, + _process_start_event, + _process_stop_event, +) + +from .conftest import assert_credit_transactions_db_row + +pytest_simcore_core_services_selection = [ + "postgres", + "rabbit", +] +pytest_simcore_ops_services_selection = [ + "adminer", +] + + +@pytest.fixture() +def resource_tracker_pricing_tables_db(postgres_db: sa.engine.Engine) -> Iterator[None]: + with postgres_db.connect() as con: + con.execute( + resource_tracker_pricing_plans.insert().values( + product_name="osparc", + display_name="ISolve Thermal", + description="", + classification="TIER", + is_active=True, + pricing_plan_key="isolve-thermal", + ) + ) + con.execute( + resource_tracker_pricing_units.insert().values( + pricing_plan_id=1, + unit_name="S", + unit_extra_info=UnitExtraInfoTier.model_config["json_schema_extra"][ + "examples" + ][0], + default=False, + specific_info={}, + created=datetime.now(tz=timezone.utc), + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + resource_tracker_pricing_unit_costs.insert().values( + pricing_plan_id=1, + pricing_plan_key="isolve-thermal", + pricing_unit_id=1, + pricing_unit_name="S", + cost_per_unit=Decimal(0), + valid_from=datetime.now(tz=timezone.utc), + valid_to=None, + created=datetime.now(tz=timezone.utc), + comment="", + modified=datetime.now(tz=timezone.utc), + ) + ) + con.execute( + services_meta_data.insert().values( + key="simcore/services/comp/itis/sleeper", + version="1.0.16", + name="name", + description="description", + ) + ) + con.execute( + resource_tracker_pricing_plan_to_service.insert().values( + pricing_plan_id=1, + service_key="simcore/services/comp/itis/sleeper", + service_version="1.0.16", + service_default_plan=True, + ) + ) + + yield + + con.execute(resource_tracker_pricing_plan_to_service.delete()) + con.execute(resource_tracker_pricing_units.delete()) + con.execute(resource_tracker_pricing_plans.delete()) + con.execute(resource_tracker_pricing_unit_costs.delete()) + con.execute(resource_tracker_credit_transactions.delete()) + con.execute(services_meta_data.delete()) + + +@pytest.fixture +async def mocked_message_parser(mocker: MockerFixture) -> mock.AsyncMock: + return mocker.AsyncMock(return_value=True) + + +async def test_process_event_functions( + create_rabbitmq_client: Callable[[str], RabbitMQClient], + random_rabbit_message_start, + mocked_redis_server: None, + postgres_db: sa.engine.Engine, + resource_tracker_service_run_db, + resource_tracker_pricing_tables_db, + initialized_app, + mocked_message_parser, +): + engine = initialized_app.state.engine + publisher = create_rabbitmq_client("publisher") + consumer = create_rabbitmq_client("consumer") + await consumer.subscribe( + 
WalletCreditsLimitReachedMessage.get_channel_name(), + mocked_message_parser, + topics=["#"], + ) + + msg = random_rabbit_message_start( + wallet_id=1, + wallet_name="test", + pricing_plan_id=1, + pricing_unit_id=1, + pricing_unit_cost_id=1, + ) + + await _process_start_event(engine, msg, publisher) + output = await assert_credit_transactions_db_row(postgres_db, msg.service_run_id) + assert output.osparc_credits == 0.0 + assert output.transaction_status == "PENDING" + assert output.transaction_classification == "DEDUCT_SERVICE_RUN" + first_occurence_of_last_heartbeat_at = output.last_heartbeat_at + modified_at = output.modified + + await asyncio.sleep(0) + heartbeat_msg = RabbitResourceTrackingHeartbeatMessage( + service_run_id=msg.service_run_id, created_at=datetime.now(tz=timezone.utc) + ) + await _process_heartbeat_event(engine, heartbeat_msg, publisher) + output = await assert_credit_transactions_db_row( + postgres_db, msg.service_run_id, modified_at + ) + assert output.osparc_credits == 0.0 + assert output.transaction_status == "PENDING" + assert first_occurence_of_last_heartbeat_at < output.last_heartbeat_at + + stopped_msg = RabbitResourceTrackingStoppedMessage( + service_run_id=msg.service_run_id, + created_at=datetime.now(tz=timezone.utc), + simcore_platform_status=SimcorePlatformStatus.OK, + ) + await _process_stop_event(engine, stopped_msg, publisher) + output = await assert_credit_transactions_db_row( + postgres_db, msg.service_run_id, modified_at + ) + assert output.osparc_credits == 0.0 + assert output.transaction_status == "BILLED" diff --git a/services/static-webserver/README.md b/services/static-webserver/README.md index 582f7fb6a3e..5b9d3691fb5 100644 --- a/services/static-webserver/README.md +++ b/services/static-webserver/README.md @@ -2,9 +2,9 @@ Used for static content serving. -### Furtherr steps +## Further steps In the future will fully serve all static content. Currently the `webserver` is still serving the following routes: -- `/` resolves to one of the three index.html pages inside the 4 products (osparc, tis, s4l, s4llite) +- `/` resolves to one of the many index.html pages inside the products (osparc, tis, s4l...) 
- `/static-frontend-data.json` contains information required by the fronted diff --git a/services/static-webserver/client/.eslintrc.json b/services/static-webserver/client/.eslintrc.json index a31cb1035a6..39bbadd185f 100644 --- a/services/static-webserver/client/.eslintrc.json +++ b/services/static-webserver/client/.eslintrc.json @@ -23,7 +23,7 @@ "error", { "allowAfterThis": true, - "enforceInMethodNames": true + "enforceInMethodNames": false } ], "no-warning-comments": "off", @@ -35,7 +35,12 @@ "ignoreChainWithDepth": 3 } ], - "no-eq-null": 0 + "no-eq-null": 0, + "semi": "off", + "comma-dangle": "off", + "object-curly-spacing": "off", + "no-implicit-coercion": "off", + "arrow-body-style": "off" }, "env": { "browser": true diff --git a/services/static-webserver/client/.gitignore b/services/static-webserver/client/.gitignore index e661698d295..64479ebc19f 100644 --- a/services/static-webserver/client/.gitignore +++ b/services/static-webserver/client/.gitignore @@ -14,6 +14,3 @@ source/resource/iconfont/material # generator outputs /api/ /test/ - -# translations for the moment ignored -*.po diff --git a/services/static-webserver/client/Makefile b/services/static-webserver/client/Makefile index fc276f02dd6..89fafcca1ae 100644 --- a/services/static-webserver/client/Makefile +++ b/services/static-webserver/client/Makefile @@ -10,7 +10,7 @@ export VCS_REF:=$(shell git rev-parse --short HEAD) export VCS_REF_CLIENT:=$(shell git log --pretty=tformat:"%h" -n1 .) export VCS_STATUS_CLIENT:=$(if $(shell git status -s),'modified/untracked','clean') -docker_compose := docker-compose -f tools/docker-compose.yml +docker_compose := docker compose -f tools/docker-compose.yml docker_file := tools/qooxdoo-kit/builder/Dockerfile docker_image := client/$(subst /Dockerfile,,$(docker_file)):latest @@ -45,16 +45,18 @@ follow-dev-logs: ## follow the logs of the qx compiler compile: ## qx compiles host' 'source' -> image's 'build-output' # qx compile 'source' within $(docker_image) image [itisfoundation/qooxdoo-kit:${QOOXDOO_KIT_TAG}] @docker buildx build --file $(docker_file) --tag $(docker_image) \ + --load \ --build-arg tag=${QOOXDOO_KIT_TAG} \ --build-arg VCS_REF=${VCS_REF} \ --build-arg VCS_REF_CLIENT=${VCS_REF_CLIENT} \ --build-arg VCS_STATUS_CLIENT=${VCS_STATUS_CLIENT} \ --build-arg VCS_URL=${VCS_URL} \ --target=build-client . + python ./scripts/post-compile.py touch: ## minimal image build with /project/output-build inside # touch /project/output-build such that multi-stage 'services/web/Dockerfile' can build development target (fixes #1097) - @docker buildx build --file $(docker_file) --tag $(docker_image) --build-arg tag=${QOOXDOO_KIT_TAG} --target=touch . + @docker buildx build --load --file $(docker_file) --tag $(docker_image) --build-arg tag=${QOOXDOO_KIT_TAG} --target=touch . 
upgrade: ## upgrade to official version of the tool # upgrading to ${QOOXDOO_KIT_TAG} @@ -62,7 +64,7 @@ upgrade: ## upgrade to official version of the tool .PHONY: down down: ## tear down docker-compose - @${docker_compose} down + @${docker_compose} down --remove-orphans # qx serve -------------------------- @@ -86,6 +88,17 @@ serve: compile ## serves site compiled in image in 127.0.0.1:8080 docker run --rm -p 8080:8080 $(docker_image) $(qx_serve) --target=build +# qx translate -------------------------- + +define qx_translate_extract = + qx compile --update-po-files +endef + +.PHONY: translate-extract +translate-extract: translate-extract ## the generated .po files goes to source/translation https://qooxdoo.org/documentation/v7.8/#/development/howto/internationalization?id=translation + # qx compile --update-po-files + $(docker_compose) run $(if $(detached),--detach --name=$(detached),--rm) qooxdoo-kit $(qx_translate_extract) + # misc -------------------------- .PHONY: shell diff --git a/services/static-webserver/client/Manifest.json b/services/static-webserver/client/Manifest.json index 9cca71cf94c..de9e0909368 100644 --- a/services/static-webserver/client/Manifest.json +++ b/services/static-webserver/client/Manifest.json @@ -24,11 +24,12 @@ "script": [ "socketio/socket.io.min.js", "osparc/pacemaker.js", + "osparc/schedulerWorker.js", "svg/svg.js", "svg/svg.path.js", "jsondiffpatch/jsondiffpatch.min.js", "jsontreeviewer/jsonTree.js", - "marked/marked.js", + "marked/marked.min.js", "DOMPurify/purify.min.js" ], "css": [ @@ -40,7 +41,7 @@ "requires": { "@qooxdoo/compiler": "^1.0.0-beta", "@qooxdoo/framework": "^6.0.0-beta", - "ITISFoundation/qx-iconfont-fontawesome5": "^0.2.0", + "ITISFoundation/qx-iconfont-fontawesome5": "^0.2.2", "ITISFoundation/qx-osparc-theme": "^0.5.6", "qooxdoo/qxl.testtapper": "^0.4.3", "qooxdoo/qxl.apiviewer": "^1.0.0-beta", diff --git a/services/static-webserver/client/compile.json b/services/static-webserver/client/compile.json index 79aeb692422..7ede67de747 100644 --- a/services/static-webserver/client/compile.json +++ b/services/static-webserver/client/compile.json @@ -40,7 +40,7 @@ "class": "osparc.Application", "theme": "osparc.theme.products.s4l.ThemeDark", "name": "s4l", - "title": "Sim4Life - ZMT", + "title": "Sim4Life", "include": [ "iconfont.material.Load", "iconfont.fontawesome5.Load", @@ -56,7 +56,7 @@ "class": "osparc.Application", "theme": "osparc.theme.products.s4l.ThemeDark", "name": "s4llite", - "title": "S4L lite", + "title": "Sim4Life.lite", "include": [ "iconfont.material.Load", "iconfont.fontawesome5.Load", @@ -68,11 +68,75 @@ "addTimestampsToUrls": true, "bootPath": "source/boot" }, + { + "class": "osparc.Application", + "theme": "osparc.theme.products.s4l.ThemeDark", + "name": "s4lacad", + "title": "Sim4Life Science", + "include": [ + "iconfont.material.Load", + "iconfont.fontawesome5.Load", + "osparc.theme.products.s4l.ThemeLight" + ], + "environment": { + "product.name": "s4lacad" + }, + "addTimestampsToUrls": true, + "bootPath": "source/boot" + }, + { + "class": "osparc.Application", + "theme": "osparc.theme.products.s4l.ThemeDark", + "name": "s4lengine", + "title": "Sim4Life Engineering", + "include": [ + "iconfont.material.Load", + "iconfont.fontawesome5.Load", + "osparc.theme.products.s4l.ThemeLight" + ], + "environment": { + "product.name": "s4lengine" + }, + "addTimestampsToUrls": true, + "bootPath": "source/boot" + }, + { + "class": "osparc.Application", + "theme": "osparc.theme.products.s4l.ThemeDark", + "name": "s4ldesktop", + 
"title": "Sim4Life Desktop", + "include": [ + "iconfont.material.Load", + "iconfont.fontawesome5.Load", + "osparc.theme.products.s4l.ThemeLight" + ], + "environment": { + "product.name": "s4ldesktop" + }, + "addTimestampsToUrls": true, + "bootPath": "source/boot" + }, + { + "class": "osparc.Application", + "theme": "osparc.theme.products.s4l.ThemeDark", + "name": "s4ldesktopacad", + "title": "Sim4Life Desktop Academia", + "include": [ + "iconfont.material.Load", + "iconfont.fontawesome5.Load", + "osparc.theme.products.s4l.ThemeLight" + ], + "environment": { + "product.name": "s4ldesktopacad" + }, + "addTimestampsToUrls": true, + "bootPath": "source/boot" + }, { "class": "osparc.Application", "theme": "osparc.theme.products.tis.ThemeDark", "name": "tis", - "title": "TI Plan - IT'IS", + "title": "TIP V3.0 - IT'IS", "include": [ "iconfont.material.Load", "iconfont.fontawesome5.Load", @@ -83,6 +147,22 @@ }, "addTimestampsToUrls": true, "bootPath": "source/boot" + }, + { + "class": "osparc.Application", + "theme": "osparc.theme.products.tis.ThemeDark", + "name": "tiplite", + "title": "TIP.lite - IT'IS", + "include": [ + "iconfont.material.Load", + "iconfont.fontawesome5.Load", + "osparc.theme.products.tis.ThemeLight" + ], + "environment": { + "product.name": "tiplite" + }, + "addTimestampsToUrls": true, + "bootPath": "source/boot" } ], "eslintConfig": { diff --git a/services/static-webserver/client/qx-lock.json b/services/static-webserver/client/qx-lock.json index d52dff39e77..71b270ebe76 100644 --- a/services/static-webserver/client/qx-lock.json +++ b/services/static-webserver/client/qx-lock.json @@ -2,11 +2,11 @@ "libraries": [ { "library_name": "qx-iconfont-fontawesome5", - "library_version": "0.2.0", - "path": "qx_packages/ITISFoundation_qx-iconfont-fontawesome5_v0_2_0", + "library_version": "1.0.0", + "path": "qx_packages/ITISFoundation_qx-iconfont-fontawesome5_v0_2_2", "uri": "ITISFoundation/qx-iconfont-fontawesome5", "repo_name": "ITISFoundation/qx-iconfont-fontawesome5", - "repo_tag": "v0.2.0" + "repo_tag": "v0.2.2" }, { "library_name": "qx-osparc-theme", diff --git a/services/static-webserver/client/scripts/apps_metadata.json b/services/static-webserver/client/scripts/apps_metadata.json new file mode 100644 index 00000000000..38959e46090 --- /dev/null +++ b/services/static-webserver/client/scripts/apps_metadata.json @@ -0,0 +1,95 @@ +{ + "applications": [ + { + "application": "osparc", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/osparc/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/osparc/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/osparc/browserconfig.xml", + "replace_me_og_title": "oSPARC", + "replace_me_og_description": "open online simulations for Stimulating Peripheral Activity to Relieve Conditions", + "replace_me_og_image": "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/services/static-webserver/client/source/resource/osparc/favicon-osparc.png" + } + }, { + "application": "s4l", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "Sim4Life", + "replace_me_og_description": "Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": 
"https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-default.png" + } + }, { + "application": "s4lacad", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "Sim4Life Science", + "replace_me_og_description": "Sim4Life for Science - Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-academy.png" + } + }, { + "application": "s4lengine", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "Sim4Life Engineering", + "replace_me_og_description": "Sim4Life for Engineers - Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-default.png" + } + }, { + "application": "s4ldesktop", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "Sim4Life (Desktop)", + "replace_me_og_description": "Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-default.png" + } + }, { + "application": "s4ldesktopacad", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "Sim4Life Science (Desktop)", + "replace_me_og_description": "Sim4Life for Science - Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-academy.png" + } + }, { + "application": "s4llite", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/s4l/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/s4l/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/s4l/browserconfig.xml", + "replace_me_og_title": "S4L Lite", + "replace_me_og_description": "Sim4Life for Students - Computational life sciences platform that combines computable human phantoms, powerful physics solvers and advanced tissue models.", + "replace_me_og_image": "https://raw.githubusercontent.com/ZurichMedTech/s4l-assets/main/app/full/background-images/S4L/Sim4Life-head-lite.png" + } + }, { + "application": "tis", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/tis/icons/favicon-32x32.png", + 
"replace_me_manifest_uri": "/resource/osparc/tis/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/tis/browserconfig.xml", + "replace_me_og_title": "TI Plan - IT'IS", + "replace_me_og_description": "A tool powered by oΒ²SΒ²PARC technology that reduces optimization of targeted neurostimulation protocols.", + "replace_me_og_image": "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/services/static-webserver/client/source/resource/osparc/tip_splitimage.png" + } + }, { + "application": "tiplite", + "replacements": { + "replace_me_favicon_uri": "/resource/osparc/tis/icons/favicon-32x32.png", + "replace_me_manifest_uri": "/resource/osparc/tis/manifest.json", + "replace_me_browserconfig_uri": "/resource/osparc/tis/browserconfig.xml", + "replace_me_og_title": "TI Plan lite - IT'IS", + "replace_me_og_description": "A tool powered by oΒ²SΒ²PARC technology that reduces optimization of targeted neurostimulation protocols.", + "replace_me_og_image": "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/services/static-webserver/client/source/resource/osparc/tip_splitimage.png" + } + } + ] +} diff --git a/services/static-webserver/client/scripts/post-compile.py b/services/static-webserver/client/scripts/post-compile.py new file mode 100644 index 00000000000..650bcd4e0aa --- /dev/null +++ b/services/static-webserver/client/scripts/post-compile.py @@ -0,0 +1,83 @@ +import json +import os +import random +from pathlib import Path + +output_folders = [ + "source-output", # dev output + "build-output", # default production output + "build-client", # I believe we create the production outputs here +] + + +def _get_applications_from_metadata(): + dirname = os.path.dirname(__file__) + meta_filename = os.path.join(dirname, "apps_metadata.json") + with open(meta_filename) as file: + metadata = json.load(file) + return metadata["applications"] + + +def update_apps_metadata(): + dirname = os.path.dirname(__file__) + applications = _get_applications_from_metadata() + for i in applications: + application = i.get("application") + for output_folder in output_folders: + index_file_path = Path(dirname).joinpath( + "..", output_folder, application, "index.html" + ) + if os.path.isfile(index_file_path): + print(f"Updating app metadata: {index_file_path.resolve()}") + replacements = i.get("replacements") + for key in replacements: + replace_text = replacements[key] + index_file_path.write_text( + index_file_path.read_text().replace( + "${" + key + "}", + replace_text, + ) + ) + + +def _get_output_file_paths(filename): + output_file_paths: list[Path] = [] + dirname = os.path.dirname(__file__) + applications = _get_applications_from_metadata() + for i in applications: + application = i.get("application") + for output_folder in output_folders: + output_file_path = Path(dirname).joinpath( + "..", output_folder, application, filename + ) + if output_file_path.is_file(): + output_file_paths.append(output_file_path) + return output_file_paths + + +def add_no_cache_param(vcs_ref_client): + index_file_paths = _get_output_file_paths("index.html") + for index_file_path in index_file_paths: + print(f"Updating vcs_ref_client: {index_file_path.resolve()}") + index_file_path.write_text( + index_file_path.read_text().replace( + "${boot_params}", + "nocache=" + vcs_ref_client, + ) + ) + + boot_file_paths = _get_output_file_paths("boot.js") + for boot_file_path in boot_file_paths: + print(f"Updating addNoCacheParam URL_PARAMETERS: {boot_file_path.resolve()}") + 
boot_file_path.write_text( + boot_file_path.read_text().replace( + "addNoCacheParam : false", + "addNoCacheParam : true", + ) + ) + + +if __name__ == "__main__": + update_apps_metadata() + vcs_ref_client = os.getenv("VCS_REF_CLIENT", str(random.random())) + add_no_cache_param(vcs_ref_client) diff --git a/services/static-webserver/client/source/boot/index.html b/services/static-webserver/client/source/boot/index.html index 014f74b544d..6a02f3cda47 100644 --- a/services/static-webserver/client/source/boot/index.html +++ b/services/static-webserver/client/source/boot/index.html @@ -20,18 +20,31 @@ [hunk markup lost in extraction: the added/removed <head> tags of index.html are not recoverable; only the "${appTitle}" title placeholder survives]